summaryrefslogtreecommitdiff
path: root/contrib/SDL-3.2.8/src/stdlib
diff options
context:
space:
mode:
Diffstat (limited to 'contrib/SDL-3.2.8/src/stdlib')
-rw-r--r--contrib/SDL-3.2.8/src/stdlib/SDL_casefolding.h2769
-rw-r--r--contrib/SDL-3.2.8/src/stdlib/SDL_crc16.c52
-rw-r--r--contrib/SDL-3.2.8/src/stdlib/SDL_crc32.c50
-rw-r--r--contrib/SDL-3.2.8/src/stdlib/SDL_getenv.c601
-rw-r--r--contrib/SDL-3.2.8/src/stdlib/SDL_getenv_c.h24
-rw-r--r--contrib/SDL-3.2.8/src/stdlib/SDL_iconv.c860
-rw-r--r--contrib/SDL-3.2.8/src/stdlib/SDL_malloc.c6507
-rw-r--r--contrib/SDL-3.2.8/src/stdlib/SDL_memcpy.c101
-rw-r--r--contrib/SDL-3.2.8/src/stdlib/SDL_memmove.c73
-rw-r--r--contrib/SDL-3.2.8/src/stdlib/SDL_memset.c139
-rw-r--r--contrib/SDL-3.2.8/src/stdlib/SDL_mslibc.c746
-rw-r--r--contrib/SDL-3.2.8/src/stdlib/SDL_mslibc_arm64.masm26
-rw-r--r--contrib/SDL-3.2.8/src/stdlib/SDL_mslibc_x64.masm29
-rw-r--r--contrib/SDL-3.2.8/src/stdlib/SDL_murmur3.c87
-rw-r--r--contrib/SDL-3.2.8/src/stdlib/SDL_qsort.c574
-rw-r--r--contrib/SDL-3.2.8/src/stdlib/SDL_random.c115
-rw-r--r--contrib/SDL-3.2.8/src/stdlib/SDL_stdlib.c567
-rw-r--r--contrib/SDL-3.2.8/src/stdlib/SDL_string.c2515
-rw-r--r--contrib/SDL-3.2.8/src/stdlib/SDL_strtokr.c95
-rw-r--r--contrib/SDL-3.2.8/src/stdlib/SDL_sysstdlib.h32
-rw-r--r--contrib/SDL-3.2.8/src/stdlib/SDL_vacopy.h30
21 files changed, 15992 insertions, 0 deletions
diff --git a/contrib/SDL-3.2.8/src/stdlib/SDL_casefolding.h b/contrib/SDL-3.2.8/src/stdlib/SDL_casefolding.h
new file mode 100644
index 0000000..6fbe0a7
--- /dev/null
+++ b/contrib/SDL-3.2.8/src/stdlib/SDL_casefolding.h
@@ -0,0 +1,2769 @@
1/*
2 Simple DirectMedia Layer
3 Copyright (C) 1997-2025 Sam Lantinga <slouken@libsdl.org>
4
5 This software is provided 'as-is', without any express or implied
6 warranty. In no event will the authors be held liable for any damages
7 arising from the use of this software.
8
9 Permission is granted to anyone to use this software for any purpose,
10 including commercial applications, and to alter it and redistribute it
11 freely, subject to the following restrictions:
12
13 1. The origin of this software must not be misrepresented; you must not
14 claim that you wrote the original software. If you use this software
15 in a product, an acknowledgment in the product documentation would be
16 appreciated but is not required.
17 2. Altered source versions must be plainly marked as such, and must not be
18 misrepresented as being the original software.
19 3. This notice may not be removed or altered from any source distribution.
20*/
21
22/*
23 * This data was generated by SDL/build-scripts/makecasefoldhashtable.pl
24 *
25 * Do not manually edit this file!
26 */
27
28#ifndef SDL_casefolding_h_
29#define SDL_casefolding_h_
30
31/* We build three simple hashmaps here: one that maps Unicode codepoints to
32one, two, or three lowercase codepoints. To retrieve this info: look at
33case_fold_hashX, where X is 1, 2, or 3. Most foldable codepoints fold to one,
34a few dozen fold to two, and a handful fold to three. If the codepoint isn't
35in any of these hashes, it doesn't fold (no separate upper and lowercase).
36
37Almost all these codepoints fit into 16 bits, so we hash them as such to save
38memory. If a codepoint is > 0xFFFF, we have separate hashes for them,
39since there are (currently) only about 120 of them and (currently) all of them
40map to a single lowercase codepoint. */
41
/* One fold entry for a codepoint above the BMP (> 0xFFFF): 'from' case-folds
   to the single lowercase codepoint 'to0' (per the note above, all such
   codepoints currently fold to exactly one codepoint). */
42typedef struct CaseFoldMapping1_32
43{
44    Uint32 from;
45    Uint32 to0;
46} CaseFoldMapping1_32;
47
/* One fold entry for a 16-bit codepoint: 'from' case-folds to the single
   lowercase codepoint 'to0'. */
48typedef struct CaseFoldMapping1_16
49{
50    Uint16 from;
51    Uint16 to0;
52} CaseFoldMapping1_16;
53
/* One fold entry for a 16-bit codepoint that folds to TWO lowercase
   codepoints: 'from' expands to the sequence 'to0', 'to1'. */
54typedef struct CaseFoldMapping2_16
55{
56    Uint16 from;
57    Uint16 to0;
58    Uint16 to1;
59} CaseFoldMapping2_16;
60
/* One fold entry for a 16-bit codepoint that folds to THREE lowercase
   codepoints: 'from' expands to the sequence 'to0', 'to1', 'to2'. */
61typedef struct CaseFoldMapping3_16
62{
63    Uint16 from;
64    Uint16 to0;
65    Uint16 to1;
66    Uint16 to2;
67} CaseFoldMapping3_16;
68
/* A hash bucket over the 1:1 16-bit fold tables: 'list' points at 'count'
   CaseFoldMapping1_16 entries that share the same hash value.
   NOTE(review): the hash function itself is defined elsewhere — confirm
   against the lookup code that consumes these buckets. */
69typedef struct CaseFoldHashBucket1_16
70{
71    const CaseFoldMapping1_16 *list;
72    const Uint8 count;
73} CaseFoldHashBucket1_16;
74
/* A hash bucket over the 1:1 32-bit (non-BMP) fold tables: 'list' points at
   'count' CaseFoldMapping1_32 entries sharing the same hash value. */
75typedef struct CaseFoldHashBucket1_32
76{
77    const CaseFoldMapping1_32 *list;
78    const Uint8 count;
79} CaseFoldHashBucket1_32;
80
/* A hash bucket over the 1:2 fold tables: 'list' points at 'count'
   CaseFoldMapping2_16 entries sharing the same hash value. */
81typedef struct CaseFoldHashBucket2_16
82{
83    const CaseFoldMapping2_16 *list;
84    const Uint8 count;
85} CaseFoldHashBucket2_16;
86
/* A hash bucket over the 1:3 fold tables: 'list' points at 'count'
   CaseFoldMapping3_16 entries sharing the same hash value. */
87typedef struct CaseFoldHashBucket3_16
88{
89    const CaseFoldMapping3_16 *list;
90    const Uint8 count;
91} CaseFoldHashBucket3_16;
92
93static const CaseFoldMapping1_16 case_fold1_16_000[] = {
94 { 0x0202, 0x0203 },
95 { 0x0404, 0x0454 },
96 { 0x1E1E, 0x1E1F },
97 { 0x2C2C, 0x2C5C },
98 { 0xABAB, 0x13DB }
99};
100
101static const CaseFoldMapping1_16 case_fold1_16_001[] = {
102 { 0x0100, 0x0101 },
103 { 0x0405, 0x0455 },
104 { 0x0504, 0x0505 },
105 { 0x2C2D, 0x2C5D },
106 { 0xA7A6, 0xA7A7 },
107 { 0xABAA, 0x13DA }
108};
109
110static const CaseFoldMapping1_16 case_fold1_16_002[] = {
111 { 0x0200, 0x0201 },
112 { 0x0406, 0x0456 },
113 { 0x1E1C, 0x1E1D },
114 { 0x1F1D, 0x1F15 },
115 { 0x2C2E, 0x2C5E },
116 { 0xABA9, 0x13D9 }
117};
118
119static const CaseFoldMapping1_16 case_fold1_16_003[] = {
120 { 0x0102, 0x0103 },
121 { 0x0407, 0x0457 },
122 { 0x0506, 0x0507 },
123 { 0x1F1C, 0x1F14 },
124 { 0x2C2F, 0x2C5F },
125 { 0xA7A4, 0xA7A5 },
126 { 0xABA8, 0x13D8 }
127};
128
129static const CaseFoldMapping1_16 case_fold1_16_004[] = {
130 { 0x0206, 0x0207 },
131 { 0x0400, 0x0450 },
132 { 0x1E1A, 0x1E1B },
133 { 0x1F1B, 0x1F13 },
134 { 0x2C28, 0x2C58 },
135 { 0xABAF, 0x13DF }
136};
137
138static const CaseFoldMapping1_16 case_fold1_16_005[] = {
139 { 0x0104, 0x0105 },
140 { 0x0401, 0x0451 },
141 { 0x0500, 0x0501 },
142 { 0x1F1A, 0x1F12 },
143 { 0x2C29, 0x2C59 },
144 { 0xA7A2, 0xA7A3 },
145 { 0xABAE, 0x13DE }
146};
147
148static const CaseFoldMapping1_16 case_fold1_16_006[] = {
149 { 0x0204, 0x0205 },
150 { 0x0402, 0x0452 },
151 { 0x1E18, 0x1E19 },
152 { 0x1F19, 0x1F11 },
153 { 0x2C2A, 0x2C5A },
154 { 0xABAD, 0x13DD }
155};
156
157static const CaseFoldMapping1_16 case_fold1_16_007[] = {
158 { 0x0106, 0x0107 },
159 { 0x0403, 0x0453 },
160 { 0x0502, 0x0503 },
161 { 0x1F18, 0x1F10 },
162 { 0x2126, 0x03C9 },
163 { 0x2C2B, 0x2C5B },
164 { 0xA7A0, 0xA7A1 },
165 { 0xABAC, 0x13DC }
166};
167
168static const CaseFoldMapping1_16 case_fold1_16_008[] = {
169 { 0x020A, 0x020B },
170 { 0x040C, 0x045C },
171 { 0x1E16, 0x1E17 },
172 { 0x2C24, 0x2C54 },
173 { 0xABA3, 0x13D3 }
174};
175
176static const CaseFoldMapping1_16 case_fold1_16_009[] = {
177 { 0x0108, 0x0109 },
178 { 0x040D, 0x045D },
179 { 0x050C, 0x050D },
180 { 0x2C25, 0x2C55 },
181 { 0xA7AE, 0x026A },
182 { 0xABA2, 0x13D2 }
183};
184
185static const CaseFoldMapping1_16 case_fold1_16_010[] = {
186 { 0x0208, 0x0209 },
187 { 0x040E, 0x045E },
188 { 0x1E14, 0x1E15 },
189 { 0x212B, 0x00E5 },
190 { 0x2C26, 0x2C56 },
191 { 0xA7AD, 0x026C },
192 { 0xABA1, 0x13D1 }
193};
194
195static const CaseFoldMapping1_16 case_fold1_16_011[] = {
196 { 0x010A, 0x010B },
197 { 0x040F, 0x045F },
198 { 0x050E, 0x050F },
199 { 0x212A, 0x006B },
200 { 0x2C27, 0x2C57 },
201 { 0xA7AC, 0x0261 },
202 { 0xABA0, 0x13D0 }
203};
204
205static const CaseFoldMapping1_16 case_fold1_16_012[] = {
206 { 0x020E, 0x020F },
207 { 0x0408, 0x0458 },
208 { 0x1E12, 0x1E13 },
209 { 0x2C20, 0x2C50 },
210 { 0xA7AB, 0x025C },
211 { 0xABA7, 0x13D7 }
212};
213
214static const CaseFoldMapping1_16 case_fold1_16_013[] = {
215 { 0x010C, 0x010D },
216 { 0x0409, 0x0459 },
217 { 0x0508, 0x0509 },
218 { 0x2C21, 0x2C51 },
219 { 0xA7AA, 0x0266 },
220 { 0xABA6, 0x13D6 }
221};
222
223static const CaseFoldMapping1_16 case_fold1_16_014[] = {
224 { 0x020C, 0x020D },
225 { 0x040A, 0x045A },
226 { 0x1E10, 0x1E11 },
227 { 0x2C22, 0x2C52 },
228 { 0xABA5, 0x13D5 }
229};
230
231static const CaseFoldMapping1_16 case_fold1_16_015[] = {
232 { 0x010E, 0x010F },
233 { 0x040B, 0x045B },
234 { 0x050A, 0x050B },
235 { 0x2C23, 0x2C53 },
236 { 0xA7A8, 0xA7A9 },
237 { 0xABA4, 0x13D4 }
238};
239
240static const CaseFoldMapping1_16 case_fold1_16_016[] = {
241 { 0x0212, 0x0213 },
242 { 0x0414, 0x0434 },
243 { 0x1E0E, 0x1E0F },
244 { 0x1F0F, 0x1F07 },
245 { 0xABBB, 0x13EB }
246};
247
248static const CaseFoldMapping1_16 case_fold1_16_017[] = {
249 { 0x0110, 0x0111 },
250 { 0x0415, 0x0435 },
251 { 0x0514, 0x0515 },
252 { 0x1F0E, 0x1F06 },
253 { 0xA7B6, 0xA7B7 },
254 { 0xABBA, 0x13EA }
255};
256
257static const CaseFoldMapping1_16 case_fold1_16_018[] = {
258 { 0x0210, 0x0211 },
259 { 0x0416, 0x0436 },
260 { 0x1E0C, 0x1E0D },
261 { 0x1F0D, 0x1F05 },
262 { 0xABB9, 0x13E9 }
263};
264
265static const CaseFoldMapping1_16 case_fold1_16_019[] = {
266 { 0x0112, 0x0113 },
267 { 0x0417, 0x0437 },
268 { 0x0516, 0x0517 },
269 { 0x1F0C, 0x1F04 },
270 { 0x2132, 0x214E },
271 { 0xA7B4, 0xA7B5 },
272 { 0xABB8, 0x13E8 }
273};
274
275static const CaseFoldMapping1_16 case_fold1_16_020[] = {
276 { 0x0216, 0x0217 },
277 { 0x0410, 0x0430 },
278 { 0x1E0A, 0x1E0B },
279 { 0x1F0B, 0x1F03 },
280 { 0xA7B3, 0xAB53 },
281 { 0xABBF, 0x13EF }
282};
283
284static const CaseFoldMapping1_16 case_fold1_16_021[] = {
285 { 0x0114, 0x0115 },
286 { 0x0411, 0x0431 },
287 { 0x0510, 0x0511 },
288 { 0x1F0A, 0x1F02 },
289 { 0xA7B2, 0x029D },
290 { 0xABBE, 0x13EE }
291};
292
293static const CaseFoldMapping1_16 case_fold1_16_022[] = {
294 { 0x0214, 0x0215 },
295 { 0x0412, 0x0432 },
296 { 0x1E08, 0x1E09 },
297 { 0x1F09, 0x1F01 },
298 { 0xA7B1, 0x0287 },
299 { 0xABBD, 0x13ED }
300};
301
302static const CaseFoldMapping1_16 case_fold1_16_023[] = {
303 { 0x0116, 0x0117 },
304 { 0x0413, 0x0433 },
305 { 0x0512, 0x0513 },
306 { 0x1F08, 0x1F00 },
307 { 0xA7B0, 0x029E },
308 { 0xABBC, 0x13EC }
309};
310
311static const CaseFoldMapping1_16 case_fold1_16_024[] = {
312 { 0x021A, 0x021B },
313 { 0x041C, 0x043C },
314 { 0x1E06, 0x1E07 },
315 { 0xABB3, 0x13E3 }
316};
317
318static const CaseFoldMapping1_16 case_fold1_16_025[] = {
319 { 0x0118, 0x0119 },
320 { 0x041D, 0x043D },
321 { 0x051C, 0x051D },
322 { 0xA7BE, 0xA7BF },
323 { 0xABB2, 0x13E2 }
324};
325
326static const CaseFoldMapping1_16 case_fold1_16_026[] = {
327 { 0x0218, 0x0219 },
328 { 0x041E, 0x043E },
329 { 0x1E04, 0x1E05 },
330 { 0xABB1, 0x13E1 }
331};
332
333static const CaseFoldMapping1_16 case_fold1_16_027[] = {
334 { 0x011A, 0x011B },
335 { 0x041F, 0x043F },
336 { 0x051E, 0x051F },
337 { 0xA7BC, 0xA7BD },
338 { 0xABB0, 0x13E0 }
339};
340
341static const CaseFoldMapping1_16 case_fold1_16_028[] = {
342 { 0x021E, 0x021F },
343 { 0x0418, 0x0438 },
344 { 0x1E02, 0x1E03 },
345 { 0xABB7, 0x13E7 }
346};
347
348static const CaseFoldMapping1_16 case_fold1_16_029[] = {
349 { 0x011C, 0x011D },
350 { 0x0419, 0x0439 },
351 { 0x0518, 0x0519 },
352 { 0xA7BA, 0xA7BB },
353 { 0xABB6, 0x13E6 }
354};
355
356static const CaseFoldMapping1_16 case_fold1_16_030[] = {
357 { 0x021C, 0x021D },
358 { 0x041A, 0x043A },
359 { 0x1E00, 0x1E01 },
360 { 0xABB5, 0x13E5 }
361};
362
363static const CaseFoldMapping1_16 case_fold1_16_031[] = {
364 { 0x011E, 0x011F },
365 { 0x041B, 0x043B },
366 { 0x051A, 0x051B },
367 { 0xA7B8, 0xA7B9 },
368 { 0xABB4, 0x13E4 }
369};
370
371static const CaseFoldMapping1_16 case_fold1_16_032[] = {
372 { 0x0222, 0x0223 },
373 { 0x0424, 0x0444 },
374 { 0x1E3E, 0x1E3F },
375 { 0x1F3F, 0x1F37 },
376 { 0x2C0C, 0x2C3C },
377 { 0xA686, 0xA687 },
378 { 0xAB8B, 0x13BB }
379};
380
381static const CaseFoldMapping1_16 case_fold1_16_033[] = {
382 { 0x0120, 0x0121 },
383 { 0x0425, 0x0445 },
384 { 0x0524, 0x0525 },
385 { 0x1F3E, 0x1F36 },
386 { 0x2C0D, 0x2C3D },
387 { 0xA786, 0xA787 },
388 { 0xAB8A, 0x13BA }
389};
390
391static const CaseFoldMapping1_16 case_fold1_16_034[] = {
392 { 0x0220, 0x019E },
393 { 0x0426, 0x0446 },
394 { 0x1E3C, 0x1E3D },
395 { 0x1F3D, 0x1F35 },
396 { 0x2C0E, 0x2C3E },
397 { 0xA684, 0xA685 },
398 { 0xAB89, 0x13B9 }
399};
400
401static const CaseFoldMapping1_16 case_fold1_16_035[] = {
402 { 0x0122, 0x0123 },
403 { 0x0427, 0x0447 },
404 { 0x0526, 0x0527 },
405 { 0x1F3C, 0x1F34 },
406 { 0x2C0F, 0x2C3F },
407 { 0xA784, 0xA785 },
408 { 0xAB88, 0x13B8 }
409};
410
411static const CaseFoldMapping1_16 case_fold1_16_036[] = {
412 { 0x0226, 0x0227 },
413 { 0x0420, 0x0440 },
414 { 0x1E3A, 0x1E3B },
415 { 0x1F3B, 0x1F33 },
416 { 0x2C08, 0x2C38 },
417 { 0xA682, 0xA683 },
418 { 0xAB8F, 0x13BF }
419};
420
421static const CaseFoldMapping1_16 case_fold1_16_037[] = {
422 { 0x0124, 0x0125 },
423 { 0x0421, 0x0441 },
424 { 0x0520, 0x0521 },
425 { 0x1F3A, 0x1F32 },
426 { 0x2C09, 0x2C39 },
427 { 0xA782, 0xA783 },
428 { 0xAB8E, 0x13BE }
429};
430
431static const CaseFoldMapping1_16 case_fold1_16_038[] = {
432 { 0x0224, 0x0225 },
433 { 0x0422, 0x0442 },
434 { 0x1E38, 0x1E39 },
435 { 0x1F39, 0x1F31 },
436 { 0x2C0A, 0x2C3A },
437 { 0xA680, 0xA681 },
438 { 0xAB8D, 0x13BD }
439};
440
441static const CaseFoldMapping1_16 case_fold1_16_039[] = {
442 { 0x0126, 0x0127 },
443 { 0x0423, 0x0443 },
444 { 0x0522, 0x0523 },
445 { 0x1F38, 0x1F30 },
446 { 0x2C0B, 0x2C3B },
447 { 0xA780, 0xA781 },
448 { 0xAB8C, 0x13BC }
449};
450
451static const CaseFoldMapping1_16 case_fold1_16_040[] = {
452 { 0x022A, 0x022B },
453 { 0x042C, 0x044C },
454 { 0x1E36, 0x1E37 },
455 { 0x2C04, 0x2C34 },
456 { 0xA68E, 0xA68F },
457 { 0xAB83, 0x13B3 }
458};
459
460static const CaseFoldMapping1_16 case_fold1_16_041[] = {
461 { 0x0128, 0x0129 },
462 { 0x042D, 0x044D },
463 { 0x052C, 0x052D },
464 { 0x2C05, 0x2C35 },
465 { 0xAB82, 0x13B2 }
466};
467
468static const CaseFoldMapping1_16 case_fold1_16_042[] = {
469 { 0x0228, 0x0229 },
470 { 0x042E, 0x044E },
471 { 0x1E34, 0x1E35 },
472 { 0x2C06, 0x2C36 },
473 { 0xA68C, 0xA68D },
474 { 0xA78D, 0x0265 },
475 { 0xAB81, 0x13B1 }
476};
477
478static const CaseFoldMapping1_16 case_fold1_16_043[] = {
479 { 0x012A, 0x012B },
480 { 0x042F, 0x044F },
481 { 0x052E, 0x052F },
482 { 0x2C07, 0x2C37 },
483 { 0xAB80, 0x13B0 }
484};
485
486static const CaseFoldMapping1_16 case_fold1_16_044[] = {
487 { 0x022E, 0x022F },
488 { 0x0428, 0x0448 },
489 { 0x1E32, 0x1E33 },
490 { 0x2C00, 0x2C30 },
491 { 0xA68A, 0xA68B },
492 { 0xA78B, 0xA78C },
493 { 0xAB87, 0x13B7 }
494};
495
496static const CaseFoldMapping1_16 case_fold1_16_045[] = {
497 { 0x012C, 0x012D },
498 { 0x0429, 0x0449 },
499 { 0x0528, 0x0529 },
500 { 0x2C01, 0x2C31 },
501 { 0xAB86, 0x13B6 }
502};
503
504static const CaseFoldMapping1_16 case_fold1_16_046[] = {
505 { 0x022C, 0x022D },
506 { 0x042A, 0x044A },
507 { 0x1E30, 0x1E31 },
508 { 0x2C02, 0x2C32 },
509 { 0xA688, 0xA689 },
510 { 0xAB85, 0x13B5 }
511};
512
513static const CaseFoldMapping1_16 case_fold1_16_047[] = {
514 { 0x012E, 0x012F },
515 { 0x042B, 0x044B },
516 { 0x052A, 0x052B },
517 { 0x2C03, 0x2C33 },
518 { 0xAB84, 0x13B4 }
519};
520
521static const CaseFoldMapping1_16 case_fold1_16_048[] = {
522 { 0x0232, 0x0233 },
523 { 0x0535, 0x0565 },
524 { 0x1E2E, 0x1E2F },
525 { 0x1F2F, 0x1F27 },
526 { 0x2C1C, 0x2C4C },
527 { 0xA696, 0xA697 },
528 { 0xAB9B, 0x13CB }
529};
530
531static const CaseFoldMapping1_16 case_fold1_16_049[] = {
532 { 0x0534, 0x0564 },
533 { 0x1F2E, 0x1F26 },
534 { 0x2C1D, 0x2C4D },
535 { 0xA796, 0xA797 },
536 { 0xAB9A, 0x13CA }
537};
538
539static const CaseFoldMapping1_16 case_fold1_16_050[] = {
540 { 0x0230, 0x0231 },
541 { 0x0537, 0x0567 },
542 { 0x1E2C, 0x1E2D },
543 { 0x1F2D, 0x1F25 },
544 { 0x2C1E, 0x2C4E },
545 { 0xA694, 0xA695 },
546 { 0xAB99, 0x13C9 }
547};
548
549static const CaseFoldMapping1_16 case_fold1_16_051[] = {
550 { 0x0132, 0x0133 },
551 { 0x0536, 0x0566 },
552 { 0x1F2C, 0x1F24 },
553 { 0x2C1F, 0x2C4F },
554 { 0xAB98, 0x13C8 }
555};
556
557static const CaseFoldMapping1_16 case_fold1_16_052[] = {
558 { 0x0531, 0x0561 },
559 { 0x1E2A, 0x1E2B },
560 { 0x1F2B, 0x1F23 },
561 { 0x2C18, 0x2C48 },
562 { 0xA692, 0xA693 },
563 { 0xAB9F, 0x13CF }
564};
565
566static const CaseFoldMapping1_16 case_fold1_16_053[] = {
567 { 0x0134, 0x0135 },
568 { 0x1F2A, 0x1F22 },
569 { 0x2C19, 0x2C49 },
570 { 0xA792, 0xA793 },
571 { 0xAB9E, 0x13CE }
572};
573
574static const CaseFoldMapping1_16 case_fold1_16_054[] = {
575 { 0x0533, 0x0563 },
576 { 0x1E28, 0x1E29 },
577 { 0x1F29, 0x1F21 },
578 { 0x2C1A, 0x2C4A },
579 { 0xA690, 0xA691 },
580 { 0xAB9D, 0x13CD }
581};
582
583static const CaseFoldMapping1_16 case_fold1_16_055[] = {
584 { 0x0136, 0x0137 },
585 { 0x0532, 0x0562 },
586 { 0x1F28, 0x1F20 },
587 { 0x2C1B, 0x2C4B },
588 { 0xA790, 0xA791 },
589 { 0xAB9C, 0x13CC }
590};
591
592static const CaseFoldMapping1_16 case_fold1_16_056[] = {
593 { 0x0139, 0x013A },
594 { 0x023A, 0x2C65 },
595 { 0x053D, 0x056D },
596 { 0x1E26, 0x1E27 },
597 { 0x2C14, 0x2C44 },
598 { 0xAB93, 0x13C3 }
599};
600
601static const CaseFoldMapping1_16 case_fold1_16_057[] = {
602 { 0x023B, 0x023C },
603 { 0x053C, 0x056C },
604 { 0x2C15, 0x2C45 },
605 { 0xA79E, 0xA79F },
606 { 0xAB92, 0x13C2 }
607};
608
609static const CaseFoldMapping1_16 case_fold1_16_058[] = {
610 { 0x013B, 0x013C },
611 { 0x053F, 0x056F },
612 { 0x1E24, 0x1E25 },
613 { 0x2C16, 0x2C46 },
614 { 0xAB91, 0x13C1 }
615};
616
617static const CaseFoldMapping1_16 case_fold1_16_059[] = {
618 { 0x053E, 0x056E },
619 { 0x2C17, 0x2C47 },
620 { 0xA79C, 0xA79D },
621 { 0xAB90, 0x13C0 }
622};
623
624static const CaseFoldMapping1_16 case_fold1_16_060[] = {
625 { 0x013D, 0x013E },
626 { 0x023E, 0x2C66 },
627 { 0x0539, 0x0569 },
628 { 0x1E22, 0x1E23 },
629 { 0x2C10, 0x2C40 },
630 { 0xA69A, 0xA69B },
631 { 0xAB97, 0x13C7 }
632};
633
634static const CaseFoldMapping1_16 case_fold1_16_061[] = {
635 { 0x0538, 0x0568 },
636 { 0x2C11, 0x2C41 },
637 { 0xA79A, 0xA79B },
638 { 0xAB96, 0x13C6 }
639};
640
641static const CaseFoldMapping1_16 case_fold1_16_062[] = {
642 { 0x013F, 0x0140 },
643 { 0x053B, 0x056B },
644 { 0x1E20, 0x1E21 },
645 { 0x2C12, 0x2C42 },
646 { 0xA698, 0xA699 },
647 { 0xAB95, 0x13C5 }
648};
649
650static const CaseFoldMapping1_16 case_fold1_16_063[] = {
651 { 0x023D, 0x019A },
652 { 0x053A, 0x056A },
653 { 0x2C13, 0x2C43 },
654 { 0xA798, 0xA799 },
655 { 0xAB94, 0x13C4 }
656};
657
658static const CaseFoldMapping1_16 case_fold1_16_064[] = {
659 { 0x0141, 0x0142 },
660 { 0x0545, 0x0575 },
661 { 0x1E5E, 0x1E5F },
662 { 0x1F5F, 0x1F57 },
663 { 0x2161, 0x2171 }
664};
665
666static const CaseFoldMapping1_16 case_fold1_16_065[] = {
667 { 0x0243, 0x0180 },
668 { 0x0544, 0x0574 },
669 { 0x2160, 0x2170 },
670 { 0x2C6D, 0x0251 }
671};
672
673static const CaseFoldMapping1_16 case_fold1_16_066[] = {
674 { 0x0143, 0x0144 },
675 { 0x0547, 0x0577 },
676 { 0x1E5C, 0x1E5D },
677 { 0x1F5D, 0x1F55 },
678 { 0x2163, 0x2173 },
679 { 0x2C6E, 0x0271 }
680};
681
682static const CaseFoldMapping1_16 case_fold1_16_067[] = {
683 { 0x0241, 0x0242 },
684 { 0x0546, 0x0576 },
685 { 0x2162, 0x2172 },
686 { 0x2C6F, 0x0250 }
687};
688
689static const CaseFoldMapping1_16 case_fold1_16_068[] = {
690 { 0x0145, 0x0146 },
691 { 0x0246, 0x0247 },
692 { 0x0541, 0x0571 },
693 { 0x1E5A, 0x1E5B },
694 { 0x1F5B, 0x1F53 },
695 { 0x2165, 0x2175 }
696};
697
698static const CaseFoldMapping1_16 case_fold1_16_069[] = {
699 { 0x0540, 0x0570 },
700 { 0x2164, 0x2174 },
701 { 0x2C69, 0x2C6A }
702};
703
704static const CaseFoldMapping1_16 case_fold1_16_070[] = {
705 { 0x0147, 0x0148 },
706 { 0x0244, 0x0289 },
707 { 0x0345, 0x03B9 },
708 { 0x0543, 0x0573 },
709 { 0x1E58, 0x1E59 },
710 { 0x1F59, 0x1F51 },
711 { 0x2167, 0x2177 }
712};
713
714static const CaseFoldMapping1_16 case_fold1_16_071[] = {
715 { 0x0245, 0x028C },
716 { 0x0542, 0x0572 },
717 { 0x2166, 0x2176 },
718 { 0x2C6B, 0x2C6C }
719};
720
721static const CaseFoldMapping1_16 case_fold1_16_072[] = {
722 { 0x024A, 0x024B },
723 { 0x054D, 0x057D },
724 { 0x1E56, 0x1E57 },
725 { 0x2169, 0x2179 },
726 { 0x2C64, 0x027D }
727};
728
729static const CaseFoldMapping1_16 case_fold1_16_073[] = {
730 { 0x054C, 0x057C },
731 { 0x2168, 0x2178 }
732};
733
734static const CaseFoldMapping1_16 case_fold1_16_074[] = {
735 { 0x0248, 0x0249 },
736 { 0x054F, 0x057F },
737 { 0x1E54, 0x1E55 },
738 { 0x216B, 0x217B }
739};
740
741static const CaseFoldMapping1_16 case_fold1_16_075[] = {
742 { 0x014A, 0x014B },
743 { 0x054E, 0x057E },
744 { 0x216A, 0x217A },
745 { 0x2C67, 0x2C68 }
746};
747
748static const CaseFoldMapping1_16 case_fold1_16_076[] = {
749 { 0x024E, 0x024F },
750 { 0x0549, 0x0579 },
751 { 0x1E52, 0x1E53 },
752 { 0x216D, 0x217D },
753 { 0x2C60, 0x2C61 }
754};
755
756static const CaseFoldMapping1_16 case_fold1_16_077[] = {
757 { 0x014C, 0x014D },
758 { 0x0548, 0x0578 },
759 { 0x216C, 0x217C }
760};
761
762static const CaseFoldMapping1_16 case_fold1_16_078[] = {
763 { 0x024C, 0x024D },
764 { 0x054B, 0x057B },
765 { 0x1E50, 0x1E51 },
766 { 0x216F, 0x217F },
767 { 0x2C62, 0x026B }
768};
769
770static const CaseFoldMapping1_16 case_fold1_16_079[] = {
771 { 0x014E, 0x014F },
772 { 0x054A, 0x057A },
773 { 0x216E, 0x217E },
774 { 0x2C63, 0x1D7D }
775};
776
777static const CaseFoldMapping1_16 case_fold1_16_080[] = {
778 { 0x0555, 0x0585 },
779 { 0x1E4E, 0x1E4F }
780};
781
782static const CaseFoldMapping1_16 case_fold1_16_081[] = {
783 { 0x0150, 0x0151 },
784 { 0x0554, 0x0584 }
785};
786
787static const CaseFoldMapping1_16 case_fold1_16_082[] = {
788 { 0x1E4C, 0x1E4D },
789 { 0x1F4D, 0x1F45 },
790 { 0x2C7E, 0x023F },
791 { 0xA7F5, 0xA7F6 }
792};
793
794static const CaseFoldMapping1_16 case_fold1_16_083[] = {
795 { 0x0152, 0x0153 },
796 { 0x0556, 0x0586 },
797 { 0x1F4C, 0x1F44 },
798 { 0x2C7F, 0x0240 }
799};
800
801static const CaseFoldMapping1_16 case_fold1_16_084[] = {
802 { 0x0551, 0x0581 },
803 { 0x1E4A, 0x1E4B },
804 { 0x1F4B, 0x1F43 }
805};
806
807static const CaseFoldMapping1_16 case_fold1_16_085[] = {
808 { 0x0154, 0x0155 },
809 { 0x0550, 0x0580 },
810 { 0x1F4A, 0x1F42 }
811};
812
813static const CaseFoldMapping1_16 case_fold1_16_086[] = {
814 { 0x0553, 0x0583 },
815 { 0x1E48, 0x1E49 },
816 { 0x1F49, 0x1F41 }
817};
818
819static const CaseFoldMapping1_16 case_fold1_16_087[] = {
820 { 0x0156, 0x0157 },
821 { 0x0552, 0x0582 },
822 { 0x1F48, 0x1F40 }
823};
824
825static const CaseFoldMapping1_16 case_fold1_16_088[] = {
826 { 0x1E46, 0x1E47 }
827};
828
829static const CaseFoldMapping1_16 case_fold1_16_089[] = {
830 { 0x0158, 0x0159 },
831 { 0x2C75, 0x2C76 }
832};
833
834static const CaseFoldMapping1_16 case_fold1_16_090[] = {
835 { 0x1E44, 0x1E45 }
836};
837
838static const CaseFoldMapping1_16 case_fold1_16_091[] = {
839 { 0x015A, 0x015B }
840};
841
842static const CaseFoldMapping1_16 case_fold1_16_092[] = {
843 { 0x1E42, 0x1E43 },
844 { 0x2C70, 0x0252 }
845};
846
847static const CaseFoldMapping1_16 case_fold1_16_093[] = {
848 { 0x015C, 0x015D }
849};
850
851static const CaseFoldMapping1_16 case_fold1_16_094[] = {
852 { 0x1E40, 0x1E41 },
853 { 0x2C72, 0x2C73 }
854};
855
856static const CaseFoldMapping1_16 case_fold1_16_095[] = {
857 { 0x015E, 0x015F }
858};
859
860static const CaseFoldMapping1_16 case_fold1_16_096[] = {
861 { 0x0464, 0x0465 },
862 { 0x1E7E, 0x1E7F },
863 { 0xA7C7, 0xA7C8 }
864};
865
866static const CaseFoldMapping1_16 case_fold1_16_097[] = {
867 { 0x0160, 0x0161 },
868 { 0xA7C6, 0x1D8E }
869};
870
871static const CaseFoldMapping1_16 case_fold1_16_098[] = {
872 { 0x0466, 0x0467 },
873 { 0x1E7C, 0x1E7D },
874 { 0xA7C5, 0x0282 }
875};
876
877static const CaseFoldMapping1_16 case_fold1_16_099[] = {
878 { 0x0162, 0x0163 },
879 { 0xA7C4, 0xA794 }
880};
881
882static const CaseFoldMapping1_16 case_fold1_16_100[] = {
883 { 0x0460, 0x0461 },
884 { 0x1E7A, 0x1E7B }
885};
886
887static const CaseFoldMapping1_16 case_fold1_16_101[] = {
888 { 0x0164, 0x0165 },
889 { 0xA7C2, 0xA7C3 }
890};
891
892static const CaseFoldMapping1_16 case_fold1_16_102[] = {
893 { 0x0462, 0x0463 },
894 { 0x1E78, 0x1E79 }
895};
896
897static const CaseFoldMapping1_16 case_fold1_16_103[] = {
898 { 0x0166, 0x0167 },
899 { 0xA7C0, 0xA7C1 }
900};
901
902static const CaseFoldMapping1_16 case_fold1_16_104[] = {
903 { 0x046C, 0x046D },
904 { 0x1E76, 0x1E77 }
905};
906
907static const CaseFoldMapping1_16 case_fold1_16_105[] = {
908 { 0x0168, 0x0169 }
909};
910
911static const CaseFoldMapping1_16 case_fold1_16_106[] = {
912 { 0x046E, 0x046F },
913 { 0x1E74, 0x1E75 }
914};
915
916static const CaseFoldMapping1_16 case_fold1_16_107[] = {
917 { 0x016A, 0x016B }
918};
919
920static const CaseFoldMapping1_16 case_fold1_16_108[] = {
921 { 0x0468, 0x0469 },
922 { 0x1E72, 0x1E73 }
923};
924
925static const CaseFoldMapping1_16 case_fold1_16_109[] = {
926 { 0x016C, 0x016D }
927};
928
929static const CaseFoldMapping1_16 case_fold1_16_110[] = {
930 { 0x046A, 0x046B },
931 { 0x1E70, 0x1E71 },
932 { 0xA7C9, 0xA7CA }
933};
934
935static const CaseFoldMapping1_16 case_fold1_16_111[] = {
936 { 0x016E, 0x016F }
937};
938
939static const CaseFoldMapping1_16 case_fold1_16_112[] = {
940 { 0x0474, 0x0475 },
941 { 0x1E6E, 0x1E6F },
942 { 0x1F6F, 0x1F67 }
943};
944
945static const CaseFoldMapping1_16 case_fold1_16_113[] = {
946 { 0x0170, 0x0171 },
947 { 0x0372, 0x0373 },
948 { 0x1F6E, 0x1F66 },
949 { 0xA7D6, 0xA7D7 }
950};
951
952static const CaseFoldMapping1_16 case_fold1_16_114[] = {
953 { 0x0476, 0x0477 },
954 { 0x1E6C, 0x1E6D },
955 { 0x1F6D, 0x1F65 }
956};
957
958static const CaseFoldMapping1_16 case_fold1_16_115[] = {
959 { 0x0172, 0x0173 },
960 { 0x0370, 0x0371 },
961 { 0x1F6C, 0x1F64 }
962};
963
964static const CaseFoldMapping1_16 case_fold1_16_116[] = {
965 { 0x0470, 0x0471 },
966 { 0x1E6A, 0x1E6B },
967 { 0x1F6B, 0x1F63 }
968};
969
970static const CaseFoldMapping1_16 case_fold1_16_117[] = {
971 { 0x0174, 0x0175 },
972 { 0x0376, 0x0377 },
973 { 0x1F6A, 0x1F62 }
974};
975
976static const CaseFoldMapping1_16 case_fold1_16_118[] = {
977 { 0x0472, 0x0473 },
978 { 0x1E68, 0x1E69 },
979 { 0x1F69, 0x1F61 }
980};
981
982static const CaseFoldMapping1_16 case_fold1_16_119[] = {
983 { 0x0176, 0x0177 },
984 { 0x1F68, 0x1F60 },
985 { 0xA7D0, 0xA7D1 }
986};
987
988static const CaseFoldMapping1_16 case_fold1_16_120[] = {
989 { 0x0179, 0x017A },
990 { 0x047C, 0x047D },
991 { 0x1E66, 0x1E67 }
992};
993
994static const CaseFoldMapping1_16 case_fold1_16_121[] = {
995 { 0x0178, 0x00FF }
996};
997
998static const CaseFoldMapping1_16 case_fold1_16_122[] = {
999 { 0x017B, 0x017C },
1000 { 0x047E, 0x047F },
1001 { 0x1E64, 0x1E65 }
1002};
1003
1004static const CaseFoldMapping1_16 case_fold1_16_124[] = {
1005 { 0x017D, 0x017E },
1006 { 0x037F, 0x03F3 },
1007 { 0x0478, 0x0479 },
1008 { 0x1E62, 0x1E63 }
1009};
1010
1011static const CaseFoldMapping1_16 case_fold1_16_126[] = {
1012 { 0x017F, 0x0073 },
1013 { 0x047A, 0x047B },
1014 { 0x1E60, 0x1E61 }
1015};
1016
1017static const CaseFoldMapping1_16 case_fold1_16_127[] = {
1018 { 0xA7D8, 0xA7D9 }
1019};
1020
1021static const CaseFoldMapping1_16 case_fold1_16_128[] = {
1022 { 0x0181, 0x0253 },
1023 { 0x1C9C, 0x10DC },
1024 { 0x2CAC, 0x2CAD }
1025};
1026
1027static const CaseFoldMapping1_16 case_fold1_16_129[] = {
1028 { 0x1C9D, 0x10DD },
1029 { 0xA726, 0xA727 }
1030};
1031
1032static const CaseFoldMapping1_16 case_fold1_16_130[] = {
1033 { 0x1C9E, 0x10DE },
1034 { 0x2CAE, 0x2CAF }
1035};
1036
1037static const CaseFoldMapping1_16 case_fold1_16_131[] = {
1038 { 0x0182, 0x0183 },
1039 { 0x1C9F, 0x10DF },
1040 { 0xA724, 0xA725 }
1041};
1042
1043static const CaseFoldMapping1_16 case_fold1_16_132[] = {
1044 { 0x0480, 0x0481 },
1045 { 0x1C98, 0x10D8 },
1046 { 0x2CA8, 0x2CA9 }
1047};
1048
1049static const CaseFoldMapping1_16 case_fold1_16_133[] = {
1050 { 0x0184, 0x0185 },
1051 { 0x0386, 0x03AC },
1052 { 0x1C99, 0x10D9 },
1053 { 0x1E9B, 0x1E61 },
1054 { 0xA722, 0xA723 }
1055};
1056
1057static const CaseFoldMapping1_16 case_fold1_16_134[] = {
1058 { 0x0187, 0x0188 },
1059 { 0x1C9A, 0x10DA },
1060 { 0x2CAA, 0x2CAB }
1061};
1062
1063static const CaseFoldMapping1_16 case_fold1_16_135[] = {
1064 { 0x0186, 0x0254 },
1065 { 0x1C9B, 0x10DB }
1066};
1067
1068static const CaseFoldMapping1_16 case_fold1_16_136[] = {
1069 { 0x0189, 0x0256 },
1070 { 0x048C, 0x048D },
1071 { 0x1C94, 0x10D4 },
1072 { 0x2CA4, 0x2CA5 }
1073};
1074
1075static const CaseFoldMapping1_16 case_fold1_16_137[] = {
1076 { 0x038A, 0x03AF },
1077 { 0x1C95, 0x10D5 },
1078 { 0xA72E, 0xA72F }
1079};
1080
1081static const CaseFoldMapping1_16 case_fold1_16_138[] = {
1082 { 0x018B, 0x018C },
1083 { 0x0389, 0x03AE },
1084 { 0x048E, 0x048F },
1085 { 0x1C96, 0x10D6 },
1086 { 0x1E94, 0x1E95 },
1087 { 0x2CA6, 0x2CA7 }
1088};
1089
1090static const CaseFoldMapping1_16 case_fold1_16_139[] = {
1091 { 0x018A, 0x0257 },
1092 { 0x0388, 0x03AD },
1093 { 0x1C97, 0x10D7 },
1094 { 0xA72C, 0xA72D }
1095};
1096
1097static const CaseFoldMapping1_16 case_fold1_16_140[] = {
1098 { 0x038F, 0x03CE },
1099 { 0x1C90, 0x10D0 },
1100 { 0x1E92, 0x1E93 },
1101 { 0x2CA0, 0x2CA1 }
1102};
1103
1104static const CaseFoldMapping1_16 case_fold1_16_141[] = {
1105 { 0x038E, 0x03CD },
1106 { 0x1C91, 0x10D1 },
1107 { 0xA72A, 0xA72B }
1108};
1109
1110static const CaseFoldMapping1_16 case_fold1_16_142[] = {
1111 { 0x018F, 0x0259 },
1112 { 0x048A, 0x048B },
1113 { 0x1C92, 0x10D2 },
1114 { 0x1E90, 0x1E91 },
1115 { 0x2CA2, 0x2CA3 }
1116};
1117
1118static const CaseFoldMapping1_16 case_fold1_16_143[] = {
1119 { 0x018E, 0x01DD },
1120 { 0x038C, 0x03CC },
1121 { 0x1C93, 0x10D3 },
1122 { 0xA728, 0xA729 }
1123};
1124
1125static const CaseFoldMapping1_16 case_fold1_16_144[] = {
1126 { 0x0191, 0x0192 },
1127 { 0x0393, 0x03B3 },
1128 { 0x0494, 0x0495 },
1129 { 0x1E8E, 0x1E8F },
1130 { 0x2CBC, 0x2CBD }
1131};
1132
1133static const CaseFoldMapping1_16 case_fold1_16_145[] = {
1134 { 0x0190, 0x025B },
1135 { 0x0392, 0x03B2 },
1136 { 0xA736, 0xA737 }
1137};
1138
1139static const CaseFoldMapping1_16 case_fold1_16_146[] = {
1140 { 0x0193, 0x0260 },
1141 { 0x0391, 0x03B1 },
1142 { 0x0496, 0x0497 },
1143 { 0x1E8C, 0x1E8D },
1144 { 0x24B6, 0x24D0 },
1145 { 0x2CBE, 0x2CBF }
1146};
1147
1148static const CaseFoldMapping1_16 case_fold1_16_147[] = {
1149 { 0x24B7, 0x24D1 },
1150 { 0xA734, 0xA735 }
1151};
1152
1153static const CaseFoldMapping1_16 case_fold1_16_148[] = {
1154 { 0x0397, 0x03B7 },
1155 { 0x0490, 0x0491 },
1156 { 0x1C88, 0xA64B },
1157 { 0x1E8A, 0x1E8B },
1158 { 0x2CB8, 0x2CB9 }
1159};
1160
1161static const CaseFoldMapping1_16 case_fold1_16_149[] = {
1162 { 0x0194, 0x0263 },
1163 { 0x0396, 0x03B6 },
1164 { 0xA732, 0xA733 }
1165};
1166
1167static const CaseFoldMapping1_16 case_fold1_16_150[] = {
1168 { 0x0197, 0x0268 },
1169 { 0x0395, 0x03B5 },
1170 { 0x0492, 0x0493 },
1171 { 0x1E88, 0x1E89 },
1172 { 0x2CBA, 0x2CBB }
1173};
1174
1175static const CaseFoldMapping1_16 case_fold1_16_151[] = {
1176 { 0x0196, 0x0269 },
1177 { 0x0394, 0x03B4 }
1178};
1179
1180static const CaseFoldMapping1_16 case_fold1_16_152[] = {
1181 { 0x039B, 0x03BB },
1182 { 0x049C, 0x049D },
1183 { 0x1C84, 0x0442 },
1184 { 0x1E86, 0x1E87 },
1185 { 0x24BC, 0x24D6 },
1186 { 0x2CB4, 0x2CB5 }
1187};
1188
1189static const CaseFoldMapping1_16 case_fold1_16_153[] = {
1190 { 0x0198, 0x0199 },
1191 { 0x039A, 0x03BA },
1192 { 0x1C85, 0x0442 },
1193 { 0x24BD, 0x24D7 },
1194 { 0xA73E, 0xA73F }
1195};
1196
1197static const CaseFoldMapping1_16 case_fold1_16_154[] = {
1198 { 0x0399, 0x03B9 },
1199 { 0x049E, 0x049F },
1200 { 0x1C86, 0x044A },
1201 { 0x1E84, 0x1E85 },
1202 { 0x24BE, 0x24D8 },
1203 { 0x2CB6, 0x2CB7 }
1204};
1205
1206static const CaseFoldMapping1_16 case_fold1_16_155[] = {
1207 { 0x0398, 0x03B8 },
1208 { 0x1C87, 0x0463 },
1209 { 0x24BF, 0x24D9 },
1210 { 0xA73C, 0xA73D }
1211};
1212
1213static const CaseFoldMapping1_16 case_fold1_16_156[] = {
1214 { 0x019D, 0x0272 },
1215 { 0x039F, 0x03BF },
1216 { 0x0498, 0x0499 },
1217 { 0x1C80, 0x0432 },
1218 { 0x1E82, 0x1E83 },
1219 { 0x24B8, 0x24D2 },
1220 { 0x2CB0, 0x2CB1 }
1221};
1222
1223static const CaseFoldMapping1_16 case_fold1_16_157[] = {
1224 { 0x019C, 0x026F },
1225 { 0x039E, 0x03BE },
1226 { 0x1C81, 0x0434 },
1227 { 0x24B9, 0x24D3 },
1228 { 0xA73A, 0xA73B }
1229};
1230
1231static const CaseFoldMapping1_16 case_fold1_16_158[] = {
1232 { 0x019F, 0x0275 },
1233 { 0x039D, 0x03BD },
1234 { 0x049A, 0x049B },
1235 { 0x1C82, 0x043E },
1236 { 0x1E80, 0x1E81 },
1237 { 0x24BA, 0x24D4 },
1238 { 0x2CB2, 0x2CB3 }
1239};
1240
1241static const CaseFoldMapping1_16 case_fold1_16_159[] = {
1242 { 0x039C, 0x03BC },
1243 { 0x1C83, 0x0441 },
1244 { 0x24BB, 0x24D5 },
1245 { 0xA738, 0xA739 }
1246};
1247
1248static const CaseFoldMapping1_16 case_fold1_16_160[] = {
1249 { 0x03A3, 0x03C3 },
1250 { 0x04A4, 0x04A5 },
1251 { 0x10B0, 0x2D10 },
1252 { 0x1EBE, 0x1EBF },
1253 { 0x2C8C, 0x2C8D }
1254};
1255
1256static const CaseFoldMapping1_16 case_fold1_16_161[] = {
1257 { 0x01A0, 0x01A1 },
1258 { 0x10B1, 0x2D11 },
1259 { 0x1CBD, 0x10FD },
1260 { 0x1FBE, 0x03B9 }
1261};
1262
1263static const CaseFoldMapping1_16 case_fold1_16_162[] = {
1264 { 0x03A1, 0x03C1 },
1265 { 0x04A6, 0x04A7 },
1266 { 0x10B2, 0x2D12 },
1267 { 0x1CBE, 0x10FE },
1268 { 0x1EBC, 0x1EBD },
1269 { 0x2183, 0x2184 },
1270 { 0x2C8E, 0x2C8F }
1271};
1272
1273static const CaseFoldMapping1_16 case_fold1_16_163[] = {
1274 { 0x01A2, 0x01A3 },
1275 { 0x03A0, 0x03C0 },
1276 { 0x10B3, 0x2D13 },
1277 { 0x1CBF, 0x10FF }
1278};
1279
1280static const CaseFoldMapping1_16 case_fold1_16_164[] = {
1281 { 0x03A7, 0x03C7 },
1282 { 0x04A0, 0x04A1 },
1283 { 0x10B4, 0x2D14 },
1284 { 0x1CB8, 0x10F8 },
1285 { 0x1EBA, 0x1EBB },
1286 { 0x1FBB, 0x1F71 },
1287 { 0x2C88, 0x2C89 }
1288};
1289
1290static const CaseFoldMapping1_16 case_fold1_16_165[] = {
1291 { 0x01A4, 0x01A5 },
1292 { 0x03A6, 0x03C6 },
1293 { 0x10B5, 0x2D15 },
1294 { 0x1CB9, 0x10F9 },
1295 { 0x1FBA, 0x1F70 }
1296};
1297
1298static const CaseFoldMapping1_16 case_fold1_16_166[] = {
1299 { 0x01A7, 0x01A8 },
1300 { 0x03A5, 0x03C5 },
1301 { 0x04A2, 0x04A3 },
1302 { 0x10B6, 0x2D16 },
1303 { 0x1CBA, 0x10FA },
1304 { 0x1EB8, 0x1EB9 },
1305 { 0x1FB9, 0x1FB1 },
1306 { 0x2C8A, 0x2C8B }
1307};
1308
1309static const CaseFoldMapping1_16 case_fold1_16_167[] = {
1310 { 0x01A6, 0x0280 },
1311 { 0x03A4, 0x03C4 },
1312 { 0x10B7, 0x2D17 },
1313 { 0x1FB8, 0x1FB0 }
1314};
1315
1316static const CaseFoldMapping1_16 case_fold1_16_168[] = {
1317 { 0x01A9, 0x0283 },
1318 { 0x03AB, 0x03CB },
1319 { 0x04AC, 0x04AD },
1320 { 0x10B8, 0x2D18 },
1321 { 0x1CB4, 0x10F4 },
1322 { 0x1EB6, 0x1EB7 },
1323 { 0x2C84, 0x2C85 }
1324};
1325
1326static const CaseFoldMapping1_16 case_fold1_16_169[] = {
1327 { 0x03AA, 0x03CA },
1328 { 0x10B9, 0x2D19 },
1329 { 0x1CB5, 0x10F5 }
1330};
1331
1332static const CaseFoldMapping1_16 case_fold1_16_170[] = {
1333 { 0x03A9, 0x03C9 },
1334 { 0x04AE, 0x04AF },
1335 { 0x10BA, 0x2D1A },
1336 { 0x1CB6, 0x10F6 },
1337 { 0x1EB4, 0x1EB5 },
1338 { 0x2C86, 0x2C87 }
1339};
1340
1341static const CaseFoldMapping1_16 case_fold1_16_171[] = {
1342 { 0x03A8, 0x03C8 },
1343 { 0x10BB, 0x2D1B },
1344 { 0x1CB7, 0x10F7 }
1345};
1346
1347static const CaseFoldMapping1_16 case_fold1_16_172[] = {
1348 { 0x04A8, 0x04A9 },
1349 { 0x10BC, 0x2D1C },
1350 { 0x1CB0, 0x10F0 },
1351 { 0x1EB2, 0x1EB3 },
1352 { 0x2C80, 0x2C81 }
1353};
1354
1355static const CaseFoldMapping1_16 case_fold1_16_173[] = {
1356 { 0x01AC, 0x01AD },
1357 { 0x10BD, 0x2D1D },
1358 { 0x1CB1, 0x10F1 }
1359};
1360
1361static const CaseFoldMapping1_16 case_fold1_16_174[] = {
1362 { 0x01AF, 0x01B0 },
1363 { 0x04AA, 0x04AB },
1364 { 0x10BE, 0x2D1E },
1365 { 0x1CB2, 0x10F2 },
1366 { 0x1EB0, 0x1EB1 },
1367 { 0x2C82, 0x2C83 }
1368};
1369
1370static const CaseFoldMapping1_16 case_fold1_16_175[] = {
1371 { 0x01AE, 0x0288 },
1372 { 0x10BF, 0x2D1F },
1373 { 0x1CB3, 0x10F3 }
1374};
1375
1376static const CaseFoldMapping1_16 case_fold1_16_176[] = {
1377 { 0x01B1, 0x028A },
1378 { 0x04B4, 0x04B5 },
1379 { 0x10A0, 0x2D00 },
1380 { 0x1CAC, 0x10EC },
1381 { 0x1EAE, 0x1EAF },
1382 { 0x2C9C, 0x2C9D }
1383};
1384
1385static const CaseFoldMapping1_16 case_fold1_16_177[] = {
1386 { 0x10A1, 0x2D01 },
1387 { 0x1CAD, 0x10ED }
1388};
1389
1390static const CaseFoldMapping1_16 case_fold1_16_178[] = {
1391 { 0x01B3, 0x01B4 },
1392 { 0x04B6, 0x04B7 },
1393 { 0x10A2, 0x2D02 },
1394 { 0x1CAE, 0x10EE },
1395 { 0x1EAC, 0x1EAD },
1396 { 0x2C9E, 0x2C9F }
1397};
1398
1399static const CaseFoldMapping1_16 case_fold1_16_179[] = {
1400 { 0x01B2, 0x028B },
1401 { 0x10A3, 0x2D03 },
1402 { 0x1CAF, 0x10EF }
1403};
1404
1405static const CaseFoldMapping1_16 case_fold1_16_180[] = {
1406 { 0x01B5, 0x01B6 },
1407 { 0x04B0, 0x04B1 },
1408 { 0x10A4, 0x2D04 },
1409 { 0x1CA8, 0x10E8 },
1410 { 0x1EAA, 0x1EAB },
1411 { 0x2C98, 0x2C99 }
1412};
1413
1414static const CaseFoldMapping1_16 case_fold1_16_181[] = {
1415 { 0x00B5, 0x03BC },
1416 { 0x10A5, 0x2D05 },
1417 { 0x1CA9, 0x10E9 }
1418};
1419
1420static const CaseFoldMapping1_16 case_fold1_16_182[] = {
1421 { 0x01B7, 0x0292 },
1422 { 0x04B2, 0x04B3 },
1423 { 0x10A6, 0x2D06 },
1424 { 0x1CAA, 0x10EA },
1425 { 0x1EA8, 0x1EA9 },
1426 { 0x2C9A, 0x2C9B }
1427};
1428
1429static const CaseFoldMapping1_16 case_fold1_16_183[] = {
1430 { 0x10A7, 0x2D07 },
1431 { 0x1CAB, 0x10EB }
1432};
1433
1434static const CaseFoldMapping1_16 case_fold1_16_184[] = {
1435 { 0x04BC, 0x04BD },
1436 { 0x10A8, 0x2D08 },
1437 { 0x1CA4, 0x10E4 },
1438 { 0x1EA6, 0x1EA7 },
1439 { 0x2C94, 0x2C95 }
1440};
1441
1442static const CaseFoldMapping1_16 case_fold1_16_185[] = {
1443 { 0x01B8, 0x01B9 },
1444 { 0x10A9, 0x2D09 },
1445 { 0x1CA5, 0x10E5 }
1446};
1447
1448static const CaseFoldMapping1_16 case_fold1_16_186[] = {
1449 { 0x04BE, 0x04BF },
1450 { 0x10AA, 0x2D0A },
1451 { 0x1CA6, 0x10E6 },
1452 { 0x1EA4, 0x1EA5 },
1453 { 0x2C96, 0x2C97 }
1454};
1455
1456static const CaseFoldMapping1_16 case_fold1_16_187[] = {
1457 { 0x10AB, 0x2D0B },
1458 { 0x1CA7, 0x10E7 }
1459};
1460
1461static const CaseFoldMapping1_16 case_fold1_16_188[] = {
1462 { 0x04B8, 0x04B9 },
1463 { 0x10AC, 0x2D0C },
1464 { 0x1CA0, 0x10E0 },
1465 { 0x1EA2, 0x1EA3 },
1466 { 0x2C90, 0x2C91 }
1467};
1468
1469static const CaseFoldMapping1_16 case_fold1_16_189[] = {
1470 { 0x01BC, 0x01BD },
1471 { 0x10AD, 0x2D0D },
1472 { 0x1CA1, 0x10E1 }
1473};
1474
1475static const CaseFoldMapping1_16 case_fold1_16_190[] = {
1476 { 0x04BA, 0x04BB },
1477 { 0x10AE, 0x2D0E },
1478 { 0x1CA2, 0x10E2 },
1479 { 0x1EA0, 0x1EA1 },
1480 { 0x2C92, 0x2C93 }
1481};
1482
1483static const CaseFoldMapping1_16 case_fold1_16_191[] = {
1484 { 0x10AF, 0x2D0F },
1485 { 0x1CA3, 0x10E3 }
1486};
1487
1488static const CaseFoldMapping1_16 case_fold1_16_192[] = {
1489 { 0x00C0, 0x00E0 },
1490 { 0x1EDE, 0x1EDF },
1491 { 0xA666, 0xA667 }
1492};
1493
1494static const CaseFoldMapping1_16 case_fold1_16_193[] = {
1495 { 0x00C1, 0x00E1 },
1496 { 0x03C2, 0x03C3 },
1497 { 0x04C5, 0x04C6 },
1498 { 0x2CED, 0x2CEE },
1499 { 0xA766, 0xA767 }
1500};
1501
1502static const CaseFoldMapping1_16 case_fold1_16_194[] = {
1503 { 0x00C2, 0x00E2 },
1504 { 0x1EDC, 0x1EDD },
1505 { 0xA664, 0xA665 }
1506};
1507
1508static const CaseFoldMapping1_16 case_fold1_16_195[] = {
1509 { 0x00C3, 0x00E3 },
1510 { 0x04C7, 0x04C8 },
1511 { 0xA764, 0xA765 }
1512};
1513
1514static const CaseFoldMapping1_16 case_fold1_16_196[] = {
1515 { 0x00C4, 0x00E4 },
1516 { 0x01C5, 0x01C6 },
1517 { 0x04C0, 0x04CF },
1518 { 0x1EDA, 0x1EDB },
1519 { 0x1FDB, 0x1F77 },
1520 { 0xA662, 0xA663 }
1521};
1522
1523static const CaseFoldMapping1_16 case_fold1_16_197[] = {
1524 { 0x00C5, 0x00E5 },
1525 { 0x01C4, 0x01C6 },
1526 { 0x04C1, 0x04C2 },
1527 { 0x1FDA, 0x1F76 },
1528 { 0xA762, 0xA763 },
1529 { 0xFF3A, 0xFF5A }
1530};
1531
1532static const CaseFoldMapping1_16 case_fold1_16_198[] = {
1533 { 0x00C6, 0x00E6 },
1534 { 0x01C7, 0x01C9 },
1535 { 0x1ED8, 0x1ED9 },
1536 { 0x1FD9, 0x1FD1 },
1537 { 0xA660, 0xA661 },
1538 { 0xFF39, 0xFF59 }
1539};
1540
1541static const CaseFoldMapping1_16 case_fold1_16_199[] = {
1542 { 0x00C7, 0x00E7 },
1543 { 0x04C3, 0x04C4 },
1544 { 0x1FD8, 0x1FD0 },
1545 { 0x2CEB, 0x2CEC },
1546 { 0xA760, 0xA761 },
1547 { 0xFF38, 0xFF58 }
1548};
1549
1550static const CaseFoldMapping1_16 case_fold1_16_200[] = {
1551 { 0x00C8, 0x00E8 },
1552 { 0x1ED6, 0x1ED7 },
1553 { 0xFF37, 0xFF57 }
1554};
1555
1556static const CaseFoldMapping1_16 case_fold1_16_201[] = {
1557 { 0x00C9, 0x00E9 },
1558 { 0x01C8, 0x01C9 },
1559 { 0x04CD, 0x04CE },
1560 { 0xA76E, 0xA76F },
1561 { 0xFF36, 0xFF56 }
1562};
1563
1564static const CaseFoldMapping1_16 case_fold1_16_202[] = {
1565 { 0x00CA, 0x00EA },
1566 { 0x01CB, 0x01CC },
1567 { 0x1ED4, 0x1ED5 },
1568 { 0xA66C, 0xA66D },
1569 { 0xFF35, 0xFF55 }
1570};
1571
1572static const CaseFoldMapping1_16 case_fold1_16_203[] = {
1573 { 0x00CB, 0x00EB },
1574 { 0x01CA, 0x01CC },
1575 { 0xA76C, 0xA76D },
1576 { 0xFF34, 0xFF54 }
1577};
1578
1579static const CaseFoldMapping1_16 case_fold1_16_204[] = {
1580 { 0x00CC, 0x00EC },
1581 { 0x01CD, 0x01CE },
1582 { 0x03CF, 0x03D7 },
1583 { 0x1ED2, 0x1ED3 },
1584 { 0x2CE0, 0x2CE1 },
1585 { 0xA66A, 0xA66B },
1586 { 0xFF33, 0xFF53 }
1587};
1588
1589static const CaseFoldMapping1_16 case_fold1_16_205[] = {
1590 { 0x00CD, 0x00ED },
1591 { 0x04C9, 0x04CA },
1592 { 0xA76A, 0xA76B },
1593 { 0xFF32, 0xFF52 }
1594};
1595
1596static const CaseFoldMapping1_16 case_fold1_16_206[] = {
1597 { 0x00CE, 0x00EE },
1598 { 0x01CF, 0x01D0 },
1599 { 0x1ED0, 0x1ED1 },
1600 { 0x2CE2, 0x2CE3 },
1601 { 0xA668, 0xA669 },
1602 { 0xFF31, 0xFF51 }
1603};
1604
1605static const CaseFoldMapping1_16 case_fold1_16_207[] = {
1606 { 0x00CF, 0x00EF },
1607 { 0x04CB, 0x04CC },
1608 { 0xA768, 0xA769 },
1609 { 0xFF30, 0xFF50 }
1610};
1611
1612static const CaseFoldMapping1_16 case_fold1_16_208[] = {
1613 { 0x00D0, 0x00F0 },
1614 { 0x01D1, 0x01D2 },
1615 { 0x04D4, 0x04D5 },
1616 { 0x10C0, 0x2D20 },
1617 { 0x1ECE, 0x1ECF },
1618 { 0xAB7B, 0x13AB },
1619 { 0xFF2F, 0xFF4F }
1620};
1621
1622static const CaseFoldMapping1_16 case_fold1_16_209[] = {
1623 { 0x00D1, 0x00F1 },
1624 { 0x10C1, 0x2D21 },
1625 { 0xAB7A, 0x13AA },
1626 { 0xFF2E, 0xFF4E }
1627};
1628
1629static const CaseFoldMapping1_16 case_fold1_16_210[] = {
1630 { 0x00D2, 0x00F2 },
1631 { 0x01D3, 0x01D4 },
1632 { 0x03D1, 0x03B8 },
1633 { 0x04D6, 0x04D7 },
1634 { 0x10C2, 0x2D22 },
1635 { 0x1ECC, 0x1ECD },
1636 { 0xAB79, 0x13A9 },
1637 { 0xFF2D, 0xFF4D }
1638};
1639
1640static const CaseFoldMapping1_16 case_fold1_16_211[] = {
1641 { 0x00D3, 0x00F3 },
1642 { 0x03D0, 0x03B2 },
1643 { 0x10C3, 0x2D23 },
1644 { 0xAB78, 0x13A8 },
1645 { 0xFF2C, 0xFF4C }
1646};
1647
1648static const CaseFoldMapping1_16 case_fold1_16_212[] = {
1649 { 0x00D4, 0x00F4 },
1650 { 0x01D5, 0x01D6 },
1651 { 0x04D0, 0x04D1 },
1652 { 0x10C4, 0x2D24 },
1653 { 0x1ECA, 0x1ECB },
1654 { 0x1FCB, 0x1F75 },
1655 { 0xAB7F, 0x13AF },
1656 { 0xFF2B, 0xFF4B }
1657};
1658
1659static const CaseFoldMapping1_16 case_fold1_16_213[] = {
1660 { 0x00D5, 0x00F5 },
1661 { 0x03D6, 0x03C0 },
1662 { 0x10C5, 0x2D25 },
1663 { 0x1FCA, 0x1F74 },
1664 { 0xAB7E, 0x13AE },
1665 { 0xFF2A, 0xFF4A }
1666};
1667
1668static const CaseFoldMapping1_16 case_fold1_16_214[] = {
1669 { 0x00D6, 0x00F6 },
1670 { 0x01D7, 0x01D8 },
1671 { 0x03D5, 0x03C6 },
1672 { 0x04D2, 0x04D3 },
1673 { 0x1EC8, 0x1EC9 },
1674 { 0x1FC9, 0x1F73 },
1675 { 0xAB7D, 0x13AD },
1676 { 0xFF29, 0xFF49 }
1677};
1678
1679static const CaseFoldMapping1_16 case_fold1_16_215[] = {
1680 { 0x10C7, 0x2D27 },
1681 { 0x1FC8, 0x1F72 },
1682 { 0xAB7C, 0x13AC },
1683 { 0xFF28, 0xFF48 }
1684};
1685
1686static const CaseFoldMapping1_16 case_fold1_16_216[] = {
1687 { 0x00D8, 0x00F8 },
1688 { 0x01D9, 0x01DA },
1689 { 0x04DC, 0x04DD },
1690 { 0x1EC6, 0x1EC7 },
1691 { 0xAB73, 0x13A3 },
1692 { 0xFF27, 0xFF47 }
1693};
1694
1695static const CaseFoldMapping1_16 case_fold1_16_217[] = {
1696 { 0x00D9, 0x00F9 },
1697 { 0x03DA, 0x03DB },
1698 { 0xA77E, 0xA77F },
1699 { 0xAB72, 0x13A2 },
1700 { 0xFF26, 0xFF46 }
1701};
1702
1703static const CaseFoldMapping1_16 case_fold1_16_218[] = {
1704 { 0x00DA, 0x00FA },
1705 { 0x01DB, 0x01DC },
1706 { 0x04DE, 0x04DF },
1707 { 0x1EC4, 0x1EC5 },
1708 { 0xA77D, 0x1D79 },
1709 { 0xAB71, 0x13A1 },
1710 { 0xFF25, 0xFF45 }
1711};
1712
1713static const CaseFoldMapping1_16 case_fold1_16_219[] = {
1714 { 0x00DB, 0x00FB },
1715 { 0x03D8, 0x03D9 },
1716 { 0xAB70, 0x13A0 },
1717 { 0xFF24, 0xFF44 }
1718};
1719
1720static const CaseFoldMapping1_16 case_fold1_16_220[] = {
1721 { 0x00DC, 0x00FC },
1722 { 0x04D8, 0x04D9 },
1723 { 0x1EC2, 0x1EC3 },
1724 { 0xA77B, 0xA77C },
1725 { 0xAB77, 0x13A7 },
1726 { 0xFF23, 0xFF43 }
1727};
1728
1729static const CaseFoldMapping1_16 case_fold1_16_221[] = {
1730 { 0x00DD, 0x00FD },
1731 { 0x03DE, 0x03DF },
1732 { 0x10CD, 0x2D2D },
1733 { 0xAB76, 0x13A6 },
1734 { 0xFF22, 0xFF42 }
1735};
1736
1737static const CaseFoldMapping1_16 case_fold1_16_222[] = {
1738 { 0x00DE, 0x00FE },
1739 { 0x04DA, 0x04DB },
1740 { 0x1EC0, 0x1EC1 },
1741 { 0x2CF2, 0x2CF3 },
1742 { 0xA779, 0xA77A },
1743 { 0xAB75, 0x13A5 },
1744 { 0xFF21, 0xFF41 }
1745};
1746
1747static const CaseFoldMapping1_16 case_fold1_16_223[] = {
1748 { 0x01DE, 0x01DF },
1749 { 0x03DC, 0x03DD },
1750 { 0xAB74, 0x13A4 }
1751};
1752
1753static const CaseFoldMapping1_16 case_fold1_16_224[] = {
1754 { 0x04E4, 0x04E5 },
1755 { 0x1EFE, 0x1EFF },
1756 { 0x24C4, 0x24DE },
1757 { 0x2CCC, 0x2CCD },
1758 { 0xA646, 0xA647 }
1759};
1760
1761static const CaseFoldMapping1_16 case_fold1_16_225[] = {
1762 { 0x01E0, 0x01E1 },
1763 { 0x03E2, 0x03E3 },
1764 { 0x24C5, 0x24DF },
1765 { 0xA746, 0xA747 }
1766};
1767
1768static const CaseFoldMapping1_16 case_fold1_16_226[] = {
1769 { 0x04E6, 0x04E7 },
1770 { 0x1EFC, 0x1EFD },
1771 { 0x24C6, 0x24E0 },
1772 { 0x2CCE, 0x2CCF },
1773 { 0xA644, 0xA645 }
1774};
1775
1776static const CaseFoldMapping1_16 case_fold1_16_227[] = {
1777 { 0x01E2, 0x01E3 },
1778 { 0x03E0, 0x03E1 },
1779 { 0x24C7, 0x24E1 },
1780 { 0xA744, 0xA745 }
1781};
1782
1783static const CaseFoldMapping1_16 case_fold1_16_228[] = {
1784 { 0x04E0, 0x04E1 },
1785 { 0x1EFA, 0x1EFB },
1786 { 0x1FFB, 0x1F7D },
1787 { 0x24C0, 0x24DA },
1788 { 0x2CC8, 0x2CC9 },
1789 { 0xA642, 0xA643 }
1790};
1791
1792static const CaseFoldMapping1_16 case_fold1_16_229[] = {
1793 { 0x01E4, 0x01E5 },
1794 { 0x03E6, 0x03E7 },
1795 { 0x1FFA, 0x1F7C },
1796 { 0x24C1, 0x24DB },
1797 { 0xA742, 0xA743 }
1798};
1799
1800static const CaseFoldMapping1_16 case_fold1_16_230[] = {
1801 { 0x04E2, 0x04E3 },
1802 { 0x1EF8, 0x1EF9 },
1803 { 0x1FF9, 0x1F79 },
1804 { 0x24C2, 0x24DC },
1805 { 0x2CCA, 0x2CCB },
1806 { 0xA640, 0xA641 }
1807};
1808
1809static const CaseFoldMapping1_16 case_fold1_16_231[] = {
1810 { 0x01E6, 0x01E7 },
1811 { 0x03E4, 0x03E5 },
1812 { 0x1FF8, 0x1F78 },
1813 { 0x24C3, 0x24DD },
1814 { 0xA740, 0xA741 }
1815};
1816
1817static const CaseFoldMapping1_16 case_fold1_16_232[] = {
1818 { 0x04EC, 0x04ED },
1819 { 0x13FB, 0x13F3 },
1820 { 0x1EF6, 0x1EF7 },
1821 { 0x24CC, 0x24E6 },
1822 { 0x2CC4, 0x2CC5 },
1823 { 0xA64E, 0xA64F }
1824};
1825
1826static const CaseFoldMapping1_16 case_fold1_16_233[] = {
1827 { 0x01E8, 0x01E9 },
1828 { 0x03EA, 0x03EB },
1829 { 0x13FA, 0x13F2 },
1830 { 0x24CD, 0x24E7 },
1831 { 0xA74E, 0xA74F }
1832};
1833
1834static const CaseFoldMapping1_16 case_fold1_16_234[] = {
1835 { 0x04EE, 0x04EF },
1836 { 0x13F9, 0x13F1 },
1837 { 0x1EF4, 0x1EF5 },
1838 { 0x24CE, 0x24E8 },
1839 { 0x2CC6, 0x2CC7 },
1840 { 0xA64C, 0xA64D }
1841};
1842
1843static const CaseFoldMapping1_16 case_fold1_16_235[] = {
1844 { 0x01EA, 0x01EB },
1845 { 0x03E8, 0x03E9 },
1846 { 0x13F8, 0x13F0 },
1847 { 0x24CF, 0x24E9 },
1848 { 0xA74C, 0xA74D }
1849};
1850
1851static const CaseFoldMapping1_16 case_fold1_16_236[] = {
1852 { 0x04E8, 0x04E9 },
1853 { 0x1EF2, 0x1EF3 },
1854 { 0x24C8, 0x24E2 },
1855 { 0x2CC0, 0x2CC1 },
1856 { 0xA64A, 0xA64B }
1857};
1858
1859static const CaseFoldMapping1_16 case_fold1_16_237[] = {
1860 { 0x01EC, 0x01ED },
1861 { 0x03EE, 0x03EF },
1862 { 0x24C9, 0x24E3 },
1863 { 0xA74A, 0xA74B }
1864};
1865
1866static const CaseFoldMapping1_16 case_fold1_16_238[] = {
1867 { 0x04EA, 0x04EB },
1868 { 0x13FD, 0x13F5 },
1869 { 0x1EF0, 0x1EF1 },
1870 { 0x24CA, 0x24E4 },
1871 { 0x2CC2, 0x2CC3 },
1872 { 0xA648, 0xA649 }
1873};
1874
1875static const CaseFoldMapping1_16 case_fold1_16_239[] = {
1876 { 0x01EE, 0x01EF },
1877 { 0x03EC, 0x03ED },
1878 { 0x13FC, 0x13F4 },
1879 { 0x24CB, 0x24E5 },
1880 { 0xA748, 0xA749 }
1881};
1882
1883static const CaseFoldMapping1_16 case_fold1_16_240[] = {
1884 { 0x01F1, 0x01F3 },
1885 { 0x04F4, 0x04F5 },
1886 { 0x1EEE, 0x1EEF },
1887 { 0x2CDC, 0x2CDD },
1888 { 0xA656, 0xA657 }
1889};
1890
1891static const CaseFoldMapping1_16 case_fold1_16_241[] = {
1892 { 0xA756, 0xA757 }
1893};
1894
1895static const CaseFoldMapping1_16 case_fold1_16_242[] = {
1896 { 0x03F1, 0x03C1 },
1897 { 0x04F6, 0x04F7 },
1898 { 0x1EEC, 0x1EED },
1899 { 0x2CDE, 0x2CDF },
1900 { 0xA654, 0xA655 }
1901};
1902
1903static const CaseFoldMapping1_16 case_fold1_16_243[] = {
1904 { 0x01F2, 0x01F3 },
1905 { 0x03F0, 0x03BA },
1906 { 0x1FEC, 0x1FE5 },
1907 { 0xA754, 0xA755 }
1908};
1909
1910static const CaseFoldMapping1_16 case_fold1_16_244[] = {
1911 { 0x03F7, 0x03F8 },
1912 { 0x04F0, 0x04F1 },
1913 { 0x1EEA, 0x1EEB },
1914 { 0x1FEB, 0x1F7B },
1915 { 0x2CD8, 0x2CD9 },
1916 { 0xA652, 0xA653 }
1917};
1918
1919static const CaseFoldMapping1_16 case_fold1_16_245[] = {
1920 { 0x01F4, 0x01F5 },
1921 { 0x1FEA, 0x1F7A },
1922 { 0xA752, 0xA753 }
1923};
1924
1925static const CaseFoldMapping1_16 case_fold1_16_246[] = {
1926 { 0x01F7, 0x01BF },
1927 { 0x03F5, 0x03B5 },
1928 { 0x04F2, 0x04F3 },
1929 { 0x1EE8, 0x1EE9 },
1930 { 0x1FE9, 0x1FE1 },
1931 { 0x2CDA, 0x2CDB },
1932 { 0xA650, 0xA651 }
1933};
1934
1935static const CaseFoldMapping1_16 case_fold1_16_247[] = {
1936 { 0x01F6, 0x0195 },
1937 { 0x03F4, 0x03B8 },
1938 { 0x1FE8, 0x1FE0 },
1939 { 0xA750, 0xA751 }
1940};
1941
1942static const CaseFoldMapping1_16 case_fold1_16_248[] = {
1943 { 0x04FC, 0x04FD },
1944 { 0x1EE6, 0x1EE7 },
1945 { 0x2CD4, 0x2CD5 },
1946 { 0xA65E, 0xA65F }
1947};
1948
1949static const CaseFoldMapping1_16 case_fold1_16_249[] = {
1950 { 0x01F8, 0x01F9 },
1951 { 0x03FA, 0x03FB },
1952 { 0xA75E, 0xA75F }
1953};
1954
1955static const CaseFoldMapping1_16 case_fold1_16_250[] = {
1956 { 0x03F9, 0x03F2 },
1957 { 0x04FE, 0x04FF },
1958 { 0x1EE4, 0x1EE5 },
1959 { 0x2CD6, 0x2CD7 },
1960 { 0xA65C, 0xA65D }
1961};
1962
1963static const CaseFoldMapping1_16 case_fold1_16_251[] = {
1964 { 0x01FA, 0x01FB },
1965 { 0xA75C, 0xA75D }
1966};
1967
1968static const CaseFoldMapping1_16 case_fold1_16_252[] = {
1969 { 0x03FF, 0x037D },
1970 { 0x04F8, 0x04F9 },
1971 { 0x1EE2, 0x1EE3 },
1972 { 0x2CD0, 0x2CD1 },
1973 { 0xA65A, 0xA65B }
1974};
1975
1976static const CaseFoldMapping1_16 case_fold1_16_253[] = {
1977 { 0x01FC, 0x01FD },
1978 { 0x03FE, 0x037C },
1979 { 0xA75A, 0xA75B }
1980};
1981
1982static const CaseFoldMapping1_16 case_fold1_16_254[] = {
1983 { 0x03FD, 0x037B },
1984 { 0x04FA, 0x04FB },
1985 { 0x1EE0, 0x1EE1 },
1986 { 0x2CD2, 0x2CD3 },
1987 { 0xA658, 0xA659 }
1988};
1989
1990static const CaseFoldMapping1_16 case_fold1_16_255[] = {
1991 { 0x01FE, 0x01FF },
1992 { 0xA758, 0xA759 }
1993};
1994
1995static const CaseFoldMapping1_32 case_fold1_32_000[] = {
1996 { 0x10404, 0x1042C },
1997 { 0x10414, 0x1043C },
1998 { 0x10424, 0x1044C },
1999 { 0x104B4, 0x104DC },
2000 { 0x104C4, 0x104EC },
2001 { 0x10575, 0x1059C },
2002 { 0x10585, 0x105AC },
2003 { 0x10595, 0x105BC },
2004 { 0x10C8C, 0x10CCC },
2005 { 0x10C9C, 0x10CDC },
2006 { 0x10CAC, 0x10CEC },
2007 { 0x118A8, 0x118C8 },
2008 { 0x118B8, 0x118D8 },
2009 { 0x16E4E, 0x16E6E },
2010 { 0x16E5E, 0x16E7E },
2011 { 0x1E909, 0x1E92B },
2012 { 0x1E919, 0x1E93B }
2013};
2014
2015static const CaseFoldMapping1_32 case_fold1_32_001[] = {
2016 { 0x10405, 0x1042D },
2017 { 0x10415, 0x1043D },
2018 { 0x10425, 0x1044D },
2019 { 0x104B5, 0x104DD },
2020 { 0x104C5, 0x104ED },
2021 { 0x10574, 0x1059B },
2022 { 0x10584, 0x105AB },
2023 { 0x10594, 0x105BB },
2024 { 0x10C8D, 0x10CCD },
2025 { 0x10C9D, 0x10CDD },
2026 { 0x10CAD, 0x10CED },
2027 { 0x118A9, 0x118C9 },
2028 { 0x118B9, 0x118D9 },
2029 { 0x16E4F, 0x16E6F },
2030 { 0x16E5F, 0x16E7F },
2031 { 0x1E908, 0x1E92A },
2032 { 0x1E918, 0x1E93A }
2033};
2034
2035static const CaseFoldMapping1_32 case_fold1_32_002[] = {
2036 { 0x10406, 0x1042E },
2037 { 0x10416, 0x1043E },
2038 { 0x10426, 0x1044E },
2039 { 0x104B6, 0x104DE },
2040 { 0x104C6, 0x104EE },
2041 { 0x10577, 0x1059E },
2042 { 0x10587, 0x105AE },
2043 { 0x10C8E, 0x10CCE },
2044 { 0x10C9E, 0x10CDE },
2045 { 0x10CAE, 0x10CEE },
2046 { 0x118AA, 0x118CA },
2047 { 0x118BA, 0x118DA },
2048 { 0x16E4C, 0x16E6C },
2049 { 0x16E5C, 0x16E7C },
2050 { 0x1E90B, 0x1E92D },
2051 { 0x1E91B, 0x1E93D }
2052};
2053
2054static const CaseFoldMapping1_32 case_fold1_32_003[] = {
2055 { 0x10407, 0x1042F },
2056 { 0x10417, 0x1043F },
2057 { 0x10427, 0x1044F },
2058 { 0x104B7, 0x104DF },
2059 { 0x104C7, 0x104EF },
2060 { 0x10576, 0x1059D },
2061 { 0x10586, 0x105AD },
2062 { 0x10C8F, 0x10CCF },
2063 { 0x10C9F, 0x10CDF },
2064 { 0x10CAF, 0x10CEF },
2065 { 0x118AB, 0x118CB },
2066 { 0x118BB, 0x118DB },
2067 { 0x16E4D, 0x16E6D },
2068 { 0x16E5D, 0x16E7D },
2069 { 0x1E90A, 0x1E92C },
2070 { 0x1E91A, 0x1E93C }
2071};
2072
2073static const CaseFoldMapping1_32 case_fold1_32_004[] = {
2074 { 0x10400, 0x10428 },
2075 { 0x10410, 0x10438 },
2076 { 0x10420, 0x10448 },
2077 { 0x104B0, 0x104D8 },
2078 { 0x104C0, 0x104E8 },
2079 { 0x104D0, 0x104F8 },
2080 { 0x10571, 0x10598 },
2081 { 0x10581, 0x105A8 },
2082 { 0x10591, 0x105B8 },
2083 { 0x10C88, 0x10CC8 },
2084 { 0x10C98, 0x10CD8 },
2085 { 0x10CA8, 0x10CE8 },
2086 { 0x118AC, 0x118CC },
2087 { 0x118BC, 0x118DC },
2088 { 0x16E4A, 0x16E6A },
2089 { 0x16E5A, 0x16E7A },
2090 { 0x1E90D, 0x1E92F },
2091 { 0x1E91D, 0x1E93F }
2092};
2093
2094static const CaseFoldMapping1_32 case_fold1_32_005[] = {
2095 { 0x10401, 0x10429 },
2096 { 0x10411, 0x10439 },
2097 { 0x10421, 0x10449 },
2098 { 0x104B1, 0x104D9 },
2099 { 0x104C1, 0x104E9 },
2100 { 0x104D1, 0x104F9 },
2101 { 0x10570, 0x10597 },
2102 { 0x10580, 0x105A7 },
2103 { 0x10590, 0x105B7 },
2104 { 0x10C89, 0x10CC9 },
2105 { 0x10C99, 0x10CD9 },
2106 { 0x10CA9, 0x10CE9 },
2107 { 0x118AD, 0x118CD },
2108 { 0x118BD, 0x118DD },
2109 { 0x16E4B, 0x16E6B },
2110 { 0x16E5B, 0x16E7B },
2111 { 0x1E90C, 0x1E92E },
2112 { 0x1E91C, 0x1E93E }
2113};
2114
2115static const CaseFoldMapping1_32 case_fold1_32_006[] = {
2116 { 0x10402, 0x1042A },
2117 { 0x10412, 0x1043A },
2118 { 0x10422, 0x1044A },
2119 { 0x104B2, 0x104DA },
2120 { 0x104C2, 0x104EA },
2121 { 0x104D2, 0x104FA },
2122 { 0x10573, 0x1059A },
2123 { 0x10583, 0x105AA },
2124 { 0x10C8A, 0x10CCA },
2125 { 0x10C9A, 0x10CDA },
2126 { 0x10CAA, 0x10CEA },
2127 { 0x118AE, 0x118CE },
2128 { 0x118BE, 0x118DE },
2129 { 0x16E48, 0x16E68 },
2130 { 0x16E58, 0x16E78 },
2131 { 0x1E90F, 0x1E931 },
2132 { 0x1E91F, 0x1E941 }
2133};
2134
2135static const CaseFoldMapping1_32 case_fold1_32_007[] = {
2136 { 0x10403, 0x1042B },
2137 { 0x10413, 0x1043B },
2138 { 0x10423, 0x1044B },
2139 { 0x104B3, 0x104DB },
2140 { 0x104C3, 0x104EB },
2141 { 0x104D3, 0x104FB },
2142 { 0x10572, 0x10599 },
2143 { 0x10582, 0x105A9 },
2144 { 0x10592, 0x105B9 },
2145 { 0x10C8B, 0x10CCB },
2146 { 0x10C9B, 0x10CDB },
2147 { 0x10CAB, 0x10CEB },
2148 { 0x118AF, 0x118CF },
2149 { 0x118BF, 0x118DF },
2150 { 0x16E49, 0x16E69 },
2151 { 0x16E59, 0x16E79 },
2152 { 0x1E90E, 0x1E930 },
2153 { 0x1E91E, 0x1E940 }
2154};
2155
2156static const CaseFoldMapping1_32 case_fold1_32_008[] = {
2157 { 0x1040C, 0x10434 },
2158 { 0x1041C, 0x10444 },
2159 { 0x104BC, 0x104E4 },
2160 { 0x104CC, 0x104F4 },
2161 { 0x1057D, 0x105A4 },
2162 { 0x1058D, 0x105B4 },
2163 { 0x10C84, 0x10CC4 },
2164 { 0x10C94, 0x10CD4 },
2165 { 0x10CA4, 0x10CE4 },
2166 { 0x118A0, 0x118C0 },
2167 { 0x118B0, 0x118D0 },
2168 { 0x16E46, 0x16E66 },
2169 { 0x16E56, 0x16E76 },
2170 { 0x1E901, 0x1E923 },
2171 { 0x1E911, 0x1E933 },
2172 { 0x1E921, 0x1E943 }
2173};
2174
2175static const CaseFoldMapping1_32 case_fold1_32_009[] = {
2176 { 0x1040D, 0x10435 },
2177 { 0x1041D, 0x10445 },
2178 { 0x104BD, 0x104E5 },
2179 { 0x104CD, 0x104F5 },
2180 { 0x1057C, 0x105A3 },
2181 { 0x1058C, 0x105B3 },
2182 { 0x10C85, 0x10CC5 },
2183 { 0x10C95, 0x10CD5 },
2184 { 0x10CA5, 0x10CE5 },
2185 { 0x118A1, 0x118C1 },
2186 { 0x118B1, 0x118D1 },
2187 { 0x16E47, 0x16E67 },
2188 { 0x16E57, 0x16E77 },
2189 { 0x1E900, 0x1E922 },
2190 { 0x1E910, 0x1E932 },
2191 { 0x1E920, 0x1E942 }
2192};
2193
2194static const CaseFoldMapping1_32 case_fold1_32_010[] = {
2195 { 0x1040E, 0x10436 },
2196 { 0x1041E, 0x10446 },
2197 { 0x104BE, 0x104E6 },
2198 { 0x104CE, 0x104F6 },
2199 { 0x1057F, 0x105A6 },
2200 { 0x1058F, 0x105B6 },
2201 { 0x10C86, 0x10CC6 },
2202 { 0x10C96, 0x10CD6 },
2203 { 0x10CA6, 0x10CE6 },
2204 { 0x118A2, 0x118C2 },
2205 { 0x118B2, 0x118D2 },
2206 { 0x16E44, 0x16E64 },
2207 { 0x16E54, 0x16E74 },
2208 { 0x1E903, 0x1E925 },
2209 { 0x1E913, 0x1E935 }
2210};
2211
2212static const CaseFoldMapping1_32 case_fold1_32_011[] = {
2213 { 0x1040F, 0x10437 },
2214 { 0x1041F, 0x10447 },
2215 { 0x104BF, 0x104E7 },
2216 { 0x104CF, 0x104F7 },
2217 { 0x1057E, 0x105A5 },
2218 { 0x1058E, 0x105B5 },
2219 { 0x10C87, 0x10CC7 },
2220 { 0x10C97, 0x10CD7 },
2221 { 0x10CA7, 0x10CE7 },
2222 { 0x118A3, 0x118C3 },
2223 { 0x118B3, 0x118D3 },
2224 { 0x16E45, 0x16E65 },
2225 { 0x16E55, 0x16E75 },
2226 { 0x1E902, 0x1E924 },
2227 { 0x1E912, 0x1E934 }
2228};
2229
2230static const CaseFoldMapping1_32 case_fold1_32_012[] = {
2231 { 0x10408, 0x10430 },
2232 { 0x10418, 0x10440 },
2233 { 0x104B8, 0x104E0 },
2234 { 0x104C8, 0x104F0 },
2235 { 0x10579, 0x105A0 },
2236 { 0x10589, 0x105B0 },
2237 { 0x10C80, 0x10CC0 },
2238 { 0x10C90, 0x10CD0 },
2239 { 0x10CA0, 0x10CE0 },
2240 { 0x10CB0, 0x10CF0 },
2241 { 0x118A4, 0x118C4 },
2242 { 0x118B4, 0x118D4 },
2243 { 0x16E42, 0x16E62 },
2244 { 0x16E52, 0x16E72 },
2245 { 0x1E905, 0x1E927 },
2246 { 0x1E915, 0x1E937 }
2247};
2248
2249static const CaseFoldMapping1_32 case_fold1_32_013[] = {
2250 { 0x10409, 0x10431 },
2251 { 0x10419, 0x10441 },
2252 { 0x104B9, 0x104E1 },
2253 { 0x104C9, 0x104F1 },
2254 { 0x10578, 0x1059F },
2255 { 0x10588, 0x105AF },
2256 { 0x10C81, 0x10CC1 },
2257 { 0x10C91, 0x10CD1 },
2258 { 0x10CA1, 0x10CE1 },
2259 { 0x10CB1, 0x10CF1 },
2260 { 0x118A5, 0x118C5 },
2261 { 0x118B5, 0x118D5 },
2262 { 0x16E43, 0x16E63 },
2263 { 0x16E53, 0x16E73 },
2264 { 0x1E904, 0x1E926 },
2265 { 0x1E914, 0x1E936 }
2266};
2267
2268static const CaseFoldMapping1_32 case_fold1_32_014[] = {
2269 { 0x1040A, 0x10432 },
2270 { 0x1041A, 0x10442 },
2271 { 0x104BA, 0x104E2 },
2272 { 0x104CA, 0x104F2 },
2273 { 0x10C82, 0x10CC2 },
2274 { 0x10C92, 0x10CD2 },
2275 { 0x10CA2, 0x10CE2 },
2276 { 0x10CB2, 0x10CF2 },
2277 { 0x118A6, 0x118C6 },
2278 { 0x118B6, 0x118D6 },
2279 { 0x16E40, 0x16E60 },
2280 { 0x16E50, 0x16E70 },
2281 { 0x1E907, 0x1E929 },
2282 { 0x1E917, 0x1E939 }
2283};
2284
2285static const CaseFoldMapping1_32 case_fold1_32_015[] = {
2286 { 0x1040B, 0x10433 },
2287 { 0x1041B, 0x10443 },
2288 { 0x104BB, 0x104E3 },
2289 { 0x104CB, 0x104F3 },
2290 { 0x1057A, 0x105A1 },
2291 { 0x1058A, 0x105B1 },
2292 { 0x10C83, 0x10CC3 },
2293 { 0x10C93, 0x10CD3 },
2294 { 0x10CA3, 0x10CE3 },
2295 { 0x118A7, 0x118C7 },
2296 { 0x118B7, 0x118D7 },
2297 { 0x16E41, 0x16E61 },
2298 { 0x16E51, 0x16E71 },
2299 { 0x1E906, 0x1E928 },
2300 { 0x1E916, 0x1E938 }
2301};
2302
2303static const CaseFoldMapping2_16 case_fold2_16_000[] = {
2304 { 0x1E9E, 0x0073, 0x0073 },
2305 { 0x1F8F, 0x1F07, 0x03B9 },
2306 { 0x1F9F, 0x1F27, 0x03B9 },
2307 { 0x1FAF, 0x1F67, 0x03B9 }
2308};
2309
2310static const CaseFoldMapping2_16 case_fold2_16_001[] = {
2311 { 0x0130, 0x0069, 0x0307 },
2312 { 0x01F0, 0x006A, 0x030C },
2313 { 0x1F8E, 0x1F06, 0x03B9 },
2314 { 0x1F9E, 0x1F26, 0x03B9 },
2315 { 0x1FAE, 0x1F66, 0x03B9 }
2316};
2317
2318static const CaseFoldMapping2_16 case_fold2_16_002[] = {
2319 { 0x0587, 0x0565, 0x0582 },
2320 { 0x1F8D, 0x1F05, 0x03B9 },
2321 { 0x1F9D, 0x1F25, 0x03B9 },
2322 { 0x1FAD, 0x1F65, 0x03B9 }
2323};
2324
2325static const CaseFoldMapping2_16 case_fold2_16_003[] = {
2326 { 0x1F8C, 0x1F04, 0x03B9 },
2327 { 0x1F9C, 0x1F24, 0x03B9 },
2328 { 0x1FAC, 0x1F64, 0x03B9 },
2329 { 0x1FBC, 0x03B1, 0x03B9 },
2330 { 0x1FCC, 0x03B7, 0x03B9 },
2331 { 0x1FFC, 0x03C9, 0x03B9 }
2332};
2333
2334static const CaseFoldMapping2_16 case_fold2_16_004[] = {
2335 { 0x1E9A, 0x0061, 0x02BE },
2336 { 0x1F8B, 0x1F03, 0x03B9 },
2337 { 0x1F9B, 0x1F23, 0x03B9 },
2338 { 0x1FAB, 0x1F63, 0x03B9 }
2339};
2340
2341static const CaseFoldMapping2_16 case_fold2_16_005[] = {
2342 { 0x1F8A, 0x1F02, 0x03B9 },
2343 { 0x1F9A, 0x1F22, 0x03B9 },
2344 { 0x1FAA, 0x1F62, 0x03B9 }
2345};
2346
2347static const CaseFoldMapping2_16 case_fold2_16_006[] = {
2348 { 0x1E98, 0x0077, 0x030A },
2349 { 0x1F89, 0x1F01, 0x03B9 },
2350 { 0x1F99, 0x1F21, 0x03B9 },
2351 { 0x1FA9, 0x1F61, 0x03B9 }
2352};
2353
2354static const CaseFoldMapping2_16 case_fold2_16_007[] = {
2355 { 0x1E99, 0x0079, 0x030A },
2356 { 0x1F88, 0x1F00, 0x03B9 },
2357 { 0x1F98, 0x1F20, 0x03B9 },
2358 { 0x1FA8, 0x1F60, 0x03B9 }
2359};
2360
2361static const CaseFoldMapping2_16 case_fold2_16_008[] = {
2362 { 0x0149, 0x02BC, 0x006E },
2363 { 0x1E96, 0x0068, 0x0331 },
2364 { 0x1F87, 0x1F07, 0x03B9 },
2365 { 0x1F97, 0x1F27, 0x03B9 },
2366 { 0x1FA7, 0x1F67, 0x03B9 },
2367 { 0xFB13, 0x0574, 0x0576 }
2368};
2369
2370static const CaseFoldMapping2_16 case_fold2_16_009[] = {
2371 { 0x1E97, 0x0074, 0x0308 },
2372 { 0x1F86, 0x1F06, 0x03B9 },
2373 { 0x1F96, 0x1F26, 0x03B9 },
2374 { 0x1FA6, 0x1F66, 0x03B9 },
2375 { 0x1FB6, 0x03B1, 0x0342 },
2376 { 0x1FC6, 0x03B7, 0x0342 },
2377 { 0x1FD6, 0x03B9, 0x0342 },
2378 { 0x1FE6, 0x03C5, 0x0342 },
2379 { 0x1FF6, 0x03C9, 0x0342 },
2380 { 0xFB02, 0x0066, 0x006C }
2381};
2382
2383static const CaseFoldMapping2_16 case_fold2_16_010[] = {
2384 { 0x1F85, 0x1F05, 0x03B9 },
2385 { 0x1F95, 0x1F25, 0x03B9 },
2386 { 0x1FA5, 0x1F65, 0x03B9 },
2387 { 0xFB01, 0x0066, 0x0069 }
2388};
2389
2390static const CaseFoldMapping2_16 case_fold2_16_011[] = {
2391 { 0x1F84, 0x1F04, 0x03B9 },
2392 { 0x1F94, 0x1F24, 0x03B9 },
2393 { 0x1FA4, 0x1F64, 0x03B9 },
2394 { 0x1FB4, 0x03AC, 0x03B9 },
2395 { 0x1FC4, 0x03AE, 0x03B9 },
2396 { 0x1FE4, 0x03C1, 0x0313 },
2397 { 0x1FF4, 0x03CE, 0x03B9 },
2398 { 0xFB00, 0x0066, 0x0066 }
2399};
2400
2401static const CaseFoldMapping2_16 case_fold2_16_012[] = {
2402 { 0x1F83, 0x1F03, 0x03B9 },
2403 { 0x1F93, 0x1F23, 0x03B9 },
2404 { 0x1FA3, 0x1F63, 0x03B9 },
2405 { 0x1FB3, 0x03B1, 0x03B9 },
2406 { 0x1FC3, 0x03B7, 0x03B9 },
2407 { 0x1FF3, 0x03C9, 0x03B9 },
2408 { 0xFB17, 0x0574, 0x056D }
2409};
2410
2411static const CaseFoldMapping2_16 case_fold2_16_013[] = {
2412 { 0x1F82, 0x1F02, 0x03B9 },
2413 { 0x1F92, 0x1F22, 0x03B9 },
2414 { 0x1FA2, 0x1F62, 0x03B9 },
2415 { 0x1FB2, 0x1F70, 0x03B9 },
2416 { 0x1FC2, 0x1F74, 0x03B9 },
2417 { 0x1FF2, 0x1F7C, 0x03B9 },
2418 { 0xFB06, 0x0073, 0x0074 },
2419 { 0xFB16, 0x057E, 0x0576 }
2420};
2421
2422static const CaseFoldMapping2_16 case_fold2_16_014[] = {
2423 { 0x1F81, 0x1F01, 0x03B9 },
2424 { 0x1F91, 0x1F21, 0x03B9 },
2425 { 0x1FA1, 0x1F61, 0x03B9 },
2426 { 0xFB05, 0x0073, 0x0074 },
2427 { 0xFB15, 0x0574, 0x056B }
2428};
2429
2430static const CaseFoldMapping2_16 case_fold2_16_015[] = {
2431 { 0x00DF, 0x0073, 0x0073 },
2432 { 0x1F50, 0x03C5, 0x0313 },
2433 { 0x1F80, 0x1F00, 0x03B9 },
2434 { 0x1F90, 0x1F20, 0x03B9 },
2435 { 0x1FA0, 0x1F60, 0x03B9 },
2436 { 0xFB14, 0x0574, 0x0565 }
2437};
2438
2439static const CaseFoldMapping3_16 case_fold3_16_000[] = {
2440 { 0x1FB7, 0x03B1, 0x0342, 0x03B9 },
2441 { 0x1FC7, 0x03B7, 0x0342, 0x03B9 },
2442 { 0x1FD3, 0x03B9, 0x0308, 0x0301 },
2443 { 0x1FD7, 0x03B9, 0x0308, 0x0342 },
2444 { 0x1FE3, 0x03C5, 0x0308, 0x0301 },
2445 { 0x1FE7, 0x03C5, 0x0308, 0x0342 },
2446 { 0x1FF7, 0x03C9, 0x0342, 0x03B9 },
2447 { 0xFB03, 0x0066, 0x0066, 0x0069 }
2448};
2449
2450static const CaseFoldMapping3_16 case_fold3_16_001[] = {
2451 { 0x1F52, 0x03C5, 0x0313, 0x0300 },
2452 { 0x1F56, 0x03C5, 0x0313, 0x0342 },
2453 { 0x1FD2, 0x03B9, 0x0308, 0x0300 },
2454 { 0x1FE2, 0x03C5, 0x0308, 0x0300 }
2455};
2456
2457static const CaseFoldMapping3_16 case_fold3_16_003[] = {
2458 { 0x0390, 0x03B9, 0x0308, 0x0301 },
2459 { 0x03B0, 0x03C5, 0x0308, 0x0301 },
2460 { 0x1F54, 0x03C5, 0x0313, 0x0301 },
2461 { 0xFB04, 0x0066, 0x0066, 0x006C }
2462};
2463
// Top-level bucket table for single-codepoint 16-bit case folding: one slot
// per case_fold1_16_NNN table above, each paired with its entry count.
// Slots 123 and 125 have no mappings and are { NULL, 0 }.
// Generated Unicode case-folding data; do not hand-edit.
static const CaseFoldHashBucket1_16 case_fold_hash1_16[] = {
    { case_fold1_16_000, SDL_arraysize(case_fold1_16_000) },
    { case_fold1_16_001, SDL_arraysize(case_fold1_16_001) },
    { case_fold1_16_002, SDL_arraysize(case_fold1_16_002) },
    { case_fold1_16_003, SDL_arraysize(case_fold1_16_003) },
    { case_fold1_16_004, SDL_arraysize(case_fold1_16_004) },
    { case_fold1_16_005, SDL_arraysize(case_fold1_16_005) },
    { case_fold1_16_006, SDL_arraysize(case_fold1_16_006) },
    { case_fold1_16_007, SDL_arraysize(case_fold1_16_007) },
    { case_fold1_16_008, SDL_arraysize(case_fold1_16_008) },
    { case_fold1_16_009, SDL_arraysize(case_fold1_16_009) },
    { case_fold1_16_010, SDL_arraysize(case_fold1_16_010) },
    { case_fold1_16_011, SDL_arraysize(case_fold1_16_011) },
    { case_fold1_16_012, SDL_arraysize(case_fold1_16_012) },
    { case_fold1_16_013, SDL_arraysize(case_fold1_16_013) },
    { case_fold1_16_014, SDL_arraysize(case_fold1_16_014) },
    { case_fold1_16_015, SDL_arraysize(case_fold1_16_015) },
    { case_fold1_16_016, SDL_arraysize(case_fold1_16_016) },
    { case_fold1_16_017, SDL_arraysize(case_fold1_16_017) },
    { case_fold1_16_018, SDL_arraysize(case_fold1_16_018) },
    { case_fold1_16_019, SDL_arraysize(case_fold1_16_019) },
    { case_fold1_16_020, SDL_arraysize(case_fold1_16_020) },
    { case_fold1_16_021, SDL_arraysize(case_fold1_16_021) },
    { case_fold1_16_022, SDL_arraysize(case_fold1_16_022) },
    { case_fold1_16_023, SDL_arraysize(case_fold1_16_023) },
    { case_fold1_16_024, SDL_arraysize(case_fold1_16_024) },
    { case_fold1_16_025, SDL_arraysize(case_fold1_16_025) },
    { case_fold1_16_026, SDL_arraysize(case_fold1_16_026) },
    { case_fold1_16_027, SDL_arraysize(case_fold1_16_027) },
    { case_fold1_16_028, SDL_arraysize(case_fold1_16_028) },
    { case_fold1_16_029, SDL_arraysize(case_fold1_16_029) },
    { case_fold1_16_030, SDL_arraysize(case_fold1_16_030) },
    { case_fold1_16_031, SDL_arraysize(case_fold1_16_031) },
    { case_fold1_16_032, SDL_arraysize(case_fold1_16_032) },
    { case_fold1_16_033, SDL_arraysize(case_fold1_16_033) },
    { case_fold1_16_034, SDL_arraysize(case_fold1_16_034) },
    { case_fold1_16_035, SDL_arraysize(case_fold1_16_035) },
    { case_fold1_16_036, SDL_arraysize(case_fold1_16_036) },
    { case_fold1_16_037, SDL_arraysize(case_fold1_16_037) },
    { case_fold1_16_038, SDL_arraysize(case_fold1_16_038) },
    { case_fold1_16_039, SDL_arraysize(case_fold1_16_039) },
    { case_fold1_16_040, SDL_arraysize(case_fold1_16_040) },
    { case_fold1_16_041, SDL_arraysize(case_fold1_16_041) },
    { case_fold1_16_042, SDL_arraysize(case_fold1_16_042) },
    { case_fold1_16_043, SDL_arraysize(case_fold1_16_043) },
    { case_fold1_16_044, SDL_arraysize(case_fold1_16_044) },
    { case_fold1_16_045, SDL_arraysize(case_fold1_16_045) },
    { case_fold1_16_046, SDL_arraysize(case_fold1_16_046) },
    { case_fold1_16_047, SDL_arraysize(case_fold1_16_047) },
    { case_fold1_16_048, SDL_arraysize(case_fold1_16_048) },
    { case_fold1_16_049, SDL_arraysize(case_fold1_16_049) },
    { case_fold1_16_050, SDL_arraysize(case_fold1_16_050) },
    { case_fold1_16_051, SDL_arraysize(case_fold1_16_051) },
    { case_fold1_16_052, SDL_arraysize(case_fold1_16_052) },
    { case_fold1_16_053, SDL_arraysize(case_fold1_16_053) },
    { case_fold1_16_054, SDL_arraysize(case_fold1_16_054) },
    { case_fold1_16_055, SDL_arraysize(case_fold1_16_055) },
    { case_fold1_16_056, SDL_arraysize(case_fold1_16_056) },
    { case_fold1_16_057, SDL_arraysize(case_fold1_16_057) },
    { case_fold1_16_058, SDL_arraysize(case_fold1_16_058) },
    { case_fold1_16_059, SDL_arraysize(case_fold1_16_059) },
    { case_fold1_16_060, SDL_arraysize(case_fold1_16_060) },
    { case_fold1_16_061, SDL_arraysize(case_fold1_16_061) },
    { case_fold1_16_062, SDL_arraysize(case_fold1_16_062) },
    { case_fold1_16_063, SDL_arraysize(case_fold1_16_063) },
    { case_fold1_16_064, SDL_arraysize(case_fold1_16_064) },
    { case_fold1_16_065, SDL_arraysize(case_fold1_16_065) },
    { case_fold1_16_066, SDL_arraysize(case_fold1_16_066) },
    { case_fold1_16_067, SDL_arraysize(case_fold1_16_067) },
    { case_fold1_16_068, SDL_arraysize(case_fold1_16_068) },
    { case_fold1_16_069, SDL_arraysize(case_fold1_16_069) },
    { case_fold1_16_070, SDL_arraysize(case_fold1_16_070) },
    { case_fold1_16_071, SDL_arraysize(case_fold1_16_071) },
    { case_fold1_16_072, SDL_arraysize(case_fold1_16_072) },
    { case_fold1_16_073, SDL_arraysize(case_fold1_16_073) },
    { case_fold1_16_074, SDL_arraysize(case_fold1_16_074) },
    { case_fold1_16_075, SDL_arraysize(case_fold1_16_075) },
    { case_fold1_16_076, SDL_arraysize(case_fold1_16_076) },
    { case_fold1_16_077, SDL_arraysize(case_fold1_16_077) },
    { case_fold1_16_078, SDL_arraysize(case_fold1_16_078) },
    { case_fold1_16_079, SDL_arraysize(case_fold1_16_079) },
    { case_fold1_16_080, SDL_arraysize(case_fold1_16_080) },
    { case_fold1_16_081, SDL_arraysize(case_fold1_16_081) },
    { case_fold1_16_082, SDL_arraysize(case_fold1_16_082) },
    { case_fold1_16_083, SDL_arraysize(case_fold1_16_083) },
    { case_fold1_16_084, SDL_arraysize(case_fold1_16_084) },
    { case_fold1_16_085, SDL_arraysize(case_fold1_16_085) },
    { case_fold1_16_086, SDL_arraysize(case_fold1_16_086) },
    { case_fold1_16_087, SDL_arraysize(case_fold1_16_087) },
    { case_fold1_16_088, SDL_arraysize(case_fold1_16_088) },
    { case_fold1_16_089, SDL_arraysize(case_fold1_16_089) },
    { case_fold1_16_090, SDL_arraysize(case_fold1_16_090) },
    { case_fold1_16_091, SDL_arraysize(case_fold1_16_091) },
    { case_fold1_16_092, SDL_arraysize(case_fold1_16_092) },
    { case_fold1_16_093, SDL_arraysize(case_fold1_16_093) },
    { case_fold1_16_094, SDL_arraysize(case_fold1_16_094) },
    { case_fold1_16_095, SDL_arraysize(case_fold1_16_095) },
    { case_fold1_16_096, SDL_arraysize(case_fold1_16_096) },
    { case_fold1_16_097, SDL_arraysize(case_fold1_16_097) },
    { case_fold1_16_098, SDL_arraysize(case_fold1_16_098) },
    { case_fold1_16_099, SDL_arraysize(case_fold1_16_099) },
    { case_fold1_16_100, SDL_arraysize(case_fold1_16_100) },
    { case_fold1_16_101, SDL_arraysize(case_fold1_16_101) },
    { case_fold1_16_102, SDL_arraysize(case_fold1_16_102) },
    { case_fold1_16_103, SDL_arraysize(case_fold1_16_103) },
    { case_fold1_16_104, SDL_arraysize(case_fold1_16_104) },
    { case_fold1_16_105, SDL_arraysize(case_fold1_16_105) },
    { case_fold1_16_106, SDL_arraysize(case_fold1_16_106) },
    { case_fold1_16_107, SDL_arraysize(case_fold1_16_107) },
    { case_fold1_16_108, SDL_arraysize(case_fold1_16_108) },
    { case_fold1_16_109, SDL_arraysize(case_fold1_16_109) },
    { case_fold1_16_110, SDL_arraysize(case_fold1_16_110) },
    { case_fold1_16_111, SDL_arraysize(case_fold1_16_111) },
    { case_fold1_16_112, SDL_arraysize(case_fold1_16_112) },
    { case_fold1_16_113, SDL_arraysize(case_fold1_16_113) },
    { case_fold1_16_114, SDL_arraysize(case_fold1_16_114) },
    { case_fold1_16_115, SDL_arraysize(case_fold1_16_115) },
    { case_fold1_16_116, SDL_arraysize(case_fold1_16_116) },
    { case_fold1_16_117, SDL_arraysize(case_fold1_16_117) },
    { case_fold1_16_118, SDL_arraysize(case_fold1_16_118) },
    { case_fold1_16_119, SDL_arraysize(case_fold1_16_119) },
    { case_fold1_16_120, SDL_arraysize(case_fold1_16_120) },
    { case_fold1_16_121, SDL_arraysize(case_fold1_16_121) },
    { case_fold1_16_122, SDL_arraysize(case_fold1_16_122) },
    { NULL, 0 },
    { case_fold1_16_124, SDL_arraysize(case_fold1_16_124) },
    { NULL, 0 },
    { case_fold1_16_126, SDL_arraysize(case_fold1_16_126) },
    { case_fold1_16_127, SDL_arraysize(case_fold1_16_127) },
    { case_fold1_16_128, SDL_arraysize(case_fold1_16_128) },
    { case_fold1_16_129, SDL_arraysize(case_fold1_16_129) },
    { case_fold1_16_130, SDL_arraysize(case_fold1_16_130) },
    { case_fold1_16_131, SDL_arraysize(case_fold1_16_131) },
    { case_fold1_16_132, SDL_arraysize(case_fold1_16_132) },
    { case_fold1_16_133, SDL_arraysize(case_fold1_16_133) },
    { case_fold1_16_134, SDL_arraysize(case_fold1_16_134) },
    { case_fold1_16_135, SDL_arraysize(case_fold1_16_135) },
    { case_fold1_16_136, SDL_arraysize(case_fold1_16_136) },
    { case_fold1_16_137, SDL_arraysize(case_fold1_16_137) },
    { case_fold1_16_138, SDL_arraysize(case_fold1_16_138) },
    { case_fold1_16_139, SDL_arraysize(case_fold1_16_139) },
    { case_fold1_16_140, SDL_arraysize(case_fold1_16_140) },
    { case_fold1_16_141, SDL_arraysize(case_fold1_16_141) },
    { case_fold1_16_142, SDL_arraysize(case_fold1_16_142) },
    { case_fold1_16_143, SDL_arraysize(case_fold1_16_143) },
    { case_fold1_16_144, SDL_arraysize(case_fold1_16_144) },
    { case_fold1_16_145, SDL_arraysize(case_fold1_16_145) },
    { case_fold1_16_146, SDL_arraysize(case_fold1_16_146) },
    { case_fold1_16_147, SDL_arraysize(case_fold1_16_147) },
    { case_fold1_16_148, SDL_arraysize(case_fold1_16_148) },
    { case_fold1_16_149, SDL_arraysize(case_fold1_16_149) },
    { case_fold1_16_150, SDL_arraysize(case_fold1_16_150) },
    { case_fold1_16_151, SDL_arraysize(case_fold1_16_151) },
    { case_fold1_16_152, SDL_arraysize(case_fold1_16_152) },
    { case_fold1_16_153, SDL_arraysize(case_fold1_16_153) },
    { case_fold1_16_154, SDL_arraysize(case_fold1_16_154) },
    { case_fold1_16_155, SDL_arraysize(case_fold1_16_155) },
    { case_fold1_16_156, SDL_arraysize(case_fold1_16_156) },
    { case_fold1_16_157, SDL_arraysize(case_fold1_16_157) },
    { case_fold1_16_158, SDL_arraysize(case_fold1_16_158) },
    { case_fold1_16_159, SDL_arraysize(case_fold1_16_159) },
    { case_fold1_16_160, SDL_arraysize(case_fold1_16_160) },
    { case_fold1_16_161, SDL_arraysize(case_fold1_16_161) },
    { case_fold1_16_162, SDL_arraysize(case_fold1_16_162) },
    { case_fold1_16_163, SDL_arraysize(case_fold1_16_163) },
    { case_fold1_16_164, SDL_arraysize(case_fold1_16_164) },
    { case_fold1_16_165, SDL_arraysize(case_fold1_16_165) },
    { case_fold1_16_166, SDL_arraysize(case_fold1_16_166) },
    { case_fold1_16_167, SDL_arraysize(case_fold1_16_167) },
    { case_fold1_16_168, SDL_arraysize(case_fold1_16_168) },
    { case_fold1_16_169, SDL_arraysize(case_fold1_16_169) },
    { case_fold1_16_170, SDL_arraysize(case_fold1_16_170) },
    { case_fold1_16_171, SDL_arraysize(case_fold1_16_171) },
    { case_fold1_16_172, SDL_arraysize(case_fold1_16_172) },
    { case_fold1_16_173, SDL_arraysize(case_fold1_16_173) },
    { case_fold1_16_174, SDL_arraysize(case_fold1_16_174) },
    { case_fold1_16_175, SDL_arraysize(case_fold1_16_175) },
    { case_fold1_16_176, SDL_arraysize(case_fold1_16_176) },
    { case_fold1_16_177, SDL_arraysize(case_fold1_16_177) },
    { case_fold1_16_178, SDL_arraysize(case_fold1_16_178) },
    { case_fold1_16_179, SDL_arraysize(case_fold1_16_179) },
    { case_fold1_16_180, SDL_arraysize(case_fold1_16_180) },
    { case_fold1_16_181, SDL_arraysize(case_fold1_16_181) },
    { case_fold1_16_182, SDL_arraysize(case_fold1_16_182) },
    { case_fold1_16_183, SDL_arraysize(case_fold1_16_183) },
    { case_fold1_16_184, SDL_arraysize(case_fold1_16_184) },
    { case_fold1_16_185, SDL_arraysize(case_fold1_16_185) },
    { case_fold1_16_186, SDL_arraysize(case_fold1_16_186) },
    { case_fold1_16_187, SDL_arraysize(case_fold1_16_187) },
    { case_fold1_16_188, SDL_arraysize(case_fold1_16_188) },
    { case_fold1_16_189, SDL_arraysize(case_fold1_16_189) },
    { case_fold1_16_190, SDL_arraysize(case_fold1_16_190) },
    { case_fold1_16_191, SDL_arraysize(case_fold1_16_191) },
    { case_fold1_16_192, SDL_arraysize(case_fold1_16_192) },
    { case_fold1_16_193, SDL_arraysize(case_fold1_16_193) },
    { case_fold1_16_194, SDL_arraysize(case_fold1_16_194) },
    { case_fold1_16_195, SDL_arraysize(case_fold1_16_195) },
    { case_fold1_16_196, SDL_arraysize(case_fold1_16_196) },
    { case_fold1_16_197, SDL_arraysize(case_fold1_16_197) },
    { case_fold1_16_198, SDL_arraysize(case_fold1_16_198) },
    { case_fold1_16_199, SDL_arraysize(case_fold1_16_199) },
    { case_fold1_16_200, SDL_arraysize(case_fold1_16_200) },
    { case_fold1_16_201, SDL_arraysize(case_fold1_16_201) },
    { case_fold1_16_202, SDL_arraysize(case_fold1_16_202) },
    { case_fold1_16_203, SDL_arraysize(case_fold1_16_203) },
    { case_fold1_16_204, SDL_arraysize(case_fold1_16_204) },
    { case_fold1_16_205, SDL_arraysize(case_fold1_16_205) },
    { case_fold1_16_206, SDL_arraysize(case_fold1_16_206) },
    { case_fold1_16_207, SDL_arraysize(case_fold1_16_207) },
    { case_fold1_16_208, SDL_arraysize(case_fold1_16_208) },
    { case_fold1_16_209, SDL_arraysize(case_fold1_16_209) },
    { case_fold1_16_210, SDL_arraysize(case_fold1_16_210) },
    { case_fold1_16_211, SDL_arraysize(case_fold1_16_211) },
    { case_fold1_16_212, SDL_arraysize(case_fold1_16_212) },
    { case_fold1_16_213, SDL_arraysize(case_fold1_16_213) },
    { case_fold1_16_214, SDL_arraysize(case_fold1_16_214) },
    { case_fold1_16_215, SDL_arraysize(case_fold1_16_215) },
    { case_fold1_16_216, SDL_arraysize(case_fold1_16_216) },
    { case_fold1_16_217, SDL_arraysize(case_fold1_16_217) },
    { case_fold1_16_218, SDL_arraysize(case_fold1_16_218) },
    { case_fold1_16_219, SDL_arraysize(case_fold1_16_219) },
    { case_fold1_16_220, SDL_arraysize(case_fold1_16_220) },
    { case_fold1_16_221, SDL_arraysize(case_fold1_16_221) },
    { case_fold1_16_222, SDL_arraysize(case_fold1_16_222) },
    { case_fold1_16_223, SDL_arraysize(case_fold1_16_223) },
    { case_fold1_16_224, SDL_arraysize(case_fold1_16_224) },
    { case_fold1_16_225, SDL_arraysize(case_fold1_16_225) },
    { case_fold1_16_226, SDL_arraysize(case_fold1_16_226) },
    { case_fold1_16_227, SDL_arraysize(case_fold1_16_227) },
    { case_fold1_16_228, SDL_arraysize(case_fold1_16_228) },
    { case_fold1_16_229, SDL_arraysize(case_fold1_16_229) },
    { case_fold1_16_230, SDL_arraysize(case_fold1_16_230) },
    { case_fold1_16_231, SDL_arraysize(case_fold1_16_231) },
    { case_fold1_16_232, SDL_arraysize(case_fold1_16_232) },
    { case_fold1_16_233, SDL_arraysize(case_fold1_16_233) },
    { case_fold1_16_234, SDL_arraysize(case_fold1_16_234) },
    { case_fold1_16_235, SDL_arraysize(case_fold1_16_235) },
    { case_fold1_16_236, SDL_arraysize(case_fold1_16_236) },
    { case_fold1_16_237, SDL_arraysize(case_fold1_16_237) },
    { case_fold1_16_238, SDL_arraysize(case_fold1_16_238) },
    { case_fold1_16_239, SDL_arraysize(case_fold1_16_239) },
    { case_fold1_16_240, SDL_arraysize(case_fold1_16_240) },
    { case_fold1_16_241, SDL_arraysize(case_fold1_16_241) },
    { case_fold1_16_242, SDL_arraysize(case_fold1_16_242) },
    { case_fold1_16_243, SDL_arraysize(case_fold1_16_243) },
    { case_fold1_16_244, SDL_arraysize(case_fold1_16_244) },
    { case_fold1_16_245, SDL_arraysize(case_fold1_16_245) },
    { case_fold1_16_246, SDL_arraysize(case_fold1_16_246) },
    { case_fold1_16_247, SDL_arraysize(case_fold1_16_247) },
    { case_fold1_16_248, SDL_arraysize(case_fold1_16_248) },
    { case_fold1_16_249, SDL_arraysize(case_fold1_16_249) },
    { case_fold1_16_250, SDL_arraysize(case_fold1_16_250) },
    { case_fold1_16_251, SDL_arraysize(case_fold1_16_251) },
    { case_fold1_16_252, SDL_arraysize(case_fold1_16_252) },
    { case_fold1_16_253, SDL_arraysize(case_fold1_16_253) },
    { case_fold1_16_254, SDL_arraysize(case_fold1_16_254) },
    { case_fold1_16_255, SDL_arraysize(case_fold1_16_255) },
};
2722
// Top-level bucket table for single-codepoint 32-bit (non-BMP) case folding:
// one slot per case_fold1_32_NNN table, each paired with its entry count.
// Generated Unicode case-folding data; do not hand-edit.
static const CaseFoldHashBucket1_32 case_fold_hash1_32[] = {
    { case_fold1_32_000, SDL_arraysize(case_fold1_32_000) },
    { case_fold1_32_001, SDL_arraysize(case_fold1_32_001) },
    { case_fold1_32_002, SDL_arraysize(case_fold1_32_002) },
    { case_fold1_32_003, SDL_arraysize(case_fold1_32_003) },
    { case_fold1_32_004, SDL_arraysize(case_fold1_32_004) },
    { case_fold1_32_005, SDL_arraysize(case_fold1_32_005) },
    { case_fold1_32_006, SDL_arraysize(case_fold1_32_006) },
    { case_fold1_32_007, SDL_arraysize(case_fold1_32_007) },
    { case_fold1_32_008, SDL_arraysize(case_fold1_32_008) },
    { case_fold1_32_009, SDL_arraysize(case_fold1_32_009) },
    { case_fold1_32_010, SDL_arraysize(case_fold1_32_010) },
    { case_fold1_32_011, SDL_arraysize(case_fold1_32_011) },
    { case_fold1_32_012, SDL_arraysize(case_fold1_32_012) },
    { case_fold1_32_013, SDL_arraysize(case_fold1_32_013) },
    { case_fold1_32_014, SDL_arraysize(case_fold1_32_014) },
    { case_fold1_32_015, SDL_arraysize(case_fold1_32_015) },
};
2741
// Top-level bucket table for two-codepoint 16-bit case folding: one slot per
// case_fold2_16_NNN table, each paired with its entry count.
// Generated Unicode case-folding data; do not hand-edit.
static const CaseFoldHashBucket2_16 case_fold_hash2_16[] = {
    { case_fold2_16_000, SDL_arraysize(case_fold2_16_000) },
    { case_fold2_16_001, SDL_arraysize(case_fold2_16_001) },
    { case_fold2_16_002, SDL_arraysize(case_fold2_16_002) },
    { case_fold2_16_003, SDL_arraysize(case_fold2_16_003) },
    { case_fold2_16_004, SDL_arraysize(case_fold2_16_004) },
    { case_fold2_16_005, SDL_arraysize(case_fold2_16_005) },
    { case_fold2_16_006, SDL_arraysize(case_fold2_16_006) },
    { case_fold2_16_007, SDL_arraysize(case_fold2_16_007) },
    { case_fold2_16_008, SDL_arraysize(case_fold2_16_008) },
    { case_fold2_16_009, SDL_arraysize(case_fold2_16_009) },
    { case_fold2_16_010, SDL_arraysize(case_fold2_16_010) },
    { case_fold2_16_011, SDL_arraysize(case_fold2_16_011) },
    { case_fold2_16_012, SDL_arraysize(case_fold2_16_012) },
    { case_fold2_16_013, SDL_arraysize(case_fold2_16_013) },
    { case_fold2_16_014, SDL_arraysize(case_fold2_16_014) },
    { case_fold2_16_015, SDL_arraysize(case_fold2_16_015) },
};
2760
// Top-level bucket table for three-codepoint 16-bit case folding.
// Slot 2 has no mappings and is { NULL, 0 }.
// Generated Unicode case-folding data; do not hand-edit.
static const CaseFoldHashBucket3_16 case_fold_hash3_16[] = {
    { case_fold3_16_000, SDL_arraysize(case_fold3_16_000) },
    { case_fold3_16_001, SDL_arraysize(case_fold3_16_001) },
    { NULL, 0 },
    { case_fold3_16_003, SDL_arraysize(case_fold3_16_003) },
};
2767
2768#endif // SDL_casefolding_h_
2769
diff --git a/contrib/SDL-3.2.8/src/stdlib/SDL_crc16.c b/contrib/SDL-3.2.8/src/stdlib/SDL_crc16.c
new file mode 100644
index 0000000..828ce94
--- /dev/null
+++ b/contrib/SDL-3.2.8/src/stdlib/SDL_crc16.c
@@ -0,0 +1,52 @@
1/*
2 Simple DirectMedia Layer
3 Copyright (C) 1997-2025 Sam Lantinga <slouken@libsdl.org>
4
5 This software is provided 'as-is', without any express or implied
6 warranty. In no event will the authors be held liable for any damages
7 arising from the use of this software.
8
9 Permission is granted to anyone to use this software for any purpose,
10 including commercial applications, and to alter it and redistribute it
11 freely, subject to the following restrictions:
12
13 1. The origin of this software must not be misrepresented; you must not
14 claim that you wrote the original software. If you use this software
15 in a product, an acknowledgment in the product documentation would be
16 appreciated but is not required.
17 2. Altered source versions must be plainly marked as such, and must not be
18 misrepresented as being the original software.
19 3. This notice may not be removed or altered from any source distribution.
20*/
21#include "SDL_internal.h"
22
23/* Public domain CRC implementation adapted from:
24 http://home.thep.lu.se/~bjorn/crc/crc32_simple.c
25
26 This algorithm is compatible with the 16-bit CRC described here:
27 https://www.lammertbies.nl/comm/info/crc-calculation
28*/
29/* NOTE: DO NOT CHANGE THIS ALGORITHM
30 There is code that relies on this in the joystick code
31*/
32
33static Uint16 crc16_for_byte(Uint8 r)
34{
35 Uint16 crc = 0;
36 int i;
37 for (i = 0; i < 8; ++i) {
38 crc = ((crc ^ r) & 1 ? 0xA001 : 0) ^ crc >> 1;
39 r >>= 1;
40 }
41 return crc;
42}
43
44Uint16 SDL_crc16(Uint16 crc, const void *data, size_t len)
45{
46 // As an optimization we can precalculate a 256 entry table for each byte
47 size_t i;
48 for (i = 0; i < len; ++i) {
49 crc = crc16_for_byte((Uint8)crc ^ ((const Uint8 *)data)[i]) ^ crc >> 8;
50 }
51 return crc;
52}
diff --git a/contrib/SDL-3.2.8/src/stdlib/SDL_crc32.c b/contrib/SDL-3.2.8/src/stdlib/SDL_crc32.c
new file mode 100644
index 0000000..9c2d097
--- /dev/null
+++ b/contrib/SDL-3.2.8/src/stdlib/SDL_crc32.c
@@ -0,0 +1,50 @@
1/*
2 Simple DirectMedia Layer
3 Copyright (C) 1997-2025 Sam Lantinga <slouken@libsdl.org>
4
5 This software is provided 'as-is', without any express or implied
6 warranty. In no event will the authors be held liable for any damages
7 arising from the use of this software.
8
9 Permission is granted to anyone to use this software for any purpose,
10 including commercial applications, and to alter it and redistribute it
11 freely, subject to the following restrictions:
12
13 1. The origin of this software must not be misrepresented; you must not
14 claim that you wrote the original software. If you use this software
15 in a product, an acknowledgment in the product documentation would be
16 appreciated but is not required.
17 2. Altered source versions must be plainly marked as such, and must not be
18 misrepresented as being the original software.
19 3. This notice may not be removed or altered from any source distribution.
20*/
21#include "SDL_internal.h"
22
23/* Public domain CRC implementation adapted from:
24 http://home.thep.lu.se/~bjorn/crc/crc32_simple.c
25
26 This algorithm is compatible with the 32-bit CRC described here:
27 https://www.lammertbies.nl/comm/info/crc-calculation
28*/
29/* NOTE: DO NOT CHANGE THIS ALGORITHM
30 There is code that relies on this in the joystick code
31*/
32
33static Uint32 crc32_for_byte(Uint32 r)
34{
35 int i;
36 for (i = 0; i < 8; ++i) {
37 r = (r & 1 ? 0 : (Uint32)0xEDB88320L) ^ r >> 1;
38 }
39 return r ^ (Uint32)0xFF000000L;
40}
41
42Uint32 SDL_crc32(Uint32 crc, const void *data, size_t len)
43{
44 // As an optimization we can precalculate a 256 entry table for each byte
45 size_t i;
46 for (i = 0; i < len; ++i) {
47 crc = crc32_for_byte((Uint8)crc ^ ((const Uint8 *)data)[i]) ^ crc >> 8;
48 }
49 return crc;
50}
diff --git a/contrib/SDL-3.2.8/src/stdlib/SDL_getenv.c b/contrib/SDL-3.2.8/src/stdlib/SDL_getenv.c
new file mode 100644
index 0000000..b4a1922
--- /dev/null
+++ b/contrib/SDL-3.2.8/src/stdlib/SDL_getenv.c
@@ -0,0 +1,601 @@
1/*
2 Simple DirectMedia Layer
3 Copyright (C) 1997-2025 Sam Lantinga <slouken@libsdl.org>
4
5 This software is provided 'as-is', without any express or implied
6 warranty. In no event will the authors be held liable for any damages
7 arising from the use of this software.
8
9 Permission is granted to anyone to use this software for any purpose,
10 including commercial applications, and to alter it and redistribute it
11 freely, subject to the following restrictions:
12
13 1. The origin of this software must not be misrepresented; you must not
14 claim that you wrote the original software. If you use this software
15 in a product, an acknowledgment in the product documentation would be
16 appreciated but is not required.
17 2. Altered source versions must be plainly marked as such, and must not be
18 misrepresented as being the original software.
19 3. This notice may not be removed or altered from any source distribution.
20*/
21#include "SDL_internal.h"
22
23#include "SDL_getenv_c.h"
24
25#if defined(SDL_PLATFORM_WINDOWS)
26#include "../core/windows/SDL_windows.h"
27#endif
28
29#ifdef SDL_PLATFORM_ANDROID
30#include "../core/android/SDL_android.h"
31#endif
32
33#if defined(SDL_PLATFORM_WINDOWS)
34#define HAVE_WIN32_ENVIRONMENT
35#elif defined(HAVE_GETENV) && \
36 (defined(HAVE_SETENV) || defined(HAVE_PUTENV)) && \
37 (defined(HAVE_UNSETENV) || defined(HAVE_PUTENV))
38#define HAVE_LIBC_ENVIRONMENT
39#if defined(SDL_PLATFORM_MACOS)
40#include <crt_externs.h>
41#define environ (*_NSGetEnviron())
42#elif defined(SDL_PLATFORM_FREEBSD)
43#include <dlfcn.h>
44#define environ ((char **)dlsym(RTLD_DEFAULT, "environ"))
45#else
46extern char **environ;
47#endif
48#else
49#define HAVE_LOCAL_ENVIRONMENT
50static char **environ;
51#endif
52
53
// An environment object: a hash table mapping variable names to values,
// guarded by a mutex for concurrent access.
struct SDL_Environment
{
    SDL_Mutex *lock; // !!! FIXME: reuse SDL_HashTable's lock.
    SDL_HashTable *strings;
};

// The process-global environment, created lazily by SDL_GetEnvironment().
static SDL_Environment *SDL_environment;
60
61SDL_Environment *SDL_GetEnvironment(void)
62{
63 if (!SDL_environment) {
64 SDL_environment = SDL_CreateEnvironment(true);
65 }
66 return SDL_environment;
67}
68
69bool SDL_InitEnvironment(void)
70{
71 return (SDL_GetEnvironment() != NULL);
72}
73
74void SDL_QuitEnvironment(void)
75{
76 SDL_Environment *env = SDL_environment;
77
78 if (env) {
79 SDL_environment = NULL;
80 SDL_DestroyEnvironment(env);
81 }
82}
83
// Create a new environment object. If `populated` is true, it is seeded with
// the current process environment (Win32 environment block on Windows, the
// `environ` array elsewhere). Entries whose name is empty or that contain no
// '=' are skipped. Returns NULL on allocation failure.
SDL_Environment *SDL_CreateEnvironment(bool populated)
{
    SDL_Environment *env = SDL_calloc(1, sizeof(*env));
    if (!env) {
        return NULL;
    }

    // Keys are freed by the table (SDL_DestroyHashKey); each value points
    // into the same allocation as its key, so no value destructor is needed.
    env->strings = SDL_CreateHashTable(0, false, SDL_HashString, SDL_KeyMatchString, SDL_DestroyHashKey, NULL);
    if (!env->strings) {
        SDL_free(env);
        return NULL;
    }

    // Don't fail if we can't create a mutex (e.g. on a single-thread environment) // !!! FIXME: single-threaded environments should still return a non-NULL, do-nothing object here. Check for failure!
    env->lock = SDL_CreateMutex();

    if (populated) {
#ifdef SDL_PLATFORM_WINDOWS
        // Walk the double-NUL-terminated wide environment block.
        LPWCH strings = GetEnvironmentStringsW();
        if (strings) {
            for (LPWCH string = strings; *string; string += SDL_wcslen(string) + 1) {
                char *variable = WIN_StringToUTF8W(string);
                if (!variable) {
                    continue;
                }

                // Split "NAME=VALUE" in place; reject empty names.
                char *value = SDL_strchr(variable, '=');
                if (!value || value == variable) {
                    SDL_free(variable);
                    continue;
                }
                *value++ = '\0';

                // The table takes ownership of `variable` (and `value` lives inside it).
                SDL_InsertIntoHashTable(env->strings, variable, value, true);
            }
            FreeEnvironmentStringsW(strings);
        }
#else
#ifdef SDL_PLATFORM_ANDROID
        // Make sure variables from the application manifest are available
        Android_JNI_GetManifestEnvironmentVariables();
#endif
        char **strings = environ;
        if (strings) {
            for (int i = 0; strings[i]; ++i) {
                char *variable = SDL_strdup(strings[i]);
                if (!variable) {
                    continue;
                }

                // Split "NAME=VALUE" in place; reject empty names.
                char *value = SDL_strchr(variable, '=');
                if (!value || value == variable) {
                    SDL_free(variable);
                    continue;
                }
                *value++ = '\0';

                // The table takes ownership of `variable` (and `value` lives inside it).
                SDL_InsertIntoHashTable(env->strings, variable, value, true);
            }
        }
#endif // SDL_PLATFORM_WINDOWS
    }

    return env;
}
149
150const char *SDL_GetEnvironmentVariable(SDL_Environment *env, const char *name)
151{
152 const char *result = NULL;
153
154 if (!env) {
155 return NULL;
156 } else if (!name || *name == '\0') {
157 return NULL;
158 }
159
160 SDL_LockMutex(env->lock);
161 {
162 const char *value;
163
164 if (SDL_FindInHashTable(env->strings, name, (const void **)&value)) {
165 result = SDL_GetPersistentString(value);
166 }
167 }
168 SDL_UnlockMutex(env->lock);
169
170 return result;
171}
172
173typedef struct CountEnvStringsData
174{
175 size_t count;
176 size_t length;
177} CountEnvStringsData;
178
179static bool SDLCALL CountEnvStrings(void *userdata, const SDL_HashTable *table, const void *key, const void *value)
180{
181 CountEnvStringsData *data = (CountEnvStringsData *) userdata;
182 data->length += SDL_strlen((const char *) key) + 1 + SDL_strlen((const char *) value) + 1;
183 data->count++;
184 return true; // keep iterating.
185}
186
187typedef struct CopyEnvStringsData
188{
189 char **result;
190 char *string;
191 size_t count;
192} CopyEnvStringsData;
193
194static bool SDLCALL CopyEnvStrings(void *userdata, const SDL_HashTable *table, const void *vkey, const void *vvalue)
195{
196 CopyEnvStringsData *data = (CopyEnvStringsData *) userdata;
197 const char *key = (const char *) vkey;
198 const char *value = (const char *) vvalue;
199 size_t len;
200
201 len = SDL_strlen(key);
202 data->result[data->count] = data->string;
203 SDL_memcpy(data->string, key, len);
204 data->string += len;
205 *(data->string++) = '=';
206
207 len = SDL_strlen(value);
208 SDL_memcpy(data->string, value, len);
209 data->string += len;
210 *(data->string++) = '\0';
211 data->count++;
212
213 return true; // keep iterating.
214}
215
216char **SDL_GetEnvironmentVariables(SDL_Environment *env)
217{
218 char **result = NULL;
219
220 if (!env) {
221 SDL_InvalidParamError("env");
222 return NULL;
223 }
224
225 SDL_LockMutex(env->lock);
226 {
227 // First pass, get the size we need for all the strings
228 CountEnvStringsData countdata = { 0, 0 };
229 SDL_IterateHashTable(env->strings, CountEnvStrings, &countdata);
230
231 // Allocate memory for the strings
232 result = (char **)SDL_malloc((countdata.count + 1) * sizeof(*result) + countdata.length);
233 if (result) {
234 // Second pass, copy the strings
235 char *string = (char *)(result + countdata.count + 1);
236 CopyEnvStringsData cpydata = { result, string, 0 };
237 SDL_IterateHashTable(env->strings, CopyEnvStrings, &cpydata);
238 SDL_assert(countdata.count == cpydata.count);
239 result[cpydata.count] = NULL;
240 }
241 }
242 SDL_UnlockMutex(env->lock);
243
244 return result;
245}
246
// Set `name` to `value` in `env`. If `overwrite` is false and the variable
// already exists, the existing value is kept and the call still succeeds.
// `name` may not be NULL/empty or contain '='. Returns true on success.
bool SDL_SetEnvironmentVariable(SDL_Environment *env, const char *name, const char *value, bool overwrite)
{
    bool result = false;

    if (!env) {
        return SDL_InvalidParamError("env");
    } else if (!name || *name == '\0' || SDL_strchr(name, '=') != NULL) {
        return SDL_InvalidParamError("name");
    } else if (!value) {
        return SDL_InvalidParamError("value");
    }

    SDL_LockMutex(env->lock);
    {
        // Build "NAME=VALUE" in one allocation, then overwrite the '=' with a
        // NUL so the front is the key and the tail is the value. The hash
        // table takes ownership of the whole buffer via the key pointer.
        char *string = NULL;
        if (SDL_asprintf(&string, "%s=%s", name, value) > 0) {
            const size_t len = SDL_strlen(name);
            string[len] = '\0';
            const char *origname = name;
            name = string;
            value = string + len + 1;
            result = SDL_InsertIntoHashTable(env->strings, name, value, overwrite);
            if (!result) {
                SDL_free(string);
                if (!overwrite) {
                    const void *existing_value = NULL;
                    // !!! FIXME: InsertIntoHashTable does this lookup too, maybe we should have a means to report that, to avoid duplicate work?
                    if (SDL_FindInHashTable(env->strings, origname, &existing_value)) {
                        result = true; // it already existed, and we refused to overwrite it. Call it success.
                    }
                }
            }
        }
    }
    SDL_UnlockMutex(env->lock);

    return result;
}
285
286bool SDL_UnsetEnvironmentVariable(SDL_Environment *env, const char *name)
287{
288 bool result = false;
289
290 if (!env) {
291 return SDL_InvalidParamError("env");
292 } else if (!name || *name == '\0' || SDL_strchr(name, '=') != NULL) {
293 return SDL_InvalidParamError("name");
294 }
295
296 SDL_LockMutex(env->lock);
297 {
298 const void *value;
299 if (SDL_FindInHashTable(env->strings, name, &value)) {
300 result = SDL_RemoveFromHashTable(env->strings, name);
301 } else {
302 result = true;
303 }
304 }
305 SDL_UnlockMutex(env->lock);
306
307 return result;
308}
309
310void SDL_DestroyEnvironment(SDL_Environment *env)
311{
312 if (!env || env == SDL_environment) {
313 return;
314 }
315
316 SDL_DestroyMutex(env->lock);
317 SDL_DestroyHashTable(env->strings);
318 SDL_free(env);
319}
320
321// Put a variable into the environment
322// Note: Name may not contain a '=' character. (Reference: http://www.unix.com/man-page/Linux/3/setenv/)
323#ifdef HAVE_LIBC_ENVIRONMENT
324#if defined(HAVE_SETENV)
// setenv()-backed variant: mirror the change into SDL's own environment
// object, then defer to libc. Returns 0 on success, -1 on error
// (matching setenv's convention).
int SDL_setenv_unsafe(const char *name, const char *value, int overwrite)
{
    // Input validation
    if (!name || *name == '\0' || SDL_strchr(name, '=') != NULL || !value) {
        return -1;
    }

    // Keep SDL's cached environment in sync with the libc one.
    SDL_SetEnvironmentVariable(SDL_GetEnvironment(), name, value, (overwrite != 0));

    return setenv(name, value, overwrite);
}
336// We have a real environment table, but no real setenv? Fake it w/ putenv.
337#else
338int SDL_setenv_unsafe(const char *name, const char *value, int overwrite)
339{
340 char *new_variable;
341
342 // Input validation
343 if (!name || *name == '\0' || SDL_strchr(name, '=') != NULL || !value) {
344 return -1;
345 }
346
347 SDL_SetEnvironmentVariable(SDL_GetEnvironment(), name, value, (overwrite != 0));
348
349 if (getenv(name) != NULL) {
350 if (!overwrite) {
351 return 0; // leave the existing one there.
352 }
353 }
354
355 // This leaks. Sorry. Get a better OS so we don't have to do this.
356 SDL_asprintf(&new_variable, "%s=%s", name, value);
357 if (!new_variable) {
358 return -1;
359 }
360 return putenv(new_variable);
361}
362#endif
363#elif defined(HAVE_WIN32_ENVIRONMENT)
364int SDL_setenv_unsafe(const char *name, const char *value, int overwrite)
365{
366 // Input validation
367 if (!name || *name == '\0' || SDL_strchr(name, '=') != NULL || !value) {
368 return -1;
369 }
370
371 SDL_SetEnvironmentVariable(SDL_GetEnvironment(), name, value, (overwrite != 0));
372
373 if (!overwrite) {
374 if (GetEnvironmentVariableA(name, NULL, 0) > 0) {
375 return 0; // asked not to overwrite existing value.
376 }
377 }
378 if (!SetEnvironmentVariableA(name, value)) {
379 return -1;
380 }
381 return 0;
382}
383#else // roll our own
384
// Roll-our-own setenv() for platforms with neither setenv() nor a Win32
// environment: builds a "NAME=VALUE" string and splices it into the global
// `environ` array, growing the array when the name isn't already present.
// Returns 0 on success, -1 on failure.
int SDL_setenv_unsafe(const char *name, const char *value, int overwrite)
{
    int added;
    size_t len, i;
    char **new_env;
    char *new_variable;

    // Input validation
    if (!name || *name == '\0' || SDL_strchr(name, '=') != NULL || !value) {
        return -1;
    }

    // See if it already exists
    if (!overwrite && SDL_getenv_unsafe(name)) {
        return 0;
    }

    SDL_SetEnvironmentVariable(SDL_GetEnvironment(), name, value, (overwrite != 0));

    // Allocate memory for the variable ('=' plus the terminating nul)
    len = SDL_strlen(name) + SDL_strlen(value) + 2;
    new_variable = (char *)SDL_malloc(len);
    if (!new_variable) {
        return -1;
    }

    SDL_snprintf(new_variable, len, "%s=%s", name, value);
    // Re-point name/value into the combined buffer; (value - name) below is
    // then the length of the "NAME=" prefix used for matching.
    value = new_variable + SDL_strlen(name) + 1;
    name = new_variable;

    // Actually put it into the environment
    added = 0;
    i = 0;
    if (environ) {
        // Check to see if it's already there...
        len = (value - name);
        for (; environ[i]; ++i) {
            if (SDL_strncmp(environ[i], name, len) == 0) {
                // If we found it, just replace the entry
                // NOTE(review): this frees an environ entry that may not have
                // been allocated by SDL (e.g. set up at process startup) —
                // presumably safe only because this code path owns environ on
                // these platforms; confirm before reusing elsewhere.
                SDL_free(environ[i]);
                environ[i] = new_variable;
                added = 1;
                break;
            }
        }
    }

    // Didn't find it in the environment, expand and add
    // (after the scan above, `i` is the current number of entries)
    if (!added) {
        new_env = SDL_realloc(environ, (i + 2) * sizeof(char *));
        if (new_env) {
            environ = new_env;
            environ[i++] = new_variable;
            environ[i++] = (char *)0;
            added = 1;
        } else {
            SDL_free(new_variable);
        }
    }
    return added ? 0 : -1;
}
446#endif // HAVE_LIBC_ENVIRONMENT
447
448#ifdef HAVE_LIBC_ENVIRONMENT
449#if defined(HAVE_UNSETENV)
450int SDL_unsetenv_unsafe(const char *name)
451{
452 // Input validation
453 if (!name || *name == '\0' || SDL_strchr(name, '=') != NULL) {
454 return -1;
455 }
456
457 SDL_UnsetEnvironmentVariable(SDL_GetEnvironment(), name);
458
459 return unsetenv(name);
460}
461// We have a real environment table, but no unsetenv? Fake it w/ putenv.
462#else
// putenv()-based fallback for platforms without unsetenv(). Returns putenv()'s
// result, or -1 on invalid input.
int SDL_unsetenv_unsafe(const char *name)
{
    // Input validation
    if (!name || *name == '\0' || SDL_strchr(name, '=') != NULL) {
        return -1;
    }

    SDL_UnsetEnvironmentVariable(SDL_GetEnvironment(), name);

    // Hope this environment uses the non-standard extension of removing the
    // environment variable if it has no '='. putenv() takes a non-const
    // char *, so cast explicitly rather than discarding const implicitly
    // (which is a constraint violation most compilers warn about).
    return putenv((char *)name);
}
475#endif
476#elif defined(HAVE_WIN32_ENVIRONMENT)
477int SDL_unsetenv_unsafe(const char *name)
478{
479 // Input validation
480 if (!name || *name == '\0' || SDL_strchr(name, '=') != NULL) {
481 return -1;
482 }
483
484 SDL_UnsetEnvironmentVariable(SDL_GetEnvironment(), name);
485
486 if (!SetEnvironmentVariableA(name, NULL)) {
487 return -1;
488 }
489 return 0;
490}
491#else
492int SDL_unsetenv_unsafe(const char *name)
493{
494 size_t len, i;
495
496 // Input validation
497 if (!name || *name == '\0' || SDL_strchr(name, '=') != NULL) {
498 return -1;
499 }
500
501 SDL_UnsetEnvironmentVariable(SDL_GetEnvironment(), name);
502
503 if (environ) {
504 len = SDL_strlen(name);
505 for (i = 0; environ[i]; ++i) {
506 if ((SDL_strncmp(environ[i], name, len) == 0) &&
507 (environ[i][len] == '=')) {
508 // Just clear out this entry for now
509 *environ[i] = '\0';
510 break;
511 }
512 }
513 }
514 return 0;
515}
516#endif // HAVE_LIBC_ENVIRONMENT
517
518// Retrieve a variable named "name" from the environment
519#ifdef HAVE_LIBC_ENVIRONMENT
// libc-backed lookup: defers to getenv(). Returns NULL for invalid names.
const char *SDL_getenv_unsafe(const char *name)
{
#ifdef SDL_PLATFORM_ANDROID
    // Make sure variables from the application manifest are available
    Android_JNI_GetManifestEnvironmentVariables();
#endif

    // An empty or NULL name can never match anything.
    if (name && *name != '\0') {
        return getenv(name);
    }
    return NULL;
}
534#elif defined(HAVE_WIN32_ENVIRONMENT)
535const char *SDL_getenv_unsafe(const char *name)
536{
537 DWORD length, maxlen = 0;
538 char *string = NULL;
539 const char *result = NULL;
540
541 // Input validation
542 if (!name || *name == '\0') {
543 return NULL;
544 }
545
546 for ( ; ; ) {
547 SetLastError(ERROR_SUCCESS);
548 length = GetEnvironmentVariableA(name, string, maxlen);
549
550 if (length > maxlen) {
551 char *temp = (char *)SDL_realloc(string, length);
552 if (!temp) {
553 return NULL;
554 }
555 string = temp;
556 maxlen = length;
557 } else {
558 if (GetLastError() != ERROR_SUCCESS) {
559 if (string) {
560 SDL_free(string);
561 }
562 return NULL;
563 }
564 break;
565 }
566 }
567 if (string) {
568 result = SDL_GetPersistentString(string);
569 SDL_free(string);
570 }
571 return result;
572}
573#else
574const char *SDL_getenv_unsafe(const char *name)
575{
576 size_t len, i;
577 const char *value = NULL;
578
579 // Input validation
580 if (!name || *name == '\0') {
581 return NULL;
582 }
583
584 if (environ) {
585 len = SDL_strlen(name);
586 for (i = 0; environ[i]; ++i) {
587 if ((SDL_strncmp(environ[i], name, len) == 0) &&
588 (environ[i][len] == '=')) {
589 value = &environ[i][len + 1];
590 break;
591 }
592 }
593 }
594 return value;
595}
596#endif // HAVE_LIBC_ENVIRONMENT
597
598const char *SDL_getenv(const char *name)
599{
600 return SDL_GetEnvironmentVariable(SDL_GetEnvironment(), name);
601}
diff --git a/contrib/SDL-3.2.8/src/stdlib/SDL_getenv_c.h b/contrib/SDL-3.2.8/src/stdlib/SDL_getenv_c.h
new file mode 100644
index 0000000..9cf997d
--- /dev/null
+++ b/contrib/SDL-3.2.8/src/stdlib/SDL_getenv_c.h
@@ -0,0 +1,24 @@
1/*
2 Simple DirectMedia Layer
3 Copyright (C) 1997-2025 Sam Lantinga <slouken@libsdl.org>
4
5 This software is provided 'as-is', without any express or implied
6 warranty. In no event will the authors be held liable for any damages
7 arising from the use of this software.
8
9 Permission is granted to anyone to use this software for any purpose,
10 including commercial applications, and to alter it and redistribute it
11 freely, subject to the following restrictions:
12
13 1. The origin of this software must not be misrepresented; you must not
14 claim that you wrote the original software. If you use this software
15 in a product, an acknowledgment in the product documentation would be
16 appreciated but is not required.
17 2. Altered source versions must be plainly marked as such, and must not be
18 misrepresented as being the original software.
19 3. This notice may not be removed or altered from any source distribution.
20*/
21#include "SDL_internal.h"
22
23extern bool SDL_InitEnvironment(void);
24extern void SDL_QuitEnvironment(void);
diff --git a/contrib/SDL-3.2.8/src/stdlib/SDL_iconv.c b/contrib/SDL-3.2.8/src/stdlib/SDL_iconv.c
new file mode 100644
index 0000000..fbea033
--- /dev/null
+++ b/contrib/SDL-3.2.8/src/stdlib/SDL_iconv.c
@@ -0,0 +1,860 @@
1/*
2 Simple DirectMedia Layer
3 Copyright (C) 1997-2025 Sam Lantinga <slouken@libsdl.org>
4
5 This software is provided 'as-is', without any express or implied
6 warranty. In no event will the authors be held liable for any damages
7 arising from the use of this software.
8
9 Permission is granted to anyone to use this software for any purpose,
10 including commercial applications, and to alter it and redistribute it
11 freely, subject to the following restrictions:
12
13 1. The origin of this software must not be misrepresented; you must not
14 claim that you wrote the original software. If you use this software
15 in a product, an acknowledgment in the product documentation would be
16 appreciated but is not required.
17 2. Altered source versions must be plainly marked as such, and must not be
18 misrepresented as being the original software.
19 3. This notice may not be removed or altered from any source distribution.
20*/
21#include "SDL_internal.h"
22
23// This file contains portable iconv functions for SDL
24
25#if defined(HAVE_ICONV) && defined(HAVE_ICONV_H)
26#ifndef SDL_USE_LIBICONV
27// Define LIBICONV_PLUG to use iconv from the base instead of ports and avoid linker errors.
28#define LIBICONV_PLUG 1
29#endif
30#include <iconv.h>
31#include <errno.h>
32
33SDL_COMPILE_TIME_ASSERT(iconv_t, sizeof(iconv_t) <= sizeof(SDL_iconv_t));
34
35SDL_iconv_t SDL_iconv_open(const char *tocode, const char *fromcode)
36{
37 return (SDL_iconv_t)((uintptr_t)iconv_open(tocode, fromcode));
38}
39
40int SDL_iconv_close(SDL_iconv_t cd)
41{
42 if ((size_t)cd == SDL_ICONV_ERROR) {
43 return -1;
44 }
45 return iconv_close((iconv_t)((uintptr_t)cd));
46}
47
48size_t SDL_iconv(SDL_iconv_t cd,
49 const char **inbuf, size_t *inbytesleft,
50 char **outbuf, size_t *outbytesleft)
51{
52 if ((size_t)cd == SDL_ICONV_ERROR) {
53 return SDL_ICONV_ERROR;
54 }
55 /* iconv's second parameter may or may not be `const char const *` depending on the
56 C runtime's whims. Casting to void * seems to make everyone happy, though. */
57 const size_t retCode = iconv((iconv_t)((uintptr_t)cd), (void *)inbuf, inbytesleft, outbuf, outbytesleft);
58 if (retCode == (size_t)-1) {
59 switch (errno) {
60 case E2BIG:
61 return SDL_ICONV_E2BIG;
62 case EILSEQ:
63 return SDL_ICONV_EILSEQ;
64 case EINVAL:
65 return SDL_ICONV_EINVAL;
66 default:
67 return SDL_ICONV_ERROR;
68 }
69 }
70 return retCode;
71}
72
73#else
74
75/* Lots of useful information on Unicode at:
76 http://www.cl.cam.ac.uk/~mgk25/unicode.html
77*/
78
// U+FEFF, the byte order mark / zero-width no-break space.
#define UNICODE_BOM 0xFEFF

// Substitution characters emitted for unrepresentable input.
#define UNKNOWN_ASCII '?'
#define UNKNOWN_UNICODE 0xFFFD

// Internal identifiers for the encodings the fallback converter understands.
enum
{
    ENCODING_UNKNOWN,
    ENCODING_ASCII,
    ENCODING_LATIN1,
    ENCODING_UTF8,
    ENCODING_UTF16, // Needs byte order marker
    ENCODING_UTF16BE,
    ENCODING_UTF16LE,
    ENCODING_UTF32, // Needs byte order marker
    ENCODING_UTF32BE,
    ENCODING_UTF32LE,
    ENCODING_UCS2BE,
    ENCODING_UCS2LE,
    ENCODING_UCS4BE,
    ENCODING_UCS4LE,
};
// Aliases for whichever byte order this machine uses natively.
#if SDL_BYTEORDER == SDL_BIG_ENDIAN
#define ENCODING_UTF16NATIVE ENCODING_UTF16BE
#define ENCODING_UTF32NATIVE ENCODING_UTF32BE
#define ENCODING_UCS2NATIVE ENCODING_UCS2BE
#define ENCODING_UCS4NATIVE ENCODING_UCS4BE
#else
#define ENCODING_UTF16NATIVE ENCODING_UTF16LE
#define ENCODING_UTF32NATIVE ENCODING_UTF32LE
#define ENCODING_UCS2NATIVE ENCODING_UCS2LE
#define ENCODING_UCS4NATIVE ENCODING_UCS4LE
#endif

// Conversion descriptor for the fallback implementation: just the source and
// destination encoding identifiers from the enum above.
struct SDL_iconv_data_t
{
    int src_fmt;
    int dst_fmt;
};

// Case-insensitive lookup table mapping encoding names, as passed to
// SDL_iconv_open(), onto the internal identifiers.
static struct
{
    const char *name;
    int format;
} encodings[] = {
    /* *INDENT-OFF* */ // clang-format off
    { "ASCII", ENCODING_ASCII },
    { "US-ASCII", ENCODING_ASCII },
    { "8859-1", ENCODING_LATIN1 },
    { "ISO-8859-1", ENCODING_LATIN1 },
#if defined(SDL_PLATFORM_WINDOWS) || defined(SDL_PLATFORM_OS2)
    { "WCHAR_T", ENCODING_UTF16LE },
#else
    { "WCHAR_T", ENCODING_UCS4NATIVE },
#endif
    { "UTF8", ENCODING_UTF8 },
    { "UTF-8", ENCODING_UTF8 },
    { "UTF16", ENCODING_UTF16 },
    { "UTF-16", ENCODING_UTF16 },
    { "UTF16BE", ENCODING_UTF16BE },
    { "UTF-16BE", ENCODING_UTF16BE },
    { "UTF16LE", ENCODING_UTF16LE },
    { "UTF-16LE", ENCODING_UTF16LE },
    { "UTF32", ENCODING_UTF32 },
    { "UTF-32", ENCODING_UTF32 },
    { "UTF32BE", ENCODING_UTF32BE },
    { "UTF-32BE", ENCODING_UTF32BE },
    { "UTF32LE", ENCODING_UTF32LE },
    { "UTF-32LE", ENCODING_UTF32LE },
    { "UCS2", ENCODING_UCS2BE },
    { "UCS-2", ENCODING_UCS2BE },
    { "UCS-2LE", ENCODING_UCS2LE },
    { "UCS-2BE", ENCODING_UCS2BE },
    { "UCS-2-INTERNAL", ENCODING_UCS2NATIVE },
    { "UCS4", ENCODING_UCS4BE },
    { "UCS-4", ENCODING_UCS4BE },
    { "UCS-4LE", ENCODING_UCS4LE },
    { "UCS-4BE", ENCODING_UCS4BE },
    { "UCS-4-INTERNAL", ENCODING_UCS4NATIVE },
/* *INDENT-ON* */ // clang-format on
};
160
161static const char *getlocale(char *buffer, size_t bufsize)
162{
163 const char *lang;
164 char *ptr;
165
166 lang = SDL_getenv("LC_ALL");
167 if (!lang) {
168 lang = SDL_getenv("LC_CTYPE");
169 }
170 if (!lang) {
171 lang = SDL_getenv("LC_MESSAGES");
172 }
173 if (!lang) {
174 lang = SDL_getenv("LANG");
175 }
176 if (!lang || !*lang || SDL_strcmp(lang, "C") == 0) {
177 lang = "ASCII";
178 }
179
180 // We need to trim down strings like "en_US.UTF-8@blah" to "UTF-8"
181 ptr = SDL_strchr(lang, '.');
182 if (ptr) {
183 lang = ptr + 1;
184 }
185
186 SDL_strlcpy(buffer, lang, bufsize);
187 ptr = SDL_strchr(buffer, '@');
188 if (ptr) {
189 *ptr = '\0'; // chop end of string.
190 }
191
192 return buffer;
193}
194
195SDL_iconv_t SDL_iconv_open(const char *tocode, const char *fromcode)
196{
197 int src_fmt = ENCODING_UNKNOWN;
198 int dst_fmt = ENCODING_UNKNOWN;
199 int i;
200 char fromcode_buffer[64];
201 char tocode_buffer[64];
202
203 if (!fromcode || !*fromcode) {
204 fromcode = getlocale(fromcode_buffer, sizeof(fromcode_buffer));
205 }
206 if (!tocode || !*tocode) {
207 tocode = getlocale(tocode_buffer, sizeof(tocode_buffer));
208 }
209 for (i = 0; i < SDL_arraysize(encodings); ++i) {
210 if (SDL_strcasecmp(fromcode, encodings[i].name) == 0) {
211 src_fmt = encodings[i].format;
212 if (dst_fmt != ENCODING_UNKNOWN) {
213 break;
214 }
215 }
216 if (SDL_strcasecmp(tocode, encodings[i].name) == 0) {
217 dst_fmt = encodings[i].format;
218 if (src_fmt != ENCODING_UNKNOWN) {
219 break;
220 }
221 }
222 }
223 if (src_fmt != ENCODING_UNKNOWN && dst_fmt != ENCODING_UNKNOWN) {
224 SDL_iconv_t cd = (SDL_iconv_t)SDL_malloc(sizeof(*cd));
225 if (cd) {
226 cd->src_fmt = src_fmt;
227 cd->dst_fmt = dst_fmt;
228 return cd;
229 }
230 }
231 return (SDL_iconv_t)-1;
232}
233
// Fallback SDL_iconv(): converts between the encodings known to the table
// above by decoding each input character to a UCS-4 code point (`ch`) and
// re-encoding it into the output buffer. Advances *inbuf/*outbuf and shrinks
// *inbytesleft/*outbytesleft as it goes, and returns the number of characters
// converted, or one of the SDL_ICONV_* error codes.
size_t SDL_iconv(SDL_iconv_t cd,
                 const char **inbuf, size_t *inbytesleft,
                 char **outbuf, size_t *outbytesleft)
{
    // For simplicity, we'll convert everything to and from UCS-4
    const char *src;
    char *dst;
    size_t srclen, dstlen;
    Uint32 ch = 0;
    size_t total;

    if ((size_t)cd == SDL_ICONV_ERROR) {
        return SDL_ICONV_ERROR;
    }
    if (!inbuf || !*inbuf) {
        // Reset the context
        return 0;
    }
    if (!outbuf || !*outbuf || !outbytesleft || !*outbytesleft) {
        return SDL_ICONV_E2BIG;
    }
    src = *inbuf;
    srclen = (inbytesleft ? *inbytesleft : 0);
    dst = *outbuf;
    dstlen = *outbytesleft;

    // Resolve a byte-order-dependent source encoding by scanning for a BOM;
    // the descriptor is updated in place so later calls skip this step.
    switch (cd->src_fmt) {
    case ENCODING_UTF16:
        // Scan for a byte order marker
        {
            Uint8 *p = (Uint8 *)src;
            size_t n = srclen / 2;
            while (n) {
                // NOTE(review): 0xFF,0xFE is conventionally the *little*-endian
                // BOM and 0xFE,0xFF the big-endian one; this mapping looks
                // swapped — confirm against upstream intent before changing.
                if (p[0] == 0xFF && p[1] == 0xFE) {
                    cd->src_fmt = ENCODING_UTF16BE;
                    break;
                } else if (p[0] == 0xFE && p[1] == 0xFF) {
                    cd->src_fmt = ENCODING_UTF16LE;
                    break;
                }
                p += 2;
                --n;
            }
            if (n == 0) {
                // We can't tell, default to host order
                cd->src_fmt = ENCODING_UTF16NATIVE;
            }
        }
        break;
    case ENCODING_UTF32:
        // Scan for a byte order marker
        {
            Uint8 *p = (Uint8 *)src;
            size_t n = srclen / 4;
            while (n) {
                // NOTE(review): same endianness concern as the UTF-16 case above.
                if (p[0] == 0xFF && p[1] == 0xFE &&
                    p[2] == 0x00 && p[3] == 0x00) {
                    cd->src_fmt = ENCODING_UTF32BE;
                    break;
                } else if (p[0] == 0x00 && p[1] == 0x00 &&
                           p[2] == 0xFE && p[3] == 0xFF) {
                    cd->src_fmt = ENCODING_UTF32LE;
                    break;
                }
                p += 4;
                --n;
            }
            if (n == 0) {
                // We can't tell, default to host order
                cd->src_fmt = ENCODING_UTF32NATIVE;
            }
        }
        break;
    }

    // A byte-order-dependent destination gets a BOM written up front, then
    // switches to the native-order encoding for the rest of the stream.
    switch (cd->dst_fmt) {
    case ENCODING_UTF16:
        // Default to host order, need to add byte order marker
        if (dstlen < 2) {
            return SDL_ICONV_E2BIG;
        }
        *(Uint16 *)dst = UNICODE_BOM;
        dst += 2;
        dstlen -= 2;
        cd->dst_fmt = ENCODING_UTF16NATIVE;
        break;
    case ENCODING_UTF32:
        // Default to host order, need to add byte order marker
        if (dstlen < 4) {
            return SDL_ICONV_E2BIG;
        }
        *(Uint32 *)dst = UNICODE_BOM;
        dst += 4;
        dstlen -= 4;
        cd->dst_fmt = ENCODING_UTF32NATIVE;
        break;
    }

    total = 0;
    while (srclen > 0) {
        // Decode a character
        switch (cd->src_fmt) {
        case ENCODING_ASCII:
        {
            Uint8 *p = (Uint8 *)src;
            ch = (Uint32)(p[0] & 0x7F);
            ++src;
            --srclen;
        } break;
        case ENCODING_LATIN1:
        {
            Uint8 *p = (Uint8 *)src;
            ch = (Uint32)p[0];
            ++src;
            --srclen;
        } break;
        case ENCODING_UTF8: // RFC 3629
        {
            Uint8 *p = (Uint8 *)src;
            size_t left = 0;
            bool overlong = false;
            // Classify by the lead byte: it determines how many continuation
            // bytes follow and contributes the top bits of the code point.
            if (p[0] >= 0xF0) {
                if ((p[0] & 0xF8) != 0xF0) {
                    /* Skip illegal sequences
                    return SDL_ICONV_EILSEQ;
                    */
                    ch = UNKNOWN_UNICODE;
                } else {
                    if (p[0] == 0xF0 && srclen > 1 && (p[1] & 0xF0) == 0x80) {
                        overlong = true;
                    }
                    ch = (Uint32)(p[0] & 0x07);
                    left = 3;
                }
            } else if (p[0] >= 0xE0) {
                if ((p[0] & 0xF0) != 0xE0) {
                    /* Skip illegal sequences
                    return SDL_ICONV_EILSEQ;
                    */
                    ch = UNKNOWN_UNICODE;
                } else {
                    if (p[0] == 0xE0 && srclen > 1 && (p[1] & 0xE0) == 0x80) {
                        overlong = true;
                    }
                    ch = (Uint32)(p[0] & 0x0F);
                    left = 2;
                }
            } else if (p[0] >= 0xC0) {
                if ((p[0] & 0xE0) != 0xC0) {
                    /* Skip illegal sequences
                    return SDL_ICONV_EILSEQ;
                    */
                    ch = UNKNOWN_UNICODE;
                } else {
                    if ((p[0] & 0xDE) == 0xC0) {
                        overlong = true;
                    }
                    ch = (Uint32)(p[0] & 0x1F);
                    left = 1;
                }
            } else {
                if (p[0] & 0x80) {
                    /* Skip illegal sequences
                    return SDL_ICONV_EILSEQ;
                    */
                    ch = UNKNOWN_UNICODE;
                } else {
                    ch = (Uint32)p[0];
                }
            }
            ++src;
            --srclen;
            // A truncated sequence means "give me more input", not "bad data".
            if (srclen < left) {
                return SDL_ICONV_EINVAL;
            }
            while (left--) {
                ++p;
                if ((p[0] & 0xC0) != 0x80) {
                    /* Skip illegal sequences
                    return SDL_ICONV_EILSEQ;
                    */
                    ch = UNKNOWN_UNICODE;
                    break;
                }
                ch <<= 6;
                ch |= (p[0] & 0x3F);
                ++src;
                --srclen;
            }
            if (overlong) {
                /* Potential security risk
                return SDL_ICONV_EILSEQ;
                */
                ch = UNKNOWN_UNICODE;
            }
            // Reject surrogates, the noncharacters U+FFFE/U+FFFF, and
            // anything beyond the Unicode range.
            if ((ch >= 0xD800 && ch <= 0xDFFF) ||
                (ch == 0xFFFE || ch == 0xFFFF) || ch > 0x10FFFF) {
                /* Skip illegal sequences
                return SDL_ICONV_EILSEQ;
                */
                ch = UNKNOWN_UNICODE;
            }
        } break;
        case ENCODING_UTF16BE: // RFC 2781
        {
            Uint8 *p = (Uint8 *)src;
            Uint16 W1, W2;
            if (srclen < 2) {
                return SDL_ICONV_EINVAL;
            }
            W1 = ((Uint16)p[0] << 8) | (Uint16)p[1];
            src += 2;
            srclen -= 2;
            if (W1 < 0xD800 || W1 > 0xDFFF) {
                ch = (Uint32)W1;
                break;
            }
            // W1 is a surrogate: it must be a high surrogate followed by a
            // low surrogate, combining into a supplementary-plane character.
            if (W1 > 0xDBFF) {
                /* Skip illegal sequences
                return SDL_ICONV_EILSEQ;
                */
                ch = UNKNOWN_UNICODE;
                break;
            }
            if (srclen < 2) {
                return SDL_ICONV_EINVAL;
            }
            p = (Uint8 *)src;
            W2 = ((Uint16)p[0] << 8) | (Uint16)p[1];
            src += 2;
            srclen -= 2;
            if (W2 < 0xDC00 || W2 > 0xDFFF) {
                /* Skip illegal sequences
                return SDL_ICONV_EILSEQ;
                */
                ch = UNKNOWN_UNICODE;
                break;
            }
            ch = (((Uint32)(W1 & 0x3FF) << 10) |
                  (Uint32)(W2 & 0x3FF)) +
                 0x10000;
        } break;
        case ENCODING_UTF16LE: // RFC 2781
        {
            Uint8 *p = (Uint8 *)src;
            Uint16 W1, W2;
            if (srclen < 2) {
                return SDL_ICONV_EINVAL;
            }
            W1 = ((Uint16)p[1] << 8) | (Uint16)p[0];
            src += 2;
            srclen -= 2;
            if (W1 < 0xD800 || W1 > 0xDFFF) {
                ch = (Uint32)W1;
                break;
            }
            if (W1 > 0xDBFF) {
                /* Skip illegal sequences
                return SDL_ICONV_EILSEQ;
                */
                ch = UNKNOWN_UNICODE;
                break;
            }
            if (srclen < 2) {
                return SDL_ICONV_EINVAL;
            }
            p = (Uint8 *)src;
            W2 = ((Uint16)p[1] << 8) | (Uint16)p[0];
            src += 2;
            srclen -= 2;
            if (W2 < 0xDC00 || W2 > 0xDFFF) {
                /* Skip illegal sequences
                return SDL_ICONV_EILSEQ;
                */
                ch = UNKNOWN_UNICODE;
                break;
            }
            ch = (((Uint32)(W1 & 0x3FF) << 10) |
                  (Uint32)(W2 & 0x3FF)) +
                 0x10000;
        } break;
        case ENCODING_UCS2LE:
        {
            Uint8 *p = (Uint8 *)src;
            if (srclen < 2) {
                return SDL_ICONV_EINVAL;
            }
            ch = ((Uint32)p[1] << 8) | (Uint32)p[0];
            src += 2;
            srclen -= 2;
        } break;
        case ENCODING_UCS2BE:
        {
            Uint8 *p = (Uint8 *)src;
            if (srclen < 2) {
                return SDL_ICONV_EINVAL;
            }
            ch = ((Uint32)p[0] << 8) | (Uint32)p[1];
            src += 2;
            srclen -= 2;
        } break;
        case ENCODING_UCS4BE:
        case ENCODING_UTF32BE:
        {
            Uint8 *p = (Uint8 *)src;
            if (srclen < 4) {
                return SDL_ICONV_EINVAL;
            }
            ch = ((Uint32)p[0] << 24) |
                 ((Uint32)p[1] << 16) |
                 ((Uint32)p[2] << 8) | (Uint32)p[3];
            src += 4;
            srclen -= 4;
        } break;
        case ENCODING_UCS4LE:
        case ENCODING_UTF32LE:
        {
            Uint8 *p = (Uint8 *)src;
            if (srclen < 4) {
                return SDL_ICONV_EINVAL;
            }
            ch = ((Uint32)p[3] << 24) |
                 ((Uint32)p[2] << 16) |
                 ((Uint32)p[1] << 8) | (Uint32)p[0];
            src += 4;
            srclen -= 4;
        } break;
        }

        // Encode a character
        switch (cd->dst_fmt) {
        case ENCODING_ASCII:
        {
            Uint8 *p = (Uint8 *)dst;
            if (dstlen < 1) {
                return SDL_ICONV_E2BIG;
            }
            if (ch > 0x7F) {
                *p = UNKNOWN_ASCII;
            } else {
                *p = (Uint8)ch;
            }
            ++dst;
            --dstlen;
        } break;
        case ENCODING_LATIN1:
        {
            Uint8 *p = (Uint8 *)dst;
            if (dstlen < 1) {
                return SDL_ICONV_E2BIG;
            }
            if (ch > 0xFF) {
                *p = UNKNOWN_ASCII;
            } else {
                *p = (Uint8)ch;
            }
            ++dst;
            --dstlen;
        } break;
        case ENCODING_UTF8: // RFC 3629
        {
            Uint8 *p = (Uint8 *)dst;
            if (ch > 0x10FFFF) {
                ch = UNKNOWN_UNICODE;
            }
            // Emit 1-4 bytes depending on the code point's magnitude.
            if (ch <= 0x7F) {
                if (dstlen < 1) {
                    return SDL_ICONV_E2BIG;
                }
                *p = (Uint8)ch;
                ++dst;
                --dstlen;
            } else if (ch <= 0x7FF) {
                if (dstlen < 2) {
                    return SDL_ICONV_E2BIG;
                }
                p[0] = 0xC0 | (Uint8)((ch >> 6) & 0x1F);
                p[1] = 0x80 | (Uint8)(ch & 0x3F);
                dst += 2;
                dstlen -= 2;
            } else if (ch <= 0xFFFF) {
                if (dstlen < 3) {
                    return SDL_ICONV_E2BIG;
                }
                p[0] = 0xE0 | (Uint8)((ch >> 12) & 0x0F);
                p[1] = 0x80 | (Uint8)((ch >> 6) & 0x3F);
                p[2] = 0x80 | (Uint8)(ch & 0x3F);
                dst += 3;
                dstlen -= 3;
            } else {
                if (dstlen < 4) {
                    return SDL_ICONV_E2BIG;
                }
                p[0] = 0xF0 | (Uint8)((ch >> 18) & 0x07);
                p[1] = 0x80 | (Uint8)((ch >> 12) & 0x3F);
                p[2] = 0x80 | (Uint8)((ch >> 6) & 0x3F);
                p[3] = 0x80 | (Uint8)(ch & 0x3F);
                dst += 4;
                dstlen -= 4;
            }
        } break;
        case ENCODING_UTF16BE: // RFC 2781
        {
            Uint8 *p = (Uint8 *)dst;
            if (ch > 0x10FFFF) {
                ch = UNKNOWN_UNICODE;
            }
            if (ch < 0x10000) {
                if (dstlen < 2) {
                    return SDL_ICONV_E2BIG;
                }
                p[0] = (Uint8)(ch >> 8);
                p[1] = (Uint8)ch;
                dst += 2;
                dstlen -= 2;
            } else {
                // Supplementary plane: emit a surrogate pair.
                Uint16 W1, W2;
                if (dstlen < 4) {
                    return SDL_ICONV_E2BIG;
                }
                ch = ch - 0x10000;
                W1 = 0xD800 | (Uint16)((ch >> 10) & 0x3FF);
                W2 = 0xDC00 | (Uint16)(ch & 0x3FF);
                p[0] = (Uint8)(W1 >> 8);
                p[1] = (Uint8)W1;
                p[2] = (Uint8)(W2 >> 8);
                p[3] = (Uint8)W2;
                dst += 4;
                dstlen -= 4;
            }
        } break;
        case ENCODING_UTF16LE: // RFC 2781
        {
            Uint8 *p = (Uint8 *)dst;
            if (ch > 0x10FFFF) {
                ch = UNKNOWN_UNICODE;
            }
            if (ch < 0x10000) {
                if (dstlen < 2) {
                    return SDL_ICONV_E2BIG;
                }
                p[1] = (Uint8)(ch >> 8);
                p[0] = (Uint8)ch;
                dst += 2;
                dstlen -= 2;
            } else {
                Uint16 W1, W2;
                if (dstlen < 4) {
                    return SDL_ICONV_E2BIG;
                }
                ch = ch - 0x10000;
                W1 = 0xD800 | (Uint16)((ch >> 10) & 0x3FF);
                W2 = 0xDC00 | (Uint16)(ch & 0x3FF);
                p[1] = (Uint8)(W1 >> 8);
                p[0] = (Uint8)W1;
                p[3] = (Uint8)(W2 >> 8);
                p[2] = (Uint8)W2;
                dst += 4;
                dstlen -= 4;
            }
        } break;
        case ENCODING_UCS2BE:
        {
            Uint8 *p = (Uint8 *)dst;
            if (ch > 0xFFFF) {
                ch = UNKNOWN_UNICODE;
            }
            if (dstlen < 2) {
                return SDL_ICONV_E2BIG;
            }
            p[0] = (Uint8)(ch >> 8);
            p[1] = (Uint8)ch;
            dst += 2;
            dstlen -= 2;
        } break;
        case ENCODING_UCS2LE:
        {
            Uint8 *p = (Uint8 *)dst;
            if (ch > 0xFFFF) {
                ch = UNKNOWN_UNICODE;
            }
            if (dstlen < 2) {
                return SDL_ICONV_E2BIG;
            }
            p[1] = (Uint8)(ch >> 8);
            p[0] = (Uint8)ch;
            dst += 2;
            dstlen -= 2;
        } break;
        case ENCODING_UTF32BE:
            if (ch > 0x10FFFF) {
                ch = UNKNOWN_UNICODE;
            }
            SDL_FALLTHROUGH;
        case ENCODING_UCS4BE:
            if (ch > 0x7FFFFFFF) {
                ch = UNKNOWN_UNICODE;
            }
            {
                Uint8 *p = (Uint8 *)dst;
                if (dstlen < 4) {
                    return SDL_ICONV_E2BIG;
                }
                p[0] = (Uint8)(ch >> 24);
                p[1] = (Uint8)(ch >> 16);
                p[2] = (Uint8)(ch >> 8);
                p[3] = (Uint8)ch;
                dst += 4;
                dstlen -= 4;
            }
            break;
        case ENCODING_UTF32LE:
            if (ch > 0x10FFFF) {
                ch = UNKNOWN_UNICODE;
            }
            SDL_FALLTHROUGH;
        case ENCODING_UCS4LE:
            if (ch > 0x7FFFFFFF) {
                ch = UNKNOWN_UNICODE;
            }
            {
                Uint8 *p = (Uint8 *)dst;
                if (dstlen < 4) {
                    return SDL_ICONV_E2BIG;
                }
                p[3] = (Uint8)(ch >> 24);
                p[2] = (Uint8)(ch >> 16);
                p[1] = (Uint8)(ch >> 8);
                p[0] = (Uint8)ch;
                dst += 4;
                dstlen -= 4;
            }
            break;
        }

        // Update state so callers see progress even across early returns on
        // the next iteration.
        *inbuf = src;
        *inbytesleft = srclen;
        *outbuf = dst;
        *outbytesleft = dstlen;
        ++total;
    }
    return total;
}
778
779int SDL_iconv_close(SDL_iconv_t cd)
780{
781 if (cd == (SDL_iconv_t)-1) {
782 return -1;
783 }
784 SDL_free(cd);
785 return 0;
786}
787
788#endif // !HAVE_ICONV
789
// Convert a whole buffer between encodings in one call, returning a freshly
// SDL_malloc()'d, nul-terminated result that the caller must SDL_free().
// Both encodings default to UTF-8 when NULL/empty. Returns NULL if the
// converter can't be opened or memory runs out.
char *SDL_iconv_string(const char *tocode, const char *fromcode, const char *inbuf, size_t inbytesleft)
{
    SDL_iconv_t cd;
    char *string;
    size_t stringsize;
    char *outbuf;
    size_t outbytesleft;
    size_t retCode = 0;

    if (!tocode || !*tocode) {
        tocode = "UTF-8";
    }
    if (!fromcode || !*fromcode) {
        fromcode = "UTF-8";
    }
    cd = SDL_iconv_open(tocode, fromcode);
    if (cd == (SDL_iconv_t)-1) {
        return NULL;
    }

    // Start with an output buffer the size of the input, plus room for a
    // 4-byte terminator (wide enough for any supported encoding's nul).
    stringsize = inbytesleft;
    string = (char *)SDL_malloc(stringsize + sizeof(Uint32));
    if (!string) {
        SDL_iconv_close(cd);
        return NULL;
    }
    outbuf = string;
    outbytesleft = stringsize;
    SDL_memset(outbuf, 0, sizeof(Uint32));

    while (inbytesleft > 0) {
        const size_t oldinbytesleft = inbytesleft;
        retCode = SDL_iconv(cd, &inbuf, &inbytesleft, &outbuf, &outbytesleft);
        switch (retCode) {
        case SDL_ICONV_E2BIG:
        {
            // Out of output space: double the buffer and rebase outbuf into
            // the (possibly moved) allocation before continuing.
            const ptrdiff_t diff = (ptrdiff_t) (outbuf - string);
            char *oldstring = string;
            stringsize *= 2;
            string = (char *)SDL_realloc(string, stringsize + sizeof(Uint32));
            if (!string) {
                SDL_free(oldstring);
                SDL_iconv_close(cd);
                return NULL;
            }
            outbuf = string + diff;
            outbytesleft = stringsize - diff;
            SDL_memset(outbuf, 0, sizeof(Uint32));
            continue;
        }
        case SDL_ICONV_EILSEQ:
            // Try skipping some input data - not perfect, but...
            ++inbuf;
            --inbytesleft;
            break;
        case SDL_ICONV_EINVAL:
        case SDL_ICONV_ERROR:
            // We can't continue...
            inbytesleft = 0;
            break;
        }
        // Avoid infinite loops when nothing gets converted
        if (oldinbytesleft == inbytesleft) {
            break;
        }
    }
    // Write the final terminator and clean up.
    SDL_memset(outbuf, 0, sizeof(Uint32));
    SDL_iconv_close(cd);

    return string;
}
diff --git a/contrib/SDL-3.2.8/src/stdlib/SDL_malloc.c b/contrib/SDL-3.2.8/src/stdlib/SDL_malloc.c
new file mode 100644
index 0000000..008675f
--- /dev/null
+++ b/contrib/SDL-3.2.8/src/stdlib/SDL_malloc.c
@@ -0,0 +1,6507 @@
1/*
2 Simple DirectMedia Layer
3 Copyright (C) 1997-2025 Sam Lantinga <slouken@libsdl.org>
4
5 This software is provided 'as-is', without any express or implied
6 warranty. In no event will the authors be held liable for any damages
7 arising from the use of this software.
8
9 Permission is granted to anyone to use this software for any purpose,
10 including commercial applications, and to alter it and redistribute it
11 freely, subject to the following restrictions:
12
13 1. The origin of this software must not be misrepresented; you must not
14 claim that you wrote the original software. If you use this software
15 in a product, an acknowledgment in the product documentation would be
16 appreciated but is not required.
17 2. Altered source versions must be plainly marked as such, and must not be
18 misrepresented as being the original software.
19 3. This notice may not be removed or altered from any source distribution.
20*/
21#include "SDL_internal.h"
22
23/* This file contains portable memory management functions for SDL */
24
25#ifndef HAVE_MALLOC
26#define LACKS_SYS_TYPES_H
27#define LACKS_STDIO_H
28#define LACKS_STRINGS_H
29#define LACKS_STRING_H
30#define LACKS_STDLIB_H
31#define ABORT
32#define NO_MALLOC_STATS 1
33#define USE_LOCKS 1
34#define USE_DL_PREFIX
35
36/*
37 This is a version (aka dlmalloc) of malloc/free/realloc written by
38 Doug Lea and released to the public domain, as explained at
39 http://creativecommons.org/publicdomain/zero/1.0/ Send questions,
40 comments, complaints, performance data, etc to dl@cs.oswego.edu
41
42* Version 2.8.6 Wed Aug 29 06:57:58 2012 Doug Lea
43 Note: There may be an updated version of this malloc obtainable at
44 ftp://gee.cs.oswego.edu/pub/misc/malloc.c
45 Check before installing!
46
47* Quickstart
48
49 This library is all in one file to simplify the most common usage:
50 ftp it, compile it (-O3), and link it into another program. All of
51 the compile-time options default to reasonable values for use on
52 most platforms. You might later want to step through various
53 compile-time and dynamic tuning options.
54
55 For convenience, an include file for code using this malloc is at:
56 ftp://gee.cs.oswego.edu/pub/misc/malloc-2.8.6.h
57 You don't really need this .h file unless you call functions not
58 defined in your system include files. The .h file contains only the
59 excerpts from this file needed for using this malloc on ANSI C/C++
60 systems, so long as you haven't changed compile-time options about
61 naming and tuning parameters. If you do, then you can create your
62 own malloc.h that does include all settings by cutting at the point
63 indicated below. Note that you may already by default be using a C
64 library containing a malloc that is based on some version of this
65 malloc (for example in linux). You might still want to use the one
66 in this file to customize settings or to avoid overheads associated
67 with library versions.
68
69* Vital statistics:
70
71 Supported pointer/size_t representation: 4 or 8 bytes
72 size_t MUST be an unsigned type of the same width as
73 pointers. (If you are using an ancient system that declares
74 size_t as a signed type, or need it to be a different width
75 than pointers, you can use a previous release of this malloc
76 (e.g. 2.7.2) supporting these.)
77
78 Alignment: 8 bytes (minimum)
79 This suffices for nearly all current machines and C compilers.
80 However, you can define MALLOC_ALIGNMENT to be wider than this
81 if necessary (up to 128bytes), at the expense of using more space.
82
83 Minimum overhead per allocated chunk: 4 or 8 bytes (if 4byte sizes)
84 8 or 16 bytes (if 8byte sizes)
85 Each malloced chunk has a hidden word of overhead holding size
86 and status information, and additional cross-check word
87 if FOOTERS is defined.
88
89 Minimum allocated size: 4-byte ptrs: 16 bytes (including overhead)
90 8-byte ptrs: 32 bytes (including overhead)
91
92 Even a request for zero bytes (i.e., malloc(0)) returns a
93 pointer to something of the minimum allocatable size.
94 The maximum overhead wastage (i.e., number of extra bytes
95 allocated than were requested in malloc) is less than or equal
96 to the minimum size, except for requests >= mmap_threshold that
97 are serviced via mmap(), where the worst case wastage is about
98 32 bytes plus the remainder from a system page (the minimal
99 mmap unit); typically 4096 or 8192 bytes.
100
101 Security: static-safe; optionally more or less
102 The "security" of malloc refers to the ability of malicious
103 code to accentuate the effects of errors (for example, freeing
104 space that is not currently malloc'ed or overwriting past the
105 ends of chunks) in code that calls malloc. This malloc
106 guarantees not to modify any memory locations below the base of
107 heap, i.e., static variables, even in the presence of usage
108 errors. The routines additionally detect most improper frees
109 and reallocs. All this holds as long as the static bookkeeping
110 for malloc itself is not corrupted by some other means. This
111 is only one aspect of security -- these checks do not, and
112 cannot, detect all possible programming errors.
113
114 If FOOTERS is defined nonzero, then each allocated chunk
115 carries an additional check word to verify that it was malloced
116 from its space. These check words are the same within each
117 execution of a program using malloc, but differ across
118 executions, so externally crafted fake chunks cannot be
119 freed. This improves security by rejecting frees/reallocs that
120 could corrupt heap memory, in addition to the checks preventing
121 writes to statics that are always on. This may further improve
122 security at the expense of time and space overhead. (Note that
123 FOOTERS may also be worth using with MSPACES.)
124
125 By default detected errors cause the program to abort (calling
126 "abort()"). You can override this to instead proceed past
127 errors by defining PROCEED_ON_ERROR. In this case, a bad free
128 has no effect, and a malloc that encounters a bad address
129 caused by user overwrites will ignore the bad address by
130 dropping pointers and indices to all known memory. This may
131 be appropriate for programs that should continue if at all
132 possible in the face of programming errors, although they may
133 run out of memory because dropped memory is never reclaimed.
134
135 If you don't like either of these options, you can define
136 CORRUPTION_ERROR_ACTION and USAGE_ERROR_ACTION to do anything
 else. And if you are sure that your program using malloc has
138 no errors or vulnerabilities, you can define INSECURE to 1,
139 which might (or might not) provide a small performance improvement.
140
141 It is also possible to limit the maximum total allocatable
142 space, using malloc_set_footprint_limit. This is not
143 designed as a security feature in itself (calls to set limits
144 are not screened or privileged), but may be useful as one
145 aspect of a secure implementation.
146
147 Thread-safety: NOT thread-safe unless USE_LOCKS defined non-zero
148 When USE_LOCKS is defined, each public call to malloc, free,
149 etc is surrounded with a lock. By default, this uses a plain
 pthread mutex, win32 critical section, or a spin-lock if
151 available for the platform and not disabled by setting
152 USE_SPIN_LOCKS=0. However, if USE_RECURSIVE_LOCKS is defined,
153 recursive versions are used instead (which are not required for
154 base functionality but may be needed in layered extensions).
155 Using a global lock is not especially fast, and can be a major
156 bottleneck. It is designed only to provide minimal protection
157 in concurrent environments, and to provide a basis for
158 extensions. If you are using malloc in a concurrent program,
159 consider instead using nedmalloc
160 (http://www.nedprod.com/programs/portable/nedmalloc/) or
161 ptmalloc (See http://www.malloc.de), which are derived from
162 versions of this malloc.
163
164 System requirements: Any combination of MORECORE and/or MMAP/MUNMAP
165 This malloc can use unix sbrk or any emulation (invoked using
166 the CALL_MORECORE macro) and/or mmap/munmap or any emulation
167 (invoked using CALL_MMAP/CALL_MUNMAP) to get and release system
168 memory. On most unix systems, it tends to work best if both
169 MORECORE and MMAP are enabled. On Win32, it uses emulations
170 based on VirtualAlloc. It also uses common C library functions
171 like memset.
172
173 Compliance: I believe it is compliant with the Single Unix Specification
174 (See http://www.unix.org). Also SVID/XPG, ANSI C, and probably
175 others as well.
176
177* Overview of algorithms
178
179 This is not the fastest, most space-conserving, most portable, or
180 most tunable malloc ever written. However it is among the fastest
181 while also being among the most space-conserving, portable and
182 tunable. Consistent balance across these factors results in a good
183 general-purpose allocator for malloc-intensive programs.
184
185 In most ways, this malloc is a best-fit allocator. Generally, it
186 chooses the best-fitting existing chunk for a request, with ties
187 broken in approximately least-recently-used order. (This strategy
188 normally maintains low fragmentation.) However, for requests less
189 than 256bytes, it deviates from best-fit when there is not an
190 exactly fitting available chunk by preferring to use space adjacent
191 to that used for the previous small request, as well as by breaking
192 ties in approximately most-recently-used order. (These enhance
193 locality of series of small allocations.) And for very large requests
194 (>= 256Kb by default), it relies on system memory mapping
195 facilities, if supported. (This helps avoid carrying around and
196 possibly fragmenting memory used only for large chunks.)
197
198 All operations (except malloc_stats and mallinfo) have execution
199 times that are bounded by a constant factor of the number of bits in
200 a size_t, not counting any clearing in calloc or copying in realloc,
201 or actions surrounding MORECORE and MMAP that have times
202 proportional to the number of non-contiguous regions returned by
203 system allocation routines, which is often just 1. In real-time
204 applications, you can optionally suppress segment traversals using
205 NO_SEGMENT_TRAVERSAL, which assures bounded execution even when
206 system allocators return non-contiguous spaces, at the typical
207 expense of carrying around more memory and increased fragmentation.
208
209 The implementation is not very modular and seriously overuses
210 macros. Perhaps someday all C compilers will do as good a job
211 inlining modular code as can now be done by brute-force expansion,
212 but now, enough of them seem not to.
213
214 Some compilers issue a lot of warnings about code that is
215 dead/unreachable only on some platforms, and also about intentional
216 uses of negation on unsigned types. All known cases of each can be
217 ignored.
218
219 For a longer but out of date high-level description, see
220 http://gee.cs.oswego.edu/dl/html/malloc.html
221
222* MSPACES
223 If MSPACES is defined, then in addition to malloc, free, etc.,
224 this file also defines mspace_malloc, mspace_free, etc. These
225 are versions of malloc routines that take an "mspace" argument
226 obtained using create_mspace, to control all internal bookkeeping.
227 If ONLY_MSPACES is defined, only these versions are compiled.
228 So if you would like to use this allocator for only some allocations,
229 and your system malloc for others, you can compile with
230 ONLY_MSPACES and then do something like...
231 static mspace mymspace = create_mspace(0,0); // for example
232 #define mymalloc(bytes) mspace_malloc(mymspace, bytes)
233
234 (Note: If you only need one instance of an mspace, you can instead
235 use "USE_DL_PREFIX" to relabel the global malloc.)
236
237 You can similarly create thread-local allocators by storing
238 mspaces as thread-locals. For example:
239 static __thread mspace tlms = 0;
240 void* tlmalloc(size_t bytes) {
241 if (tlms == 0) tlms = create_mspace(0, 0);
242 return mspace_malloc(tlms, bytes);
243 }
244 void tlfree(void* mem) { mspace_free(tlms, mem); }
245
246 Unless FOOTERS is defined, each mspace is completely independent.
247 You cannot allocate from one and free to another (although
248 conformance is only weakly checked, so usage errors are not always
249 caught). If FOOTERS is defined, then each chunk carries around a tag
250 indicating its originating mspace, and frees are directed to their
251 originating spaces. Normally, this requires use of locks.
252
253 ------------------------- Compile-time options ---------------------------
254
255Be careful in setting #define values for numerical constants of type
256size_t. On some systems, literal values are not automatically extended
257to size_t precision unless they are explicitly casted. You can also
258use the symbolic values MAX_SIZE_T, SIZE_T_ONE, etc below.
259
260WIN32 default: defined if _WIN32 defined
261 Defining WIN32 sets up defaults for MS environment and compilers.
262 Otherwise defaults are for unix. Beware that there seem to be some
263 cases where this malloc might not be a pure drop-in replacement for
264 Win32 malloc: Random-looking failures from Win32 GDI API's (eg;
265 SetDIBits()) may be due to bugs in some video driver implementations
266 when pixel buffers are malloc()ed, and the region spans more than
267 one VirtualAlloc()ed region. Because dlmalloc uses a small (64Kb)
268 default granularity, pixel buffers may straddle virtual allocation
269 regions more often than when using the Microsoft allocator. You can
270 avoid this by using VirtualAlloc() and VirtualFree() for all pixel
271 buffers rather than using malloc(). If this is not possible,
272 recompile this malloc with a larger DEFAULT_GRANULARITY. Note:
273 in cases where MSC and gcc (cygwin) are known to differ on WIN32,
274 conditions use _MSC_VER to distinguish them.
275
276DLMALLOC_EXPORT default: extern
277 Defines how public APIs are declared. If you want to export via a
278 Windows DLL, you might define this as
279 #define DLMALLOC_EXPORT extern __declspec(dllexport)
280 If you want a POSIX ELF shared object, you might use
281 #define DLMALLOC_EXPORT extern __attribute__((visibility("default")))
282
283MALLOC_ALIGNMENT default: (size_t)(2 * sizeof(void *))
284 Controls the minimum alignment for malloc'ed chunks. It must be a
285 power of two and at least 8, even on machines for which smaller
286 alignments would suffice. It may be defined as larger than this
287 though. Note however that code and data structures are optimized for
288 the case of 8-byte alignment.
289
290MSPACES default: 0 (false)
291 If true, compile in support for independent allocation spaces.
292 This is only supported if HAVE_MMAP is true.
293
294ONLY_MSPACES default: 0 (false)
295 If true, only compile in mspace versions, not regular versions.
296
297USE_LOCKS default: 0 (false)
298 Causes each call to each public routine to be surrounded with
299 pthread or WIN32 mutex lock/unlock. (If set true, this can be
300 overridden on a per-mspace basis for mspace versions.) If set to a
301 non-zero value other than 1, locks are used, but their
302 implementation is left out, so lock functions must be supplied manually,
303 as described below.
304
305USE_SPIN_LOCKS default: 1 iff USE_LOCKS and spin locks available
306 If true, uses custom spin locks for locking. This is currently
307 supported only gcc >= 4.1, older gccs on x86 platforms, and recent
308 MS compilers. Otherwise, posix locks or win32 critical sections are
309 used.
310
311USE_RECURSIVE_LOCKS default: not defined
312 If defined nonzero, uses recursive (aka reentrant) locks, otherwise
313 uses plain mutexes. This is not required for malloc proper, but may
314 be needed for layered allocators such as nedmalloc.
315
316LOCK_AT_FORK default: not defined
317 If defined nonzero, performs pthread_atfork upon initialization
318 to initialize child lock while holding parent lock. The implementation
319 assumes that pthread locks (not custom locks) are being used. In other
320 cases, you may need to customize the implementation.
321
322FOOTERS default: 0
323 If true, provide extra checking and dispatching by placing
324 information in the footers of allocated chunks. This adds
325 space and time overhead.
326
327INSECURE default: 0
328 If true, omit checks for usage errors and heap space overwrites.
329
330USE_DL_PREFIX default: NOT defined
331 Causes compiler to prefix all public routines with the string 'dl'.
332 This can be useful when you only want to use this malloc in one part
333 of a program, using your regular system malloc elsewhere.
334
335MALLOC_INSPECT_ALL default: NOT defined
336 If defined, compiles malloc_inspect_all and mspace_inspect_all, that
337 perform traversal of all heap space. Unless access to these
338 functions is otherwise restricted, you probably do not want to
339 include them in secure implementations.
340
341ABORT default: defined as abort()
342 Defines how to abort on failed checks. On most systems, a failed
343 check cannot die with an "assert" or even print an informative
344 message, because the underlying print routines in turn call malloc,
345 which will fail again. Generally, the best policy is to simply call
346 abort(). It's not very useful to do more than this because many
347 errors due to overwriting will show up as address faults (null, odd
348 addresses etc) rather than malloc-triggered checks, so will also
349 abort. Also, most compilers know that abort() does not return, so
350 can better optimize code conditionally calling it.
351
352PROCEED_ON_ERROR default: defined as 0 (false)
 Controls whether detected bad addresses cause them to be bypassed
354 rather than aborting. If set, detected bad arguments to free and
355 realloc are ignored. And all bookkeeping information is zeroed out
356 upon a detected overwrite of freed heap space, thus losing the
357 ability to ever return it from malloc again, but enabling the
358 application to proceed. If PROCEED_ON_ERROR is defined, the
359 static variable malloc_corruption_error_count is compiled in
360 and can be examined to see if errors have occurred. This option
361 generates slower code than the default abort policy.
362
363DEBUG default: NOT defined
364 The DEBUG setting is mainly intended for people trying to modify
365 this code or diagnose problems when porting to new platforms.
366 However, it may also be able to better isolate user errors than just
367 using runtime checks. The assertions in the check routines spell
368 out in more detail the assumptions and invariants underlying the
369 algorithms. The checking is fairly extensive, and will slow down
370 execution noticeably. Calling malloc_stats or mallinfo with DEBUG
371 set will attempt to check every non-mmapped allocated and free chunk
372 in the course of computing the summaries.
373
374ABORT_ON_ASSERT_FAILURE default: defined as 1 (true)
375 Debugging assertion failures can be nearly impossible if your
376 version of the assert macro causes malloc to be called, which will
377 lead to a cascade of further failures, blowing the runtime stack.
 ABORT_ON_ASSERT_FAILURE causes assertion failures to call abort(),
379 which will usually make debugging easier.
380
381MALLOC_FAILURE_ACTION default: sets errno to ENOMEM, or no-op on win32
382 The action to take before "return 0" when malloc fails to be able to
383 return memory because there is none available.
384
385HAVE_MORECORE default: 1 (true) unless win32 or ONLY_MSPACES
386 True if this system supports sbrk or an emulation of it.
387
388MORECORE default: sbrk
389 The name of the sbrk-style system routine to call to obtain more
390 memory. See below for guidance on writing custom MORECORE
391 functions. The type of the argument to sbrk/MORECORE varies across
392 systems. It cannot be size_t, because it supports negative
393 arguments, so it is normally the signed type of the same width as
394 size_t (sometimes declared as "intptr_t"). It doesn't much matter
395 though. Internally, we only call it with arguments less than half
396 the max value of a size_t, which should work across all reasonable
397 possibilities, although sometimes generating compiler warnings.
398
399MORECORE_CONTIGUOUS default: 1 (true) if HAVE_MORECORE
400 If true, take advantage of fact that consecutive calls to MORECORE
401 with positive arguments always return contiguous increasing
402 addresses. This is true of unix sbrk. It does not hurt too much to
403 set it true anyway, since malloc copes with non-contiguities.
404 Setting it false when definitely non-contiguous saves time
405 and possibly wasted space it would take to discover this though.
406
407MORECORE_CANNOT_TRIM default: NOT defined
408 True if MORECORE cannot release space back to the system when given
409 negative arguments. This is generally necessary only if you are
410 using a hand-crafted MORECORE function that cannot handle negative
411 arguments.
412
413NO_SEGMENT_TRAVERSAL default: 0
414 If non-zero, suppresses traversals of memory segments
415 returned by either MORECORE or CALL_MMAP. This disables
416 merging of segments that are contiguous, and selectively
417 releasing them to the OS if unused, but bounds execution times.
418
419HAVE_MMAP default: 1 (true)
420 True if this system supports mmap or an emulation of it. If so, and
421 HAVE_MORECORE is not true, MMAP is used for all system
422 allocation. If set and HAVE_MORECORE is true as well, MMAP is
423 primarily used to directly allocate very large blocks. It is also
424 used as a backup strategy in cases where MORECORE fails to provide
425 space from system. Note: A single call to MUNMAP is assumed to be
 able to unmap memory that may have been allocated using multiple calls
427 to MMAP, so long as they are adjacent.
428
429HAVE_MREMAP default: 1 on linux, else 0
430 If true realloc() uses mremap() to re-allocate large blocks and
431 extend or shrink allocation spaces.
432
433MMAP_CLEARS default: 1 except on WINCE.
434 True if mmap clears memory so calloc doesn't need to. This is true
435 for standard unix mmap using /dev/zero and on WIN32 except for WINCE.
436
437USE_BUILTIN_FFS default: 0 (i.e., not used)
438 Causes malloc to use the builtin ffs() function to compute indices.
439 Some compilers may recognize and intrinsify ffs to be faster than the
440 supplied C version. Also, the case of x86 using gcc is special-cased
441 to an asm instruction, so is already as fast as it can be, and so
442 this setting has no effect. Similarly for Win32 under recent MS compilers.
443 (On most x86s, the asm version is only slightly faster than the C version.)
444
445malloc_getpagesize default: derive from system includes, or 4096.
446 The system page size. To the extent possible, this malloc manages
447 memory from the system in page-size units. This may be (and
448 usually is) a function rather than a constant. This is ignored
449 if WIN32, where page size is determined using getSystemInfo during
450 initialization.
451
452USE_DEV_RANDOM default: 0 (i.e., not used)
453 Causes malloc to use /dev/random to initialize secure magic seed for
454 stamping footers. Otherwise, the current time is used.
455
456NO_MALLINFO default: 0
457 If defined, don't compile "mallinfo". This can be a simple way
458 of dealing with mismatches between system declarations and
459 those in this file.
460
461MALLINFO_FIELD_TYPE default: size_t
462 The type of the fields in the mallinfo struct. This was originally
463 defined as "int" in SVID etc, but is more usefully defined as
464 size_t. The value is used only if HAVE_USR_INCLUDE_MALLOC_H is not set
465
466NO_MALLOC_STATS default: 0
467 If defined, don't compile "malloc_stats". This avoids calls to
468 fprintf and bringing in stdio dependencies you might not want.
469
470REALLOC_ZERO_BYTES_FREES default: not defined
471 This should be set if a call to realloc with zero bytes should
472 be the same as a call to free. Some people think it should. Otherwise,
473 since this malloc returns a unique pointer for malloc(0), so does
474 realloc(p, 0).
475
476LACKS_UNISTD_H, LACKS_FCNTL_H, LACKS_SYS_PARAM_H, LACKS_SYS_MMAN_H
477LACKS_STRINGS_H, LACKS_STRING_H, LACKS_SYS_TYPES_H, LACKS_ERRNO_H
478LACKS_STDLIB_H LACKS_SCHED_H LACKS_TIME_H default: NOT defined unless on WIN32
479 Define these if your system does not have these header files.
480 You might need to manually insert some of the declarations they provide.
481
482DEFAULT_GRANULARITY default: page size if MORECORE_CONTIGUOUS,
483 system_info.dwAllocationGranularity in WIN32,
484 otherwise 64K.
485 Also settable using mallopt(M_GRANULARITY, x)
486 The unit for allocating and deallocating memory from the system. On
487 most systems with contiguous MORECORE, there is no reason to
488 make this more than a page. However, systems with MMAP tend to
489 either require or encourage larger granularities. You can increase
490 this value to prevent system allocation functions to be called so
491 often, especially if they are slow. The value must be at least one
492 page and must be a power of two. Setting to 0 causes initialization
493 to either page size or win32 region size. (Note: In previous
494 versions of malloc, the equivalent of this option was called
495 "TOP_PAD")
496
497DEFAULT_TRIM_THRESHOLD default: 2MB
498 Also settable using mallopt(M_TRIM_THRESHOLD, x)
499 The maximum amount of unused top-most memory to keep before
500 releasing via malloc_trim in free(). Automatic trimming is mainly
501 useful in long-lived programs using contiguous MORECORE. Because
502 trimming via sbrk can be slow on some systems, and can sometimes be
503 wasteful (in cases where programs immediately afterward allocate
504 more large chunks) the value should be high enough so that your
505 overall system performance would improve by releasing this much
506 memory. As a rough guide, you might set to a value close to the
507 average size of a process (program) running on your system.
508 Releasing this much memory would allow such a process to run in
509 memory. Generally, it is worth tuning trim thresholds when a
510 program undergoes phases where several large chunks are allocated
511 and released in ways that can reuse each other's storage, perhaps
512 mixed with phases where there are no such chunks at all. The trim
513 value must be greater than page size to have any useful effect. To
514 disable trimming completely, you can set to MAX_SIZE_T. Note that the trick
515 some people use of mallocing a huge space and then freeing it at
516 program startup, in an attempt to reserve system memory, doesn't
517 have the intended effect under automatic trimming, since that memory
518 will immediately be returned to the system.
519
520DEFAULT_MMAP_THRESHOLD default: 256K
521 Also settable using mallopt(M_MMAP_THRESHOLD, x)
522 The request size threshold for using MMAP to directly service a
523 request. Requests of at least this size that cannot be allocated
524 using already-existing space will be serviced via mmap. (If enough
525 normal freed space already exists it is used instead.) Using mmap
526 segregates relatively large chunks of memory so that they can be
527 individually obtained and released from the host system. A request
528 serviced through mmap is never reused by any other request (at least
529 not directly; the system may just so happen to remap successive
530 requests to the same locations). Segregating space in this way has
531 the benefits that: Mmapped space can always be individually released
532 back to the system, which helps keep the system level memory demands
533 of a long-lived program low. Also, mapped memory doesn't become
534 `locked' between other chunks, as can happen with normally allocated
535 chunks, which means that even trimming via malloc_trim would not
536 release them. However, it has the disadvantage that the space
537 cannot be reclaimed, consolidated, and then used to service later
538 requests, as happens with normal chunks. The advantages of mmap
539 nearly always outweigh disadvantages for "large" chunks, but the
540 value of "large" may vary across systems. The default is an
541 empirically derived value that works well in most systems. You can
542 disable mmap by setting to MAX_SIZE_T.
543
544MAX_RELEASE_CHECK_RATE default: 4095 unless not HAVE_MMAP
545 The number of consolidated frees between checks to release
546 unused segments when freeing. When using non-contiguous segments,
547 especially with multiple mspaces, checking only for topmost space
548 doesn't always suffice to trigger trimming. To compensate for this,
549 free() will, with a period of MAX_RELEASE_CHECK_RATE (or the
550 current number of segments, if greater) try to release unused
551 segments to the OS when freeing chunks that result in
552 consolidation. The best value for this parameter is a compromise
553 between slowing down frees with relatively costly checks that
554 rarely trigger versus holding on to unused memory. To effectively
555 disable, set to MAX_SIZE_T. This may lead to a very slight speed
556 improvement at the expense of carrying around more memory.
557*/
558
559/* Version identifier to allow people to support multiple versions */
560#ifndef DLMALLOC_VERSION
561#define DLMALLOC_VERSION 20806
562#endif /* DLMALLOC_VERSION */
563
564#ifndef DLMALLOC_EXPORT
565#define DLMALLOC_EXPORT extern
566#endif
567
568#ifndef WIN32
569#ifdef _WIN32
570#define WIN32 1
571#endif /* _WIN32 */
572#ifdef _WIN32_WCE
573#define LACKS_FCNTL_H
574#define WIN32 1
575#endif /* _WIN32_WCE */
576#endif /* WIN32 */
577#ifdef WIN32
578#define WIN32_LEAN_AND_MEAN
579#include <windows.h>
580#include <tchar.h>
581#define HAVE_MMAP 1
582#define HAVE_MORECORE 0
583#define LACKS_UNISTD_H
584#define LACKS_SYS_PARAM_H
585#define LACKS_SYS_MMAN_H
586#define LACKS_STRING_H
587#define LACKS_STRINGS_H
588#define LACKS_SYS_TYPES_H
589#define LACKS_ERRNO_H
590#define LACKS_SCHED_H
591#ifndef MALLOC_FAILURE_ACTION
592#define MALLOC_FAILURE_ACTION
593#endif /* MALLOC_FAILURE_ACTION */
594#ifndef MMAP_CLEARS
595#ifdef _WIN32_WCE /* WINCE reportedly does not clear */
596#define MMAP_CLEARS 0
597#else
598#define MMAP_CLEARS 1
599#endif /* _WIN32_WCE */
600#endif /*MMAP_CLEARS */
601#endif /* WIN32 */
602
603#if defined(DARWIN) || defined(_DARWIN)
604/* Mac OSX docs advise not to use sbrk; it seems better to use mmap */
605#ifndef HAVE_MORECORE
606#define HAVE_MORECORE 0
607#define HAVE_MMAP 1
608/* OSX allocators provide 16 byte alignment */
609#ifndef MALLOC_ALIGNMENT
610#define MALLOC_ALIGNMENT ((size_t)16U)
611#endif
612#endif /* HAVE_MORECORE */
613#endif /* DARWIN */
614
615#ifndef LACKS_SYS_TYPES_H
616#include <sys/types.h> /* For size_t */
617#endif /* LACKS_SYS_TYPES_H */
618
619/* The maximum possible size_t value has all bits set */
620#define MAX_SIZE_T (~(size_t)0)
621
622#ifndef USE_LOCKS /* ensure true if spin or recursive locks set */
623#define USE_LOCKS ((defined(USE_SPIN_LOCKS) && USE_SPIN_LOCKS != 0) || \
624 (defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0))
625#endif /* USE_LOCKS */
626
627#if USE_LOCKS /* Spin locks for gcc >= 4.1, older gcc on x86, MSC >= 1310 */
628#if ((defined(__GNUC__) && \
629 ((__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1)) || \
630 defined(__i386__) || defined(__x86_64__))) || \
631 (defined(_MSC_VER) && _MSC_VER>=1310))
632#ifndef USE_SPIN_LOCKS
633#define USE_SPIN_LOCKS 1
634#endif /* USE_SPIN_LOCKS */
635#elif USE_SPIN_LOCKS
636#error "USE_SPIN_LOCKS defined without implementation"
637#endif /* ... locks available... */
638#elif !defined(USE_SPIN_LOCKS)
639#define USE_SPIN_LOCKS 0
640#endif /* USE_LOCKS */
641
642#ifndef ONLY_MSPACES
643#define ONLY_MSPACES 0
644#endif /* ONLY_MSPACES */
645#ifndef MSPACES
646#if ONLY_MSPACES
647#define MSPACES 1
648#else /* ONLY_MSPACES */
649#define MSPACES 0
650#endif /* ONLY_MSPACES */
651#endif /* MSPACES */
652#ifndef MALLOC_ALIGNMENT
653#define MALLOC_ALIGNMENT ((size_t)(2 * sizeof(void *)))
654#endif /* MALLOC_ALIGNMENT */
655#ifndef FOOTERS
656#define FOOTERS 0
657#endif /* FOOTERS */
658#ifndef ABORT
659#define ABORT abort()
660#endif /* ABORT */
661#ifndef ABORT_ON_ASSERT_FAILURE
662#define ABORT_ON_ASSERT_FAILURE 1
663#endif /* ABORT_ON_ASSERT_FAILURE */
664#ifndef PROCEED_ON_ERROR
665#define PROCEED_ON_ERROR 0
666#endif /* PROCEED_ON_ERROR */
667
668#ifndef INSECURE
669#define INSECURE 0
670#endif /* INSECURE */
671#ifndef MALLOC_INSPECT_ALL
672#define MALLOC_INSPECT_ALL 0
673#endif /* MALLOC_INSPECT_ALL */
674#ifndef HAVE_MMAP
675#define HAVE_MMAP 1
676#endif /* HAVE_MMAP */
677#ifndef MMAP_CLEARS
678#define MMAP_CLEARS 1
679#endif /* MMAP_CLEARS */
680#ifndef HAVE_MREMAP
681#ifdef linux
682#define HAVE_MREMAP 1
683#define _GNU_SOURCE /* Turns on mremap() definition */
684#else /* linux */
685#define HAVE_MREMAP 0
686#endif /* linux */
687#endif /* HAVE_MREMAP */
688#ifndef MALLOC_FAILURE_ACTION
689#define MALLOC_FAILURE_ACTION errno = ENOMEM;
690#endif /* MALLOC_FAILURE_ACTION */
691#ifndef HAVE_MORECORE
692#if ONLY_MSPACES
693#define HAVE_MORECORE 0
694#else /* ONLY_MSPACES */
695#define HAVE_MORECORE 1
696#endif /* ONLY_MSPACES */
697#endif /* HAVE_MORECORE */
698#if !HAVE_MORECORE
699#define MORECORE_CONTIGUOUS 0
700#else /* !HAVE_MORECORE */
701#define MORECORE_DEFAULT sbrk
702#ifndef MORECORE_CONTIGUOUS
703#define MORECORE_CONTIGUOUS 1
704#endif /* MORECORE_CONTIGUOUS */
705#endif /* HAVE_MORECORE */
706#ifndef DEFAULT_GRANULARITY
707#if (MORECORE_CONTIGUOUS || defined(WIN32))
708#define DEFAULT_GRANULARITY (0) /* 0 means to compute in init_mparams */
709#else /* MORECORE_CONTIGUOUS */
710#define DEFAULT_GRANULARITY ((size_t)64U * (size_t)1024U)
711#endif /* MORECORE_CONTIGUOUS */
712#endif /* DEFAULT_GRANULARITY */
713#ifndef DEFAULT_TRIM_THRESHOLD
714#ifndef MORECORE_CANNOT_TRIM
715#define DEFAULT_TRIM_THRESHOLD ((size_t)2U * (size_t)1024U * (size_t)1024U)
716#else /* MORECORE_CANNOT_TRIM */
717#define DEFAULT_TRIM_THRESHOLD MAX_SIZE_T
718#endif /* MORECORE_CANNOT_TRIM */
719#endif /* DEFAULT_TRIM_THRESHOLD */
720#ifndef DEFAULT_MMAP_THRESHOLD
721#if HAVE_MMAP
722#define DEFAULT_MMAP_THRESHOLD ((size_t)256U * (size_t)1024U)
723#else /* HAVE_MMAP */
724#define DEFAULT_MMAP_THRESHOLD MAX_SIZE_T
725#endif /* HAVE_MMAP */
726#endif /* DEFAULT_MMAP_THRESHOLD */
727#ifndef MAX_RELEASE_CHECK_RATE
728#if HAVE_MMAP
729#define MAX_RELEASE_CHECK_RATE 4095
730#else
731#define MAX_RELEASE_CHECK_RATE MAX_SIZE_T
732#endif /* HAVE_MMAP */
733#endif /* MAX_RELEASE_CHECK_RATE */
734#ifndef USE_BUILTIN_FFS
735#define USE_BUILTIN_FFS 0
736#endif /* USE_BUILTIN_FFS */
737#ifndef USE_DEV_RANDOM
738#define USE_DEV_RANDOM 0
739#endif /* USE_DEV_RANDOM */
740#ifndef NO_MALLINFO
741#define NO_MALLINFO 0
742#endif /* NO_MALLINFO */
743#ifndef MALLINFO_FIELD_TYPE
744#define MALLINFO_FIELD_TYPE size_t
745#endif /* MALLINFO_FIELD_TYPE */
746#ifndef NO_MALLOC_STATS
747#define NO_MALLOC_STATS 0
748#endif /* NO_MALLOC_STATS */
749#ifndef NO_SEGMENT_TRAVERSAL
750#define NO_SEGMENT_TRAVERSAL 0
751#endif /* NO_SEGMENT_TRAVERSAL */
752
753/*
754 mallopt tuning options. SVID/XPG defines four standard parameter
755 numbers for mallopt, normally defined in malloc.h. None of these
756 are used in this malloc, so setting them has no effect. But this
757 malloc does support the following options.
758*/
759
760#define M_TRIM_THRESHOLD (-1)
761#define M_GRANULARITY (-2)
762#define M_MMAP_THRESHOLD (-3)
763
764/* ------------------------ Mallinfo declarations ------------------------ */
765
766#if !NO_MALLINFO
767/*
768 This version of malloc supports the standard SVID/XPG mallinfo
769 routine that returns a struct containing usage properties and
770 statistics. It should work on any system that has a
771 /usr/include/malloc.h defining struct mallinfo. The main
772 declaration needed is the mallinfo struct that is returned (by-copy)
  by mallinfo(). The mallinfo struct contains a bunch of fields that
  are not even meaningful in this version of malloc. These fields
  are instead filled by mallinfo() with other numbers that might be of
776 interest.
777
778 HAVE_USR_INCLUDE_MALLOC_H should be set if you have a
779 /usr/include/malloc.h file that includes a declaration of struct
780 mallinfo. If so, it is included; else a compliant version is
781 declared below. These must be precisely the same for mallinfo() to
782 work. The original SVID version of this struct, defined on most
783 systems with mallinfo, declares all fields as ints. But some others
784 define as unsigned long. If your system defines the fields using a
785 type of different width than listed here, you MUST #include your
786 system version and #define HAVE_USR_INCLUDE_MALLOC_H.
787*/
788
789/* #define HAVE_USR_INCLUDE_MALLOC_H */
790
791#ifdef HAVE_USR_INCLUDE_MALLOC_H
792#include "/usr/include/malloc.h"
793#else /* HAVE_USR_INCLUDE_MALLOC_H */
794#ifndef STRUCT_MALLINFO_DECLARED
795/* HP-UX (and others?) redefines mallinfo unless _STRUCT_MALLINFO is defined */
796#define _STRUCT_MALLINFO
797#define STRUCT_MALLINFO_DECLARED 1
/* Usage statistics returned (by copy) by mallinfo()/dlmallinfo().
   Field width is MALLINFO_FIELD_TYPE (size_t by default — see the
   definition above), which may differ from the SVID/XPG struct on
   systems whose /usr/include/malloc.h declares the fields as int;
   in that case HAVE_USR_INCLUDE_MALLOC_H must be defined instead. */
struct mallinfo {
  MALLINFO_FIELD_TYPE arena;    /* non-mmapped space allocated from system */
  MALLINFO_FIELD_TYPE ordblks;  /* number of free chunks */
  MALLINFO_FIELD_TYPE smblks;   /* always 0 */
  MALLINFO_FIELD_TYPE hblks;    /* always 0 */
  MALLINFO_FIELD_TYPE hblkhd;   /* space in mmapped regions */
  MALLINFO_FIELD_TYPE usmblks;  /* maximum total allocated space */
  MALLINFO_FIELD_TYPE fsmblks;  /* always 0 */
  MALLINFO_FIELD_TYPE uordblks; /* total allocated space */
  MALLINFO_FIELD_TYPE fordblks; /* total free space */
  MALLINFO_FIELD_TYPE keepcost; /* releasable (via malloc_trim) space */
};
810#endif /* STRUCT_MALLINFO_DECLARED */
811#endif /* HAVE_USR_INCLUDE_MALLOC_H */
812#endif /* NO_MALLINFO */
813
814/*
815 Try to persuade compilers to inline. The most critical functions for
816 inlining are defined as macros, so these aren't used for them.
817*/
818
819#if 0 /* SDL */
820#ifndef FORCEINLINE
821 #if defined(__GNUC__)
822#define FORCEINLINE __inline __attribute__ ((always_inline))
823 #elif defined(_MSC_VER)
824 #define FORCEINLINE __forceinline
825 #endif
826#endif
827#endif /* SDL */
828#ifndef NOINLINE
829 #if defined(__GNUC__)
830 #define NOINLINE __attribute__ ((noinline))
831 #elif defined(_MSC_VER)
832 #define NOINLINE __declspec(noinline)
833 #else
834 #define NOINLINE
835 #endif
836#endif
837
838#ifdef __cplusplus
839extern "C" {
840#if 0 /* SDL */
841#ifndef FORCEINLINE
842 #define FORCEINLINE inline
843#endif
844#endif /* SDL */
845#endif /* __cplusplus */
846#if 0 /* SDL */
847#ifndef FORCEINLINE
848 #define FORCEINLINE
849#endif
#endif /* SDL */
851
852#if !ONLY_MSPACES
853
854/* ------------------- Declarations of public routines ------------------- */
855
856#ifndef USE_DL_PREFIX
857#define dlcalloc calloc
858#define dlfree free
859#define dlmalloc malloc
860#define dlmemalign memalign
861#define dlposix_memalign posix_memalign
862#define dlrealloc realloc
863#define dlrealloc_in_place realloc_in_place
864#define dlvalloc valloc
865#define dlpvalloc pvalloc
866#define dlmallinfo mallinfo
867#define dlmallopt mallopt
868#define dlmalloc_trim malloc_trim
869#define dlmalloc_stats malloc_stats
870#define dlmalloc_usable_size malloc_usable_size
871#define dlmalloc_footprint malloc_footprint
872#define dlmalloc_max_footprint malloc_max_footprint
873#define dlmalloc_footprint_limit malloc_footprint_limit
874#define dlmalloc_set_footprint_limit malloc_set_footprint_limit
875#define dlmalloc_inspect_all malloc_inspect_all
876#define dlindependent_calloc independent_calloc
877#define dlindependent_comalloc independent_comalloc
878#define dlbulk_free bulk_free
879#endif /* USE_DL_PREFIX */
880
881/*
882 malloc(size_t n)
883 Returns a pointer to a newly allocated chunk of at least n bytes, or
884 null if no space is available, in which case errno is set to ENOMEM
885 on ANSI C systems.
886
887 If n is zero, malloc returns a minimum-sized chunk. (The minimum
888 size is 16 bytes on most 32bit systems, and 32 bytes on 64bit
889 systems.) Note that size_t is an unsigned type, so calls with
890 arguments that would be negative if signed are interpreted as
891 requests for huge amounts of space, which will often fail. The
892 maximum supported value of n differs across systems, but is in all
893 cases less than the maximum representable value of a size_t.
894*/
895DLMALLOC_EXPORT void* dlmalloc(size_t);
896
897/*
898 free(void* p)
899 Releases the chunk of memory pointed to by p, that had been previously
900 allocated using malloc or a related routine such as realloc.
901 It has no effect if p is null. If p was not malloced or already
902 freed, free(p) will by default cause the current program to abort.
903*/
904DLMALLOC_EXPORT void dlfree(void*);
905
906/*
907 calloc(size_t n_elements, size_t element_size);
908 Returns a pointer to n_elements * element_size bytes, with all locations
909 set to zero.
910*/
911DLMALLOC_EXPORT void* dlcalloc(size_t, size_t);
912
913/*
914 realloc(void* p, size_t n)
915 Returns a pointer to a chunk of size n that contains the same data
916 as does chunk p up to the minimum of (n, p's size) bytes, or null
917 if no space is available.
918
919 The returned pointer may or may not be the same as p. The algorithm
920 prefers extending p in most cases when possible, otherwise it
921 employs the equivalent of a malloc-copy-free sequence.
922
923 If p is null, realloc is equivalent to malloc.
924
925 If space is not available, realloc returns null, errno is set (if on
926 ANSI) and p is NOT freed.
927
928 if n is for fewer bytes than already held by p, the newly unused
929 space is lopped off and freed if possible. realloc with a size
930 argument of zero (re)allocates a minimum-sized chunk.
931
932 The old unix realloc convention of allowing the last-free'd chunk
933 to be used as an argument to realloc is not supported.
934*/
935DLMALLOC_EXPORT void* dlrealloc(void*, size_t);
936
937/*
938 realloc_in_place(void* p, size_t n)
939 Resizes the space allocated for p to size n, only if this can be
940 done without moving p (i.e., only if there is adjacent space
941 available if n is greater than p's current allocated size, or n is
942 less than or equal to p's size). This may be used instead of plain
943 realloc if an alternative allocation strategy is needed upon failure
944 to expand space; for example, reallocation of a buffer that must be
945 memory-aligned or cleared. You can use realloc_in_place to trigger
946 these alternatives only when needed.
947
948 Returns p if successful; otherwise null.
949*/
950DLMALLOC_EXPORT void* dlrealloc_in_place(void*, size_t);
951
952/*
953 memalign(size_t alignment, size_t n);
954 Returns a pointer to a newly allocated chunk of n bytes, aligned
955 in accord with the alignment argument.
956
957 The alignment argument should be a power of two. If the argument is
958 not a power of two, the nearest greater power is used.
959 8-byte alignment is guaranteed by normal malloc calls, so don't
960 bother calling memalign with an argument of 8 or less.
961
962 Overreliance on memalign is a sure way to fragment space.
963*/
964DLMALLOC_EXPORT void* dlmemalign(size_t, size_t);
965
966/*
967 int posix_memalign(void** pp, size_t alignment, size_t n);
968 Allocates a chunk of n bytes, aligned in accord with the alignment
969 argument. Differs from memalign only in that it (1) assigns the
970 allocated memory to *pp rather than returning it, (2) fails and
971 returns EINVAL if the alignment is not a power of two (3) fails and
972 returns ENOMEM if memory cannot be allocated.
973*/
974DLMALLOC_EXPORT int dlposix_memalign(void**, size_t, size_t);
975
976/*
977 valloc(size_t n);
978 Equivalent to memalign(pagesize, n), where pagesize is the page
979 size of the system. If the pagesize is unknown, 4096 is used.
980*/
981DLMALLOC_EXPORT void* dlvalloc(size_t);
982
983/*
984 mallopt(int parameter_number, int parameter_value)
  Sets tunable parameters. The format is to provide a
986 (parameter-number, parameter-value) pair. mallopt then sets the
987 corresponding parameter to the argument value if it can (i.e., so
988 long as the value is meaningful), and returns 1 if successful else
989 0. To workaround the fact that mallopt is specified to use int,
990 not size_t parameters, the value -1 is specially treated as the
991 maximum unsigned size_t value.
992
993 SVID/XPG/ANSI defines four standard param numbers for mallopt,
  normally defined in malloc.h. None of these are used in this malloc,
995 so setting them has no effect. But this malloc also supports other
996 options in mallopt. See below for details. Briefly, supported
997 parameters are as follows (listed defaults are for "typical"
998 configurations).
999
1000 Symbol param # default allowed param values
1001 M_TRIM_THRESHOLD -1 2*1024*1024 any (-1 disables)
1002 M_GRANULARITY -2 page size any power of 2 >= page size
1003 M_MMAP_THRESHOLD -3 256*1024 any (or 0 if no MMAP support)
1004*/
1005DLMALLOC_EXPORT int dlmallopt(int, int);
1006
1007/*
1008 malloc_footprint();
1009 Returns the number of bytes obtained from the system. The total
1010 number of bytes allocated by malloc, realloc etc., is less than this
1011 value. Unlike mallinfo, this function returns only a precomputed
1012 result, so can be called frequently to monitor memory consumption.
1013 Even if locks are otherwise defined, this function does not use them,
1014 so results might not be up to date.
1015*/
1016DLMALLOC_EXPORT size_t dlmalloc_footprint(void);
1017
1018/*
1019 malloc_max_footprint();
1020 Returns the maximum number of bytes obtained from the system. This
1021 value will be greater than current footprint if deallocated space
1022 has been reclaimed by the system. The peak number of bytes allocated
1023 by malloc, realloc etc., is less than this value. Unlike mallinfo,
1024 this function returns only a precomputed result, so can be called
1025 frequently to monitor memory consumption. Even if locks are
1026 otherwise defined, this function does not use them, so results might
1027 not be up to date.
1028*/
1029DLMALLOC_EXPORT size_t dlmalloc_max_footprint(void);
1030
1031/*
1032 malloc_footprint_limit();
1033 Returns the number of bytes that the heap is allowed to obtain from
1034 the system, returning the last value returned by
1035 malloc_set_footprint_limit, or the maximum size_t value if
1036 never set. The returned value reflects a permission. There is no
1037 guarantee that this number of bytes can actually be obtained from
1038 the system.
1039*/
1040DLMALLOC_EXPORT size_t dlmalloc_footprint_limit();
1041
1042/*
1043 malloc_set_footprint_limit();
1044 Sets the maximum number of bytes to obtain from the system, causing
1045 failure returns from malloc and related functions upon attempts to
1046 exceed this value. The argument value may be subject to page
1047 rounding to an enforceable limit; this actual value is returned.
1048 Using an argument of the maximum possible size_t effectively
1049 disables checks. If the argument is less than or equal to the
1050 current malloc_footprint, then all future allocations that require
1051 additional system memory will fail. However, invocation cannot
1052 retroactively deallocate existing used memory.
1053*/
1054DLMALLOC_EXPORT size_t dlmalloc_set_footprint_limit(size_t bytes);
1055
1056#if MALLOC_INSPECT_ALL
1057/*
1058 malloc_inspect_all(void(*handler)(void *start,
1059 void *end,
1060 size_t used_bytes,
1061 void* callback_arg),
1062 void* arg);
1063 Traverses the heap and calls the given handler for each managed
1064 region, skipping all bytes that are (or may be) used for bookkeeping
  purposes. Traversal does not include chunks that have been
1066 directly memory mapped. Each reported region begins at the start
1067 address, and continues up to but not including the end address. The
1068 first used_bytes of the region contain allocated data. If
1069 used_bytes is zero, the region is unallocated. The handler is
1070 invoked with the given callback argument. If locks are defined, they
1071 are held during the entire traversal. It is a bad idea to invoke
1072 other malloc functions from within the handler.
1073
1074 For example, to count the number of in-use chunks with size greater
1075 than 1000, you could write:
1076 static int count = 0;
1077 void count_chunks(void* start, void* end, size_t used, void* arg) {
1078 if (used >= 1000) ++count;
1079 }
1080 then:
1081 malloc_inspect_all(count_chunks, NULL);
1082
1083 malloc_inspect_all is compiled only if MALLOC_INSPECT_ALL is defined.
1084*/
1085DLMALLOC_EXPORT void dlmalloc_inspect_all(void(*handler)(void*, void *, size_t, void*),
1086 void* arg);
1087
1088#endif /* MALLOC_INSPECT_ALL */
1089
1090#if !NO_MALLINFO
1091/*
1092 mallinfo()
1093 Returns (by copy) a struct containing various summary statistics:
1094
1095 arena: current total non-mmapped bytes allocated from system
1096 ordblks: the number of free chunks
1097 smblks: always zero.
1098 hblks: current number of mmapped regions
1099 hblkhd: total bytes held in mmapped regions
1100 usmblks: the maximum total allocated space. This will be greater
1101 than current total if trimming has occurred.
1102 fsmblks: always zero
1103 uordblks: current total allocated space (normal or mmapped)
1104 fordblks: total free space
1105 keepcost: the maximum number of bytes that could ideally be released
1106 back to system via malloc_trim. ("ideally" means that
1107 it ignores page restrictions etc.)
1108
1109 Because these fields are ints, but internal bookkeeping may
1110 be kept as longs, the reported values may wrap around zero and
1111 thus be inaccurate.
1112*/
1113DLMALLOC_EXPORT struct mallinfo dlmallinfo(void);
1114#endif /* NO_MALLINFO */
1115
1116/*
1117 independent_calloc(size_t n_elements, size_t element_size, void* chunks[]);
1118
1119 independent_calloc is similar to calloc, but instead of returning a
1120 single cleared space, it returns an array of pointers to n_elements
1121 independent elements that can hold contents of size elem_size, each
1122 of which starts out cleared, and can be independently freed,
1123 realloc'ed etc. The elements are guaranteed to be adjacently
1124 allocated (this is not guaranteed to occur with multiple callocs or
1125 mallocs), which may also improve cache locality in some
1126 applications.
1127
1128 The "chunks" argument is optional (i.e., may be null, which is
1129 probably the most typical usage). If it is null, the returned array
1130 is itself dynamically allocated and should also be freed when it is
1131 no longer needed. Otherwise, the chunks array must be of at least
1132 n_elements in length. It is filled in with the pointers to the
1133 chunks.
1134
1135 In either case, independent_calloc returns this pointer array, or
1136 null if the allocation failed. If n_elements is zero and "chunks"
1137 is null, it returns a chunk representing an array with zero elements
1138 (which should be freed if not wanted).
1139
1140 Each element must be freed when it is no longer needed. This can be
1141 done all at once using bulk_free.
1142
1143 independent_calloc simplifies and speeds up implementations of many
1144 kinds of pools. It may also be useful when constructing large data
1145 structures that initially have a fixed number of fixed-sized nodes,
1146 but the number is not known at compile time, and some of the nodes
1147 may later need to be freed. For example:
1148
1149 struct Node { int item; struct Node* next; };
1150
1151 struct Node* build_list() {
1152 struct Node** pool;
1153 int n = read_number_of_nodes_needed();
1154 if (n <= 0) return 0;
     pool = (struct Node**)(independent_calloc(n, sizeof(struct Node), 0));
1156 if (pool == 0) die();
1157 // organize into a linked list...
1158 struct Node* first = pool[0];
1159 for (i = 0; i < n-1; ++i)
1160 pool[i]->next = pool[i+1];
1161 free(pool); // Can now free the array (or not, if it is needed later)
1162 return first;
1163 }
1164*/
1165DLMALLOC_EXPORT void** dlindependent_calloc(size_t, size_t, void**);
1166
1167/*
1168 independent_comalloc(size_t n_elements, size_t sizes[], void* chunks[]);
1169
1170 independent_comalloc allocates, all at once, a set of n_elements
1171 chunks with sizes indicated in the "sizes" array. It returns
1172 an array of pointers to these elements, each of which can be
1173 independently freed, realloc'ed etc. The elements are guaranteed to
1174 be adjacently allocated (this is not guaranteed to occur with
1175 multiple callocs or mallocs), which may also improve cache locality
1176 in some applications.
1177
1178 The "chunks" argument is optional (i.e., may be null). If it is null
1179 the returned array is itself dynamically allocated and should also
1180 be freed when it is no longer needed. Otherwise, the chunks array
1181 must be of at least n_elements in length. It is filled in with the
1182 pointers to the chunks.
1183
1184 In either case, independent_comalloc returns this pointer array, or
1185 null if the allocation failed. If n_elements is zero and chunks is
1186 null, it returns a chunk representing an array with zero elements
1187 (which should be freed if not wanted).
1188
1189 Each element must be freed when it is no longer needed. This can be
1190 done all at once using bulk_free.
1191
  independent_comalloc differs from independent_calloc in that each
1193 element may have a different size, and also that it does not
1194 automatically clear elements.
1195
1196 independent_comalloc can be used to speed up allocation in cases
1197 where several structs or objects must always be allocated at the
1198 same time. For example:
1199
1200 struct Head { ... }
1201 struct Foot { ... }
1202
1203 void send_message(char* msg) {
1204 int msglen = strlen(msg);
1205 size_t sizes[3] = { sizeof(struct Head), msglen, sizeof(struct Foot) };
1206 void* chunks[3];
1207 if (independent_comalloc(3, sizes, chunks) == 0)
1208 die();
1209 struct Head* head = (struct Head*)(chunks[0]);
1210 char* body = (char*)(chunks[1]);
1211 struct Foot* foot = (struct Foot*)(chunks[2]);
1212 // ...
1213 }
1214
1215 In general though, independent_comalloc is worth using only for
1216 larger values of n_elements. For small values, you probably won't
1217 detect enough difference from series of malloc calls to bother.
1218
1219 Overuse of independent_comalloc can increase overall memory usage,
1220 since it cannot reuse existing noncontiguous small chunks that
1221 might be available for some of the elements.
1222*/
1223DLMALLOC_EXPORT void** dlindependent_comalloc(size_t, size_t*, void**);
1224
1225/*
1226 bulk_free(void* array[], size_t n_elements)
1227 Frees and clears (sets to null) each non-null pointer in the given
1228 array. This is likely to be faster than freeing them one-by-one.
1229 If footers are used, pointers that have been allocated in different
1230 mspaces are not freed or cleared, and the count of all such pointers
1231 is returned. For large arrays of pointers with poor locality, it
1232 may be worthwhile to sort this array before calling bulk_free.
1233*/
1234DLMALLOC_EXPORT size_t dlbulk_free(void**, size_t n_elements);
1235
1236/*
1237 pvalloc(size_t n);
1238 Equivalent to valloc(minimum-page-that-holds(n)), that is,
1239 round up n to nearest pagesize.
1240 */
1241DLMALLOC_EXPORT void* dlpvalloc(size_t);
1242
1243/*
1244 malloc_trim(size_t pad);
1245
1246 If possible, gives memory back to the system (via negative arguments
1247 to sbrk) if there is unused memory at the `high' end of the malloc
1248 pool or in unused MMAP segments. You can call this after freeing
1249 large blocks of memory to potentially reduce the system-level memory
1250 requirements of a program. However, it cannot guarantee to reduce
1251 memory. Under some allocation patterns, some large free blocks of
1252 memory will be locked between two used chunks, so they cannot be
1253 given back to the system.
1254
1255 The `pad' argument to malloc_trim represents the amount of free
1256 trailing space to leave untrimmed. If this argument is zero, only
1257 the minimum amount of memory to maintain internal data structures
1258 will be left. Non-zero arguments can be supplied to maintain enough
1259 trailing space to service future expected allocations without having
1260 to re-obtain memory from the system.
1261
1262 Malloc_trim returns 1 if it actually released any memory, else 0.
1263*/
1264DLMALLOC_EXPORT int dlmalloc_trim(size_t);
1265
1266/*
1267 malloc_stats();
1268 Prints on stderr the amount of space obtained from the system (both
1269 via sbrk and mmap), the maximum amount (which may be more than
1270 current if malloc_trim and/or munmap got called), and the current
1271 number of bytes allocated via malloc (or realloc, etc) but not yet
1272 freed. Note that this is the number of bytes allocated, not the
1273 number requested. It will be larger than the number requested
1274 because of alignment and bookkeeping overhead. Because it includes
1275 alignment wastage as being in use, this figure may be greater than
1276 zero even when no user-level chunks are allocated.
1277
1278 The reported current and maximum system memory can be inaccurate if
1279 a program makes other calls to system memory allocation functions
1280 (normally sbrk) outside of malloc.
1281
1282 malloc_stats prints only the most commonly interesting statistics.
1283 More information can be obtained by calling mallinfo.
1284*/
1285DLMALLOC_EXPORT void dlmalloc_stats(void);
1286
1287/*
1288 malloc_usable_size(void* p);
1289
1290 Returns the number of bytes you can actually use in
1291 an allocated chunk, which may be more than you requested (although
1292 often not) due to alignment and minimum size constraints.
1293 You can use this many bytes without worrying about
1294 overwriting other allocated objects. This is not a particularly great
1295 programming practice. malloc_usable_size can be more useful in
1296 debugging and assertions, for example:
1297
1298 p = malloc(n);
1299 assert(malloc_usable_size(p) >= 256);
1300*/
1301size_t dlmalloc_usable_size(void*);
1302
1303#endif /* ONLY_MSPACES */
1304
1305#if MSPACES
1306
1307/*
1308 mspace is an opaque type representing an independent
1309 region of space that supports mspace_malloc, etc.
1310*/
1311typedef void* mspace;
1312
1313/*
1314 create_mspace creates and returns a new independent space with the
1315 given initial capacity, or, if 0, the default granularity size. It
1316 returns null if there is no system memory available to create the
1317 space. If argument locked is non-zero, the space uses a separate
1318 lock to control access. The capacity of the space will grow
1319 dynamically as needed to service mspace_malloc requests. You can
1320 control the sizes of incremental increases of this space by
1321 compiling with a different DEFAULT_GRANULARITY or dynamically
1322 setting with mallopt(M_GRANULARITY, value).
1323*/
1324DLMALLOC_EXPORT mspace create_mspace(size_t capacity, int locked);
1325
1326/*
1327 destroy_mspace destroys the given space, and attempts to return all
1328 of its memory back to the system, returning the total number of
1329 bytes freed. After destruction, the results of access to all memory
1330 used by the space become undefined.
1331*/
1332DLMALLOC_EXPORT size_t destroy_mspace(mspace msp);
1333
1334/*
1335 create_mspace_with_base uses the memory supplied as the initial base
1336 of a new mspace. Part (less than 128*sizeof(size_t) bytes) of this
1337 space is used for bookkeeping, so the capacity must be at least this
1338 large. (Otherwise 0 is returned.) When this initial space is
1339 exhausted, additional memory will be obtained from the system.
1340 Destroying this space will deallocate all additionally allocated
1341 space (if possible) but not the initial base.
1342*/
1343DLMALLOC_EXPORT mspace create_mspace_with_base(void* base, size_t capacity, int locked);
1344
1345/*
1346 mspace_track_large_chunks controls whether requests for large chunks
1347 are allocated in their own untracked mmapped regions, separate from
1348 others in this mspace. By default large chunks are not tracked,
1349 which reduces fragmentation. However, such chunks are not
1350 necessarily released to the system upon destroy_mspace. Enabling
1351 tracking by setting to true may increase fragmentation, but avoids
1352 leakage when relying on destroy_mspace to release all memory
1353 allocated using this space. The function returns the previous
1354 setting.
1355*/
1356DLMALLOC_EXPORT int mspace_track_large_chunks(mspace msp, int enable);
1357
1358
1359/*
1360 mspace_malloc behaves as malloc, but operates within
1361 the given space.
1362*/
1363DLMALLOC_EXPORT void* mspace_malloc(mspace msp, size_t bytes);
1364
1365/*
1366 mspace_free behaves as free, but operates within
1367 the given space.
1368
1369 If compiled with FOOTERS==1, mspace_free is not actually needed.
1370 free may be called instead of mspace_free because freed chunks from
1371 any space are handled by their originating spaces.
1372*/
1373DLMALLOC_EXPORT void mspace_free(mspace msp, void* mem);
1374
1375/*
1376 mspace_realloc behaves as realloc, but operates within
1377 the given space.
1378
1379 If compiled with FOOTERS==1, mspace_realloc is not actually
1380 needed. realloc may be called instead of mspace_realloc because
1381 realloced chunks from any space are handled by their originating
1382 spaces.
1383*/
1384DLMALLOC_EXPORT void* mspace_realloc(mspace msp, void* mem, size_t newsize);
1385
1386/*
1387 mspace_calloc behaves as calloc, but operates within
1388 the given space.
1389*/
1390DLMALLOC_EXPORT void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size);
1391
1392/*
1393 mspace_memalign behaves as memalign, but operates within
1394 the given space.
1395*/
1396DLMALLOC_EXPORT void* mspace_memalign(mspace msp, size_t alignment, size_t bytes);
1397
1398/*
1399 mspace_independent_calloc behaves as independent_calloc, but
1400 operates within the given space.
1401*/
1402DLMALLOC_EXPORT void** mspace_independent_calloc(mspace msp, size_t n_elements,
1403 size_t elem_size, void* chunks[]);
1404
1405/*
1406 mspace_independent_comalloc behaves as independent_comalloc, but
1407 operates within the given space.
1408*/
1409DLMALLOC_EXPORT void** mspace_independent_comalloc(mspace msp, size_t n_elements,
1410 size_t sizes[], void* chunks[]);
1411
1412/*
1413 mspace_footprint() returns the number of bytes obtained from the
1414 system for this space.
1415*/
1416DLMALLOC_EXPORT size_t mspace_footprint(mspace msp);
1417
1418/*
1419 mspace_max_footprint() returns the peak number of bytes obtained from the
1420 system for this space.
1421*/
1422DLMALLOC_EXPORT size_t mspace_max_footprint(mspace msp);
1423
1424
1425#if !NO_MALLINFO
1426/*
1427 mspace_mallinfo behaves as mallinfo, but reports properties of
1428 the given space.
1429*/
1430DLMALLOC_EXPORT struct mallinfo mspace_mallinfo(mspace msp);
1431#endif /* NO_MALLINFO */
1432
1433/*
1434 malloc_usable_size(void* p) behaves the same as malloc_usable_size;
1435*/
1436DLMALLOC_EXPORT size_t mspace_usable_size(const void* mem);
1437
1438/*
1439 mspace_malloc_stats behaves as malloc_stats, but reports
1440 properties of the given space.
1441*/
1442DLMALLOC_EXPORT void mspace_malloc_stats(mspace msp);
1443
1444/*
1445 mspace_trim behaves as malloc_trim, but
1446 operates within the given space.
1447*/
1448DLMALLOC_EXPORT int mspace_trim(mspace msp, size_t pad);
1449
1450/*
1451 An alias for mallopt.
1452*/
1453DLMALLOC_EXPORT int mspace_mallopt(int, int);
1454
1455#endif /* MSPACES */
1456
1457#ifdef __cplusplus
1458} /* end of extern "C" */
1459#endif /* __cplusplus */
1460
1461/*
1462 ========================================================================
1463 To make a fully customizable malloc.h header file, cut everything
1464 above this line, put into file malloc.h, edit to suit, and #include it
1465 on the next line, as well as in programs that use this malloc.
1466 ========================================================================
1467*/
1468
1469/* #include "malloc.h" */
1470
1471/*------------------------------ internal #includes ---------------------- */
1472
1473#ifdef _MSC_VER
1474#pragma warning( disable : 4146 ) /* no "unsigned" warnings */
1475#endif /* _MSC_VER */
1476#if !NO_MALLOC_STATS
1477#include <stdio.h> /* for printing in malloc_stats */
1478#endif /* NO_MALLOC_STATS */
1479#ifndef LACKS_ERRNO_H
1480#include <errno.h> /* for MALLOC_FAILURE_ACTION */
1481#endif /* LACKS_ERRNO_H */
1482#ifdef DEBUG
1483#if ABORT_ON_ASSERT_FAILURE
1484#undef assert
1485#define assert(x) if(!(x)) ABORT
1486#else /* ABORT_ON_ASSERT_FAILURE */
1487#include <assert.h>
1488#endif /* ABORT_ON_ASSERT_FAILURE */
1489#else /* DEBUG */
1490#ifndef assert
1491#define assert(x)
1492#endif
1493#define DEBUG 0
1494#endif /* DEBUG */
1495#if !defined(WIN32) && !defined(LACKS_TIME_H)
1496#include <time.h> /* for magic initialization */
1497#endif /* WIN32 */
1498#ifndef LACKS_STDLIB_H
1499#include <stdlib.h> /* for abort() */
1500#endif /* LACKS_STDLIB_H */
1501#ifndef LACKS_STRING_H
1502#include <string.h> /* for memset etc */
1503#endif /* LACKS_STRING_H */
1504#if USE_BUILTIN_FFS
1505#ifndef LACKS_STRINGS_H
1506#include <strings.h> /* for ffs */
1507#endif /* LACKS_STRINGS_H */
1508#endif /* USE_BUILTIN_FFS */
1509#if HAVE_MMAP
1510#ifndef LACKS_SYS_MMAN_H
1511/* On some versions of linux, mremap decl in mman.h needs __USE_GNU set */
1512#if (defined(linux) && !defined(__USE_GNU))
1513#define __USE_GNU 1
1514#include <sys/mman.h> /* for mmap */
1515#undef __USE_GNU
1516#else
1517#include <sys/mman.h> /* for mmap */
1518#endif /* linux */
1519#endif /* LACKS_SYS_MMAN_H */
1520#ifndef LACKS_FCNTL_H
1521#include <fcntl.h>
1522#endif /* LACKS_FCNTL_H */
1523#endif /* HAVE_MMAP */
1524#ifndef LACKS_UNISTD_H
1525#include <unistd.h> /* for sbrk, sysconf */
1526#else /* LACKS_UNISTD_H */
1527#if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__)
1528extern void* sbrk(ptrdiff_t);
1529#endif /* FreeBSD etc */
1530#endif /* LACKS_UNISTD_H */
1531
1532/* Declarations for locking */
1533#if USE_LOCKS
1534#ifndef WIN32
1535#if defined (__SVR4) && defined (__sun) /* solaris */
1536#include <thread.h>
1537#elif !defined(LACKS_SCHED_H)
1538#include <sched.h>
1539#endif /* solaris or LACKS_SCHED_H */
1540#if (defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0) || !USE_SPIN_LOCKS
1541#include <pthread.h>
1542#endif /* USE_RECURSIVE_LOCKS ... */
1543#elif defined(_MSC_VER)
1544#ifndef _M_AMD64
1545/* These are already defined on AMD64 builds */
1546#ifdef __cplusplus
1547extern "C" {
1548#endif /* __cplusplus */
1549LONG __cdecl _InterlockedCompareExchange(LONG volatile *Dest, LONG Exchange, LONG Comp);
1550LONG __cdecl _InterlockedExchange(LONG volatile *Target, LONG Value);
1551#ifdef __cplusplus
1552}
1553#endif /* __cplusplus */
1554#endif /* _M_AMD64 */
1555#pragma intrinsic (_InterlockedCompareExchange)
1556#pragma intrinsic (_InterlockedExchange)
1557#define interlockedcompareexchange _InterlockedCompareExchange
1558#define interlockedexchange _InterlockedExchange
1559#elif defined(WIN32) && defined(__GNUC__)
1560#define interlockedcompareexchange(a, b, c) __sync_val_compare_and_swap(a, c, b)
1561#define interlockedexchange __sync_lock_test_and_set
1562#endif /* Win32 */
1563#else /* USE_LOCKS */
1564#endif /* USE_LOCKS */
1565
1566#ifndef LOCK_AT_FORK
1567#define LOCK_AT_FORK 0
1568#endif
1569
1570/* Declarations for bit scanning on win32 */
1571#if defined(_MSC_VER) && _MSC_VER>=1300
1572#ifndef BitScanForward /* Try to avoid pulling in WinNT.h */
1573#ifdef __cplusplus
1574extern "C" {
1575#endif /* __cplusplus */
1576unsigned char _BitScanForward(unsigned long *index, unsigned long mask);
1577unsigned char _BitScanReverse(unsigned long *index, unsigned long mask);
1578#ifdef __cplusplus
1579}
1580#endif /* __cplusplus */
1581
1582#define BitScanForward _BitScanForward
1583#define BitScanReverse _BitScanReverse
1584#pragma intrinsic(_BitScanForward)
1585#pragma intrinsic(_BitScanReverse)
1586#endif /* BitScanForward */
1587#endif /* defined(_MSC_VER) && _MSC_VER>=1300 */
1588
1589#ifndef WIN32
1590#ifndef malloc_getpagesize
1591# ifdef _SC_PAGESIZE /* some SVR4 systems omit an underscore */
1592# ifndef _SC_PAGE_SIZE
1593# define _SC_PAGE_SIZE _SC_PAGESIZE
1594# endif
1595# endif
1596# ifdef _SC_PAGE_SIZE
1597# define malloc_getpagesize sysconf(_SC_PAGE_SIZE)
1598# else
1599# if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE)
1600 extern int getpagesize();
1601# define malloc_getpagesize getpagesize()
1602# else
1603# ifdef WIN32 /* use supplied emulation of getpagesize */
1604# define malloc_getpagesize getpagesize()
1605# else
1606# ifndef LACKS_SYS_PARAM_H
1607# include <sys/param.h>
1608# endif
1609# ifdef EXEC_PAGESIZE
1610# define malloc_getpagesize EXEC_PAGESIZE
1611# else
1612# ifdef NBPG
1613# ifndef CLSIZE
1614# define malloc_getpagesize NBPG
1615# else
1616# define malloc_getpagesize (NBPG * CLSIZE)
1617# endif
1618# else
1619# ifdef NBPC
1620# define malloc_getpagesize NBPC
1621# else
1622# ifdef PAGESIZE
1623# define malloc_getpagesize PAGESIZE
1624# else /* just guess */
1625# define malloc_getpagesize ((size_t)4096U)
1626# endif
1627# endif
1628# endif
1629# endif
1630# endif
1631# endif
1632# endif
1633#endif
1634#endif
1635
1636/* ------------------- size_t and alignment properties -------------------- */
1637
1638/* The byte and bit size of a size_t */
1639#define SIZE_T_SIZE (sizeof(size_t))
1640#define SIZE_T_BITSIZE (sizeof(size_t) << 3)
1641
1642/* Some constants coerced to size_t */
1643/* Annoying but necessary to avoid errors on some platforms */
1644#define SIZE_T_ZERO ((size_t)0)
1645#define SIZE_T_ONE ((size_t)1)
1646#define SIZE_T_TWO ((size_t)2)
1647#define SIZE_T_FOUR ((size_t)4)
1648#define TWO_SIZE_T_SIZES (SIZE_T_SIZE<<1)
1649#define FOUR_SIZE_T_SIZES (SIZE_T_SIZE<<2)
1650#define SIX_SIZE_T_SIZES (FOUR_SIZE_T_SIZES+TWO_SIZE_T_SIZES)
1651#define HALF_MAX_SIZE_T (MAX_SIZE_T / 2U)
1652
1653/* The bit mask value corresponding to MALLOC_ALIGNMENT */
1654#define CHUNK_ALIGN_MASK (MALLOC_ALIGNMENT - SIZE_T_ONE)
1655
1656/* True if address a has acceptable alignment */
1657#define is_aligned(A) (((size_t)((A)) & (CHUNK_ALIGN_MASK)) == 0)
1658
1659/* the number of bytes to offset an address to align it */
1660#define align_offset(A)\
1661 ((((size_t)(A) & CHUNK_ALIGN_MASK) == 0)? 0 :\
1662 ((MALLOC_ALIGNMENT - ((size_t)(A) & CHUNK_ALIGN_MASK)) & CHUNK_ALIGN_MASK))
1663
1664/* -------------------------- MMAP preliminaries ------------------------- */
1665
1666/*
1667 If HAVE_MORECORE or HAVE_MMAP are false, we just define calls and
1668 checks to fail so compiler optimizer can delete code rather than
1669 using so many "#if"s.
1670*/
1671
1672
1673/* MORECORE and MMAP must return MFAIL on failure */
1674#define MFAIL ((void*)(MAX_SIZE_T))
1675#define CMFAIL ((char*)(MFAIL)) /* defined for convenience */
1676
1677#if HAVE_MMAP
1678
1679#ifndef WIN32
1680#define MUNMAP_DEFAULT(a, s) munmap((a), (s))
1681#define MMAP_PROT (PROT_READ|PROT_WRITE)
1682#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
1683#define MAP_ANONYMOUS MAP_ANON
1684#endif /* MAP_ANON */
1685#ifdef MAP_ANONYMOUS
1686#define MMAP_FLAGS (MAP_PRIVATE|MAP_ANONYMOUS)
1687#define MMAP_DEFAULT(s) mmap(0, (s), MMAP_PROT, MMAP_FLAGS, -1, 0)
1688#else /* MAP_ANONYMOUS */
1689/*
1690 Nearly all versions of mmap support MAP_ANONYMOUS, so the following
1691 is unlikely to be needed, but is supplied just in case.
1692*/
1693#define MMAP_FLAGS (MAP_PRIVATE)
1694static int dev_zero_fd = -1; /* Cached file descriptor for /dev/zero. */
1695#define MMAP_DEFAULT(s) ((dev_zero_fd < 0) ? \
1696 (dev_zero_fd = open("/dev/zero", O_RDWR), \
1697 mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0)) : \
1698 mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0))
1699#endif /* MAP_ANONYMOUS */
1700
1701#define DIRECT_MMAP_DEFAULT(s) MMAP_DEFAULT(s)
1702
1703#else /* WIN32 */
1704
1705/* Win32 MMAP via VirtualAlloc */
1706SDL_FORCE_INLINE void* win32mmap(size_t size) {
1707 void* ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE);
1708 return (ptr != 0)? ptr: MFAIL;
1709}
1710
1711/* For direct MMAP, use MEM_TOP_DOWN to minimize interference */
1712SDL_FORCE_INLINE void* win32direct_mmap(size_t size) {
1713 void* ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT|MEM_TOP_DOWN,
1714 PAGE_READWRITE);
1715 return (ptr != 0)? ptr: MFAIL;
1716}
1717
/* This function supports releasing coalesced segments */
1719SDL_FORCE_INLINE int win32munmap(void* ptr, size_t size) {
1720 MEMORY_BASIC_INFORMATION minfo;
1721 char* cptr = (char*)ptr;
1722 while (size) {
1723 if (VirtualQuery(cptr, &minfo, sizeof(minfo)) == 0)
1724 return -1;
1725 if (minfo.BaseAddress != cptr || minfo.AllocationBase != cptr ||
1726 minfo.State != MEM_COMMIT || minfo.RegionSize > size)
1727 return -1;
1728 if (VirtualFree(cptr, 0, MEM_RELEASE) == 0)
1729 return -1;
1730 cptr += minfo.RegionSize;
1731 size -= minfo.RegionSize;
1732 }
1733 return 0;
1734}
1735
1736#define MMAP_DEFAULT(s) win32mmap(s)
1737#define MUNMAP_DEFAULT(a, s) win32munmap((a), (s))
1738#define DIRECT_MMAP_DEFAULT(s) win32direct_mmap(s)
1739#endif /* WIN32 */
1740#endif /* HAVE_MMAP */
1741
1742#if HAVE_MREMAP
1743#ifndef WIN32
1744#define MREMAP_DEFAULT(addr, osz, nsz, mv) mremap((addr), (osz), (nsz), (mv))
1745#endif /* WIN32 */
1746#endif /* HAVE_MREMAP */
1747
1748/**
1749 * Define CALL_MORECORE
1750 */
1751#if HAVE_MORECORE
1752 #ifdef MORECORE
1753 #define CALL_MORECORE(S) MORECORE(S)
1754 #else /* MORECORE */
1755 #define CALL_MORECORE(S) MORECORE_DEFAULT(S)
1756 #endif /* MORECORE */
1757#else /* HAVE_MORECORE */
1758 #define CALL_MORECORE(S) MFAIL
1759#endif /* HAVE_MORECORE */
1760
1761/**
1762 * Define CALL_MMAP/CALL_MUNMAP/CALL_DIRECT_MMAP
1763 */
1764#if HAVE_MMAP
1765 #define USE_MMAP_BIT (SIZE_T_ONE)
1766
1767 #ifdef MMAP
1768 #define CALL_MMAP(s) MMAP(s)
1769 #else /* MMAP */
1770 #define CALL_MMAP(s) MMAP_DEFAULT(s)
1771 #endif /* MMAP */
1772 #ifdef MUNMAP
1773 #define CALL_MUNMAP(a, s) MUNMAP((a), (s))
1774 #else /* MUNMAP */
1775 #define CALL_MUNMAP(a, s) MUNMAP_DEFAULT((a), (s))
1776 #endif /* MUNMAP */
1777 #ifdef DIRECT_MMAP
1778 #define CALL_DIRECT_MMAP(s) DIRECT_MMAP(s)
1779 #else /* DIRECT_MMAP */
1780 #define CALL_DIRECT_MMAP(s) DIRECT_MMAP_DEFAULT(s)
1781 #endif /* DIRECT_MMAP */
1782#else /* HAVE_MMAP */
1783 #define USE_MMAP_BIT (SIZE_T_ZERO)
1784
1785 #define MMAP(s) MFAIL
1786 #define MUNMAP(a, s) (-1)
1787 #define DIRECT_MMAP(s) MFAIL
1788 #define CALL_DIRECT_MMAP(s) DIRECT_MMAP(s)
1789 #define CALL_MMAP(s) MMAP(s)
1790 #define CALL_MUNMAP(a, s) MUNMAP((a), (s))
1791#endif /* HAVE_MMAP */
1792
1793/**
1794 * Define CALL_MREMAP
1795 */
1796#if HAVE_MMAP && HAVE_MREMAP
1797 #ifdef MREMAP
1798 #define CALL_MREMAP(addr, osz, nsz, mv) MREMAP((addr), (osz), (nsz), (mv))
1799 #else /* MREMAP */
1800 #define CALL_MREMAP(addr, osz, nsz, mv) MREMAP_DEFAULT((addr), (osz), (nsz), (mv))
1801 #endif /* MREMAP */
1802#else /* HAVE_MMAP && HAVE_MREMAP */
1803 #define CALL_MREMAP(addr, osz, nsz, mv) MFAIL
1804#endif /* HAVE_MMAP && HAVE_MREMAP */
1805
/* mstate bit set if contiguous morecore disabled or failed */
1807#define USE_NONCONTIGUOUS_BIT (4U)
1808
1809/* segment bit set in create_mspace_with_base */
1810#define EXTERN_BIT (8U)
1811
1812
1813/* --------------------------- Lock preliminaries ------------------------ */
1814
1815/*
1816 When locks are defined, there is one global lock, plus
1817 one per-mspace lock.
1818
  The global lock ensures that mparams.magic and other unique
1820 mparams values are initialized only once. It also protects
1821 sequences of calls to MORECORE. In many cases sys_alloc requires
1822 two calls, that should not be interleaved with calls by other
1823 threads. This does not protect against direct calls to MORECORE
1824 by other threads not using this lock, so there is still code to
1825 cope the best we can on interference.
1826
1827 Per-mspace locks surround calls to malloc, free, etc.
1828 By default, locks are simple non-reentrant mutexes.
1829
1830 Because lock-protected regions generally have bounded times, it is
1831 OK to use the supplied simple spinlocks. Spinlocks are likely to
1832 improve performance for lightly contended applications, but worsen
1833 performance under heavy contention.
1834
1835 If USE_LOCKS is > 1, the definitions of lock routines here are
1836 bypassed, in which case you will need to define the type MLOCK_T,
1837 and at least INITIAL_LOCK, DESTROY_LOCK, ACQUIRE_LOCK, RELEASE_LOCK
1838 and TRY_LOCK. You must also declare a
1839 static MLOCK_T malloc_global_mutex = { initialization values };.
1840
1841*/
1842
1843#if !USE_LOCKS
1844#define USE_LOCK_BIT (0U)
1845#define INITIAL_LOCK(l) (0)
1846#define DESTROY_LOCK(l) (0)
1847#define ACQUIRE_MALLOC_GLOBAL_LOCK()
1848#define RELEASE_MALLOC_GLOBAL_LOCK()
1849
1850#else
1851#if USE_LOCKS > 1
1852/* ----------------------- User-defined locks ------------------------ */
1853/* Define your own lock implementation here */
1854/* #define INITIAL_LOCK(lk) ... */
1855/* #define DESTROY_LOCK(lk) ... */
1856/* #define ACQUIRE_LOCK(lk) ... */
1857/* #define RELEASE_LOCK(lk) ... */
1858/* #define TRY_LOCK(lk) ... */
1859/* static MLOCK_T malloc_global_mutex = ... */
1860
1861#elif USE_SPIN_LOCKS
1862
1863/* First, define CAS_LOCK and CLEAR_LOCK on ints */
1864/* Note CAS_LOCK defined to return 0 on success */
1865
1866#if defined(__GNUC__)&& (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1))
1867#define CAS_LOCK(sl) __sync_lock_test_and_set(sl, 1)
1868#define CLEAR_LOCK(sl) __sync_lock_release(sl)
1869
1870#elif (defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__)))
1871/* Custom spin locks for older gcc on x86 */
/* Try to take the spin lock word at *sl with a lock-prefixed cmpxchg.
   Returns the previous value of *sl: 0 means the lock was acquired,
   nonzero means someone else already holds it (CAS_LOCK contract). */
SDL_FORCE_INLINE int x86_cas_lock(int *sl) {
  int ret;
  int val = 1;
  int cmp = 0;
  /* cmpxchgl: if *sl == eax (seeded with cmp == 0) then *sl = val,
     and eax receives the old *sl either way. The "memory" and "cc"
     clobbers stop the compiler from moving accesses across the lock. */
  __asm__ __volatile__ ("lock; cmpxchgl %1, %2"
                        : "=a" (ret)
                        : "r" (val), "m" (*(sl)), "0"(cmp)
                        : "memory", "cc");
  return ret;
}
1882
/* Release the spin lock word at *sl by atomically storing 0
   (CLEAR_LOCK contract). The lock must currently be held. */
SDL_FORCE_INLINE void x86_clear_lock(int* sl) {
  assert(*sl != 0);
  int prev = 0;
  int ret;
  /* xchgl is implicitly locked on x86; swapping in 0 both releases the
     lock and acts as a full barrier ("memory" clobber). The old value
     lands in `ret`, which is intentionally unused. */
  __asm__ __volatile__ ("lock; xchgl %0, %1"
                        : "=r" (ret)
                        : "m" (*(sl)), "0"(prev)
                        : "memory");
}
1892
1893#define CAS_LOCK(sl) x86_cas_lock(sl)
1894#define CLEAR_LOCK(sl) x86_clear_lock(sl)
1895
1896#else /* Win32 MSC */
1897#define CAS_LOCK(sl) interlockedexchange(sl, (LONG)1)
1898#define CLEAR_LOCK(sl) interlockedexchange (sl, (LONG)0)
1899
#endif /* ... gcc spin locks ... */
1901
1902/* How to yield for a spin lock */
1903#define SPINS_PER_YIELD 63
1904#if defined(_MSC_VER)
1905#define SLEEP_EX_DURATION 50 /* delay for yield/sleep */
1906#define SPIN_LOCK_YIELD SleepEx(SLEEP_EX_DURATION, FALSE)
1907#elif defined (__SVR4) && defined (__sun) /* solaris */
1908#define SPIN_LOCK_YIELD thr_yield();
1909#elif !defined(LACKS_SCHED_H)
1910#define SPIN_LOCK_YIELD sched_yield();
1911#else
1912#define SPIN_LOCK_YIELD
1913#endif /* ... yield ... */
1914
1915#if !defined(USE_RECURSIVE_LOCKS) || USE_RECURSIVE_LOCKS == 0
1916/* Plain spin locks use single word (embedded in malloc_states) */
1917static int spin_acquire_lock(volatile long *sl) {
1918 int spins = 0;
1919 while (*sl != 0 || CAS_LOCK(sl)) {
1920 if ((++spins & SPINS_PER_YIELD) == 0) {
1921 SPIN_LOCK_YIELD;
1922 }
1923 }
1924 return 0;
1925}
1926
1927#define MLOCK_T volatile long
1928#define TRY_LOCK(sl) !CAS_LOCK(sl)
1929#define RELEASE_LOCK(sl) CLEAR_LOCK(sl)
1930#define ACQUIRE_LOCK(sl) (CAS_LOCK(sl)? spin_acquire_lock(sl) : 0)
1931#define INITIAL_LOCK(sl) (*sl = 0)
1932#define DESTROY_LOCK(sl) (0)
1933static MLOCK_T malloc_global_mutex = 0;
1934
1935#else /* USE_RECURSIVE_LOCKS */
1936/* types for lock owners */
1937#ifdef WIN32
1938#define THREAD_ID_T DWORD
1939#define CURRENT_THREAD GetCurrentThreadId()
1940#define EQ_OWNER(X,Y) ((X) == (Y))
1941#else
1942/*
1943 Note: the following assume that pthread_t is a type that can be
1944 initialized to (casted) zero. If this is not the case, you will need to
1945 somehow redefine these or not use spin locks.
1946*/
1947#define THREAD_ID_T pthread_t
1948#define CURRENT_THREAD pthread_self()
1949#define EQ_OWNER(X,Y) pthread_equal(X, Y)
1950#endif
1951
1952struct malloc_recursive_lock {
1953 int sl;
1954 unsigned int c;
1955 THREAD_ID_T threadid;
1956};
1957
1958#define MLOCK_T struct malloc_recursive_lock
1959static MLOCK_T malloc_global_mutex = { 0, 0, (THREAD_ID_T)0};
1960
1961SDL_FORCE_INLINE void recursive_release_lock(MLOCK_T *lk) {
1962 assert(lk->sl != 0);
1963 if (--lk->c == 0) {
1964 CLEAR_LOCK(&lk->sl);
1965 }
1966}
1967
/* Acquire a recursive spin lock: spin until either the underlying word
   is won via CAS (then record ourselves as owner with count 1), or we
   discover we already own it (then just bump the count). Yields every
   SPINS_PER_YIELD failed spins. Always returns 0. */
SDL_FORCE_INLINE int recursive_acquire_lock(MLOCK_T *lk) {
  THREAD_ID_T mythreadid = CURRENT_THREAD;
  int spins = 0;
  for (;;) {
    /* Cheap volatile read first; only CAS when the word looks free. */
    if (*((volatile int *)(&lk->sl)) == 0) {
      if (!CAS_LOCK(&lk->sl)) {
        /* Won the lock: ownership fields are written while holding it. */
        lk->threadid = mythreadid;
        lk->c = 1;
        return 0;
      }
    }
    else if (EQ_OWNER(lk->threadid, mythreadid)) {
      /* Re-entry by the current owner: no CAS needed. */
      ++lk->c;
      return 0;
    }
    if ((++spins & SPINS_PER_YIELD) == 0) {
      SPIN_LOCK_YIELD;
    }
  }
}
1988
1989SDL_FORCE_INLINE int recursive_try_lock(MLOCK_T *lk) {
1990 THREAD_ID_T mythreadid = CURRENT_THREAD;
1991 if (*((volatile int *)(&lk->sl)) == 0) {
1992 if (!CAS_LOCK(&lk->sl)) {
1993 lk->threadid = mythreadid;
1994 lk->c = 1;
1995 return 1;
1996 }
1997 }
1998 else if (EQ_OWNER(lk->threadid, mythreadid)) {
1999 ++lk->c;
2000 return 1;
2001 }
2002 return 0;
2003}
2004
2005#define RELEASE_LOCK(lk) recursive_release_lock(lk)
2006#define TRY_LOCK(lk) recursive_try_lock(lk)
2007#define ACQUIRE_LOCK(lk) recursive_acquire_lock(lk)
2008#define INITIAL_LOCK(lk) ((lk)->threadid = (THREAD_ID_T)0, (lk)->sl = 0, (lk)->c = 0)
2009#define DESTROY_LOCK(lk) (0)
2010#endif /* USE_RECURSIVE_LOCKS */
2011
2012#elif defined(WIN32) /* Win32 critical sections */
2013#define MLOCK_T CRITICAL_SECTION
2014#define ACQUIRE_LOCK(lk) (EnterCriticalSection(lk), 0)
2015#define RELEASE_LOCK(lk) LeaveCriticalSection(lk)
2016#define TRY_LOCK(lk) TryEnterCriticalSection(lk)
2017#define INITIAL_LOCK(lk) (!InitializeCriticalSectionAndSpinCount((lk), 0x80000000|4000))
2018#define DESTROY_LOCK(lk) (DeleteCriticalSection(lk), 0)
2019#define NEED_GLOBAL_LOCK_INIT
2020
2021static MLOCK_T malloc_global_mutex;
2022static volatile LONG malloc_global_mutex_status;
2023
2024/* Use spin loop to initialize global lock */
static void init_malloc_global_mutex() {
  /* Race-safe one-time init. malloc_global_mutex_status is a tri-state:
     0 = uninitialized, < 0 = another thread is initializing, > 0 = ready. */
  for (;;) {
    long stat = malloc_global_mutex_status;
    if (stat > 0)
      return;
    /* transition to < 0 while initializing, then to > 0 when done */
    if (stat == 0 &&
        interlockedcompareexchange(&malloc_global_mutex_status, (LONG)-1, (LONG)0) == 0) {
      InitializeCriticalSection(&malloc_global_mutex);
      interlockedexchange(&malloc_global_mutex_status, (LONG)1);
      return;
    }
    /* Someone else won the CAS: yield the timeslice and re-check. */
    SleepEx(0, FALSE);
  }
}
2040
2041#else /* pthreads-based locks */
2042#define MLOCK_T pthread_mutex_t
2043#define ACQUIRE_LOCK(lk) pthread_mutex_lock(lk)
2044#define RELEASE_LOCK(lk) pthread_mutex_unlock(lk)
2045#define TRY_LOCK(lk) (!pthread_mutex_trylock(lk))
2046#define INITIAL_LOCK(lk) pthread_init_lock(lk)
2047#define DESTROY_LOCK(lk) pthread_mutex_destroy(lk)
2048
2049#if defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0 && defined(linux) && !defined(PTHREAD_MUTEX_RECURSIVE)
2050/* Cope with old-style linux recursive lock initialization by adding */
2051/* skipped internal declaration from pthread.h */
2052extern int pthread_mutexattr_setkind_np __P ((pthread_mutexattr_t *__attr,
2053 int __kind));
2054#define PTHREAD_MUTEX_RECURSIVE PTHREAD_MUTEX_RECURSIVE_NP
2055#define pthread_mutexattr_settype(x,y) pthread_mutexattr_setkind_np(x,y)
2056#endif /* USE_RECURSIVE_LOCKS ... */
2057
2058static MLOCK_T malloc_global_mutex = PTHREAD_MUTEX_INITIALIZER;
2059
2060static int pthread_init_lock (MLOCK_T *lk) {
2061 pthread_mutexattr_t attr;
2062 if (pthread_mutexattr_init(&attr)) return 1;
2063#if defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0
2064 if (pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE)) return 1;
2065#endif
2066 if (pthread_mutex_init(lk, &attr)) return 1;
2067 if (pthread_mutexattr_destroy(&attr)) return 1;
2068 return 0;
2069}
2070
2071#endif /* ... lock types ... */
2072
2073/* Common code for all lock types */
2074#define USE_LOCK_BIT (2U)
2075
2076#ifndef ACQUIRE_MALLOC_GLOBAL_LOCK
2077#define ACQUIRE_MALLOC_GLOBAL_LOCK() ACQUIRE_LOCK(&malloc_global_mutex);
2078#endif
2079
2080#ifndef RELEASE_MALLOC_GLOBAL_LOCK
2081#define RELEASE_MALLOC_GLOBAL_LOCK() RELEASE_LOCK(&malloc_global_mutex);
2082#endif
2083
2084#endif /* USE_LOCKS */
2085
2086/* ----------------------- Chunk representations ------------------------ */
2087
2088/*
2089 (The following includes lightly edited explanations by Colin Plumb.)
2090
2091 The malloc_chunk declaration below is misleading (but accurate and
2092 necessary). It declares a "view" into memory allowing access to
2093 necessary fields at known offsets from a given base.
2094
2095 Chunks of memory are maintained using a `boundary tag' method as
2096 originally described by Knuth. (See the paper by Paul Wilson
2097 ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a survey of such
2098 techniques.) Sizes of free chunks are stored both in the front of
2099 each chunk and at the end. This makes consolidating fragmented
2100 chunks into bigger chunks fast. The head fields also hold bits
2101 representing whether chunks are free or in use.
2102
2103 Here are some pictures to make it clearer. They are "exploded" to
2104 show that the state of a chunk can be thought of as extending from
2105 the high 31 bits of the head field of its header through the
2106 prev_foot and PINUSE_BIT bit of the following chunk header.
2107
2108 A chunk that's in use looks like:
2109
2110 chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2111 | Size of previous chunk (if P = 0) |
2112 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2113 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |P|
2114 | Size of this chunk 1| +-+
2115 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2116 | |
2117 +- -+
2118 | |
2119 +- -+
2120 | :
2121 +- size - sizeof(size_t) available payload bytes -+
2122 : |
2123 chunk-> +- -+
2124 | |
2125 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2126 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |1|
2127 | Size of next chunk (may or may not be in use) | +-+
2128 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2129
2130 And if it's free, it looks like this:
2131
2132 chunk-> +- -+
2133 | User payload (must be in use, or we would have merged!) |
2134 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2135 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |P|
2136 | Size of this chunk 0| +-+
2137 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2138 | Next pointer |
2139 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2140 | Prev pointer |
2141 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2142 | :
2143 +- size - sizeof(struct chunk) unused bytes -+
2144 : |
2145 chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2146 | Size of this chunk |
2147 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2148 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |0|
2149 | Size of next chunk (must be in use, or we would have merged)| +-+
2150 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2151 | :
2152 +- User payload -+
2153 : |
2154 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2155 |0|
2156 +-+
2157 Note that since we always merge adjacent free chunks, the chunks
2158 adjacent to a free chunk must be in use.
2159
2160 Given a pointer to a chunk (which can be derived trivially from the
2161 payload pointer) we can, in O(1) time, find out whether the adjacent
2162 chunks are free, and if so, unlink them from the lists that they
2163 are on and merge them with the current chunk.
2164
2165 Chunks always begin on even word boundaries, so the mem portion
2166 (which is returned to the user) is also on an even word boundary, and
2167 thus at least double-word aligned.
2168
2169 The P (PINUSE_BIT) bit, stored in the unused low-order bit of the
2170 chunk size (which is always a multiple of two words), is an in-use
2171 bit for the *previous* chunk. If that bit is *clear*, then the
2172 word before the current chunk size contains the previous chunk
2173 size, and can be used to find the front of the previous chunk.
2174 The very first chunk allocated always has this bit set, preventing
2175 access to non-existent (or non-owned) memory. If pinuse is set for
2176 any given chunk, then you CANNOT determine the size of the
2177 previous chunk, and might even get a memory addressing fault when
2178 trying to do so.
2179
2180 The C (CINUSE_BIT) bit, stored in the unused second-lowest bit of
2181 the chunk size redundantly records whether the current chunk is
2182 inuse (unless the chunk is mmapped). This redundancy enables usage
2183 checks within free and realloc, and reduces indirection when freeing
2184 and consolidating chunks.
2185
2186 Each freshly allocated chunk must have both cinuse and pinuse set.
2187 That is, each allocated chunk borders either a previously allocated
2188 and still in-use chunk, or the base of its memory arena. This is
2189 ensured by making all allocations from the `lowest' part of any
2190 found chunk. Further, no free chunk physically borders another one,
2191 so each free chunk is known to be preceded and followed by either
2192 inuse chunks or the ends of memory.
2193
2194 Note that the `foot' of the current chunk is actually represented
2195 as the prev_foot of the NEXT chunk. This makes it easier to
2196 deal with alignments etc but can be very confusing when trying
2197 to extend or adapt this code.
2198
2199 The exceptions to all this are
2200
2201 1. The special chunk `top' is the top-most available chunk (i.e.,
2202 the one bordering the end of available memory). It is treated
2203 specially. Top is never included in any bin, is used only if
2204 no other chunk is available, and is released back to the
2205 system if it is very large (see M_TRIM_THRESHOLD). In effect,
2206 the top chunk is treated as larger (and thus less well
2207 fitting) than any other available chunk. The top chunk
2208 doesn't update its trailing size field since there is no next
2209 contiguous chunk that would have to index off it. However,
2210 space is still allocated for it (TOP_FOOT_SIZE) to enable
2211 separation or merging when space is extended.
2212
     2. Chunks allocated via mmap, have both cinuse and pinuse bits
2214 cleared in their head fields. Because they are allocated
2215 one-by-one, each must carry its own prev_foot field, which is
2216 also used to hold the offset this chunk has within its mmapped
2217 region, which is needed to preserve alignment. Each mmapped
2218 chunk is trailed by the first two fields of a fake next-chunk
2219 for sake of usage checks.
2220
2221*/
2222
/* The chunk "view" described above. Field validity depends on chunk
   state: fd/bk are meaningful only while the chunk is free and on a
   small-bin list; prev_foot is meaningful only when the previous
   physical chunk is free (PINUSE_BIT of head clear) or, for mmapped
   chunks, holds the alignment offset within the mmapped region. */
struct malloc_chunk {
  size_t               prev_foot;  /* Size of previous chunk (if free).  */
  size_t               head;       /* Size and inuse bits. */
  struct malloc_chunk* fd;         /* double links -- used only if free. */
  struct malloc_chunk* bk;
};

typedef struct malloc_chunk  mchunk;
typedef struct malloc_chunk* mchunkptr;
typedef struct malloc_chunk* sbinptr;  /* The type of bins of chunks */
typedef unsigned int bindex_t;         /* Described below */
typedef unsigned int binmap_t;         /* Described below */
typedef unsigned int flag_t;           /* The type of various bit flag sets */
2236
2237/* ------------------- Chunks sizes and alignments ----------------------- */
2238
2239#define MCHUNK_SIZE (sizeof(mchunk))
2240
2241#if FOOTERS
2242#define CHUNK_OVERHEAD (TWO_SIZE_T_SIZES)
2243#else /* FOOTERS */
2244#define CHUNK_OVERHEAD (SIZE_T_SIZE)
2245#endif /* FOOTERS */
2246
2247/* MMapped chunks need a second word of overhead ... */
2248#define MMAP_CHUNK_OVERHEAD (TWO_SIZE_T_SIZES)
2249/* ... and additional padding for fake next-chunk at foot */
2250#define MMAP_FOOT_PAD (FOUR_SIZE_T_SIZES)
2251
2252/* The smallest size we can malloc is an aligned minimal chunk */
2253#define MIN_CHUNK_SIZE\
2254 ((MCHUNK_SIZE + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
2255
2256/* conversion from malloc headers to user pointers, and back */
2257#define chunk2mem(p) ((void*)((char*)(p) + TWO_SIZE_T_SIZES))
2258#define mem2chunk(mem) ((mchunkptr)((char*)(mem) - TWO_SIZE_T_SIZES))
2259/* chunk associated with aligned address A */
2260#define align_as_chunk(A) (mchunkptr)((A) + align_offset(chunk2mem(A)))
2261
2262/* Bounds on request (not chunk) sizes. */
2263#define MAX_REQUEST ((-MIN_CHUNK_SIZE) << 2)
2264#define MIN_REQUEST (MIN_CHUNK_SIZE - CHUNK_OVERHEAD - SIZE_T_ONE)
2265
2266/* pad request bytes into a usable size */
2267#define pad_request(req) \
2268 (((req) + CHUNK_OVERHEAD + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
2269
2270/* pad request, checking for minimum (but not maximum) */
2271#define request2size(req) \
2272 (((req) < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(req))
2273
2274
2275/* ------------------ Operations on head and foot fields ----------------- */
2276
2277/*
2278 The head field of a chunk is or'ed with PINUSE_BIT when previous
2279 adjacent chunk in use, and or'ed with CINUSE_BIT if this chunk is in
2280 use, unless mmapped, in which case both bits are cleared.
2281
2282 FLAG4_BIT is not used by this malloc, but might be useful in extensions.
2283*/
2284
2285#define PINUSE_BIT (SIZE_T_ONE)
2286#define CINUSE_BIT (SIZE_T_TWO)
2287#define FLAG4_BIT (SIZE_T_FOUR)
2288#define INUSE_BITS (PINUSE_BIT|CINUSE_BIT)
2289#define FLAG_BITS (PINUSE_BIT|CINUSE_BIT|FLAG4_BIT)
2290
2291/* Head value for fenceposts */
2292#define FENCEPOST_HEAD (INUSE_BITS|SIZE_T_SIZE)
2293
2294/* extraction of fields from head words */
2295#define cinuse(p) ((p)->head & CINUSE_BIT)
2296#define pinuse(p) ((p)->head & PINUSE_BIT)
2297#define flag4inuse(p) ((p)->head & FLAG4_BIT)
2298#define is_inuse(p) (((p)->head & INUSE_BITS) != PINUSE_BIT)
2299#define is_mmapped(p) (((p)->head & INUSE_BITS) == 0)
2300
2301#define chunksize(p) ((p)->head & ~(FLAG_BITS))
2302
2303#define clear_pinuse(p) ((p)->head &= ~PINUSE_BIT)
2304#define set_flag4(p) ((p)->head |= FLAG4_BIT)
2305#define clear_flag4(p) ((p)->head &= ~FLAG4_BIT)
2306
2307/* Treat space at ptr +/- offset as a chunk */
2308#define chunk_plus_offset(p, s) ((mchunkptr)(((char*)(p)) + (s)))
2309#define chunk_minus_offset(p, s) ((mchunkptr)(((char*)(p)) - (s)))
2310
2311/* Ptr to next or previous physical malloc_chunk. */
2312#define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->head & ~FLAG_BITS)))
2313#define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_foot) ))
2314
2315/* extract next chunk's pinuse bit */
2316#define next_pinuse(p) ((next_chunk(p)->head) & PINUSE_BIT)
2317
2318/* Get/set size at footer */
2319#define get_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_foot)
2320#define set_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_foot = (s))
2321
2322/* Set size, pinuse bit, and foot */
2323#define set_size_and_pinuse_of_free_chunk(p, s)\
2324 ((p)->head = (s|PINUSE_BIT), set_foot(p, s))
2325
2326/* Set size, pinuse bit, foot, and clear next pinuse */
2327#define set_free_with_pinuse(p, s, n)\
2328 (clear_pinuse(n), set_size_and_pinuse_of_free_chunk(p, s))
2329
2330/* Get the internal overhead associated with chunk p */
2331#define overhead_for(p)\
2332 (is_mmapped(p)? MMAP_CHUNK_OVERHEAD : CHUNK_OVERHEAD)
2333
2334/* Return true if malloced space is not necessarily cleared */
2335#if MMAP_CLEARS
2336#define calloc_must_clear(p) (!is_mmapped(p))
2337#else /* MMAP_CLEARS */
2338#define calloc_must_clear(p) (1)
2339#endif /* MMAP_CLEARS */
2340
2341/* ---------------------- Overlaid data structures ----------------------- */
2342
2343/*
2344 When chunks are not in use, they are treated as nodes of either
2345 lists or trees.
2346
2347 "Small" chunks are stored in circular doubly-linked lists, and look
2348 like this:
2349
2350 chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2351 | Size of previous chunk |
2352 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2353 `head:' | Size of chunk, in bytes |P|
2354 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2355 | Forward pointer to next chunk in list |
2356 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2357 | Back pointer to previous chunk in list |
2358 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2359 | Unused space (may be 0 bytes long) .
2360 . .
2361 . |
2362nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2363 `foot:' | Size of chunk, in bytes |
2364 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2365
2366 Larger chunks are kept in a form of bitwise digital trees (aka
2367 tries) keyed on chunksizes. Because malloc_tree_chunks are only for
2368 free chunks greater than 256 bytes, their size doesn't impose any
2369 constraints on user chunk sizes. Each node looks like:
2370
2371 chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2372 | Size of previous chunk |
2373 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2374 `head:' | Size of chunk, in bytes |P|
2375 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2376 | Forward pointer to next chunk of same size |
2377 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2378 | Back pointer to previous chunk of same size |
2379 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2380 | Pointer to left child (child[0]) |
2381 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2382 | Pointer to right child (child[1]) |
2383 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2384 | Pointer to parent |
2385 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2386 | bin index of this chunk |
2387 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2388 | Unused space .
2389 . |
2390nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2391 `foot:' | Size of chunk, in bytes |
2392 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2393
2394 Each tree holding treenodes is a tree of unique chunk sizes. Chunks
2395 of the same size are arranged in a circularly-linked list, with only
2396 the oldest chunk (the next to be used, in our FIFO ordering)
2397 actually in the tree. (Tree members are distinguished by a non-null
2398 parent pointer.) If a chunk with the same size as an existing node
2399 is inserted, it is linked off the existing node using pointers that
2400 work in the same way as fd/bk pointers of small chunks.
2401
2402 Each tree contains a power of 2 sized range of chunk sizes (the
2403 smallest is 0x100 <= x < 0x180), which is divided in half at each
2404 tree level, with the chunks in the smaller half of the range (0x100
2405 <= x < 0x140 for the top node) in the left subtree and the larger
2406 half (0x140 <= x < 0x180) in the right subtree. This is, of course,
2407 done by inspecting individual bits.
2408
2409 Using these rules, each node's left subtree contains all smaller
2410 sizes than its right subtree. However, the node at the root of each
2411 subtree has no particular ordering relationship to either. (The
2412 dividing line between the subtree sizes is based on trie relation.)
2413 If we remove the last chunk of a given size from the interior of the
2414 tree, we need to replace it with a leaf node. The tree ordering
2415 rules permit a node to be replaced by any leaf below it.
2416
2417 The smallest chunk in a tree (a common operation in a best-fit
2418 allocator) can be found by walking a path to the leftmost leaf in
2419 the tree. Unlike a usual binary tree, where we follow left child
2420 pointers until we reach a null, here we follow the right child
2421 pointer any time the left one is null, until we reach a leaf with
2422 both child pointers null. The smallest chunk in the tree will be
2423 somewhere along that path.
2424
2425 The worst case number of steps to add, find, or remove a node is
2426 bounded by the number of bits differentiating chunks within
2427 bins. Under current bin calculations, this ranges from 6 up to 21
2428 (for 32 bit sizes) or up to 53 (for 64 bit sizes). The typical case
2429 is of course much better.
2430*/
2431
/*
  A free chunk large enough to be held in a treebin.  The first four
  fields overlay struct malloc_chunk exactly, so a tree chunk can also
  be treated as an ordinary chunk when traversing adjacent memory.
*/
struct malloc_tree_chunk {
  /* The first four fields must be compatible with malloc_chunk */
  size_t                    prev_foot; /* size of previous chunk, when that chunk is free */
  size_t                    head;      /* chunk size plus PINUSE/CINUSE/FLAG4 bits */
  struct malloc_tree_chunk* fd;        /* next chunk of the same size (circular list) */
  struct malloc_tree_chunk* bk;        /* previous chunk of the same size */

  struct malloc_tree_chunk* child[2];  /* left (child[0]) and right (child[1]) subtrees */
  struct malloc_tree_chunk* parent;    /* parent node; non-null only for chunks actually in the tree */
  bindex_t                  index;     /* treebin index this chunk belongs to */
};
2443
2444typedef struct malloc_tree_chunk tchunk;
2445typedef struct malloc_tree_chunk* tchunkptr;
2446typedef struct malloc_tree_chunk* tbinptr; /* The type of bins of trees */
2447
2448/* A little helper macro for trees */
2449#define leftmost_child(t) ((t)->child[0] != 0? (t)->child[0] : (t)->child[1])
2450
2451/* ----------------------------- Segments -------------------------------- */
2452
2453/*
2454 Each malloc space may include non-contiguous segments, held in a
2455 list headed by an embedded malloc_segment record representing the
2456 top-most space. Segments also include flags holding properties of
2457 the space. Large chunks that are directly allocated by mmap are not
2458 included in this list. They are instead independently created and
2459 destroyed without otherwise keeping track of them.
2460
2461 Segment management mainly comes into play for spaces allocated by
2462 MMAP. Any call to MMAP might or might not return memory that is
2463 adjacent to an existing segment. MORECORE normally contiguously
2464 extends the current space, so this space is almost always adjacent,
2465 which is simpler and faster to deal with. (This is why MORECORE is
2466 used preferentially to MMAP when both are available -- see
2467 sys_alloc.) When allocating using MMAP, we don't use any of the
2468 hinting mechanisms (inconsistently) supported in various
2469 implementations of unix mmap, or distinguish reserving from
2470 committing memory. Instead, we just ask for space, and exploit
2471 contiguity when we get it. It is probably possible to do
2472 better than this on some systems, but no general scheme seems
2473 to be significantly better.
2474
2475 Management entails a simpler variant of the consolidation scheme
2476 used for chunks to reduce fragmentation -- new adjacent memory is
2477 normally prepended or appended to an existing segment. However,
2478 there are limitations compared to chunk consolidation that mostly
2479 reflect the fact that segment processing is relatively infrequent
2480 (occurring only when getting memory from system) and that we
2481 don't expect to have huge numbers of segments:
2482
2483 * Segments are not indexed, so traversal requires linear scans. (It
2484 would be possible to index these, but is not worth the extra
2485 overhead and complexity for most programs on most platforms.)
2486 * New segments are only appended to old ones when holding top-most
2487 memory; if they cannot be prepended to others, they are held in
2488 different segments.
2489
2490 Except for the top-most segment of an mstate, each segment record
2491 is kept at the tail of its segment. Segments are added by pushing
2492 segment records onto the list headed by &mstate.seg for the
2493 containing mstate.
2494
2495 Segment flags control allocation/merge/deallocation policies:
2496 * If EXTERN_BIT set, then we did not allocate this segment,
2497 and so should not try to deallocate or merge with others.
2498 (This currently holds only for the initial segment passed
2499 into create_mspace_with_base.)
2500 * If USE_MMAP_BIT set, the segment may be merged with
2501 other surrounding mmapped segments and trimmed/de-allocated
2502 using munmap.
2503 * If neither bit is set, then the segment was obtained using
2504 MORECORE so can be merged with surrounding MORECORE'd segments
2505 and deallocated/trimmed using MORECORE with negative arguments.
2506*/
2507
/*
  Record describing one (possibly non-contiguous) region of system
  memory owned by an mstate.  Records form a singly linked list headed
  by the embedded mstate.seg; see the Segments overview comment above
  for the allocation/merge/deallocation policies encoded in sflags.
*/
struct malloc_segment {
  char*        base;             /* base address */
  size_t       size;             /* allocated size */
  struct malloc_segment* next;   /* ptr to next segment */
  flag_t       sflags;           /* mmap and extern flag */
};
2514
2515#define is_mmapped_segment(S) ((S)->sflags & USE_MMAP_BIT)
2516#define is_extern_segment(S) ((S)->sflags & EXTERN_BIT)
2517
2518typedef struct malloc_segment msegment;
2519typedef struct malloc_segment* msegmentptr;
2520
2521/* ---------------------------- malloc_state ----------------------------- */
2522
2523/*
2524 A malloc_state holds all of the bookkeeping for a space.
2525 The main fields are:
2526
2527 Top
2528 The topmost chunk of the currently active segment. Its size is
2529 cached in topsize. The actual size of topmost space is
2530 topsize+TOP_FOOT_SIZE, which includes space reserved for adding
2531 fenceposts and segment records if necessary when getting more
2532 space from the system. The size at which to autotrim top is
2533 cached from mparams in trim_check, except that it is disabled if
2534 an autotrim fails.
2535
2536 Designated victim (dv)
2537 This is the preferred chunk for servicing small requests that
2538 don't have exact fits. It is normally the chunk split off most
2539 recently to service another small request. Its size is cached in
2540 dvsize. The link fields of this chunk are not maintained since it
2541 is not kept in a bin.
2542
2543 SmallBins
2544 An array of bin headers for free chunks. These bins hold chunks
2545 with sizes less than MIN_LARGE_SIZE bytes. Each bin contains
2546 chunks of all the same size, spaced 8 bytes apart. To simplify
2547 use in double-linked lists, each bin header acts as a malloc_chunk
2548 pointing to the real first node, if it exists (else pointing to
2549 itself). This avoids special-casing for headers. But to avoid
2550 waste, we allocate only the fd/bk pointers of bins, and then use
2551 repositioning tricks to treat these as the fields of a chunk.
2552
2553 TreeBins
2554 Treebins are pointers to the roots of trees holding a range of
2555 sizes. There are 2 equally spaced treebins for each power of two
2556 from TREE_SHIFT to TREE_SHIFT+16. The last bin holds anything
2557 larger.
2558
2559 Bin maps
2560 There is one bit map for small bins ("smallmap") and one for
2561 treebins ("treemap"). Each bin sets its bit when non-empty, and
2562 clears the bit when empty. Bit operations are then used to avoid
2563 bin-by-bin searching -- nearly all "search" is done without ever
2564 looking at bins that won't be selected. The bit maps
2565 conservatively use 32 bits per map word, even if on 64bit system.
2566 For a good description of some of the bit-based techniques used
2567 here, see Henry S. Warren Jr's book "Hacker's Delight" (and
2568 supplement at http://hackersdelight.org/). Many of these are
2569 intended to reduce the branchiness of paths through malloc etc, as
2570 well as to reduce the number of memory locations read or written.
2571
2572 Segments
2573 A list of segments headed by an embedded malloc_segment record
2574 representing the initial space.
2575
2576 Address check support
2577 The least_addr field is the least address ever obtained from
2578 MORECORE or MMAP. Attempted frees and reallocs of any address less
2579 than this are trapped (unless INSECURE is defined).
2580
2581 Magic tag
2582 A cross-check field that should always hold same value as mparams.magic.
2583
2584 Max allowed footprint
2585 The maximum allowed bytes to allocate from system (zero means no limit)
2586
2587 Flags
2588 Bits recording whether to use MMAP, locks, or contiguous MORECORE
2589
2590 Statistics
2591 Each space keeps track of current and maximum system memory
2592 obtained via MORECORE or MMAP.
2593
2594 Trim support
2595 Fields holding the amount of unused topmost memory that should trigger
2596 trimming, and a counter to force periodic scanning to release unused
2597 non-topmost segments.
2598
2599 Locking
2600 If USE_LOCKS is defined, the "mutex" lock is acquired and released
2601 around every public call using this mspace.
2602
2603 Extension support
2604 A void* pointer and a size_t field that can be used to help implement
2605 extensions to this malloc.
2606*/
2607
2608/* Bin types, widths and sizes */
2609#define NSMALLBINS (32U)
2610#define NTREEBINS (32U)
2611#define SMALLBIN_SHIFT (3U)
2612#define SMALLBIN_WIDTH (SIZE_T_ONE << SMALLBIN_SHIFT)
2613#define TREEBIN_SHIFT (8U)
2614#define MIN_LARGE_SIZE (SIZE_T_ONE << TREEBIN_SHIFT)
2615#define MAX_SMALL_SIZE (MIN_LARGE_SIZE - SIZE_T_ONE)
2616#define MAX_SMALL_REQUEST (MAX_SMALL_SIZE - CHUNK_ALIGN_MASK - CHUNK_OVERHEAD)
2617
struct malloc_state {
  binmap_t   smallmap;            /* bitmap of non-empty smallbins */
  binmap_t   treemap;             /* bitmap of non-empty treebins */
  size_t     dvsize;              /* cached size of the designated victim chunk */
  size_t     topsize;             /* cached size of the top chunk */
  char*      least_addr;          /* least address ever obtained from MORECORE or MMAP */
  mchunkptr  dv;                  /* designated victim: preferred chunk for small requests */
  mchunkptr  top;                 /* topmost chunk of the currently active segment */
  size_t     trim_check;          /* autotrim threshold cached from mparams (0 disables) */
  size_t     release_checks;      /* countdown forcing periodic scans for unused segments */
  size_t     magic;               /* cross-check field; must equal mparams.magic */
  mchunkptr  smallbins[(NSMALLBINS+1)*2]; /* fd/bk pairs only; see smallbin_at() repositioning */
  tbinptr    treebins[NTREEBINS]; /* roots of trees holding ranges of large-chunk sizes */
  size_t     footprint;           /* current bytes obtained from the system */
  size_t     max_footprint;       /* high-water mark of footprint */
  size_t     footprint_limit;     /* zero means no limit */
  flag_t     mflags;              /* USE_LOCK/USE_MMAP/USE_NONCONTIGUOUS bits */
#if USE_LOCKS
  MLOCK_T    mutex;     /* locate lock among fields that rarely change */
#endif /* USE_LOCKS */
  msegment   seg;                 /* embedded head of the segment list (initial space) */
  void*      extp;                /* Unused but available for extensions */
  size_t     exts;                /* Unused but available for extensions */
};
2642
2643typedef struct malloc_state* mstate;
2644
2645/* ------------- Global malloc_state and malloc_params ------------------- */
2646
2647/*
2648 malloc_params holds global properties, including those that can be
2649 dynamically set using mallopt. There is a single instance, mparams,
2650 initialized in init_mparams. Note that the non-zeroness of "magic"
2651 also serves as an initialization flag.
2652*/
2653
struct malloc_params {
  size_t magic;           /* random tag; stays zero until init_mparams has run */
  size_t page_size;       /* system page size */
  size_t granularity;     /* system allocation granularity (>= page_size) */
  size_t mmap_threshold;  /* initialized from DEFAULT_MMAP_THRESHOLD; settable via mallopt */
  size_t trim_threshold;  /* initialized from DEFAULT_TRIM_THRESHOLD; settable via mallopt */
  flag_t default_mflags;  /* initial mflags given to new malloc_states */
};
2662
2663static struct malloc_params mparams;
2664
2665/* Ensure mparams initialized */
2666#define ensure_initialization() (void)(mparams.magic != 0 || init_mparams())
2667
2668#if !ONLY_MSPACES
2669
2670/* The global malloc_state used for all non-"mspace" calls */
2671static struct malloc_state _gm_;
2672#define gm (&_gm_)
2673#define is_global(M) ((M) == &_gm_)
2674
2675#endif /* !ONLY_MSPACES */
2676
2677#define is_initialized(M) ((M)->top != 0)
2678
2679/* -------------------------- system alloc setup ------------------------- */
2680
2681/* Operations on mflags */
2682
2683#define use_lock(M) ((M)->mflags & USE_LOCK_BIT)
2684#define enable_lock(M) ((M)->mflags |= USE_LOCK_BIT)
2685#if USE_LOCKS
2686#define disable_lock(M) ((M)->mflags &= ~USE_LOCK_BIT)
2687#else
2688#define disable_lock(M)
2689#endif
2690
2691#define use_mmap(M) ((M)->mflags & USE_MMAP_BIT)
2692#define enable_mmap(M) ((M)->mflags |= USE_MMAP_BIT)
2693#if HAVE_MMAP
2694#define disable_mmap(M) ((M)->mflags &= ~USE_MMAP_BIT)
2695#else
2696#define disable_mmap(M)
2697#endif
2698
2699#define use_noncontiguous(M) ((M)->mflags & USE_NONCONTIGUOUS_BIT)
2700#define disable_contiguous(M) ((M)->mflags |= USE_NONCONTIGUOUS_BIT)
2701
2702#define set_lock(M,L)\
2703 ((M)->mflags = (L)?\
2704 ((M)->mflags | USE_LOCK_BIT) :\
2705 ((M)->mflags & ~USE_LOCK_BIT))
2706
2707/* page-align a size */
2708#define page_align(S)\
2709 (((S) + (mparams.page_size - SIZE_T_ONE)) & ~(mparams.page_size - SIZE_T_ONE))
2710
2711/* granularity-align a size */
2712#define granularity_align(S)\
2713 (((S) + (mparams.granularity - SIZE_T_ONE))\
2714 & ~(mparams.granularity - SIZE_T_ONE))
2715
2716
2717/* For mmap, use granularity alignment on windows, else page-align */
2718#ifdef WIN32
2719#define mmap_align(S) granularity_align(S)
2720#else
2721#define mmap_align(S) page_align(S)
2722#endif
2723
2724/* For sys_alloc, enough padding to ensure can malloc request on success */
2725#define SYS_ALLOC_PADDING (TOP_FOOT_SIZE + MALLOC_ALIGNMENT)
2726
2727#define is_page_aligned(S)\
2728 (((size_t)(S) & (mparams.page_size - SIZE_T_ONE)) == 0)
2729#define is_granularity_aligned(S)\
2730 (((size_t)(S) & (mparams.granularity - SIZE_T_ONE)) == 0)
2731
2732/* True if segment S holds address A */
2733#define segment_holds(S, A)\
2734 ((char*)(A) >= S->base && (char*)(A) < S->base + S->size)
2735
2736/* Return segment holding given address */
2737static msegmentptr segment_holding(mstate m, char* addr) {
2738 msegmentptr sp = &m->seg;
2739 for (;;) {
2740 if (addr >= sp->base && addr < sp->base + sp->size)
2741 return sp;
2742 if ((sp = sp->next) == 0)
2743 return 0;
2744 }
2745}
2746
2747/* Return true if segment contains a segment link */
2748static int has_segment_link(mstate m, msegmentptr ss) {
2749 msegmentptr sp = &m->seg;
2750 for (;;) {
2751 if ((char*)sp >= ss->base && (char*)sp < ss->base + ss->size)
2752 return 1;
2753 if ((sp = sp->next) == 0)
2754 return 0;
2755 }
2756}
2757
2758#ifndef MORECORE_CANNOT_TRIM
2759#define should_trim(M,s) ((s) > (M)->trim_check)
2760#else /* MORECORE_CANNOT_TRIM */
2761#define should_trim(M,s) (0)
2762#endif /* MORECORE_CANNOT_TRIM */
2763
2764/*
2765 TOP_FOOT_SIZE is padding at the end of a segment, including space
2766 that may be needed to place segment records and fenceposts when new
2767 noncontiguous segments are added.
2768*/
2769#define TOP_FOOT_SIZE\
2770 (align_offset(chunk2mem(0))+pad_request(sizeof(struct malloc_segment))+MIN_CHUNK_SIZE)
2771
2772
2773/* ------------------------------- Hooks -------------------------------- */
2774
2775/*
2776 PREACTION should be defined to return 0 on success, and nonzero on
2777 failure. If you are not using locking, you can redefine these to do
2778 anything you like.
2779*/
2780
2781#if USE_LOCKS
2782#define PREACTION(M) ((use_lock(M))? ACQUIRE_LOCK(&(M)->mutex) : 0)
2783#define POSTACTION(M) { if (use_lock(M)) RELEASE_LOCK(&(M)->mutex); }
2784#else /* USE_LOCKS */
2785
2786#ifndef PREACTION
2787#define PREACTION(M) (0)
2788#endif /* PREACTION */
2789
2790#ifndef POSTACTION
2791#define POSTACTION(M)
2792#endif /* POSTACTION */
2793
2794#endif /* USE_LOCKS */
2795
2796/*
2797 CORRUPTION_ERROR_ACTION is triggered upon detected bad addresses.
2798 USAGE_ERROR_ACTION is triggered on detected bad frees and
2799 reallocs. The argument p is an address that might have triggered the
2800 fault. It is ignored by the two predefined actions, but might be
2801 useful in custom actions that try to help diagnose errors.
2802*/
2803
2804#if PROCEED_ON_ERROR
2805
2806/* A count of the number of corruption errors causing resets */
2807int malloc_corruption_error_count;
2808
2809/* default corruption action */
2810static void reset_on_error(mstate m);
2811
2812#define CORRUPTION_ERROR_ACTION(m) reset_on_error(m)
2813#define USAGE_ERROR_ACTION(m, p)
2814
2815#else /* PROCEED_ON_ERROR */
2816
2817#ifndef CORRUPTION_ERROR_ACTION
2818#define CORRUPTION_ERROR_ACTION(m) ABORT
2819#endif /* CORRUPTION_ERROR_ACTION */
2820
2821#ifndef USAGE_ERROR_ACTION
2822#define USAGE_ERROR_ACTION(m,p) ABORT
2823#endif /* USAGE_ERROR_ACTION */
2824
2825#endif /* PROCEED_ON_ERROR */
2826
2827
2828/* -------------------------- Debugging setup ---------------------------- */
2829
2830#if ! DEBUG
2831
2832#define check_free_chunk(M,P)
2833#define check_inuse_chunk(M,P)
2834#define check_malloced_chunk(M,P,N)
2835#define check_mmapped_chunk(M,P)
2836#define check_malloc_state(M)
2837#define check_top_chunk(M,P)
2838
2839#else /* DEBUG */
2840#define check_free_chunk(M,P) do_check_free_chunk(M,P)
2841#define check_inuse_chunk(M,P) do_check_inuse_chunk(M,P)
2842#define check_top_chunk(M,P) do_check_top_chunk(M,P)
2843#define check_malloced_chunk(M,P,N) do_check_malloced_chunk(M,P,N)
2844#define check_mmapped_chunk(M,P) do_check_mmapped_chunk(M,P)
2845#define check_malloc_state(M) do_check_malloc_state(M)
2846
2847static void do_check_any_chunk(mstate m, mchunkptr p);
2848static void do_check_top_chunk(mstate m, mchunkptr p);
2849static void do_check_mmapped_chunk(mstate m, mchunkptr p);
2850static void do_check_inuse_chunk(mstate m, mchunkptr p);
2851static void do_check_free_chunk(mstate m, mchunkptr p);
2852static void do_check_malloced_chunk(mstate m, void* mem, size_t s);
2853static void do_check_tree(mstate m, tchunkptr t);
2854static void do_check_treebin(mstate m, bindex_t i);
2855static void do_check_smallbin(mstate m, bindex_t i);
2856static void do_check_malloc_state(mstate m);
2857static int bin_find(mstate m, mchunkptr x);
2858static size_t traverse_and_check(mstate m);
2859#endif /* DEBUG */
2860
2861/* ---------------------------- Indexing Bins ---------------------------- */
2862
2863#define is_small(s) (((s) >> SMALLBIN_SHIFT) < NSMALLBINS)
2864#define small_index(s) (bindex_t)((s) >> SMALLBIN_SHIFT)
2865#define small_index2size(i) ((i) << SMALLBIN_SHIFT)
2866#define MIN_SMALL_INDEX (small_index(MIN_CHUNK_SIZE))
2867
2868/* addressing by index. See above about smallbin repositioning */
2869#define smallbin_at(M, i) ((sbinptr)((char*)&((M)->smallbins[(i)<<1])))
2870#define treebin_at(M,i) (&((M)->treebins[i]))
2871
2872/* assign tree index for size S to variable I. Use x86 asm if possible */
2873#if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
2874#define compute_tree_index(S, I)\
2875{\
2876 unsigned int X = S >> TREEBIN_SHIFT;\
2877 if (X == 0)\
2878 I = 0;\
2879 else if (X > 0xFFFF)\
2880 I = NTREEBINS-1;\
2881 else {\
2882 unsigned int K = (unsigned) sizeof(X)*__CHAR_BIT__ - 1 - (unsigned) __builtin_clz(X); \
2883 I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\
2884 }\
2885}
2886
2887#elif defined (__INTEL_COMPILER)
2888#define compute_tree_index(S, I)\
2889{\
2890 size_t X = S >> TREEBIN_SHIFT;\
2891 if (X == 0)\
2892 I = 0;\
2893 else if (X > 0xFFFF)\
2894 I = NTREEBINS-1;\
2895 else {\
2896 unsigned int K = _bit_scan_reverse (X); \
2897 I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\
2898 }\
2899}
2900
2901#elif defined(_MSC_VER) && _MSC_VER>=1300
2902#define compute_tree_index(S, I)\
2903{\
2904 size_t X = S >> TREEBIN_SHIFT;\
2905 if (X == 0)\
2906 I = 0;\
2907 else if (X > 0xFFFF)\
2908 I = NTREEBINS-1;\
2909 else {\
2910 unsigned int K;\
2911 _BitScanReverse((DWORD *) &K, (DWORD) X);\
2912 I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\
2913 }\
2914}
2915
2916#else /* GNUC */
2917#define compute_tree_index(S, I)\
2918{\
2919 size_t X = S >> TREEBIN_SHIFT;\
2920 if (X == 0)\
2921 I = 0;\
2922 else if (X > 0xFFFF)\
2923 I = NTREEBINS-1;\
2924 else {\
2925 unsigned int Y = (unsigned int)X;\
2926 unsigned int N = ((Y - 0x100) >> 16) & 8;\
2927 unsigned int K = (((Y <<= N) - 0x1000) >> 16) & 4;\
2928 N += K;\
2929 N += K = (((Y <<= K) - 0x4000) >> 16) & 2;\
2930 K = 14 - N + ((Y <<= K) >> 15);\
2931 I = (K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1));\
2932 }\
2933}
2934#endif /* GNUC */
2935
2936/* Bit representing maximum resolved size in a treebin at i */
2937#define bit_for_tree_index(i) \
2938 (i == NTREEBINS-1)? (SIZE_T_BITSIZE-1) : (((i) >> 1) + TREEBIN_SHIFT - 2)
2939
2940/* Shift placing maximum resolved bit in a treebin at i as sign bit */
2941#define leftshift_for_tree_index(i) \
2942 ((i == NTREEBINS-1)? 0 : \
2943 ((SIZE_T_BITSIZE-SIZE_T_ONE) - (((i) >> 1) + TREEBIN_SHIFT - 2)))
2944
2945/* The size of the smallest chunk held in bin with index i */
2946#define minsize_for_tree_index(i) \
2947 ((SIZE_T_ONE << (((i) >> 1) + TREEBIN_SHIFT)) | \
2948 (((size_t)((i) & SIZE_T_ONE)) << (((i) >> 1) + TREEBIN_SHIFT - 1)))
2949
2950
2951/* ------------------------ Operations on bin maps ----------------------- */
2952
2953/* bit corresponding to given index */
2954#define idx2bit(i) ((binmap_t)(1) << (i))
2955
2956/* Mark/Clear bits with given index */
2957#define mark_smallmap(M,i) ((M)->smallmap |= idx2bit(i))
2958#define clear_smallmap(M,i) ((M)->smallmap &= ~idx2bit(i))
2959#define smallmap_is_marked(M,i) ((M)->smallmap & idx2bit(i))
2960
2961#define mark_treemap(M,i) ((M)->treemap |= idx2bit(i))
2962#define clear_treemap(M,i) ((M)->treemap &= ~idx2bit(i))
2963#define treemap_is_marked(M,i) ((M)->treemap & idx2bit(i))
2964
2965/* isolate the least set bit of a bitmap */
2966#define least_bit(x) ((x) & -(x))
2967
2968/* mask with all bits to left of least bit of x on */
2969#define left_bits(x) ((x<<1) | -(x<<1))
2970
2971/* mask with all bits to left of or equal to least bit of x on */
2972#define same_or_left_bits(x) ((x) | -(x))
2973
2974/* index corresponding to given bit. Use x86 asm if possible */
2975
2976#if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
2977#define compute_bit2idx(X, I)\
2978{\
2979 unsigned int J;\
2980 J = __builtin_ctz(X); \
2981 I = (bindex_t)J;\
2982}
2983
2984#elif defined (__INTEL_COMPILER)
2985#define compute_bit2idx(X, I)\
2986{\
2987 unsigned int J;\
2988 J = _bit_scan_forward (X); \
2989 I = (bindex_t)J;\
2990}
2991
2992#elif defined(_MSC_VER) && _MSC_VER>=1300
2993#define compute_bit2idx(X, I)\
2994{\
2995 unsigned int J;\
2996 _BitScanForward((DWORD *) &J, X);\
2997 I = (bindex_t)J;\
2998}
2999
3000#elif USE_BUILTIN_FFS
3001#define compute_bit2idx(X, I) I = ffs(X)-1
3002
3003#else
3004#define compute_bit2idx(X, I)\
3005{\
3006 unsigned int Y = X - 1;\
3007 unsigned int K = Y >> (16-4) & 16;\
3008 unsigned int N = K; Y >>= K;\
3009 N += K = Y >> (8-3) & 8; Y >>= K;\
3010 N += K = Y >> (4-2) & 4; Y >>= K;\
3011 N += K = Y >> (2-1) & 2; Y >>= K;\
3012 N += K = Y >> (1-0) & 1; Y >>= K;\
3013 I = (bindex_t)(N + Y);\
3014}
3015#endif /* GNUC */
3016
3017
3018/* ----------------------- Runtime Check Support ------------------------- */
3019
3020/*
3021 For security, the main invariant is that malloc/free/etc never
3022 writes to a static address other than malloc_state, unless static
3023 malloc_state itself has been corrupted, which cannot occur via
3024 malloc (because of these checks). In essence this means that we
3025 believe all pointers, sizes, maps etc held in malloc_state, but
3026 check all of those linked or offsetted from other embedded data
3027 structures. These checks are interspersed with main code in a way
3028 that tends to minimize their run-time cost.
3029
3030 When FOOTERS is defined, in addition to range checking, we also
3031 verify footer fields of inuse chunks, which can be used to guarantee
3032 that the mstate controlling malloc/free is intact. This is a
3033 streamlined version of the approach described by William Robertson
3034 et al in "Run-time Detection of Heap-based Overflows" LISA'03
3035 http://www.usenix.org/events/lisa03/tech/robertson.html The footer
3036 of an inuse chunk holds the xor of its mstate and a random seed,
3037 that is checked upon calls to free() and realloc(). This is
3038 (probabilistically) unguessable from outside the program, but can be
3039 computed by any code successfully malloc'ing any chunk, so does not
3040 itself provide protection against code that has already broken
3041 security through some other means. Unlike Robertson et al, we
3042 always dynamically check addresses of all offset chunks (previous,
3043 next, etc). This turns out to be cheaper than relying on hashes.
3044*/
3045
3046#if !INSECURE
3047/* Check if address a is at least as high as any from MORECORE or MMAP */
3048#define ok_address(M, a) ((char*)(a) >= (M)->least_addr)
3049/* Check if address of next chunk n is higher than base chunk p */
3050#define ok_next(p, n) ((char*)(p) < (char*)(n))
3051/* Check if p has inuse status */
3052#define ok_inuse(p) is_inuse(p)
3053/* Check if p has its pinuse bit on */
3054#define ok_pinuse(p) pinuse(p)
3055
3056#else /* !INSECURE */
3057#define ok_address(M, a) (1)
3058#define ok_next(b, n) (1)
3059#define ok_inuse(p) (1)
3060#define ok_pinuse(p) (1)
3061#endif /* !INSECURE */
3062
3063#if (FOOTERS && !INSECURE)
3064/* Check if (alleged) mstate m has expected magic field */
3065#define ok_magic(M) ((M)->magic == mparams.magic)
3066#else /* (FOOTERS && !INSECURE) */
3067#define ok_magic(M) (1)
3068#endif /* (FOOTERS && !INSECURE) */
3069
3070/* In gcc, use __builtin_expect to minimize impact of checks */
3071#if !INSECURE
3072#if defined(__GNUC__) && __GNUC__ >= 3
3073#define RTCHECK(e) __builtin_expect(e, 1)
3074#else /* GNUC */
3075#define RTCHECK(e) (e)
3076#endif /* GNUC */
3077#else /* !INSECURE */
3078#define RTCHECK(e) (1)
3079#endif /* !INSECURE */
3080
3081/* macros to set up inuse chunks with or without footers */
3082
3083#if !FOOTERS
3084
3085#define mark_inuse_foot(M,p,s)
3086
3087/* Macros for setting head/foot of non-mmapped chunks */
3088
3089/* Set cinuse bit and pinuse bit of next chunk */
3090#define set_inuse(M,p,s)\
3091 ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
3092 ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)
3093
3094/* Set cinuse and pinuse of this chunk and pinuse of next chunk */
3095#define set_inuse_and_pinuse(M,p,s)\
3096 ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
3097 ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)
3098
3099/* Set size, cinuse and pinuse bit of this chunk */
3100#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
3101 ((p)->head = (s|PINUSE_BIT|CINUSE_BIT))
3102
3103#else /* FOOTERS */
3104
3105/* Set foot of inuse chunk to be xor of mstate and seed */
3106#define mark_inuse_foot(M,p,s)\
3107 (((mchunkptr)((char*)(p) + (s)))->prev_foot = ((size_t)(M) ^ mparams.magic))
3108
3109#define get_mstate_for(p)\
3110 ((mstate)(((mchunkptr)((char*)(p) +\
3111 (chunksize(p))))->prev_foot ^ mparams.magic))
3112
3113#define set_inuse(M,p,s)\
3114 ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
3115 (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT), \
3116 mark_inuse_foot(M,p,s))
3117
3118#define set_inuse_and_pinuse(M,p,s)\
3119 ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
3120 (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT),\
3121 mark_inuse_foot(M,p,s))
3122
3123#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
3124 ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
3125 mark_inuse_foot(M, p, s))
3126
3127#endif /* !FOOTERS */
3128
3129/* ---------------------------- setting mparams -------------------------- */
3130
#if LOCK_AT_FORK
/* pthread_atfork handlers (registered in init_mparams): take the global
   lock before fork, release it again in the parent, and re-initialize it
   in the child (the child cannot safely release a lock taken pre-fork). */
static void pre_fork(void)         { ACQUIRE_LOCK(&(gm)->mutex); }
static void post_fork_parent(void) { RELEASE_LOCK(&(gm)->mutex); }
static void post_fork_child(void)  { INITIAL_LOCK(&(gm)->mutex); }
#endif /* LOCK_AT_FORK */
3136
/* Initialize mparams */
/* One-time global initialization, serialized by the malloc global lock
   and keyed on mparams.magic == 0.  Determines page size and allocation
   granularity, sanity-checks compile-time configuration (ABORTs on
   failure), installs default flags and thresholds, registers the fork
   handlers, and derives a nonzero "magic" cookie used for footer
   obfuscation and mstate validation.  Always returns 1 so it can be
   used in boolean contexts by ensure_initialization(). */
static int init_mparams(void) {
#ifdef NEED_GLOBAL_LOCK_INIT
  if (malloc_global_mutex_status <= 0)
    init_malloc_global_mutex();
#endif

  ACQUIRE_MALLOC_GLOBAL_LOCK();
  if (mparams.magic == 0) {
    size_t magic;
    size_t psize;
    size_t gsize;

#ifndef WIN32
    psize = malloc_getpagesize;
    gsize = ((DEFAULT_GRANULARITY != 0)? DEFAULT_GRANULARITY : psize);
#else /* WIN32 */
    {
      /* On Windows, query the OS rather than trusting compile-time
         defaults for page size and VirtualAlloc granularity. */
      SYSTEM_INFO system_info;
      GetSystemInfo(&system_info);
      psize = system_info.dwPageSize;
      gsize = ((DEFAULT_GRANULARITY != 0)?
               DEFAULT_GRANULARITY : system_info.dwAllocationGranularity);
    }
#endif /* WIN32 */

    /* Sanity-check configuration:
       size_t must be unsigned and as wide as pointer type.
       ints must be at least 4 bytes.
       alignment must be at least 8.
       Alignment, min chunk size, and page size must all be powers of 2.
    */
    if ((sizeof(size_t) != sizeof(char*)) ||
        (MAX_SIZE_T < MIN_CHUNK_SIZE) ||
        (sizeof(int) < 4) ||
        (MALLOC_ALIGNMENT < (size_t)8U) ||
        ((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT-SIZE_T_ONE)) != 0) ||
        ((MCHUNK_SIZE & (MCHUNK_SIZE-SIZE_T_ONE)) != 0) ||
        ((gsize & (gsize-SIZE_T_ONE)) != 0) ||
        ((psize & (psize-SIZE_T_ONE)) != 0))
      ABORT;
    mparams.granularity = gsize;
    mparams.page_size = psize;
    mparams.mmap_threshold = DEFAULT_MMAP_THRESHOLD;
    mparams.trim_threshold = DEFAULT_TRIM_THRESHOLD;
#if MORECORE_CONTIGUOUS
    mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT;
#else /* MORECORE_CONTIGUOUS */
    mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT|USE_NONCONTIGUOUS_BIT;
#endif /* MORECORE_CONTIGUOUS */

#if !ONLY_MSPACES
    /* Set up lock for main malloc area */
    gm->mflags = mparams.default_mflags;
    (void)INITIAL_LOCK(&gm->mutex);
#endif
#if LOCK_AT_FORK
    pthread_atfork(&pre_fork, &post_fork_parent, &post_fork_child);
#endif

    {
      /* Seed the magic cookie from the best entropy source available
         for this build configuration. */
#if USE_DEV_RANDOM
      int fd;
      unsigned char buf[sizeof(size_t)];
      /* Try to use /dev/urandom, else fall back on using time */
      if ((fd = open("/dev/urandom", O_RDONLY)) >= 0 &&
          read(fd, buf, sizeof(buf)) == sizeof(buf)) {
        magic = *((size_t *) buf);
        close(fd);
      }
      else
#endif /* USE_DEV_RANDOM */
#ifdef WIN32
      magic = (size_t)(GetTickCount() ^ (size_t)0x55555555U);
#elif defined(LACKS_TIME_H)
      magic = (size_t)&magic ^ (size_t)0x55555555U;
#else
      magic = (size_t)(time(0) ^ (size_t)0x55555555U);
#endif
      magic |= (size_t)8U; /* ensure nonzero */
      magic &= ~(size_t)7U; /* improve chances of fault for bad values */
      /* Until memory modes commonly available, use volatile-write */
      (*(volatile size_t *)(&(mparams.magic))) = magic;
    }
  }

  RELEASE_MALLOC_GLOBAL_LOCK();
  return 1;
}
3226
3227/* support for mallopt */
3228static int change_mparam(int param_number, int value) {
3229 size_t val;
3230 ensure_initialization();
3231 val = (value == -1)? MAX_SIZE_T : (size_t)value;
3232 switch(param_number) {
3233 case M_TRIM_THRESHOLD:
3234 mparams.trim_threshold = val;
3235 return 1;
3236 case M_GRANULARITY:
3237 if (val >= mparams.page_size && ((val & (val-1)) == 0)) {
3238 mparams.granularity = val;
3239 return 1;
3240 }
3241 else
3242 return 0;
3243 case M_MMAP_THRESHOLD:
3244 mparams.mmap_threshold = val;
3245 return 1;
3246 default:
3247 return 0;
3248 }
3249}
3250
3251#if DEBUG
3252/* ------------------------- Debugging Support --------------------------- */
3253
/* Check properties of any chunk, whether free, inuse, mmapped etc */
static void do_check_any_chunk(mstate m, mchunkptr p) {
  /* A chunk must either yield an aligned user pointer or be a
     fencepost marker, and must lie at an address this mstate owns. */
  assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
  assert(ok_address(m, p));
}
3259
/* Check properties of top chunk */
/* The top chunk must live in a known segment, have a size matching
   m->topsize that exactly fills the segment minus TOP_FOOT_SIZE, and
   have pinuse set (top never borders a free chunk below it) while the
   trailer chunk above it reports top as free. */
static void do_check_top_chunk(mstate m, mchunkptr p) {
  msegmentptr sp = segment_holding(m, (char*)p);
  size_t sz = p->head & ~INUSE_BITS; /* third-lowest bit can be set! */
  assert(sp != 0);
  assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
  assert(ok_address(m, p));
  assert(sz == m->topsize);
  assert(sz > 0);
  assert(sz == ((sp->base + sp->size) - (char*)p) - TOP_FOOT_SIZE);
  assert(pinuse(p));
  assert(!pinuse(chunk_plus_offset(p, sz)));
}
3273
/* Check properties of (inuse) mmapped chunks */
/* A directly-mmapped chunk must be large (never small-bin sized), its
   total region length (size + stored alignment offset + footer pad)
   must be page-aligned, and it must be terminated by the fencepost
   header pair written by mmap_alloc. */
static void do_check_mmapped_chunk(mstate m, mchunkptr p) {
  size_t sz = chunksize(p);
  size_t len = (sz + (p->prev_foot) + MMAP_FOOT_PAD);
  assert(is_mmapped(p));
  assert(use_mmap(m));
  assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
  assert(ok_address(m, p));
  assert(!is_small(sz));
  assert((len & (mparams.page_size-SIZE_T_ONE)) == 0);
  assert(chunk_plus_offset(p, sz)->head == FENCEPOST_HEAD);
  assert(chunk_plus_offset(p, sz+SIZE_T_SIZE)->head == 0);
}
3287
/* Check properties of inuse chunks */
/* An in-use chunk must pass the generic checks, be marked in use, and
   be seen as "previous in use" by its successor; mmapped chunks get
   the extra mmap-specific validation. */
static void do_check_inuse_chunk(mstate m, mchunkptr p) {
  do_check_any_chunk(m, p);
  assert(is_inuse(p));
  assert(next_pinuse(p));
  /* If not pinuse and not mmapped, previous chunk has OK offset */
  assert(is_mmapped(p) || pinuse(p) || next_chunk(prev_chunk(p)) == p);
  if (is_mmapped(p))
    do_check_mmapped_chunk(m, p);
}
3298
/* Check properties of free chunks */
/* A free chunk (other than dv/top, which are tracked separately) must
   be properly sized and aligned, carry its size in the successor's
   prev_foot, never border another free chunk (coalescing invariant),
   and be correctly doubly-linked into its bin. */
static void do_check_free_chunk(mstate m, mchunkptr p) {
  size_t sz = chunksize(p);
  mchunkptr next = chunk_plus_offset(p, sz);
  do_check_any_chunk(m, p);
  assert(!is_inuse(p));
  assert(!next_pinuse(p));
  assert (!is_mmapped(p));
  if (p != m->dv && p != m->top) {
    if (sz >= MIN_CHUNK_SIZE) {
      assert((sz & CHUNK_ALIGN_MASK) == 0);
      assert(is_aligned(chunk2mem(p)));
      assert(next->prev_foot == sz);
      assert(pinuse(p));
      assert (next == m->top || is_inuse(next));
      /* bin list links must be consistent in both directions */
      assert(p->fd->bk == p);
      assert(p->bk->fd == p);
    }
    else /* markers are always of size SIZE_T_SIZE */
      assert(sz == SIZE_T_SIZE);
  }
}
3321
/* Check properties of malloced chunks at the point they are malloced */
/* mem is the user pointer just returned for a request of s bytes (may
   be 0 on allocation failure, in which case nothing is checked).  The
   chunk must be in use, at least MIN_CHUNK_SIZE, large enough for the
   request, and — unless mmapped — not wastefully oversized. */
static void do_check_malloced_chunk(mstate m, void* mem, size_t s) {
  if (mem != 0) {
    mchunkptr p = mem2chunk(mem);
    size_t sz = p->head & ~INUSE_BITS;
    do_check_inuse_chunk(m, p);
    assert((sz & CHUNK_ALIGN_MASK) == 0);
    assert(sz >= MIN_CHUNK_SIZE);
    assert(sz >= s);
    /* unless mmapped, size is less than MIN_CHUNK_SIZE more than request */
    assert(is_mmapped(p) || sz < (s + MIN_CHUNK_SIZE));
  }
}
3335
/* Check a tree and its subtrees. */
/* Recursively validates a treebin tree rooted at t: every node on the
   same-size fd/bk ring shares t's size and bin index; exactly one node
   per ring has a parent (the one actually linked into the tree); child
   links and parent back-links agree; and sizes fall in the range the
   bin index implies. */
static void do_check_tree(mstate m, tchunkptr t) {
  tchunkptr head = 0;
  tchunkptr u = t;
  bindex_t tindex = t->index;
  size_t tsize = chunksize(t);
  bindex_t idx;
  compute_tree_index(tsize, idx);
  assert(tindex == idx);
  assert(tsize >= MIN_LARGE_SIZE);
  assert(tsize >= minsize_for_tree_index(idx));
  assert((idx == NTREEBINS-1) || (tsize < minsize_for_tree_index((idx+1))));

  do { /* traverse through chain of same-sized nodes */
    do_check_any_chunk(m, ((mchunkptr)u));
    assert(u->index == tindex);
    assert(chunksize(u) == tsize);
    assert(!is_inuse(u));
    assert(!next_pinuse(u));
    assert(u->fd->bk == u);
    assert(u->bk->fd == u);
    if (u->parent == 0) {
      /* ring members without a parent carry no children */
      assert(u->child[0] == 0);
      assert(u->child[1] == 0);
    }
    else {
      assert(head == 0); /* only one node on chain has parent */
      head = u;
      assert(u->parent != u);
      /* parent must point back to u, either via a child slot or — for
         the tree root — via the treebin header itself */
      assert (u->parent->child[0] == u ||
              u->parent->child[1] == u ||
              *((tbinptr*)(u->parent)) == u);
      if (u->child[0] != 0) {
        assert(u->child[0]->parent == u);
        assert(u->child[0] != u);
        do_check_tree(m, u->child[0]);
      }
      if (u->child[1] != 0) {
        assert(u->child[1]->parent == u);
        assert(u->child[1] != u);
        do_check_tree(m, u->child[1]);
      }
      if (u->child[0] != 0 && u->child[1] != 0) {
        assert(chunksize(u->child[0]) < chunksize(u->child[1]));
      }
    }
    u = u->fd;
  } while (u != t);
  assert(head != 0);
}
3386
3387/* Check all the chunks in a treebin. */
3388static void do_check_treebin(mstate m, bindex_t i) {
3389 tbinptr* tb = treebin_at(m, i);
3390 tchunkptr t = *tb;
3391 int empty = (m->treemap & (1U << i)) == 0;
3392 if (t == 0)
3393 assert(empty);
3394 if (!empty)
3395 do_check_tree(m, t);
3396}
3397
/* Check all the chunks in a smallbin. */
/* The smallmap bit for bin i must agree with the bin's emptiness; each
   chunk on a non-empty bin's list must be a valid free chunk of the
   size class i and be followed in memory by an in-use chunk (or a
   fencepost). */
static void do_check_smallbin(mstate m, bindex_t i) {
  sbinptr b = smallbin_at(m, i);
  mchunkptr p = b->bk;
  unsigned int empty = (m->smallmap & (1U << i)) == 0;
  if (p == b)
    assert(empty);
  if (!empty) {
    for (; p != b; p = p->bk) {
      size_t size = chunksize(p);
      mchunkptr q;
      /* each chunk claims to be free */
      do_check_free_chunk(m, p);
      /* chunk belongs in bin */
      assert(small_index(size) == i);
      assert(p->bk == b || chunksize(p->bk) == chunksize(p));
      /* chunk is followed by an inuse chunk */
      q = next_chunk(p);
      if (q->head != FENCEPOST_HEAD)
        do_check_inuse_chunk(m, q);
    }
  }
}
3421
/* Find x in a bin. Used in other check functions. */
/* Returns 1 if chunk x is linked into the small or tree bin its size
   selects, 0 otherwise.  Small sizes scan the bin's circular list;
   large sizes walk the tree by size bits and then scan the matching
   same-size ring. */
static int bin_find(mstate m, mchunkptr x) {
  size_t size = chunksize(x);
  if (is_small(size)) {
    bindex_t sidx = small_index(size);
    sbinptr b = smallbin_at(m, sidx);
    if (smallmap_is_marked(m, sidx)) {
      mchunkptr p = b;
      do {
        if (p == x)
          return 1;
      } while ((p = p->fd) != b);
    }
  }
  else {
    bindex_t tidx;
    compute_tree_index(size, tidx);
    if (treemap_is_marked(m, tidx)) {
      tchunkptr t = *treebin_at(m, tidx);
      /* descend choosing left/right by the next size bit each level */
      size_t sizebits = size << leftshift_for_tree_index(tidx);
      while (t != 0 && chunksize(t) != size) {
        t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1];
        sizebits <<= 1;
      }
      if (t != 0) {
        /* scan the ring of same-sized chunks */
        tchunkptr u = t;
        do {
          if (u == (tchunkptr)x)
            return 1;
        } while ((u = u->fd) != t);
      }
    }
  }
  return 0;
}
3457
/* Traverse each chunk and check it; return total */
/* Walks every segment of m chunk by chunk (stopping at top or a
   fencepost), validating in-use chunks are not binned, free chunks are
   binned (or are dv), and no two free chunks are adjacent.  Returns the
   sum of all chunk sizes plus top, for comparison with the footprint. */
static size_t traverse_and_check(mstate m) {
  size_t sum = 0;
  if (is_initialized(m)) {
    msegmentptr s = &m->seg;
    sum += m->topsize + TOP_FOOT_SIZE;
    while (s != 0) {
      mchunkptr q = align_as_chunk(s->base);
      mchunkptr lastq = 0;
      assert(pinuse(q));
      while (segment_holds(s, q) &&
             q != m->top && q->head != FENCEPOST_HEAD) {
        sum += chunksize(q);
        if (is_inuse(q)) {
          assert(!bin_find(m, q));
          do_check_inuse_chunk(m, q);
        }
        else {
          assert(q == m->dv || bin_find(m, q));
          assert(lastq == 0 || is_inuse(lastq)); /* Not 2 consecutive free */
          do_check_free_chunk(m, q);
        }
        lastq = q;
        q = next_chunk(q);
      }
      s = s->next;
    }
  }
  return sum;
}
3488
3489
/* Check all properties of malloc_state. */
/* Full consistency check of an mstate: every small and tree bin, the
   designated-victim chunk, the top chunk, and finally a full traversal
   whose byte total must not exceed the recorded footprint. */
static void do_check_malloc_state(mstate m) {
  bindex_t i;
  size_t total;
  /* check bins */
  for (i = 0; i < NSMALLBINS; ++i)
    do_check_smallbin(m, i);
  for (i = 0; i < NTREEBINS; ++i)
    do_check_treebin(m, i);

  if (m->dvsize != 0) { /* check dv chunk */
    do_check_any_chunk(m, m->dv);
    assert(m->dvsize == chunksize(m->dv));
    assert(m->dvsize >= MIN_CHUNK_SIZE);
    /* dv is never held in a bin */
    assert(bin_find(m, m->dv) == 0);
  }

  if (m->top != 0) { /* check top chunk */
    do_check_top_chunk(m, m->top);
    /*assert(m->topsize == chunksize(m->top)); redundant */
    assert(m->topsize > 0);
    /* top is never held in a bin */
    assert(bin_find(m, m->top) == 0);
  }

  total = traverse_and_check(m);
  assert(total <= m->footprint);
  assert(m->footprint <= m->max_footprint);
}
3518#endif /* DEBUG */
3519
3520/* ----------------------------- statistics ------------------------------ */
3521
3522#if !NO_MALLINFO
/* Compute mallinfo statistics for mstate m by walking every segment
   and tallying free vs in-use bytes under the arena lock.  Returns a
   zeroed struct if the lock cannot be taken or m is uninitialized. */
static struct mallinfo internal_mallinfo(mstate m) {
  struct mallinfo nm = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
  ensure_initialization();
  if (!PREACTION(m)) {
    check_malloc_state(m);
    if (is_initialized(m)) {
      size_t nfree = SIZE_T_ONE; /* top always free */
      size_t mfree = m->topsize + TOP_FOOT_SIZE;
      size_t sum = mfree;
      msegmentptr s = &m->seg;
      while (s != 0) {
        mchunkptr q = align_as_chunk(s->base);
        while (segment_holds(s, q) &&
               q != m->top && q->head != FENCEPOST_HEAD) {
          size_t sz = chunksize(q);
          sum += sz;
          if (!is_inuse(q)) {
            mfree += sz;
            ++nfree;
          }
          q = next_chunk(q);
        }
        s = s->next;
      }

      /* translate tallies into the historical mallinfo fields */
      nm.arena = sum;
      nm.ordblks = nfree;
      nm.hblkhd = m->footprint - sum;
      nm.usmblks = m->max_footprint;
      nm.uordblks = m->footprint - mfree;
      nm.fordblks = mfree;
      nm.keepcost = m->topsize;
    }

    POSTACTION(m);
  }
  return nm;
}
3561#endif /* !NO_MALLINFO */
3562
3563#if !NO_MALLOC_STATS
/* Print footprint and usage statistics for mstate m to stderr.  The
   tallies are gathered under the arena lock, but the lock is dropped
   before printing so fprintf cannot deadlock against the allocator. */
static void internal_malloc_stats(mstate m) {
  ensure_initialization();
  if (!PREACTION(m)) {
    size_t maxfp = 0;
    size_t fp = 0;
    size_t used = 0;
    check_malloc_state(m);
    if (is_initialized(m)) {
      msegmentptr s = &m->seg;
      maxfp = m->max_footprint;
      fp = m->footprint;
      used = fp - (m->topsize + TOP_FOOT_SIZE);

      /* subtract every free chunk from the used total */
      while (s != 0) {
        mchunkptr q = align_as_chunk(s->base);
        while (segment_holds(s, q) &&
               q != m->top && q->head != FENCEPOST_HEAD) {
          if (!is_inuse(q))
            used -= chunksize(q);
          q = next_chunk(q);
        }
        s = s->next;
      }
    }
    POSTACTION(m); /* drop lock */
    fprintf(stderr, "max system bytes = %10lu\n", (unsigned long)(maxfp));
    fprintf(stderr, "system bytes = %10lu\n", (unsigned long)(fp));
    fprintf(stderr, "in use bytes = %10lu\n", (unsigned long)(used));
  }
}
3594#endif /* NO_MALLOC_STATS */
3595
3596/* ----------------------- Operations on smallbins ----------------------- */
3597
3598/*
3599 Various forms of linking and unlinking are defined as macros. Even
3600 the ones for trees, which are very long but have very short typical
3601 paths. This is ugly but reduces reliance on inlining support of
3602 compilers.
3603*/
3604
/* Link a free chunk into a smallbin */
/* Pushes P (size S) at the front of its smallbin's circular list,
   setting the smallmap bit if the bin was empty.  The existing front
   pointer is address-validated (RTCHECK) before use. */
#define insert_small_chunk(M, P, S) {\
  bindex_t I = small_index(S);\
  mchunkptr B = smallbin_at(M, I);\
  mchunkptr F = B;\
  assert(S >= MIN_CHUNK_SIZE);\
  if (!smallmap_is_marked(M, I))\
    mark_smallmap(M, I);\
  else if (RTCHECK(ok_address(M, B->fd)))\
    F = B->fd;\
  else {\
    CORRUPTION_ERROR_ACTION(M);\
  }\
  B->fd = P;\
  F->bk = P;\
  P->fd = F;\
  P->bk = B;\
}

/* Unlink a chunk from a smallbin */
/* Removes P (size S) from its smallbin list, clearing the smallmap bit
   when the bin becomes empty.  Both neighbors are validated against
   the bin header or checked for address sanity and back-link
   consistency before relinking. */
#define unlink_small_chunk(M, P, S) {\
  mchunkptr F = P->fd;\
  mchunkptr B = P->bk;\
  bindex_t I = small_index(S);\
  assert(P != B);\
  assert(P != F);\
  assert(chunksize(P) == small_index2size(I));\
  if (RTCHECK(F == smallbin_at(M,I) || (ok_address(M, F) && F->bk == P))) { \
    if (B == F) {\
      clear_smallmap(M, I);\
    }\
    else if (RTCHECK(B == smallbin_at(M,I) ||\
                     (ok_address(M, B) && B->fd == P))) {\
      F->bk = B;\
      B->fd = F;\
    }\
    else {\
      CORRUPTION_ERROR_ACTION(M);\
    }\
  }\
  else {\
    CORRUPTION_ERROR_ACTION(M);\
  }\
}

/* Unlink the first chunk from a smallbin */
/* Fast path used when P is known to be the first chunk of bin B at
   index I (no need to re-derive the bin from P's size). */
#define unlink_first_small_chunk(M, B, P, I) {\
  mchunkptr F = P->fd;\
  assert(P != B);\
  assert(P != F);\
  assert(chunksize(P) == small_index2size(I));\
  if (B == F) {\
    clear_smallmap(M, I);\
  }\
  else if (RTCHECK(ok_address(M, F) && F->bk == P)) {\
    F->bk = B;\
    B->fd = F;\
  }\
  else {\
    CORRUPTION_ERROR_ACTION(M);\
  }\
}

/* Replace dv node, binning the old one */
/* Used only when dvsize known to be small */
#define replace_dv(M, P, S) {\
  size_t DVS = M->dvsize;\
  assert(is_small(DVS));\
  if (DVS != 0) {\
    mchunkptr DV = M->dv;\
    insert_small_chunk(M, DV, DVS);\
  }\
  M->dvsize = S;\
  M->dv = P;\
}
3680
3681/* ------------------------- Operations on trees ------------------------- */
3682
/* Insert chunk into tree */
/* Inserts chunk X of size S into its treebin.  If the bin is empty, X
   becomes the root (parent points at the bin header).  Otherwise the
   tree is descended one size bit per level: X is attached as a new
   leaf at the first empty child slot, or — if a node of exactly size S
   is found — appended to that node's same-size fd/bk ring with
   parent == 0.  Child pointers are address-validated before writes. */
#define insert_large_chunk(M, X, S) {\
  tbinptr* H;\
  bindex_t I;\
  compute_tree_index(S, I);\
  H = treebin_at(M, I);\
  X->index = I;\
  X->child[0] = X->child[1] = 0;\
  if (!treemap_is_marked(M, I)) {\
    mark_treemap(M, I);\
    *H = X;\
    X->parent = (tchunkptr)H;\
    X->fd = X->bk = X;\
  }\
  else {\
    tchunkptr T = *H;\
    size_t K = S << leftshift_for_tree_index(I);\
    for (;;) {\
      if (chunksize(T) != S) {\
        tchunkptr* C = &(T->child[(K >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]);\
        K <<= 1;\
        if (*C != 0)\
          T = *C;\
        else if (RTCHECK(ok_address(M, C))) {\
          *C = X;\
          X->parent = T;\
          X->fd = X->bk = X;\
          break;\
        }\
        else {\
          CORRUPTION_ERROR_ACTION(M);\
          break;\
        }\
      }\
      else {\
        tchunkptr F = T->fd;\
        if (RTCHECK(ok_address(M, T) && ok_address(M, F))) {\
          T->fd = F->bk = X;\
          X->fd = F;\
          X->bk = T;\
          X->parent = 0;\
          break;\
        }\
        else {\
          CORRUPTION_ERROR_ACTION(M);\
          break;\
        }\
      }\
    }\
  }\
}
3734
3735/*
3736 Unlink steps:
3737
3738 1. If x is a chained node, unlink it from its same-sized fd/bk links
3739 and choose its bk node as its replacement.
3740 2. If x was the last node of its size, but not a leaf node, it must
3741 be replaced with a leaf node (not merely one with an open left or
     right), to make sure that lefts and rights of descendants
     correspond properly to bit masks. We use the rightmost descendant
     of x. We could use any other leaf, but this is easy to locate and
3745 tends to counteract removal of leftmosts elsewhere, and so keeps
3746 paths shorter than minimally guaranteed. This doesn't loop much
3747 because on average a node in a tree is near the bottom.
3748 3. If x is the base of a chain (i.e., has parent links) relink
3749 x's parent and children to x's replacement (or null if none).
3750*/
3751
/* Remove chunk X from its treebin (see "Unlink steps" above the
   FOOTERS section for the algorithm): a chained node is spliced out of
   its same-size ring; a chain base is replaced by its bk or, when it
   was the only node of its size, by its rightmost descendant leaf,
   after which parent and child links are transplanted onto the
   replacement.  All pointers read from the heap are RTCHECK-validated
   before use. */
#define unlink_large_chunk(M, X) {\
  tchunkptr XP = X->parent;\
  tchunkptr R;\
  if (X->bk != X) {\
    tchunkptr F = X->fd;\
    R = X->bk;\
    if (RTCHECK(ok_address(M, F) && F->bk == X && R->fd == X)) {\
      F->bk = R;\
      R->fd = F;\
    }\
    else {\
      CORRUPTION_ERROR_ACTION(M);\
    }\
  }\
  else {\
    tchunkptr* RP;\
    if (((R = *(RP = &(X->child[1]))) != 0) ||\
        ((R = *(RP = &(X->child[0]))) != 0) {\
      tchunkptr* CP;\
      while ((*(CP = &(R->child[1])) != 0) ||\
             (*(CP = &(R->child[0])) != 0)) {\
        R = *(RP = CP);\
      }\
      if (RTCHECK(ok_address(M, RP)))\
        *RP = 0;\
      else {\
        CORRUPTION_ERROR_ACTION(M);\
      }\
    }\
  }\
  if (XP != 0) {\
    tbinptr* H = treebin_at(M, X->index);\
    if (X == *H) {\
      if ((*H = R) == 0) \
        clear_treemap(M, X->index);\
    }\
    else if (RTCHECK(ok_address(M, XP))) {\
      if (XP->child[0] == X) \
        XP->child[0] = R;\
      else \
        XP->child[1] = R;\
    }\
    else\
      CORRUPTION_ERROR_ACTION(M);\
    if (R != 0) {\
      if (RTCHECK(ok_address(M, R))) {\
        tchunkptr C0, C1;\
        R->parent = XP;\
        if ((C0 = X->child[0]) != 0) {\
          if (RTCHECK(ok_address(M, C0))) {\
            R->child[0] = C0;\
            C0->parent = R;\
          }\
          else\
            CORRUPTION_ERROR_ACTION(M);\
        }\
        if ((C1 = X->child[1]) != 0) {\
          if (RTCHECK(ok_address(M, C1))) {\
            R->child[1] = C1;\
            C1->parent = R;\
          }\
          else\
            CORRUPTION_ERROR_ACTION(M);\
        }\
      }\
      else\
        CORRUPTION_ERROR_ACTION(M);\
    }\
  }\
}
3822
/* Relays to large vs small bin operations */
/* Dispatch by size class: small chunks go to the smallbin list macros,
   everything else to the treebin macros (with the required tchunkptr
   cast). */

#define insert_chunk(M, P, S)\
  if (is_small(S)) insert_small_chunk(M, P, S)\
  else { tchunkptr TP = (tchunkptr)(P); insert_large_chunk(M, TP, S); }

#define unlink_chunk(M, P, S)\
  if (is_small(S)) unlink_small_chunk(M, P, S)\
  else { tchunkptr TP = (tchunkptr)(P); unlink_large_chunk(M, TP); }
3832
3833
/* Relays to internal calls to malloc/free from realloc, memalign etc */
/* Route through dlmalloc/dlfree for the global arena and through the
   mspace entry points otherwise, depending on how mspace support is
   configured at compile time. */

#if ONLY_MSPACES
#define internal_malloc(m, b) mspace_malloc(m, b)
#define internal_free(m, mem) mspace_free(m,mem);
#else /* ONLY_MSPACES */
#if MSPACES
#define internal_malloc(m, b)\
  ((m == gm)? dlmalloc(b) : mspace_malloc(m, b))
#define internal_free(m, mem)\
  if (m == gm) dlfree(mem); else mspace_free(m,mem);
#else /* MSPACES */
#define internal_malloc(m, b) dlmalloc(b)
#define internal_free(m, mem) dlfree(mem)
#endif /* MSPACES */
#endif /* ONLY_MSPACES */
3850
3851/* ----------------------- Direct-mmapping chunks ----------------------- */
3852
3853/*
3854 Directly mmapped chunks are set up with an offset to the start of
3855 the mmapped region stored in the prev_foot field of the chunk. This
3856 allows reconstruction of the required argument to MUNMAP when freed,
3857 and also allows adjustment of the returned chunk to meet alignment
3858 requirements (especially in memalign).
3859*/
3860
/* Malloc using mmap */
/* Allocate a chunk of nb usable bytes directly via mmap, bypassing the
   bins.  The alignment offset is stored in prev_foot so the original
   mapping base can be recovered at free time, and the region is closed
   with a fencepost header pair.  Returns the user pointer, or 0 on
   size wraparound, footprint-limit violation, or mmap failure. */
static void* mmap_alloc(mstate m, size_t nb) {
  size_t mmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
  if (m->footprint_limit != 0) {
    size_t fp = m->footprint + mmsize;
    /* fp <= footprint catches overflow of the addition */
    if (fp <= m->footprint || fp > m->footprint_limit)
      return 0;
  }
  if (mmsize > nb) { /* Check for wrap around 0 */
    char* mm = (char*)(CALL_DIRECT_MMAP(mmsize));
    if (mm != CMFAIL) {
      size_t offset = align_offset(chunk2mem(mm));
      size_t psize = mmsize - offset - MMAP_FOOT_PAD;
      mchunkptr p = (mchunkptr)(mm + offset);
      p->prev_foot = offset;
      p->head = psize;
      mark_inuse_foot(m, p, psize);
      chunk_plus_offset(p, psize)->head = FENCEPOST_HEAD;
      chunk_plus_offset(p, psize+SIZE_T_SIZE)->head = 0;

      /* maintain least_addr and footprint accounting */
      if (m->least_addr == 0 || mm < m->least_addr)
        m->least_addr = mm;
      if ((m->footprint += mmsize) > m->max_footprint)
        m->max_footprint = m->footprint;
      assert(is_aligned(chunk2mem(p)));
      check_mmapped_chunk(m, p);
      return chunk2mem(p);
    }
  }
  return 0;
}
3892
/* Realloc using mmap */
/* Resize a directly-mmapped chunk to nb usable bytes: keep it in place
   when it is already big enough (but not wastefully so), otherwise
   attempt CALL_MREMAP on the original mapping.  Returns the (possibly
   moved) chunk pointer, or 0 if nb is small-bin sized or remapping
   fails. */
static mchunkptr mmap_resize(mstate m, mchunkptr oldp, size_t nb, int flags) {
  size_t oldsize = chunksize(oldp);
  (void)flags; /* placate people compiling -Wunused */
  if (is_small(nb)) /* Can't shrink mmap regions below small size */
    return 0;
  /* Keep old chunk if big enough but not too big */
  if (oldsize >= nb + SIZE_T_SIZE &&
      (oldsize - nb) <= (mparams.granularity << 1))
    return oldp;
  else {
    /* prev_foot holds the alignment offset back to the mapping base */
    size_t offset = oldp->prev_foot;
    size_t oldmmsize = oldsize + offset + MMAP_FOOT_PAD;
    size_t newmmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
    char* cp = (char*)CALL_MREMAP((char*)oldp - offset,
                                  oldmmsize, newmmsize, flags);
    if (cp != CMFAIL) {
      mchunkptr newp = (mchunkptr)(cp + offset);
      size_t psize = newmmsize - offset - MMAP_FOOT_PAD;
      newp->head = psize;
      mark_inuse_foot(m, newp, psize);
      /* rewrite the trailing fencepost pair for the new size */
      chunk_plus_offset(newp, psize)->head = FENCEPOST_HEAD;
      chunk_plus_offset(newp, psize+SIZE_T_SIZE)->head = 0;

      if (cp < m->least_addr)
        m->least_addr = cp;
      if ((m->footprint += newmmsize - oldmmsize) > m->max_footprint)
        m->max_footprint = m->footprint;
      check_mmapped_chunk(m, newp);
      return newp;
    }
  }
  return 0;
}
3927
3928
3929/* -------------------------- mspace management -------------------------- */
3930
/* Initialize top chunk and its size */
/* Installs p (after alignment adjustment) as m's top chunk of psize
   bytes, writes the fake trailing chunk that reserves TOP_FOOT_SIZE of
   overhead, and resets the trim check threshold. */
static void init_top(mstate m, mchunkptr p, size_t psize) {
  /* Ensure alignment */
  size_t offset = align_offset(chunk2mem(p));
  p = (mchunkptr)((char*)p + offset);
  psize -= offset;

  m->top = p;
  m->topsize = psize;
  p->head = psize | PINUSE_BIT;
  /* set size of fake trailing chunk holding overhead space only once */
  chunk_plus_offset(p, psize)->head = TOP_FOOT_SIZE;
  m->trim_check = mparams.trim_threshold; /* reset on each update */
}
3945
3946/* Initialize bins for a new mstate that is otherwise zeroed out */
3947static void init_bins(mstate m) {
3948 /* Establish circular links for smallbins */
3949 bindex_t i;
3950 for (i = 0; i < NSMALLBINS; ++i) {
3951 sbinptr bin = smallbin_at(m,i);
3952 bin->fd = bin->bk = bin;
3953 }
3954}
3955
3956#if PROCEED_ON_ERROR
3957
/* default corruption action */
/* PROCEED_ON_ERROR recovery: abandon all tracked memory (leaking it)
   and reset m to a pristine empty state so subsequent calls can limp
   along after detected heap corruption. */
static void reset_on_error(mstate m) {
  int i;
  ++malloc_corruption_error_count;
  /* Reinitialize fields to forget about all memory */
  m->smallmap = m->treemap = 0;
  m->dvsize = m->topsize = 0;
  m->seg.base = 0;
  m->seg.size = 0;
  m->seg.next = 0;
  m->top = m->dv = 0;
  for (i = 0; i < NTREEBINS; ++i)
    *treebin_at(m, i) = 0;
  init_bins(m);
}
3973#endif /* PROCEED_ON_ERROR */
3974
/* Allocate chunk and prepend remainder with chunk in successor base. */
/* newbase is fresh memory that ends where oldbase begins.  Carve an
   in-use chunk of nb bytes at the start of newbase and merge the
   remainder q with whatever starts at oldbase: extend top or dv in
   place if that is what follows, otherwise coalesce with a free first
   chunk and bin the result.  Returns the user pointer for the nb-byte
   allocation. */
static void* prepend_alloc(mstate m, char* newbase, char* oldbase,
                           size_t nb) {
  mchunkptr p = align_as_chunk(newbase);
  mchunkptr oldfirst = align_as_chunk(oldbase);
  size_t psize = (char*)oldfirst - (char*)p;
  mchunkptr q = chunk_plus_offset(p, nb);
  size_t qsize = psize - nb;
  set_size_and_pinuse_of_inuse_chunk(m, p, nb);

  assert((char*)oldfirst > (char*)q);
  assert(pinuse(oldfirst));
  assert(qsize >= MIN_CHUNK_SIZE);

  /* consolidate remainder with first chunk of old base */
  if (oldfirst == m->top) {
    size_t tsize = m->topsize += qsize;
    m->top = q;
    q->head = tsize | PINUSE_BIT;
    check_top_chunk(m, q);
  }
  else if (oldfirst == m->dv) {
    size_t dsize = m->dvsize += qsize;
    m->dv = q;
    set_size_and_pinuse_of_free_chunk(q, dsize);
  }
  else {
    /* if oldfirst is free, absorb it into the remainder first */
    if (!is_inuse(oldfirst)) {
      size_t nsize = chunksize(oldfirst);
      unlink_chunk(m, oldfirst, nsize);
      oldfirst = chunk_plus_offset(oldfirst, nsize);
      qsize += nsize;
    }
    set_free_with_pinuse(q, qsize, oldfirst);
    insert_chunk(m, q, qsize);
    check_free_chunk(m, q);
  }

  check_malloced_chunk(m, chunk2mem(p), nb);
  return chunk2mem(p);
}
4016
/* Add a segment to hold a new noncontiguous region */
/* Records [tbase, tbase+tsize) as a new segment and makes it the new
   top.  The previous segment list head is pushed into a pseudo-chunk
   carved from the tail of the old top's segment, the tail is sealed
   with fencepost markers, and any leftover slice of the old top is
   freed into a bin. */
static void add_segment(mstate m, char* tbase, size_t tsize, flag_t mmapped) {
  /* Determine locations and sizes of segment, fenceposts, old top */
  char* old_top = (char*)m->top;
  msegmentptr oldsp = segment_holding(m, old_top);
  char* old_end = oldsp->base + oldsp->size;
  size_t ssize = pad_request(sizeof(struct malloc_segment));
  char* rawsp = old_end - (ssize + FOUR_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
  size_t offset = align_offset(chunk2mem(rawsp));
  char* asp = rawsp + offset;
  /* don't carve below old_top: fall back to old_top itself if the
     aligned spot would leave less than a minimal chunk */
  char* csp = (asp < (old_top + MIN_CHUNK_SIZE))? old_top : asp;
  mchunkptr sp = (mchunkptr)csp;
  msegmentptr ss = (msegmentptr)(chunk2mem(sp));
  mchunkptr tnext = chunk_plus_offset(sp, ssize);
  mchunkptr p = tnext;
  int nfences = 0;

  /* reset top to new space */
  init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE);

  /* Set up segment record */
  assert(is_aligned(ss));
  set_size_and_pinuse_of_inuse_chunk(m, sp, ssize);
  *ss = m->seg; /* Push current record */
  m->seg.base = tbase;
  m->seg.size = tsize;
  m->seg.sflags = mmapped;
  m->seg.next = ss;

  /* Insert trailing fenceposts */
  for (;;) {
    mchunkptr nextp = chunk_plus_offset(p, SIZE_T_SIZE);
    p->head = FENCEPOST_HEAD;
    ++nfences;
    if ((char*)(&(nextp->head)) < old_end)
      p = nextp;
    else
      break;
  }
  (void)nfences;
  assert(nfences >= 2);

  /* Insert the rest of old top into a bin as an ordinary free chunk */
  if (csp != old_top) {
    mchunkptr q = (mchunkptr)old_top;
    size_t psize = csp - old_top;
    mchunkptr tn = chunk_plus_offset(q, psize);
    set_free_with_pinuse(q, psize, tn);
    insert_chunk(m, q, psize);
  }

  check_top_chunk(m, m->top);
}
4070
4071/* -------------------------- System allocation -------------------------- */
4072
4073/* Get memory from system using MORECORE or MMAP */
4074static void* sys_alloc(mstate m, size_t nb) {
4075 char* tbase = CMFAIL;
4076 size_t tsize = 0;
4077 flag_t mmap_flag = 0;
4078 size_t asize; /* allocation size */
4079
4080 ensure_initialization();
4081
4082 /* Directly map large chunks, but only if already initialized */
4083 if (use_mmap(m) && nb >= mparams.mmap_threshold && m->topsize != 0) {
4084 void* mem = mmap_alloc(m, nb);
4085 if (mem != 0)
4086 return mem;
4087 }
4088
4089 asize = granularity_align(nb + SYS_ALLOC_PADDING);
4090 if (asize <= nb)
4091 return 0; /* wraparound */
4092 if (m->footprint_limit != 0) {
4093 size_t fp = m->footprint + asize;
4094 if (fp <= m->footprint || fp > m->footprint_limit)
4095 return 0;
4096 }
4097
4098 /*
4099 Try getting memory in any of three ways (in most-preferred to
4100 least-preferred order):
4101 1. A call to MORECORE that can normally contiguously extend memory.
4102 (disabled if not MORECORE_CONTIGUOUS or not HAVE_MORECORE or
4103 or main space is mmapped or a previous contiguous call failed)
4104 2. A call to MMAP new space (disabled if not HAVE_MMAP).
4105 Note that under the default settings, if MORECORE is unable to
4106 fulfill a request, and HAVE_MMAP is true, then mmap is
4107 used as a noncontiguous system allocator. This is a useful backup
4108 strategy for systems with holes in address spaces -- in this case
4109 sbrk cannot contiguously expand the heap, but mmap may be able to
4110 find space.
4111 3. A call to MORECORE that cannot usually contiguously extend memory.
4112 (disabled if not HAVE_MORECORE)
4113
4114 In all cases, we need to request enough bytes from system to ensure
4115 we can malloc nb bytes upon success, so pad with enough space for
4116 top_foot, plus alignment-pad to make sure we don't lose bytes if
4117 not on boundary, and round this up to a granularity unit.
4118 */
4119
4120 if (MORECORE_CONTIGUOUS && !use_noncontiguous(m)) {
4121 char* br = CMFAIL;
4122 size_t ssize = asize; /* sbrk call size */
4123 msegmentptr ss = (m->top == 0)? 0 : segment_holding(m, (char*)m->top);
4124 ACQUIRE_MALLOC_GLOBAL_LOCK();
4125
4126 if (ss == 0) { /* First time through or recovery */
4127 char* base = (char*)CALL_MORECORE(0);
4128 if (base != CMFAIL) {
4129 size_t fp;
4130 /* Adjust to end on a page boundary */
4131 if (!is_page_aligned(base))
4132 ssize += (page_align((size_t)base) - (size_t)base);
4133 fp = m->footprint + ssize; /* recheck limits */
4134 if (ssize > nb && ssize < HALF_MAX_SIZE_T &&
4135 (m->footprint_limit == 0 ||
4136 (fp > m->footprint && fp <= m->footprint_limit)) &&
4137 (br = (char*)(CALL_MORECORE(ssize))) == base) {
4138 tbase = base;
4139 tsize = ssize;
4140 }
4141 }
4142 }
4143 else {
4144 /* Subtract out existing available top space from MORECORE request. */
4145 ssize = granularity_align(nb - m->topsize + SYS_ALLOC_PADDING);
4146 /* Use mem here only if it did continuously extend old space */
4147 if (ssize < HALF_MAX_SIZE_T &&
4148 (br = (char*)(CALL_MORECORE(ssize))) == ss->base+ss->size) {
4149 tbase = br;
4150 tsize = ssize;
4151 }
4152 }
4153
4154 if (tbase == CMFAIL) { /* Cope with partial failure */
4155 if (br != CMFAIL) { /* Try to use/extend the space we did get */
4156 if (ssize < HALF_MAX_SIZE_T &&
4157 ssize < nb + SYS_ALLOC_PADDING) {
4158 size_t esize = granularity_align(nb + SYS_ALLOC_PADDING - ssize);
4159 if (esize < HALF_MAX_SIZE_T) {
4160 char* end = (char*)CALL_MORECORE(esize);
4161 if (end != CMFAIL)
4162 ssize += esize;
4163 else { /* Can't use; try to release */
4164 (void) CALL_MORECORE(-ssize);
4165 br = CMFAIL;
4166 }
4167 }
4168 }
4169 }
4170 if (br != CMFAIL) { /* Use the space we did get */
4171 tbase = br;
4172 tsize = ssize;
4173 }
4174 else
4175 disable_contiguous(m); /* Don't try contiguous path in the future */
4176 }
4177
4178 RELEASE_MALLOC_GLOBAL_LOCK();
4179 }
4180
4181 if (HAVE_MMAP && tbase == CMFAIL) { /* Try MMAP */
4182 char* mp = (char*)(CALL_MMAP(asize));
4183 if (mp != CMFAIL) {
4184 tbase = mp;
4185 tsize = asize;
4186 mmap_flag = USE_MMAP_BIT;
4187 }
4188 }
4189
4190 if (HAVE_MORECORE && tbase == CMFAIL) { /* Try noncontiguous MORECORE */
4191 if (asize < HALF_MAX_SIZE_T) {
4192 char* br = CMFAIL;
4193 char* end = CMFAIL;
4194 ACQUIRE_MALLOC_GLOBAL_LOCK();
4195 br = (char*)(CALL_MORECORE(asize));
4196 end = (char*)(CALL_MORECORE(0));
4197 RELEASE_MALLOC_GLOBAL_LOCK();
4198 if (br != CMFAIL && end != CMFAIL && br < end) {
4199 size_t ssize = end - br;
4200 if (ssize > nb + TOP_FOOT_SIZE) {
4201 tbase = br;
4202 tsize = ssize;
4203 }
4204 }
4205 }
4206 }
4207
4208 if (tbase != CMFAIL) {
4209
4210 if ((m->footprint += tsize) > m->max_footprint)
4211 m->max_footprint = m->footprint;
4212
4213 if (!is_initialized(m)) { /* first-time initialization */
4214 if (m->least_addr == 0 || tbase < m->least_addr)
4215 m->least_addr = tbase;
4216 m->seg.base = tbase;
4217 m->seg.size = tsize;
4218 m->seg.sflags = mmap_flag;
4219 m->magic = mparams.magic;
4220 m->release_checks = MAX_RELEASE_CHECK_RATE;
4221 init_bins(m);
4222#if !ONLY_MSPACES
4223 if (is_global(m))
4224 init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE);
4225 else
4226#endif
4227 {
4228 /* Offset top by embedded malloc_state */
4229 mchunkptr mn = next_chunk(mem2chunk(m));
4230 init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) -TOP_FOOT_SIZE);
4231 }
4232 }
4233
4234 else {
4235 /* Try to merge with an existing segment */
4236 msegmentptr sp = &m->seg;
4237 /* Only consider most recent segment if traversal suppressed */
4238 while (sp != 0 && tbase != sp->base + sp->size)
4239 sp = (NO_SEGMENT_TRAVERSAL) ? 0 : sp->next;
4240 if (sp != 0 &&
4241 !is_extern_segment(sp) &&
4242 (sp->sflags & USE_MMAP_BIT) == mmap_flag &&
4243 segment_holds(sp, m->top)) { /* append */
4244 sp->size += tsize;
4245 init_top(m, m->top, m->topsize + tsize);
4246 }
4247 else {
4248 if (tbase < m->least_addr)
4249 m->least_addr = tbase;
4250 sp = &m->seg;
4251 while (sp != 0 && sp->base != tbase + tsize)
4252 sp = (NO_SEGMENT_TRAVERSAL) ? 0 : sp->next;
4253 if (sp != 0 &&
4254 !is_extern_segment(sp) &&
4255 (sp->sflags & USE_MMAP_BIT) == mmap_flag) {
4256 char* oldbase = sp->base;
4257 sp->base = tbase;
4258 sp->size += tsize;
4259 return prepend_alloc(m, tbase, oldbase, nb);
4260 }
4261 else
4262 add_segment(m, tbase, tsize, mmap_flag);
4263 }
4264 }
4265
4266 if (nb < m->topsize) { /* Allocate from new or extended top space */
4267 size_t rsize = m->topsize -= nb;
4268 mchunkptr p = m->top;
4269 mchunkptr r = m->top = chunk_plus_offset(p, nb);
4270 r->head = rsize | PINUSE_BIT;
4271 set_size_and_pinuse_of_inuse_chunk(m, p, nb);
4272 check_top_chunk(m, m->top);
4273 check_malloced_chunk(m, chunk2mem(p), nb);
4274 return chunk2mem(p);
4275 }
4276 }
4277
4278 MALLOC_FAILURE_ACTION;
4279 return 0;
4280}
4281
4282/* ----------------------- system deallocation -------------------------- */
4283
/* Unmap and unlink any mmapped segments that don't contain used chunks.
   A segment is releasable when it was obtained via mmap (not donated by
   the user as an "extern" segment) and its first aligned chunk is free
   and spans the whole segment (minus the TOP_FOOT bookkeeping).
   Returns the total number of bytes returned to the system; also
   decreases m->footprint accordingly and resets m->release_checks so the
   next sweep happens after at least max(nsegs, MAX_RELEASE_CHECK_RATE)
   frees. When NO_SEGMENT_TRAVERSAL is set only the first segment after
   the head is examined. */
static size_t release_unused_segments(mstate m) {
  size_t released = 0;
  int nsegs = 0;
  msegmentptr pred = &m->seg;
  msegmentptr sp = pred->next;
  while (sp != 0) {
    char* base = sp->base;
    size_t size = sp->size;
    msegmentptr next = sp->next;
    ++nsegs;
    if (is_mmapped_segment(sp) && !is_extern_segment(sp)) {
      mchunkptr p = align_as_chunk(base);
      size_t psize = chunksize(p);
      /* Can unmap if first chunk holds entire segment and not pinned */
      if (!is_inuse(p) && (char*)p + psize >= base + size - TOP_FOOT_SIZE) {
        tchunkptr tp = (tchunkptr)p;
        assert(segment_holds(sp, (char*)sp));
        /* Detach the spanning chunk from whichever structure holds it:
           either it is the designated-victim chunk or it sits in a tree bin. */
        if (p == m->dv) {
          m->dv = 0;
          m->dvsize = 0;
        }
        else {
          unlink_large_chunk(m, tp);
        }
        if (CALL_MUNMAP(base, size) == 0) {
          released += size;
          m->footprint -= size;
          /* unlink obsoleted record; the segment struct itself lived inside
             the unmapped region, so simply splice it out of the list */
          sp = pred;
          sp->next = next;
        }
        else { /* back out if cannot unmap */
          insert_large_chunk(m, tp, psize);
        }
      }
    }
    if (NO_SEGMENT_TRAVERSAL) /* scan only first segment */
      break;
    pred = sp;
    sp = next;
  }
  /* Reset check counter */
  m->release_checks = (((size_t) nsegs > (size_t) MAX_RELEASE_CHECK_RATE)?
                       (size_t) nsegs : (size_t) MAX_RELEASE_CHECK_RATE);
  return released;
}
4331
/* Give back excess top-of-heap memory to the system.
   pad: number of bytes of top space the caller wants kept in reserve
   (TOP_FOOT_SIZE is added on top of it for segment overhead).
   The shrink is performed in whole granularity units, keeping at least
   one. For an mmapped top segment this prefers CALL_MREMAP and falls
   back to unmapping the tail; for an sbrk-style segment it issues a
   negative MORECORE, but only after confirming the break is still where
   this allocator last left it. Finally, wholly-unused mmapped segments
   are released via release_unused_segments. On total failure the trim
   threshold is disabled to avoid repeated futile attempts.
   Returns 1 if any memory was released, else 0. */
static int sys_trim(mstate m, size_t pad) {
  size_t released = 0;
  ensure_initialization();
  if (pad < MAX_REQUEST && is_initialized(m)) {
    pad += TOP_FOOT_SIZE; /* ensure enough room for segment overhead */

    if (m->topsize > pad) {
      /* Shrink top space in granularity-size units, keeping at least one */
      size_t unit = mparams.granularity;
      size_t extra = ((m->topsize - pad + (unit - SIZE_T_ONE)) / unit -
                      SIZE_T_ONE) * unit;
      msegmentptr sp = segment_holding(m, (char*)m->top);

      if (!is_extern_segment(sp)) {
        if (is_mmapped_segment(sp)) {
          if (HAVE_MMAP &&
              sp->size >= extra &&
              !has_segment_link(m, sp)) { /* can't shrink if pinned */
            size_t newsize = sp->size - extra;
            (void)newsize; /* placate people compiling -Wunused-variable */
            /* Prefer mremap, fall back to munmap */
            if ((CALL_MREMAP(sp->base, sp->size, newsize, 0) != MFAIL) ||
                (CALL_MUNMAP(sp->base + newsize, extra) == 0)) {
              released = extra;
            }
          }
        }
        else if (HAVE_MORECORE) {
          if (extra >= HALF_MAX_SIZE_T) /* Avoid wrapping negative */
            extra = (HALF_MAX_SIZE_T) + SIZE_T_ONE - unit;
          ACQUIRE_MALLOC_GLOBAL_LOCK();
          {
            /* Make sure end of memory is where we last set it. */
            char* old_br = (char*)(CALL_MORECORE(0));
            if (old_br == sp->base + sp->size) {
              char* rel_br = (char*)(CALL_MORECORE(-extra));
              char* new_br = (char*)(CALL_MORECORE(0));
              if (rel_br != CMFAIL && new_br < old_br)
                released = old_br - new_br;
            }
          }
          RELEASE_MALLOC_GLOBAL_LOCK();
        }
      }

      if (released != 0) {
        sp->size -= released;
        m->footprint -= released;
        init_top(m, m->top, m->topsize - released);
        check_top_chunk(m, m->top);
      }
    }

    /* Unmap any unused mmapped segments */
    if (HAVE_MMAP)
      released += release_unused_segments(m);

    /* On failure, disable autotrim to avoid repeated failed future calls */
    if (released == 0 && m->topsize > m->trim_check)
      m->trim_check = MAX_SIZE_T;
  }

  return (released != 0)? 1 : 0;
}
4396
/* Consolidate and bin a chunk. Differs from exported versions
   of free mainly in that the chunk need not be marked as inuse.
   p is the chunk to dispose of and psize its size; the chunk is merged
   with free neighbors on both sides where possible, with the special
   cases: a directly-mmapped chunk is unmapped outright; merging into
   the top or designated-victim (dv) chunk just grows that chunk;
   otherwise the result is (re)inserted into the appropriate bin.
   Corruption detected by RTCHECK aborts via CORRUPTION_ERROR_ACTION.
*/
static void dispose_chunk(mstate m, mchunkptr p, size_t psize) {
  mchunkptr next = chunk_plus_offset(p, psize);
  if (!pinuse(p)) {
    /* Previous chunk is free: consolidate backward (or unmap). */
    mchunkptr prev;
    size_t prevsize = p->prev_foot;
    if (is_mmapped(p)) {
      psize += prevsize + MMAP_FOOT_PAD;
      if (CALL_MUNMAP((char*)p - prevsize, psize) == 0)
        m->footprint -= psize;
      return;
    }
    prev = chunk_minus_offset(p, prevsize);
    psize += prevsize;
    p = prev;
    if (RTCHECK(ok_address(m, prev))) { /* consolidate backward */
      if (p != m->dv) {
        unlink_chunk(m, p, prevsize);
      }
      else if ((next->head & INUSE_BITS) == INUSE_BITS) {
        /* Merged into dv and next is busy: just grow dv and stop. */
        m->dvsize = psize;
        set_free_with_pinuse(p, psize, next);
        return;
      }
    }
    else {
      CORRUPTION_ERROR_ACTION(m);
      return;
    }
  }
  if (RTCHECK(ok_address(m, next))) {
    if (!cinuse(next)) { /* consolidate forward */
      if (next == m->top) {
        /* Absorb into the top chunk; dv is invalidated if it was p. */
        size_t tsize = m->topsize += psize;
        m->top = p;
        p->head = tsize | PINUSE_BIT;
        if (p == m->dv) {
          m->dv = 0;
          m->dvsize = 0;
        }
        return;
      }
      else if (next == m->dv) {
        /* Absorb dv forward: p becomes the new, larger dv. */
        size_t dsize = m->dvsize += psize;
        m->dv = p;
        set_size_and_pinuse_of_free_chunk(p, dsize);
        return;
      }
      else {
        size_t nsize = chunksize(next);
        psize += nsize;
        unlink_chunk(m, next, nsize);
        set_size_and_pinuse_of_free_chunk(p, psize);
        if (p == m->dv) {
          m->dvsize = psize;
          return;
        }
      }
    }
    else {
      set_free_with_pinuse(p, psize, next);
    }
    insert_chunk(m, p, psize);
  }
  else {
    CORRUPTION_ERROR_ACTION(m);
  }
}
4467
4468/* ---------------------------- malloc --------------------------- */
4469
/* allocate a large request from the best fitting chunk in a treebin.
   nb is the padded request size. Searches the tree bin indexed by nb
   for the smallest chunk >= nb (best fit), falling over to the next
   non-empty larger bin when needed. If the best fit is no better than
   the current dv chunk, returns 0 so the caller uses dv instead.
   On success the chunk is unlinked, split if the remainder is at least
   MIN_CHUNK_SIZE (remainder re-binned), and its user pointer returned. */
static void* tmalloc_large(mstate m, size_t nb) {
  tchunkptr v = 0;
  size_t rsize = -nb; /* Unsigned negation */
  tchunkptr t;
  bindex_t idx;
  compute_tree_index(nb, idx);
  if ((t = *treebin_at(m, idx)) != 0) {
    /* Traverse tree for this bin looking for node with size == nb */
    size_t sizebits = nb << leftshift_for_tree_index(idx);
    tchunkptr rst = 0; /* The deepest untaken right subtree */
    for (;;) {
      tchunkptr rt;
      size_t trem = chunksize(t) - nb;
      if (trem < rsize) {
        v = t;
        if ((rsize = trem) == 0)
          break;
      }
      rt = t->child[1];
      /* Branch on the current bit of nb: left for 0, right for 1. */
      t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1];
      if (rt != 0 && rt != t)
        rst = rt;
      if (t == 0) {
        t = rst; /* set t to least subtree holding sizes > nb */
        break;
      }
      sizebits <<= 1;
    }
  }
  if (t == 0 && v == 0) { /* set t to root of next non-empty treebin */
    binmap_t leftbits = left_bits(idx2bit(idx)) & m->treemap;
    if (leftbits != 0) {
      bindex_t i;
      binmap_t leastbit = least_bit(leftbits);
      compute_bit2idx(leastbit, i);
      t = *treebin_at(m, i);
    }
  }

  while (t != 0) { /* find smallest of tree or subtree */
    size_t trem = chunksize(t) - nb;
    if (trem < rsize) {
      rsize = trem;
      v = t;
    }
    t = leftmost_child(t);
  }

  /* If dv is a better fit, return 0 so malloc will use it */
  if (v != 0 && rsize < (size_t)(m->dvsize - nb)) {
    if (RTCHECK(ok_address(m, v))) { /* split */
      mchunkptr r = chunk_plus_offset(v, nb);
      assert(chunksize(v) == rsize + nb);
      if (RTCHECK(ok_next(v, r))) {
        unlink_large_chunk(m, v);
        if (rsize < MIN_CHUNK_SIZE)
          set_inuse_and_pinuse(m, v, (rsize + nb));
        else {
          set_size_and_pinuse_of_inuse_chunk(m, v, nb);
          set_size_and_pinuse_of_free_chunk(r, rsize);
          insert_chunk(m, r, rsize);
        }
        return chunk2mem(v);
      }
    }
    CORRUPTION_ERROR_ACTION(m);
  }
  return 0;
}
4540
/* allocate a small request from the best fitting chunk in a treebin.
   Called when no smallbin or dv chunk fits; picks the lowest non-empty
   tree bin, walks down its leftmost spine to find the smallest chunk
   >= nb, unlinks it, and splits off the remainder into dv (via
   replace_dv) when it is large enough to stand as a chunk. */
static void* tmalloc_small(mstate m, size_t nb) {
  tchunkptr t, v;
  size_t rsize;
  bindex_t i;
  binmap_t leastbit = least_bit(m->treemap);
  compute_bit2idx(leastbit, i);
  v = t = *treebin_at(m, i);
  rsize = chunksize(t) - nb;

  /* Descend to the smallest chunk in this tree. */
  while ((t = leftmost_child(t)) != 0) {
    size_t trem = chunksize(t) - nb;
    if (trem < rsize) {
      rsize = trem;
      v = t;
    }
  }

  if (RTCHECK(ok_address(m, v))) {
    mchunkptr r = chunk_plus_offset(v, nb);
    assert(chunksize(v) == rsize + nb);
    if (RTCHECK(ok_next(v, r))) {
      unlink_large_chunk(m, v);
      if (rsize < MIN_CHUNK_SIZE)
        set_inuse_and_pinuse(m, v, (rsize + nb));
      else {
        set_size_and_pinuse_of_inuse_chunk(m, v, nb);
        set_size_and_pinuse_of_free_chunk(r, rsize);
        replace_dv(m, r, rsize);
      }
      return chunk2mem(v);
    }
  }

  CORRUPTION_ERROR_ACTION(m);
  return 0;
}
4578
4579#if !ONLY_MSPACES
4580
void* dlmalloc(size_t bytes) {
  /*
     Basic algorithm:
     If a small request (< 256 bytes minus per-chunk overhead):
       1. If one exists, use a remainderless chunk in associated smallbin.
          (Remainderless means that there are too few excess bytes to
          represent as a chunk.)
       2. If it is big enough, use the dv chunk, which is normally the
          chunk adjacent to the one used for the most recent small request.
       3. If one exists, split the smallest available chunk in a bin,
          saving remainder in dv.
       4. If it is big enough, use the top chunk.
       5. If available, get memory from system and use it
     Otherwise, for a large request:
       1. Find the smallest available binned chunk that fits, and use it
          if it is better fitting than dv chunk, splitting if necessary.
       2. If better fitting than any binned chunk, use the dv chunk.
       3. If it is big enough, use the top chunk.
       4. If request size >= mmap threshold, try to directly mmap this chunk.
       5. If available, get memory from system and use it

     The ugly goto's here ensure that postaction occurs along all paths.
  */

#if USE_LOCKS
  ensure_initialization(); /* initialize in sys_alloc if not using locks */
#endif

  if (!PREACTION(gm)) {
    void* mem;
    size_t nb; /* padded request size, including chunk overhead */
    if (bytes <= MAX_SMALL_REQUEST) {
      bindex_t idx;
      binmap_t smallbits;
      nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes);
      idx = small_index(nb);
      smallbits = gm->smallmap >> idx;

      if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */
        mchunkptr b, p;
        idx += ~smallbits & 1;       /* Uses next bin if idx empty */
        b = smallbin_at(gm, idx);
        p = b->fd;
        assert(chunksize(p) == small_index2size(idx));
        unlink_first_small_chunk(gm, b, p, idx);
        set_inuse_and_pinuse(gm, p, small_index2size(idx));
        mem = chunk2mem(p);
        check_malloced_chunk(gm, mem, nb);
        goto postaction;
      }

      else if (nb > gm->dvsize) {
        if (smallbits != 0) { /* Use chunk in next nonempty smallbin */
          mchunkptr b, p, r;
          size_t rsize;
          bindex_t i;
          binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx));
          binmap_t leastbit = least_bit(leftbits);
          compute_bit2idx(leastbit, i);
          b = smallbin_at(gm, i);
          p = b->fd;
          assert(chunksize(p) == small_index2size(i));
          unlink_first_small_chunk(gm, b, p, i);
          rsize = small_index2size(i) - nb;
          /* Fit here cannot be remainderless if 4byte sizes */
          if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE)
            set_inuse_and_pinuse(gm, p, small_index2size(i));
          else {
            set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
            r = chunk_plus_offset(p, nb);
            set_size_and_pinuse_of_free_chunk(r, rsize);
            replace_dv(gm, r, rsize);
          }
          mem = chunk2mem(p);
          check_malloced_chunk(gm, mem, nb);
          goto postaction;
        }

        else if (gm->treemap != 0 && (mem = tmalloc_small(gm, nb)) != 0) {
          check_malloced_chunk(gm, mem, nb);
          goto postaction;
        }
      }
    }
    else if (bytes >= MAX_REQUEST)
      nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */
    else {
      nb = pad_request(bytes);
      if (gm->treemap != 0 && (mem = tmalloc_large(gm, nb)) != 0) {
        check_malloced_chunk(gm, mem, nb);
        goto postaction;
      }
    }

    /* Fall-through: try the designated-victim chunk. */
    if (nb <= gm->dvsize) {
      size_t rsize = gm->dvsize - nb;
      mchunkptr p = gm->dv;
      if (rsize >= MIN_CHUNK_SIZE) { /* split dv */
        mchunkptr r = gm->dv = chunk_plus_offset(p, nb);
        gm->dvsize = rsize;
        set_size_and_pinuse_of_free_chunk(r, rsize);
        set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
      }
      else { /* exhaust dv */
        size_t dvs = gm->dvsize;
        gm->dvsize = 0;
        gm->dv = 0;
        set_inuse_and_pinuse(gm, p, dvs);
      }
      mem = chunk2mem(p);
      check_malloced_chunk(gm, mem, nb);
      goto postaction;
    }

    else if (nb < gm->topsize) { /* Split top */
      size_t rsize = gm->topsize -= nb;
      mchunkptr p = gm->top;
      mchunkptr r = gm->top = chunk_plus_offset(p, nb);
      r->head = rsize | PINUSE_BIT;
      set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
      mem = chunk2mem(p);
      check_top_chunk(gm, gm->top);
      check_malloced_chunk(gm, mem, nb);
      goto postaction;
    }

    /* Last resort: obtain more memory from the system. */
    mem = sys_alloc(gm, nb);

  postaction:
    POSTACTION(gm);
    return mem;
  }

  return 0;
}
4716
4717/* ---------------------------- free --------------------------- */
4718
void dlfree(void* mem) {
  /*
     Consolidate freed chunks with preceeding or succeeding bordering
     free chunks, if they exist, and then place in a bin. Intermixed
     with special cases for top, dv, mmapped chunks, and usage errors.
     free(NULL) is a no-op. When FOOTERS is on, the owning mstate is
     recovered from the chunk footer and validated before use.
  */

  if (mem != 0) {
    mchunkptr p = mem2chunk(mem);
#if FOOTERS
    mstate fm = get_mstate_for(p);
    if (!ok_magic(fm)) {
      USAGE_ERROR_ACTION(fm, p);
      return;
    }
#else /* FOOTERS */
#define fm gm
#endif /* FOOTERS */
    if (!PREACTION(fm)) {
      check_inuse_chunk(fm, p);
      if (RTCHECK(ok_address(fm, p) && ok_inuse(p))) {
        size_t psize = chunksize(p);
        mchunkptr next = chunk_plus_offset(p, psize);
        if (!pinuse(p)) {
          /* Previous chunk is free: consolidate backward (or unmap). */
          size_t prevsize = p->prev_foot;
          if (is_mmapped(p)) {
            psize += prevsize + MMAP_FOOT_PAD;
            if (CALL_MUNMAP((char*)p - prevsize, psize) == 0)
              fm->footprint -= psize;
            goto postaction;
          }
          else {
            mchunkptr prev = chunk_minus_offset(p, prevsize);
            psize += prevsize;
            p = prev;
            if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */
              if (p != fm->dv) {
                unlink_chunk(fm, p, prevsize);
              }
              else if ((next->head & INUSE_BITS) == INUSE_BITS) {
                /* Merged into dv and next is busy: just grow dv and stop. */
                fm->dvsize = psize;
                set_free_with_pinuse(p, psize, next);
                goto postaction;
              }
            }
            else
              goto erroraction;
          }
        }

        if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) {
          if (!cinuse(next)) { /* consolidate forward */
            if (next == fm->top) {
              /* Absorb into top; trim back to the system if top grew large. */
              size_t tsize = fm->topsize += psize;
              fm->top = p;
              p->head = tsize | PINUSE_BIT;
              if (p == fm->dv) {
                fm->dv = 0;
                fm->dvsize = 0;
              }
              if (should_trim(fm, tsize))
                sys_trim(fm, 0);
              goto postaction;
            }
            else if (next == fm->dv) {
              size_t dsize = fm->dvsize += psize;
              fm->dv = p;
              set_size_and_pinuse_of_free_chunk(p, dsize);
              goto postaction;
            }
            else {
              size_t nsize = chunksize(next);
              psize += nsize;
              unlink_chunk(fm, next, nsize);
              set_size_and_pinuse_of_free_chunk(p, psize);
              if (p == fm->dv) {
                fm->dvsize = psize;
                goto postaction;
              }
            }
          }
          else
            set_free_with_pinuse(p, psize, next);

          /* Bin the consolidated chunk; large frees also trigger the
             periodic sweep for releasable mmapped segments. */
          if (is_small(psize)) {
            insert_small_chunk(fm, p, psize);
            check_free_chunk(fm, p);
          }
          else {
            tchunkptr tp = (tchunkptr)p;
            insert_large_chunk(fm, tp, psize);
            check_free_chunk(fm, p);
            if (--fm->release_checks == 0)
              release_unused_segments(fm);
          }
          goto postaction;
        }
      }
    erroraction:
      USAGE_ERROR_ACTION(fm, p);
    postaction:
      POSTACTION(fm);
    }
  }
#if !FOOTERS
#undef fm
#endif /* FOOTERS */
}
4827
4828void* dlcalloc(size_t n_elements, size_t elem_size) {
4829 void* mem;
4830 size_t req = 0;
4831 if (n_elements != 0) {
4832 req = n_elements * elem_size;
4833 if (((n_elements | elem_size) & ~(size_t)0xffff) &&
4834 (req / n_elements != elem_size))
4835 req = MAX_SIZE_T; /* force downstream failure on overflow */
4836 }
4837 mem = dlmalloc(req);
4838 if (mem != 0 && calloc_must_clear(mem2chunk(mem)))
4839 memset(mem, 0, req);
4840 return mem;
4841}
4842
4843#endif /* !ONLY_MSPACES */
4844
4845/* ------------ Internal support for realloc, memalign, etc -------------- */
4846
/* Try to realloc; only in-place unless can_move true.
   p is an in-use chunk, nb the padded new size. Returns the (possibly
   relocated, mmap case only) chunk on success, or 0 when the request
   cannot be satisfied without a fresh allocate-copy-free, which the
   caller then performs. In-place strategies, in order: shrink and
   re-bin the tail; grow into the top chunk; grow into the dv chunk;
   grow into an adjacent free chunk. */
static mchunkptr try_realloc_chunk(mstate m, mchunkptr p, size_t nb,
                                   int can_move) {
  mchunkptr newp = 0;
  size_t oldsize = chunksize(p);
  mchunkptr next = chunk_plus_offset(p, oldsize);
  if (RTCHECK(ok_address(m, p) && ok_inuse(p) &&
              ok_next(p, next) && ok_pinuse(next))) {
    if (is_mmapped(p)) {
      newp = mmap_resize(m, p, nb, can_move);
    }
    else if (oldsize >= nb) { /* already big enough */
      size_t rsize = oldsize - nb;
      if (rsize >= MIN_CHUNK_SIZE) { /* split off remainder */
        mchunkptr r = chunk_plus_offset(p, nb);
        set_inuse(m, p, nb);
        set_inuse(m, r, rsize);
        dispose_chunk(m, r, rsize);
      }
      newp = p;
    }
    else if (next == m->top) { /* extend into top */
      if (oldsize + m->topsize > nb) {
        size_t newsize = oldsize + m->topsize;
        size_t newtopsize = newsize - nb;
        mchunkptr newtop = chunk_plus_offset(p, nb);
        set_inuse(m, p, nb);
        newtop->head = newtopsize |PINUSE_BIT;
        m->top = newtop;
        m->topsize = newtopsize;
        newp = p;
      }
    }
    else if (next == m->dv) { /* extend into dv */
      size_t dvs = m->dvsize;
      if (oldsize + dvs >= nb) {
        size_t dsize = oldsize + dvs - nb;
        if (dsize >= MIN_CHUNK_SIZE) {
          /* dv shrinks: its remainder becomes the new dv chunk */
          mchunkptr r = chunk_plus_offset(p, nb);
          mchunkptr n = chunk_plus_offset(r, dsize);
          set_inuse(m, p, nb);
          set_size_and_pinuse_of_free_chunk(r, dsize);
          clear_pinuse(n);
          m->dvsize = dsize;
          m->dv = r;
        }
        else { /* exhaust dv */
          size_t newsize = oldsize + dvs;
          set_inuse(m, p, newsize);
          m->dvsize = 0;
          m->dv = 0;
        }
        newp = p;
      }
    }
    else if (!cinuse(next)) { /* extend into next free chunk */
      size_t nextsize = chunksize(next);
      if (oldsize + nextsize >= nb) {
        size_t rsize = oldsize + nextsize - nb;
        unlink_chunk(m, next, nextsize);
        if (rsize < MIN_CHUNK_SIZE) {
          /* Remainder too small to stand alone: absorb it entirely. */
          size_t newsize = oldsize + nextsize;
          set_inuse(m, p, newsize);
        }
        else {
          mchunkptr r = chunk_plus_offset(p, nb);
          set_inuse(m, p, nb);
          set_inuse(m, r, rsize);
          dispose_chunk(m, r, rsize);
        }
        newp = p;
      }
    }
  }
  else {
    USAGE_ERROR_ACTION(m, chunk2mem(p));
  }
  return newp;
}
4926
/* Allocate bytes of memory whose user pointer is a multiple of
   alignment. alignment is rounded up to a power of two of at least
   MIN_CHUNK_SIZE. Over-allocates by alignment + MIN_CHUNK_SIZE so an
   aligned chunk boundary can always be carved out of the raw block;
   the leading and trailing slack are given back via dispose_chunk
   (or, for mmapped chunks, folded into the offset). Returns 0 on
   failure after MALLOC_FAILURE_ACTION. */
static void* internal_memalign(mstate m, size_t alignment, size_t bytes) {
  void* mem = 0;
  if (alignment < MIN_CHUNK_SIZE) /* must be at least a minimum chunk size */
    alignment = MIN_CHUNK_SIZE;
  if ((alignment & (alignment-SIZE_T_ONE)) != 0) {/* Ensure a power of 2 */
    size_t a = MALLOC_ALIGNMENT << 1;
    while (a < alignment) a <<= 1;
    alignment = a;
  }
  if (bytes >= MAX_REQUEST - alignment) {
    if (m != 0) { /* Test isn't needed but avoids compiler warning */
      MALLOC_FAILURE_ACTION;
    }
  }
  else {
    size_t nb = request2size(bytes);
    size_t req = nb + alignment + MIN_CHUNK_SIZE - CHUNK_OVERHEAD;
    mem = internal_malloc(m, req);
    if (mem != 0) {
      mchunkptr p = mem2chunk(mem);
      if (PREACTION(m))
        return 0;
      if ((((size_t)(mem)) & (alignment - 1)) != 0) { /* misaligned */
        /*
          Find an aligned spot inside chunk. Since we need to give
          back leading space in a chunk of at least MIN_CHUNK_SIZE, if
          the first calculation places us at a spot with less than
          MIN_CHUNK_SIZE leader, we can move to the next aligned spot.
          We've allocated enough total room so that this is always
          possible.
        */
        char* br = (char*)mem2chunk((size_t)(((size_t)((char*)mem + alignment -
                                                       SIZE_T_ONE)) &
                                             -alignment));
        char* pos = ((size_t)(br - (char*)(p)) >= MIN_CHUNK_SIZE)?
          br : br+alignment;
        mchunkptr newp = (mchunkptr)pos;
        size_t leadsize = pos - (char*)(p);
        size_t newsize = chunksize(p) - leadsize;

        if (is_mmapped(p)) { /* For mmapped chunks, just adjust offset */
          newp->prev_foot = p->prev_foot + leadsize;
          newp->head = newsize;
        }
        else { /* Otherwise, give back leader, use the rest */
          set_inuse(m, newp, newsize);
          set_inuse(m, p, leadsize);
          dispose_chunk(m, p, leadsize);
        }
        p = newp;
      }

      /* Give back spare room at the end */
      if (!is_mmapped(p)) {
        size_t size = chunksize(p);
        if (size > nb + MIN_CHUNK_SIZE) {
          size_t remainder_size = size - nb;
          mchunkptr remainder = chunk_plus_offset(p, nb);
          set_inuse(m, p, nb);
          set_inuse(m, remainder, remainder_size);
          dispose_chunk(m, remainder, remainder_size);
        }
      }

      mem = chunk2mem(p);
      assert (chunksize(p) >= nb);
      assert(((size_t)mem & (alignment - 1)) == 0);
      check_inuse_chunk(m, p);
      POSTACTION(m);
    }
  }
  return mem;
}
5000
5001/*
5002 Common support for independent_X routines, handling
5003 all of the combinations that can result.
5004 The opts arg has:
5005 bit 0 set if all elements are same size (using sizes[0])
5006 bit 1 set if elements should be zeroed
5007*/
5008static void** ialloc(mstate m,
5009 size_t n_elements,
5010 size_t* sizes,
5011 int opts,
5012 void* chunks[]) {
5013
5014 size_t element_size; /* chunksize of each element, if all same */
5015 size_t contents_size; /* total size of elements */
5016 size_t array_size; /* request size of pointer array */
5017 void* mem; /* malloced aggregate space */
5018 mchunkptr p; /* corresponding chunk */
5019 size_t remainder_size; /* remaining bytes while splitting */
5020 void** marray; /* either "chunks" or malloced ptr array */
5021 mchunkptr array_chunk; /* chunk for malloced ptr array */
5022 flag_t was_enabled; /* to disable mmap */
5023 size_t size;
5024 size_t i;
5025
5026 ensure_initialization();
5027 /* compute array length, if needed */
5028 if (chunks != 0) {
5029 if (n_elements == 0)
5030 return chunks; /* nothing to do */
5031 marray = chunks;
5032 array_size = 0;
5033 }
5034 else {
5035 /* if empty req, must still return chunk representing empty array */
5036 if (n_elements == 0)
5037 return (void**)internal_malloc(m, 0);
5038 marray = 0;
5039 array_size = request2size(n_elements * (sizeof(void*)));
5040 }
5041
5042 /* compute total element size */
5043 if (opts & 0x1) { /* all-same-size */
5044 element_size = request2size(*sizes);
5045 contents_size = n_elements * element_size;
5046 }
5047 else { /* add up all the sizes */
5048 element_size = 0;
5049 contents_size = 0;
5050 for (i = 0; i != n_elements; ++i)
5051 contents_size += request2size(sizes[i]);
5052 }
5053
5054 size = contents_size + array_size;
5055
5056 /*
5057 Allocate the aggregate chunk. First disable direct-mmapping so
5058 malloc won't use it, since we would not be able to later
5059 free/realloc space internal to a segregated mmap region.
5060 */
5061 was_enabled = use_mmap(m);
5062 disable_mmap(m);
5063 mem = internal_malloc(m, size - CHUNK_OVERHEAD);
5064 if (was_enabled)
5065 enable_mmap(m);
5066 if (mem == 0)
5067 return 0;
5068
5069 if (PREACTION(m)) return 0;
5070 p = mem2chunk(mem);
5071 remainder_size = chunksize(p);
5072
5073 assert(!is_mmapped(p));
5074
5075 if (opts & 0x2) { /* optionally clear the elements */
5076 memset((size_t*)mem, 0, remainder_size - SIZE_T_SIZE - array_size);
5077 }
5078
5079 /* If not provided, allocate the pointer array as final part of chunk */
5080 if (marray == 0) {
5081 size_t array_chunk_size;
5082 array_chunk = chunk_plus_offset(p, contents_size);
5083 array_chunk_size = remainder_size - contents_size;
5084 marray = (void**) (chunk2mem(array_chunk));
5085 set_size_and_pinuse_of_inuse_chunk(m, array_chunk, array_chunk_size);
5086 remainder_size = contents_size;
5087 }
5088
5089 /* split out elements */
5090 for (i = 0; ; ++i) {
5091 marray[i] = chunk2mem(p);
5092 if (i != n_elements-1) {
5093 if (element_size != 0)
5094 size = element_size;
5095 else
5096 size = request2size(sizes[i]);
5097 remainder_size -= size;
5098 set_size_and_pinuse_of_inuse_chunk(m, p, size);
5099 p = chunk_plus_offset(p, size);
5100 }
5101 else { /* the final element absorbs any overallocation slop */
5102 set_size_and_pinuse_of_inuse_chunk(m, p, remainder_size);
5103 break;
5104 }
5105 }
5106
5107#if DEBUG
5108 if (marray != chunks) {
5109 /* final element must have exactly exhausted chunk */
5110 if (element_size != 0) {
5111 assert(remainder_size == element_size);
5112 }
5113 else {
5114 assert(remainder_size == request2size(sizes[i]));
5115 }
5116 check_inuse_chunk(m, mem2chunk(marray));
5117 }
5118 for (i = 0; i != n_elements; ++i)
5119 check_inuse_chunk(m, mem2chunk(marray[i]));
5120
5121#endif /* DEBUG */
5122
5123 POSTACTION(m);
5124 return marray;
5125}
5126
/* Try to free all pointers in the given array.
   Note: this could be made faster, by delaying consolidation,
   at the price of disabling some user integrity checks, We
   still optimize some consolidations by combining adjacent
   chunks before freeing, which will occur often if allocated
   with ialloc or the array is sorted.
   Freed slots are nulled out in the array. Returns the number of
   pointers that could NOT be freed (with FOOTERS, those belonging to
   a different mstate); on corruption the loop stops early. */
static size_t internal_bulk_free(mstate m, void* array[], size_t nelem) {
  size_t unfreed = 0;
  if (!PREACTION(m)) {
    void** a;
    void** fence = &(array[nelem]);
    for (a = array; a != fence; ++a) {
      void* mem = *a;
      if (mem != 0) {
        mchunkptr p = mem2chunk(mem);
        size_t psize = chunksize(p);
#if FOOTERS
        if (get_mstate_for(p) != m) {
          ++unfreed;
          continue;
        }
#endif
        check_inuse_chunk(m, p);
        *a = 0;
        if (RTCHECK(ok_address(m, p) && ok_inuse(p))) {
          void ** b = a + 1; /* try to merge with next chunk */
          mchunkptr next = next_chunk(p);
          if (b != fence && *b == chunk2mem(next)) {
            /* The next array entry is physically adjacent: coalesce the
               two chunks now and free them as one on a later iteration. */
            size_t newsize = chunksize(next) + psize;
            set_inuse(m, p, newsize);
            *b = chunk2mem(p);
          }
          else
            dispose_chunk(m, p, psize);
        }
        else {
          CORRUPTION_ERROR_ACTION(m);
          break;
        }
      }
    }
    if (should_trim(m, m->topsize))
      sys_trim(m, 0);
    POSTACTION(m);
  }
  return unfreed;
}
5175
5176/* Traversal */
#if MALLOC_INSPECT_ALL
/* Walk every chunk in every segment of m, invoking handler(start, end,
   used_bytes, arg) once per chunk (fencepost chunks excluded).
   used_bytes is the usable payload for in-use chunks and 0 for free
   chunks; for free chunks, start is offset past the in-chunk
   bookkeeping (list/tree links) so the handler only sees bytes it may
   touch. NOTE(review): caller is expected to hold the malloc lock —
   this routine does no locking itself. */
static void internal_inspect_all(mstate m,
                                 void(*handler)(void *start,
                                                void *end,
                                                size_t used_bytes,
                                                void* callback_arg),
                                 void* arg) {
  if (is_initialized(m)) {
    mchunkptr top = m->top;
    msegmentptr s;
    for (s = &m->seg; s != 0; s = s->next) {
      mchunkptr q = align_as_chunk(s->base);
      while (segment_holds(s, q) && q->head != FENCEPOST_HEAD) {
        mchunkptr next = next_chunk(q);
        size_t sz = chunksize(q);
        size_t used;
        void* start;
        if (is_inuse(q)) {
          used = sz - CHUNK_OVERHEAD; /* must not be mmapped */
          start = chunk2mem(q);
        }
        else {
          used = 0;
          if (is_small(sz)) {     /* offset by possible bookkeeping */
            start = (void*)((char*)q + sizeof(struct malloc_chunk));
          }
          else {
            start = (void*)((char*)q + sizeof(struct malloc_tree_chunk));
          }
        }
        if (start < (void*)next)  /* skip if all space is bookkeeping */
          handler(start, next, used, arg);
        if (q == top)
          break;
        q = next;
      }
    }
  }
}
#endif /* MALLOC_INSPECT_ALL */
5217
5218/* ------------------ Exported realloc, memalign, etc -------------------- */
5219
5220#if !ONLY_MSPACES
5221
/* ANSI realloc: NULL input behaves like malloc; optionally (with
   REALLOC_ZERO_BYTES_FREES) zero size behaves like free. Resizes
   in place via try_realloc_chunk when possible, otherwise falls back
   to allocate-copy-free. Returns 0 and leaves oldmem valid on
   failure. */
void* dlrealloc(void* oldmem, size_t bytes) {
  void* mem = 0;
  if (oldmem == 0) {
    mem = dlmalloc(bytes);
  }
  else if (bytes >= MAX_REQUEST) {
    MALLOC_FAILURE_ACTION;
  }
#ifdef REALLOC_ZERO_BYTES_FREES
  else if (bytes == 0) {
    dlfree(oldmem);
  }
#endif /* REALLOC_ZERO_BYTES_FREES */
  else {
    size_t nb = request2size(bytes);
    mchunkptr oldp = mem2chunk(oldmem);
#if ! FOOTERS
    mstate m = gm;
#else /* FOOTERS */
    /* Recover and validate the owning mstate from the chunk footer. */
    mstate m = get_mstate_for(oldp);
    if (!ok_magic(m)) {
      USAGE_ERROR_ACTION(m, oldmem);
      return 0;
    }
#endif /* FOOTERS */
    if (!PREACTION(m)) {
      mchunkptr newp = try_realloc_chunk(m, oldp, nb, 1);
      POSTACTION(m);
      if (newp != 0) {
        check_inuse_chunk(m, newp);
        mem = chunk2mem(newp);
      }
      else {
        /* Could not resize in place: allocate, copy payload, free old. */
        mem = internal_malloc(m, bytes);
        if (mem != 0) {
          size_t oc = chunksize(oldp) - overhead_for(oldp);
          memcpy(mem, oldmem, (oc < bytes)? oc : bytes);
          internal_free(m, oldmem);
        }
      }
    }
  }
  return mem;
}
5266
/* Resize oldmem to bytes without ever moving it. Returns oldmem on
   success, or 0 when an in-place resize is impossible (in which case
   the original block is left untouched and still valid). NULL input
   returns 0. */
void* dlrealloc_in_place(void* oldmem, size_t bytes) {
  void* mem = 0;
  if (oldmem != 0) {
    if (bytes >= MAX_REQUEST) {
      MALLOC_FAILURE_ACTION;
    }
    else {
      size_t nb = request2size(bytes);
      mchunkptr oldp = mem2chunk(oldmem);
#if ! FOOTERS
      mstate m = gm;
#else /* FOOTERS */
      /* Recover and validate the owning mstate from the chunk footer. */
      mstate m = get_mstate_for(oldp);
      if (!ok_magic(m)) {
        USAGE_ERROR_ACTION(m, oldmem);
        return 0;
      }
#endif /* FOOTERS */
      if (!PREACTION(m)) {
        /* can_move == 0: only succeed if the chunk stays put */
        mchunkptr newp = try_realloc_chunk(m, oldp, nb, 0);
        POSTACTION(m);
        if (newp == oldp) {
          check_inuse_chunk(m, newp);
          mem = oldmem;
        }
      }
    }
  }
  return mem;
}
5297
5298void* dlmemalign(size_t alignment, size_t bytes) {
5299 if (alignment <= MALLOC_ALIGNMENT) {
5300 return dlmalloc(bytes);
5301 }
5302 return internal_memalign(gm, alignment, bytes);
5303}
5304
/* POSIX-style aligned allocation.  On success stores the block in *PP and
   returns 0; returns EINVAL if ALIGNMENT is not a power of two that is a
   multiple of sizeof(void*), or ENOMEM on allocation failure.  *PP is left
   unmodified on failure, per POSIX. */
int dlposix_memalign(void** pp, size_t alignment, size_t bytes) {
  void* mem = 0;
  if (alignment == MALLOC_ALIGNMENT)
    mem = dlmalloc(bytes);
  else {
    /* alignment must be a power-of-two multiple of sizeof(void*):
       d == 0 or a nonzero remainder or d not a power of two all fail. */
    size_t d = alignment / sizeof(void*);
    size_t r = alignment % sizeof(void*);
    if (r != 0 || d == 0 || (d & (d-SIZE_T_ONE)) != 0)
      return EINVAL;
    else if (bytes <= MAX_REQUEST - alignment) {
      /* internal_memalign requires at least MIN_CHUNK_SIZE alignment. */
      if (alignment < MIN_CHUNK_SIZE)
        alignment = MIN_CHUNK_SIZE;
      mem = internal_memalign(gm, alignment, bytes);
    }
  }
  if (mem == 0)
    return ENOMEM;
  else {
    *pp = mem;
    return 0;
  }
}
5327
5328void* dlvalloc(size_t bytes) {
5329 size_t pagesz;
5330 ensure_initialization();
5331 pagesz = mparams.page_size;
5332 return dlmemalign(pagesz, bytes);
5333}
5334
5335void* dlpvalloc(size_t bytes) {
5336 size_t pagesz;
5337 ensure_initialization();
5338 pagesz = mparams.page_size;
5339 return dlmemalign(pagesz, (bytes + pagesz - SIZE_T_ONE) & ~(pagesz - SIZE_T_ONE));
5340}
5341
/* Allocate N_ELEMENTS independently-freeable chunks of ELEM_SIZE bytes
   each in a single operation.  CHUNKS, if non-null, receives the pointers;
   otherwise an array is allocated.  Option bits 3 = (zero the memory |
   sizes given as a single repeated element) — see ialloc. */
void** dlindependent_calloc(size_t n_elements, size_t elem_size,
                            void* chunks[]) {
  size_t sz = elem_size; /* serves as 1-element array */
  return ialloc(gm, n_elements, &sz, 3, chunks);
}
5347
/* Allocate N_ELEMENTS chunks with per-element sizes from SIZES[] in one
   operation.  Opts = 0: no zeroing, sizes is a true array — see ialloc. */
void** dlindependent_comalloc(size_t n_elements, size_t sizes[],
                              void* chunks[]) {
  return ialloc(gm, n_elements, sizes, 0, chunks);
}
5352
/* Free NELEM pointers from ARRAY in one locked pass over the global
   state; returns the number of entries that could NOT be freed. */
size_t dlbulk_free(void* array[], size_t nelem) {
  return internal_bulk_free(gm, array, nelem);
}
5356
#if MALLOC_INSPECT_ALL
/* Invoke HANDLER for every chunk (used and free) in the global heap while
   holding the allocator lock.  HANDLER must not allocate or free. */
void dlmalloc_inspect_all(void(*handler)(void *start,
                                         void *end,
                                         size_t used_bytes,
                                         void* callback_arg),
                          void* arg) {
  ensure_initialization();
  if (!PREACTION(gm)) {
    internal_inspect_all(gm, handler, arg);
    POSTACTION(gm);
  }
}
#endif /* MALLOC_INSPECT_ALL */
5370
5371int dlmalloc_trim(size_t pad) {
5372 int result = 0;
5373 ensure_initialization();
5374 if (!PREACTION(gm)) {
5375 result = sys_trim(gm, pad);
5376 POSTACTION(gm);
5377 }
5378 return result;
5379}
5380
/* Current number of bytes obtained from the system for the global heap. */
size_t dlmalloc_footprint(void) {
  return gm->footprint;
}
5384
/* High-water mark of bytes obtained from the system for the global heap. */
size_t dlmalloc_max_footprint(void) {
  return gm->max_footprint;
}
5388
5389size_t dlmalloc_footprint_limit(void) {
5390 size_t maf = gm->footprint_limit;
5391 return maf == 0 ? MAX_SIZE_T : maf;
5392}
5393
5394size_t dlmalloc_set_footprint_limit(size_t bytes) {
5395 size_t result; /* invert sense of 0 */
5396 if (bytes == 0)
5397 result = granularity_align(1); /* Use minimal size */
5398 if (bytes == MAX_SIZE_T)
5399 result = 0; /* disable */
5400 else
5401 result = granularity_align(bytes);
5402 return gm->footprint_limit = result;
5403}
5404
#if !NO_MALLINFO
/* Return SVID/XPG-style mallinfo statistics for the global heap. */
struct mallinfo dlmallinfo(void) {
  return internal_mallinfo(gm);
}
#endif /* NO_MALLINFO */
5410
5411#if !NO_MALLOC_STATS
5412void dlmalloc_stats() {
5413 internal_malloc_stats(gm);
5414}
5415#endif /* NO_MALLOC_STATS */
5416
/* mallopt-style tuning for the global allocator; returns 1 on success,
   0 if PARAM_NUMBER is unsupported — see change_mparam. */
int dlmallopt(int param_number, int value) {
  return change_mparam(param_number, value);
}
5420
5421size_t dlmalloc_usable_size(void* mem) {
5422 if (mem != 0) {
5423 mchunkptr p = mem2chunk(mem);
5424 if (is_inuse(p))
5425 return chunksize(p) - overhead_for(p);
5426 }
5427 return 0;
5428}
5429
5430#endif /* !ONLY_MSPACES */
5431
5432/* ----------------------------- user mspaces ---------------------------- */
5433
5434#if MSPACES
5435
/* Carve a fresh malloc_state out of the user-supplied region [TBASE,
   TBASE+TSIZE): the mstate itself lives at the start of the region (as an
   in-use chunk), followed by the initial top chunk.  Contiguous (sbrk-style)
   extension is disabled since the region is externally provided. */
static mstate init_user_mstate(char* tbase, size_t tsize) {
  size_t msize = pad_request(sizeof(struct malloc_state));
  mchunkptr mn;
  mchunkptr msp = align_as_chunk(tbase);
  mstate m = (mstate)(chunk2mem(msp));
  memset(m, 0, msize);
  (void)INITIAL_LOCK(&m->mutex);
  msp->head = (msize|INUSE_BITS);  /* mstate's own chunk is permanently in use */
  m->seg.base = m->least_addr = tbase;
  m->seg.size = m->footprint = m->max_footprint = tsize;
  m->magic = mparams.magic;
  m->release_checks = MAX_RELEASE_CHECK_RATE;
  m->mflags = mparams.default_mflags;
  m->extp = 0;
  m->exts = 0;
  disable_contiguous(m);
  init_bins(m);
  /* Everything after the mstate chunk becomes the initial top chunk. */
  mn = next_chunk(mem2chunk(m));
  init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) - TOP_FOOT_SIZE);
  check_top_chunk(m, m->top);
  return m;
}
5458
/* Create an independent mspace backed by an mmap'ed region of at least
   CAPACITY bytes (granularity-aligned; CAPACITY == 0 uses the default
   granularity).  LOCKED enables per-mspace locking.  Returns 0 if the
   capacity is out of range or the mapping fails. */
mspace create_mspace(size_t capacity, int locked) {
  mstate m = 0;
  size_t msize;
  ensure_initialization();
  msize = pad_request(sizeof(struct malloc_state));
  /* Reject capacities so large that adding bookkeeping would overflow. */
  if (capacity < (size_t) -(msize + TOP_FOOT_SIZE + mparams.page_size)) {
    size_t rs = ((capacity == 0)? mparams.granularity :
                 (capacity + TOP_FOOT_SIZE + msize));
    size_t tsize = granularity_align(rs);
    char* tbase = (char*)(CALL_MMAP(tsize));
    if (tbase != CMFAIL) {
      m = init_user_mstate(tbase, tsize);
      m->seg.sflags = USE_MMAP_BIT;  /* so destroy_mspace unmaps it */
      set_lock(m, locked);
    }
  }
  return (mspace)m;
}
5477
/* Create an mspace inside caller-provided memory [BASE, BASE+CAPACITY).
   The region must be large enough to hold the bookkeeping (mstate plus
   top-chunk footer); ownership of the memory stays with the caller
   (EXTERN_BIT: destroy_mspace will not unmap it).  Returns 0 if CAPACITY
   is too small or too large. */
mspace create_mspace_with_base(void* base, size_t capacity, int locked) {
  mstate m = 0;
  size_t msize;
  ensure_initialization();
  msize = pad_request(sizeof(struct malloc_state));
  if (capacity > msize + TOP_FOOT_SIZE &&
      capacity < (size_t) -(msize + TOP_FOOT_SIZE + mparams.page_size)) {
    m = init_user_mstate((char*)base, capacity);
    m->seg.sflags = EXTERN_BIT;
    set_lock(m, locked);
  }
  return (mspace)m;
}
5491
/* Control whether large allocations are kept inside the mspace's segments
   ("tracked") rather than individually mmap'ed.  Tracking is implemented
   by disabling mmap, so ENABLE != 0 maps to disable_mmap — the apparent
   inversion below is intentional.  Returns the previous tracking state
   (1 if mmap was already disabled, i.e. tracking was on). */
int mspace_track_large_chunks(mspace msp, int enable) {
  int ret = 0;
  mstate ms = (mstate)msp;
  if (!PREACTION(ms)) {
    if (!use_mmap(ms)) {
      ret = 1;  /* was already tracking (mmap off) */
    }
    if (!enable) {
      enable_mmap(ms);   /* stop tracking: allow mmap for large chunks */
    } else {
      disable_mmap(ms);  /* track: keep large chunks in segments */
    }
    POSTACTION(ms);
  }
  return ret;
}
5508
/* Destroy an mspace, unmapping every mmap'ed (non-external) segment.
   Returns the number of bytes returned to the system; segments created
   with create_mspace_with_base (EXTERN_BIT) are left for the caller to
   release.  The mspace pointer is invalid after this call. */
size_t destroy_mspace(mspace msp) {
  size_t freed = 0;
  mstate ms = (mstate)msp;
  if (ok_magic(ms)) {
    msegmentptr sp = &ms->seg;
    (void)DESTROY_LOCK(&ms->mutex); /* destroy before unmapped */
    while (sp != 0) {
      char* base = sp->base;
      size_t size = sp->size;
      flag_t flag = sp->sflags;
      (void)base; /* placate people compiling -Wunused-variable */
      /* Advance before unmapping: sp itself lives inside the segment. */
      sp = sp->next;
      if ((flag & USE_MMAP_BIT) && !(flag & EXTERN_BIT) &&
          CALL_MUNMAP(base, size) == 0)
        freed += size;
    }
  }
  else {
    USAGE_ERROR_ACTION(ms,ms);
  }
  return freed;
}
5531
5532/*
5533 mspace versions of routines are near-clones of the global
5534 versions. This is not so nice but better than the alternatives.
5535*/
5536
/* mspace analogue of dlmalloc: allocate BYTES from MSP.  Search order:
   exact/next smallbin fit, designated-victim (dv) chunk, small-from-tree,
   large-from-tree, dv split, top split, then sys_alloc.  Returns 0 on
   failure or if MSP fails its magic check. */
void* mspace_malloc(mspace msp, size_t bytes) {
  mstate ms = (mstate)msp;
  if (!ok_magic(ms)) {
    USAGE_ERROR_ACTION(ms,ms);
    return 0;
  }
  if (!PREACTION(ms)) {
    void* mem;
    size_t nb;
    if (bytes <= MAX_SMALL_REQUEST) {
      bindex_t idx;
      binmap_t smallbits;
      nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes);
      idx = small_index(nb);
      smallbits = ms->smallmap >> idx;

      if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */
        mchunkptr b, p;
        idx += ~smallbits & 1;       /* Uses next bin if idx empty */
        b = smallbin_at(ms, idx);
        p = b->fd;
        assert(chunksize(p) == small_index2size(idx));
        unlink_first_small_chunk(ms, b, p, idx);
        set_inuse_and_pinuse(ms, p, small_index2size(idx));
        mem = chunk2mem(p);
        check_malloced_chunk(ms, mem, nb);
        goto postaction;
      }

      else if (nb > ms->dvsize) {
        if (smallbits != 0) { /* Use chunk in next nonempty smallbin */
          mchunkptr b, p, r;
          size_t rsize;
          bindex_t i;
          /* Find the least-significant set bin bit at or above idx. */
          binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx));
          binmap_t leastbit = least_bit(leftbits);
          compute_bit2idx(leastbit, i);
          b = smallbin_at(ms, i);
          p = b->fd;
          assert(chunksize(p) == small_index2size(i));
          unlink_first_small_chunk(ms, b, p, i);
          rsize = small_index2size(i) - nb;
          /* Fit here cannot be remainderless if 4byte sizes */
          if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE)
            set_inuse_and_pinuse(ms, p, small_index2size(i));
          else {
            /* Split: remainder becomes the new designated victim. */
            set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
            r = chunk_plus_offset(p, nb);
            set_size_and_pinuse_of_free_chunk(r, rsize);
            replace_dv(ms, r, rsize);
          }
          mem = chunk2mem(p);
          check_malloced_chunk(ms, mem, nb);
          goto postaction;
        }

        else if (ms->treemap != 0 && (mem = tmalloc_small(ms, nb)) != 0) {
          check_malloced_chunk(ms, mem, nb);
          goto postaction;
        }
      }
    }
    else if (bytes >= MAX_REQUEST)
      nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */
    else {
      nb = pad_request(bytes);
      if (ms->treemap != 0 && (mem = tmalloc_large(ms, nb)) != 0) {
        check_malloced_chunk(ms, mem, nb);
        goto postaction;
      }
    }

    /* Fall through: try the designated victim, then the top chunk. */
    if (nb <= ms->dvsize) {
      size_t rsize = ms->dvsize - nb;
      mchunkptr p = ms->dv;
      if (rsize >= MIN_CHUNK_SIZE) { /* split dv */
        mchunkptr r = ms->dv = chunk_plus_offset(p, nb);
        ms->dvsize = rsize;
        set_size_and_pinuse_of_free_chunk(r, rsize);
        set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
      }
      else { /* exhaust dv */
        size_t dvs = ms->dvsize;
        ms->dvsize = 0;
        ms->dv = 0;
        set_inuse_and_pinuse(ms, p, dvs);
      }
      mem = chunk2mem(p);
      check_malloced_chunk(ms, mem, nb);
      goto postaction;
    }

    else if (nb < ms->topsize) { /* Split top */
      size_t rsize = ms->topsize -= nb;
      mchunkptr p = ms->top;
      mchunkptr r = ms->top = chunk_plus_offset(p, nb);
      r->head = rsize | PINUSE_BIT;
      set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
      mem = chunk2mem(p);
      check_top_chunk(ms, ms->top);
      check_malloced_chunk(ms, mem, nb);
      goto postaction;
    }

    /* Last resort: obtain more memory from the system. */
    mem = sys_alloc(ms, nb);

  postaction:
    POSTACTION(ms);
    return mem;
  }

  return 0;
}
5650
/* mspace analogue of dlfree: release MEM back to its owning state.
   Coalesces with free neighbors (backward via prev_foot, forward into
   next/top/dv), unmaps directly-mmapped chunks, and occasionally scans
   for releasable segments.  With FOOTERS the owning state is recovered
   from the chunk itself and MSP is ignored. */
void mspace_free(mspace msp, void* mem) {
  if (mem != 0) {
    mchunkptr p = mem2chunk(mem);
#if FOOTERS
    mstate fm = get_mstate_for(p);
    (void)msp; /* placate people compiling -Wunused */
#else /* FOOTERS */
    mstate fm = (mstate)msp;
#endif /* FOOTERS */
    if (!ok_magic(fm)) {
      USAGE_ERROR_ACTION(fm, p);
      return;
    }
    if (!PREACTION(fm)) {
      check_inuse_chunk(fm, p);
      if (RTCHECK(ok_address(fm, p) && ok_inuse(p))) {
        size_t psize = chunksize(p);
        mchunkptr next = chunk_plus_offset(p, psize);
        if (!pinuse(p)) {
          /* Previous chunk is free (or this is an mmapped chunk). */
          size_t prevsize = p->prev_foot;
          if (is_mmapped(p)) {
            /* Directly mmapped: unmap the whole mapping. */
            psize += prevsize + MMAP_FOOT_PAD;
            if (CALL_MUNMAP((char*)p - prevsize, psize) == 0)
              fm->footprint -= psize;
            goto postaction;
          }
          else {
            mchunkptr prev = chunk_minus_offset(p, prevsize);
            psize += prevsize;
            p = prev;
            if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */
              if (p != fm->dv) {
                unlink_chunk(fm, p, prevsize);
              }
              else if ((next->head & INUSE_BITS) == INUSE_BITS) {
                /* Merged chunk stays the dv; next is in use, so stop here. */
                fm->dvsize = psize;
                set_free_with_pinuse(p, psize, next);
                goto postaction;
              }
            }
            else
              goto erroraction;
          }
        }

        if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) {
          if (!cinuse(next)) { /* consolidate forward */
            if (next == fm->top) {
              /* Merge into top; trim back to the system if large enough. */
              size_t tsize = fm->topsize += psize;
              fm->top = p;
              p->head = tsize | PINUSE_BIT;
              if (p == fm->dv) {
                fm->dv = 0;
                fm->dvsize = 0;
              }
              if (should_trim(fm, tsize))
                sys_trim(fm, 0);
              goto postaction;
            }
            else if (next == fm->dv) {
              size_t dsize = fm->dvsize += psize;
              fm->dv = p;
              set_size_and_pinuse_of_free_chunk(p, dsize);
              goto postaction;
            }
            else {
              size_t nsize = chunksize(next);
              psize += nsize;
              unlink_chunk(fm, next, nsize);
              set_size_and_pinuse_of_free_chunk(p, psize);
              if (p == fm->dv) {
                fm->dvsize = psize;
                goto postaction;
              }
            }
          }
          else
            set_free_with_pinuse(p, psize, next);

          /* Insert the coalesced chunk into the proper bin. */
          if (is_small(psize)) {
            insert_small_chunk(fm, p, psize);
            check_free_chunk(fm, p);
          }
          else {
            tchunkptr tp = (tchunkptr)p;
            insert_large_chunk(fm, tp, psize);
            check_free_chunk(fm, p);
            /* Periodically look for fully-free segments to release. */
            if (--fm->release_checks == 0)
              release_unused_segments(fm);
          }
          goto postaction;
        }
      }
    erroraction:
      USAGE_ERROR_ACTION(fm, p);
    postaction:
      POSTACTION(fm);
    }
  }
}
5751
/* mspace analogue of calloc: allocate N_ELEMENTS * ELEM_SIZE zeroed bytes.
   Multiplication overflow is detected (the bitmask pre-test skips the
   division when both operands are < 2^16, where overflow is impossible)
   and forced to fail downstream by requesting MAX_SIZE_T. */
void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size) {
  void* mem;
  size_t req = 0;
  mstate ms = (mstate)msp;
  if (!ok_magic(ms)) {
    USAGE_ERROR_ACTION(ms,ms);
    return 0;
  }
  if (n_elements != 0) {
    req = n_elements * elem_size;
    if (((n_elements | elem_size) & ~(size_t)0xffff) &&
        (req / n_elements != elem_size))
      req = MAX_SIZE_T; /* force downstream failure on overflow */
  }
  mem = internal_malloc(ms, req);
  /* mmapped chunks are already zero; skip the memset for those. */
  if (mem != 0 && calloc_must_clear(mem2chunk(mem)))
    memset(mem, 0, req);
  return mem;
}
5771
/* mspace analogue of realloc, with C realloc semantics: NULL oldmem acts
   as malloc; with REALLOC_ZERO_BYTES_FREES, size 0 acts as free.  Tries
   an in-place resize first, then falls back to malloc + copy + free. */
void* mspace_realloc(mspace msp, void* oldmem, size_t bytes) {
  void* mem = 0;
  if (oldmem == 0) {
    mem = mspace_malloc(msp, bytes);
  }
  else if (bytes >= MAX_REQUEST) {
    MALLOC_FAILURE_ACTION;  /* request too large: report failure */
  }
#ifdef REALLOC_ZERO_BYTES_FREES
  else if (bytes == 0) {
    mspace_free(msp, oldmem);
  }
#endif /* REALLOC_ZERO_BYTES_FREES */
  else {
    size_t nb = request2size(bytes);
    mchunkptr oldp = mem2chunk(oldmem);
#if ! FOOTERS
    mstate m = (mstate)msp;
#else /* FOOTERS */
    /* With footers, the owning state comes from the chunk, not MSP. */
    mstate m = get_mstate_for(oldp);
    if (!ok_magic(m)) {
      USAGE_ERROR_ACTION(m, oldmem);
      return 0;
    }
#endif /* FOOTERS */
    if (!PREACTION(m)) {
      mchunkptr newp = try_realloc_chunk(m, oldp, nb, 1);
      POSTACTION(m);
      if (newp != 0) {
        check_inuse_chunk(m, newp);
        mem = chunk2mem(newp);
      }
      else {
        /* In-place resize failed: allocate, copy the smaller of the old
           payload and the requested size, then free the old block. */
        mem = mspace_malloc(m, bytes);
        if (mem != 0) {
          size_t oc = chunksize(oldp) - overhead_for(oldp);
          memcpy(mem, oldmem, (oc < bytes)? oc : bytes);
          mspace_free(m, oldmem);
        }
      }
    }
  }
  return mem;
}
5816
/* mspace analogue of dlrealloc_in_place: resize OLDMEM without moving it.
   Returns OLDMEM on success, 0 if the chunk could not be resized in place
   (or oldmem is NULL / the request is too large). */
void* mspace_realloc_in_place(mspace msp, void* oldmem, size_t bytes) {
  void* mem = 0;
  if (oldmem != 0) {
    if (bytes >= MAX_REQUEST) {
      MALLOC_FAILURE_ACTION;  /* request too large: report failure */
    }
    else {
      size_t nb = request2size(bytes);
      mchunkptr oldp = mem2chunk(oldmem);
#if ! FOOTERS
      mstate m = (mstate)msp;
#else /* FOOTERS */
      mstate m = get_mstate_for(oldp);
      (void)msp; /* placate people compiling -Wunused */
      if (!ok_magic(m)) {
        USAGE_ERROR_ACTION(m, oldmem);
        return 0;
      }
#endif /* FOOTERS */
      if (!PREACTION(m)) {
        mchunkptr newp = try_realloc_chunk(m, oldp, nb, 0);
        POSTACTION(m);
        /* Success only if the chunk stayed at the same address. */
        if (newp == oldp) {
          check_inuse_chunk(m, newp);
          mem = oldmem;
        }
      }
    }
  }
  return mem;
}
5848
/* mspace analogue of memalign: allocate BYTES aligned to ALIGNMENT.
   Alignments no stricter than MALLOC_ALIGNMENT are satisfied by plain
   mspace_malloc.  Returns 0 if MSP fails its magic check. */
void* mspace_memalign(mspace msp, size_t alignment, size_t bytes) {
  mstate ms = (mstate)msp;
  if (!ok_magic(ms)) {
    USAGE_ERROR_ACTION(ms,ms);
    return 0;
  }
  if (alignment <= MALLOC_ALIGNMENT)
    return mspace_malloc(msp, bytes);
  return internal_memalign(ms, alignment, bytes);
}
5859
/* mspace analogue of independent_calloc: N_ELEMENTS zeroed chunks of
   ELEM_SIZE bytes each in one operation (ialloc opts = 3: zero memory,
   single repeated size).  Returns 0 on a failed magic check. */
void** mspace_independent_calloc(mspace msp, size_t n_elements,
                                 size_t elem_size, void* chunks[]) {
  size_t sz = elem_size; /* serves as 1-element array */
  mstate ms = (mstate)msp;
  if (!ok_magic(ms)) {
    USAGE_ERROR_ACTION(ms,ms);
    return 0;
  }
  return ialloc(ms, n_elements, &sz, 3, chunks);
}
5870
/* mspace analogue of independent_comalloc: N_ELEMENTS chunks with sizes
   from SIZES[] (ialloc opts = 0: no zeroing, true size array). */
void** mspace_independent_comalloc(mspace msp, size_t n_elements,
                                   size_t sizes[], void* chunks[]) {
  mstate ms = (mstate)msp;
  if (!ok_magic(ms)) {
    USAGE_ERROR_ACTION(ms,ms);
    return 0;
  }
  return ialloc(ms, n_elements, sizes, 0, chunks);
}
5880
/* Free NELEM pointers from ARRAY in one locked pass over MSP; returns the
   number of entries that could NOT be freed.  NOTE(review): unlike most
   mspace entry points, MSP is cast without an ok_magic check here. */
size_t mspace_bulk_free(mspace msp, void* array[], size_t nelem) {
  return internal_bulk_free((mstate)msp, array, nelem);
}
5884
#if MALLOC_INSPECT_ALL
/* Invoke HANDLER for every chunk (used and free) in MSP while holding its
   lock.  HANDLER must not allocate or free from this mspace. */
void mspace_inspect_all(mspace msp,
                        void(*handler)(void *start,
                                       void *end,
                                       size_t used_bytes,
                                       void* callback_arg),
                        void* arg) {
  mstate ms = (mstate)msp;
  if (ok_magic(ms)) {
    if (!PREACTION(ms)) {
      internal_inspect_all(ms, handler, arg);
      POSTACTION(ms);
    }
  }
  else {
    USAGE_ERROR_ACTION(ms,ms);
  }
}
#endif /* MALLOC_INSPECT_ALL */
5904
/* Release unused top memory of MSP back to the system, keeping at least
   PAD bytes of slack.  Returns 1 if any memory was released, else 0. */
int mspace_trim(mspace msp, size_t pad) {
  int result = 0;
  mstate ms = (mstate)msp;
  if (ok_magic(ms)) {
    if (!PREACTION(ms)) {
      result = sys_trim(ms, pad);
      POSTACTION(ms);
    }
  }
  else {
    USAGE_ERROR_ACTION(ms,ms);
  }
  return result;
}
5919
#if !NO_MALLOC_STATS
/* Print heap statistics (footprint, in-use bytes) for MSP. */
void mspace_malloc_stats(mspace msp) {
  mstate ms = (mstate)msp;
  if (ok_magic(ms)) {
    internal_malloc_stats(ms);
  }
  else {
    USAGE_ERROR_ACTION(ms,ms);
  }
}
#endif /* NO_MALLOC_STATS */
5931
5932size_t mspace_footprint(mspace msp) {
5933 size_t result = 0;
5934 mstate ms = (mstate)msp;
5935 if (ok_magic(ms)) {
5936 result = ms->footprint;
5937 }
5938 else {
5939 USAGE_ERROR_ACTION(ms,ms);
5940 }
5941 return result;
5942}
5943
5944size_t mspace_max_footprint(mspace msp) {
5945 size_t result = 0;
5946 mstate ms = (mstate)msp;
5947 if (ok_magic(ms)) {
5948 result = ms->max_footprint;
5949 }
5950 else {
5951 USAGE_ERROR_ACTION(ms,ms);
5952 }
5953 return result;
5954}
5955
/* Current footprint limit for MSP; an internal value of 0 means "no
   limit" and is reported as MAX_SIZE_T.  Returns 0 on a failed magic
   check. */
size_t mspace_footprint_limit(mspace msp) {
  size_t result = 0;
  mstate ms = (mstate)msp;
  if (ok_magic(ms)) {
    size_t maf = ms->footprint_limit;
    result = (maf == 0) ? MAX_SIZE_T : maf;
  }
  else {
    USAGE_ERROR_ACTION(ms,ms);
  }
  return result;
}
5968
5969size_t mspace_set_footprint_limit(mspace msp, size_t bytes) {
5970 size_t result = 0;
5971 mstate ms = (mstate)msp;
5972 if (ok_magic(ms)) {
5973 if (bytes == 0)
5974 result = granularity_align(1); /* Use minimal size */
5975 if (bytes == MAX_SIZE_T)
5976 result = 0; /* disable */
5977 else
5978 result = granularity_align(bytes);
5979 ms->footprint_limit = result;
5980 }
5981 else {
5982 USAGE_ERROR_ACTION(ms,ms);
5983 }
5984 return result;
5985}
5986
#if !NO_MALLINFO
/* SVID/XPG-style mallinfo statistics for MSP.  NOTE(review): unlike
   other entry points, a failed magic check only triggers
   USAGE_ERROR_ACTION; internal_mallinfo is still called on MS, matching
   upstream dlmalloc behavior. */
struct mallinfo mspace_mallinfo(mspace msp) {
  mstate ms = (mstate)msp;
  if (!ok_magic(ms)) {
    USAGE_ERROR_ACTION(ms,ms);
  }
  return internal_mallinfo(ms);
}
#endif /* NO_MALLINFO */
5996
5997size_t mspace_usable_size(const void* mem) {
5998 if (mem != 0) {
5999 mchunkptr p = mem2chunk(mem);
6000 if (is_inuse(p))
6001 return chunksize(p) - overhead_for(p);
6002 }
6003 return 0;
6004}
6005
/* mallopt-style tuning; parameters are global (shared by all mspaces),
   so this is identical to dlmallopt — see change_mparam. */
int mspace_mallopt(int param_number, int value) {
  return change_mparam(param_number, value);
}
6009
6010#endif /* MSPACES */
6011
6012
6013/* -------------------- Alternative MORECORE functions ------------------- */
6014
6015/*
6016 Guidelines for creating a custom version of MORECORE:
6017
6018 * For best performance, MORECORE should allocate in multiples of pagesize.
6019 * MORECORE may allocate more memory than requested. (Or even less,
6020 but this will usually result in a malloc failure.)
6021 * MORECORE must not allocate memory when given argument zero, but
6022 instead return one past the end address of memory from previous
6023 nonzero call.
6024 * For best performance, consecutive calls to MORECORE with positive
6025 arguments should return increasing addresses, indicating that
6026 space has been contiguously extended.
6027 * Even though consecutive calls to MORECORE need not return contiguous
6028 addresses, it must be OK for malloc'ed chunks to span multiple
6029 regions in those cases where they do happen to be contiguous.
6030 * MORECORE need not handle negative arguments -- it may instead
6031 just return MFAIL when given negative arguments.
6032 Negative arguments are always multiples of pagesize. MORECORE
6033 must not misinterpret negative args as large positive unsigned
6034 args. You can suppress all such calls from even occurring by defining
6035 MORECORE_CANNOT_TRIM,
6036
6037 As an example alternative MORECORE, here is a custom allocator
6038 kindly contributed for pre-OSX macOS. It uses virtually but not
6039 necessarily physically contiguous non-paged memory (locked in,
6040 present and won't get swapped out). You can use it by uncommenting
6041 this section, adding some #includes, and setting up the appropriate
6042 defines above:
6043
6044 #define MORECORE osMoreCore
6045
6046 There is also a shutdown routine that should somehow be called for
6047 cleanup upon program exit.
6048
6049 #define MAX_POOL_ENTRIES 100
6050 #define MINIMUM_MORECORE_SIZE (64 * 1024U)
6051 static int next_os_pool;
6052 void *our_os_pools[MAX_POOL_ENTRIES];
6053
6054 void *osMoreCore(int size)
6055 {
6056 void *ptr = 0;
6057 static void *sbrk_top = 0;
6058
6059 if (size > 0)
6060 {
6061 if (size < MINIMUM_MORECORE_SIZE)
6062 size = MINIMUM_MORECORE_SIZE;
6063 if (CurrentExecutionLevel() == kTaskLevel)
6064 ptr = PoolAllocateResident(size + RM_PAGE_SIZE, 0);
6065 if (ptr == 0)
6066 {
6067 return (void *) MFAIL;
6068 }
6069 // save ptrs so they can be freed during cleanup
6070 our_os_pools[next_os_pool] = ptr;
6071 next_os_pool++;
6072 ptr = (void *) ((((size_t) ptr) + RM_PAGE_MASK) & ~RM_PAGE_MASK);
6073 sbrk_top = (char *) ptr + size;
6074 return ptr;
6075 }
6076 else if (size < 0)
6077 {
6078 // we don't currently support shrink behavior
6079 return (void *) MFAIL;
6080 }
6081 else
6082 {
6083 return sbrk_top;
6084 }
6085 }
6086
6087 // cleanup any allocated memory pools
6088 // called as last thing before shutting down driver
6089
6090 void osCleanupMem(void)
6091 {
6092 void **ptr;
6093
6094 for (ptr = our_os_pools; ptr < &our_os_pools[MAX_POOL_ENTRIES]; ptr++)
6095 if (*ptr)
6096 {
6097 PoolDeallocate(*ptr);
6098 *ptr = 0;
6099 }
6100 }
6101
6102*/
6103
6104
6105/* -----------------------------------------------------------------------
6106History:
6107 v2.8.6 Wed Aug 29 06:57:58 2012 Doug Lea
6108 * fix bad comparison in dlposix_memalign
6109 * don't reuse adjusted asize in sys_alloc
6110 * add LOCK_AT_FORK -- thanks to Kirill Artamonov for the suggestion
6111 * reduce compiler warnings -- thanks to all who reported/suggested these
6112
6113 v2.8.5 Sun May 22 10:26:02 2011 Doug Lea (dl at gee)
6114 * Always perform unlink checks unless INSECURE
6115 * Add posix_memalign.
6116 * Improve realloc to expand in more cases; expose realloc_in_place.
6117 Thanks to Peter Buhr for the suggestion.
6118 * Add footprint_limit, inspect_all, bulk_free. Thanks
6119 to Barry Hayes and others for the suggestions.
6120 * Internal refactorings to avoid calls while holding locks
6121 * Use non-reentrant locks by default. Thanks to Roland McGrath
6122 for the suggestion.
6123 * Small fixes to mspace_destroy, reset_on_error.
6124 * Various configuration extensions/changes. Thanks
6125 to all who contributed these.
6126
6127 V2.8.4a Thu Apr 28 14:39:43 2011 (dl at gee.cs.oswego.edu)
6128 * Update Creative Commons URL
6129
6130 V2.8.4 Wed May 27 09:56:23 2009 Doug Lea (dl at gee)
6131 * Use zeros instead of prev foot for is_mmapped
6132 * Add mspace_track_large_chunks; thanks to Jean Brouwers
6133 * Fix set_inuse in internal_realloc; thanks to Jean Brouwers
6134 * Fix insufficient sys_alloc padding when using 16byte alignment
6135 * Fix bad error check in mspace_footprint
6136 * Adaptations for ptmalloc; thanks to Wolfram Gloger.
6137 * Reentrant spin locks; thanks to Earl Chew and others
6138 * Win32 improvements; thanks to Niall Douglas and Earl Chew
6139 * Add NO_SEGMENT_TRAVERSAL and MAX_RELEASE_CHECK_RATE options
6140 * Extension hook in malloc_state
6141 * Various small adjustments to reduce warnings on some compilers
6142 * Various configuration extensions/changes for more platforms. Thanks
6143 to all who contributed these.
6144
6145 V2.8.3 Thu Sep 22 11:16:32 2005 Doug Lea (dl at gee)
6146 * Add max_footprint functions
6147 * Ensure all appropriate literals are size_t
6148 * Fix conditional compilation problem for some #define settings
6149 * Avoid concatenating segments with the one provided
6150 in create_mspace_with_base
6151 * Rename some variables to avoid compiler shadowing warnings
6152 * Use explicit lock initialization.
6153 * Better handling of sbrk interference.
6154 * Simplify and fix segment insertion, trimming and mspace_destroy
6155 * Reinstate REALLOC_ZERO_BYTES_FREES option from 2.7.x
6156 * Thanks especially to Dennis Flanagan for help on these.
6157
6158 V2.8.2 Sun Jun 12 16:01:10 2005 Doug Lea (dl at gee)
6159 * Fix memalign brace error.
6160
6161 V2.8.1 Wed Jun 8 16:11:46 2005 Doug Lea (dl at gee)
6162 * Fix improper #endif nesting in C++
6163 * Add explicit casts needed for C++
6164
6165 V2.8.0 Mon May 30 14:09:02 2005 Doug Lea (dl at gee)
6166 * Use trees for large bins
6167 * Support mspaces
6168 * Use segments to unify sbrk-based and mmap-based system allocation,
6169 removing need for emulation on most platforms without sbrk.
6170 * Default safety checks
6171 * Optional footer checks. Thanks to William Robertson for the idea.
6172 * Internal code refactoring
6173 * Incorporate suggestions and platform-specific changes.
6174 Thanks to Dennis Flanagan, Colin Plumb, Niall Douglas,
6175 Aaron Bachmann, Emery Berger, and others.
6176 * Speed up non-fastbin processing enough to remove fastbins.
6177 * Remove useless cfree() to avoid conflicts with other apps.
6178 * Remove internal memcpy, memset. Compilers handle builtins better.
6179 * Remove some options that no one ever used and rename others.
6180
6181 V2.7.2 Sat Aug 17 09:07:30 2002 Doug Lea (dl at gee)
6182 * Fix malloc_state bitmap array misdeclaration
6183
6184 V2.7.1 Thu Jul 25 10:58:03 2002 Doug Lea (dl at gee)
6185 * Allow tuning of FIRST_SORTED_BIN_SIZE
6186 * Use PTR_UINT as type for all ptr->int casts. Thanks to John Belmonte.
6187 * Better detection and support for non-contiguousness of MORECORE.
6188 Thanks to Andreas Mueller, Conal Walsh, and Wolfram Gloger
6189 * Bypass most of malloc if no frees. Thanks To Emery Berger.
      * Fix freeing of old top non-contiguous chunk in sysmalloc.
6191 * Raised default trim and map thresholds to 256K.
6192 * Fix mmap-related #defines. Thanks to Lubos Lunak.
6193 * Fix copy macros; added LACKS_FCNTL_H. Thanks to Neal Walfield.
6194 * Branch-free bin calculation
6195 * Default trim and mmap thresholds now 256K.
6196
6197 V2.7.0 Sun Mar 11 14:14:06 2001 Doug Lea (dl at gee)
6198 * Introduce independent_comalloc and independent_calloc.
6199 Thanks to Michael Pachos for motivation and help.
6200 * Make optional .h file available
6201 * Allow > 2GB requests on 32bit systems.
6202 * new WIN32 sbrk, mmap, munmap, lock code from <Walter@GeNeSys-e.de>.
6203 Thanks also to Andreas Mueller <a.mueller at paradatec.de>,
6204 and Anonymous.
6205 * Allow override of MALLOC_ALIGNMENT (Thanks to Ruud Waij for
6206 helping test this.)
6207 * memalign: check alignment arg
6208 * realloc: don't try to shift chunks backwards, since this
6209 leads to more fragmentation in some programs and doesn't
6210 seem to help in any others.
6211 * Collect all cases in malloc requiring system memory into sysmalloc
6212 * Use mmap as backup to sbrk
6213 * Place all internal state in malloc_state
6214 * Introduce fastbins (although similar to 2.5.1)
6215 * Many minor tunings and cosmetic improvements
6216 * Introduce USE_PUBLIC_MALLOC_WRAPPERS, USE_MALLOC_LOCK
6217 * Introduce MALLOC_FAILURE_ACTION, MORECORE_CONTIGUOUS
6218 Thanks to Tony E. Bennett <tbennett@nvidia.com> and others.
6219 * Include errno.h to support default failure action.
6220
6221 V2.6.6 Sun Dec 5 07:42:19 1999 Doug Lea (dl at gee)
6222 * return null for negative arguments
6223 * Added Several WIN32 cleanups from Martin C. Fong <mcfong at yahoo.com>
6224 * Add 'LACKS_SYS_PARAM_H' for those systems without 'sys/param.h'
6225 (e.g. WIN32 platforms)
6226 * Cleanup header file inclusion for WIN32 platforms
6227 * Cleanup code to avoid Microsoft Visual C++ compiler complaints
6228 * Add 'USE_DL_PREFIX' to quickly allow co-existence with existing
6229 memory allocation routines
6230 * Set 'malloc_getpagesize' for WIN32 platforms (needs more work)
6231 * Use 'assert' rather than 'ASSERT' in WIN32 code to conform to
6232 usage of 'assert' in non-WIN32 code
6233 * Improve WIN32 'sbrk()' emulation's 'findRegion()' routine to
6234 avoid infinite loop
6235 * Always call 'fREe()' rather than 'free()'
6236
6237 V2.6.5 Wed Jun 17 15:57:31 1998 Doug Lea (dl at gee)
6238 * Fixed ordering problem with boundary-stamping
6239
6240 V2.6.3 Sun May 19 08:17:58 1996 Doug Lea (dl at gee)
6241 * Added pvalloc, as recommended by H.J. Liu
6242 * Added 64bit pointer support mainly from Wolfram Gloger
6243 * Added anonymously donated WIN32 sbrk emulation
6244 * Malloc, calloc, getpagesize: add optimizations from Raymond Nijssen
6245 * malloc_extend_top: fix mask error that caused wastage after
6246 foreign sbrks
6247 * Add linux mremap support code from HJ Liu
6248
6249 V2.6.2 Tue Dec 5 06:52:55 1995 Doug Lea (dl at gee)
6250 * Integrated most documentation with the code.
6251 * Add support for mmap, with help from
6252 Wolfram Gloger (Gloger@lrz.uni-muenchen.de).
6253 * Use last_remainder in more cases.
6254 * Pack bins using idea from colin@nyx10.cs.du.edu
      * Use ordered bins instead of best-fit threshold
6256 * Eliminate block-local decls to simplify tracing and debugging.
6257 * Support another case of realloc via move into top
      * Fix error occurring when initial sbrk_base not word-aligned.
6259 * Rely on page size for units instead of SBRK_UNIT to
6260 avoid surprises about sbrk alignment conventions.
6261 * Add mallinfo, mallopt. Thanks to Raymond Nijssen
6262 (raymond@es.ele.tue.nl) for the suggestion.
6263 * Add `pad' argument to malloc_trim and top_pad mallopt parameter.
6264 * More precautions for cases where other routines call sbrk,
6265 courtesy of Wolfram Gloger (Gloger@lrz.uni-muenchen.de).
6266 * Added macros etc., allowing use in linux libc from
6267 H.J. Lu (hjl@gnu.ai.mit.edu)
6268 * Inverted this history list
6269
6270 V2.6.1 Sat Dec 2 14:10:57 1995 Doug Lea (dl at gee)
6271 * Re-tuned and fixed to behave more nicely with V2.6.0 changes.
6272 * Removed all preallocation code since under current scheme
6273 the work required to undo bad preallocations exceeds
6274 the work saved in good cases for most test programs.
6275 * No longer use return list or unconsolidated bins since
6276 no scheme using them consistently outperforms those that don't
6277 given above changes.
6278 * Use best fit for very large chunks to prevent some worst-cases.
6279 * Added some support for debugging
6280
6281 V2.6.0 Sat Nov 4 07:05:23 1995 Doug Lea (dl at gee)
6282 * Removed footers when chunks are in use. Thanks to
6283 Paul Wilson (wilson@cs.texas.edu) for the suggestion.
6284
6285 V2.5.4 Wed Nov 1 07:54:51 1995 Doug Lea (dl at gee)
6286 * Added malloc_trim, with help from Wolfram Gloger
6287 (wmglo@Dent.MED.Uni-Muenchen.DE).
6288
6289 V2.5.3 Tue Apr 26 10:16:01 1994 Doug Lea (dl at g)
6290
6291 V2.5.2 Tue Apr 5 16:20:40 1994 Doug Lea (dl at g)
6292 * realloc: try to expand in both directions
6293 * malloc: swap order of clean-bin strategy;
6294 * realloc: only conditionally expand backwards
6295 * Try not to scavenge used bins
6296 * Use bin counts as a guide to preallocation
6297 * Occasionally bin return list chunks in first scan
6298 * Add a few optimizations from colin@nyx10.cs.du.edu
6299
6300 V2.5.1 Sat Aug 14 15:40:43 1993 Doug Lea (dl at g)
6301 * faster bin computation & slightly different binning
6302 * merged all consolidations to one part of malloc proper
6303 (eliminating old malloc_find_space & malloc_clean_bin)
6304 * Scan 2 returns chunks (not just 1)
6305 * Propagate failure in realloc if malloc returns 0
6306 * Add stuff to allow compilation on non-ANSI compilers
6307 from kpv@research.att.com
6308
6309 V2.5 Sat Aug 7 07:41:59 1993 Doug Lea (dl at g.oswego.edu)
6310 * removed potential for odd address access in prev_chunk
6311 * removed dependency on getpagesize.h
6312 * misc cosmetics and a bit more internal documentation
6313 * anticosmetics: mangled names in macros to evade debugger strangeness
6314 * tested on sparc, hp-700, dec-mips, rs6000
6315 with gcc & native cc (hp, dec only) allowing
6316 Detlefs & Zorn comparison study (in SIGPLAN Notices.)
6317
6318 Trial version Fri Aug 28 13:14:29 1992 Doug Lea (dl at g.oswego.edu)
6319 * Based loosely on libg++-1.2X malloc. (It retains some of the overall
6320 structure of old version, but most details differ.)
6321
6322*/
6323
6324#endif /* !HAVE_MALLOC */
6325
#ifdef HAVE_MALLOC
// Thin SDLCALL wrappers so the system allocator matches the
// SDL_malloc_func/SDL_calloc_func/SDL_realloc_func/SDL_free_func signatures.
static void * SDLCALL real_malloc(size_t s) { return malloc(s); }
static void * SDLCALL real_calloc(size_t n, size_t s) { return calloc(n, s); }
static void * SDLCALL real_realloc(void *p, size_t s) { return realloc(p,s); }
static void SDLCALL real_free(void *p) { free(p); }
#else
// No system malloc available: route to the bundled dlmalloc implementation.
#define real_malloc dlmalloc
#define real_calloc dlcalloc
#define real_realloc dlrealloc
#define real_free dlfree
#endif
6337
// mark the allocator entry points as KEEPALIVE so we can call these from JavaScript.
// otherwise they could get so aggressively inlined that their symbols
// don't exist at all in the final binary!
#ifdef SDL_PLATFORM_EMSCRIPTEN
#include <emscripten/emscripten.h>
extern SDL_DECLSPEC SDL_MALLOC EMSCRIPTEN_KEEPALIVE void * SDLCALL SDL_malloc(size_t size);
extern SDL_DECLSPEC SDL_MALLOC SDL_ALLOC_SIZE2(1, 2) EMSCRIPTEN_KEEPALIVE void * SDLCALL SDL_calloc(size_t nmemb, size_t size);
extern SDL_DECLSPEC SDL_ALLOC_SIZE(2) EMSCRIPTEN_KEEPALIVE void * SDLCALL SDL_realloc(void *mem, size_t size);
extern SDL_DECLSPEC EMSCRIPTEN_KEEPALIVE void SDLCALL SDL_free(void *mem);
#endif
6348
/* Memory functions used by SDL that can be replaced by the application */
static struct
{
    SDL_malloc_func malloc_func;    // currently-installed malloc implementation
    SDL_calloc_func calloc_func;    // currently-installed calloc implementation
    SDL_realloc_func realloc_func;  // currently-installed realloc implementation
    SDL_free_func free_func;        // currently-installed free implementation
    SDL_AtomicInt num_allocations;  // live allocation count (only maintained when SDL_TRACK_ALLOCATION_COUNT is defined)
} s_mem = {
    real_malloc, real_calloc, real_realloc, real_free, { 0 }
};
6360
// Define this if you want to track the number of allocations active
// #define SDL_TRACK_ALLOCATION_COUNT
#ifdef SDL_TRACK_ALLOCATION_COUNT
// Atomically bump/drop s_mem.num_allocations; the (void) cast discards
// the boolean result of SDL_AtomicIncRef/SDL_AtomicDecRef.
#define INCREMENT_ALLOCATION_COUNT() (void)SDL_AtomicIncRef(&s_mem.num_allocations)
#define DECREMENT_ALLOCATION_COUNT() (void)SDL_AtomicDecRef(&s_mem.num_allocations)
#else
// Tracking disabled: these compile to nothing.
#define INCREMENT_ALLOCATION_COUNT()
#define DECREMENT_ALLOCATION_COUNT()
#endif
6370
6371
6372void SDL_GetOriginalMemoryFunctions(SDL_malloc_func *malloc_func,
6373 SDL_calloc_func *calloc_func,
6374 SDL_realloc_func *realloc_func,
6375 SDL_free_func *free_func)
6376{
6377 if (malloc_func) {
6378 *malloc_func = real_malloc;
6379 }
6380 if (calloc_func) {
6381 *calloc_func = real_calloc;
6382 }
6383 if (realloc_func) {
6384 *realloc_func = real_realloc;
6385 }
6386 if (free_func) {
6387 *free_func = real_free;
6388 }
6389}
6390
6391void SDL_GetMemoryFunctions(SDL_malloc_func *malloc_func,
6392 SDL_calloc_func *calloc_func,
6393 SDL_realloc_func *realloc_func,
6394 SDL_free_func *free_func)
6395{
6396 if (malloc_func) {
6397 *malloc_func = s_mem.malloc_func;
6398 }
6399 if (calloc_func) {
6400 *calloc_func = s_mem.calloc_func;
6401 }
6402 if (realloc_func) {
6403 *realloc_func = s_mem.realloc_func;
6404 }
6405 if (free_func) {
6406 *free_func = s_mem.free_func;
6407 }
6408}
6409
6410bool SDL_SetMemoryFunctions(SDL_malloc_func malloc_func,
6411 SDL_calloc_func calloc_func,
6412 SDL_realloc_func realloc_func,
6413 SDL_free_func free_func)
6414{
6415 if (!malloc_func) {
6416 return SDL_InvalidParamError("malloc_func");
6417 }
6418 if (!calloc_func) {
6419 return SDL_InvalidParamError("calloc_func");
6420 }
6421 if (!realloc_func) {
6422 return SDL_InvalidParamError("realloc_func");
6423 }
6424 if (!free_func) {
6425 return SDL_InvalidParamError("free_func");
6426 }
6427
6428 s_mem.malloc_func = malloc_func;
6429 s_mem.calloc_func = calloc_func;
6430 s_mem.realloc_func = realloc_func;
6431 s_mem.free_func = free_func;
6432 return true;
6433}
6434
// Returns the number of live SDL allocations, or -1 when allocation
// tracking was compiled out (SDL_TRACK_ALLOCATION_COUNT not defined).
int SDL_GetNumAllocations(void)
{
#ifdef SDL_TRACK_ALLOCATION_COUNT
    return SDL_GetAtomicInt(&s_mem.num_allocations);
#else
    return -1;
#endif
}
6443
6444void *SDL_malloc(size_t size)
6445{
6446 void *mem;
6447
6448 if (!size) {
6449 size = 1;
6450 }
6451
6452 mem = s_mem.malloc_func(size);
6453 if (mem) {
6454 INCREMENT_ALLOCATION_COUNT();
6455 } else {
6456 SDL_OutOfMemory();
6457 }
6458
6459 return mem;
6460}
6461
6462void *SDL_calloc(size_t nmemb, size_t size)
6463{
6464 void *mem;
6465
6466 if (!nmemb || !size) {
6467 nmemb = 1;
6468 size = 1;
6469 }
6470
6471 mem = s_mem.calloc_func(nmemb, size);
6472 if (mem) {
6473 INCREMENT_ALLOCATION_COUNT();
6474 } else {
6475 SDL_OutOfMemory();
6476 }
6477
6478 return mem;
6479}
6480
6481void *SDL_realloc(void *ptr, size_t size)
6482{
6483 void *mem;
6484
6485 if (!size) {
6486 size = 1;
6487 }
6488
6489 mem = s_mem.realloc_func(ptr, size);
6490 if (mem && !ptr) {
6491 INCREMENT_ALLOCATION_COUNT();
6492 } else if (!mem) {
6493 SDL_OutOfMemory();
6494 }
6495
6496 return mem;
6497}
6498
6499void SDL_free(void *ptr)
6500{
6501 if (!ptr) {
6502 return;
6503 }
6504
6505 s_mem.free_func(ptr);
6506 DECREMENT_ALLOCATION_COUNT();
6507}
diff --git a/contrib/SDL-3.2.8/src/stdlib/SDL_memcpy.c b/contrib/SDL-3.2.8/src/stdlib/SDL_memcpy.c
new file mode 100644
index 0000000..315edf0
--- /dev/null
+++ b/contrib/SDL-3.2.8/src/stdlib/SDL_memcpy.c
@@ -0,0 +1,101 @@
1/*
2 Simple DirectMedia Layer
3 Copyright (C) 1997-2025 Sam Lantinga <slouken@libsdl.org>
4
5 This software is provided 'as-is', without any express or implied
6 warranty. In no event will the authors be held liable for any damages
7 arising from the use of this software.
8
9 Permission is granted to anyone to use this software for any purpose,
10 including commercial applications, and to alter it and redistribute it
11 freely, subject to the following restrictions:
12
13 1. The origin of this software must not be misrepresented; you must not
14 claim that you wrote the original software. If you use this software
15 in a product, an acknowledgment in the product documentation would be
16 appreciated but is not required.
17 2. Altered source versions must be plainly marked as such, and must not be
18 misrepresented as being the original software.
19 3. This notice may not be removed or altered from any source distribution.
20*/
21#include "SDL_internal.h"
22
23
24#ifdef SDL_memcpy
25#undef SDL_memcpy
26#endif
27#if SDL_DYNAMIC_API
28#define SDL_memcpy SDL_memcpy_REAL
29#endif
30void *SDL_memcpy(SDL_OUT_BYTECAP(len) void *dst, SDL_IN_BYTECAP(len) const void *src, size_t len)
31{
32#if defined(__GNUC__) && (defined(HAVE_LIBC) && HAVE_LIBC)
33 /* Presumably this is well tuned for speed.
34 On my machine this is twice as fast as the C code below.
35 */
36 return __builtin_memcpy(dst, src, len);
37#elif defined(HAVE_MEMCPY)
38 return memcpy(dst, src, len);
39#elif defined(HAVE_BCOPY)
40 bcopy(src, dst, len);
41 return dst;
42#else
43 /* GCC 4.9.0 with -O3 will generate movaps instructions with the loop
44 using Uint32* pointers, so we need to make sure the pointers are
45 aligned before we loop using them.
46 */
47 if (((uintptr_t)src & 0x3) || ((uintptr_t)dst & 0x3)) {
48 // Do an unaligned byte copy
49 Uint8 *srcp1 = (Uint8 *)src;
50 Uint8 *dstp1 = (Uint8 *)dst;
51
52 while (len--) {
53 *dstp1++ = *srcp1++;
54 }
55 } else {
56 size_t left = (len % 4);
57 Uint32 *srcp4, *dstp4;
58 Uint8 *srcp1, *dstp1;
59
60 srcp4 = (Uint32 *)src;
61 dstp4 = (Uint32 *)dst;
62 len /= 4;
63 while (len--) {
64 *dstp4++ = *srcp4++;
65 }
66
67 srcp1 = (Uint8 *)srcp4;
68 dstp1 = (Uint8 *)dstp4;
69 switch (left) {
70 case 3:
71 *dstp1++ = *srcp1++;
72 SDL_FALLTHROUGH;
73 case 2:
74 *dstp1++ = *srcp1++;
75 SDL_FALLTHROUGH;
76 case 1:
77 *dstp1++ = *srcp1++;
78 }
79 }
80 return dst;
81#endif // HAVE_MEMCPY
82}
83
/* The optimizer on Visual Studio 2005 and later generates memcpy() and memset() calls.
   We will provide our own implementation if we're not building with a C runtime. */
#ifndef HAVE_LIBC
// NOLINTNEXTLINE(readability-redundant-declaration)
extern void *memcpy(void *dst, const void *src, size_t len);
#if defined(_MSC_VER) && !defined(__INTEL_LLVM_COMPILER)
// Let MSVC expand memcpy inline where it can...
#pragma intrinsic(memcpy)
#endif

#if defined(_MSC_VER) && !defined(__clang__)
// ...while this translation unit supplies the out-of-line definition.
#pragma function(memcpy)
#endif
// NOLINTNEXTLINE(readability-inconsistent-declaration-parameter-name)
void *memcpy(void *dst, const void *src, size_t len)
{
    return SDL_memcpy(dst, src, len);
}
#endif // !HAVE_LIBC
diff --git a/contrib/SDL-3.2.8/src/stdlib/SDL_memmove.c b/contrib/SDL-3.2.8/src/stdlib/SDL_memmove.c
new file mode 100644
index 0000000..4e0d26c
--- /dev/null
+++ b/contrib/SDL-3.2.8/src/stdlib/SDL_memmove.c
@@ -0,0 +1,73 @@
1/*
2 Simple DirectMedia Layer
3 Copyright (C) 1997-2025 Sam Lantinga <slouken@libsdl.org>
4
5 This software is provided 'as-is', without any express or implied
6 warranty. In no event will the authors be held liable for any damages
7 arising from the use of this software.
8
9 Permission is granted to anyone to use this software for any purpose,
10 including commercial applications, and to alter it and redistribute it
11 freely, subject to the following restrictions:
12
13 1. The origin of this software must not be misrepresented; you must not
14 claim that you wrote the original software. If you use this software
15 in a product, an acknowledgment in the product documentation would be
16 appreciated but is not required.
17 2. Altered source versions must be plainly marked as such, and must not be
18 misrepresented as being the original software.
19 3. This notice may not be removed or altered from any source distribution.
20*/
21#include "SDL_internal.h"
22
23
24#ifdef SDL_memmove
25#undef SDL_memmove
26#endif
27#if SDL_DYNAMIC_API
28#define SDL_memmove SDL_memmove_REAL
29#endif
30void *SDL_memmove(SDL_OUT_BYTECAP(len) void *dst, SDL_IN_BYTECAP(len) const void *src, size_t len)
31{
32#if defined(__GNUC__) && (defined(HAVE_LIBC) && HAVE_LIBC)
33 // Presumably this is well tuned for speed.
34 return __builtin_memmove(dst, src, len);
35#elif defined(HAVE_MEMMOVE)
36 return memmove(dst, src, len);
37#else
38 char *srcp = (char *)src;
39 char *dstp = (char *)dst;
40
41 if (src < dst) {
42 srcp += len - 1;
43 dstp += len - 1;
44 while (len--) {
45 *dstp-- = *srcp--;
46 }
47 } else {
48 while (len--) {
49 *dstp++ = *srcp++;
50 }
51 }
52 return dst;
53#endif // HAVE_MEMMOVE
54}
55
56
/* Provide the libc symbol when building without a C runtime, since the
   compiler may generate memmove() calls on its own. */
#ifndef HAVE_LIBC
// NOLINTNEXTLINE(readability-redundant-declaration)
extern void *memmove(void *dst, const void *src, size_t len);
#if defined(_MSC_VER) && !defined(__INTEL_LLVM_COMPILER)
// Let MSVC expand memmove inline where it can...
#pragma intrinsic(memmove)
#endif

#if defined(_MSC_VER) && !defined(__clang__)
// ...while this translation unit supplies the out-of-line definition.
#pragma function(memmove)
#endif
// NOLINTNEXTLINE(readability-inconsistent-declaration-parameter-name)
void *memmove(void *dst, const void *src, size_t len)
{
    return SDL_memmove(dst, src, len);
}
#endif // !HAVE_LIBC
73
diff --git a/contrib/SDL-3.2.8/src/stdlib/SDL_memset.c b/contrib/SDL-3.2.8/src/stdlib/SDL_memset.c
new file mode 100644
index 0000000..324f917
--- /dev/null
+++ b/contrib/SDL-3.2.8/src/stdlib/SDL_memset.c
@@ -0,0 +1,139 @@
1/*
2 Simple DirectMedia Layer
3 Copyright (C) 1997-2025 Sam Lantinga <slouken@libsdl.org>
4
5 This software is provided 'as-is', without any express or implied
6 warranty. In no event will the authors be held liable for any damages
7 arising from the use of this software.
8
9 Permission is granted to anyone to use this software for any purpose,
10 including commercial applications, and to alter it and redistribute it
11 freely, subject to the following restrictions:
12
13 1. The origin of this software must not be misrepresented; you must not
14 claim that you wrote the original software. If you use this software
15 in a product, an acknowledgment in the product documentation would be
16 appreciated but is not required.
17 2. Altered source versions must be plainly marked as such, and must not be
18 misrepresented as being the original software.
19 3. This notice may not be removed or altered from any source distribution.
20*/
21#include "SDL_internal.h"
22
23
24#ifdef SDL_memset
25#undef SDL_memset
26#endif
27#if SDL_DYNAMIC_API
28#define SDL_memset SDL_memset_REAL
29#endif
30void *SDL_memset(SDL_OUT_BYTECAP(len) void *dst, int c, size_t len)
31{
32#if defined(__GNUC__) && (defined(HAVE_LIBC) && HAVE_LIBC)
33 return __builtin_memset(dst, c, len);
34#elif defined(HAVE_MEMSET)
35 return memset(dst, c, len);
36#else
37 size_t left;
38 Uint32 *dstp4;
39 Uint8 *dstp1 = (Uint8 *)dst;
40 Uint8 value1;
41 Uint32 value4;
42
43 // The value used in memset() is a byte, passed as an int
44 c &= 0xff;
45
46 /* The destination pointer needs to be aligned on a 4-byte boundary to
47 * execute a 32-bit set. Set first bytes manually if needed until it is
48 * aligned. */
49 value1 = (Uint8)c;
50 while ((uintptr_t)dstp1 & 0x3) {
51 if (len--) {
52 *dstp1++ = value1;
53 } else {
54 return dst;
55 }
56 }
57
58 value4 = ((Uint32)c | ((Uint32)c << 8) | ((Uint32)c << 16) | ((Uint32)c << 24));
59 dstp4 = (Uint32 *)dstp1;
60 left = (len % 4);
61 len /= 4;
62 while (len--) {
63 *dstp4++ = value4;
64 }
65
66 dstp1 = (Uint8 *)dstp4;
67 switch (left) {
68 case 3:
69 *dstp1++ = value1;
70 SDL_FALLTHROUGH;
71 case 2:
72 *dstp1++ = value1;
73 SDL_FALLTHROUGH;
74 case 1:
75 *dstp1++ = value1;
76 }
77
78 return dst;
79#endif // HAVE_MEMSET
80}
81
// Note that memset() is a byte assignment and this is a 32-bit assignment, so they're not directly equivalent.
// Fills `dwords` 32-bit words at `dst` with `val`. The generic path is a
// Duff's-device unrolled loop (the case labels deliberately fall into the
// middle of the do/while); dst is assumed suitably aligned for Uint32
// stores -- NOTE(review): alignment is the caller's responsibility here.
void *SDL_memset4(void *dst, Uint32 val, size_t dwords)
{
#if defined(__APPLE__) && defined(HAVE_STRING_H)
    // Apple provides a pattern fill; length is in bytes, hence * 4.
    memset_pattern4(dst, &val, dwords * 4);
#elif defined(__GNUC__) && defined(__i386__)
    // rep stosl: edi = dst, eax = val, ecx = count; clobbers all three,
    // so they are captured as dummy outputs u0..u2.
    int u0, u1, u2;
    __asm__ __volatile__(
        "cld \n\t"
        "rep ; stosl \n\t"
        : "=&D"(u0), "=&a"(u1), "=&c"(u2)
        : "0"(dst), "1"(val), "2"(SDL_static_cast(Uint32, dwords))
        : "memory");
#else
    // _n = number of unrolled iterations (4 stores each, rounded up).
    size_t _n = (dwords + 3) / 4;
    Uint32 *_p = SDL_static_cast(Uint32 *, dst);
    Uint32 _val = (val);
    if (dwords == 0) {
        return dst;
    }
    // Duff's device: enter the loop at the label matching dwords % 4.
    switch (dwords % 4) {
    case 0:
        do {
            *_p++ = _val;
            SDL_FALLTHROUGH;
    case 3:
            *_p++ = _val;
            SDL_FALLTHROUGH;
    case 2:
            *_p++ = _val;
            SDL_FALLTHROUGH;
    case 1:
            *_p++ = _val;
        } while (--_n);
    }
#endif
    return dst;
}
120
/* The optimizer on Visual Studio 2005 and later generates memcpy() and memset() calls.
   We will provide our own implementation if we're not building with a C runtime. */
#ifndef HAVE_LIBC
// NOLINTNEXTLINE(readability-redundant-declaration)
extern void *memset(void *dst, int c, size_t len);
#if defined(_MSC_VER) && !defined(__INTEL_LLVM_COMPILER)
// Let MSVC expand memset inline where it can...
#pragma intrinsic(memset)
#endif

#if defined(_MSC_VER) && !defined(__clang__)
// ...while this translation unit supplies the out-of-line definition.
#pragma function(memset)
#endif
// NOLINTNEXTLINE(readability-inconsistent-declaration-parameter-name)
void *memset(void *dst, int c, size_t len)
{
    return SDL_memset(dst, c, len);
}
#endif // !HAVE_LIBC
139
diff --git a/contrib/SDL-3.2.8/src/stdlib/SDL_mslibc.c b/contrib/SDL-3.2.8/src/stdlib/SDL_mslibc.c
new file mode 100644
index 0000000..6698403
--- /dev/null
+++ b/contrib/SDL-3.2.8/src/stdlib/SDL_mslibc.c
@@ -0,0 +1,746 @@
1/*
2 Simple DirectMedia Layer
3 Copyright (C) 1997-2025 Sam Lantinga <slouken@libsdl.org>
4
5 This software is provided 'as-is', without any express or implied
6 warranty. In no event will the authors be held liable for any damages
7 arising from the use of this software.
8
9 Permission is granted to anyone to use this software for any purpose,
10 including commercial applications, and to alter it and redistribute it
11 freely, subject to the following restrictions:
12
13 1. The origin of this software must not be misrepresented; you must not
14 claim that you wrote the original software. If you use this software
15 in a product, an acknowledgment in the product documentation would be
16 appreciated but is not required.
17 2. Altered source versions must be plainly marked as such, and must not be
18 misrepresented as being the original software.
19 3. This notice may not be removed or altered from any source distribution.
20*/
21#include "SDL_internal.h"
22
23// This file contains SDL replacements for functions in the C library
24
25#if !defined(HAVE_LIBC) && !defined(SDL_STATIC_LIB)
26
27// These are some C runtime intrinsics that need to be defined
28
29#ifdef _MSC_VER
30
#ifndef __FLTUSED__
#define __FLTUSED__
// The MSVC CRT normally defines _fltused when floating point is in use;
// building without a CRT we must supply it ourselves to satisfy the
// linker. selectany lets multiple definitions collapse into one.
__declspec(selectany) int _fltused = 1;
#endif
35
36#ifdef _M_IX86
37
// Float to long
// CRT helper the 32-bit MSVC compiler emits calls to when casting a
// floating-point value to a 64-bit integer: converts ST(0) to an integer
// in edx:eax. NOTE(review): the bias/adjust sequence implements
// truncation toward zero regardless of the FPU rounding mode -- verify
// against the classic CRT _ftol before modifying.
void __declspec(naked) _ftol()
{
    /* *INDENT-OFF* */
    __asm {
        push ebp
        mov ebp,esp
        sub esp,20h
        and esp,0FFFFFFF0h
        fld st(0)
        fst dword ptr [esp+18h]
        fistp qword ptr [esp+10h]
        fild qword ptr [esp+10h]
        mov edx,dword ptr [esp+18h]
        mov eax,dword ptr [esp+10h]
        test eax,eax
        je integer_QnaN_or_zero
arg_is_not_integer_QnaN:
        fsubp st(1),st
        test edx,edx
        jns positive
        fstp dword ptr [esp]
        mov ecx,dword ptr [esp]
        xor ecx,80000000h
        add ecx,7FFFFFFFh
        adc eax,0
        mov edx,dword ptr [esp+14h]
        adc edx,0
        jmp localexit
positive:
        fstp dword ptr [esp]
        mov ecx,dword ptr [esp]
        add ecx,7FFFFFFFh
        sbb eax,0
        mov edx,dword ptr [esp+14h]
        sbb edx,0
        jmp localexit
integer_QnaN_or_zero:
        mov edx,dword ptr [esp+14h]
        test edx,7FFFFFFFh
        jne arg_is_not_integer_QnaN
        fstp dword ptr [esp+18h]
        fstp dword ptr [esp+18h]
localexit:
        leave
        ret
    }
    /* *INDENT-ON* */
}
87
// Alternate float-to-long entry points newer MSVC versions may emit;
// both simply defer to the shared _ftol implementation above.
void _ftol2_sse()
{
    _ftol();
}

void _ftol2()
{
    _ftol();
}
97
// 64-bit math operators for 32-bit systems
// _allmul: 64-bit multiply. The two 64-bit operands are passed on the
// stack; the product is returned in edx:eax and the callee pops its
// 16 bytes of arguments (ret 10h). Takes a fast path when both high
// dwords are zero.
void __declspec(naked) _allmul()
{
    /* *INDENT-OFF* */
    __asm {
        mov eax, dword ptr[esp+8]
        mov ecx, dword ptr[esp+10h]
        or ecx, eax
        mov ecx, dword ptr[esp+0Ch]
        jne hard
        mov eax, dword ptr[esp+4]
        mul ecx
        ret 10h
hard:
        push ebx
        mul ecx
        mov ebx, eax
        mov eax, dword ptr[esp+8]
        mul dword ptr[esp+14h]
        add ebx, eax
        mov eax, dword ptr[esp+8]
        mul ecx
        add edx, ebx
        pop ebx
        ret 10h
    }
    /* *INDENT-ON* */
}
126
// _alldiv: signed 64-bit division (quotient only). Operands on the stack,
// quotient returned in edx:eax, callee-pops (ret 10h). Negates operands
// up front, divides as unsigned (shift-down + single 32-bit div, then a
// multiply-back correction step), and restores the sign at the end.
void __declspec(naked) _alldiv()
{
    /* *INDENT-OFF* */
    __asm {
        push edi
        push esi
        push ebx
        xor edi,edi
        mov eax,dword ptr [esp+14h]
        or eax,eax
        jge L1
        inc edi
        mov edx,dword ptr [esp+10h]
        neg eax
        neg edx
        sbb eax,0
        mov dword ptr [esp+14h],eax
        mov dword ptr [esp+10h],edx
L1:
        mov eax,dword ptr [esp+1Ch]
        or eax,eax
        jge L2
        inc edi
        mov edx,dword ptr [esp+18h]
        neg eax
        neg edx
        sbb eax,0
        mov dword ptr [esp+1Ch],eax
        mov dword ptr [esp+18h],edx
L2:
        or eax,eax
        jne L3
        mov ecx,dword ptr [esp+18h]
        mov eax,dword ptr [esp+14h]
        xor edx,edx
        div ecx
        mov ebx,eax
        mov eax,dword ptr [esp+10h]
        div ecx
        mov edx,ebx
        jmp L4
L3:
        mov ebx,eax
        mov ecx,dword ptr [esp+18h]
        mov edx,dword ptr [esp+14h]
        mov eax,dword ptr [esp+10h]
L5:
        shr ebx,1
        rcr ecx,1
        shr edx,1
        rcr eax,1
        or ebx,ebx
        jne L5
        div ecx
        mov esi,eax
        mul dword ptr [esp+1Ch]
        mov ecx,eax
        mov eax,dword ptr [esp+18h]
        mul esi
        add edx,ecx
        jb L6
        cmp edx,dword ptr [esp+14h]
        ja L6
        jb L7
        cmp eax,dword ptr [esp+10h]
        jbe L7
L6:
        dec esi
L7:
        xor edx,edx
        mov eax,esi
L4:
        dec edi
        jne L8
        neg edx
        neg eax
        sbb edx,0
L8:
        pop ebx
        pop esi
        pop edi
        ret 10h
    }
    /* *INDENT-ON* */
}
212
// _aulldiv: unsigned 64-bit division (quotient only). Operands on the
// stack, quotient in edx:eax, callee-pops (ret 10h). Same shift-down /
// correct-estimate approach as _alldiv, without the sign handling.
void __declspec(naked) _aulldiv()
{
    /* *INDENT-OFF* */
    __asm {
        push ebx
        push esi
        mov eax,dword ptr [esp+18h]
        or eax,eax
        jne L1
        mov ecx,dword ptr [esp+14h]
        mov eax,dword ptr [esp+10h]
        xor edx,edx
        div ecx
        mov ebx,eax
        mov eax,dword ptr [esp+0Ch]
        div ecx
        mov edx,ebx
        jmp L2
L1:
        mov ecx,eax
        mov ebx,dword ptr [esp+14h]
        mov edx,dword ptr [esp+10h]
        mov eax,dword ptr [esp+0Ch]
L3:
        shr ecx,1
        rcr ebx,1
        shr edx,1
        rcr eax,1
        or ecx,ecx
        jne L3
        div ebx
        mov esi,eax
        mul dword ptr [esp+18h]
        mov ecx,eax
        mov eax,dword ptr [esp+14h]
        mul esi
        add edx,ecx
        jb L4
        cmp edx,dword ptr [esp+10h]
        ja L4
        jb L5
        cmp eax,dword ptr [esp+0Ch]
        jbe L5
L4:
        dec esi
L5:
        xor edx,edx
        mov eax,esi
L2:
        pop esi
        pop ebx
        ret 10h
    }
    /* *INDENT-ON* */
}
268
// _allrem: signed 64-bit remainder. Operands on the stack, remainder in
// edx:eax, callee-pops (ret 10h). The result takes the sign of the
// dividend (only the first operand's sign flips edi).
void __declspec(naked) _allrem()
{
    /* *INDENT-OFF* */
    __asm {
        push ebx
        push edi
        xor edi,edi
        mov eax,dword ptr [esp+10h]
        or eax,eax
        jge L1
        inc edi
        mov edx,dword ptr [esp+0Ch]
        neg eax
        neg edx
        sbb eax,0
        mov dword ptr [esp+10h],eax
        mov dword ptr [esp+0Ch],edx
L1:
        mov eax,dword ptr [esp+18h]
        or eax,eax
        jge L2
        mov edx,dword ptr [esp+14h]
        neg eax
        neg edx
        sbb eax,0
        mov dword ptr [esp+18h],eax
        mov dword ptr [esp+14h],edx
L2:
        or eax,eax
        jne L3
        mov ecx,dword ptr [esp+14h]
        mov eax,dword ptr [esp+10h]
        xor edx,edx
        div ecx
        mov eax,dword ptr [esp+0Ch]
        div ecx
        mov eax,edx
        xor edx,edx
        dec edi
        jns L4
        jmp L8
L3:
        mov ebx,eax
        mov ecx,dword ptr [esp+14h]
        mov edx,dword ptr [esp+10h]
        mov eax,dword ptr [esp+0Ch]
L5:
        shr ebx,1
        rcr ecx,1
        shr edx,1
        rcr eax,1
        or ebx,ebx
        jne L5
        div ecx
        mov ecx,eax
        mul dword ptr [esp+18h]
        xchg eax,ecx
        mul dword ptr [esp+14h]
        add edx,ecx
        jb L6
        cmp edx,dword ptr [esp+10h]
        ja L6
        jb L7
        cmp eax,dword ptr [esp+0Ch]
        jbe L7
L6:
        sub eax,dword ptr [esp+14h]
        sbb edx,dword ptr [esp+18h]
L7:
        sub eax,dword ptr [esp+0Ch]
        sbb edx,dword ptr [esp+10h]
        dec edi
        jns L8
L4:
        neg edx
        neg eax
        sbb edx,0
L8:
        pop edi
        pop ebx
        ret 10h
    }
    /* *INDENT-ON* */
}
353
// _aullrem: unsigned 64-bit remainder. Operands on the stack, remainder
// in edx:eax, callee-pops (ret 10h).
void __declspec(naked) _aullrem()
{
    /* *INDENT-OFF* */
    __asm {
        push ebx
        mov eax,dword ptr [esp+14h]
        or eax,eax
        jne L1
        mov ecx,dword ptr [esp+10h]
        mov eax,dword ptr [esp+0Ch]
        xor edx,edx
        div ecx
        mov eax,dword ptr [esp+8]
        div ecx
        mov eax,edx
        xor edx,edx
        jmp L2
L1:
        mov ecx,eax
        mov ebx,dword ptr [esp+10h]
        mov edx,dword ptr [esp+0Ch]
        mov eax,dword ptr [esp+8]
L3:
        shr ecx,1
        rcr ebx,1
        shr edx,1
        rcr eax,1
        or ecx,ecx
        jne L3
        div ebx
        mov ecx,eax
        mul dword ptr [esp+14h]
        xchg eax,ecx
        mul dword ptr [esp+10h]
        add edx,ecx
        jb L4
        cmp edx,dword ptr [esp+0Ch]
        ja L4
        jb L5
        cmp eax,dword ptr [esp+8]
        jbe L5
L4:
        sub eax,dword ptr [esp+10h]
        sbb edx,dword ptr [esp+14h]
L5:
        sub eax,dword ptr [esp+8]
        sbb edx,dword ptr [esp+0Ch]
        neg edx
        neg eax
        sbb edx,0
L2:
        pop ebx
        ret 10h
    }
    /* *INDENT-ON* */
}
410
// _alldvrm: signed 64-bit combined divide-and-remainder. Operands on the
// stack; callee-pops (ret 10h). NOTE(review): per the usual MS CRT
// convention the quotient is returned in edx:eax and the remainder in
// ebx:ecx -- verify against the CRT sources before changing.
void __declspec(naked) _alldvrm()
{
    /* *INDENT-OFF* */
    __asm {
        push edi
        push esi
        push ebp
        xor edi,edi
        xor ebp,ebp
        mov eax,dword ptr [esp+14h]
        or eax,eax
        jge L1
        inc edi
        inc ebp
        mov edx,dword ptr [esp+10h]
        neg eax
        neg edx
        sbb eax,0
        mov dword ptr [esp+14h],eax
        mov dword ptr [esp+10h],edx
L1:
        mov eax,dword ptr [esp+1Ch]
        or eax,eax
        jge L2
        inc edi
        mov edx,dword ptr [esp+18h]
        neg eax
        neg edx
        sbb eax,0
        mov dword ptr [esp+1Ch],eax
        mov dword ptr [esp+18h],edx
L2:
        or eax,eax
        jne L3
        mov ecx,dword ptr [esp+18h]
        mov eax,dword ptr [esp+14h]
        xor edx,edx
        div ecx
        mov ebx,eax
        mov eax,dword ptr [esp+10h]
        div ecx
        mov esi,eax
        mov eax,ebx
        mul dword ptr [esp+18h]
        mov ecx,eax
        mov eax,esi
        mul dword ptr [esp+18h]
        add edx,ecx
        jmp L4
L3:
        mov ebx,eax
        mov ecx,dword ptr [esp+18h]
        mov edx,dword ptr [esp+14h]
        mov eax,dword ptr [esp+10h]
L5:
        shr ebx,1
        rcr ecx,1
        shr edx,1
        rcr eax,1
        or ebx,ebx
        jne L5
        div ecx
        mov esi,eax
        mul dword ptr [esp+1Ch]
        mov ecx,eax
        mov eax,dword ptr [esp+18h]
        mul esi
        add edx,ecx
        jb L6
        cmp edx,dword ptr [esp+14h]
        ja L6
        jb L7
        cmp eax,dword ptr [esp+10h]
        jbe L7
L6:
        dec esi
        sub eax,dword ptr [esp+18h]
        sbb edx,dword ptr [esp+1Ch]
L7:
        xor ebx,ebx
L4:
        sub eax,dword ptr [esp+10h]
        sbb edx,dword ptr [esp+14h]
        dec ebp
        jns L9
        neg edx
        neg eax
        sbb edx,0
L9:
        mov ecx,edx
        mov edx,ebx
        mov ebx,ecx
        mov ecx,eax
        mov eax,esi
        dec edi
        jne L8
        neg edx
        neg eax
        sbb edx,0
L8:
        pop ebp
        pop esi
        pop edi
        ret 10h
    }
    /* *INDENT-ON* */
}
518
// _aulldvrm: unsigned 64-bit combined divide-and-remainder. Operands on
// the stack; callee-pops (ret 10h). NOTE(review): per the usual MS CRT
// convention the quotient is returned in edx:eax and the remainder in
// ebx:ecx -- verify against the CRT sources before changing.
void __declspec(naked) _aulldvrm()
{
    /* *INDENT-OFF* */
    __asm {
        push esi
        mov eax,dword ptr [esp+14h]
        or eax,eax
        jne L1
        mov ecx,dword ptr [esp+10h]
        mov eax,dword ptr [esp+0Ch]
        xor edx,edx
        div ecx
        mov ebx,eax
        mov eax,dword ptr [esp+8]
        div ecx
        mov esi,eax
        mov eax,ebx
        mul dword ptr [esp+10h]
        mov ecx,eax
        mov eax,esi
        mul dword ptr [esp+10h]
        add edx,ecx
        jmp L2
L1:
        mov ecx,eax
        mov ebx,dword ptr [esp+10h]
        mov edx,dword ptr [esp+0Ch]
        mov eax,dword ptr [esp+8]
L3:
        shr ecx,1
        rcr ebx,1
        shr edx,1
        rcr eax,1
        or ecx,ecx
        jne L3
        div ebx
        mov esi,eax
        mul dword ptr [esp+14h]
        mov ecx,eax
        mov eax,dword ptr [esp+10h]
        mul esi
        add edx,ecx
        jb L4
        cmp edx,dword ptr [esp+0Ch]
        ja L4
        jb L5
        cmp eax,dword ptr [esp+8]
        jbe L5
L4:
        dec esi
        sub eax,dword ptr [esp+10h]
        sbb edx,dword ptr [esp+14h]
L5:
        xor ebx,ebx
L2:
        sub eax,dword ptr [esp+8]
        sbb edx,dword ptr [esp+0Ch]
        neg edx
        neg eax
        sbb edx,0
        mov ecx,edx
        mov edx,ebx
        mov ebx,ecx
        mov ecx,eax
        mov eax,esi
        pop esi
        ret 10h
    }
    /* *INDENT-ON* */
}
589
// 64-bit shift helpers: the value lives in edx:eax, the shift count in cl.
// _allshl: shift left; counts >= 64 yield zero, counts >= 32 move the low
// word into the high word.
void __declspec(naked) _allshl()
{
    /* *INDENT-OFF* */
    __asm {
        cmp cl,40h
        jae RETZERO
        cmp cl,20h
        jae MORE32
        shld edx,eax,cl
        shl eax,cl
        ret
MORE32:
        mov edx,eax
        xor eax,eax
        and cl,1Fh
        shl edx,cl
        ret
RETZERO:
        xor eax,eax
        xor edx,edx
        ret
    }
    /* *INDENT-ON* */
}

// _allshr: arithmetic (sign-propagating) shift right; counts >= 63
// collapse to the sign extension of the high word.
void __declspec(naked) _allshr()
{
    /* *INDENT-OFF* */
    __asm {
        cmp cl,3Fh
        jae RETSIGN
        cmp cl,20h
        jae MORE32
        shrd eax,edx,cl
        sar edx,cl
        ret
MORE32:
        mov eax,edx
        sar edx,1Fh
        and cl,1Fh
        sar eax,cl
        ret
RETSIGN:
        sar edx,1Fh
        mov eax,edx
        ret
    }
    /* *INDENT-ON* */
}

// _aullshr: logical (zero-filling) shift right; counts >= 64 yield zero.
void __declspec(naked) _aullshr()
{
    /* *INDENT-OFF* */
    __asm {
        cmp cl,40h
        jae RETZERO
        cmp cl,20h
        jae MORE32
        shrd eax,edx,cl
        shr edx,cl
        ret
MORE32:
        mov eax,edx
        xor edx,edx
        and cl,1Fh
        shr eax,cl
        ret
RETZERO:
        xor eax,eax
        xor edx,edx
        ret
    }
    /* *INDENT-ON* */
}
664
// Stack probe the compiler calls when a frame's locals exceed one page.
// eax holds the requested byte count on entry; every page between the
// current esp and the new stack bottom is touched so Windows can commit
// its guard pages, then esp is lowered and control returns to the caller.
// NOTE(review): contract inferred from the standard MSVC _chkstk helper.
void __declspec(naked) _chkstk(void)
{
    __asm {
        push ecx
        mov ecx,esp ; lea ecx,dword ptr [esp]+4
        add ecx,4
        sub ecx,eax
        sbb eax,eax
        not eax
        and ecx,eax
        mov eax,esp
        and eax,0xfffff000
L1:
        cmp ecx,eax
        jb short L2
        mov eax,ecx
        pop ecx
        xchg esp,eax
        mov eax,dword ptr [eax]
        mov dword ptr [esp],eax
        ret
L2:
        sub eax,0x1000
        test dword ptr [eax],eax
        jmp short L1
    }
}
692
// Aligned variants of the stack probe used by _alloca: round the request
// in eax up to an 8-byte boundary (saturating on overflow via sbb/or),
// then fall into the shared _chkstk page-touch loop.
void __declspec(naked) _alloca_probe_8(void)
{
    /* *INDENT-OFF* */
    __asm {
        push ecx
        mov ecx,esp ; lea ecx,dword ptr [esp]+8
        add ecx,8
        sub ecx,eax
        and ecx,0x7
        add eax,ecx
        sbb ecx,ecx
        or eax,ecx
        pop ecx
        jmp _chkstk
    }
    /* *INDENT-ON* */
}

// Same as _alloca_probe_8 but rounds the request up to a 16-byte boundary.
void __declspec(naked) _alloca_probe_16(void)
{
    /* *INDENT-OFF* */
    __asm {
        push ecx
        mov ecx,esp ; lea ecx,dword ptr [esp]+8
        add ecx,8
        sub ecx,eax
        and ecx,0xf
        add eax,ecx
        sbb ecx,ecx
        or eax,ecx
        pop ecx
        jmp _chkstk
    }
    /* *INDENT-ON* */
}
728
729#endif // _M_IX86
730
731#endif // MSC_VER
732
#ifdef __ICL
/* The classic Intel compiler generates calls to _intel_fast_memcpy
 * and _intel_fast_memset when building an optimized SDL library;
 * route them to the SDL implementations. */
void *_intel_fast_memcpy(void *dst, const void *src, size_t len)
{
    return SDL_memcpy(dst, src, len);
}
void *_intel_fast_memset(void *dst, int c, size_t len)
{
    return SDL_memset(dst, c, len);
}
#endif
745
746#endif // !HAVE_LIBC && !SDL_STATIC_LIB
diff --git a/contrib/SDL-3.2.8/src/stdlib/SDL_mslibc_arm64.masm b/contrib/SDL-3.2.8/src/stdlib/SDL_mslibc_arm64.masm
new file mode 100644
index 0000000..a769cc1
--- /dev/null
+++ b/contrib/SDL-3.2.8/src/stdlib/SDL_mslibc_arm64.masm
@@ -0,0 +1,26 @@
; __chkstk stack-probe helper for ARM64 MSVC builds without a CRT.
; NOTE(review): per the MS ARM64 convention x15 carries the requested
; allocation size in 16-byte units (hence the LSL #4) -- verify against
; the official ABI documentation before modifying.
TeStackLimit EQU 0x00010        ; offset of the stack limit field in the TEB (x18)
PAGE_SIZE equ 0x1000

        AREA CODE, READONLY

        EXPORT __chkstk

__chkstk PROC
        ldr x17,[x18, #TeStackLimit]    ; current committed stack limit
        subs x16,sp,x15, LSL #0x4       ; x16 = prospective new stack bottom
        csel x16,xzr,x16,cc             ; clamp to zero on underflow
        cmp x16,x17
        b.cc chkstk_start_loop          ; below the limit: probe page by page
        ret
chkstk_start_loop
        and x16,x16,#-PAGE_SIZE         ; round down to a page boundary
chkstk_loop
        sub x17,x17,#0x1, LSL #12       ; step the limit down one page...
        ldr xzr,[x17]                   ; ...and touch it to trigger the guard page
        cmp x17,x16
        b.ne chkstk_loop
        ret

        ENDP

        END
diff --git a/contrib/SDL-3.2.8/src/stdlib/SDL_mslibc_x64.masm b/contrib/SDL-3.2.8/src/stdlib/SDL_mslibc_x64.masm
new file mode 100644
index 0000000..1590d88
--- /dev/null
+++ b/contrib/SDL-3.2.8/src/stdlib/SDL_mslibc_x64.masm
@@ -0,0 +1,29 @@
; __chkstk stack-probe helper for x64 MSVC builds without a CRT.
; rax carries the requested allocation size; each page between the current
; committed stack limit (from the TEB via gs:) and the new stack bottom is
; touched so Windows can extend the stack one guard page at a time.
include ksamd64.inc

text SEGMENT EXECUTE

public __chkstk

__chkstk:
        sub rsp,010h
        mov QWORD PTR [rsp],r10         ; preserve scratch registers
        mov QWORD PTR [rsp+08h],r11
        xor r11,r11
        lea r10,[rsp+018h]
        sub r10,rax                     ; r10 = prospective new stack bottom
        cmovb r10,r11                   ; clamp to zero on underflow
        mov r11,QWORD PTR gs:[TeStackLimit]
        cmp r10,r11
        jae chkstk_finish               ; already within committed stack
        and r10w,0f000h                 ; round down to a page boundary
chkstk_loop:
        lea r11,[r11-PAGE_SIZE]
        mov BYTE PTR [r11],0h           ; touch the page to commit it
        cmp r10,r11
        jne chkstk_loop
chkstk_finish:
        mov r10,QWORD PTR [rsp]
        mov r11,QWORD PTR [rsp+08h]
        add rsp,010h
        ret
end
diff --git a/contrib/SDL-3.2.8/src/stdlib/SDL_murmur3.c b/contrib/SDL-3.2.8/src/stdlib/SDL_murmur3.c
new file mode 100644
index 0000000..6b030bd
--- /dev/null
+++ b/contrib/SDL-3.2.8/src/stdlib/SDL_murmur3.c
@@ -0,0 +1,87 @@
1/*
2 Simple DirectMedia Layer
3 Copyright (C) 1997-2025 Sam Lantinga <slouken@libsdl.org>
4
5 This software is provided 'as-is', without any express or implied
6 warranty. In no event will the authors be held liable for any damages
7 arising from the use of this software.
8
9 Permission is granted to anyone to use this software for any purpose,
10 including commercial applications, and to alter it and redistribute it
11 freely, subject to the following restrictions:
12
13 1. The origin of this software must not be misrepresented; you must not
14 claim that you wrote the original software. If you use this software
15 in a product, an acknowledgment in the product documentation would be
16 appreciated but is not required.
17 2. Altered source versions must be plainly marked as such, and must not be
18 misrepresented as being the original software.
19 3. This notice may not be removed or altered from any source distribution.
20*/
21#include "SDL_internal.h"
22
23// Public domain murmur3 32-bit hash algorithm
24//
25// Adapted from: https://en.wikipedia.org/wiki/MurmurHash
26
27static SDL_INLINE Uint32 murmur_32_scramble(Uint32 k)
28{
29 k *= 0xcc9e2d51;
30 k = (k << 15) | (k >> 17);
31 k *= 0x1b873593;
32 return k;
33}
34
/*
 * 32-bit MurmurHash3 of an arbitrary byte buffer.
 *
 * data: bytes to hash; len: byte count; seed: initial state, so different
 * seeds yield independent hash functions. Returns the 32-bit hash.
 * The result is defined over little-endian input words, so it is identical
 * on all platforms.
 */
Uint32 SDLCALL SDL_murmur3_32(const void *data, size_t len, Uint32 seed)
{
    const Uint8 *bytes = (const Uint8 *)data;
    Uint32 hash = seed;
    Uint32 k;

    // Read in groups of 4.
    if ((((uintptr_t)bytes) & 3) == 0) {
        // We can do aligned 32-bit reads
        for (size_t i = len >> 2; i--; ) {
            k = *(const Uint32 *)bytes;
            k = SDL_Swap32LE(k); // canonicalize to little-endian word order
            bytes += sizeof(Uint32);
            hash ^= murmur_32_scramble(k);
            hash = (hash << 13) | (hash >> 19); // rotl32(hash, 13)
            hash = hash * 5 + 0xe6546b64;
        }
    } else {
        // Unaligned input: same loop, but fetch each word via memcpy.
        for (size_t i = len >> 2; i--; ) {
            SDL_memcpy(&k, bytes, sizeof(Uint32));
            k = SDL_Swap32LE(k);
            bytes += sizeof(Uint32);
            hash ^= murmur_32_scramble(k);
            hash = (hash << 13) | (hash >> 19);
            hash = hash * 5 + 0xe6546b64;
        }
    }

    // Read the rest (0-3 trailing bytes, assembled with byte[0] lowest).
    size_t left = (len & 3);
    if (left) {
        k = 0;
        for (size_t i = left; i--; ) {
            k <<= 8;
            k |= bytes[i];
        }

        // A swap is *not* necessary here because the preceding loop already
        // places the low bytes in the low places according to whatever endianness
        // we use. Swaps only apply when the memory is copied in a chunk.
        hash ^= murmur_32_scramble(k);
    }

    /* Finalize. */
    hash ^= len; // mix in the length so prefixes hash differently
    hash ^= hash >> 16;
    hash *= 0x85ebca6b;
    hash ^= hash >> 13;
    hash *= 0xc2b2ae35;
    hash ^= hash >> 16;

    return hash;
}
diff --git a/contrib/SDL-3.2.8/src/stdlib/SDL_qsort.c b/contrib/SDL-3.2.8/src/stdlib/SDL_qsort.c
new file mode 100644
index 0000000..4ed2863
--- /dev/null
+++ b/contrib/SDL-3.2.8/src/stdlib/SDL_qsort.c
@@ -0,0 +1,574 @@
1/*
2 Simple DirectMedia Layer
3 Copyright (C) 1997-2025 Sam Lantinga <slouken@libsdl.org>
4
5 This software is provided 'as-is', without any express or implied
6 warranty. In no event will the authors be held liable for any damages
7 arising from the use of this software.
8
9 Permission is granted to anyone to use this software for any purpose,
10 including commercial applications, and to alter it and redistribute it
11 freely, subject to the following restrictions:
12
13 1. The origin of this software must not be misrepresented; you must not
14 claim that you wrote the original software. If you use this software
15 in a product, an acknowledgment in the product documentation would be
16 appreciated but is not required.
17 2. Altered source versions must be plainly marked as such, and must not be
18 misrepresented as being the original software.
19 3. This notice may not be removed or altered from any source distribution.
20*/
21#include "SDL_internal.h"
22
// SDL3 always uses its own internal qsort implementation, below, so
// it can guarantee stable sorts across platforms and not have to
// tapdance to support the various qsort_r interfaces, or bridge from
// the C runtime's non-SDLCALL compare functions.

// Redirect the libc names used by the (mostly unmodified) imported
// qsort.c code below to SDL's own assert/allocator/memory routines.
#ifdef assert
#undef assert
#endif
#define assert SDL_assert
#ifdef malloc
#undef malloc
#endif
#define malloc SDL_malloc
#ifdef free
#undef free
#endif
#define free SDL_free
#ifdef memcpy
#undef memcpy
#endif
#define memcpy SDL_memcpy
#ifdef memmove
#undef memmove
#endif
#define memmove SDL_memmove
48
49/*
50This code came from Gareth McCaughan, under the zlib license.
51Specifically this: https://www.mccaughan.org.uk/software/qsort.c-1.16
52
53Everything below this comment until the HAVE_QSORT #endif was from Gareth
54(any minor changes will be noted inline).
55
56Thank you to Gareth for relicensing this code under the zlib license for our
57benefit!
58
59Update for SDL3: we have modified this from a qsort function to qsort_r.
60
61--ryan.
62*/
63
64/* This is a drop-in replacement for the C library's |qsort()| routine.
65 *
66 * It is intended for use where you know or suspect that your
67 * platform's qsort is bad. If that isn't the case, then you
68 * should probably use the qsort your system gives you in preference
69 * to mine -- it will likely have been tested and tuned better.
70 *
71 * Features:
72 * - Median-of-three pivoting (and more)
73 * - Truncation and final polishing by a single insertion sort
74 * - Early truncation when no swaps needed in pivoting step
75 * - Explicit recursion, guaranteed not to overflow
76 * - A few little wrinkles stolen from the GNU |qsort()|.
77 * (For the avoidance of doubt, no code was stolen, only
78 * broad ideas.)
79 * - separate code for non-aligned / aligned / word-size objects
80 *
81 * Earlier releases of this code used an idiosyncratic licence
82 * I wrote myself, because I'm an idiot. The code is now released
83 * under the "zlib/libpng licence"; you will find the actual
84 * terms in the next comment. I request (but do not require)
85 * that if you make any changes beyond the name of the exported
86 * routine and reasonable tweaks to the TRUNC_* and
87 * PIVOT_THRESHOLD values, you modify the _ID string so as
88 * to make it clear that you have changed the code.
89 *
90 * If you find problems with this code, or find ways of
91 * making it significantly faster, please let me know!
92 * My e-mail address, valid as of early 2016 and for the
93 * foreseeable future, is
94 * gareth.mccaughan@pobox.com
95 * Thanks!
96 *
97 * Gareth McCaughan
98 */
99
100/* Copyright (c) 1998-2021 Gareth McCaughan
101 *
102 * This software is provided 'as-is', without any express or implied
103 * warranty. In no event will the authors be held liable for any
104 * damages arising from the use of this software.
105 *
106 * Permission is granted to anyone to use this software for any purpose,
107 * including commercial applications, and to alter it and redistribute it
108 * freely, subject to the following restrictions:
109 *
110 * 1. The origin of this software must not be misrepresented;
111 * you must not claim that you wrote the original software.
112 * If you use this software in a product, an acknowledgment
113 * in the product documentation would be appreciated but
114 * is not required.
115 *
116 * 2. Altered source versions must be plainly marked as such,
117 * and must not be misrepresented as being the original software.
118 *
119 * 3. This notice may not be removed or altered from any source
120 * distribution.
121 */
122
123/* Revision history since release:
124 * 1998-03-19 v1.12 First release I have any records of.
125 * 2007-09-02 v1.13 Fix bug kindly reported by Dan Bodoh
126 * (premature termination of recursion).
127 * Add a few clarifying comments.
128 * Minor improvements to debug output.
129 * 2016-02-21 v1.14 Replace licence with 2-clause BSD,
130 * and clarify a couple of things in
131 * comments. No code changes.
132 * 2016-03-10 v1.15 Fix bug kindly reported by Ryan Gordon
133 * (pre-insertion-sort messed up).
134 * Disable DEBUG_QSORT by default.
135 * Tweak comments very slightly.
136 * 2021-02-20 v1.16 Fix bug kindly reported by Ray Gardner
137 * (error in recursion leading to possible
138 * stack overflow).
139 * When checking alignment, avoid casting
140 * pointer to possibly-smaller integer.
141 */
142
143/* BEGIN SDL CHANGE ... commented this out with an #if 0 block. --ryan. */
144#if 0
145#include <assert.h>
146#include <stdint.h>
147#include <stdlib.h>
148#include <string.h>
149
150#undef DEBUG_QSORT
151
152static char _ID[]="<qsort.c gjm WITH CHANGES FOR SDL3 1.16 2021-02-20>";
153#endif
154/* END SDL CHANGE ... commented this out with an #if 0 block. --ryan. */
155
/* How many bytes are there per word? (Must be a power of 2,
 * and must in fact equal sizeof(int).)
 */
#define WORD_BYTES sizeof(int)

/* How big does our stack need to be? Answer: one entry per
 * bit in a |size_t|. (Actually, a bit less because we don't
 * recurse all the way down to size-1 subarrays.)
 */
#define STACK_SIZE (8*sizeof(size_t))

/* Different situations have slightly different requirements,
 * and we make life epsilon easier by using different truncation
 * points for the three different cases.
 * So far, I have tuned TRUNC_words and guessed that the same
 * value might work well for the other two cases. Of course
 * what works well on my machine might work badly on yours.
 */
#define TRUNC_nonaligned 12
#define TRUNC_aligned 12
#define TRUNC_words 12*WORD_BYTES /* nb different meaning */

/* We use a simple pivoting algorithm for shortish sub-arrays
 * and a more complicated one for larger ones. The threshold
 * is PIVOT_THRESHOLD.
 */
#define PIVOT_THRESHOLD 40

/* One pending subarray [first, last] awaiting sorting. */
typedef struct { char * first; char * last; } stack_entry;
/* Save one half of the current range on the explicit stack... */
#define pushLeft {stack[stacktop].first=ffirst;stack[stacktop++].last=last;}
#define pushRight {stack[stacktop].first=first;stack[stacktop++].last=llast;}
/* ...or continue immediately with one half... */
#define doLeft {first=ffirst;llast=last;continue;}
#define doRight {ffirst=first;last=llast;continue;}
/* ...or fetch the next saved range (break means: all done). */
#define pop {if (--stacktop<0) break;\
  first=ffirst=stack[stacktop].first;\
  last=llast=stack[stacktop].last;\
  continue;}
193
194/* Some comments on the implementation.
195 * 1. When we finish partitioning the array into "low"
196 * and "high", we forget entirely about short subarrays,
197 * because they'll be done later by insertion sort.
198 * Doing lots of little insertion sorts might be a win
199 * on large datasets for locality-of-reference reasons,
200 * but it makes the code much nastier and increases
201 * bookkeeping overhead.
202 * 2. We always save the longer and get to work on the
203 * shorter. This guarantees that whenever we push
204 * a k'th entry onto the stack we are about to get
205 * working on something of size <= N/2^k where N is
206 * the original array size; so the stack can't need
207 * more than log_2(max-array-size) entries.
208 * 3. We choose a pivot by looking at the first, last
209 * and middle elements. We arrange them into order
210 * because it's easy to do that in conjunction with
211 * choosing the pivot, and it makes things a little
212 * easier in the partitioning step. Anyway, the pivot
213 * is the middle of these three. It's still possible
214 * to construct datasets where the algorithm takes
215 * time of order n^2, but it simply never happens in
216 * practice.
217 * 3' Newsflash: On further investigation I find that
218 * it's easy to construct datasets where median-of-3
219 * simply isn't good enough. So on large-ish subarrays
220 * we do a more sophisticated pivoting: we take three
221 * sets of 3 elements, find their medians, and then
222 * take the median of those.
223 * 4. We copy the pivot element to a separate place
224 * because that way we can always do our comparisons
225 * directly against a pointer to that separate place,
226 * and don't have to wonder "did we move the pivot
227 * element?". This makes the inner loop better.
228 * 5. It's possible to make the pivoting even more
229 * reliable by looking at more candidates when n
230 * is larger. (Taking this to its logical conclusion
231 * results in a variant of quicksort that doesn't
232 * have that n^2 worst case.) However, the overhead
233 * from the extra bookkeeping means that it's just
234 * not worth while.
235 * 6. This is pretty clean and portable code. Here are
236 * all the potential portability pitfalls and problems
237 * I know of:
238 * - In one place (the insertion sort) I construct
239 * a pointer that points just past the end of the
240 * supplied array, and assume that (a) it won't
241 * compare equal to any pointer within the array,
242 * and (b) it will compare equal to a pointer
243 * obtained by stepping off the end of the array.
244 * These might fail on some segmented architectures.
245 * - I assume that there are 8 bits in a |char| when
246 * computing the size of stack needed. This would
247 * fail on machines with 9-bit or 16-bit bytes.
248 * - I assume that if |((int)base&(sizeof(int)-1))==0|
249 * and |(size&(sizeof(int)-1))==0| then it's safe to
250 * get at array elements via |int*|s, and that if
251 * actually |size==sizeof(int)| as well then it's
252 * safe to treat the elements as |int|s. This might
253 * fail on systems that convert pointers to integers
254 * in non-standard ways.
255 * - I assume that |8*sizeof(size_t)<=INT_MAX|. This
256 * would be false on a machine with 8-bit |char|s,
257 * 16-bit |int|s and 4096-bit |size_t|s. :-)
258 */
259
/* The recursion logic is the same in each case.
 * We keep chopping up until we reach subarrays of size
 * strictly less than Trunc; we leave these unsorted. */
#define Recurse(Trunc) \
  { size_t l=last-ffirst,r=llast-first; \
    if (l<Trunc) { \
      if (r>=Trunc) doRight \
      else pop \
    } \
    else if (l<=r) { pushRight; doLeft } \
    else if (r>=Trunc) { pushLeft; doRight }\
    else doLeft \
  }

/* and so is the pivoting logic (note: last is inclusive):
 * median-of-three for small ranges, pivot_big() (median of medians)
 * above PIVOT_THRESHOLD elements. Sorting first/mid/last as a side
 * effect lets first and last step inward one element each. */
#define Pivot(swapper,sz) \
  if ((size_t)(last-first)>PIVOT_THRESHOLD*sz) mid=pivot_big(first,mid,last,sz,compare,userdata);\
  else { \
    if (compare(userdata,first,mid)<0) { \
      if (compare(userdata,mid,last)>0) { \
        swapper(mid,last); \
        if (compare(userdata,first,mid)>0) swapper(first,mid);\
      } \
    } \
    else { \
      if (compare(userdata,mid,last)>0) swapper(first,last)\
      else { \
        swapper(first,mid); \
        if (compare(userdata,mid,last)>0) swapper(mid,last);\
      } \
    } \
    first+=sz; last-=sz; \
  }

#ifdef DEBUG_QSORT
#include <stdio.h>
#endif

/* and so is the partitioning logic: */
#define Partition(swapper,sz) { \
  do { \
    while (compare(userdata,first,pivot)<0) first+=sz; \
    while (compare(userdata,pivot,last)<0) last-=sz; \
    if (first<last) { \
      swapper(first,last); \
      first+=sz; last-=sz; } \
    else if (first==last) { first+=sz; last-=sz; break; }\
  } while (first<=last); \
}

/* and so is the pre-insertion-sort operation of putting
 * the smallest element into place as a sentinel.
 * Doing this makes the inner loop nicer. I got this
 * idea from the GNU implementation of qsort().
 * We find the smallest element from the first |nmemb|,
 * or the first |limit|, whichever is smaller;
 * therefore we must have ensured that the globally smallest
 * element is in the first |limit| (because our
 * quicksort recursion bottoms out only once we
 * reach subarrays smaller than |limit|).
 */
#define PreInsertion(swapper,limit,sz) \
  first=base; \
  last=first + ((nmemb>limit ? limit : nmemb)-1)*sz;\
  while (last!=base) { \
    if (compare(userdata,first,last)>0) first=last; \
    last-=sz; } \
  if (first!=base) swapper(first,(char*)base);

/* and so is the insertion sort, in the first two cases: */
#define Insertion(swapper) \
  last=((char*)base)+nmemb*size; \
  for (first=((char*)base)+size;first!=last;first+=size) { \
    char *test; \
    /* Find the right place for |first|. \
     * My apologies for var reuse. */ \
    for (test=first-size;compare(userdata,test,first)>0;test-=size) ; \
    test+=size; \
    if (test!=first) { \
      /* Shift everything in [test,first) \
       * up by one, and place |first| \
       * where |test| is. */ \
      memcpy(pivot,first,size); \
      memmove(test+size,test,first-test); \
      memcpy(test,pivot,size); \
    } \
  }

/* Byte-at-a-time swap for arbitrarily aligned/sized elements. */
#define SWAP_nonaligned(a,b) { \
  register char *aa=(a),*bb=(b); \
  register size_t sz=size; \
  do { register char t=*aa; *aa++=*bb; *bb++=t; } while (--sz); }

/* Word-at-a-time swap; requires word-aligned, word-multiple elements. */
#define SWAP_aligned(a,b) { \
  register int *aa=(int*)(a),*bb=(int*)(b); \
  register size_t sz=size; \
  do { register int t=*aa;*aa++=*bb; *bb++=t; } while (sz-=WORD_BYTES); }

/* Single-word swap for elements that are exactly one word. */
#define SWAP_words(a,b) { \
  register int t=*((int*)a); *((int*)a)=*((int*)b); *((int*)b)=t; }
360
361/* ---------------------------------------------------------------------- */
362
/* Choose a pivot for a large subarray: sample three elements from each of
 * the low, middle and high regions, take each region's median, then return
 * the median of those three medians (the "ninther"). */
static char * pivot_big(char *first, char *mid, char *last, size_t size,
                        int (SDLCALL *compare)(void *, const void *, const void *), void *userdata) {
  size_t d=(((last-first)/size)>>3)*size; /* sample spacing: ~1/8 of the range, in bytes */
#ifdef DEBUG_QSORT
fprintf(stderr, "pivot_big: first=%p last=%p size=%lu n=%lu\n", first, (unsigned long)last, size, (unsigned long)((last-first+1)/size));
#endif
  char *m1,*m2,*m3;
  { char *a=first, *b=first+d, *c=first+2*d;
#ifdef DEBUG_QSORT
fprintf(stderr,"< %d %d %d @ %p %p %p\n",*(int*)a,*(int*)b,*(int*)c, a,b,c);
#endif
    /* median of the three low samples */
    m1 = compare(userdata,a,b)<0 ?
           (compare(userdata,b,c)<0 ? b : (compare(userdata,a,c)<0 ? c : a))
         : (compare(userdata,a,c)<0 ? a : (compare(userdata,b,c)<0 ? c : b));
  }
  { char *a=mid-d, *b=mid, *c=mid+d;
#ifdef DEBUG_QSORT
fprintf(stderr,". %d %d %d @ %p %p %p\n",*(int*)a,*(int*)b,*(int*)c, a,b,c);
#endif
    /* median of the three middle samples */
    m2 = compare(userdata,a,b)<0 ?
           (compare(userdata,b,c)<0 ? b : (compare(userdata,a,c)<0 ? c : a))
         : (compare(userdata,a,c)<0 ? a : (compare(userdata,b,c)<0 ? c : b));
  }
  { char *a=last-2*d, *b=last-d, *c=last;
#ifdef DEBUG_QSORT
fprintf(stderr,"> %d %d %d @ %p %p %p\n",*(int*)a,*(int*)b,*(int*)c, a,b,c);
#endif
    /* median of the three high samples */
    m3 = compare(userdata,a,b)<0 ?
           (compare(userdata,b,c)<0 ? b : (compare(userdata,a,c)<0 ? c : a))
         : (compare(userdata,a,c)<0 ? a : (compare(userdata,b,c)<0 ? c : b));
  }
#ifdef DEBUG_QSORT
fprintf(stderr,"-> %d %d %d @ %p %p %p\n",*(int*)m1,*(int*)m2,*(int*)m3, m1,m2,m3);
#endif
  /* median of the three medians */
  return compare(userdata,m1,m2)<0 ?
           (compare(userdata,m2,m3)<0 ? m2 : (compare(userdata,m1,m3)<0 ? m3 : m1))
         : (compare(userdata,m1,m3)<0 ? m1 : (compare(userdata,m2,m3)<0 ? m3 : m2));
}
401
402/* ---------------------------------------------------------------------- */
403
/* Sort elements whose base pointer or size is not word-aligned:
 * all swaps and copies go byte-by-byte. */
static void qsort_r_nonaligned(void *base, size_t nmemb, size_t size,
               int (SDLCALL *compare)(void *, const void *, const void *), void *userdata) {

  stack_entry stack[STACK_SIZE];
  int stacktop=0;
  char *first,*last;
  char *pivot=malloc(size);          /* scratch copy of the pivot element */
  size_t trunc=TRUNC_nonaligned*size; /* ranges below this many bytes go to insertion sort */
  assert(pivot != NULL);

  first=(char*)base; last=first+(nmemb-1)*size;

  if ((size_t)(last-first)>=trunc) {
    char *ffirst=first, *llast=last;
    while (1) {
      /* Select pivot */
      { char * mid=first+size*((last-first)/size >> 1);
        Pivot(SWAP_nonaligned,size);
        memcpy(pivot,mid,size);
      }
      /* Partition. */
      Partition(SWAP_nonaligned,size);
      /* Prepare to recurse/iterate. */
      Recurse(trunc)
    }
  }
  PreInsertion(SWAP_nonaligned,TRUNC_nonaligned,size);
  Insertion(SWAP_nonaligned);
  free(pivot);
}
434
/* Sort word-aligned elements whose size is a multiple of the word size
 * (but not exactly one word): swaps move whole ints at a time. */
static void qsort_r_aligned(void *base, size_t nmemb, size_t size,
            int (SDLCALL *compare)(void *,const void *, const void *), void *userdata) {

  stack_entry stack[STACK_SIZE];
  int stacktop=0;
  char *first,*last;
  char *pivot=malloc(size);        /* scratch copy of the pivot element */
  size_t trunc=TRUNC_aligned*size; /* ranges below this many bytes go to insertion sort */
  assert(pivot != NULL);

  first=(char*)base; last=first+(nmemb-1)*size;

  if ((size_t)(last-first)>=trunc) {
    char *ffirst=first,*llast=last;
    while (1) {
      /* Select pivot */
      { char * mid=first+size*((last-first)/size >> 1);
        Pivot(SWAP_aligned,size);
        memcpy(pivot,mid,size);
      }
      /* Partition. */
      Partition(SWAP_aligned,size);
      /* Prepare to recurse/iterate. */
      Recurse(trunc)
    }
  }
  PreInsertion(SWAP_aligned,TRUNC_aligned,size);
  Insertion(SWAP_aligned);
  free(pivot);
}
465
/* Sort elements that are exactly one word: elements are swapped and
 * copied as ints, and the final insertion sort is specialized too. */
static void qsort_r_words(void *base, size_t nmemb,
          int (SDLCALL *compare)(void *,const void *, const void *), void *userdata) {

  stack_entry stack[STACK_SIZE];
  int stacktop=0;
  char *first,*last;
  char *pivot=malloc(WORD_BYTES); /* scratch copy of the pivot word */
  assert(pivot != NULL);

  first=(char*)base; last=first+(nmemb-1)*WORD_BYTES;

  if (last-first>=TRUNC_words) {
    char *ffirst=first, *llast=last;
    while (1) {
#ifdef DEBUG_QSORT
fprintf(stderr,"Doing %d:%d: ",
        (first-(char*)base)/WORD_BYTES,
        (last-(char*)base)/WORD_BYTES);
#endif
      /* Select pivot */
      { char * mid=first+WORD_BYTES*((last-first) / (2*WORD_BYTES));
        Pivot(SWAP_words,WORD_BYTES);
        *(int*)pivot=*(int*)mid;
#ifdef DEBUG_QSORT
fprintf(stderr,"pivot = %p = #%lu = %d\n", mid, (unsigned long)(((int*)mid)-((int*)base)), *(int*)mid);
#endif
      }
      /* Partition. */
      Partition(SWAP_words,WORD_BYTES);
#ifdef DEBUG_QSORT
fprintf(stderr, "after partitioning first=#%lu last=#%lu\n", (first-(char*)base)/4lu, (last-(char*)base)/4lu);
#endif
      /* Prepare to recurse/iterate. */
      Recurse(TRUNC_words)
    }
  }
  PreInsertion(SWAP_words,TRUNC_words/WORD_BYTES,WORD_BYTES);
  /* Now do insertion sort. */
  last=((char*)base)+nmemb*WORD_BYTES;
  for (first=((char*)base)+WORD_BYTES;first!=last;first+=WORD_BYTES) {
    /* Find the right place for |first|. My apologies for var reuse */
    int *pl=(int*)(first-WORD_BYTES),*pr=(int*)first;
    *(int*)pivot=*(int*)first;
    /* Shift larger elements right; the sentinel placed by PreInsertion
     * guarantees this loop stops before running off the front. */
    for (;compare(userdata,pl,pivot)>0;pr=pl,--pl) {
      *pr=*pl; }
    if (pr!=(int*)first) *pr=*(int*)pivot;
  }
  free(pivot);
}
515
516/* ---------------------------------------------------------------------- */
517
518void SDL_qsort_r(void *base, size_t nmemb, size_t size,
519 SDL_CompareCallback_r compare, void *userdata) {
520
521 if (nmemb<=1) return;
522 if (((uintptr_t)base|size)&(WORD_BYTES-1))
523 qsort_r_nonaligned(base,nmemb,size,compare,userdata);
524 else if (size!=WORD_BYTES)
525 qsort_r_aligned(base,nmemb,size,compare,userdata);
526 else
527 qsort_r_words(base,nmemb,compare,userdata);
528}
529
530static int SDLCALL qsort_non_r_bridge(void *userdata, const void *a, const void *b)
531{
532 int (SDLCALL *compare)(const void *, const void *) = (int (SDLCALL *)(const void *, const void *)) userdata;
533 return compare(a, b);
534}
535
// Plain (non-_r) sort: reuse SDL_qsort_r, passing the caller's compare
// function through as the userdata for qsort_non_r_bridge to unpack.
void SDL_qsort(void *base, size_t nmemb, size_t size, SDL_CompareCallback compare)
{
    SDL_qsort_r(base, nmemb, size, qsort_non_r_bridge, compare);
}
540
541// Don't use the C runtime for such a simple function, since we want to allow SDLCALL callbacks and userdata.
542// SDL's replacement: Taken from the Public Domain C Library (PDCLib):
543// Permission is granted to use, modify, and / or redistribute at will.
544void *SDL_bsearch_r(const void *key, const void *base, size_t nmemb, size_t size, SDL_CompareCallback_r compare, void *userdata)
545{
546 const void *pivot;
547 size_t corr;
548 int rc;
549
550 while (nmemb) {
551 /* algorithm needs -1 correction if remaining elements are an even number. */
552 corr = nmemb % 2;
553 nmemb /= 2;
554 pivot = (const char *)base + (nmemb * size);
555 rc = compare(userdata, key, pivot);
556
557 if (rc > 0) {
558 base = (const char *)pivot + size;
559 /* applying correction */
560 nmemb -= (1 - corr);
561 } else if (rc == 0) {
562 return (void *)pivot;
563 }
564 }
565
566 return NULL;
567}
568
// Plain (non-_r) binary search over a sorted array; the caller's compare
// function rides along as userdata.
void *SDL_bsearch(const void *key, const void *base, size_t nmemb, size_t size, SDL_CompareCallback compare)
{
    // qsort_non_r_bridge just happens to match calling conventions, so reuse it.
    return SDL_bsearch_r(key, base, nmemb, size, qsort_non_r_bridge, compare);
}
574
diff --git a/contrib/SDL-3.2.8/src/stdlib/SDL_random.c b/contrib/SDL-3.2.8/src/stdlib/SDL_random.c
new file mode 100644
index 0000000..a774d30
--- /dev/null
+++ b/contrib/SDL-3.2.8/src/stdlib/SDL_random.c
@@ -0,0 +1,115 @@
1/*
2 Simple DirectMedia Layer
3 Copyright (C) 1997-2025 Sam Lantinga <slouken@libsdl.org>
4
5 This software is provided 'as-is', without any express or implied
6 warranty. In no event will the authors be held liable for any damages
7 arising from the use of this software.
8
9 Permission is granted to anyone to use this software for any purpose,
10 including commercial applications, and to alter it and redistribute it
11 freely, subject to the following restrictions:
12
13 1. The origin of this software must not be misrepresented; you must not
14 claim that you wrote the original software. If you use this software
15 in a product, an acknowledgment in the product documentation would be
16 appreciated but is not required.
17 2. Altered source versions must be plainly marked as such, and must not be
18 misrepresented as being the original software.
19 3. This notice may not be removed or altered from any source distribution.
20*/
21#include "SDL_internal.h"
22
// This file contains portable random functions for SDL

// Shared state for the global (non-_r) API; seeded lazily on first use.
static Uint64 SDL_rand_state;
static bool SDL_rand_initialized = false;

// Seed the global generator. A seed of 0 derives the seed from the
// high-resolution performance counter instead.
void SDL_srand(Uint64 seed)
{
    if (!seed) {
        seed = SDL_GetPerformanceCounter();
    }
    SDL_rand_state = seed;
    SDL_rand_initialized = true;
}
36
// Random integer in [0, n) using the shared global state,
// seeding it on first use.
Sint32 SDL_rand(Sint32 n)
{
    if (!SDL_rand_initialized) {
        SDL_srand(0);
    }

    return SDL_rand_r(&SDL_rand_state, n);
}
45
// Random float in [0, 1) using the shared global state,
// seeding it on first use.
float SDL_randf(void)
{
    if (!SDL_rand_initialized) {
        SDL_srand(0);
    }

    return SDL_randf_r(&SDL_rand_state);
}
54
// 32 random bits from the shared global state, seeding it on first use.
Uint32 SDL_rand_bits(void)
{
    if (!SDL_rand_initialized) {
        SDL_srand(0);
    }

    return SDL_rand_bits_r(&SDL_rand_state);
}
63
// Generate 32 random bits from caller-owned LCG state. The state is
// advanced in place; a NULL state returns 0 without touching anything.
Uint32 SDL_rand_bits_r(Uint64 *state)
{
    if (!state) {
        return 0;
    }

    // The C and A parameters of this LCG have been chosen based on hundreds
    // of core-hours of testing with PractRand and TestU01's Crush.
    // Using a 32-bit A improves performance on 32-bit architectures.
    // C can be any odd number, but < 256 generates smaller code on ARM32
    // These values perform as well as a full 64-bit implementation against
    // Crush and PractRand. Plus, their worst-case performance is better
    // than common 64-bit constants when tested against PractRand using seeds
    // with only a single bit set.

    // We tested all 32-bit and 33-bit A with all C < 256 from a v2 of:
    // Steele GL, Vigna S. Computationally easy, spectrally good multipliers
    // for congruential pseudorandom number generators.
    // Softw Pract Exper. 2022;52(2):443-458. doi: 10.1002/spe.3030
    // https://arxiv.org/abs/2001.05304v2

    *state = *state * 0xff1cd035ul + 0x05;

    // Only return top 32 bits because they have a longer period
    return (Uint32)(*state >> 32);
}
90
91Sint32 SDL_rand_r(Uint64 *state, Sint32 n)
92{
93 // Algorithm: get 32 bits from SDL_rand_bits() and treat it as a 0.32 bit
94 // fixed point number. Multiply by the 31.0 bit n to get a 31.32 bit
95 // result. Shift right by 32 to get the 31 bit integer that we want.
96
97 if (n < 0) {
98 // The algorithm looks like it works for numbers < 0 but it has an
99 // infinitesimal chance of returning a value out of range.
100 // Returning -SDL_rand(abs(n)) blows up at INT_MIN instead.
101 // It's easier to just say no.
102 return 0;
103 }
104
105 // On 32-bit arch, the compiler will optimize to a single 32-bit multiply
106 Uint64 val = (Uint64)SDL_rand_bits_r(state) * n;
107 return (Sint32)(val >> 32);
108}
109
110float SDL_randf_r(Uint64 *state)
111{
112 // Note: its using 24 bits because float has 23 bits significand + 1 implicit bit
113 return (SDL_rand_bits_r(state) >> (32 - 24)) * 0x1p-24f;
114}
115
diff --git a/contrib/SDL-3.2.8/src/stdlib/SDL_stdlib.c b/contrib/SDL-3.2.8/src/stdlib/SDL_stdlib.c
new file mode 100644
index 0000000..98faab9
--- /dev/null
+++ b/contrib/SDL-3.2.8/src/stdlib/SDL_stdlib.c
@@ -0,0 +1,567 @@
1/*
2 Simple DirectMedia Layer
3 Copyright (C) 1997-2025 Sam Lantinga <slouken@libsdl.org>
4
5 This software is provided 'as-is', without any express or implied
6 warranty. In no event will the authors be held liable for any damages
7 arising from the use of this software.
8
9 Permission is granted to anyone to use this software for any purpose,
10 including commercial applications, and to alter it and redistribute it
11 freely, subject to the following restrictions:
12
13 1. The origin of this software must not be misrepresented; you must not
14 claim that you wrote the original software. If you use this software
15 in a product, an acknowledgment in the product documentation would be
16 appreciated but is not required.
17 2. Altered source versions must be plainly marked as such, and must not be
18 misrepresented as being the original software.
19 3. This notice may not be removed or altered from any source distribution.
20*/
21#include "SDL_internal.h"
22
23// This file contains portable stdlib functions for SDL
24
25#include "../libm/math_libm.h"
26
// Arc tangent; uses the C runtime when available, otherwise SDL's bundled
// uclibc implementation.
double SDL_atan(double x)
{
#ifdef HAVE_ATAN
    return atan(x);
#else
    return SDL_uclibc_atan(x);
#endif
}

// Single-precision arc tangent; routed through the double-precision path
// when the C runtime lacks atanf().
float SDL_atanf(float x)
{
#ifdef HAVE_ATANF
    return atanf(x);
#else
    return (float)SDL_atan((double)x);
#endif
}

// Two-argument arc tangent of y/x, using the signs of both to pick the quadrant.
double SDL_atan2(double y, double x)
{
#ifdef HAVE_ATAN2
    return atan2(y, x);
#else
    return SDL_uclibc_atan2(y, x);
#endif
}

// Single-precision atan2, via the double-precision path when atan2f() is missing.
float SDL_atan2f(float y, float x)
{
#ifdef HAVE_ATAN2F
    return atan2f(y, x);
#else
    return (float)SDL_atan2((double)y, (double)x);
#endif
}
62
// Arc cosine. The fallback uses the identity acos(v) = atan(sqrt(1 - v^2) / v),
// adding pi when atan reports a negative angle (v < 0); v == -1 is special-cased
// to avoid dividing by the degenerate sqrt(0)/-1 path.
double SDL_acos(double val)
{
#ifdef HAVE_ACOS
    return acos(val);
#else
    double result;
    if (val == -1.0) {
        result = SDL_PI_D;
    } else {
        // NOTE(review): val == 0.0 divides by zero here; this presumably
        // relies on IEEE semantics (x/0.0 == +inf, atan(+inf) == pi/2) — confirm.
        result = SDL_atan(SDL_sqrt(1.0 - val * val) / val);
        if (result < 0.0) {
            result += SDL_PI_D;
        }
    }
    return result;
#endif
}

// Single-precision arc cosine, via the double-precision path.
float SDL_acosf(float val)
{
#ifdef HAVE_ACOSF
    return acosf(val);
#else
    return (float)SDL_acos((double)val);
#endif
}

// Arc sine. The fallback uses asin(v) = pi/2 - acos(v), with v == -1
// special-cased to return exactly -pi/2.
double SDL_asin(double val)
{
#ifdef HAVE_ASIN
    return asin(val);
#else
    double result;
    if (val == -1.0) {
        result = -(SDL_PI_D / 2.0);
    } else {
        result = (SDL_PI_D / 2.0) - SDL_acos(val);
    }
    return result;
#endif
}

// Single-precision arc sine, via the double-precision path.
float SDL_asinf(float val)
{
#ifdef HAVE_ASINF
    return asinf(val);
#else
    return (float)SDL_asin((double)val);
#endif
}
113
// Round up to the nearest integer. The fallback derives ceil from SDL_floor:
// bump by one whenever truncation toward -inf discarded a fractional part.
double SDL_ceil(double x)
{
#ifdef HAVE_CEIL
    return ceil(x);
#else
    double integer = SDL_floor(x);
    double fraction = x - integer;
    if (fraction > 0.0) {
        integer += 1.0;
    }
    return integer;
#endif // HAVE_CEIL
}

// Single-precision ceil, via the double-precision path when ceilf() is missing.
float SDL_ceilf(float x)
{
#ifdef HAVE_CEILF
    return ceilf(x);
#else
    return (float)SDL_ceil((double)x);
#endif
}
136
// Return a value with the magnitude of x and the sign of y.
double SDL_copysign(double x, double y)
{
#ifdef HAVE_COPYSIGN
    return copysign(x, y);
#elif defined(HAVE__COPYSIGN)
    return _copysign(x, y);
#elif defined(__WATCOMC__) && defined(__386__)
    // this is nasty as hell, but it works..
    // (type-puns the doubles to splice y's sign bit into x's high word;
    // assumes the little-endian IEEE-754 layout of this Watcom/x86 target)
    unsigned int *xi = (unsigned int *)&x,
                 *yi = (unsigned int *)&y;
    xi[1] = (yi[1] & 0x80000000) | (xi[1] & 0x7fffffff);
    return x;
#else
    return SDL_uclibc_copysign(x, y);
#endif // HAVE_COPYSIGN
}

// Single-precision copysign, via the double-precision path.
float SDL_copysignf(float x, float y)
{
#ifdef HAVE_COPYSIGNF
    return copysignf(x, y);
#else
    return (float)SDL_copysign((double)x, (double)y);
#endif
}
162
// Cosine; C runtime when available, else SDL's bundled uclibc implementation.
double SDL_cos(double x)
{
#ifdef HAVE_COS
    return cos(x);
#else
    return SDL_uclibc_cos(x);
#endif
}

// Single-precision cosine, via the double-precision path when cosf() is missing.
float SDL_cosf(float x)
{
#ifdef HAVE_COSF
    return cosf(x);
#else
    return (float)SDL_cos((double)x);
#endif
}

// Base-e exponential.
double SDL_exp(double x)
{
#ifdef HAVE_EXP
    return exp(x);
#else
    return SDL_uclibc_exp(x);
#endif
}

// Single-precision exponential, via the double-precision path.
float SDL_expf(float x)
{
#ifdef HAVE_EXPF
    return expf(x);
#else
    return (float)SDL_exp((double)x);
#endif
}

// Absolute value of a double.
double SDL_fabs(double x)
{
#ifdef HAVE_FABS
    return fabs(x);
#else
    return SDL_uclibc_fabs(x);
#endif
}

// Absolute value of a float, via the double-precision path.
float SDL_fabsf(float x)
{
#ifdef HAVE_FABSF
    return fabsf(x);
#else
    return (float)SDL_fabs((double)x);
#endif
}
216
// Round toward negative infinity.
double SDL_floor(double x)
{
#ifdef HAVE_FLOOR
    return floor(x);
#else
    return SDL_uclibc_floor(x);
#endif
}

// Single-precision floor, via the double-precision path.
float SDL_floorf(float x)
{
#ifdef HAVE_FLOORF
    return floorf(x);
#else
    return (float)SDL_floor((double)x);
#endif
}

// Round toward zero: floor() for non-negative values, ceil() for negative ones.
double SDL_trunc(double x)
{
#ifdef HAVE_TRUNC
    return trunc(x);
#else
    if (x >= 0.0f) {
        return SDL_floor(x);
    } else {
        return SDL_ceil(x);
    }
#endif
}

// Single-precision trunc, via the double-precision path.
float SDL_truncf(float x)
{
#ifdef HAVE_TRUNCF
    return truncf(x);
#else
    return (float)SDL_trunc((double)x);
#endif
}
256
// Floating-point remainder of x/y (same sign as x, like C fmod()).
double SDL_fmod(double x, double y)
{
#ifdef HAVE_FMOD
    return fmod(x, y);
#else
    return SDL_uclibc_fmod(x, y);
#endif
}

// Single-precision fmod, via the double-precision path.
float SDL_fmodf(float x, float y)
{
#ifdef HAVE_FMODF
    return fmodf(x, y);
#else
    return (float)SDL_fmod((double)x, (double)y);
#endif
}
274
// Nonzero if x is an infinity.
int SDL_isinf(double x)
{
#ifdef HAVE_ISINF
    return isinf(x);
#else
    return SDL_uclibc_isinf(x);
#endif
}

// Float infinity test: prefer the C99 type-generic isinf() macro, then the
// BSD-style isinff(), then SDL's own fallback.
int SDL_isinff(float x)
{
#ifdef HAVE_ISINF_FLOAT_MACRO
    return isinf(x);
#elif defined(HAVE_ISINFF)
    return isinff(x);
#else
    return SDL_uclibc_isinff(x);
#endif
}

// Nonzero if x is a NaN.
int SDL_isnan(double x)
{
#ifdef HAVE_ISNAN
    return isnan(x);
#else
    return SDL_uclibc_isnan(x);
#endif
}

// Float NaN test, with the same macro/function/fallback cascade as SDL_isinff().
int SDL_isnanf(float x)
{
#ifdef HAVE_ISNAN_FLOAT_MACRO
    return isnan(x);
#elif defined(HAVE_ISNANF)
    return isnanf(x);
#else
    return SDL_uclibc_isnanf(x);
#endif
}
314
// Natural (base-e) logarithm.
double SDL_log(double x)
{
#ifdef HAVE_LOG
    return log(x);
#else
    return SDL_uclibc_log(x);
#endif
}

// Single-precision natural log, via the double-precision path.
float SDL_logf(float x)
{
#ifdef HAVE_LOGF
    return logf(x);
#else
    return (float)SDL_log((double)x);
#endif
}

// Base-10 logarithm.
double SDL_log10(double x)
{
#ifdef HAVE_LOG10
    return log10(x);
#else
    return SDL_uclibc_log10(x);
#endif
}

// Single-precision base-10 log, via the double-precision path.
float SDL_log10f(float x)
{
#ifdef HAVE_LOG10F
    return log10f(x);
#else
    return (float)SDL_log10((double)x);
#endif
}
350
// Split x into fractional (returned) and integral (*y) parts.
double SDL_modf(double x, double *y)
{
#ifdef HAVE_MODF
    return modf(x, y);
#else
    return SDL_uclibc_modf(x, y);
#endif
}

// Single-precision modf; the fallback widens to double and narrows both results.
float SDL_modff(float x, float *y)
{
#ifdef HAVE_MODFF
    return modff(x, y);
#else
    double double_result, double_y;
    double_result = SDL_modf((double)x, &double_y);
    *y = (float)double_y;
    return (float)double_result;
#endif
}
371
// x raised to the power y.
double SDL_pow(double x, double y)
{
#ifdef HAVE_POW
    return pow(x, y);
#else
    return SDL_uclibc_pow(x, y);
#endif
}

// Single-precision pow, via the double-precision path.
float SDL_powf(float x, float y)
{
#ifdef HAVE_POWF
    return powf(x, y);
#else
    return (float)SDL_pow((double)x, (double)y);
#endif
}
389
// Round half away from zero (C99 round() semantics). NOTE(review): the
// fallback's floor(arg + 0.5) can differ from a true round() on values where
// adding 0.5 is inexact (e.g. nextafter(0.5, 0.0)) — confirm acceptable.
double SDL_round(double arg)
{
#if defined HAVE_ROUND
    return round(arg);
#else
    if (arg >= 0.0) {
        return SDL_floor(arg + 0.5);
    } else {
        return SDL_ceil(arg - 0.5);
    }
#endif
}

// Single-precision round, via the double-precision path.
float SDL_roundf(float arg)
{
#if defined HAVE_ROUNDF
    return roundf(arg);
#else
    return (float)SDL_round((double)arg);
#endif
}

// Round to the nearest long. NOTE(review): the fallback cast has no overflow
// handling for results outside long's range — confirm callers stay in range.
long SDL_lround(double arg)
{
#if defined HAVE_LROUND
    return lround(arg);
#else
    return (long)SDL_round(arg);
#endif
}

// Round a float to the nearest long, via the double-precision path.
long SDL_lroundf(float arg)
{
#if defined HAVE_LROUNDF
    return lroundf(arg);
#else
    return (long)SDL_round((double)arg);
#endif
}
429
// Scale x by FLT_RADIX^n (i.e. x * 2^n on binary-FP platforms).
double SDL_scalbn(double x, int n)
{
#ifdef HAVE_SCALBN
    return scalbn(x, n);
#elif defined(HAVE__SCALB)
    return _scalb(x, n);
#elif defined(HAVE_LIBC) && defined(HAVE_FLOAT_H) && (FLT_RADIX == 2)
    /* from scalbn(3): If FLT_RADIX equals 2 (which is
     * usual), then scalbn() is equivalent to ldexp(3). */
    return ldexp(x, n);
#else
    return SDL_uclibc_scalbn(x, n);
#endif
}

// Single-precision scalbn, via the double-precision path.
float SDL_scalbnf(float x, int n)
{
#ifdef HAVE_SCALBNF
    return scalbnf(x, n);
#else
    return (float)SDL_scalbn((double)x, n);
#endif
}
453
// Sine; C runtime when available, else SDL's bundled uclibc implementation.
double SDL_sin(double x)
{
#ifdef HAVE_SIN
    return sin(x);
#else
    return SDL_uclibc_sin(x);
#endif
}

// Single-precision sine, via the double-precision path.
float SDL_sinf(float x)
{
#ifdef HAVE_SINF
    return sinf(x);
#else
    return (float)SDL_sin((double)x);
#endif
}

// Square root.
double SDL_sqrt(double x)
{
#ifdef HAVE_SQRT
    return sqrt(x);
#else
    return SDL_uclibc_sqrt(x);
#endif
}

// Single-precision square root, via the double-precision path.
float SDL_sqrtf(float x)
{
#ifdef HAVE_SQRTF
    return sqrtf(x);
#else
    return (float)SDL_sqrt((double)x);
#endif
}

// Tangent.
double SDL_tan(double x)
{
#ifdef HAVE_TAN
    return tan(x);
#else
    return SDL_uclibc_tan(x);
#endif
}

// Single-precision tangent, via the double-precision path.
float SDL_tanf(float x)
{
#ifdef HAVE_TANF
    return tanf(x);
#else
    return (float)SDL_tan((double)x);
#endif
}
507
// Absolute value of an int. The fallback negates through unsigned
// arithmetic so that SDL_abs(INT_MIN) wraps (to INT_MIN) instead of
// invoking signed-overflow undefined behavior with a bare `-x`.
int SDL_abs(int x)
{
#ifdef HAVE_ABS
    return abs(x);
#else
    return (x < 0) ? (int)(0U - (unsigned int)x) : x;
#endif
}
516
// Locale-independent ASCII-only ctype replacements. Each classifier is
// defined before the ones that build on it, and compound classes are
// expressed in terms of the simpler ones.
int SDL_isdigit(int x) { return (x >= '0') && (x <= '9'); }
int SDL_isupper(int x) { return (x >= 'A') && (x <= 'Z'); }
int SDL_islower(int x) { return (x >= 'a') && (x <= 'z'); }
int SDL_isalpha(int x) { return SDL_isupper(x) || SDL_islower(x); }
int SDL_isalnum(int x) { return SDL_isalpha(x) || SDL_isdigit(x); }
int SDL_isxdigit(int x) { return SDL_isdigit(x) || ((x >= 'A') && (x <= 'F')) || ((x >= 'a') && (x <= 'f')); }
int SDL_isspace(int x) { switch (x) { case ' ': case '\t': case '\r': case '\n': case '\f': case '\v': return 1; default: return 0; } }
int SDL_isprint(int x) { return (x >= ' ') && (x < '\x7f'); }
int SDL_isgraph(int x) { return SDL_isprint(x) && (x != ' '); }
int SDL_ispunct(int x) { return SDL_isgraph(x) && !SDL_isalnum(x); }
int SDL_iscntrl(int x) { return ((x >= '\0') && (x <= '\x1f')) || (x == '\x7f'); }
int SDL_toupper(int x) { return SDL_islower(x) ? (x - 'a' + 'A') : x; }
int SDL_tolower(int x) { return SDL_isupper(x) ? (x - 'A' + 'a') : x; }
int SDL_isblank(int x) { return (x == ' ') || (x == '\t'); }
531
// Allocate `size` bytes aligned to `alignment`, backed by SDL_malloc().
// The raw pointer SDL_malloc() returned is stashed immediately before the
// address handed back so SDL_aligned_free() can recover it. Returns NULL
// when the padded-size arithmetic would overflow or the allocation fails.
void *SDL_aligned_alloc(size_t alignment, size_t size)
{
    size_t padding;
    Uint8 *result = NULL;

    // Never align below pointer size: the scheme needs room for a stored pointer.
    if (alignment < sizeof(void*)) {
        alignment = sizeof(void*);
    }
    padding = (alignment - (size % alignment));

    // Grow the request (alignment slack + stored pointer + tail padding),
    // bailing out if any step overflows size_t.
    if (SDL_size_add_check_overflow(size, alignment, &size) &&
        SDL_size_add_check_overflow(size, sizeof(void *), &size) &&
        SDL_size_add_check_overflow(size, padding, &size)) {
        void *original = SDL_malloc(size);
        if (original) {
            // Make sure we have enough space to store the original pointer
            result = (Uint8 *)original + sizeof(original);

            // Align the pointer we're going to return
            result += alignment - (((size_t)result) % alignment);

            // Store the original pointer right before the returned value
            SDL_memcpy(result - sizeof(original), &original, sizeof(original));
        }
    }
    return result;
}
559
560void SDL_aligned_free(void *mem)
561{
562 if (mem) {
563 void *original;
564 SDL_memcpy(&original, ((Uint8 *)mem - sizeof(original)), sizeof(original));
565 SDL_free(original);
566 }
567}
diff --git a/contrib/SDL-3.2.8/src/stdlib/SDL_string.c b/contrib/SDL-3.2.8/src/stdlib/SDL_string.c
new file mode 100644
index 0000000..007719e
--- /dev/null
+++ b/contrib/SDL-3.2.8/src/stdlib/SDL_string.c
@@ -0,0 +1,2515 @@
1/*
2 Simple DirectMedia Layer
3 Copyright (C) 1997-2025 Sam Lantinga <slouken@libsdl.org>
4
5 This software is provided 'as-is', without any express or implied
6 warranty. In no event will the authors be held liable for any damages
7 arising from the use of this software.
8
9 Permission is granted to anyone to use this software for any purpose,
10 including commercial applications, and to alter it and redistribute it
11 freely, subject to the following restrictions:
12
13 1. The origin of this software must not be misrepresented; you must not
14 claim that you wrote the original software. If you use this software
15 in a product, an acknowledgment in the product documentation would be
16 appreciated but is not required.
17 2. Altered source versions must be plainly marked as such, and must not be
18 misrepresented as being the original software.
19 3. This notice may not be removed or altered from any source distribution.
20*/
21#include "SDL_internal.h"
22
23// This file contains portable string manipulation functions for SDL
24
25#include "SDL_vacopy.h"
26
27#ifdef SDL_PLATFORM_VITA
28#include <psp2/kernel/clib.h>
29#endif
30
31#include "SDL_sysstdlib.h"
32
33#include "SDL_casefolding.h"
34
35#if defined(__SIZEOF_WCHAR_T__)
36#define SDL_SIZEOF_WCHAR_T __SIZEOF_WCHAR_T__
37#elif defined(SDL_PLATFORM_WINDOWS)
38#define SDL_SIZEOF_WCHAR_T 2
39#else // assume everything else is UTF-32 (add more tests if compiler-assert fails below!)
40#define SDL_SIZEOF_WCHAR_T 4
41#endif
42SDL_COMPILE_TIME_ASSERT(sizeof_wchar_t, sizeof(wchar_t) == SDL_SIZEOF_WCHAR_T);
43
44
45char *SDL_UCS4ToUTF8(Uint32 codepoint, char *dst)
46{
47 if (!dst) {
48 return NULL; // I guess...?
49 } else if (codepoint > 0x10FFFF) { // Outside the range of Unicode codepoints (also, larger than can be encoded in 4 bytes of UTF-8!).
50 codepoint = SDL_INVALID_UNICODE_CODEPOINT;
51 } else if ((codepoint >= 0xD800) && (codepoint <= 0xDFFF)) { // UTF-16 surrogate values are illegal in UTF-8.
52 codepoint = SDL_INVALID_UNICODE_CODEPOINT;
53 }
54
55 Uint8 *p = (Uint8 *)dst;
56 if (codepoint <= 0x7F) {
57 *p = (Uint8)codepoint;
58 ++dst;
59 } else if (codepoint <= 0x7FF) {
60 p[0] = 0xC0 | (Uint8)((codepoint >> 6) & 0x1F);
61 p[1] = 0x80 | (Uint8)(codepoint & 0x3F);
62 dst += 2;
63 } else if (codepoint <= 0xFFFF) {
64 p[0] = 0xE0 | (Uint8)((codepoint >> 12) & 0x0F);
65 p[1] = 0x80 | (Uint8)((codepoint >> 6) & 0x3F);
66 p[2] = 0x80 | (Uint8)(codepoint & 0x3F);
67 dst += 3;
68 } else {
69 SDL_assert(codepoint <= 0x10FFFF);
70 p[0] = 0xF0 | (Uint8)((codepoint >> 18) & 0x07);
71 p[1] = 0x80 | (Uint8)((codepoint >> 12) & 0x3F);
72 p[2] = 0x80 | (Uint8)((codepoint >> 6) & 0x3F);
73 p[3] = 0x80 | (Uint8)(codepoint & 0x3F);
74 dst += 4;
75 }
76
77 return dst;
78}
79
80
// this expects `from` and `to` to be UTF-32 encoding!
// Case-fold a single codepoint for caseless comparison: writes 1-3 folded
// codepoints into `to` (which must have room for three) and returns how
// many were written. Codepoints with no folding are copied through as-is.
int SDL_CaseFoldUnicode(Uint32 from, Uint32 *to)
{
    // !!! FIXME: since the hashtable is static, maybe we should binary
    // !!! FIXME: search it instead of walking the whole bucket.

    if (from < 128) { // low-ASCII, easy!
        if ((from >= 'A') && (from <= 'Z')) {
            *to = 'a' + (from - 'A');
            return 1;
        }
    } else if (from <= 0xFFFF) { // the Basic Multilingual Plane.
        const Uint8 hash = ((from ^ (from >> 8)) & 0xFF);
        const Uint16 from16 = (Uint16) from;

        // see if it maps to a single char (most common)...
        {
            const CaseFoldHashBucket1_16 *bucket = &case_fold_hash1_16[hash];
            const int count = (int) bucket->count;
            for (int i = 0; i < count; i++) {
                const CaseFoldMapping1_16 *mapping = &bucket->list[i];
                if (mapping->from == from16) {
                    *to = mapping->to0;
                    return 1;
                }
            }
        }

        // see if it folds down to two chars...
        {
            const CaseFoldHashBucket2_16 *bucket = &case_fold_hash2_16[hash & 15];
            const int count = (int) bucket->count;
            for (int i = 0; i < count; i++) {
                const CaseFoldMapping2_16 *mapping = &bucket->list[i];
                if (mapping->from == from16) {
                    to[0] = mapping->to0;
                    to[1] = mapping->to1;
                    return 2;
                }
            }
        }

        // okay, maybe it's _three_ characters!
        {
            const CaseFoldHashBucket3_16 *bucket = &case_fold_hash3_16[hash & 3];
            const int count = (int) bucket->count;
            for (int i = 0; i < count; i++) {
                const CaseFoldMapping3_16 *mapping = &bucket->list[i];
                if (mapping->from == from16) {
                    to[0] = mapping->to0;
                    to[1] = mapping->to1;
                    to[2] = mapping->to2;
                    return 3;
                }
            }
        }

    } else { // codepoint that doesn't fit in 16 bits.
        const Uint8 hash = ((from ^ (from >> 8)) & 0xFF);
        const CaseFoldHashBucket1_32 *bucket = &case_fold_hash1_32[hash & 15];
        const int count = (int) bucket->count;
        for (int i = 0; i < count; i++) {
            const CaseFoldMapping1_32 *mapping = &bucket->list[i];
            if (mapping->from == from) {
                *to = mapping->to0;
                return 1;
            }
        }
    }

    // Not found...there's no folding needed for this codepoint.
    *to = from;
    return 1;
}
155
/*
 * Core loop shared by the case-insensitive string comparisons: decodes one
 * codepoint at a time from `str1`/`str2` (which must be in scope in the
 * caller), case-folds each (a single codepoint may fold to up to three,
 * buffered in folded1/folded2), and compares the folded streams. Expands to
 * a return statement: -1/1 on the first difference, 0 on a complete match.
 * NOTE(review): str1start/str2start capture the pre-decode positions and are
 * presumably consumed by the caller-supplied update_slen1/update_slen2
 * expressions to account for bytes consumed — confirm against the callers.
 */
#define UNICODE_STRCASECMP(bits, slen1, slen2, update_slen1, update_slen2) \
    Uint32 folded1[3], folded2[3]; \
    int head1 = 0, tail1 = 0, head2 = 0, tail2 = 0; \
    while (true) { \
        Uint32 cp1, cp2; \
        if (head1 != tail1) { \
            cp1 = folded1[tail1++]; \
        } else { \
            const Uint##bits *str1start = (const Uint##bits *) str1; \
            head1 = SDL_CaseFoldUnicode(StepUTF##bits(&str1, slen1), folded1); \
            update_slen1; \
            cp1 = folded1[0]; \
            tail1 = 1; \
        } \
        if (head2 != tail2) { \
            cp2 = folded2[tail2++]; \
        } else { \
            const Uint##bits *str2start = (const Uint##bits *) str2; \
            head2 = SDL_CaseFoldUnicode(StepUTF##bits(&str2, slen2), folded2); \
            update_slen2; \
            cp2 = folded2[0]; \
            tail2 = 1; \
        } \
        if (cp1 < cp2) { \
            return -1; \
        } else if (cp1 > cp2) { \
            return 1; \
        } else if (cp1 == 0) { \
            break; /* complete match. */ \
        } \
    } \
    return 0
188
189
190static Uint32 StepUTF8(const char **_str, const size_t slen)
191{
192 /*
193 * From rfc3629, the UTF-8 spec:
194 * https://www.ietf.org/rfc/rfc3629.txt
195 *
196 * Char. number range | UTF-8 octet sequence
197 * (hexadecimal) | (binary)
198 * --------------------+---------------------------------------------
199 * 0000 0000-0000 007F | 0xxxxxxx
200 * 0000 0080-0000 07FF | 110xxxxx 10xxxxxx
201 * 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx
202 * 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
203 */
204
205 const Uint8 *str = (const Uint8 *) *_str;
206 const Uint32 octet = (Uint32) (slen ? *str : 0);
207
208 if (octet == 0) { // null terminator, end of string.
209 return 0; // don't advance `*_str`.
210 } else if ((octet & 0x80) == 0) { // 0xxxxxxx: one byte codepoint.
211 (*_str)++;
212 return octet;
213 } else if (((octet & 0xE0) == 0xC0) && (slen >= 2)) { // 110xxxxx 10xxxxxx: two byte codepoint.
214 const Uint8 str1 = str[1];
215 if ((str1 & 0xC0) == 0x80) { // If trailing bytes aren't 10xxxxxx, sequence is bogus.
216 const Uint32 result = ((octet & 0x1F) << 6) | (str1 & 0x3F);
217 if (result >= 0x0080) { // rfc3629 says you can't use overlong sequences for smaller values.
218 *_str += 2;
219 return result;
220 }
221 }
222 } else if (((octet & 0xF0) == 0xE0) && (slen >= 3)) { // 1110xxxx 10xxxxxx 10xxxxxx: three byte codepoint.
223 const Uint8 str1 = str[1];
224 const Uint8 str2 = str[2];
225 if (((str1 & 0xC0) == 0x80) && ((str2 & 0xC0) == 0x80)) { // If trailing bytes aren't 10xxxxxx, sequence is bogus.
226 const Uint32 octet2 = ((Uint32) (str1 & 0x3F)) << 6;
227 const Uint32 octet3 = ((Uint32) (str2 & 0x3F));
228 const Uint32 result = ((octet & 0x0F) << 12) | octet2 | octet3;
229 if (result >= 0x800) { // rfc3629 says you can't use overlong sequences for smaller values.
230 if ((result < 0xD800) || (result > 0xDFFF)) { // UTF-16 surrogate values are illegal in UTF-8.
231 *_str += 3;
232 return result;
233 }
234 }
235 }
236 } else if (((octet & 0xF8) == 0xF0) && (slen >= 4)) { // 11110xxxx 10xxxxxx 10xxxxxx 10xxxxxx: four byte codepoint.
237 const Uint8 str1 = str[1];
238 const Uint8 str2 = str[2];
239 const Uint8 str3 = str[3];
240 if (((str1 & 0xC0) == 0x80) && ((str2 & 0xC0) == 0x80) && ((str3 & 0xC0) == 0x80)) { // If trailing bytes aren't 10xxxxxx, sequence is bogus.
241 const Uint32 octet2 = ((Uint32) (str1 & 0x1F)) << 12;
242 const Uint32 octet3 = ((Uint32) (str2 & 0x3F)) << 6;
243 const Uint32 octet4 = ((Uint32) (str3 & 0x3F));
244 const Uint32 result = ((octet & 0x07) << 18) | octet2 | octet3 | octet4;
245 if (result >= 0x10000) { // rfc3629 says you can't use overlong sequences for smaller values.
246 *_str += 4;
247 return result;
248 }
249 }
250 }
251
252 // bogus byte, skip ahead, return a REPLACEMENT CHARACTER.
253 (*_str)++;
254 return SDL_INVALID_UNICODE_CODEPOINT;
255}
256
257Uint32 SDL_StepUTF8(const char **pstr, size_t *pslen)
258{
259 if (!pslen) {
260 return StepUTF8(pstr, 4); // 4 == max codepoint size.
261 }
262 const char *origstr = *pstr;
263 const Uint32 result = StepUTF8(pstr, *pslen);
264 *pslen -= (size_t) (*pstr - origstr);
265 return result;
266}
267
268Uint32 SDL_StepBackUTF8(const char *start, const char **pstr)
269{
270 if (!pstr || *pstr <= start) {
271 return 0;
272 }
273
274 // Step back over the previous UTF-8 character
275 const char *str = *pstr;
276 do {
277 if (str == start) {
278 break;
279 }
280 --str;
281 } while ((*str & 0xC0) == 0x80);
282
283 size_t length = (*pstr - str);
284 *pstr = str;
285 return StepUTF8(&str, length);
286}
287
#if (SDL_SIZEOF_WCHAR_T == 2)
// Decode one UTF-16 codepoint, advancing *_str past the unit(s) consumed
// (but not past a NUL). Orphaned or reversed surrogates decode to
// SDL_INVALID_UNICODE_CODEPOINT while consuming a single unit.
// NOTE(review): `slen` is unused — this presumably relies on NUL
// termination; a lone high surrogate at the end is caught by the pair == 0
// check, but a non-terminated buffer could be over-read. Confirm callers.
static Uint32 StepUTF16(const Uint16 **_str, const size_t slen)
{
    const Uint16 *str = *_str;
    Uint32 cp = (Uint32) *(str++);
    if (cp == 0) {
        return 0; // don't advance string pointer.
    } else if ((cp >= 0xDC00) && (cp <= 0xDFFF)) {
        cp = SDL_INVALID_UNICODE_CODEPOINT; // Orphaned second half of surrogate pair
    } else if ((cp >= 0xD800) && (cp <= 0xDBFF)) { // start of surrogate pair!
        const Uint32 pair = (Uint32) *str;
        if ((pair == 0) || ((pair < 0xDC00) || (pair > 0xDFFF))) {
            cp = SDL_INVALID_UNICODE_CODEPOINT;
        } else {
            str++; // eat the other surrogate.
            cp = 0x10000 + (((cp - 0xD800) << 10) | (pair - 0xDC00));
        }
    }

    *_str = str;
    return (cp > 0x10FFFF) ? SDL_INVALID_UNICODE_CODEPOINT : cp;
}
#elif (SDL_SIZEOF_WCHAR_T == 4)
// Decode one UTF-32 codepoint: trivial, but clamps values beyond U+10FFFF
// to SDL_INVALID_UNICODE_CODEPOINT and stops (without advancing) at NUL or
// when slen is exhausted.
static Uint32 StepUTF32(const Uint32 **_str, const size_t slen)
{
    if (!slen) {
        return 0;
    }

    const Uint32 *str = *_str;
    const Uint32 cp = *str;
    if (cp == 0) {
        return 0; // don't advance string pointer.
    }

    (*_str)++;
    return (cp > 0x10FFFF) ? SDL_INVALID_UNICODE_CODEPOINT : cp;
}
#endif
327
328#define UTF8_IsLeadByte(c) ((c) >= 0xC0 && (c) <= 0xF4)
329#define UTF8_IsTrailingByte(c) ((c) >= 0x80 && (c) <= 0xBF)
330
// Number of continuation bytes implied by a UTF-8 lead byte: 1 for 0xC0-0xDF,
// 2 for 0xE0-0xEF, 3 for 0xF0-0xF4, and 0 for anything that isn't a valid
// multi-byte lead.
static size_t UTF8_GetTrailingBytes(unsigned char c)
{
    if (c >= 0xF0 && c <= 0xF4) {
        return 3;
    }
    if (c >= 0xE0 && c <= 0xEF) {
        return 2;
    }
    if (c >= 0xC0 && c <= 0xDF) {
        return 1;
    }
    return 0;
}
343
344#if !defined(HAVE_VSSCANF) || !defined(HAVE_STRTOL) || !defined(HAVE_STRTOUL) || !defined(HAVE_STRTOLL) || !defined(HAVE_STRTOULL) || !defined(HAVE_STRTOD)
345/**
346 * Parses an unsigned long long and returns the unsigned value and sign bit.
347 *
348 * Positive values are clamped to ULLONG_MAX.
349 * The result `value == 0 && negative` indicates negative overflow
350 * and might need to be handled differently depending on whether a
351 * signed or unsigned integer is being parsed.
352 */
static size_t SDL_ScanUnsignedLongLongInternal(const char *text, int count, int radix, unsigned long long *valuep, bool *negativep)
{
    const unsigned long long ullong_max = ~0ULL;

    const char *text_start = text;
    const char *number_start = text_start;
    unsigned long long value = 0;
    bool negative = false;
    bool overflow = false;

    // radix 0 means auto-detect (8/10/16 from the prefix, like strtol).
    if (radix == 0 || (radix >= 2 && radix <= 36)) {
        while (SDL_isspace(*text)) {
            ++text;
        }
        if (*text == '-' || *text == '+') {
            negative = *text == '-';
            ++text;
        }
        // A leading '0' selects octal, or hex if followed by x/X. The
        // text[1] check keeps a bare "0" from being eaten as an empty
        // prefix (it must parse as the number zero, consuming one char).
        if ((radix == 0 || radix == 16) && *text == '0' && text[1] != '\0') {
            ++text;
            if (*text == 'x' || *text == 'X') {
                radix = 16;
                ++text;
            } else if (radix == 0) {
                radix = 8;
            }
        } else if (radix == 0) {
            radix = 10;
        }
        number_start = text;
        do {
            unsigned long long digit;
            if (*text >= '0' && *text <= '9') {
                digit = *text - '0';
            } else if (radix > 10) {
                // letter digits for radixes 11..36, either case.
                if (*text >= 'A' && *text < 'A' + (radix - 10)) {
                    digit = 10 + (*text - 'A');
                } else if (*text >= 'a' && *text < 'a' + (radix - 10)) {
                    digit = 10 + (*text - 'a');
                } else {
                    break;
                }
            } else {
                break;
            }
            // detect overflow before each multiply/add rather than after.
            if (value != 0 && radix > ullong_max / value) {
                overflow = true;
            } else {
                value *= radix;
                if (digit > ullong_max - value) {
                    overflow = true;
                } else {
                    value += digit;
                }
            }
            ++text;
        } while (count == 0 || (text - text_start) != count); // count 0 = unbounded.
    }
    if (text == number_start) {
        if (radix == 16 && text > text_start && (*(text - 1) == 'x' || *(text - 1) == 'X')) {
            // the string was "0x"; consume the '0' but not the 'x'
            --text;
        } else {
            // no number was parsed, and thus no characters were consumed
            text = text_start;
        }
    }
    if (overflow) {
        if (negative) {
            value = 0; // negative-overflow marker (see doc comment above).
        } else {
            value = ullong_max;
        }
    } else if (value == 0) {
        negative = false; // "-0" is not negative.
    }
    *valuep = value;
    *negativep = negative;
    return text - text_start;
}
433#endif
434
435#ifndef HAVE_WCSTOL
436// SDL_ScanUnsignedLongLongInternalW assumes that wchar_t can be converted to int without truncating bits
437SDL_COMPILE_TIME_ASSERT(wchar_t_int, sizeof(wchar_t) <= sizeof(int));
438
439/**
440 * Parses an unsigned long long and returns the unsigned value and sign bit.
441 *
442 * Positive values are clamped to ULLONG_MAX.
443 * The result `value == 0 && negative` indicates negative overflow
444 * and might need to be handled differently depending on whether a
445 * signed or unsigned integer is being parsed.
446 */
static size_t SDL_ScanUnsignedLongLongInternalW(const wchar_t *text, int count, int radix, unsigned long long *valuep, bool *negativep)
{
    const unsigned long long ullong_max = ~0ULL;

    const wchar_t *text_start = text;
    const wchar_t *number_start = text_start;
    unsigned long long value = 0;
    bool negative = false;
    bool overflow = false;

    // radix 0 means auto-detect (8/10/16 from the prefix, like wcstol).
    if (radix == 0 || (radix >= 2 && radix <= 36)) {
        while (SDL_isspace(*text)) {
            ++text;
        }
        if (*text == '-' || *text == '+') {
            negative = *text == '-';
            ++text;
        }
        // BUGFIX: require a character after the leading '0', matching the
        // char version (SDL_ScanUnsignedLongLongInternal). Without the
        // text[1] check, a bare L"0" was consumed as an empty octal prefix
        // and the whole parse failed, consuming zero characters.
        if ((radix == 0 || radix == 16) && *text == '0' && text[1] != '\0') {
            ++text;
            if (*text == 'x' || *text == 'X') {
                radix = 16;
                ++text;
            } else if (radix == 0) {
                radix = 8;
            }
        } else if (radix == 0) {
            radix = 10;
        }
        number_start = text;
        do {
            unsigned long long digit;
            if (*text >= '0' && *text <= '9') {
                digit = *text - '0';
            } else if (radix > 10) {
                // letter digits for radixes 11..36, either case.
                if (*text >= 'A' && *text < 'A' + (radix - 10)) {
                    digit = 10 + (*text - 'A');
                } else if (*text >= 'a' && *text < 'a' + (radix - 10)) {
                    digit = 10 + (*text - 'a');
                } else {
                    break;
                }
            } else {
                break;
            }
            // detect overflow before each multiply/add rather than after.
            if (value != 0 && radix > ullong_max / value) {
                overflow = true;
            } else {
                value *= radix;
                if (digit > ullong_max - value) {
                    overflow = true;
                } else {
                    value += digit;
                }
            }
            ++text;
        } while (count == 0 || (text - text_start) != count); // count 0 = unbounded.
    }
    if (text == number_start) {
        if (radix == 16 && text > text_start && (*(text - 1) == 'x' || *(text - 1) == 'X')) {
            // the string was "0x"; consume the '0' but not the 'x'
            --text;
        } else {
            // no number was parsed, and thus no characters were consumed
            text = text_start;
        }
    }
    if (overflow) {
        if (negative) {
            value = 0; // negative-overflow marker (see doc comment above).
        } else {
            value = ullong_max;
        }
    } else if (value == 0) {
        negative = false; // "-0" is not negative.
    }
    *valuep = value;
    *negativep = negative;
    return text - text_start;
}
527#endif
528
529#if !defined(HAVE_VSSCANF) || !defined(HAVE_STRTOL)
static size_t SDL_ScanLong(const char *text, int count, int radix, long *valuep)
{
    // Parse via the shared unsigned helper, then clamp into [LONG_MIN, LONG_MAX].
    const unsigned long max_positive = (~0UL) >> 1; // LONG_MAX as unsigned
    unsigned long long magnitude;
    bool is_negative;
    const size_t consumed = SDL_ScanUnsignedLongLongInternal(text, count, radix, &magnitude, &is_negative);

    if (!is_negative) {
        if (magnitude > max_positive) {
            magnitude = max_positive; // positive overflow -> LONG_MAX
        }
    } else {
        const unsigned long magnitude_of_min = max_positive + 1; // |LONG_MIN|
        // magnitude == 0 here means the helper flagged negative overflow.
        if (magnitude == 0 || magnitude > magnitude_of_min) {
            magnitude = 0ULL - magnitude_of_min; // LONG_MIN bit pattern
        } else {
            magnitude = 0ULL - magnitude;
        }
    }
    *valuep = (long)magnitude;
    return consumed;
}
549#endif
550
551#ifndef HAVE_WCSTOL
// wchar_t flavor of SDL_ScanLong: parse via the shared unsigned helper,
// then clamp the result into [LONG_MIN, LONG_MAX].
static size_t SDL_ScanLongW(const wchar_t *text, int count, int radix, long *valuep)
{
    const unsigned long long_max = (~0UL) >> 1;
    unsigned long long value;
    bool negative;
    size_t len = SDL_ScanUnsignedLongLongInternalW(text, count, radix, &value, &negative);
    if (negative) {
        const unsigned long abs_long_min = long_max + 1;
        // value == 0 here means the helper flagged negative overflow.
        if (value == 0 || value > abs_long_min) {
            value = 0ULL - abs_long_min; // LONG_MIN bit pattern
        } else {
            value = 0ULL - value;
        }
    } else if (value > long_max) {
        value = long_max; // positive overflow -> LONG_MAX
    }
    *valuep = (long)value;
    return len;
}
571#endif
572
573#if !defined(HAVE_VSSCANF) || !defined(HAVE_STRTOUL)
// strtoul()-style parse: negative inputs wrap modulo ULONG_MAX + 1, and
// overflow in either direction clamps to ULONG_MAX.
static size_t SDL_ScanUnsignedLong(const char *text, int count, int radix, unsigned long *valuep)
{
    const unsigned long ulong_max = ~0UL;
    unsigned long long value;
    bool negative;
    size_t len = SDL_ScanUnsignedLongLongInternal(text, count, radix, &value, &negative);
    if (negative) {
        // value == 0 means the helper flagged negative overflow.
        if (value == 0 || value > ulong_max) {
            value = ulong_max;
        } else if (value == ulong_max) {
            value = 1; // -ULONG_MAX wraps to 1 (mod ULONG_MAX + 1)
        } else {
            value = 0ULL - value;
        }
    } else if (value > ulong_max) {
        value = ulong_max;
    }
    *valuep = (unsigned long)value;
    return len;
}
594#endif
595
596#ifndef HAVE_VSSCANF
// Parse a pointer value for the fallback scanf's %p: always hexadecimal
// (radix fixed at 16), unbounded (count 0), with strtoul-style wrap/clamp
// into uintptr_t's range.
static size_t SDL_ScanUintPtrT(const char *text, uintptr_t *valuep)
{
    const uintptr_t uintptr_max = ~(uintptr_t)0;
    unsigned long long value;
    bool negative;
    size_t len = SDL_ScanUnsignedLongLongInternal(text, 0, 16, &value, &negative);
    if (negative) {
        // value == 0 means the helper flagged negative overflow.
        if (value == 0 || value > uintptr_max) {
            value = uintptr_max;
        } else if (value == uintptr_max) {
            value = 1; // -UINTPTR_MAX wraps to 1 (mod UINTPTR_MAX + 1)
        } else {
            value = 0ULL - value;
        }
    } else if (value > uintptr_max) {
        value = uintptr_max;
    }
    *valuep = (uintptr_t)value;
    return len;
}
617#endif
618
619#if !defined(HAVE_VSSCANF) || !defined(HAVE_STRTOLL)
static size_t SDL_ScanLongLong(const char *text, int count, int radix, long long *valuep)
{
    // Parse via the shared unsigned helper, then clamp into [LLONG_MIN, LLONG_MAX].
    const unsigned long long max_positive = (~0ULL) >> 1; // LLONG_MAX as unsigned
    unsigned long long magnitude;
    bool is_negative;
    const size_t consumed = SDL_ScanUnsignedLongLongInternal(text, count, radix, &magnitude, &is_negative);

    if (!is_negative) {
        if (magnitude > max_positive) {
            magnitude = max_positive; // positive overflow -> LLONG_MAX
        }
    } else {
        const unsigned long long magnitude_of_min = max_positive + 1; // |LLONG_MIN|
        // magnitude == 0 here means the helper flagged negative overflow.
        if (magnitude == 0 || magnitude > magnitude_of_min) {
            magnitude = 0ULL - magnitude_of_min; // LLONG_MIN bit pattern
        } else {
            magnitude = 0ULL - magnitude;
        }
    }
    *valuep = magnitude;
    return consumed;
}
639#endif
640
641#if !defined(HAVE_VSSCANF) || !defined(HAVE_STRTOULL) || !defined(HAVE_STRTOD)
static size_t SDL_ScanUnsignedLongLong(const char *text, int count, int radix, unsigned long long *valuep)
{
    // strtoull()-style wrap for negative input: "-n" becomes 2^64 - n, and a
    // zero magnitude from the helper (its negative-overflow marker) clamps
    // to ULLONG_MAX.
    bool is_negative;
    const size_t consumed = SDL_ScanUnsignedLongLongInternal(text, count, radix, valuep, &is_negative);
    if (is_negative) {
        *valuep = (*valuep == 0) ? (~0ULL) : (0ULL - *valuep);
    }
    return consumed;
}
656#endif
657
658#if !defined(HAVE_VSSCANF) || !defined(HAVE_STRTOD)
// Minimal strtod() replacement for the fallback scanners: optional leading
// whitespace, optional sign, then a decimal integer with an optional
// fractional part. NOTE(review): no exponent ("1e5"), hex-float, or
// inf/nan support, and the per-digit fraction accumulation can lose
// precision relative to a real strtod() — confirm that's acceptable here.
static size_t SDL_ScanFloat(const char *text, double *valuep)
{
    const char *text_start = text;
    const char *number_start = text_start;
    double value = 0.0;
    bool negative = false;

    while (SDL_isspace(*text)) {
        ++text;
    }
    if (*text == '-' || *text == '+') {
        negative = *text == '-';
        ++text;
    }
    number_start = text;
    if (SDL_isdigit(*text)) {
        // integer part, reusing the integer scanner.
        value += SDL_strtoull(text, (char **)(&text), 10);
        if (*text == '.') {
            // fractional part, one decimal place at a time.
            double denom = 10;
            ++text;
            while (SDL_isdigit(*text)) {
                value += (double)(*text - '0') / denom;
                denom *= 10;
                ++text;
            }
        }
    }
    if (text == number_start) {
        // no number was parsed, and thus no characters were consumed
        text = text_start;
    } else if (negative) {
        value = -value;
    }
    *valuep = value;
    return text - text_start;
}
695#endif
696
// memcmp work-alike: compare len bytes of two buffers, returning <0, 0 or
// >0. Bytes are compared as unsigned char, per C99 7.21.4.
int SDL_memcmp(const void *s1, const void *s2, size_t len)
{
#ifdef SDL_PLATFORM_VITA
    /*
       Using memcmp on NULL is UB per POSIX / C99 7.21.1/2.
       But, both linux and bsd allow that, with an exception:
       zero length strings are always identical, so NULLs are never dereferenced.
       sceClibMemcmp on PSVita doesn't allow that, so we check ourselves.
     */
    if (len == 0) {
        return 0;
    }
    return sceClibMemcmp(s1, s2, len);
#elif defined(HAVE_MEMCMP)
    return memcmp(s1, s2, len);
#else
    // Compare as unsigned char: comparing through plain (possibly signed)
    // char returned the wrong sign for bytes >= 0x80, disagreeing with the
    // libc memcmp branch above.
    const unsigned char *s1p = (const unsigned char *)s1;
    const unsigned char *s2p = (const unsigned char *)s2;
    while (len--) {
        if (*s1p != *s2p) {
            return *s1p - *s2p;
        }
        ++s1p;
        ++s2p;
    }
    return 0;
#endif // HAVE_MEMCMP
}
725
// Return the number of bytes in the C string, not counting the terminator.
size_t SDL_strlen(const char *string)
{
#ifdef HAVE_STRLEN
    return strlen(string);
#else
    const char *p = string;
    while (*p) {
        ++p;
    }
    return (size_t)(p - string);
#endif // HAVE_STRLEN
}
738
// Return the length of the string, but never scan past maxlen bytes.
size_t SDL_strnlen(const char *string, size_t maxlen)
{
#ifdef HAVE_STRNLEN
    return strnlen(string, maxlen);
#else
    size_t n;
    for (n = 0; n < maxlen && string[n]; ++n) {
    }
    return n;
#endif // HAVE_STRNLEN
}
751
// Return the number of wide characters before the terminator.
size_t SDL_wcslen(const wchar_t *string)
{
#ifdef HAVE_WCSLEN
    return wcslen(string);
#else
    const wchar_t *p = string;
    while (*p) {
        ++p;
    }
    return (size_t)(p - string);
#endif // HAVE_WCSLEN
}
764
// Return the wide-string length, but never scan past maxlen characters.
size_t SDL_wcsnlen(const wchar_t *string, size_t maxlen)
{
#ifdef HAVE_WCSNLEN
    return wcsnlen(string, maxlen);
#else
    size_t n;
    for (n = 0; n < maxlen && string[n]; ++n) {
    }
    return n;
#endif // HAVE_WCSNLEN
}
777
778size_t SDL_wcslcpy(SDL_OUT_Z_CAP(maxlen) wchar_t *dst, const wchar_t *src, size_t maxlen)
779{
780#ifdef HAVE_WCSLCPY
781 return wcslcpy(dst, src, maxlen);
782#else
783 size_t srclen = SDL_wcslen(src);
784 if (maxlen > 0) {
785 size_t len = SDL_min(srclen, maxlen - 1);
786 SDL_memcpy(dst, src, len * sizeof(wchar_t));
787 dst[len] = '\0';
788 }
789 return srclen;
790#endif // HAVE_WCSLCPY
791}
792
793size_t SDL_wcslcat(SDL_INOUT_Z_CAP(maxlen) wchar_t *dst, const wchar_t *src, size_t maxlen)
794{
795#ifdef HAVE_WCSLCAT
796 return wcslcat(dst, src, maxlen);
797#else
798 size_t dstlen = SDL_wcslen(dst);
799 size_t srclen = SDL_wcslen(src);
800 if (dstlen < maxlen) {
801 SDL_wcslcpy(dst + dstlen, src, maxlen - dstlen);
802 }
803 return dstlen + srclen;
804#endif // HAVE_WCSLCAT
805}
806
// Allocate a copy of a wide string (terminator included); the caller frees
// it with SDL_free(). Returns NULL if allocation fails.
wchar_t *SDL_wcsdup(const wchar_t *string)
{
    const size_t size = (SDL_wcslen(string) + 1) * sizeof(wchar_t);
    wchar_t *copy = (wchar_t *)SDL_malloc(size);
    if (copy) {
        SDL_memcpy(copy, string, size);
    }
    return copy;
}
816
// Find the first occurrence of needle within the first maxlen characters of
// haystack. An empty needle matches at the start; returns NULL if no match.
wchar_t *SDL_wcsnstr(const wchar_t *haystack, const wchar_t *needle, size_t maxlen)
{
    const size_t needle_len = SDL_wcslen(needle);
    if (needle_len == 0) {
        return (wchar_t *)haystack;
    }
    for (; maxlen >= needle_len && *haystack; ++haystack, --maxlen) {
        if (SDL_wcsncmp(haystack, needle, needle_len) == 0) {
            return (wchar_t *)haystack;
        }
    }
    return NULL;
}
832
// wcsstr work-alike: find the first occurrence of needle in haystack.
// An empty needle matches at the start.
wchar_t *SDL_wcsstr(const wchar_t *haystack, const wchar_t *needle)
{
#ifdef HAVE_WCSSTR
    return SDL_const_cast(wchar_t *, wcsstr(haystack, needle));
#else
    return SDL_wcsnstr(haystack, needle, SDL_wcslen(haystack));
#endif // HAVE_WCSSTR
}
841
// wcscmp work-alike: lexicographic comparison of wide strings.
int SDL_wcscmp(const wchar_t *str1, const wchar_t *str2)
{
#ifdef HAVE_WCSCMP
    return wcscmp(str1, str2);
#else
    // Advance past the common prefix, then report the difference at the
    // first mismatching position (0 if both strings ended together).
    while (*str1 && (*str1 == *str2)) {
        ++str1;
        ++str2;
    }
    return *str1 - *str2;
#endif // HAVE_WCSCMP
}
857
// wcsncmp work-alike: compare at most maxlen wide characters.
int SDL_wcsncmp(const wchar_t *str1, const wchar_t *str2, size_t maxlen)
{
#ifdef HAVE_WCSNCMP
    return wcsncmp(str1, str2, maxlen);
#else
    for (; maxlen; --maxlen, ++str1, ++str2) {
        if (*str1 != *str2 || !*str1 || !*str2) {
            break;
        }
    }
    if (!maxlen) {
        return 0; // compared maxlen characters without a difference
    }
    return *str1 - *str2;

#endif // HAVE_WCSNCMP
}
878
// Case-insensitive comparison of NUL-terminated wide strings, using the
// Unicode case-folding tables via the UNICODE_STRCASECMP helper macro
// (defined earlier in this file). The macro's extra arguments size the
// per-codepoint decode; both strings are walked until a folded mismatch.
int SDL_wcscasecmp(const wchar_t *wstr1, const wchar_t *wstr2)
{
#if (SDL_SIZEOF_WCHAR_T == 2)
    const Uint16 *str1 = (const Uint16 *) wstr1;
    const Uint16 *str2 = (const Uint16 *) wstr2;
    UNICODE_STRCASECMP(16, 2, 2, (void) str1start, (void) str2start); // always NULL-terminated, no need to adjust lengths.
#elif (SDL_SIZEOF_WCHAR_T == 4)
    const Uint32 *str1 = (const Uint32 *) wstr1;
    const Uint32 *str2 = (const Uint32 *) wstr2;
    UNICODE_STRCASECMP(32, 1, 1, (void) str1start, (void) str2start); // always NULL-terminated, no need to adjust lengths.
#else
    #error Unexpected wchar_t size
    return -1;
#endif
}
894
// Case-insensitive comparison of at most maxlen wide characters, using the
// Unicode case-folding tables via the UNICODE_STRCASECMP helper macro
// (defined earlier in this file). slen1/slen2 track each string's remaining
// budget and are decremented as the macro advances str1/str2 past decoded
// codepoints.
int SDL_wcsncasecmp(const wchar_t *wstr1, const wchar_t *wstr2, size_t maxlen)
{
    size_t slen1 = maxlen;
    size_t slen2 = maxlen;

#if (SDL_SIZEOF_WCHAR_T == 2)
    const Uint16 *str1 = (const Uint16 *) wstr1;
    const Uint16 *str2 = (const Uint16 *) wstr2;
    UNICODE_STRCASECMP(16, slen1, slen2, slen1 -= (size_t) (str1 - str1start), slen2 -= (size_t) (str2 - str2start));
#elif (SDL_SIZEOF_WCHAR_T == 4)
    const Uint32 *str1 = (const Uint32 *) wstr1;
    const Uint32 *str2 = (const Uint32 *) wstr2;
    UNICODE_STRCASECMP(32, slen1, slen2, slen1 -= (size_t) (str1 - str1start), slen2 -= (size_t) (str2 - str2start));
#else
    #error Unexpected wchar_t size
    return -1;
#endif
}
913
// wcstol work-alike; the fallback uses SDL_ScanLongW (defined earlier in
// this file) and stores the address of the first unparsed character in
// *endp (when endp is non-NULL).
long SDL_wcstol(const wchar_t *string, wchar_t **endp, int base)
{
#ifdef HAVE_WCSTOL
    return wcstol(string, endp, base);
#else
    long value = 0;
    size_t len = SDL_ScanLongW(string, 0, base, &value);
    if (endp) {
        *endp = (wchar_t *)string + len;
    }
    return value;
#endif // HAVE_WCSTOL
}
927
928size_t SDL_strlcpy(SDL_OUT_Z_CAP(maxlen) char *dst, const char *src, size_t maxlen)
929{
930#ifdef HAVE_STRLCPY
931 return strlcpy(dst, src, maxlen);
932#else
933 size_t srclen = SDL_strlen(src);
934 if (maxlen > 0) {
935 size_t len = SDL_min(srclen, maxlen - 1);
936 SDL_memcpy(dst, src, len);
937 dst[len] = '\0';
938 }
939 return srclen;
940#endif // HAVE_STRLCPY
941}
942
// Copy up to dst_bytes-1 bytes of the UTF-8 string src into dst, never
// splitting a multi-byte sequence: if the copy window would end in the
// middle of a codepoint, the partial sequence is dropped. Always
// NUL-terminates when dst_bytes > 0. Returns the number of bytes copied,
// excluding the terminator.
size_t SDL_utf8strlcpy(SDL_OUT_Z_CAP(dst_bytes) char *dst, const char *src, size_t dst_bytes)
{
    size_t bytes = 0;

    if (dst_bytes > 0) {
        size_t src_bytes = SDL_strlen(src);
        size_t i = 0;
        size_t trailing_bytes = 0;

        bytes = SDL_min(src_bytes, dst_bytes - 1);
        if (bytes) {
            unsigned char c = (unsigned char)src[bytes - 1];
            if (UTF8_IsLeadByte(c)) {
                // The truncation point is a sequence's lead byte: drop it.
                --bytes;
            } else if (UTF8_IsTrailingByte(c)) {
                // Walk back to the sequence's lead byte; if the full
                // sequence doesn't fit inside the window, cut before it.
                for (i = bytes - 1; i != 0; --i) {
                    c = (unsigned char)src[i];
                    trailing_bytes = UTF8_GetTrailingBytes(c);
                    if (trailing_bytes) {
                        if ((bytes - i) != (trailing_bytes + 1)) {
                            bytes = i;
                        }

                        break;
                    }
                }
            }
            SDL_memcpy(dst, src, bytes);
        }
        dst[bytes] = '\0';
    }

    return bytes;
}
977
// Count the codepoints in a NUL-terminated UTF-8 string.
size_t SDL_utf8strlen(const char *str)
{
    size_t count = 0;
    // SDL_StepUTF8 advances str by one codepoint and returns 0 at the end.
    for (; SDL_StepUTF8(&str, NULL); ++count) {
    }
    return count;
}
986
// Count the codepoints in at most the first `bytes` bytes of a UTF-8 string.
size_t SDL_utf8strnlen(const char *str, size_t bytes)
{
    size_t count = 0;
    // SDL_StepUTF8 decrements the byte budget as it advances.
    for (; SDL_StepUTF8(&str, &bytes); ++count) {
    }
    return count;
}
995
996size_t SDL_strlcat(SDL_INOUT_Z_CAP(maxlen) char *dst, const char *src, size_t maxlen)
997{
998#ifdef HAVE_STRLCAT
999 return strlcat(dst, src, maxlen);
1000#else
1001 size_t dstlen = SDL_strlen(dst);
1002 size_t srclen = SDL_strlen(src);
1003 if (dstlen < maxlen) {
1004 SDL_strlcpy(dst + dstlen, src, maxlen - dstlen);
1005 }
1006 return dstlen + srclen;
1007#endif // HAVE_STRLCAT
1008}
1009
// Allocate a copy of a C string (terminator included); the caller frees it
// with SDL_free(). Returns NULL if allocation fails.
char *SDL_strdup(const char *string)
{
    const size_t size = SDL_strlen(string) + 1;
    char *copy = (char *)SDL_malloc(size);
    if (copy) {
        SDL_memcpy(copy, string, size);
    }
    return copy;
}
1019
// Allocate a copy of at most maxlen bytes of string, always NUL-terminated;
// the caller frees it with SDL_free(). Returns NULL if allocation fails.
char *SDL_strndup(const char *string, size_t maxlen)
{
    const size_t copylen = SDL_strnlen(string, maxlen);
    char *copy = (char *)SDL_malloc(copylen + 1);
    if (copy) {
        SDL_memcpy(copy, string, copylen);
        copy[copylen] = '\0';
    }
    return copy;
}
1030
// Reverse a string in place and return it.
char *SDL_strrev(char *string)
{
#ifdef HAVE__STRREV
    return _strrev(string);
#else
    size_t len = SDL_strlen(string);
    // Guard short strings: for len == 0, &string[len - 1] would form a
    // pointer before the start of the array, which is undefined behavior
    // (and len <= 1 needs no work anyway).
    if (len > 1) {
        char *a = &string[0];
        char *b = &string[len - 1];
        len /= 2;
        while (len--) {
            const char c = *a; // NOLINT(clang-analyzer-core.uninitialized.Assign)
            *a++ = *b;
            *b-- = c;
        }
    }
    return string;
#endif // HAVE__STRREV
}
1048
// Upper-case every byte of the string in place and return it.
char *SDL_strupr(char *string)
{
    for (char *p = string; *p; ++p) {
        *p = (char)SDL_toupper((unsigned char)*p);
    }
    return string;
}
1058
// Lower-case every byte of the string in place and return it.
char *SDL_strlwr(char *string)
{
    for (char *p = string; *p; ++p) {
        *p = (char)SDL_tolower((unsigned char)*p);
    }
    return string;
}
1068
// strchr work-alike: find the first occurrence of c; like strchr, the
// terminator itself can be matched with c == '\0'.
char *SDL_strchr(const char *string, int c)
{
#ifdef HAVE_STRCHR
    return SDL_const_cast(char *, strchr(string, c));
#elif defined(HAVE_INDEX)
    return SDL_const_cast(char *, index(string, c));
#else
    for (;; ++string) {
        if (*string == c) {
            return (char *)string;
        }
        if (!*string) {
            return NULL;
        }
    }
#endif // HAVE_STRCHR
}
1088
// strrchr work-alike: find the last occurrence of c (the terminator itself
// can be matched with c == '\0').
char *SDL_strrchr(const char *string, int c)
{
#ifdef HAVE_STRRCHR
    return SDL_const_cast(char *, strrchr(string, c));
#elif defined(HAVE_RINDEX)
    return SDL_const_cast(char *, rindex(string, c));
#else
    // Scan backwards from the terminator, structured so the pointer never
    // moves before the start of the string: the old `while (bufp >= string)
    // ... --bufp` loop decremented bufp to string - 1 when c was absent,
    // which is undefined behavior.
    const char *bufp = string + SDL_strlen(string);
    for (;;) {
        if (*bufp == c) {
            return (char *)bufp;
        }
        if (bufp == string) {
            return NULL;
        }
        --bufp;
    }
#endif // HAVE_STRRCHR
}
1106
// Find the first occurrence of needle within the first maxlen bytes of
// haystack; an empty needle matches at the start. Returns NULL if no match.
char *SDL_strnstr(const char *haystack, const char *needle, size_t maxlen)
{
#ifdef HAVE_STRNSTR
    return SDL_const_cast(char *, strnstr(haystack, needle, maxlen));
#else
    size_t length = SDL_strlen(needle);
    if (length == 0) {
        return (char *)haystack;
    }
    while (maxlen >= length && *haystack) {
        if (SDL_strncmp(haystack, needle, length) == 0) {
            return (char *)haystack;
        }
        ++haystack;
        --maxlen;
    }
    return NULL;
#endif // HAVE_STRNSTR
}
1126
// strstr work-alike: find the first occurrence of needle in haystack.
// An empty needle matches at the start.
char *SDL_strstr(const char *haystack, const char *needle)
{
#ifdef HAVE_STRSTR
    return SDL_const_cast(char *, strstr(haystack, needle));
#else
    return SDL_strnstr(haystack, needle, SDL_strlen(haystack));
#endif // HAVE_STRSTR
}
1135
// Case-insensitive substring search: comparisons use SDL_strncasecmp
// (Unicode case folding) and the scan position advances one full UTF-8
// codepoint at a time. An empty needle matches at the start of haystack.
char *SDL_strcasestr(const char *haystack, const char *needle)
{
    const size_t length = SDL_strlen(needle);
    do {
        if (SDL_strncasecmp(haystack, needle, length) == 0) {
            return (char *)haystack;
        }
    } while (SDL_StepUTF8(&haystack, NULL)); // move ahead by a full codepoint at a time, regardless of bytes.

    return NULL;
}
1147
#if !defined(HAVE__LTOA) || !defined(HAVE__I64TOA) || \
    !defined(HAVE__ULTOA) || !defined(HAVE__UI64TOA)
// Digit characters for the SDL_*toa() fallbacks; supports radixes up to 36.
static const char ntoa_table[] = {
    '0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
    'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J',
    'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T',
    'U', 'V', 'W', 'X', 'Y', 'Z'
};
#endif // ntoa() conversion table
1157
// itoa work-alike: format value into string in the given radix and return
// string. The buffer must be large enough for digits, sign and terminator.
char *SDL_itoa(int value, char *string, int radix)
{
#ifdef HAVE_ITOA
    return itoa(value, string, radix);
#else
    return SDL_ltoa((long)value, string, radix);
#endif // HAVE_ITOA
}
1166
// Unsigned counterpart of SDL_itoa: format value in the given radix.
char *SDL_uitoa(unsigned int value, char *string, int radix)
{
#ifdef HAVE__UITOA
    return _uitoa(value, string, radix);
#else
    return SDL_ultoa((unsigned long)value, string, radix);
#endif // HAVE__UITOA
}
1175
// Format a signed long in the given radix into string and return string.
char *SDL_ltoa(long value, char *string, int radix)
{
#ifdef HAVE__LTOA
    return _ltoa(value, string, radix);
#else
    char *bufp = string;

    if (value < 0) {
        *bufp++ = '-';
        // Negate in unsigned arithmetic: `-value` overflows (undefined
        // behavior) when value == LONG_MIN. The conversion and subtraction
        // are well-defined modulo 2^N and yield the magnitude.
        SDL_ultoa(0UL - (unsigned long)value, bufp, radix);
    } else {
        SDL_ultoa((unsigned long)value, bufp, radix);
    }

    return string;
#endif // HAVE__LTOA
}
1193
1194char *SDL_ultoa(unsigned long value, char *string, int radix)
1195{
1196#ifdef HAVE__ULTOA
1197 return _ultoa(value, string, radix);
1198#else
1199 char *bufp = string;
1200
1201 if (value) {
1202 while (value > 0) {
1203 *bufp++ = ntoa_table[value % radix];
1204 value /= radix;
1205 }
1206 } else {
1207 *bufp++ = '0';
1208 }
1209 *bufp = '\0';
1210
1211 // The numbers went into the string backwards. :)
1212 SDL_strrev(string);
1213
1214 return string;
1215#endif // HAVE__ULTOA
1216}
1217
// Format a signed long long in the given radix into string and return string.
char *SDL_lltoa(long long value, char *string, int radix)
{
#ifdef HAVE__I64TOA
    return _i64toa(value, string, radix);
#else
    char *bufp = string;

    if (value < 0) {
        *bufp++ = '-';
        // Negate in unsigned arithmetic: `-value` overflows (undefined
        // behavior) when value == LLONG_MIN. The conversion and subtraction
        // are well-defined modulo 2^64 and yield the magnitude.
        SDL_ulltoa(0ULL - (unsigned long long)value, bufp, radix);
    } else {
        SDL_ulltoa((unsigned long long)value, bufp, radix);
    }

    return string;
#endif // HAVE__I64TOA
}
1235
1236char *SDL_ulltoa(unsigned long long value, char *string, int radix)
1237{
1238#ifdef HAVE__UI64TOA
1239 return _ui64toa(value, string, radix);
1240#else
1241 char *bufp = string;
1242
1243 if (value) {
1244 while (value > 0) {
1245 *bufp++ = ntoa_table[value % radix];
1246 value /= radix;
1247 }
1248 } else {
1249 *bufp++ = '0';
1250 }
1251 *bufp = '\0';
1252
1253 // The numbers went into the string backwards. :)
1254 SDL_strrev(string);
1255
1256 return string;
1257#endif // HAVE__UI64TOA
1258}
1259
// atoi work-alike: parse a decimal int with no error reporting.
int SDL_atoi(const char *string)
{
#ifdef HAVE_ATOI
    return atoi(string);
#else
    return SDL_strtol(string, NULL, 10);
#endif // HAVE_ATOI
}
1268
// atof work-alike: parse a decimal double with no error reporting.
double SDL_atof(const char *string)
{
#ifdef HAVE_ATOF
    return atof(string);
#else
    return SDL_strtod(string, NULL);
#endif // HAVE_ATOF
}
1277
// strtol work-alike; the fallback uses SDL_ScanLong (defined earlier in
// this file) and stores the address of the first unparsed character in
// *endp (when endp is non-NULL).
long SDL_strtol(const char *string, char **endp, int base)
{
#ifdef HAVE_STRTOL
    return strtol(string, endp, base);
#else
    long value = 0;
    size_t len = SDL_ScanLong(string, 0, base, &value);
    if (endp) {
        *endp = (char *)string + len;
    }
    return value;
#endif // HAVE_STRTOL
}
1291
// strtoul work-alike; the fallback uses SDL_ScanUnsignedLong (defined
// earlier in this file) and reports the end of the parsed text via *endp.
unsigned long SDL_strtoul(const char *string, char **endp, int base)
{
#ifdef HAVE_STRTOUL
    return strtoul(string, endp, base);
#else
    unsigned long value = 0;
    size_t len = SDL_ScanUnsignedLong(string, 0, base, &value);
    if (endp) {
        *endp = (char *)string + len;
    }
    return value;
#endif // HAVE_STRTOUL
}
1305
// strtoll work-alike; the fallback uses SDL_ScanLongLong (above) and
// reports the end of the parsed text via *endp.
long long SDL_strtoll(const char *string, char **endp, int base)
{
#ifdef HAVE_STRTOLL
    return strtoll(string, endp, base);
#else
    long long value = 0;
    size_t len = SDL_ScanLongLong(string, 0, base, &value);
    if (endp) {
        *endp = (char *)string + len;
    }
    return value;
#endif // HAVE_STRTOLL
}
1319
// strtoull work-alike; the fallback uses SDL_ScanUnsignedLongLong (above)
// and reports the end of the parsed text via *endp.
unsigned long long SDL_strtoull(const char *string, char **endp, int base)
{
#ifdef HAVE_STRTOULL
    return strtoull(string, endp, base);
#else
    unsigned long long value = 0;
    size_t len = SDL_ScanUnsignedLongLong(string, 0, base, &value);
    if (endp) {
        *endp = (char *)string + len;
    }
    return value;
#endif // HAVE_STRTOULL
}
1333
// strtod work-alike; the fallback uses SDL_ScanFloat (above; plain decimal
// only, no exponents) and reports the end of the parsed text via *endp.
double SDL_strtod(const char *string, char **endp)
{
#ifdef HAVE_STRTOD
    return strtod(string, endp);
#else
    double value;
    size_t len = SDL_ScanFloat(string, &value);
    if (endp) {
        *endp = (char *)string + len;
    }
    return value;
#endif // HAVE_STRTOD
}
1347
// strcmp work-alike: lexicographic comparison with bytes compared as
// unsigned char.
int SDL_strcmp(const char *str1, const char *str2)
{
#ifdef HAVE_STRCMP
    return strcmp(str1, str2);
#else
    const unsigned char *p1 = (const unsigned char *)str1;
    const unsigned char *p2 = (const unsigned char *)str2;
    // Advance past the common prefix, then report the difference at the
    // first mismatching position (0 if both strings ended together).
    while (*p1 && (*p1 == *p2)) {
        ++p1;
        ++p2;
    }
    return *p1 - *p2;
#endif // HAVE_STRCMP
}
1366
// strncmp work-alike: compare at most maxlen bytes as unsigned char.
int SDL_strncmp(const char *str1, const char *str2, size_t maxlen)
{
#ifdef HAVE_STRNCMP
    return strncmp(str1, str2, maxlen);
#else
    const unsigned char *p1 = (const unsigned char *)str1;
    const unsigned char *p2 = (const unsigned char *)str2;
    // Stop early at a mismatch or at the end of str1; if maxlen bytes match
    // the strings compare equal.
    for (; maxlen; --maxlen, ++p1, ++p2) {
        if (*p1 != *p2 || *p1 == '\0') {
            return *p1 - *p2;
        }
    }
    return 0;
#endif // HAVE_STRNCMP
}
1386
// Case-insensitive comparison of NUL-terminated UTF-8 strings using the
// Unicode case-folding tables via the UNICODE_STRCASECMP helper macro
// (defined earlier in this file); codepoints may span up to 4 bytes.
int SDL_strcasecmp(const char *str1, const char *str2)
{
    UNICODE_STRCASECMP(8, 4, 4, (void) str1start, (void) str2start); // always NULL-terminated, no need to adjust lengths.
}
1391
// Case-insensitive comparison of at most maxlen *bytes* of UTF-8 text,
// using the UNICODE_STRCASECMP helper macro (defined earlier in this
// file). slen1/slen2 track each string's remaining byte budget and are
// decremented as the macro advances past decoded codepoints.
int SDL_strncasecmp(const char *str1, const char *str2, size_t maxlen)
{
    size_t slen1 = maxlen;
    size_t slen2 = maxlen;
    UNICODE_STRCASECMP(8, slen1, slen2, slen1 -= (size_t) (str1 - ((const char *) str1start)), slen2 -= (size_t) (str2 - ((const char *) str2start)));
}
1398
// sscanf work-alike: forwards to SDL_vsscanf and returns the number of
// conversions performed.
int SDL_sscanf(const char *text, SDL_SCANF_FORMAT_STRING const char *fmt, ...)
{
    int rc;
    va_list ap;
    va_start(ap, fmt);
    rc = SDL_vsscanf(text, fmt, ap);
    va_end(ap);
    return rc;
}
1408
1409#ifdef HAVE_VSSCANF
// With a libc vsscanf available, simply forward to it.
int SDL_vsscanf(const char *text, const char *fmt, va_list ap)
{
    return vsscanf(text, fmt, ap);
}
1414#else
// Report whether c belongs to a %[...] scanset body (the text between '['
// and ']'). A leading '^' inverts the match; "x-y" spans are treated as
// inclusive ranges regardless of the endpoints' order.
static bool CharacterMatchesSet(char c, const char *set, size_t set_len)
{
    bool negate = false;
    bool matched = false;

    if (*set == '^') {
        negate = true;
        ++set;
        --set_len;
    }
    while (set_len > 0 && !matched) {
        if (set_len >= 3 && set[1] == '-') {
            const char lo = SDL_min(set[0], set[2]);
            const char hi = SDL_max(set[0], set[2]);
            matched = (c >= lo && c <= hi);
            set += 3;
            set_len -= 3;
        } else {
            matched = (c == *set);
            ++set;
            --set_len;
        }
    }
    return negate ? !matched : matched;
}
1447
// Minimal vsscanf fallback. Supports %% %c %d %i %o %x %X %u %p %f %s %n
// and %[...] scansets, with h/l/ll/I64/z length modifiers, field widths and
// '*' assignment suppression. Returns the number of conversions stored, or
// -1 when text is NULL or empty.
// NOLINTNEXTLINE(readability-non-const-parameter)
int SDL_vsscanf(const char *text, SDL_SCANF_FORMAT_STRING const char *fmt, va_list ap)
{
    const char *start = text;
    int result = 0;

    if (!text || !*text) {
        return -1;
    }

    while (*fmt) {
        if (*fmt == ' ') {
            // Whitespace in the format skips any run of input whitespace.
            while (SDL_isspace((unsigned char)*text)) {
                ++text;
            }
            ++fmt;
            continue;
        }
        if (*fmt == '%') {
            bool done = false;
            long count = 0;
            int radix = 10;
            enum
            {
                DO_SHORT,
                DO_INT,
                DO_LONG,
                DO_LONGLONG,
                DO_SIZE_T
            } inttype = DO_INT;
            size_t advance;
            bool suppress = false;

            ++fmt;
            if (*fmt == '%') {
                // "%%" matches a literal '%' in the input.
                if (*text == '%') {
                    ++text;
                    ++fmt;
                    continue;
                }
                break;
            }
            if (*fmt == '*') {
                suppress = true;
                ++fmt;
            }
            fmt += SDL_ScanLong(fmt, 0, 10, &count); // optional field width

            if (*fmt == 'c') {
                // %c reads exactly `count` characters (default 1) with no
                // whitespace skipping.
                if (!count) {
                    count = 1;
                }
                if (suppress) {
                    while (count--) {
                        ++text;
                    }
                } else {
                    char *valuep = va_arg(ap, char *);
                    while (count--) {
                        *valuep++ = *text++;
                    }
                    ++result;
                }
                continue;
            }

            while (SDL_isspace((unsigned char)*text)) {
                ++text;
            }

            // FIXME: implement more of the format specifiers
            while (!done) {
                switch (*fmt) {
                case '*':
                    suppress = true;
                    break;
                case 'h':
                    if (inttype == DO_INT) {
                        inttype = DO_SHORT;
                    } else if (inttype > DO_SHORT) {
                        ++inttype;
                    }
                    break;
                case 'l':
                    if (inttype < DO_LONGLONG) {
                        ++inttype;
                    }
                    break;
                case 'I':
                    if (SDL_strncmp(fmt, "I64", 3) == 0) {
                        fmt += 2;
                        inttype = DO_LONGLONG;
                    }
                    break;
                case 'z':
                    inttype = DO_SIZE_T;
                    break;
                case 'i':
                {
                    // %i auto-detects the radix from a 0x/0 prefix.
                    int index = 0;
                    if (text[index] == '-') {
                        ++index;
                    }
                    if (text[index] == '0') {
                        if (SDL_tolower((unsigned char)text[index + 1]) == 'x') {
                            radix = 16;
                        } else {
                            radix = 8;
                        }
                    }
                }
                    SDL_FALLTHROUGH;
                case 'd':
                    if (inttype == DO_LONGLONG) {
                        long long value = 0;
                        advance = SDL_ScanLongLong(text, count, radix, &value);
                        text += advance;
                        if (advance && !suppress) {
                            Sint64 *valuep = va_arg(ap, Sint64 *);
                            *valuep = value;
                            ++result;
                        }
                    } else if (inttype == DO_SIZE_T) {
                        long long value = 0;
                        advance = SDL_ScanLongLong(text, count, radix, &value);
                        text += advance;
                        if (advance && !suppress) {
                            size_t *valuep = va_arg(ap, size_t *);
                            *valuep = (size_t)value;
                            ++result;
                        }
                    } else {
                        long value = 0;
                        advance = SDL_ScanLong(text, count, radix, &value);
                        text += advance;
                        if (advance && !suppress) {
                            switch (inttype) {
                            case DO_SHORT:
                            {
                                short *valuep = va_arg(ap, short *);
                                *valuep = (short)value;
                            } break;
                            case DO_INT:
                            {
                                int *valuep = va_arg(ap, int *);
                                *valuep = (int)value;
                            } break;
                            case DO_LONG:
                            {
                                long *valuep = va_arg(ap, long *);
                                *valuep = value;
                            } break;
                            case DO_LONGLONG:
                            case DO_SIZE_T:
                                // Handled above
                                break;
                            }
                            ++result;
                        }
                    }
                    done = true;
                    break;
                case 'o':
                    if (radix == 10) {
                        radix = 8;
                    }
                    SDL_FALLTHROUGH;
                case 'x':
                case 'X':
                    if (radix == 10) {
                        radix = 16;
                    }
                    SDL_FALLTHROUGH;
                case 'u':
                    if (inttype == DO_LONGLONG) {
                        unsigned long long value = 0;
                        advance = SDL_ScanUnsignedLongLong(text, count, radix, &value);
                        text += advance;
                        if (advance && !suppress) {
                            Uint64 *valuep = va_arg(ap, Uint64 *);
                            *valuep = value;
                            ++result;
                        }
                    } else if (inttype == DO_SIZE_T) {
                        unsigned long long value = 0;
                        advance = SDL_ScanUnsignedLongLong(text, count, radix, &value);
                        text += advance;
                        if (advance && !suppress) {
                            size_t *valuep = va_arg(ap, size_t *);
                            *valuep = (size_t)value;
                            ++result;
                        }
                    } else {
                        unsigned long value = 0;
                        advance = SDL_ScanUnsignedLong(text, count, radix, &value);
                        text += advance;
                        if (advance && !suppress) {
                            switch (inttype) {
                            case DO_SHORT:
                            {
                                short *valuep = va_arg(ap, short *);
                                *valuep = (short)value;
                            } break;
                            case DO_INT:
                            {
                                int *valuep = va_arg(ap, int *);
                                *valuep = (int)value;
                            } break;
                            case DO_LONG:
                            {
                                long *valuep = va_arg(ap, long *);
                                *valuep = value;
                            } break;
                            case DO_LONGLONG:
                            case DO_SIZE_T:
                                // Handled above
                                break;
                            }
                            ++result;
                        }
                    }
                    done = true;
                    break;
                case 'p':
                {
                    uintptr_t value = 0;
                    advance = SDL_ScanUintPtrT(text, &value);
                    text += advance;
                    if (advance && !suppress) {
                        void **valuep = va_arg(ap, void **);
                        *valuep = (void *)value;
                        ++result;
                    }
                }
                    done = true;
                    break;
                case 'f':
                {
                    double value = 0.0;
                    advance = SDL_ScanFloat(text, &value);
                    text += advance;
                    if (advance && !suppress) {
                        float *valuep = va_arg(ap, float *);
                        *valuep = (float)value;
                        ++result;
                    }
                }
                    done = true;
                    break;
                case 's':
                    // %s reads up to the next whitespace (or `count` chars).
                    if (suppress) {
                        while (!SDL_isspace((unsigned char)*text)) {
                            ++text;
                            if (count) {
                                if (--count == 0) {
                                    break;
                                }
                            }
                        }
                    } else {
                        char *valuep = va_arg(ap, char *);
                        while (!SDL_isspace((unsigned char)*text)) {
                            *valuep++ = *text++;
                            if (count) {
                                if (--count == 0) {
                                    break;
                                }
                            }
                        }
                        *valuep = '\0';
                        ++result;
                    }
                    done = true;
                    break;
                case 'n':
                    // %n stores the number of input characters consumed so
                    // far; it does not count as a conversion.
                    switch (inttype) {
                    case DO_SHORT:
                    {
                        short *valuep = va_arg(ap, short *);
                        *valuep = (short)(text - start);
                    } break;
                    case DO_INT:
                    {
                        int *valuep = va_arg(ap, int *);
                        *valuep = (int)(text - start);
                    } break;
                    case DO_LONG:
                    {
                        long *valuep = va_arg(ap, long *);
                        *valuep = (long)(text - start);
                    } break;
                    case DO_LONGLONG:
                    {
                        long long *valuep = va_arg(ap, long long *);
                        *valuep = (long long)(text - start);
                    } break;
                    case DO_SIZE_T:
                    {
                        size_t *valuep = va_arg(ap, size_t *);
                        *valuep = (size_t)(text - start);
                    } break;
                    }
                    done = true;
                    break;
                case '[':
                {
                    const char *set = fmt + 1;
                    while (*fmt && *fmt != ']') {
                        ++fmt;
                    }
                    if (*fmt) {
                        size_t set_len = (fmt - set);
                        if (suppress) {
                            while (CharacterMatchesSet(*text, set, set_len)) {
                                ++text;
                                if (count) {
                                    if (--count == 0) {
                                        break;
                                    }
                                }
                            }
                        } else {
                            bool had_match = false;
                            char *valuep = va_arg(ap, char *);
                            while (CharacterMatchesSet(*text, set, set_len)) {
                                had_match = true;
                                *valuep++ = *text++;
                                if (count) {
                                    if (--count == 0) {
                                        break;
                                    }
                                }
                            }
                            *valuep = '\0';
                            if (had_match) {
                                ++result;
                            }
                        }
                    }
                }
                    done = true;
                    break;
                default:
                    done = true;
                    break;
                }
                ++fmt;
            }
            continue;
        }
        if (*text == *fmt) {
            ++text;
            ++fmt;
            continue;
        }
        // Text didn't match format specifier
        break;
    }

    return result;
}
1809#endif // HAVE_VSSCANF
1810
// snprintf work-alike: format into text (capacity maxlen, always
// NUL-terminated by SDL_vsnprintf) and return the untruncated length.
int SDL_snprintf(SDL_OUT_Z_CAP(maxlen) char *text, size_t maxlen, SDL_PRINTF_FORMAT_STRING const char *fmt, ...)
{
    va_list ap;
    int result;

    va_start(ap, fmt);
    result = SDL_vsnprintf(text, maxlen, fmt, ap);
    va_end(ap);

    return result;
}
1822
// Wide-character counterpart of SDL_snprintf; forwards to SDL_vswprintf.
int SDL_swprintf(SDL_OUT_Z_CAP(maxlen) wchar_t *text, size_t maxlen, SDL_PRINTF_FORMAT_STRING const wchar_t *fmt, ...)
{
    va_list ap;
    int result;

    va_start(ap, fmt);
    result = SDL_vswprintf(text, maxlen, fmt, ap);
    va_end(ap);

    return result;
}
1834
1835#if defined(HAVE_LIBC) && defined(__WATCOMC__)
1836// _vsnprintf() doesn't ensure nul termination
// Watcom variant: _vsnprintf doesn't NUL-terminate on truncation and
// returns a negative value instead of the would-be length, so patch up both.
int SDL_vsnprintf(SDL_OUT_Z_CAP(maxlen) char *text, size_t maxlen, const char *fmt, va_list ap)
{
    int result;
    if (!fmt) {
        fmt = ""; // tolerate a NULL format string
    }
    result = _vsnprintf(text, maxlen, fmt, ap);
    if (maxlen > 0) {
        text[maxlen - 1] = '\0'; // force termination
    }
    if (result < 0) {
        // truncated: report the buffer size so callers see a sane count
        result = (int)maxlen;
    }
    return result;
}
1852#elif defined(HAVE_VSNPRINTF)
// With a C99 vsnprintf available, forward to it (tolerating a NULL format,
// which would be undefined behavior for the libc call).
int SDL_vsnprintf(SDL_OUT_Z_CAP(maxlen) char *text, size_t maxlen, const char *fmt, va_list ap)
{
    if (!fmt) {
        fmt = "";
    }
    return vsnprintf(text, maxlen, fmt, ap);
}
1860#else
// Helper for the SDL_vsnprintf fallback: pass the remaining window of the
// output buffer to a print helper, or NULL/0 once `length` has reached
// `maxlen` (characters are then only counted, not stored).
#define TEXT_AND_LEN_ARGS (length < maxlen) ? &text[length] : NULL, (length < maxlen) ? (maxlen - length) : 0

// FIXME: implement more of the format specifiers

// Case forcing applied by %X (upper) vs %x (lower), etc.
typedef enum
{
    SDL_CASE_NOCHANGE,
    SDL_CASE_LOWER,
    SDL_CASE_UPPER
} SDL_letter_case;

// Parsed state of one %-conversion: flags, width, precision and radix.
typedef struct
{
    bool left_justify;
    bool force_sign;
    bool force_type; // for now: used only by float printer, ignored otherwise.
    bool pad_zeroes;
    SDL_letter_case force_case;
    int width;
    int radix;
    int precision;
} SDL_FormatInfo;
1882
// Copy `string` into `text` (capacity maxlen), applying width, precision,
// justification and case forcing from `info` (which may be NULL). Returns
// the number of characters the fully formatted result requires, even when
// it did not fit in maxlen. A NULL string prints "(null)".
static size_t SDL_PrintString(char *text, size_t maxlen, SDL_FormatInfo *info, const char *string)
{
    const char fill = (info && info->pad_zeroes) ? '0' : ' ';
    size_t width = 0;
    size_t filllen = 0;
    size_t length = 0;
    size_t slen, sz;

    if (!string) {
        string = "(null)";
    }

    sz = SDL_strlen(string);
    if (info && info->width > 0 && (size_t)info->width > sz) {
        width = info->width - sz;
        if (info->precision >= 0 && (size_t)info->precision < sz) {
            // Precision will shorten the string below, widening the pad.
            width += sz - (size_t)info->precision;
        }

        filllen = SDL_min(width, maxlen);
        if (!info->left_justify) {
            // Right-justify: emit the fill before the string.
            SDL_memset(text, fill, filllen);
            text += filllen;
            maxlen -= filllen;
            length += width;
            filllen = 0; // consumed; nothing to append after the string
        }
    }

    SDL_strlcpy(text, string, maxlen);
    length += sz;

    if (filllen > 0) {
        // Left-justified: the fill follows the string.
        SDL_memset(text + sz, fill, filllen);
        length += width;
    }

    if (info) {
        if (info->precision >= 0 && (size_t)info->precision < sz) {
            // Truncate to the precision; drop the cut chars from the count.
            slen = (size_t)info->precision;
            if (slen < maxlen) {
                text[slen] = '\0';
            }
            length -= (sz - slen);
        }
        if (maxlen > 1) {
            if (info->force_case == SDL_CASE_LOWER) {
                SDL_strlwr(text);
            } else if (info->force_case == SDL_CASE_UPPER) {
                SDL_strupr(text);
            }
        }
    }
    return length;
}
1938
// Print a wide string by converting it to UTF-8 first; a NULL wide string
// (or a failed conversion, where SDL_iconv_string returns NULL) prints
// "(null)" via SDL_PrintString.
static size_t SDL_PrintStringW(char *text, size_t maxlen, SDL_FormatInfo *info, const wchar_t *wide_string)
{
    size_t length = 0;
    if (wide_string) {
        char *string = SDL_iconv_string("UTF-8", "WCHAR_T", (char *)(wide_string), (SDL_wcslen(wide_string) + 1) * sizeof(*wide_string));
        length = SDL_PrintString(TEXT_AND_LEN_ARGS, info, string);
        SDL_free(string);
    } else {
        length = SDL_PrintString(TEXT_AND_LEN_ARGS, info, NULL);
    }
    return length;
}
1951
// Left-pad the numeric string `num` (buffer capacity maxlen) with zeroes to
// honor info->precision, or info->width when zero-padding was requested.
// Any sign character stays in front of the zeroes. Consumed fields of *info
// are reset so SDL_PrintString() doesn't apply them a second time.
static void SDL_IntPrecisionAdjust(char *num, size_t maxlen, SDL_FormatInfo *info)
{ // left-pad num with zeroes.
    size_t sz, pad, have_sign;

    if (!info) {
        return;
    }

    have_sign = 0;
    if (*num == '-' || *num == '+') {
        // Skip the sign; padding goes after it.
        have_sign = 1;
        ++num;
        --maxlen;
    }
    sz = SDL_strlen(num);
    if (info->precision > 0 && sz < (size_t)info->precision) {
        pad = (size_t)info->precision - sz;
        if (pad + sz + 1 <= maxlen) { // otherwise ignore the precision
            SDL_memmove(num + pad, num, sz + 1);
            SDL_memset(num, '0', pad);
        }
    }
    info->precision = -1; // so that SDL_PrintString() doesn't make a mess.

    if (info->pad_zeroes && info->width > 0 && (size_t)info->width > sz + have_sign) {
        /* handle here: spaces are added before the sign
           but zeroes must be placed _after_ the sign. */
        // sz hasn't changed: we ignore pad_zeroes if a precision is given.
        pad = (size_t)info->width - sz - have_sign;
        if (pad + sz + 1 <= maxlen) {
            SDL_memmove(num + pad, num, sz + 1);
            SDL_memset(num, '0', pad);
        }
        info->width = 0; // so that SDL_PrintString() doesn't make a mess.
    }
}
1988
1989static size_t SDL_PrintLong(char *text, size_t maxlen, SDL_FormatInfo *info, long value)
1990{
1991 char num[130], *p = num;
1992
1993 if (info->force_sign && value >= 0L) {
1994 *p++ = '+';
1995 }
1996
1997 SDL_ltoa(value, p, info ? info->radix : 10);
1998 SDL_IntPrecisionAdjust(num, sizeof(num), info);
1999 return SDL_PrintString(text, maxlen, info, num);
2000}
2001
// Format an unsigned long per *info (radix, width, precision); returns the
// required length like SDL_PrintString. info may be NULL.
static size_t SDL_PrintUnsignedLong(char *text, size_t maxlen, SDL_FormatInfo *info, unsigned long value)
{
    char num[130];

    SDL_ultoa(value, num, info ? info->radix : 10);
    SDL_IntPrecisionAdjust(num, sizeof(num), info);
    return SDL_PrintString(text, maxlen, info, num);
}
2010
2011static size_t SDL_PrintLongLong(char *text, size_t maxlen, SDL_FormatInfo *info, long long value)
2012{
2013 char num[130], *p = num;
2014
2015 if (info->force_sign && value >= (Sint64)0) {
2016 *p++ = '+';
2017 }
2018
2019 SDL_lltoa(value, p, info ? info->radix : 10);
2020 SDL_IntPrecisionAdjust(num, sizeof(num), info);
2021 return SDL_PrintString(text, maxlen, info, num);
2022}
2023
2024static size_t SDL_PrintUnsignedLongLong(char *text, size_t maxlen, SDL_FormatInfo *info, unsigned long long value)
2025{
2026 char num[130];
2027
2028 SDL_ulltoa(value, num, info ? info->radix : 10);
2029 SDL_IntPrecisionAdjust(num, sizeof(num), info);
2030 return SDL_PrintString(text, maxlen, info, num);
2031}
2032
// Format a double in fixed notation ('%f'), or shortest-form '%g' style when
// 'g' is true. Builds the digits by repeated multiply-by-10 of the fraction,
// so results are approximate for extreme values. Returns the number of
// characters the full result needs (snprintf semantics); 'info' must be
// non-NULL here (it is dereferenced immediately).
static size_t SDL_PrintFloat(char *text, size_t maxlen, SDL_FormatInfo *info, double arg, bool g)
{
    char num[327]; // presumably sized for DBL_MAX's ~309 integer digits plus sign/point/fraction — TODO confirm
    size_t length = 0;
    size_t integer_length;
    int precision = info->precision;

    // This isn't especially accurate, but hey, it's easy. :)
    unsigned long long value;

    if (arg < 0.0 || (arg == 0.0 && 1.0 / arg < 0.0)) { // additional check for signed zero
        num[length++] = '-';
        arg = -arg;
    } else if (info->force_sign) {
        num[length++] = '+';
    }
    // Emit the integer portion via the unsigned formatter (info == NULL there
    // means plain base-10, no width/precision yet).
    value = (unsigned long long)arg;
    integer_length = SDL_PrintUnsignedLongLong(&num[length], sizeof(num) - length, NULL, value);
    length += integer_length;
    arg -= value; // leave only the fractional part in 'arg'
    if (precision < 0) {
        precision = 6; // default precision, as in C's printf
    }
    if (g) {
        // The precision includes the integer portion
        precision -= SDL_min((int)integer_length, precision);
    }
    // '#' (force_type) keeps the decimal point even with zero precision.
    if (info->force_type || precision > 0) {
        const char decimal_separator = '.';
        double integer_value;

        SDL_assert(length < sizeof(num));
        num[length++] = decimal_separator;
        // Peel off one fractional digit at a time; the last digit (below)
        // is rounded rather than truncated.
        while (precision > 1) {
            arg *= 10.0;
            arg = SDL_modf(arg, &integer_value);
            SDL_assert(length < sizeof(num));
            num[length++] = '0' + (char)integer_value;
            --precision;
        }
        if (precision == 1) {
            arg *= 10.0;
            integer_value = SDL_round(arg);
            if (integer_value == 10.0) {
                // Carry the one...
                size_t i;

                // Walk back through already-emitted digits, propagating the
                // carry; skip the separator, and if the carry runs off the
                // front (or into the sign), shift and prepend a '1'.
                for (i = length; i--; ) {
                    if (num[i] == decimal_separator) {
                        continue;
                    }
                    if (num[i] == '9') {
                        num[i] = '0';
                        if (i == 0 || num[i - 1] == '-' || num[i - 1] == '+') {
                            SDL_memmove(&num[i+1], &num[i], length - i);
                            num[i] = '1';
                            ++length;
                            break;
                        }
                    } else {
                        ++num[i];
                        break;
                    }
                }
                SDL_assert(length < sizeof(num));
                num[length++] = '0'; // the rounded digit itself becomes 0 after the carry
            } else {
                SDL_assert(length < sizeof(num));
                num[length++] = '0' + (char)integer_value;
            }
        }

        if (g) {
            // Trim trailing zeroes and decimal separator
            size_t i;

            for (i = length - 1; num[i] != decimal_separator; --i) {
                if (num[i] == '0') {
                    --length;
                } else {
                    break;
                }
            }
            if (num[i] == decimal_separator) {
                --length;
            }
        }
    }
    num[length] = '\0';

    info->precision = -1; // consumed; keep SDL_PrintString() from truncating the digits
    length = SDL_PrintString(text, maxlen, info, num);

    // Width padding is done here (not in SDL_PrintString) so pad_zeroes can
    // use '0' as the fill character for floats.
    if (info->width > 0 && (size_t)info->width > length) {
        const char fill = info->pad_zeroes ? '0' : ' ';
        size_t width = info->width - length;
        size_t filllen, movelen;

        // Clamp both the fill and the shifted text to the buffer; 'length'
        // still advances by the full width (would-have-written semantics).
        filllen = SDL_min(width, maxlen);
        movelen = SDL_min(length, (maxlen - filllen));
        SDL_memmove(&text[filllen], text, movelen);
        SDL_memset(text, fill, filllen);
        length += width;
    }
    return length;
}
2139
2140static size_t SDL_PrintPointer(char *text, size_t maxlen, SDL_FormatInfo *info, const void *value)
2141{
2142 char num[130];
2143 size_t length;
2144
2145 if (!value) {
2146 return SDL_PrintString(text, maxlen, info, NULL);
2147 }
2148
2149 SDL_ulltoa((unsigned long long)(uintptr_t)value, num, 16);
2150 length = SDL_PrintString(text, maxlen, info, "0x");
2151 return length + SDL_PrintString(TEXT_AND_LEN_ARGS, info, num);
2152}
2153
// SDL's portable vsnprintf core: parses the format string one conversion at a
// time and dispatches to the SDL_Print* helpers. 'length' tracks the number
// of characters the full output needs, even past 'maxlen' (C99 snprintf
// semantics), and the buffer is always NUL-terminated when maxlen > 0.
// NOLINTNEXTLINE(readability-non-const-parameter)
int SDL_vsnprintf(SDL_OUT_Z_CAP(maxlen) char *text, size_t maxlen, SDL_PRINTF_FORMAT_STRING const char *fmt, va_list ap)
{
    size_t length = 0;

    if (!text) {
        maxlen = 0; // no buffer: just measure
    }
    if (!fmt) {
        fmt = "";
    }
    while (*fmt) {
        if (*fmt == '%') {
            bool done = false;
            bool check_flag;
            SDL_FormatInfo info;
            // Length-modifier state accumulated by 'l'/'I64'/'z' below.
            enum
            {
                DO_INT,
                DO_LONG,
                DO_LONGLONG,
                DO_SIZE_T
            } inttype = DO_INT;

            SDL_zero(info);
            info.radix = 10;
            info.precision = -1; // -1 means "no precision given"

            // 1) Flags: any run of '-', '+', '#', '0' after the '%'.
            check_flag = true;
            while (check_flag) {
                ++fmt;
                switch (*fmt) {
                case '-':
                    info.left_justify = true;
                    break;
                case '+':
                    info.force_sign = true;
                    break;
                case '#':
                    info.force_type = true;
                    break;
                case '0':
                    info.pad_zeroes = true;
                    break;
                default:
                    check_flag = false;
                    break;
                }
            }

            // 2) Field width: digits or '*' (from the argument list).
            if (*fmt >= '0' && *fmt <= '9') {
                info.width = SDL_strtol(fmt, (char **)&fmt, 0);
            } else if (*fmt == '*') {
                ++fmt;
                // NOTE(review): a negative '*' width is stored as-is; C's
                // printf would treat it as '-' flag + positive width — confirm.
                info.width = va_arg(ap, int);
            }

            // 3) Precision: '.' followed by digits, '*', or nothing (== 0).
            if (*fmt == '.') {
                ++fmt;
                if (*fmt >= '0' && *fmt <= '9') {
                    info.precision = SDL_strtol(fmt, (char **)&fmt, 0);
                } else if (*fmt == '*') {
                    ++fmt;
                    info.precision = va_arg(ap, int);
                } else {
                    info.precision = 0;
                }
                if (info.precision < 0) {
                    info.precision = 0;
                }
            }

            // 4) Length modifiers and the conversion character. The loop
            // keeps consuming until a terminating conversion sets 'done'.
            while (!done) {
                switch (*fmt) {
                case '%':
                    if (length < maxlen) {
                        text[length] = '%';
                    }
                    ++length;
                    done = true;
                    break;
                case 'c':
                    // char is promoted to int when passed through (...)
                    if (length < maxlen) {
                        text[length] = (char)va_arg(ap, int);
                    }
                    ++length;
                    done = true;
                    break;
                case 'h':
                    // short is promoted to int when passed through (...)
                    break;
                case 'l':
                    // 'l' -> long, 'll' -> long long (saturates at long long).
                    if (inttype < DO_LONGLONG) {
                        ++inttype;
                    }
                    break;
                case 'I':
                    // Microsoft's %I64 extension.
                    if (SDL_strncmp(fmt, "I64", 3) == 0) {
                        fmt += 2;
                        inttype = DO_LONGLONG;
                    }
                    break;
                case 'z':
                    inttype = DO_SIZE_T;
                    break;
                case 'i':
                case 'd':
                    // With an explicit precision, '0' padding is ignored
                    // (standard printf behavior).
                    if (info.precision >= 0) {
                        info.pad_zeroes = false;
                    }
                    switch (inttype) {
                    case DO_INT:
                        length += SDL_PrintLong(TEXT_AND_LEN_ARGS, &info,
                                                (long)va_arg(ap, int));
                        break;
                    case DO_LONG:
                        length += SDL_PrintLong(TEXT_AND_LEN_ARGS, &info,
                                                va_arg(ap, long));
                        break;
                    case DO_LONGLONG:
                        length += SDL_PrintLongLong(TEXT_AND_LEN_ARGS, &info,
                                                    va_arg(ap, long long));
                        break;
                    case DO_SIZE_T:
                        length += SDL_PrintLongLong(TEXT_AND_LEN_ARGS, &info,
                                                    va_arg(ap, size_t));
                        break;
                    }
                    done = true;
                    break;
                case 'p':
                    info.force_case = SDL_CASE_LOWER;
                    length += SDL_PrintPointer(TEXT_AND_LEN_ARGS, &info, va_arg(ap, void *));
                    done = true;
                    break;
                // 'x'/'X'/'o' fall through into the shared unsigned path at
                // 'u', each setting its case/radix first.
                case 'x':
                    info.force_case = SDL_CASE_LOWER;
                    SDL_FALLTHROUGH;
                case 'X':
                    if (info.force_case == SDL_CASE_NOCHANGE) {
                        info.force_case = SDL_CASE_UPPER;
                    }
                    if (info.radix == 10) {
                        info.radix = 16;
                    }
                    SDL_FALLTHROUGH;
                case 'o':
                    if (info.radix == 10) {
                        info.radix = 8;
                    }
                    SDL_FALLTHROUGH;
                case 'u':
                    info.force_sign = false; // '+' is meaningless for unsigned
                    if (info.precision >= 0) {
                        info.pad_zeroes = false;
                    }
                    switch (inttype) {
                    case DO_INT:
                        length += SDL_PrintUnsignedLong(TEXT_AND_LEN_ARGS, &info,
                                                        va_arg(ap, unsigned int));
                        break;
                    case DO_LONG:
                        length += SDL_PrintUnsignedLong(TEXT_AND_LEN_ARGS, &info,
                                                        va_arg(ap, unsigned long));
                        break;
                    case DO_LONGLONG:
                        length += SDL_PrintUnsignedLongLong(TEXT_AND_LEN_ARGS, &info,
                                                            va_arg(ap, unsigned long long));
                        break;
                    case DO_SIZE_T:
                        length += SDL_PrintUnsignedLongLong(TEXT_AND_LEN_ARGS, &info,
                                                            va_arg(ap, size_t));
                        break;
                    }
                    done = true;
                    break;
                case 'f':
                    length += SDL_PrintFloat(TEXT_AND_LEN_ARGS, &info, va_arg(ap, double), false);
                    done = true;
                    break;
                case 'g':
                    length += SDL_PrintFloat(TEXT_AND_LEN_ARGS, &info, va_arg(ap, double), true);
                    done = true;
                    break;
                case 'S':
                    // Wide string (Microsoft extension).
                    info.pad_zeroes = false;
                    length += SDL_PrintStringW(TEXT_AND_LEN_ARGS, &info, va_arg(ap, wchar_t *));
                    done = true;
                    break;
                case 's':
                    info.pad_zeroes = false;
                    // "%ls" (inttype bumped by 'l') is a wide string.
                    if (inttype > DO_INT) {
                        length += SDL_PrintStringW(TEXT_AND_LEN_ARGS, &info, va_arg(ap, wchar_t *));
                    } else {
                        length += SDL_PrintString(TEXT_AND_LEN_ARGS, &info, va_arg(ap, char *));
                    }
                    done = true;
                    break;
                default:
                    // Unknown conversion: silently dropped.
                    done = true;
                    break;
                }
                ++fmt;
            }
        } else {
            // Ordinary character: copy through (still counted when truncated).
            if (length < maxlen) {
                text[length] = *fmt;
            }
            ++fmt;
            ++length;
        }
    }
    // Always NUL-terminate when there is any buffer at all.
    if (length < maxlen) {
        text[length] = '\0';
    } else if (maxlen > 0) {
        text[maxlen - 1] = '\0';
    }
    return (int)length;
}
2374
2375#undef TEXT_AND_LEN_ARGS
2376#endif // HAVE_VSNPRINTF
2377
2378int SDL_vswprintf(SDL_OUT_Z_CAP(maxlen) wchar_t *text, size_t maxlen, const wchar_t *fmt, va_list ap)
2379{
2380 char *fmt_utf8 = NULL;
2381 if (fmt) {
2382 fmt_utf8 = SDL_iconv_string("UTF-8", "WCHAR_T", (const char *)fmt, (SDL_wcslen(fmt) + 1) * sizeof(wchar_t));
2383 if (!fmt_utf8) {
2384 return -1;
2385 }
2386 }
2387
2388 char tinybuf[64]; // for really small strings, calculate it once.
2389
2390 // generate the text to find the final text length
2391 va_list aq;
2392 va_copy(aq, ap);
2393 const int utf8len = SDL_vsnprintf(tinybuf, sizeof (tinybuf), fmt_utf8, aq);
2394 va_end(aq);
2395
2396 if (utf8len < 0) {
2397 SDL_free(fmt_utf8);
2398 return -1;
2399 }
2400
2401 bool isstack = false;
2402 char *smallbuf = NULL;
2403 char *utf8buf;
2404 int result;
2405
2406 if (utf8len < sizeof (tinybuf)) { // whole thing fit in the stack buffer, just use that copy.
2407 utf8buf = tinybuf;
2408 } else { // didn't fit in the stack buffer, allocate the needed space and run it again.
2409 utf8buf = smallbuf = SDL_small_alloc(char, utf8len + 1, &isstack);
2410 if (!smallbuf) {
2411 SDL_free(fmt_utf8);
2412 return -1; // oh well.
2413 }
2414 const int utf8len2 = SDL_vsnprintf(smallbuf, utf8len + 1, fmt_utf8, ap);
2415 if (utf8len2 > utf8len) {
2416 SDL_free(fmt_utf8);
2417 return SDL_SetError("Formatted output changed between two runs"); // race condition on the parameters, and we no longer have room...yikes.
2418 }
2419 }
2420
2421 SDL_free(fmt_utf8);
2422
2423 wchar_t *wbuf = (wchar_t *)SDL_iconv_string("WCHAR_T", "UTF-8", utf8buf, utf8len + 1);
2424 if (wbuf) {
2425 if (text) {
2426 SDL_wcslcpy(text, wbuf, maxlen);
2427 }
2428 result = (int)SDL_wcslen(wbuf);
2429 SDL_free(wbuf);
2430 } else {
2431 result = -1;
2432 }
2433
2434 if (smallbuf != NULL) {
2435 SDL_small_free(smallbuf, isstack);
2436 }
2437
2438 return result;
2439}
2440
2441int SDL_asprintf(char **strp, SDL_PRINTF_FORMAT_STRING const char *fmt, ...)
2442{
2443 va_list ap;
2444 int result;
2445
2446 va_start(ap, fmt);
2447 result = SDL_vasprintf(strp, fmt, ap);
2448 va_end(ap);
2449
2450 return result;
2451}
2452
2453int SDL_vasprintf(char **strp, SDL_PRINTF_FORMAT_STRING const char *fmt, va_list ap)
2454{
2455 int result;
2456 int size = 100; // Guess we need no more than 100 bytes
2457 char *p, *np;
2458 va_list aq;
2459
2460 *strp = NULL;
2461
2462 p = (char *)SDL_malloc(size);
2463 if (!p) {
2464 return -1;
2465 }
2466
2467 while (1) {
2468 // Try to print in the allocated space
2469 va_copy(aq, ap);
2470 result = SDL_vsnprintf(p, size, fmt, aq);
2471 va_end(aq);
2472
2473 // Check error code
2474 if (result < 0) {
2475 SDL_free(p);
2476 return result;
2477 }
2478
2479 // If that worked, return the string
2480 if (result < size) {
2481 *strp = p;
2482 return result;
2483 }
2484
2485 // Else try again with more space
2486 size = result + 1; // Precisely what is needed
2487
2488 np = (char *)SDL_realloc(p, size);
2489 if (!np) {
2490 SDL_free(p);
2491 return -1;
2492 } else {
2493 p = np;
2494 }
2495 }
2496}
2497
// Return a pointer to the first character in 'str' that also appears in
// 'breakset', or NULL if no such character exists.
char * SDL_strpbrk(const char *str, const char *breakset)
{
#ifdef HAVE_STRPBRK
    return strpbrk(str, breakset);
#else

    while (*str) {
        const char *accept;

        for (accept = breakset; *accept; ++accept) {
            if (*accept == *str) {
                return (char *) str;
            }
        }
        ++str;
    }
    return NULL;
#endif
}
diff --git a/contrib/SDL-3.2.8/src/stdlib/SDL_strtokr.c b/contrib/SDL-3.2.8/src/stdlib/SDL_strtokr.c
new file mode 100644
index 0000000..e600808
--- /dev/null
+++ b/contrib/SDL-3.2.8/src/stdlib/SDL_strtokr.c
@@ -0,0 +1,95 @@
1/*
2 Simple DirectMedia Layer
3 Copyright (C) 1997-2025 Sam Lantinga <slouken@libsdl.org>
4
5 This software is provided 'as-is', without any express or implied
6 warranty. In no event will the authors be held liable for any damages
7 arising from the use of this software.
8
9 Permission is granted to anyone to use this software for any purpose,
10 including commercial applications, and to alter it and redistribute it
11 freely, subject to the following restrictions:
12
13 1. The origin of this software must not be misrepresented; you must not
14 claim that you wrote the original software. If you use this software
15 in a product, an acknowledgment in the product documentation would be
16 appreciated but is not required.
17 2. Altered source versions must be plainly marked as such, and must not be
18 misrepresented as being the original software.
19 3. This notice may not be removed or altered from any source distribution.
20*/
21#include "SDL_internal.h"
22
23
// Reentrant string tokenizer (strtok_r semantics): on the first call pass the
// string in s1; on subsequent calls pass NULL and the scan resumes from *ptr.
// Delimiter characters (s2) are overwritten with '\0'. Returns the next
// token, or NULL when the string is exhausted or the arguments are invalid.
char *SDL_strtok_r(char *s1, const char *s2, char **ptr)
{
#ifdef HAVE_STRTOK_R
    return strtok_r(s1, s2, ptr);

#else /* SDL implementation */
/*
 * Adapted from _PDCLIB_strtok() of PDClib library at
 * https://github.com/DevSolar/pdclib.git
 *
 * The code was under CC0 license:
 * https://creativecommons.org/publicdomain/zero/1.0/legalcode :
 *
 * No Copyright
 *
 * The person who associated a work with this deed has dedicated the
 * work to the public domain by waiving all of his or her rights to
 * the work worldwide under copyright law, including all related and
 * neighboring rights, to the extent allowed by law.
 *
 * You can copy, modify, distribute and perform the work, even for
 * commercial purposes, all without asking permission. See Other
 * Information below.
 */
    const char *p = s2;

    if (!s2 || !ptr || (!s1 && !*ptr)) return NULL;

    if (s1 != NULL) { /* new string */
        *ptr = s1;
    } else { /* old string continued */
        /* The guard above already returned NULL when both s1 and *ptr are
           NULL, so *ptr is known non-NULL here (the former re-check of
           *ptr == NULL was dead code and has been removed). */
        s1 = *ptr;
    }

    /* skip leading s2 (delimiter) characters */
    while (*p && *s1) {
        if (*s1 == *p) {
            /* found separator; skip and start over */
            ++s1;
            p = s2;
            continue;
        }
        ++p;
    }

    if (! *s1) { /* no more to parse */
        *ptr = s1;
        return NULL;
    }

    /* skipping non-s2 characters: scan out the token */
    *ptr = s1;
    while (**ptr) {
        p = s2;
        while (*p) {
            if (**ptr == *p++) {
                /* found separator; overwrite with '\0', position *ptr, return */
                *((*ptr)++) = '\0';
                return s1;
            }
        }
        ++(*ptr);
    }

    /* parsed to end of string */
    return s1;
#endif
}
diff --git a/contrib/SDL-3.2.8/src/stdlib/SDL_sysstdlib.h b/contrib/SDL-3.2.8/src/stdlib/SDL_sysstdlib.h
new file mode 100644
index 0000000..7d43ab0
--- /dev/null
+++ b/contrib/SDL-3.2.8/src/stdlib/SDL_sysstdlib.h
@@ -0,0 +1,32 @@
1/*
2 Simple DirectMedia Layer
3 Copyright (C) 1997-2025 Sam Lantinga <slouken@libsdl.org>
4
5 This software is provided 'as-is', without any express or implied
6 warranty. In no event will the authors be held liable for any damages
7 arising from the use of this software.
8
9 Permission is granted to anyone to use this software for any purpose,
10 including commercial applications, and to alter it and redistribute it
11 freely, subject to the following restrictions:
12
13 1. The origin of this software must not be misrepresented; you must not
14 claim that you wrote the original software. If you use this software
15 in a product, an acknowledgment in the product documentation would be
16 appreciated but is not required.
17 2. Altered source versions must be plainly marked as such, and must not be
18 misrepresented as being the original software.
19 3. This notice may not be removed or altered from any source distribution.
20*/
21
22#ifndef SDL_sysstdlib_h_
23#define SDL_sysstdlib_h_
24
25// most things you might need internally in here are public APIs, this is
26// just a few special pieces right now.
27
28// this expects `from` to be a Unicode codepoint, and `to` to point to AT LEAST THREE Uint32s.
29int SDL_CaseFoldUnicode(Uint32 from, Uint32 *to);
30
31#endif
32
diff --git a/contrib/SDL-3.2.8/src/stdlib/SDL_vacopy.h b/contrib/SDL-3.2.8/src/stdlib/SDL_vacopy.h
new file mode 100644
index 0000000..fee560e
--- /dev/null
+++ b/contrib/SDL-3.2.8/src/stdlib/SDL_vacopy.h
@@ -0,0 +1,30 @@
1/*
2 Simple DirectMedia Layer
3 Copyright (C) 1997-2025 Sam Lantinga <slouken@libsdl.org>
4
5 This software is provided 'as-is', without any express or implied
6 warranty. In no event will the authors be held liable for any damages
7 arising from the use of this software.
8
9 Permission is granted to anyone to use this software for any purpose,
10 including commercial applications, and to alter it and redistribute it
11 freely, subject to the following restrictions:
12
13 1. The origin of this software must not be misrepresented; you must not
14 claim that you wrote the original software. If you use this software
15 in a product, an acknowledgment in the product documentation would be
16 appreciated but is not required.
17 2. Altered source versions must be plainly marked as such, and must not be
18 misrepresented as being the original software.
19 3. This notice may not be removed or altered from any source distribution.
20*/
21
// Do our best to make sure va_copy is working
#if defined(_MSC_VER) && _MSC_VER <= 1800
// Visual Studio 2013 tries to link with _vacopy in the C runtime. Newer versions do an inline assignment
#undef va_copy
#define va_copy(dst, src) dst = src

#elif defined(__GNUC__) && (__GNUC__ < 3)
// Pre-C99 GCC spelled the builtin __va_copy; map the standard name onto it.
#define va_copy(dst, src) __va_copy(dst, src)
#endif