From f5c4671bfbad96bf346bd7e9a21fc4317b4959df Mon Sep 17 00:00:00 2001 From: Indrajith K L Date: Sat, 3 Dec 2022 17:00:20 +0530 Subject: Adds most of the tools --- v_windows/v/old/thirdparty/.gitignore | 11 + v_windows/v/old/thirdparty/bignum/README.md | 2 + v_windows/v/old/thirdparty/bignum/bn.c | 664 + v_windows/v/old/thirdparty/bignum/bn.h | 123 + v_windows/v/old/thirdparty/cJSON/cJSON.c | 2973 ++ v_windows/v/old/thirdparty/cJSON/cJSON.h | 285 + v_windows/v/old/thirdparty/cJSON/readme.txt | 7 + v_windows/v/old/thirdparty/fontstash/fontstash.h | 1742 ++ .../v/old/thirdparty/fontstash/stb_truetype.h | 5011 +++ v_windows/v/old/thirdparty/ios/ios.m | 8 + v_windows/v/old/thirdparty/libgc/amalgamation.txt | 7 + v_windows/v/old/thirdparty/libgc/gc.c | 30266 +++++++++++++++++++ v_windows/v/old/thirdparty/libgc/gc.h | 1081 + .../v/old/thirdparty/mssql/include/.gitignore | 4 + v_windows/v/old/thirdparty/mssql/include/mssql.h | 20 + v_windows/v/old/thirdparty/picoev/picoev.c | 9 + v_windows/v/old/thirdparty/picoev/src/README.md | 21 + v_windows/v/old/thirdparty/picoev/src/picoev.h | 404 + .../v/old/thirdparty/picoev/src/picoev_epoll.c | 168 + .../v/old/thirdparty/picoev/src/picoev_kqueue.c | 209 + .../v/old/thirdparty/picoev/src/picoev_select.c | 169 + v_windows/v/old/thirdparty/picoev/src/picoev_w32.h | 15 + .../old/thirdparty/picohttpparser/picohttpparser.c | 28 + .../old/thirdparty/picohttpparser/picohttpparser.h | 112 + .../v/old/thirdparty/picohttpparser/src/README.md | 116 + .../thirdparty/picohttpparser/src/picohttpparser.c | 645 + .../thirdparty/picohttpparser/src/picohttpparser.h | 87 + v_windows/v/old/thirdparty/sokol/sokol_app.h | 10475 +++++++ v_windows/v/old/thirdparty/sokol/sokol_app2.h | 40 + v_windows/v/old/thirdparty/sokol/sokol_audio.h | 2147 ++ v_windows/v/old/thirdparty/sokol/sokol_gfx.h | 15672 ++++++++++ v_windows/v/old/thirdparty/sokol/sokol_v.h | 20 + .../v/old/thirdparty/sokol/util/sokol_fontstash.h | 1785 ++ v_windows/v/old/thirdparty/sokol/util/sokol_gl.h | 3297 ++ v_windows/v/old/thirdparty/stb_image/stb_image.h | 7766 +++++ v_windows/v/old/thirdparty/stb_image/stbi.c | 3 + v_windows/v/old/thirdparty/stdatomic/nix/atomic.h | 435 + .../v/old/thirdparty/stdatomic/nix/atomic_cpp.h | 541 + v_windows/v/old/thirdparty/stdatomic/win/atomic.h | 366 + v_windows/v/old/thirdparty/vschannel/vschannel.c | 1095 + v_windows/v/old/thirdparty/vschannel/vschannel.h | 47 + v_windows/v/old/thirdparty/zip/miniz.h | 6859 +++++ v_windows/v/old/thirdparty/zip/zip.c | 1034 + v_windows/v/old/thirdparty/zip/zip.h | 345 + 44 files changed, 96114 insertions(+) create mode 100644 v_windows/v/old/thirdparty/.gitignore create mode 100644 v_windows/v/old/thirdparty/bignum/README.md create mode 100644 v_windows/v/old/thirdparty/bignum/bn.c create mode 100644 v_windows/v/old/thirdparty/bignum/bn.h create mode 100644 v_windows/v/old/thirdparty/cJSON/cJSON.c create mode 100644 v_windows/v/old/thirdparty/cJSON/cJSON.h create mode 100644 v_windows/v/old/thirdparty/cJSON/readme.txt create mode 100644 v_windows/v/old/thirdparty/fontstash/fontstash.h create mode 100644 v_windows/v/old/thirdparty/fontstash/stb_truetype.h create mode 100644 v_windows/v/old/thirdparty/ios/ios.m create mode 100644 v_windows/v/old/thirdparty/libgc/amalgamation.txt create mode 100644 v_windows/v/old/thirdparty/libgc/gc.c create mode 100644 v_windows/v/old/thirdparty/libgc/gc.h create mode 100644 v_windows/v/old/thirdparty/mssql/include/.gitignore create mode 100644 v_windows/v/old/thirdparty/mssql/include/mssql.h create mode 100644 
v_windows/v/old/thirdparty/picoev/picoev.c create mode 100644 v_windows/v/old/thirdparty/picoev/src/README.md create mode 100644 v_windows/v/old/thirdparty/picoev/src/picoev.h create mode 100644 v_windows/v/old/thirdparty/picoev/src/picoev_epoll.c create mode 100644 v_windows/v/old/thirdparty/picoev/src/picoev_kqueue.c create mode 100644 v_windows/v/old/thirdparty/picoev/src/picoev_select.c create mode 100644 v_windows/v/old/thirdparty/picoev/src/picoev_w32.h create mode 100644 v_windows/v/old/thirdparty/picohttpparser/picohttpparser.c create mode 100644 v_windows/v/old/thirdparty/picohttpparser/picohttpparser.h create mode 100644 v_windows/v/old/thirdparty/picohttpparser/src/README.md create mode 100644 v_windows/v/old/thirdparty/picohttpparser/src/picohttpparser.c create mode 100644 v_windows/v/old/thirdparty/picohttpparser/src/picohttpparser.h create mode 100644 v_windows/v/old/thirdparty/sokol/sokol_app.h create mode 100644 v_windows/v/old/thirdparty/sokol/sokol_app2.h create mode 100644 v_windows/v/old/thirdparty/sokol/sokol_audio.h create mode 100644 v_windows/v/old/thirdparty/sokol/sokol_gfx.h create mode 100644 v_windows/v/old/thirdparty/sokol/sokol_v.h create mode 100644 v_windows/v/old/thirdparty/sokol/util/sokol_fontstash.h create mode 100644 v_windows/v/old/thirdparty/sokol/util/sokol_gl.h create mode 100644 v_windows/v/old/thirdparty/stb_image/stb_image.h create mode 100644 v_windows/v/old/thirdparty/stb_image/stbi.c create mode 100644 v_windows/v/old/thirdparty/stdatomic/nix/atomic.h create mode 100644 v_windows/v/old/thirdparty/stdatomic/nix/atomic_cpp.h create mode 100644 v_windows/v/old/thirdparty/stdatomic/win/atomic.h create mode 100644 v_windows/v/old/thirdparty/vschannel/vschannel.c create mode 100644 v_windows/v/old/thirdparty/vschannel/vschannel.h create mode 100644 v_windows/v/old/thirdparty/zip/miniz.h create mode 100644 v_windows/v/old/thirdparty/zip/zip.c create mode 100644 v_windows/v/old/thirdparty/zip/zip.h (limited to 'v_windows/v/old/thirdparty') diff --git a/v_windows/v/old/thirdparty/.gitignore b/v_windows/v/old/thirdparty/.gitignore new file mode 100644 index 0000000..c964467 --- /dev/null +++ b/v_windows/v/old/thirdparty/.gitignore @@ -0,0 +1,11 @@ +!glfw/glfw3.dll +!glfw/msvc/glfw3.lib + +freetype/ +sdl2/ +SDL2_image/ +SDL2_mixer/ +SDL2_ttf/ +pg/ +tcc/ +sqlite/ diff --git a/v_windows/v/old/thirdparty/bignum/README.md b/v_windows/v/old/thirdparty/bignum/README.md new file mode 100644 index 0000000..04eb555 --- /dev/null +++ b/v_windows/v/old/thirdparty/bignum/README.md @@ -0,0 +1,2 @@ +This folder contains bn.h and bn.c files +from https://github.com/kokke/tiny-bignum-c diff --git a/v_windows/v/old/thirdparty/bignum/bn.c b/v_windows/v/old/thirdparty/bignum/bn.c new file mode 100644 index 0000000..7ec0895 --- /dev/null +++ b/v_windows/v/old/thirdparty/bignum/bn.c @@ -0,0 +1,664 @@ +/* + +Big number library - arithmetic on multiple-precision unsigned integers. + +This library is an implementation of arithmetic on arbitrarily large integers. + +The difference between this and other implementations, is that the data structure +has optimal memory utilization (i.e. a 1024 bit integer takes up 128 bytes RAM), +and all memory is allocated statically: no dynamic allocation for better or worse. + +Primary goals are correctness, clarity of code and clean, portable implementation. +Secondary goal is a memory footprint small enough to make it suitable for use in +embedded applications. + + +The current state is correct functionality and adequate performance. 
+There may well be room for performance-optimizations and improvements.
+
+*/
+
+#include <stdio.h>
+#include <stdbool.h>
+#include <assert.h>
+#include "bn.h"
+
+
+
+/* Functions for shifting number in-place. */
+static void _lshift_one_bit(struct bn* a);
+static void _rshift_one_bit(struct bn* a);
+static void _lshift_word(struct bn* a, int nwords);
+static void _rshift_word(struct bn* a, int nwords);
+
+
+
+/* Public / Exported functions. */
+void bignum_init(struct bn* n)
+{
+  require(n, "n is null");
+
+  int i;
+  for (i = 0; i < BN_ARRAY_SIZE; ++i)
+  {
+    n->array[i] = 0;
+  }
+}
+
+
+void bignum_from_int(struct bn* n, DTYPE_TMP i)
+{
+  require(n, "n is null");
+
+  bignum_init(n);
+
+  /* Endianness issue if machine is not little-endian? */
+#ifdef WORD_SIZE
+ #if (WORD_SIZE == 1)
+  n->array[0] = (i & 0x000000ff);
+  n->array[1] = (i & 0x0000ff00) >> 8;
+  n->array[2] = (i & 0x00ff0000) >> 16;
+  n->array[3] = (i & 0xff000000) >> 24;
+ #elif (WORD_SIZE == 2)
+  n->array[0] = (i & 0x0000ffff);
+  n->array[1] = (i & 0xffff0000) >> 16;
+ #elif (WORD_SIZE == 4)
+  n->array[0] = i;
+  DTYPE_TMP num_32 = 32;
+  DTYPE_TMP tmp = i >> num_32; /* bit-shift with U64 operands to force 64-bit results */
+  n->array[1] = tmp;
+ #endif
+#endif
+}
+
+
+int bignum_to_int(struct bn* n)
+{
+  require(n, "n is null");
+
+  int ret = 0;
+
+  /* Endianness issue if machine is not little-endian? */
+#if (WORD_SIZE == 1)
+  ret += n->array[0];
+  ret += n->array[1] << 8;
+  ret += n->array[2] << 16;
+  ret += n->array[3] << 24;
+#elif (WORD_SIZE == 2)
+  ret += n->array[0];
+  ret += n->array[1] << 16;
+#elif (WORD_SIZE == 4)
+  ret += n->array[0];
+#endif
+
+  return ret;
+}
+
+
+void bignum_from_string(struct bn* n, char* str, int nbytes)
+{
+  require(n, "n is null");
+  require(str, "str is null");
+  require(nbytes > 0, "nbytes must be positive");
+  require((nbytes & 1) == 0, "string format must be in hex -> equal number of bytes");
+
+  bignum_init(n);
+
+  DTYPE tmp;                        /* DTYPE is defined in bn.h - uint{8,16,32,64}_t */
+  int i = nbytes - (2 * WORD_SIZE); /* index into string */
+  int j = 0;                        /* index into array */
+
+  /* reading last hex-byte "MSB" from string first -> big endian */
+  /* MSB ~= most significant byte / block ? :) */
+  while (i >= 0)
+  {
+    tmp = 0;
+    sscanf(&str[i], SSCANF_FORMAT_STR, &tmp);
+    n->array[j] = tmp;
+    i -= (2 * WORD_SIZE); /* step WORD_SIZE hex-byte(s) back in the string. */
+    j += 1;               /* step one element forward in the array. */
+  }
+}
+
+
+void bignum_to_string(struct bn* n, char* str, int nbytes)
+{
+  require(n, "n is null");
+  require(str, "str is null");
+  require(nbytes > 0, "nbytes must be positive");
+  require((nbytes & 1) == 0, "string format must be in hex -> equal number of bytes");
+
+  int j = BN_ARRAY_SIZE - 1; /* index into array - reading "MSB" first -> big-endian */
+  int i = 0;                 /* index into string representation. */
+
+  /* reading last array-element "MSB" first -> big endian */
+  while ((j >= 0) && (nbytes > (i + 1)))
+  {
+    sprintf(&str[i], SPRINTF_FORMAT_STR, n->array[j]);
+    i += (2 * WORD_SIZE); /* step WORD_SIZE hex-byte(s) forward in the string. */
+    j -= 1;               /* step one element back in the array.
*/ + } + + /* Count leading zeros: */ + j = 0; + while (str[j] == '0') + { + j += 1; + } + + /* Move string j places ahead, effectively skipping leading zeros */ + for (i = 0; i < (nbytes - j); ++i) + { + str[i] = str[i + j]; + } + + /* Zero-terminate string */ + str[i] = 0; +} + + +void bignum_dec(struct bn* n) +{ + require(n, "n is null"); + + DTYPE tmp; /* copy of n */ + DTYPE res; + + int i; + for (i = 0; i < BN_ARRAY_SIZE; ++i) + { + tmp = n->array[i]; + res = tmp - 1; + n->array[i] = res; + + if (!(res > tmp)) + { + break; + } + } +} + + +void bignum_inc(struct bn* n) +{ + require(n, "n is null"); + + DTYPE res; + DTYPE_TMP tmp; /* copy of n */ + + int i; + for (i = 0; i < BN_ARRAY_SIZE; ++i) + { + tmp = n->array[i]; + res = tmp + 1; + n->array[i] = res; + + if (res > tmp) + { + break; + } + } +} + + +void bignum_add(struct bn* a, struct bn* b, struct bn* c) +{ + require(a, "a is null"); + require(b, "b is null"); + require(c, "c is null"); + + DTYPE_TMP tmp; + int carry = 0; + int i; + for (i = 0; i < BN_ARRAY_SIZE; ++i) + { + tmp = (DTYPE_TMP)a->array[i] + b->array[i] + carry; + carry = (tmp > MAX_VAL); + c->array[i] = (tmp & MAX_VAL); + } +} + + +void bignum_sub(struct bn* a, struct bn* b, struct bn* c) +{ + require(a, "a is null"); + require(b, "b is null"); + require(c, "c is null"); + + DTYPE_TMP res; + DTYPE_TMP tmp1; + DTYPE_TMP tmp2; + int borrow = 0; + int i; + for (i = 0; i < BN_ARRAY_SIZE; ++i) + { + tmp1 = (DTYPE_TMP)a->array[i] + (MAX_VAL + 1); /* + number_base */ + tmp2 = (DTYPE_TMP)b->array[i] + borrow;; + res = (tmp1 - tmp2); + c->array[i] = (DTYPE)(res & MAX_VAL); /* "modulo number_base" == "% (number_base - 1)" if number_base is 2^N */ + borrow = (res <= MAX_VAL); + } +} + + +void bignum_mul(struct bn* a, struct bn* b, struct bn* c) +{ + require(a, "a is null"); + require(b, "b is null"); + require(c, "c is null"); + + struct bn row; + struct bn tmp; + int i, j; + + bignum_init(c); + + for (i = 0; i < BN_ARRAY_SIZE; ++i) + { + bignum_init(&row); + + for (j = 0; j < BN_ARRAY_SIZE; ++j) + { + if (i + j < BN_ARRAY_SIZE) + { + bignum_init(&tmp); + DTYPE_TMP intermediate = ((DTYPE_TMP)a->array[i] * (DTYPE_TMP)b->array[j]); + bignum_from_int(&tmp, intermediate); + _lshift_word(&tmp, i + j); + bignum_add(&tmp, &row, &row); + } + } + bignum_add(c, &row, c); + } +} + + +void bignum_div(struct bn* a, struct bn* b, struct bn* c) +{ + require(a, "a is null"); + require(b, "b is null"); + require(c, "c is null"); + + struct bn current; + struct bn denom; + struct bn tmp; + + bignum_from_int(¤t, 1); // int current = 1; + bignum_assign(&denom, b); // denom = b + bignum_assign(&tmp, a); // tmp = a + + const DTYPE_TMP half_max = 1 + (DTYPE_TMP)(MAX_VAL / 2); + bool overflow = false; + while (bignum_cmp(&denom, a) != LARGER) // while (denom <= a) { + { + if (denom.array[BN_ARRAY_SIZE - 1] >= half_max) + { + overflow = true; + break; + } + _lshift_one_bit(¤t); // current <<= 1; + _lshift_one_bit(&denom); // denom <<= 1; + } + if (!overflow) + { + _rshift_one_bit(&denom); // denom >>= 1; + _rshift_one_bit(¤t); // current >>= 1; + } + bignum_init(c); // int answer = 0; + + while (!bignum_is_zero(¤t)) // while (current != 0) + { + if (bignum_cmp(&tmp, &denom) != SMALLER) // if (dividend >= denom) + { + bignum_sub(&tmp, &denom, &tmp); // dividend -= denom; + bignum_or(c, ¤t, c); // answer |= current; + } + _rshift_one_bit(¤t); // current >>= 1; + _rshift_one_bit(&denom); // denom >>= 1; + } // return answer; +} + + +void bignum_lshift(struct bn* a, struct bn* b, int nbits) +{ + 
require(a, "a is null"); + require(b, "b is null"); + require(nbits >= 0, "no negative shifts"); + + bignum_assign(b, a); + /* Handle shift in multiples of word-size */ + const int nbits_pr_word = (WORD_SIZE * 8); + int nwords = nbits / nbits_pr_word; + if (nwords != 0) + { + _lshift_word(b, nwords); + nbits -= (nwords * nbits_pr_word); + } + + if (nbits != 0) + { + int i; + for (i = (BN_ARRAY_SIZE - 1); i > 0; --i) + { + b->array[i] = (b->array[i] << nbits) | (b->array[i - 1] >> ((8 * WORD_SIZE) - nbits)); + } + b->array[i] <<= nbits; + } +} + + +void bignum_rshift(struct bn* a, struct bn* b, int nbits) +{ + require(a, "a is null"); + require(b, "b is null"); + require(nbits >= 0, "no negative shifts"); + + bignum_assign(b, a); + /* Handle shift in multiples of word-size */ + const int nbits_pr_word = (WORD_SIZE * 8); + int nwords = nbits / nbits_pr_word; + if (nwords != 0) + { + _rshift_word(b, nwords); + nbits -= (nwords * nbits_pr_word); + } + + if (nbits != 0) + { + int i; + for (i = 0; i < (BN_ARRAY_SIZE - 1); ++i) + { + b->array[i] = (b->array[i] >> nbits) | (b->array[i + 1] << ((8 * WORD_SIZE) - nbits)); + } + b->array[i] >>= nbits; + } + +} + + +void bignum_mod(struct bn* a, struct bn* b, struct bn* c) +{ + /* + Take divmod and throw away div part + */ + require(a, "a is null"); + require(b, "b is null"); + require(c, "c is null"); + + struct bn tmp; + + bignum_divmod(a,b,&tmp,c); +} + +void bignum_divmod(struct bn* a, struct bn* b, struct bn* c, struct bn* d) +{ + /* + Puts a%b in d + and a/b in c + + mod(a,b) = a - ((a / b) * b) + + example: + mod(8, 3) = 8 - ((8 / 3) * 3) = 2 + */ + require(a, "a is null"); + require(b, "b is null"); + require(c, "c is null"); + + struct bn tmp; + + /* c = (a / b) */ + bignum_div(a, b, c); + + /* tmp = (c * b) */ + bignum_mul(c, b, &tmp); + + /* c = a - tmp */ + bignum_sub(a, &tmp, d); +} + + +void bignum_and(struct bn* a, struct bn* b, struct bn* c) +{ + require(a, "a is null"); + require(b, "b is null"); + require(c, "c is null"); + + int i; + for (i = 0; i < BN_ARRAY_SIZE; ++i) + { + c->array[i] = (a->array[i] & b->array[i]); + } +} + + +void bignum_or(struct bn* a, struct bn* b, struct bn* c) +{ + require(a, "a is null"); + require(b, "b is null"); + require(c, "c is null"); + + int i; + for (i = 0; i < BN_ARRAY_SIZE; ++i) + { + c->array[i] = (a->array[i] | b->array[i]); + } +} + + +void bignum_xor(struct bn* a, struct bn* b, struct bn* c) +{ + require(a, "a is null"); + require(b, "b is null"); + require(c, "c is null"); + + int i; + for (i = 0; i < BN_ARRAY_SIZE; ++i) + { + c->array[i] = (a->array[i] ^ b->array[i]); + } +} + + +int bignum_cmp(struct bn* a, struct bn* b) +{ + require(a, "a is null"); + require(b, "b is null"); + + int i = BN_ARRAY_SIZE; + do + { + i -= 1; /* Decrement first, to start with last array element */ + if (a->array[i] > b->array[i]) + { + return LARGER; + } + else if (a->array[i] < b->array[i]) + { + return SMALLER; + } + } + while (i != 0); + + return EQUAL; +} + + +int bignum_is_zero(struct bn* n) +{ + require(n, "n is null"); + + int i; + for (i = 0; i < BN_ARRAY_SIZE; ++i) + { + if (n->array[i]) + { + return 0; + } + } + + return 1; +} + + +void bignum_pow(struct bn* a, struct bn* b, struct bn* c) +{ + require(a, "a is null"); + require(b, "b is null"); + require(c, "c is null"); + + struct bn tmp; + + bignum_init(c); + + if (bignum_cmp(b, c) == EQUAL) + { + /* Return 1 when exponent is 0 -- n^0 = 1 */ + bignum_inc(c); + } + else + { + struct bn bcopy; + bignum_assign(&bcopy, b); + + /* Copy a -> tmp */ + 
bignum_assign(&tmp, a); + + bignum_dec(&bcopy); + + /* Begin summing products: */ + while (!bignum_is_zero(&bcopy)) + { + + /* c = tmp * tmp */ + bignum_mul(&tmp, a, c); + /* Decrement b by one */ + bignum_dec(&bcopy); + + bignum_assign(&tmp, c); + } + + /* c = tmp */ + bignum_assign(c, &tmp); + } +} + +void bignum_isqrt(struct bn *a, struct bn* b) +{ + require(a, "a is null"); + require(b, "b is null"); + + struct bn low, high, mid, tmp; + + bignum_init(&low); + bignum_assign(&high, a); + bignum_rshift(&high, &mid, 1); + bignum_inc(&mid); + + while (bignum_cmp(&high, &low) > 0) + { + bignum_mul(&mid, &mid, &tmp); + if (bignum_cmp(&tmp, a) > 0) + { + bignum_assign(&high, &mid); + bignum_dec(&high); + } + else + { + bignum_assign(&low, &mid); + } + bignum_sub(&high,&low,&mid); + _rshift_one_bit(&mid); + bignum_add(&low,&mid,&mid); + bignum_inc(&mid); + } + bignum_assign(b,&low); +} + + +void bignum_assign(struct bn* dst, struct bn* src) +{ + require(dst, "dst is null"); + require(src, "src is null"); + + int i; + for (i = 0; i < BN_ARRAY_SIZE; ++i) + { + dst->array[i] = src->array[i]; + } +} + + +/* Private / Static functions. */ +static void _rshift_word(struct bn* a, int nwords) +{ + /* Naive method: */ + require(a, "a is null"); + require(nwords >= 0, "no negative shifts"); + + int i; + if (nwords >= BN_ARRAY_SIZE) + { + for (i = 0; i < BN_ARRAY_SIZE; ++i) + { + a->array[i] = 0; + } + return; + } + + for (i = 0; i < BN_ARRAY_SIZE - nwords; ++i) + { + a->array[i] = a->array[i + nwords]; + } + for (; i < BN_ARRAY_SIZE; ++i) + { + a->array[i] = 0; + } +} + + +static void _lshift_word(struct bn* a, int nwords) +{ + require(a, "a is null"); + require(nwords >= 0, "no negative shifts"); + + int i; + /* Shift whole words */ + for (i = (BN_ARRAY_SIZE - 1); i >= nwords; --i) + { + a->array[i] = a->array[i - nwords]; + } + /* Zero pad shifted words. */ + for (; i >= 0; --i) + { + a->array[i] = 0; + } +} + + +static void _lshift_one_bit(struct bn* a) +{ + require(a, "a is null"); + + int i; + for (i = (BN_ARRAY_SIZE - 1); i > 0; --i) + { + a->array[i] = (a->array[i] << 1) | (a->array[i - 1] >> ((8 * WORD_SIZE) - 1)); + } + a->array[0] <<= 1; +} + + +static void _rshift_one_bit(struct bn* a) +{ + require(a, "a is null"); + + int i; + for (i = 0; i < (BN_ARRAY_SIZE - 1); ++i) + { + a->array[i] = (a->array[i] >> 1) | (a->array[i + 1] << ((8 * WORD_SIZE) - 1)); + } + a->array[BN_ARRAY_SIZE - 1] >>= 1; +} + + diff --git a/v_windows/v/old/thirdparty/bignum/bn.h b/v_windows/v/old/thirdparty/bignum/bn.h new file mode 100644 index 0000000..f82676f --- /dev/null +++ b/v_windows/v/old/thirdparty/bignum/bn.h @@ -0,0 +1,123 @@ +#ifndef __BIGNUM_H__ +#define __BIGNUM_H__ +/* + +Big number library - arithmetic on multiple-precision unsigned integers. + +This library is an implementation of arithmetic on arbitrarily large integers. + +The difference between this and other implementations, is that the data structure +has optimal memory utilization (i.e. a 1024 bit integer takes up 128 bytes RAM), +and all memory is allocated statically: no dynamic allocation for better or worse. + +Primary goals are correctness, clarity of code and clean, portable implementation. +Secondary goal is a memory footprint small enough to make it suitable for use in +embedded applications. + + +The current state is correct functionality and adequate performance. +There may well be room for performance-optimizations and improvements. 
+
+*/
+
+#include <stdint.h>
+#include <assert.h>
+
+
+/* This macro defines the word size in bytes of the array that constitutes the big-number data structure. */
+#ifndef WORD_SIZE
+  #define WORD_SIZE 4
+#endif
+
+/* Size of big-numbers in bytes */
+#define BN_ARRAY_SIZE    (128 / WORD_SIZE)
+
+
+/* Here comes the compile-time specialization for how large the underlying array size should be. */
+/* The choices are 1, 2 and 4 bytes in size with uint32, uint64 for WORD_SIZE==4, as temporary. */
+#ifndef WORD_SIZE
+  #error Must define WORD_SIZE to be 1, 2, 4
+#elif (WORD_SIZE == 1)
+  /* Data type of array in structure */
+  #define DTYPE                    uint8_t
+  /* bitmask for getting MSB */
+  #define DTYPE_MSB                ((DTYPE_TMP)(0x80))
+  /* Data-type larger than DTYPE, for holding intermediate results of calculations */
+  #define DTYPE_TMP                uint32_t
+  /* sprintf format string */
+  #define SPRINTF_FORMAT_STR       "%.02x"
+  #define SSCANF_FORMAT_STR        "%2hhx"
+  /* Max value of integer type */
+  #define MAX_VAL                  ((DTYPE_TMP)0xFF)
+#elif (WORD_SIZE == 2)
+  #define DTYPE                    uint16_t
+  #define DTYPE_TMP                uint32_t
+  #define DTYPE_MSB                ((DTYPE_TMP)(0x8000))
+  #define SPRINTF_FORMAT_STR       "%.04x"
+  #define SSCANF_FORMAT_STR        "%4hx"
+  #define MAX_VAL                  ((DTYPE_TMP)0xFFFF)
+#elif (WORD_SIZE == 4)
+  #define DTYPE                    uint32_t
+  #define DTYPE_TMP                uint64_t
+  #define DTYPE_MSB                ((DTYPE_TMP)(0x80000000))
+  #define SPRINTF_FORMAT_STR       "%.08x"
+  #define SSCANF_FORMAT_STR        "%8x"
+  #define MAX_VAL                  ((DTYPE_TMP)0xFFFFFFFF)
+#endif
+#ifndef DTYPE
+  #error DTYPE must be defined to uint8_t, uint16_t uint32_t or whatever
+#endif
+
+
+/* Custom assert macro - easy to disable */
+#define require(p, msg) assert(p && #msg)
+
+
+/* Data-holding structure: array of DTYPEs */
+struct bn
+{
+  DTYPE array[BN_ARRAY_SIZE];
+};
+
+
+
+/* Tokens returned by bignum_cmp() for value comparison */
+enum { SMALLER = -1, EQUAL = 0, LARGER = 1 };
+
+
+
+/* Initialization functions: */
+void bignum_init(struct bn* n);
+void bignum_from_int(struct bn* n, DTYPE_TMP i);
+int  bignum_to_int(struct bn* n);
+void bignum_from_string(struct bn* n, char* str, int nbytes);
+void bignum_to_string(struct bn* n, char* str, int maxsize);
+
+/* Basic arithmetic operations: */
+void bignum_add(struct bn* a, struct bn* b, struct bn* c); /* c = a + b */
+void bignum_sub(struct bn* a, struct bn* b, struct bn* c); /* c = a - b */
+void bignum_mul(struct bn* a, struct bn* b, struct bn* c); /* c = a * b */
+void bignum_div(struct bn* a, struct bn* b, struct bn* c); /* c = a / b */
+void bignum_mod(struct bn* a, struct bn* b, struct bn* c); /* c = a % b */
+void bignum_divmod(struct bn* a, struct bn* b, struct bn* c, struct bn* d); /* c = a/b, d = a%b */
+
+/* Bitwise operations: */
+void bignum_and(struct bn* a, struct bn* b, struct bn* c); /* c = a & b */
+void bignum_or(struct bn* a, struct bn* b, struct bn* c);  /* c = a | b */
+void bignum_xor(struct bn* a, struct bn* b, struct bn* c); /* c = a ^ b */
+void bignum_lshift(struct bn* a, struct bn* b, int nbits); /* b = a << nbits */
+void bignum_rshift(struct bn* a, struct bn* b, int nbits); /* b = a >> nbits */
+
+/* Special operators and comparison */
+int  bignum_cmp(struct bn* a, struct bn* b);               /* Compare: returns LARGER, EQUAL or SMALLER */
+int  bignum_is_zero(struct bn* n);                         /* For comparison with zero */
+void bignum_inc(struct bn* n);                             /* Increment: add one to n */
+void bignum_dec(struct bn* n);                             /* Decrement: subtract one from n */
+void bignum_pow(struct bn* a, struct bn* b, struct bn* c); /* Calculate a^b -- e.g. 2^10 => 1024 */
+void bignum_isqrt(struct bn* a, struct bn* b);             /* Integer square root -- e.g. isqrt(5) => 2*/
+void bignum_assign(struct bn* dst, struct bn* src);        /* Copy src into dst -- dst := src */
+
+
+#endif /* #ifndef __BIGNUM_H__ */
+
+
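The header above is the whole public surface of the vendored bignum library: every operation reads fixed-size struct bn operands and writes into a caller-provided result, so nothing is ever heap-allocated. The short standalone C program below is an illustrative usage sketch only, not part of the vendored files; it assumes it is compiled and linked together with bn.c, uses only the functions declared above, and the operand values and buffer size are arbitrary examples.

#include <stdio.h>
#include "bn.h"

int main(void)
{
  struct bn a, b, product;
  char hex[8192]; /* bignum_to_string renders hex; 256 digits fit easily here */

  /* all operands live on the stack - the library never allocates */
  bignum_from_int(&a, 123456789);
  bignum_from_int(&b, 987654321);

  /* product = a * b */
  bignum_mul(&a, &b, &product);

  /* render the result as a hexadecimal string (leading zeros stripped) */
  bignum_to_string(&product, hex, (int)sizeof(hex));
  printf("123456789 * 987654321 = 0x%s\n", hex);

  return 0;
}

Because struct bn is 128 bytes regardless of the value it holds (the "1024 bit integer takes up 128 bytes RAM" noted in the file header), the same calling pattern scales to much larger operands without changing the caller.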
+#endif + +CJSON_PUBLIC(const char*) cJSON_Version(void) +{ + static char version[15]; + sprintf(version, "%i.%i.%i", CJSON_VERSION_MAJOR, CJSON_VERSION_MINOR, CJSON_VERSION_PATCH); + + return version; +} + +/* Case insensitive string comparison, doesn't consider two NULL pointers equal though */ +static int case_insensitive_strcmp(const unsigned char *string1, const unsigned char *string2) +{ + if ((string1 == NULL) || (string2 == NULL)) + { + return 1; + } + + if (string1 == string2) + { + return 0; + } + + for(; tolower(*string1) == tolower(*string2); (void)string1++, string2++) + { + if (*string1 == '\0') + { + return 0; + } + } + + return tolower(*string1) - tolower(*string2); +} + +typedef struct internal_hooks +{ + void *(CJSON_CDECL *allocate)(size_t size); + void (CJSON_CDECL *deallocate)(void *pointer); + void *(CJSON_CDECL *reallocate)(void *pointer, size_t size); +} internal_hooks; + +#if defined(_MSC_VER) +/* work around MSVC error C2322: '...' address of dillimport '...' is not static */ +static void * CJSON_CDECL internal_malloc(size_t size) +{ + return malloc(size); +} +static void CJSON_CDECL internal_free(void *pointer) +{ + free(pointer); +} +static void * CJSON_CDECL internal_realloc(void *pointer, size_t size) +{ + return realloc(pointer, size); +} +#else +#define internal_malloc malloc +#define internal_free free +#define internal_realloc realloc +#endif + +/* strlen of character literals resolved at compile time */ +#define static_strlen(string_literal) (sizeof(string_literal) - sizeof("")) + +static internal_hooks global_hooks = { internal_malloc, internal_free, internal_realloc }; + +static unsigned char* cJSON_strdup(const unsigned char* string, const internal_hooks * const hooks) +{ + size_t length = 0; + unsigned char *copy = NULL; + + if (string == NULL) + { + return NULL; + } + + length = strlen((const char*)string) + sizeof(""); + copy = (unsigned char*)hooks->allocate(length); + if (copy == NULL) + { + return NULL; + } + memcpy(copy, string, length); + + return copy; +} + +CJSON_PUBLIC(void) cJSON_InitHooks(cJSON_Hooks* hooks) +{ + if (hooks == NULL) + { + /* Reset hooks */ + global_hooks.allocate = malloc; + global_hooks.deallocate = free; + global_hooks.reallocate = realloc; + return; + } + + global_hooks.allocate = malloc; + if (hooks->malloc_fn != NULL) + { + global_hooks.allocate = hooks->malloc_fn; + } + + global_hooks.deallocate = free; + if (hooks->free_fn != NULL) + { + global_hooks.deallocate = hooks->free_fn; + } + + /* use realloc only if both free and malloc are used */ + global_hooks.reallocate = NULL; + if ((global_hooks.allocate == malloc) && (global_hooks.deallocate == free)) + { + global_hooks.reallocate = realloc; + } +} + +/* Internal constructor. */ +static cJSON *cJSON_New_Item(const internal_hooks * const hooks) +{ + cJSON* node = (cJSON*)hooks->allocate(sizeof(cJSON)); + if (node) + { + memset(node, '\0', sizeof(cJSON)); + } + + return node; +} + +/* Delete a cJSON structure. 
*/ +CJSON_PUBLIC(void) cJSON_Delete(cJSON *item) +{ + cJSON *next = NULL; + while (item != NULL) + { + next = item->next; + if (!(item->type & cJSON_IsReference) && (item->child != NULL)) + { + cJSON_Delete(item->child); + } + if (!(item->type & cJSON_IsReference) && (item->valuestring != NULL)) + { + global_hooks.deallocate(item->valuestring); + } + if (!(item->type & cJSON_StringIsConst) && (item->string != NULL)) + { + global_hooks.deallocate(item->string); + } + global_hooks.deallocate(item); + item = next; + } +} + +/* get the decimal point character of the current locale */ +static unsigned char get_decimal_point(void) +{ +#ifdef ENABLE_LOCALES + struct lconv *lconv = localeconv(); + return (unsigned char) lconv->decimal_point[0]; +#else + return '.'; +#endif +} + +typedef struct +{ + const unsigned char *content; + size_t length; + size_t offset; + size_t depth; /* How deeply nested (in arrays/objects) is the input at the current offset. */ + internal_hooks hooks; +} parse_buffer; + +/* check if the given size is left to read in a given parse buffer (starting with 1) */ +#define can_read(buffer, size) ((buffer != NULL) && (((buffer)->offset + size) <= (buffer)->length)) +/* check if the buffer can be accessed at the given index (starting with 0) */ +#define can_access_at_index(buffer, index) ((buffer != NULL) && (((buffer)->offset + index) < (buffer)->length)) +#define cannot_access_at_index(buffer, index) (!can_access_at_index(buffer, index)) +/* get a pointer to the buffer at the position */ +#define buffer_at_offset(buffer) ((buffer)->content + (buffer)->offset) + +/* Parse the input text to generate a number, and populate the result into item. */ +static cJSON_bool parse_number(cJSON * const item, parse_buffer * const input_buffer) +{ + double number = 0; + unsigned char *after_end = NULL; + unsigned char number_c_string[64]; + unsigned char decimal_point = get_decimal_point(); + size_t i = 0; + + if ((input_buffer == NULL) || (input_buffer->content == NULL)) + { + return false; + } + + /* copy the number into a temporary buffer and replace '.' 
with the decimal point + * of the current locale (for strtod) + * This also takes care of '\0' not necessarily being available for marking the end of the input */ + for (i = 0; (i < (sizeof(number_c_string) - 1)) && can_access_at_index(input_buffer, i); i++) + { + switch (buffer_at_offset(input_buffer)[i]) + { + case '0': + case '1': + case '2': + case '3': + case '4': + case '5': + case '6': + case '7': + case '8': + case '9': + case '+': + case '-': + case 'e': + case 'E': + number_c_string[i] = buffer_at_offset(input_buffer)[i]; + break; + + case '.': + number_c_string[i] = decimal_point; + break; + + default: + goto loop_end; + } + } +loop_end: + number_c_string[i] = '\0'; + + number = strtod((const char*)number_c_string, (char**)&after_end); + if (number_c_string == after_end) + { + return false; /* parse_error */ + } + + item->valuedouble = number; + + /* use saturation in case of overflow */ + if (number >= INT_MAX) + { + item->valueint = INT_MAX; + } + else if (number <= (double)INT_MIN) + { + item->valueint = INT_MIN; + } + else + { + item->valueint = (int)number; + } + + item->type = cJSON_Number; + + input_buffer->offset += (size_t)(after_end - number_c_string); + return true; +} + +/* don't ask me, but the original cJSON_SetNumberValue returns an integer or double */ +CJSON_PUBLIC(double) cJSON_SetNumberHelper(cJSON *object, double number) +{ + if (number >= INT_MAX) + { + object->valueint = INT_MAX; + } + else if (number <= (double)INT_MIN) + { + object->valueint = INT_MIN; + } + else + { + object->valueint = (int)number; + } + + return object->valuedouble = number; +} + +typedef struct +{ + unsigned char *buffer; + size_t length; + size_t offset; + size_t depth; /* current nesting depth (for formatted printing) */ + cJSON_bool noalloc; + cJSON_bool format; /* is this print a formatted print */ + internal_hooks hooks; +} printbuffer; + +/* realloc printbuffer if necessary to have at least "needed" bytes more */ +static unsigned char* ensure(printbuffer * const p, size_t needed) +{ + unsigned char *newbuffer = NULL; + size_t newsize = 0; + + if ((p == NULL) || (p->buffer == NULL)) + { + return NULL; + } + + if ((p->length > 0) && (p->offset >= p->length)) + { + /* make sure that offset is valid */ + return NULL; + } + + if (needed > INT_MAX) + { + /* sizes bigger than INT_MAX are currently not supported */ + return NULL; + } + + needed += p->offset + 1; + if (needed <= p->length) + { + return p->buffer + p->offset; + } + + if (p->noalloc) { + return NULL; + } + + /* calculate new buffer size */ + if (needed > (INT_MAX / 2)) + { + /* overflow of int, use INT_MAX if possible */ + if (needed <= INT_MAX) + { + newsize = INT_MAX; + } + else + { + return NULL; + } + } + else + { + newsize = needed * 2; + } + + if (p->hooks.reallocate != NULL) + { + /* reallocate with realloc if available */ + newbuffer = (unsigned char*)p->hooks.reallocate(p->buffer, newsize); + if (newbuffer == NULL) + { + p->hooks.deallocate(p->buffer); + p->length = 0; + p->buffer = NULL; + + return NULL; + } + } + else + { + /* otherwise reallocate manually */ + newbuffer = (unsigned char*)p->hooks.allocate(newsize); + if (!newbuffer) + { + p->hooks.deallocate(p->buffer); + p->length = 0; + p->buffer = NULL; + + return NULL; + } + if (newbuffer) + { + memcpy(newbuffer, p->buffer, p->offset + 1); + } + p->hooks.deallocate(p->buffer); + } + p->length = newsize; + p->buffer = newbuffer; + + return newbuffer + p->offset; +} + +/* calculate the new length of the string in a printbuffer and update the offset */ +static 
void update_offset(printbuffer * const buffer) +{ + const unsigned char *buffer_pointer = NULL; + if ((buffer == NULL) || (buffer->buffer == NULL)) + { + return; + } + buffer_pointer = buffer->buffer + buffer->offset; + + buffer->offset += strlen((const char*)buffer_pointer); +} + +/* Render the number nicely from the given item into a string. */ +static cJSON_bool print_number(const cJSON * const item, printbuffer * const output_buffer) +{ + unsigned char *output_pointer = NULL; + double d = item->valuedouble; + int length = 0; + size_t i = 0; + unsigned char number_buffer[26]; /* temporary buffer to print the number into */ + unsigned char decimal_point = get_decimal_point(); + double test; + + if (output_buffer == NULL) + { + return false; + } + + /* This checks for NaN and Infinity */ + if ((d * 0) != 0) + { + length = sprintf((char*)number_buffer, "null"); + } + else + { + /* Try 15 decimal places of precision to avoid nonsignificant nonzero digits */ + length = sprintf((char*)number_buffer, "%1.15g", d); + + /* Check whether the original double can be recovered */ + if ((sscanf((char*)number_buffer, "%lg", &test) != 1) || ((double)test != d)) + { + /* If not, print with 17 decimal places of precision */ + length = sprintf((char*)number_buffer, "%1.17g", d); + } + } + + /* sprintf failed or buffer overrun occurred */ + if ((length < 0) || (length > (int)(sizeof(number_buffer) - 1))) + { + return false; + } + + /* reserve appropriate space in the output */ + output_pointer = ensure(output_buffer, (size_t)length + sizeof("")); + if (output_pointer == NULL) + { + return false; + } + + /* copy the printed number to the output and replace locale + * dependent decimal point with '.' */ + for (i = 0; i < ((size_t)length); i++) + { + if (number_buffer[i] == decimal_point) + { + output_pointer[i] = '.'; + continue; + } + + output_pointer[i] = number_buffer[i]; + } + output_pointer[i] = '\0'; + + output_buffer->offset += (size_t)length; + + return true; +} + +/* parse 4 digit hexadecimal number */ +static unsigned parse_hex4(const unsigned char * const input) +{ + unsigned int h = 0; + size_t i = 0; + + for (i = 0; i < 4; i++) + { + /* parse digit */ + if ((input[i] >= '0') && (input[i] <= '9')) + { + h += (unsigned int) input[i] - '0'; + } + else if ((input[i] >= 'A') && (input[i] <= 'F')) + { + h += (unsigned int) 10 + input[i] - 'A'; + } + else if ((input[i] >= 'a') && (input[i] <= 'f')) + { + h += (unsigned int) 10 + input[i] - 'a'; + } + else /* invalid */ + { + return 0; + } + + if (i < 3) + { + /* shift left to make place for the next nibble */ + h = h << 4; + } + } + + return h; +} + +/* converts a UTF-16 literal to UTF-8 + * A literal can be one or two sequences of the form \uXXXX */ +static unsigned char utf16_literal_to_utf8(const unsigned char * const input_pointer, const unsigned char * const input_end, unsigned char **output_pointer) +{ + long unsigned int codepoint = 0; + unsigned int first_code = 0; + const unsigned char *first_sequence = input_pointer; + unsigned char utf8_length = 0; + unsigned char utf8_position = 0; + unsigned char sequence_length = 0; + unsigned char first_byte_mark = 0; + + if ((input_end - first_sequence) < 6) + { + /* input ends unexpectedly */ + goto fail; + } + + /* get the first utf16 sequence */ + first_code = parse_hex4(first_sequence + 2); + + /* check that the code is valid */ + if (((first_code >= 0xDC00) && (first_code <= 0xDFFF))) + { + goto fail; + } + + /* UTF16 surrogate pair */ + if ((first_code >= 0xD800) && (first_code <= 0xDBFF)) + { + 
const unsigned char *second_sequence = first_sequence + 6; + unsigned int second_code = 0; + sequence_length = 12; /* \uXXXX\uXXXX */ + + if ((input_end - second_sequence) < 6) + { + /* input ends unexpectedly */ + goto fail; + } + + if ((second_sequence[0] != '\\') || (second_sequence[1] != 'u')) + { + /* missing second half of the surrogate pair */ + goto fail; + } + + /* get the second utf16 sequence */ + second_code = parse_hex4(second_sequence + 2); + /* check that the code is valid */ + if ((second_code < 0xDC00) || (second_code > 0xDFFF)) + { + /* invalid second half of the surrogate pair */ + goto fail; + } + + + /* calculate the unicode codepoint from the surrogate pair */ + codepoint = 0x10000 + (((first_code & 0x3FF) << 10) | (second_code & 0x3FF)); + } + else + { + sequence_length = 6; /* \uXXXX */ + codepoint = first_code; + } + + /* encode as UTF-8 + * takes at maximum 4 bytes to encode: + * 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx */ + if (codepoint < 0x80) + { + /* normal ascii, encoding 0xxxxxxx */ + utf8_length = 1; + } + else if (codepoint < 0x800) + { + /* two bytes, encoding 110xxxxx 10xxxxxx */ + utf8_length = 2; + first_byte_mark = 0xC0; /* 11000000 */ + } + else if (codepoint < 0x10000) + { + /* three bytes, encoding 1110xxxx 10xxxxxx 10xxxxxx */ + utf8_length = 3; + first_byte_mark = 0xE0; /* 11100000 */ + } + else if (codepoint <= 0x10FFFF) + { + /* four bytes, encoding 1110xxxx 10xxxxxx 10xxxxxx 10xxxxxx */ + utf8_length = 4; + first_byte_mark = 0xF0; /* 11110000 */ + } + else + { + /* invalid unicode codepoint */ + goto fail; + } + + /* encode as utf8 */ + for (utf8_position = (unsigned char)(utf8_length - 1); utf8_position > 0; utf8_position--) + { + /* 10xxxxxx */ + (*output_pointer)[utf8_position] = (unsigned char)((codepoint | 0x80) & 0xBF); + codepoint >>= 6; + } + /* encode first byte */ + if (utf8_length > 1) + { + (*output_pointer)[0] = (unsigned char)((codepoint | first_byte_mark) & 0xFF); + } + else + { + (*output_pointer)[0] = (unsigned char)(codepoint & 0x7F); + } + + *output_pointer += utf8_length; + + return sequence_length; + +fail: + return 0; +} + +/* Parse the input text into an unescaped cinput, and populate item. 
*/ +static cJSON_bool parse_string(cJSON * const item, parse_buffer * const input_buffer) +{ + const unsigned char *input_pointer = buffer_at_offset(input_buffer) + 1; + const unsigned char *input_end = buffer_at_offset(input_buffer) + 1; + unsigned char *output_pointer = NULL; + unsigned char *output = NULL; + + /* not a string */ + if (buffer_at_offset(input_buffer)[0] != '\"') + { + goto fail; + } + + { + /* calculate approximate size of the output (overestimate) */ + size_t allocation_length = 0; + size_t skipped_bytes = 0; + while (((size_t)(input_end - input_buffer->content) < input_buffer->length) && (*input_end != '\"')) + { + /* is escape sequence */ + if (input_end[0] == '\\') + { + if ((size_t)(input_end + 1 - input_buffer->content) >= input_buffer->length) + { + /* prevent buffer overflow when last input character is a backslash */ + goto fail; + } + skipped_bytes++; + input_end++; + } + input_end++; + } + if (((size_t)(input_end - input_buffer->content) >= input_buffer->length) || (*input_end != '\"')) + { + goto fail; /* string ended unexpectedly */ + } + + /* This is at most how much we need for the output */ + allocation_length = (size_t) (input_end - buffer_at_offset(input_buffer)) - skipped_bytes; + output = (unsigned char*)input_buffer->hooks.allocate(allocation_length + sizeof("")); + if (output == NULL) + { + goto fail; /* allocation failure */ + } + } + + output_pointer = output; + /* loop through the string literal */ + while (input_pointer < input_end) + { + if (*input_pointer != '\\') + { + *output_pointer++ = *input_pointer++; + } + /* escape sequence */ + else + { + unsigned char sequence_length = 2; + if ((input_end - input_pointer) < 1) + { + goto fail; + } + + switch (input_pointer[1]) + { + case 'b': + *output_pointer++ = '\b'; + break; + case 'f': + *output_pointer++ = '\f'; + break; + case 'n': + *output_pointer++ = '\n'; + break; + case 'r': + *output_pointer++ = '\r'; + break; + case 't': + *output_pointer++ = '\t'; + break; + case '\"': + case '\\': + case '/': + *output_pointer++ = input_pointer[1]; + break; + + /* UTF-16 literal */ + case 'u': + sequence_length = utf16_literal_to_utf8(input_pointer, input_end, &output_pointer); + if (sequence_length == 0) + { + /* failed to convert UTF16-literal to UTF-8 */ + goto fail; + } + break; + + default: + goto fail; + } + input_pointer += sequence_length; + } + } + + /* zero terminate the output */ + *output_pointer = '\0'; + + item->type = cJSON_String; + item->valuestring = (char*)output; + + input_buffer->offset = (size_t) (input_end - input_buffer->content); + input_buffer->offset++; + + return true; + +fail: + if (output != NULL) + { + input_buffer->hooks.deallocate(output); + } + + if (input_pointer != NULL) + { + input_buffer->offset = (size_t)(input_pointer - input_buffer->content); + } + + return false; +} + +/* Render the cstring provided to an escaped version that can be printed. 
*/ +static cJSON_bool print_string_ptr(const unsigned char * const input, printbuffer * const output_buffer) +{ + const unsigned char *input_pointer = NULL; + unsigned char *output = NULL; + unsigned char *output_pointer = NULL; + size_t output_length = 0; + /* numbers of additional characters needed for escaping */ + size_t escape_characters = 0; + + if (output_buffer == NULL) + { + return false; + } + + /* empty string */ + if (input == NULL) + { + output = ensure(output_buffer, sizeof("\"\"")); + if (output == NULL) + { + return false; + } + strcpy((char*)output, "\"\""); + + return true; + } + + /* set "flag" to 1 if something needs to be escaped */ + for (input_pointer = input; *input_pointer; input_pointer++) + { + switch (*input_pointer) + { + case '\"': + case '\\': + case '\b': + case '\f': + case '\n': + case '\r': + case '\t': + /* one character escape sequence */ + escape_characters++; + break; + default: + if (*input_pointer < 32) + { + /* UTF-16 escape sequence uXXXX */ + escape_characters += 5; + } + break; + } + } + output_length = (size_t)(input_pointer - input) + escape_characters; + + output = ensure(output_buffer, output_length + sizeof("\"\"")); + if (output == NULL) + { + return false; + } + + /* no characters have to be escaped */ + if (escape_characters == 0) + { + output[0] = '\"'; + memcpy(output + 1, input, output_length); + output[output_length + 1] = '\"'; + output[output_length + 2] = '\0'; + + return true; + } + + output[0] = '\"'; + output_pointer = output + 1; + /* copy the string */ + for (input_pointer = input; *input_pointer != '\0'; (void)input_pointer++, output_pointer++) + { + if ((*input_pointer > 31) && (*input_pointer != '\"') && (*input_pointer != '\\')) + { + /* normal character, copy */ + *output_pointer = *input_pointer; + } + else + { + /* character needs to be escaped */ + *output_pointer++ = '\\'; + switch (*input_pointer) + { + case '\\': + *output_pointer = '\\'; + break; + case '\"': + *output_pointer = '\"'; + break; + case '\b': + *output_pointer = 'b'; + break; + case '\f': + *output_pointer = 'f'; + break; + case '\n': + *output_pointer = 'n'; + break; + case '\r': + *output_pointer = 'r'; + break; + case '\t': + *output_pointer = 't'; + break; + default: + /* escape and print as unicode codepoint */ + sprintf((char*)output_pointer, "u%04x", *input_pointer); + output_pointer += 4; + break; + } + } + } + output[output_length + 1] = '\"'; + output[output_length + 2] = '\0'; + + return true; +} + +/* Invoke print_string_ptr (which is useful) on an item. */ +static cJSON_bool print_string(const cJSON * const item, printbuffer * const p) +{ + return print_string_ptr((unsigned char*)item->valuestring, p); +} + +/* Predeclare these prototypes. 
*/ +static cJSON_bool parse_value(cJSON * const item, parse_buffer * const input_buffer); +static cJSON_bool print_value(const cJSON * const item, printbuffer * const output_buffer); +static cJSON_bool parse_array(cJSON * const item, parse_buffer * const input_buffer); +static cJSON_bool print_array(const cJSON * const item, printbuffer * const output_buffer); +static cJSON_bool parse_object(cJSON * const item, parse_buffer * const input_buffer); +static cJSON_bool print_object(const cJSON * const item, printbuffer * const output_buffer); + +/* Utility to jump whitespace and cr/lf */ +static parse_buffer *buffer_skip_whitespace(parse_buffer * const buffer) +{ + if ((buffer == NULL) || (buffer->content == NULL)) + { + return NULL; + } + + while (can_access_at_index(buffer, 0) && (buffer_at_offset(buffer)[0] <= 32)) + { + buffer->offset++; + } + + if (buffer->offset == buffer->length) + { + buffer->offset--; + } + + return buffer; +} + +/* skip the UTF-8 BOM (byte order mark) if it is at the beginning of a buffer */ +static parse_buffer *skip_utf8_bom(parse_buffer * const buffer) +{ + if ((buffer == NULL) || (buffer->content == NULL) || (buffer->offset != 0)) + { + return NULL; + } + + if (can_access_at_index(buffer, 4) && (strncmp((const char*)buffer_at_offset(buffer), "\xEF\xBB\xBF", 3) == 0)) + { + buffer->offset += 3; + } + + return buffer; +} + +/* Parse an object - create a new root, and populate. */ +CJSON_PUBLIC(cJSON *) cJSON_ParseWithOpts(const char *value, const char **return_parse_end, cJSON_bool require_null_terminated) +{ + parse_buffer buffer = { 0, 0, 0, 0, { 0, 0, 0 } }; + cJSON *item = NULL; + + /* reset error position */ + global_error.json = NULL; + global_error.position = 0; + + if (value == NULL) + { + goto fail; + } + + buffer.content = (const unsigned char*)value; + buffer.length = strlen((const char*)value) + sizeof(""); + buffer.offset = 0; + buffer.hooks = global_hooks; + + item = cJSON_New_Item(&global_hooks); + if (item == NULL) /* memory fail */ + { + goto fail; + } + + if (!parse_value(item, buffer_skip_whitespace(skip_utf8_bom(&buffer)))) + { + /* parse failure. ep is set. */ + goto fail; + } + + /* if we require null-terminated JSON without appended garbage, skip and then check for a null terminator */ + if (require_null_terminated) + { + buffer_skip_whitespace(&buffer); + if ((buffer.offset >= buffer.length) || buffer_at_offset(&buffer)[0] != '\0') + { + goto fail; + } + } + if (return_parse_end) + { + *return_parse_end = (const char*)buffer_at_offset(&buffer); + } + + return item; + +fail: + if (item != NULL) + { + cJSON_Delete(item); + } + + if (value != NULL) + { + error local_error; + local_error.json = (const unsigned char*)value; + local_error.position = 0; + + if (buffer.offset < buffer.length) + { + local_error.position = buffer.offset; + } + else if (buffer.length > 0) + { + local_error.position = buffer.length - 1; + } + + if (return_parse_end != NULL) + { + *return_parse_end = (const char*)local_error.json + local_error.position; + } + + global_error = local_error; + } + + return NULL; +} + +/* Default options for cJSON_Parse */ +CJSON_PUBLIC(cJSON *) cJSON_Parse(const char *value) +{ + return cJSON_ParseWithOpts(value, 0, 0); +} + +#define cjson_min(a, b) ((a < b) ? 
a : b) + +static unsigned char *print(const cJSON * const item, cJSON_bool format, const internal_hooks * const hooks) +{ + static const size_t default_buffer_size = 256; + printbuffer buffer[1]; + unsigned char *printed = NULL; + + memset(buffer, 0, sizeof(buffer)); + + /* create buffer */ + buffer->buffer = (unsigned char*) hooks->allocate(default_buffer_size); + buffer->length = default_buffer_size; + buffer->format = format; + buffer->hooks = *hooks; + if (buffer->buffer == NULL) + { + goto fail; + } + + /* print the value */ + if (!print_value(item, buffer)) + { + goto fail; + } + update_offset(buffer); + + /* check if reallocate is available */ + if (hooks->reallocate != NULL) + { + printed = (unsigned char*) hooks->reallocate(buffer->buffer, buffer->offset + 1); + if (printed == NULL) { + goto fail; + } + buffer->buffer = NULL; + } + else /* otherwise copy the JSON over to a new buffer */ + { + printed = (unsigned char*) hooks->allocate(buffer->offset + 1); + if (printed == NULL) + { + goto fail; + } + memcpy(printed, buffer->buffer, cjson_min(buffer->length, buffer->offset + 1)); + printed[buffer->offset] = '\0'; /* just to be sure */ + + /* free the buffer */ + hooks->deallocate(buffer->buffer); + } + + return printed; + +fail: + if (buffer->buffer != NULL) + { + hooks->deallocate(buffer->buffer); + } + + if (printed != NULL) + { + hooks->deallocate(printed); + } + + return NULL; +} + +/* Render a cJSON item/entity/structure to text. */ +CJSON_PUBLIC(char *) cJSON_Print(const cJSON *item) +{ + return (char*)print(item, true, &global_hooks); +} + +CJSON_PUBLIC(char *) cJSON_PrintUnformatted(const cJSON *item) +{ + return (char*)print(item, false, &global_hooks); +} + +CJSON_PUBLIC(char *) cJSON_PrintBuffered(const cJSON *item, int prebuffer, cJSON_bool fmt) +{ + printbuffer p = { 0, 0, 0, 0, 0, 0, { 0, 0, 0 } }; + + if (prebuffer < 0) + { + return NULL; + } + + p.buffer = (unsigned char*)global_hooks.allocate((size_t)prebuffer); + if (!p.buffer) + { + return NULL; + } + + p.length = (size_t)prebuffer; + p.offset = 0; + p.noalloc = false; + p.format = fmt; + p.hooks = global_hooks; + + if (!print_value(item, &p)) + { + global_hooks.deallocate(p.buffer); + return NULL; + } + + return (char*)p.buffer; +} + +CJSON_PUBLIC(cJSON_bool) cJSON_PrintPreallocated(cJSON *item, char *buf, const int len, const cJSON_bool fmt) +{ + printbuffer p = { 0, 0, 0, 0, 0, 0, { 0, 0, 0 } }; + + if ((len < 0) || (buf == NULL)) + { + return false; + } + + p.buffer = (unsigned char*)buf; + p.length = (size_t)len; + p.offset = 0; + p.noalloc = true; + p.format = fmt; + p.hooks = global_hooks; + + return print_value(item, &p); +} + +/* Parser core - when encountering text, process appropriately. 
*/ +static cJSON_bool parse_value(cJSON * const item, parse_buffer * const input_buffer) +{ + if ((input_buffer == NULL) || (input_buffer->content == NULL)) + { + return false; /* no input */ + } + + /* parse the different types of values */ + /* null */ + if (can_read(input_buffer, 4) && (strncmp((const char*)buffer_at_offset(input_buffer), "null", 4) == 0)) + { + item->type = cJSON_NULL; + input_buffer->offset += 4; + return true; + } + /* false */ + if (can_read(input_buffer, 5) && (strncmp((const char*)buffer_at_offset(input_buffer), "false", 5) == 0)) + { + item->type = cJSON_False; + input_buffer->offset += 5; + return true; + } + /* true */ + if (can_read(input_buffer, 4) && (strncmp((const char*)buffer_at_offset(input_buffer), "true", 4) == 0)) + { + item->type = cJSON_True; + item->valueint = 1; + input_buffer->offset += 4; + return true; + } + /* string */ + if (can_access_at_index(input_buffer, 0) && (buffer_at_offset(input_buffer)[0] == '\"')) + { + return parse_string(item, input_buffer); + } + /* number */ + if (can_access_at_index(input_buffer, 0) && ((buffer_at_offset(input_buffer)[0] == '-') || ((buffer_at_offset(input_buffer)[0] >= '0') && (buffer_at_offset(input_buffer)[0] <= '9')))) + { + return parse_number(item, input_buffer); + } + /* array */ + if (can_access_at_index(input_buffer, 0) && (buffer_at_offset(input_buffer)[0] == '[')) + { + return parse_array(item, input_buffer); + } + /* object */ + if (can_access_at_index(input_buffer, 0) && (buffer_at_offset(input_buffer)[0] == '{')) + { + return parse_object(item, input_buffer); + } + + return false; +} + +/* Render a value to text. */ +static cJSON_bool print_value(const cJSON * const item, printbuffer * const output_buffer) +{ + unsigned char *output = NULL; + + if ((item == NULL) || (output_buffer == NULL)) + { + return false; + } + + switch ((item->type) & 0xFF) + { + case cJSON_NULL: + output = ensure(output_buffer, 5); + if (output == NULL) + { + return false; + } + strcpy((char*)output, "null"); + return true; + + case cJSON_False: + output = ensure(output_buffer, 6); + if (output == NULL) + { + return false; + } + strcpy((char*)output, "false"); + return true; + + case cJSON_True: + output = ensure(output_buffer, 5); + if (output == NULL) + { + return false; + } + strcpy((char*)output, "true"); + return true; + + case cJSON_Number: + return print_number(item, output_buffer); + + case cJSON_Raw: + { + size_t raw_length = 0; + if (item->valuestring == NULL) + { + return false; + } + + raw_length = strlen(item->valuestring) + sizeof(""); + output = ensure(output_buffer, raw_length); + if (output == NULL) + { + return false; + } + memcpy(output, item->valuestring, raw_length); + return true; + } + + case cJSON_String: + return print_string(item, output_buffer); + + case cJSON_Array: + return print_array(item, output_buffer); + + case cJSON_Object: + return print_object(item, output_buffer); + + default: + return false; + } +} + +/* Build an array from input text. 
*/ +static cJSON_bool parse_array(cJSON * const item, parse_buffer * const input_buffer) +{ + cJSON *head = NULL; /* head of the linked list */ + cJSON *current_item = NULL; + + if (input_buffer->depth >= CJSON_NESTING_LIMIT) + { + return false; /* to deeply nested */ + } + input_buffer->depth++; + + if (buffer_at_offset(input_buffer)[0] != '[') + { + /* not an array */ + goto fail; + } + + input_buffer->offset++; + buffer_skip_whitespace(input_buffer); + if (can_access_at_index(input_buffer, 0) && (buffer_at_offset(input_buffer)[0] == ']')) + { + /* empty array */ + goto success; + } + + /* check if we skipped to the end of the buffer */ + if (cannot_access_at_index(input_buffer, 0)) + { + input_buffer->offset--; + goto fail; + } + + /* step back to character in front of the first element */ + input_buffer->offset--; + /* loop through the comma separated array elements */ + do + { + /* allocate next item */ + cJSON *new_item = cJSON_New_Item(&(input_buffer->hooks)); + if (new_item == NULL) + { + goto fail; /* allocation failure */ + } + + /* attach next item to list */ + if (head == NULL) + { + /* start the linked list */ + current_item = head = new_item; + } + else + { + /* add to the end and advance */ + current_item->next = new_item; + new_item->prev = current_item; + current_item = new_item; + } + + /* parse next value */ + input_buffer->offset++; + buffer_skip_whitespace(input_buffer); + if (!parse_value(current_item, input_buffer)) + { + goto fail; /* failed to parse value */ + } + buffer_skip_whitespace(input_buffer); + } + while (can_access_at_index(input_buffer, 0) && (buffer_at_offset(input_buffer)[0] == ',')); + + if (cannot_access_at_index(input_buffer, 0) || buffer_at_offset(input_buffer)[0] != ']') + { + goto fail; /* expected end of array */ + } + +success: + input_buffer->depth--; + + item->type = cJSON_Array; + item->child = head; + + input_buffer->offset++; + + return true; + +fail: + if (head != NULL) + { + cJSON_Delete(head); + } + + return false; +} + +/* Render an array to text */ +static cJSON_bool print_array(const cJSON * const item, printbuffer * const output_buffer) +{ + unsigned char *output_pointer = NULL; + size_t length = 0; + cJSON *current_element = item->child; + + if (output_buffer == NULL) + { + return false; + } + + /* Compose the output array. */ + /* opening square bracket */ + output_pointer = ensure(output_buffer, 1); + if (output_pointer == NULL) + { + return false; + } + + *output_pointer = '['; + output_buffer->offset++; + output_buffer->depth++; + + while (current_element != NULL) + { + if (!print_value(current_element, output_buffer)) + { + return false; + } + update_offset(output_buffer); + if (current_element->next) + { + length = (size_t) (output_buffer->format ? 2 : 1); + output_pointer = ensure(output_buffer, length + 1); + if (output_pointer == NULL) + { + return false; + } + *output_pointer++ = ','; + if(output_buffer->format) + { + *output_pointer++ = ' '; + } + *output_pointer = '\0'; + output_buffer->offset += length; + } + current_element = current_element->next; + } + + output_pointer = ensure(output_buffer, 2); + if (output_pointer == NULL) + { + return false; + } + *output_pointer++ = ']'; + *output_pointer = '\0'; + output_buffer->depth--; + + return true; +} + +/* Build an object from the text. 
*/ +static cJSON_bool parse_object(cJSON * const item, parse_buffer * const input_buffer) +{ + cJSON *head = NULL; /* linked list head */ + cJSON *current_item = NULL; + + if (input_buffer->depth >= CJSON_NESTING_LIMIT) + { + return false; /* to deeply nested */ + } + input_buffer->depth++; + + if (cannot_access_at_index(input_buffer, 0) || (buffer_at_offset(input_buffer)[0] != '{')) + { + goto fail; /* not an object */ + } + + input_buffer->offset++; + buffer_skip_whitespace(input_buffer); + if (can_access_at_index(input_buffer, 0) && (buffer_at_offset(input_buffer)[0] == '}')) + { + goto success; /* empty object */ + } + + /* check if we skipped to the end of the buffer */ + if (cannot_access_at_index(input_buffer, 0)) + { + input_buffer->offset--; + goto fail; + } + + /* step back to character in front of the first element */ + input_buffer->offset--; + /* loop through the comma separated array elements */ + do + { + /* allocate next item */ + cJSON *new_item = cJSON_New_Item(&(input_buffer->hooks)); + if (new_item == NULL) + { + goto fail; /* allocation failure */ + } + + /* attach next item to list */ + if (head == NULL) + { + /* start the linked list */ + current_item = head = new_item; + } + else + { + /* add to the end and advance */ + current_item->next = new_item; + new_item->prev = current_item; + current_item = new_item; + } + + /* parse the name of the child */ + input_buffer->offset++; + buffer_skip_whitespace(input_buffer); + if (!parse_string(current_item, input_buffer)) + { + goto fail; /* failed to parse name */ + } + buffer_skip_whitespace(input_buffer); + + /* swap valuestring and string, because we parsed the name */ + current_item->string = current_item->valuestring; + current_item->valuestring = NULL; + + if (cannot_access_at_index(input_buffer, 0) || (buffer_at_offset(input_buffer)[0] != ':')) + { + goto fail; /* invalid object */ + } + + /* parse the value */ + input_buffer->offset++; + buffer_skip_whitespace(input_buffer); + if (!parse_value(current_item, input_buffer)) + { + goto fail; /* failed to parse value */ + } + buffer_skip_whitespace(input_buffer); + } + while (can_access_at_index(input_buffer, 0) && (buffer_at_offset(input_buffer)[0] == ',')); + + if (cannot_access_at_index(input_buffer, 0) || (buffer_at_offset(input_buffer)[0] != '}')) + { + goto fail; /* expected end of object */ + } + +success: + input_buffer->depth--; + + item->type = cJSON_Object; + item->child = head; + + input_buffer->offset++; + return true; + +fail: + if (head != NULL) + { + cJSON_Delete(head); + } + + return false; +} + +/* Render an object to text. */ +static cJSON_bool print_object(const cJSON * const item, printbuffer * const output_buffer) +{ + unsigned char *output_pointer = NULL; + size_t length = 0; + cJSON *current_item = item->child; + + if (output_buffer == NULL) + { + return false; + } + + /* Compose the output: */ + length = (size_t) (output_buffer->format ? 
2 : 1); /* fmt: {\n */ + output_pointer = ensure(output_buffer, length + 1); + if (output_pointer == NULL) + { + return false; + } + + *output_pointer++ = '{'; + output_buffer->depth++; + if (output_buffer->format) + { + *output_pointer++ = '\n'; + } + output_buffer->offset += length; + + while (current_item) + { + if (output_buffer->format) + { + size_t i; + output_pointer = ensure(output_buffer, output_buffer->depth); + if (output_pointer == NULL) + { + return false; + } + for (i = 0; i < output_buffer->depth; i++) + { + *output_pointer++ = '\t'; + } + output_buffer->offset += output_buffer->depth; + } + + /* print key */ + if (!print_string_ptr((unsigned char*)current_item->string, output_buffer)) + { + return false; + } + update_offset(output_buffer); + + length = (size_t) (output_buffer->format ? 2 : 1); + output_pointer = ensure(output_buffer, length); + if (output_pointer == NULL) + { + return false; + } + *output_pointer++ = ':'; + if (output_buffer->format) + { + *output_pointer++ = '\t'; + } + output_buffer->offset += length; + + /* print value */ + if (!print_value(current_item, output_buffer)) + { + return false; + } + update_offset(output_buffer); + + /* print comma if not last */ + length = ((size_t)(output_buffer->format ? 1 : 0) + (size_t)(current_item->next ? 1 : 0)); + output_pointer = ensure(output_buffer, length + 1); + if (output_pointer == NULL) + { + return false; + } + if (current_item->next) + { + *output_pointer++ = ','; + } + + if (output_buffer->format) + { + *output_pointer++ = '\n'; + } + *output_pointer = '\0'; + output_buffer->offset += length; + + current_item = current_item->next; + } + + output_pointer = ensure(output_buffer, output_buffer->format ? (output_buffer->depth + 1) : 2); + if (output_pointer == NULL) + { + return false; + } + if (output_buffer->format) + { + size_t i; + for (i = 0; i < (output_buffer->depth - 1); i++) + { + *output_pointer++ = '\t'; + } + } + *output_pointer++ = '}'; + *output_pointer = '\0'; + output_buffer->depth--; + + return true; +} + +/* Get Array size/item / object item. */ +CJSON_PUBLIC(int) cJSON_GetArraySize(const cJSON *array) +{ + cJSON *child = NULL; + size_t size = 0; + + if (array == NULL) + { + return 0; + } + + child = array->child; + + while(child != NULL) + { + size++; + child = child->next; + } + + /* FIXME: Can overflow here. 
Cannot be fixed without breaking the API */ + + return (int)size; +} + +static cJSON* get_array_item(const cJSON *array, size_t index) +{ + cJSON *current_child = NULL; + + if (array == NULL) + { + return NULL; + } + + current_child = array->child; + while ((current_child != NULL) && (index > 0)) + { + index--; + current_child = current_child->next; + } + + return current_child; +} + +CJSON_PUBLIC(cJSON *) cJSON_GetArrayItem(const cJSON *array, int index) +{ + if (index < 0) + { + return NULL; + } + + return get_array_item(array, (size_t)index); +} + +static cJSON *get_object_item(const cJSON * const object, const char * const name, const cJSON_bool case_sensitive) +{ + cJSON *current_element = NULL; + + if ((object == NULL) || (name == NULL)) + { + return NULL; + } + + current_element = object->child; + if (case_sensitive) + { + while ((current_element != NULL) && (current_element->string != NULL) && (strcmp(name, current_element->string) != 0)) + { + current_element = current_element->next; + } + } + else + { + while ((current_element != NULL) && (case_insensitive_strcmp((const unsigned char*)name, (const unsigned char*)(current_element->string)) != 0)) + { + current_element = current_element->next; + } + } + + if ((current_element == NULL) || (current_element->string == NULL)) { + return NULL; + } + + return current_element; +} + +CJSON_PUBLIC(cJSON *) cJSON_GetObjectItem(const cJSON * const object, const char * const string) +{ + return get_object_item(object, string, false); +} + +CJSON_PUBLIC(cJSON *) cJSON_GetObjectItemCaseSensitive(const cJSON * const object, const char * const string) +{ + return get_object_item(object, string, true); +} + +CJSON_PUBLIC(cJSON_bool) cJSON_HasObjectItem(const cJSON *object, const char *string) +{ + return cJSON_GetObjectItem(object, string) ? 1 : 0; +} + +/* Utility for array list handling. */ +static void suffix_object(cJSON *prev, cJSON *item) +{ + prev->next = item; + item->prev = prev; +} + +/* Utility for handling references. */ +static cJSON *create_reference(const cJSON *item, const internal_hooks * const hooks) +{ + cJSON *reference = NULL; + if (item == NULL) + { + return NULL; + } + + reference = cJSON_New_Item(hooks); + if (reference == NULL) + { + return NULL; + } + + memcpy(reference, item, sizeof(cJSON)); + reference->string = NULL; + reference->type |= cJSON_IsReference; + reference->next = reference->prev = NULL; + return reference; +} + +static cJSON_bool add_item_to_array(cJSON *array, cJSON *item) +{ + cJSON *child = NULL; + + if ((item == NULL) || (array == NULL)) + { + return false; + } + + child = array->child; + + if (child == NULL) + { + /* list is empty, start new one */ + array->child = item; + } + else + { + /* append to the end */ + while (child->next) + { + child = child->next; + } + suffix_object(child, item); + } + + return true; +} + +/* Add item to array/object. 
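   A minimal usage sketch (illustrative only; key names and values are invented,
   the functions are the public ones declared in cJSON.h; error handling omitted):

       cJSON *root = cJSON_CreateObject();
       cJSON *tags = cJSON_CreateArray();
       cJSON_AddItemToArray(tags, cJSON_CreateString("fast"));
       cJSON_AddItemToArray(tags, cJSON_CreateNumber(42));
       cJSON_AddItemToObject(root, "tags", tags);   // root now owns tags
       int count = cJSON_GetArraySize(tags);        // 2
       cJSON_Delete(root);                          // frees root, tags and both items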
*/ +CJSON_PUBLIC(void) cJSON_AddItemToArray(cJSON *array, cJSON *item) +{ + add_item_to_array(array, item); +} + +#if defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ > 5)))) + #pragma GCC diagnostic push +#endif +#ifdef __GNUC__ +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif +/* helper function to cast away const */ +static void* cast_away_const(const void* string) +{ + return (void*)string; +} +#if defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ > 5)))) + #pragma GCC diagnostic pop +#endif + + +static cJSON_bool add_item_to_object(cJSON * const object, const char * const string, cJSON * const item, const internal_hooks * const hooks, const cJSON_bool constant_key) +{ + char *new_key = NULL; + int new_type = cJSON_Invalid; + + if ((object == NULL) || (string == NULL) || (item == NULL)) + { + return false; + } + + if (constant_key) + { + new_key = (char*)cast_away_const(string); + new_type = item->type | cJSON_StringIsConst; + } + else + { + new_key = (char*)cJSON_strdup((const unsigned char*)string, hooks); + if (new_key == NULL) + { + return false; + } + + new_type = item->type & ~cJSON_StringIsConst; + } + + if (!(item->type & cJSON_StringIsConst) && (item->string != NULL)) + { + hooks->deallocate(item->string); + } + + item->string = new_key; + item->type = new_type; + + return add_item_to_array(object, item); +} + +CJSON_PUBLIC(void) cJSON_AddItemToObject(cJSON *object, const char *string, cJSON *item) +{ + add_item_to_object(object, string, item, &global_hooks, false); +} + +/* Add an item to an object with constant string as key */ +CJSON_PUBLIC(void) cJSON_AddItemToObjectCS(cJSON *object, const char *string, cJSON *item) +{ + add_item_to_object(object, string, item, &global_hooks, true); +} + +CJSON_PUBLIC(void) cJSON_AddItemReferenceToArray(cJSON *array, cJSON *item) +{ + if (array == NULL) + { + return; + } + + add_item_to_array(array, create_reference(item, &global_hooks)); +} + +CJSON_PUBLIC(void) cJSON_AddItemReferenceToObject(cJSON *object, const char *string, cJSON *item) +{ + if ((object == NULL) || (string == NULL)) + { + return; + } + + add_item_to_object(object, string, create_reference(item, &global_hooks), &global_hooks, false); +} + +CJSON_PUBLIC(cJSON*) cJSON_AddNullToObject(cJSON * const object, const char * const name) +{ + cJSON *null = cJSON_CreateNull(); + if (add_item_to_object(object, name, null, &global_hooks, false)) + { + return null; + } + + cJSON_Delete(null); + return NULL; +} + +CJSON_PUBLIC(cJSON*) cJSON_AddTrueToObject(cJSON * const object, const char * const name) +{ + cJSON *true_item = cJSON_CreateTrue(); + if (add_item_to_object(object, name, true_item, &global_hooks, false)) + { + return true_item; + } + + cJSON_Delete(true_item); + return NULL; +} + +CJSON_PUBLIC(cJSON*) cJSON_AddFalseToObject(cJSON * const object, const char * const name) +{ + cJSON *false_item = cJSON_CreateFalse(); + if (add_item_to_object(object, name, false_item, &global_hooks, false)) + { + return false_item; + } + + cJSON_Delete(false_item); + return NULL; +} + +CJSON_PUBLIC(cJSON*) cJSON_AddBoolToObject(cJSON * const object, const char * const name, const cJSON_bool boolean) +{ + cJSON *bool_item = cJSON_CreateBool(boolean); + if (add_item_to_object(object, name, bool_item, &global_hooks, false)) + { + return bool_item; + } + + cJSON_Delete(bool_item); + return NULL; +} + +CJSON_PUBLIC(cJSON*) cJSON_AddNumberToObject(cJSON * const object, const char * const name, 
const double number) +{ + cJSON *number_item = cJSON_CreateNumber(number); + if (add_item_to_object(object, name, number_item, &global_hooks, false)) + { + return number_item; + } + + cJSON_Delete(number_item); + return NULL; +} + +CJSON_PUBLIC(cJSON*) cJSON_AddStringToObject(cJSON * const object, const char * const name, const char * const string) +{ + cJSON *string_item = cJSON_CreateString(string); + if (add_item_to_object(object, name, string_item, &global_hooks, false)) + { + return string_item; + } + + cJSON_Delete(string_item); + return NULL; +} + +CJSON_PUBLIC(cJSON*) cJSON_AddRawToObject(cJSON * const object, const char * const name, const char * const raw) +{ + cJSON *raw_item = cJSON_CreateRaw(raw); + if (add_item_to_object(object, name, raw_item, &global_hooks, false)) + { + return raw_item; + } + + cJSON_Delete(raw_item); + return NULL; +} + +CJSON_PUBLIC(cJSON*) cJSON_AddObjectToObject(cJSON * const object, const char * const name) +{ + cJSON *object_item = cJSON_CreateObject(); + if (add_item_to_object(object, name, object_item, &global_hooks, false)) + { + return object_item; + } + + cJSON_Delete(object_item); + return NULL; +} + +CJSON_PUBLIC(cJSON*) cJSON_AddArrayToObject(cJSON * const object, const char * const name) +{ + cJSON *array = cJSON_CreateArray(); + if (add_item_to_object(object, name, array, &global_hooks, false)) + { + return array; + } + + cJSON_Delete(array); + return NULL; +} + +CJSON_PUBLIC(cJSON *) cJSON_DetachItemViaPointer(cJSON *parent, cJSON * const item) +{ + if ((parent == NULL) || (item == NULL)) + { + return NULL; + } + + if (item->prev != NULL) + { + /* not the first element */ + item->prev->next = item->next; + } + if (item->next != NULL) + { + /* not the last element */ + item->next->prev = item->prev; + } + + if (item == parent->child) + { + /* first element */ + parent->child = item->next; + } + /* make sure the detached item doesn't point anywhere anymore */ + item->prev = NULL; + item->next = NULL; + + return item; +} + +CJSON_PUBLIC(cJSON *) cJSON_DetachItemFromArray(cJSON *array, int which) +{ + if (which < 0) + { + return NULL; + } + + return cJSON_DetachItemViaPointer(array, get_array_item(array, (size_t)which)); +} + +CJSON_PUBLIC(void) cJSON_DeleteItemFromArray(cJSON *array, int which) +{ + cJSON_Delete(cJSON_DetachItemFromArray(array, which)); +} + +CJSON_PUBLIC(cJSON *) cJSON_DetachItemFromObject(cJSON *object, const char *string) +{ + cJSON *to_detach = cJSON_GetObjectItem(object, string); + + return cJSON_DetachItemViaPointer(object, to_detach); +} + +CJSON_PUBLIC(cJSON *) cJSON_DetachItemFromObjectCaseSensitive(cJSON *object, const char *string) +{ + cJSON *to_detach = cJSON_GetObjectItemCaseSensitive(object, string); + + return cJSON_DetachItemViaPointer(object, to_detach); +} + +CJSON_PUBLIC(void) cJSON_DeleteItemFromObject(cJSON *object, const char *string) +{ + cJSON_Delete(cJSON_DetachItemFromObject(object, string)); +} + +CJSON_PUBLIC(void) cJSON_DeleteItemFromObjectCaseSensitive(cJSON *object, const char *string) +{ + cJSON_Delete(cJSON_DetachItemFromObjectCaseSensitive(object, string)); +} + +/* Replace array/object items with new ones. 
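   A minimal sketch of replacement (illustrative only; names and values are
   invented, the calls are declared in cJSON.h):

       cJSON *config = cJSON_CreateObject();
       cJSON_AddNumberToObject(config, "retries", 3);
       // swap the old number for a new one; the old item is deleted internally
       cJSON_ReplaceItemInObject(config, "retries", cJSON_CreateNumber(5));
       cJSON_Delete(config);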
*/ +CJSON_PUBLIC(void) cJSON_InsertItemInArray(cJSON *array, int which, cJSON *newitem) +{ + cJSON *after_inserted = NULL; + + if (which < 0) + { + return; + } + + after_inserted = get_array_item(array, (size_t)which); + if (after_inserted == NULL) + { + add_item_to_array(array, newitem); + return; + } + + newitem->next = after_inserted; + newitem->prev = after_inserted->prev; + after_inserted->prev = newitem; + if (after_inserted == array->child) + { + array->child = newitem; + } + else + { + newitem->prev->next = newitem; + } +} + +CJSON_PUBLIC(cJSON_bool) cJSON_ReplaceItemViaPointer(cJSON * const parent, cJSON * const item, cJSON * replacement) +{ + if ((parent == NULL) || (replacement == NULL) || (item == NULL)) + { + return false; + } + + if (replacement == item) + { + return true; + } + + replacement->next = item->next; + replacement->prev = item->prev; + + if (replacement->next != NULL) + { + replacement->next->prev = replacement; + } + if (replacement->prev != NULL) + { + replacement->prev->next = replacement; + } + if (parent->child == item) + { + parent->child = replacement; + } + + item->next = NULL; + item->prev = NULL; + cJSON_Delete(item); + + return true; +} + +CJSON_PUBLIC(void) cJSON_ReplaceItemInArray(cJSON *array, int which, cJSON *newitem) +{ + if (which < 0) + { + return; + } + + cJSON_ReplaceItemViaPointer(array, get_array_item(array, (size_t)which), newitem); +} + +static cJSON_bool replace_item_in_object(cJSON *object, const char *string, cJSON *replacement, cJSON_bool case_sensitive) +{ + if ((replacement == NULL) || (string == NULL)) + { + return false; + } + + /* replace the name in the replacement */ + if (!(replacement->type & cJSON_StringIsConst) && (replacement->string != NULL)) + { + cJSON_free(replacement->string); + } + replacement->string = (char*)cJSON_strdup((const unsigned char*)string, &global_hooks); + replacement->type &= ~cJSON_StringIsConst; + + cJSON_ReplaceItemViaPointer(object, get_object_item(object, string, case_sensitive), replacement); + + return true; +} + +CJSON_PUBLIC(void) cJSON_ReplaceItemInObject(cJSON *object, const char *string, cJSON *newitem) +{ + replace_item_in_object(object, string, newitem, false); +} + +CJSON_PUBLIC(void) cJSON_ReplaceItemInObjectCaseSensitive(cJSON *object, const char *string, cJSON *newitem) +{ + replace_item_in_object(object, string, newitem, true); +} + +/* Create basic types: */ +CJSON_PUBLIC(cJSON *) cJSON_CreateNull(void) +{ + cJSON *item = cJSON_New_Item(&global_hooks); + if(item) + { + item->type = cJSON_NULL; + } + + return item; +} + +CJSON_PUBLIC(cJSON *) cJSON_CreateTrue(void) +{ + cJSON *item = cJSON_New_Item(&global_hooks); + if(item) + { + item->type = cJSON_True; + } + + return item; +} + +CJSON_PUBLIC(cJSON *) cJSON_CreateFalse(void) +{ + cJSON *item = cJSON_New_Item(&global_hooks); + if(item) + { + item->type = cJSON_False; + } + + return item; +} + +CJSON_PUBLIC(cJSON *) cJSON_CreateBool(cJSON_bool b) +{ + cJSON *item = cJSON_New_Item(&global_hooks); + if(item) + { + item->type = b ? 
cJSON_True : cJSON_False; + } + + return item; +} + +CJSON_PUBLIC(cJSON *) cJSON_CreateNumber(double num) +{ + cJSON *item = cJSON_New_Item(&global_hooks); + if(item) + { + item->type = cJSON_Number; + item->valuedouble = num; + + /* use saturation in case of overflow */ + if (num >= INT_MAX) + { + item->valueint = INT_MAX; + } + else if (num <= (double)INT_MIN) + { + item->valueint = INT_MIN; + } + else + { + item->valueint = (int)num; + } + } + + return item; +} + +CJSON_PUBLIC(cJSON *) cJSON_CreateString(const char *string) +{ + cJSON *item = cJSON_New_Item(&global_hooks); + if(item) + { + item->type = cJSON_String; + item->valuestring = (char*)cJSON_strdup((const unsigned char*)string, &global_hooks); + if(!item->valuestring) + { + cJSON_Delete(item); + return NULL; + } + } + + return item; +} + +CJSON_PUBLIC(cJSON *) cJSON_CreateStringReference(const char *string) +{ + cJSON *item = cJSON_New_Item(&global_hooks); + if (item != NULL) + { + item->type = cJSON_String | cJSON_IsReference; + item->valuestring = (char*)cast_away_const(string); + } + + return item; +} + +CJSON_PUBLIC(cJSON *) cJSON_CreateObjectReference(const cJSON *child) +{ + cJSON *item = cJSON_New_Item(&global_hooks); + if (item != NULL) { + item->type = cJSON_Object | cJSON_IsReference; + item->child = (cJSON*)cast_away_const(child); + } + + return item; +} + +CJSON_PUBLIC(cJSON *) cJSON_CreateArrayReference(const cJSON *child) { + cJSON *item = cJSON_New_Item(&global_hooks); + if (item != NULL) { + item->type = cJSON_Array | cJSON_IsReference; + item->child = (cJSON*)cast_away_const(child); + } + + return item; +} + +CJSON_PUBLIC(cJSON *) cJSON_CreateRaw(const char *raw) +{ + cJSON *item = cJSON_New_Item(&global_hooks); + if(item) + { + item->type = cJSON_Raw; + item->valuestring = (char*)cJSON_strdup((const unsigned char*)raw, &global_hooks); + if(!item->valuestring) + { + cJSON_Delete(item); + return NULL; + } + } + + return item; +} + +CJSON_PUBLIC(cJSON *) cJSON_CreateArray(void) +{ + cJSON *item = cJSON_New_Item(&global_hooks); + if(item) + { + item->type=cJSON_Array; + } + + return item; +} + +CJSON_PUBLIC(cJSON *) cJSON_CreateObject(void) +{ + cJSON *item = cJSON_New_Item(&global_hooks); + if (item) + { + item->type = cJSON_Object; + } + + return item; +} + +/* Create Arrays: */ +CJSON_PUBLIC(cJSON *) cJSON_CreateIntArray(const int *numbers, int count) +{ + size_t i = 0; + cJSON *n = NULL; + cJSON *p = NULL; + cJSON *a = NULL; + + if ((count < 0) || (numbers == NULL)) + { + return NULL; + } + + a = cJSON_CreateArray(); + for(i = 0; a && (i < (size_t)count); i++) + { + n = cJSON_CreateNumber(numbers[i]); + if (!n) + { + cJSON_Delete(a); + return NULL; + } + if(!i) + { + a->child = n; + } + else + { + suffix_object(p, n); + } + p = n; + } + + return a; +} + +CJSON_PUBLIC(cJSON *) cJSON_CreateFloatArray(const float *numbers, int count) +{ + size_t i = 0; + cJSON *n = NULL; + cJSON *p = NULL; + cJSON *a = NULL; + + if ((count < 0) || (numbers == NULL)) + { + return NULL; + } + + a = cJSON_CreateArray(); + + for(i = 0; a && (i < (size_t)count); i++) + { + n = cJSON_CreateNumber((double)numbers[i]); + if(!n) + { + cJSON_Delete(a); + return NULL; + } + if(!i) + { + a->child = n; + } + else + { + suffix_object(p, n); + } + p = n; + } + + return a; +} + +CJSON_PUBLIC(cJSON *) cJSON_CreateDoubleArray(const double *numbers, int count) +{ + size_t i = 0; + cJSON *n = NULL; + cJSON *p = NULL; + cJSON *a = NULL; + + if ((count < 0) || (numbers == NULL)) + { + return NULL; + } + + a = cJSON_CreateArray(); + + for(i = 0;a && (i 
< (size_t)count); i++) + { + n = cJSON_CreateNumber(numbers[i]); + if(!n) + { + cJSON_Delete(a); + return NULL; + } + if(!i) + { + a->child = n; + } + else + { + suffix_object(p, n); + } + p = n; + } + + return a; +} + +CJSON_PUBLIC(cJSON *) cJSON_CreateStringArray(const char **strings, int count) +{ + size_t i = 0; + cJSON *n = NULL; + cJSON *p = NULL; + cJSON *a = NULL; + + if ((count < 0) || (strings == NULL)) + { + return NULL; + } + + a = cJSON_CreateArray(); + + for (i = 0; a && (i < (size_t)count); i++) + { + n = cJSON_CreateString(strings[i]); + if(!n) + { + cJSON_Delete(a); + return NULL; + } + if(!i) + { + a->child = n; + } + else + { + suffix_object(p,n); + } + p = n; + } + + return a; +} + +/* Duplication */ +CJSON_PUBLIC(cJSON *) cJSON_Duplicate(const cJSON *item, cJSON_bool recurse) +{ + cJSON *newitem = NULL; + cJSON *child = NULL; + cJSON *next = NULL; + cJSON *newchild = NULL; + + /* Bail on bad ptr */ + if (!item) + { + goto fail; + } + /* Create new item */ + newitem = cJSON_New_Item(&global_hooks); + if (!newitem) + { + goto fail; + } + /* Copy over all vars */ + newitem->type = item->type & (~cJSON_IsReference); + newitem->valueint = item->valueint; + newitem->valuedouble = item->valuedouble; + if (item->valuestring) + { + newitem->valuestring = (char*)cJSON_strdup((unsigned char*)item->valuestring, &global_hooks); + if (!newitem->valuestring) + { + goto fail; + } + } + if (item->string) + { + newitem->string = (item->type&cJSON_StringIsConst) ? item->string : (char*)cJSON_strdup((unsigned char*)item->string, &global_hooks); + if (!newitem->string) + { + goto fail; + } + } + /* If non-recursive, then we're done! */ + if (!recurse) + { + return newitem; + } + /* Walk the ->next chain for the child. */ + child = item->child; + while (child != NULL) + { + newchild = cJSON_Duplicate(child, true); /* Duplicate (with recurse) each item in the ->next chain */ + if (!newchild) + { + goto fail; + } + if (next != NULL) + { + /* If newitem->child already set, then crosswire ->prev and ->next and move on */ + next->next = newchild; + newchild->prev = next; + next = newchild; + } + else + { + /* Set newitem->child and move to it */ + newitem->child = newchild; + next = newchild; + } + child = child->next; + } + + return newitem; + +fail: + if (newitem != NULL) + { + cJSON_Delete(newitem); + } + + return NULL; +} + +static void skip_oneline_comment(char **input) +{ + *input += static_strlen("//"); + + for (; (*input)[0] != '\0'; ++(*input)) + { + if ((*input)[0] == '\n') { + *input += static_strlen("\n"); + return; + } + } +} + +static void skip_multiline_comment(char **input) +{ + *input += static_strlen("/*"); + + for (; (*input)[0] != '\0'; ++(*input)) + { + if (((*input)[0] == '*') && ((*input)[1] == '/')) + { + *input += static_strlen("*/"); + return; + } + } +} + +static void minify_string(char **input, char **output) { + (*output)[0] = (*input)[0]; + *input += static_strlen("\""); + *output += static_strlen("\""); + + + for (; (*input)[0] != '\0'; (void)++(*input), ++(*output)) { + (*output)[0] = (*input)[0]; + + if ((*input)[0] == '\"') { + (*output)[0] = '\"'; + *input += static_strlen("\""); + *output += static_strlen("\""); + return; + } else if (((*input)[0] == '\\') && ((*input)[1] == '\"')) { + (*output)[1] = (*input)[1]; + *input += static_strlen("\""); + *output += static_strlen("\""); + } + } +} + +CJSON_PUBLIC(void) cJSON_Minify(char *json) +{ + char *into = json; + + if (json == NULL) + { + return; + } + + while (json[0] != '\0') + { + switch (json[0]) + { + case 
' ': + case '\t': + case '\r': + case '\n': + json++; + break; + + case '/': + if (json[1] == '/') + { + skip_oneline_comment(&json); + } + else if (json[1] == '*') + { + skip_multiline_comment(&json); + } else { + json++; + } + break; + + case '\"': + minify_string(&json, (char**)&into); + break; + + default: + into[0] = json[0]; + json++; + into++; + } + } + + /* and null-terminate. */ + *into = '\0'; +} + +CJSON_PUBLIC(cJSON_bool) cJSON_IsInvalid(const cJSON * const item) +{ + if (item == NULL) + { + return false; + } + + return (item->type & 0xFF) == cJSON_Invalid; +} + +CJSON_PUBLIC(cJSON_bool) cJSON_IsFalse(const cJSON * const item) +{ + if (item == NULL) + { + return false; + } + + return (item->type & 0xFF) == cJSON_False; +} + +CJSON_PUBLIC(cJSON_bool) cJSON_IsTrue(const cJSON * const item) +{ + if (item == NULL) + { + return false; + } + + return (item->type & 0xff) == cJSON_True; +} + + +CJSON_PUBLIC(cJSON_bool) cJSON_IsBool(const cJSON * const item) +{ + if (item == NULL) + { + return false; + } + + return (item->type & (cJSON_True | cJSON_False)) != 0; +} +CJSON_PUBLIC(cJSON_bool) cJSON_IsNull(const cJSON * const item) +{ + if (item == NULL) + { + return false; + } + + return (item->type & 0xFF) == cJSON_NULL; +} + +CJSON_PUBLIC(cJSON_bool) cJSON_IsNumber(const cJSON * const item) +{ + if (item == NULL) + { + return false; + } + + return (item->type & 0xFF) == cJSON_Number; +} + +CJSON_PUBLIC(cJSON_bool) cJSON_IsString(const cJSON * const item) +{ + if (item == NULL) + { + return false; + } + + return (item->type & 0xFF) == cJSON_String; +} + +CJSON_PUBLIC(cJSON_bool) cJSON_IsArray(const cJSON * const item) +{ + if (item == NULL) + { + return false; + } + + return (item->type & 0xFF) == cJSON_Array; +} + +CJSON_PUBLIC(cJSON_bool) cJSON_IsObject(const cJSON * const item) +{ + if (item == NULL) + { + return false; + } + + return (item->type & 0xFF) == cJSON_Object; +} + +CJSON_PUBLIC(cJSON_bool) cJSON_IsRaw(const cJSON * const item) +{ + if (item == NULL) + { + return false; + } + + return (item->type & 0xFF) == cJSON_Raw; +} + +CJSON_PUBLIC(cJSON_bool) cJSON_Compare(const cJSON * const a, const cJSON * const b, const cJSON_bool case_sensitive) +{ + if ((a == NULL) || (b == NULL) || ((a->type & 0xFF) != (b->type & 0xFF)) || cJSON_IsInvalid(a)) + { + return false; + } + + /* check if type is valid */ + switch (a->type & 0xFF) + { + case cJSON_False: + case cJSON_True: + case cJSON_NULL: + case cJSON_Number: + case cJSON_String: + case cJSON_Raw: + case cJSON_Array: + case cJSON_Object: + break; + + default: + return false; + } + + /* identical objects are equal */ + if (a == b) + { + return true; + } + + switch (a->type & 0xFF) + { + /* in these cases and equal type is enough */ + case cJSON_False: + case cJSON_True: + case cJSON_NULL: + return true; + + case cJSON_Number: + if (a->valuedouble == b->valuedouble) + { + return true; + } + return false; + + case cJSON_String: + case cJSON_Raw: + if ((a->valuestring == NULL) || (b->valuestring == NULL)) + { + return false; + } + if (strcmp(a->valuestring, b->valuestring) == 0) + { + return true; + } + + return false; + + case cJSON_Array: + { + cJSON *a_element = a->child; + cJSON *b_element = b->child; + + for (; (a_element != NULL) && (b_element != NULL);) + { + if (!cJSON_Compare(a_element, b_element, case_sensitive)) + { + return false; + } + + a_element = a_element->next; + b_element = b_element->next; + } + + /* one of the arrays is longer than the other */ + if (a_element != b_element) { + return false; + } + + return true; + 
} + + case cJSON_Object: + { + cJSON *a_element = NULL; + cJSON *b_element = NULL; + cJSON_ArrayForEach(a_element, a) + { + /* TODO This has O(n^2) runtime, which is horrible! */ + b_element = get_object_item(b, a_element->string, case_sensitive); + if (b_element == NULL) + { + return false; + } + + if (!cJSON_Compare(a_element, b_element, case_sensitive)) + { + return false; + } + } + + /* doing this twice, once on a and b to prevent true comparison if a subset of b + * TODO: Do this the proper way, this is just a fix for now */ + cJSON_ArrayForEach(b_element, b) + { + a_element = get_object_item(a, b_element->string, case_sensitive); + if (a_element == NULL) + { + return false; + } + + if (!cJSON_Compare(b_element, a_element, case_sensitive)) + { + return false; + } + } + + return true; + } + + default: + return false; + } +} + +CJSON_PUBLIC(void *) cJSON_malloc(size_t size) +{ + return global_hooks.allocate(size); +} + +CJSON_PUBLIC(void) cJSON_free(void *object) +{ + global_hooks.deallocate(object); +} diff --git a/v_windows/v/old/thirdparty/cJSON/cJSON.h b/v_windows/v/old/thirdparty/cJSON/cJSON.h new file mode 100644 index 0000000..592986b --- /dev/null +++ b/v_windows/v/old/thirdparty/cJSON/cJSON.h @@ -0,0 +1,285 @@ +/* + Copyright (c) 2009-2017 Dave Gamble and cJSON contributors + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in + all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + THE SOFTWARE. +*/ + +#ifndef cJSON__h +#define cJSON__h + +#ifdef __cplusplus +extern "C" +{ +#endif + +#if !defined(__WINDOWS__) && (defined(WIN32) || defined(WIN64) || defined(_MSC_VER) || defined(_WIN32)) +#define __WINDOWS__ +#endif + +#ifdef __WINDOWS__ + +/* When compiling for windows, we specify a specific calling convention to avoid issues where we are being called from a project with a different default calling convention. 
For windows you have 3 define options: + +CJSON_HIDE_SYMBOLS - Define this in the case where you don't want to ever dllexport symbols +CJSON_EXPORT_SYMBOLS - Define this on library build when you want to dllexport symbols (default) +CJSON_IMPORT_SYMBOLS - Define this if you want to dllimport symbol + +For *nix builds that support visibility attribute, you can define similar behavior by + +setting default visibility to hidden by adding +-fvisibility=hidden (for gcc) +or +-xldscope=hidden (for sun cc) +to CFLAGS + +then using the CJSON_API_VISIBILITY flag to "export" the same symbols the way CJSON_EXPORT_SYMBOLS does + +*/ + +#define CJSON_CDECL __cdecl +#define CJSON_STDCALL __stdcall + +/* export symbols by default, this is necessary for copy pasting the C and header file */ +#if !defined(CJSON_HIDE_SYMBOLS) && !defined(CJSON_IMPORT_SYMBOLS) && !defined(CJSON_EXPORT_SYMBOLS) +#define CJSON_EXPORT_SYMBOLS +#endif + +#if defined(CJSON_HIDE_SYMBOLS) +#define CJSON_PUBLIC(type) type CJSON_STDCALL +#elif defined(CJSON_EXPORT_SYMBOLS) +#define CJSON_PUBLIC(type) __declspec(dllexport) type CJSON_STDCALL +#elif defined(CJSON_IMPORT_SYMBOLS) +#define CJSON_PUBLIC(type) __declspec(dllimport) type CJSON_STDCALL +#endif +#else /* !__WINDOWS__ */ +#define CJSON_CDECL +#define CJSON_STDCALL + +#if (defined(__GNUC__) || defined(__SUNPRO_CC) || defined (__SUNPRO_C)) && defined(CJSON_API_VISIBILITY) +#define CJSON_PUBLIC(type) __attribute__((visibility("default"))) type +#else +#define CJSON_PUBLIC(type) type +#endif +#endif + +/* project version */ +#define CJSON_VERSION_MAJOR 1 +#define CJSON_VERSION_MINOR 7 +#define CJSON_VERSION_PATCH 12 + +#include + +/* cJSON Types: */ +#define cJSON_Invalid (0) +#define cJSON_False (1 << 0) +#define cJSON_True (1 << 1) +#define cJSON_NULL (1 << 2) +#define cJSON_Number (1 << 3) +#define cJSON_String (1 << 4) +#define cJSON_Array (1 << 5) +#define cJSON_Object (1 << 6) +#define cJSON_Raw (1 << 7) /* raw json */ + +#define cJSON_IsReference 256 +#define cJSON_StringIsConst 512 + +/* The cJSON structure: */ +typedef struct cJSON +{ + /* next/prev allow you to walk array/object chains. Alternatively, use GetArraySize/GetArrayItem/GetObjectItem */ + struct cJSON *next; + struct cJSON *prev; + /* An array or object item will have a child pointer pointing to a chain of the items in the array/object. */ + struct cJSON *child; + + /* The type of the item, as above. */ + int type; + + /* The item's string, if type==cJSON_String and type == cJSON_Raw */ + char *valuestring; + /* writing to valueint is DEPRECATED, use cJSON_SetNumberValue instead */ + int valueint; + /* The item's number, if type==cJSON_Number */ + double valuedouble; + + /* The item's name string, if this item is the child of, or is in the list of subitems of an object. */ + char *string; +} cJSON; + +typedef struct cJSON_Hooks +{ + /* malloc/free are CDECL on Windows regardless of the default calling convention of the compiler, so ensure the hooks allow passing those functions directly. */ + void *(CJSON_CDECL *malloc_fn)(size_t sz); + void (CJSON_CDECL *free_fn)(void *ptr); +} cJSON_Hooks; + +typedef int cJSON_bool; + +/* Limits how deeply nested arrays/objects can be before cJSON rejects to parse them. + * This is to prevent stack overflows. 
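   Because the definition below is wrapped in #ifndef, the limit can be tuned at
   build time without editing this header, e.g. with an ordinary compiler define
   when building cJSON.c (illustrative value):

       cc -DCJSON_NESTING_LIMIT=128 -c cJSON.c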
*/ +#ifndef CJSON_NESTING_LIMIT +#define CJSON_NESTING_LIMIT 1000 +#endif + +/* returns the version of cJSON as a string */ +CJSON_PUBLIC(const char*) cJSON_Version(void); + +/* Supply malloc, realloc and free functions to cJSON */ +CJSON_PUBLIC(void) cJSON_InitHooks(cJSON_Hooks* hooks); + +/* Memory Management: the caller is always responsible to free the results from all variants of cJSON_Parse (with cJSON_Delete) and cJSON_Print (with stdlib free, cJSON_Hooks.free_fn, or cJSON_free as appropriate). The exception is cJSON_PrintPreallocated, where the caller has full responsibility of the buffer. */ +/* Supply a block of JSON, and this returns a cJSON object you can interrogate. */ +CJSON_PUBLIC(cJSON *) cJSON_Parse(const char *value); +/* ParseWithOpts allows you to require (and check) that the JSON is null terminated, and to retrieve the pointer to the final byte parsed. */ +/* If you supply a ptr in return_parse_end and parsing fails, then return_parse_end will contain a pointer to the error so will match cJSON_GetErrorPtr(). */ +CJSON_PUBLIC(cJSON *) cJSON_ParseWithOpts(const char *value, const char **return_parse_end, cJSON_bool require_null_terminated); + +/* Render a cJSON entity to text for transfer/storage. */ +CJSON_PUBLIC(char *) cJSON_Print(const cJSON *item); +/* Render a cJSON entity to text for transfer/storage without any formatting. */ +CJSON_PUBLIC(char *) cJSON_PrintUnformatted(const cJSON *item); +/* Render a cJSON entity to text using a buffered strategy. prebuffer is a guess at the final size. guessing well reduces reallocation. fmt=0 gives unformatted, =1 gives formatted */ +CJSON_PUBLIC(char *) cJSON_PrintBuffered(const cJSON *item, int prebuffer, cJSON_bool fmt); +/* Render a cJSON entity to text using a buffer already allocated in memory with given length. Returns 1 on success and 0 on failure. */ +/* NOTE: cJSON is not always 100% accurate in estimating how much memory it will use, so to be safe allocate 5 bytes more than you actually need */ +CJSON_PUBLIC(cJSON_bool) cJSON_PrintPreallocated(cJSON *item, char *buffer, const int length, const cJSON_bool format); +/* Delete a cJSON entity and all subentities. */ +CJSON_PUBLIC(void) cJSON_Delete(cJSON *c); + +/* Returns the number of items in an array (or object). */ +CJSON_PUBLIC(int) cJSON_GetArraySize(const cJSON *array); +/* Retrieve item number "index" from array "array". Returns NULL if unsuccessful. */ +CJSON_PUBLIC(cJSON *) cJSON_GetArrayItem(const cJSON *array, int index); +/* Get item "string" from object. Case insensitive. */ +CJSON_PUBLIC(cJSON *) cJSON_GetObjectItem(const cJSON * const object, const char * const string); +CJSON_PUBLIC(cJSON *) cJSON_GetObjectItemCaseSensitive(const cJSON * const object, const char * const string); +CJSON_PUBLIC(cJSON_bool) cJSON_HasObjectItem(const cJSON *object, const char *string); +/* For analysing failed parses. This returns a pointer to the parse error. You'll probably need to look a few chars back to make sense of it. Defined when cJSON_Parse() returns 0. 0 when cJSON_Parse() succeeds. 
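   A minimal sketch of the failure and success paths (illustrative only; the
   broken JSON text is invented, the calls are the ones declared in this header):

       cJSON *item = cJSON_Parse("{\"broken\": }");
       if (item == NULL)
       {
           const char *err = cJSON_GetErrorPtr();   // points into the input near the error
           // report err, then bail out
       }
       else
       {
           char *printed = cJSON_Print(item);       // caller must free this, see above
           cJSON_free(printed);
           cJSON_Delete(item);
       }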
*/ +CJSON_PUBLIC(const char *) cJSON_GetErrorPtr(void); + +/* Check if the item is a string and return its valuestring */ +CJSON_PUBLIC(char *) cJSON_GetStringValue(cJSON *item); + +/* These functions check the type of an item */ +CJSON_PUBLIC(cJSON_bool) cJSON_IsInvalid(const cJSON * const item); +CJSON_PUBLIC(cJSON_bool) cJSON_IsFalse(const cJSON * const item); +CJSON_PUBLIC(cJSON_bool) cJSON_IsTrue(const cJSON * const item); +CJSON_PUBLIC(cJSON_bool) cJSON_IsBool(const cJSON * const item); +CJSON_PUBLIC(cJSON_bool) cJSON_IsNull(const cJSON * const item); +CJSON_PUBLIC(cJSON_bool) cJSON_IsNumber(const cJSON * const item); +CJSON_PUBLIC(cJSON_bool) cJSON_IsString(const cJSON * const item); +CJSON_PUBLIC(cJSON_bool) cJSON_IsArray(const cJSON * const item); +CJSON_PUBLIC(cJSON_bool) cJSON_IsObject(const cJSON * const item); +CJSON_PUBLIC(cJSON_bool) cJSON_IsRaw(const cJSON * const item); + +/* These calls create a cJSON item of the appropriate type. */ +CJSON_PUBLIC(cJSON *) cJSON_CreateNull(void); +CJSON_PUBLIC(cJSON *) cJSON_CreateTrue(void); +CJSON_PUBLIC(cJSON *) cJSON_CreateFalse(void); +CJSON_PUBLIC(cJSON *) cJSON_CreateBool(cJSON_bool boolean); +CJSON_PUBLIC(cJSON *) cJSON_CreateNumber(double num); +CJSON_PUBLIC(cJSON *) cJSON_CreateString(const char *string); +/* raw json */ +CJSON_PUBLIC(cJSON *) cJSON_CreateRaw(const char *raw); +CJSON_PUBLIC(cJSON *) cJSON_CreateArray(void); +CJSON_PUBLIC(cJSON *) cJSON_CreateObject(void); + +/* Create a string where valuestring references a string so + * it will not be freed by cJSON_Delete */ +CJSON_PUBLIC(cJSON *) cJSON_CreateStringReference(const char *string); +/* Create an object/arrray that only references it's elements so + * they will not be freed by cJSON_Delete */ +CJSON_PUBLIC(cJSON *) cJSON_CreateObjectReference(const cJSON *child); +CJSON_PUBLIC(cJSON *) cJSON_CreateArrayReference(const cJSON *child); + +/* These utilities create an Array of count items. */ +CJSON_PUBLIC(cJSON *) cJSON_CreateIntArray(const int *numbers, int count); +CJSON_PUBLIC(cJSON *) cJSON_CreateFloatArray(const float *numbers, int count); +CJSON_PUBLIC(cJSON *) cJSON_CreateDoubleArray(const double *numbers, int count); +CJSON_PUBLIC(cJSON *) cJSON_CreateStringArray(const char **strings, int count); + +/* Append item to the specified array/object. */ +CJSON_PUBLIC(void) cJSON_AddItemToArray(cJSON *array, cJSON *item); +CJSON_PUBLIC(void) cJSON_AddItemToObject(cJSON *object, const char *string, cJSON *item); +/* Use this when string is definitely const (i.e. a literal, or as good as), and will definitely survive the cJSON object. + * WARNING: When this function was used, make sure to always check that (item->type & cJSON_StringIsConst) is zero before + * writing to `item->string` */ +CJSON_PUBLIC(void) cJSON_AddItemToObjectCS(cJSON *object, const char *string, cJSON *item); +/* Append reference to item to the specified array/object. Use this when you want to add an existing cJSON to a new cJSON, but don't want to corrupt your existing cJSON. */ +CJSON_PUBLIC(void) cJSON_AddItemReferenceToArray(cJSON *array, cJSON *item); +CJSON_PUBLIC(void) cJSON_AddItemReferenceToObject(cJSON *object, const char *string, cJSON *item); + +/* Remove/Detatch items from Arrays/Objects. 
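   A minimal sketch of the difference between removing and detaching (illustrative
   only; names and values are invented, the calls are declared below):

       cJSON *obj = cJSON_CreateObject();
       cJSON_AddStringToObject(obj, "temp", "value");
       cJSON_AddStringToObject(obj, "keep", "value");
       cJSON_DeleteItemFromObject(obj, "temp");                // detach + free in one call
       cJSON *kept = cJSON_DetachItemFromObject(obj, "keep");  // caller now owns kept
       cJSON_Delete(kept);
       cJSON_Delete(obj);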
*/ +CJSON_PUBLIC(cJSON *) cJSON_DetachItemViaPointer(cJSON *parent, cJSON * const item); +CJSON_PUBLIC(cJSON *) cJSON_DetachItemFromArray(cJSON *array, int which); +CJSON_PUBLIC(void) cJSON_DeleteItemFromArray(cJSON *array, int which); +CJSON_PUBLIC(cJSON *) cJSON_DetachItemFromObject(cJSON *object, const char *string); +CJSON_PUBLIC(cJSON *) cJSON_DetachItemFromObjectCaseSensitive(cJSON *object, const char *string); +CJSON_PUBLIC(void) cJSON_DeleteItemFromObject(cJSON *object, const char *string); +CJSON_PUBLIC(void) cJSON_DeleteItemFromObjectCaseSensitive(cJSON *object, const char *string); + +/* Update array items. */ +CJSON_PUBLIC(void) cJSON_InsertItemInArray(cJSON *array, int which, cJSON *newitem); /* Shifts pre-existing items to the right. */ +CJSON_PUBLIC(cJSON_bool) cJSON_ReplaceItemViaPointer(cJSON * const parent, cJSON * const item, cJSON * replacement); +CJSON_PUBLIC(void) cJSON_ReplaceItemInArray(cJSON *array, int which, cJSON *newitem); +CJSON_PUBLIC(void) cJSON_ReplaceItemInObject(cJSON *object,const char *string,cJSON *newitem); +CJSON_PUBLIC(void) cJSON_ReplaceItemInObjectCaseSensitive(cJSON *object,const char *string,cJSON *newitem); + +/* Duplicate a cJSON item */ +CJSON_PUBLIC(cJSON *) cJSON_Duplicate(const cJSON *item, cJSON_bool recurse); +/* Duplicate will create a new, identical cJSON item to the one you pass, in new memory that will +need to be released. With recurse!=0, it will duplicate any children connected to the item. +The item->next and ->prev pointers are always zero on return from Duplicate. */ +/* Recursively compare two cJSON items for equality. If either a or b is NULL or invalid, they will be considered unequal. + * case_sensitive determines if object keys are treated case sensitive (1) or case insensitive (0) */ +CJSON_PUBLIC(cJSON_bool) cJSON_Compare(const cJSON * const a, const cJSON * const b, const cJSON_bool case_sensitive); + + +CJSON_PUBLIC(void) cJSON_Minify(char *json); + +/* Helper functions for creating and adding items to an object at the same time. + * They return the added item or NULL on failure. */ +CJSON_PUBLIC(cJSON*) cJSON_AddNullToObject(cJSON * const object, const char * const name); +CJSON_PUBLIC(cJSON*) cJSON_AddTrueToObject(cJSON * const object, const char * const name); +CJSON_PUBLIC(cJSON*) cJSON_AddFalseToObject(cJSON * const object, const char * const name); +CJSON_PUBLIC(cJSON*) cJSON_AddBoolToObject(cJSON * const object, const char * const name, const cJSON_bool boolean); +CJSON_PUBLIC(cJSON*) cJSON_AddNumberToObject(cJSON * const object, const char * const name, const double number); +CJSON_PUBLIC(cJSON*) cJSON_AddStringToObject(cJSON * const object, const char * const name, const char * const string); +CJSON_PUBLIC(cJSON*) cJSON_AddRawToObject(cJSON * const object, const char * const name, const char * const raw); +CJSON_PUBLIC(cJSON*) cJSON_AddObjectToObject(cJSON * const object, const char * const name); +CJSON_PUBLIC(cJSON*) cJSON_AddArrayToObject(cJSON * const object, const char * const name); + +/* When assigning an integer value, it needs to be propagated to valuedouble too. */ +#define cJSON_SetIntValue(object, number) ((object) ? (object)->valueint = (object)->valuedouble = (number) : (number)) +/* helper for the cJSON_SetNumberValue macro */ +CJSON_PUBLIC(double) cJSON_SetNumberHelper(cJSON *object, double number); +#define cJSON_SetNumberValue(object, number) ((object != NULL) ? 
cJSON_SetNumberHelper(object, (double)number) : (number)) + +/* Macro for iterating over an array or object */ +#define cJSON_ArrayForEach(element, array) for(element = (array != NULL) ? (array)->child : NULL; element != NULL; element = element->next) + +/* malloc/free objects using the malloc/free functions that have been set with cJSON_InitHooks */ +CJSON_PUBLIC(void *) cJSON_malloc(size_t size); +CJSON_PUBLIC(void) cJSON_free(void *object); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/v_windows/v/old/thirdparty/cJSON/readme.txt b/v_windows/v/old/thirdparty/cJSON/readme.txt new file mode 100644 index 0000000..3a67b57 --- /dev/null +++ b/v_windows/v/old/thirdparty/cJSON/readme.txt @@ -0,0 +1,7 @@ +Copyright (c) 2009-2017 Dave Gamble and cJSON contributors + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/v_windows/v/old/thirdparty/fontstash/fontstash.h b/v_windows/v/old/thirdparty/fontstash/fontstash.h new file mode 100644 index 0000000..9ae21c1 --- /dev/null +++ b/v_windows/v/old/thirdparty/fontstash/fontstash.h @@ -0,0 +1,1742 @@ +// +// NOTE sokol: all IO functions have been removed +// +// Copyright (c) 2009-2013 Mikko Mononen memon@inside.org +// +// This software is provided 'as-is', without any express or implied +// warranty. In no event will the authors be held liable for any damages +// arising from the use of this software. +// Permission is granted to anyone to use this software for any purpose, +// including commercial applications, and to alter it and redistribute it +// freely, subject to the following restrictions: +// 1. The origin of this software must not be misrepresented; you must not +// claim that you wrote the original software. If you use this software +// in a product, an acknowledgment in the product documentation would be +// appreciated but is not required. +// 2. Altered source versions must be plainly marked as such, and must not be +// misrepresented as being the original software. +// 3. This notice may not be removed or altered from any source distribution. 
+// + +#ifndef FONS_H +#define FONS_H + +#ifdef __cplusplus +extern "C" { +#endif + +// To make the implementation private to the file that generates the implementation +#ifdef FONS_STATIC +#define FONS_DEF static +#else +#define FONS_DEF extern +#endif + +#define FONS_INVALID -1 + +#if !defined(FONTSTASH_MALLOC) +#define FONTSTASH_MALLOC malloc +#define FONTSTASH_REALLOC realloc +#define FONTSTASH_FREE free +#endif + +enum FONSflags { + FONS_ZERO_TOPLEFT = 1, + FONS_ZERO_BOTTOMLEFT = 2, +}; + +enum FONSalign { + // Horizontal align + FONS_ALIGN_LEFT = 1<<0, // Default + FONS_ALIGN_CENTER = 1<<1, + FONS_ALIGN_RIGHT = 1<<2, + // Vertical align + FONS_ALIGN_TOP = 1<<3, + FONS_ALIGN_MIDDLE = 1<<4, + FONS_ALIGN_BOTTOM = 1<<5, + FONS_ALIGN_BASELINE = 1<<6, // Default +}; + +enum FONSerrorCode { + // Font atlas is full. + FONS_ATLAS_FULL = 1, + // Scratch memory used to render glyphs is full, requested size reported in 'val', you may need to bump up FONS_SCRATCH_BUF_SIZE. + FONS_SCRATCH_FULL = 2, + // Calls to fonsPushState has created too large stack, if you need deep state stack bump up FONS_MAX_STATES. + FONS_STATES_OVERFLOW = 3, + // Trying to pop too many states fonsPopState(). + FONS_STATES_UNDERFLOW = 4, +}; + +struct FONSparams { + int width, height; + unsigned char flags; + void* userPtr; + int (*renderCreate)(void* uptr, int width, int height); + int (*renderResize)(void* uptr, int width, int height); + void (*renderUpdate)(void* uptr, int* rect, const unsigned char* data); + void (*renderDraw)(void* uptr, const float* verts, const float* tcoords, const unsigned int* colors, int nverts); + void (*renderDelete)(void* uptr); +}; +typedef struct FONSparams FONSparams; + +struct FONSquad +{ + float x0,y0,s0,t0; + float x1,y1,s1,t1; +}; +typedef struct FONSquad FONSquad; + +struct FONStextIter { + float x, y, nextx, nexty, scale, spacing; + unsigned int codepoint; + short isize, iblur; + struct FONSfont* font; + int prevGlyphIndex; + const char* str; + const char* next; + const char* end; + unsigned int utf8state; +}; +typedef struct FONStextIter FONStextIter; + +typedef struct FONScontext FONScontext; + +// Contructor and destructor. +FONS_DEF FONScontext* fonsCreateInternal(FONSparams* params); +FONS_DEF void fonsDeleteInternal(FONScontext* s); + +FONS_DEF void fonsSetErrorCallback(FONScontext* s, void (*callback)(void* uptr, int error, int val), void* uptr); +// Returns current atlas size. +FONS_DEF void fonsGetAtlasSize(FONScontext* s, int* width, int* height); +// Expands the atlas size. +FONS_DEF int fonsExpandAtlas(FONScontext* s, int width, int height); +// Resets the whole stash. 
+FONS_DEF int fonsResetAtlas(FONScontext* stash, int width, int height); + +// Add fonts +FONS_DEF int fonsGetFontByName(FONScontext* s, const char* name); +FONS_DEF int fonsAddFallbackFont(FONScontext* stash, int base, int fallback); + +// State handling +FONS_DEF void fonsPushState(FONScontext* s); +FONS_DEF void fonsPopState(FONScontext* s); +FONS_DEF void fonsClearState(FONScontext* s); + +// State setting +FONS_DEF void fonsSetSize(FONScontext* s, float size); +FONS_DEF void fonsSetColor(FONScontext* s, unsigned int color); +FONS_DEF void fonsSetSpacing(FONScontext* s, float spacing); +FONS_DEF void fonsSetBlur(FONScontext* s, float blur); +FONS_DEF void fonsSetAlign(FONScontext* s, int align); +FONS_DEF void fonsSetFont(FONScontext* s, int font); + +// Draw text +FONS_DEF float fonsDrawText(FONScontext* s, float x, float y, const char* string, const char* end); + +// Measure text +FONS_DEF float fonsTextBounds(FONScontext* s, float x, float y, const char* string, const char* end, float* bounds); +FONS_DEF void fonsLineBounds(FONScontext* s, float y, float* miny, float* maxy); +FONS_DEF void fonsVertMetrics(FONScontext* s, float* ascender, float* descender, float* lineh); + +// Text iterator +FONS_DEF int fonsTextIterInit(FONScontext* stash, FONStextIter* iter, float x, float y, const char* str, const char* end); +FONS_DEF int fonsTextIterNext(FONScontext* stash, FONStextIter* iter, struct FONSquad* quad); + +// Pull texture changes +FONS_DEF const unsigned char* fonsGetTextureData(FONScontext* stash, int* width, int* height); +FONS_DEF int fonsValidateTexture(FONScontext* s, int* dirty); + +// Draws the stash texture for debugging +FONS_DEF void fonsDrawDebug(FONScontext* s, float x, float y); + +#ifdef __cplusplus +} +#endif + +#endif // FONS_H + + +#ifdef FONTSTASH_IMPLEMENTATION + +#define FONS_NOTUSED(v) (void)sizeof(v) + + +// Use FreeType on non-Windows systems +#ifndef _WIN32 +//#define FONS_USE_FREETYPE +#endif + +#ifdef _WIN32 +#undef FONS_USE_FREETYPE +#endif + +#ifdef __APPLE__ + #include "TargetConditionals.h" + #if TARGET_OS_IPHONE + #undef FONS_USE_FREETYPE + #endif +#endif + +//#undef FONS_USE_FREETYPE + +//#define FONS_USE_FREETYPE 1 + +#ifdef FONS_USE_FREETYPE + +#include +#include FT_FREETYPE_H +#include FT_ADVANCES_H +#include + +struct FONSttFontImpl { + FT_Face font; +}; +typedef struct FONSttFontImpl FONSttFontImpl; + +static FT_Library ftLibrary; + +static int fons__tt_init() +{ + puts("free type fons init"); + FT_Error ftError; + //FONS_NOTUSED(context); + ftError = FT_Init_FreeType(&ftLibrary); + return ftError == 0; +} + +static int fons__tt_loadFont(FONScontext *context, FONSttFontImpl *font, unsigned char *data, int dataSize) +{ + FT_Error ftError; + FONS_NOTUSED(context); + + //font->font.userdata = stash; + ftError = FT_New_Memory_Face(ftLibrary, (const FT_Byte*)data, dataSize, 0, &font->font); + return ftError == 0; +} + +static void fons__tt_getFontVMetrics(FONSttFontImpl *font, int *ascent, int *descent, int *lineGap) +{ + *ascent = font->font->ascender; + *descent = font->font->descender; + *lineGap = font->font->height - (*ascent - *descent); +} + +static float fons__tt_getPixelHeightScale(FONSttFontImpl *font, float size) +{ + return size / (font->font->ascender - font->font->descender); +} + +static int fons__tt_getGlyphIndex(FONSttFontImpl *font, int codepoint) +{ + return FT_Get_Char_Index(font->font, codepoint); +} + +static int fons__tt_buildGlyphBitmap(FONSttFontImpl *font, int glyph, float size, float scale, + int *advance, int *lsb, int 
*x0, int *y0, int *x1, int *y1) +{ + FT_Error ftError; + FT_GlyphSlot ftGlyph; + FT_Fixed advFixed; + FONS_NOTUSED(scale); + + ftError = FT_Set_Pixel_Sizes(font->font, 0, (FT_UInt)(size * (float)font->font->units_per_EM / (float)(font->font->ascender - font->font->descender))); + if (ftError) return 0; + ftError = FT_Load_Glyph(font->font, glyph, FT_LOAD_RENDER | FT_LOAD_FORCE_AUTOHINT); + if (ftError) return 0; + ftError = FT_Get_Advance(font->font, glyph, FT_LOAD_NO_SCALE, &advFixed); + if (ftError) return 0; + ftGlyph = font->font->glyph; + *advance = (int)advFixed; + *lsb = (int)ftGlyph->metrics.horiBearingX; + *x0 = ftGlyph->bitmap_left; + *x1 = *x0 + ftGlyph->bitmap.width; + *y0 = -ftGlyph->bitmap_top; + *y1 = *y0 + ftGlyph->bitmap.rows; + return 1; +} + +static void fons__tt_renderGlyphBitmap(FONSttFontImpl *font, unsigned char *output, int outWidth, int outHeight, int outStride, + float scaleX, float scaleY, int glyph) +{ + FT_GlyphSlot ftGlyph = font->font->glyph; + int ftGlyphOffset = 0; + int x, y; + FONS_NOTUSED(outWidth); + FONS_NOTUSED(outHeight); + FONS_NOTUSED(scaleX); + FONS_NOTUSED(scaleY); + FONS_NOTUSED(glyph); // glyph has already been loaded by fons__tt_buildGlyphBitmap + + for ( y = 0; y < ftGlyph->bitmap.rows; y++ ) { + for ( x = 0; x < ftGlyph->bitmap.width; x++ ) { + output[(y * outStride) + x] = ftGlyph->bitmap.buffer[ftGlyphOffset++]; + } + } +} + +static int fons__tt_getGlyphKernAdvance(FONSttFontImpl *font, int glyph1, int glyph2) +{ + FT_Vector ftKerning; + FT_Get_Kerning(font->font, glyph1, glyph2, FT_KERNING_DEFAULT, &ftKerning); + return (int)((ftKerning.x + 32) >> 6); // Round up and convert to integer +} + +#else + +#define STB_TRUETYPE_IMPLEMENTATION +#define STBTT_STATIC +static void* fons__tmpalloc(size_t size, void* up); +static void fons__tmpfree(void* ptr, void* up); +#define STBTT_malloc(x,u) fons__tmpalloc(x,u) +#define STBTT_free(x,u) fons__tmpfree(x,u) +#include "stb_truetype.h" + +struct FONSttFontImpl { + stbtt_fontinfo font; +}; +typedef struct FONSttFontImpl FONSttFontImpl; + +static int fons__tt_init(FONScontext *context) +{ + FONS_NOTUSED(context); + return 1; +} + +static int fons__tt_loadFont(FONScontext *context, FONSttFontImpl *font, unsigned char *data, int dataSize) +{ + int stbError; + FONS_NOTUSED(dataSize); + + font->font.userdata = context; + stbError = stbtt_InitFont(&font->font, data, 0); + return stbError; +} + +static void fons__tt_getFontVMetrics(FONSttFontImpl *font, int *ascent, int *descent, int *lineGap) +{ + stbtt_GetFontVMetrics(&font->font, ascent, descent, lineGap); +} + +static float fons__tt_getPixelHeightScale(FONSttFontImpl *font, float size) +{ + return stbtt_ScaleForPixelHeight(&font->font, size); +} + +static int fons__tt_getGlyphIndex(FONSttFontImpl *font, int codepoint) +{ + return stbtt_FindGlyphIndex(&font->font, codepoint); +} + +static int fons__tt_buildGlyphBitmap(FONSttFontImpl *font, int glyph, float size, float scale, + int *advance, int *lsb, int *x0, int *y0, int *x1, int *y1) +{ + FONS_NOTUSED(size); + stbtt_GetGlyphHMetrics(&font->font, glyph, advance, lsb); + stbtt_GetGlyphBitmapBox(&font->font, glyph, scale, scale, x0, y0, x1, y1); + return 1; +} + +static void fons__tt_renderGlyphBitmap(FONSttFontImpl *font, unsigned char *output, int outWidth, int outHeight, int outStride, + float scaleX, float scaleY, int glyph) +{ + stbtt_MakeGlyphBitmap(&font->font, output, outWidth, outHeight, outStride, scaleX, scaleY, glyph); +} + +static int fons__tt_getGlyphKernAdvance(FONSttFontImpl *font, int 
glyph1, int glyph2) +{ + return stbtt_GetGlyphKernAdvance(&font->font, glyph1, glyph2); +} + +#endif + +#ifndef FONS_SCRATCH_BUF_SIZE +# define FONS_SCRATCH_BUF_SIZE 64000 +#endif +#ifndef FONS_HASH_LUT_SIZE +# define FONS_HASH_LUT_SIZE 256 +#endif +#ifndef FONS_INIT_FONTS +# define FONS_INIT_FONTS 4 +#endif +#ifndef FONS_INIT_GLYPHS +# define FONS_INIT_GLYPHS 256 +#endif +#ifndef FONS_INIT_ATLAS_NODES +# define FONS_INIT_ATLAS_NODES 256 +#endif +#ifndef FONS_VERTEX_COUNT +# define FONS_VERTEX_COUNT 1024 +#endif +#ifndef FONS_MAX_STATES +# define FONS_MAX_STATES 20 +#endif +#ifndef FONS_MAX_FALLBACKS +# define FONS_MAX_FALLBACKS 20 +#endif + +static unsigned int fons__hashint(unsigned int a) +{ + a += ~(a<<15); + a ^= (a>>10); + a += (a<<3); + a ^= (a>>6); + a += ~(a<<11); + a ^= (a>>16); + return a; +} + +static int fons__mini(int a, int b) +{ + return a < b ? a : b; +} + +static int fons__maxi(int a, int b) +{ + return a > b ? a : b; +} + +struct FONSglyph +{ + unsigned int codepoint; + int index; + int next; + short size, blur; + short x0,y0,x1,y1; + short xadv,xoff,yoff; +}; +typedef struct FONSglyph FONSglyph; + +struct FONSfont +{ + FONSttFontImpl font; + char name[64]; + unsigned char* data; + int dataSize; + unsigned char freeData; + float ascender; + float descender; + float lineh; + FONSglyph* glyphs; + int cglyphs; + int nglyphs; + int lut[FONS_HASH_LUT_SIZE]; + int fallbacks[FONS_MAX_FALLBACKS]; + int nfallbacks; +}; +typedef struct FONSfont FONSfont; + +struct FONSstate +{ + int font; + int align; + float size; + unsigned int color; + float blur; + float spacing; +}; +typedef struct FONSstate FONSstate; + +struct FONSatlasNode { + short x, y, width; +}; +typedef struct FONSatlasNode FONSatlasNode; + +struct FONSatlas +{ + int width, height; + FONSatlasNode* nodes; + int nnodes; + int cnodes; +}; +typedef struct FONSatlas FONSatlas; + +struct FONScontext +{ + FONSparams params; + float itw,ith; + unsigned char* texData; + int dirtyRect[4]; + FONSfont** fonts; + FONSatlas* atlas; + int cfonts; + int nfonts; + float verts[FONS_VERTEX_COUNT*2]; + float tcoords[FONS_VERTEX_COUNT*2]; + unsigned int colors[FONS_VERTEX_COUNT]; + int nverts; + unsigned char* scratch; + int nscratch; + FONSstate states[FONS_MAX_STATES]; + int nstates; + void (*handleError)(void* uptr, int error, int val); + void* errorUptr; +}; + +#ifdef STB_TRUETYPE_IMPLEMENTATION + +static void* fons__tmpalloc(size_t size, void* up) +{ + unsigned char* ptr; + FONScontext* stash = (FONScontext*)up; + + // 16-byte align the returned pointer + size = (size + 0xf) & ~0xf; + + if (stash->nscratch+(int)size > FONS_SCRATCH_BUF_SIZE) { + if (stash->handleError) + stash->handleError(stash->errorUptr, FONS_SCRATCH_FULL, stash->nscratch+(int)size); + return NULL; + } + ptr = stash->scratch + stash->nscratch; + stash->nscratch += (int)size; + return ptr; +} + +static void fons__tmpfree(void* ptr, void* up) +{ + (void)ptr; + (void)up; + // empty +} + +#endif // STB_TRUETYPE_IMPLEMENTATION + +// Copyright (c) 2008-2010 Bjoern Hoehrmann +// See http://bjoern.hoehrmann.de/utf-8/decoder/dfa/ for details. + +#define FONS_UTF8_ACCEPT 0 +#define FONS_UTF8_REJECT 12 + +static unsigned int fons__decutf8(unsigned int* state, unsigned int* codep, unsigned int byte) +{ + static const unsigned char utf8d[] = { + // The first part of the table maps bytes to character classes that + // to reduce the size of the transition table and create bitmasks. 
+ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, + 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, + 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, + 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, + 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, + 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7, 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7, + 8,8,2,2,2,2,2,2,2,2,2,2,2,2,2,2, 2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2, + 10,3,3,3,3,3,3,3,3,3,3,3,3,4,3,3, 11,6,6,6,5,8,8,8,8,8,8,8,8,8,8,8, + + // The second part is a transition table that maps a combination + // of a state of the automaton and a character class to a state. + 0,12,24,36,60,96,84,12,12,12,48,72, 12,12,12,12,12,12,12,12,12,12,12,12, + 12, 0,12,12,12,12,12, 0,12, 0,12,12, 12,24,12,12,12,12,12,24,12,24,12,12, + 12,12,12,12,12,12,12,24,12,12,12,12, 12,24,12,12,12,12,12,12,12,24,12,12, + 12,12,12,12,12,12,12,36,12,36,12,12, 12,36,12,12,12,12,12,36,12,36,12,12, + 12,36,12,12,12,12,12,12,12,12,12,12, + }; + + unsigned int type = utf8d[byte]; + + *codep = (*state != FONS_UTF8_ACCEPT) ? + (byte & 0x3fu) | (*codep << 6) : + (0xff >> type) & (byte); + + *state = utf8d[256 + *state + type]; + return *state; +} + +// Atlas based on Skyline Bin Packer by Jukka Jylänki + +static void fons__deleteAtlas(FONSatlas* atlas) +{ + if (atlas == NULL) return; + if (atlas->nodes != NULL) FONTSTASH_FREE(atlas->nodes); + FONTSTASH_FREE(atlas); +} + +static FONSatlas* fons__allocAtlas(int w, int h, int nnodes) +{ + FONSatlas* atlas = NULL; + + // Allocate memory for the font stash. + atlas = (FONSatlas*)FONTSTASH_MALLOC(sizeof(FONSatlas)); + if (atlas == NULL) goto error; + memset(atlas, 0, sizeof(FONSatlas)); + + atlas->width = w; + atlas->height = h; + + // Allocate space for skyline nodes + atlas->nodes = (FONSatlasNode*)FONTSTASH_MALLOC(sizeof(FONSatlasNode) * nnodes); + if (atlas->nodes == NULL) goto error; + memset(atlas->nodes, 0, sizeof(FONSatlasNode) * nnodes); + atlas->nnodes = 0; + atlas->cnodes = nnodes; + + // Init root node. + atlas->nodes[0].x = 0; + atlas->nodes[0].y = 0; + atlas->nodes[0].width = (short)w; + atlas->nnodes++; + + return atlas; + +error: + if (atlas) fons__deleteAtlas(atlas); + return NULL; +} + +static int fons__atlasInsertNode(FONSatlas* atlas, int idx, int x, int y, int w) +{ + int i; + // Insert node + if (atlas->nnodes+1 > atlas->cnodes) { + atlas->cnodes = atlas->cnodes == 0 ? 8 : atlas->cnodes * 2; + atlas->nodes = (FONSatlasNode*)FONTSTASH_REALLOC(atlas->nodes, sizeof(FONSatlasNode) * atlas->cnodes); + if (atlas->nodes == NULL) + return 0; + } + for (i = atlas->nnodes; i > idx; i--) + atlas->nodes[i] = atlas->nodes[i-1]; + atlas->nodes[idx].x = (short)x; + atlas->nodes[idx].y = (short)y; + atlas->nodes[idx].width = (short)w; + atlas->nnodes++; + + return 1; +} + +static void fons__atlasRemoveNode(FONSatlas* atlas, int idx) +{ + int i; + if (atlas->nnodes == 0) return; + for (i = idx; i < atlas->nnodes-1; i++) + atlas->nodes[i] = atlas->nodes[i+1]; + atlas->nnodes--; +} + +static void fons__atlasExpand(FONSatlas* atlas, int w, int h) +{ + // Insert node for empty space + if (w > atlas->width) + fons__atlasInsertNode(atlas, atlas->nnodes, atlas->width, 0, w - atlas->width); + atlas->width = w; + atlas->height = h; +} + +static void fons__atlasReset(FONSatlas* atlas, int w, int h) +{ + atlas->width = w; + atlas->height = h; + atlas->nnodes = 0; + + // Init root node. 
+ atlas->nodes[0].x = 0; + atlas->nodes[0].y = 0; + atlas->nodes[0].width = (short)w; + atlas->nnodes++; +} + +static int fons__atlasAddSkylineLevel(FONSatlas* atlas, int idx, int x, int y, int w, int h) +{ + int i; + + // Insert new node + if (fons__atlasInsertNode(atlas, idx, x, y+h, w) == 0) + return 0; + + // Delete skyline segments that fall under the shadow of the new segment. + for (i = idx+1; i < atlas->nnodes; i++) { + if (atlas->nodes[i].x < atlas->nodes[i-1].x + atlas->nodes[i-1].width) { + int shrink = atlas->nodes[i-1].x + atlas->nodes[i-1].width - atlas->nodes[i].x; + atlas->nodes[i].x += (short)shrink; + atlas->nodes[i].width -= (short)shrink; + if (atlas->nodes[i].width <= 0) { + fons__atlasRemoveNode(atlas, i); + i--; + } else { + break; + } + } else { + break; + } + } + + // Merge same height skyline segments that are next to each other. + for (i = 0; i < atlas->nnodes-1; i++) { + if (atlas->nodes[i].y == atlas->nodes[i+1].y) { + atlas->nodes[i].width += atlas->nodes[i+1].width; + fons__atlasRemoveNode(atlas, i+1); + i--; + } + } + + return 1; +} + +static int fons__atlasRectFits(FONSatlas* atlas, int i, int w, int h) +{ + // Checks if there is enough space at the location of skyline span 'i', + // and return the max height of all skyline spans under that at that location, + // (think tetris block being dropped at that position). Or -1 if no space found. + int x = atlas->nodes[i].x; + int y = atlas->nodes[i].y; + int spaceLeft; + if (x + w > atlas->width) + return -1; + spaceLeft = w; + while (spaceLeft > 0) { + if (i == atlas->nnodes) return -1; + y = fons__maxi(y, atlas->nodes[i].y); + if (y + h > atlas->height) return -1; + spaceLeft -= atlas->nodes[i].width; + ++i; + } + return y; +} + +static int fons__atlasAddRect(FONSatlas* atlas, int rw, int rh, int* rx, int* ry) +{ + int besth = atlas->height, bestw = atlas->width, besti = -1; + int bestx = -1, besty = -1, i; + + // Bottom left fit heuristic. + for (i = 0; i < atlas->nnodes; i++) { + int y = fons__atlasRectFits(atlas, i, rw, rh); + if (y != -1) { + if (y + rh < besth || (y + rh == besth && atlas->nodes[i].width < bestw)) { + besti = i; + bestw = atlas->nodes[i].width; + besth = y + rh; + bestx = atlas->nodes[i].x; + besty = y; + } + } + } + + if (besti == -1) + return 0; + + // Perform the actual packing. + if (fons__atlasAddSkylineLevel(atlas, besti, bestx, besty, rw, rh) == 0) + return 0; + + *rx = bestx; + *ry = besty; + + return 1; +} + +static void fons__addWhiteRect(FONScontext* stash, int w, int h) +{ + int x, y, gx, gy; + unsigned char* dst; + if (fons__atlasAddRect(stash->atlas, w, h, &gx, &gy) == 0) + return; + + // Rasterize + dst = &stash->texData[gx + gy * stash->params.width]; + for (y = 0; y < h; y++) { + for (x = 0; x < w; x++) + dst[x] = 0xff; + dst += stash->params.width; + } + + stash->dirtyRect[0] = fons__mini(stash->dirtyRect[0], gx); + stash->dirtyRect[1] = fons__mini(stash->dirtyRect[1], gy); + stash->dirtyRect[2] = fons__maxi(stash->dirtyRect[2], gx+w); + stash->dirtyRect[3] = fons__maxi(stash->dirtyRect[3], gy+h); +} + +FONScontext* fonsCreateInternal(FONSparams* params) +{ + FONScontext* stash = NULL; + + // Allocate memory for the font stash. + stash = (FONScontext*)FONTSTASH_MALLOC(sizeof(FONScontext)); + if (stash == NULL) goto error; + memset(stash, 0, sizeof(FONScontext)); + + stash->params = *params; + + // Allocate scratch buffer. 
+ stash->scratch = (unsigned char*)FONTSTASH_MALLOC(FONS_SCRATCH_BUF_SIZE); + if (stash->scratch == NULL) goto error; + + // Initialize implementation library + if (!fons__tt_init(stash)) goto error; + + if (stash->params.renderCreate != NULL) { + if (stash->params.renderCreate(stash->params.userPtr, stash->params.width, stash->params.height) == 0) + goto error; + } + + stash->atlas = fons__allocAtlas(stash->params.width, stash->params.height, FONS_INIT_ATLAS_NODES); + if (stash->atlas == NULL) goto error; + + // Allocate space for fonts. + stash->fonts = (FONSfont**)FONTSTASH_MALLOC(sizeof(FONSfont*) * FONS_INIT_FONTS); + if (stash->fonts == NULL) goto error; + memset(stash->fonts, 0, sizeof(FONSfont*) * FONS_INIT_FONTS); + stash->cfonts = FONS_INIT_FONTS; + stash->nfonts = 0; + + // Create texture for the cache. + stash->itw = 1.0f/stash->params.width; + stash->ith = 1.0f/stash->params.height; + stash->texData = (unsigned char*)FONTSTASH_MALLOC(stash->params.width * stash->params.height); + if (stash->texData == NULL) goto error; + memset(stash->texData, 0, stash->params.width * stash->params.height); + + stash->dirtyRect[0] = stash->params.width; + stash->dirtyRect[1] = stash->params.height; + stash->dirtyRect[2] = 0; + stash->dirtyRect[3] = 0; + + // Add white rect at 0,0 for debug drawing. + fons__addWhiteRect(stash, 2,2); + + fonsPushState(stash); + fonsClearState(stash); + + return stash; + +error: + fonsDeleteInternal(stash); + return NULL; +} + +static FONSstate* fons__getState(FONScontext* stash) +{ + return &stash->states[stash->nstates-1]; +} + +int fonsAddFallbackFont(FONScontext* stash, int base, int fallback) +{ + FONSfont* baseFont = stash->fonts[base]; + if (baseFont->nfallbacks < FONS_MAX_FALLBACKS) { + baseFont->fallbacks[baseFont->nfallbacks++] = fallback; + return 1; + } + return 0; +} + +void fonsSetSize(FONScontext* stash, float size) +{ + fons__getState(stash)->size = size; +} + +void fonsSetColor(FONScontext* stash, unsigned int color) +{ + fons__getState(stash)->color = color; +} + +void fonsSetSpacing(FONScontext* stash, float spacing) +{ + fons__getState(stash)->spacing = spacing; +} + +void fonsSetBlur(FONScontext* stash, float blur) +{ + fons__getState(stash)->blur = blur; +} + +void fonsSetAlign(FONScontext* stash, int align) +{ + fons__getState(stash)->align = align; +} + +void fonsSetFont(FONScontext* stash, int font) +{ + fons__getState(stash)->font = font; +} + +void fonsPushState(FONScontext* stash) +{ + if (stash->nstates >= FONS_MAX_STATES) { + if (stash->handleError) + stash->handleError(stash->errorUptr, FONS_STATES_OVERFLOW, 0); + return; + } + if (stash->nstates > 0) + memcpy(&stash->states[stash->nstates], &stash->states[stash->nstates-1], sizeof(FONSstate)); + stash->nstates++; +} + +void fonsPopState(FONScontext* stash) +{ + if (stash->nstates <= 1) { + if (stash->handleError) + stash->handleError(stash->errorUptr, FONS_STATES_UNDERFLOW, 0); + return; + } + stash->nstates--; +} + +void fonsClearState(FONScontext* stash) +{ + FONSstate* state = fons__getState(stash); + state->size = 12.0f; + state->color = 0xffffffff; + state->font = 0; + state->blur = 0; + state->spacing = 0; + state->align = FONS_ALIGN_LEFT | FONS_ALIGN_BASELINE; +} + +static void fons__freeFont(FONSfont* font) +{ + if (font == NULL) return; + if (font->glyphs) FONTSTASH_FREE(font->glyphs); + if (font->freeData && font->data) FONTSTASH_FREE(font->data); + FONTSTASH_FREE(font); +} + +static int fons__allocFont(FONScontext* stash) +{ + FONSfont* font = NULL; + if (stash->nfonts+1 
> stash->cfonts) { + stash->cfonts = stash->cfonts == 0 ? 8 : stash->cfonts * 2; + stash->fonts = (FONSfont**)FONTSTASH_REALLOC(stash->fonts, sizeof(FONSfont*) * stash->cfonts); + if (stash->fonts == NULL) + return -1; + } + font = (FONSfont*)FONTSTASH_MALLOC(sizeof(FONSfont)); + if (font == NULL) goto error; + memset(font, 0, sizeof(FONSfont)); + + font->glyphs = (FONSglyph*)FONTSTASH_MALLOC(sizeof(FONSglyph) * FONS_INIT_GLYPHS); + if (font->glyphs == NULL) goto error; + font->cglyphs = FONS_INIT_GLYPHS; + font->nglyphs = 0; + + stash->fonts[stash->nfonts++] = font; + return stash->nfonts-1; + +error: + fons__freeFont(font); + + return FONS_INVALID; +} + +int fonsAddFontMem(FONScontext* stash, const char* name, unsigned char* data, int dataSize, int freeData) +{ + int i, ascent, descent, fh, lineGap; + FONSfont* font; + + int idx = fons__allocFont(stash); + if (idx == FONS_INVALID) + return FONS_INVALID; + + font = stash->fonts[idx]; + + strncpy(font->name, name, sizeof(font->name)); + font->name[sizeof(font->name)-1] = '\0'; + + // Init hash lookup. + for (i = 0; i < FONS_HASH_LUT_SIZE; ++i) + font->lut[i] = -1; + + // Read in the font data. + font->dataSize = dataSize; + font->data = data; + font->freeData = (unsigned char)freeData; + + // Init font + stash->nscratch = 0; + if (!fons__tt_loadFont(stash, &font->font, data, dataSize)) goto error; + + // Store normalized line height. The real line height is got + // by multiplying the lineh by font size. + fons__tt_getFontVMetrics( &font->font, &ascent, &descent, &lineGap); + fh = ascent - descent; + font->ascender = (float)ascent / (float)fh; + font->descender = (float)descent / (float)fh; + font->lineh = (float)(fh + lineGap) / (float)fh; + + return idx; + +error: + fons__freeFont(font); + stash->nfonts--; + return FONS_INVALID; +} + +int fonsGetFontByName(FONScontext* s, const char* name) +{ + int i; + for (i = 0; i < s->nfonts; i++) { + if (strcmp(s->fonts[i]->name, name) == 0) + return i; + } + return FONS_INVALID; +} + + +static FONSglyph* fons__allocGlyph(FONSfont* font) +{ + if (font->nglyphs+1 > font->cglyphs) { + font->cglyphs = font->cglyphs == 0 ? 
8 : font->cglyphs * 2;
+		font->glyphs = (FONSglyph*)FONTSTASH_REALLOC(font->glyphs, sizeof(FONSglyph) * font->cglyphs);
+		if (font->glyphs == NULL) return NULL;
+	}
+	font->nglyphs++;
+	return &font->glyphs[font->nglyphs-1];
+}
+
+
+// Based on Exponential blur, Jani Huhtanen, 2006
+
+#define APREC 16
+#define ZPREC 7
+
+static void fons__blurCols(unsigned char* dst, int w, int h, int dstStride, int alpha)
+{
+	int x, y;
+	for (y = 0; y < h; y++) {
+		int z = 0; // force zero border
+		for (x = 1; x < w; x++) {
+			z += (alpha * (((int)(dst[x]) << ZPREC) - z)) >> APREC;
+			dst[x] = (unsigned char)(z >> ZPREC);
+		}
+		dst[w-1] = 0; // force zero border
+		z = 0;
+		for (x = w-2; x >= 0; x--) {
+			z += (alpha * (((int)(dst[x]) << ZPREC) - z)) >> APREC;
+			dst[x] = (unsigned char)(z >> ZPREC);
+		}
+		dst[0] = 0; // force zero border
+		dst += dstStride;
+	}
+}
+
+static void fons__blurRows(unsigned char* dst, int w, int h, int dstStride, int alpha)
+{
+	int x, y;
+	for (x = 0; x < w; x++) {
+		int z = 0; // force zero border
+		for (y = dstStride; y < h*dstStride; y += dstStride) {
+			z += (alpha * (((int)(dst[y]) << ZPREC) - z)) >> APREC;
+			dst[y] = (unsigned char)(z >> ZPREC);
+		}
+		dst[(h-1)*dstStride] = 0; // force zero border
+		z = 0;
+		for (y = (h-2)*dstStride; y >= 0; y -= dstStride) {
+			z += (alpha * (((int)(dst[y]) << ZPREC) - z)) >> APREC;
+			dst[y] = (unsigned char)(z >> ZPREC);
+		}
+		dst[0] = 0; // force zero border
+		dst++;
+	}
+}
+
+
+static void fons__blur(FONScontext* stash, unsigned char* dst, int w, int h, int dstStride, int blur)
+{
+	int alpha;
+	float sigma;
+	(void)stash;
+
+	if (blur < 1)
+		return;
+	// Calculate the alpha such that 90% of the kernel is within the radius. (Kernel extends to infinity)
+	sigma = (float)blur * 0.57735f; // 1 / sqrt(3)
+	alpha = (int)((1<<APREC) * (1.0f - expf(-2.3f / (sigma+1.0f))));
+	fons__blurRows(dst, w, h, dstStride, alpha);
+	fons__blurCols(dst, w, h, dstStride, alpha);
+	fons__blurRows(dst, w, h, dstStride, alpha);
+	fons__blurCols(dst, w, h, dstStride, alpha);
+}
+
+static FONSglyph* fons__getGlyph(FONScontext* stash, FONSfont* font, unsigned int codepoint, short isize, short iblur)
+{
+	int i, g, advance, lsb, x0, y0, x1, y1, gw, gh, gx, gy, x, y;
+	float scale;
+	FONSglyph* glyph = NULL;
+	unsigned int h;
+	float size = isize/10.0f;
+	int pad, added;
+	unsigned char* bdst;
+	unsigned char* dst;
+	FONSfont* renderFont = font;
+
+	if (isize < 2) return NULL;
+	if (iblur > 20) iblur = 20;
+	pad = iblur+2;
+
+	// Reset allocator.
+	stash->nscratch = 0;
+
+	// Find code point and size.
+	h = fons__hashint(codepoint) & (FONS_HASH_LUT_SIZE-1);
+	i = font->lut[h];
+	while (i != -1) {
+		if (font->glyphs[i].codepoint == codepoint && font->glyphs[i].size == isize && font->glyphs[i].blur == iblur)
+			return &font->glyphs[i];
+		i = font->glyphs[i].next;
+	}
+
+	// Could not find glyph, create it.
+	g = fons__tt_getGlyphIndex(&font->font, codepoint);
+	// Try to find the glyph in fallback fonts.
+	if (g == 0) {
+		for (i = 0; i < font->nfallbacks; ++i) {
+			FONSfont* fallbackFont = stash->fonts[font->fallbacks[i]];
+			int fallbackIndex = fons__tt_getGlyphIndex(&fallbackFont->font, codepoint);
+			if (fallbackIndex != 0) {
+				g = fallbackIndex;
+				renderFont = fallbackFont;
+				break;
+			}
+		}
+		// It is possible that we did not find a fallback glyph.
+		// In that case the glyph index 'g' is 0, and we'll proceed below and cache empty glyph.
+	}
+	scale = fons__tt_getPixelHeightScale(&renderFont->font, size);
+	fons__tt_buildGlyphBitmap(&renderFont->font, g, size, scale, &advance, &lsb, &x0, &y0, &x1, &y1);
+	gw = x1-x0 + pad*2;
+	gh = y1-y0 + pad*2;
+
+	// Find free spot for the rect in the atlas
+	added = fons__atlasAddRect(stash->atlas, gw, gh, &gx, &gy);
+	if (added == 0 && stash->handleError != NULL) {
+		// Atlas is full, let the user to resize the atlas (or not), and try again.
+		stash->handleError(stash->errorUptr, FONS_ATLAS_FULL, 0);
+		added = fons__atlasAddRect(stash->atlas, gw, gh, &gx, &gy);
+	}
+	if (added == 0) return NULL;
+
+	// Init glyph.
+ glyph = fons__allocGlyph(font); + glyph->codepoint = codepoint; + glyph->size = isize; + glyph->blur = iblur; + glyph->index = g; + glyph->x0 = (short)gx; + glyph->y0 = (short)gy; + glyph->x1 = (short)(glyph->x0+gw); + glyph->y1 = (short)(glyph->y0+gh); + glyph->xadv = (short)(scale * advance * 10.0f); + glyph->xoff = (short)(x0 - pad); + glyph->yoff = (short)(y0 - pad); + glyph->next = 0; + + // Insert char to hash lookup. + glyph->next = font->lut[h]; + font->lut[h] = font->nglyphs-1; + + // Rasterize + dst = &stash->texData[(glyph->x0+pad) + (glyph->y0+pad) * stash->params.width]; + fons__tt_renderGlyphBitmap(&renderFont->font, dst, gw-pad*2,gh-pad*2, stash->params.width, scale,scale, g); + + // Make sure there is one pixel empty border. + dst = &stash->texData[glyph->x0 + glyph->y0 * stash->params.width]; + for (y = 0; y < gh; y++) { + dst[y*stash->params.width] = 0; + dst[gw-1 + y*stash->params.width] = 0; + } + for (x = 0; x < gw; x++) { + dst[x] = 0; + dst[x + (gh-1)*stash->params.width] = 0; + } + + // Debug code to color the glyph background +/* unsigned char* fdst = &stash->texData[glyph->x0 + glyph->y0 * stash->params.width]; + for (y = 0; y < gh; y++) { + for (x = 0; x < gw; x++) { + int a = (int)fdst[x+y*stash->params.width] + 20; + if (a > 255) a = 255; + fdst[x+y*stash->params.width] = a; + } + }*/ + + // Blur + if (iblur > 0) { + stash->nscratch = 0; + bdst = &stash->texData[glyph->x0 + glyph->y0 * stash->params.width]; + fons__blur(stash, bdst, gw,gh, stash->params.width, iblur); + } + + stash->dirtyRect[0] = fons__mini(stash->dirtyRect[0], glyph->x0); + stash->dirtyRect[1] = fons__mini(stash->dirtyRect[1], glyph->y0); + stash->dirtyRect[2] = fons__maxi(stash->dirtyRect[2], glyph->x1); + stash->dirtyRect[3] = fons__maxi(stash->dirtyRect[3], glyph->y1); + + return glyph; +} + +static void fons__getQuad(FONScontext* stash, FONSfont* font, + int prevGlyphIndex, FONSglyph* glyph, + float scale, float spacing, float* x, float* y, FONSquad* q) +{ + float rx,ry,xoff,yoff,x0,y0,x1,y1; + + if (prevGlyphIndex != -1) { + float adv = fons__tt_getGlyphKernAdvance(&font->font, prevGlyphIndex, glyph->index) * scale; + *x += (int)(adv + spacing + 0.5f); + } + + // Each glyph has 2px border to allow good interpolation, + // one pixel to prevent leaking, and one to allow good interpolation for rendering. + // Inset the texture region by one pixel for correct interpolation. 
+ xoff = (short)(glyph->xoff+1); + yoff = (short)(glyph->yoff+1); + x0 = (float)(glyph->x0+1); + y0 = (float)(glyph->y0+1); + x1 = (float)(glyph->x1-1); + y1 = (float)(glyph->y1-1); + + if (stash->params.flags & FONS_ZERO_TOPLEFT) { + rx = (float)(int)(*x + xoff); + ry = (float)(int)(*y + yoff); + + q->x0 = rx; + q->y0 = ry; + q->x1 = rx + x1 - x0; + q->y1 = ry + y1 - y0; + + q->s0 = x0 * stash->itw; + q->t0 = y0 * stash->ith; + q->s1 = x1 * stash->itw; + q->t1 = y1 * stash->ith; + } else { + rx = (float)(int)(*x + xoff); + ry = (float)(int)(*y - yoff); + + q->x0 = rx; + q->y0 = ry; + q->x1 = rx + x1 - x0; + q->y1 = ry - y1 + y0; + + q->s0 = x0 * stash->itw; + q->t0 = y0 * stash->ith; + q->s1 = x1 * stash->itw; + q->t1 = y1 * stash->ith; + } + + *x += (int)(glyph->xadv / 10.0f + 0.5f); +} + +static void fons__flush(FONScontext* stash) +{ + // Flush texture + if (stash->dirtyRect[0] < stash->dirtyRect[2] && stash->dirtyRect[1] < stash->dirtyRect[3]) { + if (stash->params.renderUpdate != NULL) + stash->params.renderUpdate(stash->params.userPtr, stash->dirtyRect, stash->texData); + // Reset dirty rect + stash->dirtyRect[0] = stash->params.width; + stash->dirtyRect[1] = stash->params.height; + stash->dirtyRect[2] = 0; + stash->dirtyRect[3] = 0; + } + + // Flush triangles + if (stash->nverts > 0) { + if (stash->params.renderDraw != NULL) + stash->params.renderDraw(stash->params.userPtr, stash->verts, stash->tcoords, stash->colors, stash->nverts); + stash->nverts = 0; + } +} + +static __inline void fons__vertex(FONScontext* stash, float x, float y, float s, float t, unsigned int c) +{ + stash->verts[stash->nverts*2+0] = x; + stash->verts[stash->nverts*2+1] = y; + stash->tcoords[stash->nverts*2+0] = s; + stash->tcoords[stash->nverts*2+1] = t; + stash->colors[stash->nverts] = c; + stash->nverts++; +} + +static float fons__getVertAlign(FONScontext* stash, FONSfont* font, int align, short isize) +{ + if (stash->params.flags & FONS_ZERO_TOPLEFT) { + if (align & FONS_ALIGN_TOP) { + return font->ascender * (float)isize/10.0f; + } else if (align & FONS_ALIGN_MIDDLE) { + return (font->ascender + font->descender) / 2.0f * (float)isize/10.0f; + } else if (align & FONS_ALIGN_BASELINE) { + return 0.0f; + } else if (align & FONS_ALIGN_BOTTOM) { + return font->descender * (float)isize/10.0f; + } + } else { + if (align & FONS_ALIGN_TOP) { + return -font->ascender * (float)isize/10.0f; + } else if (align & FONS_ALIGN_MIDDLE) { + return -(font->ascender + font->descender) / 2.0f * (float)isize/10.0f; + } else if (align & FONS_ALIGN_BASELINE) { + return 0.0f; + } else if (align & FONS_ALIGN_BOTTOM) { + return -font->descender * (float)isize/10.0f; + } + } + return 0.0; +} + +FONS_DEF float fonsDrawText(FONScontext* stash, + float x, float y, + const char* str, const char* end) +{ + FONSstate* state = fons__getState(stash); + unsigned int codepoint; + unsigned int utf8state = 0; + FONSglyph* glyph = NULL; + FONSquad q; + int prevGlyphIndex = -1; + short isize = (short)(state->size*10.0f); + short iblur = (short)state->blur; + float scale; + FONSfont* font; + float width; + + if (stash == NULL) return x; + if (state->font < 0 || state->font >= stash->nfonts) return x; + font = stash->fonts[state->font]; + if (font->data == NULL) return x; + + scale = fons__tt_getPixelHeightScale(&font->font, (float)isize/10.0f); + + if (end == NULL) + end = str + strlen(str); + + // Align horizontally + if (state->align & FONS_ALIGN_LEFT) { + // empty + } else if (state->align & FONS_ALIGN_RIGHT) { + width = fonsTextBounds(stash, 
x,y, str, end, NULL); + x -= width; + } else if (state->align & FONS_ALIGN_CENTER) { + width = fonsTextBounds(stash, x,y, str, end, NULL); + x -= width * 0.5f; + } + // Align vertically. + y += fons__getVertAlign(stash, font, state->align, isize); + + for (; str != end; ++str) { + if (fons__decutf8(&utf8state, &codepoint, *(const unsigned char*)str)) + continue; + glyph = fons__getGlyph(stash, font, codepoint, isize, iblur); + if (glyph != NULL) { + fons__getQuad(stash, font, prevGlyphIndex, glyph, scale, state->spacing, &x, &y, &q); + + if (stash->nverts+6 > FONS_VERTEX_COUNT) + fons__flush(stash); + + fons__vertex(stash, q.x0, q.y0, q.s0, q.t0, state->color); + fons__vertex(stash, q.x1, q.y1, q.s1, q.t1, state->color); + fons__vertex(stash, q.x1, q.y0, q.s1, q.t0, state->color); + + fons__vertex(stash, q.x0, q.y0, q.s0, q.t0, state->color); + fons__vertex(stash, q.x0, q.y1, q.s0, q.t1, state->color); + fons__vertex(stash, q.x1, q.y1, q.s1, q.t1, state->color); + } + prevGlyphIndex = glyph != NULL ? glyph->index : -1; + } + fons__flush(stash); + + return x; +} + +FONS_DEF int fonsTextIterInit(FONScontext* stash, FONStextIter* iter, + float x, float y, const char* str, const char* end) +{ + FONSstate* state = fons__getState(stash); + float width; + + memset(iter, 0, sizeof(*iter)); + + if (stash == NULL) return 0; + if (state->font < 0 || state->font >= stash->nfonts) return 0; + iter->font = stash->fonts[state->font]; + if (iter->font->data == NULL) return 0; + + iter->isize = (short)(state->size*10.0f); + iter->iblur = (short)state->blur; + iter->scale = fons__tt_getPixelHeightScale(&iter->font->font, (float)iter->isize/10.0f); + + // Align horizontally + if (state->align & FONS_ALIGN_LEFT) { + // empty + } else if (state->align & FONS_ALIGN_RIGHT) { + width = fonsTextBounds(stash, x,y, str, end, NULL); + x -= width; + } else if (state->align & FONS_ALIGN_CENTER) { + width = fonsTextBounds(stash, x,y, str, end, NULL); + x -= width * 0.5f; + } + // Align vertically. + y += fons__getVertAlign(stash, iter->font, state->align, iter->isize); + + if (end == NULL) + end = str + strlen(str); + + iter->x = iter->nextx = x; + iter->y = iter->nexty = y; + iter->spacing = state->spacing; + iter->str = str; + iter->next = str; + iter->end = end; + iter->codepoint = 0; + iter->prevGlyphIndex = -1; + + return 1; +} + +FONS_DEF int fonsTextIterNext(FONScontext* stash, FONStextIter* iter, FONSquad* quad) +{ + FONSglyph* glyph = NULL; + const char* str = iter->next; + iter->str = iter->next; + + if (str == iter->end) + return 0; + + for (; str != iter->end; str++) { + if (fons__decutf8(&iter->utf8state, &iter->codepoint, *(const unsigned char*)str)) + continue; + str++; + // Get glyph and quad + iter->x = iter->nextx; + iter->y = iter->nexty; + glyph = fons__getGlyph(stash, iter->font, iter->codepoint, iter->isize, iter->iblur); + if (glyph != NULL) + fons__getQuad(stash, iter->font, iter->prevGlyphIndex, glyph, iter->scale, iter->spacing, &iter->nextx, &iter->nexty, quad); + iter->prevGlyphIndex = glyph != NULL ? glyph->index : -1; + break; + } + iter->next = str; + + return 1; +} + +FONS_DEF void fonsDrawDebug(FONScontext* stash, float x, float y) +{ + int i; + int w = stash->params.width; + int h = stash->params.height; + float u = w == 0 ? 0 : (1.0f / w); + float v = h == 0 ? 
0 : (1.0f / h); + + if (stash->nverts+6+6 > FONS_VERTEX_COUNT) + fons__flush(stash); + + // Draw background + fons__vertex(stash, x+0, y+0, u, v, 0x0fffffff); + fons__vertex(stash, x+w, y+h, u, v, 0x0fffffff); + fons__vertex(stash, x+w, y+0, u, v, 0x0fffffff); + + fons__vertex(stash, x+0, y+0, u, v, 0x0fffffff); + fons__vertex(stash, x+0, y+h, u, v, 0x0fffffff); + fons__vertex(stash, x+w, y+h, u, v, 0x0fffffff); + + // Draw texture + fons__vertex(stash, x+0, y+0, 0, 0, 0xffffffff); + fons__vertex(stash, x+w, y+h, 1, 1, 0xffffffff); + fons__vertex(stash, x+w, y+0, 1, 0, 0xffffffff); + + fons__vertex(stash, x+0, y+0, 0, 0, 0xffffffff); + fons__vertex(stash, x+0, y+h, 0, 1, 0xffffffff); + fons__vertex(stash, x+w, y+h, 1, 1, 0xffffffff); + + // Drawbug draw atlas + for (i = 0; i < stash->atlas->nnodes; i++) { + FONSatlasNode* n = &stash->atlas->nodes[i]; + + if (stash->nverts+6 > FONS_VERTEX_COUNT) + fons__flush(stash); + + fons__vertex(stash, x+n->x+0, y+n->y+0, u, v, 0xc00000ff); + fons__vertex(stash, x+n->x+n->width, y+n->y+1, u, v, 0xc00000ff); + fons__vertex(stash, x+n->x+n->width, y+n->y+0, u, v, 0xc00000ff); + + fons__vertex(stash, x+n->x+0, y+n->y+0, u, v, 0xc00000ff); + fons__vertex(stash, x+n->x+0, y+n->y+1, u, v, 0xc00000ff); + fons__vertex(stash, x+n->x+n->width, y+n->y+1, u, v, 0xc00000ff); + } + + fons__flush(stash); +} + +FONS_DEF float fonsTextBounds(FONScontext* stash, + float x, float y, + const char* str, const char* end, + float* bounds) +{ + FONSstate* state = fons__getState(stash); + unsigned int codepoint; + unsigned int utf8state = 0; + FONSquad q; + FONSglyph* glyph = NULL; + int prevGlyphIndex = -1; + short isize = (short)(state->size*10.0f); + short iblur = (short)state->blur; + float scale; + FONSfont* font; + float startx, advance; + float minx, miny, maxx, maxy; + + if (stash == NULL) return 0; + if (state->font < 0 || state->font >= stash->nfonts) return 0; + font = stash->fonts[state->font]; + if (font->data == NULL) return 0; + + scale = fons__tt_getPixelHeightScale(&font->font, (float)isize/10.0f); + + // Align vertically. + y += fons__getVertAlign(stash, font, state->align, isize); + + minx = maxx = x; + miny = maxy = y; + startx = x; + + if (end == NULL) + end = str + strlen(str); + + for (; str != end; ++str) { + if (fons__decutf8(&utf8state, &codepoint, *(const unsigned char*)str)) + continue; + glyph = fons__getGlyph(stash, font, codepoint, isize, iblur); + if (glyph != NULL) { + fons__getQuad(stash, font, prevGlyphIndex, glyph, scale, state->spacing, &x, &y, &q); + if (q.x0 < minx) minx = q.x0; + if (q.x1 > maxx) maxx = q.x1; + if (stash->params.flags & FONS_ZERO_TOPLEFT) { + if (q.y0 < miny) miny = q.y0; + if (q.y1 > maxy) maxy = q.y1; + } else { + if (q.y1 < miny) miny = q.y1; + if (q.y0 > maxy) maxy = q.y0; + } + } + prevGlyphIndex = glyph != NULL ? 
glyph->index : -1; + } + + advance = x - startx; + + // Align horizontally + if (state->align & FONS_ALIGN_LEFT) { + // empty + } else if (state->align & FONS_ALIGN_RIGHT) { + minx -= advance; + maxx -= advance; + } else if (state->align & FONS_ALIGN_CENTER) { + minx -= advance * 0.5f; + maxx -= advance * 0.5f; + } + + if (bounds) { + bounds[0] = minx; + bounds[1] = miny; + bounds[2] = maxx; + bounds[3] = maxy; + } + + return advance; +} + +FONS_DEF void fonsVertMetrics(FONScontext* stash, + float* ascender, float* descender, float* lineh) +{ + FONSfont* font; + FONSstate* state = fons__getState(stash); + short isize; + + if (stash == NULL) return; + if (state->font < 0 || state->font >= stash->nfonts) return; + font = stash->fonts[state->font]; + isize = (short)(state->size*10.0f); + if (font->data == NULL) return; + + if (ascender) + *ascender = font->ascender*isize/10.0f; + if (descender) + *descender = font->descender*isize/10.0f; + if (lineh) + *lineh = font->lineh*isize/10.0f; +} + +FONS_DEF void fonsLineBounds(FONScontext* stash, float y, float* miny, float* maxy) +{ + FONSfont* font; + FONSstate* state = fons__getState(stash); + short isize; + + if (stash == NULL) return; + if (state->font < 0 || state->font >= stash->nfonts) return; + font = stash->fonts[state->font]; + isize = (short)(state->size*10.0f); + if (font->data == NULL) return; + + y += fons__getVertAlign(stash, font, state->align, isize); + + if (stash->params.flags & FONS_ZERO_TOPLEFT) { + *miny = y - font->ascender * (float)isize/10.0f; + *maxy = *miny + font->lineh*isize/10.0f; + } else { + *maxy = y + font->descender * (float)isize/10.0f; + *miny = *maxy - font->lineh*isize/10.0f; + } +} + +FONS_DEF const unsigned char* fonsGetTextureData(FONScontext* stash, int* width, int* height) +{ + if (width != NULL) + *width = stash->params.width; + if (height != NULL) + *height = stash->params.height; + return stash->texData; +} + +FONS_DEF int fonsValidateTexture(FONScontext* stash, int* dirty) +{ + if (stash->dirtyRect[0] < stash->dirtyRect[2] && stash->dirtyRect[1] < stash->dirtyRect[3]) { + dirty[0] = stash->dirtyRect[0]; + dirty[1] = stash->dirtyRect[1]; + dirty[2] = stash->dirtyRect[2]; + dirty[3] = stash->dirtyRect[3]; + // Reset dirty rect + stash->dirtyRect[0] = stash->params.width; + stash->dirtyRect[1] = stash->params.height; + stash->dirtyRect[2] = 0; + stash->dirtyRect[3] = 0; + return 1; + } + return 0; +} + +FONS_DEF void fonsDeleteInternal(FONScontext* stash) +{ + int i; + if (stash == NULL) return; + + if (stash->params.renderDelete) + stash->params.renderDelete(stash->params.userPtr); + + for (i = 0; i < stash->nfonts; ++i) + fons__freeFont(stash->fonts[i]); + + if (stash->atlas) fons__deleteAtlas(stash->atlas); + if (stash->fonts) FONTSTASH_FREE(stash->fonts); + if (stash->texData) FONTSTASH_FREE(stash->texData); + if (stash->scratch) FONTSTASH_FREE(stash->scratch); + FONTSTASH_FREE(stash); +} + +FONS_DEF void fonsSetErrorCallback(FONScontext* stash, void (*callback)(void* uptr, int error, int val), void* uptr) +{ + if (stash == NULL) return; + stash->handleError = callback; + stash->errorUptr = uptr; +} + +FONS_DEF void fonsGetAtlasSize(FONScontext* stash, int* width, int* height) +{ + if (stash == NULL) return; + *width = stash->params.width; + *height = stash->params.height; +} + +FONS_DEF int fonsExpandAtlas(FONScontext* stash, int width, int height) +{ + int i, maxy = 0; + unsigned char* data = NULL; + if (stash == NULL) return 0; + + width = fons__maxi(width, stash->params.width); + height = 
fons__maxi(height, stash->params.height); + + if (width == stash->params.width && height == stash->params.height) + return 1; + + // Flush pending glyphs. + fons__flush(stash); + + // Create new texture + if (stash->params.renderResize != NULL) { + if (stash->params.renderResize(stash->params.userPtr, width, height) == 0) + return 0; + } + // Copy old texture data over. + data = (unsigned char*)FONTSTASH_MALLOC(width * height); + if (data == NULL) + return 0; + for (i = 0; i < stash->params.height; i++) { + unsigned char* dst = &data[i*width]; + unsigned char* src = &stash->texData[i*stash->params.width]; + memcpy(dst, src, stash->params.width); + if (width > stash->params.width) + memset(dst+stash->params.width, 0, width - stash->params.width); + } + if (height > stash->params.height) + memset(&data[stash->params.height * width], 0, (height - stash->params.height) * width); + + FONTSTASH_FREE(stash->texData); + stash->texData = data; + + // Increase atlas size + fons__atlasExpand(stash->atlas, width, height); + + // Add existing data as dirty. + for (i = 0; i < stash->atlas->nnodes; i++) + maxy = fons__maxi(maxy, stash->atlas->nodes[i].y); + stash->dirtyRect[0] = 0; + stash->dirtyRect[1] = 0; + stash->dirtyRect[2] = stash->params.width; + stash->dirtyRect[3] = maxy; + + stash->params.width = width; + stash->params.height = height; + stash->itw = 1.0f/stash->params.width; + stash->ith = 1.0f/stash->params.height; + + return 1; +} + +FONS_DEF int fonsResetAtlas(FONScontext* stash, int width, int height) +{ + int i, j; + if (stash == NULL) return 0; + + // Flush pending glyphs. + fons__flush(stash); + + // Create new texture + if (stash->params.renderResize != NULL) { + if (stash->params.renderResize(stash->params.userPtr, width, height) == 0) + return 0; + } + + // Reset atlas + fons__atlasReset(stash->atlas, width, height); + + // Clear texture data. + stash->texData = (unsigned char*)FONTSTASH_REALLOC(stash->texData, width * height); + if (stash->texData == NULL) return 0; + memset(stash->texData, 0, width * height); + + // Reset dirty rect + stash->dirtyRect[0] = width; + stash->dirtyRect[1] = height; + stash->dirtyRect[2] = 0; + stash->dirtyRect[3] = 0; + + // Reset cached glyphs + for (i = 0; i < stash->nfonts; i++) { + FONSfont* font = stash->fonts[i]; + font->nglyphs = 0; + for (j = 0; j < FONS_HASH_LUT_SIZE; j++) + font->lut[j] = -1; + } + + stash->params.width = width; + stash->params.height = height; + stash->itw = 1.0f/stash->params.width; + stash->ith = 1.0f/stash->params.height; + + // Add white rect at 0,0 for debug drawing. + fons__addWhiteRect(stash, 2,2); + + return 1; +} + +#endif // FONTSTASH_IMPLEMENTATION diff --git a/v_windows/v/old/thirdparty/fontstash/stb_truetype.h b/v_windows/v/old/thirdparty/fontstash/stb_truetype.h new file mode 100644 index 0000000..f0f98a4 --- /dev/null +++ b/v_windows/v/old/thirdparty/fontstash/stb_truetype.h @@ -0,0 +1,5011 @@ +// stb_truetype.h - v1.24 - public domain +// authored from 2009-2020 by Sean Barrett / RAD Game Tools +// +// ======================================================================= +// +// NO SECURITY GUARANTEE -- DO NOT USE THIS ON UNTRUSTED FONT FILES +// +// This library does no range checking of the offsets found in the file, +// meaning an attacker can use it to read arbitrary memory. 
+// +// ======================================================================= +// +// This library processes TrueType files: +// parse files +// extract glyph metrics +// extract glyph shapes +// render glyphs to one-channel bitmaps with antialiasing (box filter) +// render glyphs to one-channel SDF bitmaps (signed-distance field/function) +// +// Todo: +// non-MS cmaps +// crashproof on bad data +// hinting? (no longer patented) +// cleartype-style AA? +// optimize: use simple memory allocator for intermediates +// optimize: build edge-list directly from curves +// optimize: rasterize directly from curves? +// +// ADDITIONAL CONTRIBUTORS +// +// Mikko Mononen: compound shape support, more cmap formats +// Tor Andersson: kerning, subpixel rendering +// Dougall Johnson: OpenType / Type 2 font handling +// Daniel Ribeiro Maciel: basic GPOS-based kerning +// +// Misc other: +// Ryan Gordon +// Simon Glass +// github:IntellectualKitty +// Imanol Celaya +// Daniel Ribeiro Maciel +// +// Bug/warning reports/fixes: +// "Zer" on mollyrocket Fabian "ryg" Giesen github:NiLuJe +// Cass Everitt Martins Mozeiko github:aloucks +// stoiko (Haemimont Games) Cap Petschulat github:oyvindjam +// Brian Hook Omar Cornut github:vassvik +// Walter van Niftrik Ryan Griege +// David Gow Peter LaValle +// David Given Sergey Popov +// Ivan-Assen Ivanov Giumo X. Clanjor +// Anthony Pesch Higor Euripedes +// Johan Duparc Thomas Fields +// Hou Qiming Derek Vinyard +// Rob Loach Cort Stratton +// Kenney Phillis Jr. Brian Costabile +// Ken Voskuil (kaesve) +// +// VERSION HISTORY +// +// 1.24 (2020-02-05) fix warning +// 1.23 (2020-02-02) query SVG data for glyphs; query whole kerning table (but only kern not GPOS) +// 1.22 (2019-08-11) minimize missing-glyph duplication; fix kerning if both 'GPOS' and 'kern' are defined +// 1.21 (2019-02-25) fix warning +// 1.20 (2019-02-07) PackFontRange skips missing codepoints; GetScaleFontVMetrics() +// 1.19 (2018-02-11) GPOS kerning, STBTT_fmod +// 1.18 (2018-01-29) add missing function +// 1.17 (2017-07-23) make more arguments const; doc fix +// 1.16 (2017-07-12) SDF support +// 1.15 (2017-03-03) make more arguments const +// 1.14 (2017-01-16) num-fonts-in-TTC function +// 1.13 (2017-01-02) support OpenType fonts, certain Apple fonts +// 1.12 (2016-10-25) suppress warnings about casting away const with -Wcast-qual +// 1.11 (2016-04-02) fix unused-variable warning +// 1.10 (2016-04-02) user-defined fabs(); rare memory leak; remove duplicate typedef +// 1.09 (2016-01-16) warning fix; avoid crash on outofmem; use allocation userdata properly +// 1.08 (2015-09-13) document stbtt_Rasterize(); fixes for vertical & horizontal edges +// 1.07 (2015-08-01) allow PackFontRanges to accept arrays of sparse codepoints; +// variant PackFontRanges to pack and render in separate phases; +// fix stbtt_GetFontOFfsetForIndex (never worked for non-0 input?); +// fixed an assert() bug in the new rasterizer +// replace assert() with STBTT_assert() in new rasterizer +// +// Full history can be found at the end of this file. +// +// LICENSE +// +// See end of file for license information. +// +// USAGE +// +// Include this file in whatever places need to refer to it. In ONE C/C++ +// file, write: +// #define STB_TRUETYPE_IMPLEMENTATION +// before the #include of this file. This expands out the actual +// implementation into that C/C++ file. 
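+//
+//   For example, a minimal sketch of that setup ("my_truetype.c" below is just
+//   an illustrative file name, nothing this library requires):
+//
+//      // my_truetype.c -- the ONE file that owns the implementation
+//      #define STB_TRUETYPE_IMPLEMENTATION
+//      #include "stb_truetype.h"
+//
+//   Every other translation unit simply does #include "stb_truetype.h" and
+//   gets the declarations only.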
+// +// To make the implementation private to the file that generates the implementation, +// #define STBTT_STATIC +// +// Simple 3D API (don't ship this, but it's fine for tools and quick start) +// stbtt_BakeFontBitmap() -- bake a font to a bitmap for use as texture +// stbtt_GetBakedQuad() -- compute quad to draw for a given char +// +// Improved 3D API (more shippable): +// #include "stb_rect_pack.h" -- optional, but you really want it +// stbtt_PackBegin() +// stbtt_PackSetOversampling() -- for improved quality on small fonts +// stbtt_PackFontRanges() -- pack and renders +// stbtt_PackEnd() +// stbtt_GetPackedQuad() +// +// "Load" a font file from a memory buffer (you have to keep the buffer loaded) +// stbtt_InitFont() +// stbtt_GetFontOffsetForIndex() -- indexing for TTC font collections +// stbtt_GetNumberOfFonts() -- number of fonts for TTC font collections +// +// Render a unicode codepoint to a bitmap +// stbtt_GetCodepointBitmap() -- allocates and returns a bitmap +// stbtt_MakeCodepointBitmap() -- renders into bitmap you provide +// stbtt_GetCodepointBitmapBox() -- how big the bitmap must be +// +// Character advance/positioning +// stbtt_GetCodepointHMetrics() +// stbtt_GetFontVMetrics() +// stbtt_GetFontVMetricsOS2() +// stbtt_GetCodepointKernAdvance() +// +// Starting with version 1.06, the rasterizer was replaced with a new, +// faster and generally-more-precise rasterizer. The new rasterizer more +// accurately measures pixel coverage for anti-aliasing, except in the case +// where multiple shapes overlap, in which case it overestimates the AA pixel +// coverage. Thus, anti-aliasing of intersecting shapes may look wrong. If +// this turns out to be a problem, you can re-enable the old rasterizer with +// #define STBTT_RASTERIZER_VERSION 1 +// which will incur about a 15% speed hit. +// +// ADDITIONAL DOCUMENTATION +// +// Immediately after this block comment are a series of sample programs. +// +// After the sample programs is the "header file" section. This section +// includes documentation for each API function. +// +// Some important concepts to understand to use this library: +// +// Codepoint +// Characters are defined by unicode codepoints, e.g. 65 is +// uppercase A, 231 is lowercase c with a cedilla, 0x7e30 is +// the hiragana for "ma". +// +// Glyph +// A visual character shape (every codepoint is rendered as +// some glyph) +// +// Glyph index +// A font-specific integer ID representing a glyph +// +// Baseline +// Glyph shapes are defined relative to a baseline, which is the +// bottom of uppercase characters. Characters extend both above +// and below the baseline. +// +// Current Point +// As you draw text to the screen, you keep track of a "current point" +// which is the origin of each character. The current point's vertical +// position is the baseline. Even "baked fonts" use this model. +// +// Vertical Font Metrics +// The vertical qualities of the font, used to vertically position +// and space the characters. See docs for stbtt_GetFontVMetrics. +// +// Font Size in Pixels or Points +// The preferred interface for specifying font sizes in stb_truetype +// is to specify how tall the font's vertical extent should be in pixels. +// If that sounds good enough, skip the next paragraph. +// +// Most font APIs instead use "points", which are a common typographic +// measurement for describing font size, defined as 72 points per inch. +// stb_truetype provides a point API for compatibility. 
However, true +// "per inch" conventions don't make much sense on computer displays +// since different monitors have different number of pixels per +// inch. For example, Windows traditionally uses a convention that +// there are 96 pixels per inch, thus making 'inch' measurements have +// nothing to do with inches, and thus effectively defining a point to +// be 1.333 pixels. Additionally, the TrueType font data provides +// an explicit scale factor to scale a given font's glyphs to points, +// but the author has observed that this scale factor is often wrong +// for non-commercial fonts, thus making fonts scaled in points +// according to the TrueType spec incoherently sized in practice. +// +// DETAILED USAGE: +// +// Scale: +// Select how high you want the font to be, in points or pixels. +// Call ScaleForPixelHeight or ScaleForMappingEmToPixels to compute +// a scale factor SF that will be used by all other functions. +// +// Baseline: +// You need to select a y-coordinate that is the baseline of where +// your text will appear. Call GetFontBoundingBox to get the baseline-relative +// bounding box for all characters. SF*-y0 will be the distance in pixels +// that the worst-case character could extend above the baseline, so if +// you want the top edge of characters to appear at the top of the +// screen where y=0, then you would set the baseline to SF*-y0. +// +// Current point: +// Set the current point where the first character will appear. The +// first character could extend left of the current point; this is font +// dependent. You can either choose a current point that is the leftmost +// point and hope, or add some padding, or check the bounding box or +// left-side-bearing of the first character to be displayed and set +// the current point based on that. +// +// Displaying a character: +// Compute the bounding box of the character. It will contain signed values +// relative to . I.e. if it returns x0,y0,x1,y1, +// then the character should be displayed in the rectangle from +// to = 32 && *text < 128) { + stbtt_aligned_quad q; + stbtt_GetBakedQuad(cdata, 512,512, *text-32, &x,&y,&q,1);//1=opengl & d3d10+,0=d3d9 + glTexCoord2f(q.s0,q.t1); glVertex2f(q.x0,q.y0); + glTexCoord2f(q.s1,q.t1); glVertex2f(q.x1,q.y0); + glTexCoord2f(q.s1,q.t0); glVertex2f(q.x1,q.y1); + glTexCoord2f(q.s0,q.t0); glVertex2f(q.x0,q.y1); + } + ++text; + } + glEnd(); +} +#endif +// +// +////////////////////////////////////////////////////////////////////////////// +// +// Complete program (this compiles): get a single bitmap, print as ASCII art +// +#if 0 +#include +#define STB_TRUETYPE_IMPLEMENTATION // force following include to generate implementation +#include "stb_truetype.h" + +char ttf_buffer[1<<25]; + +int main(int argc, char **argv) +{ + stbtt_fontinfo font; + unsigned char *bitmap; + int w,h,i,j,c = (argc > 1 ? atoi(argv[1]) : 'a'), s = (argc > 2 ? atoi(argv[2]) : 20); + + fread(ttf_buffer, 1, 1<<25, fopen(argc > 3 ? argv[3] : "c:/windows/fonts/arialbd.ttf", "rb")); + + stbtt_InitFont(&font, ttf_buffer, stbtt_GetFontOffsetForIndex(ttf_buffer,0)); + bitmap = stbtt_GetCodepointBitmap(&font, 0,stbtt_ScaleForPixelHeight(&font, s), c, &w, &h, 0,0); + + for (j=0; j < h; ++j) { + for (i=0; i < w; ++i) + putchar(" .:ioVM@"[bitmap[j*w+i]>>5]); + putchar('\n'); + } + return 0; +} +#endif +// +// Output: +// +// .ii. +// @@@@@@. +// V@Mio@@o +// :i. V@V +// :oM@@M +// :@@@MM@M +// @@o o@M +// :@@. M@M +// @@@o@@@@ +// :M@@V:@@. 
+// +////////////////////////////////////////////////////////////////////////////// +// +// Complete program: print "Hello World!" banner, with bugs +// +#if 0 +char buffer[24<<20]; +unsigned char screen[20][79]; + +int main(int arg, char **argv) +{ + stbtt_fontinfo font; + int i,j,ascent,baseline,ch=0; + float scale, xpos=2; // leave a little padding in case the character extends left + char *text = "Heljo World!"; // intentionally misspelled to show 'lj' brokenness + + fread(buffer, 1, 1000000, fopen("c:/windows/fonts/arialbd.ttf", "rb")); + stbtt_InitFont(&font, buffer, 0); + + scale = stbtt_ScaleForPixelHeight(&font, 15); + stbtt_GetFontVMetrics(&font, &ascent,0,0); + baseline = (int) (ascent*scale); + + while (text[ch]) { + int advance,lsb,x0,y0,x1,y1; + float x_shift = xpos - (float) floor(xpos); + stbtt_GetCodepointHMetrics(&font, text[ch], &advance, &lsb); + stbtt_GetCodepointBitmapBoxSubpixel(&font, text[ch], scale,scale,x_shift,0, &x0,&y0,&x1,&y1); + stbtt_MakeCodepointBitmapSubpixel(&font, &screen[baseline + y0][(int) xpos + x0], x1-x0,y1-y0, 79, scale,scale,x_shift,0, text[ch]); + // note that this stomps the old data, so where character boxes overlap (e.g. 'lj') it's wrong + // because this API is really for baking character bitmaps into textures. if you want to render + // a sequence of characters, you really need to render each bitmap to a temp buffer, then + // "alpha blend" that into the working buffer + xpos += (advance * scale); + if (text[ch+1]) + xpos += scale*stbtt_GetCodepointKernAdvance(&font, text[ch],text[ch+1]); + ++ch; + } + + for (j=0; j < 20; ++j) { + for (i=0; i < 78; ++i) + putchar(" .:ioVM@"[screen[j][i]>>5]); + putchar('\n'); + } + + return 0; +} +#endif + + +////////////////////////////////////////////////////////////////////////////// +////////////////////////////////////////////////////////////////////////////// +//// +//// INTEGRATION WITH YOUR CODEBASE +//// +//// The following sections allow you to supply alternate definitions +//// of C library functions used by stb_truetype, e.g. if you don't +//// link with the C runtime library. + +#ifdef STB_TRUETYPE_IMPLEMENTATION + // #define your own (u)stbtt_int8/16/32 before including to override this + #ifndef stbtt_uint8 + typedef unsigned char stbtt_uint8; + typedef signed char stbtt_int8; + typedef unsigned short stbtt_uint16; + typedef signed short stbtt_int16; + typedef unsigned int stbtt_uint32; + typedef signed int stbtt_int32; + #endif + + typedef char stbtt__check_size32[sizeof(stbtt_int32)==4 ? 1 : -1]; + typedef char stbtt__check_size16[sizeof(stbtt_int16)==2 ? 1 : -1]; + + // e.g. 
#define your own STBTT_ifloor/STBTT_iceil() to avoid math.h + #ifndef STBTT_ifloor + #include + #define STBTT_ifloor(x) ((int) floor(x)) + #define STBTT_iceil(x) ((int) ceil(x)) + #endif + + #ifndef STBTT_sqrt + #include + #define STBTT_sqrt(x) sqrt(x) + #define STBTT_pow(x,y) pow(x,y) + #endif + + #ifndef STBTT_fmod + #include + #define STBTT_fmod(x,y) fmod(x,y) + #endif + + #ifndef STBTT_cos + #include + #define STBTT_cos(x) cos(x) + #define STBTT_acos(x) acos(x) + #endif + + #ifndef STBTT_fabs + #include + #define STBTT_fabs(x) fabs(x) + #endif + + // #define your own functions "STBTT_malloc" / "STBTT_free" to avoid malloc.h + #ifndef STBTT_malloc + #include + #define STBTT_malloc(x,u) ((void)(u),FONTSTASH_MALLOC(x)) + #define STBTT_free(x,u) ((void)(u),FONTSTASH_FREE(x)) + #endif + + #ifndef STBTT_assert + #include + #define STBTT_assert(x) assert(x) + #endif + + #ifndef STBTT_strlen + #include + #define STBTT_strlen(x) strlen(x) + #endif + + #ifndef STBTT_memcpy + #include + #define STBTT_memcpy memcpy + #define STBTT_memset memset + #endif +#endif + +/////////////////////////////////////////////////////////////////////////////// +/////////////////////////////////////////////////////////////////////////////// +//// +//// INTERFACE +//// +//// + +#ifndef __STB_INCLUDE_STB_TRUETYPE_H__ +#define __STB_INCLUDE_STB_TRUETYPE_H__ + +#ifdef STBTT_STATIC +#define STBTT_DEF static +#else +#define STBTT_DEF extern +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +// private structure +typedef struct +{ + unsigned char *data; + int cursor; + int size; +} stbtt__buf; + +////////////////////////////////////////////////////////////////////////////// +// +// TEXTURE BAKING API +// +// If you use this API, you only have to call two functions ever. +// + +typedef struct +{ + unsigned short x0,y0,x1,y1; // coordinates of bbox in bitmap + float xoff,yoff,xadvance; +} stbtt_bakedchar; + +STBTT_DEF int stbtt_BakeFontBitmap(const unsigned char *data, int offset, // font location (use offset=0 for plain .ttf) + float pixel_height, // height of font in pixels + unsigned char *pixels, int pw, int ph, // bitmap to be filled in + int first_char, int num_chars, // characters to bake + stbtt_bakedchar *chardata); // you allocate this, it's num_chars long +// if return is positive, the first unused row of the bitmap +// if return is negative, returns the negative of the number of characters that fit +// if return is 0, no characters fit and no rows were used +// This uses a very crappy packing. + +typedef struct +{ + float x0,y0,s0,t0; // top-left + float x1,y1,s1,t1; // bottom-right +} stbtt_aligned_quad; + +STBTT_DEF void stbtt_GetBakedQuad(const stbtt_bakedchar *chardata, int pw, int ph, // same data as above + int char_index, // character to display + float *xpos, float *ypos, // pointers to current position in screen pixel space + stbtt_aligned_quad *q, // output: quad to draw + int opengl_fillrule); // true if opengl fill rule; false if DX9 or earlier +// Call GetBakedQuad with char_index = 'character - first_char', and it +// creates the quad you need to draw and advances the current position. +// +// The coordinate system used assumes y increases downwards. +// +// Characters will extend both above and below the current position; +// see discussion of "BASELINE" above. +// +// It's inefficient; you might want to c&p it and optimize it. 
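+//
+// A minimal sketch of how the two calls above fit together (the 512x512 atlas,
+// the ASCII 32..126 range and the file name are illustrative choices only;
+// error handling omitted):
+//
+//      unsigned char ttf[1<<20], atlas[512*512];
+//      stbtt_bakedchar baked[96];                       // ASCII 32..126
+//      fread(ttf, 1, 1<<20, fopen("font.ttf", "rb"));
+//      stbtt_BakeFontBitmap(ttf, 0, 32.0f, atlas, 512, 512, 32, 96, baked);
+//      // ... upload 'atlas' as a single-channel texture, then per character:
+//      float x = 10.0f, y = 100.0f;                     // current point; y is the baseline
+//      stbtt_aligned_quad q;
+//      stbtt_GetBakedQuad(baked, 512, 512, 'A' - 32, &x, &y, &q, 1);
+//      // q.x0..q.y1 are screen coordinates, q.s0..q.t1 are texcoords into 'atlas'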
+ +STBTT_DEF void stbtt_GetScaledFontVMetrics(const unsigned char *fontdata, int index, float size, float *ascent, float *descent, float *lineGap); +// Query the font vertical metrics without having to create a font first. + + +////////////////////////////////////////////////////////////////////////////// +// +// NEW TEXTURE BAKING API +// +// This provides options for packing multiple fonts into one atlas, not +// perfectly but better than nothing. + +typedef struct +{ + unsigned short x0,y0,x1,y1; // coordinates of bbox in bitmap + float xoff,yoff,xadvance; + float xoff2,yoff2; +} stbtt_packedchar; + +typedef struct stbtt_pack_context stbtt_pack_context; +typedef struct stbtt_fontinfo stbtt_fontinfo; +#ifndef STB_RECT_PACK_VERSION +typedef struct stbrp_rect stbrp_rect; +#endif + +STBTT_DEF int stbtt_PackBegin(stbtt_pack_context *spc, unsigned char *pixels, int width, int height, int stride_in_bytes, int padding, void *alloc_context); +// Initializes a packing context stored in the passed-in stbtt_pack_context. +// Future calls using this context will pack characters into the bitmap passed +// in here: a 1-channel bitmap that is width * height. stride_in_bytes is +// the distance from one row to the next (or 0 to mean they are packed tightly +// together). "padding" is the amount of padding to leave between each +// character (normally you want '1' for bitmaps you'll use as textures with +// bilinear filtering). +// +// Returns 0 on failure, 1 on success. + +STBTT_DEF void stbtt_PackEnd (stbtt_pack_context *spc); +// Cleans up the packing context and frees all memory. + +#define STBTT_POINT_SIZE(x) (-(x)) + +STBTT_DEF int stbtt_PackFontRange(stbtt_pack_context *spc, const unsigned char *fontdata, int font_index, float font_size, + int first_unicode_char_in_range, int num_chars_in_range, stbtt_packedchar *chardata_for_range); +// Creates character bitmaps from the font_index'th font found in fontdata (use +// font_index=0 if you don't know what that is). It creates num_chars_in_range +// bitmaps for characters with unicode values starting at first_unicode_char_in_range +// and increasing. Data for how to render them is stored in chardata_for_range; +// pass these to stbtt_GetPackedQuad to get back renderable quads. +// +// font_size is the full height of the character from ascender to descender, +// as computed by stbtt_ScaleForPixelHeight. To use a point size as computed +// by stbtt_ScaleForMappingEmToPixels, wrap the point size in STBTT_POINT_SIZE() +// and pass that result as 'font_size': +// ..., 20 , ... // font max minus min y is 20 pixels tall +// ..., STBTT_POINT_SIZE(20), ... // 'M' is 20 pixels tall + +typedef struct +{ + float font_size; + int first_unicode_codepoint_in_range; // if non-zero, then the chars are continuous, and this is the first codepoint + int *array_of_unicode_codepoints; // if non-zero, then this is an array of unicode codepoints + int num_chars; + stbtt_packedchar *chardata_for_range; // output + unsigned char h_oversample, v_oversample; // don't set these, they're used internally +} stbtt_pack_range; + +STBTT_DEF int stbtt_PackFontRanges(stbtt_pack_context *spc, const unsigned char *fontdata, int font_index, stbtt_pack_range *ranges, int num_ranges); +// Creates character bitmaps from multiple ranges of characters stored in +// ranges. This will usually create a better-packed bitmap than multiple +// calls to stbtt_PackFontRange. Note that you can call this multiple +// times within a single PackBegin/PackEnd. 
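+//
+// A minimal sketch of the PackBegin/PackFontRange/PackEnd workflow described
+// above (the 1024x1024 atlas, padding of 1 and the ASCII 32..126 range are
+// illustrative; 'ttf_data' is assumed to hold the whole font file in memory):
+//
+//      unsigned char atlas[1024*1024];
+//      stbtt_packedchar packed[95];
+//      stbtt_pack_context pc;
+//      stbtt_PackBegin(&pc, atlas, 1024, 1024, 0, 1, NULL);
+//      stbtt_PackSetOversampling(&pc, 2, 2);            // optional, documented below
+//      stbtt_PackFontRange(&pc, ttf_data, 0, 24.0f, 32, 95, packed);
+//      stbtt_PackEnd(&pc);
+//      // draw with stbtt_GetPackedQuad(packed, 1024, 1024, ...), see below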
+ +STBTT_DEF void stbtt_PackSetOversampling(stbtt_pack_context *spc, unsigned int h_oversample, unsigned int v_oversample); +// Oversampling a font increases the quality by allowing higher-quality subpixel +// positioning, and is especially valuable at smaller text sizes. +// +// This function sets the amount of oversampling for all following calls to +// stbtt_PackFontRange(s) or stbtt_PackFontRangesGatherRects for a given +// pack context. The default (no oversampling) is achieved by h_oversample=1 +// and v_oversample=1. The total number of pixels required is +// h_oversample*v_oversample larger than the default; for example, 2x2 +// oversampling requires 4x the storage of 1x1. For best results, render +// oversampled textures with bilinear filtering. Look at the readme in +// stb/tests/oversample for information about oversampled fonts +// +// To use with PackFontRangesGather etc., you must set it before calls +// call to PackFontRangesGatherRects. + +STBTT_DEF void stbtt_PackSetSkipMissingCodepoints(stbtt_pack_context *spc, int skip); +// If skip != 0, this tells stb_truetype to skip any codepoints for which +// there is no corresponding glyph. If skip=0, which is the default, then +// codepoints without a glyph recived the font's "missing character" glyph, +// typically an empty box by convention. + +STBTT_DEF void stbtt_GetPackedQuad(const stbtt_packedchar *chardata, int pw, int ph, // same data as above + int char_index, // character to display + float *xpos, float *ypos, // pointers to current position in screen pixel space + stbtt_aligned_quad *q, // output: quad to draw + int align_to_integer); + +STBTT_DEF int stbtt_PackFontRangesGatherRects(stbtt_pack_context *spc, const stbtt_fontinfo *info, stbtt_pack_range *ranges, int num_ranges, stbrp_rect *rects); +STBTT_DEF void stbtt_PackFontRangesPackRects(stbtt_pack_context *spc, stbrp_rect *rects, int num_rects); +STBTT_DEF int stbtt_PackFontRangesRenderIntoRects(stbtt_pack_context *spc, const stbtt_fontinfo *info, stbtt_pack_range *ranges, int num_ranges, stbrp_rect *rects); +// Calling these functions in sequence is roughly equivalent to calling +// stbtt_PackFontRanges(). If you more control over the packing of multiple +// fonts, or if you want to pack custom data into a font texture, take a look +// at the source to of stbtt_PackFontRanges() and create a custom version +// using these functions, e.g. call GatherRects multiple times, +// building up a single array of rects, then call PackRects once, +// then call RenderIntoRects repeatedly. This may result in a +// better packing than calling PackFontRanges multiple times +// (or it may not). + +// this is an opaque structure that you shouldn't mess with which holds +// all the context needed from PackBegin to PackEnd. +struct stbtt_pack_context { + void *user_allocator_context; + void *pack_info; + int width; + int height; + int stride_in_bytes; + int padding; + int skip_missing; + unsigned int h_oversample, v_oversample; + unsigned char *pixels; + void *nodes; +}; + +////////////////////////////////////////////////////////////////////////////// +// +// FONT LOADING +// +// + +STBTT_DEF int stbtt_GetNumberOfFonts(const unsigned char *data); +// This function will determine the number of fonts in a font file. TrueType +// collection (.ttc) files may contain multiple fonts, while TrueType font +// (.ttf) files only contain one font. The number of fonts can be used for +// indexing with the previous function where the index is between zero and one +// less than the total fonts. 
If an error occurs, -1 is returned. + +STBTT_DEF int stbtt_GetFontOffsetForIndex(const unsigned char *data, int index); +// Each .ttf/.ttc file may have more than one font. Each font has a sequential +// index number starting from 0. Call this function to get the font offset for +// a given index; it returns -1 if the index is out of range. A regular .ttf +// file will only define one font and it always be at offset 0, so it will +// return '0' for index 0, and -1 for all other indices. + +// The following structure is defined publicly so you can declare one on +// the stack or as a global or etc, but you should treat it as opaque. +struct stbtt_fontinfo +{ + void * userdata; + unsigned char * data; // pointer to .ttf file + int fontstart; // offset of start of font + + int numGlyphs; // number of glyphs, needed for range checking + + int loca,head,glyf,hhea,hmtx,kern,gpos,svg; // table locations as offset from start of .ttf + int index_map; // a cmap mapping for our chosen character encoding + int indexToLocFormat; // format needed to map from glyph index to glyph + + stbtt__buf cff; // cff font data + stbtt__buf charstrings; // the charstring index + stbtt__buf gsubrs; // global charstring subroutines index + stbtt__buf subrs; // private charstring subroutines index + stbtt__buf fontdicts; // array of font dicts + stbtt__buf fdselect; // map from glyph to fontdict +}; + +STBTT_DEF int stbtt_InitFont(stbtt_fontinfo *info, const unsigned char *data, int offset); +// Given an offset into the file that defines a font, this function builds +// the necessary cached info for the rest of the system. You must allocate +// the stbtt_fontinfo yourself, and stbtt_InitFont will fill it out. You don't +// need to do anything special to free it, because the contents are pure +// value data with no additional data structures. Returns 0 on failure. + + +////////////////////////////////////////////////////////////////////////////// +// +// CHARACTER TO GLYPH-INDEX CONVERSIOn + +STBTT_DEF int stbtt_FindGlyphIndex(const stbtt_fontinfo *info, int unicode_codepoint); +// If you're going to perform multiple operations on the same character +// and you want a speed-up, call this function with the character you're +// going to process, then use glyph-based functions instead of the +// codepoint-based functions. +// Returns 0 if the character codepoint is not defined in the font. + + +////////////////////////////////////////////////////////////////////////////// +// +// CHARACTER PROPERTIES +// + +STBTT_DEF float stbtt_ScaleForPixelHeight(const stbtt_fontinfo *info, float pixels); +// computes a scale factor to produce a font whose "height" is 'pixels' tall. +// Height is measured as the distance from the highest ascender to the lowest +// descender; in other words, it's equivalent to calling stbtt_GetFontVMetrics +// and computing: +// scale = pixels / (ascent - descent) +// so if you prefer to measure height by the ascent only, use a similar calculation. + +STBTT_DEF float stbtt_ScaleForMappingEmToPixels(const stbtt_fontinfo *info, float pixels); +// computes a scale factor to produce a font whose EM size is mapped to +// 'pixels' tall. This is probably what traditional APIs compute, but +// I'm not positive. + +STBTT_DEF void stbtt_GetFontVMetrics(const stbtt_fontinfo *info, int *ascent, int *descent, int *lineGap); +// ascent is the coordinate above the baseline the font extends; descent +// is the coordinate below the baseline the font extends (i.e. 
it is typically negative) +// lineGap is the spacing between one row's descent and the next row's ascent... +// so you should advance the vertical position by "*ascent - *descent + *lineGap" +// these are expressed in unscaled coordinates, so you must multiply by +// the scale factor for a given size + +STBTT_DEF int stbtt_GetFontVMetricsOS2(const stbtt_fontinfo *info, int *typoAscent, int *typoDescent, int *typoLineGap); +// analogous to GetFontVMetrics, but returns the "typographic" values from the OS/2 +// table (specific to MS/Windows TTF files). +// +// Returns 1 on success (table present), 0 on failure. + +STBTT_DEF void stbtt_GetFontBoundingBox(const stbtt_fontinfo *info, int *x0, int *y0, int *x1, int *y1); +// the bounding box around all possible characters + +STBTT_DEF void stbtt_GetCodepointHMetrics(const stbtt_fontinfo *info, int codepoint, int *advanceWidth, int *leftSideBearing); +// leftSideBearing is the offset from the current horizontal position to the left edge of the character +// advanceWidth is the offset from the current horizontal position to the next horizontal position +// these are expressed in unscaled coordinates + +STBTT_DEF int stbtt_GetCodepointKernAdvance(const stbtt_fontinfo *info, int ch1, int ch2); +// an additional amount to add to the 'advance' value between ch1 and ch2 + +STBTT_DEF int stbtt_GetCodepointBox(const stbtt_fontinfo *info, int codepoint, int *x0, int *y0, int *x1, int *y1); +// Gets the bounding box of the visible part of the glyph, in unscaled coordinates + +STBTT_DEF void stbtt_GetGlyphHMetrics(const stbtt_fontinfo *info, int glyph_index, int *advanceWidth, int *leftSideBearing); +STBTT_DEF int stbtt_GetGlyphKernAdvance(const stbtt_fontinfo *info, int glyph1, int glyph2); +STBTT_DEF int stbtt_GetGlyphBox(const stbtt_fontinfo *info, int glyph_index, int *x0, int *y0, int *x1, int *y1); +// as above, but takes one or more glyph indices for greater efficiency + +typedef struct stbtt_kerningentry +{ + int glyph1; // use stbtt_FindGlyphIndex + int glyph2; + int advance; +} stbtt_kerningentry; + +STBTT_DEF int stbtt_GetKerningTableLength(const stbtt_fontinfo *info); +STBTT_DEF int stbtt_GetKerningTable(const stbtt_fontinfo *info, stbtt_kerningentry* table, int table_length); +// Retrieves a complete list of all of the kerning pairs provided by the font +// stbtt_GetKerningTable never writes more than table_length entries and returns how many entries it did write. +// The table will be sorted by (a.glyph1 == b.glyph1)?(a.glyph2 < b.glyph2):(a.glyph1 < b.glyph1) + +////////////////////////////////////////////////////////////////////////////// +// +// GLYPH SHAPES (you probably don't need these, but they have to go before +// the bitmaps for C declaration-order reasons) +// + +#ifndef STBTT_vmove // you can predefine these to use different values (but why?) 
+ enum { + STBTT_vmove=1, + STBTT_vline, + STBTT_vcurve, + STBTT_vcubic + }; +#endif + +#ifndef stbtt_vertex // you can predefine this to use different values + // (we share this with other code at RAD) + #define stbtt_vertex_type short // can't use stbtt_int16 because that's not visible in the header file + typedef struct + { + stbtt_vertex_type x,y,cx,cy,cx1,cy1; + unsigned char type,padding; + } stbtt_vertex; +#endif + +STBTT_DEF int stbtt_IsGlyphEmpty(const stbtt_fontinfo *info, int glyph_index); +// returns non-zero if nothing is drawn for this glyph + +STBTT_DEF int stbtt_GetCodepointShape(const stbtt_fontinfo *info, int unicode_codepoint, stbtt_vertex **vertices); +STBTT_DEF int stbtt_GetGlyphShape(const stbtt_fontinfo *info, int glyph_index, stbtt_vertex **vertices); +// returns # of vertices and fills *vertices with the pointer to them +// these are expressed in "unscaled" coordinates +// +// The shape is a series of contours. Each one starts with +// a STBTT_moveto, then consists of a series of mixed +// STBTT_lineto and STBTT_curveto segments. A lineto +// draws a line from previous endpoint to its x,y; a curveto +// draws a quadratic bezier from previous endpoint to +// its x,y, using cx,cy as the bezier control point. + +STBTT_DEF void stbtt_FreeShape(const stbtt_fontinfo *info, stbtt_vertex *vertices); +// frees the data allocated above + +STBTT_DEF int stbtt_GetCodepointSVG(const stbtt_fontinfo *info, int unicode_codepoint, const char **svg); +STBTT_DEF int stbtt_GetGlyphSVG(const stbtt_fontinfo *info, int gl, const char **svg); +// fills svg with the character's SVG data. +// returns data size or 0 if SVG not found. + +////////////////////////////////////////////////////////////////////////////// +// +// BITMAP RENDERING +// + +STBTT_DEF void stbtt_FreeBitmap(unsigned char *bitmap, void *userdata); +// frees the bitmap allocated below + +STBTT_DEF unsigned char *stbtt_GetCodepointBitmap(const stbtt_fontinfo *info, float scale_x, float scale_y, int codepoint, int *width, int *height, int *xoff, int *yoff); +// allocates a large-enough single-channel 8bpp bitmap and renders the +// specified character/glyph at the specified scale into it, with +// antialiasing. 0 is no coverage (transparent), 255 is fully covered (opaque). +// *width & *height are filled out with the width & height of the bitmap, +// which is stored left-to-right, top-to-bottom. +// +// xoff/yoff are the offset it pixel space from the glyph origin to the top-left of the bitmap + +STBTT_DEF unsigned char *stbtt_GetCodepointBitmapSubpixel(const stbtt_fontinfo *info, float scale_x, float scale_y, float shift_x, float shift_y, int codepoint, int *width, int *height, int *xoff, int *yoff); +// the same as stbtt_GetCodepoitnBitmap, but you can specify a subpixel +// shift for the character + +STBTT_DEF void stbtt_MakeCodepointBitmap(const stbtt_fontinfo *info, unsigned char *output, int out_w, int out_h, int out_stride, float scale_x, float scale_y, int codepoint); +// the same as stbtt_GetCodepointBitmap, but you pass in storage for the bitmap +// in the form of 'output', with row spacing of 'out_stride' bytes. the bitmap +// is clipped to out_w/out_h bytes. Call stbtt_GetCodepointBitmapBox to get the +// width and height and positioning info for it first. 
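To make the allocating bitmap path above concrete, a small sketch (not part of the vendored header; `dump_glyph`, the ASCII-art dump, and the `ttf_data` buffer are assumptions for the example only) that renders one codepoint and prints its coverage values:

// Illustrative sketch: render one codepoint with the allocating API and dump it as text.
#include <stdio.h>

void dump_glyph(const unsigned char *ttf_data, int codepoint, float pixel_height)
{
   stbtt_fontinfo font = {0};   // zero-init so font.userdata is NULL for STBTT_malloc/free
   if (!stbtt_InitFont(&font, ttf_data, stbtt_GetFontOffsetForIndex(ttf_data, 0)))
      return;
   float scale = stbtt_ScaleForPixelHeight(&font, pixel_height);
   int w, h, xoff, yoff;
   unsigned char *bmp = stbtt_GetCodepointBitmap(&font, scale, scale, codepoint, &w, &h, &xoff, &yoff);
   if (!bmp) return;
   printf("codepoint %d: %dx%d bitmap, offset (%d,%d) from glyph origin\n", codepoint, w, h, xoff, yoff);
   for (int y = 0; y < h; ++y) {                       // rows stored left-to-right, top-to-bottom
      for (int x = 0; x < w; ++x)
         putchar(" .:ioVM@"[bmp[y*w + x] >> 5]);       // 0 = transparent, 255 = fully covered
      putchar('\n');
   }
   stbtt_FreeBitmap(bmp, font.userdata);
}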
+ +STBTT_DEF void stbtt_MakeCodepointBitmapSubpixel(const stbtt_fontinfo *info, unsigned char *output, int out_w, int out_h, int out_stride, float scale_x, float scale_y, float shift_x, float shift_y, int codepoint); +// same as stbtt_MakeCodepointBitmap, but you can specify a subpixel +// shift for the character + +STBTT_DEF void stbtt_MakeCodepointBitmapSubpixelPrefilter(const stbtt_fontinfo *info, unsigned char *output, int out_w, int out_h, int out_stride, float scale_x, float scale_y, float shift_x, float shift_y, int oversample_x, int oversample_y, float *sub_x, float *sub_y, int codepoint); +// same as stbtt_MakeCodepointBitmapSubpixel, but prefiltering +// is performed (see stbtt_PackSetOversampling) + +STBTT_DEF void stbtt_GetCodepointBitmapBox(const stbtt_fontinfo *font, int codepoint, float scale_x, float scale_y, int *ix0, int *iy0, int *ix1, int *iy1); +// get the bbox of the bitmap centered around the glyph origin; so the +// bitmap width is ix1-ix0, height is iy1-iy0, and location to place +// the bitmap top left is (leftSideBearing*scale,iy0). +// (Note that the bitmap uses y-increases-down, but the shape uses +// y-increases-up, so CodepointBitmapBox and CodepointBox are inverted.) + +STBTT_DEF void stbtt_GetCodepointBitmapBoxSubpixel(const stbtt_fontinfo *font, int codepoint, float scale_x, float scale_y, float shift_x, float shift_y, int *ix0, int *iy0, int *ix1, int *iy1); +// same as stbtt_GetCodepointBitmapBox, but you can specify a subpixel +// shift for the character + +// the following functions are equivalent to the above functions, but operate +// on glyph indices instead of Unicode codepoints (for efficiency) +STBTT_DEF unsigned char *stbtt_GetGlyphBitmap(const stbtt_fontinfo *info, float scale_x, float scale_y, int glyph, int *width, int *height, int *xoff, int *yoff); +STBTT_DEF unsigned char *stbtt_GetGlyphBitmapSubpixel(const stbtt_fontinfo *info, float scale_x, float scale_y, float shift_x, float shift_y, int glyph, int *width, int *height, int *xoff, int *yoff); +STBTT_DEF void stbtt_MakeGlyphBitmap(const stbtt_fontinfo *info, unsigned char *output, int out_w, int out_h, int out_stride, float scale_x, float scale_y, int glyph); +STBTT_DEF void stbtt_MakeGlyphBitmapSubpixel(const stbtt_fontinfo *info, unsigned char *output, int out_w, int out_h, int out_stride, float scale_x, float scale_y, float shift_x, float shift_y, int glyph); +STBTT_DEF void stbtt_MakeGlyphBitmapSubpixelPrefilter(const stbtt_fontinfo *info, unsigned char *output, int out_w, int out_h, int out_stride, float scale_x, float scale_y, float shift_x, float shift_y, int oversample_x, int oversample_y, float *sub_x, float *sub_y, int glyph); +STBTT_DEF void stbtt_GetGlyphBitmapBox(const stbtt_fontinfo *font, int glyph, float scale_x, float scale_y, int *ix0, int *iy0, int *ix1, int *iy1); +STBTT_DEF void stbtt_GetGlyphBitmapBoxSubpixel(const stbtt_fontinfo *font, int glyph, float scale_x, float scale_y,float shift_x, float shift_y, int *ix0, int *iy0, int *ix1, int *iy1); + + +// @TODO: don't expose this structure +typedef struct +{ + int w,h,stride; + unsigned char *pixels; +} stbtt__bitmap; + +// rasterize a shape with quadratic beziers into a bitmap +STBTT_DEF void stbtt_Rasterize(stbtt__bitmap *result, // 1-channel bitmap to draw into + float flatness_in_pixels, // allowable error of curve in pixels + stbtt_vertex *vertices, // array of vertices defining shape + int num_verts, // number of vertices in above array + float scale_x, float scale_y, // scale applied to input vertices + float 
shift_x, float shift_y, // translation applied to input vertices + int x_off, int y_off, // another translation applied to input + int invert, // if non-zero, vertically flip shape + void *userdata); // context for to STBTT_MALLOC + +////////////////////////////////////////////////////////////////////////////// +// +// Signed Distance Function (or Field) rendering + +STBTT_DEF void stbtt_FreeSDF(unsigned char *bitmap, void *userdata); +// frees the SDF bitmap allocated below + +STBTT_DEF unsigned char * stbtt_GetGlyphSDF(const stbtt_fontinfo *info, float scale, int glyph, int padding, unsigned char onedge_value, float pixel_dist_scale, int *width, int *height, int *xoff, int *yoff); +STBTT_DEF unsigned char * stbtt_GetCodepointSDF(const stbtt_fontinfo *info, float scale, int codepoint, int padding, unsigned char onedge_value, float pixel_dist_scale, int *width, int *height, int *xoff, int *yoff); +// These functions compute a discretized SDF field for a single character, suitable for storing +// in a single-channel texture, sampling with bilinear filtering, and testing against +// larger than some threshold to produce scalable fonts. +// info -- the font +// scale -- controls the size of the resulting SDF bitmap, same as it would be creating a regular bitmap +// glyph/codepoint -- the character to generate the SDF for +// padding -- extra "pixels" around the character which are filled with the distance to the character (not 0), +// which allows effects like bit outlines +// onedge_value -- value 0-255 to test the SDF against to reconstruct the character (i.e. the isocontour of the character) +// pixel_dist_scale -- what value the SDF should increase by when moving one SDF "pixel" away from the edge (on the 0..255 scale) +// if positive, > onedge_value is inside; if negative, < onedge_value is inside +// width,height -- output height & width of the SDF bitmap (including padding) +// xoff,yoff -- output origin of the character +// return value -- a 2D array of bytes 0..255, width*height in size +// +// pixel_dist_scale & onedge_value are a scale & bias that allows you to make +// optimal use of the limited 0..255 for your application, trading off precision +// and special effects. SDF values outside the range 0..255 are clamped to 0..255. +// +// Example: +// scale = stbtt_ScaleForPixelHeight(22) +// padding = 5 +// onedge_value = 180 +// pixel_dist_scale = 180/5.0 = 36.0 +// +// This will create an SDF bitmap in which the character is about 22 pixels +// high but the whole bitmap is about 22+5+5=32 pixels high. To produce a filled +// shape, sample the SDF at each pixel and fill the pixel if the SDF value +// is greater than or equal to 180/255. (You'll actually want to antialias, +// which is beyond the scope of this example.) Additionally, you can compute +// offset outlines (e.g. to stroke the character border inside & outside, +// or only outside). For example, to fill outside the character up to 3 SDF +// pixels, you would compare against (180-36.0*3)/255 = 72/255. The above +// choice of variables maps a range from 5 pixels outside the shape to +// 2 pixels inside the shape to 0..255; this is intended primarily for apply +// outside effects only (the interior range is needed to allow proper +// antialiasing of the font at *smaller* sizes) +// +// The function computes the SDF analytically at each SDF pixel, not by e.g. +// building a higher-res bitmap and approximating it. 
In theory the quality +// should be as high as possible for an SDF of this size & representation, but +// unclear if this is true in practice (perhaps building a higher-res bitmap +// and computing from that can allow drop-out prevention). +// +// The algorithm has not been optimized at all, so expect it to be slow +// if computing lots of characters or very large sizes. + + + +////////////////////////////////////////////////////////////////////////////// +// +// Finding the right font... +// +// You should really just solve this offline, keep your own tables +// of what font is what, and don't try to get it out of the .ttf file. +// That's because getting it out of the .ttf file is really hard, because +// the names in the file can appear in many possible encodings, in many +// possible languages, and e.g. if you need a case-insensitive comparison, +// the details of that depend on the encoding & language in a complex way +// (actually underspecified in truetype, but also gigantic). +// +// But you can use the provided functions in two possible ways: +// stbtt_FindMatchingFont() will use *case-sensitive* comparisons on +// unicode-encoded names to try to find the font you want; +// you can run this before calling stbtt_InitFont() +// +// stbtt_GetFontNameString() lets you get any of the various strings +// from the file yourself and do your own comparisons on them. +// You have to have called stbtt_InitFont() first. + + +STBTT_DEF int stbtt_FindMatchingFont(const unsigned char *fontdata, const char *name, int flags); +// returns the offset (not index) of the font that matches, or -1 if none +// if you use STBTT_MACSTYLE_DONTCARE, use a font name like "Arial Bold". +// if you use any other flag, use a font name like "Arial"; this checks +// the 'macStyle' header field; i don't know if fonts set this consistently +#define STBTT_MACSTYLE_DONTCARE 0 +#define STBTT_MACSTYLE_BOLD 1 +#define STBTT_MACSTYLE_ITALIC 2 +#define STBTT_MACSTYLE_UNDERSCORE 4 +#define STBTT_MACSTYLE_NONE 8 // <= not same as 0, this makes us check the bitfield is 0 + +STBTT_DEF int stbtt_CompareUTF8toUTF16_bigendian(const char *s1, int len1, const char *s2, int len2); +// returns 1/0 whether the first string interpreted as utf8 is identical to +// the second string interpreted as big-endian utf16... useful for strings from next func + +STBTT_DEF const char *stbtt_GetFontNameString(const stbtt_fontinfo *font, int *length, int platformID, int encodingID, int languageID, int nameID); +// returns the string (which may be big-endian double byte, e.g. for unicode) +// and puts the length in bytes in *length. 
+// +// some of the values for the IDs are below; for more see the truetype spec: +// http://developer.apple.com/textfonts/TTRefMan/RM06/Chap6name.html +// http://www.microsoft.com/typography/otspec/name.htm + +enum { // platformID + STBTT_PLATFORM_ID_UNICODE =0, + STBTT_PLATFORM_ID_MAC =1, + STBTT_PLATFORM_ID_ISO =2, + STBTT_PLATFORM_ID_MICROSOFT =3 +}; + +enum { // encodingID for STBTT_PLATFORM_ID_UNICODE + STBTT_UNICODE_EID_UNICODE_1_0 =0, + STBTT_UNICODE_EID_UNICODE_1_1 =1, + STBTT_UNICODE_EID_ISO_10646 =2, + STBTT_UNICODE_EID_UNICODE_2_0_BMP=3, + STBTT_UNICODE_EID_UNICODE_2_0_FULL=4 +}; + +enum { // encodingID for STBTT_PLATFORM_ID_MICROSOFT + STBTT_MS_EID_SYMBOL =0, + STBTT_MS_EID_UNICODE_BMP =1, + STBTT_MS_EID_SHIFTJIS =2, + STBTT_MS_EID_UNICODE_FULL =10 +}; + +enum { // encodingID for STBTT_PLATFORM_ID_MAC; same as Script Manager codes + STBTT_MAC_EID_ROMAN =0, STBTT_MAC_EID_ARABIC =4, + STBTT_MAC_EID_JAPANESE =1, STBTT_MAC_EID_HEBREW =5, + STBTT_MAC_EID_CHINESE_TRAD =2, STBTT_MAC_EID_GREEK =6, + STBTT_MAC_EID_KOREAN =3, STBTT_MAC_EID_RUSSIAN =7 +}; + +enum { // languageID for STBTT_PLATFORM_ID_MICROSOFT; same as LCID... + // problematic because there are e.g. 16 english LCIDs and 16 arabic LCIDs + STBTT_MS_LANG_ENGLISH =0x0409, STBTT_MS_LANG_ITALIAN =0x0410, + STBTT_MS_LANG_CHINESE =0x0804, STBTT_MS_LANG_JAPANESE =0x0411, + STBTT_MS_LANG_DUTCH =0x0413, STBTT_MS_LANG_KOREAN =0x0412, + STBTT_MS_LANG_FRENCH =0x040c, STBTT_MS_LANG_RUSSIAN =0x0419, + STBTT_MS_LANG_GERMAN =0x0407, STBTT_MS_LANG_SPANISH =0x0409, + STBTT_MS_LANG_HEBREW =0x040d, STBTT_MS_LANG_SWEDISH =0x041D +}; + +enum { // languageID for STBTT_PLATFORM_ID_MAC + STBTT_MAC_LANG_ENGLISH =0 , STBTT_MAC_LANG_JAPANESE =11, + STBTT_MAC_LANG_ARABIC =12, STBTT_MAC_LANG_KOREAN =23, + STBTT_MAC_LANG_DUTCH =4 , STBTT_MAC_LANG_RUSSIAN =32, + STBTT_MAC_LANG_FRENCH =1 , STBTT_MAC_LANG_SPANISH =6 , + STBTT_MAC_LANG_GERMAN =2 , STBTT_MAC_LANG_SWEDISH =5 , + STBTT_MAC_LANG_HEBREW =10, STBTT_MAC_LANG_CHINESE_SIMPLIFIED =33, + STBTT_MAC_LANG_ITALIAN =3 , STBTT_MAC_LANG_CHINESE_TRAD =19 +}; + +#ifdef __cplusplus +} +#endif + +#endif // __STB_INCLUDE_STB_TRUETYPE_H__ + +/////////////////////////////////////////////////////////////////////////////// +/////////////////////////////////////////////////////////////////////////////// +//// +//// IMPLEMENTATION +//// +//// + +#ifdef STB_TRUETYPE_IMPLEMENTATION + +#ifndef STBTT_MAX_OVERSAMPLE +#define STBTT_MAX_OVERSAMPLE 8 +#endif + +#if STBTT_MAX_OVERSAMPLE > 255 +#error "STBTT_MAX_OVERSAMPLE cannot be > 255" +#endif + +typedef int stbtt__test_oversample_pow2[(STBTT_MAX_OVERSAMPLE & (STBTT_MAX_OVERSAMPLE-1)) == 0 ? 1 : -1]; + +#ifndef STBTT_RASTERIZER_VERSION +#define STBTT_RASTERIZER_VERSION 2 +#endif + +#ifdef _MSC_VER +#define STBTT__NOTUSED(v) (void)(v) +#else +#define STBTT__NOTUSED(v) (void)sizeof(v) +#endif + +////////////////////////////////////////////////////////////////////////// +// +// stbtt__buf helpers to parse data from file +// + +static stbtt_uint8 stbtt__buf_get8(stbtt__buf *b) +{ + if (b->cursor >= b->size) + return 0; + return b->data[b->cursor++]; +} + +static stbtt_uint8 stbtt__buf_peek8(stbtt__buf *b) +{ + if (b->cursor >= b->size) + return 0; + return b->data[b->cursor]; +} + +static void stbtt__buf_seek(stbtt__buf *b, int o) +{ + STBTT_assert(!(o > b->size || o < 0)); + b->cursor = (o > b->size || o < 0) ? 
b->size : o; +} + +static void stbtt__buf_skip(stbtt__buf *b, int o) +{ + stbtt__buf_seek(b, b->cursor + o); +} + +static stbtt_uint32 stbtt__buf_get(stbtt__buf *b, int n) +{ + stbtt_uint32 v = 0; + int i; + STBTT_assert(n >= 1 && n <= 4); + for (i = 0; i < n; i++) + v = (v << 8) | stbtt__buf_get8(b); + return v; +} + +static stbtt__buf stbtt__new_buf(const void *p, size_t size) +{ + stbtt__buf r; + STBTT_assert(size < 0x40000000); + r.data = (stbtt_uint8*) p; + r.size = (int) size; + r.cursor = 0; + return r; +} + +#define stbtt__buf_get16(b) stbtt__buf_get((b), 2) +#define stbtt__buf_get32(b) stbtt__buf_get((b), 4) + +static stbtt__buf stbtt__buf_range(const stbtt__buf *b, int o, int s) +{ + stbtt__buf r = stbtt__new_buf(NULL, 0); + if (o < 0 || s < 0 || o > b->size || s > b->size - o) return r; + r.data = b->data + o; + r.size = s; + return r; +} + +static stbtt__buf stbtt__cff_get_index(stbtt__buf *b) +{ + int count, start, offsize; + start = b->cursor; + count = stbtt__buf_get16(b); + if (count) { + offsize = stbtt__buf_get8(b); + STBTT_assert(offsize >= 1 && offsize <= 4); + stbtt__buf_skip(b, offsize * count); + stbtt__buf_skip(b, stbtt__buf_get(b, offsize) - 1); + } + return stbtt__buf_range(b, start, b->cursor - start); +} + +static stbtt_uint32 stbtt__cff_int(stbtt__buf *b) +{ + int b0 = stbtt__buf_get8(b); + if (b0 >= 32 && b0 <= 246) return b0 - 139; + else if (b0 >= 247 && b0 <= 250) return (b0 - 247)*256 + stbtt__buf_get8(b) + 108; + else if (b0 >= 251 && b0 <= 254) return -(b0 - 251)*256 - stbtt__buf_get8(b) - 108; + else if (b0 == 28) return stbtt__buf_get16(b); + else if (b0 == 29) return stbtt__buf_get32(b); + STBTT_assert(0); + return 0; +} + +static void stbtt__cff_skip_operand(stbtt__buf *b) { + int v, b0 = stbtt__buf_peek8(b); + STBTT_assert(b0 >= 28); + if (b0 == 30) { + stbtt__buf_skip(b, 1); + while (b->cursor < b->size) { + v = stbtt__buf_get8(b); + if ((v & 0xF) == 0xF || (v >> 4) == 0xF) + break; + } + } else { + stbtt__cff_int(b); + } +} + +static stbtt__buf stbtt__dict_get(stbtt__buf *b, int key) +{ + stbtt__buf_seek(b, 0); + while (b->cursor < b->size) { + int start = b->cursor, end, op; + while (stbtt__buf_peek8(b) >= 28) + stbtt__cff_skip_operand(b); + end = b->cursor; + op = stbtt__buf_get8(b); + if (op == 12) op = stbtt__buf_get8(b) | 0x100; + if (op == key) return stbtt__buf_range(b, start, end-start); + } + return stbtt__buf_range(b, 0, 0); +} + +static void stbtt__dict_get_ints(stbtt__buf *b, int key, int outcount, stbtt_uint32 *out) +{ + int i; + stbtt__buf operands = stbtt__dict_get(b, key); + for (i = 0; i < outcount && operands.cursor < operands.size; i++) + out[i] = stbtt__cff_int(&operands); +} + +static int stbtt__cff_index_count(stbtt__buf *b) +{ + stbtt__buf_seek(b, 0); + return stbtt__buf_get16(b); +} + +static stbtt__buf stbtt__cff_index_get(stbtt__buf b, int i) +{ + int count, offsize, start, end; + stbtt__buf_seek(&b, 0); + count = stbtt__buf_get16(&b); + offsize = stbtt__buf_get8(&b); + STBTT_assert(i >= 0 && i < count); + STBTT_assert(offsize >= 1 && offsize <= 4); + stbtt__buf_skip(&b, i*offsize); + start = stbtt__buf_get(&b, offsize); + end = stbtt__buf_get(&b, offsize); + return stbtt__buf_range(&b, 2+(count+1)*offsize+start, end - start); +} + +////////////////////////////////////////////////////////////////////////// +// +// accessors to parse data from file +// + +// on platforms that don't allow misaligned reads, if we want to allow +// truetype fonts that aren't padded to alignment, define ALLOW_UNALIGNED_TRUETYPE + +#define 
ttBYTE(p) (* (stbtt_uint8 *) (p)) +#define ttCHAR(p) (* (stbtt_int8 *) (p)) +#define ttFixed(p) ttLONG(p) + +static stbtt_uint16 ttUSHORT(stbtt_uint8 *p) { return p[0]*256 + p[1]; } +static stbtt_int16 ttSHORT(stbtt_uint8 *p) { return p[0]*256 + p[1]; } +static stbtt_uint32 ttULONG(stbtt_uint8 *p) { return (p[0]<<24) + (p[1]<<16) + (p[2]<<8) + p[3]; } +static stbtt_int32 ttLONG(stbtt_uint8 *p) { return (p[0]<<24) + (p[1]<<16) + (p[2]<<8) + p[3]; } + +#define stbtt_tag4(p,c0,c1,c2,c3) ((p)[0] == (c0) && (p)[1] == (c1) && (p)[2] == (c2) && (p)[3] == (c3)) +#define stbtt_tag(p,str) stbtt_tag4(p,str[0],str[1],str[2],str[3]) + +static int stbtt__isfont(stbtt_uint8 *font) +{ + // check the version number + if (stbtt_tag4(font, '1',0,0,0)) return 1; // TrueType 1 + if (stbtt_tag(font, "typ1")) return 1; // TrueType with type 1 font -- we don't support this! + if (stbtt_tag(font, "OTTO")) return 1; // OpenType with CFF + if (stbtt_tag4(font, 0,1,0,0)) return 1; // OpenType 1.0 + if (stbtt_tag(font, "true")) return 1; // Apple specification for TrueType fonts + return 0; +} + +// @OPTIMIZE: binary search +static stbtt_uint32 stbtt__find_table(stbtt_uint8 *data, stbtt_uint32 fontstart, const char *tag) +{ + stbtt_int32 num_tables = ttUSHORT(data+fontstart+4); + stbtt_uint32 tabledir = fontstart + 12; + stbtt_int32 i; + for (i=0; i < num_tables; ++i) { + stbtt_uint32 loc = tabledir + 16*i; + if (stbtt_tag(data+loc+0, tag)) + return ttULONG(data+loc+8); + } + return 0; +} + +static int stbtt_GetFontOffsetForIndex_internal(unsigned char *font_collection, int index) +{ + // if it's just a font, there's only one valid index + if (stbtt__isfont(font_collection)) + return index == 0 ? 0 : -1; + + // check if it's a TTC + if (stbtt_tag(font_collection, "ttcf")) { + // version 1? + if (ttULONG(font_collection+4) == 0x00010000 || ttULONG(font_collection+4) == 0x00020000) { + stbtt_int32 n = ttLONG(font_collection+8); + if (index >= n) + return -1; + return ttULONG(font_collection+12+index*4); + } + } + return -1; +} + +static int stbtt_GetNumberOfFonts_internal(unsigned char *font_collection) +{ + // if it's just a font, there's only one valid font + if (stbtt__isfont(font_collection)) + return 1; + + // check if it's a TTC + if (stbtt_tag(font_collection, "ttcf")) { + // version 1? 
+ if (ttULONG(font_collection+4) == 0x00010000 || ttULONG(font_collection+4) == 0x00020000) { + return ttLONG(font_collection+8); + } + } + return 0; +} + +static stbtt__buf stbtt__get_subrs(stbtt__buf cff, stbtt__buf fontdict) +{ + stbtt_uint32 subrsoff = 0, private_loc[2] = { 0, 0 }; + stbtt__buf pdict; + stbtt__dict_get_ints(&fontdict, 18, 2, private_loc); + if (!private_loc[1] || !private_loc[0]) return stbtt__new_buf(NULL, 0); + pdict = stbtt__buf_range(&cff, private_loc[1], private_loc[0]); + stbtt__dict_get_ints(&pdict, 19, 1, &subrsoff); + if (!subrsoff) return stbtt__new_buf(NULL, 0); + stbtt__buf_seek(&cff, private_loc[1]+subrsoff); + return stbtt__cff_get_index(&cff); +} + +// since most people won't use this, find this table the first time it's needed +static int stbtt__get_svg(stbtt_fontinfo *info) +{ + stbtt_uint32 t; + if (info->svg < 0) { + t = stbtt__find_table(info->data, info->fontstart, "SVG "); + if (t) { + stbtt_uint32 offset = ttULONG(info->data + t + 2); + info->svg = t + offset; + } else { + info->svg = 0; + } + } + return info->svg; +} + +static int stbtt_InitFont_internal(stbtt_fontinfo *info, unsigned char *data, int fontstart) +{ + stbtt_uint32 cmap, t; + stbtt_int32 i,numTables; + + info->data = data; + info->fontstart = fontstart; + info->cff = stbtt__new_buf(NULL, 0); + + cmap = stbtt__find_table(data, fontstart, "cmap"); // required + info->loca = stbtt__find_table(data, fontstart, "loca"); // required + info->head = stbtt__find_table(data, fontstart, "head"); // required + info->glyf = stbtt__find_table(data, fontstart, "glyf"); // required + info->hhea = stbtt__find_table(data, fontstart, "hhea"); // required + info->hmtx = stbtt__find_table(data, fontstart, "hmtx"); // required + info->kern = stbtt__find_table(data, fontstart, "kern"); // not required + info->gpos = stbtt__find_table(data, fontstart, "GPOS"); // not required + + if (!cmap || !info->head || !info->hhea || !info->hmtx) + return 0; + if (info->glyf) { + // required for truetype + if (!info->loca) return 0; + } else { + // initialization for CFF / Type2 fonts (OTF) + stbtt__buf b, topdict, topdictidx; + stbtt_uint32 cstype = 2, charstrings = 0, fdarrayoff = 0, fdselectoff = 0; + stbtt_uint32 cff; + + cff = stbtt__find_table(data, fontstart, "CFF "); + if (!cff) return 0; + + info->fontdicts = stbtt__new_buf(NULL, 0); + info->fdselect = stbtt__new_buf(NULL, 0); + + // @TODO this should use size from table (not 512MB) + info->cff = stbtt__new_buf(data+cff, 512*1024*1024); + b = info->cff; + + // read the header + stbtt__buf_skip(&b, 2); + stbtt__buf_seek(&b, stbtt__buf_get8(&b)); // hdrsize + + // @TODO the name INDEX could list multiple fonts, + // but we just use the first one. 
+ stbtt__cff_get_index(&b); // name INDEX + topdictidx = stbtt__cff_get_index(&b); + topdict = stbtt__cff_index_get(topdictidx, 0); + stbtt__cff_get_index(&b); // string INDEX + info->gsubrs = stbtt__cff_get_index(&b); + + stbtt__dict_get_ints(&topdict, 17, 1, &charstrings); + stbtt__dict_get_ints(&topdict, 0x100 | 6, 1, &cstype); + stbtt__dict_get_ints(&topdict, 0x100 | 36, 1, &fdarrayoff); + stbtt__dict_get_ints(&topdict, 0x100 | 37, 1, &fdselectoff); + info->subrs = stbtt__get_subrs(b, topdict); + + // we only support Type 2 charstrings + if (cstype != 2) return 0; + if (charstrings == 0) return 0; + + if (fdarrayoff) { + // looks like a CID font + if (!fdselectoff) return 0; + stbtt__buf_seek(&b, fdarrayoff); + info->fontdicts = stbtt__cff_get_index(&b); + info->fdselect = stbtt__buf_range(&b, fdselectoff, b.size-fdselectoff); + } + + stbtt__buf_seek(&b, charstrings); + info->charstrings = stbtt__cff_get_index(&b); + } + + t = stbtt__find_table(data, fontstart, "maxp"); + if (t) + info->numGlyphs = ttUSHORT(data+t+4); + else + info->numGlyphs = 0xffff; + + info->svg = -1; + + // find a cmap encoding table we understand *now* to avoid searching + // later. (todo: could make this installable) + // the same regardless of glyph. + numTables = ttUSHORT(data + cmap + 2); + info->index_map = 0; + for (i=0; i < numTables; ++i) { + stbtt_uint32 encoding_record = cmap + 4 + 8 * i; + // find an encoding we understand: + switch(ttUSHORT(data+encoding_record)) { + case STBTT_PLATFORM_ID_MICROSOFT: + switch (ttUSHORT(data+encoding_record+2)) { + case STBTT_MS_EID_UNICODE_BMP: + case STBTT_MS_EID_UNICODE_FULL: + // MS/Unicode + info->index_map = cmap + ttULONG(data+encoding_record+4); + break; + } + break; + case STBTT_PLATFORM_ID_UNICODE: + // Mac/iOS has these + // all the encodingIDs are unicode, so we don't bother to check it + info->index_map = cmap + ttULONG(data+encoding_record+4); + break; + } + } + if (info->index_map == 0) + return 0; + + info->indexToLocFormat = ttUSHORT(data+info->head + 50); + return 1; +} + +STBTT_DEF int stbtt_FindGlyphIndex(const stbtt_fontinfo *info, int unicode_codepoint) +{ + stbtt_uint8 *data = info->data; + stbtt_uint32 index_map = info->index_map; + + stbtt_uint16 format = ttUSHORT(data + index_map + 0); + if (format == 0) { // apple byte encoding + stbtt_int32 bytes = ttUSHORT(data + index_map + 2); + if (unicode_codepoint < bytes-6) + return ttBYTE(data + index_map + 6 + unicode_codepoint); + return 0; + } else if (format == 6) { + stbtt_uint32 first = ttUSHORT(data + index_map + 6); + stbtt_uint32 count = ttUSHORT(data + index_map + 8); + if ((stbtt_uint32) unicode_codepoint >= first && (stbtt_uint32) unicode_codepoint < first+count) + return ttUSHORT(data + index_map + 10 + (unicode_codepoint - first)*2); + return 0; + } else if (format == 2) { + STBTT_assert(0); // @TODO: high-byte mapping for japanese/chinese/korean + return 0; + } else if (format == 4) { // standard mapping for windows fonts: binary search collection of ranges + stbtt_uint16 segcount = ttUSHORT(data+index_map+6) >> 1; + stbtt_uint16 searchRange = ttUSHORT(data+index_map+8) >> 1; + stbtt_uint16 entrySelector = ttUSHORT(data+index_map+10); + stbtt_uint16 rangeShift = ttUSHORT(data+index_map+12) >> 1; + + // do a binary search of the segments + stbtt_uint32 endCount = index_map + 14; + stbtt_uint32 search = endCount; + + if (unicode_codepoint > 0xffff) + return 0; + + // they lie from endCount .. endCount + segCount + // but searchRange is the nearest power of two, so... 
+ if (unicode_codepoint >= ttUSHORT(data + search + rangeShift*2)) + search += rangeShift*2; + + // now decrement to bias correctly to find smallest + search -= 2; + while (entrySelector) { + stbtt_uint16 end; + searchRange >>= 1; + end = ttUSHORT(data + search + searchRange*2); + if (unicode_codepoint > end) + search += searchRange*2; + --entrySelector; + } + search += 2; + + { + stbtt_uint16 offset, start; + stbtt_uint16 item = (stbtt_uint16) ((search - endCount) >> 1); + + STBTT_assert(unicode_codepoint <= ttUSHORT(data + endCount + 2*item)); + start = ttUSHORT(data + index_map + 14 + segcount*2 + 2 + 2*item); + if (unicode_codepoint < start) + return 0; + + offset = ttUSHORT(data + index_map + 14 + segcount*6 + 2 + 2*item); + if (offset == 0) + return (stbtt_uint16) (unicode_codepoint + ttSHORT(data + index_map + 14 + segcount*4 + 2 + 2*item)); + + return ttUSHORT(data + offset + (unicode_codepoint-start)*2 + index_map + 14 + segcount*6 + 2 + 2*item); + } + } else if (format == 12 || format == 13) { + stbtt_uint32 ngroups = ttULONG(data+index_map+12); + stbtt_int32 low,high; + low = 0; high = (stbtt_int32)ngroups; + // Binary search the right group. + while (low < high) { + stbtt_int32 mid = low + ((high-low) >> 1); // rounds down, so low <= mid < high + stbtt_uint32 start_char = ttULONG(data+index_map+16+mid*12); + stbtt_uint32 end_char = ttULONG(data+index_map+16+mid*12+4); + if ((stbtt_uint32) unicode_codepoint < start_char) + high = mid; + else if ((stbtt_uint32) unicode_codepoint > end_char) + low = mid+1; + else { + stbtt_uint32 start_glyph = ttULONG(data+index_map+16+mid*12+8); + if (format == 12) + return start_glyph + unicode_codepoint-start_char; + else // format == 13 + return start_glyph; + } + } + return 0; // not found + } + // @TODO + STBTT_assert(0); + return 0; +} + +STBTT_DEF int stbtt_GetCodepointShape(const stbtt_fontinfo *info, int unicode_codepoint, stbtt_vertex **vertices) +{ + return stbtt_GetGlyphShape(info, stbtt_FindGlyphIndex(info, unicode_codepoint), vertices); +} + +static void stbtt_setvertex(stbtt_vertex *v, stbtt_uint8 type, stbtt_int32 x, stbtt_int32 y, stbtt_int32 cx, stbtt_int32 cy) +{ + v->type = type; + v->x = (stbtt_int16) x; + v->y = (stbtt_int16) y; + v->cx = (stbtt_int16) cx; + v->cy = (stbtt_int16) cy; +} + +static int stbtt__GetGlyfOffset(const stbtt_fontinfo *info, int glyph_index) +{ + int g1,g2; + + STBTT_assert(!info->cff.size); + + if (glyph_index >= info->numGlyphs) return -1; // glyph index out of range + if (info->indexToLocFormat >= 2) return -1; // unknown index->glyph map format + + if (info->indexToLocFormat == 0) { + g1 = info->glyf + ttUSHORT(info->data + info->loca + glyph_index * 2) * 2; + g2 = info->glyf + ttUSHORT(info->data + info->loca + glyph_index * 2 + 2) * 2; + } else { + g1 = info->glyf + ttULONG (info->data + info->loca + glyph_index * 4); + g2 = info->glyf + ttULONG (info->data + info->loca + glyph_index * 4 + 4); + } + + return g1==g2 ? 
-1 : g1; // if length is 0, return -1 +} + +static int stbtt__GetGlyphInfoT2(const stbtt_fontinfo *info, int glyph_index, int *x0, int *y0, int *x1, int *y1); + +STBTT_DEF int stbtt_GetGlyphBox(const stbtt_fontinfo *info, int glyph_index, int *x0, int *y0, int *x1, int *y1) +{ + if (info->cff.size) { + stbtt__GetGlyphInfoT2(info, glyph_index, x0, y0, x1, y1); + } else { + int g = stbtt__GetGlyfOffset(info, glyph_index); + if (g < 0) return 0; + + if (x0) *x0 = ttSHORT(info->data + g + 2); + if (y0) *y0 = ttSHORT(info->data + g + 4); + if (x1) *x1 = ttSHORT(info->data + g + 6); + if (y1) *y1 = ttSHORT(info->data + g + 8); + } + return 1; +} + +STBTT_DEF int stbtt_GetCodepointBox(const stbtt_fontinfo *info, int codepoint, int *x0, int *y0, int *x1, int *y1) +{ + return stbtt_GetGlyphBox(info, stbtt_FindGlyphIndex(info,codepoint), x0,y0,x1,y1); +} + +STBTT_DEF int stbtt_IsGlyphEmpty(const stbtt_fontinfo *info, int glyph_index) +{ + stbtt_int16 numberOfContours; + int g; + if (info->cff.size) + return stbtt__GetGlyphInfoT2(info, glyph_index, NULL, NULL, NULL, NULL) == 0; + g = stbtt__GetGlyfOffset(info, glyph_index); + if (g < 0) return 1; + numberOfContours = ttSHORT(info->data + g); + return numberOfContours == 0; +} + +static int stbtt__close_shape(stbtt_vertex *vertices, int num_vertices, int was_off, int start_off, + stbtt_int32 sx, stbtt_int32 sy, stbtt_int32 scx, stbtt_int32 scy, stbtt_int32 cx, stbtt_int32 cy) +{ + if (start_off) { + if (was_off) + stbtt_setvertex(&vertices[num_vertices++], STBTT_vcurve, (cx+scx)>>1, (cy+scy)>>1, cx,cy); + stbtt_setvertex(&vertices[num_vertices++], STBTT_vcurve, sx,sy,scx,scy); + } else { + if (was_off) + stbtt_setvertex(&vertices[num_vertices++], STBTT_vcurve,sx,sy,cx,cy); + else + stbtt_setvertex(&vertices[num_vertices++], STBTT_vline,sx,sy,0,0); + } + return num_vertices; +} + +static int stbtt__GetGlyphShapeTT(const stbtt_fontinfo *info, int glyph_index, stbtt_vertex **pvertices) +{ + stbtt_int16 numberOfContours; + stbtt_uint8 *endPtsOfContours; + stbtt_uint8 *data = info->data; + stbtt_vertex *vertices=0; + int num_vertices=0; + int g = stbtt__GetGlyfOffset(info, glyph_index); + + *pvertices = NULL; + + if (g < 0) return 0; + + numberOfContours = ttSHORT(data + g); + + if (numberOfContours > 0) { + stbtt_uint8 flags=0,flagcount; + stbtt_int32 ins, i,j=0,m,n, next_move, was_off=0, off, start_off=0; + stbtt_int32 x,y,cx,cy,sx,sy, scx,scy; + stbtt_uint8 *points; + endPtsOfContours = (data + g + 10); + ins = ttUSHORT(data + g + 10 + numberOfContours * 2); + points = data + g + 10 + numberOfContours * 2 + 2 + ins; + + n = 1+ttUSHORT(endPtsOfContours + numberOfContours*2-2); + + m = n + 2*numberOfContours; // a loose bound on how many vertices we might need + vertices = (stbtt_vertex *) STBTT_malloc(m * sizeof(vertices[0]), info->userdata); + if (vertices == 0) + return 0; + + next_move = 0; + flagcount=0; + + // in first pass, we load uninterpreted data into the allocated array + // above, shifted to the end of the array so we won't overwrite it when + // we create our final data starting from the front + + off = m - n; // starting offset for uninterpreted data, regardless of how m ends up being calculated + + // first load flags + + for (i=0; i < n; ++i) { + if (flagcount == 0) { + flags = *points++; + if (flags & 8) + flagcount = *points++; + } else + --flagcount; + vertices[off+i].type = flags; + } + + // now load x coordinates + x=0; + for (i=0; i < n; ++i) { + flags = vertices[off+i].type; + if (flags & 2) { + stbtt_int16 dx = *points++; + x += 
(flags & 16) ? dx : -dx; // ??? + } else { + if (!(flags & 16)) { + x = x + (stbtt_int16) (points[0]*256 + points[1]); + points += 2; + } + } + vertices[off+i].x = (stbtt_int16) x; + } + + // now load y coordinates + y=0; + for (i=0; i < n; ++i) { + flags = vertices[off+i].type; + if (flags & 4) { + stbtt_int16 dy = *points++; + y += (flags & 32) ? dy : -dy; // ??? + } else { + if (!(flags & 32)) { + y = y + (stbtt_int16) (points[0]*256 + points[1]); + points += 2; + } + } + vertices[off+i].y = (stbtt_int16) y; + } + + // now convert them to our format + num_vertices=0; + sx = sy = cx = cy = scx = scy = 0; + for (i=0; i < n; ++i) { + flags = vertices[off+i].type; + x = (stbtt_int16) vertices[off+i].x; + y = (stbtt_int16) vertices[off+i].y; + + if (next_move == i) { + if (i != 0) + num_vertices = stbtt__close_shape(vertices, num_vertices, was_off, start_off, sx,sy,scx,scy,cx,cy); + + // now start the new one + start_off = !(flags & 1); + if (start_off) { + // if we start off with an off-curve point, then when we need to find a point on the curve + // where we can start, and we need to save some state for when we wraparound. + scx = x; + scy = y; + if (!(vertices[off+i+1].type & 1)) { + // next point is also a curve point, so interpolate an on-point curve + sx = (x + (stbtt_int32) vertices[off+i+1].x) >> 1; + sy = (y + (stbtt_int32) vertices[off+i+1].y) >> 1; + } else { + // otherwise just use the next point as our start point + sx = (stbtt_int32) vertices[off+i+1].x; + sy = (stbtt_int32) vertices[off+i+1].y; + ++i; // we're using point i+1 as the starting point, so skip it + } + } else { + sx = x; + sy = y; + } + stbtt_setvertex(&vertices[num_vertices++], STBTT_vmove,sx,sy,0,0); + was_off = 0; + next_move = 1 + ttUSHORT(endPtsOfContours+j*2); + ++j; + } else { + if (!(flags & 1)) { // if it's a curve + if (was_off) // two off-curve control points in a row means interpolate an on-curve midpoint + stbtt_setvertex(&vertices[num_vertices++], STBTT_vcurve, (cx+x)>>1, (cy+y)>>1, cx, cy); + cx = x; + cy = y; + was_off = 1; + } else { + if (was_off) + stbtt_setvertex(&vertices[num_vertices++], STBTT_vcurve, x,y, cx, cy); + else + stbtt_setvertex(&vertices[num_vertices++], STBTT_vline, x,y,0,0); + was_off = 0; + } + } + } + num_vertices = stbtt__close_shape(vertices, num_vertices, was_off, start_off, sx,sy,scx,scy,cx,cy); + } else if (numberOfContours < 0) { + // Compound shapes. 
+ int more = 1; + stbtt_uint8 *comp = data + g + 10; + num_vertices = 0; + vertices = 0; + while (more) { + stbtt_uint16 flags, gidx; + int comp_num_verts = 0, i; + stbtt_vertex *comp_verts = 0, *tmp = 0; + float mtx[6] = {1,0,0,1,0,0}, m, n; + + flags = ttSHORT(comp); comp+=2; + gidx = ttSHORT(comp); comp+=2; + + if (flags & 2) { // XY values + if (flags & 1) { // shorts + mtx[4] = ttSHORT(comp); comp+=2; + mtx[5] = ttSHORT(comp); comp+=2; + } else { + mtx[4] = ttCHAR(comp); comp+=1; + mtx[5] = ttCHAR(comp); comp+=1; + } + } + else { + // @TODO handle matching point + STBTT_assert(0); + } + if (flags & (1<<3)) { // WE_HAVE_A_SCALE + mtx[0] = mtx[3] = ttSHORT(comp)/16384.0f; comp+=2; + mtx[1] = mtx[2] = 0; + } else if (flags & (1<<6)) { // WE_HAVE_AN_X_AND_YSCALE + mtx[0] = ttSHORT(comp)/16384.0f; comp+=2; + mtx[1] = mtx[2] = 0; + mtx[3] = ttSHORT(comp)/16384.0f; comp+=2; + } else if (flags & (1<<7)) { // WE_HAVE_A_TWO_BY_TWO + mtx[0] = ttSHORT(comp)/16384.0f; comp+=2; + mtx[1] = ttSHORT(comp)/16384.0f; comp+=2; + mtx[2] = ttSHORT(comp)/16384.0f; comp+=2; + mtx[3] = ttSHORT(comp)/16384.0f; comp+=2; + } + + // Find transformation scales. + m = (float) STBTT_sqrt(mtx[0]*mtx[0] + mtx[1]*mtx[1]); + n = (float) STBTT_sqrt(mtx[2]*mtx[2] + mtx[3]*mtx[3]); + + // Get indexed glyph. + comp_num_verts = stbtt_GetGlyphShape(info, gidx, &comp_verts); + if (comp_num_verts > 0) { + // Transform vertices. + for (i = 0; i < comp_num_verts; ++i) { + stbtt_vertex* v = &comp_verts[i]; + stbtt_vertex_type x,y; + x=v->x; y=v->y; + v->x = (stbtt_vertex_type)(m * (mtx[0]*x + mtx[2]*y + mtx[4])); + v->y = (stbtt_vertex_type)(n * (mtx[1]*x + mtx[3]*y + mtx[5])); + x=v->cx; y=v->cy; + v->cx = (stbtt_vertex_type)(m * (mtx[0]*x + mtx[2]*y + mtx[4])); + v->cy = (stbtt_vertex_type)(n * (mtx[1]*x + mtx[3]*y + mtx[5])); + } + // Append vertices. + tmp = (stbtt_vertex*)STBTT_malloc((num_vertices+comp_num_verts)*sizeof(stbtt_vertex), info->userdata); + if (!tmp) { + if (vertices) STBTT_free(vertices, info->userdata); + if (comp_verts) STBTT_free(comp_verts, info->userdata); + return 0; + } + if (num_vertices > 0) STBTT_memcpy(tmp, vertices, num_vertices*sizeof(stbtt_vertex)); + STBTT_memcpy(tmp+num_vertices, comp_verts, comp_num_verts*sizeof(stbtt_vertex)); + if (vertices) STBTT_free(vertices, info->userdata); + vertices = tmp; + STBTT_free(comp_verts, info->userdata); + num_vertices += comp_num_verts; + } + // More components ? 
+ more = flags & (1<<5); + } + } else { + // numberOfCounters == 0, do nothing + } + + *pvertices = vertices; + return num_vertices; +} + +typedef struct +{ + int bounds; + int started; + float first_x, first_y; + float x, y; + stbtt_int32 min_x, max_x, min_y, max_y; + + stbtt_vertex *pvertices; + int num_vertices; +} stbtt__csctx; + +#define STBTT__CSCTX_INIT(bounds) {bounds,0, 0,0, 0,0, 0,0,0,0, NULL, 0} + +static void stbtt__track_vertex(stbtt__csctx *c, stbtt_int32 x, stbtt_int32 y) +{ + if (x > c->max_x || !c->started) c->max_x = x; + if (y > c->max_y || !c->started) c->max_y = y; + if (x < c->min_x || !c->started) c->min_x = x; + if (y < c->min_y || !c->started) c->min_y = y; + c->started = 1; +} + +static void stbtt__csctx_v(stbtt__csctx *c, stbtt_uint8 type, stbtt_int32 x, stbtt_int32 y, stbtt_int32 cx, stbtt_int32 cy, stbtt_int32 cx1, stbtt_int32 cy1) +{ + if (c->bounds) { + stbtt__track_vertex(c, x, y); + if (type == STBTT_vcubic) { + stbtt__track_vertex(c, cx, cy); + stbtt__track_vertex(c, cx1, cy1); + } + } else { + stbtt_setvertex(&c->pvertices[c->num_vertices], type, x, y, cx, cy); + c->pvertices[c->num_vertices].cx1 = (stbtt_int16) cx1; + c->pvertices[c->num_vertices].cy1 = (stbtt_int16) cy1; + } + c->num_vertices++; +} + +static void stbtt__csctx_close_shape(stbtt__csctx *ctx) +{ + if (ctx->first_x != ctx->x || ctx->first_y != ctx->y) + stbtt__csctx_v(ctx, STBTT_vline, (int)ctx->first_x, (int)ctx->first_y, 0, 0, 0, 0); +} + +static void stbtt__csctx_rmove_to(stbtt__csctx *ctx, float dx, float dy) +{ + stbtt__csctx_close_shape(ctx); + ctx->first_x = ctx->x = ctx->x + dx; + ctx->first_y = ctx->y = ctx->y + dy; + stbtt__csctx_v(ctx, STBTT_vmove, (int)ctx->x, (int)ctx->y, 0, 0, 0, 0); +} + +static void stbtt__csctx_rline_to(stbtt__csctx *ctx, float dx, float dy) +{ + ctx->x += dx; + ctx->y += dy; + stbtt__csctx_v(ctx, STBTT_vline, (int)ctx->x, (int)ctx->y, 0, 0, 0, 0); +} + +static void stbtt__csctx_rccurve_to(stbtt__csctx *ctx, float dx1, float dy1, float dx2, float dy2, float dx3, float dy3) +{ + float cx1 = ctx->x + dx1; + float cy1 = ctx->y + dy1; + float cx2 = cx1 + dx2; + float cy2 = cy1 + dy2; + ctx->x = cx2 + dx3; + ctx->y = cy2 + dy3; + stbtt__csctx_v(ctx, STBTT_vcubic, (int)ctx->x, (int)ctx->y, (int)cx1, (int)cy1, (int)cx2, (int)cy2); +} + +static stbtt__buf stbtt__get_subr(stbtt__buf idx, int n) +{ + int count = stbtt__cff_index_count(&idx); + int bias = 107; + if (count >= 33900) + bias = 32768; + else if (count >= 1240) + bias = 1131; + n += bias; + if (n < 0 || n >= count) + return stbtt__new_buf(NULL, 0); + return stbtt__cff_index_get(idx, n); +} + +static stbtt__buf stbtt__cid_get_glyph_subrs(const stbtt_fontinfo *info, int glyph_index) +{ + stbtt__buf fdselect = info->fdselect; + int nranges, start, end, v, fmt, fdselector = -1, i; + + stbtt__buf_seek(&fdselect, 0); + fmt = stbtt__buf_get8(&fdselect); + if (fmt == 0) { + // untested + stbtt__buf_skip(&fdselect, glyph_index); + fdselector = stbtt__buf_get8(&fdselect); + } else if (fmt == 3) { + nranges = stbtt__buf_get16(&fdselect); + start = stbtt__buf_get16(&fdselect); + for (i = 0; i < nranges; i++) { + v = stbtt__buf_get8(&fdselect); + end = stbtt__buf_get16(&fdselect); + if (glyph_index >= start && glyph_index < end) { + fdselector = v; + break; + } + start = end; + } + } + if (fdselector == -1) stbtt__new_buf(NULL, 0); + return stbtt__get_subrs(info->cff, stbtt__cff_index_get(info->fontdicts, fdselector)); +} + +static int stbtt__run_charstring(const stbtt_fontinfo *info, int glyph_index, stbtt__csctx *c) 
+{ + int in_header = 1, maskbits = 0, subr_stack_height = 0, sp = 0, v, i, b0; + int has_subrs = 0, clear_stack; + float s[48]; + stbtt__buf subr_stack[10], subrs = info->subrs, b; + float f; + +#define STBTT__CSERR(s) (0) + + // this currently ignores the initial width value, which isn't needed if we have hmtx + b = stbtt__cff_index_get(info->charstrings, glyph_index); + while (b.cursor < b.size) { + i = 0; + clear_stack = 1; + b0 = stbtt__buf_get8(&b); + switch (b0) { + // @TODO implement hinting + case 0x13: // hintmask + case 0x14: // cntrmask + if (in_header) + maskbits += (sp / 2); // implicit "vstem" + in_header = 0; + stbtt__buf_skip(&b, (maskbits + 7) / 8); + break; + + case 0x01: // hstem + case 0x03: // vstem + case 0x12: // hstemhm + case 0x17: // vstemhm + maskbits += (sp / 2); + break; + + case 0x15: // rmoveto + in_header = 0; + if (sp < 2) return STBTT__CSERR("rmoveto stack"); + stbtt__csctx_rmove_to(c, s[sp-2], s[sp-1]); + break; + case 0x04: // vmoveto + in_header = 0; + if (sp < 1) return STBTT__CSERR("vmoveto stack"); + stbtt__csctx_rmove_to(c, 0, s[sp-1]); + break; + case 0x16: // hmoveto + in_header = 0; + if (sp < 1) return STBTT__CSERR("hmoveto stack"); + stbtt__csctx_rmove_to(c, s[sp-1], 0); + break; + + case 0x05: // rlineto + if (sp < 2) return STBTT__CSERR("rlineto stack"); + for (; i + 1 < sp; i += 2) + stbtt__csctx_rline_to(c, s[i], s[i+1]); + break; + + // hlineto/vlineto and vhcurveto/hvcurveto alternate horizontal and vertical + // starting from a different place. + + case 0x07: // vlineto + if (sp < 1) return STBTT__CSERR("vlineto stack"); + goto vlineto; + case 0x06: // hlineto + if (sp < 1) return STBTT__CSERR("hlineto stack"); + for (;;) { + if (i >= sp) break; + stbtt__csctx_rline_to(c, s[i], 0); + i++; + vlineto: + if (i >= sp) break; + stbtt__csctx_rline_to(c, 0, s[i]); + i++; + } + break; + + case 0x1F: // hvcurveto + if (sp < 4) return STBTT__CSERR("hvcurveto stack"); + goto hvcurveto; + case 0x1E: // vhcurveto + if (sp < 4) return STBTT__CSERR("vhcurveto stack"); + for (;;) { + if (i + 3 >= sp) break; + stbtt__csctx_rccurve_to(c, 0, s[i], s[i+1], s[i+2], s[i+3], (sp - i == 5) ? s[i + 4] : 0.0f); + i += 4; + hvcurveto: + if (i + 3 >= sp) break; + stbtt__csctx_rccurve_to(c, s[i], 0, s[i+1], s[i+2], (sp - i == 5) ? 
s[i+4] : 0.0f, s[i+3]); + i += 4; + } + break; + + case 0x08: // rrcurveto + if (sp < 6) return STBTT__CSERR("rcurveline stack"); + for (; i + 5 < sp; i += 6) + stbtt__csctx_rccurve_to(c, s[i], s[i+1], s[i+2], s[i+3], s[i+4], s[i+5]); + break; + + case 0x18: // rcurveline + if (sp < 8) return STBTT__CSERR("rcurveline stack"); + for (; i + 5 < sp - 2; i += 6) + stbtt__csctx_rccurve_to(c, s[i], s[i+1], s[i+2], s[i+3], s[i+4], s[i+5]); + if (i + 1 >= sp) return STBTT__CSERR("rcurveline stack"); + stbtt__csctx_rline_to(c, s[i], s[i+1]); + break; + + case 0x19: // rlinecurve + if (sp < 8) return STBTT__CSERR("rlinecurve stack"); + for (; i + 1 < sp - 6; i += 2) + stbtt__csctx_rline_to(c, s[i], s[i+1]); + if (i + 5 >= sp) return STBTT__CSERR("rlinecurve stack"); + stbtt__csctx_rccurve_to(c, s[i], s[i+1], s[i+2], s[i+3], s[i+4], s[i+5]); + break; + + case 0x1A: // vvcurveto + case 0x1B: // hhcurveto + if (sp < 4) return STBTT__CSERR("(vv|hh)curveto stack"); + f = 0.0; + if (sp & 1) { f = s[i]; i++; } + for (; i + 3 < sp; i += 4) { + if (b0 == 0x1B) + stbtt__csctx_rccurve_to(c, s[i], f, s[i+1], s[i+2], s[i+3], 0.0); + else + stbtt__csctx_rccurve_to(c, f, s[i], s[i+1], s[i+2], 0.0, s[i+3]); + f = 0.0; + } + break; + + case 0x0A: // callsubr + if (!has_subrs) { + if (info->fdselect.size) + subrs = stbtt__cid_get_glyph_subrs(info, glyph_index); + has_subrs = 1; + } + // fallthrough + case 0x1D: // callgsubr + if (sp < 1) return STBTT__CSERR("call(g|)subr stack"); + v = (int) s[--sp]; + if (subr_stack_height >= 10) return STBTT__CSERR("recursion limit"); + subr_stack[subr_stack_height++] = b; + b = stbtt__get_subr(b0 == 0x0A ? subrs : info->gsubrs, v); + if (b.size == 0) return STBTT__CSERR("subr not found"); + b.cursor = 0; + clear_stack = 0; + break; + + case 0x0B: // return + if (subr_stack_height <= 0) return STBTT__CSERR("return outside subr"); + b = subr_stack[--subr_stack_height]; + clear_stack = 0; + break; + + case 0x0E: // endchar + stbtt__csctx_close_shape(c); + return 1; + + case 0x0C: { // two-byte escape + float dx1, dx2, dx3, dx4, dx5, dx6, dy1, dy2, dy3, dy4, dy5, dy6; + float dx, dy; + int b1 = stbtt__buf_get8(&b); + switch (b1) { + // @TODO These "flex" implementations ignore the flex-depth and resolution, + // and always draw beziers. 
+ case 0x22: // hflex + if (sp < 7) return STBTT__CSERR("hflex stack"); + dx1 = s[0]; + dx2 = s[1]; + dy2 = s[2]; + dx3 = s[3]; + dx4 = s[4]; + dx5 = s[5]; + dx6 = s[6]; + stbtt__csctx_rccurve_to(c, dx1, 0, dx2, dy2, dx3, 0); + stbtt__csctx_rccurve_to(c, dx4, 0, dx5, -dy2, dx6, 0); + break; + + case 0x23: // flex + if (sp < 13) return STBTT__CSERR("flex stack"); + dx1 = s[0]; + dy1 = s[1]; + dx2 = s[2]; + dy2 = s[3]; + dx3 = s[4]; + dy3 = s[5]; + dx4 = s[6]; + dy4 = s[7]; + dx5 = s[8]; + dy5 = s[9]; + dx6 = s[10]; + dy6 = s[11]; + //fd is s[12] + stbtt__csctx_rccurve_to(c, dx1, dy1, dx2, dy2, dx3, dy3); + stbtt__csctx_rccurve_to(c, dx4, dy4, dx5, dy5, dx6, dy6); + break; + + case 0x24: // hflex1 + if (sp < 9) return STBTT__CSERR("hflex1 stack"); + dx1 = s[0]; + dy1 = s[1]; + dx2 = s[2]; + dy2 = s[3]; + dx3 = s[4]; + dx4 = s[5]; + dx5 = s[6]; + dy5 = s[7]; + dx6 = s[8]; + stbtt__csctx_rccurve_to(c, dx1, dy1, dx2, dy2, dx3, 0); + stbtt__csctx_rccurve_to(c, dx4, 0, dx5, dy5, dx6, -(dy1+dy2+dy5)); + break; + + case 0x25: // flex1 + if (sp < 11) return STBTT__CSERR("flex1 stack"); + dx1 = s[0]; + dy1 = s[1]; + dx2 = s[2]; + dy2 = s[3]; + dx3 = s[4]; + dy3 = s[5]; + dx4 = s[6]; + dy4 = s[7]; + dx5 = s[8]; + dy5 = s[9]; + dx6 = dy6 = s[10]; + dx = dx1+dx2+dx3+dx4+dx5; + dy = dy1+dy2+dy3+dy4+dy5; + if (STBTT_fabs(dx) > STBTT_fabs(dy)) + dy6 = -dy; + else + dx6 = -dx; + stbtt__csctx_rccurve_to(c, dx1, dy1, dx2, dy2, dx3, dy3); + stbtt__csctx_rccurve_to(c, dx4, dy4, dx5, dy5, dx6, dy6); + break; + + default: + return STBTT__CSERR("unimplemented"); + } + } break; + + default: + if (b0 != 255 && b0 != 28 && (b0 < 32 || b0 > 254)) + return STBTT__CSERR("reserved operator"); + + // push immediate + if (b0 == 255) { + f = (float)(stbtt_int32)stbtt__buf_get32(&b) / 0x10000; + } else { + stbtt__buf_skip(&b, -1); + f = (float)(stbtt_int16)stbtt__cff_int(&b); + } + if (sp >= 48) return STBTT__CSERR("push stack overflow"); + s[sp++] = f; + clear_stack = 0; + break; + } + if (clear_stack) sp = 0; + } + return STBTT__CSERR("no endchar"); + +#undef STBTT__CSERR +} + +static int stbtt__GetGlyphShapeT2(const stbtt_fontinfo *info, int glyph_index, stbtt_vertex **pvertices) +{ + // runs the charstring twice, once to count and once to output (to avoid realloc) + stbtt__csctx count_ctx = STBTT__CSCTX_INIT(1); + stbtt__csctx output_ctx = STBTT__CSCTX_INIT(0); + if (stbtt__run_charstring(info, glyph_index, &count_ctx)) { + *pvertices = (stbtt_vertex*)STBTT_malloc(count_ctx.num_vertices*sizeof(stbtt_vertex), info->userdata); + output_ctx.pvertices = *pvertices; + if (stbtt__run_charstring(info, glyph_index, &output_ctx)) { + STBTT_assert(output_ctx.num_vertices == count_ctx.num_vertices); + return output_ctx.num_vertices; + } + } + *pvertices = NULL; + return 0; +} + +static int stbtt__GetGlyphInfoT2(const stbtt_fontinfo *info, int glyph_index, int *x0, int *y0, int *x1, int *y1) +{ + stbtt__csctx c = STBTT__CSCTX_INIT(1); + int r = stbtt__run_charstring(info, glyph_index, &c); + if (x0) *x0 = r ? c.min_x : 0; + if (y0) *y0 = r ? c.min_y : 0; + if (x1) *x1 = r ? c.max_x : 0; + if (y1) *y1 = r ? c.max_y : 0; + return r ? 
c.num_vertices : 0; +} + +STBTT_DEF int stbtt_GetGlyphShape(const stbtt_fontinfo *info, int glyph_index, stbtt_vertex **pvertices) +{ + if (!info->cff.size) + return stbtt__GetGlyphShapeTT(info, glyph_index, pvertices); + else + return stbtt__GetGlyphShapeT2(info, glyph_index, pvertices); +} + +STBTT_DEF void stbtt_GetGlyphHMetrics(const stbtt_fontinfo *info, int glyph_index, int *advanceWidth, int *leftSideBearing) +{ + stbtt_uint16 numOfLongHorMetrics = ttUSHORT(info->data+info->hhea + 34); + if (glyph_index < numOfLongHorMetrics) { + if (advanceWidth) *advanceWidth = ttSHORT(info->data + info->hmtx + 4*glyph_index); + if (leftSideBearing) *leftSideBearing = ttSHORT(info->data + info->hmtx + 4*glyph_index + 2); + } else { + if (advanceWidth) *advanceWidth = ttSHORT(info->data + info->hmtx + 4*(numOfLongHorMetrics-1)); + if (leftSideBearing) *leftSideBearing = ttSHORT(info->data + info->hmtx + 4*numOfLongHorMetrics + 2*(glyph_index - numOfLongHorMetrics)); + } +} + +STBTT_DEF int stbtt_GetKerningTableLength(const stbtt_fontinfo *info) +{ + stbtt_uint8 *data = info->data + info->kern; + + // we only look at the first table. it must be 'horizontal' and format 0. + if (!info->kern) + return 0; + if (ttUSHORT(data+2) < 1) // number of tables, need at least 1 + return 0; + if (ttUSHORT(data+8) != 1) // horizontal flag must be set in format + return 0; + + return ttUSHORT(data+10); +} + +STBTT_DEF int stbtt_GetKerningTable(const stbtt_fontinfo *info, stbtt_kerningentry* table, int table_length) +{ + stbtt_uint8 *data = info->data + info->kern; + int k, length; + + // we only look at the first table. it must be 'horizontal' and format 0. + if (!info->kern) + return 0; + if (ttUSHORT(data+2) < 1) // number of tables, need at least 1 + return 0; + if (ttUSHORT(data+8) != 1) // horizontal flag must be set in format + return 0; + + length = ttUSHORT(data+10); + if (table_length < length) + length = table_length; + + for (k = 0; k < length; k++) + { + table[k].glyph1 = ttUSHORT(data+18+(k*6)); + table[k].glyph2 = ttUSHORT(data+20+(k*6)); + table[k].advance = ttSHORT(data+22+(k*6)); + } + + return length; +} + +static int stbtt__GetGlyphKernInfoAdvance(const stbtt_fontinfo *info, int glyph1, int glyph2) +{ + stbtt_uint8 *data = info->data + info->kern; + stbtt_uint32 needle, straw; + int l, r, m; + + // we only look at the first table. it must be 'horizontal' and format 0. + if (!info->kern) + return 0; + if (ttUSHORT(data+2) < 1) // number of tables, need at least 1 + return 0; + if (ttUSHORT(data+8) != 1) // horizontal flag must be set in format + return 0; + + l = 0; + r = ttUSHORT(data+10) - 1; + needle = glyph1 << 16 | glyph2; + while (l <= r) { + m = (l + r) >> 1; + straw = ttULONG(data+18+(m*6)); // note: unaligned read + if (needle < straw) + r = m - 1; + else if (needle > straw) + l = m + 1; + else + return ttSHORT(data+22+(m*6)); + } + return 0; +} + +static stbtt_int32 stbtt__GetCoverageIndex(stbtt_uint8 *coverageTable, int glyph) +{ + stbtt_uint16 coverageFormat = ttUSHORT(coverageTable); + switch(coverageFormat) { + case 1: { + stbtt_uint16 glyphCount = ttUSHORT(coverageTable + 2); + + // Binary search. 
+ stbtt_int32 l=0, r=glyphCount-1, m; + int straw, needle=glyph; + while (l <= r) { + stbtt_uint8 *glyphArray = coverageTable + 4; + stbtt_uint16 glyphID; + m = (l + r) >> 1; + glyphID = ttUSHORT(glyphArray + 2 * m); + straw = glyphID; + if (needle < straw) + r = m - 1; + else if (needle > straw) + l = m + 1; + else { + return m; + } + } + } break; + + case 2: { + stbtt_uint16 rangeCount = ttUSHORT(coverageTable + 2); + stbtt_uint8 *rangeArray = coverageTable + 4; + + // Binary search. + stbtt_int32 l=0, r=rangeCount-1, m; + int strawStart, strawEnd, needle=glyph; + while (l <= r) { + stbtt_uint8 *rangeRecord; + m = (l + r) >> 1; + rangeRecord = rangeArray + 6 * m; + strawStart = ttUSHORT(rangeRecord); + strawEnd = ttUSHORT(rangeRecord + 2); + if (needle < strawStart) + r = m - 1; + else if (needle > strawEnd) + l = m + 1; + else { + stbtt_uint16 startCoverageIndex = ttUSHORT(rangeRecord + 4); + return startCoverageIndex + glyph - strawStart; + } + } + } break; + + default: { + // There are no other cases. + STBTT_assert(0); + } break; + } + + return -1; +} + +static stbtt_int32 stbtt__GetGlyphClass(stbtt_uint8 *classDefTable, int glyph) +{ + stbtt_uint16 classDefFormat = ttUSHORT(classDefTable); + switch(classDefFormat) + { + case 1: { + stbtt_uint16 startGlyphID = ttUSHORT(classDefTable + 2); + stbtt_uint16 glyphCount = ttUSHORT(classDefTable + 4); + stbtt_uint8 *classDef1ValueArray = classDefTable + 6; + + if (glyph >= startGlyphID && glyph < startGlyphID + glyphCount) + return (stbtt_int32)ttUSHORT(classDef1ValueArray + 2 * (glyph - startGlyphID)); + + classDefTable = classDef1ValueArray + 2 * glyphCount; + } break; + + case 2: { + stbtt_uint16 classRangeCount = ttUSHORT(classDefTable + 2); + stbtt_uint8 *classRangeRecords = classDefTable + 4; + + // Binary search. + stbtt_int32 l=0, r=classRangeCount-1, m; + int strawStart, strawEnd, needle=glyph; + while (l <= r) { + stbtt_uint8 *classRangeRecord; + m = (l + r) >> 1; + classRangeRecord = classRangeRecords + 6 * m; + strawStart = ttUSHORT(classRangeRecord); + strawEnd = ttUSHORT(classRangeRecord + 2); + if (needle < strawStart) + r = m - 1; + else if (needle > strawEnd) + l = m + 1; + else + return (stbtt_int32)ttUSHORT(classRangeRecord + 4); + } + + classDefTable = classRangeRecords + 6 * classRangeCount; + } break; + + default: { + // There are no other cases. + STBTT_assert(0); + } break; + } + + return -1; +} + +// Define to STBTT_assert(x) if you want to break on unimplemented formats. 
+#define STBTT_GPOS_TODO_assert(x)
+
+static stbtt_int32 stbtt__GetGlyphGPOSInfoAdvance(const stbtt_fontinfo *info, int glyph1, int glyph2)
+{
+   stbtt_uint16 lookupListOffset;
+   stbtt_uint8 *lookupList;
+   stbtt_uint16 lookupCount;
+   stbtt_uint8 *data;
+   stbtt_int32 i;
+
+   if (!info->gpos) return 0;
+
+   data = info->data + info->gpos;
+
+   if (ttUSHORT(data+0) != 1) return 0; // Major version 1
+   if (ttUSHORT(data+2) != 0) return 0; // Minor version 0
+
+   lookupListOffset = ttUSHORT(data+8);
+   lookupList = data + lookupListOffset;
+   lookupCount = ttUSHORT(lookupList);
+
+   for (i=0; i<lookupCount; ++i) {
+      stbtt_uint16 lookupOffset = ttUSHORT(lookupList + 2 + 2 * i);
+      stbtt_uint8 *lookupTable = lookupList + lookupOffset;
+
+      stbtt_uint16 lookupType = ttUSHORT(lookupTable);
+      stbtt_uint16 subTableCount = ttUSHORT(lookupTable + 4);
+      stbtt_uint8 *subTableOffsets = lookupTable + 6;
+      switch(lookupType) {
+         case 2: { // Pair Adjustment Positioning Subtable
+            stbtt_int32 sti;
+            for (sti=0; sti<subTableCount; sti++) {
+               stbtt_uint16 subtableOffset = ttUSHORT(subTableOffsets + 2 * sti);
+               stbtt_uint8 *table = lookupTable + subtableOffset;
+               stbtt_uint16 posFormat = ttUSHORT(table);
+               stbtt_uint16 coverageOffset = ttUSHORT(table + 2);
+               stbtt_int32 coverageIndex = stbtt__GetCoverageIndex(table + coverageOffset, glyph1);
+               if (coverageIndex == -1) continue;
+
+               switch (posFormat) {
+                  case 1: {
+                     stbtt_int32 l, r, m;
+                     int straw, needle;
+                     stbtt_uint16 valueFormat1 = ttUSHORT(table + 4);
+                     stbtt_uint16 valueFormat2 = ttUSHORT(table + 6);
+                     stbtt_int32 valueRecordPairSizeInBytes = 2;
+                     stbtt_uint16 pairSetCount = ttUSHORT(table + 8);
+                     stbtt_uint16 pairPosOffset = ttUSHORT(table + 10 + 2 * coverageIndex);
+                     stbtt_uint8 *pairValueTable = table + pairPosOffset;
+                     stbtt_uint16 pairValueCount = ttUSHORT(pairValueTable);
+                     stbtt_uint8 *pairValueArray = pairValueTable + 2;
+                     // TODO: Support more formats.
+                     STBTT_GPOS_TODO_assert(valueFormat1 == 4);
+                     if (valueFormat1 != 4) return 0;
+                     STBTT_GPOS_TODO_assert(valueFormat2 == 0);
+                     if (valueFormat2 != 0) return 0;
+
+                     STBTT_assert(coverageIndex < pairSetCount);
+                     STBTT__NOTUSED(pairSetCount);
+
+                     needle=glyph2;
+                     r=pairValueCount-1;
+                     l=0;
+
+                     // Binary search.
+                     while (l <= r) {
+                        stbtt_uint16 secondGlyph;
+                        stbtt_uint8 *pairValue;
+                        m = (l + r) >> 1;
+                        pairValue = pairValueArray + (2 + valueRecordPairSizeInBytes) * m;
+                        secondGlyph = ttUSHORT(pairValue);
+                        straw = secondGlyph;
+                        if (needle < straw)
+                           r = m - 1;
+                        else if (needle > straw)
+                           l = m + 1;
+                        else {
+                           stbtt_int16 xAdvance = ttSHORT(pairValue + 2);
+                           return xAdvance;
+                        }
+                     }
+                  } break;
+
+                  case 2: {
+                     stbtt_uint16 valueFormat1 = ttUSHORT(table + 4);
+                     stbtt_uint16 valueFormat2 = ttUSHORT(table + 6);
+
+                     stbtt_uint16 classDef1Offset = ttUSHORT(table + 8);
+                     stbtt_uint16 classDef2Offset = ttUSHORT(table + 10);
+                     int glyph1class = stbtt__GetGlyphClass(table + classDef1Offset, glyph1);
+                     int glyph2class = stbtt__GetGlyphClass(table + classDef2Offset, glyph2);
+
+                     stbtt_uint16 class1Count = ttUSHORT(table + 12);
+                     stbtt_uint16 class2Count = ttUSHORT(table + 14);
+                     STBTT_assert(glyph1class < class1Count);
+                     STBTT_assert(glyph2class < class2Count);
+
+                     // TODO: Support more formats.
+                     STBTT_GPOS_TODO_assert(valueFormat1 == 4);
+                     if (valueFormat1 != 4) return 0;
+                     STBTT_GPOS_TODO_assert(valueFormat2 == 0);
+                     if (valueFormat2 != 0) return 0;
+
+                     if (glyph1class >= 0 && glyph1class < class1Count && glyph2class >= 0 && glyph2class < class2Count) {
+                        stbtt_uint8 *class1Records = table + 16;
+                        stbtt_uint8 *class2Records = class1Records + 2 * (glyph1class * class2Count);
+                        stbtt_int16 xAdvance = ttSHORT(class2Records + 2 * glyph2class);
+                        return xAdvance;
+                     }
+                  } break;
+
+                  default: {
+                     // There are no other cases.
+                     STBTT_assert(0);
+                     break;
+                  };
+               }
+            }
+            break;
+         };
+
+         default:
+            // TODO: Implement other stuff.
+            break;
+      }
+   }
+
+   return 0;
+}
+
+STBTT_DEF int stbtt_GetGlyphKernAdvance(const stbtt_fontinfo *info, int g1, int g2)
+{
+   int xAdvance = 0;
+
+   if (info->gpos)
+      xAdvance += stbtt__GetGlyphGPOSInfoAdvance(info, g1, g2);
+   else if (info->kern)
+      xAdvance += stbtt__GetGlyphKernInfoAdvance(info, g1, g2);
+
+   return xAdvance;
+}
+
+STBTT_DEF int stbtt_GetCodepointKernAdvance(const stbtt_fontinfo *info, int ch1, int ch2)
+{
+   if (!info->kern && !info->gpos) // if no kerning table, don't waste time looking up both codepoint->glyphs
+      return 0;
+   return stbtt_GetGlyphKernAdvance(info, stbtt_FindGlyphIndex(info,ch1), stbtt_FindGlyphIndex(info,ch2));
+}
+
+STBTT_DEF void stbtt_GetCodepointHMetrics(const stbtt_fontinfo *info, int codepoint, int *advanceWidth, int *leftSideBearing)
+{
+   stbtt_GetGlyphHMetrics(info, stbtt_FindGlyphIndex(info,codepoint), advanceWidth, leftSideBearing);
+}
+
+STBTT_DEF void stbtt_GetFontVMetrics(const stbtt_fontinfo *info, int *ascent, int *descent, int *lineGap)
+{
+   if (ascent ) *ascent = ttSHORT(info->data+info->hhea + 4);
+   if (descent) *descent = ttSHORT(info->data+info->hhea + 6);
+   if (lineGap) *lineGap = ttSHORT(info->data+info->hhea + 8);
+}
+
+STBTT_DEF int stbtt_GetFontVMetricsOS2(const stbtt_fontinfo *info, int *typoAscent, int *typoDescent, int *typoLineGap)
+{
+   int tab = stbtt__find_table(info->data, info->fontstart, "OS/2");
+   if (!tab)
+      return 0;
+   if (typoAscent ) *typoAscent = ttSHORT(info->data+tab + 68);
+   if (typoDescent) *typoDescent = ttSHORT(info->data+tab + 70);
+   if (typoLineGap) *typoLineGap = ttSHORT(info->data+tab + 72);
+   return 1;
+}
+
+STBTT_DEF void stbtt_GetFontBoundingBox(const stbtt_fontinfo *info, int *x0, int *y0, int *x1, int *y1)
+{
+   *x0 = ttSHORT(info->data + info->head + 36);
+   *y0 = ttSHORT(info->data + info->head + 38);
+   *x1 = ttSHORT(info->data + info->head + 40);
+   *y1 = ttSHORT(info->data + info->head + 42);
+}
+
+STBTT_DEF float stbtt_ScaleForPixelHeight(const stbtt_fontinfo *info, float height)
+{
+   int fheight = ttSHORT(info->data + info->hhea + 4) - ttSHORT(info->data + info->hhea + 6);
+   return (float) height / fheight;
+}
+
+STBTT_DEF float stbtt_ScaleForMappingEmToPixels(const stbtt_fontinfo *info, float pixels)
+{
+   int unitsPerEm = ttUSHORT(info->data + info->head + 18);
+   return pixels / unitsPerEm;
+}
+
+STBTT_DEF void stbtt_FreeShape(const stbtt_fontinfo *info, stbtt_vertex *v)
+{
+   STBTT_free(v, info->userdata);
+}
+
+STBTT_DEF stbtt_uint8 *stbtt_FindSVGDoc(const stbtt_fontinfo *info, int gl)
+{
+   int i;
+   stbtt_uint8 *data = info->data;
+   stbtt_uint8 *svg_doc_list = data + stbtt__get_svg((stbtt_fontinfo *) info);
+
+   int numEntries = ttUSHORT(svg_doc_list);
+   stbtt_uint8 *svg_docs = svg_doc_list + 2;
+
+   for(i=0; i<numEntries; i++) {
+      stbtt_uint8 *svg_doc = svg_docs + (12 * i);
+      if ((gl >= ttUSHORT(svg_doc)) && (gl <= ttUSHORT(svg_doc + 2)))
+         return svg_doc;
+   }
+   return 0;
+}
+
+STBTT_DEF int stbtt_GetGlyphSVG(const stbtt_fontinfo *info, int gl, const char **svg)
+{
+   stbtt_uint8 *data = info->data;
+   stbtt_uint8 *svg_doc;
+
+   if (info->svg == 0)
+      return 0;
+
+   svg_doc = stbtt_FindSVGDoc(info, gl);
+   if (svg_doc != NULL) {
+      *svg = (char *) data + info->svg + ttULONG(svg_doc + 4);
+      return ttULONG(svg_doc + 8);
+   } else {
+      return 0;
+   }
+}
+
+STBTT_DEF int stbtt_GetCodepointSVG(const stbtt_fontinfo *info, int unicode_codepoint, const char **svg)
+{
+   return stbtt_GetGlyphSVG(info, stbtt_FindGlyphIndex(info, unicode_codepoint), svg);
+}
+
+//////////////////////////////////////////////////////////////////////////////
+//
+// antialiasing software rasterizer
+//
+
+STBTT_DEF void stbtt_GetGlyphBitmapBoxSubpixel(const stbtt_fontinfo *font, int glyph, float scale_x, float scale_y,float shift_x, float shift_y, int *ix0, int *iy0, int *ix1, int *iy1) +{ + int x0=0,y0=0,x1,y1; // =0 suppresses compiler warning + if (!stbtt_GetGlyphBox(font, glyph, &x0,&y0,&x1,&y1)) { + // e.g. space character + if (ix0) *ix0 = 0; + if (iy0) *iy0 = 0; + if (ix1) *ix1 = 0; + if (iy1) *iy1 = 0; + } else { + // move to integral bboxes (treating pixels as little squares, what pixels get touched)? + if (ix0) *ix0 = STBTT_ifloor( x0 * scale_x + shift_x); + if (iy0) *iy0 = STBTT_ifloor(-y1 * scale_y + shift_y); + if (ix1) *ix1 = STBTT_iceil ( x1 * scale_x + shift_x); + if (iy1) *iy1 = STBTT_iceil (-y0 * scale_y + shift_y); + } +} + +STBTT_DEF void stbtt_GetGlyphBitmapBox(const stbtt_fontinfo *font, int glyph, float scale_x, float scale_y, int *ix0, int *iy0, int *ix1, int *iy1) +{ + stbtt_GetGlyphBitmapBoxSubpixel(font, glyph, scale_x, scale_y,0.0f,0.0f, ix0, iy0, ix1, iy1); +} + +STBTT_DEF void stbtt_GetCodepointBitmapBoxSubpixel(const stbtt_fontinfo *font, int codepoint, float scale_x, float scale_y, float shift_x, float shift_y, int *ix0, int *iy0, int *ix1, int *iy1) +{ + stbtt_GetGlyphBitmapBoxSubpixel(font, stbtt_FindGlyphIndex(font,codepoint), scale_x, scale_y,shift_x,shift_y, ix0,iy0,ix1,iy1); +} + +STBTT_DEF void stbtt_GetCodepointBitmapBox(const stbtt_fontinfo *font, int codepoint, float scale_x, float scale_y, int *ix0, int *iy0, int *ix1, int *iy1) +{ + stbtt_GetCodepointBitmapBoxSubpixel(font, codepoint, scale_x, scale_y,0.0f,0.0f, ix0,iy0,ix1,iy1); +} + +////////////////////////////////////////////////////////////////////////////// +// +// Rasterizer + +typedef struct stbtt__hheap_chunk +{ + struct stbtt__hheap_chunk *next; +} stbtt__hheap_chunk; + +typedef struct stbtt__hheap +{ + struct stbtt__hheap_chunk *head; + void *first_free; + int num_remaining_in_head_chunk; +} stbtt__hheap; + +static void *stbtt__hheap_alloc(stbtt__hheap *hh, size_t size, void *userdata) +{ + if (hh->first_free) { + void *p = hh->first_free; + hh->first_free = * (void **) p; + return p; + } else { + if (hh->num_remaining_in_head_chunk == 0) { + int count = (size < 32 ? 2000 : size < 128 ? 
800 : 100); + stbtt__hheap_chunk *c = (stbtt__hheap_chunk *) STBTT_malloc(sizeof(stbtt__hheap_chunk) + size * count, userdata); + if (c == NULL) + return NULL; + c->next = hh->head; + hh->head = c; + hh->num_remaining_in_head_chunk = count; + } + --hh->num_remaining_in_head_chunk; + return (char *) (hh->head) + sizeof(stbtt__hheap_chunk) + size * hh->num_remaining_in_head_chunk; + } +} + +static void stbtt__hheap_free(stbtt__hheap *hh, void *p) +{ + *(void **) p = hh->first_free; + hh->first_free = p; +} + +static void stbtt__hheap_cleanup(stbtt__hheap *hh, void *userdata) +{ + stbtt__hheap_chunk *c = hh->head; + while (c) { + stbtt__hheap_chunk *n = c->next; + STBTT_free(c, userdata); + c = n; + } +} + +typedef struct stbtt__edge { + float x0,y0, x1,y1; + int invert; +} stbtt__edge; + + +typedef struct stbtt__active_edge +{ + struct stbtt__active_edge *next; + #if STBTT_RASTERIZER_VERSION==1 + int x,dx; + float ey; + int direction; + #elif STBTT_RASTERIZER_VERSION==2 + float fx,fdx,fdy; + float direction; + float sy; + float ey; + #else + #error "Unrecognized value of STBTT_RASTERIZER_VERSION" + #endif +} stbtt__active_edge; + +#if STBTT_RASTERIZER_VERSION == 1 +#define STBTT_FIXSHIFT 10 +#define STBTT_FIX (1 << STBTT_FIXSHIFT) +#define STBTT_FIXMASK (STBTT_FIX-1) + +static stbtt__active_edge *stbtt__new_active(stbtt__hheap *hh, stbtt__edge *e, int off_x, float start_point, void *userdata) +{ + stbtt__active_edge *z = (stbtt__active_edge *) stbtt__hheap_alloc(hh, sizeof(*z), userdata); + float dxdy = (e->x1 - e->x0) / (e->y1 - e->y0); + STBTT_assert(z != NULL); + if (!z) return z; + + // round dx down to avoid overshooting + if (dxdy < 0) + z->dx = -STBTT_ifloor(STBTT_FIX * -dxdy); + else + z->dx = STBTT_ifloor(STBTT_FIX * dxdy); + + z->x = STBTT_ifloor(STBTT_FIX * e->x0 + z->dx * (start_point - e->y0)); // use z->dx so when we offset later it's by the same amount + z->x -= off_x * STBTT_FIX; + + z->ey = e->y1; + z->next = 0; + z->direction = e->invert ? 1 : -1; + return z; +} +#elif STBTT_RASTERIZER_VERSION == 2 +static stbtt__active_edge *stbtt__new_active(stbtt__hheap *hh, stbtt__edge *e, int off_x, float start_point, void *userdata) +{ + stbtt__active_edge *z = (stbtt__active_edge *) stbtt__hheap_alloc(hh, sizeof(*z), userdata); + float dxdy = (e->x1 - e->x0) / (e->y1 - e->y0); + STBTT_assert(z != NULL); + //STBTT_assert(e->y0 <= start_point); + if (!z) return z; + z->fdx = dxdy; + z->fdy = dxdy != 0.0f ? (1.0f/dxdy) : 0.0f; + z->fx = e->x0 + dxdy * (start_point - e->y0); + z->fx -= off_x; + z->direction = e->invert ? 1.0f : -1.0f; + z->sy = e->y0; + z->ey = e->y1; + z->next = 0; + return z; +} +#else +#error "Unrecognized value of STBTT_RASTERIZER_VERSION" +#endif + +#if STBTT_RASTERIZER_VERSION == 1 +// note: this routine clips fills that extend off the edges... 
ideally this +// wouldn't happen, but it could happen if the truetype glyph bounding boxes +// are wrong, or if the user supplies a too-small bitmap +static void stbtt__fill_active_edges(unsigned char *scanline, int len, stbtt__active_edge *e, int max_weight) +{ + // non-zero winding fill + int x0=0, w=0; + + while (e) { + if (w == 0) { + // if we're currently at zero, we need to record the edge start point + x0 = e->x; w += e->direction; + } else { + int x1 = e->x; w += e->direction; + // if we went to zero, we need to draw + if (w == 0) { + int i = x0 >> STBTT_FIXSHIFT; + int j = x1 >> STBTT_FIXSHIFT; + + if (i < len && j >= 0) { + if (i == j) { + // x0,x1 are the same pixel, so compute combined coverage + scanline[i] = scanline[i] + (stbtt_uint8) ((x1 - x0) * max_weight >> STBTT_FIXSHIFT); + } else { + if (i >= 0) // add antialiasing for x0 + scanline[i] = scanline[i] + (stbtt_uint8) (((STBTT_FIX - (x0 & STBTT_FIXMASK)) * max_weight) >> STBTT_FIXSHIFT); + else + i = -1; // clip + + if (j < len) // add antialiasing for x1 + scanline[j] = scanline[j] + (stbtt_uint8) (((x1 & STBTT_FIXMASK) * max_weight) >> STBTT_FIXSHIFT); + else + j = len; // clip + + for (++i; i < j; ++i) // fill pixels between x0 and x1 + scanline[i] = scanline[i] + (stbtt_uint8) max_weight; + } + } + } + } + + e = e->next; + } +} + +static void stbtt__rasterize_sorted_edges(stbtt__bitmap *result, stbtt__edge *e, int n, int vsubsample, int off_x, int off_y, void *userdata) +{ + stbtt__hheap hh = { 0, 0, 0 }; + stbtt__active_edge *active = NULL; + int y,j=0; + int max_weight = (255 / vsubsample); // weight per vertical scanline + int s; // vertical subsample index + unsigned char scanline_data[512], *scanline; + + if (result->w > 512) + scanline = (unsigned char *) STBTT_malloc(result->w, userdata); + else + scanline = scanline_data; + + y = off_y * vsubsample; + e[n].y0 = (off_y + result->h) * (float) vsubsample + 1; + + while (j < result->h) { + STBTT_memset(scanline, 0, result->w); + for (s=0; s < vsubsample; ++s) { + // find center of pixel for this scanline + float scan_y = y + 0.5f; + stbtt__active_edge **step = &active; + + // update all active edges; + // remove all active edges that terminate before the center of this scanline + while (*step) { + stbtt__active_edge * z = *step; + if (z->ey <= scan_y) { + *step = z->next; // delete from list + STBTT_assert(z->direction); + z->direction = 0; + stbtt__hheap_free(&hh, z); + } else { + z->x += z->dx; // advance to position for current scanline + step = &((*step)->next); // advance through list + } + } + + // resort the list if needed + for(;;) { + int changed=0; + step = &active; + while (*step && (*step)->next) { + if ((*step)->x > (*step)->next->x) { + stbtt__active_edge *t = *step; + stbtt__active_edge *q = t->next; + + t->next = q->next; + q->next = t; + *step = q; + changed = 1; + } + step = &(*step)->next; + } + if (!changed) break; + } + + // insert all edges that start before the center of this scanline -- omit ones that also end on this scanline + while (e->y0 <= scan_y) { + if (e->y1 > scan_y) { + stbtt__active_edge *z = stbtt__new_active(&hh, e, off_x, scan_y, userdata); + if (z != NULL) { + // find insertion point + if (active == NULL) + active = z; + else if (z->x < active->x) { + // insert at front + z->next = active; + active = z; + } else { + // find thing to insert AFTER + stbtt__active_edge *p = active; + while (p->next && p->next->x < z->x) + p = p->next; + // at this point, p->next->x is NOT < z->x + z->next = p->next; + p->next = z; + } + } + } + 
++e; + } + + // now process all active edges in XOR fashion + if (active) + stbtt__fill_active_edges(scanline, result->w, active, max_weight); + + ++y; + } + STBTT_memcpy(result->pixels + j * result->stride, scanline, result->w); + ++j; + } + + stbtt__hheap_cleanup(&hh, userdata); + + if (scanline != scanline_data) + STBTT_free(scanline, userdata); +} + +#elif STBTT_RASTERIZER_VERSION == 2 + +// the edge passed in here does not cross the vertical line at x or the vertical line at x+1 +// (i.e. it has already been clipped to those) +static void stbtt__handle_clipped_edge(float *scanline, int x, stbtt__active_edge *e, float x0, float y0, float x1, float y1) +{ + if (y0 == y1) return; + STBTT_assert(y0 < y1); + STBTT_assert(e->sy <= e->ey); + if (y0 > e->ey) return; + if (y1 < e->sy) return; + if (y0 < e->sy) { + x0 += (x1-x0) * (e->sy - y0) / (y1-y0); + y0 = e->sy; + } + if (y1 > e->ey) { + x1 += (x1-x0) * (e->ey - y1) / (y1-y0); + y1 = e->ey; + } + + if (x0 == x) + STBTT_assert(x1 <= x+1); + else if (x0 == x+1) + STBTT_assert(x1 >= x); + else if (x0 <= x) + STBTT_assert(x1 <= x); + else if (x0 >= x+1) + STBTT_assert(x1 >= x+1); + else + STBTT_assert(x1 >= x && x1 <= x+1); + + if (x0 <= x && x1 <= x) + scanline[x] += e->direction * (y1-y0); + else if (x0 >= x+1 && x1 >= x+1) + ; + else { + STBTT_assert(x0 >= x && x0 <= x+1 && x1 >= x && x1 <= x+1); + scanline[x] += e->direction * (y1-y0) * (1-((x0-x)+(x1-x))/2); // coverage = 1 - average x position + } +} + +static void stbtt__fill_active_edges_new(float *scanline, float *scanline_fill, int len, stbtt__active_edge *e, float y_top) +{ + float y_bottom = y_top+1; + + while (e) { + // brute force every pixel + + // compute intersection points with top & bottom + STBTT_assert(e->ey >= y_top); + + if (e->fdx == 0) { + float x0 = e->fx; + if (x0 < len) { + if (x0 >= 0) { + stbtt__handle_clipped_edge(scanline,(int) x0,e, x0,y_top, x0,y_bottom); + stbtt__handle_clipped_edge(scanline_fill-1,(int) x0+1,e, x0,y_top, x0,y_bottom); + } else { + stbtt__handle_clipped_edge(scanline_fill-1,0,e, x0,y_top, x0,y_bottom); + } + } + } else { + float x0 = e->fx; + float dx = e->fdx; + float xb = x0 + dx; + float x_top, x_bottom; + float sy0,sy1; + float dy = e->fdy; + STBTT_assert(e->sy <= y_bottom && e->ey >= y_top); + + // compute endpoints of line segment clipped to this scanline (if the + // line segment starts on this scanline. x0 is the intersection of the + // line with y_top, but that may be off the line segment. 
+ if (e->sy > y_top) { + x_top = x0 + dx * (e->sy - y_top); + sy0 = e->sy; + } else { + x_top = x0; + sy0 = y_top; + } + if (e->ey < y_bottom) { + x_bottom = x0 + dx * (e->ey - y_top); + sy1 = e->ey; + } else { + x_bottom = xb; + sy1 = y_bottom; + } + + if (x_top >= 0 && x_bottom >= 0 && x_top < len && x_bottom < len) { + // from here on, we don't have to range check x values + + if ((int) x_top == (int) x_bottom) { + float height; + // simple case, only spans one pixel + int x = (int) x_top; + height = sy1 - sy0; + STBTT_assert(x >= 0 && x < len); + scanline[x] += e->direction * (1-((x_top - x) + (x_bottom-x))/2) * height; + scanline_fill[x] += e->direction * height; // everything right of this pixel is filled + } else { + int x,x1,x2; + float y_crossing, step, sign, area; + // covers 2+ pixels + if (x_top > x_bottom) { + // flip scanline vertically; signed area is the same + float t; + sy0 = y_bottom - (sy0 - y_top); + sy1 = y_bottom - (sy1 - y_top); + t = sy0, sy0 = sy1, sy1 = t; + t = x_bottom, x_bottom = x_top, x_top = t; + dx = -dx; + dy = -dy; + t = x0, x0 = xb, xb = t; + } + + x1 = (int) x_top; + x2 = (int) x_bottom; + // compute intersection with y axis at x1+1 + y_crossing = (x1+1 - x0) * dy + y_top; + + sign = e->direction; + // area of the rectangle covered from y0..y_crossing + area = sign * (y_crossing-sy0); + // area of the triangle (x_top,y0), (x+1,y0), (x+1,y_crossing) + scanline[x1] += area * (1-((x_top - x1)+(x1+1-x1))/2); + + step = sign * dy; + for (x = x1+1; x < x2; ++x) { + scanline[x] += area + step/2; + area += step; + } + y_crossing += dy * (x2 - (x1+1)); + + STBTT_assert(STBTT_fabs(area) <= 1.01f); + + scanline[x2] += area + sign * (1-((x2-x2)+(x_bottom-x2))/2) * (sy1-y_crossing); + + scanline_fill[x2] += sign * (sy1-sy0); + } + } else { + // if edge goes outside of box we're drawing, we require + // clipping logic. since this does not match the intended use + // of this library, we use a different, very slow brute + // force implementation + int x; + for (x=0; x < len; ++x) { + // cases: + // + // there can be up to two intersections with the pixel. any intersection + // with left or right edges can be handled by splitting into two (or three) + // regions. intersections with top & bottom do not necessitate case-wise logic. + // + // the old way of doing this found the intersections with the left & right edges, + // then used some simple logic to produce up to three segments in sorted order + // from top-to-bottom. however, this had a problem: if an x edge was epsilon + // across the x border, then the corresponding y position might not be distinct + // from the other y segment, and it might ignored as an empty segment. to avoid + // that, we need to explicitly produce segments based on x positions. 
+ + // rename variables to clearly-defined pairs + float y0 = y_top; + float x1 = (float) (x); + float x2 = (float) (x+1); + float x3 = xb; + float y3 = y_bottom; + + // x = e->x + e->dx * (y-y_top) + // (y-y_top) = (x - e->x) / e->dx + // y = (x - e->x) / e->dx + y_top + float y1 = (x - x0) / dx + y_top; + float y2 = (x+1 - x0) / dx + y_top; + + if (x0 < x1 && x3 > x2) { // three segments descending down-right + stbtt__handle_clipped_edge(scanline,x,e, x0,y0, x1,y1); + stbtt__handle_clipped_edge(scanline,x,e, x1,y1, x2,y2); + stbtt__handle_clipped_edge(scanline,x,e, x2,y2, x3,y3); + } else if (x3 < x1 && x0 > x2) { // three segments descending down-left + stbtt__handle_clipped_edge(scanline,x,e, x0,y0, x2,y2); + stbtt__handle_clipped_edge(scanline,x,e, x2,y2, x1,y1); + stbtt__handle_clipped_edge(scanline,x,e, x1,y1, x3,y3); + } else if (x0 < x1 && x3 > x1) { // two segments across x, down-right + stbtt__handle_clipped_edge(scanline,x,e, x0,y0, x1,y1); + stbtt__handle_clipped_edge(scanline,x,e, x1,y1, x3,y3); + } else if (x3 < x1 && x0 > x1) { // two segments across x, down-left + stbtt__handle_clipped_edge(scanline,x,e, x0,y0, x1,y1); + stbtt__handle_clipped_edge(scanline,x,e, x1,y1, x3,y3); + } else if (x0 < x2 && x3 > x2) { // two segments across x+1, down-right + stbtt__handle_clipped_edge(scanline,x,e, x0,y0, x2,y2); + stbtt__handle_clipped_edge(scanline,x,e, x2,y2, x3,y3); + } else if (x3 < x2 && x0 > x2) { // two segments across x+1, down-left + stbtt__handle_clipped_edge(scanline,x,e, x0,y0, x2,y2); + stbtt__handle_clipped_edge(scanline,x,e, x2,y2, x3,y3); + } else { // one segment + stbtt__handle_clipped_edge(scanline,x,e, x0,y0, x3,y3); + } + } + } + } + e = e->next; + } +} + +// directly AA rasterize edges w/o supersampling +static void stbtt__rasterize_sorted_edges(stbtt__bitmap *result, stbtt__edge *e, int n, int vsubsample, int off_x, int off_y, void *userdata) +{ + stbtt__hheap hh = { 0, 0, 0 }; + stbtt__active_edge *active = NULL; + int y,j=0, i; + float scanline_data[129], *scanline, *scanline2; + + STBTT__NOTUSED(vsubsample); + + if (result->w > 64) + scanline = (float *) STBTT_malloc((result->w*2+1) * sizeof(float), userdata); + else + scanline = scanline_data; + + scanline2 = scanline + result->w; + + y = off_y; + e[n].y0 = (float) (off_y + result->h) + 1; + + while (j < result->h) { + // find center of pixel for this scanline + float scan_y_top = y + 0.0f; + float scan_y_bottom = y + 1.0f; + stbtt__active_edge **step = &active; + + STBTT_memset(scanline , 0, result->w*sizeof(scanline[0])); + STBTT_memset(scanline2, 0, (result->w+1)*sizeof(scanline[0])); + + // update all active edges; + // remove all active edges that terminate before the top of this scanline + while (*step) { + stbtt__active_edge * z = *step; + if (z->ey <= scan_y_top) { + *step = z->next; // delete from list + STBTT_assert(z->direction); + z->direction = 0; + stbtt__hheap_free(&hh, z); + } else { + step = &((*step)->next); // advance through list + } + } + + // insert all edges that start before the bottom of this scanline + while (e->y0 <= scan_y_bottom) { + if (e->y0 != e->y1) { + stbtt__active_edge *z = stbtt__new_active(&hh, e, off_x, scan_y_top, userdata); + if (z != NULL) { + if (j == 0 && off_y != 0) { + if (z->ey < scan_y_top) { + // this can happen due to subpixel positioning and some kind of fp rounding error i think + z->ey = scan_y_top; + } + } + STBTT_assert(z->ey >= scan_y_top); // if we get really unlucky a tiny bit of an edge can be out of bounds + // insert at front + z->next = 
active;
+               active = z;
+            }
+         }
+         ++e;
+      }
+
+      // now process all active edges
+      if (active)
+         stbtt__fill_active_edges_new(scanline, scanline2+1, result->w, active, scan_y_top);
+
+      {
+         float sum = 0;
+         for (i=0; i < result->w; ++i) {
+            float k;
+            int m;
+            sum += scanline2[i];
+            k = scanline[i] + sum;
+            k = (float) STBTT_fabs(k)*255 + 0.5f;
+            m = (int) k;
+            if (m > 255) m = 255;
+            result->pixels[j*result->stride + i] = (unsigned char) m;
+         }
+      }
+      // advance all the edges
+      step = &active;
+      while (*step) {
+         stbtt__active_edge *z = *step;
+         z->fx += z->fdx; // advance to position for current scanline
+         step = &((*step)->next); // advance through list
+      }
+
+      ++y;
+      ++j;
+   }
+
+   stbtt__hheap_cleanup(&hh, userdata);
+
+   if (scanline != scanline_data)
+      STBTT_free(scanline, userdata);
+}
+#else
+#error "Unrecognized value of STBTT_RASTERIZER_VERSION"
+#endif
+
+#define STBTT__COMPARE(a,b) ((a)->y0 < (b)->y0)
+
+static void stbtt__sort_edges_ins_sort(stbtt__edge *p, int n)
+{
+   int i,j;
+   for (i=1; i < n; ++i) {
+      stbtt__edge t = p[i], *a = &t;
+      j = i;
+      while (j > 0) {
+         stbtt__edge *b = &p[j-1];
+         int c = STBTT__COMPARE(a,b);
+         if (!c) break;
+         p[j] = p[j-1];
+         --j;
+      }
+      if (i != j)
+         p[j] = t;
+   }
+}
+
+static void stbtt__sort_edges_quicksort(stbtt__edge *p, int n)
+{
+   /* threshold for transitioning to insertion sort */
+   while (n > 12) {
+      stbtt__edge t;
+      int c01,c12,c,m,i,j;
+
+      /* compute median of three */
+      m = n >> 1;
+      c01 = STBTT__COMPARE(&p[0],&p[m]);
+      c12 = STBTT__COMPARE(&p[m],&p[n-1]);
+      /* if 0 >= mid >= end, or 0 < mid < end, then use mid */
+      if (c01 != c12) {
+         /* otherwise, we'll need to swap something else to middle */
+         int z;
+         c = STBTT__COMPARE(&p[0],&p[n-1]);
+         /* 0>mid && mid<n:  0>n => n; 0<n => 0 */
+         /* 0<mid && mid>n:  0>n => 0; 0<n => n */
+         z = (c == c12) ? 0 : n-1;
+         t = p[z];
+         p[z] = p[m];
+         p[m] = t;
+      }
+      /* now p[m] is the median-of-three */
+      /* swap it to the beginning so it won't move around */
+      t = p[0];
+      p[0] = p[m];
+      p[m] = t;
+
+      /* partition loop */
+      i=1;
+      j=n-1;
+      for(;;) {
+         /* handling of equality is crucial here */
+         /* for sentinels & efficiency with duplicates */
+         for (;;++i) {
+            if (!STBTT__COMPARE(&p[i], &p[0])) break;
+         }
+         for (;;--j) {
+            if (!STBTT__COMPARE(&p[0], &p[j])) break;
+         }
+         /* make sure we haven't crossed */
+         if (i >= j) break;
+         t = p[i];
+         p[i] = p[j];
+         p[j] = t;
+
+         ++i;
+         --j;
+      }
+      /* recurse on smaller side, iterate on larger */
+      if (j < (n-i)) {
+         stbtt__sort_edges_quicksort(p,j);
+         p = p+i;
+         n = n-i;
+      } else {
+         stbtt__sort_edges_quicksort(p+i, n-i);
+         n = j;
+      }
+   }
+}
+
+static void stbtt__sort_edges(stbtt__edge *p, int n)
+{
+   stbtt__sort_edges_quicksort(p, n);
+   stbtt__sort_edges_ins_sort(p, n);
+}
+
+typedef struct
+{
+   float x,y;
+} stbtt__point;
+
+static void stbtt__rasterize(stbtt__bitmap *result, stbtt__point *pts, int *wcount, int windings, float scale_x, float scale_y, float shift_x, float shift_y, int off_x, int off_y, int invert, void *userdata)
+{
+   float y_scale_inv = invert ? -scale_y : scale_y;
+   stbtt__edge *e;
+   int n,i,j,k,m;
+#if STBTT_RASTERIZER_VERSION == 1
+   int vsubsample = result->h < 8 ?
15 : 5; +#elif STBTT_RASTERIZER_VERSION == 2 + int vsubsample = 1; +#else + #error "Unrecognized value of STBTT_RASTERIZER_VERSION" +#endif + // vsubsample should divide 255 evenly; otherwise we won't reach full opacity + + // now we have to blow out the windings into explicit edge lists + n = 0; + for (i=0; i < windings; ++i) + n += wcount[i]; + + e = (stbtt__edge *) STBTT_malloc(sizeof(*e) * (n+1), userdata); // add an extra one as a sentinel + if (e == 0) return; + n = 0; + + m=0; + for (i=0; i < windings; ++i) { + stbtt__point *p = pts + m; + m += wcount[i]; + j = wcount[i]-1; + for (k=0; k < wcount[i]; j=k++) { + int a=k,b=j; + // skip the edge if horizontal + if (p[j].y == p[k].y) + continue; + // add edge from j to k to the list + e[n].invert = 0; + if (invert ? p[j].y > p[k].y : p[j].y < p[k].y) { + e[n].invert = 1; + a=j,b=k; + } + e[n].x0 = p[a].x * scale_x + shift_x; + e[n].y0 = (p[a].y * y_scale_inv + shift_y) * vsubsample; + e[n].x1 = p[b].x * scale_x + shift_x; + e[n].y1 = (p[b].y * y_scale_inv + shift_y) * vsubsample; + ++n; + } + } + + // now sort the edges by their highest point (should snap to integer, and then by x) + //STBTT_sort(e, n, sizeof(e[0]), stbtt__edge_compare); + stbtt__sort_edges(e, n); + + // now, traverse the scanlines and find the intersections on each scanline, use xor winding rule + stbtt__rasterize_sorted_edges(result, e, n, vsubsample, off_x, off_y, userdata); + + STBTT_free(e, userdata); +} + +static void stbtt__add_point(stbtt__point *points, int n, float x, float y) +{ + if (!points) return; // during first pass, it's unallocated + points[n].x = x; + points[n].y = y; +} + +// tessellate until threshold p is happy... @TODO warped to compensate for non-linear stretching +static int stbtt__tesselate_curve(stbtt__point *points, int *num_points, float x0, float y0, float x1, float y1, float x2, float y2, float objspace_flatness_squared, int n) +{ + // midpoint + float mx = (x0 + 2*x1 + x2)/4; + float my = (y0 + 2*y1 + y2)/4; + // versus directly drawn line + float dx = (x0+x2)/2 - mx; + float dy = (y0+y2)/2 - my; + if (n > 16) // 65536 segments on one curve better be enough! + return 1; + if (dx*dx+dy*dy > objspace_flatness_squared) { // half-pixel error allowed... need to be smaller if AA + stbtt__tesselate_curve(points, num_points, x0,y0, (x0+x1)/2.0f,(y0+y1)/2.0f, mx,my, objspace_flatness_squared,n+1); + stbtt__tesselate_curve(points, num_points, mx,my, (x1+x2)/2.0f,(y1+y2)/2.0f, x2,y2, objspace_flatness_squared,n+1); + } else { + stbtt__add_point(points, *num_points,x2,y2); + *num_points = *num_points+1; + } + return 1; +} + +static void stbtt__tesselate_cubic(stbtt__point *points, int *num_points, float x0, float y0, float x1, float y1, float x2, float y2, float x3, float y3, float objspace_flatness_squared, int n) +{ + // @TODO this "flatness" calculation is just made-up nonsense that seems to work well enough + float dx0 = x1-x0; + float dy0 = y1-y0; + float dx1 = x2-x1; + float dy1 = y2-y1; + float dx2 = x3-x2; + float dy2 = y3-y2; + float dx = x3-x0; + float dy = y3-y0; + float longlen = (float) (STBTT_sqrt(dx0*dx0+dy0*dy0)+STBTT_sqrt(dx1*dx1+dy1*dy1)+STBTT_sqrt(dx2*dx2+dy2*dy2)); + float shortlen = (float) STBTT_sqrt(dx*dx+dy*dy); + float flatness_squared = longlen*longlen-shortlen*shortlen; + + if (n > 16) // 65536 segments on one curve better be enough! 
+ return; + + if (flatness_squared > objspace_flatness_squared) { + float x01 = (x0+x1)/2; + float y01 = (y0+y1)/2; + float x12 = (x1+x2)/2; + float y12 = (y1+y2)/2; + float x23 = (x2+x3)/2; + float y23 = (y2+y3)/2; + + float xa = (x01+x12)/2; + float ya = (y01+y12)/2; + float xb = (x12+x23)/2; + float yb = (y12+y23)/2; + + float mx = (xa+xb)/2; + float my = (ya+yb)/2; + + stbtt__tesselate_cubic(points, num_points, x0,y0, x01,y01, xa,ya, mx,my, objspace_flatness_squared,n+1); + stbtt__tesselate_cubic(points, num_points, mx,my, xb,yb, x23,y23, x3,y3, objspace_flatness_squared,n+1); + } else { + stbtt__add_point(points, *num_points,x3,y3); + *num_points = *num_points+1; + } +} + +// returns number of contours +static stbtt__point *stbtt_FlattenCurves(stbtt_vertex *vertices, int num_verts, float objspace_flatness, int **contour_lengths, int *num_contours, void *userdata) +{ + stbtt__point *points=0; + int num_points=0; + + float objspace_flatness_squared = objspace_flatness * objspace_flatness; + int i,n=0,start=0, pass; + + // count how many "moves" there are to get the contour count + for (i=0; i < num_verts; ++i) + if (vertices[i].type == STBTT_vmove) + ++n; + + *num_contours = n; + if (n == 0) return 0; + + *contour_lengths = (int *) STBTT_malloc(sizeof(**contour_lengths) * n, userdata); + + if (*contour_lengths == 0) { + *num_contours = 0; + return 0; + } + + // make two passes through the points so we don't need to realloc + for (pass=0; pass < 2; ++pass) { + float x=0,y=0; + if (pass == 1) { + points = (stbtt__point *) STBTT_malloc(num_points * sizeof(points[0]), userdata); + if (points == NULL) goto error; + } + num_points = 0; + n= -1; + for (i=0; i < num_verts; ++i) { + switch (vertices[i].type) { + case STBTT_vmove: + // start the next contour + if (n >= 0) + (*contour_lengths)[n] = num_points - start; + ++n; + start = num_points; + + x = vertices[i].x, y = vertices[i].y; + stbtt__add_point(points, num_points++, x,y); + break; + case STBTT_vline: + x = vertices[i].x, y = vertices[i].y; + stbtt__add_point(points, num_points++, x, y); + break; + case STBTT_vcurve: + stbtt__tesselate_curve(points, &num_points, x,y, + vertices[i].cx, vertices[i].cy, + vertices[i].x, vertices[i].y, + objspace_flatness_squared, 0); + x = vertices[i].x, y = vertices[i].y; + break; + case STBTT_vcubic: + stbtt__tesselate_cubic(points, &num_points, x,y, + vertices[i].cx, vertices[i].cy, + vertices[i].cx1, vertices[i].cy1, + vertices[i].x, vertices[i].y, + objspace_flatness_squared, 0); + x = vertices[i].x, y = vertices[i].y; + break; + } + } + (*contour_lengths)[n] = num_points - start; + } + + return points; +error: + STBTT_free(points, userdata); + STBTT_free(*contour_lengths, userdata); + *contour_lengths = 0; + *num_contours = 0; + return NULL; +} + +STBTT_DEF void stbtt_Rasterize(stbtt__bitmap *result, float flatness_in_pixels, stbtt_vertex *vertices, int num_verts, float scale_x, float scale_y, float shift_x, float shift_y, int x_off, int y_off, int invert, void *userdata) +{ + float scale = scale_x > scale_y ? 
scale_y : scale_x; + int winding_count = 0; + int *winding_lengths = NULL; + stbtt__point *windings = stbtt_FlattenCurves(vertices, num_verts, flatness_in_pixels / scale, &winding_lengths, &winding_count, userdata); + if (windings) { + stbtt__rasterize(result, windings, winding_lengths, winding_count, scale_x, scale_y, shift_x, shift_y, x_off, y_off, invert, userdata); + STBTT_free(winding_lengths, userdata); + STBTT_free(windings, userdata); + } +} + +STBTT_DEF void stbtt_FreeBitmap(unsigned char *bitmap, void *userdata) +{ + STBTT_free(bitmap, userdata); +} + +STBTT_DEF unsigned char *stbtt_GetGlyphBitmapSubpixel(const stbtt_fontinfo *info, float scale_x, float scale_y, float shift_x, float shift_y, int glyph, int *width, int *height, int *xoff, int *yoff) +{ + int ix0,iy0,ix1,iy1; + stbtt__bitmap gbm; + stbtt_vertex *vertices; + int num_verts = stbtt_GetGlyphShape(info, glyph, &vertices); + + if (scale_x == 0) scale_x = scale_y; + if (scale_y == 0) { + if (scale_x == 0) { + STBTT_free(vertices, info->userdata); + return NULL; + } + scale_y = scale_x; + } + + stbtt_GetGlyphBitmapBoxSubpixel(info, glyph, scale_x, scale_y, shift_x, shift_y, &ix0,&iy0,&ix1,&iy1); + + // now we get the size + gbm.w = (ix1 - ix0); + gbm.h = (iy1 - iy0); + gbm.pixels = NULL; // in case we error + + if (width ) *width = gbm.w; + if (height) *height = gbm.h; + if (xoff ) *xoff = ix0; + if (yoff ) *yoff = iy0; + + if (gbm.w && gbm.h) { + gbm.pixels = (unsigned char *) STBTT_malloc(gbm.w * gbm.h, info->userdata); + if (gbm.pixels) { + gbm.stride = gbm.w; + + stbtt_Rasterize(&gbm, 0.35f, vertices, num_verts, scale_x, scale_y, shift_x, shift_y, ix0, iy0, 1, info->userdata); + } + } + STBTT_free(vertices, info->userdata); + return gbm.pixels; +} + +STBTT_DEF unsigned char *stbtt_GetGlyphBitmap(const stbtt_fontinfo *info, float scale_x, float scale_y, int glyph, int *width, int *height, int *xoff, int *yoff) +{ + return stbtt_GetGlyphBitmapSubpixel(info, scale_x, scale_y, 0.0f, 0.0f, glyph, width, height, xoff, yoff); +} + +STBTT_DEF void stbtt_MakeGlyphBitmapSubpixel(const stbtt_fontinfo *info, unsigned char *output, int out_w, int out_h, int out_stride, float scale_x, float scale_y, float shift_x, float shift_y, int glyph) +{ + int ix0,iy0; + stbtt_vertex *vertices; + int num_verts = stbtt_GetGlyphShape(info, glyph, &vertices); + stbtt__bitmap gbm; + + stbtt_GetGlyphBitmapBoxSubpixel(info, glyph, scale_x, scale_y, shift_x, shift_y, &ix0,&iy0,0,0); + gbm.pixels = output; + gbm.w = out_w; + gbm.h = out_h; + gbm.stride = out_stride; + + if (gbm.w && gbm.h) + stbtt_Rasterize(&gbm, 0.35f, vertices, num_verts, scale_x, scale_y, shift_x, shift_y, ix0,iy0, 1, info->userdata); + + STBTT_free(vertices, info->userdata); +} + +STBTT_DEF void stbtt_MakeGlyphBitmap(const stbtt_fontinfo *info, unsigned char *output, int out_w, int out_h, int out_stride, float scale_x, float scale_y, int glyph) +{ + stbtt_MakeGlyphBitmapSubpixel(info, output, out_w, out_h, out_stride, scale_x, scale_y, 0.0f,0.0f, glyph); +} + +STBTT_DEF unsigned char *stbtt_GetCodepointBitmapSubpixel(const stbtt_fontinfo *info, float scale_x, float scale_y, float shift_x, float shift_y, int codepoint, int *width, int *height, int *xoff, int *yoff) +{ + return stbtt_GetGlyphBitmapSubpixel(info, scale_x, scale_y,shift_x,shift_y, stbtt_FindGlyphIndex(info,codepoint), width,height,xoff,yoff); +} + +STBTT_DEF void stbtt_MakeCodepointBitmapSubpixelPrefilter(const stbtt_fontinfo *info, unsigned char *output, int out_w, int out_h, int out_stride, float scale_x, float 
scale_y, float shift_x, float shift_y, int oversample_x, int oversample_y, float *sub_x, float *sub_y, int codepoint) +{ + stbtt_MakeGlyphBitmapSubpixelPrefilter(info, output, out_w, out_h, out_stride, scale_x, scale_y, shift_x, shift_y, oversample_x, oversample_y, sub_x, sub_y, stbtt_FindGlyphIndex(info,codepoint)); +} + +STBTT_DEF void stbtt_MakeCodepointBitmapSubpixel(const stbtt_fontinfo *info, unsigned char *output, int out_w, int out_h, int out_stride, float scale_x, float scale_y, float shift_x, float shift_y, int codepoint) +{ + stbtt_MakeGlyphBitmapSubpixel(info, output, out_w, out_h, out_stride, scale_x, scale_y, shift_x, shift_y, stbtt_FindGlyphIndex(info,codepoint)); +} + +STBTT_DEF unsigned char *stbtt_GetCodepointBitmap(const stbtt_fontinfo *info, float scale_x, float scale_y, int codepoint, int *width, int *height, int *xoff, int *yoff) +{ + return stbtt_GetCodepointBitmapSubpixel(info, scale_x, scale_y, 0.0f,0.0f, codepoint, width,height,xoff,yoff); +} + +STBTT_DEF void stbtt_MakeCodepointBitmap(const stbtt_fontinfo *info, unsigned char *output, int out_w, int out_h, int out_stride, float scale_x, float scale_y, int codepoint) +{ + stbtt_MakeCodepointBitmapSubpixel(info, output, out_w, out_h, out_stride, scale_x, scale_y, 0.0f,0.0f, codepoint); +} + +////////////////////////////////////////////////////////////////////////////// +// +// bitmap baking +// +// This is SUPER-CRAPPY packing to keep source code small + +static int stbtt_BakeFontBitmap_internal(unsigned char *data, int offset, // font location (use offset=0 for plain .ttf) + float pixel_height, // height of font in pixels + unsigned char *pixels, int pw, int ph, // bitmap to be filled in + int first_char, int num_chars, // characters to bake + stbtt_bakedchar *chardata) +{ + float scale; + int x,y,bottom_y, i; + stbtt_fontinfo f; + f.userdata = NULL; + if (!stbtt_InitFont(&f, data, offset)) + return -1; + STBTT_memset(pixels, 0, pw*ph); // background of 0 around pixels + x=y=1; + bottom_y = 1; + + scale = stbtt_ScaleForPixelHeight(&f, pixel_height); + + for (i=0; i < num_chars; ++i) { + int advance, lsb, x0,y0,x1,y1,gw,gh; + int g = stbtt_FindGlyphIndex(&f, first_char + i); + stbtt_GetGlyphHMetrics(&f, g, &advance, &lsb); + stbtt_GetGlyphBitmapBox(&f, g, scale,scale, &x0,&y0,&x1,&y1); + gw = x1-x0; + gh = y1-y0; + if (x + gw + 1 >= pw) + y = bottom_y, x = 1; // advance to next row + if (y + gh + 1 >= ph) // check if it fits vertically AFTER potentially moving to next row + return -i; + STBTT_assert(x+gw < pw); + STBTT_assert(y+gh < ph); + stbtt_MakeGlyphBitmap(&f, pixels+x+y*pw, gw,gh,pw, scale,scale, g); + chardata[i].x0 = (stbtt_int16) x; + chardata[i].y0 = (stbtt_int16) y; + chardata[i].x1 = (stbtt_int16) (x + gw); + chardata[i].y1 = (stbtt_int16) (y + gh); + chardata[i].xadvance = scale * advance; + chardata[i].xoff = (float) x0; + chardata[i].yoff = (float) y0; + x = x + gw + 1; + if (y+gh+1 > bottom_y) + bottom_y = y+gh+1; + } + return bottom_y; +} + +STBTT_DEF void stbtt_GetBakedQuad(const stbtt_bakedchar *chardata, int pw, int ph, int char_index, float *xpos, float *ypos, stbtt_aligned_quad *q, int opengl_fillrule) +{ + float d3d_bias = opengl_fillrule ? 
0 : -0.5f; + float ipw = 1.0f / pw, iph = 1.0f / ph; + const stbtt_bakedchar *b = chardata + char_index; + int round_x = STBTT_ifloor((*xpos + b->xoff) + 0.5f); + int round_y = STBTT_ifloor((*ypos + b->yoff) + 0.5f); + + q->x0 = round_x + d3d_bias; + q->y0 = round_y + d3d_bias; + q->x1 = round_x + b->x1 - b->x0 + d3d_bias; + q->y1 = round_y + b->y1 - b->y0 + d3d_bias; + + q->s0 = b->x0 * ipw; + q->t0 = b->y0 * iph; + q->s1 = b->x1 * ipw; + q->t1 = b->y1 * iph; + + *xpos += b->xadvance; +} + +////////////////////////////////////////////////////////////////////////////// +// +// rectangle packing replacement routines if you don't have stb_rect_pack.h +// + +#ifndef STB_RECT_PACK_VERSION + +typedef int stbrp_coord; + +//////////////////////////////////////////////////////////////////////////////////// +// // +// // +// COMPILER WARNING ?!?!? // +// // +// // +// if you get a compile warning due to these symbols being defined more than // +// once, move #include "stb_rect_pack.h" before #include "stb_truetype.h" // +// // +//////////////////////////////////////////////////////////////////////////////////// + +typedef struct +{ + int width,height; + int x,y,bottom_y; +} stbrp_context; + +typedef struct +{ + unsigned char x; +} stbrp_node; + +struct stbrp_rect +{ + stbrp_coord x,y; + int id,w,h,was_packed; +}; + +static void stbrp_init_target(stbrp_context *con, int pw, int ph, stbrp_node *nodes, int num_nodes) +{ + con->width = pw; + con->height = ph; + con->x = 0; + con->y = 0; + con->bottom_y = 0; + STBTT__NOTUSED(nodes); + STBTT__NOTUSED(num_nodes); +} + +static void stbrp_pack_rects(stbrp_context *con, stbrp_rect *rects, int num_rects) +{ + int i; + for (i=0; i < num_rects; ++i) { + if (con->x + rects[i].w > con->width) { + con->x = 0; + con->y = con->bottom_y; + } + if (con->y + rects[i].h > con->height) + break; + rects[i].x = con->x; + rects[i].y = con->y; + rects[i].was_packed = 1; + con->x += rects[i].w; + if (con->y + rects[i].h > con->bottom_y) + con->bottom_y = con->y + rects[i].h; + } + for ( ; i < num_rects; ++i) + rects[i].was_packed = 0; +} +#endif + +////////////////////////////////////////////////////////////////////////////// +// +// bitmap baking +// +// This is SUPER-AWESOME (tm Ryan Gordon) packing using stb_rect_pack.h. If +// stb_rect_pack.h isn't available, it uses the BakeFontBitmap strategy. + +STBTT_DEF int stbtt_PackBegin(stbtt_pack_context *spc, unsigned char *pixels, int pw, int ph, int stride_in_bytes, int padding, void *alloc_context) +{ + stbrp_context *context = (stbrp_context *) STBTT_malloc(sizeof(*context) ,alloc_context); + int num_nodes = pw - padding; + stbrp_node *nodes = (stbrp_node *) STBTT_malloc(sizeof(*nodes ) * num_nodes,alloc_context); + + if (context == NULL || nodes == NULL) { + if (context != NULL) STBTT_free(context, alloc_context); + if (nodes != NULL) STBTT_free(nodes , alloc_context); + return 0; + } + + spc->user_allocator_context = alloc_context; + spc->width = pw; + spc->height = ph; + spc->pixels = pixels; + spc->pack_info = context; + spc->nodes = nodes; + spc->padding = padding; + spc->stride_in_bytes = stride_in_bytes != 0 ? 
stride_in_bytes : pw; + spc->h_oversample = 1; + spc->v_oversample = 1; + spc->skip_missing = 0; + + stbrp_init_target(context, pw-padding, ph-padding, nodes, num_nodes); + + if (pixels) + STBTT_memset(pixels, 0, pw*ph); // background of 0 around pixels + + return 1; +} + +STBTT_DEF void stbtt_PackEnd (stbtt_pack_context *spc) +{ + STBTT_free(spc->nodes , spc->user_allocator_context); + STBTT_free(spc->pack_info, spc->user_allocator_context); +} + +STBTT_DEF void stbtt_PackSetOversampling(stbtt_pack_context *spc, unsigned int h_oversample, unsigned int v_oversample) +{ + STBTT_assert(h_oversample <= STBTT_MAX_OVERSAMPLE); + STBTT_assert(v_oversample <= STBTT_MAX_OVERSAMPLE); + if (h_oversample <= STBTT_MAX_OVERSAMPLE) + spc->h_oversample = h_oversample; + if (v_oversample <= STBTT_MAX_OVERSAMPLE) + spc->v_oversample = v_oversample; +} + +STBTT_DEF void stbtt_PackSetSkipMissingCodepoints(stbtt_pack_context *spc, int skip) +{ + spc->skip_missing = skip; +} + +#define STBTT__OVER_MASK (STBTT_MAX_OVERSAMPLE-1) + +static void stbtt__h_prefilter(unsigned char *pixels, int w, int h, int stride_in_bytes, unsigned int kernel_width) +{ + unsigned char buffer[STBTT_MAX_OVERSAMPLE]; + int safe_w = w - kernel_width; + int j; + STBTT_memset(buffer, 0, STBTT_MAX_OVERSAMPLE); // suppress bogus warning from VS2013 -analyze + for (j=0; j < h; ++j) { + int i; + unsigned int total; + STBTT_memset(buffer, 0, kernel_width); + + total = 0; + + // make kernel_width a constant in common cases so compiler can optimize out the divide + switch (kernel_width) { + case 2: + for (i=0; i <= safe_w; ++i) { + total += pixels[i] - buffer[i & STBTT__OVER_MASK]; + buffer[(i+kernel_width) & STBTT__OVER_MASK] = pixels[i]; + pixels[i] = (unsigned char) (total / 2); + } + break; + case 3: + for (i=0; i <= safe_w; ++i) { + total += pixels[i] - buffer[i & STBTT__OVER_MASK]; + buffer[(i+kernel_width) & STBTT__OVER_MASK] = pixels[i]; + pixels[i] = (unsigned char) (total / 3); + } + break; + case 4: + for (i=0; i <= safe_w; ++i) { + total += pixels[i] - buffer[i & STBTT__OVER_MASK]; + buffer[(i+kernel_width) & STBTT__OVER_MASK] = pixels[i]; + pixels[i] = (unsigned char) (total / 4); + } + break; + case 5: + for (i=0; i <= safe_w; ++i) { + total += pixels[i] - buffer[i & STBTT__OVER_MASK]; + buffer[(i+kernel_width) & STBTT__OVER_MASK] = pixels[i]; + pixels[i] = (unsigned char) (total / 5); + } + break; + default: + for (i=0; i <= safe_w; ++i) { + total += pixels[i] - buffer[i & STBTT__OVER_MASK]; + buffer[(i+kernel_width) & STBTT__OVER_MASK] = pixels[i]; + pixels[i] = (unsigned char) (total / kernel_width); + } + break; + } + + for (; i < w; ++i) { + STBTT_assert(pixels[i] == 0); + total -= buffer[i & STBTT__OVER_MASK]; + pixels[i] = (unsigned char) (total / kernel_width); + } + + pixels += stride_in_bytes; + } +} + +static void stbtt__v_prefilter(unsigned char *pixels, int w, int h, int stride_in_bytes, unsigned int kernel_width) +{ + unsigned char buffer[STBTT_MAX_OVERSAMPLE]; + int safe_h = h - kernel_width; + int j; + STBTT_memset(buffer, 0, STBTT_MAX_OVERSAMPLE); // suppress bogus warning from VS2013 -analyze + for (j=0; j < w; ++j) { + int i; + unsigned int total; + STBTT_memset(buffer, 0, kernel_width); + + total = 0; + + // make kernel_width a constant in common cases so compiler can optimize out the divide + switch (kernel_width) { + case 2: + for (i=0; i <= safe_h; ++i) { + total += pixels[i*stride_in_bytes] - buffer[i & STBTT__OVER_MASK]; + buffer[(i+kernel_width) & STBTT__OVER_MASK] = pixels[i*stride_in_bytes]; + 
pixels[i*stride_in_bytes] = (unsigned char) (total / 2); + } + break; + case 3: + for (i=0; i <= safe_h; ++i) { + total += pixels[i*stride_in_bytes] - buffer[i & STBTT__OVER_MASK]; + buffer[(i+kernel_width) & STBTT__OVER_MASK] = pixels[i*stride_in_bytes]; + pixels[i*stride_in_bytes] = (unsigned char) (total / 3); + } + break; + case 4: + for (i=0; i <= safe_h; ++i) { + total += pixels[i*stride_in_bytes] - buffer[i & STBTT__OVER_MASK]; + buffer[(i+kernel_width) & STBTT__OVER_MASK] = pixels[i*stride_in_bytes]; + pixels[i*stride_in_bytes] = (unsigned char) (total / 4); + } + break; + case 5: + for (i=0; i <= safe_h; ++i) { + total += pixels[i*stride_in_bytes] - buffer[i & STBTT__OVER_MASK]; + buffer[(i+kernel_width) & STBTT__OVER_MASK] = pixels[i*stride_in_bytes]; + pixels[i*stride_in_bytes] = (unsigned char) (total / 5); + } + break; + default: + for (i=0; i <= safe_h; ++i) { + total += pixels[i*stride_in_bytes] - buffer[i & STBTT__OVER_MASK]; + buffer[(i+kernel_width) & STBTT__OVER_MASK] = pixels[i*stride_in_bytes]; + pixels[i*stride_in_bytes] = (unsigned char) (total / kernel_width); + } + break; + } + + for (; i < h; ++i) { + STBTT_assert(pixels[i*stride_in_bytes] == 0); + total -= buffer[i & STBTT__OVER_MASK]; + pixels[i*stride_in_bytes] = (unsigned char) (total / kernel_width); + } + + pixels += 1; + } +} + +static float stbtt__oversample_shift(int oversample) +{ + if (!oversample) + return 0.0f; + + // The prefilter is a box filter of width "oversample", + // which shifts phase by (oversample - 1)/2 pixels in + // oversampled space. We want to shift in the opposite + // direction to counter this. + return (float)-(oversample - 1) / (2.0f * (float)oversample); +} + +// rects array must be big enough to accommodate all characters in the given ranges +STBTT_DEF int stbtt_PackFontRangesGatherRects(stbtt_pack_context *spc, const stbtt_fontinfo *info, stbtt_pack_range *ranges, int num_ranges, stbrp_rect *rects) +{ + int i,j,k; + int missing_glyph_added = 0; + + k=0; + for (i=0; i < num_ranges; ++i) { + float fh = ranges[i].font_size; + float scale = fh > 0 ? stbtt_ScaleForPixelHeight(info, fh) : stbtt_ScaleForMappingEmToPixels(info, -fh); + ranges[i].h_oversample = (unsigned char) spc->h_oversample; + ranges[i].v_oversample = (unsigned char) spc->v_oversample; + for (j=0; j < ranges[i].num_chars; ++j) { + int x0,y0,x1,y1; + int codepoint = ranges[i].array_of_unicode_codepoints == NULL ? 
ranges[i].first_unicode_codepoint_in_range + j : ranges[i].array_of_unicode_codepoints[j]; + int glyph = stbtt_FindGlyphIndex(info, codepoint); + if (glyph == 0 && (spc->skip_missing || missing_glyph_added)) { + rects[k].w = rects[k].h = 0; + } else { + stbtt_GetGlyphBitmapBoxSubpixel(info,glyph, + scale * spc->h_oversample, + scale * spc->v_oversample, + 0,0, + &x0,&y0,&x1,&y1); + rects[k].w = (stbrp_coord) (x1-x0 + spc->padding + spc->h_oversample-1); + rects[k].h = (stbrp_coord) (y1-y0 + spc->padding + spc->v_oversample-1); + if (glyph == 0) + missing_glyph_added = 1; + } + ++k; + } + } + + return k; +} + +STBTT_DEF void stbtt_MakeGlyphBitmapSubpixelPrefilter(const stbtt_fontinfo *info, unsigned char *output, int out_w, int out_h, int out_stride, float scale_x, float scale_y, float shift_x, float shift_y, int prefilter_x, int prefilter_y, float *sub_x, float *sub_y, int glyph) +{ + stbtt_MakeGlyphBitmapSubpixel(info, + output, + out_w - (prefilter_x - 1), + out_h - (prefilter_y - 1), + out_stride, + scale_x, + scale_y, + shift_x, + shift_y, + glyph); + + if (prefilter_x > 1) + stbtt__h_prefilter(output, out_w, out_h, out_stride, prefilter_x); + + if (prefilter_y > 1) + stbtt__v_prefilter(output, out_w, out_h, out_stride, prefilter_y); + + *sub_x = stbtt__oversample_shift(prefilter_x); + *sub_y = stbtt__oversample_shift(prefilter_y); +} + +// rects array must be big enough to accommodate all characters in the given ranges +STBTT_DEF int stbtt_PackFontRangesRenderIntoRects(stbtt_pack_context *spc, const stbtt_fontinfo *info, stbtt_pack_range *ranges, int num_ranges, stbrp_rect *rects) +{ + int i,j,k, missing_glyph = -1, return_value = 1; + + // save current values + int old_h_over = spc->h_oversample; + int old_v_over = spc->v_oversample; + + k = 0; + for (i=0; i < num_ranges; ++i) { + float fh = ranges[i].font_size; + float scale = fh > 0 ? stbtt_ScaleForPixelHeight(info, fh) : stbtt_ScaleForMappingEmToPixels(info, -fh); + float recip_h,recip_v,sub_x,sub_y; + spc->h_oversample = ranges[i].h_oversample; + spc->v_oversample = ranges[i].v_oversample; + recip_h = 1.0f / spc->h_oversample; + recip_v = 1.0f / spc->v_oversample; + sub_x = stbtt__oversample_shift(spc->h_oversample); + sub_y = stbtt__oversample_shift(spc->v_oversample); + for (j=0; j < ranges[i].num_chars; ++j) { + stbrp_rect *r = &rects[k]; + if (r->was_packed && r->w != 0 && r->h != 0) { + stbtt_packedchar *bc = &ranges[i].chardata_for_range[j]; + int advance, lsb, x0,y0,x1,y1; + int codepoint = ranges[i].array_of_unicode_codepoints == NULL ? 
ranges[i].first_unicode_codepoint_in_range + j : ranges[i].array_of_unicode_codepoints[j]; + int glyph = stbtt_FindGlyphIndex(info, codepoint); + stbrp_coord pad = (stbrp_coord) spc->padding; + + // pad on left and top + r->x += pad; + r->y += pad; + r->w -= pad; + r->h -= pad; + stbtt_GetGlyphHMetrics(info, glyph, &advance, &lsb); + stbtt_GetGlyphBitmapBox(info, glyph, + scale * spc->h_oversample, + scale * spc->v_oversample, + &x0,&y0,&x1,&y1); + stbtt_MakeGlyphBitmapSubpixel(info, + spc->pixels + r->x + r->y*spc->stride_in_bytes, + r->w - spc->h_oversample+1, + r->h - spc->v_oversample+1, + spc->stride_in_bytes, + scale * spc->h_oversample, + scale * spc->v_oversample, + 0,0, + glyph); + + if (spc->h_oversample > 1) + stbtt__h_prefilter(spc->pixels + r->x + r->y*spc->stride_in_bytes, + r->w, r->h, spc->stride_in_bytes, + spc->h_oversample); + + if (spc->v_oversample > 1) + stbtt__v_prefilter(spc->pixels + r->x + r->y*spc->stride_in_bytes, + r->w, r->h, spc->stride_in_bytes, + spc->v_oversample); + + bc->x0 = (stbtt_int16) r->x; + bc->y0 = (stbtt_int16) r->y; + bc->x1 = (stbtt_int16) (r->x + r->w); + bc->y1 = (stbtt_int16) (r->y + r->h); + bc->xadvance = scale * advance; + bc->xoff = (float) x0 * recip_h + sub_x; + bc->yoff = (float) y0 * recip_v + sub_y; + bc->xoff2 = (x0 + r->w) * recip_h + sub_x; + bc->yoff2 = (y0 + r->h) * recip_v + sub_y; + + if (glyph == 0) + missing_glyph = j; + } else if (spc->skip_missing) { + return_value = 0; + } else if (r->was_packed && r->w == 0 && r->h == 0 && missing_glyph >= 0) { + ranges[i].chardata_for_range[j] = ranges[i].chardata_for_range[missing_glyph]; + } else { + return_value = 0; // if any fail, report failure + } + + ++k; + } + } + + // restore original values + spc->h_oversample = old_h_over; + spc->v_oversample = old_v_over; + + return return_value; +} + +STBTT_DEF void stbtt_PackFontRangesPackRects(stbtt_pack_context *spc, stbrp_rect *rects, int num_rects) +{ + stbrp_pack_rects((stbrp_context *) spc->pack_info, rects, num_rects); +} + +STBTT_DEF int stbtt_PackFontRanges(stbtt_pack_context *spc, const unsigned char *fontdata, int font_index, stbtt_pack_range *ranges, int num_ranges) +{ + stbtt_fontinfo info; + int i,j,n, return_value = 1; + //stbrp_context *context = (stbrp_context *) spc->pack_info; + stbrp_rect *rects; + + // flag all characters as NOT packed + for (i=0; i < num_ranges; ++i) + for (j=0; j < ranges[i].num_chars; ++j) + ranges[i].chardata_for_range[j].x0 = + ranges[i].chardata_for_range[j].y0 = + ranges[i].chardata_for_range[j].x1 = + ranges[i].chardata_for_range[j].y1 = 0; + + n = 0; + for (i=0; i < num_ranges; ++i) + n += ranges[i].num_chars; + + rects = (stbrp_rect *) STBTT_malloc(sizeof(*rects) * n, spc->user_allocator_context); + if (rects == NULL) + return 0; + + info.userdata = spc->user_allocator_context; + stbtt_InitFont(&info, fontdata, stbtt_GetFontOffsetForIndex(fontdata,font_index)); + + n = stbtt_PackFontRangesGatherRects(spc, &info, ranges, num_ranges, rects); + + stbtt_PackFontRangesPackRects(spc, rects, n); + + return_value = stbtt_PackFontRangesRenderIntoRects(spc, &info, ranges, num_ranges, rects); + + STBTT_free(rects, spc->user_allocator_context); + return return_value; +} + +STBTT_DEF int stbtt_PackFontRange(stbtt_pack_context *spc, const unsigned char *fontdata, int font_index, float font_size, + int first_unicode_codepoint_in_range, int num_chars_in_range, stbtt_packedchar *chardata_for_range) +{ + stbtt_pack_range range; + range.first_unicode_codepoint_in_range = first_unicode_codepoint_in_range; + 
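+ // a NULL array_of_unicode_codepoints makes this a single contiguous range starting at first_unicode_codepoint_in_range (see stbtt_PackFontRangesGatherRects)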
range.array_of_unicode_codepoints = NULL; + range.num_chars = num_chars_in_range; + range.chardata_for_range = chardata_for_range; + range.font_size = font_size; + return stbtt_PackFontRanges(spc, fontdata, font_index, &range, 1); +} + +STBTT_DEF void stbtt_GetScaledFontVMetrics(const unsigned char *fontdata, int index, float size, float *ascent, float *descent, float *lineGap) +{ + int i_ascent, i_descent, i_lineGap; + float scale; + stbtt_fontinfo info; + stbtt_InitFont(&info, fontdata, stbtt_GetFontOffsetForIndex(fontdata, index)); + scale = size > 0 ? stbtt_ScaleForPixelHeight(&info, size) : stbtt_ScaleForMappingEmToPixels(&info, -size); + stbtt_GetFontVMetrics(&info, &i_ascent, &i_descent, &i_lineGap); + *ascent = (float) i_ascent * scale; + *descent = (float) i_descent * scale; + *lineGap = (float) i_lineGap * scale; +} + +STBTT_DEF void stbtt_GetPackedQuad(const stbtt_packedchar *chardata, int pw, int ph, int char_index, float *xpos, float *ypos, stbtt_aligned_quad *q, int align_to_integer) +{ + float ipw = 1.0f / pw, iph = 1.0f / ph; + const stbtt_packedchar *b = chardata + char_index; + + if (align_to_integer) { + float x = (float) STBTT_ifloor((*xpos + b->xoff) + 0.5f); + float y = (float) STBTT_ifloor((*ypos + b->yoff) + 0.5f); + q->x0 = x; + q->y0 = y; + q->x1 = x + b->xoff2 - b->xoff; + q->y1 = y + b->yoff2 - b->yoff; + } else { + q->x0 = *xpos + b->xoff; + q->y0 = *ypos + b->yoff; + q->x1 = *xpos + b->xoff2; + q->y1 = *ypos + b->yoff2; + } + + q->s0 = b->x0 * ipw; + q->t0 = b->y0 * iph; + q->s1 = b->x1 * ipw; + q->t1 = b->y1 * iph; + + *xpos += b->xadvance; +} + +////////////////////////////////////////////////////////////////////////////// +// +// sdf computation +// + +#define STBTT_min(a,b) ((a) < (b) ? (a) : (b)) +#define STBTT_max(a,b) ((a) < (b) ? 
(b) : (a)) + +static int stbtt__ray_intersect_bezier(float orig[2], float ray[2], float q0[2], float q1[2], float q2[2], float hits[2][2]) +{ + float q0perp = q0[1]*ray[0] - q0[0]*ray[1]; + float q1perp = q1[1]*ray[0] - q1[0]*ray[1]; + float q2perp = q2[1]*ray[0] - q2[0]*ray[1]; + float roperp = orig[1]*ray[0] - orig[0]*ray[1]; + + float a = q0perp - 2*q1perp + q2perp; + float b = q1perp - q0perp; + float c = q0perp - roperp; + + float s0 = 0., s1 = 0.; + int num_s = 0; + + if (a != 0.0) { + float discr = b*b - a*c; + if (discr > 0.0) { + float rcpna = -1 / a; + float d = (float) STBTT_sqrt(discr); + s0 = (b+d) * rcpna; + s1 = (b-d) * rcpna; + if (s0 >= 0.0 && s0 <= 1.0) + num_s = 1; + if (d > 0.0 && s1 >= 0.0 && s1 <= 1.0) { + if (num_s == 0) s0 = s1; + ++num_s; + } + } + } else { + // 2*b*s + c = 0 + // s = -c / (2*b) + s0 = c / (-2 * b); + if (s0 >= 0.0 && s0 <= 1.0) + num_s = 1; + } + + if (num_s == 0) + return 0; + else { + float rcp_len2 = 1 / (ray[0]*ray[0] + ray[1]*ray[1]); + float rayn_x = ray[0] * rcp_len2, rayn_y = ray[1] * rcp_len2; + + float q0d = q0[0]*rayn_x + q0[1]*rayn_y; + float q1d = q1[0]*rayn_x + q1[1]*rayn_y; + float q2d = q2[0]*rayn_x + q2[1]*rayn_y; + float rod = orig[0]*rayn_x + orig[1]*rayn_y; + + float q10d = q1d - q0d; + float q20d = q2d - q0d; + float q0rd = q0d - rod; + + hits[0][0] = q0rd + s0*(2.0f - 2.0f*s0)*q10d + s0*s0*q20d; + hits[0][1] = a*s0+b; + + if (num_s > 1) { + hits[1][0] = q0rd + s1*(2.0f - 2.0f*s1)*q10d + s1*s1*q20d; + hits[1][1] = a*s1+b; + return 2; + } else { + return 1; + } + } +} + +static int equal(float *a, float *b) +{ + return (a[0] == b[0] && a[1] == b[1]); +} + +static int stbtt__compute_crossings_x(float x, float y, int nverts, stbtt_vertex *verts) +{ + int i; + float orig[2], ray[2] = { 1, 0 }; + float y_frac; + int winding = 0; + + orig[0] = x; + orig[1] = y; + + // make sure y never passes through a vertex of the shape + y_frac = (float) STBTT_fmod(y, 1.0f); + if (y_frac < 0.01f) + y += 0.01f; + else if (y_frac > 0.99f) + y -= 0.01f; + orig[1] = y; + + // test a ray from (-infinity,y) to (x,y) + for (i=0; i < nverts; ++i) { + if (verts[i].type == STBTT_vline) { + int x0 = (int) verts[i-1].x, y0 = (int) verts[i-1].y; + int x1 = (int) verts[i ].x, y1 = (int) verts[i ].y; + if (y > STBTT_min(y0,y1) && y < STBTT_max(y0,y1) && x > STBTT_min(x0,x1)) { + float x_inter = (y - y0) / (y1 - y0) * (x1-x0) + x0; + if (x_inter < x) + winding += (y0 < y1) ? 1 : -1; + } + } + if (verts[i].type == STBTT_vcurve) { + int x0 = (int) verts[i-1].x , y0 = (int) verts[i-1].y ; + int x1 = (int) verts[i ].cx, y1 = (int) verts[i ].cy; + int x2 = (int) verts[i ].x , y2 = (int) verts[i ].y ; + int ax = STBTT_min(x0,STBTT_min(x1,x2)), ay = STBTT_min(y0,STBTT_min(y1,y2)); + int by = STBTT_max(y0,STBTT_max(y1,y2)); + if (y > ay && y < by && x > ax) { + float q0[2],q1[2],q2[2]; + float hits[2][2]; + q0[0] = (float)x0; + q0[1] = (float)y0; + q1[0] = (float)x1; + q1[1] = (float)y1; + q2[0] = (float)x2; + q2[1] = (float)y2; + if (equal(q0,q1) || equal(q1,q2)) { + x0 = (int)verts[i-1].x; + y0 = (int)verts[i-1].y; + x1 = (int)verts[i ].x; + y1 = (int)verts[i ].y; + if (y > STBTT_min(y0,y1) && y < STBTT_max(y0,y1) && x > STBTT_min(x0,x1)) { + float x_inter = (y - y0) / (y1 - y0) * (x1-x0) + x0; + if (x_inter < x) + winding += (y0 < y1) ? 1 : -1; + } + } else { + int num_hits = stbtt__ray_intersect_bezier(orig, ray, q0, q1, q2, hits); + if (num_hits >= 1) + if (hits[0][0] < 0) + winding += (hits[0][1] < 0 ? 
-1 : 1); + if (num_hits >= 2) + if (hits[1][0] < 0) + winding += (hits[1][1] < 0 ? -1 : 1); + } + } + } + } + return winding; +} + +static float stbtt__cuberoot( float x ) +{ + if (x<0) + return -(float) STBTT_pow(-x,1.0f/3.0f); + else + return (float) STBTT_pow( x,1.0f/3.0f); +} + +// x^3 + c*x^2 + b*x + a = 0 +static int stbtt__solve_cubic(float a, float b, float c, float* r) +{ + float s = -a / 3; + float p = b - a*a / 3; + float q = a * (2*a*a - 9*b) / 27 + c; + float p3 = p*p*p; + float d = q*q + 4*p3 / 27; + if (d >= 0) { + float z = (float) STBTT_sqrt(d); + float u = (-q + z) / 2; + float v = (-q - z) / 2; + u = stbtt__cuberoot(u); + v = stbtt__cuberoot(v); + r[0] = s + u + v; + return 1; + } else { + float u = (float) STBTT_sqrt(-p/3); + float v = (float) STBTT_acos(-STBTT_sqrt(-27/p3) * q / 2) / 3; // p3 must be negative, since d is negative + float m = (float) STBTT_cos(v); + float n = (float) STBTT_cos(v-3.141592/2)*1.732050808f; + r[0] = s + u * 2 * m; + r[1] = s - u * (m + n); + r[2] = s - u * (m - n); + + //STBTT_assert( STBTT_fabs(((r[0]+a)*r[0]+b)*r[0]+c) < 0.05f); // these asserts may not be safe at all scales, though they're in bezier t parameter units so maybe? + //STBTT_assert( STBTT_fabs(((r[1]+a)*r[1]+b)*r[1]+c) < 0.05f); + //STBTT_assert( STBTT_fabs(((r[2]+a)*r[2]+b)*r[2]+c) < 0.05f); + return 3; + } +} + +STBTT_DEF unsigned char * stbtt_GetGlyphSDF(const stbtt_fontinfo *info, float scale, int glyph, int padding, unsigned char onedge_value, float pixel_dist_scale, int *width, int *height, int *xoff, int *yoff) +{ + float scale_x = scale, scale_y = scale; + int ix0,iy0,ix1,iy1; + int w,h; + unsigned char *data; + + if (scale == 0) return NULL; + + stbtt_GetGlyphBitmapBoxSubpixel(info, glyph, scale, scale, 0.0f,0.0f, &ix0,&iy0,&ix1,&iy1); + + // if empty, return NULL + if (ix0 == ix1 || iy0 == iy1) + return NULL; + + ix0 -= padding; + iy0 -= padding; + ix1 += padding; + iy1 += padding; + + w = (ix1 - ix0); + h = (iy1 - iy0); + + if (width ) *width = w; + if (height) *height = h; + if (xoff ) *xoff = ix0; + if (yoff ) *yoff = iy0; + + // invert for y-downwards bitmaps + scale_y = -scale_y; + + { + int x,y,i,j; + float *precompute; + stbtt_vertex *verts; + int num_verts = stbtt_GetGlyphShape(info, glyph, &verts); + data = (unsigned char *) STBTT_malloc(w * h, info->userdata); + precompute = (float *) STBTT_malloc(num_verts * sizeof(float), info->userdata); + + for (i=0,j=num_verts-1; i < num_verts; j=i++) { + if (verts[i].type == STBTT_vline) { + float x0 = verts[i].x*scale_x, y0 = verts[i].y*scale_y; + float x1 = verts[j].x*scale_x, y1 = verts[j].y*scale_y; + float dist = (float) STBTT_sqrt((x1-x0)*(x1-x0) + (y1-y0)*(y1-y0)); + precompute[i] = (dist == 0) ? 
0.0f : 1.0f / dist; + } else if (verts[i].type == STBTT_vcurve) { + float x2 = verts[j].x *scale_x, y2 = verts[j].y *scale_y; + float x1 = verts[i].cx*scale_x, y1 = verts[i].cy*scale_y; + float x0 = verts[i].x *scale_x, y0 = verts[i].y *scale_y; + float bx = x0 - 2*x1 + x2, by = y0 - 2*y1 + y2; + float len2 = bx*bx + by*by; + if (len2 != 0.0f) + precompute[i] = 1.0f / (bx*bx + by*by); + else + precompute[i] = 0.0f; + } else + precompute[i] = 0.0f; + } + + for (y=iy0; y < iy1; ++y) { + for (x=ix0; x < ix1; ++x) { + float val; + float min_dist = 999999.0f; + float sx = (float) x + 0.5f; + float sy = (float) y + 0.5f; + float x_gspace = (sx / scale_x); + float y_gspace = (sy / scale_y); + + int winding = stbtt__compute_crossings_x(x_gspace, y_gspace, num_verts, verts); // @OPTIMIZE: this could just be a rasterization, but needs to be line vs. non-tesselated curves so a new path + + for (i=0; i < num_verts; ++i) { + float x0 = verts[i].x*scale_x, y0 = verts[i].y*scale_y; + + // check against every point here rather than inside line/curve primitives -- @TODO: wrong if multiple 'moves' in a row produce a garbage point, and given culling, probably more efficient to do within line/curve + float dist2 = (x0-sx)*(x0-sx) + (y0-sy)*(y0-sy); + if (dist2 < min_dist*min_dist) + min_dist = (float) STBTT_sqrt(dist2); + + if (verts[i].type == STBTT_vline) { + float x1 = verts[i-1].x*scale_x, y1 = verts[i-1].y*scale_y; + + // coarse culling against bbox + //if (sx > STBTT_min(x0,x1)-min_dist && sx < STBTT_max(x0,x1)+min_dist && + // sy > STBTT_min(y0,y1)-min_dist && sy < STBTT_max(y0,y1)+min_dist) + float dist = (float) STBTT_fabs((x1-x0)*(y0-sy) - (y1-y0)*(x0-sx)) * precompute[i]; + STBTT_assert(i != 0); + if (dist < min_dist) { + // check position along line + // x' = x0 + t*(x1-x0), y' = y0 + t*(y1-y0) + // minimize (x'-sx)*(x'-sx)+(y'-sy)*(y'-sy) + float dx = x1-x0, dy = y1-y0; + float px = x0-sx, py = y0-sy; + // minimize (px+t*dx)^2 + (py+t*dy)^2 = px*px + 2*px*dx*t + t^2*dx*dx + py*py + 2*py*dy*t + t^2*dy*dy + // derivative: 2*px*dx + 2*py*dy + (2*dx*dx+2*dy*dy)*t, set to 0 and solve + float t = -(px*dx + py*dy) / (dx*dx + dy*dy); + if (t >= 0.0f && t <= 1.0f) + min_dist = dist; + } + } else if (verts[i].type == STBTT_vcurve) { + float x2 = verts[i-1].x *scale_x, y2 = verts[i-1].y *scale_y; + float x1 = verts[i ].cx*scale_x, y1 = verts[i ].cy*scale_y; + float box_x0 = STBTT_min(STBTT_min(x0,x1),x2); + float box_y0 = STBTT_min(STBTT_min(y0,y1),y2); + float box_x1 = STBTT_max(STBTT_max(x0,x1),x2); + float box_y1 = STBTT_max(STBTT_max(y0,y1),y2); + // coarse culling against bbox to avoid computing cubic unnecessarily + if (sx > box_x0-min_dist && sx < box_x1+min_dist && sy > box_y0-min_dist && sy < box_y1+min_dist) { + int num=0; + float ax = x1-x0, ay = y1-y0; + float bx = x0 - 2*x1 + x2, by = y0 - 2*y1 + y2; + float mx = x0 - sx, my = y0 - sy; + float res[3],px,py,t,it; + float a_inv = precompute[i]; + if (a_inv == 0.0) { // if a_inv is 0, it's 2nd degree so use quadratic formula + float a = 3*(ax*bx + ay*by); + float b = 2*(ax*ax + ay*ay) + (mx*bx+my*by); + float c = mx*ax+my*ay; + if (a == 0.0) { // if a is 0, it's linear + if (b != 0.0) { + res[num++] = -c/b; + } + } else { + float discriminant = b*b - 4*a*c; + if (discriminant < 0) + num = 0; + else { + float root = (float) STBTT_sqrt(discriminant); + res[0] = (-b - root)/(2*a); + res[1] = (-b + root)/(2*a); + num = 2; // don't bother distinguishing 1-solution case, as code below will still work + } + } + } else { + float b = 3*(ax*bx + ay*by) * 
a_inv; // could precompute this as it doesn't depend on sample point + float c = (2*(ax*ax + ay*ay) + (mx*bx+my*by)) * a_inv; + float d = (mx*ax+my*ay) * a_inv; + num = stbtt__solve_cubic(b, c, d, res); + } + if (num >= 1 && res[0] >= 0.0f && res[0] <= 1.0f) { + t = res[0], it = 1.0f - t; + px = it*it*x0 + 2*t*it*x1 + t*t*x2; + py = it*it*y0 + 2*t*it*y1 + t*t*y2; + dist2 = (px-sx)*(px-sx) + (py-sy)*(py-sy); + if (dist2 < min_dist * min_dist) + min_dist = (float) STBTT_sqrt(dist2); + } + if (num >= 2 && res[1] >= 0.0f && res[1] <= 1.0f) { + t = res[1], it = 1.0f - t; + px = it*it*x0 + 2*t*it*x1 + t*t*x2; + py = it*it*y0 + 2*t*it*y1 + t*t*y2; + dist2 = (px-sx)*(px-sx) + (py-sy)*(py-sy); + if (dist2 < min_dist * min_dist) + min_dist = (float) STBTT_sqrt(dist2); + } + if (num >= 3 && res[2] >= 0.0f && res[2] <= 1.0f) { + t = res[2], it = 1.0f - t; + px = it*it*x0 + 2*t*it*x1 + t*t*x2; + py = it*it*y0 + 2*t*it*y1 + t*t*y2; + dist2 = (px-sx)*(px-sx) + (py-sy)*(py-sy); + if (dist2 < min_dist * min_dist) + min_dist = (float) STBTT_sqrt(dist2); + } + } + } + } + if (winding == 0) + min_dist = -min_dist; // if outside the shape, value is negative + val = onedge_value + pixel_dist_scale * min_dist; + if (val < 0) + val = 0; + else if (val > 255) + val = 255; + data[(y-iy0)*w+(x-ix0)] = (unsigned char) val; + } + } + STBTT_free(precompute, info->userdata); + STBTT_free(verts, info->userdata); + } + return data; +} + +STBTT_DEF unsigned char * stbtt_GetCodepointSDF(const stbtt_fontinfo *info, float scale, int codepoint, int padding, unsigned char onedge_value, float pixel_dist_scale, int *width, int *height, int *xoff, int *yoff) +{ + return stbtt_GetGlyphSDF(info, scale, stbtt_FindGlyphIndex(info, codepoint), padding, onedge_value, pixel_dist_scale, width, height, xoff, yoff); +} + +STBTT_DEF void stbtt_FreeSDF(unsigned char *bitmap, void *userdata) +{ + STBTT_free(bitmap, userdata); +} + +////////////////////////////////////////////////////////////////////////////// +// +// font name matching -- recommended not to use this +// + +// check if a utf8 string contains a prefix which is the utf16 string; if so return length of matching utf8 string +static stbtt_int32 stbtt__CompareUTF8toUTF16_bigendian_prefix(stbtt_uint8 *s1, stbtt_int32 len1, stbtt_uint8 *s2, stbtt_int32 len2) +{ + stbtt_int32 i=0; + + // convert utf16 to utf8 and compare the results while converting + while (len2) { + stbtt_uint16 ch = s2[0]*256 + s2[1]; + if (ch < 0x80) { + if (i >= len1) return -1; + if (s1[i++] != ch) return -1; + } else if (ch < 0x800) { + if (i+1 >= len1) return -1; + if (s1[i++] != 0xc0 + (ch >> 6)) return -1; + if (s1[i++] != 0x80 + (ch & 0x3f)) return -1; + } else if (ch >= 0xd800 && ch < 0xdc00) { + stbtt_uint32 c; + stbtt_uint16 ch2 = s2[2]*256 + s2[3]; + if (i+3 >= len1) return -1; + c = ((ch - 0xd800) << 10) + (ch2 - 0xdc00) + 0x10000; + if (s1[i++] != 0xf0 + (c >> 18)) return -1; + if (s1[i++] != 0x80 + ((c >> 12) & 0x3f)) return -1; + if (s1[i++] != 0x80 + ((c >> 6) & 0x3f)) return -1; + if (s1[i++] != 0x80 + ((c ) & 0x3f)) return -1; + s2 += 2; // plus another 2 below + len2 -= 2; + } else if (ch >= 0xdc00 && ch < 0xe000) { + return -1; + } else { + if (i+2 >= len1) return -1; + if (s1[i++] != 0xe0 + (ch >> 12)) return -1; + if (s1[i++] != 0x80 + ((ch >> 6) & 0x3f)) return -1; + if (s1[i++] != 0x80 + ((ch ) & 0x3f)) return -1; + } + s2 += 2; + len2 -= 2; + } + return i; +} + +static int stbtt_CompareUTF8toUTF16_bigendian_internal(char *s1, int len1, char *s2, int len2) +{ + return len1 == 
stbtt__CompareUTF8toUTF16_bigendian_prefix((stbtt_uint8*) s1, len1, (stbtt_uint8*) s2, len2); +} + +// returns results in whatever encoding you request... but note that 2-byte encodings +// will be BIG-ENDIAN... use stbtt_CompareUTF8toUTF16_bigendian() to compare +STBTT_DEF const char *stbtt_GetFontNameString(const stbtt_fontinfo *font, int *length, int platformID, int encodingID, int languageID, int nameID) +{ + stbtt_int32 i,count,stringOffset; + stbtt_uint8 *fc = font->data; + stbtt_uint32 offset = font->fontstart; + stbtt_uint32 nm = stbtt__find_table(fc, offset, "name"); + if (!nm) return NULL; + + count = ttUSHORT(fc+nm+2); + stringOffset = nm + ttUSHORT(fc+nm+4); + for (i=0; i < count; ++i) { + stbtt_uint32 loc = nm + 6 + 12 * i; + if (platformID == ttUSHORT(fc+loc+0) && encodingID == ttUSHORT(fc+loc+2) + && languageID == ttUSHORT(fc+loc+4) && nameID == ttUSHORT(fc+loc+6)) { + *length = ttUSHORT(fc+loc+8); + return (const char *) (fc+stringOffset+ttUSHORT(fc+loc+10)); + } + } + return NULL; +} + +static int stbtt__matchpair(stbtt_uint8 *fc, stbtt_uint32 nm, stbtt_uint8 *name, stbtt_int32 nlen, stbtt_int32 target_id, stbtt_int32 next_id) +{ + stbtt_int32 i; + stbtt_int32 count = ttUSHORT(fc+nm+2); + stbtt_int32 stringOffset = nm + ttUSHORT(fc+nm+4); + + for (i=0; i < count; ++i) { + stbtt_uint32 loc = nm + 6 + 12 * i; + stbtt_int32 id = ttUSHORT(fc+loc+6); + if (id == target_id) { + // find the encoding + stbtt_int32 platform = ttUSHORT(fc+loc+0), encoding = ttUSHORT(fc+loc+2), language = ttUSHORT(fc+loc+4); + + // is this a Unicode encoding? + if (platform == 0 || (platform == 3 && encoding == 1) || (platform == 3 && encoding == 10)) { + stbtt_int32 slen = ttUSHORT(fc+loc+8); + stbtt_int32 off = ttUSHORT(fc+loc+10); + + // check if there's a prefix match + stbtt_int32 matchlen = stbtt__CompareUTF8toUTF16_bigendian_prefix(name, nlen, fc+stringOffset+off,slen); + if (matchlen >= 0) { + // check for target_id+1 immediately following, with same encoding & language + if (i+1 < count && ttUSHORT(fc+loc+12+6) == next_id && ttUSHORT(fc+loc+12) == platform && ttUSHORT(fc+loc+12+2) == encoding && ttUSHORT(fc+loc+12+4) == language) { + slen = ttUSHORT(fc+loc+12+8); + off = ttUSHORT(fc+loc+12+10); + if (slen == 0) { + if (matchlen == nlen) + return 1; + } else if (matchlen < nlen && name[matchlen] == ' ') { + ++matchlen; + if (stbtt_CompareUTF8toUTF16_bigendian_internal((char*) (name+matchlen), nlen-matchlen, (char*)(fc+stringOffset+off),slen)) + return 1; + } + } else { + // if nothing immediately following + if (matchlen == nlen) + return 1; + } + } + } + + // @TODO handle other encodings + } + } + return 0; +} + +static int stbtt__matches(stbtt_uint8 *fc, stbtt_uint32 offset, stbtt_uint8 *name, stbtt_int32 flags) +{ + stbtt_int32 nlen = (stbtt_int32) STBTT_strlen((char *) name); + stbtt_uint32 nm,hd; + if (!stbtt__isfont(fc+offset)) return 0; + + // check italics/bold/underline flags in macStyle... 
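+ // (macStyle is the 16-bit field at offset 44 of the 'head' table; bits 0..2 are bold/italic/underline, so only the low three bits of 'flags' are compared below)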
+ if (flags) { + hd = stbtt__find_table(fc, offset, "head"); + if ((ttUSHORT(fc+hd+44) & 7) != (flags & 7)) return 0; + } + + nm = stbtt__find_table(fc, offset, "name"); + if (!nm) return 0; + + if (flags) { + // if we checked the macStyle flags, then just check the family and ignore the subfamily + if (stbtt__matchpair(fc, nm, name, nlen, 16, -1)) return 1; + if (stbtt__matchpair(fc, nm, name, nlen, 1, -1)) return 1; + if (stbtt__matchpair(fc, nm, name, nlen, 3, -1)) return 1; + } else { + if (stbtt__matchpair(fc, nm, name, nlen, 16, 17)) return 1; + if (stbtt__matchpair(fc, nm, name, nlen, 1, 2)) return 1; + if (stbtt__matchpair(fc, nm, name, nlen, 3, -1)) return 1; + } + + return 0; +} + +static int stbtt_FindMatchingFont_internal(unsigned char *font_collection, char *name_utf8, stbtt_int32 flags) +{ + stbtt_int32 i; + for (i=0;;++i) { + stbtt_int32 off = stbtt_GetFontOffsetForIndex(font_collection, i); + if (off < 0) return off; + if (stbtt__matches((stbtt_uint8 *) font_collection, off, (stbtt_uint8*) name_utf8, flags)) + return off; + } +} + +#if defined(__GNUC__) || defined(__clang__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif + +STBTT_DEF int stbtt_BakeFontBitmap(const unsigned char *data, int offset, + float pixel_height, unsigned char *pixels, int pw, int ph, + int first_char, int num_chars, stbtt_bakedchar *chardata) +{ + return stbtt_BakeFontBitmap_internal((unsigned char *) data, offset, pixel_height, pixels, pw, ph, first_char, num_chars, chardata); +} + +STBTT_DEF int stbtt_GetFontOffsetForIndex(const unsigned char *data, int index) +{ + return stbtt_GetFontOffsetForIndex_internal((unsigned char *) data, index); +} + +STBTT_DEF int stbtt_GetNumberOfFonts(const unsigned char *data) +{ + return stbtt_GetNumberOfFonts_internal((unsigned char *) data); +} + +STBTT_DEF int stbtt_InitFont(stbtt_fontinfo *info, const unsigned char *data, int offset) +{ + return stbtt_InitFont_internal(info, (unsigned char *) data, offset); +} + +STBTT_DEF int stbtt_FindMatchingFont(const unsigned char *fontdata, const char *name, int flags) +{ + return stbtt_FindMatchingFont_internal((unsigned char *) fontdata, (char *) name, flags); +} + +STBTT_DEF int stbtt_CompareUTF8toUTF16_bigendian(const char *s1, int len1, const char *s2, int len2) +{ + return stbtt_CompareUTF8toUTF16_bigendian_internal((char *) s1, len1, (char *) s2, len2); +} + +#if defined(__GNUC__) || defined(__clang__) +#pragma GCC diagnostic pop +#endif + +#endif // STB_TRUETYPE_IMPLEMENTATION + + +// FULL VERSION HISTORY +// +// 1.19 (2018-02-11) OpenType GPOS kerning (horizontal only), STBTT_fmod +// 1.18 (2018-01-29) add missing function +// 1.17 (2017-07-23) make more arguments const; doc fix +// 1.16 (2017-07-12) SDF support +// 1.15 (2017-03-03) make more arguments const +// 1.14 (2017-01-16) num-fonts-in-TTC function +// 1.13 (2017-01-02) support OpenType fonts, certain Apple fonts +// 1.12 (2016-10-25) suppress warnings about casting away const with -Wcast-qual +// 1.11 (2016-04-02) fix unused-variable warning +// 1.10 (2016-04-02) allow user-defined fabs() replacement +// fix memory leak if fontsize=0.0 +// fix warning from duplicate typedef +// 1.09 (2016-01-16) warning fix; avoid crash on outofmem; use alloc userdata for PackFontRanges +// 1.08 (2015-09-13) document stbtt_Rasterize(); fixes for vertical & horizontal edges +// 1.07 (2015-08-01) allow PackFontRanges to accept arrays of sparse codepoints; +// allow PackFontRanges to pack and render in separate phases; +// fix 
stbtt_GetFontOFfsetForIndex (never worked for non-0 input?); +// fixed an assert() bug in the new rasterizer +// replace assert() with STBTT_assert() in new rasterizer +// 1.06 (2015-07-14) performance improvements (~35% faster on x86 and x64 on test machine) +// also more precise AA rasterizer, except if shapes overlap +// remove need for STBTT_sort +// 1.05 (2015-04-15) fix misplaced definitions for STBTT_STATIC +// 1.04 (2015-04-15) typo in example +// 1.03 (2015-04-12) STBTT_STATIC, fix memory leak in new packing, various fixes +// 1.02 (2014-12-10) fix various warnings & compile issues w/ stb_rect_pack, C++ +// 1.01 (2014-12-08) fix subpixel position when oversampling to exactly match +// non-oversampled; STBTT_POINT_SIZE for packed case only +// 1.00 (2014-12-06) add new PackBegin etc. API, w/ support for oversampling +// 0.99 (2014-09-18) fix multiple bugs with subpixel rendering (ryg) +// 0.9 (2014-08-07) support certain mac/iOS fonts without an MS platformID +// 0.8b (2014-07-07) fix a warning +// 0.8 (2014-05-25) fix a few more warnings +// 0.7 (2013-09-25) bugfix: subpixel glyph bug fixed in 0.5 had come back +// 0.6c (2012-07-24) improve documentation +// 0.6b (2012-07-20) fix a few more warnings +// 0.6 (2012-07-17) fix warnings; added stbtt_ScaleForMappingEmToPixels, +// stbtt_GetFontBoundingBox, stbtt_IsGlyphEmpty +// 0.5 (2011-12-09) bugfixes: +// subpixel glyph renderer computed wrong bounding box +// first vertex of shape can be off-curve (FreeSans) +// 0.4b (2011-12-03) fixed an error in the font baking example +// 0.4 (2011-12-01) kerning, subpixel rendering (tor) +// bugfixes for: +// codepoint-to-glyph conversion using table fmt=12 +// codepoint-to-glyph conversion using table fmt=4 +// stbtt_GetBakedQuad with non-square texture (Zer) +// updated Hello World! sample to use kerning and subpixel +// fixed some warnings +// 0.3 (2009-06-24) cmap fmt=12, compound shapes (MM) +// userdata, malloc-from-userdata, non-zero fill (stb) +// 0.2 (2009-03-11) Fix unsigned/signed char warnings +// 0.1 (2009-03-09) First public release +// + +/* +------------------------------------------------------------------------------ +This software is available under 2 licenses -- choose whichever you prefer. +------------------------------------------------------------------------------ +ALTERNATIVE A - MIT License +Copyright (c) 2017 Sean Barrett +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
+------------------------------------------------------------------------------ +ALTERNATIVE B - Public Domain (www.unlicense.org) +This is free and unencumbered software released into the public domain. +Anyone is free to copy, modify, publish, use, compile, sell, or distribute this +software, either in source code form or as a compiled binary, for any purpose, +commercial or non-commercial, and by any means. +In jurisdictions that recognize copyright laws, the author or authors of this +software dedicate any and all copyright interest in the software to the public +domain. We make this dedication for the benefit of the public at large and to +the detriment of our heirs and successors. We intend this dedication to be an +overt act of relinquishment in perpetuity of all present and future rights to +this software under copyright law. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +------------------------------------------------------------------------------ +*/ diff --git a/v_windows/v/old/thirdparty/ios/ios.m b/v_windows/v/old/thirdparty/ios/ios.m new file mode 100644 index 0000000..801df16 --- /dev/null +++ b/v_windows/v/old/thirdparty/ios/ios.m @@ -0,0 +1,8 @@ +#import + +void WrappedNSLog(const char *message,...) { + va_list args; + va_start(args, message); + NSLog(@"%@",[[NSString alloc] initWithFormat:[NSString stringWithUTF8String:message] arguments:args]); + va_end(args); +} diff --git a/v_windows/v/old/thirdparty/libgc/amalgamation.txt b/v_windows/v/old/thirdparty/libgc/amalgamation.txt new file mode 100644 index 0000000..de25a3d --- /dev/null +++ b/v_windows/v/old/thirdparty/libgc/amalgamation.txt @@ -0,0 +1,7 @@ +The libgc source is distributed here as an amalgamation (https://sqlite.org/amalgamation.html). +This means that, rather than mirroring the entire bdwgc repo here, +[this script](https://gist.github.com/spaceface777/34d25420f2dc4953fb7864f44a211105) was used +to bundle all local includes together into a single C file, which is much easier to handle. +Furthermore, the script above was also used to minify (i.e. remove comments and whitespace in) +the garbage collector source. Together, these details help keep the V source distribution small, +can reduce compile times by 3%-15%, and can help C compilers generate more optimized code. diff --git a/v_windows/v/old/thirdparty/libgc/gc.c b/v_windows/v/old/thirdparty/libgc/gc.c new file mode 100644 index 0000000..2459097 --- /dev/null +++ b/v_windows/v/old/thirdparty/libgc/gc.c @@ -0,0 +1,30266 @@ +/* + * Copyright (c) 1994 by Xerox Corporation. All rights reserved. + * Copyright (c) 1996 by Silicon Graphics. All rights reserved. + * Copyright (c) 1998 by Fergus Henderson. All rights reserved. + * Copyright (c) 2000-2009 by Hewlett-Packard Development Company. + * All rights reserved. + * Copyright (c) 2009-2018 Ivan Maidanski + * + * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED + * OR IMPLIED. ANY USE IS AT YOUR OWN RISK. + * + * Permission is hereby granted to use or copy this program + * for any purpose, provided the above notices are retained on all copies. 
+ * Permission to modify the code and to distribute modified code is granted, + * provided the above notices are retained, and a notice that the code was + * modified is included with the above copyright notice. + */ + +#ifndef __cplusplus +#define GC_INNER STATIC +#define GC_EXTERN GC_INNER +#endif +#ifndef GC_DBG_MLC_H +#define GC_DBG_MLC_H +#ifndef GC_PRIVATE_H +#define GC_PRIVATE_H +#ifdef HAVE_CONFIG_H +#include "config.h" +#endif +#if!defined(GC_BUILD)&&!defined(NOT_GCBUILD) +#define GC_BUILD +#endif +#if (defined(__linux__)||defined(__GLIBC__)||defined(__GNU__)||(defined(__CYGWIN__)&&(defined(GC_THREADS)||!defined(USE_MMAP))))&&!defined(_GNU_SOURCE) +#define _GNU_SOURCE 1 +#endif +#if defined(__INTERIX)&&!defined(_ALL_SOURCE) +#define _ALL_SOURCE 1 +#endif +#if (defined(DGUX)&&defined(GC_THREADS)||defined(DGUX386_THREADS)||defined(GC_DGUX386_THREADS))&&!defined(_USING_POSIX4A_DRAFT10) +#define _USING_POSIX4A_DRAFT10 1 +#endif +#if defined(__MINGW32__)&&!defined(__MINGW_EXCPT_DEFINE_PSDK)&&defined(__i386__)&&defined(GC_EXTERN) +#define __MINGW_EXCPT_DEFINE_PSDK 1 +#endif +#if defined(NO_DEBUGGING)&&!defined(GC_ASSERTIONS)&&!defined(NDEBUG) +#define NDEBUG 1 +#endif +#ifndef GC_H +#ifndef GC_H +#define GC_H +#if (defined(WIN64)&&!defined(_WIN64))&&defined(_MSC_VER) +#pragma message("Warning:Expecting _WIN64 for x64 targets!Notice the leading underscore!") +#endif +#if defined(GC_H) +#define GC_TMP_VERSION_MAJOR 8 +#define GC_TMP_VERSION_MINOR 1 +#define GC_TMP_VERSION_MICRO 0 +#ifdef GC_VERSION_MAJOR +#if GC_TMP_VERSION_MAJOR!=GC_VERSION_MAJOR||GC_TMP_VERSION_MINOR!=GC_VERSION_MINOR||GC_TMP_VERSION_MICRO!=GC_VERSION_MICRO +#error Inconsistent version info. Check README.md,include/gc_version.h and configure.ac. +#endif +#else +#define GC_VERSION_MAJOR GC_TMP_VERSION_MAJOR +#define GC_VERSION_MINOR GC_TMP_VERSION_MINOR +#define GC_VERSION_MICRO GC_TMP_VERSION_MICRO +#endif +#endif +#if defined(GC_H) +#if defined(__GNUC__)&&defined(__GNUC_MINOR__) +#define GC_GNUC_PREREQ(major,minor)((__GNUC__<<16)+__GNUC_MINOR__>=((major)<<16)+(minor)) +#else +#define GC_GNUC_PREREQ(major,minor)0 +#endif +#if defined(SOLARIS_THREADS)||defined(_SOLARIS_THREADS)||defined(_SOLARIS_PTHREADS)||defined(GC_SOLARIS_PTHREADS) +#ifndef GC_SOLARIS_THREADS +#define GC_SOLARIS_THREADS +#endif +#endif +#if defined(IRIX_THREADS) +#define GC_IRIX_THREADS +#endif +#if defined(DGUX_THREADS)&&!defined(GC_DGUX386_THREADS) +#define GC_DGUX386_THREADS +#endif +#if defined(AIX_THREADS) +#define GC_AIX_THREADS +#endif +#if defined(HPUX_THREADS) +#define GC_HPUX_THREADS +#endif +#if defined(OSF1_THREADS) +#define GC_OSF1_THREADS +#endif +#if defined(LINUX_THREADS) +#define GC_LINUX_THREADS +#endif +#if defined(WIN32_THREADS) +#define GC_WIN32_THREADS +#endif +#if defined(RTEMS_THREADS) +#define GC_RTEMS_PTHREADS +#endif +#if defined(USE_LD_WRAP) +#define GC_USE_LD_WRAP +#endif +#if defined(GC_WIN32_PTHREADS)&&!defined(GC_WIN32_THREADS) +#define GC_WIN32_THREADS +#endif +#if defined(GC_AIX_THREADS)||defined(GC_DARWIN_THREADS)||defined(GC_DGUX386_THREADS)||defined(GC_FREEBSD_THREADS)||defined(GC_HPUX_THREADS)||defined(GC_IRIX_THREADS)||defined(GC_LINUX_THREADS)||defined(GC_NETBSD_THREADS)||defined(GC_OPENBSD_THREADS)||defined(GC_OSF1_THREADS)||defined(GC_SOLARIS_THREADS)||defined(GC_WIN32_THREADS)||defined(GC_RTEMS_PTHREADS) +#ifndef GC_THREADS +#define GC_THREADS +#endif +#elif defined(GC_THREADS) +#if defined(__linux__) +#define GC_LINUX_THREADS +#elif defined(__OpenBSD__) +#define GC_OPENBSD_THREADS +#elif 
defined(_PA_RISC1_1)||defined(_PA_RISC2_0)||defined(hppa)||defined(__HPPA)||(defined(__ia64)&&defined(_HPUX_SOURCE)) +#define GC_HPUX_THREADS +#elif defined(__HAIKU__) +#define GC_HAIKU_THREADS +#elif defined(__DragonFly__)||defined(__FreeBSD_kernel__)||(defined(__FreeBSD__)&&!defined(SN_TARGET_ORBIS)) +#define GC_FREEBSD_THREADS +#elif defined(__NetBSD__) +#define GC_NETBSD_THREADS +#elif defined(__alpha)||defined(__alpha__) +#define GC_OSF1_THREADS +#elif (defined(mips)||defined(__mips)||defined(_mips))&&!(defined(nec_ews)||defined(_nec_ews)||defined(ultrix)||defined(__ultrix)) +#define GC_IRIX_THREADS +#elif defined(__sparc)||((defined(sun)||defined(__sun))&&(defined(i386)||defined(__i386__)||defined(__amd64)||defined(__amd64__))) +#define GC_SOLARIS_THREADS +#elif defined(__APPLE__)&&defined(__MACH__) +#define GC_DARWIN_THREADS +#endif +#if defined(DGUX)&&(defined(i386)||defined(__i386__)) +#define GC_DGUX386_THREADS +#endif +#if defined(_AIX) +#define GC_AIX_THREADS +#endif +#if (defined(_WIN32)||defined(_MSC_VER)||defined(__BORLANDC__)||defined(__CYGWIN32__)||defined(__CYGWIN__)||defined(__CEGCC__)||defined(_WIN32_WCE)||defined(__MINGW32__))&&!defined(GC_WIN32_THREADS) +#define GC_WIN32_THREADS +#endif +#if defined(__rtems__)&&(defined(i386)||defined(__i386__)) +#define GC_RTEMS_PTHREADS +#endif +#endif +#undef GC_PTHREADS +#if (!defined(GC_WIN32_THREADS)||defined(GC_WIN32_PTHREADS)||defined(__CYGWIN32__)||defined(__CYGWIN__))&&defined(GC_THREADS)&&!defined(NN_PLATFORM_CTR)&&!defined(NN_BUILD_TARGET_PLATFORM_NX) +#define GC_PTHREADS +#endif +#if!defined(_PTHREADS)&&defined(GC_NETBSD_THREADS) +#define _PTHREADS +#endif +#if defined(GC_DGUX386_THREADS)&&!defined(_POSIX4A_DRAFT10_SOURCE) +#define _POSIX4A_DRAFT10_SOURCE 1 +#endif +#if!defined(_REENTRANT)&&defined(GC_PTHREADS)&&!defined(GC_WIN32_THREADS) +#define _REENTRANT 1 +#endif +#define __GC +#if!defined(_WIN32_WCE)||defined(__GNUC__) +#include +#if defined(__MINGW32__)&&!defined(_WIN32_WCE) +#include +#endif +#else +#include +#ifndef _PTRDIFF_T_DEFINED +#define _PTRDIFF_T_DEFINED +typedef long ptrdiff_t; +#endif +#endif +#if!defined(GC_NOT_DLL)&&!defined(GC_DLL)&&((defined(_DLL)&&!defined(__GNUC__))||(defined(DLL_EXPORT)&&defined(GC_BUILD))) +#define GC_DLL +#endif +#if defined(GC_DLL)&&!defined(GC_API) +#if defined(__CEGCC__) +#if defined(GC_BUILD) +#define GC_API __declspec(dllexport) +#else +#define GC_API __declspec(dllimport) +#endif +#elif defined(__MINGW32__) +#if defined(__cplusplus)&&defined(GC_BUILD) +#define GC_API extern __declspec(dllexport) +#elif defined(GC_BUILD)||defined(__MINGW32_DELAY_LOAD__) +#define GC_API __declspec(dllexport) +#else +#define GC_API extern __declspec(dllimport) +#endif +#elif defined(_MSC_VER)||defined(__DMC__)||defined(__BORLANDC__)||defined(__CYGWIN__) +#ifdef GC_BUILD +#define GC_API extern __declspec(dllexport) +#else +#define GC_API __declspec(dllimport) +#endif +#elif defined(__WATCOMC__) +#ifdef GC_BUILD +#define GC_API extern __declspec(dllexport) +#else +#define GC_API extern __declspec(dllimport) +#endif +#elif defined(__SYMBIAN32__) +#ifdef GC_BUILD +#define GC_API extern EXPORT_C +#else +#define GC_API extern IMPORT_C +#endif +#elif defined(__GNUC__) +#if defined(GC_BUILD)&&!defined(GC_NO_VISIBILITY)&&(GC_GNUC_PREREQ(4,0)||defined(GC_VISIBILITY_HIDDEN_SET)) +#define GC_API extern __attribute__((__visibility__("default"))) +#endif +#endif +#endif +#ifndef GC_API +#define GC_API extern +#endif +#ifndef GC_CALL +#define GC_CALL +#endif +#ifndef GC_CALLBACK +#define GC_CALLBACK 
GC_CALL +#endif +#ifndef GC_ATTR_MALLOC +#ifdef GC_OOM_FUNC_RETURNS_ALIAS +#define GC_ATTR_MALLOC +#elif GC_GNUC_PREREQ(3,1) +#define GC_ATTR_MALLOC __attribute__((__malloc__)) +#elif defined(_MSC_VER)&&(_MSC_VER>=1900)&&!defined(__EDG__) +#define GC_ATTR_MALLOC __declspec(allocator)__declspec(noalias)__declspec(restrict) +#elif defined(_MSC_VER)&&_MSC_VER>=1400 +#define GC_ATTR_MALLOC __declspec(noalias)__declspec(restrict) +#else +#define GC_ATTR_MALLOC +#endif +#endif +#ifndef GC_ATTR_ALLOC_SIZE +#undef GC_ATTR_CALLOC_SIZE +#ifdef __clang__ +#if __has_attribute(__alloc_size__) +#define GC_ATTR_ALLOC_SIZE(argnum)__attribute__((__alloc_size__(argnum))) +#define GC_ATTR_CALLOC_SIZE(n,s)__attribute__((__alloc_size__(n,s))) +#else +#define GC_ATTR_ALLOC_SIZE(argnum) +#endif +#elif GC_GNUC_PREREQ(4,3)&&!defined(__ICC) +#define GC_ATTR_ALLOC_SIZE(argnum)__attribute__((__alloc_size__(argnum))) +#define GC_ATTR_CALLOC_SIZE(n,s)__attribute__((__alloc_size__(n,s))) +#else +#define GC_ATTR_ALLOC_SIZE(argnum) +#endif +#endif +#ifndef GC_ATTR_CALLOC_SIZE +#define GC_ATTR_CALLOC_SIZE(n,s) +#endif +#ifndef GC_ATTR_NONNULL +#if GC_GNUC_PREREQ(4,0) +#define GC_ATTR_NONNULL(argnum)__attribute__((__nonnull__(argnum))) +#else +#define GC_ATTR_NONNULL(argnum) +#endif +#endif +#ifndef GC_ATTR_CONST +#if GC_GNUC_PREREQ(4,0) +#define GC_ATTR_CONST __attribute__((__const__)) +#else +#define GC_ATTR_CONST +#endif +#endif +#ifndef GC_ATTR_DEPRECATED +#ifdef GC_BUILD +#undef GC_ATTR_DEPRECATED +#define GC_ATTR_DEPRECATED +#elif GC_GNUC_PREREQ(4,0) +#define GC_ATTR_DEPRECATED __attribute__((__deprecated__)) +#elif defined(_MSC_VER)&&_MSC_VER>=1200 +#define GC_ATTR_DEPRECATED __declspec(deprecated) +#else +#define GC_ATTR_DEPRECATED +#endif +#endif +#if defined(__sgi)&&!defined(__GNUC__)&&_COMPILER_VERSION>=720 +#define GC_ADD_CALLER +#define GC_RETURN_ADDR (GC_word)__return_address +#endif +#if defined(__linux__)||defined(__GLIBC__) +#if!defined(__native_client__) +#include +#endif +#if (__GLIBC__==2&&__GLIBC_MINOR__>=1||__GLIBC__ > 2)&&!defined(__ia64__)&&!defined(GC_MISSING_EXECINFO_H)&&!defined(GC_HAVE_BUILTIN_BACKTRACE) +#define GC_HAVE_BUILTIN_BACKTRACE +#endif +#if defined(__i386__)||defined(__amd64__)||defined(__x86_64__) +#define GC_CAN_SAVE_CALL_STACKS +#endif +#endif +#if defined(_MSC_VER)&&_MSC_VER>=1200&&!defined(_AMD64_)&&!defined(_M_X64)&&!defined(_WIN32_WCE)&&!defined(GC_HAVE_NO_BUILTIN_BACKTRACE)&&!defined(GC_HAVE_BUILTIN_BACKTRACE) +#define GC_HAVE_BUILTIN_BACKTRACE +#endif +#if defined(GC_HAVE_BUILTIN_BACKTRACE)&&!defined(GC_CAN_SAVE_CALL_STACKS) +#define GC_CAN_SAVE_CALL_STACKS +#endif +#if defined(__sparc__) +#define GC_CAN_SAVE_CALL_STACKS +#endif +#if (defined(__linux__)||defined(__DragonFly__)||defined(__FreeBSD__)||defined(__FreeBSD_kernel__)||defined(__HAIKU__)||defined(__NetBSD__)||defined(__OpenBSD__)||defined(HOST_ANDROID)||defined(__ANDROID__))&&!defined(GC_CAN_SAVE_CALL_STACKS) +#define GC_ADD_CALLER +#if GC_GNUC_PREREQ(2,95) +#define GC_RETURN_ADDR (GC_word)__builtin_return_address(0) +#if GC_GNUC_PREREQ(4,0)&&(defined(__i386__)||defined(__amd64__)||defined(__x86_64__)) +#define GC_HAVE_RETURN_ADDR_PARENT +#define GC_RETURN_ADDR_PARENT (GC_word)__builtin_extract_return_addr(__builtin_return_address(1)) +#endif +#else +#define GC_RETURN_ADDR 0 +#endif +#endif +#ifdef GC_PTHREADS +#if (defined(GC_DARWIN_THREADS)||defined(GC_WIN32_PTHREADS)||defined(__native_client__)||defined(GC_RTEMS_PTHREADS))&&!defined(GC_NO_DLOPEN) +#define GC_NO_DLOPEN +#endif +#if 
(defined(GC_DARWIN_THREADS)||defined(GC_WIN32_PTHREADS)||defined(GC_OPENBSD_THREADS)||defined(__native_client__))&&!defined(GC_NO_PTHREAD_SIGMASK) +#define GC_NO_PTHREAD_SIGMASK +#endif +#if defined(__native_client__) +#ifndef GC_PTHREAD_CREATE_CONST +#define GC_PTHREAD_CREATE_CONST +#endif +#ifndef GC_HAVE_PTHREAD_EXIT +#define GC_HAVE_PTHREAD_EXIT +#define GC_PTHREAD_EXIT_ATTRIBUTE +#endif +#endif +#if!defined(GC_HAVE_PTHREAD_EXIT)&&!defined(HOST_ANDROID)&&!defined(__ANDROID__)&&(defined(GC_LINUX_THREADS)||defined(GC_SOLARIS_THREADS)) +#define GC_HAVE_PTHREAD_EXIT +#if GC_GNUC_PREREQ(2,7) +#define GC_PTHREAD_EXIT_ATTRIBUTE __attribute__((__noreturn__)) +#elif defined(__NORETURN) +#define GC_PTHREAD_EXIT_ATTRIBUTE __NORETURN +#else +#define GC_PTHREAD_EXIT_ATTRIBUTE +#endif +#endif +#if (!defined(GC_HAVE_PTHREAD_EXIT)||defined(__native_client__))&&!defined(GC_NO_PTHREAD_CANCEL) +#define GC_NO_PTHREAD_CANCEL +#endif +#endif +#ifdef __cplusplus +#ifndef GC_ATTR_EXPLICIT +#if __cplusplus>=201103L&&!defined(__clang__)||_MSVC_LANG>=201103L||defined(CPPCHECK) +#define GC_ATTR_EXPLICIT explicit +#else +#define GC_ATTR_EXPLICIT +#endif +#endif +#ifndef GC_NOEXCEPT +#if defined(__DMC__)||(defined(__BORLANDC__)&&(defined(_RWSTD_NO_EXCEPTIONS)||defined(_RWSTD_NO_EX_SPEC)))||(defined(_MSC_VER)&&defined(_HAS_EXCEPTIONS)&&!_HAS_EXCEPTIONS)||(defined(__WATCOMC__)&&!defined(_CPPUNWIND)) +#define GC_NOEXCEPT +#ifndef GC_NEW_ABORTS_ON_OOM +#define GC_NEW_ABORTS_ON_OOM +#endif +#elif __cplusplus>=201103L||_MSVC_LANG>=201103L +#define GC_NOEXCEPT noexcept +#else +#define GC_NOEXCEPT throw() +#endif +#endif +#endif +#endif +#ifdef __cplusplus +extern "C" { +#endif +typedef void*GC_PTR; +#ifdef _WIN64 +#if defined(__int64)&&!defined(CPPCHECK) +typedef unsigned __int64 GC_word; +typedef __int64 GC_signed_word; +#else +typedef unsigned long long GC_word; +typedef long long GC_signed_word; +#endif +#else +typedef unsigned long GC_word; +typedef long GC_signed_word; +#endif +GC_API unsigned GC_CALL GC_get_version(void); +GC_API GC_ATTR_DEPRECATED GC_word GC_gc_no; +GC_API GC_word GC_CALL GC_get_gc_no(void); +#ifdef GC_THREADS +GC_API GC_ATTR_DEPRECATED int GC_parallel; +GC_API int GC_CALL GC_get_parallel(void); +GC_API void GC_CALL GC_set_markers_count(unsigned); +#endif +typedef void*(GC_CALLBACK*GC_oom_func)(size_t); +GC_API GC_ATTR_DEPRECATED GC_oom_func GC_oom_fn; +GC_API void GC_CALL GC_set_oom_fn(GC_oom_func)GC_ATTR_NONNULL(1); +GC_API GC_oom_func GC_CALL GC_get_oom_fn(void); +typedef void (GC_CALLBACK*GC_on_heap_resize_proc)(GC_word); +GC_API GC_ATTR_DEPRECATED GC_on_heap_resize_proc GC_on_heap_resize; +GC_API void GC_CALL GC_set_on_heap_resize(GC_on_heap_resize_proc); +GC_API GC_on_heap_resize_proc GC_CALL GC_get_on_heap_resize(void); +typedef enum { +GC_EVENT_START, +GC_EVENT_MARK_START, +GC_EVENT_MARK_END, +GC_EVENT_RECLAIM_START, +GC_EVENT_RECLAIM_END, +GC_EVENT_END, +GC_EVENT_PRE_STOP_WORLD, +GC_EVENT_POST_STOP_WORLD, +GC_EVENT_PRE_START_WORLD, +GC_EVENT_POST_START_WORLD, +GC_EVENT_THREAD_SUSPENDED, +GC_EVENT_THREAD_UNSUSPENDED +} GC_EventType; +typedef void (GC_CALLBACK*GC_on_collection_event_proc)(GC_EventType); +GC_API void GC_CALL GC_set_on_collection_event(GC_on_collection_event_proc); +GC_API GC_on_collection_event_proc GC_CALL GC_get_on_collection_event(void); +#if defined(GC_THREADS)||(defined(GC_BUILD)&&defined(NN_PLATFORM_CTR)) +typedef void (GC_CALLBACK*GC_on_thread_event_proc)(GC_EventType, +void*); +GC_API void GC_CALL GC_set_on_thread_event(GC_on_thread_event_proc); +GC_API 
GC_on_thread_event_proc GC_CALL GC_get_on_thread_event(void); +#endif +GC_API GC_ATTR_DEPRECATED int GC_find_leak; +GC_API void GC_CALL GC_set_find_leak(int); +GC_API int GC_CALL GC_get_find_leak(void); +GC_API GC_ATTR_DEPRECATED int GC_all_interior_pointers; +GC_API void GC_CALL GC_set_all_interior_pointers(int); +GC_API int GC_CALL GC_get_all_interior_pointers(void); +GC_API GC_ATTR_DEPRECATED int GC_finalize_on_demand; +GC_API void GC_CALL GC_set_finalize_on_demand(int); +GC_API int GC_CALL GC_get_finalize_on_demand(void); +GC_API GC_ATTR_DEPRECATED int GC_java_finalization; +GC_API void GC_CALL GC_set_java_finalization(int); +GC_API int GC_CALL GC_get_java_finalization(void); +typedef void (GC_CALLBACK*GC_finalizer_notifier_proc)(void); +GC_API GC_ATTR_DEPRECATED GC_finalizer_notifier_proc GC_finalizer_notifier; +GC_API void GC_CALL GC_set_finalizer_notifier(GC_finalizer_notifier_proc); +GC_API GC_finalizer_notifier_proc GC_CALL GC_get_finalizer_notifier(void); +GC_API +#ifndef GC_DONT_GC +GC_ATTR_DEPRECATED +#endif +int GC_dont_gc; +GC_API GC_ATTR_DEPRECATED int GC_dont_expand; +GC_API void GC_CALL GC_set_dont_expand(int); +GC_API int GC_CALL GC_get_dont_expand(void); +GC_API GC_ATTR_DEPRECATED int GC_use_entire_heap; +GC_API GC_ATTR_DEPRECATED int GC_full_freq; +GC_API void GC_CALL GC_set_full_freq(int); +GC_API int GC_CALL GC_get_full_freq(void); +GC_API GC_ATTR_DEPRECATED GC_word GC_non_gc_bytes; +GC_API void GC_CALL GC_set_non_gc_bytes(GC_word); +GC_API GC_word GC_CALL GC_get_non_gc_bytes(void); +GC_API GC_ATTR_DEPRECATED int GC_no_dls; +GC_API void GC_CALL GC_set_no_dls(int); +GC_API int GC_CALL GC_get_no_dls(void); +GC_API GC_ATTR_DEPRECATED GC_word GC_free_space_divisor; +GC_API void GC_CALL GC_set_free_space_divisor(GC_word); +GC_API GC_word GC_CALL GC_get_free_space_divisor(void); +GC_API GC_ATTR_DEPRECATED GC_word GC_max_retries; +GC_API void GC_CALL GC_set_max_retries(GC_word); +GC_API GC_word GC_CALL GC_get_max_retries(void); +GC_API GC_ATTR_DEPRECATED char*GC_stackbottom; +GC_API GC_ATTR_DEPRECATED int GC_dont_precollect; +GC_API void GC_CALL GC_set_dont_precollect(int); +GC_API int GC_CALL GC_get_dont_precollect(void); +GC_API GC_ATTR_DEPRECATED unsigned long GC_time_limit; +#define GC_TIME_UNLIMITED 999999 +GC_API void GC_CALL GC_set_time_limit(unsigned long); +GC_API unsigned long GC_CALL GC_get_time_limit(void); +struct GC_timeval_s { +unsigned long tv_ms; +unsigned long tv_nsec; +}; +GC_API void GC_CALL GC_set_time_limit_tv(struct GC_timeval_s); +GC_API struct GC_timeval_s GC_CALL GC_get_time_limit_tv(void); +GC_API void GC_CALL GC_set_allocd_bytes_per_finalizer(GC_word); +GC_API GC_word GC_CALL GC_get_allocd_bytes_per_finalizer(void); +GC_API void GC_CALL GC_start_performance_measurement(void); +GC_API unsigned long GC_CALL GC_get_full_gc_total_time(void); +GC_API void GC_CALL GC_set_pages_executable(int); +GC_API int GC_CALL GC_get_pages_executable(void); +GC_API void GC_CALL GC_set_min_bytes_allocd(size_t); +GC_API size_t GC_CALL GC_get_min_bytes_allocd(void); +GC_API void GC_CALL GC_set_rate(int); +GC_API int GC_CALL GC_get_rate(void); +GC_API void GC_CALL GC_set_max_prior_attempts(int); +GC_API int GC_CALL GC_get_max_prior_attempts(void); +GC_API void GC_CALL GC_set_handle_fork(int); +GC_API void GC_CALL GC_atfork_prepare(void); +GC_API void GC_CALL GC_atfork_parent(void); +GC_API void GC_CALL GC_atfork_child(void); +GC_API void GC_CALL GC_init(void); +GC_API int GC_CALL GC_is_init_called(void); +GC_API void GC_CALL GC_deinit(void); +GC_API GC_ATTR_MALLOC 
GC_ATTR_ALLOC_SIZE(1)void*GC_CALL +GC_malloc(size_t); +GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1)void*GC_CALL +GC_malloc_atomic(size_t); +GC_API GC_ATTR_MALLOC char*GC_CALL GC_strdup(const char*); +GC_API GC_ATTR_MALLOC char*GC_CALL +GC_strndup(const char*,size_t)GC_ATTR_NONNULL(1); +GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1)void*GC_CALL +GC_malloc_uncollectable(size_t); +GC_API GC_ATTR_DEPRECATED void*GC_CALL GC_malloc_stubborn(size_t); +GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(2)void*GC_CALL +GC_memalign(size_t,size_t); +GC_API int GC_CALL GC_posix_memalign(void**,size_t, +size_t)GC_ATTR_NONNULL(1); +GC_API void GC_CALL GC_free(void*); +#define GC_MALLOC_STUBBORN(sz)GC_MALLOC(sz) +#define GC_NEW_STUBBORN(t)GC_NEW(t) +#define GC_CHANGE_STUBBORN(p)GC_change_stubborn(p) +GC_API GC_ATTR_DEPRECATED void GC_CALL GC_change_stubborn(const void*); +GC_API void GC_CALL GC_end_stubborn_change(const void*)GC_ATTR_NONNULL(1); +GC_API void*GC_CALL GC_base(void*); +GC_API int GC_CALL GC_is_heap_ptr(const void*); +GC_API size_t GC_CALL GC_size(const void*)GC_ATTR_NONNULL(1); +GC_API void*GC_CALL GC_realloc(void*, +size_t) +GC_ATTR_ALLOC_SIZE(2); +GC_API int GC_CALL GC_expand_hp(size_t); +GC_API void GC_CALL GC_set_max_heap_size(GC_word); +GC_API void GC_CALL GC_exclude_static_roots(void*, +void*); +GC_API void GC_CALL GC_clear_exclusion_table(void); +GC_API void GC_CALL GC_clear_roots(void); +GC_API void GC_CALL GC_add_roots(void*, +void*); +GC_API void GC_CALL GC_remove_roots(void*, +void*); +GC_API void GC_CALL GC_register_displacement(size_t); +GC_API void GC_CALL GC_debug_register_displacement(size_t); +GC_API void GC_CALL GC_gcollect(void); +GC_API void GC_CALL GC_gcollect_and_unmap(void); +typedef int (GC_CALLBACK*GC_stop_func)(void); +GC_API int GC_CALL GC_try_to_collect(GC_stop_func) +GC_ATTR_NONNULL(1); +GC_API void GC_CALL GC_set_stop_func(GC_stop_func) +GC_ATTR_NONNULL(1); +GC_API GC_stop_func GC_CALL GC_get_stop_func(void); +GC_API size_t GC_CALL GC_get_heap_size(void); +GC_API size_t GC_CALL GC_get_free_bytes(void); +GC_API size_t GC_CALL GC_get_unmapped_bytes(void); +GC_API size_t GC_CALL GC_get_bytes_since_gc(void); +GC_API size_t GC_CALL GC_get_expl_freed_bytes_since_gc(void); +GC_API size_t GC_CALL GC_get_total_bytes(void); +GC_API void GC_CALL GC_get_heap_usage_safe(GC_word*, +GC_word*, +GC_word*, +GC_word*, +GC_word*); +struct GC_prof_stats_s { +GC_word heapsize_full; +GC_word free_bytes_full; +GC_word unmapped_bytes; +GC_word bytes_allocd_since_gc; +GC_word allocd_bytes_before_gc; +GC_word non_gc_bytes; +GC_word gc_no; +GC_word markers_m1; +GC_word bytes_reclaimed_since_gc; +GC_word reclaimed_bytes_before_gc; +GC_word expl_freed_bytes_since_gc; +}; +GC_API size_t GC_CALL GC_get_prof_stats(struct GC_prof_stats_s*, +size_t); +#ifdef GC_THREADS +GC_API size_t GC_CALL GC_get_prof_stats_unsafe(struct GC_prof_stats_s*, +size_t); +#endif +GC_API size_t GC_CALL GC_get_size_map_at(int i); +GC_API size_t GC_CALL GC_get_memory_use(void); +GC_API void GC_CALL GC_disable(void); +GC_API int GC_CALL GC_is_disabled(void); +GC_API void GC_CALL GC_enable(void); +GC_API void GC_CALL GC_set_manual_vdb_allowed(int); +GC_API int GC_CALL GC_get_manual_vdb_allowed(void); +GC_API void GC_CALL GC_enable_incremental(void); +GC_API int GC_CALL GC_is_incremental_mode(void); +#define GC_PROTECTS_POINTER_HEAP 1 +#define GC_PROTECTS_PTRFREE_HEAP 2 +#define GC_PROTECTS_STATIC_DATA 4 +#define GC_PROTECTS_STACK 8 +#define GC_PROTECTS_NONE 0 +GC_API int GC_CALL GC_incremental_protection_needs(void); +GC_API int 
GC_CALL GC_collect_a_little(void); +GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1)void*GC_CALL +GC_malloc_ignore_off_page(size_t); +GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1)void*GC_CALL +GC_malloc_atomic_ignore_off_page(size_t); +#ifdef GC_ADD_CALLER +#define GC_EXTRAS GC_RETURN_ADDR,__FILE__,__LINE__ +#define GC_EXTRA_PARAMS GC_word ra,const char*s,int i +#else +#define GC_EXTRAS __FILE__,__LINE__ +#define GC_EXTRA_PARAMS const char*s,int i +#endif +GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1)void*GC_CALL +GC_malloc_atomic_uncollectable(size_t); +GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1)void*GC_CALL +GC_debug_malloc_atomic_uncollectable(size_t,GC_EXTRA_PARAMS); +GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1)void*GC_CALL +GC_debug_malloc(size_t,GC_EXTRA_PARAMS); +GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1)void*GC_CALL +GC_debug_malloc_atomic(size_t,GC_EXTRA_PARAMS); +GC_API GC_ATTR_MALLOC char*GC_CALL +GC_debug_strdup(const char*,GC_EXTRA_PARAMS); +GC_API GC_ATTR_MALLOC char*GC_CALL +GC_debug_strndup(const char*,size_t,GC_EXTRA_PARAMS) +GC_ATTR_NONNULL(1); +GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1)void*GC_CALL +GC_debug_malloc_uncollectable(size_t, +GC_EXTRA_PARAMS); +GC_API GC_ATTR_DEPRECATED void*GC_CALL +GC_debug_malloc_stubborn(size_t,GC_EXTRA_PARAMS); +GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1)void*GC_CALL +GC_debug_malloc_ignore_off_page(size_t, +GC_EXTRA_PARAMS); +GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1)void*GC_CALL +GC_debug_malloc_atomic_ignore_off_page(size_t, +GC_EXTRA_PARAMS); +GC_API void GC_CALL GC_debug_free(void*); +GC_API void*GC_CALL GC_debug_realloc(void*, +size_t,GC_EXTRA_PARAMS) +GC_ATTR_ALLOC_SIZE(2); +GC_API GC_ATTR_DEPRECATED void GC_CALL GC_debug_change_stubborn(const void*); +GC_API void GC_CALL GC_debug_end_stubborn_change(const void*) +GC_ATTR_NONNULL(1); +GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1)void*GC_CALL +GC_debug_malloc_replacement(size_t); +GC_API GC_ATTR_ALLOC_SIZE(2)void*GC_CALL +GC_debug_realloc_replacement(void*, +size_t); +#ifdef GC_DEBUG_REPLACEMENT +#define GC_MALLOC(sz)GC_debug_malloc_replacement(sz) +#define GC_REALLOC(old,sz)GC_debug_realloc_replacement(old,sz) +#elif defined(GC_DEBUG) +#define GC_MALLOC(sz)GC_debug_malloc(sz,GC_EXTRAS) +#define GC_REALLOC(old,sz)GC_debug_realloc(old,sz,GC_EXTRAS) +#else +#define GC_MALLOC(sz)GC_malloc(sz) +#define GC_REALLOC(old,sz)GC_realloc(old,sz) +#endif +#ifdef GC_DEBUG +#define GC_MALLOC_ATOMIC(sz)GC_debug_malloc_atomic(sz,GC_EXTRAS) +#define GC_STRDUP(s)GC_debug_strdup(s,GC_EXTRAS) +#define GC_STRNDUP(s,sz)GC_debug_strndup(s,sz,GC_EXTRAS) +#define GC_MALLOC_ATOMIC_UNCOLLECTABLE(sz)GC_debug_malloc_atomic_uncollectable(sz,GC_EXTRAS) +#define GC_MALLOC_UNCOLLECTABLE(sz)GC_debug_malloc_uncollectable(sz,GC_EXTRAS) +#define GC_MALLOC_IGNORE_OFF_PAGE(sz)GC_debug_malloc_ignore_off_page(sz,GC_EXTRAS) +#define GC_MALLOC_ATOMIC_IGNORE_OFF_PAGE(sz)GC_debug_malloc_atomic_ignore_off_page(sz,GC_EXTRAS) +#define GC_FREE(p)GC_debug_free(p) +#define GC_REGISTER_FINALIZER(p,f,d,of,od)GC_debug_register_finalizer(p,f,d,of,od) +#define GC_REGISTER_FINALIZER_IGNORE_SELF(p,f,d,of,od)GC_debug_register_finalizer_ignore_self(p,f,d,of,od) +#define GC_REGISTER_FINALIZER_NO_ORDER(p,f,d,of,od)GC_debug_register_finalizer_no_order(p,f,d,of,od) +#define GC_REGISTER_FINALIZER_UNREACHABLE(p,f,d,of,od)GC_debug_register_finalizer_unreachable(p,f,d,of,od) +#define GC_END_STUBBORN_CHANGE(p)GC_debug_end_stubborn_change(p) +#define GC_PTR_STORE_AND_DIRTY(p,q)GC_debug_ptr_store_and_dirty(p,q) +#define 
GC_GENERAL_REGISTER_DISAPPEARING_LINK(link,obj)GC_general_register_disappearing_link(link,GC_base(( void*)(obj))) +#define GC_REGISTER_LONG_LINK(link,obj)GC_register_long_link(link,GC_base(( void*)(obj))) +#define GC_REGISTER_DISPLACEMENT(n)GC_debug_register_displacement(n) +#else +#define GC_MALLOC_ATOMIC(sz)GC_malloc_atomic(sz) +#define GC_STRDUP(s)GC_strdup(s) +#define GC_STRNDUP(s,sz)GC_strndup(s,sz) +#define GC_MALLOC_ATOMIC_UNCOLLECTABLE(sz)GC_malloc_atomic_uncollectable(sz) +#define GC_MALLOC_UNCOLLECTABLE(sz)GC_malloc_uncollectable(sz) +#define GC_MALLOC_IGNORE_OFF_PAGE(sz)GC_malloc_ignore_off_page(sz) +#define GC_MALLOC_ATOMIC_IGNORE_OFF_PAGE(sz)GC_malloc_atomic_ignore_off_page(sz) +#define GC_FREE(p)GC_free(p) +#define GC_REGISTER_FINALIZER(p,f,d,of,od)GC_register_finalizer(p,f,d,of,od) +#define GC_REGISTER_FINALIZER_IGNORE_SELF(p,f,d,of,od)GC_register_finalizer_ignore_self(p,f,d,of,od) +#define GC_REGISTER_FINALIZER_NO_ORDER(p,f,d,of,od)GC_register_finalizer_no_order(p,f,d,of,od) +#define GC_REGISTER_FINALIZER_UNREACHABLE(p,f,d,of,od)GC_register_finalizer_unreachable(p,f,d,of,od) +#define GC_END_STUBBORN_CHANGE(p)GC_end_stubborn_change(p) +#define GC_PTR_STORE_AND_DIRTY(p,q)GC_ptr_store_and_dirty(p,q) +#define GC_GENERAL_REGISTER_DISAPPEARING_LINK(link,obj)GC_general_register_disappearing_link(link,obj) +#define GC_REGISTER_LONG_LINK(link,obj)GC_register_long_link(link,obj) +#define GC_REGISTER_DISPLACEMENT(n)GC_register_displacement(n) +#endif +#define GC_NEW(t)((t*)GC_MALLOC(sizeof(t))) +#define GC_NEW_ATOMIC(t)((t*)GC_MALLOC_ATOMIC(sizeof(t))) +#define GC_NEW_UNCOLLECTABLE(t)((t*)GC_MALLOC_UNCOLLECTABLE(sizeof(t))) +#ifdef GC_REQUIRE_WCSDUP +GC_API GC_ATTR_MALLOC wchar_t*GC_CALL +GC_wcsdup(const wchar_t*)GC_ATTR_NONNULL(1); +GC_API GC_ATTR_MALLOC wchar_t*GC_CALL +GC_debug_wcsdup(const wchar_t*,GC_EXTRA_PARAMS)GC_ATTR_NONNULL(1); +#ifdef GC_DEBUG +#define GC_WCSDUP(s)GC_debug_wcsdup(s,GC_EXTRAS) +#else +#define GC_WCSDUP(s)GC_wcsdup(s) +#endif +#endif +typedef void (GC_CALLBACK*GC_finalization_proc)(void*, +void*); +GC_API void GC_CALL GC_register_finalizer(void*, +GC_finalization_proc,void*, +GC_finalization_proc*,void**) +GC_ATTR_NONNULL(1); +GC_API void GC_CALL GC_debug_register_finalizer(void*, +GC_finalization_proc,void*, +GC_finalization_proc*,void**) +GC_ATTR_NONNULL(1); +GC_API void GC_CALL GC_register_finalizer_ignore_self(void*, +GC_finalization_proc,void*, +GC_finalization_proc*,void**) +GC_ATTR_NONNULL(1); +GC_API void GC_CALL GC_debug_register_finalizer_ignore_self(void*, +GC_finalization_proc,void*, +GC_finalization_proc*,void**) +GC_ATTR_NONNULL(1); +GC_API void GC_CALL GC_register_finalizer_no_order(void*, +GC_finalization_proc,void*, +GC_finalization_proc*,void**) +GC_ATTR_NONNULL(1); +GC_API void GC_CALL GC_debug_register_finalizer_no_order(void*, +GC_finalization_proc,void*, +GC_finalization_proc*,void**) +GC_ATTR_NONNULL(1); +GC_API void GC_CALL GC_register_finalizer_unreachable(void*, +GC_finalization_proc,void*, +GC_finalization_proc*,void**) +GC_ATTR_NONNULL(1); +GC_API void GC_CALL GC_debug_register_finalizer_unreachable(void*, +GC_finalization_proc,void*, +GC_finalization_proc*,void**) +GC_ATTR_NONNULL(1); +#define GC_NO_MEMORY 2 +GC_API int GC_CALL GC_register_disappearing_link(void**) +GC_ATTR_NONNULL(1); +GC_API int GC_CALL GC_general_register_disappearing_link(void**, +const void*) +GC_ATTR_NONNULL(1)GC_ATTR_NONNULL(2); +GC_API int GC_CALL GC_move_disappearing_link(void**, +void**) +GC_ATTR_NONNULL(2); +GC_API int GC_CALL 
GC_unregister_disappearing_link(void**); +GC_API int GC_CALL GC_register_long_link(void**, +const void*) +GC_ATTR_NONNULL(1)GC_ATTR_NONNULL(2); +GC_API int GC_CALL GC_move_long_link(void**, +void**) +GC_ATTR_NONNULL(2); +GC_API int GC_CALL GC_unregister_long_link(void**); +typedef enum { +GC_TOGGLE_REF_DROP, +GC_TOGGLE_REF_STRONG, +GC_TOGGLE_REF_WEAK +} GC_ToggleRefStatus; +typedef GC_ToggleRefStatus (GC_CALLBACK*GC_toggleref_func)(void*); +GC_API void GC_CALL GC_set_toggleref_func(GC_toggleref_func); +GC_API GC_toggleref_func GC_CALL GC_get_toggleref_func(void); +GC_API int GC_CALL GC_toggleref_add(void*,int) +GC_ATTR_NONNULL(1); +typedef void (GC_CALLBACK*GC_await_finalize_proc)(void*); +GC_API void GC_CALL GC_set_await_finalize_proc(GC_await_finalize_proc); +GC_API GC_await_finalize_proc GC_CALL GC_get_await_finalize_proc(void); +GC_API int GC_CALL GC_should_invoke_finalizers(void); +GC_API int GC_CALL GC_invoke_finalizers(void); +#if defined(__GNUC__)&&!defined(__INTEL_COMPILER) +#define GC_reachable_here(ptr)__asm__ __volatile__(" "::"X"(ptr):"memory") +#else +GC_API void GC_CALL GC_noop1(GC_word); +#ifdef LINT2 +#define GC_reachable_here(ptr)GC_noop1(~(GC_word)(ptr)^(~(GC_word)0)) +#else +#define GC_reachable_here(ptr)GC_noop1((GC_word)(ptr)) +#endif +#endif +typedef void (GC_CALLBACK*GC_warn_proc)(char*, +GC_word); +GC_API void GC_CALL GC_set_warn_proc(GC_warn_proc)GC_ATTR_NONNULL(1); +GC_API GC_warn_proc GC_CALL GC_get_warn_proc(void); +GC_API void GC_CALLBACK GC_ignore_warn_proc(char*,GC_word); +GC_API void GC_CALL GC_set_log_fd(int); +typedef void (GC_CALLBACK*GC_abort_func)(const char*); +GC_API void GC_CALL GC_set_abort_func(GC_abort_func)GC_ATTR_NONNULL(1); +GC_API GC_abort_func GC_CALL GC_get_abort_func(void); +GC_API void GC_CALL GC_abort_on_oom(void); +typedef GC_word GC_hidden_pointer; +#define GC_HIDE_POINTER(p)(~(GC_hidden_pointer)(p)) +#define GC_REVEAL_POINTER(p)((void*)GC_HIDE_POINTER(p)) +#if defined(I_HIDE_POINTERS)||defined(GC_I_HIDE_POINTERS) +#define HIDE_POINTER(p)GC_HIDE_POINTER(p) +#define REVEAL_POINTER(p)GC_REVEAL_POINTER(p) +#endif +#ifdef GC_THREADS +GC_API void GC_CALL GC_alloc_lock(void); +GC_API void GC_CALL GC_alloc_unlock(void); +#else +#define GC_alloc_lock()(void)0 +#define GC_alloc_unlock()(void)0 +#endif +typedef void*(GC_CALLBACK*GC_fn_type)(void*); +GC_API void*GC_CALL GC_call_with_alloc_lock(GC_fn_type, +void*)GC_ATTR_NONNULL(1); +struct GC_stack_base { +void*mem_base; +#if defined(__ia64)||defined(__ia64__)||defined(_M_IA64) +void*reg_base; +#endif +}; +typedef void*(GC_CALLBACK*GC_stack_base_func)( +struct GC_stack_base*,void*); +GC_API void*GC_CALL GC_call_with_stack_base(GC_stack_base_func, +void*)GC_ATTR_NONNULL(1); +#define GC_SUCCESS 0 +#define GC_DUPLICATE 1 +#define GC_NO_THREADS 2 +#define GC_UNIMPLEMENTED 3 +#define GC_NOT_FOUND 4 +#if defined(GC_DARWIN_THREADS)||defined(GC_WIN32_THREADS) +GC_API void GC_CALL GC_use_threads_discovery(void); +#endif +#ifdef GC_THREADS +GC_API void GC_CALL GC_set_suspend_signal(int); +GC_API void GC_CALL GC_set_thr_restart_signal(int); +GC_API int GC_CALL GC_get_suspend_signal(void); +GC_API int GC_CALL GC_get_thr_restart_signal(void); +GC_API void GC_CALL GC_start_mark_threads(void); +GC_API void GC_CALL GC_allow_register_threads(void); +GC_API int GC_CALL GC_register_my_thread(const struct GC_stack_base*) +GC_ATTR_NONNULL(1); +GC_API int GC_CALL GC_thread_is_registered(void); +GC_API void GC_CALL GC_register_altstack(void*, +GC_word, +void*, +GC_word); +GC_API int GC_CALL 
GC_unregister_my_thread(void); +GC_API void GC_CALL GC_stop_world_external(void); +GC_API void GC_CALL GC_start_world_external(void); +#endif +GC_API void*GC_CALL GC_do_blocking(GC_fn_type, +void*)GC_ATTR_NONNULL(1); +GC_API void*GC_CALL GC_call_with_gc_active(GC_fn_type, +void*)GC_ATTR_NONNULL(1); +GC_API int GC_CALL GC_get_stack_base(struct GC_stack_base*) +GC_ATTR_NONNULL(1); +GC_API void*GC_CALL GC_get_my_stackbottom(struct GC_stack_base*) +GC_ATTR_NONNULL(1); +GC_API void GC_CALL GC_set_stackbottom(void*, +const struct GC_stack_base*) +GC_ATTR_NONNULL(2); +GC_API void*GC_CALL GC_same_obj(void*,void*); +GC_API void*GC_CALL GC_pre_incr(void**,ptrdiff_t) +GC_ATTR_NONNULL(1); +GC_API void*GC_CALL GC_post_incr(void**,ptrdiff_t) +GC_ATTR_NONNULL(1); +GC_API void*GC_CALL GC_is_visible(void*); +GC_API void*GC_CALL GC_is_valid_displacement(void*); +GC_API void GC_CALL GC_dump(void); +GC_API void GC_CALL GC_dump_named(const char*); +GC_API void GC_CALL GC_dump_regions(void); +GC_API void GC_CALL GC_dump_finalization(void); +#if defined(GC_DEBUG)&&defined(__GNUC__) +#define GC_PTR_ADD3(x,n,type_of_result)((type_of_result)GC_same_obj((x)+(n),(x))) +#define GC_PRE_INCR3(x,n,type_of_result)((type_of_result)GC_pre_incr((void**)(&(x)),(n)*sizeof(*x))) +#define GC_POST_INCR3(x,n,type_of_result)((type_of_result)GC_post_incr((void**)(&(x)),(n)*sizeof(*x))) +#define GC_PTR_ADD(x,n)GC_PTR_ADD3(x,n,__typeof__(x)) +#define GC_PRE_INCR(x,n)GC_PRE_INCR3(x,n,__typeof__(x)) +#define GC_POST_INCR(x)GC_POST_INCR3(x,1,__typeof__(x)) +#define GC_POST_DECR(x)GC_POST_INCR3(x,-1,__typeof__(x)) +#else +#define GC_PTR_ADD(x,n)((x)+(n)) +#define GC_PRE_INCR(x,n)((x)+=(n)) +#define GC_POST_INCR(x)((x)++) +#define GC_POST_DECR(x)((x)--) +#endif +#ifdef GC_DEBUG +#define GC_PTR_STORE(p,q)(*(void**)GC_is_visible((void*)(p))=GC_is_valid_displacement((void*)(q))) +#else +#define GC_PTR_STORE(p,q)(*(void**)(p)=(void*)(q)) +#endif +GC_API void GC_CALL GC_ptr_store_and_dirty(void*, +const void*); +GC_API void GC_CALL GC_debug_ptr_store_and_dirty(void*, +const void*); +GC_API void (GC_CALLBACK*GC_same_obj_print_proc)(void*, +void*); +GC_API void (GC_CALLBACK*GC_is_valid_displacement_print_proc)(void*); +GC_API void (GC_CALLBACK*GC_is_visible_print_proc)(void*); +#ifdef GC_PTHREADS +#ifdef __cplusplus +} +#endif +#ifndef GC_PTHREAD_REDIRECTS_H +#define GC_PTHREAD_REDIRECTS_H +#if defined(GC_H)&&defined(GC_PTHREADS) +#ifndef GC_PTHREAD_REDIRECTS_ONLY +#include +#ifndef GC_NO_DLOPEN +#include +#endif +#ifndef GC_NO_PTHREAD_SIGMASK +#include +#endif +#ifdef __cplusplus +extern "C" { +#endif +#ifndef GC_SUSPEND_THREAD_ID +#define GC_SUSPEND_THREAD_ID pthread_t +#endif +#ifndef GC_NO_DLOPEN +GC_API void*GC_dlopen(const char*,int); +#endif +#ifndef GC_NO_PTHREAD_SIGMASK +#if defined(GC_PTHREAD_SIGMASK_NEEDED)||defined(_BSD_SOURCE)||defined(_GNU_SOURCE)||(_POSIX_C_SOURCE>=199506L)||(_XOPEN_SOURCE>=500) +GC_API int GC_pthread_sigmask(int,const sigset_t*, +sigset_t*); +#endif +#endif +#ifndef GC_PTHREAD_CREATE_CONST +#define GC_PTHREAD_CREATE_CONST const +#endif +GC_API int GC_pthread_create(pthread_t*, +GC_PTHREAD_CREATE_CONST pthread_attr_t*, +void*(*)(void*),void*); +GC_API int GC_pthread_join(pthread_t,void**); +GC_API int GC_pthread_detach(pthread_t); +#ifndef GC_NO_PTHREAD_CANCEL +GC_API int GC_pthread_cancel(pthread_t); +#endif +#if defined(GC_HAVE_PTHREAD_EXIT)&&!defined(GC_PTHREAD_EXIT_DECLARED) +#define GC_PTHREAD_EXIT_DECLARED +GC_API void GC_pthread_exit(void*)GC_PTHREAD_EXIT_ATTRIBUTE; +#endif +#ifdef __cplusplus +} +#endif 
+#endif +#if!defined(GC_NO_THREAD_REDIRECTS)&&!defined(GC_USE_LD_WRAP) +#undef pthread_create +#undef pthread_join +#undef pthread_detach +#define pthread_create GC_pthread_create +#define pthread_join GC_pthread_join +#define pthread_detach GC_pthread_detach +#ifndef GC_NO_PTHREAD_SIGMASK +#undef pthread_sigmask +#define pthread_sigmask GC_pthread_sigmask +#endif +#ifndef GC_NO_DLOPEN +#undef dlopen +#define dlopen GC_dlopen +#endif +#ifndef GC_NO_PTHREAD_CANCEL +#undef pthread_cancel +#define pthread_cancel GC_pthread_cancel +#endif +#ifdef GC_HAVE_PTHREAD_EXIT +#undef pthread_exit +#define pthread_exit GC_pthread_exit +#endif +#endif +#endif +#endif +#ifdef __cplusplus +extern "C" { +#endif +#endif +GC_API GC_ATTR_MALLOC void*GC_CALL GC_malloc_many(size_t); +#define GC_NEXT(p)(*(void**)(p)) +typedef int (GC_CALLBACK*GC_has_static_roots_func)( +const char*, +void*, +size_t); +GC_API void GC_CALL GC_register_has_static_roots_callback( +GC_has_static_roots_func); +#if!defined(CPPCHECK)&&!defined(GC_WINDOWS_H_INCLUDED)&&defined(WINAPI) +#define GC_WINDOWS_H_INCLUDED +#endif +#if defined(GC_WIN32_THREADS)&&(!defined(GC_PTHREADS)||defined(GC_BUILD)||defined(GC_WINDOWS_H_INCLUDED)) +#if (!defined(GC_NO_THREAD_DECLS)||defined(GC_BUILD))&&!defined(GC_DONT_INCL_WINDOWS_H) +#ifdef __cplusplus +} +#endif +#if!defined(_WIN32_WCE)&&!defined(__CEGCC__) +#include +#endif +#if defined(GC_BUILD)||!defined(GC_DONT_INCLUDE_WINDOWS_H) +#include +#define GC_WINDOWS_H_INCLUDED +#endif +#ifdef __cplusplus +extern "C" { +#endif +#ifdef GC_UNDERSCORE_STDCALL +#define GC_CreateThread _GC_CreateThread +#define GC_ExitThread _GC_ExitThread +#endif +#ifndef DECLSPEC_NORETURN +#ifdef GC_WINDOWS_H_INCLUDED +#define DECLSPEC_NORETURN +#else +#define DECLSPEC_NORETURN __declspec(noreturn) +#endif +#endif +#if!defined(_UINTPTR_T)&&!defined(_UINTPTR_T_DEFINED)&&!defined(UINTPTR_MAX) +typedef GC_word GC_uintptr_t; +#else +typedef uintptr_t GC_uintptr_t; +#endif +#ifdef _WIN64 +#define GC_WIN32_SIZE_T GC_uintptr_t +#elif defined(GC_WINDOWS_H_INCLUDED) +#define GC_WIN32_SIZE_T DWORD +#else +#define GC_WIN32_SIZE_T unsigned long +#endif +#ifdef GC_INSIDE_DLL +#ifdef GC_UNDERSCORE_STDCALL +#define GC_DllMain _GC_DllMain +#endif +#ifdef GC_WINDOWS_H_INCLUDED +GC_API BOOL WINAPI GC_DllMain(HINSTANCE, +ULONG, +LPVOID); +#else +GC_API int __stdcall GC_DllMain(void*,unsigned long,void*); +#endif +#endif +#ifdef GC_WINDOWS_H_INCLUDED +GC_API HANDLE WINAPI GC_CreateThread( +LPSECURITY_ATTRIBUTES, +GC_WIN32_SIZE_T, +LPTHREAD_START_ROUTINE, +LPVOID,DWORD, +LPDWORD); +GC_API DECLSPEC_NORETURN void WINAPI GC_ExitThread( +DWORD); +#else +struct _SECURITY_ATTRIBUTES; +GC_API void*__stdcall GC_CreateThread(struct _SECURITY_ATTRIBUTES*, +GC_WIN32_SIZE_T, +unsigned long (__stdcall*)(void*), +void*,unsigned long,unsigned long*); +GC_API DECLSPEC_NORETURN void __stdcall GC_ExitThread(unsigned long); +#endif +#if!defined(_WIN32_WCE)&&!defined(__CEGCC__) +GC_API GC_uintptr_t GC_CALL GC_beginthreadex( +void*,unsigned, +unsigned (__stdcall*)(void*), +void*,unsigned, +unsigned*); +GC_API void GC_CALL GC_endthreadex(unsigned); +#endif +#endif +#ifdef GC_WINMAIN_REDIRECT +#define WinMain GC_WinMain +#endif +#define GC_use_DllMain GC_use_threads_discovery +#ifndef GC_NO_THREAD_REDIRECTS +#define CreateThread GC_CreateThread +#define ExitThread GC_ExitThread +#undef _beginthreadex +#define _beginthreadex GC_beginthreadex +#undef _endthreadex +#define _endthreadex GC_endthreadex +#endif +#endif +GC_API void GC_CALL GC_set_force_unmap_on_gcollect(int); 
+GC_API int GC_CALL GC_get_force_unmap_on_gcollect(void); +#if defined(__CYGWIN32__)||defined(__CYGWIN__) +#ifdef __x86_64__ +extern int __data_start__[],__data_end__[]; +extern int __bss_start__[],__bss_end__[]; +#define GC_DATASTART ((GC_word)__data_start__ < (GC_word)__bss_start__?(void*)__data_start__:(void*)__bss_start__) +#define GC_DATAEND ((GC_word)__data_end__ > (GC_word)__bss_end__?(void*)__data_end__:(void*)__bss_end__) +#else +extern int _data_start__[],_data_end__[],_bss_start__[],_bss_end__[]; +#define GC_DATASTART ((GC_word)_data_start__ < (GC_word)_bss_start__?(void*)_data_start__:(void*)_bss_start__) +#define GC_DATAEND ((GC_word)_data_end__ > (GC_word)_bss_end__?(void*)_data_end__:(void*)_bss_end__) +#endif +#define GC_INIT_CONF_ROOTS GC_add_roots(GC_DATASTART,GC_DATAEND);GC_gcollect() +#elif defined(_AIX) +extern int _data[],_end[]; +#define GC_DATASTART ((void*)_data) +#define GC_DATAEND ((void*)_end) +#define GC_INIT_CONF_ROOTS GC_add_roots(GC_DATASTART,GC_DATAEND) +#elif (defined(HOST_ANDROID)||defined(__ANDROID__))&&defined(IGNORE_DYNAMIC_LOADING) +#pragma weak __dso_handle +extern int __dso_handle[]; +GC_API void*GC_CALL GC_find_limit(void*,int); +#define GC_INIT_CONF_ROOTS (void)(__dso_handle!=0?(GC_add_roots(__dso_handle,GC_find_limit(__dso_handle,1)),0):0) +#else +#define GC_INIT_CONF_ROOTS +#endif +#ifdef GC_DONT_EXPAND +#define GC_INIT_CONF_DONT_EXPAND GC_set_dont_expand(1) +#else +#define GC_INIT_CONF_DONT_EXPAND +#endif +#ifdef GC_FORCE_UNMAP_ON_GCOLLECT +#define GC_INIT_CONF_FORCE_UNMAP_ON_GCOLLECT GC_set_force_unmap_on_gcollect(1) +#else +#define GC_INIT_CONF_FORCE_UNMAP_ON_GCOLLECT +#endif +#ifdef GC_DONT_GC +#define GC_INIT_CONF_MAX_RETRIES (void)(GC_dont_gc=1) +#elif defined(GC_MAX_RETRIES)&&!defined(CPPCHECK) +#define GC_INIT_CONF_MAX_RETRIES GC_set_max_retries(GC_MAX_RETRIES) +#else +#define GC_INIT_CONF_MAX_RETRIES +#endif +#if defined(GC_ALLOCD_BYTES_PER_FINALIZER)&&!defined(CPPCHECK) +#define GC_INIT_CONF_ALLOCD_BYTES_PER_FINALIZER GC_set_allocd_bytes_per_finalizer(GC_ALLOCD_BYTES_PER_FINALIZER) +#else +#define GC_INIT_CONF_ALLOCD_BYTES_PER_FINALIZER +#endif +#if defined(GC_FREE_SPACE_DIVISOR)&&!defined(CPPCHECK) +#define GC_INIT_CONF_FREE_SPACE_DIVISOR GC_set_free_space_divisor(GC_FREE_SPACE_DIVISOR) +#else +#define GC_INIT_CONF_FREE_SPACE_DIVISOR +#endif +#if defined(GC_FULL_FREQ)&&!defined(CPPCHECK) +#define GC_INIT_CONF_FULL_FREQ GC_set_full_freq(GC_FULL_FREQ) +#else +#define GC_INIT_CONF_FULL_FREQ +#endif +#if defined(GC_TIME_LIMIT)&&!defined(CPPCHECK) +#define GC_INIT_CONF_TIME_LIMIT GC_set_time_limit(GC_TIME_LIMIT) +#else +#define GC_INIT_CONF_TIME_LIMIT +#endif +#if defined(GC_MARKERS)&&defined(GC_THREADS)&&!defined(CPPCHECK) +#define GC_INIT_CONF_MARKERS GC_set_markers_count(GC_MARKERS) +#else +#define GC_INIT_CONF_MARKERS +#endif +#if defined(GC_SIG_SUSPEND)&&defined(GC_THREADS)&&!defined(CPPCHECK) +#define GC_INIT_CONF_SUSPEND_SIGNAL GC_set_suspend_signal(GC_SIG_SUSPEND) +#else +#define GC_INIT_CONF_SUSPEND_SIGNAL +#endif +#if defined(GC_SIG_THR_RESTART)&&defined(GC_THREADS)&&!defined(CPPCHECK) +#define GC_INIT_CONF_THR_RESTART_SIGNAL GC_set_thr_restart_signal(GC_SIG_THR_RESTART) +#else +#define GC_INIT_CONF_THR_RESTART_SIGNAL +#endif +#if defined(GC_MAXIMUM_HEAP_SIZE)&&!defined(CPPCHECK) +#define GC_INIT_CONF_MAXIMUM_HEAP_SIZE GC_set_max_heap_size(GC_MAXIMUM_HEAP_SIZE) +#else +#define GC_INIT_CONF_MAXIMUM_HEAP_SIZE +#endif +#ifdef GC_IGNORE_WARN +#define GC_INIT_CONF_IGNORE_WARN GC_set_warn_proc(GC_ignore_warn_proc) +#else +#define 
GC_INIT_CONF_IGNORE_WARN +#endif +#if defined(GC_INITIAL_HEAP_SIZE)&&!defined(CPPCHECK) +#define GC_INIT_CONF_INITIAL_HEAP_SIZE { size_t heap_size=GC_get_heap_size();if (heap_size < (GC_INITIAL_HEAP_SIZE))(void)GC_expand_hp((GC_INITIAL_HEAP_SIZE)- heap_size);} +#else +#define GC_INIT_CONF_INITIAL_HEAP_SIZE +#endif +#define GC_INIT(){ GC_INIT_CONF_DONT_EXPAND;GC_INIT_CONF_FORCE_UNMAP_ON_GCOLLECT;GC_INIT_CONF_MAX_RETRIES;GC_INIT_CONF_ALLOCD_BYTES_PER_FINALIZER;GC_INIT_CONF_FREE_SPACE_DIVISOR;GC_INIT_CONF_FULL_FREQ;GC_INIT_CONF_TIME_LIMIT;GC_INIT_CONF_MARKERS;GC_INIT_CONF_SUSPEND_SIGNAL;GC_INIT_CONF_THR_RESTART_SIGNAL;GC_INIT_CONF_MAXIMUM_HEAP_SIZE;GC_init();GC_INIT_CONF_ROOTS;GC_INIT_CONF_IGNORE_WARN;GC_INIT_CONF_INITIAL_HEAP_SIZE;} +GC_API void GC_CALL GC_win32_free_heap(void); +#if defined(__SYMBIAN32__) +void GC_init_global_static_roots(void); +#endif +#if defined(_AMIGA)&&!defined(GC_AMIGA_MAKINGLIB) +void*GC_amiga_realloc(void*,size_t); +#define GC_realloc(a,b)GC_amiga_realloc(a,b) +void GC_amiga_set_toany(void (*)(void)); +extern int GC_amiga_free_space_divisor_inc; +extern void*(*GC_amiga_allocwrapper_do)(size_t,void*(GC_CALL*)(size_t)); +#define GC_malloc(a)(*GC_amiga_allocwrapper_do)(a,GC_malloc) +#define GC_malloc_atomic(a)(*GC_amiga_allocwrapper_do)(a,GC_malloc_atomic) +#define GC_malloc_uncollectable(a)(*GC_amiga_allocwrapper_do)(a,GC_malloc_uncollectable) +#define GC_malloc_atomic_uncollectable(a)(*GC_amiga_allocwrapper_do)(a,GC_malloc_atomic_uncollectable) +#define GC_malloc_ignore_off_page(a)(*GC_amiga_allocwrapper_do)(a,GC_malloc_ignore_off_page) +#define GC_malloc_atomic_ignore_off_page(a)(*GC_amiga_allocwrapper_do)(a,GC_malloc_atomic_ignore_off_page) +#endif +#ifdef __cplusplus +} +#endif +#endif +#endif +#include +#if!defined(sony_news) +#include +#endif +#ifdef DGUX +#include +#include +#include +#endif +#ifdef BSD_TIME +#include +#include +#include +#endif +#ifdef PARALLEL_MARK +#define AO_REQUIRE_CAS +#if!defined(__GNUC__)&&!defined(AO_ASSUME_WINDOWS98) +#define AO_ASSUME_WINDOWS98 +#endif +#endif +#ifndef GC_TINY_FL_H +#define GC_TINY_FL_H +#ifndef GC_GRANULE_BYTES +#if defined(__LP64__)||defined (_LP64)||defined(_WIN64)||defined(__s390x__)||(defined(__x86_64__)&&!defined(__ILP32__))||defined(__alpha__)||defined(__powerpc64__)||defined(__arch64__) +#define GC_GRANULE_BYTES 16 +#define GC_GRANULE_WORDS 2 +#else +#define GC_GRANULE_BYTES 8 +#define GC_GRANULE_WORDS 2 +#endif +#endif +#if GC_GRANULE_WORDS==2 +#define GC_WORDS_TO_GRANULES(n)((n)>>1) +#else +#define GC_WORDS_TO_GRANULES(n)((n)*sizeof(void*)/GC_GRANULE_BYTES) +#endif +#ifndef GC_TINY_FREELISTS +#if GC_GRANULE_BYTES==16 +#define GC_TINY_FREELISTS 25 +#else +#define GC_TINY_FREELISTS 33 +#endif +#endif +#define GC_RAW_BYTES_FROM_INDEX(i)((i)*GC_GRANULE_BYTES) +#endif +#ifndef GC_MARK_H +#define GC_MARK_H +#ifndef GC_H +#endif +#ifdef __cplusplus +extern "C" { +#endif +#define GC_PROC_BYTES 100 +#if defined(GC_BUILD)||defined(NOT_GCBUILD) +struct GC_ms_entry; +#else +struct GC_ms_entry { void*opaque;}; +#endif +typedef struct GC_ms_entry*(*GC_mark_proc)(GC_word*, +struct GC_ms_entry*, +struct GC_ms_entry*, +GC_word); +#define GC_LOG_MAX_MARK_PROCS 6 +#define GC_MAX_MARK_PROCS (1<=(GC_word)GC_least_plausible_heap_addr&&(GC_word)(obj)<=(GC_word)GC_greatest_plausible_heap_addr?GC_mark_and_push(obj,msp,lim,src):(msp)) +GC_API GC_ATTR_CONST size_t GC_CALL GC_get_debug_header_size(void); +#define GC_USR_PTR_FROM_BASE(p)((void*)((char*)(p)+GC_get_debug_header_size())) +GC_API GC_ATTR_DEPRECATED +#ifdef GC_BUILD +const 
+#endif +size_t GC_debug_header_size; +GC_API void**GC_CALL GC_new_free_list(void); +GC_API void**GC_CALL GC_new_free_list_inner(void); +GC_API unsigned GC_CALL GC_new_kind(void**, +GC_word, +int, +int)GC_ATTR_NONNULL(1); +GC_API unsigned GC_CALL GC_new_kind_inner(void**, +GC_word, +int, +int)GC_ATTR_NONNULL(1); +GC_API unsigned GC_CALL GC_new_proc(GC_mark_proc); +GC_API unsigned GC_CALL GC_new_proc_inner(GC_mark_proc); +GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1)void*GC_CALL GC_generic_malloc( +size_t, +int); +GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1)void*GC_CALL +GC_generic_malloc_ignore_off_page( +size_t,int); +GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1)void*GC_CALL +GC_generic_malloc_uncollectable( +size_t,int); +GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1)void*GC_CALL +GC_generic_or_special_malloc( +size_t,int); +GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1)void*GC_CALL +GC_debug_generic_or_special_malloc( +size_t,int, +GC_EXTRA_PARAMS); +#ifdef GC_DEBUG +#define GC_GENERIC_OR_SPECIAL_MALLOC(sz,knd)GC_debug_generic_or_special_malloc(sz,knd,GC_EXTRAS) +#else +#define GC_GENERIC_OR_SPECIAL_MALLOC(sz,knd)GC_generic_or_special_malloc(sz,knd) +#endif +GC_API int GC_CALL GC_get_kind_and_size(const void*,size_t*) +GC_ATTR_NONNULL(1); +typedef void (GC_CALLBACK*GC_describe_type_fn)(void*, +char*); +#define GC_TYPE_DESCR_LEN 40 +GC_API void GC_CALL GC_register_describe_type_fn(int, +GC_describe_type_fn); +GC_API void*GC_CALL GC_clear_stack(void*); +typedef void (GC_CALLBACK*GC_start_callback_proc)(void); +GC_API void GC_CALL GC_set_start_callback(GC_start_callback_proc); +GC_API GC_start_callback_proc GC_CALL GC_get_start_callback(void); +GC_API int GC_CALL GC_is_marked(const void*)GC_ATTR_NONNULL(1); +GC_API void GC_CALL GC_clear_mark_bit(const void*)GC_ATTR_NONNULL(1); +GC_API void GC_CALL GC_set_mark_bit(const void*)GC_ATTR_NONNULL(1); +GC_API void GC_CALL GC_push_all(void*,void*); +GC_API void GC_CALL GC_push_all_eager(void*,void*); +GC_API void GC_CALL GC_push_conditional(void*,void*, +int); +GC_API void GC_CALL GC_push_finalizer_structures(void); +typedef void (GC_CALLBACK*GC_push_other_roots_proc)(void); +GC_API void GC_CALL GC_set_push_other_roots(GC_push_other_roots_proc); +GC_API GC_push_other_roots_proc GC_CALL GC_get_push_other_roots(void); +typedef void (GC_CALLBACK*GC_reachable_object_proc)(void*, +size_t, +void*); +GC_API void GC_CALL GC_enumerate_reachable_objects_inner( +GC_reachable_object_proc, +void*)GC_ATTR_NONNULL(1); +GC_API int GC_CALL GC_is_tmp_root(void*); +GC_API void GC_CALL GC_print_trace(GC_word); +GC_API void GC_CALL GC_print_trace_inner(GC_word); +#ifdef __cplusplus +} +#endif +#endif +typedef GC_word word; +typedef GC_signed_word signed_word; +typedef unsigned int unsigned32; +typedef int GC_bool; +#define TRUE 1 +#define FALSE 0 +#ifndef PTR_T_DEFINED +typedef char*ptr_t; +#define PTR_T_DEFINED +#endif +#ifndef SIZE_MAX +#include +#endif +#if defined(SIZE_MAX)&&!defined(CPPCHECK) +#define GC_SIZE_MAX ((size_t)SIZE_MAX) +#else +#define GC_SIZE_MAX (~(size_t)0) +#endif +#if GC_GNUC_PREREQ(3,0)&&!defined(LINT2) +#define EXPECT(expr,outcome)__builtin_expect(expr,outcome) +#else +#define EXPECT(expr,outcome)(expr) +#endif +#define SIZET_SAT_ADD(a,b)(EXPECT((a)< GC_SIZE_MAX - (b),TRUE)?(a)+(b):GC_SIZE_MAX) +#ifndef GCCONFIG_H +#define GCCONFIG_H +#ifdef CPPCHECK +#undef CLOCKS_PER_SEC +#undef FIXUP_POINTER +#undef POINTER_MASK +#undef POINTER_SHIFT +#undef REDIRECT_REALLOC +#undef _MAX_PATH +#endif +#ifndef PTR_T_DEFINED +typedef char*ptr_t; +#define PTR_T_DEFINED 
+#endif +#if!defined(sony_news) +#include +#endif +#ifdef __cplusplus +#define EXTERN_C_BEGIN extern "C" { +#define EXTERN_C_END } +#else +#define EXTERN_C_BEGIN +#define EXTERN_C_END +#endif +EXTERN_C_BEGIN +#if defined(__clang__)&&defined(__clang_major__) +#define GC_CLANG_PREREQ(major,minor)((__clang_major__<<16)+__clang_minor__>=((major)<<16)+(minor)) +#define GC_CLANG_PREREQ_FULL(major,minor,patchlevel)(GC_CLANG_PREREQ(major,(minor)+1)||(__clang_major__==(major)&&__clang_minor__==(minor)&&__clang_patchlevel__>=(patchlevel))) +#else +#define GC_CLANG_PREREQ(major,minor)0 +#define GC_CLANG_PREREQ_FULL(major,minor,patchlevel)0 +#endif +#ifdef LINT2 +#define COVERT_DATAFLOW(w)(~(GC_word)(w)^(~(GC_word)0)) +#else +#define COVERT_DATAFLOW(w)((GC_word)(w)) +#endif +#if defined(__ANDROID__)&&!defined(HOST_ANDROID) +#define HOST_ANDROID 1 +#endif +#if defined(TIZEN)&&!defined(HOST_TIZEN) +#define HOST_TIZEN 1 +#endif +#if defined(__SYMBIAN32__)&&!defined(SYMBIAN) +#define SYMBIAN +#ifdef __WINS__ +#pragma data_seg(".data2") +#endif +#endif +#if (defined(linux)||defined(__linux__)||defined(HOST_ANDROID))&&!defined(LINUX)&&!defined(__native_client__) +#define LINUX +#endif +#if defined(__NetBSD__) +#define NETBSD +#endif +#if defined(__OpenBSD__) +#define OPENBSD +#endif +#if (defined(__FreeBSD__)||defined(__DragonFly__)||defined(__FreeBSD_kernel__))&&!defined(FREEBSD)&&!defined(SN_TARGET_ORBIS) +#define FREEBSD +#endif +#if defined(macosx)||(defined(__APPLE__)&&defined(__MACH__)) +#define DARWIN +EXTERN_C_END +#include +EXTERN_C_BEGIN +#endif +#if defined(__native_client__) +#define NACL +#if!defined(__portable_native_client__)&&!defined(__arm__) +#define I386 +#define mach_type_known +#else +#endif +#endif +#if defined(__aarch64__) +#define AARCH64 +#if!defined(LINUX)&&!defined(DARWIN)&&!defined(FREEBSD)&&!defined(NETBSD)&&!defined(NN_BUILD_TARGET_PLATFORM_NX)&&!defined(OPENBSD) +#define NOSYS +#define mach_type_known +#endif +#endif +#if defined(__arm)||defined(__arm__)||defined(__thumb__) +#define ARM32 +#if defined(NACL) +#define mach_type_known +#elif!defined(LINUX)&&!defined(NETBSD)&&!defined(FREEBSD)&&!defined(OPENBSD)&&!defined(DARWIN)&&!defined(_WIN32)&&!defined(__CEGCC__)&&!defined(NN_PLATFORM_CTR)&&!defined(SN_TARGET_ORBIS)&&!defined(SN_TARGET_PSP2)&&!defined(SYMBIAN) +#define NOSYS +#define mach_type_known +#endif +#endif +#if defined(sun)&&defined(mc68000)&&!defined(CPPCHECK) +#error SUNOS4 no longer supported +#endif +#if defined(hp9000s300)&&!defined(CPPCHECK) +#error M68K based HP machines no longer supported +#endif +#if defined(OPENBSD)&&defined(m68k) +#define M68K +#define mach_type_known +#endif +#if defined(OPENBSD)&&defined(__sparc__) +#define SPARC +#define mach_type_known +#endif +#if defined(OPENBSD)&&defined(__arm__) +#define ARM32 +#define mach_type_known +#endif +#if defined(OPENBSD)&&defined(__aarch64__) +#define AARCH64 +#define mach_type_known +#endif +#if defined(OPENBSD)&&defined(__sh__) +#define SH +#define mach_type_known +#endif +#if defined(NETBSD)&&(defined(m68k)||defined(__m68k__)) +#define M68K +#define mach_type_known +#endif +#if defined(NETBSD)&&defined(__powerpc__) +#define POWERPC +#define mach_type_known +#endif +#if defined(NETBSD)&&(defined(__arm32__)||defined(__arm__)) +#define ARM32 +#define mach_type_known +#endif +#if defined(NETBSD)&&defined(__aarch64__) +#define AARCH64 +#define mach_type_known +#endif +#if defined(NETBSD)&&defined(__sh__) +#define SH +#define mach_type_known +#endif +#if defined(vax)||defined(__vax__) +#define VAX +#ifdef 
ultrix +#define ULTRIX +#else +#define BSD +#endif +#define mach_type_known +#endif +#if defined(NETBSD)&&defined(__vax__) +#define VAX +#define mach_type_known +#endif +#if defined(mips)||defined(__mips)||defined(_mips) +#define MIPS +#if defined(nec_ews)||defined(_nec_ews) +#define EWS4800 +#endif +#if!defined(LINUX)&&!defined(EWS4800)&&!defined(NETBSD)&&!defined(OPENBSD) +#if defined(ultrix)||defined(__ultrix) +#define ULTRIX +#else +#define IRIX5 +#endif +#endif +#if defined(NETBSD)&&defined(__MIPSEL__) +#undef ULTRIX +#endif +#define mach_type_known +#endif +#if defined(__QNX__) +#define I386 +#define mach_type_known +#endif +#if defined(__NIOS2__)||defined(__NIOS2)||defined(__nios2__) +#define NIOS2 +#define mach_type_known +#endif +#if defined(__or1k__) +#define OR1K +#define mach_type_known +#endif +#if defined(DGUX)&&(defined(i386)||defined(__i386__)) +#define I386 +#ifndef _USING_DGUX +#define _USING_DGUX +#endif +#define mach_type_known +#endif +#if defined(sequent)&&(defined(i386)||defined(__i386__)) +#define I386 +#define SEQUENT +#define mach_type_known +#endif +#if (defined(sun)||defined(__sun))&&(defined(i386)||defined(__i386__)) +#define I386 +#define SOLARIS +#define mach_type_known +#endif +#if (defined(sun)||defined(__sun))&&defined(__amd64) +#define X86_64 +#define SOLARIS +#define mach_type_known +#endif +#if (defined(__OS2__)||defined(__EMX__))&&defined(__32BIT__) +#define I386 +#define OS2 +#define mach_type_known +#endif +#if defined(ibm032)&&!defined(CPPCHECK) +#error IBM PC/RT no longer supported +#endif +#if (defined(sun)||defined(__sun))&&(defined(sparc)||defined(__sparc)) +EXTERN_C_END +#include +EXTERN_C_BEGIN +#define SPARC +#define SOLARIS +#define mach_type_known +#elif defined(sparc)&&defined(unix)&&!defined(sun)&&!defined(linux)&&!defined(FREEBSD)&&!defined(NETBSD)&&!defined(OPENBSD) +#define SPARC +#define DRSNX +#define mach_type_known +#endif +#if defined(_IBMR2) +#define POWERPC +#define AIX +#define mach_type_known +#endif +#if defined(NETBSD)&&defined(__sparc__) +#define SPARC +#define mach_type_known +#endif +#if defined(_M_XENIX)&&defined(_M_SYSV)&&defined(_M_I386) +#define I386 +#if defined(_SCO_ELF) +#define SCO_ELF +#else +#define SCO +#endif +#define mach_type_known +#endif +#if defined(_AUX_SOURCE)&&!defined(CPPCHECK) +#error A/UX no longer supported +#endif +#if defined(_PA_RISC1_0)||defined(_PA_RISC1_1)||defined(_PA_RISC2_0)||defined(hppa)||defined(__hppa__) +#define HP_PA +#if!defined(LINUX)&&!defined(HPUX)&&!defined(OPENBSD) +#define HPUX +#endif +#define mach_type_known +#endif +#if defined(__ia64)&&(defined(_HPUX_SOURCE)||defined(__HP_aCC)) +#define IA64 +#ifndef HPUX +#define HPUX +#endif +#define mach_type_known +#endif +#if (defined(__BEOS__)||defined(__HAIKU__))&&defined(_X86_) +#define I386 +#define HAIKU +#define mach_type_known +#endif +#if defined(__HAIKU__)&&(defined(__amd64__)||defined(__x86_64__)) +#define X86_64 +#define HAIKU +#define mach_type_known +#endif +#if defined(OPENBSD)&&defined(__amd64__) +#define X86_64 +#define mach_type_known +#endif +#if defined(LINUX)&&(defined(i386)||defined(__i386__)) +#define I386 +#define mach_type_known +#endif +#if defined(LINUX)&&defined(__x86_64__) +#define X86_64 +#define mach_type_known +#endif +#if defined(LINUX)&&(defined(__ia64__)||defined(__ia64)) +#define IA64 +#define mach_type_known +#endif +#if defined(LINUX)&&defined(__aarch64__) +#define AARCH64 +#define mach_type_known +#endif +#if defined(LINUX)&&(defined(__arm)||defined(__arm__)) +#define ARM32 +#define 
mach_type_known +#endif +#if defined(LINUX)&&defined(__cris__) +#ifndef CRIS +#define CRIS +#endif +#define mach_type_known +#endif +#if defined(LINUX)&&(defined(powerpc)||defined(__powerpc__)||defined(powerpc64)||defined(__powerpc64__)) +#define POWERPC +#define mach_type_known +#endif +#if defined(LINUX)&&defined(__mc68000__) +#define M68K +#define mach_type_known +#endif +#if defined(LINUX)&&(defined(sparc)||defined(__sparc__)) +#define SPARC +#define mach_type_known +#endif +#if defined(LINUX)&&defined(__sh__) +#define SH +#define mach_type_known +#endif +#if defined(LINUX)&&defined(__avr32__) +#define AVR32 +#define mach_type_known +#endif +#if defined(LINUX)&&defined(__m32r__) +#define M32R +#define mach_type_known +#endif +#if defined(__alpha)||defined(__alpha__) +#define ALPHA +#if!defined(LINUX)&&!defined(NETBSD)&&!defined(OPENBSD)&&!defined(FREEBSD) +#define OSF1 +#endif +#define mach_type_known +#endif +#if defined(_AMIGA)&&!defined(AMIGA) +#define AMIGA +#endif +#ifdef AMIGA +#define M68K +#define mach_type_known +#endif +#if defined(THINK_C)||(defined(__MWERKS__)&&!defined(__powerc)&&!defined(SYMBIAN)) +#define M68K +#define MACOS +#define mach_type_known +#endif +#if defined(__MWERKS__)&&defined(__powerc)&&!defined(__MACH__)&&!defined(SYMBIAN) +#define POWERPC +#define MACOS +#define mach_type_known +#endif +#if defined(OPENBSD)&&defined(__powerpc__) +#define POWERPC +#define mach_type_known +#endif +#if defined(DARWIN) +#if defined(__ppc__)||defined(__ppc64__) +#define POWERPC +#define mach_type_known +#elif defined(__x86_64__)||defined(__x86_64) +#define X86_64 +#define mach_type_known +#elif defined(__i386__) +#define I386 +#define mach_type_known +#elif defined(__arm__) +#define ARM32 +#define mach_type_known +#elif defined(__aarch64__) +#define AARCH64 +#define mach_type_known +#endif +#endif +#if defined(__rtems__)&&(defined(i386)||defined(__i386__)) +#define I386 +#define RTEMS +#define mach_type_known +#endif +#if defined(NeXT)&&defined(mc68000) +#define M68K +#define NEXT +#define mach_type_known +#endif +#if defined(NeXT)&&(defined(i386)||defined(__i386__)) +#define I386 +#define NEXT +#define mach_type_known +#endif +#if defined(OPENBSD)&&(defined(i386)||defined(__i386__)) +#define I386 +#define mach_type_known +#endif +#if defined(NETBSD)&&(defined(i386)||defined(__i386__)) +#define I386 +#define mach_type_known +#endif +#if defined(NETBSD)&&defined(__x86_64__) +#define X86_64 +#define mach_type_known +#endif +#if defined(FREEBSD)&&(defined(i386)||defined(__i386__)) +#define I386 +#define mach_type_known +#endif +#if (defined(FREEBSD)||defined(SN_TARGET_ORBIS))&&(defined(__amd64__)||defined(__x86_64__)) +#define X86_64 +#define mach_type_known +#endif +#if defined(FREEBSD)&&defined(__sparc__) +#define SPARC +#define mach_type_known +#endif +#if defined(FREEBSD)&&(defined(powerpc)||defined(__powerpc__)) +#define POWERPC +#define mach_type_known +#endif +#if defined(FREEBSD)&&defined(__arm__) +#define ARM32 +#define mach_type_known +#endif +#if defined(FREEBSD)&&defined(__aarch64__) +#define AARCH64 +#define mach_type_known +#endif +#if defined(FREEBSD)&&(defined(mips)||defined(__mips)||defined(_mips)) +#define MIPS +#define mach_type_known +#endif +#if defined(bsdi)&&(defined(i386)||defined(__i386__)) +#define I386 +#define BSDI +#define mach_type_known +#endif +#if!defined(mach_type_known)&&defined(__386BSD__) +#define I386 +#define THREE86BSD +#define mach_type_known +#endif +#if defined(_CX_UX)&&defined(_M88K) +#define M88K +#define CX_UX +#define 
mach_type_known +#endif +#if defined(DGUX)&&defined(m88k) +#define M88K +#define mach_type_known +#endif +#if defined(_WIN32_WCE)||defined(__CEGCC__)||defined(__MINGW32CE__) +#if defined(SH3)||defined(SH4) +#define SH +#endif +#if defined(x86)||defined(__i386__) +#define I386 +#endif +#if defined(_M_ARM)||defined(ARM)||defined(_ARM_) +#define ARM32 +#endif +#define MSWINCE +#define mach_type_known +#else +#if ((defined(_MSDOS)||defined(_MSC_VER))&&(_M_IX86>=300))||(defined(_WIN32)&&!defined(__CYGWIN32__)&&!defined(__CYGWIN__)&&!defined(__INTERIX)&&!defined(SYMBIAN)) +#if defined(__LP64__)||defined(_M_X64) +#define X86_64 +#elif defined(_M_ARM) +#define ARM32 +#elif defined(_M_ARM64) +#define AARCH64 +#else +#define I386 +#endif +#ifdef _XBOX_ONE +#define MSWIN_XBOX1 +#else +#ifndef MSWIN32 +#define MSWIN32 +#endif +#if defined(WINAPI_FAMILY)&&(WINAPI_FAMILY==WINAPI_FAMILY_APP) +#define MSWINRT_FLAVOR +#endif +#endif +#define mach_type_known +#endif +#if defined(_MSC_VER)&&defined(_M_IA64) +#define IA64 +#define MSWIN32 +#endif +#endif +#if defined(__DJGPP__) +#define I386 +#ifndef DJGPP +#define DJGPP +#endif +#define mach_type_known +#endif +#if defined(__CYGWIN32__)||defined(__CYGWIN__) +#if defined(__LP64__) +#define X86_64 +#else +#define I386 +#endif +#define CYGWIN32 +#define mach_type_known +#endif +#if defined(__INTERIX) +#define I386 +#define INTERIX +#define mach_type_known +#endif +#if defined(__MINGW32__)&&!defined(mach_type_known) +#define I386 +#define MSWIN32 +#define mach_type_known +#endif +#if defined(__BORLANDC__) +#define I386 +#define MSWIN32 +#define mach_type_known +#endif +#if defined(_UTS)&&!defined(mach_type_known) +#define S370 +#define UTS4 +#define mach_type_known +#endif +#if defined(__pj__)&&!defined(CPPCHECK) +#error PicoJava no longer supported +#endif +#if defined(__embedded__)&&defined(PPC) +#define POWERPC +#define NOSYS +#define mach_type_known +#endif +#if defined(__WATCOMC__)&&defined(__386__) +#define I386 +#if!defined(OS2)&&!defined(MSWIN32)&&!defined(DOS4GW) +#if defined(__OS2__) +#define OS2 +#else +#if defined(__WINDOWS_386__)||defined(__NT__) +#define MSWIN32 +#else +#define DOS4GW +#endif +#endif +#endif +#define mach_type_known +#endif +#if defined(__s390__)&&defined(LINUX) +#define S390 +#define mach_type_known +#endif +#if defined(__GNU__) +#if defined(__i386__) +#define HURD +#define I386 +#define mach_type_known +#endif +#endif +#if defined(__TANDEM) +#define MIPS +#define NONSTOP +#define mach_type_known +#endif +#if defined(__arc__)&&defined(LINUX) +#define ARC +#define mach_type_known +#endif +#if defined(__hexagon__)&&defined(LINUX) +#define HEXAGON +#define mach_type_known +#endif +#if defined(__tile__)&&defined(LINUX) +#ifdef __tilegx__ +#define TILEGX +#else +#define TILEPRO +#endif +#define mach_type_known +#endif +#if defined(__riscv)&&(defined(FREEBSD)||defined(LINUX)) +#define RISCV +#define mach_type_known +#endif +#if defined(SN_TARGET_PSP2) +#define mach_type_known +#endif +#if defined(NN_PLATFORM_CTR) +#define mach_type_known +#endif +#if defined(NN_BUILD_TARGET_PLATFORM_NX) +#define NINTENDO_SWITCH +#define mach_type_known +#endif +#if defined(SYMBIAN) +#define mach_type_known +#endif +#if defined(__EMSCRIPTEN__) +#define I386 +#define mach_type_known +#endif +#if!defined(mach_type_known)&&!defined(CPPCHECK) +#error The collector has not been ported to this machine/OS combination +#endif +#if 
GC_GNUC_PREREQ(2,8)&&!defined(__INTEL_COMPILER)&&!defined(__PATHCC__)&&!defined(__FUJITSU)&&!(defined(POWERPC)&&defined(DARWIN))&&!defined(RTEMS)&&!defined(__ARMCC_VERSION)&&!defined(__clang__) +#define HAVE_BUILTIN_UNWIND_INIT +#endif +#ifdef SYMBIAN +#define MACH_TYPE "SYMBIAN" +#define OS_TYPE "SYMBIAN" +#define CPP_WORDSZ 32 +#define ALIGNMENT 4 +#define DATASTART (ptr_t)ALIGNMENT +#define DATAEND (ptr_t)ALIGNMENT +#endif +#define STACK_GRAN 0x1000000 +#ifdef M68K +#define MACH_TYPE "M68K" +#define ALIGNMENT 2 +#ifdef OPENBSD +#define OS_TYPE "OPENBSD" +#define HEURISTIC2 +#ifdef __ELF__ +extern ptr_t GC_data_start; +#define DATASTART GC_data_start +#define DYNAMIC_LOADING +#else +extern char etext[]; +#define DATASTART ((ptr_t)(etext)) +#endif +#endif +#ifdef NETBSD +#define OS_TYPE "NETBSD" +#define HEURISTIC2 +#ifdef __ELF__ +extern ptr_t GC_data_start; +#define DATASTART GC_data_start +#define DYNAMIC_LOADING +#else +extern char etext[]; +#define DATASTART ((ptr_t)(etext)) +#endif +#endif +#ifdef LINUX +#define OS_TYPE "LINUX" +#define LINUX_STACKBOTTOM +#define COUNT_UNMAPPED_REGIONS +#if!defined(REDIRECT_MALLOC) +#define MPROTECT_VDB +#endif +#ifdef __ELF__ +#define DYNAMIC_LOADING +EXTERN_C_END +#include +EXTERN_C_BEGIN +#if defined(__GLIBC__)&&__GLIBC__>=2 +#define SEARCH_FOR_DATA_START +#else +extern char**__environ; +#define DATASTART ((ptr_t)(&__environ)) +#endif +extern int _end[]; +#define DATAEND ((ptr_t)(_end)) +#else +extern int etext[]; +#define DATASTART ((ptr_t)((((word)(etext))+0xfff)&~0xfff)) +#endif +#endif +#ifdef AMIGA +#define OS_TYPE "AMIGA" +#define DATAEND +#define GETPAGESIZE()4096 +#endif +#ifdef MACOS +#ifndef __LOWMEM__ +EXTERN_C_END +#include +EXTERN_C_BEGIN +#endif +#define OS_TYPE "MACOS" +#define STACKBOTTOM ((ptr_t)LMGetCurStackBase()) +#define DATAEND +#define GETPAGESIZE()4096 +#endif +#ifdef NEXT +#define OS_TYPE "NEXT" +#define DATASTART ((ptr_t)get_etext()) +#define DATASTART_IS_FUNC +#define STACKBOTTOM ((ptr_t)0x4000000) +#define DATAEND +#endif +#endif +#ifdef POWERPC +#define MACH_TYPE "POWERPC" +#ifdef MACOS +#define ALIGNMENT 2 +#ifndef __LOWMEM__ +EXTERN_C_END +#include +EXTERN_C_BEGIN +#endif +#define OS_TYPE "MACOS" +#define STACKBOTTOM ((ptr_t)LMGetCurStackBase()) +#define DATAEND +#endif +#ifdef LINUX +#if defined(__powerpc64__) +#define ALIGNMENT 8 +#define CPP_WORDSZ 64 +#ifndef HBLKSIZE +#define HBLKSIZE 4096 +#endif +#else +#define ALIGNMENT 4 +#endif +#define OS_TYPE "LINUX" +#if defined(__bg__) +#define HEURISTIC2 +#define NO_PTHREAD_GETATTR_NP +#else +#define LINUX_STACKBOTTOM +#endif +#define COUNT_UNMAPPED_REGIONS +#define DYNAMIC_LOADING +#define SEARCH_FOR_DATA_START +extern int _end[]; +#define DATAEND ((ptr_t)(_end)) +#endif +#ifdef DARWIN +#define OS_TYPE "DARWIN" +#define DYNAMIC_LOADING +#if defined(__ppc64__) +#define ALIGNMENT 8 +#define CPP_WORDSZ 64 +#define STACKBOTTOM ((ptr_t)0x7fff5fc00000) +#define CACHE_LINE_SIZE 64 +#ifndef HBLKSIZE +#define HBLKSIZE 4096 +#endif +#else +#define ALIGNMENT 4 +#define STACKBOTTOM ((ptr_t)0xc0000000) +#endif +#define DATASTART ((ptr_t)get_etext()) +#define DATAEND ((ptr_t)get_end()) +#define USE_MMAP_ANON +#define MPROTECT_VDB +EXTERN_C_END +#include +EXTERN_C_BEGIN +#define GETPAGESIZE()(unsigned)getpagesize() +#if defined(USE_PPC_PREFETCH)&&defined(__GNUC__) +#define PREFETCH(x)__asm__ __volatile__ ("dcbt 0,%0"::"r" ((const void*)(x))) +#define GC_PREFETCH_FOR_WRITE(x)__asm__ __volatile__ ("dcbtst 0,%0"::"r" ((const void*)(x))) +#endif +#define NO_PTHREAD_TRYLOCK +#endif 
+#ifdef OPENBSD +#define OS_TYPE "OPENBSD" +#if defined(__powerpc64__) +#define ALIGNMENT 8 +#define CPP_WORDSZ 64 +#else +#define ALIGNMENT 4 +#endif +#ifndef GC_OPENBSD_THREADS +#define HEURISTIC2 +#endif +extern int __data_start[]; +#define DATASTART ((ptr_t)__data_start) +extern int _end[]; +#define DATAEND ((ptr_t)(&_end)) +#define DYNAMIC_LOADING +#endif +#ifdef FREEBSD +#if defined(__powerpc64__) +#define ALIGNMENT 8 +#define CPP_WORDSZ 64 +#ifndef HBLKSIZE +#define HBLKSIZE 4096 +#endif +#else +#define ALIGNMENT 4 +#endif +#define OS_TYPE "FREEBSD" +#ifndef GC_FREEBSD_THREADS +#define MPROTECT_VDB +#endif +#define SIG_SUSPEND SIGUSR1 +#define SIG_THR_RESTART SIGUSR2 +#define FREEBSD_STACKBOTTOM +#define DYNAMIC_LOADING +extern char etext[]; +#define DATASTART GC_FreeBSDGetDataStart(0x1000,(ptr_t)etext) +#define DATASTART_USES_BSDGETDATASTART +#endif +#ifdef NETBSD +#define ALIGNMENT 4 +#define OS_TYPE "NETBSD" +#define HEURISTIC2 +extern ptr_t GC_data_start; +#define DATASTART GC_data_start +#define DYNAMIC_LOADING +#endif +#ifdef SN_TARGET_PS3 +#define OS_TYPE "SN_TARGET_PS3" +#define NO_GETENV +#define CPP_WORDSZ 32 +#define ALIGNMENT 4 +extern int _end[]; +extern int __bss_start; +#define DATASTART ((ptr_t)(__bss_start)) +#define DATAEND ((ptr_t)(_end)) +#define STACKBOTTOM ((ptr_t)ps3_get_stack_bottom()) +#define NO_PTHREAD_TRYLOCK +#endif +#ifdef AIX +#define OS_TYPE "AIX" +#undef ALIGNMENT +#undef IA64 +#ifdef __64BIT__ +#define ALIGNMENT 8 +#define CPP_WORDSZ 64 +#define STACKBOTTOM ((ptr_t)0x1000000000000000) +#else +#define ALIGNMENT 4 +#define CPP_WORDSZ 32 +#define STACKBOTTOM ((ptr_t)((ulong)&errno)) +#endif +#define USE_MMAP_ANON +extern int _data[],_end[]; +#define DATASTART ((ptr_t)((ulong)_data)) +#define DATAEND ((ptr_t)((ulong)_end)) +extern int errno; +#define DYNAMIC_LOADING +#endif +#ifdef NOSYS +#define ALIGNMENT 4 +#define OS_TYPE "NOSYS" +extern void __end[],__dso_handle[]; +#define DATASTART ((ptr_t)__dso_handle) +#define DATAEND ((ptr_t)(__end)) +#undef STACK_GRAN +#define STACK_GRAN 0x10000000 +#define HEURISTIC1 +#endif +#endif +#ifdef NACL +#define OS_TYPE "NACL" +#if defined(__GLIBC__) +#define DYNAMIC_LOADING +#endif +#define DATASTART ((ptr_t)0x10020000) +extern int _end[]; +#define DATAEND ((ptr_t)_end) +#undef STACK_GRAN +#define STACK_GRAN 0x10000 +#define HEURISTIC1 +#define NO_PTHREAD_GETATTR_NP +#define USE_MMAP_ANON +#define GETPAGESIZE()65536 +#define MAX_NACL_GC_THREADS 1024 +#endif +#ifdef VAX +#define MACH_TYPE "VAX" +#define ALIGNMENT 4 +extern char etext[]; +#define DATASTART ((ptr_t)(etext)) +#ifdef BSD +#define OS_TYPE "BSD" +#define HEURISTIC1 +#endif +#ifdef ULTRIX +#define OS_TYPE "ULTRIX" +#define STACKBOTTOM ((ptr_t)0x7fffc800) +#endif +#endif +#ifdef SPARC +#define MACH_TYPE "SPARC" +#if defined(__arch64__)||defined(__sparcv9) +#define ALIGNMENT 8 +#define CPP_WORDSZ 64 +#define ELF_CLASS ELFCLASS64 +#else +#define ALIGNMENT 4 +#define CPP_WORDSZ 32 +#endif +#ifdef SOLARIS +#define OS_TYPE "SOLARIS" +extern int _etext[]; +extern int _end[]; +ptr_t GC_SysVGetDataStart(size_t,ptr_t); +#define DATASTART GC_SysVGetDataStart(0x10000,(ptr_t)_etext) +#define DATASTART_IS_FUNC +#define DATAEND ((ptr_t)(_end)) +#if!defined(USE_MMAP)&&defined(REDIRECT_MALLOC) +#define USE_MMAP 1 +#endif +#ifdef USE_MMAP +#define HEAP_START (ptr_t)0x40000000 +#else +#define HEAP_START DATAEND +#endif +#define PROC_VDB +EXTERN_C_END +#include +#include +EXTERN_C_BEGIN +#ifdef USERLIMIT +#define STACKBOTTOM ((ptr_t)USRSTACK) +#else +#define HEURISTIC2 +#endif 
+#define GETPAGESIZE()(unsigned)sysconf(_SC_PAGESIZE) +#define DYNAMIC_LOADING +#endif +#ifdef DRSNX +#define OS_TYPE "DRSNX" +extern int etext[]; +ptr_t GC_SysVGetDataStart(size_t,ptr_t); +#define DATASTART GC_SysVGetDataStart(0x10000,(ptr_t)etext) +#define DATASTART_IS_FUNC +#define MPROTECT_VDB +#define STACKBOTTOM ((ptr_t)0xdfff0000) +#define DYNAMIC_LOADING +#endif +#ifdef LINUX +#define OS_TYPE "LINUX" +#ifdef __ELF__ +#define DYNAMIC_LOADING +#elif!defined(CPPCHECK) +#error Linux SPARC a.out not supported +#endif +#define COUNT_UNMAPPED_REGIONS +extern int _end[]; +extern int _etext[]; +#define DATAEND ((ptr_t)(_end)) +#define SVR4 +ptr_t GC_SysVGetDataStart(size_t,ptr_t); +#ifdef __arch64__ +#define DATASTART GC_SysVGetDataStart(0x100000,(ptr_t)_etext) +#else +#define DATASTART GC_SysVGetDataStart(0x10000,(ptr_t)_etext) +#endif +#define DATASTART_IS_FUNC +#define LINUX_STACKBOTTOM +#endif +#ifdef OPENBSD +#define OS_TYPE "OPENBSD" +#ifndef GC_OPENBSD_THREADS +#define HEURISTIC2 +#endif +extern int __data_start[]; +#define DATASTART ((ptr_t)__data_start) +extern int _end[]; +#define DATAEND ((ptr_t)(&_end)) +#define DYNAMIC_LOADING +#endif +#ifdef NETBSD +#define OS_TYPE "NETBSD" +#define HEURISTIC2 +#ifdef __ELF__ +extern ptr_t GC_data_start; +#define DATASTART GC_data_start +#define DYNAMIC_LOADING +#else +extern char etext[]; +#define DATASTART ((ptr_t)(etext)) +#endif +#endif +#ifdef FREEBSD +#define OS_TYPE "FREEBSD" +#define SIG_SUSPEND SIGUSR1 +#define SIG_THR_RESTART SIGUSR2 +#define FREEBSD_STACKBOTTOM +#define DYNAMIC_LOADING +extern char etext[]; +extern char edata[]; +#if!defined(CPPCHECK) +extern char end[]; +#endif +#define NEED_FIND_LIMIT +#define DATASTART ((ptr_t)(&etext)) +void*GC_find_limit(void*,int); +#define DATAEND (ptr_t)GC_find_limit(DATASTART,TRUE) +#define DATAEND_IS_FUNC +#define GC_HAVE_DATAREGION2 +#define DATASTART2 ((ptr_t)(&edata)) +#define DATAEND2 ((ptr_t)(&end)) +#endif +#endif +#ifdef I386 +#define MACH_TYPE "I386" +#if (defined(__LP64__)||defined(_WIN64))&&!defined(CPPCHECK) +#error This should be handled as X86_64 +#else +#define CPP_WORDSZ 32 +#define ALIGNMENT 4 +#endif +#ifdef SEQUENT +#define OS_TYPE "SEQUENT" +extern int etext[]; +#define DATASTART ((ptr_t)((((word)(etext))+0xfff)&~0xfff)) +#define STACKBOTTOM ((ptr_t)0x3ffff000) +#endif +#if defined(__EMSCRIPTEN__) +#define OS_TYPE "EMSCRIPTEN" +#define DATASTART (ptr_t)ALIGNMENT +#define DATAEND (ptr_t)ALIGNMENT +#define USE_MMAP_ANON +#define STACK_GROWS_DOWN +#endif +#if defined(__QNX__) +#define OS_TYPE "QNX" +#define SA_RESTART 0 +#define HEURISTIC1 +extern char etext[]; +extern int _end[]; +#define DATASTART ((ptr_t)etext) +#define DATAEND ((ptr_t)_end) +#endif +#ifdef HAIKU +#define OS_TYPE "HAIKU" +EXTERN_C_END +#include +EXTERN_C_BEGIN +#define GETPAGESIZE()(unsigned)B_PAGE_SIZE +extern int etext[]; +#define DATASTART ((ptr_t)((((word)(etext))+0xfff)&~0xfff)) +#define DYNAMIC_LOADING +#define MPROTECT_VDB +#endif +#ifdef SOLARIS +#define OS_TYPE "SOLARIS" +extern int _etext[],_end[]; +ptr_t GC_SysVGetDataStart(size_t,ptr_t); +#define DATASTART GC_SysVGetDataStart(0x1000,(ptr_t)_etext) +#define DATASTART_IS_FUNC +#define DATAEND ((ptr_t)(_end)) +EXTERN_C_END +#include +EXTERN_C_BEGIN +#ifdef USERLIMIT +#define STACKBOTTOM ((ptr_t)USRSTACK) +#else +#define HEURISTIC2 +#endif +#ifdef SOLARIS25_PROC_VDB_BUG_FIXED +#define PROC_VDB +#endif +#ifndef GC_THREADS +#define MPROTECT_VDB +#endif +#define DYNAMIC_LOADING +#if!defined(USE_MMAP)&&defined(REDIRECT_MALLOC) +#define USE_MMAP 1 
+#endif +#ifdef USE_MMAP +#define HEAP_START (ptr_t)0x40000000 +#else +#define HEAP_START DATAEND +#endif +#endif +#ifdef SCO +#define OS_TYPE "SCO" +extern int etext[]; +#define DATASTART ((ptr_t)((((word)(etext))+0x3fffff)&~0x3fffff)+((word)(etext)&0xfff)) +#define STACKBOTTOM ((ptr_t)0x7ffffffc) +#endif +#ifdef SCO_ELF +#define OS_TYPE "SCO_ELF" +extern int etext[]; +#define DATASTART ((ptr_t)(etext)) +#define STACKBOTTOM ((ptr_t)0x08048000) +#define DYNAMIC_LOADING +#define ELF_CLASS ELFCLASS32 +#endif +#ifdef DGUX +#define OS_TYPE "DGUX" +extern int _etext,_end; +ptr_t GC_SysVGetDataStart(size_t,ptr_t); +#define DATASTART GC_SysVGetDataStart(0x1000,(ptr_t)(&_etext)) +#define DATASTART_IS_FUNC +#define DATAEND ((ptr_t)(&_end)) +#define STACK_GROWS_DOWN +#define HEURISTIC2 +EXTERN_C_END +#include +EXTERN_C_BEGIN +#define GETPAGESIZE()(unsigned)sysconf(_SC_PAGESIZE) +#define DYNAMIC_LOADING +#ifndef USE_MMAP +#define USE_MMAP 1 +#endif +#define MAP_FAILED (void*)((word)-1) +#define HEAP_START (ptr_t)0x40000000 +#endif +#ifdef LINUX +#define OS_TYPE "LINUX" +#define LINUX_STACKBOTTOM +#define COUNT_UNMAPPED_REGIONS +#if!defined(REDIRECT_MALLOC) +#define MPROTECT_VDB +#else +#endif +#define HEAP_START (ptr_t)0x1000 +#ifdef __ELF__ +#define DYNAMIC_LOADING +EXTERN_C_END +#include +EXTERN_C_BEGIN +#if defined(__GLIBC__)&&__GLIBC__>=2||defined(HOST_ANDROID)||defined(HOST_TIZEN) +#define SEARCH_FOR_DATA_START +#else +extern char**__environ; +#define DATASTART ((ptr_t)(&__environ)) +#endif +extern int _end[]; +#define DATAEND ((ptr_t)(_end)) +#if!defined(GC_NO_SIGSETJMP)&&(defined(HOST_TIZEN)||(defined(HOST_ANDROID)&&!(GC_GNUC_PREREQ(4,8)||GC_CLANG_PREREQ(3,2)||__ANDROID_API__>=18))) +#define GC_NO_SIGSETJMP 1 +#endif +#else +extern int etext[]; +#define DATASTART ((ptr_t)((((word)(etext))+0xfff)&~0xfff)) +#endif +#ifdef USE_I686_PREFETCH +#define PREFETCH(x)__asm__ __volatile__ ("prefetchnta %0"::"m"(*(char*)(x))) +#ifdef FORCE_WRITE_PREFETCH +#define GC_PREFETCH_FOR_WRITE(x)__asm__ __volatile__ ("prefetcht0 %0"::"m"(*(char*)(x))) +#else +#define GC_NO_PREFETCH_FOR_WRITE +#endif +#elif defined(USE_3DNOW_PREFETCH) +#define PREFETCH(x)__asm__ __volatile__ ("prefetch %0"::"m"(*(char*)(x))) +#define GC_PREFETCH_FOR_WRITE(x)__asm__ __volatile__ ("prefetchw %0"::"m"(*(char*)(x))) +#endif +#if defined(__GLIBC__)&&!defined(__UCLIBC__) +#define GLIBC_2_19_TSX_BUG +EXTERN_C_END +#include +EXTERN_C_BEGIN +#endif +#endif +#ifdef CYGWIN32 +#define OS_TYPE "CYGWIN32" +#define WOW64_THREAD_CONTEXT_WORKAROUND +#define RETRY_GET_THREAD_CONTEXT +#define DATASTART ((ptr_t)GC_DATASTART) +#define DATAEND ((ptr_t)GC_DATAEND) +#ifdef USE_WINALLOC +#define GWW_VDB +#else +# +#ifdef USE_MMAP +#define NEED_FIND_LIMIT +#define USE_MMAP_ANON +#endif +#endif +#endif +#ifdef INTERIX +#define OS_TYPE "INTERIX" +extern int _data_start__[]; +extern int _bss_end__[]; +#define DATASTART ((ptr_t)_data_start__) +#define DATAEND ((ptr_t)_bss_end__) +#define STACKBOTTOM ({ ptr_t rv;__asm__ __volatile__ ("movl %%fs:4,%%eax":"=a" (rv));rv;}) +#define USE_MMAP_ANON +#endif +#ifdef OS2 +#define OS_TYPE "OS2" +#define DATAEND +#endif +#ifdef MSWIN32 +#define OS_TYPE "MSWIN32" +#define WOW64_THREAD_CONTEXT_WORKAROUND +#define RETRY_GET_THREAD_CONTEXT +#define MPROTECT_VDB +#define GWW_VDB +#define DATAEND +#endif +#ifdef MSWINCE +#define OS_TYPE "MSWINCE" +#define DATAEND +#endif +#ifdef DJGPP +#define OS_TYPE "DJGPP" +EXTERN_C_END +#include "stubinfo.h" +EXTERN_C_BEGIN +extern int etext[]; +extern int _stklen; +extern int 
__djgpp_stack_limit; +#define DATASTART ((ptr_t)((((word)(etext))+0x1ff)&~0x1ff)) +#define STACKBOTTOM ((ptr_t)((word)__djgpp_stack_limit+_stklen)) +#endif +#ifdef OPENBSD +#define OS_TYPE "OPENBSD" +#ifndef GC_OPENBSD_THREADS +#define HEURISTIC2 +#endif +extern int __data_start[]; +#define DATASTART ((ptr_t)__data_start) +extern int _end[]; +#define DATAEND ((ptr_t)(&_end)) +#define DYNAMIC_LOADING +#endif +#ifdef FREEBSD +#define OS_TYPE "FREEBSD" +#ifndef GC_FREEBSD_THREADS +#define MPROTECT_VDB +#endif +#ifdef __GLIBC__ +#define SIG_SUSPEND (32+6) +#define SIG_THR_RESTART (32+5) +extern int _end[]; +#define DATAEND ((ptr_t)(_end)) +#else +#define SIG_SUSPEND SIGUSR1 +#define SIG_THR_RESTART SIGUSR2 +#endif +#define FREEBSD_STACKBOTTOM +#ifdef __ELF__ +#define DYNAMIC_LOADING +#endif +extern char etext[]; +#define DATASTART GC_FreeBSDGetDataStart(0x1000,(ptr_t)etext) +#define DATASTART_USES_BSDGETDATASTART +#endif +#ifdef NETBSD +#define OS_TYPE "NETBSD" +#ifdef __ELF__ +#define DYNAMIC_LOADING +#endif +#endif +#ifdef THREE86BSD +#define OS_TYPE "THREE86BSD" +#endif +#ifdef BSDI +#define OS_TYPE "BSDI" +#endif +#if defined(NETBSD)||defined(THREE86BSD)||defined(BSDI) +#define HEURISTIC2 +extern char etext[]; +#define DATASTART ((ptr_t)(etext)) +#endif +#ifdef NEXT +#define OS_TYPE "NEXT" +#define DATASTART ((ptr_t)get_etext()) +#define DATASTART_IS_FUNC +#define STACKBOTTOM ((ptr_t)0xc0000000) +#define DATAEND +#endif +#ifdef RTEMS +#define OS_TYPE "RTEMS" +EXTERN_C_END +#include +EXTERN_C_BEGIN +extern int etext[]; +void*rtems_get_stack_bottom(void); +#define InitStackBottom rtems_get_stack_bottom() +#define DATASTART ((ptr_t)etext) +#define STACKBOTTOM ((ptr_t)InitStackBottom) +#define SIG_SUSPEND SIGUSR1 +#define SIG_THR_RESTART SIGUSR2 +#endif +#ifdef DOS4GW +#define OS_TYPE "DOS4GW" +extern long __nullarea; +extern char _end; +extern char*_STACKTOP; +#pragma aux __nullarea "*"; +#pragma aux _end "*"; +#define STACKBOTTOM ((ptr_t)_STACKTOP) +#define DATASTART ((ptr_t)(&__nullarea)) +#define DATAEND ((ptr_t)(&_end)) +#endif +#ifdef HURD +#define OS_TYPE "HURD" +#define STACK_GROWS_DOWN +#define HEURISTIC2 +#define SIG_SUSPEND SIGUSR1 +#define SIG_THR_RESTART SIGUSR2 +#define SEARCH_FOR_DATA_START +extern int _end[]; +#define DATAEND ((ptr_t)(_end)) +#define DYNAMIC_LOADING +#define USE_MMAP_ANON +#endif +#ifdef DARWIN +#define OS_TYPE "DARWIN" +#define DARWIN_DONT_PARSE_STACK 1 +#define DYNAMIC_LOADING +#define DATASTART ((ptr_t)get_etext()) +#define DATAEND ((ptr_t)get_end()) +#define STACKBOTTOM ((ptr_t)0xc0000000) +#define USE_MMAP_ANON +#define MPROTECT_VDB +EXTERN_C_END +#include +EXTERN_C_BEGIN +#define GETPAGESIZE()(unsigned)getpagesize() +#define NO_PTHREAD_TRYLOCK +#if TARGET_OS_IPHONE&&!defined(NO_DYLD_BIND_FULLY_IMAGE) +#define NO_DYLD_BIND_FULLY_IMAGE +#endif +#endif +#endif +#ifdef NS32K +#define MACH_TYPE "NS32K" +#define ALIGNMENT 4 +extern char**environ; +#define DATASTART ((ptr_t)(&environ)) +#define STACKBOTTOM ((ptr_t)0xfffff000) +#endif +#ifdef MIPS +#define MACH_TYPE "MIPS" +#ifdef LINUX +#define OS_TYPE "LINUX" +#define DYNAMIC_LOADING +#define COUNT_UNMAPPED_REGIONS +extern int _end[]; +#pragma weak __data_start +extern int __data_start[]; +#define DATASTART ((ptr_t)(__data_start)) +#define DATAEND ((ptr_t)(_end)) +#ifdef _MIPS_SZPTR +#define CPP_WORDSZ _MIPS_SZPTR +#define ALIGNMENT (_MIPS_SZPTR/8) +#else +#define ALIGNMENT 4 +#endif +#ifndef HBLKSIZE +#define HBLKSIZE 4096 +#endif +#if __GLIBC__==2&&__GLIBC_MINOR__>=2||__GLIBC__ > 2 +#define LINUX_STACKBOTTOM 
+#else +#define STACKBOTTOM ((ptr_t)0x7fff8000) +#endif +#endif +#ifdef EWS4800 +#define HEURISTIC2 +#if defined(_MIPS_SZPTR)&&(_MIPS_SZPTR==64) +extern int _fdata[],_end[]; +#define DATASTART ((ptr_t)_fdata) +#define DATAEND ((ptr_t)_end) +#define CPP_WORDSZ _MIPS_SZPTR +#define ALIGNMENT (_MIPS_SZPTR/8) +#else +extern int etext[],edata[]; +#if!defined(CPPCHECK) +extern int end[]; +#endif +extern int _DYNAMIC_LINKING[],_gp[]; +#define DATASTART ((ptr_t)((((word)(etext)+0x3ffff)&~0x3ffff)+((word)(etext)&0xffff))) +#define DATAEND ((ptr_t)(edata)) +#define GC_HAVE_DATAREGION2 +#define DATASTART2 (_DYNAMIC_LINKING?(ptr_t)(((word)_gp+0x8000+0x3ffff)&~0x3ffff):(ptr_t)edata) +#define DATAEND2 ((ptr_t)(end)) +#define ALIGNMENT 4 +#endif +#define OS_TYPE "EWS4800" +#endif +#ifdef ULTRIX +#define HEURISTIC2 +#define DATASTART ((ptr_t)0x10000000) +#define OS_TYPE "ULTRIX" +#define ALIGNMENT 4 +#endif +#ifdef IRIX5 +#define HEURISTIC2 +extern int _fdata[]; +#define DATASTART ((ptr_t)(_fdata)) +#ifdef USE_MMAP +#define HEAP_START (ptr_t)0x30000000 +#else +#define HEAP_START DATASTART +#endif +#define OS_TYPE "IRIX5" +#ifdef _MIPS_SZPTR +#define CPP_WORDSZ _MIPS_SZPTR +#define ALIGNMENT (_MIPS_SZPTR/8) +#else +#define ALIGNMENT 4 +#endif +#define DYNAMIC_LOADING +#endif +#ifdef MSWINCE +#define OS_TYPE "MSWINCE" +#define ALIGNMENT 4 +#define DATAEND +#endif +#ifdef NETBSD +#define OS_TYPE "NETBSD" +#define ALIGNMENT 4 +#define HEURISTIC2 +#ifdef __ELF__ +extern ptr_t GC_data_start; +#define DATASTART GC_data_start +#define NEED_FIND_LIMIT +#define DYNAMIC_LOADING +#else +#define DATASTART ((ptr_t)0x10000000) +#define STACKBOTTOM ((ptr_t)0x7ffff000) +#endif +#endif +#ifdef OPENBSD +#define OS_TYPE "OPENBSD" +#define CPP_WORDSZ 64 +#define ALIGNMENT 8 +#ifndef GC_OPENBSD_THREADS +#define HEURISTIC2 +#endif +extern int __data_start[]; +#define DATASTART ((ptr_t)__data_start) +extern int _end[]; +#define DATAEND ((ptr_t)(&_end)) +#define DYNAMIC_LOADING +#endif +#ifdef FREEBSD +#define OS_TYPE "FREEBSD" +#define ALIGNMENT 4 +#ifndef GC_FREEBSD_THREADS +#define MPROTECT_VDB +#endif +#define SIG_SUSPEND SIGUSR1 +#define SIG_THR_RESTART SIGUSR2 +#define FREEBSD_STACKBOTTOM +#define DYNAMIC_LOADING +extern char etext[]; +#define DATASTART GC_FreeBSDGetDataStart(0x1000,(ptr_t)etext) +#define DATASTART_USES_BSDGETDATASTART +#endif +#if defined(NONSTOP) +#define CPP_WORDSZ 32 +#define OS_TYPE "NONSTOP" +#define ALIGNMENT 4 +#define DATASTART ((ptr_t)0x08000000) +extern char**environ; +#define DATAEND ((ptr_t)(environ - 0x10)) +#define STACKBOTTOM ((ptr_t)0x4fffffff) +#endif +#endif +#ifdef NIOS2 +#define CPP_WORDSZ 32 +#define MACH_TYPE "NIOS2" +#ifdef LINUX +#define OS_TYPE "LINUX" +#define DYNAMIC_LOADING +#define COUNT_UNMAPPED_REGIONS +extern int _end[]; +extern int __data_start[]; +#define DATASTART ((ptr_t)(__data_start)) +#define DATAEND ((ptr_t)(_end)) +#define ALIGNMENT 4 +#ifndef HBLKSIZE +#define HBLKSIZE 4096 +#endif +#define LINUX_STACKBOTTOM +#endif +#endif +#ifdef OR1K +#define CPP_WORDSZ 32 +#define MACH_TYPE "OR1K" +#ifdef LINUX +#define OS_TYPE "LINUX" +#define DYNAMIC_LOADING +#define COUNT_UNMAPPED_REGIONS +extern int _end[]; +extern int __data_start[]; +#define DATASTART ((ptr_t)(__data_start)) +#define DATAEND ((ptr_t)(_end)) +#define ALIGNMENT 4 +#ifndef HBLKSIZE +#define HBLKSIZE 4096 +#endif +#define LINUX_STACKBOTTOM +#endif +#endif +#ifdef HP_PA +#define MACH_TYPE "HP_PA" +#ifdef __LP64__ +#define CPP_WORDSZ 64 +#define ALIGNMENT 8 +#else +#define CPP_WORDSZ 32 +#define ALIGNMENT 4 
+#endif +#if!defined(GC_HPUX_THREADS)&&!defined(GC_LINUX_THREADS)&&!defined(OPENBSD)&&!defined(LINUX) +#define MPROTECT_VDB +#endif +#define STACK_GROWS_UP +#ifdef HPUX +#define OS_TYPE "HPUX" +extern int __data_start[]; +#define DATASTART ((ptr_t)(__data_start)) +#ifdef USE_MMAP +#define USE_MMAP_ANON +#endif +#ifdef USE_HPUX_FIXED_STACKBOTTOM +#define STACKBOTTOM ((ptr_t)0x7b033000) +#elif defined(USE_ENVIRON_POINTER) +extern char**environ; +#define STACKBOTTOM ((ptr_t)environ) +#elif!defined(HEURISTIC2) +#define HPUX_MAIN_STACKBOTTOM +#define NEED_FIND_LIMIT +#endif +#define DYNAMIC_LOADING +EXTERN_C_END +#include +EXTERN_C_BEGIN +#define GETPAGESIZE()(unsigned)sysconf(_SC_PAGE_SIZE) +#ifndef __GNUC__ +#define PREFETCH(x)do { register long addr=(long)(x);(void)_asm ("LDW",0,0,addr,0);} while (0) +#endif +#endif +#ifdef LINUX +#define OS_TYPE "LINUX" +#define LINUX_STACKBOTTOM +#define COUNT_UNMAPPED_REGIONS +#define DYNAMIC_LOADING +#define SEARCH_FOR_DATA_START +extern int _end[]; +#define DATAEND ((ptr_t)(&_end)) +#endif +#ifdef OPENBSD +#define OS_TYPE "OPENBSD" +#ifndef GC_OPENBSD_THREADS +#define HEURISTIC2 +#endif +extern int __data_start[]; +#define DATASTART ((ptr_t)__data_start) +extern int _end[]; +#define DATAEND ((ptr_t)(&_end)) +#define DYNAMIC_LOADING +#endif +#endif +#ifdef ALPHA +#define MACH_TYPE "ALPHA" +#define ALIGNMENT 8 +#define CPP_WORDSZ 64 +#ifdef NETBSD +#define OS_TYPE "NETBSD" +#define HEURISTIC2 +extern ptr_t GC_data_start; +#define DATASTART GC_data_start +#define ELFCLASS32 32 +#define ELFCLASS64 64 +#define ELF_CLASS ELFCLASS64 +#define DYNAMIC_LOADING +#endif +#ifdef OPENBSD +#define OS_TYPE "OPENBSD" +#ifndef GC_OPENBSD_THREADS +#define HEURISTIC2 +#endif +extern int __data_start[]; +#define DATASTART ((ptr_t)__data_start) +extern int _end[]; +#define DATAEND ((ptr_t)(&_end)) +#define DYNAMIC_LOADING +#endif +#ifdef FREEBSD +#define OS_TYPE "FREEBSD" +#define SIG_SUSPEND SIGUSR1 +#define SIG_THR_RESTART SIGUSR2 +#define FREEBSD_STACKBOTTOM +#define DYNAMIC_LOADING +extern char etext[]; +extern char edata[]; +#if!defined(CPPCHECK) +extern char end[]; +#endif +#define NEED_FIND_LIMIT +#define DATASTART ((ptr_t)(&etext)) +void*GC_find_limit(void*,int); +#define DATAEND (ptr_t)GC_find_limit(DATASTART,TRUE) +#define DATAEND_IS_FUNC +#define GC_HAVE_DATAREGION2 +#define DATASTART2 ((ptr_t)(&edata)) +#define DATAEND2 ((ptr_t)(&end)) +#endif +#ifdef OSF1 +#define OS_TYPE "OSF1" +#define DATASTART ((ptr_t)0x140000000) +extern int _end[]; +#define DATAEND ((ptr_t)(&_end)) +extern char**environ; +#define STACKBOTTOM ((ptr_t)(((word)(environ)|(getpagesize()-1))+1)) +extern int __start[]; +#define HEURISTIC2_LIMIT ((ptr_t)((word)(__start)&~(getpagesize()-1))) +#ifndef GC_OSF1_THREADS +#define MPROTECT_VDB +#endif +#define DYNAMIC_LOADING +#endif +#ifdef LINUX +#define OS_TYPE "LINUX" +#define LINUX_STACKBOTTOM +#define COUNT_UNMAPPED_REGIONS +#ifdef __ELF__ +#define SEARCH_FOR_DATA_START +#define DYNAMIC_LOADING +#else +#define DATASTART ((ptr_t)0x140000000) +#endif +extern int _end[]; +#define DATAEND ((ptr_t)(_end)) +#if!defined(REDIRECT_MALLOC) +#define MPROTECT_VDB +#endif +#endif +#endif +#ifdef IA64 +#define MACH_TYPE "IA64" +#ifdef HPUX +#ifdef _ILP32 +#define CPP_WORDSZ 32 +#define ALIGNMENT 4 +#else +#if!defined(_LP64)&&!defined(CPPCHECK) +#error Unknown ABI +#endif +#define CPP_WORDSZ 64 +#define ALIGNMENT 8 +#endif +#define OS_TYPE "HPUX" +extern int __data_start[]; +#define DATASTART ((ptr_t)(__data_start)) +#ifdef USE_MMAP +#define USE_MMAP_ANON +#endif 
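/*
 * Illustrative sketch, not the collector's own code: the per-platform
 * DATASTART/DATAEND selections above ultimately name linker- or libc-provided
 * symbols that bound the static data segment which must be scanned for roots.
 * Assuming a conventional ELF/glibc toolchain that still exports the
 * historical etext/edata/end symbols, the same idea can be shown directly;
 * on other toolchains these symbols may be absent.
 */
#include <stdio.h>

extern char etext[];   /* end of the text segment              */
extern char edata[];   /* end of initialized data              */
extern char end[];     /* end of BSS, i.e. end of static data  */

int main(void)
{
    /* A conservative root scan walks roughly [DATASTART, DATAEND),
       a range bounded by symbols like these on each platform. */
    printf("text ends at %p\n", (void *)etext);
    printf("data ends at %p\n", (void *)edata);
    printf("bss  ends at %p (DATAEND-like)\n", (void *)end);
    return 0;
}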
+extern char**environ; +#define STACKBOTTOM ((ptr_t)environ) +#define HPUX_STACKBOTTOM +#define DYNAMIC_LOADING +EXTERN_C_END +#include +EXTERN_C_BEGIN +#define GETPAGESIZE()(unsigned)sysconf(_SC_PAGE_SIZE) +#define BACKING_STORE_DISPLACEMENT 0x1000000 +#define BACKING_STORE_ALIGNMENT 0x1000 +extern ptr_t GC_register_stackbottom; +#define BACKING_STORE_BASE GC_register_stackbottom +#endif +#ifdef LINUX +#define CPP_WORDSZ 64 +#define ALIGNMENT 8 +#define OS_TYPE "LINUX" +#define LINUX_STACKBOTTOM +extern ptr_t GC_register_stackbottom; +#define BACKING_STORE_BASE GC_register_stackbottom +#define COUNT_UNMAPPED_REGIONS +#define SEARCH_FOR_DATA_START +#ifdef __GNUC__ +#define DYNAMIC_LOADING +#else +#endif +#if!defined(REDIRECT_MALLOC) +#define MPROTECT_VDB +#endif +extern int _end[]; +#define DATAEND ((ptr_t)(_end)) +#ifdef __GNUC__ +#ifndef __INTEL_COMPILER +#define PREFETCH(x)__asm__ (" lfetch [%0]"::"r"(x)) +#define GC_PREFETCH_FOR_WRITE(x)__asm__ (" lfetch.excl [%0]"::"r"(x)) +#define CLEAR_DOUBLE(x)__asm__ (" stf.spill [%0]=f0"::"r"((void*)(x))) +#else +EXTERN_C_END +#include +EXTERN_C_BEGIN +#define PREFETCH(x)__lfetch(__lfhint_none,(x)) +#define GC_PREFETCH_FOR_WRITE(x)__lfetch(__lfhint_nta,(x)) +#define CLEAR_DOUBLE(x)__stf_spill((void*)(x),0) +#endif +#endif +#endif +#ifdef MSWIN32 +#define OS_TYPE "MSWIN32" +#define DATAEND +#if defined(_WIN64) +#define CPP_WORDSZ 64 +#else +#define CPP_WORDSZ 32 +#endif +#define ALIGNMENT 8 +#endif +#endif +#ifdef M88K +#define MACH_TYPE "M88K" +#define ALIGNMENT 4 +extern int etext[]; +#ifdef CX_UX +#define OS_TYPE "CX_UX" +#define DATASTART ((ptr_t)((((word)(etext)+0x3fffff)&~0x3fffff)+0x10000)) +#endif +#ifdef DGUX +#define OS_TYPE "DGUX" +ptr_t GC_SysVGetDataStart(size_t,ptr_t); +#define DATASTART GC_SysVGetDataStart(0x10000,(ptr_t)etext) +#define DATASTART_IS_FUNC +#endif +#define STACKBOTTOM ((char*)0xf0000000) +#endif +#ifdef S370 +#define MACH_TYPE "S370" +#define ALIGNMENT 4 +#ifdef UTS4 +#define OS_TYPE "UTS4" +extern int etext[]; +extern int _etext[]; +extern int _end[]; +ptr_t GC_SysVGetDataStart(size_t,ptr_t); +#define DATASTART GC_SysVGetDataStart(0x10000,(ptr_t)_etext) +#define DATASTART_IS_FUNC +#define DATAEND ((ptr_t)(_end)) +#define HEURISTIC2 +#endif +#endif +#ifdef S390 +#define MACH_TYPE "S390" +#ifndef __s390x__ +#define ALIGNMENT 4 +#define CPP_WORDSZ 32 +#else +#define ALIGNMENT 8 +#define CPP_WORDSZ 64 +#ifndef HBLKSIZE +#define HBLKSIZE 4096 +#endif +#endif +#ifdef LINUX +#define OS_TYPE "LINUX" +#define LINUX_STACKBOTTOM +#define DYNAMIC_LOADING +#define COUNT_UNMAPPED_REGIONS +extern int __data_start[] __attribute__((__weak__)); +#define DATASTART ((ptr_t)(__data_start)) +extern int _end[] __attribute__((__weak__)); +#define DATAEND ((ptr_t)(_end)) +#define CACHE_LINE_SIZE 256 +#define GETPAGESIZE()4096 +#endif +#endif +#ifdef AARCH64 +#define MACH_TYPE "AARCH64" +#ifdef __ILP32__ +#define CPP_WORDSZ 32 +#define ALIGNMENT 4 +#else +#define CPP_WORDSZ 64 +#define ALIGNMENT 8 +#endif +#ifndef HBLKSIZE +#define HBLKSIZE 4096 +#endif +#ifdef LINUX +#define OS_TYPE "LINUX" +#define LINUX_STACKBOTTOM +#define COUNT_UNMAPPED_REGIONS +#if!defined(REDIRECT_MALLOC) +#define MPROTECT_VDB +#endif +#define DYNAMIC_LOADING +#if defined(HOST_ANDROID) +#define SEARCH_FOR_DATA_START +#else +extern int __data_start[]; +#define DATASTART ((ptr_t)__data_start) +#endif +extern int _end[]; +#define DATAEND ((ptr_t)(&_end)) +#endif +#ifdef DARWIN +#define OS_TYPE "DARWIN" +#define DARWIN_DONT_PARSE_STACK 1 +#define DYNAMIC_LOADING +#define 
DATASTART ((ptr_t)get_etext()) +#define DATAEND ((ptr_t)get_end()) +#define STACKBOTTOM ((ptr_t)0x16fdfffff) +#define USE_MMAP_ANON +EXTERN_C_END +#include +EXTERN_C_BEGIN +#define GETPAGESIZE()(unsigned)getpagesize() +#define NO_PTHREAD_TRYLOCK +#if TARGET_OS_IPHONE&&!defined(NO_DYLD_BIND_FULLY_IMAGE) +#define NO_DYLD_BIND_FULLY_IMAGE +#endif +#endif +#ifdef FREEBSD +#define OS_TYPE "FREEBSD" +#ifndef GC_FREEBSD_THREADS +#define MPROTECT_VDB +#endif +#define FREEBSD_STACKBOTTOM +#define DYNAMIC_LOADING +extern char etext[]; +#define DATASTART GC_FreeBSDGetDataStart(0x1000,(ptr_t)etext) +#define DATASTART_USES_BSDGETDATASTART +#endif +#ifdef NETBSD +#define OS_TYPE "NETBSD" +#define HEURISTIC2 +extern ptr_t GC_data_start; +#define DATASTART GC_data_start +#define ELF_CLASS ELFCLASS64 +#define DYNAMIC_LOADING +#endif +#ifdef OPENBSD +#define OS_TYPE "OPENBSD" +#ifndef GC_OPENBSD_THREADS +#define HEURISTIC2 +#endif +extern int __data_start[]; +#define DATASTART ((ptr_t)__data_start) +extern int _end[]; +#define DATAEND ((ptr_t)(&_end)) +#define DYNAMIC_LOADING +#endif +#ifdef NINTENDO_SWITCH +#define OS_TYPE "NINTENDO_SWITCH" +extern int __bss_end[]; +#define NO_HANDLE_FORK 1 +#define DATASTART (ptr_t)ALIGNMENT +#define DATAEND (ptr_t)(&__bss_end) +void*switch_get_stack_bottom(void); +#define STACKBOTTOM ((ptr_t)switch_get_stack_bottom()) +#endif +#ifdef MSWIN32 +#define OS_TYPE "MSWIN32" +#ifndef DATAEND +#define DATAEND +#endif +#endif +#ifdef NOSYS +extern int __data_start[]; +#define DATASTART ((ptr_t)__data_start) +extern void*__stack_base__; +#define STACKBOTTOM ((ptr_t)__stack_base__) +#endif +#endif +#ifdef ARM32 +#if defined(NACL) +#define MACH_TYPE "NACL" +#else +#define MACH_TYPE "ARM32" +#endif +#define CPP_WORDSZ 32 +#define ALIGNMENT 4 +#ifdef NETBSD +#define OS_TYPE "NETBSD" +#define HEURISTIC2 +#ifdef __ELF__ +extern ptr_t GC_data_start; +#define DATASTART GC_data_start +#define DYNAMIC_LOADING +#else +extern char etext[]; +#define DATASTART ((ptr_t)(etext)) +#endif +#endif +#ifdef LINUX +#define OS_TYPE "LINUX" +#define LINUX_STACKBOTTOM +#define COUNT_UNMAPPED_REGIONS +#if!defined(REDIRECT_MALLOC) +#define MPROTECT_VDB +#endif +#define DYNAMIC_LOADING +EXTERN_C_END +#include +EXTERN_C_BEGIN +#if defined(__GLIBC__)&&__GLIBC__>=2||defined(HOST_ANDROID)||defined(HOST_TIZEN) +#define SEARCH_FOR_DATA_START +#else +extern char**__environ; +#define DATASTART ((ptr_t)(&__environ)) +#endif +extern int _end[]; +#define DATAEND ((ptr_t)(_end)) +#endif +#ifdef MSWINCE +#define OS_TYPE "MSWINCE" +#define DATAEND +#endif +#ifdef FREEBSD +#define OS_TYPE "FREEBSD" +#ifndef GC_FREEBSD_THREADS +#define MPROTECT_VDB +#endif +#define SIG_SUSPEND SIGUSR1 +#define SIG_THR_RESTART SIGUSR2 +#define FREEBSD_STACKBOTTOM +#define DYNAMIC_LOADING +extern char etext[]; +#define DATASTART GC_FreeBSDGetDataStart(0x1000,(ptr_t)etext) +#define DATASTART_USES_BSDGETDATASTART +#endif +#ifdef DARWIN +#define OS_TYPE "DARWIN" +#define DARWIN_DONT_PARSE_STACK 1 +#define DYNAMIC_LOADING +#define DATASTART ((ptr_t)get_etext()) +#define DATAEND ((ptr_t)get_end()) +#define STACKBOTTOM ((ptr_t)0x30000000) +#define USE_MMAP_ANON +EXTERN_C_END +#include +EXTERN_C_BEGIN +#define GETPAGESIZE()(unsigned)getpagesize() +#define NO_PTHREAD_TRYLOCK +#if TARGET_OS_IPHONE&&!defined(NO_DYLD_BIND_FULLY_IMAGE) +#define NO_DYLD_BIND_FULLY_IMAGE +#endif +#endif +#ifdef OPENBSD +#define OS_TYPE "OPENBSD" +#ifndef GC_OPENBSD_THREADS +#define HEURISTIC2 +#endif +extern int __data_start[]; +#define DATASTART ((ptr_t)__data_start) 
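/*
 * Illustrative sketch of what the various STACKBOTTOM settings above stand
 * for: the highest address of the initial stack, used as the far end of the
 * stack scan when the stack grows downwards.  This version assumes glibc's
 * non-portable pthread_getattr_np() extension (the configuration enables the
 * analogous HAVE_PTHREAD_GETATTR_NP path further below); it is not the
 * collector's own detection code.
 */
#define _GNU_SOURCE
#include <pthread.h>
#include <stdio.h>

int main(void)
{
    pthread_attr_t attr;
    void *stack_lo = NULL;
    size_t stack_sz = 0;

    if (pthread_getattr_np(pthread_self(), &attr) != 0)
        return 1;
    if (pthread_attr_getstack(&attr, &stack_lo, &stack_sz) != 0) {
        pthread_attr_destroy(&attr);
        return 1;
    }
    pthread_attr_destroy(&attr);

    /* With STACK_GROWS_DOWN, the "bottom" named by STACKBOTTOM is the high
       end of the stack mapping: lowest address + size. */
    printf("stack bottom (STACKBOTTOM-like): %p\n",
           (void *)((char *)stack_lo + stack_sz));
    return 0;
}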
+extern int _end[]; +#define DATAEND ((ptr_t)(&_end)) +#define DYNAMIC_LOADING +#endif +#ifdef SN_TARGET_PSP2 +#define OS_TYPE "SN_TARGET_PSP2" +#define NO_HANDLE_FORK 1 +#define DATASTART (ptr_t)ALIGNMENT +#define DATAEND (ptr_t)ALIGNMENT +void*psp2_get_stack_bottom(void); +#define STACKBOTTOM ((ptr_t)psp2_get_stack_bottom()) +#endif +#ifdef NN_PLATFORM_CTR +#define OS_TYPE "NN_PLATFORM_CTR" +extern unsigned char Image$$ZI$$ZI$$Base[]; +#define DATASTART (ptr_t)(Image$$ZI$$ZI$$Base) +extern unsigned char Image$$ZI$$ZI$$Limit[]; +#define DATAEND (ptr_t)(Image$$ZI$$ZI$$Limit) +void*n3ds_get_stack_bottom(void); +#define STACKBOTTOM ((ptr_t)n3ds_get_stack_bottom()) +#endif +#ifdef MSWIN32 +#define OS_TYPE "MSWIN32" +#ifndef DATAEND +#define DATAEND +#endif +#endif +#ifdef NOSYS +extern int __data_start[]; +#define DATASTART ((ptr_t)(__data_start)) +extern void*__stack_base__; +#define STACKBOTTOM ((ptr_t)(__stack_base__)) +#endif +#endif +#ifdef CRIS +#define MACH_TYPE "CRIS" +#define CPP_WORDSZ 32 +#define ALIGNMENT 1 +#ifdef LINUX +#define OS_TYPE "LINUX" +#define DYNAMIC_LOADING +#define LINUX_STACKBOTTOM +#define COUNT_UNMAPPED_REGIONS +#define SEARCH_FOR_DATA_START +extern int _end[]; +#define DATAEND ((ptr_t)(_end)) +#endif +#endif +#if defined(SH)&&!defined(SH4) +#define MACH_TYPE "SH" +#define ALIGNMENT 4 +#ifdef MSWINCE +#define OS_TYPE "MSWINCE" +#define DATAEND +#endif +#ifdef LINUX +#define OS_TYPE "LINUX" +#define LINUX_STACKBOTTOM +#define COUNT_UNMAPPED_REGIONS +#define DYNAMIC_LOADING +#define SEARCH_FOR_DATA_START +extern int _end[]; +#define DATAEND ((ptr_t)(_end)) +#endif +#ifdef NETBSD +#define OS_TYPE "NETBSD" +#define HEURISTIC2 +extern ptr_t GC_data_start; +#define DATASTART GC_data_start +#define DYNAMIC_LOADING +#endif +#ifdef OPENBSD +#define OS_TYPE "OPENBSD" +#ifndef GC_OPENBSD_THREADS +#define HEURISTIC2 +#endif +extern int __data_start[]; +#define DATASTART ((ptr_t)__data_start) +extern int _end[]; +#define DATAEND ((ptr_t)(&_end)) +#define DYNAMIC_LOADING +#endif +#endif +#ifdef SH4 +#define MACH_TYPE "SH4" +#define ALIGNMENT 4 +#ifdef MSWINCE +#define OS_TYPE "MSWINCE" +#define DATAEND +#endif +#endif +#ifdef AVR32 +#define MACH_TYPE "AVR32" +#define CPP_WORDSZ 32 +#define ALIGNMENT 4 +#ifdef LINUX +#define OS_TYPE "LINUX" +#define DYNAMIC_LOADING +#define LINUX_STACKBOTTOM +#define COUNT_UNMAPPED_REGIONS +#define SEARCH_FOR_DATA_START +extern int _end[]; +#define DATAEND ((ptr_t)(_end)) +#endif +#endif +#ifdef M32R +#define CPP_WORDSZ 32 +#define MACH_TYPE "M32R" +#define ALIGNMENT 4 +#ifdef LINUX +#define OS_TYPE "LINUX" +#define LINUX_STACKBOTTOM +#define COUNT_UNMAPPED_REGIONS +#define DYNAMIC_LOADING +#define SEARCH_FOR_DATA_START +extern int _end[]; +#define DATAEND ((ptr_t)(_end)) +#endif +#endif +#ifdef X86_64 +#define MACH_TYPE "X86_64" +#ifdef __ILP32__ +#define ALIGNMENT 4 +#define CPP_WORDSZ 32 +#else +#define ALIGNMENT 8 +#define CPP_WORDSZ 64 +#endif +#ifndef HBLKSIZE +#define HBLKSIZE 4096 +#endif +#ifndef CACHE_LINE_SIZE +#define CACHE_LINE_SIZE 64 +#endif +#ifdef SN_TARGET_ORBIS +#define OS_TYPE "SN_TARGET_ORBIS" +#define DATASTART (ptr_t)ALIGNMENT +#define DATAEND (ptr_t)ALIGNMENT +void*ps4_get_stack_bottom(void); +#define STACKBOTTOM ((ptr_t)ps4_get_stack_bottom()) +#endif +#ifdef OPENBSD +#define OS_TYPE "OPENBSD" +#ifndef GC_OPENBSD_THREADS +#define HEURISTIC2 +#endif +extern int __data_start[]; +extern int _end[]; +#define DATASTART ((ptr_t)__data_start) +#define DATAEND ((ptr_t)(&_end)) +#define DYNAMIC_LOADING +#endif +#ifdef LINUX 
+#define OS_TYPE "LINUX" +#define LINUX_STACKBOTTOM +#if!defined(REDIRECT_MALLOC) +#define MPROTECT_VDB +#else +#endif +#define COUNT_UNMAPPED_REGIONS +#define DYNAMIC_LOADING +EXTERN_C_END +#include +EXTERN_C_BEGIN +#define SEARCH_FOR_DATA_START +extern int _end[]; +#define DATAEND ((ptr_t)(_end)) +#if defined(__GLIBC__)&&!defined(__UCLIBC__) +#define USE_MMAP_ANON +#define GETCONTEXT_FPU_EXCMASK_BUG +#define GLIBC_2_19_TSX_BUG +EXTERN_C_END +#include +EXTERN_C_BEGIN +#endif +#endif +#ifdef DARWIN +#define OS_TYPE "DARWIN" +#define DARWIN_DONT_PARSE_STACK 1 +#define DYNAMIC_LOADING +#define DATASTART ((ptr_t)get_etext()) +#define DATAEND ((ptr_t)get_end()) +#define STACKBOTTOM ((ptr_t)0x7fff5fc00000) +#define USE_MMAP_ANON +#define MPROTECT_VDB +EXTERN_C_END +#include +EXTERN_C_BEGIN +#define GETPAGESIZE()(unsigned)getpagesize() +#define NO_PTHREAD_TRYLOCK +#if TARGET_OS_IPHONE&&!defined(NO_DYLD_BIND_FULLY_IMAGE) +#define NO_DYLD_BIND_FULLY_IMAGE +#endif +#endif +#ifdef FREEBSD +#define OS_TYPE "FREEBSD" +#ifndef GC_FREEBSD_THREADS +#define MPROTECT_VDB +#endif +#ifdef __GLIBC__ +#define SIG_SUSPEND (32+6) +#define SIG_THR_RESTART (32+5) +extern int _end[]; +#define DATAEND ((ptr_t)(_end)) +#else +#define SIG_SUSPEND SIGUSR1 +#define SIG_THR_RESTART SIGUSR2 +#endif +#define FREEBSD_STACKBOTTOM +#if defined(__DragonFly__) +#define COUNT_UNMAPPED_REGIONS +#endif +#define DYNAMIC_LOADING +extern char etext[]; +#define DATASTART GC_FreeBSDGetDataStart(0x1000,(ptr_t)etext) +#define DATASTART_USES_BSDGETDATASTART +#endif +#ifdef NETBSD +#define OS_TYPE "NETBSD" +#define HEURISTIC2 +extern ptr_t GC_data_start; +#define DATASTART GC_data_start +#define DYNAMIC_LOADING +#endif +#ifdef HAIKU +#define OS_TYPE "HAIKU" +EXTERN_C_END +#include +EXTERN_C_BEGIN +#define GETPAGESIZE()(unsigned)B_PAGE_SIZE +#define HEURISTIC2 +#define SEARCH_FOR_DATA_START +#define DYNAMIC_LOADING +#define MPROTECT_VDB +#endif +#ifdef SOLARIS +#define OS_TYPE "SOLARIS" +#define ELF_CLASS ELFCLASS64 +extern int _etext[],_end[]; +ptr_t GC_SysVGetDataStart(size_t,ptr_t); +#define DATASTART GC_SysVGetDataStart(0x1000,(ptr_t)_etext) +#define DATASTART_IS_FUNC +#define DATAEND ((ptr_t)(_end)) +EXTERN_C_END +#include +EXTERN_C_BEGIN +#ifdef USERLIMIT +#define STACKBOTTOM ((ptr_t)USRSTACK) +#else +#define HEURISTIC2 +#endif +#ifdef SOLARIS25_PROC_VDB_BUG_FIXED +#define PROC_VDB +#endif +#ifndef GC_THREADS +#define MPROTECT_VDB +#endif +#define DYNAMIC_LOADING +#if!defined(USE_MMAP)&&defined(REDIRECT_MALLOC) +#define USE_MMAP 1 +#endif +#ifdef USE_MMAP +#define HEAP_START (ptr_t)0x40000000 +#else +#define HEAP_START DATAEND +#endif +#endif +#ifdef CYGWIN32 +#define OS_TYPE "CYGWIN32" +#define RETRY_GET_THREAD_CONTEXT +#ifdef USE_WINALLOC +#define GWW_VDB +#else +#if defined(THREAD_LOCAL_ALLOC) +#else +#define MPROTECT_VDB +#endif +#ifdef USE_MMAP +#define USE_MMAP_ANON +#endif +#endif +#endif +#ifdef MSWIN_XBOX1 +#define OS_TYPE "MSWIN_XBOX1" +#define NO_GETENV +#define DATASTART (ptr_t)ALIGNMENT +#define DATAEND (ptr_t)ALIGNMENT +LONG64 durango_get_stack_bottom(void); +#define STACKBOTTOM ((ptr_t)durango_get_stack_bottom()) +#define GETPAGESIZE()4096 +#ifndef USE_MMAP +#define USE_MMAP 1 +#endif +#define PROT_NONE 0 +#define PROT_READ 1 +#define PROT_WRITE 2 +#define PROT_EXEC 4 +#define MAP_PRIVATE 2 +#define MAP_FIXED 0x10 +#define MAP_FAILED ((void*)-1) +#endif +#ifdef MSWIN32 +#define OS_TYPE "MSWIN32" +#define RETRY_GET_THREAD_CONTEXT +#if!defined(__GNUC__)||defined(__INTEL_COMPILER)||GC_GNUC_PREREQ(4,7) +#define MPROTECT_VDB 
+#endif +#define GWW_VDB +#ifndef DATAEND +#define DATAEND +#endif +#endif +#endif +#ifdef ARC +#define CPP_WORDSZ 32 +#define MACH_TYPE "ARC" +#define ALIGNMENT 4 +#define CACHE_LINE_SIZE 64 +#ifdef LINUX +#define OS_TYPE "LINUX" +#define LINUX_STACKBOTTOM +#define COUNT_UNMAPPED_REGIONS +#define DYNAMIC_LOADING +extern int __data_start[] __attribute__((__weak__)); +#define DATASTART ((ptr_t)__data_start) +#endif +#endif +#ifdef HEXAGON +#define CPP_WORDSZ 32 +#define MACH_TYPE "HEXAGON" +#define ALIGNMENT 4 +#ifdef LINUX +#define OS_TYPE "LINUX" +#define LINUX_STACKBOTTOM +#if!defined(REDIRECT_MALLOC) +#define MPROTECT_VDB +#endif +#define COUNT_UNMAPPED_REGIONS +#define DYNAMIC_LOADING +EXTERN_C_END +#include +EXTERN_C_BEGIN +#if defined(__GLIBC__) +#define SEARCH_FOR_DATA_START +#elif!defined(CPPCHECK) +#error Unknown Hexagon libc configuration +#endif +extern int _end[]; +#define DATAEND ((ptr_t)(_end)) +#endif +#endif +#ifdef TILEPRO +#define CPP_WORDSZ 32 +#define MACH_TYPE "TILEPro" +#define ALIGNMENT 4 +#define PREFETCH(x)__insn_prefetch(x) +#define CACHE_LINE_SIZE 64 +#ifdef LINUX +#define OS_TYPE "LINUX" +extern int __data_start[]; +#define DATASTART ((ptr_t)__data_start) +#define LINUX_STACKBOTTOM +#define COUNT_UNMAPPED_REGIONS +#define DYNAMIC_LOADING +#endif +#endif +#ifdef TILEGX +#define CPP_WORDSZ (__SIZEOF_POINTER__*8) +#define MACH_TYPE "TILE-Gx" +#define ALIGNMENT __SIZEOF_POINTER__ +#if CPP_WORDSZ < 64 +#define CLEAR_DOUBLE(x)(*(long long*)(x)=0) +#endif +#define PREFETCH(x)__insn_prefetch_l1(x) +#define CACHE_LINE_SIZE 64 +#ifdef LINUX +#define OS_TYPE "LINUX" +extern int __data_start[]; +#define DATASTART ((ptr_t)__data_start) +#define LINUX_STACKBOTTOM +#define COUNT_UNMAPPED_REGIONS +#define DYNAMIC_LOADING +#endif +#endif +#ifdef RISCV +#define MACH_TYPE "RISC-V" +#define CPP_WORDSZ __riscv_xlen +#define ALIGNMENT (CPP_WORDSZ/8) +#ifdef FREEBSD +#define OS_TYPE "FREEBSD" +#ifndef GC_FREEBSD_THREADS +#define MPROTECT_VDB +#endif +#define SIG_SUSPEND SIGUSR1 +#define SIG_THR_RESTART SIGUSR2 +#define FREEBSD_STACKBOTTOM +#define DYNAMIC_LOADING +extern char etext[]; +#define DATASTART GC_FreeBSDGetDataStart(0x1000,(ptr_t)etext) +#define DATASTART_USES_BSDGETDATASTART +#endif +#ifdef LINUX +#define OS_TYPE "LINUX" +extern int __data_start[] __attribute__((__weak__)); +#define DATASTART ((ptr_t)__data_start) +#define LINUX_STACKBOTTOM +#define COUNT_UNMAPPED_REGIONS +#define DYNAMIC_LOADING +#endif +#endif +#if defined(__GLIBC__)&&!defined(DONT_USE_LIBC_PRIVATES) +#define USE_LIBC_PRIVATES +#endif +#ifdef NO_RETRY_GET_THREAD_CONTEXT +#undef RETRY_GET_THREAD_CONTEXT +#endif +#if defined(LINUX_STACKBOTTOM)&&defined(NO_PROC_STAT)&&!defined(USE_LIBC_PRIVATES) +#undef LINUX_STACKBOTTOM +#define HEURISTIC2 +#endif +#if defined(USE_MMAP_ANON)&&!defined(USE_MMAP) +#define USE_MMAP 1 +#elif (defined(LINUX)||defined(OPENBSD))&&defined(USE_MMAP) +#define USE_MMAP_ANON +#endif +#if defined(GC_LINUX_THREADS)&&defined(REDIRECT_MALLOC)&&!defined(USE_PROC_FOR_LIBRARIES) +#define USE_PROC_FOR_LIBRARIES +#endif +#ifndef STACK_GROWS_UP +#define STACK_GROWS_DOWN +#endif +#ifndef CPP_WORDSZ +#define CPP_WORDSZ 32 +#endif +#ifndef OS_TYPE +#define OS_TYPE "" +#endif +#ifndef DATAEND +#if!defined(CPPCHECK) +extern int end[]; +#endif +#define DATAEND ((ptr_t)(end)) +#endif +#if defined(HOST_ANDROID)&&defined(__clang__)&&!defined(BROKEN_UUENDUU_SYM) +#undef DATAEND +#pragma weak __end__ +extern int __end__[]; +#define DATAEND (__end__!=0?(ptr_t)__end__:(ptr_t)_end) +#endif +#if 
(defined(SVR4)||defined(HOST_ANDROID)||defined(HOST_TIZEN))&&!defined(GETPAGESIZE) +EXTERN_C_END +#include +EXTERN_C_BEGIN +#define GETPAGESIZE()(unsigned)sysconf(_SC_PAGESIZE) +#endif +#ifndef GETPAGESIZE +#if defined(SOLARIS)||defined(IRIX5)||defined(LINUX)||defined(NETBSD)||defined(FREEBSD)||defined(HPUX) +EXTERN_C_END +#include +EXTERN_C_BEGIN +#endif +#define GETPAGESIZE()(unsigned)getpagesize() +#endif +#if defined(HOST_ANDROID)&&!(__ANDROID_API__>=23)&&((defined(MIPS)&&(CPP_WORDSZ==32))||defined(ARM32)||defined(I386)) +#define USE_TKILL_ON_ANDROID +#endif +#if defined(SOLARIS)||defined(DRSNX)||defined(UTS4) +#define SVR4 +#endif +#if defined(SOLARIS)||defined(DRSNX) +#define SOLARISDL +#define SUNOS5SIGS +#endif +#if defined(HPUX) +#define SUNOS5SIGS +#endif +#if defined(FREEBSD)&&(defined(__DragonFly__)||__FreeBSD__>=4||(__FreeBSD_kernel__>=4)) +#define SUNOS5SIGS +#endif +#if!defined(GC_EXPLICIT_SIGNALS_UNBLOCK)&&defined(SUNOS5SIGS)&&!defined(GC_NO_PTHREAD_SIGMASK) +#define GC_EXPLICIT_SIGNALS_UNBLOCK +#endif +#if!defined(NO_SIGNALS_UNBLOCK_IN_MAIN)&&defined(GC_NO_PTHREAD_SIGMASK) +#define NO_SIGNALS_UNBLOCK_IN_MAIN +#endif +#if!defined(NO_MARKER_SPECIAL_SIGMASK)&&(defined(NACL)||defined(GC_WIN32_PTHREADS)) +#define NO_MARKER_SPECIAL_SIGMASK +#endif +#ifdef GC_NETBSD_THREADS +#define SIGRTMIN 33 +#define SIGRTMAX 63 +#define GC_NETBSD_THREADS_WORKAROUND +#endif +#ifdef GC_OPENBSD_THREADS +EXTERN_C_END +#include +EXTERN_C_BEGIN +#if OpenBSD < 201211 +#define GC_OPENBSD_UTHREADS 1 +#endif +#endif +#if defined(SVR4)||defined(LINUX)||defined(IRIX5)||defined(HPUX)||defined(OPENBSD)||defined(NETBSD)||defined(FREEBSD)||defined(DGUX)||defined(BSD)||defined(HAIKU)||defined(HURD)||defined(AIX)||defined(DARWIN)||defined(OSF1) +#define UNIX_LIKE +#endif +#if defined(CPPCHECK) +#undef CPP_WORDSZ +#define CPP_WORDSZ (__SIZEOF_POINTER__*8) +#elif CPP_WORDSZ!=32&&CPP_WORDSZ!=64 +#error Bad word size +#endif +#if!defined(ALIGNMENT)&&!defined(CPPCHECK) +#error Undefined ALIGNMENT +#endif +#ifdef PCR +#undef DYNAMIC_LOADING +#undef STACKBOTTOM +#undef HEURISTIC1 +#undef HEURISTIC2 +#undef PROC_VDB +#undef MPROTECT_VDB +#define PCR_VDB +#endif +#if!defined(STACKBOTTOM)&&(defined(ECOS)||defined(NOSYS))&&!defined(CPPCHECK) +#error Undefined STACKBOTTOM +#endif +#ifdef IGNORE_DYNAMIC_LOADING +#undef DYNAMIC_LOADING +#endif +#if defined(SMALL_CONFIG)&&!defined(GC_DISABLE_INCREMENTAL) +#define GC_DISABLE_INCREMENTAL +#endif +#if (defined(MSWIN32)||defined(MSWINCE))&&!defined(USE_WINALLOC) +#define USE_WINALLOC 1 +#endif +#ifdef USE_WINALLOC +#undef USE_MMAP +#endif +#if defined(DARWIN)||defined(FREEBSD)||defined(HAIKU)||defined(IRIX5)||defined(LINUX)||defined(NETBSD)||defined(OPENBSD)||defined(SOLARIS)||((defined(CYGWIN32)||defined(USE_MMAP)||defined(USE_MUNMAP))&&!defined(USE_WINALLOC)) +#define MMAP_SUPPORTED +#endif +#if defined(USE_MUNMAP)&&!defined(MUNMAP_THRESHOLD)&&(defined(SN_TARGET_ORBIS)||defined(SN_TARGET_PS3)||defined(SN_TARGET_PSP2)||defined(MSWIN_XBOX1)) +#define MUNMAP_THRESHOLD 2 +#endif +#if defined(USE_MUNMAP)&&defined(COUNT_UNMAPPED_REGIONS)&&!defined(GC_UNMAPPED_REGIONS_SOFT_LIMIT) +#if defined(__DragonFly__) +#define GC_UNMAPPED_REGIONS_SOFT_LIMIT (1000000/4) +#else +#define GC_UNMAPPED_REGIONS_SOFT_LIMIT 16384 +#endif +#endif +#if defined(GC_DISABLE_INCREMENTAL)||defined(DEFAULT_VDB) +#undef GWW_VDB +#undef MPROTECT_VDB +#undef PCR_VDB +#undef PROC_VDB +#endif +#ifdef GC_DISABLE_INCREMENTAL +#undef CHECKSUMS +#endif +#ifdef USE_GLOBAL_ALLOC +#undef GWW_VDB +#endif +#if 
defined(BASE_ATOMIC_OPS_EMULATED) +#undef MPROTECT_VDB +#endif +#if defined(USE_PROC_FOR_LIBRARIES)&&defined(GC_LINUX_THREADS) +#undef MPROTECT_VDB +#endif +#if defined(MPROTECT_VDB)&&defined(GC_PREFER_MPROTECT_VDB) +#undef PCR_VDB +#undef PROC_VDB +#endif +#ifdef PROC_VDB +#undef MPROTECT_VDB +#endif +#if defined(MPROTECT_VDB)&&!defined(MSWIN32)&&!defined(MSWINCE) +#include +#endif +#if defined(SIGBUS)&&!defined(HAVE_SIGBUS)&&!defined(CPPCHECK) +#define HAVE_SIGBUS +#endif +#ifndef SA_SIGINFO +#define NO_SA_SIGACTION +#endif +#if (defined(NO_SA_SIGACTION)||defined(GC_NO_SIGSETJMP))&&defined(MPROTECT_VDB)&&!defined(DARWIN)&&!defined(MSWIN32)&&!defined(MSWINCE) +#undef MPROTECT_VDB +#endif +#if!defined(PCR_VDB)&&!defined(PROC_VDB)&&!defined(MPROTECT_VDB)&&!defined(GWW_VDB)&&!defined(DEFAULT_VDB)&&!defined(GC_DISABLE_INCREMENTAL) +#define DEFAULT_VDB +#endif +#if ((defined(UNIX_LIKE)&&(defined(DARWIN)||defined(HAIKU)||defined(HURD)||defined(OPENBSD)||defined(ARM32)||defined(AVR32)||defined(MIPS)||defined(NIOS2)||defined(OR1K)))||(defined(LINUX)&&!defined(__gnu_linux__))||(defined(RTEMS)&&defined(I386))||defined(HOST_ANDROID))&&!defined(NO_GETCONTEXT) +#define NO_GETCONTEXT 1 +#endif +#ifndef PREFETCH +#if GC_GNUC_PREREQ(3,0)&&!defined(NO_PREFETCH) +#define PREFETCH(x)__builtin_prefetch((x),0,0) +#else +#define PREFETCH(x)(void)0 +#endif +#endif +#ifndef GC_PREFETCH_FOR_WRITE +#if GC_GNUC_PREREQ(3,0)&&!defined(GC_NO_PREFETCH_FOR_WRITE) +#define GC_PREFETCH_FOR_WRITE(x)__builtin_prefetch((x),1) +#else +#define GC_PREFETCH_FOR_WRITE(x)(void)0 +#endif +#endif +#ifndef CACHE_LINE_SIZE +#define CACHE_LINE_SIZE 32 +#endif +#ifndef STATIC +#ifdef GC_ASSERTIONS +#define STATIC +#else +#define STATIC static +#endif +#endif +#if defined(LINUX)&&(defined(USE_PROC_FOR_LIBRARIES)||defined(IA64)||!defined(SMALL_CONFIG)) +#define NEED_PROC_MAPS +#endif +#if defined(LINUX)||defined(HURD)||defined(__GLIBC__) +#define REGISTER_LIBRARIES_EARLY +#endif +#if defined(SEARCH_FOR_DATA_START) +extern ptr_t GC_data_start; +#define DATASTART GC_data_start +#endif +#ifndef HEAP_START +#define HEAP_START ((ptr_t)0) +#endif +#ifndef CLEAR_DOUBLE +#define CLEAR_DOUBLE(x)(((word*)(x))[0]=0,((word*)(x))[1]=0) +#endif +#if defined(GC_LINUX_THREADS)&&defined(REDIRECT_MALLOC)&&!defined(INCLUDE_LINUX_THREAD_DESCR) +#define INCLUDE_LINUX_THREAD_DESCR +#endif +#if!defined(CPPCHECK) +#if defined(GC_IRIX_THREADS)&&!defined(IRIX5) +#error Inconsistent configuration +#endif +#if defined(GC_LINUX_THREADS)&&!defined(LINUX)&&!defined(NACL) +#error Inconsistent configuration +#endif +#if defined(GC_NETBSD_THREADS)&&!defined(NETBSD) +#error Inconsistent configuration +#endif +#if defined(GC_FREEBSD_THREADS)&&!defined(FREEBSD) +#error Inconsistent configuration +#endif +#if defined(GC_SOLARIS_THREADS)&&!defined(SOLARIS) +#error Inconsistent configuration +#endif +#if defined(GC_HPUX_THREADS)&&!defined(HPUX) +#error Inconsistent configuration +#endif +#if defined(GC_AIX_THREADS)&&!defined(_AIX) +#error Inconsistent configuration +#endif +#if defined(GC_WIN32_THREADS)&&!defined(CYGWIN32)&&!defined(MSWIN32)&&!defined(MSWINCE)&&!defined(MSWIN_XBOX1) +#error Inconsistent configuration +#endif +#if defined(GC_WIN32_PTHREADS)&&defined(CYGWIN32) +#error Inconsistent configuration +#endif +#endif +#if defined(PCR)||defined(GC_WIN32_THREADS)||defined(GC_PTHREADS)||((defined(NN_PLATFORM_CTR)||defined(NINTENDO_SWITCH)||defined(SN_TARGET_ORBIS)||defined(SN_TARGET_PS3)||defined(SN_TARGET_PSP2))&&defined(GC_THREADS)) +#define THREADS +#endif +#if 
defined(PARALLEL_MARK)&&!defined(THREADS)&&!defined(CPPCHECK) +#error Invalid config:PARALLEL_MARK requires GC_THREADS +#endif +#if defined(GWW_VDB)&&!defined(USE_WINALLOC)&&!defined(CPPCHECK) +#error Invalid config:GWW_VDB requires USE_WINALLOC +#endif +#if (((defined(MSWIN32)||defined(MSWINCE))&&!defined(__GNUC__))||(defined(MSWIN32)&&defined(I386))||(defined(USE_PROC_FOR_LIBRARIES)&&defined(THREADS)))&&!defined(NO_CRT)&&!defined(NO_WRAP_MARK_SOME) +#define WRAP_MARK_SOME +#endif +#if defined(PARALLEL_MARK)&&!defined(DEFAULT_STACK_MAYBE_SMALL)&&(defined(HPUX)||defined(GC_DGUX386_THREADS)||defined(NO_GETCONTEXT)) +#define DEFAULT_STACK_MAYBE_SMALL +#endif +#ifdef PARALLEL_MARK +#define MIN_STACK_SIZE (8*HBLKSIZE*sizeof(word)) +#endif +#if defined(HOST_ANDROID)&&!defined(THREADS)&&!defined(USE_GET_STACKBASE_FOR_MAIN) +#define USE_GET_STACKBASE_FOR_MAIN +#endif +#if ((defined(FREEBSD)&&defined(__GLIBC__))||defined(LINUX)||defined(NETBSD)||defined(HOST_ANDROID))&&!defined(NO_PTHREAD_GETATTR_NP) +#define HAVE_PTHREAD_GETATTR_NP 1 +#elif defined(FREEBSD)&&!defined(__GLIBC__)&&!defined(NO_PTHREAD_ATTR_GET_NP) +#define HAVE_PTHREAD_NP_H 1 +#define HAVE_PTHREAD_ATTR_GET_NP 1 +#endif +#if defined(UNIX_LIKE)&&defined(THREADS)&&!defined(NO_CANCEL_SAFE)&&!defined(HOST_ANDROID) +#define CANCEL_SAFE +#endif +#ifdef CANCEL_SAFE +#define IF_CANCEL(x)x +#else +#define IF_CANCEL(x) +#endif +#if!defined(CAN_HANDLE_FORK)&&!defined(NO_HANDLE_FORK)&&!defined(HAVE_NO_FORK)&&((defined(GC_PTHREADS)&&!defined(NACL)&&!defined(GC_WIN32_PTHREADS)&&!defined(USE_WINALLOC))||(defined(DARWIN)&&defined(MPROTECT_VDB))||defined(HANDLE_FORK)) +#define CAN_HANDLE_FORK +#endif +#if defined(CAN_HANDLE_FORK)&&!defined(CAN_CALL_ATFORK)&&!defined(HURD)&&!defined(SN_TARGET_ORBIS)&&!defined(HOST_TIZEN)&&(!defined(HOST_ANDROID)||__ANDROID_API__>=21) +#define CAN_CALL_ATFORK +#endif +#if!defined(CAN_HANDLE_FORK)&&!defined(HAVE_NO_FORK)&&(defined(MSWIN32)||defined(MSWINCE)||defined(DOS4GW)||defined(OS2)||defined(SYMBIAN)) +#define HAVE_NO_FORK +#endif +#if!defined(USE_MARK_BITS)&&!defined(USE_MARK_BYTES)&&defined(PARALLEL_MARK) +#define USE_MARK_BYTES +#endif +#if (defined(MSWINCE)&&!defined(__CEGCC__)||defined(MSWINRT_FLAVOR))&&!defined(NO_GETENV) +#define NO_GETENV +#endif +#if (defined(NO_GETENV)||defined(MSWINCE))&&!defined(NO_GETENV_WIN32) +#define NO_GETENV_WIN32 +#endif +#if!defined(MSGBOX_ON_ERROR)&&!defined(NO_MSGBOX_ON_ERROR)&&!defined(SMALL_CONFIG)&&defined(MSWIN32)&&!defined(MSWINRT_FLAVOR)&&!defined(MSWIN_XBOX1) +#define MSGBOX_ON_ERROR +#endif +#ifndef STRTOULL +#if defined(_WIN64)&&!defined(__GNUC__) +#define STRTOULL _strtoui64 +#elif defined(_LLP64)||defined(__LLP64__)||defined(_WIN64) +#define STRTOULL strtoull +#else +#define STRTOULL strtoul +#endif +#endif +#ifndef GC_WORD_C +#if defined(_WIN64)&&!defined(__GNUC__) +#define GC_WORD_C(val)val##ui64 +#elif defined(_LLP64)||defined(__LLP64__)||defined(_WIN64) +#define GC_WORD_C(val)val##ULL +#else +#define GC_WORD_C(val)((word)val##UL) +#endif +#endif +#if defined(__has_feature) +#if __has_feature(address_sanitizer)&&!defined(ADDRESS_SANITIZER) +#define ADDRESS_SANITIZER +#endif +#if __has_feature(memory_sanitizer)&&!defined(MEMORY_SANITIZER) +#define MEMORY_SANITIZER +#endif +#if __has_feature(thread_sanitizer)&&!defined(THREAD_SANITIZER) +#define THREAD_SANITIZER +#endif +#else +#ifdef __SANITIZE_ADDRESS__ +#define ADDRESS_SANITIZER +#endif +#endif +#if defined(SPARC) +#define ASM_CLEAR_CODE +#endif +#if defined(SPARC) +#define CAN_SAVE_CALL_ARGS +#endif +#if 
(defined(I386)||defined(X86_64))&&(defined(LINUX)||defined(__GLIBC__)) +#define CAN_SAVE_CALL_ARGS +#endif +#if defined(SAVE_CALL_COUNT)&&!defined(GC_ADD_CALLER)&&defined(GC_CAN_SAVE_CALL_STACKS) +#define SAVE_CALL_CHAIN +#endif +#ifdef SAVE_CALL_CHAIN +#if defined(SAVE_CALL_NARGS)&&defined(CAN_SAVE_CALL_ARGS) +#define NARGS SAVE_CALL_NARGS +#else +#define NARGS 0 +#endif +#endif +#ifdef SAVE_CALL_CHAIN +#if!defined(SAVE_CALL_COUNT)||defined(CPPCHECK) +#define NFRAMES 6 +#else +#define NFRAMES ((SAVE_CALL_COUNT+1)&~1) +#endif +#define NEED_CALLINFO +#endif +#ifdef GC_ADD_CALLER +#define NFRAMES 1 +#define NARGS 0 +#define NEED_CALLINFO +#endif +#if (defined(FREEBSD)||(defined(DARWIN)&&!defined(_POSIX_C_SOURCE))||(defined(SOLARIS)&&(!defined(_XOPEN_SOURCE)||defined(__EXTENSIONS__)))||defined(LINUX))&&!defined(HAVE_DLADDR) +#define HAVE_DLADDR 1 +#endif +#if defined(MAKE_BACK_GRAPH)&&!defined(DBG_HDRS_ALL) +#define DBG_HDRS_ALL 1 +#endif +#if defined(POINTER_MASK)&&!defined(POINTER_SHIFT) +#define POINTER_SHIFT 0 +#endif +#if defined(POINTER_SHIFT)&&!defined(POINTER_MASK) +#define POINTER_MASK ((word)(-1)) +#endif +#if!defined(FIXUP_POINTER)&&defined(POINTER_MASK) +#define FIXUP_POINTER(p)(p=((p)&POINTER_MASK)< 32 +#define HASH_TL +#endif +#if defined(LARGE_CONFIG)||!defined(SMALL_CONFIG) +#define LOG_BOTTOM_SZ 10 +#else +#define LOG_BOTTOM_SZ 11 +#endif +#define BOTTOM_SZ (1<>LOG_HBLKSIZE)&(HDR_CACHE_SIZE-1))) +#define HCE_VALID_FOR(hce,h)((hce)->block_addr==((word)(h)>>LOG_HBLKSIZE)) +#define HCE_HDR(h)((hce)->hce_hdr) +#ifdef PRINT_BLACK_LIST +GC_INNER hdr*GC_header_cache_miss(ptr_t p,hdr_cache_entry*hce, +ptr_t source); +#define HEADER_CACHE_MISS(p,hce,source)GC_header_cache_miss(p,hce,source) +#else +GC_INNER hdr*GC_header_cache_miss(ptr_t p,hdr_cache_entry*hce); +#define HEADER_CACHE_MISS(p,hce,source)GC_header_cache_miss(p,hce) +#endif +#define HC_GET_HDR(p,hhdr,source){ hdr_cache_entry*hce=HCE(p);if (EXPECT(HCE_VALID_FOR(hce,p),TRUE)){ HC_HIT();hhdr=hce->hce_hdr;} else { hhdr=HEADER_CACHE_MISS(p,hce,source);if (NULL==hhdr)break;} } +typedef struct bi { +hdr*index[BOTTOM_SZ]; +struct bi*asc_link; +struct bi*desc_link; +word key; +#ifdef HASH_TL +struct bi*hash_link; +#endif +} bottom_index; +#define MAX_JUMP (HBLKSIZE - 1) +#define HDR_FROM_BI(bi,p)((bi)->index[((word)(p)>>LOG_HBLKSIZE)&(BOTTOM_SZ - 1)]) +#ifndef HASH_TL +#define BI(p)(GC_top_index [(word)(p)>>(LOG_BOTTOM_SZ+LOG_HBLKSIZE)]) +#define HDR_INNER(p)HDR_FROM_BI(BI(p),p) +#ifdef SMALL_CONFIG +#define HDR(p)GC_find_header((ptr_t)(p)) +#else +#define HDR(p)HDR_INNER(p) +#endif +#define GET_BI(p,bottom_indx)(void)((bottom_indx)=BI(p)) +#define GET_HDR(p,hhdr)(void)((hhdr)=HDR(p)) +#define SET_HDR(p,hhdr)(void)(HDR_INNER(p)=(hhdr)) +#define GET_HDR_ADDR(p,ha)(void)((ha)=&HDR_INNER(p)) +#else +#define TL_HASH(hi)((hi)&(TOP_SZ - 1)) +#define GET_BI(p,bottom_indx)do { REGISTER word hi=(word)(p)>>(LOG_BOTTOM_SZ+LOG_HBLKSIZE);REGISTER bottom_index*_bi=GC_top_index[TL_HASH(hi)];while (_bi->key!=hi&&_bi!=GC_all_nils)_bi=_bi->hash_link;(bottom_indx)=_bi;} while (0) +#define GET_HDR_ADDR(p,ha)do { REGISTER bottom_index*bi;GET_BI(p,bi);(ha)=&HDR_FROM_BI(bi,p);} while (0) +#define GET_HDR(p,hhdr)do { REGISTER hdr**_ha;GET_HDR_ADDR(p,_ha);(hhdr)=*_ha;} while (0) +#define SET_HDR(p,hhdr)do { REGISTER hdr**_ha;GET_HDR_ADDR(p,_ha);*_ha=(hhdr);} while (0) +#define HDR(p)GC_find_header((ptr_t)(p)) +#endif +#define IS_FORWARDING_ADDR_OR_NIL(hhdr)((size_t)(hhdr)<=MAX_JUMP) +#define FORWARDED_ADDR(h,hhdr)((struct hblk*)(h)- (size_t)(hhdr)) 
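/*
 * Simplified, assumption-laden sketch of the two-level block-header lookup
 * that the bottom_index/HDR machinery above implements: the high bits of an
 * address pick a bottom_index page in a top-level table and the next bits
 * pick the per-heap-block header slot, so the common-case HDR(p) is two
 * array indexings (plus the optional header cache).  All sizes and names
 * here are illustrative; the real top level is either direct-mapped or
 * chained via hash_link when HASH_TL is in effect.
 */
#include <stddef.h>
#include <stdint.h>

#define DEMO_LOG_HBLKSIZE  12u                 /* 4 KiB heap blocks       */
#define DEMO_LOG_BOTTOM_SZ 10u                 /* slots per bottom page   */
#define DEMO_BOTTOM_SZ     ((size_t)1 << DEMO_LOG_BOTTOM_SZ)
#define DEMO_TOP_SZ        ((size_t)1 << 11)   /* slots in the top table  */

struct demo_hdr { size_t obj_size; };          /* stand-in for the block header */

struct demo_bottom_index {
    struct demo_hdr *index[DEMO_BOTTOM_SZ];
};

static struct demo_bottom_index *demo_top[DEMO_TOP_SZ];

/* Analogue of HDR(p); returns NULL for addresses with no registered block. */
static struct demo_hdr *demo_hdr_lookup(const void *p)
{
    uintptr_t a = (uintptr_t)p;
    size_t ti = (size_t)(a >> (DEMO_LOG_BOTTOM_SZ + DEMO_LOG_HBLKSIZE)) % DEMO_TOP_SZ;
    struct demo_bottom_index *bi = demo_top[ti];

    if (bi == NULL)
        return NULL;
    return bi->index[(a >> DEMO_LOG_HBLKSIZE) & (DEMO_BOTTOM_SZ - 1)];
}

int main(void)
{
    static struct demo_bottom_index page;      /* one bottom-index page    */
    static struct demo_hdr h = { 64 };
    uintptr_t a = (uintptr_t)0x40001000u;      /* arbitrary block address  */

    demo_top[(size_t)(a >> (DEMO_LOG_BOTTOM_SZ + DEMO_LOG_HBLKSIZE)) % DEMO_TOP_SZ] = &page;
    page.index[(a >> DEMO_LOG_HBLKSIZE) & (DEMO_BOTTOM_SZ - 1)] = &h;

    return demo_hdr_lookup((const void *)a) == &h ? 0 : 1;
}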
+EXTERN_C_END +#endif +#endif +#ifndef GC_ATTR_NO_SANITIZE_ADDR +#ifndef ADDRESS_SANITIZER +#define GC_ATTR_NO_SANITIZE_ADDR +#elif GC_CLANG_PREREQ(3,8) +#define GC_ATTR_NO_SANITIZE_ADDR __attribute__((no_sanitize("address"))) +#else +#define GC_ATTR_NO_SANITIZE_ADDR __attribute__((no_sanitize_address)) +#endif +#endif +#ifndef GC_ATTR_NO_SANITIZE_MEMORY +#ifndef MEMORY_SANITIZER +#define GC_ATTR_NO_SANITIZE_MEMORY +#elif GC_CLANG_PREREQ(3,8) +#define GC_ATTR_NO_SANITIZE_MEMORY __attribute__((no_sanitize("memory"))) +#else +#define GC_ATTR_NO_SANITIZE_MEMORY __attribute__((no_sanitize_memory)) +#endif +#endif +#ifndef GC_ATTR_NO_SANITIZE_THREAD +#ifndef THREAD_SANITIZER +#define GC_ATTR_NO_SANITIZE_THREAD +#elif GC_CLANG_PREREQ(3,8) +#define GC_ATTR_NO_SANITIZE_THREAD __attribute__((no_sanitize("thread"))) +#else +#define GC_ATTR_NO_SANITIZE_THREAD __attribute__((no_sanitize_thread)) +#endif +#endif +#ifndef GC_ATTR_UNUSED +#if GC_GNUC_PREREQ(3,4) +#define GC_ATTR_UNUSED __attribute__((__unused__)) +#else +#define GC_ATTR_UNUSED +#endif +#endif +#ifdef HAVE_CONFIG_H +#define GC_INLINE static inline +#elif defined(_MSC_VER)||defined(__INTEL_COMPILER)||defined(__DMC__)||(GC_GNUC_PREREQ(3,0)&&defined(__STRICT_ANSI__))||defined(__WATCOMC__) +#define GC_INLINE static __inline +#elif GC_GNUC_PREREQ(3,0)||defined(__sun) +#define GC_INLINE static inline +#else +#define GC_INLINE static +#endif +#ifndef GC_ATTR_NOINLINE +#if GC_GNUC_PREREQ(4,0) +#define GC_ATTR_NOINLINE __attribute__((__noinline__)) +#elif _MSC_VER>=1400 +#define GC_ATTR_NOINLINE __declspec(noinline) +#else +#define GC_ATTR_NOINLINE +#endif +#endif +#ifndef GC_API_OSCALL +#if defined(__GNUC__) +#if GC_GNUC_PREREQ(4,0)&&!defined(GC_NO_VISIBILITY) +#define GC_API_OSCALL extern __attribute__((__visibility__("default"))) +#else +#define GC_API_OSCALL extern +#endif +#else +#define GC_API_OSCALL GC_API +#endif +#endif +#ifndef GC_API_PRIV +#define GC_API_PRIV GC_API +#endif +#if defined(THREADS)&&!defined(NN_PLATFORM_CTR) +#ifndef GC_ATOMIC_OPS_H +#define GC_ATOMIC_OPS_H +#ifdef GC_BUILTIN_ATOMIC +#ifdef __cplusplus +extern "C" { +#endif +typedef GC_word AO_t; +#ifdef GC_PRIVATE_H +#define AO_INLINE GC_INLINE +#else +#define AO_INLINE static __inline +#endif +typedef unsigned char AO_TS_t; +#define AO_TS_CLEAR 0 +#define AO_TS_INITIALIZER (AO_TS_t)AO_TS_CLEAR +#if defined(__GCC_ATOMIC_TEST_AND_SET_TRUEVAL)&&!defined(CPPCHECK) +#define AO_TS_SET __GCC_ATOMIC_TEST_AND_SET_TRUEVAL +#else +#define AO_TS_SET (AO_TS_t)1 +#endif +#define AO_CLEAR(p)__atomic_clear(p,__ATOMIC_RELEASE) +#define AO_test_and_set_acquire(p)__atomic_test_and_set(p,__ATOMIC_ACQUIRE) +#define AO_HAVE_test_and_set_acquire +#define AO_compiler_barrier()__atomic_signal_fence(__ATOMIC_SEQ_CST) +#define AO_nop_full()__atomic_thread_fence(__ATOMIC_SEQ_CST) +#define AO_HAVE_nop_full +#define AO_fetch_and_add(p,v)__atomic_fetch_add(p,v,__ATOMIC_RELAXED) +#define AO_HAVE_fetch_and_add +#define AO_fetch_and_add1(p)AO_fetch_and_add(p,1) +#define AO_HAVE_fetch_and_add1 +#define AO_or(p,v)(void)__atomic_or_fetch(p,v,__ATOMIC_RELAXED) +#define AO_HAVE_or +#define AO_load(p)__atomic_load_n(p,__ATOMIC_RELAXED) +#define AO_HAVE_load +#define AO_load_acquire(p)__atomic_load_n(p,__ATOMIC_ACQUIRE) +#define AO_HAVE_load_acquire +#define AO_load_acquire_read(p)AO_load_acquire(p) +#define AO_HAVE_load_acquire_read +#define AO_store(p,v)__atomic_store_n(p,v,__ATOMIC_RELAXED) +#define AO_HAVE_store +#define AO_store_release(p,v)__atomic_store_n(p,v,__ATOMIC_RELEASE) +#define 
AO_HAVE_store_release +#define AO_store_release_write(p,v)AO_store_release(p,v) +#define AO_HAVE_store_release_write +#define AO_char_load(p)__atomic_load_n(p,__ATOMIC_RELAXED) +#define AO_HAVE_char_load +#define AO_char_store(p,v)__atomic_store_n(p,v,__ATOMIC_RELAXED) +#define AO_HAVE_char_store +#ifdef AO_REQUIRE_CAS +AO_INLINE int +AO_compare_and_swap(volatile AO_t*p,AO_t ov,AO_t nv) +{ +return (int)__atomic_compare_exchange_n(p,&ov,nv,0, +__ATOMIC_RELAXED,__ATOMIC_RELAXED); +} +AO_INLINE int +AO_compare_and_swap_release(volatile AO_t*p,AO_t ov,AO_t nv) +{ +return (int)__atomic_compare_exchange_n(p,&ov,nv,0, +__ATOMIC_RELEASE,__ATOMIC_RELAXED); +} +#define AO_HAVE_compare_and_swap_release +#endif +#ifdef __cplusplus +} +#endif +#ifndef NO_LOCKFREE_AO_OR +#define HAVE_LOCKFREE_AO_OR 1 +#endif +#else +#include "atomic_ops.h" +#if (!defined(AO_HAVE_load)||!defined(AO_HAVE_store))&&!defined(CPPCHECK) +#error AO_load or AO_store is missing;probably old version of atomic_ops +#endif +#endif +#endif +#ifndef AO_HAVE_compiler_barrier +#define AO_HAVE_compiler_barrier 1 +#endif +#endif +#if defined(MSWIN32)||defined(MSWINCE)||defined(CYGWIN32) +#ifndef WIN32_LEAN_AND_MEAN +#define WIN32_LEAN_AND_MEAN 1 +#endif +#define NOSERVICE +#include +#include +#endif +#ifndef GC_LOCKS_H +#define GC_LOCKS_H +#ifdef THREADS +#ifdef PCR +#include +#include +#endif +EXTERN_C_BEGIN +#ifdef PCR +GC_EXTERN PCR_Th_ML GC_allocate_ml; +#if defined(CPPCHECK) +#define DCL_LOCK_STATE +#else +#define DCL_LOCK_STATE PCR_ERes GC_fastLockRes;PCR_sigset_t GC_old_sig_mask +#endif +#define UNCOND_LOCK()PCR_Th_ML_Acquire(&GC_allocate_ml) +#define UNCOND_UNLOCK()PCR_Th_ML_Release(&GC_allocate_ml) +#elif defined(NN_PLATFORM_CTR)||defined(NINTENDO_SWITCH) +extern void GC_lock(void); +extern void GC_unlock(void); +#define UNCOND_LOCK()GC_lock() +#define UNCOND_UNLOCK()GC_unlock() +#endif +#if (!defined(AO_HAVE_test_and_set_acquire)||defined(GC_RTEMS_PTHREADS)||defined(SN_TARGET_ORBIS)||defined(SN_TARGET_PS3)||defined(GC_WIN32_THREADS)||defined(BASE_ATOMIC_OPS_EMULATED)||defined(LINT2))&&defined(GC_PTHREADS) +#define USE_PTHREAD_LOCKS +#undef USE_SPIN_LOCK +#endif +#if defined(GC_WIN32_THREADS)&&!defined(USE_PTHREAD_LOCKS) +#define NO_THREAD (DWORD)(-1) +GC_EXTERN CRITICAL_SECTION GC_allocate_ml; +#ifdef GC_ASSERTIONS +GC_EXTERN DWORD GC_lock_holder; +#define SET_LOCK_HOLDER()GC_lock_holder=GetCurrentThreadId() +#define UNSET_LOCK_HOLDER()GC_lock_holder=NO_THREAD +#define I_HOLD_LOCK()(!GC_need_to_lock||GC_lock_holder==GetCurrentThreadId()) +#ifdef THREAD_SANITIZER +#define I_DONT_HOLD_LOCK()TRUE +#else +#define I_DONT_HOLD_LOCK()(!GC_need_to_lock||GC_lock_holder!=GetCurrentThreadId()) +#endif +#define UNCOND_LOCK(){ GC_ASSERT(I_DONT_HOLD_LOCK());EnterCriticalSection(&GC_allocate_ml);SET_LOCK_HOLDER();} +#define UNCOND_UNLOCK(){ GC_ASSERT(I_HOLD_LOCK());UNSET_LOCK_HOLDER();LeaveCriticalSection(&GC_allocate_ml);} +#else +#define UNCOND_LOCK()EnterCriticalSection(&GC_allocate_ml) +#define UNCOND_UNLOCK()LeaveCriticalSection(&GC_allocate_ml) +#endif +#elif defined(GC_PTHREADS) +EXTERN_C_END +#include +EXTERN_C_BEGIN +#if!defined(GC_WIN32_PTHREADS) +#define NUMERIC_THREAD_ID(id)((unsigned long)(id)) +#define THREAD_EQUAL(id1,id2)((id1)==(id2)) +#define NUMERIC_THREAD_ID_UNIQUE +#elif defined(__WINPTHREADS_VERSION_MAJOR) +#define NUMERIC_THREAD_ID(id)((unsigned long)(id)) +#define THREAD_EQUAL(id1,id2)((id1)==(id2)) +#ifndef _WIN64 +#define NUMERIC_THREAD_ID_UNIQUE +#endif +#else +#define NUMERIC_THREAD_ID(id)((unsigned 
long)(word)(id.p)) +#define THREAD_EQUAL(id1,id2)((id1.p==id2.p)&&(id1.x==id2.x)) +#undef NUMERIC_THREAD_ID_UNIQUE +#endif +#define NO_THREAD ((unsigned long)(-1l)) +#ifdef SN_TARGET_PSP2 +EXTERN_C_END +#include "psp2-support.h" +EXTERN_C_BEGIN +GC_EXTERN WapiMutex GC_allocate_ml_PSP2; +#define UNCOND_LOCK(){ int res;GC_ASSERT(I_DONT_HOLD_LOCK());res=PSP2_MutexLock(&GC_allocate_ml_PSP2);GC_ASSERT(0==res);(void)res;SET_LOCK_HOLDER();} +#define UNCOND_UNLOCK(){ int res;GC_ASSERT(I_HOLD_LOCK());UNSET_LOCK_HOLDER();res=PSP2_MutexUnlock(&GC_allocate_ml_PSP2);GC_ASSERT(0==res);(void)res;} +#elif (!defined(THREAD_LOCAL_ALLOC)||defined(USE_SPIN_LOCK))&&!defined(USE_PTHREAD_LOCKS) +#undef USE_SPIN_LOCK +#define USE_SPIN_LOCK +GC_EXTERN volatile AO_TS_t GC_allocate_lock; +GC_INNER void GC_lock(void); +#ifdef GC_ASSERTIONS +#define UNCOND_LOCK(){ GC_ASSERT(I_DONT_HOLD_LOCK());if (AO_test_and_set_acquire(&GC_allocate_lock)==AO_TS_SET)GC_lock();SET_LOCK_HOLDER();} +#define UNCOND_UNLOCK(){ GC_ASSERT(I_HOLD_LOCK());UNSET_LOCK_HOLDER();AO_CLEAR(&GC_allocate_lock);} +#else +#define UNCOND_LOCK(){ if (AO_test_and_set_acquire(&GC_allocate_lock)==AO_TS_SET)GC_lock();} +#define UNCOND_UNLOCK()AO_CLEAR(&GC_allocate_lock) +#endif +#else +#ifndef USE_PTHREAD_LOCKS +#define USE_PTHREAD_LOCKS +#endif +#endif +#ifdef USE_PTHREAD_LOCKS +EXTERN_C_END +#include +EXTERN_C_BEGIN +GC_EXTERN pthread_mutex_t GC_allocate_ml; +#ifdef GC_ASSERTIONS +GC_INNER void GC_lock(void); +#define UNCOND_LOCK(){ GC_ASSERT(I_DONT_HOLD_LOCK());GC_lock();SET_LOCK_HOLDER();} +#define UNCOND_UNLOCK(){ GC_ASSERT(I_HOLD_LOCK());UNSET_LOCK_HOLDER();pthread_mutex_unlock(&GC_allocate_ml);} +#else +#if defined(NO_PTHREAD_TRYLOCK) +#define UNCOND_LOCK()pthread_mutex_lock(&GC_allocate_ml) +#else +GC_INNER void GC_lock(void); +#define UNCOND_LOCK(){ if (0!=pthread_mutex_trylock(&GC_allocate_ml))GC_lock();} +#endif +#define UNCOND_UNLOCK()pthread_mutex_unlock(&GC_allocate_ml) +#endif +#endif +#ifdef GC_ASSERTIONS +GC_EXTERN unsigned long GC_lock_holder; +#define SET_LOCK_HOLDER()GC_lock_holder=NUMERIC_THREAD_ID(pthread_self()) +#define UNSET_LOCK_HOLDER()GC_lock_holder=NO_THREAD +#define I_HOLD_LOCK()(!GC_need_to_lock||GC_lock_holder==NUMERIC_THREAD_ID(pthread_self())) +#if!defined(NUMERIC_THREAD_ID_UNIQUE)||defined(THREAD_SANITIZER) +#define I_DONT_HOLD_LOCK()TRUE +#else +#define I_DONT_HOLD_LOCK()(!GC_need_to_lock||GC_lock_holder!=NUMERIC_THREAD_ID(pthread_self())) +#endif +#endif +#ifndef GC_WIN32_THREADS +GC_EXTERN volatile GC_bool GC_collecting; +#ifdef AO_HAVE_char_store +#define ENTER_GC()AO_char_store((unsigned char*)&GC_collecting,TRUE) +#define EXIT_GC()AO_char_store((unsigned char*)&GC_collecting,FALSE) +#else +#define ENTER_GC()(void)(GC_collecting=TRUE) +#define EXIT_GC()(void)(GC_collecting=FALSE) +#endif +#endif +#endif +#if defined(GC_ALWAYS_MULTITHREADED)&&(defined(USE_PTHREAD_LOCKS)||defined(USE_SPIN_LOCK)) +#define GC_need_to_lock TRUE +#define set_need_to_lock()(void)0 +#else +#if defined(GC_ALWAYS_MULTITHREADED)&&!defined(CPPCHECK) +#error Runtime initialization of GC lock is needed! 
+#endif +#undef GC_ALWAYS_MULTITHREADED +GC_EXTERN GC_bool GC_need_to_lock; +#ifdef THREAD_SANITIZER +#define set_need_to_lock()(void)(*(GC_bool volatile*)&GC_need_to_lock?FALSE:(GC_need_to_lock=TRUE)) +#else +#define set_need_to_lock()(void)(GC_need_to_lock=TRUE) +#endif +#endif +EXTERN_C_END +#else +#define LOCK()(void)0 +#define UNLOCK()(void)0 +#ifdef GC_ASSERTIONS +#define I_HOLD_LOCK()TRUE +#define I_DONT_HOLD_LOCK()TRUE +#endif +#endif +#if defined(UNCOND_LOCK)&&!defined(LOCK) +#if (defined(LINT2)&&defined(USE_PTHREAD_LOCKS))||defined(GC_ALWAYS_MULTITHREADED) +#define LOCK()UNCOND_LOCK() +#define UNLOCK()UNCOND_UNLOCK() +#else +#define LOCK()do { if (GC_need_to_lock)UNCOND_LOCK();} while (0) +#define UNLOCK()do { if (GC_need_to_lock)UNCOND_UNLOCK();} while (0) +#endif +#endif +#ifndef ENTER_GC +#define ENTER_GC() +#define EXIT_GC() +#endif +#ifndef DCL_LOCK_STATE +#define DCL_LOCK_STATE +#endif +#endif +#define GC_WORD_MAX (~(word)0) +#ifdef STACK_GROWS_DOWN +#define COOLER_THAN > +#define HOTTER_THAN < +#define MAKE_COOLER(x,y)if ((word)((x)+(y))> (word)(x)){(x)+=(y);} else (x)=(ptr_t)GC_WORD_MAX +#define MAKE_HOTTER(x,y)(x)-=(y) +#else +#define COOLER_THAN < +#define HOTTER_THAN > +#define MAKE_COOLER(x,y)if ((word)((x)- (y))< (word)(x)){(x)-=(y);} else (x)=0 +#define MAKE_HOTTER(x,y)(x)+=(y) +#endif +#if defined(AMIGA)&&defined(__SASC) +#define GC_FAR __far +#else +#define GC_FAR +#endif +EXTERN_C_BEGIN +#ifndef GC_NO_FINALIZATION +#define GC_INVOKE_FINALIZERS()GC_notify_or_invoke_finalizers() +GC_INNER void GC_notify_or_invoke_finalizers(void); +GC_INNER void GC_finalize(void); +#ifndef GC_TOGGLE_REFS_NOT_NEEDED +GC_INNER void GC_process_togglerefs(void); +#endif +#ifndef SMALL_CONFIG +GC_INNER void GC_print_finalization_stats(void); +#endif +#else +#define GC_INVOKE_FINALIZERS()(void)0 +#endif +#if!defined(DONT_ADD_BYTE_AT_END) +#ifdef LINT2 +#define EXTRA_BYTES ((size_t)(GC_all_interior_pointers?1:0)) +#else +#define EXTRA_BYTES (size_t)GC_all_interior_pointers +#endif +#define MAX_EXTRA_BYTES 1 +#else +#define EXTRA_BYTES 0 +#define MAX_EXTRA_BYTES 0 +#endif +#ifndef LARGE_CONFIG +#define MINHINCR 16 +#define MAXHINCR 2048 +#else +#define MINHINCR 64 +#define MAXHINCR 4096 +#endif +#define BL_LIMIT GC_black_list_spacing +#ifdef NEED_CALLINFO +struct callinfo { +word ci_pc; +#if NARGS > 0 +word ci_arg[NARGS]; +#endif +#if (NFRAMES*(NARGS+1))% 2==1 +word ci_dummy; +#endif +}; +#endif +#ifdef SAVE_CALL_CHAIN +GC_INNER void GC_save_callers(struct callinfo info[NFRAMES]); +GC_INNER void GC_print_callers(struct callinfo info[NFRAMES]); +#endif +EXTERN_C_END +#ifndef NO_CLOCK +#ifdef BSD_TIME +#undef CLOCK_TYPE +#undef GET_TIME +#undef MS_TIME_DIFF +#define CLOCK_TYPE struct timeval +#define CLOCK_TYPE_INITIALIZER { 0,0 } +#define GET_TIME(x)do { struct rusage rusage;getrusage(RUSAGE_SELF,&rusage);x=rusage.ru_utime;} while (0) +#define MS_TIME_DIFF(a,b)((unsigned long)((long)(a.tv_sec-b.tv_sec)*1000+(long)(a.tv_usec - b.tv_usec)/1000 - (a.tv_usec < b.tv_usec&&(long)(a.tv_usec - b.tv_usec)% 1000!=0?1:0))) +#define NS_FRAC_TIME_DIFF(a,b)((unsigned long)((a.tv_usec < b.tv_usec&&(long)(a.tv_usec - b.tv_usec)% 1000!=0?1000L:0)+(long)(a.tv_usec - b.tv_usec)% 1000)*1000) +#elif defined(MSWIN32)||defined(MSWINCE)||defined(WINXP_USE_PERF_COUNTER) +#if defined(MSWINRT_FLAVOR)||defined(WINXP_USE_PERF_COUNTER) +#define CLOCK_TYPE ULONGLONG +#define GET_TIME(x)do { LARGE_INTEGER freq,tc;if (!QueryPerformanceFrequency(&freq)||!QueryPerformanceCounter(&tc))ABORT("QueryPerformanceCounter requires 
WinXP+");x=(CLOCK_TYPE)((double)tc.QuadPart/freq.QuadPart*1e9);} while (0) +#define MS_TIME_DIFF(a,b)((unsigned long)(((a)- (b))/1000000UL)) +#define NS_FRAC_TIME_DIFF(a,b)((unsigned long)(((a)- (b))% 1000000UL)) +#else +#define CLOCK_TYPE DWORD +#define GET_TIME(x)(void)(x=GetTickCount()) +#define MS_TIME_DIFF(a,b)((unsigned long)((a)- (b))) +#define NS_FRAC_TIME_DIFF(a,b)0UL +#endif +#elif defined(NN_PLATFORM_CTR) +#define CLOCK_TYPE long long +EXTERN_C_BEGIN +CLOCK_TYPE n3ds_get_system_tick(void); +CLOCK_TYPE n3ds_convert_tick_to_ms(CLOCK_TYPE tick); +EXTERN_C_END +#define GET_TIME(x)(void)(x=n3ds_get_system_tick()) +#define MS_TIME_DIFF(a,b)((unsigned long)n3ds_convert_tick_to_ms((a)-(b))) +#define NS_FRAC_TIME_DIFF(a,b)0UL +#elif defined(NINTENDO_SWITCH)||(((defined(LINUX)&&defined(__USE_POSIX199309))||defined(CYGWIN32))&&defined(_POSIX_TIMERS)) +#include +#define CLOCK_TYPE struct timespec +#define CLOCK_TYPE_INITIALIZER { 0,0 } +#if defined(_POSIX_MONOTONIC_CLOCK)&&!defined(NINTENDO_SWITCH) +#define GET_TIME(x)do { if (clock_gettime(CLOCK_MONOTONIC,&x)==-1)ABORT("clock_gettime failed");} while (0) +#else +#define GET_TIME(x)do { if (clock_gettime(CLOCK_REALTIME,&x)==-1)ABORT("clock_gettime failed");} while (0) +#endif +#define MS_TIME_DIFF(a,b)((unsigned long)((a).tv_nsec+(1000000L*1000 - (b).tv_nsec))/1000000UL+((unsigned long)((a).tv_sec - (b).tv_sec)*1000UL)- 1000UL) +#define NS_FRAC_TIME_DIFF(a,b)((unsigned long)((a).tv_nsec+(1000000L*1000 - (b).tv_nsec))% 1000000UL) +#else +#include +#if defined(FREEBSD)&&!defined(CLOCKS_PER_SEC) +#include +#define CLOCKS_PER_SEC CLK_TCK +#endif +#if!defined(CLOCKS_PER_SEC) +#define CLOCKS_PER_SEC 1000000 +#endif +#define CLOCK_TYPE clock_t +#define GET_TIME(x)(void)(x=clock()) +#define MS_TIME_DIFF(a,b)(CLOCKS_PER_SEC % 1000==0?(unsigned long)((a)- (b))/(unsigned long)(CLOCKS_PER_SEC/1000):((unsigned long)((a)- (b))*1000)/(unsigned long)CLOCKS_PER_SEC) +#define NS_FRAC_TIME_DIFF(a,b)(CLOCKS_PER_SEC<=1000?0UL:(unsigned long)(CLOCKS_PER_SEC<=(clock_t)1000000UL?(((a)- (b))*((clock_t)1000000UL/CLOCKS_PER_SEC)% 1000)*1000:(CLOCKS_PER_SEC<=(clock_t)1000000UL*1000?((a)- (b))*((clock_t)1000000UL*1000/CLOCKS_PER_SEC):(((a)- (b))*(clock_t)1000000UL*1000)/CLOCKS_PER_SEC)% (clock_t)1000000UL)) +#endif +#ifndef CLOCK_TYPE_INITIALIZER +#define CLOCK_TYPE_INITIALIZER 0 +#endif +#endif +#if defined(SPARC)&&defined(SUNOS4)||(defined(M68K)&&defined(NEXT))||defined(VAX) +#define BCOPY_EXISTS +#elif defined(AMIGA)||defined(DARWIN) +#include +#define BCOPY_EXISTS +#elif defined(MACOS)&&defined(POWERPC) +#include +#define bcopy(x,y,n)BlockMoveData(x,y,n) +#define bzero(x,n)BlockZero(x,n) +#define BCOPY_EXISTS +#endif +#if!defined(BCOPY_EXISTS)||defined(CPPCHECK) +#include +#define BCOPY(x,y,n)memcpy(y,x,(size_t)(n)) +#define BZERO(x,n)memset(x,0,(size_t)(n)) +#else +#define BCOPY(x,y,n)bcopy((void*)(x),(void*)(y),(size_t)(n)) +#define BZERO(x,n)bzero((void*)(x),(size_t)(n)) +#endif +#ifdef PCR +#include "th/PCR_ThCtl.h" +#endif +EXTERN_C_BEGIN +#ifdef PCR +#define STOP_WORLD()PCR_ThCtl_SetExclusiveMode(PCR_ThCtl_ExclusiveMode_stopNormal,PCR_allSigsBlocked,PCR_waitForever) +#define START_WORLD()PCR_ThCtl_SetExclusiveMode(PCR_ThCtl_ExclusiveMode_null,PCR_allSigsBlocked,PCR_waitForever) +#else +#if defined(NN_PLATFORM_CTR)||defined(NINTENDO_SWITCH)||defined(GC_WIN32_THREADS)||defined(GC_PTHREADS) +GC_INNER void GC_stop_world(void); +GC_INNER void GC_start_world(void); +#define STOP_WORLD()GC_stop_world() +#define START_WORLD()GC_start_world() +#else +#define 
STOP_WORLD()GC_ASSERT(GC_blocked_sp==NULL) +#define START_WORLD() +#endif +#endif +#ifdef THREADS +GC_EXTERN GC_on_thread_event_proc GC_on_thread_event; +#endif +#if defined(SMALL_CONFIG)||defined(PCR) +#define GC_on_abort(msg)(void)0 +#else +GC_API_PRIV GC_abort_func GC_on_abort; +#endif +#if defined(CPPCHECK) +#define ABORT(msg){ GC_on_abort(msg);abort();} +#elif defined(PCR) +#define ABORT(s)PCR_Base_Panic(s) +#else +#if defined(MSWIN_XBOX1)&&!defined(DebugBreak) +#define DebugBreak()__debugbreak() +#elif defined(MSWINCE)&&!defined(DebugBreak)&&(!defined(UNDER_CE)||(defined(__MINGW32CE__)&&!defined(ARM32))) +#define DebugBreak()_exit(-1) +#endif +#if defined(MSWIN32)&&(defined(NO_DEBUGGING)||defined(LINT2)) +#define ABORT(msg)(GC_on_abort(msg),_exit(-1)) +#elif defined(MSWINCE)&&defined(NO_DEBUGGING) +#define ABORT(msg)(GC_on_abort(msg),ExitProcess(-1)) +#elif defined(MSWIN32)||defined(MSWINCE) +#if defined(_CrtDbgBreak)&&defined(_DEBUG)&&defined(_MSC_VER) +#define ABORT(msg){ GC_on_abort(msg);_CrtDbgBreak();} +#else +#define ABORT(msg){ GC_on_abort(msg);DebugBreak();} +#endif +#else +#define ABORT(msg)(GC_on_abort(msg),abort()) +#endif +#endif +#define ABORT_ARG1(C_msg,C_fmt,arg1)MACRO_BLKSTMT_BEGIN GC_INFOLOG_PRINTF(C_msg C_fmt "\n",arg1);ABORT(C_msg);MACRO_BLKSTMT_END +#define ABORT_ARG2(C_msg,C_fmt,arg1,arg2)MACRO_BLKSTMT_BEGIN GC_INFOLOG_PRINTF(C_msg C_fmt "\n",arg1,arg2);ABORT(C_msg);MACRO_BLKSTMT_END +#define ABORT_ARG3(C_msg,C_fmt,arg1,arg2,arg3)MACRO_BLKSTMT_BEGIN GC_INFOLOG_PRINTF(C_msg C_fmt "\n",arg1,arg2,arg3);ABORT(C_msg);MACRO_BLKSTMT_END +#define ABORT_RET(msg)if ((signed_word)GC_current_warn_proc==-1){} else ABORT(msg) +#ifdef PCR +#define EXIT()PCR_Base_Exit(1,PCR_waitForever) +#else +#define EXIT()(GC_on_abort(NULL),exit(1)) +#endif +#define WARN(msg,arg)(*GC_current_warn_proc)(( char*)("GC Warning:" msg),(word)(arg)) +GC_EXTERN GC_warn_proc GC_current_warn_proc; +#ifndef WARN_PRIdPTR +#define WARN_PRIdPTR "ld" +#endif +#define TRUSTED_STRING(s)(char*)COVERT_DATAFLOW(s) +#ifdef GC_READ_ENV_FILE +GC_INNER char*GC_envfile_getenv(const char*name); +#define GETENV(name)GC_envfile_getenv(name) +#elif defined(NO_GETENV)&&!defined(CPPCHECK) +#define GETENV(name)NULL +#elif defined(EMPTY_GETENV_RESULTS) +GC_INLINE char*fixed_getenv(const char*name) +{ +char*value=getenv(name); +return value!=NULL&&*value!='\0'?value:NULL; +} +#define GETENV(name)fixed_getenv(name) +#else +#define GETENV(name)getenv(name) +#endif +EXTERN_C_END +#if defined(DARWIN) +#include +#ifndef MAC_OS_X_VERSION_MAX_ALLOWED +#include +#endif +#if defined(POWERPC) +#if CPP_WORDSZ==32 +#define GC_THREAD_STATE_T ppc_thread_state_t +#else +#define GC_THREAD_STATE_T ppc_thread_state64_t +#define GC_MACH_THREAD_STATE PPC_THREAD_STATE64 +#define GC_MACH_THREAD_STATE_COUNT PPC_THREAD_STATE64_COUNT +#endif +#elif defined(I386)||defined(X86_64) +#if CPP_WORDSZ==32 +#if defined(i386_THREAD_STATE_COUNT)&&!defined(x86_THREAD_STATE32_COUNT) +#define GC_THREAD_STATE_T i386_thread_state_t +#define GC_MACH_THREAD_STATE i386_THREAD_STATE +#define GC_MACH_THREAD_STATE_COUNT i386_THREAD_STATE_COUNT +#else +#define GC_THREAD_STATE_T x86_thread_state32_t +#define GC_MACH_THREAD_STATE x86_THREAD_STATE32 +#define GC_MACH_THREAD_STATE_COUNT x86_THREAD_STATE32_COUNT +#endif +#else +#define GC_THREAD_STATE_T x86_thread_state64_t +#define GC_MACH_THREAD_STATE x86_THREAD_STATE64 +#define GC_MACH_THREAD_STATE_COUNT x86_THREAD_STATE64_COUNT +#endif +#elif defined(ARM32)&&defined(ARM_UNIFIED_THREAD_STATE)&&!defined(CPPCHECK) +#define 
GC_THREAD_STATE_T arm_unified_thread_state_t +#define GC_MACH_THREAD_STATE ARM_UNIFIED_THREAD_STATE +#define GC_MACH_THREAD_STATE_COUNT ARM_UNIFIED_THREAD_STATE_COUNT +#elif defined(ARM32) +#define GC_THREAD_STATE_T arm_thread_state_t +#ifdef ARM_MACHINE_THREAD_STATE_COUNT +#define GC_MACH_THREAD_STATE ARM_MACHINE_THREAD_STATE +#define GC_MACH_THREAD_STATE_COUNT ARM_MACHINE_THREAD_STATE_COUNT +#endif +#elif defined(AARCH64) +#define GC_THREAD_STATE_T arm_thread_state64_t +#define GC_MACH_THREAD_STATE ARM_THREAD_STATE64 +#define GC_MACH_THREAD_STATE_COUNT ARM_THREAD_STATE64_COUNT +#elif!defined(CPPCHECK) +#error define GC_THREAD_STATE_T +#endif +#ifndef GC_MACH_THREAD_STATE +#define GC_MACH_THREAD_STATE MACHINE_THREAD_STATE +#define GC_MACH_THREAD_STATE_COUNT MACHINE_THREAD_STATE_COUNT +#endif +#if CPP_WORDSZ==32 +#define GC_MACH_HEADER mach_header +#define GC_MACH_SECTION section +#define GC_GETSECTBYNAME getsectbynamefromheader +#else +#define GC_MACH_HEADER mach_header_64 +#define GC_MACH_SECTION section_64 +#define GC_GETSECTBYNAME getsectbynamefromheader_64 +#endif +#if __DARWIN_UNIX03 +#define THREAD_FLD_NAME(x)__ ## x +#else +#define THREAD_FLD_NAME(x)x +#endif +#if defined(ARM32)&&defined(ARM_UNIFIED_THREAD_STATE) +#define THREAD_FLD(x)ts_32.THREAD_FLD_NAME(x) +#else +#define THREAD_FLD(x)THREAD_FLD_NAME(x) +#endif +#endif +#include +#if __STDC_VERSION__>=201112L +#include +#endif +EXTERN_C_BEGIN +#if CPP_WORDSZ==32 +#define WORDS_TO_BYTES(x)((x)<<2) +#define BYTES_TO_WORDS(x)((x)>>2) +#define LOGWL ((word)5) +#define modWORDSZ(n)((n)&0x1f) +#if ALIGNMENT!=4 +#define UNALIGNED_PTRS +#endif +#endif +#if CPP_WORDSZ==64 +#define WORDS_TO_BYTES(x)((x)<<3) +#define BYTES_TO_WORDS(x)((x)>>3) +#define LOGWL ((word)6) +#define modWORDSZ(n)((n)&0x3f) +#if ALIGNMENT!=8 +#define UNALIGNED_PTRS +#endif +#endif +#define GRANULE_BYTES GC_GRANULE_BYTES +#define TINY_FREELISTS GC_TINY_FREELISTS +#define WORDSZ ((word)CPP_WORDSZ) +#define SIGNB ((word)1<<(WORDSZ-1)) +#define BYTES_PER_WORD ((word)(sizeof (word))) +#define divWORDSZ(n)((n)>>LOGWL) +#if GRANULE_BYTES==8 +#define BYTES_TO_GRANULES(n)((n)>>3) +#define GRANULES_TO_BYTES(n)((n)<<3) +#if CPP_WORDSZ==64 +#define GRANULES_TO_WORDS(n)(n) +#elif CPP_WORDSZ==32 +#define GRANULES_TO_WORDS(n)((n)<<1) +#else +#define GRANULES_TO_WORDS(n)BYTES_TO_WORDS(GRANULES_TO_BYTES(n)) +#endif +#elif GRANULE_BYTES==16 +#define BYTES_TO_GRANULES(n)((n)>>4) +#define GRANULES_TO_BYTES(n)((n)<<4) +#if CPP_WORDSZ==64 +#define GRANULES_TO_WORDS(n)((n)<<1) +#elif CPP_WORDSZ==32 +#define GRANULES_TO_WORDS(n)((n)<<2) +#else +#define GRANULES_TO_WORDS(n)BYTES_TO_WORDS(GRANULES_TO_BYTES(n)) +#endif +#else +#error Bad GRANULE_BYTES value +#endif +#ifndef HBLKSIZE +#if defined(LARGE_CONFIG)||!defined(SMALL_CONFIG) +#ifdef ALPHA +#define CPP_LOG_HBLKSIZE 13 +#elif defined(SN_TARGET_ORBIS)||defined(SN_TARGET_PSP2) +#define CPP_LOG_HBLKSIZE 16 +#else +#define CPP_LOG_HBLKSIZE 12 +#endif +#else +#define CPP_LOG_HBLKSIZE 10 +#endif +#else +#if HBLKSIZE==512 +#define CPP_LOG_HBLKSIZE 9 +#elif HBLKSIZE==1024 +#define CPP_LOG_HBLKSIZE 10 +#elif HBLKSIZE==2048 +#define CPP_LOG_HBLKSIZE 11 +#elif HBLKSIZE==4096 +#define CPP_LOG_HBLKSIZE 12 +#elif HBLKSIZE==8192 +#define CPP_LOG_HBLKSIZE 13 +#elif HBLKSIZE==16384 +#define CPP_LOG_HBLKSIZE 14 +#elif!defined(CPPCHECK) +#error Bad HBLKSIZE value +#endif +#undef HBLKSIZE +#endif +#define CPP_HBLKSIZE (1<>LOG_HBLKSIZE) +#define HBLK_PTR_DIFF(p,q)divHBLKSZ((ptr_t)p - (ptr_t)q) +#define modHBLKSZ(n)((n)&(HBLKSIZE-1)) +#define 
HBLKPTR(objptr)((struct hblk*)(((word)(objptr))&~(word)(HBLKSIZE-1))) +#define HBLKDISPL(objptr)(((size_t)(objptr))&(HBLKSIZE-1)) +#define ROUNDUP_GRANULE_SIZE(lb)(SIZET_SAT_ADD(lb,GRANULE_BYTES - 1)&~(GRANULE_BYTES - 1)) +#define ROUNDED_UP_GRANULES(lb)BYTES_TO_GRANULES(SIZET_SAT_ADD(lb,GRANULE_BYTES - 1+EXTRA_BYTES)) +#if MAX_EXTRA_BYTES==0 +#define SMALL_OBJ(bytes)EXPECT((bytes)<=(MAXOBJBYTES),TRUE) +#else +#define SMALL_OBJ(bytes)(EXPECT((bytes)<=(MAXOBJBYTES - MAX_EXTRA_BYTES),TRUE)||(bytes)<=MAXOBJBYTES - EXTRA_BYTES) +#endif +#define ADD_SLOP(lb)SIZET_SAT_ADD(lb,EXTRA_BYTES) +#ifdef LARGE_CONFIG +#if CPP_WORDSZ==32 +#define LOG_PHT_ENTRIES 20 +#else +#define LOG_PHT_ENTRIES 21 +#endif +#elif!defined(SMALL_CONFIG) +#define LOG_PHT_ENTRIES 18 +#else +#define LOG_PHT_ENTRIES 15 +#endif +#define PHT_ENTRIES ((word)1<>LOGWL) +typedef word page_hash_table[PHT_SIZE]; +#define PHT_HASH(addr)((((word)(addr))>>LOG_HBLKSIZE)&(PHT_ENTRIES - 1)) +#define get_pht_entry_from_index(bl,index)(((bl)[divWORDSZ(index)]>>modWORDSZ(index))&1) +#define set_pht_entry_from_index(bl,index)(void)((bl)[divWORDSZ(index)]|=(word)1<hb_flags&FREE_BLK)!=0) +#define OBJ_SZ_TO_BLOCKS(lb)divHBLKSZ((lb)+HBLKSIZE-1) +#define OBJ_SZ_TO_BLOCKS_CHECKED(lb)divHBLKSZ(SIZET_SAT_ADD(lb,HBLKSIZE - 1)) +#define obj_link(p)(*(void**)(p)) +#define LOG_MAX_MARK_PROCS 6 +#define MAX_MARK_PROCS (1< 32 +#define MAX_HEAP_SECTS 81920 +#else +#define MAX_HEAP_SECTS 7680 +#endif +#elif defined(SMALL_CONFIG)&&!defined(USE_PROC_FOR_LIBRARIES) +#if defined(PARALLEL_MARK)&&(defined(MSWIN32)||defined(CYGWIN32)) +#define MAX_HEAP_SECTS 384 +#else +#define MAX_HEAP_SECTS 128 +#endif +#elif CPP_WORDSZ > 32 +#define MAX_HEAP_SECTS 1024 +#else +#define MAX_HEAP_SECTS 512 +#endif +#endif +typedef struct GC_ms_entry { +ptr_t mse_start; +union word_ptr_ao_u mse_descr; +} mse; +typedef int mark_state_t; +struct disappearing_link; +struct finalizable_object; +struct dl_hashtbl_s { +struct disappearing_link**head; +word entries; +unsigned log_size; +}; +struct fnlz_roots_s { +struct finalizable_object**fo_head; +struct finalizable_object*finalize_now; +}; +union toggle_ref_u { +void*strong_ref; +GC_hidden_pointer weak_ref; +}; +typedef struct { +word ed_bitmap; +GC_bool ed_continued; +} typed_ext_descr_t; +struct _GC_arrays { +word _heapsize; +word _requested_heapsize; +ptr_t _last_heap_addr; +ptr_t _prev_heap_addr; +word _large_free_bytes; +word _large_allocd_bytes; +word _max_large_allocd_bytes; +word _bytes_allocd_before_gc; +#ifndef SEPARATE_GLOBALS +#define GC_bytes_allocd GC_arrays._bytes_allocd +word _bytes_allocd; +#endif +word _bytes_dropped; +word _bytes_finalized; +word _bytes_freed; +word _finalizer_bytes_freed; +bottom_index*_all_bottom_indices; +bottom_index*_all_bottom_indices_end; +ptr_t _scratch_free_ptr; +hdr*_hdr_free_list; +ptr_t _scratch_end_ptr; +ptr_t _scratch_last_end_ptr; +mse*_mark_stack; +mse*_mark_stack_limit; +#ifdef PARALLEL_MARK +mse*volatile _mark_stack_top; +#else +mse*_mark_stack_top; +#endif +word _composite_in_use; +word _atomic_in_use; +#ifdef USE_MUNMAP +#define GC_unmapped_bytes GC_arrays._unmapped_bytes +word _unmapped_bytes; +#ifdef COUNT_UNMAPPED_REGIONS +#define GC_num_unmapped_regions GC_arrays._num_unmapped_regions +signed_word _num_unmapped_regions; +#endif +#else +#define GC_unmapped_bytes 0 +#endif +bottom_index*_all_nils; +#define GC_scan_ptr GC_arrays._scan_ptr +struct hblk*_scan_ptr; +#ifdef PARALLEL_MARK +#define GC_main_local_mark_stack GC_arrays._main_local_mark_stack +mse*_main_local_mark_stack; 
+#define GC_first_nonempty GC_arrays._first_nonempty +volatile AO_t _first_nonempty; +#endif +#define GC_mark_stack_size GC_arrays._mark_stack_size +size_t _mark_stack_size; +#define GC_mark_state GC_arrays._mark_state +mark_state_t _mark_state; +#define GC_mark_stack_too_small GC_arrays._mark_stack_too_small +GC_bool _mark_stack_too_small; +#define GC_objects_are_marked GC_arrays._objects_are_marked +GC_bool _objects_are_marked; +#ifdef ENABLE_TRACE +#define GC_trace_addr GC_arrays._trace_addr +ptr_t _trace_addr; +#endif +#define GC_n_heap_sects GC_arrays._n_heap_sects +word _n_heap_sects; +#if defined(MSWIN32)||defined(MSWINCE)||defined(CYGWIN32) +#define GC_n_heap_bases GC_arrays._n_heap_bases +word _n_heap_bases; +#endif +#ifdef USE_PROC_FOR_LIBRARIES +#define GC_n_memory GC_arrays._n_memory +word _n_memory; +#endif +#ifdef GC_GCJ_SUPPORT +#define GC_gcjobjfreelist GC_arrays._gcjobjfreelist +ptr_t*_gcjobjfreelist; +#endif +#define GC_fo_entries GC_arrays._fo_entries +word _fo_entries; +#ifndef GC_NO_FINALIZATION +#define GC_dl_hashtbl GC_arrays._dl_hashtbl +#define GC_fnlz_roots GC_arrays._fnlz_roots +#define GC_log_fo_table_size GC_arrays._log_fo_table_size +#ifndef GC_LONG_REFS_NOT_NEEDED +#define GC_ll_hashtbl GC_arrays._ll_hashtbl +struct dl_hashtbl_s _ll_hashtbl; +#endif +struct dl_hashtbl_s _dl_hashtbl; +struct fnlz_roots_s _fnlz_roots; +unsigned _log_fo_table_size; +#ifndef GC_TOGGLE_REFS_NOT_NEEDED +#define GC_toggleref_arr GC_arrays._toggleref_arr +#define GC_toggleref_array_size GC_arrays._toggleref_array_size +#define GC_toggleref_array_capacity GC_arrays._toggleref_array_capacity +union toggle_ref_u*_toggleref_arr; +size_t _toggleref_array_size; +size_t _toggleref_array_capacity; +#endif +#endif +#ifdef TRACE_BUF +#define GC_trace_buf_ptr GC_arrays._trace_buf_ptr +int _trace_buf_ptr; +#endif +#ifdef ENABLE_DISCLAIM +#define GC_finalized_kind GC_arrays._finalized_kind +int _finalized_kind; +#endif +#define n_root_sets GC_arrays._n_root_sets +#define GC_excl_table_entries GC_arrays._excl_table_entries +int _n_root_sets; +size_t _excl_table_entries; +#ifdef THREADS +#define GC_roots_were_cleared GC_arrays._roots_were_cleared +GC_bool _roots_were_cleared; +#endif +#define GC_explicit_typing_initialized GC_arrays._explicit_typing_initialized +#define GC_ed_size GC_arrays._ed_size +#define GC_avail_descr GC_arrays._avail_descr +#define GC_ext_descriptors GC_arrays._ext_descriptors +#ifdef AO_HAVE_load_acquire +volatile AO_t _explicit_typing_initialized; +#else +GC_bool _explicit_typing_initialized; +#endif +size_t _ed_size; +size_t _avail_descr; +typed_ext_descr_t*_ext_descriptors; +GC_mark_proc _mark_procs[MAX_MARK_PROCS]; +char _modws_valid_offsets[sizeof(word)]; +#if!defined(MSWIN32)&&!defined(MSWINCE)&&!defined(CYGWIN32) +#define GC_root_index GC_arrays._root_index +struct roots*_root_index[RT_SIZE]; +#endif +#ifdef SAVE_CALL_CHAIN +#define GC_last_stack GC_arrays._last_stack +struct callinfo _last_stack[NFRAMES]; +#endif +#ifndef SEPARATE_GLOBALS +#define GC_objfreelist GC_arrays._objfreelist +void*_objfreelist[MAXOBJGRANULES+1]; +#define GC_aobjfreelist GC_arrays._aobjfreelist +void*_aobjfreelist[MAXOBJGRANULES+1]; +#endif +void*_uobjfreelist[MAXOBJGRANULES+1]; +#ifdef GC_ATOMIC_UNCOLLECTABLE +#define GC_auobjfreelist GC_arrays._auobjfreelist +void*_auobjfreelist[MAXOBJGRANULES+1]; +#endif +size_t _size_map[MAXOBJBYTES+1]; +#ifdef MARK_BIT_PER_GRANULE +#define GC_obj_map GC_arrays._obj_map +unsigned short*_obj_map[MAXOBJGRANULES+1]; +#define MAP_LEN 
BYTES_TO_GRANULES(HBLKSIZE) +#endif +#define VALID_OFFSET_SZ HBLKSIZE +char _valid_offsets[VALID_OFFSET_SZ]; +#ifndef GC_DISABLE_INCREMENTAL +#define GC_grungy_pages GC_arrays._grungy_pages +page_hash_table _grungy_pages; +#define GC_dirty_pages GC_arrays._dirty_pages +volatile page_hash_table _dirty_pages; +#endif +#if (defined(CHECKSUMS)&&defined(GWW_VDB))||defined(PROC_VDB) +#define GC_written_pages GC_arrays._written_pages +page_hash_table _written_pages; +#endif +#define GC_heap_sects GC_arrays._heap_sects +struct HeapSect { +ptr_t hs_start; +size_t hs_bytes; +} _heap_sects[MAX_HEAP_SECTS]; +#if defined(USE_PROC_FOR_LIBRARIES) +#define GC_our_memory GC_arrays._our_memory +struct HeapSect _our_memory[MAX_HEAP_SECTS]; +#endif +#if defined(MSWIN32)||defined(MSWINCE)||defined(CYGWIN32) +#define GC_heap_bases GC_arrays._heap_bases +ptr_t _heap_bases[MAX_HEAP_SECTS]; +#endif +#ifdef MSWINCE +#define GC_heap_lengths GC_arrays._heap_lengths +word _heap_lengths[MAX_HEAP_SECTS]; +#endif +struct roots _static_roots[MAX_ROOT_SETS]; +struct exclusion _excl_table[MAX_EXCLUSIONS]; +bottom_index*_top_index[TOP_SZ]; +}; +GC_API_PRIV GC_FAR struct _GC_arrays GC_arrays; +#define GC_all_nils GC_arrays._all_nils +#define GC_atomic_in_use GC_arrays._atomic_in_use +#define GC_bytes_allocd_before_gc GC_arrays._bytes_allocd_before_gc +#define GC_bytes_dropped GC_arrays._bytes_dropped +#define GC_bytes_finalized GC_arrays._bytes_finalized +#define GC_bytes_freed GC_arrays._bytes_freed +#define GC_composite_in_use GC_arrays._composite_in_use +#define GC_excl_table GC_arrays._excl_table +#define GC_finalizer_bytes_freed GC_arrays._finalizer_bytes_freed +#define GC_heapsize GC_arrays._heapsize +#define GC_large_allocd_bytes GC_arrays._large_allocd_bytes +#define GC_large_free_bytes GC_arrays._large_free_bytes +#define GC_last_heap_addr GC_arrays._last_heap_addr +#define GC_mark_stack GC_arrays._mark_stack +#define GC_mark_stack_limit GC_arrays._mark_stack_limit +#define GC_mark_stack_top GC_arrays._mark_stack_top +#define GC_mark_procs GC_arrays._mark_procs +#define GC_max_large_allocd_bytes GC_arrays._max_large_allocd_bytes +#define GC_modws_valid_offsets GC_arrays._modws_valid_offsets +#define GC_prev_heap_addr GC_arrays._prev_heap_addr +#define GC_requested_heapsize GC_arrays._requested_heapsize +#define GC_all_bottom_indices GC_arrays._all_bottom_indices +#define GC_all_bottom_indices_end GC_arrays._all_bottom_indices_end +#define GC_scratch_free_ptr GC_arrays._scratch_free_ptr +#define GC_hdr_free_list GC_arrays._hdr_free_list +#define GC_scratch_end_ptr GC_arrays._scratch_end_ptr +#define GC_scratch_last_end_ptr GC_arrays._scratch_last_end_ptr +#define GC_size_map GC_arrays._size_map +#define GC_static_roots GC_arrays._static_roots +#define GC_top_index GC_arrays._top_index +#define GC_uobjfreelist GC_arrays._uobjfreelist +#define GC_valid_offsets GC_arrays._valid_offsets +#define beginGC_arrays ((ptr_t)(&GC_arrays)) +#define endGC_arrays (((ptr_t)(&GC_arrays))+(sizeof GC_arrays)) +#define USED_HEAP_SIZE (GC_heapsize - GC_large_free_bytes) +#ifndef MAXOBJKINDS +#define MAXOBJKINDS 16 +#endif +GC_EXTERN struct obj_kind { +void**ok_freelist; +struct hblk**ok_reclaim_list; +word ok_descriptor; +GC_bool ok_relocate_descr; +GC_bool ok_init; +#ifdef ENABLE_DISCLAIM +GC_bool ok_mark_unconditionally; +int (GC_CALLBACK*ok_disclaim_proc)(void*); +#define OK_DISCLAIM_INITZ,FALSE,0 +#else +#define OK_DISCLAIM_INITZ +#endif +} GC_obj_kinds[MAXOBJKINDS]; +#define beginGC_obj_kinds ((ptr_t)(&GC_obj_kinds)) +#define 
endGC_obj_kinds (beginGC_obj_kinds+(sizeof GC_obj_kinds)) +#ifdef SEPARATE_GLOBALS +extern word GC_bytes_allocd; +extern ptr_t GC_objfreelist[MAXOBJGRANULES+1]; +#define beginGC_objfreelist ((ptr_t)(&GC_objfreelist)) +#define endGC_objfreelist (beginGC_objfreelist+sizeof(GC_objfreelist)) +extern ptr_t GC_aobjfreelist[MAXOBJGRANULES+1]; +#define beginGC_aobjfreelist ((ptr_t)(&GC_aobjfreelist)) +#define endGC_aobjfreelist (beginGC_aobjfreelist+sizeof(GC_aobjfreelist)) +#endif +#define PTRFREE 0 +#define NORMAL 1 +#define UNCOLLECTABLE 2 +#ifdef GC_ATOMIC_UNCOLLECTABLE +#define AUNCOLLECTABLE 3 +#define IS_UNCOLLECTABLE(k)(((k)&~1)==UNCOLLECTABLE) +#define GC_N_KINDS_INITIAL_VALUE 4 +#else +#define IS_UNCOLLECTABLE(k)((k)==UNCOLLECTABLE) +#define GC_N_KINDS_INITIAL_VALUE 3 +#endif +GC_EXTERN unsigned GC_n_kinds; +GC_EXTERN size_t GC_page_size; +#define ROUNDUP_PAGESIZE(lb)(SIZET_SAT_ADD(lb,GC_page_size - 1)&~(GC_page_size - 1)) +#ifdef MMAP_SUPPORTED +#define ROUNDUP_PAGESIZE_IF_MMAP(lb)ROUNDUP_PAGESIZE(lb) +#else +#define ROUNDUP_PAGESIZE_IF_MMAP(lb)(lb) +#endif +#if defined(MSWIN32)||defined(MSWINCE)||defined(CYGWIN32) +GC_EXTERN SYSTEM_INFO GC_sysinfo; +GC_INNER GC_bool GC_is_heap_base(void*p); +#endif +GC_EXTERN word GC_black_list_spacing; +#ifdef GC_GCJ_SUPPORT +extern struct hblk*GC_hblkfreelist[]; +extern word GC_free_bytes[]; +#endif +GC_EXTERN word GC_root_size; +GC_EXTERN GC_bool GC_debugging_started; +struct blocking_data { +GC_fn_type fn; +void*client_data; +}; +struct GC_traced_stack_sect_s { +ptr_t saved_stack_ptr; +#ifdef IA64 +ptr_t saved_backing_store_ptr; +ptr_t backing_store_end; +#endif +struct GC_traced_stack_sect_s*prev; +}; +#ifdef THREADS +GC_INNER void GC_push_all_stack_sections(ptr_t lo,ptr_t hi, +struct GC_traced_stack_sect_s*traced_stack_sect); +GC_EXTERN word GC_total_stacksize; +#else +GC_EXTERN ptr_t GC_blocked_sp; +GC_EXTERN struct GC_traced_stack_sect_s*GC_traced_stack_sect; +#endif +#ifdef IA64 +GC_INNER void GC_push_all_register_sections(ptr_t bs_lo,ptr_t bs_hi, +int eager,struct GC_traced_stack_sect_s*traced_stack_sect); +#endif +#ifdef USE_MARK_BYTES +#define mark_bit_from_hdr(hhdr,n)((hhdr)->hb_marks[n]) +#define set_mark_bit_from_hdr(hhdr,n)((hhdr)->hb_marks[n]=1) +#define clear_mark_bit_from_hdr(hhdr,n)((hhdr)->hb_marks[n]=0) +#else +#if defined(PARALLEL_MARK)||(defined(THREAD_SANITIZER)&&defined(THREADS)) +#define OR_WORD(addr,bits)AO_or((volatile AO_t*)(addr),(AO_t)(bits)) +#else +#define OR_WORD(addr,bits)(void)(*(addr)|=(bits)) +#endif +#define mark_bit_from_hdr(hhdr,n)(((hhdr)->hb_marks[divWORDSZ(n)]>>modWORDSZ(n))&(word)1) +#define set_mark_bit_from_hdr(hhdr,n)OR_WORD((hhdr)->hb_marks+divWORDSZ(n),(word)1<hb_marks[divWORDSZ(n)]&=~((word)1< MAXOBJBYTES?1:HBLK_OBJS(sz)) +#else +#define MARK_BIT_NO(offset,sz)BYTES_TO_GRANULES((word)(offset)) +#define MARK_BIT_OFFSET(sz)BYTES_TO_GRANULES(sz) +#define IF_PER_OBJ(x) +#define FINAL_MARK_BIT(sz)((sz)> MAXOBJBYTES?MARK_BITS_PER_HBLK:BYTES_TO_GRANULES((sz)*HBLK_OBJS(sz))) +#endif +GC_INNER ptr_t GC_approx_sp(void); +GC_INNER GC_bool GC_should_collect(void); +void GC_apply_to_all_blocks(void (*fn)(struct hblk*h,word client_data), +word client_data); +GC_INNER struct hblk*GC_next_block(struct hblk*h,GC_bool allow_free); +GC_INNER struct hblk*GC_prev_block(struct hblk*h); +GC_INNER void GC_mark_init(void); +GC_INNER void GC_clear_marks(void); +GC_INNER void GC_invalidate_mark_state(void); +GC_INNER GC_bool GC_mark_some(ptr_t cold_gc_frame); +GC_INNER void GC_initiate_gc(void); +GC_INNER GC_bool 
GC_collection_in_progress(void); +#define GC_PUSH_ALL_SYM(sym)GC_push_all(( void*)&(sym),( void*)(&(sym)+1)) +GC_INNER void GC_push_all_stack(ptr_t b,ptr_t t); +#if defined(WRAP_MARK_SOME)&&defined(PARALLEL_MARK) +GC_INNER void GC_push_conditional_eager(void*bottom,void*top, +GC_bool all); +#endif +GC_INNER void GC_push_roots(GC_bool all,ptr_t cold_gc_frame); +GC_API_PRIV GC_push_other_roots_proc GC_push_other_roots; +#ifdef THREADS +void GC_push_thread_structures(void); +#endif +GC_EXTERN void (*GC_push_typed_structures)(void); +GC_INNER void GC_with_callee_saves_pushed(void (*fn)(ptr_t,void*), +volatile ptr_t arg); +#if defined(SPARC)||defined(IA64) +ptr_t GC_save_regs_in_stack(void); +#endif +#if defined(AMIGA)||defined(MACOS)||defined(GC_DARWIN_THREADS) +void GC_push_one(word p); +#endif +#ifdef GC_WIN32_THREADS +GC_INNER void GC_push_many_regs(const word*regs,unsigned count); +#endif +#if defined(PRINT_BLACK_LIST)||defined(KEEP_BACK_PTRS) +GC_INNER void GC_mark_and_push_stack(ptr_t p,ptr_t source); +#else +GC_INNER void GC_mark_and_push_stack(ptr_t p); +#endif +GC_INNER void GC_clear_hdr_marks(hdr*hhdr); +GC_INNER void GC_set_hdr_marks(hdr*hhdr); +GC_INNER void GC_set_fl_marks(ptr_t p); +#if defined(GC_ASSERTIONS)&&defined(THREAD_LOCAL_ALLOC) +void GC_check_fl_marks(void**); +#endif +void GC_add_roots_inner(ptr_t b,ptr_t e,GC_bool tmp); +#ifdef USE_PROC_FOR_LIBRARIES +GC_INNER void GC_remove_roots_subregion(ptr_t b,ptr_t e); +#endif +GC_INNER void GC_exclude_static_roots_inner(void*start,void*finish); +#if defined(DYNAMIC_LOADING)||defined(MSWIN32)||defined(MSWINCE)||defined(CYGWIN32)||defined(PCR) +GC_INNER void GC_register_dynamic_libraries(void); +#endif +GC_INNER void GC_cond_register_dynamic_libraries(void); +ptr_t GC_get_main_stack_base(void); +#ifdef IA64 +GC_INNER ptr_t GC_get_register_stack_base(void); +#endif +void GC_register_data_segments(void); +#ifdef THREADS +GC_INNER void GC_thr_init(void); +GC_INNER void GC_init_parallel(void); +#else +GC_INNER GC_bool GC_is_static_root(void*p); +#ifdef TRACE_BUF +void GC_add_trace_entry(char*kind,word arg1,word arg2); +#endif +#endif +#ifdef PRINT_BLACK_LIST +GC_INNER void GC_add_to_black_list_normal(word p,ptr_t source); +#define GC_ADD_TO_BLACK_LIST_NORMAL(bits,source)if (GC_all_interior_pointers){ GC_add_to_black_list_stack((word)(bits),(source));} else GC_add_to_black_list_normal((word)(bits),(source)) +GC_INNER void GC_add_to_black_list_stack(word p,ptr_t source); +#define GC_ADD_TO_BLACK_LIST_STACK(bits,source)GC_add_to_black_list_stack((word)(bits),(source)) +#else +GC_INNER void GC_add_to_black_list_normal(word p); +#define GC_ADD_TO_BLACK_LIST_NORMAL(bits,source)if (GC_all_interior_pointers){ GC_add_to_black_list_stack((word)(bits));} else GC_add_to_black_list_normal((word)(bits)) +GC_INNER void GC_add_to_black_list_stack(word p); +#define GC_ADD_TO_BLACK_LIST_STACK(bits,source)GC_add_to_black_list_stack((word)(bits)) +#endif +struct hblk*GC_is_black_listed(struct hblk*h,word len); +GC_INNER void GC_promote_black_lists(void); +GC_INNER void GC_unpromote_black_lists(void); +GC_INNER ptr_t GC_scratch_alloc(size_t bytes); +#ifdef GWW_VDB +#else +#define GC_scratch_recycle_no_gww GC_scratch_recycle_inner +#endif +GC_INNER void GC_scratch_recycle_inner(void*ptr,size_t bytes); +#ifdef MARK_BIT_PER_GRANULE +GC_INNER GC_bool GC_add_map_entry(size_t sz); +#endif +GC_INNER void GC_register_displacement_inner(size_t offset); +GC_INNER void GC_new_hblk(size_t size_in_granules,int kind); +GC_INNER ptr_t GC_build_fl(struct hblk*h,size_t 
words,GC_bool clear, +ptr_t list); +GC_INNER struct hblk*GC_allochblk(size_t size_in_bytes,int kind, +unsigned flags); +GC_INNER ptr_t GC_alloc_large(size_t lb,int k,unsigned flags); +GC_INNER void GC_freehblk(struct hblk*p); +GC_INNER GC_bool GC_expand_hp_inner(word n); +GC_INNER void GC_start_reclaim(GC_bool abort_if_found); +GC_INNER void GC_continue_reclaim(word sz,int kind); +GC_INNER GC_bool GC_reclaim_all(GC_stop_func stop_func,GC_bool ignore_old); +GC_INNER ptr_t GC_reclaim_generic(struct hblk*hbp,hdr*hhdr,size_t sz, +GC_bool init,ptr_t list, +signed_word*count); +GC_INNER GC_bool GC_block_empty(hdr*hhdr); +GC_INNER int GC_CALLBACK GC_never_stop_func(void); +GC_INNER GC_bool GC_try_to_collect_inner(GC_stop_func f); +#define GC_gcollect_inner()(void)GC_try_to_collect_inner(GC_never_stop_func) +#ifdef THREADS +GC_EXTERN GC_bool GC_in_thread_creation; +#endif +GC_EXTERN GC_bool GC_is_initialized; +GC_INNER void GC_collect_a_little_inner(int n); +GC_INNER void*GC_generic_malloc_inner(size_t lb,int k); +#if defined(DBG_HDRS_ALL)||defined(GC_GCJ_SUPPORT)||!defined(GC_NO_FINALIZATION) +GC_INNER void*GC_generic_malloc_inner_ignore_off_page(size_t lb,int k); +#endif +GC_INNER GC_bool GC_collect_or_expand(word needed_blocks, +GC_bool ignore_off_page,GC_bool retry); +GC_INNER ptr_t GC_allocobj(size_t sz,int kind); +#ifdef GC_ADD_CALLER +#ifdef GC_HAVE_RETURN_ADDR_PARENT +#define GC_DBG_EXTRAS GC_RETURN_ADDR_PARENT,NULL,0 +#else +#define GC_DBG_EXTRAS GC_RETURN_ADDR,NULL,0 +#endif +#else +#define GC_DBG_EXTRAS "unknown",0 +#endif +#ifdef GC_COLLECT_AT_MALLOC +extern size_t GC_dbg_collect_at_malloc_min_lb; +#define GC_DBG_COLLECT_AT_MALLOC(lb)(void)((lb)>=GC_dbg_collect_at_malloc_min_lb?(GC_gcollect(),0):0) +#else +#define GC_DBG_COLLECT_AT_MALLOC(lb)(void)0 +#endif +#if defined(THREAD_LOCAL_ALLOC)&&defined(GC_GCJ_SUPPORT) +GC_INNER void*GC_core_gcj_malloc(size_t,void*); +#endif +GC_INNER void GC_init_headers(void); +GC_INNER struct hblkhdr*GC_install_header(struct hblk*h); +GC_INNER GC_bool GC_install_counts(struct hblk*h,size_t sz); +GC_INNER void GC_remove_header(struct hblk*h); +GC_INNER void GC_remove_counts(struct hblk*h,size_t sz); +GC_INNER hdr*GC_find_header(ptr_t h); +GC_INNER void GC_add_to_heap(struct hblk*p,size_t bytes); +#ifdef USE_PROC_FOR_LIBRARIES +GC_INNER void GC_add_to_our_memory(ptr_t p,size_t bytes); +#else +#define GC_add_to_our_memory(p,bytes) +#endif +GC_INNER void GC_print_all_errors(void); +GC_EXTERN void (*GC_check_heap)(void); +GC_EXTERN void (*GC_print_all_smashed)(void); +GC_EXTERN void (*GC_print_heap_obj)(ptr_t p); +#if defined(LINUX)&&defined(__ELF__)&&!defined(SMALL_CONFIG) +void GC_print_address_map(void); +#endif +#ifndef SHORT_DBG_HDRS +GC_EXTERN GC_bool GC_findleak_delay_free; +GC_INNER GC_bool GC_check_leaked(ptr_t base); +#endif +GC_EXTERN GC_bool GC_have_errors; +#define VERBOSE 2 +#if!defined(NO_CLOCK)||!defined(SMALL_CONFIG) +extern int GC_print_stats; +#else +#define GC_print_stats 0 +#endif +#ifdef KEEP_BACK_PTRS +GC_EXTERN long GC_backtraces; +GC_INNER void GC_generate_random_backtrace_no_gc(void); +#endif +#ifdef LINT2 +#define GC_RAND_MAX (~0U>>1) +GC_API_PRIV long GC_random(void); +#endif +GC_EXTERN GC_bool GC_print_back_height; +#ifdef MAKE_BACK_GRAPH +void GC_print_back_graph_stats(void); +#endif +#ifdef THREADS +GC_INNER void GC_free_inner(void*p); +#endif +#ifdef DBG_HDRS_ALL +GC_INNER void*GC_debug_generic_malloc_inner(size_t lb,int k); +GC_INNER void*GC_debug_generic_malloc_inner_ignore_off_page(size_t lb, +int k); +#define 
GC_INTERNAL_MALLOC GC_debug_generic_malloc_inner +#define GC_INTERNAL_MALLOC_IGNORE_OFF_PAGE GC_debug_generic_malloc_inner_ignore_off_page +#ifdef THREADS +GC_INNER void GC_debug_free_inner(void*p); +#define GC_INTERNAL_FREE GC_debug_free_inner +#else +#define GC_INTERNAL_FREE GC_debug_free +#endif +#else +#define GC_INTERNAL_MALLOC GC_generic_malloc_inner +#define GC_INTERNAL_MALLOC_IGNORE_OFF_PAGE GC_generic_malloc_inner_ignore_off_page +#ifdef THREADS +#define GC_INTERNAL_FREE GC_free_inner +#else +#define GC_INTERNAL_FREE GC_free +#endif +#endif +#ifdef USE_MUNMAP +GC_INNER void GC_unmap_old(void); +GC_INNER void GC_merge_unmapped(void); +GC_INNER void GC_unmap(ptr_t start,size_t bytes); +GC_INNER void GC_remap(ptr_t start,size_t bytes); +GC_INNER void GC_unmap_gap(ptr_t start1,size_t bytes1,ptr_t start2, +size_t bytes2); +GC_INLINE ptr_t GC_unmap_end(ptr_t start,size_t bytes) +{ +return (ptr_t)((word)(start+bytes)&~(GC_page_size - 1)); +} +#endif +#ifdef CAN_HANDLE_FORK +GC_EXTERN int GC_handle_fork; +#endif +#ifdef GC_DISABLE_INCREMENTAL +#define GC_incremental FALSE +#define GC_auto_incremental FALSE +#define GC_manual_vdb FALSE +#define GC_dirty(p)(void)(p) +#define REACHABLE_AFTER_DIRTY(p)(void)(p) +#else +GC_EXTERN GC_bool GC_incremental; +GC_INNER void GC_read_dirty(GC_bool output_unneeded); +GC_INNER GC_bool GC_page_was_dirty(struct hblk*h); +GC_INNER void GC_remove_protection(struct hblk*h,word nblocks, +GC_bool pointerfree); +GC_INNER GC_bool GC_dirty_init(void); +GC_EXTERN GC_bool GC_manual_vdb; +#define GC_auto_incremental (GC_incremental&&!GC_manual_vdb) +GC_INNER void GC_dirty_inner(const void*p); +#define GC_dirty(p)(GC_manual_vdb?GC_dirty_inner(p):(void)0) +#define REACHABLE_AFTER_DIRTY(p)GC_reachable_here(p) +#endif +#define GC_base_C(p)((const void*)GC_base(( void*)(p))) +void GC_print_block_list(void); +void GC_print_hblkfreelist(void); +void GC_print_heap_sects(void); +void GC_print_static_roots(void); +#ifdef KEEP_BACK_PTRS +GC_INNER void GC_store_back_pointer(ptr_t source,ptr_t dest); +GC_INNER void GC_marked_for_finalization(ptr_t dest); +#define GC_STORE_BACK_PTR(source,dest)GC_store_back_pointer(source,dest) +#define GC_MARKED_FOR_FINALIZATION(dest)GC_marked_for_finalization(dest) +#else +#define GC_STORE_BACK_PTR(source,dest)(void)(source) +#define GC_MARKED_FOR_FINALIZATION(dest) +#endif +void GC_noop6(word,word,word,word,word,word); +GC_API void GC_CALL GC_noop1(word); +#ifndef GC_ATTR_FORMAT_PRINTF +#if GC_GNUC_PREREQ(3,0) +#define GC_ATTR_FORMAT_PRINTF(spec_argnum,first_checked)__attribute__((__format__(__printf__,spec_argnum,first_checked))) +#else +#define GC_ATTR_FORMAT_PRINTF(spec_argnum,first_checked) +#endif +#endif +GC_API_PRIV void GC_printf(const char*format,...) +GC_ATTR_FORMAT_PRINTF(1,2); +GC_API_PRIV void GC_err_printf(const char*format,...) +GC_ATTR_FORMAT_PRINTF(1,2); +GC_API_PRIV void GC_log_printf(const char*format,...) +GC_ATTR_FORMAT_PRINTF(1,2); +#ifndef GC_ANDROID_LOG +#define GC_PRINT_STATS_FLAG (GC_print_stats!=0) +#define GC_INFOLOG_PRINTF GC_COND_LOG_PRINTF +#define GC_verbose_log_printf GC_log_printf +#else +extern GC_bool GC_quiet; +#define GC_PRINT_STATS_FLAG (!GC_quiet) +#ifndef GC_INFOLOG_PRINTF +#define GC_INFOLOG_PRINTF if (GC_quiet){} else GC_info_log_printf +#endif +GC_INNER void GC_info_log_printf(const char*format,...) +GC_ATTR_FORMAT_PRINTF(1,2); +GC_INNER void GC_verbose_log_printf(const char*format,...) 
+GC_ATTR_FORMAT_PRINTF(1,2); +#endif +#define GC_COND_LOG_PRINTF if (EXPECT(!GC_print_stats,TRUE)){} else GC_log_printf +#define GC_VERBOSE_LOG_PRINTF if (EXPECT(GC_print_stats!=VERBOSE,TRUE)){} else GC_verbose_log_printf +#ifndef GC_DBGLOG_PRINTF +#define GC_DBGLOG_PRINTF if (!GC_PRINT_STATS_FLAG){} else GC_log_printf +#endif +void GC_err_puts(const char*s); +#define TO_KiB_UL(v)((unsigned long)(((v)+((1<<9)- 1))>>10)) +GC_EXTERN unsigned GC_fail_count; +GC_EXTERN long GC_large_alloc_warn_interval; +GC_EXTERN signed_word GC_bytes_found; +#ifndef GC_GET_HEAP_USAGE_NOT_NEEDED +GC_EXTERN word GC_reclaimed_bytes_before_gc; +#endif +#ifdef USE_MUNMAP +GC_EXTERN int GC_unmap_threshold; +GC_EXTERN GC_bool GC_force_unmap_on_gcollect; +#endif +#ifdef MSWIN32 +GC_EXTERN GC_bool GC_no_win32_dlls; +GC_EXTERN GC_bool GC_wnt; +#endif +#ifdef THREADS +#if (defined(MSWIN32)&&!defined(CONSOLE_LOG))||defined(MSWINCE) +GC_EXTERN CRITICAL_SECTION GC_write_cs; +#ifdef GC_ASSERTIONS +GC_EXTERN GC_bool GC_write_disabled; +#endif +#endif +#if defined(GC_DISABLE_INCREMENTAL)||defined(HAVE_LOCKFREE_AO_OR) +#define GC_acquire_dirty_lock()(void)0 +#define GC_release_dirty_lock()(void)0 +#else +#define GC_acquire_dirty_lock()do { } while (AO_test_and_set_acquire(&GC_fault_handler_lock)==AO_TS_SET) +#define GC_release_dirty_lock()AO_CLEAR(&GC_fault_handler_lock) +GC_EXTERN volatile AO_TS_t GC_fault_handler_lock; +#endif +#ifdef MSWINCE +GC_EXTERN GC_bool GC_dont_query_stack_min; +#endif +#elif defined(IA64) +GC_EXTERN ptr_t GC_save_regs_ret_val; +#endif +#ifdef THREAD_LOCAL_ALLOC +GC_EXTERN GC_bool GC_world_stopped; +GC_INNER void GC_mark_thread_local_free_lists(void); +#endif +#if defined(MPROTECT_VDB)&&defined(GWW_VDB) +GC_INNER GC_bool GC_gww_dirty_init(void); +#endif +#if defined(CHECKSUMS)||defined(PROC_VDB) +GC_INNER GC_bool GC_page_was_ever_dirty(struct hblk*h); +#endif +#ifdef CHECKSUMS +#if defined(MPROTECT_VDB)&&!defined(DARWIN) +void GC_record_fault(struct hblk*h); +#endif +void GC_check_dirty(void); +#endif +GC_INNER void GC_default_print_heap_obj_proc(ptr_t p); +GC_INNER void GC_setpagesize(void); +GC_INNER void GC_initialize_offsets(void); +GC_INNER void GC_bl_init(void); +GC_INNER void GC_bl_init_no_interiors(void); +GC_INNER void GC_start_debugging_inner(void); +GC_INNER void*GC_store_debug_info_inner(void*p,word sz,const char*str, +int linenum); +#ifdef REDIRECT_MALLOC +#ifdef GC_LINUX_THREADS +GC_INNER GC_bool GC_text_mapping(char*nm,ptr_t*startp,ptr_t*endp); +#endif +#elif defined(USE_WINALLOC) +GC_INNER void GC_add_current_malloc_heap(void); +#endif +#ifdef MAKE_BACK_GRAPH +GC_INNER void GC_build_back_graph(void); +GC_INNER void GC_traverse_back_graph(void); +#endif +#ifdef MSWIN32 +GC_INNER void GC_init_win32(void); +#endif +#if!defined(MSWIN32)&&!defined(MSWINCE)&&!defined(CYGWIN32) +GC_INNER void*GC_roots_present(ptr_t); +#endif +#ifdef GC_WIN32_THREADS +GC_INNER void GC_get_next_stack(char*start,char*limit,char**lo, +char**hi); +#if defined(MPROTECT_VDB)&&!defined(CYGWIN32) +GC_INNER void GC_set_write_fault_handler(void); +#endif +#if defined(WRAP_MARK_SOME)&&!defined(GC_PTHREADS) +GC_INNER GC_bool GC_started_thread_while_stopped(void); +#endif +#endif +#ifdef THREADS +GC_INNER void GC_reset_finalizer_nested(void); +GC_INNER unsigned char*GC_check_finalizer_nested(void); +GC_INNER void GC_do_blocking_inner(ptr_t data,void*context); +GC_INNER void GC_push_all_stacks(void); +#ifdef USE_PROC_FOR_LIBRARIES +GC_INNER GC_bool GC_segment_is_thread_stack(ptr_t lo,ptr_t hi); +#endif +#ifdef IA64 
+GC_INNER ptr_t GC_greatest_stack_base_below(ptr_t bound); +#endif +#endif +#ifdef DYNAMIC_LOADING +GC_INNER GC_bool GC_register_main_static_data(void); +#ifdef DARWIN +GC_INNER void GC_init_dyld(void); +#endif +#endif +#ifdef SEARCH_FOR_DATA_START +GC_INNER void GC_init_linux_data_start(void); +void*GC_find_limit(void*,int); +#endif +#if defined(NETBSD)&&defined(__ELF__) +GC_INNER void GC_init_netbsd_elf(void); +void*GC_find_limit(void*,int); +#endif +#ifdef UNIX_LIKE +GC_INNER void GC_set_and_save_fault_handler(void (*handler)(int)); +#endif +#ifdef NEED_PROC_MAPS +#if defined(DYNAMIC_LOADING)&&defined(USE_PROC_FOR_LIBRARIES) +GC_INNER char*GC_parse_map_entry(char*buf_ptr,ptr_t*start,ptr_t*end, +char**prot,unsigned int*maj_dev, +char**mapping_name); +#endif +#if defined(IA64)||defined(INCLUDE_LINUX_THREAD_DESCR) +GC_INNER GC_bool GC_enclosing_mapping(ptr_t addr, +ptr_t*startp,ptr_t*endp); +#endif +GC_INNER char*GC_get_maps(void); +#endif +#ifdef GC_ASSERTIONS +#define GC_ASSERT(expr)do { if (!(expr)){ GC_err_printf("Assertion failure:%s:%d\n",__FILE__,__LINE__);ABORT("assertion failure");} } while (0) +GC_INNER word GC_compute_large_free_bytes(void); +GC_INNER word GC_compute_root_size(void); +#else +#define GC_ASSERT(expr) +#endif +#if _MSC_VER>=1700 +#define GC_STATIC_ASSERT(expr)static_assert(expr,"static assertion failed:" #expr) +#elif defined(static_assert)&&__STDC_VERSION__>=201112L +#define GC_STATIC_ASSERT(expr)static_assert(expr,#expr) +#elif defined(mips)&&!defined(__GNUC__) +#define GC_STATIC_ASSERT(expr)do { if (0){ char j[(expr)?1:-1];j[0]='\0';j[0]=j[0];} } while(0) +#else +#define GC_STATIC_ASSERT(expr)(void)sizeof(char[(expr)?1:-1]) +#endif +#if GC_GNUC_PREREQ(4,0) +#define NONNULL_ARG_NOT_NULL(arg)(*(volatile void**)&(arg)!=NULL) +#else +#define NONNULL_ARG_NOT_NULL(arg)(NULL!=(arg)) +#endif +#define COND_DUMP_CHECKS do { GC_ASSERT(GC_compute_large_free_bytes()==GC_large_free_bytes);GC_ASSERT(GC_compute_root_size()==GC_root_size);} while (0) +#ifndef NO_DEBUGGING +GC_EXTERN GC_bool GC_dump_regularly; +#define COND_DUMP if (EXPECT(GC_dump_regularly,FALSE)){ GC_dump_named(NULL);} else COND_DUMP_CHECKS +#else +#define COND_DUMP COND_DUMP_CHECKS +#endif +#if defined(PARALLEL_MARK) +#define GC_markers_m1 GC_parallel +GC_EXTERN GC_bool GC_parallel_mark_disabled; +GC_INNER void GC_wait_for_markers_init(void); +GC_INNER void GC_acquire_mark_lock(void); +GC_INNER void GC_release_mark_lock(void); +GC_INNER void GC_notify_all_builder(void); +GC_INNER void GC_wait_for_reclaim(void); +GC_EXTERN signed_word GC_fl_builder_count; +GC_INNER void GC_notify_all_marker(void); +GC_INNER void GC_wait_marker(void); +GC_EXTERN word GC_mark_no; +GC_INNER void GC_help_marker(word my_mark_no); +GC_INNER void GC_start_mark_threads_inner(void); +#endif +#if defined(GC_PTHREADS)&&!defined(GC_WIN32_THREADS)&&!defined(NACL)&&!defined(GC_DARWIN_THREADS)&&!defined(SIG_SUSPEND) +#if (defined(GC_LINUX_THREADS)||defined(GC_DGUX386_THREADS))&&!defined(GC_USESIGRT_SIGNALS) +#if defined(SPARC)&&!defined(SIGPWR) +#define SIG_SUSPEND SIGLOST +#else +#define SIG_SUSPEND SIGPWR +#endif +#elif defined(GC_OPENBSD_THREADS) +#ifndef GC_OPENBSD_UTHREADS +#define SIG_SUSPEND SIGXFSZ +#endif +#elif defined(_SIGRTMIN)&&!defined(CPPCHECK) +#define SIG_SUSPEND _SIGRTMIN+6 +#else +#define SIG_SUSPEND SIGRTMIN+6 +#endif +#endif +#if defined(GC_PTHREADS)&&!defined(GC_SEM_INIT_PSHARED) +#define GC_SEM_INIT_PSHARED 0 +#endif +#if (defined(UNIX_LIKE)||(defined(NEED_FIND_LIMIT)&&defined(CYGWIN32)))&&!defined(GC_NO_SIGSETJMP) +#if 
defined(SUNOS5SIGS)&&!defined(FREEBSD)&&!defined(LINUX) +EXTERN_C_END +#include +EXTERN_C_BEGIN +#endif +#define SETJMP(env)sigsetjmp(env,1) +#define LONGJMP(env,val)siglongjmp(env,val) +#define JMP_BUF sigjmp_buf +#else +#ifdef ECOS +#define SETJMP(env)hal_setjmp(env) +#else +#define SETJMP(env)setjmp(env) +#endif +#define LONGJMP(env,val)longjmp(env,val) +#define JMP_BUF jmp_buf +#endif +#if defined(HEURISTIC2)||defined(SEARCH_FOR_DATA_START)||((defined(SVR4)||defined(AIX)||defined(DGUX)||(defined(LINUX)&&defined(SPARC)))&&!defined(PCR)) +#define NEED_FIND_LIMIT +#endif +#if defined(DATASTART_USES_BSDGETDATASTART) +EXTERN_C_END +#include +EXTERN_C_BEGIN +#if!defined(PCR) +#define NEED_FIND_LIMIT +#endif +GC_INNER ptr_t GC_FreeBSDGetDataStart(size_t,ptr_t); +#define DATASTART_IS_FUNC +#endif +#if (defined(NETBSD)||defined(OPENBSD))&&defined(__ELF__)&&!defined(NEED_FIND_LIMIT) +#define NEED_FIND_LIMIT +#endif +#if defined(IA64)&&!defined(NEED_FIND_LIMIT) +#define NEED_FIND_LIMIT +#endif +#if defined(NEED_FIND_LIMIT)||(defined(USE_PROC_FOR_LIBRARIES)&&defined(THREADS)) +GC_EXTERN JMP_BUF GC_jmp_buf; +GC_INNER void GC_setup_temporary_fault_handler(void); +GC_INNER void GC_reset_fault_handler(void); +#endif +#if defined(CANCEL_SAFE) +#if defined(GC_ASSERTIONS)&&(defined(USE_COMPILER_TLS)||(defined(LINUX)&&!defined(ARM32)&&GC_GNUC_PREREQ(3,3)||defined(HPUX))) +extern __thread unsigned char GC_cancel_disable_count; +#define NEED_CANCEL_DISABLE_COUNT +#define INCR_CANCEL_DISABLE()++GC_cancel_disable_count +#define DECR_CANCEL_DISABLE()--GC_cancel_disable_count +#define ASSERT_CANCEL_DISABLED()GC_ASSERT(GC_cancel_disable_count > 0) +#else +#define INCR_CANCEL_DISABLE() +#define DECR_CANCEL_DISABLE() +#define ASSERT_CANCEL_DISABLED()(void)0 +#endif +#define DISABLE_CANCEL(state)do { pthread_setcancelstate(PTHREAD_CANCEL_DISABLE,&state);INCR_CANCEL_DISABLE();} while (0) +#define RESTORE_CANCEL(state)do { ASSERT_CANCEL_DISABLED();pthread_setcancelstate(state,NULL);DECR_CANCEL_DISABLE();} while (0) +#else +#define DISABLE_CANCEL(state)(void)0 +#define RESTORE_CANCEL(state)(void)0 +#define ASSERT_CANCEL_DISABLED()(void)0 +#endif +EXTERN_C_END +#endif +#ifdef KEEP_BACK_PTRS +#ifndef GC_BACKPTR_H +#define GC_BACKPTR_H +#ifndef GC_H +#endif +#ifdef __cplusplus +extern "C" { +#endif +typedef enum { +GC_UNREFERENCED, +GC_NO_SPACE, +GC_REFD_FROM_ROOT, +GC_REFD_FROM_REG, +GC_REFD_FROM_HEAP, +GC_FINALIZER_REFD +} GC_ref_kind; +GC_API GC_ref_kind GC_CALL GC_get_back_ptr_info(void*, +void**,size_t*) +GC_ATTR_NONNULL(1); +GC_API void*GC_CALL GC_generate_random_heap_address(void); +GC_API void*GC_CALL GC_generate_random_valid_address(void); +GC_API void GC_CALL GC_generate_random_backtrace(void); +GC_API void GC_CALL GC_print_backtrace(void*)GC_ATTR_NONNULL(1); +#ifdef __cplusplus +} +#endif +#endif +#endif +EXTERN_C_BEGIN +#if CPP_WORDSZ==32 +#define START_FLAG (word)0xfedcedcb +#define END_FLAG (word)0xbcdecdef +#else +#define START_FLAG GC_WORD_C(0xFEDCEDCBfedcedcb) +#define END_FLAG GC_WORD_C(0xBCDECDEFbcdecdef) +#endif +#if defined(KEEP_BACK_PTRS)||defined(PRINT_BLACK_LIST)||defined(MAKE_BACK_GRAPH) +#define NOT_MARKED (ptr_t)0 +#define MARKED_FOR_FINALIZATION ((ptr_t)(word)2) +#define MARKED_FROM_REGISTER ((ptr_t)(word)4) +#endif +typedef struct { +#if defined(KEEP_BACK_PTRS)||defined(MAKE_BACK_GRAPH) +#if ALIGNMENT==1 +#define HIDE_BACK_PTR(p)GC_HIDE_POINTER(~1&(word)(p)) +#else +#define HIDE_BACK_PTR(p)GC_HIDE_POINTER(p) +#endif +#ifdef KEEP_BACK_PTRS +GC_hidden_pointer oh_back_ptr; +#endif +#ifdef 
MAKE_BACK_GRAPH +GC_hidden_pointer oh_bg_ptr; +#endif +#if defined(KEEP_BACK_PTRS)!=defined(MAKE_BACK_GRAPH) +word oh_dummy; +#endif +#endif +const char*oh_string; +signed_word oh_int; +#ifdef NEED_CALLINFO +struct callinfo oh_ci[NFRAMES]; +#endif +#ifndef SHORT_DBG_HDRS +word oh_sz; +word oh_sf; +#endif +} oh; +#ifdef SHORT_DBG_HDRS +#define DEBUG_BYTES (sizeof (oh)) +#define UNCOLLECTABLE_DEBUG_BYTES DEBUG_BYTES +#else +#define UNCOLLECTABLE_DEBUG_BYTES (sizeof (oh)+sizeof (word)) +#define DEBUG_BYTES (UNCOLLECTABLE_DEBUG_BYTES - EXTRA_BYTES) +#endif +#define SIMPLE_ROUNDED_UP_WORDS(n)BYTES_TO_WORDS((n)+WORDS_TO_BYTES(1)- 1) +#if defined(SAVE_CALL_CHAIN) +struct callinfo; +GC_INNER void GC_save_callers(struct callinfo info[NFRAMES]); +GC_INNER void GC_print_callers(struct callinfo info[NFRAMES]); +#define ADD_CALL_CHAIN(base,ra)GC_save_callers(((oh*)(base))->oh_ci) +#define PRINT_CALL_CHAIN(base)GC_print_callers(((oh*)(base))->oh_ci) +#elif defined(GC_ADD_CALLER) +struct callinfo; +GC_INNER void GC_print_callers(struct callinfo info[NFRAMES]); +#define ADD_CALL_CHAIN(base,ra)((oh*)(base))->oh_ci[0].ci_pc=(ra) +#define PRINT_CALL_CHAIN(base)GC_print_callers(((oh*)(base))->oh_ci) +#else +#define ADD_CALL_CHAIN(base,ra) +#define PRINT_CALL_CHAIN(base) +#endif +#ifdef GC_ADD_CALLER +#define OPT_RA ra, +#else +#define OPT_RA +#endif +#ifdef SHORT_DBG_HDRS +#define GC_has_other_debug_info(p)1 +#else +GC_INNER int GC_has_other_debug_info(ptr_t p); +#endif +#if defined(KEEP_BACK_PTRS)||defined(MAKE_BACK_GRAPH) +#if defined(SHORT_DBG_HDRS)&&!defined(CPPCHECK) +#error Non-ptr stored in object results in GC_HAS_DEBUG_INFO malfunction +#endif +#if defined(PARALLEL_MARK)&&defined(KEEP_BACK_PTRS) +#define GC_HAS_DEBUG_INFO(p)((AO_load((volatile AO_t*)(p))&1)!=0&&GC_has_other_debug_info(p)> 0) +#else +#define GC_HAS_DEBUG_INFO(p)((*(word*)(p)&1)&&GC_has_other_debug_info(p)> 0) +#endif +#else +#define GC_HAS_DEBUG_INFO(p)(GC_has_other_debug_info(p)> 0) +#endif +EXTERN_C_END +#endif +#ifdef MAKE_BACK_GRAPH +#define MAX_IN 10 +#if (!defined(DBG_HDRS_ALL)||(ALIGNMENT!=CPP_WORDSZ/8))&&!defined(CPPCHECK) +#error The configuration does not support MAKE_BACK_GRAPH +#endif +#define FLAG_MANY 2 +typedef struct back_edges_struct { +word n_edges; +unsigned short flags; +#define RETAIN 1 +unsigned short height_gc_no; +signed_word height; +#define HEIGHT_UNKNOWN (-2) +#define HEIGHT_IN_PROGRESS (-1) +ptr_t edges[MAX_IN]; +struct back_edges_struct*cont; +} back_edges; +#define MAX_BACK_EDGE_STRUCTS 100000 +static back_edges*back_edge_space=0; +STATIC int GC_n_back_edge_structs=0; +static back_edges*avail_back_edges=0; +static back_edges*new_back_edges(void) +{ +if (0==back_edge_space){ +size_t bytes_to_get=ROUNDUP_PAGESIZE_IF_MMAP(MAX_BACK_EDGE_STRUCTS +*sizeof(back_edges)); +GC_ASSERT(GC_page_size!=0); +back_edge_space=(back_edges*)GET_MEM(bytes_to_get); +if (NULL==back_edge_space) +ABORT("Insufficient memory for back edges"); +GC_add_to_our_memory((ptr_t)back_edge_space,bytes_to_get); +} +if (0!=avail_back_edges){ +back_edges*result=avail_back_edges; +avail_back_edges=result->cont; +result->cont=0; +return result; +} +if (GC_n_back_edge_structs>=MAX_BACK_EDGE_STRUCTS - 1){ +ABORT("Needed too much space for back edges:adjust " +"MAX_BACK_EDGE_STRUCTS"); +} +return back_edge_space+(GC_n_back_edge_structs++); +} +static void deallocate_back_edges(back_edges*p) +{ +back_edges*last=p; +while (0!=last->cont)last=last->cont; +last->cont=avail_back_edges; +avail_back_edges=p; +} +#define INITIAL_IN_PROGRESS 10000 +static 
ptr_t*in_progress_space=0; +static size_t in_progress_size=0; +static size_t n_in_progress=0; +static void push_in_progress(ptr_t p) +{ +if (n_in_progress>=in_progress_size){ +ptr_t*new_in_progress_space; +GC_ASSERT(GC_page_size!=0); +if (NULL==in_progress_space){ +in_progress_size=ROUNDUP_PAGESIZE_IF_MMAP(INITIAL_IN_PROGRESS +*sizeof(ptr_t)) +/sizeof(ptr_t); +new_in_progress_space= +(ptr_t*)GET_MEM(in_progress_size*sizeof(ptr_t)); +} else { +in_progress_size*=2; +new_in_progress_space=(ptr_t*) +GET_MEM(in_progress_size*sizeof(ptr_t)); +if (new_in_progress_space!=NULL) +BCOPY(in_progress_space,new_in_progress_space, +n_in_progress*sizeof(ptr_t)); +} +GC_add_to_our_memory((ptr_t)new_in_progress_space, +in_progress_size*sizeof(ptr_t)); +#ifndef GWW_VDB +GC_scratch_recycle_no_gww(in_progress_space, +n_in_progress*sizeof(ptr_t)); +#elif defined(LINT2) +GC_noop1((word)in_progress_space); +#endif +in_progress_space=new_in_progress_space; +} +if (in_progress_space==0) +ABORT("MAKE_BACK_GRAPH:Out of in-progress space:" +"Huge linear data structure?"); +in_progress_space[n_in_progress++]=p; +} +static GC_bool is_in_progress(ptr_t p) +{ +size_t i; +for (i=0;i < n_in_progress;++i){ +if (in_progress_space[i]==p)return TRUE; +} +return FALSE; +} +GC_INLINE void pop_in_progress(ptr_t p GC_ATTR_UNUSED) +{ +--n_in_progress; +GC_ASSERT(in_progress_space[n_in_progress]==p); +} +#define GET_OH_BG_PTR(p)(ptr_t)GC_REVEAL_POINTER(((oh*)(p))->oh_bg_ptr) +#define SET_OH_BG_PTR(p,q)(((oh*)(p))->oh_bg_ptr=GC_HIDE_POINTER(q)) +static void ensure_struct(ptr_t p) +{ +ptr_t old_back_ptr=GET_OH_BG_PTR(p); +if (!((word)old_back_ptr&FLAG_MANY)){ +back_edges*be=new_back_edges(); +be->flags=0; +if (0==old_back_ptr){ +be->n_edges=0; +} else { +be->n_edges=1; +be->edges[0]=old_back_ptr; +} +be->height=HEIGHT_UNKNOWN; +be->height_gc_no=(unsigned short)(GC_gc_no - 1); +GC_ASSERT((word)be>=(word)back_edge_space); +SET_OH_BG_PTR(p,(word)be|FLAG_MANY); +} +} +static void add_edge(ptr_t p,ptr_t q) +{ +ptr_t pred=GET_OH_BG_PTR(q); +back_edges*be,*be_cont; +word i; +GC_ASSERT(p==GC_base(p)&&q==GC_base(q)); +if (!GC_HAS_DEBUG_INFO(q)||!GC_HAS_DEBUG_INFO(p)){ +return; +} +if (NULL==pred){ +static unsigned random_number=13; +#define GOT_LUCKY_NUMBER (((++random_number)&0x7f)==0) +SET_OH_BG_PTR(q,p); +if (GOT_LUCKY_NUMBER)ensure_struct(q); +return; +} +{ +back_edges*e=(back_edges*)((word)pred&~FLAG_MANY); +word n_edges; +word total; +int local=0; +if (((word)pred&FLAG_MANY)!=0){ +n_edges=e->n_edges; +} else if (((word)COVERT_DATAFLOW(pred)&1)==0){ +n_edges=1; +local=-1; +} else { +n_edges=0; +} +for (total=0;total < n_edges;++total){ +if (local==MAX_IN){ +e=e->cont; +local=0; +} +if (local>=0) +pred=e->edges[local++]; +if (pred==p) +return; +} +} +ensure_struct(q); +be=(back_edges*)((word)GET_OH_BG_PTR(q)&~FLAG_MANY); +for (i=be->n_edges,be_cont=be;i > MAX_IN;i-=MAX_IN) +be_cont=be_cont->cont; +if (i==MAX_IN){ +be_cont->cont=new_back_edges(); +be_cont=be_cont->cont; +i=0; +} +be_cont->edges[i]=p; +be->n_edges++; +#ifdef DEBUG_PRINT_BIG_N_EDGES +if (GC_print_stats==VERBOSE&&be->n_edges==100){ +GC_err_printf("The following object has big in-degree:\n"); +GC_print_heap_obj(q); +} +#endif +} +typedef void (*per_object_func)(ptr_t p,size_t n_bytes,word gc_descr); +static void per_object_helper(struct hblk*h,word fn) +{ +hdr*hhdr=HDR(h); +size_t sz=(size_t)hhdr->hb_sz; +word descr=hhdr->hb_descr; +per_object_func f=(per_object_func)fn; +size_t i=0; +do { +f((ptr_t)(h->hb_body+i),sz,descr); +i+=sz; +} while (i+sz<=BYTES_TO_WORDS(HBLKSIZE)); +} 
+GC_INLINE void GC_apply_to_each_object(per_object_func f) +{ +GC_apply_to_all_blocks(per_object_helper,(word)f); +} +static void reset_back_edge(ptr_t p,size_t n_bytes GC_ATTR_UNUSED, +word gc_descr GC_ATTR_UNUSED) +{ +if (GC_HAS_DEBUG_INFO(p)){ +ptr_t old_back_ptr=GET_OH_BG_PTR(p); +if ((word)old_back_ptr&FLAG_MANY){ +back_edges*be=(back_edges*)((word)old_back_ptr&~FLAG_MANY); +if (!(be->flags&RETAIN)){ +deallocate_back_edges(be); +SET_OH_BG_PTR(p,0); +} else { +GC_ASSERT(GC_is_marked(p)); +be->n_edges=0; +if (0!=be->cont){ +deallocate_back_edges(be->cont); +be->cont=0; +} +GC_ASSERT(GC_is_marked(p)); +be->flags&=~RETAIN; +} +} else { +SET_OH_BG_PTR(p,0); +} +} +} +static void add_back_edges(ptr_t p,size_t n_bytes,word gc_descr) +{ +word*currentp=(word*)(p+sizeof(oh)); +if((gc_descr&GC_DS_TAGS)!=GC_DS_LENGTH){ +gc_descr=n_bytes; +} +while ((word)currentp < (word)(p+gc_descr)){ +word current=*currentp++; +FIXUP_POINTER(current); +if (current>=(word)GC_least_plausible_heap_addr&& +current<=(word)GC_greatest_plausible_heap_addr){ +ptr_t target=(ptr_t)GC_base((void*)current); +if (0!=target){ +add_edge(p,target); +} +} +} +} +GC_INNER void GC_build_back_graph(void) +{ +GC_ASSERT(I_HOLD_LOCK()); +GC_apply_to_each_object(add_back_edges); +} +static word backwards_height(ptr_t p) +{ +word result; +ptr_t pred=GET_OH_BG_PTR(p); +back_edges*be; +if (NULL==pred) +return 1; +if (((word)pred&FLAG_MANY)==0){ +if (is_in_progress(p))return 0; +push_in_progress(p); +result=backwards_height(pred)+1; +pop_in_progress(p); +return result; +} +be=(back_edges*)((word)pred&~FLAG_MANY); +if (be->height>=0&&be->height_gc_no==(unsigned short)GC_gc_no) +return be->height; +if (be->height==HEIGHT_IN_PROGRESS)return 0; +result=(be->height > 0?be->height:1); +be->height=HEIGHT_IN_PROGRESS; +{ +back_edges*e=be; +word n_edges; +word total; +int local=0; +if (((word)pred&FLAG_MANY)!=0){ +n_edges=e->n_edges; +} else if (((word)pred&1)==0){ +n_edges=1; +local=-1; +} else { +n_edges=0; +} +for (total=0;total < n_edges;++total){ +word this_height; +if (local==MAX_IN){ +e=e->cont; +local=0; +} +if (local>=0) +pred=e->edges[local++]; +if (GC_is_marked(pred)&&((word)GET_OH_BG_PTR(p)&FLAG_MANY)==0){ +GC_COND_LOG_PRINTF("Found bogus pointer from %p to %p\n", +(void*)pred,(void*)p); +this_height=1; +} else { +this_height=backwards_height(pred); +} +if (this_height>=result) +result=this_height+1; +} +} +be->height=result; +be->height_gc_no=(unsigned short)GC_gc_no; +return result; +} +STATIC word GC_max_height=0; +STATIC ptr_t GC_deepest_obj=NULL; +static void update_max_height(ptr_t p,size_t n_bytes GC_ATTR_UNUSED, +word gc_descr GC_ATTR_UNUSED) +{ +if (GC_is_marked(p)&&GC_HAS_DEBUG_INFO(p)){ +word p_height=0; +ptr_t p_deepest_obj=0; +ptr_t back_ptr; +back_edges*be=0; +back_ptr=GET_OH_BG_PTR(p); +if (0!=back_ptr&&((word)back_ptr&FLAG_MANY)){ +be=(back_edges*)((word)back_ptr&~FLAG_MANY); +if (be->height!=HEIGHT_UNKNOWN)p_height=be->height; +} +{ +ptr_t pred=GET_OH_BG_PTR(p); +back_edges*e=(back_edges*)((word)pred&~FLAG_MANY); +word n_edges; +word total; +int local=0; +if (((word)pred&FLAG_MANY)!=0){ +n_edges=e->n_edges; +} else if (pred!=NULL&&((word)pred&1)==0){ +n_edges=1; +local=-1; +} else { +n_edges=0; +} +for (total=0;total < n_edges;++total){ +if (local==MAX_IN){ +e=e->cont; +local=0; +} +if (local>=0) +pred=e->edges[local++]; +if (!GC_is_marked(pred)&&GC_HAS_DEBUG_INFO(pred)){ +word this_height=backwards_height(pred); +if (this_height > p_height){ +p_height=this_height; +p_deepest_obj=pred; +} +} +} +} +if (p_height > 0){ 
+if (be==0){ +ensure_struct(p); +back_ptr=GET_OH_BG_PTR(p); +be=(back_edges*)((word)back_ptr&~FLAG_MANY); +} +be->flags|=RETAIN; +be->height=p_height; +be->height_gc_no=(unsigned short)GC_gc_no; +} +if (p_height > GC_max_height){ +GC_max_height=p_height; +GC_deepest_obj=p_deepest_obj; +} +} +} +STATIC word GC_max_max_height=0; +GC_INNER void GC_traverse_back_graph(void) +{ +GC_ASSERT(I_HOLD_LOCK()); +GC_max_height=0; +GC_apply_to_each_object(update_max_height); +if (0!=GC_deepest_obj) +GC_set_mark_bit(GC_deepest_obj); +} +void GC_print_back_graph_stats(void) +{ +GC_ASSERT(I_HOLD_LOCK()); +GC_printf("Maximum backwards height of reachable objects at GC %lu is %lu\n", +(unsigned long)GC_gc_no,(unsigned long)GC_max_height); +if (GC_max_height > GC_max_max_height){ +ptr_t obj=GC_deepest_obj; +GC_max_max_height=GC_max_height; +UNLOCK(); +GC_err_printf( +"The following unreachable object is last in a longest chain " +"of unreachable objects:\n"); +GC_print_heap_obj(obj); +LOCK(); +} +GC_COND_LOG_PRINTF("Needed max total of %d back-edge structs\n", +GC_n_back_edge_structs); +GC_apply_to_each_object(reset_back_edge); +GC_deepest_obj=0; +} +#endif +STATIC word*GC_old_normal_bl=NULL; +STATIC word*GC_incomplete_normal_bl=NULL; +STATIC word*GC_old_stack_bl=NULL; +STATIC word*GC_incomplete_stack_bl=NULL; +STATIC word GC_total_stack_black_listed=0; +GC_INNER word GC_black_list_spacing=MINHINCR*HBLKSIZE; +STATIC void GC_clear_bl(word*); +GC_INNER void GC_default_print_heap_obj_proc(ptr_t p) +{ +ptr_t base=(ptr_t)GC_base(p); +int kind=HDR(base)->hb_obj_kind; +GC_err_printf("object at %p of appr. %lu bytes (%s)\n", +(void*)base,(unsigned long)GC_size(base), +kind==PTRFREE?"atomic": +IS_UNCOLLECTABLE(kind)?"uncollectable":"composite"); +} +GC_INNER void (*GC_print_heap_obj)(ptr_t p)=GC_default_print_heap_obj_proc; +#ifdef PRINT_BLACK_LIST +STATIC void GC_print_blacklisted_ptr(word p,ptr_t source, +const char*kind_str) +{ +ptr_t base=(ptr_t)GC_base(source); +if (0==base){ +GC_err_printf("Black listing (%s)%p referenced from %p in %s\n", +kind_str,(void*)p,(void*)source, +NULL!=source?"root set":"register"); +} else { +GC_err_printf("Black listing (%s)%p referenced from %p in" +" object at %p of appr. 
%lu bytes\n", +kind_str,(void*)p,(void*)source, +(void*)base,(unsigned long)GC_size(base)); +} +} +#endif +GC_INNER void GC_bl_init_no_interiors(void) +{ +if (GC_incomplete_normal_bl==0){ +GC_old_normal_bl=(word*)GC_scratch_alloc(sizeof(page_hash_table)); +GC_incomplete_normal_bl=(word*)GC_scratch_alloc( +sizeof(page_hash_table)); +if (GC_old_normal_bl==0||GC_incomplete_normal_bl==0){ +GC_err_printf("Insufficient memory for black list\n"); +EXIT(); +} +GC_clear_bl(GC_old_normal_bl); +GC_clear_bl(GC_incomplete_normal_bl); +} +} +GC_INNER void GC_bl_init(void) +{ +if (!GC_all_interior_pointers){ +GC_bl_init_no_interiors(); +} +GC_ASSERT(NULL==GC_old_stack_bl&&NULL==GC_incomplete_stack_bl); +GC_old_stack_bl=(word*)GC_scratch_alloc(sizeof(page_hash_table)); +GC_incomplete_stack_bl=(word*)GC_scratch_alloc(sizeof(page_hash_table)); +if (GC_old_stack_bl==0||GC_incomplete_stack_bl==0){ +GC_err_printf("Insufficient memory for black list\n"); +EXIT(); +} +GC_clear_bl(GC_old_stack_bl); +GC_clear_bl(GC_incomplete_stack_bl); +} +STATIC void GC_clear_bl(word*doomed) +{ +BZERO(doomed,sizeof(page_hash_table)); +} +STATIC void GC_copy_bl(word*old,word*dest) +{ +BCOPY(old,dest,sizeof(page_hash_table)); +} +static word total_stack_black_listed(void); +GC_INNER void GC_promote_black_lists(void) +{ +word*very_old_normal_bl=GC_old_normal_bl; +word*very_old_stack_bl=GC_old_stack_bl; +GC_old_normal_bl=GC_incomplete_normal_bl; +GC_old_stack_bl=GC_incomplete_stack_bl; +if (!GC_all_interior_pointers){ +GC_clear_bl(very_old_normal_bl); +} +GC_clear_bl(very_old_stack_bl); +GC_incomplete_normal_bl=very_old_normal_bl; +GC_incomplete_stack_bl=very_old_stack_bl; +GC_total_stack_black_listed=total_stack_black_listed(); +GC_VERBOSE_LOG_PRINTF( +"%lu bytes in heap blacklisted for interior pointers\n", +(unsigned long)GC_total_stack_black_listed); +if (GC_total_stack_black_listed!=0){ +GC_black_list_spacing= +HBLKSIZE*(GC_heapsize/GC_total_stack_black_listed); +} +if (GC_black_list_spacing < 3*HBLKSIZE){ +GC_black_list_spacing=3*HBLKSIZE; +} +if (GC_black_list_spacing > MAXHINCR*HBLKSIZE){ +GC_black_list_spacing=MAXHINCR*HBLKSIZE; +} +} +GC_INNER void GC_unpromote_black_lists(void) +{ +if (!GC_all_interior_pointers){ +GC_copy_bl(GC_old_normal_bl,GC_incomplete_normal_bl); +} +GC_copy_bl(GC_old_stack_bl,GC_incomplete_stack_bl); +} +#if defined(PARALLEL_MARK)&&defined(THREAD_SANITIZER) +#define backlist_set_pht_entry_from_index(db,index)set_pht_entry_from_index_concurrent(db,index) +#else +#define backlist_set_pht_entry_from_index(bl,index)set_pht_entry_from_index(bl,index) +#endif +#ifdef PRINT_BLACK_LIST +GC_INNER void GC_add_to_black_list_normal(word p,ptr_t source) +#else +GC_INNER void GC_add_to_black_list_normal(word p) +#endif +{ +if (GC_modws_valid_offsets[p&(sizeof(word)-1)]){ +word index=PHT_HASH((word)p); +if (HDR(p)==0||get_pht_entry_from_index(GC_old_normal_bl,index)){ +#ifdef PRINT_BLACK_LIST +if (!get_pht_entry_from_index(GC_incomplete_normal_bl,index)){ +GC_print_blacklisted_ptr(p,source,"normal"); +} +#endif +backlist_set_pht_entry_from_index(GC_incomplete_normal_bl,index); +} +} +} +#ifdef PRINT_BLACK_LIST +GC_INNER void GC_add_to_black_list_stack(word p,ptr_t source) +#else +GC_INNER void GC_add_to_black_list_stack(word p) +#endif +{ +word index=PHT_HASH((word)p); +if (HDR(p)==0||get_pht_entry_from_index(GC_old_stack_bl,index)){ +#ifdef PRINT_BLACK_LIST +if (!get_pht_entry_from_index(GC_incomplete_stack_bl,index)){ +GC_print_blacklisted_ptr(p,source,"stack"); +} +#endif 
+backlist_set_pht_entry_from_index(GC_incomplete_stack_bl,index); +} +} +struct hblk*GC_is_black_listed(struct hblk*h,word len) +{ +word index=PHT_HASH((word)h); +word i; +word nblocks; +if (!GC_all_interior_pointers +&&(get_pht_entry_from_index(GC_old_normal_bl,index) +||get_pht_entry_from_index(GC_incomplete_normal_bl,index))){ +return (h+1); +} +nblocks=divHBLKSZ(len); +for (i=0;;){ +if (GC_old_stack_bl[divWORDSZ(index)]==0 +&&GC_incomplete_stack_bl[divWORDSZ(index)]==0){ +i+=WORDSZ - modWORDSZ(index); +} else { +if (get_pht_entry_from_index(GC_old_stack_bl,index) +||get_pht_entry_from_index(GC_incomplete_stack_bl,index)){ +return(h+i+1); +} +i++; +} +if (i>=nblocks)break; +index=PHT_HASH((word)(h+i)); +} +return(0); +} +STATIC word GC_number_stack_black_listed(struct hblk*start, +struct hblk*endp1) +{ +struct hblk*h; +word result=0; +for (h=start;(word)h < (word)endp1;h++){ +word index=PHT_HASH((word)h); +if (get_pht_entry_from_index(GC_old_stack_bl,index))result++; +} +return(result); +} +static word total_stack_black_listed(void) +{ +unsigned i; +word total=0; +for (i=0;i < GC_n_heap_sects;i++){ +struct hblk*start=(struct hblk*)GC_heap_sects[i].hs_start; +struct hblk*endp1=start+divHBLKSZ(GC_heap_sects[i].hs_bytes); +total+=GC_number_stack_black_listed(start,endp1); +} +return(total*HBLKSIZE); +} +#ifdef CHECKSUMS +#define NSUMS 10000 +#define OFFSET 0x10000 +typedef struct { +GC_bool new_valid; +word old_sum; +word new_sum; +struct hblk*block; +} page_entry; +page_entry GC_sums[NSUMS]; +STATIC word GC_faulted[NSUMS]={ 0 }; +STATIC size_t GC_n_faulted=0; +#if defined(MPROTECT_VDB)&&!defined(DARWIN) +void GC_record_fault(struct hblk*h) +{ +word page=(word)h&~(GC_page_size - 1); +GC_ASSERT(GC_page_size!=0); +if (GC_n_faulted>=NSUMS)ABORT("write fault log overflowed"); +GC_faulted[GC_n_faulted++]=page; +} +#endif +STATIC GC_bool GC_was_faulted(struct hblk*h) +{ +size_t i; +word page=(word)h&~(GC_page_size - 1); +for (i=0;i < GC_n_faulted;++i){ +if (GC_faulted[i]==page)return TRUE; +} +return FALSE; +} +STATIC word GC_checksum(struct hblk*h) +{ +word*p=(word*)h; +word*lim=(word*)(h+1); +word result=0; +while ((word)p < (word)lim){ +result+=*p++; +} +return(result|0x80000000); +} +int GC_n_dirty_errors=0; +int GC_n_faulted_dirty_errors=0; +unsigned long GC_n_clean=0; +unsigned long GC_n_dirty=0; +STATIC void GC_update_check_page(struct hblk*h,int index) +{ +page_entry*pe=GC_sums+index; +hdr*hhdr=HDR(h); +struct hblk*b; +if (pe->block!=0&&pe->block!=h+OFFSET)ABORT("goofed"); +pe->old_sum=pe->new_sum; +pe->new_sum=GC_checksum(h); +#if!defined(MSWIN32)&&!defined(MSWINCE) +if (pe->new_sum!=0x80000000&&!GC_page_was_ever_dirty(h)){ +GC_err_printf("GC_page_was_ever_dirty(%p)is wrong\n",(void*)h); +} +#endif +if (GC_page_was_dirty(h)){ +GC_n_dirty++; +} else { +GC_n_clean++; +} +b=h; +while (IS_FORWARDING_ADDR_OR_NIL(hhdr)&&hhdr!=0){ +b-=(word)hhdr; +hhdr=HDR(b); +} +if (pe->new_valid +&&hhdr!=0&&hhdr->hb_descr!=0 +&&pe->old_sum!=pe->new_sum){ +if (!GC_page_was_dirty(h)||!GC_page_was_ever_dirty(h)){ +GC_bool was_faulted=GC_was_faulted(h); +GC_n_dirty_errors++; +if (was_faulted)GC_n_faulted_dirty_errors++; +} +} +pe->new_valid=TRUE; +pe->block=h+OFFSET; +} +word GC_bytes_in_used_blocks=0; +STATIC void GC_add_block(struct hblk*h,word dummy GC_ATTR_UNUSED) +{ +hdr*hhdr=HDR(h); +GC_bytes_in_used_blocks+=(hhdr->hb_sz+HBLKSIZE-1)&~(HBLKSIZE-1); +} +STATIC void GC_check_blocks(void) +{ +word bytes_in_free_blocks=GC_large_free_bytes; +GC_bytes_in_used_blocks=0; 
+GC_apply_to_all_blocks(GC_add_block,(word)0); +GC_COND_LOG_PRINTF("GC_bytes_in_used_blocks=%lu," +" bytes_in_free_blocks=%lu,heapsize=%lu\n", +(unsigned long)GC_bytes_in_used_blocks, +(unsigned long)bytes_in_free_blocks, +(unsigned long)GC_heapsize); +if (GC_bytes_in_used_blocks+bytes_in_free_blocks!=GC_heapsize){ +GC_err_printf("LOST SOME BLOCKS!!\n"); +} +} +void GC_check_dirty(void) +{ +int index; +unsigned i; +struct hblk*h; +ptr_t start; +GC_check_blocks(); +GC_n_dirty_errors=0; +GC_n_faulted_dirty_errors=0; +GC_n_clean=0; +GC_n_dirty=0; +index=0; +for (i=0;i < GC_n_heap_sects;i++){ +start=GC_heap_sects[i].hs_start; +for (h=(struct hblk*)start; +(word)h < (word)(start+GC_heap_sects[i].hs_bytes);h++){ +GC_update_check_page(h,index); +index++; +if (index>=NSUMS)goto out; +} +} +out: +GC_COND_LOG_PRINTF("Checked %lu clean and %lu dirty pages\n", +GC_n_clean,GC_n_dirty); +if (GC_n_dirty_errors > 0){ +GC_err_printf("Found %d dirty bit errors (%d were faulted)\n", +GC_n_dirty_errors,GC_n_faulted_dirty_errors); +} +for (i=0;i < GC_n_faulted;++i){ +GC_faulted[i]=0; +} +GC_n_faulted=0; +} +#endif +#ifndef GC_PMARK_H +#define GC_PMARK_H +#if defined(HAVE_CONFIG_H)&&!defined(GC_PRIVATE_H) +#endif +#ifndef GC_BUILD +#define GC_BUILD +#endif +#if (defined(__linux__)||defined(__GLIBC__)||defined(__GNU__))&&!defined(_GNU_SOURCE)&&defined(GC_PTHREADS)&&!defined(GC_NO_PTHREAD_SIGMASK) +#define _GNU_SOURCE 1 +#endif +#if defined(KEEP_BACK_PTRS)||defined(PRINT_BLACK_LIST) +#endif +EXTERN_C_BEGIN +#ifndef MARK_DESCR_OFFSET +#define MARK_DESCR_OFFSET sizeof(word) +#endif +#define BITMAP_BITS (WORDSZ - GC_DS_TAG_BITS) +#define PROC(descr)(GC_mark_procs[((descr)>>GC_DS_TAG_BITS)&(GC_MAX_MARK_PROCS-1)]) +#define ENV(descr)((descr)>>(GC_DS_TAG_BITS+GC_LOG_MAX_MARK_PROCS)) +#define MAX_ENV (((word)1<<(WORDSZ - GC_DS_TAG_BITS - GC_LOG_MAX_MARK_PROCS))- 1) +GC_EXTERN unsigned GC_n_mark_procs; +#define GC_MARK_STACK_DISCARDS (INITIAL_MARK_STACK_SIZE/8) +#ifdef PARALLEL_MARK +#endif +GC_INNER mse*GC_signal_mark_stack_overflow(mse*msp); +GC_INLINE mse*GC_push_obj(ptr_t obj,hdr*hhdr,mse*mark_stack_top, +mse*mark_stack_limit) +{ +word descr=hhdr->hb_descr; +GC_ASSERT(!HBLK_IS_FREE(hhdr)); +if (descr!=0){ +mark_stack_top++; +if ((word)mark_stack_top>=(word)mark_stack_limit){ +mark_stack_top=GC_signal_mark_stack_overflow(mark_stack_top); +} +mark_stack_top->mse_start=obj; +mark_stack_top->mse_descr.w=descr; +} +return mark_stack_top; +} +#define PUSH_CONTENTS(current,mark_stack_top,mark_stack_limit,source)do { hdr*my_hhdr;HC_GET_HDR(current,my_hhdr,source);mark_stack_top=GC_push_contents_hdr(current,mark_stack_top,mark_stack_limit,source,my_hhdr,TRUE);} while (0) +#ifdef USE_MARK_BYTES +#if defined(PARALLEL_MARK)&&defined(AO_HAVE_char_store)&&!defined(BASE_ATOMIC_OPS_EMULATED) +#define SET_MARK_BIT_EXIT_IF_SET(hhdr,bit_no){ volatile unsigned char*mark_byte_addr=(unsigned char*)(hhdr)->hb_marks+(bit_no);if (AO_char_load(mark_byte_addr)!=0)break;AO_char_store(mark_byte_addr,1);} +#else +#define SET_MARK_BIT_EXIT_IF_SET(hhdr,bit_no){ char*mark_byte_addr=(char*)(hhdr)->hb_marks+(bit_no);if (*mark_byte_addr!=0)break;*mark_byte_addr=1;} +#endif +#else +#ifdef PARALLEL_MARK +#ifdef THREAD_SANITIZER +#define OR_WORD_EXIT_IF_SET(addr,bits){ if (!((word)AO_load((volatile AO_t*)(addr))&(bits))){ AO_or((volatile AO_t*)(addr),(AO_t)(bits));} else { break;} } +#else +#define OR_WORD_EXIT_IF_SET(addr,bits){ if (!(*(addr)&(bits))){ AO_or((volatile AO_t*)(addr),(AO_t)(bits));} else { break;} } +#endif +#else +#define 
OR_WORD_EXIT_IF_SET(addr,bits){ word old=*(addr);word my_bits=(bits);if ((old&my_bits)!=0)break;*(addr)=old|my_bits;} +#endif +#define SET_MARK_BIT_EXIT_IF_SET(hhdr,bit_no){ word*mark_word_addr=(hhdr)->hb_marks+divWORDSZ(bit_no);OR_WORD_EXIT_IF_SET(mark_word_addr,(word)1<hb_n_marks,AO_load(&hhdr->hb_n_marks)+1) +#else +#define INCR_MARKS(hhdr)(void)(++hhdr->hb_n_marks) +#endif +#ifdef ENABLE_TRACE +#define TRACE(source,cmd)if (GC_trace_addr!=0&&(ptr_t)(source)==GC_trace_addr)cmd +#define TRACE_TARGET(target,cmd)if (GC_trace_addr!=0&&(target)==*(ptr_t*)GC_trace_addr)cmd +#else +#define TRACE(source,cmd) +#define TRACE_TARGET(source,cmd) +#endif +#if defined(I386)&&defined(__GNUC__)&&!defined(NACL) +#define LONG_MULT(hprod,lprod,x,y)do { __asm__ __volatile__("mull %2":"=a"(lprod),"=d"(hprod):"g"(y),"0"(x));} while (0) +#else +#if defined(__int64)&&!defined(__GNUC__)&&!defined(CPPCHECK) +#define ULONG_MULT_T unsigned __int64 +#else +#define ULONG_MULT_T unsigned long long +#endif +#define LONG_MULT(hprod,lprod,x,y)do { ULONG_MULT_T prod=(ULONG_MULT_T)(x)*(ULONG_MULT_T)(y);GC_STATIC_ASSERT(sizeof(x)+sizeof(y)<=sizeof(prod));hprod=prod>>32;lprod=(unsigned32)prod;} while (0) +#endif +GC_INLINE mse*GC_push_contents_hdr(ptr_t current,mse*mark_stack_top, +mse*mark_stack_limit,ptr_t source, +hdr*hhdr,GC_bool do_offset_check) +{ +do { +size_t displ=HBLKDISPL(current); +ptr_t base=current; +#ifdef MARK_BIT_PER_GRANULE +size_t gran_displ=BYTES_TO_GRANULES(displ); +size_t gran_offset=hhdr->hb_map[gran_displ]; +size_t byte_offset=displ&(GRANULE_BYTES - 1); +if (EXPECT((gran_offset|byte_offset)!=0,FALSE)) +#else +unsigned32 gran_displ; +unsigned32 inv_sz=hhdr->hb_inv_sz; +#endif +{ +#ifdef MARK_BIT_PER_GRANULE +if ((hhdr->hb_flags&LARGE_BLOCK)!=0) +#else +if (EXPECT(inv_sz==LARGE_INV_SZ,FALSE)) +#endif +{ +size_t obj_displ; +base=(ptr_t)hhdr->hb_block; +obj_displ=current - base; +if (obj_displ!=displ){ +GC_ASSERT(obj_displ < hhdr->hb_sz); +} else if (do_offset_check&&!GC_valid_offsets[obj_displ]){ +GC_ADD_TO_BLACK_LIST_NORMAL(current,source); +break; +} +GC_ASSERT(hhdr->hb_sz > HBLKSIZE +||hhdr->hb_block==HBLKPTR(current)); +GC_ASSERT((word)hhdr->hb_block<=(word)current); +gran_displ=0; +} else { +#ifdef MARK_BIT_PER_GRANULE +size_t obj_displ=GRANULES_TO_BYTES(gran_offset)+byte_offset; +#else +unsigned32 low_prod; +LONG_MULT(gran_displ,low_prod,(unsigned32)displ,inv_sz); +if ((low_prod>>16)!=0) +#endif +{ +#if defined(MARK_BIT_PER_OBJ)&&!defined(MARK_BIT_PER_GRANULE) +size_t obj_displ; +GC_STATIC_ASSERT(HBLKSIZE<=(1<<15)); +obj_displ=(((low_prod>>16)+1)*(size_t)hhdr->hb_sz)>>16; +#endif +if (do_offset_check&&!GC_valid_offsets[obj_displ]){ +GC_ADD_TO_BLACK_LIST_NORMAL(current,source); +break; +} +#ifdef MARK_BIT_PER_GRANULE +gran_displ-=gran_offset; +#endif +base-=obj_displ; +} +} +} +#ifdef MARK_BIT_PER_GRANULE +GC_ASSERT(hhdr==GC_find_header(base)); +GC_ASSERT(gran_displ % BYTES_TO_GRANULES(hhdr->hb_sz)==0); +#else +GC_ASSERT(gran_displ<=HBLK_OBJS(hhdr->hb_sz)); +#endif +TRACE(source,GC_log_printf("GC #%u:passed validity tests\n", +(unsigned)GC_gc_no)); +SET_MARK_BIT_EXIT_IF_SET(hhdr,gran_displ); +TRACE(source,GC_log_printf("GC #%u:previously unmarked\n", +(unsigned)GC_gc_no)); +TRACE_TARGET(base,GC_log_printf("GC #%u:marking %p from %p instead\n", +(unsigned)GC_gc_no,(void*)base, +(void*)source)); +INCR_MARKS(hhdr); +GC_STORE_BACK_PTR(source,base); +mark_stack_top=GC_push_obj(base,hhdr,mark_stack_top, +mark_stack_limit); +} while (0); +return mark_stack_top; +} +#if 
defined(PRINT_BLACK_LIST)||defined(KEEP_BACK_PTRS) +#define PUSH_ONE_CHECKED_STACK(p,source)GC_mark_and_push_stack((ptr_t)(p),(ptr_t)(source)) +#else +#define PUSH_ONE_CHECKED_STACK(p,source)GC_mark_and_push_stack((ptr_t)(p)) +#endif +#ifdef NEED_FIXUP_POINTER +#define GC_PUSH_ONE_STACK(p,source)do { if ((word)(p)>=(word)GC_least_plausible_heap_addr&&(word)(p)< (word)GC_greatest_plausible_heap_addr){ PUSH_ONE_CHECKED_STACK(p,source);} FIXUP_POINTER(p);if ((word)(p)>=(word)GC_least_plausible_heap_addr&&(word)(p)< (word)GC_greatest_plausible_heap_addr){ PUSH_ONE_CHECKED_STACK(p,source);} } while (0) +#else +#define GC_PUSH_ONE_STACK(p,source)do { if ((word)(p)>=(word)GC_least_plausible_heap_addr&&(word)(p)< (word)GC_greatest_plausible_heap_addr){ PUSH_ONE_CHECKED_STACK(p,source);} } while (0) +#endif +#define GC_PUSH_ONE_HEAP(p,source,mark_stack_top)do { FIXUP_POINTER(p);if ((word)(p)>=(word)GC_least_plausible_heap_addr&&(word)(p)< (word)GC_greatest_plausible_heap_addr)mark_stack_top=GC_mark_and_push((void*)(p),mark_stack_top,GC_mark_stack_limit,(void**)(source));} while (0) +GC_INNER mse*GC_mark_from(mse*top,mse*bottom,mse*limit); +#define MARK_FROM_MARK_STACK()GC_mark_stack_top=GC_mark_from(GC_mark_stack_top,GC_mark_stack,GC_mark_stack+GC_mark_stack_size); +#define GC_mark_stack_empty()((word)GC_mark_stack_top < (word)GC_mark_stack) +#define GC_MARK_FO(real_ptr,mark_proc)do { (*(mark_proc))(real_ptr);while (!GC_mark_stack_empty())MARK_FROM_MARK_STACK();if (GC_mark_state!=MS_NONE){ GC_set_mark_bit(real_ptr);while (!GC_mark_some((ptr_t)0)){ } } } while (0) +#define MS_NONE 0 +#define MS_PUSH_RESCUERS 1 +#define MS_PUSH_UNCOLLECTABLE 2 +#define MS_ROOTS_PUSHED 3 +#define MS_PARTIALLY_INVALID 4 +#define MS_INVALID 5 +EXTERN_C_END +#endif +#ifdef GC_GCJ_SUPPORT +#ifndef GC_GCJ_H +#define GC_GCJ_H +#ifndef GC_H +#endif +#ifdef __cplusplus +extern "C" { +#endif +GC_API void GC_CALL GC_init_gcj_malloc(int, +void*); +GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1)void*GC_CALL +GC_gcj_malloc(size_t, +void*); +GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1)void*GC_CALL +GC_debug_gcj_malloc(size_t, +void*, +GC_EXTRA_PARAMS); +GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1)void*GC_CALL +GC_gcj_malloc_ignore_off_page(size_t, +void*); +GC_API int GC_gcj_kind; +GC_API int GC_gcj_debug_kind; +#ifdef GC_DEBUG +#define GC_GCJ_MALLOC(s,d)GC_debug_gcj_malloc(s,d,GC_EXTRAS) +#define GC_GCJ_MALLOC_IGNORE_OFF_PAGE(s,d)GC_debug_gcj_malloc(s,d,GC_EXTRAS) +#else +#define GC_GCJ_MALLOC(s,d)GC_gcj_malloc(s,d) +#define GC_GCJ_MALLOC_IGNORE_OFF_PAGE(s,d)GC_gcj_malloc_ignore_off_page(s,d) +#endif +#ifdef __cplusplus +} +#endif +#endif +int GC_gcj_kind=0; +int GC_gcj_debug_kind=0; +STATIC struct GC_ms_entry*GC_gcj_fake_mark_proc(word*addr GC_ATTR_UNUSED, +struct GC_ms_entry*mark_stack_ptr, +struct GC_ms_entry*mark_stack_limit GC_ATTR_UNUSED, +word env GC_ATTR_UNUSED) +{ +ABORT_RET("No client gcj mark proc is specified"); +return mark_stack_ptr; +} +GC_API void GC_CALL GC_init_gcj_malloc(int mp_index, +void*mp) +{ +#ifndef GC_IGNORE_GCJ_INFO +GC_bool ignore_gcj_info; +#endif +DCL_LOCK_STATE; +if (mp==0) +mp=(void*)(word)GC_gcj_fake_mark_proc; +GC_init(); +LOCK(); +if (GC_gcjobjfreelist!=NULL){ +UNLOCK(); +return; +} +#ifdef GC_IGNORE_GCJ_INFO +#define ignore_gcj_info TRUE +#else +ignore_gcj_info=(0!=GETENV("GC_IGNORE_GCJ_INFO")); +#endif +if (ignore_gcj_info){ +GC_COND_LOG_PRINTF("Gcj-style type information is disabled!\n"); +} +GC_ASSERT(GC_mark_procs[mp_index]==(GC_mark_proc)0); +GC_mark_procs[mp_index]=(GC_mark_proc)(word)mp; +if 
((unsigned)mp_index>=GC_n_mark_procs) +ABORT("GC_init_gcj_malloc:bad index"); +GC_gcjobjfreelist=(ptr_t*)GC_new_free_list_inner(); +if (ignore_gcj_info){ +GC_gcj_kind=GC_new_kind_inner((void**)GC_gcjobjfreelist, +GC_DS_LENGTH, +TRUE,TRUE); +GC_gcj_debug_kind=GC_gcj_kind; +} else { +GC_gcj_kind=GC_new_kind_inner( +(void**)GC_gcjobjfreelist, +(((word)(-(signed_word)MARK_DESCR_OFFSET +- GC_INDIR_PER_OBJ_BIAS)) +|GC_DS_PER_OBJECT), +FALSE,TRUE); +GC_gcj_debug_kind=GC_new_kind_inner(GC_new_free_list_inner(), +GC_MAKE_PROC(mp_index, +1), +FALSE,TRUE); +} +UNLOCK(); +#undef ignore_gcj_info +} +#define GENERAL_MALLOC_INNER(lb,k)GC_clear_stack(GC_generic_malloc_inner(lb,k)) +#define GENERAL_MALLOC_INNER_IOP(lb,k)GC_clear_stack(GC_generic_malloc_inner_ignore_off_page(lb,k)) +static void maybe_finalize(void) +{ +static word last_finalized_no=0; +DCL_LOCK_STATE; +if (GC_gc_no==last_finalized_no|| +!EXPECT(GC_is_initialized,TRUE))return; +UNLOCK(); +GC_INVOKE_FINALIZERS(); +LOCK(); +last_finalized_no=GC_gc_no; +} +#ifdef THREAD_LOCAL_ALLOC +GC_INNER void*GC_core_gcj_malloc(size_t lb, +void*ptr_to_struct_containing_descr) +#else +GC_API GC_ATTR_MALLOC void*GC_CALL GC_gcj_malloc(size_t lb, +void*ptr_to_struct_containing_descr) +#endif +{ +ptr_t op; +DCL_LOCK_STATE; +GC_DBG_COLLECT_AT_MALLOC(lb); +if(SMALL_OBJ(lb)){ +word lg; +LOCK(); +lg=GC_size_map[lb]; +op=GC_gcjobjfreelist[lg]; +if(EXPECT(0==op,FALSE)){ +maybe_finalize(); +op=(ptr_t)GENERAL_MALLOC_INNER((word)lb,GC_gcj_kind); +if (0==op){ +GC_oom_func oom_fn=GC_oom_fn; +UNLOCK(); +return((*oom_fn)(lb)); +} +} else { +GC_gcjobjfreelist[lg]=(ptr_t)obj_link(op); +GC_bytes_allocd+=GRANULES_TO_BYTES((word)lg); +} +GC_ASSERT(((void**)op)[1]==0); +} else { +LOCK(); +maybe_finalize(); +op=(ptr_t)GENERAL_MALLOC_INNER((word)lb,GC_gcj_kind); +if (0==op){ +GC_oom_func oom_fn=GC_oom_fn; +UNLOCK(); +return((*oom_fn)(lb)); +} +} +*(void**)op=ptr_to_struct_containing_descr; +UNLOCK(); +GC_dirty(op); +REACHABLE_AFTER_DIRTY(ptr_to_struct_containing_descr); +return (void*)op; +} +GC_API GC_ATTR_MALLOC void*GC_CALL GC_debug_gcj_malloc(size_t lb, +void*ptr_to_struct_containing_descr,GC_EXTRA_PARAMS) +{ +void*result; +DCL_LOCK_STATE; +LOCK(); +maybe_finalize(); +result=GC_generic_malloc_inner(SIZET_SAT_ADD(lb,DEBUG_BYTES), +GC_gcj_debug_kind); +if (result==0){ +GC_oom_func oom_fn=GC_oom_fn; +UNLOCK(); +GC_err_printf("GC_debug_gcj_malloc(%lu,%p)returning NULL (%s:%d)\n", +(unsigned long)lb,ptr_to_struct_containing_descr,s,i); +return((*oom_fn)(lb)); +} +*((void**)((ptr_t)result+sizeof(oh)))=ptr_to_struct_containing_descr; +if (!GC_debugging_started){ +GC_start_debugging_inner(); +} +ADD_CALL_CHAIN(result,ra); +result=GC_store_debug_info_inner(result,(word)lb,s,i); +UNLOCK(); +GC_dirty(result); +REACHABLE_AFTER_DIRTY(ptr_to_struct_containing_descr); +return result; +} +GC_API GC_ATTR_MALLOC void*GC_CALL GC_gcj_malloc_ignore_off_page(size_t lb, +void*ptr_to_struct_containing_descr) +{ +ptr_t op; +DCL_LOCK_STATE; +GC_DBG_COLLECT_AT_MALLOC(lb); +if(SMALL_OBJ(lb)){ +word lg; +LOCK(); +lg=GC_size_map[lb]; +op=GC_gcjobjfreelist[lg]; +if (EXPECT(0==op,FALSE)){ +maybe_finalize(); +op=(ptr_t)GENERAL_MALLOC_INNER_IOP(lb,GC_gcj_kind); +if (0==op){ +GC_oom_func oom_fn=GC_oom_fn; +UNLOCK(); +return((*oom_fn)(lb)); +} +} else { +GC_gcjobjfreelist[lg]=(ptr_t)obj_link(op); +GC_bytes_allocd+=GRANULES_TO_BYTES((word)lg); +} +} else { +LOCK(); +maybe_finalize(); +op=(ptr_t)GENERAL_MALLOC_INNER_IOP(lb,GC_gcj_kind); +if (0==op){ +GC_oom_func oom_fn=GC_oom_fn; +UNLOCK(); +return((*oom_fn)(lb)); 
+} +} +*(void**)op=ptr_to_struct_containing_descr; +UNLOCK(); +GC_dirty(op); +REACHABLE_AFTER_DIRTY(ptr_to_struct_containing_descr); +return (void*)op; +} +#endif +GC_INNER hdr*GC_find_header(ptr_t h) +{ +#ifdef HASH_TL +hdr*result; +GET_HDR(h,result); +return(result); +#else +return(HDR_INNER(h)); +#endif +} +GC_INNER hdr* +#ifdef PRINT_BLACK_LIST +GC_header_cache_miss(ptr_t p,hdr_cache_entry*hce,ptr_t source) +#else +GC_header_cache_miss(ptr_t p,hdr_cache_entry*hce) +#endif +{ +hdr*hhdr; +HC_MISS(); +GET_HDR(p,hhdr); +if (IS_FORWARDING_ADDR_OR_NIL(hhdr)){ +if (GC_all_interior_pointers){ +if (hhdr!=0){ +ptr_t current=p; +current=(ptr_t)HBLKPTR(current); +do { +current=current - HBLKSIZE*(word)hhdr; +hhdr=HDR(current); +} while(IS_FORWARDING_ADDR_OR_NIL(hhdr)); +if (hhdr->hb_flags&IGNORE_OFF_PAGE) +return 0; +if (HBLK_IS_FREE(hhdr) +||p - current>=(ptrdiff_t)(hhdr->hb_sz)){ +GC_ADD_TO_BLACK_LIST_NORMAL(p,source); +return 0; +} +} else { +GC_ADD_TO_BLACK_LIST_NORMAL(p,source); +} +GC_ASSERT(hhdr==0||!HBLK_IS_FREE(hhdr)); +return hhdr; +} else { +if (hhdr==0){ +GC_ADD_TO_BLACK_LIST_NORMAL(p,source); +} +return 0; +} +} else { +if (HBLK_IS_FREE(hhdr)){ +GC_ADD_TO_BLACK_LIST_NORMAL(p,source); +return 0; +} else { +hce->block_addr=(word)(p)>>LOG_HBLKSIZE; +hce->hce_hdr=hhdr; +return hhdr; +} +} +} +GC_INNER ptr_t GC_scratch_alloc(size_t bytes) +{ +ptr_t result=GC_scratch_free_ptr; +size_t bytes_to_get; +bytes=ROUNDUP_GRANULE_SIZE(bytes); +for (;;){ +GC_scratch_free_ptr+=bytes; +if ((word)GC_scratch_free_ptr<=(word)GC_scratch_end_ptr){ +return result; +} +GC_ASSERT(GC_page_size!=0); +if (bytes>=MINHINCR*HBLKSIZE){ +bytes_to_get=ROUNDUP_PAGESIZE_IF_MMAP(bytes); +result=(ptr_t)GET_MEM(bytes_to_get); +GC_add_to_our_memory(result,bytes_to_get); +GC_scratch_free_ptr-=bytes; +if (result!=NULL){ +GC_scratch_last_end_ptr=result+bytes; +} +return result; +} +bytes_to_get=ROUNDUP_PAGESIZE_IF_MMAP(MINHINCR*HBLKSIZE); +result=(ptr_t)GET_MEM(bytes_to_get); +GC_add_to_our_memory(result,bytes_to_get); +if (NULL==result){ +WARN("Out of memory - trying to allocate requested amount" +" (%" WARN_PRIdPTR " bytes)...\n",(word)bytes); +GC_scratch_free_ptr-=bytes; +bytes_to_get=ROUNDUP_PAGESIZE_IF_MMAP(bytes); +result=(ptr_t)GET_MEM(bytes_to_get); +GC_add_to_our_memory(result,bytes_to_get); +return result; +} +GC_scratch_free_ptr=result; +GC_scratch_end_ptr=GC_scratch_free_ptr+bytes_to_get; +GC_scratch_last_end_ptr=GC_scratch_end_ptr; +} +} +static hdr*alloc_hdr(void) +{ +hdr*result; +if (NULL==GC_hdr_free_list){ +result=(hdr*)GC_scratch_alloc(sizeof(hdr)); +} else { +result=GC_hdr_free_list; +GC_hdr_free_list=(hdr*)result->hb_next; +} +return(result); +} +GC_INLINE void free_hdr(hdr*hhdr) +{ +hhdr->hb_next=(struct hblk*)GC_hdr_free_list; +GC_hdr_free_list=hhdr; +} +#ifdef COUNT_HDR_CACHE_HITS +word GC_hdr_cache_hits=0; +word GC_hdr_cache_misses=0; +#endif +GC_INNER void GC_init_headers(void) +{ +unsigned i; +GC_ASSERT(NULL==GC_all_nils); +GC_all_nils=(bottom_index*)GC_scratch_alloc(sizeof(bottom_index)); +if (GC_all_nils==NULL){ +GC_err_printf("Insufficient memory for GC_all_nils\n"); +EXIT(); +} +BZERO(GC_all_nils,sizeof(bottom_index)); +for (i=0;i < TOP_SZ;i++){ +GC_top_index[i]=GC_all_nils; +} +} +static GC_bool get_index(word addr) +{ +word hi=(word)(addr)>>(LOG_BOTTOM_SZ+LOG_HBLKSIZE); +bottom_index*r; +bottom_index*p; +bottom_index**prev; +bottom_index*pi; +word i; +GC_ASSERT(I_HOLD_LOCK()); +#ifdef HASH_TL +i=TL_HASH(hi); +pi=p=GC_top_index[i]; +while(p!=GC_all_nils){ +if (p->key==hi)return(TRUE); 
+p=p->hash_link; +} +#else +if (GC_top_index[hi]!=GC_all_nils) +return TRUE; +i=hi; +#endif +r=(bottom_index*)GC_scratch_alloc(sizeof(bottom_index)); +if (EXPECT(NULL==r,FALSE)) +return FALSE; +BZERO(r,sizeof(bottom_index)); +r->key=hi; +#ifdef HASH_TL +r->hash_link=pi; +#endif +prev=&GC_all_bottom_indices; +pi=0; +while ((p=*prev)!=0&&p->key < hi){ +pi=p; +prev=&(p->asc_link); +} +r->desc_link=pi; +if (0==p){ +GC_all_bottom_indices_end=r; +} else { +p->desc_link=r; +} +r->asc_link=p; +*prev=r; +GC_top_index[i]=r; +return(TRUE); +} +GC_INNER struct hblkhdr*GC_install_header(struct hblk*h) +{ +hdr*result; +if (!get_index((word)h))return(0); +result=alloc_hdr(); +if (result){ +SET_HDR(h,result); +#ifdef USE_MUNMAP +result->hb_last_reclaimed=(unsigned short)GC_gc_no; +#endif +} +return(result); +} +GC_INNER GC_bool GC_install_counts(struct hblk*h,size_t sz) +{ +struct hblk*hbp; +for (hbp=h;(word)hbp < (word)h+sz;hbp+=BOTTOM_SZ){ +if (!get_index((word)hbp)) +return FALSE; +if ((word)hbp > GC_WORD_MAX - (word)BOTTOM_SZ*HBLKSIZE) +break; +} +if (!get_index((word)h+sz - 1)) +return FALSE; +for (hbp=h+1;(word)hbp < (word)h+sz;hbp+=1){ +word i=HBLK_PTR_DIFF(hbp,h); +SET_HDR(hbp,(hdr*)(i > MAX_JUMP?MAX_JUMP:i)); +} +return TRUE; +} +GC_INNER void GC_remove_header(struct hblk*h) +{ +hdr**ha; +GET_HDR_ADDR(h,ha); +free_hdr(*ha); +*ha=0; +} +GC_INNER void GC_remove_counts(struct hblk*h,size_t sz) +{ +struct hblk*hbp; +for (hbp=h+1;(word)hbp < (word)h+sz;hbp+=1){ +SET_HDR(hbp,0); +} +} +void GC_apply_to_all_blocks(void (*fn)(struct hblk*h,word client_data), +word client_data) +{ +signed_word j; +bottom_index*index_p; +for (index_p=GC_all_bottom_indices;index_p!=0; +index_p=index_p->asc_link){ +for (j=BOTTOM_SZ-1;j>=0;){ +if (!IS_FORWARDING_ADDR_OR_NIL(index_p->index[j])){ +if (!HBLK_IS_FREE(index_p->index[j])){ +(*fn)(((struct hblk*) +(((index_p->key<index[j]==0){ +j--; +} else { +j-=(signed_word)(index_p->index[j]); +} +} +} +} +GC_INNER struct hblk*GC_next_block(struct hblk*h,GC_bool allow_free) +{ +REGISTER bottom_index*bi; +REGISTER word j=((word)h>>LOG_HBLKSIZE)&(BOTTOM_SZ-1); +GC_ASSERT(I_HOLD_LOCK()); +GET_BI(h,bi); +if (bi==GC_all_nils){ +REGISTER word hi=(word)h>>(LOG_BOTTOM_SZ+LOG_HBLKSIZE); +bi=GC_all_bottom_indices; +while (bi!=0&&bi->key < hi)bi=bi->asc_link; +j=0; +} +while (bi!=0){ +while (j < BOTTOM_SZ){ +hdr*hhdr=bi->index[j]; +if (IS_FORWARDING_ADDR_OR_NIL(hhdr)){ +j++; +} else { +if (allow_free||!HBLK_IS_FREE(hhdr)){ +return ((struct hblk*) +(((bi->key<hb_sz); +} +} +} +j=0; +bi=bi->asc_link; +} +return(0); +} +GC_INNER struct hblk*GC_prev_block(struct hblk*h) +{ +bottom_index*bi; +signed_word j=((word)h>>LOG_HBLKSIZE)&(BOTTOM_SZ-1); +GC_ASSERT(I_HOLD_LOCK()); +GET_BI(h,bi); +if (bi==GC_all_nils){ +word hi=(word)h>>(LOG_BOTTOM_SZ+LOG_HBLKSIZE); +bi=GC_all_bottom_indices_end; +while (bi!=0&&bi->key > hi)bi=bi->desc_link; +j=BOTTOM_SZ - 1; +} +while(bi!=0){ +while (j>=0){ +hdr*hhdr=bi->index[j]; +if (0==hhdr){ +--j; +} else if (IS_FORWARDING_ADDR_OR_NIL(hhdr)){ +j-=(signed_word)hhdr; +} else { +return((struct hblk*) +(((bi->key<desc_link; +} +return(0); +} +#include +#ifndef SMALL_CONFIG +STATIC ptr_t GC_build_fl_clear2(struct hblk*h,ptr_t ofl) +{ +word*p=(word*)(h->hb_body); +word*lim=(word*)(h+1); +p[0]=(word)ofl; +p[1]=0; +p[2]=(word)p; +p[3]=0; +p+=4; +for (;(word)p < (word)lim;p+=4){ +p[0]=(word)(p-2); +p[1]=0; +p[2]=(word)p; +p[3]=0; +}; +return((ptr_t)(p-2)); +} +STATIC ptr_t GC_build_fl_clear4(struct hblk*h,ptr_t ofl) +{ +word*p=(word*)(h->hb_body); +word*lim=(word*)(h+1); 
+p[0]=(word)ofl; +p[1]=0; +p[2]=0; +p[3]=0; +p+=4; +for (;(word)p < (word)lim;p+=4){ +GC_PREFETCH_FOR_WRITE((ptr_t)(p+64)); +p[0]=(word)(p-4); +p[1]=0; +CLEAR_DOUBLE(p+2); +}; +return((ptr_t)(p-4)); +} +STATIC ptr_t GC_build_fl2(struct hblk*h,ptr_t ofl) +{ +word*p=(word*)(h->hb_body); +word*lim=(word*)(h+1); +p[0]=(word)ofl; +p[2]=(word)p; +p+=4; +for (;(word)p < (word)lim;p+=4){ +p[0]=(word)(p-2); +p[2]=(word)p; +}; +return((ptr_t)(p-2)); +} +STATIC ptr_t GC_build_fl4(struct hblk*h,ptr_t ofl) +{ +word*p=(word*)(h->hb_body); +word*lim=(word*)(h+1); +p[0]=(word)ofl; +p[4]=(word)p; +p+=8; +for (;(word)p < (word)lim;p+=8){ +GC_PREFETCH_FOR_WRITE((ptr_t)(p+64)); +p[0]=(word)(p-4); +p[4]=(word)p; +}; +return((ptr_t)(p-4)); +} +#endif +GC_INNER ptr_t GC_build_fl(struct hblk*h,size_t sz,GC_bool clear, +ptr_t list) +{ +word*p,*prev; +word*last_object; +GC_PREFETCH_FOR_WRITE((ptr_t)h); +GC_PREFETCH_FOR_WRITE((ptr_t)h+128); +GC_PREFETCH_FOR_WRITE((ptr_t)h+256); +GC_PREFETCH_FOR_WRITE((ptr_t)h+378); +#ifndef SMALL_CONFIG +switch (sz){ +case 2:if (clear){ +return GC_build_fl_clear2(h,list); +} else { +return GC_build_fl2(h,list); +} +case 4:if (clear){ +return GC_build_fl_clear4(h,list); +} else { +return GC_build_fl4(h,list); +} +default: +break; +} +#endif +if (clear)BZERO(h,HBLKSIZE); +p=(word*)(h->hb_body)+sz; +prev=(word*)(h->hb_body); +last_object=(word*)((char*)h+HBLKSIZE); +last_object-=sz; +while ((word)p<=(word)last_object){ +obj_link(p)=(ptr_t)prev; +prev=p; +p+=sz; +} +p-=sz; +*(ptr_t*)h=list; +return ((ptr_t)p); +} +GC_INNER void GC_new_hblk(size_t gran,int kind) +{ +struct hblk*h; +GC_bool clear=GC_obj_kinds[kind].ok_init; +GC_STATIC_ASSERT((sizeof (struct hblk))==HBLKSIZE); +if (GC_debugging_started)clear=TRUE; +h=GC_allochblk(GRANULES_TO_BYTES(gran),kind,0); +if (h==0)return; +if (IS_UNCOLLECTABLE(kind))GC_set_hdr_marks(HDR(h)); +GC_obj_kinds[kind].ok_freelist[gran]= +GC_build_fl(h,GRANULES_TO_WORDS(gran),clear, +(ptr_t)GC_obj_kinds[kind].ok_freelist[gran]); +} +GC_API void GC_CALL GC_register_displacement(size_t offset) +{ +DCL_LOCK_STATE; +LOCK(); +GC_register_displacement_inner(offset); +UNLOCK(); +} +GC_INNER void GC_register_displacement_inner(size_t offset) +{ +GC_ASSERT(I_HOLD_LOCK()); +if (offset>=VALID_OFFSET_SZ){ +ABORT("Bad argument to GC_register_displacement"); +} +if (!GC_valid_offsets[offset]){ +GC_valid_offsets[offset]=TRUE; +GC_modws_valid_offsets[offset % sizeof(word)]=TRUE; +} +} +#ifdef MARK_BIT_PER_GRANULE +GC_INNER GC_bool GC_add_map_entry(size_t granules) +{ +unsigned displ; +unsigned short*new_map; +if (granules > BYTES_TO_GRANULES(MAXOBJBYTES))granules=0; +if (GC_obj_map[granules]!=0){ +return(TRUE); +} +new_map=(unsigned short*)GC_scratch_alloc(MAP_LEN*sizeof(short)); +if (new_map==0)return(FALSE); +GC_COND_LOG_PRINTF( +"Adding block map for size of %u granules (%u bytes)\n", +(unsigned)granules,(unsigned)GRANULES_TO_BYTES(granules)); +if (granules==0){ +for (displ=0;displ < BYTES_TO_GRANULES(HBLKSIZE);displ++){ +new_map[displ]=1; +} +} else { +for (displ=0;displ < BYTES_TO_GRANULES(HBLKSIZE);displ++){ +new_map[displ]=(unsigned short)(displ % granules); +} +} +GC_obj_map[granules]=new_map; +return(TRUE); +} +#endif +GC_INNER void GC_initialize_offsets(void) +{ +unsigned i; +if (GC_all_interior_pointers){ +for (i=0;i < VALID_OFFSET_SZ;++i) +GC_valid_offsets[i]=TRUE; +} else { +BZERO(GC_valid_offsets,sizeof(GC_valid_offsets)); +for (i=0;i < sizeof(word);++i) +GC_modws_valid_offsets[i]=FALSE; +} +} +STATIC void GC_CALLBACK 
GC_default_same_obj_print_proc(void*p,void*q) +{ +ABORT_ARG2("GC_same_obj test failed", +":%p and %p are not in the same object",p,q); +} +void (GC_CALLBACK*GC_same_obj_print_proc)(void*,void*) +=GC_default_same_obj_print_proc; +GC_API void*GC_CALL GC_same_obj(void*p,void*q) +{ +struct hblk*h; +hdr*hhdr; +ptr_t base,limit; +word sz; +if (!EXPECT(GC_is_initialized,TRUE))GC_init(); +hhdr=HDR((word)p); +if (hhdr==0){ +if (divHBLKSZ((word)p)!=divHBLKSZ((word)q) +&&HDR((word)q)!=0){ +goto fail; +} +return(p); +} +if (IS_FORWARDING_ADDR_OR_NIL(hhdr)){ +h=HBLKPTR(p)- (word)hhdr; +hhdr=HDR(h); +while (IS_FORWARDING_ADDR_OR_NIL(hhdr)){ +h=FORWARDED_ADDR(h,hhdr); +hhdr=HDR(h); +} +limit=(ptr_t)h+hhdr->hb_sz; +if ((word)p>=(word)limit||(word)q>=(word)limit +||(word)q < (word)h){ +goto fail; +} +return(p); +} +sz=hhdr->hb_sz; +if (sz > MAXOBJBYTES){ +base=(ptr_t)HBLKPTR(p); +limit=base+sz; +if ((word)p>=(word)limit){ +goto fail; +} +} else { +size_t offset; +size_t pdispl=HBLKDISPL(p); +offset=pdispl % sz; +if (HBLKPTR(p)!=HBLKPTR(q))goto fail; +base=(ptr_t)p - offset; +limit=base+sz; +} +if ((word)q>=(word)limit||(word)q < (word)base){ +goto fail; +} +return(p); +fail: +(*GC_same_obj_print_proc)((ptr_t)p,(ptr_t)q); +return(p); +} +STATIC void GC_CALLBACK GC_default_is_valid_displacement_print_proc (void*p) +{ +ABORT_ARG1("GC_is_valid_displacement test failed",":%p not valid",p); +} +void (GC_CALLBACK*GC_is_valid_displacement_print_proc)(void*)= +GC_default_is_valid_displacement_print_proc; +GC_API void*GC_CALL GC_is_valid_displacement(void*p) +{ +hdr*hhdr; +word pdispl; +word offset; +struct hblk*h; +word sz; +if (!EXPECT(GC_is_initialized,TRUE))GC_init(); +hhdr=HDR((word)p); +if (hhdr==0)return(p); +h=HBLKPTR(p); +if (GC_all_interior_pointers){ +while (IS_FORWARDING_ADDR_OR_NIL(hhdr)){ +h=FORWARDED_ADDR(h,hhdr); +hhdr=HDR(h); +} +} else if (IS_FORWARDING_ADDR_OR_NIL(hhdr)){ +goto fail; +} +sz=hhdr->hb_sz; +pdispl=HBLKDISPL(p); +offset=pdispl % sz; +if ((sz > MAXOBJBYTES&&(word)p>=(word)h+sz) +||!GC_valid_offsets[offset] +||((word)p+(sz - offset)> (word)(h+1) +&&!IS_FORWARDING_ADDR_OR_NIL(HDR(h+1)))){ +goto fail; +} +return(p); +fail: +(*GC_is_valid_displacement_print_proc)((ptr_t)p); +return(p); +} +STATIC void GC_CALLBACK GC_default_is_visible_print_proc(void*p) +{ +ABORT_ARG1("GC_is_visible test failed",":%p not GC-visible",p); +} +void (GC_CALLBACK*GC_is_visible_print_proc)(void*p)= +GC_default_is_visible_print_proc; +#ifndef THREADS +STATIC GC_bool GC_on_stack(void*p) +{ +#ifdef STACK_GROWS_DOWN +if ((word)p>=(word)GC_approx_sp() +&&(word)p < (word)GC_stackbottom){ +return(TRUE); +} +#else +if ((word)p<=(word)GC_approx_sp() +&&(word)p > (word)GC_stackbottom){ +return(TRUE); +} +#endif +return(FALSE); +} +#endif +GC_API void*GC_CALL GC_is_visible(void*p) +{ +hdr*hhdr; +if ((word)p&(ALIGNMENT - 1))goto fail; +if (!EXPECT(GC_is_initialized,TRUE))GC_init(); +#ifdef THREADS +hhdr=HDR((word)p); +if (hhdr!=0&&GC_base(p)==0){ +goto fail; +} else { +return(p); +} +#else +if (GC_on_stack(p))return(p); +hhdr=HDR((word)p); +if (hhdr==0){ +if (GC_is_static_root(p))return(p); +#if defined(DYNAMIC_LOADING)||defined(MSWIN32)||defined(MSWINCE)||defined(CYGWIN32)||defined(PCR) +GC_register_dynamic_libraries(); +if (GC_is_static_root(p)) +return(p); +#endif +goto fail; +} else { +word descr; +ptr_t base=(ptr_t)GC_base(p); +if (NULL==base)goto fail; +if (HBLKPTR(base)!=HBLKPTR(p)) +hhdr=HDR(base); +descr=hhdr->hb_descr; +retry: +switch(descr&GC_DS_TAGS){ +case GC_DS_LENGTH: +if ((word)p - (word)base > descr)goto 
fail; +break; +case GC_DS_BITMAP: +if ((word)p - (word)base>=WORDS_TO_BYTES(BITMAP_BITS) +||((word)p&(sizeof(word)- 1)))goto fail; +if (!(((word)1<<(WORDSZ - ((ptr_t)p - (ptr_t)base)- 1)) +&descr))goto fail; +break; +case GC_DS_PROC: +break; +case GC_DS_PER_OBJECT: +if ((signed_word)descr>=0){ +descr=*(word*)((ptr_t)base+(descr&~GC_DS_TAGS)); +} else { +ptr_t type_descr=*(ptr_t*)base; +descr=*(word*)(type_descr +- (descr - (word)(GC_DS_PER_OBJECT +- GC_INDIR_PER_OBJ_BIAS))); +} +goto retry; +} +return(p); +} +#endif +fail: +(*GC_is_visible_print_proc)((ptr_t)p); +return(p); +} +GC_API void*GC_CALL GC_pre_incr (void**p,ptrdiff_t how_much) +{ +void*initial=*p; +void*result=GC_same_obj((void*)((ptr_t)initial+how_much),initial); +if (!GC_all_interior_pointers){ +(void)GC_is_valid_displacement(result); +} +return (*p=result); +} +GC_API void*GC_CALL GC_post_incr (void**p,ptrdiff_t how_much) +{ +void*initial=*p; +void*result=GC_same_obj((void*)((ptr_t)initial+how_much),initial); +if (!GC_all_interior_pointers){ +(void)GC_is_valid_displacement(result); +} +*p=result; +return(initial); +} +#ifndef GC_INLINE_H +#define GC_INLINE_H +#if GC_GNUC_PREREQ(3,0) +#define GC_EXPECT(expr,outcome)__builtin_expect(expr,outcome) +#else +#define GC_EXPECT(expr,outcome)(expr) +#endif +#ifndef GC_ASSERT +#ifdef NDEBUG +#define GC_ASSERT(expr) +#else +#include +#define GC_ASSERT(expr)assert(expr) +#endif +#endif +#ifdef __cplusplus +extern "C" { +#endif +#ifndef GC_PREFETCH_FOR_WRITE +#if GC_GNUC_PREREQ(3,0)&&!defined(GC_NO_PREFETCH_FOR_WRITE) +#define GC_PREFETCH_FOR_WRITE(x)__builtin_prefetch((x),1) +#else +#define GC_PREFETCH_FOR_WRITE(x)(void)0 +#endif +#endif +#define GC_I_PTRFREE 0 +#define GC_I_NORMAL 1 +GC_API void GC_CALL GC_generic_malloc_many(size_t,int, +void**); +GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1)void*GC_CALL +GC_malloc_kind(size_t,int); +#ifdef GC_THREADS +GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1)void*GC_CALL +GC_malloc_kind_global(size_t,int); +#else +#define GC_malloc_kind_global GC_malloc_kind +#endif +#if defined(GC_THREADS)&&defined(AO_HAVE_store) +#define GC_FAST_M_AO_STORE(my_fl,next)AO_store((volatile AO_t*)(my_fl),(AO_t)(next)) +#else +#define GC_FAST_M_AO_STORE(my_fl,next)(void)(*(my_fl)=(next)) +#endif +#define GC_FAST_MALLOC_GRANS(result,granules,tiny_fl,num_direct,kind,default_expr,init)do { if (GC_EXPECT((granules)>=GC_TINY_FREELISTS,0)){ result=(default_expr);} else { void**my_fl=(tiny_fl)+(granules);void*my_entry=*my_fl;void*next;for (;;){ if (GC_EXPECT((GC_word)my_entry > (num_direct)+GC_TINY_FREELISTS+1,1)){ next=*(void**)(my_entry);result=(void*)my_entry;GC_FAST_M_AO_STORE(my_fl,next);init;GC_PREFETCH_FOR_WRITE(next);if ((kind)!=GC_I_PTRFREE){ GC_end_stubborn_change(my_fl);GC_reachable_here(next);} GC_ASSERT(GC_size(result)>=(granules)*GC_GRANULE_BYTES);GC_ASSERT((kind)==GC_I_PTRFREE||((GC_word*)result)[1]==0);break;} if ((GC_signed_word)my_entry - (GC_signed_word)(num_direct)<=0&&my_entry!=0){ GC_FAST_M_AO_STORE(my_fl,(char*)my_entry+(granules)+1);result=(default_expr);break;} else { GC_generic_malloc_many(((granules)==0?GC_GRANULE_BYTES:GC_RAW_BYTES_FROM_INDEX(granules)),kind,my_fl);my_entry=*my_fl;if (my_entry==0){ result=(*GC_get_oom_fn())((granules)*GC_GRANULE_BYTES);break;} } } } } while (0) +#define GC_WORDS_TO_WHOLE_GRANULES(n)GC_WORDS_TO_GRANULES((n)+GC_GRANULE_WORDS - 1) +#define GC_MALLOC_WORDS_KIND(result,n,tiny_fl,kind,init)do { size_t 
granules=GC_WORDS_TO_WHOLE_GRANULES(n);GC_FAST_MALLOC_GRANS(result,granules,tiny_fl,0,kind,GC_malloc_kind(granules*GC_GRANULE_BYTES,kind),init);} while (0) +#define GC_MALLOC_WORDS(result,n,tiny_fl)GC_MALLOC_WORDS_KIND(result,n,tiny_fl,GC_I_NORMAL,*(void**)(result)=0) +#define GC_MALLOC_ATOMIC_WORDS(result,n,tiny_fl)GC_MALLOC_WORDS_KIND(result,n,tiny_fl,GC_I_PTRFREE,(void)0) +#define GC_CONS(result,first,second,tiny_fl)do { void*l=(void*)(first);void*r=(void*)(second);GC_MALLOC_WORDS_KIND(result,2,tiny_fl,GC_I_NORMAL,(void)0);if ((result)!=0){*(void**)(result)=l;GC_PTR_STORE_AND_DIRTY((void**)(result)+1,r);GC_reachable_here(l);} } while (0) +GC_API void GC_CALL GC_print_free_list(int, +size_t); +#ifdef __cplusplus +} +#endif +#endif +#include +#ifdef GC_USE_ENTIRE_HEAP +int GC_use_entire_heap=TRUE; +#else +int GC_use_entire_heap=FALSE; +#endif +#define MAX_BLACK_LIST_ALLOC (2*HBLKSIZE) +#define UNIQUE_THRESHOLD 32 +#define HUGE_THRESHOLD 256 +#define FL_COMPRESSION 8 +#define N_HBLK_FLS ((HUGE_THRESHOLD - UNIQUE_THRESHOLD)/FL_COMPRESSION+UNIQUE_THRESHOLD) +#ifndef GC_GCJ_SUPPORT +STATIC +#endif +struct hblk*GC_hblkfreelist[N_HBLK_FLS+1]={ 0 }; +#ifndef GC_GCJ_SUPPORT +STATIC +#endif +word GC_free_bytes[N_HBLK_FLS+1]={ 0 }; +GC_INLINE int GC_enough_large_bytes_left(void) +{ +int n; +word bytes=GC_large_allocd_bytes; +GC_ASSERT(GC_max_large_allocd_bytes<=GC_heapsize); +for (n=N_HBLK_FLS;n>=0;--n){ +bytes+=GC_free_bytes[n]; +if (bytes>=GC_max_large_allocd_bytes)return n; +} +return 0; +} +STATIC int GC_hblk_fl_from_blocks(word blocks_needed) +{ +if (blocks_needed<=UNIQUE_THRESHOLD)return (int)blocks_needed; +if (blocks_needed>=HUGE_THRESHOLD)return N_HBLK_FLS; +return (int)(blocks_needed - UNIQUE_THRESHOLD)/FL_COMPRESSION ++UNIQUE_THRESHOLD; +} +#define PHDR(hhdr)HDR((hhdr)->hb_prev) +#define NHDR(hhdr)HDR((hhdr)->hb_next) +#ifdef USE_MUNMAP +#define IS_MAPPED(hhdr)(((hhdr)->hb_flags&WAS_UNMAPPED)==0) +#else +#define IS_MAPPED(hhdr)TRUE +#endif +#if!defined(NO_DEBUGGING)||defined(GC_ASSERTIONS) +GC_INNER word GC_compute_large_free_bytes(void) +{ +word total_free=0; +unsigned i; +for (i=0;i<=N_HBLK_FLS;++i){ +struct hblk*h; +hdr*hhdr; +for (h=GC_hblkfreelist[i];h!=0;h=hhdr->hb_next){ +hhdr=HDR(h); +total_free+=hhdr->hb_sz; +} +} +return total_free; +} +#endif +#if!defined(NO_DEBUGGING) +void GC_print_hblkfreelist(void) +{ +unsigned i; +word total; +for (i=0;i<=N_HBLK_FLS;++i){ +struct hblk*h=GC_hblkfreelist[i]; +if (0!=h)GC_printf("Free list %u (total size %lu):\n", +i,(unsigned long)GC_free_bytes[i]); +while (h){ +hdr*hhdr=HDR(h); +GC_printf("\t%p size %lu %s black listed\n", +(void*)h,(unsigned long)hhdr->hb_sz, +GC_is_black_listed(h,HBLKSIZE)!=0?"start": +GC_is_black_listed(h,hhdr->hb_sz)!=0?"partially": +"not"); +h=hhdr->hb_next; +} +} +GC_printf("GC_large_free_bytes:%lu\n", +(unsigned long)GC_large_free_bytes); +if ((total=GC_compute_large_free_bytes())!=GC_large_free_bytes) +GC_err_printf("GC_large_free_bytes INCONSISTENT!!Should be:%lu\n", +(unsigned long)total); +} +static int free_list_index_of(hdr*wanted) +{ +int i; +for (i=0;i<=N_HBLK_FLS;++i){ +struct hblk*h; +hdr*hhdr; +for (h=GC_hblkfreelist[i];h!=0;h=hhdr->hb_next){ +hhdr=HDR(h); +if (hhdr==wanted)return i; +} +} +return -1; +} +GC_API void GC_CALL GC_dump_regions(void) +{ +unsigned i; +for (i=0;i < GC_n_heap_sects;++i){ +ptr_t start=GC_heap_sects[i].hs_start; +size_t bytes=GC_heap_sects[i].hs_bytes; +ptr_t end=start+bytes; +ptr_t p; +while (i+1 < GC_n_heap_sects&&GC_heap_sects[i+1].hs_start==end){ +++i; 
+end=GC_heap_sects[i].hs_start+GC_heap_sects[i].hs_bytes; +} +GC_printf("***Section from %p to %p\n",(void*)start,(void*)end); +for (p=start;(word)p < (word)end;){ +hdr*hhdr=HDR(p); +if (IS_FORWARDING_ADDR_OR_NIL(hhdr)){ +GC_printf("\t%p Missing header!!(%p)\n", +(void*)p,(void*)hhdr); +p+=HBLKSIZE; +continue; +} +if (HBLK_IS_FREE(hhdr)){ +int correct_index=GC_hblk_fl_from_blocks( +divHBLKSZ(hhdr->hb_sz)); +int actual_index; +GC_printf("\t%p\tfree block of size 0x%lx bytes%s\n", +(void*)p,(unsigned long)(hhdr->hb_sz), +IS_MAPPED(hhdr)?"":" (unmapped)"); +actual_index=free_list_index_of(hhdr); +if (-1==actual_index){ +GC_printf("\t\tBlock not on free list %d!!\n", +correct_index); +} else if (correct_index!=actual_index){ +GC_printf("\t\tBlock on list %d,should be on %d!!\n", +actual_index,correct_index); +} +p+=hhdr->hb_sz; +} else { +GC_printf("\t%p\tused for blocks of size 0x%lx bytes\n", +(void*)p,(unsigned long)(hhdr->hb_sz)); +p+=HBLKSIZE*OBJ_SZ_TO_BLOCKS(hhdr->hb_sz); +} +} +} +} +#endif +static GC_bool setup_header(hdr*hhdr,struct hblk*block,size_t byte_sz, +int kind,unsigned flags) +{ +word descr; +#ifdef MARK_BIT_PER_GRANULE +if (byte_sz > MAXOBJBYTES) +flags|=LARGE_BLOCK; +#endif +#ifdef ENABLE_DISCLAIM +if (GC_obj_kinds[kind].ok_disclaim_proc) +flags|=HAS_DISCLAIM; +if (GC_obj_kinds[kind].ok_mark_unconditionally) +flags|=MARK_UNCONDITIONALLY; +#endif +hhdr->hb_sz=byte_sz; +hhdr->hb_obj_kind=(unsigned char)kind; +hhdr->hb_flags=(unsigned char)flags; +hhdr->hb_block=block; +descr=GC_obj_kinds[kind].ok_descriptor; +if (GC_obj_kinds[kind].ok_relocate_descr)descr+=byte_sz; +hhdr->hb_descr=descr; +#ifdef MARK_BIT_PER_OBJ +if (byte_sz > MAXOBJBYTES){ +hhdr->hb_inv_sz=LARGE_INV_SZ; +} else { +word inv_sz; +#if CPP_WORDSZ==64 +inv_sz=((word)1<<32)/byte_sz; +if (((inv_sz*byte_sz)>>32)==0)++inv_sz; +#else +GC_ASSERT(byte_sz>=4); +inv_sz=((unsigned)1<<31)/byte_sz; +inv_sz*=2; +while (inv_sz*byte_sz > byte_sz)++inv_sz; +#endif +#ifdef INV_SZ_COMPUTATION_CHECK +GC_ASSERT(((1ULL<<32)+byte_sz - 1)/byte_sz==inv_sz); +#endif +hhdr->hb_inv_sz=inv_sz; +} +#endif +#ifdef MARK_BIT_PER_GRANULE +{ +size_t granules=BYTES_TO_GRANULES(byte_sz); +if (EXPECT(!GC_add_map_entry(granules),FALSE)){ +hhdr->hb_sz=HBLKSIZE; +hhdr->hb_descr=0; +hhdr->hb_flags|=LARGE_BLOCK; +hhdr->hb_map=0; +return FALSE; +} +hhdr->hb_map=GC_obj_map[(hhdr->hb_flags&LARGE_BLOCK)!=0? 
+0:granules]; +} +#endif +GC_clear_hdr_marks(hhdr); +hhdr->hb_last_reclaimed=(unsigned short)GC_gc_no; +return(TRUE); +} +STATIC void GC_remove_from_fl_at(hdr*hhdr,int index) +{ +GC_ASSERT(((hhdr->hb_sz)&(HBLKSIZE-1))==0); +if (hhdr->hb_prev==0){ +GC_ASSERT(HDR(GC_hblkfreelist[index])==hhdr); +GC_hblkfreelist[index]=hhdr->hb_next; +} else { +hdr*phdr; +GET_HDR(hhdr->hb_prev,phdr); +phdr->hb_next=hhdr->hb_next; +} +GC_ASSERT(GC_free_bytes[index]>=hhdr->hb_sz); +GC_free_bytes[index]-=hhdr->hb_sz; +if (0!=hhdr->hb_next){ +hdr*nhdr; +GC_ASSERT(!IS_FORWARDING_ADDR_OR_NIL(NHDR(hhdr))); +GET_HDR(hhdr->hb_next,nhdr); +nhdr->hb_prev=hhdr->hb_prev; +} +} +GC_INLINE void GC_remove_from_fl(hdr*hhdr) +{ +GC_remove_from_fl_at(hhdr,GC_hblk_fl_from_blocks(divHBLKSZ(hhdr->hb_sz))); +} +static struct hblk*get_block_ending_at(struct hblk*h) +{ +struct hblk*p=h - 1; +hdr*phdr; +GET_HDR(p,phdr); +while (0!=phdr&&IS_FORWARDING_ADDR_OR_NIL(phdr)){ +p=FORWARDED_ADDR(p,phdr); +phdr=HDR(p); +} +if (0!=phdr){ +return p; +} +p=GC_prev_block(h - 1); +if (p){ +phdr=HDR(p); +if ((ptr_t)p+phdr->hb_sz==(ptr_t)h){ +return p; +} +} +return NULL; +} +STATIC struct hblk*GC_free_block_ending_at(struct hblk*h) +{ +struct hblk*p=get_block_ending_at(h); +if (p){ +hdr*phdr=HDR(p); +if (HBLK_IS_FREE(phdr)){ +return p; +} +} +return 0; +} +STATIC void GC_add_to_fl(struct hblk*h,hdr*hhdr) +{ +int index=GC_hblk_fl_from_blocks(divHBLKSZ(hhdr->hb_sz)); +struct hblk*second=GC_hblkfreelist[index]; +#if defined(GC_ASSERTIONS)&&!defined(USE_MUNMAP) +struct hblk*next=(struct hblk*)((word)h+hhdr->hb_sz); +hdr*nexthdr=HDR(next); +struct hblk*prev=GC_free_block_ending_at(h); +hdr*prevhdr=HDR(prev); +GC_ASSERT(nexthdr==0||!HBLK_IS_FREE(nexthdr) +||(GC_heapsize&SIGNB)!=0); +GC_ASSERT(prev==0||!HBLK_IS_FREE(prevhdr) +||(GC_heapsize&SIGNB)!=0); +#endif +GC_ASSERT(((hhdr->hb_sz)&(HBLKSIZE-1))==0); +GC_hblkfreelist[index]=h; +GC_free_bytes[index]+=hhdr->hb_sz; +GC_ASSERT(GC_free_bytes[index]<=GC_large_free_bytes); +hhdr->hb_next=second; +hhdr->hb_prev=0; +if (second){ +hdr*second_hdr; +GET_HDR(second,second_hdr); +second_hdr->hb_prev=h; +} +hhdr->hb_flags|=FREE_BLK; +} +#ifdef USE_MUNMAP +#ifndef MUNMAP_THRESHOLD +#define MUNMAP_THRESHOLD 6 +#endif +GC_INNER int GC_unmap_threshold=MUNMAP_THRESHOLD; +#ifdef COUNT_UNMAPPED_REGIONS +static int calc_num_unmapped_regions_delta(struct hblk*h,hdr*hhdr) +{ +struct hblk*prev=get_block_ending_at(h); +struct hblk*next; +GC_bool prev_unmapped=FALSE; +GC_bool next_unmapped=FALSE; +next=GC_next_block((struct hblk*)((ptr_t)h+hhdr->hb_sz),TRUE); +if ((ptr_t)next!=GC_unmap_end((ptr_t)h,(size_t)hhdr->hb_sz)){ +next=NULL; +} +if (prev!=NULL){ +hdr*prevhdr=HDR(prev); +prev_unmapped=!IS_MAPPED(prevhdr); +} +if (next!=NULL){ +hdr*nexthdr=HDR(next); +next_unmapped=!IS_MAPPED(nexthdr); +} +if (prev_unmapped&&next_unmapped){ +return IS_MAPPED(hhdr)?-1:1; +} +if (!prev_unmapped&&!next_unmapped){ +return IS_MAPPED(hhdr)?1:-1; +} +return 0; +} +#endif +GC_INLINE void GC_adjust_num_unmapped(struct hblk*h GC_ATTR_UNUSED, +hdr*hhdr GC_ATTR_UNUSED) +{ +#ifdef COUNT_UNMAPPED_REGIONS +GC_num_unmapped_regions+=calc_num_unmapped_regions_delta(h,hhdr); +#endif +} +GC_INNER void GC_unmap_old(void) +{ +int i; +if (GC_unmap_threshold==0) +return; +#ifdef COUNT_UNMAPPED_REGIONS +if (GC_num_unmapped_regions>=GC_UNMAPPED_REGIONS_SOFT_LIMIT) +return; +#endif +for (i=0;i<=N_HBLK_FLS;++i){ +struct hblk*h; +hdr*hhdr; +for (h=GC_hblkfreelist[i];0!=h;h=hhdr->hb_next){ +hhdr=HDR(h); +if (!IS_MAPPED(hhdr))continue; +if ((unsigned short)(GC_gc_no - 
hhdr->hb_last_reclaimed)> +(unsigned short)GC_unmap_threshold){ +#ifdef COUNT_UNMAPPED_REGIONS +int delta=calc_num_unmapped_regions_delta(h,hhdr); +signed_word regions=GC_num_unmapped_regions+delta; +if (delta>=0&®ions>=GC_UNMAPPED_REGIONS_SOFT_LIMIT){ +GC_COND_LOG_PRINTF("Unmapped regions limit reached!\n"); +return; +} +GC_num_unmapped_regions=regions; +#endif +GC_unmap((ptr_t)h,(size_t)hhdr->hb_sz); +hhdr->hb_flags|=WAS_UNMAPPED; +} +} +} +} +GC_INNER void GC_merge_unmapped(void) +{ +int i; +for (i=0;i<=N_HBLK_FLS;++i){ +struct hblk*h=GC_hblkfreelist[i]; +while (h!=0){ +struct hblk*next; +hdr*hhdr,*nexthdr; +word size,nextsize; +GET_HDR(h,hhdr); +size=hhdr->hb_sz; +next=(struct hblk*)((word)h+size); +GET_HDR(next,nexthdr); +if (0!=nexthdr&&HBLK_IS_FREE(nexthdr) +&&(signed_word)(size+(nextsize=nexthdr->hb_sz))> 0 +){ +if (IS_MAPPED(hhdr)&&!IS_MAPPED(nexthdr)){ +if (size > nextsize){ +GC_adjust_num_unmapped(next,nexthdr); +GC_remap((ptr_t)next,nextsize); +} else { +GC_adjust_num_unmapped(h,hhdr); +GC_unmap((ptr_t)h,size); +GC_unmap_gap((ptr_t)h,size,(ptr_t)next,nextsize); +hhdr->hb_flags|=WAS_UNMAPPED; +} +} else if (IS_MAPPED(nexthdr)&&!IS_MAPPED(hhdr)){ +if (size > nextsize){ +GC_adjust_num_unmapped(next,nexthdr); +GC_unmap((ptr_t)next,nextsize); +GC_unmap_gap((ptr_t)h,size,(ptr_t)next,nextsize); +} else { +GC_adjust_num_unmapped(h,hhdr); +GC_remap((ptr_t)h,size); +hhdr->hb_flags&=~WAS_UNMAPPED; +hhdr->hb_last_reclaimed=nexthdr->hb_last_reclaimed; +} +} else if (!IS_MAPPED(hhdr)&&!IS_MAPPED(nexthdr)){ +GC_unmap_gap((ptr_t)h,size,(ptr_t)next,nextsize); +} +GC_remove_from_fl_at(hhdr,i); +GC_remove_from_fl(nexthdr); +hhdr->hb_sz+=nexthdr->hb_sz; +GC_remove_header(next); +GC_add_to_fl(h,hhdr); +h=GC_hblkfreelist[i]; +} else { +h=hhdr->hb_next; +} +} +} +} +#endif +STATIC struct hblk*GC_get_first_part(struct hblk*h,hdr*hhdr, +size_t bytes,int index) +{ +word total_size=hhdr->hb_sz; +struct hblk*rest; +hdr*rest_hdr; +GC_ASSERT((total_size&(HBLKSIZE-1))==0); +GC_remove_from_fl_at(hhdr,index); +if (total_size==bytes)return h; +rest=(struct hblk*)((word)h+bytes); +rest_hdr=GC_install_header(rest); +if (0==rest_hdr){ +WARN("Header allocation failed:dropping block\n",0); +return(0); +} +rest_hdr->hb_sz=total_size - bytes; +rest_hdr->hb_flags=0; +#ifdef GC_ASSERTIONS +hhdr->hb_flags&=~FREE_BLK; +#endif +GC_add_to_fl(rest,rest_hdr); +return h; +} +STATIC void GC_split_block(struct hblk*h,hdr*hhdr,struct hblk*n, +hdr*nhdr,int index) +{ +word total_size=hhdr->hb_sz; +word h_size=(word)n - (word)h; +struct hblk*prev=hhdr->hb_prev; +struct hblk*next=hhdr->hb_next; +nhdr->hb_prev=prev; +nhdr->hb_next=next; +nhdr->hb_sz=total_size - h_size; +nhdr->hb_flags=0; +if (prev){ +HDR(prev)->hb_next=n; +} else { +GC_hblkfreelist[index]=n; +} +if (next){ +HDR(next)->hb_prev=n; +} +GC_ASSERT(GC_free_bytes[index] > h_size); +GC_free_bytes[index]-=h_size; +#ifdef USE_MUNMAP +hhdr->hb_last_reclaimed=(unsigned short)GC_gc_no; +#endif +hhdr->hb_sz=h_size; +GC_add_to_fl(h,hhdr); +nhdr->hb_flags|=FREE_BLK; +} +STATIC struct hblk* +GC_allochblk_nth(size_t sz,int kind,unsigned flags,int n, +int may_split); +#define AVOID_SPLIT_REMAPPED 2 +GC_INNER struct hblk* +GC_allochblk(size_t sz,int kind,unsigned flags) +{ +word blocks; +int start_list; +struct hblk*result; +int may_split; +int split_limit; +GC_ASSERT((sz&(GRANULE_BYTES - 1))==0); +blocks=OBJ_SZ_TO_BLOCKS_CHECKED(sz); +if ((signed_word)(blocks*HBLKSIZE)< 0){ +return 0; +} +start_list=GC_hblk_fl_from_blocks(blocks); 
+result=GC_allochblk_nth(sz,kind,flags,start_list,FALSE); +if (0!=result)return result; +may_split=TRUE; +if (GC_use_entire_heap||GC_dont_gc +||USED_HEAP_SIZE < GC_requested_heapsize +||GC_incremental||!GC_should_collect()){ +split_limit=N_HBLK_FLS; +} else if (GC_finalizer_bytes_freed > (GC_heapsize>>4)){ +split_limit=0; +} else { +split_limit=GC_enough_large_bytes_left(); +#ifdef USE_MUNMAP +if (split_limit > 0) +may_split=AVOID_SPLIT_REMAPPED; +#endif +} +if (start_list < UNIQUE_THRESHOLD){ +++start_list; +} +for (;start_list<=split_limit;++start_list){ +result=GC_allochblk_nth(sz,kind,flags,start_list,may_split); +if (0!=result) +break; +} +return result; +} +STATIC long GC_large_alloc_warn_suppressed=0; +STATIC struct hblk* +GC_allochblk_nth(size_t sz,int kind,unsigned flags,int n,int may_split) +{ +struct hblk*hbp; +hdr*hhdr; +struct hblk*thishbp; +hdr*thishdr; +signed_word size_needed=HBLKSIZE*OBJ_SZ_TO_BLOCKS_CHECKED(sz); +for (hbp=GC_hblkfreelist[n];;hbp=hhdr->hb_next){ +signed_word size_avail; +if (hbp){ +} else { +return NULL; +} +GET_HDR(hbp,hhdr); +size_avail=(signed_word)hhdr->hb_sz; +if (size_avail < size_needed)continue; +if (size_avail!=size_needed){ +if (!may_split)continue; +thishbp=hhdr->hb_next; +if (thishbp){ +signed_word next_size; +GET_HDR(thishbp,thishdr); +next_size=(signed_word)(thishdr->hb_sz); +if (next_size < size_avail +&&next_size>=size_needed +&&!GC_is_black_listed(thishbp,(word)size_needed)){ +continue; +} +} +} +if (!IS_UNCOLLECTABLE(kind)&&(kind!=PTRFREE +||size_needed > (signed_word)MAX_BLACK_LIST_ALLOC)){ +struct hblk*lasthbp=hbp; +ptr_t search_end=(ptr_t)hbp+size_avail - size_needed; +signed_word orig_avail=size_avail; +signed_word eff_size_needed=(flags&IGNORE_OFF_PAGE)!=0? +(signed_word)HBLKSIZE +:size_needed; +while ((word)lasthbp<=(word)search_end +&&(thishbp=GC_is_black_listed(lasthbp, +(word)eff_size_needed))!=0){ +lasthbp=thishbp; +} +size_avail-=(ptr_t)lasthbp - (ptr_t)hbp; +thishbp=lasthbp; +if (size_avail>=size_needed){ +if (thishbp!=hbp){ +#ifdef USE_MUNMAP +if (may_split==AVOID_SPLIT_REMAPPED&&!IS_MAPPED(hhdr)) +continue; +#endif +thishdr=GC_install_header(thishbp); +if (0!=thishdr){ +#ifdef USE_MUNMAP +if (!IS_MAPPED(hhdr)){ +GC_adjust_num_unmapped(hbp,hhdr); +GC_remap((ptr_t)hbp,(size_t)hhdr->hb_sz); +hhdr->hb_flags&=~WAS_UNMAPPED; +} +#endif +GC_split_block(hbp,hhdr,thishbp,thishdr,n); +hbp=thishbp; +hhdr=thishdr; +} +} +} else if (size_needed > (signed_word)BL_LIMIT +&&orig_avail - size_needed +> (signed_word)BL_LIMIT){ +if (++GC_large_alloc_warn_suppressed +>=GC_large_alloc_warn_interval){ +WARN("Repeated allocation of very large block " +"(appr. 
size %" WARN_PRIdPTR "):\n" +"\tMay lead to memory leak and poor performance\n", +size_needed); +GC_large_alloc_warn_suppressed=0; +} +size_avail=orig_avail; +} else if (size_avail==0 +&&size_needed==(signed_word)HBLKSIZE +&&IS_MAPPED(hhdr)){ +if (!GC_find_leak){ +static unsigned count=0; +if ((++count&3)==0){ +word total_size=hhdr->hb_sz; +struct hblk*limit=hbp+divHBLKSZ(total_size); +struct hblk*h; +struct hblk*prev=hhdr->hb_prev; +GC_large_free_bytes-=total_size; +GC_bytes_dropped+=total_size; +GC_remove_from_fl_at(hhdr,n); +for (h=hbp;(word)h < (word)limit;h++){ +if (h!=hbp){ +hhdr=GC_install_header(h); +} +if (NULL!=hhdr){ +(void)setup_header(hhdr,h,HBLKSIZE,PTRFREE,0); +if (GC_debugging_started){ +BZERO(h,HBLKSIZE); +} +} +} +hbp=prev; +if (0==hbp){ +return GC_allochblk_nth(sz,kind,flags,n,may_split); +} +hhdr=HDR(hbp); +} +} +} +} +if( size_avail>=size_needed){ +#ifdef USE_MUNMAP +if (!IS_MAPPED(hhdr)){ +GC_adjust_num_unmapped(hbp,hhdr); +GC_remap((ptr_t)hbp,(size_t)hhdr->hb_sz); +hhdr->hb_flags&=~WAS_UNMAPPED; +} +#endif +hbp=GC_get_first_part(hbp,hhdr,size_needed,n); +break; +} +} +if (0==hbp)return 0; +if (!GC_install_counts(hbp,(word)size_needed))return(0); +if (!setup_header(hhdr,hbp,sz,kind,flags)){ +GC_remove_counts(hbp,(word)size_needed); +return(0); +} +#ifndef GC_DISABLE_INCREMENTAL +GC_ASSERT((size_needed&(HBLKSIZE-1))==0); +GC_remove_protection(hbp,divHBLKSZ(size_needed), +(hhdr->hb_descr==0)); +#endif +GC_fail_count=0; +GC_large_free_bytes-=size_needed; +GC_ASSERT(IS_MAPPED(hhdr)); +return( hbp); +} +GC_INNER void GC_freehblk(struct hblk*hbp) +{ +struct hblk*next,*prev; +hdr*hhdr,*prevhdr,*nexthdr; +word size; +GET_HDR(hbp,hhdr); +size=HBLKSIZE*OBJ_SZ_TO_BLOCKS(hhdr->hb_sz); +if ((size&SIGNB)!=0) +ABORT("Deallocating excessively large block. 
Too large an allocation?"); +GC_remove_counts(hbp,size); +hhdr->hb_sz=size; +#ifdef USE_MUNMAP +hhdr->hb_last_reclaimed=(unsigned short)GC_gc_no; +#endif +if (HBLK_IS_FREE(hhdr)){ +ABORT_ARG1("Duplicate large block deallocation", +" of %p",(void*)hbp); +} +GC_ASSERT(IS_MAPPED(hhdr)); +hhdr->hb_flags|=FREE_BLK; +next=(struct hblk*)((ptr_t)hbp+size); +GET_HDR(next,nexthdr); +prev=GC_free_block_ending_at(hbp); +if(0!=nexthdr&&HBLK_IS_FREE(nexthdr)&&IS_MAPPED(nexthdr) +&&(signed_word)(hhdr->hb_sz+nexthdr->hb_sz)> 0 +){ +GC_remove_from_fl(nexthdr); +hhdr->hb_sz+=nexthdr->hb_sz; +GC_remove_header(next); +} +if (prev){ +prevhdr=HDR(prev); +if (IS_MAPPED(prevhdr) +&&(signed_word)(hhdr->hb_sz+prevhdr->hb_sz)> 0){ +GC_remove_from_fl(prevhdr); +prevhdr->hb_sz+=hhdr->hb_sz; +#ifdef USE_MUNMAP +prevhdr->hb_last_reclaimed=(unsigned short)GC_gc_no; +#endif +GC_remove_header(hbp); +hbp=prev; +hhdr=prevhdr; +} +} +GC_large_free_bytes+=size; +GC_add_to_fl(hbp,hhdr); +} +#include +#if!defined(MACOS)&&!defined(MSWINCE) +#include +#if!defined(SN_TARGET_ORBIS)&&!defined(SN_TARGET_PSP2)&&!defined(__CC_ARM) +#include +#endif +#endif +word GC_non_gc_bytes=0; +word GC_gc_no=0; +#ifndef NO_CLOCK +static unsigned long full_gc_total_time=0; +static unsigned full_gc_total_ns_frac=0; +static GC_bool measure_performance=FALSE; +GC_API void GC_CALL GC_start_performance_measurement(void) +{ +measure_performance=TRUE; +} +GC_API unsigned long GC_CALL GC_get_full_gc_total_time(void) +{ +return full_gc_total_time; +} +#endif +#ifndef GC_DISABLE_INCREMENTAL +GC_INNER GC_bool GC_incremental=FALSE; +#endif +GC_API int GC_CALL GC_is_incremental_mode(void) +{ +return (int)GC_incremental; +} +#ifdef THREADS +int GC_parallel=FALSE; +#endif +#if defined(GC_FULL_FREQ)&&!defined(CPPCHECK) +int GC_full_freq=GC_FULL_FREQ; +#else +int GC_full_freq=19; +#endif +STATIC GC_bool GC_need_full_gc=FALSE; +#ifdef THREAD_LOCAL_ALLOC +GC_INNER GC_bool GC_world_stopped=FALSE; +#endif +STATIC word GC_used_heap_size_after_full=0; +EXTERN_C_BEGIN +extern const char*const GC_copyright[]; +EXTERN_C_END +const char*const GC_copyright[]= +{"Copyright 1988,1989 Hans-J. Boehm and Alan J. Demers ", +"Copyright (c)1991-1995 by Xerox Corporation. All rights reserved. ", +"Copyright (c)1996-1998 by Silicon Graphics. All rights reserved. ", +"Copyright (c)1999-2009 by Hewlett-Packard Company. All rights reserved. ", +"Copyright (c)2008-2020 Ivan Maidanski ", +"THIS MATERIAL IS PROVIDED AS IS,WITH ABSOLUTELY NO WARRANTY", +" EXPRESSED OR IMPLIED. ANY USE IS AT YOUR OWN RISK.", +"See source code for details." 
}; +#ifndef GC_NO_VERSION_VAR +EXTERN_C_BEGIN +extern const unsigned GC_version; +EXTERN_C_END +const unsigned GC_version=((GC_VERSION_MAJOR<<16)| +(GC_VERSION_MINOR<<8)|GC_VERSION_MICRO); +#endif +GC_API unsigned GC_CALL GC_get_version(void) +{ +return (GC_VERSION_MAJOR<<16)|(GC_VERSION_MINOR<<8)| +GC_VERSION_MICRO; +} +#ifdef GC_DONT_EXPAND +int GC_dont_expand=TRUE; +#else +int GC_dont_expand=FALSE; +#endif +#if defined(GC_FREE_SPACE_DIVISOR)&&!defined(CPPCHECK) +word GC_free_space_divisor=GC_FREE_SPACE_DIVISOR; +#else +word GC_free_space_divisor=3; +#endif +GC_INNER int GC_CALLBACK GC_never_stop_func(void) +{ +return(0); +} +#if defined(GC_TIME_LIMIT)&&!defined(CPPCHECK) +unsigned long GC_time_limit=GC_TIME_LIMIT; +#elif defined(PARALLEL_MARK) +unsigned long GC_time_limit=GC_TIME_UNLIMITED; +#else +unsigned long GC_time_limit=50; +#endif +#ifndef NO_CLOCK +STATIC unsigned long GC_time_lim_nsec=0; +#define TV_NSEC_LIMIT (1000UL*1000) +GC_API void GC_CALL GC_set_time_limit_tv(struct GC_timeval_s tv) +{ +GC_ASSERT(tv.tv_ms<=GC_TIME_UNLIMITED); +GC_ASSERT(tv.tv_nsec < TV_NSEC_LIMIT); +GC_time_limit=tv.tv_ms; +GC_time_lim_nsec=tv.tv_nsec; +} +GC_API struct GC_timeval_s GC_CALL GC_get_time_limit_tv(void) +{ +struct GC_timeval_s tv; +tv.tv_ms=GC_time_limit; +tv.tv_nsec=GC_time_lim_nsec; +return tv; +} +STATIC CLOCK_TYPE GC_start_time=CLOCK_TYPE_INITIALIZER; +#endif +STATIC int GC_n_attempts=0; +STATIC GC_stop_func GC_default_stop_func=GC_never_stop_func; +GC_API void GC_CALL GC_set_stop_func(GC_stop_func stop_func) +{ +DCL_LOCK_STATE; +GC_ASSERT(NONNULL_ARG_NOT_NULL(stop_func)); +LOCK(); +GC_default_stop_func=stop_func; +UNLOCK(); +} +GC_API GC_stop_func GC_CALL GC_get_stop_func(void) +{ +GC_stop_func stop_func; +DCL_LOCK_STATE; +LOCK(); +stop_func=GC_default_stop_func; +UNLOCK(); +return stop_func; +} +#if defined(GC_DISABLE_INCREMENTAL)||defined(NO_CLOCK) +#define GC_timeout_stop_func GC_default_stop_func +#else +STATIC int GC_CALLBACK GC_timeout_stop_func (void) +{ +CLOCK_TYPE current_time; +static unsigned count=0; +unsigned long time_diff,nsec_diff; +if ((*GC_default_stop_func)()) +return(1); +if ((count++&3)!=0)return(0); +GET_TIME(current_time); +time_diff=MS_TIME_DIFF(current_time,GC_start_time); +nsec_diff=NS_FRAC_TIME_DIFF(current_time,GC_start_time); +#if defined(CPPCHECK) +GC_noop1((word)&nsec_diff); +#endif +if (time_diff>=GC_time_limit +&&(time_diff > GC_time_limit||nsec_diff>=GC_time_lim_nsec)){ +GC_COND_LOG_PRINTF("Abandoning stopped marking after %lu ms %lu ns" +" (attempt %d)\n", +time_diff,nsec_diff,GC_n_attempts); +return 1; +} +return(0); +} +#endif +#ifdef THREADS +GC_INNER word GC_total_stacksize=0; +#endif +static size_t min_bytes_allocd_minimum=1; +GC_API void GC_CALL GC_set_min_bytes_allocd(size_t value) +{ +GC_ASSERT(value > 0); +min_bytes_allocd_minimum=value; +} +GC_API size_t GC_CALL GC_get_min_bytes_allocd(void) +{ +return min_bytes_allocd_minimum; +} +static word min_bytes_allocd(void) +{ +word result; +word stack_size; +word total_root_size; +word scan_size; +#ifdef THREADS +if (GC_need_to_lock){ +stack_size=GC_total_stacksize; +#ifdef DEBUG_THREADS +GC_log_printf("Total stacks size:%lu\n", +(unsigned long)stack_size); +#endif +} else +#endif +{ +#ifdef STACK_NOT_SCANNED +stack_size=0; +#elif defined(STACK_GROWS_UP) +stack_size=GC_approx_sp()- GC_stackbottom; +#else +stack_size=GC_stackbottom - GC_approx_sp(); +#endif +} +total_root_size=2*stack_size+GC_root_size; +scan_size=2*GC_composite_in_use+GC_atomic_in_use/4 ++total_root_size; 
+result=scan_size/GC_free_space_divisor; +if (GC_incremental){ +result/=2; +} +return result > min_bytes_allocd_minimum +?result:min_bytes_allocd_minimum; +} +STATIC word GC_non_gc_bytes_at_gc=0; +STATIC word GC_adj_bytes_allocd(void) +{ +signed_word result; +signed_word expl_managed=(signed_word)GC_non_gc_bytes +- (signed_word)GC_non_gc_bytes_at_gc; +result=(signed_word)GC_bytes_allocd ++(signed_word)GC_bytes_dropped +- (signed_word)GC_bytes_freed ++(signed_word)GC_finalizer_bytes_freed +- expl_managed; +if (result > (signed_word)GC_bytes_allocd){ +result=GC_bytes_allocd; +} +result+=GC_bytes_finalized; +if (result < (signed_word)(GC_bytes_allocd>>3)){ +return(GC_bytes_allocd>>3); +} else { +return(result); +} +} +STATIC void GC_clear_a_few_frames(void) +{ +#ifndef CLEAR_NWORDS +#define CLEAR_NWORDS 64 +#endif +volatile word frames[CLEAR_NWORDS]; +BZERO((word*)frames,CLEAR_NWORDS*sizeof(word)); +} +STATIC word GC_collect_at_heapsize=GC_WORD_MAX; +GC_INNER GC_bool GC_should_collect(void) +{ +static word last_min_bytes_allocd; +static word last_gc_no; +if (last_gc_no!=GC_gc_no){ +last_min_bytes_allocd=min_bytes_allocd(); +last_gc_no=GC_gc_no; +} +return(GC_adj_bytes_allocd()>=last_min_bytes_allocd +||GC_heapsize>=GC_collect_at_heapsize); +} +GC_start_callback_proc GC_start_call_back=0; +GC_API void GC_CALL GC_set_start_callback(GC_start_callback_proc fn) +{ +DCL_LOCK_STATE; +LOCK(); +GC_start_call_back=fn; +UNLOCK(); +} +GC_API GC_start_callback_proc GC_CALL GC_get_start_callback(void) +{ +GC_start_callback_proc fn; +DCL_LOCK_STATE; +LOCK(); +fn=GC_start_call_back; +UNLOCK(); +return fn; +} +GC_INLINE void GC_notify_full_gc(void) +{ +if (GC_start_call_back!=0){ +(*GC_start_call_back)(); +} +} +STATIC GC_bool GC_is_full_gc=FALSE; +STATIC GC_bool GC_stopped_mark(GC_stop_func stop_func); +STATIC void GC_finish_collection(void); +STATIC void GC_maybe_gc(void) +{ +GC_ASSERT(I_HOLD_LOCK()); +ASSERT_CANCEL_DISABLED(); +if (GC_should_collect()){ +static int n_partial_gcs=0; +if (!GC_incremental){ +GC_try_to_collect_inner(GC_never_stop_func); +n_partial_gcs=0; +return; +} else { +#ifdef PARALLEL_MARK +if (GC_parallel) +GC_wait_for_reclaim(); +#endif +if (GC_need_full_gc||n_partial_gcs>=GC_full_freq){ +GC_COND_LOG_PRINTF( +"***>Full mark for collection #%lu after %lu allocd bytes\n", +(unsigned long)GC_gc_no+1,(unsigned long)GC_bytes_allocd); +GC_promote_black_lists(); +(void)GC_reclaim_all((GC_stop_func)0,TRUE); +GC_notify_full_gc(); +GC_clear_marks(); +n_partial_gcs=0; +GC_is_full_gc=TRUE; +} else { +n_partial_gcs++; +} +} +#ifndef NO_CLOCK +if (GC_time_limit!=GC_TIME_UNLIMITED){ GET_TIME(GC_start_time);} +#endif +if (GC_stopped_mark(GC_time_limit==GC_TIME_UNLIMITED? 
+GC_never_stop_func:GC_timeout_stop_func)){ +#ifdef SAVE_CALL_CHAIN +GC_save_callers(GC_last_stack); +#endif +GC_finish_collection(); +} else { +if (!GC_is_full_gc){ +GC_n_attempts++; +} +} +} +} +STATIC GC_on_collection_event_proc GC_on_collection_event=0; +GC_API void GC_CALL GC_set_on_collection_event(GC_on_collection_event_proc fn) +{ +DCL_LOCK_STATE; +LOCK(); +GC_on_collection_event=fn; +UNLOCK(); +} +GC_API GC_on_collection_event_proc GC_CALL GC_get_on_collection_event(void) +{ +GC_on_collection_event_proc fn; +DCL_LOCK_STATE; +LOCK(); +fn=GC_on_collection_event; +UNLOCK(); +return fn; +} +GC_INNER GC_bool GC_try_to_collect_inner(GC_stop_func stop_func) +{ +#ifndef NO_CLOCK +CLOCK_TYPE start_time=CLOCK_TYPE_INITIALIZER; +GC_bool start_time_valid; +#endif +ASSERT_CANCEL_DISABLED(); +GC_ASSERT(I_HOLD_LOCK()); +if (GC_dont_gc||(*stop_func)())return FALSE; +if (GC_on_collection_event) +GC_on_collection_event(GC_EVENT_START); +if (GC_incremental&&GC_collection_in_progress()){ +GC_COND_LOG_PRINTF( +"GC_try_to_collect_inner:finishing collection in progress\n"); +while(GC_collection_in_progress()){ +if ((*stop_func)()){ +return(FALSE); +} +ENTER_GC(); +GC_collect_a_little_inner(1); +EXIT_GC(); +} +} +GC_notify_full_gc(); +#ifndef NO_CLOCK +start_time_valid=FALSE; +if ((GC_print_stats|(int)measure_performance)!=0){ +if (GC_print_stats) +GC_log_printf("Initiating full world-stop collection!\n"); +start_time_valid=TRUE; +GET_TIME(start_time); +} +#endif +GC_promote_black_lists(); +#ifdef PARALLEL_MARK +if (GC_parallel) +GC_wait_for_reclaim(); +#endif +if ((GC_find_leak||stop_func!=GC_never_stop_func) +&&!GC_reclaim_all(stop_func,FALSE)){ +return(FALSE); +} +GC_invalidate_mark_state(); +GC_clear_marks(); +#ifdef SAVE_CALL_CHAIN +GC_save_callers(GC_last_stack); +#endif +GC_is_full_gc=TRUE; +if (!GC_stopped_mark(stop_func)){ +if (!GC_incremental){ +GC_invalidate_mark_state(); +GC_unpromote_black_lists(); +} +return(FALSE); +} +GC_finish_collection(); +#ifndef NO_CLOCK +if (start_time_valid){ +CLOCK_TYPE current_time; +unsigned long time_diff,ns_frac_diff; +GET_TIME(current_time); +time_diff=MS_TIME_DIFF(current_time,start_time); +ns_frac_diff=NS_FRAC_TIME_DIFF(current_time,start_time); +if (measure_performance){ +full_gc_total_time+=time_diff; +full_gc_total_ns_frac+=(unsigned)ns_frac_diff; +if (full_gc_total_ns_frac>=1000000U){ +full_gc_total_ns_frac-=1000000U; +full_gc_total_time++; +} +} +if (GC_print_stats) +GC_log_printf("Complete collection took %lu ms %lu ns\n", +time_diff,ns_frac_diff); +} +#endif +if (GC_on_collection_event) +GC_on_collection_event(GC_EVENT_END); +return(TRUE); +} +#ifndef GC_RATE +#define GC_RATE 10 +#endif +#ifndef MAX_PRIOR_ATTEMPTS +#define MAX_PRIOR_ATTEMPTS 1 +#endif +STATIC int GC_deficit=0; +STATIC int GC_rate=GC_RATE; +GC_API void GC_CALL GC_set_rate(int value) +{ +GC_ASSERT(value > 0); +GC_rate=value; +} +GC_API int GC_CALL GC_get_rate(void) +{ +return GC_rate; +} +static int max_prior_attempts=MAX_PRIOR_ATTEMPTS; +GC_API void GC_CALL GC_set_max_prior_attempts(int value) +{ +GC_ASSERT(value>=0); +max_prior_attempts=value; +} +GC_API int GC_CALL GC_get_max_prior_attempts(void) +{ +return max_prior_attempts; +} +GC_INNER void GC_collect_a_little_inner(int n) +{ +IF_CANCEL(int cancel_state;) +GC_ASSERT(I_HOLD_LOCK()); +if (GC_dont_gc)return; +DISABLE_CANCEL(cancel_state); +if (GC_incremental&&GC_collection_in_progress()){ +int i; +int max_deficit=GC_rate*n; +#ifdef PARALLEL_MARK +if (GC_time_limit!=GC_TIME_UNLIMITED) +GC_parallel_mark_disabled=TRUE; +#endif +for 
(i=GC_deficit;i < max_deficit;i++){ +if (GC_mark_some(NULL)) +break; +} +#ifdef PARALLEL_MARK +GC_parallel_mark_disabled=FALSE; +#endif +if (i < max_deficit){ +#ifdef SAVE_CALL_CHAIN +GC_save_callers(GC_last_stack); +#endif +#ifdef PARALLEL_MARK +if (GC_parallel) +GC_wait_for_reclaim(); +#endif +if (GC_n_attempts < max_prior_attempts +&&GC_time_limit!=GC_TIME_UNLIMITED){ +#ifndef NO_CLOCK +GET_TIME(GC_start_time); +#endif +if (GC_stopped_mark(GC_timeout_stop_func)){ +GC_finish_collection(); +} else { +GC_n_attempts++; +} +} else { +(void)GC_stopped_mark(GC_never_stop_func); +GC_finish_collection(); +} +} +if (GC_deficit > 0){ +GC_deficit-=max_deficit; +if (GC_deficit < 0) +GC_deficit=0; +} +} else { +GC_maybe_gc(); +} +RESTORE_CANCEL(cancel_state); +} +GC_INNER void (*GC_check_heap)(void)=0; +GC_INNER void (*GC_print_all_smashed)(void)=0; +GC_API int GC_CALL GC_collect_a_little(void) +{ +int result; +DCL_LOCK_STATE; +LOCK(); +ENTER_GC(); +GC_collect_a_little_inner(1); +EXIT_GC(); +result=(int)GC_collection_in_progress(); +UNLOCK(); +if (!result&&GC_debugging_started)GC_print_all_smashed(); +return(result); +} +#ifndef NO_CLOCK +static unsigned world_stopped_total_time=0; +static unsigned world_stopped_total_divisor=0; +#ifndef MAX_TOTAL_TIME_DIVISOR +#define MAX_TOTAL_TIME_DIVISOR 1000 +#endif +#endif +#ifdef USE_MUNMAP +#define IF_USE_MUNMAP(x)x +#define COMMA_IF_USE_MUNMAP(x),x +#else +#define IF_USE_MUNMAP(x) +#define COMMA_IF_USE_MUNMAP(x) +#endif +STATIC GC_bool GC_stopped_mark(GC_stop_func stop_func) +{ +int i; +#ifndef NO_CLOCK +CLOCK_TYPE start_time=CLOCK_TYPE_INITIALIZER; +#endif +GC_ASSERT(I_HOLD_LOCK()); +#if!defined(REDIRECT_MALLOC)&&defined(USE_WINALLOC) +GC_add_current_malloc_heap(); +#endif +#if defined(REGISTER_LIBRARIES_EARLY) +GC_cond_register_dynamic_libraries(); +#endif +#ifndef NO_CLOCK +if (GC_PRINT_STATS_FLAG) +GET_TIME(start_time); +#endif +#if!defined(GC_NO_FINALIZATION)&&!defined(GC_TOGGLE_REFS_NOT_NEEDED) +GC_process_togglerefs(); +#endif +#ifdef THREADS +if (GC_on_collection_event) +GC_on_collection_event(GC_EVENT_PRE_STOP_WORLD); +#endif +STOP_WORLD(); +#ifdef THREADS +if (GC_on_collection_event) +GC_on_collection_event(GC_EVENT_POST_STOP_WORLD); +#endif +#ifdef THREAD_LOCAL_ALLOC +GC_world_stopped=TRUE; +#endif +GC_COND_LOG_PRINTF( +"\n--> Marking for collection #%lu after %lu allocated bytes\n", +(unsigned long)GC_gc_no+1,(unsigned long)GC_bytes_allocd); +#ifdef MAKE_BACK_GRAPH +if (GC_print_back_height){ +GC_build_back_graph(); +} +#endif +if (GC_on_collection_event) +GC_on_collection_event(GC_EVENT_MARK_START); +GC_clear_a_few_frames(); +GC_noop6(0,0,0,0,0,0); +GC_initiate_gc(); +#ifdef PARALLEL_MARK +if (stop_func!=GC_never_stop_func) +GC_parallel_mark_disabled=TRUE; +#endif +for (i=0;!(*stop_func)();i++){ +if (GC_mark_some(GC_approx_sp())){ +#ifdef PARALLEL_MARK +if (GC_parallel&&GC_parallel_mark_disabled){ +GC_COND_LOG_PRINTF("Stopped marking done after %d iterations" +" with disabled parallel marker\n",i); +} +#endif +i=-1; +break; +} +} +#ifdef PARALLEL_MARK +GC_parallel_mark_disabled=FALSE; +#endif +if (i>=0){ +GC_COND_LOG_PRINTF("Abandoned stopped marking after" +" %d iterations\n",i); +GC_deficit=i; +#ifdef THREAD_LOCAL_ALLOC +GC_world_stopped=FALSE; +#endif +#ifdef THREADS +if (GC_on_collection_event) +GC_on_collection_event(GC_EVENT_PRE_START_WORLD); +#endif +START_WORLD(); +#ifdef THREADS +if (GC_on_collection_event) +GC_on_collection_event(GC_EVENT_POST_START_WORLD); +#endif +return FALSE; +} +GC_gc_no++; +GC_DBGLOG_PRINTF("GC #%lu freed %ld 
bytes,heap %lu KiB" +IF_USE_MUNMAP(" (+%lu KiB unmapped)")"\n", +(unsigned long)GC_gc_no,(long)GC_bytes_found, +TO_KiB_UL(GC_heapsize - GC_unmapped_bytes) +COMMA_IF_USE_MUNMAP(TO_KiB_UL(GC_unmapped_bytes))); +if (GC_debugging_started){ +(*GC_check_heap)(); +} +if (GC_on_collection_event){ +GC_on_collection_event(GC_EVENT_MARK_END); +#ifdef THREADS +GC_on_collection_event(GC_EVENT_PRE_START_WORLD); +#endif +} +#ifdef THREAD_LOCAL_ALLOC +GC_world_stopped=FALSE; +#endif +START_WORLD(); +#ifdef THREADS +if (GC_on_collection_event) +GC_on_collection_event(GC_EVENT_POST_START_WORLD); +#endif +#ifndef NO_CLOCK +if (GC_PRINT_STATS_FLAG){ +unsigned long time_diff; +unsigned total_time,divisor; +CLOCK_TYPE current_time; +GET_TIME(current_time); +time_diff=MS_TIME_DIFF(current_time,start_time); +total_time=world_stopped_total_time; +divisor=world_stopped_total_divisor; +if ((int)total_time < 0||divisor>=MAX_TOTAL_TIME_DIVISOR){ +total_time>>=1; +divisor>>=1; +} +total_time+=time_diff < (((unsigned)-1)>>1)? +(unsigned)time_diff:((unsigned)-1)>>1; +world_stopped_total_time=total_time; +world_stopped_total_divisor=++divisor; +GC_ASSERT(divisor!=0); +GC_log_printf("World-stopped marking took %lu ms %lu ns" +" (%u ms in average)\n", +time_diff,NS_FRAC_TIME_DIFF(current_time,start_time), +total_time/divisor); +} +#endif +return(TRUE); +} +GC_INNER void GC_set_fl_marks(ptr_t q) +{ +if (q){ +struct hblk*h=HBLKPTR(q); +struct hblk*last_h=h; +hdr*hhdr=HDR(h); +IF_PER_OBJ(word sz=hhdr->hb_sz;) +for (;;){ +word bit_no=MARK_BIT_NO((ptr_t)q - (ptr_t)h,sz); +if (!mark_bit_from_hdr(hhdr,bit_no)){ +set_mark_bit_from_hdr(hhdr,bit_no); +++hhdr->hb_n_marks; +} +q=(ptr_t)obj_link(q); +if (q==NULL) +break; +h=HBLKPTR(q); +if (h!=last_h){ +last_h=h; +hhdr=HDR(h); +IF_PER_OBJ(sz=hhdr->hb_sz;) +} +} +} +} +#if defined(GC_ASSERTIONS)&&defined(THREAD_LOCAL_ALLOC) +void GC_check_fl_marks(void**pfreelist) +{ +#if defined(AO_HAVE_load_acquire_read)&&!defined(THREAD_SANITIZER) +AO_t*list=(AO_t*)AO_load_acquire_read((AO_t*)pfreelist); +AO_t*prev; +AO_t*p; +if ((word)list<=HBLKSIZE)return; +prev=(AO_t*)pfreelist; +for (p=list;p!=NULL;){ +AO_t*next; +if (!GC_is_marked(p)){ +ABORT_ARG2("Unmarked local free list entry", +":object %p on list %p",(void*)p,(void*)list); +} +next=(AO_t*)AO_load_acquire_read(p); +if (AO_load(prev)!=(AO_t)p) +break; +prev=p; +p=next; +} +#else +(void)pfreelist; +#endif +} +#endif +STATIC void GC_clear_fl_marks(ptr_t q) +{ +struct hblk*h=HBLKPTR(q); +struct hblk*last_h=h; +hdr*hhdr=HDR(h); +word sz=hhdr->hb_sz; +for (;;){ +word bit_no=MARK_BIT_NO((ptr_t)q - (ptr_t)h,sz); +if (mark_bit_from_hdr(hhdr,bit_no)){ +size_t n_marks=hhdr->hb_n_marks; +GC_ASSERT(n_marks!=0); +clear_mark_bit_from_hdr(hhdr,bit_no); +n_marks--; +#ifdef PARALLEL_MARK +if (0!=n_marks||!GC_parallel){ +hhdr->hb_n_marks=n_marks; +} +#else +hhdr->hb_n_marks=n_marks; +#endif +} +GC_bytes_found-=sz; +q=(ptr_t)obj_link(q); +if (q==NULL) +break; +h=HBLKPTR(q); +if (h!=last_h){ +last_h=h; +hhdr=HDR(h); +sz=hhdr->hb_sz; +} +} +} +#if defined(GC_ASSERTIONS)&&defined(THREAD_LOCAL_ALLOC) +void GC_check_tls(void); +#endif +GC_on_heap_resize_proc GC_on_heap_resize=0; +GC_INLINE int GC_compute_heap_usage_percent(void) +{ +word used=GC_composite_in_use+GC_atomic_in_use; +word heap_sz=GC_heapsize - GC_unmapped_bytes; +#if defined(CPPCHECK) +word limit=(GC_WORD_MAX>>1)/50; +#else +const word limit=GC_WORD_MAX/100; +#endif +return used>=heap_sz?0:used < limit? 
+(int)((used*100)/heap_sz):(int)(used/(heap_sz/100)); +} +STATIC void GC_finish_collection(void) +{ +#ifndef NO_CLOCK +CLOCK_TYPE start_time=CLOCK_TYPE_INITIALIZER; +CLOCK_TYPE finalize_time=CLOCK_TYPE_INITIALIZER; +#endif +GC_ASSERT(I_HOLD_LOCK()); +#if defined(GC_ASSERTIONS)&&defined(THREAD_LOCAL_ALLOC)&&!defined(DBG_HDRS_ALL) +GC_check_tls(); +#endif +#ifndef NO_CLOCK +if (GC_print_stats) +GET_TIME(start_time); +#endif +if (GC_on_collection_event) +GC_on_collection_event(GC_EVENT_RECLAIM_START); +#ifndef GC_GET_HEAP_USAGE_NOT_NEEDED +if (GC_bytes_found > 0) +GC_reclaimed_bytes_before_gc+=(word)GC_bytes_found; +#endif +GC_bytes_found=0; +#if defined(LINUX)&&defined(__ELF__)&&!defined(SMALL_CONFIG) +if (GETENV("GC_PRINT_ADDRESS_MAP")!=0){ +GC_print_address_map(); +} +#endif +COND_DUMP; +if (GC_find_leak){ +word size; +unsigned kind; +ptr_t q; +for (kind=0;kind < GC_n_kinds;kind++){ +for (size=1;size<=MAXOBJGRANULES;size++){ +q=(ptr_t)GC_obj_kinds[kind].ok_freelist[size]; +if (q!=NULL) +GC_set_fl_marks(q); +} +} +GC_start_reclaim(TRUE); +} +#ifndef GC_NO_FINALIZATION +GC_finalize(); +#endif +#ifndef NO_CLOCK +if (GC_print_stats) +GET_TIME(finalize_time); +#endif +if (GC_print_back_height){ +#ifdef MAKE_BACK_GRAPH +GC_traverse_back_graph(); +#elif!defined(SMALL_CONFIG) +GC_err_printf("Back height not available:" +"Rebuild collector with -DMAKE_BACK_GRAPH\n"); +#endif +} +{ +word size; +ptr_t q; +unsigned kind; +for (kind=0;kind < GC_n_kinds;kind++){ +for (size=1;size<=MAXOBJGRANULES;size++){ +q=(ptr_t)GC_obj_kinds[kind].ok_freelist[size]; +if (q!=NULL) +GC_clear_fl_marks(q); +} +} +} +GC_VERBOSE_LOG_PRINTF("Bytes recovered before sweep - f.l. count=%ld\n", +(long)GC_bytes_found); +GC_start_reclaim(FALSE); +GC_DBGLOG_PRINTF("In-use heap:%d%% (%lu KiB pointers+%lu KiB other)\n", +GC_compute_heap_usage_percent(), +TO_KiB_UL(GC_composite_in_use), +TO_KiB_UL(GC_atomic_in_use)); +if (GC_is_full_gc){ +GC_used_heap_size_after_full=USED_HEAP_SIZE; +GC_need_full_gc=FALSE; +} else { +GC_need_full_gc=USED_HEAP_SIZE - GC_used_heap_size_after_full +> min_bytes_allocd(); +} +GC_VERBOSE_LOG_PRINTF("Immediately reclaimed %ld bytes,heapsize:" +" %lu bytes" IF_USE_MUNMAP(" (%lu unmapped)")"\n", +(long)GC_bytes_found, +(unsigned long)GC_heapsize +COMMA_IF_USE_MUNMAP((unsigned long) +GC_unmapped_bytes)); +GC_n_attempts=0; +GC_is_full_gc=FALSE; +GC_bytes_allocd_before_gc+=GC_bytes_allocd; +GC_non_gc_bytes_at_gc=GC_non_gc_bytes; +GC_bytes_allocd=0; +GC_bytes_dropped=0; +GC_bytes_freed=0; +GC_finalizer_bytes_freed=0; +IF_USE_MUNMAP(GC_unmap_old()); +if (GC_on_collection_event) +GC_on_collection_event(GC_EVENT_RECLAIM_END); +#ifndef NO_CLOCK +if (GC_print_stats){ +CLOCK_TYPE done_time; +GET_TIME(done_time); +#if!defined(SMALL_CONFIG)&&!defined(GC_NO_FINALIZATION) +GC_print_finalization_stats(); +#endif +GC_log_printf("Finalize and initiate sweep took %lu ms %lu ns" +"+%lu ms %lu ns\n", +MS_TIME_DIFF(finalize_time,start_time), +NS_FRAC_TIME_DIFF(finalize_time,start_time), +MS_TIME_DIFF(done_time,finalize_time), +NS_FRAC_TIME_DIFF(done_time,finalize_time)); +} +#elif!defined(SMALL_CONFIG)&&!defined(GC_NO_FINALIZATION) +if (GC_print_stats) +GC_print_finalization_stats(); +#endif +} +STATIC GC_bool GC_try_to_collect_general(GC_stop_func stop_func, +GC_bool force_unmap GC_ATTR_UNUSED) +{ +GC_bool result; +IF_USE_MUNMAP(int old_unmap_threshold;) +IF_CANCEL(int cancel_state;) +DCL_LOCK_STATE; +if (!EXPECT(GC_is_initialized,TRUE))GC_init(); +if (GC_debugging_started)GC_print_all_smashed(); +GC_INVOKE_FINALIZERS(); +LOCK(); 
+DISABLE_CANCEL(cancel_state); +#ifdef USE_MUNMAP +old_unmap_threshold=GC_unmap_threshold; +if (force_unmap|| +(GC_force_unmap_on_gcollect&&old_unmap_threshold > 0)) +GC_unmap_threshold=1; +#endif +ENTER_GC(); +GC_noop6(0,0,0,0,0,0); +result=GC_try_to_collect_inner(stop_func!=0?stop_func: +GC_default_stop_func); +EXIT_GC(); +IF_USE_MUNMAP(GC_unmap_threshold=old_unmap_threshold); +RESTORE_CANCEL(cancel_state); +UNLOCK(); +if (result){ +if (GC_debugging_started)GC_print_all_smashed(); +GC_INVOKE_FINALIZERS(); +} +return(result); +} +GC_API int GC_CALL GC_try_to_collect(GC_stop_func stop_func) +{ +GC_ASSERT(NONNULL_ARG_NOT_NULL(stop_func)); +return (int)GC_try_to_collect_general(stop_func,FALSE); +} +GC_API void GC_CALL GC_gcollect(void) +{ +(void)GC_try_to_collect_general(0,FALSE); +if (GC_have_errors)GC_print_all_errors(); +} +STATIC word GC_heapsize_at_forced_unmap=0; +GC_API void GC_CALL GC_gcollect_and_unmap(void) +{ +GC_heapsize_at_forced_unmap=GC_heapsize; +(void)GC_try_to_collect_general(GC_never_stop_func,TRUE); +} +#ifdef USE_PROC_FOR_LIBRARIES +GC_INNER void GC_add_to_our_memory(ptr_t p,size_t bytes) +{ +if (0==p)return; +if (GC_n_memory>=MAX_HEAP_SECTS) +ABORT("Too many GC-allocated memory sections:Increase MAX_HEAP_SECTS"); +GC_our_memory[GC_n_memory].hs_start=p; +GC_our_memory[GC_n_memory].hs_bytes=bytes; +GC_n_memory++; +} +#endif +GC_INNER void GC_add_to_heap(struct hblk*p,size_t bytes) +{ +hdr*phdr; +word endp; +if (GC_n_heap_sects>=MAX_HEAP_SECTS){ +ABORT("Too many heap sections:Increase MAXHINCR or MAX_HEAP_SECTS"); +} +while ((word)p<=HBLKSIZE){ +++p; +bytes-=HBLKSIZE; +if (0==bytes)return; +} +endp=(word)p+bytes; +if (endp<=(word)p){ +bytes-=HBLKSIZE; +if (0==bytes)return; +endp-=HBLKSIZE; +} +phdr=GC_install_header(p); +if (0==phdr){ +return; +} +GC_ASSERT(endp > (word)p&&endp==(word)p+bytes); +GC_heap_sects[GC_n_heap_sects].hs_start=(ptr_t)p; +GC_heap_sects[GC_n_heap_sects].hs_bytes=bytes; +GC_n_heap_sects++; +phdr->hb_sz=bytes; +phdr->hb_flags=0; +GC_freehblk(p); +GC_heapsize+=bytes; +GC_collect_at_heapsize+=bytes; +if (GC_collect_at_heapsize < GC_heapsize) +GC_collect_at_heapsize=GC_WORD_MAX; +if ((word)p<=(word)GC_least_plausible_heap_addr +||GC_least_plausible_heap_addr==0){ +GC_least_plausible_heap_addr=(void*)((ptr_t)p - sizeof(word)); +} +if ((word)p+bytes>=(word)GC_greatest_plausible_heap_addr){ +GC_greatest_plausible_heap_addr=(void*)endp; +} +} +#if!defined(NO_DEBUGGING) +void GC_print_heap_sects(void) +{ +unsigned i; +GC_printf("Total heap size:%lu" IF_USE_MUNMAP(" (%lu unmapped)")"\n", +(unsigned long)GC_heapsize +COMMA_IF_USE_MUNMAP((unsigned long)GC_unmapped_bytes)); +for (i=0;i < GC_n_heap_sects;i++){ +ptr_t start=GC_heap_sects[i].hs_start; +size_t len=GC_heap_sects[i].hs_bytes; +struct hblk*h; +unsigned nbl=0; +for (h=(struct hblk*)start;(word)h < (word)(start+len);h++){ +if (GC_is_black_listed(h,HBLKSIZE))nbl++; +} +GC_printf("Section %d from %p to %p %u/%lu blacklisted\n", +i,(void*)start,(void*)&start[len], +nbl,(unsigned long)divHBLKSZ(len)); +} +} +#endif +void*GC_least_plausible_heap_addr=(void*)GC_WORD_MAX; +void*GC_greatest_plausible_heap_addr=0; +GC_INLINE word GC_max(word x,word y) +{ +return(x > y?x:y); +} +GC_INLINE word GC_min(word x,word y) +{ +return(x < y?x:y); +} +STATIC word GC_max_heapsize=0; +GC_API void GC_CALL GC_set_max_heap_size(GC_word n) +{ +GC_max_heapsize=n; +} +GC_word GC_max_retries=0; +GC_INNER GC_bool GC_expand_hp_inner(word n) +{ +size_t bytes; +struct hblk*space; +word expansion_slop; +GC_ASSERT(I_HOLD_LOCK()); 
+GC_ASSERT(GC_page_size!=0); +if (n < MINHINCR)n=MINHINCR; +bytes=ROUNDUP_PAGESIZE((size_t)n*HBLKSIZE); +if (GC_max_heapsize!=0 +&&(GC_max_heapsize < (word)bytes +||GC_heapsize > GC_max_heapsize - (word)bytes)){ +return(FALSE); +} +space=GET_MEM(bytes); +GC_add_to_our_memory((ptr_t)space,bytes); +if (space==0){ +WARN("Failed to expand heap by %" WARN_PRIdPTR " bytes\n", +(word)bytes); +return(FALSE); +} +GC_INFOLOG_PRINTF("Grow heap to %lu KiB after %lu bytes allocated\n", +TO_KiB_UL(GC_heapsize+(word)bytes), +(unsigned long)GC_bytes_allocd); +expansion_slop=min_bytes_allocd()+4*MAXHINCR*HBLKSIZE; +if ((GC_last_heap_addr==0&&!((word)space&SIGNB)) +||(GC_last_heap_addr!=0 +&&(word)GC_last_heap_addr < (word)space)){ +word new_limit=(word)space+(word)bytes+expansion_slop; +if (new_limit > (word)space){ +GC_greatest_plausible_heap_addr= +(void*)GC_max((word)GC_greatest_plausible_heap_addr, +(word)new_limit); +} +} else { +word new_limit=(word)space - expansion_slop; +if (new_limit < (word)space){ +GC_least_plausible_heap_addr= +(void*)GC_min((word)GC_least_plausible_heap_addr, +(word)space - expansion_slop); +} +} +GC_prev_heap_addr=GC_last_heap_addr; +GC_last_heap_addr=(ptr_t)space; +GC_add_to_heap(space,bytes); +GC_collect_at_heapsize= +GC_heapsize+expansion_slop - 2*MAXHINCR*HBLKSIZE; +if (GC_collect_at_heapsize < GC_heapsize) +GC_collect_at_heapsize=GC_WORD_MAX; +if (GC_on_heap_resize) +(*GC_on_heap_resize)(GC_heapsize); +return(TRUE); +} +GC_API int GC_CALL GC_expand_hp(size_t bytes) +{ +int result; +DCL_LOCK_STATE; +if (!EXPECT(GC_is_initialized,TRUE))GC_init(); +LOCK(); +result=(int)GC_expand_hp_inner(divHBLKSZ((word)bytes)); +if (result)GC_requested_heapsize+=bytes; +UNLOCK(); +return(result); +} +GC_INNER unsigned GC_fail_count=0; +#if defined(GC_ALLOCD_BYTES_PER_FINALIZER)&&!defined(CPPCHECK) +STATIC word GC_allocd_bytes_per_finalizer=GC_ALLOCD_BYTES_PER_FINALIZER; +#else +STATIC word GC_allocd_bytes_per_finalizer=10000; +#endif +GC_API void GC_CALL GC_set_allocd_bytes_per_finalizer(GC_word value) +{ +GC_allocd_bytes_per_finalizer=value; +} +GC_API GC_word GC_CALL GC_get_allocd_bytes_per_finalizer(void) +{ +return GC_allocd_bytes_per_finalizer; +} +static word last_fo_entries=0; +static word last_bytes_finalized=0; +GC_INNER GC_bool GC_collect_or_expand(word needed_blocks, +GC_bool ignore_off_page, +GC_bool retry) +{ +GC_bool gc_not_stopped=TRUE; +word blocks_to_get; +IF_CANCEL(int cancel_state;) +GC_ASSERT(I_HOLD_LOCK()); +DISABLE_CANCEL(cancel_state); +if (!GC_incremental&&!GC_dont_gc&& +((GC_dont_expand&&GC_bytes_allocd > 0) +||(GC_fo_entries > last_fo_entries +&&(last_bytes_finalized|GC_bytes_finalized)!=0 +&&(GC_fo_entries - last_fo_entries) +*GC_allocd_bytes_per_finalizer > GC_bytes_allocd) +||GC_should_collect())){ +gc_not_stopped=GC_try_to_collect_inner( +GC_bytes_allocd > 0&&(!GC_dont_expand||!retry)? 
+GC_default_stop_func:GC_never_stop_func); +if (gc_not_stopped==TRUE||!retry){ +last_fo_entries=GC_fo_entries; +last_bytes_finalized=GC_bytes_finalized; +RESTORE_CANCEL(cancel_state); +return(TRUE); +} +} +blocks_to_get=(GC_heapsize - GC_heapsize_at_forced_unmap) +/(HBLKSIZE*GC_free_space_divisor) ++needed_blocks; +if (blocks_to_get > MAXHINCR){ +word slop; +if (ignore_off_page){ +slop=4; +} else { +slop=2*divHBLKSZ(BL_LIMIT); +if (slop > needed_blocks)slop=needed_blocks; +} +if (needed_blocks+slop > MAXHINCR){ +blocks_to_get=needed_blocks+slop; +} else { +blocks_to_get=MAXHINCR; +} +if (blocks_to_get > divHBLKSZ(GC_WORD_MAX)) +blocks_to_get=divHBLKSZ(GC_WORD_MAX); +} +if (!GC_expand_hp_inner(blocks_to_get) +&&(blocks_to_get==needed_blocks +||!GC_expand_hp_inner(needed_blocks))){ +if (gc_not_stopped==FALSE){ +GC_gcollect_inner(); +GC_ASSERT(GC_bytes_allocd==0); +} else if (GC_fail_count++< GC_max_retries){ +WARN("Out of Memory!Trying to continue...\n",0); +GC_gcollect_inner(); +} else { +#if!defined(AMIGA)||!defined(GC_AMIGA_FASTALLOC) +WARN("Out of Memory!Heap size:%" WARN_PRIdPTR " MiB." +" Returning NULL!\n",(GC_heapsize - GC_unmapped_bytes)>>20); +#endif +RESTORE_CANCEL(cancel_state); +return(FALSE); +} +} else if (GC_fail_count){ +GC_COND_LOG_PRINTF("Memory available again...\n"); +} +RESTORE_CANCEL(cancel_state); +return(TRUE); +} +GC_INNER ptr_t GC_allocobj(size_t gran,int kind) +{ +void**flh=&(GC_obj_kinds[kind].ok_freelist[gran]); +GC_bool tried_minor=FALSE; +GC_bool retry=FALSE; +GC_ASSERT(I_HOLD_LOCK()); +if (gran==0)return(0); +while (*flh==0){ +ENTER_GC(); +#ifndef GC_DISABLE_INCREMENTAL +if (GC_incremental&&GC_time_limit!=GC_TIME_UNLIMITED){ +GC_collect_a_little_inner(1); +} +#endif +GC_ASSERT(!GC_is_full_gc +||NULL==GC_obj_kinds[kind].ok_reclaim_list +||NULL==GC_obj_kinds[kind].ok_reclaim_list[gran]); +GC_continue_reclaim(gran,kind); +EXIT_GC(); +#if defined(CPPCHECK) +GC_noop1((word)&flh); +#endif +if (NULL==*flh){ +GC_new_hblk(gran,kind); +#if defined(CPPCHECK) +GC_noop1((word)&flh); +#endif +if (NULL==*flh){ +ENTER_GC(); +if (GC_incremental&&GC_time_limit==GC_TIME_UNLIMITED +&&!tried_minor){ +GC_collect_a_little_inner(1); +tried_minor=TRUE; +} else { +if (!GC_collect_or_expand(1,FALSE,retry)){ +EXIT_GC(); +return(0); +} +retry=TRUE; +} +EXIT_GC(); +} +} +} +GC_fail_count=0; +return (ptr_t)(*flh); +} +#ifndef MSWINCE +#include <errno.h> +#endif +#include <string.h> +#ifndef SHORT_DBG_HDRS +GC_INNER int GC_has_other_debug_info(ptr_t p) +{ +ptr_t body=(ptr_t)((oh*)p+1); +word sz=GC_size(p); +if (HBLKPTR(p)!=HBLKPTR((ptr_t)body) +||sz < DEBUG_BYTES+EXTRA_BYTES){ +return 0; +} +if (((oh*)p)->oh_sf!=(START_FLAG^(word)body) +&&((word*)p)[BYTES_TO_WORDS(sz)-1]!=(END_FLAG^(word)body)){ +return 0; +} +if (((oh*)p)->oh_sz==sz){ +return -1; +} +return 1; +} +#endif +#ifdef LINT2 +long GC_random(void) +{ +static unsigned seed=1; +seed=(seed*1103515245U+12345)&GC_RAND_MAX; +return (long)seed; +} +#endif +#ifdef KEEP_BACK_PTRS +#ifdef LINT2 +#define RANDOM()GC_random() +#else +#include <stdlib.h> +#define GC_RAND_MAX RAND_MAX +#if defined(__GLIBC__)||defined(SOLARIS)||defined(HPUX)||defined(IRIX5)||defined(OSF1) +#define RANDOM()random() +#else +#define RANDOM()(long)rand() +#endif +#endif +GC_INNER void GC_store_back_pointer(ptr_t source,ptr_t dest) +{ +if (GC_HAS_DEBUG_INFO(dest)){ +#ifdef PARALLEL_MARK +AO_store((volatile AO_t*)&((oh*)dest)->oh_back_ptr, +(AO_t)HIDE_BACK_PTR(source)); +#else +((oh*)dest)->oh_back_ptr=HIDE_BACK_PTR(source); +#endif +} +} +GC_INNER void GC_marked_for_finalization(ptr_t dest) +{ 
+GC_store_back_pointer(MARKED_FOR_FINALIZATION,dest); +} +GC_API GC_ref_kind GC_CALL GC_get_back_ptr_info(void*dest,void**base_p, +size_t*offset_p) +{ +oh*hdr=(oh*)GC_base(dest); +ptr_t bp; +ptr_t bp_base; +#ifdef LINT2 +if (!hdr)ABORT("Invalid GC_get_back_ptr_info argument"); +#endif +if (!GC_HAS_DEBUG_INFO((ptr_t)hdr))return GC_NO_SPACE; +bp=(ptr_t)GC_REVEAL_POINTER(hdr->oh_back_ptr); +if (MARKED_FOR_FINALIZATION==bp)return GC_FINALIZER_REFD; +if (MARKED_FROM_REGISTER==bp)return GC_REFD_FROM_REG; +if (NOT_MARKED==bp)return GC_UNREFERENCED; +#if ALIGNMENT==1 +{ +ptr_t alternate_ptr=bp+1; +ptr_t target=*(ptr_t*)bp; +ptr_t alternate_target=*(ptr_t*)alternate_ptr; +if ((word)alternate_target>=(word)GC_least_plausible_heap_addr +&&(word)alternate_target<=(word)GC_greatest_plausible_heap_addr +&&((word)target < (word)GC_least_plausible_heap_addr +||(word)target > (word)GC_greatest_plausible_heap_addr)){ +bp=alternate_ptr; +} +} +#endif +bp_base=(ptr_t)GC_base(bp); +if (NULL==bp_base){ +*base_p=bp; +*offset_p=0; +return GC_REFD_FROM_ROOT; +} else { +if (GC_HAS_DEBUG_INFO(bp_base))bp_base+=sizeof(oh); +*base_p=bp_base; +*offset_p=bp - bp_base; +return GC_REFD_FROM_HEAP; +} +} +GC_API void*GC_CALL GC_generate_random_heap_address(void) +{ +size_t i; +word heap_offset=RANDOM(); +if (GC_heapsize > GC_RAND_MAX){ +heap_offset*=GC_RAND_MAX; +heap_offset+=RANDOM(); +} +heap_offset%=GC_heapsize; +for (i=0;;++i){ +size_t size; +if (i>=GC_n_heap_sects) +ABORT("GC_generate_random_heap_address:size inconsistency"); +size=GC_heap_sects[i].hs_bytes; +if (heap_offset < size){ +break; +} else { +heap_offset-=size; +} +} +return GC_heap_sects[i].hs_start+heap_offset; +} +GC_API void*GC_CALL GC_generate_random_valid_address(void) +{ +ptr_t result; +ptr_t base; +do { +result=(ptr_t)GC_generate_random_heap_address(); +base=(ptr_t)GC_base(result); +} while (NULL==base||!GC_is_marked(base)); +return result; +} +GC_API void GC_CALL GC_print_backtrace(void*p) +{ +void*current=p; +int i; +GC_ref_kind source; +size_t offset; +void*base; +GC_print_heap_obj((ptr_t)GC_base(current)); +for (i=0;;++i){ +source=GC_get_back_ptr_info(current,&base,&offset); +if (GC_UNREFERENCED==source){ +GC_err_printf("Reference could not be found\n"); +goto out; +} +if (GC_NO_SPACE==source){ +GC_err_printf("No debug info in object:Can't find reference\n"); +goto out; +} +GC_err_printf("Reachable via %d levels of pointers from ",i); +switch(source){ +case GC_REFD_FROM_ROOT: +GC_err_printf("root at %p\n\n",base); +goto out; +case GC_REFD_FROM_REG: +GC_err_printf("root in register\n\n"); +goto out; +case GC_FINALIZER_REFD: +GC_err_printf("list of finalizable objects\n\n"); +goto out; +case GC_REFD_FROM_HEAP: +GC_err_printf("offset %ld in object:\n",(long)offset); +GC_print_heap_obj((ptr_t)GC_base(base)); +break; +default: +GC_err_printf("INTERNAL ERROR:UNEXPECTED SOURCE!!!!\n"); +goto out; +} +current=base; +} +out:; +} +GC_INNER void GC_generate_random_backtrace_no_gc(void) +{ +void*current; +current=GC_generate_random_valid_address(); +GC_printf("\n****Chosen address %p in object\n",current); +GC_print_backtrace(current); +} +GC_API void GC_CALL GC_generate_random_backtrace(void) +{ +if (GC_try_to_collect(GC_never_stop_func)==0){ +GC_err_printf("Cannot generate a backtrace:" +"garbage collection is disabled!\n"); +return; +} +GC_generate_random_backtrace_no_gc(); +} +#endif +#define CROSSES_HBLK(p,sz)(((word)((p)+sizeof(oh)+(sz)- 1)^(word)(p))>=HBLKSIZE) +GC_INNER void*GC_store_debug_info_inner(void*p,word sz GC_ATTR_UNUSED, +const 
char*string,int linenum) +{ +word*result=(word*)((oh*)p+1); +GC_ASSERT(I_HOLD_LOCK()); +GC_ASSERT(GC_size(p)>=sizeof(oh)+sz); +GC_ASSERT(!(SMALL_OBJ(sz)&&CROSSES_HBLK((ptr_t)p,sz))); +#ifdef KEEP_BACK_PTRS +((oh*)p)->oh_back_ptr=HIDE_BACK_PTR(NOT_MARKED); +#endif +#ifdef MAKE_BACK_GRAPH +((oh*)p)->oh_bg_ptr=HIDE_BACK_PTR((ptr_t)0); +#endif +((oh*)p)->oh_string=string; +((oh*)p)->oh_int=linenum; +#ifndef SHORT_DBG_HDRS +((oh*)p)->oh_sz=sz; +((oh*)p)->oh_sf=START_FLAG^(word)result; +((word*)p)[BYTES_TO_WORDS(GC_size(p))-1]= +result[SIMPLE_ROUNDED_UP_WORDS(sz)]=END_FLAG^(word)result; +#endif +return result; +} +static void*store_debug_info(void*p,size_t lb, +const char*fn,GC_EXTRA_PARAMS) +{ +void*result; +DCL_LOCK_STATE; +if (NULL==p){ +GC_err_printf("%s(%lu)returning NULL (%s:%d)\n", +fn,(unsigned long)lb,s,i); +return NULL; +} +LOCK(); +if (!GC_debugging_started) +GC_start_debugging_inner(); +ADD_CALL_CHAIN(p,ra); +result=GC_store_debug_info_inner(p,(word)lb,s,i); +UNLOCK(); +return result; +} +#ifndef SHORT_DBG_HDRS +STATIC ptr_t GC_check_annotated_obj(oh*ohdr) +{ +ptr_t body=(ptr_t)(ohdr+1); +word gc_sz=GC_size((ptr_t)ohdr); +if (ohdr->oh_sz+DEBUG_BYTES > gc_sz){ +return((ptr_t)(&(ohdr->oh_sz))); +} +if (ohdr->oh_sf!=(START_FLAG^(word)body)){ +return((ptr_t)(&(ohdr->oh_sf))); +} +if (((word*)ohdr)[BYTES_TO_WORDS(gc_sz)-1]!=(END_FLAG^(word)body)){ +return (ptr_t)(&((word*)ohdr)[BYTES_TO_WORDS(gc_sz)-1]); +} +if (((word*)body)[SIMPLE_ROUNDED_UP_WORDS(ohdr->oh_sz)] +!=(END_FLAG^(word)body)){ +return (ptr_t)(&((word*)body)[SIMPLE_ROUNDED_UP_WORDS(ohdr->oh_sz)]); +} +return(0); +} +#endif +STATIC GC_describe_type_fn GC_describe_type_fns[MAXOBJKINDS]={0}; +GC_API void GC_CALL GC_register_describe_type_fn(int kind, +GC_describe_type_fn fn) +{ +GC_describe_type_fns[kind]=fn; +} +#define GET_OH_LINENUM(ohdr)((int)(ohdr)->oh_int) +#ifndef SHORT_DBG_HDRS +#define IF_NOT_SHORTDBG_HDRS(x)x +#define COMMA_IFNOT_SHORTDBG_HDRS(x),x +#else +#define IF_NOT_SHORTDBG_HDRS(x) +#define COMMA_IFNOT_SHORTDBG_HDRS(x) +#endif +STATIC void GC_print_obj(ptr_t p) +{ +oh*ohdr=(oh*)GC_base(p); +ptr_t q; +hdr*hhdr; +int kind; +const char*kind_str; +char buffer[GC_TYPE_DESCR_LEN+1]; +GC_ASSERT(I_DONT_HOLD_LOCK()); +#ifdef LINT2 +if (!ohdr)ABORT("Invalid GC_print_obj argument"); +#endif +q=(ptr_t)(ohdr+1); +hhdr=GC_find_header(q); +kind=hhdr->hb_obj_kind; +if (0!=GC_describe_type_fns[kind]&&GC_is_marked(ohdr)){ +buffer[GC_TYPE_DESCR_LEN]=0; +(GC_describe_type_fns[kind])(q,buffer); +GC_ASSERT(buffer[GC_TYPE_DESCR_LEN]==0); +kind_str=buffer; +} else { +switch(kind){ +case PTRFREE: +kind_str="PTRFREE"; +break; +case NORMAL: +kind_str="NORMAL"; +break; +case UNCOLLECTABLE: +kind_str="UNCOLLECTABLE"; +break; +#ifdef GC_ATOMIC_UNCOLLECTABLE +case AUNCOLLECTABLE: +kind_str="ATOMIC_UNCOLLECTABLE"; +break; +#endif +default: +kind_str=NULL; +} +} +if (NULL!=kind_str){ +GC_err_printf("%p (%s:%d," IF_NOT_SHORTDBG_HDRS(" sz=%lu,")" %s)\n", +(void*)((ptr_t)ohdr+sizeof(oh)), +ohdr->oh_string,GET_OH_LINENUM(ohdr) +COMMA_IFNOT_SHORTDBG_HDRS((unsigned long)ohdr->oh_sz), +kind_str); +} else { +GC_err_printf("%p (%s:%d," IF_NOT_SHORTDBG_HDRS(" sz=%lu,") +" kind=%d descr=0x%lx)\n", +(void*)((ptr_t)ohdr+sizeof(oh)), +ohdr->oh_string,GET_OH_LINENUM(ohdr) +COMMA_IFNOT_SHORTDBG_HDRS((unsigned long)ohdr->oh_sz), +kind,(unsigned long)hhdr->hb_descr); +} +PRINT_CALL_CHAIN(ohdr); +} +STATIC void GC_debug_print_heap_obj_proc(ptr_t p) +{ +GC_ASSERT(I_DONT_HOLD_LOCK()); +if (GC_HAS_DEBUG_INFO(p)){ +GC_print_obj(p); +} else { 
+GC_default_print_heap_obj_proc(p); +} +} +#ifndef SHORT_DBG_HDRS +STATIC void GC_print_smashed_obj(const char*msg,void*p, +ptr_t clobbered_addr) +{ +oh*ohdr=(oh*)GC_base(p); +GC_ASSERT(I_DONT_HOLD_LOCK()); +#ifdef LINT2 +if (!ohdr)ABORT("Invalid GC_print_smashed_obj argument"); +#endif +if ((word)clobbered_addr<=(word)(&ohdr->oh_sz) +||ohdr->oh_string==0){ +GC_err_printf( +"%s %p in or near object at %p(,appr. sz=%lu)\n", +msg,(void*)clobbered_addr,p, +(unsigned long)(GC_size((ptr_t)ohdr)- DEBUG_BYTES)); +} else { +GC_err_printf("%s %p in or near object at %p (%s:%d,sz=%lu)\n", +msg,(void*)clobbered_addr,p, +(word)(ohdr->oh_string)< HBLKSIZE?"(smashed string)": +ohdr->oh_string[0]=='\0'?"EMPTY(smashed?)": +ohdr->oh_string, +GET_OH_LINENUM(ohdr),(unsigned long)(ohdr->oh_sz)); +PRINT_CALL_CHAIN(ohdr); +} +} +STATIC void GC_check_heap_proc (void); +STATIC void GC_print_all_smashed_proc (void); +#else +STATIC void GC_do_nothing(void){} +#endif +GC_INNER void GC_start_debugging_inner(void) +{ +GC_ASSERT(I_HOLD_LOCK()); +#ifndef SHORT_DBG_HDRS +GC_check_heap=GC_check_heap_proc; +GC_print_all_smashed=GC_print_all_smashed_proc; +#else +GC_check_heap=GC_do_nothing; +GC_print_all_smashed=GC_do_nothing; +#endif +GC_print_heap_obj=GC_debug_print_heap_obj_proc; +GC_debugging_started=TRUE; +GC_register_displacement_inner((word)sizeof(oh)); +#if defined(CPPCHECK) +GC_noop1(GC_debug_header_size); +#endif +} +const size_t GC_debug_header_size=sizeof(oh); +GC_API size_t GC_CALL GC_get_debug_header_size(void){ +return sizeof(oh); +} +GC_API void GC_CALL GC_debug_register_displacement(size_t offset) +{ +DCL_LOCK_STATE; +LOCK(); +GC_register_displacement_inner(offset); +GC_register_displacement_inner((word)sizeof(oh)+offset); +UNLOCK(); +} +#ifdef GC_ADD_CALLER +#if defined(HAVE_DLADDR)&&defined(GC_HAVE_RETURN_ADDR_PARENT) +#include <dlfcn.h> +STATIC void GC_caller_func_offset(word ad,const char**symp,int*offp) +{ +Dl_info caller; +if (ad&&dladdr((void*)ad,&caller)&&caller.dli_sname!=NULL){ +*symp=caller.dli_sname; +*offp=(int)((char*)ad - (char*)caller.dli_saddr); +} +if (NULL==*symp){ +*symp="unknown"; +} +} +#else +#define GC_caller_func_offset(ad,symp,offp)(void)(*(symp)="unknown") +#endif +#endif +GC_API GC_ATTR_MALLOC void*GC_CALL GC_debug_malloc(size_t lb, +GC_EXTRA_PARAMS) +{ +void*result; +result=GC_malloc(SIZET_SAT_ADD(lb,DEBUG_BYTES)); +#ifdef GC_ADD_CALLER +if (s==NULL){ +GC_caller_func_offset(ra,&s,&i); +} +#endif +return store_debug_info(result,lb,"GC_debug_malloc",OPT_RA s,i); +} +GC_API GC_ATTR_MALLOC void*GC_CALL +GC_debug_malloc_ignore_off_page(size_t lb,GC_EXTRA_PARAMS) +{ +void*result=GC_malloc_ignore_off_page(SIZET_SAT_ADD(lb,DEBUG_BYTES)); +return store_debug_info(result,lb,"GC_debug_malloc_ignore_off_page", +OPT_RA s,i); +} +GC_API GC_ATTR_MALLOC void*GC_CALL +GC_debug_malloc_atomic_ignore_off_page(size_t lb,GC_EXTRA_PARAMS) +{ +void*result=GC_malloc_atomic_ignore_off_page( +SIZET_SAT_ADD(lb,DEBUG_BYTES)); +return store_debug_info(result,lb, +"GC_debug_malloc_atomic_ignore_off_page", +OPT_RA s,i); +} +STATIC void*GC_debug_generic_malloc(size_t lb,int knd,GC_EXTRA_PARAMS) +{ +void*result=GC_generic_malloc(SIZET_SAT_ADD(lb,DEBUG_BYTES),knd); +return store_debug_info(result,lb,"GC_debug_generic_malloc", +OPT_RA s,i); +} +#ifdef DBG_HDRS_ALL +GC_INNER void*GC_debug_generic_malloc_inner(size_t lb,int k) +{ +void*result; +GC_ASSERT(I_HOLD_LOCK()); +result=GC_generic_malloc_inner(SIZET_SAT_ADD(lb,DEBUG_BYTES),k); +if (NULL==result){ +GC_err_printf("GC internal allocation (%lu bytes)returning NULL\n", 
+(unsigned long)lb); +return(0); +} +if (!GC_debugging_started){ +GC_start_debugging_inner(); +} +ADD_CALL_CHAIN(result,GC_RETURN_ADDR); +return (GC_store_debug_info_inner(result,(word)lb,"INTERNAL",0)); +} +GC_INNER void*GC_debug_generic_malloc_inner_ignore_off_page(size_t lb, +int k) +{ +void*result; +GC_ASSERT(I_HOLD_LOCK()); +result=GC_generic_malloc_inner_ignore_off_page( +SIZET_SAT_ADD(lb,DEBUG_BYTES),k); +if (NULL==result){ +GC_err_printf("GC internal allocation (%lu bytes)returning NULL\n", +(unsigned long)lb); +return(0); +} +if (!GC_debugging_started){ +GC_start_debugging_inner(); +} +ADD_CALL_CHAIN(result,GC_RETURN_ADDR); +return (GC_store_debug_info_inner(result,(word)lb,"INTERNAL",0)); +} +#endif +#ifndef CPPCHECK +GC_API void*GC_CALL GC_debug_malloc_stubborn(size_t lb,GC_EXTRA_PARAMS) +{ +return GC_debug_malloc(lb,OPT_RA s,i); +} +GC_API void GC_CALL GC_debug_change_stubborn( +const void*p GC_ATTR_UNUSED){} +#endif +GC_API void GC_CALL GC_debug_end_stubborn_change(const void*p) +{ +const void*q=GC_base_C(p); +if (NULL==q){ +ABORT_ARG1("GC_debug_end_stubborn_change:bad arg",":%p",p); +} +GC_end_stubborn_change(q); +} +GC_API void GC_CALL GC_debug_ptr_store_and_dirty(void*p,const void*q) +{ +*(void**)GC_is_visible(p)=GC_is_valid_displacement((void*)q); +GC_debug_end_stubborn_change(p); +REACHABLE_AFTER_DIRTY(q); +} +GC_API GC_ATTR_MALLOC void*GC_CALL GC_debug_malloc_atomic(size_t lb, +GC_EXTRA_PARAMS) +{ +void*result=GC_malloc_atomic(SIZET_SAT_ADD(lb,DEBUG_BYTES)); +return store_debug_info(result,lb,"GC_debug_malloc_atomic", +OPT_RA s,i); +} +GC_API GC_ATTR_MALLOC char*GC_CALL GC_debug_strdup(const char*str, +GC_EXTRA_PARAMS) +{ +char*copy; +size_t lb; +if (str==NULL){ +if (GC_find_leak) +GC_err_printf("strdup(NULL)behavior is undefined\n"); +return NULL; +} +lb=strlen(str)+1; +copy=(char*)GC_debug_malloc_atomic(lb,OPT_RA s,i); +if (copy==NULL){ +#ifndef MSWINCE +errno=ENOMEM; +#endif +return NULL; +} +BCOPY(str,copy,lb); +return copy; +} +GC_API GC_ATTR_MALLOC char*GC_CALL GC_debug_strndup(const char*str, +size_t size,GC_EXTRA_PARAMS) +{ +char*copy; +size_t len=strlen(str); +if (len > size) +len=size; +copy=(char*)GC_debug_malloc_atomic(len+1,OPT_RA s,i); +if (copy==NULL){ +#ifndef MSWINCE +errno=ENOMEM; +#endif +return NULL; +} +if (len > 0) +BCOPY(str,copy,len); +copy[len]='\0'; +return copy; +} +#ifdef GC_REQUIRE_WCSDUP +#include <wchar.h> +GC_API GC_ATTR_MALLOC wchar_t*GC_CALL GC_debug_wcsdup(const wchar_t*str, +GC_EXTRA_PARAMS) +{ +size_t lb=(wcslen(str)+1)*sizeof(wchar_t); +wchar_t*copy=(wchar_t*)GC_debug_malloc_atomic(lb,OPT_RA s,i); +if (copy==NULL){ +#ifndef MSWINCE +errno=ENOMEM; +#endif +return NULL; +} +BCOPY(str,copy,lb); +return copy; +} +#endif +GC_API GC_ATTR_MALLOC void*GC_CALL GC_debug_malloc_uncollectable(size_t lb, +GC_EXTRA_PARAMS) +{ +void*result=GC_malloc_uncollectable( +SIZET_SAT_ADD(lb,UNCOLLECTABLE_DEBUG_BYTES)); +return store_debug_info(result,lb,"GC_debug_malloc_uncollectable", +OPT_RA s,i); +} +#ifdef GC_ATOMIC_UNCOLLECTABLE +GC_API GC_ATTR_MALLOC void*GC_CALL +GC_debug_malloc_atomic_uncollectable(size_t lb,GC_EXTRA_PARAMS) +{ +void*result=GC_malloc_atomic_uncollectable( +SIZET_SAT_ADD(lb,UNCOLLECTABLE_DEBUG_BYTES)); +return store_debug_info(result,lb, +"GC_debug_malloc_atomic_uncollectable", +OPT_RA s,i); +} +#endif +#ifndef GC_FREED_MEM_MARKER +#if CPP_WORDSZ==32 +#define GC_FREED_MEM_MARKER 0xdeadbeef +#else +#define GC_FREED_MEM_MARKER GC_WORD_C(0xEFBEADDEdeadbeef) +#endif +#endif +GC_API void GC_CALL GC_debug_free(void*p) +{ +ptr_t base; +if 
(0==p)return; +base=(ptr_t)GC_base(p); +if (NULL==base){ +#if defined(REDIRECT_MALLOC)&&((defined(NEED_CALLINFO)&&defined(GC_HAVE_BUILTIN_BACKTRACE))||defined(GC_LINUX_THREADS)||defined(GC_SOLARIS_THREADS)||defined(MSWIN32)) +if (!GC_is_heap_ptr(p))return; +#endif +ABORT_ARG1("Invalid pointer passed to free()",":%p",p); +} +if ((ptr_t)p - (ptr_t)base!=sizeof(oh)){ +#if defined(REDIRECT_FREE)&&defined(USE_PROC_FOR_LIBRARIES) +#endif +GC_err_printf( +"GC_debug_free called on pointer %p w/o debugging info\n",p); +} else { +#ifndef SHORT_DBG_HDRS +ptr_t clobbered=GC_check_annotated_obj((oh*)base); +word sz=GC_size(base); +if (clobbered!=0){ +GC_have_errors=TRUE; +if (((oh*)base)->oh_sz==sz){ +GC_print_smashed_obj( +"GC_debug_free:found previously deallocated (?)object at", +p,clobbered); +return; +} else { +GC_print_smashed_obj("GC_debug_free:found smashed location at", +p,clobbered); +} +} +((oh*)base)->oh_sz=sz; +#endif +} +if (GC_find_leak +#ifndef SHORT_DBG_HDRS +&&((ptr_t)p - (ptr_t)base!=sizeof(oh)||!GC_findleak_delay_free) +#endif +){ +GC_free(base); +} else { +hdr*hhdr=HDR(p); +if (hhdr->hb_obj_kind==UNCOLLECTABLE +#ifdef GC_ATOMIC_UNCOLLECTABLE +||hhdr->hb_obj_kind==AUNCOLLECTABLE +#endif +){ +GC_free(base); +} else { +word i; +word sz=hhdr->hb_sz; +word obj_sz=BYTES_TO_WORDS(sz - sizeof(oh)); +for (i=0;i < obj_sz;++i) +((word*)p)[i]=GC_FREED_MEM_MARKER; +GC_ASSERT((word*)p+i==(word*)(base+sz)); +LOCK(); +GC_bytes_freed+=sz; +UNLOCK(); +} +} +} +#if defined(THREADS)&&defined(DBG_HDRS_ALL) +GC_INNER void GC_debug_free_inner(void*p) +{ +ptr_t base=(ptr_t)GC_base(p); +GC_ASSERT((ptr_t)p - (ptr_t)base==sizeof(oh)); +#ifdef LINT2 +if (!base)ABORT("Invalid GC_debug_free_inner argument"); +#endif +#ifndef SHORT_DBG_HDRS +((oh*)base)->oh_sz=GC_size(base); +#endif +GC_free_inner(base); +} +#endif +GC_API void*GC_CALL GC_debug_realloc(void*p,size_t lb,GC_EXTRA_PARAMS) +{ +void*base; +void*result; +hdr*hhdr; +if (p==0){ +return GC_debug_malloc(lb,OPT_RA s,i); +} +if (0==lb){ +GC_debug_free(p); +return NULL; +} +#ifdef GC_ADD_CALLER +if (s==NULL){ +GC_caller_func_offset(ra,&s,&i); +} +#endif +base=GC_base(p); +if (base==0){ +ABORT_ARG1("Invalid pointer passed to realloc()",":%p",p); +} +if ((ptr_t)p - (ptr_t)base!=sizeof(oh)){ +GC_err_printf( +"GC_debug_realloc called on pointer %p w/o debugging info\n",p); +return(GC_realloc(p,lb)); +} +hhdr=HDR(base); +switch (hhdr->hb_obj_kind){ +case NORMAL: +result=GC_debug_malloc(lb,OPT_RA s,i); +break; +case PTRFREE: +result=GC_debug_malloc_atomic(lb,OPT_RA s,i); +break; +case UNCOLLECTABLE: +result=GC_debug_malloc_uncollectable(lb,OPT_RA s,i); +break; +#ifdef GC_ATOMIC_UNCOLLECTABLE +case AUNCOLLECTABLE: +result=GC_debug_malloc_atomic_uncollectable(lb,OPT_RA s,i); +break; +#endif +default: +result=NULL; +ABORT_RET("GC_debug_realloc:encountered bad kind"); +} +if (result!=NULL){ +size_t old_sz; +#ifdef SHORT_DBG_HDRS +old_sz=GC_size(base)- sizeof(oh); +#else +old_sz=((oh*)base)->oh_sz; +#endif +if (old_sz > 0) +BCOPY(p,result,old_sz < lb?old_sz:lb); +GC_debug_free(p); +} +return(result); +} +GC_API GC_ATTR_MALLOC void*GC_CALL +GC_debug_generic_or_special_malloc(size_t lb,int knd,GC_EXTRA_PARAMS) +{ +switch (knd){ +case PTRFREE: +return GC_debug_malloc_atomic(lb,OPT_RA s,i); +case NORMAL: +return GC_debug_malloc(lb,OPT_RA s,i); +case UNCOLLECTABLE: +return GC_debug_malloc_uncollectable(lb,OPT_RA s,i); +#ifdef GC_ATOMIC_UNCOLLECTABLE +case AUNCOLLECTABLE: +return GC_debug_malloc_atomic_uncollectable(lb,OPT_RA s,i); +#endif +default: +return 
GC_debug_generic_malloc(lb,knd,OPT_RA s,i); +} +} +#ifndef SHORT_DBG_HDRS +#ifndef MAX_SMASHED +#define MAX_SMASHED 20 +#endif +STATIC ptr_t GC_smashed[MAX_SMASHED]={0}; +STATIC unsigned GC_n_smashed=0; +STATIC void GC_add_smashed(ptr_t smashed) +{ +GC_ASSERT(GC_is_marked(GC_base(smashed))); +GC_smashed[GC_n_smashed]=smashed; +if (GC_n_smashed < MAX_SMASHED - 1)++GC_n_smashed; +GC_have_errors=TRUE; +} +STATIC void GC_print_all_smashed_proc(void) +{ +unsigned i; +GC_ASSERT(I_DONT_HOLD_LOCK()); +if (GC_n_smashed==0)return; +GC_err_printf("GC_check_heap_block:found %u smashed heap objects:\n", +GC_n_smashed); +for (i=0;i < GC_n_smashed;++i){ +ptr_t base=(ptr_t)GC_base(GC_smashed[i]); +#ifdef LINT2 +if (!base)ABORT("Invalid GC_smashed element"); +#endif +GC_print_smashed_obj("",base+sizeof(oh),GC_smashed[i]); +GC_smashed[i]=0; +} +GC_n_smashed=0; +} +STATIC void GC_check_heap_block(struct hblk*hbp,word dummy GC_ATTR_UNUSED) +{ +struct hblkhdr*hhdr=HDR(hbp); +word sz=hhdr->hb_sz; +word bit_no; +char*p,*plim; +p=hbp->hb_body; +if (sz > MAXOBJBYTES){ +plim=p; +} else { +plim=hbp->hb_body+HBLKSIZE - sz; +} +for (bit_no=0;(word)p<=(word)plim; +bit_no+=MARK_BIT_OFFSET(sz),p+=sz){ +if (mark_bit_from_hdr(hhdr,bit_no)&&GC_HAS_DEBUG_INFO((ptr_t)p)){ +ptr_t clobbered=GC_check_annotated_obj((oh*)p); +if (clobbered!=0) +GC_add_smashed(clobbered); +} +} +} +STATIC void GC_check_heap_proc(void) +{ +GC_STATIC_ASSERT((sizeof(oh)&(GRANULE_BYTES - 1))==0); +GC_apply_to_all_blocks(GC_check_heap_block,0); +} +GC_INNER GC_bool GC_check_leaked(ptr_t base) +{ +word i; +word obj_sz; +word*p; +if ( +#if defined(KEEP_BACK_PTRS)||defined(MAKE_BACK_GRAPH) +(*(word*)base&1)!=0&& +#endif +GC_has_other_debug_info(base)>=0) +return TRUE; +p=(word*)(base+sizeof(oh)); +obj_sz=BYTES_TO_WORDS(HDR(base)->hb_sz - sizeof(oh)); +for (i=0;i < obj_sz;++i) +if (p[i]!=GC_FREED_MEM_MARKER){ +GC_set_mark_bit(base); +GC_add_smashed((ptr_t)(&p[i])); +break; +} +return FALSE; +} +#endif +#ifndef GC_NO_FINALIZATION +struct closure { +GC_finalization_proc cl_fn; +void*cl_data; +}; +STATIC void*GC_make_closure(GC_finalization_proc fn,void*data) +{ +struct closure*result= +#ifdef DBG_HDRS_ALL +(struct closure*)GC_debug_malloc(sizeof (struct closure), +GC_EXTRAS); +#else +(struct closure*)GC_malloc(sizeof (struct closure)); +#endif +if (result!=0){ +result->cl_fn=fn; +result->cl_data=data; +} +return((void*)result); +} +STATIC void GC_CALLBACK GC_debug_invoke_finalizer(void*obj,void*data) +{ +struct closure*cl=(struct closure*)data; +(*(cl->cl_fn))((void*)((char*)obj+sizeof(oh)),cl->cl_data); +} +#define OFN_UNSET ((GC_finalization_proc)~(signed_word)0) +static void store_old(void*obj,GC_finalization_proc my_old_fn, +struct closure*my_old_cd,GC_finalization_proc*ofn, +void**ocd) +{ +if (0!=my_old_fn){ +if (my_old_fn==OFN_UNSET){ +return; +} +if (my_old_fn!=GC_debug_invoke_finalizer){ +GC_err_printf("Debuggable object at %p had a non-debug finalizer\n", +obj); +} else { +if (ofn)*ofn=my_old_cd->cl_fn; +if (ocd)*ocd=my_old_cd->cl_data; +} +} else { +if (ofn)*ofn=0; +if (ocd)*ocd=0; +} +} +GC_API void GC_CALL GC_debug_register_finalizer(void*obj, +GC_finalization_proc fn, +void*cd,GC_finalization_proc*ofn, +void**ocd) +{ +GC_finalization_proc my_old_fn=OFN_UNSET; +void*my_old_cd; +ptr_t base=(ptr_t)GC_base(obj); +if (NULL==base){ +if (ocd)*ocd=0; +if (ofn)*ofn=0; +return; +} +if ((ptr_t)obj - base!=sizeof(oh)){ +GC_err_printf("GC_debug_register_finalizer called with" +" non-base-pointer %p\n",obj); +} +if (0==fn){ 
+GC_register_finalizer(base,0,0,&my_old_fn,&my_old_cd); +} else { +cd=GC_make_closure(fn,cd); +if (cd==0)return; +GC_register_finalizer(base,GC_debug_invoke_finalizer, +cd,&my_old_fn,&my_old_cd); +} +store_old(obj,my_old_fn,(struct closure*)my_old_cd,ofn,ocd); +} +GC_API void GC_CALL GC_debug_register_finalizer_no_order +(void*obj,GC_finalization_proc fn, +void*cd,GC_finalization_proc*ofn, +void**ocd) +{ +GC_finalization_proc my_old_fn=OFN_UNSET; +void*my_old_cd; +ptr_t base=(ptr_t)GC_base(obj); +if (NULL==base){ +if (ocd)*ocd=0; +if (ofn)*ofn=0; +return; +} +if ((ptr_t)obj - base!=sizeof(oh)){ +GC_err_printf("GC_debug_register_finalizer_no_order called with" +" non-base-pointer %p\n",obj); +} +if (0==fn){ +GC_register_finalizer_no_order(base,0,0,&my_old_fn,&my_old_cd); +} else { +cd=GC_make_closure(fn,cd); +if (cd==0)return; +GC_register_finalizer_no_order(base,GC_debug_invoke_finalizer, +cd,&my_old_fn,&my_old_cd); +} +store_old(obj,my_old_fn,(struct closure*)my_old_cd,ofn,ocd); +} +GC_API void GC_CALL GC_debug_register_finalizer_unreachable +(void*obj,GC_finalization_proc fn, +void*cd,GC_finalization_proc*ofn, +void**ocd) +{ +GC_finalization_proc my_old_fn=OFN_UNSET; +void*my_old_cd; +ptr_t base=(ptr_t)GC_base(obj); +if (NULL==base){ +if (ocd)*ocd=0; +if (ofn)*ofn=0; +return; +} +if ((ptr_t)obj - base!=sizeof(oh)){ +GC_err_printf("GC_debug_register_finalizer_unreachable called with" +" non-base-pointer %p\n",obj); +} +if (0==fn){ +GC_register_finalizer_unreachable(base,0,0,&my_old_fn,&my_old_cd); +} else { +cd=GC_make_closure(fn,cd); +if (cd==0)return; +GC_register_finalizer_unreachable(base,GC_debug_invoke_finalizer, +cd,&my_old_fn,&my_old_cd); +} +store_old(obj,my_old_fn,(struct closure*)my_old_cd,ofn,ocd); +} +GC_API void GC_CALL GC_debug_register_finalizer_ignore_self +(void*obj,GC_finalization_proc fn, +void*cd,GC_finalization_proc*ofn, +void**ocd) +{ +GC_finalization_proc my_old_fn=OFN_UNSET; +void*my_old_cd; +ptr_t base=(ptr_t)GC_base(obj); +if (NULL==base){ +if (ocd)*ocd=0; +if (ofn)*ofn=0; +return; +} +if ((ptr_t)obj - base!=sizeof(oh)){ +GC_err_printf("GC_debug_register_finalizer_ignore_self called with" +" non-base-pointer %p\n",obj); +} +if (0==fn){ +GC_register_finalizer_ignore_self(base,0,0,&my_old_fn,&my_old_cd); +} else { +cd=GC_make_closure(fn,cd); +if (cd==0)return; +GC_register_finalizer_ignore_self(base,GC_debug_invoke_finalizer, +cd,&my_old_fn,&my_old_cd); +} +store_old(obj,my_old_fn,(struct closure*)my_old_cd,ofn,ocd); +} +#endif +GC_API GC_ATTR_MALLOC void*GC_CALL GC_debug_malloc_replacement(size_t lb) +{ +return GC_debug_malloc(lb,GC_DBG_EXTRAS); +} +GC_API void*GC_CALL GC_debug_realloc_replacement(void*p,size_t lb) +{ +return GC_debug_realloc(p,lb,GC_DBG_EXTRAS); +} +#ifndef GC_NO_FINALIZATION +#ifndef GC_JAVAXFC_H +#define GC_JAVAXFC_H +#ifndef GC_H +#endif +#ifdef __cplusplus +extern "C" { +#endif +GC_API void GC_CALL GC_finalize_all(void); +#ifdef GC_THREADS +#ifndef GC_SUSPEND_THREAD_ID +#define GC_SUSPEND_THREAD_ID void* +#endif +GC_API void GC_CALL GC_suspend_thread(GC_SUSPEND_THREAD_ID); +GC_API void GC_CALL GC_resume_thread(GC_SUSPEND_THREAD_ID); +GC_API int GC_CALL GC_is_thread_suspended(GC_SUSPEND_THREAD_ID); +#endif +#ifdef __cplusplus +} +#endif +#endif +typedef void (*finalization_mark_proc)(ptr_t); +#define HASH3(addr,size,log_size)((((word)(addr)>>3)^((word)(addr)>>(3+(log_size))))&((size)- 1)) +#define HASH2(addr,log_size)HASH3(addr,(word)1<<(log_size),log_size) +struct hash_chain_entry { +word hidden_key; +struct hash_chain_entry*next; +}; 
+struct disappearing_link { +struct hash_chain_entry prolog; +#define dl_hidden_link prolog.hidden_key +#define dl_next(x)(struct disappearing_link*)((x)->prolog.next) +#define dl_set_next(x,y)(void)((x)->prolog.next=(struct hash_chain_entry*)(y)) +word dl_hidden_obj; +}; +struct finalizable_object { +struct hash_chain_entry prolog; +#define fo_hidden_base prolog.hidden_key +#define fo_next(x)(struct finalizable_object*)((x)->prolog.next) +#define fo_set_next(x,y)((x)->prolog.next=(struct hash_chain_entry*)(y)) +GC_finalization_proc fo_fn; +ptr_t fo_client_data; +word fo_object_size; +finalization_mark_proc fo_mark_proc; +}; +#ifdef AO_HAVE_store +#define SET_FINALIZE_NOW(fo)AO_store((volatile AO_t*)&GC_fnlz_roots.finalize_now,(AO_t)(fo)) +#else +#define SET_FINALIZE_NOW(fo)(void)(GC_fnlz_roots.finalize_now=(fo)) +#endif +GC_API void GC_CALL GC_push_finalizer_structures(void) +{ +GC_ASSERT((word)(&GC_dl_hashtbl.head)% sizeof(word)==0); +GC_ASSERT((word)(&GC_fnlz_roots)% sizeof(word)==0); +#ifndef GC_LONG_REFS_NOT_NEEDED +GC_ASSERT((word)(&GC_ll_hashtbl.head)% sizeof(word)==0); +GC_PUSH_ALL_SYM(GC_ll_hashtbl.head); +#endif +GC_PUSH_ALL_SYM(GC_dl_hashtbl.head); +GC_PUSH_ALL_SYM(GC_fnlz_roots); +} +#ifndef GC_ON_GROW_LOG_SIZE_MIN +#define GC_ON_GROW_LOG_SIZE_MIN CPP_LOG_HBLKSIZE +#endif +STATIC void GC_grow_table(struct hash_chain_entry***table, +unsigned*log_size_ptr,word*entries_ptr) +{ +word i; +struct hash_chain_entry*p; +unsigned log_old_size=*log_size_ptr; +unsigned log_new_size=log_old_size+1; +word old_size=*table==NULL?0:(word)1<<log_old_size; +word new_size=(word)1<<log_new_size; +struct hash_chain_entry**new_table; +GC_ASSERT(I_HOLD_LOCK()); +if (log_old_size>=GC_ON_GROW_LOG_SIZE_MIN&&!GC_incremental){ +IF_CANCEL(int cancel_state;) +DISABLE_CANCEL(cancel_state); +(void)GC_try_to_collect_inner(GC_never_stop_func); +RESTORE_CANCEL(cancel_state); +if (*entries_ptr < ((word)1<<log_old_size) - (*entries_ptr>>2)) +return; +} +new_table=(struct hash_chain_entry**) +GC_INTERNAL_MALLOC_IGNORE_OFF_PAGE( +(size_t)new_size*sizeof(struct hash_chain_entry*), +NORMAL); +if (new_table==0){ +if (*table==0){ +ABORT("Insufficient space for initial table allocation"); +} else { +return; +} +} +for (i=0;i < old_size;i++){ +p=(*table)[i]; +while (p!=0){ +ptr_t real_key=(ptr_t)GC_REVEAL_POINTER(p->hidden_key); +struct hash_chain_entry*next=p->next; +size_t new_hash=HASH3(real_key,new_size,log_new_size); +p->next=new_table[new_hash]; +GC_dirty(p); +new_table[new_hash]=p; +p=next; +} +} +*log_size_ptr=log_new_size; +*table=new_table; +GC_dirty(new_table); +} +GC_API int GC_CALL GC_register_disappearing_link(void**link) +{ +ptr_t base; +base=(ptr_t)GC_base(link); +if (base==0) +ABORT("Bad arg to GC_register_disappearing_link"); +return(GC_general_register_disappearing_link(link,base)); +} +STATIC int GC_register_disappearing_link_inner( +struct dl_hashtbl_s*dl_hashtbl,void**link, +const void*obj,const char*tbl_log_name) +{ +struct disappearing_link*curr_dl; +size_t index; +struct disappearing_link*new_dl; +DCL_LOCK_STATE; +if (EXPECT(GC_find_leak,FALSE))return GC_UNIMPLEMENTED; +LOCK(); +GC_ASSERT(obj!=NULL&&GC_base_C(obj)==obj); +if (EXPECT(NULL==dl_hashtbl->head,FALSE) +||EXPECT(dl_hashtbl->entries +> ((word)1<<dl_hashtbl->log_size),FALSE)){ +GC_grow_table((struct hash_chain_entry***)&dl_hashtbl->head, +&dl_hashtbl->log_size,&dl_hashtbl->entries); +GC_COND_LOG_PRINTF("Grew %s table to %u entries\n",tbl_log_name, +1U<<dl_hashtbl->log_size); +} +index=HASH2(link,dl_hashtbl->log_size); +for (curr_dl=dl_hashtbl->head[index];curr_dl!=0; +curr_dl=dl_next(curr_dl)){ +if (curr_dl->dl_hidden_link==GC_HIDE_POINTER(link)){ +curr_dl->dl_hidden_obj=GC_HIDE_POINTER(obj); +UNLOCK(); +return GC_DUPLICATE; +} 
+} +new_dl=(struct disappearing_link*) +GC_INTERNAL_MALLOC(sizeof(struct disappearing_link),NORMAL); +if (0==new_dl){ +GC_oom_func oom_fn=GC_oom_fn; +UNLOCK(); +new_dl=(struct disappearing_link*) +(*oom_fn)(sizeof(struct disappearing_link)); +if (0==new_dl){ +return GC_NO_MEMORY; +} +LOCK(); +index=HASH2(link,dl_hashtbl->log_size); +for (curr_dl=dl_hashtbl->head[index];curr_dl!=0; +curr_dl=dl_next(curr_dl)){ +if (curr_dl->dl_hidden_link==GC_HIDE_POINTER(link)){ +curr_dl->dl_hidden_obj=GC_HIDE_POINTER(obj); +UNLOCK(); +#ifndef DBG_HDRS_ALL +GC_free((void*)new_dl); +#endif +return GC_DUPLICATE; +} +} +} +new_dl->dl_hidden_obj=GC_HIDE_POINTER(obj); +new_dl->dl_hidden_link=GC_HIDE_POINTER(link); +dl_set_next(new_dl,dl_hashtbl->head[index]); +GC_dirty(new_dl); +dl_hashtbl->head[index]=new_dl; +dl_hashtbl->entries++; +GC_dirty(dl_hashtbl->head+index); +UNLOCK(); +return GC_SUCCESS; +} +GC_API int GC_CALL GC_general_register_disappearing_link(void**link, +const void*obj) +{ +if (((word)link&(ALIGNMENT-1))!=0||!NONNULL_ARG_NOT_NULL(link)) +ABORT("Bad arg to GC_general_register_disappearing_link"); +return GC_register_disappearing_link_inner(&GC_dl_hashtbl,link,obj, +"dl"); +} +#ifdef DBG_HDRS_ALL +#define FREE_DL_ENTRY(curr_dl)dl_set_next(curr_dl,NULL) +#else +#define FREE_DL_ENTRY(curr_dl)GC_free(curr_dl) +#endif +GC_INLINE struct disappearing_link*GC_unregister_disappearing_link_inner( +struct dl_hashtbl_s*dl_hashtbl,void**link) +{ +struct disappearing_link*curr_dl; +struct disappearing_link*prev_dl=NULL; +size_t index; +GC_ASSERT(I_HOLD_LOCK()); +if (EXPECT(NULL==dl_hashtbl->head,FALSE))return NULL; +index=HASH2(link,dl_hashtbl->log_size); +for (curr_dl=dl_hashtbl->head[index];curr_dl; +curr_dl=dl_next(curr_dl)){ +if (curr_dl->dl_hidden_link==GC_HIDE_POINTER(link)){ +if (NULL==prev_dl){ +dl_hashtbl->head[index]=dl_next(curr_dl); +GC_dirty(dl_hashtbl->head+index); +} else { +dl_set_next(prev_dl,dl_next(curr_dl)); +GC_dirty(prev_dl); +} +dl_hashtbl->entries--; +break; +} +prev_dl=curr_dl; +} +return curr_dl; +} +GC_API int GC_CALL GC_unregister_disappearing_link(void**link) +{ +struct disappearing_link*curr_dl; +DCL_LOCK_STATE; +if (((word)link&(ALIGNMENT-1))!=0)return(0); +LOCK(); +curr_dl=GC_unregister_disappearing_link_inner(&GC_dl_hashtbl,link); +UNLOCK(); +if (NULL==curr_dl)return 0; +FREE_DL_ENTRY(curr_dl); +return 1; +} +#ifndef GC_TOGGLE_REFS_NOT_NEEDED +typedef union toggle_ref_u GCToggleRef; +STATIC GC_toggleref_func GC_toggleref_callback=0; +GC_INNER void GC_process_togglerefs(void) +{ +size_t i; +size_t new_size=0; +GC_bool needs_barrier=FALSE; +GC_ASSERT(I_HOLD_LOCK()); +for (i=0;i < GC_toggleref_array_size;++i){ +GCToggleRef r=GC_toggleref_arr[i]; +void*obj=r.strong_ref; +if (((word)obj&1)!=0){ +obj=GC_REVEAL_POINTER(r.weak_ref); +} +if (NULL==obj){ +continue; +} +switch (GC_toggleref_callback(obj)){ +case GC_TOGGLE_REF_DROP: +break; +case GC_TOGGLE_REF_STRONG: +GC_toggleref_arr[new_size++].strong_ref=obj; +needs_barrier=TRUE; +break; +case GC_TOGGLE_REF_WEAK: +GC_toggleref_arr[new_size++].weak_ref=GC_HIDE_POINTER(obj); +break; +default: +ABORT("Bad toggle-ref status returned by callback"); +} +} +if (new_size < GC_toggleref_array_size){ +BZERO(&GC_toggleref_arr[new_size], +(GC_toggleref_array_size - new_size)*sizeof(GCToggleRef)); +GC_toggleref_array_size=new_size; +} +if (needs_barrier) +GC_dirty(GC_toggleref_arr); +} +STATIC void GC_normal_finalize_mark_proc(ptr_t); +static void push_and_mark_object(void*p) +{ +GC_normal_finalize_mark_proc((ptr_t)p); +while 
(!GC_mark_stack_empty()){ +MARK_FROM_MARK_STACK(); +} +GC_set_mark_bit(p); +if (GC_mark_state!=MS_NONE){ +while (!GC_mark_some(0)){ +} +} +} +STATIC void GC_mark_togglerefs(void) +{ +size_t i; +if (NULL==GC_toggleref_arr) +return; +GC_set_mark_bit(GC_toggleref_arr); +for (i=0;i < GC_toggleref_array_size;++i){ +void*obj=GC_toggleref_arr[i].strong_ref; +if (obj!=NULL&&((word)obj&1)==0){ +push_and_mark_object(obj); +} +} +} +STATIC void GC_clear_togglerefs(void) +{ +size_t i; +for (i=0;i < GC_toggleref_array_size;++i){ +if ((GC_toggleref_arr[i].weak_ref&1)!=0){ +if (!GC_is_marked(GC_REVEAL_POINTER(GC_toggleref_arr[i].weak_ref))){ +GC_toggleref_arr[i].weak_ref=0; +} else { +} +} +} +} +GC_API void GC_CALL GC_set_toggleref_func(GC_toggleref_func fn) +{ +DCL_LOCK_STATE; +LOCK(); +GC_toggleref_callback=fn; +UNLOCK(); +} +GC_API GC_toggleref_func GC_CALL GC_get_toggleref_func(void) +{ +GC_toggleref_func fn; +DCL_LOCK_STATE; +LOCK(); +fn=GC_toggleref_callback; +UNLOCK(); +return fn; +} +static GC_bool ensure_toggleref_capacity(size_t capacity_inc) +{ +GC_ASSERT(I_HOLD_LOCK()); +if (NULL==GC_toggleref_arr){ +GC_toggleref_array_capacity=32; +GC_toggleref_arr=(GCToggleRef*)GC_INTERNAL_MALLOC_IGNORE_OFF_PAGE( +GC_toggleref_array_capacity*sizeof(GCToggleRef), +NORMAL); +if (NULL==GC_toggleref_arr) +return FALSE; +} +if (GC_toggleref_array_size+capacity_inc +>=GC_toggleref_array_capacity){ +GCToggleRef*new_array; +while (GC_toggleref_array_capacity +< GC_toggleref_array_size+capacity_inc){ +GC_toggleref_array_capacity*=2; +if ((GC_toggleref_array_capacity +&((size_t)1<<(sizeof(size_t)*8 - 1)))!=0) +return FALSE; +} +new_array=(GCToggleRef*)GC_INTERNAL_MALLOC_IGNORE_OFF_PAGE( +GC_toggleref_array_capacity*sizeof(GCToggleRef), +NORMAL); +if (NULL==new_array) +return FALSE; +if (EXPECT(GC_toggleref_array_size > 0,TRUE)) +BCOPY(GC_toggleref_arr,new_array, +GC_toggleref_array_size*sizeof(GCToggleRef)); +GC_INTERNAL_FREE(GC_toggleref_arr); +GC_toggleref_arr=new_array; +} +return TRUE; +} +GC_API int GC_CALL GC_toggleref_add(void*obj,int is_strong_ref) +{ +int res=GC_SUCCESS; +DCL_LOCK_STATE; +GC_ASSERT(NONNULL_ARG_NOT_NULL(obj)); +LOCK(); +if (GC_toggleref_callback!=0){ +if (!ensure_toggleref_capacity(1)){ +res=GC_NO_MEMORY; +} else { +GC_toggleref_arr[GC_toggleref_array_size].strong_ref= +is_strong_ref?obj:(void*)GC_HIDE_POINTER(obj); +if (is_strong_ref) +GC_dirty(GC_toggleref_arr+GC_toggleref_array_size); +GC_toggleref_array_size++; +} +} +UNLOCK(); +return res; +} +#endif +STATIC GC_await_finalize_proc GC_object_finalized_proc=0; +GC_API void GC_CALL GC_set_await_finalize_proc(GC_await_finalize_proc fn) +{ +DCL_LOCK_STATE; +LOCK(); +GC_object_finalized_proc=fn; +UNLOCK(); +} +GC_API GC_await_finalize_proc GC_CALL GC_get_await_finalize_proc(void) +{ +GC_await_finalize_proc fn; +DCL_LOCK_STATE; +LOCK(); +fn=GC_object_finalized_proc; +UNLOCK(); +return fn; +} +#ifndef GC_LONG_REFS_NOT_NEEDED +GC_API int GC_CALL GC_register_long_link(void**link,const void*obj) +{ +if (((word)link&(ALIGNMENT-1))!=0||!NONNULL_ARG_NOT_NULL(link)) +ABORT("Bad arg to GC_register_long_link"); +return GC_register_disappearing_link_inner(&GC_ll_hashtbl,link,obj, +"long dl"); +} +GC_API int GC_CALL GC_unregister_long_link(void**link) +{ +struct disappearing_link*curr_dl; +DCL_LOCK_STATE; +if (((word)link&(ALIGNMENT-1))!=0)return(0); +LOCK(); +curr_dl=GC_unregister_disappearing_link_inner(&GC_ll_hashtbl,link); +UNLOCK(); +if (NULL==curr_dl)return 0; +FREE_DL_ENTRY(curr_dl); +return 1; +} +#endif +#ifndef 
GC_MOVE_DISAPPEARING_LINK_NOT_NEEDED +STATIC int GC_move_disappearing_link_inner( +struct dl_hashtbl_s*dl_hashtbl, +void**link,void**new_link) +{ +struct disappearing_link*curr_dl,*new_dl; +struct disappearing_link*prev_dl=NULL; +size_t curr_index,new_index; +word curr_hidden_link,new_hidden_link; +GC_ASSERT(I_HOLD_LOCK()); +if (EXPECT(NULL==dl_hashtbl->head,FALSE))return GC_NOT_FOUND; +curr_index=HASH2(link,dl_hashtbl->log_size); +curr_hidden_link=GC_HIDE_POINTER(link); +for (curr_dl=dl_hashtbl->head[curr_index];curr_dl; +curr_dl=dl_next(curr_dl)){ +if (curr_dl->dl_hidden_link==curr_hidden_link) +break; +prev_dl=curr_dl; +} +if (EXPECT(NULL==curr_dl,FALSE)){ +return GC_NOT_FOUND; +} else if (link==new_link){ +return GC_SUCCESS; +} +new_index=HASH2(new_link,dl_hashtbl->log_size); +new_hidden_link=GC_HIDE_POINTER(new_link); +for (new_dl=dl_hashtbl->head[new_index];new_dl; +new_dl=dl_next(new_dl)){ +if (new_dl->dl_hidden_link==new_hidden_link){ +return GC_DUPLICATE; +} +} +if (NULL==prev_dl){ +dl_hashtbl->head[curr_index]=dl_next(curr_dl); +} else { +dl_set_next(prev_dl,dl_next(curr_dl)); +GC_dirty(prev_dl); +} +curr_dl->dl_hidden_link=new_hidden_link; +dl_set_next(curr_dl,dl_hashtbl->head[new_index]); +dl_hashtbl->head[new_index]=curr_dl; +GC_dirty(curr_dl); +GC_dirty(dl_hashtbl->head); +return GC_SUCCESS; +} +GC_API int GC_CALL GC_move_disappearing_link(void**link,void**new_link) +{ +int result; +DCL_LOCK_STATE; +if (((word)new_link&(ALIGNMENT-1))!=0 +||!NONNULL_ARG_NOT_NULL(new_link)) +ABORT("Bad new_link arg to GC_move_disappearing_link"); +if (((word)link&(ALIGNMENT-1))!=0) +return GC_NOT_FOUND; +LOCK(); +result=GC_move_disappearing_link_inner(&GC_dl_hashtbl,link,new_link); +UNLOCK(); +return result; +} +#ifndef GC_LONG_REFS_NOT_NEEDED +GC_API int GC_CALL GC_move_long_link(void**link,void**new_link) +{ +int result; +DCL_LOCK_STATE; +if (((word)new_link&(ALIGNMENT-1))!=0 +||!NONNULL_ARG_NOT_NULL(new_link)) +ABORT("Bad new_link arg to GC_move_long_link"); +if (((word)link&(ALIGNMENT-1))!=0) +return GC_NOT_FOUND; +LOCK(); +result=GC_move_disappearing_link_inner(&GC_ll_hashtbl,link,new_link); +UNLOCK(); +return result; +} +#endif +#endif +STATIC void GC_normal_finalize_mark_proc(ptr_t p) +{ +#if defined(_MSC_VER)&&defined(I386) +hdr*hhdr=HDR(p); +#define mark_stack_top GC_mark_stack_top +mse*mark_stack_limit=GC_mark_stack+GC_mark_stack_size; +word descr=hhdr->hb_descr; +if (descr!=0){ +mark_stack_top++; +if ((word)mark_stack_top>=(word)mark_stack_limit){ +mark_stack_top=GC_signal_mark_stack_overflow(mark_stack_top); +} +mark_stack_top->mse_start=p; +mark_stack_top->mse_descr.w=descr; +} +#undef mark_stack_top +#else +GC_mark_stack_top=GC_push_obj(p,HDR(p),GC_mark_stack_top, +GC_mark_stack+GC_mark_stack_size); +#endif +} +STATIC void GC_ignore_self_finalize_mark_proc(ptr_t p) +{ +hdr*hhdr=HDR(p); +word descr=hhdr->hb_descr; +ptr_t q; +ptr_t scan_limit; +ptr_t target_limit=p+hhdr->hb_sz - 1; +if ((descr&GC_DS_TAGS)==GC_DS_LENGTH){ +scan_limit=p+descr - sizeof(word); +} else { +scan_limit=target_limit+1 - sizeof(word); +} +for (q=p;(word)q<=(word)scan_limit;q+=ALIGNMENT){ +word r=*(word*)q; +if (r < (word)p||r > (word)target_limit){ +GC_PUSH_ONE_HEAP(r,q,GC_mark_stack_top); +} +} +} +STATIC void GC_null_finalize_mark_proc(ptr_t p GC_ATTR_UNUSED){} +STATIC void GC_unreachable_finalize_mark_proc(ptr_t p) +{ +GC_normal_finalize_mark_proc(p); +} +STATIC void GC_register_finalizer_inner(void*obj, +GC_finalization_proc fn,void*cd, +GC_finalization_proc*ofn,void**ocd, +finalization_mark_proc mp) +{ 
+struct finalizable_object*curr_fo; +size_t index; +struct finalizable_object*new_fo=0; +hdr*hhdr=NULL; +DCL_LOCK_STATE; +if (EXPECT(GC_find_leak,FALSE))return; +LOCK(); +if (EXPECT(NULL==GC_fnlz_roots.fo_head,FALSE) +||EXPECT(GC_fo_entries > ((word)1<=sizeof(struct finalizable_object)); +if (curr_fo->fo_hidden_base==GC_HIDE_POINTER(obj)){ +if (ocd)*ocd=(void*)(curr_fo->fo_client_data); +if (ofn)*ofn=curr_fo->fo_fn; +if (prev_fo==0){ +GC_fnlz_roots.fo_head[index]=fo_next(curr_fo); +} else { +fo_set_next(prev_fo,fo_next(curr_fo)); +GC_dirty(prev_fo); +} +if (fn==0){ +GC_fo_entries--; +#if!defined(THREADS)&&!defined(DBG_HDRS_ALL) +GC_free((void*)curr_fo); +#endif +} else { +curr_fo->fo_fn=fn; +curr_fo->fo_client_data=(ptr_t)cd; +curr_fo->fo_mark_proc=mp; +GC_dirty(curr_fo); +if (prev_fo==0){ +GC_fnlz_roots.fo_head[index]=curr_fo; +} else { +fo_set_next(prev_fo,curr_fo); +GC_dirty(prev_fo); +} +} +if (NULL==prev_fo) +GC_dirty(GC_fnlz_roots.fo_head+index); +UNLOCK(); +#ifndef DBG_HDRS_ALL +GC_free((void*)new_fo); +#endif +return; +} +prev_fo=curr_fo; +curr_fo=fo_next(curr_fo); +} +if (EXPECT(new_fo!=0,FALSE)){ +GC_ASSERT(fn!=0); +#ifdef LINT2 +if (NULL==hhdr)ABORT("Bad hhdr in GC_register_finalizer_inner"); +#endif +break; +} +if (fn==0){ +if (ocd)*ocd=0; +if (ofn)*ofn=0; +UNLOCK(); +return; +} +GET_HDR(obj,hhdr); +if (EXPECT(0==hhdr,FALSE)){ +if (ocd)*ocd=0; +if (ofn)*ofn=0; +UNLOCK(); +return; +} +new_fo=(struct finalizable_object*) +GC_INTERNAL_MALLOC(sizeof(struct finalizable_object),NORMAL); +if (EXPECT(new_fo!=0,TRUE)) +break; +oom_fn=GC_oom_fn; +UNLOCK(); +new_fo=(struct finalizable_object*) +(*oom_fn)(sizeof(struct finalizable_object)); +if (0==new_fo){ +return; +} +LOCK(); +} +GC_ASSERT(GC_size(new_fo)>=sizeof(struct finalizable_object)); +if (ocd)*ocd=0; +if (ofn)*ofn=0; +new_fo->fo_hidden_base=GC_HIDE_POINTER(obj); +new_fo->fo_fn=fn; +new_fo->fo_client_data=(ptr_t)cd; +new_fo->fo_object_size=hhdr->hb_sz; +new_fo->fo_mark_proc=mp; +fo_set_next(new_fo,GC_fnlz_roots.fo_head[index]); +GC_dirty(new_fo); +GC_fo_entries++; +GC_fnlz_roots.fo_head[index]=new_fo; +GC_dirty(GC_fnlz_roots.fo_head+index); +UNLOCK(); +} +GC_API void GC_CALL GC_register_finalizer(void*obj, +GC_finalization_proc fn,void*cd, +GC_finalization_proc*ofn,void**ocd) +{ +GC_register_finalizer_inner(obj,fn,cd,ofn, +ocd,GC_normal_finalize_mark_proc); +} +GC_API void GC_CALL GC_register_finalizer_ignore_self(void*obj, +GC_finalization_proc fn,void*cd, +GC_finalization_proc*ofn,void**ocd) +{ +GC_register_finalizer_inner(obj,fn,cd,ofn, +ocd,GC_ignore_self_finalize_mark_proc); +} +GC_API void GC_CALL GC_register_finalizer_no_order(void*obj, +GC_finalization_proc fn,void*cd, +GC_finalization_proc*ofn,void**ocd) +{ +GC_register_finalizer_inner(obj,fn,cd,ofn, +ocd,GC_null_finalize_mark_proc); +} +static GC_bool need_unreachable_finalization=FALSE; +GC_API void GC_CALL GC_register_finalizer_unreachable(void*obj, +GC_finalization_proc fn,void*cd, +GC_finalization_proc*ofn,void**ocd) +{ +need_unreachable_finalization=TRUE; +GC_ASSERT(GC_java_finalization); +GC_register_finalizer_inner(obj,fn,cd,ofn, +ocd,GC_unreachable_finalize_mark_proc); +} +#ifndef NO_DEBUGGING +STATIC void GC_dump_finalization_links( +const struct dl_hashtbl_s*dl_hashtbl) +{ +size_t dl_size=(size_t)1<log_size; +size_t i; +if (NULL==dl_hashtbl->head)return; +for (i=0;i < dl_size;i++){ +struct disappearing_link*curr_dl; +for (curr_dl=dl_hashtbl->head[i];curr_dl!=0; +curr_dl=dl_next(curr_dl)){ +ptr_t real_ptr=(ptr_t)GC_REVEAL_POINTER(curr_dl->dl_hidden_obj); +ptr_t 
real_link=(ptr_t)GC_REVEAL_POINTER(curr_dl->dl_hidden_link); +GC_printf("Object:%p,link:%p\n", +(void*)real_ptr,(void*)real_link); +} +} +} +GC_API void GC_CALL GC_dump_finalization(void) +{ +struct finalizable_object*curr_fo; +size_t i; +size_t fo_size=GC_fnlz_roots.fo_head==NULL?0: +(size_t)1<fo_hidden_base); +GC_printf("Finalizable object:%p\n",(void*)real_ptr); +} +} +} +#endif +#ifndef SMALL_CONFIG +STATIC word GC_old_dl_entries=0; +#ifndef GC_LONG_REFS_NOT_NEEDED +STATIC word GC_old_ll_entries=0; +#endif +#endif +#ifndef THREADS +STATIC int GC_finalizer_nested=0; +STATIC unsigned GC_finalizer_skipped=0; +STATIC unsigned char*GC_check_finalizer_nested(void) +{ +unsigned nesting_level=*(unsigned char*)&GC_finalizer_nested; +if (nesting_level){ +if (++GC_finalizer_skipped < (1U<log_size; +GC_bool needs_barrier=FALSE; +GC_ASSERT(I_HOLD_LOCK()); +if (NULL==dl_hashtbl->head)return; +for (i=0;i < dl_size;i++){ +struct disappearing_link*curr_dl,*next_dl; +struct disappearing_link*prev_dl=NULL; +for (curr_dl=dl_hashtbl->head[i];curr_dl!=NULL;curr_dl=next_dl){ +next_dl=dl_next(curr_dl); +if (is_remove_dangling){ +ptr_t real_link=(ptr_t)GC_base(GC_REVEAL_POINTER( +curr_dl->dl_hidden_link)); +if (NULL==real_link||EXPECT(GC_is_marked(real_link),TRUE)){ +prev_dl=curr_dl; +continue; +} +} else { +if (EXPECT(GC_is_marked((ptr_t)GC_REVEAL_POINTER( +curr_dl->dl_hidden_obj)),TRUE)){ +prev_dl=curr_dl; +continue; +} +*(ptr_t*)GC_REVEAL_POINTER(curr_dl->dl_hidden_link)=NULL; +} +if (NULL==prev_dl){ +dl_hashtbl->head[i]=next_dl; +needs_barrier=TRUE; +} else { +dl_set_next(prev_dl,next_dl); +GC_dirty(prev_dl); +} +GC_clear_mark_bit(curr_dl); +dl_hashtbl->entries--; +} +} +if (needs_barrier) +GC_dirty(dl_hashtbl->head); +} +GC_INNER void GC_finalize(void) +{ +struct finalizable_object*curr_fo,*prev_fo,*next_fo; +ptr_t real_ptr; +size_t i; +size_t fo_size=GC_fnlz_roots.fo_head==NULL?0: +(size_t)1<=sizeof(struct finalizable_object)); +real_ptr=(ptr_t)GC_REVEAL_POINTER(curr_fo->fo_hidden_base); +if (!GC_is_marked(real_ptr)){ +GC_MARKED_FOR_FINALIZATION(real_ptr); +GC_MARK_FO(real_ptr,curr_fo->fo_mark_proc); +if (GC_is_marked(real_ptr)){ +WARN("Finalization cycle involving %p\n",real_ptr); +} +} +} +} +GC_bytes_finalized=0; +for (i=0;i < fo_size;i++){ +curr_fo=GC_fnlz_roots.fo_head[i]; +prev_fo=0; +while (curr_fo!=0){ +real_ptr=(ptr_t)GC_REVEAL_POINTER(curr_fo->fo_hidden_base); +if (!GC_is_marked(real_ptr)){ +if (!GC_java_finalization){ +GC_set_mark_bit(real_ptr); +} +next_fo=fo_next(curr_fo); +if (NULL==prev_fo){ +GC_fnlz_roots.fo_head[i]=next_fo; +if (GC_object_finalized_proc){ +GC_dirty(GC_fnlz_roots.fo_head+i); +} else { +needs_barrier=TRUE; +} +} else { +fo_set_next(prev_fo,next_fo); +GC_dirty(prev_fo); +} +GC_fo_entries--; +if (GC_object_finalized_proc) +GC_object_finalized_proc(real_ptr); +fo_set_next(curr_fo,GC_fnlz_roots.finalize_now); +GC_dirty(curr_fo); +SET_FINALIZE_NOW(curr_fo); +curr_fo->fo_hidden_base= +(word)GC_REVEAL_POINTER(curr_fo->fo_hidden_base); +GC_bytes_finalized+= +curr_fo->fo_object_size ++sizeof(struct finalizable_object); +GC_ASSERT(GC_is_marked(GC_base(curr_fo))); +curr_fo=next_fo; +} else { +prev_fo=curr_fo; +curr_fo=fo_next(curr_fo); +} +} +} +if (GC_java_finalization){ +for (curr_fo=GC_fnlz_roots.finalize_now; +curr_fo!=NULL;curr_fo=fo_next(curr_fo)){ +real_ptr=(ptr_t)curr_fo->fo_hidden_base; +if (!GC_is_marked(real_ptr)){ +if (curr_fo->fo_mark_proc==GC_null_finalize_mark_proc){ +GC_MARK_FO(real_ptr,GC_normal_finalize_mark_proc); +} +if 
(curr_fo->fo_mark_proc!=GC_unreachable_finalize_mark_proc){ +GC_set_mark_bit(real_ptr); +} +} +} +if (need_unreachable_finalization){ +curr_fo=GC_fnlz_roots.finalize_now; +GC_ASSERT(NULL==curr_fo||GC_fnlz_roots.fo_head!=NULL); +prev_fo=NULL; +while (curr_fo!=NULL){ +next_fo=fo_next(curr_fo); +if (curr_fo->fo_mark_proc==GC_unreachable_finalize_mark_proc){ +real_ptr=(ptr_t)curr_fo->fo_hidden_base; +if (!GC_is_marked(real_ptr)){ +GC_set_mark_bit(real_ptr); +} else { +if (NULL==prev_fo){ +SET_FINALIZE_NOW(next_fo); +} else { +fo_set_next(prev_fo,next_fo); +GC_dirty(prev_fo); +} +curr_fo->fo_hidden_base= +GC_HIDE_POINTER(curr_fo->fo_hidden_base); +GC_bytes_finalized-= +curr_fo->fo_object_size+sizeof(struct finalizable_object); +i=HASH2(real_ptr,GC_log_fo_table_size); +fo_set_next(curr_fo,GC_fnlz_roots.fo_head[i]); +GC_dirty(curr_fo); +GC_fo_entries++; +GC_fnlz_roots.fo_head[i]=curr_fo; +curr_fo=prev_fo; +needs_barrier=TRUE; +} +} +prev_fo=curr_fo; +curr_fo=next_fo; +} +} +} +if (needs_barrier) +GC_dirty(GC_fnlz_roots.fo_head); +GC_make_disappearing_links_disappear(&GC_dl_hashtbl,TRUE); +#ifndef GC_TOGGLE_REFS_NOT_NEEDED +GC_clear_togglerefs(); +#endif +#ifndef GC_LONG_REFS_NOT_NEEDED +GC_make_disappearing_links_disappear(&GC_ll_hashtbl,FALSE); +GC_make_disappearing_links_disappear(&GC_ll_hashtbl,TRUE); +#endif +if (GC_fail_count){ +#ifdef THREADS +GC_reset_finalizer_nested(); +#else +GC_finalizer_nested=0; +#endif +} +} +#ifndef JAVA_FINALIZATION_NOT_NEEDED +STATIC void GC_enqueue_all_finalizers(void) +{ +struct finalizable_object*next_fo; +size_t i; +size_t fo_size=GC_fnlz_roots.fo_head==NULL?0: +(size_t)1<fo_hidden_base); +GC_MARK_FO(real_ptr,GC_normal_finalize_mark_proc); +GC_set_mark_bit(real_ptr); +next_fo=fo_next(curr_fo); +fo_set_next(curr_fo,GC_fnlz_roots.finalize_now); +GC_dirty(curr_fo); +SET_FINALIZE_NOW(curr_fo); +curr_fo->fo_hidden_base= +(word)GC_REVEAL_POINTER(curr_fo->fo_hidden_base); +GC_bytes_finalized+= +curr_fo->fo_object_size+sizeof(struct finalizable_object); +curr_fo=next_fo; +} +} +GC_fo_entries=0; +} +GC_API void GC_CALL GC_finalize_all(void) +{ +DCL_LOCK_STATE; +LOCK(); +while (GC_fo_entries > 0){ +GC_enqueue_all_finalizers(); +UNLOCK(); +GC_invoke_finalizers(); +LOCK(); +} +UNLOCK(); +} +#endif +GC_API int GC_CALL GC_should_invoke_finalizers(void) +{ +#ifdef AO_HAVE_load +return AO_load((volatile AO_t*)&GC_fnlz_roots.finalize_now)!=0; +#else +return GC_fnlz_roots.finalize_now!=NULL; +#endif +} +GC_API int GC_CALL GC_invoke_finalizers(void) +{ +int count=0; +word bytes_freed_before=0; +DCL_LOCK_STATE; +while (GC_should_invoke_finalizers()){ +struct finalizable_object*curr_fo; +#ifdef THREADS +LOCK(); +#endif +if (count==0){ +bytes_freed_before=GC_bytes_freed; +} +curr_fo=GC_fnlz_roots.finalize_now; +#ifdef THREADS +if (curr_fo!=NULL) +SET_FINALIZE_NOW(fo_next(curr_fo)); +UNLOCK(); +if (curr_fo==0)break; +#else +GC_fnlz_roots.finalize_now=fo_next(curr_fo); +#endif +fo_set_next(curr_fo,0); +(*(curr_fo->fo_fn))((ptr_t)(curr_fo->fo_hidden_base), +curr_fo->fo_client_data); +curr_fo->fo_client_data=0; +++count; +} +if (count!=0 +#if defined(THREADS)&&!defined(THREAD_SANITIZER) +&&bytes_freed_before!=GC_bytes_freed +#endif +){ +LOCK(); +GC_finalizer_bytes_freed+=(GC_bytes_freed - bytes_freed_before); +UNLOCK(); +} +return count; +} +static word last_finalizer_notification=0; +GC_INNER void GC_notify_or_invoke_finalizers(void) +{ +GC_finalizer_notifier_proc notifier_fn=0; +#if defined(KEEP_BACK_PTRS)||defined(MAKE_BACK_GRAPH) +static word last_back_trace_gc_no=1; +#endif 
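/* GC_register_finalizer_inner, GC_finalize and GC_invoke_finalizers above are
 * the machinery behind the public finalization API.  A hedged usage sketch;
 * GC_NEW, GC_REGISTER_FINALIZER_NO_ORDER and GC_gcollect come from gc.h and
 * are assumed here, not defined in this file. */

#include <stdio.h>
#include "gc.h"

struct handle { int fd; };

/* Runs from GC_invoke_finalizers() once the object has been found
 * unreachable by a collection. */
static void close_handle(void *obj, void *client_data)
{
    (void)client_data;
    printf("finalizing fd %d\n", ((struct handle *)obj)->fd);
}

static void finalizer_demo(void)
{
    struct handle *h = GC_NEW(struct handle);
    h->fd = 3;

    /* The no-order variant uses GC_null_finalize_mark_proc (above), so cycles
     * among finalizable objects do not block finalization. */
    GC_REGISTER_FINALIZER_NO_ORDER(h, close_handle, NULL, NULL, NULL);

    h = NULL;
    GC_gcollect();   /* the finalizer is now (typically) on the ready queue */

    /* By default finalizers run automatically from allocation calls via
     * GC_notify_or_invoke_finalizers(); they can also be drained explicitly: */
    if (GC_should_invoke_finalizers())
        (void)GC_invoke_finalizers();
}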
+DCL_LOCK_STATE; +#if defined(THREADS)&&!defined(KEEP_BACK_PTRS)&&!defined(MAKE_BACK_GRAPH) +if (!GC_should_invoke_finalizers()) +return; +#endif +LOCK(); +#if defined(KEEP_BACK_PTRS)||defined(MAKE_BACK_GRAPH) +if (GC_gc_no > last_back_trace_gc_no){ +#ifdef KEEP_BACK_PTRS +long i; +last_back_trace_gc_no=GC_WORD_MAX; +for (i=0;i < GC_backtraces;++i){ +UNLOCK(); +GC_generate_random_backtrace_no_gc(); +LOCK(); +} +last_back_trace_gc_no=GC_gc_no; +#endif +#ifdef MAKE_BACK_GRAPH +if (GC_print_back_height){ +GC_print_back_graph_stats(); +} +#endif +} +#endif +if (NULL==GC_fnlz_roots.finalize_now){ +UNLOCK(); +return; +} +if (!GC_finalize_on_demand){ +unsigned char*pnested=GC_check_finalizer_nested(); +UNLOCK(); +if (pnested!=NULL){ +(void)GC_invoke_finalizers(); +*pnested=0; +#ifndef THREADS +GC_ASSERT(NULL==GC_fnlz_roots.finalize_now); +#endif +} +return; +} +if (last_finalizer_notification!=GC_gc_no){ +notifier_fn=GC_finalizer_notifier; +last_finalizer_notification=GC_gc_no; +} +UNLOCK(); +if (notifier_fn!=0) +(*notifier_fn)(); +} +#ifndef SMALL_CONFIG +#ifndef GC_LONG_REFS_NOT_NEEDED +#define IF_LONG_REFS_PRESENT_ELSE(x,y)(x) +#else +#define IF_LONG_REFS_PRESENT_ELSE(x,y)(y) +#endif +GC_INNER void GC_print_finalization_stats(void) +{ +struct finalizable_object*fo; +unsigned long ready=0; +GC_log_printf("%lu finalization entries;" +" %lu/%lu short/long disappearing links alive\n", +(unsigned long)GC_fo_entries, +(unsigned long)GC_dl_hashtbl.entries, +(unsigned long)IF_LONG_REFS_PRESENT_ELSE( +GC_ll_hashtbl.entries,0)); +for (fo=GC_fnlz_roots.finalize_now;fo!=NULL;fo=fo_next(fo)) +++ready; +GC_log_printf("%lu finalization-ready objects;" +" %ld/%ld short/long links cleared\n", +ready, +(long)GC_old_dl_entries - (long)GC_dl_hashtbl.entries, +(long)IF_LONG_REFS_PRESENT_ELSE( +GC_old_ll_entries - GC_ll_hashtbl.entries,0)); +} +#endif +#endif +#ifdef ENABLE_DISCLAIM +#ifndef GC_DISCLAIM_H +#define GC_DISCLAIM_H +#ifdef __cplusplus +extern "C" { +#endif +GC_API void GC_CALL GC_init_finalized_malloc(void); +typedef int (GC_CALLBACK*GC_disclaim_proc)(void*); +GC_API void GC_CALL GC_register_disclaim_proc(int, +GC_disclaim_proc, +int)GC_ATTR_NONNULL(2); +struct GC_finalizer_closure { +GC_finalization_proc proc; +void*cd; +}; +GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1)void*GC_CALL +GC_finalized_malloc(size_t, +const struct GC_finalizer_closure*)GC_ATTR_NONNULL(2); +#ifdef __cplusplus +} +#endif +#endif +#if defined(KEEP_BACK_PTRS)||defined(MAKE_BACK_GRAPH) +#define FINALIZER_CLOSURE_FLAG 0x2 +#else +#define FINALIZER_CLOSURE_FLAG 0x1 +#endif +STATIC int GC_CALLBACK GC_finalized_disclaim(void*obj) +{ +word fc_word=*(word*)obj; +if ((fc_word&FINALIZER_CLOSURE_FLAG)!=0){ +const struct GC_finalizer_closure*fc +=(struct GC_finalizer_closure*)(fc_word +&~(word)FINALIZER_CLOSURE_FLAG); +GC_ASSERT(!GC_find_leak); +(*fc->proc)((word*)obj+1,fc->cd); +} +return 0; +} +GC_API void GC_CALL GC_init_finalized_malloc(void) +{ +DCL_LOCK_STATE; +GC_init(); +LOCK(); +if (GC_finalized_kind!=0){ +UNLOCK(); +return; +} +GC_register_displacement_inner(sizeof(word)); +GC_register_displacement_inner(FINALIZER_CLOSURE_FLAG); +GC_register_displacement_inner(sizeof(oh)+FINALIZER_CLOSURE_FLAG); +GC_finalized_kind=GC_new_kind_inner(GC_new_free_list_inner(), +GC_DS_LENGTH,TRUE,TRUE); +GC_ASSERT(GC_finalized_kind!=0); +GC_register_disclaim_proc(GC_finalized_kind,GC_finalized_disclaim,TRUE); +UNLOCK(); +} +GC_API void GC_CALL GC_register_disclaim_proc(int kind,GC_disclaim_proc proc, +int mark_unconditionally) +{ 
+GC_ASSERT((unsigned)kind < MAXOBJKINDS); +GC_ASSERT(NONNULL_ARG_NOT_NULL(proc)); +if (!EXPECT(GC_find_leak,FALSE)){ +GC_obj_kinds[kind].ok_disclaim_proc=proc; +GC_obj_kinds[kind].ok_mark_unconditionally= +(GC_bool)mark_unconditionally; +} +} +GC_API GC_ATTR_MALLOC void*GC_CALL GC_finalized_malloc(size_t lb, +const struct GC_finalizer_closure*fclos) +{ +word*op; +GC_ASSERT(GC_finalized_kind!=0); +GC_ASSERT(NONNULL_ARG_NOT_NULL(fclos)); +GC_ASSERT(((word)fclos&FINALIZER_CLOSURE_FLAG)==0); +op=(word*)GC_malloc_kind(SIZET_SAT_ADD(lb,sizeof(word)), +GC_finalized_kind); +if (EXPECT(NULL==op,FALSE)) +return NULL; +*op=(word)fclos|FINALIZER_CLOSURE_FLAG; +GC_dirty(op); +REACHABLE_AFTER_DIRTY(fclos); +return op+1; +} +#endif +#include +#include +STATIC GC_bool GC_alloc_reclaim_list(struct obj_kind*kind) +{ +struct hblk**result=(struct hblk**) +GC_scratch_alloc((MAXOBJGRANULES+1)*sizeof(struct hblk*)); +if (result==0)return(FALSE); +BZERO(result,(MAXOBJGRANULES+1)*sizeof(struct hblk*)); +kind->ok_reclaim_list=result; +return(TRUE); +} +GC_INNER ptr_t GC_alloc_large(size_t lb,int k,unsigned flags) +{ +struct hblk*h; +word n_blocks; +ptr_t result; +GC_bool retry=FALSE; +GC_ASSERT(I_HOLD_LOCK()); +lb=ROUNDUP_GRANULE_SIZE(lb); +n_blocks=OBJ_SZ_TO_BLOCKS_CHECKED(lb); +if (!EXPECT(GC_is_initialized,TRUE)){ +DCL_LOCK_STATE; +UNLOCK(); +GC_init(); +LOCK(); +} +if (GC_incremental&&!GC_dont_gc){ +ENTER_GC(); +GC_collect_a_little_inner((int)n_blocks); +EXIT_GC(); +} +h=GC_allochblk(lb,k,flags); +#ifdef USE_MUNMAP +if (0==h){ +GC_merge_unmapped(); +h=GC_allochblk(lb,k,flags); +} +#endif +while (0==h&&GC_collect_or_expand(n_blocks,flags!=0,retry)){ +h=GC_allochblk(lb,k,flags); +retry=TRUE; +} +if (h==0){ +result=0; +} else { +size_t total_bytes=n_blocks*HBLKSIZE; +if (n_blocks > 1){ +GC_large_allocd_bytes+=total_bytes; +if (GC_large_allocd_bytes > GC_max_large_allocd_bytes) +GC_max_large_allocd_bytes=GC_large_allocd_bytes; +} +result=h->hb_body; +} +return result; +} +STATIC ptr_t GC_alloc_large_and_clear(size_t lb,int k,unsigned flags) +{ +ptr_t result; +GC_ASSERT(I_HOLD_LOCK()); +result=GC_alloc_large(lb,k,flags); +if (result!=NULL +&&(GC_debugging_started||GC_obj_kinds[k].ok_init)){ +word n_blocks=OBJ_SZ_TO_BLOCKS(lb); +BZERO(result,n_blocks*HBLKSIZE); +} +return result; +} +STATIC void GC_extend_size_map(size_t i) +{ +size_t orig_granule_sz=ROUNDED_UP_GRANULES(i); +size_t granule_sz; +size_t byte_sz=GRANULES_TO_BYTES(orig_granule_sz); +size_t smaller_than_i=byte_sz - (byte_sz>>3); +size_t low_limit; +size_t number_of_objs; +GC_ASSERT(I_HOLD_LOCK()); +GC_ASSERT(0==GC_size_map[i]); +if (0==GC_size_map[smaller_than_i]){ +low_limit=byte_sz - (byte_sz>>2); +granule_sz=orig_granule_sz; +while (GC_size_map[low_limit]!=0) +low_limit++; +} else { +low_limit=smaller_than_i+1; +while (GC_size_map[low_limit]!=0) +low_limit++; +granule_sz=ROUNDED_UP_GRANULES(low_limit); +granule_sz+=granule_sz>>3; +if (granule_sz < orig_granule_sz) +granule_sz=orig_granule_sz; +} +granule_sz=(granule_sz+1)&~1; +if (granule_sz > MAXOBJGRANULES) +granule_sz=MAXOBJGRANULES; +number_of_objs=HBLK_GRANULES/granule_sz; +GC_ASSERT(number_of_objs!=0); +granule_sz=(HBLK_GRANULES/number_of_objs)&~1; +byte_sz=GRANULES_TO_BYTES(granule_sz)- EXTRA_BYTES; +for (;low_limit<=byte_sz;low_limit++) +GC_size_map[low_limit]=granule_sz; +} +GC_INNER void*GC_generic_malloc_inner(size_t lb,int k) +{ +void*op; +GC_ASSERT(I_HOLD_LOCK()); +GC_ASSERT(k < MAXOBJKINDS); +if (SMALL_OBJ(lb)){ +struct obj_kind*kind=GC_obj_kinds+k; +size_t lg=GC_size_map[lb]; 
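/* GC_init_finalized_malloc, GC_register_disclaim_proc and GC_finalized_malloc
 * above provide "disclaim" notification: a callback run directly when a block
 * is swept, without the queueing and ordering of GC_register_finalizer.  An
 * illustrative sketch; it assumes the distribution's gc_disclaim.h public
 * header (whose contents are reproduced above) and the gc.h basics. */

#include <stdio.h>
#include "gc.h"
#include "gc_disclaim.h"

static void on_reclaim(void *obj, void *client_data)
{
    (void)client_data;
    printf("reclaiming %p\n", obj);
}

/* The closure is stored by address in each object, so it must outlive them;
 * a static instance is the simplest way to guarantee that. */
static const struct GC_finalizer_closure reclaim_closure = { on_reclaim, NULL };

static void *alloc_with_disclaim(size_t n)
{
    /* Idempotent: sets up the "finalized" object kind and registers
     * GC_finalized_disclaim (above) for it. */
    GC_init_finalized_malloc();

    /* The object carries one hidden word pointing at the closure;
     * on_reclaim(obj, cd) runs when the object is reclaimed. */
    return GC_finalized_malloc(n, &reclaim_closure);
}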
+void**opp=&(kind->ok_freelist[lg]); +op=*opp; +if (EXPECT(0==op,FALSE)){ +if (lg==0){ +if (!EXPECT(GC_is_initialized,TRUE)){ +DCL_LOCK_STATE; +UNLOCK(); +GC_init(); +LOCK(); +lg=GC_size_map[lb]; +} +if (0==lg){ +GC_extend_size_map(lb); +lg=GC_size_map[lb]; +GC_ASSERT(lg!=0); +} +opp=&(kind->ok_freelist[lg]); +op=*opp; +} +if (0==op){ +if (0==kind->ok_reclaim_list&& +!GC_alloc_reclaim_list(kind)) +return NULL; +op=GC_allocobj(lg,k); +if (0==op) +return NULL; +} +} +*opp=obj_link(op); +obj_link(op)=0; +GC_bytes_allocd+=GRANULES_TO_BYTES((word)lg); +} else { +op=(ptr_t)GC_alloc_large_and_clear(ADD_SLOP(lb),k,0); +if (op!=NULL) +GC_bytes_allocd+=lb; +} +return op; +} +#if defined(DBG_HDRS_ALL)||defined(GC_GCJ_SUPPORT)||!defined(GC_NO_FINALIZATION) +GC_INNER void*GC_generic_malloc_inner_ignore_off_page(size_t lb,int k) +{ +word lb_adjusted; +void*op; +GC_ASSERT(I_HOLD_LOCK()); +if (lb<=HBLKSIZE) +return GC_generic_malloc_inner(lb,k); +GC_ASSERT(k < MAXOBJKINDS); +lb_adjusted=ADD_SLOP(lb); +op=GC_alloc_large_and_clear(lb_adjusted,k,IGNORE_OFF_PAGE); +if (op!=NULL) +GC_bytes_allocd+=lb_adjusted; +return op; +} +#endif +#ifdef GC_COLLECT_AT_MALLOC +#if defined(CPPCHECK) +size_t GC_dbg_collect_at_malloc_min_lb=16*1024; +#else +size_t GC_dbg_collect_at_malloc_min_lb=(GC_COLLECT_AT_MALLOC); +#endif +#endif +GC_API GC_ATTR_MALLOC void*GC_CALL GC_generic_malloc(size_t lb,int k) +{ +void*result; +DCL_LOCK_STATE; +GC_ASSERT(k < MAXOBJKINDS); +if (EXPECT(GC_have_errors,FALSE)) +GC_print_all_errors(); +GC_INVOKE_FINALIZERS(); +GC_DBG_COLLECT_AT_MALLOC(lb); +if (SMALL_OBJ(lb)){ +LOCK(); +result=GC_generic_malloc_inner(lb,k); +UNLOCK(); +} else { +size_t lg; +size_t lb_rounded; +word n_blocks; +GC_bool init; +lg=ROUNDED_UP_GRANULES(lb); +lb_rounded=GRANULES_TO_BYTES(lg); +n_blocks=OBJ_SZ_TO_BLOCKS(lb_rounded); +init=GC_obj_kinds[k].ok_init; +LOCK(); +result=(ptr_t)GC_alloc_large(lb_rounded,k,0); +if (0!=result){ +if (GC_debugging_started){ +BZERO(result,n_blocks*HBLKSIZE); +} else { +#ifdef THREADS +((word*)result)[0]=0; +((word*)result)[1]=0; +((word*)result)[GRANULES_TO_WORDS(lg)-1]=0; +((word*)result)[GRANULES_TO_WORDS(lg)-2]=0; +#endif +} +GC_bytes_allocd+=lb_rounded; +} +UNLOCK(); +if (init&&!GC_debugging_started&&0!=result){ +BZERO(result,n_blocks*HBLKSIZE); +} +} +if (0==result){ +return((*GC_get_oom_fn())(lb)); +} else { +return(result); +} +} +GC_API GC_ATTR_MALLOC void*GC_CALL GC_malloc_kind_global(size_t lb,int k) +{ +GC_ASSERT(k < MAXOBJKINDS); +if (SMALL_OBJ(lb)){ +void*op; +void**opp; +size_t lg; +DCL_LOCK_STATE; +GC_DBG_COLLECT_AT_MALLOC(lb); +LOCK(); +lg=GC_size_map[lb]; +opp=&GC_obj_kinds[k].ok_freelist[lg]; +op=*opp; +if (EXPECT(op!=NULL,TRUE)){ +if (k==PTRFREE){ +*opp=obj_link(op); +} else { +GC_ASSERT(0==obj_link(op) +||((word)obj_link(op) +<=(word)GC_greatest_plausible_heap_addr +&&(word)obj_link(op) +>=(word)GC_least_plausible_heap_addr)); +*opp=obj_link(op); +obj_link(op)=0; +} +GC_bytes_allocd+=GRANULES_TO_BYTES((word)lg); +UNLOCK(); +return op; +} +UNLOCK(); +} +return GC_clear_stack(GC_generic_malloc(lb,k)); +} +#if defined(THREADS)&&!defined(THREAD_LOCAL_ALLOC) +GC_API GC_ATTR_MALLOC void*GC_CALL GC_malloc_kind(size_t lb,int k) +{ +return GC_malloc_kind_global(lb,k); +} +#endif +GC_API GC_ATTR_MALLOC void*GC_CALL GC_malloc_atomic(size_t lb) +{ +return GC_malloc_kind(lb,PTRFREE); +} +GC_API GC_ATTR_MALLOC void*GC_CALL GC_malloc(size_t lb) +{ +return GC_malloc_kind(lb,NORMAL); +} +GC_API GC_ATTR_MALLOC void*GC_CALL GC_generic_malloc_uncollectable( +size_t lb,int k) +{ +void*op; 
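/* GC_malloc_kind_global above is the out-of-line allocation path behind
 * GC_malloc and GC_malloc_atomic; the object "kind" decides whether the
 * contents are scanned (NORMAL), ignored by the marker (PTRFREE), or exempt
 * from automatic reclamation (UNCOLLECTABLE).  A brief sketch using the gc.h
 * convenience macros, which are assumed here. */

#include <string.h>
#include "gc.h"

struct pair { void *car, *cdr; };

static void allocation_demo(void)
{
    GC_INIT();   /* normally done once at program startup */

    /* NORMAL: contents are scanned for pointers. */
    struct pair *p = (struct pair *)GC_MALLOC(sizeof *p);

    /* PTRFREE: contents are ignored by the marker, so large data buffers
     * neither slow marking nor cause false retention. */
    char *buf = (char *)GC_MALLOC_ATOMIC(4096);
    memset(buf, 0, 4096);

    /* UNCOLLECTABLE: never reclaimed automatically, but still scanned,
     * so it can serve as a root that keeps other objects alive. */
    struct pair *root = (struct pair *)GC_MALLOC_UNCOLLECTABLE(sizeof *root);
    root->car = p;
    root->cdr = buf;

    GC_FREE(root);   /* required for uncollectable objects, optional otherwise */
}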
+DCL_LOCK_STATE; +GC_ASSERT(k < MAXOBJKINDS); +if (SMALL_OBJ(lb)){ +void**opp; +size_t lg; +GC_DBG_COLLECT_AT_MALLOC(lb); +if (EXTRA_BYTES!=0&&lb!=0)lb--; +LOCK(); +lg=GC_size_map[lb]; +opp=&GC_obj_kinds[k].ok_freelist[lg]; +op=*opp; +if (EXPECT(op!=NULL,TRUE)){ +*opp=obj_link(op); +obj_link(op)=0; +GC_bytes_allocd+=GRANULES_TO_BYTES((word)lg); +GC_non_gc_bytes+=GRANULES_TO_BYTES((word)lg); +UNLOCK(); +} else { +UNLOCK(); +op=GC_generic_malloc(lb,k); +} +GC_ASSERT(0==op||GC_is_marked(op)); +} else { +op=GC_generic_malloc(lb,k); +if (op){ +hdr*hhdr=HDR(op); +GC_ASSERT(((word)op&(HBLKSIZE - 1))==0); +LOCK(); +set_mark_bit_from_hdr(hhdr,0); +#ifndef THREADS +GC_ASSERT(hhdr->hb_n_marks==0); +#endif +hhdr->hb_n_marks=1; +UNLOCK(); +} +} +return op; +} +GC_API GC_ATTR_MALLOC void*GC_CALL GC_malloc_uncollectable(size_t lb) +{ +return GC_generic_malloc_uncollectable(lb,UNCOLLECTABLE); +} +#ifdef GC_ATOMIC_UNCOLLECTABLE +GC_API GC_ATTR_MALLOC void*GC_CALL +GC_malloc_atomic_uncollectable(size_t lb) +{ +return GC_generic_malloc_uncollectable(lb,AUNCOLLECTABLE); +} +#endif +#if defined(REDIRECT_MALLOC)&&!defined(REDIRECT_MALLOC_IN_HEADER) +#ifndef MSWINCE +#include +#endif +#define GC_debug_malloc_replacement(lb)GC_debug_malloc(lb,GC_DBG_EXTRAS) +#if defined(CPPCHECK) +#define REDIRECT_MALLOC_F GC_malloc +#else +#define REDIRECT_MALLOC_F REDIRECT_MALLOC +#endif +void*malloc(size_t lb) +{ +#if defined(I386)&&defined(GC_SOLARIS_THREADS) +if (!EXPECT(GC_is_initialized,TRUE))return sbrk(lb); +#endif +return (void*)REDIRECT_MALLOC_F(lb); +} +#if defined(GC_LINUX_THREADS) +STATIC ptr_t GC_libpthread_start=0; +STATIC ptr_t GC_libpthread_end=0; +STATIC ptr_t GC_libld_start=0; +STATIC ptr_t GC_libld_end=0; +STATIC void GC_init_lib_bounds(void) +{ +IF_CANCEL(int cancel_state;) +if (GC_libpthread_start!=0)return; +DISABLE_CANCEL(cancel_state); +GC_init(); +if (!GC_text_mapping("libpthread-", +&GC_libpthread_start,&GC_libpthread_end)){ +WARN("Failed to find libpthread.so text mapping:Expect crash\n",0); +GC_libpthread_start=(ptr_t)1; +} +if (!GC_text_mapping("ld-",&GC_libld_start,&GC_libld_end)){ +WARN("Failed to find ld.so text mapping:Expect crash\n",0); +} +RESTORE_CANCEL(cancel_state); +} +#endif +void*calloc(size_t n,size_t lb) +{ +if ((lb|n)> GC_SQRT_SIZE_MAX +&&lb&&n > GC_SIZE_MAX/lb) +return (*GC_get_oom_fn())(GC_SIZE_MAX); +#if defined(GC_LINUX_THREADS) +{ +static GC_bool lib_bounds_set=FALSE; +ptr_t caller=(ptr_t)__builtin_return_address(0); +if (!EXPECT(lib_bounds_set,TRUE)){ +GC_init_lib_bounds(); +lib_bounds_set=TRUE; +} +if (((word)caller>=(word)GC_libpthread_start +&&(word)caller < (word)GC_libpthread_end) +||((word)caller>=(word)GC_libld_start +&&(word)caller < (word)GC_libld_end)) +return GC_generic_malloc_uncollectable(n*lb,UNCOLLECTABLE); +} +#endif +return (void*)REDIRECT_MALLOC_F(n*lb); +} +#ifndef strdup +char*strdup(const char*s) +{ +size_t lb=strlen(s)+1; +char*result=(char*)REDIRECT_MALLOC_F(lb); +if (result==0){ +errno=ENOMEM; +return 0; +} +BCOPY(s,result,lb); +return result; +} +#endif +#ifndef strndup +char*strndup(const char*str,size_t size) +{ +char*copy; +size_t len=strlen(str); +if (len > size) +len=size; +copy=(char*)REDIRECT_MALLOC_F(len+1); +if (copy==NULL){ +errno=ENOMEM; +return NULL; +} +if (EXPECT(len > 0,TRUE)) +BCOPY(str,copy,len); +copy[len]='\0'; +return copy; +} +#endif +#undef GC_debug_malloc_replacement +#endif +GC_API void GC_CALL GC_free(void*p) +{ +struct hblk*h; +hdr*hhdr; +size_t sz; +size_t ngranules; +int knd; +struct obj_kind*ok; +DCL_LOCK_STATE; +if (p){ +} 
else { +return; +} +#ifdef LOG_ALLOCS +GC_log_printf("GC_free(%p)after GC #%lu\n", +p,(unsigned long)GC_gc_no); +#endif +h=HBLKPTR(p); +hhdr=HDR(h); +#if defined(REDIRECT_MALLOC)&&((defined(NEED_CALLINFO)&&defined(GC_HAVE_BUILTIN_BACKTRACE))||defined(GC_SOLARIS_THREADS)||defined(GC_LINUX_THREADS)||defined(MSWIN32)) +if (0==hhdr)return; +#endif +GC_ASSERT(GC_base(p)==p); +sz=(size_t)hhdr->hb_sz; +ngranules=BYTES_TO_GRANULES(sz); +knd=hhdr->hb_obj_kind; +ok=&GC_obj_kinds[knd]; +if (EXPECT(ngranules<=MAXOBJGRANULES,TRUE)){ +void**flh; +LOCK(); +GC_bytes_freed+=sz; +if (IS_UNCOLLECTABLE(knd))GC_non_gc_bytes-=sz; +if (ok->ok_init&&EXPECT(sz > sizeof(word),TRUE)){ +BZERO((word*)p+1,sz-sizeof(word)); +} +flh=&(ok->ok_freelist[ngranules]); +obj_link(p)=*flh; +*flh=(ptr_t)p; +UNLOCK(); +} else { +size_t nblocks=OBJ_SZ_TO_BLOCKS(sz); +LOCK(); +GC_bytes_freed+=sz; +if (IS_UNCOLLECTABLE(knd))GC_non_gc_bytes-=sz; +if (nblocks > 1){ +GC_large_allocd_bytes-=nblocks*HBLKSIZE; +} +GC_freehblk(h); +UNLOCK(); +} +} +#ifdef THREADS +GC_INNER void GC_free_inner(void*p) +{ +struct hblk*h; +hdr*hhdr; +size_t sz; +size_t ngranules; +int knd; +struct obj_kind*ok; +h=HBLKPTR(p); +hhdr=HDR(h); +knd=hhdr->hb_obj_kind; +sz=(size_t)hhdr->hb_sz; +ngranules=BYTES_TO_GRANULES(sz); +ok=&GC_obj_kinds[knd]; +if (ngranules<=MAXOBJGRANULES){ +void**flh; +GC_bytes_freed+=sz; +if (IS_UNCOLLECTABLE(knd))GC_non_gc_bytes-=sz; +if (ok->ok_init&&EXPECT(sz > sizeof(word),TRUE)){ +BZERO((word*)p+1,sz-sizeof(word)); +} +flh=&(ok->ok_freelist[ngranules]); +obj_link(p)=*flh; +*flh=(ptr_t)p; +} else { +size_t nblocks=OBJ_SZ_TO_BLOCKS(sz); +GC_bytes_freed+=sz; +if (IS_UNCOLLECTABLE(knd))GC_non_gc_bytes-=sz; +if (nblocks > 1){ +GC_large_allocd_bytes-=nblocks*HBLKSIZE; +} +GC_freehblk(h); +} +} +#endif +#if defined(REDIRECT_MALLOC)&&!defined(REDIRECT_FREE) +#define REDIRECT_FREE GC_free +#endif +#if defined(REDIRECT_FREE)&&!defined(REDIRECT_MALLOC_IN_HEADER) +#if defined(CPPCHECK) +#define REDIRECT_FREE_F GC_free +#else +#define REDIRECT_FREE_F REDIRECT_FREE +#endif +void free(void*p) +{ +#ifndef IGNORE_FREE +#if defined(GC_LINUX_THREADS)&&!defined(USE_PROC_FOR_LIBRARIES) +ptr_t caller=(ptr_t)__builtin_return_address(0); +if (((word)caller>=(word)GC_libpthread_start +&&(word)caller < (word)GC_libpthread_end) +||((word)caller>=(word)GC_libld_start +&&(word)caller < (word)GC_libld_end)){ +GC_free(p); +return; +} +#endif +REDIRECT_FREE_F(p); +#endif +} +#endif +#include +#include +#ifndef MSWINCE +#include +#endif +#ifndef GC_ALLOC_PTRS_H +#define GC_ALLOC_PTRS_H +#ifdef __cplusplus +extern "C" { +#endif +GC_API void**const GC_objfreelist_ptr; +GC_API void**const GC_aobjfreelist_ptr; +GC_API void**const GC_uobjfreelist_ptr; +#ifdef GC_ATOMIC_UNCOLLECTABLE +GC_API void**const GC_auobjfreelist_ptr; +#endif +GC_API void GC_CALL GC_incr_bytes_allocd(size_t bytes); +GC_API void GC_CALL GC_incr_bytes_freed(size_t bytes); +#ifdef __cplusplus +} +#endif +#endif +void**const GC_objfreelist_ptr=GC_objfreelist; +void**const GC_aobjfreelist_ptr=GC_aobjfreelist; +void**const GC_uobjfreelist_ptr=GC_uobjfreelist; +#ifdef GC_ATOMIC_UNCOLLECTABLE +void**const GC_auobjfreelist_ptr=GC_auobjfreelist; +#endif +GC_API int GC_CALL GC_get_kind_and_size(const void*p,size_t*psize) +{ +hdr*hhdr=HDR(p); +if (psize!=NULL){ +*psize=(size_t)hhdr->hb_sz; +} +return hhdr->hb_obj_kind; +} +GC_API GC_ATTR_MALLOC void*GC_CALL GC_generic_or_special_malloc(size_t lb, +int knd) +{ +switch(knd){ +case PTRFREE: +case NORMAL: +return GC_malloc_kind(lb,knd); +case UNCOLLECTABLE: +#ifdef 
GC_ATOMIC_UNCOLLECTABLE +case AUNCOLLECTABLE: +#endif +return GC_generic_malloc_uncollectable(lb,knd); +default: +return GC_generic_malloc(lb,knd); +} +} +GC_API void*GC_CALL GC_realloc(void*p,size_t lb) +{ +struct hblk*h; +hdr*hhdr; +void*result; +size_t sz; +size_t orig_sz; +int obj_kind; +if (p==0)return(GC_malloc(lb)); +if (0==lb){ +#ifndef IGNORE_FREE +GC_free(p); +#endif +return NULL; +} +h=HBLKPTR(p); +hhdr=HDR(h); +sz=(size_t)hhdr->hb_sz; +obj_kind=hhdr->hb_obj_kind; +orig_sz=sz; +if (sz > MAXOBJBYTES){ +word descr=GC_obj_kinds[obj_kind].ok_descriptor; +sz=(sz+HBLKSIZE-1)&~HBLKMASK; +if (GC_obj_kinds[obj_kind].ok_relocate_descr) +descr+=sz; +#ifdef AO_HAVE_store +GC_STATIC_ASSERT(sizeof(hhdr->hb_sz)==sizeof(AO_t)); +AO_store((volatile AO_t*)&hhdr->hb_sz,(AO_t)sz); +AO_store((volatile AO_t*)&hhdr->hb_descr,(AO_t)descr); +#else +{ +DCL_LOCK_STATE; +LOCK(); +hhdr->hb_sz=sz; +hhdr->hb_descr=descr; +UNLOCK(); +} +#endif +#ifdef MARK_BIT_PER_OBJ +GC_ASSERT(hhdr->hb_inv_sz==LARGE_INV_SZ); +#endif +#ifdef MARK_BIT_PER_GRANULE +GC_ASSERT((hhdr->hb_flags&LARGE_BLOCK)!=0 +&&hhdr->hb_map[ANY_INDEX]==1); +#endif +if (IS_UNCOLLECTABLE(obj_kind))GC_non_gc_bytes+=(sz - orig_sz); +} +if (ADD_SLOP(lb)<=sz){ +if (lb>=(sz>>1)){ +if (orig_sz > lb){ +BZERO(((ptr_t)p)+lb,orig_sz - lb); +} +return(p); +} +sz=lb; +} +result=GC_generic_or_special_malloc((word)lb,obj_kind); +if (result!=NULL){ +BCOPY(p,result,sz); +#ifndef IGNORE_FREE +GC_free(p); +#endif +} +return result; +} +#if defined(REDIRECT_MALLOC)&&!defined(REDIRECT_REALLOC) +#define REDIRECT_REALLOC GC_realloc +#endif +#ifdef REDIRECT_REALLOC +#define GC_debug_realloc_replacement(p,lb)GC_debug_realloc(p,lb,GC_DBG_EXTRAS) +#if!defined(REDIRECT_MALLOC_IN_HEADER) +void*realloc(void*p,size_t lb) +{ +return(REDIRECT_REALLOC(p,lb)); +} +#endif +#undef GC_debug_realloc_replacement +#endif +GC_API GC_ATTR_MALLOC void*GC_CALL +GC_generic_malloc_ignore_off_page(size_t lb,int k) +{ +void*result; +size_t lg; +size_t lb_rounded; +word n_blocks; +GC_bool init; +DCL_LOCK_STATE; +if (SMALL_OBJ(lb)) +return GC_generic_malloc(lb,k); +GC_ASSERT(k < MAXOBJKINDS); +lg=ROUNDED_UP_GRANULES(lb); +lb_rounded=GRANULES_TO_BYTES(lg); +n_blocks=OBJ_SZ_TO_BLOCKS(lb_rounded); +init=GC_obj_kinds[k].ok_init; +if (EXPECT(GC_have_errors,FALSE)) +GC_print_all_errors(); +GC_INVOKE_FINALIZERS(); +GC_DBG_COLLECT_AT_MALLOC(lb); +LOCK(); +result=(ptr_t)GC_alloc_large(ADD_SLOP(lb),k,IGNORE_OFF_PAGE); +if (NULL==result){ +GC_oom_func oom_fn=GC_oom_fn; +UNLOCK(); +return (*oom_fn)(lb); +} +if (GC_debugging_started){ +BZERO(result,n_blocks*HBLKSIZE); +} else { +#ifdef THREADS +((word*)result)[0]=0; +((word*)result)[1]=0; +((word*)result)[GRANULES_TO_WORDS(lg)-1]=0; +((word*)result)[GRANULES_TO_WORDS(lg)-2]=0; +#endif +} +GC_bytes_allocd+=lb_rounded; +UNLOCK(); +if (init&&!GC_debugging_started){ +BZERO(result,n_blocks*HBLKSIZE); +} +return(result); +} +GC_API GC_ATTR_MALLOC void*GC_CALL GC_malloc_ignore_off_page(size_t lb) +{ +return GC_generic_malloc_ignore_off_page(lb,NORMAL); +} +GC_API GC_ATTR_MALLOC void*GC_CALL +GC_malloc_atomic_ignore_off_page(size_t lb) +{ +return GC_generic_malloc_ignore_off_page(lb,PTRFREE); +} +GC_API void GC_CALL GC_incr_bytes_allocd(size_t n) +{ +GC_bytes_allocd+=n; +} +GC_API void GC_CALL GC_incr_bytes_freed(size_t n) +{ +GC_bytes_freed+=n; +} +GC_API size_t GC_CALL GC_get_expl_freed_bytes_since_gc(void) +{ +return (size_t)GC_bytes_freed; +} +#ifdef PARALLEL_MARK +STATIC volatile AO_t GC_bytes_allocd_tmp=0; +#endif +GC_API void GC_CALL 
GC_generic_malloc_many(size_t lb,int k,void**result) +{ +void*op; +void*p; +void**opp; +size_t lw; +size_t lg; +signed_word my_bytes_allocd=0; +struct obj_kind*ok=&(GC_obj_kinds[k]); +struct hblk**rlh; +DCL_LOCK_STATE; +GC_ASSERT(lb!=0&&(lb&(GRANULE_BYTES-1))==0); +if (!SMALL_OBJ(lb)||GC_manual_vdb){ +op=GC_generic_malloc(lb,k); +if (EXPECT(0!=op,TRUE)) +obj_link(op)=0; +*result=op; +#ifndef GC_DISABLE_INCREMENTAL +if (GC_manual_vdb&&GC_is_heap_ptr(result)){ +GC_dirty_inner(result); +REACHABLE_AFTER_DIRTY(op); +} +#endif +return; +} +GC_ASSERT(k < MAXOBJKINDS); +lw=BYTES_TO_WORDS(lb); +lg=BYTES_TO_GRANULES(lb); +if (EXPECT(GC_have_errors,FALSE)) +GC_print_all_errors(); +GC_INVOKE_FINALIZERS(); +GC_DBG_COLLECT_AT_MALLOC(lb); +if (!EXPECT(GC_is_initialized,TRUE))GC_init(); +LOCK(); +if (GC_incremental&&!GC_dont_gc){ +ENTER_GC(); +GC_collect_a_little_inner(1); +EXIT_GC(); +} +rlh=ok->ok_reclaim_list; +if (rlh!=NULL){ +struct hblk*hbp; +hdr*hhdr; +for (rlh+=lg;(hbp=*rlh)!=NULL;){ +hhdr=HDR(hbp); +*rlh=hhdr->hb_next; +GC_ASSERT(hhdr->hb_sz==lb); +hhdr->hb_last_reclaimed=(unsigned short)GC_gc_no; +#ifdef PARALLEL_MARK +if (GC_parallel){ +signed_word my_bytes_allocd_tmp= +(signed_word)AO_load(&GC_bytes_allocd_tmp); +GC_ASSERT(my_bytes_allocd_tmp>=0); +if (my_bytes_allocd_tmp!=0){ +(void)AO_fetch_and_add(&GC_bytes_allocd_tmp, +(AO_t)(-my_bytes_allocd_tmp)); +GC_bytes_allocd+=my_bytes_allocd_tmp; +} +GC_acquire_mark_lock(); +++GC_fl_builder_count; +UNLOCK(); +GC_release_mark_lock(); +} +#endif +op=GC_reclaim_generic(hbp,hhdr,lb, +ok->ok_init,0,&my_bytes_allocd); +if (op!=0){ +#ifdef PARALLEL_MARK +if (GC_parallel){ +*result=op; +(void)AO_fetch_and_add(&GC_bytes_allocd_tmp, +(AO_t)my_bytes_allocd); +GC_acquire_mark_lock(); +--GC_fl_builder_count; +if (GC_fl_builder_count==0)GC_notify_all_builder(); +#ifdef THREAD_SANITIZER +GC_release_mark_lock(); +LOCK(); +GC_bytes_found+=my_bytes_allocd; +UNLOCK(); +#else +GC_bytes_found+=my_bytes_allocd; +GC_release_mark_lock(); +#endif +(void)GC_clear_stack(0); +return; +} +#endif +GC_bytes_found+=my_bytes_allocd; +GC_bytes_allocd+=my_bytes_allocd; +goto out; +} +#ifdef PARALLEL_MARK +if (GC_parallel){ +GC_acquire_mark_lock(); +--GC_fl_builder_count; +if (GC_fl_builder_count==0)GC_notify_all_builder(); +GC_release_mark_lock(); +LOCK(); +} +#endif +} +} +opp=&(GC_obj_kinds[k].ok_freelist[lg]); +if ( (op=*opp)!=0){ +*opp=0; +my_bytes_allocd=0; +for (p=op;p!=0;p=obj_link(p)){ +my_bytes_allocd+=lb; +if ((word)my_bytes_allocd>=HBLKSIZE){ +*opp=obj_link(p); +obj_link(p)=0; +break; +} +} +GC_bytes_allocd+=my_bytes_allocd; +goto out; +} +{ +struct hblk*h=GC_allochblk(lb,k,0); +if (h){ +if (IS_UNCOLLECTABLE(k))GC_set_hdr_marks(HDR(h)); +GC_bytes_allocd+=HBLKSIZE - HBLKSIZE % lb; +#ifdef PARALLEL_MARK +if (GC_parallel){ +GC_acquire_mark_lock(); +++GC_fl_builder_count; +UNLOCK(); +GC_release_mark_lock(); +op=GC_build_fl(h,lw, +(ok->ok_init||GC_debugging_started),0); +*result=op; +GC_acquire_mark_lock(); +--GC_fl_builder_count; +if (GC_fl_builder_count==0)GC_notify_all_builder(); +GC_release_mark_lock(); +(void)GC_clear_stack(0); +return; +} +#endif +op=GC_build_fl(h,lw,(ok->ok_init||GC_debugging_started),0); +goto out; +} +} +op=GC_generic_malloc_inner(lb,k); +if (0!=op)obj_link(op)=0; +out: +*result=op; +UNLOCK(); +(void)GC_clear_stack(0); +} +GC_API GC_ATTR_MALLOC void*GC_CALL GC_malloc_many(size_t lb) +{ +void*result; +lb=SIZET_SAT_ADD(lb,EXTRA_BYTES+GRANULE_BYTES - 1) +&~(GRANULE_BYTES - 1); +GC_generic_malloc_many(lb,NORMAL,&result); +return result; +} +#include 
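/* GC_generic_malloc_many / GC_malloc_many above hand back a whole list of
 * equally sized objects linked through their first word (the obj_link
 * convention used throughout this file), letting a caller amortize the
 * allocation-lock traffic.  A sketch of consuming such a list; gc.h is
 * assumed, and the *(void **) traversal matches what its GC_NEXT() helper
 * expands to. */

#include "gc.h"

#define NODE_BYTES 32       /* request size; rounded up to whole granules */

static void *batch;         /* remaining objects from the last refill */

/* Returns one object with its link word cleared, refilling the batch when it
 * runs dry.  (For per-thread caches, keep `batch` in thread-local storage.) */
static void *fast_alloc(void)
{
    void *p = batch;
    if (p == NULL) {
        p = GC_malloc_many(NODE_BYTES);
        if (p == NULL)
            return NULL;            /* out of memory */
    }
    batch = *(void **)p;            /* next object in the list */
    *(void **)p = NULL;             /* detach before handing the object out */
    return p;
}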
+GC_API GC_ATTR_MALLOC void*GC_CALL GC_memalign(size_t align,size_t lb) +{ +size_t new_lb; +size_t offset; +ptr_t result; +if (align<=GRANULE_BYTES)return GC_malloc(lb); +if (align>=HBLKSIZE/2||lb>=HBLKSIZE/2){ +if (align > HBLKSIZE){ +return (*GC_get_oom_fn())(LONG_MAX-1024); +} +return GC_malloc(lb<=HBLKSIZE?HBLKSIZE:lb); +} +new_lb=SIZET_SAT_ADD(lb,align - 1); +result=(ptr_t)GC_malloc(new_lb); +offset=(word)result % align; +if (offset!=0){ +offset=align - offset; +if (!GC_all_interior_pointers){ +GC_STATIC_ASSERT(VALID_OFFSET_SZ<=HBLKSIZE); +GC_ASSERT(offset < VALID_OFFSET_SZ); +GC_register_displacement(offset); +} +} +result+=offset; +GC_ASSERT((word)result % align==0); +return result; +} +GC_API int GC_CALL GC_posix_memalign(void**memptr,size_t align,size_t lb) +{ +size_t align_minus_one=align - 1; +if (align < sizeof(void*)||(align_minus_one&align)!=0){ +#ifdef MSWINCE +return ERROR_INVALID_PARAMETER; +#else +return EINVAL; +#endif +} +if ((*memptr=GC_memalign(align,lb))==NULL){ +#ifdef MSWINCE +return ERROR_NOT_ENOUGH_MEMORY; +#else +return ENOMEM; +#endif +} +return 0; +} +GC_API GC_ATTR_MALLOC char*GC_CALL GC_strdup(const char*s) +{ +char*copy; +size_t lb; +if (s==NULL)return NULL; +lb=strlen(s)+1; +copy=(char*)GC_malloc_atomic(lb); +if (NULL==copy){ +#ifndef MSWINCE +errno=ENOMEM; +#endif +return NULL; +} +BCOPY(s,copy,lb); +return copy; +} +GC_API GC_ATTR_MALLOC char*GC_CALL GC_strndup(const char*str,size_t size) +{ +char*copy; +size_t len=strlen(str); +if (len > size) +len=size; +copy=(char*)GC_malloc_atomic(len+1); +if (copy==NULL){ +#ifndef MSWINCE +errno=ENOMEM; +#endif +return NULL; +} +if (EXPECT(len > 0,TRUE)) +BCOPY(str,copy,len); +copy[len]='\0'; +return copy; +} +#ifdef GC_REQUIRE_WCSDUP +#include +GC_API GC_ATTR_MALLOC wchar_t*GC_CALL GC_wcsdup(const wchar_t*str) +{ +size_t lb=(wcslen(str)+1)*sizeof(wchar_t); +wchar_t*copy=(wchar_t*)GC_malloc_atomic(lb); +if (copy==NULL){ +#ifndef MSWINCE +errno=ENOMEM; +#endif +return NULL; +} +BCOPY(str,copy,lb); +return copy; +} +#endif +#ifndef CPPCHECK +GC_API void*GC_CALL GC_malloc_stubborn(size_t lb) +{ +return GC_malloc(lb); +} +GC_API void GC_CALL GC_change_stubborn(const void*p GC_ATTR_UNUSED) +{ +} +#endif +GC_API void GC_CALL GC_end_stubborn_change(const void*p) +{ +GC_dirty(p); +} +GC_API void GC_CALL GC_ptr_store_and_dirty(void*p,const void*q) +{ +*(const void**)p=q; +GC_dirty(p); +REACHABLE_AFTER_DIRTY(q); +} +#if defined(__MINGW32__)&&!defined(__MINGW_EXCPT_DEFINE_PSDK)&&defined(__i386__) +#define __MINGW_EXCPT_DEFINE_PSDK 1 +#endif +#include +#if defined(MSWIN32)&&defined(__GNUC__) +#include +#endif +GC_ATTR_NOINLINE +void GC_noop6(word arg1 GC_ATTR_UNUSED,word arg2 GC_ATTR_UNUSED, +word arg3 GC_ATTR_UNUSED,word arg4 GC_ATTR_UNUSED, +word arg5 GC_ATTR_UNUSED,word arg6 GC_ATTR_UNUSED) +{ +#if defined(AO_HAVE_compiler_barrier)&&!defined(BASE_ATOMIC_OPS_EMULATED) +AO_compiler_barrier(); +#else +GC_noop1(0); +#endif +} +volatile word GC_noop_sink; +GC_ATTR_NO_SANITIZE_THREAD +GC_API void GC_CALL GC_noop1(word x) +{ +GC_noop_sink=x; +} +GC_INNER struct obj_kind GC_obj_kinds[MAXOBJKINDS]={ +{&GC_aobjfreelist[0],0, +GC_DS_LENGTH,FALSE,FALSE +OK_DISCLAIM_INITZ }, +{&GC_objfreelist[0],0, +GC_DS_LENGTH, +TRUE,TRUE +OK_DISCLAIM_INITZ }, +{&GC_uobjfreelist[0],0, +GC_DS_LENGTH,TRUE,TRUE +OK_DISCLAIM_INITZ }, +#ifdef GC_ATOMIC_UNCOLLECTABLE +{&GC_auobjfreelist[0],0, +GC_DS_LENGTH,FALSE,FALSE +OK_DISCLAIM_INITZ }, +#endif +}; +#ifndef INITIAL_MARK_STACK_SIZE +#define INITIAL_MARK_STACK_SIZE (1*HBLKSIZE) +#endif 
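/* GC_memalign / GC_posix_memalign and the GC_strdup family above mirror their
 * libc namesakes but return collectable memory (the string copies are
 * pointer-free).  A short sketch: */

#include <stdio.h>
#include "gc.h"

static void memalign_and_strdup_demo(void)
{
    void *aligned = NULL;
    /* Alignment must be a power of two and at least sizeof(void*),
     * exactly as the EINVAL check above requires. */
    int err = GC_posix_memalign(&aligned, 64, 1000);
    if (err != 0)
        fprintf(stderr, "aligned allocation failed: %d\n", err);

    /* Atomic (pointer-free) copies: their bytes are never mistaken for
     * pointers by the marker. */
    char *copy = GC_strdup("hello");
    char *head = GC_strndup("hello world", 5);   /* yields "hello" */

    (void)aligned; (void)copy; (void)head;
    /* None of these need an explicit free; they are reclaimed when
     * unreachable (GC_FREE is also legal). */
}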
+#if!defined(GC_DISABLE_INCREMENTAL) +STATIC word GC_n_rescuing_pages=0; +#endif +#ifdef PARALLEL_MARK +GC_INNER GC_bool GC_parallel_mark_disabled=FALSE; +#endif +GC_INNER GC_bool GC_collection_in_progress(void) +{ +return(GC_mark_state!=MS_NONE); +} +GC_INNER void GC_clear_hdr_marks(hdr*hhdr) +{ +size_t last_bit; +#ifdef AO_HAVE_load +last_bit=FINAL_MARK_BIT((size_t)AO_load((volatile AO_t*)&hhdr->hb_sz)); +#else +last_bit=FINAL_MARK_BIT((size_t)hhdr->hb_sz); +#endif +BZERO(hhdr->hb_marks,sizeof(hhdr->hb_marks)); +set_mark_bit_from_hdr(hhdr,last_bit); +hhdr->hb_n_marks=0; +} +GC_INNER void GC_set_hdr_marks(hdr*hhdr) +{ +unsigned i; +size_t sz=(size_t)hhdr->hb_sz; +unsigned n_marks=(unsigned)FINAL_MARK_BIT(sz); +#ifdef USE_MARK_BYTES +for (i=0;i<=n_marks;i+=(unsigned)MARK_BIT_OFFSET(sz)){ +hhdr->hb_marks[i]=1; +} +#else +for (i=0;i < divWORDSZ(n_marks+WORDSZ);++i){ +hhdr->hb_marks[i]=GC_WORD_MAX; +} +#endif +#ifdef MARK_BIT_PER_OBJ +hhdr->hb_n_marks=n_marks; +#else +hhdr->hb_n_marks=HBLK_OBJS(sz); +#endif +} +static void clear_marks_for_block(struct hblk*h,word dummy GC_ATTR_UNUSED) +{ +hdr*hhdr=HDR(h); +if (IS_UNCOLLECTABLE(hhdr->hb_obj_kind))return; +GC_clear_hdr_marks(hhdr); +} +GC_API void GC_CALL GC_set_mark_bit(const void*p) +{ +struct hblk*h=HBLKPTR(p); +hdr*hhdr=HDR(h); +word bit_no=MARK_BIT_NO((ptr_t)p - (ptr_t)h,hhdr->hb_sz); +if (!mark_bit_from_hdr(hhdr,bit_no)){ +set_mark_bit_from_hdr(hhdr,bit_no); +++hhdr->hb_n_marks; +} +} +GC_API void GC_CALL GC_clear_mark_bit(const void*p) +{ +struct hblk*h=HBLKPTR(p); +hdr*hhdr=HDR(h); +word bit_no=MARK_BIT_NO((ptr_t)p - (ptr_t)h,hhdr->hb_sz); +if (mark_bit_from_hdr(hhdr,bit_no)){ +size_t n_marks=hhdr->hb_n_marks; +GC_ASSERT(n_marks!=0); +clear_mark_bit_from_hdr(hhdr,bit_no); +n_marks--; +#ifdef PARALLEL_MARK +if (n_marks!=0||!GC_parallel) +hhdr->hb_n_marks=n_marks; +#else +hhdr->hb_n_marks=n_marks; +#endif +} +} +GC_API int GC_CALL GC_is_marked(const void*p) +{ +struct hblk*h=HBLKPTR(p); +hdr*hhdr=HDR(h); +word bit_no=MARK_BIT_NO((ptr_t)p - (ptr_t)h,hhdr->hb_sz); +return (int)mark_bit_from_hdr(hhdr,bit_no); +} +GC_INNER void GC_clear_marks(void) +{ +GC_apply_to_all_blocks(clear_marks_for_block,(word)0); +GC_objects_are_marked=FALSE; +GC_mark_state=MS_INVALID; +GC_scan_ptr=NULL; +} +GC_INNER void GC_initiate_gc(void) +{ +GC_ASSERT(I_HOLD_LOCK()); +#ifndef GC_DISABLE_INCREMENTAL +if (GC_incremental){ +#ifdef CHECKSUMS +GC_read_dirty(FALSE); +GC_check_dirty(); +#else +GC_read_dirty(GC_mark_state==MS_INVALID); +#endif +} +GC_n_rescuing_pages=0; +#endif +if (GC_mark_state==MS_NONE){ +GC_mark_state=MS_PUSH_RESCUERS; +} else if (GC_mark_state!=MS_INVALID){ +ABORT("Unexpected state"); +} +GC_scan_ptr=NULL; +} +#ifdef PARALLEL_MARK +STATIC void GC_do_parallel_mark(void); +#endif +#ifdef GC_DISABLE_INCREMENTAL +#define GC_push_next_marked_dirty(h)GC_push_next_marked(h) +#else +STATIC struct hblk*GC_push_next_marked_dirty(struct hblk*h); +#endif +STATIC struct hblk*GC_push_next_marked(struct hblk*h); +STATIC struct hblk*GC_push_next_marked_uncollectable(struct hblk*h); +static void alloc_mark_stack(size_t); +#ifdef WRAP_MARK_SOME +STATIC GC_bool GC_mark_some_inner(ptr_t cold_gc_frame) +#else +GC_INNER GC_bool GC_mark_some(ptr_t cold_gc_frame) +#endif +{ +switch(GC_mark_state){ +case MS_NONE: +break; +case MS_PUSH_RESCUERS: +if ((word)GC_mark_stack_top +>=(word)(GC_mark_stack_limit - INITIAL_MARK_STACK_SIZE/2)){ +GC_mark_stack_too_small=TRUE; +MARK_FROM_MARK_STACK(); +break; +} else { +GC_scan_ptr=GC_push_next_marked_dirty(GC_scan_ptr); +if 
(NULL==GC_scan_ptr){ +#if!defined(GC_DISABLE_INCREMENTAL) +GC_COND_LOG_PRINTF("Marked from %lu dirty pages\n", +(unsigned long)GC_n_rescuing_pages); +#endif +GC_push_roots(FALSE,cold_gc_frame); +GC_objects_are_marked=TRUE; +if (GC_mark_state!=MS_INVALID){ +GC_mark_state=MS_ROOTS_PUSHED; +} +} +} +break; +case MS_PUSH_UNCOLLECTABLE: +if ((word)GC_mark_stack_top +>=(word)(GC_mark_stack+GC_mark_stack_size/4)){ +#ifdef PARALLEL_MARK +if (GC_parallel)GC_mark_stack_too_small=TRUE; +#endif +MARK_FROM_MARK_STACK(); +break; +} else { +GC_scan_ptr=GC_push_next_marked_uncollectable(GC_scan_ptr); +if (NULL==GC_scan_ptr){ +GC_push_roots(TRUE,cold_gc_frame); +GC_objects_are_marked=TRUE; +if (GC_mark_state!=MS_INVALID){ +GC_mark_state=MS_ROOTS_PUSHED; +} +} +} +break; +case MS_ROOTS_PUSHED: +#ifdef PARALLEL_MARK +if (GC_parallel&&!GC_parallel_mark_disabled){ +GC_do_parallel_mark(); +GC_ASSERT((word)GC_mark_stack_top < (word)GC_first_nonempty); +GC_mark_stack_top=GC_mark_stack - 1; +if (GC_mark_stack_too_small){ +alloc_mark_stack(2*GC_mark_stack_size); +} +if (GC_mark_state==MS_ROOTS_PUSHED){ +GC_mark_state=MS_NONE; +return(TRUE); +} +break; +} +#endif +if ((word)GC_mark_stack_top>=(word)GC_mark_stack){ +MARK_FROM_MARK_STACK(); +break; +} else { +GC_mark_state=MS_NONE; +if (GC_mark_stack_too_small){ +alloc_mark_stack(2*GC_mark_stack_size); +} +return(TRUE); +} +case MS_INVALID: +case MS_PARTIALLY_INVALID: +if (!GC_objects_are_marked){ +GC_mark_state=MS_PUSH_UNCOLLECTABLE; +break; +} +if ((word)GC_mark_stack_top>=(word)GC_mark_stack){ +MARK_FROM_MARK_STACK(); +break; +} +if (NULL==GC_scan_ptr&&GC_mark_state==MS_INVALID){ +if (GC_mark_stack_too_small){ +alloc_mark_stack(2*GC_mark_stack_size); +} +GC_mark_state=MS_PARTIALLY_INVALID; +} +GC_scan_ptr=GC_push_next_marked(GC_scan_ptr); +if (NULL==GC_scan_ptr&&GC_mark_state==MS_PARTIALLY_INVALID){ +GC_push_roots(TRUE,cold_gc_frame); +GC_objects_are_marked=TRUE; +if (GC_mark_state!=MS_INVALID){ +GC_mark_state=MS_ROOTS_PUSHED; +} +} +break; +default: +ABORT("GC_mark_some:bad state"); +} +return(FALSE); +} +#ifdef WRAP_MARK_SOME +#if (defined(MSWIN32)||defined(MSWINCE))&&defined(__GNUC__) +typedef struct { +EXCEPTION_REGISTRATION ex_reg; +void*alt_path; +} ext_ex_regn; +static EXCEPTION_DISPOSITION mark_ex_handler( +struct _EXCEPTION_RECORD*ex_rec, +void*est_frame, +struct _CONTEXT*context, +void*disp_ctxt GC_ATTR_UNUSED) +{ +if (ex_rec->ExceptionCode==STATUS_ACCESS_VIOLATION){ +ext_ex_regn*xer=(ext_ex_regn*)est_frame; +context->Esp=context->Ebp; +context->Ebp=*((DWORD*)context->Esp); +context->Esp=context->Esp - 8; +context->Eip=(DWORD)(xer->alt_path); +return ExceptionContinueExecution; +} else { +return ExceptionContinueSearch; +} +} +#endif +GC_INNER GC_bool GC_mark_some(ptr_t cold_gc_frame) +{ +GC_bool ret_val; +#if defined(MSWIN32)||defined(MSWINCE) +#ifndef __GNUC__ +__try { +ret_val=GC_mark_some_inner(cold_gc_frame); +} __except (GetExceptionCode()==EXCEPTION_ACCESS_VIOLATION? 
+EXCEPTION_EXECUTE_HANDLER:EXCEPTION_CONTINUE_SEARCH){ +goto handle_ex; +} +#if defined(GC_WIN32_THREADS)&&!defined(GC_PTHREADS) +if (GC_started_thread_while_stopped()) +goto handle_thr_start; +#endif +rm_handler: +return ret_val; +#else +ext_ex_regn er; +#if GC_GNUC_PREREQ(4,7)||GC_CLANG_PREREQ(3,3) +#pragma GCC diagnostic push +#if defined(__clang__)||GC_GNUC_PREREQ(6,4) +#pragma GCC diagnostic ignored "-Wpedantic" +#else +#pragma GCC diagnostic ignored "-pedantic" +#endif +er.alt_path=&&handle_ex; +#pragma GCC diagnostic pop +#elif!defined(CPPCHECK) +er.alt_path=&&handle_ex; +#endif +er.ex_reg.handler=mark_ex_handler; +__asm__ __volatile__ ("movl %%fs:0,%0":"=r" (er.ex_reg.prev)); +__asm__ __volatile__ ("movl %0,%%fs:0"::"r" (&er)); +ret_val=GC_mark_some_inner(cold_gc_frame); +if (er.alt_path==0) +goto handle_ex; +#if defined(GC_WIN32_THREADS)&&!defined(GC_PTHREADS) +if (GC_started_thread_while_stopped()) +goto handle_thr_start; +#endif +rm_handler: +__asm__ __volatile__ ("mov %0,%%fs:0"::"r" (er.ex_reg.prev)); +return ret_val; +#endif +#else +#ifndef DEFAULT_VDB +if (GC_auto_incremental){ +WARN("Incremental GC incompatible with/proc roots\n",0); +} +#endif +GC_setup_temporary_fault_handler(); +if(SETJMP(GC_jmp_buf)!=0)goto handle_ex; +ret_val=GC_mark_some_inner(cold_gc_frame); +rm_handler: +GC_reset_fault_handler(); +return ret_val; +#endif +handle_ex: +{ +static word warned_gc_no; +if (warned_gc_no!=GC_gc_no){ +WARN("Caught ACCESS_VIOLATION in marker;" +" memory mapping disappeared\n",0); +warned_gc_no=GC_gc_no; +} +} +#if (defined(MSWIN32)||defined(MSWINCE))&&defined(GC_WIN32_THREADS)&&!defined(GC_PTHREADS) +handle_thr_start: +#endif +#ifdef REGISTER_LIBRARIES_EARLY +START_WORLD(); +GC_cond_register_dynamic_libraries(); +STOP_WORLD(); +#endif +GC_invalidate_mark_state(); +GC_scan_ptr=NULL; +ret_val=FALSE; +goto rm_handler; +} +#endif +GC_INNER void GC_invalidate_mark_state(void) +{ +GC_mark_state=MS_INVALID; +GC_mark_stack_top=GC_mark_stack-1; +} +GC_INNER mse*GC_signal_mark_stack_overflow(mse*msp) +{ +GC_mark_state=MS_INVALID; +#ifdef PARALLEL_MARK +if (!GC_parallel) +GC_mark_stack_too_small=TRUE; +#else +GC_mark_stack_too_small=TRUE; +#endif +GC_COND_LOG_PRINTF("Mark stack overflow;current size=%lu entries\n", +(unsigned long)GC_mark_stack_size); +return(msp - GC_MARK_STACK_DISCARDS); +} +GC_ATTR_NO_SANITIZE_ADDR GC_ATTR_NO_SANITIZE_MEMORY GC_ATTR_NO_SANITIZE_THREAD +GC_INNER mse*GC_mark_from(mse*mark_stack_top,mse*mark_stack, +mse*mark_stack_limit) +{ +signed_word credit=HBLKSIZE; +ptr_t current_p; +word current; +ptr_t limit=0; +word descr; +ptr_t greatest_ha=(ptr_t)GC_greatest_plausible_heap_addr; +ptr_t least_ha=(ptr_t)GC_least_plausible_heap_addr; +DECLARE_HDR_CACHE; +#define SPLIT_RANGE_WORDS 128 +GC_objects_are_marked=TRUE; +INIT_HDR_CACHE; +#ifdef OS2 +while ((word)mark_stack_top>=(word)mark_stack&&credit>=0) +#else +while (((((word)mark_stack_top - (word)mark_stack)|(word)credit) +&SIGNB)==0) +#endif +{ +current_p=mark_stack_top->mse_start; +descr=mark_stack_top->mse_descr.w; +retry: +if (descr&((~(WORDS_TO_BYTES(SPLIT_RANGE_WORDS)- 1))|GC_DS_TAGS)){ +word tag=descr&GC_DS_TAGS; +GC_STATIC_ASSERT(GC_DS_TAGS==0x3); +switch(tag){ +case GC_DS_LENGTH: +GC_ASSERT(descr < (word)GC_greatest_plausible_heap_addr +- (word)GC_least_plausible_heap_addr +||(word)(current_p+descr) +<=(word)GC_least_plausible_heap_addr +||(word)current_p>=(word)GC_greatest_plausible_heap_addr); +#ifdef PARALLEL_MARK +#define SHARE_BYTES 2048 +if (descr > SHARE_BYTES&&GC_parallel +&&(word)mark_stack_top < 
(word)(mark_stack_limit - 1)){ +word new_size=(descr/2)&~(word)(sizeof(word)-1); +mark_stack_top->mse_start=current_p; +mark_stack_top->mse_descr.w=new_size+sizeof(word); +mark_stack_top++; +#ifdef ENABLE_TRACE +if ((word)GC_trace_addr>=(word)current_p +&&(word)GC_trace_addr < (word)(current_p+descr)){ +GC_log_printf("GC #%u:large section;start %p,len %lu," +" splitting (parallel)at %p\n", +(unsigned)GC_gc_no,(void*)current_p, +(unsigned long)descr, +(void*)(current_p+new_size)); +} +#endif +current_p+=new_size; +descr-=new_size; +goto retry; +} +#endif +mark_stack_top->mse_start= +limit=current_p+WORDS_TO_BYTES(SPLIT_RANGE_WORDS-1); +mark_stack_top->mse_descr.w= +descr - WORDS_TO_BYTES(SPLIT_RANGE_WORDS-1); +#ifdef ENABLE_TRACE +if ((word)GC_trace_addr>=(word)current_p +&&(word)GC_trace_addr < (word)(current_p+descr)){ +GC_log_printf("GC #%u:large section;start %p,len %lu," +" splitting at %p\n", +(unsigned)GC_gc_no,(void*)current_p, +(unsigned long)descr,(void*)limit); +} +#endif +limit+=sizeof(word)- ALIGNMENT; +break; +case GC_DS_BITMAP: +mark_stack_top--; +#ifdef ENABLE_TRACE +if ((word)GC_trace_addr>=(word)current_p +&&(word)GC_trace_addr < (word)(current_p ++WORDS_TO_BYTES(WORDSZ-2))){ +GC_log_printf("GC #%u:tracing from %p bitmap descr %lu\n", +(unsigned)GC_gc_no,(void*)current_p, +(unsigned long)descr); +} +#endif +descr&=~GC_DS_TAGS; +credit-=WORDS_TO_BYTES(WORDSZ/2); +while (descr!=0){ +if ((descr&SIGNB)!=0){ +current=*(word*)current_p; +FIXUP_POINTER(current); +if (current>=(word)least_ha&¤t < (word)greatest_ha){ +PREFETCH((ptr_t)current); +#ifdef ENABLE_TRACE +if (GC_trace_addr==current_p){ +GC_log_printf("GC #%u:considering(3)%p->%p\n", +(unsigned)GC_gc_no,(void*)current_p, +(void*)current); +} +#endif +PUSH_CONTENTS((ptr_t)current,mark_stack_top, +mark_stack_limit,current_p); +} +} +descr<<=1; +current_p+=sizeof(word); +} +continue; +case GC_DS_PROC: +mark_stack_top--; +#ifdef ENABLE_TRACE +if ((word)GC_trace_addr>=(word)current_p +&&GC_base(current_p)!=0 +&&GC_base(current_p)==GC_base(GC_trace_addr)){ +GC_log_printf("GC #%u:tracing from %p,proc descr %lu\n", +(unsigned)GC_gc_no,(void*)current_p, +(unsigned long)descr); +} +#endif +credit-=GC_PROC_BYTES; +mark_stack_top=(*PROC(descr))((word*)current_p,mark_stack_top, +mark_stack_limit,ENV(descr)); +continue; +case GC_DS_PER_OBJECT: +if ((signed_word)descr>=0){ +descr=*(word*)(current_p+descr - GC_DS_PER_OBJECT); +} else { +ptr_t type_descr=*(ptr_t*)current_p; +if (EXPECT(0==type_descr,FALSE)){ +mark_stack_top--; +continue; +} +descr=*(word*)(type_descr +- ((signed_word)descr+(GC_INDIR_PER_OBJ_BIAS +- GC_DS_PER_OBJECT))); +} +if (0==descr){ +mark_stack_top--; +continue; +} +goto retry; +} +} else { +mark_stack_top--; +#ifndef SMALL_CONFIG +if (descr < sizeof(word)) +continue; +#endif +#ifdef ENABLE_TRACE +if ((word)GC_trace_addr>=(word)current_p +&&(word)GC_trace_addr < (word)(current_p+descr)){ +GC_log_printf("GC #%u:small object;start %p,len %lu\n", +(unsigned)GC_gc_no,(void*)current_p, +(unsigned long)descr); +} +#endif +limit=current_p+(word)descr; +} +GC_ASSERT(!((word)current_p&(ALIGNMENT-1))); +credit-=limit - current_p; +limit-=sizeof(word); +{ +#define PREF_DIST 4 +#ifndef SMALL_CONFIG +word deferred; +for(;;){ +PREFETCH(limit - PREF_DIST*CACHE_LINE_SIZE); +GC_ASSERT((word)limit>=(word)current_p); +deferred=*(word*)limit; +FIXUP_POINTER(deferred); +limit-=ALIGNMENT; +if (deferred>=(word)least_ha&&deferred < (word)greatest_ha){ +PREFETCH((ptr_t)deferred); +break; +} +if ((word)current_p > (word)limit)goto next_object; 
+deferred=*(word*)limit; +FIXUP_POINTER(deferred); +limit-=ALIGNMENT; +if (deferred>=(word)least_ha&&deferred < (word)greatest_ha){ +PREFETCH((ptr_t)deferred); +break; +} +if ((word)current_p > (word)limit)goto next_object; +} +#endif +while ((word)current_p<=(word)limit){ +current=*(word*)current_p; +FIXUP_POINTER(current); +PREFETCH(current_p+PREF_DIST*CACHE_LINE_SIZE); +if (current>=(word)least_ha&¤t < (word)greatest_ha){ +PREFETCH((ptr_t)current); +#ifdef ENABLE_TRACE +if (GC_trace_addr==current_p){ +GC_log_printf("GC #%u:considering(1)%p->%p\n", +(unsigned)GC_gc_no,(void*)current_p, +(void*)current); +} +#endif +PUSH_CONTENTS((ptr_t)current,mark_stack_top, +mark_stack_limit,current_p); +} +current_p+=ALIGNMENT; +} +#ifndef SMALL_CONFIG +#ifdef ENABLE_TRACE +if (GC_trace_addr==current_p){ +GC_log_printf("GC #%u:considering(2)%p->%p\n", +(unsigned)GC_gc_no,(void*)current_p, +(void*)deferred); +} +#endif +PUSH_CONTENTS((ptr_t)deferred,mark_stack_top, +mark_stack_limit,current_p); +next_object:; +#endif +} +} +return mark_stack_top; +} +#ifdef PARALLEL_MARK +STATIC GC_bool GC_help_wanted=FALSE; +STATIC unsigned GC_helper_count=0; +STATIC unsigned GC_active_count=0; +GC_INNER word GC_mark_no=0; +#ifdef LINT2 +#define LOCAL_MARK_STACK_SIZE (HBLKSIZE/8) +#else +#define LOCAL_MARK_STACK_SIZE HBLKSIZE +#endif +GC_INNER void GC_wait_for_markers_init(void) +{ +signed_word count; +if (GC_markers_m1==0) +return; +#ifndef CAN_HANDLE_FORK +GC_ASSERT(NULL==GC_main_local_mark_stack); +#else +if (NULL==GC_main_local_mark_stack) +#endif +{ +size_t bytes_to_get= +ROUNDUP_PAGESIZE_IF_MMAP(LOCAL_MARK_STACK_SIZE*sizeof(mse)); +GC_ASSERT(GC_page_size!=0); +GC_main_local_mark_stack=(mse*)GET_MEM(bytes_to_get); +if (NULL==GC_main_local_mark_stack) +ABORT("Insufficient memory for main local_mark_stack"); +GC_add_to_our_memory((ptr_t)GC_main_local_mark_stack,bytes_to_get); +} +GC_acquire_mark_lock(); +GC_fl_builder_count+=GC_markers_m1; +count=GC_fl_builder_count; +GC_release_mark_lock(); +if (count!=0){ +GC_ASSERT(count > 0); +GC_wait_for_reclaim(); +} +} +STATIC mse*GC_steal_mark_stack(mse*low,mse*high,mse*local, +unsigned max,mse**next) +{ +mse*p; +mse*top=local - 1; +unsigned i=0; +GC_ASSERT((word)high>=(word)(low - 1) +&&(word)(high - low+1)<=GC_mark_stack_size); +for (p=low;(word)p<=(word)high&&i<=max;++p){ +word descr=(word)AO_load(&p->mse_descr.ao); +if (descr!=0){ +AO_store_release_write(&p->mse_descr.ao,0); +++top; +top->mse_descr.w=descr; +top->mse_start=p->mse_start; +GC_ASSERT((descr&GC_DS_TAGS)!=GC_DS_LENGTH +||descr < (word)GC_greatest_plausible_heap_addr +- (word)GC_least_plausible_heap_addr +||(word)(p->mse_start+descr) +<=(word)GC_least_plausible_heap_addr +||(word)p->mse_start +>=(word)GC_greatest_plausible_heap_addr); +++i; +if ((descr&GC_DS_TAGS)==GC_DS_LENGTH)i+=(int)(descr>>8); +} +} +*next=p; +return top; +} +STATIC void GC_return_mark_stack(mse*low,mse*high) +{ +mse*my_top; +mse*my_start; +size_t stack_size; +if ((word)high < (word)low)return; +stack_size=high - low+1; +GC_acquire_mark_lock(); +my_top=GC_mark_stack_top; +my_start=my_top+1; +if ((word)(my_start - GC_mark_stack+stack_size) +> (word)GC_mark_stack_size){ +GC_COND_LOG_PRINTF("No room to copy back mark stack\n"); +GC_mark_state=MS_INVALID; +GC_mark_stack_too_small=TRUE; +} else { +BCOPY(low,my_start,stack_size*sizeof(mse)); +GC_ASSERT((mse*)AO_load((volatile AO_t*)(&GC_mark_stack_top)) +==my_top); +AO_store_release_write((volatile AO_t*)(&GC_mark_stack_top), +(AO_t)(my_top+stack_size)); +} +GC_release_mark_lock(); 
+GC_notify_all_marker(); +} +#ifndef N_LOCAL_ITERS +#define N_LOCAL_ITERS 1 +#endif +static GC_bool has_inactive_helpers(void) +{ +GC_bool res; +GC_acquire_mark_lock(); +res=GC_active_count < GC_helper_count; +GC_release_mark_lock(); +return res; +} +STATIC void GC_do_local_mark(mse*local_mark_stack,mse*local_top) +{ +unsigned n; +for (;;){ +for (n=0;n < N_LOCAL_ITERS;++n){ +local_top=GC_mark_from(local_top,local_mark_stack, +local_mark_stack+LOCAL_MARK_STACK_SIZE); +if ((word)local_top < (word)local_mark_stack)return; +if ((word)(local_top - local_mark_stack) +>=LOCAL_MARK_STACK_SIZE/2){ +GC_return_mark_stack(local_mark_stack,local_top); +return; +} +} +if ((word)AO_load((volatile AO_t*)&GC_mark_stack_top) +< (word)AO_load(&GC_first_nonempty) +&&(word)local_top > (word)(local_mark_stack+1) +&&has_inactive_helpers()){ +mse*new_bottom=local_mark_stack ++(local_top - local_mark_stack)/2; +GC_ASSERT((word)new_bottom > (word)local_mark_stack +&&(word)new_bottom < (word)local_top); +GC_return_mark_stack(local_mark_stack,new_bottom - 1); +memmove(local_mark_stack,new_bottom, +(local_top - new_bottom+1)*sizeof(mse)); +local_top-=(new_bottom - local_mark_stack); +} +} +} +#ifndef ENTRIES_TO_GET +#define ENTRIES_TO_GET 5 +#endif +STATIC void GC_mark_local(mse*local_mark_stack,int id) +{ +mse*my_first_nonempty; +GC_active_count++; +my_first_nonempty=(mse*)AO_load(&GC_first_nonempty); +GC_ASSERT((word)GC_mark_stack<=(word)my_first_nonempty); +GC_ASSERT((word)my_first_nonempty +<=(word)AO_load((volatile AO_t*)&GC_mark_stack_top)+sizeof(mse)); +GC_VERBOSE_LOG_PRINTF("Starting mark helper %d\n",id); +GC_release_mark_lock(); +for (;;){ +size_t n_on_stack; +unsigned n_to_get; +mse*my_top; +mse*local_top; +mse*global_first_nonempty=(mse*)AO_load(&GC_first_nonempty); +GC_ASSERT((word)my_first_nonempty>=(word)GC_mark_stack&& +(word)my_first_nonempty<= +(word)AO_load((volatile AO_t*)&GC_mark_stack_top) ++sizeof(mse)); +GC_ASSERT((word)global_first_nonempty>=(word)GC_mark_stack); +if ((word)my_first_nonempty < (word)global_first_nonempty){ +my_first_nonempty=global_first_nonempty; +} else if ((word)global_first_nonempty < (word)my_first_nonempty){ +(void)AO_compare_and_swap(&GC_first_nonempty, +(AO_t)global_first_nonempty, +(AO_t)my_first_nonempty); +} +my_top=(mse*)AO_load_acquire((volatile AO_t*)(&GC_mark_stack_top)); +if ((word)my_top < (word)my_first_nonempty){ +GC_acquire_mark_lock(); +my_top=GC_mark_stack_top; +n_on_stack=my_top - my_first_nonempty+1; +if (0==n_on_stack){ +GC_active_count--; +GC_ASSERT(GC_active_count<=GC_helper_count); +if (0==GC_active_count)GC_notify_all_marker(); +while (GC_active_count > 0 +&&(word)AO_load(&GC_first_nonempty) +> (word)GC_mark_stack_top){ +GC_wait_marker(); +} +if (GC_active_count==0 +&&(word)AO_load(&GC_first_nonempty) +> (word)GC_mark_stack_top){ +GC_bool need_to_notify=FALSE; +GC_helper_count--; +if (0==GC_helper_count)need_to_notify=TRUE; +GC_VERBOSE_LOG_PRINTF("Finished mark helper %d\n",id); +if (need_to_notify)GC_notify_all_marker(); +return; +} +GC_active_count++; +GC_ASSERT(GC_active_count > 0); +GC_release_mark_lock(); +continue; +} else { +GC_release_mark_lock(); +} +} else { +n_on_stack=my_top - my_first_nonempty+1; +} +n_to_get=ENTRIES_TO_GET; +if (n_on_stack < 2*ENTRIES_TO_GET)n_to_get=1; +local_top=GC_steal_mark_stack(my_first_nonempty,my_top, +local_mark_stack,n_to_get, +&my_first_nonempty); +GC_ASSERT((word)my_first_nonempty>=(word)GC_mark_stack&& +(word)my_first_nonempty<= +(word)AO_load((volatile AO_t*)&GC_mark_stack_top) ++sizeof(mse)); 
+GC_do_local_mark(local_mark_stack,local_top); +} +} +STATIC void GC_do_parallel_mark(void) +{ +GC_acquire_mark_lock(); +GC_ASSERT(I_HOLD_LOCK()); +if (GC_help_wanted||GC_active_count!=0||GC_helper_count!=0) +ABORT("Tried to start parallel mark in bad state"); +GC_VERBOSE_LOG_PRINTF("Starting marking for mark phase number %lu\n", +(unsigned long)GC_mark_no); +GC_first_nonempty=(AO_t)GC_mark_stack; +GC_active_count=0; +GC_helper_count=1; +GC_help_wanted=TRUE; +GC_notify_all_marker(); +GC_mark_local(GC_main_local_mark_stack,0); +GC_help_wanted=FALSE; +while (GC_helper_count > 0){ +GC_wait_marker(); +} +GC_VERBOSE_LOG_PRINTF("Finished marking for mark phase number %lu\n", +(unsigned long)GC_mark_no); +GC_mark_no++; +GC_release_mark_lock(); +GC_notify_all_marker(); +} +GC_INNER void GC_help_marker(word my_mark_no) +{ +#define my_id my_id_mse.mse_descr.w +mse my_id_mse; +mse local_mark_stack[LOCAL_MARK_STACK_SIZE]; +GC_ASSERT(GC_parallel); +while (GC_mark_no < my_mark_no +||(!GC_help_wanted&&GC_mark_no==my_mark_no)){ +GC_wait_marker(); +} +my_id=GC_helper_count; +if (GC_mark_no!=my_mark_no||my_id > (unsigned)GC_markers_m1){ +return; +} +GC_helper_count=(unsigned)my_id+1; +GC_mark_local(local_mark_stack,(int)my_id); +#undef my_id +} +#endif +GC_INNER void GC_scratch_recycle_inner(void*ptr,size_t bytes) +{ +if (ptr!=NULL){ +size_t page_offset=(word)ptr&(GC_page_size - 1); +size_t displ=0; +size_t recycled_bytes; +GC_ASSERT(bytes!=0); +GC_ASSERT(GC_page_size!=0); +if (page_offset!=0) +displ=GC_page_size - page_offset; +recycled_bytes=(bytes - displ)&~(GC_page_size - 1); +GC_COND_LOG_PRINTF("Recycle %lu scratch-allocated bytes at %p\n", +(unsigned long)recycled_bytes,ptr); +if (recycled_bytes > 0) +GC_add_to_heap((struct hblk*)((word)ptr+displ),recycled_bytes); +} +} +static void alloc_mark_stack(size_t n) +{ +mse*new_stack=(mse*)GC_scratch_alloc(n*sizeof(struct GC_ms_entry)); +#ifdef GWW_VDB +static GC_bool GC_incremental_at_stack_alloc=FALSE; +GC_bool recycle_old=!GC_auto_incremental +||GC_incremental_at_stack_alloc; +GC_incremental_at_stack_alloc=GC_auto_incremental; +#else +#define recycle_old TRUE +#endif +GC_mark_stack_too_small=FALSE; +if (GC_mark_stack!=NULL){ +if (new_stack!=0){ +if (recycle_old){ +GC_scratch_recycle_inner(GC_mark_stack, +GC_mark_stack_size*sizeof(struct GC_ms_entry)); +} +GC_mark_stack=new_stack; +GC_mark_stack_size=n; +GC_mark_stack_limit=new_stack+n; +GC_COND_LOG_PRINTF("Grew mark stack to %lu frames\n", +(unsigned long)GC_mark_stack_size); +} else { +WARN("Failed to grow mark stack to %" WARN_PRIdPTR " frames\n",n); +} +} else if (NULL==new_stack){ +GC_err_printf("No space for mark stack\n"); +EXIT(); +} else { +GC_mark_stack=new_stack; +GC_mark_stack_size=n; +GC_mark_stack_limit=new_stack+n; +} +GC_mark_stack_top=GC_mark_stack-1; +} +GC_INNER void GC_mark_init(void) +{ +alloc_mark_stack(INITIAL_MARK_STACK_SIZE); +} +GC_API void GC_CALL GC_push_all(void*bottom,void*top) +{ +word length; +bottom=(void*)(((word)bottom+ALIGNMENT-1)&~(ALIGNMENT-1)); +top=(void*)((word)top&~(ALIGNMENT-1)); +if ((word)bottom>=(word)top)return; +GC_mark_stack_top++; +if ((word)GC_mark_stack_top>=(word)GC_mark_stack_limit){ +ABORT("Unexpected mark stack overflow"); +} +length=(word)top - (word)bottom; +#if GC_DS_TAGS > ALIGNMENT - 1 +length+=GC_DS_TAGS; +length&=~GC_DS_TAGS; +#endif +GC_mark_stack_top->mse_start=(ptr_t)bottom; +GC_mark_stack_top->mse_descr.w=length; +} +#ifndef GC_DISABLE_INCREMENTAL +STATIC void GC_push_selected(ptr_t bottom,ptr_t top, +GC_bool (*dirty_fn)(struct hblk*)) +{ 
+struct hblk*h; +bottom=(ptr_t)(((word)bottom+ALIGNMENT-1)&~(ALIGNMENT-1)); +top=(ptr_t)(((word)top)&~(ALIGNMENT-1)); +if ((word)bottom>=(word)top)return; +h=HBLKPTR(bottom+HBLKSIZE); +if ((word)top<=(word)h){ +if ((*dirty_fn)(h-1)){ +GC_push_all(bottom,top); +} +return; +} +if ((*dirty_fn)(h-1)){ +if ((word)(GC_mark_stack_top - GC_mark_stack) +> 3*GC_mark_stack_size/4){ +GC_push_all(bottom,top); +return; +} +GC_push_all(bottom,h); +} +while ((word)(h+1)<=(word)top){ +if ((*dirty_fn)(h)){ +if ((word)(GC_mark_stack_top - GC_mark_stack) +> 3*GC_mark_stack_size/4){ +GC_push_all(h,top); +return; +} else { +GC_push_all(h,h+1); +} +} +h++; +} +if ((ptr_t)h!=top&&(*dirty_fn)(h)){ +GC_push_all(h,top); +} +} +GC_API void GC_CALL GC_push_conditional(void*bottom,void*top,int all) +{ +if (!all){ +GC_push_selected((ptr_t)bottom,(ptr_t)top,GC_page_was_dirty); +} else { +#ifdef PROC_VDB +if (GC_auto_incremental){ +GC_push_selected((ptr_t)bottom,(ptr_t)top,GC_page_was_ever_dirty); +} else +#endif +{ +GC_push_all(bottom,top); +} +} +} +#else +GC_API void GC_CALL GC_push_conditional(void*bottom,void*top, +int all GC_ATTR_UNUSED) +{ +GC_push_all(bottom,top); +} +#endif +#if defined(AMIGA)||defined(MACOS)||defined(GC_DARWIN_THREADS) +void GC_push_one(word p) +{ +GC_PUSH_ONE_STACK(p,MARKED_FROM_REGISTER); +} +#endif +#ifdef GC_WIN32_THREADS +GC_INNER void GC_push_many_regs(const word*regs,unsigned count) +{ +unsigned i; +for (i=0;i < count;i++) +GC_PUSH_ONE_STACK(regs[i],MARKED_FROM_REGISTER); +} +#endif +GC_API struct GC_ms_entry*GC_CALL GC_mark_and_push(void*obj, +mse*mark_stack_ptr, +mse*mark_stack_limit, +void**src GC_ATTR_UNUSED) +{ +hdr*hhdr; +PREFETCH(obj); +GET_HDR(obj,hhdr); +if ((EXPECT(IS_FORWARDING_ADDR_OR_NIL(hhdr),FALSE) +&&(!GC_all_interior_pointers +||NULL==(hhdr=GC_find_header((ptr_t)GC_base(obj))))) +||EXPECT(HBLK_IS_FREE(hhdr),FALSE)){ +GC_ADD_TO_BLACK_LIST_NORMAL(obj,(ptr_t)src); +return mark_stack_ptr; +} +return GC_push_contents_hdr((ptr_t)obj,mark_stack_ptr,mark_stack_limit, +(ptr_t)src,hhdr,TRUE); +} +#if defined(PRINT_BLACK_LIST)||defined(KEEP_BACK_PTRS) +GC_INNER void GC_mark_and_push_stack(ptr_t p,ptr_t source) +#else +GC_INNER void GC_mark_and_push_stack(ptr_t p) +#define source ((ptr_t)0) +#endif +{ +hdr*hhdr; +ptr_t r=p; +PREFETCH(p); +GET_HDR(p,hhdr); +if (EXPECT(IS_FORWARDING_ADDR_OR_NIL(hhdr),FALSE)){ +if (NULL==hhdr +||(r=(ptr_t)GC_base(p))==NULL +||(hhdr=HDR(r))==NULL){ +GC_ADD_TO_BLACK_LIST_STACK(p,source); +return; +} +} +if (EXPECT(HBLK_IS_FREE(hhdr),FALSE)){ +GC_ADD_TO_BLACK_LIST_NORMAL(p,source); +return; +} +#ifdef THREADS +GC_dirty(p); +#endif +GC_mark_stack_top=GC_push_contents_hdr(r,GC_mark_stack_top, +GC_mark_stack_limit, +source,hhdr,FALSE); +} +#undef source +#ifdef TRACE_BUF +#ifndef TRACE_ENTRIES +#define TRACE_ENTRIES 1000 +#endif +struct trace_entry { +char*kind; +word gc_no; +word bytes_allocd; +word arg1; +word arg2; +} GC_trace_buf[TRACE_ENTRIES]={ { NULL,0,0,0,0 } }; +void GC_add_trace_entry(char*kind,word arg1,word arg2) +{ +GC_trace_buf[GC_trace_buf_ptr].kind=kind; +GC_trace_buf[GC_trace_buf_ptr].gc_no=GC_gc_no; +GC_trace_buf[GC_trace_buf_ptr].bytes_allocd=GC_bytes_allocd; +GC_trace_buf[GC_trace_buf_ptr].arg1=arg1^0x80000000; +GC_trace_buf[GC_trace_buf_ptr].arg2=arg2^0x80000000; +GC_trace_buf_ptr++; +if (GC_trace_buf_ptr>=TRACE_ENTRIES)GC_trace_buf_ptr=0; +} +GC_API void GC_CALL GC_print_trace_inner(word gc_no) +{ +int i; +for (i=GC_trace_buf_ptr-1;i!=GC_trace_buf_ptr;i--){ +struct trace_entry*p; +if (i < 0)i=TRACE_ENTRIES-1; +p=GC_trace_buf+i; +if 
(p->gc_no < gc_no||p->kind==0){ +return; +} +GC_printf("Trace:%s (gc:%u,bytes:%lu)0x%lX,0x%lX\n", +p->kind,(unsigned)p->gc_no, +(unsigned long)p->bytes_allocd, +(long)p->arg1^0x80000000L,(long)p->arg2^0x80000000L); +} +GC_printf("Trace incomplete\n"); +} +GC_API void GC_CALL GC_print_trace(word gc_no) +{ +DCL_LOCK_STATE; +LOCK(); +GC_print_trace_inner(gc_no); +UNLOCK(); +} +#endif +GC_ATTR_NO_SANITIZE_ADDR GC_ATTR_NO_SANITIZE_MEMORY GC_ATTR_NO_SANITIZE_THREAD +GC_API void GC_CALL GC_push_all_eager(void*bottom,void*top) +{ +word*b=(word*)(((word)bottom+ALIGNMENT-1)&~(ALIGNMENT-1)); +word*t=(word*)(((word)top)&~(ALIGNMENT-1)); +REGISTER word*p; +REGISTER word*lim; +REGISTER ptr_t greatest_ha=(ptr_t)GC_greatest_plausible_heap_addr; +REGISTER ptr_t least_ha=(ptr_t)GC_least_plausible_heap_addr; +#define GC_greatest_plausible_heap_addr greatest_ha +#define GC_least_plausible_heap_addr least_ha +if (top==0)return; +lim=t - 1; +for (p=b;(word)p<=(word)lim; +p=(word*)(((ptr_t)p)+ALIGNMENT)){ +REGISTER word q=*p; +GC_PUSH_ONE_STACK(q,p); +} +#undef GC_greatest_plausible_heap_addr +#undef GC_least_plausible_heap_addr +} +GC_INNER void GC_push_all_stack(ptr_t bottom,ptr_t top) +{ +#ifndef NEED_FIXUP_POINTER +if (GC_all_interior_pointers +#if defined(THREADS)&&defined(MPROTECT_VDB) +&&!GC_auto_incremental +#endif +&&(word)GC_mark_stack_top +< (word)(GC_mark_stack_limit - INITIAL_MARK_STACK_SIZE/8)){ +GC_push_all(bottom,top); +} else +#endif +{ +GC_push_all_eager(bottom,top); +} +} +#if defined(WRAP_MARK_SOME)&&defined(PARALLEL_MARK) +GC_ATTR_NO_SANITIZE_ADDR GC_ATTR_NO_SANITIZE_MEMORY +GC_ATTR_NO_SANITIZE_THREAD +GC_INNER void GC_push_conditional_eager(void*bottom,void*top, +GC_bool all) +{ +word*b=(word*)(((word)bottom+ALIGNMENT-1)&~(ALIGNMENT-1)); +word*t=(word*)(((word)top)&~(ALIGNMENT-1)); +REGISTER word*p; +REGISTER word*lim; +REGISTER ptr_t greatest_ha=(ptr_t)GC_greatest_plausible_heap_addr; +REGISTER ptr_t least_ha=(ptr_t)GC_least_plausible_heap_addr; +#define GC_greatest_plausible_heap_addr greatest_ha +#define GC_least_plausible_heap_addr least_ha +if (top==NULL) +return; +(void)all; +lim=t - 1; +for (p=b;(word)p<=(word)lim;p=(word*)((ptr_t)p+ALIGNMENT)){ +REGISTER word q=*p; +GC_PUSH_ONE_HEAP(q,p,GC_mark_stack_top); +} +#undef GC_greatest_plausible_heap_addr +#undef GC_least_plausible_heap_addr +} +#endif +#if!defined(SMALL_CONFIG)&&!defined(USE_MARK_BYTES)&&defined(MARK_BIT_PER_GRANULE) +#if GC_GRANULE_WORDS==1 +#define USE_PUSH_MARKED_ACCELERATORS +#define PUSH_GRANULE(q)do { word qcontents=(q)[0];GC_PUSH_ONE_HEAP(qcontents,q,GC_mark_stack_top);} while (0) +#elif GC_GRANULE_WORDS==2 +#define USE_PUSH_MARKED_ACCELERATORS +#define PUSH_GRANULE(q)do { word qcontents=(q)[0];GC_PUSH_ONE_HEAP(qcontents,q,GC_mark_stack_top);qcontents=(q)[1];GC_PUSH_ONE_HEAP(qcontents,(q)+1,GC_mark_stack_top);} while (0) +#elif GC_GRANULE_WORDS==4 +#define USE_PUSH_MARKED_ACCELERATORS +#define PUSH_GRANULE(q)do { word qcontents=(q)[0];GC_PUSH_ONE_HEAP(qcontents,q,GC_mark_stack_top);qcontents=(q)[1];GC_PUSH_ONE_HEAP(qcontents,(q)+1,GC_mark_stack_top);qcontents=(q)[2];GC_PUSH_ONE_HEAP(qcontents,(q)+2,GC_mark_stack_top);qcontents=(q)[3];GC_PUSH_ONE_HEAP(qcontents,(q)+3,GC_mark_stack_top);} while (0) +#endif +#endif +#ifdef USE_PUSH_MARKED_ACCELERATORS +STATIC void GC_push_marked1(struct hblk*h,hdr*hhdr) +{ +word*mark_word_addr=&(hhdr->hb_marks[0]); +word*p; +word*plim; +ptr_t greatest_ha=(ptr_t)GC_greatest_plausible_heap_addr; +ptr_t least_ha=(ptr_t)GC_least_plausible_heap_addr; +mse*mark_stack_top=GC_mark_stack_top; 
+mse*mark_stack_limit=GC_mark_stack_limit; +#undef GC_mark_stack_top +#undef GC_mark_stack_limit +#define GC_mark_stack_top mark_stack_top +#define GC_mark_stack_limit mark_stack_limit +#define GC_greatest_plausible_heap_addr greatest_ha +#define GC_least_plausible_heap_addr least_ha +p=(word*)(h->hb_body); +plim=(word*)(((word)h)+HBLKSIZE); +while ((word)p < (word)plim){ +word mark_word=*mark_word_addr++; +word*q=p; +while(mark_word!=0){ +if (mark_word&1){ +PUSH_GRANULE(q); +} +q+=GC_GRANULE_WORDS; +mark_word>>=1; +} +p+=WORDSZ*GC_GRANULE_WORDS; +} +#undef GC_greatest_plausible_heap_addr +#undef GC_least_plausible_heap_addr +#undef GC_mark_stack_top +#undef GC_mark_stack_limit +#define GC_mark_stack_limit GC_arrays._mark_stack_limit +#define GC_mark_stack_top GC_arrays._mark_stack_top +GC_mark_stack_top=mark_stack_top; +} +#ifndef UNALIGNED_PTRS +STATIC void GC_push_marked2(struct hblk*h,hdr*hhdr) +{ +word*mark_word_addr=&(hhdr->hb_marks[0]); +word*p; +word*plim; +ptr_t greatest_ha=(ptr_t)GC_greatest_plausible_heap_addr; +ptr_t least_ha=(ptr_t)GC_least_plausible_heap_addr; +mse*mark_stack_top=GC_mark_stack_top; +mse*mark_stack_limit=GC_mark_stack_limit; +#undef GC_mark_stack_top +#undef GC_mark_stack_limit +#define GC_mark_stack_top mark_stack_top +#define GC_mark_stack_limit mark_stack_limit +#define GC_greatest_plausible_heap_addr greatest_ha +#define GC_least_plausible_heap_addr least_ha +p=(word*)(h->hb_body); +plim=(word*)(((word)h)+HBLKSIZE); +while ((word)p < (word)plim){ +word mark_word=*mark_word_addr++; +word*q=p; +while(mark_word!=0){ +if (mark_word&1){ +PUSH_GRANULE(q); +PUSH_GRANULE(q+GC_GRANULE_WORDS); +} +q+=2*GC_GRANULE_WORDS; +mark_word>>=2; +} +p+=WORDSZ*GC_GRANULE_WORDS; +} +#undef GC_greatest_plausible_heap_addr +#undef GC_least_plausible_heap_addr +#undef GC_mark_stack_top +#undef GC_mark_stack_limit +#define GC_mark_stack_limit GC_arrays._mark_stack_limit +#define GC_mark_stack_top GC_arrays._mark_stack_top +GC_mark_stack_top=mark_stack_top; +} +#if GC_GRANULE_WORDS < 4 +STATIC void GC_push_marked4(struct hblk*h,hdr*hhdr) +{ +word*mark_word_addr=&(hhdr->hb_marks[0]); +word*p; +word*plim; +ptr_t greatest_ha=(ptr_t)GC_greatest_plausible_heap_addr; +ptr_t least_ha=(ptr_t)GC_least_plausible_heap_addr; +mse*mark_stack_top=GC_mark_stack_top; +mse*mark_stack_limit=GC_mark_stack_limit; +#undef GC_mark_stack_top +#undef GC_mark_stack_limit +#define GC_mark_stack_top mark_stack_top +#define GC_mark_stack_limit mark_stack_limit +#define GC_greatest_plausible_heap_addr greatest_ha +#define GC_least_plausible_heap_addr least_ha +p=(word*)(h->hb_body); +plim=(word*)(((word)h)+HBLKSIZE); +while ((word)p < (word)plim){ +word mark_word=*mark_word_addr++; +word*q=p; +while(mark_word!=0){ +if (mark_word&1){ +PUSH_GRANULE(q); +PUSH_GRANULE(q+GC_GRANULE_WORDS); +PUSH_GRANULE(q+2*GC_GRANULE_WORDS); +PUSH_GRANULE(q+3*GC_GRANULE_WORDS); +} +q+=4*GC_GRANULE_WORDS; +mark_word>>=4; +} +p+=WORDSZ*GC_GRANULE_WORDS; +} +#undef GC_greatest_plausible_heap_addr +#undef GC_least_plausible_heap_addr +#undef GC_mark_stack_top +#undef GC_mark_stack_limit +#define GC_mark_stack_limit GC_arrays._mark_stack_limit +#define GC_mark_stack_top GC_arrays._mark_stack_top +GC_mark_stack_top=mark_stack_top; +} +#endif +#endif +#endif +STATIC void GC_push_marked(struct hblk*h,hdr*hhdr) +{ +word sz=hhdr->hb_sz; +word descr=hhdr->hb_descr; +ptr_t p; +word bit_no; +ptr_t lim; +mse*GC_mark_stack_top_reg; +mse*mark_stack_limit=GC_mark_stack_limit; +if (( GC_DS_LENGTH)==descr)return; +if (GC_block_empty(hhdr))return; 
+#if!defined(GC_DISABLE_INCREMENTAL) +GC_n_rescuing_pages++; +#endif +GC_objects_are_marked=TRUE; +if (sz > MAXOBJBYTES){ +lim=h->hb_body; +} else { +lim=(ptr_t)((word)(h+1)->hb_body - sz); +} +switch(BYTES_TO_GRANULES(sz)){ +#if defined(USE_PUSH_MARKED_ACCELERATORS) +case 1: +GC_push_marked1(h,hhdr); +break; +#if!defined(UNALIGNED_PTRS) +case 2: +GC_push_marked2(h,hhdr); +break; +#if GC_GRANULE_WORDS < 4 +case 4: +GC_push_marked4(h,hhdr); +break; +#endif +#endif +#else +case 1: +#endif +default: +GC_mark_stack_top_reg=GC_mark_stack_top; +for (p=h->hb_body,bit_no=0;(word)p<=(word)lim; +p+=sz,bit_no+=MARK_BIT_OFFSET(sz)){ +if (mark_bit_from_hdr(hhdr,bit_no)){ +GC_mark_stack_top_reg=GC_push_obj(p,hhdr,GC_mark_stack_top_reg, +mark_stack_limit); +} +} +GC_mark_stack_top=GC_mark_stack_top_reg; +} +} +#ifdef ENABLE_DISCLAIM +STATIC void GC_push_unconditionally(struct hblk*h,hdr*hhdr) +{ +word sz=hhdr->hb_sz; +word descr=hhdr->hb_descr; +ptr_t p; +ptr_t lim; +mse*GC_mark_stack_top_reg; +mse*mark_stack_limit=GC_mark_stack_limit; +if (( GC_DS_LENGTH)==descr) +return; +#if!defined(GC_DISABLE_INCREMENTAL) +GC_n_rescuing_pages++; +#endif +GC_objects_are_marked=TRUE; +if (sz > MAXOBJBYTES) +lim=h->hb_body; +else +lim=(ptr_t)((word)(h+1)->hb_body - sz); +GC_mark_stack_top_reg=GC_mark_stack_top; +for (p=h->hb_body;(word)p<=(word)lim;p+=sz) +if ((*(word*)p&0x3)!=0) +GC_mark_stack_top_reg=GC_push_obj(p,hhdr,GC_mark_stack_top_reg, +mark_stack_limit); +GC_mark_stack_top=GC_mark_stack_top_reg; +} +#endif +#ifndef GC_DISABLE_INCREMENTAL +STATIC GC_bool GC_block_was_dirty(struct hblk*h,hdr*hhdr) +{ +word sz=hhdr->hb_sz; +if (sz<=MAXOBJBYTES){ +return(GC_page_was_dirty(h)); +} else { +ptr_t p=(ptr_t)h; +while ((word)p < (word)h+sz){ +if (GC_page_was_dirty((struct hblk*)p))return(TRUE); +p+=HBLKSIZE; +} +return(FALSE); +} +} +#endif +STATIC struct hblk*GC_push_next_marked(struct hblk*h) +{ +hdr*hhdr=HDR(h); +if (EXPECT(IS_FORWARDING_ADDR_OR_NIL(hhdr)||HBLK_IS_FREE(hhdr),FALSE)){ +h=GC_next_block(h,FALSE); +if (NULL==h)return NULL; +hhdr=GC_find_header((ptr_t)h); +} else { +#ifdef LINT2 +if (NULL==h)ABORT("Bad HDR()definition"); +#endif +} +GC_push_marked(h,hhdr); +return(h+OBJ_SZ_TO_BLOCKS(hhdr->hb_sz)); +} +#ifndef GC_DISABLE_INCREMENTAL +STATIC struct hblk*GC_push_next_marked_dirty(struct hblk*h) +{ +hdr*hhdr=HDR(h); +if (!GC_incremental)ABORT("Dirty bits not set up"); +for (;;){ +if (EXPECT(IS_FORWARDING_ADDR_OR_NIL(hhdr) +||HBLK_IS_FREE(hhdr),FALSE)){ +h=GC_next_block(h,FALSE); +if (NULL==h)return NULL; +hhdr=GC_find_header((ptr_t)h); +} else { +#ifdef LINT2 +if (NULL==h)ABORT("Bad HDR()definition"); +#endif +} +if (GC_block_was_dirty(h,hhdr)) +break; +h+=OBJ_SZ_TO_BLOCKS(hhdr->hb_sz); +hhdr=HDR(h); +} +#ifdef ENABLE_DISCLAIM +if ((hhdr->hb_flags&MARK_UNCONDITIONALLY)!=0){ +GC_push_unconditionally(h,hhdr); +} else +#endif +{ +GC_push_marked(h,hhdr); +} +return(h+OBJ_SZ_TO_BLOCKS(hhdr->hb_sz)); +} +#endif +STATIC struct hblk*GC_push_next_marked_uncollectable(struct hblk*h) +{ +hdr*hhdr=HDR(h); +for (;;){ +if (EXPECT(IS_FORWARDING_ADDR_OR_NIL(hhdr) +||HBLK_IS_FREE(hhdr),FALSE)){ +h=GC_next_block(h,FALSE); +if (NULL==h)return NULL; +hhdr=GC_find_header((ptr_t)h); +} else { +#ifdef LINT2 +if (NULL==h)ABORT("Bad HDR()definition"); +#endif +} +if (hhdr->hb_obj_kind==UNCOLLECTABLE){ +GC_push_marked(h,hhdr); +break; +} +#ifdef ENABLE_DISCLAIM +if ((hhdr->hb_flags&MARK_UNCONDITIONALLY)!=0){ +GC_push_unconditionally(h,hhdr); +break; +} +#endif +h+=OBJ_SZ_TO_BLOCKS(hhdr->hb_sz); +hhdr=HDR(h); +} 
+return(h+OBJ_SZ_TO_BLOCKS(hhdr->hb_sz)); +} +#include +int GC_no_dls=0; +#if!defined(NO_DEBUGGING)||defined(GC_ASSERTIONS) +GC_INNER word GC_compute_root_size(void) +{ +int i; +word size=0; +for (i=0;i < n_root_sets;i++){ +size+=GC_static_roots[i].r_end - GC_static_roots[i].r_start; +} +return size; +} +#endif +#if!defined(NO_DEBUGGING) +void GC_print_static_roots(void) +{ +int i; +word size; +for (i=0;i < n_root_sets;i++){ +GC_printf("From %p to %p%s\n", +(void*)GC_static_roots[i].r_start, +(void*)GC_static_roots[i].r_end, +GC_static_roots[i].r_tmp?" (temporary)":""); +} +GC_printf("GC_root_size:%lu\n",(unsigned long)GC_root_size); +if ((size=GC_compute_root_size())!=GC_root_size) +GC_err_printf("GC_root_size incorrect!!Should be:%lu\n", +(unsigned long)size); +} +#endif +#ifndef THREADS +GC_INNER GC_bool GC_is_static_root(void*p) +{ +static int last_root_set=MAX_ROOT_SETS; +int i; +if (last_root_set < n_root_sets +&&(word)p>=(word)GC_static_roots[last_root_set].r_start +&&(word)p < (word)GC_static_roots[last_root_set].r_end) +return(TRUE); +for (i=0;i < n_root_sets;i++){ +if ((word)p>=(word)GC_static_roots[i].r_start +&&(word)p < (word)GC_static_roots[i].r_end){ +last_root_set=i; +return(TRUE); +} +} +return(FALSE); +} +#endif +#if!defined(MSWIN32)&&!defined(MSWINCE)&&!defined(CYGWIN32) +GC_INLINE int rt_hash(ptr_t addr) +{ +word result=(word)addr; +#if CPP_WORDSZ > 8*LOG_RT_SIZE +result^=result>>8*LOG_RT_SIZE; +#endif +#if CPP_WORDSZ > 4*LOG_RT_SIZE +result^=result>>4*LOG_RT_SIZE; +#endif +result^=result>>2*LOG_RT_SIZE; +result^=result>>LOG_RT_SIZE; +result&=(RT_SIZE-1); +return(result); +} +GC_INNER void*GC_roots_present(ptr_t b) +{ +int h=rt_hash(b); +struct roots*p=GC_root_index[h]; +while (p!=0){ +if (p->r_start==(ptr_t)b)return(p); +p=p->r_next; +} +return NULL; +} +GC_INLINE void add_roots_to_index(struct roots*p) +{ +int h=rt_hash(p->r_start); +p->r_next=GC_root_index[h]; +GC_root_index[h]=p; +} +#endif +GC_INNER word GC_root_size=0; +GC_API void GC_CALL GC_add_roots(void*b,void*e) +{ +DCL_LOCK_STATE; +if (!EXPECT(GC_is_initialized,TRUE))GC_init(); +LOCK(); +GC_add_roots_inner((ptr_t)b,(ptr_t)e,FALSE); +UNLOCK(); +} +void GC_add_roots_inner(ptr_t b,ptr_t e,GC_bool tmp) +{ +GC_ASSERT((word)b<=(word)e); +b=(ptr_t)(((word)b+(sizeof(word)- 1))&~(word)(sizeof(word)- 1)); +e=(ptr_t)((word)e&~(word)(sizeof(word)- 1)); +if ((word)b>=(word)e)return; +#if defined(MSWIN32)||defined(MSWINCE)||defined(CYGWIN32) +{ +int i; +struct roots*old=NULL; +for (i=0;i < n_root_sets;i++){ +old=GC_static_roots+i; +if ((word)b<=(word)old->r_end +&&(word)e>=(word)old->r_start){ +if ((word)b < (word)old->r_start){ +GC_root_size+=old->r_start - b; +old->r_start=b; +} +if ((word)e > (word)old->r_end){ +GC_root_size+=e - old->r_end; +old->r_end=e; +} +old->r_tmp&=tmp; +break; +} +} +if (i < n_root_sets){ +struct roots*other; +for (i++;i < n_root_sets;i++){ +other=GC_static_roots+i; +b=other->r_start; +e=other->r_end; +if ((word)b<=(word)old->r_end +&&(word)e>=(word)old->r_start){ +if ((word)b < (word)old->r_start){ +GC_root_size+=old->r_start - b; +old->r_start=b; +} +if ((word)e > (word)old->r_end){ +GC_root_size+=e - old->r_end; +old->r_end=e; +} +old->r_tmp&=other->r_tmp; +GC_root_size-=(other->r_end - other->r_start); +other->r_start=GC_static_roots[n_root_sets-1].r_start; +other->r_end=GC_static_roots[n_root_sets-1].r_end; +n_root_sets--; +} +} +return; +} +} +#else +{ +struct roots*old=(struct roots*)GC_roots_present(b); +if (old!=0){ +if ((word)e<=(word)old->r_end){ +old->r_tmp&=tmp; +return; +} +if 
(old->r_tmp==tmp||!tmp){ +GC_root_size+=e - old->r_end; +old->r_end=e; +old->r_tmp=tmp; +return; +} +b=old->r_end; +} +} +#endif +if (n_root_sets==MAX_ROOT_SETS){ +ABORT("Too many root sets"); +} +#ifdef DEBUG_ADD_DEL_ROOTS +GC_log_printf("Adding data root section %d:%p .. %p%s\n", +n_root_sets,(void*)b,(void*)e, +tmp?" (temporary)":""); +#endif +GC_static_roots[n_root_sets].r_start=(ptr_t)b; +GC_static_roots[n_root_sets].r_end=(ptr_t)e; +GC_static_roots[n_root_sets].r_tmp=tmp; +#if!defined(MSWIN32)&&!defined(MSWINCE)&&!defined(CYGWIN32) +GC_static_roots[n_root_sets].r_next=0; +add_roots_to_index(GC_static_roots+n_root_sets); +#endif +GC_root_size+=e - b; +n_root_sets++; +} +GC_API void GC_CALL GC_clear_roots(void) +{ +DCL_LOCK_STATE; +if (!EXPECT(GC_is_initialized,TRUE))GC_init(); +LOCK(); +#ifdef THREADS +GC_roots_were_cleared=TRUE; +#endif +n_root_sets=0; +GC_root_size=0; +#if!defined(MSWIN32)&&!defined(MSWINCE)&&!defined(CYGWIN32) +BZERO(GC_root_index,RT_SIZE*sizeof(void*)); +#endif +#ifdef DEBUG_ADD_DEL_ROOTS +GC_log_printf("Clear all data root sections\n"); +#endif +UNLOCK(); +} +STATIC void GC_remove_root_at_pos(int i) +{ +#ifdef DEBUG_ADD_DEL_ROOTS +GC_log_printf("Remove data root section at %d:%p .. %p%s\n", +i,(void*)GC_static_roots[i].r_start, +(void*)GC_static_roots[i].r_end, +GC_static_roots[i].r_tmp?" (temporary)":""); +#endif +GC_root_size-=(GC_static_roots[i].r_end - GC_static_roots[i].r_start); +GC_static_roots[i].r_start=GC_static_roots[n_root_sets-1].r_start; +GC_static_roots[i].r_end=GC_static_roots[n_root_sets-1].r_end; +GC_static_roots[i].r_tmp=GC_static_roots[n_root_sets-1].r_tmp; +n_root_sets--; +} +#if!defined(MSWIN32)&&!defined(MSWINCE)&&!defined(CYGWIN32) +STATIC void GC_rebuild_root_index(void) +{ +int i; +BZERO(GC_root_index,RT_SIZE*sizeof(void*)); +for (i=0;i < n_root_sets;i++) +add_roots_to_index(GC_static_roots+i); +} +#endif +#if defined(DYNAMIC_LOADING)||defined(MSWIN32)||defined(MSWINCE)||defined(PCR)||defined(CYGWIN32) +STATIC void GC_remove_tmp_roots(void) +{ +int i; +#if!defined(MSWIN32)&&!defined(MSWINCE)&&!defined(CYGWIN32) +int old_n_roots=n_root_sets; +#endif +for (i=0;i < n_root_sets;){ +if (GC_static_roots[i].r_tmp){ +GC_remove_root_at_pos(i); +} else { +i++; +} +} +#if!defined(MSWIN32)&&!defined(MSWINCE)&&!defined(CYGWIN32) +if (n_root_sets < old_n_roots) +GC_rebuild_root_index(); +#endif +} +#endif +#if!defined(MSWIN32)&&!defined(MSWINCE)&&!defined(CYGWIN32) +STATIC void GC_remove_roots_inner(ptr_t b,ptr_t e); +GC_API void GC_CALL GC_remove_roots(void*b,void*e) +{ +DCL_LOCK_STATE; +if ((((word)b+(sizeof(word)- 1))&~(word)(sizeof(word)- 1))>= +((word)e&~(word)(sizeof(word)- 1))) +return; +LOCK(); +GC_remove_roots_inner((ptr_t)b,(ptr_t)e); +UNLOCK(); +} +STATIC void GC_remove_roots_inner(ptr_t b,ptr_t e) +{ +int i; +GC_bool rebuild=FALSE; +for (i=0;i < n_root_sets;){ +if ((word)GC_static_roots[i].r_start>=(word)b +&&(word)GC_static_roots[i].r_end<=(word)e){ +GC_remove_root_at_pos(i); +rebuild=TRUE; +} else { +i++; +} +} +if (rebuild) +GC_rebuild_root_index(); +} +#endif +#ifdef USE_PROC_FOR_LIBRARIES +GC_INLINE void swap_static_roots(int i,int j) +{ +ptr_t r_start=GC_static_roots[i].r_start; +ptr_t r_end=GC_static_roots[i].r_end; +GC_bool r_tmp=GC_static_roots[i].r_tmp; +GC_static_roots[i].r_start=GC_static_roots[j].r_start; +GC_static_roots[i].r_end=GC_static_roots[j].r_end; +GC_static_roots[i].r_tmp=GC_static_roots[j].r_tmp; +GC_static_roots[j].r_start=r_start; +GC_static_roots[j].r_end=r_end; +GC_static_roots[j].r_tmp=r_tmp; +} +GC_INNER void 
GC_remove_roots_subregion(ptr_t b,ptr_t e) +{ +int i; +GC_bool rebuild=FALSE; +GC_ASSERT(I_HOLD_LOCK()); +GC_ASSERT((word)b % sizeof(word)==0&&(word)e % sizeof(word)==0); +for (i=0;i < n_root_sets;i++){ +ptr_t r_start,r_end; +if (GC_static_roots[i].r_tmp){ +#ifdef GC_ASSERTIONS +int j; +for (j=i+1;j < n_root_sets;j++){ +GC_ASSERT(GC_static_roots[j].r_tmp); +} +#endif +break; +} +r_start=GC_static_roots[i].r_start; +r_end=GC_static_roots[i].r_end; +if (!EXPECT((word)e<=(word)r_start||(word)r_end<=(word)b,TRUE)){ +#ifdef DEBUG_ADD_DEL_ROOTS +GC_log_printf("Removing %p .. %p from root section %d (%p .. %p)\n", +(void*)b,(void*)e, +i,(void*)r_start,(void*)r_end); +#endif +if ((word)r_start < (word)b){ +GC_root_size-=r_end - b; +GC_static_roots[i].r_end=b; +if ((word)e < (word)r_end){ +int j; +if (rebuild){ +GC_rebuild_root_index(); +rebuild=FALSE; +} +GC_add_roots_inner(e,r_end,FALSE); +for (j=i+1;j < n_root_sets;j++) +if (GC_static_roots[j].r_tmp) +break; +if (j < n_root_sets-1&&!GC_static_roots[n_root_sets-1].r_tmp){ +swap_static_roots(j,n_root_sets - 1); +rebuild=TRUE; +} +} +} else { +if ((word)e < (word)r_end){ +GC_root_size-=e - r_start; +GC_static_roots[i].r_start=e; +} else { +GC_remove_root_at_pos(i); +if (i < n_root_sets - 1&&GC_static_roots[i].r_tmp +&&!GC_static_roots[i+1].r_tmp){ +int j; +for (j=i+2;j < n_root_sets;j++) +if (GC_static_roots[j].r_tmp) +break; +swap_static_roots(i,j - 1); +} +i--; +} +rebuild=TRUE; +} +} +} +if (rebuild) +GC_rebuild_root_index(); +} +#endif +#if!defined(NO_DEBUGGING) +GC_API int GC_CALL GC_is_tmp_root(void*p) +{ +static int last_root_set=MAX_ROOT_SETS; +int i; +if (last_root_set < n_root_sets +&&(word)p>=(word)GC_static_roots[last_root_set].r_start +&&(word)p < (word)GC_static_roots[last_root_set].r_end) +return GC_static_roots[last_root_set].r_tmp; +for (i=0;i < n_root_sets;i++){ +if ((word)p>=(word)GC_static_roots[i].r_start +&&(word)p < (word)GC_static_roots[i].r_end){ +last_root_set=i; +return GC_static_roots[i].r_tmp; +} +} +return(FALSE); +} +#endif +GC_INNER ptr_t GC_approx_sp(void) +{ +volatile word sp; +#if defined(S390)&&!defined(CPPCHECK)&&(__clang_major__ < 8) +sp=(word)&sp; +#elif defined(CPPCHECK)||(__GNUC__>=4&&!defined(STACK_NOT_SCANNED)) +sp=(word)__builtin_frame_address(0); +#else +sp=(word)&sp; +#endif +return((ptr_t)sp); +} +GC_API void GC_CALL GC_clear_exclusion_table(void) +{ +GC_excl_table_entries=0; +} +STATIC struct exclusion*GC_next_exclusion(ptr_t start_addr) +{ +size_t low=0; +size_t high; +GC_ASSERT(GC_excl_table_entries > 0); +high=GC_excl_table_entries - 1; +while (high > low){ +size_t mid=(low+high)>>1; +if ((word)GC_excl_table[mid].e_end<=(word)start_addr){ +low=mid+1; +} else { +high=mid; +} +} +if ((word)GC_excl_table[low].e_end<=(word)start_addr)return 0; +return GC_excl_table+low; +} +GC_INNER void GC_exclude_static_roots_inner(void*start,void*finish) +{ +struct exclusion*next; +size_t next_index; +GC_ASSERT((word)start % sizeof(word)==0); +GC_ASSERT((word)start < (word)finish); +if (0==GC_excl_table_entries){ +next=0; +} else { +next=GC_next_exclusion((ptr_t)start); +} +if (0!=next){ +size_t i; +if ((word)(next->e_start)< (word)finish){ +ABORT("Exclusion ranges overlap"); +} +if ((word)(next->e_start)==(word)finish){ +next->e_start=(ptr_t)start; +return; +} +next_index=next - GC_excl_table; +for (i=GC_excl_table_entries;i > next_index;--i){ +GC_excl_table[i]=GC_excl_table[i-1]; +} +} else { +next_index=GC_excl_table_entries; +} +if (GC_excl_table_entries==MAX_EXCLUSIONS)ABORT("Too many exclusions"); 
+GC_excl_table[next_index].e_start=(ptr_t)start; +GC_excl_table[next_index].e_end=(ptr_t)finish; +++GC_excl_table_entries; +} +GC_API void GC_CALL GC_exclude_static_roots(void*b,void*e) +{ +DCL_LOCK_STATE; +if (b==e)return; +b=(void*)((word)b&~(word)(sizeof(word)- 1)); +e=(void*)(((word)e+(sizeof(word)- 1))&~(word)(sizeof(word)- 1)); +if (NULL==e) +e=(void*)(~(word)(sizeof(word)- 1)); +LOCK(); +GC_exclude_static_roots_inner(b,e); +UNLOCK(); +} +#if defined(WRAP_MARK_SOME)&&defined(PARALLEL_MARK) +#define GC_PUSH_CONDITIONAL(b,t,all)(GC_parallel?GC_push_conditional_eager(b,t,all):GC_push_conditional(b,t,all)) +#elif defined(GC_DISABLE_INCREMENTAL) +#define GC_PUSH_CONDITIONAL(b,t,all)GC_push_all(b,t) +#else +#define GC_PUSH_CONDITIONAL(b,t,all)GC_push_conditional(b,t,all) +#endif +STATIC void GC_push_conditional_with_exclusions(ptr_t bottom,ptr_t top, +GC_bool all GC_ATTR_UNUSED) +{ +while ((word)bottom < (word)top){ +struct exclusion*next=GC_next_exclusion(bottom); +ptr_t excl_start; +if (0==next +||(word)(excl_start=next->e_start)>=(word)top){ +GC_PUSH_CONDITIONAL(bottom,top,all); +break; +} +if ((word)excl_start > (word)bottom) +GC_PUSH_CONDITIONAL(bottom,excl_start,all); +bottom=next->e_end; +} +} +#ifdef IA64 +GC_INNER void GC_push_all_register_sections(ptr_t bs_lo,ptr_t bs_hi, +int eager,struct GC_traced_stack_sect_s*traced_stack_sect) +{ +while (traced_stack_sect!=NULL){ +ptr_t frame_bs_lo=traced_stack_sect->backing_store_end; +GC_ASSERT((word)frame_bs_lo<=(word)bs_hi); +if (eager){ +GC_push_all_eager(frame_bs_lo,bs_hi); +} else { +GC_push_all_stack(frame_bs_lo,bs_hi); +} +bs_hi=traced_stack_sect->saved_backing_store_ptr; +traced_stack_sect=traced_stack_sect->prev; +} +GC_ASSERT((word)bs_lo<=(word)bs_hi); +if (eager){ +GC_push_all_eager(bs_lo,bs_hi); +} else { +GC_push_all_stack(bs_lo,bs_hi); +} +} +#endif +#ifdef THREADS +GC_INNER void GC_push_all_stack_sections(ptr_t lo,ptr_t hi, +struct GC_traced_stack_sect_s*traced_stack_sect) +{ +while (traced_stack_sect!=NULL){ +GC_ASSERT((word)lo HOTTER_THAN (word)traced_stack_sect); +#ifdef STACK_GROWS_UP +GC_push_all_stack((ptr_t)traced_stack_sect,lo); +#else +GC_push_all_stack(lo,(ptr_t)traced_stack_sect); +#endif +lo=traced_stack_sect->saved_stack_ptr; +GC_ASSERT(lo!=NULL); +traced_stack_sect=traced_stack_sect->prev; +} +GC_ASSERT(!((word)hi HOTTER_THAN (word)lo)); +#ifdef STACK_GROWS_UP +GC_push_all_stack(hi,lo); +#else +GC_push_all_stack(lo,hi); +#endif +} +#else +STATIC void GC_push_all_stack_partially_eager(ptr_t bottom,ptr_t top, +ptr_t cold_gc_frame) +{ +#ifndef NEED_FIXUP_POINTER +if (GC_all_interior_pointers){ +if (0==cold_gc_frame){ +GC_push_all_stack(bottom,top); +return; +} +GC_ASSERT((word)bottom<=(word)cold_gc_frame +&&(word)cold_gc_frame<=(word)top); +#ifdef STACK_GROWS_DOWN +GC_push_all(cold_gc_frame - sizeof(ptr_t),top); +GC_push_all_eager(bottom,cold_gc_frame); +#else +GC_push_all(bottom,cold_gc_frame+sizeof(ptr_t)); +GC_push_all_eager(cold_gc_frame,top); +#endif +} else +#endif +{ +GC_push_all_eager(bottom,top); +} +#ifdef TRACE_BUF +GC_add_trace_entry("GC_push_all_stack",(word)bottom,(word)top); +#endif +} +STATIC void GC_push_all_stack_part_eager_sections(ptr_t lo,ptr_t hi, +ptr_t cold_gc_frame,struct GC_traced_stack_sect_s*traced_stack_sect) +{ +GC_ASSERT(traced_stack_sect==NULL||cold_gc_frame==NULL|| +(word)cold_gc_frame HOTTER_THAN (word)traced_stack_sect); +while (traced_stack_sect!=NULL){ +GC_ASSERT((word)lo HOTTER_THAN (word)traced_stack_sect); +#ifdef STACK_GROWS_UP 
+GC_push_all_stack_partially_eager((ptr_t)traced_stack_sect,lo, +cold_gc_frame); +#else +GC_push_all_stack_partially_eager(lo,(ptr_t)traced_stack_sect, +cold_gc_frame); +#endif +lo=traced_stack_sect->saved_stack_ptr; +GC_ASSERT(lo!=NULL); +traced_stack_sect=traced_stack_sect->prev; +cold_gc_frame=NULL; +} +GC_ASSERT(!((word)hi HOTTER_THAN (word)lo)); +#ifdef STACK_GROWS_UP +GC_push_all_stack_partially_eager(hi,lo,cold_gc_frame); +#else +GC_push_all_stack_partially_eager(lo,hi,cold_gc_frame); +#endif +} +#endif +STATIC void GC_push_current_stack(ptr_t cold_gc_frame, +void*context GC_ATTR_UNUSED) +{ +#if defined(THREADS) +#ifdef STACK_GROWS_DOWN +GC_push_all_eager(GC_approx_sp(),cold_gc_frame); +#else +GC_push_all_eager(cold_gc_frame,GC_approx_sp()); +#endif +#else +GC_push_all_stack_part_eager_sections(GC_approx_sp(),GC_stackbottom, +cold_gc_frame,GC_traced_stack_sect); +#ifdef IA64 +{ +ptr_t bsp=GC_save_regs_ret_val; +ptr_t cold_gc_bs_pointer=bsp - 2048; +if (GC_all_interior_pointers +&&(word)cold_gc_bs_pointer > (word)BACKING_STORE_BASE){ +if (GC_traced_stack_sect!=NULL +&&(word)cold_gc_bs_pointer +< (word)GC_traced_stack_sect->backing_store_end) +cold_gc_bs_pointer= +GC_traced_stack_sect->backing_store_end; +GC_push_all_register_sections(BACKING_STORE_BASE, +cold_gc_bs_pointer,FALSE,GC_traced_stack_sect); +GC_push_all_eager(cold_gc_bs_pointer,bsp); +} else { +GC_push_all_register_sections(BACKING_STORE_BASE,bsp, +TRUE,GC_traced_stack_sect); +} +} +#endif +#endif +} +GC_INNER void (*GC_push_typed_structures)(void)=0; +GC_INNER void GC_cond_register_dynamic_libraries(void) +{ +#if (defined(DYNAMIC_LOADING)&&!defined(MSWIN_XBOX1))||defined(CYGWIN32)||defined(MSWIN32)||defined(MSWINCE)||defined(PCR) +GC_remove_tmp_roots(); +if (!GC_no_dls)GC_register_dynamic_libraries(); +#else +GC_no_dls=TRUE; +#endif +} +STATIC void GC_push_regs_and_stack(ptr_t cold_gc_frame) +{ +#ifdef THREADS +if (NULL==cold_gc_frame) +return; +#endif +GC_with_callee_saves_pushed(GC_push_current_stack,cold_gc_frame); +} +GC_INNER void GC_push_roots(GC_bool all,ptr_t cold_gc_frame GC_ATTR_UNUSED) +{ +int i; +unsigned kind; +#if!defined(REGISTER_LIBRARIES_EARLY) +GC_cond_register_dynamic_libraries(); +#endif +for (i=0;i < n_root_sets;i++){ +GC_push_conditional_with_exclusions( +GC_static_roots[i].r_start, +GC_static_roots[i].r_end,all); +} +for (kind=0;kind < GC_n_kinds;kind++){ +void*base=GC_base(GC_obj_kinds[kind].ok_freelist); +if (base!=NULL){ +GC_set_mark_bit(base); +} +} +#ifndef GC_NO_FINALIZATION +GC_push_finalizer_structures(); +#endif +#ifdef THREADS +if (GC_no_dls||GC_roots_were_cleared) +GC_push_thread_structures(); +#endif +if (GC_push_typed_structures) +GC_push_typed_structures(); +#if defined(THREAD_LOCAL_ALLOC) +if (GC_world_stopped) +GC_mark_thread_local_free_lists(); +#endif +#ifndef STACK_NOT_SCANNED +GC_push_regs_and_stack(cold_gc_frame); +#endif +if (GC_push_other_roots!=0){ +(*GC_push_other_roots)(); +} +} +#ifdef ENABLE_DISCLAIM +#endif +#include +GC_INNER signed_word GC_bytes_found=0; +#if defined(PARALLEL_MARK) +GC_INNER signed_word GC_fl_builder_count=0; +#endif +#ifndef MAX_LEAKED +#define MAX_LEAKED 40 +#endif +STATIC ptr_t GC_leaked[MAX_LEAKED]={ NULL }; +STATIC unsigned GC_n_leaked=0; +GC_INNER GC_bool GC_have_errors=FALSE; +#if!defined(EAGER_SWEEP)&&defined(ENABLE_DISCLAIM) +STATIC void GC_reclaim_unconditionally_marked(void); +#endif +GC_INLINE void GC_add_leaked(ptr_t leaked) +{ +#ifndef SHORT_DBG_HDRS +if (GC_findleak_delay_free&&!GC_check_leaked(leaked)) +return; +#endif 
+GC_have_errors=TRUE; +if (GC_n_leaked < MAX_LEAKED){ +GC_leaked[GC_n_leaked++]=leaked; +GC_set_mark_bit(leaked); +} +} +GC_INNER void GC_print_all_errors(void) +{ +static GC_bool printing_errors=FALSE; +GC_bool have_errors; +unsigned i,n_leaked; +ptr_t leaked[MAX_LEAKED]; +DCL_LOCK_STATE; +LOCK(); +if (printing_errors){ +UNLOCK(); +return; +} +have_errors=GC_have_errors; +printing_errors=TRUE; +n_leaked=GC_n_leaked; +if (n_leaked > 0){ +GC_ASSERT(n_leaked<=MAX_LEAKED); +BCOPY(GC_leaked,leaked,n_leaked*sizeof(ptr_t)); +GC_n_leaked=0; +BZERO(GC_leaked,n_leaked*sizeof(ptr_t)); +} +UNLOCK(); +if (GC_debugging_started){ +GC_print_all_smashed(); +} else { +have_errors=FALSE; +} +if (n_leaked > 0){ +GC_err_printf("Found %u leaked objects:\n",n_leaked); +have_errors=TRUE; +} +for (i=0;i < n_leaked;i++){ +ptr_t p=leaked[i]; +#ifndef SKIP_LEAKED_OBJECTS_PRINTING +GC_print_heap_obj(p); +#endif +GC_free(p); +} +if (have_errors +#ifndef GC_ABORT_ON_LEAK +&&GETENV("GC_ABORT_ON_LEAK")!=NULL +#endif +){ +ABORT("Leaked or smashed objects encountered"); +} +LOCK(); +printing_errors=FALSE; +UNLOCK(); +} +GC_INNER GC_bool GC_block_empty(hdr*hhdr) +{ +return (hhdr->hb_n_marks==0); +} +STATIC GC_bool GC_block_nearly_full(hdr*hhdr,word sz) +{ +return hhdr->hb_n_marks > HBLK_OBJS(sz)*7/8; +} +STATIC ptr_t GC_reclaim_clear(struct hblk*hbp,hdr*hhdr,word sz, +ptr_t list,signed_word*count) +{ +word bit_no=0; +word*p,*q,*plim; +signed_word n_bytes_found=0; +GC_ASSERT(hhdr==GC_find_header((ptr_t)hbp)); +#ifndef THREADS +GC_ASSERT(sz==hhdr->hb_sz); +#else +#endif +GC_ASSERT((sz&(BYTES_PER_WORD-1))==0); +p=(word*)(hbp->hb_body); +plim=(word*)(hbp->hb_body+HBLKSIZE - sz); +while ((word)p<=(word)plim){ +if (mark_bit_from_hdr(hhdr,bit_no)){ +p=(word*)((ptr_t)p+sz); +} else { +n_bytes_found+=sz; +obj_link(p)=list; +list=((ptr_t)p); +q=(word*)((ptr_t)p+sz); +#ifdef USE_MARK_BYTES +GC_ASSERT(!(sz&1) +&&!((word)p&(2*sizeof(word)- 1))); +p[1]=0; +p+=2; +while ((word)p < (word)q){ +CLEAR_DOUBLE(p); +p+=2; +} +#else +p++; +while ((word)p < (word)q){ +*p++=0; +} +#endif +} +bit_no+=MARK_BIT_OFFSET(sz); +} +*count+=n_bytes_found; +return(list); +} +STATIC ptr_t GC_reclaim_uninit(struct hblk*hbp,hdr*hhdr,word sz, +ptr_t list,signed_word*count) +{ +word bit_no=0; +word*p,*plim; +signed_word n_bytes_found=0; +#ifndef THREADS +GC_ASSERT(sz==hhdr->hb_sz); +#endif +p=(word*)(hbp->hb_body); +plim=(word*)((ptr_t)hbp+HBLKSIZE - sz); +while ((word)p<=(word)plim){ +if (!mark_bit_from_hdr(hhdr,bit_no)){ +n_bytes_found+=sz; +obj_link(p)=list; +list=((ptr_t)p); +} +p=(word*)((ptr_t)p+sz); +bit_no+=MARK_BIT_OFFSET(sz); +} +*count+=n_bytes_found; +return(list); +} +#ifdef ENABLE_DISCLAIM +STATIC ptr_t GC_disclaim_and_reclaim(struct hblk*hbp,hdr*hhdr,word sz, +ptr_t list,signed_word*count) +{ +word bit_no=0; +word*p,*q,*plim; +signed_word n_bytes_found=0; +struct obj_kind*ok=&GC_obj_kinds[hhdr->hb_obj_kind]; +int (GC_CALLBACK*disclaim)(void*)=ok->ok_disclaim_proc; +#ifndef THREADS +GC_ASSERT(sz==hhdr->hb_sz); +#endif +p=(word*)(hbp->hb_body); +plim=(word*)((ptr_t)p+HBLKSIZE - sz); +while ((word)p<=(word)plim){ +int marked=mark_bit_from_hdr(hhdr,bit_no); +if (!marked&&(*disclaim)(p)){ +set_mark_bit_from_hdr(hhdr,bit_no); +hhdr->hb_n_marks++; +marked=1; +} +if (marked) +p=(word*)((ptr_t)p+sz); +else { +n_bytes_found+=sz; +obj_link(p)=list; +list=((ptr_t)p); +q=(word*)((ptr_t)p+sz); +#ifdef USE_MARK_BYTES +GC_ASSERT((sz&1)==0); +GC_ASSERT(((word)p&(2*sizeof(word)- 1))==0); +p[1]=0; +p+=2; +while ((word)p < (word)q){ +CLEAR_DOUBLE(p); +p+=2; +} +#else 
+p++; +while ((word)p < (word)q){ +*p++=0; +} +#endif +} +bit_no+=MARK_BIT_OFFSET(sz); +} +*count+=n_bytes_found; +return list; +} +#endif +STATIC void GC_reclaim_check(struct hblk*hbp,hdr*hhdr,word sz) +{ +word bit_no; +ptr_t p,plim; +#ifndef THREADS +GC_ASSERT(sz==hhdr->hb_sz); +#endif +p=hbp->hb_body; +plim=p+HBLKSIZE - sz; +for (bit_no=0;(word)p<=(word)plim; +p+=sz,bit_no+=MARK_BIT_OFFSET(sz)){ +if (!mark_bit_from_hdr(hhdr,bit_no)){ +GC_add_leaked(p); +} +} +} +#ifdef AO_HAVE_load +#define IS_PTRFREE_SAFE(hhdr)(AO_load((volatile AO_t*)&(hhdr)->hb_descr)==0) +#else +#define IS_PTRFREE_SAFE(hhdr)((hhdr)->hb_descr==0) +#endif +GC_INNER ptr_t GC_reclaim_generic(struct hblk*hbp,hdr*hhdr,size_t sz, +GC_bool init,ptr_t list, +signed_word*count) +{ +ptr_t result; +GC_ASSERT(GC_find_header((ptr_t)hbp)==hhdr); +#ifndef GC_DISABLE_INCREMENTAL +GC_remove_protection(hbp,1,IS_PTRFREE_SAFE(hhdr)); +#endif +#ifdef ENABLE_DISCLAIM +if ((hhdr->hb_flags&HAS_DISCLAIM)!=0){ +result=GC_disclaim_and_reclaim(hbp,hhdr,sz,list,count); +} else +#endif +if (init||GC_debugging_started){ +result=GC_reclaim_clear(hbp,hhdr,sz,list,count); +} else { +GC_ASSERT(IS_PTRFREE_SAFE(hhdr)); +result=GC_reclaim_uninit(hbp,hhdr,sz,list,count); +} +if (IS_UNCOLLECTABLE(hhdr->hb_obj_kind))GC_set_hdr_marks(hhdr); +return result; +} +STATIC void GC_reclaim_small_nonempty_block(struct hblk*hbp,word sz, +GC_bool report_if_found) +{ +hdr*hhdr=HDR(hbp); +struct obj_kind*ok=&GC_obj_kinds[hhdr->hb_obj_kind]; +void**flh=&(ok->ok_freelist[BYTES_TO_GRANULES(sz)]); +hhdr->hb_last_reclaimed=(unsigned short)GC_gc_no; +if (report_if_found){ +GC_reclaim_check(hbp,hhdr,sz); +} else { +*flh=GC_reclaim_generic(hbp,hhdr,sz,ok->ok_init, +(ptr_t)(*flh),&GC_bytes_found); +} +} +#ifdef ENABLE_DISCLAIM +STATIC void GC_disclaim_and_reclaim_or_free_small_block(struct hblk*hbp) +{ +hdr*hhdr=HDR(hbp); +word sz=hhdr->hb_sz; +struct obj_kind*ok=&GC_obj_kinds[hhdr->hb_obj_kind]; +void**flh=&(ok->ok_freelist[BYTES_TO_GRANULES(sz)]); +void*flh_next; +hhdr->hb_last_reclaimed=(unsigned short)GC_gc_no; +flh_next=GC_reclaim_generic(hbp,hhdr,sz,ok->ok_init, +(ptr_t)(*flh),&GC_bytes_found); +if (hhdr->hb_n_marks) +*flh=flh_next; +else { +GC_bytes_found+=HBLKSIZE; +GC_freehblk(hbp); +} +} +#endif +STATIC void GC_reclaim_block(struct hblk*hbp,word report_if_found) +{ +hdr*hhdr=HDR(hbp); +word sz; +struct obj_kind*ok=&GC_obj_kinds[hhdr->hb_obj_kind]; +#ifdef AO_HAVE_load +sz=(word)AO_load((volatile AO_t*)&hhdr->hb_sz); +#else +sz=hhdr->hb_sz; +#endif +if( sz > MAXOBJBYTES){ +if(!mark_bit_from_hdr(hhdr,0)){ +if (report_if_found){ +GC_add_leaked((ptr_t)hbp); +} else { +word blocks; +#ifdef ENABLE_DISCLAIM +if (EXPECT(hhdr->hb_flags&HAS_DISCLAIM,0)){ +if ((*ok->ok_disclaim_proc)(hbp)){ +set_mark_bit_from_hdr(hhdr,0); +goto in_use; +} +} +#endif +blocks=OBJ_SZ_TO_BLOCKS(sz); +#if defined(CPPCHECK) +GC_noop1((word)&blocks); +#endif +if (blocks > 1){ +GC_large_allocd_bytes-=blocks*HBLKSIZE; +} +GC_bytes_found+=sz; +GC_freehblk(hbp); +} +} else { +#ifdef ENABLE_DISCLAIM +in_use: +#endif +if (IS_PTRFREE_SAFE(hhdr)){ +GC_atomic_in_use+=sz; +} else { +GC_composite_in_use+=sz; +} +} +} else { +GC_bool empty=GC_block_empty(hhdr); +#ifdef PARALLEL_MARK +GC_ASSERT(hhdr->hb_n_marks<=2*(HBLKSIZE/sz+1)+16); +#else +GC_ASSERT(sz*hhdr->hb_n_marks<=HBLKSIZE); +#endif +if (report_if_found){ +GC_reclaim_small_nonempty_block(hbp,sz, +TRUE); +} else if (empty){ +#ifdef ENABLE_DISCLAIM +if ((hhdr->hb_flags&HAS_DISCLAIM)!=0){ +GC_disclaim_and_reclaim_or_free_small_block(hbp); +} else +#endif +{ 
+GC_bytes_found+=HBLKSIZE; +GC_freehblk(hbp); +} +} else if (GC_find_leak||!GC_block_nearly_full(hhdr,sz)){ +struct hblk**rlh=ok->ok_reclaim_list; +if (rlh!=NULL){ +rlh+=BYTES_TO_GRANULES(sz); +hhdr->hb_next=*rlh; +*rlh=hbp; +} +} +if (IS_PTRFREE_SAFE(hhdr)){ +GC_atomic_in_use+=sz*hhdr->hb_n_marks; +} else { +GC_composite_in_use+=sz*hhdr->hb_n_marks; +} +} +} +#if!defined(NO_DEBUGGING) +struct Print_stats +{ +size_t number_of_blocks; +size_t total_bytes; +}; +#ifdef USE_MARK_BYTES +unsigned GC_n_set_marks(hdr*hhdr) +{ +unsigned result=0; +word i; +word sz=hhdr->hb_sz; +word offset=MARK_BIT_OFFSET(sz); +word limit=FINAL_MARK_BIT(sz); +for (i=0;i < limit;i+=offset){ +result+=hhdr->hb_marks[i]; +} +GC_ASSERT(hhdr->hb_marks[limit]); +return(result); +} +#else +static unsigned set_bits(word n) +{ +word m=n; +unsigned result=0; +while (m > 0){ +if (m&1)result++; +m>>=1; +} +return(result); +} +unsigned GC_n_set_marks(hdr*hhdr) +{ +unsigned result=0; +word i; +word n_mark_words; +#ifdef MARK_BIT_PER_OBJ +word n_objs=HBLK_OBJS(hhdr->hb_sz); +if (0==n_objs)n_objs=1; +n_mark_words=divWORDSZ(n_objs+WORDSZ - 1); +#else +n_mark_words=MARK_BITS_SZ; +#endif +for (i=0;i < n_mark_words - 1;i++){ +result+=set_bits(hhdr->hb_marks[i]); +} +#ifdef MARK_BIT_PER_OBJ +result+=set_bits((hhdr->hb_marks[n_mark_words - 1]) +<<(n_mark_words*WORDSZ - n_objs)); +#else +result+=set_bits(hhdr->hb_marks[n_mark_words - 1]); +#endif +return result; +} +#endif +STATIC void GC_print_block_descr(struct hblk*h, +word raw_ps) +{ +hdr*hhdr=HDR(h); +size_t bytes=hhdr->hb_sz; +struct Print_stats*ps; +unsigned n_marks=GC_n_set_marks(hhdr); +unsigned n_objs=(unsigned)HBLK_OBJS(bytes); +if (0==n_objs)n_objs=1; +if (hhdr->hb_n_marks!=n_marks){ +GC_printf("%u,%u,%u!=%u,%u\n",hhdr->hb_obj_kind,(unsigned)bytes, +(unsigned)hhdr->hb_n_marks,n_marks,n_objs); +} else { +GC_printf("%u,%u,%u,%u\n",hhdr->hb_obj_kind,(unsigned)bytes, +n_marks,n_objs); +} +ps=(struct Print_stats*)raw_ps; +ps->total_bytes+=(bytes+(HBLKSIZE-1))&~(HBLKSIZE-1); +ps->number_of_blocks++; +} +void GC_print_block_list(void) +{ +struct Print_stats pstats; +GC_printf("kind(0=ptrfree,1=normal,2=unc.)," +"size_in_bytes,#_marks_set,#objs\n"); +pstats.number_of_blocks=0; +pstats.total_bytes=0; +GC_apply_to_all_blocks(GC_print_block_descr,(word)&pstats); +GC_printf("blocks=%lu,bytes=%lu\n", +(unsigned long)pstats.number_of_blocks, +(unsigned long)pstats.total_bytes); +} +GC_API void GC_CALL GC_print_free_list(int kind,size_t sz_in_granules) +{ +void*flh_next; +int n; +GC_ASSERT(kind < MAXOBJKINDS); +GC_ASSERT(sz_in_granules<=MAXOBJGRANULES); +flh_next=GC_obj_kinds[kind].ok_freelist[sz_in_granules]; +for (n=0;flh_next;n++){ +GC_printf("Free object in heap block %p [%d]:%p\n", +(void*)HBLKPTR(flh_next),n,flh_next); +flh_next=obj_link(flh_next); +} +} +#endif +STATIC void GC_clear_fl_links(void**flp) +{ +void*next=*flp; +while (0!=next){ +*flp=0; +flp=&(obj_link(next)); +next=*flp; +} +} +GC_INNER void GC_start_reclaim(GC_bool report_if_found) +{ +unsigned kind; +#if defined(PARALLEL_MARK) +GC_ASSERT(0==GC_fl_builder_count); +#endif +GC_composite_in_use=0; +GC_atomic_in_use=0; +for (kind=0;kind < GC_n_kinds;kind++){ +struct hblk**rlist=GC_obj_kinds[kind].ok_reclaim_list; +GC_bool should_clobber=(GC_obj_kinds[kind].ok_descriptor!=0); +if (rlist==0)continue; +if (!report_if_found){ +void**fop; +void**lim=&(GC_obj_kinds[kind].ok_freelist[MAXOBJGRANULES+1]); +for (fop=GC_obj_kinds[kind].ok_freelist; +(word)fop < (word)lim;(*(word**)&fop)++){ +if (*fop!=0){ +if (should_clobber){ 
+GC_clear_fl_links(fop); +} else { +*fop=0; +} +} +} +} +BZERO(rlist,(MAXOBJGRANULES+1)*sizeof(void*)); +} +GC_apply_to_all_blocks(GC_reclaim_block,(word)report_if_found); +#ifdef EAGER_SWEEP +GC_reclaim_all((GC_stop_func)0,FALSE); +#elif defined(ENABLE_DISCLAIM) +GC_reclaim_unconditionally_marked(); +#endif +#if defined(PARALLEL_MARK) +GC_ASSERT(0==GC_fl_builder_count); +#endif +} +GC_INNER void GC_continue_reclaim(word sz,int kind) +{ +hdr*hhdr; +struct hblk*hbp; +struct obj_kind*ok=&(GC_obj_kinds[kind]); +struct hblk**rlh=ok->ok_reclaim_list; +void**flh=&(ok->ok_freelist[sz]); +if (NULL==rlh) +return; +for (rlh+=sz;(hbp=*rlh)!=NULL;){ +hhdr=HDR(hbp); +*rlh=hhdr->hb_next; +GC_reclaim_small_nonempty_block(hbp,hhdr->hb_sz,FALSE); +if (*flh!=0) +break; +} +} +GC_INNER GC_bool GC_reclaim_all(GC_stop_func stop_func,GC_bool ignore_old) +{ +word sz; +unsigned kind; +hdr*hhdr; +struct hblk*hbp; +struct obj_kind*ok; +struct hblk**rlp; +struct hblk**rlh; +#ifndef NO_CLOCK +CLOCK_TYPE start_time=CLOCK_TYPE_INITIALIZER; +if (GC_print_stats==VERBOSE) +GET_TIME(start_time); +#endif +for (kind=0;kind < GC_n_kinds;kind++){ +ok=&(GC_obj_kinds[kind]); +rlp=ok->ok_reclaim_list; +if (rlp==0)continue; +for (sz=1;sz<=MAXOBJGRANULES;sz++){ +for (rlh=rlp+sz;(hbp=*rlh)!=NULL;){ +if (stop_func!=(GC_stop_func)0&&(*stop_func)()){ +return(FALSE); +} +hhdr=HDR(hbp); +*rlh=hhdr->hb_next; +if (!ignore_old +||(word)hhdr->hb_last_reclaimed==GC_gc_no - 1){ +GC_reclaim_small_nonempty_block(hbp,hhdr->hb_sz,FALSE); +} +} +} +} +#ifndef NO_CLOCK +if (GC_print_stats==VERBOSE){ +CLOCK_TYPE done_time; +GET_TIME(done_time); +GC_verbose_log_printf( +"Disposing of reclaim lists took %lu ms %lu ns\n", +MS_TIME_DIFF(done_time,start_time), +NS_FRAC_TIME_DIFF(done_time,start_time)); +} +#endif +return(TRUE); +} +#if!defined(EAGER_SWEEP)&&defined(ENABLE_DISCLAIM) +STATIC void GC_reclaim_unconditionally_marked(void) +{ +word sz; +unsigned kind; +hdr*hhdr; +struct hblk*hbp; +struct obj_kind*ok; +struct hblk**rlp; +struct hblk**rlh; +for (kind=0;kind < GC_n_kinds;kind++){ +ok=&(GC_obj_kinds[kind]); +if (!ok->ok_mark_unconditionally) +continue; +rlp=ok->ok_reclaim_list; +if (rlp==0) +continue; +for (sz=1;sz<=MAXOBJGRANULES;sz++){ +rlh=rlp+sz; +while ((hbp=*rlh)!=0){ +hhdr=HDR(hbp); +*rlh=hhdr->hb_next; +GC_reclaim_small_nonempty_block(hbp,hhdr->hb_sz,FALSE); +} +} +} +} +#endif +struct enumerate_reachable_s { +GC_reachable_object_proc proc; +void*client_data; +}; +STATIC void GC_do_enumerate_reachable_objects(struct hblk*hbp,word ped) +{ +struct hblkhdr*hhdr=HDR(hbp); +size_t sz=(size_t)hhdr->hb_sz; +size_t bit_no; +char*p,*plim; +if (GC_block_empty(hhdr)){ +return; +} +p=hbp->hb_body; +if (sz > MAXOBJBYTES){ +plim=p; +} else { +plim=hbp->hb_body+HBLKSIZE - sz; +} +for (bit_no=0;p<=plim;bit_no+=MARK_BIT_OFFSET(sz),p+=sz){ +if (mark_bit_from_hdr(hhdr,bit_no)){ +((struct enumerate_reachable_s*)ped)->proc(p,sz, +((struct enumerate_reachable_s*)ped)->client_data); +} +} +} +GC_API void GC_CALL GC_enumerate_reachable_objects_inner( +GC_reachable_object_proc proc, +void*client_data) +{ +struct enumerate_reachable_s ed; +GC_ASSERT(I_HOLD_LOCK()); +ed.proc=proc; +ed.client_data=client_data; +GC_apply_to_all_blocks(GC_do_enumerate_reachable_objects,(word)&ed); +} +#ifndef GC_TYPED_H +#define GC_TYPED_H +#ifndef GC_H +#endif +#ifdef __cplusplus +extern "C" { +#endif +typedef GC_word*GC_bitmap; +#define GC_WORDSZ (8*sizeof(GC_word)) +#define GC_get_bit(bm,index)(((bm)[(index)/GC_WORDSZ]>>((index)% GC_WORDSZ))&1) +#define 
GC_set_bit(bm,index)((bm)[(index)/GC_WORDSZ]|=(GC_word)1<<((index)% GC_WORDSZ)) +#define GC_WORD_OFFSET(t,f)(offsetof(t,f)/sizeof(GC_word)) +#define GC_WORD_LEN(t)(sizeof(t)/sizeof(GC_word)) +#define GC_BITMAP_SIZE(t)((GC_WORD_LEN(t)+GC_WORDSZ - 1)/GC_WORDSZ) +typedef GC_word GC_descr; +GC_API GC_descr GC_CALL GC_make_descriptor(const GC_word*, +size_t); +GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1)void*GC_CALL +GC_malloc_explicitly_typed(size_t, +GC_descr); +GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1)void*GC_CALL +GC_malloc_explicitly_typed_ignore_off_page(size_t, +GC_descr); +GC_API GC_ATTR_MALLOC GC_ATTR_CALLOC_SIZE(1,2)void*GC_CALL +GC_calloc_explicitly_typed(size_t, +size_t, +GC_descr); +#ifdef GC_DEBUG +#define GC_MALLOC_EXPLICITLY_TYPED(bytes,d)((void)(d),GC_MALLOC(bytes)) +#define GC_CALLOC_EXPLICITLY_TYPED(n,bytes,d)((void)(d),GC_MALLOC((n)*(bytes))) +#else +#define GC_MALLOC_EXPLICITLY_TYPED(bytes,d)GC_malloc_explicitly_typed(bytes,d) +#define GC_CALLOC_EXPLICITLY_TYPED(n,bytes,d)GC_calloc_explicitly_typed(n,bytes,d) +#endif +#ifdef __cplusplus +} +#endif +#endif +#define TYPD_EXTRA_BYTES (sizeof(word)- EXTRA_BYTES) +STATIC int GC_explicit_kind=0; +STATIC int GC_array_kind=0; +struct LeafDescriptor { +word ld_tag; +#define LEAF_TAG 1 +size_t ld_size; +size_t ld_nelements; +GC_descr ld_descriptor; +}; +struct ComplexArrayDescriptor { +word ad_tag; +#define ARRAY_TAG 2 +size_t ad_nelements; +union ComplexDescriptor*ad_element_descr; +}; +struct SequenceDescriptor { +word sd_tag; +#define SEQUENCE_TAG 3 +union ComplexDescriptor*sd_first; +union ComplexDescriptor*sd_second; +}; +typedef union ComplexDescriptor { +struct LeafDescriptor ld; +struct ComplexArrayDescriptor ad; +struct SequenceDescriptor sd; +} complex_descriptor; +#define TAG ad.ad_tag +#define ED_INITIAL_SIZE 100 +STATIC int GC_typed_mark_proc_index=0; +STATIC int GC_array_mark_proc_index=0; +STATIC void GC_push_typed_structures_proc(void) +{ +GC_PUSH_ALL_SYM(GC_ext_descriptors); +} +STATIC signed_word GC_add_ext_descriptor(const word*bm,word nbits) +{ +size_t nwords=divWORDSZ(nbits+WORDSZ-1); +signed_word result; +size_t i; +word last_part; +size_t extra_bits; +DCL_LOCK_STATE; +LOCK(); +while (GC_avail_descr+nwords>=GC_ed_size){ +typed_ext_descr_t*newExtD; +size_t new_size; +word ed_size=GC_ed_size; +if (ed_size==0){ +GC_ASSERT((word)(&GC_ext_descriptors)% sizeof(word)==0); +GC_push_typed_structures=GC_push_typed_structures_proc; +UNLOCK(); +new_size=ED_INITIAL_SIZE; +} else { +UNLOCK(); +new_size=2*ed_size; +if (new_size > MAX_ENV)return(-1); +} +newExtD=(typed_ext_descr_t*)GC_malloc_atomic(new_size +*sizeof(typed_ext_descr_t)); +if (NULL==newExtD) +return -1; +LOCK(); +if (ed_size==GC_ed_size){ +if (GC_avail_descr!=0){ +BCOPY(GC_ext_descriptors,newExtD, +GC_avail_descr*sizeof(typed_ext_descr_t)); +} +GC_ed_size=new_size; +GC_ext_descriptors=newExtD; +} +} +result=GC_avail_descr; +for (i=0;i < nwords-1;i++){ +GC_ext_descriptors[result+i].ed_bitmap=bm[i]; +GC_ext_descriptors[result+i].ed_continued=TRUE; +} +last_part=bm[i]; +extra_bits=nwords*WORDSZ - nbits; +last_part<<=extra_bits; +last_part>>=extra_bits; +GC_ext_descriptors[result+i].ed_bitmap=last_part; +GC_ext_descriptors[result+i].ed_continued=FALSE; +GC_avail_descr+=nwords; +UNLOCK(); +return(result); +} +STATIC GC_descr GC_bm_table[WORDSZ/2]; +STATIC GC_descr GC_double_descr(GC_descr descriptor,word nwords) +{ +if ((descriptor&GC_DS_TAGS)==GC_DS_LENGTH){ +descriptor=GC_bm_table[BYTES_TO_WORDS((word)descriptor)]; +}; +descriptor|=(descriptor&~GC_DS_TAGS)>>nwords; 
+return(descriptor); +} +STATIC complex_descriptor* +GC_make_sequence_descriptor(complex_descriptor*first, +complex_descriptor*second); +#define COMPLEX 2 +#define LEAF 1 +#define SIMPLE 0 +#define NO_MEM (-1) +STATIC int GC_make_array_descriptor(size_t nelements,size_t size, +GC_descr descriptor,GC_descr*simple_d, +complex_descriptor**complex_d, +struct LeafDescriptor*leaf) +{ +#define OPT_THRESHOLD 50 +if ((descriptor&GC_DS_TAGS)==GC_DS_LENGTH){ +if (descriptor==(GC_descr)size){ +*simple_d=nelements*descriptor; +return(SIMPLE); +} else if ((word)descriptor==0){ +*simple_d=(GC_descr)0; +return(SIMPLE); +} +} +if (nelements<=OPT_THRESHOLD){ +if (nelements<=1){ +if (nelements==1){ +*simple_d=descriptor; +return(SIMPLE); +} else { +*simple_d=(GC_descr)0; +return(SIMPLE); +} +} +} else if (size<=BITMAP_BITS/2 +&&(descriptor&GC_DS_TAGS)!=GC_DS_PROC +&&(size&(sizeof(word)-1))==0){ +int result= +GC_make_array_descriptor(nelements/2,2*size, +GC_double_descr(descriptor, +BYTES_TO_WORDS(size)), +simple_d,complex_d,leaf); +if ((nelements&1)==0){ +return(result); +} else { +struct LeafDescriptor*one_element= +(struct LeafDescriptor*) +GC_malloc_atomic(sizeof(struct LeafDescriptor)); +if (result==NO_MEM||one_element==0)return(NO_MEM); +one_element->ld_tag=LEAF_TAG; +one_element->ld_size=size; +one_element->ld_nelements=1; +one_element->ld_descriptor=descriptor; +switch(result){ +case SIMPLE: +{ +struct LeafDescriptor*beginning= +(struct LeafDescriptor*) +GC_malloc_atomic(sizeof(struct LeafDescriptor)); +if (beginning==0)return(NO_MEM); +beginning->ld_tag=LEAF_TAG; +beginning->ld_size=size; +beginning->ld_nelements=1; +beginning->ld_descriptor=*simple_d; +*complex_d=GC_make_sequence_descriptor( +(complex_descriptor*)beginning, +(complex_descriptor*)one_element); +break; +} +case LEAF: +{ +struct LeafDescriptor*beginning= +(struct LeafDescriptor*) +GC_malloc_atomic(sizeof(struct LeafDescriptor)); +if (beginning==0)return(NO_MEM); +beginning->ld_tag=LEAF_TAG; +beginning->ld_size=leaf->ld_size; +beginning->ld_nelements=leaf->ld_nelements; +beginning->ld_descriptor=leaf->ld_descriptor; +*complex_d=GC_make_sequence_descriptor( +(complex_descriptor*)beginning, +(complex_descriptor*)one_element); +break; +} +case COMPLEX: +*complex_d=GC_make_sequence_descriptor( +*complex_d, +(complex_descriptor*)one_element); +break; +} +return(COMPLEX); +} +} +leaf->ld_size=size; +leaf->ld_nelements=nelements; +leaf->ld_descriptor=descriptor; +return(LEAF); +} +STATIC complex_descriptor* +GC_make_sequence_descriptor(complex_descriptor*first, +complex_descriptor*second) +{ +struct SequenceDescriptor*result= +(struct SequenceDescriptor*) +GC_malloc(sizeof(struct SequenceDescriptor)); +if (result!=0){ +result->sd_tag=SEQUENCE_TAG; +result->sd_first=first; +result->sd_second=second; +GC_dirty(result); +REACHABLE_AFTER_DIRTY(first); +REACHABLE_AFTER_DIRTY(second); +} +return((complex_descriptor*)result); +} +STATIC mse*GC_typed_mark_proc(word*addr,mse*mark_stack_ptr, +mse*mark_stack_limit,word env); +STATIC mse*GC_array_mark_proc(word*addr,mse*mark_stack_ptr, +mse*mark_stack_limit,word env); +STATIC void GC_init_explicit_typing(void) +{ +unsigned i; +GC_STATIC_ASSERT(sizeof(struct LeafDescriptor)% sizeof(word)==0); +GC_explicit_kind=GC_new_kind_inner(GC_new_free_list_inner(), +(WORDS_TO_BYTES((word)-1)|GC_DS_PER_OBJECT), +TRUE,TRUE); +GC_typed_mark_proc_index=GC_new_proc_inner(GC_typed_mark_proc); +GC_array_mark_proc_index=GC_new_proc_inner(GC_array_mark_proc); +GC_array_kind=GC_new_kind_inner(GC_new_free_list_inner(), 
+GC_MAKE_PROC(GC_array_mark_proc_index,0), +FALSE,TRUE); +GC_bm_table[0]=GC_DS_BITMAP; +for (i=1;i < WORDSZ/2;i++){ +GC_bm_table[i]=(((word)-1)<<(WORDSZ - i))|GC_DS_BITMAP; +} +} +STATIC mse*GC_typed_mark_proc(word*addr,mse*mark_stack_ptr, +mse*mark_stack_limit,word env) +{ +word bm=GC_ext_descriptors[env].ed_bitmap; +word*current_p=addr; +word current; +ptr_t greatest_ha=(ptr_t)GC_greatest_plausible_heap_addr; +ptr_t least_ha=(ptr_t)GC_least_plausible_heap_addr; +DECLARE_HDR_CACHE; +INIT_HDR_CACHE; +for (;bm!=0;bm>>=1,current_p++){ +if (bm&1){ +current=*current_p; +FIXUP_POINTER(current); +if (current>=(word)least_ha&¤t<=(word)greatest_ha){ +PUSH_CONTENTS((ptr_t)current,mark_stack_ptr, +mark_stack_limit,(ptr_t)current_p); +} +} +} +if (GC_ext_descriptors[env].ed_continued){ +mark_stack_ptr++; +if ((word)mark_stack_ptr>=(word)mark_stack_limit){ +mark_stack_ptr=GC_signal_mark_stack_overflow(mark_stack_ptr); +} +mark_stack_ptr->mse_start=(ptr_t)(addr+WORDSZ); +mark_stack_ptr->mse_descr.w= +GC_MAKE_PROC(GC_typed_mark_proc_index,env+1); +} +return(mark_stack_ptr); +} +STATIC word GC_descr_obj_size(complex_descriptor*d) +{ +switch(d->TAG){ +case LEAF_TAG: +return(d->ld.ld_nelements*d->ld.ld_size); +case ARRAY_TAG: +return(d->ad.ad_nelements +*GC_descr_obj_size(d->ad.ad_element_descr)); +case SEQUENCE_TAG: +return(GC_descr_obj_size(d->sd.sd_first) ++GC_descr_obj_size(d->sd.sd_second)); +default: +ABORT_RET("Bad complex descriptor"); +return 0; +} +} +STATIC mse*GC_push_complex_descriptor(word*addr,complex_descriptor*d, +mse*msp,mse*msl) +{ +ptr_t current=(ptr_t)addr; +word nelements; +word sz; +word i; +switch(d->TAG){ +case LEAF_TAG: +{ +GC_descr descr=d->ld.ld_descriptor; +nelements=d->ld.ld_nelements; +if (msl - msp<=(ptrdiff_t)nelements)return(0); +sz=d->ld.ld_size; +for (i=0;i < nelements;i++){ +msp++; +msp->mse_start=current; +msp->mse_descr.w=descr; +current+=sz; +} +return(msp); +} +case ARRAY_TAG: +{ +complex_descriptor*descr=d->ad.ad_element_descr; +nelements=d->ad.ad_nelements; +sz=GC_descr_obj_size(descr); +for (i=0;i < nelements;i++){ +msp=GC_push_complex_descriptor((word*)current,descr, +msp,msl); +if (msp==0)return(0); +current+=sz; +} +return(msp); +} +case SEQUENCE_TAG: +{ +sz=GC_descr_obj_size(d->sd.sd_first); +msp=GC_push_complex_descriptor((word*)current,d->sd.sd_first, +msp,msl); +if (msp==0)return(0); +current+=sz; +msp=GC_push_complex_descriptor((word*)current,d->sd.sd_second, +msp,msl); +return(msp); +} +default: +ABORT_RET("Bad complex descriptor"); +return 0; +} +} +STATIC mse*GC_array_mark_proc(word*addr,mse*mark_stack_ptr, +mse*mark_stack_limit, +word env GC_ATTR_UNUSED) +{ +hdr*hhdr=HDR(addr); +word sz=hhdr->hb_sz; +word nwords=BYTES_TO_WORDS(sz); +complex_descriptor*descr=(complex_descriptor*)(addr[nwords-1]); +mse*orig_mark_stack_ptr=mark_stack_ptr; +mse*new_mark_stack_ptr; +if (descr==0){ +return(orig_mark_stack_ptr); +} +new_mark_stack_ptr=GC_push_complex_descriptor(addr,descr, +mark_stack_ptr, +mark_stack_limit-1); +if (new_mark_stack_ptr==0){ +if (NULL==mark_stack_ptr)ABORT("Bad mark_stack_ptr"); +#ifdef PARALLEL_MARK +if (GC_mark_stack+GC_mark_stack_size==mark_stack_limit) +#endif +{ +GC_mark_stack_too_small=TRUE; +} +new_mark_stack_ptr=orig_mark_stack_ptr+1; +new_mark_stack_ptr->mse_start=(ptr_t)addr; +new_mark_stack_ptr->mse_descr.w=sz|GC_DS_LENGTH; +} else { +new_mark_stack_ptr++; +new_mark_stack_ptr->mse_start=(ptr_t)(addr+nwords - 1); +new_mark_stack_ptr->mse_descr.w=sizeof(word)|GC_DS_LENGTH; +} +return new_mark_stack_ptr; +} +GC_API GC_descr GC_CALL 
GC_make_descriptor(const GC_word*bm,size_t len) +{ +signed_word last_set_bit=len - 1; +GC_descr result; +DCL_LOCK_STATE; +#if defined(AO_HAVE_load_acquire)&&defined(AO_HAVE_store_release) +if (!EXPECT(AO_load_acquire(&GC_explicit_typing_initialized),TRUE)){ +LOCK(); +if (!GC_explicit_typing_initialized){ +GC_init_explicit_typing(); +AO_store_release(&GC_explicit_typing_initialized,TRUE); +} +UNLOCK(); +} +#else +LOCK(); +if (!EXPECT(GC_explicit_typing_initialized,TRUE)){ +GC_init_explicit_typing(); +GC_explicit_typing_initialized=TRUE; +} +UNLOCK(); +#endif +while (last_set_bit>=0&&!GC_get_bit(bm,last_set_bit)) +last_set_bit--; +if (last_set_bit < 0)return(0); +#if ALIGNMENT==CPP_WORDSZ/8 +{ +signed_word i; +for (i=0;i < last_set_bit;i++){ +if (!GC_get_bit(bm,i)){ +break; +} +} +if (i==last_set_bit){ +return (WORDS_TO_BYTES(last_set_bit+1)|GC_DS_LENGTH); +} +} +#endif +if ((word)last_set_bit < BITMAP_BITS){ +signed_word i; +result=SIGNB; +for (i=last_set_bit - 1;i>=0;i--){ +result>>=1; +if (GC_get_bit(bm,i))result|=SIGNB; +} +result|=GC_DS_BITMAP; +} else { +signed_word index=GC_add_ext_descriptor(bm,(word)last_set_bit+1); +if (index==-1)return(WORDS_TO_BYTES(last_set_bit+1)|GC_DS_LENGTH); +result=GC_MAKE_PROC(GC_typed_mark_proc_index,(word)index); +} +return result; +} +GC_API GC_ATTR_MALLOC void*GC_CALL GC_malloc_explicitly_typed(size_t lb, +GC_descr d) +{ +word*op; +size_t lg; +GC_ASSERT(GC_explicit_typing_initialized); +lb=SIZET_SAT_ADD(lb,TYPD_EXTRA_BYTES); +op=(word*)GC_malloc_kind(lb,GC_explicit_kind); +if (EXPECT(NULL==op,FALSE)) +return NULL; +lg=BYTES_TO_GRANULES(GC_size(op)); +op[GRANULES_TO_WORDS(lg)- 1]=d; +GC_dirty(op+GRANULES_TO_WORDS(lg)- 1); +REACHABLE_AFTER_DIRTY(d); +return op; +} +#define GENERAL_MALLOC_IOP(lb,k)GC_clear_stack(GC_generic_malloc_ignore_off_page(lb,k)) +GC_API GC_ATTR_MALLOC void*GC_CALL +GC_malloc_explicitly_typed_ignore_off_page(size_t lb,GC_descr d) +{ +ptr_t op; +size_t lg; +DCL_LOCK_STATE; +GC_ASSERT(GC_explicit_typing_initialized); +lb=SIZET_SAT_ADD(lb,TYPD_EXTRA_BYTES); +if (SMALL_OBJ(lb)){ +void**opp; +GC_DBG_COLLECT_AT_MALLOC(lb); +LOCK(); +lg=GC_size_map[lb]; +opp=&GC_obj_kinds[GC_explicit_kind].ok_freelist[lg]; +op=(ptr_t)(*opp); +if (EXPECT(0==op,FALSE)){ +UNLOCK(); +op=(ptr_t)GENERAL_MALLOC_IOP(lb,GC_explicit_kind); +if (0==op)return 0; +lg=BYTES_TO_GRANULES(GC_size(op)); +} else { +*opp=obj_link(op); +obj_link(op)=0; +GC_bytes_allocd+=GRANULES_TO_BYTES((word)lg); +UNLOCK(); +} +} else { +op=(ptr_t)GENERAL_MALLOC_IOP(lb,GC_explicit_kind); +if (NULL==op)return NULL; +lg=BYTES_TO_GRANULES(GC_size(op)); +} +((word*)op)[GRANULES_TO_WORDS(lg)- 1]=d; +GC_dirty(op+GRANULES_TO_WORDS(lg)- 1); +REACHABLE_AFTER_DIRTY(d); +return op; +} +GC_API GC_ATTR_MALLOC void*GC_CALL GC_calloc_explicitly_typed(size_t n, +size_t lb,GC_descr d) +{ +word*op; +size_t lg; +GC_descr simple_descr; +complex_descriptor*complex_descr; +int descr_type; +struct LeafDescriptor leaf; +GC_ASSERT(GC_explicit_typing_initialized); +descr_type=GC_make_array_descriptor((word)n,(word)lb,d,&simple_descr, +&complex_descr,&leaf); +if ((lb|n)> GC_SQRT_SIZE_MAX +&&lb > 0&&n > GC_SIZE_MAX/lb) +return (*GC_get_oom_fn())(GC_SIZE_MAX); +lb*=n; +switch(descr_type){ +case NO_MEM:return(0); +case SIMPLE: +return GC_malloc_explicitly_typed(lb,simple_descr); +case LEAF: +lb=SIZET_SAT_ADD(lb, +sizeof(struct LeafDescriptor)+TYPD_EXTRA_BYTES); +break; +case COMPLEX: +lb=SIZET_SAT_ADD(lb,TYPD_EXTRA_BYTES); +break; +} +op=(word*)GC_malloc_kind(lb,GC_array_kind); +if (EXPECT(NULL==op,FALSE)) +return NULL; 
+lg=BYTES_TO_GRANULES(GC_size(op));
+if (descr_type==LEAF){
+volatile struct LeafDescriptor*lp=
+(struct LeafDescriptor*)
+(op+GRANULES_TO_WORDS(lg)
+- (BYTES_TO_WORDS(sizeof(struct LeafDescriptor))+1));
+lp->ld_tag=LEAF_TAG;
+lp->ld_size=leaf.ld_size;
+lp->ld_nelements=leaf.ld_nelements;
+lp->ld_descriptor=leaf.ld_descriptor;
+((volatile word*)op)[GRANULES_TO_WORDS(lg)- 1]=(word)lp;
+} else {
+#ifndef GC_NO_FINALIZATION
+size_t lw=GRANULES_TO_WORDS(lg);
+op[lw - 1]=(word)complex_descr;
+GC_dirty(op+lw - 1);
+REACHABLE_AFTER_DIRTY(complex_descr);
+if (EXPECT(GC_general_register_disappearing_link(
+(void**)(op+lw - 1),op)
+==GC_NO_MEMORY,FALSE))
+#endif
+{
+return (*GC_get_oom_fn())(lb);
+}
+}
+return op;
+}
+#include <stdio.h>
+#include <limits.h>
+#include <stdarg.h>
+#ifndef MSWINCE
+#include <signal.h>
+#endif
+#ifdef GC_SOLARIS_THREADS
+#include <sys/syscall.h>
+#endif
+#if defined(UNIX_LIKE)||defined(CYGWIN32)||defined(SYMBIAN)||(defined(CONSOLE_LOG)&&defined(MSWIN32))
+#include <fcntl.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#endif
+#if defined(CONSOLE_LOG)&&defined(MSWIN32)&&defined(_MSC_VER)
+#include <io.h>
+#endif
+#ifdef NONSTOP
+#include <floss.h>
+#endif
+#ifdef THREADS
+#ifdef PCR
+#include "il/PCR_IL.h"
+GC_INNER PCR_Th_ML GC_allocate_ml;
+#elif defined(SN_TARGET_PSP2)
+GC_INNER WapiMutex GC_allocate_ml_PSP2={ 0,NULL };
+#elif defined(SN_TARGET_ORBIS)||defined(SN_TARGET_PS3)
+#include <pthread.h>
+GC_INNER pthread_mutex_t GC_allocate_ml;
+#endif
+#endif
+#ifdef DYNAMIC_LOADING
+#define GC_REGISTER_MAIN_STATIC_DATA()GC_register_main_static_data()
+#elif defined(GC_DONT_REGISTER_MAIN_STATIC_DATA)
+#define GC_REGISTER_MAIN_STATIC_DATA()FALSE
+#else
+#define GC_REGISTER_MAIN_STATIC_DATA()TRUE
+#endif
+#ifdef NEED_CANCEL_DISABLE_COUNT
+__thread unsigned char GC_cancel_disable_count=0;
+#endif
+GC_FAR struct _GC_arrays GC_arrays;
+GC_INNER unsigned GC_n_mark_procs=GC_RESERVED_MARK_PROCS;
+GC_INNER unsigned GC_n_kinds=GC_N_KINDS_INITIAL_VALUE;
+GC_INNER GC_bool GC_debugging_started=FALSE;
+ptr_t GC_stackbottom=0;
+#ifdef IA64
+ptr_t GC_register_stackbottom=0;
+#endif
+int GC_dont_gc=FALSE;
+int GC_dont_precollect=FALSE;
+GC_bool GC_quiet=0;
+#if!defined(NO_CLOCK)||!defined(SMALL_CONFIG)
+int GC_print_stats=0;
+#endif
+#ifdef GC_PRINT_BACK_HEIGHT
+GC_INNER GC_bool GC_print_back_height=TRUE;
+#else
+GC_INNER GC_bool GC_print_back_height=FALSE;
+#endif
+#ifndef NO_DEBUGGING
+#ifdef GC_DUMP_REGULARLY
+GC_INNER GC_bool GC_dump_regularly=TRUE;
+#else
+GC_INNER GC_bool GC_dump_regularly=FALSE;
+#endif
+#ifndef NO_CLOCK
+STATIC CLOCK_TYPE GC_init_time;
+#endif
+#endif
+#ifdef KEEP_BACK_PTRS
+GC_INNER long GC_backtraces=0;
+#endif
+#ifdef FIND_LEAK
+int GC_find_leak=1;
+#else
+int GC_find_leak=0;
+#endif
+#ifndef SHORT_DBG_HDRS
+#ifdef GC_FINDLEAK_DELAY_FREE
+GC_INNER GC_bool GC_findleak_delay_free=TRUE;
+#else
+GC_INNER GC_bool GC_findleak_delay_free=FALSE;
+#endif
+#endif
+#ifdef ALL_INTERIOR_POINTERS
+int GC_all_interior_pointers=1;
+#else
+int GC_all_interior_pointers=0;
+#endif
+#ifdef FINALIZE_ON_DEMAND
+int GC_finalize_on_demand=1;
+#else
+int GC_finalize_on_demand=0;
+#endif
+#ifdef JAVA_FINALIZATION
+int GC_java_finalization=1;
+#else
+int GC_java_finalization=0;
+#endif
+GC_finalizer_notifier_proc GC_finalizer_notifier=
+(GC_finalizer_notifier_proc)0;
+#ifdef GC_FORCE_UNMAP_ON_GCOLLECT
+GC_INNER GC_bool GC_force_unmap_on_gcollect=TRUE;
+#else
+GC_INNER GC_bool GC_force_unmap_on_gcollect=FALSE;
+#endif
+#ifndef GC_LARGE_ALLOC_WARN_INTERVAL
+#define GC_LARGE_ALLOC_WARN_INTERVAL 5
+#endif
+GC_INNER long GC_large_alloc_warn_interval=GC_LARGE_ALLOC_WARN_INTERVAL;
+STATIC void*GC_CALLBACK
GC_default_oom_fn( +size_t bytes_requested GC_ATTR_UNUSED) +{ +return(0); +} +GC_oom_func GC_oom_fn=GC_default_oom_fn; +#ifdef CAN_HANDLE_FORK +#ifdef HANDLE_FORK +GC_INNER int GC_handle_fork=1; +#else +GC_INNER int GC_handle_fork=FALSE; +#endif +#elif!defined(HAVE_NO_FORK) +GC_API void GC_CALL GC_atfork_prepare(void) +{ +#ifdef THREADS +ABORT("fork()handling unsupported"); +#endif +} +GC_API void GC_CALL GC_atfork_parent(void) +{ +} +GC_API void GC_CALL GC_atfork_child(void) +{ +} +#endif +GC_API void GC_CALL GC_set_handle_fork(int value GC_ATTR_UNUSED) +{ +#ifdef CAN_HANDLE_FORK +if (!GC_is_initialized) +GC_handle_fork=value>=-1?value:1; +#elif defined(THREADS)||(defined(DARWIN)&&defined(MPROTECT_VDB)) +if (!GC_is_initialized&&value){ +#ifndef SMALL_CONFIG +GC_init(); +#ifndef THREADS +if (GC_manual_vdb) +return; +#endif +#endif +ABORT("fork()handling unsupported"); +} +#else +#endif +} +STATIC void GC_init_size_map(void) +{ +size_t i; +GC_size_map[0]=1; +for (i=1;i<=GRANULES_TO_BYTES(TINY_FREELISTS-1)- EXTRA_BYTES;i++){ +GC_size_map[i]=ROUNDED_UP_GRANULES(i); +#ifndef _MSC_VER +GC_ASSERT(GC_size_map[i] < TINY_FREELISTS); +#endif +} +} +#ifndef SMALL_CLEAR_SIZE +#define SMALL_CLEAR_SIZE 256 +#endif +#if defined(ALWAYS_SMALL_CLEAR_STACK)||defined(STACK_NOT_SCANNED) +GC_API void*GC_CALL GC_clear_stack(void*arg) +{ +#ifndef STACK_NOT_SCANNED +word volatile dummy[SMALL_CLEAR_SIZE]; +BZERO(( void*)dummy,sizeof(dummy)); +#endif +return arg; +} +#else +#ifdef THREADS +#define BIG_CLEAR_SIZE 2048 +#else +STATIC word GC_stack_last_cleared=0; +STATIC ptr_t GC_min_sp=NULL; +STATIC ptr_t GC_high_water=NULL; +STATIC word GC_bytes_allocd_at_reset=0; +#define DEGRADE_RATE 50 +#endif +#if defined(ASM_CLEAR_CODE) +void*GC_clear_stack_inner(void*,ptr_t); +#else +void*GC_clear_stack_inner(void*arg, +#if defined(__APPLE_CC__)&&!GC_CLANG_PREREQ(6,0) +volatile +#endif +ptr_t limit) +{ +#define CLEAR_SIZE 213 +volatile word dummy[CLEAR_SIZE]; +BZERO(( void*)dummy,sizeof(dummy)); +if ((word)GC_approx_sp()COOLER_THAN (word)limit){ +(void)GC_clear_stack_inner(arg,limit); +} +#if defined(CPPCHECK) +GC_noop1(dummy[0]); +#else +GC_noop1(COVERT_DATAFLOW(dummy)); +#endif +return(arg); +} +#endif +#ifdef THREADS +GC_ATTR_NO_SANITIZE_THREAD +static unsigned next_random_no(void) +{ +static unsigned random_no=0; +return++random_no % 13; +} +#endif +GC_API void*GC_CALL GC_clear_stack(void*arg) +{ +ptr_t sp=GC_approx_sp(); +#ifdef THREADS +word volatile dummy[SMALL_CLEAR_SIZE]; +#endif +#define SLOP 400 +#define GC_SLOP 4000 +#define CLEAR_THRESHOLD 100000 +#ifdef THREADS +if (next_random_no()==0){ +ptr_t limit=sp; +MAKE_HOTTER(limit,BIG_CLEAR_SIZE*sizeof(word)); +limit=(ptr_t)((word)limit&~0xf); +return GC_clear_stack_inner(arg,limit); +} +BZERO((void*)dummy,SMALL_CLEAR_SIZE*sizeof(word)); +#else +if (GC_gc_no > GC_stack_last_cleared){ +if (GC_stack_last_cleared==0) +GC_high_water=(ptr_t)GC_stackbottom; +GC_min_sp=GC_high_water; +GC_stack_last_cleared=GC_gc_no; +GC_bytes_allocd_at_reset=GC_bytes_allocd; +} +MAKE_COOLER(GC_high_water,WORDS_TO_BYTES(DEGRADE_RATE)+GC_SLOP); +if ((word)sp HOTTER_THAN (word)GC_high_water){ +GC_high_water=sp; +} +MAKE_HOTTER(GC_high_water,GC_SLOP); +{ +ptr_t limit=GC_min_sp; +MAKE_HOTTER(limit,SLOP); +if ((word)sp COOLER_THAN (word)limit){ +limit=(ptr_t)((word)limit&~0xf); +GC_min_sp=sp; +return GC_clear_stack_inner(arg,limit); +} +} +if (GC_bytes_allocd - GC_bytes_allocd_at_reset > CLEAR_THRESHOLD){ +GC_min_sp=sp; +MAKE_HOTTER(GC_min_sp,CLEAR_THRESHOLD/4); +if ((word)GC_min_sp HOTTER_THAN 
(word)GC_high_water) +GC_min_sp=GC_high_water; +GC_bytes_allocd_at_reset=GC_bytes_allocd; +} +#endif +return arg; +} +#endif +GC_API void*GC_CALL GC_base(void*p) +{ +ptr_t r; +struct hblk*h; +bottom_index*bi; +hdr*candidate_hdr; +r=(ptr_t)p; +if (!EXPECT(GC_is_initialized,TRUE))return 0; +h=HBLKPTR(r); +GET_BI(r,bi); +candidate_hdr=HDR_FROM_BI(bi,r); +if (candidate_hdr==0)return(0); +while (IS_FORWARDING_ADDR_OR_NIL(candidate_hdr)){ +h=FORWARDED_ADDR(h,candidate_hdr); +r=(ptr_t)h; +candidate_hdr=HDR(h); +} +if (HBLK_IS_FREE(candidate_hdr))return(0); +r=(ptr_t)((word)r&~(WORDS_TO_BYTES(1)- 1)); +{ +size_t offset=HBLKDISPL(r); +word sz=candidate_hdr->hb_sz; +size_t obj_displ=offset % sz; +ptr_t limit; +r-=obj_displ; +limit=r+sz; +if ((word)limit > (word)(h+1)&&sz<=HBLKSIZE){ +return(0); +} +if ((word)p>=(word)limit)return(0); +} +return((void*)r); +} +GC_API int GC_CALL GC_is_heap_ptr(const void*p) +{ +bottom_index*bi; +GC_ASSERT(GC_is_initialized); +GET_BI(p,bi); +return HDR_FROM_BI(bi,p)!=0; +} +GC_API size_t GC_CALL GC_size(const void*p) +{ +hdr*hhdr=HDR(p); +return (size_t)hhdr->hb_sz; +} +GC_API size_t GC_CALL GC_get_heap_size(void) +{ +return (size_t)(GC_heapsize - GC_unmapped_bytes); +} +GC_API size_t GC_CALL GC_get_free_bytes(void) +{ +return (size_t)(GC_large_free_bytes - GC_unmapped_bytes); +} +GC_API size_t GC_CALL GC_get_unmapped_bytes(void) +{ +return (size_t)GC_unmapped_bytes; +} +GC_API size_t GC_CALL GC_get_bytes_since_gc(void) +{ +return (size_t)GC_bytes_allocd; +} +GC_API size_t GC_CALL GC_get_total_bytes(void) +{ +return (size_t)(GC_bytes_allocd+GC_bytes_allocd_before_gc); +} +#ifndef GC_GET_HEAP_USAGE_NOT_NEEDED +GC_API size_t GC_CALL GC_get_size_map_at(int i) +{ +if ((unsigned)i > MAXOBJBYTES) +return GC_SIZE_MAX; +return GRANULES_TO_BYTES(GC_size_map[i]); +} +GC_API void GC_CALL GC_get_heap_usage_safe(GC_word*pheap_size, +GC_word*pfree_bytes,GC_word*punmapped_bytes, +GC_word*pbytes_since_gc,GC_word*ptotal_bytes) +{ +DCL_LOCK_STATE; +LOCK(); +if (pheap_size!=NULL) +*pheap_size=GC_heapsize - GC_unmapped_bytes; +if (pfree_bytes!=NULL) +*pfree_bytes=GC_large_free_bytes - GC_unmapped_bytes; +if (punmapped_bytes!=NULL) +*punmapped_bytes=GC_unmapped_bytes; +if (pbytes_since_gc!=NULL) +*pbytes_since_gc=GC_bytes_allocd; +if (ptotal_bytes!=NULL) +*ptotal_bytes=GC_bytes_allocd+GC_bytes_allocd_before_gc; +UNLOCK(); +} +GC_INNER word GC_reclaimed_bytes_before_gc=0; +static void fill_prof_stats(struct GC_prof_stats_s*pstats) +{ +pstats->heapsize_full=GC_heapsize; +pstats->free_bytes_full=GC_large_free_bytes; +pstats->unmapped_bytes=GC_unmapped_bytes; +pstats->bytes_allocd_since_gc=GC_bytes_allocd; +pstats->allocd_bytes_before_gc=GC_bytes_allocd_before_gc; +pstats->non_gc_bytes=GC_non_gc_bytes; +pstats->gc_no=GC_gc_no; +#ifdef PARALLEL_MARK +pstats->markers_m1=(word)((signed_word)GC_markers_m1); +#else +pstats->markers_m1=0; +#endif +pstats->bytes_reclaimed_since_gc=GC_bytes_found > 0? 
+(word)GC_bytes_found:0; +pstats->reclaimed_bytes_before_gc=GC_reclaimed_bytes_before_gc; +pstats->expl_freed_bytes_since_gc=GC_bytes_freed; +} +#include +GC_API size_t GC_CALL GC_get_prof_stats(struct GC_prof_stats_s*pstats, +size_t stats_sz) +{ +struct GC_prof_stats_s stats; +DCL_LOCK_STATE; +LOCK(); +fill_prof_stats(stats_sz>=sizeof(stats)?pstats:&stats); +UNLOCK(); +if (stats_sz==sizeof(stats)){ +return sizeof(stats); +} else if (stats_sz > sizeof(stats)){ +memset((char*)pstats+sizeof(stats),0xff,stats_sz - sizeof(stats)); +return sizeof(stats); +} else { +if (EXPECT(stats_sz > 0,TRUE)) +BCOPY(&stats,pstats,stats_sz); +return stats_sz; +} +} +#ifdef THREADS +GC_API size_t GC_CALL GC_get_prof_stats_unsafe( +struct GC_prof_stats_s*pstats, +size_t stats_sz) +{ +struct GC_prof_stats_s stats; +if (stats_sz>=sizeof(stats)){ +fill_prof_stats(pstats); +if (stats_sz > sizeof(stats)) +memset((char*)pstats+sizeof(stats),0xff, +stats_sz - sizeof(stats)); +return sizeof(stats); +} else { +if (EXPECT(stats_sz > 0,TRUE)){ +fill_prof_stats(&stats); +BCOPY(&stats,pstats,stats_sz); +} +return stats_sz; +} +} +#endif +#endif +#if defined(GC_DARWIN_THREADS)||defined(GC_OPENBSD_UTHREADS)||defined(GC_WIN32_THREADS)||(defined(NACL)&&defined(THREADS)) +GC_API void GC_CALL GC_set_suspend_signal(int sig GC_ATTR_UNUSED) +{ +} +GC_API void GC_CALL GC_set_thr_restart_signal(int sig GC_ATTR_UNUSED) +{ +} +GC_API int GC_CALL GC_get_suspend_signal(void) +{ +return -1; +} +GC_API int GC_CALL GC_get_thr_restart_signal(void) +{ +return -1; +} +#endif +#if!defined(_MAX_PATH)&&(defined(MSWIN32)||defined(MSWINCE)||defined(CYGWIN32)) +#define _MAX_PATH MAX_PATH +#endif +#ifdef GC_READ_ENV_FILE +STATIC char*GC_envfile_content=NULL; +STATIC unsigned GC_envfile_length=0; +#ifndef GC_ENVFILE_MAXLEN +#define GC_ENVFILE_MAXLEN 0x4000 +#endif +#define GC_ENV_FILE_EXT ".gc.env" +STATIC void GC_envfile_init(void) +{ +#if defined(MSWIN32)||defined(MSWINCE)||defined(CYGWIN32) +HANDLE hFile; +char*content; +unsigned ofs; +unsigned len; +DWORD nBytesRead; +TCHAR path[_MAX_PATH+0x10]; +len=(unsigned)GetModuleFileName(NULL,path, +_MAX_PATH+1); +if (len > 4&&path[len - 4]==(TCHAR)'.'){ +len-=4; +} +BCOPY(TEXT(GC_ENV_FILE_EXT),&path[len],sizeof(TEXT(GC_ENV_FILE_EXT))); +hFile=CreateFile(path,GENERIC_READ, +FILE_SHARE_READ|FILE_SHARE_WRITE, +NULL,OPEN_EXISTING, +FILE_ATTRIBUTE_NORMAL,NULL); +if (hFile==INVALID_HANDLE_VALUE) +return; +len=(unsigned)GetFileSize(hFile,NULL); +if (len<=1||len>=GC_ENVFILE_MAXLEN){ +CloseHandle(hFile); +return; +} +GC_ASSERT(GC_page_size!=0); +content=(char*)GET_MEM(ROUNDUP_PAGESIZE_IF_MMAP((size_t)len+1)); +if (content==NULL){ +CloseHandle(hFile); +return; +} +ofs=0; +nBytesRead=(DWORD)-1L; +while (ReadFile(hFile,content+ofs,len - ofs+1,&nBytesRead, +NULL)&&nBytesRead!=0){ +if ((ofs+=nBytesRead)> len) +break; +} +CloseHandle(hFile); +if (ofs!=len||nBytesRead!=0) +return; +content[ofs]='\0'; +while (ofs--> 0){ +if (content[ofs]=='\r'||content[ofs]=='\n') +content[ofs]='\0'; +} +GC_ASSERT(NULL==GC_envfile_content); +GC_envfile_length=len+1; +GC_envfile_content=content; +#endif +} +GC_INNER char*GC_envfile_getenv(const char*name) +{ +char*p; +char*end_of_content; +unsigned namelen; +#ifndef NO_GETENV +p=getenv(name); +if (p!=NULL) +return*p!='\0'?p:NULL; +#endif +p=GC_envfile_content; +if (p==NULL) +return NULL; +namelen=strlen(name); +if (namelen==0) +return NULL; +for (end_of_content=p+GC_envfile_length; +p!=end_of_content;p+=strlen(p)+1){ +if (strncmp(p,name,namelen)==0&&*(p+=namelen)=='='){ +p++; 
+return*p!='\0'?p:NULL; +} +} +return NULL; +} +#endif +GC_INNER GC_bool GC_is_initialized=FALSE; +GC_API int GC_CALL GC_is_init_called(void) +{ +return GC_is_initialized; +} +#if defined(GC_WIN32_THREADS)&&((defined(MSWIN32)&&!defined(CONSOLE_LOG))||defined(MSWINCE)) +GC_INNER CRITICAL_SECTION GC_write_cs; +#endif +#ifndef DONT_USE_ATEXIT +#if!defined(PCR)&&!defined(SMALL_CONFIG) +static GC_bool skip_gc_atexit=FALSE; +#else +#define skip_gc_atexit FALSE +#endif +STATIC void GC_exit_check(void) +{ +if (GC_find_leak&&!skip_gc_atexit){ +#ifdef THREADS +GC_in_thread_creation=TRUE; +GC_gcollect(); +GC_in_thread_creation=FALSE; +#else +GC_gcollect(); +#endif +} +} +#endif +#if defined(UNIX_LIKE)&&!defined(NO_DEBUGGING) +static void looping_handler(int sig) +{ +GC_err_printf("Caught signal %d:looping in handler\n",sig); +for (;;){ +} +} +static GC_bool installed_looping_handler=FALSE; +static void maybe_install_looping_handler(void) +{ +if (!installed_looping_handler&&0!=GETENV("GC_LOOP_ON_ABORT")){ +GC_set_and_save_fault_handler(looping_handler); +installed_looping_handler=TRUE; +} +} +#else +#define maybe_install_looping_handler() +#endif +#define GC_DEFAULT_STDOUT_FD 1 +#define GC_DEFAULT_STDERR_FD 2 +#if!defined(OS2)&&!defined(MACOS)&&!defined(GC_ANDROID_LOG)&&!defined(NN_PLATFORM_CTR)&&!defined(NINTENDO_SWITCH)&&(!defined(MSWIN32)||defined(CONSOLE_LOG))&&!defined(MSWINCE) +STATIC int GC_stdout=GC_DEFAULT_STDOUT_FD; +STATIC int GC_stderr=GC_DEFAULT_STDERR_FD; +STATIC int GC_log=GC_DEFAULT_STDERR_FD; +#ifndef MSWIN32 +GC_API void GC_CALL GC_set_log_fd(int fd) +{ +GC_log=fd; +} +#endif +#endif +#ifdef MSGBOX_ON_ERROR +STATIC void GC_win32_MessageBoxA(const char*msg,const char*caption, +unsigned flags) +{ +#ifndef DONT_USE_USER32_DLL +(void)MessageBoxA(NULL,msg,caption,flags); +#else +HINSTANCE hU32=LoadLibrary(TEXT("user32.dll")); +if (hU32){ +FARPROC pfn=GetProcAddress(hU32,"MessageBoxA"); +if (pfn) +(void)(*(int (WINAPI*)(HWND,LPCSTR,LPCSTR,UINT))(word)pfn)( +NULL,msg,caption,flags); +(void)FreeLibrary(hU32); +} +#endif +} +#endif +#if defined(THREADS)&&defined(UNIX_LIKE)&&!defined(NO_GETCONTEXT) +static void callee_saves_pushed_dummy_fn(ptr_t data GC_ATTR_UNUSED, +void*context GC_ATTR_UNUSED){} +#endif +#ifndef SMALL_CONFIG +#ifdef MANUAL_VDB +static GC_bool manual_vdb_allowed=TRUE; +#else +static GC_bool manual_vdb_allowed=FALSE; +#endif +GC_API void GC_CALL GC_set_manual_vdb_allowed(int value) +{ +manual_vdb_allowed=(GC_bool)value; +} +GC_API int GC_CALL GC_get_manual_vdb_allowed(void) +{ +return (int)manual_vdb_allowed; +} +#endif +STATIC word GC_parse_mem_size_arg(const char*str) +{ +word result=0; +if (*str!='\0'){ +char*endptr; +char ch; +result=(word)STRTOULL(str,&endptr,10); +ch=*endptr; +if (ch!='\0'){ +if (*(endptr+1)!='\0') +return 0; +switch (ch){ +case 'K': +case 'k': +result<<=10; +break; +case 'M': +case 'm': +result<<=20; +break; +case 'G': +case 'g': +result<<=30; +break; +default: +result=0; +} +} +} +return result; +} +#define GC_LOG_STD_NAME "gc.log" +GC_API void GC_CALL GC_init(void) +{ +word initial_heap_sz; +IF_CANCEL(int cancel_state;) +#if defined(GC_ASSERTIONS)&&defined(GC_ALWAYS_MULTITHREADED) +DCL_LOCK_STATE; +#endif +if (EXPECT(GC_is_initialized,TRUE))return; +#ifdef REDIRECT_MALLOC +{ +static GC_bool init_started=FALSE; +if (init_started) +ABORT("Redirected malloc()called during GC init"); +init_started=TRUE; +} +#endif +#if defined(GC_INITIAL_HEAP_SIZE)&&!defined(CPPCHECK) +initial_heap_sz=GC_INITIAL_HEAP_SIZE; +#else +initial_heap_sz=MINHINCR*HBLKSIZE; 
+#endif +DISABLE_CANCEL(cancel_state); +#ifdef THREADS +#ifndef GC_ALWAYS_MULTITHREADED +GC_ASSERT(!GC_need_to_lock); +#endif +#ifdef SN_TARGET_PS3 +{ +pthread_mutexattr_t mattr; +if (0!=pthread_mutexattr_init(&mattr)){ +ABORT("pthread_mutexattr_init failed"); +} +if (0!=pthread_mutex_init(&GC_allocate_ml,&mattr)){ +ABORT("pthread_mutex_init failed"); +} +(void)pthread_mutexattr_destroy(&mattr); +} +#endif +#endif +#if defined(GC_WIN32_THREADS)&&!defined(GC_PTHREADS) +#ifndef SPIN_COUNT +#define SPIN_COUNT 4000 +#endif +#ifdef MSWINRT_FLAVOR +InitializeCriticalSectionAndSpinCount(&GC_allocate_ml,SPIN_COUNT); +#else +{ +#ifndef MSWINCE +FARPROC pfn=0; +HMODULE hK32=GetModuleHandle(TEXT("kernel32.dll")); +if (hK32) +pfn=GetProcAddress(hK32, +"InitializeCriticalSectionAndSpinCount"); +if (pfn){ +(*(BOOL (WINAPI*)(LPCRITICAL_SECTION,DWORD))(word)pfn)( +&GC_allocate_ml,SPIN_COUNT); +} else +#endif +InitializeCriticalSection(&GC_allocate_ml); +} +#endif +#endif +#if defined(GC_WIN32_THREADS)&&((defined(MSWIN32)&&!defined(CONSOLE_LOG))||defined(MSWINCE)) +InitializeCriticalSection(&GC_write_cs); +#endif +GC_setpagesize(); +#ifdef MSWIN32 +GC_init_win32(); +#endif +#ifdef GC_READ_ENV_FILE +GC_envfile_init(); +#endif +#if!defined(NO_CLOCK)||!defined(SMALL_CONFIG) +#ifdef GC_PRINT_VERBOSE_STATS +GC_print_stats=VERBOSE; +#else +if (0!=GETENV("GC_PRINT_VERBOSE_STATS")){ +GC_print_stats=VERBOSE; +} else if (0!=GETENV("GC_PRINT_STATS")){ +GC_print_stats=1; +} +#endif +#endif +#if ((defined(UNIX_LIKE)&&!defined(GC_ANDROID_LOG))||(defined(CONSOLE_LOG)&&defined(MSWIN32))||defined(CYGWIN32)||defined(SYMBIAN))&&!defined(SMALL_CONFIG) +{ +char*file_name=TRUSTED_STRING(GETENV("GC_LOG_FILE")); +#ifdef GC_LOG_TO_FILE_ALWAYS +if (NULL==file_name) +file_name=GC_LOG_STD_NAME; +#else +if (0!=file_name) +#endif +{ +#if defined(_MSC_VER) +int log_d=_open(file_name,O_CREAT|O_WRONLY|O_APPEND); +#else +int log_d=open(file_name,O_CREAT|O_WRONLY|O_APPEND,0644); +#endif +if (log_d < 0){ +GC_err_printf("Failed to open %s as log file\n",file_name); +} else { +char*str; +GC_log=log_d; +str=GETENV("GC_ONLY_LOG_TO_FILE"); +#ifdef GC_ONLY_LOG_TO_FILE +if (str!=NULL&&*str=='0'&&*(str+1)=='\0') +#else +if (str==NULL||(*str=='0'&&*(str+1)=='\0')) +#endif +{ +GC_stdout=log_d; +GC_stderr=log_d; +} +} +} +} +#endif +#if!defined(NO_DEBUGGING)&&!defined(GC_DUMP_REGULARLY) +if (0!=GETENV("GC_DUMP_REGULARLY")){ +GC_dump_regularly=TRUE; +} +#endif +#ifdef KEEP_BACK_PTRS +{ +char*backtraces_string=GETENV("GC_BACKTRACES"); +if (0!=backtraces_string){ +GC_backtraces=atol(backtraces_string); +if (backtraces_string[0]=='\0')GC_backtraces=1; +} +} +#endif +if (0!=GETENV("GC_FIND_LEAK")){ +GC_find_leak=1; +} +#ifndef SHORT_DBG_HDRS +if (0!=GETENV("GC_FINDLEAK_DELAY_FREE")){ +GC_findleak_delay_free=TRUE; +} +#endif +if (0!=GETENV("GC_ALL_INTERIOR_POINTERS")){ +GC_all_interior_pointers=1; +} +if (0!=GETENV("GC_DONT_GC")){ +GC_dont_gc=1; +} +if (0!=GETENV("GC_PRINT_BACK_HEIGHT")){ +GC_print_back_height=TRUE; +} +if (0!=GETENV("GC_NO_BLACKLIST_WARNING")){ +GC_large_alloc_warn_interval=LONG_MAX; +} +{ +char*addr_string=GETENV("GC_TRACE"); +if (0!=addr_string){ +#ifndef ENABLE_TRACE +WARN("Tracing not enabled:Ignoring GC_TRACE value\n",0); +#else +word addr=(word)STRTOULL(addr_string,NULL,16); +if (addr < 0x1000) +WARN("Unlikely trace address:%p\n",(void*)addr); +GC_trace_addr=(ptr_t)addr; +#endif +} +} +#ifdef GC_COLLECT_AT_MALLOC +{ +char*string=GETENV("GC_COLLECT_AT_MALLOC"); +if (0!=string){ +size_t min_lb=(size_t)STRTOULL(string,NULL,10); +if 
(min_lb > 0) +GC_dbg_collect_at_malloc_min_lb=min_lb; +} +} +#endif +#if!defined(GC_DISABLE_INCREMENTAL)&&!defined(NO_CLOCK) +{ +char*time_limit_string=GETENV("GC_PAUSE_TIME_TARGET"); +if (0!=time_limit_string){ +long time_limit=atol(time_limit_string); +if (time_limit > 0){ +GC_time_limit=time_limit; +} +} +} +#endif +#ifndef SMALL_CONFIG +{ +char*full_freq_string=GETENV("GC_FULL_FREQUENCY"); +if (full_freq_string!=NULL){ +int full_freq=atoi(full_freq_string); +if (full_freq > 0) +GC_full_freq=full_freq; +} +} +#endif +{ +char*interval_string=GETENV("GC_LARGE_ALLOC_WARN_INTERVAL"); +if (0!=interval_string){ +long interval=atol(interval_string); +if (interval<=0){ +WARN("GC_LARGE_ALLOC_WARN_INTERVAL environment variable has " +"bad value:Ignoring\n",0); +} else { +GC_large_alloc_warn_interval=interval; +} +} +} +{ +char*space_divisor_string=GETENV("GC_FREE_SPACE_DIVISOR"); +if (space_divisor_string!=NULL){ +int space_divisor=atoi(space_divisor_string); +if (space_divisor > 0) +GC_free_space_divisor=(unsigned)space_divisor; +} +} +#ifdef USE_MUNMAP +{ +char*string=GETENV("GC_UNMAP_THRESHOLD"); +if (string!=NULL){ +if (*string=='0'&&*(string+1)=='\0'){ +GC_unmap_threshold=0; +} else { +int unmap_threshold=atoi(string); +if (unmap_threshold > 0) +GC_unmap_threshold=unmap_threshold; +} +} +} +{ +char*string=GETENV("GC_FORCE_UNMAP_ON_GCOLLECT"); +if (string!=NULL){ +if (*string=='0'&&*(string+1)=='\0'){ +GC_force_unmap_on_gcollect=FALSE; +} else { +GC_force_unmap_on_gcollect=TRUE; +} +} +} +{ +char*string=GETENV("GC_USE_ENTIRE_HEAP"); +if (string!=NULL){ +if (*string=='0'&&*(string+1)=='\0'){ +GC_use_entire_heap=FALSE; +} else { +GC_use_entire_heap=TRUE; +} +} +} +#endif +#if!defined(NO_DEBUGGING)&&!defined(NO_CLOCK) +GET_TIME(GC_init_time); +#endif +maybe_install_looping_handler(); +#if ALIGNMENT > GC_DS_TAGS +if (EXTRA_BYTES!=0) +GC_obj_kinds[NORMAL].ok_descriptor=(word)(-ALIGNMENT)|GC_DS_LENGTH; +#endif +GC_exclude_static_roots_inner(beginGC_arrays,endGC_arrays); +GC_exclude_static_roots_inner(beginGC_obj_kinds,endGC_obj_kinds); +#ifdef SEPARATE_GLOBALS +GC_exclude_static_roots_inner(beginGC_objfreelist,endGC_objfreelist); +GC_exclude_static_roots_inner(beginGC_aobjfreelist,endGC_aobjfreelist); +#endif +#if defined(USE_PROC_FOR_LIBRARIES)&&defined(GC_LINUX_THREADS) +WARN("USE_PROC_FOR_LIBRARIES+GC_LINUX_THREADS performs poorly.\n",0); +#endif +#if defined(SEARCH_FOR_DATA_START) +GC_init_linux_data_start(); +#endif +#if defined(NETBSD)&&defined(__ELF__) +GC_init_netbsd_elf(); +#endif +#if!defined(THREADS)||defined(GC_PTHREADS)||defined(NN_PLATFORM_CTR)||defined(NINTENDO_SWITCH)||defined(GC_WIN32_THREADS)||defined(GC_SOLARIS_THREADS) +if (GC_stackbottom==0){ +GC_stackbottom=GC_get_main_stack_base(); +#if (defined(LINUX)||defined(HPUX))&&defined(IA64) +GC_register_stackbottom=GC_get_register_stack_base(); +#endif +} else { +#if (defined(LINUX)||defined(HPUX))&&defined(IA64) +if (GC_register_stackbottom==0){ +WARN("GC_register_stackbottom should be set with GC_stackbottom\n",0); +GC_register_stackbottom=GC_get_register_stack_base(); +} +#endif +} +#endif +#if!defined(CPPCHECK) +GC_STATIC_ASSERT(sizeof(ptr_t)==sizeof(word)); +GC_STATIC_ASSERT(sizeof(signed_word)==sizeof(word)); +#if!defined(_AUX_SOURCE)||defined(__GNUC__) +GC_STATIC_ASSERT((word)(-1)> (word)0); +#endif +GC_STATIC_ASSERT((signed_word)(-1)< (signed_word)0); +#endif +GC_STATIC_ASSERT(sizeof (struct hblk)==HBLKSIZE); +#ifndef THREADS +GC_ASSERT(!((word)GC_stackbottom HOTTER_THAN (word)GC_approx_sp())); +#endif +#ifndef 
GC_DISABLE_INCREMENTAL +if (GC_incremental||0!=GETENV("GC_ENABLE_INCREMENTAL")){ +#if defined(BASE_ATOMIC_OPS_EMULATED)||defined(CHECKSUMS)||defined(REDIRECT_MALLOC)||defined(REDIRECT_MALLOC_IN_HEADER)||defined(SMALL_CONFIG) +#else +if (manual_vdb_allowed){ +GC_manual_vdb=TRUE; +GC_incremental=TRUE; +} else +#endif +{ +GC_incremental=GC_dirty_init(); +GC_ASSERT(GC_bytes_allocd==0); +} +} +#endif +if (GC_REGISTER_MAIN_STATIC_DATA())GC_register_data_segments(); +GC_init_headers(); +GC_bl_init(); +GC_mark_init(); +{ +char*sz_str=GETENV("GC_INITIAL_HEAP_SIZE"); +if (sz_str!=NULL){ +initial_heap_sz=GC_parse_mem_size_arg(sz_str); +if (initial_heap_sz<=MINHINCR*HBLKSIZE){ +WARN("Bad initial heap size %s - ignoring it.\n",sz_str); +} +} +} +{ +char*sz_str=GETENV("GC_MAXIMUM_HEAP_SIZE"); +if (sz_str!=NULL){ +word max_heap_sz=GC_parse_mem_size_arg(sz_str); +if (max_heap_sz < initial_heap_sz){ +WARN("Bad maximum heap size %s - ignoring it.\n",sz_str); +} +if (0==GC_max_retries)GC_max_retries=2; +GC_set_max_heap_size(max_heap_sz); +} +} +#if defined(GC_ASSERTIONS)&&defined(GC_ALWAYS_MULTITHREADED) +LOCK(); +#endif +if (!GC_expand_hp_inner(divHBLKSZ(initial_heap_sz))){ +GC_err_printf("Can't start up:not enough memory\n"); +EXIT(); +} else { +GC_requested_heapsize+=initial_heap_sz; +} +if (GC_all_interior_pointers) +GC_initialize_offsets(); +GC_register_displacement_inner(0L); +#if defined(GC_LINUX_THREADS)&&defined(REDIRECT_MALLOC) +if (!GC_all_interior_pointers){ +GC_register_displacement_inner(sizeof(void*)); +} +#endif +GC_init_size_map(); +#ifdef PCR +if (PCR_IL_Lock(PCR_Bool_false,PCR_allSigsBlocked,PCR_waitForever) +!=PCR_ERes_okay){ +ABORT("Can't lock load state"); +} else if (PCR_IL_Unlock()!=PCR_ERes_okay){ +ABORT("Can't unlock load state"); +} +PCR_IL_Unlock(); +GC_pcr_install(); +#endif +GC_is_initialized=TRUE; +#if defined(GC_PTHREADS)||defined(GC_WIN32_THREADS) +GC_thr_init(); +#ifdef PARALLEL_MARK +#if defined(GC_ASSERTIONS)&&defined(GC_ALWAYS_MULTITHREADED) +UNLOCK(); +#endif +GC_start_mark_threads_inner(); +#if defined(GC_ASSERTIONS)&&defined(GC_ALWAYS_MULTITHREADED) +LOCK(); +#endif +#endif +#endif +COND_DUMP; +if (!GC_dont_precollect||GC_incremental){ +GC_gcollect_inner(); +} +#if defined(GC_ASSERTIONS)&&defined(GC_ALWAYS_MULTITHREADED) +UNLOCK(); +#endif +#if defined(THREADS)&&defined(UNIX_LIKE)&&!defined(NO_GETCONTEXT) +if (GC_dont_gc||GC_dont_precollect) +GC_with_callee_saves_pushed(callee_saves_pushed_dummy_fn,NULL); +#endif +#ifndef DONT_USE_ATEXIT +if (GC_find_leak){ +atexit(GC_exit_check); +} +#endif +#if defined(PARALLEL_MARK)||defined(THREAD_LOCAL_ALLOC)||(defined(GC_ALWAYS_MULTITHREADED)&&defined(GC_WIN32_THREADS)&&!defined(GC_NO_THREADS_DISCOVERY)) +GC_init_parallel(); +#endif +#if defined(DYNAMIC_LOADING)&&defined(DARWIN) +GC_init_dyld(); +#endif +RESTORE_CANCEL(cancel_state); +} +GC_API void GC_CALL GC_enable_incremental(void) +{ +#if!defined(GC_DISABLE_INCREMENTAL)&&!defined(KEEP_BACK_PTRS) +DCL_LOCK_STATE; +if (!GC_find_leak&&0==GETENV("GC_DISABLE_INCREMENTAL")){ +LOCK(); +if (!GC_incremental){ +GC_setpagesize(); +maybe_install_looping_handler(); +if (!GC_is_initialized){ +UNLOCK(); +GC_incremental=TRUE; +GC_init(); +LOCK(); +} else { +#if!defined(BASE_ATOMIC_OPS_EMULATED)&&!defined(CHECKSUMS)&&!defined(REDIRECT_MALLOC)&&!defined(REDIRECT_MALLOC_IN_HEADER)&&!defined(SMALL_CONFIG) +if (manual_vdb_allowed){ +GC_manual_vdb=TRUE; +GC_incremental=TRUE; +} else +#endif +{ +GC_incremental=GC_dirty_init(); +} +} +if (GC_incremental&&!GC_dont_gc){ +IF_CANCEL(int cancel_state;) 
+DISABLE_CANCEL(cancel_state); +if (GC_bytes_allocd > 0){ +GC_gcollect_inner(); +} +GC_read_dirty(FALSE); +RESTORE_CANCEL(cancel_state); +} +} +UNLOCK(); +return; +} +#endif +GC_init(); +} +#if defined(THREADS) +GC_API void GC_CALL GC_start_mark_threads(void) +{ +#if defined(PARALLEL_MARK)&&defined(CAN_HANDLE_FORK)&&!defined(THREAD_SANITIZER) +IF_CANCEL(int cancel_state;) +DISABLE_CANCEL(cancel_state); +GC_start_mark_threads_inner(); +RESTORE_CANCEL(cancel_state); +#else +GC_ASSERT(I_DONT_HOLD_LOCK()); +#endif +} +#endif +GC_API void GC_CALL GC_deinit(void) +{ +if (GC_is_initialized){ +GC_is_initialized=FALSE; +#if defined(GC_WIN32_THREADS)&&(defined(MSWIN32)||defined(MSWINCE)) +#if!defined(CONSOLE_LOG)||defined(MSWINCE) +DeleteCriticalSection(&GC_write_cs); +#endif +DeleteCriticalSection(&GC_allocate_ml); +#endif +} +} +#if (defined(MSWIN32)&&!defined(CONSOLE_LOG))||defined(MSWINCE) +#if defined(_MSC_VER)&&defined(_DEBUG)&&!defined(MSWINCE) +#include +#endif +STATIC HANDLE GC_log=0; +#ifdef THREADS +#if defined(PARALLEL_MARK)&&!defined(GC_ALWAYS_MULTITHREADED) +#define IF_NEED_TO_LOCK(x)if (GC_parallel||GC_need_to_lock)x +#else +#define IF_NEED_TO_LOCK(x)if (GC_need_to_lock)x +#endif +#else +#define IF_NEED_TO_LOCK(x) +#endif +#ifdef MSWINRT_FLAVOR +#include +DECLSPEC_IMPORT HRESULT WINAPI RoGetActivationFactory( +HSTRING activatableClassId, +REFIID iid,void**factory); +static GC_bool getWinRTLogPath(wchar_t*buf,size_t bufLen) +{ +static const GUID kIID_IApplicationDataStatics={ +0x5612147B,0xE843,0x45E3, +0x94,0xD8,0x06,0x16,0x9E,0x3C,0x8E,0x17 +}; +static const GUID kIID_IStorageItem={ +0x4207A996,0xCA2F,0x42F7, +0xBD,0xE8,0x8B,0x10,0x45,0x7A,0x7F,0x30 +}; +GC_bool result=FALSE; +HSTRING_HEADER appDataClassNameHeader; +HSTRING appDataClassName; +__x_ABI_CWindows_CStorage_CIApplicationDataStatics*appDataStatics=0; +GC_ASSERT(bufLen > 0); +if (SUCCEEDED(WindowsCreateStringReference( +RuntimeClass_Windows_Storage_ApplicationData, +(sizeof(RuntimeClass_Windows_Storage_ApplicationData)-1) +/sizeof(wchar_t), +&appDataClassNameHeader,&appDataClassName)) +&&SUCCEEDED(RoGetActivationFactory(appDataClassName, +&kIID_IApplicationDataStatics, +&appDataStatics))){ +__x_ABI_CWindows_CStorage_CIApplicationData*appData=NULL; +__x_ABI_CWindows_CStorage_CIStorageFolder*tempFolder=NULL; +__x_ABI_CWindows_CStorage_CIStorageItem*tempFolderItem=NULL; +HSTRING tempPath=NULL; +if (SUCCEEDED(appDataStatics->lpVtbl->get_Current(appDataStatics, +&appData)) +&&SUCCEEDED(appData->lpVtbl->get_TemporaryFolder(appData, +&tempFolder)) +&&SUCCEEDED(tempFolder->lpVtbl->QueryInterface(tempFolder, +&kIID_IStorageItem, +&tempFolderItem)) +&&SUCCEEDED(tempFolderItem->lpVtbl->get_Path(tempFolderItem, +&tempPath))){ +UINT32 tempPathLen; +const wchar_t*tempPathBuf= +WindowsGetStringRawBuffer(tempPath,&tempPathLen); +buf[0]='\0'; +if (wcsncat_s(buf,bufLen,tempPathBuf,tempPathLen)==0 +&&wcscat_s(buf,bufLen,L"\\")==0 +&&wcscat_s(buf,bufLen,TEXT(GC_LOG_STD_NAME))==0) +result=TRUE; +WindowsDeleteString(tempPath); +} +if (tempFolderItem!=NULL) +tempFolderItem->lpVtbl->Release(tempFolderItem); +if (tempFolder!=NULL) +tempFolder->lpVtbl->Release(tempFolder); +if (appData!=NULL) +appData->lpVtbl->Release(appData); +appDataStatics->lpVtbl->Release(appDataStatics); +} +return result; +} +#endif +STATIC HANDLE GC_CreateLogFile(void) +{ +HANDLE hFile; +#ifdef MSWINRT_FLAVOR +TCHAR pathBuf[_MAX_PATH+0x10]; +hFile=INVALID_HANDLE_VALUE; +if (getWinRTLogPath(pathBuf,_MAX_PATH+1)){ +CREATEFILE2_EXTENDED_PARAMETERS extParams; 
+BZERO(&extParams,sizeof(extParams)); +extParams.dwSize=sizeof(extParams); +extParams.dwFileAttributes=FILE_ATTRIBUTE_NORMAL; +extParams.dwFileFlags=GC_print_stats==VERBOSE?0 +:FILE_FLAG_WRITE_THROUGH; +hFile=CreateFile2(pathBuf,GENERIC_WRITE,FILE_SHARE_READ, +CREATE_ALWAYS,&extParams); +} +#else +TCHAR*logPath; +#if defined(NO_GETENV_WIN32)&&defined(CPPCHECK) +#define appendToFile FALSE +#else +BOOL appendToFile=FALSE; +#endif +#if!defined(NO_GETENV_WIN32)||!defined(OLD_WIN32_LOG_FILE) +TCHAR pathBuf[_MAX_PATH+0x10]; +logPath=pathBuf; +#endif +#ifndef NO_GETENV_WIN32 +if (GetEnvironmentVariable(TEXT("GC_LOG_FILE"),pathBuf, +_MAX_PATH+1)- 1U < (DWORD)_MAX_PATH){ +appendToFile=TRUE; +} else +#endif +{ +#ifdef OLD_WIN32_LOG_FILE +logPath=TEXT(GC_LOG_STD_NAME); +#else +int len=(int)GetModuleFileName(NULL,pathBuf, +_MAX_PATH+1); +if (len > 4&&pathBuf[len - 4]==(TCHAR)'.'){ +len-=4; +} +BCOPY(TEXT(".")TEXT(GC_LOG_STD_NAME),&pathBuf[len], +sizeof(TEXT(".")TEXT(GC_LOG_STD_NAME))); +#endif +} +hFile=CreateFile(logPath,GENERIC_WRITE,FILE_SHARE_READ, +NULL, +appendToFile?OPEN_ALWAYS:CREATE_ALWAYS, +GC_print_stats==VERBOSE?FILE_ATTRIBUTE_NORMAL: +FILE_ATTRIBUTE_NORMAL|FILE_FLAG_WRITE_THROUGH, +NULL); +#ifndef NO_GETENV_WIN32 +if (appendToFile&&hFile!=INVALID_HANDLE_VALUE){ +LONG posHigh=0; +(void)SetFilePointer(hFile,0,&posHigh,FILE_END); +} +#endif +#undef appendToFile +#endif +return hFile; +} +STATIC int GC_write(const char*buf,size_t len) +{ +BOOL res; +DWORD written; +#if defined(THREADS)&&defined(GC_ASSERTIONS) +static GC_bool inside_write=FALSE; +if (inside_write) +return -1; +#endif +if (len==0) +return 0; +IF_NEED_TO_LOCK(EnterCriticalSection(&GC_write_cs)); +#if defined(THREADS)&&defined(GC_ASSERTIONS) +if (GC_write_disabled){ +inside_write=TRUE; +ABORT("Assertion failure:GC_write called with write_disabled"); +} +#endif +if (GC_log==0){ +GC_log=GC_CreateLogFile(); +} +if (GC_log==INVALID_HANDLE_VALUE){ +IF_NEED_TO_LOCK(LeaveCriticalSection(&GC_write_cs)); +#ifdef NO_DEBUGGING +return 0; +#else +return -1; +#endif +} +res=WriteFile(GC_log,buf,(DWORD)len,&written,NULL); +#if defined(_MSC_VER)&&defined(_DEBUG)&&!defined(NO_CRT) +#ifdef MSWINCE +{ +WCHAR wbuf[1024]; +wbuf[MultiByteToWideChar(CP_ACP,0, +buf,len,wbuf, +sizeof(wbuf)/sizeof(wbuf[0])- 1)]=0; +OutputDebugStringW(wbuf); +} +#else +_CrtDbgReport(_CRT_WARN,NULL,0,NULL,"%.*s",len,buf); +#endif +#endif +IF_NEED_TO_LOCK(LeaveCriticalSection(&GC_write_cs)); +return res?(int)written:-1; +} +#define WRITE(f,buf,len)GC_write(buf,len) +#elif defined(OS2)||defined(MACOS) +STATIC FILE*GC_stdout=NULL; +STATIC FILE*GC_stderr=NULL; +STATIC FILE*GC_log=NULL; +STATIC void GC_set_files(void) +{ +if (GC_stdout==NULL){ +GC_stdout=stdout; +} +if (GC_stderr==NULL){ +GC_stderr=stderr; +} +if (GC_log==NULL){ +GC_log=stderr; +} +} +GC_INLINE int GC_write(FILE*f,const char*buf,size_t len) +{ +int res=fwrite(buf,1,len,f); +fflush(f); +return res; +} +#define WRITE(f,buf,len)(GC_set_files(),GC_write(f,buf,len)) +#elif defined(GC_ANDROID_LOG) +#include +#ifndef GC_ANDROID_LOG_TAG +#define GC_ANDROID_LOG_TAG "BDWGC" +#endif +#define GC_stdout ANDROID_LOG_DEBUG +#define GC_stderr ANDROID_LOG_ERROR +#define GC_log GC_stdout +#define WRITE(level,buf,unused_len)__android_log_write(level,GC_ANDROID_LOG_TAG,buf) +#elif defined(NN_PLATFORM_CTR) +int n3ds_log_write(const char*text,int length); +#define WRITE(level,buf,len)n3ds_log_write(buf,len) +#elif defined(NINTENDO_SWITCH) +int switch_log_write(const char*text,int length); +#define 
WRITE(level,buf,len)switch_log_write(buf,len) +#else +#if!defined(SN_TARGET_ORBIS)&&!defined(SN_TARGET_PSP2) +#if!defined(AMIGA)&&!defined(MSWIN32)&&!defined(MSWIN_XBOX1)&&!defined(__CC_ARM) +#include +#endif +#if!defined(ECOS)&&!defined(NOSYS) +#include +#endif +#endif +STATIC int GC_write(int fd,const char*buf,size_t len) +{ +#if defined(ECOS)||defined(SN_TARGET_ORBIS)||defined(SN_TARGET_PSP2)||defined(NOSYS) +#ifdef ECOS +#else +#endif +return len; +#else +int bytes_written=0; +IF_CANCEL(int cancel_state;) +DISABLE_CANCEL(cancel_state); +while ((unsigned)bytes_written < len){ +#ifdef GC_SOLARIS_THREADS +int result=syscall(SYS_write,fd,buf+bytes_written, +len - bytes_written); +#elif defined(_MSC_VER) +int result=_write(fd,buf+bytes_written, +(unsigned)(len - bytes_written)); +#else +int result=write(fd,buf+bytes_written,len - bytes_written); +#endif +if (-1==result){ +if (EAGAIN==errno) +continue; +RESTORE_CANCEL(cancel_state); +return(result); +} +bytes_written+=result; +} +RESTORE_CANCEL(cancel_state); +return(bytes_written); +#endif +} +#define WRITE(f,buf,len)GC_write(f,buf,len) +#endif +#define BUFSZ 1024 +#if defined(DJGPP)||defined(__STRICT_ANSI__) +#define GC_VSNPRINTF(buf,bufsz,format,args)vsprintf(buf,format,args) +#elif defined(_MSC_VER) +#ifdef MSWINCE +#define GC_VSNPRINTF StringCchVPrintfA +#else +#define GC_VSNPRINTF _vsnprintf +#endif +#else +#define GC_VSNPRINTF vsnprintf +#endif +#define GC_PRINTF_FILLBUF(buf,format)do { va_list args;va_start(args,format);(buf)[sizeof(buf)- 1]=0x15;(void)GC_VSNPRINTF(buf,sizeof(buf)- 1,format,args);va_end(args);if ((buf)[sizeof(buf)- 1]!=0x15)ABORT("GC_printf clobbered stack");} while (0) +void GC_printf(const char*format,...) +{ +if (!GC_quiet){ +char buf[BUFSZ+1]; +GC_PRINTF_FILLBUF(buf,format); +#ifdef NACL +(void)WRITE(GC_stdout,buf,strlen(buf)); +#else +if (WRITE(GC_stdout,buf,strlen(buf))< 0 +#if defined(CYGWIN32)||(defined(CONSOLE_LOG)&&defined(MSWIN32)) +&&GC_stdout!=GC_DEFAULT_STDOUT_FD +#endif +){ +ABORT("write to stdout failed"); +} +#endif +} +} +void GC_err_printf(const char*format,...) +{ +char buf[BUFSZ+1]; +GC_PRINTF_FILLBUF(buf,format); +GC_err_puts(buf); +} +void GC_log_printf(const char*format,...) +{ +char buf[BUFSZ+1]; +GC_PRINTF_FILLBUF(buf,format); +#ifdef NACL +(void)WRITE(GC_log,buf,strlen(buf)); +#else +if (WRITE(GC_log,buf,strlen(buf))< 0 +#if defined(CYGWIN32)||(defined(CONSOLE_LOG)&&defined(MSWIN32)) +&&GC_log!=GC_DEFAULT_STDERR_FD +#endif +){ +ABORT("write to GC log failed"); +} +#endif +} +#ifndef GC_ANDROID_LOG +#define GC_warn_printf GC_err_printf +#else +GC_INNER void GC_info_log_printf(const char*format,...) +{ +char buf[BUFSZ+1]; +GC_PRINTF_FILLBUF(buf,format); +(void)WRITE(ANDROID_LOG_INFO,buf,0); +} +GC_INNER void GC_verbose_log_printf(const char*format,...) +{ +char buf[BUFSZ+1]; +GC_PRINTF_FILLBUF(buf,format); +(void)WRITE(ANDROID_LOG_VERBOSE,buf,0); +} +STATIC void GC_warn_printf(const char*format,...) 
+{ +char buf[BUFSZ+1]; +GC_PRINTF_FILLBUF(buf,format); +(void)WRITE(ANDROID_LOG_WARN,buf,0); +} +#endif +void GC_err_puts(const char*s) +{ +(void)WRITE(GC_stderr,s,strlen(s)); +} +STATIC void GC_CALLBACK GC_default_warn_proc(char*msg,GC_word arg) +{ +GC_warn_printf(msg,arg); +} +GC_INNER GC_warn_proc GC_current_warn_proc=GC_default_warn_proc; +GC_API void GC_CALLBACK GC_ignore_warn_proc(char*msg,GC_word arg) +{ +if (GC_print_stats){ +GC_default_warn_proc(msg,arg); +} +} +GC_API void GC_CALL GC_set_warn_proc(GC_warn_proc p) +{ +DCL_LOCK_STATE; +GC_ASSERT(NONNULL_ARG_NOT_NULL(p)); +#ifdef GC_WIN32_THREADS +#ifdef CYGWIN32 +GC_ASSERT(GC_is_initialized); +#else +if (!GC_is_initialized)GC_init(); +#endif +#endif +LOCK(); +GC_current_warn_proc=p; +UNLOCK(); +} +GC_API GC_warn_proc GC_CALL GC_get_warn_proc(void) +{ +GC_warn_proc result; +DCL_LOCK_STATE; +LOCK(); +result=GC_current_warn_proc; +UNLOCK(); +return(result); +} +#if!defined(PCR)&&!defined(SMALL_CONFIG) +STATIC void GC_CALLBACK GC_default_on_abort(const char*msg) +{ +#ifndef DONT_USE_ATEXIT +skip_gc_atexit=TRUE; +#endif +if (msg!=NULL){ +#ifdef MSGBOX_ON_ERROR +GC_win32_MessageBoxA(msg,"Fatal error in GC",MB_ICONERROR|MB_OK); +#endif +#ifndef GC_ANDROID_LOG +#if defined(GC_WIN32_THREADS)&&defined(GC_ASSERTIONS)&&((defined(MSWIN32)&&!defined(CONSOLE_LOG))||defined(MSWINCE)) +if (!GC_write_disabled) +#endif +{ +if (WRITE(GC_stderr,msg,strlen(msg))>=0) +(void)WRITE(GC_stderr,"\n",1); +} +#else +__android_log_assert("*",GC_ANDROID_LOG_TAG,"%s\n",msg); +#endif +} +#if!defined(NO_DEBUGGING)&&!defined(GC_ANDROID_LOG) +if (GETENV("GC_LOOP_ON_ABORT")!=NULL){ +for(;;){ +} +} +#endif +} +GC_abort_func GC_on_abort=GC_default_on_abort; +GC_API void GC_CALL GC_set_abort_func(GC_abort_func fn) +{ +DCL_LOCK_STATE; +GC_ASSERT(NONNULL_ARG_NOT_NULL(fn)); +LOCK(); +GC_on_abort=fn; +UNLOCK(); +} +GC_API GC_abort_func GC_CALL GC_get_abort_func(void) +{ +GC_abort_func fn; +DCL_LOCK_STATE; +LOCK(); +fn=GC_on_abort; +UNLOCK(); +return fn; +} +#endif +GC_API void GC_CALL GC_enable(void) +{ +DCL_LOCK_STATE; +LOCK(); +GC_ASSERT(GC_dont_gc!=0); +GC_dont_gc--; +UNLOCK(); +} +GC_API void GC_CALL GC_disable(void) +{ +DCL_LOCK_STATE; +LOCK(); +GC_dont_gc++; +UNLOCK(); +} +GC_API int GC_CALL GC_is_disabled(void) +{ +return GC_dont_gc!=0; +} +GC_API void**GC_CALL GC_new_free_list_inner(void) +{ +void*result; +GC_ASSERT(I_HOLD_LOCK()); +result=GC_INTERNAL_MALLOC((MAXOBJGRANULES+1)*sizeof(ptr_t),PTRFREE); +if (NULL==result)ABORT("Failed to allocate freelist for new kind"); +BZERO(result,(MAXOBJGRANULES+1)*sizeof(ptr_t)); +return (void**)result; +} +GC_API void**GC_CALL GC_new_free_list(void) +{ +void**result; +DCL_LOCK_STATE; +LOCK(); +result=GC_new_free_list_inner(); +UNLOCK(); +return result; +} +GC_API unsigned GC_CALL GC_new_kind_inner(void**fl,GC_word descr, +int adjust,int clear) +{ +unsigned result=GC_n_kinds; +GC_ASSERT(NONNULL_ARG_NOT_NULL(fl)); +GC_ASSERT(adjust==FALSE||adjust==TRUE); +GC_ASSERT(clear==TRUE +||(descr==0&&adjust==FALSE&&clear==FALSE)); +if (result < MAXOBJKINDS){ +GC_ASSERT(result > 0); +GC_n_kinds++; +GC_obj_kinds[result].ok_freelist=fl; +GC_obj_kinds[result].ok_reclaim_list=0; +GC_obj_kinds[result].ok_descriptor=descr; +GC_obj_kinds[result].ok_relocate_descr=adjust; +GC_obj_kinds[result].ok_init=(GC_bool)clear; +#ifdef ENABLE_DISCLAIM +GC_obj_kinds[result].ok_mark_unconditionally=FALSE; +GC_obj_kinds[result].ok_disclaim_proc=0; +#endif +} else { +ABORT("Too many kinds"); +} +return result; +} +GC_API unsigned GC_CALL GC_new_kind(void**fl,GC_word 
descr,int adjust, +int clear) +{ +unsigned result; +DCL_LOCK_STATE; +LOCK(); +result=GC_new_kind_inner(fl,descr,adjust,clear); +UNLOCK(); +return result; +} +GC_API unsigned GC_CALL GC_new_proc_inner(GC_mark_proc proc) +{ +unsigned result=GC_n_mark_procs; +if (result < MAX_MARK_PROCS){ +GC_n_mark_procs++; +GC_mark_procs[result]=proc; +} else { +ABORT("Too many mark procedures"); +} +return result; +} +GC_API unsigned GC_CALL GC_new_proc(GC_mark_proc proc) +{ +unsigned result; +DCL_LOCK_STATE; +LOCK(); +result=GC_new_proc_inner(proc); +UNLOCK(); +return result; +} +GC_API void*GC_CALL GC_call_with_alloc_lock(GC_fn_type fn,void*client_data) +{ +void*result; +DCL_LOCK_STATE; +#ifdef THREADS +LOCK(); +#endif +result=(*fn)(client_data); +#ifdef THREADS +UNLOCK(); +#endif +return(result); +} +GC_API void*GC_CALL GC_call_with_stack_base(GC_stack_base_func fn,void*arg) +{ +struct GC_stack_base base; +void*result; +base.mem_base=(void*)&base; +#ifdef IA64 +base.reg_base=(void*)GC_save_regs_in_stack(); +#endif +result=fn(&base,arg); +GC_noop1(COVERT_DATAFLOW(&base)); +return result; +} +#ifndef THREADS +GC_INNER ptr_t GC_blocked_sp=NULL; +#ifdef IA64 +STATIC ptr_t GC_blocked_register_sp=NULL; +#endif +GC_INNER struct GC_traced_stack_sect_s*GC_traced_stack_sect=NULL; +GC_API void*GC_CALL GC_call_with_gc_active(GC_fn_type fn, +void*client_data) +{ +struct GC_traced_stack_sect_s stacksect; +GC_ASSERT(GC_is_initialized); +if ((word)GC_stackbottom HOTTER_THAN (word)(&stacksect)) +GC_stackbottom=(ptr_t)COVERT_DATAFLOW(&stacksect); +if (GC_blocked_sp==NULL){ +client_data=fn(client_data); +GC_noop1(COVERT_DATAFLOW(&stacksect)); +return client_data; +} +stacksect.saved_stack_ptr=GC_blocked_sp; +#ifdef IA64 +stacksect.backing_store_end=GC_save_regs_in_stack(); +stacksect.saved_backing_store_ptr=GC_blocked_register_sp; +#endif +stacksect.prev=GC_traced_stack_sect; +GC_blocked_sp=NULL; +GC_traced_stack_sect=&stacksect; +client_data=fn(client_data); +GC_ASSERT(GC_blocked_sp==NULL); +GC_ASSERT(GC_traced_stack_sect==&stacksect); +#if defined(CPPCHECK) +GC_noop1((word)GC_traced_stack_sect - (word)GC_blocked_sp); +#endif +GC_traced_stack_sect=stacksect.prev; +#ifdef IA64 +GC_blocked_register_sp=stacksect.saved_backing_store_ptr; +#endif +GC_blocked_sp=stacksect.saved_stack_ptr; +return client_data; +} +STATIC void GC_do_blocking_inner(ptr_t data,void*context GC_ATTR_UNUSED) +{ +struct blocking_data*d=(struct blocking_data*)data; +GC_ASSERT(GC_is_initialized); +GC_ASSERT(GC_blocked_sp==NULL); +#ifdef SPARC +GC_blocked_sp=GC_save_regs_in_stack(); +#else +GC_blocked_sp=(ptr_t)&d; +#endif +#ifdef IA64 +GC_blocked_register_sp=GC_save_regs_in_stack(); +#endif +d->client_data=(d->fn)(d->client_data); +#ifdef SPARC +GC_ASSERT(GC_blocked_sp!=NULL); +#else +GC_ASSERT(GC_blocked_sp==(ptr_t)(&d)); +#endif +#if defined(CPPCHECK) +GC_noop1((word)GC_blocked_sp); +#endif +GC_blocked_sp=NULL; +} +GC_API void GC_CALL GC_set_stackbottom(void*gc_thread_handle, +const struct GC_stack_base*sb) +{ +GC_ASSERT(sb->mem_base!=NULL); +GC_ASSERT(NULL==gc_thread_handle||&GC_stackbottom==gc_thread_handle); +GC_ASSERT(NULL==GC_blocked_sp +&&NULL==GC_traced_stack_sect); +(void)gc_thread_handle; +GC_stackbottom=(char*)sb->mem_base; +#ifdef IA64 +GC_register_stackbottom=(ptr_t)sb->reg_base; +#endif +} +GC_API void*GC_CALL GC_get_my_stackbottom(struct GC_stack_base*sb) +{ +GC_ASSERT(GC_is_initialized); +sb->mem_base=GC_stackbottom; +#ifdef IA64 +sb->reg_base=GC_register_stackbottom; +#endif +return&GC_stackbottom; +} +#endif +GC_API void*GC_CALL 
GC_do_blocking(GC_fn_type fn,void*client_data) +{ +struct blocking_data my_data; +my_data.fn=fn; +my_data.client_data=client_data; +GC_with_callee_saves_pushed(GC_do_blocking_inner,(ptr_t)(&my_data)); +return my_data.client_data; +} +#if!defined(NO_DEBUGGING) +GC_API void GC_CALL GC_dump(void) +{ +DCL_LOCK_STATE; +LOCK(); +GC_dump_named(NULL); +UNLOCK(); +} +GC_API void GC_CALL GC_dump_named(const char*name) +{ +#ifndef NO_CLOCK +CLOCK_TYPE current_time; +GET_TIME(current_time); +#endif +if (name!=NULL){ +GC_printf("***GC Dump %s\n",name); +} else { +GC_printf("***GC Dump collection #%lu\n",(unsigned long)GC_gc_no); +} +#ifndef NO_CLOCK +GC_printf("Time since GC init:%lu ms\n", +MS_TIME_DIFF(current_time,GC_init_time)); +#endif +GC_printf("\n***Static roots:\n"); +GC_print_static_roots(); +GC_printf("\n***Heap sections:\n"); +GC_print_heap_sects(); +GC_printf("\n***Free blocks:\n"); +GC_print_hblkfreelist(); +GC_printf("\n***Blocks in use:\n"); +GC_print_block_list(); +} +#endif +static void block_add_size(struct hblk*h,word pbytes) +{ +hdr*hhdr=HDR(h); +*(word*)pbytes+=(WORDS_TO_BYTES(hhdr->hb_sz)+(HBLKSIZE - 1)) +&~(word)(HBLKSIZE - 1); +} +GC_API size_t GC_CALL GC_get_memory_use(void) +{ +word bytes=0; +DCL_LOCK_STATE; +LOCK(); +GC_apply_to_all_blocks(block_add_size,(word)(&bytes)); +UNLOCK(); +return (size_t)bytes; +} +GC_API GC_word GC_CALL GC_get_gc_no(void) +{ +return GC_gc_no; +} +#ifdef THREADS +GC_API int GC_CALL GC_get_parallel(void) +{ +return GC_parallel; +} +GC_API void GC_CALL GC_alloc_lock(void) +{ +DCL_LOCK_STATE; +LOCK(); +} +GC_API void GC_CALL GC_alloc_unlock(void) +{ +UNLOCK(); +} +GC_INNER GC_on_thread_event_proc GC_on_thread_event=0; +GC_API void GC_CALL GC_set_on_thread_event(GC_on_thread_event_proc fn) +{ +DCL_LOCK_STATE; +LOCK(); +GC_on_thread_event=fn; +UNLOCK(); +} +GC_API GC_on_thread_event_proc GC_CALL GC_get_on_thread_event(void) +{ +GC_on_thread_event_proc fn; +DCL_LOCK_STATE; +LOCK(); +fn=GC_on_thread_event; +UNLOCK(); +return fn; +} +#endif +GC_API void GC_CALL GC_set_oom_fn(GC_oom_func fn) +{ +GC_ASSERT(NONNULL_ARG_NOT_NULL(fn)); +DCL_LOCK_STATE; +LOCK(); +GC_oom_fn=fn; +UNLOCK(); +} +GC_API GC_oom_func GC_CALL GC_get_oom_fn(void) +{ +GC_oom_func fn; +DCL_LOCK_STATE; +LOCK(); +fn=GC_oom_fn; +UNLOCK(); +return fn; +} +GC_API void GC_CALL GC_set_on_heap_resize(GC_on_heap_resize_proc fn) +{ +DCL_LOCK_STATE; +LOCK(); +GC_on_heap_resize=fn; +UNLOCK(); +} +GC_API GC_on_heap_resize_proc GC_CALL GC_get_on_heap_resize(void) +{ +GC_on_heap_resize_proc fn; +DCL_LOCK_STATE; +LOCK(); +fn=GC_on_heap_resize; +UNLOCK(); +return fn; +} +GC_API void GC_CALL GC_set_finalizer_notifier(GC_finalizer_notifier_proc fn) +{ +DCL_LOCK_STATE; +LOCK(); +GC_finalizer_notifier=fn; +UNLOCK(); +} +GC_API GC_finalizer_notifier_proc GC_CALL GC_get_finalizer_notifier(void) +{ +GC_finalizer_notifier_proc fn; +DCL_LOCK_STATE; +LOCK(); +fn=GC_finalizer_notifier; +UNLOCK(); +return fn; +} +GC_API void GC_CALL GC_set_find_leak(int value) +{ +GC_find_leak=value; +} +GC_API int GC_CALL GC_get_find_leak(void) +{ +return GC_find_leak; +} +GC_API void GC_CALL GC_set_all_interior_pointers(int value) +{ +DCL_LOCK_STATE; +GC_all_interior_pointers=value?1:0; +if (GC_is_initialized){ +LOCK(); +GC_initialize_offsets(); +if (!GC_all_interior_pointers) +GC_bl_init_no_interiors(); +UNLOCK(); +} +} +GC_API int GC_CALL GC_get_all_interior_pointers(void) +{ +return GC_all_interior_pointers; +} +GC_API void GC_CALL GC_set_finalize_on_demand(int value) +{ +GC_ASSERT(value!=-1); +GC_finalize_on_demand=value; +} 
+GC_API int GC_CALL GC_get_finalize_on_demand(void)
+{
+return GC_finalize_on_demand;
+}
+GC_API void GC_CALL GC_set_java_finalization(int value)
+{
+GC_ASSERT(value!=-1);
+GC_java_finalization=value;
+}
+GC_API int GC_CALL GC_get_java_finalization(void)
+{
+return GC_java_finalization;
+}
+GC_API void GC_CALL GC_set_dont_expand(int value)
+{
+GC_ASSERT(value!=-1);
+GC_dont_expand=value;
+}
+GC_API int GC_CALL GC_get_dont_expand(void)
+{
+return GC_dont_expand;
+}
+GC_API void GC_CALL GC_set_no_dls(int value)
+{
+GC_ASSERT(value!=-1);
+GC_no_dls=value;
+}
+GC_API int GC_CALL GC_get_no_dls(void)
+{
+return GC_no_dls;
+}
+GC_API void GC_CALL GC_set_non_gc_bytes(GC_word value)
+{
+GC_non_gc_bytes=value;
+}
+GC_API GC_word GC_CALL GC_get_non_gc_bytes(void)
+{
+return GC_non_gc_bytes;
+}
+GC_API void GC_CALL GC_set_free_space_divisor(GC_word value)
+{
+GC_ASSERT(value > 0);
+GC_free_space_divisor=value;
+}
+GC_API GC_word GC_CALL GC_get_free_space_divisor(void)
+{
+return GC_free_space_divisor;
+}
+GC_API void GC_CALL GC_set_max_retries(GC_word value)
+{
+GC_ASSERT((GC_signed_word)value!=-1);
+GC_max_retries=value;
+}
+GC_API GC_word GC_CALL GC_get_max_retries(void)
+{
+return GC_max_retries;
+}
+GC_API void GC_CALL GC_set_dont_precollect(int value)
+{
+GC_ASSERT(value!=-1);
+GC_dont_precollect=value;
+}
+GC_API int GC_CALL GC_get_dont_precollect(void)
+{
+return GC_dont_precollect;
+}
+GC_API void GC_CALL GC_set_full_freq(int value)
+{
+GC_ASSERT(value>=0);
+GC_full_freq=value;
+}
+GC_API int GC_CALL GC_get_full_freq(void)
+{
+return GC_full_freq;
+}
+GC_API void GC_CALL GC_set_time_limit(unsigned long value)
+{
+GC_ASSERT((long)value!=-1L);
+GC_time_limit=value;
+}
+GC_API unsigned long GC_CALL GC_get_time_limit(void)
+{
+return GC_time_limit;
+}
+GC_API void GC_CALL GC_set_force_unmap_on_gcollect(int value)
+{
+GC_force_unmap_on_gcollect=(GC_bool)value;
+}
+GC_API int GC_CALL GC_get_force_unmap_on_gcollect(void)
+{
+return (int)GC_force_unmap_on_gcollect;
+}
+GC_API void GC_CALL GC_abort_on_oom(void)
+{
+GC_err_printf("Insufficient memory for the allocation\n");
+EXIT();
+}
+#ifdef THREADS
+GC_API void GC_CALL GC_stop_world_external(void)
+{
+GC_ASSERT(GC_is_initialized);
+LOCK();
+#ifdef THREAD_LOCAL_ALLOC
+GC_ASSERT(!GC_world_stopped);
+#endif
+STOP_WORLD();
+#ifdef THREAD_LOCAL_ALLOC
+GC_world_stopped=TRUE;
+#endif
+}
+GC_API void GC_CALL GC_start_world_external(void)
+{
+#ifdef THREAD_LOCAL_ALLOC
+GC_ASSERT(GC_world_stopped);
+GC_world_stopped=FALSE;
+#else
+GC_ASSERT(GC_is_initialized);
+#endif
+START_WORLD();
+UNLOCK();
+}
+#endif
+#if!defined(OS2)&&!defined(PCR)&&!defined(AMIGA)&&!defined(MACOS)&&!defined(MSWINCE)&&!defined(SN_TARGET_ORBIS)&&!defined(SN_TARGET_PSP2)&&!defined(__CC_ARM)
+#include <sys/types.h>
+#if!defined(MSWIN32)&&!defined(MSWIN_XBOX1)
+#include <unistd.h>
+#endif
+#endif
+#include <stdio.h>
+#if defined(MSWINCE)||defined(SN_TARGET_PS3)
+#define SIGSEGV 0
+#else
+#include <signal.h>
+#endif
+#if defined(UNIX_LIKE)||defined(CYGWIN32)||defined(NACL)||defined(SYMBIAN)
+#include <fcntl.h>
+#endif
+#if defined(LINUX)||defined(LINUX_STACKBOTTOM)
+#include <ctype.h>
+#endif
+#ifdef AMIGA
+#define GC_AMIGA_DEF
+#if!defined(GC_AMIGA_DEF)&&!defined(GC_AMIGA_SB)&&!defined(GC_AMIGA_DS)&&!defined(GC_AMIGA_AM)
+#include
+#include
+#define GC_AMIGA_DEF
+#define GC_AMIGA_SB
+#define GC_AMIGA_DS
+#define GC_AMIGA_AM
+#endif
+#ifdef GC_AMIGA_DEF
+#ifndef __GNUC__
+#include <exec/exec.h>
+#endif
+#include <proto/exec.h>
+#include <proto/dos.h>
+#include <dos/dosextens.h>
+#include <workbench/startup.h>
+#endif
+#ifdef GC_AMIGA_SB
+ptr_t GC_get_main_stack_base(void)
+{
+struct Process*proc=(struct Process*)SysBase->ThisTask;
+if
(proc->pr_Task.tc_Node.ln_Type==NT_PROCESS +&&proc->pr_CLI!=NULL){ +return (char*)proc->pr_ReturnAddr+sizeof(ULONG); +} else { +return (char*)proc->pr_Task.tc_SPUpper; +} +} +#endif +#ifdef GC_AMIGA_DS +void GC_register_data_segments(void) +{ +struct Process*proc; +struct CommandLineInterface*cli; +BPTR myseglist; +ULONG*data; +#ifdef __GNUC__ +ULONG dataSegSize; +GC_bool found_segment=FALSE; +extern char __data_size[]; +dataSegSize=__data_size+8; +#endif +proc=(struct Process*)SysBase->ThisTask; +myseglist=proc->pr_SegList; +if (proc->pr_Task.tc_Node.ln_Type==NT_PROCESS){ +if (proc->pr_CLI!=NULL){ +cli=BADDR(proc->pr_CLI); +myseglist=cli->cli_Module; +} +} else { +ABORT("Not a Process."); +} +if (myseglist==NULL){ +ABORT("Arrrgh.. can't find segments,aborting"); +} +for (data=(ULONG*)BADDR(myseglist);data!=NULL; +data=(ULONG*)BADDR(data[0])){ +if ((ULONG)GC_register_data_segments < (ULONG)(&data[1]) +||(ULONG)GC_register_data_segments > (ULONG)(&data[1]) ++data[-1]){ +#ifdef __GNUC__ +if (dataSegSize==data[-1]){ +found_segment=TRUE; +} +#endif +GC_add_roots_inner((char*)&data[1], +((char*)&data[1])+data[-1],FALSE); +} +} +#ifdef __GNUC__ +if (!found_segment){ +ABORT("Can`t find correct Segments.\nSolution:Use an newer version of ixemul.library"); +} +#endif +} +#endif +#ifdef GC_AMIGA_AM +#ifndef GC_AMIGA_FASTALLOC +void*GC_amiga_allocwrapper(size_t size,void*(*AllocFunction)(size_t size2)){ +return (*AllocFunction)(size); +} +void*(*GC_amiga_allocwrapper_do)(size_t size,void*(*AllocFunction)(size_t size2)) +=GC_amiga_allocwrapper; +#else +void*GC_amiga_allocwrapper_firsttime(size_t size,void*(*AllocFunction)(size_t size2)); +void*(*GC_amiga_allocwrapper_do)(size_t size,void*(*AllocFunction)(size_t size2)) +=GC_amiga_allocwrapper_firsttime; +struct GC_Amiga_AllocedMemoryHeader{ +ULONG size; +struct GC_Amiga_AllocedMemoryHeader*next; +}; +struct GC_Amiga_AllocedMemoryHeader*GC_AMIGAMEM=(struct GC_Amiga_AllocedMemoryHeader*)(int)~(NULL); +ULONG GC_AMIGA_MEMF=MEMF_FAST|MEMF_CLEAR; +#ifndef GC_AMIGA_ONLYFAST +BOOL GC_amiga_dontalloc=FALSE; +#endif +#ifdef GC_AMIGA_PRINTSTATS +int succ=0,succ2=0; +int nsucc=0,nsucc2=0; +int nullretries=0; +int numcollects=0; +int chipa=0; +int allochip=0; +int allocfast=0; +int cur0=0; +int cur1=0; +int cur10=0; +int cur50=0; +int cur150=0; +int cur151=0; +int ncur0=0; +int ncur1=0; +int ncur10=0; +int ncur50=0; +int ncur150=0; +int ncur151=0; +#endif +void GC_amiga_free_all_mem(void){ +struct GC_Amiga_AllocedMemoryHeader*gc_am=(struct GC_Amiga_AllocedMemoryHeader*)(~(int)(GC_AMIGAMEM)); +#ifdef GC_AMIGA_PRINTSTATS +printf("\n\n" +"%d bytes of chip-mem,and %d bytes of fast-mem where allocated from the OS.\n", +allochip,allocfast +); +printf( +"%d bytes of chip-mem were returned from the GC_AMIGA_FASTALLOC supported allocating functions.\n", +chipa +); +printf("\n"); +printf("GC_gcollect was called %d times to avoid returning NULL or start allocating with the MEMF_ANY flag.\n",numcollects); +printf("%d of them was a success. 
(the others had to use allocation from the OS.)\n",nullretries); +printf("\n"); +printf("Succeeded forcing %d gc-allocations (%d bytes)of chip-mem to be fast-mem.\n",succ,succ2); +printf("Failed forcing %d gc-allocations (%d bytes)of chip-mem to be fast-mem.\n",nsucc,nsucc2); +printf("\n"); +printf( +"Number of retries before succeeding a chip->fast force:\n" +"0:%d,1:%d,2-9:%d,10-49:%d,50-149:%d,>150:%d\n", +cur0,cur1,cur10,cur50,cur150,cur151 +); +printf( +"Number of retries before giving up a chip->fast force:\n" +"0:%d,1:%d,2-9:%d,10-49:%d,50-149:%d,>150:%d\n", +ncur0,ncur1,ncur10,ncur50,ncur150,ncur151 +); +#endif +while(gc_am!=NULL){ +struct GC_Amiga_AllocedMemoryHeader*temp=gc_am->next; +FreeMem(gc_am,gc_am->size); +gc_am=(struct GC_Amiga_AllocedMemoryHeader*)(~(int)(temp)); +} +} +#ifndef GC_AMIGA_ONLYFAST +char*chipmax; +size_t latestsize; +#endif +#ifdef GC_AMIGA_FASTALLOC +void*GC_amiga_get_mem(size_t size){ +struct GC_Amiga_AllocedMemoryHeader*gc_am; +#ifndef GC_AMIGA_ONLYFAST +if(GC_amiga_dontalloc==TRUE){ +return NULL; +} +if(GC_AMIGA_MEMF==(MEMF_ANY|MEMF_CLEAR)&&size>100000&&latestsize<50000)return NULL; +#endif +gc_am=AllocMem((ULONG)(size+sizeof(struct GC_Amiga_AllocedMemoryHeader)),GC_AMIGA_MEMF); +if(gc_am==NULL)return NULL; +gc_am->next=GC_AMIGAMEM; +gc_am->size=size+sizeof(struct GC_Amiga_AllocedMemoryHeader); +GC_AMIGAMEM=(struct GC_Amiga_AllocedMemoryHeader*)(~(int)(gc_am)); +#ifdef GC_AMIGA_PRINTSTATS +if((char*)gc_amchipmax||ret==NULL){ +if(ret==NULL){ +nsucc++; +nsucc2+=size; +if(rec==0)ncur0++; +if(rec==1)ncur1++; +if(rec>1&&rec<10)ncur10++; +if(rec>=10&&rec<50)ncur50++; +if(rec>=50&&rec<150)ncur150++; +if(rec>=150)ncur151++; +}else{ +succ++; +succ2+=size; +if(rec==0)cur0++; +if(rec==1)cur1++; +if(rec>1&&rec<10)cur10++; +if(rec>=10&&rec<50)cur50++; +if(rec>=50&&rec<150)cur150++; +if(rec>=150)cur151++; +} +} +#endif +if (((char*)ret)<=chipmax&&ret!=NULL&&(rec<(size>500000?9:size/5000))){ +ret=GC_amiga_rec_alloc(size,AllocFunction,rec+1); +} +return ret; +} +#endif +void*GC_amiga_allocwrapper_any(size_t size,void*(*AllocFunction)(size_t size2)){ +void*ret; +GC_amiga_dontalloc=TRUE; +latestsize=size; +ret=(*AllocFunction)(size); +if(((char*)ret)<=chipmax){ +if(ret==NULL){ +#ifdef GC_AMIGA_GC +if(!GC_dont_gc){ +GC_gcollect(); +#ifdef GC_AMIGA_PRINTSTATS +numcollects++; +#endif +ret=(*AllocFunction)(size); +} +if(ret==NULL) +#endif +{ +GC_amiga_dontalloc=FALSE; +ret=(*AllocFunction)(size); +if(ret==NULL){ +WARN("Out of Memory!Returning NIL!\n",0); +} +} +#ifdef GC_AMIGA_PRINTSTATS +else{ +nullretries++; +} +if(ret!=NULL&&(char*)ret<=chipmax)chipa+=size; +#endif +} +#ifdef GC_AMIGA_RETRY +else{ +void*ret2; +if( +AllocFunction!=GC_malloc_uncollectable +#ifdef GC_ATOMIC_UNCOLLECTABLE +&&AllocFunction!=GC_malloc_atomic_uncollectable +#endif +){ +ret2=GC_amiga_rec_alloc(size,AllocFunction,0); +}else{ +ret2=(*AllocFunction)(size); +#ifdef GC_AMIGA_PRINTSTATS +if((char*)ret2chipmax){ +GC_free(ret); +ret=ret2; +}else{ +GC_free(ret2); +} +} +#endif +} +#if defined(CPPCHECK) +if (GC_amiga_dontalloc) +#endif +GC_amiga_dontalloc=FALSE; +return ret; +} +void (*GC_amiga_toany)(void)=NULL; +void GC_amiga_set_toany(void (*func)(void)){ +GC_amiga_toany=func; +} +#endif +void*GC_amiga_allocwrapper_fast(size_t size,void*(*AllocFunction)(size_t size2)){ +void*ret; +ret=(*AllocFunction)(size); +if(ret==NULL){ +#ifdef GC_AMIGA_GC +if(!GC_dont_gc){ +GC_gcollect(); +#ifdef GC_AMIGA_PRINTSTATS +numcollects++; +#endif +ret=(*AllocFunction)(size); +} +if(ret==NULL) +#endif +{ +#ifndef 
GC_AMIGA_ONLYFAST +GC_AMIGA_MEMF=MEMF_ANY|MEMF_CLEAR; +if(GC_amiga_toany!=NULL)(*GC_amiga_toany)(); +GC_amiga_allocwrapper_do=GC_amiga_allocwrapper_any; +return GC_amiga_allocwrapper_any(size,AllocFunction); +#endif +} +#ifdef GC_AMIGA_PRINTSTATS +else{ +nullretries++; +} +#endif +} +return ret; +} +void*GC_amiga_allocwrapper_firsttime(size_t size,void*(*AllocFunction)(size_t size2)){ +atexit(&GC_amiga_free_all_mem); +chipmax=(char*)SysBase->MaxLocMem; +GC_amiga_allocwrapper_do=GC_amiga_allocwrapper_fast; +return GC_amiga_allocwrapper_fast(size,AllocFunction); +} +#endif +void*GC_amiga_realloc(void*old_object,size_t new_size_in_bytes){ +#ifndef GC_AMIGA_FASTALLOC +return GC_realloc(old_object,new_size_in_bytes); +#else +void*ret; +latestsize=new_size_in_bytes; +ret=GC_realloc(old_object,new_size_in_bytes); +if(ret==NULL&&new_size_in_bytes!=0 +&&GC_AMIGA_MEMF==(MEMF_FAST|MEMF_CLEAR)){ +#ifdef GC_AMIGA_GC +if(!GC_dont_gc){ +GC_gcollect(); +#ifdef GC_AMIGA_PRINTSTATS +numcollects++; +#endif +ret=GC_realloc(old_object,new_size_in_bytes); +} +if(ret==NULL) +#endif +{ +#ifndef GC_AMIGA_ONLYFAST +GC_AMIGA_MEMF=MEMF_ANY|MEMF_CLEAR; +if(GC_amiga_toany!=NULL)(*GC_amiga_toany)(); +GC_amiga_allocwrapper_do=GC_amiga_allocwrapper_any; +ret=GC_realloc(old_object,new_size_in_bytes); +#endif +} +#ifdef GC_AMIGA_PRINTSTATS +else{ +nullretries++; +} +#endif +} +if(ret==NULL&&new_size_in_bytes!=0){ +WARN("Out of Memory!Returning NIL!\n",0); +} +#ifdef GC_AMIGA_PRINTSTATS +if(((char*)ret) +#endif +#ifdef IRIX5 +#include +#include +#endif +#if defined(MMAP_SUPPORTED)||defined(ADD_HEAP_GUARD_PAGES) +#if defined(USE_MUNMAP)&&!defined(USE_MMAP)&&!defined(CPPCHECK) +#error Invalid config:USE_MUNMAP requires USE_MMAP +#endif +#include +#include +#include +#include +#endif +#ifdef DARWIN +#include +#endif +#ifdef DJGPP +typedef long unsigned int caddr_t; +#endif +#ifdef PCR +#include "mm/PCR_MM.h" +#endif +#if defined(GC_DARWIN_THREADS)&&defined(MPROTECT_VDB) +#ifndef GC_DARWIN_STOP_WORLD_H +#define GC_DARWIN_STOP_WORLD_H +#if!defined(GC_DARWIN_THREADS) +#error darwin_stop_world.h included without GC_DARWIN_THREADS defined +#endif +#include +#include +EXTERN_C_BEGIN +struct thread_stop_info { +mach_port_t mach_thread; +ptr_t stack_ptr; +}; +#ifndef DARWIN_DONT_PARSE_STACK +GC_INNER ptr_t GC_FindTopOfStack(unsigned long); +#endif +#ifdef MPROTECT_VDB +GC_INNER void GC_mprotect_stop(void); +GC_INNER void GC_mprotect_resume(void); +#ifndef GC_NO_THREADS_DISCOVERY +GC_INNER void GC_darwin_register_mach_handler_thread(mach_port_t thread); +#endif +#endif +#if defined(PARALLEL_MARK)&&!defined(GC_NO_THREADS_DISCOVERY) +GC_INNER GC_bool GC_is_mach_marker(thread_act_t); +#endif +EXTERN_C_END +#endif +#endif +#if!defined(NO_EXECUTE_PERMISSION) +STATIC GC_bool GC_pages_executable=TRUE; +#else +STATIC GC_bool GC_pages_executable=FALSE; +#endif +#define IGNORE_PAGES_EXECUTABLE 1 +#ifdef NEED_PROC_MAPS +STATIC ssize_t GC_repeat_read(int fd,char*buf,size_t count) +{ +#define READ read +size_t num_read=0; +ASSERT_CANCEL_DISABLED(); +while (num_read < count){ +ssize_t result=READ(fd,buf+num_read,count - num_read); +if (result < 0)return result; +if (result==0)break; +num_read+=result; +} +#undef READ +return num_read; +} +#ifdef THREADS +STATIC size_t GC_get_file_len(int f) +{ +size_t total=0; +ssize_t result; +#define GET_FILE_LEN_BUF_SZ 500 +char buf[GET_FILE_LEN_BUF_SZ]; +do { +result=read(f,buf,GET_FILE_LEN_BUF_SZ); +if (result==-1)return 0; +total+=result; +} while (result > 0); +return total; +} +STATIC size_t 
GC_get_maps_len(void) +{ +int f=open("/proc/self/maps",O_RDONLY); +size_t result; +if (f < 0)return 0; +result=GC_get_file_len(f); +close(f); +return result; +} +#endif +GC_INNER char*GC_get_maps(void) +{ +ssize_t result; +static char*maps_buf=NULL; +static size_t maps_buf_sz=1; +size_t maps_size; +#ifdef THREADS +size_t old_maps_size=0; +#endif +GC_ASSERT(I_HOLD_LOCK()); +#ifdef THREADS +maps_size=GC_get_maps_len(); +if (0==maps_size)return 0; +#else +maps_size=4000; +#endif +do { +int f; +while (maps_size>=maps_buf_sz){ +#ifdef LINT2 +GC_noop1((word)maps_buf); +#else +GC_scratch_recycle_no_gww(maps_buf,maps_buf_sz); +#endif +while (maps_size>=maps_buf_sz)maps_buf_sz*=2; +maps_buf=GC_scratch_alloc(maps_buf_sz); +#ifdef THREADS +maps_size=GC_get_maps_len(); +if (0==maps_size)return 0; +#endif +if (maps_buf==0)return 0; +} +GC_ASSERT(maps_buf_sz>=maps_size+1); +f=open("/proc/self/maps",O_RDONLY); +if (-1==f)return 0; +#ifdef THREADS +old_maps_size=maps_size; +#endif +maps_size=0; +do { +result=GC_repeat_read(f,maps_buf,maps_buf_sz-1); +if (result<=0){ +close(f); +return 0; +} +maps_size+=result; +} while ((size_t)result==maps_buf_sz-1); +close(f); +#ifdef THREADS +if (maps_size > old_maps_size){ +WARN("Unexpected asynchronous/proc/self/maps growth" +" (to %" WARN_PRIdPTR " bytes)\n",maps_size); +} +#endif +} while (maps_size>=maps_buf_sz +#ifdef THREADS +||maps_size < old_maps_size +#endif +); +maps_buf[maps_size]='\0'; +return maps_buf; +} +#if (defined(DYNAMIC_LOADING)&&defined(USE_PROC_FOR_LIBRARIES))||defined(IA64)||defined(INCLUDE_LINUX_THREAD_DESCR)||defined(REDIRECT_MALLOC) +GC_INNER char*GC_parse_map_entry(char*buf_ptr,ptr_t*start,ptr_t*end, +char**prot,unsigned int*maj_dev, +char**mapping_name) +{ +unsigned char*start_start,*end_start,*maj_dev_start; +unsigned char*p; +if (buf_ptr==NULL||*buf_ptr=='\0'){ +return NULL; +} +p=(unsigned char*)buf_ptr; +while (isspace(*p))++p; +start_start=p; +GC_ASSERT(isxdigit(*start_start)); +*start=(ptr_t)strtoul((char*)start_start,(char**)&p,16); +GC_ASSERT(*p=='-'); +++p; +end_start=p; +GC_ASSERT(isxdigit(*end_start)); +*end=(ptr_t)strtoul((char*)end_start,(char**)&p,16); +GC_ASSERT(isspace(*p)); +while (isspace(*p))++p; +GC_ASSERT(*p=='r'||*p=='-'); +*prot=(char*)p; +while (!isspace(*p))++p; +while (isspace(*p))p++; +GC_ASSERT(isxdigit(*p)); +while (!isspace(*p))++p; +while (isspace(*p))p++; +maj_dev_start=p; +GC_ASSERT(isxdigit(*maj_dev_start)); +*maj_dev=strtoul((char*)maj_dev_start,NULL,16); +if (mapping_name==0){ +while (*p&&*p++!='\n'); +} else { +while (*p&&*p!='\n'&&*p!='/'&&*p!='[')p++; +*mapping_name=(char*)p; +while (*p&&*p++!='\n'); +} +return (char*)p; +} +#endif +#if defined(IA64)||defined(INCLUDE_LINUX_THREAD_DESCR) +GC_INNER GC_bool GC_enclosing_mapping(ptr_t addr,ptr_t*startp, +ptr_t*endp) +{ +char*prot; +ptr_t my_start,my_end; +unsigned int maj_dev; +char*maps=GC_get_maps(); +char*buf_ptr=maps; +if (0==maps)return(FALSE); +for (;;){ +buf_ptr=GC_parse_map_entry(buf_ptr,&my_start,&my_end, +&prot,&maj_dev,0); +if (buf_ptr==NULL)return FALSE; +if (prot[1]=='w'&&maj_dev==0){ +if ((word)my_end > (word)addr&&(word)my_start<=(word)addr){ +*startp=my_start; +*endp=my_end; +return TRUE; +} +} +} +return FALSE; +} +#endif +#if defined(REDIRECT_MALLOC) +GC_INNER GC_bool GC_text_mapping(char*nm,ptr_t*startp,ptr_t*endp) +{ +size_t nm_len=strlen(nm); +char*prot; +char*map_path; +ptr_t my_start,my_end; +unsigned int maj_dev; +char*maps=GC_get_maps(); +char*buf_ptr=maps; +if (0==maps)return(FALSE); +for (;;){ 
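+/* Scan each /proc/self/maps entry; an r-x mapping whose file basename matches nm is reported as that library's text segment. */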
+buf_ptr=GC_parse_map_entry(buf_ptr,&my_start,&my_end, +&prot,&maj_dev,&map_path); +if (buf_ptr==NULL)return FALSE; +if (prot[0]=='r'&&prot[1]=='-'&&prot[2]=='x'){ +char*p=map_path; +while (*p!='\0'&&*p!='\n'&&*p!=' '&&*p!='\t')++p; +while (*p!='/'&&(word)p>=(word)map_path)--p; +++p; +if (strncmp(nm,p,nm_len)==0){ +*startp=my_start; +*endp=my_end; +return TRUE; +} +} +} +return FALSE; +} +#endif +#ifdef IA64 +static ptr_t backing_store_base_from_proc(void) +{ +ptr_t my_start,my_end; +if (!GC_enclosing_mapping(GC_save_regs_in_stack(),&my_start,&my_end)){ +GC_COND_LOG_PRINTF("Failed to find backing store base from/proc\n"); +return 0; +} +return my_start; +} +#endif +#endif +#if defined(SEARCH_FOR_DATA_START) +#if defined(LINUX)||defined(HURD) +EXTERN_C_BEGIN +#pragma weak __data_start +#pragma weak data_start +extern int __data_start[],data_start[]; +EXTERN_C_END +#endif +ptr_t GC_data_start=NULL; +GC_INNER void GC_init_linux_data_start(void) +{ +ptr_t data_end=DATAEND; +#if (defined(LINUX)||defined(HURD))&&defined(USE_PROG_DATA_START) +if (COVERT_DATAFLOW(__data_start)!=0){ +GC_data_start=(ptr_t)(__data_start); +} else { +GC_data_start=(ptr_t)(data_start); +} +if (COVERT_DATAFLOW(GC_data_start)!=0){ +if ((word)GC_data_start > (word)data_end) +ABORT_ARG2("Wrong __data_start/_end pair", +":%p .. %p",(void*)GC_data_start,(void*)data_end); +return; +} +#ifdef DEBUG_ADD_DEL_ROOTS +GC_log_printf("__data_start not provided\n"); +#endif +#endif +if (GC_no_dls){ +GC_data_start=data_end; +return; +} +GC_data_start=(ptr_t)GC_find_limit(data_end,FALSE); +} +#endif +#ifdef ECOS +#ifndef ECOS_GC_MEMORY_SIZE +#define ECOS_GC_MEMORY_SIZE (448*1024) +#endif +static char ecos_gc_memory[ECOS_GC_MEMORY_SIZE]; +static char*ecos_gc_brk=ecos_gc_memory; +static void*tiny_sbrk(ptrdiff_t increment) +{ +void*p=ecos_gc_brk; +ecos_gc_brk+=increment; +if ((word)ecos_gc_brk > (word)(ecos_gc_memory+sizeof(ecos_gc_memory))){ +ecos_gc_brk-=increment; +return NULL; +} +return p; +} +#define sbrk tiny_sbrk +#endif +#if defined(NETBSD)&&defined(__ELF__) +ptr_t GC_data_start=NULL; +EXTERN_C_BEGIN +extern char**environ; +EXTERN_C_END +GC_INNER void GC_init_netbsd_elf(void) +{ +GC_data_start=(ptr_t)GC_find_limit(&environ,FALSE); +} +#endif +#if defined(ADDRESS_SANITIZER)&&(defined(UNIX_LIKE)||defined(NEED_FIND_LIMIT)||defined(MPROTECT_VDB))&&!defined(CUSTOM_ASAN_DEF_OPTIONS) +GC_API const char*__asan_default_options(void) +{ +return "allow_user_segv_handler=1"; +} +#endif +#ifdef OPENBSD +static struct sigaction old_segv_act; +STATIC JMP_BUF GC_jmp_buf_openbsd; +STATIC void GC_fault_handler_openbsd(int sig GC_ATTR_UNUSED) +{ +LONGJMP(GC_jmp_buf_openbsd,1); +} +#ifdef GC_OPENBSD_UTHREADS +#include +EXTERN_C_BEGIN +extern sigset_t __syscall(quad_t,...); +EXTERN_C_END +STATIC ptr_t GC_find_limit_openbsd(ptr_t p,ptr_t bound) +{ +static volatile ptr_t result; +struct sigaction act; +word pgsz=(word)sysconf(_SC_PAGESIZE); +GC_ASSERT((word)bound>=pgsz); +GC_ASSERT(I_HOLD_LOCK()); +act.sa_handler=GC_fault_handler_openbsd; +sigemptyset(&act.sa_mask); +act.sa_flags=SA_NODEFER|SA_RESTART; +sigaction(SIGSEGV,&act,&old_segv_act); +if (SETJMP(GC_jmp_buf_openbsd)==0){ +result=(ptr_t)((word)p&~(pgsz-1)); +for (;;){ +if ((word)result>=(word)bound - pgsz){ +result=bound; +break; +} +result+=pgsz; +GC_noop1((word)(*result)); +} +} +#ifdef THREADS +__syscall(SYS_sigprocmask,SIG_UNBLOCK,sigmask(SIGPROF)); +#endif +sigaction(SIGSEGV,&old_segv_act,0); +return(result); +} +#endif +static volatile int firstpass; +STATIC ptr_t GC_skip_hole_openbsd(ptr_t 
p,ptr_t bound) +{ +static volatile ptr_t result; +struct sigaction act; +word pgsz=(word)sysconf(_SC_PAGESIZE); +GC_ASSERT((word)bound>=pgsz); +GC_ASSERT(I_HOLD_LOCK()); +act.sa_handler=GC_fault_handler_openbsd; +sigemptyset(&act.sa_mask); +act.sa_flags=SA_NODEFER|SA_RESTART; +sigaction(SIGSEGV,&act,&old_segv_act); +firstpass=1; +result=(ptr_t)((word)p&~(pgsz-1)); +if (SETJMP(GC_jmp_buf_openbsd)!=0||firstpass){ +firstpass=0; +if ((word)result>=(word)bound - pgsz){ +result=bound; +} else { +result+=pgsz; +GC_noop1((word)(*result)); +} +} +sigaction(SIGSEGV,&old_segv_act,0); +return(result); +} +#endif +#ifdef OS2 +#include +#if!defined(__IBMC__)&&!defined(__WATCOMC__) +struct exe_hdr { +unsigned short magic_number; +unsigned short padding[29]; +long new_exe_offset; +}; +#define E_MAGIC(x)(x).magic_number +#define EMAGIC 0x5A4D +#define E_LFANEW(x)(x).new_exe_offset +struct e32_exe { +unsigned char magic_number[2]; +unsigned char byte_order; +unsigned char word_order; +unsigned long exe_format_level; +unsigned short cpu; +unsigned short os; +unsigned long padding1[13]; +unsigned long object_table_offset; +unsigned long object_count; +unsigned long padding2[31]; +}; +#define E32_MAGIC1(x)(x).magic_number[0] +#define E32MAGIC1 'L' +#define E32_MAGIC2(x)(x).magic_number[1] +#define E32MAGIC2 'X' +#define E32_BORDER(x)(x).byte_order +#define E32LEBO 0 +#define E32_WORDER(x)(x).word_order +#define E32LEWO 0 +#define E32_CPU(x)(x).cpu +#define E32CPU286 1 +#define E32_OBJTAB(x)(x).object_table_offset +#define E32_OBJCNT(x)(x).object_count +struct o32_obj { +unsigned long size; +unsigned long base; +unsigned long flags; +unsigned long pagemap; +unsigned long mapsize; +unsigned long reserved; +}; +#define O32_FLAGS(x)(x).flags +#define OBJREAD 0x0001L +#define OBJWRITE 0x0002L +#define OBJINVALID 0x0080L +#define O32_SIZE(x)(x).size +#define O32_BASE(x)(x).base +#else +#ifndef WORD +#define WORD unsigned short +#endif +#ifndef DWORD +#define DWORD unsigned long +#endif +#define EXE386 1 +#include +#include +#endif +#define INCL_DOSEXCEPTIONS +#define INCL_DOSPROCESS +#define INCL_DOSERRORS +#define INCL_DOSMODULEMGR +#define INCL_DOSMEMMGR +#include +#endif +GC_INNER size_t GC_page_size=0; +#if defined(MSWIN32)||defined(MSWINCE)||defined(CYGWIN32) +#ifndef VER_PLATFORM_WIN32_CE +#define VER_PLATFORM_WIN32_CE 3 +#endif +#if defined(MSWINCE)&&defined(THREADS) +GC_INNER GC_bool GC_dont_query_stack_min=FALSE; +#endif +GC_INNER SYSTEM_INFO GC_sysinfo; +GC_INNER void GC_setpagesize(void) +{ +GetSystemInfo(&GC_sysinfo); +#if defined(CYGWIN32)&&(defined(MPROTECT_VDB)||defined(USE_MUNMAP)) +GC_page_size=(size_t)GC_sysinfo.dwAllocationGranularity; +GC_ASSERT(GC_page_size>=(size_t)GC_sysinfo.dwPageSize); +#else +GC_page_size=(size_t)GC_sysinfo.dwPageSize; +#endif +#if defined(MSWINCE)&&!defined(_WIN32_WCE_EMULATION) +{ +OSVERSIONINFO verInfo; +verInfo.dwOSVersionInfoSize=sizeof(OSVERSIONINFO); +if (!GetVersionEx(&verInfo)) +ABORT("GetVersionEx failed"); +if (verInfo.dwPlatformId==VER_PLATFORM_WIN32_CE&& +verInfo.dwMajorVersion < 6){ +GC_sysinfo.lpMaximumApplicationAddress=(LPVOID)((word)32<<20); +#ifdef THREADS +if (verInfo.dwMajorVersion < 5) +GC_dont_query_stack_min=TRUE; +#endif +} +} +#endif +} +#ifndef CYGWIN32 +#define is_writable(prot)((prot)==PAGE_READWRITE||(prot)==PAGE_WRITECOPY||(prot)==PAGE_EXECUTE_READWRITE||(prot)==PAGE_EXECUTE_WRITECOPY) +STATIC word GC_get_writable_length(ptr_t p,ptr_t*base) +{ +MEMORY_BASIC_INFORMATION buf; +word result; +word protect; +result=VirtualQuery(p,&buf,sizeof(buf)); 
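+/* Regions that are not committed and writable yield 0; otherwise the caller adds the writable length above the truncated stack pointer to locate the stack base. */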
+if (result!=sizeof(buf))ABORT("Weird VirtualQuery result"); +if (base!=0)*base=(ptr_t)(buf.AllocationBase); +protect=(buf.Protect&~(PAGE_GUARD|PAGE_NOCACHE)); +if (!is_writable(protect)){ +return(0); +} +if (buf.State!=MEM_COMMIT)return(0); +return(buf.RegionSize); +} +GC_API int GC_CALL GC_get_stack_base(struct GC_stack_base*sb) +{ +ptr_t trunc_sp; +word size; +if (!GC_page_size)GC_setpagesize(); +trunc_sp=(ptr_t)((word)GC_approx_sp()&~(GC_page_size - 1)); +size=GC_get_writable_length(trunc_sp,0); +GC_ASSERT(size!=0); +sb->mem_base=trunc_sp+size; +return GC_SUCCESS; +} +#else +GC_API int GC_CALL GC_get_stack_base(struct GC_stack_base*sb) +{ +#ifdef X86_64 +sb->mem_base=((NT_TIB*)NtCurrentTeb())->StackBase; +#else +void*_tlsbase; +__asm__ ("movl %%fs:4,%0" +:"=r" (_tlsbase)); +sb->mem_base=_tlsbase; +#endif +return GC_SUCCESS; +} +#endif +#define HAVE_GET_STACK_BASE +#else +GC_INNER void GC_setpagesize(void) +{ +#if defined(MPROTECT_VDB)||defined(PROC_VDB)||defined(USE_MMAP) +GC_page_size=(size_t)GETPAGESIZE(); +#if!defined(CPPCHECK) +if (0==GC_page_size) +ABORT("getpagesize failed"); +#endif +#else +GC_page_size=HBLKSIZE; +#endif +} +#endif +#ifdef HAIKU +#include +GC_API int GC_CALL GC_get_stack_base(struct GC_stack_base*sb) +{ +thread_info th; +get_thread_info(find_thread(NULL),&th); +sb->mem_base=th.stack_end; +return GC_SUCCESS; +} +#define HAVE_GET_STACK_BASE +#endif +#ifdef OS2 +GC_API int GC_CALL GC_get_stack_base(struct GC_stack_base*sb) +{ +PTIB ptib; +PPIB ppib; +if (DosGetInfoBlocks(&ptib,&ppib)!=NO_ERROR){ +WARN("DosGetInfoBlocks failed\n",0); +return GC_UNIMPLEMENTED; +} +sb->mem_base=ptib->tib_pstacklimit; +return GC_SUCCESS; +} +#define HAVE_GET_STACK_BASE +#endif +#ifdef AMIGA +#define GC_AMIGA_SB +#undef GC_AMIGA_SB +#define GET_MAIN_STACKBASE_SPECIAL +#endif +#if defined(NEED_FIND_LIMIT)||defined(UNIX_LIKE) +typedef void (*GC_fault_handler_t)(int); +#if defined(SUNOS5SIGS)||defined(IRIX5)||defined(OSF1)||defined(HAIKU)||defined(HURD)||defined(FREEBSD)||defined(NETBSD) +static struct sigaction old_segv_act; +#if defined(_sigargs)||defined(HURD)||defined(NETBSD)||defined(FREEBSD) +static struct sigaction old_bus_act; +#endif +#else +static GC_fault_handler_t old_segv_handler; +#ifdef HAVE_SIGBUS +static GC_fault_handler_t old_bus_handler; +#endif +#endif +GC_INNER void GC_set_and_save_fault_handler(GC_fault_handler_t h) +{ +#if defined(SUNOS5SIGS)||defined(IRIX5)||defined(OSF1)||defined(HAIKU)||defined(HURD)||defined(FREEBSD)||defined(NETBSD)||defined(OPENBSD) +struct sigaction act; +act.sa_handler=h; +#ifdef SIGACTION_FLAGS_NODEFER_HACK +act.sa_flags=SA_RESTART|SA_NODEFER; +#else +act.sa_flags=SA_RESTART; +#endif +(void)sigemptyset(&act.sa_mask); +#ifdef GC_IRIX_THREADS +(void)sigaction(SIGSEGV,0,&old_segv_act); +(void)sigaction(SIGSEGV,&act,0); +#else +(void)sigaction(SIGSEGV,&act,&old_segv_act); +#if defined(IRIX5)&&defined(_sigargs)||defined(HURD)||defined(NETBSD)||defined(FREEBSD) +(void)sigaction(SIGBUS,&act,&old_bus_act); +#endif +#endif +#else +old_segv_handler=signal(SIGSEGV,h); +#ifdef HAVE_SIGBUS +old_bus_handler=signal(SIGBUS,h); +#endif +#endif +#if defined(CPPCHECK)&&defined(ADDRESS_SANITIZER) +GC_noop1((word)&__asan_default_options); +#endif +} +#endif +#if defined(NEED_FIND_LIMIT)||(defined(USE_PROC_FOR_LIBRARIES)&&defined(THREADS)) +#define MIN_PAGE_SIZE 256 +GC_INNER JMP_BUF GC_jmp_buf; +STATIC void GC_fault_handler(int sig GC_ATTR_UNUSED) +{ +LONGJMP(GC_jmp_buf,1); +} +GC_INNER void GC_setup_temporary_fault_handler(void) +{ +GC_ASSERT(I_HOLD_LOCK()); 
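+/* Install GC_fault_handler so that probing an unmapped page in GC_find_limit_with_bound longjmps back via GC_jmp_buf instead of terminating the process. */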
+GC_set_and_save_fault_handler(GC_fault_handler); +} +GC_INNER void GC_reset_fault_handler(void) +{ +#if defined(SUNOS5SIGS)||defined(IRIX5)||defined(OSF1)||defined(HAIKU)||defined(HURD)||defined(FREEBSD)||defined(NETBSD)||defined(OPENBSD) +(void)sigaction(SIGSEGV,&old_segv_act,0); +#if defined(IRIX5)&&defined(_sigargs)||defined(HURD)||defined(NETBSD) +(void)sigaction(SIGBUS,&old_bus_act,0); +#endif +#else +(void)signal(SIGSEGV,old_segv_handler); +#ifdef HAVE_SIGBUS +(void)signal(SIGBUS,old_bus_handler); +#endif +#endif +} +GC_ATTR_NO_SANITIZE_ADDR +STATIC ptr_t GC_find_limit_with_bound(ptr_t p,GC_bool up,ptr_t bound) +{ +static volatile ptr_t result; +GC_ASSERT(up?(word)bound>=MIN_PAGE_SIZE +:(word)bound<=~(word)MIN_PAGE_SIZE); +GC_ASSERT(I_HOLD_LOCK()); +GC_setup_temporary_fault_handler(); +if (SETJMP(GC_jmp_buf)==0){ +result=(ptr_t)(((word)(p)) +&~(MIN_PAGE_SIZE-1)); +for (;;){ +if (up){ +if ((word)result>=(word)bound - MIN_PAGE_SIZE){ +result=bound; +break; +} +result+=MIN_PAGE_SIZE; +} else { +if ((word)result<=(word)bound+MIN_PAGE_SIZE){ +result=bound - MIN_PAGE_SIZE; +break; +} +result-=MIN_PAGE_SIZE; +} +GC_noop1((word)(*result)); +} +} +GC_reset_fault_handler(); +if (!up){ +result+=MIN_PAGE_SIZE; +} +return(result); +} +void*GC_find_limit(void*p,int up) +{ +return GC_find_limit_with_bound((ptr_t)p,(GC_bool)up, +up?(ptr_t)GC_WORD_MAX:0); +} +#endif +#ifdef HPUX_MAIN_STACKBOTTOM +#include +#include +STATIC ptr_t GC_hpux_main_stack_base(void) +{ +struct pst_vm_status vm_status; +int i=0; +while (pstat_getprocvm(&vm_status,sizeof(vm_status),0,i++)==1){ +if (vm_status.pst_type==PS_STACK) +return (ptr_t)vm_status.pst_vaddr; +} +#ifdef STACK_GROWS_UP +return (ptr_t)GC_find_limit(GC_approx_sp(),FALSE); +#else +return (ptr_t)GC_find_limit(GC_approx_sp(),TRUE); +#endif +} +#endif +#ifdef HPUX_STACKBOTTOM +#include +#include +GC_INNER ptr_t GC_get_register_stack_base(void) +{ +struct pst_vm_status vm_status; +int i=0; +while (pstat_getprocvm(&vm_status,sizeof(vm_status),0,i++)==1){ +if (vm_status.pst_type==PS_RSESTACK){ +return (ptr_t)vm_status.pst_vaddr; +} +} +return (ptr_t)(((word)GC_stackbottom - BACKING_STORE_DISPLACEMENT - 1) +&~(BACKING_STORE_ALIGNMENT - 1)); +} +#endif +#ifdef LINUX_STACKBOTTOM +#include +#include +#define STAT_SKIP 27 +#ifdef USE_LIBC_PRIVATES +EXTERN_C_BEGIN +#pragma weak __libc_stack_end +extern ptr_t __libc_stack_end; +#ifdef IA64 +#pragma weak __libc_ia64_register_backing_store_base +extern ptr_t __libc_ia64_register_backing_store_base; +#endif +EXTERN_C_END +#endif +#ifdef IA64 +GC_INNER ptr_t GC_get_register_stack_base(void) +{ +ptr_t result; +#ifdef USE_LIBC_PRIVATES +if (0!=&__libc_ia64_register_backing_store_base +&&0!=__libc_ia64_register_backing_store_base){ +return __libc_ia64_register_backing_store_base; +} +#endif +result=backing_store_base_from_proc(); +if (0==result){ +result=(ptr_t)GC_find_limit(GC_save_regs_in_stack(),FALSE); +} +return result; +} +#endif +STATIC ptr_t GC_linux_main_stack_base(void) +{ +#ifndef STAT_READ +#define STAT_BUF_SIZE 4096 +#define STAT_READ read +#endif +char stat_buf[STAT_BUF_SIZE]; +int f; +word result; +int i,buf_offset=0,len; +#ifdef USE_LIBC_PRIVATES +if (0!=&__libc_stack_end&&0!=__libc_stack_end){ +#if defined(IA64) +if (((word)__libc_stack_end&0xfff)+0x10 < 0x1000){ +return __libc_stack_end+0x10; +} +#elif defined(SPARC) +if (__libc_stack_end!=(ptr_t)(unsigned long)0x1) +return __libc_stack_end; +#else +return __libc_stack_end; +#endif +} +#endif +f=open("/proc/self/stat",O_RDONLY); +if (f < 0) +ABORT("Couldn't 
read/proc/self/stat"); +len=STAT_READ(f,stat_buf,STAT_BUF_SIZE); +close(f); +for (i=0;i < STAT_SKIP;++i){ +while (buf_offset < len&&isspace(stat_buf[buf_offset++])){ +} +while (buf_offset < len&&!isspace(stat_buf[buf_offset++])){ +} +} +while (buf_offset < len&&isspace(stat_buf[buf_offset])){ +buf_offset++; +} +for (i=0;buf_offset+i < len;i++){ +if (!isdigit(stat_buf[buf_offset+i]))break; +} +if (buf_offset+i>=len)ABORT("Could not parse/proc/self/stat"); +stat_buf[buf_offset+i]='\0'; +result=(word)STRTOULL(&stat_buf[buf_offset],NULL,10); +if (result < 0x100000||(result&(sizeof(word)- 1))!=0) +ABORT("Absurd stack bottom value"); +return (ptr_t)result; +} +#endif +#ifdef FREEBSD_STACKBOTTOM +#include +#include +#include +STATIC ptr_t GC_freebsd_main_stack_base(void) +{ +int nm[2]={CTL_KERN,KERN_USRSTACK}; +ptr_t base; +size_t len=sizeof(ptr_t); +int r=sysctl(nm,2,&base,&len,NULL,0); +if (r)ABORT("Error getting main stack base"); +return base; +} +#endif +#if defined(ECOS)||defined(NOSYS) +ptr_t GC_get_main_stack_base(void) +{ +return STACKBOTTOM; +} +#define GET_MAIN_STACKBASE_SPECIAL +#elif defined(SYMBIAN) +EXTERN_C_BEGIN +extern int GC_get_main_symbian_stack_base(void); +EXTERN_C_END +ptr_t GC_get_main_stack_base(void) +{ +return (ptr_t)GC_get_main_symbian_stack_base(); +} +#define GET_MAIN_STACKBASE_SPECIAL +#elif defined(__EMSCRIPTEN__) +#include +static void*emscripten_stack_base; +static void scan_stack_cb(void*begin,void*end) +{ +(void)begin; +emscripten_stack_base=end; +} +ptr_t GC_get_main_stack_base(void) +{ +emscripten_scan_stack(scan_stack_cb); +return (ptr_t)emscripten_stack_base; +} +#define GET_MAIN_STACKBASE_SPECIAL +#elif!defined(AMIGA)&&!defined(HAIKU)&&!defined(OS2)&&!defined(MSWIN32)&&!defined(MSWINCE)&&!defined(CYGWIN32)&&!defined(GC_OPENBSD_THREADS)&&(!defined(GC_SOLARIS_THREADS)||defined(_STRICT_STDC)) +#if (defined(HAVE_PTHREAD_ATTR_GET_NP)||defined(HAVE_PTHREAD_GETATTR_NP))&&(defined(THREADS)||defined(USE_GET_STACKBASE_FOR_MAIN)) +#include +#ifdef HAVE_PTHREAD_NP_H +#include +#endif +#elif defined(DARWIN)&&!defined(NO_PTHREAD_GET_STACKADDR_NP) +#include +#undef STACKBOTTOM +#define STACKBOTTOM (ptr_t)pthread_get_stackaddr_np(pthread_self()) +#endif +ptr_t GC_get_main_stack_base(void) +{ +ptr_t result; +#if (defined(HAVE_PTHREAD_ATTR_GET_NP)||defined(HAVE_PTHREAD_GETATTR_NP))&&(defined(USE_GET_STACKBASE_FOR_MAIN)||(defined(THREADS)&&!defined(REDIRECT_MALLOC))) +pthread_attr_t attr; +void*stackaddr; +size_t size; +#ifdef HAVE_PTHREAD_ATTR_GET_NP +if (pthread_attr_init(&attr)==0 +&&(pthread_attr_get_np(pthread_self(),&attr)==0 +?TRUE:(pthread_attr_destroy(&attr),FALSE))) +#else +if (pthread_getattr_np(pthread_self(),&attr)==0) +#endif +{ +if (pthread_attr_getstack(&attr,&stackaddr,&size)==0 +&&stackaddr!=NULL){ +(void)pthread_attr_destroy(&attr); +#ifdef STACK_GROWS_DOWN +stackaddr=(char*)stackaddr+size; +#endif +return (ptr_t)stackaddr; +} +(void)pthread_attr_destroy(&attr); +} +WARN("pthread_getattr_np or pthread_attr_getstack failed" +" for main thread\n",0); +#endif +#ifdef STACKBOTTOM +result=STACKBOTTOM; +#else +#ifdef HEURISTIC1 +#define STACKBOTTOM_ALIGNMENT_M1 ((word)STACK_GRAN - 1) +#ifdef STACK_GROWS_DOWN +result=(ptr_t)(((word)GC_approx_sp()+STACKBOTTOM_ALIGNMENT_M1) +&~STACKBOTTOM_ALIGNMENT_M1); +#else +result=(ptr_t)((word)GC_approx_sp()&~STACKBOTTOM_ALIGNMENT_M1); +#endif +#elif defined(HPUX_MAIN_STACKBOTTOM) +result=GC_hpux_main_stack_base(); +#elif defined(LINUX_STACKBOTTOM) +result=GC_linux_main_stack_base(); +#elif defined(FREEBSD_STACKBOTTOM) 
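+/* FreeBSD exports the main stack base directly through the kern.usrstack sysctl (CTL_KERN/KERN_USRSTACK). */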
+result=GC_freebsd_main_stack_base(); +#elif defined(HEURISTIC2) +{ +ptr_t sp=GC_approx_sp(); +#ifdef STACK_GROWS_DOWN +result=(ptr_t)GC_find_limit(sp,TRUE); +#if defined(HEURISTIC2_LIMIT)&&!defined(CPPCHECK) +if ((word)result > (word)HEURISTIC2_LIMIT +&&(word)sp < (word)HEURISTIC2_LIMIT){ +result=HEURISTIC2_LIMIT; +} +#endif +#else +result=(ptr_t)GC_find_limit(sp,FALSE); +#if defined(HEURISTIC2_LIMIT)&&!defined(CPPCHECK) +if ((word)result < (word)HEURISTIC2_LIMIT +&&(word)sp > (word)HEURISTIC2_LIMIT){ +result=HEURISTIC2_LIMIT; +} +#endif +#endif +} +#elif defined(STACK_NOT_SCANNED)||defined(CPPCHECK) +result=NULL; +#else +#error None of HEURISTIC*and*STACKBOTTOM defined! +#endif +#if defined(STACK_GROWS_DOWN)&&!defined(CPPCHECK) +if (result==0) +result=(ptr_t)(signed_word)(-sizeof(ptr_t)); +#endif +#endif +#if!defined(CPPCHECK) +GC_ASSERT((word)GC_approx_sp()HOTTER_THAN (word)result); +#endif +return(result); +} +#define GET_MAIN_STACKBASE_SPECIAL +#endif +#if (defined(HAVE_PTHREAD_ATTR_GET_NP)||defined(HAVE_PTHREAD_GETATTR_NP))&&defined(THREADS)&&!defined(HAVE_GET_STACK_BASE) +#include +#ifdef HAVE_PTHREAD_NP_H +#include +#endif +GC_API int GC_CALL GC_get_stack_base(struct GC_stack_base*b) +{ +pthread_attr_t attr; +size_t size; +#ifdef IA64 +DCL_LOCK_STATE; +#endif +#ifdef HAVE_PTHREAD_ATTR_GET_NP +if (pthread_attr_init(&attr)!=0) +ABORT("pthread_attr_init failed"); +if (pthread_attr_get_np(pthread_self(),&attr)!=0){ +WARN("pthread_attr_get_np failed\n",0); +(void)pthread_attr_destroy(&attr); +return GC_UNIMPLEMENTED; +} +#else +if (pthread_getattr_np(pthread_self(),&attr)!=0){ +WARN("pthread_getattr_np failed\n",0); +return GC_UNIMPLEMENTED; +} +#endif +if (pthread_attr_getstack(&attr,&(b->mem_base),&size)!=0){ +ABORT("pthread_attr_getstack failed"); +} +(void)pthread_attr_destroy(&attr); +#ifdef STACK_GROWS_DOWN +b->mem_base=(char*)(b->mem_base)+size; +#endif +#ifdef IA64 +LOCK(); +{ +IF_CANCEL(int cancel_state;) +ptr_t bsp; +ptr_t next_stack; +DISABLE_CANCEL(cancel_state); +bsp=GC_save_regs_in_stack(); +next_stack=GC_greatest_stack_base_below(bsp); +if (0==next_stack){ +b->reg_base=GC_find_limit(bsp,FALSE); +} else { +b->reg_base=GC_find_limit_with_bound(bsp,FALSE,next_stack); +} +RESTORE_CANCEL(cancel_state); +} +UNLOCK(); +#endif +return GC_SUCCESS; +} +#define HAVE_GET_STACK_BASE +#endif +#if defined(GC_DARWIN_THREADS)&&!defined(NO_PTHREAD_GET_STACKADDR_NP) +#include +GC_API int GC_CALL GC_get_stack_base(struct GC_stack_base*b) +{ +b->mem_base=pthread_get_stackaddr_np(pthread_self()); +GC_ASSERT((word)GC_approx_sp()HOTTER_THAN (word)b->mem_base); +return GC_SUCCESS; +} +#define HAVE_GET_STACK_BASE +#endif +#ifdef GC_OPENBSD_THREADS +#include +#include +#include +GC_API int GC_CALL GC_get_stack_base(struct GC_stack_base*sb) +{ +stack_t stack; +if (pthread_stackseg_np(pthread_self(),&stack)) +ABORT("pthread_stackseg_np(self)failed"); +sb->mem_base=stack.ss_sp; +return GC_SUCCESS; +} +#define HAVE_GET_STACK_BASE +#endif +#if defined(GC_SOLARIS_THREADS)&&!defined(_STRICT_STDC) +#include +#include +#include +static pthread_t stackbase_main_self=0; +static void*stackbase_main_ss_sp=NULL; +GC_API int GC_CALL GC_get_stack_base(struct GC_stack_base*b) +{ +stack_t s; +pthread_t self=pthread_self(); +if (self==stackbase_main_self) +{ +b->mem_base=stackbase_main_ss_sp; +GC_ASSERT(b->mem_base!=NULL); +return GC_SUCCESS; +} +if (thr_stksegment(&s)){ +ABORT("thr_stksegment failed"); +} +GC_ASSERT((word)GC_approx_sp()HOTTER_THAN (word)s.ss_sp); +if (!stackbase_main_self&&thr_main()!=0) +{ 
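+/* First query from the primordial thread: cache its stack segment so repeated queries from the main thread can skip thr_stksegment(). */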
+stackbase_main_ss_sp=s.ss_sp; +stackbase_main_self=self; +} +b->mem_base=s.ss_sp; +return GC_SUCCESS; +} +#define HAVE_GET_STACK_BASE +#endif +#ifdef GC_RTEMS_PTHREADS +GC_API int GC_CALL GC_get_stack_base(struct GC_stack_base*sb) +{ +sb->mem_base=rtems_get_stack_bottom(); +return GC_SUCCESS; +} +#define HAVE_GET_STACK_BASE +#endif +#ifndef HAVE_GET_STACK_BASE +#ifdef NEED_FIND_LIMIT +GC_API int GC_CALL GC_get_stack_base(struct GC_stack_base*b) +{ +IF_CANCEL(int cancel_state;) +DCL_LOCK_STATE; +LOCK(); +DISABLE_CANCEL(cancel_state); +#ifdef STACK_GROWS_DOWN +b->mem_base=GC_find_limit(GC_approx_sp(),TRUE); +#ifdef IA64 +b->reg_base=GC_find_limit(GC_save_regs_in_stack(),FALSE); +#endif +#else +b->mem_base=GC_find_limit(GC_approx_sp(),FALSE); +#endif +RESTORE_CANCEL(cancel_state); +UNLOCK(); +return GC_SUCCESS; +} +#else +GC_API int GC_CALL GC_get_stack_base( +struct GC_stack_base*b GC_ATTR_UNUSED) +{ +#if defined(GET_MAIN_STACKBASE_SPECIAL)&&!defined(THREADS)&&!defined(IA64) +b->mem_base=GC_get_main_stack_base(); +return GC_SUCCESS; +#else +return GC_UNIMPLEMENTED; +#endif +} +#endif +#endif +#ifndef GET_MAIN_STACKBASE_SPECIAL +ptr_t GC_get_main_stack_base(void) +{ +struct GC_stack_base sb; +if (GC_get_stack_base(&sb)!=GC_SUCCESS) +ABORT("GC_get_stack_base failed"); +GC_ASSERT((word)GC_approx_sp()HOTTER_THAN (word)sb.mem_base); +return (ptr_t)sb.mem_base; +} +#endif +#ifdef OS2 +void GC_register_data_segments(void) +{ +PTIB ptib; +PPIB ppib; +HMODULE module_handle; +#define PBUFSIZ 512 +UCHAR path[PBUFSIZ]; +FILE*myexefile; +struct exe_hdr hdrdos; +struct e32_exe hdr386; +struct o32_obj seg; +int nsegs; +#if defined(CPPCHECK) +hdrdos.padding[0]=0; +hdr386.exe_format_level=0; +hdr386.os=0; +hdr386.padding1[0]=0; +hdr386.padding2[0]=0; +seg.pagemap=0; +seg.mapsize=0; +seg.reserved=0; +#endif +if (DosGetInfoBlocks(&ptib,&ppib)!=NO_ERROR){ +ABORT("DosGetInfoBlocks failed"); +} +module_handle=ppib->pib_hmte; +if (DosQueryModuleName(module_handle,PBUFSIZ,path)!=NO_ERROR){ +ABORT("DosQueryModuleName failed"); +} +myexefile=fopen(path,"rb"); +if (myexefile==0){ +ABORT_ARG1("Failed to open executable",":%s",path); +} +if (fread((char*)(&hdrdos),1,sizeof(hdrdos),myexefile) +< sizeof(hdrdos)){ +ABORT_ARG1("Could not read MSDOS header"," from:%s",path); +} +if (E_MAGIC(hdrdos)!=EMAGIC){ +ABORT_ARG1("Bad DOS magic number"," in file:%s",path); +} +if (fseek(myexefile,E_LFANEW(hdrdos),SEEK_SET)!=0){ +ABORT_ARG1("Bad DOS magic number"," in file:%s",path); +} +if (fread((char*)(&hdr386),1,sizeof(hdr386),myexefile) +< sizeof(hdr386)){ +ABORT_ARG1("Could not read OS/2 header"," from:%s",path); +} +if (E32_MAGIC1(hdr386)!=E32MAGIC1||E32_MAGIC2(hdr386)!=E32MAGIC2){ +ABORT_ARG1("Bad OS/2 magic number"," in file:%s",path); +} +if (E32_BORDER(hdr386)!=E32LEBO||E32_WORDER(hdr386)!=E32LEWO){ +ABORT_ARG1("Bad byte order in executable"," file:%s",path); +} +if (E32_CPU(hdr386)==E32CPU286){ +ABORT_ARG1("GC cannot handle 80286 executables",":%s",path); +} +if (fseek(myexefile,E_LFANEW(hdrdos)+E32_OBJTAB(hdr386), +SEEK_SET)!=0){ +ABORT_ARG1("Seek to object table failed"," in file:%s",path); +} +for (nsegs=E32_OBJCNT(hdr386);nsegs > 0;nsegs--){ +int flags; +if (fread((char*)(&seg),1,sizeof(seg),myexefile)< sizeof(seg)){ +ABORT_ARG1("Could not read obj table entry"," from file:%s",path); +} +flags=O32_FLAGS(seg); +if (!(flags&OBJWRITE))continue; +if (!(flags&OBJREAD))continue; +if (flags&OBJINVALID){ +GC_err_printf("Object with invalid pages?\n"); +continue; +} +GC_add_roots_inner((ptr_t)O32_BASE(seg), 
+(ptr_t)(O32_BASE(seg)+O32_SIZE(seg)),FALSE); +} +(void)fclose(myexefile); +} +#else +#if defined(GWW_VDB) +#ifndef MEM_WRITE_WATCH +#define MEM_WRITE_WATCH 0x200000 +#endif +#ifndef WRITE_WATCH_FLAG_RESET +#define WRITE_WATCH_FLAG_RESET 1 +#endif +#define GC_ULONG_PTR word +typedef UINT (WINAPI*GetWriteWatch_type)( +DWORD,PVOID,GC_ULONG_PTR, +PVOID*,GC_ULONG_PTR*,PULONG); +static FARPROC GetWriteWatch_func; +static DWORD GetWriteWatch_alloc_flag; +#define GC_GWW_AVAILABLE()(GetWriteWatch_func!=0) +static void detect_GetWriteWatch(void) +{ +static GC_bool done; +HMODULE hK32; +if (done) +return; +#if defined(MPROTECT_VDB) +{ +char*str=GETENV("GC_USE_GETWRITEWATCH"); +#if defined(GC_PREFER_MPROTECT_VDB) +if (str==NULL||(*str=='0'&&*(str+1)=='\0')){ +done=TRUE; +return; +} +#else +if (str!=NULL&&*str=='0'&&*(str+1)=='\0'){ +done=TRUE; +return; +} +#endif +} +#endif +#ifdef MSWINRT_FLAVOR +{ +MEMORY_BASIC_INFORMATION memInfo; +SIZE_T result=VirtualQuery(GetProcAddress, +&memInfo,sizeof(memInfo)); +if (result!=sizeof(memInfo)) +ABORT("Weird VirtualQuery result"); +hK32=(HMODULE)memInfo.AllocationBase; +} +#else +hK32=GetModuleHandle(TEXT("kernel32.dll")); +#endif +if (hK32!=(HMODULE)0&& +(GetWriteWatch_func=GetProcAddress(hK32,"GetWriteWatch"))!=0){ +void*page; +GC_ASSERT(GC_page_size!=0); +page=VirtualAlloc(NULL,GC_page_size,MEM_WRITE_WATCH|MEM_RESERVE, +PAGE_READWRITE); +if (page!=NULL){ +PVOID pages[16]; +GC_ULONG_PTR count=16; +DWORD page_size; +if ((*(GetWriteWatch_type)(word)GetWriteWatch_func)( +WRITE_WATCH_FLAG_RESET,page, +GC_page_size,pages,&count, +&page_size)!=0){ +GetWriteWatch_func=0; +} else { +GetWriteWatch_alloc_flag=MEM_WRITE_WATCH; +} +VirtualFree(page,0,MEM_RELEASE); +} else { +GetWriteWatch_func=0; +} +} +#ifndef SMALL_CONFIG +if (!GetWriteWatch_func){ +GC_COND_LOG_PRINTF("Did not find a usable GetWriteWatch()\n"); +} else { +GC_COND_LOG_PRINTF("Using GetWriteWatch()\n"); +} +#endif +done=TRUE; +} +#else +#define GetWriteWatch_alloc_flag 0 +#endif +#if defined(MSWIN32)||defined(MSWINCE)||defined(CYGWIN32) +#ifdef MSWIN32 +GC_INNER GC_bool GC_no_win32_dlls=FALSE; +GC_INNER GC_bool GC_wnt=FALSE; +GC_INNER void GC_init_win32(void) +{ +#if defined(_WIN64)||(defined(_MSC_VER)&&_MSC_VER>=1800) +GC_wnt=TRUE; +#else +DWORD v=GetVersion(); +GC_wnt=!(v&0x80000000); +GC_no_win32_dlls|=((!GC_wnt)&&(v&0xff)<=3); +#endif +#ifdef USE_MUNMAP +if (GC_no_win32_dlls){ +GC_unmap_threshold=0; +} +#endif +} +STATIC ptr_t GC_least_described_address(ptr_t start) +{ +MEMORY_BASIC_INFORMATION buf; +LPVOID limit=GC_sysinfo.lpMinimumApplicationAddress; +ptr_t p=(ptr_t)((word)start&~(GC_page_size - 1)); +GC_ASSERT(GC_page_size!=0); +for (;;){ +size_t result; +LPVOID q=(LPVOID)(p - GC_page_size); +if ((word)q > (word)p||(word)q < (word)limit)break; +result=VirtualQuery(q,&buf,sizeof(buf)); +if (result!=sizeof(buf)||buf.AllocationBase==0)break; +p=(ptr_t)(buf.AllocationBase); +} +return p; +} +#endif +#if defined(USE_WINALLOC)&&!defined(REDIRECT_MALLOC) +STATIC size_t GC_max_root_size=100000; +STATIC struct GC_malloc_heap_list { +void*allocation_base; +struct GC_malloc_heap_list*next; +}*GC_malloc_heap_l=0; +STATIC GC_bool GC_is_malloc_heap_base(void*p) +{ +struct GC_malloc_heap_list*q=GC_malloc_heap_l; +while (0!=q){ +if (q->allocation_base==p)return TRUE; +q=q->next; +} +return FALSE; +} +STATIC void*GC_get_allocation_base(void*p) +{ +MEMORY_BASIC_INFORMATION buf; +size_t result=VirtualQuery(p,&buf,sizeof(buf)); +if (result!=sizeof(buf)){ +ABORT("Weird VirtualQuery result"); +} +return 
buf.AllocationBase; +} +GC_INNER void GC_add_current_malloc_heap(void) +{ +struct GC_malloc_heap_list*new_l=(struct GC_malloc_heap_list*) +malloc(sizeof(struct GC_malloc_heap_list)); +void*candidate; +if (NULL==new_l)return; +candidate=GC_get_allocation_base(new_l); +if (GC_is_malloc_heap_base(candidate)){ +size_t req_size=10000; +do { +void*p=malloc(req_size); +if (0==p){ +free(new_l); +return; +} +candidate=GC_get_allocation_base(p); +free(p); +req_size*=2; +} while (GC_is_malloc_heap_base(candidate) +&&req_size < GC_max_root_size/10&&req_size < 500000); +if (GC_is_malloc_heap_base(candidate)){ +free(new_l); +return; +} +} +GC_COND_LOG_PRINTF("Found new system malloc AllocationBase at %p\n", +candidate); +new_l->allocation_base=candidate; +new_l->next=GC_malloc_heap_l; +GC_malloc_heap_l=new_l; +} +STATIC void GC_free_malloc_heap_list(void) +{ +struct GC_malloc_heap_list*q=GC_malloc_heap_l; +GC_malloc_heap_l=NULL; +while (q!=NULL){ +struct GC_malloc_heap_list*next=q->next; +free(q); +q=next; +} +} +#endif +GC_INNER GC_bool GC_is_heap_base(void*p) +{ +int i; +#if defined(USE_WINALLOC)&&!defined(REDIRECT_MALLOC) +if (GC_root_size > GC_max_root_size) +GC_max_root_size=GC_root_size; +if (GC_is_malloc_heap_base(p)) +return TRUE; +#endif +for (i=0;i < (int)GC_n_heap_bases;i++){ +if (GC_heap_bases[i]==p)return TRUE; +} +return FALSE; +} +#ifdef MSWIN32 +STATIC void GC_register_root_section(ptr_t static_root) +{ +MEMORY_BASIC_INFORMATION buf; +LPVOID p; +char*base; +char*limit; +if (!GC_no_win32_dlls)return; +p=base=limit=GC_least_described_address(static_root); +while ((word)p < (word)GC_sysinfo.lpMaximumApplicationAddress){ +size_t result=VirtualQuery(p,&buf,sizeof(buf)); +char*new_limit; +DWORD protect; +if (result!=sizeof(buf)||buf.AllocationBase==0 +||GC_is_heap_base(buf.AllocationBase))break; +new_limit=(char*)p+buf.RegionSize; +protect=buf.Protect; +if (buf.State==MEM_COMMIT +&&is_writable(protect)){ +if ((char*)p==limit){ +limit=new_limit; +} else { +if (base!=limit)GC_add_roots_inner(base,limit,FALSE); +base=(char*)p; +limit=new_limit; +} +} +if ((word)p > (word)new_limit)break; +p=(LPVOID)new_limit; +} +if (base!=limit)GC_add_roots_inner(base,limit,FALSE); +} +#endif +void GC_register_data_segments(void) +{ +#ifdef MSWIN32 +GC_register_root_section((ptr_t)&GC_pages_executable); +#endif +} +#else +#if (defined(SVR4)||defined(AIX)||defined(DGUX)||(defined(LINUX)&&defined(SPARC)))&&!defined(PCR) +ptr_t GC_SysVGetDataStart(size_t max_page_size,ptr_t etext_addr) +{ +word text_end=((word)(etext_addr)+sizeof(word)- 1) +&~(word)(sizeof(word)- 1); +word next_page=((text_end+(word)max_page_size - 1) +&~((word)max_page_size - 1)); +word page_offset=(text_end&((word)max_page_size - 1)); +volatile ptr_t result=(char*)(next_page+page_offset); +GC_setup_temporary_fault_handler(); +if (SETJMP(GC_jmp_buf)==0){ +#ifdef AO_HAVE_fetch_and_add +volatile AO_t zero=0; +(void)AO_fetch_and_add((volatile AO_t*)result,zero); +#else +char v=*result; +#if defined(CPPCHECK) +GC_noop1((word)&v); +#endif +*result=v; +#endif +GC_reset_fault_handler(); +} else { +GC_reset_fault_handler(); +result=(char*)GC_find_limit(DATAEND,FALSE); +} +return ( ptr_t)result; +} +#endif +#ifdef DATASTART_USES_BSDGETDATASTART +GC_INNER ptr_t GC_FreeBSDGetDataStart(size_t max_page_size, +ptr_t etext_addr) +{ +word text_end=((word)(etext_addr)+sizeof(word)- 1) +&~(word)(sizeof(word)- 1); +volatile word next_page=(text_end+(word)max_page_size - 1) +&~((word)max_page_size - 1); +volatile ptr_t result=(ptr_t)text_end; 
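+/* Touch one byte in every page between the end of the text segment and DATAEND; if any probe faults, fall back to searching downwards from DATAEND for the start of the data segment. */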
+GC_setup_temporary_fault_handler(); +if (SETJMP(GC_jmp_buf)==0){ +for (;next_page < (word)DATAEND;next_page+=(word)max_page_size) +*(volatile char*)next_page; +GC_reset_fault_handler(); +} else { +GC_reset_fault_handler(); +result=(ptr_t)GC_find_limit(DATAEND,FALSE); +} +return(result); +} +#endif +#ifdef AMIGA +#define GC_AMIGA_DS +#undef GC_AMIGA_DS +#elif defined(OPENBSD) +void GC_register_data_segments(void) +{ +ptr_t region_start=DATASTART; +if ((word)region_start - 1U>=(word)DATAEND) +ABORT_ARG2("Wrong DATASTART/END pair", +":%p .. %p",(void*)region_start,(void*)DATAEND); +for (;;){ +#ifdef GC_OPENBSD_UTHREADS +ptr_t region_end=GC_find_limit_openbsd(region_start,DATAEND); +#else +ptr_t region_end=GC_find_limit_with_bound(region_start,TRUE,DATAEND); +#endif +GC_add_roots_inner(region_start,region_end,FALSE); +if ((word)region_end>=(word)DATAEND) +break; +region_start=GC_skip_hole_openbsd(region_end,DATAEND); +} +} +#else +#if!defined(PCR)&&!defined(MACOS)&&defined(REDIRECT_MALLOC)&&defined(GC_SOLARIS_THREADS) +EXTERN_C_BEGIN +extern caddr_t sbrk(int); +EXTERN_C_END +#endif +void GC_register_data_segments(void) +{ +#if!defined(PCR)&&!defined(MACOS) +#if defined(REDIRECT_MALLOC)&&defined(GC_SOLARIS_THREADS) +GC_ASSERT(DATASTART); +{ +ptr_t p=(ptr_t)sbrk(0); +if ((word)DATASTART < (word)p) +GC_add_roots_inner(DATASTART,p,FALSE); +} +#else +if ((word)DATASTART - 1U>=(word)DATAEND){ +ABORT_ARG2("Wrong DATASTART/END pair", +":%p .. %p",(void*)DATASTART,(void*)DATAEND); +} +GC_add_roots_inner(DATASTART,DATAEND,FALSE); +#ifdef GC_HAVE_DATAREGION2 +if ((word)DATASTART2 - 1U>=(word)DATAEND2) +ABORT_ARG2("Wrong DATASTART/END2 pair", +":%p .. %p",(void*)DATASTART2,(void*)DATAEND2); +GC_add_roots_inner(DATASTART2,DATAEND2,FALSE); +#endif +#endif +#endif +#if defined(MACOS) +{ +#if defined(THINK_C) +extern void*GC_MacGetDataStart(void); +GC_add_roots_inner((ptr_t)GC_MacGetDataStart(), +(ptr_t)LMGetCurrentA5(),FALSE); +#else +#if defined(__MWERKS__) +#if!__POWERPC__ +extern void*GC_MacGetDataStart(void); +#if __option(far_data) +extern void*GC_MacGetDataEnd(void); +#endif +GC_add_roots_inner((ptr_t)GC_MacGetDataStart(), +(ptr_t)LMGetCurrentA5(),FALSE); +#if __option(far_data) +GC_add_roots_inner((ptr_t)LMGetCurrentA5(), +(ptr_t)GC_MacGetDataEnd(),FALSE); +#endif +#else +extern char __data_start__[],__data_end__[]; +GC_add_roots_inner((ptr_t)&__data_start__, +(ptr_t)&__data_end__,FALSE); +#endif +#endif +#endif +} +#endif +} +#endif +#endif +#endif +#if!defined(OS2)&&!defined(PCR)&&!defined(AMIGA)&&!defined(USE_WINALLOC)&&!defined(MACOS)&&!defined(DOS4GW)&&!defined(NINTENDO_SWITCH)&&!defined(NONSTOP)&&!defined(SN_TARGET_ORBIS)&&!defined(SN_TARGET_PS3)&&!defined(SN_TARGET_PSP2)&&!defined(RTEMS)&&!defined(__CC_ARM) +#define SBRK_ARG_T ptrdiff_t +#if defined(MMAP_SUPPORTED) +#ifdef USE_MMAP_FIXED +#define GC_MMAP_FLAGS MAP_FIXED|MAP_PRIVATE +#else +#define GC_MMAP_FLAGS MAP_PRIVATE +#endif +#ifdef USE_MMAP_ANON +#define zero_fd -1 +#if defined(MAP_ANONYMOUS)&&!defined(CPPCHECK) +#define OPT_MAP_ANON MAP_ANONYMOUS +#else +#define OPT_MAP_ANON MAP_ANON +#endif +#else +static int zero_fd=-1; +#define OPT_MAP_ANON 0 +#endif +#ifndef MSWIN_XBOX1 +#if defined(SYMBIAN)&&!defined(USE_MMAP_ANON) +EXTERN_C_BEGIN +extern char*GC_get_private_path_and_zero_file(void); +EXTERN_C_END +#endif +STATIC ptr_t GC_unix_mmap_get_mem(size_t bytes) +{ +void*result; +static ptr_t last_addr=HEAP_START; +#ifndef USE_MMAP_ANON +static GC_bool initialized=FALSE; +if (!EXPECT(initialized,TRUE)){ +#ifdef SYMBIAN 
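+/* With non-anonymous mmap, Symbian supplies a private zero-filled file to use in place of /dev/zero as the mapping's backing object. */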
+char*path=GC_get_private_path_and_zero_file(); +if (path!=NULL){ +zero_fd=open(path,O_RDWR|O_CREAT,0644); +free(path); +} +#else +zero_fd=open("/dev/zero",O_RDONLY); +#endif +if (zero_fd==-1) +ABORT("Could not open/dev/zero"); +if (fcntl(zero_fd,F_SETFD,FD_CLOEXEC)==-1) +WARN("Could not set FD_CLOEXEC for/dev/zero\n",0); +initialized=TRUE; +} +#endif +GC_ASSERT(GC_page_size!=0); +if (bytes&(GC_page_size - 1))ABORT("Bad GET_MEM arg"); +result=mmap(last_addr,bytes,(PROT_READ|PROT_WRITE) +|(GC_pages_executable?PROT_EXEC:0), +GC_MMAP_FLAGS|OPT_MAP_ANON,zero_fd,0); +#undef IGNORE_PAGES_EXECUTABLE +if (EXPECT(MAP_FAILED==result,FALSE)){ +if (HEAP_START==last_addr&&GC_pages_executable&&EACCES==errno) +ABORT("Cannot allocate executable pages"); +return NULL; +} +last_addr=(ptr_t)(((word)result+bytes+GC_page_size - 1) +&~(GC_page_size - 1)); +#if!defined(LINUX) +if (last_addr==0){ +munmap(result,~GC_page_size - (size_t)result+1); +return GC_unix_mmap_get_mem(bytes); +} +#else +GC_ASSERT(last_addr!=0); +#endif +if (((word)result % HBLKSIZE)!=0) +ABORT( +"GC_unix_get_mem:Memory returned by mmap is not aligned to HBLKSIZE."); +return((ptr_t)result); +} +#endif +#endif +#if defined(USE_MMAP) +ptr_t GC_unix_get_mem(size_t bytes) +{ +return GC_unix_mmap_get_mem(bytes); +} +#else +STATIC ptr_t GC_unix_sbrk_get_mem(size_t bytes) +{ +ptr_t result; +#ifdef IRIX5 +__LOCK_MALLOC(); +#endif +{ +ptr_t cur_brk=(ptr_t)sbrk(0); +SBRK_ARG_T lsbs=(word)cur_brk&(GC_page_size-1); +GC_ASSERT(GC_page_size!=0); +if ((SBRK_ARG_T)bytes < 0){ +result=0; +goto out; +} +if (lsbs!=0){ +if((ptr_t)sbrk((SBRK_ARG_T)GC_page_size - lsbs)==(ptr_t)(-1)){ +result=0; +goto out; +} +} +#ifdef ADD_HEAP_GUARD_PAGES +{ +ptr_t guard=(ptr_t)sbrk((SBRK_ARG_T)GC_page_size); +if (mprotect(guard,GC_page_size,PROT_NONE)!=0) +ABORT("ADD_HEAP_GUARD_PAGES:mprotect failed"); +} +#endif +result=(ptr_t)sbrk((SBRK_ARG_T)bytes); +if (result==(ptr_t)(-1))result=0; +} +out: +#ifdef IRIX5 +__UNLOCK_MALLOC(); +#endif +return(result); +} +ptr_t GC_unix_get_mem(size_t bytes) +{ +#if defined(MMAP_SUPPORTED) +static GC_bool sbrk_failed=FALSE; +ptr_t result=0; +if (GC_pages_executable){ +return GC_unix_mmap_get_mem(bytes); +} +if (!sbrk_failed)result=GC_unix_sbrk_get_mem(bytes); +if (0==result){ +sbrk_failed=TRUE; +result=GC_unix_mmap_get_mem(bytes); +} +if (0==result){ +result=GC_unix_sbrk_get_mem(bytes); +} +return result; +#else +return GC_unix_sbrk_get_mem(bytes); +#endif +} +#endif +#endif +#ifdef OS2 +void*os2_alloc(size_t bytes) +{ +void*result; +if (DosAllocMem(&result,bytes,(PAG_READ|PAG_WRITE|PAG_COMMIT) +|(GC_pages_executable?PAG_EXECUTE:0)) +!=NO_ERROR){ +return(0); +} +if (result==0)return(os2_alloc(bytes)); +return(result); +} +#endif +#ifdef MSWIN_XBOX1 +ptr_t GC_durango_get_mem(size_t bytes) +{ +if (0==bytes)return NULL; +return (ptr_t)VirtualAlloc(NULL,bytes,MEM_COMMIT|MEM_TOP_DOWN, +PAGE_READWRITE); +} +#elif defined(MSWINCE) +ptr_t GC_wince_get_mem(size_t bytes) +{ +ptr_t result=0; +word i; +GC_ASSERT(GC_page_size!=0); +bytes=ROUNDUP_PAGESIZE(bytes); +for (i=0;i < GC_n_heap_bases;i++){ +if (((word)(-(signed_word)GC_heap_lengths[i]) +&(GC_sysinfo.dwAllocationGranularity-1)) +>=bytes){ +result=GC_heap_bases[i]+GC_heap_lengths[i]; +break; +} +} +if (i==GC_n_heap_bases){ +size_t res_bytes= +SIZET_SAT_ADD(bytes,(size_t)GC_sysinfo.dwAllocationGranularity-1) +&~((size_t)GC_sysinfo.dwAllocationGranularity-1); +result=(ptr_t)VirtualAlloc(NULL,res_bytes, +MEM_RESERVE|MEM_TOP_DOWN, +GC_pages_executable?PAGE_EXECUTE_READWRITE: +PAGE_READWRITE); +if 
(HBLKDISPL(result)!=0)ABORT("Bad VirtualAlloc result"); +if (GC_n_heap_bases>=MAX_HEAP_SECTS)ABORT("Too many heap sections"); +if (result==NULL)return NULL; +GC_heap_bases[GC_n_heap_bases]=result; +GC_heap_lengths[GC_n_heap_bases]=0; +GC_n_heap_bases++; +} +result=(ptr_t)VirtualAlloc(result,bytes,MEM_COMMIT, +GC_pages_executable?PAGE_EXECUTE_READWRITE: +PAGE_READWRITE); +#undef IGNORE_PAGES_EXECUTABLE +if (result!=NULL){ +if (HBLKDISPL(result)!=0)ABORT("Bad VirtualAlloc result"); +GC_heap_lengths[i]+=bytes; +} +return(result); +} +#elif (defined(USE_WINALLOC)&&!defined(MSWIN_XBOX1))||defined(CYGWIN32) +#ifdef USE_GLOBAL_ALLOC +#define GLOBAL_ALLOC_TEST 1 +#else +#define GLOBAL_ALLOC_TEST GC_no_win32_dlls +#endif +#if (defined(GC_USE_MEM_TOP_DOWN)&&defined(USE_WINALLOC))||defined(CPPCHECK) +DWORD GC_mem_top_down=MEM_TOP_DOWN; +#else +#define GC_mem_top_down 0 +#endif +ptr_t GC_win32_get_mem(size_t bytes) +{ +ptr_t result; +#ifndef USE_WINALLOC +result=GC_unix_get_mem(bytes); +#else +#if defined(MSWIN32)&&!defined(MSWINRT_FLAVOR) +if (GLOBAL_ALLOC_TEST){ +result=(ptr_t)GlobalAlloc(0,SIZET_SAT_ADD(bytes,HBLKSIZE)); +result=(ptr_t)(((word)result+HBLKSIZE - 1) +&~(word)(HBLKSIZE - 1)); +} else +#endif +{ +#ifdef MPROTECT_VDB +#ifdef GWW_VDB +#define VIRTUAL_ALLOC_PAD (GC_GWW_AVAILABLE()?0:1) +#else +#define VIRTUAL_ALLOC_PAD 1 +#endif +#else +#define VIRTUAL_ALLOC_PAD 0 +#endif +result=(ptr_t)VirtualAlloc(NULL, +SIZET_SAT_ADD(bytes,VIRTUAL_ALLOC_PAD), +GetWriteWatch_alloc_flag +|(MEM_COMMIT|MEM_RESERVE) +|GC_mem_top_down, +GC_pages_executable?PAGE_EXECUTE_READWRITE: +PAGE_READWRITE); +#undef IGNORE_PAGES_EXECUTABLE +} +#endif +if (HBLKDISPL(result)!=0)ABORT("Bad VirtualAlloc result"); +if (GC_n_heap_bases>=MAX_HEAP_SECTS)ABORT("Too many heap sections"); +if (0!=result)GC_heap_bases[GC_n_heap_bases++]=result; +return(result); +} +#endif +#if defined(MSWIN32)||defined(MSWINCE)||defined(CYGWIN32)||defined(MSWIN_XBOX1) +GC_API void GC_CALL GC_win32_free_heap(void) +{ +#if defined(USE_WINALLOC)&&!defined(REDIRECT_MALLOC)&&!defined(MSWIN_XBOX1) +GC_free_malloc_heap_list(); +#endif +#if (defined(USE_WINALLOC)&&!defined(MSWIN_XBOX1)&&!defined(MSWINCE))||defined(CYGWIN32) +#ifndef MSWINRT_FLAVOR +#ifndef CYGWIN32 +if (GLOBAL_ALLOC_TEST) +#endif +{ +while (GC_n_heap_bases--> 0){ +#ifdef CYGWIN32 +#else +GlobalFree(GC_heap_bases[GC_n_heap_bases]); +#endif +GC_heap_bases[GC_n_heap_bases]=0; +} +return; +} +#endif +#ifndef CYGWIN32 +while (GC_n_heap_bases > 0){ +VirtualFree(GC_heap_bases[--GC_n_heap_bases],0,MEM_RELEASE); +GC_heap_bases[GC_n_heap_bases]=0; +} +#endif +#endif +} +#endif +#ifdef AMIGA +#define GC_AMIGA_AM +#undef GC_AMIGA_AM +#endif +#if defined(HAIKU) +#include +ptr_t GC_haiku_get_mem(size_t bytes) +{ +void*mem; +GC_ASSERT(GC_page_size!=0); +if (posix_memalign(&mem,GC_page_size,bytes)==0) +return mem; +return NULL; +} +#endif +#ifdef USE_MUNMAP +#if!defined(NN_PLATFORM_CTR)&&!defined(MSWIN32)&&!defined(MSWINCE)&&!defined(MSWIN_XBOX1) +#include +#ifdef SN_TARGET_PS3 +#include +#else +#include +#endif +#include +#include +#endif +STATIC ptr_t GC_unmap_start(ptr_t start,size_t bytes) +{ +ptr_t result=(ptr_t)(((word)start+GC_page_size - 1) +&~(GC_page_size - 1)); +GC_ASSERT(GC_page_size!=0); +if ((word)(result+GC_page_size)> (word)(start+bytes))return 0; +return result; +} +GC_INNER void GC_unmap(ptr_t start,size_t bytes) +{ +ptr_t start_addr=GC_unmap_start(start,bytes); +ptr_t end_addr=GC_unmap_end(start,bytes); +word len=end_addr - start_addr; +if (0==start_addr)return; +#ifdef USE_WINALLOC 
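+/* Decommit one VirtualQuery region at a time: the range being unmapped may span several VirtualAlloc reservations, so each VirtualFree(MEM_DECOMMIT) is limited to the current region's size. */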
+while (len!=0){ +MEMORY_BASIC_INFORMATION mem_info; +word free_len; +if (VirtualQuery(start_addr,&mem_info,sizeof(mem_info)) +!=sizeof(mem_info)) +ABORT("Weird VirtualQuery result"); +free_len=(len < mem_info.RegionSize)?len:mem_info.RegionSize; +if (!VirtualFree(start_addr,free_len,MEM_DECOMMIT)) +ABORT("VirtualFree failed"); +GC_unmapped_bytes+=free_len; +start_addr+=free_len; +len-=free_len; +} +#elif defined(SN_TARGET_PS3) +ps3_free_mem(start_addr,len); +#else +{ +#if defined(AIX)||defined(CYGWIN32)||defined(HAIKU)||defined(HPUX) +if (mprotect(start_addr,len,PROT_NONE)) +ABORT("mprotect(PROT_NONE)failed"); +#elif defined(__EMSCRIPTEN__) +#else +void*result=mmap(start_addr,len,PROT_NONE, +MAP_PRIVATE|MAP_FIXED|OPT_MAP_ANON, +zero_fd,0); +if (result!=(void*)start_addr) +ABORT("mmap(PROT_NONE)failed"); +#if defined(CPPCHECK)||defined(LINT2) +GC_noop1((word)result); +#endif +#endif +} +GC_unmapped_bytes+=len; +#endif +} +GC_INNER void GC_remap(ptr_t start,size_t bytes) +{ +ptr_t start_addr=GC_unmap_start(start,bytes); +ptr_t end_addr=GC_unmap_end(start,bytes); +word len=end_addr - start_addr; +if (0==start_addr)return; +#ifdef USE_WINALLOC +while (len!=0){ +MEMORY_BASIC_INFORMATION mem_info; +word alloc_len; +ptr_t result; +if (VirtualQuery(start_addr,&mem_info,sizeof(mem_info)) +!=sizeof(mem_info)) +ABORT("Weird VirtualQuery result"); +alloc_len=(len < mem_info.RegionSize)?len:mem_info.RegionSize; +result=(ptr_t)VirtualAlloc(start_addr,alloc_len,MEM_COMMIT, +GC_pages_executable +?PAGE_EXECUTE_READWRITE +:PAGE_READWRITE); +if (result!=start_addr){ +if (GetLastError()==ERROR_NOT_ENOUGH_MEMORY|| +GetLastError()==ERROR_OUTOFMEMORY){ +ABORT("Not enough memory to process remapping"); +} else { +ABORT("VirtualAlloc remapping failed"); +} +} +#ifdef LINT2 +GC_noop1((word)result); +#endif +GC_unmapped_bytes-=alloc_len; +start_addr+=alloc_len; +len-=alloc_len; +} +#else +{ +#if defined(NACL)||defined(NETBSD) +void*result=mmap(start_addr,len,(PROT_READ|PROT_WRITE) +|(GC_pages_executable?PROT_EXEC:0), +MAP_PRIVATE|MAP_FIXED|OPT_MAP_ANON, +zero_fd,0); +if (result!=(void*)start_addr) +ABORT("mmap as mprotect failed"); +#if defined(CPPCHECK)||defined(LINT2) +GC_noop1((word)result); +#endif +#else +if (mprotect(start_addr,len,(PROT_READ|PROT_WRITE) +|(GC_pages_executable?PROT_EXEC:0))!=0){ +ABORT_ARG3("mprotect remapping failed", +" at %p (length %lu),errcode=%d", +(void*)start_addr,(unsigned long)len,errno); +} +#endif +} +#undef IGNORE_PAGES_EXECUTABLE +GC_unmapped_bytes-=len; +#endif +} +GC_INNER void GC_unmap_gap(ptr_t start1,size_t bytes1,ptr_t start2, +size_t bytes2) +{ +ptr_t start1_addr=GC_unmap_start(start1,bytes1); +ptr_t end1_addr=GC_unmap_end(start1,bytes1); +ptr_t start2_addr=GC_unmap_start(start2,bytes2); +ptr_t start_addr=end1_addr; +ptr_t end_addr=start2_addr; +size_t len; +GC_ASSERT(start1+bytes1==start2); +if (0==start1_addr)start_addr=GC_unmap_start(start1,bytes1+bytes2); +if (0==start2_addr)end_addr=GC_unmap_end(start1,bytes1+bytes2); +if (0==start_addr)return; +len=end_addr - start_addr; +#ifdef USE_WINALLOC +while (len!=0){ +MEMORY_BASIC_INFORMATION mem_info; +word free_len; +if (VirtualQuery(start_addr,&mem_info,sizeof(mem_info)) +!=sizeof(mem_info)) +ABORT("Weird VirtualQuery result"); +free_len=(len < mem_info.RegionSize)?len:mem_info.RegionSize; +if (!VirtualFree(start_addr,free_len,MEM_DECOMMIT)) +ABORT("VirtualFree failed"); +GC_unmapped_bytes+=free_len; +start_addr+=free_len; +len-=free_len; +} +#else +if (len!=0){ +#if 
defined(AIX)||defined(CYGWIN32)||defined(HAIKU)||defined(HPUX) +if (mprotect(start_addr,len,PROT_NONE)) +ABORT("mprotect(PROT_NONE)failed"); +#else +void*result=mmap(start_addr,len,PROT_NONE, +MAP_PRIVATE|MAP_FIXED|OPT_MAP_ANON, +zero_fd,0); +if (result!=(void*)start_addr) +ABORT("mmap(PROT_NONE)failed"); +#if defined(CPPCHECK)||defined(LINT2) +GC_noop1((word)result); +#endif +#endif +GC_unmapped_bytes+=len; +} +#endif +} +#endif +#ifndef THREADS +#if defined(__EMSCRIPTEN__) +static void scan_regs_cb(void*begin,void*end) +{ +GC_push_all_stack((ptr_t)begin,(ptr_t)end); +} +STATIC void GC_CALLBACK GC_default_push_other_roots(void) +{ +emscripten_scan_registers(scan_regs_cb); +} +#else +#define GC_default_push_other_roots 0 +#endif +#else +#ifdef PCR +PCR_ERes GC_push_thread_stack(PCR_Th_T*t,PCR_Any dummy) +{ +struct PCR_ThCtl_TInfoRep info; +PCR_ERes result; +info.ti_stkLow=info.ti_stkHi=0; +result=PCR_ThCtl_GetInfo(t,&info); +GC_push_all_stack((ptr_t)(info.ti_stkLow),(ptr_t)(info.ti_stkHi)); +return(result); +} +PCR_ERes GC_push_old_obj(void*p,size_t size,PCR_Any data) +{ +GC_push_all_stack((ptr_t)p,(ptr_t)p+size); +return(PCR_ERes_okay); +} +extern struct PCR_MM_ProcsRep*GC_old_allocator; +STATIC void GC_CALLBACK GC_default_push_other_roots(void) +{ +if ((*(GC_old_allocator->mmp_enumerate))(PCR_Bool_false, +GC_push_old_obj,0) +!=PCR_ERes_okay){ +ABORT("Old object enumeration failed"); +} +if (PCR_ERes_IsErr( +PCR_ThCtl_ApplyToAllOtherThreads(GC_push_thread_stack,0)) +||PCR_ERes_IsErr(GC_push_thread_stack(PCR_Th_CurrThread(),0))){ +ABORT("Thread stack marking failed"); +} +} +#elif defined(SN_TARGET_PS3) +STATIC void GC_CALLBACK GC_default_push_other_roots(void) +{ +ABORT("GC_default_push_other_roots is not implemented"); +} +void GC_push_thread_structures(void) +{ +ABORT("GC_push_thread_structures is not implemented"); +} +#else +STATIC void GC_CALLBACK GC_default_push_other_roots(void) +{ +GC_push_all_stacks(); +} +#endif +#endif +GC_push_other_roots_proc GC_push_other_roots=GC_default_push_other_roots; +GC_API void GC_CALL GC_set_push_other_roots(GC_push_other_roots_proc fn) +{ +GC_push_other_roots=fn; +} +GC_API GC_push_other_roots_proc GC_CALL GC_get_push_other_roots(void) +{ +return GC_push_other_roots; +} +#if (defined(CHECKSUMS)&&defined(GWW_VDB))||defined(PROC_VDB) +STATIC void GC_or_pages(page_hash_table pht1,page_hash_table pht2) +{ +unsigned i; +for (i=0;i < PHT_SIZE;i++)pht1[i]|=pht2[i]; +} +#endif +#ifdef GWW_VDB +#define GC_GWW_BUF_LEN (MAXHINCR*HBLKSIZE/4096) +static PVOID gww_buf[GC_GWW_BUF_LEN]; +#ifndef MPROTECT_VDB +#define GC_gww_dirty_init GC_dirty_init +#endif +GC_INNER GC_bool GC_gww_dirty_init(void) +{ +detect_GetWriteWatch(); +return GC_GWW_AVAILABLE(); +} +GC_INLINE void GC_gww_read_dirty(GC_bool output_unneeded) +{ +word i; +if (!output_unneeded) +BZERO(GC_grungy_pages,sizeof(GC_grungy_pages)); +for (i=0;i!=GC_n_heap_sects;++i){ +GC_ULONG_PTR count; +do { +PVOID*pages=gww_buf; +DWORD page_size; +count=GC_GWW_BUF_LEN; +if ((*(GetWriteWatch_type)(word)GetWriteWatch_func)( +WRITE_WATCH_FLAG_RESET, +GC_heap_sects[i].hs_start, +GC_heap_sects[i].hs_bytes, +pages,&count,&page_size)!=0){ +static int warn_count=0; +struct hblk*start=(struct hblk*)GC_heap_sects[i].hs_start; +static struct hblk*last_warned=0; +size_t nblocks=divHBLKSZ(GC_heap_sects[i].hs_bytes); +if (i!=0&&last_warned!=start&&warn_count++< 5){ +last_warned=start; +WARN("GC_gww_read_dirty unexpectedly failed at %p:" +"Falling back to marking all pages dirty\n",start); +} +if (!output_unneeded){ +unsigned j; 
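+/* GetWriteWatch failed for this heap section: conservatively mark every block in it as dirty. */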
+for (j=0;j < nblocks;++j){ +word hash=PHT_HASH(start+j); +set_pht_entry_from_index(GC_grungy_pages,hash); +} +} +count=1; +} else if (!output_unneeded){ +PVOID*pages_end=pages+count; +while (pages!=pages_end){ +struct hblk*h=(struct hblk*)*pages++; +struct hblk*h_end=(struct hblk*)((char*)h+page_size); +do { +set_pht_entry_from_index(GC_grungy_pages,PHT_HASH(h)); +} while ((word)(++h)< (word)h_end); +} +} +} while (count==GC_GWW_BUF_LEN); +} +#ifdef CHECKSUMS +GC_ASSERT(!output_unneeded); +GC_or_pages(GC_written_pages,GC_grungy_pages); +#endif +} +#else +#define GC_GWW_AVAILABLE()FALSE +#endif +#ifdef DEFAULT_VDB +GC_INNER GC_bool GC_dirty_init(void) +{ +GC_VERBOSE_LOG_PRINTF("Initializing DEFAULT_VDB...\n"); +return TRUE; +} +#endif +#ifndef GC_DISABLE_INCREMENTAL +#if!defined(THREADS)||defined(HAVE_LOCKFREE_AO_OR) +#define async_set_pht_entry_from_index(db,index)set_pht_entry_from_index_concurrent(db,index) +#elif defined(AO_HAVE_test_and_set_acquire) +GC_INNER volatile AO_TS_t GC_fault_handler_lock=AO_TS_INITIALIZER; +static void async_set_pht_entry_from_index(volatile page_hash_table db, +size_t index) +{ +GC_acquire_dirty_lock(); +set_pht_entry_from_index(db,index); +GC_release_dirty_lock(); +} +#else +#error No test_and_set operation:Introduces a race. +#endif +#endif +#ifdef MPROTECT_VDB +#ifdef DARWIN +#include +STATIC mach_port_t GC_task_self=0; +#define PROTECT(addr,len)if (vm_protect(GC_task_self,(vm_address_t)(addr),(vm_size_t)(len),FALSE,VM_PROT_READ|(GC_pages_executable?VM_PROT_EXECUTE:0))==KERN_SUCCESS){} else ABORT("vm_protect(PROTECT)failed") +#define UNPROTECT(addr,len)if (vm_protect(GC_task_self,(vm_address_t)(addr),(vm_size_t)(len),FALSE,(VM_PROT_READ|VM_PROT_WRITE)|(GC_pages_executable?VM_PROT_EXECUTE:0))==KERN_SUCCESS){} else ABORT("vm_protect(UNPROTECT)failed") +#elif!defined(USE_WINALLOC) +#include +#include +#if!defined(CYGWIN32)&&!defined(HAIKU) +#include +#endif +#define PROTECT(addr,len)if (mprotect((caddr_t)(addr),(size_t)(len),PROT_READ|(GC_pages_executable?PROT_EXEC:0))>=0){ } else ABORT("mprotect failed") +#define UNPROTECT(addr,len)if (mprotect((caddr_t)(addr),(size_t)(len),(PROT_READ|PROT_WRITE)|(GC_pages_executable?PROT_EXEC:0))>=0){ } else ABORT(GC_pages_executable?"un-mprotect executable page failed" " (probably disabled by OS)":"un-mprotect failed") +#undef IGNORE_PAGES_EXECUTABLE +#else +#ifndef MSWINCE +#include +#endif +static DWORD protect_junk; +#define PROTECT(addr,len)if (VirtualProtect((addr),(len),GC_pages_executable?PAGE_EXECUTE_READ:PAGE_READONLY,&protect_junk)){ } else ABORT_ARG1("VirtualProtect failed",":errcode=0x%X",(unsigned)GetLastError()) +#define UNPROTECT(addr,len)if (VirtualProtect((addr),(len),GC_pages_executable?PAGE_EXECUTE_READWRITE:PAGE_READWRITE,&protect_junk)){ } else ABORT("un-VirtualProtect failed") +#endif +#if defined(MSWIN32) +typedef LPTOP_LEVEL_EXCEPTION_FILTER SIG_HNDLR_PTR; +#undef SIG_DFL +#define SIG_DFL (LPTOP_LEVEL_EXCEPTION_FILTER)((signed_word)-1) +#elif defined(MSWINCE) +typedef LONG (WINAPI*SIG_HNDLR_PTR)(struct _EXCEPTION_POINTERS*); +#undef SIG_DFL +#define SIG_DFL (SIG_HNDLR_PTR)(-1) +#elif defined(DARWIN) +typedef void (*SIG_HNDLR_PTR)(); +#else +typedef void (*SIG_HNDLR_PTR)(int,siginfo_t*,void*); +typedef void (*PLAIN_HNDLR_PTR)(int); +#endif +#if defined(__GLIBC__) +#if __GLIBC__ < 2||__GLIBC__==2&&__GLIBC_MINOR__ < 2 +#error glibc too old? 
+#endif +#endif +#ifndef DARWIN +STATIC SIG_HNDLR_PTR GC_old_segv_handler=0; +#if defined(FREEBSD)||defined(HPUX)||defined(HURD)||defined(LINUX) +STATIC SIG_HNDLR_PTR GC_old_bus_handler=0; +#ifndef LINUX +STATIC GC_bool GC_old_bus_handler_used_si=FALSE; +#endif +#endif +#if!defined(MSWIN32)&&!defined(MSWINCE) +STATIC GC_bool GC_old_segv_handler_used_si=FALSE; +#endif +#endif +#ifdef THREADS +GC_ATTR_NO_SANITIZE_THREAD +static GC_bool is_header_found_async(void*addr) +{ +#ifdef HASH_TL +hdr*result; +GET_HDR((ptr_t)addr,result); +return result!=NULL; +#else +return HDR_INNER(addr)!=NULL; +#endif +} +#else +#define is_header_found_async(addr)(HDR(addr)!=NULL) +#endif +#ifndef DARWIN +#if!defined(MSWIN32)&&!defined(MSWINCE) +#include +#if defined(FREEBSD)||defined(HURD)||defined(HPUX) +#define SIG_OK (sig==SIGBUS||sig==SIGSEGV) +#else +#define SIG_OK (sig==SIGSEGV) +#endif +#if defined(FREEBSD) +#ifndef SEGV_ACCERR +#define SEGV_ACCERR 2 +#endif +#if defined(AARCH64)||defined(ARM32)||defined(MIPS) +#define CODE_OK (si->si_code==SEGV_ACCERR) +#elif defined(POWERPC) +#define AIM +#include +#define CODE_OK (si->si_code==EXC_DSI||si->si_code==SEGV_ACCERR) +#else +#define CODE_OK (si->si_code==BUS_PAGE_FAULT||si->si_code==SEGV_ACCERR) +#endif +#elif defined(OSF1) +#define CODE_OK (si->si_code==2) +#elif defined(IRIX5) +#define CODE_OK (si->si_code==EACCES) +#elif defined(CYGWIN32)||defined(HAIKU)||defined(HURD) +#define CODE_OK TRUE +#elif defined(LINUX) +#define CODE_OK TRUE +#elif defined(HPUX) +#define CODE_OK (si->si_code==SEGV_ACCERR||si->si_code==BUS_ADRERR||si->si_code==BUS_UNKNOWN||si->si_code==SEGV_UNKNOWN||si->si_code==BUS_OBJERR) +#elif defined(SUNOS5SIGS) +#define CODE_OK (si->si_code==SEGV_ACCERR) +#endif +#ifndef NO_GETCONTEXT +#include +#endif +STATIC void GC_write_fault_handler(int sig,siginfo_t*si,void*raw_sc) +#else +#define SIG_OK (exc_info->ExceptionRecord->ExceptionCode==STATUS_ACCESS_VIOLATION) +#define CODE_OK (exc_info->ExceptionRecord->ExceptionInformation[0]==1) +STATIC LONG WINAPI GC_write_fault_handler( +struct _EXCEPTION_POINTERS*exc_info) +#endif +{ +#if!defined(MSWIN32)&&!defined(MSWINCE) +char*addr=(char*)si->si_addr; +#else +char*addr=(char*)(exc_info->ExceptionRecord +->ExceptionInformation[1]); +#endif +if (SIG_OK&&CODE_OK){ +struct hblk*h=(struct hblk*)((word)addr&~(GC_page_size-1)); +GC_bool in_allocd_block; +size_t i; +GC_ASSERT(GC_page_size!=0); +#ifdef CHECKSUMS +GC_record_fault(h); +#endif +#ifdef SUNOS5SIGS +in_allocd_block=FALSE; +for (i=0;i < divHBLKSZ(GC_page_size);i++){ +if (is_header_found_async(&h[i])){ +in_allocd_block=TRUE; +break; +} +} +#else +in_allocd_block=is_header_found_async(addr); +#endif +if (!in_allocd_block){ +SIG_HNDLR_PTR old_handler; +#if defined(MSWIN32)||defined(MSWINCE) +old_handler=GC_old_segv_handler; +#else +GC_bool used_si; +#if defined(FREEBSD)||defined(HURD)||defined(HPUX) +if (sig==SIGBUS){ +old_handler=GC_old_bus_handler; +used_si=GC_old_bus_handler_used_si; +} else +#endif +{ +old_handler=GC_old_segv_handler; +used_si=GC_old_segv_handler_used_si; +} +#endif +if (old_handler==(SIG_HNDLR_PTR)(signed_word)SIG_DFL){ +#if!defined(MSWIN32)&&!defined(MSWINCE) +ABORT_ARG1("Unexpected bus error or segmentation fault", +" at %p",(void*)addr); +#else +return(EXCEPTION_CONTINUE_SEARCH); +#endif +} else { +#if defined(MSWIN32)||defined(MSWINCE) +return((*old_handler)(exc_info)); +#else +if (used_si) +((SIG_HNDLR_PTR)old_handler)(sig,si,raw_sc); +else +((PLAIN_HNDLR_PTR)(signed_word)old_handler)(sig); +return; +#endif +} +} 
+UNPROTECT(h,GC_page_size); +for (i=0;i < divHBLKSZ(GC_page_size);i++){ +word index=PHT_HASH(h+i); +async_set_pht_entry_from_index(GC_dirty_pages,index); +} +#if defined(MSWIN32)||defined(MSWINCE) +return(EXCEPTION_CONTINUE_EXECUTION); +#else +return; +#endif +} +#if defined(MSWIN32)||defined(MSWINCE) +return EXCEPTION_CONTINUE_SEARCH; +#else +ABORT_ARG1("Unexpected bus error or segmentation fault", +" at %p",(void*)addr); +#endif +} +#if defined(GC_WIN32_THREADS)&&!defined(CYGWIN32) +GC_INNER void GC_set_write_fault_handler(void) +{ +SetUnhandledExceptionFilter(GC_write_fault_handler); +} +#endif +#endif +#if!defined(DARWIN) +GC_INNER GC_bool GC_dirty_init(void) +{ +#if!defined(MSWIN32)&&!defined(MSWINCE) +struct sigaction act,oldact; +act.sa_flags=SA_RESTART|SA_SIGINFO; +act.sa_sigaction=GC_write_fault_handler; +(void)sigemptyset(&act.sa_mask); +#if defined(THREADS)&&!defined(GC_OPENBSD_UTHREADS)&&!defined(GC_WIN32_THREADS)&&!defined(NACL) +(void)sigaddset(&act.sa_mask,GC_get_suspend_signal()); +#endif +#endif +GC_VERBOSE_LOG_PRINTF( +"Initializing mprotect virtual dirty bit implementation\n"); +if (GC_page_size % HBLKSIZE!=0){ +ABORT("Page size not multiple of HBLKSIZE"); +} +#if!defined(MSWIN32)&&!defined(MSWINCE) +#if defined(GC_IRIX_THREADS) +sigaction(SIGSEGV,0,&oldact); +sigaction(SIGSEGV,&act,0); +#else +{ +int res=sigaction(SIGSEGV,&act,&oldact); +if (res!=0)ABORT("Sigaction failed"); +} +#endif +if (oldact.sa_flags&SA_SIGINFO){ +GC_old_segv_handler=oldact.sa_sigaction; +GC_old_segv_handler_used_si=TRUE; +} else { +GC_old_segv_handler=(SIG_HNDLR_PTR)(signed_word)oldact.sa_handler; +GC_old_segv_handler_used_si=FALSE; +} +if (GC_old_segv_handler==(SIG_HNDLR_PTR)(signed_word)SIG_IGN){ +WARN("Previously ignored segmentation violation!?\n",0); +GC_old_segv_handler=(SIG_HNDLR_PTR)(signed_word)SIG_DFL; +} +if (GC_old_segv_handler!=(SIG_HNDLR_PTR)(signed_word)SIG_DFL){ +GC_VERBOSE_LOG_PRINTF("Replaced other SIGSEGV handler\n"); +} +#if defined(HPUX)||defined(LINUX)||defined(HURD)||(defined(FREEBSD)&&(defined(__GLIBC__)||defined(SUNOS5SIGS))) +sigaction(SIGBUS,&act,&oldact); +if ((oldact.sa_flags&SA_SIGINFO)!=0){ +GC_old_bus_handler=oldact.sa_sigaction; +#if!defined(LINUX) +GC_old_bus_handler_used_si=TRUE; +#endif +} else { +GC_old_bus_handler=(SIG_HNDLR_PTR)(signed_word)oldact.sa_handler; +} +if (GC_old_bus_handler==(SIG_HNDLR_PTR)(signed_word)SIG_IGN){ +WARN("Previously ignored bus error!?\n",0); +#if!defined(LINUX) +GC_old_bus_handler=(SIG_HNDLR_PTR)(signed_word)SIG_DFL; +#else +#endif +} else if (GC_old_bus_handler!=(SIG_HNDLR_PTR)(signed_word)SIG_DFL){ +GC_VERBOSE_LOG_PRINTF("Replaced other SIGBUS handler\n"); +} +#endif +#endif +#if defined(GWW_VDB) +if (GC_gww_dirty_init()) +return TRUE; +#endif +#if defined(MSWIN32) +GC_old_segv_handler=SetUnhandledExceptionFilter(GC_write_fault_handler); +if (GC_old_segv_handler!=NULL){ +GC_COND_LOG_PRINTF("Replaced other UnhandledExceptionFilter\n"); +} else { +GC_old_segv_handler=SIG_DFL; +} +#elif defined(MSWINCE) +#endif +#if defined(CPPCHECK)&&defined(ADDRESS_SANITIZER) +GC_noop1((word)&__asan_default_options); +#endif +return TRUE; +} +#endif +GC_API int GC_CALL GC_incremental_protection_needs(void) +{ +GC_ASSERT(GC_is_initialized); +if (GC_page_size==HBLKSIZE){ +return GC_PROTECTS_POINTER_HEAP; +} else { +return GC_PROTECTS_POINTER_HEAP|GC_PROTECTS_PTRFREE_HEAP; +} +} +#define HAVE_INCREMENTAL_PROTECTION_NEEDS +#define IS_PTRFREE(hhdr)((hhdr)->hb_descr==0) +#define PAGE_ALIGNED(x)!((word)(x)&(GC_page_size - 1)) +STATIC void 
GC_protect_heap(void) +{ +unsigned i; +GC_bool protect_all= +(0!=(GC_incremental_protection_needs()&GC_PROTECTS_PTRFREE_HEAP)); +GC_ASSERT(GC_page_size!=0); +for (i=0;i < GC_n_heap_sects;i++){ +ptr_t start=GC_heap_sects[i].hs_start; +size_t len=GC_heap_sects[i].hs_bytes; +if (protect_all){ +PROTECT(start,len); +} else { +struct hblk*current; +struct hblk*current_start; +struct hblk*limit; +GC_ASSERT(PAGE_ALIGNED(len)); +GC_ASSERT(PAGE_ALIGNED(start)); +current_start=current=(struct hblk*)start; +limit=(struct hblk*)(start+len); +while ((word)current < (word)limit){ +hdr*hhdr; +word nhblks; +GC_bool is_ptrfree; +GC_ASSERT(PAGE_ALIGNED(current)); +GET_HDR(current,hhdr); +if (IS_FORWARDING_ADDR_OR_NIL(hhdr)){ +GC_ASSERT(current_start==current); +current_start=++current; +continue; +} +if (HBLK_IS_FREE(hhdr)){ +GC_ASSERT(PAGE_ALIGNED(hhdr->hb_sz)); +nhblks=divHBLKSZ(hhdr->hb_sz); +is_ptrfree=TRUE; +} else { +nhblks=OBJ_SZ_TO_BLOCKS(hhdr->hb_sz); +is_ptrfree=IS_PTRFREE(hhdr); +} +if (is_ptrfree){ +if ((word)current_start < (word)current){ +PROTECT(current_start,(ptr_t)current - (ptr_t)current_start); +} +current_start=(current+=nhblks); +} else { +current+=nhblks; +} +} +if ((word)current_start < (word)current){ +PROTECT(current_start,(ptr_t)current - (ptr_t)current_start); +} +} +} +} +#endif +#ifdef PROC_VDB +#include +#include +#include +#include +#include +#ifdef GC_NO_SYS_FAULT_H +#define PG_MODIFIED 1 +struct prpageheader { +int dummy[2]; +unsigned long pr_nmap; +unsigned long pr_npage; +}; +struct prasmap { +char*pr_vaddr; +size_t pr_npage; +char dummy1[64+8]; +unsigned pr_mflags; +unsigned pr_pagesize; +int dummy2[2]; +}; +#else +#include +#include +#endif +#define INITIAL_BUF_SZ 16384 +STATIC size_t GC_proc_buf_size=INITIAL_BUF_SZ; +STATIC char*GC_proc_buf=NULL; +STATIC int GC_proc_fd=0; +GC_INNER GC_bool GC_dirty_init(void) +{ +char buf[40]; +if (GC_bytes_allocd!=0||GC_bytes_allocd_before_gc!=0){ +memset(GC_written_pages,0xff,sizeof(page_hash_table)); +GC_VERBOSE_LOG_PRINTF( +"Allocated %lu bytes:all pages may have been written\n", +(unsigned long)(GC_bytes_allocd+GC_bytes_allocd_before_gc)); +} +(void)snprintf(buf,sizeof(buf),"/proc/%ld/pagedata",(long)getpid()); +buf[sizeof(buf)- 1]='\0'; +GC_proc_fd=open(buf,O_RDONLY); +if (GC_proc_fd < 0){ +WARN("/proc open failed;cannot enable GC incremental mode\n",0); +return FALSE; +} +if (syscall(SYS_fcntl,GC_proc_fd,F_SETFD,FD_CLOEXEC)==-1) +WARN("Could not set FD_CLOEXEC for/proc\n",0); +GC_proc_buf=GC_scratch_alloc(GC_proc_buf_size); +if (GC_proc_buf==NULL) +ABORT("Insufficient space for/proc read"); +return TRUE; +} +GC_INLINE void GC_proc_read_dirty(GC_bool output_unneeded) +{ +#define READ read +int nmaps; +char*bufp=GC_proc_buf; +int i; +BZERO(GC_grungy_pages,sizeof(GC_grungy_pages)); +if (READ(GC_proc_fd,bufp,GC_proc_buf_size)<=0){ +size_t new_size=2*GC_proc_buf_size; +char*new_buf; +WARN("/proc read failed:GC_proc_buf_size=%" WARN_PRIdPTR "\n", +(signed_word)GC_proc_buf_size); +new_buf=GC_scratch_alloc(new_size); +if (new_buf!=0){ +GC_scratch_recycle_no_gww(bufp,GC_proc_buf_size); +GC_proc_buf=bufp=new_buf; +GC_proc_buf_size=new_size; +} +if (READ(GC_proc_fd,bufp,GC_proc_buf_size)<=0){ +WARN("Insufficient space for/proc read\n",0); +if (!output_unneeded) +memset(GC_grungy_pages,0xff,sizeof (page_hash_table)); +memset(GC_written_pages,0xff,sizeof(page_hash_table)); +return; +} +} +nmaps=((struct prpageheader*)bufp)->pr_nmap; +#ifdef DEBUG_DIRTY_BITS +GC_log_printf("Proc VDB read:pr_nmap=%u,pr_npage=%lu\n", +nmaps,((struct 
prpageheader*)bufp)->pr_npage); +#endif +#if defined(GC_NO_SYS_FAULT_H)&&defined(CPPCHECK) +GC_noop1(((struct prpageheader*)bufp)->dummy[0]); +#endif +bufp+=sizeof(struct prpageheader); +for (i=0;i < nmaps;i++){ +struct prasmap*map=(struct prasmap*)bufp; +ptr_t vaddr=(ptr_t)(map->pr_vaddr); +unsigned long npages=map->pr_npage; +unsigned pagesize=map->pr_pagesize; +ptr_t limit; +#if defined(GC_NO_SYS_FAULT_H)&&defined(CPPCHECK) +GC_noop1(map->dummy1[0]+map->dummy2[0]); +#endif +#ifdef DEBUG_DIRTY_BITS +GC_log_printf( +"pr_vaddr=%p,npage=%lu,mflags=0x%x,pagesize=0x%x\n", +(void*)vaddr,npages,map->pr_mflags,pagesize); +#endif +bufp+=sizeof(struct prasmap); +limit=vaddr+pagesize*npages; +for (;(word)vaddr < (word)limit;vaddr+=pagesize){ +if ((*bufp++)&PG_MODIFIED){ +struct hblk*h; +ptr_t next_vaddr=vaddr+pagesize; +#ifdef DEBUG_DIRTY_BITS +GC_log_printf("dirty page at:%p\n",(void*)vaddr); +#endif +for (h=(struct hblk*)vaddr; +(word)h < (word)next_vaddr;h++){ +word index=PHT_HASH(h); +set_pht_entry_from_index(GC_grungy_pages,index); +} +} +} +bufp=(char*)(((word)bufp+(sizeof(long)-1)) +&~(word)(sizeof(long)-1)); +} +#ifdef DEBUG_DIRTY_BITS +GC_log_printf("Proc VDB read done\n"); +#endif +GC_or_pages(GC_written_pages,GC_grungy_pages); +#undef READ +} +#endif +#ifdef PCR_VDB +#include "vd/PCR_VD.h" +#define NPAGES (32*1024) +PCR_VD_DB GC_grungy_bits[NPAGES]; +STATIC ptr_t GC_vd_base=NULL; +GC_INNER GC_bool GC_dirty_init(void) +{ +GC_vd_base=GC_heap_sects[0].hs_start; +if (GC_vd_base==0){ +ABORT("Bad initial heap segment"); +} +if (PCR_VD_Start(HBLKSIZE,GC_vd_base,NPAGES*HBLKSIZE) +!=PCR_ERes_okay){ +ABORT("Dirty bit initialization failed"); +} +return TRUE; +} +#endif +#ifndef GC_DISABLE_INCREMENTAL +GC_INNER GC_bool GC_manual_vdb=FALSE; +GC_INNER void GC_dirty_inner(const void*p) +{ +word index=PHT_HASH(p); +#if defined(MPROTECT_VDB) +GC_ASSERT(GC_manual_vdb); +#endif +async_set_pht_entry_from_index(GC_dirty_pages,index); +} +GC_INNER void GC_read_dirty(GC_bool output_unneeded) +{ +if (GC_manual_vdb +#if defined(MPROTECT_VDB) +||!GC_GWW_AVAILABLE() +#endif +){ +if (!output_unneeded) +BCOPY(( void*)GC_dirty_pages,GC_grungy_pages, +sizeof(GC_dirty_pages)); +BZERO(( void*)GC_dirty_pages, +sizeof(GC_dirty_pages)); +#ifdef MPROTECT_VDB +if (!GC_manual_vdb) +GC_protect_heap(); +#endif +return; +} +#ifdef GWW_VDB +GC_gww_read_dirty(output_unneeded); +#elif defined(PROC_VDB) +GC_proc_read_dirty(output_unneeded); +#elif defined(PCR_VDB) +{ +static int onhs=0; +int nhs=GC_n_heap_sects; +for (;onhs < nhs;onhs++){ +PCR_VD_WriteProtectEnable( +GC_heap_sects[onhs].hs_start, +GC_heap_sects[onhs].hs_bytes); +} +} +if (PCR_VD_Clear(GC_vd_base,NPAGES*HBLKSIZE,GC_grungy_bits) +!=PCR_ERes_okay){ +ABORT("Dirty bit read failed"); +} +#endif +} +GC_INNER GC_bool GC_page_was_dirty(struct hblk*h) +{ +#ifdef PCR_VDB +if (!GC_manual_vdb){ +if ((word)h < (word)GC_vd_base +||(word)h>=(word)(GC_vd_base+NPAGES*HBLKSIZE)){ +return TRUE; +} +return GC_grungy_bits[h-(struct hblk*)GC_vd_base]&PCR_VD_DB_dirtyBit; +} +#elif defined(DEFAULT_VDB) +if (!GC_manual_vdb) +return TRUE; +#endif +return NULL==HDR(h) +||get_pht_entry_from_index(GC_grungy_pages,PHT_HASH(h)); +} +#if defined(CHECKSUMS)||defined(PROC_VDB) +GC_INNER GC_bool GC_page_was_ever_dirty(struct hblk*h) +{ +#if defined(GWW_VDB)||defined(PROC_VDB) +#ifdef MPROTECT_VDB +if (!GC_GWW_AVAILABLE()) +return TRUE; +#endif +return NULL==HDR(h) +||get_pht_entry_from_index(GC_written_pages,PHT_HASH(h)); +#else +(void)h; +return TRUE; +#endif +} +#endif +GC_INNER void 
GC_remove_protection(struct hblk*h,word nblocks, +GC_bool is_ptrfree) +{ +#ifdef PCR_VDB +(void)is_ptrfree; +if (!GC_auto_incremental) +return; +PCR_VD_WriteProtectDisable(h,nblocks*HBLKSIZE); +PCR_VD_WriteProtectEnable(h,nblocks*HBLKSIZE); +#elif defined(MPROTECT_VDB) +struct hblk*h_trunc; +struct hblk*h_end; +struct hblk*current; +if (!GC_auto_incremental||GC_GWW_AVAILABLE()) +return; +GC_ASSERT(GC_page_size!=0); +h_trunc=(struct hblk*)((word)h&~(GC_page_size-1)); +h_end=(struct hblk*)(((word)(h+nblocks)+GC_page_size - 1) +&~(GC_page_size - 1)); +if (h_end==h_trunc+1&& +get_pht_entry_from_index(GC_dirty_pages,PHT_HASH(h_trunc))){ +return; +} +for (current=h_trunc;(word)current < (word)h_end;++current){ +word index=PHT_HASH(current); +if (!is_ptrfree||(word)current < (word)h +||(word)current>=(word)(h+nblocks)){ +async_set_pht_entry_from_index(GC_dirty_pages,index); +} +} +UNPROTECT(h_trunc,(ptr_t)h_end - (ptr_t)h_trunc); +#else +(void)h;(void)nblocks;(void)is_ptrfree; +#endif +} +#endif +#if defined(MPROTECT_VDB)&&defined(DARWIN) +#include +#include +#include +#include +#include +EXTERN_C_BEGIN +extern boolean_t +exc_server(mach_msg_header_t*,mach_msg_header_t*); +extern kern_return_t +exception_raise(mach_port_t,mach_port_t,mach_port_t,exception_type_t, +exception_data_t,mach_msg_type_number_t); +extern kern_return_t +exception_raise_state(mach_port_t,mach_port_t,mach_port_t,exception_type_t, +exception_data_t,mach_msg_type_number_t, +thread_state_flavor_t*,thread_state_t, +mach_msg_type_number_t,thread_state_t, +mach_msg_type_number_t*); +extern kern_return_t +exception_raise_state_identity(mach_port_t,mach_port_t,mach_port_t, +exception_type_t,exception_data_t, +mach_msg_type_number_t,thread_state_flavor_t*, +thread_state_t,mach_msg_type_number_t, +thread_state_t,mach_msg_type_number_t*); +GC_API_OSCALL kern_return_t +catch_exception_raise(mach_port_t exception_port,mach_port_t thread, +mach_port_t task,exception_type_t exception, +exception_data_t code, +mach_msg_type_number_t code_count); +GC_API_OSCALL kern_return_t +catch_exception_raise_state(mach_port_name_t exception_port, +int exception,exception_data_t code, +mach_msg_type_number_t codeCnt,int flavor, +thread_state_t old_state,int old_stateCnt, +thread_state_t new_state,int new_stateCnt); +GC_API_OSCALL kern_return_t +catch_exception_raise_state_identity(mach_port_name_t exception_port, +mach_port_t thread,mach_port_t task,int exception, +exception_data_t code,mach_msg_type_number_t codeCnt, +int flavor,thread_state_t old_state,int old_stateCnt, +thread_state_t new_state,int new_stateCnt); +EXTERN_C_END +GC_API_OSCALL kern_return_t +catch_exception_raise_state(mach_port_name_t exception_port GC_ATTR_UNUSED, +int exception GC_ATTR_UNUSED,exception_data_t code GC_ATTR_UNUSED, +mach_msg_type_number_t codeCnt GC_ATTR_UNUSED,int flavor GC_ATTR_UNUSED, +thread_state_t old_state GC_ATTR_UNUSED,int old_stateCnt GC_ATTR_UNUSED, +thread_state_t new_state GC_ATTR_UNUSED,int new_stateCnt GC_ATTR_UNUSED) +{ +ABORT_RET("Unexpected catch_exception_raise_state invocation"); +return(KERN_INVALID_ARGUMENT); +} +GC_API_OSCALL kern_return_t +catch_exception_raise_state_identity( +mach_port_name_t exception_port GC_ATTR_UNUSED, +mach_port_t thread GC_ATTR_UNUSED,mach_port_t task GC_ATTR_UNUSED, +int exception GC_ATTR_UNUSED,exception_data_t code GC_ATTR_UNUSED, +mach_msg_type_number_t codeCnt GC_ATTR_UNUSED,int flavor GC_ATTR_UNUSED, +thread_state_t old_state GC_ATTR_UNUSED,int old_stateCnt GC_ATTR_UNUSED, +thread_state_t new_state 
GC_ATTR_UNUSED,int new_stateCnt GC_ATTR_UNUSED) +{ +ABORT_RET("Unexpected catch_exception_raise_state_identity invocation"); +return(KERN_INVALID_ARGUMENT); +} +#define MAX_EXCEPTION_PORTS 16 +static struct { +mach_msg_type_number_t count; +exception_mask_t masks[MAX_EXCEPTION_PORTS]; +exception_handler_t ports[MAX_EXCEPTION_PORTS]; +exception_behavior_t behaviors[MAX_EXCEPTION_PORTS]; +thread_state_flavor_t flavors[MAX_EXCEPTION_PORTS]; +} GC_old_exc_ports; +STATIC struct ports_s { +void (*volatile os_callback[3])(void); +mach_port_t exception; +#if defined(THREADS) +mach_port_t reply; +#endif +} GC_ports={ +{ +(void (*)(void))catch_exception_raise, +(void (*)(void))catch_exception_raise_state, +(void (*)(void))catch_exception_raise_state_identity +}, +#ifdef THREADS +0, +#endif +0 +}; +typedef struct { +mach_msg_header_t head; +} GC_msg_t; +typedef enum { +GC_MP_NORMAL, +GC_MP_DISCARDING, +GC_MP_STOPPED +} GC_mprotect_state_t; +#ifdef THREADS +#define ID_STOP 1 +#define ID_RESUME 2 +#define ID_ACK 3 +STATIC GC_mprotect_state_t GC_mprotect_state=GC_MP_NORMAL; +STATIC void GC_mprotect_thread_notify(mach_msg_id_t id) +{ +struct buf_s { +GC_msg_t msg; +mach_msg_trailer_t trailer; +} buf; +mach_msg_return_t r; +buf.msg.head.msgh_bits=MACH_MSGH_BITS(MACH_MSG_TYPE_MAKE_SEND,0); +buf.msg.head.msgh_size=sizeof(buf.msg); +buf.msg.head.msgh_remote_port=GC_ports.exception; +buf.msg.head.msgh_local_port=MACH_PORT_NULL; +buf.msg.head.msgh_id=id; +r=mach_msg(&buf.msg.head,MACH_SEND_MSG|MACH_RCV_MSG|MACH_RCV_LARGE, +sizeof(buf.msg),sizeof(buf),GC_ports.reply, +MACH_MSG_TIMEOUT_NONE,MACH_PORT_NULL); +if (r!=MACH_MSG_SUCCESS) +ABORT("mach_msg failed in GC_mprotect_thread_notify"); +if (buf.msg.head.msgh_id!=ID_ACK) +ABORT("Invalid ack in GC_mprotect_thread_notify"); +} +STATIC void GC_mprotect_thread_reply(void) +{ +GC_msg_t msg; +mach_msg_return_t r; +msg.head.msgh_bits=MACH_MSGH_BITS(MACH_MSG_TYPE_MAKE_SEND,0); +msg.head.msgh_size=sizeof(msg); +msg.head.msgh_remote_port=GC_ports.reply; +msg.head.msgh_local_port=MACH_PORT_NULL; +msg.head.msgh_id=ID_ACK; +r=mach_msg(&msg.head,MACH_SEND_MSG,sizeof(msg),0,MACH_PORT_NULL, +MACH_MSG_TIMEOUT_NONE,MACH_PORT_NULL); +if (r!=MACH_MSG_SUCCESS) +ABORT("mach_msg failed in GC_mprotect_thread_reply"); +} +GC_INNER void GC_mprotect_stop(void) +{ +GC_mprotect_thread_notify(ID_STOP); +} +GC_INNER void GC_mprotect_resume(void) +{ +GC_mprotect_thread_notify(ID_RESUME); +} +#else +#define GC_mprotect_state GC_MP_NORMAL +#endif +struct mp_reply_s { +mach_msg_header_t head; +char data[256]; +}; +struct mp_msg_s { +mach_msg_header_t head; +mach_msg_body_t msgh_body; +char data[1024]; +}; +STATIC void*GC_mprotect_thread(void*arg) +{ +mach_msg_return_t r; +struct mp_reply_s reply; +struct mp_msg_s msg; +mach_msg_id_t id; +if ((word)arg==GC_WORD_MAX)return 0; +#if defined(CPPCHECK) +reply.data[0]=0; +msg.data[0]=0; +#endif +#if defined(HAVE_PTHREAD_SETNAME_NP_WITHOUT_TID) +(void)pthread_setname_np("GC-mprotect"); +#endif +#if defined(THREADS)&&!defined(GC_NO_THREADS_DISCOVERY) +GC_darwin_register_mach_handler_thread(mach_thread_self()); +#endif +for(;;){ +r=mach_msg(&msg.head,MACH_RCV_MSG|MACH_RCV_LARGE| +(GC_mprotect_state==GC_MP_DISCARDING?MACH_RCV_TIMEOUT:0), +0,sizeof(msg),GC_ports.exception, +GC_mprotect_state==GC_MP_DISCARDING?0 +:MACH_MSG_TIMEOUT_NONE,MACH_PORT_NULL); +id=r==MACH_MSG_SUCCESS?msg.head.msgh_id:-1; +#if defined(THREADS) +if(GC_mprotect_state==GC_MP_DISCARDING){ +if(r==MACH_RCV_TIMED_OUT){ +GC_mprotect_state=GC_MP_STOPPED; +GC_mprotect_thread_reply(); 
+continue; +} +if(r==MACH_MSG_SUCCESS&&(id==ID_STOP||id==ID_RESUME)) +ABORT("Out of order mprotect thread request"); +} +#endif +if (r!=MACH_MSG_SUCCESS){ +ABORT_ARG2("mach_msg failed", +":errcode=%d (%s)",(int)r,mach_error_string(r)); +} +switch(id){ +#if defined(THREADS) +case ID_STOP: +if(GC_mprotect_state!=GC_MP_NORMAL) +ABORT("Called mprotect_stop when state wasn't normal"); +GC_mprotect_state=GC_MP_DISCARDING; +break; +case ID_RESUME: +if(GC_mprotect_state!=GC_MP_STOPPED) +ABORT("Called mprotect_resume when state wasn't stopped"); +GC_mprotect_state=GC_MP_NORMAL; +GC_mprotect_thread_reply(); +break; +#endif +default: +if(!exc_server(&msg.head,&reply.head)) +ABORT("exc_server failed"); +r=mach_msg(&reply.head,MACH_SEND_MSG,reply.head.msgh_size,0, +MACH_PORT_NULL,MACH_MSG_TIMEOUT_NONE, +MACH_PORT_NULL); +if(r!=MACH_MSG_SUCCESS){ +#ifdef BROKEN_EXCEPTION_HANDLING +GC_err_printf("mach_msg failed with %d %s while sending " +"exc reply\n",(int)r,mach_error_string(r)); +#else +ABORT("mach_msg failed while sending exception reply"); +#endif +} +} +} +} +#ifdef BROKEN_EXCEPTION_HANDLING +STATIC int GC_sigbus_count=0; +STATIC void GC_darwin_sigbus(int num,siginfo_t*sip,void*context) +{ +if (num!=SIGBUS) +ABORT("Got a non-sigbus signal in the sigbus handler"); +if (GC_sigbus_count>=8){ +ABORT("Got more than 8 SIGBUSs in a row!"); +} else { +GC_sigbus_count++; +WARN("Ignoring SIGBUS\n",0); +} +} +#endif +GC_INNER GC_bool GC_dirty_init(void) +{ +kern_return_t r; +mach_port_t me; +pthread_t thread; +pthread_attr_t attr; +exception_mask_t mask; +#ifdef CAN_HANDLE_FORK +if (GC_handle_fork){ +WARN("Can't turn on GC incremental mode as fork()" +" handling requested\n",0); +return FALSE; +} +#endif +GC_VERBOSE_LOG_PRINTF("Initializing mach/darwin mprotect" +" virtual dirty bit implementation\n"); +#ifdef BROKEN_EXCEPTION_HANDLING +WARN("Enabling workarounds for various darwin " +"exception handling bugs\n",0); +#endif +if (GC_page_size % HBLKSIZE!=0){ +ABORT("Page size not multiple of HBLKSIZE"); +} +GC_task_self=me=mach_task_self(); +r=mach_port_allocate(me,MACH_PORT_RIGHT_RECEIVE,&GC_ports.exception); +if (r!=KERN_SUCCESS) +ABORT("mach_port_allocate failed (exception port)"); +r=mach_port_insert_right(me,GC_ports.exception,GC_ports.exception, +MACH_MSG_TYPE_MAKE_SEND); +if (r!=KERN_SUCCESS) +ABORT("mach_port_insert_right failed (exception port)"); +#if defined(THREADS) +r=mach_port_allocate(me,MACH_PORT_RIGHT_RECEIVE,&GC_ports.reply); +if(r!=KERN_SUCCESS) +ABORT("mach_port_allocate failed (reply port)"); +#endif +mask=EXC_MASK_BAD_ACCESS; +r=task_get_exception_ports(me,mask,GC_old_exc_ports.masks, +&GC_old_exc_ports.count,GC_old_exc_ports.ports, +GC_old_exc_ports.behaviors, +GC_old_exc_ports.flavors); +if (r!=KERN_SUCCESS) +ABORT("task_get_exception_ports failed"); +r=task_set_exception_ports(me,mask,GC_ports.exception,EXCEPTION_DEFAULT, +GC_MACH_THREAD_STATE); +if (r!=KERN_SUCCESS) +ABORT("task_set_exception_ports failed"); +if (pthread_attr_init(&attr)!=0) +ABORT("pthread_attr_init failed"); +if (pthread_attr_setdetachstate(&attr,PTHREAD_CREATE_DETACHED)!=0) +ABORT("pthread_attr_setdetachedstate failed"); +#undef pthread_create +if (pthread_create(&thread,&attr,GC_mprotect_thread,NULL)!=0) +ABORT("pthread_create failed"); +(void)pthread_attr_destroy(&attr); +#ifdef BROKEN_EXCEPTION_HANDLING +{ +struct sigaction sa,oldsa; +sa.sa_handler=(SIG_HNDLR_PTR)GC_darwin_sigbus; +sigemptyset(&sa.sa_mask); +sa.sa_flags=SA_RESTART|SA_SIGINFO; +if (sigaction(SIGBUS,&sa,&oldsa)< 0) +ABORT("sigaction failed"); +if 
(oldsa.sa_handler!=(SIG_HNDLR_PTR)(signed_word)SIG_DFL){ +GC_VERBOSE_LOG_PRINTF("Replaced other SIGBUS handler\n"); +} +} +#endif +#if defined(CPPCHECK) +GC_noop1((word)GC_ports.os_callback[0]); +#endif +return TRUE; +} +STATIC kern_return_t GC_forward_exception(mach_port_t thread,mach_port_t task, +exception_type_t exception, +exception_data_t data, +mach_msg_type_number_t data_count) +{ +unsigned int i; +kern_return_t r; +mach_port_t port; +exception_behavior_t behavior; +thread_state_flavor_t flavor; +thread_state_data_t thread_state; +mach_msg_type_number_t thread_state_count=THREAD_STATE_MAX; +for (i=0;i < GC_old_exc_ports.count;i++) +if (GC_old_exc_ports.masks[i]&(1< 0?code[0]:-1, +code_count > 1?code[1]:-1); +#endif +return FWD(); +} +r=thread_get_state(thread,flavor,(natural_t*)&exc_state, +&exc_state_count); +if(r!=KERN_SUCCESS){ +#ifdef BROKEN_EXCEPTION_HANDLING +GC_err_printf("thread_get_state failed in catch_exception_raise\n"); +return KERN_SUCCESS; +#else +ABORT("thread_get_state failed in catch_exception_raise"); +#endif +} +addr=(char*)exc_state.DARWIN_EXC_STATE_DAR; +if (!is_header_found_async(addr)){ +#ifdef BROKEN_EXCEPTION_HANDLING +static char*last_fault; +static int last_fault_count; +if(addr!=last_fault){ +last_fault=addr; +last_fault_count=0; +} +if(++last_fault_count < 32){ +if(last_fault_count==1) +WARN("Ignoring KERN_PROTECTION_FAILURE at %p\n",addr); +return KERN_SUCCESS; +} +GC_err_printf("Unexpected KERN_PROTECTION_FAILURE at %p;aborting...\n", +(void*)addr); +EXIT(); +#else +return FWD(); +#endif +} +#ifdef BROKEN_EXCEPTION_HANDLING +GC_sigbus_count=0; +#endif +GC_ASSERT(GC_page_size!=0); +if (GC_mprotect_state==GC_MP_NORMAL){ +struct hblk*h=(struct hblk*)((word)addr&~(GC_page_size-1)); +size_t i; +UNPROTECT(h,GC_page_size); +for (i=0;i < divHBLKSZ(GC_page_size);i++){ +word index=PHT_HASH(h+i); +async_set_pht_entry_from_index(GC_dirty_pages,index); +} +} else if (GC_mprotect_state==GC_MP_DISCARDING){ +} else { +GC_err_printf("KERN_PROTECTION_FAILURE while world is stopped\n"); +return FWD(); +} +return KERN_SUCCESS; +} +#undef FWD +#ifndef NO_DESC_CATCH_EXCEPTION_RAISE +__asm__(".desc _catch_exception_raise,0x10"); +__asm__(".desc _catch_exception_raise_state,0x10"); +__asm__(".desc _catch_exception_raise_state_identity,0x10"); +#endif +#endif +#ifndef HAVE_INCREMENTAL_PROTECTION_NEEDS +GC_API int GC_CALL GC_incremental_protection_needs(void) +{ +GC_ASSERT(GC_is_initialized); +return GC_PROTECTS_NONE; +} +#endif +#ifdef ECOS +#undef sbrk +#endif +GC_API void GC_CALL GC_set_pages_executable(int value) +{ +GC_ASSERT(!GC_is_initialized); +GC_pages_executable=(GC_bool)(value!=0); +} +GC_API int GC_CALL GC_get_pages_executable(void) +{ +#ifdef IGNORE_PAGES_EXECUTABLE +return 1; +#else +return (int)GC_pages_executable; +#endif +} +#if defined(I386)&&defined(LINUX)&&defined(SAVE_CALL_CHAIN) +#include +struct frame { +struct frame*fr_savfp; +long fr_savpc; +#if NARGS > 0 +long fr_arg[NARGS]; +#endif +}; +#endif +#if defined(SPARC) +#if defined(LINUX) +#include +#if defined(SAVE_CALL_CHAIN) +struct frame { +long fr_local[8]; +long fr_arg[6]; +struct frame*fr_savfp; +long fr_savpc; +#ifndef __arch64__ +char*fr_stret; +#endif +long fr_argd[6]; +long fr_argx[0]; +}; +#endif +#elif defined (DRSNX) +#include +#elif defined(OPENBSD) +#include +#elif defined(FREEBSD)||defined(NETBSD) +#include +#else +#include +#endif +#if NARGS > 6 +#error We only know how to get the first 6 arguments +#endif +#endif +#ifdef NEED_CALLINFO +#ifdef LINUX +#include +#endif +#endif +#if 
defined(GC_HAVE_BUILTIN_BACKTRACE) +#ifdef _MSC_VER +#ifndef GC_MSVC_DBG_H +#define GC_MSVC_DBG_H +#include +#ifdef __cplusplus +extern "C" { +#endif +#if!MSVC_DBG_DLL +#define MSVC_DBG_EXPORT +#elif MSVC_DBG_BUILD +#define MSVC_DBG_EXPORT __declspec(dllexport) +#else +#define MSVC_DBG_EXPORT __declspec(dllimport) +#endif +#ifndef MAX_SYM_NAME +#define MAX_SYM_NAME 2000 +#endif +typedef void*HANDLE; +typedef struct _CONTEXT CONTEXT; +MSVC_DBG_EXPORT size_t GetStackFrames(size_t skip,void*frames[],size_t maxFrames); +MSVC_DBG_EXPORT size_t GetStackFramesFromContext(HANDLE hProcess,HANDLE hThread,CONTEXT*context,size_t skip,void*frames[],size_t maxFrames); +MSVC_DBG_EXPORT size_t GetModuleNameFromAddress(void*address,char*moduleName,size_t size); +MSVC_DBG_EXPORT size_t GetModuleNameFromStack(size_t skip,char*moduleName,size_t size); +MSVC_DBG_EXPORT size_t GetSymbolNameFromAddress(void*address,char*symbolName,size_t size,size_t*offsetBytes); +MSVC_DBG_EXPORT size_t GetSymbolNameFromStack(size_t skip,char*symbolName,size_t size,size_t*offsetBytes); +MSVC_DBG_EXPORT size_t GetFileLineFromAddress(void*address,char*fileName,size_t size,size_t*lineNumber,size_t*offsetBytes); +MSVC_DBG_EXPORT size_t GetFileLineFromStack(size_t skip,char*fileName,size_t size,size_t*lineNumber,size_t*offsetBytes); +MSVC_DBG_EXPORT size_t GetDescriptionFromAddress(void*address,const char*format,char*description,size_t size); +MSVC_DBG_EXPORT size_t GetDescriptionFromStack(void*const frames[],size_t count,const char*format,char*description[],size_t size); +MSVC_DBG_EXPORT int backtrace(void*addresses[],int count); +MSVC_DBG_EXPORT char**backtrace_symbols(void*const addresses[],int count); +#ifdef __cplusplus +} +#endif +#endif +#else +#include +#endif +#endif +#ifdef SAVE_CALL_CHAIN +#if NARGS==0&&NFRAMES % 2==0&&defined(GC_HAVE_BUILTIN_BACKTRACE) +#ifdef REDIRECT_MALLOC +#ifdef THREADS +__thread +#endif +GC_bool GC_in_save_callers=FALSE; +#endif +GC_INNER void GC_save_callers(struct callinfo info[NFRAMES]) +{ +void*tmp_info[NFRAMES+1]; +int npcs,i; +#define IGNORE_FRAMES 1 +#ifdef REDIRECT_MALLOC +if (GC_in_save_callers){ +info[0].ci_pc=(word)(&GC_save_callers); +for (i=1;i < NFRAMES;++i)info[i].ci_pc=0; +return; +} +GC_in_save_callers=TRUE; +#endif +GC_ASSERT(I_HOLD_LOCK()); +GC_STATIC_ASSERT(sizeof(struct callinfo)==sizeof(void*)); +npcs=backtrace((void**)tmp_info,NFRAMES+IGNORE_FRAMES); +if (npcs > IGNORE_FRAMES) +BCOPY(&tmp_info[IGNORE_FRAMES],info, +(npcs - IGNORE_FRAMES)*sizeof(void*)); +for (i=npcs - IGNORE_FRAMES;i < NFRAMES;++i)info[i].ci_pc=0; +#ifdef REDIRECT_MALLOC +GC_in_save_callers=FALSE; +#endif +} +#else +#if (defined(OPENBSD)||defined(NETBSD)||defined(FREEBSD))&&defined(SPARC) +#define FR_SAVFP fr_fp +#define FR_SAVPC fr_pc +#else +#define FR_SAVFP fr_savfp +#define FR_SAVPC fr_savpc +#endif +#if defined(SPARC)&&(defined(__arch64__)||defined(__sparcv9)) +#define BIAS 2047 +#else +#define BIAS 0 +#endif +GC_INNER void GC_save_callers(struct callinfo info[NFRAMES]) +{ +struct frame*frame; +struct frame*fp; +int nframes=0; +#ifdef I386 +asm("movl %%ebp,%0":"=r"(frame)); +fp=frame; +#else +frame=(struct frame*)GC_save_regs_in_stack(); +fp=(struct frame*)((long)frame->FR_SAVFP+BIAS); +#endif +for (;!((word)fp HOTTER_THAN (word)frame) +#ifndef THREADS +&&!((word)GC_stackbottom HOTTER_THAN (word)fp) +#elif defined(STACK_GROWS_UP) +&&fp!=NULL +#endif +&&nframes < NFRAMES; +fp=(struct frame*)((long)fp->FR_SAVFP+BIAS),nframes++){ +#if NARGS > 0 +int i; +#endif +info[nframes].ci_pc=fp->FR_SAVPC; +#if NARGS > 
0 +for (i=0;i < NARGS;i++){ +info[nframes].ci_arg[i]=~(fp->fr_arg[i]); +} +#endif +} +if (nframes < NFRAMES)info[nframes].ci_pc=0; +} +#endif +#endif +#ifdef NEED_CALLINFO +GC_INNER void GC_print_callers(struct callinfo info[NFRAMES]) +{ +int i; +static int reentry_count=0; +DCL_LOCK_STATE; +LOCK(); +++reentry_count; +UNLOCK(); +#if NFRAMES==1 +GC_err_printf("\tCaller at allocation:\n"); +#else +GC_err_printf("\tCall chain at allocation:\n"); +#endif +for (i=0;i < NFRAMES;i++){ +#if defined(LINUX)&&!defined(SMALL_CONFIG) +GC_bool stop=FALSE; +#endif +if (0==info[i].ci_pc) +break; +#if NARGS > 0 +{ +int j; +GC_err_printf("\t\targs:"); +for (j=0;j < NARGS;j++){ +if (j!=0)GC_err_printf(","); +GC_err_printf("%d (0x%X)",~(info[i].ci_arg[j]), +~(info[i].ci_arg[j])); +} +GC_err_printf("\n"); +} +#endif +if (reentry_count > 1){ +GC_err_printf("\t\t##PC##=0x%lx\n", +(unsigned long)info[i].ci_pc); +continue; +} +{ +char buf[40]; +char*name; +#if defined(GC_HAVE_BUILTIN_BACKTRACE)&&!defined(GC_BACKTRACE_SYMBOLS_BROKEN) +char**sym_name= +backtrace_symbols((void**)(&(info[i].ci_pc)),1); +if (sym_name!=NULL){ +name=sym_name[0]; +} else +#endif +{ +(void)snprintf(buf,sizeof(buf),"##PC##=0x%lx", +(unsigned long)info[i].ci_pc); +buf[sizeof(buf)- 1]='\0'; +name=buf; +} +#if defined(LINUX)&&!defined(SMALL_CONFIG) +do { +FILE*pipe; +#define EXE_SZ 100 +static char exe_name[EXE_SZ]; +#define CMD_SZ 200 +char cmd_buf[CMD_SZ]; +#define RESULT_SZ 200 +static char result_buf[RESULT_SZ]; +size_t result_len; +char*old_preload; +#define PRELOAD_SZ 200 +char preload_buf[PRELOAD_SZ]; +static GC_bool found_exe_name=FALSE; +static GC_bool will_fail=FALSE; +if (will_fail) +break; +if (!found_exe_name){ +int ret_code=readlink("/proc/self/exe",exe_name,EXE_SZ); +if (ret_code < 0||ret_code>=EXE_SZ +||exe_name[0]!='/'){ +will_fail=TRUE; +break; +} +exe_name[ret_code]='\0'; +found_exe_name=TRUE; +} +(void)snprintf(cmd_buf,sizeof(cmd_buf), +"/usr/bin/addr2line -f -e %s 0x%lx", +exe_name,(unsigned long)info[i].ci_pc); +cmd_buf[sizeof(cmd_buf)- 1]='\0'; +old_preload=GETENV("LD_PRELOAD"); +if (0!=old_preload){ +size_t old_len=strlen(old_preload); +if (old_len>=PRELOAD_SZ){ +will_fail=TRUE; +break; +} +BCOPY(old_preload,preload_buf,old_len+1); +unsetenv ("LD_PRELOAD"); +} +pipe=popen(cmd_buf,"r"); +if (0!=old_preload +&&0!=setenv ("LD_PRELOAD",preload_buf,0)){ +WARN("Failed to reset LD_PRELOAD\n",0); +} +if (NULL==pipe){ +will_fail=TRUE; +break; +} +result_len=fread(result_buf,1,RESULT_SZ - 1,pipe); +(void)pclose(pipe); +if (0==result_len){ +will_fail=TRUE; +break; +} +if (result_buf[result_len - 1]=='\n')--result_len; +result_buf[result_len]=0; +if (result_buf[0]=='?' 
+||(result_buf[result_len-2]==':' +&&result_buf[result_len-1]=='0')) +break; +{ +char*nl=strchr(result_buf,'\n'); +if (nl!=NULL +&&(word)nl < (word)(result_buf+result_len)){ +*nl=':'; +} +if (strncmp(result_buf,"main", +nl!=NULL +?(size_t)((word)nl +- COVERT_DATAFLOW(result_buf)) +:result_len)==0){ +stop=TRUE; +} +} +if (result_len < RESULT_SZ - 25){ +(void)snprintf(&result_buf[result_len], +sizeof(result_buf)- result_len, +" [0x%lx]",(unsigned long)info[i].ci_pc); +result_buf[sizeof(result_buf)- 1]='\0'; +} +#if defined(CPPCHECK) +GC_noop1((unsigned char)name[0]); +#endif +name=result_buf; +} while (0); +#endif +GC_err_printf("\t\t%s\n",name); +#if defined(GC_HAVE_BUILTIN_BACKTRACE)&&!defined(GC_BACKTRACE_SYMBOLS_BROKEN) +if (sym_name!=NULL) +free(sym_name); +#endif +} +#if defined(LINUX)&&!defined(SMALL_CONFIG) +if (stop) +break; +#endif +} +LOCK(); +--reentry_count; +UNLOCK(); +} +#endif +#if defined(LINUX)&&defined(__ELF__)&&!defined(SMALL_CONFIG) +void GC_print_address_map(void) +{ +char*maps; +GC_err_printf("----------Begin address map----------\n"); +maps=GC_get_maps(); +GC_err_puts(maps!=NULL?maps:"Failed to get map!\n"); +GC_err_printf("----------End address map----------\n"); +} +#endif +#if defined(THREAD_LOCAL_ALLOC) +#ifndef THREADS +#error "invalid config - THREAD_LOCAL_ALLOC requires GC_THREADS" +#endif +#ifndef GC_THREAD_LOCAL_ALLOC_H +#define GC_THREAD_LOCAL_ALLOC_H +#ifdef THREAD_LOCAL_ALLOC +#if defined(USE_HPUX_TLS) +#error USE_HPUX_TLS macro was replaced by USE_COMPILER_TLS +#endif +#include +EXTERN_C_BEGIN +#if!defined(USE_PTHREAD_SPECIFIC)&&!defined(USE_WIN32_SPECIFIC)&&!defined(USE_WIN32_COMPILER_TLS)&&!defined(USE_COMPILER_TLS)&&!defined(USE_CUSTOM_SPECIFIC) +#if defined(MSWIN32)||defined(MSWINCE)||defined(CYGWIN32) +#if defined(CYGWIN32)&&GC_GNUC_PREREQ(4,0) +#if defined(__clang__) +#define USE_PTHREAD_SPECIFIC +#else +#define USE_COMPILER_TLS +#endif +#elif defined(__GNUC__)||defined(MSWINCE) +#define USE_WIN32_SPECIFIC +#else +#define USE_WIN32_COMPILER_TLS +#endif +#elif (defined(LINUX)&&!defined(ARM32)&&!defined(AVR32)&&GC_GNUC_PREREQ(3,3)&&!(defined(__clang__)&&defined(HOST_ANDROID)))||(defined(FREEBSD)||(defined(NETBSD)&&__NetBSD_Version__>=600000000)&&(GC_GNUC_PREREQ(4,4)||GC_CLANG_PREREQ(3,9)))||(defined(HOST_ANDROID)&&defined(ARM32)&&(GC_GNUC_PREREQ(4,6)||GC_CLANG_PREREQ_FULL(3,8,256229))) +#define USE_COMPILER_TLS +#elif defined(GC_DGUX386_THREADS)||defined(GC_OSF1_THREADS)||defined(GC_AIX_THREADS)||defined(GC_DARWIN_THREADS)||defined(GC_FREEBSD_THREADS)||defined(GC_NETBSD_THREADS)||defined(GC_LINUX_THREADS)||defined(GC_HAIKU_THREADS)||defined(GC_RTEMS_PTHREADS) +#define USE_PTHREAD_SPECIFIC +#elif defined(GC_HPUX_THREADS) +#ifdef __GNUC__ +#define USE_PTHREAD_SPECIFIC +#else +#define USE_COMPILER_TLS +#endif +#else +#define USE_CUSTOM_SPECIFIC +#endif +#endif +#ifndef THREAD_FREELISTS_KINDS +#ifdef ENABLE_DISCLAIM +#define THREAD_FREELISTS_KINDS (NORMAL+2) +#else +#define THREAD_FREELISTS_KINDS (NORMAL+1) +#endif +#endif +typedef struct thread_local_freelists { +void*_freelists[THREAD_FREELISTS_KINDS][TINY_FREELISTS]; +#define ptrfree_freelists _freelists[PTRFREE] +#define normal_freelists _freelists[NORMAL] +#ifdef GC_GCJ_SUPPORT +void*gcj_freelists[TINY_FREELISTS]; +#define ERROR_FL ((void*)GC_WORD_MAX) +#endif +#define DIRECT_GRANULES (HBLKSIZE/GRANULE_BYTES) +}*GC_tlfs; +#if defined(USE_PTHREAD_SPECIFIC) +#define GC_getspecific pthread_getspecific +#define GC_setspecific pthread_setspecific +#define GC_key_create pthread_key_create +#define 
GC_remove_specific(key)pthread_setspecific(key,NULL) +#define GC_remove_specific_after_fork(key,t)(void)0 +typedef pthread_key_t GC_key_t; +#elif defined(USE_COMPILER_TLS)||defined(USE_WIN32_COMPILER_TLS) +#define GC_getspecific(x)(x) +#define GC_setspecific(key,v)((key)=(v),0) +#define GC_key_create(key,d)0 +#define GC_remove_specific(key) +#define GC_remove_specific_after_fork(key,t)(void)0 +typedef void*GC_key_t; +#elif defined(USE_WIN32_SPECIFIC) +#define GC_getspecific TlsGetValue +#define GC_setspecific(key,v)!TlsSetValue(key,v) +#ifndef TLS_OUT_OF_INDEXES +#define TLS_OUT_OF_INDEXES (DWORD)0xFFFFFFFF +#endif +#define GC_key_create(key,d)((d)!=0||(*(key)=TlsAlloc())==TLS_OUT_OF_INDEXES?-1:0) +#define GC_remove_specific(key) +#define GC_remove_specific_after_fork(key,t)(void)0 +typedef DWORD GC_key_t; +#elif defined(USE_CUSTOM_SPECIFIC) +EXTERN_C_END +#ifndef GC_SPECIFIC_H +#define GC_SPECIFIC_H +#include +EXTERN_C_BEGIN +#define MALLOC_CLEAR(n)GC_INTERNAL_MALLOC(n,NORMAL) +#define TS_CACHE_SIZE 1024 +#define CACHE_HASH(n)((((n)>>8)^(n))&(TS_CACHE_SIZE - 1)) +#define TS_HASH_SIZE 1024 +#define HASH(p)((unsigned)((((word)(p))>>8)^(word)(p))&(TS_HASH_SIZE - 1)) +#ifdef GC_ASSERTIONS +typedef GC_hidden_pointer ts_entry_value_t; +#define TS_HIDE_VALUE(p)GC_HIDE_POINTER(p) +#define TS_REVEAL_PTR(p)GC_REVEAL_POINTER(p) +#else +typedef void*ts_entry_value_t; +#define TS_HIDE_VALUE(p)(p) +#define TS_REVEAL_PTR(p)(p) +#endif +typedef struct thread_specific_entry { +volatile AO_t qtid; +ts_entry_value_t value; +struct thread_specific_entry*next; +pthread_t thread; +} tse; +#define quick_thread_id()(((word)GC_approx_sp())>>12) +#define INVALID_QTID ((word)0) +#define INVALID_THREADID ((pthread_t)0) +union ptse_ao_u { +tse*p; +volatile AO_t ao; +}; +typedef struct thread_specific_data { +tse*volatile cache[TS_CACHE_SIZE]; +union ptse_ao_u hash[TS_HASH_SIZE]; +pthread_mutex_t lock; +} tsd; +typedef tsd*GC_key_t; +#define GC_key_create(key,d)GC_key_create_inner(key) +GC_INNER int GC_key_create_inner(tsd**key_ptr); +GC_INNER int GC_setspecific(tsd*key,void*value); +#define GC_remove_specific(key)GC_remove_specific_after_fork(key,pthread_self()) +GC_INNER void GC_remove_specific_after_fork(tsd*key,pthread_t t); +GC_INNER void*GC_slow_getspecific(tsd*key,word qtid, +tse*volatile*cache_entry); +GC_INLINE void*GC_getspecific(tsd*key) +{ +word qtid=quick_thread_id(); +tse*volatile*entry_ptr=&key->cache[CACHE_HASH(qtid)]; +tse*entry=*entry_ptr; +GC_ASSERT(qtid!=INVALID_QTID); +if (EXPECT(entry->qtid==qtid,TRUE)){ +GC_ASSERT(entry->thread==pthread_self()); +return TS_REVEAL_PTR(entry->value); +} +return GC_slow_getspecific(key,qtid,entry_ptr); +} +EXTERN_C_END +#endif +EXTERN_C_BEGIN +#else +#error implement me +#endif +GC_INNER void GC_init_thread_local(GC_tlfs p); +GC_INNER void GC_destroy_thread_local(GC_tlfs p); +GC_INNER void GC_mark_thread_local_fls_for(GC_tlfs p); +#ifdef GC_ASSERTIONS +GC_bool GC_is_thread_tsd_valid(void*tsd); +void GC_check_tls_for(GC_tlfs p); +#if defined(USE_CUSTOM_SPECIFIC) +void GC_check_tsd_marks(tsd*key); +#endif +#endif +#ifndef GC_ATTR_TLS_FAST +#define GC_ATTR_TLS_FAST +#endif +extern +#if defined(USE_COMPILER_TLS) +__thread GC_ATTR_TLS_FAST +#elif defined(USE_WIN32_COMPILER_TLS) +__declspec(thread)GC_ATTR_TLS_FAST +#endif +GC_key_t GC_thread_key; +EXTERN_C_END +#endif +#endif +#include +#if defined(USE_COMPILER_TLS) +__thread GC_ATTR_TLS_FAST +#elif defined(USE_WIN32_COMPILER_TLS) +__declspec(thread)GC_ATTR_TLS_FAST +#endif +GC_key_t GC_thread_key; +static GC_bool 
keys_initialized; +static void return_single_freelist(void*fl,void**gfl) +{ +if (*gfl==0){ +*gfl=fl; +} else { +void*q,**qptr; +GC_ASSERT(GC_size(fl)==GC_size(*gfl)); +qptr=&(obj_link(fl)); +while ((word)(q=*qptr)>=HBLKSIZE) +qptr=&(obj_link(q)); +GC_ASSERT(0==q); +*qptr=*gfl; +*gfl=fl; +} +} +static void return_freelists(void**fl,void**gfl) +{ +int i; +for (i=1;i < TINY_FREELISTS;++i){ +if ((word)(fl[i])>=HBLKSIZE){ +return_single_freelist(fl[i],&gfl[i]); +} +fl[i]=(ptr_t)HBLKSIZE; +} +#ifdef GC_GCJ_SUPPORT +if (fl[0]==ERROR_FL)return; +#endif +if ((word)(fl[0])>=HBLKSIZE){ +return_single_freelist(fl[0],&gfl[1]); +} +} +#ifdef USE_PTHREAD_SPECIFIC +static void reset_thread_key(void*v){ +pthread_setspecific(GC_thread_key,v); +} +#else +#define reset_thread_key 0 +#endif +GC_INNER void GC_init_thread_local(GC_tlfs p) +{ +int i,j,res; +GC_ASSERT(I_HOLD_LOCK()); +if (!EXPECT(keys_initialized,TRUE)){ +GC_ASSERT((word)&GC_thread_key % sizeof(word)==0); +res=GC_key_create(&GC_thread_key,reset_thread_key); +if (COVERT_DATAFLOW(res)!=0){ +ABORT("Failed to create key for local allocator"); +} +keys_initialized=TRUE; +} +res=GC_setspecific(GC_thread_key,p); +if (COVERT_DATAFLOW(res)!=0){ +ABORT("Failed to set thread specific allocation pointers"); +} +for (j=0;j < TINY_FREELISTS;++j){ +for (i=0;i < THREAD_FREELISTS_KINDS;++i){ +p->_freelists[i][j]=(void*)(word)1; +} +#ifdef GC_GCJ_SUPPORT +p->gcj_freelists[j]=(void*)(word)1; +#endif +} +#ifdef GC_GCJ_SUPPORT +p->gcj_freelists[0]=ERROR_FL; +#endif +} +GC_INNER void GC_destroy_thread_local(GC_tlfs p) +{ +int k; +GC_STATIC_ASSERT(THREAD_FREELISTS_KINDS<=MAXOBJKINDS); +for (k=0;k < THREAD_FREELISTS_KINDS;++k){ +if (k==(int)GC_n_kinds) +break; +return_freelists(p->_freelists[k],GC_obj_kinds[k].ok_freelist); +} +#ifdef GC_GCJ_SUPPORT +return_freelists(p->gcj_freelists,(void**)GC_gcjobjfreelist); +#endif +} +GC_API GC_ATTR_MALLOC void*GC_CALL GC_malloc_kind(size_t bytes,int kind) +{ +size_t granules; +void*tsd; +void*result; +#if MAXOBJKINDS > THREAD_FREELISTS_KINDS +if (EXPECT(kind>=THREAD_FREELISTS_KINDS,FALSE)){ +return GC_malloc_kind_global(bytes,kind); +} +#endif +#if!defined(USE_PTHREAD_SPECIFIC)&&!defined(USE_WIN32_SPECIFIC) +{ +GC_key_t k=GC_thread_key; +if (EXPECT(0==k,FALSE)){ +return GC_malloc_kind_global(bytes,kind); +} +tsd=GC_getspecific(k); +} +#else +if (!EXPECT(keys_initialized,TRUE)) +return GC_malloc_kind_global(bytes,kind); +tsd=GC_getspecific(GC_thread_key); +#endif +#if!defined(USE_COMPILER_TLS)&&!defined(USE_WIN32_COMPILER_TLS) +if (EXPECT(0==tsd,FALSE)){ +return GC_malloc_kind_global(bytes,kind); +} +#endif +GC_ASSERT(GC_is_initialized); +GC_ASSERT(GC_is_thread_tsd_valid(tsd)); +granules=ROUNDED_UP_GRANULES(bytes); +#if defined(CPPCHECK) +#define MALLOC_KIND_PTRFREE_INIT (void*)1 +#else +#define MALLOC_KIND_PTRFREE_INIT NULL +#endif +GC_FAST_MALLOC_GRANS(result,granules, +((GC_tlfs)tsd)->_freelists[kind],DIRECT_GRANULES, +kind,GC_malloc_kind_global(bytes,kind), +(void)(kind==PTRFREE?MALLOC_KIND_PTRFREE_INIT +:(obj_link(result)=0))); +#ifdef LOG_ALLOCS +GC_log_printf("GC_malloc_kind(%lu,%d)returned %p,recent GC #%lu\n", +(unsigned long)bytes,kind,result, +(unsigned long)GC_gc_no); +#endif +return result; +} +#ifdef GC_GCJ_SUPPORT +GC_API GC_ATTR_MALLOC void*GC_CALL GC_gcj_malloc(size_t bytes, +void*ptr_to_struct_containing_descr) +{ +if (EXPECT(GC_incremental,FALSE)){ +return GC_core_gcj_malloc(bytes,ptr_to_struct_containing_descr); +} else { +size_t granules=ROUNDED_UP_GRANULES(bytes); +void*result; +void**tiny_fl; 
+GC_ASSERT(GC_gcjobjfreelist!=NULL); +tiny_fl=((GC_tlfs)GC_getspecific(GC_thread_key))->gcj_freelists; +GC_FAST_MALLOC_GRANS(result,granules,tiny_fl,DIRECT_GRANULES, +GC_gcj_kind, +GC_core_gcj_malloc(bytes, +ptr_to_struct_containing_descr), +{AO_compiler_barrier(); +*(void**)result=ptr_to_struct_containing_descr;}); +return result; +} +} +#endif +GC_INNER void GC_mark_thread_local_fls_for(GC_tlfs p) +{ +ptr_t q; +int i,j; +for (j=0;j < TINY_FREELISTS;++j){ +for (i=0;i < THREAD_FREELISTS_KINDS;++i){ +q=(ptr_t)AO_load((volatile AO_t*)&p->_freelists[i][j]); +if ((word)q > HBLKSIZE) +GC_set_fl_marks(q); +} +#ifdef GC_GCJ_SUPPORT +if (EXPECT(j > 0,TRUE)){ +q=(ptr_t)AO_load((volatile AO_t*)&p->gcj_freelists[j]); +if ((word)q > HBLKSIZE) +GC_set_fl_marks(q); +} +#endif +} +} +#if defined(GC_ASSERTIONS) +void GC_check_tls_for(GC_tlfs p) +{ +int i,j; +for (j=1;j < TINY_FREELISTS;++j){ +for (i=0;i < THREAD_FREELISTS_KINDS;++i){ +GC_check_fl_marks(&p->_freelists[i][j]); +} +#ifdef GC_GCJ_SUPPORT +GC_check_fl_marks(&p->gcj_freelists[j]); +#endif +} +} +#endif +#endif +#ifndef GC_PTHREAD_SUPPORT_H +#define GC_PTHREAD_SUPPORT_H +#if defined(GC_PTHREADS)&&!defined(GC_WIN32_THREADS) +#if defined(GC_DARWIN_THREADS) +#else +#ifndef GC_PTHREAD_STOP_WORLD_H +#define GC_PTHREAD_STOP_WORLD_H +EXTERN_C_BEGIN +struct thread_stop_info { +#if!defined(GC_OPENBSD_UTHREADS)&&!defined(NACL)&&!defined(SN_TARGET_ORBIS)&&!defined(SN_TARGET_PSP2) +volatile AO_t last_stop_count; +#endif +ptr_t stack_ptr; +#ifdef NACL +#ifdef ARM32 +#define NACL_GC_REG_STORAGE_SIZE 9 +#else +#define NACL_GC_REG_STORAGE_SIZE 20 +#endif +ptr_t reg_storage[NACL_GC_REG_STORAGE_SIZE]; +#elif defined(SN_TARGET_ORBIS) +#define ORBIS_GC_REG_STORAGE_SIZE 27 +word registers[ORBIS_GC_REG_STORAGE_SIZE]; +#endif +}; +GC_INNER void GC_stop_init(void); +EXTERN_C_END +#endif +#endif +#ifdef THREAD_LOCAL_ALLOC +#endif +#ifdef THREAD_SANITIZER +#endif +EXTERN_C_BEGIN +typedef struct GC_Thread_Rep { +#ifdef THREAD_SANITIZER +char dummy[sizeof(oh)]; +#endif +struct GC_Thread_Rep*next; +pthread_t id; +#ifdef USE_TKILL_ON_ANDROID +pid_t kernel_id; +#endif +struct thread_stop_info stop_info; +#if defined(GC_ENABLE_SUSPEND_THREAD)&&!defined(GC_DARWIN_THREADS)&&!defined(GC_OPENBSD_UTHREADS)&&!defined(NACL) +volatile AO_t suspended_ext; +#endif +unsigned char flags; +#define FINISHED 1 +#define DETACHED 2 +#define MAIN_THREAD 4 +#define DISABLED_GC 0x10 +unsigned char thread_blocked; +unsigned short finalizer_skipped; +unsigned char finalizer_nested; +ptr_t stack_end; +ptr_t altstack; +word altstack_size; +ptr_t stack; +word stack_size; +#if defined(GC_DARWIN_THREADS)&&!defined(DARWIN_DONT_PARSE_STACK) +ptr_t topOfStack; +#endif +#ifdef IA64 +ptr_t backing_store_end; +ptr_t backing_store_ptr; +#endif +struct GC_traced_stack_sect_s*traced_stack_sect; +void*status; +#ifdef THREAD_LOCAL_ALLOC +struct thread_local_freelists tlfs GC_ATTR_WORD_ALIGNED; +#endif +}*GC_thread; +#ifndef THREAD_TABLE_SZ +#define THREAD_TABLE_SZ 256 +#endif +#if CPP_WORDSZ==64 +#define THREAD_TABLE_INDEX(id)(int)(((((NUMERIC_THREAD_ID(id)>>8)^NUMERIC_THREAD_ID(id))>>16)^((NUMERIC_THREAD_ID(id)>>8)^NUMERIC_THREAD_ID(id)))% THREAD_TABLE_SZ) +#else +#define THREAD_TABLE_INDEX(id)(int)(((NUMERIC_THREAD_ID(id)>>16)^(NUMERIC_THREAD_ID(id)>>8)^NUMERIC_THREAD_ID(id))% THREAD_TABLE_SZ) +#endif +GC_EXTERN volatile GC_thread GC_threads[THREAD_TABLE_SZ]; +GC_EXTERN GC_bool GC_thr_initialized; +GC_INNER GC_thread GC_lookup_thread(pthread_t id); +#ifdef NACL +GC_EXTERN __thread GC_thread 
GC_nacl_gc_thread_self; +GC_INNER void GC_nacl_initialize_gc_thread(void); +GC_INNER void GC_nacl_shutdown_gc_thread(void); +#endif +#ifdef GC_EXPLICIT_SIGNALS_UNBLOCK +GC_INNER void GC_unblock_gc_signals(void); +#endif +#ifdef GC_PTHREAD_START_STANDALONE +#define GC_INNER_PTHRSTART +#else +#define GC_INNER_PTHRSTART GC_INNER +#endif +GC_INNER_PTHRSTART void*GC_CALLBACK GC_inner_start_routine( +struct GC_stack_base*sb,void*arg); +GC_INNER_PTHRSTART GC_thread GC_start_rtn_prepare_thread( +void*(**pstart)(void*), +void**pstart_arg, +struct GC_stack_base*sb,void*arg); +GC_INNER_PTHRSTART void GC_thread_exit_proc(void*); +EXTERN_C_END +#endif +#endif +#if defined(GC_DARWIN_THREADS) +#include +#include +#include +#ifdef POWERPC +#if CPP_WORDSZ==32 +#define PPC_RED_ZONE_SIZE 224 +#elif CPP_WORDSZ==64 +#define PPC_RED_ZONE_SIZE 320 +#endif +#endif +#ifndef DARWIN_DONT_PARSE_STACK +typedef struct StackFrame { +unsigned long savedSP; +unsigned long savedCR; +unsigned long savedLR; +unsigned long reserved[2]; +unsigned long savedRTOC; +} StackFrame; +GC_INNER ptr_t GC_FindTopOfStack(unsigned long stack_start) +{ +StackFrame*frame=(StackFrame*)stack_start; +if (stack_start==0){ +#ifdef POWERPC +#if CPP_WORDSZ==32 +__asm__ __volatile__ ("lwz %0,0(r1)":"=r" (frame)); +#else +__asm__ __volatile__ ("ld %0,0(r1)":"=r" (frame)); +#endif +#elif defined(ARM32) +volatile ptr_t sp_reg; +__asm__ __volatile__ ("mov %0,r7\n":"=r" (sp_reg)); +frame=(StackFrame*)sp_reg; +#elif defined(AARCH64) +volatile ptr_t sp_reg; +__asm__ __volatile__ ("mov %0,x29\n":"=r" (sp_reg)); +frame=(StackFrame*)sp_reg; +#else +#if defined(CPPCHECK) +GC_noop1((word)&frame); +#endif +ABORT("GC_FindTopOfStack(0)is not implemented"); +#endif +} +#ifdef DEBUG_THREADS_EXTRA +GC_log_printf("FindTopOfStack start at sp=%p\n",(void*)frame); +#endif +while (frame->savedSP!=0){ +frame=(StackFrame*)frame->savedSP; +if ((frame->savedLR&~0x3)==0||(frame->savedLR&~0x3)==~0x3UL) +break; +} +#ifdef DEBUG_THREADS_EXTRA +GC_log_printf("FindTopOfStack finish at sp=%p\n",(void*)frame); +#endif +return (ptr_t)frame; +} +#endif +#ifdef GC_NO_THREADS_DISCOVERY +#define GC_query_task_threads FALSE +#elif defined(GC_DISCOVER_TASK_THREADS) +#define GC_query_task_threads TRUE +#else +STATIC GC_bool GC_query_task_threads=FALSE; +#endif +GC_API void GC_CALL GC_use_threads_discovery(void) +{ +#if defined(GC_NO_THREADS_DISCOVERY)||defined(DARWIN_DONT_PARSE_STACK) +ABORT("Darwin task-threads-based stop and push unsupported"); +#else +#ifndef GC_ALWAYS_MULTITHREADED +GC_ASSERT(!GC_need_to_lock); +#endif +#ifndef GC_DISCOVER_TASK_THREADS +GC_query_task_threads=TRUE; +#endif +GC_init_parallel(); +#endif +} +#ifndef kCFCoreFoundationVersionNumber_iOS_8_0 +#define kCFCoreFoundationVersionNumber_iOS_8_0 1140.1 +#endif +STATIC ptr_t GC_stack_range_for(ptr_t*phi,thread_act_t thread,GC_thread p, +GC_bool thread_blocked,mach_port_t my_thread, +ptr_t*paltstack_lo, +ptr_t*paltstack_hi GC_ATTR_UNUSED) +{ +ptr_t lo; +if (thread==my_thread){ +GC_ASSERT(!thread_blocked); +lo=GC_approx_sp(); +#ifndef DARWIN_DONT_PARSE_STACK +*phi=GC_FindTopOfStack(0); +#endif +} else if (thread_blocked){ +#if defined(CPPCHECK) +if (NULL==p)ABORT("Invalid GC_thread passed to GC_stack_range_for"); +#endif +lo=p->stop_info.stack_ptr; +#ifndef DARWIN_DONT_PARSE_STACK +*phi=p->topOfStack; +#endif +} else { +kern_return_t kern_result; +GC_THREAD_STATE_T state; +#if defined(ARM32)&&defined(ARM_THREAD_STATE32) +size_t size; +static cpu_type_t cputype=0; +if (cputype==0){ 
+sysctlbyname("hw.cputype",&cputype,&size,NULL,0); +} +if (cputype==CPU_TYPE_ARM64 +||kCFCoreFoundationVersionNumber +>=kCFCoreFoundationVersionNumber_iOS_8_0){ +arm_unified_thread_state_t unified_state; +mach_msg_type_number_t unified_thread_state_count +=ARM_UNIFIED_THREAD_STATE_COUNT; +#if defined(CPPCHECK) +#define GC_ARM_UNIFIED_THREAD_STATE 1 +#else +#define GC_ARM_UNIFIED_THREAD_STATE ARM_UNIFIED_THREAD_STATE +#endif +kern_result=thread_get_state(thread,GC_ARM_UNIFIED_THREAD_STATE, +(natural_t*)&unified_state, +&unified_thread_state_count); +#if!defined(CPPCHECK) +if (unified_state.ash.flavor!=ARM_THREAD_STATE32){ +ABORT("unified_state flavor should be ARM_THREAD_STATE32"); +} +#endif +state=unified_state; +} else +#endif +{ +mach_msg_type_number_t thread_state_count=GC_MACH_THREAD_STATE_COUNT; +kern_result=thread_get_state(thread,GC_MACH_THREAD_STATE, +(natural_t*)&state, +&thread_state_count); +} +#ifdef DEBUG_THREADS +GC_log_printf("thread_get_state returns value=%d\n",kern_result); +#endif +if (kern_result!=KERN_SUCCESS) +ABORT("thread_get_state failed"); +#if defined(I386) +lo=(ptr_t)state.THREAD_FLD(esp); +#ifndef DARWIN_DONT_PARSE_STACK +*phi=GC_FindTopOfStack(state.THREAD_FLD(esp)); +#endif +GC_push_one(state.THREAD_FLD(eax)); +GC_push_one(state.THREAD_FLD(ebx)); +GC_push_one(state.THREAD_FLD(ecx)); +GC_push_one(state.THREAD_FLD(edx)); +GC_push_one(state.THREAD_FLD(edi)); +GC_push_one(state.THREAD_FLD(esi)); +GC_push_one(state.THREAD_FLD(ebp)); +#elif defined(X86_64) +lo=(ptr_t)state.THREAD_FLD(rsp); +#ifndef DARWIN_DONT_PARSE_STACK +*phi=GC_FindTopOfStack(state.THREAD_FLD(rsp)); +#endif +GC_push_one(state.THREAD_FLD(rax)); +GC_push_one(state.THREAD_FLD(rbx)); +GC_push_one(state.THREAD_FLD(rcx)); +GC_push_one(state.THREAD_FLD(rdx)); +GC_push_one(state.THREAD_FLD(rdi)); +GC_push_one(state.THREAD_FLD(rsi)); +GC_push_one(state.THREAD_FLD(rbp)); +GC_push_one(state.THREAD_FLD(r8)); +GC_push_one(state.THREAD_FLD(r9)); +GC_push_one(state.THREAD_FLD(r10)); +GC_push_one(state.THREAD_FLD(r11)); +GC_push_one(state.THREAD_FLD(r12)); +GC_push_one(state.THREAD_FLD(r13)); +GC_push_one(state.THREAD_FLD(r14)); +GC_push_one(state.THREAD_FLD(r15)); +#elif defined(POWERPC) +lo=(ptr_t)(state.THREAD_FLD(r1)- PPC_RED_ZONE_SIZE); +#ifndef DARWIN_DONT_PARSE_STACK +*phi=GC_FindTopOfStack(state.THREAD_FLD(r1)); +#endif +GC_push_one(state.THREAD_FLD(r0)); +GC_push_one(state.THREAD_FLD(r2)); +GC_push_one(state.THREAD_FLD(r3)); +GC_push_one(state.THREAD_FLD(r4)); +GC_push_one(state.THREAD_FLD(r5)); +GC_push_one(state.THREAD_FLD(r6)); +GC_push_one(state.THREAD_FLD(r7)); +GC_push_one(state.THREAD_FLD(r8)); +GC_push_one(state.THREAD_FLD(r9)); +GC_push_one(state.THREAD_FLD(r10)); +GC_push_one(state.THREAD_FLD(r11)); +GC_push_one(state.THREAD_FLD(r12)); +GC_push_one(state.THREAD_FLD(r13)); +GC_push_one(state.THREAD_FLD(r14)); +GC_push_one(state.THREAD_FLD(r15)); +GC_push_one(state.THREAD_FLD(r16)); +GC_push_one(state.THREAD_FLD(r17)); +GC_push_one(state.THREAD_FLD(r18)); +GC_push_one(state.THREAD_FLD(r19)); +GC_push_one(state.THREAD_FLD(r20)); +GC_push_one(state.THREAD_FLD(r21)); +GC_push_one(state.THREAD_FLD(r22)); +GC_push_one(state.THREAD_FLD(r23)); +GC_push_one(state.THREAD_FLD(r24)); +GC_push_one(state.THREAD_FLD(r25)); +GC_push_one(state.THREAD_FLD(r26)); +GC_push_one(state.THREAD_FLD(r27)); +GC_push_one(state.THREAD_FLD(r28)); +GC_push_one(state.THREAD_FLD(r29)); +GC_push_one(state.THREAD_FLD(r30)); +GC_push_one(state.THREAD_FLD(r31)); +#elif defined(ARM32) +lo=(ptr_t)state.THREAD_FLD(sp); +#ifndef 
DARWIN_DONT_PARSE_STACK +*phi=GC_FindTopOfStack(state.THREAD_FLD(r[7])); +#endif +{ +int j; +for (j=0;j < 7;j++) +GC_push_one(state.THREAD_FLD(r[j])); +j++; +for (;j<=12;j++) +GC_push_one(state.THREAD_FLD(r[j])); +} +GC_push_one(state.THREAD_FLD(lr)); +#elif defined(AARCH64) +lo=(ptr_t)state.THREAD_FLD(sp); +#ifndef DARWIN_DONT_PARSE_STACK +*phi=GC_FindTopOfStack(state.THREAD_FLD(fp)); +#endif +{ +int j; +for (j=0;j<=28;j++){ +GC_push_one(state.THREAD_FLD(x[j])); +} +} +GC_push_one(state.THREAD_FLD(lr)); +#elif defined(CPPCHECK) +lo=NULL; +#else +#error FIXME for non-arm/ppc/x86 architectures +#endif +} +#ifdef DARWIN_DONT_PARSE_STACK +*phi=(p->flags&MAIN_THREAD)!=0?GC_stackbottom:p->stack_end; +#endif +#ifdef DARWIN_DONT_PARSE_STACK +if (p->altstack!=NULL&&(word)p->altstack<=(word)lo +&&(word)lo<=(word)p->altstack+p->altstack_size){ +*paltstack_lo=lo; +*paltstack_hi=p->altstack+p->altstack_size; +lo=p->stack; +*phi=p->stack+p->stack_size; +} else +#endif +{ +*paltstack_lo=NULL; +} +#ifdef DEBUG_THREADS +GC_log_printf("Darwin:Stack for thread %p=[%p,%p)\n", +(void*)(word)thread,(void*)lo,(void*)(*phi)); +#endif +return lo; +} +GC_INNER void GC_push_all_stacks(void) +{ +ptr_t hi,altstack_lo,altstack_hi; +task_t my_task=current_task(); +mach_port_t my_thread=mach_thread_self(); +GC_bool found_me=FALSE; +int nthreads=0; +word total_size=0; +mach_msg_type_number_t listcount=(mach_msg_type_number_t)THREAD_TABLE_SZ; +if (!EXPECT(GC_thr_initialized,TRUE)) +GC_thr_init(); +#ifndef DARWIN_DONT_PARSE_STACK +if (GC_query_task_threads){ +int i; +kern_return_t kern_result; +thread_act_array_t act_list=0; +kern_result=task_threads(my_task,&act_list,&listcount); +if (kern_result!=KERN_SUCCESS) +ABORT("task_threads failed"); +for (i=0;i < (int)listcount;i++){ +thread_act_t thread=act_list[i]; +ptr_t lo=GC_stack_range_for(&hi,thread,NULL,FALSE,my_thread, +&altstack_lo,&altstack_hi); +if (lo){ +GC_ASSERT((word)lo<=(word)hi); +total_size+=hi - lo; +GC_push_all_stack(lo,hi); +} +nthreads++; +if (thread==my_thread) +found_me=TRUE; +mach_port_deallocate(my_task,thread); +} +vm_deallocate(my_task,(vm_address_t)act_list, +sizeof(thread_t)*listcount); +} else +#endif +{ +int i; +for (i=0;i < (int)listcount;i++){ +GC_thread p; +for (p=GC_threads[i];p!=NULL;p=p->next) +if ((p->flags&FINISHED)==0){ +thread_act_t thread=(thread_act_t)p->stop_info.mach_thread; +ptr_t lo=GC_stack_range_for(&hi,thread,p, +(GC_bool)p->thread_blocked, +my_thread,&altstack_lo, +&altstack_hi); +if (lo){ +GC_ASSERT((word)lo<=(word)hi); +total_size+=hi - lo; +GC_push_all_stack_sections(lo,hi,p->traced_stack_sect); +} +if (altstack_lo){ +total_size+=altstack_hi - altstack_lo; +GC_push_all_stack(altstack_lo,altstack_hi); +} +nthreads++; +if (thread==my_thread) +found_me=TRUE; +} +} +} +mach_port_deallocate(my_task,my_thread); +GC_VERBOSE_LOG_PRINTF("Pushed %d thread stacks\n",nthreads); +if (!found_me&&!GC_in_thread_creation) +ABORT("Collecting from unknown thread"); +GC_total_stacksize=total_size; +} +#ifndef GC_NO_THREADS_DISCOVERY +#ifdef MPROTECT_VDB +STATIC mach_port_t GC_mach_handler_thread=0; +STATIC GC_bool GC_use_mach_handler_thread=FALSE; +GC_INNER void GC_darwin_register_mach_handler_thread(mach_port_t thread) +{ +GC_mach_handler_thread=thread; +GC_use_mach_handler_thread=TRUE; +} +#endif +#ifndef GC_MAX_MACH_THREADS +#define GC_MAX_MACH_THREADS THREAD_TABLE_SZ +#endif +struct GC_mach_thread { +thread_act_t thread; +GC_bool suspended; +}; +struct GC_mach_thread GC_mach_threads[GC_MAX_MACH_THREADS]; +STATIC int GC_mach_threads_count=0; 
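+/* GC_suspend_thread_list() walks one task_threads() snapshot (act_list): */
+/* it skips the collector's own thread and, where enabled, the mach       */
+/* exception handler and parallel-mark threads, as well as any thread     */
+/* already present in the previous snapshot (old_list); newly seen        */
+/* threads are recorded in GC_mach_threads[] and suspended with           */
+/* thread_suspend() (retried while it returns KERN_ABORTED).  It returns  */
+/* TRUE if any new thread was found, so GC_stop_world() below keeps       */
+/* re-querying task_threads() until the set of threads stops changing.    */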
+STATIC GC_bool GC_suspend_thread_list(thread_act_array_t act_list,int count, +thread_act_array_t old_list, +int old_count,task_t my_task, +mach_port_t my_thread) +{ +int i; +int j=-1; +GC_bool changed=FALSE; +for (i=0;i < count;i++){ +thread_act_t thread=act_list[i]; +GC_bool found; +kern_return_t kern_result; +if (thread==my_thread +#ifdef MPROTECT_VDB +||(GC_mach_handler_thread==thread&&GC_use_mach_handler_thread) +#endif +#ifdef PARALLEL_MARK +||GC_is_mach_marker(thread) +#endif +){ +mach_port_deallocate(my_task,thread); +continue; +} +found=FALSE; +{ +int last_found=j; +while (++j < old_count) +if (old_list[j]==thread){ +found=TRUE; +break; +} +if (!found){ +for (j=0;j < last_found;j++) +if (old_list[j]==thread){ +found=TRUE; +break; +} +} +} +if (found){ +mach_port_deallocate(my_task,thread); +continue; +} +if (GC_mach_threads_count==GC_MAX_MACH_THREADS) +ABORT("Too many threads"); +GC_mach_threads[GC_mach_threads_count].thread=thread; +GC_mach_threads[GC_mach_threads_count].suspended=FALSE; +changed=TRUE; +#ifdef DEBUG_THREADS +GC_log_printf("Suspending %p\n",(void*)(word)thread); +#endif +GC_acquire_dirty_lock(); +do { +kern_result=thread_suspend(thread); +} while (kern_result==KERN_ABORTED); +GC_release_dirty_lock(); +if (kern_result!=KERN_SUCCESS){ +GC_mach_threads[GC_mach_threads_count].suspended=FALSE; +} else { +GC_mach_threads[GC_mach_threads_count].suspended=TRUE; +if (GC_on_thread_event) +GC_on_thread_event(GC_EVENT_THREAD_SUSPENDED,(void*)(word)thread); +} +GC_mach_threads_count++; +} +return changed; +} +#endif +GC_INNER void GC_stop_world(void) +{ +task_t my_task=current_task(); +mach_port_t my_thread=mach_thread_self(); +kern_return_t kern_result; +#ifdef DEBUG_THREADS +GC_log_printf("Stopping the world from thread %p\n", +(void*)(word)my_thread); +#endif +#ifdef PARALLEL_MARK +if (GC_parallel){ +GC_acquire_mark_lock(); +GC_ASSERT(GC_fl_builder_count==0); +} +#endif +if (GC_query_task_threads){ +#ifndef GC_NO_THREADS_DISCOVERY +GC_bool changed; +thread_act_array_t act_list,prev_list; +mach_msg_type_number_t listcount,prevcount; +GC_mach_threads_count=0; +changed=TRUE; +prev_list=NULL; +prevcount=0; +do { +kern_result=task_threads(my_task,&act_list,&listcount); +if (kern_result==KERN_SUCCESS){ +changed=GC_suspend_thread_list(act_list,listcount,prev_list, +prevcount,my_task,my_thread); +if (prev_list!=NULL){ +vm_deallocate(my_task,(vm_address_t)prev_list, +sizeof(thread_t)*prevcount); +} +prev_list=act_list; +prevcount=listcount; +} +} while (changed); +GC_ASSERT(prev_list!=0); +vm_deallocate(my_task,(vm_address_t)act_list, +sizeof(thread_t)*listcount); +#endif +} else { +unsigned i; +for (i=0;i < THREAD_TABLE_SZ;i++){ +GC_thread p; +for (p=GC_threads[i];p!=NULL;p=p->next){ +if ((p->flags&FINISHED)==0&&!p->thread_blocked&& +p->stop_info.mach_thread!=my_thread){ +GC_acquire_dirty_lock(); +do { +kern_result=thread_suspend(p->stop_info.mach_thread); +} while (kern_result==KERN_ABORTED); +GC_release_dirty_lock(); +if (kern_result!=KERN_SUCCESS) +ABORT("thread_suspend failed"); +if (GC_on_thread_event) +GC_on_thread_event(GC_EVENT_THREAD_SUSPENDED, +(void*)(word)p->stop_info.mach_thread); +} +} +} +} +#ifdef MPROTECT_VDB +if (GC_auto_incremental){ +GC_mprotect_stop(); +} +#endif +#ifdef PARALLEL_MARK +if (GC_parallel) +GC_release_mark_lock(); +#endif +#ifdef DEBUG_THREADS +GC_log_printf("World stopped from %p\n",(void*)(word)my_thread); +#endif +mach_port_deallocate(my_task,my_thread); +} +GC_INLINE void GC_thread_resume(thread_act_t thread) +{ +kern_return_t kern_result; +#if 
defined(DEBUG_THREADS)||defined(GC_ASSERTIONS) +struct thread_basic_info info; +mach_msg_type_number_t outCount=THREAD_BASIC_INFO_COUNT; +#if defined(CPPCHECK)&&defined(DEBUG_THREADS) +info.run_state=0; +#endif +kern_result=thread_info(thread,THREAD_BASIC_INFO, +(thread_info_t)&info,&outCount); +if (kern_result!=KERN_SUCCESS) +ABORT("thread_info failed"); +#endif +#ifdef DEBUG_THREADS +GC_log_printf("Resuming thread %p with state %d\n",(void*)(word)thread, +info.run_state); +#endif +kern_result=thread_resume(thread); +if (kern_result!=KERN_SUCCESS){ +WARN("thread_resume(%p)failed:mach port invalid\n",thread); +} else if (GC_on_thread_event){ +GC_on_thread_event(GC_EVENT_THREAD_UNSUSPENDED,(void*)(word)thread); +} +} +GC_INNER void GC_start_world(void) +{ +task_t my_task=current_task(); +#ifdef DEBUG_THREADS +GC_log_printf("World starting\n"); +#endif +#ifdef MPROTECT_VDB +if (GC_auto_incremental){ +GC_mprotect_resume(); +} +#endif +if (GC_query_task_threads){ +#ifndef GC_NO_THREADS_DISCOVERY +int i,j; +kern_return_t kern_result; +thread_act_array_t act_list; +mach_msg_type_number_t listcount; +kern_result=task_threads(my_task,&act_list,&listcount); +if (kern_result!=KERN_SUCCESS) +ABORT("task_threads failed"); +j=(int)listcount; +for (i=0;i < GC_mach_threads_count;i++){ +thread_act_t thread=GC_mach_threads[i].thread; +if (GC_mach_threads[i].suspended){ +int last_found=j; +while (++j < (int)listcount){ +if (act_list[j]==thread) +break; +} +if (j>=(int)listcount){ +for (j=0;j < last_found;j++){ +if (act_list[j]==thread) +break; +} +} +if (j!=last_found){ +GC_thread_resume(thread); +} +} else { +#ifdef DEBUG_THREADS +GC_log_printf("Not resuming thread %p as it is not suspended\n", +(void*)(word)thread); +#endif +} +mach_port_deallocate(my_task,thread); +} +for (i=0;i < (int)listcount;i++) +mach_port_deallocate(my_task,act_list[i]); +vm_deallocate(my_task,(vm_address_t)act_list, +sizeof(thread_t)*listcount); +#endif +} else { +int i; +mach_port_t my_thread=mach_thread_self(); +for (i=0;i < THREAD_TABLE_SZ;i++){ +GC_thread p; +for (p=GC_threads[i];p!=NULL;p=p->next){ +if ((p->flags&FINISHED)==0&&!p->thread_blocked&& +p->stop_info.mach_thread!=my_thread) +GC_thread_resume(p->stop_info.mach_thread); +} +} +mach_port_deallocate(my_task,my_thread); +} +#ifdef DEBUG_THREADS +GC_log_printf("World started\n"); +#endif +} +#endif +#if!defined(MACOS)&&!defined(SN_TARGET_ORBIS)&&!defined(SN_TARGET_PSP2)&&!defined(_WIN32_WCE)&&!defined(__CC_ARM) +#include +#endif +#undef GC_MUST_RESTORE_REDEFINED_DLOPEN +#if defined(GC_PTHREADS)&&!defined(GC_NO_DLOPEN)&&!defined(GC_NO_THREAD_REDIRECTS)&&!defined(GC_USE_LD_WRAP) +#undef dlopen +#define GC_MUST_RESTORE_REDEFINED_DLOPEN +#endif +STATIC GC_has_static_roots_func GC_has_static_roots=0; +#if (defined(DYNAMIC_LOADING)||defined(MSWIN32)||defined(MSWINCE)||defined(CYGWIN32))&&!defined(PCR) +#if!defined(DARWIN)&&!defined(SCO_ELF)&&!defined(SOLARISDL)&&!defined(AIX)&&!defined(DGUX)&&!defined(IRIX5)&&!defined(HPUX)&&!defined(CYGWIN32)&&!defined(MSWIN32)&&!defined(MSWINCE)&&!(defined(ALPHA)&&defined(OSF1))&&!(defined(FREEBSD)&&defined(__ELF__))&&!(defined(LINUX)&&defined(__ELF__))&&!(defined(NETBSD)&&defined(__ELF__))&&!(defined(OPENBSD)&&(defined(__ELF__)||defined(M68K)))&&!defined(HAIKU)&&!defined(HURD)&&!defined(NACL)&&!defined(CPPCHECK) +#error We only know how to find data segments of dynamic libraries for above. +#error Additional SVR4 variants might not be too hard to add. 
+#endif +#include +#ifdef SOLARISDL +#include +#include +#include +#endif +#if defined(NETBSD) +#include +#include +#include +#define ELFSIZE ARCH_ELFSIZE +#endif +#if defined(OPENBSD) +#include +#if (OpenBSD>=200519)&&!defined(HAVE_DL_ITERATE_PHDR) +#define HAVE_DL_ITERATE_PHDR +#endif +#endif +#if defined(SCO_ELF)||defined(DGUX)||defined(HURD)||defined(NACL)||(defined(__ELF__)&&(defined(LINUX)||defined(FREEBSD)||defined(NETBSD)||defined(OPENBSD))) +#include +#if!defined(OPENBSD)&&!defined(HOST_ANDROID) +#include +#endif +#ifdef HOST_ANDROID +#ifdef BIONIC_ELFDATA_REDEF_BUG +#include +#include +#undef ELF_DATA +#undef EM_ALPHA +#endif +#include +#if!defined(GC_DONT_DEFINE_LINK_MAP)&&!(__ANDROID_API__>=21) +struct link_map { +uintptr_t l_addr; +char*l_name; +uintptr_t l_ld; +struct link_map*l_next; +struct link_map*l_prev; +}; +struct r_debug { +int32_t r_version; +struct link_map*r_map; +void (*r_brk)(void); +int32_t r_state; +uintptr_t r_ldbase; +}; +#endif +#else +EXTERN_C_BEGIN +#include +EXTERN_C_END +#endif +#endif +#ifndef ElfW +#if defined(FREEBSD) +#if __ELF_WORD_SIZE==32 +#define ElfW(type)Elf32_##type +#else +#define ElfW(type)Elf64_##type +#endif +#elif defined(NETBSD)||defined(OPENBSD) +#if ELFSIZE==32 +#define ElfW(type)Elf32_##type +#elif ELFSIZE==64 +#define ElfW(type)Elf64_##type +#else +#error Missing ELFSIZE define +#endif +#else +#if!defined(ELF_CLASS)||ELF_CLASS==ELFCLASS32 +#define ElfW(type)Elf32_##type +#else +#define ElfW(type)Elf64_##type +#endif +#endif +#endif +#if defined(SOLARISDL)&&!defined(USE_PROC_FOR_LIBRARIES) +EXTERN_C_BEGIN +extern ElfW(Dyn)_DYNAMIC; +EXTERN_C_END +STATIC struct link_map* +GC_FirstDLOpenedLinkMap(void) +{ +ElfW(Dyn)*dp; +static struct link_map*cachedResult=0; +static ElfW(Dyn)*dynStructureAddr=0; +#ifdef SUNOS53_SHARED_LIB +if( dynStructureAddr==0){ +void*startupSyms=dlopen(0,RTLD_LAZY); +dynStructureAddr=(ElfW(Dyn)*)(word)dlsym(startupSyms,"_DYNAMIC"); +} +#else +dynStructureAddr=&_DYNAMIC; +#endif +if (0==COVERT_DATAFLOW(dynStructureAddr)){ +return(0); +} +if (cachedResult==0){ +int tag; +for( dp=((ElfW(Dyn)*)(&_DYNAMIC));(tag=dp->d_tag)!=0;dp++){ +if (tag==DT_DEBUG){ +struct r_debug*rd=(struct r_debug*)dp->d_un.d_ptr; +if (rd!=NULL){ +struct link_map*lm=rd->r_map; +if (lm!=NULL) +cachedResult=lm->l_next; +} +break; +} +} +} +return cachedResult; +} +#endif +#ifdef GC_MUST_RESTORE_REDEFINED_DLOPEN +#define dlopen GC_dlopen +#endif +#if defined(SOLARISDL) +#if!defined(PCR)&&!defined(GC_SOLARIS_THREADS)&&defined(THREADS)&&!defined(CPPCHECK) +#error Fix mutual exclusion with dlopen +#endif +#ifndef USE_PROC_FOR_LIBRARIES +GC_INNER void GC_register_dynamic_libraries(void) +{ +struct link_map*lm; +for (lm=GC_FirstDLOpenedLinkMap();lm!=0;lm=lm->l_next){ +ElfW(Ehdr)*e; +ElfW(Phdr)*p; +unsigned long offset; +char*start; +int i; +e=(ElfW(Ehdr)*)lm->l_addr; +p=((ElfW(Phdr)*)(((char*)(e))+e->e_phoff)); +offset=((unsigned long)(lm->l_addr)); +for( i=0;i < (int)e->e_phnum;i++,p++){ +switch( p->p_type){ +case PT_LOAD: +{ +if(!(p->p_flags&PF_W))break; +start=((char*)(p->p_vaddr))+offset; +GC_add_roots_inner(start,start+p->p_memsz,TRUE); +} +break; +default: +break; +} +} +} +} +#endif +#endif +#if defined(SCO_ELF)||defined(DGUX)||defined(HURD)||defined(NACL)||(defined(__ELF__)&&(defined(LINUX)||defined(FREEBSD)||defined(NETBSD)||defined(OPENBSD))) +#ifdef USE_PROC_FOR_LIBRARIES +#include +#include +#include +#include +#define MAPS_BUF_SIZE (32*1024) +static void sort_heap_sects(struct HeapSect*base,size_t number_of_elements) +{ +signed_word 
n=(signed_word)number_of_elements; +signed_word nsorted=1; +while (nsorted < n){ +signed_word i; +while (nsorted < n&& +(word)base[nsorted-1].hs_start < (word)base[nsorted].hs_start) +++nsorted; +if (nsorted==n)break; +GC_ASSERT((word)base[nsorted-1].hs_start > (word)base[nsorted].hs_start); +i=nsorted - 1; +while (i>=0&&(word)base[i].hs_start > (word)base[i+1].hs_start){ +struct HeapSect tmp=base[i]; +base[i]=base[i+1]; +base[i+1]=tmp; +--i; +} +GC_ASSERT((word)base[nsorted-1].hs_start < (word)base[nsorted].hs_start); +++nsorted; +} +} +STATIC void GC_register_map_entries(char*maps) +{ +char*prot; +char*buf_ptr=maps; +ptr_t start,end; +unsigned int maj_dev; +ptr_t least_ha,greatest_ha; +unsigned i; +GC_ASSERT(I_HOLD_LOCK()); +sort_heap_sects(GC_our_memory,GC_n_memory); +least_ha=GC_our_memory[0].hs_start; +greatest_ha=GC_our_memory[GC_n_memory-1].hs_start ++GC_our_memory[GC_n_memory-1].hs_bytes; +for (;;){ +buf_ptr=GC_parse_map_entry(buf_ptr,&start,&end,&prot, +&maj_dev,0); +if (NULL==buf_ptr) +break; +if (prot[1]=='w'){ +if ((word)start<=(word)GC_stackbottom +&&(word)end>=(word)GC_stackbottom){ +continue; +} +#ifdef THREADS +if (GC_segment_is_thread_stack(start,end))continue; +#endif +if ((word)end<=(word)least_ha +||(word)start>=(word)greatest_ha){ +GC_add_roots_inner(start,end,TRUE); +continue; +} +i=0; +while ((word)(GC_our_memory[i].hs_start ++GC_our_memory[i].hs_bytes)< (word)start) +++i; +GC_ASSERT(i < GC_n_memory); +if ((word)GC_our_memory[i].hs_start<=(word)start){ +start=GC_our_memory[i].hs_start ++GC_our_memory[i].hs_bytes; +++i; +} +while (i < GC_n_memory +&&(word)GC_our_memory[i].hs_start < (word)end +&&(word)start < (word)end){ +if ((word)start < (word)GC_our_memory[i].hs_start) +GC_add_roots_inner(start, +GC_our_memory[i].hs_start,TRUE); +start=GC_our_memory[i].hs_start ++GC_our_memory[i].hs_bytes; +++i; +} +if ((word)start < (word)end) +GC_add_roots_inner(start,end,TRUE); +} else if (prot[0]=='-'&&prot[1]=='-'&&prot[2]=='-'){ +GC_remove_roots_subregion(start,end); +} +} +} +GC_INNER void GC_register_dynamic_libraries(void) +{ +char*maps=GC_get_maps(); +if (NULL==maps) +ABORT("Failed to read/proc for library registration"); +GC_register_map_entries(maps); +} +GC_INNER GC_bool GC_register_main_static_data(void) +{ +return FALSE; +} +#define HAVE_REGISTER_MAIN_STATIC_DATA +#else +#if __GLIBC__ > 2||(__GLIBC__==2&&__GLIBC_MINOR__ > 2)||(__GLIBC__==2&&__GLIBC_MINOR__==2&&defined(DT_CONFIG))||defined(HOST_ANDROID) +#ifndef HAVE_DL_ITERATE_PHDR +#define HAVE_DL_ITERATE_PHDR +#endif +#ifdef HOST_ANDROID +EXTERN_C_BEGIN +extern int dl_iterate_phdr(int (*cb)(struct dl_phdr_info*, +size_t,void*), +void*data); +EXTERN_C_END +#endif +#endif +#if defined(__DragonFly__)||defined(__FreeBSD_kernel__)||(defined(FREEBSD)&&__FreeBSD__>=7) +#ifndef HAVE_DL_ITERATE_PHDR +#define HAVE_DL_ITERATE_PHDR +#endif +#define DL_ITERATE_PHDR_STRONG +#elif defined(HAVE_DL_ITERATE_PHDR) +EXTERN_C_BEGIN +#pragma weak dl_iterate_phdr +EXTERN_C_END +#endif +#if defined(HAVE_DL_ITERATE_PHDR) +#ifdef PT_GNU_RELRO +#define MAX_LOAD_SEGS MAX_ROOT_SETS +static struct load_segment { +ptr_t start; +ptr_t end; +ptr_t start2; +ptr_t end2; +} load_segs[MAX_LOAD_SEGS]; +static int n_load_segs; +static GC_bool load_segs_overflow; +#endif +STATIC int GC_register_dynlib_callback(struct dl_phdr_info*info, +size_t size,void*ptr) +{ +const ElfW(Phdr)*p; +ptr_t start,end; +int i; +if (size < offsetof (struct dl_phdr_info,dlpi_phnum) ++sizeof (info->dlpi_phnum)) +return -1; +p=info->dlpi_phdr; +for (i=0;i < 
(int)info->dlpi_phnum;i++,p++){ +if (p->p_type==PT_LOAD){ +GC_has_static_roots_func callback=GC_has_static_roots; +if ((p->p_flags&PF_W)==0)continue; +start=(ptr_t)p->p_vaddr+info->dlpi_addr; +end=start+p->p_memsz; +if (callback!=0&&!callback(info->dlpi_name,start,p->p_memsz)) +continue; +#ifdef PT_GNU_RELRO +#if CPP_WORDSZ==64 +start=(ptr_t)((word)start&~(word)(sizeof(word)- 1)); +#endif +if (n_load_segs>=MAX_LOAD_SEGS){ +if (!load_segs_overflow){ +WARN("Too many PT_LOAD segments;" +" registering as roots directly...\n",0); +load_segs_overflow=TRUE; +} +GC_add_roots_inner(start,end,TRUE); +} else { +load_segs[n_load_segs].start=start; +load_segs[n_load_segs].end=end; +load_segs[n_load_segs].start2=0; +load_segs[n_load_segs].end2=0; +++n_load_segs; +} +#else +GC_add_roots_inner(start,end,TRUE); +#endif +} +} +#ifdef PT_GNU_RELRO +p=info->dlpi_phdr; +for (i=0;i < (int)info->dlpi_phnum;i++,p++){ +if (p->p_type==PT_GNU_RELRO){ +int j; +start=(ptr_t)p->p_vaddr+info->dlpi_addr; +end=start+p->p_memsz; +for (j=n_load_segs;--j>=0;){ +if ((word)start>=(word)load_segs[j].start +&&(word)start < (word)load_segs[j].end){ +if (load_segs[j].start2!=0){ +WARN("More than one GNU_RELRO segment per load one\n",0); +} else { +GC_ASSERT((word)end<=(word)load_segs[j].end); +load_segs[j].end2=load_segs[j].end; +load_segs[j].end=start; +load_segs[j].start2=end; +} +break; +} +if (0==j&&0==GC_has_static_roots) +WARN("Failed to find PT_GNU_RELRO segment" +" inside PT_LOAD region\n",0); +} +} +} +#endif +*(int*)ptr=1; +return 0; +} +GC_INNER GC_bool GC_register_main_static_data(void) +{ +#ifdef DL_ITERATE_PHDR_STRONG +return FALSE; +#else +return 0==COVERT_DATAFLOW(dl_iterate_phdr); +#endif +} +STATIC GC_bool GC_register_dynamic_libraries_dl_iterate_phdr(void) +{ +int did_something; +if (GC_register_main_static_data()) +return FALSE; +#ifdef PT_GNU_RELRO +{ +static GC_bool excluded_segs=FALSE; +n_load_segs=0; +load_segs_overflow=FALSE; +if (!EXPECT(excluded_segs,TRUE)){ +GC_exclude_static_roots_inner((ptr_t)load_segs, +(ptr_t)load_segs+sizeof(load_segs)); +excluded_segs=TRUE; +} +} +#endif +did_something=0; +dl_iterate_phdr(GC_register_dynlib_callback,&did_something); +if (did_something){ +#ifdef PT_GNU_RELRO +int i; +for (i=0;i < n_load_segs;++i){ +if ((word)load_segs[i].end > (word)load_segs[i].start){ +GC_add_roots_inner(load_segs[i].start,load_segs[i].end,TRUE); +} +if ((word)load_segs[i].end2 > (word)load_segs[i].start2){ +GC_add_roots_inner(load_segs[i].start2,load_segs[i].end2,TRUE); +} +} +#endif +} else { +ptr_t datastart,dataend; +#ifdef DATASTART_IS_FUNC +static ptr_t datastart_cached=(ptr_t)GC_WORD_MAX; +if (datastart_cached==(ptr_t)GC_WORD_MAX){ +datastart_cached=DATASTART; +} +datastart=datastart_cached; +#else +datastart=DATASTART; +#endif +#ifdef DATAEND_IS_FUNC +{ +static ptr_t dataend_cached=0; +if (dataend_cached==0){ +dataend_cached=DATAEND; +} +dataend=dataend_cached; +} +#else +dataend=DATAEND; +#endif +if (NULL==*(char*volatile*)&datastart +||(word)datastart > (word)dataend) +ABORT_ARG2("Wrong DATASTART/END pair", +":%p .. %p",(void*)datastart,(void*)dataend); +GC_add_roots_inner(datastart,dataend,TRUE); +#ifdef GC_HAVE_DATAREGION2 +if ((word)DATASTART2 - 1U>=(word)DATAEND2){ +ABORT_ARG2("Wrong DATASTART/END2 pair", +":%p .. 
%p",(void*)DATASTART2,(void*)DATAEND2); +} +GC_add_roots_inner(DATASTART2,DATAEND2,TRUE); +#endif +} +return TRUE; +} +#define HAVE_REGISTER_MAIN_STATIC_DATA +#else +#if defined(NETBSD)||defined(OPENBSD) +#include +#ifndef DT_DEBUG +#define DT_DEBUG 21 +#endif +#ifndef PT_LOAD +#define PT_LOAD 1 +#endif +#ifndef PF_W +#define PF_W 2 +#endif +#elif!defined(HOST_ANDROID) +#include +#endif +#ifndef HOST_ANDROID +#include +#endif +#endif +EXTERN_C_BEGIN +#ifdef __GNUC__ +#pragma weak _DYNAMIC +#endif +extern ElfW(Dyn)_DYNAMIC[]; +EXTERN_C_END +STATIC struct link_map* +GC_FirstDLOpenedLinkMap(void) +{ +static struct link_map*cachedResult=0; +if (0==COVERT_DATAFLOW(_DYNAMIC)){ +return(0); +} +if( cachedResult==0){ +#if defined(NETBSD)&&defined(RTLD_DI_LINKMAP) +#if defined(CPPCHECK) +#define GC_RTLD_DI_LINKMAP 2 +#else +#define GC_RTLD_DI_LINKMAP RTLD_DI_LINKMAP +#endif +struct link_map*lm=NULL; +if (!dlinfo(RTLD_SELF,GC_RTLD_DI_LINKMAP,&lm)&&lm!=NULL){ +while (lm->l_prev!=NULL){ +lm=lm->l_prev; +} +cachedResult=lm->l_next; +} +#else +ElfW(Dyn)*dp; +int tag; +for( dp=_DYNAMIC;(tag=dp->d_tag)!=0;dp++){ +if (tag==DT_DEBUG){ +struct r_debug*rd=(struct r_debug*)dp->d_un.d_ptr; +if (rd!=NULL){ +struct link_map*lm=rd->r_map; +if (lm!=NULL) +cachedResult=lm->l_next; +} +break; +} +} +#endif +} +return cachedResult; +} +GC_INNER void GC_register_dynamic_libraries(void) +{ +struct link_map*lm; +#ifdef HAVE_DL_ITERATE_PHDR +if (GC_register_dynamic_libraries_dl_iterate_phdr()){ +return; +} +#endif +for (lm=GC_FirstDLOpenedLinkMap();lm!=0;lm=lm->l_next) +{ +ElfW(Ehdr)*e; +ElfW(Phdr)*p; +unsigned long offset; +char*start; +int i; +e=(ElfW(Ehdr)*)lm->l_addr; +#ifdef HOST_ANDROID +if (e==NULL) +continue; +#endif +p=((ElfW(Phdr)*)(((char*)(e))+e->e_phoff)); +offset=((unsigned long)(lm->l_addr)); +for( i=0;i < (int)e->e_phnum;i++,p++){ +switch( p->p_type){ +case PT_LOAD: +{ +if(!(p->p_flags&PF_W))break; +start=((char*)(p->p_vaddr))+offset; +GC_add_roots_inner(start,start+p->p_memsz,TRUE); +} +break; +default: +break; +} +} +} +} +#endif +#endif +#if defined(IRIX5)||(defined(USE_PROC_FOR_LIBRARIES)&&!defined(LINUX)) +#include +#include +#include +#include +#include +#include +#ifndef _sigargs +#define IRIX6 +#endif +GC_INNER void GC_register_dynamic_libraries(void) +{ +static int fd=-1; +char buf[30]; +static prmap_t*addr_map=0; +static int current_sz=0; +int needed_sz=0; +int i; +long flags; +ptr_t start; +ptr_t limit; +ptr_t heap_start=HEAP_START; +ptr_t heap_end=heap_start; +#ifdef SOLARISDL +#define MA_PHYS 0 +#endif +if (fd < 0){ +(void)snprintf(buf,sizeof(buf),"/proc/%ld",(long)getpid()); +buf[sizeof(buf)- 1]='\0'; +fd=open(buf,O_RDONLY); +if (fd < 0){ +ABORT("/proc open failed"); +} +} +if (ioctl(fd,PIOCNMAP,&needed_sz)< 0){ +ABORT_ARG2("/proc PIOCNMAP ioctl failed", +":fd=%d,errno=%d",fd,errno); +} +if (needed_sz>=current_sz){ +GC_scratch_recycle_no_gww(addr_map, +(size_t)current_sz*sizeof(prmap_t)); +current_sz=needed_sz*2+1; +addr_map=(prmap_t*)GC_scratch_alloc( +(size_t)current_sz*sizeof(prmap_t)); +if (addr_map==NULL) +ABORT("Insufficient memory for address map"); +} +if (ioctl(fd,PIOCMAP,addr_map)< 0){ +ABORT_ARG3("/proc PIOCMAP ioctl failed", +":errcode=%d,needed_sz=%d,addr_map=%p", +errno,needed_sz,(void*)addr_map); +}; +if (GC_n_heap_sects > 0){ +heap_end=GC_heap_sects[GC_n_heap_sects-1].hs_start ++GC_heap_sects[GC_n_heap_sects-1].hs_bytes; +if ((word)heap_end < (word)GC_scratch_last_end_ptr) +heap_end=GC_scratch_last_end_ptr; +} +for (i=0;i < needed_sz;i++){ +flags=addr_map[i].pr_mflags; +if 
((flags&(MA_BREAK|MA_STACK|MA_PHYS +|MA_FETCHOP|MA_NOTCACHED))!=0)goto irrelevant; +if ((flags&(MA_READ|MA_WRITE))!=(MA_READ|MA_WRITE)) +goto irrelevant; +start=(ptr_t)(addr_map[i].pr_vaddr); +if (GC_roots_present(start))goto irrelevant; +if ((word)start < (word)heap_end&&(word)start>=(word)heap_start) +goto irrelevant; +limit=start+addr_map[i].pr_size; +#ifndef IRIX6 +if (addr_map[i].pr_off==0&&strncmp(start,ELFMAG,4)==0){ +caddr_t arg; +int obj; +#define MAP_IRR_SZ 10 +static ptr_t map_irr[MAP_IRR_SZ]; +static int n_irr=0; +struct stat buf; +int j; +for (j=0;j < n_irr;j++){ +if (map_irr[j]==start)goto irrelevant; +} +arg=(caddr_t)start; +obj=ioctl(fd,PIOCOPENM,&arg); +if (obj>=0){ +fstat(obj,&buf); +close(obj); +if ((buf.st_mode&0111)!=0){ +if (n_irr < MAP_IRR_SZ){ +map_irr[n_irr++]=start; +} +goto irrelevant; +} +} +} +#endif +GC_add_roots_inner(start,limit,TRUE); +irrelevant:; +} +if (close(fd)< 0)ABORT("Couldn't close/proc file"); +fd=-1; +} +#endif +#if defined(MSWIN32)||defined(MSWINCE)||defined(CYGWIN32) +#include +STATIC void GC_cond_add_roots(char*base,char*limit) +{ +#ifdef GC_WIN32_THREADS +char*curr_base=base; +char*next_stack_lo; +char*next_stack_hi; +if (base==limit)return; +for(;;){ +GC_get_next_stack(curr_base,limit,&next_stack_lo,&next_stack_hi); +if ((word)next_stack_lo>=(word)limit)break; +if ((word)next_stack_lo > (word)curr_base) +GC_add_roots_inner(curr_base,next_stack_lo,TRUE); +curr_base=next_stack_hi; +} +if ((word)curr_base < (word)limit) +GC_add_roots_inner(curr_base,limit,TRUE); +#else +char*stack_top +=(char*)((word)GC_approx_sp()& +~(word)(GC_sysinfo.dwAllocationGranularity - 1)); +if (base==limit)return; +if ((word)limit > (word)stack_top +&&(word)base < (word)GC_stackbottom){ +return; +} +GC_add_roots_inner(base,limit,TRUE); +#endif +} +#ifdef DYNAMIC_LOADING +GC_INNER GC_bool GC_register_main_static_data(void) +{ +#if defined(MSWINCE)||defined(CYGWIN32) +return FALSE; +#else +return GC_no_win32_dlls; +#endif +} +#define HAVE_REGISTER_MAIN_STATIC_DATA +#endif +#ifdef DEBUG_VIRTUALQUERY +void GC_dump_meminfo(MEMORY_BASIC_INFORMATION*buf) +{ +GC_printf("BaseAddress=0x%lx,AllocationBase=0x%lx," +" RegionSize=0x%lx(%lu)\n",buf->BaseAddress, +buf->AllocationBase,buf->RegionSize,buf->RegionSize); +GC_printf("\tAllocationProtect=0x%lx,State=0x%lx,Protect=0x%lx," +"Type=0x%lx\n",buf->AllocationProtect,buf->State, +buf->Protect,buf->Type); +} +#endif +#if defined(MSWINCE)||defined(CYGWIN32) +#define GC_wnt TRUE +#endif +GC_INNER void GC_register_dynamic_libraries(void) +{ +MEMORY_BASIC_INFORMATION buf; +DWORD protect; +LPVOID p; +char*base; +char*limit,*new_limit; +#ifdef MSWIN32 +if (GC_no_win32_dlls)return; +#endif +p=GC_sysinfo.lpMinimumApplicationAddress; +base=limit=(char*)p; +while ((word)p < (word)GC_sysinfo.lpMaximumApplicationAddress){ +size_t result=VirtualQuery(p,&buf,sizeof(buf)); +#ifdef MSWINCE +if (result==0){ +new_limit=(char*) +(((DWORD)p+GC_sysinfo.dwAllocationGranularity) +&~(GC_sysinfo.dwAllocationGranularity-1)); +} else +#endif +{ +if (result!=sizeof(buf)){ +ABORT("Weird VirtualQuery result"); +} +new_limit=(char*)p+buf.RegionSize; +protect=buf.Protect; +if (buf.State==MEM_COMMIT +&&(protect==PAGE_EXECUTE_READWRITE +||protect==PAGE_EXECUTE_WRITECOPY +||protect==PAGE_READWRITE +||protect==PAGE_WRITECOPY) +&&(buf.Type==MEM_IMAGE +#ifdef GC_REGISTER_MEM_PRIVATE +||(protect==PAGE_READWRITE&&buf.Type==MEM_PRIVATE) +#else +||(!GC_wnt&&buf.Type==MEM_PRIVATE) +#endif +) +&&!GC_is_heap_base(buf.AllocationBase)){ +#ifdef DEBUG_VIRTUALQUERY 
+GC_dump_meminfo(&buf); +#endif +if ((char*)p!=limit){ +GC_cond_add_roots(base,limit); +base=(char*)p; +} +limit=new_limit; +} +} +if ((word)p > (word)new_limit)break; +p=(LPVOID)new_limit; +} +GC_cond_add_roots(base,limit); +} +#endif +#if defined(ALPHA)&&defined(OSF1) +#include +EXTERN_C_BEGIN +extern char*sys_errlist[]; +extern int sys_nerr; +extern int errno; +EXTERN_C_END +GC_INNER void GC_register_dynamic_libraries(void) +{ +ldr_module_t moduleid=LDR_NULL_MODULE; +ldr_process_t mypid=ldr_my_process(); +while (TRUE){ +ldr_module_info_t moduleinfo; +size_t modulereturnsize; +ldr_region_t region; +ldr_region_info_t regioninfo; +size_t regionreturnsize; +int status=ldr_next_module(mypid,&moduleid); +if (moduleid==LDR_NULL_MODULE) +break; +if (status!=0){ +ABORT_ARG3("ldr_next_module failed", +":status=%d,errcode=%d (%s)",status,errno, +errno < sys_nerr?sys_errlist[errno]:""); +} +status=ldr_inq_module(mypid,moduleid,&moduleinfo, +sizeof(moduleinfo),&modulereturnsize); +if (status!=0) +ABORT("ldr_inq_module failed"); +if (moduleinfo.lmi_flags&LDR_MAIN) +continue; +#ifdef DL_VERBOSE +GC_log_printf("---Module---\n"); +GC_log_printf("Module ID\t=%16ld\n",moduleinfo.lmi_modid); +GC_log_printf("Count of regions=%16d\n",moduleinfo.lmi_nregion); +GC_log_printf("flags for module=%16lx\n",moduleinfo.lmi_flags); +GC_log_printf("module pathname\t=\"%s\"\n",moduleinfo.lmi_name); +#endif +for (region=0;region < moduleinfo.lmi_nregion;region++){ +status=ldr_inq_region(mypid,moduleid,region,®ioninfo, +sizeof(regioninfo),®ionreturnsize); +if (status!=0) +ABORT("ldr_inq_region failed"); +if (!(regioninfo.lri_prot&LDR_W)) +continue; +#ifdef DL_VERBOSE +GC_log_printf("--- Region---\n"); +GC_log_printf("Region number\t=%16ld\n", +regioninfo.lri_region_no); +GC_log_printf("Protection flags=%016x\n",regioninfo.lri_prot); +GC_log_printf("Virtual address\t=%16p\n",regioninfo.lri_vaddr); +GC_log_printf("Mapped address\t=%16p\n", +regioninfo.lri_mapaddr); +GC_log_printf("Region size\t=%16ld\n",regioninfo.lri_size); +GC_log_printf("Region name\t=\"%s\"\n",regioninfo.lri_name); +#endif +GC_add_roots_inner((char*)regioninfo.lri_mapaddr, +(char*)regioninfo.lri_mapaddr+regioninfo.lri_size, +TRUE); +} +} +} +#endif +#if defined(HPUX) +#include +#include +EXTERN_C_BEGIN +extern char*sys_errlist[]; +extern int sys_nerr; +EXTERN_C_END +GC_INNER void GC_register_dynamic_libraries(void) +{ +int index=1; +while (TRUE){ +struct shl_descriptor*shl_desc; +int status=shl_get(index,&shl_desc); +if (status!=0){ +#ifdef GC_HPUX_THREADS +break; +#else +if (errno==EINVAL){ +break; +} else { +ABORT_ARG3("shl_get failed", +":status=%d,errcode=%d (%s)",status,errno, +errno < sys_nerr?sys_errlist[errno]:""); +} +#endif +} +#ifdef DL_VERBOSE +GC_log_printf("---Shared library---\n"); +GC_log_printf("\tfilename\t=\"%s\"\n",shl_desc->filename); +GC_log_printf("\tindex\t\t=%d\n",index); +GC_log_printf("\thandle\t\t=%08x\n", +(unsigned long)shl_desc->handle); +GC_log_printf("\ttext seg.start\t=%08x\n",shl_desc->tstart); +GC_log_printf("\ttext seg.end\t=%08x\n",shl_desc->tend); +GC_log_printf("\tdata seg.start\t=%08x\n",shl_desc->dstart); +GC_log_printf("\tdata seg.end\t=%08x\n",shl_desc->dend); +GC_log_printf("\tref.count\t=%lu\n",shl_desc->ref_count); +#endif +GC_add_roots_inner((char*)shl_desc->dstart, +(char*)shl_desc->dend,TRUE); +index++; +} +} +#endif +#ifdef AIX +#include +#include +#include +GC_INNER void GC_register_dynamic_libraries(void) +{ +int ldibuflen=8192; +for (;;){ +int len; +struct ld_info*ldi; +#if defined(CPPCHECK) +char 
ldibuf[ldibuflen]; +#else +char*ldibuf=alloca(ldibuflen); +#endif +len=loadquery(L_GETINFO,ldibuf,ldibuflen); +if (len < 0){ +if (errno!=ENOMEM){ +ABORT("loadquery failed"); +} +ldibuflen*=2; +continue; +} +ldi=(struct ld_info*)ldibuf; +while (ldi){ +len=ldi->ldinfo_next; +GC_add_roots_inner( +ldi->ldinfo_dataorg, +(ptr_t)(unsigned long)ldi->ldinfo_dataorg ++ldi->ldinfo_datasize, +TRUE); +ldi=len?(struct ld_info*)((char*)ldi+len):0; +} +break; +} +} +#endif +#ifdef DARWIN +#ifndef __private_extern__ +#define __private_extern__ extern +#include +#undef __private_extern__ +#else +#include +#endif +#include +STATIC const struct dyld_sections_s { +const char*seg; +const char*sect; +} GC_dyld_sections[]={ +{ SEG_DATA,SECT_DATA }, +{ SEG_DATA,"__static_data" }, +{ SEG_DATA,SECT_BSS }, +{ SEG_DATA,SECT_COMMON }, +{ SEG_DATA,"__zobj_data" }, +{ SEG_DATA,"__zobj_bss" } +}; +STATIC const char*const GC_dyld_add_sect_fmts[]={ +"__bss%u", +"__pu_bss%u", +"__zo_bss%u", +"__zo_pu_bss%u" +}; +#ifndef L2_MAX_OFILE_ALIGNMENT +#define L2_MAX_OFILE_ALIGNMENT 15 +#endif +STATIC const char*GC_dyld_name_for_hdr(const struct GC_MACH_HEADER*hdr) +{ +unsigned long i,c; +c=_dyld_image_count(); +for (i=0;i < c;i++) +if ((const struct GC_MACH_HEADER*)_dyld_get_image_header(i)==hdr) +return _dyld_get_image_name(i); +return NULL; +} +STATIC void GC_dyld_image_add(const struct GC_MACH_HEADER*hdr, +intptr_t slide) +{ +unsigned long start,end; +unsigned i,j; +const struct GC_MACH_SECTION*sec; +const char*name; +GC_has_static_roots_func callback=GC_has_static_roots; +DCL_LOCK_STATE; +if (GC_no_dls)return; +#ifdef DARWIN_DEBUG +name=GC_dyld_name_for_hdr(hdr); +#else +name=callback!=0?GC_dyld_name_for_hdr(hdr):NULL; +#endif +for (i=0;i < sizeof(GC_dyld_sections)/sizeof(GC_dyld_sections[0]);i++){ +sec=GC_GETSECTBYNAME(hdr,GC_dyld_sections[i].seg, +GC_dyld_sections[i].sect); +if (sec==NULL||sec->size < sizeof(word)) +continue; +start=slide+sec->addr; +end=start+sec->size; +LOCK(); +if (callback==0||callback(name,(void*)start,(size_t)sec->size)){ +#ifdef DARWIN_DEBUG +GC_log_printf( +"Adding section __DATA,%s at %p-%p (%lu bytes)from image %s\n", +GC_dyld_sections[i].sect,(void*)start,(void*)end, +(unsigned long)sec->size,name); +#endif +GC_add_roots_inner((ptr_t)start,(ptr_t)end,FALSE); +} +UNLOCK(); +} +for (j=0;j < sizeof(GC_dyld_add_sect_fmts)/sizeof(char*);j++){ +const char*fmt=GC_dyld_add_sect_fmts[j]; +for (i=0;i<=L2_MAX_OFILE_ALIGNMENT;i++){ +char secnam[16]; +(void)snprintf(secnam,sizeof(secnam),fmt,(unsigned)i); +secnam[sizeof(secnam)- 1]='\0'; +sec=GC_GETSECTBYNAME(hdr,SEG_DATA,secnam); +if (sec==NULL||sec->size==0) +continue; +start=slide+sec->addr; +end=start+sec->size; +#ifdef DARWIN_DEBUG +GC_log_printf("Adding on-demand section __DATA,%s at" +" %p-%p (%lu bytes)from image %s\n", +secnam,(void*)start,(void*)end, +(unsigned long)sec->size,name); +#endif +GC_add_roots((char*)start,(char*)end); +} +} +#if defined(DARWIN_DEBUG)&&!defined(NO_DEBUGGING) +LOCK(); +GC_print_static_roots(); +UNLOCK(); +#endif +} +STATIC void GC_dyld_image_remove(const struct GC_MACH_HEADER*hdr, +intptr_t slide) +{ +unsigned long start,end; +unsigned i,j; +const struct GC_MACH_SECTION*sec; +#if defined(DARWIN_DEBUG)&&!defined(NO_DEBUGGING) +DCL_LOCK_STATE; +#endif +for (i=0;i < sizeof(GC_dyld_sections)/sizeof(GC_dyld_sections[0]);i++){ +sec=GC_GETSECTBYNAME(hdr,GC_dyld_sections[i].seg, +GC_dyld_sections[i].sect); +if (sec==NULL||sec->size==0) +continue; +start=slide+sec->addr; +end=start+sec->size; +#ifdef DARWIN_DEBUG +GC_log_printf( 
+"Removing section __DATA,%s at %p-%p (%lu bytes)from image %s\n", +GC_dyld_sections[i].sect,(void*)start,(void*)end, +(unsigned long)sec->size,GC_dyld_name_for_hdr(hdr)); +#endif +GC_remove_roots((char*)start,(char*)end); +} +for (j=0;j < sizeof(GC_dyld_add_sect_fmts)/sizeof(char*);j++){ +const char*fmt=GC_dyld_add_sect_fmts[j]; +for (i=0;i<=L2_MAX_OFILE_ALIGNMENT;i++){ +char secnam[16]; +(void)snprintf(secnam,sizeof(secnam),fmt,(unsigned)i); +secnam[sizeof(secnam)- 1]='\0'; +sec=GC_GETSECTBYNAME(hdr,SEG_DATA,secnam); +if (sec==NULL||sec->size==0) +continue; +start=slide+sec->addr; +end=start+sec->size; +#ifdef DARWIN_DEBUG +GC_log_printf("Removing on-demand section __DATA,%s at" +" %p-%p (%lu bytes)from image %s\n",secnam, +(void*)start,(void*)end,(unsigned long)sec->size, +GC_dyld_name_for_hdr(hdr)); +#endif +GC_remove_roots((char*)start,(char*)end); +} +} +#if defined(DARWIN_DEBUG)&&!defined(NO_DEBUGGING) +LOCK(); +GC_print_static_roots(); +UNLOCK(); +#endif +} +GC_INNER void GC_register_dynamic_libraries(void) +{ +} +GC_INNER void GC_init_dyld(void) +{ +static GC_bool initialized=FALSE; +if (initialized)return; +#ifdef DARWIN_DEBUG +GC_log_printf("Registering dyld callbacks...\n"); +#endif +_dyld_register_func_for_add_image( +(void (*)(const struct mach_header*,intptr_t))GC_dyld_image_add); +_dyld_register_func_for_remove_image( +(void (*)(const struct mach_header*,intptr_t))GC_dyld_image_remove); +initialized=TRUE; +#ifdef NO_DYLD_BIND_FULLY_IMAGE +#else +if (GC_no_dls)return; +if (GETENV("DYLD_BIND_AT_LAUNCH")==0){ +#ifdef DARWIN_DEBUG +GC_log_printf("Forcing full bind of GC code...\n"); +#endif +if (!_dyld_bind_fully_image_containing_address( +(unsigned long*)GC_malloc)) +ABORT("_dyld_bind_fully_image_containing_address failed"); +} +#endif +} +#define HAVE_REGISTER_MAIN_STATIC_DATA +GC_INNER GC_bool GC_register_main_static_data(void) +{ +return FALSE; +} +#endif +#if defined(HAIKU) +#include +GC_INNER void GC_register_dynamic_libraries(void) +{ +image_info info; +int32 cookie=0; +while (get_next_image_info(0,&cookie,&info)==B_OK){ +ptr_t data=(ptr_t)info.data; +GC_add_roots_inner(data,data+info.data_size,TRUE); +} +} +#endif +#elif defined(PCR) +GC_INNER void GC_register_dynamic_libraries(void) +{ +PCR_IL_LoadedFile*p=PCR_IL_GetLastLoadedFile(); +PCR_IL_LoadedSegment*q; +while (p!=NIL&&!(p->lf_commitPoint)){ +p=p->lf_prev; +} +for (;p!=NIL;p=p->lf_prev){ +for (q=p->lf_ls;q!=NIL;q=q->ls_next){ +if ((q->ls_flags&PCR_IL_SegFlags_Traced_MASK) +==PCR_IL_SegFlags_Traced_on){ +GC_add_roots_inner((ptr_t)q->ls_addr, +(ptr_t)q->ls_addr+q->ls_bytes,TRUE); +} +} +} +} +#endif +#if!defined(HAVE_REGISTER_MAIN_STATIC_DATA)&&defined(DYNAMIC_LOADING) +GC_INNER GC_bool GC_register_main_static_data(void) +{ +return TRUE; +} +#endif +GC_API void GC_CALL GC_register_has_static_roots_callback( +GC_has_static_roots_func callback) +{ +GC_has_static_roots=callback; +} +#if defined(GC_PTHREADS)&&!defined(GC_NO_DLOPEN) +#undef GC_MUST_RESTORE_REDEFINED_DLOPEN +#if defined(dlopen)&&!defined(GC_USE_LD_WRAP) +#undef dlopen +#define GC_MUST_RESTORE_REDEFINED_DLOPEN +#endif +#ifndef USE_PROC_FOR_LIBRARIES +static void disable_gc_for_dlopen(void) +{ +DCL_LOCK_STATE; +LOCK(); +while (GC_incremental&&GC_collection_in_progress()){ +ENTER_GC(); +GC_collect_a_little_inner(1000); +EXIT_GC(); +} +++GC_dont_gc; +UNLOCK(); +} +#endif +#ifdef GC_USE_LD_WRAP +#define WRAP_DLFUNC(f)__wrap_##f +#define REAL_DLFUNC(f)__real_##f +void*REAL_DLFUNC(dlopen)(const char*,int); +#else +#define WRAP_DLFUNC(f)GC_##f +#define 
REAL_DLFUNC(f)f +#endif +GC_API void*WRAP_DLFUNC(dlopen)(const char*path,int mode) +{ +void*result; +#ifndef USE_PROC_FOR_LIBRARIES +disable_gc_for_dlopen(); +#endif +result=REAL_DLFUNC(dlopen)(path,mode); +#ifndef USE_PROC_FOR_LIBRARIES +GC_enable(); +#endif +return(result); +} +#ifdef GC_USE_LD_WRAP +GC_API void*GC_dlopen(const char*path,int mode) +{ +return dlopen(path,mode); +} +#endif +#ifdef GC_MUST_RESTORE_REDEFINED_DLOPEN +#define dlopen GC_dlopen +#endif +#endif +#if!defined(SN_TARGET_ORBIS)&&!defined(SN_TARGET_PSP2) +#include +#ifdef AMIGA +#ifndef __GNUC__ +#include +#else +#include +#endif +#endif +#if defined(MACOS)&&defined(__MWERKS__) +#if defined(POWERPC) +#define NONVOLATILE_GPR_COUNT 19 +struct ppc_registers { +unsigned long gprs[NONVOLATILE_GPR_COUNT]; +}; +typedef struct ppc_registers ppc_registers; +#if defined(CPPCHECK) +void getRegisters(ppc_registers*regs); +#else +asm static void getRegisters(register ppc_registers*regs) +{ +stmw r13,regs->gprs +blr +} +#endif +static void PushMacRegisters(void) +{ +ppc_registers regs; +int i; +getRegisters(®s); +for (i=0;i < NONVOLATILE_GPR_COUNT;i++) +GC_push_one(regs.gprs[i]); +} +#else +asm static void PushMacRegisters(void) +{ +sub.w #4,sp +move.l a2,(sp) +jsr GC_push_one +move.l a3,(sp) +jsr GC_push_one +move.l a4,(sp) +jsr GC_push_one +#if!__option(a6frames) +move.l a6,(sp) +jsr GC_push_one +#endif +move.l d2,(sp) +jsr GC_push_one +move.l d3,(sp) +jsr GC_push_one +move.l d4,(sp) +jsr GC_push_one +move.l d5,(sp) +jsr GC_push_one +move.l d6,(sp) +jsr GC_push_one +move.l d7,(sp) +jsr GC_push_one +add.w #4,sp +rts +} +#endif +#endif +#if defined(SPARC)||defined(IA64) +GC_INNER ptr_t GC_save_regs_ret_val=NULL; +#endif +#undef HAVE_PUSH_REGS +#if defined(USE_ASM_PUSH_REGS) +#define HAVE_PUSH_REGS +#else +#ifdef STACK_NOT_SCANNED +void GC_push_regs(void) +{ +} +#define HAVE_PUSH_REGS +#elif defined(M68K)&&defined(AMIGA) +void GC_push_regs(void) +{ +#ifdef __GNUC__ +asm("subq.w&0x4,%sp"); +asm("mov.l %a2,(%sp)");asm("jsr _GC_push_one"); +asm("mov.l %a3,(%sp)");asm("jsr _GC_push_one"); +asm("mov.l %a4,(%sp)");asm("jsr _GC_push_one"); +asm("mov.l %a5,(%sp)");asm("jsr _GC_push_one"); +asm("mov.l %a6,(%sp)");asm("jsr _GC_push_one"); +asm("mov.l %d2,(%sp)");asm("jsr _GC_push_one"); +asm("mov.l %d3,(%sp)");asm("jsr _GC_push_one"); +asm("mov.l %d4,(%sp)");asm("jsr _GC_push_one"); +asm("mov.l %d5,(%sp)");asm("jsr _GC_push_one"); +asm("mov.l %d6,(%sp)");asm("jsr _GC_push_one"); +asm("mov.l %d7,(%sp)");asm("jsr _GC_push_one"); +asm("addq.w&0x4,%sp"); +#else +GC_push_one(getreg(REG_A2)); +GC_push_one(getreg(REG_A3)); +#ifndef __SASC +GC_push_one(getreg(REG_A4)); +#endif +GC_push_one(getreg(REG_A5)); +GC_push_one(getreg(REG_A6)); +GC_push_one(getreg(REG_D2)); +GC_push_one(getreg(REG_D3)); +GC_push_one(getreg(REG_D4)); +GC_push_one(getreg(REG_D5)); +GC_push_one(getreg(REG_D6)); +GC_push_one(getreg(REG_D7)); +#endif +} +#define HAVE_PUSH_REGS +#elif defined(MACOS) +#if defined(M68K)&&defined(THINK_C)&&!defined(CPPCHECK) +#define PushMacReg(reg)move.l reg,(sp)jsr GC_push_one +void GC_push_regs(void) +{ +asm { +sub.w #4,sp;reserve space for one parameter. +PushMacReg(a2); +PushMacReg(a3); +PushMacReg(a4); +;skip a5 (globals),a6 (frame pointer),and a7 (stack pointer) +PushMacReg(d2); +PushMacReg(d3); +PushMacReg(d4); +PushMacReg(d5); +PushMacReg(d6); +PushMacReg(d7); +add.w #4,sp;fix stack. 
+} +} +#define HAVE_PUSH_REGS +#undef PushMacReg +#elif defined(__MWERKS__) +void GC_push_regs(void) +{ +PushMacRegisters(); +} +#define HAVE_PUSH_REGS +#endif +#endif +#endif +#if defined(HAVE_PUSH_REGS)&&defined(THREADS) +#error GC_push_regs cannot be used with threads +#undef HAVE_PUSH_REGS +#endif +#if!defined(HAVE_PUSH_REGS)&&defined(UNIX_LIKE) +#include +#ifndef NO_GETCONTEXT +#if defined(DARWIN)&&(MAC_OS_X_VERSION_MAX_ALLOWED>=1060) +#include +#else +#include +#endif +#ifdef GETCONTEXT_FPU_EXCMASK_BUG +#include +#endif +#endif +#endif +GC_ATTR_NO_SANITIZE_ADDR +GC_INNER void GC_with_callee_saves_pushed(void (*fn)(ptr_t,void*), +volatile ptr_t arg) +{ +volatile int dummy; +volatile ptr_t context=0; +#if defined(HAVE_PUSH_REGS) +GC_push_regs(); +#elif defined(__EMSCRIPTEN__) +#else +#if defined(UNIX_LIKE)&&!defined(NO_GETCONTEXT) +static signed char getcontext_works=0; +ucontext_t ctxt; +#ifdef GETCONTEXT_FPU_EXCMASK_BUG +#ifdef X86_64 +unsigned short old_fcw; +#if defined(CPPCHECK) +GC_noop1((word)&old_fcw); +#endif +__asm__ __volatile__ ("fstcw %0":"=m" (*&old_fcw)); +#else +int except_mask=fegetexcept(); +#endif +#endif +if (getcontext_works>=0){ +if (getcontext(&ctxt)< 0){ +WARN("getcontext failed:" +" using another register retrieval method...\n",0); +} else { +context=(ptr_t)&ctxt; +} +if (EXPECT(0==getcontext_works,FALSE)) +getcontext_works=context!=NULL?1:-1; +} +#ifdef GETCONTEXT_FPU_EXCMASK_BUG +#ifdef X86_64 +__asm__ __volatile__ ("fldcw %0"::"m" (*&old_fcw)); +{ +unsigned mxcsr; +__asm__ __volatile__ ("stmxcsr %0":"=m" (*&mxcsr)); +mxcsr=(mxcsr&~(FE_ALL_EXCEPT<<7))| +((old_fcw&FE_ALL_EXCEPT)<<7); +__asm__ __volatile__ ("ldmxcsr %0"::"m" (*&mxcsr)); +} +#else +if (feenableexcept(except_mask)< 0) +ABORT("feenableexcept failed"); +#endif +#endif +#if defined(SPARC)||defined(IA64) +GC_save_regs_ret_val=GC_save_regs_in_stack(); +#endif +if (NULL==context) +#endif +{ +#if defined(HAVE_BUILTIN_UNWIND_INIT) +__builtin_unwind_init(); +#elif defined(NO_CRT)&&defined(MSWIN32) +CONTEXT ctx; +RtlCaptureContext(&ctx); +#else +jmp_buf regs; +word*i=(word*)®s; +ptr_t lim=(ptr_t)(®s)+sizeof(regs); +for (;(word)i < (word)lim;i++){ +*i=0; +} +#if defined(MSWIN32)||defined(MSWINCE)||defined(UTS4)||defined(OS2)||defined(CX_UX)||defined(__CC_ARM)||defined(LINUX)||defined(EWS4800)||defined(RTEMS) +(void)setjmp(regs); +#else +(void)_setjmp(regs); +#endif +#endif +} +#endif +fn(arg,( void*)context); +GC_noop1(COVERT_DATAFLOW(&dummy)); +} +#endif +#if defined(GC_PTHREADS)&&!defined(GC_WIN32_THREADS)&&!defined(GC_DARWIN_THREADS)&&!defined(SN_TARGET_ORBIS)&&!defined(SN_TARGET_PSP2) +#ifdef NACL +#include +#include +STATIC int GC_nacl_num_gc_threads=0; +STATIC __thread int GC_nacl_thread_idx=-1; +STATIC volatile int GC_nacl_park_threads_now=0; +STATIC volatile pthread_t GC_nacl_thread_parker=-1; +GC_INNER __thread GC_thread GC_nacl_gc_thread_self=NULL; +volatile int GC_nacl_thread_parked[MAX_NACL_GC_THREADS]; +int GC_nacl_thread_used[MAX_NACL_GC_THREADS]; +#elif defined(GC_OPENBSD_UTHREADS) +#include +#else +#include +#include +#include +#include +#include +#if (!defined(AO_HAVE_load_acquire)||!defined(AO_HAVE_store_release))&&!defined(CPPCHECK) +#error AO_load_acquire and/or AO_store_release are missing; +#error please define AO_REQUIRE_CAS manually +#endif +#undef pthread_sigmask +#ifdef GC_ENABLE_SUSPEND_THREAD +static void*GC_CALLBACK suspend_self_inner(void*client_data); +#endif +#ifdef DEBUG_THREADS +#ifndef NSIG +#if defined(MAXSIG) +#define NSIG (MAXSIG+1) +#elif defined(_NSIG) +#define NSIG 
_NSIG +#elif defined(__SIGRTMAX) +#define NSIG (__SIGRTMAX+1) +#else +#error define NSIG +#endif +#endif +void GC_print_sig_mask(void) +{ +sigset_t blocked; +int i; +if (pthread_sigmask(SIG_BLOCK,NULL,&blocked)!=0) +ABORT("pthread_sigmask failed"); +for (i=1;i < NSIG;i++){ +if (sigismember(&blocked,i)) +GC_printf("Signal blocked:%d\n",i); +} +} +#endif +STATIC void GC_remove_allowed_signals(sigset_t*set) +{ +if (sigdelset(set,SIGINT)!=0 +||sigdelset(set,SIGQUIT)!=0 +||sigdelset(set,SIGABRT)!=0 +||sigdelset(set,SIGTERM)!=0){ +ABORT("sigdelset failed"); +} +#ifdef MPROTECT_VDB +if (sigdelset(set,SIGSEGV)!=0 +#ifdef HAVE_SIGBUS +||sigdelset(set,SIGBUS)!=0 +#endif +){ +ABORT("sigdelset failed"); +} +#endif +} +static sigset_t suspend_handler_mask; +#define THREAD_RESTARTED 0x1 +STATIC volatile AO_t GC_stop_count=0; +STATIC volatile AO_t GC_world_is_stopped=FALSE; +#if defined(GC_OSF1_THREADS)||defined(THREAD_SANITIZER)||defined(ADDRESS_SANITIZER)||defined(MEMORY_SANITIZER) +STATIC GC_bool GC_retry_signals=TRUE; +#else +STATIC GC_bool GC_retry_signals=FALSE; +#endif +#ifndef SIG_THR_RESTART +#if defined(GC_HPUX_THREADS)||defined(GC_OSF1_THREADS)||defined(GC_NETBSD_THREADS)||defined(GC_USESIGRT_SIGNALS) +#if defined(_SIGRTMIN)&&!defined(CPPCHECK) +#define SIG_THR_RESTART _SIGRTMIN+5 +#else +#define SIG_THR_RESTART SIGRTMIN+5 +#endif +#else +#define SIG_THR_RESTART SIGXCPU +#endif +#endif +#define SIGNAL_UNSET (-1) +STATIC int GC_sig_suspend=SIGNAL_UNSET; +STATIC int GC_sig_thr_restart=SIGNAL_UNSET; +GC_API void GC_CALL GC_set_suspend_signal(int sig) +{ +if (GC_is_initialized)return; +GC_sig_suspend=sig; +} +GC_API void GC_CALL GC_set_thr_restart_signal(int sig) +{ +if (GC_is_initialized)return; +GC_sig_thr_restart=sig; +} +GC_API int GC_CALL GC_get_suspend_signal(void) +{ +return GC_sig_suspend!=SIGNAL_UNSET?GC_sig_suspend:SIG_SUSPEND; +} +GC_API int GC_CALL GC_get_thr_restart_signal(void) +{ +return GC_sig_thr_restart!=SIGNAL_UNSET +?GC_sig_thr_restart:SIG_THR_RESTART; +} +#if defined(GC_EXPLICIT_SIGNALS_UNBLOCK)||!defined(NO_SIGNALS_UNBLOCK_IN_MAIN) +GC_INNER void GC_unblock_gc_signals(void) +{ +sigset_t set; +sigemptyset(&set); +GC_ASSERT(GC_sig_suspend!=SIGNAL_UNSET); +GC_ASSERT(GC_sig_thr_restart!=SIGNAL_UNSET); +sigaddset(&set,GC_sig_suspend); +sigaddset(&set,GC_sig_thr_restart); +if (pthread_sigmask(SIG_UNBLOCK,&set,NULL)!=0) +ABORT("pthread_sigmask failed"); +} +#endif +STATIC sem_t GC_suspend_ack_sem; +STATIC void GC_suspend_handler_inner(ptr_t dummy,void*context); +#ifndef NO_SA_SIGACTION +STATIC void GC_suspend_handler(int sig,siginfo_t*info GC_ATTR_UNUSED, +void*context GC_ATTR_UNUSED) +#else +STATIC void GC_suspend_handler(int sig) +#endif +{ +int old_errno=errno; +if (sig!=GC_sig_suspend){ +#if defined(GC_FREEBSD_THREADS) +if (0==sig)return; +#endif +ABORT("Bad signal in suspend_handler"); +} +#if defined(IA64)||defined(HP_PA)||defined(M68K) +GC_with_callee_saves_pushed(GC_suspend_handler_inner,NULL); +#else +{ +#ifdef NO_SA_SIGACTION +void*context=0; +#endif +GC_suspend_handler_inner(NULL,context); +} +#endif +errno=old_errno; +} +#ifdef BASE_ATOMIC_OPS_EMULATED +#define ao_load_acquire_async(p)(*(p)) +#define ao_load_async(p)ao_load_acquire_async(p) +#define ao_store_release_async(p,v)(void)(*(p)=(v)) +#define ao_store_async(p,v)ao_store_release_async(p,v) +#else +#define ao_load_acquire_async(p)AO_load_acquire(p) +#define ao_load_async(p)AO_load(p) +#define ao_store_release_async(p,v)AO_store_release(p,v) +#define ao_store_async(p,v)AO_store(p,v) +#endif +#ifdef THREAD_SANITIZER 
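+/* GC_lookup_thread_async() is the thread-table lookup used from the      */
+/* signal-based suspend handler; under THREAD_SANITIZER it is compiled as */
+/* a separate, non-instrumented copy (GC_ATTR_NO_SANITIZE_THREAD), while  */
+/* in all other builds it is simply an alias for GC_lookup_thread (see    */
+/* the #else branch below).                                               */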
+GC_ATTR_NO_SANITIZE_THREAD +static GC_thread GC_lookup_thread_async(pthread_t id) +{ +GC_thread p=GC_threads[THREAD_TABLE_INDEX(id)]; +while (p!=NULL&&!THREAD_EQUAL(p->id,id)) +p=p->next; +return p; +} +#else +#define GC_lookup_thread_async GC_lookup_thread +#endif +GC_INLINE void GC_store_stack_ptr(GC_thread me) +{ +#ifdef SPARC +ao_store_async((volatile AO_t*)&me->stop_info.stack_ptr, +(AO_t)GC_save_regs_in_stack()); +#else +#ifdef IA64 +me->backing_store_ptr=GC_save_regs_in_stack(); +#endif +ao_store_async((volatile AO_t*)&me->stop_info.stack_ptr, +(AO_t)GC_approx_sp()); +#endif +} +STATIC void GC_suspend_handler_inner(ptr_t dummy GC_ATTR_UNUSED, +void*context GC_ATTR_UNUSED) +{ +pthread_t self=pthread_self(); +GC_thread me; +IF_CANCEL(int cancel_state;) +AO_t my_stop_count=ao_load_acquire_async(&GC_stop_count); +DISABLE_CANCEL(cancel_state); +#ifdef DEBUG_THREADS +GC_log_printf("Suspending %p\n",(void*)self); +#endif +GC_ASSERT(((word)my_stop_count&THREAD_RESTARTED)==0); +me=GC_lookup_thread_async(self); +#ifdef GC_ENABLE_SUSPEND_THREAD +if (ao_load_async(&me->suspended_ext)){ +GC_store_stack_ptr(me); +sem_post(&GC_suspend_ack_sem); +suspend_self_inner(me); +#ifdef DEBUG_THREADS +GC_log_printf("Continuing %p on GC_resume_thread\n",(void*)self); +#endif +RESTORE_CANCEL(cancel_state); +return; +} +#endif +if (((word)me->stop_info.last_stop_count&~(word)THREAD_RESTARTED) +==(word)my_stop_count){ +if (!GC_retry_signals){ +WARN("Duplicate suspend signal in thread %p\n",self); +} +RESTORE_CANCEL(cancel_state); +return; +} +GC_store_stack_ptr(me); +#ifdef THREAD_SANITIZER +{ +sigset_t set; +sigemptyset(&set); +GC_ASSERT(GC_sig_suspend!=SIGNAL_UNSET); +GC_ASSERT(GC_sig_thr_restart!=SIGNAL_UNSET); +sigaddset(&set,GC_sig_suspend); +sigaddset(&set,GC_sig_thr_restart); +if (pthread_sigmask(SIG_UNBLOCK,&set,NULL)!=0) +ABORT("pthread_sigmask failed in suspend handler"); +} +#endif +sem_post(&GC_suspend_ack_sem); +ao_store_release_async(&me->stop_info.last_stop_count,my_stop_count); +do { +sigsuspend (&suspend_handler_mask); +} while (ao_load_acquire_async(&GC_world_is_stopped) +&&ao_load_async(&GC_stop_count)==my_stop_count); +#ifdef DEBUG_THREADS +GC_log_printf("Continuing %p\n",(void*)self); +#endif +#ifndef GC_NETBSD_THREADS_WORKAROUND +if (GC_retry_signals) +#endif +{ +sem_post(&GC_suspend_ack_sem); +#ifdef GC_NETBSD_THREADS_WORKAROUND +if (GC_retry_signals) +#endif +{ +ao_store_release_async(&me->stop_info.last_stop_count, +(AO_t)((word)my_stop_count|THREAD_RESTARTED)); +} +} +RESTORE_CANCEL(cancel_state); +} +static void suspend_restart_barrier(int n_live_threads) +{ +int i; +for (i=0;i < n_live_threads;i++){ +while (0!=sem_wait(&GC_suspend_ack_sem)){ +if (errno!=EINTR) +ABORT("sem_wait failed"); +} +} +#ifdef GC_ASSERTIONS +sem_getvalue(&GC_suspend_ack_sem,&i); +GC_ASSERT(0==i); +#endif +} +static int resend_lost_signals(int n_live_threads, +int (*suspend_restart_all)(void)) +{ +#define WAIT_UNIT 3000 +#define RETRY_INTERVAL 100000 +if (n_live_threads > 0){ +unsigned long wait_usecs=0; +for (;;){ +int ack_count; +sem_getvalue(&GC_suspend_ack_sem,&ack_count); +if (ack_count==n_live_threads) +break; +if (wait_usecs > RETRY_INTERVAL){ +int newly_sent=suspend_restart_all(); +GC_COND_LOG_PRINTF("Resent %d signals after timeout\n",newly_sent); +sem_getvalue(&GC_suspend_ack_sem,&ack_count); +if (newly_sent < n_live_threads - ack_count){ +WARN("Lost some threads while stopping or starting world?!\n",0); +n_live_threads=ack_count+newly_sent; +} +wait_usecs=0; +} +#ifdef LINT2 +#undef WAIT_UNIT 
+#define WAIT_UNIT 1 +sched_yield(); +#elif defined(CPPCHECK) +{ +struct timespec ts; +ts.tv_sec=0; +ts.tv_nsec=WAIT_UNIT*1000; +(void)nanosleep(&ts,NULL); +} +#else +usleep(WAIT_UNIT); +#endif +wait_usecs+=WAIT_UNIT; +} +} +return n_live_threads; +} +STATIC void GC_restart_handler(int sig) +{ +#if defined(DEBUG_THREADS) +int old_errno=errno; +#endif +if (sig!=GC_sig_thr_restart) +ABORT("Bad signal in restart handler"); +#ifdef DEBUG_THREADS +GC_log_printf("In GC_restart_handler for %p\n",(void*)pthread_self()); +errno=old_errno; +#endif +} +#ifdef USE_TKILL_ON_ANDROID +EXTERN_C_BEGIN +extern int tkill(pid_t tid,int sig); +EXTERN_C_END +static int android_thread_kill(pid_t tid,int sig) +{ +int ret; +int old_errno=errno; +ret=tkill(tid,sig); +if (ret < 0){ +ret=errno; +errno=old_errno; +} +return ret; +} +#define THREAD_SYSTEM_ID(t)(t)->kernel_id +#define RAISE_SIGNAL(t,sig)android_thread_kill(THREAD_SYSTEM_ID(t),sig) +#else +#define THREAD_SYSTEM_ID(t)(t)->id +#define RAISE_SIGNAL(t,sig)pthread_kill(THREAD_SYSTEM_ID(t),sig) +#endif +#ifdef GC_ENABLE_SUSPEND_THREAD +#include +STATIC void GC_brief_async_signal_safe_sleep(void) +{ +struct timeval tv; +tv.tv_sec=0; +#if defined(GC_TIME_LIMIT)&&!defined(CPPCHECK) +tv.tv_usec=1000*GC_TIME_LIMIT/2; +#else +tv.tv_usec=1000*50/2; +#endif +(void)select(0,0,0,0,&tv); +} +static void*GC_CALLBACK suspend_self_inner(void*client_data){ +GC_thread me=(GC_thread)client_data; +while (ao_load_acquire_async(&me->suspended_ext)){ +GC_brief_async_signal_safe_sleep(); +} +return NULL; +} +GC_API void GC_CALL GC_suspend_thread(GC_SUSPEND_THREAD_ID thread){ +GC_thread t; +IF_CANCEL(int cancel_state;) +DCL_LOCK_STATE; +LOCK(); +t=GC_lookup_thread((pthread_t)thread); +if (t==NULL||t->suspended_ext){ +UNLOCK(); +return; +} +AO_store_release(&t->suspended_ext,TRUE); +if (THREAD_EQUAL((pthread_t)thread,pthread_self())){ +UNLOCK(); +(void)GC_do_blocking(suspend_self_inner,t); +return; +} +if ((t->flags&FINISHED)!=0){ +UNLOCK(); +return; +} +DISABLE_CANCEL(cancel_state); +#ifdef PARALLEL_MARK +if (GC_parallel) +GC_wait_for_reclaim(); +#endif +if (GC_manual_vdb){ +GC_acquire_dirty_lock(); +} +switch (RAISE_SIGNAL(t,GC_sig_suspend)){ +case 0: +break; +default: +ABORT("pthread_kill failed"); +} +GC_ASSERT(GC_thr_initialized); +while (sem_wait(&GC_suspend_ack_sem)!=0){ +if (errno!=EINTR) +ABORT("sem_wait for handler failed (suspend_self)"); +} +if (GC_manual_vdb) +GC_release_dirty_lock(); +RESTORE_CANCEL(cancel_state); +UNLOCK(); +} +GC_API void GC_CALL GC_resume_thread(GC_SUSPEND_THREAD_ID thread){ +GC_thread t; +DCL_LOCK_STATE; +LOCK(); +t=GC_lookup_thread((pthread_t)thread); +if (t!=NULL) +AO_store(&t->suspended_ext,FALSE); +UNLOCK(); +} +GC_API int GC_CALL GC_is_thread_suspended(GC_SUSPEND_THREAD_ID thread){ +GC_thread t; +int is_suspended=0; +DCL_LOCK_STATE; +LOCK(); +t=GC_lookup_thread((pthread_t)thread); +if (t!=NULL&&t->suspended_ext) +is_suspended=(int)TRUE; +UNLOCK(); +return is_suspended; +} +#endif +#undef ao_load_acquire_async +#undef ao_load_async +#undef ao_store_async +#undef ao_store_release_async +#endif +#ifdef IA64 +#define IF_IA64(x)x +#else +#define IF_IA64(x) +#endif +GC_INNER void GC_push_all_stacks(void) +{ +GC_bool found_me=FALSE; +size_t nthreads=0; +int i; +GC_thread p; +ptr_t lo,hi; +IF_IA64(ptr_t bs_lo;ptr_t bs_hi;) +struct GC_traced_stack_sect_s*traced_stack_sect; +pthread_t self=pthread_self(); +word total_size=0; +if (!EXPECT(GC_thr_initialized,TRUE)) +GC_thr_init(); +#ifdef DEBUG_THREADS +GC_log_printf("Pushing stacks from thread 
%p\n",(void*)self); +#endif +for (i=0;i < THREAD_TABLE_SZ;i++){ +for (p=GC_threads[i];p!=0;p=p->next){ +if (p->flags&FINISHED)continue; +++nthreads; +traced_stack_sect=p->traced_stack_sect; +if (THREAD_EQUAL(p->id,self)){ +GC_ASSERT(!p->thread_blocked); +#ifdef SPARC +lo=(ptr_t)GC_save_regs_in_stack(); +#else +lo=GC_approx_sp(); +#endif +found_me=TRUE; +IF_IA64(bs_hi=(ptr_t)GC_save_regs_in_stack();) +} else { +lo=(ptr_t)AO_load((volatile AO_t*)&p->stop_info.stack_ptr); +IF_IA64(bs_hi=p->backing_store_ptr;) +if (traced_stack_sect!=NULL +&&traced_stack_sect->saved_stack_ptr==lo){ +traced_stack_sect=traced_stack_sect->prev; +} +} +if ((p->flags&MAIN_THREAD)==0){ +hi=p->stack_end; +IF_IA64(bs_lo=p->backing_store_end); +} else { +hi=GC_stackbottom; +IF_IA64(bs_lo=BACKING_STORE_BASE;) +} +#ifdef DEBUG_THREADS +GC_log_printf("Stack for thread %p=[%p,%p)\n", +(void*)p->id,(void*)lo,(void*)hi); +#endif +if (0==lo)ABORT("GC_push_all_stacks:sp not set!"); +if (p->altstack!=NULL&&(word)p->altstack<=(word)lo +&&(word)lo<=(word)p->altstack+p->altstack_size){ +hi=p->altstack+p->altstack_size; +} +GC_push_all_stack_sections(lo,hi,traced_stack_sect); +#ifdef STACK_GROWS_UP +total_size+=lo - hi; +#else +total_size+=hi - lo; +#endif +#ifdef NACL +GC_push_all_stack((ptr_t)p->stop_info.reg_storage, +(ptr_t)(p->stop_info.reg_storage+NACL_GC_REG_STORAGE_SIZE)); +total_size+=NACL_GC_REG_STORAGE_SIZE*sizeof(ptr_t); +#endif +#ifdef IA64 +#ifdef DEBUG_THREADS +GC_log_printf("Reg stack for thread %p=[%p,%p)\n", +(void*)p->id,(void*)bs_lo,(void*)bs_hi); +#endif +GC_push_all_register_sections(bs_lo,bs_hi, +THREAD_EQUAL(p->id,self), +traced_stack_sect); +total_size+=bs_hi - bs_lo; +#endif +} +} +GC_VERBOSE_LOG_PRINTF("Pushed %d thread stacks\n",(int)nthreads); +if (!found_me&&!GC_in_thread_creation) +ABORT("Collecting from unknown thread"); +GC_total_stacksize=total_size; +} +#ifdef DEBUG_THREADS +pthread_t GC_stopping_thread; +int GC_stopping_pid=0; +#endif +STATIC int GC_suspend_all(void) +{ +int n_live_threads=0; +int i; +#ifndef NACL +GC_thread p; +#ifndef GC_OPENBSD_UTHREADS +int result; +#endif +pthread_t self=pthread_self(); +for (i=0;i < THREAD_TABLE_SZ;i++){ +for (p=GC_threads[i];p!=0;p=p->next){ +if (!THREAD_EQUAL(p->id,self)){ +if ((p->flags&FINISHED)!=0)continue; +if (p->thread_blocked)continue; +#ifndef GC_OPENBSD_UTHREADS +#ifdef GC_ENABLE_SUSPEND_THREAD +if (p->suspended_ext)continue; +#endif +if (AO_load(&p->stop_info.last_stop_count)==GC_stop_count) +continue; +n_live_threads++; +#endif +#ifdef DEBUG_THREADS +GC_log_printf("Sending suspend signal to %p\n",(void*)p->id); +#endif +#ifdef GC_OPENBSD_UTHREADS +{ +stack_t stack; +GC_acquire_dirty_lock(); +if (pthread_suspend_np(p->id)!=0) +ABORT("pthread_suspend_np failed"); +GC_release_dirty_lock(); +if (pthread_stackseg_np(p->id,&stack)) +ABORT("pthread_stackseg_np failed"); +p->stop_info.stack_ptr=(ptr_t)stack.ss_sp - stack.ss_size; +if (GC_on_thread_event) +GC_on_thread_event(GC_EVENT_THREAD_SUSPENDED, +(void*)p->id); +} +#else +result=RAISE_SIGNAL(p,GC_sig_suspend); +switch(result){ +case ESRCH: +n_live_threads--; +break; +case 0: +if (GC_on_thread_event) +GC_on_thread_event(GC_EVENT_THREAD_SUSPENDED, +(void*)(word)THREAD_SYSTEM_ID(p)); +break; +default: +ABORT_ARG1("pthread_kill failed at suspend", +":errcode=%d",result); +} +#endif +} +} +} +#else +#ifndef NACL_PARK_WAIT_NANOSECONDS +#define NACL_PARK_WAIT_NANOSECONDS (100*1000) +#endif +#define NANOS_PER_SECOND (1000UL*1000*1000) +unsigned long num_sleeps=0; +#ifdef DEBUG_THREADS 
+GC_log_printf("pthread_stop_world:num_threads=%d\n", +GC_nacl_num_gc_threads - 1); +#endif +GC_nacl_thread_parker=pthread_self(); +GC_nacl_park_threads_now=1; +if (GC_manual_vdb) +GC_acquire_dirty_lock(); +while (1){ +int num_threads_parked=0; +struct timespec ts; +int num_used=0; +for (i=0;i < MAX_NACL_GC_THREADS +&&num_used < GC_nacl_num_gc_threads;i++){ +if (GC_nacl_thread_used[i]==1){ +num_used++; +if (GC_nacl_thread_parked[i]==1){ +num_threads_parked++; +if (GC_on_thread_event) +GC_on_thread_event(GC_EVENT_THREAD_SUSPENDED,(void*)(word)i); +} +} +} +if (num_threads_parked>=GC_nacl_num_gc_threads - 1) +break; +ts.tv_sec=0; +ts.tv_nsec=NACL_PARK_WAIT_NANOSECONDS; +#ifdef DEBUG_THREADS +GC_log_printf("Sleep waiting for %d threads to park...\n", +GC_nacl_num_gc_threads - num_threads_parked - 1); +#endif +nanosleep(&ts,0); +if (++num_sleeps > NANOS_PER_SECOND/NACL_PARK_WAIT_NANOSECONDS){ +WARN("GC appears stalled waiting for %" WARN_PRIdPTR +" threads to park...\n", +GC_nacl_num_gc_threads - num_threads_parked - 1); +num_sleeps=0; +} +} +if (GC_manual_vdb) +GC_release_dirty_lock(); +#endif +return n_live_threads; +} +GC_INNER void GC_stop_world(void) +{ +#if!defined(GC_OPENBSD_UTHREADS)&&!defined(NACL) +int n_live_threads; +#endif +GC_ASSERT(I_HOLD_LOCK()); +#ifdef DEBUG_THREADS +GC_stopping_thread=pthread_self(); +GC_stopping_pid=getpid(); +GC_log_printf("Stopping the world from %p\n",(void*)GC_stopping_thread); +#endif +#ifdef PARALLEL_MARK +if (GC_parallel){ +GC_acquire_mark_lock(); +GC_ASSERT(GC_fl_builder_count==0); +} +#endif +#if defined(GC_OPENBSD_UTHREADS)||defined(NACL) +(void)GC_suspend_all(); +#else +AO_store(&GC_stop_count, +(AO_t)((word)GC_stop_count+(THREAD_RESTARTED+1))); +if (GC_manual_vdb){ +GC_acquire_dirty_lock(); +} +AO_store_release(&GC_world_is_stopped,TRUE); +n_live_threads=GC_suspend_all(); +if (GC_retry_signals) +n_live_threads=resend_lost_signals(n_live_threads,GC_suspend_all); +suspend_restart_barrier(n_live_threads); +if (GC_manual_vdb) +GC_release_dirty_lock(); +#endif +#ifdef PARALLEL_MARK +if (GC_parallel) +GC_release_mark_lock(); +#endif +#ifdef DEBUG_THREADS +GC_log_printf("World stopped from %p\n",(void*)pthread_self()); +GC_stopping_thread=0; +#endif +} +#ifdef NACL +#if defined(__x86_64__) +#define NACL_STORE_REGS()do { __asm__ __volatile__ ("push %rbx");__asm__ __volatile__ ("push %rbp");__asm__ __volatile__ ("push %r12");__asm__ __volatile__ ("push %r13");__asm__ __volatile__ ("push %r14");__asm__ __volatile__ ("push %r15");__asm__ __volatile__ ("mov %%esp,%0":"=m" (GC_nacl_gc_thread_self->stop_info.stack_ptr));BCOPY(GC_nacl_gc_thread_self->stop_info.stack_ptr,GC_nacl_gc_thread_self->stop_info.reg_storage,NACL_GC_REG_STORAGE_SIZE*sizeof(ptr_t));__asm__ __volatile__ ("naclasp $48,%r15");} while (0) +#elif defined(__i386__) +#define NACL_STORE_REGS()do { __asm__ __volatile__ ("push %ebx");__asm__ __volatile__ ("push %ebp");__asm__ __volatile__ ("push %esi");__asm__ __volatile__ ("push %edi");__asm__ __volatile__ ("mov %%esp,%0":"=m" (GC_nacl_gc_thread_self->stop_info.stack_ptr));BCOPY(GC_nacl_gc_thread_self->stop_info.stack_ptr,GC_nacl_gc_thread_self->stop_info.reg_storage,NACL_GC_REG_STORAGE_SIZE*sizeof(ptr_t));__asm__ __volatile__ ("add $16,%esp");} while (0) +#elif defined(__arm__) +#define NACL_STORE_REGS()do { __asm__ __volatile__ ("push {r4-r8,r10-r12,lr}");__asm__ __volatile__ ("mov r0,%0"::"r" (&GC_nacl_gc_thread_self->stop_info.stack_ptr));__asm__ __volatile__ ("bic r0,r0,#0xc0000000");__asm__ __volatile__ ("str 
sp,[r0]");BCOPY(GC_nacl_gc_thread_self->stop_info.stack_ptr,GC_nacl_gc_thread_self->stop_info.reg_storage,NACL_GC_REG_STORAGE_SIZE*sizeof(ptr_t));__asm__ __volatile__ ("add sp,sp,#40");__asm__ __volatile__ ("bic sp,sp,#0xc0000000");} while (0) +#else +#error TODO Please port NACL_STORE_REGS +#endif +GC_API_OSCALL void nacl_pre_syscall_hook(void) +{ +if (GC_nacl_thread_idx!=-1){ +NACL_STORE_REGS(); +GC_nacl_gc_thread_self->stop_info.stack_ptr=GC_approx_sp(); +GC_nacl_thread_parked[GC_nacl_thread_idx]=1; +} +} +GC_API_OSCALL void __nacl_suspend_thread_if_needed(void) +{ +if (GC_nacl_park_threads_now){ +pthread_t self=pthread_self(); +if (GC_nacl_thread_parker==self) +return; +if (GC_nacl_thread_idx < 0) +return; +if (!GC_nacl_thread_parked[GC_nacl_thread_idx]){ +NACL_STORE_REGS(); +GC_nacl_gc_thread_self->stop_info.stack_ptr=GC_approx_sp(); +} +GC_nacl_thread_parked[GC_nacl_thread_idx]=1; +while (GC_nacl_park_threads_now){ +} +GC_nacl_thread_parked[GC_nacl_thread_idx]=0; +BZERO(GC_nacl_gc_thread_self->stop_info.reg_storage, +NACL_GC_REG_STORAGE_SIZE*sizeof(ptr_t)); +} +} +GC_API_OSCALL void nacl_post_syscall_hook(void) +{ +__nacl_suspend_thread_if_needed(); +if (GC_nacl_thread_idx!=-1){ +GC_nacl_thread_parked[GC_nacl_thread_idx]=0; +} +} +STATIC GC_bool GC_nacl_thread_parking_inited=FALSE; +STATIC pthread_mutex_t GC_nacl_thread_alloc_lock=PTHREAD_MUTEX_INITIALIZER; +struct nacl_irt_blockhook { +int (*register_block_hooks)(void (*pre)(void),void (*post)(void)); +}; +EXTERN_C_BEGIN +extern size_t nacl_interface_query(const char*interface_ident, +void*table,size_t tablesize); +EXTERN_C_END +GC_INNER void GC_nacl_initialize_gc_thread(void) +{ +int i; +static struct nacl_irt_blockhook gc_hook; +pthread_mutex_lock(&GC_nacl_thread_alloc_lock); +if (!EXPECT(GC_nacl_thread_parking_inited,TRUE)){ +BZERO(GC_nacl_thread_parked,sizeof(GC_nacl_thread_parked)); +BZERO(GC_nacl_thread_used,sizeof(GC_nacl_thread_used)); +nacl_interface_query("nacl-irt-blockhook-0.1", +&gc_hook,sizeof(gc_hook)); +gc_hook.register_block_hooks(nacl_pre_syscall_hook, +nacl_post_syscall_hook); +GC_nacl_thread_parking_inited=TRUE; +} +GC_ASSERT(GC_nacl_num_gc_threads<=MAX_NACL_GC_THREADS); +for (i=0;i < MAX_NACL_GC_THREADS;i++){ +if (GC_nacl_thread_used[i]==0){ +GC_nacl_thread_used[i]=1; +GC_nacl_thread_idx=i; +GC_nacl_num_gc_threads++; +break; +} +} +pthread_mutex_unlock(&GC_nacl_thread_alloc_lock); +} +GC_INNER void GC_nacl_shutdown_gc_thread(void) +{ +pthread_mutex_lock(&GC_nacl_thread_alloc_lock); +GC_ASSERT(GC_nacl_thread_idx>=0); +GC_ASSERT(GC_nacl_thread_idx < MAX_NACL_GC_THREADS); +GC_ASSERT(GC_nacl_thread_used[GC_nacl_thread_idx]!=0); +GC_nacl_thread_used[GC_nacl_thread_idx]=0; +GC_nacl_thread_idx=-1; +GC_nacl_num_gc_threads--; +pthread_mutex_unlock(&GC_nacl_thread_alloc_lock); +} +#else +STATIC int GC_restart_all(void) +{ +int n_live_threads=0; +int i; +pthread_t self=pthread_self(); +GC_thread p; +#ifndef GC_OPENBSD_UTHREADS +int result; +#endif +for (i=0;i < THREAD_TABLE_SZ;i++){ +for (p=GC_threads[i];p!=NULL;p=p->next){ +if (!THREAD_EQUAL(p->id,self)){ +if ((p->flags&FINISHED)!=0)continue; +if (p->thread_blocked)continue; +#ifndef GC_OPENBSD_UTHREADS +#ifdef GC_ENABLE_SUSPEND_THREAD +if (p->suspended_ext)continue; +#endif +if (GC_retry_signals +&&AO_load(&p->stop_info.last_stop_count) +==(AO_t)((word)GC_stop_count|THREAD_RESTARTED)) +continue; +n_live_threads++; +#endif +#ifdef DEBUG_THREADS +GC_log_printf("Sending restart signal to %p\n",(void*)p->id); +#endif +#ifdef GC_OPENBSD_UTHREADS +if 
(pthread_resume_np(p->id)!=0) +ABORT("pthread_resume_np failed"); +if (GC_on_thread_event) +GC_on_thread_event(GC_EVENT_THREAD_UNSUSPENDED,(void*)p->id); +#else +result=RAISE_SIGNAL(p,GC_sig_thr_restart); +switch(result){ +case ESRCH: +n_live_threads--; +break; +case 0: +if (GC_on_thread_event) +GC_on_thread_event(GC_EVENT_THREAD_UNSUSPENDED, +(void*)(word)THREAD_SYSTEM_ID(p)); +break; +default: +ABORT_ARG1("pthread_kill failed at resume", +":errcode=%d",result); +} +#endif +} +} +} +return n_live_threads; +} +#endif +GC_INNER void GC_start_world(void) +{ +#ifndef NACL +int n_live_threads; +GC_ASSERT(I_HOLD_LOCK()); +#ifdef DEBUG_THREADS +GC_log_printf("World starting\n"); +#endif +#ifndef GC_OPENBSD_UTHREADS +AO_store_release(&GC_world_is_stopped,FALSE); +#endif +n_live_threads=GC_restart_all(); +#ifdef GC_OPENBSD_UTHREADS +(void)n_live_threads; +#elif defined(GC_NETBSD_THREADS_WORKAROUND) +if (GC_retry_signals) +n_live_threads=resend_lost_signals(n_live_threads,GC_restart_all); +suspend_restart_barrier(n_live_threads); +#else +if (GC_retry_signals){ +n_live_threads=resend_lost_signals(n_live_threads,GC_restart_all); +suspend_restart_barrier(n_live_threads); +} +#endif +#ifdef DEBUG_THREADS +GC_log_printf("World started\n"); +#endif +#else +#ifdef DEBUG_THREADS +GC_log_printf("World starting...\n"); +#endif +GC_nacl_park_threads_now=0; +if (GC_on_thread_event) +GC_on_thread_event(GC_EVENT_THREAD_UNSUSPENDED,NULL); +#endif +} +GC_INNER void GC_stop_init(void) +{ +#if!defined(GC_OPENBSD_UTHREADS)&&!defined(NACL) +struct sigaction act; +char*str; +if (SIGNAL_UNSET==GC_sig_suspend) +GC_sig_suspend=SIG_SUSPEND; +if (SIGNAL_UNSET==GC_sig_thr_restart) +GC_sig_thr_restart=SIG_THR_RESTART; +if (GC_sig_suspend==GC_sig_thr_restart) +ABORT("Cannot use same signal for thread suspend and resume"); +if (sem_init(&GC_suspend_ack_sem,GC_SEM_INIT_PSHARED,0)!=0) +ABORT("sem_init failed"); +#ifdef SA_RESTART +act.sa_flags=SA_RESTART +#else +act.sa_flags=0 +#endif +#ifndef NO_SA_SIGACTION +|SA_SIGINFO +#endif +; +if (sigfillset(&act.sa_mask)!=0){ +ABORT("sigfillset failed"); +} +#ifdef GC_RTEMS_PTHREADS +if(sigprocmask(SIG_UNBLOCK,&act.sa_mask,NULL)!=0){ +ABORT("sigprocmask failed"); +} +#endif +GC_remove_allowed_signals(&act.sa_mask); +#ifndef NO_SA_SIGACTION +act.sa_sigaction=GC_suspend_handler; +#else +act.sa_handler=GC_suspend_handler; +#endif +if (sigaction(GC_sig_suspend,&act,NULL)!=0){ +ABORT("Cannot set SIG_SUSPEND handler"); +} +#ifndef NO_SA_SIGACTION +act.sa_flags&=~SA_SIGINFO; +#endif +act.sa_handler=GC_restart_handler; +if (sigaction(GC_sig_thr_restart,&act,NULL)!=0){ +ABORT("Cannot set SIG_THR_RESTART handler"); +} +if (sigfillset(&suspend_handler_mask)!=0)ABORT("sigfillset failed"); +GC_remove_allowed_signals(&suspend_handler_mask); +if (sigdelset(&suspend_handler_mask,GC_sig_thr_restart)!=0) +ABORT("sigdelset failed"); +str=GETENV("GC_RETRY_SIGNALS"); +if (str!=NULL){ +if (*str=='0'&&*(str+1)=='\0'){ +GC_retry_signals=FALSE; +} else { +GC_retry_signals=TRUE; +} +} +if (GC_retry_signals){ +GC_COND_LOG_PRINTF( +"Will retry suspend and restart signals if necessary\n"); +} +#ifndef NO_SIGNALS_UNBLOCK_IN_MAIN +GC_unblock_gc_signals(); +#endif +#endif +} +#endif +#if defined(GC_PTHREADS)&&!defined(GC_WIN32_THREADS) +#include +#include +#include +#include +#include +#include +#if!defined(SN_TARGET_ORBIS)&&!defined(SN_TARGET_PSP2) +#if!defined(GC_RTEMS_PTHREADS) +#include +#endif +#include +#include +#include +#include +#endif +#include +#if defined(GC_DARWIN_THREADS) +#ifndef GC_DARWIN_SEMAPHORE_H 
+#define GC_DARWIN_SEMAPHORE_H +#if!defined(GC_DARWIN_THREADS) +#error darwin_semaphore.h included with GC_DARWIN_THREADS not defined +#endif +#ifdef __cplusplus +extern "C" { +#endif +typedef struct { +pthread_mutex_t mutex; +pthread_cond_t cond; +int value; +} sem_t; +GC_INLINE int sem_init(sem_t*sem,int pshared,int value){ +if (pshared!=0){ +errno=EPERM; +return -1; +} +sem->value=value; +if (pthread_mutex_init(&sem->mutex,NULL)!=0) +return -1; +if (pthread_cond_init(&sem->cond,NULL)!=0){ +(void)pthread_mutex_destroy(&sem->mutex); +return -1; +} +return 0; +} +GC_INLINE int sem_post(sem_t*sem){ +if (pthread_mutex_lock(&sem->mutex)!=0) +return -1; +sem->value++; +if (pthread_cond_signal(&sem->cond)!=0){ +(void)pthread_mutex_unlock(&sem->mutex); +return -1; +} +return pthread_mutex_unlock(&sem->mutex)!=0?-1:0; +} +GC_INLINE int sem_wait(sem_t*sem){ +if (pthread_mutex_lock(&sem->mutex)!=0) +return -1; +while (sem->value==0){ +if (pthread_cond_wait(&sem->cond,&sem->mutex)!=0){ +(void)pthread_mutex_unlock(&sem->mutex); +return -1; +} +} +sem->value--; +return pthread_mutex_unlock(&sem->mutex)!=0?-1:0; +} +GC_INLINE int sem_destroy(sem_t*sem){ +return pthread_cond_destroy(&sem->cond)!=0 +||pthread_mutex_destroy(&sem->mutex)!=0?-1:0; +} +#ifdef __cplusplus +} +#endif +#endif +#else +#include +#endif +#if defined(GC_DARWIN_THREADS)||defined(GC_FREEBSD_THREADS) +#include +#endif +#if defined(GC_NETBSD_THREADS)||defined(GC_OPENBSD_THREADS) +#include +#include +#endif +#if!defined(USE_SPIN_LOCK) +GC_INNER pthread_mutex_t GC_allocate_ml=PTHREAD_MUTEX_INITIALIZER; +#endif +#ifdef GC_ASSERTIONS +GC_INNER unsigned long GC_lock_holder=NO_THREAD; +#endif +#if defined(GC_DGUX386_THREADS) +#include +#include +typedef unsigned int sem_t; +#endif +#undef pthread_create +#ifndef GC_NO_PTHREAD_SIGMASK +#undef pthread_sigmask +#endif +#ifndef GC_NO_PTHREAD_CANCEL +#undef pthread_cancel +#endif +#ifdef GC_HAVE_PTHREAD_EXIT +#undef pthread_exit +#endif +#undef pthread_join +#undef pthread_detach +#if defined(GC_OSF1_THREADS)&&defined(_PTHREAD_USE_MANGLED_NAMES_)&&!defined(_PTHREAD_USE_PTDNAM_) +#define pthread_create __pthread_create +#define pthread_join __pthread_join +#define pthread_detach __pthread_detach +#ifndef GC_NO_PTHREAD_CANCEL +#define pthread_cancel __pthread_cancel +#endif +#ifdef GC_HAVE_PTHREAD_EXIT +#define pthread_exit __pthread_exit +#endif +#endif +#ifdef GC_USE_LD_WRAP +#define WRAP_FUNC(f)__wrap_##f +#define REAL_FUNC(f)__real_##f +int REAL_FUNC(pthread_create)(pthread_t*, +GC_PTHREAD_CREATE_CONST pthread_attr_t*, +void*(*start_routine)(void*),void*); +int REAL_FUNC(pthread_join)(pthread_t,void**); +int REAL_FUNC(pthread_detach)(pthread_t); +#ifndef GC_NO_PTHREAD_SIGMASK +int REAL_FUNC(pthread_sigmask)(int,const sigset_t*,sigset_t*); +#endif +#ifndef GC_NO_PTHREAD_CANCEL +int REAL_FUNC(pthread_cancel)(pthread_t); +#endif +#ifdef GC_HAVE_PTHREAD_EXIT +void REAL_FUNC(pthread_exit)(void*)GC_PTHREAD_EXIT_ATTRIBUTE; +#endif +#else +#ifdef GC_USE_DLOPEN_WRAP +#include +#define WRAP_FUNC(f)f +#define REAL_FUNC(f)GC_real_##f +typedef int (*GC_pthread_create_t)(pthread_t*, +GC_PTHREAD_CREATE_CONST pthread_attr_t*, +void*(*)(void*),void*); +static GC_pthread_create_t REAL_FUNC(pthread_create); +#ifndef GC_NO_PTHREAD_SIGMASK +typedef int (*GC_pthread_sigmask_t)(int,const sigset_t*, +sigset_t*); +static GC_pthread_sigmask_t REAL_FUNC(pthread_sigmask); +#endif +typedef int (*GC_pthread_join_t)(pthread_t,void**); +static GC_pthread_join_t REAL_FUNC(pthread_join); +typedef int 
(*GC_pthread_detach_t)(pthread_t); +static GC_pthread_detach_t REAL_FUNC(pthread_detach); +#ifndef GC_NO_PTHREAD_CANCEL +typedef int (*GC_pthread_cancel_t)(pthread_t); +static GC_pthread_cancel_t REAL_FUNC(pthread_cancel); +#endif +#ifdef GC_HAVE_PTHREAD_EXIT +typedef void (*GC_pthread_exit_t)(void*)GC_PTHREAD_EXIT_ATTRIBUTE; +static GC_pthread_exit_t REAL_FUNC(pthread_exit); +#endif +#else +#define WRAP_FUNC(f)GC_##f +#if!defined(GC_DGUX386_THREADS) +#define REAL_FUNC(f)f +#else +#define REAL_FUNC(f)__d10_##f +#endif +#endif +#endif +#if defined(GC_USE_LD_WRAP)||defined(GC_USE_DLOPEN_WRAP) +GC_API int GC_pthread_create(pthread_t*t, +GC_PTHREAD_CREATE_CONST pthread_attr_t*a, +void*(*fn)(void*),void*arg) +{ +return pthread_create(t,a,fn,arg); +} +#ifndef GC_NO_PTHREAD_SIGMASK +GC_API int GC_pthread_sigmask(int how,const sigset_t*mask, +sigset_t*old) +{ +return pthread_sigmask(how,mask,old); +} +#endif +GC_API int GC_pthread_join(pthread_t t,void**res) +{ +return pthread_join(t,res); +} +GC_API int GC_pthread_detach(pthread_t t) +{ +return pthread_detach(t); +} +#ifndef GC_NO_PTHREAD_CANCEL +GC_API int GC_pthread_cancel(pthread_t t) +{ +return pthread_cancel(t); +} +#endif +#ifdef GC_HAVE_PTHREAD_EXIT +GC_API GC_PTHREAD_EXIT_ATTRIBUTE void GC_pthread_exit(void*retval) +{ +pthread_exit(retval); +} +#endif +#endif +#ifdef GC_USE_DLOPEN_WRAP +STATIC GC_bool GC_syms_initialized=FALSE; +STATIC void GC_init_real_syms(void) +{ +void*dl_handle; +if (GC_syms_initialized)return; +#ifdef RTLD_NEXT +dl_handle=RTLD_NEXT; +#else +dl_handle=dlopen("libpthread.so.0",RTLD_LAZY); +if (NULL==dl_handle){ +dl_handle=dlopen("libpthread.so",RTLD_LAZY); +} +if (NULL==dl_handle)ABORT("Couldn't open libpthread"); +#endif +REAL_FUNC(pthread_create)=(GC_pthread_create_t)(word) +dlsym(dl_handle,"pthread_create"); +#ifdef RTLD_NEXT +if (REAL_FUNC(pthread_create)==0) +ABORT("pthread_create not found" +" (probably -lgc is specified after -lpthread)"); +#endif +#ifndef GC_NO_PTHREAD_SIGMASK +REAL_FUNC(pthread_sigmask)=(GC_pthread_sigmask_t)(word) +dlsym(dl_handle,"pthread_sigmask"); +#endif +REAL_FUNC(pthread_join)=(GC_pthread_join_t)(word) +dlsym(dl_handle,"pthread_join"); +REAL_FUNC(pthread_detach)=(GC_pthread_detach_t)(word) +dlsym(dl_handle,"pthread_detach"); +#ifndef GC_NO_PTHREAD_CANCEL +REAL_FUNC(pthread_cancel)=(GC_pthread_cancel_t)(word) +dlsym(dl_handle,"pthread_cancel"); +#endif +#ifdef GC_HAVE_PTHREAD_EXIT +REAL_FUNC(pthread_exit)=(GC_pthread_exit_t)(word) +dlsym(dl_handle,"pthread_exit"); +#endif +GC_syms_initialized=TRUE; +} +#define INIT_REAL_SYMS()if (EXPECT(GC_syms_initialized,TRUE)){} else GC_init_real_syms() +#define ASSERT_SYMS_INITIALIZED()GC_ASSERT(GC_syms_initialized) +#else +#define INIT_REAL_SYMS()(void)0 +#define ASSERT_SYMS_INITIALIZED()GC_ASSERT(parallel_initialized) +#endif +static GC_bool parallel_initialized=FALSE; +#ifndef GC_ALWAYS_MULTITHREADED +GC_INNER GC_bool GC_need_to_lock=FALSE; +#endif +STATIC int GC_nprocs=1; +#ifdef THREAD_LOCAL_ALLOC +GC_INNER void GC_mark_thread_local_free_lists(void) +{ +int i; +GC_thread p; +for (i=0;i < THREAD_TABLE_SZ;++i){ +for (p=GC_threads[i];0!=p;p=p->next){ +if (!(p->flags&FINISHED)) +GC_mark_thread_local_fls_for(&(p->tlfs)); +} +} +} +#if defined(GC_ASSERTIONS) +void GC_check_tls(void) +{ +int i; +GC_thread p; +for (i=0;i < THREAD_TABLE_SZ;++i){ +for (p=GC_threads[i];0!=p;p=p->next){ +if (!(p->flags&FINISHED)) +GC_check_tls_for(&(p->tlfs)); +} +} +#if defined(USE_CUSTOM_SPECIFIC) +if (GC_thread_key!=0) +GC_check_tsd_marks(GC_thread_key); +#endif +} 
+#endif +#endif +#ifdef PARALLEL_MARK +#ifndef MAX_MARKERS +#define MAX_MARKERS 16 +#endif +static ptr_t marker_sp[MAX_MARKERS - 1]={0}; +#ifdef IA64 +static ptr_t marker_bsp[MAX_MARKERS - 1]={0}; +#endif +#if defined(GC_DARWIN_THREADS)&&!defined(GC_NO_THREADS_DISCOVERY) +static mach_port_t marker_mach_threads[MAX_MARKERS - 1]={0}; +GC_INNER GC_bool GC_is_mach_marker(thread_act_t thread) +{ +int i; +for (i=0;i < GC_markers_m1;i++){ +if (marker_mach_threads[i]==thread) +return TRUE; +} +return FALSE; +} +#endif +#ifdef HAVE_PTHREAD_SETNAME_NP_WITH_TID_AND_ARG +static void set_marker_thread_name(unsigned id) +{ +int err=pthread_setname_np(pthread_self(),"GC-marker-%zu", +(void*)(size_t)id); +if (err!=0) +WARN("pthread_setname_np failed,errno=%" WARN_PRIdPTR "\n",err); +} +#elif defined(HAVE_PTHREAD_SETNAME_NP_WITH_TID)||defined(HAVE_PTHREAD_SETNAME_NP_WITHOUT_TID) +static void set_marker_thread_name(unsigned id) +{ +char name_buf[16]; +int len=sizeof("GC-marker-")- 1; +BCOPY("GC-marker-",name_buf,len); +if (id>=10) +name_buf[len++]=(char)('0'+(id/10)% 10); +name_buf[len]=(char)('0'+id % 10); +name_buf[len+1]='\0'; +#ifdef HAVE_PTHREAD_SETNAME_NP_WITHOUT_TID +(void)pthread_setname_np(name_buf); +#else +if (pthread_setname_np(pthread_self(),name_buf)!=0) +WARN("pthread_setname_np failed\n",0); +#endif +} +#else +#define set_marker_thread_name(id)(void)(id) +#endif +STATIC void*GC_mark_thread(void*id) +{ +word my_mark_no=0; +IF_CANCEL(int cancel_state;) +if ((word)id==GC_WORD_MAX)return 0; +DISABLE_CANCEL(cancel_state); +set_marker_thread_name((unsigned)(word)id); +marker_sp[(word)id]=GC_approx_sp(); +#ifdef IA64 +marker_bsp[(word)id]=GC_save_regs_in_stack(); +#endif +#if defined(GC_DARWIN_THREADS)&&!defined(GC_NO_THREADS_DISCOVERY) +marker_mach_threads[(word)id]=mach_thread_self(); +#endif +GC_acquire_mark_lock(); +if (0==--GC_fl_builder_count) +GC_notify_all_builder(); +for (;;++my_mark_no){ +if (my_mark_no < GC_mark_no||my_mark_no > GC_mark_no+2){ +my_mark_no=GC_mark_no; +} +#ifdef DEBUG_THREADS +GC_log_printf("Starting mark helper for mark number %lu\n", +(unsigned long)my_mark_no); +#endif +GC_help_marker(my_mark_no); +} +} +STATIC pthread_t GC_mark_threads[MAX_MARKERS]; +#ifdef CAN_HANDLE_FORK +static int available_markers_m1=0; +static pthread_cond_t mark_cv; +#else +#define available_markers_m1 GC_markers_m1 +static pthread_cond_t mark_cv=PTHREAD_COND_INITIALIZER; +#endif +GC_INNER void GC_start_mark_threads_inner(void) +{ +int i; +pthread_attr_t attr; +#ifndef NO_MARKER_SPECIAL_SIGMASK +sigset_t set,oldset; +#endif +GC_ASSERT(I_DONT_HOLD_LOCK()); +if (available_markers_m1<=0)return; +#ifdef CAN_HANDLE_FORK +if (GC_parallel)return; +{ +pthread_cond_t mark_cv_local=PTHREAD_COND_INITIALIZER; +BCOPY(&mark_cv_local,&mark_cv,sizeof(mark_cv)); +} +#endif +GC_ASSERT(GC_fl_builder_count==0); +INIT_REAL_SYMS(); +if (0!=pthread_attr_init(&attr))ABORT("pthread_attr_init failed"); +if (0!=pthread_attr_setdetachstate(&attr,PTHREAD_CREATE_DETACHED)) +ABORT("pthread_attr_setdetachstate failed"); +#ifdef DEFAULT_STACK_MAYBE_SMALL +{ +size_t old_size; +if (pthread_attr_getstacksize(&attr,&old_size)!=0) +ABORT("pthread_attr_getstacksize failed"); +if (old_size < MIN_STACK_SIZE +&&old_size!=0){ +if (pthread_attr_setstacksize(&attr,MIN_STACK_SIZE)!=0) +ABORT("pthread_attr_setstacksize failed"); +} +} +#endif +#ifndef NO_MARKER_SPECIAL_SIGMASK +if (sigfillset(&set)!=0) +ABORT("sigfillset failed"); +#if!defined(GC_DARWIN_THREADS)&&!defined(GC_OPENBSD_UTHREADS)&&!defined(NACL) +if 
(sigdelset(&set,GC_get_suspend_signal())!=0 +||sigdelset(&set,GC_get_thr_restart_signal())!=0) +ABORT("sigdelset failed"); +#endif +if (REAL_FUNC(pthread_sigmask)(SIG_BLOCK,&set,&oldset)< 0){ +WARN("pthread_sigmask set failed,no markers started," +" errno=%" WARN_PRIdPTR "\n",errno); +GC_markers_m1=0; +(void)pthread_attr_destroy(&attr); +return; +} +#endif +#ifdef CAN_HANDLE_FORK +GC_markers_m1=available_markers_m1; +#endif +for (i=0;i < available_markers_m1;++i){ +if (0!=REAL_FUNC(pthread_create)(GC_mark_threads+i,&attr, +GC_mark_thread,(void*)(word)i)){ +WARN("Marker thread creation failed,errno=%" WARN_PRIdPTR "\n", +errno); +GC_markers_m1=i; +break; +} +} +#ifndef NO_MARKER_SPECIAL_SIGMASK +if (REAL_FUNC(pthread_sigmask)(SIG_SETMASK,&oldset,NULL)< 0){ +WARN("pthread_sigmask restore failed,errno=%" WARN_PRIdPTR "\n", +errno); +} +#endif +(void)pthread_attr_destroy(&attr); +GC_wait_for_markers_init(); +GC_COND_LOG_PRINTF("Started %d mark helper threads\n",GC_markers_m1); +} +#endif +GC_INNER GC_bool GC_thr_initialized=FALSE; +GC_INNER volatile GC_thread GC_threads[THREAD_TABLE_SZ]={0}; +void GC_push_thread_structures(void) +{ +GC_ASSERT(I_HOLD_LOCK()); +GC_PUSH_ALL_SYM(GC_threads); +#if defined(THREAD_LOCAL_ALLOC) +GC_PUSH_ALL_SYM(GC_thread_key); +#endif +} +#ifdef DEBUG_THREADS +STATIC int GC_count_threads(void) +{ +int i; +int count=0; +GC_ASSERT(I_HOLD_LOCK()); +for (i=0;i < THREAD_TABLE_SZ;++i){ +GC_thread th=GC_threads[i]; +while (th){ +if (!(th->flags&FINISHED)) +++count; +th=th->next; +} +} +return count; +} +#endif +static struct GC_Thread_Rep first_thread; +STATIC GC_thread GC_new_thread(pthread_t id) +{ +int hv=THREAD_TABLE_INDEX(id); +GC_thread result; +static GC_bool first_thread_used=FALSE; +#ifdef DEBUG_THREADS +GC_log_printf("Creating thread %p\n",(void*)id); +for (result=GC_threads[hv];result!=NULL;result=result->next) +if (!THREAD_EQUAL(result->id,id)){ +GC_log_printf("Hash collision at GC_threads[%d]\n",hv); +break; +} +#endif +GC_ASSERT(I_HOLD_LOCK()); +if (!EXPECT(first_thread_used,TRUE)){ +result=&first_thread; +first_thread_used=TRUE; +GC_ASSERT(NULL==GC_threads[hv]); +#if defined(THREAD_SANITIZER)&&defined(CPPCHECK) +GC_noop1(result->dummy[0]); +#endif +} else { +result=(struct GC_Thread_Rep*) +GC_INTERNAL_MALLOC(sizeof(struct GC_Thread_Rep),NORMAL); +if (result==0)return(0); +} +result->id=id; +#ifdef USE_TKILL_ON_ANDROID +result->kernel_id=gettid(); +#endif +result->next=GC_threads[hv]; +GC_threads[hv]=result; +#ifdef NACL +GC_nacl_gc_thread_self=result; +GC_nacl_initialize_gc_thread(); +#endif +GC_ASSERT(result->flags==0&&result->thread_blocked==0); +if (EXPECT(result!=&first_thread,TRUE)) +GC_dirty(result); +return(result); +} +STATIC void GC_delete_thread(pthread_t id) +{ +int hv=THREAD_TABLE_INDEX(id); +GC_thread p=GC_threads[hv]; +GC_thread prev=NULL; +#ifdef DEBUG_THREADS +GC_log_printf("Deleting thread %p,n_threads=%d\n", +(void*)id,GC_count_threads()); +#endif +#ifdef NACL +GC_nacl_shutdown_gc_thread(); +GC_nacl_gc_thread_self=NULL; +#endif +GC_ASSERT(I_HOLD_LOCK()); +while (!THREAD_EQUAL(p->id,id)){ +prev=p; +p=p->next; +} +if (prev==0){ +GC_threads[hv]=p->next; +} else { +GC_ASSERT(prev!=&first_thread); +prev->next=p->next; +GC_dirty(prev); +} +if (p!=&first_thread){ +#ifdef GC_DARWIN_THREADS +mach_port_deallocate(mach_task_self(),p->stop_info.mach_thread); +#endif +GC_INTERNAL_FREE(p); +} +} +STATIC void GC_delete_gc_thread(GC_thread t) +{ +pthread_t id=t->id; +int hv=THREAD_TABLE_INDEX(id); +GC_thread p=GC_threads[hv]; +GC_thread prev=NULL; 
+GC_ASSERT(I_HOLD_LOCK()); +while (p!=t){ +prev=p; +p=p->next; +} +if (prev==0){ +GC_threads[hv]=p->next; +} else { +GC_ASSERT(prev!=&first_thread); +prev->next=p->next; +GC_dirty(prev); +} +#ifdef GC_DARWIN_THREADS +mach_port_deallocate(mach_task_self(),p->stop_info.mach_thread); +#endif +GC_INTERNAL_FREE(p); +#ifdef DEBUG_THREADS +GC_log_printf("Deleted thread %p,n_threads=%d\n", +(void*)id,GC_count_threads()); +#endif +} +GC_INNER GC_thread GC_lookup_thread(pthread_t id) +{ +GC_thread p=GC_threads[THREAD_TABLE_INDEX(id)]; +while (p!=0&&!THREAD_EQUAL(p->id,id))p=p->next; +return(p); +} +GC_INNER void GC_reset_finalizer_nested(void) +{ +GC_thread me=GC_lookup_thread(pthread_self()); +me->finalizer_nested=0; +} +GC_INNER unsigned char*GC_check_finalizer_nested(void) +{ +GC_thread me=GC_lookup_thread(pthread_self()); +unsigned nesting_level=me->finalizer_nested; +if (nesting_level){ +if (++me->finalizer_skipped < (1U<finalizer_skipped=0; +} +me->finalizer_nested=(unsigned char)(nesting_level+1); +return&me->finalizer_nested; +} +#if defined(GC_ASSERTIONS)&&defined(THREAD_LOCAL_ALLOC) +GC_bool GC_is_thread_tsd_valid(void*tsd) +{ +GC_thread me; +DCL_LOCK_STATE; +LOCK(); +me=GC_lookup_thread(pthread_self()); +UNLOCK(); +return (word)tsd>=(word)(&me->tlfs) +&&(word)tsd < (word)(&me->tlfs)+sizeof(me->tlfs); +} +#endif +GC_API int GC_CALL GC_thread_is_registered(void) +{ +pthread_t self=pthread_self(); +GC_thread me; +DCL_LOCK_STATE; +LOCK(); +me=GC_lookup_thread(self); +UNLOCK(); +return me!=NULL; +} +static pthread_t main_pthread_id; +static void*main_stack,*main_altstack; +static word main_stack_size,main_altstack_size; +GC_API void GC_CALL GC_register_altstack(void*stack,GC_word stack_size, +void*altstack, +GC_word altstack_size) +{ +GC_thread me; +pthread_t self=pthread_self(); +DCL_LOCK_STATE; +LOCK(); +me=GC_lookup_thread(self); +if (me!=NULL){ +me->stack=(ptr_t)stack; +me->stack_size=stack_size; +me->altstack=(ptr_t)altstack; +me->altstack_size=altstack_size; +} else { +main_pthread_id=self; +main_stack=stack; +main_stack_size=stack_size; +main_altstack=altstack; +main_altstack_size=altstack_size; +} +UNLOCK(); +} +#ifdef CAN_HANDLE_FORK +#ifdef CAN_CALL_ATFORK +GC_ATTR_NO_SANITIZE_THREAD +#endif +static void store_to_threads_table(int hv,GC_thread me) +{ +GC_threads[hv]=me; +} +STATIC void GC_remove_all_threads_but_me(void) +{ +pthread_t self=pthread_self(); +int hv; +for (hv=0;hv < THREAD_TABLE_SZ;++hv){ +GC_thread p,next; +GC_thread me=NULL; +for (p=GC_threads[hv];0!=p;p=next){ +next=p->next; +if (THREAD_EQUAL(p->id,self) +&&me==NULL){ +me=p; +p->next=0; +#ifdef GC_DARWIN_THREADS +me->stop_info.mach_thread=mach_thread_self(); +#endif +#ifdef USE_TKILL_ON_ANDROID +me->kernel_id=gettid(); +#endif +#if defined(THREAD_LOCAL_ALLOC)&&!defined(USE_CUSTOM_SPECIFIC) +{ +int res; +res=GC_setspecific(GC_thread_key,&me->tlfs); +if (COVERT_DATAFLOW(res)!=0) +ABORT("GC_setspecific failed (in child)"); +} +#endif +} else { +#ifdef THREAD_LOCAL_ALLOC +if (!(p->flags&FINISHED)){ +GC_remove_specific_after_fork(GC_thread_key,p->id); +} +#endif +#if!defined(THREAD_SANITIZER)||!defined(CAN_CALL_ATFORK) +if (p!=&first_thread)GC_INTERNAL_FREE(p); +#endif +} +} +store_to_threads_table(hv,me); +} +} +#endif +#ifdef USE_PROC_FOR_LIBRARIES +GC_INNER GC_bool GC_segment_is_thread_stack(ptr_t lo,ptr_t hi) +{ +int i; +GC_thread p; +GC_ASSERT(I_HOLD_LOCK()); +#ifdef PARALLEL_MARK +for (i=0;i < GC_markers_m1;++i){ +if ((word)marker_sp[i] > (word)lo&&(word)marker_sp[i] < (word)hi) +return TRUE; +#ifdef IA64 +if 
((word)marker_bsp[i] > (word)lo +&&(word)marker_bsp[i] < (word)hi) +return TRUE; +#endif +} +#endif +for (i=0;i < THREAD_TABLE_SZ;i++){ +for (p=GC_threads[i];p!=0;p=p->next){ +if (0!=p->stack_end){ +#ifdef STACK_GROWS_UP +if ((word)p->stack_end>=(word)lo +&&(word)p->stack_end < (word)hi) +return TRUE; +#else +if ((word)p->stack_end > (word)lo +&&(word)p->stack_end<=(word)hi) +return TRUE; +#endif +} +} +} +return FALSE; +} +#endif +#ifdef IA64 +GC_INNER ptr_t GC_greatest_stack_base_below(ptr_t bound) +{ +int i; +GC_thread p; +ptr_t result=0; +GC_ASSERT(I_HOLD_LOCK()); +#ifdef PARALLEL_MARK +for (i=0;i < GC_markers_m1;++i){ +if ((word)marker_sp[i] > (word)result +&&(word)marker_sp[i] < (word)bound) +result=marker_sp[i]; +} +#endif +for (i=0;i < THREAD_TABLE_SZ;i++){ +for (p=GC_threads[i];p!=0;p=p->next){ +if ((word)p->stack_end > (word)result +&&(word)p->stack_end < (word)bound){ +result=p->stack_end; +} +} +} +return result; +} +#endif +#ifndef STAT_READ +#define STAT_BUF_SIZE 4096 +#define STAT_READ read +#endif +#ifdef GC_HPUX_THREADS +#define GC_get_nprocs()pthread_num_processors_np() +#elif defined(GC_OSF1_THREADS)||defined(GC_AIX_THREADS)||defined(GC_HAIKU_THREADS)||defined(GC_SOLARIS_THREADS)||defined(HURD)||defined(HOST_ANDROID)||defined(NACL) +GC_INLINE int GC_get_nprocs(void) +{ +int nprocs=(int)sysconf(_SC_NPROCESSORS_ONLN); +return nprocs > 0?nprocs:1; +} +#elif defined(GC_IRIX_THREADS) +GC_INLINE int GC_get_nprocs(void) +{ +int nprocs=(int)sysconf(_SC_NPROC_ONLN); +return nprocs > 0?nprocs:1; +} +#elif defined(GC_LINUX_THREADS) +STATIC int GC_get_nprocs(void) +{ +char stat_buf[STAT_BUF_SIZE]; +int f; +int result,i,len; +f=open("/proc/stat",O_RDONLY); +if (f < 0){ +WARN("Couldn't read/proc/stat\n",0); +return 1; +} +len=STAT_READ(f,stat_buf,STAT_BUF_SIZE); +close(f); +result=1; +for (i=0;i < len - 100;++i){ +if (stat_buf[i]=='\n'&&stat_buf[i+1]=='c' +&&stat_buf[i+2]=='p'&&stat_buf[i+3]=='u'){ +int cpu_no=atoi(&stat_buf[i+4]); +if (cpu_no>=result) +result=cpu_no+1; +} +} +return result; +} +#elif defined(GC_DGUX386_THREADS) +STATIC int GC_get_nprocs(void) +{ +int numCpus; +struct dg_sys_info_pm_info pm_sysinfo; +int status=0; +status=dg_sys_info((long int*)&pm_sysinfo, +DG_SYS_INFO_PM_INFO_TYPE,DG_SYS_INFO_PM_CURRENT_VERSION); +if (status < 0) +numCpus=-1; +else +numCpus=pm_sysinfo.idle_vp_count; +return(numCpus); +} +#elif defined(GC_DARWIN_THREADS)||defined(GC_FREEBSD_THREADS)||defined(GC_NETBSD_THREADS)||defined(GC_OPENBSD_THREADS) +STATIC int GC_get_nprocs(void) +{ +int mib[]={CTL_HW,HW_NCPU}; +int res; +size_t len=sizeof(res); +sysctl(mib,sizeof(mib)/sizeof(int),&res,&len,NULL,0); +return res; +} +#else +#define GC_get_nprocs()1 +#endif +#if defined(ARM32)&&defined(GC_LINUX_THREADS)&&!defined(NACL) +STATIC int GC_get_nprocs_present(void) +{ +char stat_buf[16]; +int f; +int len; +f=open("/sys/devices/system/cpu/present",O_RDONLY); +if (f < 0) +return -1; +len=STAT_READ(f,stat_buf,sizeof(stat_buf)); +close(f); +if (len < 2||stat_buf[0]!='0'||stat_buf[len - 1]!='\n'){ +return 0; +} else if (len==2){ +return 1; +} else if (stat_buf[1]!='-'){ +return 0; +} +stat_buf[len - 1]='\0'; +return atoi(&stat_buf[2])+1; +} +#endif +STATIC void GC_wait_for_gc_completion(GC_bool wait_for_all) +{ +DCL_LOCK_STATE; +#if!defined(THREAD_SANITIZER)||!defined(CAN_CALL_ATFORK) +GC_ASSERT(I_HOLD_LOCK()); +#endif +ASSERT_CANCEL_DISABLED(); +if (GC_incremental&&GC_collection_in_progress()){ +word old_gc_no=GC_gc_no; +while (GC_incremental&&GC_collection_in_progress() 
+&&(wait_for_all||old_gc_no==GC_gc_no)){ +ENTER_GC(); +GC_in_thread_creation=TRUE; +GC_collect_a_little_inner(1); +GC_in_thread_creation=FALSE; +EXIT_GC(); +UNLOCK(); +sched_yield(); +LOCK(); +} +} +} +#ifdef CAN_HANDLE_FORK +IF_CANCEL(static int fork_cancel_state;) +#if defined(GC_ASSERTIONS)&&defined(CAN_CALL_ATFORK) +GC_ATTR_NO_SANITIZE_THREAD +#endif +static void fork_prepare_proc(void) +{ +LOCK(); +DISABLE_CANCEL(fork_cancel_state); +#if defined(PARALLEL_MARK) +if (GC_parallel) +GC_wait_for_reclaim(); +#endif +GC_wait_for_gc_completion(TRUE); +#if defined(PARALLEL_MARK) +if (GC_parallel) +GC_acquire_mark_lock(); +#endif +GC_acquire_dirty_lock(); +} +#if defined(GC_ASSERTIONS)&&defined(CAN_CALL_ATFORK) +GC_ATTR_NO_SANITIZE_THREAD +#endif +static void fork_parent_proc(void) +{ +GC_release_dirty_lock(); +#if defined(PARALLEL_MARK) +if (GC_parallel) +GC_release_mark_lock(); +#endif +RESTORE_CANCEL(fork_cancel_state); +UNLOCK(); +} +#if defined(GC_ASSERTIONS)&&defined(CAN_CALL_ATFORK) +GC_ATTR_NO_SANITIZE_THREAD +#endif +static void fork_child_proc(void) +{ +GC_release_dirty_lock(); +#if defined(PARALLEL_MARK) +if (GC_parallel) +GC_release_mark_lock(); +#endif +GC_remove_all_threads_but_me(); +#ifdef PARALLEL_MARK +GC_parallel=FALSE; +#endif +RESTORE_CANCEL(fork_cancel_state); +UNLOCK(); +#ifdef USE_PTHREAD_LOCKS +GC_ASSERT(I_DONT_HOLD_LOCK()); +(void)pthread_mutex_destroy(&GC_allocate_ml); +if (0!=pthread_mutex_init(&GC_allocate_ml,NULL)) +ABORT("pthread_mutex_init failed (in child)"); +#endif +} +GC_API void GC_CALL GC_atfork_prepare(void) +{ +if (!EXPECT(GC_is_initialized,TRUE))GC_init(); +#if defined(GC_DARWIN_THREADS)&&defined(MPROTECT_VDB) +if (GC_auto_incremental){ +GC_ASSERT(0==GC_handle_fork); +ABORT("Unable to fork while mprotect_thread is running"); +} +#endif +if (GC_handle_fork<=0) +fork_prepare_proc(); +} +GC_API void GC_CALL GC_atfork_parent(void) +{ +if (GC_handle_fork<=0) +fork_parent_proc(); +} +GC_API void GC_CALL GC_atfork_child(void) +{ +if (GC_handle_fork<=0) +fork_child_proc(); +} +#endif +#ifdef INCLUDE_LINUX_THREAD_DESCR +__thread int GC_dummy_thread_local; +#endif +#ifdef PARALLEL_MARK +static void setup_mark_lock(void); +static unsigned required_markers_cnt=0; +#endif +GC_API void GC_CALL GC_set_markers_count(unsigned markers GC_ATTR_UNUSED) +{ +#ifdef PARALLEL_MARK +required_markers_cnt=markers < MAX_MARKERS?markers:MAX_MARKERS; +#endif +} +GC_INNER void GC_thr_init(void) +{ +GC_ASSERT(I_HOLD_LOCK()); +if (GC_thr_initialized)return; +GC_thr_initialized=TRUE; +GC_ASSERT((word)&GC_threads % sizeof(word)==0); +#ifdef CAN_HANDLE_FORK +if (GC_handle_fork){ +#ifdef CAN_CALL_ATFORK +if (pthread_atfork(fork_prepare_proc,fork_parent_proc, +fork_child_proc)==0){ +GC_handle_fork=1; +} else +#endif +if (GC_handle_fork!=-1) +ABORT("pthread_atfork failed"); +} +#endif +#ifdef INCLUDE_LINUX_THREAD_DESCR +{ +ptr_t thread_local_addr=(ptr_t)(&GC_dummy_thread_local); +ptr_t main_thread_start,main_thread_end; +if (!GC_enclosing_mapping(thread_local_addr,&main_thread_start, +&main_thread_end)){ +ABORT("Failed to find mapping for main thread thread locals"); +} else { +GC_add_roots_inner(main_thread_start,main_thread_end,FALSE); +} +} +#endif +{ +pthread_t self=pthread_self(); +GC_thread t=GC_new_thread(self); +if (t==NULL) +ABORT("Failed to allocate memory for the initial thread"); +#ifdef GC_DARWIN_THREADS +t->stop_info.mach_thread=mach_thread_self(); +#else +t->stop_info.stack_ptr=GC_approx_sp(); +#endif +t->flags=DETACHED|MAIN_THREAD; +if (THREAD_EQUAL(self,main_pthread_id)){ 
+t->stack=(ptr_t)main_stack; +t->stack_size=main_stack_size; +t->altstack=(ptr_t)main_altstack; +t->altstack_size=main_altstack_size; +} +} +#ifndef GC_DARWIN_THREADS +GC_stop_init(); +#endif +{ +char*nprocs_string=GETENV("GC_NPROCS"); +GC_nprocs=-1; +if (nprocs_string!=NULL)GC_nprocs=atoi(nprocs_string); +} +if (GC_nprocs<=0 +#if defined(ARM32)&&defined(GC_LINUX_THREADS)&&!defined(NACL) +&&(GC_nprocs=GC_get_nprocs_present())<=1 +#endif +) +{ +GC_nprocs=GC_get_nprocs(); +} +if (GC_nprocs<=0){ +WARN("GC_get_nprocs()returned %" WARN_PRIdPTR "\n",GC_nprocs); +GC_nprocs=2; +#ifdef PARALLEL_MARK +available_markers_m1=0; +#endif +} else { +#ifdef PARALLEL_MARK +{ +char*markers_string=GETENV("GC_MARKERS"); +int markers=required_markers_cnt; +if (markers_string!=NULL){ +markers=atoi(markers_string); +if (markers<=0||markers > MAX_MARKERS){ +WARN("Too big or invalid number of mark threads:%" WARN_PRIdPTR +";using maximum threads\n",(signed_word)markers); +markers=MAX_MARKERS; +} +} else if (0==markers){ +markers=GC_nprocs; +#if defined(GC_MIN_MARKERS)&&!defined(CPPCHECK) +if (markers < GC_MIN_MARKERS) +markers=GC_MIN_MARKERS; +#endif +if (markers > MAX_MARKERS) +markers=MAX_MARKERS; +} +available_markers_m1=markers - 1; +} +#endif +} +GC_COND_LOG_PRINTF("Number of processors=%d\n",GC_nprocs); +#ifdef PARALLEL_MARK +if (available_markers_m1<=0){ +GC_parallel=FALSE; +GC_COND_LOG_PRINTF( +"Single marker thread,turning off parallel marking\n"); +} else { +setup_mark_lock(); +} +#endif +} +GC_INNER void GC_init_parallel(void) +{ +#if defined(THREAD_LOCAL_ALLOC) +DCL_LOCK_STATE; +#endif +if (parallel_initialized)return; +parallel_initialized=TRUE; +if (!GC_is_initialized)GC_init(); +#if defined(THREAD_LOCAL_ALLOC) +LOCK(); +GC_init_thread_local(&(GC_lookup_thread(pthread_self())->tlfs)); +UNLOCK(); +#endif +} +#ifndef GC_NO_PTHREAD_SIGMASK +GC_API int WRAP_FUNC(pthread_sigmask)(int how,const sigset_t*set, +sigset_t*oset) +{ +sigset_t fudged_set; +INIT_REAL_SYMS(); +if (set!=NULL&&(how==SIG_BLOCK||how==SIG_SETMASK)){ +int sig_suspend=GC_get_suspend_signal(); +fudged_set=*set; +GC_ASSERT(sig_suspend>=0); +if (sigdelset(&fudged_set,sig_suspend)!=0) +ABORT("sigdelset failed"); +set=&fudged_set; +} +return(REAL_FUNC(pthread_sigmask)(how,set,oset)); +} +#endif +GC_INNER void GC_do_blocking_inner(ptr_t data,void*context GC_ATTR_UNUSED) +{ +struct blocking_data*d=(struct blocking_data*)data; +pthread_t self=pthread_self(); +GC_thread me; +#if defined(SPARC)||defined(IA64) +ptr_t stack_ptr=GC_save_regs_in_stack(); +#endif +#if defined(GC_DARWIN_THREADS)&&!defined(DARWIN_DONT_PARSE_STACK) +GC_bool topOfStackUnset=FALSE; +#endif +DCL_LOCK_STATE; +LOCK(); +me=GC_lookup_thread(self); +GC_ASSERT(!(me->thread_blocked)); +#ifdef SPARC +me->stop_info.stack_ptr=stack_ptr; +#else +me->stop_info.stack_ptr=GC_approx_sp(); +#endif +#if defined(GC_DARWIN_THREADS)&&!defined(DARWIN_DONT_PARSE_STACK) +if (me->topOfStack==NULL){ +topOfStackUnset=TRUE; +me->topOfStack=GC_FindTopOfStack(0); +} +#endif +#ifdef IA64 +me->backing_store_ptr=stack_ptr; +#endif +me->thread_blocked=(unsigned char)TRUE; +UNLOCK(); +d->client_data=(d->fn)(d->client_data); +LOCK(); +#if defined(CPPCHECK) +GC_noop1((word)&me->thread_blocked); +#endif +me->thread_blocked=FALSE; +#if defined(GC_DARWIN_THREADS)&&!defined(DARWIN_DONT_PARSE_STACK) +if (topOfStackUnset) +me->topOfStack=NULL; +#endif +UNLOCK(); +} +GC_API void GC_CALL GC_set_stackbottom(void*gc_thread_handle, +const struct GC_stack_base*sb) +{ +GC_thread t=(GC_thread)gc_thread_handle; 
+GC_ASSERT(sb->mem_base!=NULL); +if (!EXPECT(GC_is_initialized,TRUE)){ +GC_ASSERT(NULL==t); +} else { +GC_ASSERT(I_HOLD_LOCK()); +if (NULL==t) +t=GC_lookup_thread(pthread_self()); +GC_ASSERT((t->flags&FINISHED)==0); +GC_ASSERT(!(t->thread_blocked) +&&NULL==t->traced_stack_sect); +if ((t->flags&MAIN_THREAD)==0){ +t->stack_end=(ptr_t)sb->mem_base; +#ifdef IA64 +t->backing_store_end=(ptr_t)sb->reg_base; +#endif +return; +} +} +GC_stackbottom=(char*)sb->mem_base; +#ifdef IA64 +GC_register_stackbottom=(ptr_t)sb->reg_base; +#endif +} +GC_API void*GC_CALL GC_get_my_stackbottom(struct GC_stack_base*sb) +{ +pthread_t self=pthread_self(); +GC_thread me; +DCL_LOCK_STATE; +LOCK(); +me=GC_lookup_thread(self); +if ((me->flags&MAIN_THREAD)==0){ +sb->mem_base=me->stack_end; +#ifdef IA64 +sb->reg_base=me->backing_store_end; +#endif +} else { +sb->mem_base=GC_stackbottom; +#ifdef IA64 +sb->reg_base=GC_register_stackbottom; +#endif +} +UNLOCK(); +return (void*)me; +} +GC_API void*GC_CALL GC_call_with_gc_active(GC_fn_type fn, +void*client_data) +{ +struct GC_traced_stack_sect_s stacksect; +pthread_t self=pthread_self(); +GC_thread me; +DCL_LOCK_STATE; +LOCK(); +me=GC_lookup_thread(self); +if ((me->flags&MAIN_THREAD)==0){ +GC_ASSERT(me->stack_end!=NULL); +if ((word)me->stack_end HOTTER_THAN (word)(&stacksect)) +me->stack_end=(ptr_t)(&stacksect); +} else { +if ((word)GC_stackbottom HOTTER_THAN (word)(&stacksect)) +GC_stackbottom=(ptr_t)COVERT_DATAFLOW(&stacksect); +} +if (!me->thread_blocked){ +UNLOCK(); +client_data=fn(client_data); +GC_noop1(COVERT_DATAFLOW(&stacksect)); +return client_data; +} +stacksect.saved_stack_ptr=me->stop_info.stack_ptr; +#ifdef IA64 +stacksect.backing_store_end=GC_save_regs_in_stack(); +stacksect.saved_backing_store_ptr=me->backing_store_ptr; +#endif +stacksect.prev=me->traced_stack_sect; +me->thread_blocked=FALSE; +me->traced_stack_sect=&stacksect; +UNLOCK(); +client_data=fn(client_data); +GC_ASSERT(me->thread_blocked==FALSE); +GC_ASSERT(me->traced_stack_sect==&stacksect); +#if defined(CPPCHECK) +GC_noop1((word)me->traced_stack_sect); +#endif +LOCK(); +me->traced_stack_sect=stacksect.prev; +#ifdef IA64 +me->backing_store_ptr=stacksect.saved_backing_store_ptr; +#endif +me->thread_blocked=(unsigned char)TRUE; +me->stop_info.stack_ptr=stacksect.saved_stack_ptr; +UNLOCK(); +return client_data; +} +STATIC void GC_unregister_my_thread_inner(GC_thread me) +{ +#ifdef DEBUG_THREADS +GC_log_printf( +"Unregistering thread %p,gc_thread=%p,n_threads=%d\n", +(void*)me->id,(void*)me,GC_count_threads()); +#endif +GC_ASSERT(!(me->flags&FINISHED)); +#if defined(THREAD_LOCAL_ALLOC) +GC_ASSERT(GC_getspecific(GC_thread_key)==&me->tlfs); +GC_destroy_thread_local(&(me->tlfs)); +#endif +#if defined(GC_HAVE_PTHREAD_EXIT)||!defined(GC_NO_PTHREAD_CANCEL) +if ((me->flags&DISABLED_GC)!=0){ +GC_dont_gc--; +} +#endif +if (me->flags&DETACHED){ +GC_delete_thread(pthread_self()); +} else { +me->flags|=FINISHED; +} +#if defined(THREAD_LOCAL_ALLOC) +GC_remove_specific(GC_thread_key); +#endif +} +GC_API int GC_CALL GC_unregister_my_thread(void) +{ +pthread_t self=pthread_self(); +GC_thread me; +IF_CANCEL(int cancel_state;) +DCL_LOCK_STATE; +LOCK(); +DISABLE_CANCEL(cancel_state); +GC_wait_for_gc_completion(FALSE); +me=GC_lookup_thread(self); +#ifdef DEBUG_THREADS +GC_log_printf( +"Called GC_unregister_my_thread on %p,gc_thread=%p\n", +(void*)self,(void*)me); +#endif +GC_ASSERT(THREAD_EQUAL(me->id,self)); +GC_unregister_my_thread_inner(me); +RESTORE_CANCEL(cancel_state); +UNLOCK(); +return GC_SUCCESS; +} 
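+/* Illustrative usage sketch (editor's addition, not part of the upstream
+bdwgc sources): the registration API implemented in this region -
+GC_register_my_thread(), GC_unregister_my_thread() and
+GC_allow_register_threads() - is intended for threads that were not created
+through GC_pthread_create(). Assuming only the public gc.h API, a client
+thread would typically do:
+
+  #include "gc.h"
+
+  void*worker(void*arg)
+  {
+    struct GC_stack_base sb;
+    if (GC_get_stack_base(&sb)==GC_SUCCESS)
+      (void)GC_register_my_thread(&sb);  /* register before any GC use     */
+    void*p=GC_MALLOC(64);                /* now safe to allocate from here */
+    (void)p;
+    (void)GC_unregister_my_thread();     /* before the thread terminates   */
+    return arg;
+  }
+
+The main program must have called GC_allow_register_threads() beforehand;
+otherwise GC_register_my_thread() aborts, as the check below shows. */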
+GC_INNER_PTHRSTART void GC_thread_exit_proc(void*arg) +{ +IF_CANCEL(int cancel_state;) +DCL_LOCK_STATE; +#ifdef DEBUG_THREADS +GC_log_printf("Called GC_thread_exit_proc on %p,gc_thread=%p\n", +(void*)((GC_thread)arg)->id,arg); +#endif +LOCK(); +DISABLE_CANCEL(cancel_state); +GC_wait_for_gc_completion(FALSE); +GC_unregister_my_thread_inner((GC_thread)arg); +RESTORE_CANCEL(cancel_state); +UNLOCK(); +} +#if!defined(SN_TARGET_ORBIS)&&!defined(SN_TARGET_PSP2) +GC_API int WRAP_FUNC(pthread_join)(pthread_t thread,void**retval) +{ +int result; +GC_thread t; +DCL_LOCK_STATE; +ASSERT_SYMS_INITIALIZED(); +LOCK(); +t=GC_lookup_thread(thread); +UNLOCK(); +result=REAL_FUNC(pthread_join)(thread,retval); +#if defined(GC_FREEBSD_THREADS) +if (result==EINTR)result=0; +#endif +if (result==0){ +LOCK(); +if ((t->flags&FINISHED)!=0) +GC_delete_gc_thread(t); +UNLOCK(); +} +return result; +} +GC_API int WRAP_FUNC(pthread_detach)(pthread_t thread) +{ +int result; +GC_thread t; +DCL_LOCK_STATE; +ASSERT_SYMS_INITIALIZED(); +LOCK(); +t=GC_lookup_thread(thread); +UNLOCK(); +result=REAL_FUNC(pthread_detach)(thread); +if (result==0){ +LOCK(); +t->flags|=DETACHED; +if ((t->flags&FINISHED)!=0){ +GC_delete_gc_thread(t); +} +UNLOCK(); +} +return result; +} +#endif +#ifndef GC_NO_PTHREAD_CANCEL +GC_API int WRAP_FUNC(pthread_cancel)(pthread_t thread) +{ +#ifdef CANCEL_SAFE +GC_thread t; +DCL_LOCK_STATE; +#endif +INIT_REAL_SYMS(); +#ifdef CANCEL_SAFE +LOCK(); +t=GC_lookup_thread(thread); +if (t!=NULL&&(t->flags&DISABLED_GC)==0){ +t->flags|=DISABLED_GC; +GC_dont_gc++; +} +UNLOCK(); +#endif +return REAL_FUNC(pthread_cancel)(thread); +} +#endif +#ifdef GC_HAVE_PTHREAD_EXIT +GC_API GC_PTHREAD_EXIT_ATTRIBUTE void WRAP_FUNC(pthread_exit)(void*retval) +{ +pthread_t self=pthread_self(); +GC_thread me; +DCL_LOCK_STATE; +INIT_REAL_SYMS(); +LOCK(); +me=GC_lookup_thread(self); +if (me!=0&&(me->flags&DISABLED_GC)==0){ +me->flags|=DISABLED_GC; +GC_dont_gc++; +} +UNLOCK(); +REAL_FUNC(pthread_exit)(retval); +} +#endif +GC_INNER GC_bool GC_in_thread_creation=FALSE; +GC_INLINE void GC_record_stack_base(GC_thread me, +const struct GC_stack_base*sb) +{ +#ifndef GC_DARWIN_THREADS +me->stop_info.stack_ptr=(ptr_t)sb->mem_base; +#endif +me->stack_end=(ptr_t)sb->mem_base; +if (me->stack_end==NULL) +ABORT("Bad stack base in GC_register_my_thread"); +#ifdef IA64 +me->backing_store_end=(ptr_t)sb->reg_base; +#endif +} +STATIC GC_thread GC_register_my_thread_inner(const struct GC_stack_base*sb, +pthread_t my_pthread) +{ +GC_thread me; +GC_in_thread_creation=TRUE; +me=GC_new_thread(my_pthread); +GC_in_thread_creation=FALSE; +if (me==0) +ABORT("Failed to allocate memory for thread registering"); +#ifdef GC_DARWIN_THREADS +me->stop_info.mach_thread=mach_thread_self(); +#endif +GC_record_stack_base(me,sb); +#ifdef GC_EXPLICIT_SIGNALS_UNBLOCK +GC_unblock_gc_signals(); +#endif +return me; +} +GC_API void GC_CALL GC_allow_register_threads(void) +{ +GC_ASSERT(GC_lookup_thread(pthread_self())!=0); +set_need_to_lock(); +} +GC_API int GC_CALL GC_register_my_thread(const struct GC_stack_base*sb) +{ +pthread_t self=pthread_self(); +GC_thread me; +DCL_LOCK_STATE; +if (GC_need_to_lock==FALSE) +ABORT("Threads explicit registering is not previously enabled"); +LOCK(); +me=GC_lookup_thread(self); +if (0==me){ +me=GC_register_my_thread_inner(sb,self); +#if defined(CPPCHECK) +GC_noop1(me->flags); +#endif +me->flags|=DETACHED; +#if defined(THREAD_LOCAL_ALLOC) +GC_init_thread_local(&(me->tlfs)); +#endif +UNLOCK(); +return GC_SUCCESS; +} else if ((me->flags&FINISHED)!=0){ 
+#ifdef GC_DARWIN_THREADS +me->stop_info.mach_thread=mach_thread_self(); +#endif +GC_record_stack_base(me,sb); +me->flags&=~FINISHED; +#ifdef GC_EXPLICIT_SIGNALS_UNBLOCK +GC_unblock_gc_signals(); +#endif +#if defined(THREAD_LOCAL_ALLOC) +GC_init_thread_local(&(me->tlfs)); +#endif +UNLOCK(); +return GC_SUCCESS; +} else { +UNLOCK(); +return GC_DUPLICATE; +} +} +struct start_info { +void*(*start_routine)(void*); +void*arg; +word flags; +sem_t registered; +}; +GC_INNER_PTHRSTART GC_thread GC_start_rtn_prepare_thread( +void*(**pstart)(void*), +void**pstart_arg, +struct GC_stack_base*sb,void*arg) +{ +struct start_info*si=(struct start_info*)arg; +pthread_t self=pthread_self(); +GC_thread me; +DCL_LOCK_STATE; +#ifdef DEBUG_THREADS +GC_log_printf("Starting thread %p,pid=%ld,sp=%p\n", +(void*)self,(long)getpid(),(void*)&arg); +#endif +LOCK(); +me=GC_register_my_thread_inner(sb,self); +me->flags=si->flags; +#if defined(THREAD_LOCAL_ALLOC) +GC_init_thread_local(&(me->tlfs)); +#endif +UNLOCK(); +*pstart=si->start_routine; +#ifdef DEBUG_THREADS +GC_log_printf("start_routine=%p\n",(void*)(signed_word)(*pstart)); +#endif +*pstart_arg=si->arg; +sem_post(&(si->registered)); +return me; +} +#if!defined(SN_TARGET_ORBIS)&&!defined(SN_TARGET_PSP2) +STATIC void*GC_start_routine(void*arg) +{ +#ifdef INCLUDE_LINUX_THREAD_DESCR +struct GC_stack_base sb; +#ifdef REDIRECT_MALLOC +GC_disable(); +#endif +if (GC_get_stack_base(&sb)!=GC_SUCCESS) +ABORT("Failed to get thread stack base"); +#ifdef REDIRECT_MALLOC +GC_enable(); +#endif +return GC_inner_start_routine(&sb,arg); +#else +return GC_call_with_stack_base(GC_inner_start_routine,arg); +#endif +} +GC_API int WRAP_FUNC(pthread_create)(pthread_t*new_thread, +GC_PTHREAD_CREATE_CONST pthread_attr_t*attr, +void*(*start_routine)(void*),void*arg) +{ +int result; +int detachstate; +word my_flags=0; +struct start_info si; +DCL_LOCK_STATE; +INIT_REAL_SYMS(); +if (!EXPECT(parallel_initialized,TRUE)) +GC_init_parallel(); +if (sem_init(&si.registered,GC_SEM_INIT_PSHARED,0)!=0) +ABORT("sem_init failed"); +si.start_routine=start_routine; +si.arg=arg; +LOCK(); +if (!EXPECT(GC_thr_initialized,TRUE)) +GC_thr_init(); +#ifdef GC_ASSERTIONS +{ +size_t stack_size=0; +if (NULL!=attr){ +if (pthread_attr_getstacksize(attr,&stack_size)!=0) +ABORT("pthread_attr_getstacksize failed"); +} +if (0==stack_size){ +pthread_attr_t my_attr; +if (pthread_attr_init(&my_attr)!=0) +ABORT("pthread_attr_init failed"); +if (pthread_attr_getstacksize(&my_attr,&stack_size)!=0) +ABORT("pthread_attr_getstacksize failed"); +(void)pthread_attr_destroy(&my_attr); +} +if (0==stack_size){ +#ifndef SOLARIS +WARN("Failed to get stack size for assertion checking\n",0); +#endif +stack_size=1000000; +} +GC_ASSERT(stack_size>=65536); +} +#endif +if (NULL==attr){ +detachstate=PTHREAD_CREATE_JOINABLE; +} else { +pthread_attr_getdetachstate(attr,&detachstate); +} +if (PTHREAD_CREATE_DETACHED==detachstate)my_flags|=DETACHED; +si.flags=my_flags; +UNLOCK(); +#ifdef DEBUG_THREADS +GC_log_printf("About to start new thread from thread %p\n", +(void*)pthread_self()); +#endif +set_need_to_lock(); +result=REAL_FUNC(pthread_create)(new_thread,attr,GC_start_routine, +&si); +if (0==result){ +IF_CANCEL(int cancel_state;) +#ifdef DEBUG_THREADS +GC_log_printf("Started thread %p\n",(void*)(*new_thread)); +#endif +DISABLE_CANCEL(cancel_state); +while (0!=sem_wait(&si.registered)){ +#if defined(GC_HAIKU_THREADS) +if (EACCES==errno)continue; +#endif +if (EINTR!=errno)ABORT("sem_wait failed"); +} +RESTORE_CANCEL(cancel_state); +} 
+sem_destroy(&si.registered); +return(result); +} +#endif +#if defined(USE_SPIN_LOCK)||!defined(NO_PTHREAD_TRYLOCK) +#define GC_PAUSE_SPIN_CYCLES 10 +STATIC void GC_pause(void) +{ +int i; +for (i=0;i < GC_PAUSE_SPIN_CYCLES;++i){ +#if defined(AO_HAVE_compiler_barrier)&&!defined(BASE_ATOMIC_OPS_EMULATED) +AO_compiler_barrier(); +#else +GC_noop1(i); +#endif +} +} +#endif +#ifndef SPIN_MAX +#define SPIN_MAX 128 +#endif +GC_INNER volatile GC_bool GC_collecting=FALSE; +#if (!defined(USE_SPIN_LOCK)&&!defined(NO_PTHREAD_TRYLOCK))||defined(PARALLEL_MARK) +#ifdef LOCK_STATS +volatile AO_t GC_spin_count=0; +volatile AO_t GC_block_count=0; +volatile AO_t GC_unlocked_count=0; +#endif +STATIC void GC_generic_lock(pthread_mutex_t*lock) +{ +#ifndef NO_PTHREAD_TRYLOCK +unsigned pause_length=1; +unsigned i; +if (0==pthread_mutex_trylock(lock)){ +#ifdef LOCK_STATS +(void)AO_fetch_and_add1(&GC_unlocked_count); +#endif +return; +} +for (;pause_length<=SPIN_MAX;pause_length<<=1){ +for (i=0;i < pause_length;++i){ +GC_pause(); +} +switch(pthread_mutex_trylock(lock)){ +case 0: +#ifdef LOCK_STATS +(void)AO_fetch_and_add1(&GC_spin_count); +#endif +return; +case EBUSY: +break; +default: +ABORT("Unexpected error from pthread_mutex_trylock"); +} +} +#endif +#ifdef LOCK_STATS +(void)AO_fetch_and_add1(&GC_block_count); +#endif +pthread_mutex_lock(lock); +} +#endif +#if defined(AO_HAVE_char_load)&&!defined(BASE_ATOMIC_OPS_EMULATED) +#define is_collecting()((GC_bool)AO_char_load((unsigned char*)&GC_collecting)) +#else +#define is_collecting()GC_collecting +#endif +#if defined(USE_SPIN_LOCK) +GC_INNER volatile AO_TS_t GC_allocate_lock=AO_TS_INITIALIZER; +#define low_spin_max 30 +#define high_spin_max SPIN_MAX +static volatile AO_t spin_max=low_spin_max; +static volatile AO_t last_spins=0; +GC_INNER void GC_lock(void) +{ +unsigned my_spin_max; +unsigned my_last_spins; +unsigned i; +if (AO_test_and_set_acquire(&GC_allocate_lock)==AO_TS_CLEAR){ +return; +} +my_spin_max=(unsigned)AO_load(&spin_max); +my_last_spins=(unsigned)AO_load(&last_spins); +for (i=0;i < my_spin_max;i++){ +if (is_collecting()||GC_nprocs==1) +goto yield; +if (i < my_last_spins/2){ +GC_pause(); +continue; +} +if (AO_test_and_set_acquire(&GC_allocate_lock)==AO_TS_CLEAR){ +AO_store(&last_spins,(AO_t)i); +AO_store(&spin_max,(AO_t)high_spin_max); +return; +} +} +AO_store(&spin_max,(AO_t)low_spin_max); +yield: +for (i=0;;++i){ +if (AO_test_and_set_acquire(&GC_allocate_lock)==AO_TS_CLEAR){ +return; +} +#define SLEEP_THRESHOLD 12 +if (i < SLEEP_THRESHOLD){ +sched_yield(); +} else { +struct timespec ts; +if (i > 24)i=24; +ts.tv_sec=0; +ts.tv_nsec=1< 2||(glibc_major==2&&glibc_minor>=19)){ +if (0!=pthread_mutexattr_init(&mattr)){ +ABORT("pthread_mutexattr_init failed"); +} +if (0!=pthread_mutexattr_settype(&mattr,PTHREAD_MUTEX_NORMAL)){ +ABORT("pthread_mutexattr_settype failed"); +} +if (0!=pthread_mutex_init(&mark_mutex,&mattr)){ +ABORT("pthread_mutex_init failed"); +} +(void)pthread_mutexattr_destroy(&mattr); +} +#endif +} +GC_INNER void GC_acquire_mark_lock(void) +{ +#if defined(NUMERIC_THREAD_ID_UNIQUE)&&!defined(THREAD_SANITIZER) +GC_ASSERT(GC_mark_lock_holder!=NUMERIC_THREAD_ID(pthread_self())); +#endif +GC_generic_lock(&mark_mutex); +SET_MARK_LOCK_HOLDER; +} +GC_INNER void GC_release_mark_lock(void) +{ +UNSET_MARK_LOCK_HOLDER; +if (pthread_mutex_unlock(&mark_mutex)!=0){ +ABORT("pthread_mutex_unlock failed"); +} +} +STATIC void GC_wait_builder(void) +{ +ASSERT_CANCEL_DISABLED(); +UNSET_MARK_LOCK_HOLDER; +if (pthread_cond_wait(&builder_cv,&mark_mutex)!=0){ 
+ABORT("pthread_cond_wait failed"); +} +GC_ASSERT(GC_mark_lock_holder==NO_THREAD); +SET_MARK_LOCK_HOLDER; +} +GC_INNER void GC_wait_for_reclaim(void) +{ +GC_acquire_mark_lock(); +while (GC_fl_builder_count > 0){ +GC_wait_builder(); +} +GC_release_mark_lock(); +} +GC_INNER void GC_notify_all_builder(void) +{ +GC_ASSERT(GC_mark_lock_holder==NUMERIC_THREAD_ID(pthread_self())); +if (pthread_cond_broadcast(&builder_cv)!=0){ +ABORT("pthread_cond_broadcast failed"); +} +} +GC_INNER void GC_wait_marker(void) +{ +ASSERT_CANCEL_DISABLED(); +GC_ASSERT(GC_parallel); +UNSET_MARK_LOCK_HOLDER; +if (pthread_cond_wait(&mark_cv,&mark_mutex)!=0){ +ABORT("pthread_cond_wait failed"); +} +GC_ASSERT(GC_mark_lock_holder==NO_THREAD); +SET_MARK_LOCK_HOLDER; +} +GC_INNER void GC_notify_all_marker(void) +{ +GC_ASSERT(GC_parallel); +if (pthread_cond_broadcast(&mark_cv)!=0){ +ABORT("pthread_cond_broadcast failed"); +} +} +#endif +#ifdef PTHREAD_REGISTER_CANCEL_WEAK_STUBS +EXTERN_C_BEGIN +extern void __pthread_register_cancel(void)__attribute__((__weak__)); +extern void __pthread_unregister_cancel(void)__attribute__((__weak__)); +EXTERN_C_END +void __pthread_register_cancel(void){} +void __pthread_unregister_cancel(void){} +#endif +#endif +#if defined(USE_CUSTOM_SPECIFIC) +static const tse invalid_tse={INVALID_QTID,0,0,INVALID_THREADID}; +GC_INNER int GC_key_create_inner(tsd**key_ptr) +{ +int i; +int ret; +tsd*result; +GC_ASSERT(I_HOLD_LOCK()); +GC_ASSERT((word)(&invalid_tse.next)% sizeof(tse*)==0); +result=(tsd*)MALLOC_CLEAR(sizeof(tsd)); +if (NULL==result)return ENOMEM; +ret=pthread_mutex_init(&result->lock,NULL); +if (ret!=0)return ret; +for (i=0;i < TS_CACHE_SIZE;++i){ +result->cache[i]=( tse*)&invalid_tse; +} +#ifdef GC_ASSERTIONS +for (i=0;i < TS_HASH_SIZE;++i){ +GC_ASSERT(result->hash[i].p==0); +} +#endif +*key_ptr=result; +return 0; +} +GC_INNER int GC_setspecific(tsd*key,void*value) +{ +pthread_t self=pthread_self(); +unsigned hash_val=HASH(self); +volatile tse*entry; +GC_ASSERT(I_HOLD_LOCK()); +GC_ASSERT(self!=INVALID_THREADID); +GC_dont_gc++; +entry=(volatile tse*)MALLOC_CLEAR(sizeof(tse)); +GC_dont_gc--; +if (0==entry)return ENOMEM; +pthread_mutex_lock(&(key->lock)); +entry->next=key->hash[hash_val].p; +entry->thread=self; +entry->value=TS_HIDE_VALUE(value); +GC_ASSERT(entry->qtid==INVALID_QTID); +AO_store_release(&key->hash[hash_val].ao,(AO_t)entry); +GC_dirty(( void*)entry); +GC_dirty(key->hash+hash_val); +if (pthread_mutex_unlock(&key->lock)!=0) +ABORT("pthread_mutex_unlock failed (setspecific)"); +return 0; +} +GC_INNER void GC_remove_specific_after_fork(tsd*key,pthread_t t) +{ +unsigned hash_val=HASH(t); +tse*entry; +tse*prev=NULL; +#ifdef CAN_HANDLE_FORK +GC_ASSERT(I_HOLD_LOCK()); +#endif +pthread_mutex_lock(&(key->lock)); +entry=key->hash[hash_val].p; +while (entry!=NULL&&!THREAD_EQUAL(entry->thread,t)){ +prev=entry; +entry=entry->next; +} +if (entry!=NULL){ +entry->qtid=INVALID_QTID; +if (NULL==prev){ +key->hash[hash_val].p=entry->next; +GC_dirty(key->hash+hash_val); +} else { +prev->next=entry->next; +GC_dirty(prev); +} +} +#ifdef LINT2 +GC_noop1((word)entry); +#endif +if (pthread_mutex_unlock(&key->lock)!=0) +ABORT("pthread_mutex_unlock failed (remove_specific after fork)"); +} +GC_INNER void*GC_slow_getspecific(tsd*key,word qtid, +tse*volatile*cache_ptr) +{ +pthread_t self=pthread_self(); +tse*entry=key->hash[HASH(self)].p; +GC_ASSERT(qtid!=INVALID_QTID); +while (entry!=NULL&&!THREAD_EQUAL(entry->thread,self)){ +entry=entry->next; +} +if (entry==NULL)return NULL; +entry->qtid=(AO_t)qtid; 
+*cache_ptr=entry; +return TS_REVEAL_PTR(entry->value); +} +#ifdef GC_ASSERTIONS +void GC_check_tsd_marks(tsd*key) +{ +int i; +tse*p; +if (!GC_is_marked(GC_base(key))){ +ABORT("Unmarked thread-specific-data table"); +} +for (i=0;i < TS_HASH_SIZE;++i){ +for (p=key->hash[i].p;p!=0;p=p->next){ +if (!GC_is_marked(GC_base(p))){ +ABORT_ARG1("Unmarked thread-specific-data entry", +" at %p",(void*)p); +} +} +} +for (i=0;i < TS_CACHE_SIZE;++i){ +p=key->cache[i]; +if (p!=&invalid_tse&&!GC_is_marked(GC_base(p))){ +ABORT_ARG1("Unmarked cached thread-specific-data entry", +" at %p",(void*)p); +} +} +} +#endif +#endif +#if defined(GC_WIN32_THREADS) +#ifdef THREAD_LOCAL_ALLOC +#endif +#if!defined(USE_PTHREAD_LOCKS) +GC_INNER CRITICAL_SECTION GC_allocate_ml; +#ifdef GC_ASSERTIONS +GC_INNER DWORD GC_lock_holder=NO_THREAD; +#endif +#else +GC_INNER pthread_mutex_t GC_allocate_ml=PTHREAD_MUTEX_INITIALIZER; +#ifdef GC_ASSERTIONS +GC_INNER unsigned long GC_lock_holder=NO_THREAD; +#endif +#endif +#undef CreateThread +#undef ExitThread +#undef _beginthreadex +#undef _endthreadex +#ifdef GC_PTHREADS +#include +#undef pthread_create +#undef pthread_join +#undef pthread_detach +#ifndef GC_NO_PTHREAD_SIGMASK +#undef pthread_sigmask +#endif +STATIC void*GC_pthread_start(void*arg); +STATIC void GC_thread_exit_proc(void*arg); +#include +#ifdef CAN_CALL_ATFORK +#include +#endif +#elif!defined(MSWINCE) +#include +#include +#endif +static ptr_t copy_ptr_regs(word*regs,const CONTEXT*pcontext); +#if defined(I386) +#ifdef WOW64_THREAD_CONTEXT_WORKAROUND +#define PUSHED_REGS_COUNT 9 +#else +#define PUSHED_REGS_COUNT 7 +#endif +#elif defined(X86_64)||defined(SHx) +#define PUSHED_REGS_COUNT 15 +#elif defined(ARM32) +#define PUSHED_REGS_COUNT 13 +#elif defined(AARCH64) +#define PUSHED_REGS_COUNT 30 +#elif defined(MIPS)||defined(ALPHA) +#define PUSHED_REGS_COUNT 28 +#elif defined(PPC) +#define PUSHED_REGS_COUNT 29 +#endif +#if (defined(GC_DLL)||defined(GC_INSIDE_DLL))&&!defined(NO_CRT)&&!defined(GC_NO_THREADS_DISCOVERY)&&!defined(MSWINCE)&&!defined(THREAD_LOCAL_ALLOC)&&!defined(GC_PTHREADS) +#ifdef GC_DISCOVER_TASK_THREADS +#define GC_win32_dll_threads TRUE +#else +STATIC GC_bool GC_win32_dll_threads=FALSE; +#endif +#else +#ifndef GC_NO_THREADS_DISCOVERY +#define GC_NO_THREADS_DISCOVERY +#endif +#define GC_win32_dll_threads FALSE +#undef MAX_THREADS +#define MAX_THREADS 1 +#endif +typedef LONG*IE_t; +STATIC GC_bool GC_thr_initialized=FALSE; +#ifndef GC_ALWAYS_MULTITHREADED +GC_INNER GC_bool GC_need_to_lock=FALSE; +#endif +static GC_bool parallel_initialized=FALSE; +GC_API void GC_CALL GC_use_threads_discovery(void) +{ +#ifdef GC_NO_THREADS_DISCOVERY +ABORT("GC DllMain-based thread registration unsupported"); +#else +GC_ASSERT(!parallel_initialized); +#ifndef GC_DISCOVER_TASK_THREADS +GC_win32_dll_threads=TRUE; +#endif +GC_init_parallel(); +#endif +} +#define ADDR_LIMIT ((ptr_t)GC_WORD_MAX) +struct GC_Thread_Rep { +union { +#ifndef GC_NO_THREADS_DISCOVERY +volatile AO_t in_use; +LONG long_in_use; +#endif +struct GC_Thread_Rep*next; +} tm; +DWORD id; +#ifdef MSWINCE +#define THREAD_HANDLE(t)(HANDLE)(word)(t)->id +#else +HANDLE handle; +#define THREAD_HANDLE(t)(t)->handle +#endif +ptr_t stack_base; +ptr_t last_stack_min; +#ifdef IA64 +ptr_t backing_store_end; +ptr_t backing_store_ptr; +#elif defined(I386) +ptr_t initial_stack_base; +#endif +ptr_t thread_blocked_sp; +struct GC_traced_stack_sect_s*traced_stack_sect; +unsigned short finalizer_skipped; +unsigned char finalizer_nested; +unsigned char suspended; +#ifdef GC_PTHREADS 
+unsigned char flags; +#define FINISHED 1 +#define DETACHED 2 +#define KNOWN_FINISHED(t)(((t)->flags)&FINISHED) +pthread_t pthread_id; +void*status; +#else +#define KNOWN_FINISHED(t)0 +#endif +#ifdef THREAD_LOCAL_ALLOC +struct thread_local_freelists tlfs; +#endif +#ifdef RETRY_GET_THREAD_CONTEXT +ptr_t context_sp; +word context_regs[PUSHED_REGS_COUNT]; +#endif +}; +typedef struct GC_Thread_Rep*GC_thread; +typedef volatile struct GC_Thread_Rep*GC_vthread; +#ifndef GC_NO_THREADS_DISCOVERY +STATIC DWORD GC_main_thread=0; +STATIC volatile AO_t GC_attached_thread=FALSE; +STATIC volatile GC_bool GC_please_stop=FALSE; +#elif defined(GC_ASSERTIONS) +STATIC GC_bool GC_please_stop=FALSE; +#endif +#if defined(WRAP_MARK_SOME)&&!defined(GC_PTHREADS) +GC_INNER GC_bool GC_started_thread_while_stopped(void) +{ +#ifndef GC_NO_THREADS_DISCOVERY +if (GC_win32_dll_threads){ +#ifdef AO_HAVE_compare_and_swap_release +if (AO_compare_and_swap_release(&GC_attached_thread,TRUE, +FALSE)) +return TRUE; +#else +AO_nop_full(); +if (AO_load(&GC_attached_thread)){ +AO_store(&GC_attached_thread,FALSE); +return TRUE; +} +#endif +} +#endif +return FALSE; +} +#endif +#ifndef MAX_THREADS +#define MAX_THREADS 512 +#endif +volatile struct GC_Thread_Rep dll_thread_table[MAX_THREADS]; +STATIC volatile LONG GC_max_thread_index=0; +#ifndef THREAD_TABLE_SZ +#define THREAD_TABLE_SZ 256 +#endif +#define THREAD_TABLE_INDEX(id)(int)((((id)>>8)^(id))% THREAD_TABLE_SZ) +STATIC GC_thread GC_threads[THREAD_TABLE_SZ]; +static struct GC_Thread_Rep first_thread; +static GC_bool first_thread_used=FALSE; +STATIC GC_thread GC_new_thread(DWORD id) +{ +int hv=THREAD_TABLE_INDEX(id); +GC_thread result; +#ifdef DEBUG_THREADS +GC_log_printf("Creating thread 0x%lx\n",(long)id); +if (GC_threads[hv]!=NULL) +GC_log_printf("Hash collision at GC_threads[%d]\n",hv); +#endif +GC_ASSERT(I_HOLD_LOCK()); +if (!EXPECT(first_thread_used,TRUE)){ +result=&first_thread; +first_thread_used=TRUE; +GC_ASSERT(NULL==GC_threads[hv]); +} else { +GC_ASSERT(!GC_win32_dll_threads); +result=(struct GC_Thread_Rep*) +GC_INTERNAL_MALLOC(sizeof(struct GC_Thread_Rep),NORMAL); +if (result==0)return(0); +} +result->tm.next=GC_threads[hv]; +GC_threads[hv]=result; +#ifdef GC_PTHREADS +GC_ASSERT(result->flags==0); +#endif +GC_ASSERT(result->thread_blocked_sp==NULL); +if (EXPECT(result!=&first_thread,TRUE)) +GC_dirty(result); +return(result); +} +GC_INNER GC_bool GC_in_thread_creation=FALSE; +GC_INLINE void GC_record_stack_base(GC_vthread me, +const struct GC_stack_base*sb) +{ +me->stack_base=(ptr_t)sb->mem_base; +#ifdef IA64 +me->backing_store_end=(ptr_t)sb->reg_base; +#elif defined(I386) +me->initial_stack_base=(ptr_t)sb->mem_base; +#endif +if (me->stack_base==NULL) +ABORT("Bad stack base in GC_register_my_thread"); +} +STATIC GC_thread GC_register_my_thread_inner(const struct GC_stack_base*sb, +DWORD thread_id) +{ +GC_vthread me; +#if defined(MPROTECT_VDB)&&!defined(CYGWIN32) +if (GC_auto_incremental +#ifdef GWW_VDB +&&!GC_gww_dirty_init() +#endif +) +GC_set_write_fault_handler(); +#endif +#ifndef GC_NO_THREADS_DISCOVERY +if (GC_win32_dll_threads){ +int i; +for (i=0; +InterlockedExchange(&dll_thread_table[i].tm.long_in_use,1)!=0; +i++){ +if (i==MAX_THREADS - 1) +ABORT("Too many threads"); +} +while (i > GC_max_thread_index){ +InterlockedIncrement((IE_t)&GC_max_thread_index); +} +if (GC_max_thread_index>=MAX_THREADS){ +GC_max_thread_index=MAX_THREADS - 1; +} +me=dll_thread_table+i; +} else +#endif +{ +GC_ASSERT(I_HOLD_LOCK()); +GC_in_thread_creation=TRUE; +me=GC_new_thread(thread_id); 
+GC_in_thread_creation=FALSE;
+if (me==0)
+ABORT("Failed to allocate memory for thread registering");
+}
+#ifdef GC_PTHREADS
+me->pthread_id=pthread_self();
+#endif
+#ifndef MSWINCE
+if (!DuplicateHandle(GetCurrentProcess(),GetCurrentThread(),
+GetCurrentProcess(),
+(HANDLE*)&(me->handle),
+0,FALSE,
+DUPLICATE_SAME_ACCESS)){
+ABORT_ARG1("DuplicateHandle failed",
+":errcode=0x%X",(unsigned)GetLastError());
+}
+#endif
+me->last_stack_min=ADDR_LIMIT;
+GC_record_stack_base(me,sb);
+me->id=thread_id;
+#if defined(THREAD_LOCAL_ALLOC)
+GC_init_thread_local((GC_tlfs)(&(me->tlfs)));
+#endif
+#ifndef GC_NO_THREADS_DISCOVERY
+if (GC_win32_dll_threads){
+if (GC_please_stop){
+AO_store(&GC_attached_thread,TRUE);
+AO_nop_full();
+}
+} else
+#endif
+{
+GC_ASSERT(!GC_please_stop);
+}
+return (GC_thread)(me);
+}
+GC_INLINE LONG GC_get_max_thread_index(void)
+{
+LONG my_max=GC_max_thread_index;
+if (my_max>=MAX_THREADS)return MAX_THREADS - 1;
+return my_max;
+}
+STATIC GC_thread GC_lookup_thread_inner(DWORD thread_id)
+{
+#ifndef GC_NO_THREADS_DISCOVERY
+if (GC_win32_dll_threads){
+int i;
+LONG my_max=GC_get_max_thread_index();
+for (i=0;i<=my_max&&
+(!AO_load_acquire(&dll_thread_table[i].tm.in_use)
+||dll_thread_table[i].id!=thread_id);
+i++){
+}
+return i<=my_max?(GC_thread)(dll_thread_table+i):NULL;
+} else
+#endif
+{
+GC_thread p=GC_threads[THREAD_TABLE_INDEX(thread_id)];
+GC_ASSERT(I_HOLD_LOCK());
+while (p!=0&&p->id!=thread_id)p=p->tm.next;
+return(p);
+}
+}
+#ifdef LINT2
+#define CHECK_LOOKUP_MY_THREAD(me)if (!(me))ABORT("GC_lookup_thread_inner(GetCurrentThreadId)failed")
+#else
+#define CHECK_LOOKUP_MY_THREAD(me)
+#endif
+GC_INNER void GC_reset_finalizer_nested(void)
+{
+GC_thread me=GC_lookup_thread_inner(GetCurrentThreadId());
+CHECK_LOOKUP_MY_THREAD(me);
+me->finalizer_nested=0;
+}
+GC_INNER unsigned char*GC_check_finalizer_nested(void)
+{
+GC_thread me=GC_lookup_thread_inner(GetCurrentThreadId());
+unsigned nesting_level;
+CHECK_LOOKUP_MY_THREAD(me);
+nesting_level=me->finalizer_nested;
+if (nesting_level){
+if (++me->finalizer_skipped < (1U<<nesting_level))return NULL;
+me->finalizer_skipped=0;
+}
+me->finalizer_nested=(unsigned char)(nesting_level+1);
+return&me->finalizer_nested;
+}
+#if defined(GC_ASSERTIONS)&&defined(THREAD_LOCAL_ALLOC)
+GC_bool GC_is_thread_tsd_valid(void*tsd)
+{
+GC_thread me;
+DCL_LOCK_STATE;
+LOCK();
+me=GC_lookup_thread_inner(GetCurrentThreadId());
+UNLOCK();
+return (word)tsd>=(word)(&me->tlfs)
+&&(word)tsd < (word)(&me->tlfs)+sizeof(me->tlfs);
+}
+#endif
+GC_API int GC_CALL GC_thread_is_registered(void)
+{
+DWORD thread_id=GetCurrentThreadId();
+GC_thread me;
+DCL_LOCK_STATE;
+LOCK();
+me=GC_lookup_thread_inner(thread_id);
+UNLOCK();
+return me!=NULL;
+}
+GC_API void GC_CALL GC_register_altstack(void*stack GC_ATTR_UNUSED,
+GC_word stack_size GC_ATTR_UNUSED,
+void*altstack GC_ATTR_UNUSED,
+GC_word altstack_size GC_ATTR_UNUSED)
+{
+}
+#if defined(MPROTECT_VDB)
+#define UNPROTECT_THREAD(t)if (!GC_win32_dll_threads&&GC_auto_incremental&&t!=&first_thread){ GC_ASSERT(SMALL_OBJ(GC_size(t)));GC_remove_protection(HBLKPTR(t),1,FALSE);} else (void)0
+#else
+#define UNPROTECT_THREAD(t)(void)0
+#endif
+#ifdef CYGWIN32
+#define GC_PTHREAD_PTRVAL(pthread_id)pthread_id
+#elif defined(GC_WIN32_PTHREADS)||defined(GC_PTHREADS_PARAMARK)
+#include
+#if defined(__WINPTHREADS_VERSION_MAJOR)
+#define GC_PTHREAD_PTRVAL(pthread_id)pthread_id
+#else
+#define GC_PTHREAD_PTRVAL(pthread_id)pthread_id.p
+#endif
+#endif
+STATIC void GC_delete_gc_thread_no_free(GC_vthread t)
+{
+#ifndef MSWINCE
+CloseHandle(t->handle);
+#endif
+#ifndef GC_NO_THREADS_DISCOVERY +if (GC_win32_dll_threads){ +t->stack_base=0; +t->id=0; +t->suspended=FALSE; +#ifdef RETRY_GET_THREAD_CONTEXT +t->context_sp=NULL; +#endif +AO_store_release(&t->tm.in_use,FALSE); +} else +#endif +{ +DWORD id=((GC_thread)t)->id; +int hv=THREAD_TABLE_INDEX(id); +GC_thread p=GC_threads[hv]; +GC_thread prev=NULL; +GC_ASSERT(I_HOLD_LOCK()); +while (p!=(GC_thread)t){ +prev=p; +p=p->tm.next; +} +if (prev==0){ +GC_threads[hv]=p->tm.next; +} else { +GC_ASSERT(prev!=&first_thread); +prev->tm.next=p->tm.next; +GC_dirty(prev); +} +} +} +STATIC void GC_delete_thread(DWORD id) +{ +if (GC_win32_dll_threads){ +GC_vthread t=GC_lookup_thread_inner(id); +if (0==t){ +WARN("Removing nonexistent thread,id=%" WARN_PRIdPTR "\n",id); +} else { +GC_delete_gc_thread_no_free(t); +} +} else { +int hv=THREAD_TABLE_INDEX(id); +GC_thread p=GC_threads[hv]; +GC_thread prev=NULL; +GC_ASSERT(I_HOLD_LOCK()); +while (p->id!=id){ +prev=p; +p=p->tm.next; +} +#ifndef MSWINCE +CloseHandle(p->handle); +#endif +if (prev==0){ +GC_threads[hv]=p->tm.next; +} else { +GC_ASSERT(prev!=&first_thread); +prev->tm.next=p->tm.next; +GC_dirty(prev); +} +if (EXPECT(p!=&first_thread,TRUE)){ +GC_INTERNAL_FREE(p); +} +} +} +GC_API void GC_CALL GC_allow_register_threads(void) +{ +GC_ASSERT(GC_lookup_thread_inner(GetCurrentThreadId())!=0); +#if!defined(GC_ALWAYS_MULTITHREADED)&&!defined(PARALLEL_MARK)&&!defined(GC_NO_THREADS_DISCOVERY) +parallel_initialized=TRUE; +#endif +set_need_to_lock(); +} +GC_API int GC_CALL GC_register_my_thread(const struct GC_stack_base*sb) +{ +GC_thread me; +DWORD thread_id=GetCurrentThreadId(); +DCL_LOCK_STATE; +if (GC_need_to_lock==FALSE) +ABORT("Threads explicit registering is not previously enabled"); +LOCK(); +me=GC_lookup_thread_inner(thread_id); +if (me==0){ +#ifdef GC_PTHREADS +me=GC_register_my_thread_inner(sb,thread_id); +#if defined(CPPCHECK) +GC_noop1(me->flags); +#endif +me->flags|=DETACHED; +#else +GC_register_my_thread_inner(sb,thread_id); +#endif +UNLOCK(); +return GC_SUCCESS; +} else +#ifdef GC_PTHREADS +if ((me->flags&FINISHED)!=0){ +GC_record_stack_base(me,sb); +me->flags&=~FINISHED; +#ifdef THREAD_LOCAL_ALLOC +GC_init_thread_local((GC_tlfs)(&me->tlfs)); +#endif +UNLOCK(); +return GC_SUCCESS; +} else +#endif +{ +UNLOCK(); +return GC_DUPLICATE; +} +} +STATIC void GC_wait_for_gc_completion(GC_bool wait_for_all) +{ +GC_ASSERT(I_HOLD_LOCK()); +if (GC_incremental&&GC_collection_in_progress()){ +word old_gc_no=GC_gc_no; +do { +ENTER_GC(); +GC_in_thread_creation=TRUE; +GC_collect_a_little_inner(1); +GC_in_thread_creation=FALSE; +EXIT_GC(); +UNLOCK(); +Sleep(0); +LOCK(); +} while (GC_incremental&&GC_collection_in_progress() +&&(wait_for_all||old_gc_no==GC_gc_no)); +} +} +GC_API int GC_CALL GC_unregister_my_thread(void) +{ +DCL_LOCK_STATE; +#ifdef DEBUG_THREADS +GC_log_printf("Unregistering thread 0x%lx\n",(long)GetCurrentThreadId()); +#endif +if (GC_win32_dll_threads){ +#if defined(THREAD_LOCAL_ALLOC) +GC_ASSERT(FALSE); +#else +GC_delete_thread(GetCurrentThreadId()); +#endif +} else { +#if defined(THREAD_LOCAL_ALLOC)||defined(GC_PTHREADS) +GC_thread me; +#endif +DWORD thread_id=GetCurrentThreadId(); +LOCK(); +GC_wait_for_gc_completion(FALSE); +#if defined(THREAD_LOCAL_ALLOC)||defined(GC_PTHREADS) +me=GC_lookup_thread_inner(thread_id); +CHECK_LOOKUP_MY_THREAD(me); +GC_ASSERT(!KNOWN_FINISHED(me)); +#endif +#if defined(THREAD_LOCAL_ALLOC) +GC_ASSERT(GC_getspecific(GC_thread_key)==&me->tlfs); +GC_destroy_thread_local(&(me->tlfs)); +#endif +#ifdef GC_PTHREADS +if 
((me->flags&DETACHED)==0){ +me->flags|=FINISHED; +} else +#endif +{ +GC_delete_thread(thread_id); +} +#if defined(THREAD_LOCAL_ALLOC) +GC_remove_specific(GC_thread_key); +#endif +UNLOCK(); +} +return GC_SUCCESS; +} +GC_INNER void GC_do_blocking_inner(ptr_t data,void*context GC_ATTR_UNUSED) +{ +struct blocking_data*d=(struct blocking_data*)data; +DWORD thread_id=GetCurrentThreadId(); +GC_thread me; +#ifdef IA64 +ptr_t stack_ptr=GC_save_regs_in_stack(); +#endif +DCL_LOCK_STATE; +LOCK(); +me=GC_lookup_thread_inner(thread_id); +CHECK_LOOKUP_MY_THREAD(me); +GC_ASSERT(me->thread_blocked_sp==NULL); +#ifdef IA64 +me->backing_store_ptr=stack_ptr; +#endif +me->thread_blocked_sp=(ptr_t)&d; +UNLOCK(); +d->client_data=(d->fn)(d->client_data); +LOCK(); +#if defined(CPPCHECK) +GC_noop1((word)me->thread_blocked_sp); +#endif +me->thread_blocked_sp=NULL; +UNLOCK(); +} +GC_API void*GC_CALL GC_call_with_gc_active(GC_fn_type fn, +void*client_data) +{ +struct GC_traced_stack_sect_s stacksect; +DWORD thread_id=GetCurrentThreadId(); +GC_thread me; +DCL_LOCK_STATE; +LOCK(); +me=GC_lookup_thread_inner(thread_id); +CHECK_LOOKUP_MY_THREAD(me); +GC_ASSERT(me->stack_base!=NULL); +if ((word)me->stack_base < (word)(&stacksect)){ +me->stack_base=(ptr_t)(&stacksect); +#if defined(I386) +me->initial_stack_base=me->stack_base; +#endif +} +if (me->thread_blocked_sp==NULL){ +UNLOCK(); +client_data=fn(client_data); +GC_noop1(COVERT_DATAFLOW(&stacksect)); +return client_data; +} +stacksect.saved_stack_ptr=me->thread_blocked_sp; +#ifdef IA64 +stacksect.backing_store_end=GC_save_regs_in_stack(); +stacksect.saved_backing_store_ptr=me->backing_store_ptr; +#endif +stacksect.prev=me->traced_stack_sect; +me->thread_blocked_sp=NULL; +me->traced_stack_sect=&stacksect; +UNLOCK(); +client_data=fn(client_data); +GC_ASSERT(me->thread_blocked_sp==NULL); +GC_ASSERT(me->traced_stack_sect==&stacksect); +LOCK(); +#if defined(CPPCHECK) +GC_noop1((word)me->traced_stack_sect); +#endif +me->traced_stack_sect=stacksect.prev; +#ifdef IA64 +me->backing_store_ptr=stacksect.saved_backing_store_ptr; +#endif +me->thread_blocked_sp=stacksect.saved_stack_ptr; +UNLOCK(); +return client_data; +} +GC_API void GC_CALL GC_set_stackbottom(void*gc_thread_handle, +const struct GC_stack_base*sb) +{ +GC_thread t=(GC_thread)gc_thread_handle; +GC_ASSERT(sb->mem_base!=NULL); +if (!EXPECT(GC_is_initialized,TRUE)){ +GC_ASSERT(NULL==t); +GC_stackbottom=(char*)sb->mem_base; +#ifdef IA64 +GC_register_stackbottom=(ptr_t)sb->reg_base; +#endif +return; +} +GC_ASSERT(I_HOLD_LOCK()); +if (NULL==t){ +t=GC_lookup_thread_inner(GetCurrentThreadId()); +CHECK_LOOKUP_MY_THREAD(t); +} +GC_ASSERT(!KNOWN_FINISHED(t)); +GC_ASSERT(NULL==t->thread_blocked_sp +&&NULL==t->traced_stack_sect); +t->stack_base=(ptr_t)sb->mem_base; +t->last_stack_min=ADDR_LIMIT; +#ifdef IA64 +t->backing_store_end=(ptr_t)sb->reg_base; +#endif +} +GC_API void*GC_CALL GC_get_my_stackbottom(struct GC_stack_base*sb) +{ +DWORD thread_id=GetCurrentThreadId(); +GC_thread me; +DCL_LOCK_STATE; +LOCK(); +me=GC_lookup_thread_inner(thread_id); +CHECK_LOOKUP_MY_THREAD(me); +sb->mem_base=me->stack_base; +#ifdef IA64 +sb->reg_base=me->backing_store_end; +#endif +UNLOCK(); +return (void*)me; +} +#ifdef GC_PTHREADS +#define PTHREAD_MAP_SIZE 512 +DWORD GC_pthread_map_cache[PTHREAD_MAP_SIZE]={0}; +#define PTHREAD_MAP_INDEX(pthread_id)((NUMERIC_THREAD_ID(pthread_id)>>5)% PTHREAD_MAP_SIZE) +#define SET_PTHREAD_MAP_CACHE(pthread_id,win32_id)(void)(GC_pthread_map_cache[PTHREAD_MAP_INDEX(pthread_id)]=(win32_id)) +#define 
GET_PTHREAD_MAP_CACHE(pthread_id)GC_pthread_map_cache[PTHREAD_MAP_INDEX(pthread_id)] +STATIC GC_thread GC_lookup_pthread(pthread_t id) +{ +#ifndef GC_NO_THREADS_DISCOVERY +if (GC_win32_dll_threads){ +int i; +LONG my_max=GC_get_max_thread_index(); +for (i=0;i<=my_max&& +(!AO_load_acquire(&dll_thread_table[i].tm.in_use) +||THREAD_EQUAL(dll_thread_table[i].pthread_id,id)); +i++){ +} +return i<=my_max?(GC_thread)(dll_thread_table+i):NULL; +} else +#endif +{ +DWORD win32_id=GET_PTHREAD_MAP_CACHE(id); +int hv_guess=THREAD_TABLE_INDEX(win32_id); +int hv; +GC_thread p; +DCL_LOCK_STATE; +LOCK(); +for (p=GC_threads[hv_guess];0!=p;p=p->tm.next){ +if (THREAD_EQUAL(p->pthread_id,id)) +goto foundit; +} +for (hv=0;hv < THREAD_TABLE_SZ;++hv){ +for (p=GC_threads[hv];0!=p;p=p->tm.next){ +if (THREAD_EQUAL(p->pthread_id,id)) +goto foundit; +} +} +p=0; +foundit: +UNLOCK(); +return p; +} +} +#endif +#ifdef CAN_HANDLE_FORK +STATIC void GC_remove_all_threads_but_me(void) +{ +int hv; +GC_thread me=NULL; +DWORD thread_id; +pthread_t pthread_id=pthread_self(); +GC_ASSERT(!GC_win32_dll_threads); +for (hv=0;hv < THREAD_TABLE_SZ;++hv){ +GC_thread p,next; +for (p=GC_threads[hv];0!=p;p=next){ +next=p->tm.next; +if (THREAD_EQUAL(p->pthread_id,pthread_id) +&&me==NULL){ +me=p; +p->tm.next=0; +} else { +#ifdef THREAD_LOCAL_ALLOC +if ((p->flags&FINISHED)==0){ +GC_remove_specific_after_fork(GC_thread_key,p->pthread_id); +} +#endif +if (&first_thread!=p) +GC_INTERNAL_FREE(p); +} +} +GC_threads[hv]=NULL; +} +GC_ASSERT(me!=NULL); +thread_id=GetCurrentThreadId(); +GC_threads[THREAD_TABLE_INDEX(thread_id)]=me; +me->id=thread_id; +#ifndef MSWINCE +if (!DuplicateHandle(GetCurrentProcess(),GetCurrentThread(), +GetCurrentProcess(),(HANDLE*)&me->handle, +0,FALSE, +DUPLICATE_SAME_ACCESS)) +ABORT("DuplicateHandle failed"); +#endif +#if defined(THREAD_LOCAL_ALLOC)&&!defined(USE_CUSTOM_SPECIFIC) +if (GC_setspecific(GC_thread_key,&me->tlfs)!=0) +ABORT("GC_setspecific failed (in child)"); +#endif +} +static void fork_prepare_proc(void) +{ +LOCK(); +#ifdef PARALLEL_MARK +if (GC_parallel) +GC_wait_for_reclaim(); +#endif +GC_wait_for_gc_completion(TRUE); +#ifdef PARALLEL_MARK +if (GC_parallel) +GC_acquire_mark_lock(); +#endif +} +static void fork_parent_proc(void) +{ +#ifdef PARALLEL_MARK +if (GC_parallel) +GC_release_mark_lock(); +#endif +UNLOCK(); +} +static void fork_child_proc(void) +{ +#ifdef PARALLEL_MARK +if (GC_parallel){ +GC_release_mark_lock(); +GC_parallel=FALSE; +} +#endif +GC_remove_all_threads_but_me(); +UNLOCK(); +} +GC_API void GC_CALL GC_atfork_prepare(void) +{ +if (!EXPECT(GC_is_initialized,TRUE))GC_init(); +if (GC_handle_fork<=0) +fork_prepare_proc(); +} +GC_API void GC_CALL GC_atfork_parent(void) +{ +if (GC_handle_fork<=0) +fork_parent_proc(); +} +GC_API void GC_CALL GC_atfork_child(void) +{ +if (GC_handle_fork<=0) +fork_child_proc(); +} +#endif +void GC_push_thread_structures(void) +{ +GC_ASSERT(I_HOLD_LOCK()); +#ifndef GC_NO_THREADS_DISCOVERY +if (GC_win32_dll_threads){ +} else +#endif +{ +GC_PUSH_ALL_SYM(GC_threads); +} +#if defined(THREAD_LOCAL_ALLOC) +GC_PUSH_ALL_SYM(GC_thread_key); +#endif +} +#ifdef WOW64_THREAD_CONTEXT_WORKAROUND +#ifndef CONTEXT_EXCEPTION_ACTIVE +#define CONTEXT_EXCEPTION_ACTIVE 0x08000000 +#define CONTEXT_EXCEPTION_REQUEST 0x40000000 +#define CONTEXT_EXCEPTION_REPORTING 0x80000000 +#endif +static BOOL isWow64; +#define GET_THREAD_CONTEXT_FLAGS (isWow64?CONTEXT_INTEGER|CONTEXT_CONTROL|CONTEXT_EXCEPTION_REQUEST|CONTEXT_SEGMENTS:CONTEXT_INTEGER|CONTEXT_CONTROL) +#else +#define GET_THREAD_CONTEXT_FLAGS 
(CONTEXT_INTEGER|CONTEXT_CONTROL) +#endif +STATIC void GC_suspend(GC_thread t) +{ +#ifndef MSWINCE +DWORD exitCode; +#endif +#ifdef RETRY_GET_THREAD_CONTEXT +int retry_cnt=0; +#define MAX_SUSPEND_THREAD_RETRIES (1000*1000) +#endif +#ifdef DEBUG_THREADS +GC_log_printf("Suspending 0x%x\n",(int)t->id); +#endif +UNPROTECT_THREAD(t); +GC_acquire_dirty_lock(); +#ifdef MSWINCE +while (SuspendThread(THREAD_HANDLE(t))==(DWORD)-1){ +GC_release_dirty_lock(); +Sleep(10); +GC_acquire_dirty_lock(); +} +#elif defined(RETRY_GET_THREAD_CONTEXT) +for (;;){ +if (GetExitCodeThread(t->handle,&exitCode) +&&exitCode!=STILL_ACTIVE){ +GC_release_dirty_lock(); +#ifdef GC_PTHREADS +t->stack_base=0; +#else +GC_ASSERT(GC_win32_dll_threads); +GC_delete_gc_thread_no_free(t); +#endif +return; +} +if (SuspendThread(t->handle)!=(DWORD)-1){ +CONTEXT context; +context.ContextFlags=GET_THREAD_CONTEXT_FLAGS; +if (GetThreadContext(t->handle,&context)){ +t->context_sp=copy_ptr_regs(t->context_regs,&context); +break; +} +if (ResumeThread(t->handle)==(DWORD)-1) +ABORT("ResumeThread failed in suspend loop"); +} +if (retry_cnt > 1){ +GC_release_dirty_lock(); +Sleep(0); +GC_acquire_dirty_lock(); +} +if (++retry_cnt>=MAX_SUSPEND_THREAD_RETRIES) +ABORT("SuspendThread loop failed"); +} +#else +if (GetExitCodeThread(t->handle,&exitCode) +&&exitCode!=STILL_ACTIVE){ +GC_release_dirty_lock(); +#ifdef GC_PTHREADS +t->stack_base=0; +#else +GC_ASSERT(GC_win32_dll_threads); +GC_delete_gc_thread_no_free(t); +#endif +return; +} +if (SuspendThread(t->handle)==(DWORD)-1) +ABORT("SuspendThread failed"); +#endif +t->suspended=(unsigned char)TRUE; +GC_release_dirty_lock(); +if (GC_on_thread_event) +GC_on_thread_event(GC_EVENT_THREAD_SUSPENDED,THREAD_HANDLE(t)); +} +#if defined(GC_ASSERTIONS)&&((defined(MSWIN32)&&!defined(CONSOLE_LOG))||defined(MSWINCE)) +GC_INNER GC_bool GC_write_disabled=FALSE; +#endif +GC_INNER void GC_stop_world(void) +{ +DWORD thread_id=GetCurrentThreadId(); +if (!GC_thr_initialized) +ABORT("GC_stop_world()called before GC_thr_init()"); +GC_ASSERT(I_HOLD_LOCK()); +#ifdef PARALLEL_MARK +if (GC_parallel){ +GC_acquire_mark_lock(); +GC_ASSERT(GC_fl_builder_count==0); +} +#endif +#if!defined(GC_NO_THREADS_DISCOVERY)||defined(GC_ASSERTIONS) +GC_please_stop=TRUE; +#endif +#if (defined(MSWIN32)&&!defined(CONSOLE_LOG))||defined(MSWINCE) +GC_ASSERT(!GC_write_disabled); +EnterCriticalSection(&GC_write_cs); +#ifdef GC_ASSERTIONS +GC_write_disabled=TRUE; +#endif +#endif +#ifndef GC_NO_THREADS_DISCOVERY +if (GC_win32_dll_threads){ +int i; +int my_max; +AO_store(&GC_attached_thread,FALSE); +my_max=(int)GC_get_max_thread_index(); +for (i=0;i<=my_max;i++){ +GC_vthread t=dll_thread_table+i; +if (t->stack_base!=0&&t->thread_blocked_sp==NULL +&&t->id!=thread_id){ +GC_suspend((GC_thread)t); +} +} +} else +#endif +{ +GC_thread t; +int i; +for (i=0;i < THREAD_TABLE_SZ;i++){ +for (t=GC_threads[i];t!=0;t=t->tm.next){ +if (t->stack_base!=0&&t->thread_blocked_sp==NULL +&&!KNOWN_FINISHED(t)&&t->id!=thread_id){ +GC_suspend(t); +} +} +} +} +#if (defined(MSWIN32)&&!defined(CONSOLE_LOG))||defined(MSWINCE) +#ifdef GC_ASSERTIONS +GC_write_disabled=FALSE; +#endif +LeaveCriticalSection(&GC_write_cs); +#endif +#ifdef PARALLEL_MARK +if (GC_parallel) +GC_release_mark_lock(); +#endif +} +GC_INNER void GC_start_world(void) +{ +#ifdef GC_ASSERTIONS +DWORD thread_id=GetCurrentThreadId(); +#endif +GC_ASSERT(I_HOLD_LOCK()); +if (GC_win32_dll_threads){ +LONG my_max=GC_get_max_thread_index(); +int i; +for (i=0;i<=my_max;i++){ +GC_thread t=(GC_thread)(dll_thread_table+i); +if 
(t->suspended){ +#ifdef DEBUG_THREADS +GC_log_printf("Resuming 0x%x\n",(int)t->id); +#endif +GC_ASSERT(t->stack_base!=0&&t->id!=thread_id); +if (ResumeThread(THREAD_HANDLE(t))==(DWORD)-1) +ABORT("ResumeThread failed"); +t->suspended=FALSE; +if (GC_on_thread_event) +GC_on_thread_event(GC_EVENT_THREAD_UNSUSPENDED,THREAD_HANDLE(t)); +} +} +} else { +GC_thread t; +int i; +for (i=0;i < THREAD_TABLE_SZ;i++){ +for (t=GC_threads[i];t!=0;t=t->tm.next){ +if (t->suspended){ +#ifdef DEBUG_THREADS +GC_log_printf("Resuming 0x%x\n",(int)t->id); +#endif +GC_ASSERT(t->stack_base!=0&&t->id!=thread_id); +if (ResumeThread(THREAD_HANDLE(t))==(DWORD)-1) +ABORT("ResumeThread failed"); +UNPROTECT_THREAD(t); +t->suspended=FALSE; +if (GC_on_thread_event) +GC_on_thread_event(GC_EVENT_THREAD_UNSUSPENDED,THREAD_HANDLE(t)); +} else { +#ifdef DEBUG_THREADS +GC_log_printf("Not resuming thread 0x%x as it is not suspended\n", +(int)t->id); +#endif +} +} +} +} +#if!defined(GC_NO_THREADS_DISCOVERY)||defined(GC_ASSERTIONS) +GC_please_stop=FALSE; +#endif +} +#ifdef MSWINCE +#define GC_wince_evaluate_stack_min(s)(ptr_t)(((word)(s)- 1)&~(word)0xFFFF) +#elif defined(GC_ASSERTIONS) +#define GC_dont_query_stack_min FALSE +#endif +static ptr_t last_address=0; +static MEMORY_BASIC_INFORMATION last_info; +STATIC ptr_t GC_get_stack_min(ptr_t s) +{ +ptr_t bottom; +GC_ASSERT(I_HOLD_LOCK()); +if (s!=last_address){ +VirtualQuery(s,&last_info,sizeof(last_info)); +last_address=s; +} +do { +bottom=(ptr_t)last_info.BaseAddress; +VirtualQuery(bottom - 1,&last_info,sizeof(last_info)); +last_address=bottom - 1; +} while ((last_info.Protect&PAGE_READWRITE) +&&!(last_info.Protect&PAGE_GUARD)); +return(bottom); +} +static GC_bool may_be_in_stack(ptr_t s) +{ +GC_ASSERT(I_HOLD_LOCK()); +if (s!=last_address){ +VirtualQuery(s,&last_info,sizeof(last_info)); +last_address=s; +} +return (last_info.Protect&PAGE_READWRITE) +&&!(last_info.Protect&PAGE_GUARD); +} +static ptr_t copy_ptr_regs(word*regs,const CONTEXT*pcontext){ +ptr_t sp; +int cnt=0; +#define context (*pcontext) +#define PUSH1(reg)(regs[cnt++]=(word)pcontext->reg) +#define PUSH2(r1,r2)(PUSH1(r1),PUSH1(r2)) +#define PUSH4(r1,r2,r3,r4)(PUSH2(r1,r2),PUSH2(r3,r4)) +#if defined(I386) +#ifdef WOW64_THREAD_CONTEXT_WORKAROUND +PUSH2(ContextFlags,SegFs); +#endif +PUSH4(Edi,Esi,Ebx,Edx),PUSH2(Ecx,Eax),PUSH1(Ebp); +sp=(ptr_t)context.Esp; +#elif defined(X86_64) +PUSH4(Rax,Rcx,Rdx,Rbx);PUSH2(Rbp,Rsi);PUSH1(Rdi); +PUSH4(R8,R9,R10,R11);PUSH4(R12,R13,R14,R15); +sp=(ptr_t)context.Rsp; +#elif defined(ARM32) +PUSH4(R0,R1,R2,R3),PUSH4(R4,R5,R6,R7),PUSH4(R8,R9,R10,R11); +PUSH1(R12); +sp=(ptr_t)context.Sp; +#elif defined(AARCH64) +PUSH4(X0,X1,X2,X3),PUSH4(X4,X5,X6,X7),PUSH4(X8,X9,X10,X11); +PUSH4(X12,X13,X14,X15),PUSH4(X16,X17,X18,X19),PUSH4(X20,X21,X22,X23); +PUSH4(X24,X25,X26,X27),PUSH1(X28); +PUSH1(Lr); +sp=(ptr_t)context.Sp; +#elif defined(SHx) +PUSH4(R0,R1,R2,R3),PUSH4(R4,R5,R6,R7),PUSH4(R8,R9,R10,R11); +PUSH2(R12,R13),PUSH1(R14); +sp=(ptr_t)context.R15; +#elif defined(MIPS) +PUSH4(IntAt,IntV0,IntV1,IntA0),PUSH4(IntA1,IntA2,IntA3,IntT0); +PUSH4(IntT1,IntT2,IntT3,IntT4),PUSH4(IntT5,IntT6,IntT7,IntS0); +PUSH4(IntS1,IntS2,IntS3,IntS4),PUSH4(IntS5,IntS6,IntS7,IntT8); +PUSH4(IntT9,IntK0,IntK1,IntS8); +sp=(ptr_t)context.IntSp; +#elif defined(PPC) +PUSH4(Gpr0,Gpr3,Gpr4,Gpr5),PUSH4(Gpr6,Gpr7,Gpr8,Gpr9); +PUSH4(Gpr10,Gpr11,Gpr12,Gpr14),PUSH4(Gpr15,Gpr16,Gpr17,Gpr18); +PUSH4(Gpr19,Gpr20,Gpr21,Gpr22),PUSH4(Gpr23,Gpr24,Gpr25,Gpr26); +PUSH4(Gpr27,Gpr28,Gpr29,Gpr30),PUSH1(Gpr31); +sp=(ptr_t)context.Gpr1; +#elif defined(ALPHA) 
+PUSH4(IntV0,IntT0,IntT1,IntT2),PUSH4(IntT3,IntT4,IntT5,IntT6); +PUSH4(IntT7,IntS0,IntS1,IntS2),PUSH4(IntS3,IntS4,IntS5,IntFp); +PUSH4(IntA0,IntA1,IntA2,IntA3),PUSH4(IntA4,IntA5,IntT8,IntT9); +PUSH4(IntT10,IntT11,IntT12,IntAt); +sp=(ptr_t)context.IntSp; +#elif defined(CPPCHECK) +sp=(ptr_t)(word)cnt; +#else +#error Architecture is not supported +#endif +#undef context +GC_ASSERT(cnt==PUSHED_REGS_COUNT); +return sp; +} +STATIC word GC_push_stack_for(GC_thread thread,DWORD me) +{ +ptr_t sp,stack_min; +struct GC_traced_stack_sect_s*traced_stack_sect= +thread->traced_stack_sect; +if (thread->id==me){ +GC_ASSERT(thread->thread_blocked_sp==NULL); +sp=GC_approx_sp(); +} else if ((sp=thread->thread_blocked_sp)==NULL){ +#ifdef RETRY_GET_THREAD_CONTEXT +word*regs=thread->context_regs; +if (thread->suspended){ +sp=thread->context_sp; +} else +#else +word regs[PUSHED_REGS_COUNT]; +#endif +{ +CONTEXT context; +context.ContextFlags=GET_THREAD_CONTEXT_FLAGS; +if (GetThreadContext(THREAD_HANDLE(thread),&context)){ +sp=copy_ptr_regs(regs,&context); +} else { +#ifdef RETRY_GET_THREAD_CONTEXT +sp=thread->context_sp; +if (NULL==sp){ +return 0; +} +#else +ABORT("GetThreadContext failed"); +#endif +} +} +#ifdef THREAD_LOCAL_ALLOC +GC_ASSERT(thread->suspended||!GC_world_stopped); +#endif +#ifndef WOW64_THREAD_CONTEXT_WORKAROUND +GC_push_many_regs(regs,PUSHED_REGS_COUNT); +#else +GC_push_many_regs(regs+2,PUSHED_REGS_COUNT - 2); +if (isWow64){ +DWORD ContextFlags=(DWORD)regs[0]; +WORD SegFs=(WORD)regs[1]; +if ((ContextFlags&CONTEXT_EXCEPTION_REPORTING)!=0 +&&(ContextFlags&(CONTEXT_EXCEPTION_ACTIVE +))!=0){ +LDT_ENTRY selector; +PNT_TIB tib; +if (!GetThreadSelectorEntry(THREAD_HANDLE(thread),SegFs,&selector)) +ABORT("GetThreadSelectorEntry failed"); +tib=(PNT_TIB)(selector.BaseLow +|(selector.HighWord.Bits.BaseMid<<16) +|(selector.HighWord.Bits.BaseHi<<24)); +#ifdef DEBUG_THREADS +GC_log_printf("TIB stack limit/base:%p .. %p\n", +(void*)tib->StackLimit,(void*)tib->StackBase); +#endif +GC_ASSERT(!((word)thread->stack_base +COOLER_THAN (word)tib->StackBase)); +if (thread->stack_base!=thread->initial_stack_base +&&((word)thread->stack_base<=(word)tib->StackLimit +||(word)tib->StackBase < (word)thread->stack_base)){ +WARN("GetThreadContext might return stale register values" +" including ESP=%p\n",sp); +} else { +sp=(ptr_t)tib->StackLimit; +} +} +#ifdef DEBUG_THREADS +else { +static GC_bool logged; +if (!logged +&&(ContextFlags&CONTEXT_EXCEPTION_REPORTING)==0){ +GC_log_printf("CONTEXT_EXCEPTION_REQUEST not supported\n"); +logged=TRUE; +} +} +#endif +} +#endif +} +if (thread->last_stack_min==ADDR_LIMIT){ +#ifdef MSWINCE +if (GC_dont_query_stack_min){ +stack_min=GC_wince_evaluate_stack_min(traced_stack_sect!=NULL? +(ptr_t)traced_stack_sect:thread->stack_base); +} else +#endif +{ +stack_min=GC_get_stack_min(traced_stack_sect!=NULL? +(ptr_t)traced_stack_sect:thread->stack_base); +UNPROTECT_THREAD(thread); +thread->last_stack_min=stack_min; +} +} else { +if (traced_stack_sect!=NULL&& +(word)thread->last_stack_min > (word)traced_stack_sect){ +UNPROTECT_THREAD(thread); +thread->last_stack_min=(ptr_t)traced_stack_sect; +} +if ((word)sp < (word)thread->stack_base +&&(word)sp>=(word)thread->last_stack_min){ +stack_min=sp; +} else { +if (may_be_in_stack(thread->id==me&& +(word)sp < (word)thread->last_stack_min? 
+sp:thread->last_stack_min)){ +stack_min=(ptr_t)last_info.BaseAddress; +if ((word)sp < (word)stack_min +||(word)sp>=(word)thread->stack_base) +stack_min=GC_get_stack_min(thread->last_stack_min); +} else { +stack_min=GC_get_stack_min(thread->stack_base); +} +UNPROTECT_THREAD(thread); +thread->last_stack_min=stack_min; +} +} +GC_ASSERT(GC_dont_query_stack_min +||stack_min==GC_get_stack_min(thread->stack_base) +||((word)sp>=(word)stack_min +&&(word)stack_min < (word)thread->stack_base +&&(word)stack_min +> (word)GC_get_stack_min(thread->stack_base))); +if ((word)sp>=(word)stack_min&&(word)sp < (word)thread->stack_base){ +#ifdef DEBUG_THREADS +GC_log_printf("Pushing stack for 0x%x from sp %p to %p from 0x%x\n", +(int)thread->id,(void*)sp,(void*)thread->stack_base, +(int)me); +#endif +GC_push_all_stack_sections(sp,thread->stack_base,traced_stack_sect); +} else { +if (thread->id==me||(word)sp>=(word)thread->stack_base +||(word)(sp+GC_page_size)< (word)stack_min) +WARN("Thread stack pointer %p out of range,pushing everything\n", +sp); +#ifdef DEBUG_THREADS +GC_log_printf("Pushing stack for 0x%x from (min)%p to %p from 0x%x\n", +(int)thread->id,(void*)stack_min, +(void*)thread->stack_base,(int)me); +#endif +GC_push_all_stack(stack_min,thread->stack_base); +} +return thread->stack_base - sp; +} +GC_INNER void GC_push_all_stacks(void) +{ +DWORD thread_id=GetCurrentThreadId(); +GC_bool found_me=FALSE; +#ifndef SMALL_CONFIG +unsigned nthreads=0; +#endif +word total_size=0; +#ifndef GC_NO_THREADS_DISCOVERY +if (GC_win32_dll_threads){ +int i; +LONG my_max=GC_get_max_thread_index(); +for (i=0;i<=my_max;i++){ +GC_thread t=(GC_thread)(dll_thread_table+i); +if (t->tm.in_use&&t->stack_base){ +#ifndef SMALL_CONFIG +++nthreads; +#endif +total_size+=GC_push_stack_for(t,thread_id); +if (t->id==thread_id)found_me=TRUE; +} +} +} else +#endif +{ +int i; +for (i=0;i < THREAD_TABLE_SZ;i++){ +GC_thread t; +for (t=GC_threads[i];t!=0;t=t->tm.next){ +if (!KNOWN_FINISHED(t)&&t->stack_base){ +#ifndef SMALL_CONFIG +++nthreads; +#endif +total_size+=GC_push_stack_for(t,thread_id); +if (t->id==thread_id)found_me=TRUE; +} +} +} +} +#ifndef SMALL_CONFIG +GC_VERBOSE_LOG_PRINTF("Pushed %d thread stacks%s\n",nthreads, +GC_win32_dll_threads? 
+" based on DllMain thread tracking":""); +#endif +if (!found_me&&!GC_in_thread_creation) +ABORT("Collecting from unknown thread"); +GC_total_stacksize=total_size; +} +#ifdef PARALLEL_MARK +#ifndef MAX_MARKERS +#define MAX_MARKERS 16 +#endif +static ptr_t marker_sp[MAX_MARKERS - 1]; +#ifdef IA64 +static ptr_t marker_bsp[MAX_MARKERS - 1]; +#endif +static ptr_t marker_last_stack_min[MAX_MARKERS - 1]; +#endif +GC_INNER void GC_get_next_stack(char*start,char*limit, +char**lo,char**hi) +{ +int i; +char*current_min=ADDR_LIMIT; +ptr_t*plast_stack_min=NULL; +GC_thread thread=NULL; +if (GC_win32_dll_threads){ +LONG my_max=GC_get_max_thread_index(); +for (i=0;i<=my_max;i++){ +ptr_t s=(ptr_t)(dll_thread_table[i].stack_base); +if ((word)s > (word)start&&(word)s < (word)current_min){ +plast_stack_min=(ptr_t*) +&dll_thread_table[i].last_stack_min; +current_min=s; +#if defined(CPPCHECK) +thread=(GC_thread)&dll_thread_table[i]; +#endif +} +} +} else { +for (i=0;i < THREAD_TABLE_SZ;i++){ +GC_thread t; +for (t=GC_threads[i];t!=0;t=t->tm.next){ +ptr_t s=t->stack_base; +if ((word)s > (word)start&&(word)s < (word)current_min){ +plast_stack_min=&t->last_stack_min; +thread=t; +current_min=s; +} +} +} +#ifdef PARALLEL_MARK +for (i=0;i < GC_markers_m1;++i){ +ptr_t s=marker_sp[i]; +#ifdef IA64 +#endif +if ((word)s > (word)start&&(word)s < (word)current_min){ +GC_ASSERT(marker_last_stack_min[i]!=NULL); +plast_stack_min=&marker_last_stack_min[i]; +current_min=s; +thread=NULL; +} +} +#endif +} +*hi=current_min; +if (current_min==ADDR_LIMIT){ +*lo=ADDR_LIMIT; +return; +} +GC_ASSERT((word)current_min > (word)start&&plast_stack_min!=NULL); +#ifdef MSWINCE +if (GC_dont_query_stack_min){ +*lo=GC_wince_evaluate_stack_min(current_min); +return; +} +#endif +if ((word)current_min > (word)limit&&!may_be_in_stack(limit)){ +*lo=ADDR_LIMIT; +return; +} +if (*plast_stack_min==ADDR_LIMIT +||!may_be_in_stack(*plast_stack_min)){ +*lo=GC_get_stack_min(current_min); +} else { +*lo=GC_get_stack_min(*plast_stack_min); +} +if (thread!=NULL){ +UNPROTECT_THREAD(thread); +} +*plast_stack_min=*lo; +} +#ifdef PARALLEL_MARK +#if defined(GC_PTHREADS)&&!defined(GC_PTHREADS_PARAMARK) +#if!defined(__MINGW32__) +#define GC_PTHREADS_PARAMARK +#endif +#endif +#if!defined(GC_PTHREADS_PARAMARK) +STATIC HANDLE GC_marker_cv[MAX_MARKERS - 1]={0}; +STATIC DWORD GC_marker_Id[MAX_MARKERS - 1]={0}; +#endif +#if defined(GC_PTHREADS)&&defined(HAVE_PTHREAD_SETNAME_NP_WITH_TID) +static void set_marker_thread_name(unsigned id) +{ +char name_buf[16]; +int len=sizeof("GC-marker-")- 1; +BCOPY("GC-marker-",name_buf,len); +if (id>=10) +name_buf[len++]=(char)('0'+(id/10)% 10); +name_buf[len]=(char)('0'+id % 10); +name_buf[len+1]='\0'; +if (pthread_setname_np(pthread_self(),name_buf)!=0) +WARN("pthread_setname_np failed\n",0); +} +#elif!defined(MSWINCE) +static FARPROC setThreadDescription_fn; +static void set_marker_thread_name(unsigned id) +{ +WCHAR name_buf[16]; +int len=sizeof(L"GC-marker-")/sizeof(WCHAR)- 1; +HRESULT hr; +if (!setThreadDescription_fn)return; +BCOPY(L"GC-marker-",name_buf,len*sizeof(WCHAR)); +if (id>=10) +name_buf[len++]=(WCHAR)('0'+(id/10)% 10); +name_buf[len]=(WCHAR)('0'+id % 10); +name_buf[len+1]=0; +hr=(*(HRESULT (WINAPI*)(HANDLE,const WCHAR*)) +(word)setThreadDescription_fn)(GetCurrentThread(),name_buf); +if (FAILED(hr)) +WARN("SetThreadDescription failed\n",0); +} +#else +#define set_marker_thread_name(id)(void)(id) +#endif +#ifdef GC_PTHREADS_PARAMARK +STATIC void*GC_mark_thread(void*id) +#elif defined(MSWINCE) +STATIC DWORD WINAPI 
GC_mark_thread(LPVOID id) +#else +STATIC unsigned __stdcall GC_mark_thread(void*id) +#endif +{ +word my_mark_no=0; +if ((word)id==GC_WORD_MAX)return 0; +set_marker_thread_name((unsigned)(word)id); +marker_sp[(word)id]=GC_approx_sp(); +#ifdef IA64 +marker_bsp[(word)id]=GC_save_regs_in_stack(); +#endif +#if!defined(GC_PTHREADS_PARAMARK) +GC_marker_Id[(word)id]=GetCurrentThreadId(); +#endif +GC_acquire_mark_lock(); +if (0==--GC_fl_builder_count) +GC_notify_all_builder(); +for (;;++my_mark_no){ +if (my_mark_no - GC_mark_no > (word)2){ +my_mark_no=GC_mark_no; +} +#ifdef DEBUG_THREADS +GC_log_printf("Starting mark helper for mark number %lu\n", +(unsigned long)my_mark_no); +#endif +GC_help_marker(my_mark_no); +} +} +#ifndef GC_ASSERTIONS +#define SET_MARK_LOCK_HOLDER (void)0 +#define UNSET_MARK_LOCK_HOLDER (void)0 +#endif +#ifdef CAN_HANDLE_FORK +static int available_markers_m1=0; +#else +#define available_markers_m1 GC_markers_m1 +#endif +#ifdef GC_PTHREADS_PARAMARK +#include +#if defined(GC_ASSERTIONS)&&!defined(NUMERIC_THREAD_ID) +#define NUMERIC_THREAD_ID(id)(unsigned long)(word)GC_PTHREAD_PTRVAL(id) +#endif +#ifdef CAN_HANDLE_FORK +static pthread_cond_t mark_cv; +#else +static pthread_cond_t mark_cv=PTHREAD_COND_INITIALIZER; +#endif +GC_INNER void GC_start_mark_threads_inner(void) +{ +int i; +pthread_attr_t attr; +pthread_t new_thread; +#ifndef NO_MARKER_SPECIAL_SIGMASK +sigset_t set,oldset; +#endif +GC_ASSERT(I_DONT_HOLD_LOCK()); +if (available_markers_m1<=0)return; +#ifdef CAN_HANDLE_FORK +if (GC_parallel)return; +{ +pthread_cond_t mark_cv_local=PTHREAD_COND_INITIALIZER; +BCOPY(&mark_cv_local,&mark_cv,sizeof(mark_cv)); +} +#endif +GC_ASSERT(GC_fl_builder_count==0); +if (0!=pthread_attr_init(&attr))ABORT("pthread_attr_init failed"); +if (0!=pthread_attr_setdetachstate(&attr,PTHREAD_CREATE_DETACHED)) +ABORT("pthread_attr_setdetachstate failed"); +#ifndef NO_MARKER_SPECIAL_SIGMASK +if (sigfillset(&set)!=0) +ABORT("sigfillset failed"); +if (pthread_sigmask(SIG_BLOCK,&set,&oldset)< 0){ +WARN("pthread_sigmask set failed,no markers started," +" errno=%" WARN_PRIdPTR "\n",errno); +GC_markers_m1=0; +(void)pthread_attr_destroy(&attr); +return; +} +#endif +#ifdef CAN_HANDLE_FORK +GC_markers_m1=available_markers_m1; +#endif +for (i=0;i < available_markers_m1;++i){ +marker_last_stack_min[i]=ADDR_LIMIT; +if (0!=pthread_create(&new_thread,&attr, +GC_mark_thread,(void*)(word)i)){ +WARN("Marker thread creation failed\n",0); +GC_markers_m1=i; +break; +} +} +#ifndef NO_MARKER_SPECIAL_SIGMASK +if (pthread_sigmask(SIG_SETMASK,&oldset,NULL)< 0){ +WARN("pthread_sigmask restore failed,errno=%" WARN_PRIdPTR "\n", +errno); +} +#endif +(void)pthread_attr_destroy(&attr); +GC_wait_for_markers_init(); +GC_COND_LOG_PRINTF("Started %d mark helper threads\n",GC_markers_m1); +} +#ifdef GC_ASSERTIONS +STATIC unsigned long GC_mark_lock_holder=NO_THREAD; +#define SET_MARK_LOCK_HOLDER (void)(GC_mark_lock_holder=NUMERIC_THREAD_ID(pthread_self())) +#define UNSET_MARK_LOCK_HOLDER do { GC_ASSERT(GC_mark_lock_holder==NUMERIC_THREAD_ID(pthread_self()));GC_mark_lock_holder=NO_THREAD;} while (0) +#endif +static pthread_mutex_t mark_mutex=PTHREAD_MUTEX_INITIALIZER; +static pthread_cond_t builder_cv=PTHREAD_COND_INITIALIZER; +#ifdef LOCK_STATS +volatile AO_t GC_block_count=0; +#endif +GC_INNER void GC_acquire_mark_lock(void) +{ +#if defined(NUMERIC_THREAD_ID_UNIQUE)&&!defined(THREAD_SANITIZER) +GC_ASSERT(GC_mark_lock_holder!=NUMERIC_THREAD_ID(pthread_self())); +#endif +if (pthread_mutex_lock(&mark_mutex)!=0){ +ABORT("pthread_mutex_lock 
failed"); +} +#ifdef LOCK_STATS +(void)AO_fetch_and_add1(&GC_block_count); +#endif +SET_MARK_LOCK_HOLDER; +} +GC_INNER void GC_release_mark_lock(void) +{ +UNSET_MARK_LOCK_HOLDER; +if (pthread_mutex_unlock(&mark_mutex)!=0){ +ABORT("pthread_mutex_unlock failed"); +} +} +STATIC void GC_wait_builder(void) +{ +UNSET_MARK_LOCK_HOLDER; +if (pthread_cond_wait(&builder_cv,&mark_mutex)!=0){ +ABORT("pthread_cond_wait failed"); +} +GC_ASSERT(GC_mark_lock_holder==NO_THREAD); +SET_MARK_LOCK_HOLDER; +} +GC_INNER void GC_wait_for_reclaim(void) +{ +GC_acquire_mark_lock(); +while (GC_fl_builder_count > 0){ +GC_wait_builder(); +} +GC_release_mark_lock(); +} +GC_INNER void GC_notify_all_builder(void) +{ +GC_ASSERT(GC_mark_lock_holder==NUMERIC_THREAD_ID(pthread_self())); +if (pthread_cond_broadcast(&builder_cv)!=0){ +ABORT("pthread_cond_broadcast failed"); +} +} +GC_INNER void GC_wait_marker(void) +{ +GC_ASSERT(GC_parallel); +UNSET_MARK_LOCK_HOLDER; +if (pthread_cond_wait(&mark_cv,&mark_mutex)!=0){ +ABORT("pthread_cond_wait failed"); +} +GC_ASSERT(GC_mark_lock_holder==NO_THREAD); +SET_MARK_LOCK_HOLDER; +} +GC_INNER void GC_notify_all_marker(void) +{ +GC_ASSERT(GC_parallel); +if (pthread_cond_broadcast(&mark_cv)!=0){ +ABORT("pthread_cond_broadcast failed"); +} +} +#else +#ifndef MARK_THREAD_STACK_SIZE +#define MARK_THREAD_STACK_SIZE 0 +#endif +static HANDLE mark_mutex_event=(HANDLE)0; +static HANDLE builder_cv=(HANDLE)0; +static HANDLE mark_cv=(HANDLE)0; +GC_INNER void GC_start_mark_threads_inner(void) +{ +int i; +GC_ASSERT(I_DONT_HOLD_LOCK()); +if (available_markers_m1<=0)return; +GC_ASSERT(GC_fl_builder_count==0); +for (i=0;i < GC_markers_m1;++i){ +if ((GC_marker_cv[i]=CreateEvent(NULL, +TRUE, +FALSE, +NULL))==(HANDLE)0) +ABORT("CreateEvent failed"); +} +for (i=0;i < GC_markers_m1;++i){ +#if defined(MSWINCE)||defined(MSWIN_XBOX1) +HANDLE handle; +DWORD thread_id; +marker_last_stack_min[i]=ADDR_LIMIT; +handle=CreateThread(NULL, +MARK_THREAD_STACK_SIZE, +GC_mark_thread,(LPVOID)(word)i, +0,&thread_id); +if (handle==NULL){ +WARN("Marker thread creation failed\n",0); +break; +} else { +CloseHandle(handle); +} +#else +GC_uintptr_t handle; +unsigned thread_id; +marker_last_stack_min[i]=ADDR_LIMIT; +handle=_beginthreadex(NULL, +MARK_THREAD_STACK_SIZE,GC_mark_thread, +(void*)(word)i,0,&thread_id); +if (!handle||handle==(GC_uintptr_t)-1L){ +WARN("Marker thread creation failed\n",0); +break; +} else { +} +#endif +} +while (GC_markers_m1 > i){ +GC_markers_m1--; +CloseHandle(GC_marker_cv[GC_markers_m1]); +} +GC_wait_for_markers_init(); +GC_COND_LOG_PRINTF("Started %d mark helper threads\n",GC_markers_m1); +if (i==0){ +CloseHandle(mark_cv); +CloseHandle(builder_cv); +CloseHandle(mark_mutex_event); +} +} +#ifdef GC_ASSERTIONS +STATIC DWORD GC_mark_lock_holder=NO_THREAD; +#define SET_MARK_LOCK_HOLDER (void)(GC_mark_lock_holder=GetCurrentThreadId()) +#define UNSET_MARK_LOCK_HOLDER do { GC_ASSERT(GC_mark_lock_holder==GetCurrentThreadId());GC_mark_lock_holder=NO_THREAD;} while (0) +#endif +STATIC LONG GC_mark_mutex_state=0; +#ifdef LOCK_STATS +volatile AO_t GC_block_count=0; +volatile AO_t GC_unlocked_count=0; +#endif +GC_INNER void GC_acquire_mark_lock(void) +{ +#ifndef THREAD_SANITIZER +GC_ASSERT(GC_mark_lock_holder!=GetCurrentThreadId()); +#endif +if (InterlockedExchange(&GC_mark_mutex_state,1)!=0){ +#ifdef LOCK_STATS +(void)AO_fetch_and_add1(&GC_block_count); +#endif +while (InterlockedExchange(&GC_mark_mutex_state, +-1)!=0){ +if (WaitForSingleObject(mark_mutex_event,INFINITE)==WAIT_FAILED) +ABORT("WaitForSingleObject 
failed"); +} +} +#ifdef LOCK_STATS +else { +(void)AO_fetch_and_add1(&GC_unlocked_count); +} +#endif +GC_ASSERT(GC_mark_lock_holder==NO_THREAD); +SET_MARK_LOCK_HOLDER; +} +GC_INNER void GC_release_mark_lock(void) +{ +UNSET_MARK_LOCK_HOLDER; +if (InterlockedExchange(&GC_mark_mutex_state,0)< 0){ +if (SetEvent(mark_mutex_event)==FALSE) +ABORT("SetEvent failed"); +} +} +GC_INNER void GC_wait_for_reclaim(void) +{ +GC_ASSERT(builder_cv!=0); +for (;;){ +GC_acquire_mark_lock(); +if (GC_fl_builder_count==0) +break; +if (ResetEvent(builder_cv)==FALSE) +ABORT("ResetEvent failed"); +GC_release_mark_lock(); +if (WaitForSingleObject(builder_cv,INFINITE)==WAIT_FAILED) +ABORT("WaitForSingleObject failed"); +} +GC_release_mark_lock(); +} +GC_INNER void GC_notify_all_builder(void) +{ +GC_ASSERT(GC_mark_lock_holder==GetCurrentThreadId()); +GC_ASSERT(builder_cv!=0); +GC_ASSERT(GC_fl_builder_count==0); +if (SetEvent(builder_cv)==FALSE) +ABORT("SetEvent failed"); +} +GC_INNER void GC_wait_marker(void) +{ +HANDLE event=mark_cv; +DWORD thread_id=GetCurrentThreadId(); +int i=GC_markers_m1; +while (i--> 0){ +if (GC_marker_Id[i]==thread_id){ +event=GC_marker_cv[i]; +break; +} +} +if (ResetEvent(event)==FALSE) +ABORT("ResetEvent failed"); +GC_release_mark_lock(); +if (WaitForSingleObject(event,INFINITE)==WAIT_FAILED) +ABORT("WaitForSingleObject failed"); +GC_acquire_mark_lock(); +} +GC_INNER void GC_notify_all_marker(void) +{ +DWORD thread_id=GetCurrentThreadId(); +int i=GC_markers_m1; +while (i--> 0){ +if (SetEvent(GC_marker_Id[i]!=thread_id?GC_marker_cv[i]: +mark_cv)==FALSE) +ABORT("SetEvent failed"); +} +} +#endif +static unsigned required_markers_cnt=0; +#endif +typedef struct { +LPTHREAD_START_ROUTINE start; +LPVOID param; +} thread_args; +STATIC void*GC_CALLBACK GC_win32_start_inner(struct GC_stack_base*sb, +void*arg) +{ +void*ret; +LPTHREAD_START_ROUTINE start=((thread_args*)arg)->start; +LPVOID param=((thread_args*)arg)->param; +GC_register_my_thread(sb); +#ifdef DEBUG_THREADS +GC_log_printf("thread 0x%lx starting...\n",(long)GetCurrentThreadId()); +#endif +GC_free(arg); +#if!defined(__GNUC__)&&!defined(NO_CRT) +ret=NULL; +__try +#endif +{ +ret=(void*)(word)(*start)(param); +} +#if!defined(__GNUC__)&&!defined(NO_CRT) +__finally +#endif +{ +GC_unregister_my_thread(); +} +#ifdef DEBUG_THREADS +GC_log_printf("thread 0x%lx returned from start routine\n", +(long)GetCurrentThreadId()); +#endif +return ret; +} +STATIC DWORD WINAPI GC_win32_start(LPVOID arg) +{ +return (DWORD)(word)GC_call_with_stack_base(GC_win32_start_inner,arg); +} +GC_API HANDLE WINAPI GC_CreateThread( +LPSECURITY_ATTRIBUTES lpThreadAttributes, +GC_WIN32_SIZE_T dwStackSize, +LPTHREAD_START_ROUTINE lpStartAddress, +LPVOID lpParameter,DWORD dwCreationFlags, +LPDWORD lpThreadId) +{ +if (!EXPECT(parallel_initialized,TRUE)) +GC_init_parallel(); +#ifdef DEBUG_THREADS +GC_log_printf("About to create a thread from 0x%lx\n", +(long)GetCurrentThreadId()); +#endif +if (GC_win32_dll_threads){ +return CreateThread(lpThreadAttributes,dwStackSize,lpStartAddress, +lpParameter,dwCreationFlags,lpThreadId); +} else { +thread_args*args= +(thread_args*)GC_malloc_uncollectable(sizeof(thread_args)); +HANDLE thread_h; +if (NULL==args){ +SetLastError(ERROR_NOT_ENOUGH_MEMORY); +return NULL; +} +args->start=lpStartAddress; +args->param=lpParameter; +GC_dirty(args); +REACHABLE_AFTER_DIRTY(lpParameter); +set_need_to_lock(); +thread_h=CreateThread(lpThreadAttributes,dwStackSize,GC_win32_start, +args,dwCreationFlags,lpThreadId); +if (thread_h==0)GC_free(args); +return thread_h; 
+} +} +GC_API DECLSPEC_NORETURN void WINAPI GC_ExitThread(DWORD dwExitCode) +{ +GC_unregister_my_thread(); +ExitThread(dwExitCode); +} +#if!defined(CYGWIN32)&&!defined(MSWINCE)&&!defined(MSWIN_XBOX1)&&!defined(NO_CRT) +GC_API GC_uintptr_t GC_CALL GC_beginthreadex( +void*security,unsigned stack_size, +unsigned (__stdcall*start_address)(void*), +void*arglist,unsigned initflag, +unsigned*thrdaddr) +{ +if (!EXPECT(parallel_initialized,TRUE)) +GC_init_parallel(); +#ifdef DEBUG_THREADS +GC_log_printf("About to create a thread from 0x%lx\n", +(long)GetCurrentThreadId()); +#endif +if (GC_win32_dll_threads){ +return _beginthreadex(security,stack_size,start_address, +arglist,initflag,thrdaddr); +} else { +GC_uintptr_t thread_h; +thread_args*args= +(thread_args*)GC_malloc_uncollectable(sizeof(thread_args)); +if (NULL==args){ +errno=EAGAIN; +return 0; +} +args->start=(LPTHREAD_START_ROUTINE)start_address; +args->param=arglist; +GC_dirty(args); +REACHABLE_AFTER_DIRTY(arglist); +set_need_to_lock(); +thread_h=_beginthreadex(security,stack_size, +(unsigned (__stdcall*)(void*))GC_win32_start, +args,initflag,thrdaddr); +if (thread_h==0)GC_free(args); +return thread_h; +} +} +GC_API void GC_CALL GC_endthreadex(unsigned retval) +{ +GC_unregister_my_thread(); +_endthreadex(retval); +} +#endif +#ifdef GC_WINMAIN_REDIRECT +#if defined(MSWINCE)&&defined(UNDER_CE) +#define WINMAIN_LPTSTR LPWSTR +#else +#define WINMAIN_LPTSTR LPSTR +#endif +#undef WinMain +int WINAPI GC_WinMain(HINSTANCE,HINSTANCE,WINMAIN_LPTSTR,int); +typedef struct { +HINSTANCE hInstance; +HINSTANCE hPrevInstance; +WINMAIN_LPTSTR lpCmdLine; +int nShowCmd; +} main_thread_args; +static DWORD WINAPI main_thread_start(LPVOID arg) +{ +main_thread_args*args=(main_thread_args*)arg; +return (DWORD)GC_WinMain(args->hInstance,args->hPrevInstance, +args->lpCmdLine,args->nShowCmd); +} +STATIC void*GC_waitForSingleObjectInfinite(void*handle) +{ +return (void*)(word)WaitForSingleObject((HANDLE)handle,INFINITE); +} +#ifndef WINMAIN_THREAD_STACK_SIZE +#define WINMAIN_THREAD_STACK_SIZE 0 +#endif +int WINAPI WinMain(HINSTANCE hInstance,HINSTANCE hPrevInstance, +WINMAIN_LPTSTR lpCmdLine,int nShowCmd) +{ +DWORD exit_code=1; +main_thread_args args={ +hInstance,hPrevInstance,lpCmdLine,nShowCmd +}; +HANDLE thread_h; +DWORD thread_id; +GC_INIT(); +thread_h=GC_CreateThread(NULL, +WINMAIN_THREAD_STACK_SIZE, +main_thread_start,&args,0, +&thread_id); +if (thread_h!=NULL){ +if ((DWORD)(word)GC_do_blocking(GC_waitForSingleObjectInfinite, +(void*)thread_h)==WAIT_FAILED) +ABORT("WaitForSingleObject(main_thread)failed"); +GetExitCodeThread (thread_h,&exit_code); +CloseHandle (thread_h); +} else { +ABORT("GC_CreateThread(main_thread)failed"); +} +#ifdef MSWINCE +GC_deinit(); +#endif +return (int)exit_code; +} +#endif +GC_API void GC_CALL GC_set_markers_count(unsigned markers GC_ATTR_UNUSED) +{ +#ifdef PARALLEL_MARK +required_markers_cnt=markers < MAX_MARKERS?markers:MAX_MARKERS; +#endif +} +GC_INNER void GC_thr_init(void) +{ +struct GC_stack_base sb; +#if (!defined(HAVE_PTHREAD_SETNAME_NP_WITH_TID)&&!defined(MSWINCE)&&defined(PARALLEL_MARK))||defined(WOW64_THREAD_CONTEXT_WORKAROUND) +HMODULE hK32=GetModuleHandle(TEXT("kernel32.dll")); +#endif +GC_ASSERT(I_HOLD_LOCK()); +if (GC_thr_initialized)return; +GC_ASSERT((word)&GC_threads % sizeof(word)==0); +#ifdef GC_NO_THREADS_DISCOVERY +#define GC_main_thread GetCurrentThreadId() +#else +GC_main_thread=GetCurrentThreadId(); +#endif +GC_thr_initialized=TRUE; +#ifdef CAN_HANDLE_FORK +if (GC_handle_fork){ +#ifdef CAN_CALL_ATFORK +if 
(pthread_atfork(fork_prepare_proc,fork_parent_proc, +fork_child_proc)==0){ +GC_handle_fork=1; +} else +#endif +if (GC_handle_fork!=-1) +ABORT("pthread_atfork failed"); +} +#endif +#ifdef WOW64_THREAD_CONTEXT_WORKAROUND +if (hK32){ +FARPROC pfn=GetProcAddress(hK32,"IsWow64Process"); +if (pfn +&&!(*(BOOL (WINAPI*)(HANDLE,BOOL*))(word)pfn)( +GetCurrentProcess(),&isWow64)) +isWow64=FALSE; +} +#endif +sb.mem_base=GC_stackbottom; +GC_ASSERT(sb.mem_base!=NULL); +#ifdef IA64 +sb.reg_base=GC_register_stackbottom; +#endif +#if defined(PARALLEL_MARK) +{ +char*markers_string=GETENV("GC_MARKERS"); +int markers=required_markers_cnt; +if (markers_string!=NULL){ +markers=atoi(markers_string); +if (markers<=0||markers > MAX_MARKERS){ +WARN("Too big or invalid number of mark threads:%" WARN_PRIdPTR +";using maximum threads\n",(signed_word)markers); +markers=MAX_MARKERS; +} +} else if (0==markers){ +#ifdef MSWINCE +markers=(int)GC_sysinfo.dwNumberOfProcessors; +#else +#ifdef _WIN64 +DWORD_PTR procMask=0; +DWORD_PTR sysMask; +#else +DWORD procMask=0; +DWORD sysMask; +#endif +int ncpu=0; +if ( +#ifdef __cplusplus +GetProcessAffinityMask(GetCurrentProcess(),&procMask,&sysMask) +#else +GetProcessAffinityMask(GetCurrentProcess(), +(void*)&procMask,(void*)&sysMask) +#endif +&&procMask){ +do { +ncpu++; +} while ((procMask&=procMask - 1)!=0); +} +markers=ncpu; +#endif +#if defined(GC_MIN_MARKERS)&&!defined(CPPCHECK) +if (markers < GC_MIN_MARKERS) +markers=GC_MIN_MARKERS; +#endif +if (markers > MAX_MARKERS) +markers=MAX_MARKERS; +} +available_markers_m1=markers - 1; +} +if (GC_win32_dll_threads||available_markers_m1<=0){ +GC_parallel=FALSE; +GC_COND_LOG_PRINTF( +"Single marker thread,turning off parallel marking\n"); +} else { +#ifndef GC_PTHREADS_PARAMARK +mark_mutex_event=CreateEvent(NULL, +FALSE, +FALSE,NULL); +builder_cv=CreateEvent(NULL, +TRUE, +FALSE,NULL); +mark_cv=CreateEvent(NULL,TRUE, +FALSE,NULL); +if (mark_mutex_event==(HANDLE)0||builder_cv==(HANDLE)0 +||mark_cv==(HANDLE)0) +ABORT("CreateEvent failed"); +#endif +#if!defined(HAVE_PTHREAD_SETNAME_NP_WITH_TID)&&!defined(MSWINCE) +if (hK32) +setThreadDescription_fn=GetProcAddress(hK32, +"SetThreadDescription"); +#endif +} +#endif +GC_ASSERT(0==GC_lookup_thread_inner(GC_main_thread)); +GC_register_my_thread_inner(&sb,GC_main_thread); +#undef GC_main_thread +} +#ifdef GC_PTHREADS +struct start_info { +void*(*start_routine)(void*); +void*arg; +GC_bool detached; +}; +GC_API int GC_pthread_join(pthread_t pthread_id,void**retval) +{ +int result; +#ifndef GC_WIN32_PTHREADS +GC_thread t; +#endif +DCL_LOCK_STATE; +GC_ASSERT(!GC_win32_dll_threads); +#ifdef DEBUG_THREADS +GC_log_printf("thread %p(0x%lx)is joining thread %p\n", +(void*)GC_PTHREAD_PTRVAL(pthread_self()), +(long)GetCurrentThreadId(), +(void*)GC_PTHREAD_PTRVAL(pthread_id)); +#endif +#ifndef GC_WIN32_PTHREADS +while ((t=GC_lookup_pthread(pthread_id))==0) +Sleep(10); +#endif +result=pthread_join(pthread_id,retval); +if (0==result){ +#ifdef GC_WIN32_PTHREADS +GC_thread t=GC_lookup_pthread(pthread_id); +if (NULL==t)ABORT("Thread not registered"); +#endif +LOCK(); +if ((t->flags&FINISHED)!=0){ +GC_delete_gc_thread_no_free(t); +GC_INTERNAL_FREE(t); +} +UNLOCK(); +} +#ifdef DEBUG_THREADS +GC_log_printf("thread %p(0x%lx)join with thread %p %s\n", +(void*)GC_PTHREAD_PTRVAL(pthread_self()), +(long)GetCurrentThreadId(), +(void*)GC_PTHREAD_PTRVAL(pthread_id), +result!=0?"failed":"succeeded"); +#endif +return result; +} +GC_API int GC_pthread_create(pthread_t*new_thread, +GC_PTHREAD_CREATE_CONST pthread_attr_t*attr, 
+void*(*start_routine)(void*),void*arg) +{ +int result; +struct start_info*si; +if (!EXPECT(parallel_initialized,TRUE)) +GC_init_parallel(); +GC_ASSERT(!GC_win32_dll_threads); +si=(struct start_info*)GC_malloc_uncollectable( +sizeof(struct start_info)); +if (NULL==si) +return EAGAIN; +si->start_routine=start_routine; +si->arg=arg; +GC_dirty(si); +REACHABLE_AFTER_DIRTY(arg); +if (attr!=0&& +pthread_attr_getdetachstate(attr,&si->detached) +==PTHREAD_CREATE_DETACHED){ +si->detached=TRUE; +} +#ifdef DEBUG_THREADS +GC_log_printf("About to create a thread from %p(0x%lx)\n", +(void*)GC_PTHREAD_PTRVAL(pthread_self()), +(long)GetCurrentThreadId()); +#endif +set_need_to_lock(); +result=pthread_create(new_thread,attr,GC_pthread_start,si); +if (result){ +GC_free(si); +} +return(result); +} +STATIC void*GC_CALLBACK GC_pthread_start_inner(struct GC_stack_base*sb, +void*arg) +{ +struct start_info*si=(struct start_info*)arg; +void*result; +void*(*start)(void*); +void*start_arg; +DWORD thread_id=GetCurrentThreadId(); +pthread_t pthread_id=pthread_self(); +GC_thread me; +DCL_LOCK_STATE; +#ifdef DEBUG_THREADS +GC_log_printf("thread %p(0x%x)starting...\n", +(void*)GC_PTHREAD_PTRVAL(pthread_id),(int)thread_id); +#endif +GC_ASSERT(!GC_win32_dll_threads); +LOCK(); +me=GC_register_my_thread_inner(sb,thread_id); +SET_PTHREAD_MAP_CACHE(pthread_id,thread_id); +GC_ASSERT(me!=&first_thread); +me->pthread_id=pthread_id; +if (si->detached)me->flags|=DETACHED; +UNLOCK(); +start=si->start_routine; +start_arg=si->arg; +GC_free(si); +pthread_cleanup_push(GC_thread_exit_proc,(void*)me); +result=(*start)(start_arg); +me->status=result; +GC_dirty(me); +pthread_cleanup_pop(1); +#ifdef DEBUG_THREADS +GC_log_printf("thread %p(0x%x)returned from start routine\n", +(void*)GC_PTHREAD_PTRVAL(pthread_id),(int)thread_id); +#endif +return(result); +} +STATIC void*GC_pthread_start(void*arg) +{ +return GC_call_with_stack_base(GC_pthread_start_inner,arg); +} +STATIC void GC_thread_exit_proc(void*arg) +{ +GC_thread me=(GC_thread)arg; +DCL_LOCK_STATE; +GC_ASSERT(!GC_win32_dll_threads); +#ifdef DEBUG_THREADS +GC_log_printf("thread %p(0x%lx)called pthread_exit()\n", +(void*)GC_PTHREAD_PTRVAL(pthread_self()), +(long)GetCurrentThreadId()); +#endif +LOCK(); +GC_wait_for_gc_completion(FALSE); +#if defined(THREAD_LOCAL_ALLOC) +GC_ASSERT(GC_getspecific(GC_thread_key)==&me->tlfs); +GC_destroy_thread_local(&(me->tlfs)); +#endif +if (me->flags&DETACHED){ +GC_delete_thread(GetCurrentThreadId()); +} else { +me->flags|=FINISHED; +} +#if defined(THREAD_LOCAL_ALLOC) +GC_remove_specific(GC_thread_key); +#endif +UNLOCK(); +} +#ifndef GC_NO_PTHREAD_SIGMASK +GC_API int GC_pthread_sigmask(int how,const sigset_t*set, +sigset_t*oset) +{ +return pthread_sigmask(how,set,oset); +} +#endif +GC_API int GC_pthread_detach(pthread_t thread) +{ +int result; +GC_thread t; +DCL_LOCK_STATE; +GC_ASSERT(!GC_win32_dll_threads); +while ((t=GC_lookup_pthread(thread))==NULL) +Sleep(10); +result=pthread_detach(thread); +if (result==0){ +LOCK(); +t->flags|=DETACHED; +if ((t->flags&FINISHED)!=0){ +GC_delete_gc_thread_no_free(t); +GC_INTERNAL_FREE(t); +} +UNLOCK(); +} +return result; +} +#elif!defined(GC_NO_THREADS_DISCOVERY) +#ifdef GC_INSIDE_DLL +GC_API +#else +#define GC_DllMain DllMain +#endif +BOOL WINAPI GC_DllMain(HINSTANCE inst GC_ATTR_UNUSED,ULONG reason, +LPVOID reserved GC_ATTR_UNUSED) +{ +DWORD thread_id; +if (!GC_win32_dll_threads&¶llel_initialized)return TRUE; +switch (reason){ +case DLL_THREAD_ATTACH: +#ifdef PARALLEL_MARK +if (GC_parallel){ +break; +} +#endif +case 
DLL_PROCESS_ATTACH: +thread_id=GetCurrentThreadId(); +if (parallel_initialized&&GC_main_thread!=thread_id){ +#ifdef PARALLEL_MARK +ABORT("Cannot initialize parallel marker from DllMain"); +#else +struct GC_stack_base sb; +#ifdef GC_ASSERTIONS +int sb_result= +#endif +GC_get_stack_base(&sb); +GC_ASSERT(sb_result==GC_SUCCESS); +GC_register_my_thread_inner(&sb,thread_id); +#endif +} +break; +case DLL_THREAD_DETACH: +GC_ASSERT(parallel_initialized); +if (GC_win32_dll_threads){ +GC_delete_thread(GetCurrentThreadId()); +} +break; +case DLL_PROCESS_DETACH: +if (GC_win32_dll_threads){ +int i; +int my_max=(int)GC_get_max_thread_index(); +for (i=0;i<=my_max;++i){ +if (AO_load(&(dll_thread_table[i].tm.in_use))) +GC_delete_gc_thread_no_free(&dll_thread_table[i]); +} +GC_deinit(); +} +break; +} +return TRUE; +} +#endif +GC_INNER void GC_init_parallel(void) +{ +#if defined(THREAD_LOCAL_ALLOC) +GC_thread me; +DCL_LOCK_STATE; +#endif +if (parallel_initialized)return; +parallel_initialized=TRUE; +if (!GC_is_initialized)GC_init(); +#if defined(CPPCHECK)&&!defined(GC_NO_THREADS_DISCOVERY) +GC_noop1((word)&GC_DllMain); +#endif +if (GC_win32_dll_threads){ +set_need_to_lock(); +} +#if defined(THREAD_LOCAL_ALLOC) +LOCK(); +me=GC_lookup_thread_inner(GetCurrentThreadId()); +CHECK_LOOKUP_MY_THREAD(me); +GC_init_thread_local(&me->tlfs); +UNLOCK(); +#endif +} +#if defined(USE_PTHREAD_LOCKS) +GC_INNER void GC_lock(void) +{ +pthread_mutex_lock(&GC_allocate_ml); +} +#endif +#if defined(THREAD_LOCAL_ALLOC) +GC_INNER void GC_mark_thread_local_free_lists(void) +{ +int i; +GC_thread p; +for (i=0;i < THREAD_TABLE_SZ;++i){ +for (p=GC_threads[i];0!=p;p=p->tm.next){ +if (!KNOWN_FINISHED(p)){ +#ifdef DEBUG_THREADS +GC_log_printf("Marking thread locals for 0x%x\n",(int)p->id); +#endif +GC_mark_thread_local_fls_for(&(p->tlfs)); +} +} +} +} +#if defined(GC_ASSERTIONS) +void GC_check_tls(void) +{ +int i; +GC_thread p; +for (i=0;i < THREAD_TABLE_SZ;++i){ +for (p=GC_threads[i];0!=p;p=p->tm.next){ +if (!KNOWN_FINISHED(p)) +GC_check_tls_for(&(p->tlfs)); +} +} +#if defined(USE_CUSTOM_SPECIFIC) +if (GC_thread_key!=0) +GC_check_tsd_marks(GC_thread_key); +#endif +} +#endif +#endif +#ifndef GC_NO_THREAD_REDIRECTS +#define CreateThread GC_CreateThread +#define ExitThread GC_ExitThread +#undef _beginthreadex +#define _beginthreadex GC_beginthreadex +#undef _endthreadex +#define _endthreadex GC_endthreadex +#endif +#endif +#ifndef GC_PTHREAD_START_STANDALONE +#if defined(__GNUC__)&&defined(__linux__) +#undef __EXCEPTIONS +#endif +#if defined(GC_PTHREADS)&&!defined(GC_WIN32_THREADS) +#include +#include +GC_INNER_PTHRSTART void*GC_CALLBACK GC_inner_start_routine( +struct GC_stack_base*sb,void*arg) +{ +void*(*start)(void*); +void*start_arg; +void*result; +volatile GC_thread me= +GC_start_rtn_prepare_thread(&start,&start_arg,sb,arg); +#ifndef NACL +pthread_cleanup_push(GC_thread_exit_proc,me); +#endif +result=(*start)(start_arg); +#if defined(DEBUG_THREADS)&&!defined(GC_PTHREAD_START_STANDALONE) +GC_log_printf("Finishing thread %p\n",(void*)pthread_self()); +#endif +me->status=result; +GC_end_stubborn_change(me); +#ifndef NACL +pthread_cleanup_pop(1); +#endif +return result; +} +#endif +#endif +#ifndef GC_NO_THREAD_REDIRECTS +#define GC_PTHREAD_REDIRECTS_ONLY +#ifndef GC_PTHREAD_REDIRECTS_H +#define GC_PTHREAD_REDIRECTS_H +#if defined(GC_H)&&defined(GC_PTHREADS) +#ifndef GC_PTHREAD_REDIRECTS_ONLY +#include +#ifndef GC_NO_DLOPEN +#include +#endif +#ifndef GC_NO_PTHREAD_SIGMASK +#include +#endif +#ifdef __cplusplus +extern "C" { +#endif +#ifndef 
GC_SUSPEND_THREAD_ID +#define GC_SUSPEND_THREAD_ID pthread_t +#endif +#ifndef GC_NO_DLOPEN +GC_API void*GC_dlopen(const char*,int); +#endif +#ifndef GC_NO_PTHREAD_SIGMASK +#if defined(GC_PTHREAD_SIGMASK_NEEDED)||defined(_BSD_SOURCE)||defined(_GNU_SOURCE)||(_POSIX_C_SOURCE>=199506L)||(_XOPEN_SOURCE>=500) +GC_API int GC_pthread_sigmask(int,const sigset_t*, +sigset_t*); +#endif +#endif +#ifndef GC_PTHREAD_CREATE_CONST +#define GC_PTHREAD_CREATE_CONST const +#endif +GC_API int GC_pthread_create(pthread_t*, +GC_PTHREAD_CREATE_CONST pthread_attr_t*, +void*(*)(void*),void*); +GC_API int GC_pthread_join(pthread_t,void**); +GC_API int GC_pthread_detach(pthread_t); +#ifndef GC_NO_PTHREAD_CANCEL +GC_API int GC_pthread_cancel(pthread_t); +#endif +#if defined(GC_HAVE_PTHREAD_EXIT)&&!defined(GC_PTHREAD_EXIT_DECLARED) +#define GC_PTHREAD_EXIT_DECLARED +GC_API void GC_pthread_exit(void*)GC_PTHREAD_EXIT_ATTRIBUTE; +#endif +#ifdef __cplusplus +} +#endif +#endif +#if!defined(GC_NO_THREAD_REDIRECTS)&&!defined(GC_USE_LD_WRAP) +#undef pthread_create +#undef pthread_join +#undef pthread_detach +#define pthread_create GC_pthread_create +#define pthread_join GC_pthread_join +#define pthread_detach GC_pthread_detach +#ifndef GC_NO_PTHREAD_SIGMASK +#undef pthread_sigmask +#define pthread_sigmask GC_pthread_sigmask +#endif +#ifndef GC_NO_DLOPEN +#undef dlopen +#define dlopen GC_dlopen +#endif +#ifndef GC_NO_PTHREAD_CANCEL +#undef pthread_cancel +#define pthread_cancel GC_pthread_cancel +#endif +#ifdef GC_HAVE_PTHREAD_EXIT +#undef pthread_exit +#define pthread_exit GC_pthread_exit +#endif +#endif +#endif +#endif +#endif diff --git a/v_windows/v/old/thirdparty/libgc/gc.h b/v_windows/v/old/thirdparty/libgc/gc.h new file mode 100644 index 0000000..d0646f5 --- /dev/null +++ b/v_windows/v/old/thirdparty/libgc/gc.h @@ -0,0 +1,1081 @@ +/* + * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers + * Copyright (c) 1991-1995 by Xerox Corporation. All rights reserved. + * Copyright 1996-1999 by Silicon Graphics. All rights reserved. + * Copyright 1999 by Hewlett-Packard Company. All rights reserved. + * Copyright (C) 2007 Free Software Foundation, Inc + * Copyright (c) 2000-2011 by Hewlett-Packard Development Company. + * Copyright (c) 2009-2020 Ivan Maidanski + * + * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED + * OR IMPLIED. ANY USE IS AT YOUR OWN RISK. + * + * Permission is hereby granted to use or copy this program + * for any purpose, provided the above notices are retained on all copies. + * Permission to modify the code and to distribute modified code is granted, + * provided the above notices are retained, and a notice that the code was + * modified is included with the above copyright notice. + */ + +#ifndef GC_H +#define GC_H +#if (defined(WIN64)&&!defined(_WIN64))&&defined(_MSC_VER) +#pragma message("Warning:Expecting _WIN64 for x64 targets!Notice the leading underscore!") +#endif +#if defined(GC_H) +#define GC_TMP_VERSION_MAJOR 8 +#define GC_TMP_VERSION_MINOR 1 +#define GC_TMP_VERSION_MICRO 0 +#ifdef GC_VERSION_MAJOR +#if GC_TMP_VERSION_MAJOR!=GC_VERSION_MAJOR||GC_TMP_VERSION_MINOR!=GC_VERSION_MINOR||GC_TMP_VERSION_MICRO!=GC_VERSION_MICRO +#error Inconsistent version info. Check README.md,include/gc_version.h and configure.ac. 
+#endif +#else +#define GC_VERSION_MAJOR GC_TMP_VERSION_MAJOR +#define GC_VERSION_MINOR GC_TMP_VERSION_MINOR +#define GC_VERSION_MICRO GC_TMP_VERSION_MICRO +#endif +#endif +#if defined(GC_H) +#if defined(__GNUC__)&&defined(__GNUC_MINOR__) +#define GC_GNUC_PREREQ(major,minor)((__GNUC__<<16)+__GNUC_MINOR__>=((major)<<16)+(minor)) +#else +#define GC_GNUC_PREREQ(major,minor)0 +#endif +#if defined(SOLARIS_THREADS)||defined(_SOLARIS_THREADS)||defined(_SOLARIS_PTHREADS)||defined(GC_SOLARIS_PTHREADS) +#ifndef GC_SOLARIS_THREADS +#define GC_SOLARIS_THREADS +#endif +#endif +#if defined(IRIX_THREADS) +#define GC_IRIX_THREADS +#endif +#if defined(DGUX_THREADS)&&!defined(GC_DGUX386_THREADS) +#define GC_DGUX386_THREADS +#endif +#if defined(AIX_THREADS) +#define GC_AIX_THREADS +#endif +#if defined(HPUX_THREADS) +#define GC_HPUX_THREADS +#endif +#if defined(OSF1_THREADS) +#define GC_OSF1_THREADS +#endif +#if defined(LINUX_THREADS) +#define GC_LINUX_THREADS +#endif +#if defined(WIN32_THREADS) +#define GC_WIN32_THREADS +#endif +#if defined(RTEMS_THREADS) +#define GC_RTEMS_PTHREADS +#endif +#if defined(USE_LD_WRAP) +#define GC_USE_LD_WRAP +#endif +#if defined(GC_WIN32_PTHREADS)&&!defined(GC_WIN32_THREADS) +#define GC_WIN32_THREADS +#endif +#if defined(GC_AIX_THREADS)||defined(GC_DARWIN_THREADS)||defined(GC_DGUX386_THREADS)||defined(GC_FREEBSD_THREADS)||defined(GC_HPUX_THREADS)||defined(GC_IRIX_THREADS)||defined(GC_LINUX_THREADS)||defined(GC_NETBSD_THREADS)||defined(GC_OPENBSD_THREADS)||defined(GC_OSF1_THREADS)||defined(GC_SOLARIS_THREADS)||defined(GC_WIN32_THREADS)||defined(GC_RTEMS_PTHREADS) +#ifndef GC_THREADS +#define GC_THREADS +#endif +#elif defined(GC_THREADS) +#if defined(__linux__) +#define GC_LINUX_THREADS +#elif defined(__OpenBSD__) +#define GC_OPENBSD_THREADS +#elif defined(_PA_RISC1_1)||defined(_PA_RISC2_0)||defined(hppa)||defined(__HPPA)||(defined(__ia64)&&defined(_HPUX_SOURCE)) +#define GC_HPUX_THREADS +#elif defined(__HAIKU__) +#define GC_HAIKU_THREADS +#elif defined(__DragonFly__)||defined(__FreeBSD_kernel__)||(defined(__FreeBSD__)&&!defined(SN_TARGET_ORBIS)) +#define GC_FREEBSD_THREADS +#elif defined(__NetBSD__) +#define GC_NETBSD_THREADS +#elif defined(__alpha)||defined(__alpha__) +#define GC_OSF1_THREADS +#elif (defined(mips)||defined(__mips)||defined(_mips))&&!(defined(nec_ews)||defined(_nec_ews)||defined(ultrix)||defined(__ultrix)) +#define GC_IRIX_THREADS +#elif defined(__sparc)||((defined(sun)||defined(__sun))&&(defined(i386)||defined(__i386__)||defined(__amd64)||defined(__amd64__))) +#define GC_SOLARIS_THREADS +#elif defined(__APPLE__)&&defined(__MACH__) +#define GC_DARWIN_THREADS +#endif +#if defined(DGUX)&&(defined(i386)||defined(__i386__)) +#define GC_DGUX386_THREADS +#endif +#if defined(_AIX) +#define GC_AIX_THREADS +#endif +#if (defined(_WIN32)||defined(_MSC_VER)||defined(__BORLANDC__)||defined(__CYGWIN32__)||defined(__CYGWIN__)||defined(__CEGCC__)||defined(_WIN32_WCE)||defined(__MINGW32__))&&!defined(GC_WIN32_THREADS) +#define GC_WIN32_THREADS +#endif +#if defined(__rtems__)&&(defined(i386)||defined(__i386__)) +#define GC_RTEMS_PTHREADS +#endif +#endif +#undef GC_PTHREADS +#if (!defined(GC_WIN32_THREADS)||defined(GC_WIN32_PTHREADS)||defined(__CYGWIN32__)||defined(__CYGWIN__))&&defined(GC_THREADS)&&!defined(NN_PLATFORM_CTR)&&!defined(NN_BUILD_TARGET_PLATFORM_NX) +#define GC_PTHREADS +#endif +#if!defined(_PTHREADS)&&defined(GC_NETBSD_THREADS) +#define _PTHREADS +#endif +#if defined(GC_DGUX386_THREADS)&&!defined(_POSIX4A_DRAFT10_SOURCE) +#define _POSIX4A_DRAFT10_SOURCE 1 +#endif 
+#if!defined(_REENTRANT)&&defined(GC_PTHREADS)&&!defined(GC_WIN32_THREADS) +#define _REENTRANT 1 +#endif +#define __GC +#if!defined(_WIN32_WCE)||defined(__GNUC__) +#include +#if defined(__MINGW32__)&&!defined(_WIN32_WCE) +#include +#endif +#else +#include +#ifndef _PTRDIFF_T_DEFINED +#define _PTRDIFF_T_DEFINED +typedef long ptrdiff_t; +#endif +#endif +#if!defined(GC_NOT_DLL)&&!defined(GC_DLL)&&((defined(_DLL)&&!defined(__GNUC__))||(defined(DLL_EXPORT)&&defined(GC_BUILD))) +#define GC_DLL +#endif +#if defined(GC_DLL)&&!defined(GC_API) +#if defined(__CEGCC__) +#if defined(GC_BUILD) +#define GC_API __declspec(dllexport) +#else +#define GC_API __declspec(dllimport) +#endif +#elif defined(__MINGW32__) +#if defined(__cplusplus)&&defined(GC_BUILD) +#define GC_API extern __declspec(dllexport) +#elif defined(GC_BUILD)||defined(__MINGW32_DELAY_LOAD__) +#define GC_API __declspec(dllexport) +#else +#define GC_API extern __declspec(dllimport) +#endif +#elif defined(_MSC_VER)||defined(__DMC__)||defined(__BORLANDC__)||defined(__CYGWIN__) +#ifdef GC_BUILD +#define GC_API extern __declspec(dllexport) +#else +#define GC_API __declspec(dllimport) +#endif +#elif defined(__WATCOMC__) +#ifdef GC_BUILD +#define GC_API extern __declspec(dllexport) +#else +#define GC_API extern __declspec(dllimport) +#endif +#elif defined(__SYMBIAN32__) +#ifdef GC_BUILD +#define GC_API extern EXPORT_C +#else +#define GC_API extern IMPORT_C +#endif +#elif defined(__GNUC__) +#if defined(GC_BUILD)&&!defined(GC_NO_VISIBILITY)&&(GC_GNUC_PREREQ(4,0)||defined(GC_VISIBILITY_HIDDEN_SET)) +#define GC_API extern __attribute__((__visibility__("default"))) +#endif +#endif +#endif +#ifndef GC_API +#define GC_API extern +#endif +#ifndef GC_CALL +#define GC_CALL +#endif +#ifndef GC_CALLBACK +#define GC_CALLBACK GC_CALL +#endif +#ifndef GC_ATTR_MALLOC +#ifdef GC_OOM_FUNC_RETURNS_ALIAS +#define GC_ATTR_MALLOC +#elif GC_GNUC_PREREQ(3,1) +#define GC_ATTR_MALLOC __attribute__((__malloc__)) +#elif defined(_MSC_VER)&&(_MSC_VER>=1900)&&!defined(__EDG__) +#define GC_ATTR_MALLOC __declspec(allocator)__declspec(noalias)__declspec(restrict) +#elif defined(_MSC_VER)&&_MSC_VER>=1400 +#define GC_ATTR_MALLOC __declspec(noalias)__declspec(restrict) +#else +#define GC_ATTR_MALLOC +#endif +#endif +#ifndef GC_ATTR_ALLOC_SIZE +#undef GC_ATTR_CALLOC_SIZE +#ifdef __clang__ +#if __has_attribute(__alloc_size__) +#define GC_ATTR_ALLOC_SIZE(argnum)__attribute__((__alloc_size__(argnum))) +#define GC_ATTR_CALLOC_SIZE(n,s)__attribute__((__alloc_size__(n,s))) +#else +#define GC_ATTR_ALLOC_SIZE(argnum) +#endif +#elif GC_GNUC_PREREQ(4,3)&&!defined(__ICC) +#define GC_ATTR_ALLOC_SIZE(argnum)__attribute__((__alloc_size__(argnum))) +#define GC_ATTR_CALLOC_SIZE(n,s)__attribute__((__alloc_size__(n,s))) +#else +#define GC_ATTR_ALLOC_SIZE(argnum) +#endif +#endif +#ifndef GC_ATTR_CALLOC_SIZE +#define GC_ATTR_CALLOC_SIZE(n,s) +#endif +#ifndef GC_ATTR_NONNULL +#if GC_GNUC_PREREQ(4,0) +#define GC_ATTR_NONNULL(argnum)__attribute__((__nonnull__(argnum))) +#else +#define GC_ATTR_NONNULL(argnum) +#endif +#endif +#ifndef GC_ATTR_CONST +#if GC_GNUC_PREREQ(4,0) +#define GC_ATTR_CONST __attribute__((__const__)) +#else +#define GC_ATTR_CONST +#endif +#endif +#ifndef GC_ATTR_DEPRECATED +#ifdef GC_BUILD +#undef GC_ATTR_DEPRECATED +#define GC_ATTR_DEPRECATED +#elif GC_GNUC_PREREQ(4,0) +#define GC_ATTR_DEPRECATED __attribute__((__deprecated__)) +#elif defined(_MSC_VER)&&_MSC_VER>=1200 +#define GC_ATTR_DEPRECATED __declspec(deprecated) +#else +#define GC_ATTR_DEPRECATED +#endif +#endif +#if 
defined(__sgi)&&!defined(__GNUC__)&&_COMPILER_VERSION>=720 +#define GC_ADD_CALLER +#define GC_RETURN_ADDR (GC_word)__return_address +#endif +#if defined(__linux__)||defined(__GLIBC__) +#if!defined(__native_client__) +#include +#endif +#if (__GLIBC__==2&&__GLIBC_MINOR__>=1||__GLIBC__ > 2)&&!defined(__ia64__)&&!defined(GC_MISSING_EXECINFO_H)&&!defined(GC_HAVE_BUILTIN_BACKTRACE) +#define GC_HAVE_BUILTIN_BACKTRACE +#endif +#if defined(__i386__)||defined(__amd64__)||defined(__x86_64__) +#define GC_CAN_SAVE_CALL_STACKS +#endif +#endif +#if defined(_MSC_VER)&&_MSC_VER>=1200&&!defined(_AMD64_)&&!defined(_M_X64)&&!defined(_WIN32_WCE)&&!defined(GC_HAVE_NO_BUILTIN_BACKTRACE)&&!defined(GC_HAVE_BUILTIN_BACKTRACE) +#define GC_HAVE_BUILTIN_BACKTRACE +#endif +#if defined(GC_HAVE_BUILTIN_BACKTRACE)&&!defined(GC_CAN_SAVE_CALL_STACKS) +#define GC_CAN_SAVE_CALL_STACKS +#endif +#if defined(__sparc__) +#define GC_CAN_SAVE_CALL_STACKS +#endif +#if (defined(__linux__)||defined(__DragonFly__)||defined(__FreeBSD__)||defined(__FreeBSD_kernel__)||defined(__HAIKU__)||defined(__NetBSD__)||defined(__OpenBSD__)||defined(HOST_ANDROID)||defined(__ANDROID__))&&!defined(GC_CAN_SAVE_CALL_STACKS) +#define GC_ADD_CALLER +#if GC_GNUC_PREREQ(2,95) +#define GC_RETURN_ADDR (GC_word)__builtin_return_address(0) +#if GC_GNUC_PREREQ(4,0)&&(defined(__i386__)||defined(__amd64__)||defined(__x86_64__)) +#define GC_HAVE_RETURN_ADDR_PARENT +#define GC_RETURN_ADDR_PARENT (GC_word)__builtin_extract_return_addr(__builtin_return_address(1)) +#endif +#else +#define GC_RETURN_ADDR 0 +#endif +#endif +#ifdef GC_PTHREADS +#if (defined(GC_DARWIN_THREADS)||defined(GC_WIN32_PTHREADS)||defined(__native_client__)||defined(GC_RTEMS_PTHREADS))&&!defined(GC_NO_DLOPEN) +#define GC_NO_DLOPEN +#endif +#if (defined(GC_DARWIN_THREADS)||defined(GC_WIN32_PTHREADS)||defined(GC_OPENBSD_THREADS)||defined(__native_client__))&&!defined(GC_NO_PTHREAD_SIGMASK) +#define GC_NO_PTHREAD_SIGMASK +#endif +#if defined(__native_client__) +#ifndef GC_PTHREAD_CREATE_CONST +#define GC_PTHREAD_CREATE_CONST +#endif +#ifndef GC_HAVE_PTHREAD_EXIT +#define GC_HAVE_PTHREAD_EXIT +#define GC_PTHREAD_EXIT_ATTRIBUTE +#endif +#endif +#if!defined(GC_HAVE_PTHREAD_EXIT)&&!defined(HOST_ANDROID)&&!defined(__ANDROID__)&&(defined(GC_LINUX_THREADS)||defined(GC_SOLARIS_THREADS)) +#define GC_HAVE_PTHREAD_EXIT +#if GC_GNUC_PREREQ(2,7) +#define GC_PTHREAD_EXIT_ATTRIBUTE __attribute__((__noreturn__)) +#elif defined(__NORETURN) +#define GC_PTHREAD_EXIT_ATTRIBUTE __NORETURN +#else +#define GC_PTHREAD_EXIT_ATTRIBUTE +#endif +#endif +#if (!defined(GC_HAVE_PTHREAD_EXIT)||defined(__native_client__))&&!defined(GC_NO_PTHREAD_CANCEL) +#define GC_NO_PTHREAD_CANCEL +#endif +#endif +#ifdef __cplusplus +#ifndef GC_ATTR_EXPLICIT +#if __cplusplus>=201103L&&!defined(__clang__)||_MSVC_LANG>=201103L||defined(CPPCHECK) +#define GC_ATTR_EXPLICIT explicit +#else +#define GC_ATTR_EXPLICIT +#endif +#endif +#ifndef GC_NOEXCEPT +#if defined(__DMC__)||(defined(__BORLANDC__)&&(defined(_RWSTD_NO_EXCEPTIONS)||defined(_RWSTD_NO_EX_SPEC)))||(defined(_MSC_VER)&&defined(_HAS_EXCEPTIONS)&&!_HAS_EXCEPTIONS)||(defined(__WATCOMC__)&&!defined(_CPPUNWIND)) +#define GC_NOEXCEPT +#ifndef GC_NEW_ABORTS_ON_OOM +#define GC_NEW_ABORTS_ON_OOM +#endif +#elif __cplusplus>=201103L||_MSVC_LANG>=201103L +#define GC_NOEXCEPT noexcept +#else +#define GC_NOEXCEPT throw() +#endif +#endif +#endif +#endif +#ifdef __cplusplus +extern "C" { +#endif +typedef void*GC_PTR; +#ifdef _WIN64 +#if defined(__int64)&&!defined(CPPCHECK) +typedef unsigned __int64 GC_word; 
+typedef __int64 GC_signed_word; +#else +typedef unsigned long long GC_word; +typedef long long GC_signed_word; +#endif +#else +typedef unsigned long GC_word; +typedef long GC_signed_word; +#endif +GC_API unsigned GC_CALL GC_get_version(void); +GC_API GC_ATTR_DEPRECATED GC_word GC_gc_no; +GC_API GC_word GC_CALL GC_get_gc_no(void); +#ifdef GC_THREADS +GC_API GC_ATTR_DEPRECATED int GC_parallel; +GC_API int GC_CALL GC_get_parallel(void); +GC_API void GC_CALL GC_set_markers_count(unsigned); +#endif +typedef void*(GC_CALLBACK*GC_oom_func)(size_t); +GC_API GC_ATTR_DEPRECATED GC_oom_func GC_oom_fn; +GC_API void GC_CALL GC_set_oom_fn(GC_oom_func)GC_ATTR_NONNULL(1); +GC_API GC_oom_func GC_CALL GC_get_oom_fn(void); +typedef void (GC_CALLBACK*GC_on_heap_resize_proc)(GC_word); +GC_API GC_ATTR_DEPRECATED GC_on_heap_resize_proc GC_on_heap_resize; +GC_API void GC_CALL GC_set_on_heap_resize(GC_on_heap_resize_proc); +GC_API GC_on_heap_resize_proc GC_CALL GC_get_on_heap_resize(void); +typedef enum { +GC_EVENT_START, +GC_EVENT_MARK_START, +GC_EVENT_MARK_END, +GC_EVENT_RECLAIM_START, +GC_EVENT_RECLAIM_END, +GC_EVENT_END, +GC_EVENT_PRE_STOP_WORLD, +GC_EVENT_POST_STOP_WORLD, +GC_EVENT_PRE_START_WORLD, +GC_EVENT_POST_START_WORLD, +GC_EVENT_THREAD_SUSPENDED, +GC_EVENT_THREAD_UNSUSPENDED +} GC_EventType; +typedef void (GC_CALLBACK*GC_on_collection_event_proc)(GC_EventType); +GC_API void GC_CALL GC_set_on_collection_event(GC_on_collection_event_proc); +GC_API GC_on_collection_event_proc GC_CALL GC_get_on_collection_event(void); +#if defined(GC_THREADS)||(defined(GC_BUILD)&&defined(NN_PLATFORM_CTR)) +typedef void (GC_CALLBACK*GC_on_thread_event_proc)(GC_EventType, +void*); +GC_API void GC_CALL GC_set_on_thread_event(GC_on_thread_event_proc); +GC_API GC_on_thread_event_proc GC_CALL GC_get_on_thread_event(void); +#endif +GC_API GC_ATTR_DEPRECATED int GC_find_leak; +GC_API void GC_CALL GC_set_find_leak(int); +GC_API int GC_CALL GC_get_find_leak(void); +GC_API GC_ATTR_DEPRECATED int GC_all_interior_pointers; +GC_API void GC_CALL GC_set_all_interior_pointers(int); +GC_API int GC_CALL GC_get_all_interior_pointers(void); +GC_API GC_ATTR_DEPRECATED int GC_finalize_on_demand; +GC_API void GC_CALL GC_set_finalize_on_demand(int); +GC_API int GC_CALL GC_get_finalize_on_demand(void); +GC_API GC_ATTR_DEPRECATED int GC_java_finalization; +GC_API void GC_CALL GC_set_java_finalization(int); +GC_API int GC_CALL GC_get_java_finalization(void); +typedef void (GC_CALLBACK*GC_finalizer_notifier_proc)(void); +GC_API GC_ATTR_DEPRECATED GC_finalizer_notifier_proc GC_finalizer_notifier; +GC_API void GC_CALL GC_set_finalizer_notifier(GC_finalizer_notifier_proc); +GC_API GC_finalizer_notifier_proc GC_CALL GC_get_finalizer_notifier(void); +GC_API +#ifndef GC_DONT_GC +GC_ATTR_DEPRECATED +#endif +int GC_dont_gc; +GC_API GC_ATTR_DEPRECATED int GC_dont_expand; +GC_API void GC_CALL GC_set_dont_expand(int); +GC_API int GC_CALL GC_get_dont_expand(void); +GC_API GC_ATTR_DEPRECATED int GC_use_entire_heap; +GC_API GC_ATTR_DEPRECATED int GC_full_freq; +GC_API void GC_CALL GC_set_full_freq(int); +GC_API int GC_CALL GC_get_full_freq(void); +GC_API GC_ATTR_DEPRECATED GC_word GC_non_gc_bytes; +GC_API void GC_CALL GC_set_non_gc_bytes(GC_word); +GC_API GC_word GC_CALL GC_get_non_gc_bytes(void); +GC_API GC_ATTR_DEPRECATED int GC_no_dls; +GC_API void GC_CALL GC_set_no_dls(int); +GC_API int GC_CALL GC_get_no_dls(void); +GC_API GC_ATTR_DEPRECATED GC_word GC_free_space_divisor; +GC_API void GC_CALL GC_set_free_space_divisor(GC_word); +GC_API GC_word GC_CALL 
GC_get_free_space_divisor(void); +GC_API GC_ATTR_DEPRECATED GC_word GC_max_retries; +GC_API void GC_CALL GC_set_max_retries(GC_word); +GC_API GC_word GC_CALL GC_get_max_retries(void); +GC_API GC_ATTR_DEPRECATED char*GC_stackbottom; +GC_API GC_ATTR_DEPRECATED int GC_dont_precollect; +GC_API void GC_CALL GC_set_dont_precollect(int); +GC_API int GC_CALL GC_get_dont_precollect(void); +GC_API GC_ATTR_DEPRECATED unsigned long GC_time_limit; +#define GC_TIME_UNLIMITED 999999 +GC_API void GC_CALL GC_set_time_limit(unsigned long); +GC_API unsigned long GC_CALL GC_get_time_limit(void); +struct GC_timeval_s { +unsigned long tv_ms; +unsigned long tv_nsec; +}; +GC_API void GC_CALL GC_set_time_limit_tv(struct GC_timeval_s); +GC_API struct GC_timeval_s GC_CALL GC_get_time_limit_tv(void); +GC_API void GC_CALL GC_set_allocd_bytes_per_finalizer(GC_word); +GC_API GC_word GC_CALL GC_get_allocd_bytes_per_finalizer(void); +GC_API void GC_CALL GC_start_performance_measurement(void); +GC_API unsigned long GC_CALL GC_get_full_gc_total_time(void); +GC_API void GC_CALL GC_set_pages_executable(int); +GC_API int GC_CALL GC_get_pages_executable(void); +GC_API void GC_CALL GC_set_min_bytes_allocd(size_t); +GC_API size_t GC_CALL GC_get_min_bytes_allocd(void); +GC_API void GC_CALL GC_set_rate(int); +GC_API int GC_CALL GC_get_rate(void); +GC_API void GC_CALL GC_set_max_prior_attempts(int); +GC_API int GC_CALL GC_get_max_prior_attempts(void); +GC_API void GC_CALL GC_set_handle_fork(int); +GC_API void GC_CALL GC_atfork_prepare(void); +GC_API void GC_CALL GC_atfork_parent(void); +GC_API void GC_CALL GC_atfork_child(void); +GC_API void GC_CALL GC_init(void); +GC_API int GC_CALL GC_is_init_called(void); +GC_API void GC_CALL GC_deinit(void); +GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1)void*GC_CALL +GC_malloc(size_t); +GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1)void*GC_CALL +GC_malloc_atomic(size_t); +GC_API GC_ATTR_MALLOC char*GC_CALL GC_strdup(const char*); +GC_API GC_ATTR_MALLOC char*GC_CALL +GC_strndup(const char*,size_t)GC_ATTR_NONNULL(1); +GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1)void*GC_CALL +GC_malloc_uncollectable(size_t); +GC_API GC_ATTR_DEPRECATED void*GC_CALL GC_malloc_stubborn(size_t); +GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(2)void*GC_CALL +GC_memalign(size_t,size_t); +GC_API int GC_CALL GC_posix_memalign(void**,size_t, +size_t)GC_ATTR_NONNULL(1); +GC_API void GC_CALL GC_free(void*); +#define GC_MALLOC_STUBBORN(sz)GC_MALLOC(sz) +#define GC_NEW_STUBBORN(t)GC_NEW(t) +#define GC_CHANGE_STUBBORN(p)GC_change_stubborn(p) +GC_API GC_ATTR_DEPRECATED void GC_CALL GC_change_stubborn(const void*); +GC_API void GC_CALL GC_end_stubborn_change(const void*)GC_ATTR_NONNULL(1); +GC_API void*GC_CALL GC_base(void*); +GC_API int GC_CALL GC_is_heap_ptr(const void*); +GC_API size_t GC_CALL GC_size(const void*)GC_ATTR_NONNULL(1); +GC_API void*GC_CALL GC_realloc(void*, +size_t) +GC_ATTR_ALLOC_SIZE(2); +GC_API int GC_CALL GC_expand_hp(size_t); +GC_API void GC_CALL GC_set_max_heap_size(GC_word); +GC_API void GC_CALL GC_exclude_static_roots(void*, +void*); +GC_API void GC_CALL GC_clear_exclusion_table(void); +GC_API void GC_CALL GC_clear_roots(void); +GC_API void GC_CALL GC_add_roots(void*, +void*); +GC_API void GC_CALL GC_remove_roots(void*, +void*); +GC_API void GC_CALL GC_register_displacement(size_t); +GC_API void GC_CALL GC_debug_register_displacement(size_t); +GC_API void GC_CALL GC_gcollect(void); +GC_API void GC_CALL GC_gcollect_and_unmap(void); +typedef int (GC_CALLBACK*GC_stop_func)(void); +GC_API int GC_CALL 
GC_try_to_collect(GC_stop_func) +GC_ATTR_NONNULL(1); +GC_API void GC_CALL GC_set_stop_func(GC_stop_func) +GC_ATTR_NONNULL(1); +GC_API GC_stop_func GC_CALL GC_get_stop_func(void); +GC_API size_t GC_CALL GC_get_heap_size(void); +GC_API size_t GC_CALL GC_get_free_bytes(void); +GC_API size_t GC_CALL GC_get_unmapped_bytes(void); +GC_API size_t GC_CALL GC_get_bytes_since_gc(void); +GC_API size_t GC_CALL GC_get_expl_freed_bytes_since_gc(void); +GC_API size_t GC_CALL GC_get_total_bytes(void); +GC_API void GC_CALL GC_get_heap_usage_safe(GC_word*, +GC_word*, +GC_word*, +GC_word*, +GC_word*); +struct GC_prof_stats_s { +GC_word heapsize_full; +GC_word free_bytes_full; +GC_word unmapped_bytes; +GC_word bytes_allocd_since_gc; +GC_word allocd_bytes_before_gc; +GC_word non_gc_bytes; +GC_word gc_no; +GC_word markers_m1; +GC_word bytes_reclaimed_since_gc; +GC_word reclaimed_bytes_before_gc; +GC_word expl_freed_bytes_since_gc; +}; +GC_API size_t GC_CALL GC_get_prof_stats(struct GC_prof_stats_s*, +size_t); +#ifdef GC_THREADS +GC_API size_t GC_CALL GC_get_prof_stats_unsafe(struct GC_prof_stats_s*, +size_t); +#endif +GC_API size_t GC_CALL GC_get_size_map_at(int i); +GC_API size_t GC_CALL GC_get_memory_use(void); +GC_API void GC_CALL GC_disable(void); +GC_API int GC_CALL GC_is_disabled(void); +GC_API void GC_CALL GC_enable(void); +GC_API void GC_CALL GC_set_manual_vdb_allowed(int); +GC_API int GC_CALL GC_get_manual_vdb_allowed(void); +GC_API void GC_CALL GC_enable_incremental(void); +GC_API int GC_CALL GC_is_incremental_mode(void); +#define GC_PROTECTS_POINTER_HEAP 1 +#define GC_PROTECTS_PTRFREE_HEAP 2 +#define GC_PROTECTS_STATIC_DATA 4 +#define GC_PROTECTS_STACK 8 +#define GC_PROTECTS_NONE 0 +GC_API int GC_CALL GC_incremental_protection_needs(void); +GC_API int GC_CALL GC_collect_a_little(void); +GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1)void*GC_CALL +GC_malloc_ignore_off_page(size_t); +GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1)void*GC_CALL +GC_malloc_atomic_ignore_off_page(size_t); +#ifdef GC_ADD_CALLER +#define GC_EXTRAS GC_RETURN_ADDR,__FILE__,__LINE__ +#define GC_EXTRA_PARAMS GC_word ra,const char*s,int i +#else +#define GC_EXTRAS __FILE__,__LINE__ +#define GC_EXTRA_PARAMS const char*s,int i +#endif +GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1)void*GC_CALL +GC_malloc_atomic_uncollectable(size_t); +GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1)void*GC_CALL +GC_debug_malloc_atomic_uncollectable(size_t,GC_EXTRA_PARAMS); +GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1)void*GC_CALL +GC_debug_malloc(size_t,GC_EXTRA_PARAMS); +GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1)void*GC_CALL +GC_debug_malloc_atomic(size_t,GC_EXTRA_PARAMS); +GC_API GC_ATTR_MALLOC char*GC_CALL +GC_debug_strdup(const char*,GC_EXTRA_PARAMS); +GC_API GC_ATTR_MALLOC char*GC_CALL +GC_debug_strndup(const char*,size_t,GC_EXTRA_PARAMS) +GC_ATTR_NONNULL(1); +GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1)void*GC_CALL +GC_debug_malloc_uncollectable(size_t, +GC_EXTRA_PARAMS); +GC_API GC_ATTR_DEPRECATED void*GC_CALL +GC_debug_malloc_stubborn(size_t,GC_EXTRA_PARAMS); +GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1)void*GC_CALL +GC_debug_malloc_ignore_off_page(size_t, +GC_EXTRA_PARAMS); +GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1)void*GC_CALL +GC_debug_malloc_atomic_ignore_off_page(size_t, +GC_EXTRA_PARAMS); +GC_API void GC_CALL GC_debug_free(void*); +GC_API void*GC_CALL GC_debug_realloc(void*, +size_t,GC_EXTRA_PARAMS) +GC_ATTR_ALLOC_SIZE(2); +GC_API GC_ATTR_DEPRECATED void GC_CALL GC_debug_change_stubborn(const void*); +GC_API void GC_CALL 
GC_debug_end_stubborn_change(const void*) +GC_ATTR_NONNULL(1); +GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1)void*GC_CALL +GC_debug_malloc_replacement(size_t); +GC_API GC_ATTR_ALLOC_SIZE(2)void*GC_CALL +GC_debug_realloc_replacement(void*, +size_t); +#ifdef GC_DEBUG_REPLACEMENT +#define GC_MALLOC(sz)GC_debug_malloc_replacement(sz) +#define GC_REALLOC(old,sz)GC_debug_realloc_replacement(old,sz) +#elif defined(GC_DEBUG) +#define GC_MALLOC(sz)GC_debug_malloc(sz,GC_EXTRAS) +#define GC_REALLOC(old,sz)GC_debug_realloc(old,sz,GC_EXTRAS) +#else +#define GC_MALLOC(sz)GC_malloc(sz) +#define GC_REALLOC(old,sz)GC_realloc(old,sz) +#endif +#ifdef GC_DEBUG +#define GC_MALLOC_ATOMIC(sz)GC_debug_malloc_atomic(sz,GC_EXTRAS) +#define GC_STRDUP(s)GC_debug_strdup(s,GC_EXTRAS) +#define GC_STRNDUP(s,sz)GC_debug_strndup(s,sz,GC_EXTRAS) +#define GC_MALLOC_ATOMIC_UNCOLLECTABLE(sz)GC_debug_malloc_atomic_uncollectable(sz,GC_EXTRAS) +#define GC_MALLOC_UNCOLLECTABLE(sz)GC_debug_malloc_uncollectable(sz,GC_EXTRAS) +#define GC_MALLOC_IGNORE_OFF_PAGE(sz)GC_debug_malloc_ignore_off_page(sz,GC_EXTRAS) +#define GC_MALLOC_ATOMIC_IGNORE_OFF_PAGE(sz)GC_debug_malloc_atomic_ignore_off_page(sz,GC_EXTRAS) +#define GC_FREE(p)GC_debug_free(p) +#define GC_REGISTER_FINALIZER(p,f,d,of,od)GC_debug_register_finalizer(p,f,d,of,od) +#define GC_REGISTER_FINALIZER_IGNORE_SELF(p,f,d,of,od)GC_debug_register_finalizer_ignore_self(p,f,d,of,od) +#define GC_REGISTER_FINALIZER_NO_ORDER(p,f,d,of,od)GC_debug_register_finalizer_no_order(p,f,d,of,od) +#define GC_REGISTER_FINALIZER_UNREACHABLE(p,f,d,of,od)GC_debug_register_finalizer_unreachable(p,f,d,of,od) +#define GC_END_STUBBORN_CHANGE(p)GC_debug_end_stubborn_change(p) +#define GC_PTR_STORE_AND_DIRTY(p,q)GC_debug_ptr_store_and_dirty(p,q) +#define GC_GENERAL_REGISTER_DISAPPEARING_LINK(link,obj)GC_general_register_disappearing_link(link,GC_base(( void*)(obj))) +#define GC_REGISTER_LONG_LINK(link,obj)GC_register_long_link(link,GC_base(( void*)(obj))) +#define GC_REGISTER_DISPLACEMENT(n)GC_debug_register_displacement(n) +#else +#define GC_MALLOC_ATOMIC(sz)GC_malloc_atomic(sz) +#define GC_STRDUP(s)GC_strdup(s) +#define GC_STRNDUP(s,sz)GC_strndup(s,sz) +#define GC_MALLOC_ATOMIC_UNCOLLECTABLE(sz)GC_malloc_atomic_uncollectable(sz) +#define GC_MALLOC_UNCOLLECTABLE(sz)GC_malloc_uncollectable(sz) +#define GC_MALLOC_IGNORE_OFF_PAGE(sz)GC_malloc_ignore_off_page(sz) +#define GC_MALLOC_ATOMIC_IGNORE_OFF_PAGE(sz)GC_malloc_atomic_ignore_off_page(sz) +#define GC_FREE(p)GC_free(p) +#define GC_REGISTER_FINALIZER(p,f,d,of,od)GC_register_finalizer(p,f,d,of,od) +#define GC_REGISTER_FINALIZER_IGNORE_SELF(p,f,d,of,od)GC_register_finalizer_ignore_self(p,f,d,of,od) +#define GC_REGISTER_FINALIZER_NO_ORDER(p,f,d,of,od)GC_register_finalizer_no_order(p,f,d,of,od) +#define GC_REGISTER_FINALIZER_UNREACHABLE(p,f,d,of,od)GC_register_finalizer_unreachable(p,f,d,of,od) +#define GC_END_STUBBORN_CHANGE(p)GC_end_stubborn_change(p) +#define GC_PTR_STORE_AND_DIRTY(p,q)GC_ptr_store_and_dirty(p,q) +#define GC_GENERAL_REGISTER_DISAPPEARING_LINK(link,obj)GC_general_register_disappearing_link(link,obj) +#define GC_REGISTER_LONG_LINK(link,obj)GC_register_long_link(link,obj) +#define GC_REGISTER_DISPLACEMENT(n)GC_register_displacement(n) +#endif +#define GC_NEW(t)((t*)GC_MALLOC(sizeof(t))) +#define GC_NEW_ATOMIC(t)((t*)GC_MALLOC_ATOMIC(sizeof(t))) +#define GC_NEW_UNCOLLECTABLE(t)((t*)GC_MALLOC_UNCOLLECTABLE(sizeof(t))) +#ifdef GC_REQUIRE_WCSDUP +GC_API GC_ATTR_MALLOC wchar_t*GC_CALL +GC_wcsdup(const wchar_t*)GC_ATTR_NONNULL(1); +GC_API GC_ATTR_MALLOC 
wchar_t*GC_CALL +GC_debug_wcsdup(const wchar_t*,GC_EXTRA_PARAMS)GC_ATTR_NONNULL(1); +#ifdef GC_DEBUG +#define GC_WCSDUP(s)GC_debug_wcsdup(s,GC_EXTRAS) +#else +#define GC_WCSDUP(s)GC_wcsdup(s) +#endif +#endif +typedef void (GC_CALLBACK*GC_finalization_proc)(void*, +void*); +GC_API void GC_CALL GC_register_finalizer(void*, +GC_finalization_proc,void*, +GC_finalization_proc*,void**) +GC_ATTR_NONNULL(1); +GC_API void GC_CALL GC_debug_register_finalizer(void*, +GC_finalization_proc,void*, +GC_finalization_proc*,void**) +GC_ATTR_NONNULL(1); +GC_API void GC_CALL GC_register_finalizer_ignore_self(void*, +GC_finalization_proc,void*, +GC_finalization_proc*,void**) +GC_ATTR_NONNULL(1); +GC_API void GC_CALL GC_debug_register_finalizer_ignore_self(void*, +GC_finalization_proc,void*, +GC_finalization_proc*,void**) +GC_ATTR_NONNULL(1); +GC_API void GC_CALL GC_register_finalizer_no_order(void*, +GC_finalization_proc,void*, +GC_finalization_proc*,void**) +GC_ATTR_NONNULL(1); +GC_API void GC_CALL GC_debug_register_finalizer_no_order(void*, +GC_finalization_proc,void*, +GC_finalization_proc*,void**) +GC_ATTR_NONNULL(1); +GC_API void GC_CALL GC_register_finalizer_unreachable(void*, +GC_finalization_proc,void*, +GC_finalization_proc*,void**) +GC_ATTR_NONNULL(1); +GC_API void GC_CALL GC_debug_register_finalizer_unreachable(void*, +GC_finalization_proc,void*, +GC_finalization_proc*,void**) +GC_ATTR_NONNULL(1); +#define GC_NO_MEMORY 2 +GC_API int GC_CALL GC_register_disappearing_link(void**) +GC_ATTR_NONNULL(1); +GC_API int GC_CALL GC_general_register_disappearing_link(void**, +const void*) +GC_ATTR_NONNULL(1)GC_ATTR_NONNULL(2); +GC_API int GC_CALL GC_move_disappearing_link(void**, +void**) +GC_ATTR_NONNULL(2); +GC_API int GC_CALL GC_unregister_disappearing_link(void**); +GC_API int GC_CALL GC_register_long_link(void**, +const void*) +GC_ATTR_NONNULL(1)GC_ATTR_NONNULL(2); +GC_API int GC_CALL GC_move_long_link(void**, +void**) +GC_ATTR_NONNULL(2); +GC_API int GC_CALL GC_unregister_long_link(void**); +typedef enum { +GC_TOGGLE_REF_DROP, +GC_TOGGLE_REF_STRONG, +GC_TOGGLE_REF_WEAK +} GC_ToggleRefStatus; +typedef GC_ToggleRefStatus (GC_CALLBACK*GC_toggleref_func)(void*); +GC_API void GC_CALL GC_set_toggleref_func(GC_toggleref_func); +GC_API GC_toggleref_func GC_CALL GC_get_toggleref_func(void); +GC_API int GC_CALL GC_toggleref_add(void*,int) +GC_ATTR_NONNULL(1); +typedef void (GC_CALLBACK*GC_await_finalize_proc)(void*); +GC_API void GC_CALL GC_set_await_finalize_proc(GC_await_finalize_proc); +GC_API GC_await_finalize_proc GC_CALL GC_get_await_finalize_proc(void); +GC_API int GC_CALL GC_should_invoke_finalizers(void); +GC_API int GC_CALL GC_invoke_finalizers(void); +#if defined(__GNUC__)&&!defined(__INTEL_COMPILER) +#define GC_reachable_here(ptr)__asm__ __volatile__(" "::"X"(ptr):"memory") +#else +GC_API void GC_CALL GC_noop1(GC_word); +#ifdef LINT2 +#define GC_reachable_here(ptr)GC_noop1(~(GC_word)(ptr)^(~(GC_word)0)) +#else +#define GC_reachable_here(ptr)GC_noop1((GC_word)(ptr)) +#endif +#endif +typedef void (GC_CALLBACK*GC_warn_proc)(char*, +GC_word); +GC_API void GC_CALL GC_set_warn_proc(GC_warn_proc)GC_ATTR_NONNULL(1); +GC_API GC_warn_proc GC_CALL GC_get_warn_proc(void); +GC_API void GC_CALLBACK GC_ignore_warn_proc(char*,GC_word); +GC_API void GC_CALL GC_set_log_fd(int); +typedef void (GC_CALLBACK*GC_abort_func)(const char*); +GC_API void GC_CALL GC_set_abort_func(GC_abort_func)GC_ATTR_NONNULL(1); +GC_API GC_abort_func GC_CALL GC_get_abort_func(void); +GC_API void GC_CALL GC_abort_on_oom(void); +typedef GC_word 
GC_hidden_pointer; +#define GC_HIDE_POINTER(p)(~(GC_hidden_pointer)(p)) +#define GC_REVEAL_POINTER(p)((void*)GC_HIDE_POINTER(p)) +#if defined(I_HIDE_POINTERS)||defined(GC_I_HIDE_POINTERS) +#define HIDE_POINTER(p)GC_HIDE_POINTER(p) +#define REVEAL_POINTER(p)GC_REVEAL_POINTER(p) +#endif +#ifdef GC_THREADS +GC_API void GC_CALL GC_alloc_lock(void); +GC_API void GC_CALL GC_alloc_unlock(void); +#else +#define GC_alloc_lock()(void)0 +#define GC_alloc_unlock()(void)0 +#endif +typedef void*(GC_CALLBACK*GC_fn_type)(void*); +GC_API void*GC_CALL GC_call_with_alloc_lock(GC_fn_type, +void*)GC_ATTR_NONNULL(1); +struct GC_stack_base { +void*mem_base; +#if defined(__ia64)||defined(__ia64__)||defined(_M_IA64) +void*reg_base; +#endif +}; +typedef void*(GC_CALLBACK*GC_stack_base_func)( +struct GC_stack_base*,void*); +GC_API void*GC_CALL GC_call_with_stack_base(GC_stack_base_func, +void*)GC_ATTR_NONNULL(1); +#define GC_SUCCESS 0 +#define GC_DUPLICATE 1 +#define GC_NO_THREADS 2 +#define GC_UNIMPLEMENTED 3 +#define GC_NOT_FOUND 4 +#if defined(GC_DARWIN_THREADS)||defined(GC_WIN32_THREADS) +GC_API void GC_CALL GC_use_threads_discovery(void); +#endif +#ifdef GC_THREADS +GC_API void GC_CALL GC_set_suspend_signal(int); +GC_API void GC_CALL GC_set_thr_restart_signal(int); +GC_API int GC_CALL GC_get_suspend_signal(void); +GC_API int GC_CALL GC_get_thr_restart_signal(void); +GC_API void GC_CALL GC_start_mark_threads(void); +GC_API void GC_CALL GC_allow_register_threads(void); +GC_API int GC_CALL GC_register_my_thread(const struct GC_stack_base*) +GC_ATTR_NONNULL(1); +GC_API int GC_CALL GC_thread_is_registered(void); +GC_API void GC_CALL GC_register_altstack(void*, +GC_word, +void*, +GC_word); +GC_API int GC_CALL GC_unregister_my_thread(void); +GC_API void GC_CALL GC_stop_world_external(void); +GC_API void GC_CALL GC_start_world_external(void); +#endif +GC_API void*GC_CALL GC_do_blocking(GC_fn_type, +void*)GC_ATTR_NONNULL(1); +GC_API void*GC_CALL GC_call_with_gc_active(GC_fn_type, +void*)GC_ATTR_NONNULL(1); +GC_API int GC_CALL GC_get_stack_base(struct GC_stack_base*) +GC_ATTR_NONNULL(1); +GC_API void*GC_CALL GC_get_my_stackbottom(struct GC_stack_base*) +GC_ATTR_NONNULL(1); +GC_API void GC_CALL GC_set_stackbottom(void*, +const struct GC_stack_base*) +GC_ATTR_NONNULL(2); +GC_API void*GC_CALL GC_same_obj(void*,void*); +GC_API void*GC_CALL GC_pre_incr(void**,ptrdiff_t) +GC_ATTR_NONNULL(1); +GC_API void*GC_CALL GC_post_incr(void**,ptrdiff_t) +GC_ATTR_NONNULL(1); +GC_API void*GC_CALL GC_is_visible(void*); +GC_API void*GC_CALL GC_is_valid_displacement(void*); +GC_API void GC_CALL GC_dump(void); +GC_API void GC_CALL GC_dump_named(const char*); +GC_API void GC_CALL GC_dump_regions(void); +GC_API void GC_CALL GC_dump_finalization(void); +#if defined(GC_DEBUG)&&defined(__GNUC__) +#define GC_PTR_ADD3(x,n,type_of_result)((type_of_result)GC_same_obj((x)+(n),(x))) +#define GC_PRE_INCR3(x,n,type_of_result)((type_of_result)GC_pre_incr((void**)(&(x)),(n)*sizeof(*x))) +#define GC_POST_INCR3(x,n,type_of_result)((type_of_result)GC_post_incr((void**)(&(x)),(n)*sizeof(*x))) +#define GC_PTR_ADD(x,n)GC_PTR_ADD3(x,n,__typeof__(x)) +#define GC_PRE_INCR(x,n)GC_PRE_INCR3(x,n,__typeof__(x)) +#define GC_POST_INCR(x)GC_POST_INCR3(x,1,__typeof__(x)) +#define GC_POST_DECR(x)GC_POST_INCR3(x,-1,__typeof__(x)) +#else +#define GC_PTR_ADD(x,n)((x)+(n)) +#define GC_PRE_INCR(x,n)((x)+=(n)) +#define GC_POST_INCR(x)((x)++) +#define GC_POST_DECR(x)((x)--) +#endif +#ifdef GC_DEBUG +#define 
GC_PTR_STORE(p,q)(*(void**)GC_is_visible((void*)(p))=GC_is_valid_displacement((void*)(q))) +#else +#define GC_PTR_STORE(p,q)(*(void**)(p)=(void*)(q)) +#endif +GC_API void GC_CALL GC_ptr_store_and_dirty(void*, +const void*); +GC_API void GC_CALL GC_debug_ptr_store_and_dirty(void*, +const void*); +GC_API void (GC_CALLBACK*GC_same_obj_print_proc)(void*, +void*); +GC_API void (GC_CALLBACK*GC_is_valid_displacement_print_proc)(void*); +GC_API void (GC_CALLBACK*GC_is_visible_print_proc)(void*); +#ifdef GC_PTHREADS +#ifdef __cplusplus +} +#endif +#ifdef __cplusplus +extern "C" { +#endif +#endif +GC_API GC_ATTR_MALLOC void*GC_CALL GC_malloc_many(size_t); +#define GC_NEXT(p)(*(void**)(p)) +typedef int (GC_CALLBACK*GC_has_static_roots_func)( +const char*, +void*, +size_t); +GC_API void GC_CALL GC_register_has_static_roots_callback( +GC_has_static_roots_func); +#if!defined(CPPCHECK)&&!defined(GC_WINDOWS_H_INCLUDED)&&defined(WINAPI) +#define GC_WINDOWS_H_INCLUDED +#endif +#if defined(GC_WIN32_THREADS)&&(!defined(GC_PTHREADS)||defined(GC_BUILD)||defined(GC_WINDOWS_H_INCLUDED)) +#if (!defined(GC_NO_THREAD_DECLS)||defined(GC_BUILD))&&!defined(GC_DONT_INCL_WINDOWS_H) +#ifdef __cplusplus +} +#endif +#if!defined(_WIN32_WCE)&&!defined(__CEGCC__) +#include +#endif +#if defined(GC_BUILD)||!defined(GC_DONT_INCLUDE_WINDOWS_H) +#include +#define GC_WINDOWS_H_INCLUDED +#endif +#ifdef __cplusplus +extern "C" { +#endif +#ifdef GC_UNDERSCORE_STDCALL +#define GC_CreateThread _GC_CreateThread +#define GC_ExitThread _GC_ExitThread +#endif +#ifndef DECLSPEC_NORETURN +#ifdef GC_WINDOWS_H_INCLUDED +#define DECLSPEC_NORETURN +#else +#define DECLSPEC_NORETURN __declspec(noreturn) +#endif +#endif +#if!defined(_UINTPTR_T)&&!defined(_UINTPTR_T_DEFINED)&&!defined(UINTPTR_MAX) +typedef GC_word GC_uintptr_t; +#else +typedef uintptr_t GC_uintptr_t; +#endif +#ifdef _WIN64 +#define GC_WIN32_SIZE_T GC_uintptr_t +#elif defined(GC_WINDOWS_H_INCLUDED) +#define GC_WIN32_SIZE_T DWORD +#else +#define GC_WIN32_SIZE_T unsigned long +#endif +#ifdef GC_INSIDE_DLL +#ifdef GC_UNDERSCORE_STDCALL +#define GC_DllMain _GC_DllMain +#endif +#ifdef GC_WINDOWS_H_INCLUDED +GC_API BOOL WINAPI GC_DllMain(HINSTANCE, +ULONG, +LPVOID); +#else +GC_API int __stdcall GC_DllMain(void*,unsigned long,void*); +#endif +#endif +#ifdef GC_WINDOWS_H_INCLUDED +GC_API HANDLE WINAPI GC_CreateThread( +LPSECURITY_ATTRIBUTES, +GC_WIN32_SIZE_T, +LPTHREAD_START_ROUTINE, +LPVOID,DWORD, +LPDWORD); +GC_API DECLSPEC_NORETURN void WINAPI GC_ExitThread( +DWORD); +#else +struct _SECURITY_ATTRIBUTES; +GC_API void*__stdcall GC_CreateThread(struct _SECURITY_ATTRIBUTES*, +GC_WIN32_SIZE_T, +unsigned long (__stdcall*)(void*), +void*,unsigned long,unsigned long*); +GC_API DECLSPEC_NORETURN void __stdcall GC_ExitThread(unsigned long); +#endif +#if!defined(_WIN32_WCE)&&!defined(__CEGCC__) +GC_API GC_uintptr_t GC_CALL GC_beginthreadex( +void*,unsigned, +unsigned (__stdcall*)(void*), +void*,unsigned, +unsigned*); +GC_API void GC_CALL GC_endthreadex(unsigned); +#endif +#endif +#ifdef GC_WINMAIN_REDIRECT +#define WinMain GC_WinMain +#endif +#define GC_use_DllMain GC_use_threads_discovery +#ifndef GC_NO_THREAD_REDIRECTS +#define CreateThread GC_CreateThread +#define ExitThread GC_ExitThread +#undef _beginthreadex +#define _beginthreadex GC_beginthreadex +#undef _endthreadex +#define _endthreadex GC_endthreadex +#endif +#endif +GC_API void GC_CALL GC_set_force_unmap_on_gcollect(int); +GC_API int GC_CALL GC_get_force_unmap_on_gcollect(void); +#if defined(__CYGWIN32__)||defined(__CYGWIN__) +#ifdef 
__x86_64__ +extern int __data_start__[],__data_end__[]; +extern int __bss_start__[],__bss_end__[]; +#define GC_DATASTART ((GC_word)__data_start__ < (GC_word)__bss_start__?(void*)__data_start__:(void*)__bss_start__) +#define GC_DATAEND ((GC_word)__data_end__ > (GC_word)__bss_end__?(void*)__data_end__:(void*)__bss_end__) +#else +extern int _data_start__[],_data_end__[],_bss_start__[],_bss_end__[]; +#define GC_DATASTART ((GC_word)_data_start__ < (GC_word)_bss_start__?(void*)_data_start__:(void*)_bss_start__) +#define GC_DATAEND ((GC_word)_data_end__ > (GC_word)_bss_end__?(void*)_data_end__:(void*)_bss_end__) +#endif +#define GC_INIT_CONF_ROOTS GC_add_roots(GC_DATASTART,GC_DATAEND);GC_gcollect() +#elif defined(_AIX) +extern int _data[],_end[]; +#define GC_DATASTART ((void*)_data) +#define GC_DATAEND ((void*)_end) +#define GC_INIT_CONF_ROOTS GC_add_roots(GC_DATASTART,GC_DATAEND) +#elif (defined(HOST_ANDROID)||defined(__ANDROID__))&&defined(IGNORE_DYNAMIC_LOADING) +#pragma weak __dso_handle +extern int __dso_handle[]; +GC_API void*GC_CALL GC_find_limit(void*,int); +#define GC_INIT_CONF_ROOTS (void)(__dso_handle!=0?(GC_add_roots(__dso_handle,GC_find_limit(__dso_handle,1)),0):0) +#else +#define GC_INIT_CONF_ROOTS +#endif +#ifdef GC_DONT_EXPAND +#define GC_INIT_CONF_DONT_EXPAND GC_set_dont_expand(1) +#else +#define GC_INIT_CONF_DONT_EXPAND +#endif +#ifdef GC_FORCE_UNMAP_ON_GCOLLECT +#define GC_INIT_CONF_FORCE_UNMAP_ON_GCOLLECT GC_set_force_unmap_on_gcollect(1) +#else +#define GC_INIT_CONF_FORCE_UNMAP_ON_GCOLLECT +#endif +#ifdef GC_DONT_GC +#define GC_INIT_CONF_MAX_RETRIES (void)(GC_dont_gc=1) +#elif defined(GC_MAX_RETRIES)&&!defined(CPPCHECK) +#define GC_INIT_CONF_MAX_RETRIES GC_set_max_retries(GC_MAX_RETRIES) +#else +#define GC_INIT_CONF_MAX_RETRIES +#endif +#if defined(GC_ALLOCD_BYTES_PER_FINALIZER)&&!defined(CPPCHECK) +#define GC_INIT_CONF_ALLOCD_BYTES_PER_FINALIZER GC_set_allocd_bytes_per_finalizer(GC_ALLOCD_BYTES_PER_FINALIZER) +#else +#define GC_INIT_CONF_ALLOCD_BYTES_PER_FINALIZER +#endif +#if defined(GC_FREE_SPACE_DIVISOR)&&!defined(CPPCHECK) +#define GC_INIT_CONF_FREE_SPACE_DIVISOR GC_set_free_space_divisor(GC_FREE_SPACE_DIVISOR) +#else +#define GC_INIT_CONF_FREE_SPACE_DIVISOR +#endif +#if defined(GC_FULL_FREQ)&&!defined(CPPCHECK) +#define GC_INIT_CONF_FULL_FREQ GC_set_full_freq(GC_FULL_FREQ) +#else +#define GC_INIT_CONF_FULL_FREQ +#endif +#if defined(GC_TIME_LIMIT)&&!defined(CPPCHECK) +#define GC_INIT_CONF_TIME_LIMIT GC_set_time_limit(GC_TIME_LIMIT) +#else +#define GC_INIT_CONF_TIME_LIMIT +#endif +#if defined(GC_MARKERS)&&defined(GC_THREADS)&&!defined(CPPCHECK) +#define GC_INIT_CONF_MARKERS GC_set_markers_count(GC_MARKERS) +#else +#define GC_INIT_CONF_MARKERS +#endif +#if defined(GC_SIG_SUSPEND)&&defined(GC_THREADS)&&!defined(CPPCHECK) +#define GC_INIT_CONF_SUSPEND_SIGNAL GC_set_suspend_signal(GC_SIG_SUSPEND) +#else +#define GC_INIT_CONF_SUSPEND_SIGNAL +#endif +#if defined(GC_SIG_THR_RESTART)&&defined(GC_THREADS)&&!defined(CPPCHECK) +#define GC_INIT_CONF_THR_RESTART_SIGNAL GC_set_thr_restart_signal(GC_SIG_THR_RESTART) +#else +#define GC_INIT_CONF_THR_RESTART_SIGNAL +#endif +#if defined(GC_MAXIMUM_HEAP_SIZE)&&!defined(CPPCHECK) +#define GC_INIT_CONF_MAXIMUM_HEAP_SIZE GC_set_max_heap_size(GC_MAXIMUM_HEAP_SIZE) +#else +#define GC_INIT_CONF_MAXIMUM_HEAP_SIZE +#endif +#ifdef GC_IGNORE_WARN +#define GC_INIT_CONF_IGNORE_WARN GC_set_warn_proc(GC_ignore_warn_proc) +#else +#define GC_INIT_CONF_IGNORE_WARN +#endif +#if defined(GC_INITIAL_HEAP_SIZE)&&!defined(CPPCHECK) +#define 
GC_INIT_CONF_INITIAL_HEAP_SIZE { size_t heap_size=GC_get_heap_size();if (heap_size < (GC_INITIAL_HEAP_SIZE))(void)GC_expand_hp((GC_INITIAL_HEAP_SIZE)- heap_size);} +#else +#define GC_INIT_CONF_INITIAL_HEAP_SIZE +#endif +#define GC_INIT(){ GC_INIT_CONF_DONT_EXPAND;GC_INIT_CONF_FORCE_UNMAP_ON_GCOLLECT;GC_INIT_CONF_MAX_RETRIES;GC_INIT_CONF_ALLOCD_BYTES_PER_FINALIZER;GC_INIT_CONF_FREE_SPACE_DIVISOR;GC_INIT_CONF_FULL_FREQ;GC_INIT_CONF_TIME_LIMIT;GC_INIT_CONF_MARKERS;GC_INIT_CONF_SUSPEND_SIGNAL;GC_INIT_CONF_THR_RESTART_SIGNAL;GC_INIT_CONF_MAXIMUM_HEAP_SIZE;GC_init();GC_INIT_CONF_ROOTS;GC_INIT_CONF_IGNORE_WARN;GC_INIT_CONF_INITIAL_HEAP_SIZE;} +GC_API void GC_CALL GC_win32_free_heap(void); +#if defined(__SYMBIAN32__) +void GC_init_global_static_roots(void); +#endif +#if defined(_AMIGA)&&!defined(GC_AMIGA_MAKINGLIB) +void*GC_amiga_realloc(void*,size_t); +#define GC_realloc(a,b)GC_amiga_realloc(a,b) +void GC_amiga_set_toany(void (*)(void)); +extern int GC_amiga_free_space_divisor_inc; +extern void*(*GC_amiga_allocwrapper_do)(size_t,void*(GC_CALL*)(size_t)); +#define GC_malloc(a)(*GC_amiga_allocwrapper_do)(a,GC_malloc) +#define GC_malloc_atomic(a)(*GC_amiga_allocwrapper_do)(a,GC_malloc_atomic) +#define GC_malloc_uncollectable(a)(*GC_amiga_allocwrapper_do)(a,GC_malloc_uncollectable) +#define GC_malloc_atomic_uncollectable(a)(*GC_amiga_allocwrapper_do)(a,GC_malloc_atomic_uncollectable) +#define GC_malloc_ignore_off_page(a)(*GC_amiga_allocwrapper_do)(a,GC_malloc_ignore_off_page) +#define GC_malloc_atomic_ignore_off_page(a)(*GC_amiga_allocwrapper_do)(a,GC_malloc_atomic_ignore_off_page) +#endif +#ifdef __cplusplus +} +#endif +#endif diff --git a/v_windows/v/old/thirdparty/mssql/include/.gitignore b/v_windows/v/old/thirdparty/mssql/include/.gitignore new file mode 100644 index 0000000..773ce27 --- /dev/null +++ b/v_windows/v/old/thirdparty/mssql/include/.gitignore @@ -0,0 +1,4 @@ +* + +!.gitignore +!mssql.h \ No newline at end of file diff --git a/v_windows/v/old/thirdparty/mssql/include/mssql.h b/v_windows/v/old/thirdparty/mssql/include/mssql.h new file mode 100644 index 0000000..d38768c --- /dev/null +++ b/v_windows/v/old/thirdparty/mssql/include/mssql.h @@ -0,0 +1,20 @@ +// Hacking some headers on Windows. +// The SQL headers use UNICODE to change function signatures. +// Currently the Linux bindings do not use the Unicode SQL C bindings, +// so we turn off UNICODE to make it compile on Windows. +// For future Unicode support, please raise an issue. 
+ +#include +#include + +#ifdef UNICODE +// Turn off the UNICODE macro and turn it back on, so it only affects the sql headers +#undef UNICODE +#include +#include +#define UNICODE + +#else +#include +#include +#endif \ No newline at end of file diff --git a/v_windows/v/old/thirdparty/picoev/picoev.c b/v_windows/v/old/thirdparty/picoev/picoev.c new file mode 100644 index 0000000..bd6691a --- /dev/null +++ b/v_windows/v/old/thirdparty/picoev/picoev.c @@ -0,0 +1,9 @@ +#ifdef __linux__ + #include "src/picoev_epoll.c" +#elif __APPLE__ + #include "src/picoev_kqueue.c" +#elif defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__) || defined(__DragonFly__) + #include "src/picoev_kqueue.c" +#else + #include "src/picoev_select.c" +#endif diff --git a/v_windows/v/old/thirdparty/picoev/src/README.md b/v_windows/v/old/thirdparty/picoev/src/README.md new file mode 100644 index 0000000..9619e08 --- /dev/null +++ b/v_windows/v/old/thirdparty/picoev/src/README.md @@ -0,0 +1,21 @@ +picoev +====== + +A *tiny*, *lightning fast* event loop for network applications + +The text below is copied from the [original publication](http://developer.cybozu.co.jp/archives/kazuho/2009/08/picoev-a-tiny-e.html) + +I am sure many programmers writing network applications have their own abstraction layers hiding the differences between various I/O multiplexing APIs, like select(2), poll(2), epoll(2), ... And of course, I am one among them. While writing mycached ([see Mycached: memcached protocol support for MySQL for more information](http://developer.cybozu.co.jp/archives/kazuho/2009/08/mycached-memcac.html)), I was at first considering using [libev](http://software.schmorp.de/pkg/libev.html) for multiplexing socket I/Os. [Libevent](http://www.monkey.org/~provos/libevent/) was not an option since it does not (yet) provide multithreading support. + +But it was a great pain for me to learn how to use libev. I do not mean that it is an ugly product. In fact, I think that it is a very well written, excellent library. However, for me it was too boring a task to learn how things are abstracted, since I was already familiar with the problems it tries to hide. + +So instead I thought it might be a good occasion to write my own library that could be used in any programs I may write in the future. The result is picoev, and it is faster than libevent or libev! The benchmark used is a re-modified version taken from libev.schmorp.de/bench.html and can be found [here](http://coderepos.org/share/browser/lang/c/picoev/trunk/example/bench.c). +![setup time](http://developer.cybozu.co.jp/archives/kazuho/files/picoev_setup.png) +![event processing time](http://developer.cybozu.co.jp/archives/kazuho/files/picoev_event.png) + +Why is it faster? It is because it uses an array and a ring buffer of bit vectors as its internal structure. Libevent and libev seem to use some kind of sorted tree to represent file descriptors. However, if we concentrate on Un*x systems, there is a guarantee that the descriptors will be small positive integers. Picoev utilizes this fact and stores information related to file descriptors (such as pointers to callback functions or callback arguments) in an array, resulting in faster manipulation of socket states. + +Another optimization technique used by picoev is not to use an ordered tree for keeping timeout information. Generally speaking, most network applications do not require accurate timeouts. Thus it is possible to use a ring buffer (a sliding array) of bit vectors for the purpose. 
Each bit vector represents a set of file descriptors that time out at a given time. Picoev uses 128 bit vectors to represent timeouts: for example, the first bit vector represents the sockets that time out one second later, the second bit vector those that time out two seconds later..., and the bit vectors slide every second. If the maximum timeout required by the web application is greater than 128 seconds, the minimum granularity of the timeout becomes two seconds. + +I would like to reiterate that both libevent and libev are great libraries. Picoev is not comparable to them, especially in maturity and the number of features. It only supports select(2), epoll(2), and kqueue(2) for the time being. However, the design is simpler than that of the other two, and I think it will be a good starting point to write network applications, or to use as a basis for writing one's own network libraries. + diff --git a/v_windows/v/old/thirdparty/picoev/src/picoev.h b/v_windows/v/old/thirdparty/picoev/src/picoev.h new file mode 100644 index 0000000..b00b02b --- /dev/null +++ b/v_windows/v/old/thirdparty/picoev/src/picoev.h @@ -0,0 +1,404 @@ +/* + * Copyright (c) 2009, Cybozu Labs, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * * Neither the name of the nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef picoev_h +#define picoev_h + +#ifdef __cplusplus +extern "C" { +# define PICOEV_INLINE inline +#else +# define PICOEV_INLINE static __inline__ +#endif + +#include +#include +#include +#include +#include + +#define PICOEV_IS_INITED (picoev.max_fd != 0) +#define PICOEV_IS_INITED_AND_FD_IN_RANGE(fd) \ + (((unsigned)fd) < (unsigned)picoev.max_fd) +#define PICOEV_TOO_MANY_LOOPS (picoev.num_loops != 0) /* use after ++ */ +#define PICOEV_FD_BELONGS_TO_LOOP(loop, fd) \ + ((loop)->loop_id == picoev.fds[fd].loop_id) + +#define PICOEV_TIMEOUT_VEC_OF(loop, idx) \ + ((loop)->timeout.vec + (idx) * picoev.timeout_vec_size) +#define PICOEV_TIMEOUT_VEC_OF_VEC_OF(loop, idx) \ + ((loop)->timeout.vec_of_vec + (idx) * picoev.timeout_vec_of_vec_size) +#define PICOEV_RND_UP(v, d) (((v) + (d) - 1) / (d) * (d)) + +#define PICOEV_PAGE_SIZE 4096 +#define PICOEV_CACHE_LINE_SIZE 32 /* in bytes, ok if greater than the actual */ +#define PICOEV_SIMD_BITS 128 +#define PICOEV_TIMEOUT_VEC_SIZE 128 +#define PICOEV_SHORT_BITS (sizeof(short) * 8) + +#define PICOEV_READ 1 +#define PICOEV_WRITE 2 +#define PICOEV_TIMEOUT 4 +#define PICOEV_ADD 0x40000000 +#define PICOEV_DEL 0x20000000 +#define PICOEV_READWRITE (PICOEV_READ | PICOEV_WRITE) + +#define PICOEV_TIMEOUT_IDX_UNUSED (UCHAR_MAX) + + typedef unsigned short picoev_loop_id_t; + + typedef struct picoev_loop_st picoev_loop; + + typedef void picoev_handler(picoev_loop* loop, int fd, int revents, + void* cb_arg); + + typedef struct picoev_fd_st { + /* use accessors! */ + /* TODO adjust the size to match that of a cache line */ + picoev_handler* callback; + void* cb_arg; + picoev_loop_id_t loop_id; + char events; + unsigned char timeout_idx; /* PICOEV_TIMEOUT_IDX_UNUSED if not used */ + int _backend; /* can be used by backends (never modified by core) */ + } picoev_fd; + + struct picoev_loop_st { + /* read only */ + picoev_loop_id_t loop_id; + struct { + short* vec; + short* vec_of_vec; + size_t base_idx; + time_t base_time; + int resolution; + void* _free_addr; + } timeout; + time_t now; + }; + + typedef struct picoev_globals_st { + /* read only */ + picoev_fd* fds; + void* _fds_free_addr; + int max_fd; + int num_loops; + size_t timeout_vec_size; /* # of elements in picoev_loop.timeout.vec[0] */ + size_t timeout_vec_of_vec_size; /* ... in timeout.vec_of_vec[0] */ + } picoev_globals; + + extern picoev_globals picoev; + + /* creates a new event loop (defined by each backend) */ + picoev_loop* picoev_create_loop(int max_timeout); + + /* destroys a loop (defined by each backend) */ + int picoev_destroy_loop(picoev_loop* loop); + + /* internal: updates events to be watched (defined by each backend) */ + int picoev_update_events_internal(picoev_loop* loop, int fd, int events); + + /* internal: poll once and call the handlers (defined by each backend) */ + int picoev_poll_once_internal(picoev_loop* loop, int max_wait); + + /* internal, aligned allocator with address scrambling to avoid cache + line contention */ + PICOEV_INLINE + void* picoev_memalign(size_t sz, void** orig_addr, int clear) { + sz = sz + PICOEV_PAGE_SIZE + PICOEV_CACHE_LINE_SIZE; + if ((*orig_addr = malloc(sz)) == NULL) { + return NULL; + } + if (clear != 0) { + memset(*orig_addr, 0, sz); + } + return + (void*)PICOEV_RND_UP((unsigned long)*orig_addr + + (rand() % PICOEV_PAGE_SIZE), + PICOEV_CACHE_LINE_SIZE); + } + + /* initializes picoev */ + PICOEV_INLINE + int picoev_init(int max_fd) { + assert(! 
PICOEV_IS_INITED); + assert(max_fd > 0); + if ((picoev.fds = (picoev_fd*)picoev_memalign(sizeof(picoev_fd) * max_fd, + &picoev._fds_free_addr, 1)) + == NULL) { + return -1; + } + picoev.max_fd = max_fd; + picoev.num_loops = 0; + picoev.timeout_vec_size + = PICOEV_RND_UP(picoev.max_fd, PICOEV_SIMD_BITS) / PICOEV_SHORT_BITS; + picoev.timeout_vec_of_vec_size + = PICOEV_RND_UP(picoev.timeout_vec_size, PICOEV_SIMD_BITS) + / PICOEV_SHORT_BITS; + return 0; + } + + /* deinitializes picoev */ + PICOEV_INLINE + int picoev_deinit(void) { + assert(PICOEV_IS_INITED); + free(picoev._fds_free_addr); + picoev.fds = NULL; + picoev._fds_free_addr = NULL; + picoev.max_fd = 0; + picoev.num_loops = 0; + return 0; + } + + /* updates timeout */ + PICOEV_INLINE + void picoev_set_timeout(picoev_loop* loop, int fd, int secs) { + picoev_fd* target; + short* vec, * vec_of_vec; + size_t vi = fd / PICOEV_SHORT_BITS, delta; + assert(PICOEV_IS_INITED_AND_FD_IN_RANGE(fd)); + assert(PICOEV_FD_BELONGS_TO_LOOP(loop, fd)); + target = picoev.fds + fd; + /* clear timeout */ + if (target->timeout_idx != PICOEV_TIMEOUT_IDX_UNUSED) { + vec = PICOEV_TIMEOUT_VEC_OF(loop, target->timeout_idx); + if ((vec[vi] &= ~((unsigned short)SHRT_MIN >> (fd % PICOEV_SHORT_BITS))) + == 0) { + vec_of_vec = PICOEV_TIMEOUT_VEC_OF_VEC_OF(loop, target->timeout_idx); + vec_of_vec[vi / PICOEV_SHORT_BITS] + &= ~((unsigned short)SHRT_MIN >> (vi % PICOEV_SHORT_BITS)); + } + target->timeout_idx = PICOEV_TIMEOUT_IDX_UNUSED; + } + if (secs != 0) { + delta = (loop->now + secs - loop->timeout.base_time) + / loop->timeout.resolution; + if (delta >= PICOEV_TIMEOUT_VEC_SIZE) { + delta = PICOEV_TIMEOUT_VEC_SIZE - 1; + } + target->timeout_idx = + (loop->timeout.base_idx + delta) % PICOEV_TIMEOUT_VEC_SIZE; + vec = PICOEV_TIMEOUT_VEC_OF(loop, target->timeout_idx); + vec[vi] |= (unsigned short)SHRT_MIN >> (fd % PICOEV_SHORT_BITS); + vec_of_vec = PICOEV_TIMEOUT_VEC_OF_VEC_OF(loop, target->timeout_idx); + vec_of_vec[vi / PICOEV_SHORT_BITS] + |= (unsigned short)SHRT_MIN >> (vi % PICOEV_SHORT_BITS); + } + } + + /* registers a file descriptor and callback argument to a event loop */ + PICOEV_INLINE + int picoev_add(picoev_loop* loop, int fd, int events, int timeout_in_secs, + picoev_handler* callback, void* cb_arg) { + picoev_fd* target; + if (!PICOEV_IS_INITED_AND_FD_IN_RANGE(fd)) { return -1; } + target = picoev.fds + fd; + assert(target->loop_id == 0); + target->callback = callback; + target->cb_arg = cb_arg; + target->loop_id = loop->loop_id; + target->events = 0; + target->timeout_idx = PICOEV_TIMEOUT_IDX_UNUSED; + if (picoev_update_events_internal(loop, fd, events | PICOEV_ADD) != 0) { + target->loop_id = 0; + return -1; + } + picoev_set_timeout(loop, fd, timeout_in_secs); + return 0; + } + + /* unregisters a file descriptor from event loop */ + PICOEV_INLINE + int picoev_del(picoev_loop* loop, int fd) { + picoev_fd* target; + assert(PICOEV_IS_INITED_AND_FD_IN_RANGE(fd)); + target = picoev.fds + fd; + if (picoev_update_events_internal(loop, fd, PICOEV_DEL) != 0) { + return -1; + } + picoev_set_timeout(loop, fd, 0); + target->loop_id = 0; + return 0; + } + + /* check if fd is registered (checks all loops if loop == NULL) */ + PICOEV_INLINE + int picoev_is_active(picoev_loop* loop, int fd) { + assert(PICOEV_IS_INITED_AND_FD_IN_RANGE(fd)); + return loop != NULL + ? 
picoev.fds[fd].loop_id == loop->loop_id + : picoev.fds[fd].loop_id != 0; + } + + /* returns events being watched for given descriptor */ + PICOEV_INLINE + int picoev_get_events(picoev_loop* loop __attribute__((unused)), int fd) { + assert(PICOEV_IS_INITED_AND_FD_IN_RANGE(fd)); + return picoev.fds[fd].events & PICOEV_READWRITE; + } + + /* sets events to be watched for given desriptor */ + PICOEV_INLINE + int picoev_set_events(picoev_loop* loop, int fd, int events) { + assert(PICOEV_IS_INITED_AND_FD_IN_RANGE(fd)); + if (picoev.fds[fd].events != events + && picoev_update_events_internal(loop, fd, events) != 0) { + return -1; + } + return 0; + } + + /* returns callback for given descriptor */ + PICOEV_INLINE + picoev_handler* picoev_get_callback(picoev_loop* loop __attribute__((unused)), + int fd, void** cb_arg) { + assert(PICOEV_IS_INITED_AND_FD_IN_RANGE(fd)); + if (cb_arg != NULL) { + *cb_arg = picoev.fds[fd].cb_arg; + } + return picoev.fds[fd].callback; + } + + /* sets callback for given descriptor */ + PICOEV_INLINE + void picoev_set_callback(picoev_loop* loop __attribute__((unused)), int fd, + picoev_handler* callback, void** cb_arg) { + assert(PICOEV_IS_INITED_AND_FD_IN_RANGE(fd)); + if (cb_arg != NULL) { + picoev.fds[fd].cb_arg = *cb_arg; + } + picoev.fds[fd].callback = callback; + } + + /* function to iterate registered information. To start iteration, set curfd + to -1 and call the function until -1 is returned */ + PICOEV_INLINE + int picoev_next_fd(picoev_loop* loop, int curfd) { + if (curfd != -1) { + assert(PICOEV_IS_INITED_AND_FD_IN_RANGE(curfd)); + } + while (++curfd < picoev.max_fd) { + if (loop->loop_id == picoev.fds[curfd].loop_id) { + return curfd; + } + } + return -1; + } + + /* internal function */ + PICOEV_INLINE + int picoev_init_loop_internal(picoev_loop* loop, int max_timeout) { + loop->loop_id = ++picoev.num_loops; + assert(PICOEV_TOO_MANY_LOOPS); + if ((loop->timeout.vec_of_vec + = (short*)picoev_memalign((picoev.timeout_vec_of_vec_size + + picoev.timeout_vec_size) + * sizeof(short) * PICOEV_TIMEOUT_VEC_SIZE, + &loop->timeout._free_addr, 1)) + == NULL) { + --picoev.num_loops; + return -1; + } + loop->timeout.vec = loop->timeout.vec_of_vec + + picoev.timeout_vec_of_vec_size * PICOEV_TIMEOUT_VEC_SIZE; + loop->timeout.base_idx = 0; + loop->timeout.base_time = time(NULL); + loop->timeout.resolution + = PICOEV_RND_UP(max_timeout, PICOEV_TIMEOUT_VEC_SIZE) + / PICOEV_TIMEOUT_VEC_SIZE; + return 0; + } + + /* internal function */ + PICOEV_INLINE + void picoev_deinit_loop_internal(picoev_loop* loop) { + free(loop->timeout._free_addr); + } + + /* internal function */ + PICOEV_INLINE + void picoev_handle_timeout_internal(picoev_loop* loop) { + size_t i, j, k; + for (; + loop->timeout.base_time <= loop->now - loop->timeout.resolution; + loop->timeout.base_idx + = (loop->timeout.base_idx + 1) % PICOEV_TIMEOUT_VEC_SIZE, + loop->timeout.base_time += loop->timeout.resolution) { + /* TODO use SIMD instructions */ + short* vec = PICOEV_TIMEOUT_VEC_OF(loop, loop->timeout.base_idx); + short* vec_of_vec + = PICOEV_TIMEOUT_VEC_OF_VEC_OF(loop, loop->timeout.base_idx); + for (i = 0; i < picoev.timeout_vec_of_vec_size; ++i) { + short vv = vec_of_vec[i]; + if (vv != 0) { + for (j = i * PICOEV_SHORT_BITS; vv != 0; j++, vv <<= 1) { + if (vv < 0) { + short v = vec[j]; + assert(v != 0); + for (k = j * PICOEV_SHORT_BITS; v != 0; k++, v <<= 1) { + if (v < 0) { + picoev_fd* fd = picoev.fds + k; + assert(fd->loop_id == loop->loop_id); + fd->timeout_idx = PICOEV_TIMEOUT_IDX_UNUSED; + 
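+                    /* the timeout slot for fd k was cleared above; now notify
+                       the application handler with PICOEV_TIMEOUT */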
(*fd->callback)(loop, k, PICOEV_TIMEOUT, fd->cb_arg); + } + } + vec[j] = 0; + } + } + vec_of_vec[i] = 0; + } + } + } + } + + /* loop once */ + PICOEV_INLINE + int picoev_loop_once(picoev_loop* loop, int max_wait) { + loop->now = time(NULL); + if (max_wait > loop->timeout.resolution) { + max_wait = loop->timeout.resolution; + } + if (picoev_poll_once_internal(loop, max_wait) != 0) { + return -1; + } + if (max_wait != 0) { + loop->now = time(NULL); + } + picoev_handle_timeout_internal(loop); + return 0; + } + +#undef PICOEV_INLINE + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/v_windows/v/old/thirdparty/picoev/src/picoev_epoll.c b/v_windows/v/old/thirdparty/picoev/src/picoev_epoll.c new file mode 100644 index 0000000..61bda4d --- /dev/null +++ b/v_windows/v/old/thirdparty/picoev/src/picoev_epoll.c @@ -0,0 +1,168 @@ +/* + * Copyright (c) 2009, Cybozu Labs, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * * Neither the name of the nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include +#include +#include +#include "picoev.h" + +#ifndef PICOEV_EPOLL_DEFER_DELETES +# define PICOEV_EPOLL_DEFER_DELETES 1 +#endif + +typedef struct picoev_loop_epoll_st { + picoev_loop loop; + int epfd; + struct epoll_event events[1024]; +} picoev_loop_epoll; + +picoev_globals picoev; + +picoev_loop* picoev_create_loop(int max_timeout) +{ + picoev_loop_epoll* loop; + + /* init parent */ + assert(PICOEV_IS_INITED); + if ((loop = (picoev_loop_epoll*)malloc(sizeof(picoev_loop_epoll))) == NULL) { + return NULL; + } + if (picoev_init_loop_internal(&loop->loop, max_timeout) != 0) { + free(loop); + return NULL; + } + + /* init myself */ + if ((loop->epfd = epoll_create(picoev.max_fd)) == -1) { + picoev_deinit_loop_internal(&loop->loop); + free(loop); + return NULL; + } + + loop->loop.now = time(NULL); + return &loop->loop; +} + +int picoev_destroy_loop(picoev_loop* _loop) +{ + picoev_loop_epoll* loop = (picoev_loop_epoll*)_loop; + + if (close(loop->epfd) != 0) { + return -1; + } + picoev_deinit_loop_internal(&loop->loop); + free(loop); + return 0; +} + +int picoev_update_events_internal(picoev_loop* _loop, int fd, int events) +{ + picoev_loop_epoll* loop = (picoev_loop_epoll*)_loop; + picoev_fd* target = picoev.fds + fd; + struct epoll_event ev; + int epoll_ret; + + memset( &ev, 0, sizeof( ev ) ); + assert(PICOEV_FD_BELONGS_TO_LOOP(&loop->loop, fd)); + + if ((events & PICOEV_READWRITE) == target->events) { + return 0; + } + + ev.events = ((events & PICOEV_READ) != 0 ? EPOLLIN : 0) + | ((events & PICOEV_WRITE) != 0 ? EPOLLOUT : 0); + ev.data.fd = fd; + +#define SET(op, check_error) do { \ + epoll_ret = epoll_ctl(loop->epfd, op, fd, &ev); \ + assert(! check_error || epoll_ret == 0); \ + } while (0) + +#if PICOEV_EPOLL_DEFER_DELETES + + if ((events & PICOEV_DEL) != 0) { + /* nothing to do */ + } else if ((events & PICOEV_READWRITE) == 0) { + SET(EPOLL_CTL_DEL, 1); + } else { + SET(EPOLL_CTL_MOD, 0); + if (epoll_ret != 0) { + assert(errno == ENOENT); + SET(EPOLL_CTL_ADD, 1); + } + } + +#else + + if ((events & PICOEV_READWRITE) == 0) { + SET(EPOLL_CTL_DEL, 1); + } else { + SET(target->events == 0 ? EPOLL_CTL_ADD : EPOLL_CTL_MOD, 1); + } + +#endif + +#undef SET + + target->events = events; + + return 0; +} + +int picoev_poll_once_internal(picoev_loop* _loop, int max_wait) +{ + picoev_loop_epoll* loop = (picoev_loop_epoll*)_loop; + int i, nevents; + + nevents = epoll_wait(loop->epfd, loop->events, + sizeof(loop->events) / sizeof(loop->events[0]), + max_wait * 1000); + if (nevents == -1) { + return -1; + } + for (i = 0; i < nevents; ++i) { + struct epoll_event* event = loop->events + i; + picoev_fd* target = picoev.fds + event->data.fd; + if (loop->loop.loop_id == target->loop_id + && (target->events & PICOEV_READWRITE) != 0) { + int revents = ((event->events & EPOLLIN) != 0 ? PICOEV_READ : 0) + | ((event->events & EPOLLOUT) != 0 ? PICOEV_WRITE : 0); + if (revents != 0) { + (*target->callback)(&loop->loop, event->data.fd, revents, + target->cb_arg); + } + } else { +#if PICOEV_EPOLL_DEFER_DELETES + event->events = 0; + epoll_ctl(loop->epfd, EPOLL_CTL_DEL, event->data.fd, event); +#endif + } + } + return 0; +} diff --git a/v_windows/v/old/thirdparty/picoev/src/picoev_kqueue.c b/v_windows/v/old/thirdparty/picoev/src/picoev_kqueue.c new file mode 100644 index 0000000..a45a052 --- /dev/null +++ b/v_windows/v/old/thirdparty/picoev/src/picoev_kqueue.c @@ -0,0 +1,209 @@ +/* + * Copyright (c) 2009, Cybozu Labs, Inc. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * * Neither the name of the nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include +#include +#include +#include +#include "picoev.h" + +#define EV_QUEUE_SZ 128 + +#define BACKEND_BUILD(next_fd, events) \ + ((unsigned)((next_fd << 8) | (events & 0xff))) +#define BACKEND_GET_NEXT_FD(backend) ((int)(backend) >> 8) +#define BACKEND_GET_OLD_EVENTS(backend) ((int)(backend) & 0xff) + +typedef struct picoev_loop_kqueue_st { + picoev_loop loop; + int kq; + int changed_fds; /* link list using picoev_fd::_backend, -1 if not changed */ + struct kevent events[1024]; + struct kevent changelist[256]; +} picoev_loop_kqueue; + +picoev_globals picoev; + +static int apply_pending_changes(picoev_loop_kqueue* loop, int apply_all) +{ +#define SET(op, events) \ + EV_SET(loop->changelist + cl_off++, loop->changed_fds, \ + (((events) & PICOEV_READ) != 0 ? EVFILT_READ : 0) \ + | (((events) & PICOEV_WRITE) != 0 ? 
EVFILT_WRITE : 0), \ + (op), 0, 0, NULL) + + int cl_off = 0, nevents; + + while (loop->changed_fds != -1) { + picoev_fd* changed = picoev.fds + loop->changed_fds; + int old_events = BACKEND_GET_OLD_EVENTS(changed->_backend); + if (changed->events != old_events) { + if (old_events != 0) { + SET(EV_DISABLE, old_events); + } + if (changed->events != 0) { + SET(EV_ADD | EV_ENABLE, changed->events); + } + if ((size_t)cl_off + 1 + >= sizeof(loop->changelist) / sizeof(loop->changelist[0])) { + nevents = kevent(loop->kq, loop->changelist, cl_off, NULL, 0, NULL); + assert(nevents == 0); + cl_off = 0; + } + } + loop->changed_fds = BACKEND_GET_NEXT_FD(changed->_backend); + changed->_backend = -1; + } + + if (apply_all && cl_off != 0) { + nevents = kevent(loop->kq, loop->changelist, cl_off, NULL, 0, NULL); + assert(nevents == 0); + cl_off = 0; + } + + return cl_off; + +#undef SET +} + +picoev_loop* picoev_create_loop(int max_timeout) +{ + picoev_loop_kqueue* loop; + + /* init parent */ + assert(PICOEV_IS_INITED); + if ((loop = (picoev_loop_kqueue*)malloc(sizeof(picoev_loop_kqueue))) + == NULL) { + return NULL; + } + if (picoev_init_loop_internal(&loop->loop, max_timeout) != 0) { + free(loop); + return NULL; + } + + /* init kqueue */ + if ((loop->kq = kqueue()) == -1) { + picoev_deinit_loop_internal(&loop->loop); + free(loop); + return NULL; + } + loop->changed_fds = -1; + + loop->loop.now = time(NULL); + return &loop->loop; +} + +int picoev_destroy_loop(picoev_loop* _loop) +{ + picoev_loop_kqueue* loop = (picoev_loop_kqueue*)_loop; + + if (close(loop->kq) != 0) { + return -1; + } + picoev_deinit_loop_internal(&loop->loop); + free(loop); + return 0; +} + +int picoev_update_events_internal(picoev_loop* _loop, int fd, int events) +{ + picoev_loop_kqueue* loop = (picoev_loop_kqueue*)_loop; + picoev_fd* target = picoev.fds + fd; + + assert(PICOEV_FD_BELONGS_TO_LOOP(&loop->loop, fd)); + + /* initialize if adding the fd */ + if ((events & PICOEV_ADD) != 0) { + target->_backend = -1; + } + /* return if nothing to do */ + if (events == PICOEV_DEL + ? 
target->_backend == -1 + : (events & PICOEV_READWRITE) == target->events) { + return 0; + } + /* add to changed list if not yet being done */ + if (target->_backend == -1) { + target->_backend = BACKEND_BUILD(loop->changed_fds, target->events); + loop->changed_fds = fd; + } + /* update events */ + target->events = events & PICOEV_READWRITE; + /* apply immediately if is a DELETE */ + if ((events & PICOEV_DEL) != 0) { + apply_pending_changes(loop, 1); + } + + return 0; +} + +int picoev_poll_once_internal(picoev_loop* _loop, int max_wait) +{ + picoev_loop_kqueue* loop = (picoev_loop_kqueue*)_loop; + struct timespec ts; + int cl_off = 0, nevents, i; + + /* apply pending changes, with last changes stored to loop->changelist */ + cl_off = apply_pending_changes(loop, 0); + + ts.tv_sec = max_wait; + ts.tv_nsec = 0; + nevents = kevent(loop->kq, loop->changelist, cl_off, loop->events, + sizeof(loop->events) / sizeof(loop->events[0]), &ts); + if (nevents == -1) { + /* the errors we can only rescue */ + assert(errno == EACCES || errno == EFAULT || errno == EINTR); + return -1; + } + for (i = 0; i < nevents; ++i) { + struct kevent* event = loop->events + i; + picoev_fd* target = picoev.fds + event->ident; + assert((event->flags & EV_ERROR) == 0); /* changelist errors are fatal */ + if (loop->loop.loop_id == target->loop_id + && (event->filter & (EVFILT_READ | EVFILT_WRITE)) != 0) { + int revents; + switch (event->filter) { + case EVFILT_READ: + revents = PICOEV_READ; + break; + case EVFILT_WRITE: + revents = PICOEV_WRITE; + break; + default: + assert(0); + revents = 0; // suppress compiler warning + break; + } + (*target->callback)(&loop->loop, event->ident, revents, target->cb_arg); + } + } + + return 0; +} diff --git a/v_windows/v/old/thirdparty/picoev/src/picoev_select.c b/v_windows/v/old/thirdparty/picoev/src/picoev_select.c new file mode 100644 index 0000000..afbe140 --- /dev/null +++ b/v_windows/v/old/thirdparty/picoev/src/picoev_select.c @@ -0,0 +1,169 @@ +/* + * Copyright (c) 2009, Cybozu Labs, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * * Neither the name of the nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef _WIN32 +# include +#else +# include +#endif + +#include "picoev.h" + +#ifdef _WIN32 +# define PICOEV_W32_INTERNAL +# include "picoev_w32.h" +# define PICOEV_FD_SET(x, y) FD_SET(picoev_w32_fd2sock(x), y) +# define PICOEV_FD_ISSET(x, y) FD_ISSET(picoev_w32_fd2sock(x), y) + +typedef struct picoev_w32_globals_st { + int* fds; + void* _fds_free_addr; +} picoev_w32_globals; + +picoev_w32_globals picoev_w32; + +int picoev_w32_sock2fd(int sock) { + int i; + for (i = 0; i < picoev.max_fd && picoev_w32.fds[i]; ++i) + if (picoev_w32.fds[i] == sock) return i; + assert(PICOEV_IS_INITED_AND_FD_IN_RANGE(i)); + picoev_w32.fds[i] = sock; + return i; +} + +int picoev_w32_fd2sock(int fd) { + assert(PICOEV_IS_INITED_AND_FD_IN_RANGE(fd)); + return picoev_w32.fds[fd]; +} + +extern int picoev_w32_deinit(void); + +int picoev_w32_init(int max_fd) { + int r = picoev_init(max_fd); + if ((picoev_w32.fds = (int*)picoev_memalign(sizeof(int) * max_fd, + &picoev_w32._fds_free_addr, 1)) + == NULL) { + picoev_deinit(); + return -1; + } +} + +int picoev_w32_deinit(void) { + free(picoev_w32._fds_free_addr); + picoev_w32.fds = NULL; + picoev_w32._fds_free_addr = NULL; + return picoev_deinit(); +} + +#else +# define PICOEV_FD_SET(x, y) FD_SET(x, y) +# define PICOEV_FD_ISSET(x, y) FD_ISSET(x, y) +#endif + +picoev_globals picoev; + +picoev_loop* picoev_create_loop(int max_timeout) +{ + picoev_loop* loop; + + assert(PICOEV_IS_INITED); + if ((loop = (picoev_loop*)malloc(sizeof(picoev_loop))) == NULL) { + return NULL; + } + if (picoev_init_loop_internal(loop, max_timeout) != 0) { + free(loop); + return NULL; + } + + loop->now = time(NULL); + return loop; +} + +int picoev_destroy_loop(picoev_loop* loop) +{ + picoev_deinit_loop_internal(loop); + free(loop); + return 0; +} + +int picoev_update_events_internal(picoev_loop* loop, int fd, int events) +{ + picoev.fds[fd].events = events & PICOEV_READWRITE; + return 0; +} + +int picoev_poll_once_internal(picoev_loop* loop, int max_wait) +{ + fd_set readfds, writefds, errorfds; + struct timeval tv; + int i, r, maxfd = 0; + + /* setup */ + FD_ZERO(&readfds); + FD_ZERO(&writefds); + FD_ZERO(&errorfds); + for (i = 0; i < picoev.max_fd; ++i) { + picoev_fd* fd = picoev.fds + i; + if (fd->loop_id == loop->loop_id) { + if ((fd->events & PICOEV_READ) != 0) { + PICOEV_FD_SET(i, &readfds); + if (maxfd < i) { + maxfd = i; + } + } + if ((fd->events & PICOEV_WRITE) != 0) { + PICOEV_FD_SET(i, &writefds); + if (maxfd < i) { + maxfd = i; + } + } + } + } + + /* select and handle if any */ + tv.tv_sec = max_wait; + tv.tv_usec = 0; + r = select(maxfd + 1, &readfds, &writefds, &errorfds, &tv); + if (r == -1) { + return -1; + } else if (r > 0) { + for (i = 0; i < picoev.max_fd; ++i) { + picoev_fd* target = picoev.fds + i; + if (target->loop_id == loop->loop_id) { + int revents = (PICOEV_FD_ISSET(i, &readfds) ? PICOEV_READ : 0) + | (PICOEV_FD_ISSET(i, &writefds) ? 
PICOEV_WRITE : 0); + if (revents != 0) { + (*target->callback)(loop, i, revents, target->cb_arg); + } + } + } + } + + return 0; +} diff --git a/v_windows/v/old/thirdparty/picoev/src/picoev_w32.h b/v_windows/v/old/thirdparty/picoev/src/picoev_w32.h new file mode 100644 index 0000000..b7e0cc4 --- /dev/null +++ b/v_windows/v/old/thirdparty/picoev/src/picoev_w32.h @@ -0,0 +1,15 @@ +#ifndef picoev_w32_h +#define picoev_w32_h + +#include "picoev.h" + +#ifndef PICOEV_W32_INTERNAL +extern int picoev_w32_init(int); +extern int picoev_w32_deinit(void); +extern int picoev_w32_sock2fd(int); +extern int picoev_w32_fd2sock(int); +# define picoev_init picoev_w32_init +# define picoev_deinit picoev_w32_deinit +#endif + +#endif diff --git a/v_windows/v/old/thirdparty/picohttpparser/picohttpparser.c b/v_windows/v/old/thirdparty/picohttpparser/picohttpparser.c new file mode 100644 index 0000000..7a6abd0 --- /dev/null +++ b/v_windows/v/old/thirdparty/picohttpparser/picohttpparser.c @@ -0,0 +1,28 @@ +#include "src/picohttpparser.c" + +#if !defined(__WINDOWS__) && (defined(WIN32) || defined(WIN64) || defined(_MSC_VER) || defined(_WIN32)) +#define __WINDOWS__ +#endif + +// date +#include + +const char* get_date() { + time_t t; + struct tm tm; + static const char *days[] = {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}; + static const char *months[] = {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"}; + static char date[30] = "Thu, 01 Jan 1970 00:00:00 GMT"; + + time(&t); + #ifdef __WINDOWS__ + gmtime_s(&t, &tm); + #else + gmtime_r(&t, &tm); + #endif + strftime(date, 30, "---, %d --- %Y %H:%M:%S GMT", &tm); + memcpy(date, days[tm.tm_wday], 3); + memcpy(date + 8, months[tm.tm_mon], 3); + + return date; +} diff --git a/v_windows/v/old/thirdparty/picohttpparser/picohttpparser.h b/v_windows/v/old/thirdparty/picohttpparser/picohttpparser.h new file mode 100644 index 0000000..34faaf6 --- /dev/null +++ b/v_windows/v/old/thirdparty/picohttpparser/picohttpparser.h @@ -0,0 +1,112 @@ + +typedef struct phr_header phr_header_t; + +#include "src/picohttpparser.h" +#include + +#define ADVANCE_TOKEN2(buf, tok, toklen, max_len) \ + do {\ + for (int i = 0; i < max_len; i++) {\ + if (buf[i] == ' ') {\ + tok = buf;\ + toklen = i++;\ + while (buf[i] == ' ') i++;\ + buf += i;\ + break;\ + }\ + }\ + } while (0) + +static inline int phr_parse_request_path(const char *buf_start, size_t len, const char **method, size_t *method_len, const char **path, size_t *path_len) { + if (len < 14) return -2; + const char *buf = buf_start, *buf_end = buf_start + len; + + ADVANCE_TOKEN2(buf, *method, *method_len, 9); + len -= buf-buf_start; + ADVANCE_TOKEN2(buf, *path, *path_len, len); + if (*method_len == 0 || *path_len == 0) return -1; + + return buf-buf_start; +} + +static inline int phr_parse_request_path_pipeline(const char *buf_start, size_t len, const char **method, size_t *method_len, const char **path, size_t *path_len) { + if (len < 14) return -2; + const char *buf = buf_start, *buf_end = buf_start + len; + + ADVANCE_TOKEN2(buf, *method, *method_len, 9); + len -= buf-buf_start; + ADVANCE_TOKEN2(buf, *path, *path_len, len); + if (*method_len == 0 || *path_len == 0) return -1; + + while (buf < buf_end) { + ++buf; + if (*(uint32_t*)buf == 0x0a0d0a0d) { + buf += 4; + return (int)(buf - buf_start); + } + }; + + return -1; +} + +// date +const char* get_date(); + +// branchlut +const char gDigitsLut[200] = { + '0','0','0','1','0','2','0','3','0','4','0','5','0','6','0','7','0','8','0','9', + 
'1','0','1','1','1','2','1','3','1','4','1','5','1','6','1','7','1','8','1','9', + '2','0','2','1','2','2','2','3','2','4','2','5','2','6','2','7','2','8','2','9', + '3','0','3','1','3','2','3','3','3','4','3','5','3','6','3','7','3','8','3','9', + '4','0','4','1','4','2','4','3','4','4','4','5','4','6','4','7','4','8','4','9', + '5','0','5','1','5','2','5','3','5','4','5','5','5','6','5','7','5','8','5','9', + '6','0','6','1','6','2','6','3','6','4','6','5','6','6','6','7','6','8','6','9', + '7','0','7','1','7','2','7','3','7','4','7','5','7','6','7','7','7','8','7','9', + '8','0','8','1','8','2','8','3','8','4','8','5','8','6','8','7','8','8','8','9', + '9','0','9','1','9','2','9','3','9','4','9','5','9','6','9','7','9','8','9','9' +}; + +static inline int u64toa(char* buf, uint64_t value) { + const char* b = buf; + if (value < 100000000) { + uint32_t v = (uint32_t)(value); + if (v < 10000) { + const uint32_t d1 = (v / 100) << 1; + const uint32_t d2 = (v % 100) << 1; + + if (v >= 1000) + *buf++ = gDigitsLut[d1]; + if (v >= 100) + *buf++ = gDigitsLut[d1 + 1]; + if (v >= 10) + *buf++ = gDigitsLut[d2]; + *buf++ = gDigitsLut[d2 + 1]; + } + else { + // value = bbbbcccc + const uint32_t b = v / 10000; + const uint32_t c = v % 10000; + + const uint32_t d1 = (b / 100) << 1; + const uint32_t d2 = (b % 100) << 1; + + const uint32_t d3 = (c / 100) << 1; + const uint32_t d4 = (c % 100) << 1; + + if (value >= 10000000) + *buf++ = gDigitsLut[d1]; + if (value >= 1000000) + *buf++ = gDigitsLut[d1 + 1]; + if (value >= 100000) + *buf++ = gDigitsLut[d2]; + *buf++ = gDigitsLut[d2 + 1]; + + *buf++ = gDigitsLut[d3]; + *buf++ = gDigitsLut[d3 + 1]; + *buf++ = gDigitsLut[d4]; + *buf++ = gDigitsLut[d4 + 1]; + } + } + // *buf = '\0'; + return buf - b; +} diff --git a/v_windows/v/old/thirdparty/picohttpparser/src/README.md b/v_windows/v/old/thirdparty/picohttpparser/src/README.md new file mode 100644 index 0000000..cb32f58 --- /dev/null +++ b/v_windows/v/old/thirdparty/picohttpparser/src/README.md @@ -0,0 +1,116 @@ +PicoHTTPParser +============= + +Copyright (c) 2009-2014 [Kazuho Oku](https://github.com/kazuho), [Tokuhiro Matsuno](https://github.com/tokuhirom), [Daisuke Murase](https://github.com/typester), [Shigeo Mitsunari](https://github.com/herumi) + +PicoHTTPParser is a tiny, primitive, fast HTTP request/response parser. + +Unlike most parsers, it is stateless and does not allocate memory by itself. +All it does is accept pointer to buffer and the output structure, and setups the pointers in the latter to point at the necessary portions of the buffer. + +The code is widely deployed within Perl applications through popular modules that use it, including [Plack](https://metacpan.org/pod/Plack), [Starman](https://metacpan.org/pod/Starman), [Starlet](https://metacpan.org/pod/Starlet), [Furl](https://metacpan.org/pod/Furl). It is also the HTTP/1 parser of [H2O](https://github.com/h2o/h2o). + +Check out [test.c] to find out how to use the parser. + +The software is dual-licensed under the Perl License or the MIT License. + +Usage +----- + +The library exposes four functions: `phr_parse_request`, `phr_parse_response`, `phr_parse_headers`, `phr_decode_chunked`. + +### phr_parse_request + +The example below reads an HTTP request from socket `sock` using `read(2)`, parses it using `phr_parse_request`, and prints the details. 
+ +```c +char buf[4096], *method, *path; +int pret, minor_version; +struct phr_header headers[100]; +size_t buflen = 0, prevbuflen = 0, method_len, path_len, num_headers; +ssize_t rret; + +while (1) { + /* read the request */ + while ((rret = read(sock, buf + buflen, sizeof(buf) - buflen)) == -1 && errno == EINTR) + ; + if (rret <= 0) + return IOError; + prevbuflen = buflen; + buflen += rret; + /* parse the request */ + num_headers = sizeof(headers) / sizeof(headers[0]); + pret = phr_parse_request(buf, buflen, &method, &method_len, &path, &path_len, + &minor_version, headers, &num_headers, prevbuflen); + if (pret > 0) + break; /* successfully parsed the request */ + else if (pret == -1) + return ParseError; + /* request is incomplete, continue the loop */ + assert(pret == -2); + if (buflen == sizeof(buf)) + return RequestIsTooLongError; +} + +printf("request is %d bytes long\n", pret); +printf("method is %.*s\n", (int)method_len, method); +printf("path is %.*s\n", (int)path_len, path); +printf("HTTP version is 1.%d\n", minor_version); +printf("headers:\n"); +for (i = 0; i != num_headers; ++i) { + printf("%.*s: %.*s\n", (int)headers[i].name_len, headers[i].name, + (int)headers[i].value_len, headers[i].value); +} +``` + +### phr_parse_response, phr_parse_headers + +`phr_parse_response` and `phr_parse_headers` provide similar interfaces as `phr_parse_request`. `phr_parse_response` parses an HTTP response, and `phr_parse_headers` parses the headers only. + +### phr_decode_chunked + +The example below decodes incoming data in chunked-encoding. The data is decoded in-place. + +```c +struct phr_chunked_decoder decoder = {}; /* zero-clear */ +char *buf = malloc(4096); +size_t size = 0, capacity = 4096, rsize; +ssize_t rret, pret; + +/* set consume_trailer to 1 to discard the trailing header, or the application + * should call phr_parse_headers to parse the trailing header */ +decoder.consume_trailer = 1; + +do { + /* expand the buffer if necessary */ + if (size == capacity) { + capacity *= 2; + buf = realloc(buf, capacity); + assert(buf != NULL); + } + /* read */ + while ((rret = read(sock, buf + size, capacity - size)) == -1 && errno == EINTR) + ; + if (rret <= 0) + return IOError; + /* decode */ + rsize = rret; + pret = phr_decode_chunked(&decoder, buf + size, &rsize); + if (pret == -1) + return ParseError; + size += rsize; +} while (pret == -2); + +/* successfully decoded the chunked data */ +assert(pret >= 0); +printf("decoded data is at %p (%zu bytes)\n", buf, size); +``` + +Benchmark +--------- + +![benchmark results](http://i.gyazo.com/a85c18d3162dfb46b485bb41e0ad443a.png) + +The benchmark code is from [fukamachi/fast-http@6b91103](https://github.com/fukamachi/fast-http/tree/6b9110347c7a3407310c08979aefd65078518478). + +The internals of picohttpparser has been described to some extent in [my blog entry]( http://blog.kazuhooku.com/2014/11/the-internals-h2o-or-how-to-write-fast.html). diff --git a/v_windows/v/old/thirdparty/picohttpparser/src/picohttpparser.c b/v_windows/v/old/thirdparty/picohttpparser/src/picohttpparser.c new file mode 100644 index 0000000..74ccc3e --- /dev/null +++ b/v_windows/v/old/thirdparty/picohttpparser/src/picohttpparser.c @@ -0,0 +1,645 @@ +/* + * Copyright (c) 2009-2014 Kazuho Oku, Tokuhiro Matsuno, Daisuke Murase, + * Shigeo Mitsunari + * + * The software is licensed under either the MIT License (below) or the Perl + * license. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#include +#include +#include +#ifdef __SSE4_2__ +#ifdef _MSC_VER +#include +#else +#include +#endif +#endif +#include "picohttpparser.h" + +#if __GNUC__ >= 3 +#define likely(x) __builtin_expect(!!(x), 1) +#define unlikely(x) __builtin_expect(!!(x), 0) +#else +#define likely(x) (x) +#define unlikely(x) (x) +#endif + +#ifdef _MSC_VER +#define ALIGNED(n) _declspec(align(n)) +#else +#define ALIGNED(n) __attribute__((aligned(n))) +#endif + +#define IS_PRINTABLE_ASCII(c) ((unsigned char)(c)-040u < 0137u) + +#define CHECK_EOF() \ + if (buf == buf_end) { \ + *ret = -2; \ + return NULL; \ + } + +#define EXPECT_CHAR_NO_CHECK(ch) \ + if (*buf++ != ch) { \ + *ret = -1; \ + return NULL; \ + } + +#define EXPECT_CHAR(ch) \ + CHECK_EOF(); \ + EXPECT_CHAR_NO_CHECK(ch); + +#define ADVANCE_TOKEN(tok, toklen) \ + do { \ + const char *tok_start = buf; \ + static const char ALIGNED(16) ranges2[16] = "\000\040\177\177"; \ + int found2; \ + buf = findchar_fast(buf, buf_end, ranges2, 4, &found2); \ + if (!found2) { \ + CHECK_EOF(); \ + } \ + while (1) { \ + if (*buf == ' ') { \ + break; \ + } else if (unlikely(!IS_PRINTABLE_ASCII(*buf))) { \ + if ((unsigned char)*buf < '\040' || *buf == '\177') { \ + *ret = -1; \ + return NULL; \ + } \ + } \ + ++buf; \ + CHECK_EOF(); \ + } \ + tok = tok_start; \ + toklen = buf - tok_start; \ + } while (0) + +static const char *token_char_map = "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0" + "\0\1\0\1\1\1\1\1\0\0\1\1\0\1\1\0\1\1\1\1\1\1\1\1\1\1\0\0\0\0\0\0" + "\0\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\0\0\0\1\1" + "\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\0\1\0\1\0" + "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0" + "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0" + "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0" + "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"; + +static const char *findchar_fast(const char *buf, const char *buf_end, const char *ranges, size_t ranges_size, int *found) +{ + *found = 0; +#if __SSE4_2__ + if (likely(buf_end - buf >= 16)) { + __m128i ranges16 = _mm_loadu_si128((const __m128i *)ranges); + + size_t left = (buf_end - buf) & ~15; + do { + __m128i b16 = _mm_loadu_si128((const __m128i *)buf); + int r = _mm_cmpestri(ranges16, ranges_size, b16, 16, _SIDD_LEAST_SIGNIFICANT | _SIDD_CMP_RANGES | _SIDD_UBYTE_OPS); + if (unlikely(r != 16)) { + buf += r; + *found 
= 1; + break; + } + buf += 16; + left -= 16; + } while (likely(left != 0)); + } +#else + /* suppress unused parameter warning */ + (void)buf_end; + (void)ranges; + (void)ranges_size; +#endif + return buf; +} + +static const char *get_token_to_eol(const char *buf, const char *buf_end, const char **token, size_t *token_len, int *ret) +{ + const char *token_start = buf; + +#ifdef __SSE4_2__ + static const char ALIGNED(16) ranges1[16] = "\0\010" /* allow HT */ + "\012\037" /* allow SP and up to but not including DEL */ + "\177\177"; /* allow chars w. MSB set */ + int found; + buf = findchar_fast(buf, buf_end, ranges1, 6, &found); + if (found) + goto FOUND_CTL; +#else + /* find non-printable char within the next 8 bytes, this is the hottest code; manually inlined */ + while (likely(buf_end - buf >= 8)) { +#define DOIT() \ + do { \ + if (unlikely(!IS_PRINTABLE_ASCII(*buf))) \ + goto NonPrintable; \ + ++buf; \ + } while (0) + DOIT(); + DOIT(); + DOIT(); + DOIT(); + DOIT(); + DOIT(); + DOIT(); + DOIT(); +#undef DOIT + continue; + NonPrintable: + if ((likely((unsigned char)*buf < '\040') && likely(*buf != '\011')) || unlikely(*buf == '\177')) { + goto FOUND_CTL; + } + ++buf; + } +#endif + for (;; ++buf) { + CHECK_EOF(); + if (unlikely(!IS_PRINTABLE_ASCII(*buf))) { + if ((likely((unsigned char)*buf < '\040') && likely(*buf != '\011')) || unlikely(*buf == '\177')) { + goto FOUND_CTL; + } + } + } +FOUND_CTL: + if (likely(*buf == '\015')) { + ++buf; + EXPECT_CHAR('\012'); + *token_len = buf - 2 - token_start; + } else if (*buf == '\012') { + *token_len = buf - token_start; + ++buf; + } else { + *ret = -1; + return NULL; + } + *token = token_start; + + return buf; +} + +static const char *is_complete(const char *buf, const char *buf_end, size_t last_len, int *ret) +{ + int ret_cnt = 0; + buf = last_len < 3 ? buf : buf + last_len - 3; + + while (1) { + CHECK_EOF(); + if (*buf == '\015') { + ++buf; + CHECK_EOF(); + EXPECT_CHAR('\012'); + ++ret_cnt; + } else if (*buf == '\012') { + ++buf; + ++ret_cnt; + } else { + ++buf; + ret_cnt = 0; + } + if (ret_cnt == 2) { + return buf; + } + } + + *ret = -2; + return NULL; +} + +#define PARSE_INT(valp_, mul_) \ + if (*buf < '0' || '9' < *buf) { \ + buf++; \ + *ret = -1; \ + return NULL; \ + } \ + *(valp_) = (mul_) * (*buf++ - '0'); + +#define PARSE_INT_3(valp_) \ + do { \ + int res_ = 0; \ + PARSE_INT(&res_, 100) \ + *valp_ = res_; \ + PARSE_INT(&res_, 10) \ + *valp_ += res_; \ + PARSE_INT(&res_, 1) \ + *valp_ += res_; \ + } while (0) + +/* returned pointer is always within [buf, buf_end), or null */ +static const char *parse_http_version(const char *buf, const char *buf_end, int *minor_version, int *ret) +{ + /* we want at least [HTTP/1.] 
to try to parse */ + if (buf_end - buf < 9) { + *ret = -2; + return NULL; + } + EXPECT_CHAR_NO_CHECK('H'); + EXPECT_CHAR_NO_CHECK('T'); + EXPECT_CHAR_NO_CHECK('T'); + EXPECT_CHAR_NO_CHECK('P'); + EXPECT_CHAR_NO_CHECK('/'); + EXPECT_CHAR_NO_CHECK('1'); + EXPECT_CHAR_NO_CHECK('.'); + PARSE_INT(minor_version, 1); + return buf; +} + +static const char *parse_headers(const char *buf, const char *buf_end, struct phr_header *headers, size_t *num_headers, + size_t max_headers, int *ret) +{ + for (;; ++*num_headers) { + CHECK_EOF(); + if (*buf == '\015') { + ++buf; + EXPECT_CHAR('\012'); + break; + } else if (*buf == '\012') { + ++buf; + break; + } + if (*num_headers == max_headers) { + *ret = -1; + return NULL; + } + if (!(*num_headers != 0 && (*buf == ' ' || *buf == '\t'))) { + /* parsing name, but do not discard SP before colon, see + * http://www.mozilla.org/security/announce/2006/mfsa2006-33.html */ + headers[*num_headers].name = buf; + static const char ALIGNED(16) ranges1[] = "\x00 " /* control chars and up to SP */ + "\"\"" /* 0x22 */ + "()" /* 0x28,0x29 */ + ",," /* 0x2c */ + "//" /* 0x2f */ + ":@" /* 0x3a-0x40 */ + "[]" /* 0x5b-0x5d */ + "{\377"; /* 0x7b-0xff */ + int found; + buf = findchar_fast(buf, buf_end, ranges1, sizeof(ranges1) - 1, &found); + if (!found) { + CHECK_EOF(); + } + while (1) { + if (*buf == ':') { + break; + } else if (!token_char_map[(unsigned char)*buf]) { + *ret = -1; + return NULL; + } + ++buf; + CHECK_EOF(); + } + if ((headers[*num_headers].name_len = buf - headers[*num_headers].name) == 0) { + *ret = -1; + return NULL; + } + ++buf; + for (;; ++buf) { + CHECK_EOF(); + if (!(*buf == ' ' || *buf == '\t')) { + break; + } + } + } else { + headers[*num_headers].name = NULL; + headers[*num_headers].name_len = 0; + } + const char *value; + size_t value_len; + if ((buf = get_token_to_eol(buf, buf_end, &value, &value_len, ret)) == NULL) { + return NULL; + } + /* remove trailing SPs and HTABs */ + const char *value_end = value + value_len; + for (; value_end != value; --value_end) { + const char c = *(value_end - 1); + if (!(c == ' ' || c == '\t')) { + break; + } + } + headers[*num_headers].value = value; + headers[*num_headers].value_len = value_end - value; + } + return buf; +} + +static const char *parse_request(const char *buf, const char *buf_end, const char **method, size_t *method_len, const char **path, + size_t *path_len, int *minor_version, struct phr_header *headers, size_t *num_headers, + size_t max_headers, int *ret) +{ + /* skip first empty line (some clients add CRLF after POST content) */ + CHECK_EOF(); + if (*buf == '\015') { + ++buf; + EXPECT_CHAR('\012'); + } else if (*buf == '\012') { + ++buf; + } + + /* parse request line */ + ADVANCE_TOKEN(*method, *method_len); + do { + ++buf; + } while (*buf == ' '); + ADVANCE_TOKEN(*path, *path_len); + do { + ++buf; + } while (*buf == ' '); + if (*method_len == 0 || *path_len == 0) { + *ret = -1; + return NULL; + } + if ((buf = parse_http_version(buf, buf_end, minor_version, ret)) == NULL) { + return NULL; + } + if (*buf == '\015') { + ++buf; + EXPECT_CHAR('\012'); + } else if (*buf == '\012') { + ++buf; + } else { + *ret = -1; + return NULL; + } + + return parse_headers(buf, buf_end, headers, num_headers, max_headers, ret); +} + +int phr_parse_request(const char *buf_start, size_t len, const char **method, size_t *method_len, const char **path, + size_t *path_len, int *minor_version, struct phr_header *headers, size_t *num_headers, size_t last_len) +{ + const char *buf = buf_start, *buf_end = buf_start + len; + 
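+    /* *num_headers carries the capacity of the headers array on entry;
+       remember it here because the count is reset to zero below */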
size_t max_headers = *num_headers; + int r; + + *method = NULL; + *method_len = 0; + *path = NULL; + *path_len = 0; + *minor_version = -1; + *num_headers = 0; + + /* if last_len != 0, check if the request is complete (a fast countermeasure + againt slowloris */ + if (last_len != 0 && is_complete(buf, buf_end, last_len, &r) == NULL) { + return r; + } + + if ((buf = parse_request(buf, buf_end, method, method_len, path, path_len, minor_version, headers, num_headers, max_headers, + &r)) == NULL) { + return r; + } + + return (int)(buf - buf_start); +} + +static const char *parse_response(const char *buf, const char *buf_end, int *minor_version, int *status, const char **msg, + size_t *msg_len, struct phr_header *headers, size_t *num_headers, size_t max_headers, int *ret) +{ + /* parse "HTTP/1.x" */ + if ((buf = parse_http_version(buf, buf_end, minor_version, ret)) == NULL) { + return NULL; + } + /* skip space */ + if (*buf != ' ') { + *ret = -1; + return NULL; + } + do { + ++buf; + } while (*buf == ' '); + /* parse status code, we want at least [:digit:][:digit:][:digit:] to try to parse */ + if (buf_end - buf < 4) { + *ret = -2; + return NULL; + } + PARSE_INT_3(status); + + /* get message includig preceding space */ + if ((buf = get_token_to_eol(buf, buf_end, msg, msg_len, ret)) == NULL) { + return NULL; + } + if (*msg_len == 0) { + /* ok */ + } else if (**msg == ' ') { + /* remove preceding space */ + do { + ++*msg; + --*msg_len; + } while (**msg == ' '); + } else { + /* garbage found after status code */ + *ret = -1; + return NULL; + } + + return parse_headers(buf, buf_end, headers, num_headers, max_headers, ret); +} + +int phr_parse_response(const char *buf_start, size_t len, int *minor_version, int *status, const char **msg, size_t *msg_len, + struct phr_header *headers, size_t *num_headers, size_t last_len) +{ + const char *buf = buf_start, *buf_end = buf + len; + size_t max_headers = *num_headers; + int r; + + *minor_version = -1; + *status = 0; + *msg = NULL; + *msg_len = 0; + *num_headers = 0; + + /* if last_len != 0, check if the response is complete (a fast countermeasure + against slowloris */ + if (last_len != 0 && is_complete(buf, buf_end, last_len, &r) == NULL) { + return r; + } + + if ((buf = parse_response(buf, buf_end, minor_version, status, msg, msg_len, headers, num_headers, max_headers, &r)) == NULL) { + return r; + } + + return (int)(buf - buf_start); +} + +int phr_parse_headers(const char *buf_start, size_t len, struct phr_header *headers, size_t *num_headers, size_t last_len) +{ + const char *buf = buf_start, *buf_end = buf + len; + size_t max_headers = *num_headers; + int r; + + *num_headers = 0; + + /* if last_len != 0, check if the response is complete (a fast countermeasure + against slowloris */ + if (last_len != 0 && is_complete(buf, buf_end, last_len, &r) == NULL) { + return r; + } + + if ((buf = parse_headers(buf, buf_end, headers, num_headers, max_headers, &r)) == NULL) { + return r; + } + + return (int)(buf - buf_start); +} + +enum { + CHUNKED_IN_CHUNK_SIZE, + CHUNKED_IN_CHUNK_EXT, + CHUNKED_IN_CHUNK_DATA, + CHUNKED_IN_CHUNK_CRLF, + CHUNKED_IN_TRAILERS_LINE_HEAD, + CHUNKED_IN_TRAILERS_LINE_MIDDLE +}; + +static int decode_hex(int ch) +{ + if ('0' <= ch && ch <= '9') { + return ch - '0'; + } else if ('A' <= ch && ch <= 'F') { + return ch - 'A' + 0xa; + } else if ('a' <= ch && ch <= 'f') { + return ch - 'a' + 0xa; + } else { + return -1; + } +} + +ssize_t phr_decode_chunked(struct phr_chunked_decoder *decoder, char *buf, size_t *_bufsz) +{ + size_t dst = 0, src = 
0, bufsz = *_bufsz; + ssize_t ret = -2; /* incomplete */ + + while (1) { + switch (decoder->_state) { + case CHUNKED_IN_CHUNK_SIZE: + for (;; ++src) { + int v; + if (src == bufsz) + goto Exit; + if ((v = decode_hex(buf[src])) == -1) { + if (decoder->_hex_count == 0) { + ret = -1; + goto Exit; + } + break; + } + if (decoder->_hex_count == sizeof(size_t) * 2) { + ret = -1; + goto Exit; + } + decoder->bytes_left_in_chunk = decoder->bytes_left_in_chunk * 16 + v; + ++decoder->_hex_count; + } + decoder->_hex_count = 0; + decoder->_state = CHUNKED_IN_CHUNK_EXT; + /* fallthru */ + case CHUNKED_IN_CHUNK_EXT: + /* RFC 7230 A.2 "Line folding in chunk extensions is disallowed" */ + for (;; ++src) { + if (src == bufsz) + goto Exit; + if (buf[src] == '\012') + break; + } + ++src; + if (decoder->bytes_left_in_chunk == 0) { + if (decoder->consume_trailer) { + decoder->_state = CHUNKED_IN_TRAILERS_LINE_HEAD; + break; + } else { + goto Complete; + } + } + decoder->_state = CHUNKED_IN_CHUNK_DATA; + /* fallthru */ + case CHUNKED_IN_CHUNK_DATA: { + size_t avail = bufsz - src; + if (avail < decoder->bytes_left_in_chunk) { + if (dst != src) + memmove(buf + dst, buf + src, avail); + src += avail; + dst += avail; + decoder->bytes_left_in_chunk -= avail; + goto Exit; + } + if (dst != src) + memmove(buf + dst, buf + src, decoder->bytes_left_in_chunk); + src += decoder->bytes_left_in_chunk; + dst += decoder->bytes_left_in_chunk; + decoder->bytes_left_in_chunk = 0; + decoder->_state = CHUNKED_IN_CHUNK_CRLF; + } + /* fallthru */ + case CHUNKED_IN_CHUNK_CRLF: + for (;; ++src) { + if (src == bufsz) + goto Exit; + if (buf[src] != '\015') + break; + } + if (buf[src] != '\012') { + ret = -1; + goto Exit; + } + ++src; + decoder->_state = CHUNKED_IN_CHUNK_SIZE; + break; + case CHUNKED_IN_TRAILERS_LINE_HEAD: + for (;; ++src) { + if (src == bufsz) + goto Exit; + if (buf[src] != '\015') + break; + } + if (buf[src++] == '\012') + goto Complete; + decoder->_state = CHUNKED_IN_TRAILERS_LINE_MIDDLE; + /* fallthru */ + case CHUNKED_IN_TRAILERS_LINE_MIDDLE: + for (;; ++src) { + if (src == bufsz) + goto Exit; + if (buf[src] == '\012') + break; + } + ++src; + decoder->_state = CHUNKED_IN_TRAILERS_LINE_HEAD; + break; + default: + assert(!"decoder is corrupt"); + } + } + +Complete: + ret = bufsz - src; +Exit: + if (dst != src) + memmove(buf + dst, buf + src, bufsz - src); + *_bufsz = dst; + return ret; +} + +int phr_decode_chunked_is_in_data(struct phr_chunked_decoder *decoder) +{ + return decoder->_state == CHUNKED_IN_CHUNK_DATA; +} + +#undef CHECK_EOF +#undef EXPECT_CHAR +#undef ADVANCE_TOKEN diff --git a/v_windows/v/old/thirdparty/picohttpparser/src/picohttpparser.h b/v_windows/v/old/thirdparty/picohttpparser/src/picohttpparser.h new file mode 100644 index 0000000..8121b12 --- /dev/null +++ b/v_windows/v/old/thirdparty/picohttpparser/src/picohttpparser.h @@ -0,0 +1,87 @@ +/* + * Copyright (c) 2009-2014 Kazuho Oku, Tokuhiro Matsuno, Daisuke Murase, + * Shigeo Mitsunari + * + * The software is licensed under either the MIT License (below) or the Perl + * license. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#ifndef picohttpparser_h +#define picohttpparser_h + +#include + +#ifdef _MSC_VER +#define ssize_t intptr_t +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +/* contains name and value of a header (name == NULL if is a continuing line + * of a multiline header */ +typedef struct phr_header { + const char *name; + size_t name_len; + const char *value; + size_t value_len; +}phr_header; + +/* returns number of bytes consumed if successful, -2 if request is partial, + * -1 if failed */ +int phr_parse_request(const char *buf, size_t len, const char **method, size_t *method_len, const char **path, size_t *path_len, + int *minor_version, struct phr_header *headers, size_t *num_headers, size_t last_len); + +/* ditto */ +int phr_parse_response(const char *_buf, size_t len, int *minor_version, int *status, const char **msg, size_t *msg_len, + struct phr_header *headers, size_t *num_headers, size_t last_len); + +/* ditto */ +int phr_parse_headers(const char *buf, size_t len, struct phr_header *headers, size_t *num_headers, size_t last_len); + +/* should be zero-filled before start */ +struct phr_chunked_decoder { + size_t bytes_left_in_chunk; /* number of bytes left in current chunk */ + char consume_trailer; /* if trailing headers should be consumed */ + char _hex_count; + char _state; +}; + +/* the function rewrites the buffer given as (buf, bufsz) removing the chunked- + * encoding headers. When the function returns without an error, bufsz is + * updated to the length of the decoded data available. Applications should + * repeatedly call the function while it returns -2 (incomplete) every time + * supplying newly arrived data. If the end of the chunked-encoded data is + * found, the function returns a non-negative number indicating the number of + * octets left undecoded at the tail of the supplied buffer. Returns -1 on + * error. 
+ */ +ssize_t phr_decode_chunked(struct phr_chunked_decoder *decoder, char *buf, size_t *bufsz); + +/* returns if the chunked decoder is in middle of chunked data */ +int phr_decode_chunked_is_in_data(struct phr_chunked_decoder *decoder); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/v_windows/v/old/thirdparty/sokol/sokol_app.h b/v_windows/v/old/thirdparty/sokol/sokol_app.h new file mode 100644 index 0000000..ef4ac70 --- /dev/null +++ b/v_windows/v/old/thirdparty/sokol/sokol_app.h @@ -0,0 +1,10475 @@ +#if defined(SOKOL_IMPL) && !defined(SOKOL_APP_IMPL) +#define SOKOL_APP_IMPL +#endif +#ifndef SOKOL_APP_INCLUDED +/* + sokol_app.h -- cross-platform application wrapper + + Project URL: https://github.com/floooh/sokol + + Do this: + #define SOKOL_IMPL or + #define SOKOL_APP_IMPL + before you include this file in *one* C or C++ file to create the + implementation. + + In the same place define one of the following to select the 3D-API + which should be initialized by sokol_app.h (this must also match + the backend selected for sokol_gfx.h if both are used in the same + project): + + #define SOKOL_GLCORE33 + #define SOKOL_GLES2 + #define SOKOL_GLES3 + #define SOKOL_D3D11 + #define SOKOL_METAL + #define SOKOL_WGPU + + Optionally provide the following defines with your own implementations: + + SOKOL_ASSERT(c) - your own assert macro (default: assert(c)) + SOKOL_LOG(msg) - your own logging function (default: puts(msg)) + SOKOL_UNREACHABLE() - a guard macro for unreachable code (default: assert(false)) + SOKOL_ABORT() - called after an unrecoverable error (default: abort()) + SOKOL_WIN32_FORCE_MAIN - define this on Win32 to use a main() entry point instead of WinMain + SOKOL_NO_ENTRY - define this if sokol_app.h shouldn't "hijack" the main() function + SOKOL_APP_API_DECL - public function declaration prefix (default: extern) + SOKOL_API_DECL - same as SOKOL_APP_API_DECL + SOKOL_API_IMPL - public function implementation prefix (default: -) + SOKOL_CALLOC - your own calloc function (default: calloc(n, s)) + SOKOL_FREE - your own free function (default: free(p)) + + Optionally define the following to force debug checks and validations + even in release mode: + + SOKOL_DEBUG - by default this is defined if _DEBUG is defined + + If sokol_app.h is compiled as a DLL, define the following before + including the declaration or implementation: + + SOKOL_DLL + + On Windows, SOKOL_DLL will define SOKOL_APP_API_DECL as __declspec(dllexport) + or __declspec(dllimport) as needed. + + For example code, see https://github.com/floooh/sokol-samples/tree/master/sapp + + Portions of the Windows and Linux GL initialization and event code have been + taken from GLFW (http://www.glfw.org/) + + iOS onscreen keyboard support 'inspired' by libgdx. + + Link with the following system libraries: + + - on macOS with Metal: Cocoa, QuartzCore, Metal, MetalKit + - on macOS with GL: Cocoa, QuartzCore, OpenGL + - on iOS with Metal: Foundation, UIKit, Metal, MetalKit + - on iOS with GL: Foundation, UIKit, OpenGLES, GLKit + - on Linux: X11, Xi, Xcursor, GL, dl, pthread, m(?) 
+ - on Android: GLESv3, EGL, log, android + - on Windows with the MSVC or Clang toolchains: no action needed, libs are defined in-source via pragma-comment-lib + - on Windows with MINGW/MSYS2 gcc: compile with '-mwin32' so that _WIN32 is defined + - link with the following libs: -lkernel32 -luser32 -lshell32 + - additionally with the GL backend: -lgdi32 + - additionally with the D3D11 backend: -ld3d11 -ldxgi -dxguid + + On Linux, you also need to use the -pthread compiler and linker option, otherwise weird + things will happen, see here for details: https://github.com/floooh/sokol/issues/376 + + Building for UWP requires a recent Visual Studio toolchain and Windows SDK + (at least VS2019 and Windows SDK 10.0.19041.0). When the UWP backend is + selected, the sokol_app.h implementation must be compiled as C++17. + + On macOS and iOS, the implementation must be compiled as Objective-C. + + FEATURE OVERVIEW + ================ + sokol_app.h provides a minimalistic cross-platform API which + implements the 'application-wrapper' parts of a 3D application: + + - a common application entry function + - creates a window and 3D-API context/device with a 'default framebuffer' + - makes the rendered frame visible + - provides keyboard-, mouse- and low-level touch-events + - platforms: MacOS, iOS, HTML5, Win32, Linux, Android (TODO: RaspberryPi) + - 3D-APIs: Metal, D3D11, GL3.2, GLES2, GLES3, WebGL, WebGL2 + + FEATURE/PLATFORM MATRIX + ======================= + | Windows | macOS | Linux | iOS | Android | UWP | Raspi | HTML5 + --------------------+---------+-------+-------+-------+---------+------+-------+------- + gl 3.x | YES | YES | YES | --- | --- | --- | --- | --- + gles2/webgl | --- | --- | --- | YES | YES | --- | TODO | YES + gles3/webgl2 | --- | --- | --- | YES | YES | --- | --- | YES + metal | --- | YES | --- | YES | --- | --- | --- | --- + d3d11 | YES | --- | --- | --- | --- | YES | --- | --- + KEY_DOWN | YES | YES | YES | SOME | TODO | YES | TODO | YES + KEY_UP | YES | YES | YES | SOME | TODO | YES | TODO | YES + CHAR | YES | YES | YES | YES | TODO | YES | TODO | YES + MOUSE_DOWN | YES | YES | YES | --- | --- | YES | TODO | YES + MOUSE_UP | YES | YES | YES | --- | --- | YES | TODO | YES + MOUSE_SCROLL | YES | YES | YES | --- | --- | YES | TODO | YES + MOUSE_MOVE | YES | YES | YES | --- | --- | YES | TODO | YES + MOUSE_ENTER | YES | YES | YES | --- | --- | YES | TODO | YES + MOUSE_LEAVE | YES | YES | YES | --- | --- | YES | TODO | YES + TOUCHES_BEGAN | --- | --- | --- | YES | YES | TODO | --- | YES + TOUCHES_MOVED | --- | --- | --- | YES | YES | TODO | --- | YES + TOUCHES_ENDED | --- | --- | --- | YES | YES | TODO | --- | YES + TOUCHES_CANCELLED | --- | --- | --- | YES | YES | TODO | --- | YES + RESIZED | YES | YES | YES | YES | YES | YES | --- | YES + ICONIFIED | YES | YES | YES | --- | --- | YES | --- | --- + RESTORED | YES | YES | YES | --- | --- | YES | --- | --- + SUSPENDED | --- | --- | --- | YES | YES | YES | --- | TODO + RESUMED | --- | --- | --- | YES | YES | YES | --- | TODO + QUIT_REQUESTED | YES | YES | YES | --- | --- | --- | TODO | YES + UPDATE_CURSOR | YES | YES | TODO | --- | --- | TODO | --- | TODO + IME | TODO | TODO? | TODO | ??? | TODO | --- | ??? | ??? 
+ key repeat flag | YES | YES | YES | --- | --- | YES | TODO | YES + windowed | YES | YES | YES | --- | --- | YES | TODO | YES + fullscreen | YES | YES | YES | YES | YES | YES | TODO | --- + mouse hide | YES | YES | YES | --- | --- | YES | TODO | TODO + mouse lock | YES | YES | YES | --- | --- | TODO | TODO | YES + screen keyboard | --- | --- | --- | YES | TODO | TODO | --- | YES + swap interval | YES | YES | YES | YES | TODO | --- | TODO | YES + high-dpi | YES | YES | TODO | YES | YES | YES | TODO | YES + clipboard | YES | YES | TODO | --- | --- | TODO | --- | YES + MSAA | YES | YES | YES | YES | YES | TODO | TODO | YES + drag'n'drop | YES | YES | YES | --- | --- | TODO | TODO | YES + + TODO + ==== + - Linux: + - clipboard support + - UWP: + - clipboard, mouselock + - sapp_consume_event() on non-web platforms? + + STEP BY STEP + ============ + --- Add a sokol_main() function to your code which returns a sapp_desc structure + with initialization parameters and callback function pointers. This + function is called very early, usually at the start of the + platform's entry function (e.g. main or WinMain). You should do as + little as possible here, since the rest of your code might be called + from another thread (this depends on the platform): + + sapp_desc sokol_main(int argc, char* argv[]) { + return (sapp_desc) { + .width = 640, + .height = 480, + .init_cb = my_init_func, + .frame_cb = my_frame_func, + .cleanup_cb = my_cleanup_func, + .event_cb = my_event_func, + ... + }; + } + + There are many more setup parameters, but these are the most important. + For a complete list search for the sapp_desc structure declaration + below. + + DO NOT call any sokol-app function from inside sokol_main(), since + sokol-app will not be initialized at this point. + + The .width and .height parameters are the preferred size of the 3D + rendering canvas. The actual size may differ from this depending on + platform and other circumstances. Also the canvas size may change at + any time (for instance when the user resizes the application window, + or rotates the mobile device). + + All provided function callbacks will be called from the same thread, + but this may be different from the thread where sokol_main() was called. + + .init_cb (void (*)(void)) + This function is called once after the application window, + 3D rendering context and swap chain have been created. The + function takes no arguments and has no return value. + .frame_cb (void (*)(void)) + This is the per-frame callback, which is usually called 60 + times per second. This is where your application would update + most of its state and perform all rendering. + .cleanup_cb (void (*)(void)) + The cleanup callback is called once right before the application + quits. + .event_cb (void (*)(const sapp_event* event)) + The event callback is mainly for input handling, but is also + used to communicate other types of events to the application. Keep the + event_cb struct member zero-initialized if your application doesn't require + event handling. + .fail_cb (void (*)(const char* msg)) + The fail callback is called when a fatal error is encountered + during start which doesn't allow the program to continue. + Providing a callback here gives you a chance to show an error message + to the user. The default behaviour is SOKOL_LOG(msg) + + As you can see, those 'standard callbacks' don't have a user_data + argument, so any data that needs to be preserved between callbacks + must live in global variables. 
If keeping state in global variables + is not an option, there's an alternative set of callbacks with + an additional user_data pointer argument: + + .user_data (void*) + The user-data argument for the callbacks below + .init_userdata_cb (void (*)(void* user_data)) + .frame_userdata_cb (void (*)(void* user_data)) + .cleanup_userdata_cb (void (*)(void* user_data)) + .event_cb (void(*)(const sapp_event* event, void* user_data)) + .fail_cb (void(*)(const char* msg, void* user_data)) + These are the user-data versions of the callback functions. You + can mix those with the standard callbacks that don't have the + user_data argument. + + The function sapp_userdata() can be used to query the user_data + pointer provided in the sapp_desc struct. + + You can also call sapp_query_desc() to get a copy of the + original sapp_desc structure. + + NOTE that there's also an alternative compile mode where sokol_app.h + doesn't "hijack" the main() function. Search below for SOKOL_NO_ENTRY. + + --- Implement the initialization callback function (init_cb), this is called + once after the rendering surface, 3D API and swap chain have been + initialized by sokol_app. All sokol-app functions can be called + from inside the initialization callback, the most useful functions + at this point are: + + int sapp_width(void) + int sapp_height(void) + Returns the current width and height of the default framebuffer in pixels, + this may change from one frame to the next, and it may be different + from the initial size provided in the sapp_desc struct. + + float sapp_widthf(void) + float sapp_heightf(void) + These are alternatives to sapp_width() and sapp_height() which return + the default framebuffer size as float values instead of integer. This + may help to prevent casting back and forth between int and float + in more strongly typed languages than C and C++. + + int sapp_color_format(void) + int sapp_depth_format(void) + The color and depth-stencil pixelformats of the default framebuffer, + as integer values which are compatible with sokol-gfx's + sg_pixel_format enum (so that they can be plugged directly in places + where sg_pixel_format is expected). Possible values are: + + 23 == SG_PIXELFORMAT_RGBA8 + 27 == SG_PIXELFORMAT_BGRA8 + 41 == SG_PIXELFORMAT_DEPTH + 42 == SG_PIXELFORMAT_DEPTH_STENCIL + + int sapp_sample_count(void) + Return the MSAA sample count of the default framebuffer. + + bool sapp_gles2(void) + Returns true if a GLES2 or WebGL context has been created. This + is useful when a GLES3/WebGL2 context was requested but is not + available so that sokol_app.h had to fallback to GLES2/WebGL. + + const void* sapp_metal_get_device(void) + const void* sapp_metal_get_renderpass_descriptor(void) + const void* sapp_metal_get_drawable(void) + If the Metal backend has been selected, these functions return pointers + to various Metal API objects required for rendering, otherwise + they return a null pointer. These void pointers are actually + Objective-C ids converted with a (ARC) __bridge cast so that + the ids can be tunnel through C code. Also note that the returned + pointers to the renderpass-descriptor and drawable may change from one + frame to the next, only the Metal device object is guaranteed to + stay the same. + + const void* sapp_macos_get_window(void) + On macOS, get the NSWindow object pointer, otherwise a null pointer. + Before being used as Objective-C object, the void* must be converted + back with a (ARC) __bridge cast. 
+ + const void* sapp_ios_get_window(void) + On iOS, get the UIWindow object pointer, otherwise a null pointer. + Before being used as Objective-C object, the void* must be converted + back with a (ARC) __bridge cast. + + const void* sapp_win32_get_hwnd(void) + On Windows, get the window's HWND, otherwise a null pointer. The + HWND has been cast to a void pointer in order to be tunneled + through code which doesn't include Windows.h. + + const void* sapp_d3d11_get_device(void) + const void* sapp_d3d11_get_device_context(void) + const void* sapp_d3d11_get_render_target_view(void) + const void* sapp_d3d11_get_depth_stencil_view(void) + Similar to the sapp_metal_* functions, the sapp_d3d11_* functions + return pointers to D3D11 API objects required for rendering, + only if the D3D11 backend has been selected. Otherwise they + return a null pointer. Note that the returned pointers to the + render-target-view and depth-stencil-view may change from one + frame to the next! + + const void* sapp_wgpu_get_device(void) + const void* sapp_wgpu_get_render_view(void) + const void* sapp_wgpu_get_resolve_view(void) + const void* sapp_wgpu_get_depth_stencil_view(void) + These are the WebGPU-specific functions to get the WebGPU + objects and values required for rendering. If sokol_app.h + is not compiled with SOKOL_WGPU, these functions return null. + + const void* sapp_android_get_native_activity(void); + On Android, get the native activity ANativeActivity pointer, otherwise + a null pointer. + + --- Implement the frame-callback function, this function will be called + on the same thread as the init callback, but might be on a different + thread than the sokol_main() function. Note that the size of + the rendering framebuffer might have changed since the frame callback + was called last. Call the functions sapp_width() and sapp_height() + each frame to get the current size. + + --- Optionally implement the event-callback to handle input events. + sokol-app provides the following type of input events: + - a 'virtual key' was pressed down or released + - a single text character was entered (provided as UTF-32 code point) + - a mouse button was pressed down or released (left, right, middle) + - mouse-wheel or 2D scrolling events + - the mouse was moved + - the mouse has entered or left the application window boundaries + - low-level, portable multi-touch events (began, moved, ended, cancelled) + - the application window was resized, iconified or restored + - the application was suspended or restored (on mobile platforms) + - the user or application code has asked to quit the application + - a string was pasted to the system clipboard + - one or more files have been dropped onto the application window + + To explicitly 'consume' an event and prevent that the event is + forwarded for further handling to the operating system, call + sapp_consume_event() from inside the event handler (NOTE that + this behaviour is currently only implemented for some HTML5 + events, support for other platforms and event types will + be added as needed, please open a github ticket and/or provide + a PR if needed). + + NOTE: Do *not* call any 3D API rendering functions in the event + callback function, since the 3D API context may not be active when the + event callback is called (it may work on some platforms and 3D APIs, + but not others, and the exact behaviour may change between + sokol-app versions). 
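+
+    As a purely illustrative sketch (the function name 'my_event_func' is
+    hypothetical), an event callback that requests a quit when the Escape
+    key is pressed could look like this:
+
+        static void my_event_func(const sapp_event* ev) {
+            if ((ev->type == SAPP_EVENTTYPE_KEY_DOWN) &&
+                (ev->key_code == SAPP_KEYCODE_ESCAPE)) {
+                // this only *requests* a quit, the request can still be
+                // intercepted via SAPP_EVENTTYPE_QUIT_REQUESTED (see the
+                // section "APPLICATION QUIT" below)
+                sapp_request_quit();
+            }
+        }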
+ + --- Implement the cleanup-callback function, this is called once + after the user quits the application (see the section + "APPLICATION QUIT" for detailed information on quitting + behaviour, and how to intercept a pending quit - for instance to show a + "Really Quit?" dialog box). Note that the cleanup-callback isn't + guaranteed to be called on the web and mobile platforms. + + MOUSE LOCK (AKA POINTER LOCK, AKA MOUSE CAPTURE) + ================================================ + In normal mouse mode, no mouse movement events are reported when the + mouse leaves the windows client area or hits the screen border (whether + it's one or the other depends on the platform), and the mouse move events + (SAPP_EVENTTYPE_MOUSE_MOVE) contain absolute mouse positions in + framebuffer pixels in the sapp_event items mouse_x and mouse_y, and + relative movement in framebuffer pixels in the sapp_event items mouse_dx + and mouse_dy. + + To get continuous mouse movement (also when the mouse leaves the window + client area or hits the screen border), activate mouse-lock mode + by calling: + + sapp_lock_mouse(true) + + When mouse lock is activated, the mouse pointer is hidden, the + reported absolute mouse position (sapp_event.mouse_x/y) appears + frozen, and the relative mouse movement in sapp_event.mouse_dx/dy + no longer has a direct relation to framebuffer pixels but instead + uses "raw mouse input" (what "raw mouse input" exactly means also + differs by platform). + + To deactivate mouse lock and return to normal mouse mode, call + + sapp_lock_mouse(false) + + And finally, to check if mouse lock is currently active, call + + if (sapp_mouse_locked()) { ... } + + On native platforms, the sapp_lock_mouse() and sapp_mouse_locked() + functions work as expected (mouse lock is activated or deactivated + immediately when sapp_lock_mouse() is called, and sapp_mouse_locked() + also immediately returns the new state after sapp_lock_mouse() + is called. + + On the web platform, sapp_lock_mouse() and sapp_mouse_locked() behave + differently, as dictated by the limitations of the HTML5 Pointer Lock API: + + - sapp_lock_mouse(true) can be called at any time, but it will + only take effect in a 'short-lived input event handler of a specific + type', meaning when one of the following events happens: + - SAPP_EVENTTYPE_MOUSE_DOWN + - SAPP_EVENTTYPE_MOUSE_UP + - SAPP_EVENTTYPE_MOUSE_SCROLL + - SAPP_EVENTYTPE_KEY_UP + - SAPP_EVENTTYPE_KEY_DOWN + - The mouse lock/unlock action on the web platform is asynchronous, + this means that sapp_mouse_locked() won't immediately return + the new status after calling sapp_lock_mouse(), instead the + reported status will only change when the pointer lock has actually + been activated or deactivated in the browser. + - On the web, mouse lock can be deactivated by the user at any time + by pressing the Esc key. When this happens, sokol_app.h behaves + the same as if sapp_lock_mouse(false) is called. 
+ + For things like camera manipulation it's most straightforward to lock + and unlock the mouse right from the sokol_app.h event handler, for + instance the following code enters and leaves mouse lock when the + left mouse button is pressed and released, and then uses the relative + movement information to manipulate a camera (taken from the + cgltf-sapp.c sample in the sokol-samples repository + at https://github.com/floooh/sokol-samples): + + static void input(const sapp_event* ev) { + switch (ev->type) { + case SAPP_EVENTTYPE_MOUSE_DOWN: + if (ev->mouse_button == SAPP_MOUSEBUTTON_LEFT) { + sapp_lock_mouse(true); + } + break; + + case SAPP_EVENTTYPE_MOUSE_UP: + if (ev->mouse_button == SAPP_MOUSEBUTTON_LEFT) { + sapp_lock_mouse(false); + } + break; + + case SAPP_EVENTTYPE_MOUSE_MOVE: + if (sapp_mouse_locked()) { + cam_orbit(&state.camera, ev->mouse_dx * 0.25f, ev->mouse_dy * 0.25f); + } + break; + + default: + break; + } + } + + CLIPBOARD SUPPORT + ================= + Applications can send and receive UTF-8 encoded text data from and to the + system clipboard. By default, clipboard support is disabled and + must be enabled at startup via the following sapp_desc struct + members: + + sapp_desc.enable_clipboard - set to true to enable clipboard support + sapp_desc.clipboard_size - size of the internal clipboard buffer in bytes + + Enabling the clipboard will dynamically allocate a clipboard buffer + for UTF-8 encoded text data of the requested size in bytes, the default + size is 8 KBytes. Strings that don't fit into the clipboard buffer + (including the terminating zero) will be silently clipped, so it's + important that you provide a big enough clipboard size for your + use case. + + To send data to the clipboard, call sapp_set_clipboard_string() with + a pointer to an UTF-8 encoded, null-terminated C-string. + + NOTE that on the HTML5 platform, sapp_set_clipboard_string() must be + called from inside a 'short-lived event handler', and there are a few + other HTML5-specific caveats to workaround. You'll basically have to + tinker until it works in all browsers :/ (maybe the situation will + improve when all browsers agree on and implement the new + HTML5 navigator.clipboard API). + + To get data from the clipboard, check for the SAPP_EVENTTYPE_CLIPBOARD_PASTED + event in your event handler function, and then call sapp_get_clipboard_string() + to obtain the pasted UTF-8 encoded text. + + NOTE that behaviour of sapp_get_clipboard_string() is slightly different + depending on platform: + + - on the HTML5 platform, the internal clipboard buffer will only be updated + right before the SAPP_EVENTTYPE_CLIPBOARD_PASTED event is sent, + and sapp_get_clipboard_string() will simply return the current content + of the clipboard buffer + - on 'native' platforms, the call to sapp_get_clipboard_string() will + update the internal clipboard buffer with the most recent data + from the system clipboard + + Portable code should check for the SAPP_EVENTTYPE_CLIPBOARD_PASTED event, + and then call sapp_get_clipboard_string() right in the event handler. 
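+
+    A purely illustrative sketch of such an event handler (the function name
+    'my_event_func' is hypothetical, and what is done with the pasted text
+    is up to the application):
+
+        static void my_event_func(const sapp_event* ev) {
+            if (ev->type == SAPP_EVENTTYPE_CLIPBOARD_PASTED) {
+                // only useful if sapp_desc.enable_clipboard was set to true
+                const char* text = sapp_get_clipboard_string();
+                // ...process the UTF-8 encoded, zero-terminated string...
+                (void)text;
+            }
+        }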
+ + The SAPP_EVENTTYPE_CLIPBOARD_PASTED event will be generated by sokol-app + as follows: + + - on macOS: when the Cmd+V key is pressed down + - on HTML5: when the browser sends a 'paste' event to the global 'window' object + - on all other platforms: when the Ctrl+V key is pressed down + + DRAG AND DROP SUPPORT + ===================== + PLEASE NOTE: the drag'n'drop feature works differently on WASM/HTML5 + and on the native desktop platforms (Win32, Linux and macOS) because + of security-related restrictions in the HTML5 drag'n'drop API. The + WASM/HTML5 specifics are described at the end of this documentation + section: + + Like clipboard support, drag'n'drop support must be explicitly enabled + at startup in the sapp_desc struct. + + sapp_desc sokol_main() { + return (sapp_desc) { + .enable_dragndrop = true, // default is false + ... + }; + } + + You can also adjust the maximum number of files that are accepted + in a drop operation, and the maximum path length in bytes if needed: + + sapp_desc sokol_main() { + return (sapp_desc) { + .enable_dragndrop = true, // default is false + .max_dropped_files = 8, // default is 1 + .max_dropped_file_path_length = 8192, // in bytes, default is 2048 + ... + }; + } + + When drag'n'drop is enabled, the event callback will be invoked with an + event of type SAPP_EVENTTYPE_FILES_DROPPED whenever the user drops files on + the application window. + + After the SAPP_EVENTTYPE_FILES_DROPPED is received, you can query the + number of dropped files, and their absolute paths by calling separate + functions: + + void on_event(const sapp_event* ev) { + if (ev->type == SAPP_EVENTTYPE_FILES_DROPPED) { + + // the mouse position where the drop happened + float x = ev->mouse_x; + float y = ev->mouse_y; + + // get the number of files and their paths like this: + const int num_dropped_files = sapp_get_num_dropped_files(); + for (int i = 0; i < num_dropped_files; i++) { + const char* path = sapp_get_dropped_file_path(i); + ... + } + } + } + + The returned file paths are UTF-8 encoded strings. + + You can call sapp_get_num_dropped_files() and sapp_get_dropped_file_path() + anywhere, also outside the event handler callback, but be aware that the + file path strings will be overwritten with the next drop operation. + + In any case, sapp_get_dropped_file_path() will never return a null pointer, + instead an empty string "" will be returned if the drag'n'drop feature + hasn't been enabled, the last drop-operation failed, or the file path index + is out of range. + + Drag'n'drop caveats: + + - if more files are dropped in a single drop-action + than sapp_desc.max_dropped_files, the additional + files will be silently ignored + - if any of the file paths is longer than + sapp_desc.max_dropped_file_path_length (in number of bytes, after UTF-8 + encoding) the entire drop operation will be silently ignored (this + needs some sort of error feedback in the future) + - no mouse positions are reported while the drag is in + process, this may change in the future + + Drag'n'drop on HTML5/WASM: + + The HTML5 drag'n'drop API doesn't return file paths, but instead + black-box 'file objects' which must be used to load the content + of dropped files. This is the reason why sokol_app.h adds two + HTML5-specific functions to the drag'n'drop API: + + uint32_t sapp_html5_get_dropped_file_size(int index) + Returns the size in bytes of a dropped file. 
+ + void sapp_html5_fetch_dropped_file(const sapp_html5_fetch_request* request) + Asynchronously loads the content of a dropped file into a + provided memory buffer (which must be big enough to hold + the file content) + + To start loading the first dropped file after an SAPP_EVENTTYPE_FILES_DROPPED + event is received: + + sapp_html5_fetch_dropped_file(&(sapp_html5_fetch_request){ + .dropped_file_index = 0, + .callback = fetch_cb + .buffer_ptr = buf, + .buffer_size = buf_size, + .user_data = ... + }); + + Make sure that the memory pointed to by 'buf' stays valid until the + callback function is called! + + As result of the asynchronous loading operation (no matter if succeeded or + failed) the 'fetch_cb' function will be called: + + void fetch_cb(const sapp_html5_fetch_response* response) { + // IMPORTANT: check if the loading operation actually succeeded: + if (response->succeeded) { + // the size of the loaded file: + const uint32_t num_bytes = response->fetched_size; + // and the pointer to the data (same as 'buf' in the fetch-call): + const void* ptr = response->buffer_ptr; + } + else { + // on error check the error code: + switch (response->error_code) { + case SAPP_HTML5_FETCH_ERROR_BUFFER_TOO_SMALL: + ... + break; + case SAPP_HTML5_FETCH_ERROR_OTHER: + ... + break; + } + } + } + + Check the droptest-sapp example for a real-world example which works + both on native platforms and the web: + + https://github.com/floooh/sokol-samples/blob/master/sapp/droptest-sapp.c + + HIGH-DPI RENDERING + ================== + You can set the sapp_desc.high_dpi flag during initialization to request + a full-resolution framebuffer on HighDPI displays. The default behaviour + is sapp_desc.high_dpi=false, this means that the application will + render to a lower-resolution framebuffer on HighDPI displays and the + rendered content will be upscaled by the window system composer. + + In a HighDPI scenario, you still request the same window size during + sokol_main(), but the framebuffer sizes returned by sapp_width() + and sapp_height() will be scaled up according to the DPI scaling + ratio. You can also get a DPI scaling factor with the function + sapp_dpi_scale(). + + Here's an example on a Mac with Retina display: + + sapp_desc sokol_main() { + return (sapp_desc) { + .width = 640, + .height = 480, + .high_dpi = true, + ... + }; + } + + The functions sapp_width(), sapp_height() and sapp_dpi_scale() will + return the following values: + + sapp_width -> 1280 + sapp_height -> 960 + sapp_dpi_scale -> 2.0 + + If the high_dpi flag is false, or you're not running on a Retina display, + the values would be: + + sapp_width -> 640 + sapp_height -> 480 + sapp_dpi_scale -> 1.0 + + APPLICATION QUIT + ================ + Without special quit handling, a sokol_app.h application will quit + 'gracefully' when the user clicks the window close-button unless a + platform's application model prevents this (e.g. on web or mobile). + 'Graceful exit' means that the application-provided cleanup callback will + be called before the application quits. + + On native desktop platforms sokol_app.h provides more control over the + application-quit-process. It's possible to initiate a 'programmatic quit' + from the application code, and a quit initiated by the application user can + be intercepted (for instance to show a custom dialog box). + + This 'programmatic quit protocol' is implemented through 3 functions + and 1 event: + + - sapp_quit(): This function simply quits the application without + giving the user a chance to intervene. 
Usually this might + be called when the user clicks the 'Ok' button in a 'Really Quit?' + dialog box + - sapp_request_quit(): Calling sapp_request_quit() will send the + event SAPP_EVENTTYPE_QUIT_REQUESTED to the applications event handler + callback, giving the user code a chance to intervene and cancel the + pending quit process (for instance to show a 'Really Quit?' dialog + box). If the event handler callback does nothing, the application + will be quit as usual. To prevent this, call the function + sapp_cancel_quit() from inside the event handler. + - sapp_cancel_quit(): Cancels a pending quit request, either initiated + by the user clicking the window close button, or programmatically + by calling sapp_request_quit(). The only place where calling this + function makes sense is from inside the event handler callback when + the SAPP_EVENTTYPE_QUIT_REQUESTED event has been received. + - SAPP_EVENTTYPE_QUIT_REQUESTED: this event is sent when the user + clicks the window's close button or application code calls the + sapp_request_quit() function. The event handler callback code can handle + this event by calling sapp_cancel_quit() to cancel the quit. + If the event is ignored, the application will quit as usual. + + On the web platform, the quit behaviour differs from native platforms, + because of web-specific restrictions: + + A `programmatic quit` initiated by calling sapp_quit() or + sapp_request_quit() will work as described above: the cleanup callback is + called, platform-specific cleanup is performed (on the web + this means that JS event handlers are unregisters), and then + the request-animation-loop will be exited. However that's all. The + web page itself will continue to exist (e.g. it's not possible to + programmatically close the browser tab). + + On the web it's also not possible to run custom code when the user + closes a brower tab, so it's not possible to prevent this with a + fancy custom dialog box. + + Instead the standard "Leave Site?" dialog box can be activated (or + deactivated) with the following function: + + sapp_html5_ask_leave_site(bool ask); + + The initial state of the associated internal flag can be provided + at startup via sapp_desc.html5_ask_leave_site. + + This feature should only be used sparingly in critical situations - for + instance when the user would loose data - since popping up modal dialog + boxes is considered quite rude in the web world. Note that there's no way + to customize the content of this dialog box or run any code as a result + of the user's decision. Also note that the user must have interacted with + the site before the dialog box will appear. These are all security measures + to prevent fishing. + + The Dear ImGui HighDPI sample contains example code of how to + implement a 'Really Quit?' dialog box with Dear ImGui (native desktop + platforms only), and for showing the hardwired "Leave Site?" dialog box + when running on the web platform: + + https://floooh.github.io/sokol-html5/wasm/imgui-highdpi-sapp.html + + FULLSCREEN + ========== + If the sapp_desc.fullscreen flag is true, sokol-app will try to create + a fullscreen window on platforms with a 'proper' window system + (mobile devices will always use fullscreen). The implementation details + depend on the target platform, in general sokol-app will use a + 'soft approach' which doesn't interfere too much with the platform's + window system (for instance borderless fullscreen window instead of + a 'real' fullscreen mode). 
Such details might change over time + as sokol-app is adapted for different needs. + + The most important effect of fullscreen mode to keep in mind is that + the requested canvas width and height will be ignored for the initial + window size, calling sapp_width() and sapp_height() will instead return + the resolution of the fullscreen canvas (however the provided size + might still be used for the non-fullscreen window, in case the user can + switch back from fullscreen- to windowed-mode). + + To toggle fullscreen mode programmatically, call sapp_toggle_fullscreen(). + + To check if the application window is currently in fullscreen mode, + call sapp_is_fullscreen(). + + ONSCREEN KEYBOARD + ================= + On some platforms which don't provide a physical keyboard, sokol-app + can display the platform's integrated onscreen keyboard for text + input. To request that the onscreen keyboard is shown, call + + sapp_show_keyboard(true); + + Likewise, to hide the keyboard call: + + sapp_show_keyboard(false); + + Note that on the web platform, the keyboard can only be shown from + inside an input handler. On such platforms, sapp_show_keyboard() + will only work as expected when it is called from inside the + sokol-app event callback function. When called from other places, + an internal flag will be set, and the onscreen keyboard will be + called at the next 'legal' opportunity (when the next input event + is handled). + + OPTIONAL: DON'T HIJACK main() (#define SOKOL_NO_ENTRY) + ====================================================== + In its default configuration, sokol_app.h "hijacks" the platform's + standard main() function. This was done because different platforms + have different main functions which are not compatible with + C's main() (for instance WinMain on Windows has completely different + arguments). However, this "main hijacking" posed a problem for + usage scenarios like integrating sokol_app.h with other languages than + C or C++, so an alternative SOKOL_NO_ENTRY mode has been added + in which the user code provides the platform's main function: + + - define SOKOL_NO_ENTRY before including the sokol_app.h implementation + - do *not* provide a sokol_main() function + - instead provide the standard main() function of the platform + - from the main function, call the function ```sapp_run()``` which + takes a pointer to an ```sapp_desc``` structure. + - ```sapp_run()``` takes over control and calls the provided init-, frame-, + shutdown- and event-callbacks just like in the default model, it + will only return when the application quits (or not at all on some + platforms, like emscripten) + + NOTE: SOKOL_NO_ENTRY is currently not supported on Android. + + WINDOWS CONSOLE OUTPUT + ====================== + On Windows, regular windowed applications don't show any stdout/stderr text + output, which can be a bit of a hassle for printf() debugging or generally + logging text to the console. Also, console output by default uses a local + codepage setting and thus international UTF-8 encoded text is printed + as garbage. 
+ + To help with these issues, sokol_app.h can be configured at startup + via the following Windows-specific sapp_desc flags: + + sapp_desc.win32_console_utf8 (default: false) + When set to true, the output console codepage will be switched + to UTF-8 (and restored to the original codepage on exit) + + sapp_desc.win32_console_attach (default: false) + When set to true, stdout and stderr will be attached to the + console of the parent process (if the parent process actually + has a console). This means that if the application was started + in a command line window, stdout and stderr output will be printed + to the terminal, just like a regular command line program. But if + the application is started via double-click, it will behave like + a regular UI application, and stdout/stderr will not be visible. + + sapp_desc.win32_console_create (default: false) + When set to true, a new console window will be created and + stdout/stderr will be redirected to that console window. It + doesn't matter if the application is started from the command + line or via double-click. + + TEMP NOTE DUMP + ============== + - onscreen keyboard support on Android requires Java :(, should we even bother? + - sapp_desc needs a bool whether to initialize depth-stencil surface + - GL context initialization needs more control (at least what GL version to initialize) + - application icon + - the UPDATE_CURSOR event currently behaves differently between Win32 and OSX + (Win32 sends the event each frame when the mouse moves and is inside the window + client area, OSX sends it only once when the mouse enters the client area) + - the Android implementation calls cleanup_cb() and destroys the egl context in onDestroy + at the latest but should do it earlier, in onStop, as an app is "killable" after onStop + on Android Honeycomb and later (it can't be done at the moment as the app may be started + again after onStop and the sokol lifecycle does not yet handle context teardown/bringup) + + + LICENSE + ======= + zlib/libpng license + + Copyright (c) 2018 Andre Weissflog + + This software is provided 'as-is', without any express or implied warranty. + In no event will the authors be held liable for any damages arising from the + use of this software. + + Permission is granted to anyone to use this software for any purpose, + including commercial applications, and to alter it and redistribute it + freely, subject to the following restrictions: + + 1. The origin of this software must not be misrepresented; you must not + claim that you wrote the original software. If you use this software in a + product, an acknowledgment in the product documentation would be + appreciated but is not required. + + 2. Altered source versions must be plainly marked as such, and must not + be misrepresented as being the original software. + + 3. This notice may not be removed or altered from any source + distribution. 
+*/ +#define SOKOL_APP_INCLUDED (1) +#include +#include + +#if defined(SOKOL_API_DECL) && !defined(SOKOL_APP_API_DECL) +#define SOKOL_APP_API_DECL SOKOL_API_DECL +#endif +#ifndef SOKOL_APP_API_DECL +#if defined(_WIN32) && defined(SOKOL_DLL) && defined(SOKOL_APP_IMPL) +#define SOKOL_APP_API_DECL __declspec(dllexport) +#elif defined(_WIN32) && defined(SOKOL_DLL) +#define SOKOL_APP_API_DECL __declspec(dllimport) +#else +#define SOKOL_APP_API_DECL extern +#endif +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +enum { + SAPP_MAX_TOUCHPOINTS = 8, + SAPP_MAX_MOUSEBUTTONS = 3, + SAPP_MAX_KEYCODES = 512, +}; + +typedef enum sapp_event_type { + SAPP_EVENTTYPE_INVALID, + SAPP_EVENTTYPE_KEY_DOWN, + SAPP_EVENTTYPE_KEY_UP, + SAPP_EVENTTYPE_CHAR, + SAPP_EVENTTYPE_MOUSE_DOWN, + SAPP_EVENTTYPE_MOUSE_UP, + SAPP_EVENTTYPE_MOUSE_SCROLL, + SAPP_EVENTTYPE_MOUSE_MOVE, + SAPP_EVENTTYPE_MOUSE_ENTER, + SAPP_EVENTTYPE_MOUSE_LEAVE, + SAPP_EVENTTYPE_TOUCHES_BEGAN, + SAPP_EVENTTYPE_TOUCHES_MOVED, + SAPP_EVENTTYPE_TOUCHES_ENDED, + SAPP_EVENTTYPE_TOUCHES_CANCELLED, + SAPP_EVENTTYPE_RESIZED, + SAPP_EVENTTYPE_ICONIFIED, + SAPP_EVENTTYPE_RESTORED, + SAPP_EVENTTYPE_SUSPENDED, + SAPP_EVENTTYPE_RESUMED, + SAPP_EVENTTYPE_UPDATE_CURSOR, + SAPP_EVENTTYPE_QUIT_REQUESTED, + SAPP_EVENTTYPE_CLIPBOARD_PASTED, + SAPP_EVENTTYPE_FILES_DROPPED, + _SAPP_EVENTTYPE_NUM, + _SAPP_EVENTTYPE_FORCE_U32 = 0x7FFFFFFF +} sapp_event_type; + +/* key codes are the same names and values as GLFW */ +typedef enum sapp_keycode { + SAPP_KEYCODE_INVALID = 0, + SAPP_KEYCODE_SPACE = 32, + SAPP_KEYCODE_APOSTROPHE = 39, /* ' */ + SAPP_KEYCODE_COMMA = 44, /* , */ + SAPP_KEYCODE_MINUS = 45, /* - */ + SAPP_KEYCODE_PERIOD = 46, /* . */ + SAPP_KEYCODE_SLASH = 47, /* / */ + SAPP_KEYCODE_0 = 48, + SAPP_KEYCODE_1 = 49, + SAPP_KEYCODE_2 = 50, + SAPP_KEYCODE_3 = 51, + SAPP_KEYCODE_4 = 52, + SAPP_KEYCODE_5 = 53, + SAPP_KEYCODE_6 = 54, + SAPP_KEYCODE_7 = 55, + SAPP_KEYCODE_8 = 56, + SAPP_KEYCODE_9 = 57, + SAPP_KEYCODE_SEMICOLON = 59, /* ; */ + SAPP_KEYCODE_EQUAL = 61, /* = */ + SAPP_KEYCODE_A = 65, + SAPP_KEYCODE_B = 66, + SAPP_KEYCODE_C = 67, + SAPP_KEYCODE_D = 68, + SAPP_KEYCODE_E = 69, + SAPP_KEYCODE_F = 70, + SAPP_KEYCODE_G = 71, + SAPP_KEYCODE_H = 72, + SAPP_KEYCODE_I = 73, + SAPP_KEYCODE_J = 74, + SAPP_KEYCODE_K = 75, + SAPP_KEYCODE_L = 76, + SAPP_KEYCODE_M = 77, + SAPP_KEYCODE_N = 78, + SAPP_KEYCODE_O = 79, + SAPP_KEYCODE_P = 80, + SAPP_KEYCODE_Q = 81, + SAPP_KEYCODE_R = 82, + SAPP_KEYCODE_S = 83, + SAPP_KEYCODE_T = 84, + SAPP_KEYCODE_U = 85, + SAPP_KEYCODE_V = 86, + SAPP_KEYCODE_W = 87, + SAPP_KEYCODE_X = 88, + SAPP_KEYCODE_Y = 89, + SAPP_KEYCODE_Z = 90, + SAPP_KEYCODE_LEFT_BRACKET = 91, /* [ */ + SAPP_KEYCODE_BACKSLASH = 92, /* \ */ + SAPP_KEYCODE_RIGHT_BRACKET = 93, /* ] */ + SAPP_KEYCODE_GRAVE_ACCENT = 96, /* ` */ + SAPP_KEYCODE_WORLD_1 = 161, /* non-US #1 */ + SAPP_KEYCODE_WORLD_2 = 162, /* non-US #2 */ + SAPP_KEYCODE_ESCAPE = 256, + SAPP_KEYCODE_ENTER = 257, + SAPP_KEYCODE_TAB = 258, + SAPP_KEYCODE_BACKSPACE = 259, + SAPP_KEYCODE_INSERT = 260, + SAPP_KEYCODE_DELETE = 261, + SAPP_KEYCODE_RIGHT = 262, + SAPP_KEYCODE_LEFT = 263, + SAPP_KEYCODE_DOWN = 264, + SAPP_KEYCODE_UP = 265, + SAPP_KEYCODE_PAGE_UP = 266, + SAPP_KEYCODE_PAGE_DOWN = 267, + SAPP_KEYCODE_HOME = 268, + SAPP_KEYCODE_END = 269, + SAPP_KEYCODE_CAPS_LOCK = 280, + SAPP_KEYCODE_SCROLL_LOCK = 281, + SAPP_KEYCODE_NUM_LOCK = 282, + SAPP_KEYCODE_PRINT_SCREEN = 283, + SAPP_KEYCODE_PAUSE = 284, + SAPP_KEYCODE_F1 = 290, + SAPP_KEYCODE_F2 = 291, + SAPP_KEYCODE_F3 = 292, + SAPP_KEYCODE_F4 = 293, + 
SAPP_KEYCODE_F5 = 294, + SAPP_KEYCODE_F6 = 295, + SAPP_KEYCODE_F7 = 296, + SAPP_KEYCODE_F8 = 297, + SAPP_KEYCODE_F9 = 298, + SAPP_KEYCODE_F10 = 299, + SAPP_KEYCODE_F11 = 300, + SAPP_KEYCODE_F12 = 301, + SAPP_KEYCODE_F13 = 302, + SAPP_KEYCODE_F14 = 303, + SAPP_KEYCODE_F15 = 304, + SAPP_KEYCODE_F16 = 305, + SAPP_KEYCODE_F17 = 306, + SAPP_KEYCODE_F18 = 307, + SAPP_KEYCODE_F19 = 308, + SAPP_KEYCODE_F20 = 309, + SAPP_KEYCODE_F21 = 310, + SAPP_KEYCODE_F22 = 311, + SAPP_KEYCODE_F23 = 312, + SAPP_KEYCODE_F24 = 313, + SAPP_KEYCODE_F25 = 314, + SAPP_KEYCODE_KP_0 = 320, + SAPP_KEYCODE_KP_1 = 321, + SAPP_KEYCODE_KP_2 = 322, + SAPP_KEYCODE_KP_3 = 323, + SAPP_KEYCODE_KP_4 = 324, + SAPP_KEYCODE_KP_5 = 325, + SAPP_KEYCODE_KP_6 = 326, + SAPP_KEYCODE_KP_7 = 327, + SAPP_KEYCODE_KP_8 = 328, + SAPP_KEYCODE_KP_9 = 329, + SAPP_KEYCODE_KP_DECIMAL = 330, + SAPP_KEYCODE_KP_DIVIDE = 331, + SAPP_KEYCODE_KP_MULTIPLY = 332, + SAPP_KEYCODE_KP_SUBTRACT = 333, + SAPP_KEYCODE_KP_ADD = 334, + SAPP_KEYCODE_KP_ENTER = 335, + SAPP_KEYCODE_KP_EQUAL = 336, + SAPP_KEYCODE_LEFT_SHIFT = 340, + SAPP_KEYCODE_LEFT_CONTROL = 341, + SAPP_KEYCODE_LEFT_ALT = 342, + SAPP_KEYCODE_LEFT_SUPER = 343, + SAPP_KEYCODE_RIGHT_SHIFT = 344, + SAPP_KEYCODE_RIGHT_CONTROL = 345, + SAPP_KEYCODE_RIGHT_ALT = 346, + SAPP_KEYCODE_RIGHT_SUPER = 347, + SAPP_KEYCODE_MENU = 348, +} sapp_keycode; + +typedef struct sapp_touchpoint { + uintptr_t identifier; + float pos_x; + float pos_y; + bool changed; +} sapp_touchpoint; + +typedef enum sapp_mousebutton { + SAPP_MOUSEBUTTON_LEFT = 0x0, + SAPP_MOUSEBUTTON_RIGHT = 0x1, + SAPP_MOUSEBUTTON_MIDDLE = 0x2, + SAPP_MOUSEBUTTON_INVALID = 0x100, +} sapp_mousebutton; + +enum { + SAPP_MODIFIER_SHIFT = 0x1, + SAPP_MODIFIER_CTRL = 0x2, + SAPP_MODIFIER_ALT = 0x4, + SAPP_MODIFIER_SUPER = 0x8 +}; + +typedef struct sapp_event { + uint64_t frame_count; + sapp_event_type type; + sapp_keycode key_code; + uint32_t char_code; + bool key_repeat; + uint32_t modifiers; + sapp_mousebutton mouse_button; + float mouse_x; + float mouse_y; + float mouse_dx; + float mouse_dy; + float scroll_x; + float scroll_y; + int num_touches; + sapp_touchpoint touches[SAPP_MAX_TOUCHPOINTS]; + int window_width; + int window_height; + int framebuffer_width; + int framebuffer_height; +} sapp_event; + +typedef struct sapp_desc { + void (*init_cb)(void); /* these are the user-provided callbacks without user data */ + void (*frame_cb)(void); + void (*cleanup_cb)(void); + void (*event_cb)(const sapp_event*); + void (*fail_cb)(const char*); + + void* user_data; /* these are the user-provided callbacks with user data */ + void (*init_userdata_cb)(void*); + void (*frame_userdata_cb)(void*); + void (*cleanup_userdata_cb)(void*); + void (*event_userdata_cb)(const sapp_event*, void*); + void (*fail_userdata_cb)(const char*, void*); + + int width; /* the preferred width of the window / canvas */ + int height; /* the preferred height of the window / canvas */ + int sample_count; /* MSAA sample count */ + int swap_interval; /* the preferred swap interval (ignored on some platforms) */ + bool high_dpi; /* whether the rendering canvas is full-resolution on HighDPI displays */ + bool fullscreen; /* whether the window should be created in fullscreen mode */ + bool alpha; /* whether the framebuffer should have an alpha channel (ignored on some platforms) */ + const char* window_title; /* the window title as UTF-8 encoded string */ + bool user_cursor; /* if true, user is expected to manage cursor image in SAPP_EVENTTYPE_UPDATE_CURSOR */ + bool enable_clipboard; /* enable clipboard 
access, default is false */ + int clipboard_size; /* max size of clipboard content in bytes */ + bool enable_dragndrop; /* enable file dropping (drag'n'drop), default is false */ + int max_dropped_files; /* max number of dropped files to process (default: 1) */ + int max_dropped_file_path_length; /* max length in bytes of a dropped UTF-8 file path (default: 2048) */ + + /* backend-specific options */ + bool gl_force_gles2; /* if true, setup GLES2/WebGL even if GLES3/WebGL2 is available */ + bool win32_console_utf8; /* if true, set the output console codepage to UTF-8 */ + bool win32_console_create; /* if true, attach stdout/stderr to a new console window */ + bool win32_console_attach; /* if true, attach stdout/stderr to parent process */ + const char* html5_canvas_name; /* the name (id) of the HTML5 canvas element, default is "canvas" */ + bool html5_canvas_resize; /* if true, the HTML5 canvas size is set to sapp_desc.width/height, otherwise canvas size is tracked */ + bool html5_preserve_drawing_buffer; /* HTML5 only: whether to preserve default framebuffer content between frames */ + bool html5_premultiplied_alpha; /* HTML5 only: whether the rendered pixels use premultiplied alpha convention */ + bool html5_ask_leave_site; /* initial state of the internal html5_ask_leave_site flag (see sapp_html5_ask_leave_site()) */ + bool ios_keyboard_resizes_canvas; /* if true, showing the iOS keyboard shrinks the canvas */ + + /* V patches */ + bool __v_native_render; /* V patch to allow for native rendering */ +} sapp_desc; + +/* HTML5 specific: request and response structs for + asynchronously loading dropped-file content. +*/ +typedef enum sapp_html5_fetch_error { + SAPP_HTML5_FETCH_ERROR_NO_ERROR, + SAPP_HTML5_FETCH_ERROR_BUFFER_TOO_SMALL, + SAPP_HTML5_FETCH_ERROR_OTHER, +} sapp_html5_fetch_error; + +typedef struct sapp_html5_fetch_response { + bool succeeded; /* true if the loading operation has succeeded */ + sapp_html5_fetch_error error_code; + int file_index; /* index of the dropped file (0..sapp_get_num_dropped_filed()-1) */ + uint32_t fetched_size; /* size in bytes of loaded data */ + void* buffer_ptr; /* pointer to user-provided buffer which contains the loaded data */ + uint32_t buffer_size; /* size of user-provided buffer (buffer_size >= fetched_size) */ + void* user_data; /* user-provided user data pointer */ +} sapp_html5_fetch_response; + +typedef struct sapp_html5_fetch_request { + int dropped_file_index; /* 0..sapp_get_num_dropped_files()-1 */ + void (*callback)(const sapp_html5_fetch_response*); /* response callback function pointer (required) */ + void* buffer_ptr; /* pointer to buffer to load data into */ + uint32_t buffer_size; /* size in bytes of buffer */ + void* user_data; /* optional userdata pointer */ +} sapp_html5_fetch_request; + +/* user-provided functions */ +extern sapp_desc sokol_main(int argc, char* argv[]); + +/* returns true after sokol-app has been initialized */ +SOKOL_APP_API_DECL bool sapp_isvalid(void); +/* returns the current framebuffer width in pixels */ +SOKOL_APP_API_DECL int sapp_width(void); +/* same as sapp_width(), but returns float */ +SOKOL_APP_API_DECL float sapp_widthf(void); +/* returns the current framebuffer height in pixels */ +SOKOL_APP_API_DECL int sapp_height(void); +/* same as sapp_height(), but returns float */ +SOKOL_APP_API_DECL float sapp_heightf(void); +/* get default framebuffer color pixel format */ +SOKOL_APP_API_DECL int sapp_color_format(void); +/* get default framebuffer depth pixel format */ +SOKOL_APP_API_DECL int 
sapp_depth_format(void); +/* get default framebuffer sample count */ +SOKOL_APP_API_DECL int sapp_sample_count(void); +/* returns true when high_dpi was requested and actually running in a high-dpi scenario */ +SOKOL_APP_API_DECL bool sapp_high_dpi(void); +/* returns the dpi scaling factor (window pixels to framebuffer pixels) */ +SOKOL_APP_API_DECL float sapp_dpi_scale(void); +/* show or hide the mobile device onscreen keyboard */ +SOKOL_APP_API_DECL void sapp_show_keyboard(bool show); +/* return true if the mobile device onscreen keyboard is currently shown */ +SOKOL_APP_API_DECL bool sapp_keyboard_shown(void); +/* query fullscreen mode */ +SOKOL_APP_API_DECL bool sapp_is_fullscreen(void); +/* toggle fullscreen mode */ +SOKOL_APP_API_DECL void sapp_toggle_fullscreen(void); +/* show or hide the mouse cursor */ +SOKOL_APP_API_DECL void sapp_show_mouse(bool show); +/* show or hide the mouse cursor */ +SOKOL_APP_API_DECL bool sapp_mouse_shown(); +/* enable/disable mouse-pointer-lock mode */ +SOKOL_APP_API_DECL void sapp_lock_mouse(bool lock); +/* return true if in mouse-pointer-lock mode (this may toggle a few frames later) */ +SOKOL_APP_API_DECL bool sapp_mouse_locked(void); +/* return the userdata pointer optionally provided in sapp_desc */ +SOKOL_APP_API_DECL void* sapp_userdata(void); +/* return a copy of the sapp_desc structure */ +SOKOL_APP_API_DECL sapp_desc sapp_query_desc(void); +/* initiate a "soft quit" (sends SAPP_EVENTTYPE_QUIT_REQUESTED) */ +SOKOL_APP_API_DECL void sapp_request_quit(void); +/* cancel a pending quit (when SAPP_EVENTTYPE_QUIT_REQUESTED has been received) */ +SOKOL_APP_API_DECL void sapp_cancel_quit(void); +/* initiate a "hard quit" (quit application without sending SAPP_EVENTTYPE_QUIT_REQUSTED) */ +SOKOL_APP_API_DECL void sapp_quit(void); +/* call from inside event callback to consume the current event (don't forward to platform) */ +SOKOL_APP_API_DECL void sapp_consume_event(void); +/* get the current frame counter (for comparison with sapp_event.frame_count) */ +SOKOL_APP_API_DECL uint64_t sapp_frame_count(void); +/* write string into clipboard */ +SOKOL_APP_API_DECL void sapp_set_clipboard_string(const char* str); +/* read string from clipboard (usually during SAPP_EVENTTYPE_CLIPBOARD_PASTED) */ +SOKOL_APP_API_DECL const char* sapp_get_clipboard_string(void); +/* set the window title (only on desktop platforms) */ +SOKOL_APP_API_DECL void sapp_set_window_title(const char* str); +/* gets the total number of dropped files (after an SAPP_EVENTTYPE_FILES_DROPPED event) */ +SOKOL_APP_API_DECL int sapp_get_num_dropped_files(void); +/* gets the dropped file paths */ +SOKOL_APP_API_DECL const char* sapp_get_dropped_file_path(int index); + +/* special run-function for SOKOL_NO_ENTRY (in standard mode this is an empty stub) */ +SOKOL_APP_API_DECL void sapp_run(const sapp_desc* desc); + +/* GL: return true when GLES2 fallback is active (to detect fallback from GLES3) */ +SOKOL_APP_API_DECL bool sapp_gles2(void); + +/* HTML5: enable or disable the hardwired "Leave Site?" 
dialog box */ +SOKOL_APP_API_DECL void sapp_html5_ask_leave_site(bool ask); +/* HTML5: get byte size of a dropped file */ +SOKOL_APP_API_DECL uint32_t sapp_html5_get_dropped_file_size(int index); +/* HTML5: asynchronously load the content of a dropped file */ +SOKOL_APP_API_DECL void sapp_html5_fetch_dropped_file(const sapp_html5_fetch_request* request); + +/* Metal: get bridged pointer to Metal device object */ +SOKOL_APP_API_DECL const void* sapp_metal_get_device(void); +/* Metal: get bridged pointer to this frame's renderpass descriptor */ +SOKOL_APP_API_DECL const void* sapp_metal_get_renderpass_descriptor(void); +/* Metal: get bridged pointer to current drawable */ +SOKOL_APP_API_DECL const void* sapp_metal_get_drawable(void); +/* macOS: get bridged pointer to macOS NSWindow */ +SOKOL_APP_API_DECL const void* sapp_macos_get_window(void); +/* iOS: get bridged pointer to iOS UIWindow */ +SOKOL_APP_API_DECL const void* sapp_ios_get_window(void); + +/* D3D11: get pointer to ID3D11Device object */ +SOKOL_APP_API_DECL const void* sapp_d3d11_get_device(void); +/* D3D11: get pointer to ID3D11DeviceContext object */ +SOKOL_APP_API_DECL const void* sapp_d3d11_get_device_context(void); +/* D3D11: get pointer to ID3D11RenderTargetView object */ +SOKOL_APP_API_DECL const void* sapp_d3d11_get_render_target_view(void); +/* D3D11: get pointer to ID3D11DepthStencilView */ +SOKOL_APP_API_DECL const void* sapp_d3d11_get_depth_stencil_view(void); +/* Win32: get the HWND window handle */ +SOKOL_APP_API_DECL const void* sapp_win32_get_hwnd(void); + +/* WebGPU: get WGPUDevice handle */ +SOKOL_APP_API_DECL const void* sapp_wgpu_get_device(void); +/* WebGPU: get swapchain's WGPUTextureView handle for rendering */ +SOKOL_APP_API_DECL const void* sapp_wgpu_get_render_view(void); +/* WebGPU: get swapchain's MSAA-resolve WGPUTextureView (may return null) */ +SOKOL_APP_API_DECL const void* sapp_wgpu_get_resolve_view(void); +/* WebGPU: get swapchain's WGPUTextureView for the depth-stencil surface */ +SOKOL_APP_API_DECL const void* sapp_wgpu_get_depth_stencil_view(void); + +/* Android: get native activity handle */ +SOKOL_APP_API_DECL const void* sapp_android_get_native_activity(void); + +#ifdef __cplusplus +} /* extern "C" */ + +/* reference-based equivalents for C++ */ +inline void sapp_run(const sapp_desc& desc) { return sapp_run(&desc); } + +#endif + +// this WinRT specific hack is required when wWinMain is in a static library +#if defined(_MSC_VER) && defined(UNICODE) +#include +#if defined(WINAPI_FAMILY_PARTITION) && !WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP) +#pragma comment(linker, "/include:wWinMain") +#endif +#endif + +#endif // SOKOL_APP_INCLUDED + +/*-- IMPLEMENTATION ----------------------------------------------------------*/ +#ifdef SOKOL_APP_IMPL +#define SOKOL_APP_IMPL_INCLUDED (1) + +#include // memset +#include // size_t + +/* check if the config defines are alright */ +#if defined(__APPLE__) + // see https://clang.llvm.org/docs/LanguageExtensions.html#automatic-reference-counting + #if !defined(__cplusplus) + #if __has_feature(objc_arc) && !__has_feature(objc_arc_fields) + #error "sokol_app.h requires __has_feature(objc_arc_field) if ARC is enabled (use a more recent compiler version)" + #endif + #endif + #define _SAPP_APPLE (1) + #include + #if defined(TARGET_OS_IPHONE) && !TARGET_OS_IPHONE + /* MacOS */ + #define _SAPP_MACOS (1) + #if !defined(SOKOL_METAL) && !defined(SOKOL_GLCORE33) + #error("sokol_app.h: unknown 3D API selected for MacOS, must be SOKOL_METAL or SOKOL_GLCORE33") + 
#endif + #else + /* iOS or iOS Simulator */ + #define _SAPP_IOS (1) + #if !defined(SOKOL_METAL) && !defined(SOKOL_GLES3) + #error("sokol_app.h: unknown 3D API selected for iOS, must be SOKOL_METAL or SOKOL_GLES3") + #endif + #endif +#elif defined(__EMSCRIPTEN__) + /* emscripten (asm.js or wasm) */ + #define _SAPP_EMSCRIPTEN (1) + #if !defined(SOKOL_GLES3) && !defined(SOKOL_GLES2) && !defined(SOKOL_WGPU) + #error("sokol_app.h: unknown 3D API selected for emscripten, must be SOKOL_GLES3, SOKOL_GLES2 or SOKOL_WGPU") + #endif +#elif defined(_WIN32) + /* Windows (D3D11 or GL) */ + #include + #if (defined(WINAPI_FAMILY_PARTITION) && !WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP)) + #define _SAPP_UWP (1) + #if !defined(SOKOL_D3D11) + #error("sokol_app.h: unknown 3D API selected for UWP, must be SOKOL_D3D11") + #endif + #if !defined(__cplusplus) + #error("sokol_app.h: UWP bindings require C++/17") + #endif + #else + #define _SAPP_WIN32 (1) + #if !defined(SOKOL_D3D11) && !defined(SOKOL_GLCORE33) + #error("sokol_app.h: unknown 3D API selected for Win32, must be SOKOL_D3D11 or SOKOL_GLCORE33") + #endif + #endif +#elif defined(__ANDROID__) + /* Android */ + #define _SAPP_ANDROID (1) + #if !defined(SOKOL_GLES3) && !defined(SOKOL_GLES2) + #error("sokol_app.h: unknown 3D API selected for Android, must be SOKOL_GLES3 or SOKOL_GLES2") + #endif + #if defined(SOKOL_NO_ENTRY) + #error("sokol_app.h: SOKOL_NO_ENTRY is not supported on Android") + #endif +#elif defined(__linux__) || defined(__unix__) + /* Linux */ + #define _SAPP_LINUX (1) + #if !defined(SOKOL_GLCORE33) + #error("sokol_app.h: unknown 3D API selected for Linux, must be SOKOL_GLCORE33") + #endif +#else +#error "sokol_app.h: Unknown platform" +#endif + +#ifndef SOKOL_API_IMPL + #define SOKOL_API_IMPL +#endif +#ifndef SOKOL_DEBUG + #ifndef NDEBUG + #define SOKOL_DEBUG (1) + #endif +#endif +#ifndef SOKOL_ASSERT + #include + #define SOKOL_ASSERT(c) assert(c) +#endif +#ifndef SOKOL_UNREACHABLE + #define SOKOL_UNREACHABLE SOKOL_ASSERT(false) +#endif +#if !defined(SOKOL_CALLOC) || !defined(SOKOL_FREE) + #include +#endif +#if !defined(SOKOL_CALLOC) + #define SOKOL_CALLOC(n,s) calloc(n,s) +#endif +#if !defined(SOKOL_FREE) + #define SOKOL_FREE(p) free(p) +#endif +#ifndef SOKOL_LOG + #ifdef SOKOL_DEBUG + #if defined(__ANDROID__) + #include + #define SOKOL_LOG(s) { SOKOL_ASSERT(s); __android_log_write(ANDROID_LOG_INFO, "SOKOL_APP", s); } + #else + #include + #define SOKOL_LOG(s) { SOKOL_ASSERT(s); puts(s); } + #endif + #else + #define SOKOL_LOG(s) + #endif +#endif +#ifndef SOKOL_ABORT + #include + #define SOKOL_ABORT() abort() +#endif +#ifndef _SOKOL_PRIVATE + #if defined(__GNUC__) || defined(__clang__) + #define _SOKOL_PRIVATE __attribute__((unused)) static + #else + #define _SOKOL_PRIVATE static + #endif +#endif +#ifndef _SOKOL_UNUSED + #define _SOKOL_UNUSED(x) (void)(x) +#endif + +/*== PLATFORM SPECIFIC INCLUDES AND DEFINES ==================================*/ +#if defined(_SAPP_APPLE) + #if defined(SOKOL_METAL) + #import + #import + #endif + #if defined(_SAPP_MACOS) + #if !defined(SOKOL_METAL) + #ifndef GL_SILENCE_DEPRECATION + #define GL_SILENCE_DEPRECATION + #endif + #include + #endif + #elif defined(_SAPP_IOS) + #import + #if !defined(SOKOL_METAL) + #import + #endif + #endif +#elif defined(_SAPP_EMSCRIPTEN) + #if defined(SOKOL_WGPU) + #include + #endif + #include + #include +#elif defined(_SAPP_WIN32) + #ifdef _MSC_VER + #pragma warning(push) + #pragma warning(disable:4201) /* nonstandard extension used: nameless struct/union */ + #pragma 
warning(disable:4204) /* nonstandard extension used: non-constant aggregate initializer */ + #pragma warning(disable:4054) /* 'type cast': from function pointer */ + #pragma warning(disable:4055) /* 'type cast': from data pointer */ + #pragma warning(disable:4505) /* unreferenced local function has been removed */ + #pragma warning(disable:4115) /* /W4: 'ID3D11ModuleInstance': named type definition in parentheses (in d3d11.h) */ + #endif + #ifndef WIN32_LEAN_AND_MEAN + #define WIN32_LEAN_AND_MEAN + #endif + #ifndef NOMINMAX + #define NOMINMAX + #endif + #include + #include + #include + #if !defined(SOKOL_NO_ENTRY) // if SOKOL_NO_ENTRY is defined, it's the applications' responsibility to use the right subsystem + #if defined(SOKOL_WIN32_FORCE_MAIN) + #pragma comment (linker, "/subsystem:console") + #else + #pragma comment (linker, "/subsystem:windows") + #endif + #endif + #include /* freopen_s() */ + #include /* wcslen() */ + + #pragma comment (lib, "kernel32") + #pragma comment (lib, "user32") + #pragma comment (lib, "shell32") /* CommandLineToArgvW, DragQueryFileW, DragFinished */ + #if defined(SOKOL_D3D11) + #pragma comment (lib, "dxgi") + #pragma comment (lib, "d3d11") + #pragma comment (lib, "dxguid") + #endif + #if defined(SOKOL_GLCORE33) + #pragma comment (lib, "gdi32") + #endif + + #if defined(SOKOL_D3D11) + #ifndef D3D11_NO_HELPERS + #define D3D11_NO_HELPERS + #endif + #include + #include + // DXGI_SWAP_EFFECT_FLIP_DISCARD is only defined in newer Windows SDKs, so don't depend on it + #define _SAPP_DXGI_SWAP_EFFECT_FLIP_DISCARD (4) + #endif + #ifndef WM_MOUSEHWHEEL /* see https://github.com/floooh/sokol/issues/138 */ + #define WM_MOUSEHWHEEL (0x020E) + #endif +#elif defined(_SAPP_UWP) + #ifndef NOMINMAX + #define NOMINMAX + #endif + #ifdef _MSC_VER + #pragma warning(push) + #pragma warning(disable:4201) /* nonstandard extension used: nameless struct/union */ + #pragma warning(disable:4054) /* 'type cast': from function pointer */ + #pragma warning(disable:4055) /* 'type cast': from data pointer */ + #pragma warning(disable:4505) /* unreferenced local function has been removed */ + #pragma warning(disable:4115) /* /W4: 'ID3D11ModuleInstance': named type definition in parentheses (in d3d11.h) */ + #endif + #include + #include + #include + #include + #include + #include + #include + #include + #include + #include + #include + + #include + #include + #include + + #pragma comment (lib, "WindowsApp") + #pragma comment (lib, "dxguid") +#elif defined(_SAPP_ANDROID) + #include + #include + #include + #include + #include +#elif defined(_SAPP_LINUX) + #define GL_GLEXT_PROTOTYPES + #include + #include + #include + #include + #include + #include + #include + #include + #include /* CARD32 */ + #include /* dlopen, dlsym, dlclose */ + #include /* LONG_MAX */ + #include /* only used a linker-guard, search for _sapp_linux_run() and see first comment */ +#endif + +/*== MACOS DECLARATIONS ======================================================*/ +#if defined(_SAPP_MACOS) +// __v_ +@interface SokolWindow : NSWindow { +} +@end +@interface MyView2 : NSView +@end + +MyView2* g_view; + +// A custom NSWindow interface to handle events in borderless windows. 
+@implementation SokolWindow +- (BOOL)canBecomeKeyWindow { return YES; } // needed for NSWindowStyleMaskBorderless +- (BOOL)canBecomeMainWindow { return YES; } +@end +// __v_ + +@interface _sapp_macos_app_delegate : NSObject +@end +@interface _sapp_macos_window : NSWindow +@end +@interface _sapp_macos_window_delegate : NSObject +@end +#if defined(SOKOL_METAL) + @interface _sapp_macos_view : MTKView + @end +#elif defined(SOKOL_GLCORE33) + @interface _sapp_macos_view : NSOpenGLView + - (void)timerFired:(id)sender; + @end +#endif // SOKOL_GLCORE33 + +typedef struct { + uint32_t flags_changed_store; + uint8_t mouse_buttons; +// NSWindow* window; +// SokolWindow* window; // __v_ + _sapp_macos_window* window; // __v_ + NSTrackingArea* tracking_area; + _sapp_macos_app_delegate* app_dlg; + _sapp_macos_window_delegate* win_dlg; + _sapp_macos_view* view; + #if defined(SOKOL_METAL) + id mtl_device; + #endif +} _sapp_macos_t; + +#endif // _SAPP_MACOS + +/*== IOS DECLARATIONS ========================================================*/ +#if defined(_SAPP_IOS) + +@interface _sapp_app_delegate : NSObject +@end +@interface _sapp_textfield_dlg : NSObject +- (void)keyboardWasShown:(NSNotification*)notif; +- (void)keyboardWillBeHidden:(NSNotification*)notif; +- (void)keyboardDidChangeFrame:(NSNotification*)notif; +@end +#if defined(SOKOL_METAL) + @interface _sapp_ios_view : MTKView; + @end +#else + @interface _sapp_ios_view : GLKView + @end +#endif + +typedef struct { + UIWindow* window; + _sapp_ios_view* view; + UITextField* textfield; + _sapp_textfield_dlg* textfield_dlg; + #if defined(SOKOL_METAL) + UIViewController* view_ctrl; + id mtl_device; + #else + GLKViewController* view_ctrl; + EAGLContext* eagl_ctx; + #endif + bool suspended; +} _sapp_ios_t; + +#endif // _SAPP_IOS + +/*== EMSCRIPTEN DECLARATIONS =================================================*/ +#if defined(_SAPP_EMSCRIPTEN) + +#if defined(SOKOL_WGPU) +typedef struct { + int state; + WGPUDevice device; + WGPUSwapChain swapchain; + WGPUTextureFormat render_format; + WGPUTexture msaa_tex; + WGPUTexture depth_stencil_tex; + WGPUTextureView swapchain_view; + WGPUTextureView msaa_view; + WGPUTextureView depth_stencil_view; +} _sapp_wgpu_t; +#endif + +typedef struct { + bool textfield_created; + bool wants_show_keyboard; + bool wants_hide_keyboard; + bool mouse_lock_requested; + #if defined(SOKOL_WGPU) + _sapp_wgpu_t wgpu; + #endif +} _sapp_emsc_t; +#endif // _SAPP_EMSCRIPTEN + +/*== WIN32 DECLARATIONS ======================================================*/ +#if defined(SOKOL_D3D11) && (defined(_SAPP_WIN32) || defined(_SAPP_UWP)) +typedef struct { + ID3D11Device* device; + ID3D11DeviceContext* device_context; + ID3D11Texture2D* rt; + ID3D11RenderTargetView* rtv; + ID3D11Texture2D* msaa_rt; + ID3D11RenderTargetView* msaa_rtv; + ID3D11Texture2D* ds; + ID3D11DepthStencilView* dsv; + DXGI_SWAP_CHAIN_DESC swap_chain_desc; + IDXGISwapChain* swap_chain; +} _sapp_d3d11_t; +#endif + +/*== WIN32 DECLARATIONS ======================================================*/ +#if defined(_SAPP_WIN32) + +#ifndef DPI_ENUMS_DECLARED +typedef enum PROCESS_DPI_AWARENESS +{ + PROCESS_DPI_UNAWARE = 0, + PROCESS_SYSTEM_DPI_AWARE = 1, + PROCESS_PER_MONITOR_DPI_AWARE = 2 +} PROCESS_DPI_AWARENESS; +typedef enum MONITOR_DPI_TYPE { + MDT_EFFECTIVE_DPI = 0, + MDT_ANGULAR_DPI = 1, + MDT_RAW_DPI = 2, + MDT_DEFAULT = MDT_EFFECTIVE_DPI +} MONITOR_DPI_TYPE; +#endif /*DPI_ENUMS_DECLARED*/ + +typedef struct { + bool aware; + float content_scale; + float window_scale; + float mouse_scale; +} 
_sapp_win32_dpi_t; + +typedef struct { + HWND hwnd; + HDC dc; + UINT orig_codepage; + LONG mouse_locked_x, mouse_locked_y; + bool is_win10_or_greater; + bool in_create_window; + bool iconified; + bool mouse_tracked; + uint8_t mouse_capture_mask; + _sapp_win32_dpi_t dpi; + bool raw_input_mousepos_valid; + LONG raw_input_mousepos_x; + LONG raw_input_mousepos_y; + uint8_t raw_input_data[256]; +} _sapp_win32_t; + +#if defined(SOKOL_GLCORE33) +#define WGL_NUMBER_PIXEL_FORMATS_ARB 0x2000 +#define WGL_SUPPORT_OPENGL_ARB 0x2010 +#define WGL_DRAW_TO_WINDOW_ARB 0x2001 +#define WGL_PIXEL_TYPE_ARB 0x2013 +#define WGL_TYPE_RGBA_ARB 0x202b +#define WGL_ACCELERATION_ARB 0x2003 +#define WGL_NO_ACCELERATION_ARB 0x2025 +#define WGL_RED_BITS_ARB 0x2015 +#define WGL_GREEN_BITS_ARB 0x2017 +#define WGL_BLUE_BITS_ARB 0x2019 +#define WGL_ALPHA_BITS_ARB 0x201b +#define WGL_DEPTH_BITS_ARB 0x2022 +#define WGL_STENCIL_BITS_ARB 0x2023 +#define WGL_DOUBLE_BUFFER_ARB 0x2011 +#define WGL_SAMPLES_ARB 0x2042 +#define WGL_CONTEXT_FORWARD_COMPATIBLE_BIT_ARB 0x00000002 +#define WGL_CONTEXT_PROFILE_MASK_ARB 0x9126 +#define WGL_CONTEXT_CORE_PROFILE_BIT_ARB 0x00000001 +#define WGL_CONTEXT_MAJOR_VERSION_ARB 0x2091 +#define WGL_CONTEXT_MINOR_VERSION_ARB 0x2092 +#define WGL_CONTEXT_FLAGS_ARB 0x2094 +#define ERROR_INVALID_VERSION_ARB 0x2095 +#define ERROR_INVALID_PROFILE_ARB 0x2096 +#define ERROR_INCOMPATIBLE_DEVICE_CONTEXTS_ARB 0x2054 +typedef BOOL (WINAPI * PFNWGLSWAPINTERVALEXTPROC)(int); +typedef BOOL (WINAPI * PFNWGLGETPIXELFORMATATTRIBIVARBPROC)(HDC,int,int,UINT,const int*,int*); +typedef const char* (WINAPI * PFNWGLGETEXTENSIONSSTRINGEXTPROC)(void); +typedef const char* (WINAPI * PFNWGLGETEXTENSIONSSTRINGARBPROC)(HDC); +typedef HGLRC (WINAPI * PFNWGLCREATECONTEXTATTRIBSARBPROC)(HDC,HGLRC,const int*); +typedef HGLRC (WINAPI * PFN_wglCreateContext)(HDC); +typedef BOOL (WINAPI * PFN_wglDeleteContext)(HGLRC); +typedef PROC (WINAPI * PFN_wglGetProcAddress)(LPCSTR); +typedef HDC (WINAPI * PFN_wglGetCurrentDC)(void); +typedef BOOL (WINAPI * PFN_wglMakeCurrent)(HDC,HGLRC); + +typedef struct { + HINSTANCE opengl32; + HGLRC gl_ctx; + PFN_wglCreateContext CreateContext; + PFN_wglDeleteContext DeleteContext; + PFN_wglGetProcAddress GetProcAddress; + PFN_wglGetCurrentDC GetCurrentDC; + PFN_wglMakeCurrent MakeCurrent; + PFNWGLSWAPINTERVALEXTPROC SwapIntervalEXT; + PFNWGLGETPIXELFORMATATTRIBIVARBPROC GetPixelFormatAttribivARB; + PFNWGLGETEXTENSIONSSTRINGEXTPROC GetExtensionsStringEXT; + PFNWGLGETEXTENSIONSSTRINGARBPROC GetExtensionsStringARB; + PFNWGLCREATECONTEXTATTRIBSARBPROC CreateContextAttribsARB; + bool ext_swap_control; + bool arb_multisample; + bool arb_pixel_format; + bool arb_create_context; + bool arb_create_context_profile; + HWND msg_hwnd; + HDC msg_dc; +} _sapp_wgl_t; +#endif // SOKOL_GLCORE33 + +#endif // _SAPP_WIN32 + +/*== UWP DECLARATIONS ======================================================*/ +#if defined(_SAPP_UWP) + +typedef struct { + float content_scale; + float window_scale; + float mouse_scale; +} _sapp_uwp_dpi_t; + +typedef struct { + bool mouse_tracked; + uint8_t mouse_buttons; + _sapp_uwp_dpi_t dpi; +} _sapp_uwp_t; + +#endif // _SAPP_UWP + +/*== ANDROID DECLARATIONS ====================================================*/ + +#if defined(_SAPP_ANDROID) +typedef enum { + _SOKOL_ANDROID_MSG_CREATE, + _SOKOL_ANDROID_MSG_RESUME, + _SOKOL_ANDROID_MSG_PAUSE, + _SOKOL_ANDROID_MSG_FOCUS, + _SOKOL_ANDROID_MSG_NO_FOCUS, + _SOKOL_ANDROID_MSG_SET_NATIVE_WINDOW, + _SOKOL_ANDROID_MSG_SET_INPUT_QUEUE, + 
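+    /* like the messages above, this one is passed from the ANativeActivity callbacks to
+       the sokol_app loop thread through the pipe fds in _sapp_android_pt_t below */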
_SOKOL_ANDROID_MSG_DESTROY, +} _sapp_android_msg_t; + +typedef struct { + pthread_t thread; + pthread_mutex_t mutex; + pthread_cond_t cond; + int read_from_main_fd; + int write_from_main_fd; +} _sapp_android_pt_t; + +typedef struct { + ANativeWindow* window; + AInputQueue* input; +} _sapp_android_resources_t; + +typedef struct { + ANativeActivity* activity; + _sapp_android_pt_t pt; + _sapp_android_resources_t pending; + _sapp_android_resources_t current; + ALooper* looper; + bool is_thread_started; + bool is_thread_stopping; + bool is_thread_stopped; + bool has_created; + bool has_resumed; + bool has_focus; + EGLConfig config; + EGLDisplay display; + EGLContext context; + EGLSurface surface; +} _sapp_android_t; + +#endif // _SAPP_ANDROID + +/*== LINUX DECLARATIONS ======================================================*/ +#if defined(_SAPP_LINUX) + +#define _SAPP_X11_XDND_VERSION (5) + +#define GLX_VENDOR 1 +#define GLX_RGBA_BIT 0x00000001 +#define GLX_WINDOW_BIT 0x00000001 +#define GLX_DRAWABLE_TYPE 0x8010 +#define GLX_RENDER_TYPE 0x8011 +#define GLX_DOUBLEBUFFER 5 +#define GLX_RED_SIZE 8 +#define GLX_GREEN_SIZE 9 +#define GLX_BLUE_SIZE 10 +#define GLX_ALPHA_SIZE 11 +#define GLX_DEPTH_SIZE 12 +#define GLX_STENCIL_SIZE 13 +#define GLX_SAMPLES 0x186a1 +#define GLX_CONTEXT_CORE_PROFILE_BIT_ARB 0x00000001 +#define GLX_CONTEXT_PROFILE_MASK_ARB 0x9126 +#define GLX_CONTEXT_FORWARD_COMPATIBLE_BIT_ARB 0x00000002 +#define GLX_CONTEXT_MAJOR_VERSION_ARB 0x2091 +#define GLX_CONTEXT_MINOR_VERSION_ARB 0x2092 +#define GLX_CONTEXT_FLAGS_ARB 0x2094 + +typedef XID GLXWindow; +typedef XID GLXDrawable; +typedef struct __GLXFBConfig* GLXFBConfig; +typedef struct __GLXcontext* GLXContext; +typedef void (*__GLXextproc)(void); + +typedef int (*PFNGLXGETFBCONFIGATTRIBPROC)(Display*,GLXFBConfig,int,int*); +typedef const char* (*PFNGLXGETCLIENTSTRINGPROC)(Display*,int); +typedef Bool (*PFNGLXQUERYEXTENSIONPROC)(Display*,int*,int*); +typedef Bool (*PFNGLXQUERYVERSIONPROC)(Display*,int*,int*); +typedef void (*PFNGLXDESTROYCONTEXTPROC)(Display*,GLXContext); +typedef Bool (*PFNGLXMAKECURRENTPROC)(Display*,GLXDrawable,GLXContext); +typedef void (*PFNGLXSWAPBUFFERSPROC)(Display*,GLXDrawable); +typedef const char* (*PFNGLXQUERYEXTENSIONSSTRINGPROC)(Display*,int); +typedef GLXFBConfig* (*PFNGLXGETFBCONFIGSPROC)(Display*,int,int*); +typedef __GLXextproc (* PFNGLXGETPROCADDRESSPROC)(const char *procName); +typedef void (*PFNGLXSWAPINTERVALEXTPROC)(Display*,GLXDrawable,int); +typedef XVisualInfo* (*PFNGLXGETVISUALFROMFBCONFIGPROC)(Display*,GLXFBConfig); +typedef GLXWindow (*PFNGLXCREATEWINDOWPROC)(Display*,GLXFBConfig,Window,const int*); +typedef void (*PFNGLXDESTROYWINDOWPROC)(Display*,GLXWindow); + +typedef int (*PFNGLXSWAPINTERVALMESAPROC)(int); +typedef GLXContext (*PFNGLXCREATECONTEXTATTRIBSARBPROC)(Display*,GLXFBConfig,GLXContext,Bool,const int*); + +typedef struct { + bool available; + int major_opcode; + int event_base; + int error_base; + int major; + int minor; +} _sapp_xi_t; + +typedef struct { + int version; + Window source; + Atom format; + Atom XdndAware; + Atom XdndEnter; + Atom XdndPosition; + Atom XdndStatus; + Atom XdndActionCopy; + Atom XdndDrop; + Atom XdndFinished; + Atom XdndSelection; + Atom XdndTypeList; + Atom text_uri_list; +} _sapp_xdnd_t; + +typedef struct { + uint8_t mouse_buttons; + Display* display; + int screen; + Window root; + Colormap colormap; + Window window; + Cursor hidden_cursor; + int window_state; + float dpi; + unsigned char error_code; + Atom UTF8_STRING; + Atom WM_PROTOCOLS; + Atom 
WM_DELETE_WINDOW; + Atom WM_STATE; + Atom NET_WM_NAME; + Atom NET_WM_ICON_NAME; + Atom NET_WM_STATE; + Atom NET_WM_STATE_FULLSCREEN; + _sapp_xi_t xi; + _sapp_xdnd_t xdnd; +} _sapp_x11_t; + +typedef struct { + void* libgl; + int major; + int minor; + int event_base; + int error_base; + GLXContext ctx; + GLXWindow window; + + // GLX 1.3 functions + PFNGLXGETFBCONFIGSPROC GetFBConfigs; + PFNGLXGETFBCONFIGATTRIBPROC GetFBConfigAttrib; + PFNGLXGETCLIENTSTRINGPROC GetClientString; + PFNGLXQUERYEXTENSIONPROC QueryExtension; + PFNGLXQUERYVERSIONPROC QueryVersion; + PFNGLXDESTROYCONTEXTPROC DestroyContext; + PFNGLXMAKECURRENTPROC MakeCurrent; + PFNGLXSWAPBUFFERSPROC SwapBuffers; + PFNGLXQUERYEXTENSIONSSTRINGPROC QueryExtensionsString; + PFNGLXGETVISUALFROMFBCONFIGPROC GetVisualFromFBConfig; + PFNGLXCREATEWINDOWPROC CreateWindow; + PFNGLXDESTROYWINDOWPROC DestroyWindow; + + // GLX 1.4 and extension functions + PFNGLXGETPROCADDRESSPROC GetProcAddress; + PFNGLXGETPROCADDRESSPROC GetProcAddressARB; + PFNGLXSWAPINTERVALEXTPROC SwapIntervalEXT; + PFNGLXSWAPINTERVALMESAPROC SwapIntervalMESA; + PFNGLXCREATECONTEXTATTRIBSARBPROC CreateContextAttribsARB; + + // extension availability + bool EXT_swap_control; + bool MESA_swap_control; + bool ARB_multisample; + bool ARB_create_context; + bool ARB_create_context_profile; +} _sapp_glx_t; + +#endif // _SAPP_LINUX + +/*== COMMON DECLARATIONS =====================================================*/ + +/* helper macros */ +#define _sapp_def(val, def) (((val) == 0) ? (def) : (val)) +#define _sapp_absf(a) (((a)<0.0f)?-(a):(a)) + +#define _SAPP_MAX_TITLE_LENGTH (128) +/* NOTE: the pixel format values *must* be compatible with sg_pixel_format */ +#define _SAPP_PIXELFORMAT_RGBA8 (23) +#define _SAPP_PIXELFORMAT_BGRA8 (27) +#define _SAPP_PIXELFORMAT_DEPTH (41) +#define _SAPP_PIXELFORMAT_DEPTH_STENCIL (42) + +#if defined(_SAPP_MACOS) || defined(_SAPP_IOS) + // this is ARC compatible + #if defined(__cplusplus) + #define _SAPP_CLEAR(type, item) { item = (type) { }; } + #else + #define _SAPP_CLEAR(type, item) { item = (type) { 0 }; } + #endif +#else + #define _SAPP_CLEAR(type, item) { memset(&item, 0, sizeof(item)); } +#endif + +typedef struct { + bool enabled; + int buf_size; + char* buffer; +} _sapp_clipboard_t; + +typedef struct { + bool enabled; + int max_files; + int max_path_length; + int num_files; + int buf_size; + char* buffer; +} _sapp_drop_t; + +typedef struct { + float x, y; + float dx, dy; + bool shown; + bool locked; + bool pos_valid; +} _sapp_mouse_t; + +typedef struct { + sapp_desc desc; + bool valid; + bool fullscreen; + bool gles2_fallback; + bool first_frame; + bool init_called; + bool cleanup_called; + bool quit_requested; + bool quit_ordered; + bool event_consumed; + bool html5_ask_leave_site; + bool onscreen_keyboard_shown; + int window_width; + int window_height; + int framebuffer_width; + int framebuffer_height; + int sample_count; + int swap_interval; + float dpi_scale; + uint64_t frame_count; + sapp_event event; + _sapp_mouse_t mouse; + _sapp_clipboard_t clipboard; + _sapp_drop_t drop; + #if defined(_SAPP_MACOS) + _sapp_macos_t macos; + #elif defined(_SAPP_IOS) + _sapp_ios_t ios; + #elif defined(_SAPP_EMSCRIPTEN) + _sapp_emsc_t emsc; + #elif defined(_SAPP_WIN32) + _sapp_win32_t win32; + #if defined(SOKOL_D3D11) + _sapp_d3d11_t d3d11; + #elif defined(SOKOL_GLCORE33) + _sapp_wgl_t wgl; + #endif + #elif defined(_SAPP_UWP) + _sapp_uwp_t uwp; + #if defined(SOKOL_D3D11) + _sapp_d3d11_t d3d11; + #endif + #elif defined(_SAPP_ANDROID) + _sapp_android_t android; 
+ #elif defined(_SAPP_LINUX) + _sapp_x11_t x11; + _sapp_glx_t glx; + #endif + char html5_canvas_selector[_SAPP_MAX_TITLE_LENGTH]; + char window_title[_SAPP_MAX_TITLE_LENGTH]; /* UTF-8 */ + wchar_t window_title_wide[_SAPP_MAX_TITLE_LENGTH]; /* UTF-32 or UCS-2 */ + sapp_keycode keycodes[SAPP_MAX_KEYCODES]; + + /* V patches */ + bool __v_native_render; /* V patch to allow for native rendering */ +} _sapp_t; +static _sapp_t _sapp; + +/*=== PRIVATE HELPER FUNCTIONS ===============================================*/ +_SOKOL_PRIVATE void _sapp_fail(const char* msg) { + if (_sapp.desc.fail_cb) { + _sapp.desc.fail_cb(msg); + } + else if (_sapp.desc.fail_userdata_cb) { + _sapp.desc.fail_userdata_cb(msg, _sapp.desc.user_data); + } + else { + SOKOL_LOG(msg); + } + SOKOL_ABORT(); +} + +_SOKOL_PRIVATE void _sapp_call_init(void) { + if (_sapp.desc.init_cb) { + _sapp.desc.init_cb(); + } + else if (_sapp.desc.init_userdata_cb) { + _sapp.desc.init_userdata_cb(_sapp.desc.user_data); + } + _sapp.init_called = true; +} + +_SOKOL_PRIVATE void _sapp_call_frame(void) { + if (_sapp.__v_native_render) { + return; + } + if (_sapp.init_called && !_sapp.cleanup_called) { + if (_sapp.desc.frame_cb) { + _sapp.desc.frame_cb(); + } + else if (_sapp.desc.frame_userdata_cb) { + _sapp.desc.frame_userdata_cb(_sapp.desc.user_data); + } + } +} + +// __v_ +_SOKOL_PRIVATE void _sapp_call_frame_native(void) { +//puts("_sapp_call_frame_native()"); +//printf("init called=%d cleanup_called=%d\n", _sapp.init_called,_sapp.cleanup_called); + if (_sapp.init_called && !_sapp.cleanup_called) { + if (_sapp.desc.frame_cb) { + _sapp.desc.frame_cb(); + } + else if (_sapp.desc.frame_userdata_cb) { + _sapp.desc.frame_userdata_cb(_sapp.desc.user_data); + } + } +} + + +_SOKOL_PRIVATE void _sapp_call_cleanup(void) { + if (!_sapp.cleanup_called) { + if (_sapp.desc.cleanup_cb) { + _sapp.desc.cleanup_cb(); + } + else if (_sapp.desc.cleanup_userdata_cb) { + _sapp.desc.cleanup_userdata_cb(_sapp.desc.user_data); + } + _sapp.cleanup_called = true; + } +} + +_SOKOL_PRIVATE bool _sapp_call_event(const sapp_event* e) { + if (!_sapp.cleanup_called) { + if (_sapp.desc.event_cb) { + _sapp.desc.event_cb(e); + } + else if (_sapp.desc.event_userdata_cb) { + _sapp.desc.event_userdata_cb(e, _sapp.desc.user_data); + } + } + if (_sapp.event_consumed) { + _sapp.event_consumed = false; + return true; + } + else { + return false; + } +} + +_SOKOL_PRIVATE char* _sapp_dropped_file_path_ptr(int index) { + SOKOL_ASSERT(_sapp.drop.buffer); + SOKOL_ASSERT((index >= 0) && (index <= _sapp.drop.max_files)); + int offset = index * _sapp.drop.max_path_length; + SOKOL_ASSERT(offset < _sapp.drop.buf_size); + return &_sapp.drop.buffer[offset]; +} + +/* Copy a string into a fixed size buffer with guaranteed zero- + termination. + + Return false if the string didn't fit into the buffer and had to be clamped. + + FIXME: Currently UTF-8 strings might become invalid if the string + is clamped, because the last zero-byte might be written into + the middle of a multi-byte sequence. +*/ +_SOKOL_PRIVATE bool _sapp_strcpy(const char* src, char* dst, int max_len) { + SOKOL_ASSERT(src && dst && (max_len > 0)); + char* const end = &(dst[max_len-1]); + char c = 0; + for (int i = 0; i < max_len; i++) { + c = *src; + if (c != 0) { + src++; + } + *dst++ = c; + } + /* truncated? 
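+       (c can only still be non-zero here if the zero terminator was never reached
+       inside the copy loop, i.e. the source string had to be clamped)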
*/ + if (c != 0) { + *end = 0; + return false; + } + else { + return true; + } +} + +_SOKOL_PRIVATE sapp_desc _sapp_desc_defaults(const sapp_desc* in_desc) { + sapp_desc desc = *in_desc; + desc.width = _sapp_def(desc.width, 640); + desc.height = _sapp_def(desc.height, 480); + desc.sample_count = _sapp_def(desc.sample_count, 1); + desc.swap_interval = _sapp_def(desc.swap_interval, 1); + desc.html5_canvas_name = _sapp_def(desc.html5_canvas_name, "canvas"); + desc.clipboard_size = _sapp_def(desc.clipboard_size, 8192); + desc.max_dropped_files = _sapp_def(desc.max_dropped_files, 1); + desc.max_dropped_file_path_length = _sapp_def(desc.max_dropped_file_path_length, 2048); + desc.window_title = _sapp_def(desc.window_title, "sokol_app"); + return desc; +} + +_SOKOL_PRIVATE void _sapp_init_state(const sapp_desc* desc) { + _SAPP_CLEAR(_sapp_t, _sapp); + _sapp.desc = _sapp_desc_defaults(desc); + _sapp.first_frame = true; + _sapp.window_width = _sapp.desc.width; + _sapp.window_height = _sapp.desc.height; + _sapp.framebuffer_width = _sapp.window_width; + _sapp.framebuffer_height = _sapp.window_height; + _sapp.sample_count = _sapp.desc.sample_count; + _sapp.swap_interval = _sapp.desc.swap_interval; + _sapp.html5_canvas_selector[0] = '#'; + _sapp_strcpy(_sapp.desc.html5_canvas_name, &_sapp.html5_canvas_selector[1], sizeof(_sapp.html5_canvas_selector) - 1); + _sapp.desc.html5_canvas_name = &_sapp.html5_canvas_selector[1]; + _sapp.html5_ask_leave_site = _sapp.desc.html5_ask_leave_site; + _sapp.clipboard.enabled = _sapp.desc.enable_clipboard; + if (_sapp.clipboard.enabled) { + _sapp.clipboard.buf_size = _sapp.desc.clipboard_size; + _sapp.clipboard.buffer = (char*) SOKOL_CALLOC(1, (size_t)_sapp.clipboard.buf_size); + } + _sapp.drop.enabled = _sapp.desc.enable_dragndrop; + if (_sapp.drop.enabled) { + _sapp.drop.max_files = _sapp.desc.max_dropped_files; + _sapp.drop.max_path_length = _sapp.desc.max_dropped_file_path_length; + _sapp.drop.buf_size = _sapp.drop.max_files * _sapp.drop.max_path_length; + _sapp.drop.buffer = (char*) SOKOL_CALLOC(1, (size_t)_sapp.drop.buf_size); + } + _sapp_strcpy(_sapp.desc.window_title, _sapp.window_title, sizeof(_sapp.window_title)); + _sapp.desc.window_title = _sapp.window_title; + _sapp.dpi_scale = 1.0f; + _sapp.fullscreen = _sapp.desc.fullscreen; + _sapp.mouse.shown = true; + _sapp.__v_native_render = _sapp.desc.__v_native_render; +} + +_SOKOL_PRIVATE void _sapp_discard_state(void) { + if (_sapp.clipboard.enabled) { + SOKOL_ASSERT(_sapp.clipboard.buffer); + SOKOL_FREE((void*)_sapp.clipboard.buffer); + } + if (_sapp.drop.enabled) { + SOKOL_ASSERT(_sapp.drop.buffer); + SOKOL_FREE((void*)_sapp.drop.buffer); + } + _SAPP_CLEAR(_sapp_t, _sapp); +} + +_SOKOL_PRIVATE void _sapp_init_event(sapp_event_type type) { + memset(&_sapp.event, 0, sizeof(_sapp.event)); + _sapp.event.type = type; + _sapp.event.frame_count = _sapp.frame_count; + _sapp.event.mouse_button = SAPP_MOUSEBUTTON_INVALID; + _sapp.event.window_width = _sapp.window_width; + _sapp.event.window_height = _sapp.window_height; + _sapp.event.framebuffer_width = _sapp.framebuffer_width; + _sapp.event.framebuffer_height = _sapp.framebuffer_height; + _sapp.event.mouse_x = _sapp.mouse.x; + _sapp.event.mouse_y = _sapp.mouse.y; + _sapp.event.mouse_dx = _sapp.mouse.dx; + _sapp.event.mouse_dy = _sapp.mouse.dy; +} + +_SOKOL_PRIVATE bool _sapp_events_enabled(void) { + /* only send events when an event callback is set, and the init function was called */ + return (_sapp.desc.event_cb || _sapp.desc.event_userdata_cb) && _sapp.init_called; 
+} + +_SOKOL_PRIVATE sapp_keycode _sapp_translate_key(int scan_code) { + if ((scan_code >= 0) && (scan_code < SAPP_MAX_KEYCODES)) { + return _sapp.keycodes[scan_code]; + } + else { + return SAPP_KEYCODE_INVALID; + } +} + +_SOKOL_PRIVATE void _sapp_clear_drop_buffer(void) { + if (_sapp.drop.enabled) { + SOKOL_ASSERT(_sapp.drop.buffer); + memset(_sapp.drop.buffer, 0, (size_t)_sapp.drop.buf_size); + } +} + +_SOKOL_PRIVATE void _sapp_frame(void) { + if (_sapp.first_frame) { + _sapp.first_frame = false; + _sapp_call_init(); + } + _sapp_call_frame(); + _sapp.frame_count++; +} + +/*== MacOS/iOS ===============================================================*/ +#if defined(_SAPP_APPLE) + +#if __has_feature(objc_arc) +#define _SAPP_OBJC_RELEASE(obj) { obj = nil; } +#else +#define _SAPP_OBJC_RELEASE(obj) { [obj release]; obj = nil; } +#endif + +/*== MacOS ===================================================================*/ +#if defined(_SAPP_MACOS) + +_SOKOL_PRIVATE void _sapp_macos_init_keytable(void) { + _sapp.keycodes[0x1D] = SAPP_KEYCODE_0; + _sapp.keycodes[0x12] = SAPP_KEYCODE_1; + _sapp.keycodes[0x13] = SAPP_KEYCODE_2; + _sapp.keycodes[0x14] = SAPP_KEYCODE_3; + _sapp.keycodes[0x15] = SAPP_KEYCODE_4; + _sapp.keycodes[0x17] = SAPP_KEYCODE_5; + _sapp.keycodes[0x16] = SAPP_KEYCODE_6; + _sapp.keycodes[0x1A] = SAPP_KEYCODE_7; + _sapp.keycodes[0x1C] = SAPP_KEYCODE_8; + _sapp.keycodes[0x19] = SAPP_KEYCODE_9; + _sapp.keycodes[0x00] = SAPP_KEYCODE_A; + _sapp.keycodes[0x0B] = SAPP_KEYCODE_B; + _sapp.keycodes[0x08] = SAPP_KEYCODE_C; + _sapp.keycodes[0x02] = SAPP_KEYCODE_D; + _sapp.keycodes[0x0E] = SAPP_KEYCODE_E; + _sapp.keycodes[0x03] = SAPP_KEYCODE_F; + _sapp.keycodes[0x05] = SAPP_KEYCODE_G; + _sapp.keycodes[0x04] = SAPP_KEYCODE_H; + _sapp.keycodes[0x22] = SAPP_KEYCODE_I; + _sapp.keycodes[0x26] = SAPP_KEYCODE_J; + _sapp.keycodes[0x28] = SAPP_KEYCODE_K; + _sapp.keycodes[0x25] = SAPP_KEYCODE_L; + _sapp.keycodes[0x2E] = SAPP_KEYCODE_M; + _sapp.keycodes[0x2D] = SAPP_KEYCODE_N; + _sapp.keycodes[0x1F] = SAPP_KEYCODE_O; + _sapp.keycodes[0x23] = SAPP_KEYCODE_P; + _sapp.keycodes[0x0C] = SAPP_KEYCODE_Q; + _sapp.keycodes[0x0F] = SAPP_KEYCODE_R; + _sapp.keycodes[0x01] = SAPP_KEYCODE_S; + _sapp.keycodes[0x11] = SAPP_KEYCODE_T; + _sapp.keycodes[0x20] = SAPP_KEYCODE_U; + _sapp.keycodes[0x09] = SAPP_KEYCODE_V; + _sapp.keycodes[0x0D] = SAPP_KEYCODE_W; + _sapp.keycodes[0x07] = SAPP_KEYCODE_X; + _sapp.keycodes[0x10] = SAPP_KEYCODE_Y; + _sapp.keycodes[0x06] = SAPP_KEYCODE_Z; + _sapp.keycodes[0x27] = SAPP_KEYCODE_APOSTROPHE; + _sapp.keycodes[0x2A] = SAPP_KEYCODE_BACKSLASH; + _sapp.keycodes[0x2B] = SAPP_KEYCODE_COMMA; + _sapp.keycodes[0x18] = SAPP_KEYCODE_EQUAL; + _sapp.keycodes[0x32] = SAPP_KEYCODE_GRAVE_ACCENT; + _sapp.keycodes[0x21] = SAPP_KEYCODE_LEFT_BRACKET; + _sapp.keycodes[0x1B] = SAPP_KEYCODE_MINUS; + _sapp.keycodes[0x2F] = SAPP_KEYCODE_PERIOD; + _sapp.keycodes[0x1E] = SAPP_KEYCODE_RIGHT_BRACKET; + _sapp.keycodes[0x29] = SAPP_KEYCODE_SEMICOLON; + _sapp.keycodes[0x2C] = SAPP_KEYCODE_SLASH; + _sapp.keycodes[0x0A] = SAPP_KEYCODE_WORLD_1; + _sapp.keycodes[0x33] = SAPP_KEYCODE_BACKSPACE; + _sapp.keycodes[0x39] = SAPP_KEYCODE_CAPS_LOCK; + _sapp.keycodes[0x75] = SAPP_KEYCODE_DELETE; + _sapp.keycodes[0x7D] = SAPP_KEYCODE_DOWN; + _sapp.keycodes[0x77] = SAPP_KEYCODE_END; + _sapp.keycodes[0x24] = SAPP_KEYCODE_ENTER; + _sapp.keycodes[0x35] = SAPP_KEYCODE_ESCAPE; + _sapp.keycodes[0x7A] = SAPP_KEYCODE_F1; + _sapp.keycodes[0x78] = SAPP_KEYCODE_F2; + _sapp.keycodes[0x63] = SAPP_KEYCODE_F3; + _sapp.keycodes[0x76] = SAPP_KEYCODE_F4; 
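+    /* the indices used here and below are macOS virtual key codes (the Carbon kVK_* values),
+       which identify physical key positions independent of the active keyboard layout */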
+ _sapp.keycodes[0x60] = SAPP_KEYCODE_F5; + _sapp.keycodes[0x61] = SAPP_KEYCODE_F6; + _sapp.keycodes[0x62] = SAPP_KEYCODE_F7; + _sapp.keycodes[0x64] = SAPP_KEYCODE_F8; + _sapp.keycodes[0x65] = SAPP_KEYCODE_F9; + _sapp.keycodes[0x6D] = SAPP_KEYCODE_F10; + _sapp.keycodes[0x67] = SAPP_KEYCODE_F11; + _sapp.keycodes[0x6F] = SAPP_KEYCODE_F12; + _sapp.keycodes[0x69] = SAPP_KEYCODE_F13; + _sapp.keycodes[0x6B] = SAPP_KEYCODE_F14; + _sapp.keycodes[0x71] = SAPP_KEYCODE_F15; + _sapp.keycodes[0x6A] = SAPP_KEYCODE_F16; + _sapp.keycodes[0x40] = SAPP_KEYCODE_F17; + _sapp.keycodes[0x4F] = SAPP_KEYCODE_F18; + _sapp.keycodes[0x50] = SAPP_KEYCODE_F19; + _sapp.keycodes[0x5A] = SAPP_KEYCODE_F20; + _sapp.keycodes[0x73] = SAPP_KEYCODE_HOME; + _sapp.keycodes[0x72] = SAPP_KEYCODE_INSERT; + _sapp.keycodes[0x7B] = SAPP_KEYCODE_LEFT; + _sapp.keycodes[0x3A] = SAPP_KEYCODE_LEFT_ALT; + _sapp.keycodes[0x3B] = SAPP_KEYCODE_LEFT_CONTROL; + _sapp.keycodes[0x38] = SAPP_KEYCODE_LEFT_SHIFT; + _sapp.keycodes[0x37] = SAPP_KEYCODE_LEFT_SUPER; + _sapp.keycodes[0x6E] = SAPP_KEYCODE_MENU; + _sapp.keycodes[0x47] = SAPP_KEYCODE_NUM_LOCK; + _sapp.keycodes[0x79] = SAPP_KEYCODE_PAGE_DOWN; + _sapp.keycodes[0x74] = SAPP_KEYCODE_PAGE_UP; + _sapp.keycodes[0x7C] = SAPP_KEYCODE_RIGHT; + _sapp.keycodes[0x3D] = SAPP_KEYCODE_RIGHT_ALT; + _sapp.keycodes[0x3E] = SAPP_KEYCODE_RIGHT_CONTROL; + _sapp.keycodes[0x3C] = SAPP_KEYCODE_RIGHT_SHIFT; + _sapp.keycodes[0x36] = SAPP_KEYCODE_RIGHT_SUPER; + _sapp.keycodes[0x31] = SAPP_KEYCODE_SPACE; + _sapp.keycodes[0x30] = SAPP_KEYCODE_TAB; + _sapp.keycodes[0x7E] = SAPP_KEYCODE_UP; + _sapp.keycodes[0x52] = SAPP_KEYCODE_KP_0; + _sapp.keycodes[0x53] = SAPP_KEYCODE_KP_1; + _sapp.keycodes[0x54] = SAPP_KEYCODE_KP_2; + _sapp.keycodes[0x55] = SAPP_KEYCODE_KP_3; + _sapp.keycodes[0x56] = SAPP_KEYCODE_KP_4; + _sapp.keycodes[0x57] = SAPP_KEYCODE_KP_5; + _sapp.keycodes[0x58] = SAPP_KEYCODE_KP_6; + _sapp.keycodes[0x59] = SAPP_KEYCODE_KP_7; + _sapp.keycodes[0x5B] = SAPP_KEYCODE_KP_8; + _sapp.keycodes[0x5C] = SAPP_KEYCODE_KP_9; + _sapp.keycodes[0x45] = SAPP_KEYCODE_KP_ADD; + _sapp.keycodes[0x41] = SAPP_KEYCODE_KP_DECIMAL; + _sapp.keycodes[0x4B] = SAPP_KEYCODE_KP_DIVIDE; + _sapp.keycodes[0x4C] = SAPP_KEYCODE_KP_ENTER; + _sapp.keycodes[0x51] = SAPP_KEYCODE_KP_EQUAL; + _sapp.keycodes[0x43] = SAPP_KEYCODE_KP_MULTIPLY; + _sapp.keycodes[0x4E] = SAPP_KEYCODE_KP_SUBTRACT; +} + +_SOKOL_PRIVATE void _sapp_macos_discard_state(void) { + // NOTE: it's safe to call [release] on a nil object + _SAPP_OBJC_RELEASE(_sapp.macos.tracking_area); + _SAPP_OBJC_RELEASE(_sapp.macos.app_dlg); + _SAPP_OBJC_RELEASE(_sapp.macos.win_dlg); + _SAPP_OBJC_RELEASE(_sapp.macos.view); + #if defined(SOKOL_METAL) + _SAPP_OBJC_RELEASE(_sapp.macos.mtl_device); + #endif + _SAPP_OBJC_RELEASE(_sapp.macos.window); +} + +_SOKOL_PRIVATE void _sapp_macos_run(const sapp_desc* desc) { + _sapp_init_state(desc); + _sapp_macos_init_keytable(); + [NSApplication sharedApplication]; + NSApp.activationPolicy = NSApplicationActivationPolicyRegular; + _sapp.macos.app_dlg = [[_sapp_macos_app_delegate alloc] init]; + NSApp.delegate = _sapp.macos.app_dlg; + [NSApp activateIgnoringOtherApps:YES]; + [NSApp run]; + // NOTE: [NSApp run] never returns, instead cleanup code + // must be put into applicationWillTerminate +} + +/* MacOS entry function */ +#if !defined(SOKOL_NO_ENTRY) +int main(int argc, char* argv[]) { + sapp_desc desc = sokol_main(argc, argv); + _sapp_macos_run(&desc); + return 0; +} +#endif /* SOKOL_NO_ENTRY */ + +_SOKOL_PRIVATE uint32_t _sapp_macos_mod(NSEventModifierFlags f) { 
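+    /* translate AppKit's NSEventModifierFlags into the SAPP_MODIFIER_* bit mask
+       reported in sapp_event.modifiers */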
+ uint32_t m = 0; + if (f & NSEventModifierFlagShift) { + m |= SAPP_MODIFIER_SHIFT; + } + if (f & NSEventModifierFlagControl) { + m |= SAPP_MODIFIER_CTRL; + } + if (f & NSEventModifierFlagOption) { + m |= SAPP_MODIFIER_ALT; + } + if (f & NSEventModifierFlagCommand) { + m |= SAPP_MODIFIER_SUPER; + } + return m; +} + +_SOKOL_PRIVATE void _sapp_macos_mouse_event(sapp_event_type type, sapp_mousebutton btn, uint32_t mod) { + if (_sapp_events_enabled()) { + _sapp_init_event(type); + _sapp.event.mouse_button = btn; + _sapp.event.modifiers = mod; + _sapp_call_event(&_sapp.event); + } +} + +_SOKOL_PRIVATE void _sapp_macos_key_event(sapp_event_type type, sapp_keycode key, bool repeat, uint32_t mod) { + if (_sapp_events_enabled()) { + _sapp_init_event(type); + _sapp.event.key_code = key; + _sapp.event.key_repeat = repeat; + _sapp.event.modifiers = mod; + _sapp_call_event(&_sapp.event); + } +} + +_SOKOL_PRIVATE void _sapp_macos_app_event(sapp_event_type type) { + if (_sapp_events_enabled()) { + _sapp_init_event(type); + _sapp_call_event(&_sapp.event); + } +} + +/* NOTE: unlike the iOS version of this function, the macOS version + can dynamically update the DPI scaling factor when a window is moved + between HighDPI / LowDPI screens. +*/ +_SOKOL_PRIVATE void _sapp_macos_update_dimensions(void) { + #if defined(SOKOL_METAL) + const NSRect fb_rect = [_sapp.macos.view bounds]; + _sapp.framebuffer_width = fb_rect.size.width * _sapp.dpi_scale; + _sapp.framebuffer_height = fb_rect.size.height * _sapp.dpi_scale; + #elif defined(SOKOL_GLCORE33) + const NSRect fb_rect = [_sapp.macos.view convertRectToBacking:[_sapp.macos.view frame]]; + _sapp.framebuffer_width = fb_rect.size.width; + _sapp.framebuffer_height = fb_rect.size.height; + #endif + const NSRect bounds = [_sapp.macos.view bounds]; + _sapp.window_width = bounds.size.width; + _sapp.window_height = bounds.size.height; + if (_sapp.framebuffer_width == 0) { + _sapp.framebuffer_width = 1; + } + if (_sapp.framebuffer_height == 0) { + _sapp.framebuffer_height = 1; + } + if (_sapp.window_width == 0) { + _sapp.window_width = 1; + } + if (_sapp.window_height == 0) { + _sapp.window_height = 1; + } + _sapp.dpi_scale = (float)_sapp.framebuffer_width / (float)_sapp.window_width; + + /* NOTE: _sapp_macos_update_dimensions() isn't called each frame, but only + when the window size actually changes, so resizing the MTKView's + in each call is fine even when MTKView doesn't ignore setting an + identical drawableSize. 
+ */ + #if defined(SOKOL_METAL) + CGSize drawable_size = { (CGFloat) _sapp.framebuffer_width, (CGFloat) _sapp.framebuffer_height }; + _sapp.macos.view.drawableSize = drawable_size; + #endif +} + +_SOKOL_PRIVATE void _sapp_macos_toggle_fullscreen(void) { + /* NOTE: the _sapp.fullscreen flag is also notified by the + windowDidEnterFullscreen / windowDidExitFullscreen + event handlers + */ + _sapp.fullscreen = !_sapp.fullscreen; + [_sapp.macos.window toggleFullScreen:nil]; +} + +_SOKOL_PRIVATE void _sapp_macos_set_clipboard_string(const char* str) { + @autoreleasepool { + NSPasteboard* pasteboard = [NSPasteboard generalPasteboard]; + [pasteboard declareTypes:@[NSPasteboardTypeString] owner:nil]; + [pasteboard setString:@(str) forType:NSPasteboardTypeString]; + } +} + +_SOKOL_PRIVATE const char* _sapp_macos_get_clipboard_string(void) { + SOKOL_ASSERT(_sapp.clipboard.buffer); + @autoreleasepool { + _sapp.clipboard.buffer[0] = 0; + NSPasteboard* pasteboard = [NSPasteboard generalPasteboard]; + if (![[pasteboard types] containsObject:NSPasteboardTypeString]) { + return _sapp.clipboard.buffer; + } + NSString* str = [pasteboard stringForType:NSPasteboardTypeString]; + if (!str) { + return _sapp.clipboard.buffer; + } + _sapp_strcpy([str UTF8String], _sapp.clipboard.buffer, _sapp.clipboard.buf_size); + } + return _sapp.clipboard.buffer; +} + +_SOKOL_PRIVATE void _sapp_macos_update_window_title(void) { + [_sapp.macos.window setTitle: [NSString stringWithUTF8String:_sapp.window_title]]; +} + +_SOKOL_PRIVATE void _sapp_macos_update_mouse(NSEvent* event) { + if (!_sapp.mouse.locked) { + const NSPoint mouse_pos = event.locationInWindow; + float new_x = mouse_pos.x * _sapp.dpi_scale; + float new_y = _sapp.framebuffer_height - (mouse_pos.y * _sapp.dpi_scale) - 1; + /* don't update dx/dy in the very first update */ + if (_sapp.mouse.pos_valid) { + _sapp.mouse.dx = new_x - _sapp.mouse.x; + _sapp.mouse.dy = new_y - _sapp.mouse.y; + } + _sapp.mouse.x = new_x; + _sapp.mouse.y = new_y; + _sapp.mouse.pos_valid = true; + } +} + +_SOKOL_PRIVATE void _sapp_macos_show_mouse(bool visible) { + /* NOTE: this function is only called when the mouse visibility actually changes */ + if (visible) { + CGDisplayShowCursor(kCGDirectMainDisplay); + } + else { + CGDisplayHideCursor(kCGDirectMainDisplay); + } +} + +_SOKOL_PRIVATE void _sapp_macos_lock_mouse(bool lock) { + if (lock == _sapp.mouse.locked) { + return; + } + _sapp.mouse.dx = 0.0f; + _sapp.mouse.dy = 0.0f; + _sapp.mouse.locked = lock; + /* + NOTE that this code doesn't warp the mouse cursor to the window + center as everybody else does it. This lead to a spike in the + *second* mouse-moved event after the warp happened. The + mouse centering doesn't seem to be required (mouse-moved events + are reported correctly even when the cursor is at an edge of the screen). 
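+ (CGAssociateMouseAndMouseCursorPosition(NO) below freezes the visible cursor in
+ place while mouse-moved deltas keep being delivered, which is what the relative
+ dx/dy values rely on while the mouse is locked)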
+ + NOTE also that the hide/show of the mouse cursor should properly + stack with calls to sapp_show_mouse() + */ + if (_sapp.mouse.locked) { + CGAssociateMouseAndMouseCursorPosition(NO); + CGDisplayHideCursor(kCGDirectMainDisplay); + } + else { + CGDisplayShowCursor(kCGDirectMainDisplay); + CGAssociateMouseAndMouseCursorPosition(YES); + } +} + +_SOKOL_PRIVATE void _sapp_macos_frame(void) { + _sapp_frame(); + if (_sapp.quit_requested || _sapp.quit_ordered) { + [_sapp.macos.window performClose:nil]; + } +} + +#include "sokol_app2.h" // __v_ + +@implementation _sapp_macos_app_delegate +- (void)applicationDidFinishLaunching:(NSNotification*)aNotification { + _SOKOL_UNUSED(aNotification); + if (_sapp.fullscreen) { + NSRect screen_rect = NSScreen.mainScreen.frame; + _sapp.window_width = screen_rect.size.width; + _sapp.window_height = screen_rect.size.height; + } + if (_sapp.desc.high_dpi) { + _sapp.framebuffer_width = 2 * _sapp.window_width; + _sapp.framebuffer_height = 2 * _sapp.window_height; + } + else { + _sapp.framebuffer_width = _sapp.window_width; + _sapp.framebuffer_height = _sapp.window_height; + } + _sapp.dpi_scale = (float)_sapp.framebuffer_width / (float) _sapp.window_width; + const NSUInteger style = _sapp.desc.fullscreen ? NSWindowStyleMaskBorderless : // __v_ + NSWindowStyleMaskTitled | + NSWindowStyleMaskClosable | + NSWindowStyleMaskMiniaturizable | + NSWindowStyleMaskResizable; + NSRect window_rect = NSMakeRect(0, 0, _sapp.window_width, _sapp.window_height); + _sapp.macos.window = [[_sapp_macos_window alloc] + initWithContentRect:window_rect + styleMask:style + backing:NSBackingStoreBuffered + defer:NO]; + _sapp.macos.window.releasedWhenClosed = NO; // this is necessary for proper cleanup in applicationWillTerminate + _sapp.macos.window.title = [NSString stringWithUTF8String:_sapp.window_title]; + _sapp.macos.window.acceptsMouseMovedEvents = YES; + _sapp.macos.window.restorable = YES; + + + + // _v__ + _sapp.macos.window.backgroundColor = [NSColor whiteColor]; + + // Quit menu + NSMenu* menu_bar = [[NSMenu alloc] init]; +NSMenuItem* app_menu_item = [[NSMenuItem alloc] init]; +[menu_bar addItem:app_menu_item]; +NSApp.mainMenu = menu_bar; +NSMenu* app_menu = [[NSMenu alloc] init]; +NSString* window_title_as_nsstring = [NSString stringWithUTF8String:_sapp.window_title]; +// `quit_title` memory will be owned by the NSMenuItem, so no need to release it ourselves +NSString* quit_title = [@"Quit " stringByAppendingString:window_title_as_nsstring]; +NSMenuItem* quit_menu_item = [[NSMenuItem alloc] + initWithTitle:quit_title + action:@selector(terminate:) + keyEquivalent:@"q"]; +[app_menu addItem:quit_menu_item]; +app_menu_item.submenu = app_menu; +_SAPP_OBJC_RELEASE( window_title_as_nsstring ); +_SAPP_OBJC_RELEASE( app_menu ); +_SAPP_OBJC_RELEASE( app_menu_item ); +_SAPP_OBJC_RELEASE( menu_bar ); + + + // _v__ + + _sapp.macos.win_dlg = [[_sapp_macos_window_delegate alloc] init]; + _sapp.macos.window.delegate = _sapp.macos.win_dlg; + #if defined(SOKOL_METAL) + _sapp.macos.mtl_device = MTLCreateSystemDefaultDevice(); + _sapp.macos.view = [[_sapp_macos_view alloc] init]; + [_sapp.macos.view updateTrackingAreas]; + _sapp.macos.view.preferredFramesPerSecond = 60 / _sapp.swap_interval; + _sapp.macos.view.device = _sapp.macos.mtl_device; + _sapp.macos.view.colorPixelFormat = MTLPixelFormatBGRA8Unorm; + _sapp.macos.view.depthStencilPixelFormat = MTLPixelFormatDepth32Float_Stencil8; + _sapp.macos.view.sampleCount = (NSUInteger) _sapp.sample_count; + _sapp.macos.view.autoResizeDrawable = false; 
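+    // autoResizeDrawable is disabled so that the drawable size is set explicitly
+    // from _sapp_macos_update_dimensions() instead of tracking the view size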
+ _sapp.macos.window.contentView = _sapp.macos.view; + [_sapp.macos.window makeFirstResponder:_sapp.macos.view]; + _sapp.macos.view.layer.magnificationFilter = kCAFilterNearest; + #elif defined(SOKOL_GLCORE33) + NSOpenGLPixelFormatAttribute attrs[32]; + int i = 0; + attrs[i++] = NSOpenGLPFAAccelerated; + attrs[i++] = NSOpenGLPFADoubleBuffer; + attrs[i++] = NSOpenGLPFAOpenGLProfile; attrs[i++] = NSOpenGLProfileVersion3_2Core; + attrs[i++] = NSOpenGLPFAColorSize; attrs[i++] = 24; + attrs[i++] = NSOpenGLPFAAlphaSize; attrs[i++] = 8; + attrs[i++] = NSOpenGLPFADepthSize; attrs[i++] = 24; + attrs[i++] = NSOpenGLPFAStencilSize; attrs[i++] = 8; + if (_sapp.sample_count > 1) { + attrs[i++] = NSOpenGLPFAMultisample; + attrs[i++] = NSOpenGLPFASampleBuffers; attrs[i++] = 1; + attrs[i++] = NSOpenGLPFASamples; attrs[i++] = (NSOpenGLPixelFormatAttribute)_sapp.sample_count; + } + else { + attrs[i++] = NSOpenGLPFASampleBuffers; attrs[i++] = 0; + } + attrs[i++] = 0; + NSOpenGLPixelFormat* glpixelformat_obj = [[NSOpenGLPixelFormat alloc] initWithAttributes:attrs]; + SOKOL_ASSERT(glpixelformat_obj != nil); + + _sapp.macos.view = [[_sapp_macos_view alloc] + initWithFrame:window_rect + pixelFormat:glpixelformat_obj]; + _SAPP_OBJC_RELEASE(glpixelformat_obj); + [_sapp.macos.view updateTrackingAreas]; + if (_sapp.desc.high_dpi) { + [_sapp.macos.view setWantsBestResolutionOpenGLSurface:YES]; + } + else { + [_sapp.macos.view setWantsBestResolutionOpenGLSurface:NO]; + } + + _sapp.macos.window.contentView = _sapp.macos.view; + [_sapp.macos.window makeFirstResponder:_sapp.macos.view]; + + NSTimer* timer_obj = [NSTimer timerWithTimeInterval:0.001 + target:_sapp.macos.view + selector:@selector(timerFired:) + userInfo:nil + repeats:YES]; + [[NSRunLoop currentRunLoop] addTimer:timer_obj forMode:NSDefaultRunLoopMode]; + timer_obj = nil; + #endif + _sapp.valid = true; + // __v_ + if (!_sapp.__v_native_render) { + if (_sapp.fullscreen) { + // on GL, this already toggles a rendered frame, so set the valid flag before + [_sapp.macos.window toggleFullScreen:self]; + } + else { + [_sapp.macos.window center]; + } + } + // __v C + /////////////////////////////////////////////////////// + // Create a child view for native rendering + if (_sapp.__v_native_render) { + + CGRect wRect = _sapp.macos.window.frame; + NSView *contentView =_sapp.macos.window.contentView; + CGRect cRect = contentView.frame; + + CGRect rect = CGRectMake(wRect.origin.x, wRect.origin.y, cRect.size.width, cRect.size.height); + NSWindow *overlayWindow = [[NSWindow alloc]initWithContentRect:rect + styleMask:NSBorderlessWindowMask + backing:NSBackingStoreBuffered + defer:NO]; + //overlayWindow.backgroundColor = [NSColor whiteColor]; + + //overlayWindow.backgroundColor = [[NSColor whiteColor] colorWithAlphaComponent:0]; + [overlayWindow setOpaque:YES]; + [_sapp.macos.window setIgnoresMouseEvents:NO]; + g_view = [[MyView2 alloc] init]; + overlayWindow.contentView = g_view; + + [ contentView addSubview:g_view]; +//[ _sapp.macos.window addChildWindow:overlayWindow ordered:NSWindowAbove]; + [_sapp.macos.window center]; + +} + ////////////////////////////////// + + [_sapp.macos.window makeKeyAndOrderFront:nil]; + _sapp_macos_update_dimensions(); +// [NSEvent setMouseCoalescingEnabled:NO]; +} + +- (BOOL)applicationShouldTerminateAfterLastWindowClosed:(NSApplication*)sender { + _SOKOL_UNUSED(sender); + return YES; +} + +- (void)applicationWillTerminate:(NSNotification*)notification { + _SOKOL_UNUSED(notification); + _sapp_call_cleanup(); + _sapp_macos_discard_state(); + 
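+    // release the macOS-specific ObjC objects first, then clear the shared _sapp state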
_sapp_discard_state(); +} +@end + +@implementation _sapp_macos_window_delegate +- (BOOL)windowShouldClose:(id)sender { + _SOKOL_UNUSED(sender); + /* only give user-code a chance to intervene when sapp_quit() wasn't already called */ + if (!_sapp.quit_ordered) { + /* if window should be closed and event handling is enabled, give user code + a chance to intervene via sapp_cancel_quit() + */ + _sapp.quit_requested = true; + _sapp_macos_app_event(SAPP_EVENTTYPE_QUIT_REQUESTED); + /* user code hasn't intervened, quit the app */ + if (_sapp.quit_requested) { + _sapp.quit_ordered = true; + } + } + if (_sapp.quit_ordered) { + return YES; + } + else { + return NO; + } +} + +- (void)windowDidResize:(NSNotification*)notification { + _SOKOL_UNUSED(notification); + _sapp_macos_update_dimensions(); + if (!_sapp.first_frame) { + _sapp_macos_app_event(SAPP_EVENTTYPE_RESIZED); + } +} + +- (void)windowDidMiniaturize:(NSNotification*)notification { + _SOKOL_UNUSED(notification); + _sapp_macos_app_event(SAPP_EVENTTYPE_ICONIFIED); +} + +- (void)windowDidDeminiaturize:(NSNotification*)notification { + _SOKOL_UNUSED(notification); + _sapp_macos_app_event(SAPP_EVENTTYPE_RESTORED); +} + +- (void)windowDidEnterFullScreen:(NSNotification*)notification { + _SOKOL_UNUSED(notification); + _sapp.fullscreen = true; +} + +- (void)windowDidExitFullScreen:(NSNotification*)notification { + _SOKOL_UNUSED(notification); + _sapp.fullscreen = false; +} +@end + +@implementation _sapp_macos_window + +// __v_ +- (BOOL)canBecomeKeyWindow { return YES; } // needed for NSWindowStyleMaskBorderless +- (BOOL)canBecomeMainWindow { return YES; } +// __v_ + +- (instancetype)initWithContentRect:(NSRect)contentRect + styleMask:(NSWindowStyleMask)style + backing:(NSBackingStoreType)backingStoreType + defer:(BOOL)flag { + if (self = [super initWithContentRect:contentRect styleMask:style backing:backingStoreType defer:flag]) { + #if __MAC_OS_X_VERSION_MAX_ALLOWED >= 101300 + [self registerForDraggedTypes:[NSArray arrayWithObject:NSPasteboardTypeFileURL]]; + #endif + } + return self; +} + +- (NSDragOperation)draggingEntered:(id)sender { + return NSDragOperationCopy; +} + +- (NSDragOperation)draggingUpdated:(id)sender { + return NSDragOperationCopy; +} + +- (BOOL)performDragOperation:(id)sender { + #if __MAC_OS_X_VERSION_MAX_ALLOWED >= 101300 + NSPasteboard *pboard = [sender draggingPasteboard]; + if ([pboard.types containsObject:NSPasteboardTypeFileURL]) { + _sapp_clear_drop_buffer(); + _sapp.drop.num_files = ((int)pboard.pasteboardItems.count > _sapp.drop.max_files) ? 
_sapp.drop.max_files : pboard.pasteboardItems.count; + bool drop_failed = false; + for (int i = 0; i < _sapp.drop.num_files; i++) { + NSURL *fileUrl = [NSURL fileURLWithPath:[pboard.pasteboardItems[(NSUInteger)i] stringForType:NSPasteboardTypeFileURL]]; + if (!_sapp_strcpy(fileUrl.standardizedURL.path.UTF8String, _sapp_dropped_file_path_ptr(i), _sapp.drop.max_path_length)) { + SOKOL_LOG("sokol_app.h: dropped file path too long (sapp_desc.max_dropped_file_path_length)\n"); + drop_failed = true; + break; + } + } + if (!drop_failed) { + if (_sapp_events_enabled()) { + _sapp_init_event(SAPP_EVENTTYPE_FILES_DROPPED); + _sapp_call_event(&_sapp.event); + } + } + else { + _sapp_clear_drop_buffer(); + _sapp.drop.num_files = 0; + } + return YES; + } + #endif + return NO; +} +@end + +@implementation _sapp_macos_view +#if defined(SOKOL_GLCORE33) +/* NOTE: this is a hack/fix when the initial window size has been clipped by + macOS because it didn't fit on the screen, in that case the + frame size of the window is reported wrong if low-dpi rendering + was requested (instead the high-dpi dimensions are returned) + until the window is resized for the first time. + + Hooking into reshape and getting the frame dimensions seems to report + the correct dimensions. +*/ +- (void)reshape { + _sapp_macos_update_dimensions(); + [super reshape]; +} +- (void)timerFired:(id)sender { + _SOKOL_UNUSED(sender); + [self setNeedsDisplay:YES]; +} +- (void)prepareOpenGL { + [super prepareOpenGL]; + GLint swapInt = 1; + NSOpenGLContext* ctx = [_sapp.macos.view openGLContext]; + [ctx setValues:&swapInt forParameter:NSOpenGLContextParameterSwapInterval]; + [ctx makeCurrentContext]; +} +#endif + +_SOKOL_PRIVATE void _sapp_macos_poll_input_events() { + /* + + NOTE: late event polling temporarily out-commented to check if this + causes infrequent and almost impossible to reproduce probelms with the + window close events, see: + https://github.com/floooh/sokol/pull/483#issuecomment-805148815 + + + const NSEventMask mask = NSEventMaskLeftMouseDown | + NSEventMaskLeftMouseUp| + NSEventMaskRightMouseDown | + NSEventMaskRightMouseUp | + NSEventMaskMouseMoved | + NSEventMaskLeftMouseDragged | + NSEventMaskRightMouseDragged | + NSEventMaskMouseEntered | + NSEventMaskMouseExited | + NSEventMaskKeyDown | + NSEventMaskKeyUp | + NSEventMaskCursorUpdate | + NSEventMaskScrollWheel | + NSEventMaskTabletPoint | + NSEventMaskTabletProximity | + NSEventMaskOtherMouseDown | + NSEventMaskOtherMouseUp | + NSEventMaskOtherMouseDragged | + NSEventMaskPressure | + NSEventMaskDirectTouch; + @autoreleasepool { + for (;;) { + // NOTE: using NSDefaultRunLoopMode here causes stuttering in the GL backend, + // see: https://github.com/floooh/sokol/issues/486 + NSEvent* event = [NSApp nextEventMatchingMask:mask untilDate:nil inMode:NSEventTrackingRunLoopMode dequeue:YES]; + if (event == nil) { + break; + } + [NSApp sendEvent:event]; + } + } + */ +} + +- (void)drawRect:(NSRect)rect { + _SOKOL_UNUSED(rect); + /* Catch any last-moment input events */ + _sapp_macos_poll_input_events(); + _sapp_macos_frame(); + #if !defined(SOKOL_METAL) + [[_sapp.macos.view openGLContext] flushBuffer]; + #endif +} + +- (BOOL)isOpaque { + return YES; +} +- (BOOL)canBecomeKeyView { + return YES; +} +- (BOOL)acceptsFirstResponder { + return YES; +} +- (BOOL)acceptsFirstMouse:(NSEvent *)event { + return YES; +} + + +- (void)updateTrackingAreas { + if (_sapp.macos.tracking_area != nil) { + [self removeTrackingArea:_sapp.macos.tracking_area]; + 
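+        // drop the stale tracking area before installing a fresh one sized to the current bounds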
_SAPP_OBJC_RELEASE(_sapp.macos.tracking_area); + } + const NSTrackingAreaOptions options = NSTrackingMouseEnteredAndExited | + NSTrackingActiveInKeyWindow | + NSTrackingEnabledDuringMouseDrag | + NSTrackingCursorUpdate | + NSTrackingInVisibleRect | + NSTrackingAssumeInside; + _sapp.macos.tracking_area = [[NSTrackingArea alloc] initWithRect:[self bounds] options:options owner:self userInfo:nil]; + [self addTrackingArea:_sapp.macos.tracking_area]; + [super updateTrackingAreas]; +} +- (void)mouseEntered:(NSEvent*)event { + _sapp_macos_update_mouse(event); + /* don't send mouse enter/leave while dragging (so that it behaves the same as + on Windows while SetCapture is active + */ + if (0 == _sapp.macos.mouse_buttons) { + _sapp_macos_mouse_event(SAPP_EVENTTYPE_MOUSE_ENTER, SAPP_MOUSEBUTTON_INVALID, _sapp_macos_mod(event.modifierFlags)); + } +} +- (void)mouseExited:(NSEvent*)event { + _sapp_macos_update_mouse(event); + if (0 == _sapp.macos.mouse_buttons) { + _sapp_macos_mouse_event(SAPP_EVENTTYPE_MOUSE_LEAVE, SAPP_MOUSEBUTTON_INVALID, _sapp_macos_mod(event.modifierFlags)); + } +} +- (void)mouseDown:(NSEvent*)event { + _sapp_macos_update_mouse(event); + _sapp_macos_mouse_event(SAPP_EVENTTYPE_MOUSE_DOWN, SAPP_MOUSEBUTTON_LEFT, _sapp_macos_mod(event.modifierFlags)); + _sapp.macos.mouse_buttons |= (1< 0.0f) || (_sapp_absf(dy) > 0.0f)) { + _sapp_init_event(SAPP_EVENTTYPE_MOUSE_SCROLL); + _sapp.event.modifiers = _sapp_macos_mod(event.modifierFlags); + _sapp.event.scroll_x = dx; + _sapp.event.scroll_y = dy; + _sapp_call_event(&_sapp.event); + } + } +} +- (void)keyDown:(NSEvent*)event { + if (_sapp_events_enabled()) { + const uint32_t mods = _sapp_macos_mod(event.modifierFlags); + /* NOTE: macOS doesn't send keyUp events while the Cmd key is pressed, + as a workaround, to prevent key presses from sticking we'll send + a keyup event following right after the keydown if SUPER is also pressed + */ + const sapp_keycode key_code = _sapp_translate_key(event.keyCode); + _sapp_macos_key_event(SAPP_EVENTTYPE_KEY_DOWN, key_code, event.isARepeat, mods); + if (0 != (mods & SAPP_MODIFIER_SUPER)) { + _sapp_macos_key_event(SAPP_EVENTTYPE_KEY_UP, key_code, event.isARepeat, mods); + } + const NSString* chars = event.characters; + const NSUInteger len = chars.length; + if (len > 0) { + _sapp_init_event(SAPP_EVENTTYPE_CHAR); + _sapp.event.modifiers = mods; + for (NSUInteger i = 0; i < len; i++) { + const unichar codepoint = [chars characterAtIndex:i]; + if ((codepoint & 0xFF00) == 0xF700) { + continue; + } + _sapp.event.char_code = codepoint; + _sapp.event.key_repeat = event.isARepeat; + _sapp_call_event(&_sapp.event); + } + } + /* if this is a Cmd+V (paste), also send a CLIPBOARD_PASTE event */ + if (_sapp.clipboard.enabled && (mods == SAPP_MODIFIER_SUPER) && (key_code == SAPP_KEYCODE_V)) { + _sapp_init_event(SAPP_EVENTTYPE_CLIPBOARD_PASTED); + _sapp_call_event(&_sapp.event); + } + } +} +- (void)keyUp:(NSEvent*)event { + _sapp_macos_key_event(SAPP_EVENTTYPE_KEY_UP, + _sapp_translate_key(event.keyCode), + event.isARepeat, + _sapp_macos_mod(event.modifierFlags)); +} +- (void)flagsChanged:(NSEvent*)event { + const uint32_t old_f = _sapp.macos.flags_changed_store; + const uint32_t new_f = event.modifierFlags; + _sapp.macos.flags_changed_store = new_f; + sapp_keycode key_code = SAPP_KEYCODE_INVALID; + bool down = false; + if ((new_f ^ old_f) & NSEventModifierFlagShift) { + key_code = SAPP_KEYCODE_LEFT_SHIFT; + down = 0 != (new_f & NSEventModifierFlagShift); + } + if ((new_f ^ old_f) & NSEventModifierFlagControl) { + key_code 
= SAPP_KEYCODE_LEFT_CONTROL; + down = 0 != (new_f & NSEventModifierFlagControl); + } + if ((new_f ^ old_f) & NSEventModifierFlagOption) { + key_code = SAPP_KEYCODE_LEFT_ALT; + down = 0 != (new_f & NSEventModifierFlagOption); + } + if ((new_f ^ old_f) & NSEventModifierFlagCommand) { + key_code = SAPP_KEYCODE_LEFT_SUPER; + down = 0 != (new_f & NSEventModifierFlagCommand); + } + if (key_code != SAPP_KEYCODE_INVALID) { + _sapp_macos_key_event(down ? SAPP_EVENTTYPE_KEY_DOWN : SAPP_EVENTTYPE_KEY_UP, + key_code, + false, + _sapp_macos_mod(event.modifierFlags)); + } +} +- (void)cursorUpdate:(NSEvent*)event { + _SOKOL_UNUSED(event); + if (_sapp.desc.user_cursor) { + _sapp_macos_app_event(SAPP_EVENTTYPE_UPDATE_CURSOR); + } +} +@end + +#endif /* MacOS */ + +/*== iOS =====================================================================*/ +#if defined(_SAPP_IOS) + +_SOKOL_PRIVATE void _sapp_ios_discard_state(void) { + // NOTE: it's safe to call [release] on a nil object + _SAPP_OBJC_RELEASE(_sapp.ios.textfield_dlg); + _SAPP_OBJC_RELEASE(_sapp.ios.textfield); + #if defined(SOKOL_METAL) + _SAPP_OBJC_RELEASE(_sapp.ios.view_ctrl); + _SAPP_OBJC_RELEASE(_sapp.ios.mtl_device); + #else + _SAPP_OBJC_RELEASE(_sapp.ios.view_ctrl); + _SAPP_OBJC_RELEASE(_sapp.ios.eagl_ctx); + #endif + _SAPP_OBJC_RELEASE(_sapp.ios.view); + _SAPP_OBJC_RELEASE(_sapp.ios.window); +} + +_SOKOL_PRIVATE void _sapp_ios_run(const sapp_desc* desc) { + _sapp_init_state(desc); + static int argc = 1; + static char* argv[] = { (char*)"sokol_app" }; + UIApplicationMain(argc, argv, nil, NSStringFromClass([_sapp_app_delegate class])); +} + +/* iOS entry function */ +#if !defined(SOKOL_NO_ENTRY) +int main(int argc, char* argv[]) { + sapp_desc desc = sokol_main(argc, argv); + _sapp_ios_run(&desc); + return 0; +} +#endif /* SOKOL_NO_ENTRY */ + +_SOKOL_PRIVATE void _sapp_ios_app_event(sapp_event_type type) { + if (_sapp_events_enabled()) { + _sapp_init_event(type); + _sapp_call_event(&_sapp.event); + } +} + +_SOKOL_PRIVATE void _sapp_ios_touch_event(sapp_event_type type, NSSet* touches, UIEvent* event) { + if (_sapp_events_enabled()) { + _sapp_init_event(type); + NSEnumerator* enumerator = event.allTouches.objectEnumerator; + UITouch* ios_touch; + while ((ios_touch = [enumerator nextObject])) { + if ((_sapp.event.num_touches + 1) < SAPP_MAX_TOUCHPOINTS) { + CGPoint ios_pos = [ios_touch locationInView:_sapp.ios.view]; + sapp_touchpoint* cur_point = &_sapp.event.touches[_sapp.event.num_touches++]; + cur_point->identifier = (uintptr_t) ios_touch; + cur_point->pos_x = ios_pos.x * _sapp.dpi_scale; + cur_point->pos_y = ios_pos.y * _sapp.dpi_scale; + cur_point->changed = [touches containsObject:ios_touch]; + } + } + if (_sapp.event.num_touches > 0) { + _sapp_call_event(&_sapp.event); + } + } +} + +_SOKOL_PRIVATE void _sapp_ios_update_dimensions(void) { + CGRect screen_rect = UIScreen.mainScreen.bounds; + _sapp.framebuffer_width = (int)(screen_rect.size.width * _sapp.dpi_scale); + _sapp.framebuffer_height = (int)(screen_rect.size.height * _sapp.dpi_scale); + _sapp.window_width = (int)screen_rect.size.width; + _sapp.window_height = (int)screen_rect.size.height; + int cur_fb_width, cur_fb_height; + #if defined(SOKOL_METAL) + const CGSize fb_size = _sapp.ios.view.drawableSize; + cur_fb_width = (int) fb_size.width; + cur_fb_height = (int) fb_size.height; + #else + cur_fb_width = (int) _sapp.ios.view.drawableWidth; + cur_fb_height = (int) _sapp.ios.view.drawableHeight; + #endif + const bool dim_changed = (_sapp.framebuffer_width != cur_fb_width) || + 
(_sapp.framebuffer_height != cur_fb_height); + if (dim_changed) { + #if defined(SOKOL_METAL) + const CGSize drawable_size = { (CGFloat) _sapp.framebuffer_width, (CGFloat) _sapp.framebuffer_height }; + _sapp.ios.view.drawableSize = drawable_size; + #else + // nothing to do here, GLKView correctly respects the view's contentScaleFactor + #endif + if (!_sapp.first_frame) { + _sapp_ios_app_event(SAPP_EVENTTYPE_RESIZED); + } + } +} + +_SOKOL_PRIVATE void _sapp_ios_frame(void) { + _sapp_ios_update_dimensions(); + _sapp_frame(); +} + +_SOKOL_PRIVATE void _sapp_ios_show_keyboard(bool shown) { + /* if not happened yet, create an invisible text field */ + if (nil == _sapp.ios.textfield) { + _sapp.ios.textfield_dlg = [[_sapp_textfield_dlg alloc] init]; + _sapp.ios.textfield = [[UITextField alloc] initWithFrame:CGRectMake(10, 10, 100, 50)]; + _sapp.ios.textfield.keyboardType = UIKeyboardTypeDefault; + _sapp.ios.textfield.returnKeyType = UIReturnKeyDefault; + _sapp.ios.textfield.autocapitalizationType = UITextAutocapitalizationTypeNone; + _sapp.ios.textfield.autocorrectionType = UITextAutocorrectionTypeNo; + _sapp.ios.textfield.spellCheckingType = UITextSpellCheckingTypeNo; + _sapp.ios.textfield.hidden = YES; + _sapp.ios.textfield.text = @"x"; + _sapp.ios.textfield.delegate = _sapp.ios.textfield_dlg; + [_sapp.ios.view_ctrl.view addSubview:_sapp.ios.textfield]; + + [[NSNotificationCenter defaultCenter] addObserver:_sapp.ios.textfield_dlg + selector:@selector(keyboardWasShown:) + name:UIKeyboardDidShowNotification object:nil]; + [[NSNotificationCenter defaultCenter] addObserver:_sapp.ios.textfield_dlg + selector:@selector(keyboardWillBeHidden:) + name:UIKeyboardWillHideNotification object:nil]; + [[NSNotificationCenter defaultCenter] addObserver:_sapp.ios.textfield_dlg + selector:@selector(keyboardDidChangeFrame:) + name:UIKeyboardDidChangeFrameNotification object:nil]; + } + if (shown) { + /* setting the text field as first responder brings up the onscreen keyboard */ + [_sapp.ios.textfield becomeFirstResponder]; + } + else { + [_sapp.ios.textfield resignFirstResponder]; + } +} + +@implementation _sapp_app_delegate +- (BOOL)application:(UIApplication*)application didFinishLaunchingWithOptions:(NSDictionary*)launchOptions { + CGRect screen_rect = UIScreen.mainScreen.bounds; + _sapp.ios.window = [[UIWindow alloc] initWithFrame:screen_rect]; + _sapp.window_width = screen_rect.size.width; + _sapp.window_height = screen_rect.size.height; + if (_sapp.desc.high_dpi) { + _sapp.dpi_scale = (float) UIScreen.mainScreen.nativeScale; + } + else { + _sapp.dpi_scale = 1.0f; + } + _sapp.framebuffer_width = _sapp.window_width * _sapp.dpi_scale; + _sapp.framebuffer_height = _sapp.window_height * _sapp.dpi_scale; + #if defined(SOKOL_METAL) + _sapp.ios.mtl_device = MTLCreateSystemDefaultDevice(); + _sapp.ios.view = [[_sapp_ios_view alloc] init]; + _sapp.ios.view.preferredFramesPerSecond = 60 / _sapp.swap_interval; + _sapp.ios.view.device = _sapp.ios.mtl_device; + _sapp.ios.view.colorPixelFormat = MTLPixelFormatBGRA8Unorm; + _sapp.ios.view.depthStencilPixelFormat = MTLPixelFormatDepth32Float_Stencil8; + _sapp.ios.view.sampleCount = (NSUInteger)_sapp.sample_count; + /* NOTE: iOS MTKView seems to ignore thew view's contentScaleFactor + and automatically renders at Retina resolution. 
We'll disable + autoResize and instead do the resizing in _sapp_ios_update_dimensions() + */ + _sapp.ios.view.autoResizeDrawable = false; + _sapp.ios.view.userInteractionEnabled = YES; + _sapp.ios.view.multipleTouchEnabled = YES; + _sapp.ios.view_ctrl = [[UIViewController alloc] init]; + _sapp.ios.view_ctrl.modalPresentationStyle = UIModalPresentationFullScreen; + _sapp.ios.view_ctrl.view = _sapp.ios.view; + _sapp.ios.window.rootViewController = _sapp.ios.view_ctrl; + #else + if (_sapp.desc.gl_force_gles2) { + _sapp.ios.eagl_ctx = [[EAGLContext alloc] initWithAPI:kEAGLRenderingAPIOpenGLES2]; + _sapp.gles2_fallback = true; + } + else { + _sapp.ios.eagl_ctx = [[EAGLContext alloc] initWithAPI:kEAGLRenderingAPIOpenGLES3]; + if (_sapp.ios.eagl_ctx == nil) { + _sapp.ios.eagl_ctx = [[EAGLContext alloc] initWithAPI:kEAGLRenderingAPIOpenGLES2]; + _sapp.gles2_fallback = true; + } + } + _sapp.ios.view = [[_sapp_ios_view alloc] initWithFrame:screen_rect]; + _sapp.ios.view.drawableColorFormat = GLKViewDrawableColorFormatRGBA8888; + _sapp.ios.view.drawableDepthFormat = GLKViewDrawableDepthFormat24; + _sapp.ios.view.drawableStencilFormat = GLKViewDrawableStencilFormatNone; + GLKViewDrawableMultisample msaa = _sapp.sample_count > 1 ? GLKViewDrawableMultisample4X : GLKViewDrawableMultisampleNone; + _sapp.ios.view.drawableMultisample = msaa; + _sapp.ios.view.context = _sapp.ios.eagl_ctx; + _sapp.ios.view.enableSetNeedsDisplay = NO; + _sapp.ios.view.userInteractionEnabled = YES; + _sapp.ios.view.multipleTouchEnabled = YES; + // on GLKView, contentScaleFactor appears to work just fine! + if (_sapp.desc.high_dpi) { + _sapp.ios.view.contentScaleFactor = 2.0; + } + else { + _sapp.ios.view.contentScaleFactor = 1.0; + } + _sapp.ios.view_ctrl = [[GLKViewController alloc] init]; + _sapp.ios.view_ctrl.view = _sapp.ios.view; + _sapp.ios.view_ctrl.preferredFramesPerSecond = 60 / _sapp.swap_interval; + _sapp.ios.window.rootViewController = _sapp.ios.view_ctrl; + #endif + [_sapp.ios.window makeKeyAndVisible]; + + _sapp.valid = true; + return YES; +} + +- (void)applicationWillResignActive:(UIApplication *)application { + if (!_sapp.ios.suspended) { + _sapp.ios.suspended = true; + _sapp_ios_app_event(SAPP_EVENTTYPE_SUSPENDED); + } +} + +- (void)applicationDidBecomeActive:(UIApplication *)application { + if (_sapp.ios.suspended) { + _sapp.ios.suspended = false; + _sapp_ios_app_event(SAPP_EVENTTYPE_RESUMED); + } +} + +/* NOTE: this method will rarely ever be called, iOS application + which are terminated by the user are usually killed via signal 9 + by the operating system. 
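+ Cleanup work done here should therefore be treated as best-effort only.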
+*/ +- (void)applicationWillTerminate:(UIApplication *)application { + _SOKOL_UNUSED(application); + _sapp_call_cleanup(); + _sapp_ios_discard_state(); + _sapp_discard_state(); +} +@end + +@implementation _sapp_textfield_dlg +- (void)keyboardWasShown:(NSNotification*)notif { + _sapp.onscreen_keyboard_shown = true; + /* query the keyboard's size, and modify the content view's size */ + if (_sapp.desc.ios_keyboard_resizes_canvas) { + NSDictionary* info = notif.userInfo; + CGFloat kbd_h = [[info objectForKey:UIKeyboardFrameEndUserInfoKey] CGRectValue].size.height; + CGRect view_frame = UIScreen.mainScreen.bounds; + view_frame.size.height -= kbd_h; + _sapp.ios.view.frame = view_frame; + } +} +- (void)keyboardWillBeHidden:(NSNotification*)notif { + _sapp.onscreen_keyboard_shown = false; + if (_sapp.desc.ios_keyboard_resizes_canvas) { + _sapp.ios.view.frame = UIScreen.mainScreen.bounds; + } +} +- (void)keyboardDidChangeFrame:(NSNotification*)notif { + /* this is for the case when the screen rotation changes while the keyboard is open */ + if (_sapp.onscreen_keyboard_shown && _sapp.desc.ios_keyboard_resizes_canvas) { + NSDictionary* info = notif.userInfo; + CGFloat kbd_h = [[info objectForKey:UIKeyboardFrameEndUserInfoKey] CGRectValue].size.height; + CGRect view_frame = UIScreen.mainScreen.bounds; + view_frame.size.height -= kbd_h; + _sapp.ios.view.frame = view_frame; + } +} +- (BOOL)textField:(UITextField*)textField shouldChangeCharactersInRange:(NSRange)range replacementString:(NSString*)string { + if (_sapp_events_enabled()) { + const NSUInteger len = string.length; + if (len > 0) { + for (NSUInteger i = 0; i < len; i++) { + unichar c = [string characterAtIndex:i]; + if (c >= 32) { + /* ignore surrogates for now */ + if ((c < 0xD800) || (c > 0xDFFF)) { + _sapp_init_event(SAPP_EVENTTYPE_CHAR); + _sapp.event.char_code = c; + _sapp_call_event(&_sapp.event); + } + } + if (c <= 32) { + sapp_keycode k = SAPP_KEYCODE_INVALID; + switch (c) { + case 10: k = SAPP_KEYCODE_ENTER; break; + case 32: k = SAPP_KEYCODE_SPACE; break; + default: break; + } + if (k != SAPP_KEYCODE_INVALID) { + _sapp_init_event(SAPP_EVENTTYPE_KEY_DOWN); + _sapp.event.key_code = k; + _sapp_call_event(&_sapp.event); + _sapp_init_event(SAPP_EVENTTYPE_KEY_UP); + _sapp.event.key_code = k; + _sapp_call_event(&_sapp.event); + } + } + } + } + else { + /* this was a backspace */ + _sapp_init_event(SAPP_EVENTTYPE_KEY_DOWN); + _sapp.event.key_code = SAPP_KEYCODE_BACKSPACE; + _sapp_call_event(&_sapp.event); + _sapp_init_event(SAPP_EVENTTYPE_KEY_UP); + _sapp.event.key_code = SAPP_KEYCODE_BACKSPACE; + _sapp_call_event(&_sapp.event); + } + } + return NO; +} +@end + +@implementation _sapp_ios_view +- (void)drawRect:(CGRect)rect { + _SOKOL_UNUSED(rect); + _sapp_ios_frame(); +} +- (BOOL)isOpaque { + return YES; +} +- (void)touchesBegan:(NSSet *)touches withEvent:(UIEvent*)event { + _sapp_ios_touch_event(SAPP_EVENTTYPE_TOUCHES_BEGAN, touches, event); +} +- (void)touchesMoved:(NSSet *)touches withEvent:(UIEvent*)event { + _sapp_ios_touch_event(SAPP_EVENTTYPE_TOUCHES_MOVED, touches, event); +} +- (void)touchesEnded:(NSSet *)touches withEvent:(UIEvent*)event { + _sapp_ios_touch_event(SAPP_EVENTTYPE_TOUCHES_ENDED, touches, event); +} +- (void)touchesCancelled:(NSSet *)touches withEvent:(UIEvent*)event { + _sapp_ios_touch_event(SAPP_EVENTTYPE_TOUCHES_CANCELLED, touches, event); +} +@end +#endif /* TARGET_OS_IPHONE */ + +#endif /* _SAPP_APPLE */ + +/*== EMSCRIPTEN ==============================================================*/ +#if 
defined(_SAPP_EMSCRIPTEN) + +#ifdef __cplusplus +extern "C" { +#endif + +typedef void (*_sapp_html5_fetch_callback) (const sapp_html5_fetch_response*); + +/* this function is called from a JS event handler when the user hides + the onscreen keyboard pressing the 'dismiss keyboard key' +*/ +EMSCRIPTEN_KEEPALIVE void _sapp_emsc_notify_keyboard_hidden(void) { + _sapp.onscreen_keyboard_shown = false; +} + +EMSCRIPTEN_KEEPALIVE void _sapp_emsc_onpaste(const char* str) { + if (_sapp.clipboard.enabled) { + _sapp_strcpy(str, _sapp.clipboard.buffer, _sapp.clipboard.buf_size); + if (_sapp_events_enabled()) { + _sapp_init_event(SAPP_EVENTTYPE_CLIPBOARD_PASTED); + _sapp_call_event(&_sapp.event); + } + } +} + +/* https://developer.mozilla.org/en-US/docs/Web/API/WindowEventHandlers/onbeforeunload */ +EMSCRIPTEN_KEEPALIVE int _sapp_html5_get_ask_leave_site(void) { + return _sapp.html5_ask_leave_site ? 1 : 0; +} + +EMSCRIPTEN_KEEPALIVE void _sapp_emsc_begin_drop(int num) { + if (!_sapp.drop.enabled) { + return; + } + if (num < 0) { + num = 0; + } + if (num > _sapp.drop.max_files) { + num = _sapp.drop.max_files; + } + _sapp.drop.num_files = num; + _sapp_clear_drop_buffer(); +} + +EMSCRIPTEN_KEEPALIVE void _sapp_emsc_drop(int i, const char* name) { + /* NOTE: name is only the filename part, not a path */ + if (!_sapp.drop.enabled) { + return; + } + if (0 == name) { + return; + } + SOKOL_ASSERT(_sapp.drop.num_files <= _sapp.drop.max_files); + if ((i < 0) || (i >= _sapp.drop.num_files)) { + return; + } + if (!_sapp_strcpy(name, _sapp_dropped_file_path_ptr(i), _sapp.drop.max_path_length)) { + SOKOL_LOG("sokol_app.h: dropped file path too long!\n"); + _sapp.drop.num_files = 0; + } +} + +EMSCRIPTEN_KEEPALIVE void _sapp_emsc_end_drop(int x, int y) { + if (!_sapp.drop.enabled) { + return; + } + if (0 == _sapp.drop.num_files) { + /* there was an error copying the filenames */ + _sapp_clear_drop_buffer(); + return; + + } + if (_sapp_events_enabled()) { + _sapp.mouse.x = (float)x * _sapp.dpi_scale; + _sapp.mouse.y = (float)y * _sapp.dpi_scale; + _sapp.mouse.dx = 0.0f; + _sapp.mouse.dy = 0.0f; + _sapp_init_event(SAPP_EVENTTYPE_FILES_DROPPED); + _sapp_call_event(&_sapp.event); + } +} + +EMSCRIPTEN_KEEPALIVE void _sapp_emsc_invoke_fetch_cb(int index, int success, int error_code, _sapp_html5_fetch_callback callback, uint32_t fetched_size, void* buf_ptr, uint32_t buf_size, void* user_data) { + sapp_html5_fetch_response response; + memset(&response, 0, sizeof(response)); + response.succeeded = (0 != success); + response.error_code = (sapp_html5_fetch_error) error_code; + response.file_index = index; + response.fetched_size = fetched_size; + response.buffer_ptr = buf_ptr; + response.buffer_size = buf_size; + response.user_data = user_data; + callback(&response); +} + +#ifdef __cplusplus +} /* extern "C" */ +#endif + +/* Javascript helper functions for mobile virtual keyboard input */ +EM_JS(void, sapp_js_create_textfield, (void), { + var _sapp_inp = document.createElement("input"); + _sapp_inp.type = "text"; + _sapp_inp.id = "_sokol_app_input_element"; + _sapp_inp.autocapitalize = "none"; + _sapp_inp.addEventListener("focusout", function(_sapp_event) { + __sapp_emsc_notify_keyboard_hidden() + + }); + document.body.append(_sapp_inp); +}); + +EM_JS(void, sapp_js_focus_textfield, (void), { + document.getElementById("_sokol_app_input_element").focus(); +}); + +EM_JS(void, sapp_js_unfocus_textfield, (void), { + document.getElementById("_sokol_app_input_element").blur(); +}); + +EM_JS(void, sapp_js_add_beforeunload_listener, 
(void), { + Module.sokol_beforeunload = function(event) { + if (__sapp_html5_get_ask_leave_site() != 0) { + event.preventDefault(); + event.returnValue = ' '; + } + }; + window.addEventListener('beforeunload', Module.sokol_beforeunload); +}); + +EM_JS(void, sapp_js_remove_beforeunload_listener, (void), { + window.removeEventListener('beforeunload', Module.sokol_beforeunload); +}); + +EM_JS(void, sapp_js_add_clipboard_listener, (void), { + Module.sokol_paste = function(event) { + var pasted_str = event.clipboardData.getData('text'); + ccall('_sapp_emsc_onpaste', 'void', ['string'], [pasted_str]); + }; + window.addEventListener('paste', Module.sokol_paste); +}); + +EM_JS(void, sapp_js_remove_clipboard_listener, (void), { + window.removeEventListener('paste', Module.sokol_paste); +}); + +EM_JS(void, sapp_js_write_clipboard, (const char* c_str), { + var str = UTF8ToString(c_str); + var ta = document.createElement('textarea'); + ta.setAttribute('autocomplete', 'off'); + ta.setAttribute('autocorrect', 'off'); + ta.setAttribute('autocapitalize', 'off'); + ta.setAttribute('spellcheck', 'false'); + ta.style.left = -100 + 'px'; + ta.style.top = -100 + 'px'; + ta.style.height = 1; + ta.style.width = 1; + ta.value = str; + document.body.appendChild(ta); + ta.select(); + document.execCommand('copy'); + document.body.removeChild(ta); +}); + +_SOKOL_PRIVATE void _sapp_emsc_set_clipboard_string(const char* str) { + sapp_js_write_clipboard(str); +} + +EM_JS(void, sapp_js_add_dragndrop_listeners, (const char* canvas_name_cstr), { + Module.sokol_drop_files = []; + var canvas_name = UTF8ToString(canvas_name_cstr); + var canvas = document.getElementById(canvas_name); + Module.sokol_dragenter = function(event) { + event.stopPropagation(); + event.preventDefault(); + }; + Module.sokol_dragleave = function(event) { + event.stopPropagation(); + event.preventDefault(); + }; + Module.sokol_dragover = function(event) { + event.stopPropagation(); + event.preventDefault(); + }; + Module.sokol_drop = function(event) { + event.stopPropagation(); + event.preventDefault(); + var files = event.dataTransfer.files; + Module.sokol_dropped_files = files; + __sapp_emsc_begin_drop(files.length); + var i; + for (i = 0; i < files.length; i++) { + ccall('_sapp_emsc_drop', 'void', ['number', 'string'], [i, files[i].name]); + } + // FIXME? 
see computation of targetX/targetY in emscripten via getClientBoundingRect + __sapp_emsc_end_drop(event.clientX, event.clientY); + }; + canvas.addEventListener('dragenter', Module.sokol_dragenter, false); + canvas.addEventListener('dragleave', Module.sokol_dragleave, false); + canvas.addEventListener('dragover', Module.sokol_dragover, false); + canvas.addEventListener('drop', Module.sokol_drop, false); +}); + +EM_JS(uint32_t, sapp_js_dropped_file_size, (int index), { + if ((index < 0) || (index >= Module.sokol_dropped_files.length)) { + return 0; + } + else { + return Module.sokol_dropped_files[index].size; + } +}); + +EM_JS(void, sapp_js_fetch_dropped_file, (int index, _sapp_html5_fetch_callback callback, void* buf_ptr, uint32_t buf_size, void* user_data), { + var reader = new FileReader(); + reader.onload = function(loadEvent) { + var content = loadEvent.target.result; + if (content.byteLength > buf_size) { + // SAPP_HTML5_FETCH_ERROR_BUFFER_TOO_SMALL + __sapp_emsc_invoke_fetch_cb(index, 0, 1, callback, 0, buf_ptr, buf_size, user_data); + } + else { + HEAPU8.set(new Uint8Array(content), buf_ptr); + __sapp_emsc_invoke_fetch_cb(index, 1, 0, callback, content.byteLength, buf_ptr, buf_size, user_data); + } + }; + reader.onerror = function() { + // SAPP_HTML5_FETCH_ERROR_OTHER + __sapp_emsc_invoke_fetch_cb(index, 0, 2, callback, 0, buf_ptr, buf_size, user_data); + }; + reader.readAsArrayBuffer(Module.sokol_dropped_files[index]); +}); + +EM_JS(void, sapp_js_remove_dragndrop_listeners, (const char* canvas_name_cstr), { + var canvas_name = UTF8ToString(canvas_name_cstr); + var canvas = document.getElementById(canvas_name); + canvas.removeEventListener('dragenter', Module.sokol_dragenter); + canvas.removeEventListener('dragleave', Module.sokol_dragleave); + canvas.removeEventListener('dragover', Module.sokol_dragover); + canvas.removeEventListener('drop', Module.sokol_drop); +}); + +/* called from the emscripten event handler to update the keyboard visibility + state, this must happen from an JS input event handler, otherwise + the request will be ignored by the browser +*/ +_SOKOL_PRIVATE void _sapp_emsc_update_keyboard_state(void) { + if (_sapp.emsc.wants_show_keyboard) { + /* create input text field on demand */ + if (!_sapp.emsc.textfield_created) { + _sapp.emsc.textfield_created = true; + sapp_js_create_textfield(); + } + /* focus the text input field, this will bring up the keyboard */ + _sapp.onscreen_keyboard_shown = true; + _sapp.emsc.wants_show_keyboard = false; + sapp_js_focus_textfield(); + } + if (_sapp.emsc.wants_hide_keyboard) { + /* unfocus the text input field */ + if (_sapp.emsc.textfield_created) { + _sapp.onscreen_keyboard_shown = false; + _sapp.emsc.wants_hide_keyboard = false; + sapp_js_unfocus_textfield(); + } + } +} + +/* actually showing the onscreen keyboard must be initiated from a JS + input event handler, so we'll just keep track of the desired + state, and the actual state change will happen with the next input event +*/ +_SOKOL_PRIVATE void _sapp_emsc_show_keyboard(bool show) { + if (show) { + _sapp.emsc.wants_show_keyboard = true; + } + else { + _sapp.emsc.wants_hide_keyboard = true; + } +} + +EM_JS(void, sapp_js_pointer_init, (const char* c_str_target), { + // lookup and store canvas object by name + var target_str = UTF8ToString(c_str_target); + Module.sapp_emsc_target = document.getElementById(target_str); + if (!Module.sapp_emsc_target) { + console.log("sokol_app.h: invalid target:" + target_str); + } + if (!Module.sapp_emsc_target.requestPointerLock) { + 
console.log("sokol_app.h: target doesn't support requestPointerLock:" + target_str); + } +}); + +_SOKOL_PRIVATE EM_BOOL _sapp_emsc_pointerlockchange_cb(int emsc_type, const EmscriptenPointerlockChangeEvent* emsc_event, void* user_data) { + _SOKOL_UNUSED(emsc_type); + _SOKOL_UNUSED(user_data); + _sapp.mouse.locked = emsc_event->isActive; + return EM_TRUE; +} + +_SOKOL_PRIVATE EM_BOOL _sapp_emsc_pointerlockerror_cb(int emsc_type, const void* reserved, void* user_data) { + _SOKOL_UNUSED(emsc_type); + _SOKOL_UNUSED(reserved); + _SOKOL_UNUSED(user_data); + _sapp.mouse.locked = false; + _sapp.emsc.mouse_lock_requested = false; + return true; +} + +EM_JS(void, sapp_js_request_pointerlock, (void), { + if (Module.sapp_emsc_target) { + if (Module.sapp_emsc_target.requestPointerLock) { + Module.sapp_emsc_target.requestPointerLock(); + } + } +}); + +EM_JS(void, sapp_js_exit_pointerlock, (void), { + if (document.exitPointerLock) { + document.exitPointerLock(); + } +}); + +_SOKOL_PRIVATE void _sapp_emsc_lock_mouse(bool lock) { + if (lock) { + /* request mouse-lock during event handler invocation (see _sapp_emsc_update_mouse_lock_state) */ + _sapp.emsc.mouse_lock_requested = true; + } + else { + /* NOTE: the _sapp.mouse_locked state will be set in the pointerlockchange callback */ + _sapp.emsc.mouse_lock_requested = false; + sapp_js_exit_pointerlock(); + } +} + +/* called from inside event handlers to check if mouse lock had been requested, + and if yes, actually enter mouse lock. +*/ +_SOKOL_PRIVATE void _sapp_emsc_update_mouse_lock_state(void) { + if (_sapp.emsc.mouse_lock_requested) { + _sapp.emsc.mouse_lock_requested = false; + sapp_js_request_pointerlock(); + } +} + +#if defined(SOKOL_WGPU) +_SOKOL_PRIVATE void _sapp_emsc_wgpu_surfaces_create(void); +_SOKOL_PRIVATE void _sapp_emsc_wgpu_surfaces_discard(void); +#endif + +_SOKOL_PRIVATE EM_BOOL _sapp_emsc_size_changed(int event_type, const EmscriptenUiEvent* ui_event, void* user_data) { + _SOKOL_UNUSED(event_type); + _SOKOL_UNUSED(user_data); + double w, h; + emscripten_get_element_css_size(_sapp.html5_canvas_selector, &w, &h); + /* The above method might report zero when toggling HTML5 fullscreen, + in that case use the window's inner width reported by the + emscripten event. This works ok when toggling *into* fullscreen + but doesn't properly restore the previous canvas size when switching + back from fullscreen. 
+ + In general, due to the HTML5's fullscreen API's flaky nature it is + recommended to use 'soft fullscreen' (stretching the WebGL canvas + over the browser windows client rect) with a CSS definition like this: + + position: absolute; + top: 0px; + left: 0px; + margin: 0px; + border: 0; + width: 100%; + height: 100%; + overflow: hidden; + display: block; + */ + if (w < 1.0) { + w = ui_event->windowInnerWidth; + } + else { + _sapp.window_width = (int) w; + } + if (h < 1.0) { + h = ui_event->windowInnerHeight; + } + else { + _sapp.window_height = (int) h; + } + if (_sapp.desc.high_dpi) { + _sapp.dpi_scale = emscripten_get_device_pixel_ratio(); + } + _sapp.framebuffer_width = (int) (w * _sapp.dpi_scale); + _sapp.framebuffer_height = (int) (h * _sapp.dpi_scale); + SOKOL_ASSERT((_sapp.framebuffer_width > 0) && (_sapp.framebuffer_height > 0)); + emscripten_set_canvas_element_size(_sapp.html5_canvas_selector, _sapp.framebuffer_width, _sapp.framebuffer_height); + #if defined(SOKOL_WGPU) + /* on WebGPU: recreate size-dependent rendering surfaces */ + _sapp_emsc_wgpu_surfaces_discard(); + _sapp_emsc_wgpu_surfaces_create(); + #endif + if (_sapp_events_enabled()) { + _sapp_init_event(SAPP_EVENTTYPE_RESIZED); + _sapp_call_event(&_sapp.event); + } + return true; +} + +_SOKOL_PRIVATE EM_BOOL _sapp_emsc_mouse_cb(int emsc_type, const EmscriptenMouseEvent* emsc_event, void* user_data) { + _SOKOL_UNUSED(user_data); + if (_sapp.mouse.locked) { + _sapp.mouse.dx = (float) emsc_event->movementX; + _sapp.mouse.dy = (float) emsc_event->movementY; + } + else { + float new_x = emsc_event->targetX * _sapp.dpi_scale; + float new_y = emsc_event->targetY * _sapp.dpi_scale; + if (_sapp.mouse.pos_valid) { + _sapp.mouse.dx = new_x - _sapp.mouse.x; + _sapp.mouse.dy = new_y - _sapp.mouse.y; + } + _sapp.mouse.x = new_x; + _sapp.mouse.y = new_y; + _sapp.mouse.pos_valid = true; + } + if (_sapp_events_enabled() && (emsc_event->button >= 0) && (emsc_event->button < SAPP_MAX_MOUSEBUTTONS)) { + sapp_event_type type; + bool is_button_event = false; + switch (emsc_type) { + case EMSCRIPTEN_EVENT_MOUSEDOWN: + type = SAPP_EVENTTYPE_MOUSE_DOWN; + is_button_event = true; + break; + case EMSCRIPTEN_EVENT_MOUSEUP: + type = SAPP_EVENTTYPE_MOUSE_UP; + is_button_event = true; + break; + case EMSCRIPTEN_EVENT_MOUSEMOVE: + type = SAPP_EVENTTYPE_MOUSE_MOVE; + break; + case EMSCRIPTEN_EVENT_MOUSEENTER: + type = SAPP_EVENTTYPE_MOUSE_ENTER; + break; + case EMSCRIPTEN_EVENT_MOUSELEAVE: + type = SAPP_EVENTTYPE_MOUSE_LEAVE; + break; + default: + type = SAPP_EVENTTYPE_INVALID; + break; + } + if (type != SAPP_EVENTTYPE_INVALID) { + _sapp_init_event(type); + if (emsc_event->ctrlKey) { + _sapp.event.modifiers |= SAPP_MODIFIER_CTRL; + } + if (emsc_event->shiftKey) { + _sapp.event.modifiers |= SAPP_MODIFIER_SHIFT; + } + if (emsc_event->altKey) { + _sapp.event.modifiers |= SAPP_MODIFIER_ALT; + } + if (emsc_event->metaKey) { + _sapp.event.modifiers |= SAPP_MODIFIER_SUPER; + } + if (is_button_event) { + switch (emsc_event->button) { + case 0: _sapp.event.mouse_button = SAPP_MOUSEBUTTON_LEFT; break; + case 1: _sapp.event.mouse_button = SAPP_MOUSEBUTTON_MIDDLE; break; + case 2: _sapp.event.mouse_button = SAPP_MOUSEBUTTON_RIGHT; break; + default: _sapp.event.mouse_button = (sapp_mousebutton)emsc_event->button; break; + } + } + else { + _sapp.event.mouse_button = SAPP_MOUSEBUTTON_INVALID; + } + _sapp_call_event(&_sapp.event); + } + /* mouse lock can only be activated in mouse button events (not in move, enter or leave) */ + if (is_button_event) { + 
_sapp_emsc_update_mouse_lock_state(); + } + } + _sapp_emsc_update_keyboard_state(); + return true; +} + +_SOKOL_PRIVATE EM_BOOL _sapp_emsc_wheel_cb(int emsc_type, const EmscriptenWheelEvent* emsc_event, void* user_data) { + _SOKOL_UNUSED(emsc_type); + _SOKOL_UNUSED(user_data); + if (_sapp_events_enabled()) { + _sapp_init_event(SAPP_EVENTTYPE_MOUSE_SCROLL); + if (emsc_event->mouse.ctrlKey) { + _sapp.event.modifiers |= SAPP_MODIFIER_CTRL; + } + if (emsc_event->mouse.shiftKey) { + _sapp.event.modifiers |= SAPP_MODIFIER_SHIFT; + } + if (emsc_event->mouse.altKey) { + _sapp.event.modifiers |= SAPP_MODIFIER_ALT; + } + if (emsc_event->mouse.metaKey) { + _sapp.event.modifiers |= SAPP_MODIFIER_SUPER; + } + /* see https://github.com/floooh/sokol/issues/339 */ + float scale; + switch (emsc_event->deltaMode) { + case DOM_DELTA_PIXEL: scale = -0.04f; break; + case DOM_DELTA_LINE: scale = -1.33f; break; + case DOM_DELTA_PAGE: scale = -10.0f; break; // FIXME: this is a guess + default: scale = -0.1f; break; // shouldn't happen + } + _sapp.event.scroll_x = scale * (float)emsc_event->deltaX; + _sapp.event.scroll_y = scale * (float)emsc_event->deltaY; + _sapp_call_event(&_sapp.event); + } + _sapp_emsc_update_keyboard_state(); + _sapp_emsc_update_mouse_lock_state(); + return true; +} + +_SOKOL_PRIVATE EM_BOOL _sapp_emsc_key_cb(int emsc_type, const EmscriptenKeyboardEvent* emsc_event, void* user_data) { + _SOKOL_UNUSED(user_data); + bool retval = true; + if (_sapp_events_enabled()) { + sapp_event_type type; + switch (emsc_type) { + case EMSCRIPTEN_EVENT_KEYDOWN: + type = SAPP_EVENTTYPE_KEY_DOWN; + break; + case EMSCRIPTEN_EVENT_KEYUP: + type = SAPP_EVENTTYPE_KEY_UP; + break; + case EMSCRIPTEN_EVENT_KEYPRESS: + type = SAPP_EVENTTYPE_CHAR; + break; + default: + type = SAPP_EVENTTYPE_INVALID; + break; + } + if (type != SAPP_EVENTTYPE_INVALID) { + bool send_keyup_followup = false; + _sapp_init_event(type); + _sapp.event.key_repeat = emsc_event->repeat; + if (emsc_event->ctrlKey) { + _sapp.event.modifiers |= SAPP_MODIFIER_CTRL; + } + if (emsc_event->shiftKey) { + _sapp.event.modifiers |= SAPP_MODIFIER_SHIFT; + } + if (emsc_event->altKey) { + _sapp.event.modifiers |= SAPP_MODIFIER_ALT; + } + if (emsc_event->metaKey) { + _sapp.event.modifiers |= SAPP_MODIFIER_SUPER; + } + if (type == SAPP_EVENTTYPE_CHAR) { + _sapp.event.char_code = emsc_event->charCode; + /* workaround to make Cmd+V work on Safari */ + if ((emsc_event->metaKey) && (emsc_event->charCode == 118)) { + retval = false; + } + } + else { + _sapp.event.key_code = _sapp_translate_key((int)emsc_event->keyCode); + /* Special hack for macOS: if the Super key is pressed, macOS doesn't + send keyUp events. 
As a workaround, to prevent keys from + "sticking", we'll send a keyup event following a keydown + when the SUPER key is pressed + */ + if ((type == SAPP_EVENTTYPE_KEY_DOWN) && + (_sapp.event.key_code != SAPP_KEYCODE_LEFT_SUPER) && + (_sapp.event.key_code != SAPP_KEYCODE_RIGHT_SUPER) && + (_sapp.event.modifiers & SAPP_MODIFIER_SUPER)) + { + send_keyup_followup = true; + } + /* only forward a certain key ranges to the browser */ + switch (_sapp.event.key_code) { + case SAPP_KEYCODE_WORLD_1: + case SAPP_KEYCODE_WORLD_2: + case SAPP_KEYCODE_ESCAPE: + case SAPP_KEYCODE_ENTER: + case SAPP_KEYCODE_TAB: + case SAPP_KEYCODE_BACKSPACE: + case SAPP_KEYCODE_INSERT: + case SAPP_KEYCODE_DELETE: + case SAPP_KEYCODE_RIGHT: + case SAPP_KEYCODE_LEFT: + case SAPP_KEYCODE_DOWN: + case SAPP_KEYCODE_UP: + case SAPP_KEYCODE_PAGE_UP: + case SAPP_KEYCODE_PAGE_DOWN: + case SAPP_KEYCODE_HOME: + case SAPP_KEYCODE_END: + case SAPP_KEYCODE_CAPS_LOCK: + case SAPP_KEYCODE_SCROLL_LOCK: + case SAPP_KEYCODE_NUM_LOCK: + case SAPP_KEYCODE_PRINT_SCREEN: + case SAPP_KEYCODE_PAUSE: + case SAPP_KEYCODE_F1: + case SAPP_KEYCODE_F2: + case SAPP_KEYCODE_F3: + case SAPP_KEYCODE_F4: + case SAPP_KEYCODE_F5: + case SAPP_KEYCODE_F6: + case SAPP_KEYCODE_F7: + case SAPP_KEYCODE_F8: + case SAPP_KEYCODE_F9: + case SAPP_KEYCODE_F10: + case SAPP_KEYCODE_F11: + case SAPP_KEYCODE_F12: + case SAPP_KEYCODE_F13: + case SAPP_KEYCODE_F14: + case SAPP_KEYCODE_F15: + case SAPP_KEYCODE_F16: + case SAPP_KEYCODE_F17: + case SAPP_KEYCODE_F18: + case SAPP_KEYCODE_F19: + case SAPP_KEYCODE_F20: + case SAPP_KEYCODE_F21: + case SAPP_KEYCODE_F22: + case SAPP_KEYCODE_F23: + case SAPP_KEYCODE_F24: + case SAPP_KEYCODE_F25: + case SAPP_KEYCODE_LEFT_SHIFT: + case SAPP_KEYCODE_LEFT_CONTROL: + case SAPP_KEYCODE_LEFT_ALT: + case SAPP_KEYCODE_LEFT_SUPER: + case SAPP_KEYCODE_RIGHT_SHIFT: + case SAPP_KEYCODE_RIGHT_CONTROL: + case SAPP_KEYCODE_RIGHT_ALT: + case SAPP_KEYCODE_RIGHT_SUPER: + case SAPP_KEYCODE_MENU: + /* consume the event */ + break; + default: + /* forward key to browser */ + retval = false; + break; + } + } + if (_sapp_call_event(&_sapp.event)) { + /* consume event via sapp_consume_event() */ + retval = true; + } + if (send_keyup_followup) { + _sapp.event.type = SAPP_EVENTTYPE_KEY_UP; + if (_sapp_call_event(&_sapp.event)) { + retval = true; + } + } + } + } + _sapp_emsc_update_keyboard_state(); + _sapp_emsc_update_mouse_lock_state(); + return retval; +} + +_SOKOL_PRIVATE EM_BOOL _sapp_emsc_touch_cb(int emsc_type, const EmscriptenTouchEvent* emsc_event, void* user_data) { + _SOKOL_UNUSED(user_data); + bool retval = true; + if (_sapp_events_enabled()) { + sapp_event_type type; + switch (emsc_type) { + case EMSCRIPTEN_EVENT_TOUCHSTART: + type = SAPP_EVENTTYPE_TOUCHES_BEGAN; + break; + case EMSCRIPTEN_EVENT_TOUCHMOVE: + type = SAPP_EVENTTYPE_TOUCHES_MOVED; + break; + case EMSCRIPTEN_EVENT_TOUCHEND: + type = SAPP_EVENTTYPE_TOUCHES_ENDED; + break; + case EMSCRIPTEN_EVENT_TOUCHCANCEL: + type = SAPP_EVENTTYPE_TOUCHES_CANCELLED; + break; + default: + type = SAPP_EVENTTYPE_INVALID; + retval = false; + break; + } + if (type != SAPP_EVENTTYPE_INVALID) { + _sapp_init_event(type); + if (emsc_event->ctrlKey) { + _sapp.event.modifiers |= SAPP_MODIFIER_CTRL; + } + if (emsc_event->shiftKey) { + _sapp.event.modifiers |= SAPP_MODIFIER_SHIFT; + } + if (emsc_event->altKey) { + _sapp.event.modifiers |= SAPP_MODIFIER_ALT; + } + if (emsc_event->metaKey) { + _sapp.event.modifiers |= SAPP_MODIFIER_SUPER; + } + _sapp.event.num_touches = emsc_event->numTouches; + if 
(_sapp.event.num_touches > SAPP_MAX_TOUCHPOINTS) { + _sapp.event.num_touches = SAPP_MAX_TOUCHPOINTS; + } + for (int i = 0; i < _sapp.event.num_touches; i++) { + const EmscriptenTouchPoint* src = &emsc_event->touches[i]; + sapp_touchpoint* dst = &_sapp.event.touches[i]; + dst->identifier = (uintptr_t)src->identifier; + dst->pos_x = src->targetX * _sapp.dpi_scale; + dst->pos_y = src->targetY * _sapp.dpi_scale; + dst->changed = src->isChanged; + } + _sapp_call_event(&_sapp.event); + } + } + _sapp_emsc_update_keyboard_state(); + return retval; +} + +_SOKOL_PRIVATE void _sapp_emsc_keytable_init(void) { + _sapp.keycodes[8] = SAPP_KEYCODE_BACKSPACE; + _sapp.keycodes[9] = SAPP_KEYCODE_TAB; + _sapp.keycodes[13] = SAPP_KEYCODE_ENTER; + _sapp.keycodes[16] = SAPP_KEYCODE_LEFT_SHIFT; + _sapp.keycodes[17] = SAPP_KEYCODE_LEFT_CONTROL; + _sapp.keycodes[18] = SAPP_KEYCODE_LEFT_ALT; + _sapp.keycodes[19] = SAPP_KEYCODE_PAUSE; + _sapp.keycodes[27] = SAPP_KEYCODE_ESCAPE; + _sapp.keycodes[32] = SAPP_KEYCODE_SPACE; + _sapp.keycodes[33] = SAPP_KEYCODE_PAGE_UP; + _sapp.keycodes[34] = SAPP_KEYCODE_PAGE_DOWN; + _sapp.keycodes[35] = SAPP_KEYCODE_END; + _sapp.keycodes[36] = SAPP_KEYCODE_HOME; + _sapp.keycodes[37] = SAPP_KEYCODE_LEFT; + _sapp.keycodes[38] = SAPP_KEYCODE_UP; + _sapp.keycodes[39] = SAPP_KEYCODE_RIGHT; + _sapp.keycodes[40] = SAPP_KEYCODE_DOWN; + _sapp.keycodes[45] = SAPP_KEYCODE_INSERT; + _sapp.keycodes[46] = SAPP_KEYCODE_DELETE; + _sapp.keycodes[48] = SAPP_KEYCODE_0; + _sapp.keycodes[49] = SAPP_KEYCODE_1; + _sapp.keycodes[50] = SAPP_KEYCODE_2; + _sapp.keycodes[51] = SAPP_KEYCODE_3; + _sapp.keycodes[52] = SAPP_KEYCODE_4; + _sapp.keycodes[53] = SAPP_KEYCODE_5; + _sapp.keycodes[54] = SAPP_KEYCODE_6; + _sapp.keycodes[55] = SAPP_KEYCODE_7; + _sapp.keycodes[56] = SAPP_KEYCODE_8; + _sapp.keycodes[57] = SAPP_KEYCODE_9; + _sapp.keycodes[59] = SAPP_KEYCODE_SEMICOLON; + _sapp.keycodes[64] = SAPP_KEYCODE_EQUAL; + _sapp.keycodes[65] = SAPP_KEYCODE_A; + _sapp.keycodes[66] = SAPP_KEYCODE_B; + _sapp.keycodes[67] = SAPP_KEYCODE_C; + _sapp.keycodes[68] = SAPP_KEYCODE_D; + _sapp.keycodes[69] = SAPP_KEYCODE_E; + _sapp.keycodes[70] = SAPP_KEYCODE_F; + _sapp.keycodes[71] = SAPP_KEYCODE_G; + _sapp.keycodes[72] = SAPP_KEYCODE_H; + _sapp.keycodes[73] = SAPP_KEYCODE_I; + _sapp.keycodes[74] = SAPP_KEYCODE_J; + _sapp.keycodes[75] = SAPP_KEYCODE_K; + _sapp.keycodes[76] = SAPP_KEYCODE_L; + _sapp.keycodes[77] = SAPP_KEYCODE_M; + _sapp.keycodes[78] = SAPP_KEYCODE_N; + _sapp.keycodes[79] = SAPP_KEYCODE_O; + _sapp.keycodes[80] = SAPP_KEYCODE_P; + _sapp.keycodes[81] = SAPP_KEYCODE_Q; + _sapp.keycodes[82] = SAPP_KEYCODE_R; + _sapp.keycodes[83] = SAPP_KEYCODE_S; + _sapp.keycodes[84] = SAPP_KEYCODE_T; + _sapp.keycodes[85] = SAPP_KEYCODE_U; + _sapp.keycodes[86] = SAPP_KEYCODE_V; + _sapp.keycodes[87] = SAPP_KEYCODE_W; + _sapp.keycodes[88] = SAPP_KEYCODE_X; + _sapp.keycodes[89] = SAPP_KEYCODE_Y; + _sapp.keycodes[90] = SAPP_KEYCODE_Z; + _sapp.keycodes[91] = SAPP_KEYCODE_LEFT_SUPER; + _sapp.keycodes[93] = SAPP_KEYCODE_MENU; + _sapp.keycodes[96] = SAPP_KEYCODE_KP_0; + _sapp.keycodes[97] = SAPP_KEYCODE_KP_1; + _sapp.keycodes[98] = SAPP_KEYCODE_KP_2; + _sapp.keycodes[99] = SAPP_KEYCODE_KP_3; + _sapp.keycodes[100] = SAPP_KEYCODE_KP_4; + _sapp.keycodes[101] = SAPP_KEYCODE_KP_5; + _sapp.keycodes[102] = SAPP_KEYCODE_KP_6; + _sapp.keycodes[103] = SAPP_KEYCODE_KP_7; + _sapp.keycodes[104] = SAPP_KEYCODE_KP_8; + _sapp.keycodes[105] = SAPP_KEYCODE_KP_9; + _sapp.keycodes[106] = SAPP_KEYCODE_KP_MULTIPLY; + _sapp.keycodes[107] = SAPP_KEYCODE_KP_ADD; + 
_sapp.keycodes[109] = SAPP_KEYCODE_KP_SUBTRACT; + _sapp.keycodes[110] = SAPP_KEYCODE_KP_DECIMAL; + _sapp.keycodes[111] = SAPP_KEYCODE_KP_DIVIDE; + _sapp.keycodes[112] = SAPP_KEYCODE_F1; + _sapp.keycodes[113] = SAPP_KEYCODE_F2; + _sapp.keycodes[114] = SAPP_KEYCODE_F3; + _sapp.keycodes[115] = SAPP_KEYCODE_F4; + _sapp.keycodes[116] = SAPP_KEYCODE_F5; + _sapp.keycodes[117] = SAPP_KEYCODE_F6; + _sapp.keycodes[118] = SAPP_KEYCODE_F7; + _sapp.keycodes[119] = SAPP_KEYCODE_F8; + _sapp.keycodes[120] = SAPP_KEYCODE_F9; + _sapp.keycodes[121] = SAPP_KEYCODE_F10; + _sapp.keycodes[122] = SAPP_KEYCODE_F11; + _sapp.keycodes[123] = SAPP_KEYCODE_F12; + _sapp.keycodes[144] = SAPP_KEYCODE_NUM_LOCK; + _sapp.keycodes[145] = SAPP_KEYCODE_SCROLL_LOCK; + _sapp.keycodes[173] = SAPP_KEYCODE_MINUS; + _sapp.keycodes[186] = SAPP_KEYCODE_SEMICOLON; + _sapp.keycodes[187] = SAPP_KEYCODE_EQUAL; + _sapp.keycodes[188] = SAPP_KEYCODE_COMMA; + _sapp.keycodes[189] = SAPP_KEYCODE_MINUS; + _sapp.keycodes[190] = SAPP_KEYCODE_PERIOD; + _sapp.keycodes[191] = SAPP_KEYCODE_SLASH; + _sapp.keycodes[192] = SAPP_KEYCODE_GRAVE_ACCENT; + _sapp.keycodes[219] = SAPP_KEYCODE_LEFT_BRACKET; + _sapp.keycodes[220] = SAPP_KEYCODE_BACKSLASH; + _sapp.keycodes[221] = SAPP_KEYCODE_RIGHT_BRACKET; + _sapp.keycodes[222] = SAPP_KEYCODE_APOSTROPHE; + _sapp.keycodes[224] = SAPP_KEYCODE_LEFT_SUPER; +} + +#if defined(SOKOL_GLES2) || defined(SOKOL_GLES3) +_SOKOL_PRIVATE EM_BOOL _sapp_emsc_webgl_context_cb(int emsc_type, const void* reserved, void* user_data) { + _SOKOL_UNUSED(reserved); + _SOKOL_UNUSED(user_data); + sapp_event_type type; + switch (emsc_type) { + case EMSCRIPTEN_EVENT_WEBGLCONTEXTLOST: type = SAPP_EVENTTYPE_SUSPENDED; break; + case EMSCRIPTEN_EVENT_WEBGLCONTEXTRESTORED: type = SAPP_EVENTTYPE_RESUMED; break; + default: type = SAPP_EVENTTYPE_INVALID; break; + } + if (_sapp_events_enabled() && (SAPP_EVENTTYPE_INVALID != type)) { + _sapp_init_event(type); + _sapp_call_event(&_sapp.event); + } + return true; +} + +_SOKOL_PRIVATE void _sapp_emsc_webgl_init(void) { + EmscriptenWebGLContextAttributes attrs; + emscripten_webgl_init_context_attributes(&attrs); + attrs.alpha = _sapp.desc.alpha; + attrs.depth = true; + attrs.stencil = true; + attrs.antialias = _sapp.sample_count > 1; + attrs.premultipliedAlpha = _sapp.desc.html5_premultiplied_alpha; + attrs.preserveDrawingBuffer = _sapp.desc.html5_preserve_drawing_buffer; + attrs.enableExtensionsByDefault = true; + #if defined(SOKOL_GLES3) + if (_sapp.desc.gl_force_gles2) { + attrs.majorVersion = 1; + _sapp.gles2_fallback = true; + } + else { + attrs.majorVersion = 2; + } + #endif + EMSCRIPTEN_WEBGL_CONTEXT_HANDLE ctx = emscripten_webgl_create_context(_sapp.html5_canvas_selector, &attrs); + if (!ctx) { + attrs.majorVersion = 1; + ctx = emscripten_webgl_create_context(_sapp.html5_canvas_selector, &attrs); + _sapp.gles2_fallback = true; + } + emscripten_webgl_make_context_current(ctx); + + /* some WebGL extension are not enabled automatically by emscripten */ + emscripten_webgl_enable_extension(ctx, "WEBKIT_WEBGL_compressed_texture_pvrtc"); +} +#endif + +#if defined(SOKOL_WGPU) +#define _SAPP_EMSC_WGPU_STATE_INITIAL (0) +#define _SAPP_EMSC_WGPU_STATE_READY (1) +#define _SAPP_EMSC_WGPU_STATE_RUNNING (2) + +#if defined(__cplusplus) +extern "C" { +#endif +/* called when the asynchronous WebGPU device + swapchain init code in JS has finished */ +EMSCRIPTEN_KEEPALIVE void _sapp_emsc_wgpu_ready(int device_id, int swapchain_id, int swapchain_fmt) { + SOKOL_ASSERT(0 == _sapp.emsc.wgpu.device); + _sapp.emsc.wgpu.device 
= (WGPUDevice) device_id; + _sapp.emsc.wgpu.swapchain = (WGPUSwapChain) swapchain_id; + _sapp.emsc.wgpu.render_format = (WGPUTextureFormat) swapchain_fmt; + _sapp.emsc.wgpu.state = _SAPP_EMSC_WGPU_STATE_READY; +} +#if defined(__cplusplus) +} // extern "C" +#endif + +/* embedded JS function to handle all the asynchronous WebGPU setup */ +EM_JS(void, sapp_js_wgpu_init, (), { + WebGPU.initManagers(); + // FIXME: the extension activation must be more clever here + navigator.gpu.requestAdapter().then(function(adapter) { + console.log("wgpu adapter extensions: " + adapter.extensions); + adapter.requestDevice({ extensions: ["textureCompressionBC"]}).then(function(device) { + var gpuContext = document.getElementById("canvas").getContext("gpupresent"); + console.log("wgpu device extensions: " + adapter.extensions); + gpuContext.getSwapChainPreferredFormat(device).then(function(fmt) { + var swapChainDescriptor = { device: device, format: fmt }; + var swapChain = gpuContext.configureSwapChain(swapChainDescriptor); + var deviceId = WebGPU.mgrDevice.create(device); + var swapChainId = WebGPU.mgrSwapChain.create(swapChain); + var fmtId = WebGPU.TextureFormat.findIndex(function(elm) { return elm==fmt; }); + console.log("wgpu device: " + device); + console.log("wgpu swap chain: " + swapChain); + console.log("wgpu preferred format: " + fmt + " (" + fmtId + ")"); + __sapp_emsc_wgpu_ready(deviceId, swapChainId, fmtId); + }); + }); + }); +}); + +_SOKOL_PRIVATE void _sapp_emsc_wgpu_surfaces_create(void) { + SOKOL_ASSERT(_sapp.emsc.wgpu.device); + SOKOL_ASSERT(_sapp.emsc.wgpu.swapchain); + SOKOL_ASSERT(0 == _sapp.emsc.wgpu.depth_stencil_tex); + SOKOL_ASSERT(0 == _sapp.emsc.wgpu.depth_stencil_view); + SOKOL_ASSERT(0 == _sapp.emsc.wgpu.msaa_tex); + SOKOL_ASSERT(0 == _sapp.emsc.wgpu.msaa_view); + + WGPUTextureDescriptor ds_desc; + memset(&ds_desc, 0, sizeof(ds_desc)); + ds_desc.usage = WGPUTextureUsage_OutputAttachment; + ds_desc.dimension = WGPUTextureDimension_2D; + ds_desc.size.width = (uint32_t) _sapp.framebuffer_width; + ds_desc.size.height = (uint32_t) _sapp.framebuffer_height; + ds_desc.size.depth = 1; + ds_desc.arrayLayerCount = 1; + ds_desc.format = WGPUTextureFormat_Depth24PlusStencil8; + ds_desc.mipLevelCount = 1; + ds_desc.sampleCount = _sapp.sample_count; + _sapp.emsc.wgpu.depth_stencil_tex = wgpuDeviceCreateTexture(_sapp.emsc.wgpu.device, &ds_desc); + _sapp.emsc.wgpu.depth_stencil_view = wgpuTextureCreateView(_sapp.emsc.wgpu.depth_stencil_tex, 0); + + if (_sapp.sample_count > 1) { + WGPUTextureDescriptor msaa_desc; + memset(&msaa_desc, 0, sizeof(msaa_desc)); + msaa_desc.usage = WGPUTextureUsage_OutputAttachment; + msaa_desc.dimension = WGPUTextureDimension_2D; + msaa_desc.size.width = (uint32_t) _sapp.framebuffer_width; + msaa_desc.size.height = (uint32_t) _sapp.framebuffer_height; + msaa_desc.size.depth = 1; + msaa_desc.arrayLayerCount = 1; + msaa_desc.format = _sapp.emsc.wgpu.render_format; + msaa_desc.mipLevelCount = 1; + msaa_desc.sampleCount = _sapp.sample_count; + _sapp.emsc.wgpu.msaa_tex = wgpuDeviceCreateTexture(_sapp.emsc.wgpu.device, &msaa_desc); + _sapp.emsc.wgpu.msaa_view = wgpuTextureCreateView(_sapp.emsc.wgpu.msaa_tex, 0); + } +} + +_SOKOL_PRIVATE void _sapp_emsc_wgpu_surfaces_discard(void) { + if (_sapp.emsc.wgpu.msaa_tex) { + wgpuTextureRelease(_sapp.emsc.wgpu.msaa_tex); + _sapp.emsc.wgpu.msaa_tex = 0; + } + if (_sapp.emsc.wgpu.msaa_view) { + wgpuTextureViewRelease(_sapp.emsc.wgpu.msaa_view); + _sapp.emsc.wgpu.msaa_view = 0; + } + if (_sapp.emsc.wgpu.depth_stencil_tex) { + 
wgpuTextureRelease(_sapp.emsc.wgpu.depth_stencil_tex); + _sapp.emsc.wgpu.depth_stencil_tex = 0; + } + if (_sapp.emsc.wgpu.depth_stencil_view) { + wgpuTextureViewRelease(_sapp.emsc.wgpu.depth_stencil_view); + _sapp.emsc.wgpu.depth_stencil_view = 0; + } +} + +_SOKOL_PRIVATE void _sapp_emsc_wgpu_next_frame(void) { + if (_sapp.emsc.wgpu.swapchain_view) { + wgpuTextureViewRelease(_sapp.emsc.wgpu.swapchain_view); + } + _sapp.emsc.wgpu.swapchain_view = wgpuSwapChainGetCurrentTextureView(_sapp.emsc.wgpu.swapchain); +} +#endif + +_SOKOL_PRIVATE void _sapp_emsc_register_eventhandlers(void) { + emscripten_set_mousedown_callback(_sapp.html5_canvas_selector, 0, true, _sapp_emsc_mouse_cb); + emscripten_set_mouseup_callback(_sapp.html5_canvas_selector, 0, true, _sapp_emsc_mouse_cb); + emscripten_set_mousemove_callback(_sapp.html5_canvas_selector, 0, true, _sapp_emsc_mouse_cb); + emscripten_set_mouseenter_callback(_sapp.html5_canvas_selector, 0, true, _sapp_emsc_mouse_cb); + emscripten_set_mouseleave_callback(_sapp.html5_canvas_selector, 0, true, _sapp_emsc_mouse_cb); + emscripten_set_wheel_callback(_sapp.html5_canvas_selector, 0, true, _sapp_emsc_wheel_cb); + emscripten_set_keydown_callback(EMSCRIPTEN_EVENT_TARGET_WINDOW, 0, true, _sapp_emsc_key_cb); + emscripten_set_keyup_callback(EMSCRIPTEN_EVENT_TARGET_WINDOW, 0, true, _sapp_emsc_key_cb); + emscripten_set_keypress_callback(EMSCRIPTEN_EVENT_TARGET_WINDOW, 0, true, _sapp_emsc_key_cb); + emscripten_set_touchstart_callback(_sapp.html5_canvas_selector, 0, true, _sapp_emsc_touch_cb); + emscripten_set_touchmove_callback(_sapp.html5_canvas_selector, 0, true, _sapp_emsc_touch_cb); + emscripten_set_touchend_callback(_sapp.html5_canvas_selector, 0, true, _sapp_emsc_touch_cb); + emscripten_set_touchcancel_callback(_sapp.html5_canvas_selector, 0, true, _sapp_emsc_touch_cb); + emscripten_set_pointerlockchange_callback(EMSCRIPTEN_EVENT_TARGET_DOCUMENT, 0, true, _sapp_emsc_pointerlockchange_cb); + emscripten_set_pointerlockerror_callback(EMSCRIPTEN_EVENT_TARGET_DOCUMENT, 0, true, _sapp_emsc_pointerlockerror_cb); + sapp_js_add_beforeunload_listener(); + if (_sapp.clipboard.enabled) { + sapp_js_add_clipboard_listener(); + } + if (_sapp.drop.enabled) { + sapp_js_add_dragndrop_listeners(&_sapp.html5_canvas_selector[1]); + } + #if defined(SOKOL_GLES2) || defined(SOKOL_GLES3) + emscripten_set_webglcontextlost_callback(_sapp.html5_canvas_selector, 0, true, _sapp_emsc_webgl_context_cb); + emscripten_set_webglcontextrestored_callback(_sapp.html5_canvas_selector, 0, true, _sapp_emsc_webgl_context_cb); + #endif +} + +_SOKOL_PRIVATE void _sapp_emsc_unregister_eventhandlers() { + emscripten_set_mousedown_callback(_sapp.html5_canvas_selector, 0, true, 0); + emscripten_set_mouseup_callback(_sapp.html5_canvas_selector, 0, true, 0); + emscripten_set_mousemove_callback(_sapp.html5_canvas_selector, 0, true, 0); + emscripten_set_mouseenter_callback(_sapp.html5_canvas_selector, 0, true, 0); + emscripten_set_mouseleave_callback(_sapp.html5_canvas_selector, 0, true, 0); + emscripten_set_wheel_callback(_sapp.html5_canvas_selector, 0, true, 0); + emscripten_set_keydown_callback(EMSCRIPTEN_EVENT_TARGET_WINDOW, 0, true, 0); + emscripten_set_keyup_callback(EMSCRIPTEN_EVENT_TARGET_WINDOW, 0, true, 0); + emscripten_set_keypress_callback(EMSCRIPTEN_EVENT_TARGET_WINDOW, 0, true, 0); + emscripten_set_touchstart_callback(_sapp.html5_canvas_selector, 0, true, 0); + emscripten_set_touchmove_callback(_sapp.html5_canvas_selector, 0, true, 0); + 
emscripten_set_touchend_callback(_sapp.html5_canvas_selector, 0, true, 0); + emscripten_set_touchcancel_callback(_sapp.html5_canvas_selector, 0, true, 0); + emscripten_set_pointerlockchange_callback(EMSCRIPTEN_EVENT_TARGET_DOCUMENT, 0, true, 0); + emscripten_set_pointerlockerror_callback(EMSCRIPTEN_EVENT_TARGET_DOCUMENT, 0, true, 0); + sapp_js_remove_beforeunload_listener(); + if (_sapp.clipboard.enabled) { + sapp_js_remove_clipboard_listener(); + } + if (_sapp.drop.enabled) { + sapp_js_remove_dragndrop_listeners(&_sapp.html5_canvas_selector[1]); + } + #if defined(SOKOL_GLES2) || defined(SOKOL_GLES3) + emscripten_set_webglcontextlost_callback(_sapp.html5_canvas_selector, 0, true, 0); + emscripten_set_webglcontextrestored_callback(_sapp.html5_canvas_selector, 0, true, 0); + #endif +} + +_SOKOL_PRIVATE EM_BOOL _sapp_emsc_frame(double time, void* userData) { + _SOKOL_UNUSED(time); + _SOKOL_UNUSED(userData); + + #if defined(SOKOL_WGPU) + /* + on WebGPU, the emscripten frame callback will already be called while + the asynchronous WebGPU device and swapchain initialization is still + in progress + */ + switch (_sapp.emsc.wgpu.state) { + case _SAPP_EMSC_WGPU_STATE_INITIAL: + /* async JS init hasn't finished yet */ + break; + case _SAPP_EMSC_WGPU_STATE_READY: + /* perform post-async init stuff */ + _sapp_emsc_wgpu_surfaces_create(); + _sapp.emsc.wgpu.state = _SAPP_EMSC_WGPU_STATE_RUNNING; + break; + case _SAPP_EMSC_WGPU_STATE_RUNNING: + /* a regular frame */ + _sapp_emsc_wgpu_next_frame(); + _sapp_frame(); + break; + } + #else + /* WebGL code path */ + _sapp_frame(); + #endif + + /* quit-handling */ + if (_sapp.quit_requested) { + _sapp_init_event(SAPP_EVENTTYPE_QUIT_REQUESTED); + _sapp_call_event(&_sapp.event); + if (_sapp.quit_requested) { + _sapp.quit_ordered = true; + } + } + if (_sapp.quit_ordered) { + _sapp_emsc_unregister_eventhandlers(); + _sapp_call_cleanup(); + _sapp_discard_state(); + return EM_FALSE; + } + return EM_TRUE; +} + +_SOKOL_PRIVATE void _sapp_emsc_run(const sapp_desc* desc) { + _sapp_init_state(desc); + sapp_js_pointer_init(&_sapp.html5_canvas_selector[1]); + _sapp_emsc_keytable_init(); + double w, h; + if (_sapp.desc.html5_canvas_resize) { + w = (double) _sapp.desc.width; + h = (double) _sapp.desc.height; + } + else { + emscripten_get_element_css_size(_sapp.html5_canvas_selector, &w, &h); + emscripten_set_resize_callback(EMSCRIPTEN_EVENT_TARGET_WINDOW, 0, false, _sapp_emsc_size_changed); + } + if (_sapp.desc.high_dpi) { + _sapp.dpi_scale = emscripten_get_device_pixel_ratio(); + } + _sapp.window_width = (int) w; + _sapp.window_height = (int) h; + _sapp.framebuffer_width = (int) (w * _sapp.dpi_scale); + _sapp.framebuffer_height = (int) (h * _sapp.dpi_scale); + emscripten_set_canvas_element_size(_sapp.html5_canvas_selector, _sapp.framebuffer_width, _sapp.framebuffer_height); + #if defined(SOKOL_GLES2) || defined(SOKOL_GLES3) + _sapp_emsc_webgl_init(); + #elif defined(SOKOL_WGPU) + sapp_js_wgpu_init(); + #endif + _sapp.valid = true; + _sapp_emsc_register_eventhandlers(); + + /* start the frame loop */ + emscripten_request_animation_frame_loop(_sapp_emsc_frame, 0); + + /* NOT A BUG: do not call _sapp_discard_state() here, instead this is + called in _sapp_emsc_frame() when the application is ordered to quit + */ +} + +#if !defined(SOKOL_NO_ENTRY) +int main(int argc, char* argv[]) { + sapp_desc desc = sokol_main(argc, argv); + _sapp_emsc_run(&desc); + return 0; +} +#endif /* SOKOL_NO_ENTRY */ +#endif /* _SAPP_EMSCRIPTEN */ + +/*== MISC GL SUPPORT FUNCTIONS 
================================================*/ +#if defined(SOKOL_GLCORE33) +typedef struct { + int red_bits; + int green_bits; + int blue_bits; + int alpha_bits; + int depth_bits; + int stencil_bits; + int samples; + bool doublebuffer; + uintptr_t handle; +} _sapp_gl_fbconfig; + +_SOKOL_PRIVATE void _sapp_gl_init_fbconfig(_sapp_gl_fbconfig* fbconfig) { + memset(fbconfig, 0, sizeof(_sapp_gl_fbconfig)); + /* -1 means "don't care" */ + fbconfig->red_bits = -1; + fbconfig->green_bits = -1; + fbconfig->blue_bits = -1; + fbconfig->alpha_bits = -1; + fbconfig->depth_bits = -1; + fbconfig->stencil_bits = -1; + fbconfig->samples = -1; +} + +_SOKOL_PRIVATE const _sapp_gl_fbconfig* _sapp_gl_choose_fbconfig(const _sapp_gl_fbconfig* desired, const _sapp_gl_fbconfig* alternatives, int count) { + int missing, least_missing = 1000000; + int color_diff, least_color_diff = 10000000; + int extra_diff, least_extra_diff = 10000000; + const _sapp_gl_fbconfig* current; + const _sapp_gl_fbconfig* closest = 0; + for (int i = 0; i < count; i++) { + current = alternatives + i; + if (desired->doublebuffer != current->doublebuffer) { + continue; + } + missing = 0; + if (desired->alpha_bits > 0 && current->alpha_bits == 0) { + missing++; + } + if (desired->depth_bits > 0 && current->depth_bits == 0) { + missing++; + } + if (desired->stencil_bits > 0 && current->stencil_bits == 0) { + missing++; + } + if (desired->samples > 0 && current->samples == 0) { + /* Technically, several multisampling buffers could be + involved, but that's a lower level implementation detail and + not important to us here, so we count them as one + */ + missing++; + } + + /* These polynomials make many small channel size differences matter + less than one large channel size difference + Calculate color channel size difference value + */ + color_diff = 0; + if (desired->red_bits != -1) { + color_diff += (desired->red_bits - current->red_bits) * (desired->red_bits - current->red_bits); + } + if (desired->green_bits != -1) { + color_diff += (desired->green_bits - current->green_bits) * (desired->green_bits - current->green_bits); + } + if (desired->blue_bits != -1) { + color_diff += (desired->blue_bits - current->blue_bits) * (desired->blue_bits - current->blue_bits); + } + + /* Calculate non-color channel size difference value */ + extra_diff = 0; + if (desired->alpha_bits != -1) { + extra_diff += (desired->alpha_bits - current->alpha_bits) * (desired->alpha_bits - current->alpha_bits); + } + if (desired->depth_bits != -1) { + extra_diff += (desired->depth_bits - current->depth_bits) * (desired->depth_bits - current->depth_bits); + } + if (desired->stencil_bits != -1) { + extra_diff += (desired->stencil_bits - current->stencil_bits) * (desired->stencil_bits - current->stencil_bits); + } + if (desired->samples != -1) { + extra_diff += (desired->samples - current->samples) * (desired->samples - current->samples); + } + + /* Figure out if the current one is better than the best one found so far + Least number of missing buffers is the most important heuristic, + then color buffer size match and lastly size match for other buffers + */ + if (missing < least_missing) { + closest = current; + } + else if (missing == least_missing) { + if ((color_diff < least_color_diff) || + (color_diff == least_color_diff && extra_diff < least_extra_diff)) + { + closest = current; + } + } + if (current == closest) { + least_missing = missing; + least_color_diff = color_diff; + least_extra_diff = extra_diff; + } + } + return closest; +} +#endif + +/*== WINDOWS 
DESKTOP and UWP====================================================*/ +#if defined(_SAPP_WIN32) || defined(_SAPP_UWP) +_SOKOL_PRIVATE bool _sapp_win32_uwp_utf8_to_wide(const char* src, wchar_t* dst, int dst_num_bytes) { + SOKOL_ASSERT(src && dst && (dst_num_bytes > 1)); + memset(dst, 0, (size_t)dst_num_bytes); + const int dst_chars = dst_num_bytes / (int)sizeof(wchar_t); + const int dst_needed = MultiByteToWideChar(CP_UTF8, 0, src, -1, 0, 0); + if ((dst_needed > 0) && (dst_needed < dst_chars)) { + MultiByteToWideChar(CP_UTF8, 0, src, -1, dst, dst_chars); + return true; + } + else { + /* input string doesn't fit into destination buffer */ + return false; + } +} + +_SOKOL_PRIVATE void _sapp_win32_uwp_app_event(sapp_event_type type) { + if (_sapp_events_enabled()) { + _sapp_init_event(type); + _sapp_call_event(&_sapp.event); + } +} + +_SOKOL_PRIVATE void _sapp_win32_uwp_init_keytable(void) { + /* same as GLFW */ + _sapp.keycodes[0x00B] = SAPP_KEYCODE_0; + _sapp.keycodes[0x002] = SAPP_KEYCODE_1; + _sapp.keycodes[0x003] = SAPP_KEYCODE_2; + _sapp.keycodes[0x004] = SAPP_KEYCODE_3; + _sapp.keycodes[0x005] = SAPP_KEYCODE_4; + _sapp.keycodes[0x006] = SAPP_KEYCODE_5; + _sapp.keycodes[0x007] = SAPP_KEYCODE_6; + _sapp.keycodes[0x008] = SAPP_KEYCODE_7; + _sapp.keycodes[0x009] = SAPP_KEYCODE_8; + _sapp.keycodes[0x00A] = SAPP_KEYCODE_9; + _sapp.keycodes[0x01E] = SAPP_KEYCODE_A; + _sapp.keycodes[0x030] = SAPP_KEYCODE_B; + _sapp.keycodes[0x02E] = SAPP_KEYCODE_C; + _sapp.keycodes[0x020] = SAPP_KEYCODE_D; + _sapp.keycodes[0x012] = SAPP_KEYCODE_E; + _sapp.keycodes[0x021] = SAPP_KEYCODE_F; + _sapp.keycodes[0x022] = SAPP_KEYCODE_G; + _sapp.keycodes[0x023] = SAPP_KEYCODE_H; + _sapp.keycodes[0x017] = SAPP_KEYCODE_I; + _sapp.keycodes[0x024] = SAPP_KEYCODE_J; + _sapp.keycodes[0x025] = SAPP_KEYCODE_K; + _sapp.keycodes[0x026] = SAPP_KEYCODE_L; + _sapp.keycodes[0x032] = SAPP_KEYCODE_M; + _sapp.keycodes[0x031] = SAPP_KEYCODE_N; + _sapp.keycodes[0x018] = SAPP_KEYCODE_O; + _sapp.keycodes[0x019] = SAPP_KEYCODE_P; + _sapp.keycodes[0x010] = SAPP_KEYCODE_Q; + _sapp.keycodes[0x013] = SAPP_KEYCODE_R; + _sapp.keycodes[0x01F] = SAPP_KEYCODE_S; + _sapp.keycodes[0x014] = SAPP_KEYCODE_T; + _sapp.keycodes[0x016] = SAPP_KEYCODE_U; + _sapp.keycodes[0x02F] = SAPP_KEYCODE_V; + _sapp.keycodes[0x011] = SAPP_KEYCODE_W; + _sapp.keycodes[0x02D] = SAPP_KEYCODE_X; + _sapp.keycodes[0x015] = SAPP_KEYCODE_Y; + _sapp.keycodes[0x02C] = SAPP_KEYCODE_Z; + _sapp.keycodes[0x028] = SAPP_KEYCODE_APOSTROPHE; + _sapp.keycodes[0x02B] = SAPP_KEYCODE_BACKSLASH; + _sapp.keycodes[0x033] = SAPP_KEYCODE_COMMA; + _sapp.keycodes[0x00D] = SAPP_KEYCODE_EQUAL; + _sapp.keycodes[0x029] = SAPP_KEYCODE_GRAVE_ACCENT; + _sapp.keycodes[0x01A] = SAPP_KEYCODE_LEFT_BRACKET; + _sapp.keycodes[0x00C] = SAPP_KEYCODE_MINUS; + _sapp.keycodes[0x034] = SAPP_KEYCODE_PERIOD; + _sapp.keycodes[0x01B] = SAPP_KEYCODE_RIGHT_BRACKET; + _sapp.keycodes[0x027] = SAPP_KEYCODE_SEMICOLON; + _sapp.keycodes[0x035] = SAPP_KEYCODE_SLASH; + _sapp.keycodes[0x056] = SAPP_KEYCODE_WORLD_2; + _sapp.keycodes[0x00E] = SAPP_KEYCODE_BACKSPACE; + _sapp.keycodes[0x153] = SAPP_KEYCODE_DELETE; + _sapp.keycodes[0x14F] = SAPP_KEYCODE_END; + _sapp.keycodes[0x01C] = SAPP_KEYCODE_ENTER; + _sapp.keycodes[0x001] = SAPP_KEYCODE_ESCAPE; + _sapp.keycodes[0x147] = SAPP_KEYCODE_HOME; + _sapp.keycodes[0x152] = SAPP_KEYCODE_INSERT; + _sapp.keycodes[0x15D] = SAPP_KEYCODE_MENU; + _sapp.keycodes[0x151] = SAPP_KEYCODE_PAGE_DOWN; + _sapp.keycodes[0x149] = SAPP_KEYCODE_PAGE_UP; + _sapp.keycodes[0x045] = SAPP_KEYCODE_PAUSE; + 
_sapp.keycodes[0x146] = SAPP_KEYCODE_PAUSE; + _sapp.keycodes[0x039] = SAPP_KEYCODE_SPACE; + _sapp.keycodes[0x00F] = SAPP_KEYCODE_TAB; + _sapp.keycodes[0x03A] = SAPP_KEYCODE_CAPS_LOCK; + _sapp.keycodes[0x145] = SAPP_KEYCODE_NUM_LOCK; + _sapp.keycodes[0x046] = SAPP_KEYCODE_SCROLL_LOCK; + _sapp.keycodes[0x03B] = SAPP_KEYCODE_F1; + _sapp.keycodes[0x03C] = SAPP_KEYCODE_F2; + _sapp.keycodes[0x03D] = SAPP_KEYCODE_F3; + _sapp.keycodes[0x03E] = SAPP_KEYCODE_F4; + _sapp.keycodes[0x03F] = SAPP_KEYCODE_F5; + _sapp.keycodes[0x040] = SAPP_KEYCODE_F6; + _sapp.keycodes[0x041] = SAPP_KEYCODE_F7; + _sapp.keycodes[0x042] = SAPP_KEYCODE_F8; + _sapp.keycodes[0x043] = SAPP_KEYCODE_F9; + _sapp.keycodes[0x044] = SAPP_KEYCODE_F10; + _sapp.keycodes[0x057] = SAPP_KEYCODE_F11; + _sapp.keycodes[0x058] = SAPP_KEYCODE_F12; + _sapp.keycodes[0x064] = SAPP_KEYCODE_F13; + _sapp.keycodes[0x065] = SAPP_KEYCODE_F14; + _sapp.keycodes[0x066] = SAPP_KEYCODE_F15; + _sapp.keycodes[0x067] = SAPP_KEYCODE_F16; + _sapp.keycodes[0x068] = SAPP_KEYCODE_F17; + _sapp.keycodes[0x069] = SAPP_KEYCODE_F18; + _sapp.keycodes[0x06A] = SAPP_KEYCODE_F19; + _sapp.keycodes[0x06B] = SAPP_KEYCODE_F20; + _sapp.keycodes[0x06C] = SAPP_KEYCODE_F21; + _sapp.keycodes[0x06D] = SAPP_KEYCODE_F22; + _sapp.keycodes[0x06E] = SAPP_KEYCODE_F23; + _sapp.keycodes[0x076] = SAPP_KEYCODE_F24; + _sapp.keycodes[0x038] = SAPP_KEYCODE_LEFT_ALT; + _sapp.keycodes[0x01D] = SAPP_KEYCODE_LEFT_CONTROL; + _sapp.keycodes[0x02A] = SAPP_KEYCODE_LEFT_SHIFT; + _sapp.keycodes[0x15B] = SAPP_KEYCODE_LEFT_SUPER; + _sapp.keycodes[0x137] = SAPP_KEYCODE_PRINT_SCREEN; + _sapp.keycodes[0x138] = SAPP_KEYCODE_RIGHT_ALT; + _sapp.keycodes[0x11D] = SAPP_KEYCODE_RIGHT_CONTROL; + _sapp.keycodes[0x036] = SAPP_KEYCODE_RIGHT_SHIFT; + _sapp.keycodes[0x15C] = SAPP_KEYCODE_RIGHT_SUPER; + _sapp.keycodes[0x150] = SAPP_KEYCODE_DOWN; + _sapp.keycodes[0x14B] = SAPP_KEYCODE_LEFT; + _sapp.keycodes[0x14D] = SAPP_KEYCODE_RIGHT; + _sapp.keycodes[0x148] = SAPP_KEYCODE_UP; + _sapp.keycodes[0x052] = SAPP_KEYCODE_KP_0; + _sapp.keycodes[0x04F] = SAPP_KEYCODE_KP_1; + _sapp.keycodes[0x050] = SAPP_KEYCODE_KP_2; + _sapp.keycodes[0x051] = SAPP_KEYCODE_KP_3; + _sapp.keycodes[0x04B] = SAPP_KEYCODE_KP_4; + _sapp.keycodes[0x04C] = SAPP_KEYCODE_KP_5; + _sapp.keycodes[0x04D] = SAPP_KEYCODE_KP_6; + _sapp.keycodes[0x047] = SAPP_KEYCODE_KP_7; + _sapp.keycodes[0x048] = SAPP_KEYCODE_KP_8; + _sapp.keycodes[0x049] = SAPP_KEYCODE_KP_9; + _sapp.keycodes[0x04E] = SAPP_KEYCODE_KP_ADD; + _sapp.keycodes[0x053] = SAPP_KEYCODE_KP_DECIMAL; + _sapp.keycodes[0x135] = SAPP_KEYCODE_KP_DIVIDE; + _sapp.keycodes[0x11C] = SAPP_KEYCODE_KP_ENTER; + _sapp.keycodes[0x037] = SAPP_KEYCODE_KP_MULTIPLY; + _sapp.keycodes[0x04A] = SAPP_KEYCODE_KP_SUBTRACT; +} +#endif // _SAPP_WIN32 || _SAPP_UWP + +/*== WINDOWS DESKTOP===========================================================*/ +#if defined(_SAPP_WIN32) + +#if defined(SOKOL_D3D11) + +#if defined(__cplusplus) +#define _sapp_d3d11_Release(self) (self)->Release() +#else +#define _sapp_d3d11_Release(self) (self)->lpVtbl->Release(self) +#endif + +#define _SAPP_SAFE_RELEASE(obj) if (obj) { _sapp_d3d11_Release(obj); obj=0; } + +static inline HRESULT _sapp_dxgi_GetBuffer(IDXGISwapChain* self, UINT Buffer, REFIID riid, void** ppSurface) { + #if defined(__cplusplus) + return self->GetBuffer(Buffer, riid, ppSurface); + #else + return self->lpVtbl->GetBuffer(self, Buffer, riid, ppSurface); + #endif +} + +static inline HRESULT _sapp_d3d11_CreateRenderTargetView(ID3D11Device* self, ID3D11Resource *pResource, const 
D3D11_RENDER_TARGET_VIEW_DESC* pDesc, ID3D11RenderTargetView** ppRTView) { + #if defined(__cplusplus) + return self->CreateRenderTargetView(pResource, pDesc, ppRTView); + #else + return self->lpVtbl->CreateRenderTargetView(self, pResource, pDesc, ppRTView); + #endif +} + +static inline HRESULT _sapp_d3d11_CreateTexture2D(ID3D11Device* self, const D3D11_TEXTURE2D_DESC* pDesc, const D3D11_SUBRESOURCE_DATA* pInitialData, ID3D11Texture2D** ppTexture2D) { + #if defined(__cplusplus) + return self->CreateTexture2D(pDesc, pInitialData, ppTexture2D); + #else + return self->lpVtbl->CreateTexture2D(self, pDesc, pInitialData, ppTexture2D); + #endif +} + +static inline HRESULT _sapp_d3d11_CreateDepthStencilView(ID3D11Device* self, ID3D11Resource* pResource, const D3D11_DEPTH_STENCIL_VIEW_DESC* pDesc, ID3D11DepthStencilView** ppDepthStencilView) { + #if defined(__cplusplus) + return self->CreateDepthStencilView(pResource, pDesc, ppDepthStencilView); + #else + return self->lpVtbl->CreateDepthStencilView(self, pResource, pDesc, ppDepthStencilView); + #endif +} + +static inline void _sapp_d3d11_ResolveSubresource(ID3D11DeviceContext* self, ID3D11Resource* pDstResource, UINT DstSubresource, ID3D11Resource* pSrcResource, UINT SrcSubresource, DXGI_FORMAT Format) { + #if defined(__cplusplus) + self->ResolveSubresource(pDstResource, DstSubresource, pSrcResource, SrcSubresource, Format); + #else + self->lpVtbl->ResolveSubresource(self, pDstResource, DstSubresource, pSrcResource, SrcSubresource, Format); + #endif +} + +static inline HRESULT _sapp_dxgi_ResizeBuffers(IDXGISwapChain* self, UINT BufferCount, UINT Width, UINT Height, DXGI_FORMAT NewFormat, UINT SwapChainFlags) { + #if defined(__cplusplus) + return self->ResizeBuffers(BufferCount, Width, Height, NewFormat, SwapChainFlags); + #else + return self->lpVtbl->ResizeBuffers(self, BufferCount, Width, Height, NewFormat, SwapChainFlags); + #endif +} + +static inline HRESULT _sapp_dxgi_Present(IDXGISwapChain* self, UINT SyncInterval, UINT Flags) { + #if defined(__cplusplus) + return self->Present(SyncInterval, Flags); + #else + return self->lpVtbl->Present(self, SyncInterval, Flags); + #endif +} + +_SOKOL_PRIVATE void _sapp_d3d11_create_device_and_swapchain(void) { + DXGI_SWAP_CHAIN_DESC* sc_desc = &_sapp.d3d11.swap_chain_desc; + sc_desc->BufferDesc.Width = (UINT)_sapp.framebuffer_width; + sc_desc->BufferDesc.Height = (UINT)_sapp.framebuffer_height; + sc_desc->BufferDesc.Format = DXGI_FORMAT_B8G8R8A8_UNORM; + sc_desc->BufferDesc.RefreshRate.Numerator = 60; + sc_desc->BufferDesc.RefreshRate.Denominator = 1; + sc_desc->OutputWindow = _sapp.win32.hwnd; + sc_desc->Windowed = true; + if (_sapp.win32.is_win10_or_greater) { + sc_desc->BufferCount = 2; + sc_desc->SwapEffect = (DXGI_SWAP_EFFECT) _SAPP_DXGI_SWAP_EFFECT_FLIP_DISCARD; + } + else { + sc_desc->BufferCount = 1; + sc_desc->SwapEffect = DXGI_SWAP_EFFECT_DISCARD; + } + sc_desc->SampleDesc.Count = 1; + sc_desc->SampleDesc.Quality = 0; + sc_desc->BufferUsage = DXGI_USAGE_RENDER_TARGET_OUTPUT; + UINT create_flags = D3D11_CREATE_DEVICE_SINGLETHREADED | D3D11_CREATE_DEVICE_BGRA_SUPPORT; + #if defined(SOKOL_DEBUG) + create_flags |= D3D11_CREATE_DEVICE_DEBUG; + #endif + D3D_FEATURE_LEVEL feature_level; + HRESULT hr = D3D11CreateDeviceAndSwapChain( + NULL, /* pAdapter (use default) */ + D3D_DRIVER_TYPE_HARDWARE, /* DriverType */ + NULL, /* Software */ + create_flags, /* Flags */ + NULL, /* pFeatureLevels */ + 0, /* FeatureLevels */ + D3D11_SDK_VERSION, /* SDKVersion */ + sc_desc, /* pSwapChainDesc */ + 
&_sapp.d3d11.swap_chain, /* ppSwapChain */ + &_sapp.d3d11.device, /* ppDevice */ + &feature_level, /* pFeatureLevel */ + &_sapp.d3d11.device_context); /* ppImmediateContext */ + _SOKOL_UNUSED(hr); + SOKOL_ASSERT(SUCCEEDED(hr) && _sapp.d3d11.swap_chain && _sapp.d3d11.device && _sapp.d3d11.device_context); +} + +_SOKOL_PRIVATE void _sapp_d3d11_destroy_device_and_swapchain(void) { + _SAPP_SAFE_RELEASE(_sapp.d3d11.swap_chain); + _SAPP_SAFE_RELEASE(_sapp.d3d11.device_context); + _SAPP_SAFE_RELEASE(_sapp.d3d11.device); +} + +_SOKOL_PRIVATE void _sapp_d3d11_create_default_render_target(void) { + SOKOL_ASSERT(0 == _sapp.d3d11.rt); + SOKOL_ASSERT(0 == _sapp.d3d11.rtv); + SOKOL_ASSERT(0 == _sapp.d3d11.msaa_rt); + SOKOL_ASSERT(0 == _sapp.d3d11.msaa_rtv); + SOKOL_ASSERT(0 == _sapp.d3d11.ds); + SOKOL_ASSERT(0 == _sapp.d3d11.dsv); + + HRESULT hr; + + /* view for the swapchain-created framebuffer */ + #ifdef __cplusplus + hr = _sapp_dxgi_GetBuffer(_sapp.d3d11.swap_chain, 0, IID_ID3D11Texture2D, (void**)&_sapp.d3d11.rt); + #else + hr = _sapp_dxgi_GetBuffer(_sapp.d3d11.swap_chain, 0, &IID_ID3D11Texture2D, (void**)&_sapp.d3d11.rt); + #endif + SOKOL_ASSERT(SUCCEEDED(hr) && _sapp.d3d11.rt); + hr = _sapp_d3d11_CreateRenderTargetView(_sapp.d3d11.device, (ID3D11Resource*)_sapp.d3d11.rt, NULL, &_sapp.d3d11.rtv); + SOKOL_ASSERT(SUCCEEDED(hr) && _sapp.d3d11.rtv); + + /* common desc for MSAA and depth-stencil texture */ + D3D11_TEXTURE2D_DESC tex_desc; + memset(&tex_desc, 0, sizeof(tex_desc)); + tex_desc.Width = (UINT)_sapp.framebuffer_width; + tex_desc.Height = (UINT)_sapp.framebuffer_height; + tex_desc.MipLevels = 1; + tex_desc.ArraySize = 1; + tex_desc.Usage = D3D11_USAGE_DEFAULT; + tex_desc.BindFlags = D3D11_BIND_RENDER_TARGET; + tex_desc.SampleDesc.Count = (UINT) _sapp.sample_count; + tex_desc.SampleDesc.Quality = (UINT) (_sapp.sample_count > 1 ? 
D3D11_STANDARD_MULTISAMPLE_PATTERN : 0); + + /* create MSAA texture and view if antialiasing requested */ + if (_sapp.sample_count > 1) { + tex_desc.Format = DXGI_FORMAT_B8G8R8A8_UNORM; + hr = _sapp_d3d11_CreateTexture2D(_sapp.d3d11.device, &tex_desc, NULL, &_sapp.d3d11.msaa_rt); + SOKOL_ASSERT(SUCCEEDED(hr) && _sapp.d3d11.msaa_rt); + hr = _sapp_d3d11_CreateRenderTargetView(_sapp.d3d11.device, (ID3D11Resource*)_sapp.d3d11.msaa_rt, NULL, &_sapp.d3d11.msaa_rtv); + SOKOL_ASSERT(SUCCEEDED(hr) && _sapp.d3d11.msaa_rtv); + } + + /* texture and view for the depth-stencil-surface */ + tex_desc.Format = DXGI_FORMAT_D24_UNORM_S8_UINT; + tex_desc.BindFlags = D3D11_BIND_DEPTH_STENCIL; + hr = _sapp_d3d11_CreateTexture2D(_sapp.d3d11.device, &tex_desc, NULL, &_sapp.d3d11.ds); + SOKOL_ASSERT(SUCCEEDED(hr) && _sapp.d3d11.ds); + hr = _sapp_d3d11_CreateDepthStencilView(_sapp.d3d11.device, (ID3D11Resource*)_sapp.d3d11.ds, NULL, &_sapp.d3d11.dsv); + SOKOL_ASSERT(SUCCEEDED(hr) && _sapp.d3d11.dsv); +} + +_SOKOL_PRIVATE void _sapp_d3d11_destroy_default_render_target(void) { + _SAPP_SAFE_RELEASE(_sapp.d3d11.rt); + _SAPP_SAFE_RELEASE(_sapp.d3d11.rtv); + _SAPP_SAFE_RELEASE(_sapp.d3d11.msaa_rt); + _SAPP_SAFE_RELEASE(_sapp.d3d11.msaa_rtv); + _SAPP_SAFE_RELEASE(_sapp.d3d11.ds); + _SAPP_SAFE_RELEASE(_sapp.d3d11.dsv); +} + +_SOKOL_PRIVATE void _sapp_d3d11_resize_default_render_target(void) { + if (_sapp.d3d11.swap_chain) { + _sapp_d3d11_destroy_default_render_target(); + _sapp_dxgi_ResizeBuffers(_sapp.d3d11.swap_chain, _sapp.d3d11.swap_chain_desc.BufferCount, (UINT)_sapp.framebuffer_width, (UINT)_sapp.framebuffer_height, DXGI_FORMAT_B8G8R8A8_UNORM, 0); + _sapp_d3d11_create_default_render_target(); + } +} + +_SOKOL_PRIVATE void _sapp_d3d11_present(void) { + /* do MSAA resolve if needed */ + if (_sapp.sample_count > 1) { + SOKOL_ASSERT(_sapp.d3d11.rt); + SOKOL_ASSERT(_sapp.d3d11.msaa_rt); + _sapp_d3d11_ResolveSubresource(_sapp.d3d11.device_context, (ID3D11Resource*)_sapp.d3d11.rt, 0, (ID3D11Resource*)_sapp.d3d11.msaa_rt, 0, DXGI_FORMAT_B8G8R8A8_UNORM); + } + _sapp_dxgi_Present(_sapp.d3d11.swap_chain, (UINT)_sapp.swap_interval, 0); +} + +#endif /* SOKOL_D3D11 */ + +#if defined(SOKOL_GLCORE33) +_SOKOL_PRIVATE void _sapp_wgl_init(void) { + _sapp.wgl.opengl32 = LoadLibraryA("opengl32.dll"); + if (!_sapp.wgl.opengl32) { + _sapp_fail("Failed to load opengl32.dll\n"); + } + SOKOL_ASSERT(_sapp.wgl.opengl32); + _sapp.wgl.CreateContext = (PFN_wglCreateContext)(void*) GetProcAddress(_sapp.wgl.opengl32, "wglCreateContext"); + SOKOL_ASSERT(_sapp.wgl.CreateContext); + _sapp.wgl.DeleteContext = (PFN_wglDeleteContext)(void*) GetProcAddress(_sapp.wgl.opengl32, "wglDeleteContext"); + SOKOL_ASSERT(_sapp.wgl.DeleteContext); + _sapp.wgl.GetProcAddress = (PFN_wglGetProcAddress)(void*) GetProcAddress(_sapp.wgl.opengl32, "wglGetProcAddress"); + SOKOL_ASSERT(_sapp.wgl.GetProcAddress); + _sapp.wgl.GetCurrentDC = (PFN_wglGetCurrentDC)(void*) GetProcAddress(_sapp.wgl.opengl32, "wglGetCurrentDC"); + SOKOL_ASSERT(_sapp.wgl.GetCurrentDC); + _sapp.wgl.MakeCurrent = (PFN_wglMakeCurrent)(void*) GetProcAddress(_sapp.wgl.opengl32, "wglMakeCurrent"); + SOKOL_ASSERT(_sapp.wgl.MakeCurrent); + + _sapp.wgl.msg_hwnd = CreateWindowExW(WS_EX_OVERLAPPEDWINDOW, + L"SOKOLAPP", + L"sokol-app message window", + WS_CLIPSIBLINGS|WS_CLIPCHILDREN, + 0, 0, 1, 1, + NULL, NULL, + GetModuleHandleW(NULL), + NULL); + if (!_sapp.wgl.msg_hwnd) { + _sapp_fail("Win32: failed to create helper window!\n"); + } + SOKOL_ASSERT(_sapp.wgl.msg_hwnd); + ShowWindow(_sapp.wgl.msg_hwnd, SW_HIDE); 
+ MSG msg; + while (PeekMessageW(&msg, _sapp.wgl.msg_hwnd, 0, 0, PM_REMOVE)) { + TranslateMessage(&msg); + DispatchMessageW(&msg); + } + _sapp.wgl.msg_dc = GetDC(_sapp.wgl.msg_hwnd); + if (!_sapp.wgl.msg_dc) { + _sapp_fail("Win32: failed to obtain helper window DC!\n"); + } +} + +_SOKOL_PRIVATE void _sapp_wgl_shutdown(void) { + SOKOL_ASSERT(_sapp.wgl.opengl32 && _sapp.wgl.msg_hwnd); + DestroyWindow(_sapp.wgl.msg_hwnd); _sapp.wgl.msg_hwnd = 0; + FreeLibrary(_sapp.wgl.opengl32); _sapp.wgl.opengl32 = 0; +} + +_SOKOL_PRIVATE bool _sapp_wgl_has_ext(const char* ext, const char* extensions) { + SOKOL_ASSERT(ext && extensions); + const char* start = extensions; + while (true) { + const char* where = strstr(start, ext); + if (!where) { + return false; + } + const char* terminator = where + strlen(ext); + if ((where == start) || (*(where - 1) == ' ')) { + if (*terminator == ' ' || *terminator == '\0') { + break; + } + } + start = terminator; + } + return true; +} + +_SOKOL_PRIVATE bool _sapp_wgl_ext_supported(const char* ext) { + SOKOL_ASSERT(ext); + if (_sapp.wgl.GetExtensionsStringEXT) { + const char* extensions = _sapp.wgl.GetExtensionsStringEXT(); + if (extensions) { + if (_sapp_wgl_has_ext(ext, extensions)) { + return true; + } + } + } + if (_sapp.wgl.GetExtensionsStringARB) { + const char* extensions = _sapp.wgl.GetExtensionsStringARB(_sapp.wgl.GetCurrentDC()); + if (extensions) { + if (_sapp_wgl_has_ext(ext, extensions)) { + return true; + } + } + } + return false; +} + +_SOKOL_PRIVATE void _sapp_wgl_load_extensions(void) { + SOKOL_ASSERT(_sapp.wgl.msg_dc); + PIXELFORMATDESCRIPTOR pfd; + memset(&pfd, 0, sizeof(pfd)); + pfd.nSize = sizeof(pfd); + pfd.nVersion = 1; + pfd.dwFlags = PFD_DRAW_TO_WINDOW | PFD_SUPPORT_OPENGL | PFD_DOUBLEBUFFER; + pfd.iPixelType = PFD_TYPE_RGBA; + pfd.cColorBits = 24; + if (!SetPixelFormat(_sapp.wgl.msg_dc, ChoosePixelFormat(_sapp.wgl.msg_dc, &pfd), &pfd)) { + _sapp_fail("WGL: failed to set pixel format for dummy context\n"); + } + HGLRC rc = _sapp.wgl.CreateContext(_sapp.wgl.msg_dc); + if (!rc) { + _sapp_fail("WGL: Failed to create dummy context\n"); + } + if (!_sapp.wgl.MakeCurrent(_sapp.wgl.msg_dc, rc)) { + _sapp_fail("WGL: Failed to make context current\n"); + } + _sapp.wgl.GetExtensionsStringEXT = (PFNWGLGETEXTENSIONSSTRINGEXTPROC)(void*) _sapp.wgl.GetProcAddress("wglGetExtensionsStringEXT"); + _sapp.wgl.GetExtensionsStringARB = (PFNWGLGETEXTENSIONSSTRINGARBPROC)(void*) _sapp.wgl.GetProcAddress("wglGetExtensionsStringARB"); + _sapp.wgl.CreateContextAttribsARB = (PFNWGLCREATECONTEXTATTRIBSARBPROC)(void*) _sapp.wgl.GetProcAddress("wglCreateContextAttribsARB"); + _sapp.wgl.SwapIntervalEXT = (PFNWGLSWAPINTERVALEXTPROC)(void*) _sapp.wgl.GetProcAddress("wglSwapIntervalEXT"); + _sapp.wgl.GetPixelFormatAttribivARB = (PFNWGLGETPIXELFORMATATTRIBIVARBPROC)(void*) _sapp.wgl.GetProcAddress("wglGetPixelFormatAttribivARB"); + _sapp.wgl.arb_multisample = _sapp_wgl_ext_supported("WGL_ARB_multisample"); + _sapp.wgl.arb_create_context = _sapp_wgl_ext_supported("WGL_ARB_create_context"); + _sapp.wgl.arb_create_context_profile = _sapp_wgl_ext_supported("WGL_ARB_create_context_profile"); + _sapp.wgl.ext_swap_control = _sapp_wgl_ext_supported("WGL_EXT_swap_control"); + _sapp.wgl.arb_pixel_format = _sapp_wgl_ext_supported("WGL_ARB_pixel_format"); + _sapp.wgl.MakeCurrent(_sapp.wgl.msg_dc, 0); + _sapp.wgl.DeleteContext(rc); +} + +_SOKOL_PRIVATE int _sapp_wgl_attrib(int pixel_format, int attrib) { + SOKOL_ASSERT(_sapp.wgl.arb_pixel_format); + int value = 0; + if 
(!_sapp.wgl.GetPixelFormatAttribivARB(_sapp.win32.dc, pixel_format, 0, 1, &attrib, &value)) { + _sapp_fail("WGL: Failed to retrieve pixel format attribute\n"); + } + return value; +} + +_SOKOL_PRIVATE int _sapp_wgl_find_pixel_format(void) { + SOKOL_ASSERT(_sapp.win32.dc); + SOKOL_ASSERT(_sapp.wgl.arb_pixel_format); + const _sapp_gl_fbconfig* closest; + + int native_count = _sapp_wgl_attrib(1, WGL_NUMBER_PIXEL_FORMATS_ARB); + _sapp_gl_fbconfig* usable_configs = (_sapp_gl_fbconfig*) SOKOL_CALLOC((size_t)native_count, sizeof(_sapp_gl_fbconfig)); + SOKOL_ASSERT(usable_configs); + int usable_count = 0; + for (int i = 0; i < native_count; i++) { + const int n = i + 1; + _sapp_gl_fbconfig* u = usable_configs + usable_count; + _sapp_gl_init_fbconfig(u); + if (!_sapp_wgl_attrib(n, WGL_SUPPORT_OPENGL_ARB) || !_sapp_wgl_attrib(n, WGL_DRAW_TO_WINDOW_ARB)) { + continue; + } + if (_sapp_wgl_attrib(n, WGL_PIXEL_TYPE_ARB) != WGL_TYPE_RGBA_ARB) { + continue; + } + if (_sapp_wgl_attrib(n, WGL_ACCELERATION_ARB) == WGL_NO_ACCELERATION_ARB) { + continue; + } + u->red_bits = _sapp_wgl_attrib(n, WGL_RED_BITS_ARB); + u->green_bits = _sapp_wgl_attrib(n, WGL_GREEN_BITS_ARB); + u->blue_bits = _sapp_wgl_attrib(n, WGL_BLUE_BITS_ARB); + u->alpha_bits = _sapp_wgl_attrib(n, WGL_ALPHA_BITS_ARB); + u->depth_bits = _sapp_wgl_attrib(n, WGL_DEPTH_BITS_ARB); + u->stencil_bits = _sapp_wgl_attrib(n, WGL_STENCIL_BITS_ARB); + if (_sapp_wgl_attrib(n, WGL_DOUBLE_BUFFER_ARB)) { + u->doublebuffer = true; + } + if (_sapp.wgl.arb_multisample) { + u->samples = _sapp_wgl_attrib(n, WGL_SAMPLES_ARB); + } + u->handle = (uintptr_t)n; + usable_count++; + } + SOKOL_ASSERT(usable_count > 0); + _sapp_gl_fbconfig desired; + _sapp_gl_init_fbconfig(&desired); + desired.red_bits = 8; + desired.green_bits = 8; + desired.blue_bits = 8; + desired.alpha_bits = 8; + desired.depth_bits = 24; + desired.stencil_bits = 8; + desired.doublebuffer = true; + desired.samples = _sapp.sample_count > 1 ? 
_sapp.sample_count : 0; + closest = _sapp_gl_choose_fbconfig(&desired, usable_configs, usable_count); + int pixel_format = 0; + if (closest) { + pixel_format = (int) closest->handle; + } + SOKOL_FREE(usable_configs); + return pixel_format; +} + +_SOKOL_PRIVATE void _sapp_wgl_create_context(void) { + int pixel_format = _sapp_wgl_find_pixel_format(); + if (0 == pixel_format) { + _sapp_fail("WGL: Didn't find matching pixel format.\n"); + } + PIXELFORMATDESCRIPTOR pfd; + if (!DescribePixelFormat(_sapp.win32.dc, pixel_format, sizeof(pfd), &pfd)) { + _sapp_fail("WGL: Failed to retrieve PFD for selected pixel format!\n"); + } + if (!SetPixelFormat(_sapp.win32.dc, pixel_format, &pfd)) { + _sapp_fail("WGL: Failed to set selected pixel format!\n"); + } + if (!_sapp.wgl.arb_create_context) { + _sapp_fail("WGL: ARB_create_context required!\n"); + } + if (!_sapp.wgl.arb_create_context_profile) { + _sapp_fail("WGL: ARB_create_context_profile required!\n"); + } + const int attrs[] = { + WGL_CONTEXT_MAJOR_VERSION_ARB, 3, + WGL_CONTEXT_MINOR_VERSION_ARB, 3, + WGL_CONTEXT_FLAGS_ARB, WGL_CONTEXT_FORWARD_COMPATIBLE_BIT_ARB, + WGL_CONTEXT_PROFILE_MASK_ARB, WGL_CONTEXT_CORE_PROFILE_BIT_ARB, + 0, 0 + }; + _sapp.wgl.gl_ctx = _sapp.wgl.CreateContextAttribsARB(_sapp.win32.dc, 0, attrs); + if (!_sapp.wgl.gl_ctx) { + const DWORD err = GetLastError(); + if (err == (0xc0070000 | ERROR_INVALID_VERSION_ARB)) { + _sapp_fail("WGL: Driver does not support OpenGL version 3.3\n"); + } + else if (err == (0xc0070000 | ERROR_INVALID_PROFILE_ARB)) { + _sapp_fail("WGL: Driver does not support the requested OpenGL profile"); + } + else if (err == (0xc0070000 | ERROR_INCOMPATIBLE_DEVICE_CONTEXTS_ARB)) { + _sapp_fail("WGL: The share context is not compatible with the requested context"); + } + else { + _sapp_fail("WGL: Failed to create OpenGL context"); + } + } + _sapp.wgl.MakeCurrent(_sapp.win32.dc, _sapp.wgl.gl_ctx); + if (_sapp.wgl.ext_swap_control) { + /* FIXME: DwmIsCompositionEnabled() (see GLFW) */ + _sapp.wgl.SwapIntervalEXT(_sapp.swap_interval); + } +} + +_SOKOL_PRIVATE void _sapp_wgl_destroy_context(void) { + SOKOL_ASSERT(_sapp.wgl.gl_ctx); + _sapp.wgl.DeleteContext(_sapp.wgl.gl_ctx); + _sapp.wgl.gl_ctx = 0; +} + +_SOKOL_PRIVATE void _sapp_wgl_swap_buffers(void) { + SOKOL_ASSERT(_sapp.win32.dc); + /* FIXME: DwmIsCompositionEnabled? 
(see GLFW) */ + SwapBuffers(_sapp.win32.dc); +} +#endif /* SOKOL_GLCORE33 */ + +_SOKOL_PRIVATE bool _sapp_win32_wide_to_utf8(const wchar_t* src, char* dst, int dst_num_bytes) { + SOKOL_ASSERT(src && dst && (dst_num_bytes > 1)); + memset(dst, 0, (size_t)dst_num_bytes); + const int bytes_needed = WideCharToMultiByte(CP_UTF8, 0, src, -1, NULL, 0, NULL, NULL); + if (bytes_needed <= dst_num_bytes) { + WideCharToMultiByte(CP_UTF8, 0, src, -1, dst, dst_num_bytes, NULL, NULL); + return true; + } + else { + return false; + } +} + +_SOKOL_PRIVATE void _sapp_win32_toggle_fullscreen(void) { + HMONITOR monitor = MonitorFromWindow(_sapp.win32.hwnd, MONITOR_DEFAULTTONEAREST); + MONITORINFO minfo; + memset(&minfo, 0, sizeof(minfo)); + minfo.cbSize = sizeof(MONITORINFO); + GetMonitorInfo(monitor, &minfo); + const RECT mr = minfo.rcMonitor; + const int monitor_w = mr.right - mr.left; + const int monitor_h = mr.bottom - mr.top; + + const DWORD win_ex_style = WS_EX_APPWINDOW | WS_EX_WINDOWEDGE; + DWORD win_style; + RECT rect = { 0, 0, 0, 0 }; + + _sapp.fullscreen = !_sapp.fullscreen; + if (!_sapp.fullscreen) { + win_style = WS_CLIPSIBLINGS | WS_CLIPCHILDREN | WS_CAPTION | WS_SYSMENU | WS_MINIMIZEBOX | WS_MAXIMIZEBOX | WS_SIZEBOX; + rect.right = (int) ((float)_sapp.desc.width * _sapp.win32.dpi.window_scale); + rect.bottom = (int) ((float)_sapp.desc.height * _sapp.win32.dpi.window_scale); + } + else { + win_style = WS_POPUP | WS_SYSMENU | WS_VISIBLE; + rect.right = monitor_w; + rect.bottom = monitor_h; + } + AdjustWindowRectEx(&rect, win_style, FALSE, win_ex_style); + int win_width = rect.right - rect.left; + int win_height = rect.bottom - rect.top; + if (!_sapp.fullscreen) { + rect.left = (monitor_w - win_width) / 2; + rect.top = (monitor_h - win_height) / 2; + } + + SetWindowLongPtr(_sapp.win32.hwnd, GWL_STYLE, win_style); + SetWindowPos(_sapp.win32.hwnd, HWND_TOP, mr.left + rect.left, mr.top + rect.top, win_width, win_height, SWP_SHOWWINDOW | SWP_FRAMECHANGED); +} + +_SOKOL_PRIVATE void _sapp_win32_show_mouse(bool visible) { + /* NOTE: this function is only called when the mouse visibility actually changes */ + ShowCursor((BOOL)visible); +} + +_SOKOL_PRIVATE void _sapp_win32_capture_mouse(uint8_t btn_mask) { + if (0 == _sapp.win32.mouse_capture_mask) { + SetCapture(_sapp.win32.hwnd); + } + _sapp.win32.mouse_capture_mask |= btn_mask; +} + +_SOKOL_PRIVATE void _sapp_win32_release_mouse(uint8_t btn_mask) { + if (0 != _sapp.win32.mouse_capture_mask) { + _sapp.win32.mouse_capture_mask &= ~btn_mask; + if (0 == _sapp.win32.mouse_capture_mask) { + ReleaseCapture(); + } + } +} + +_SOKOL_PRIVATE void _sapp_win32_lock_mouse(bool lock) { + if (lock == _sapp.mouse.locked) { + return; + } + _sapp.mouse.dx = 0.0f; + _sapp.mouse.dy = 0.0f; + _sapp.mouse.locked = lock; + _sapp_win32_release_mouse(0xFF); + if (_sapp.mouse.locked) { + /* store the current mouse position, so it can be restored when unlocked */ + POINT pos; + BOOL res = GetCursorPos(&pos); + SOKOL_ASSERT(res); _SOKOL_UNUSED(res); + _sapp.win32.mouse_locked_x = pos.x; + _sapp.win32.mouse_locked_y = pos.y; + + /* while the mouse is locked, make the mouse cursor invisible and + confine the mouse movement to a small rectangle inside our window + (so that we dont miss any mouse up events) + */ + RECT client_rect = { + _sapp.win32.mouse_locked_x, + _sapp.win32.mouse_locked_y, + _sapp.win32.mouse_locked_x, + _sapp.win32.mouse_locked_y + }; + ClipCursor(&client_rect); + + /* make the mouse cursor invisible, this will stack with sapp_show_mouse() */ + ShowCursor(FALSE); 
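+ /* Illustrative sketch (not part of sokol_app.h): _sapp_win32_wide_to_utf8() above converts
+    into a caller-provided buffer and reports failure when the result would not fit. When the
+    required size is not known up front, the usual Win32 pattern is to call WideCharToMultiByte()
+    twice: once with a NULL destination to query the byte count, then again to convert.
+    my_wide_to_utf8_alloc is a hypothetical helper (caller frees the result); kept in '#if 0': */
+ #if 0
+ #include <windows.h>
+ #include <stdlib.h>
+
+ static char* my_wide_to_utf8_alloc(const wchar_t* src) {
+     /* first call: cbMultiByte == 0 returns the required size including the terminator
+        (cchWideChar == -1 means 'src is zero-terminated') */
+     int num_bytes = WideCharToMultiByte(CP_UTF8, 0, src, -1, NULL, 0, NULL, NULL);
+     if (num_bytes <= 0) {
+         return NULL;
+     }
+     char* dst = (char*) malloc((size_t)num_bytes);
+     if (!dst) {
+         return NULL;
+     }
+     /* second call: perform the actual conversion */
+     if (0 == WideCharToMultiByte(CP_UTF8, 0, src, -1, dst, num_bytes, NULL, NULL)) {
+         free(dst);
+         return NULL;
+     }
+     return dst;
+ }
+ #endif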
+ + /* enable raw input for mouse, starts sending WM_INPUT messages to WinProc (see GLFW) */ + const RAWINPUTDEVICE rid = { + 0x01, // usUsagePage: HID_USAGE_PAGE_GENERIC + 0x02, // usUsage: HID_USAGE_GENERIC_MOUSE + 0, // dwFlags + _sapp.win32.hwnd // hwndTarget + }; + if (!RegisterRawInputDevices(&rid, 1, sizeof(rid))) { + SOKOL_LOG("RegisterRawInputDevices() failed (on mouse lock).\n"); + } + /* in case the raw mouse device only supports absolute position reporting, + we need to skip the dx/dy compution for the first WM_INPUT event + */ + _sapp.win32.raw_input_mousepos_valid = false; + } + else { + /* disable raw input for mouse */ + const RAWINPUTDEVICE rid = { 0x01, 0x02, RIDEV_REMOVE, NULL }; + if (!RegisterRawInputDevices(&rid, 1, sizeof(rid))) { + SOKOL_LOG("RegisterRawInputDevices() failed (on mouse unlock).\n"); + } + + /* let the mouse roam freely again */ + ClipCursor(NULL); + ShowCursor(TRUE); + + /* restore the 'pre-locked' mouse position */ + BOOL res = SetCursorPos(_sapp.win32.mouse_locked_x, _sapp.win32.mouse_locked_y); + SOKOL_ASSERT(res); _SOKOL_UNUSED(res); + } +} + +/* updates current window and framebuffer size from the window's client rect, returns true if size has changed */ +_SOKOL_PRIVATE bool _sapp_win32_update_dimensions(void) { + RECT rect; + if (GetClientRect(_sapp.win32.hwnd, &rect)) { + _sapp.window_width = (int)((float)(rect.right - rect.left) / _sapp.win32.dpi.window_scale); + _sapp.window_height = (int)((float)(rect.bottom - rect.top) / _sapp.win32.dpi.window_scale); + const int fb_width = (int)((float)_sapp.window_width * _sapp.win32.dpi.content_scale); + const int fb_height = (int)((float)_sapp.window_height * _sapp.win32.dpi.content_scale); + if ((fb_width != _sapp.framebuffer_width) || (fb_height != _sapp.framebuffer_height)) { + _sapp.framebuffer_width = fb_width; + _sapp.framebuffer_height = fb_height; + /* prevent a framebuffer size of 0 when window is minimized */ + if (_sapp.framebuffer_width == 0) { + _sapp.framebuffer_width = 1; + } + if (_sapp.framebuffer_height == 0) { + _sapp.framebuffer_height = 1; + } + return true; + } + } + else { + _sapp.window_width = _sapp.window_height = 1; + _sapp.framebuffer_width = _sapp.framebuffer_height = 1; + } + return false; +} + +_SOKOL_PRIVATE uint32_t _sapp_win32_mods(void) { + uint32_t mods = 0; + if (GetKeyState(VK_SHIFT) & (1<<15)) { + mods |= SAPP_MODIFIER_SHIFT; + } + if (GetKeyState(VK_CONTROL) & (1<<15)) { + mods |= SAPP_MODIFIER_CTRL; + } + if (GetKeyState(VK_MENU) & (1<<15)) { + mods |= SAPP_MODIFIER_ALT; + } + if ((GetKeyState(VK_LWIN) | GetKeyState(VK_RWIN)) & (1<<15)) { + mods |= SAPP_MODIFIER_SUPER; + } + return mods; +} + +_SOKOL_PRIVATE void _sapp_win32_mouse_event(sapp_event_type type, sapp_mousebutton btn) { + if (_sapp_events_enabled()) { + _sapp_init_event(type); + _sapp.event.modifiers = _sapp_win32_mods(); + _sapp.event.mouse_button = btn; + _sapp_call_event(&_sapp.event); + } +} + +_SOKOL_PRIVATE void _sapp_win32_scroll_event(float x, float y) { + if (_sapp_events_enabled()) { + _sapp_init_event(SAPP_EVENTTYPE_MOUSE_SCROLL); + _sapp.event.modifiers = _sapp_win32_mods(); + _sapp.event.scroll_x = -x / 30.0f; + _sapp.event.scroll_y = y / 30.0f; + _sapp_call_event(&_sapp.event); + } +} + +_SOKOL_PRIVATE void _sapp_win32_key_event(sapp_event_type type, int vk, bool repeat) { + if (_sapp_events_enabled() && (vk < SAPP_MAX_KEYCODES)) { + _sapp_init_event(type); + _sapp.event.modifiers = _sapp_win32_mods(); + _sapp.event.key_code = _sapp.keycodes[vk]; + _sapp.event.key_repeat = repeat; + 
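+ /* Illustrative sketch (not part of sokol_app.h): the mouse-lock code above switches the mouse
+    into raw-input mode so that WM_INPUT delivers unaccelerated movement deltas while the cursor
+    is clipped and hidden. Reduced to its essentials, enabling and disabling raw mouse input looks
+    like this (my_enable_raw_mouse / my_disable_raw_mouse are hypothetical names; '#if 0' keeps
+    the sketch out of the build): */
+ #if 0
+ #include <windows.h>
+ #include <stdbool.h>
+
+ static bool my_enable_raw_mouse(HWND hwnd) {
+     /* usUsagePage 0x01 / usUsage 0x02 == generic desktop page / mouse */
+     const RAWINPUTDEVICE rid = { 0x01, 0x02, 0, hwnd };
+     return RegisterRawInputDevices(&rid, 1, sizeof(rid)) != FALSE;
+ }
+
+ static bool my_disable_raw_mouse(void) {
+     /* RIDEV_REMOVE requires hwndTarget to be NULL */
+     const RAWINPUTDEVICE rid = { 0x01, 0x02, RIDEV_REMOVE, NULL };
+     return RegisterRawInputDevices(&rid, 1, sizeof(rid)) != FALSE;
+ }
+ #endif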
_sapp_call_event(&_sapp.event); + /* check if a CLIPBOARD_PASTED event must be sent too */ + if (_sapp.clipboard.enabled && + (type == SAPP_EVENTTYPE_KEY_DOWN) && + (_sapp.event.modifiers == SAPP_MODIFIER_CTRL) && + (_sapp.event.key_code == SAPP_KEYCODE_V)) + { + _sapp_init_event(SAPP_EVENTTYPE_CLIPBOARD_PASTED); + _sapp_call_event(&_sapp.event); + } + } +} + +_SOKOL_PRIVATE void _sapp_win32_char_event(uint32_t c, bool repeat) { + if (_sapp_events_enabled() && (c >= 32)) { + _sapp_init_event(SAPP_EVENTTYPE_CHAR); + _sapp.event.modifiers = _sapp_win32_mods(); + _sapp.event.char_code = c; + _sapp.event.key_repeat = repeat; + _sapp_call_event(&_sapp.event); + } +} + +_SOKOL_PRIVATE void _sapp_win32_files_dropped(HDROP hdrop) { + if (!_sapp.drop.enabled) { + return; + } + _sapp_clear_drop_buffer(); + bool drop_failed = false; + const int count = (int) DragQueryFileW(hdrop, 0xffffffff, NULL, 0); + _sapp.drop.num_files = (count > _sapp.drop.max_files) ? _sapp.drop.max_files : count; + for (UINT i = 0; i < (UINT)_sapp.drop.num_files; i++) { + const UINT num_chars = DragQueryFileW(hdrop, i, NULL, 0) + 1; + WCHAR* buffer = (WCHAR*) SOKOL_CALLOC(num_chars, sizeof(WCHAR)); + DragQueryFileW(hdrop, i, buffer, num_chars); + if (!_sapp_win32_wide_to_utf8(buffer, _sapp_dropped_file_path_ptr((int)i), _sapp.drop.max_path_length)) { + SOKOL_LOG("sokol_app.h: dropped file path too long (sapp_desc.max_dropped_file_path_length)\n"); + drop_failed = true; + } + SOKOL_FREE(buffer); + } + DragFinish(hdrop); + if (!drop_failed) { + if (_sapp_events_enabled()) { + _sapp_init_event(SAPP_EVENTTYPE_FILES_DROPPED); + _sapp_call_event(&_sapp.event); + } + } + else { + _sapp_clear_drop_buffer(); + _sapp.drop.num_files = 0; + } +} + +_SOKOL_PRIVATE LRESULT CALLBACK _sapp_win32_wndproc(HWND hWnd, UINT uMsg, WPARAM wParam, LPARAM lParam) { + if (!_sapp.win32.in_create_window) { + switch (uMsg) { + case WM_CLOSE: + /* only give user a chance to intervene when sapp_quit() wasn't already called */ + if (!_sapp.quit_ordered) { + /* if window should be closed and event handling is enabled, give user code + a change to intervene via sapp_cancel_quit() + */ + _sapp.quit_requested = true; + _sapp_win32_uwp_app_event(SAPP_EVENTTYPE_QUIT_REQUESTED); + /* if user code hasn't intervened, quit the app */ + if (_sapp.quit_requested) { + _sapp.quit_ordered = true; + } + } + if (_sapp.quit_ordered) { + PostQuitMessage(0); + } + return 0; + case WM_SYSCOMMAND: + switch (wParam & 0xFFF0) { + case SC_SCREENSAVE: + case SC_MONITORPOWER: + if (_sapp.fullscreen) { + /* disable screen saver and blanking in fullscreen mode */ + return 0; + } + break; + case SC_KEYMENU: + /* user trying to access menu via ALT */ + return 0; + } + break; + case WM_ERASEBKGND: + return 1; + case WM_SIZE: + { + const bool iconified = wParam == SIZE_MINIMIZED; + if (iconified != _sapp.win32.iconified) { + _sapp.win32.iconified = iconified; + if (iconified) { + _sapp_win32_uwp_app_event(SAPP_EVENTTYPE_ICONIFIED); + } + else { + _sapp_win32_uwp_app_event(SAPP_EVENTTYPE_RESTORED); + } + } + } + break; + case WM_SETCURSOR: + if (_sapp.desc.user_cursor) { + if (LOWORD(lParam) == HTCLIENT) { + _sapp_win32_uwp_app_event(SAPP_EVENTTYPE_UPDATE_CURSOR); + return 1; + } + } + break; + case WM_LBUTTONDOWN: + _sapp_win32_mouse_event(SAPP_EVENTTYPE_MOUSE_DOWN, SAPP_MOUSEBUTTON_LEFT); + _sapp_win32_capture_mouse(1<data.mouse.usFlags & MOUSE_MOVE_ABSOLUTE) { + /* mouse only reports absolute position + NOTE: THIS IS UNTESTED, it's unclear from reading the + Win32 RawInput docs under 
which circumstances absolute + positions are sent. + */ + if (_sapp.win32.raw_input_mousepos_valid) { + LONG new_x = raw_mouse_data->data.mouse.lLastX; + LONG new_y = raw_mouse_data->data.mouse.lLastY; + _sapp.mouse.dx = (float) (new_x - _sapp.win32.raw_input_mousepos_x); + _sapp.mouse.dy = (float) (new_y - _sapp.win32.raw_input_mousepos_y); + _sapp.win32.raw_input_mousepos_x = new_x; + _sapp.win32.raw_input_mousepos_y = new_y; + _sapp.win32.raw_input_mousepos_valid = true; + } + } + else { + /* mouse reports movement delta (this seems to be the common case) */ + _sapp.mouse.dx = (float) raw_mouse_data->data.mouse.lLastX; + _sapp.mouse.dy = (float) raw_mouse_data->data.mouse.lLastY; + } + _sapp_win32_mouse_event(SAPP_EVENTTYPE_MOUSE_MOVE, SAPP_MOUSEBUTTON_INVALID); + } + break; + + case WM_MOUSELEAVE: + if (!_sapp.mouse.locked) { + _sapp.win32.mouse_tracked = false; + _sapp_win32_mouse_event(SAPP_EVENTTYPE_MOUSE_LEAVE, SAPP_MOUSEBUTTON_INVALID); + } + break; + case WM_MOUSEWHEEL: + _sapp_win32_scroll_event(0.0f, (float)((SHORT)HIWORD(wParam))); + break; + case WM_MOUSEHWHEEL: + _sapp_win32_scroll_event((float)((SHORT)HIWORD(wParam)), 0.0f); + break; + case WM_CHAR: + _sapp_win32_char_event((uint32_t)wParam, !!(lParam&0x40000000)); + break; + case WM_KEYDOWN: + case WM_SYSKEYDOWN: + _sapp_win32_key_event(SAPP_EVENTTYPE_KEY_DOWN, (int)(HIWORD(lParam)&0x1FF), !!(lParam&0x40000000)); + break; + case WM_KEYUP: + case WM_SYSKEYUP: + _sapp_win32_key_event(SAPP_EVENTTYPE_KEY_UP, (int)(HIWORD(lParam)&0x1FF), false); + break; + case WM_ENTERSIZEMOVE: + SetTimer(_sapp.win32.hwnd, 1, USER_TIMER_MINIMUM, NULL); + break; + case WM_EXITSIZEMOVE: + KillTimer(_sapp.win32.hwnd, 1); + break; + case WM_TIMER: + _sapp_frame(); + #if defined(SOKOL_D3D11) + _sapp_d3d11_present(); + #endif + #if defined(SOKOL_GLCORE33) + _sapp_wgl_swap_buffers(); + #endif + /* NOTE: resizing the swap-chain during resize leads to a substantial + memory spike (hundreds of megabytes for a few seconds). 
+ + if (_sapp_win32_update_dimensions()) { + #if defined(SOKOL_D3D11) + _sapp_d3d11_resize_default_render_target(); + #endif + _sapp_win32_uwp_app_event(SAPP_EVENTTYPE_RESIZED); + } + */ + break; + case WM_DROPFILES: + _sapp_win32_files_dropped((HDROP)wParam); + break; + default: + break; + } + } + return DefWindowProcW(hWnd, uMsg, wParam, lParam); +} + +_SOKOL_PRIVATE void _sapp_win32_create_window(void) { + WNDCLASSW wndclassw; + memset(&wndclassw, 0, sizeof(wndclassw)); + wndclassw.style = CS_HREDRAW | CS_VREDRAW | CS_OWNDC; + wndclassw.lpfnWndProc = (WNDPROC) _sapp_win32_wndproc; + wndclassw.hInstance = GetModuleHandleW(NULL); + wndclassw.hCursor = LoadCursor(NULL, IDC_ARROW); + wndclassw.hIcon = LoadIcon(NULL, IDI_WINLOGO); + wndclassw.lpszClassName = L"SOKOLAPP"; + RegisterClassW(&wndclassw); + + DWORD win_style; + const DWORD win_ex_style = WS_EX_APPWINDOW | WS_EX_WINDOWEDGE; + RECT rect = { 0, 0, 0, 0 }; + if (_sapp.fullscreen) { + win_style = WS_POPUP | WS_SYSMENU | WS_VISIBLE; + rect.right = GetSystemMetrics(SM_CXSCREEN); + rect.bottom = GetSystemMetrics(SM_CYSCREEN); + } + else { + win_style = WS_CLIPSIBLINGS | WS_CLIPCHILDREN | WS_CAPTION | WS_SYSMENU | WS_MINIMIZEBOX | WS_MAXIMIZEBOX | WS_SIZEBOX; + rect.right = (int) ((float)_sapp.window_width * _sapp.win32.dpi.window_scale); + rect.bottom = (int) ((float)_sapp.window_height * _sapp.win32.dpi.window_scale); + } + AdjustWindowRectEx(&rect, win_style, FALSE, win_ex_style); + const int win_width = rect.right - rect.left; + const int win_height = rect.bottom - rect.top; + _sapp.win32.in_create_window = true; + _sapp.win32.hwnd = CreateWindowExW( + win_ex_style, /* dwExStyle */ + L"SOKOLAPP", /* lpClassName */ + _sapp.window_title_wide, /* lpWindowName */ + win_style, /* dwStyle */ + CW_USEDEFAULT, /* X */ + CW_USEDEFAULT, /* Y */ + win_width, /* nWidth */ + win_height, /* nHeight */ + NULL, /* hWndParent */ + NULL, /* hMenu */ + GetModuleHandle(NULL), /* hInstance */ + NULL); /* lParam */ + ShowWindow(_sapp.win32.hwnd, SW_SHOW); + _sapp.win32.in_create_window = false; + _sapp.win32.dc = GetDC(_sapp.win32.hwnd); + SOKOL_ASSERT(_sapp.win32.dc); + _sapp_win32_update_dimensions(); + + DragAcceptFiles(_sapp.win32.hwnd, 1); +} + +_SOKOL_PRIVATE void _sapp_win32_destroy_window(void) { + DestroyWindow(_sapp.win32.hwnd); _sapp.win32.hwnd = 0; + UnregisterClassW(L"SOKOLAPP", GetModuleHandleW(NULL)); +} + +_SOKOL_PRIVATE void _sapp_win32_init_console(void) { + if (_sapp.desc.win32_console_create || _sapp.desc.win32_console_attach) { + BOOL con_valid = FALSE; + if (_sapp.desc.win32_console_create) { + con_valid = AllocConsole(); + } + else if (_sapp.desc.win32_console_attach) { + con_valid = AttachConsole(ATTACH_PARENT_PROCESS); + } + if (con_valid) { + FILE* res_fp = 0; + errno_t err; + err = freopen_s(&res_fp, "CON", "w", stdout); + err = freopen_s(&res_fp, "CON", "w", stderr); + (void)err; + } + } + if (_sapp.desc.win32_console_utf8) { + _sapp.win32.orig_codepage = GetConsoleOutputCP(); + SetConsoleOutputCP(CP_UTF8); + } +} + +_SOKOL_PRIVATE void _sapp_win32_restore_console(void) { + if (_sapp.desc.win32_console_utf8) { + SetConsoleOutputCP(_sapp.win32.orig_codepage); + } +} + +_SOKOL_PRIVATE void _sapp_win32_init_dpi(void) { + + typedef BOOL(WINAPI * SETPROCESSDPIAWARE_T)(void); + typedef HRESULT(WINAPI * SETPROCESSDPIAWARENESS_T)(PROCESS_DPI_AWARENESS); + typedef HRESULT(WINAPI * GETDPIFORMONITOR_T)(HMONITOR, MONITOR_DPI_TYPE, UINT*, UINT*); + + SETPROCESSDPIAWARE_T fn_setprocessdpiaware = 0; + SETPROCESSDPIAWARENESS_T 
fn_setprocessdpiawareness = 0; + GETDPIFORMONITOR_T fn_getdpiformonitor = 0; + HINSTANCE user32 = LoadLibraryA("user32.dll"); + if (user32) { + fn_setprocessdpiaware = (SETPROCESSDPIAWARE_T)(void*) GetProcAddress(user32, "SetProcessDPIAware"); + } + HINSTANCE shcore = LoadLibraryA("shcore.dll"); + if (shcore) { + fn_setprocessdpiawareness = (SETPROCESSDPIAWARENESS_T)(void*) GetProcAddress(shcore, "SetProcessDpiAwareness"); + fn_getdpiformonitor = (GETDPIFORMONITOR_T)(void*) GetProcAddress(shcore, "GetDpiForMonitor"); + } + if (fn_setprocessdpiawareness) { + /* if the app didn't request HighDPI rendering, let Windows do the upscaling */ + PROCESS_DPI_AWARENESS process_dpi_awareness = PROCESS_SYSTEM_DPI_AWARE; + _sapp.win32.dpi.aware = true; + if (!_sapp.desc.high_dpi) { + process_dpi_awareness = PROCESS_DPI_UNAWARE; + _sapp.win32.dpi.aware = false; + } + fn_setprocessdpiawareness(process_dpi_awareness); + } + else if (fn_setprocessdpiaware) { + fn_setprocessdpiaware(); + _sapp.win32.dpi.aware = true; + } + /* get dpi scale factor for main monitor */ + if (fn_getdpiformonitor && _sapp.win32.dpi.aware) { + POINT pt = { 1, 1 }; + HMONITOR hm = MonitorFromPoint(pt, MONITOR_DEFAULTTONEAREST); + UINT dpix, dpiy; + HRESULT hr = fn_getdpiformonitor(hm, MDT_EFFECTIVE_DPI, &dpix, &dpiy); + _SOKOL_UNUSED(hr); + SOKOL_ASSERT(SUCCEEDED(hr)); + /* clamp window scale to an integer factor */ + _sapp.win32.dpi.window_scale = (float)dpix / 96.0f; + } + else { + _sapp.win32.dpi.window_scale = 1.0f; + } + if (_sapp.desc.high_dpi) { + _sapp.win32.dpi.content_scale = _sapp.win32.dpi.window_scale; + _sapp.win32.dpi.mouse_scale = 1.0f; + } + else { + _sapp.win32.dpi.content_scale = 1.0f; + _sapp.win32.dpi.mouse_scale = 1.0f / _sapp.win32.dpi.window_scale; + } + _sapp.dpi_scale = _sapp.win32.dpi.content_scale; + if (user32) { + FreeLibrary(user32); + } + if (shcore) { + FreeLibrary(shcore); + } +} + +_SOKOL_PRIVATE bool _sapp_win32_set_clipboard_string(const char* str) { + SOKOL_ASSERT(str); + SOKOL_ASSERT(_sapp.win32.hwnd); + SOKOL_ASSERT(_sapp.clipboard.enabled && (_sapp.clipboard.buf_size > 0)); + + wchar_t* wchar_buf = 0; + const SIZE_T wchar_buf_size = (SIZE_T)_sapp.clipboard.buf_size * sizeof(wchar_t); + HANDLE object = GlobalAlloc(GMEM_MOVEABLE, wchar_buf_size); + if (!object) { + goto error; + } + wchar_buf = (wchar_t*) GlobalLock(object); + if (!wchar_buf) { + goto error; + } + if (!_sapp_win32_uwp_utf8_to_wide(str, wchar_buf, (int)wchar_buf_size)) { + goto error; + } + GlobalUnlock(wchar_buf); + wchar_buf = 0; + if (!OpenClipboard(_sapp.win32.hwnd)) { + goto error; + } + EmptyClipboard(); + SetClipboardData(CF_UNICODETEXT, object); + CloseClipboard(); + return true; + +error: + if (wchar_buf) { + GlobalUnlock(object); + } + if (object) { + GlobalFree(object); + } + return false; +} + +_SOKOL_PRIVATE const char* _sapp_win32_get_clipboard_string(void) { + SOKOL_ASSERT(_sapp.clipboard.enabled && _sapp.clipboard.buffer); + SOKOL_ASSERT(_sapp.win32.hwnd); + if (!OpenClipboard(_sapp.win32.hwnd)) { + /* silently ignore any errors and just return the current + content of the local clipboard buffer + */ + return _sapp.clipboard.buffer; + } + HANDLE object = GetClipboardData(CF_UNICODETEXT); + if (!object) { + CloseClipboard(); + return _sapp.clipboard.buffer; + } + const wchar_t* wchar_buf = (const wchar_t*) GlobalLock(object); + if (!wchar_buf) { + CloseClipboard(); + return _sapp.clipboard.buffer; + } + if (!_sapp_win32_wide_to_utf8(wchar_buf, _sapp.clipboard.buffer, _sapp.clipboard.buf_size)) { + 
SOKOL_LOG("sokol_app.h: clipboard string didn't fit into clipboard buffer\n"); + } + GlobalUnlock(object); + CloseClipboard(); + return _sapp.clipboard.buffer; +} + +_SOKOL_PRIVATE void _sapp_win32_update_window_title(void) { + _sapp_win32_uwp_utf8_to_wide(_sapp.window_title, _sapp.window_title_wide, sizeof(_sapp.window_title_wide)); + SetWindowTextW(_sapp.win32.hwnd, _sapp.window_title_wide); +} + +/* don't laugh, but this seems to be the easiest and most robust + way to check if we're running on Win10 + + From: https://github.com/videolan/vlc/blob/232fb13b0d6110c4d1b683cde24cf9a7f2c5c2ea/modules/video_output/win32/d3d11_swapchain.c#L263 +*/ +_SOKOL_PRIVATE bool _sapp_win32_is_win10_or_greater(void) { + HMODULE h = GetModuleHandleW(L"kernel32.dll"); + if (NULL != h) { + return (NULL != GetProcAddress(h, "GetSystemCpuSetInformation")); + } + else { + return false; + } +} + +_SOKOL_PRIVATE void _sapp_win32_run(const sapp_desc* desc) { + _sapp_init_state(desc); + _sapp_win32_init_console(); + _sapp.win32.is_win10_or_greater = _sapp_win32_is_win10_or_greater(); + _sapp_win32_uwp_init_keytable(); + _sapp_win32_uwp_utf8_to_wide(_sapp.window_title, _sapp.window_title_wide, sizeof(_sapp.window_title_wide)); + _sapp_win32_init_dpi(); + _sapp_win32_create_window(); + #if defined(SOKOL_D3D11) + _sapp_d3d11_create_device_and_swapchain(); + _sapp_d3d11_create_default_render_target(); + #endif + #if defined(SOKOL_GLCORE33) + _sapp_wgl_init(); + _sapp_wgl_load_extensions(); + _sapp_wgl_create_context(); + #endif + _sapp.valid = true; + + bool done = false; + while (!(done || _sapp.quit_ordered)) { + MSG msg; + while (PeekMessageW(&msg, NULL, 0, 0, PM_REMOVE)) { + if (WM_QUIT == msg.message) { + done = true; + continue; + } + else { + TranslateMessage(&msg); + DispatchMessage(&msg); + } + } + _sapp_frame(); + #if defined(SOKOL_D3D11) + _sapp_d3d11_present(); + if (IsIconic(_sapp.win32.hwnd)) { + Sleep((DWORD)(16 * _sapp.swap_interval)); + } + #endif + #if defined(SOKOL_GLCORE33) + _sapp_wgl_swap_buffers(); + #endif + /* check for window resized, this cannot happen in WM_SIZE as it explodes memory usage */ + if (_sapp_win32_update_dimensions()) { + #if defined(SOKOL_D3D11) + _sapp_d3d11_resize_default_render_target(); + #endif + _sapp_win32_uwp_app_event(SAPP_EVENTTYPE_RESIZED); + } + if (_sapp.quit_requested) { + PostMessage(_sapp.win32.hwnd, WM_CLOSE, 0, 0); + } + } + _sapp_call_cleanup(); + + #if defined(SOKOL_D3D11) + _sapp_d3d11_destroy_default_render_target(); + _sapp_d3d11_destroy_device_and_swapchain(); + #else + _sapp_wgl_destroy_context(); + _sapp_wgl_shutdown(); + #endif + _sapp_win32_destroy_window(); + _sapp_win32_restore_console(); + _sapp_discard_state(); +} + +_SOKOL_PRIVATE char** _sapp_win32_command_line_to_utf8_argv(LPWSTR w_command_line, int* o_argc) { + int argc = 0; + char** argv = 0; + char* args; + + LPWSTR* w_argv = CommandLineToArgvW(w_command_line, &argc); + if (w_argv == NULL) { + _sapp_fail("Win32: failed to parse command line"); + } else { + size_t size = wcslen(w_command_line) * 4; + argv = (char**) SOKOL_CALLOC(1, ((size_t)argc + 1) * sizeof(char*) + size); + SOKOL_ASSERT(argv); + args = (char*) &argv[argc + 1]; + int n; + for (int i = 0; i < argc; ++i) { + n = WideCharToMultiByte(CP_UTF8, 0, w_argv[i], -1, args, (int)size, NULL, NULL); + if (n == 0) { + _sapp_fail("Win32: failed to convert all arguments to utf8"); + break; + } + argv[i] = args; + size -= (size_t)n; + args += n; + } + LocalFree(w_argv); + } + *o_argc = argc; + return argv; +} + +#if !defined(SOKOL_NO_ENTRY) 
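+ /* Illustrative sketch (not part of sokol_app.h): the entry points below expect the application
+    to provide sokol_main() and return a filled sapp_desc, which _sapp_win32_run() then consumes.
+    For reference, a typical application-side definition looks roughly like this (the callback
+    names my_init/my_frame/my_cleanup/my_event and the concrete values are placeholders;
+    '#if 0' keeps the sketch out of the build): */
+ #if 0
+ #include "sokol_app.h"
+
+ static void my_init(void)    { /* create rendering resources */ }
+ static void my_frame(void)   { /* called once per frame */ }
+ static void my_cleanup(void) { /* release resources */ }
+ static void my_event(const sapp_event* ev) { (void)ev; /* keyboard/mouse/resize/... */ }
+
+ sapp_desc sokol_main(int argc, char* argv[]) {
+     (void)argc; (void)argv;
+     sapp_desc desc = { 0 };
+     desc.init_cb      = my_init;
+     desc.frame_cb     = my_frame;
+     desc.cleanup_cb   = my_cleanup;
+     desc.event_cb     = my_event;
+     desc.width        = 800;
+     desc.height       = 600;
+     desc.sample_count = 4;          /* > 1 enables the MSAA code paths above */
+     desc.window_title = "example";
+     return desc;
+ }
+ #endif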
+#if defined(SOKOL_WIN32_FORCE_MAIN) +int main(int argc, char* argv[]) { + sapp_desc desc = sokol_main(argc, argv); + _sapp_win32_run(&desc); + return 0; +} +#else +int WINAPI WinMain(_In_ HINSTANCE hInstance, _In_opt_ HINSTANCE hPrevInstance, _In_ LPSTR lpCmdLine, _In_ int nCmdShow) { + _SOKOL_UNUSED(hInstance); + _SOKOL_UNUSED(hPrevInstance); + _SOKOL_UNUSED(lpCmdLine); + _SOKOL_UNUSED(nCmdShow); + int argc_utf8 = 0; + char** argv_utf8 = _sapp_win32_command_line_to_utf8_argv(GetCommandLineW(), &argc_utf8); + sapp_desc desc = sokol_main(argc_utf8, argv_utf8); + _sapp_win32_run(&desc); + SOKOL_FREE(argv_utf8); + return 0; +} +#endif /* SOKOL_WIN32_FORCE_MAIN */ +#endif /* SOKOL_NO_ENTRY */ + +#ifdef _MSC_VER + #pragma warning(pop) +#endif + +#endif /* _SAPP_WIN32 */ + +/*== UWP ================================================================*/ +#if defined(_SAPP_UWP) + +// Helper functions +_SOKOL_PRIVATE void _sapp_uwp_configure_dpi(float monitor_dpi) { + _sapp.uwp.dpi.window_scale = monitor_dpi / 96.0f; + if (_sapp.desc.high_dpi) { + _sapp.uwp.dpi.content_scale = _sapp.uwp.dpi.window_scale; + _sapp.uwp.dpi.mouse_scale = 1.0f * _sapp.uwp.dpi.window_scale; + } + else { + _sapp.uwp.dpi.content_scale = 1.0f; + _sapp.uwp.dpi.mouse_scale = 1.0f; + } + _sapp.dpi_scale = _sapp.uwp.dpi.content_scale; +} + +_SOKOL_PRIVATE void _sapp_uwp_show_mouse(bool visible) { + using namespace winrt::Windows::UI::Core; + + /* NOTE: this function is only called when the mouse visibility actually changes */ + CoreWindow::GetForCurrentThread().PointerCursor(visible ? + CoreCursor(CoreCursorType::Arrow, 0) : + CoreCursor(nullptr)); +} + +_SOKOL_PRIVATE uint32_t _sapp_uwp_mods(winrt::Windows::UI::Core::CoreWindow const& sender_window) { + using namespace winrt::Windows::System; + using namespace winrt::Windows::UI::Core; + + uint32_t mods = 0; + if ((sender_window.GetKeyState(VirtualKey::Shift) & CoreVirtualKeyStates::Down) == CoreVirtualKeyStates::Down) { + mods |= SAPP_MODIFIER_SHIFT; + } + if ((sender_window.GetKeyState(VirtualKey::Control) & CoreVirtualKeyStates::Down) == CoreVirtualKeyStates::Down) { + mods |= SAPP_MODIFIER_CTRL; + } + if ((sender_window.GetKeyState(VirtualKey::Menu) & CoreVirtualKeyStates::Down) == CoreVirtualKeyStates::Down) { + mods |= SAPP_MODIFIER_ALT; + } + if (((sender_window.GetKeyState(VirtualKey::LeftWindows) & CoreVirtualKeyStates::Down) == CoreVirtualKeyStates::Down) || + ((sender_window.GetKeyState(VirtualKey::RightWindows) & CoreVirtualKeyStates::Down) == CoreVirtualKeyStates::Down)) + { + mods |= SAPP_MODIFIER_SUPER; + } + return mods; +} + +_SOKOL_PRIVATE void _sapp_uwp_mouse_event(sapp_event_type type, sapp_mousebutton btn, winrt::Windows::UI::Core::CoreWindow const& sender_window) { + if (_sapp_events_enabled()) { + _sapp_init_event(type); + _sapp.event.modifiers = _sapp_uwp_mods(sender_window); + _sapp.event.mouse_button = btn; + _sapp_call_event(&_sapp.event); + } +} + +_SOKOL_PRIVATE void _sapp_uwp_scroll_event(float delta, bool horizontal, winrt::Windows::UI::Core::CoreWindow const& sender_window) { + if (_sapp_events_enabled()) { + _sapp_init_event(SAPP_EVENTTYPE_MOUSE_SCROLL); + _sapp.event.modifiers = _sapp_uwp_mods(sender_window); + _sapp.event.scroll_x = horizontal ? (-delta / 30.0f) : 0.0f; + _sapp.event.scroll_y = horizontal ? 
0.0f : (delta / 30.0f); + _sapp_call_event(&_sapp.event); + } +} + +_SOKOL_PRIVATE void _sapp_uwp_extract_mouse_button_events(winrt::Windows::UI::Core::CoreWindow const& sender, winrt::Windows::UI::Core::PointerEventArgs const& args) { + + // we need to figure out ourselves what mouse buttons have been pressed and released, + // because UWP doesn't properly send down/up mouse button events when multiple buttons + // are pressed down, so we also need to check the mouse button state in other mouse events + // to track what buttons have been pressed down and released + // + auto properties = args.CurrentPoint().Properties(); + const uint8_t lmb_bit = (1 << SAPP_MOUSEBUTTON_LEFT); + const uint8_t rmb_bit = (1 << SAPP_MOUSEBUTTON_RIGHT); + const uint8_t mmb_bit = (1 << SAPP_MOUSEBUTTON_MIDDLE); + uint8_t new_btns = 0; + if (properties.IsLeftButtonPressed()) { + new_btns |= lmb_bit; + } + if (properties.IsRightButtonPressed()) { + new_btns |= rmb_bit; + } + if (properties.IsMiddleButtonPressed()) { + new_btns |= mmb_bit; + } + const uint8_t old_btns = _sapp.uwp.mouse_buttons; + const uint8_t chg_btns = new_btns ^ old_btns; + + _sapp.uwp.mouse_buttons = new_btns; + + sapp_event_type type = SAPP_EVENTTYPE_INVALID; + sapp_mousebutton btn = SAPP_MOUSEBUTTON_INVALID; + if (chg_btns & lmb_bit) { + btn = SAPP_MOUSEBUTTON_LEFT; + type = (new_btns & lmb_bit) ? SAPP_EVENTTYPE_MOUSE_DOWN : SAPP_EVENTTYPE_MOUSE_UP; + } + if (chg_btns & rmb_bit) { + btn = SAPP_MOUSEBUTTON_RIGHT; + type = (new_btns & rmb_bit) ? SAPP_EVENTTYPE_MOUSE_DOWN : SAPP_EVENTTYPE_MOUSE_UP; + } + if (chg_btns & mmb_bit) { + btn = SAPP_MOUSEBUTTON_MIDDLE; + type = (new_btns & mmb_bit) ? SAPP_EVENTTYPE_MOUSE_DOWN : SAPP_EVENTTYPE_MOUSE_UP; + } + if (type != SAPP_EVENTTYPE_INVALID) { + _sapp_uwp_mouse_event(type, btn, sender); + } +} + +_SOKOL_PRIVATE void _sapp_uwp_key_event(sapp_event_type type, winrt::Windows::UI::Core::CoreWindow const& sender_window, winrt::Windows::UI::Core::KeyEventArgs const& args) { + auto key_status = args.KeyStatus(); + uint32_t ext_scan_code = key_status.ScanCode | (key_status.IsExtendedKey ? 0x100 : 0); + if (_sapp_events_enabled() && (ext_scan_code < SAPP_MAX_KEYCODES)) { + _sapp_init_event(type); + _sapp.event.modifiers = _sapp_uwp_mods(sender_window); + _sapp.event.key_code = _sapp.keycodes[ext_scan_code]; + _sapp.event.key_repeat = type == SAPP_EVENTTYPE_KEY_UP ? 
false : key_status.WasKeyDown; + _sapp_call_event(&_sapp.event); + /* check if a CLIPBOARD_PASTED event must be sent too */ + if (_sapp.clipboard.enabled && + (type == SAPP_EVENTTYPE_KEY_DOWN) && + (_sapp.event.modifiers == SAPP_MODIFIER_CTRL) && + (_sapp.event.key_code == SAPP_KEYCODE_V)) + { + _sapp_init_event(SAPP_EVENTTYPE_CLIPBOARD_PASTED); + _sapp_call_event(&_sapp.event); + } + } +} + +_SOKOL_PRIVATE void _sapp_uwp_char_event(uint32_t c, bool repeat, winrt::Windows::UI::Core::CoreWindow const& sender_window) { + if (_sapp_events_enabled() && (c >= 32)) { + _sapp_init_event(SAPP_EVENTTYPE_CHAR); + _sapp.event.modifiers = _sapp_uwp_mods(sender_window); + _sapp.event.char_code = c; + _sapp.event.key_repeat = repeat; + _sapp_call_event(&_sapp.event); + } +} + +_SOKOL_PRIVATE void _sapp_uwp_toggle_fullscreen(void) { + auto appView = winrt::Windows::UI::ViewManagement::ApplicationView::GetForCurrentView(); + _sapp.fullscreen = appView.IsFullScreenMode(); + if (!_sapp.fullscreen) { + appView.TryEnterFullScreenMode(); + } + else { + appView.ExitFullScreenMode(); + } + _sapp.fullscreen = appView.IsFullScreenMode(); +} + +namespace {/* Empty namespace to ensure internal linkage (same as _SOKOL_PRIVATE) */ + +// Controls all the DirectX device resources. +class DeviceResources { +public: + // Provides an interface for an application that owns DeviceResources to be notified of the device being lost or created. + interface IDeviceNotify { + virtual void OnDeviceLost() = 0; + virtual void OnDeviceRestored() = 0; + }; + + DeviceResources(); + ~DeviceResources(); + void SetWindow(winrt::Windows::UI::Core::CoreWindow const& window); + void SetLogicalSize(winrt::Windows::Foundation::Size logicalSize); + void SetCurrentOrientation(winrt::Windows::Graphics::Display::DisplayOrientations currentOrientation); + void SetDpi(float dpi); + void ValidateDevice(); + void HandleDeviceLost(); + void RegisterDeviceNotify(IDeviceNotify* deviceNotify); + void Trim(); + void Present(); + +private: + + // Swapchain Rotation Matrices (Z-rotation) + static inline const DirectX::XMFLOAT4X4 DeviceResources::m_rotation0 = { + 1.0f, 0.0f, 0.0f, 0.0f, + 0.0f, 1.0f, 0.0f, 0.0f, + 0.0f, 0.0f, 1.0f, 0.0f, + 0.0f, 0.0f, 0.0f, 1.0f + }; + static inline const DirectX::XMFLOAT4X4 DeviceResources::m_rotation90 = { + 0.0f, 1.0f, 0.0f, 0.0f, + -1.0f, 0.0f, 0.0f, 0.0f, + 0.0f, 0.0f, 1.0f, 0.0f, + 0.0f, 0.0f, 0.0f, 1.0f + }; + static inline const DirectX::XMFLOAT4X4 DeviceResources::m_rotation180 = { + -1.0f, 0.0f, 0.0f, 0.0f, + 0.0f, -1.0f, 0.0f, 0.0f, + 0.0f, 0.0f, 1.0f, 0.0f, + 0.0f, 0.0f, 0.0f, 1.0f + }; + static inline const DirectX::XMFLOAT4X4 DeviceResources::m_rotation270 = { + 0.0f, -1.0f, 0.0f, 0.0f, + 1.0f, 0.0f, 0.0f, 0.0f, + 0.0f, 0.0f, 1.0f, 0.0f, + 0.0f, 0.0f, 0.0f, 1.0f + }; + + void CreateDeviceResources(); + void CreateWindowSizeDependentResources(); + void UpdateRenderTargetSize(); + DXGI_MODE_ROTATION ComputeDisplayRotation(); + bool SdkLayersAvailable(); + + // Direct3D objects. + winrt::com_ptr m_d3dDevice; + winrt::com_ptr m_d3dContext; + winrt::com_ptr m_swapChain; + + // Direct3D rendering objects. Required for 3D. + winrt::com_ptr m_d3dRenderTarget; + winrt::com_ptr m_d3dRenderTargetView; + winrt::com_ptr m_d3dMSAARenderTarget; + winrt::com_ptr m_d3dMSAARenderTargetView; + winrt::com_ptr m_d3dDepthStencil; + winrt::com_ptr m_d3dDepthStencilView; + D3D11_VIEWPORT m_screenViewport = { }; + + // Cached reference to the Window. 
+ winrt::agile_ref< winrt::Windows::UI::Core::CoreWindow> m_window; + + // Cached device properties. + D3D_FEATURE_LEVEL m_d3dFeatureLevel = D3D_FEATURE_LEVEL_9_1; + winrt::Windows::Foundation::Size m_d3dRenderTargetSize = { }; + winrt::Windows::Foundation::Size m_outputSize = { }; + winrt::Windows::Foundation::Size m_logicalSize = { }; + winrt::Windows::Graphics::Display::DisplayOrientations m_nativeOrientation = winrt::Windows::Graphics::Display::DisplayOrientations::None; + winrt::Windows::Graphics::Display::DisplayOrientations m_currentOrientation = winrt::Windows::Graphics::Display::DisplayOrientations::None; + float m_dpi = -1.0f; + + // Transforms used for display orientation. + DirectX::XMFLOAT4X4 m_orientationTransform3D; + + // The IDeviceNotify can be held directly as it owns the DeviceResources. + IDeviceNotify* m_deviceNotify = nullptr; +}; + +// Main entry point for our app. Connects the app with the Windows shell and handles application lifecycle events. +struct App : winrt::implements { +public: + // IFrameworkViewSource Methods + winrt::Windows::ApplicationModel::Core::IFrameworkView CreateView() { return *this; } + + // IFrameworkView Methods. + virtual void Initialize(winrt::Windows::ApplicationModel::Core::CoreApplicationView const& applicationView); + virtual void SetWindow(winrt::Windows::UI::Core::CoreWindow const& window); + virtual void Load(winrt::hstring const& entryPoint); + virtual void Run(); + virtual void Uninitialize(); + +protected: + // Application lifecycle event handlers + void OnActivated(winrt::Windows::ApplicationModel::Core::CoreApplicationView const& applicationView, winrt::Windows::ApplicationModel::Activation::IActivatedEventArgs const& args); + void OnSuspending(winrt::Windows::Foundation::IInspectable const& sender, winrt::Windows::ApplicationModel::SuspendingEventArgs const& args); + void OnResuming(winrt::Windows::Foundation::IInspectable const& sender, winrt::Windows::Foundation::IInspectable const& args); + + // Window event handlers + void OnWindowSizeChanged(winrt::Windows::UI::Core::CoreWindow const& sender, winrt::Windows::UI::Core::WindowSizeChangedEventArgs const& args); + void OnVisibilityChanged(winrt::Windows::UI::Core::CoreWindow const& sender, winrt::Windows::UI::Core::VisibilityChangedEventArgs const& args); + + // Navigation event handlers + void OnBackRequested(winrt::Windows::Foundation::IInspectable const& sender, winrt::Windows::UI::Core::BackRequestedEventArgs const& args); + + // Input event handlers + void OnKeyDown(winrt::Windows::UI::Core::CoreWindow const& sender, winrt::Windows::UI::Core::KeyEventArgs const& args); + void OnKeyUp(winrt::Windows::UI::Core::CoreWindow const& sender, winrt::Windows::UI::Core::KeyEventArgs const& args); + void OnCharacterReceived(winrt::Windows::UI::Core::CoreWindow const& sender, winrt::Windows::UI::Core::CharacterReceivedEventArgs const& args); + + // Pointer event handlers + void OnPointerEntered(winrt::Windows::UI::Core::CoreWindow const& sender, winrt::Windows::UI::Core::PointerEventArgs const& args); + void OnPointerExited(winrt::Windows::UI::Core::CoreWindow const& sender, winrt::Windows::UI::Core::PointerEventArgs const& args); + void OnPointerPressed(winrt::Windows::UI::Core::CoreWindow const& sender, winrt::Windows::UI::Core::PointerEventArgs const& args); + void OnPointerReleased(winrt::Windows::UI::Core::CoreWindow const& sender, winrt::Windows::UI::Core::PointerEventArgs const& args); + void OnPointerMoved(winrt::Windows::UI::Core::CoreWindow const& sender, 
winrt::Windows::UI::Core::PointerEventArgs const& args); + void OnPointerWheelChanged(winrt::Windows::UI::Core::CoreWindow const& sender, winrt::Windows::UI::Core::PointerEventArgs const& args); + + // DisplayInformation event handlers. + void OnDpiChanged(winrt::Windows::Graphics::Display::DisplayInformation const& sender, winrt::Windows::Foundation::IInspectable const& args); + void OnOrientationChanged(winrt::Windows::Graphics::Display::DisplayInformation const& sender, winrt::Windows::Foundation::IInspectable const& args); + void OnDisplayContentsInvalidated(winrt::Windows::Graphics::Display::DisplayInformation const& sender, winrt::Windows::Foundation::IInspectable const& args); + +private: + std::unique_ptr m_deviceResources; + bool m_windowVisible = true; +}; + +DeviceResources::DeviceResources() { + CreateDeviceResources(); +} + +DeviceResources::~DeviceResources() { + // Cleanup Sokol Context + _sapp.d3d11.device = nullptr; + _sapp.d3d11.device_context = nullptr; +} + +void DeviceResources::CreateDeviceResources() { + // This flag adds support for surfaces with a different color channel ordering + // than the API default. It is required for compatibility with Direct2D. + UINT creationFlags = D3D11_CREATE_DEVICE_BGRA_SUPPORT; + + #if defined(_DEBUG) + if (SdkLayersAvailable()) { + // If the project is in a debug build, enable debugging via SDK Layers with this flag. + creationFlags |= D3D11_CREATE_DEVICE_DEBUG; + } + #endif + + // This array defines the set of DirectX hardware feature levels this app will support. + // Note the ordering should be preserved. + // Don't forget to declare your application's minimum required feature level in its + // description. All applications are assumed to support 9.1 unless otherwise stated. + D3D_FEATURE_LEVEL featureLevels[] = { + D3D_FEATURE_LEVEL_12_1, + D3D_FEATURE_LEVEL_12_0, + D3D_FEATURE_LEVEL_11_1, + D3D_FEATURE_LEVEL_11_0, + D3D_FEATURE_LEVEL_10_1, + D3D_FEATURE_LEVEL_10_0, + D3D_FEATURE_LEVEL_9_3, + D3D_FEATURE_LEVEL_9_2, + D3D_FEATURE_LEVEL_9_1 + }; + + // Create the Direct3D 11 API device object and a corresponding context. + winrt::com_ptr device; + winrt::com_ptr context; + + HRESULT hr = D3D11CreateDevice( + nullptr, // Specify nullptr to use the default adapter. + D3D_DRIVER_TYPE_HARDWARE, // Create a device using the hardware graphics driver. + 0, // Should be 0 unless the driver is D3D_DRIVER_TYPE_SOFTWARE. + creationFlags, // Set debug and Direct2D compatibility flags. + featureLevels, // List of feature levels this app can support. + ARRAYSIZE(featureLevels), // Size of the list above. + D3D11_SDK_VERSION, // Always set this to D3D11_SDK_VERSION for Microsoft Store apps. + device.put(), // Returns the Direct3D device created. + &m_d3dFeatureLevel, // Returns feature level of device created. + context.put() // Returns the device immediate context. + ); + + if (FAILED(hr)) { + // If the initialization fails, fall back to the WARP device. + // For more information on WARP, see: + // https://go.microsoft.com/fwlink/?LinkId=286690 + winrt::check_hresult( + D3D11CreateDevice( + nullptr, + D3D_DRIVER_TYPE_WARP, // Create a WARP device instead of a hardware device. + 0, + creationFlags, + featureLevels, + ARRAYSIZE(featureLevels), + D3D11_SDK_VERSION, + device.put(), + &m_d3dFeatureLevel, + context.put() + ) + ); + } + + // Store pointers to the Direct3D 11.3 API device and immediate context. 
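+ /* Illustrative sketch (not part of sokol_app.h): the block above creates the D3D11 device with a
+    descending feature-level list and falls back to the WARP software rasterizer when no hardware
+    device is available. The same idea expressed as plain C, without the WinRT wrappers
+    (my_create_d3d11_device is a hypothetical helper; link with d3d11.lib; '#if 0' keeps it out
+    of the build): */
+ #if 0
+ #include <d3d11.h>
+
+ static HRESULT my_create_d3d11_device(ID3D11Device** out_dev, ID3D11DeviceContext** out_ctx) {
+     static const D3D_FEATURE_LEVEL levels[] = {
+         D3D_FEATURE_LEVEL_11_1, D3D_FEATURE_LEVEL_11_0,
+         D3D_FEATURE_LEVEL_10_1, D3D_FEATURE_LEVEL_10_0,
+         D3D_FEATURE_LEVEL_9_3,  D3D_FEATURE_LEVEL_9_2,  D3D_FEATURE_LEVEL_9_1
+     };
+     const UINT num_levels = (UINT)(sizeof(levels) / sizeof(levels[0]));
+     const UINT flags = D3D11_CREATE_DEVICE_BGRA_SUPPORT;   /* required for Direct2D interop */
+     D3D_FEATURE_LEVEL got_level;
+     HRESULT hr = D3D11CreateDevice(NULL, D3D_DRIVER_TYPE_HARDWARE, NULL, flags,
+         levels, num_levels, D3D11_SDK_VERSION, out_dev, &got_level, out_ctx);
+     if (FAILED(hr)) {
+         /* no usable hardware device: retry with the WARP software driver */
+         hr = D3D11CreateDevice(NULL, D3D_DRIVER_TYPE_WARP, NULL, flags,
+             levels, num_levels, D3D11_SDK_VERSION, out_dev, &got_level, out_ctx);
+     }
+     return hr;
+ }
+ #endif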
+ m_d3dDevice = device.as(); + m_d3dContext = context.as(); + + // Setup Sokol Context + _sapp.d3d11.device = m_d3dDevice.get(); + _sapp.d3d11.device_context = m_d3dContext.get(); +} + +void DeviceResources::CreateWindowSizeDependentResources() { + // Cleanup Sokol Context (these are non-owning raw pointers) + _sapp.d3d11.rt = nullptr; + _sapp.d3d11.rtv = nullptr; + _sapp.d3d11.msaa_rt = nullptr; + _sapp.d3d11.msaa_rtv = nullptr; + _sapp.d3d11.ds = nullptr; + _sapp.d3d11.dsv = nullptr; + + // Clear the previous window size specific context. + ID3D11RenderTargetView* nullViews[] = { nullptr }; + m_d3dContext->OMSetRenderTargets(ARRAYSIZE(nullViews), nullViews, nullptr); + // these are smart pointers, setting to nullptr will delete the objects + m_d3dRenderTarget = nullptr; + m_d3dRenderTargetView = nullptr; + m_d3dMSAARenderTarget = nullptr; + m_d3dMSAARenderTargetView = nullptr; + m_d3dDepthStencilView = nullptr; + m_d3dDepthStencil = nullptr; + m_d3dContext->Flush1(D3D11_CONTEXT_TYPE_ALL, nullptr); + + UpdateRenderTargetSize(); + + // The width and height of the swap chain must be based on the window's + // natively-oriented width and height. If the window is not in the native + // orientation, the dimensions must be reversed. + DXGI_MODE_ROTATION displayRotation = ComputeDisplayRotation(); + + bool swapDimensions = displayRotation == DXGI_MODE_ROTATION_ROTATE90 || displayRotation == DXGI_MODE_ROTATION_ROTATE270; + m_d3dRenderTargetSize.Width = swapDimensions ? m_outputSize.Height : m_outputSize.Width; + m_d3dRenderTargetSize.Height = swapDimensions ? m_outputSize.Width : m_outputSize.Height; + + if (m_swapChain != nullptr) { + // If the swap chain already exists, resize it. + HRESULT hr = m_swapChain->ResizeBuffers( + 2, // Double-buffered swap chain. + lround(m_d3dRenderTargetSize.Width), + lround(m_d3dRenderTargetSize.Height), + DXGI_FORMAT_B8G8R8A8_UNORM, + 0 + ); + + if (hr == DXGI_ERROR_DEVICE_REMOVED || hr == DXGI_ERROR_DEVICE_RESET) { + // If the device was removed for any reason, a new device and swap chain will need to be created. + HandleDeviceLost(); + + // Everything is set up now. Do not continue execution of this method. HandleDeviceLost will reenter this method + // and correctly set up the new device. + return; + } + else { + winrt::check_hresult(hr); + } + } + else { + // Otherwise, create a new one using the same adapter as the existing Direct3D device. + DXGI_SCALING scaling = (_sapp.uwp.dpi.content_scale == _sapp.uwp.dpi.window_scale) ? DXGI_SCALING_NONE : DXGI_SCALING_STRETCH; + DXGI_SWAP_CHAIN_DESC1 swapChainDesc = { 0 }; + + swapChainDesc.Width = lround(m_d3dRenderTargetSize.Width); // Match the size of the window. + swapChainDesc.Height = lround(m_d3dRenderTargetSize.Height); + swapChainDesc.Format = DXGI_FORMAT_B8G8R8A8_UNORM; // This is the most common swap chain format. + swapChainDesc.Stereo = false; + swapChainDesc.SampleDesc.Count = 1; // Don't use multi-sampling. + swapChainDesc.SampleDesc.Quality = 0; + swapChainDesc.BufferUsage = DXGI_USAGE_RENDER_TARGET_OUTPUT; + swapChainDesc.BufferCount = 2; // Use double-buffering to minimize latency. + swapChainDesc.SwapEffect = DXGI_SWAP_EFFECT_FLIP_SEQUENTIAL; // All Microsoft Store apps must use this SwapEffect. + swapChainDesc.Flags = 0; + swapChainDesc.Scaling = scaling; + swapChainDesc.AlphaMode = DXGI_ALPHA_MODE_IGNORE; + + // This sequence obtains the DXGI factory that was used to create the Direct3D device above. 
+ winrt::com_ptr dxgiDevice = m_d3dDevice.as(); + winrt::com_ptr dxgiAdapter; + winrt::check_hresult(dxgiDevice->GetAdapter(dxgiAdapter.put())); + winrt::com_ptr dxgiFactory; + winrt::check_hresult(dxgiAdapter->GetParent(__uuidof(IDXGIFactory4), dxgiFactory.put_void())); + winrt::com_ptr swapChain; + winrt::check_hresult(dxgiFactory->CreateSwapChainForCoreWindow(m_d3dDevice.get(), m_window.get().as<::IUnknown>().get(), &swapChainDesc, nullptr, swapChain.put())); + m_swapChain = swapChain.as(); + + // Ensure that DXGI does not queue more than one frame at a time. This both reduces latency and + // ensures that the application will only render after each VSync, minimizing power consumption. + winrt::check_hresult(dxgiDevice->SetMaximumFrameLatency(1)); + + // Setup Sokol Context + winrt::check_hresult(swapChain->GetDesc(&_sapp.d3d11.swap_chain_desc)); + _sapp.d3d11.swap_chain = m_swapChain.as().detach(); + } + + // Set the proper orientation for the swap chain, and generate 2D and + // 3D matrix transformations for rendering to the rotated swap chain. + // Note the rotation angle for the 2D and 3D transforms are different. + // This is due to the difference in coordinate spaces. Additionally, + // the 3D matrix is specified explicitly to avoid rounding errors. + switch (displayRotation) { + case DXGI_MODE_ROTATION_IDENTITY: + m_orientationTransform3D = m_rotation0; + break; + + case DXGI_MODE_ROTATION_ROTATE90: + m_orientationTransform3D = m_rotation270; + break; + + case DXGI_MODE_ROTATION_ROTATE180: + m_orientationTransform3D = m_rotation180; + break; + + case DXGI_MODE_ROTATION_ROTATE270: + m_orientationTransform3D = m_rotation90; + break; + } + winrt::check_hresult(m_swapChain->SetRotation(displayRotation)); + + // Create a render target view of the swap chain back buffer. + winrt::check_hresult(m_swapChain->GetBuffer(0, IID_PPV_ARGS(&m_d3dRenderTarget))); + winrt::check_hresult(m_d3dDevice->CreateRenderTargetView1(m_d3dRenderTarget.get(), nullptr, m_d3dRenderTargetView.put())); + + // Create MSAA texture and view if needed + if (_sapp.sample_count > 1) { + CD3D11_TEXTURE2D_DESC1 msaaTexDesc( + DXGI_FORMAT_B8G8R8A8_UNORM, + lround(m_d3dRenderTargetSize.Width), + lround(m_d3dRenderTargetSize.Height), + 1, // arraySize + 1, // mipLevels + D3D11_BIND_RENDER_TARGET, + D3D11_USAGE_DEFAULT, + 0, // cpuAccessFlags + _sapp.sample_count, + _sapp.sample_count > 1 ? D3D11_STANDARD_MULTISAMPLE_PATTERN : 0 + ); + winrt::check_hresult(m_d3dDevice->CreateTexture2D1(&msaaTexDesc, nullptr, m_d3dMSAARenderTarget.put())); + winrt::check_hresult(m_d3dDevice->CreateRenderTargetView1(m_d3dMSAARenderTarget.get(), nullptr, m_d3dMSAARenderTargetView.put())); + } + + // Create a depth stencil view for use with 3D rendering if needed. + CD3D11_TEXTURE2D_DESC1 depthStencilDesc( + DXGI_FORMAT_D24_UNORM_S8_UINT, + lround(m_d3dRenderTargetSize.Width), + lround(m_d3dRenderTargetSize.Height), + 1, // This depth stencil view has only one texture. + 1, // Use a single mipmap level. + D3D11_BIND_DEPTH_STENCIL, + D3D11_USAGE_DEFAULT, + 0, // cpuAccessFlag + _sapp.sample_count, + _sapp.sample_count > 1 ? 
D3D11_STANDARD_MULTISAMPLE_PATTERN : 0 + ); + winrt::check_hresult(m_d3dDevice->CreateTexture2D1(&depthStencilDesc, nullptr, m_d3dDepthStencil.put())); + + CD3D11_DEPTH_STENCIL_VIEW_DESC depthStencilViewDesc(D3D11_DSV_DIMENSION_TEXTURE2D); + winrt::check_hresult(m_d3dDevice->CreateDepthStencilView(m_d3dDepthStencil.get(), nullptr, m_d3dDepthStencilView.put())); + + // Set sokol window and framebuffer sizes + _sapp.window_width = (int) m_logicalSize.Width; + _sapp.window_height = (int) m_logicalSize.Height; + _sapp.framebuffer_width = lround(m_d3dRenderTargetSize.Width); + _sapp.framebuffer_height = lround(m_d3dRenderTargetSize.Height); + + // Setup Sokol Context + _sapp.d3d11.rt = m_d3dRenderTarget.as().get(); + _sapp.d3d11.rtv = m_d3dRenderTargetView.as().get(); + _sapp.d3d11.ds = m_d3dDepthStencil.as().get(); + _sapp.d3d11.dsv = m_d3dDepthStencilView.get(); + if (_sapp.sample_count > 1) { + _sapp.d3d11.msaa_rt = m_d3dMSAARenderTarget.as().get(); + _sapp.d3d11.msaa_rtv = m_d3dMSAARenderTargetView.as().get(); + } + + // Sokol app is now valid + _sapp.valid = true; +} + +// Determine the dimensions of the render target and whether it will be scaled down. +void DeviceResources::UpdateRenderTargetSize() { + // Calculate the necessary render target size in pixels. + m_outputSize.Width = m_logicalSize.Width * _sapp.uwp.dpi.content_scale; + m_outputSize.Height = m_logicalSize.Height * _sapp.uwp.dpi.content_scale; + + // Prevent zero size DirectX content from being created. + m_outputSize.Width = std::max(m_outputSize.Width, 1.0f); + m_outputSize.Height = std::max(m_outputSize.Height, 1.0f); +} + +// This method is called when the CoreWindow is created (or re-created). +void DeviceResources::SetWindow(winrt::Windows::UI::Core::CoreWindow const& window) { + auto currentDisplayInformation = winrt::Windows::Graphics::Display::DisplayInformation::GetForCurrentView(); + m_window = window; + m_logicalSize = winrt::Windows::Foundation::Size(window.Bounds().Width, window.Bounds().Height); + m_nativeOrientation = currentDisplayInformation.NativeOrientation(); + m_currentOrientation = currentDisplayInformation.CurrentOrientation(); + m_dpi = currentDisplayInformation.LogicalDpi(); + _sapp_uwp_configure_dpi(m_dpi); + CreateWindowSizeDependentResources(); +} + +// This method is called in the event handler for the SizeChanged event. +void DeviceResources::SetLogicalSize(winrt::Windows::Foundation::Size logicalSize) { + if (m_logicalSize != logicalSize) { + m_logicalSize = logicalSize; + CreateWindowSizeDependentResources(); + } +} + +// This method is called in the event handler for the DpiChanged event. +void DeviceResources::SetDpi(float dpi) { + if (dpi != m_dpi) { + m_dpi = dpi; + _sapp_uwp_configure_dpi(m_dpi); + // When the display DPI changes, the logical size of the window (measured in Dips) also changes and needs to be updated. + auto window = m_window.get(); + m_logicalSize = winrt::Windows::Foundation::Size(window.Bounds().Width, window.Bounds().Height); + CreateWindowSizeDependentResources(); + } +} + +// This method is called in the event handler for the OrientationChanged event. +void DeviceResources::SetCurrentOrientation(winrt::Windows::Graphics::Display::DisplayOrientations currentOrientation) { + if (m_currentOrientation != currentOrientation) { + m_currentOrientation = currentOrientation; + CreateWindowSizeDependentResources(); + } +} + +// This method is called in the event handler for the DisplayContentsInvalidated event. 
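+ /* Illustrative sketch (not part of sokol_app.h): UpdateRenderTargetSize() above converts the
+    window's logical (DIP) size into a pixel size using the content scale derived from the
+    monitor DPI (dpi / 96 when high-dpi rendering is requested). For example, a 1280x720 DIP
+    window on a 144-DPI display gets a 1920x1080 pixel swap chain. Minimal version of that
+    arithmetic (hypothetical helper name, kept in '#if 0'): */
+ #if 0
+ typedef struct { int width, height; } my_pixel_size_t;
+
+ static my_pixel_size_t my_dips_to_pixels(float dip_w, float dip_h, float dpi) {
+     const float scale = dpi / 96.0f;    /* 96 DPI == 1.0 scale */
+     my_pixel_size_t px;
+     px.width  = (int)(dip_w * scale + 0.5f);
+     px.height = (int)(dip_h * scale + 0.5f);
+     /* never request a zero-sized swap chain (e.g. for a minimized window) */
+     if (px.width  < 1) { px.width  = 1; }
+     if (px.height < 1) { px.height = 1; }
+     return px;
+ }
+ #endif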
+void DeviceResources::ValidateDevice() { + // The D3D Device is no longer valid if the default adapter changed since the device + // was created or if the device has been removed. + + // First, get the information for the default adapter from when the device was created. + winrt::com_ptr dxgiDevice = m_d3dDevice.as< IDXGIDevice3>(); + winrt::com_ptr deviceAdapter; + winrt::check_hresult(dxgiDevice->GetAdapter(deviceAdapter.put())); + winrt::com_ptr deviceFactory; + winrt::check_hresult(deviceAdapter->GetParent(IID_PPV_ARGS(&deviceFactory))); + winrt::com_ptr previousDefaultAdapter; + winrt::check_hresult(deviceFactory->EnumAdapters1(0, previousDefaultAdapter.put())); + DXGI_ADAPTER_DESC1 previousDesc; + winrt::check_hresult(previousDefaultAdapter->GetDesc1(&previousDesc)); + + // Next, get the information for the current default adapter. + winrt::com_ptr currentFactory; + winrt::check_hresult(CreateDXGIFactory1(IID_PPV_ARGS(&currentFactory))); + winrt::com_ptr currentDefaultAdapter; + winrt::check_hresult(currentFactory->EnumAdapters1(0, currentDefaultAdapter.put())); + DXGI_ADAPTER_DESC1 currentDesc; + winrt::check_hresult(currentDefaultAdapter->GetDesc1(&currentDesc)); + + // If the adapter LUIDs don't match, or if the device reports that it has been removed, + // a new D3D device must be created. + if (previousDesc.AdapterLuid.LowPart != currentDesc.AdapterLuid.LowPart || + previousDesc.AdapterLuid.HighPart != currentDesc.AdapterLuid.HighPart || + FAILED(m_d3dDevice->GetDeviceRemovedReason())) + { + // Release references to resources related to the old device. + dxgiDevice = nullptr; + deviceAdapter = nullptr; + deviceFactory = nullptr; + previousDefaultAdapter = nullptr; + + // Create a new device and swap chain. + HandleDeviceLost(); + } +} + +// Recreate all device resources and set them back to the current state. +void DeviceResources::HandleDeviceLost() { + m_swapChain = nullptr; + if (m_deviceNotify != nullptr) { + m_deviceNotify->OnDeviceLost(); + } + CreateDeviceResources(); + CreateWindowSizeDependentResources(); + if (m_deviceNotify != nullptr) { + m_deviceNotify->OnDeviceRestored(); + } +} + +// Register our DeviceNotify to be informed on device lost and creation. +void DeviceResources::RegisterDeviceNotify(IDeviceNotify* deviceNotify) { + m_deviceNotify = deviceNotify; +} + +// Call this method when the app suspends. It provides a hint to the driver that the app +// is entering an idle state and that temporary buffers can be reclaimed for use by other apps. +void DeviceResources::Trim() { + m_d3dDevice.as()->Trim(); +} + +// Present the contents of the swap chain to the screen. +void DeviceResources::Present() { + + // MSAA resolve if needed + if (_sapp.sample_count > 1) { + m_d3dContext->ResolveSubresource(m_d3dRenderTarget.get(), 0, m_d3dMSAARenderTarget.get(), 0, DXGI_FORMAT_B8G8R8A8_UNORM); + m_d3dContext->DiscardView1(m_d3dMSAARenderTargetView.get(), nullptr, 0); + } + + // The first argument instructs DXGI to block until VSync, putting the application + // to sleep until the next VSync. This ensures we don't waste any cycles rendering + // frames that will never be displayed to the screen. + DXGI_PRESENT_PARAMETERS parameters = { 0 }; + HRESULT hr = m_swapChain->Present1(1, 0, &parameters); + + // Discard the contents of the render target. + // This is a valid operation only when the existing contents will be entirely + // overwritten. If dirty or scroll rects are used, this call should be removed.
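+ // (DiscardView1() is only a hint that the view's current contents can be thrown away; skipping the reload of the old contents typically saves bandwidth, especially on tile-based GPUs.)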
+ m_d3dContext->DiscardView1(m_d3dRenderTargetView.get(), nullptr, 0); + + // Discard the contents of the depth stencil. + m_d3dContext->DiscardView1(m_d3dDepthStencilView.get(), nullptr, 0); + + // If the device was removed either by a disconnection or a driver upgrade, we + // must recreate all device resources. + if (hr == DXGI_ERROR_DEVICE_REMOVED || hr == DXGI_ERROR_DEVICE_RESET) { + HandleDeviceLost(); + } + else { + winrt::check_hresult(hr); + } +} + +// This method determines the rotation between the display device's native orientation and the +// current display orientation. +DXGI_MODE_ROTATION DeviceResources::ComputeDisplayRotation() { + DXGI_MODE_ROTATION rotation = DXGI_MODE_ROTATION_UNSPECIFIED; + + // Note: NativeOrientation can only be Landscape or Portrait even though + // the DisplayOrientations enum has other values. + switch (m_nativeOrientation) { + case winrt::Windows::Graphics::Display::DisplayOrientations::Landscape: + switch (m_currentOrientation) { + case winrt::Windows::Graphics::Display::DisplayOrientations::Landscape: + rotation = DXGI_MODE_ROTATION_IDENTITY; + break; + + case winrt::Windows::Graphics::Display::DisplayOrientations::Portrait: + rotation = DXGI_MODE_ROTATION_ROTATE270; + break; + + case winrt::Windows::Graphics::Display::DisplayOrientations::LandscapeFlipped: + rotation = DXGI_MODE_ROTATION_ROTATE180; + break; + + case winrt::Windows::Graphics::Display::DisplayOrientations::PortraitFlipped: + rotation = DXGI_MODE_ROTATION_ROTATE90; + break; + } + break; + + case winrt::Windows::Graphics::Display::DisplayOrientations::Portrait: + switch (m_currentOrientation) { + case winrt::Windows::Graphics::Display::DisplayOrientations::Landscape: + rotation = DXGI_MODE_ROTATION_ROTATE90; + break; + + case winrt::Windows::Graphics::Display::DisplayOrientations::Portrait: + rotation = DXGI_MODE_ROTATION_IDENTITY; + break; + + case winrt::Windows::Graphics::Display::DisplayOrientations::LandscapeFlipped: + rotation = DXGI_MODE_ROTATION_ROTATE270; + break; + + case winrt::Windows::Graphics::Display::DisplayOrientations::PortraitFlipped: + rotation = DXGI_MODE_ROTATION_ROTATE180; + break; + } + break; + } + return rotation; +} + +// Check for SDK Layer support. +bool DeviceResources::SdkLayersAvailable() { + #if defined(_DEBUG) + HRESULT hr = D3D11CreateDevice( + nullptr, + D3D_DRIVER_TYPE_NULL, // There is no need to create a real hardware device. + 0, + D3D11_CREATE_DEVICE_DEBUG, // Check for the SDK layers. + nullptr, // Any feature level will do. + 0, + D3D11_SDK_VERSION, // Always set this to D3D11_SDK_VERSION for Microsoft Store apps. + nullptr, // No need to keep the D3D device reference. + nullptr, // No need to know the feature level. + nullptr // No need to keep the D3D device context reference. + ); + return SUCCEEDED(hr); + #else + return false; + #endif +} + +// The first method called when the IFrameworkView is being created. +void App::Initialize(winrt::Windows::ApplicationModel::Core::CoreApplicationView const& applicationView) { + // Register event handlers for app lifecycle. This example includes Activated, so that we + // can make the CoreWindow active and start rendering on the window. + applicationView.Activated({ this, &App::OnActivated }); + + winrt::Windows::ApplicationModel::Core::CoreApplication::Suspending({ this, &App::OnSuspending }); + winrt::Windows::ApplicationModel::Core::CoreApplication::Resuming({ this, &App::OnResuming }); + + // At this point we have access to the device. + // We can create the device-dependent resources. 
+ m_deviceResources = std::make_unique(); +} + +// Called when the CoreWindow object is created (or re-created). +void App::SetWindow(winrt::Windows::UI::Core::CoreWindow const& window) { + window.SizeChanged({ this, &App::OnWindowSizeChanged }); + window.VisibilityChanged({ this, &App::OnVisibilityChanged }); + + window.KeyDown({ this, &App::OnKeyDown }); + window.KeyUp({ this, &App::OnKeyUp }); + window.CharacterReceived({ this, &App::OnCharacterReceived }); + + window.PointerEntered({ this, &App::OnPointerEntered }); + window.PointerExited({ this, &App::OnPointerExited }); + window.PointerPressed({ this, &App::OnPointerPressed }); + window.PointerReleased({ this, &App::OnPointerReleased }); + window.PointerMoved({ this, &App::OnPointerMoved }); + window.PointerWheelChanged({ this, &App::OnPointerWheelChanged }); + + auto currentDisplayInformation = winrt::Windows::Graphics::Display::DisplayInformation::GetForCurrentView(); + + currentDisplayInformation.DpiChanged({ this, &App::OnDpiChanged }); + currentDisplayInformation.OrientationChanged({ this, &App::OnOrientationChanged }); + winrt::Windows::Graphics::Display::DisplayInformation::DisplayContentsInvalidated({ this, &App::OnDisplayContentsInvalidated }); + + winrt::Windows::UI::Core::SystemNavigationManager::GetForCurrentView().BackRequested({ this, &App::OnBackRequested }); + + m_deviceResources->SetWindow(window); +} + +// Initializes scene resources, or loads a previously saved app state. +void App::Load(winrt::hstring const& entryPoint) { + _SOKOL_UNUSED(entryPoint); +} + +// This method is called after the window becomes active. +void App::Run() { + // NOTE: UWP will simply terminate an application, it's not possible to detect when an application is being closed + while (true) { + if (m_windowVisible) { + winrt::Windows::UI::Core::CoreWindow::GetForCurrentThread().Dispatcher().ProcessEvents(winrt::Windows::UI::Core::CoreProcessEventsOption::ProcessAllIfPresent); + _sapp_frame(); + m_deviceResources->Present(); + } + else { + winrt::Windows::UI::Core::CoreWindow::GetForCurrentThread().Dispatcher().ProcessEvents(winrt::Windows::UI::Core::CoreProcessEventsOption::ProcessOneAndAllPending); + } + } +} + +// Required for IFrameworkView. +// Terminate events do not cause Uninitialize to be called. It will be called if your IFrameworkView +// class is torn down while the app is in the foreground. +void App::Uninitialize() { + // empty +} + +// Application lifecycle event handlers. +void App::OnActivated(winrt::Windows::ApplicationModel::Core::CoreApplicationView const& applicationView, winrt::Windows::ApplicationModel::Activation::IActivatedEventArgs const& args) { + _SOKOL_UNUSED(args); + _SOKOL_UNUSED(applicationView); + auto appView = winrt::Windows::UI::ViewManagement::ApplicationView::GetForCurrentView(); + auto targetSize = winrt::Windows::Foundation::Size((float)_sapp.desc.width, (float)_sapp.desc.height); + appView.SetPreferredMinSize(targetSize); + appView.TryResizeView(targetSize); + + // Disabling this since it can only append the title to the app name (Title - Appname). + // There's no way of just setting a string to be the window title. + //appView.Title(_sapp.window_title_wide); + + // Run() won't start until the CoreWindow is activated. 
+ winrt::Windows::UI::Core::CoreWindow::GetForCurrentThread().Activate(); + if (_sapp.desc.fullscreen) { + appView.TryEnterFullScreenMode(); + } + _sapp.fullscreen = appView.IsFullScreenMode(); +} + +void App::OnSuspending(winrt::Windows::Foundation::IInspectable const& sender, winrt::Windows::ApplicationModel::SuspendingEventArgs const& args) { + _SOKOL_UNUSED(sender); + _SOKOL_UNUSED(args); + _sapp_win32_uwp_app_event(SAPP_EVENTTYPE_SUSPENDED); +} + +void App::OnResuming(winrt::Windows::Foundation::IInspectable const& sender, winrt::Windows::Foundation::IInspectable const& args) { + _SOKOL_UNUSED(args); + _SOKOL_UNUSED(sender); + _sapp_win32_uwp_app_event(SAPP_EVENTTYPE_RESUMED); +} + +void App::OnWindowSizeChanged(winrt::Windows::UI::Core::CoreWindow const& sender, winrt::Windows::UI::Core::WindowSizeChangedEventArgs const& args) { + _SOKOL_UNUSED(args); + m_deviceResources->SetLogicalSize(winrt::Windows::Foundation::Size(sender.Bounds().Width, sender.Bounds().Height)); + _sapp_win32_uwp_app_event(SAPP_EVENTTYPE_RESIZED); +} + +void App::OnVisibilityChanged(winrt::Windows::UI::Core::CoreWindow const& sender, winrt::Windows::UI::Core::VisibilityChangedEventArgs const& args) { + _SOKOL_UNUSED(sender); + m_windowVisible = args.Visible(); + _sapp_win32_uwp_app_event(m_windowVisible ? SAPP_EVENTTYPE_RESTORED : SAPP_EVENTTYPE_ICONIFIED); +} + +void App::OnBackRequested(winrt::Windows::Foundation::IInspectable const& sender, winrt::Windows::UI::Core::BackRequestedEventArgs const& args) { + _SOKOL_UNUSED(sender); + args.Handled(true); +} + +void App::OnKeyDown(winrt::Windows::UI::Core::CoreWindow const& sender, winrt::Windows::UI::Core::KeyEventArgs const& args) { + auto status = args.KeyStatus(); + _sapp_uwp_key_event(SAPP_EVENTTYPE_KEY_DOWN, sender, args); +} + +void App::OnKeyUp(winrt::Windows::UI::Core::CoreWindow const& sender, winrt::Windows::UI::Core::KeyEventArgs const& args) { + auto status = args.KeyStatus(); + _sapp_uwp_key_event(SAPP_EVENTTYPE_KEY_UP, sender, args); +} + +void App::OnCharacterReceived(winrt::Windows::UI::Core::CoreWindow const& sender, winrt::Windows::UI::Core::CharacterReceivedEventArgs const& args) { + _sapp_uwp_char_event(args.KeyCode(), args.KeyStatus().WasKeyDown, sender); +} + +void App::OnPointerEntered(winrt::Windows::UI::Core::CoreWindow const& sender, winrt::Windows::UI::Core::PointerEventArgs const& args) { + _SOKOL_UNUSED(args); + _sapp.uwp.mouse_tracked = true; + _sapp_uwp_mouse_event(SAPP_EVENTTYPE_MOUSE_ENTER, SAPP_MOUSEBUTTON_INVALID, sender); +} + +void App::OnPointerExited(winrt::Windows::UI::Core::CoreWindow const& sender, winrt::Windows::UI::Core::PointerEventArgs const& args) { + _SOKOL_UNUSED(args); + _sapp.uwp.mouse_tracked = false; + _sapp_uwp_mouse_event(SAPP_EVENTTYPE_MOUSE_LEAVE, SAPP_MOUSEBUTTON_INVALID, sender); +} + +void App::OnPointerPressed(winrt::Windows::UI::Core::CoreWindow const& sender, winrt::Windows::UI::Core::PointerEventArgs const& args) { + _sapp_uwp_extract_mouse_button_events(sender, args); +} + +// NOTE: for some reason this event handler is never called?? 
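+// (Button releases are presumably still detected because _sapp_uwp_extract_mouse_button_events() is also called from OnPointerPressed() and, as a workaround, from OnPointerMoved() below; see the "HACK" comment there.)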
+void App::OnPointerReleased(winrt::Windows::UI::Core::CoreWindow const& sender, winrt::Windows::UI::Core::PointerEventArgs const& args) { + _sapp_uwp_extract_mouse_button_events(sender, args); +} + +void App::OnPointerMoved(winrt::Windows::UI::Core::CoreWindow const& sender, winrt::Windows::UI::Core::PointerEventArgs const& args) { + auto position = args.CurrentPoint().Position(); + const float new_x = (float)(int)(position.X * _sapp.uwp.dpi.mouse_scale + 0.5f); + const float new_y = (float)(int)(position.Y * _sapp.uwp.dpi.mouse_scale + 0.5f); + // don't update dx/dy in the very first event + if (_sapp.mouse.pos_valid) { + _sapp.mouse.dx = new_x - _sapp.mouse.x; + _sapp.mouse.dy = new_y - _sapp.mouse.y; + } + _sapp.mouse.x = new_x; + _sapp.mouse.y = new_y; + _sapp.mouse.pos_valid = true; + if (!_sapp.uwp.mouse_tracked) { + _sapp.uwp.mouse_tracked = true; + _sapp_uwp_mouse_event(SAPP_EVENTTYPE_MOUSE_ENTER, SAPP_MOUSEBUTTON_INVALID, sender); + } + _sapp_uwp_mouse_event(SAPP_EVENTTYPE_MOUSE_MOVE, SAPP_MOUSEBUTTON_INVALID, sender); + + // HACK for detecting multiple mouse button presses + _sapp_uwp_extract_mouse_button_events(sender, args); +} + +void App::OnPointerWheelChanged(winrt::Windows::UI::Core::CoreWindow const& sender, winrt::Windows::UI::Core::PointerEventArgs const& args) { + auto properties = args.CurrentPoint().Properties(); + _sapp_uwp_scroll_event((float)properties.MouseWheelDelta(), properties.IsHorizontalMouseWheel(), sender); +} + +void App::OnDpiChanged(winrt::Windows::Graphics::Display::DisplayInformation const& sender, winrt::Windows::Foundation::IInspectable const& args) { + // NOTE: UNTESTED + _SOKOL_UNUSED(args); + m_deviceResources->SetDpi(sender.LogicalDpi()); + _sapp_win32_uwp_app_event(SAPP_EVENTTYPE_RESIZED); +} + +void App::OnOrientationChanged(winrt::Windows::Graphics::Display::DisplayInformation const& sender, winrt::Windows::Foundation::IInspectable const& args) { + // NOTE: UNTESTED + _SOKOL_UNUSED(args); + m_deviceResources->SetCurrentOrientation(sender.CurrentOrientation()); + _sapp_win32_uwp_app_event(SAPP_EVENTTYPE_RESIZED); +} + +void App::OnDisplayContentsInvalidated(winrt::Windows::Graphics::Display::DisplayInformation const& sender, winrt::Windows::Foundation::IInspectable const& args) { + // NOTE: UNTESTED + _SOKOL_UNUSED(args); + _SOKOL_UNUSED(sender); + m_deviceResources->ValidateDevice(); +} + +} /* End empty namespace */ + +_SOKOL_PRIVATE void _sapp_uwp_run(const sapp_desc* desc) { + _sapp_init_state(desc); + _sapp_win32_uwp_init_keytable(); + _sapp_win32_uwp_utf8_to_wide(_sapp.window_title, _sapp.window_title_wide, sizeof(_sapp.window_title_wide)); + winrt::Windows::ApplicationModel::Core::CoreApplication::Run(winrt::make()); +} + +#if !defined(SOKOL_NO_ENTRY) +#if defined(UNICODE) +int WINAPI wWinMain(_In_ HINSTANCE hInstance, _In_opt_ HINSTANCE hPrevInstance, _In_ LPWSTR lpCmdLine, _In_ int nCmdShow) { +#else +int WINAPI WinMain(_In_ HINSTANCE hInstance, _In_opt_ HINSTANCE hPrevInstance, _In_ LPSTR lpCmdLine, _In_ int nCmdShow) { +#endif + _SOKOL_UNUSED(hInstance); + _SOKOL_UNUSED(hPrevInstance); + _SOKOL_UNUSED(lpCmdLine); + _SOKOL_UNUSED(nCmdShow); + sapp_desc desc = sokol_main(0, nullptr); + _sapp_uwp_run(&desc); + return 0; +} +#endif /* SOKOL_NO_ENTRY */ +#endif /* _SAPP_UWP */ + +/*== Android ================================================================*/ +#if defined(_SAPP_ANDROID) + +/* android loop thread */ +_SOKOL_PRIVATE bool _sapp_android_init_egl(void) { + SOKOL_ASSERT(_sapp.android.display == EGL_NO_DISPLAY); + 
SOKOL_ASSERT(_sapp.android.context == EGL_NO_CONTEXT); + + EGLDisplay display = eglGetDisplay(EGL_DEFAULT_DISPLAY); + if (display == EGL_NO_DISPLAY) { + return false; + } + if (eglInitialize(display, NULL, NULL) == EGL_FALSE) { + return false; + } + + EGLint alpha_size = _sapp.desc.alpha ? 8 : 0; + const EGLint cfg_attributes[] = { + EGL_SURFACE_TYPE, EGL_WINDOW_BIT, + EGL_RED_SIZE, 8, + EGL_GREEN_SIZE, 8, + EGL_BLUE_SIZE, 8, + EGL_ALPHA_SIZE, alpha_size, + EGL_DEPTH_SIZE, 16, + EGL_STENCIL_SIZE, 0, + EGL_NONE, + }; + EGLConfig available_cfgs[32]; + EGLint cfg_count; + eglChooseConfig(display, cfg_attributes, available_cfgs, 32, &cfg_count); + SOKOL_ASSERT(cfg_count > 0); + SOKOL_ASSERT(cfg_count <= 32); + + /* find config with 8-bit rgb buffer if available, ndk sample does not trust egl spec */ + EGLConfig config; + bool exact_cfg_found = false; + for (int i = 0; i < cfg_count; ++i) { + EGLConfig c = available_cfgs[i]; + EGLint r, g, b, a, d; + if (eglGetConfigAttrib(display, c, EGL_RED_SIZE, &r) == EGL_TRUE && + eglGetConfigAttrib(display, c, EGL_GREEN_SIZE, &g) == EGL_TRUE && + eglGetConfigAttrib(display, c, EGL_BLUE_SIZE, &b) == EGL_TRUE && + eglGetConfigAttrib(display, c, EGL_ALPHA_SIZE, &a) == EGL_TRUE && + eglGetConfigAttrib(display, c, EGL_DEPTH_SIZE, &d) == EGL_TRUE && + r == 8 && g == 8 && b == 8 && (alpha_size == 0 || a == alpha_size) && d == 16) { + exact_cfg_found = true; + config = c; + break; + } + } + if (!exact_cfg_found) { + config = available_cfgs[0]; + } + + EGLint ctx_attributes[] = { + #if defined(SOKOL_GLES3) + EGL_CONTEXT_CLIENT_VERSION, _sapp.desc.gl_force_gles2 ? 2 : 3, + #else + EGL_CONTEXT_CLIENT_VERSION, 2, + #endif + EGL_NONE, + }; + EGLContext context = eglCreateContext(display, config, EGL_NO_CONTEXT, ctx_attributes); + if (context == EGL_NO_CONTEXT) { + return false; + } + + _sapp.android.config = config; + _sapp.android.display = display; + _sapp.android.context = context; + return true; +} + +_SOKOL_PRIVATE void _sapp_android_cleanup_egl(void) { + if (_sapp.android.display != EGL_NO_DISPLAY) { + eglMakeCurrent(_sapp.android.display, EGL_NO_SURFACE, EGL_NO_SURFACE, EGL_NO_CONTEXT); + if (_sapp.android.surface != EGL_NO_SURFACE) { + SOKOL_LOG("Destroying egl surface"); + eglDestroySurface(_sapp.android.display, _sapp.android.surface); + _sapp.android.surface = EGL_NO_SURFACE; + } + if (_sapp.android.context != EGL_NO_CONTEXT) { + SOKOL_LOG("Destroying egl context"); + eglDestroyContext(_sapp.android.display, _sapp.android.context); + _sapp.android.context = EGL_NO_CONTEXT; + } + SOKOL_LOG("Terminating egl display"); + eglTerminate(_sapp.android.display); + _sapp.android.display = EGL_NO_DISPLAY; + } +} + +_SOKOL_PRIVATE bool _sapp_android_init_egl_surface(ANativeWindow* window) { + SOKOL_ASSERT(_sapp.android.display != EGL_NO_DISPLAY); + SOKOL_ASSERT(_sapp.android.context != EGL_NO_CONTEXT); + SOKOL_ASSERT(_sapp.android.surface == EGL_NO_SURFACE); + SOKOL_ASSERT(window); + + /* TODO: set window flags */ + /* ANativeActivity_setWindowFlags(activity, AWINDOW_FLAG_KEEP_SCREEN_ON, 0); */ + + /* create egl surface and make it current */ + EGLSurface surface = eglCreateWindowSurface(_sapp.android.display, _sapp.android.config, window, NULL); + if (surface == EGL_NO_SURFACE) { + return false; + } + if (eglMakeCurrent(_sapp.android.display, surface, surface, _sapp.android.context) == EGL_FALSE) { + return false; + } + _sapp.android.surface = surface; + return true; +} + +_SOKOL_PRIVATE void _sapp_android_cleanup_egl_surface(void) { + if (_sapp.android.display == 
EGL_NO_DISPLAY) { + return; + } + eglMakeCurrent(_sapp.android.display, EGL_NO_SURFACE, EGL_NO_SURFACE, EGL_NO_CONTEXT); + if (_sapp.android.surface != EGL_NO_SURFACE) { + eglDestroySurface(_sapp.android.display, _sapp.android.surface); + _sapp.android.surface = EGL_NO_SURFACE; + } +} + +_SOKOL_PRIVATE void _sapp_android_app_event(sapp_event_type type) { + if (_sapp_events_enabled()) { + _sapp_init_event(type); + SOKOL_LOG("event_cb()"); + _sapp_call_event(&_sapp.event); + } +} + +_SOKOL_PRIVATE void _sapp_android_update_dimensions(ANativeWindow* window, bool force_update) { + SOKOL_ASSERT(_sapp.android.display != EGL_NO_DISPLAY); + SOKOL_ASSERT(_sapp.android.context != EGL_NO_CONTEXT); + SOKOL_ASSERT(_sapp.android.surface != EGL_NO_SURFACE); + SOKOL_ASSERT(window); + + const int32_t win_w = ANativeWindow_getWidth(window); + const int32_t win_h = ANativeWindow_getHeight(window); + SOKOL_ASSERT(win_w >= 0 && win_h >= 0); + const bool win_changed = (win_w != _sapp.window_width) || (win_h != _sapp.window_height); + _sapp.window_width = win_w; + _sapp.window_height = win_h; + if (win_changed || force_update) { + if (!_sapp.desc.high_dpi) { + const int32_t buf_w = win_w / 2; + const int32_t buf_h = win_h / 2; + EGLint format; + EGLBoolean egl_result = eglGetConfigAttrib(_sapp.android.display, _sapp.android.config, EGL_NATIVE_VISUAL_ID, &format); + SOKOL_ASSERT(egl_result == EGL_TRUE); + /* NOTE: calling ANativeWindow_setBuffersGeometry() with the same dimensions + as the ANativeWindow size results in weird display artefacts, that's + why it's only called when the buffer geometry is different from + the window size + */ + int32_t result = ANativeWindow_setBuffersGeometry(window, buf_w, buf_h, format); + SOKOL_ASSERT(result == 0); + } + } + + /* query surface size */ + EGLint fb_w, fb_h; + EGLBoolean egl_result_w = eglQuerySurface(_sapp.android.display, _sapp.android.surface, EGL_WIDTH, &fb_w); + EGLBoolean egl_result_h = eglQuerySurface(_sapp.android.display, _sapp.android.surface, EGL_HEIGHT, &fb_h); + SOKOL_ASSERT(egl_result_w == EGL_TRUE); + SOKOL_ASSERT(egl_result_h == EGL_TRUE); + const bool fb_changed = (fb_w != _sapp.framebuffer_width) || (fb_h != _sapp.framebuffer_height); + _sapp.framebuffer_width = fb_w; + _sapp.framebuffer_height = fb_h; + _sapp.dpi_scale = (float)_sapp.framebuffer_width / (float)_sapp.window_width; + if (win_changed || fb_changed || force_update) { + if (!_sapp.first_frame) { + SOKOL_LOG("SAPP_EVENTTYPE_RESIZED"); + _sapp_android_app_event(SAPP_EVENTTYPE_RESIZED); + } + } +} + +_SOKOL_PRIVATE void _sapp_android_cleanup(void) { + SOKOL_LOG("Cleaning up"); + if (_sapp.android.surface != EGL_NO_SURFACE) { + /* egl context is bound, cleanup gracefully */ + if (_sapp.init_called && !_sapp.cleanup_called) { + SOKOL_LOG("cleanup_cb()"); + _sapp_call_cleanup(); + } + } + /* always try to cleanup by destroying egl context */ + _sapp_android_cleanup_egl(); +} + +_SOKOL_PRIVATE void _sapp_android_shutdown(void) { + /* try to cleanup while we still have a surface and can call cleanup_cb() */ + _sapp_android_cleanup(); + /* request exit */ + ANativeActivity_finish(_sapp.android.activity); +} + +_SOKOL_PRIVATE void _sapp_android_frame(void) { + SOKOL_ASSERT(_sapp.android.display != EGL_NO_DISPLAY); + SOKOL_ASSERT(_sapp.android.context != EGL_NO_CONTEXT); + SOKOL_ASSERT(_sapp.android.surface != EGL_NO_SURFACE); + _sapp_android_update_dimensions(_sapp.android.current.window, false); + _sapp_frame(); + eglSwapBuffers(_sapp.android.display, _sapp.android.surface); +} + +_SOKOL_PRIVATE 
bool _sapp_android_touch_event(const AInputEvent* e) { + if (AInputEvent_getType(e) != AINPUT_EVENT_TYPE_MOTION) { + return false; + } + if (!_sapp_events_enabled()) { + return false; + } + int32_t action_idx = AMotionEvent_getAction(e); + int32_t action = action_idx & AMOTION_EVENT_ACTION_MASK; + sapp_event_type type = SAPP_EVENTTYPE_INVALID; + switch (action) { + case AMOTION_EVENT_ACTION_DOWN: + SOKOL_LOG("Touch: down"); + case AMOTION_EVENT_ACTION_POINTER_DOWN: + SOKOL_LOG("Touch: ptr down"); + type = SAPP_EVENTTYPE_TOUCHES_BEGAN; + break; + case AMOTION_EVENT_ACTION_MOVE: + type = SAPP_EVENTTYPE_TOUCHES_MOVED; + break; + case AMOTION_EVENT_ACTION_UP: + SOKOL_LOG("Touch: up"); + case AMOTION_EVENT_ACTION_POINTER_UP: + SOKOL_LOG("Touch: ptr up"); + type = SAPP_EVENTTYPE_TOUCHES_ENDED; + break; + case AMOTION_EVENT_ACTION_CANCEL: + SOKOL_LOG("Touch: cancel"); + type = SAPP_EVENTTYPE_TOUCHES_CANCELLED; + break; + default: + break; + } + if (type == SAPP_EVENTTYPE_INVALID) { + return false; + } + int32_t idx = action_idx >> AMOTION_EVENT_ACTION_POINTER_INDEX_SHIFT; + _sapp_init_event(type); + _sapp.event.num_touches = (int)AMotionEvent_getPointerCount(e); + if (_sapp.event.num_touches > SAPP_MAX_TOUCHPOINTS) { + _sapp.event.num_touches = SAPP_MAX_TOUCHPOINTS; + } + for (int32_t i = 0; i < _sapp.event.num_touches; i++) { + sapp_touchpoint* dst = &_sapp.event.touches[i]; + dst->identifier = (uintptr_t)AMotionEvent_getPointerId(e, (size_t)i); + dst->pos_x = (AMotionEvent_getRawX(e, (size_t)i) / _sapp.window_width) * _sapp.framebuffer_width; + dst->pos_y = (AMotionEvent_getRawY(e, (size_t)i) / _sapp.window_height) * _sapp.framebuffer_height; + + if (action == AMOTION_EVENT_ACTION_POINTER_DOWN || + action == AMOTION_EVENT_ACTION_POINTER_UP) { + dst->changed = (i == idx); + } else { + dst->changed = true; + } + } + _sapp_call_event(&_sapp.event); + return true; +} + +_SOKOL_PRIVATE bool _sapp_android_key_event(const AInputEvent* e) { + if (AInputEvent_getType(e) != AINPUT_EVENT_TYPE_KEY) { + return false; + } + if (AKeyEvent_getKeyCode(e) == AKEYCODE_BACK) { + /* FIXME: this should be hooked into a "really quit?" 
mechanism + so the app can ask the user for confirmation, this is currently + generally missing in sokol_app.h + */ + _sapp_android_shutdown(); + return true; + } + return false; +} + +_SOKOL_PRIVATE int _sapp_android_input_cb(int fd, int events, void* data) { + if ((events & ALOOPER_EVENT_INPUT) == 0) { + SOKOL_LOG("_sapp_android_input_cb() encountered unsupported event"); + return 1; + } + SOKOL_ASSERT(_sapp.android.current.input); + AInputEvent* event = NULL; + while (AInputQueue_getEvent(_sapp.android.current.input, &event) >= 0) { + if (AInputQueue_preDispatchEvent(_sapp.android.current.input, event) != 0) { + continue; + } + int32_t handled = 0; + if (_sapp_android_touch_event(event) || _sapp_android_key_event(event)) { + handled = 1; + } + AInputQueue_finishEvent(_sapp.android.current.input, event, handled); + } + return 1; +} + +_SOKOL_PRIVATE int _sapp_android_main_cb(int fd, int events, void* data) { + if ((events & ALOOPER_EVENT_INPUT) == 0) { + SOKOL_LOG("_sapp_android_main_cb() encountered unsupported event"); + return 1; + } + + _sapp_android_msg_t msg; + if (read(fd, &msg, sizeof(msg)) != sizeof(msg)) { + SOKOL_LOG("Could not write to read_from_main_fd"); + return 1; + } + + pthread_mutex_lock(&_sapp.android.pt.mutex); + switch (msg) { + case _SOKOL_ANDROID_MSG_CREATE: + { + SOKOL_LOG("MSG_CREATE"); + SOKOL_ASSERT(!_sapp.valid); + bool result = _sapp_android_init_egl(); + SOKOL_ASSERT(result); + _sapp.valid = true; + _sapp.android.has_created = true; + } + break; + case _SOKOL_ANDROID_MSG_RESUME: + SOKOL_LOG("MSG_RESUME"); + _sapp.android.has_resumed = true; + _sapp_android_app_event(SAPP_EVENTTYPE_RESUMED); + break; + case _SOKOL_ANDROID_MSG_PAUSE: + SOKOL_LOG("MSG_PAUSE"); + _sapp.android.has_resumed = false; + _sapp_android_app_event(SAPP_EVENTTYPE_SUSPENDED); + break; + case _SOKOL_ANDROID_MSG_FOCUS: + SOKOL_LOG("MSG_FOCUS"); + _sapp.android.has_focus = true; + break; + case _SOKOL_ANDROID_MSG_NO_FOCUS: + SOKOL_LOG("MSG_NO_FOCUS"); + _sapp.android.has_focus = false; + break; + case _SOKOL_ANDROID_MSG_SET_NATIVE_WINDOW: + SOKOL_LOG("MSG_SET_NATIVE_WINDOW"); + if (_sapp.android.current.window != _sapp.android.pending.window) { + if (_sapp.android.current.window != NULL) { + _sapp_android_cleanup_egl_surface(); + } + if (_sapp.android.pending.window != NULL) { + SOKOL_LOG("Creating egl surface ..."); + if (_sapp_android_init_egl_surface(_sapp.android.pending.window)) { + SOKOL_LOG("... ok!"); + _sapp_android_update_dimensions(_sapp.android.pending.window, true); + } else { + SOKOL_LOG("... 
failed!"); + _sapp_android_shutdown(); + } + } + } + _sapp.android.current.window = _sapp.android.pending.window; + break; + case _SOKOL_ANDROID_MSG_SET_INPUT_QUEUE: + SOKOL_LOG("MSG_SET_INPUT_QUEUE"); + if (_sapp.android.current.input != _sapp.android.pending.input) { + if (_sapp.android.current.input != NULL) { + AInputQueue_detachLooper(_sapp.android.current.input); + } + if (_sapp.android.pending.input != NULL) { + AInputQueue_attachLooper( + _sapp.android.pending.input, + _sapp.android.looper, + ALOOPER_POLL_CALLBACK, + _sapp_android_input_cb, + NULL); /* data */ + } + } + _sapp.android.current.input = _sapp.android.pending.input; + break; + case _SOKOL_ANDROID_MSG_DESTROY: + SOKOL_LOG("MSG_DESTROY"); + _sapp_android_cleanup(); + _sapp.valid = false; + _sapp.android.is_thread_stopping = true; + break; + default: + SOKOL_LOG("Unknown msg type received"); + break; + } + pthread_cond_broadcast(&_sapp.android.pt.cond); /* signal "received" */ + pthread_mutex_unlock(&_sapp.android.pt.mutex); + return 1; +} + +_SOKOL_PRIVATE bool _sapp_android_should_update(void) { + bool is_in_front = _sapp.android.has_resumed && _sapp.android.has_focus; + bool has_surface = _sapp.android.surface != EGL_NO_SURFACE; + return is_in_front && has_surface; +} + +_SOKOL_PRIVATE void _sapp_android_show_keyboard(bool shown) { + SOKOL_ASSERT(_sapp.valid); + /* This seems to be broken in the NDK, but there is (a very cumbersome) workaround... */ + if (shown) { + SOKOL_LOG("Showing keyboard"); + ANativeActivity_showSoftInput(_sapp.android.activity, ANATIVEACTIVITY_SHOW_SOFT_INPUT_FORCED); + } else { + SOKOL_LOG("Hiding keyboard"); + ANativeActivity_hideSoftInput(_sapp.android.activity, ANATIVEACTIVITY_HIDE_SOFT_INPUT_NOT_ALWAYS); + } +} + +_SOKOL_PRIVATE void* _sapp_android_loop(void* arg) { + _SOKOL_UNUSED(arg); + SOKOL_LOG("Loop thread started"); + + _sapp.android.looper = ALooper_prepare(0 /* or ALOOPER_PREPARE_ALLOW_NON_CALLBACKS*/); + ALooper_addFd(_sapp.android.looper, + _sapp.android.pt.read_from_main_fd, + ALOOPER_POLL_CALLBACK, + ALOOPER_EVENT_INPUT, + _sapp_android_main_cb, + NULL); /* data */ + + /* signal start to main thread */ + pthread_mutex_lock(&_sapp.android.pt.mutex); + _sapp.android.is_thread_started = true; + pthread_cond_broadcast(&_sapp.android.pt.cond); + pthread_mutex_unlock(&_sapp.android.pt.mutex); + + /* main loop */ + while (!_sapp.android.is_thread_stopping) { + /* sokol frame */ + if (_sapp_android_should_update()) { + _sapp_android_frame(); + } + + /* process all events (or stop early if app is requested to quit) */ + bool process_events = true; + while (process_events && !_sapp.android.is_thread_stopping) { + bool block_until_event = !_sapp.android.is_thread_stopping && !_sapp_android_should_update(); + process_events = ALooper_pollOnce(block_until_event ? -1 : 0, NULL, NULL, NULL) == ALOOPER_POLL_CALLBACK; + } + } + + /* cleanup thread */ + if (_sapp.android.current.input != NULL) { + AInputQueue_detachLooper(_sapp.android.current.input); + } + + /* the following causes heap corruption on exit, why?? 
+ ALooper_removeFd(_sapp.android.looper, _sapp.android.pt.read_from_main_fd); + ALooper_release(_sapp.android.looper);*/ + + /* signal "destroyed" */ + pthread_mutex_lock(&_sapp.android.pt.mutex); + _sapp.android.is_thread_stopped = true; + pthread_cond_broadcast(&_sapp.android.pt.cond); + pthread_mutex_unlock(&_sapp.android.pt.mutex); + SOKOL_LOG("Loop thread done"); + return NULL; +} + +/* android main/ui thread */ +_SOKOL_PRIVATE void _sapp_android_msg(_sapp_android_msg_t msg) { + if (write(_sapp.android.pt.write_from_main_fd, &msg, sizeof(msg)) != sizeof(msg)) { + SOKOL_LOG("Could not write to write_from_main_fd"); + } +} + +_SOKOL_PRIVATE void _sapp_android_on_start(ANativeActivity* activity) { + SOKOL_LOG("NativeActivity onStart()"); +} + +_SOKOL_PRIVATE void _sapp_android_on_resume(ANativeActivity* activity) { + SOKOL_LOG("NativeActivity onResume()"); + _sapp_android_msg(_SOKOL_ANDROID_MSG_RESUME); +} + +_SOKOL_PRIVATE void* _sapp_android_on_save_instance_state(ANativeActivity* activity, size_t* out_size) { + SOKOL_LOG("NativeActivity onSaveInstanceState()"); + *out_size = 0; + return NULL; +} + +_SOKOL_PRIVATE void _sapp_android_on_window_focus_changed(ANativeActivity* activity, int has_focus) { + SOKOL_LOG("NativeActivity onWindowFocusChanged()"); + if (has_focus) { + _sapp_android_msg(_SOKOL_ANDROID_MSG_FOCUS); + } else { + _sapp_android_msg(_SOKOL_ANDROID_MSG_NO_FOCUS); + } +} + +_SOKOL_PRIVATE void _sapp_android_on_pause(ANativeActivity* activity) { + SOKOL_LOG("NativeActivity onPause()"); + _sapp_android_msg(_SOKOL_ANDROID_MSG_PAUSE); +} + +_SOKOL_PRIVATE void _sapp_android_on_stop(ANativeActivity* activity) { + SOKOL_LOG("NativeActivity onStop()"); +} + +_SOKOL_PRIVATE void _sapp_android_msg_set_native_window(ANativeWindow* window) { + pthread_mutex_lock(&_sapp.android.pt.mutex); + _sapp.android.pending.window = window; + _sapp_android_msg(_SOKOL_ANDROID_MSG_SET_NATIVE_WINDOW); + while (_sapp.android.current.window != window) { + pthread_cond_wait(&_sapp.android.pt.cond, &_sapp.android.pt.mutex); + } + pthread_mutex_unlock(&_sapp.android.pt.mutex); +} + +_SOKOL_PRIVATE void _sapp_android_on_native_window_created(ANativeActivity* activity, ANativeWindow* window) { + SOKOL_LOG("NativeActivity onNativeWindowCreated()"); + _sapp_android_msg_set_native_window(window); +} + +_SOKOL_PRIVATE void _sapp_android_on_native_window_destroyed(ANativeActivity* activity, ANativeWindow* window) { + SOKOL_LOG("NativeActivity onNativeWindowDestroyed()"); + _sapp_android_msg_set_native_window(NULL); +} + +_SOKOL_PRIVATE void _sapp_android_msg_set_input_queue(AInputQueue* input) { + pthread_mutex_lock(&_sapp.android.pt.mutex); + _sapp.android.pending.input = input; + _sapp_android_msg(_SOKOL_ANDROID_MSG_SET_INPUT_QUEUE); + while (_sapp.android.current.input != input) { + pthread_cond_wait(&_sapp.android.pt.cond, &_sapp.android.pt.mutex); + } + pthread_mutex_unlock(&_sapp.android.pt.mutex); +} + +_SOKOL_PRIVATE void _sapp_android_on_input_queue_created(ANativeActivity* activity, AInputQueue* queue) { + SOKOL_LOG("NativeActivity onInputQueueCreated()"); + _sapp_android_msg_set_input_queue(queue); +} + +_SOKOL_PRIVATE void _sapp_android_on_input_queue_destroyed(ANativeActivity* activity, AInputQueue* queue) { + SOKOL_LOG("NativeActivity onInputQueueDestroyed()"); + _sapp_android_msg_set_input_queue(NULL); +} + +_SOKOL_PRIVATE void _sapp_android_on_config_changed(ANativeActivity* activity) { + SOKOL_LOG("NativeActivity onConfigurationChanged()"); + /* see android:configChanges in manifest */ +} + 
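+/* NOTE: the two helpers _sapp_android_msg_set_native_window() and _sapp_android_msg_set_input_queue()
+   above hand a pointer from the Android main/UI thread over to the loop thread with the same small
+   blocking handshake; schematically (this mirrors the code above, it is not a new code path):
+
+       pthread_mutex_lock(&_sapp.android.pt.mutex);
+       _sapp.android.pending.window = window;                      // or .input for the input queue
+       _sapp_android_msg(_SOKOL_ANDROID_MSG_SET_NATIVE_WINDOW);    // one enum value written into the pipe
+       while (_sapp.android.current.window != window) {
+           pthread_cond_wait(&_sapp.android.pt.cond, &_sapp.android.pt.mutex);
+       }
+       pthread_mutex_unlock(&_sapp.android.pt.mutex);
+
+   _sapp_android_main_cb() runs on the loop thread (via the ALooper fd callback), reads the message
+   from the pipe, copies 'pending' into 'current' under the same mutex and signals completion with
+   pthread_cond_broadcast().
+*/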
+_SOKOL_PRIVATE void _sapp_android_on_low_memory(ANativeActivity* activity) { + SOKOL_LOG("NativeActivity onLowMemory()"); +} + +_SOKOL_PRIVATE void _sapp_android_on_destroy(ANativeActivity* activity) { + /* + * For some reason even an empty app using nativeactivity.h will crash (WIN DEATH) + * on my device (Moto X 2nd gen) when the app is removed from the task view + * (TaskStackView: onTaskViewDismissed). + * + * However, if ANativeActivity_finish() is explicitly called from for example + * _sapp_android_on_stop(), the crash disappears. Is this a bug in NativeActivity? + */ + SOKOL_LOG("NativeActivity onDestroy()"); + + /* send destroy msg */ + pthread_mutex_lock(&_sapp.android.pt.mutex); + _sapp_android_msg(_SOKOL_ANDROID_MSG_DESTROY); + while (!_sapp.android.is_thread_stopped) { + pthread_cond_wait(&_sapp.android.pt.cond, &_sapp.android.pt.mutex); + } + pthread_mutex_unlock(&_sapp.android.pt.mutex); + + /* clean up main thread */ + pthread_cond_destroy(&_sapp.android.pt.cond); + pthread_mutex_destroy(&_sapp.android.pt.mutex); + + close(_sapp.android.pt.read_from_main_fd); + close(_sapp.android.pt.write_from_main_fd); + + SOKOL_LOG("NativeActivity done"); + + /* this is a bit naughty, but causes a clean restart of the app (static globals are reset) */ + exit(0); +} + +JNIEXPORT +void ANativeActivity_onCreate(ANativeActivity* activity, void* saved_state, size_t saved_state_size) { + SOKOL_LOG("NativeActivity onCreate()"); + + sapp_desc desc = sokol_main(0, NULL); + _sapp_init_state(&desc); + + /* start loop thread */ + _sapp.android.activity = activity; + + int pipe_fd[2]; + if (pipe(pipe_fd) != 0) { + SOKOL_LOG("Could not create thread pipe"); + return; + } + _sapp.android.pt.read_from_main_fd = pipe_fd[0]; + _sapp.android.pt.write_from_main_fd = pipe_fd[1]; + + pthread_mutex_init(&_sapp.android.pt.mutex, NULL); + pthread_cond_init(&_sapp.android.pt.cond, NULL); + + pthread_attr_t attr; + pthread_attr_init(&attr); + pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED); + pthread_create(&_sapp.android.pt.thread, &attr, _sapp_android_loop, 0); + pthread_attr_destroy(&attr); + + /* wait until main loop has started */ + pthread_mutex_lock(&_sapp.android.pt.mutex); + while (!_sapp.android.is_thread_started) { + pthread_cond_wait(&_sapp.android.pt.cond, &_sapp.android.pt.mutex); + } + pthread_mutex_unlock(&_sapp.android.pt.mutex); + + /* send create msg */ + pthread_mutex_lock(&_sapp.android.pt.mutex); + _sapp_android_msg(_SOKOL_ANDROID_MSG_CREATE); + while (!_sapp.android.has_created) { + pthread_cond_wait(&_sapp.android.pt.cond, &_sapp.android.pt.mutex); + } + pthread_mutex_unlock(&_sapp.android.pt.mutex); + + /* register for callbacks */ + activity->callbacks->onStart = _sapp_android_on_start; + activity->callbacks->onResume = _sapp_android_on_resume; + activity->callbacks->onSaveInstanceState = _sapp_android_on_save_instance_state; + activity->callbacks->onWindowFocusChanged = _sapp_android_on_window_focus_changed; + activity->callbacks->onPause = _sapp_android_on_pause; + activity->callbacks->onStop = _sapp_android_on_stop; + activity->callbacks->onDestroy = _sapp_android_on_destroy; + activity->callbacks->onNativeWindowCreated = _sapp_android_on_native_window_created; + /* activity->callbacks->onNativeWindowResized = _sapp_android_on_native_window_resized; */ + /* activity->callbacks->onNativeWindowRedrawNeeded = _sapp_android_on_native_window_redraw_needed; */ + activity->callbacks->onNativeWindowDestroyed = _sapp_android_on_native_window_destroyed; + 
activity->callbacks->onInputQueueCreated = _sapp_android_on_input_queue_created; + activity->callbacks->onInputQueueDestroyed = _sapp_android_on_input_queue_destroyed; + /* activity->callbacks->onContentRectChanged = _sapp_android_on_content_rect_changed; */ + activity->callbacks->onConfigurationChanged = _sapp_android_on_config_changed; + activity->callbacks->onLowMemory = _sapp_android_on_low_memory; + + SOKOL_LOG("NativeActivity successfully created"); + + /* NOT A BUG: do NOT call sapp_discard_state() */ +} + +#endif /* _SAPP_ANDROID */ + +/*== LINUX ==================================================================*/ +#if defined(_SAPP_LINUX) + +/* see GLFW's xkb_unicode.c */ +static const struct _sapp_x11_codepair { + uint16_t keysym; + uint16_t ucs; +} _sapp_x11_keysymtab[] = { + { 0x01a1, 0x0104 }, + { 0x01a2, 0x02d8 }, + { 0x01a3, 0x0141 }, + { 0x01a5, 0x013d }, + { 0x01a6, 0x015a }, + { 0x01a9, 0x0160 }, + { 0x01aa, 0x015e }, + { 0x01ab, 0x0164 }, + { 0x01ac, 0x0179 }, + { 0x01ae, 0x017d }, + { 0x01af, 0x017b }, + { 0x01b1, 0x0105 }, + { 0x01b2, 0x02db }, + { 0x01b3, 0x0142 }, + { 0x01b5, 0x013e }, + { 0x01b6, 0x015b }, + { 0x01b7, 0x02c7 }, + { 0x01b9, 0x0161 }, + { 0x01ba, 0x015f }, + { 0x01bb, 0x0165 }, + { 0x01bc, 0x017a }, + { 0x01bd, 0x02dd }, + { 0x01be, 0x017e }, + { 0x01bf, 0x017c }, + { 0x01c0, 0x0154 }, + { 0x01c3, 0x0102 }, + { 0x01c5, 0x0139 }, + { 0x01c6, 0x0106 }, + { 0x01c8, 0x010c }, + { 0x01ca, 0x0118 }, + { 0x01cc, 0x011a }, + { 0x01cf, 0x010e }, + { 0x01d0, 0x0110 }, + { 0x01d1, 0x0143 }, + { 0x01d2, 0x0147 }, + { 0x01d5, 0x0150 }, + { 0x01d8, 0x0158 }, + { 0x01d9, 0x016e }, + { 0x01db, 0x0170 }, + { 0x01de, 0x0162 }, + { 0x01e0, 0x0155 }, + { 0x01e3, 0x0103 }, + { 0x01e5, 0x013a }, + { 0x01e6, 0x0107 }, + { 0x01e8, 0x010d }, + { 0x01ea, 0x0119 }, + { 0x01ec, 0x011b }, + { 0x01ef, 0x010f }, + { 0x01f0, 0x0111 }, + { 0x01f1, 0x0144 }, + { 0x01f2, 0x0148 }, + { 0x01f5, 0x0151 }, + { 0x01f8, 0x0159 }, + { 0x01f9, 0x016f }, + { 0x01fb, 0x0171 }, + { 0x01fe, 0x0163 }, + { 0x01ff, 0x02d9 }, + { 0x02a1, 0x0126 }, + { 0x02a6, 0x0124 }, + { 0x02a9, 0x0130 }, + { 0x02ab, 0x011e }, + { 0x02ac, 0x0134 }, + { 0x02b1, 0x0127 }, + { 0x02b6, 0x0125 }, + { 0x02b9, 0x0131 }, + { 0x02bb, 0x011f }, + { 0x02bc, 0x0135 }, + { 0x02c5, 0x010a }, + { 0x02c6, 0x0108 }, + { 0x02d5, 0x0120 }, + { 0x02d8, 0x011c }, + { 0x02dd, 0x016c }, + { 0x02de, 0x015c }, + { 0x02e5, 0x010b }, + { 0x02e6, 0x0109 }, + { 0x02f5, 0x0121 }, + { 0x02f8, 0x011d }, + { 0x02fd, 0x016d }, + { 0x02fe, 0x015d }, + { 0x03a2, 0x0138 }, + { 0x03a3, 0x0156 }, + { 0x03a5, 0x0128 }, + { 0x03a6, 0x013b }, + { 0x03aa, 0x0112 }, + { 0x03ab, 0x0122 }, + { 0x03ac, 0x0166 }, + { 0x03b3, 0x0157 }, + { 0x03b5, 0x0129 }, + { 0x03b6, 0x013c }, + { 0x03ba, 0x0113 }, + { 0x03bb, 0x0123 }, + { 0x03bc, 0x0167 }, + { 0x03bd, 0x014a }, + { 0x03bf, 0x014b }, + { 0x03c0, 0x0100 }, + { 0x03c7, 0x012e }, + { 0x03cc, 0x0116 }, + { 0x03cf, 0x012a }, + { 0x03d1, 0x0145 }, + { 0x03d2, 0x014c }, + { 0x03d3, 0x0136 }, + { 0x03d9, 0x0172 }, + { 0x03dd, 0x0168 }, + { 0x03de, 0x016a }, + { 0x03e0, 0x0101 }, + { 0x03e7, 0x012f }, + { 0x03ec, 0x0117 }, + { 0x03ef, 0x012b }, + { 0x03f1, 0x0146 }, + { 0x03f2, 0x014d }, + { 0x03f3, 0x0137 }, + { 0x03f9, 0x0173 }, + { 0x03fd, 0x0169 }, + { 0x03fe, 0x016b }, + { 0x047e, 0x203e }, + { 0x04a1, 0x3002 }, + { 0x04a2, 0x300c }, + { 0x04a3, 0x300d }, + { 0x04a4, 0x3001 }, + { 0x04a5, 0x30fb }, + { 0x04a6, 0x30f2 }, + { 0x04a7, 0x30a1 }, + { 0x04a8, 0x30a3 }, + { 0x04a9, 0x30a5 }, + { 0x04aa, 0x30a7 }, + { 
0x04ab, 0x30a9 }, + { 0x04ac, 0x30e3 }, + { 0x04ad, 0x30e5 }, + { 0x04ae, 0x30e7 }, + { 0x04af, 0x30c3 }, + { 0x04b0, 0x30fc }, + { 0x04b1, 0x30a2 }, + { 0x04b2, 0x30a4 }, + { 0x04b3, 0x30a6 }, + { 0x04b4, 0x30a8 }, + { 0x04b5, 0x30aa }, + { 0x04b6, 0x30ab }, + { 0x04b7, 0x30ad }, + { 0x04b8, 0x30af }, + { 0x04b9, 0x30b1 }, + { 0x04ba, 0x30b3 }, + { 0x04bb, 0x30b5 }, + { 0x04bc, 0x30b7 }, + { 0x04bd, 0x30b9 }, + { 0x04be, 0x30bb }, + { 0x04bf, 0x30bd }, + { 0x04c0, 0x30bf }, + { 0x04c1, 0x30c1 }, + { 0x04c2, 0x30c4 }, + { 0x04c3, 0x30c6 }, + { 0x04c4, 0x30c8 }, + { 0x04c5, 0x30ca }, + { 0x04c6, 0x30cb }, + { 0x04c7, 0x30cc }, + { 0x04c8, 0x30cd }, + { 0x04c9, 0x30ce }, + { 0x04ca, 0x30cf }, + { 0x04cb, 0x30d2 }, + { 0x04cc, 0x30d5 }, + { 0x04cd, 0x30d8 }, + { 0x04ce, 0x30db }, + { 0x04cf, 0x30de }, + { 0x04d0, 0x30df }, + { 0x04d1, 0x30e0 }, + { 0x04d2, 0x30e1 }, + { 0x04d3, 0x30e2 }, + { 0x04d4, 0x30e4 }, + { 0x04d5, 0x30e6 }, + { 0x04d6, 0x30e8 }, + { 0x04d7, 0x30e9 }, + { 0x04d8, 0x30ea }, + { 0x04d9, 0x30eb }, + { 0x04da, 0x30ec }, + { 0x04db, 0x30ed }, + { 0x04dc, 0x30ef }, + { 0x04dd, 0x30f3 }, + { 0x04de, 0x309b }, + { 0x04df, 0x309c }, + { 0x05ac, 0x060c }, + { 0x05bb, 0x061b }, + { 0x05bf, 0x061f }, + { 0x05c1, 0x0621 }, + { 0x05c2, 0x0622 }, + { 0x05c3, 0x0623 }, + { 0x05c4, 0x0624 }, + { 0x05c5, 0x0625 }, + { 0x05c6, 0x0626 }, + { 0x05c7, 0x0627 }, + { 0x05c8, 0x0628 }, + { 0x05c9, 0x0629 }, + { 0x05ca, 0x062a }, + { 0x05cb, 0x062b }, + { 0x05cc, 0x062c }, + { 0x05cd, 0x062d }, + { 0x05ce, 0x062e }, + { 0x05cf, 0x062f }, + { 0x05d0, 0x0630 }, + { 0x05d1, 0x0631 }, + { 0x05d2, 0x0632 }, + { 0x05d3, 0x0633 }, + { 0x05d4, 0x0634 }, + { 0x05d5, 0x0635 }, + { 0x05d6, 0x0636 }, + { 0x05d7, 0x0637 }, + { 0x05d8, 0x0638 }, + { 0x05d9, 0x0639 }, + { 0x05da, 0x063a }, + { 0x05e0, 0x0640 }, + { 0x05e1, 0x0641 }, + { 0x05e2, 0x0642 }, + { 0x05e3, 0x0643 }, + { 0x05e4, 0x0644 }, + { 0x05e5, 0x0645 }, + { 0x05e6, 0x0646 }, + { 0x05e7, 0x0647 }, + { 0x05e8, 0x0648 }, + { 0x05e9, 0x0649 }, + { 0x05ea, 0x064a }, + { 0x05eb, 0x064b }, + { 0x05ec, 0x064c }, + { 0x05ed, 0x064d }, + { 0x05ee, 0x064e }, + { 0x05ef, 0x064f }, + { 0x05f0, 0x0650 }, + { 0x05f1, 0x0651 }, + { 0x05f2, 0x0652 }, + { 0x06a1, 0x0452 }, + { 0x06a2, 0x0453 }, + { 0x06a3, 0x0451 }, + { 0x06a4, 0x0454 }, + { 0x06a5, 0x0455 }, + { 0x06a6, 0x0456 }, + { 0x06a7, 0x0457 }, + { 0x06a8, 0x0458 }, + { 0x06a9, 0x0459 }, + { 0x06aa, 0x045a }, + { 0x06ab, 0x045b }, + { 0x06ac, 0x045c }, + { 0x06ae, 0x045e }, + { 0x06af, 0x045f }, + { 0x06b0, 0x2116 }, + { 0x06b1, 0x0402 }, + { 0x06b2, 0x0403 }, + { 0x06b3, 0x0401 }, + { 0x06b4, 0x0404 }, + { 0x06b5, 0x0405 }, + { 0x06b6, 0x0406 }, + { 0x06b7, 0x0407 }, + { 0x06b8, 0x0408 }, + { 0x06b9, 0x0409 }, + { 0x06ba, 0x040a }, + { 0x06bb, 0x040b }, + { 0x06bc, 0x040c }, + { 0x06be, 0x040e }, + { 0x06bf, 0x040f }, + { 0x06c0, 0x044e }, + { 0x06c1, 0x0430 }, + { 0x06c2, 0x0431 }, + { 0x06c3, 0x0446 }, + { 0x06c4, 0x0434 }, + { 0x06c5, 0x0435 }, + { 0x06c6, 0x0444 }, + { 0x06c7, 0x0433 }, + { 0x06c8, 0x0445 }, + { 0x06c9, 0x0438 }, + { 0x06ca, 0x0439 }, + { 0x06cb, 0x043a }, + { 0x06cc, 0x043b }, + { 0x06cd, 0x043c }, + { 0x06ce, 0x043d }, + { 0x06cf, 0x043e }, + { 0x06d0, 0x043f }, + { 0x06d1, 0x044f }, + { 0x06d2, 0x0440 }, + { 0x06d3, 0x0441 }, + { 0x06d4, 0x0442 }, + { 0x06d5, 0x0443 }, + { 0x06d6, 0x0436 }, + { 0x06d7, 0x0432 }, + { 0x06d8, 0x044c }, + { 0x06d9, 0x044b }, + { 0x06da, 0x0437 }, + { 0x06db, 0x0448 }, + { 0x06dc, 0x044d }, + { 0x06dd, 0x0449 }, + { 0x06de, 0x0447 }, + { 0x06df, 
0x044a }, + { 0x06e0, 0x042e }, + { 0x06e1, 0x0410 }, + { 0x06e2, 0x0411 }, + { 0x06e3, 0x0426 }, + { 0x06e4, 0x0414 }, + { 0x06e5, 0x0415 }, + { 0x06e6, 0x0424 }, + { 0x06e7, 0x0413 }, + { 0x06e8, 0x0425 }, + { 0x06e9, 0x0418 }, + { 0x06ea, 0x0419 }, + { 0x06eb, 0x041a }, + { 0x06ec, 0x041b }, + { 0x06ed, 0x041c }, + { 0x06ee, 0x041d }, + { 0x06ef, 0x041e }, + { 0x06f0, 0x041f }, + { 0x06f1, 0x042f }, + { 0x06f2, 0x0420 }, + { 0x06f3, 0x0421 }, + { 0x06f4, 0x0422 }, + { 0x06f5, 0x0423 }, + { 0x06f6, 0x0416 }, + { 0x06f7, 0x0412 }, + { 0x06f8, 0x042c }, + { 0x06f9, 0x042b }, + { 0x06fa, 0x0417 }, + { 0x06fb, 0x0428 }, + { 0x06fc, 0x042d }, + { 0x06fd, 0x0429 }, + { 0x06fe, 0x0427 }, + { 0x06ff, 0x042a }, + { 0x07a1, 0x0386 }, + { 0x07a2, 0x0388 }, + { 0x07a3, 0x0389 }, + { 0x07a4, 0x038a }, + { 0x07a5, 0x03aa }, + { 0x07a7, 0x038c }, + { 0x07a8, 0x038e }, + { 0x07a9, 0x03ab }, + { 0x07ab, 0x038f }, + { 0x07ae, 0x0385 }, + { 0x07af, 0x2015 }, + { 0x07b1, 0x03ac }, + { 0x07b2, 0x03ad }, + { 0x07b3, 0x03ae }, + { 0x07b4, 0x03af }, + { 0x07b5, 0x03ca }, + { 0x07b6, 0x0390 }, + { 0x07b7, 0x03cc }, + { 0x07b8, 0x03cd }, + { 0x07b9, 0x03cb }, + { 0x07ba, 0x03b0 }, + { 0x07bb, 0x03ce }, + { 0x07c1, 0x0391 }, + { 0x07c2, 0x0392 }, + { 0x07c3, 0x0393 }, + { 0x07c4, 0x0394 }, + { 0x07c5, 0x0395 }, + { 0x07c6, 0x0396 }, + { 0x07c7, 0x0397 }, + { 0x07c8, 0x0398 }, + { 0x07c9, 0x0399 }, + { 0x07ca, 0x039a }, + { 0x07cb, 0x039b }, + { 0x07cc, 0x039c }, + { 0x07cd, 0x039d }, + { 0x07ce, 0x039e }, + { 0x07cf, 0x039f }, + { 0x07d0, 0x03a0 }, + { 0x07d1, 0x03a1 }, + { 0x07d2, 0x03a3 }, + { 0x07d4, 0x03a4 }, + { 0x07d5, 0x03a5 }, + { 0x07d6, 0x03a6 }, + { 0x07d7, 0x03a7 }, + { 0x07d8, 0x03a8 }, + { 0x07d9, 0x03a9 }, + { 0x07e1, 0x03b1 }, + { 0x07e2, 0x03b2 }, + { 0x07e3, 0x03b3 }, + { 0x07e4, 0x03b4 }, + { 0x07e5, 0x03b5 }, + { 0x07e6, 0x03b6 }, + { 0x07e7, 0x03b7 }, + { 0x07e8, 0x03b8 }, + { 0x07e9, 0x03b9 }, + { 0x07ea, 0x03ba }, + { 0x07eb, 0x03bb }, + { 0x07ec, 0x03bc }, + { 0x07ed, 0x03bd }, + { 0x07ee, 0x03be }, + { 0x07ef, 0x03bf }, + { 0x07f0, 0x03c0 }, + { 0x07f1, 0x03c1 }, + { 0x07f2, 0x03c3 }, + { 0x07f3, 0x03c2 }, + { 0x07f4, 0x03c4 }, + { 0x07f5, 0x03c5 }, + { 0x07f6, 0x03c6 }, + { 0x07f7, 0x03c7 }, + { 0x07f8, 0x03c8 }, + { 0x07f9, 0x03c9 }, + { 0x08a1, 0x23b7 }, + { 0x08a2, 0x250c }, + { 0x08a3, 0x2500 }, + { 0x08a4, 0x2320 }, + { 0x08a5, 0x2321 }, + { 0x08a6, 0x2502 }, + { 0x08a7, 0x23a1 }, + { 0x08a8, 0x23a3 }, + { 0x08a9, 0x23a4 }, + { 0x08aa, 0x23a6 }, + { 0x08ab, 0x239b }, + { 0x08ac, 0x239d }, + { 0x08ad, 0x239e }, + { 0x08ae, 0x23a0 }, + { 0x08af, 0x23a8 }, + { 0x08b0, 0x23ac }, + { 0x08bc, 0x2264 }, + { 0x08bd, 0x2260 }, + { 0x08be, 0x2265 }, + { 0x08bf, 0x222b }, + { 0x08c0, 0x2234 }, + { 0x08c1, 0x221d }, + { 0x08c2, 0x221e }, + { 0x08c5, 0x2207 }, + { 0x08c8, 0x223c }, + { 0x08c9, 0x2243 }, + { 0x08cd, 0x21d4 }, + { 0x08ce, 0x21d2 }, + { 0x08cf, 0x2261 }, + { 0x08d6, 0x221a }, + { 0x08da, 0x2282 }, + { 0x08db, 0x2283 }, + { 0x08dc, 0x2229 }, + { 0x08dd, 0x222a }, + { 0x08de, 0x2227 }, + { 0x08df, 0x2228 }, + { 0x08ef, 0x2202 }, + { 0x08f6, 0x0192 }, + { 0x08fb, 0x2190 }, + { 0x08fc, 0x2191 }, + { 0x08fd, 0x2192 }, + { 0x08fe, 0x2193 }, + { 0x09e0, 0x25c6 }, + { 0x09e1, 0x2592 }, + { 0x09e2, 0x2409 }, + { 0x09e3, 0x240c }, + { 0x09e4, 0x240d }, + { 0x09e5, 0x240a }, + { 0x09e8, 0x2424 }, + { 0x09e9, 0x240b }, + { 0x09ea, 0x2518 }, + { 0x09eb, 0x2510 }, + { 0x09ec, 0x250c }, + { 0x09ed, 0x2514 }, + { 0x09ee, 0x253c }, + { 0x09ef, 0x23ba }, + { 0x09f0, 0x23bb }, + { 0x09f1, 0x2500 }, + 
{ 0x09f2, 0x23bc }, + { 0x09f3, 0x23bd }, + { 0x09f4, 0x251c }, + { 0x09f5, 0x2524 }, + { 0x09f6, 0x2534 }, + { 0x09f7, 0x252c }, + { 0x09f8, 0x2502 }, + { 0x0aa1, 0x2003 }, + { 0x0aa2, 0x2002 }, + { 0x0aa3, 0x2004 }, + { 0x0aa4, 0x2005 }, + { 0x0aa5, 0x2007 }, + { 0x0aa6, 0x2008 }, + { 0x0aa7, 0x2009 }, + { 0x0aa8, 0x200a }, + { 0x0aa9, 0x2014 }, + { 0x0aaa, 0x2013 }, + { 0x0aae, 0x2026 }, + { 0x0aaf, 0x2025 }, + { 0x0ab0, 0x2153 }, + { 0x0ab1, 0x2154 }, + { 0x0ab2, 0x2155 }, + { 0x0ab3, 0x2156 }, + { 0x0ab4, 0x2157 }, + { 0x0ab5, 0x2158 }, + { 0x0ab6, 0x2159 }, + { 0x0ab7, 0x215a }, + { 0x0ab8, 0x2105 }, + { 0x0abb, 0x2012 }, + { 0x0abc, 0x2329 }, + { 0x0abe, 0x232a }, + { 0x0ac3, 0x215b }, + { 0x0ac4, 0x215c }, + { 0x0ac5, 0x215d }, + { 0x0ac6, 0x215e }, + { 0x0ac9, 0x2122 }, + { 0x0aca, 0x2613 }, + { 0x0acc, 0x25c1 }, + { 0x0acd, 0x25b7 }, + { 0x0ace, 0x25cb }, + { 0x0acf, 0x25af }, + { 0x0ad0, 0x2018 }, + { 0x0ad1, 0x2019 }, + { 0x0ad2, 0x201c }, + { 0x0ad3, 0x201d }, + { 0x0ad4, 0x211e }, + { 0x0ad6, 0x2032 }, + { 0x0ad7, 0x2033 }, + { 0x0ad9, 0x271d }, + { 0x0adb, 0x25ac }, + { 0x0adc, 0x25c0 }, + { 0x0add, 0x25b6 }, + { 0x0ade, 0x25cf }, + { 0x0adf, 0x25ae }, + { 0x0ae0, 0x25e6 }, + { 0x0ae1, 0x25ab }, + { 0x0ae2, 0x25ad }, + { 0x0ae3, 0x25b3 }, + { 0x0ae4, 0x25bd }, + { 0x0ae5, 0x2606 }, + { 0x0ae6, 0x2022 }, + { 0x0ae7, 0x25aa }, + { 0x0ae8, 0x25b2 }, + { 0x0ae9, 0x25bc }, + { 0x0aea, 0x261c }, + { 0x0aeb, 0x261e }, + { 0x0aec, 0x2663 }, + { 0x0aed, 0x2666 }, + { 0x0aee, 0x2665 }, + { 0x0af0, 0x2720 }, + { 0x0af1, 0x2020 }, + { 0x0af2, 0x2021 }, + { 0x0af3, 0x2713 }, + { 0x0af4, 0x2717 }, + { 0x0af5, 0x266f }, + { 0x0af6, 0x266d }, + { 0x0af7, 0x2642 }, + { 0x0af8, 0x2640 }, + { 0x0af9, 0x260e }, + { 0x0afa, 0x2315 }, + { 0x0afb, 0x2117 }, + { 0x0afc, 0x2038 }, + { 0x0afd, 0x201a }, + { 0x0afe, 0x201e }, + { 0x0ba3, 0x003c }, + { 0x0ba6, 0x003e }, + { 0x0ba8, 0x2228 }, + { 0x0ba9, 0x2227 }, + { 0x0bc0, 0x00af }, + { 0x0bc2, 0x22a5 }, + { 0x0bc3, 0x2229 }, + { 0x0bc4, 0x230a }, + { 0x0bc6, 0x005f }, + { 0x0bca, 0x2218 }, + { 0x0bcc, 0x2395 }, + { 0x0bce, 0x22a4 }, + { 0x0bcf, 0x25cb }, + { 0x0bd3, 0x2308 }, + { 0x0bd6, 0x222a }, + { 0x0bd8, 0x2283 }, + { 0x0bda, 0x2282 }, + { 0x0bdc, 0x22a2 }, + { 0x0bfc, 0x22a3 }, + { 0x0cdf, 0x2017 }, + { 0x0ce0, 0x05d0 }, + { 0x0ce1, 0x05d1 }, + { 0x0ce2, 0x05d2 }, + { 0x0ce3, 0x05d3 }, + { 0x0ce4, 0x05d4 }, + { 0x0ce5, 0x05d5 }, + { 0x0ce6, 0x05d6 }, + { 0x0ce7, 0x05d7 }, + { 0x0ce8, 0x05d8 }, + { 0x0ce9, 0x05d9 }, + { 0x0cea, 0x05da }, + { 0x0ceb, 0x05db }, + { 0x0cec, 0x05dc }, + { 0x0ced, 0x05dd }, + { 0x0cee, 0x05de }, + { 0x0cef, 0x05df }, + { 0x0cf0, 0x05e0 }, + { 0x0cf1, 0x05e1 }, + { 0x0cf2, 0x05e2 }, + { 0x0cf3, 0x05e3 }, + { 0x0cf4, 0x05e4 }, + { 0x0cf5, 0x05e5 }, + { 0x0cf6, 0x05e6 }, + { 0x0cf7, 0x05e7 }, + { 0x0cf8, 0x05e8 }, + { 0x0cf9, 0x05e9 }, + { 0x0cfa, 0x05ea }, + { 0x0da1, 0x0e01 }, + { 0x0da2, 0x0e02 }, + { 0x0da3, 0x0e03 }, + { 0x0da4, 0x0e04 }, + { 0x0da5, 0x0e05 }, + { 0x0da6, 0x0e06 }, + { 0x0da7, 0x0e07 }, + { 0x0da8, 0x0e08 }, + { 0x0da9, 0x0e09 }, + { 0x0daa, 0x0e0a }, + { 0x0dab, 0x0e0b }, + { 0x0dac, 0x0e0c }, + { 0x0dad, 0x0e0d }, + { 0x0dae, 0x0e0e }, + { 0x0daf, 0x0e0f }, + { 0x0db0, 0x0e10 }, + { 0x0db1, 0x0e11 }, + { 0x0db2, 0x0e12 }, + { 0x0db3, 0x0e13 }, + { 0x0db4, 0x0e14 }, + { 0x0db5, 0x0e15 }, + { 0x0db6, 0x0e16 }, + { 0x0db7, 0x0e17 }, + { 0x0db8, 0x0e18 }, + { 0x0db9, 0x0e19 }, + { 0x0dba, 0x0e1a }, + { 0x0dbb, 0x0e1b }, + { 0x0dbc, 0x0e1c }, + { 0x0dbd, 0x0e1d }, + { 0x0dbe, 0x0e1e }, + { 0x0dbf, 
0x0e1f }, + { 0x0dc0, 0x0e20 }, + { 0x0dc1, 0x0e21 }, + { 0x0dc2, 0x0e22 }, + { 0x0dc3, 0x0e23 }, + { 0x0dc4, 0x0e24 }, + { 0x0dc5, 0x0e25 }, + { 0x0dc6, 0x0e26 }, + { 0x0dc7, 0x0e27 }, + { 0x0dc8, 0x0e28 }, + { 0x0dc9, 0x0e29 }, + { 0x0dca, 0x0e2a }, + { 0x0dcb, 0x0e2b }, + { 0x0dcc, 0x0e2c }, + { 0x0dcd, 0x0e2d }, + { 0x0dce, 0x0e2e }, + { 0x0dcf, 0x0e2f }, + { 0x0dd0, 0x0e30 }, + { 0x0dd1, 0x0e31 }, + { 0x0dd2, 0x0e32 }, + { 0x0dd3, 0x0e33 }, + { 0x0dd4, 0x0e34 }, + { 0x0dd5, 0x0e35 }, + { 0x0dd6, 0x0e36 }, + { 0x0dd7, 0x0e37 }, + { 0x0dd8, 0x0e38 }, + { 0x0dd9, 0x0e39 }, + { 0x0dda, 0x0e3a }, + { 0x0ddf, 0x0e3f }, + { 0x0de0, 0x0e40 }, + { 0x0de1, 0x0e41 }, + { 0x0de2, 0x0e42 }, + { 0x0de3, 0x0e43 }, + { 0x0de4, 0x0e44 }, + { 0x0de5, 0x0e45 }, + { 0x0de6, 0x0e46 }, + { 0x0de7, 0x0e47 }, + { 0x0de8, 0x0e48 }, + { 0x0de9, 0x0e49 }, + { 0x0dea, 0x0e4a }, + { 0x0deb, 0x0e4b }, + { 0x0dec, 0x0e4c }, + { 0x0ded, 0x0e4d }, + { 0x0df0, 0x0e50 }, + { 0x0df1, 0x0e51 }, + { 0x0df2, 0x0e52 }, + { 0x0df3, 0x0e53 }, + { 0x0df4, 0x0e54 }, + { 0x0df5, 0x0e55 }, + { 0x0df6, 0x0e56 }, + { 0x0df7, 0x0e57 }, + { 0x0df8, 0x0e58 }, + { 0x0df9, 0x0e59 }, + { 0x0ea1, 0x3131 }, + { 0x0ea2, 0x3132 }, + { 0x0ea3, 0x3133 }, + { 0x0ea4, 0x3134 }, + { 0x0ea5, 0x3135 }, + { 0x0ea6, 0x3136 }, + { 0x0ea7, 0x3137 }, + { 0x0ea8, 0x3138 }, + { 0x0ea9, 0x3139 }, + { 0x0eaa, 0x313a }, + { 0x0eab, 0x313b }, + { 0x0eac, 0x313c }, + { 0x0ead, 0x313d }, + { 0x0eae, 0x313e }, + { 0x0eaf, 0x313f }, + { 0x0eb0, 0x3140 }, + { 0x0eb1, 0x3141 }, + { 0x0eb2, 0x3142 }, + { 0x0eb3, 0x3143 }, + { 0x0eb4, 0x3144 }, + { 0x0eb5, 0x3145 }, + { 0x0eb6, 0x3146 }, + { 0x0eb7, 0x3147 }, + { 0x0eb8, 0x3148 }, + { 0x0eb9, 0x3149 }, + { 0x0eba, 0x314a }, + { 0x0ebb, 0x314b }, + { 0x0ebc, 0x314c }, + { 0x0ebd, 0x314d }, + { 0x0ebe, 0x314e }, + { 0x0ebf, 0x314f }, + { 0x0ec0, 0x3150 }, + { 0x0ec1, 0x3151 }, + { 0x0ec2, 0x3152 }, + { 0x0ec3, 0x3153 }, + { 0x0ec4, 0x3154 }, + { 0x0ec5, 0x3155 }, + { 0x0ec6, 0x3156 }, + { 0x0ec7, 0x3157 }, + { 0x0ec8, 0x3158 }, + { 0x0ec9, 0x3159 }, + { 0x0eca, 0x315a }, + { 0x0ecb, 0x315b }, + { 0x0ecc, 0x315c }, + { 0x0ecd, 0x315d }, + { 0x0ece, 0x315e }, + { 0x0ecf, 0x315f }, + { 0x0ed0, 0x3160 }, + { 0x0ed1, 0x3161 }, + { 0x0ed2, 0x3162 }, + { 0x0ed3, 0x3163 }, + { 0x0ed4, 0x11a8 }, + { 0x0ed5, 0x11a9 }, + { 0x0ed6, 0x11aa }, + { 0x0ed7, 0x11ab }, + { 0x0ed8, 0x11ac }, + { 0x0ed9, 0x11ad }, + { 0x0eda, 0x11ae }, + { 0x0edb, 0x11af }, + { 0x0edc, 0x11b0 }, + { 0x0edd, 0x11b1 }, + { 0x0ede, 0x11b2 }, + { 0x0edf, 0x11b3 }, + { 0x0ee0, 0x11b4 }, + { 0x0ee1, 0x11b5 }, + { 0x0ee2, 0x11b6 }, + { 0x0ee3, 0x11b7 }, + { 0x0ee4, 0x11b8 }, + { 0x0ee5, 0x11b9 }, + { 0x0ee6, 0x11ba }, + { 0x0ee7, 0x11bb }, + { 0x0ee8, 0x11bc }, + { 0x0ee9, 0x11bd }, + { 0x0eea, 0x11be }, + { 0x0eeb, 0x11bf }, + { 0x0eec, 0x11c0 }, + { 0x0eed, 0x11c1 }, + { 0x0eee, 0x11c2 }, + { 0x0eef, 0x316d }, + { 0x0ef0, 0x3171 }, + { 0x0ef1, 0x3178 }, + { 0x0ef2, 0x317f }, + { 0x0ef3, 0x3181 }, + { 0x0ef4, 0x3184 }, + { 0x0ef5, 0x3186 }, + { 0x0ef6, 0x318d }, + { 0x0ef7, 0x318e }, + { 0x0ef8, 0x11eb }, + { 0x0ef9, 0x11f0 }, + { 0x0efa, 0x11f9 }, + { 0x0eff, 0x20a9 }, + { 0x13a4, 0x20ac }, + { 0x13bc, 0x0152 }, + { 0x13bd, 0x0153 }, + { 0x13be, 0x0178 }, + { 0x20ac, 0x20ac }, + { 0xfe50, '`' }, + { 0xfe51, 0x00b4 }, + { 0xfe52, '^' }, + { 0xfe53, '~' }, + { 0xfe54, 0x00af }, + { 0xfe55, 0x02d8 }, + { 0xfe56, 0x02d9 }, + { 0xfe57, 0x00a8 }, + { 0xfe58, 0x02da }, + { 0xfe59, 0x02dd }, + { 0xfe5a, 0x02c7 }, + { 0xfe5b, 0x00b8 }, + { 0xfe5c, 0x02db }, + { 0xfe5d, 
0x037a }, + { 0xfe5e, 0x309b }, + { 0xfe5f, 0x309c }, + { 0xfe63, '/' }, + { 0xfe64, 0x02bc }, + { 0xfe65, 0x02bd }, + { 0xfe66, 0x02f5 }, + { 0xfe67, 0x02f3 }, + { 0xfe68, 0x02cd }, + { 0xfe69, 0xa788 }, + { 0xfe6a, 0x02f7 }, + { 0xfe6e, ',' }, + { 0xfe6f, 0x00a4 }, + { 0xfe80, 'a' }, /* XK_dead_a */ + { 0xfe81, 'A' }, /* XK_dead_A */ + { 0xfe82, 'e' }, /* XK_dead_e */ + { 0xfe83, 'E' }, /* XK_dead_E */ + { 0xfe84, 'i' }, /* XK_dead_i */ + { 0xfe85, 'I' }, /* XK_dead_I */ + { 0xfe86, 'o' }, /* XK_dead_o */ + { 0xfe87, 'O' }, /* XK_dead_O */ + { 0xfe88, 'u' }, /* XK_dead_u */ + { 0xfe89, 'U' }, /* XK_dead_U */ + { 0xfe8a, 0x0259 }, + { 0xfe8b, 0x018f }, + { 0xfe8c, 0x00b5 }, + { 0xfe90, '_' }, + { 0xfe91, 0x02c8 }, + { 0xfe92, 0x02cc }, + { 0xff80 /*XKB_KEY_KP_Space*/, ' ' }, + { 0xff95 /*XKB_KEY_KP_7*/, 0x0037 }, + { 0xff96 /*XKB_KEY_KP_4*/, 0x0034 }, + { 0xff97 /*XKB_KEY_KP_8*/, 0x0038 }, + { 0xff98 /*XKB_KEY_KP_6*/, 0x0036 }, + { 0xff99 /*XKB_KEY_KP_2*/, 0x0032 }, + { 0xff9a /*XKB_KEY_KP_9*/, 0x0039 }, + { 0xff9b /*XKB_KEY_KP_3*/, 0x0033 }, + { 0xff9c /*XKB_KEY_KP_1*/, 0x0031 }, + { 0xff9d /*XKB_KEY_KP_5*/, 0x0035 }, + { 0xff9e /*XKB_KEY_KP_0*/, 0x0030 }, + { 0xffaa /*XKB_KEY_KP_Multiply*/, '*' }, + { 0xffab /*XKB_KEY_KP_Add*/, '+' }, + { 0xffac /*XKB_KEY_KP_Separator*/, ',' }, + { 0xffad /*XKB_KEY_KP_Subtract*/, '-' }, + { 0xffae /*XKB_KEY_KP_Decimal*/, '.' }, + { 0xffaf /*XKB_KEY_KP_Divide*/, '/' }, + { 0xffb0 /*XKB_KEY_KP_0*/, 0x0030 }, + { 0xffb1 /*XKB_KEY_KP_1*/, 0x0031 }, + { 0xffb2 /*XKB_KEY_KP_2*/, 0x0032 }, + { 0xffb3 /*XKB_KEY_KP_3*/, 0x0033 }, + { 0xffb4 /*XKB_KEY_KP_4*/, 0x0034 }, + { 0xffb5 /*XKB_KEY_KP_5*/, 0x0035 }, + { 0xffb6 /*XKB_KEY_KP_6*/, 0x0036 }, + { 0xffb7 /*XKB_KEY_KP_7*/, 0x0037 }, + { 0xffb8 /*XKB_KEY_KP_8*/, 0x0038 }, + { 0xffb9 /*XKB_KEY_KP_9*/, 0x0039 }, + { 0xffbd /*XKB_KEY_KP_Equal*/, '=' } +}; + +_SOKOL_PRIVATE int _sapp_x11_error_handler(Display* display, XErrorEvent* event) { + _SOKOL_UNUSED(display); + _sapp.x11.error_code = event->error_code; + return 0; +} + +_SOKOL_PRIVATE void _sapp_x11_grab_error_handler(void) { + _sapp.x11.error_code = Success; + XSetErrorHandler(_sapp_x11_error_handler); +} + +_SOKOL_PRIVATE void _sapp_x11_release_error_handler(void) { + XSync(_sapp.x11.display, False); + XSetErrorHandler(NULL); +} + +_SOKOL_PRIVATE void _sapp_x11_init_extensions(void) { + _sapp.x11.UTF8_STRING = XInternAtom(_sapp.x11.display, "UTF8_STRING", False); + _sapp.x11.WM_PROTOCOLS = XInternAtom(_sapp.x11.display, "WM_PROTOCOLS", False); + _sapp.x11.WM_DELETE_WINDOW = XInternAtom(_sapp.x11.display, "WM_DELETE_WINDOW", False); + _sapp.x11.WM_STATE = XInternAtom(_sapp.x11.display, "WM_STATE", False); + _sapp.x11.NET_WM_NAME = XInternAtom(_sapp.x11.display, "_NET_WM_NAME", False); + _sapp.x11.NET_WM_ICON_NAME = XInternAtom(_sapp.x11.display, "_NET_WM_ICON_NAME", False); + _sapp.x11.NET_WM_STATE = XInternAtom(_sapp.x11.display, "_NET_WM_STATE", False); + _sapp.x11.NET_WM_STATE_FULLSCREEN = XInternAtom(_sapp.x11.display, "_NET_WM_STATE_FULLSCREEN", False); + if (_sapp.drop.enabled) { + _sapp.x11.xdnd.XdndAware = XInternAtom(_sapp.x11.display, "XdndAware", False); + _sapp.x11.xdnd.XdndEnter = XInternAtom(_sapp.x11.display, "XdndEnter", False); + _sapp.x11.xdnd.XdndPosition = XInternAtom(_sapp.x11.display, "XdndPosition", False); + _sapp.x11.xdnd.XdndStatus = XInternAtom(_sapp.x11.display, "XdndStatus", False); + _sapp.x11.xdnd.XdndActionCopy = XInternAtom(_sapp.x11.display, "XdndActionCopy", False); + _sapp.x11.xdnd.XdndDrop = XInternAtom(_sapp.x11.display, 
"XdndDrop", False); + _sapp.x11.xdnd.XdndFinished = XInternAtom(_sapp.x11.display, "XdndFinished", False); + _sapp.x11.xdnd.XdndSelection = XInternAtom(_sapp.x11.display, "XdndSelection", False); + _sapp.x11.xdnd.XdndTypeList = XInternAtom(_sapp.x11.display, "XdndTypeList", False); + _sapp.x11.xdnd.text_uri_list = XInternAtom(_sapp.x11.display, "text/uri-list", False); + } + + /* check Xi extension for raw mouse input */ + if (XQueryExtension(_sapp.x11.display, "XInputExtension", &_sapp.x11.xi.major_opcode, &_sapp.x11.xi.event_base, &_sapp.x11.xi.error_base)) { + _sapp.x11.xi.major = 2; + _sapp.x11.xi.minor = 0; + if (XIQueryVersion(_sapp.x11.display, &_sapp.x11.xi.major, &_sapp.x11.xi.minor) == Success) { + _sapp.x11.xi.available = true; + } + } +} + +_SOKOL_PRIVATE void _sapp_x11_query_system_dpi(void) { + /* from GLFW: + + NOTE: Default to the display-wide DPI as we don't currently have a policy + for which monitor a window is considered to be on + + _sapp.x11.dpi = DisplayWidth(_sapp.x11.display, _sapp.x11.screen) * + 25.4f / DisplayWidthMM(_sapp.x11.display, _sapp.x11.screen); + + NOTE: Basing the scale on Xft.dpi where available should provide the most + consistent user experience (matches Qt, Gtk, etc), although not + always the most accurate one + */ + char* rms = XResourceManagerString(_sapp.x11.display); + if (rms) { + XrmDatabase db = XrmGetStringDatabase(rms); + if (db) { + XrmValue value; + char* type = NULL; + if (XrmGetResource(db, "Xft.dpi", "Xft.Dpi", &type, &value)) { + if (type && strcmp(type, "String") == 0) { + _sapp.x11.dpi = atof(value.addr); + } + } + XrmDestroyDatabase(db); + } + } +} + +_SOKOL_PRIVATE bool _sapp_glx_has_ext(const char* ext, const char* extensions) { + SOKOL_ASSERT(ext); + const char* start = extensions; + while (true) { + const char* where = strstr(start, ext); + if (!where) { + return false; + } + const char* terminator = where + strlen(ext); + if ((where == start) || (*(where - 1) == ' ')) { + if (*terminator == ' ' || *terminator == '\0') { + break; + } + } + start = terminator; + } + return true; +} + +_SOKOL_PRIVATE bool _sapp_glx_extsupported(const char* ext, const char* extensions) { + if (extensions) { + return _sapp_glx_has_ext(ext, extensions); + } + else { + return false; + } +} + +_SOKOL_PRIVATE void* _sapp_glx_getprocaddr(const char* procname) +{ + if (_sapp.glx.GetProcAddress) { + return (void*) _sapp.glx.GetProcAddress(procname); + } + else if (_sapp.glx.GetProcAddressARB) { + return (void*) _sapp.glx.GetProcAddressARB(procname); + } + else { + return dlsym(_sapp.glx.libgl, procname); + } +} + +_SOKOL_PRIVATE void _sapp_glx_init() { + const char* sonames[] = { "libGL.so.1", "libGL.so", 0 }; + for (int i = 0; sonames[i]; i++) { + _sapp.glx.libgl = dlopen(sonames[i], RTLD_LAZY|RTLD_GLOBAL); + if (_sapp.glx.libgl) { + break; + } + } + if (!_sapp.glx.libgl) { + _sapp_fail("GLX: failed to load libGL"); + } + _sapp.glx.GetFBConfigs = (PFNGLXGETFBCONFIGSPROC) dlsym(_sapp.glx.libgl, "glXGetFBConfigs"); + _sapp.glx.GetFBConfigAttrib = (PFNGLXGETFBCONFIGATTRIBPROC) dlsym(_sapp.glx.libgl, "glXGetFBConfigAttrib"); + _sapp.glx.GetClientString = (PFNGLXGETCLIENTSTRINGPROC) dlsym(_sapp.glx.libgl, "glXGetClientString"); + _sapp.glx.QueryExtension = (PFNGLXQUERYEXTENSIONPROC) dlsym(_sapp.glx.libgl, "glXQueryExtension"); + _sapp.glx.QueryVersion = (PFNGLXQUERYVERSIONPROC) dlsym(_sapp.glx.libgl, "glXQueryVersion"); + _sapp.glx.DestroyContext = (PFNGLXDESTROYCONTEXTPROC) dlsym(_sapp.glx.libgl, "glXDestroyContext"); + _sapp.glx.MakeCurrent = 
(PFNGLXMAKECURRENTPROC) dlsym(_sapp.glx.libgl, "glXMakeCurrent"); + _sapp.glx.SwapBuffers = (PFNGLXSWAPBUFFERSPROC) dlsym(_sapp.glx.libgl, "glXSwapBuffers"); + _sapp.glx.QueryExtensionsString = (PFNGLXQUERYEXTENSIONSSTRINGPROC) dlsym(_sapp.glx.libgl, "glXQueryExtensionsString"); + _sapp.glx.CreateWindow = (PFNGLXCREATEWINDOWPROC) dlsym(_sapp.glx.libgl, "glXCreateWindow"); + _sapp.glx.DestroyWindow = (PFNGLXDESTROYWINDOWPROC) dlsym(_sapp.glx.libgl, "glXDestroyWindow"); + _sapp.glx.GetProcAddress = (PFNGLXGETPROCADDRESSPROC) dlsym(_sapp.glx.libgl, "glXGetProcAddress"); + _sapp.glx.GetProcAddressARB = (PFNGLXGETPROCADDRESSPROC) dlsym(_sapp.glx.libgl, "glXGetProcAddressARB"); + _sapp.glx.GetVisualFromFBConfig = (PFNGLXGETVISUALFROMFBCONFIGPROC) dlsym(_sapp.glx.libgl, "glXGetVisualFromFBConfig"); + if (!_sapp.glx.GetFBConfigs || + !_sapp.glx.GetFBConfigAttrib || + !_sapp.glx.GetClientString || + !_sapp.glx.QueryExtension || + !_sapp.glx.QueryVersion || + !_sapp.glx.DestroyContext || + !_sapp.glx.MakeCurrent || + !_sapp.glx.SwapBuffers || + !_sapp.glx.QueryExtensionsString || + !_sapp.glx.CreateWindow || + !_sapp.glx.DestroyWindow || + !_sapp.glx.GetProcAddress || + !_sapp.glx.GetProcAddressARB || + !_sapp.glx.GetVisualFromFBConfig) + { + _sapp_fail("GLX: failed to load required entry points"); + } + + if (!_sapp.glx.QueryExtension(_sapp.x11.display, &_sapp.glx.error_base, &_sapp.glx.event_base)) { + _sapp_fail("GLX: GLX extension not found"); + } + if (!_sapp.glx.QueryVersion(_sapp.x11.display, &_sapp.glx.major, &_sapp.glx.minor)) { + _sapp_fail("GLX: Failed to query GLX version"); + } + if (_sapp.glx.major == 1 && _sapp.glx.minor < 3) { + _sapp_fail("GLX: GLX version 1.3 is required"); + } + const char* exts = _sapp.glx.QueryExtensionsString(_sapp.x11.display, _sapp.x11.screen); + if (_sapp_glx_extsupported("GLX_EXT_swap_control", exts)) { + _sapp.glx.SwapIntervalEXT = (PFNGLXSWAPINTERVALEXTPROC) _sapp_glx_getprocaddr("glXSwapIntervalEXT"); + _sapp.glx.EXT_swap_control = 0 != _sapp.glx.SwapIntervalEXT; + } + if (_sapp_glx_extsupported("GLX_MESA_swap_control", exts)) { + _sapp.glx.SwapIntervalMESA = (PFNGLXSWAPINTERVALMESAPROC) _sapp_glx_getprocaddr("glXSwapIntervalMESA"); + _sapp.glx.MESA_swap_control = 0 != _sapp.glx.SwapIntervalMESA; + } + _sapp.glx.ARB_multisample = _sapp_glx_extsupported("GLX_ARB_multisample", exts); + if (_sapp_glx_extsupported("GLX_ARB_create_context", exts)) { + _sapp.glx.CreateContextAttribsARB = (PFNGLXCREATECONTEXTATTRIBSARBPROC) _sapp_glx_getprocaddr("glXCreateContextAttribsARB"); + _sapp.glx.ARB_create_context = 0 != _sapp.glx.CreateContextAttribsARB; + } + _sapp.glx.ARB_create_context_profile = _sapp_glx_extsupported("GLX_ARB_create_context_profile", exts); +} + +_SOKOL_PRIVATE int _sapp_glx_attrib(GLXFBConfig fbconfig, int attrib) { + int value; + _sapp.glx.GetFBConfigAttrib(_sapp.x11.display, fbconfig, attrib, &value); + return value; +} + +_SOKOL_PRIVATE GLXFBConfig _sapp_glx_choosefbconfig() { + GLXFBConfig* native_configs; + _sapp_gl_fbconfig* usable_configs; + const _sapp_gl_fbconfig* closest; + int i, native_count, usable_count; + const char* vendor; + bool trust_window_bit = true; + + /* HACK: This is a (hopefully temporary) workaround for Chromium + (VirtualBox GL) not setting the window bit on any GLXFBConfigs + */ + vendor = _sapp.glx.GetClientString(_sapp.x11.display, GLX_VENDOR); + if (vendor && strcmp(vendor, "Chromium") == 0) { + trust_window_bit = false; + } + + native_configs = _sapp.glx.GetFBConfigs(_sapp.x11.display, _sapp.x11.screen, 
&native_count); + if (!native_configs || !native_count) { + _sapp_fail("GLX: No GLXFBConfigs returned"); + } + + usable_configs = (_sapp_gl_fbconfig*) SOKOL_CALLOC((size_t)native_count, sizeof(_sapp_gl_fbconfig)); + usable_count = 0; + for (i = 0; i < native_count; i++) { + const GLXFBConfig n = native_configs[i]; + _sapp_gl_fbconfig* u = usable_configs + usable_count; + _sapp_gl_init_fbconfig(u); + + /* Only consider RGBA GLXFBConfigs */ + if (0 == (_sapp_glx_attrib(n, GLX_RENDER_TYPE) & GLX_RGBA_BIT)) { + continue; + } + /* Only consider window GLXFBConfigs */ + if (0 == (_sapp_glx_attrib(n, GLX_DRAWABLE_TYPE) & GLX_WINDOW_BIT)) { + if (trust_window_bit) { + continue; + } + } + u->red_bits = _sapp_glx_attrib(n, GLX_RED_SIZE); + u->green_bits = _sapp_glx_attrib(n, GLX_GREEN_SIZE); + u->blue_bits = _sapp_glx_attrib(n, GLX_BLUE_SIZE); + u->alpha_bits = _sapp_glx_attrib(n, GLX_ALPHA_SIZE); + u->depth_bits = _sapp_glx_attrib(n, GLX_DEPTH_SIZE); + u->stencil_bits = _sapp_glx_attrib(n, GLX_STENCIL_SIZE); + if (_sapp_glx_attrib(n, GLX_DOUBLEBUFFER)) { + u->doublebuffer = true; + } + if (_sapp.glx.ARB_multisample) { + u->samples = _sapp_glx_attrib(n, GLX_SAMPLES); + } + u->handle = (uintptr_t) n; + usable_count++; + } + _sapp_gl_fbconfig desired; + _sapp_gl_init_fbconfig(&desired); + desired.red_bits = 8; + desired.green_bits = 8; + desired.blue_bits = 8; + desired.alpha_bits = 8; + desired.depth_bits = 24; + desired.stencil_bits = 8; + desired.doublebuffer = true; + desired.samples = _sapp.sample_count > 1 ? _sapp.sample_count : 0; + closest = _sapp_gl_choose_fbconfig(&desired, usable_configs, usable_count); + GLXFBConfig result = 0; + if (closest) { + result = (GLXFBConfig) closest->handle; + } + XFree(native_configs); + SOKOL_FREE(usable_configs); + return result; +} + +_SOKOL_PRIVATE void _sapp_glx_choose_visual(Visual** visual, int* depth) { + GLXFBConfig native = _sapp_glx_choosefbconfig(); + if (0 == native) { + _sapp_fail("GLX: Failed to find a suitable GLXFBConfig"); + } + XVisualInfo* result = _sapp.glx.GetVisualFromFBConfig(_sapp.x11.display, native); + if (!result) { + _sapp_fail("GLX: Failed to retrieve Visual for GLXFBConfig"); + } + *visual = result->visual; + *depth = result->depth; + XFree(result); +} + +_SOKOL_PRIVATE void _sapp_glx_create_context(void) { + GLXFBConfig native = _sapp_glx_choosefbconfig(); + if (0 == native){ + _sapp_fail("GLX: Failed to find a suitable GLXFBConfig (2)"); + } + if (!(_sapp.glx.ARB_create_context && _sapp.glx.ARB_create_context_profile)) { + _sapp_fail("GLX: ARB_create_context and ARB_create_context_profile required"); + } + _sapp_x11_grab_error_handler(); + const int attribs[] = { + GLX_CONTEXT_MAJOR_VERSION_ARB, 3, + GLX_CONTEXT_MINOR_VERSION_ARB, 3, + GLX_CONTEXT_PROFILE_MASK_ARB, GLX_CONTEXT_CORE_PROFILE_BIT_ARB, + GLX_CONTEXT_FLAGS_ARB, GLX_CONTEXT_FORWARD_COMPATIBLE_BIT_ARB, + 0, 0 + }; + _sapp.glx.ctx = _sapp.glx.CreateContextAttribsARB(_sapp.x11.display, native, NULL, True, attribs); + if (!_sapp.glx.ctx) { + _sapp_fail("GLX: failed to create GL context"); + } + _sapp_x11_release_error_handler(); + _sapp.glx.window = _sapp.glx.CreateWindow(_sapp.x11.display, native, _sapp.x11.window, NULL); + if (!_sapp.glx.window) { + _sapp_fail("GLX: failed to create window"); + } +} + +_SOKOL_PRIVATE void _sapp_glx_destroy_context(void) { + if (_sapp.glx.window) { + _sapp.glx.DestroyWindow(_sapp.x11.display, _sapp.glx.window); + _sapp.glx.window = 0; + } + if (_sapp.glx.ctx) { + _sapp.glx.DestroyContext(_sapp.x11.display, _sapp.glx.ctx); + _sapp.glx.ctx 
= 0; + } +} + +_SOKOL_PRIVATE void _sapp_glx_make_current(void) { + _sapp.glx.MakeCurrent(_sapp.x11.display, _sapp.glx.window, _sapp.glx.ctx); +} + +_SOKOL_PRIVATE void _sapp_glx_swap_buffers(void) { + _sapp.glx.SwapBuffers(_sapp.x11.display, _sapp.glx.window); +} + +_SOKOL_PRIVATE void _sapp_glx_swapinterval(int interval) { + _sapp_glx_make_current(); + if (_sapp.glx.EXT_swap_control) { + _sapp.glx.SwapIntervalEXT(_sapp.x11.display, _sapp.glx.window, interval); + } + else if (_sapp.glx.MESA_swap_control) { + _sapp.glx.SwapIntervalMESA(interval); + } +} + +_SOKOL_PRIVATE void _sapp_x11_send_event(Atom type, int a, int b, int c, int d, int e) { + XEvent event; + memset(&event, 0, sizeof(event)); + + event.type = ClientMessage; + event.xclient.window = _sapp.x11.window; + event.xclient.format = 32; + event.xclient.message_type = type; + event.xclient.data.l[0] = a; + event.xclient.data.l[1] = b; + event.xclient.data.l[2] = c; + event.xclient.data.l[3] = d; + event.xclient.data.l[4] = e; + + XSendEvent(_sapp.x11.display, _sapp.x11.root, + False, + SubstructureNotifyMask | SubstructureRedirectMask, + &event); +} + +_SOKOL_PRIVATE void _sapp_x11_query_window_size(void) { + XWindowAttributes attribs; + XGetWindowAttributes(_sapp.x11.display, _sapp.x11.window, &attribs); + _sapp.window_width = attribs.width; + _sapp.window_height = attribs.height; + _sapp.framebuffer_width = _sapp.window_width; + _sapp.framebuffer_height = _sapp.window_height; +} + +_SOKOL_PRIVATE void _sapp_x11_set_fullscreen(bool enable) { + /* NOTE: this function must be called after XMapWindow (which happens in _sapp_x11_show_window()) */ + if (_sapp.x11.NET_WM_STATE && _sapp.x11.NET_WM_STATE_FULLSCREEN) { + if (enable) { + const int _NET_WM_STATE_ADD = 1; + _sapp_x11_send_event(_sapp.x11.NET_WM_STATE, + _NET_WM_STATE_ADD, + _sapp.x11.NET_WM_STATE_FULLSCREEN, + 0, 1, 0); + } + else { + const int _NET_WM_STATE_REMOVE = 0; + _sapp_x11_send_event(_sapp.x11.NET_WM_STATE, + _NET_WM_STATE_REMOVE, + _sapp.x11.NET_WM_STATE_FULLSCREEN, + 0, 1, 0); + } + } + XFlush(_sapp.x11.display); +} + +_SOKOL_PRIVATE void _sapp_x11_create_hidden_cursor(void) { + SOKOL_ASSERT(0 == _sapp.x11.hidden_cursor); + const int w = 16; + const int h = 16; + XcursorImage* img = XcursorImageCreate(w, h); + SOKOL_ASSERT(img && (img->width == 16) && (img->height == 16) && img->pixels); + img->xhot = 0; + img->yhot = 0; + const size_t num_bytes = (size_t)(w * h) * sizeof(XcursorPixel); + memset(img->pixels, 0, num_bytes); + _sapp.x11.hidden_cursor = XcursorImageLoadCursor(_sapp.x11.display, img); + XcursorImageDestroy(img); +} + +_SOKOL_PRIVATE void _sapp_x11_toggle_fullscreen(void) { + _sapp.fullscreen = !_sapp.fullscreen; + _sapp_x11_set_fullscreen(_sapp.fullscreen); + _sapp_x11_query_window_size(); +} + +_SOKOL_PRIVATE void _sapp_x11_show_mouse(bool show) { + if (show) { + XUndefineCursor(_sapp.x11.display, _sapp.x11.window); + } + else { + XDefineCursor(_sapp.x11.display, _sapp.x11.window, _sapp.x11.hidden_cursor); + } +} + +_SOKOL_PRIVATE void _sapp_x11_lock_mouse(bool lock) { + if (lock == _sapp.mouse.locked) { + return; + } + _sapp.mouse.dx = 0.0f; + _sapp.mouse.dy = 0.0f; + _sapp.mouse.locked = lock; + if (_sapp.mouse.locked) { + if (_sapp.x11.xi.available) { + XIEventMask em; + unsigned char mask[XIMaskLen(XI_RawMotion)] = { 0 }; // XIMaskLen is a macro + em.deviceid = XIAllMasterDevices; + em.mask_len = sizeof(mask); + em.mask = mask; + XISetMask(mask, XI_RawMotion); + XISelectEvents(_sapp.x11.display, _sapp.x11.root, &em, 1); + } + 
XGrabPointer(_sapp.x11.display, // display + _sapp.x11.window, // grab_window + True, // owner_events + ButtonPressMask | ButtonReleaseMask | PointerMotionMask, // event_mask + GrabModeAsync, // pointer_mode + GrabModeAsync, // keyboard_mode + _sapp.x11.window, // confine_to + _sapp.x11.hidden_cursor, // cursor + CurrentTime); // time + } + else { + if (_sapp.x11.xi.available) { + XIEventMask em; + unsigned char mask[] = { 0 }; + em.deviceid = XIAllMasterDevices; + em.mask_len = sizeof(mask); + em.mask = mask; + XISelectEvents(_sapp.x11.display, _sapp.x11.root, &em, 1); + } + XWarpPointer(_sapp.x11.display, None, _sapp.x11.window, 0, 0, 0, 0, (int) _sapp.mouse.x, _sapp.mouse.y); + XUngrabPointer(_sapp.x11.display, CurrentTime); + } + XFlush(_sapp.x11.display); +} + +_SOKOL_PRIVATE void _sapp_x11_update_window_title(void) { + Xutf8SetWMProperties(_sapp.x11.display, + _sapp.x11.window, + _sapp.window_title, _sapp.window_title, + NULL, 0, NULL, NULL, NULL); + XChangeProperty(_sapp.x11.display, _sapp.x11.window, + _sapp.x11.NET_WM_NAME, _sapp.x11.UTF8_STRING, 8, + PropModeReplace, + (unsigned char*)_sapp.window_title, + strlen(_sapp.window_title)); + XChangeProperty(_sapp.x11.display, _sapp.x11.window, + _sapp.x11.NET_WM_ICON_NAME, _sapp.x11.UTF8_STRING, 8, + PropModeReplace, + (unsigned char*)_sapp.window_title, + strlen(_sapp.window_title)); + XFlush(_sapp.x11.display); +} + +_SOKOL_PRIVATE void _sapp_x11_create_window(Visual* visual, int depth) { + _sapp.x11.colormap = XCreateColormap(_sapp.x11.display, _sapp.x11.root, visual, AllocNone); + XSetWindowAttributes wa; + memset(&wa, 0, sizeof(wa)); + const uint32_t wamask = CWBorderPixel | CWColormap | CWEventMask; + wa.colormap = _sapp.x11.colormap; + wa.border_pixel = 0; + wa.event_mask = StructureNotifyMask | KeyPressMask | KeyReleaseMask | + PointerMotionMask | ButtonPressMask | ButtonReleaseMask | + ExposureMask | FocusChangeMask | VisibilityChangeMask | + EnterWindowMask | LeaveWindowMask | PropertyChangeMask; + _sapp_x11_grab_error_handler(); + _sapp.x11.window = XCreateWindow(_sapp.x11.display, + _sapp.x11.root, + 0, 0, + (uint32_t)_sapp.window_width, + (uint32_t)_sapp.window_height, + 0, /* border width */ + depth, /* color depth */ + InputOutput, + visual, + wamask, + &wa); + _sapp_x11_release_error_handler(); + if (!_sapp.x11.window) { + _sapp_fail("X11: Failed to create window"); + } + Atom protocols[] = { + _sapp.x11.WM_DELETE_WINDOW + }; + XSetWMProtocols(_sapp.x11.display, _sapp.x11.window, protocols, 1); + + XSizeHints* hints = XAllocSizeHints(); + hints->flags |= PWinGravity; + hints->win_gravity = StaticGravity; + XSetWMNormalHints(_sapp.x11.display, _sapp.x11.window, hints); + XFree(hints); + + /* announce support for drag'n'drop */ + if (_sapp.drop.enabled) { + const Atom version = _SAPP_X11_XDND_VERSION; + XChangeProperty(_sapp.x11.display, _sapp.x11.window, _sapp.x11.xdnd.XdndAware, XA_ATOM, 32, PropModeReplace, (unsigned char*) &version, 1); + } + + _sapp_x11_update_window_title(); +} + +_SOKOL_PRIVATE void _sapp_x11_destroy_window(void) { + if (_sapp.x11.window) { + XUnmapWindow(_sapp.x11.display, _sapp.x11.window); + XDestroyWindow(_sapp.x11.display, _sapp.x11.window); + _sapp.x11.window = 0; + } + if (_sapp.x11.colormap) { + XFreeColormap(_sapp.x11.display, _sapp.x11.colormap); + _sapp.x11.colormap = 0; + } + XFlush(_sapp.x11.display); +} + +_SOKOL_PRIVATE bool _sapp_x11_window_visible(void) { + XWindowAttributes wa; + XGetWindowAttributes(_sapp.x11.display, _sapp.x11.window, &wa); + return wa.map_state == IsViewable; 
+} + +_SOKOL_PRIVATE void _sapp_x11_show_window(void) { + if (!_sapp_x11_window_visible()) { + XMapWindow(_sapp.x11.display, _sapp.x11.window); + XRaiseWindow(_sapp.x11.display, _sapp.x11.window); + XFlush(_sapp.x11.display); + } +} + +_SOKOL_PRIVATE void _sapp_x11_hide_window(void) { + XUnmapWindow(_sapp.x11.display, _sapp.x11.window); + XFlush(_sapp.x11.display); +} + +_SOKOL_PRIVATE unsigned long _sapp_x11_get_window_property(Window window, Atom property, Atom type, unsigned char** value) { + Atom actualType; + int actualFormat; + unsigned long itemCount, bytesAfter; + XGetWindowProperty(_sapp.x11.display, + window, + property, + 0, + LONG_MAX, + False, + type, + &actualType, + &actualFormat, + &itemCount, + &bytesAfter, + value); + return itemCount; +} + +_SOKOL_PRIVATE int _sapp_x11_get_window_state(void) { + int result = WithdrawnState; + struct { + CARD32 state; + Window icon; + } *state = NULL; + + if (_sapp_x11_get_window_property(_sapp.x11.window, _sapp.x11.WM_STATE, _sapp.x11.WM_STATE, (unsigned char**)&state) >= 2) { + result = (int)state->state; + } + if (state) { + XFree(state); + } + return result; +} + +_SOKOL_PRIVATE uint32_t _sapp_x11_mod(uint32_t x11_mods) { + uint32_t mods = 0; + if (x11_mods & ShiftMask) { + mods |= SAPP_MODIFIER_SHIFT; + } + if (x11_mods & ControlMask) { + mods |= SAPP_MODIFIER_CTRL; + } + if (x11_mods & Mod1Mask) { + mods |= SAPP_MODIFIER_ALT; + } + if (x11_mods & Mod4Mask) { + mods |= SAPP_MODIFIER_SUPER; + } + return mods; +} + +_SOKOL_PRIVATE void _sapp_x11_app_event(sapp_event_type type) { + if (_sapp_events_enabled()) { + _sapp_init_event(type); + _sapp_call_event(&_sapp.event); + } +} + +_SOKOL_PRIVATE sapp_mousebutton _sapp_x11_translate_button(const XEvent* event) { + switch (event->xbutton.button) { + case Button1: return SAPP_MOUSEBUTTON_LEFT; + case Button2: return SAPP_MOUSEBUTTON_MIDDLE; + case Button3: return SAPP_MOUSEBUTTON_RIGHT; + default: return SAPP_MOUSEBUTTON_INVALID; + } +} + +_SOKOL_PRIVATE void _sapp_x11_mouse_event(sapp_event_type type, sapp_mousebutton btn, uint32_t mods) { + if (_sapp_events_enabled()) { + _sapp_init_event(type); + _sapp.event.mouse_button = btn; + _sapp.event.modifiers = mods; + _sapp_call_event(&_sapp.event); + } +} + +_SOKOL_PRIVATE void _sapp_x11_scroll_event(float x, float y, uint32_t mods) { + if (_sapp_events_enabled()) { + _sapp_init_event(SAPP_EVENTTYPE_MOUSE_SCROLL); + _sapp.event.modifiers = mods; + _sapp.event.scroll_x = x; + _sapp.event.scroll_y = y; + _sapp_call_event(&_sapp.event); + } +} + +_SOKOL_PRIVATE void _sapp_x11_key_event(sapp_event_type type, sapp_keycode key, bool repeat, uint32_t mods) { + if (_sapp_events_enabled()) { + _sapp_init_event(type); + _sapp.event.key_code = key; + _sapp.event.key_repeat = repeat; + _sapp.event.modifiers = mods; + _sapp_call_event(&_sapp.event); + /* check if a CLIPBOARD_PASTED event must be sent too */ + if (_sapp.clipboard.enabled && + (type == SAPP_EVENTTYPE_KEY_DOWN) && + (_sapp.event.modifiers == SAPP_MODIFIER_CTRL) && + (_sapp.event.key_code == SAPP_KEYCODE_V)) + { + _sapp_init_event(SAPP_EVENTTYPE_CLIPBOARD_PASTED); + _sapp_call_event(&_sapp.event); + } + } +} + +_SOKOL_PRIVATE void _sapp_x11_char_event(uint32_t chr, bool repeat, uint32_t mods) { + if (_sapp_events_enabled()) { + _sapp_init_event(SAPP_EVENTTYPE_CHAR); + _sapp.event.char_code = chr; + _sapp.event.key_repeat = repeat; + _sapp.event.modifiers = mods; + _sapp_call_event(&_sapp.event); + } +} + +_SOKOL_PRIVATE sapp_keycode _sapp_x11_translate_key(int scancode) { + int dummy; + 
KeySym* keysyms = XGetKeyboardMapping(_sapp.x11.display, scancode, 1, &dummy); + SOKOL_ASSERT(keysyms); + KeySym keysym = keysyms[0]; + XFree(keysyms); + switch (keysym) { + case XK_Escape: return SAPP_KEYCODE_ESCAPE; + case XK_Tab: return SAPP_KEYCODE_TAB; + case XK_Shift_L: return SAPP_KEYCODE_LEFT_SHIFT; + case XK_Shift_R: return SAPP_KEYCODE_RIGHT_SHIFT; + case XK_Control_L: return SAPP_KEYCODE_LEFT_CONTROL; + case XK_Control_R: return SAPP_KEYCODE_RIGHT_CONTROL; + case XK_Meta_L: + case XK_Alt_L: return SAPP_KEYCODE_LEFT_ALT; + case XK_Mode_switch: /* Mapped to Alt_R on many keyboards */ + case XK_ISO_Level3_Shift: /* AltGr on at least some machines */ + case XK_Meta_R: + case XK_Alt_R: return SAPP_KEYCODE_RIGHT_ALT; + case XK_Super_L: return SAPP_KEYCODE_LEFT_SUPER; + case XK_Super_R: return SAPP_KEYCODE_RIGHT_SUPER; + case XK_Menu: return SAPP_KEYCODE_MENU; + case XK_Num_Lock: return SAPP_KEYCODE_NUM_LOCK; + case XK_Caps_Lock: return SAPP_KEYCODE_CAPS_LOCK; + case XK_Print: return SAPP_KEYCODE_PRINT_SCREEN; + case XK_Scroll_Lock: return SAPP_KEYCODE_SCROLL_LOCK; + case XK_Pause: return SAPP_KEYCODE_PAUSE; + case XK_Delete: return SAPP_KEYCODE_DELETE; + case XK_BackSpace: return SAPP_KEYCODE_BACKSPACE; + case XK_Return: return SAPP_KEYCODE_ENTER; + case XK_Home: return SAPP_KEYCODE_HOME; + case XK_End: return SAPP_KEYCODE_END; + case XK_Page_Up: return SAPP_KEYCODE_PAGE_UP; + case XK_Page_Down: return SAPP_KEYCODE_PAGE_DOWN; + case XK_Insert: return SAPP_KEYCODE_INSERT; + case XK_Left: return SAPP_KEYCODE_LEFT; + case XK_Right: return SAPP_KEYCODE_RIGHT; + case XK_Down: return SAPP_KEYCODE_DOWN; + case XK_Up: return SAPP_KEYCODE_UP; + case XK_F1: return SAPP_KEYCODE_F1; + case XK_F2: return SAPP_KEYCODE_F2; + case XK_F3: return SAPP_KEYCODE_F3; + case XK_F4: return SAPP_KEYCODE_F4; + case XK_F5: return SAPP_KEYCODE_F5; + case XK_F6: return SAPP_KEYCODE_F6; + case XK_F7: return SAPP_KEYCODE_F7; + case XK_F8: return SAPP_KEYCODE_F8; + case XK_F9: return SAPP_KEYCODE_F9; + case XK_F10: return SAPP_KEYCODE_F10; + case XK_F11: return SAPP_KEYCODE_F11; + case XK_F12: return SAPP_KEYCODE_F12; + case XK_F13: return SAPP_KEYCODE_F13; + case XK_F14: return SAPP_KEYCODE_F14; + case XK_F15: return SAPP_KEYCODE_F15; + case XK_F16: return SAPP_KEYCODE_F16; + case XK_F17: return SAPP_KEYCODE_F17; + case XK_F18: return SAPP_KEYCODE_F18; + case XK_F19: return SAPP_KEYCODE_F19; + case XK_F20: return SAPP_KEYCODE_F20; + case XK_F21: return SAPP_KEYCODE_F21; + case XK_F22: return SAPP_KEYCODE_F22; + case XK_F23: return SAPP_KEYCODE_F23; + case XK_F24: return SAPP_KEYCODE_F24; + case XK_F25: return SAPP_KEYCODE_F25; + + case XK_KP_Divide: return SAPP_KEYCODE_KP_DIVIDE; + case XK_KP_Multiply: return SAPP_KEYCODE_KP_MULTIPLY; + case XK_KP_Subtract: return SAPP_KEYCODE_KP_SUBTRACT; + case XK_KP_Add: return SAPP_KEYCODE_KP_ADD; + + case XK_KP_Insert: return SAPP_KEYCODE_KP_0; + case XK_KP_End: return SAPP_KEYCODE_KP_1; + case XK_KP_Down: return SAPP_KEYCODE_KP_2; + case XK_KP_Page_Down: return SAPP_KEYCODE_KP_3; + case XK_KP_Left: return SAPP_KEYCODE_KP_4; + case XK_KP_Begin: return SAPP_KEYCODE_KP_5; + case XK_KP_Right: return SAPP_KEYCODE_KP_6; + case XK_KP_Home: return SAPP_KEYCODE_KP_7; + case XK_KP_Up: return SAPP_KEYCODE_KP_8; + case XK_KP_Page_Up: return SAPP_KEYCODE_KP_9; + case XK_KP_Delete: return SAPP_KEYCODE_KP_DECIMAL; + case XK_KP_Equal: return SAPP_KEYCODE_KP_EQUAL; + case XK_KP_Enter: return SAPP_KEYCODE_KP_ENTER; + + case XK_a: return SAPP_KEYCODE_A; + case XK_b: return SAPP_KEYCODE_B; + 
case XK_c: return SAPP_KEYCODE_C; + case XK_d: return SAPP_KEYCODE_D; + case XK_e: return SAPP_KEYCODE_E; + case XK_f: return SAPP_KEYCODE_F; + case XK_g: return SAPP_KEYCODE_G; + case XK_h: return SAPP_KEYCODE_H; + case XK_i: return SAPP_KEYCODE_I; + case XK_j: return SAPP_KEYCODE_J; + case XK_k: return SAPP_KEYCODE_K; + case XK_l: return SAPP_KEYCODE_L; + case XK_m: return SAPP_KEYCODE_M; + case XK_n: return SAPP_KEYCODE_N; + case XK_o: return SAPP_KEYCODE_O; + case XK_p: return SAPP_KEYCODE_P; + case XK_q: return SAPP_KEYCODE_Q; + case XK_r: return SAPP_KEYCODE_R; + case XK_s: return SAPP_KEYCODE_S; + case XK_t: return SAPP_KEYCODE_T; + case XK_u: return SAPP_KEYCODE_U; + case XK_v: return SAPP_KEYCODE_V; + case XK_w: return SAPP_KEYCODE_W; + case XK_x: return SAPP_KEYCODE_X; + case XK_y: return SAPP_KEYCODE_Y; + case XK_z: return SAPP_KEYCODE_Z; + case XK_1: return SAPP_KEYCODE_1; + case XK_2: return SAPP_KEYCODE_2; + case XK_3: return SAPP_KEYCODE_3; + case XK_4: return SAPP_KEYCODE_4; + case XK_5: return SAPP_KEYCODE_5; + case XK_6: return SAPP_KEYCODE_6; + case XK_7: return SAPP_KEYCODE_7; + case XK_8: return SAPP_KEYCODE_8; + case XK_9: return SAPP_KEYCODE_9; + case XK_0: return SAPP_KEYCODE_0; + case XK_space: return SAPP_KEYCODE_SPACE; + case XK_minus: return SAPP_KEYCODE_MINUS; + case XK_equal: return SAPP_KEYCODE_EQUAL; + case XK_bracketleft: return SAPP_KEYCODE_LEFT_BRACKET; + case XK_bracketright: return SAPP_KEYCODE_RIGHT_BRACKET; + case XK_backslash: return SAPP_KEYCODE_BACKSLASH; + case XK_semicolon: return SAPP_KEYCODE_SEMICOLON; + case XK_apostrophe: return SAPP_KEYCODE_APOSTROPHE; + case XK_grave: return SAPP_KEYCODE_GRAVE_ACCENT; + case XK_comma: return SAPP_KEYCODE_COMMA; + case XK_period: return SAPP_KEYCODE_PERIOD; + case XK_slash: return SAPP_KEYCODE_SLASH; + case XK_less: return SAPP_KEYCODE_WORLD_1; /* At least in some layouts... 
*/ + default: return SAPP_KEYCODE_INVALID; + } +} + +_SOKOL_PRIVATE int32_t _sapp_x11_keysym_to_unicode(KeySym keysym) { + int min = 0; + int max = sizeof(_sapp_x11_keysymtab) / sizeof(struct _sapp_x11_codepair) - 1; + int mid; + + /* First check for Latin-1 characters (1:1 mapping) */ + if ((keysym >= 0x0020 && keysym <= 0x007e) || + (keysym >= 0x00a0 && keysym <= 0x00ff)) + { + return keysym; + } + + /* Also check for directly encoded 24-bit UCS characters */ + if ((keysym & 0xff000000) == 0x01000000) { + return keysym & 0x00ffffff; + } + + /* Binary search in table */ + while (max >= min) { + mid = (min + max) / 2; + if (_sapp_x11_keysymtab[mid].keysym < keysym) { + min = mid + 1; + } + else if (_sapp_x11_keysymtab[mid].keysym > keysym) { + max = mid - 1; + } + else { + return _sapp_x11_keysymtab[mid].ucs; + } + } + + /* No matching Unicode value found */ + return -1; +} + +_SOKOL_PRIVATE bool _sapp_x11_parse_dropped_files_list(const char* src) { + SOKOL_ASSERT(src); + SOKOL_ASSERT(_sapp.drop.buffer); + + _sapp_clear_drop_buffer(); + _sapp.drop.num_files = 0; + + /* + src is (potentially percent-encoded) string made of one or multiple paths + separated by \r\n, each path starting with 'file://' + */ + bool err = false; + int src_count = 0; + char src_chr = 0; + char* dst_ptr = _sapp.drop.buffer; + const char* dst_end_ptr = dst_ptr + (_sapp.drop.max_path_length - 1); // room for terminating 0 + while (0 != (src_chr = *src++)) { + src_count++; + char dst_chr = 0; + /* check leading 'file://' */ + if (src_count <= 7) { + if (((src_count == 1) && (src_chr != 'f')) || + ((src_count == 2) && (src_chr != 'i')) || + ((src_count == 3) && (src_chr != 'l')) || + ((src_count == 4) && (src_chr != 'e')) || + ((src_count == 5) && (src_chr != ':')) || + ((src_count == 6) && (src_chr != '/')) || + ((src_count == 7) && (src_chr != '/'))) + { + SOKOL_LOG("sokol_app.h: dropped file URI doesn't start with file://"); + err = true; + break; + } + } + else if (src_chr == '\r') { + // skip + } + else if (src_chr == '\n') { + src_chr = 0; + src_count = 0; + _sapp.drop.num_files++; + // too many files is not an error + if (_sapp.drop.num_files >= _sapp.drop.max_files) { + break; + } + dst_ptr = _sapp.drop.buffer + _sapp.drop.num_files * _sapp.drop.max_path_length; + dst_end_ptr = dst_ptr + (_sapp.drop.max_path_length - 1); + } + else if ((src_chr == '%') && src[0] && src[1]) { + // a percent-encoded byte (most like UTF-8 multibyte sequence) + const char digits[3] = { src[0], src[1], 0 }; + src += 2; + dst_chr = (char) strtol(digits, 0, 16); + } + else { + dst_chr = src_chr; + } + if (dst_chr) { + // dst_end_ptr already has adjustment for terminating zero + if (dst_ptr < dst_end_ptr) { + *dst_ptr++ = dst_chr; + } + else { + SOKOL_LOG("sokol_app.h: dropped file path too long (sapp_desc.max_dropped_file_path_length)"); + err = true; + break; + } + } + } + if (err) { + _sapp_clear_drop_buffer(); + _sapp.drop.num_files = 0; + return false; + } + else { + return true; + } +} + +// XLib manual says keycodes are in the range [8, 255] inclusive. 
+// https://tronche.com/gui/x/xlib/input/keyboard-encoding.html +static bool _sapp_x11_keycodes[256]; + +_SOKOL_PRIVATE void _sapp_x11_process_event(XEvent* event) { + Bool filtered = XFilterEvent(event, None); + switch (event->type) { + case GenericEvent: + if (_sapp.mouse.locked && _sapp.x11.xi.available) { + if (event->xcookie.extension == _sapp.x11.xi.major_opcode) { + if (XGetEventData(_sapp.x11.display, &event->xcookie)) { + if (event->xcookie.evtype == XI_RawMotion) { + XIRawEvent* re = (XIRawEvent*) event->xcookie.data; + if (re->valuators.mask_len) { + const double* values = re->raw_values; + if (XIMaskIsSet(re->valuators.mask, 0)) { + _sapp.mouse.dx = (float) *values; + values++; + } + if (XIMaskIsSet(re->valuators.mask, 1)) { + _sapp.mouse.dy = (float) *values; + } + _sapp_x11_mouse_event(SAPP_EVENTTYPE_MOUSE_MOVE, SAPP_MOUSEBUTTON_INVALID, _sapp_x11_mod(event->xmotion.state)); + } + } + XFreeEventData(_sapp.x11.display, &event->xcookie); + } + } + } + break; + case FocusOut: + /* if focus is lost for any reason, and we're in mouse locked mode, disable mouse lock */ + if (_sapp.mouse.locked) { + _sapp_x11_lock_mouse(false); + } + break; + case KeyPress: + { + int keycode = (int)event->xkey.keycode; + const sapp_keycode key = _sapp_x11_translate_key(keycode); + bool repeat = _sapp_x11_keycodes[keycode & 0xFF]; + _sapp_x11_keycodes[keycode & 0xFF] = true; + const uint32_t mods = _sapp_x11_mod(event->xkey.state); + if (key != SAPP_KEYCODE_INVALID) { + _sapp_x11_key_event(SAPP_EVENTTYPE_KEY_DOWN, key, repeat, mods); + } + KeySym keysym; + XLookupString(&event->xkey, NULL, 0, &keysym, NULL); + int32_t chr = _sapp_x11_keysym_to_unicode(keysym); + if (chr > 0) { + _sapp_x11_char_event((uint32_t)chr, repeat, mods); + } + } + break; + case KeyRelease: + { + int keycode = (int)event->xkey.keycode; + const sapp_keycode key = _sapp_x11_translate_key(keycode); + _sapp_x11_keycodes[keycode & 0xFF] = false; + if (key != SAPP_KEYCODE_INVALID) { + const uint32_t mods = _sapp_x11_mod(event->xkey.state); + _sapp_x11_key_event(SAPP_EVENTTYPE_KEY_UP, key, false, mods); + } + } + break; + case ButtonPress: + { + const sapp_mousebutton btn = _sapp_x11_translate_button(event); + const uint32_t mods = _sapp_x11_mod(event->xbutton.state); + if (btn != SAPP_MOUSEBUTTON_INVALID) { + _sapp_x11_mouse_event(SAPP_EVENTTYPE_MOUSE_DOWN, btn, mods); + _sapp.x11.mouse_buttons |= (1 << btn); + } + else { + /* might be a scroll event */ + switch (event->xbutton.button) { + case 4: _sapp_x11_scroll_event(0.0f, 1.0f, mods); break; + case 5: _sapp_x11_scroll_event(0.0f, -1.0f, mods); break; + case 6: _sapp_x11_scroll_event(1.0f, 0.0f, mods); break; + case 7: _sapp_x11_scroll_event(-1.0f, 0.0f, mods); break; + } + } + } + break; + case ButtonRelease: + { + const sapp_mousebutton btn = _sapp_x11_translate_button(event); + if (btn != SAPP_MOUSEBUTTON_INVALID) { + _sapp_x11_mouse_event(SAPP_EVENTTYPE_MOUSE_UP, btn, _sapp_x11_mod(event->xbutton.state)); + _sapp.x11.mouse_buttons &= ~(1 << btn); + } + } + break; + case EnterNotify: + /* don't send enter/leave events while mouse button held down */ + if (0 == _sapp.x11.mouse_buttons) { + _sapp_x11_mouse_event(SAPP_EVENTTYPE_MOUSE_ENTER, SAPP_MOUSEBUTTON_INVALID, _sapp_x11_mod(event->xcrossing.state)); + } + break; + case LeaveNotify: + if (0 == _sapp.x11.mouse_buttons) { + _sapp_x11_mouse_event(SAPP_EVENTTYPE_MOUSE_LEAVE, SAPP_MOUSEBUTTON_INVALID, _sapp_x11_mod(event->xcrossing.state)); + } + break; + case MotionNotify: + if (!_sapp.mouse.locked) { + const float new_x = 
(float) event->xmotion.x; + const float new_y = (float) event->xmotion.y; + if (_sapp.mouse.pos_valid) { + _sapp.mouse.dx = new_x - _sapp.mouse.x; + _sapp.mouse.dy = new_y - _sapp.mouse.y; + } + _sapp.mouse.x = new_x; + _sapp.mouse.y = new_y; + _sapp.mouse.pos_valid = true; + _sapp_x11_mouse_event(SAPP_EVENTTYPE_MOUSE_MOVE, SAPP_MOUSEBUTTON_INVALID, _sapp_x11_mod(event->xmotion.state)); + } + break; + case ConfigureNotify: + if ((event->xconfigure.width != _sapp.window_width) || (event->xconfigure.height != _sapp.window_height)) { + _sapp.window_width = event->xconfigure.width; + _sapp.window_height = event->xconfigure.height; + _sapp.framebuffer_width = _sapp.window_width; + _sapp.framebuffer_height = _sapp.window_height; + _sapp_x11_app_event(SAPP_EVENTTYPE_RESIZED); + } + break; + case PropertyNotify: + if (event->xproperty.state == PropertyNewValue) { + if (event->xproperty.atom == _sapp.x11.WM_STATE) { + const int state = _sapp_x11_get_window_state(); + if (state != _sapp.x11.window_state) { + _sapp.x11.window_state = state; + if (state == IconicState) { + _sapp_x11_app_event(SAPP_EVENTTYPE_ICONIFIED); + } + else if (state == NormalState) { + _sapp_x11_app_event(SAPP_EVENTTYPE_RESTORED); + } + } + } + } + break; + case ClientMessage: + if (filtered) { + return; + } + if (event->xclient.message_type == _sapp.x11.WM_PROTOCOLS) { + const Atom protocol = (Atom)event->xclient.data.l[0]; + if (protocol == _sapp.x11.WM_DELETE_WINDOW) { + _sapp.quit_requested = true; + } + } + else if (event->xclient.message_type == _sapp.x11.xdnd.XdndEnter) { + const bool is_list = 0 != (event->xclient.data.l[1] & 1); + _sapp.x11.xdnd.source = (Window)event->xclient.data.l[0]; + _sapp.x11.xdnd.version = event->xclient.data.l[1] >> 24; + _sapp.x11.xdnd.format = None; + if (_sapp.x11.xdnd.version > _SAPP_X11_XDND_VERSION) { + return; + } + uint32_t count = 0; + Atom* formats = 0; + if (is_list) { + count = _sapp_x11_get_window_property(_sapp.x11.xdnd.source, _sapp.x11.xdnd.XdndTypeList, XA_ATOM, (unsigned char**)&formats); + } + else { + count = 3; + formats = (Atom*) event->xclient.data.l + 2; + } + for (uint32_t i = 0; i < count; i++) { + if (formats[i] == _sapp.x11.xdnd.text_uri_list) { + _sapp.x11.xdnd.format = _sapp.x11.xdnd.text_uri_list; + break; + } + } + if (is_list && formats) { + XFree(formats); + } + } + else if (event->xclient.message_type == _sapp.x11.xdnd.XdndDrop) { + if (_sapp.x11.xdnd.version > _SAPP_X11_XDND_VERSION) { + return; + } + Time time = CurrentTime; + if (_sapp.x11.xdnd.format) { + if (_sapp.x11.xdnd.version >= 1) { + time = (Time)event->xclient.data.l[2]; + } + XConvertSelection(_sapp.x11.display, + _sapp.x11.xdnd.XdndSelection, + _sapp.x11.xdnd.format, + _sapp.x11.xdnd.XdndSelection, + _sapp.x11.window, + time); + } + else if (_sapp.x11.xdnd.version >= 2) { + XEvent reply; + memset(&reply, 0, sizeof(reply)); + reply.type = ClientMessage; + reply.xclient.window = _sapp.x11.window; + reply.xclient.message_type = _sapp.x11.xdnd.XdndFinished; + reply.xclient.format = 32; + reply.xclient.data.l[0] = (long)_sapp.x11.window; + reply.xclient.data.l[1] = 0; // drag was rejected + reply.xclient.data.l[2] = None; + XSendEvent(_sapp.x11.display, _sapp.x11.xdnd.source, False, NoEventMask, &reply); + XFlush(_sapp.x11.display); + } + } + else if (event->xclient.message_type == _sapp.x11.xdnd.XdndPosition) { + /* drag operation has moved over the window + FIXME: we could track the mouse position here, but + this isn't implemented on other platforms either so far + */ + if (_sapp.x11.xdnd.version 
> _SAPP_X11_XDND_VERSION) { + return; + } + XEvent reply; + memset(&reply, 0, sizeof(reply)); + reply.type = ClientMessage; + reply.xclient.window = _sapp.x11.xdnd.source; + reply.xclient.message_type = _sapp.x11.xdnd.XdndStatus; + reply.xclient.format = 32; + reply.xclient.data.l[0] = (long)_sapp.x11.window; + if (_sapp.x11.xdnd.format) { + /* reply that we are ready to copy the dragged data */ + reply.xclient.data.l[1] = 1; // accept with no rectangle + if (_sapp.x11.xdnd.version >= 2) { + reply.xclient.data.l[4] = (long)_sapp.x11.xdnd.XdndActionCopy; + } + } + XSendEvent(_sapp.x11.display, _sapp.x11.xdnd.source, False, NoEventMask, &reply); + XFlush(_sapp.x11.display); + } + break; + case SelectionNotify: + if (event->xselection.property == _sapp.x11.xdnd.XdndSelection) { + char* data = 0; + uint32_t result = _sapp_x11_get_window_property(event->xselection.requestor, + event->xselection.property, + event->xselection.target, + (unsigned char**) &data); + if (_sapp.drop.enabled && result) { + if (_sapp_x11_parse_dropped_files_list(data)) { + if (_sapp_events_enabled()) { + _sapp_init_event(SAPP_EVENTTYPE_FILES_DROPPED); + _sapp_call_event(&_sapp.event); + } + } + } + if (_sapp.x11.xdnd.version >= 2) { + XEvent reply; + memset(&reply, 0, sizeof(reply)); + reply.type = ClientMessage; + reply.xclient.window = _sapp.x11.window; + reply.xclient.message_type = _sapp.x11.xdnd.XdndFinished; + reply.xclient.format = 32; + reply.xclient.data.l[0] = (long)_sapp.x11.window; + reply.xclient.data.l[1] = result; + reply.xclient.data.l[2] = (long)_sapp.x11.xdnd.XdndActionCopy; + XSendEvent(_sapp.x11.display, _sapp.x11.xdnd.source, False, NoEventMask, &reply); + XFlush(_sapp.x11.display); + } + } + break; + case DestroyNotify: + break; + } +} + +_SOKOL_PRIVATE void _sapp_linux_run(const sapp_desc* desc) { + /* The following lines are here to trigger a linker error instead of an + obscure runtime error if the user has forgotten to add -pthread to + the compiler or linker options. They have no other purpose. 
+ */ + pthread_attr_t pthread_attr; + pthread_attr_init(&pthread_attr); + pthread_attr_destroy(&pthread_attr); + + _sapp_init_state(desc); + _sapp.x11.window_state = NormalState; + + XInitThreads(); + XrmInitialize(); + _sapp.x11.display = XOpenDisplay(NULL); + if (!_sapp.x11.display) { + _sapp_fail("XOpenDisplay() failed!\n"); + } + _sapp.x11.screen = DefaultScreen(_sapp.x11.display); + _sapp.x11.root = DefaultRootWindow(_sapp.x11.display); + XkbSetDetectableAutoRepeat(_sapp.x11.display, true, NULL); + _sapp_x11_query_system_dpi(); + _sapp.dpi_scale = _sapp.x11.dpi / 96.0f; + _sapp_x11_init_extensions(); + _sapp_x11_create_hidden_cursor(); + _sapp_glx_init(); + Visual* visual = 0; + int depth = 0; + _sapp_glx_choose_visual(&visual, &depth); + _sapp_x11_create_window(visual, depth); + _sapp_glx_create_context(); + _sapp.valid = true; + _sapp_x11_show_window(); + if (_sapp.fullscreen) { + _sapp_x11_set_fullscreen(true); + } + _sapp_x11_query_window_size(); + _sapp_glx_swapinterval(_sapp.swap_interval); + XFlush(_sapp.x11.display); + while (!_sapp.quit_ordered) { + _sapp_glx_make_current(); + int count = XPending(_sapp.x11.display); + while (count--) { + XEvent event; + XNextEvent(_sapp.x11.display, &event); + _sapp_x11_process_event(&event); + } + _sapp_frame(); + _sapp_glx_swap_buffers(); + XFlush(_sapp.x11.display); + /* handle quit-requested, either from window or from sapp_request_quit() */ + if (_sapp.quit_requested && !_sapp.quit_ordered) { + /* give user code a chance to intervene */ + _sapp_x11_app_event(SAPP_EVENTTYPE_QUIT_REQUESTED); + /* if user code hasn't intervened, quit the app */ + if (_sapp.quit_requested) { + _sapp.quit_ordered = true; + } + } + } + _sapp_call_cleanup(); + _sapp_glx_destroy_context(); + _sapp_x11_destroy_window(); + XCloseDisplay(_sapp.x11.display); + _sapp_discard_state(); +} + +#if !defined(SOKOL_NO_ENTRY) +int main(int argc, char* argv[]) { + sapp_desc desc = sokol_main(argc, argv); + _sapp_linux_run(&desc); + return 0; +} +#endif /* SOKOL_NO_ENTRY */ +#endif /* _SAPP_LINUX */ + +/*== PUBLIC API FUNCTIONS ====================================================*/ +#if defined(SOKOL_NO_ENTRY) +SOKOL_API_IMPL void sapp_run(const sapp_desc* desc) { + SOKOL_ASSERT(desc); + #if defined(_SAPP_MACOS) + _sapp_macos_run(desc); + #elif defined(_SAPP_IOS) + _sapp_ios_run(desc); + #elif defined(_SAPP_EMSCRIPTEN) + _sapp_emsc_run(desc); + #elif defined(_SAPP_WIN32) + _sapp_win32_run(desc); + #elif defined(_SAPP_UWP) + _sapp_uwp_run(desc); + #elif defined(_SAPP_LINUX) + _sapp_linux_run(desc); + #else + // calling sapp_run() directly is not supported on Android) + _sapp_fail("sapp_run() not supported on this platform!"); + #endif +} + +/* this is just a stub so the linker doesn't complain */ +sapp_desc sokol_main(int argc, char* argv[]) { + _SOKOL_UNUSED(argc); + _SOKOL_UNUSED(argv); + sapp_desc desc; + memset(&desc, 0, sizeof(desc)); + return desc; +} +#else +/* likewise, in normal mode, sapp_run() is just an empty stub */ +SOKOL_API_IMPL void sapp_run(const sapp_desc* desc) { + _SOKOL_UNUSED(desc); +} +#endif + +SOKOL_API_IMPL bool sapp_isvalid(void) { + return _sapp.valid; +} + +SOKOL_API_IMPL void* sapp_userdata(void) { + return _sapp.desc.user_data; +} + +SOKOL_API_IMPL sapp_desc sapp_query_desc(void) { + return _sapp.desc; +} + +SOKOL_API_IMPL uint64_t sapp_frame_count(void) { + return _sapp.frame_count; +} + +SOKOL_API_IMPL int sapp_width(void) { + return (_sapp.framebuffer_width > 0) ? 
_sapp.framebuffer_width : 1; +} + +SOKOL_API_IMPL float sapp_widthf(void) { + return (float)sapp_width(); +} + +SOKOL_API_IMPL int sapp_height(void) { + return (_sapp.framebuffer_height > 0) ? _sapp.framebuffer_height : 1; +} + +SOKOL_API_IMPL float sapp_heightf(void) { + return (float)sapp_height(); +} + +SOKOL_API_IMPL int sapp_color_format(void) { + #if defined(_SAPP_EMSCRIPTEN) && defined(SOKOL_WGPU) + switch (_sapp.emsc.wgpu.render_format) { + case WGPUTextureFormat_RGBA8Unorm: + return _SAPP_PIXELFORMAT_RGBA8; + case WGPUTextureFormat_BGRA8Unorm: + return _SAPP_PIXELFORMAT_BGRA8; + default: + SOKOL_UNREACHABLE; + return 0; + } + #elif defined(SOKOL_METAL) || defined(SOKOL_D3D11) + return _SAPP_PIXELFORMAT_BGRA8; + #else + return _SAPP_PIXELFORMAT_RGBA8; + #endif +} + +SOKOL_API_IMPL int sapp_depth_format(void) { + return _SAPP_PIXELFORMAT_DEPTH_STENCIL; +} + +SOKOL_API_IMPL int sapp_sample_count(void) { + return _sapp.sample_count; +} + +SOKOL_API_IMPL bool sapp_high_dpi(void) { + return _sapp.desc.high_dpi && (_sapp.dpi_scale >= 1.5f); +} + +SOKOL_API_IMPL float sapp_dpi_scale(void) { + return _sapp.dpi_scale; +} + +SOKOL_API_IMPL bool sapp_gles2(void) { + return _sapp.gles2_fallback; +} + +SOKOL_API_IMPL void sapp_show_keyboard(bool show) { + #if defined(_SAPP_IOS) + _sapp_ios_show_keyboard(show); + #elif defined(_SAPP_EMSCRIPTEN) + _sapp_emsc_show_keyboard(show); + #elif defined(_SAPP_ANDROID) + _sapp_android_show_keyboard(show); + #else + _SOKOL_UNUSED(show); + #endif +} + +SOKOL_API_IMPL bool sapp_keyboard_shown(void) { + return _sapp.onscreen_keyboard_shown; +} + +SOKOL_APP_API_DECL bool sapp_is_fullscreen(void) { + return _sapp.fullscreen; +} + +SOKOL_APP_API_DECL void sapp_toggle_fullscreen(void) { + #if defined(_SAPP_MACOS) + _sapp_macos_toggle_fullscreen(); + #elif defined(_SAPP_WIN32) + _sapp_win32_toggle_fullscreen(); + #elif defined(_SAPP_UWP) + _sapp_uwp_toggle_fullscreen(); + #elif defined(_SAPP_LINUX) + _sapp_x11_toggle_fullscreen(); + #endif +} + +/* NOTE that sapp_show_mouse() does not "stack" like the Win32 or macOS API functions! */ +SOKOL_API_IMPL void sapp_show_mouse(bool show) { + if (_sapp.mouse.shown != show) { + #if defined(_SAPP_MACOS) + _sapp_macos_show_mouse(show); + #elif defined(_SAPP_WIN32) + _sapp_win32_show_mouse(show); + #elif defined(_SAPP_LINUX) + _sapp_x11_show_mouse(show); + #elif defined(_SAPP_UWP) + _sapp_uwp_show_mouse(show); + #endif + _sapp.mouse.shown = show; + } +} + +SOKOL_API_IMPL bool sapp_mouse_shown(void) { + return _sapp.mouse.shown; +} + +SOKOL_API_IMPL void sapp_lock_mouse(bool lock) { + #if defined(_SAPP_MACOS) + _sapp_macos_lock_mouse(lock); + #elif defined(_SAPP_EMSCRIPTEN) + _sapp_emsc_lock_mouse(lock); + #elif defined(_SAPP_WIN32) + _sapp_win32_lock_mouse(lock); + #elif defined(_SAPP_LINUX) + _sapp_x11_lock_mouse(lock); + #else + _sapp.mouse.locked = lock; + #endif +} + +SOKOL_API_IMPL bool sapp_mouse_locked(void) { + return _sapp.mouse.locked; +} + +SOKOL_API_IMPL void sapp_request_quit(void) { + _sapp.quit_requested = true; +} + +SOKOL_API_IMPL void sapp_cancel_quit(void) { + _sapp.quit_requested = false; +} + +SOKOL_API_IMPL void sapp_quit(void) { + _sapp.quit_ordered = true; +} + +SOKOL_API_IMPL void sapp_consume_event(void) { + _sapp.event_consumed = true; +} + +/* NOTE: on HTML5, sapp_set_clipboard_string() must be called from within event handler! 
*/ +SOKOL_API_IMPL void sapp_set_clipboard_string(const char* str) { + SOKOL_ASSERT(_sapp.clipboard.enabled); + if (!_sapp.clipboard.enabled) { + return; + } + SOKOL_ASSERT(str); + #if defined(_SAPP_MACOS) + _sapp_macos_set_clipboard_string(str); + #elif defined(_SAPP_EMSCRIPTEN) + _sapp_emsc_set_clipboard_string(str); + #elif defined(_SAPP_WIN32) + _sapp_win32_set_clipboard_string(str); + #else + /* not implemented */ + #endif + _sapp_strcpy(str, _sapp.clipboard.buffer, _sapp.clipboard.buf_size); +} + +SOKOL_API_IMPL const char* sapp_get_clipboard_string(void) { + SOKOL_ASSERT(_sapp.clipboard.enabled); + if (!_sapp.clipboard.enabled) { + return ""; + } + #if defined(_SAPP_MACOS) + return _sapp_macos_get_clipboard_string(); + #elif defined(_SAPP_EMSCRIPTEN) + return _sapp.clipboard.buffer; + #elif defined(_SAPP_WIN32) + return _sapp_win32_get_clipboard_string(); + #else + /* not implemented */ + return _sapp.clipboard.buffer; + #endif +} + +SOKOL_API_IMPL void sapp_set_window_title(const char* title) { + SOKOL_ASSERT(title); + _sapp_strcpy(title, _sapp.window_title, sizeof(_sapp.window_title)); + #if defined(_SAPP_MACOS) + _sapp_macos_update_window_title(); + #elif defined(_SAPP_WIN32) + _sapp_win32_update_window_title(); + #elif defined(_SAPP_LINUX) + _sapp_x11_update_window_title(); + #endif +} + +SOKOL_API_IMPL int sapp_get_num_dropped_files(void) { + SOKOL_ASSERT(_sapp.drop.enabled); + return _sapp.drop.num_files; +} + +SOKOL_API_IMPL const char* sapp_get_dropped_file_path(int index) { + SOKOL_ASSERT(_sapp.drop.enabled); + SOKOL_ASSERT((index >= 0) && (index < _sapp.drop.num_files)); + SOKOL_ASSERT(_sapp.drop.buffer); + if (!_sapp.drop.enabled) { + return ""; + } + if ((index < 0) || (index >= _sapp.drop.max_files)) { + return ""; + } + return (const char*) _sapp_dropped_file_path_ptr(index); +} + +SOKOL_API_IMPL uint32_t sapp_html5_get_dropped_file_size(int index) { + SOKOL_ASSERT(_sapp.drop.enabled); + SOKOL_ASSERT((index >= 0) && (index < _sapp.drop.num_files)); + #if defined(_SAPP_EMSCRIPTEN) + if (!_sapp.drop.enabled) { + return 0; + } + return sapp_js_dropped_file_size(index); + #else + (void)index; + return 0; + #endif +} + +SOKOL_API_IMPL void sapp_html5_fetch_dropped_file(const sapp_html5_fetch_request* request) { + SOKOL_ASSERT(_sapp.drop.enabled); + SOKOL_ASSERT(request); + SOKOL_ASSERT(request->callback); + SOKOL_ASSERT(request->buffer_ptr); + SOKOL_ASSERT(request->buffer_size > 0); + #if defined(_SAPP_EMSCRIPTEN) + const int index = request->dropped_file_index; + sapp_html5_fetch_error error_code = SAPP_HTML5_FETCH_ERROR_NO_ERROR; + if ((index < 0) || (index >= _sapp.drop.num_files)) { + error_code = SAPP_HTML5_FETCH_ERROR_OTHER; + } + if (sapp_html5_get_dropped_file_size(index) > request->buffer_size) { + error_code = SAPP_HTML5_FETCH_ERROR_BUFFER_TOO_SMALL; + } + if (SAPP_HTML5_FETCH_ERROR_NO_ERROR != error_code) { + _sapp_emsc_invoke_fetch_cb(index, + false, // success + (int)error_code, + request->callback, + 0, // fetched_size + request->buffer_ptr, + request->buffer_size, + request->user_data); + } + else { + sapp_js_fetch_dropped_file(index, + request->callback, + request->buffer_ptr, + request->buffer_size, + request->user_data); + } + #else + (void)request; + #endif +} + +SOKOL_API_IMPL const void* sapp_metal_get_device(void) { + SOKOL_ASSERT(_sapp.valid); + #if defined(SOKOL_METAL) + #if defined(_SAPP_MACOS) + const void* obj = (__bridge const void*) _sapp.macos.mtl_device; + #else + const void* obj = (__bridge const void*) _sapp.ios.mtl_device; + #endif + 
SOKOL_ASSERT(obj); + return obj; + #else + return 0; + #endif +} + +SOKOL_API_IMPL const void* sapp_metal_get_renderpass_descriptor(void) { + SOKOL_ASSERT(_sapp.valid); + #if defined(SOKOL_METAL) + #if defined(_SAPP_MACOS) + const void* obj = (__bridge const void*) [_sapp.macos.view currentRenderPassDescriptor]; + #else + const void* obj = (__bridge const void*) [_sapp.ios.view currentRenderPassDescriptor]; + #endif + SOKOL_ASSERT(obj); + return obj; + #else + return 0; + #endif +} + +SOKOL_API_IMPL const void* sapp_metal_get_drawable(void) { + SOKOL_ASSERT(_sapp.valid); + #if defined(SOKOL_METAL) + #if defined(_SAPP_MACOS) + const void* obj = (__bridge const void*) [_sapp.macos.view currentDrawable]; + #else + const void* obj = (__bridge const void*) [_sapp.ios.view currentDrawable]; + #endif + SOKOL_ASSERT(obj); + return obj; + #else + return 0; + #endif +} + +SOKOL_API_IMPL const void* sapp_macos_get_window(void) { + #if defined(_SAPP_MACOS) + const void* obj = (__bridge const void*) _sapp.macos.window; + SOKOL_ASSERT(obj); + return obj; + #else + return 0; + #endif +} + +SOKOL_API_IMPL const void* sapp_ios_get_window(void) { + #if defined(_SAPP_IOS) + const void* obj = (__bridge const void*) _sapp.ios.window; + SOKOL_ASSERT(obj); + return obj; + #else + return 0; + #endif +} + +SOKOL_API_IMPL const void* sapp_d3d11_get_device(void) { + SOKOL_ASSERT(_sapp.valid); + #if defined(SOKOL_D3D11) + return _sapp.d3d11.device; + #else + return 0; + #endif +} + +SOKOL_API_IMPL const void* sapp_d3d11_get_device_context(void) { + SOKOL_ASSERT(_sapp.valid); + #if defined(SOKOL_D3D11) + return _sapp.d3d11.device_context; + #else + return 0; + #endif +} + +SOKOL_API_IMPL const void* sapp_d3d11_get_render_target_view(void) { + SOKOL_ASSERT(_sapp.valid); + #if defined(SOKOL_D3D11) + if (_sapp.d3d11.msaa_rtv) { + return _sapp.d3d11.msaa_rtv; + } + else { + return _sapp.d3d11.rtv; + } + #else + return 0; + #endif +} + +SOKOL_API_IMPL const void* sapp_d3d11_get_depth_stencil_view(void) { + SOKOL_ASSERT(_sapp.valid); + #if defined(SOKOL_D3D11) + return _sapp.d3d11.dsv; + #else + return 0; + #endif +} + +SOKOL_API_IMPL const void* sapp_win32_get_hwnd(void) { + SOKOL_ASSERT(_sapp.valid); + #if defined(_SAPP_WIN32) + return _sapp.win32.hwnd; + #else + return 0; + #endif +} + +SOKOL_API_IMPL const void* sapp_wgpu_get_device(void) { + SOKOL_ASSERT(_sapp.valid); + #if defined(_SAPP_EMSCRIPTEN) && defined(SOKOL_WGPU) + return (const void*) _sapp.emsc.wgpu.device; + #else + return 0; + #endif +} + +SOKOL_API_IMPL const void* sapp_wgpu_get_render_view(void) { + SOKOL_ASSERT(_sapp.valid); + #if defined(_SAPP_EMSCRIPTEN) && defined(SOKOL_WGPU) + if (_sapp.sample_count > 1) { + return (const void*) _sapp.emsc.wgpu.msaa_view; + } + else { + return (const void*) _sapp.emsc.wgpu.swapchain_view; + } + #else + return 0; + #endif +} + +SOKOL_API_IMPL const void* sapp_wgpu_get_resolve_view(void) { + SOKOL_ASSERT(_sapp.valid); + #if defined(_SAPP_EMSCRIPTEN) && defined(SOKOL_WGPU) + if (_sapp.sample_count > 1) { + return (const void*) _sapp.emsc.wgpu.swapchain_view; + } + else { + return 0; + } + #else + return 0; + #endif +} + +SOKOL_API_IMPL const void* sapp_wgpu_get_depth_stencil_view(void) { + SOKOL_ASSERT(_sapp.valid); + #if defined(_SAPP_EMSCRIPTEN) && defined(SOKOL_WGPU) + return (const void*) _sapp.emsc.wgpu.depth_stencil_view; + #else + return 0; + #endif +} + +SOKOL_API_IMPL const void* sapp_android_get_native_activity(void) { + SOKOL_ASSERT(_sapp.valid); + #if defined(_SAPP_ANDROID) + return 
(void*)_sapp.android.activity; + #else + return 0; + #endif +} + +SOKOL_API_IMPL void sapp_html5_ask_leave_site(bool ask) { + _sapp.html5_ask_leave_site = ask; +} + +#endif /* SOKOL_APP_IMPL */ diff --git a/v_windows/v/old/thirdparty/sokol/sokol_app2.h b/v_windows/v/old/thirdparty/sokol/sokol_app2.h new file mode 100644 index 0000000..aa6d654 --- /dev/null +++ b/v_windows/v/old/thirdparty/sokol/sokol_app2.h @@ -0,0 +1,40 @@ + +@implementation MyView2 + +int __v_sokol_inited = 0; + +// Alternative drawRect which calls a frame function with native Cocoa calls +- (void)drawRect:(NSRect)rect { + //puts("drawRect()"); + if (__v_sokol_inited == 0) { + _sapp_call_init(); + __v_sokol_inited = 1; + } + _sapp_call_frame_native(); +} + +//- (BOOL)isOpaque { +// return NO; +//} + +- (BOOL)canBecomeKeyView { + return YES; +} +- (BOOL)acceptsFirstResponder { + return YES; +} + +// - (void)mouseExited:(NSEvent*)event { +// } + +// - (void)mouseDown:(NSEvent*)event { +// } + +- (BOOL)acceptsFirstMouse:(NSEvent *)event { + return YES; +} + + +@end + + diff --git a/v_windows/v/old/thirdparty/sokol/sokol_audio.h b/v_windows/v/old/thirdparty/sokol/sokol_audio.h new file mode 100644 index 0000000..faea817 --- /dev/null +++ b/v_windows/v/old/thirdparty/sokol/sokol_audio.h @@ -0,0 +1,2147 @@ +#if defined(SOKOL_IMPL) && !defined(SOKOL_AUDIO_IMPL) +#define SOKOL_AUDIO_IMPL +#endif +#ifndef SOKOL_AUDIO_INCLUDED +/* + sokol_audio.h -- cross-platform audio-streaming API + + Project URL: https://github.com/floooh/sokol + + Do this: + #define SOKOL_IMPL or + #define SOKOL_AUDIO_IMPL + before you include this file in *one* C or C++ file to create the + implementation. + + Optionally provide the following defines with your own implementations: + + SOKOL_DUMMY_BACKEND - use a dummy backend + SOKOL_ASSERT(c) - your own assert macro (default: assert(c)) + SOKOL_LOG(msg) - your own logging function (default: puts(msg)) + SOKOL_MALLOC(s) - your own malloc() implementation (default: malloc(s)) + SOKOL_FREE(p) - your own free() implementation (default: free(p)) + SOKOL_AUDIO_API_DECL- public function declaration prefix (default: extern) + SOKOL_API_DECL - same as SOKOL_AUDIO_API_DECL + SOKOL_API_IMPL - public function implementation prefix (default: -) + + SAUDIO_RING_MAX_SLOTS - max number of slots in the push-audio ring buffer (default 1024) + SAUDIO_OSX_USE_SYSTEM_HEADERS - define this to force inclusion of system headers on + macOS instead of using embedded CoreAudio declarations + + If sokol_audio.h is compiled as a DLL, define the following before + including the declaration or implementation: + + SOKOL_DLL + + On Windows, SOKOL_DLL will define SOKOL_AUDIO_API_DECL as __declspec(dllexport) + or __declspec(dllimport) as needed. 
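[Editorial sketch, not part of the patched file.] The include convention described at the top of this header (define the implementation macro in exactly one C or C++ file) can be illustrated as follows; the file name my_audio_impl.c is a hypothetical example, and choosing SOKOL_AUDIO_IMPL over the umbrella SOKOL_IMPL is just one of the two documented options:

    /* my_audio_impl.c -- hypothetical name for the single translation unit
       that compiles the sokol_audio.h implementation */
    #define SOKOL_AUDIO_IMPL
    #include "sokol_audio.h"

    /* every other source file includes only the declarations:
       #include "sokol_audio.h"
    */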
+ + Link with the following libraries: + + - on macOS: AudioToolbox + - on iOS: AudioToolbox, AVFoundation + - on Linux: asound + - on Android: link with OpenSLES + - on Windows with MSVC or Clang toolchain: no action needed, libs are defined in-source via pragma-comment-lib + - on Windows with MINGW/MSYS2 gcc: compile with '-mwin32' and link with -lole32 + + FEATURE OVERVIEW + ================ + You provide a mono- or stereo-stream of 32-bit float samples, which + Sokol Audio feeds into platform-specific audio backends: + + - Windows: WASAPI + - Linux: ALSA + - macOS: CoreAudio + - iOS: CoreAudio+AVAudioSession + - emscripten: WebAudio with ScriptProcessorNode + - Android: OpenSLES + + Sokol Audio will not do any buffer mixing or volume control, if you have + multiple independent input streams of sample data you need to perform the + mixing yourself before forwarding the data to Sokol Audio. + + There are two mutually exclusive ways to provide the sample data: + + 1. Callback model: You provide a callback function, which will be called + when Sokol Audio needs new samples. On all platforms except emscripten, + this function is called from a separate thread. + 2. Push model: Your code pushes small blocks of sample data from your + main loop or a thread you created. The pushed data is stored in + a ring buffer where it is pulled by the backend code when + needed. + + The callback model is preferred because it is the most direct way to + feed sample data into the audio backends and also has less moving parts + (there is no ring buffer between your code and the audio backend). + + Sometimes it is not possible to generate the audio stream directly in a + callback function running in a separate thread, for such cases Sokol Audio + provides the push-model as a convenience. + + SOKOL AUDIO, SOLOUD AND MINIAUDIO + ================================= + The WASAPI, ALSA, OpenSLES and CoreAudio backend code has been taken from the + SoLoud library (with some modifications, so any bugs in there are most + likely my fault). If you need a more fully-featured audio solution, check + out SoLoud, it's excellent: + + https://github.com/jarikomppa/soloud + + Another alternative which feature-wise is somewhere inbetween SoLoud and + sokol-audio might be MiniAudio: + + https://github.com/mackron/miniaudio + + GLOSSARY + ======== + - stream buffer: + The internal audio data buffer, usually provided by the backend API. The + size of the stream buffer defines the base latency, smaller buffers have + lower latency but may cause audio glitches. Bigger buffers reduce or + eliminate glitches, but have a higher base latency. + + - stream callback: + Optional callback function which is called by Sokol Audio when it + needs new samples. On Windows, macOS/iOS and Linux, this is called in + a separate thread, on WebAudio, this is called per-frame in the + browser thread. + + - channel: + A discrete track of audio data, currently 1-channel (mono) and + 2-channel (stereo) is supported and tested. + + - sample: + The magnitude of an audio signal on one channel at a given time. In + Sokol Audio, samples are 32-bit float numbers in the range -1.0 to + +1.0. + + - frame: + The tightly packed set of samples for all channels at a given time. + For mono 1 frame is 1 sample. For stereo, 1 frame is 2 samples. 
+ + - packet: + In Sokol Audio, a small chunk of audio data that is moved from the + main thread to the audio streaming thread in order to decouple the + rate at which the main thread provides new audio data, and the + streaming thread consuming audio data. + + WORKING WITH SOKOL AUDIO + ======================== + First call saudio_setup() with your preferred audio playback options. + In most cases you can stick with the default values, these provide + a good balance between low-latency and glitch-free playback + on all audio backends. + + If you want to use the callback-model, you need to provide a stream + callback function either in saudio_desc.stream_cb or saudio_desc.stream_userdata_cb, + otherwise keep both function pointers zero-initialized. + + Use push model and default playback parameters: + + saudio_setup(&(saudio_desc){0}); + + Use stream callback model and default playback parameters: + + saudio_setup(&(saudio_desc){ + .stream_cb = my_stream_callback + }); + + The standard stream callback doesn't have a user data argument, if you want + that, use the alternative stream_userdata_cb and also set the user_data pointer: + + saudio_setup(&(saudio_desc){ + .stream_userdata_cb = my_stream_callback, + .user_data = &my_data + }); + + The following playback parameters can be provided through the + saudio_desc struct: + + General parameters (both for stream-callback and push-model): + + int sample_rate -- the sample rate in Hz, default: 44100 + int num_channels -- number of channels, default: 1 (mono) + int buffer_frames -- number of frames in streaming buffer, default: 2048 + + The stream callback prototype (either with or without userdata): + + void (*stream_cb)(float* buffer, int num_frames, int num_channels) + void (*stream_userdata_cb)(float* buffer, int num_frames, int num_channels, void* user_data) + Function pointer to the user-provide stream callback. + + Push-model parameters: + + int packet_frames -- number of frames in a packet, default: 128 + int num_packets -- number of packets in ring buffer, default: 64 + + The sample_rate and num_channels parameters are only hints for the audio + backend, it isn't guaranteed that those are the values used for actual + playback. + + To get the actual parameters, call the following functions after + saudio_setup(): + + int saudio_sample_rate(void) + int saudio_channels(void); + + It's unlikely that the number of channels will be different than requested, + but a different sample rate isn't uncommon. + + (NOTE: there's an yet unsolved issue when an audio backend might switch + to a different sample rate when switching output devices, for instance + plugging in a bluetooth headset, this case is currently not handled in + Sokol Audio). + + You can check if audio initialization was successful with + saudio_isvalid(). If backend initialization failed for some reason + (for instance when there's no audio device in the machine), this + will return false. Not checking for success won't do any harm, all + Sokol Audio function will silently fail when called after initialization + has failed, so apart from missing audio output, nothing bad will happen. + + Before your application exits, you should call + + saudio_shutdown(); + + This stops the audio thread (on Linux, Windows and macOS/iOS) and + properly shuts down the audio backend. 
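+
+    Putting these calls together, a typical application lifecycle looks
+    roughly like this (a sketch only; my_stream_cb is a placeholder and
+    all error handling is omitted):
+
+        static void my_stream_cb(float* buffer, int num_frames, int num_channels) {
+            // fill with silence here; see THE STREAM CALLBACK MODEL below
+            for (int i = 0; i < num_frames * num_channels; i++) {
+                buffer[i] = 0.0f;
+            }
+        }
+
+        // at startup:
+        saudio_setup(&(saudio_desc){
+            .sample_rate = 44100,
+            .num_channels = 2,
+            .stream_cb = my_stream_cb
+        });
+        if (!saudio_isvalid()) {
+            // no audio device or backend init failed, continue without sound
+        }
+        // the backend may have picked different playback parameters:
+        int actual_rate = saudio_sample_rate();
+        int actual_channels = saudio_channels();
+
+        // ...and before the application exits:
+        saudio_shutdown();
+
+    The sample generation code should use actual_rate and actual_channels
+    rather than the requested values.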
+ + THE STREAM CALLBACK MODEL + ========================= + To use Sokol Audio in stream-callback-mode, provide a callback function + like this in the saudio_desc struct when calling saudio_setup(): + + void stream_cb(float* buffer, int num_frames, int num_channels) { + ... + } + + Or the alternative version with a user-data argument: + + void stream_userdata_cb(float* buffer, int num_frames, int num_channels, void* user_data) { + my_data_t* my_data = (my_data_t*) user_data; + ... + } + + The job of the callback function is to fill the *buffer* with 32-bit + float sample values. + + To output silence, fill the buffer with zeros: + + void stream_cb(float* buffer, int num_frames, int num_channels) { + const int num_samples = num_frames * num_channels; + for (int i = 0; i < num_samples; i++) { + buffer[i] = 0.0f; + } + } + + For stereo output (num_channels == 2), the samples for the left + and right channel are interleaved: + + void stream_cb(float* buffer, int num_frames, int num_channels) { + assert(2 == num_channels); + for (int i = 0; i < num_frames; i++) { + buffer[2*i + 0] = ...; // left channel + buffer[2*i + 1] = ...; // right channel + } + } + + Please keep in mind that the stream callback function is running in a + separate thread, if you need to share data with the main thread you need + to take care yourself to make the access to the shared data thread-safe! + + THE PUSH MODEL + ============== + To use the push-model for providing audio data, simply don't set (keep + zero-initialized) the stream_cb field in the saudio_desc struct when + calling saudio_setup(). + + To provide sample data with the push model, call the saudio_push() + function at regular intervals (for instance once per frame). You can + call the saudio_expect() function to ask Sokol Audio how much room is + in the ring buffer, but if you provide a continuous stream of data + at the right sample rate, saudio_expect() isn't required (it's a simple + way to sync/throttle your sample generation code with the playback + rate though). + + With saudio_push() you may need to maintain your own intermediate sample + buffer, since pushing individual sample values isn't very efficient. + The following example is from the MOD player sample in + sokol-samples (https://github.com/floooh/sokol-samples): + + const int num_frames = saudio_expect(); + if (num_frames > 0) { + const int num_samples = num_frames * saudio_channels(); + read_samples(flt_buf, num_samples); + saudio_push(flt_buf, num_frames); + } + + Another option is to ignore saudio_expect(), and just push samples as they + are generated in small batches. In this case you *need* to generate the + samples at the right sample rate: + + The following example is taken from the Tiny Emulators project + (https://github.com/floooh/chips-test), this is for mono playback, + so (num_samples == num_frames): + + // tick the sound generator + if (ay38910_tick(&sys->psg)) { + // new sample is ready + sys->sample_buffer[sys->sample_pos++] = sys->psg.sample; + if (sys->sample_pos == sys->num_samples) { + // new sample packet is ready + saudio_push(sys->sample_buffer, sys->num_samples); + sys->sample_pos = 0; + } + } + + THE WEBAUDIO BACKEND + ==================== + The WebAudio backend is currently using a ScriptProcessorNode callback to + feed the sample data into WebAudio. ScriptProcessorNode has been + deprecated for a while because it is running from the main thread, with + the default initialization parameters it works 'pretty well' though. 
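+
+    On the native backends, in contrast, the stream callback runs on its
+    own thread, so any state shared between it and the rest of the
+    application must be accessed in a thread-safe way (Sokol Audio does
+    not prescribe how). For simple parameters a C11 atomic is usually
+    sufficient; a sketch with placeholder names:
+
+        #include <stdatomic.h>
+
+        static _Atomic float my_gain = 1.0f;    // written by the main thread
+        static int my_phase = 0;                // only touched by the callback
+
+        static void my_stream_cb(float* buffer, int num_frames, int num_channels) {
+            const float gain = atomic_load_explicit(&my_gain, memory_order_relaxed);
+            for (int i = 0; i < num_frames; i++) {
+                // naive square wave at roughly 220 Hz for a 44100 Hz stream
+                float s = ((my_phase++ / 100) & 1) ? gain : -gain;
+                for (int c = 0; c < num_channels; c++) {
+                    buffer[i * num_channels + c] = s;
+                }
+            }
+        }
+
+        // main thread, e.g. in reaction to user input:
+        atomic_store_explicit(&my_gain, 0.5f, memory_order_relaxed);
+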
+ Ultimately Sokol Audio will use Audio Worklets, but this requires a few + more things to fall into place (Audio Worklets implemented everywhere, + SharedArrayBuffers enabled again, and I need to figure out a 'low-cost' + solution in terms of implementation effort, since Audio Worklets are + a lot more complex than ScriptProcessorNode if the audio data needs to come + from the main thread). + + The WebAudio backend is automatically selected when compiling for + emscripten (__EMSCRIPTEN__ define exists). + + https://developers.google.com/web/updates/2017/12/audio-worklet + https://developers.google.com/web/updates/2018/06/audio-worklet-design-pattern + + "Blob URLs": https://www.html5rocks.com/en/tutorials/workers/basics/ + + THE COREAUDIO BACKEND + ===================== + The CoreAudio backend is selected on macOS and iOS (__APPLE__ is defined). + Since the CoreAudio API is implemented in C (not Objective-C) on macOS the + implementation part of Sokol Audio can be included into a C source file. + + However on iOS, Sokol Audio must be compiled as Objective-C due to it's + reliance on the AVAudioSession object. The iOS code path support both + being compiled with or without ARC (Automatic Reference Counting). + + For thread synchronisation, the CoreAudio backend will use the + pthread_mutex_* functions. + + The incoming floating point samples will be directly forwarded to + CoreAudio without further conversion. + + macOS and iOS applications that use Sokol Audio need to link with + the AudioToolbox framework. + + THE WASAPI BACKEND + ================== + The WASAPI backend is automatically selected when compiling on Windows + (_WIN32 is defined). + + For thread synchronisation a Win32 critical section is used. + + WASAPI may use a different size for its own streaming buffer then requested, + so the base latency may be slightly bigger. The current backend implementation + converts the incoming floating point sample values to signed 16-bit + integers. + + The required Windows system DLLs are linked with #pragma comment(lib, ...), + so you shouldn't need to add additional linker libs in the build process + (otherwise this is a bug which should be fixed in sokol_audio.h). + + THE ALSA BACKEND + ================ + The ALSA backend is automatically selected when compiling on Linux + ('linux' is defined). + + For thread synchronisation, the pthread_mutex_* functions are used. + + Samples are directly forwarded to ALSA in 32-bit float format, no + further conversion is taking place. + + You need to link with the 'asound' library, and the + header must be present (usually both are installed with some sort + of ALSA development package). + + LICENSE + ======= + + zlib/libpng license + + Copyright (c) 2018 Andre Weissflog + + This software is provided 'as-is', without any express or implied warranty. + In no event will the authors be held liable for any damages arising from the + use of this software. + + Permission is granted to anyone to use this software for any purpose, + including commercial applications, and to alter it and redistribute it + freely, subject to the following restrictions: + + 1. The origin of this software must not be misrepresented; you must not + claim that you wrote the original software. If you use this software in a + product, an acknowledgment in the product documentation would be + appreciated but is not required. + + 2. Altered source versions must be plainly marked as such, and must not + be misrepresented as being the original software. + + 3. 
This notice may not be removed or altered from any source + distribution. +*/ +#define SOKOL_AUDIO_INCLUDED (1) +#include +#include + +#if defined(SOKOL_API_DECL) && !defined(SOKOL_AUDIO_API_DECL) +#define SOKOL_AUDIO_API_DECL SOKOL_API_DECL +#endif +#ifndef SOKOL_AUDIO_API_DECL +#if defined(_WIN32) && defined(SOKOL_DLL) && defined(SOKOL_AUDIO_IMPL) +#define SOKOL_AUDIO_API_DECL __declspec(dllexport) +#elif defined(_WIN32) && defined(SOKOL_DLL) +#define SOKOL_AUDIO_API_DECL __declspec(dllimport) +#else +#define SOKOL_AUDIO_API_DECL extern +#endif +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct saudio_desc { + int sample_rate; /* requested sample rate */ + int num_channels; /* number of channels, default: 1 (mono) */ + int buffer_frames; /* number of frames in streaming buffer */ + int packet_frames; /* number of frames in a packet */ + int num_packets; /* number of packets in packet queue */ + void (*stream_cb)(float* buffer, int num_frames, int num_channels); /* optional streaming callback (no user data) */ + void (*stream_userdata_cb)(float* buffer, int num_frames, int num_channels, void* user_data); /*... and with user data */ + void* user_data; /* optional user data argument for stream_userdata_cb */ +} saudio_desc; + +/* setup sokol-audio */ +SOKOL_AUDIO_API_DECL void saudio_setup(const saudio_desc* desc); +/* shutdown sokol-audio */ +SOKOL_AUDIO_API_DECL void saudio_shutdown(void); +/* true after setup if audio backend was successfully initialized */ +SOKOL_AUDIO_API_DECL bool saudio_isvalid(void); +/* return the saudio_desc.user_data pointer */ +SOKOL_AUDIO_API_DECL void* saudio_userdata(void); +/* return a copy of the original saudio_desc struct */ +SOKOL_AUDIO_API_DECL saudio_desc saudio_query_desc(void); +/* actual sample rate */ +SOKOL_AUDIO_API_DECL int saudio_sample_rate(void); +/* return actual backend buffer size in number of frames */ +SOKOL_AUDIO_API_DECL int saudio_buffer_frames(void); +/* actual number of channels */ +SOKOL_AUDIO_API_DECL int saudio_channels(void); +/* get current number of frames to fill packet queue */ +SOKOL_AUDIO_API_DECL int saudio_expect(void); +/* push sample frames from main thread, returns number of frames actually pushed */ +SOKOL_AUDIO_API_DECL int saudio_push(const float* frames, int num_frames); + +#ifdef __cplusplus +} /* extern "C" */ + +/* reference-based equivalents for c++ */ +inline void saudio_setup(const saudio_desc& desc) { return saudio_setup(&desc); } + +#endif +#endif // SOKOL_AUDIO_INCLUDED + +/*=== IMPLEMENTATION =========================================================*/ +#ifdef SOKOL_AUDIO_IMPL +#define SOKOL_AUDIO_IMPL_INCLUDED (1) +#include // memset, memcpy +#include // size_t + +#ifndef SOKOL_API_IMPL + #define SOKOL_API_IMPL +#endif +#ifndef SOKOL_DEBUG + #ifndef NDEBUG + #define SOKOL_DEBUG (1) + #endif +#endif +#ifndef SOKOL_ASSERT + #include + #define SOKOL_ASSERT(c) assert(c) +#endif +#ifndef SOKOL_MALLOC + #include + #define SOKOL_MALLOC(s) malloc(s) + #define SOKOL_FREE(p) free(p) +#endif +#ifndef SOKOL_LOG + #ifdef SOKOL_DEBUG + #include + #define SOKOL_LOG(s) { SOKOL_ASSERT(s); puts(s); } + #else + #define SOKOL_LOG(s) + #endif +#endif + +#ifndef _SOKOL_PRIVATE + #if defined(__GNUC__) || defined(__clang__) + #define _SOKOL_PRIVATE __attribute__((unused)) static + #else + #define _SOKOL_PRIVATE static + #endif +#endif + +#ifndef _SOKOL_UNUSED + #define _SOKOL_UNUSED(x) (void)(x) +#endif + +// platform detection defines +#if defined(SOKOL_DUMMY_BACKEND) + // nothing +#elif defined(__APPLE__) + 
#define _SAUDIO_APPLE (1) + #include + #if defined(TARGET_OS_IPHONE) && TARGET_OS_IPHONE + #define _SAUDIO_IOS (1) + #else + #define _SAUDIO_MACOS (1) + #endif +#elif defined(__EMSCRIPTEN__) + #define _SAUDIO_EMSCRIPTEN +#elif defined(_WIN32) + #define _SAUDIO_WINDOWS (1) + #include + #if (defined(WINAPI_FAMILY_PARTITION) && !WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP)) + #define _SAUDIO_UWP (1) + #else + #define _SAUDIO_WIN32 (1) + #endif +#elif defined(__ANDROID__) + #define _SAUDIO_ANDROID (1) +#elif defined(__linux__) || defined(__unix__) + #define _SAUDIO_LINUX (1) +#else +#error "sokol_audio.h: Unknown platform" +#endif + +// platform-specific headers and definitions +#if defined(SOKOL_DUMMY_BACKEND) + #define _SAUDIO_NOTHREADS (1) +#elif defined(_SAUDIO_WINDOWS) + #define _SAUDIO_WINTHREADS (1) + #ifndef WIN32_LEAN_AND_MEAN + #define WIN32_LEAN_AND_MEAN + #endif + #ifndef NOMINMAX + #define NOMINMAX + #endif + #include + #include + #if defined(_SAUDIO_UWP) + #pragma comment (lib, "WindowsApp") + #else + #pragma comment (lib, "kernel32") + #pragma comment (lib, "ole32") + #endif + #ifndef CINTERFACE + #define CINTERFACE + #endif + #ifndef COBJMACROS + #define COBJMACROS + #endif + #ifndef CONST_VTABLE + #define CONST_VTABLE + #endif + #include + #include + static const IID _saudio_IID_IAudioClient = { 0x1cb9ad4c, 0xdbfa, 0x4c32, { 0xb1, 0x78, 0xc2, 0xf5, 0x68, 0xa7, 0x03, 0xb2 } }; + static const IID _saudio_IID_IMMDeviceEnumerator = { 0xa95664d2, 0x9614, 0x4f35, { 0xa7, 0x46, 0xde, 0x8d, 0xb6, 0x36, 0x17, 0xe6 } }; + static const CLSID _saudio_CLSID_IMMDeviceEnumerator = { 0xbcde0395, 0xe52f, 0x467c, { 0x8e, 0x3d, 0xc4, 0x57, 0x92, 0x91, 0x69, 0x2e } }; + static const IID _saudio_IID_IAudioRenderClient = { 0xf294acfc, 0x3146, 0x4483,{ 0xa7, 0xbf, 0xad, 0xdc, 0xa7, 0xc2, 0x60, 0xe2 } }; + static const IID _saudio_IID_Devinterface_Audio_Render = { 0xe6327cad, 0xdcec, 0x4949, {0xae, 0x8a, 0x99, 0x1e, 0x97, 0x6a, 0x79, 0xd2 } }; + static const IID _saudio_IID_IActivateAudioInterface_Completion_Handler = { 0x94ea2b94, 0xe9cc, 0x49e0, {0xc0, 0xff, 0xee, 0x64, 0xca, 0x8f, 0x5b, 0x90} }; + #if defined(__cplusplus) + #define _SOKOL_AUDIO_WIN32COM_ID(x) (x) + #else + #define _SOKOL_AUDIO_WIN32COM_ID(x) (&x) + #endif + /* fix for Visual Studio 2015 SDKs */ + #ifndef AUDCLNT_STREAMFLAGS_AUTOCONVERTPCM + #define AUDCLNT_STREAMFLAGS_AUTOCONVERTPCM 0x80000000 + #endif + #ifndef AUDCLNT_STREAMFLAGS_SRC_DEFAULT_QUALITY + #define AUDCLNT_STREAMFLAGS_SRC_DEFAULT_QUALITY 0x08000000 + #endif + #ifdef _MSC_VER + #pragma warning(push) + #pragma warning(disable:4505) /* unreferenced local function has been removed */ + #endif +#elif defined(_SAUDIO_APPLE) + #define _SAUDIO_PTHREADS (1) + #include + #if defined(_SAUDIO_IOS) + // always use system headers on iOS (for now at least) + #if !defined(SAUDIO_OSX_USE_SYSTEM_HEADERS) + #define SAUDIO_OSX_USE_SYSTEM_HEADERS (1) + #endif + #if !defined(__cplusplus) + #if __has_feature(objc_arc) && !__has_feature(objc_arc_fields) + #error "sokol_audio.h on iOS requires __has_feature(objc_arc_field) if ARC is enabled (use a more recent compiler version)" + #endif + #endif + #include + #include + #else + #if defined(SAUDIO_OSX_USE_SYSTEM_HEADERS) + #include + #endif + #endif +#elif defined(_SAUDIO_ANDROID) + #define _SAUDIO_PTHREADS (1) + #include + #include "SLES/OpenSLES_Android.h" +#elif defined(_SAUDIO_LINUX) + #define _SAUDIO_PTHREADS (1) + #include + #define ALSA_PCM_NEW_HW_PARAMS_API + #include +#elif defined(__EMSCRIPTEN__) + #define _SAUDIO_NOTHREADS (1) + 
#include +#endif + +#define _saudio_def(val, def) (((val) == 0) ? (def) : (val)) +#define _saudio_def_flt(val, def) (((val) == 0.0f) ? (def) : (val)) + +#define _SAUDIO_DEFAULT_SAMPLE_RATE (44100) +#define _SAUDIO_DEFAULT_BUFFER_FRAMES (2048) +#define _SAUDIO_DEFAULT_PACKET_FRAMES (128) +#define _SAUDIO_DEFAULT_NUM_PACKETS ((_SAUDIO_DEFAULT_BUFFER_FRAMES/_SAUDIO_DEFAULT_PACKET_FRAMES)*4) + +#ifndef SAUDIO_RING_MAX_SLOTS +#define SAUDIO_RING_MAX_SLOTS (1024) +#endif + +/*=== MUTEX WRAPPER DECLARATIONS =============================================*/ +#if defined(_SAUDIO_PTHREADS) + +typedef struct { + pthread_mutex_t mutex; +} _saudio_mutex_t; + +#elif defined(_SAUDIO_WINTHREADS) + +typedef struct { + CRITICAL_SECTION critsec; +} _saudio_mutex_t; + +#elif defined(_SAUDIO_NOTHREADS) + +typedef struct { + int dummy_mutex; +} _saudio_mutex_t; + +#endif + +/*=== DUMMY BACKEND DECLARATIONS =============================================*/ +#if defined(SOKOL_DUMMY_BACKEND) + +typedef struct { + int dummy_backend; +} _saudio_backend_t; + +/*=== COREAUDIO BACKEND DECLARATIONS =========================================*/ +#elif defined(_SAUDIO_APPLE) + +#if defined(SAUDIO_OSX_USE_SYSTEM_HEADERS) + +typedef AudioQueueRef _saudio_AudioQueueRef; +typedef AudioQueueBufferRef _saudio_AudioQueueBufferRef; +typedef AudioStreamBasicDescription _saudio_AudioStreamBasicDescription; +typedef OSStatus _saudio_OSStatus; + +#define _saudio_kAudioFormatLinearPCM (kAudioFormatLinearPCM) +#define _saudio_kLinearPCMFormatFlagIsFloat (kLinearPCMFormatFlagIsFloat) +#define _saudio_kAudioFormatFlagIsPacked (kAudioFormatFlagIsPacked) + +#else + +// embedded AudioToolbox declarations +typedef uint32_t _saudio_AudioFormatID; +typedef uint32_t _saudio_AudioFormatFlags; +typedef int32_t _saudio_OSStatus; +typedef uint32_t _saudio_SMPTETimeType; +typedef uint32_t _saudio_SMPTETimeFlags; +typedef uint32_t _saudio_AudioTimeStampFlags; +typedef void* _saudio_CFRunLoopRef; +typedef void* _saudio_CFStringRef; +typedef void* _saudio_AudioQueueRef; + +#define _saudio_kAudioFormatLinearPCM ('lpcm') +#define _saudio_kLinearPCMFormatFlagIsFloat (1U << 0) +#define _saudio_kAudioFormatFlagIsPacked (1U << 3) + +typedef struct _saudio_AudioStreamBasicDescription { + double mSampleRate; + _saudio_AudioFormatID mFormatID; + _saudio_AudioFormatFlags mFormatFlags; + uint32_t mBytesPerPacket; + uint32_t mFramesPerPacket; + uint32_t mBytesPerFrame; + uint32_t mChannelsPerFrame; + uint32_t mBitsPerChannel; + uint32_t mReserved; +} _saudio_AudioStreamBasicDescription; + +typedef struct _saudio_AudioStreamPacketDescription { + int64_t mStartOffset; + uint32_t mVariableFramesInPacket; + uint32_t mDataByteSize; +} _saudio_AudioStreamPacketDescription; + +typedef struct _saudio_SMPTETime { + int16_t mSubframes; + int16_t mSubframeDivisor; + uint32_t mCounter; + _saudio_SMPTETimeType mType; + _saudio_SMPTETimeFlags mFlags; + int16_t mHours; + int16_t mMinutes; + int16_t mSeconds; + int16_t mFrames; +} _saudio_SMPTETime; + +typedef struct _saudio_AudioTimeStamp { + double mSampleTime; + uint64_t mHostTime; + double mRateScalar; + uint64_t mWordClockTime; + _saudio_SMPTETime mSMPTETime; + _saudio_AudioTimeStampFlags mFlags; + uint32_t mReserved; +} _saudio_AudioTimeStamp; + +typedef struct _saudio_AudioQueueBuffer { + const uint32_t mAudioDataBytesCapacity; + void* const mAudioData; + uint32_t mAudioDataByteSize; + void * mUserData; + const uint32_t mPacketDescriptionCapacity; + _saudio_AudioStreamPacketDescription* const mPacketDescriptions; + uint32_t 
mPacketDescriptionCount; +} _saudio_AudioQueueBuffer; +typedef _saudio_AudioQueueBuffer* _saudio_AudioQueueBufferRef; + +typedef void (*_saudio_AudioQueueOutputCallback)(void* user_data, _saudio_AudioQueueRef inAQ, _saudio_AudioQueueBufferRef inBuffer); + +extern _saudio_OSStatus AudioQueueNewOutput(const _saudio_AudioStreamBasicDescription* inFormat, _saudio_AudioQueueOutputCallback inCallbackProc, void* inUserData, _saudio_CFRunLoopRef inCallbackRunLoop, _saudio_CFStringRef inCallbackRunLoopMode, uint32_t inFlags, _saudio_AudioQueueRef* outAQ); +extern _saudio_OSStatus AudioQueueDispose(_saudio_AudioQueueRef inAQ, bool inImmediate); +extern _saudio_OSStatus AudioQueueAllocateBuffer(_saudio_AudioQueueRef inAQ, uint32_t inBufferByteSize, _saudio_AudioQueueBufferRef* outBuffer); +extern _saudio_OSStatus AudioQueueEnqueueBuffer(_saudio_AudioQueueRef inAQ, _saudio_AudioQueueBufferRef inBuffer, uint32_t inNumPacketDescs, const _saudio_AudioStreamPacketDescription* inPacketDescs); +extern _saudio_OSStatus AudioQueueStart(_saudio_AudioQueueRef inAQ, const _saudio_AudioTimeStamp * inStartTime); +extern _saudio_OSStatus AudioQueueStop(_saudio_AudioQueueRef inAQ, bool inImmediate); +#endif // SAUDIO_OSX_USE_SYSTEM_HEADERS + +typedef struct { + _saudio_AudioQueueRef ca_audio_queue; + #if defined(_SAUDIO_IOS) + id ca_interruption_handler; + #endif +} _saudio_backend_t; + +/*=== ALSA BACKEND DECLARATIONS ==============================================*/ +#elif defined(_SAUDIO_LINUX) + +typedef struct { + snd_pcm_t* device; + float* buffer; + int buffer_byte_size; + int buffer_frames; + pthread_t thread; + bool thread_stop; +} _saudio_backend_t; + +/*=== OpenSLES BACKEND DECLARATIONS ==============================================*/ +#elif defined(_SAUDIO_ANDROID) + +#define SAUDIO_NUM_BUFFERS 2 + +typedef struct { + pthread_mutex_t mutex; + pthread_cond_t cond; + int count; +} _saudio_semaphore_t; + +typedef struct { + SLObjectItf engine_obj; + SLEngineItf engine; + SLObjectItf output_mix_obj; + SLVolumeItf output_mix_vol; + SLDataLocator_OutputMix out_locator; + SLDataSink dst_data_sink; + SLObjectItf player_obj; + SLPlayItf player; + SLVolumeItf player_vol; + SLAndroidSimpleBufferQueueItf player_buffer_queue; + + int16_t* output_buffers[SAUDIO_NUM_BUFFERS]; + float* src_buffer; + int active_buffer; + _saudio_semaphore_t buffer_sem; + pthread_t thread; + volatile int thread_stop; + SLDataLocator_AndroidSimpleBufferQueue in_locator; +} _saudio_backend_t; + +/*=== WASAPI BACKEND DECLARATIONS ============================================*/ +#elif defined(_SAUDIO_WINDOWS) + +typedef struct { + HANDLE thread_handle; + HANDLE buffer_end_event; + bool stop; + UINT32 dst_buffer_frames; + int src_buffer_frames; + int src_buffer_byte_size; + int src_buffer_pos; + float* src_buffer; +} _saudio_wasapi_thread_data_t; + +typedef struct { + #if defined(_SAUDIO_UWP) + LPOLESTR interface_activation_audio_interface_uid_string; + IActivateAudioInterfaceAsyncOperation* interface_activation_operation; + BOOL interface_activation_success; + HANDLE interface_activation_mutex; + #else + IMMDeviceEnumerator* device_enumerator; + IMMDevice* device; + #endif + IAudioClient* audio_client; + IAudioRenderClient* render_client; + int si16_bytes_per_frame; + _saudio_wasapi_thread_data_t thread; +} _saudio_backend_t; + +/*=== WEBAUDIO BACKEND DECLARATIONS ==========================================*/ +#elif defined(_SAUDIO_EMSCRIPTEN) + +typedef struct { + uint8_t* buffer; +} _saudio_backend_t; + +#else +#error "unknown platform" 
+#endif + +/*=== GENERAL DECLARATIONS ===================================================*/ + +/* a ringbuffer structure */ +typedef struct { + int head; // next slot to write to + int tail; // next slot to read from + int num; // number of slots in queue + int queue[SAUDIO_RING_MAX_SLOTS]; +} _saudio_ring_t; + +/* a packet FIFO structure */ +typedef struct { + bool valid; + int packet_size; /* size of a single packets in bytes(!) */ + int num_packets; /* number of packet in fifo */ + uint8_t* base_ptr; /* packet memory chunk base pointer (dynamically allocated) */ + int cur_packet; /* current write-packet */ + int cur_offset; /* current byte-offset into current write packet */ + _saudio_mutex_t mutex; /* mutex for thread-safe access */ + _saudio_ring_t read_queue; /* buffers with data, ready to be streamed */ + _saudio_ring_t write_queue; /* empty buffers, ready to be pushed to */ +} _saudio_fifo_t; + +/* sokol-audio state */ +typedef struct { + bool valid; + void (*stream_cb)(float* buffer, int num_frames, int num_channels); + void (*stream_userdata_cb)(float* buffer, int num_frames, int num_channels, void* user_data); + void* user_data; + int sample_rate; /* sample rate */ + int buffer_frames; /* number of frames in streaming buffer */ + int bytes_per_frame; /* filled by backend */ + int packet_frames; /* number of frames in a packet */ + int num_packets; /* number of packets in packet queue */ + int num_channels; /* actual number of channels */ + saudio_desc desc; + _saudio_fifo_t fifo; + _saudio_backend_t backend; +} _saudio_state_t; + +static _saudio_state_t _saudio; + +_SOKOL_PRIVATE bool _saudio_has_callback(void) { + return (_saudio.stream_cb || _saudio.stream_userdata_cb); +} + +_SOKOL_PRIVATE void _saudio_stream_callback(float* buffer, int num_frames, int num_channels) { + if (_saudio.stream_cb) { + _saudio.stream_cb(buffer, num_frames, num_channels); + } + else if (_saudio.stream_userdata_cb) { + _saudio.stream_userdata_cb(buffer, num_frames, num_channels, _saudio.user_data); + } +} + +/*=== MUTEX IMPLEMENTATION ===================================================*/ +#if defined(_SAUDIO_NOTHREADS) + +_SOKOL_PRIVATE void _saudio_mutex_init(_saudio_mutex_t* m) { (void)m; } +_SOKOL_PRIVATE void _saudio_mutex_destroy(_saudio_mutex_t* m) { (void)m; } +_SOKOL_PRIVATE void _saudio_mutex_lock(_saudio_mutex_t* m) { (void)m; } +_SOKOL_PRIVATE void _saudio_mutex_unlock(_saudio_mutex_t* m) { (void)m; } + +#elif defined(_SAUDIO_PTHREADS) + +_SOKOL_PRIVATE void _saudio_mutex_init(_saudio_mutex_t* m) { + pthread_mutexattr_t attr; + pthread_mutexattr_init(&attr); + pthread_mutex_init(&m->mutex, &attr); +} + +_SOKOL_PRIVATE void _saudio_mutex_destroy(_saudio_mutex_t* m) { + pthread_mutex_destroy(&m->mutex); +} + +_SOKOL_PRIVATE void _saudio_mutex_lock(_saudio_mutex_t* m) { + pthread_mutex_lock(&m->mutex); +} + +_SOKOL_PRIVATE void _saudio_mutex_unlock(_saudio_mutex_t* m) { + pthread_mutex_unlock(&m->mutex); +} + +#elif defined(_SAUDIO_WINTHREADS) + +_SOKOL_PRIVATE void _saudio_mutex_init(_saudio_mutex_t* m) { + InitializeCriticalSection(&m->critsec); +} + +_SOKOL_PRIVATE void _saudio_mutex_destroy(_saudio_mutex_t* m) { + DeleteCriticalSection(&m->critsec); +} + +_SOKOL_PRIVATE void _saudio_mutex_lock(_saudio_mutex_t* m) { + EnterCriticalSection(&m->critsec); +} + +_SOKOL_PRIVATE void _saudio_mutex_unlock(_saudio_mutex_t* m) { + LeaveCriticalSection(&m->critsec); +} +#else +#error "unknown platform!" 
+#endif + +/*=== RING-BUFFER QUEUE IMPLEMENTATION =======================================*/ +_SOKOL_PRIVATE int _saudio_ring_idx(_saudio_ring_t* ring, int i) { + return (i % ring->num); +} + +_SOKOL_PRIVATE void _saudio_ring_init(_saudio_ring_t* ring, int num_slots) { + SOKOL_ASSERT((num_slots + 1) <= SAUDIO_RING_MAX_SLOTS); + ring->head = 0; + ring->tail = 0; + /* one slot reserved to detect 'full' vs 'empty' */ + ring->num = num_slots + 1; +} + +_SOKOL_PRIVATE bool _saudio_ring_full(_saudio_ring_t* ring) { + return _saudio_ring_idx(ring, ring->head + 1) == ring->tail; +} + +_SOKOL_PRIVATE bool _saudio_ring_empty(_saudio_ring_t* ring) { + return ring->head == ring->tail; +} + +_SOKOL_PRIVATE int _saudio_ring_count(_saudio_ring_t* ring) { + int count; + if (ring->head >= ring->tail) { + count = ring->head - ring->tail; + } + else { + count = (ring->head + ring->num) - ring->tail; + } + SOKOL_ASSERT(count < ring->num); + return count; +} + +_SOKOL_PRIVATE void _saudio_ring_enqueue(_saudio_ring_t* ring, int val) { + SOKOL_ASSERT(!_saudio_ring_full(ring)); + ring->queue[ring->head] = val; + ring->head = _saudio_ring_idx(ring, ring->head + 1); +} + +_SOKOL_PRIVATE int _saudio_ring_dequeue(_saudio_ring_t* ring) { + SOKOL_ASSERT(!_saudio_ring_empty(ring)); + int val = ring->queue[ring->tail]; + ring->tail = _saudio_ring_idx(ring, ring->tail + 1); + return val; +} + +/*--- a packet fifo for queueing audio data from main thread ----------------*/ +_SOKOL_PRIVATE void _saudio_fifo_init_mutex(_saudio_fifo_t* fifo) { + /* this must be called before initializing both the backend and the fifo itself! */ + _saudio_mutex_init(&fifo->mutex); +} + +_SOKOL_PRIVATE void _saudio_fifo_init(_saudio_fifo_t* fifo, int packet_size, int num_packets) { + /* NOTE: there's a chicken-egg situation during the init phase where the + streaming thread must be started before the fifo is actually initialized, + thus the fifo init must already be protected from access by the fifo_read() func. 
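+        (this is also why _saudio_fifo_init_mutex() is split out into its own
+        step that runs before the backend, and with it the streaming thread,
+        is started)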
+ */ + _saudio_mutex_lock(&fifo->mutex); + SOKOL_ASSERT((packet_size > 0) && (num_packets > 0)); + fifo->packet_size = packet_size; + fifo->num_packets = num_packets; + fifo->base_ptr = (uint8_t*) SOKOL_MALLOC((size_t)(packet_size * num_packets)); + SOKOL_ASSERT(fifo->base_ptr); + fifo->cur_packet = -1; + fifo->cur_offset = 0; + _saudio_ring_init(&fifo->read_queue, num_packets); + _saudio_ring_init(&fifo->write_queue, num_packets); + for (int i = 0; i < num_packets; i++) { + _saudio_ring_enqueue(&fifo->write_queue, i); + } + SOKOL_ASSERT(_saudio_ring_full(&fifo->write_queue)); + SOKOL_ASSERT(_saudio_ring_count(&fifo->write_queue) == num_packets); + SOKOL_ASSERT(_saudio_ring_empty(&fifo->read_queue)); + SOKOL_ASSERT(_saudio_ring_count(&fifo->read_queue) == 0); + fifo->valid = true; + _saudio_mutex_unlock(&fifo->mutex); +} + +_SOKOL_PRIVATE void _saudio_fifo_shutdown(_saudio_fifo_t* fifo) { + SOKOL_ASSERT(fifo->base_ptr); + SOKOL_FREE(fifo->base_ptr); + fifo->base_ptr = 0; + fifo->valid = false; + _saudio_mutex_destroy(&fifo->mutex); +} + +_SOKOL_PRIVATE int _saudio_fifo_writable_bytes(_saudio_fifo_t* fifo) { + _saudio_mutex_lock(&fifo->mutex); + int num_bytes = (_saudio_ring_count(&fifo->write_queue) * fifo->packet_size); + if (fifo->cur_packet != -1) { + num_bytes += fifo->packet_size - fifo->cur_offset; + } + _saudio_mutex_unlock(&fifo->mutex); + SOKOL_ASSERT((num_bytes >= 0) && (num_bytes <= (fifo->num_packets * fifo->packet_size))); + return num_bytes; +} + +/* write new data to the write queue, this is called from main thread */ +_SOKOL_PRIVATE int _saudio_fifo_write(_saudio_fifo_t* fifo, const uint8_t* ptr, int num_bytes) { + /* returns the number of bytes written, this will be smaller then requested + if the write queue runs full + */ + int all_to_copy = num_bytes; + while (all_to_copy > 0) { + /* need to grab a new packet? 
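+           (cur_packet == -1 is used as the 'no current write packet' marker)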
*/ + if (fifo->cur_packet == -1) { + _saudio_mutex_lock(&fifo->mutex); + if (!_saudio_ring_empty(&fifo->write_queue)) { + fifo->cur_packet = _saudio_ring_dequeue(&fifo->write_queue); + } + _saudio_mutex_unlock(&fifo->mutex); + SOKOL_ASSERT(fifo->cur_offset == 0); + } + /* append data to current write packet */ + if (fifo->cur_packet != -1) { + int to_copy = all_to_copy; + const int max_copy = fifo->packet_size - fifo->cur_offset; + if (to_copy > max_copy) { + to_copy = max_copy; + } + uint8_t* dst = fifo->base_ptr + fifo->cur_packet * fifo->packet_size + fifo->cur_offset; + memcpy(dst, ptr, (size_t)to_copy); + ptr += to_copy; + fifo->cur_offset += to_copy; + all_to_copy -= to_copy; + SOKOL_ASSERT(fifo->cur_offset <= fifo->packet_size); + SOKOL_ASSERT(all_to_copy >= 0); + } + else { + /* early out if we're starving */ + int bytes_copied = num_bytes - all_to_copy; + SOKOL_ASSERT((bytes_copied >= 0) && (bytes_copied < num_bytes)); + return bytes_copied; + } + /* if write packet is full, push to read queue */ + if (fifo->cur_offset == fifo->packet_size) { + _saudio_mutex_lock(&fifo->mutex); + _saudio_ring_enqueue(&fifo->read_queue, fifo->cur_packet); + _saudio_mutex_unlock(&fifo->mutex); + fifo->cur_packet = -1; + fifo->cur_offset = 0; + } + } + SOKOL_ASSERT(all_to_copy == 0); + return num_bytes; +} + +/* read queued data, this is called form the stream callback (maybe separate thread) */ +_SOKOL_PRIVATE int _saudio_fifo_read(_saudio_fifo_t* fifo, uint8_t* ptr, int num_bytes) { + /* NOTE: fifo_read might be called before the fifo is properly initialized */ + _saudio_mutex_lock(&fifo->mutex); + int num_bytes_copied = 0; + if (fifo->valid) { + SOKOL_ASSERT(0 == (num_bytes % fifo->packet_size)); + SOKOL_ASSERT(num_bytes <= (fifo->packet_size * fifo->num_packets)); + const int num_packets_needed = num_bytes / fifo->packet_size; + uint8_t* dst = ptr; + /* either pull a full buffer worth of data, or nothing */ + if (_saudio_ring_count(&fifo->read_queue) >= num_packets_needed) { + for (int i = 0; i < num_packets_needed; i++) { + int packet_index = _saudio_ring_dequeue(&fifo->read_queue); + _saudio_ring_enqueue(&fifo->write_queue, packet_index); + const uint8_t* src = fifo->base_ptr + packet_index * fifo->packet_size; + memcpy(dst, src, (size_t)fifo->packet_size); + dst += fifo->packet_size; + num_bytes_copied += fifo->packet_size; + } + SOKOL_ASSERT(num_bytes == num_bytes_copied); + } + } + _saudio_mutex_unlock(&fifo->mutex); + return num_bytes_copied; +} + +/*=== DUMMY BACKEND IMPLEMENTATION ===========================================*/ +#if defined(SOKOL_DUMMY_BACKEND) +_SOKOL_PRIVATE bool _saudio_backend_init(void) { + _saudio.bytes_per_frame = _saudio.num_channels * (int)sizeof(float); + return true; +}; +_SOKOL_PRIVATE void _saudio_backend_shutdown(void) { }; + +/*=== COREAUDIO BACKEND IMPLEMENTATION =======================================*/ +#elif defined(_SAUDIO_APPLE) + +#if defined(_SAUDIO_IOS) +#if __has_feature(objc_arc) +#define _SAUDIO_OBJC_RELEASE(obj) { obj = nil; } +#else +#define _SAUDIO_OBJC_RELEASE(obj) { [obj release]; obj = nil; } +#endif + +@interface _saudio_interruption_handler : NSObject { } +@end + +@implementation _saudio_interruption_handler +-(id)init { + self = [super init]; + AVAudioSession* session = [AVAudioSession sharedInstance]; + [[NSNotificationCenter defaultCenter] addObserver:self selector:@selector(handle_interruption:) name:AVAudioSessionInterruptionNotification object:session]; + return self; +} + +-(void)dealloc { + [self remove_handler]; + #if 
!__has_feature(objc_arc) + [super dealloc]; + #endif +} + +-(void)remove_handler { + [[NSNotificationCenter defaultCenter] removeObserver:self name:@"AVAudioSessionInterruptionNotification" object:nil]; +} + +-(void)handle_interruption:(NSNotification*)notification { + AVAudioSession* session = [AVAudioSession sharedInstance]; + SOKOL_ASSERT(session); + NSDictionary* dict = notification.userInfo; + SOKOL_ASSERT(dict); + NSInteger type = [[dict valueForKey:AVAudioSessionInterruptionTypeKey] integerValue]; + switch (type) { + case AVAudioSessionInterruptionTypeBegan: + AudioQueuePause(_saudio.backend.ca_audio_queue); + [session setActive:false error:nil]; + break; + case AVAudioSessionInterruptionTypeEnded: + [session setActive:true error:nil]; + AudioQueueStart(_saudio.backend.ca_audio_queue, NULL); + break; + default: + break; + } +} +@end +#endif // _SAUDIO_IOS + +/* NOTE: the buffer data callback is called on a separate thread! */ +_SOKOL_PRIVATE void _saudio_coreaudio_callback(void* user_data, _saudio_AudioQueueRef queue, _saudio_AudioQueueBufferRef buffer) { + _SOKOL_UNUSED(user_data); + if (_saudio_has_callback()) { + const int num_frames = (int)buffer->mAudioDataByteSize / _saudio.bytes_per_frame; + const int num_channels = _saudio.num_channels; + _saudio_stream_callback((float*)buffer->mAudioData, num_frames, num_channels); + } + else { + uint8_t* ptr = (uint8_t*)buffer->mAudioData; + int num_bytes = (int) buffer->mAudioDataByteSize; + if (0 == _saudio_fifo_read(&_saudio.fifo, ptr, num_bytes)) { + /* not enough read data available, fill the entire buffer with silence */ + memset(ptr, 0, (size_t)num_bytes); + } + } + AudioQueueEnqueueBuffer(queue, buffer, 0, NULL); +} + +_SOKOL_PRIVATE bool _saudio_backend_init(void) { + SOKOL_ASSERT(0 == _saudio.backend.ca_audio_queue); + + #if defined(_SAUDIO_IOS) + /* activate audio session */ + AVAudioSession* session = [AVAudioSession sharedInstance]; + SOKOL_ASSERT(session != nil); + [session setCategory: AVAudioSessionCategoryPlayback withOptions:AVAudioSessionCategoryOptionDefaultToSpeaker error:nil]; + [session setActive:true error:nil]; + + /* create interruption handler */ + _saudio.backend.ca_interruption_handler = [[_saudio_interruption_handler alloc] init]; + #endif // _SAUDIO_IOS + + /* create an audio queue with fp32 samples */ + _saudio_AudioStreamBasicDescription fmt; + memset(&fmt, 0, sizeof(fmt)); + fmt.mSampleRate = (double) _saudio.sample_rate; + fmt.mFormatID = _saudio_kAudioFormatLinearPCM; + fmt.mFormatFlags = _saudio_kLinearPCMFormatFlagIsFloat | _saudio_kAudioFormatFlagIsPacked; + fmt.mFramesPerPacket = 1; + fmt.mChannelsPerFrame = (uint32_t) _saudio.num_channels; + fmt.mBytesPerFrame = (uint32_t)sizeof(float) * (uint32_t)_saudio.num_channels; + fmt.mBytesPerPacket = fmt.mBytesPerFrame; + fmt.mBitsPerChannel = 32; + _saudio_OSStatus res = AudioQueueNewOutput(&fmt, _saudio_coreaudio_callback, 0, NULL, NULL, 0, &_saudio.backend.ca_audio_queue); + SOKOL_ASSERT((res == 0) && _saudio.backend.ca_audio_queue); + + /* create 2 audio buffers */ + for (int i = 0; i < 2; i++) { + _saudio_AudioQueueBufferRef buf = NULL; + const uint32_t buf_byte_size = (uint32_t)_saudio.buffer_frames * fmt.mBytesPerFrame; + res = AudioQueueAllocateBuffer(_saudio.backend.ca_audio_queue, buf_byte_size, &buf); + SOKOL_ASSERT((res == 0) && buf); + buf->mAudioDataByteSize = buf_byte_size; + memset(buf->mAudioData, 0, buf->mAudioDataByteSize); + AudioQueueEnqueueBuffer(_saudio.backend.ca_audio_queue, buf, 0, NULL); + } + + /* init or modify actual playback 
parameters */ + _saudio.bytes_per_frame = (int)fmt.mBytesPerFrame; + + /* ...and start playback */ + res = AudioQueueStart(_saudio.backend.ca_audio_queue, NULL); + SOKOL_ASSERT(0 == res); + + return true; +} + +_SOKOL_PRIVATE void _saudio_backend_shutdown(void) { + AudioQueueStop(_saudio.backend.ca_audio_queue, true); + AudioQueueDispose(_saudio.backend.ca_audio_queue, false); + _saudio.backend.ca_audio_queue = NULL; + #if defined(_SAUDIO_IOS) + /* remove interruption handler */ + if (_saudio.backend.ca_interruption_handler != nil) { + [_saudio.backend.ca_interruption_handler remove_handler]; + _SAUDIO_OBJC_RELEASE(_saudio.backend.ca_interruption_handler); + } + /* deactivate audio session */ + AVAudioSession* session = [AVAudioSession sharedInstance]; + SOKOL_ASSERT(session); + [session setActive:false error:nil];; + #endif // _SAUDIO_IOS +} + +/*=== ALSA BACKEND IMPLEMENTATION ============================================*/ +#elif defined(_SAUDIO_LINUX) + +/* the streaming callback runs in a separate thread */ +_SOKOL_PRIVATE void* _saudio_alsa_cb(void* param) { + _SOKOL_UNUSED(param); + while (!_saudio.backend.thread_stop) { + /* snd_pcm_writei() will be blocking until it needs data */ + int write_res = snd_pcm_writei(_saudio.backend.device, _saudio.backend.buffer, (snd_pcm_uframes_t)_saudio.backend.buffer_frames); + if (write_res < 0) { + /* underrun occurred */ + snd_pcm_prepare(_saudio.backend.device); + } + else { + /* fill the streaming buffer with new data */ + if (_saudio_has_callback()) { + _saudio_stream_callback(_saudio.backend.buffer, _saudio.backend.buffer_frames, _saudio.num_channels); + } + else { + if (0 == _saudio_fifo_read(&_saudio.fifo, (uint8_t*)_saudio.backend.buffer, _saudio.backend.buffer_byte_size)) { + /* not enough read data available, fill the entire buffer with silence */ + memset(_saudio.backend.buffer, 0, (size_t)_saudio.backend.buffer_byte_size); + } + } + } + } + return 0; +} + +_SOKOL_PRIVATE bool _saudio_backend_init(void) { + int dir; uint32_t rate; + int rc = snd_pcm_open(&_saudio.backend.device, "default", SND_PCM_STREAM_PLAYBACK, 0); + if (rc < 0) { + SOKOL_LOG("sokol_audio.h: snd_pcm_open() failed"); + return false; + } + + /* configuration works by restricting the 'configuration space' step + by step, we require all parameters except the sample rate to + match perfectly + */ + snd_pcm_hw_params_t* params = 0; + snd_pcm_hw_params_alloca(¶ms); + snd_pcm_hw_params_any(_saudio.backend.device, params); + snd_pcm_hw_params_set_access(_saudio.backend.device, params, SND_PCM_ACCESS_RW_INTERLEAVED); + if (0 > snd_pcm_hw_params_set_format(_saudio.backend.device, params, SND_PCM_FORMAT_FLOAT_LE)) { + SOKOL_LOG("sokol_audio.h: float samples not supported"); + goto error; + } + if (0 > snd_pcm_hw_params_set_buffer_size(_saudio.backend.device, params, (snd_pcm_uframes_t)_saudio.buffer_frames)) { + SOKOL_LOG("sokol_audio.h: requested buffer size not supported"); + goto error; + } + if (0 > snd_pcm_hw_params_set_channels(_saudio.backend.device, params, (uint32_t)_saudio.num_channels)) { + SOKOL_LOG("sokol_audio.h: requested channel count not supported"); + goto error; + } + /* let ALSA pick a nearby sampling rate */ + rate = (uint32_t) _saudio.sample_rate; + dir = 0; + if (0 > snd_pcm_hw_params_set_rate_near(_saudio.backend.device, params, &rate, &dir)) { + SOKOL_LOG("sokol_audio.h: snd_pcm_hw_params_set_rate_near() failed"); + goto error; + } + if (0 > snd_pcm_hw_params(_saudio.backend.device, params)) { + SOKOL_LOG("sokol_audio.h: snd_pcm_hw_params() failed"); + 
goto error; + } + + /* read back actual sample rate and channels */ + _saudio.sample_rate = (int)rate; + _saudio.bytes_per_frame = _saudio.num_channels * (int)sizeof(float); + + /* allocate the streaming buffer */ + _saudio.backend.buffer_byte_size = _saudio.buffer_frames * _saudio.bytes_per_frame; + _saudio.backend.buffer_frames = _saudio.buffer_frames; + _saudio.backend.buffer = (float*) SOKOL_MALLOC((size_t)_saudio.backend.buffer_byte_size); + memset(_saudio.backend.buffer, 0, (size_t)_saudio.backend.buffer_byte_size); + + /* create the buffer-streaming start thread */ + if (0 != pthread_create(&_saudio.backend.thread, 0, _saudio_alsa_cb, 0)) { + SOKOL_LOG("sokol_audio.h: pthread_create() failed"); + goto error; + } + + return true; +error: + if (_saudio.backend.device) { + snd_pcm_close(_saudio.backend.device); + _saudio.backend.device = 0; + } + return false; +}; + +_SOKOL_PRIVATE void _saudio_backend_shutdown(void) { + SOKOL_ASSERT(_saudio.backend.device); + _saudio.backend.thread_stop = true; + pthread_join(_saudio.backend.thread, 0); + snd_pcm_drain(_saudio.backend.device); + snd_pcm_close(_saudio.backend.device); + SOKOL_FREE(_saudio.backend.buffer); +}; + +/*=== WASAPI BACKEND IMPLEMENTATION ==========================================*/ +#elif defined(_SAUDIO_WINDOWS) + +#if defined(_SAUDIO_UWP) +/* Minimal implementation of an IActivateAudioInterfaceCompletionHandler COM object in plain C. + Meant to be a static singleton (always one reference when add/remove reference) + and implements IUnknown and IActivateAudioInterfaceCompletionHandler when queryinterface'd + + Do not know why but IActivateAudioInterfaceCompletionHandler's GUID is not the one system queries for, + so I'm advertising the one actually requested. +*/ +_SOKOL_PRIVATE HRESULT STDMETHODCALLTYPE _saudio_interface_completion_handler_queryinterface(IActivateAudioInterfaceCompletionHandler* instance, REFIID riid, void** ppvObject) { + if (!ppvObject) { + return E_POINTER; + } + + if (IsEqualIID(riid, _SOKOL_AUDIO_WIN32COM_ID(_saudio_IID_IActivateAudioInterface_Completion_Handler)) || IsEqualIID(riid, _SOKOL_AUDIO_WIN32COM_ID(IID_IUnknown))) + { + *ppvObject = (void*)instance; + return S_OK; + } + + *ppvObject = NULL; + return E_NOINTERFACE; +} + +_SOKOL_PRIVATE ULONG STDMETHODCALLTYPE _saudio_interface_completion_handler_addref_release(IActivateAudioInterfaceCompletionHandler* instance) { + _SOKOL_UNUSED(instance); + return 1; +} + +_SOKOL_PRIVATE HRESULT STDMETHODCALLTYPE _saudio_backend_activate_audio_interface_cb(IActivateAudioInterfaceCompletionHandler* instance, IActivateAudioInterfaceAsyncOperation* activateOperation) { + _SOKOL_UNUSED(instance); + WaitForSingleObject(_saudio.backend.interface_activation_mutex, INFINITE); + _saudio.backend.interface_activation_success = TRUE; + HRESULT activation_result; + if (FAILED(activateOperation->lpVtbl->GetActivateResult(activateOperation, &activation_result, (IUnknown**)(&_saudio.backend.audio_client))) || FAILED(activation_result)) { + _saudio.backend.interface_activation_success = FALSE; + } + + ReleaseMutex(_saudio.backend.interface_activation_mutex); + return S_OK; +} +#endif // _SAUDIO_UWP + +/* fill intermediate buffer with new data and reset buffer_pos */ +_SOKOL_PRIVATE void _saudio_wasapi_fill_buffer(void) { + if (_saudio_has_callback()) { + _saudio_stream_callback(_saudio.backend.thread.src_buffer, _saudio.backend.thread.src_buffer_frames, _saudio.num_channels); + } + else { + if (0 == _saudio_fifo_read(&_saudio.fifo, (uint8_t*)_saudio.backend.thread.src_buffer, 
_saudio.backend.thread.src_buffer_byte_size)) { + /* not enough read data available, fill the entire buffer with silence */ + memset(_saudio.backend.thread.src_buffer, 0, (size_t)_saudio.backend.thread.src_buffer_byte_size); + } + } +} + +_SOKOL_PRIVATE void _saudio_wasapi_submit_buffer(int num_frames) { + BYTE* wasapi_buffer = 0; + if (FAILED(IAudioRenderClient_GetBuffer(_saudio.backend.render_client, num_frames, &wasapi_buffer))) { + return; + } + SOKOL_ASSERT(wasapi_buffer); + + /* convert float samples to int16_t, refill float buffer if needed */ + const int num_samples = num_frames * _saudio.num_channels; + int16_t* dst = (int16_t*) wasapi_buffer; + int buffer_pos = _saudio.backend.thread.src_buffer_pos; + const int buffer_float_size = _saudio.backend.thread.src_buffer_byte_size / (int)sizeof(float); + float* src = _saudio.backend.thread.src_buffer; + for (int i = 0; i < num_samples; i++) { + if (0 == buffer_pos) { + _saudio_wasapi_fill_buffer(); + } + dst[i] = (int16_t) (src[buffer_pos] * 0x7FFF); + buffer_pos += 1; + if (buffer_pos == buffer_float_size) { + buffer_pos = 0; + } + } + _saudio.backend.thread.src_buffer_pos = buffer_pos; + + IAudioRenderClient_ReleaseBuffer(_saudio.backend.render_client, num_frames, 0); +} + +_SOKOL_PRIVATE DWORD WINAPI _saudio_wasapi_thread_fn(LPVOID param) { + (void)param; + _saudio_wasapi_submit_buffer(_saudio.backend.thread.src_buffer_frames); + IAudioClient_Start(_saudio.backend.audio_client); + while (!_saudio.backend.thread.stop) { + WaitForSingleObject(_saudio.backend.thread.buffer_end_event, INFINITE); + UINT32 padding = 0; + if (FAILED(IAudioClient_GetCurrentPadding(_saudio.backend.audio_client, &padding))) { + continue; + } + SOKOL_ASSERT(_saudio.backend.thread.dst_buffer_frames >= padding); + int num_frames = (int)_saudio.backend.thread.dst_buffer_frames - (int)padding; + if (num_frames > 0) { + _saudio_wasapi_submit_buffer(num_frames); + } + } + return 0; +} + +_SOKOL_PRIVATE void _saudio_wasapi_release(void) { + if (_saudio.backend.thread.src_buffer) { + SOKOL_FREE(_saudio.backend.thread.src_buffer); + _saudio.backend.thread.src_buffer = 0; + } + if (_saudio.backend.render_client) { + IAudioRenderClient_Release(_saudio.backend.render_client); + _saudio.backend.render_client = 0; + } + if (_saudio.backend.audio_client) { + IAudioClient_Release(_saudio.backend.audio_client); + _saudio.backend.audio_client = 0; + } + #if defined(_SAUDIO_UWP) + if (_saudio.backend.interface_activation_audio_interface_uid_string) { + CoTaskMemFree(_saudio.backend.interface_activation_audio_interface_uid_string); + _saudio.backend.interface_activation_audio_interface_uid_string = 0; + } + if (_saudio.backend.interface_activation_operation) { + IActivateAudioInterfaceAsyncOperation_Release(_saudio.backend.interface_activation_operation); + _saudio.backend.interface_activation_operation = 0; + } + #else + if (_saudio.backend.device) { + IMMDevice_Release(_saudio.backend.device); + _saudio.backend.device = 0; + } + if (_saudio.backend.device_enumerator) { + IMMDeviceEnumerator_Release(_saudio.backend.device_enumerator); + _saudio.backend.device_enumerator = 0; + } + #endif + if (0 != _saudio.backend.thread.buffer_end_event) { + CloseHandle(_saudio.backend.thread.buffer_end_event); + _saudio.backend.thread.buffer_end_event = 0; + } +} + +_SOKOL_PRIVATE bool _saudio_backend_init(void) { + REFERENCE_TIME dur; + /* UWP Threads are CoInitialized by default with a different threading model, and this call fails + See 
https://github.com/Microsoft/cppwinrt/issues/6#issuecomment-253930637 */ + #if defined(_SAUDIO_WIN32) + /* CoInitializeEx could have been called elsewhere already, in which + case the function returns with S_FALSE (thus it does not make much + sense to check the result) + */ + HRESULT hr = CoInitializeEx(0, COINIT_MULTITHREADED); + _SOKOL_UNUSED(hr); + #endif + _saudio.backend.thread.buffer_end_event = CreateEvent(0, FALSE, FALSE, 0); + if (0 == _saudio.backend.thread.buffer_end_event) { + SOKOL_LOG("sokol_audio wasapi: failed to create buffer_end_event"); + goto error; + } + #if defined(_SAUDIO_UWP) + _saudio.backend.interface_activation_mutex = CreateMutexA(NULL, FALSE, "interface_activation_mutex"); + if (_saudio.backend.interface_activation_mutex == NULL) { + SOKOL_LOG("sokol_audio wasapi: failed to create interface activation mutex"); + goto error; + } + if (FAILED(StringFromIID(_SOKOL_AUDIO_WIN32COM_ID(_saudio_IID_Devinterface_Audio_Render), &_saudio.backend.interface_activation_audio_interface_uid_string))) { + SOKOL_LOG("sokol_audio wasapi: failed to get default audio device ID string"); + goto error; + } + + /* static instance of the fake COM object */ + static IActivateAudioInterfaceCompletionHandlerVtbl completion_handler_interface_vtable = { + _saudio_interface_completion_handler_queryinterface, + _saudio_interface_completion_handler_addref_release, + _saudio_interface_completion_handler_addref_release, + _saudio_backend_activate_audio_interface_cb + }; + static IActivateAudioInterfaceCompletionHandler completion_handler_interface = { &completion_handler_interface_vtable }; + + if (FAILED(ActivateAudioInterfaceAsync(_saudio.backend.interface_activation_audio_interface_uid_string, _SOKOL_AUDIO_WIN32COM_ID(_saudio_IID_IAudioClient), NULL, &completion_handler_interface, &_saudio.backend.interface_activation_operation))) { + SOKOL_LOG("sokol_audio wasapi: failed to get default audio device ID string"); + goto error; + } + while (!(_saudio.backend.audio_client)) { + if (WaitForSingleObject(_saudio.backend.interface_activation_mutex, 10) != WAIT_TIMEOUT) { + ReleaseMutex(_saudio.backend.interface_activation_mutex); + } + } + + if (!(_saudio.backend.interface_activation_success)) { + SOKOL_LOG("sokol_audio wasapi: interface activation failed. 
Unable to get audio client"); + goto error; + } + + #else + if (FAILED(CoCreateInstance(_SOKOL_AUDIO_WIN32COM_ID(_saudio_CLSID_IMMDeviceEnumerator), + 0, CLSCTX_ALL, + _SOKOL_AUDIO_WIN32COM_ID(_saudio_IID_IMMDeviceEnumerator), + (void**)&_saudio.backend.device_enumerator))) + { + SOKOL_LOG("sokol_audio wasapi: failed to create device enumerator"); + goto error; + } + if (FAILED(IMMDeviceEnumerator_GetDefaultAudioEndpoint(_saudio.backend.device_enumerator, + eRender, eConsole, + &_saudio.backend.device))) + { + SOKOL_LOG("sokol_audio wasapi: GetDefaultAudioEndPoint failed"); + goto error; + } + if (FAILED(IMMDevice_Activate(_saudio.backend.device, + _SOKOL_AUDIO_WIN32COM_ID(_saudio_IID_IAudioClient), + CLSCTX_ALL, 0, + (void**)&_saudio.backend.audio_client))) + { + SOKOL_LOG("sokol_audio wasapi: device activate failed"); + goto error; + } + #endif + WAVEFORMATEX fmt; + memset(&fmt, 0, sizeof(fmt)); + fmt.nChannels = (WORD)_saudio.num_channels; + fmt.nSamplesPerSec = (DWORD)_saudio.sample_rate; + fmt.wFormatTag = WAVE_FORMAT_PCM; + fmt.wBitsPerSample = 16; + fmt.nBlockAlign = (fmt.nChannels * fmt.wBitsPerSample) / 8; + fmt.nAvgBytesPerSec = fmt.nSamplesPerSec * fmt.nBlockAlign; + dur = (REFERENCE_TIME) + (((double)_saudio.buffer_frames) / (((double)_saudio.sample_rate) * (1.0/10000000.0))); + if (FAILED(IAudioClient_Initialize(_saudio.backend.audio_client, + AUDCLNT_SHAREMODE_SHARED, + AUDCLNT_STREAMFLAGS_EVENTCALLBACK|AUDCLNT_STREAMFLAGS_AUTOCONVERTPCM|AUDCLNT_STREAMFLAGS_SRC_DEFAULT_QUALITY, + dur, 0, &fmt, 0))) + { + SOKOL_LOG("sokol_audio wasapi: audio client initialize failed"); + goto error; + } + if (FAILED(IAudioClient_GetBufferSize(_saudio.backend.audio_client, &_saudio.backend.thread.dst_buffer_frames))) { + SOKOL_LOG("sokol_audio wasapi: audio client get buffer size failed"); + goto error; + } + if (FAILED(IAudioClient_GetService(_saudio.backend.audio_client, + _SOKOL_AUDIO_WIN32COM_ID(_saudio_IID_IAudioRenderClient), + (void**)&_saudio.backend.render_client))) + { + SOKOL_LOG("sokol_audio wasapi: audio client GetService failed"); + goto error; + } + if (FAILED(IAudioClient_SetEventHandle(_saudio.backend.audio_client, _saudio.backend.thread.buffer_end_event))) { + SOKOL_LOG("sokol_audio wasapi: audio client SetEventHandle failed"); + goto error; + } + _saudio.backend.si16_bytes_per_frame = _saudio.num_channels * (int)sizeof(int16_t); + _saudio.bytes_per_frame = _saudio.num_channels * (int)sizeof(float); + _saudio.backend.thread.src_buffer_frames = _saudio.buffer_frames; + _saudio.backend.thread.src_buffer_byte_size = _saudio.backend.thread.src_buffer_frames * _saudio.bytes_per_frame; + + /* allocate an intermediate buffer for sample format conversion */ + _saudio.backend.thread.src_buffer = (float*) SOKOL_MALLOC((size_t)_saudio.backend.thread.src_buffer_byte_size); + SOKOL_ASSERT(_saudio.backend.thread.src_buffer); + + /* create streaming thread */ + _saudio.backend.thread.thread_handle = CreateThread(NULL, 0, _saudio_wasapi_thread_fn, 0, 0, 0); + if (0 == _saudio.backend.thread.thread_handle) { + SOKOL_LOG("sokol_audio wasapi: CreateThread failed"); + goto error; + } + return true; +error: + _saudio_wasapi_release(); + return false; +} + +_SOKOL_PRIVATE void _saudio_backend_shutdown(void) { + if (_saudio.backend.thread.thread_handle) { + _saudio.backend.thread.stop = true; + SetEvent(_saudio.backend.thread.buffer_end_event); + WaitForSingleObject(_saudio.backend.thread.thread_handle, INFINITE); + CloseHandle(_saudio.backend.thread.thread_handle); + 
_saudio.backend.thread.thread_handle = 0; + } + if (_saudio.backend.audio_client) { + IAudioClient_Stop(_saudio.backend.audio_client); + } + _saudio_wasapi_release(); + + #if defined(_SAUDIO_WIN32) + CoUninitialize(); + #endif +} + +/*=== EMSCRIPTEN BACKEND IMPLEMENTATION ======================================*/ +#elif defined(_SAUDIO_EMSCRIPTEN) + +#ifdef __cplusplus +extern "C" { +#endif + +EMSCRIPTEN_KEEPALIVE int _saudio_emsc_pull(int num_frames) { + SOKOL_ASSERT(_saudio.backend.buffer); + if (num_frames == _saudio.buffer_frames) { + if (_saudio_has_callback()) { + _saudio_stream_callback((float*)_saudio.backend.buffer, num_frames, _saudio.num_channels); + } + else { + const int num_bytes = num_frames * _saudio.bytes_per_frame; + if (0 == _saudio_fifo_read(&_saudio.fifo, _saudio.backend.buffer, num_bytes)) { + /* not enough read data available, fill the entire buffer with silence */ + memset(_saudio.backend.buffer, 0, (size_t)num_bytes); + } + } + int res = (int) _saudio.backend.buffer; + return res; + } + else { + return 0; + } +} + +#ifdef __cplusplus +} /* extern "C" */ +#endif + +/* setup the WebAudio context and attach a ScriptProcessorNode */ +EM_JS(int, saudio_js_init, (int sample_rate, int num_channels, int buffer_size), { + Module._saudio_context = null; + Module._saudio_node = null; + if (typeof AudioContext !== 'undefined') { + Module._saudio_context = new AudioContext({ + sampleRate: sample_rate, + latencyHint: 'interactive', + }); + } + else if (typeof webkitAudioContext !== 'undefined') { + Module._saudio_context = new webkitAudioContext({ + sampleRate: sample_rate, + latencyHint: 'interactive', + }); + } + else { + Module._saudio_context = null; + console.log('sokol_audio.h: no WebAudio support'); + } + if (Module._saudio_context) { + console.log('sokol_audio.h: sample rate ', Module._saudio_context.sampleRate); + Module._saudio_node = Module._saudio_context.createScriptProcessor(buffer_size, 0, num_channels); + Module._saudio_node.onaudioprocess = function pump_audio(event) { + var num_frames = event.outputBuffer.length; + var ptr = __saudio_emsc_pull(num_frames); + if (ptr) { + var num_channels = event.outputBuffer.numberOfChannels; + for (var chn = 0; chn < num_channels; chn++) { + var chan = event.outputBuffer.getChannelData(chn); + for (var i = 0; i < num_frames; i++) { + chan[i] = HEAPF32[(ptr>>2) + ((num_channels*i)+chn)] + } + } + } + }; + Module._saudio_node.connect(Module._saudio_context.destination); + + // in some browsers, WebAudio needs to be activated on a user action + var resume_webaudio = function() { + if (Module._saudio_context) { + if (Module._saudio_context.state === 'suspended') { + Module._saudio_context.resume(); + } + } + }; + document.addEventListener('click', resume_webaudio, {once:true}); + document.addEventListener('touchstart', resume_webaudio, {once:true}); + document.addEventListener('keydown', resume_webaudio, {once:true}); + return 1; + } + else { + return 0; + } +}); + +/* shutdown the WebAudioContext and ScriptProcessorNode */ +EM_JS(void, saudio_js_shutdown, (void), { + if (Module._saudio_context !== null) { + if (Module._saudio_node) { + Module._saudio_node.disconnect(); + } + Module._saudio_context.close(); + Module._saudio_context = null; + Module._saudio_node = null; + } +}); + +/* get the actual sample rate back from the WebAudio context */ +EM_JS(int, saudio_js_sample_rate, (void), { + if (Module._saudio_context) { + return Module._saudio_context.sampleRate; + } + else { + return 0; + } +}); + +/* get the actual buffer size in 
number of frames */ +EM_JS(int, saudio_js_buffer_frames, (void), { + if (Module._saudio_node) { + return Module._saudio_node.bufferSize; + } + else { + return 0; + } +}); + +_SOKOL_PRIVATE bool _saudio_backend_init(void) { + if (saudio_js_init(_saudio.sample_rate, _saudio.num_channels, _saudio.buffer_frames)) { + _saudio.bytes_per_frame = (int)sizeof(float) * _saudio.num_channels; + _saudio.sample_rate = saudio_js_sample_rate(); + _saudio.buffer_frames = saudio_js_buffer_frames(); + const size_t buf_size = (size_t) (_saudio.buffer_frames * _saudio.bytes_per_frame); + _saudio.backend.buffer = (uint8_t*) SOKOL_MALLOC(buf_size); + return true; + } + else { + return false; + } +} + +_SOKOL_PRIVATE void _saudio_backend_shutdown(void) { + saudio_js_shutdown(); + if (_saudio.backend.buffer) { + SOKOL_FREE(_saudio.backend.buffer); + _saudio.backend.buffer = 0; + } +} + +/*=== ANDROID BACKEND IMPLEMENTATION ======================================*/ +#elif defined(_SAUDIO_ANDROID) + +#ifdef __cplusplus +extern "C" { +#endif + +_SOKOL_PRIVATE void _saudio_semaphore_init(_saudio_semaphore_t* sem) { + sem->count = 0; + int r = pthread_mutex_init(&sem->mutex, NULL); + SOKOL_ASSERT(r == 0); + + r = pthread_cond_init(&sem->cond, NULL); + SOKOL_ASSERT(r == 0); + + (void)(r); +} + +_SOKOL_PRIVATE void _saudio_semaphore_destroy(_saudio_semaphore_t* sem) +{ + pthread_cond_destroy(&sem->cond); + pthread_mutex_destroy(&sem->mutex); +} + +_SOKOL_PRIVATE void _saudio_semaphore_post(_saudio_semaphore_t* sem, int count) +{ + int r = pthread_mutex_lock(&sem->mutex); + SOKOL_ASSERT(r == 0); + + for (int ii = 0; ii < count; ii++) { + r = pthread_cond_signal(&sem->cond); + SOKOL_ASSERT(r == 0); + } + + sem->count += count; + r = pthread_mutex_unlock(&sem->mutex); + SOKOL_ASSERT(r == 0); + + (void)(r); +} + +_SOKOL_PRIVATE bool _saudio_semaphore_wait(_saudio_semaphore_t* sem) +{ + int r = pthread_mutex_lock(&sem->mutex); + SOKOL_ASSERT(r == 0); + + while (r == 0 && sem->count <= 0) { + r = pthread_cond_wait(&sem->cond, &sem->mutex); + } + + bool ok = (r == 0); + if (ok) { + --sem->count; + } + r = pthread_mutex_unlock(&sem->mutex); + (void)(r); + return ok; +} + +/* fill intermediate buffer with new data and reset buffer_pos */ +_SOKOL_PRIVATE void _saudio_opensles_fill_buffer(void) { + int src_buffer_frames = _saudio.buffer_frames; + if (_saudio_has_callback()) { + _saudio_stream_callback(_saudio.backend.src_buffer, src_buffer_frames, _saudio.num_channels); + } + else { + const int src_buffer_byte_size = src_buffer_frames * _saudio.num_channels * (int)sizeof(float); + if (0 == _saudio_fifo_read(&_saudio.fifo, (uint8_t*)_saudio.backend.src_buffer, src_buffer_byte_size)) { + /* not enough read data available, fill the entire buffer with silence */ + memset(_saudio.backend.src_buffer, 0x0, (size_t)src_buffer_byte_size); + } + } +} + +_SOKOL_PRIVATE void SLAPIENTRY _saudio_opensles_play_cb(SLPlayItf player, void *context, SLuint32 event) { + (void)(context); + (void)(player); + + if (event & SL_PLAYEVENT_HEADATEND) { + _saudio_semaphore_post(&_saudio.backend.buffer_sem, 1); + } +} + +_SOKOL_PRIVATE void* _saudio_opensles_thread_fn(void* param) { + while (!_saudio.backend.thread_stop) { + /* get next output buffer, advance, next buffer. 
*/ + int16_t* out_buffer = _saudio.backend.output_buffers[_saudio.backend.active_buffer]; + _saudio.backend.active_buffer = (_saudio.backend.active_buffer + 1) % SAUDIO_NUM_BUFFERS; + int16_t* next_buffer = _saudio.backend.output_buffers[_saudio.backend.active_buffer]; + + /* queue this buffer */ + const int buffer_size_bytes = _saudio.buffer_frames * _saudio.num_channels * (int)sizeof(short); + (*_saudio.backend.player_buffer_queue)->Enqueue(_saudio.backend.player_buffer_queue, out_buffer, (SLuint32)buffer_size_bytes); + + /* fill the next buffer */ + _saudio_opensles_fill_buffer(); + const int num_samples = _saudio.num_channels * _saudio.buffer_frames; + for (int i = 0; i < num_samples; ++i) { + next_buffer[i] = (int16_t) (_saudio.backend.src_buffer[i] * 0x7FFF); + } + + _saudio_semaphore_wait(&_saudio.backend.buffer_sem); + } + + return 0; +} + +_SOKOL_PRIVATE void _saudio_backend_shutdown(void) { + _saudio.backend.thread_stop = 1; + pthread_join(_saudio.backend.thread, 0); + + if (_saudio.backend.player_obj) { + (*_saudio.backend.player_obj)->Destroy(_saudio.backend.player_obj); + } + + if (_saudio.backend.output_mix_obj) { + (*_saudio.backend.output_mix_obj)->Destroy(_saudio.backend.output_mix_obj); + } + + if (_saudio.backend.engine_obj) { + (*_saudio.backend.engine_obj)->Destroy(_saudio.backend.engine_obj); + } + + for (int i = 0; i < SAUDIO_NUM_BUFFERS; i++) { + SOKOL_FREE(_saudio.backend.output_buffers[i]); + } + SOKOL_FREE(_saudio.backend.src_buffer); +} + +_SOKOL_PRIVATE bool _saudio_backend_init(void) { + _saudio.bytes_per_frame = (int)sizeof(float) * _saudio.num_channels; + + for (int i = 0; i < SAUDIO_NUM_BUFFERS; ++i) { + const int buffer_size_bytes = (int)sizeof(int16_t) * _saudio.num_channels * _saudio.buffer_frames; + _saudio.backend.output_buffers[i] = (int16_t*) SOKOL_MALLOC((size_t)buffer_size_bytes); + SOKOL_ASSERT(_saudio.backend.output_buffers[i]); + memset(_saudio.backend.output_buffers[i], 0x0, (size_t)buffer_size_bytes); + } + + { + const int buffer_size_bytes = _saudio.bytes_per_frame * _saudio.buffer_frames; + _saudio.backend.src_buffer = (float*) SOKOL_MALLOC((size_t)buffer_size_bytes); + SOKOL_ASSERT(_saudio.backend.src_buffer); + memset(_saudio.backend.src_buffer, 0x0, (size_t)buffer_size_bytes); + } + + /* Create engine */ + const SLEngineOption opts[] = { SL_ENGINEOPTION_THREADSAFE, SL_BOOLEAN_TRUE }; + if (slCreateEngine(&_saudio.backend.engine_obj, 1, opts, 0, NULL, NULL ) != SL_RESULT_SUCCESS) { + SOKOL_LOG("sokol_audio opensles: slCreateEngine failed"); + _saudio_backend_shutdown(); + return false; + } + + (*_saudio.backend.engine_obj)->Realize(_saudio.backend.engine_obj, SL_BOOLEAN_FALSE); + if ((*_saudio.backend.engine_obj)->GetInterface(_saudio.backend.engine_obj, SL_IID_ENGINE, &_saudio.backend.engine) != SL_RESULT_SUCCESS) { + SOKOL_LOG("sokol_audio opensles: GetInterface->Engine failed"); + _saudio_backend_shutdown(); + return false; + } + + /* Create output mix. 
*/ + { + const SLInterfaceID ids[] = { SL_IID_VOLUME }; + const SLboolean req[] = { SL_BOOLEAN_FALSE }; + + if( (*_saudio.backend.engine)->CreateOutputMix(_saudio.backend.engine, &_saudio.backend.output_mix_obj, 1, ids, req) != SL_RESULT_SUCCESS) + { + SOKOL_LOG("sokol_audio opensles: CreateOutputMix failed"); + _saudio_backend_shutdown(); + return false; + } + (*_saudio.backend.output_mix_obj)->Realize(_saudio.backend.output_mix_obj, SL_BOOLEAN_FALSE); + + if((*_saudio.backend.output_mix_obj)->GetInterface(_saudio.backend.output_mix_obj, SL_IID_VOLUME, &_saudio.backend.output_mix_vol) != SL_RESULT_SUCCESS) { + SOKOL_LOG("sokol_audio opensles: GetInterface->OutputMixVol failed"); + } + } + + /* android buffer queue */ + _saudio.backend.in_locator.locatorType = SL_DATALOCATOR_ANDROIDSIMPLEBUFFERQUEUE; + _saudio.backend.in_locator.numBuffers = SAUDIO_NUM_BUFFERS; + + /* data format */ + SLDataFormat_PCM format; + format.formatType = SL_DATAFORMAT_PCM; + format.numChannels = (SLuint32)_saudio.num_channels; + format.samplesPerSec = (SLuint32) (_saudio.sample_rate * 1000); + format.bitsPerSample = SL_PCMSAMPLEFORMAT_FIXED_16; + format.containerSize = 16; + format.endianness = SL_BYTEORDER_LITTLEENDIAN; + + if (_saudio.num_channels == 2) { + format.channelMask = SL_SPEAKER_FRONT_LEFT | SL_SPEAKER_FRONT_RIGHT; + } else { + format.channelMask = SL_SPEAKER_FRONT_CENTER; + } + + SLDataSource src; + src.pLocator = &_saudio.backend.in_locator; + src.pFormat = &format; + + /* Output mix. */ + _saudio.backend.out_locator.locatorType = SL_DATALOCATOR_OUTPUTMIX; + _saudio.backend.out_locator.outputMix = _saudio.backend.output_mix_obj; + + _saudio.backend.dst_data_sink.pLocator = &_saudio.backend.out_locator; + _saudio.backend.dst_data_sink.pFormat = NULL; + + /* setup player */ + { + const SLInterfaceID ids[] = { SL_IID_VOLUME, SL_IID_ANDROIDSIMPLEBUFFERQUEUE }; + const SLboolean req[] = { SL_BOOLEAN_FALSE, SL_BOOLEAN_TRUE }; + + (*_saudio.backend.engine)->CreateAudioPlayer(_saudio.backend.engine, &_saudio.backend.player_obj, &src, &_saudio.backend.dst_data_sink, sizeof(ids) / sizeof(ids[0]), ids, req); + + (*_saudio.backend.player_obj)->Realize(_saudio.backend.player_obj, SL_BOOLEAN_FALSE); + + (*_saudio.backend.player_obj)->GetInterface(_saudio.backend.player_obj, SL_IID_PLAY, &_saudio.backend.player); + (*_saudio.backend.player_obj)->GetInterface(_saudio.backend.player_obj, SL_IID_VOLUME, &_saudio.backend.player_vol); + + (*_saudio.backend.player_obj)->GetInterface(_saudio.backend.player_obj, SL_IID_ANDROIDSIMPLEBUFFERQUEUE, &_saudio.backend.player_buffer_queue); + } + + /* begin */ + { + const int buffer_size_bytes = (int)sizeof(int16_t) * _saudio.num_channels * _saudio.buffer_frames; + (*_saudio.backend.player_buffer_queue)->Enqueue(_saudio.backend.player_buffer_queue, _saudio.backend.output_buffers[0], (SLuint32)buffer_size_bytes); + _saudio.backend.active_buffer = (_saudio.backend.active_buffer + 1) % SAUDIO_NUM_BUFFERS; + + (*_saudio.backend.player)->RegisterCallback(_saudio.backend.player, _saudio_opensles_play_cb, NULL); + (*_saudio.backend.player)->SetCallbackEventsMask(_saudio.backend.player, SL_PLAYEVENT_HEADATEND); + (*_saudio.backend.player)->SetPlayState(_saudio.backend.player, SL_PLAYSTATE_PLAYING); + } + + /* create the buffer-streaming start thread */ + if (0 != pthread_create(&_saudio.backend.thread, 0, _saudio_opensles_thread_fn, 0)) { + _saudio_backend_shutdown(); + return false; + } + + return true; +} + +#ifdef __cplusplus +} /* extern "C" */ +#endif + +#else +#error "unsupported 
platform" +#endif + +/*=== PUBLIC API FUNCTIONS ===================================================*/ +SOKOL_API_IMPL void saudio_setup(const saudio_desc* desc) { + SOKOL_ASSERT(!_saudio.valid); + SOKOL_ASSERT(desc); + memset(&_saudio, 0, sizeof(_saudio)); + _saudio.desc = *desc; + _saudio.stream_cb = desc->stream_cb; + _saudio.stream_userdata_cb = desc->stream_userdata_cb; + _saudio.user_data = desc->user_data; + _saudio.sample_rate = _saudio_def(_saudio.desc.sample_rate, _SAUDIO_DEFAULT_SAMPLE_RATE); + _saudio.buffer_frames = _saudio_def(_saudio.desc.buffer_frames, _SAUDIO_DEFAULT_BUFFER_FRAMES); + _saudio.packet_frames = _saudio_def(_saudio.desc.packet_frames, _SAUDIO_DEFAULT_PACKET_FRAMES); + _saudio.num_packets = _saudio_def(_saudio.desc.num_packets, _SAUDIO_DEFAULT_NUM_PACKETS); + _saudio.num_channels = _saudio_def(_saudio.desc.num_channels, 1); + _saudio_fifo_init_mutex(&_saudio.fifo); + if (_saudio_backend_init()) { + /* the backend might not support the requested exact buffer size, + make sure the actual buffer size is still a multiple of + the requested packet size + */ + if (0 != (_saudio.buffer_frames % _saudio.packet_frames)) { + SOKOL_LOG("sokol_audio.h: actual backend buffer size isn't multiple of requested packet size"); + _saudio_backend_shutdown(); + return; + } + SOKOL_ASSERT(_saudio.bytes_per_frame > 0); + _saudio_fifo_init(&_saudio.fifo, _saudio.packet_frames * _saudio.bytes_per_frame, _saudio.num_packets); + _saudio.valid = true; + } +} + +SOKOL_API_IMPL void saudio_shutdown(void) { + if (_saudio.valid) { + _saudio_backend_shutdown(); + _saudio_fifo_shutdown(&_saudio.fifo); + _saudio.valid = false; + } +} + +SOKOL_API_IMPL bool saudio_isvalid(void) { + return _saudio.valid; +} + +SOKOL_API_IMPL void* saudio_userdata(void) { + return _saudio.desc.user_data; +} + +SOKOL_API_IMPL saudio_desc saudio_query_desc(void) { + return _saudio.desc; +} + +SOKOL_API_IMPL int saudio_sample_rate(void) { + return _saudio.sample_rate; +} + +SOKOL_API_IMPL int saudio_buffer_frames(void) { + return _saudio.buffer_frames; +} + +SOKOL_API_IMPL int saudio_channels(void) { + return _saudio.num_channels; +} + +SOKOL_API_IMPL int saudio_expect(void) { + if (_saudio.valid) { + const int num_frames = _saudio_fifo_writable_bytes(&_saudio.fifo) / _saudio.bytes_per_frame; + return num_frames; + } + else { + return 0; + } +} + +SOKOL_API_IMPL int saudio_push(const float* frames, int num_frames) { + SOKOL_ASSERT(frames && (num_frames > 0)); + if (_saudio.valid) { + const int num_bytes = num_frames * _saudio.bytes_per_frame; + const int num_written = _saudio_fifo_write(&_saudio.fifo, (const uint8_t*)frames, num_bytes); + return num_written / _saudio.bytes_per_frame; + } + else { + return 0; + } +} + +#undef _saudio_def +#undef _saudio_def_flt + +#if defined(_SAUDIO_WINDOWS) +#ifdef _MSC_VER +#pragma warning(pop) +#endif +#endif + +#endif /* SOKOL_AUDIO_IMPL */ diff --git a/v_windows/v/old/thirdparty/sokol/sokol_gfx.h b/v_windows/v/old/thirdparty/sokol/sokol_gfx.h new file mode 100644 index 0000000..9049ae2 --- /dev/null +++ b/v_windows/v/old/thirdparty/sokol/sokol_gfx.h @@ -0,0 +1,15672 @@ +#if defined(SOKOL_IMPL) && !defined(SOKOL_GFX_IMPL) +#define SOKOL_GFX_IMPL +#endif +#ifndef SOKOL_GFX_INCLUDED +/* + sokol_gfx.h -- simple 3D API wrapper + + Project URL: https://github.com/floooh/sokol + + Example code: https://github.com/floooh/sokol-samples + + Do this: + #define SOKOL_IMPL or + #define SOKOL_GFX_IMPL + before you include this file in *one* C or C++ file to create the + implementation. 
+ + In the same place define one of the following to select the rendering + backend: + #define SOKOL_GLCORE33 + #define SOKOL_GLES2 + #define SOKOL_GLES3 + #define SOKOL_D3D11 + #define SOKOL_METAL + #define SOKOL_WGPU + #define SOKOL_DUMMY_BACKEND + + I.e. for the GL 3.3 Core Profile it should look like this: + + #include ... + #include ... + #define SOKOL_IMPL + #define SOKOL_GLCORE33 + #include "sokol_gfx.h" + + The dummy backend replaces the platform-specific backend code with empty + stub functions. This is useful for writing tests that need to run on the + command line. + + Optionally provide the following defines with your own implementations: + + SOKOL_ASSERT(c) - your own assert macro (default: assert(c)) + SOKOL_MALLOC(s) - your own malloc function (default: malloc(s)) + SOKOL_FREE(p) - your own free function (default: free(p)) + SOKOL_LOG(msg) - your own logging function (default: puts(msg)) + SOKOL_UNREACHABLE() - a guard macro for unreachable code (default: assert(false)) + SOKOL_GFX_API_DECL - public function declaration prefix (default: extern) + SOKOL_API_DECL - same as SOKOL_GFX_API_DECL + SOKOL_API_IMPL - public function implementation prefix (default: -) + SOKOL_TRACE_HOOKS - enable trace hook callbacks (search below for TRACE HOOKS) + SOKOL_EXTERNAL_GL_LOADER - indicates that you're using your own GL loader, in this case + sokol_gfx.h will not include any platform GL headers and disable + the integrated Win32 GL loader + + If sokol_gfx.h is compiled as a DLL, define the following before + including the declaration or implementation: + + SOKOL_DLL + + On Windows, SOKOL_DLL will define SOKOL_GFX_API_DECL as __declspec(dllexport) + or __declspec(dllimport) as needed. + + If you want to compile without deprecated structs and functions, + define: + + SOKOL_NO_DEPRECATED + + Optionally define the following to force debug checks and validations + even in release mode: + + SOKOL_DEBUG - by default this is defined if _DEBUG is defined + + sokol_gfx DOES NOT: + =================== + - create a window or the 3D-API context/device, you must do this + before sokol_gfx is initialized, and pass any required information + (like 3D device pointers) to the sokol_gfx initialization call + + - present the rendered frame, how this is done exactly usually depends + on how the window and 3D-API context/device was created + + - provide a unified shader language, instead 3D-API-specific shader + source-code or shader-bytecode must be provided (for the "official" + offline shader cross-compiler, see here: + https://github.com/floooh/sokol-tools/blob/master/docs/sokol-shdc.md) + + STEP BY STEP + ============ + --- to initialize sokol_gfx, after creating a window and a 3D-API + context/device, call: + + sg_setup(const sg_desc*) + + --- create resource objects (at least buffers, shaders and pipelines, + and optionally images and passes): + + sg_buffer sg_make_buffer(const sg_buffer_desc*) + sg_image sg_make_image(const sg_image_desc*) + sg_shader sg_make_shader(const sg_shader_desc*) + sg_pipeline sg_make_pipeline(const sg_pipeline_desc*) + sg_pass sg_make_pass(const sg_pass_desc*) + + --- start rendering to the default frame buffer with: + + sg_begin_default_pass(const sg_pass_action* action, int width, int height) + + ...or alternatively with: + + sg_begin_default_passf(const sg_pass_action* action, float width, float height) + + ...which takes the framebuffer width and height as float values. 
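+    For example, clearing the default framebuffer to a dark grey could look
+    like this (a minimal sketch; 'width' and 'height' are placeholders for
+    the current framebuffer size):
+
+        sg_pass_action action = {
+            .colors[0] = { .action = SG_ACTION_CLEAR, .value = { 0.1f, 0.1f, 0.1f, 1.0f } }
+        };
+        sg_begin_default_pass(&action, width, height);
+        // ...sg_apply_pipeline(), sg_apply_bindings(), sg_draw()...
+        sg_end_pass();
+        sg_commit();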
+ + --- or start rendering to an offscreen framebuffer with: + + sg_begin_pass(sg_pass pass, const sg_pass_action* action) + + --- set the pipeline state for the next draw call with: + + sg_apply_pipeline(sg_pipeline pip) + + --- fill an sg_bindings struct with the resource bindings for the next + draw call (1..N vertex buffers, 0 or 1 index buffer, 0..N image objects + to use as textures each on the vertex-shader- and fragment-shader-stage + and then call + + sg_apply_bindings(const sg_bindings* bindings) + + to update the resource bindings + + --- optionally update shader uniform data with: + + sg_apply_uniforms(sg_shader_stage stage, int ub_index, const void* data, int num_bytes) + + --- kick off a draw call with: + + sg_draw(int base_element, int num_elements, int num_instances) + + The sg_draw() function unifies all the different ways to render primitives + in a single call (indexed vs non-indexed rendering, and instanced vs non-instanced + rendering). In case of indexed rendering, base_element and num_element specify + indices in the currently bound index buffer. In case of non-indexed rendering + base_element and num_elements specify vertices in the currently bound + vertex-buffer(s). To perform instanced rendering, the rendering pipeline + must be setup for instancing (see sg_pipeline_desc below), a separate vertex buffer + containing per-instance data must be bound, and the num_instances parameter + must be > 1. + + --- finish the current rendering pass with: + + sg_end_pass() + + --- when done with the current frame, call + + sg_commit() + + --- at the end of your program, shutdown sokol_gfx with: + + sg_shutdown() + + --- if you need to destroy resources before sg_shutdown(), call: + + sg_destroy_buffer(sg_buffer buf) + sg_destroy_image(sg_image img) + sg_destroy_shader(sg_shader shd) + sg_destroy_pipeline(sg_pipeline pip) + sg_destroy_pass(sg_pass pass) + + --- to set a new viewport rectangle, call + + sg_apply_viewport(int x, int y, int width, int height, bool origin_top_left) + + ...or if you want to specifiy the viewport rectangle with float values: + + sg_apply_viewportf(float x, float y, float width, float height, bool origin_top_left) + + --- to set a new scissor rect, call: + + sg_apply_scissor_rect(int x, int y, int width, int height, bool origin_top_left) + + ...or with float values: + + sg_apply_scissor_rectf(float x, float y, float width, float height, bool origin_top_left) + + Both sg_apply_viewport() and sg_apply_scissor_rect() must be called + inside a rendering pass + + Note that sg_begin_default_pass() and sg_begin_pass() will reset both the + viewport and scissor rectangles to cover the entire framebuffer. + + --- to update (overwrite) the content of buffer and image resources, call: + + sg_update_buffer(sg_buffer buf, const sg_range* data) + sg_update_image(sg_image img, const sg_image_data* data) + + Buffers and images to be updated must have been created with + SG_USAGE_DYNAMIC or SG_USAGE_STREAM + + Only one update per frame is allowed for buffer and image resources when + using the sg_update_*() functions. The rationale is to have a simple + countermeasure to avoid the CPU scribbling over data the GPU is currently + using, or the CPU having to wait for the GPU + + Buffer and image updates can be partial, as long as a rendering + operation only references the valid (updated) data in the + buffer or image. 
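+    For example, a buffer created with SG_USAGE_STREAM and no initial data
+    can be overwritten once per frame like this (a minimal sketch; 'vertices'
+    is a placeholder for a value-type array owned by the application):
+
+        sg_buffer vbuf = sg_make_buffer(&(sg_buffer_desc){
+            .size = sizeof(vertices),
+            .usage = SG_USAGE_STREAM
+        });
+        // ...then, at most once per frame:
+        sg_update_buffer(vbuf, &SG_RANGE(vertices));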
+ + --- to append a chunk of data to a buffer resource, call: + + int sg_append_buffer(sg_buffer buf, const sg_range* data) + + The difference to sg_update_buffer() is that sg_append_buffer() + can be called multiple times per frame to append new data to the + buffer piece by piece, optionally interleaved with draw calls referencing + the previously written data. + + sg_append_buffer() returns a byte offset to the start of the + written data, this offset can be assigned to + sg_bindings.vertex_buffer_offsets[n] or + sg_bindings.index_buffer_offset + + Code example: + + for (...) { + const void* data = ...; + const int num_bytes = ...; + int offset = sg_append_buffer(buf, &(sg_range) { .ptr=data, .size=num_bytes }); + bindings.vertex_buffer_offsets[0] = offset; + sg_apply_pipeline(pip); + sg_apply_bindings(&bindings); + sg_apply_uniforms(...); + sg_draw(...); + } + + A buffer to be used with sg_append_buffer() must have been created + with SG_USAGE_DYNAMIC or SG_USAGE_STREAM. + + If the application appends more data to the buffer then fits into + the buffer, the buffer will go into the "overflow" state for the + rest of the frame. + + Any draw calls attempting to render an overflown buffer will be + silently dropped (in debug mode this will also result in a + validation error). + + You can also check manually if a buffer is in overflow-state by calling + + bool sg_query_buffer_overflow(sg_buffer buf) + + NOTE: Due to restrictions in underlying 3D-APIs, appended chunks of + data will be 4-byte aligned in the destination buffer. This means + that there will be gaps in index buffers containing 16-bit indices + when the number of indices in a call to sg_append_buffer() is + odd. This isn't a problem when each call to sg_append_buffer() + is associated with one draw call, but will be problematic when + a single indexed draw call spans several appended chunks of indices. + + --- to check at runtime for optional features, limits and pixelformat support, + call: + + sg_features sg_query_features() + sg_limits sg_query_limits() + sg_pixelformat_info sg_query_pixelformat(sg_pixel_format fmt) + + --- if you need to call into the underlying 3D-API directly, you must call: + + sg_reset_state_cache() + + ...before calling sokol_gfx functions again + + --- you can inspect the original sg_desc structure handed to sg_setup() + by calling sg_query_desc(). This will return an sg_desc struct with + the default values patched in instead of any zero-initialized values + + --- you can inspect various internal resource attributes via: + + sg_buffer_info sg_query_buffer_info(sg_buffer buf) + sg_image_info sg_query_image_info(sg_image img) + sg_shader_info sg_query_shader_info(sg_shader shd) + sg_pipeline_info sg_query_pipeline_info(sg_pipeline pip) + sg_pass_info sg_query_pass_info(sg_pass pass) + + ...please note that the returned info-structs are tied quite closely + to sokol_gfx.h internals, and may change more often than other + public API functions and structs. 
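+    As a sketch of such runtime checks with the query functions above (the
+    fallback strategies are of course application-specific):
+
+        const sg_features feat = sg_query_features();
+        const sg_limits lim = sg_query_limits();
+        if (!feat.instancing) {
+            // ...fall back to a non-instanced rendering path...
+        }
+        if (lim.max_vertex_attrs < SG_MAX_VERTEX_ATTRIBUTES) {
+            // ...use a more compact vertex layout...
+        }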
+ + --- you can ask at runtime what backend sokol_gfx.h has been compiled + for, or whether the GLES3 backend had to fall back to GLES2 with: + + sg_backend sg_query_backend(void) + + --- you can query the default resource creation parameters through the functions + + sg_buffer_desc sg_query_buffer_defaults(const sg_buffer_desc* desc) + sg_image_desc sg_query_image_defaults(const sg_image_desc* desc) + sg_shader_desc sg_query_shader_defaults(const sg_shader_desc* desc) + sg_pipeline_desc sg_query_pipeline_defaults(const sg_pipeline_desc* desc) + sg_pass_desc sg_query_pass_defaults(const sg_pass_desc* desc) + + These functions take a pointer to a desc structure which may contain + zero-initialized items for default values. These zero-init values + will be replaced with their concrete values in the returned desc + struct. + + ON INITIALIZATION: + ================== + When calling sg_setup(), a pointer to an sg_desc struct must be provided + which contains initialization options. These options provide two types + of information to sokol-gfx: + + (1) upper bounds and limits needed to allocate various internal + data structures: + - the max number of resources of each type that can + be alive at the same time, this is used for allocating + internal pools + - the max overall size of uniform data that can be + updated per frame, including a worst-case alignment + per uniform update (this worst-case alignment is 256 bytes) + - the max size of all dynamic resource updates (sg_update_buffer, + sg_append_buffer and sg_update_image) per frame + - the max number of entries in the texture sampler cache + (how many unique texture sampler can exist at the same time) + Not all of those limit values are used by all backends, but it is + good practice to provide them none-the-less. + + (2) 3D-API "context information" (sometimes also called "bindings"): + sokol_gfx.h doesn't create or initialize 3D API objects which are + closely related to the presentation layer (this includes the "rendering + device", the swapchain, and any objects which depend on the + swapchain). These API objects (or callback functions to obtain + them, if those objects might change between frames), must + be provided in a nested sg_context_desc struct inside the + sg_desc struct. If sokol_gfx.h is used together with + sokol_app.h, have a look at the sokol_glue.h header which provides + a convenience function to get a sg_context_desc struct filled out + with context information provided by sokol_app.h + + See the documention block of the sg_desc struct below for more information. + + BACKEND-SPECIFIC TOPICS: + ======================== + --- the GL backends need to know about the internal structure of uniform + blocks, and the texture sampler-name and -type: + + typedef struct { + float mvp[16]; // model-view-projection matrix + float offset0[2]; // some 2D vectors + float offset1[2]; + float offset2[2]; + } params_t; + + // uniform block structure and texture image definition in sg_shader_desc: + sg_shader_desc desc = { + // uniform block description (size and internal structure) + .vs.uniform_blocks[0] = { + .size = sizeof(params_t), + .uniforms = { + [0] = { .name="mvp", .type=SG_UNIFORMTYPE_MAT4 }, + [1] = { .name="offset0", .type=SG_UNIFORMTYPE_VEC2 }, + ... + } + }, + // one texture on the fragment-shader-stage, GLES2/WebGL needs name and image type + .fs.images[0] = { .name="tex", .type=SG_IMAGETYPE_ARRAY } + ... 
+ }; + + --- the Metal and D3D11 backends only need to know the size of uniform blocks, + not their internal member structure, and they only need to know + the type of a texture sampler, not its name: + + sg_shader_desc desc = { + .vs.uniform_blocks[0].size = sizeof(params_t), + .fs.images[0].type = SG_IMAGETYPE_ARRAY, + ... + }; + + --- when creating a shader object, GLES2/WebGL need to know the vertex + attribute names as used in the vertex shader: + + sg_shader_desc desc = { + .attrs = { + [0] = { .name="position" }, + [1] = { .name="color1" } + } + }; + + The vertex attribute names provided when creating a shader will be + used later in sg_create_pipeline() for matching the vertex layout + to vertex shader inputs. + + --- on D3D11 you need to provide a semantic name and semantic index in the + shader description struct instead (see the D3D11 documentation on + D3D11_INPUT_ELEMENT_DESC for details): + + sg_shader_desc desc = { + .attrs = { + [0] = { .sem_name="POSITION", .sem_index=0 } + [1] = { .sem_name="COLOR", .sem_index=1 } + } + }; + + The provided semantic information will be used later in sg_create_pipeline() + to match the vertex layout to vertex shader inputs. + + --- on D3D11, and when passing HLSL source code (instead of byte code) to shader + creation, you can optionally define the shader model targets on the vertex + stage: + + sg_shader_Desc desc = { + .vs = { + ... + .d3d11_target = "vs_5_0" + }, + .fs = { + ... + .d3d11_target = "ps_5_0" + } + }; + + The default targets are "ps_4_0" and "fs_4_0". Note that those target names + are only used when compiling shaders from source. They are ignored when + creating a shader from bytecode. + + --- on Metal, GL 3.3 or GLES3/WebGL2, you don't need to provide an attribute + name or semantic name, since vertex attributes can be bound by their slot index + (this is mandatory in Metal, and optional in GL): + + sg_pipeline_desc desc = { + .layout = { + .attrs = { + [0] = { .format=SG_VERTEXFORMAT_FLOAT3 }, + [1] = { .format=SG_VERTEXFORMAT_FLOAT4 } + } + } + }; + + WORKING WITH CONTEXTS + ===================== + sokol-gfx allows to switch between different rendering contexts and + associate resource objects with contexts. This is useful to + create GL applications that render into multiple windows. + + A rendering context keeps track of all resources created while + the context is active. When the context is destroyed, all resources + "belonging to the context" are destroyed as well. + + A default context will be created and activated implicitly in + sg_setup(), and destroyed in sg_shutdown(). So for a typical application + which *doesn't* use multiple contexts, nothing changes, and calling + the context functions isn't necessary. + + Three functions have been added to work with contexts: + + --- sg_context sg_setup_context(): + This must be called once after a GL context has been created and + made active. + + --- void sg_activate_context(sg_context ctx) + This must be called after making a different GL context active. + Apart from 3D-API-specific actions, the call to sg_activate_context() + will internally call sg_reset_state_cache(). + + --- void sg_discard_context(sg_context ctx) + This must be called right before a GL context is destroyed and + will destroy all resources associated with the context (that + have been created while the context was active) The GL context must be + active at the time sg_discard_context(sg_context ctx) is called. 
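+    Put together, working with an additional context roughly follows this
+    pattern (a sketch only; the GL-context calls themselves belong to
+    whatever windowing library is used and are shown as comments):
+
+        // once, after creating the second GL context and making it current:
+        sg_context ctx = sg_setup_context();
+        // each frame, after making that GL context current again:
+        sg_activate_context(ctx);
+        // ...create resources, render...
+        // right before destroying that GL context (it must still be current):
+        sg_discard_context(ctx);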
+ + Also note that resources (buffers, images, shaders and pipelines) must + only be used or destroyed while the same GL context is active that + was also active while the resource was created (an exception is + resource sharing on GL, such resources can be used while + another context is active, but must still be destroyed under + the same context that was active during creation). + + For more information, check out the multiwindow-glfw sample: + + https://github.com/floooh/sokol-samples/blob/master/glfw/multiwindow-glfw.c + + TRACE HOOKS: + ============ + sokol_gfx.h optionally allows to install "trace hook" callbacks for + each public API functions. When a public API function is called, and + a trace hook callback has been installed for this function, the + callback will be invoked with the parameters and result of the function. + This is useful for things like debugging- and profiling-tools, or + keeping track of resource creation and destruction. + + To use the trace hook feature: + + --- Define SOKOL_TRACE_HOOKS before including the implementation. + + --- Setup an sg_trace_hooks structure with your callback function + pointers (keep all function pointers you're not interested + in zero-initialized), optionally set the user_data member + in the sg_trace_hooks struct. + + --- Install the trace hooks by calling sg_install_trace_hooks(), + the return value of this function is another sg_trace_hooks + struct which contains the previously set of trace hooks. + You should keep this struct around, and call those previous + functions pointers from your own trace callbacks for proper + chaining. + + As an example of how trace hooks are used, have a look at the + imgui/sokol_gfx_imgui.h header which implements a realtime + debugging UI for sokol_gfx.h on top of Dear ImGui. 
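+    A minimal installation sketch (this assumes sg_install_trace_hooks()
+    takes a pointer to the hooks struct as declared in this header, and
+    'my_tracker' is just a placeholder for application state):
+
+        sg_trace_hooks hooks = { 0 };   // unused callbacks stay zero-initialized
+        hooks.user_data = &my_tracker;  // optional, passed to the callbacks
+        // ...assign the individual callback members you are interested in...
+        sg_trace_hooks old_hooks = sg_install_trace_hooks(&hooks);
+        // keep 'old_hooks' around and forward to its function pointers from
+        // your own callbacks for proper chaining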
+ + A NOTE ON PORTABLE PACKED VERTEX FORMATS: + ========================================= + There are two things to consider when using packed + vertex formats like UBYTE4, SHORT2, etc which need to work + across all backends: + + - D3D11 can only convert *normalized* vertex formats to + floating point during vertex fetch, normalized formats + have a trailing 'N', and are "normalized" to a range + -1.0..+1.0 (for the signed formats) or 0.0..1.0 (for the + unsigned formats): + + - SG_VERTEXFORMAT_BYTE4N + - SG_VERTEXFORMAT_UBYTE4N + - SG_VERTEXFORMAT_SHORT2N + - SG_VERTEXFORMAT_USHORT2N + - SG_VERTEXFORMAT_SHORT4N + - SG_VERTEXFORMAT_USHORT4N + + D3D11 will not convert *non-normalized* vertex formats to floating point + vertex shader inputs, those can only be uses with the *ivecn* vertex shader + input types when D3D11 is used as backend (GL and Metal can use both formats) + + - SG_VERTEXFORMAT_BYTE4, + - SG_VERTEXFORMAT_UBYTE4 + - SG_VERTEXFORMAT_SHORT2 + - SG_VERTEXFORMAT_SHORT4 + + - WebGL/GLES2 cannot use integer vertex shader inputs (int or ivecn) + + - SG_VERTEXFORMAT_UINT10_N2 is not supported on WebGL/GLES2 + + So for a vertex input layout which works on all platforms, only use the following + vertex formats, and if needed "expand" the normalized vertex shader + inputs in the vertex shader by multiplying with 127.0, 255.0, 32767.0 or + 65535.0: + + - SG_VERTEXFORMAT_FLOAT, + - SG_VERTEXFORMAT_FLOAT2, + - SG_VERTEXFORMAT_FLOAT3, + - SG_VERTEXFORMAT_FLOAT4, + - SG_VERTEXFORMAT_BYTE4N, + - SG_VERTEXFORMAT_UBYTE4N, + - SG_VERTEXFORMAT_SHORT2N, + - SG_VERTEXFORMAT_USHORT2N + - SG_VERTEXFORMAT_SHORT4N, + - SG_VERTEXFORMAT_USHORT4N + + TODO: + ==== + - talk about asynchronous resource creation + + zlib/libpng license + + Copyright (c) 2018 Andre Weissflog + + This software is provided 'as-is', without any express or implied warranty. + In no event will the authors be held liable for any damages arising from the + use of this software. + + Permission is granted to anyone to use this software for any purpose, + including commercial applications, and to alter it and redistribute it + freely, subject to the following restrictions: + + 1. The origin of this software must not be misrepresented; you must not + claim that you wrote the original software. If you use this software in a + product, an acknowledgment in the product documentation would be + appreciated but is not required. + + 2. Altered source versions must be plainly marked as such, and must not + be misrepresented as being the original software. + + 3. This notice may not be removed or altered from any source + distribution. 
+*/ +#define SOKOL_GFX_INCLUDED (1) +#include // size_t +#include +#include + +#if defined(SOKOL_API_DECL) && !defined(SOKOL_GFX_API_DECL) +#define SOKOL_GFX_API_DECL SOKOL_API_DECL +#endif +#ifndef SOKOL_GFX_API_DECL +#if defined(_WIN32) && defined(SOKOL_DLL) && defined(SOKOL_GFX_IMPL) +#define SOKOL_GFX_API_DECL __declspec(dllexport) +#elif defined(_WIN32) && defined(SOKOL_DLL) +#define SOKOL_GFX_API_DECL __declspec(dllimport) +#else +#define SOKOL_GFX_API_DECL extern +#endif +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +/* + Resource id typedefs: + + sg_buffer: vertex- and index-buffers + sg_image: textures and render targets + sg_shader: vertex- and fragment-shaders, uniform blocks + sg_pipeline: associated shader and vertex-layouts, and render states + sg_pass: a bundle of render targets and actions on them + sg_context: a 'context handle' for switching between 3D-API contexts + + Instead of pointers, resource creation functions return a 32-bit + number which uniquely identifies the resource object. + + The 32-bit resource id is split into a 16-bit pool index in the lower bits, + and a 16-bit 'unique counter' in the upper bits. The index allows fast + pool lookups, and combined with the unique-mask it allows to detect + 'dangling accesses' (trying to use an object which no longer exists, and + its pool slot has been reused for a new object) + + The resource ids are wrapped into a struct so that the compiler + can complain when the wrong resource type is used. +*/ +typedef struct sg_buffer { uint32_t id; } sg_buffer; +typedef struct sg_image { uint32_t id; } sg_image; +typedef struct sg_shader { uint32_t id; } sg_shader; +typedef struct sg_pipeline { uint32_t id; } sg_pipeline; +typedef struct sg_pass { uint32_t id; } sg_pass; +typedef struct sg_context { uint32_t id; } sg_context; + +/* + sg_range is a pointer-size-pair struct used to pass memory blobs into + sokol-gfx. When initialized from a value type (array or struct), you can + use the SG_RANGE() macro to build an sg_range struct. For functions which + take either a sg_range pointer, or a (C++) sg_range reference, use the + SG_RANGE_REF macro as a solution which compiles both in C and C++. +*/ +typedef struct sg_range { + const void* ptr; + size_t size; +} sg_range; + +// disabling this for every includer isn't great, but the warnings are also quite pointless +#if defined(_MSC_VER) +#pragma warning(disable:4221) /* /W4 only: nonstandard extension used: 'x': cannot be initialized using address of automatic variable 'y' */ +#pragma warning(disable:4204) /* VS2015: nonstandard extension used: non-constant aggregate initializer */ +#endif +#if defined(__cplusplus) +#define SG_RANGE(x) sg_range{ &x, sizeof(x) } +#define SG_RANGE_REF(x) sg_range{ &x, sizeof(x) } +#else +#define SG_RANGE(x) (sg_range){ &x, sizeof(x) } +#define SG_RANGE_REF(x) &(sg_range){ &x, sizeof(x) } +#endif + +// various compile-time constants +enum { + SG_INVALID_ID = 0, + SG_NUM_SHADER_STAGES = 2, + SG_NUM_INFLIGHT_FRAMES = 2, + SG_MAX_COLOR_ATTACHMENTS = 4, + SG_MAX_SHADERSTAGE_BUFFERS = 8, + SG_MAX_SHADERSTAGE_IMAGES = 12, + SG_MAX_SHADERSTAGE_UBS = 4, + SG_MAX_UB_MEMBERS = 16, + SG_MAX_VERTEX_ATTRIBUTES = 16, /* NOTE: actual max vertex attrs can be less on GLES2, see sg_limits! */ + SG_MAX_MIPMAPS = 16, + SG_MAX_TEXTUREARRAY_LAYERS = 128 +}; + +/* + sg_color + + An RGBA color value. 
+*/ +typedef struct sg_color { float r, g, b, a; } sg_color; + +/* + sg_backend + + The active 3D-API backend, use the function sg_query_backend() + to get the currently active backend. + + NOTE that SG_BACKEND_GLES2 will be returned if sokol-gfx was + compiled with SOKOL_GLES3, but the runtime platform doesn't support + GLES3/WebGL2 and sokol-gfx had to fallback to GLES2/WebGL. +*/ +typedef enum sg_backend { + SG_BACKEND_GLCORE33, + SG_BACKEND_GLES2, + SG_BACKEND_GLES3, + SG_BACKEND_D3D11, + SG_BACKEND_METAL_IOS, + SG_BACKEND_METAL_MACOS, + SG_BACKEND_METAL_SIMULATOR, + SG_BACKEND_WGPU, + SG_BACKEND_DUMMY, +} sg_backend; + +/* + sg_pixel_format + + sokol_gfx.h basically uses the same pixel formats as WebGPU, since these + are supported on most newer GPUs. GLES2 and WebGL only supports a much + smaller subset of actually available pixel formats. Call + sg_query_pixelformat() to check at runtime if a pixel format supports the + desired features. + + A pixelformat name consist of three parts: + + - components (R, RG, RGB or RGBA) + - bit width per component (8, 16 or 32) + - component data type: + - unsigned normalized (no postfix) + - signed normalized (SN postfix) + - unsigned integer (UI postfix) + - signed integer (SI postfix) + - float (F postfix) + + Not all pixel formats can be used for everything, call sg_query_pixelformat() + to inspect the capabilities of a given pixelformat. The function returns + an sg_pixelformat_info struct with the following bool members: + + - sample: the pixelformat can be sampled as texture at least with + nearest filtering + - filter: the pixelformat can be samples as texture with linear + filtering + - render: the pixelformat can be used for render targets + - blend: blending is supported when using the pixelformat for + render targets + - msaa: multisample-antialiasing is supported when using the + pixelformat for render targets + - depth: the pixelformat can be used for depth-stencil attachments + + When targeting GLES2/WebGL, the only safe formats to use + as texture are SG_PIXELFORMAT_R8 and SG_PIXELFORMAT_RGBA8. For rendering + in GLES2/WebGL, only SG_PIXELFORMAT_RGBA8 is safe. All other formats + must be checked via sg_query_pixelformats(). + + The default pixel format for texture images is SG_PIXELFORMAT_RGBA8. + + The default pixel format for render target images is platform-dependent: + - for Metal and D3D11 it is SG_PIXELFORMAT_BGRA8 + - for GL backends it is SG_PIXELFORMAT_RGBA8 + + This is mainly because of the default framebuffer which is setup outside + of sokol_gfx.h. On some backends, using BGRA for the default frame buffer + allows more efficient frame flips. For your own offscreen-render-targets, + use whatever renderable pixel format is convenient for you. 
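+    For example, before creating a floating-point render target, the
+    required capabilities can be checked like this (a brief illustrative
+    check only):
+
+        const sg_pixelformat_info info = sg_query_pixelformat(SG_PIXELFORMAT_RGBA16F);
+        if (info.filter && info.render) {
+            // ...SG_PIXELFORMAT_RGBA16F can be sampled with linear filtering
+            // and used as a render target on this platform...
+        }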
+*/ +typedef enum sg_pixel_format { + _SG_PIXELFORMAT_DEFAULT, /* value 0 reserved for default-init */ + SG_PIXELFORMAT_NONE, + + SG_PIXELFORMAT_R8, + SG_PIXELFORMAT_R8SN, + SG_PIXELFORMAT_R8UI, + SG_PIXELFORMAT_R8SI, + + SG_PIXELFORMAT_R16, + SG_PIXELFORMAT_R16SN, + SG_PIXELFORMAT_R16UI, + SG_PIXELFORMAT_R16SI, + SG_PIXELFORMAT_R16F, + SG_PIXELFORMAT_RG8, + SG_PIXELFORMAT_RG8SN, + SG_PIXELFORMAT_RG8UI, + SG_PIXELFORMAT_RG8SI, + + SG_PIXELFORMAT_R32UI, + SG_PIXELFORMAT_R32SI, + SG_PIXELFORMAT_R32F, + SG_PIXELFORMAT_RG16, + SG_PIXELFORMAT_RG16SN, + SG_PIXELFORMAT_RG16UI, + SG_PIXELFORMAT_RG16SI, + SG_PIXELFORMAT_RG16F, + SG_PIXELFORMAT_RGBA8, + SG_PIXELFORMAT_RGBA8SN, + SG_PIXELFORMAT_RGBA8UI, + SG_PIXELFORMAT_RGBA8SI, + SG_PIXELFORMAT_BGRA8, + SG_PIXELFORMAT_RGB10A2, + SG_PIXELFORMAT_RG11B10F, + + SG_PIXELFORMAT_RG32UI, + SG_PIXELFORMAT_RG32SI, + SG_PIXELFORMAT_RG32F, + SG_PIXELFORMAT_RGBA16, + SG_PIXELFORMAT_RGBA16SN, + SG_PIXELFORMAT_RGBA16UI, + SG_PIXELFORMAT_RGBA16SI, + SG_PIXELFORMAT_RGBA16F, + + SG_PIXELFORMAT_RGBA32UI, + SG_PIXELFORMAT_RGBA32SI, + SG_PIXELFORMAT_RGBA32F, + + SG_PIXELFORMAT_DEPTH, + SG_PIXELFORMAT_DEPTH_STENCIL, + + SG_PIXELFORMAT_BC1_RGBA, + SG_PIXELFORMAT_BC2_RGBA, + SG_PIXELFORMAT_BC3_RGBA, + SG_PIXELFORMAT_BC4_R, + SG_PIXELFORMAT_BC4_RSN, + SG_PIXELFORMAT_BC5_RG, + SG_PIXELFORMAT_BC5_RGSN, + SG_PIXELFORMAT_BC6H_RGBF, + SG_PIXELFORMAT_BC6H_RGBUF, + SG_PIXELFORMAT_BC7_RGBA, + SG_PIXELFORMAT_PVRTC_RGB_2BPP, + SG_PIXELFORMAT_PVRTC_RGB_4BPP, + SG_PIXELFORMAT_PVRTC_RGBA_2BPP, + SG_PIXELFORMAT_PVRTC_RGBA_4BPP, + SG_PIXELFORMAT_ETC2_RGB8, + SG_PIXELFORMAT_ETC2_RGB8A1, + SG_PIXELFORMAT_ETC2_RGBA8, + SG_PIXELFORMAT_ETC2_RG11, + SG_PIXELFORMAT_ETC2_RG11SN, + + _SG_PIXELFORMAT_NUM, + _SG_PIXELFORMAT_FORCE_U32 = 0x7FFFFFFF +} sg_pixel_format; + +/* + Runtime information about a pixel format, returned + by sg_query_pixelformat(). 
+*/ +typedef struct sg_pixelformat_info { + bool sample; // pixel format can be sampled in shaders + bool filter; // pixel format can be sampled with filtering + bool render; // pixel format can be used as render target + bool blend; // alpha-blending is supported + bool msaa; // pixel format can be used as MSAA render target + bool depth; // pixel format is a depth format + #if defined(SOKOL_ZIG_BINDINGS) + uint32_t __pad[3]; + #endif +} sg_pixelformat_info; + +/* + Runtime information about available optional features, + returned by sg_query_features() +*/ +typedef struct sg_features { + bool instancing; // hardware instancing supported + bool origin_top_left; // framebuffer and texture origin is in top left corner + bool multiple_render_targets; // offscreen render passes can have multiple render targets attached + bool msaa_render_targets; // offscreen render passes support MSAA antialiasing + bool imagetype_3d; // creation of SG_IMAGETYPE_3D images is supported + bool imagetype_array; // creation of SG_IMAGETYPE_ARRAY images is supported + bool image_clamp_to_border; // border color and clamp-to-border UV-wrap mode is supported + bool mrt_independent_blend_state; // multiple-render-target rendering can use per-render-target blend state + bool mrt_independent_write_mask; // multiple-render-target rendering can use per-render-target color write masks + #if defined(SOKOL_ZIG_BINDINGS) + uint32_t __pad[3]; + #endif +} sg_features; + +/* + Runtime information about resource limits, returned by sg_query_limit() +*/ +typedef struct sg_limits { + int max_image_size_2d; // max width/height of SG_IMAGETYPE_2D images + int max_image_size_cube; // max width/height of SG_IMAGETYPE_CUBE images + int max_image_size_3d; // max width/height/depth of SG_IMAGETYPE_3D images + int max_image_size_array; // max width/height of SG_IMAGETYPE_ARRAY images + int max_image_array_layers; // max number of layers in SG_IMAGETYPE_ARRAY images + int max_vertex_attrs; // <= SG_MAX_VERTEX_ATTRIBUTES (only on some GLES2 impls) +} sg_limits; + +/* + sg_resource_state + + The current state of a resource in its resource pool. + Resources start in the INITIAL state, which means the + pool slot is unoccupied and can be allocated. When a resource is + created, first an id is allocated, and the resource pool slot + is set to state ALLOC. After allocation, the resource is + initialized, which may result in the VALID or FAILED state. The + reason why allocation and initialization are separate is because + some resource types (e.g. buffers and images) might be asynchronously + initialized by the user application. If a resource which is not + in the VALID state is attempted to be used for rendering, rendering + operations will silently be dropped. + + The special INVALID state is returned in sg_query_xxx_state() if no + resource object exists for the provided resource id. +*/ +typedef enum sg_resource_state { + SG_RESOURCESTATE_INITIAL, + SG_RESOURCESTATE_ALLOC, + SG_RESOURCESTATE_VALID, + SG_RESOURCESTATE_FAILED, + SG_RESOURCESTATE_INVALID, + _SG_RESOURCESTATE_FORCE_U32 = 0x7FFFFFFF +} sg_resource_state; + +/* + sg_usage + + A resource usage hint describing the update strategy of + buffers and images. 
This is used in the sg_buffer_desc.usage + and sg_image_desc.usage members when creating buffers + and images: + + SG_USAGE_IMMUTABLE: the resource will never be updated with + new data, instead the content of the + resource must be provided on creation + SG_USAGE_DYNAMIC: the resource will be updated infrequently + with new data (this could range from "once + after creation", to "quite often but not + every frame") + SG_USAGE_STREAM: the resource will be updated each frame + with new content + + The rendering backends use this hint to prevent that the + CPU needs to wait for the GPU when attempting to update + a resource that might be currently accessed by the GPU. + + Resource content is updated with the functions sg_update_buffer() or + sg_append_buffer() for buffer objects, and sg_update_image() for image + objects. For the sg_update_*() functions, only one update is allowed per + frame and resource object, while sg_append_buffer() can be called + multiple times per frame on the same buffer. The application must update + all data required for rendering (this means that the update data can be + smaller than the resource size, if only a part of the overall resource + size is used for rendering, you only need to make sure that the data that + *is* used is valid). + + The default usage is SG_USAGE_IMMUTABLE. +*/ +typedef enum sg_usage { + _SG_USAGE_DEFAULT, /* value 0 reserved for default-init */ + SG_USAGE_IMMUTABLE, + SG_USAGE_DYNAMIC, + SG_USAGE_STREAM, + _SG_USAGE_NUM, + _SG_USAGE_FORCE_U32 = 0x7FFFFFFF +} sg_usage; + +/* + sg_buffer_type + + This indicates whether a buffer contains vertex- or index-data, + used in the sg_buffer_desc.type member when creating a buffer. + + The default value is SG_BUFFERTYPE_VERTEXBUFFER. +*/ +typedef enum sg_buffer_type { + _SG_BUFFERTYPE_DEFAULT, /* value 0 reserved for default-init */ + SG_BUFFERTYPE_VERTEXBUFFER, + SG_BUFFERTYPE_INDEXBUFFER, + _SG_BUFFERTYPE_NUM, + _SG_BUFFERTYPE_FORCE_U32 = 0x7FFFFFFF +} sg_buffer_type; + +/* + sg_index_type + + Indicates whether indexed rendering (fetching vertex-indices from an + index buffer) is used, and if yes, the index data type (16- or 32-bits). + This is used in the sg_pipeline_desc.index_type member when creating a + pipeline object. + + The default index type is SG_INDEXTYPE_NONE. +*/ +typedef enum sg_index_type { + _SG_INDEXTYPE_DEFAULT, /* value 0 reserved for default-init */ + SG_INDEXTYPE_NONE, + SG_INDEXTYPE_UINT16, + SG_INDEXTYPE_UINT32, + _SG_INDEXTYPE_NUM, + _SG_INDEXTYPE_FORCE_U32 = 0x7FFFFFFF +} sg_index_type; + +/* + sg_image_type + + Indicates the basic type of an image object (2D-texture, cubemap, + 3D-texture or 2D-array-texture). 3D- and array-textures are not supported + on the GLES2/WebGL backend (use sg_query_features().imagetype_3d and + sg_query_features().imagetype_array to check for support). The image type + is used in the sg_image_desc.type member when creating an image, and + in sg_shader_image_desc when describing a shader's texture sampler binding. + + The default image type when creating an image is SG_IMAGETYPE_2D. +*/ +typedef enum sg_image_type { + _SG_IMAGETYPE_DEFAULT, /* value 0 reserved for default-init */ + SG_IMAGETYPE_2D, + SG_IMAGETYPE_CUBE, + SG_IMAGETYPE_3D, + SG_IMAGETYPE_ARRAY, + _SG_IMAGETYPE_NUM, + _SG_IMAGETYPE_FORCE_U32 = 0x7FFFFFFF +} sg_image_type; + +/* + sg_sampler_type + + Indicates the basic data type of a shader's texture sampler which + can be float , unsigned integer or signed integer. 
The sampler + type is used in the sg_shader_image_desc to describe the + sampler type of a shader's texture sampler binding. + + The default sampler type is SG_SAMPLERTYPE_FLOAT. +*/ +typedef enum sg_sampler_type { + _SG_SAMPLERTYPE_DEFAULT, /* value 0 reserved for default-init */ + SG_SAMPLERTYPE_FLOAT, + SG_SAMPLERTYPE_SINT, + SG_SAMPLERTYPE_UINT, +} sg_sampler_type; + +/* + sg_cube_face + + The cubemap faces. Use these as indices in the sg_image_desc.content + array. +*/ +typedef enum sg_cube_face { + SG_CUBEFACE_POS_X, + SG_CUBEFACE_NEG_X, + SG_CUBEFACE_POS_Y, + SG_CUBEFACE_NEG_Y, + SG_CUBEFACE_POS_Z, + SG_CUBEFACE_NEG_Z, + SG_CUBEFACE_NUM, + _SG_CUBEFACE_FORCE_U32 = 0x7FFFFFFF +} sg_cube_face; + +/* + sg_shader_stage + + There are 2 shader stages: vertex- and fragment-shader-stage. + Each shader stage consists of: + + - one slot for a shader function (provided as source- or byte-code) + - SG_MAX_SHADERSTAGE_UBS slots for uniform blocks + - SG_MAX_SHADERSTAGE_IMAGES slots for images used as textures by + the shader function +*/ +typedef enum sg_shader_stage { + SG_SHADERSTAGE_VS, + SG_SHADERSTAGE_FS, + _SG_SHADERSTAGE_FORCE_U32 = 0x7FFFFFFF +} sg_shader_stage; + +/* + sg_primitive_type + + This is the common subset of 3D primitive types supported across all 3D + APIs. This is used in the sg_pipeline_desc.primitive_type member when + creating a pipeline object. + + The default primitive type is SG_PRIMITIVETYPE_TRIANGLES. +*/ +typedef enum sg_primitive_type { + _SG_PRIMITIVETYPE_DEFAULT, /* value 0 reserved for default-init */ + SG_PRIMITIVETYPE_POINTS, + SG_PRIMITIVETYPE_LINES, + SG_PRIMITIVETYPE_LINE_STRIP, + SG_PRIMITIVETYPE_TRIANGLES, + SG_PRIMITIVETYPE_TRIANGLE_STRIP, + _SG_PRIMITIVETYPE_NUM, + _SG_PRIMITIVETYPE_FORCE_U32 = 0x7FFFFFFF +} sg_primitive_type; + +/* + sg_filter + + The filtering mode when sampling a texture image. This is + used in the sg_image_desc.min_filter and sg_image_desc.mag_filter + members when creating an image object. + + The default filter mode is SG_FILTER_NEAREST. +*/ +typedef enum sg_filter { + _SG_FILTER_DEFAULT, /* value 0 reserved for default-init */ + SG_FILTER_NEAREST, + SG_FILTER_LINEAR, + SG_FILTER_NEAREST_MIPMAP_NEAREST, + SG_FILTER_NEAREST_MIPMAP_LINEAR, + SG_FILTER_LINEAR_MIPMAP_NEAREST, + SG_FILTER_LINEAR_MIPMAP_LINEAR, + _SG_FILTER_NUM, + _SG_FILTER_FORCE_U32 = 0x7FFFFFFF +} sg_filter; + +/* + sg_wrap + + The texture coordinates wrapping mode when sampling a texture + image. This is used in the sg_image_desc.wrap_u, .wrap_v + and .wrap_w members when creating an image. + + The default wrap mode is SG_WRAP_REPEAT. + + NOTE: SG_WRAP_CLAMP_TO_BORDER is not supported on all backends + and platforms. To check for support, call sg_query_features() + and check the "clamp_to_border" boolean in the returned + sg_features struct. + + Platforms which don't support SG_WRAP_CLAMP_TO_BORDER will silently fall back + to SG_WRAP_CLAMP_TO_EDGE without a validation error. + + Platforms which support clamp-to-border are: + + - all desktop GL platforms + - Metal on macOS + - D3D11 + + Platforms which do not support clamp-to-border: + + - GLES2/3 and WebGL/WebGL2 + - Metal on iOS +*/ +typedef enum sg_wrap { + _SG_WRAP_DEFAULT, /* value 0 reserved for default-init */ + SG_WRAP_REPEAT, + SG_WRAP_CLAMP_TO_EDGE, + SG_WRAP_CLAMP_TO_BORDER, + SG_WRAP_MIRRORED_REPEAT, + _SG_WRAP_NUM, + _SG_WRAP_FORCE_U32 = 0x7FFFFFFF +} sg_wrap; + +/* + sg_border_color + + The border color to use when sampling a texture, and the UV wrap + mode is SG_WRAP_CLAMP_TO_BORDER. 
+ + The default border color is SG_BORDERCOLOR_OPAQUE_BLACK +*/ +typedef enum sg_border_color { + _SG_BORDERCOLOR_DEFAULT, /* value 0 reserved for default-init */ + SG_BORDERCOLOR_TRANSPARENT_BLACK, + SG_BORDERCOLOR_OPAQUE_BLACK, + SG_BORDERCOLOR_OPAQUE_WHITE, + _SG_BORDERCOLOR_NUM, + _SG_BORDERCOLOR_FORCE_U32 = 0x7FFFFFFF +} sg_border_color; + +/* + sg_vertex_format + + The data type of a vertex component. This is used to describe + the layout of vertex data when creating a pipeline object. +*/ +typedef enum sg_vertex_format { + SG_VERTEXFORMAT_INVALID, + SG_VERTEXFORMAT_FLOAT, + SG_VERTEXFORMAT_FLOAT2, + SG_VERTEXFORMAT_FLOAT3, + SG_VERTEXFORMAT_FLOAT4, + SG_VERTEXFORMAT_BYTE4, + SG_VERTEXFORMAT_BYTE4N, + SG_VERTEXFORMAT_UBYTE4, + SG_VERTEXFORMAT_UBYTE4N, + SG_VERTEXFORMAT_SHORT2, + SG_VERTEXFORMAT_SHORT2N, + SG_VERTEXFORMAT_USHORT2N, + SG_VERTEXFORMAT_SHORT4, + SG_VERTEXFORMAT_SHORT4N, + SG_VERTEXFORMAT_USHORT4N, + SG_VERTEXFORMAT_UINT10_N2, + _SG_VERTEXFORMAT_NUM, + _SG_VERTEXFORMAT_FORCE_U32 = 0x7FFFFFFF +} sg_vertex_format; + +/* + sg_vertex_step + + Defines whether the input pointer of a vertex input stream is advanced + 'per vertex' or 'per instance'. The default step-func is + SG_VERTEXSTEP_PER_VERTEX. SG_VERTEXSTEP_PER_INSTANCE is used with + instanced-rendering. + + The vertex-step is part of the vertex-layout definition + when creating pipeline objects. +*/ +typedef enum sg_vertex_step { + _SG_VERTEXSTEP_DEFAULT, /* value 0 reserved for default-init */ + SG_VERTEXSTEP_PER_VERTEX, + SG_VERTEXSTEP_PER_INSTANCE, + _SG_VERTEXSTEP_NUM, + _SG_VERTEXSTEP_FORCE_U32 = 0x7FFFFFFF +} sg_vertex_step; + +/* + sg_uniform_type + + The data type of a uniform block member. This is used to + describe the internal layout of uniform blocks when creating + a shader object. +*/ +typedef enum sg_uniform_type { + SG_UNIFORMTYPE_INVALID, + SG_UNIFORMTYPE_FLOAT, + SG_UNIFORMTYPE_FLOAT2, + SG_UNIFORMTYPE_FLOAT3, + SG_UNIFORMTYPE_FLOAT4, + SG_UNIFORMTYPE_MAT4, + _SG_UNIFORMTYPE_NUM, + _SG_UNIFORMTYPE_FORCE_U32 = 0x7FFFFFFF +} sg_uniform_type; + +/* + sg_cull_mode + + The face-culling mode, this is used in the + sg_pipeline_desc.cull_mode member when creating a + pipeline object. + + The default cull mode is SG_CULLMODE_NONE +*/ +typedef enum sg_cull_mode { + _SG_CULLMODE_DEFAULT, /* value 0 reserved for default-init */ + SG_CULLMODE_NONE, + SG_CULLMODE_FRONT, + SG_CULLMODE_BACK, + _SG_CULLMODE_NUM, + _SG_CULLMODE_FORCE_U32 = 0x7FFFFFFF +} sg_cull_mode; + +/* + sg_face_winding + + The vertex-winding rule that determines a front-facing primitive. This + is used in the member sg_pipeline_desc.face_winding + when creating a pipeline object. + + The default winding is SG_FACEWINDING_CW (clockwise) +*/ +typedef enum sg_face_winding { + _SG_FACEWINDING_DEFAULT, /* value 0 reserved for default-init */ + SG_FACEWINDING_CCW, + SG_FACEWINDING_CW, + _SG_FACEWINDING_NUM, + _SG_FACEWINDING_FORCE_U32 = 0x7FFFFFFF +} sg_face_winding; + +/* + sg_compare_func + + The compare-function for depth- and stencil-ref tests. + This is used when creating pipeline objects in the members: + + sg_pipeline_desc + .depth + .compare + .stencil + .front.compare + .back.compar + + The default compare func for depth- and stencil-tests is + SG_COMPAREFUNC_ALWAYS. 
+*/ +typedef enum sg_compare_func { + _SG_COMPAREFUNC_DEFAULT, /* value 0 reserved for default-init */ + SG_COMPAREFUNC_NEVER, + SG_COMPAREFUNC_LESS, + SG_COMPAREFUNC_EQUAL, + SG_COMPAREFUNC_LESS_EQUAL, + SG_COMPAREFUNC_GREATER, + SG_COMPAREFUNC_NOT_EQUAL, + SG_COMPAREFUNC_GREATER_EQUAL, + SG_COMPAREFUNC_ALWAYS, + _SG_COMPAREFUNC_NUM, + _SG_COMPAREFUNC_FORCE_U32 = 0x7FFFFFFF +} sg_compare_func; + +/* + sg_stencil_op + + The operation performed on a currently stored stencil-value when a + comparison test passes or fails. This is used when creating a pipeline + object in the members: + + sg_pipeline_desc + .stencil + .front + .fail_op + .depth_fail_op + .pass_op + .back + .fail_op + .depth_fail_op + .pass_op + + The default value is SG_STENCILOP_KEEP. +*/ +typedef enum sg_stencil_op { + _SG_STENCILOP_DEFAULT, /* value 0 reserved for default-init */ + SG_STENCILOP_KEEP, + SG_STENCILOP_ZERO, + SG_STENCILOP_REPLACE, + SG_STENCILOP_INCR_CLAMP, + SG_STENCILOP_DECR_CLAMP, + SG_STENCILOP_INVERT, + SG_STENCILOP_INCR_WRAP, + SG_STENCILOP_DECR_WRAP, + _SG_STENCILOP_NUM, + _SG_STENCILOP_FORCE_U32 = 0x7FFFFFFF +} sg_stencil_op; + +/* + sg_blend_factor + + The source and destination factors in blending operations. + This is used in the following members when creating a pipeline object: + + sg_pipeline_desc + .colors[i] + .blend + .src_factor_rgb + .dst_factor_rgb + .src_factor_alpha + .dst_factor_alpha + + The default value is SG_BLENDFACTOR_ONE for source + factors, and SG_BLENDFACTOR_ZERO for destination factors. +*/ +typedef enum sg_blend_factor { + _SG_BLENDFACTOR_DEFAULT, /* value 0 reserved for default-init */ + SG_BLENDFACTOR_ZERO, + SG_BLENDFACTOR_ONE, + SG_BLENDFACTOR_SRC_COLOR, + SG_BLENDFACTOR_ONE_MINUS_SRC_COLOR, + SG_BLENDFACTOR_SRC_ALPHA, + SG_BLENDFACTOR_ONE_MINUS_SRC_ALPHA, + SG_BLENDFACTOR_DST_COLOR, + SG_BLENDFACTOR_ONE_MINUS_DST_COLOR, + SG_BLENDFACTOR_DST_ALPHA, + SG_BLENDFACTOR_ONE_MINUS_DST_ALPHA, + SG_BLENDFACTOR_SRC_ALPHA_SATURATED, + SG_BLENDFACTOR_BLEND_COLOR, + SG_BLENDFACTOR_ONE_MINUS_BLEND_COLOR, + SG_BLENDFACTOR_BLEND_ALPHA, + SG_BLENDFACTOR_ONE_MINUS_BLEND_ALPHA, + _SG_BLENDFACTOR_NUM, + _SG_BLENDFACTOR_FORCE_U32 = 0x7FFFFFFF +} sg_blend_factor; + +/* + sg_blend_op + + Describes how the source and destination values are combined in the + fragment blending operation. It is used in the following members when + creating a pipeline object: + + sg_pipeline_desc + .colors[i] + .blend + .op_rgb + .op_alpha + + The default value is SG_BLENDOP_ADD. +*/ +typedef enum sg_blend_op { + _SG_BLENDOP_DEFAULT, /* value 0 reserved for default-init */ + SG_BLENDOP_ADD, + SG_BLENDOP_SUBTRACT, + SG_BLENDOP_REVERSE_SUBTRACT, + _SG_BLENDOP_NUM, + _SG_BLENDOP_FORCE_U32 = 0x7FFFFFFF +} sg_blend_op; + +/* + sg_color_mask + + Selects the active color channels when writing a fragment color to the + framebuffer. This is used in the members + sg_pipeline_desc.colors[i].write_mask when creating a pipeline object. + + The default colormask is SG_COLORMASK_RGBA (write all colors channels) + + NOTE: since the color mask value 0 is reserved for the default value + (SG_COLORMASK_RGBA), use SG_COLORMASK_NONE if all color channels + should be disabled. 
+*/ +typedef enum sg_color_mask { + _SG_COLORMASK_DEFAULT = 0, /* value 0 reserved for default-init */ + SG_COLORMASK_NONE = 0x10, /* special value for 'all channels disabled */ + SG_COLORMASK_R = 0x1, + SG_COLORMASK_G = 0x2, + SG_COLORMASK_RG = 0x3, + SG_COLORMASK_B = 0x4, + SG_COLORMASK_RB = 0x5, + SG_COLORMASK_GB = 0x6, + SG_COLORMASK_RGB = 0x7, + SG_COLORMASK_A = 0x8, + SG_COLORMASK_RA = 0x9, + SG_COLORMASK_GA = 0xA, + SG_COLORMASK_RGA = 0xB, + SG_COLORMASK_BA = 0xC, + SG_COLORMASK_RBA = 0xD, + SG_COLORMASK_GBA = 0xE, + SG_COLORMASK_RGBA = 0xF, + _SG_COLORMASK_FORCE_U32 = 0x7FFFFFFF +} sg_color_mask; + +/* + sg_action + + Defines what action should be performed at the start of a render pass: + + SG_ACTION_CLEAR: clear the render target image + SG_ACTION_LOAD: load the previous content of the render target image + SG_ACTION_DONTCARE: leave the render target image content undefined + + This is used in the sg_pass_action structure. + + The default action for all pass attachments is SG_ACTION_CLEAR, with the + clear color rgba = {0.5f, 0.5f, 0.5f, 1.0f], depth=1.0 and stencil=0. + + If you want to override the default behaviour, it is important to not + only set the clear color, but the 'action' field as well (as long as this + is in its _SG_ACTION_DEFAULT, the value fields will be ignored). +*/ +typedef enum sg_action { + _SG_ACTION_DEFAULT, + SG_ACTION_CLEAR, + SG_ACTION_LOAD, + SG_ACTION_DONTCARE, + _SG_ACTION_NUM, + _SG_ACTION_FORCE_U32 = 0x7FFFFFFF +} sg_action; + +/* + sg_pass_action + + The sg_pass_action struct defines the actions to be performed + at the start of a rendering pass in the functions sg_begin_pass() + and sg_begin_default_pass(). + + A separate action and clear values can be defined for each + color attachment, and for the depth-stencil attachment. + + The default clear values are defined by the macros: + + - SG_DEFAULT_CLEAR_RED: 0.5f + - SG_DEFAULT_CLEAR_GREEN: 0.5f + - SG_DEFAULT_CLEAR_BLUE: 0.5f + - SG_DEFAULT_CLEAR_ALPHA: 1.0f + - SG_DEFAULT_CLEAR_DEPTH: 1.0f + - SG_DEFAULT_CLEAR_STENCIL: 0 +*/ +typedef struct sg_color_attachment_action { + sg_action action; + sg_color value; +} sg_color_attachment_action; + +typedef struct sg_depth_attachment_action { + sg_action action; + float value; +} sg_depth_attachment_action; + +typedef struct sg_stencil_attachment_action { + sg_action action; + uint8_t value; +} sg_stencil_attachment_action; + +typedef struct sg_pass_action { + uint32_t _start_canary; + sg_color_attachment_action colors[SG_MAX_COLOR_ATTACHMENTS]; + sg_depth_attachment_action depth; + sg_stencil_attachment_action stencil; + uint32_t _end_canary; +} sg_pass_action; + +/* + sg_bindings + + The sg_bindings structure defines the resource binding slots + of the sokol_gfx render pipeline, used as argument to the + sg_apply_bindings() function. + + A resource binding struct contains: + + - 1..N vertex buffers + - 0..N vertex buffer offsets + - 0..1 index buffers + - 0..1 index buffer offsets + - 0..N vertex shader stage images + - 0..N fragment shader stage images + + The max number of vertex buffer and shader stage images + are defined by the SG_MAX_SHADERSTAGE_BUFFERS and + SG_MAX_SHADERSTAGE_IMAGES configuration constants. + + The optional buffer offsets can be used to put different unrelated + chunks of vertex- and/or index-data into the same buffer objects. 
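+
+    A typical per-draw-call setup looks like this (illustrative sketch;
+    'vbuf', 'ibuf' and 'tex' are assumed to be resources created earlier
+    with sg_make_buffer() and sg_make_image()):
+
+        sg_bindings bind = {
+            .vertex_buffers[0] = vbuf,
+            .index_buffer = ibuf,       // only needed for indexed rendering
+            .fs_images[0] = tex         // texture for fragment shader image slot 0
+        };
+        sg_apply_bindings(&bind);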
+*/ +typedef struct sg_bindings { + uint32_t _start_canary; + sg_buffer vertex_buffers[SG_MAX_SHADERSTAGE_BUFFERS]; + int vertex_buffer_offsets[SG_MAX_SHADERSTAGE_BUFFERS]; + sg_buffer index_buffer; + int index_buffer_offset; + sg_image vs_images[SG_MAX_SHADERSTAGE_IMAGES]; + sg_image fs_images[SG_MAX_SHADERSTAGE_IMAGES]; + uint32_t _end_canary; +} sg_bindings; + +/* + sg_buffer_desc + + Creation parameters for sg_buffer objects, used in the + sg_make_buffer() call. + + The default configuration is: + + .size: 0 (*must* be >0 for buffers without data) + .type: SG_BUFFERTYPE_VERTEXBUFFER + .usage: SG_USAGE_IMMUTABLE + .data.ptr 0 (*must* be valid for immutable buffers) + .data.size 0 (*must* be > 0 for immutable buffers) + .label 0 (optional string label for trace hooks) + + The label will be ignored by sokol_gfx.h, it is only useful + when hooking into sg_make_buffer() or sg_init_buffer() via + the sg_install_trace_hooks() function. + + For immutable buffers which are initialized with initial data, + keep the .size item zero-initialized, and set the size together with the + pointer to the initial data in the .data item. + + For mutable buffers without initial data, keep the .data item + zero-initialized, and set the buffer size in the .size item instead. + + You can also set both size values, but currently both size values must + be identical (this may change in the future when the dynamic resource + management may become more flexible). + + ADVANCED TOPIC: Injecting native 3D-API buffers: + + The following struct members allow to inject your own GL, Metal + or D3D11 buffers into sokol_gfx: + + .gl_buffers[SG_NUM_INFLIGHT_FRAMES] + .mtl_buffers[SG_NUM_INFLIGHT_FRAMES] + .d3d11_buffer + + You must still provide all other struct items except the .data item, and + these must match the creation parameters of the native buffers you + provide. For SG_USAGE_IMMUTABLE, only provide a single native 3D-API + buffer, otherwise you need to provide SG_NUM_INFLIGHT_FRAMES buffers + (only for GL and Metal, not D3D11). Providing multiple buffers for GL and + Metal is necessary because sokol_gfx will rotate through them when + calling sg_update_buffer() to prevent lock-stalls. + + Note that it is expected that immutable injected buffer have already been + initialized with content, and the .content member must be 0! + + Also you need to call sg_reset_state_cache() after calling native 3D-API + functions, and before calling any sokol_gfx function. +*/ +typedef struct sg_buffer_desc { + uint32_t _start_canary; + size_t size; + sg_buffer_type type; + sg_usage usage; + sg_range data; + const char* label; + /* GL specific */ + uint32_t gl_buffers[SG_NUM_INFLIGHT_FRAMES]; + /* Metal specific */ + const void* mtl_buffers[SG_NUM_INFLIGHT_FRAMES]; + /* D3D11 specific */ + const void* d3d11_buffer; + /* WebGPU specific */ + const void* wgpu_buffer; + uint32_t _end_canary; +} sg_buffer_desc; + +/* + sg_image_data + + Defines the content of an image through a 2D array of sg_range structs. + The first array dimension is the cubemap face, and the second array + dimension the mipmap level. +*/ +typedef struct sg_image_data { + sg_range subimage[SG_CUBEFACE_NUM][SG_MAX_MIPMAPS]; +} sg_image_data; + +/* + sg_image_desc + + Creation parameters for sg_image objects, used in the sg_make_image() + call. 
+ + The default configuration is: + + .type: SG_IMAGETYPE_2D + .render_target: false + .width 0 (must be set to >0) + .height 0 (must be set to >0) + .num_slices 1 (3D textures: depth; array textures: number of layers) + .num_mipmaps: 1 + .usage: SG_USAGE_IMMUTABLE + .pixel_format: SG_PIXELFORMAT_RGBA8 for textures, or sg_desc.context.color_format for render targets + .sample_count: 1 for textures, or sg_desc.context.sample_count for render targets + .min_filter: SG_FILTER_NEAREST + .mag_filter: SG_FILTER_NEAREST + .wrap_u: SG_WRAP_REPEAT + .wrap_v: SG_WRAP_REPEAT + .wrap_w: SG_WRAP_REPEAT (only SG_IMAGETYPE_3D) + .border_color SG_BORDERCOLOR_OPAQUE_BLACK + .max_anisotropy 1 (must be 1..16) + .min_lod 0.0f + .max_lod FLT_MAX + .data an sg_image_data struct to define the initial content + .label 0 (optional string label for trace hooks) + + Q: Why is the default sample_count for render targets identical with the + "default sample count" from sg_desc.context.sample_count? + + A: So that it matches the default sample count in pipeline objects. Even + though it is a bit strange/confusing that offscreen render targets by default + get the same sample count as the default framebuffer, but it's better that + an offscreen render target created with default parameters matches + a pipeline object created with default parameters. + + NOTE: + + SG_IMAGETYPE_ARRAY and SG_IMAGETYPE_3D are not supported on WebGL/GLES2, + use sg_query_features().imagetype_array and + sg_query_features().imagetype_3d at runtime to check if array- and + 3D-textures are supported. + + Images with usage SG_USAGE_IMMUTABLE must be fully initialized by + providing a valid .data member which points to initialization data. + + ADVANCED TOPIC: Injecting native 3D-API textures: + + The following struct members allow to inject your own GL, Metal or D3D11 + textures into sokol_gfx: + + .gl_textures[SG_NUM_INFLIGHT_FRAMES] + .mtl_textures[SG_NUM_INFLIGHT_FRAMES] + .d3d11_texture + .d3d11_shader_resource_view + + For GL, you can also specify the texture target or leave it empty to use + the default texture target for the image type (GL_TEXTURE_2D for + SG_IMAGETYPE_2D etc) + + For D3D11, you can provide either a D3D11 texture, or a + shader-resource-view, or both. If only a texture is provided, a matching + shader-resource-view will be created. If only a shader-resource-view is + provided, the texture will be looked up from the shader-resource-view. + + The same rules apply as for injecting native buffers (see sg_buffer_desc + documentation for more details). 
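+
+    As a minimal sketch, an immutable 4x4 RGBA8 texture could be created
+    like this ('pixels' is assumed to already hold 4x4 packed RGBA8 values):
+
+        uint32_t pixels[4*4] = { 0 };   // assumed to be filled with pixel data
+        sg_image img = sg_make_image(&(sg_image_desc){
+            .width = 4,
+            .height = 4,
+            .pixel_format = SG_PIXELFORMAT_RGBA8,
+            .data.subimage[0][0] = { .ptr = pixels, .size = sizeof(pixels) }
+        });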
+*/ +typedef struct sg_image_desc { + uint32_t _start_canary; + sg_image_type type; + bool render_target; + int width; + int height; + int num_slices; + int num_mipmaps; + sg_usage usage; + sg_pixel_format pixel_format; + int sample_count; + sg_filter min_filter; + sg_filter mag_filter; + sg_wrap wrap_u; + sg_wrap wrap_v; + sg_wrap wrap_w; + sg_border_color border_color; + uint32_t max_anisotropy; + float min_lod; + float max_lod; + sg_image_data data; + const char* label; + /* GL specific */ + uint32_t gl_textures[SG_NUM_INFLIGHT_FRAMES]; + uint32_t gl_texture_target; + /* Metal specific */ + const void* mtl_textures[SG_NUM_INFLIGHT_FRAMES]; + /* D3D11 specific */ + const void* d3d11_texture; + const void* d3d11_shader_resource_view; + /* WebGPU specific */ + const void* wgpu_texture; + uint32_t _end_canary; +} sg_image_desc; + +/* + sg_shader_desc + + The structure sg_shader_desc defines all creation parameters for shader + programs, used as input to the sg_make_shader() function: + + - reflection information for vertex attributes (vertex shader inputs): + - vertex attribute name (required for GLES2, optional for GLES3 and GL) + - a semantic name and index (required for D3D11) + - for each shader-stage (vertex and fragment): + - the shader source or bytecode + - an optional entry function name + - an optional compile target (only for D3D11 when source is provided, + defaults are "vs_4_0" and "ps_4_0") + - reflection info for each uniform block used by the shader stage: + - the size of the uniform block in bytes + - reflection info for each uniform block member (only required for GL backends): + - member name + - member type (SG_UNIFORMTYPE_xxx) + - if the member is an array, the number of array items + - reflection info for the texture images used by the shader stage: + - the image type (SG_IMAGETYPE_xxx) + - the sampler type (SG_SAMPLERTYPE_xxx, default is SG_SAMPLERTYPE_FLOAT) + - the name of the texture sampler (required for GLES2, optional everywhere else) + + For all GL backends, shader source-code must be provided. For D3D11 and Metal, + either shader source-code or byte-code can be provided. + + For D3D11, if source code is provided, the d3dcompiler_47.dll will be loaded + on demand. If this fails, shader creation will fail. When compiling HLSL + source code, you can provide an optional target string via + sg_shader_stage_desc.d3d11_target, the default target is "vs_4_0" for the + vertex shader stage and "ps_4_0" for the pixel shader stage. 
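+
+    A minimal shader description for the GL backends could look like this
+    (illustrative sketch; 'vs_src' and 'fs_src' are assumed GLSL source
+    strings, and the vertex shader is assumed to use a single mat4 uniform
+    named 'mvp'):
+
+        sg_shader shd = sg_make_shader(&(sg_shader_desc){
+            .attrs[0].name = "position",
+            .vs = {
+                .source = vs_src,
+                .uniform_blocks[0] = {
+                    .size = 16 * sizeof(float),
+                    .uniforms[0] = { .name = "mvp", .type = SG_UNIFORMTYPE_MAT4 }
+                }
+            },
+            .fs.source = fs_src
+        });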
+*/ +typedef struct sg_shader_attr_desc { + const char* name; // GLSL vertex attribute name (only strictly required for GLES2) + const char* sem_name; // HLSL semantic name + int sem_index; // HLSL semantic index +} sg_shader_attr_desc; + +typedef struct sg_shader_uniform_desc { + const char* name; + sg_uniform_type type; + int array_count; +} sg_shader_uniform_desc; + +typedef struct sg_shader_uniform_block_desc { + size_t size; + sg_shader_uniform_desc uniforms[SG_MAX_UB_MEMBERS]; +} sg_shader_uniform_block_desc; + +typedef struct sg_shader_image_desc { + const char* name; + sg_image_type image_type; + sg_sampler_type sampler_type; +} sg_shader_image_desc; + +typedef struct sg_shader_stage_desc { + const char* source; + sg_range bytecode; + const char* entry; + const char* d3d11_target; + sg_shader_uniform_block_desc uniform_blocks[SG_MAX_SHADERSTAGE_UBS]; + sg_shader_image_desc images[SG_MAX_SHADERSTAGE_IMAGES]; +} sg_shader_stage_desc; + +typedef struct sg_shader_desc { + uint32_t _start_canary; + sg_shader_attr_desc attrs[SG_MAX_VERTEX_ATTRIBUTES]; + sg_shader_stage_desc vs; + sg_shader_stage_desc fs; + const char* label; + uint32_t _end_canary; +} sg_shader_desc; + +/* + sg_pipeline_desc + + The sg_pipeline_desc struct defines all creation parameters for an + sg_pipeline object, used as argument to the sg_make_pipeline() function: + + - the vertex layout for all input vertex buffers + - a shader object + - the 3D primitive type (points, lines, triangles, ...) + - the index type (none, 16- or 32-bit) + - all the fixed-function-pipeline state (depth-, stencil-, blend-state, etc...) + + If the vertex data has no gaps between vertex components, you can omit + the .layout.buffers[].stride and layout.attrs[].offset items (leave them + default-initialized to 0), sokol-gfx will then compute the offsets and + strides from the vertex component formats (.layout.attrs[].format). + Please note that ALL vertex attribute offsets must be 0 in order for the + automatic offset computation to kick in. + + The default configuration is as follows: + + .shader: 0 (must be initialized with a valid sg_shader id!) + .layout: + .buffers[]: vertex buffer layouts + .stride: 0 (if no stride is given it will be computed) + .step_func SG_VERTEXSTEP_PER_VERTEX + .step_rate 1 + .attrs[]: vertex attribute declarations + .buffer_index 0 the vertex buffer bind slot + .offset 0 (offsets can be omitted if the vertex layout has no gaps) + .format SG_VERTEXFORMAT_INVALID (must be initialized!) 
+ .depth: + .pixel_format: sg_desc.context.depth_format + .compare: SG_COMPAREFUNC_ALWAYS + .write_enabled: false + .bias: 0.0f + .bias_slope_scale: 0.0f + .bias_clamp: 0.0f + .stencil: + .enabled: false + .front/back: + .compare: SG_COMPAREFUNC_ALWAYS + .depth_fail_op: SG_STENCILOP_KEEP + .pass_op: SG_STENCILOP_KEEP + .compare: SG_COMPAREFUNC_ALWAYS + .read_mask: 0 + .write_mask: 0 + .ref: 0 + .color_count 1 + .colors[0..color_count] + .pixel_format sg_desc.context.color_format + .write_mask: SG_COLORMASK_RGBA + .blend: + .enabled: false + .src_factor_rgb: SG_BLENDFACTOR_ONE + .dst_factor_rgb: SG_BLENDFACTOR_ZERO + .op_rgb: SG_BLENDOP_ADD + .src_factor_alpha: SG_BLENDFACTOR_ONE + .dst_factor_alpha: SG_BLENDFACTOR_ZERO + .op_alpha: SG_BLENDOP_ADD + .primitive_type: SG_PRIMITIVETYPE_TRIANGLES + .index_type: SG_INDEXTYPE_NONE + .cull_mode: SG_CULLMODE_NONE + .face_winding: SG_FACEWINDING_CW + .sample_count: sg_desc.context.sample_count + .blend_color: (sg_color) { 0.0f, 0.0f, 0.0f, 0.0f } + .alpha_to_coverage_enabled: false + .label 0 (optional string label for trace hooks) +*/ +typedef struct sg_buffer_layout_desc { + int stride; + sg_vertex_step step_func; + int step_rate; + #if defined(SOKOL_ZIG_BINDINGS) + uint32_t __pad[2]; + #endif +} sg_buffer_layout_desc; + +typedef struct sg_vertex_attr_desc { + int buffer_index; + int offset; + sg_vertex_format format; + #if defined(SOKOL_ZIG_BINDINGS) + uint32_t __pad[2]; + #endif +} sg_vertex_attr_desc; + +typedef struct sg_layout_desc { + sg_buffer_layout_desc buffers[SG_MAX_SHADERSTAGE_BUFFERS]; + sg_vertex_attr_desc attrs[SG_MAX_VERTEX_ATTRIBUTES]; +} sg_layout_desc; + +typedef struct sg_stencil_face_state { + sg_compare_func compare; + sg_stencil_op fail_op; + sg_stencil_op depth_fail_op; + sg_stencil_op pass_op; +} sg_stencil_face_state; + +typedef struct sg_stencil_state { + bool enabled; + sg_stencil_face_state front; + sg_stencil_face_state back; + uint8_t read_mask; + uint8_t write_mask; + uint8_t ref; +} sg_stencil_state; + +typedef struct sg_depth_state { + sg_pixel_format pixel_format; + sg_compare_func compare; + bool write_enabled; + float bias; + float bias_slope_scale; + float bias_clamp; +} sg_depth_state; + +typedef struct sg_blend_state { + bool enabled; + sg_blend_factor src_factor_rgb; + sg_blend_factor dst_factor_rgb; + sg_blend_op op_rgb; + sg_blend_factor src_factor_alpha; + sg_blend_factor dst_factor_alpha; + sg_blend_op op_alpha; +} sg_blend_state; + +typedef struct sg_color_state { + sg_pixel_format pixel_format; + sg_color_mask write_mask; + sg_blend_state blend; +} sg_color_state; + +typedef struct sg_pipeline_desc { + uint32_t _start_canary; + sg_shader shader; + sg_layout_desc layout; + sg_depth_state depth; + sg_stencil_state stencil; + int color_count; + sg_color_state colors[SG_MAX_COLOR_ATTACHMENTS]; + sg_primitive_type primitive_type; + sg_index_type index_type; + sg_cull_mode cull_mode; + sg_face_winding face_winding; + int sample_count; + sg_color blend_color; + bool alpha_to_coverage_enabled; + const char* label; + uint32_t _end_canary; +} sg_pipeline_desc; + +/* + sg_pass_desc + + Creation parameters for an sg_pass object, used as argument + to the sg_make_pass() function. + + A pass object contains 1..4 color-attachments and none, or one, + depth-stencil-attachment. 
Each attachment consists of + an image, and two additional indices describing + which subimage the pass will render to: one mipmap index, and + if the image is a cubemap, array-texture or 3D-texture, the + face-index, array-layer or depth-slice. + + Pass images must fulfill the following requirements: + + All images must have: + - been created as render target (sg_image_desc.render_target = true) + - the same size + - the same sample count + + In addition, all color-attachment images must have the same pixel format. +*/ +typedef struct sg_pass_attachment_desc { + sg_image image; + int mip_level; + int slice; /* cube texture: face; array texture: layer; 3D texture: slice */ +} sg_pass_attachment_desc; + +typedef struct sg_pass_desc { + uint32_t _start_canary; + sg_pass_attachment_desc color_attachments[SG_MAX_COLOR_ATTACHMENTS]; + sg_pass_attachment_desc depth_stencil_attachment; + const char* label; + uint32_t _end_canary; +} sg_pass_desc; + +/* + sg_trace_hooks + + Installable callback functions to keep track of the sokol-gfx calls, + this is useful for debugging, or keeping track of resource creation + and destruction. + + Trace hooks are installed with sg_install_trace_hooks(), this returns + another sg_trace_hooks struct with the previous set of + trace hook function pointers. These should be invoked by the + new trace hooks to form a proper call chain. +*/ +typedef struct sg_trace_hooks { + void* user_data; + void (*reset_state_cache)(void* user_data); + void (*make_buffer)(const sg_buffer_desc* desc, sg_buffer result, void* user_data); + void (*make_image)(const sg_image_desc* desc, sg_image result, void* user_data); + void (*make_shader)(const sg_shader_desc* desc, sg_shader result, void* user_data); + void (*make_pipeline)(const sg_pipeline_desc* desc, sg_pipeline result, void* user_data); + void (*make_pass)(const sg_pass_desc* desc, sg_pass result, void* user_data); + void (*destroy_buffer)(sg_buffer buf, void* user_data); + void (*destroy_image)(sg_image img, void* user_data); + void (*destroy_shader)(sg_shader shd, void* user_data); + void (*destroy_pipeline)(sg_pipeline pip, void* user_data); + void (*destroy_pass)(sg_pass pass, void* user_data); + void (*update_buffer)(sg_buffer buf, const sg_range* data, void* user_data); + void (*update_image)(sg_image img, const sg_image_data* data, void* user_data); + void (*append_buffer)(sg_buffer buf, const sg_range* data, int result, void* user_data); + void (*begin_default_pass)(const sg_pass_action* pass_action, int width, int height, void* user_data); + void (*begin_pass)(sg_pass pass, const sg_pass_action* pass_action, void* user_data); + void (*apply_viewport)(int x, int y, int width, int height, bool origin_top_left, void* user_data); + void (*apply_scissor_rect)(int x, int y, int width, int height, bool origin_top_left, void* user_data); + void (*apply_pipeline)(sg_pipeline pip, void* user_data); + void (*apply_bindings)(const sg_bindings* bindings, void* user_data); + void (*apply_uniforms)(sg_shader_stage stage, int ub_index, const sg_range* data, void* user_data); + void (*draw)(int base_element, int num_elements, int num_instances, void* user_data); + void (*end_pass)(void* user_data); + void (*commit)(void* user_data); + void (*alloc_buffer)(sg_buffer result, void* user_data); + void (*alloc_image)(sg_image result, void* user_data); + void (*alloc_shader)(sg_shader result, void* user_data); + void (*alloc_pipeline)(sg_pipeline result, void* user_data); + void (*alloc_pass)(sg_pass result, void* user_data); + void 
(*dealloc_buffer)(sg_buffer buf_id, void* user_data); + void (*dealloc_image)(sg_image img_id, void* user_data); + void (*dealloc_shader)(sg_shader shd_id, void* user_data); + void (*dealloc_pipeline)(sg_pipeline pip_id, void* user_data); + void (*dealloc_pass)(sg_pass pass_id, void* user_data); + void (*init_buffer)(sg_buffer buf_id, const sg_buffer_desc* desc, void* user_data); + void (*init_image)(sg_image img_id, const sg_image_desc* desc, void* user_data); + void (*init_shader)(sg_shader shd_id, const sg_shader_desc* desc, void* user_data); + void (*init_pipeline)(sg_pipeline pip_id, const sg_pipeline_desc* desc, void* user_data); + void (*init_pass)(sg_pass pass_id, const sg_pass_desc* desc, void* user_data); + void (*uninit_buffer)(sg_buffer buf_id, void* user_data); + void (*uninit_image)(sg_image img_id, void* user_data); + void (*uninit_shader)(sg_shader shd_id, void* user_data); + void (*uninit_pipeline)(sg_pipeline pip_id, void* user_data); + void (*uninit_pass)(sg_pass pass_id, void* user_data); + void (*fail_buffer)(sg_buffer buf_id, void* user_data); + void (*fail_image)(sg_image img_id, void* user_data); + void (*fail_shader)(sg_shader shd_id, void* user_data); + void (*fail_pipeline)(sg_pipeline pip_id, void* user_data); + void (*fail_pass)(sg_pass pass_id, void* user_data); + void (*push_debug_group)(const char* name, void* user_data); + void (*pop_debug_group)(void* user_data); + void (*err_buffer_pool_exhausted)(void* user_data); + void (*err_image_pool_exhausted)(void* user_data); + void (*err_shader_pool_exhausted)(void* user_data); + void (*err_pipeline_pool_exhausted)(void* user_data); + void (*err_pass_pool_exhausted)(void* user_data); + void (*err_context_mismatch)(void* user_data); + void (*err_pass_invalid)(void* user_data); + void (*err_draw_invalid)(void* user_data); + void (*err_bindings_invalid)(void* user_data); +} sg_trace_hooks; + +/* + sg_buffer_info + sg_image_info + sg_shader_info + sg_pipeline_info + sg_pass_info + + These structs contain various internal resource attributes which + might be useful for debug-inspection. Please don't rely on the + actual content of those structs too much, as they are quite closely + tied to sokol_gfx.h internals and may change more frequently than + the other public API elements. + + The *_info structs are used as the return values of the following functions: + + sg_query_buffer_info() + sg_query_image_info() + sg_query_shader_info() + sg_query_pipeline_info() + sg_query_pass_info() +*/ +typedef struct sg_slot_info { + sg_resource_state state; /* the current state of this resource slot */ + uint32_t res_id; /* type-neutral resource if (e.g. 
sg_buffer.id) */ + uint32_t ctx_id; /* the context this resource belongs to */ +} sg_slot_info; + +typedef struct sg_buffer_info { + sg_slot_info slot; /* resource pool slot info */ + uint32_t update_frame_index; /* frame index of last sg_update_buffer() */ + uint32_t append_frame_index; /* frame index of last sg_append_buffer() */ + int append_pos; /* current position in buffer for sg_append_buffer() */ + bool append_overflow; /* is buffer in overflow state (due to sg_append_buffer) */ + int num_slots; /* number of renaming-slots for dynamically updated buffers */ + int active_slot; /* currently active write-slot for dynamically updated buffers */ +} sg_buffer_info; + +typedef struct sg_image_info { + sg_slot_info slot; /* resource pool slot info */ + uint32_t upd_frame_index; /* frame index of last sg_update_image() */ + int num_slots; /* number of renaming-slots for dynamically updated images */ + int active_slot; /* currently active write-slot for dynamically updated images */ + int width; /* image width */ + int height; /* image height */ +} sg_image_info; + +typedef struct sg_shader_info { + sg_slot_info slot; /* resoure pool slot info */ +} sg_shader_info; + +typedef struct sg_pipeline_info { + sg_slot_info slot; /* resource pool slot info */ +} sg_pipeline_info; + +typedef struct sg_pass_info { + sg_slot_info slot; /* resource pool slot info */ +} sg_pass_info; + +/* + sg_desc + + The sg_desc struct contains configuration values for sokol_gfx, + it is used as parameter to the sg_setup() call. + + NOTE that all callback function pointers come in two versions, one without + a userdata pointer, and one with a userdata pointer. You would + either initialize one or the other depending on whether you pass data + to your callbacks. + + FIXME: explain the various configuration options + + The default configuration is: + + .buffer_pool_size 128 + .image_pool_size 128 + .shader_pool_size 32 + .pipeline_pool_size 64 + .pass_pool_size 16 + .context_pool_size 16 + .sampler_cache_size 64 + .uniform_buffer_size 4 MB (4*1024*1024) + .staging_buffer_size 8 MB (8*1024*1024) + + .context.color_format: default value depends on selected backend: + all GL backends: SG_PIXELFORMAT_RGBA8 + Metal and D3D11: SG_PIXELFORMAT_BGRA8 + WGPU: *no default* (must be queried from WGPU swapchain) + .context.depth_format SG_PIXELFORMAT_DEPTH_STENCIL + .context.sample_count 1 + + GL specific: + .context.gl.force_gles2 + if this is true the GL backend will act in "GLES2 fallback mode" even + when compiled with SOKOL_GLES3, this is useful to fall back + to traditional WebGL if a browser doesn't support a WebGL2 context + + Metal specific: + (NOTE: All Objective-C object references are transferred through + a bridged (const void*) to sokol_gfx, which will use a unretained + bridged cast (__bridged id) to retrieve the Objective-C + references back. Since the bridge cast is unretained, the caller + must hold a strong reference to the Objective-C object for the + duration of the sokol_gfx call! + + .context.metal.device + a pointer to the MTLDevice object + .context.metal.renderpass_descriptor_cb + .context.metal_renderpass_descriptor_userdata_cb + A C callback function to obtain the MTLRenderPassDescriptor for the + current frame when rendering to the default framebuffer, will be called + in sg_begin_default_pass(). 
+ .context.metal.drawable_cb + .context.metal.drawable_userdata_cb + a C callback function to obtain a MTLDrawable for the current + frame when rendering to the default framebuffer, will be called in + sg_end_pass() of the default pass + .context.metal.user_data + optional user data pointer passed to the userdata versions of + callback functions + + D3D11 specific: + .context.d3d11.device + a pointer to the ID3D11Device object, this must have been created + before sg_setup() is called + .context.d3d11.device_context + a pointer to the ID3D11DeviceContext object + .context.d3d11.render_target_view_cb + .context.d3d11.render_target_view_userdata_cb + a C callback function to obtain a pointer to the current + ID3D11RenderTargetView object of the default framebuffer, + this function will be called in sg_begin_pass() when rendering + to the default framebuffer + .context.d3d11.depth_stencil_view_cb + .context.d3d11.depth_stencil_view_userdata_cb + a C callback function to obtain a pointer to the current + ID3D11DepthStencilView object of the default framebuffer, + this function will be called in sg_begin_pass() when rendering + to the default framebuffer + .context.metal.user_data + optional user data pointer passed to the userdata versions of + callback functions + + WebGPU specific: + .context.wgpu.device + a WGPUDevice handle + .context.wgpu.render_format + WGPUTextureFormat of the swap chain surface + .context.wgpu.render_view_cb + .context.wgpu.render_view_userdata_cb + callback to get the current WGPUTextureView of the swapchain's + rendering attachment (may be an MSAA surface) + .context.wgpu.resolve_view_cb + .context.wgpu.resolve_view_userdata_cb + callback to get the current WGPUTextureView of the swapchain's + MSAA-resolve-target surface, must return 0 if not MSAA rendering + .context.wgpu.depth_stencil_view_cb + .context.wgpu.depth_stencil_view_userdata_cb + callback to get current default-pass depth-stencil-surface WGPUTextureView + the pixel format of the default WGPUTextureView must be WGPUTextureFormat_Depth24Plus8 + .context.metal.user_data + optional user data pointer passed to the userdata versions of + callback functions + + When using sokol_gfx.h and sokol_app.h together, consider using the + helper function sapp_sgcontext() in the sokol_glue.h header to + initialize the sg_desc.context nested struct. sapp_sgcontext() returns + a completely initialized sg_context_desc struct with information + provided by sokol_app.h. 
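+
+    A typical setup call in that combination is then just (sketch, assuming
+    sokol_glue.h is included for sapp_sgcontext()):
+
+        sg_setup(&(sg_desc){
+            .context = sapp_sgcontext()
+        });
+
+    All configuration values that are left zero-initialized fall back to the
+    defaults listed above.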
+*/ +typedef struct sg_gl_context_desc { + bool force_gles2; +} sg_gl_context_desc; + +typedef struct sg_metal_context_desc { + const void* device; + const void* (*renderpass_descriptor_cb)(void); + const void* (*renderpass_descriptor_userdata_cb)(void*); + const void* (*drawable_cb)(void); + const void* (*drawable_userdata_cb)(void*); + void* user_data; +} sg_metal_context_desc; + +typedef struct sg_d3d11_context_desc { + const void* device; + const void* device_context; + const void* (*render_target_view_cb)(void); + const void* (*render_target_view_userdata_cb)(void*); + const void* (*depth_stencil_view_cb)(void); + const void* (*depth_stencil_view_userdata_cb)(void*); + void* user_data; +} sg_d3d11_context_desc; + +typedef struct sg_wgpu_context_desc { + const void* device; /* WGPUDevice */ + const void* (*render_view_cb)(void); /* returns WGPUTextureView */ + const void* (*render_view_userdata_cb)(void*); + const void* (*resolve_view_cb)(void); /* returns WGPUTextureView */ + const void* (*resolve_view_userdata_cb)(void*); + const void* (*depth_stencil_view_cb)(void); /* returns WGPUTextureView, must be WGPUTextureFormat_Depth24Plus8 */ + const void* (*depth_stencil_view_userdata_cb)(void*); + void* user_data; +} sg_wgpu_context_desc; + +typedef struct sg_context_desc { + sg_pixel_format color_format; + sg_pixel_format depth_format; + int sample_count; + sg_gl_context_desc gl; + sg_metal_context_desc metal; + sg_d3d11_context_desc d3d11; + sg_wgpu_context_desc wgpu; +} sg_context_desc; + +typedef struct sg_desc { + uint32_t _start_canary; + int buffer_pool_size; + int image_pool_size; + int shader_pool_size; + int pipeline_pool_size; + int pass_pool_size; + int context_pool_size; + int uniform_buffer_size; + int staging_buffer_size; + int sampler_cache_size; + sg_context_desc context; + uint32_t _end_canary; +} sg_desc; + +/* setup and misc functions */ +SOKOL_GFX_API_DECL void sg_setup(const sg_desc* desc); +SOKOL_GFX_API_DECL void sg_shutdown(void); +SOKOL_GFX_API_DECL bool sg_isvalid(void); +SOKOL_GFX_API_DECL void sg_reset_state_cache(void); +SOKOL_GFX_API_DECL sg_trace_hooks sg_install_trace_hooks(const sg_trace_hooks* trace_hooks); +SOKOL_GFX_API_DECL void sg_push_debug_group(const char* name); +SOKOL_GFX_API_DECL void sg_pop_debug_group(void); + +/* resource creation, destruction and updating */ +SOKOL_GFX_API_DECL sg_buffer sg_make_buffer(const sg_buffer_desc* desc); +SOKOL_GFX_API_DECL sg_image sg_make_image(const sg_image_desc* desc); +SOKOL_GFX_API_DECL sg_shader sg_make_shader(const sg_shader_desc* desc); +SOKOL_GFX_API_DECL sg_pipeline sg_make_pipeline(const sg_pipeline_desc* desc); +SOKOL_GFX_API_DECL sg_pass sg_make_pass(const sg_pass_desc* desc); +SOKOL_GFX_API_DECL void sg_destroy_buffer(sg_buffer buf); +SOKOL_GFX_API_DECL void sg_destroy_image(sg_image img); +SOKOL_GFX_API_DECL void sg_destroy_shader(sg_shader shd); +SOKOL_GFX_API_DECL void sg_destroy_pipeline(sg_pipeline pip); +SOKOL_GFX_API_DECL void sg_destroy_pass(sg_pass pass); +SOKOL_GFX_API_DECL void sg_update_buffer(sg_buffer buf, const sg_range* data); +SOKOL_GFX_API_DECL void sg_update_image(sg_image img, const sg_image_data* data); +SOKOL_GFX_API_DECL int sg_append_buffer(sg_buffer buf, const sg_range* data); +SOKOL_GFX_API_DECL bool sg_query_buffer_overflow(sg_buffer buf); + +/* rendering functions */ +SOKOL_GFX_API_DECL void sg_begin_default_pass(const sg_pass_action* pass_action, int width, int height); +SOKOL_GFX_API_DECL void sg_begin_default_passf(const sg_pass_action* pass_action, float width, float 
height); +SOKOL_GFX_API_DECL void sg_begin_pass(sg_pass pass, const sg_pass_action* pass_action); +SOKOL_GFX_API_DECL void sg_apply_viewport(int x, int y, int width, int height, bool origin_top_left); +SOKOL_GFX_API_DECL void sg_apply_viewportf(float x, float y, float width, float height, bool origin_top_left); +SOKOL_GFX_API_DECL void sg_apply_scissor_rect(int x, int y, int width, int height, bool origin_top_left); +SOKOL_GFX_API_DECL void sg_apply_scissor_rectf(float x, float y, float width, float height, bool origin_top_left); +SOKOL_GFX_API_DECL void sg_apply_pipeline(sg_pipeline pip); +SOKOL_GFX_API_DECL void sg_apply_bindings(const sg_bindings* bindings); +SOKOL_GFX_API_DECL void sg_apply_uniforms(sg_shader_stage stage, int ub_index, const sg_range* data); +SOKOL_GFX_API_DECL void sg_draw(int base_element, int num_elements, int num_instances); +SOKOL_GFX_API_DECL void sg_end_pass(void); +SOKOL_GFX_API_DECL void sg_commit(void); + +/* getting information */ +SOKOL_GFX_API_DECL sg_desc sg_query_desc(void); +SOKOL_GFX_API_DECL sg_backend sg_query_backend(void); +SOKOL_GFX_API_DECL sg_features sg_query_features(void); +SOKOL_GFX_API_DECL sg_limits sg_query_limits(void); +SOKOL_GFX_API_DECL sg_pixelformat_info sg_query_pixelformat(sg_pixel_format fmt); +/* get current state of a resource (INITIAL, ALLOC, VALID, FAILED, INVALID) */ +SOKOL_GFX_API_DECL sg_resource_state sg_query_buffer_state(sg_buffer buf); +SOKOL_GFX_API_DECL sg_resource_state sg_query_image_state(sg_image img); +SOKOL_GFX_API_DECL sg_resource_state sg_query_shader_state(sg_shader shd); +SOKOL_GFX_API_DECL sg_resource_state sg_query_pipeline_state(sg_pipeline pip); +SOKOL_GFX_API_DECL sg_resource_state sg_query_pass_state(sg_pass pass); +/* get runtime information about a resource */ +SOKOL_GFX_API_DECL sg_buffer_info sg_query_buffer_info(sg_buffer buf); +SOKOL_GFX_API_DECL sg_image_info sg_query_image_info(sg_image img); +SOKOL_GFX_API_DECL sg_shader_info sg_query_shader_info(sg_shader shd); +SOKOL_GFX_API_DECL sg_pipeline_info sg_query_pipeline_info(sg_pipeline pip); +SOKOL_GFX_API_DECL sg_pass_info sg_query_pass_info(sg_pass pass); +/* get resource creation desc struct with their default values replaced */ +SOKOL_GFX_API_DECL sg_buffer_desc sg_query_buffer_defaults(const sg_buffer_desc* desc); +SOKOL_GFX_API_DECL sg_image_desc sg_query_image_defaults(const sg_image_desc* desc); +SOKOL_GFX_API_DECL sg_shader_desc sg_query_shader_defaults(const sg_shader_desc* desc); +SOKOL_GFX_API_DECL sg_pipeline_desc sg_query_pipeline_defaults(const sg_pipeline_desc* desc); +SOKOL_GFX_API_DECL sg_pass_desc sg_query_pass_defaults(const sg_pass_desc* desc); + +/* separate resource allocation and initialization (for async setup) */ +SOKOL_GFX_API_DECL sg_buffer sg_alloc_buffer(void); +SOKOL_GFX_API_DECL sg_image sg_alloc_image(void); +SOKOL_GFX_API_DECL sg_shader sg_alloc_shader(void); +SOKOL_GFX_API_DECL sg_pipeline sg_alloc_pipeline(void); +SOKOL_GFX_API_DECL sg_pass sg_alloc_pass(void); +SOKOL_GFX_API_DECL void sg_dealloc_buffer(sg_buffer buf_id); +SOKOL_GFX_API_DECL void sg_dealloc_image(sg_image img_id); +SOKOL_GFX_API_DECL void sg_dealloc_shader(sg_shader shd_id); +SOKOL_GFX_API_DECL void sg_dealloc_pipeline(sg_pipeline pip_id); +SOKOL_GFX_API_DECL void sg_dealloc_pass(sg_pass pass_id); +SOKOL_GFX_API_DECL void sg_init_buffer(sg_buffer buf_id, const sg_buffer_desc* desc); +SOKOL_GFX_API_DECL void sg_init_image(sg_image img_id, const sg_image_desc* desc); +SOKOL_GFX_API_DECL void sg_init_shader(sg_shader shd_id, const sg_shader_desc* 
desc); +SOKOL_GFX_API_DECL void sg_init_pipeline(sg_pipeline pip_id, const sg_pipeline_desc* desc); +SOKOL_GFX_API_DECL void sg_init_pass(sg_pass pass_id, const sg_pass_desc* desc); +SOKOL_GFX_API_DECL bool sg_uninit_buffer(sg_buffer buf_id); +SOKOL_GFX_API_DECL bool sg_uninit_image(sg_image img_id); +SOKOL_GFX_API_DECL bool sg_uninit_shader(sg_shader shd_id); +SOKOL_GFX_API_DECL bool sg_uninit_pipeline(sg_pipeline pip_id); +SOKOL_GFX_API_DECL bool sg_uninit_pass(sg_pass pass_id); +SOKOL_GFX_API_DECL void sg_fail_buffer(sg_buffer buf_id); +SOKOL_GFX_API_DECL void sg_fail_image(sg_image img_id); +SOKOL_GFX_API_DECL void sg_fail_shader(sg_shader shd_id); +SOKOL_GFX_API_DECL void sg_fail_pipeline(sg_pipeline pip_id); +SOKOL_GFX_API_DECL void sg_fail_pass(sg_pass pass_id); + +/* rendering contexts (optional) */ +SOKOL_GFX_API_DECL sg_context sg_setup_context(void); +SOKOL_GFX_API_DECL void sg_activate_context(sg_context ctx_id); +SOKOL_GFX_API_DECL void sg_discard_context(sg_context ctx_id); + +/* Backend-specific helper functions, these may come in handy for mixing + sokol-gfx rendering with 'native backend' rendering functions. + + This group of functions will be expanded as needed. +*/ + +/* D3D11: return ID3D11Device */ +SOKOL_GFX_API_DECL const void* sg_d3d11_device(void); + +/* Metal: return __bridge-casted MTLDevice */ +SOKOL_GFX_API_DECL const void* sg_mtl_device(void); + +/* Metal: return __bridge-casted MTLRenderCommandEncoder in current pass (or zero if outside pass) */ +SOKOL_GFX_API_DECL const void* sg_mtl_render_command_encoder(void); + +#ifdef __cplusplus +} /* extern "C" */ + +/* reference-based equivalents for c++ */ +inline void sg_setup(const sg_desc& desc) { return sg_setup(&desc); } + +inline sg_buffer sg_make_buffer(const sg_buffer_desc& desc) { return sg_make_buffer(&desc); } +inline sg_image sg_make_image(const sg_image_desc& desc) { return sg_make_image(&desc); } +inline sg_shader sg_make_shader(const sg_shader_desc& desc) { return sg_make_shader(&desc); } +inline sg_pipeline sg_make_pipeline(const sg_pipeline_desc& desc) { return sg_make_pipeline(&desc); } +inline sg_pass sg_make_pass(const sg_pass_desc& desc) { return sg_make_pass(&desc); } +inline void sg_update_image(sg_image img, const sg_image_data& data) { return sg_update_image(img, &data); } + +inline void sg_begin_default_pass(const sg_pass_action& pass_action, int width, int height) { return sg_begin_default_pass(&pass_action, width, height); } +inline void sg_begin_default_passf(const sg_pass_action& pass_action, float width, float height) { return sg_begin_default_passf(&pass_action, width, height); } +inline void sg_begin_pass(sg_pass pass, const sg_pass_action& pass_action) { return sg_begin_pass(pass, &pass_action); } +inline void sg_apply_bindings(const sg_bindings& bindings) { return sg_apply_bindings(&bindings); } +inline void sg_apply_uniforms(sg_shader_stage stage, int ub_index, const sg_range& data) { return sg_apply_uniforms(stage, ub_index, &data); } + +inline sg_buffer_desc sg_query_buffer_defaults(const sg_buffer_desc& desc) { return sg_query_buffer_defaults(&desc); } +inline sg_image_desc sg_query_image_defaults(const sg_image_desc& desc) { return sg_query_image_defaults(&desc); } +inline sg_shader_desc sg_query_shader_defaults(const sg_shader_desc& desc) { return sg_query_shader_defaults(&desc); } +inline sg_pipeline_desc sg_query_pipeline_defaults(const sg_pipeline_desc& desc) { return sg_query_pipeline_defaults(&desc); } +inline sg_pass_desc sg_query_pass_defaults(const sg_pass_desc& desc) 
{ return sg_query_pass_defaults(&desc); }
+
+inline void sg_init_buffer(sg_buffer buf_id, const sg_buffer_desc& desc) { return sg_init_buffer(buf_id, &desc); }
+inline void sg_init_image(sg_image img_id, const sg_image_desc& desc) { return sg_init_image(img_id, &desc); }
+inline void sg_init_shader(sg_shader shd_id, const sg_shader_desc& desc) { return sg_init_shader(shd_id, &desc); }
+inline void sg_init_pipeline(sg_pipeline pip_id, const sg_pipeline_desc& desc) { return sg_init_pipeline(pip_id, &desc); }
+inline void sg_init_pass(sg_pass pass_id, const sg_pass_desc& desc) { return sg_init_pass(pass_id, &desc); }
+
+inline void sg_update_buffer(sg_buffer buf_id, const sg_range& data) { return sg_update_buffer(buf_id, &data); }
+inline int sg_append_buffer(sg_buffer buf_id, const sg_range& data) { return sg_append_buffer(buf_id, &data); }
+#endif
+#endif // SOKOL_GFX_INCLUDED
+
+/*--- IMPLEMENTATION ---------------------------------------------------------*/
+#ifdef SOKOL_GFX_IMPL
+#define SOKOL_GFX_IMPL_INCLUDED (1)
+
+#if !(defined(SOKOL_GLCORE33)||defined(SOKOL_GLES2)||defined(SOKOL_GLES3)||defined(SOKOL_D3D11)||defined(SOKOL_METAL)||defined(SOKOL_WGPU)||defined(SOKOL_DUMMY_BACKEND))
+#error "Please select a backend with SOKOL_GLCORE33, SOKOL_GLES2, SOKOL_GLES3, SOKOL_D3D11, SOKOL_METAL, SOKOL_WGPU or SOKOL_DUMMY_BACKEND"
+#endif
+#include <string.h> /* memset */
+#include <float.h> /* FLT_MAX */
+
+#ifndef SOKOL_API_IMPL
+    #define SOKOL_API_IMPL
+#endif
+#ifndef SOKOL_DEBUG
+    #ifndef NDEBUG
+        #define SOKOL_DEBUG (1)
+    #endif
+#endif
+#ifndef SOKOL_ASSERT
+    #include <assert.h>
+    #define SOKOL_ASSERT(c) assert(c)
+#endif
+#ifndef SOKOL_VALIDATE_BEGIN
+    #define SOKOL_VALIDATE_BEGIN() _sg_validate_begin()
+#endif
+#ifndef SOKOL_VALIDATE
+    #define SOKOL_VALIDATE(cond, err) _sg_validate((cond), err)
+#endif
+#ifndef SOKOL_VALIDATE_END
+    #define SOKOL_VALIDATE_END() _sg_validate_end()
+#endif
+#ifndef SOKOL_UNREACHABLE
+    #define SOKOL_UNREACHABLE SOKOL_ASSERT(false)
+#endif
+#ifndef SOKOL_MALLOC
+    #include <stdlib.h>
+    #define SOKOL_MALLOC(s) malloc(s)
+    #define SOKOL_FREE(p) free(p)
+#endif
+#ifndef SOKOL_LOG
+    #ifdef SOKOL_DEBUG
+        #include <stdio.h>
+        #define SOKOL_LOG(s) { SOKOL_ASSERT(s); puts(s); }
+    #else
+        #define SOKOL_LOG(s)
+    #endif
+#endif
+
+#ifndef _SOKOL_PRIVATE
+    #if defined(__GNUC__) || defined(__clang__)
+        #define _SOKOL_PRIVATE __attribute__((unused)) static
+    #else
+        #define _SOKOL_PRIVATE static
+    #endif
+#endif
+
+#ifndef _SOKOL_UNUSED
+    #define _SOKOL_UNUSED(x) (void)(x)
+#endif
+
+#if defined(SOKOL_TRACE_HOOKS)
+#define _SG_TRACE_ARGS(fn, ...) if (_sg.hooks.fn) { _sg.hooks.fn(__VA_ARGS__, _sg.hooks.user_data); }
+#define _SG_TRACE_NOARGS(fn) if (_sg.hooks.fn) { _sg.hooks.fn(_sg.hooks.user_data); }
+#else
+#define _SG_TRACE_ARGS(fn, ...)
+#define _SG_TRACE_NOARGS(fn)
+#endif
+
+/* default clear values */
+#ifndef SG_DEFAULT_CLEAR_RED
+#define SG_DEFAULT_CLEAR_RED (0.5f)
+#endif
+#ifndef SG_DEFAULT_CLEAR_GREEN
+#define SG_DEFAULT_CLEAR_GREEN (0.5f)
+#endif
+#ifndef SG_DEFAULT_CLEAR_BLUE
+#define SG_DEFAULT_CLEAR_BLUE (0.5f)
+#endif
+#ifndef SG_DEFAULT_CLEAR_ALPHA
+#define SG_DEFAULT_CLEAR_ALPHA (1.0f)
+#endif
+#ifndef SG_DEFAULT_CLEAR_DEPTH
+#define SG_DEFAULT_CLEAR_DEPTH (1.0f)
+#endif
+#ifndef SG_DEFAULT_CLEAR_STENCIL
+#define SG_DEFAULT_CLEAR_STENCIL (0)
+#endif
+
+#ifdef _MSC_VER
+#pragma warning(push)
+#pragma warning(disable:4115)   /* named type definition in parentheses */
+#pragma warning(disable:4505)   /* unreferenced local function has been removed */
+#pragma warning(disable:4201)   /* nonstandard extension used: nameless struct/union (needed by d3d11.h) */
+#pragma warning(disable:4054)   /* 'type cast': from function pointer */
+#pragma warning(disable:4055)   /* 'type cast': from data pointer */
+#endif
+
+#if defined(SOKOL_D3D11)
+    #ifndef D3D11_NO_HELPERS
+        #define D3D11_NO_HELPERS
+    #endif
+    #ifndef WIN32_LEAN_AND_MEAN
+        #define WIN32_LEAN_AND_MEAN
+    #endif
+    #ifndef NOMINMAX
+        #define NOMINMAX
+    #endif
+    #include <d3d11.h>
+    #include <d3dcompiler.h>
+    #ifdef _MSC_VER
+        #if (defined(WINAPI_FAMILY_PARTITION) && !WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP))
+            #pragma comment (lib, "WindowsApp")
+        #else
+            #pragma comment (lib, "kernel32")
+            #pragma comment (lib, "user32")
+            #pragma comment (lib, "dxgi")
+            #pragma comment (lib, "d3d11")
+            #pragma comment (lib, "dxguid")
+        #endif
+    #endif
+#elif defined(SOKOL_METAL)
+    // see https://clang.llvm.org/docs/LanguageExtensions.html#automatic-reference-counting
+    #if !defined(__cplusplus)
+        #if __has_feature(objc_arc) && !__has_feature(objc_arc_fields)
+            #error "sokol_gfx.h requires __has_feature(objc_arc_field) if ARC is enabled (use a more recent compiler version)"
+        #endif
+    #endif
+    #include <TargetConditionals.h>
+    #if defined(TARGET_OS_IPHONE) && !TARGET_OS_IPHONE
+        #define _SG_TARGET_MACOS (1)
+    #else
+        #define _SG_TARGET_IOS (1)
+        #if defined(TARGET_IPHONE_SIMULATOR) && TARGET_IPHONE_SIMULATOR
+            #define _SG_TARGET_IOS_SIMULATOR (1)
+        #endif
+    #endif
+    #import <Metal/Metal.h>
+#elif defined(SOKOL_WGPU)
+    #if defined(__EMSCRIPTEN__)
+        #include <webgpu/webgpu.h>
+    #else
+        #include <dawn/webgpu.h>
+    #endif
+#elif defined(SOKOL_GLCORE33) || defined(SOKOL_GLES2) || defined(SOKOL_GLES3)
+    #define _SOKOL_ANY_GL (1)
+
+    // include platform specific GL headers (or on Win32: use an embedded GL loader)
+    #if !defined(SOKOL_EXTERNAL_GL_LOADER)
+        #if defined(_WIN32)
+            #if defined(SOKOL_GLCORE33) && !defined(SOKOL_EXTERNAL_GL_LOADER)
+                #ifndef WIN32_LEAN_AND_MEAN
+                #define WIN32_LEAN_AND_MEAN
+                #endif
+                #ifndef NOMINMAX
+                #define NOMINMAX
+                #endif
+                #include <windows.h>
+                #define _SOKOL_USE_WIN32_GL_LOADER (1)
+                #pragma comment (lib, "kernel32")   // GetProcAddress()
+            #endif
+        #elif defined(__APPLE__)
+            #include <TargetConditionals.h>
+            #ifndef GL_SILENCE_DEPRECATION
+                #define GL_SILENCE_DEPRECATION
+            #endif
+            #if defined(TARGET_OS_IPHONE) && !TARGET_OS_IPHONE
+                #include <OpenGL/gl3.h>
+            #else
+                #include <OpenGLES/ES3/gl.h>
+                #include <OpenGLES/ES3/glext.h>
+            #endif
+        #elif defined(__EMSCRIPTEN__) || defined(__ANDROID__)
+            #if defined(SOKOL_GLES3)
+                #include <GLES3/gl3.h>
+            #elif defined(SOKOL_GLES2)
+                #ifndef GL_EXT_PROTOTYPES
+                #define GL_GLEXT_PROTOTYPES
+                #endif
+                #include <GLES2/gl2.h>
+                #include <GLES2/gl2ext.h>
+            #endif
+        #elif defined(__linux__) || defined(__unix__)
+            #define GL_GLEXT_PROTOTYPES
+            #include <GL/gl.h>
+        #endif
+    #endif
+
+    // optional GL loader definitions (only on Win32)
+    #if defined(_SOKOL_USE_WIN32_GL_LOADER)
+        #define __gl_h_ 1
+        #define __gl32_h_ 1
+        #define __gl31_h_ 1
+        #define __GL_H__ 1
+
#define __glext_h_ 1 + #define __GLEXT_H_ 1 + #define __gltypes_h_ 1 + #define __glcorearb_h_ 1 + #define __gl_glcorearb_h_ 1 + #define GL_APIENTRY APIENTRY + + typedef unsigned int GLenum; + typedef unsigned int GLuint; + typedef int GLsizei; + typedef char GLchar; + typedef ptrdiff_t GLintptr; + typedef ptrdiff_t GLsizeiptr; + typedef double GLclampd; + typedef unsigned short GLushort; + typedef unsigned char GLubyte; + typedef unsigned char GLboolean; + typedef uint64_t GLuint64; + typedef double GLdouble; + typedef unsigned short GLhalf; + typedef float GLclampf; + typedef unsigned int GLbitfield; + typedef signed char GLbyte; + typedef short GLshort; + typedef void GLvoid; + typedef int64_t GLint64; + typedef float GLfloat; + typedef struct __GLsync * GLsync; + typedef int GLint; + #define GL_INT_2_10_10_10_REV 0x8D9F + #define GL_R32F 0x822E + #define GL_PROGRAM_POINT_SIZE 0x8642 + #define GL_STENCIL_ATTACHMENT 0x8D20 + #define GL_DEPTH_ATTACHMENT 0x8D00 + #define GL_COLOR_ATTACHMENT2 0x8CE2 + #define GL_COLOR_ATTACHMENT0 0x8CE0 + #define GL_R16F 0x822D + #define GL_COLOR_ATTACHMENT22 0x8CF6 + #define GL_DRAW_FRAMEBUFFER 0x8CA9 + #define GL_FRAMEBUFFER_COMPLETE 0x8CD5 + #define GL_NUM_EXTENSIONS 0x821D + #define GL_INFO_LOG_LENGTH 0x8B84 + #define GL_VERTEX_SHADER 0x8B31 + #define GL_INCR 0x1E02 + #define GL_DYNAMIC_DRAW 0x88E8 + #define GL_STATIC_DRAW 0x88E4 + #define GL_TEXTURE_CUBE_MAP_POSITIVE_Z 0x8519 + #define GL_TEXTURE_CUBE_MAP 0x8513 + #define GL_FUNC_SUBTRACT 0x800A + #define GL_FUNC_REVERSE_SUBTRACT 0x800B + #define GL_CONSTANT_COLOR 0x8001 + #define GL_DECR_WRAP 0x8508 + #define GL_R8 0x8229 + #define GL_LINEAR_MIPMAP_LINEAR 0x2703 + #define GL_ELEMENT_ARRAY_BUFFER 0x8893 + #define GL_SHORT 0x1402 + #define GL_DEPTH_TEST 0x0B71 + #define GL_TEXTURE_CUBE_MAP_NEGATIVE_Y 0x8518 + #define GL_LINK_STATUS 0x8B82 + #define GL_TEXTURE_CUBE_MAP_POSITIVE_Y 0x8517 + #define GL_SAMPLE_ALPHA_TO_COVERAGE 0x809E + #define GL_RGBA16F 0x881A + #define GL_CONSTANT_ALPHA 0x8003 + #define GL_READ_FRAMEBUFFER 0x8CA8 + #define GL_TEXTURE0 0x84C0 + #define GL_TEXTURE_MIN_LOD 0x813A + #define GL_CLAMP_TO_EDGE 0x812F + #define GL_UNSIGNED_SHORT_5_6_5 0x8363 + #define GL_TEXTURE_WRAP_R 0x8072 + #define GL_UNSIGNED_SHORT_5_5_5_1 0x8034 + #define GL_NEAREST_MIPMAP_NEAREST 0x2700 + #define GL_UNSIGNED_SHORT_4_4_4_4 0x8033 + #define GL_SRC_ALPHA_SATURATE 0x0308 + #define GL_STREAM_DRAW 0x88E0 + #define GL_ONE 1 + #define GL_NEAREST_MIPMAP_LINEAR 0x2702 + #define GL_RGB10_A2 0x8059 + #define GL_RGBA8 0x8058 + #define GL_COLOR_ATTACHMENT1 0x8CE1 + #define GL_RGBA4 0x8056 + #define GL_RGB8 0x8051 + #define GL_ARRAY_BUFFER 0x8892 + #define GL_STENCIL 0x1802 + #define GL_TEXTURE_2D 0x0DE1 + #define GL_DEPTH 0x1801 + #define GL_FRONT 0x0404 + #define GL_STENCIL_BUFFER_BIT 0x00000400 + #define GL_REPEAT 0x2901 + #define GL_RGBA 0x1908 + #define GL_TEXTURE_CUBE_MAP_POSITIVE_X 0x8515 + #define GL_DECR 0x1E03 + #define GL_FRAGMENT_SHADER 0x8B30 + #define GL_FLOAT 0x1406 + #define GL_TEXTURE_MAX_LOD 0x813B + #define GL_DEPTH_COMPONENT 0x1902 + #define GL_ONE_MINUS_DST_ALPHA 0x0305 + #define GL_COLOR 0x1800 + #define GL_TEXTURE_2D_ARRAY 0x8C1A + #define GL_TRIANGLES 0x0004 + #define GL_UNSIGNED_BYTE 0x1401 + #define GL_TEXTURE_MAG_FILTER 0x2800 + #define GL_ONE_MINUS_CONSTANT_ALPHA 0x8004 + #define GL_NONE 0 + #define GL_SRC_COLOR 0x0300 + #define GL_BYTE 0x1400 + #define GL_TEXTURE_CUBE_MAP_NEGATIVE_Z 0x851A + #define GL_LINE_STRIP 0x0003 + #define GL_TEXTURE_3D 0x806F + #define GL_CW 0x0900 + #define GL_LINEAR 
0x2601 + #define GL_RENDERBUFFER 0x8D41 + #define GL_GEQUAL 0x0206 + #define GL_COLOR_BUFFER_BIT 0x00004000 + #define GL_RGBA32F 0x8814 + #define GL_BLEND 0x0BE2 + #define GL_ONE_MINUS_SRC_ALPHA 0x0303 + #define GL_ONE_MINUS_CONSTANT_COLOR 0x8002 + #define GL_TEXTURE_WRAP_T 0x2803 + #define GL_TEXTURE_WRAP_S 0x2802 + #define GL_TEXTURE_MIN_FILTER 0x2801 + #define GL_LINEAR_MIPMAP_NEAREST 0x2701 + #define GL_EXTENSIONS 0x1F03 + #define GL_NO_ERROR 0 + #define GL_REPLACE 0x1E01 + #define GL_KEEP 0x1E00 + #define GL_CCW 0x0901 + #define GL_TEXTURE_CUBE_MAP_NEGATIVE_X 0x8516 + #define GL_RGB 0x1907 + #define GL_TRIANGLE_STRIP 0x0005 + #define GL_FALSE 0 + #define GL_ZERO 0 + #define GL_CULL_FACE 0x0B44 + #define GL_INVERT 0x150A + #define GL_INT 0x1404 + #define GL_UNSIGNED_INT 0x1405 + #define GL_UNSIGNED_SHORT 0x1403 + #define GL_NEAREST 0x2600 + #define GL_SCISSOR_TEST 0x0C11 + #define GL_LEQUAL 0x0203 + #define GL_STENCIL_TEST 0x0B90 + #define GL_DITHER 0x0BD0 + #define GL_DEPTH_COMPONENT16 0x81A5 + #define GL_EQUAL 0x0202 + #define GL_FRAMEBUFFER 0x8D40 + #define GL_RGB5 0x8050 + #define GL_LINES 0x0001 + #define GL_DEPTH_BUFFER_BIT 0x00000100 + #define GL_SRC_ALPHA 0x0302 + #define GL_INCR_WRAP 0x8507 + #define GL_LESS 0x0201 + #define GL_MULTISAMPLE 0x809D + #define GL_FRAMEBUFFER_BINDING 0x8CA6 + #define GL_BACK 0x0405 + #define GL_ALWAYS 0x0207 + #define GL_FUNC_ADD 0x8006 + #define GL_ONE_MINUS_DST_COLOR 0x0307 + #define GL_NOTEQUAL 0x0205 + #define GL_DST_COLOR 0x0306 + #define GL_COMPILE_STATUS 0x8B81 + #define GL_RED 0x1903 + #define GL_COLOR_ATTACHMENT3 0x8CE3 + #define GL_DST_ALPHA 0x0304 + #define GL_RGB5_A1 0x8057 + #define GL_GREATER 0x0204 + #define GL_POLYGON_OFFSET_FILL 0x8037 + #define GL_TRUE 1 + #define GL_NEVER 0x0200 + #define GL_POINTS 0x0000 + #define GL_ONE_MINUS_SRC_COLOR 0x0301 + #define GL_MIRRORED_REPEAT 0x8370 + #define GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS 0x8B4D + #define GL_R11F_G11F_B10F 0x8C3A + #define GL_UNSIGNED_INT_10F_11F_11F_REV 0x8C3B + #define GL_RGBA32UI 0x8D70 + #define GL_RGB32UI 0x8D71 + #define GL_RGBA16UI 0x8D76 + #define GL_RGB16UI 0x8D77 + #define GL_RGBA8UI 0x8D7C + #define GL_RGB8UI 0x8D7D + #define GL_RGBA32I 0x8D82 + #define GL_RGB32I 0x8D83 + #define GL_RGBA16I 0x8D88 + #define GL_RGB16I 0x8D89 + #define GL_RGBA8I 0x8D8E + #define GL_RGB8I 0x8D8F + #define GL_RED_INTEGER 0x8D94 + #define GL_RG 0x8227 + #define GL_RG_INTEGER 0x8228 + #define GL_R8 0x8229 + #define GL_R16 0x822A + #define GL_RG8 0x822B + #define GL_RG16 0x822C + #define GL_R16F 0x822D + #define GL_R32F 0x822E + #define GL_RG16F 0x822F + #define GL_RG32F 0x8230 + #define GL_R8I 0x8231 + #define GL_R8UI 0x8232 + #define GL_R16I 0x8233 + #define GL_R16UI 0x8234 + #define GL_R32I 0x8235 + #define GL_R32UI 0x8236 + #define GL_RG8I 0x8237 + #define GL_RG8UI 0x8238 + #define GL_RG16I 0x8239 + #define GL_RG16UI 0x823A + #define GL_RG32I 0x823B + #define GL_RG32UI 0x823C + #define GL_RGBA_INTEGER 0x8D99 + #define GL_R8_SNORM 0x8F94 + #define GL_RG8_SNORM 0x8F95 + #define GL_RGB8_SNORM 0x8F96 + #define GL_RGBA8_SNORM 0x8F97 + #define GL_R16_SNORM 0x8F98 + #define GL_RG16_SNORM 0x8F99 + #define GL_RGB16_SNORM 0x8F9A + #define GL_RGBA16_SNORM 0x8F9B + #define GL_RGBA16 0x805B + #define GL_MAX_TEXTURE_SIZE 0x0D33 + #define GL_MAX_CUBE_MAP_TEXTURE_SIZE 0x851C + #define GL_MAX_3D_TEXTURE_SIZE 0x8073 + #define GL_MAX_ARRAY_TEXTURE_LAYERS 0x88FF + #define GL_MAX_VERTEX_ATTRIBS 0x8869 + #define GL_CLAMP_TO_BORDER 0x812D + #define GL_TEXTURE_BORDER_COLOR 0x1004 + #define GL_CURRENT_PROGRAM 
0x8B8D + #endif + + #ifndef GL_UNSIGNED_INT_2_10_10_10_REV + #define GL_UNSIGNED_INT_2_10_10_10_REV 0x8368 + #endif + #ifndef GL_UNSIGNED_INT_24_8 + #define GL_UNSIGNED_INT_24_8 0x84FA + #endif + #ifndef GL_TEXTURE_MAX_ANISOTROPY_EXT + #define GL_TEXTURE_MAX_ANISOTROPY_EXT 0x84FE + #endif + #ifndef GL_MAX_TEXTURE_MAX_ANISOTROPY_EXT + #define GL_MAX_TEXTURE_MAX_ANISOTROPY_EXT 0x84FF + #endif + #ifndef GL_COMPRESSED_RGBA_S3TC_DXT1_EXT + #define GL_COMPRESSED_RGBA_S3TC_DXT1_EXT 0x83F1 + #endif + #ifndef GL_COMPRESSED_RGBA_S3TC_DXT3_EXT + #define GL_COMPRESSED_RGBA_S3TC_DXT3_EXT 0x83F2 + #endif + #ifndef GL_COMPRESSED_RGBA_S3TC_DXT5_EXT + #define GL_COMPRESSED_RGBA_S3TC_DXT5_EXT 0x83F3 + #endif + #ifndef GL_COMPRESSED_RED_RGTC1 + #define GL_COMPRESSED_RED_RGTC1 0x8DBB + #endif + #ifndef GL_COMPRESSED_SIGNED_RED_RGTC1 + #define GL_COMPRESSED_SIGNED_RED_RGTC1 0x8DBC + #endif + #ifndef GL_COMPRESSED_RED_GREEN_RGTC2 + #define GL_COMPRESSED_RED_GREEN_RGTC2 0x8DBD + #endif + #ifndef GL_COMPRESSED_SIGNED_RED_GREEN_RGTC2 + #define GL_COMPRESSED_SIGNED_RED_GREEN_RGTC2 0x8DBE + #endif + #ifndef GL_COMPRESSED_RGBA_BPTC_UNORM_ARB + #define GL_COMPRESSED_RGBA_BPTC_UNORM_ARB 0x8E8C + #endif + #ifndef GL_COMPRESSED_SRGB_ALPHA_BPTC_UNORM_ARB + #define GL_COMPRESSED_SRGB_ALPHA_BPTC_UNORM_ARB 0x8E8D + #endif + #ifndef GL_COMPRESSED_RGB_BPTC_SIGNED_FLOAT_ARB + #define GL_COMPRESSED_RGB_BPTC_SIGNED_FLOAT_ARB 0x8E8E + #endif + #ifndef GL_COMPRESSED_RGB_BPTC_UNSIGNED_FLOAT_ARB + #define GL_COMPRESSED_RGB_BPTC_UNSIGNED_FLOAT_ARB 0x8E8F + #endif + #ifndef GL_COMPRESSED_RGB_PVRTC_2BPPV1_IMG + #define GL_COMPRESSED_RGB_PVRTC_2BPPV1_IMG 0x8C01 + #endif + #ifndef GL_COMPRESSED_RGB_PVRTC_4BPPV1_IMG + #define GL_COMPRESSED_RGB_PVRTC_4BPPV1_IMG 0x8C00 + #endif + #ifndef GL_COMPRESSED_RGBA_PVRTC_2BPPV1_IMG + #define GL_COMPRESSED_RGBA_PVRTC_2BPPV1_IMG 0x8C03 + #endif + #ifndef GL_COMPRESSED_RGBA_PVRTC_4BPPV1_IMG + #define GL_COMPRESSED_RGBA_PVRTC_4BPPV1_IMG 0x8C02 + #endif + #ifndef GL_COMPRESSED_RGB8_ETC2 + #define GL_COMPRESSED_RGB8_ETC2 0x9274 + #endif + #ifndef GL_COMPRESSED_RGBA8_ETC2_EAC + #define GL_COMPRESSED_RGBA8_ETC2_EAC 0x9278 + #endif + #ifndef GL_COMPRESSED_RGB8_PUNCHTHROUGH_ALPHA1_ETC2 + #define GL_COMPRESSED_RGB8_PUNCHTHROUGH_ALPHA1_ETC2 0x9276 + #endif + #ifndef GL_COMPRESSED_RG11_EAC + #define GL_COMPRESSED_RG11_EAC 0x9272 + #endif + #ifndef GL_COMPRESSED_SIGNED_RG11_EAC + #define GL_COMPRESSED_SIGNED_RG11_EAC 0x9273 + #endif + #ifndef GL_DEPTH24_STENCIL8 + #define GL_DEPTH24_STENCIL8 0x88F0 + #endif + #ifndef GL_HALF_FLOAT + #define GL_HALF_FLOAT 0x140B + #endif + #ifndef GL_DEPTH_STENCIL + #define GL_DEPTH_STENCIL 0x84F9 + #endif + #ifndef GL_LUMINANCE + #define GL_LUMINANCE 0x1909 + #endif + + #ifdef SOKOL_GLES2 + #ifdef GL_ANGLE_instanced_arrays + #define _SOKOL_GL_INSTANCING_ENABLED + #define glDrawArraysInstanced(mode, first, count, instancecount) glDrawArraysInstancedANGLE(mode, first, count, instancecount) + #define glDrawElementsInstanced(mode, count, type, indices, instancecount) glDrawElementsInstancedANGLE(mode, count, type, indices, instancecount) + #define glVertexAttribDivisor(index, divisor) glVertexAttribDivisorANGLE(index, divisor) + #elif defined(GL_EXT_draw_instanced) && defined(GL_EXT_instanced_arrays) + #define _SOKOL_GL_INSTANCING_ENABLED + #define glDrawArraysInstanced(mode, first, count, instancecount) glDrawArraysInstancedEXT(mode, first, count, instancecount) + #define glDrawElementsInstanced(mode, count, type, indices, instancecount) glDrawElementsInstancedEXT(mode, count, type, 
indices, instancecount) + #define glVertexAttribDivisor(index, divisor) glVertexAttribDivisorEXT(index, divisor) + #else + #define _SOKOL_GLES2_INSTANCING_ERROR "Select GL_ANGLE_instanced_arrays or (GL_EXT_draw_instanced & GL_EXT_instanced_arrays) to enable instancing in GLES2" + #define glDrawArraysInstanced(mode, first, count, instancecount) SOKOL_ASSERT(0 && _SOKOL_GLES2_INSTANCING_ERROR) + #define glDrawElementsInstanced(mode, count, type, indices, instancecount) SOKOL_ASSERT(0 && _SOKOL_GLES2_INSTANCING_ERROR) + #define glVertexAttribDivisor(index, divisor) SOKOL_ASSERT(0 && _SOKOL_GLES2_INSTANCING_ERROR) + #endif + #else + #define _SOKOL_GL_INSTANCING_ENABLED + #endif + #define _SG_GL_CHECK_ERROR() { SOKOL_ASSERT(glGetError() == GL_NO_ERROR); } +#endif + +/*=== COMMON BACKEND STUFF ===================================================*/ + +/* resource pool slots */ +typedef struct { + uint32_t id; + uint32_t ctx_id; + sg_resource_state state; +} _sg_slot_t; + +/* constants */ +enum { + _SG_STRING_SIZE = 16, + _SG_SLOT_SHIFT = 16, + _SG_SLOT_MASK = (1<<_SG_SLOT_SHIFT)-1, + _SG_MAX_POOL_SIZE = (1<<_SG_SLOT_SHIFT), + _SG_DEFAULT_BUFFER_POOL_SIZE = 128, + _SG_DEFAULT_IMAGE_POOL_SIZE = 128, + _SG_DEFAULT_SHADER_POOL_SIZE = 32, + _SG_DEFAULT_PIPELINE_POOL_SIZE = 64, + _SG_DEFAULT_PASS_POOL_SIZE = 16, + _SG_DEFAULT_CONTEXT_POOL_SIZE = 16, + _SG_DEFAULT_SAMPLER_CACHE_CAPACITY = 64, + _SG_DEFAULT_UB_SIZE = 4 * 1024 * 1024, + _SG_DEFAULT_STAGING_SIZE = 8 * 1024 * 1024, +}; + +/* fixed-size string */ +typedef struct { + char buf[_SG_STRING_SIZE]; +} _sg_str_t; + +/* helper macros */ +#define _sg_def(val, def) (((val) == 0) ? (def) : (val)) + #define _sg_def_flt(val, def) (((val) == 0.0f) ? (def) : (val)) + #define _sg_min(a,b) ((a<b)?a:b) + #define _sg_max(a,b) ((a>b)?a:b) + #define _sg_clamp(v,v0,v1) ((v<v0)?(v0):((v>v1)?(v1):(v))) + #define _sg_fequal(val,cmp,delta) (((val-cmp)> -delta)&&((val-cmp)<delta)) + +typedef struct { + int size; + int append_pos; + bool append_overflow; + sg_buffer_type type; + sg_usage usage; + uint32_t update_frame_index; + uint32_t append_frame_index; + int num_slots; + int active_slot; +} _sg_buffer_common_t; + +_SOKOL_PRIVATE void _sg_buffer_common_init(_sg_buffer_common_t* cmn, const sg_buffer_desc* desc) { + cmn->size = (int)desc->size; + cmn->append_pos = 0; + cmn->append_overflow = false; + cmn->type = desc->type; + cmn->usage = desc->usage; + cmn->update_frame_index = 0; + cmn->append_frame_index = 0; + cmn->num_slots = (cmn->usage == SG_USAGE_IMMUTABLE) ? 1 : SG_NUM_INFLIGHT_FRAMES; + cmn->active_slot = 0; +} + +typedef struct { + sg_image_type type; + bool render_target; + int width; + int height; + int num_slices; + int num_mipmaps; + sg_usage usage; + sg_pixel_format pixel_format; + int sample_count; + sg_filter min_filter; + sg_filter mag_filter; + sg_wrap wrap_u; + sg_wrap wrap_v; + sg_wrap wrap_w; + sg_border_color border_color; + uint32_t max_anisotropy; + uint32_t upd_frame_index; + int num_slots; + int active_slot; +} _sg_image_common_t; + +_SOKOL_PRIVATE void _sg_image_common_init(_sg_image_common_t* cmn, const sg_image_desc* desc) { + cmn->type = desc->type; + cmn->render_target = desc->render_target; + cmn->width = desc->width; + cmn->height = desc->height; + cmn->num_slices = desc->num_slices; + cmn->num_mipmaps = desc->num_mipmaps; + cmn->usage = desc->usage; + cmn->pixel_format = desc->pixel_format; + cmn->sample_count = desc->sample_count; + cmn->min_filter = desc->min_filter; + cmn->mag_filter = desc->mag_filter; + cmn->wrap_u = desc->wrap_u; + cmn->wrap_v = desc->wrap_v; + cmn->wrap_w = desc->wrap_w; + cmn->border_color = desc->border_color; + cmn->max_anisotropy = desc->max_anisotropy; + cmn->upd_frame_index = 0; + cmn->num_slots = (cmn->usage == SG_USAGE_IMMUTABLE) ?
1 : SG_NUM_INFLIGHT_FRAMES; + cmn->active_slot = 0; +} + +typedef struct { + size_t size; +} _sg_uniform_block_t; + +typedef struct { + sg_image_type image_type; + sg_sampler_type sampler_type; +} _sg_shader_image_t; + +typedef struct { + int num_uniform_blocks; + int num_images; + _sg_uniform_block_t uniform_blocks[SG_MAX_SHADERSTAGE_UBS]; + _sg_shader_image_t images[SG_MAX_SHADERSTAGE_IMAGES]; +} _sg_shader_stage_t; + +typedef struct { + _sg_shader_stage_t stage[SG_NUM_SHADER_STAGES]; +} _sg_shader_common_t; + +_SOKOL_PRIVATE void _sg_shader_common_init(_sg_shader_common_t* cmn, const sg_shader_desc* desc) { + for (int stage_index = 0; stage_index < SG_NUM_SHADER_STAGES; stage_index++) { + const sg_shader_stage_desc* stage_desc = (stage_index == SG_SHADERSTAGE_VS) ? &desc->vs : &desc->fs; + _sg_shader_stage_t* stage = &cmn->stage[stage_index]; + SOKOL_ASSERT(stage->num_uniform_blocks == 0); + for (int ub_index = 0; ub_index < SG_MAX_SHADERSTAGE_UBS; ub_index++) { + const sg_shader_uniform_block_desc* ub_desc = &stage_desc->uniform_blocks[ub_index]; + if (0 == ub_desc->size) { + break; + } + stage->uniform_blocks[ub_index].size = ub_desc->size; + stage->num_uniform_blocks++; + } + SOKOL_ASSERT(stage->num_images == 0); + for (int img_index = 0; img_index < SG_MAX_SHADERSTAGE_IMAGES; img_index++) { + const sg_shader_image_desc* img_desc = &stage_desc->images[img_index]; + if (img_desc->image_type == _SG_IMAGETYPE_DEFAULT) { + break; + } + stage->images[img_index].image_type = img_desc->image_type; + stage->images[img_index].sampler_type = img_desc->sampler_type; + stage->num_images++; + } + } +} + +typedef struct { + sg_shader shader_id; + sg_index_type index_type; + bool vertex_layout_valid[SG_MAX_SHADERSTAGE_BUFFERS]; + int color_attachment_count; + sg_pixel_format color_formats[SG_MAX_COLOR_ATTACHMENTS]; + sg_pixel_format depth_format; + int sample_count; + float depth_bias; + float depth_bias_slope_scale; + float depth_bias_clamp; + sg_color blend_color; +} _sg_pipeline_common_t; + +_SOKOL_PRIVATE void _sg_pipeline_common_init(_sg_pipeline_common_t* cmn, const sg_pipeline_desc* desc) { + SOKOL_ASSERT(desc->color_count < SG_MAX_COLOR_ATTACHMENTS); + cmn->shader_id = desc->shader; + cmn->index_type = desc->index_type; + for (int i = 0; i < SG_MAX_SHADERSTAGE_BUFFERS; i++) { + cmn->vertex_layout_valid[i] = false; + } + cmn->color_attachment_count = desc->color_count; + for (int i = 0; i < cmn->color_attachment_count; i++) { + cmn->color_formats[i] = desc->colors[i].pixel_format; + } + cmn->depth_format = desc->depth.pixel_format; + cmn->sample_count = desc->sample_count; + cmn->depth_bias = desc->depth.bias; + cmn->depth_bias_slope_scale = desc->depth.bias_slope_scale; + cmn->depth_bias_clamp = desc->depth.bias_clamp; + cmn->blend_color = desc->blend_color; +} + +typedef struct { + sg_image image_id; + int mip_level; + int slice; +} _sg_pass_attachment_common_t; + +typedef struct { + int num_color_atts; + _sg_pass_attachment_common_t color_atts[SG_MAX_COLOR_ATTACHMENTS]; + _sg_pass_attachment_common_t ds_att; +} _sg_pass_common_t; + +_SOKOL_PRIVATE void _sg_pass_common_init(_sg_pass_common_t* cmn, const sg_pass_desc* desc) { + const sg_pass_attachment_desc* att_desc; + _sg_pass_attachment_common_t* att; + for (int i = 0; i < SG_MAX_COLOR_ATTACHMENTS; i++) { + att_desc = &desc->color_attachments[i]; + if (att_desc->image.id != SG_INVALID_ID) { + cmn->num_color_atts++; + att = &cmn->color_atts[i]; + att->image_id = att_desc->image; + att->mip_level = att_desc->mip_level; + att->slice = 
att_desc->slice; + } + } + att_desc = &desc->depth_stencil_attachment; + if (att_desc->image.id != SG_INVALID_ID) { + att = &cmn->ds_att; + att->image_id = att_desc->image; + att->mip_level = att_desc->mip_level; + att->slice = att_desc->slice; + } +} + +/*=== GENERIC SAMPLER CACHE ==================================================*/ + +/* + this is used by the Metal and WGPU backends to reduce the + number of sampler state objects created through the backend API +*/ +typedef struct { + sg_filter min_filter; + sg_filter mag_filter; + sg_wrap wrap_u; + sg_wrap wrap_v; + sg_wrap wrap_w; + sg_border_color border_color; + uint32_t max_anisotropy; + int min_lod; /* orig min/max_lod is float, this is int(min/max_lod*1000.0) */ + int max_lod; + uintptr_t sampler_handle; +} _sg_sampler_cache_item_t; + +typedef struct { + int capacity; + int num_items; + _sg_sampler_cache_item_t* items; +} _sg_sampler_cache_t; + +_SOKOL_PRIVATE void _sg_smpcache_init(_sg_sampler_cache_t* cache, int capacity) { + SOKOL_ASSERT(cache && (capacity > 0)); + memset(cache, 0, sizeof(_sg_sampler_cache_t)); + cache->capacity = capacity; + const size_t size = (size_t)cache->capacity * sizeof(_sg_sampler_cache_item_t); + cache->items = (_sg_sampler_cache_item_t*) SOKOL_MALLOC(size); + SOKOL_ASSERT(cache->items); + memset(cache->items, 0, size); +} + +_SOKOL_PRIVATE void _sg_smpcache_discard(_sg_sampler_cache_t* cache) { + SOKOL_ASSERT(cache && cache->items); + SOKOL_FREE(cache->items); + cache->items = 0; + cache->num_items = 0; + cache->capacity = 0; +} + +_SOKOL_PRIVATE int _sg_smpcache_minlod_int(float min_lod) { + return (int) (min_lod * 1000.0f); +} + +_SOKOL_PRIVATE int _sg_smpcache_maxlod_int(float max_lod) { + return (int) (_sg_clamp(max_lod, 0.0f, 1000.0f) * 1000.0f); +} + +_SOKOL_PRIVATE int _sg_smpcache_find_item(const _sg_sampler_cache_t* cache, const sg_image_desc* img_desc) { + /* return matching sampler cache item index or -1 */ + SOKOL_ASSERT(cache && cache->items); + SOKOL_ASSERT(img_desc); + const int min_lod = _sg_smpcache_minlod_int(img_desc->min_lod); + const int max_lod = _sg_smpcache_maxlod_int(img_desc->max_lod); + for (int i = 0; i < cache->num_items; i++) { + const _sg_sampler_cache_item_t* item = &cache->items[i]; + if ((img_desc->min_filter == item->min_filter) && + (img_desc->mag_filter == item->mag_filter) && + (img_desc->wrap_u == item->wrap_u) && + (img_desc->wrap_v == item->wrap_v) && + (img_desc->wrap_w == item->wrap_w) && + (img_desc->max_anisotropy == item->max_anisotropy) && + (img_desc->border_color == item->border_color) && + (min_lod == item->min_lod) && + (max_lod == item->max_lod)) + { + return i; + } + } + /* fallthrough: no matching cache item found */ + return -1; +} + +_SOKOL_PRIVATE void _sg_smpcache_add_item(_sg_sampler_cache_t* cache, const sg_image_desc* img_desc, uintptr_t sampler_handle) { + SOKOL_ASSERT(cache && cache->items); + SOKOL_ASSERT(img_desc); + SOKOL_ASSERT(cache->num_items < cache->capacity); + const int item_index = cache->num_items++; + _sg_sampler_cache_item_t* item = &cache->items[item_index]; + item->min_filter = img_desc->min_filter; + item->mag_filter = img_desc->mag_filter; + item->wrap_u = img_desc->wrap_u; + item->wrap_v = img_desc->wrap_v; + item->wrap_w = img_desc->wrap_w; + item->border_color = img_desc->border_color; + item->max_anisotropy = img_desc->max_anisotropy; + item->min_lod = _sg_smpcache_minlod_int(img_desc->min_lod); + item->max_lod = _sg_smpcache_maxlod_int(img_desc->max_lod); + item->sampler_handle = sampler_handle; +} + +_SOKOL_PRIVATE 
uintptr_t _sg_smpcache_sampler(_sg_sampler_cache_t* cache, int item_index) { + SOKOL_ASSERT(cache && cache->items); + SOKOL_ASSERT(item_index < cache->num_items); + return cache->items[item_index].sampler_handle; +} + +/*=== DUMMY BACKEND DECLARATIONS =============================================*/ +#if defined(SOKOL_DUMMY_BACKEND) +typedef struct { + _sg_slot_t slot; + _sg_buffer_common_t cmn; +} _sg_dummy_buffer_t; +typedef _sg_dummy_buffer_t _sg_buffer_t; + +typedef struct { + _sg_slot_t slot; + _sg_image_common_t cmn; +} _sg_dummy_image_t; +typedef _sg_dummy_image_t _sg_image_t; + +typedef struct { + _sg_slot_t slot; + _sg_shader_common_t cmn; +} _sg_dummy_shader_t; +typedef _sg_dummy_shader_t _sg_shader_t; + +typedef struct { + _sg_slot_t slot; + _sg_shader_t* shader; + _sg_pipeline_common_t cmn; +} _sg_dummy_pipeline_t; +typedef _sg_dummy_pipeline_t _sg_pipeline_t; + +typedef struct { + _sg_image_t* image; +} _sg_dummy_attachment_t; + +typedef struct { + _sg_slot_t slot; + _sg_pass_common_t cmn; + struct { + _sg_dummy_attachment_t color_atts[SG_MAX_COLOR_ATTACHMENTS]; + _sg_dummy_attachment_t ds_att; + } dmy; +} _sg_dummy_pass_t; +typedef _sg_dummy_pass_t _sg_pass_t; +typedef _sg_pass_attachment_common_t _sg_pass_attachment_t; + +typedef struct { + _sg_slot_t slot; +} _sg_dummy_context_t; +typedef _sg_dummy_context_t _sg_context_t; + +/*== GL BACKEND DECLARATIONS =================================================*/ +#elif defined(_SOKOL_ANY_GL) +typedef struct { + _sg_slot_t slot; + _sg_buffer_common_t cmn; + struct { + GLuint buf[SG_NUM_INFLIGHT_FRAMES]; + bool ext_buffers; /* if true, external buffers were injected with sg_buffer_desc.gl_buffers */ + } gl; +} _sg_gl_buffer_t; +typedef _sg_gl_buffer_t _sg_buffer_t; + +typedef struct { + _sg_slot_t slot; + _sg_image_common_t cmn; + struct { + GLenum target; + GLuint depth_render_buffer; + GLuint msaa_render_buffer; + GLuint tex[SG_NUM_INFLIGHT_FRAMES]; + bool ext_textures; /* if true, external textures were injected with sg_image_desc.gl_textures */ + } gl; +} _sg_gl_image_t; +typedef _sg_gl_image_t _sg_image_t; + +typedef struct { + GLint gl_loc; + sg_uniform_type type; + uint8_t count; + uint16_t offset; +} _sg_gl_uniform_t; + +typedef struct { + int num_uniforms; + _sg_gl_uniform_t uniforms[SG_MAX_UB_MEMBERS]; +} _sg_gl_uniform_block_t; + +typedef struct { + int gl_tex_slot; +} _sg_gl_shader_image_t; + +typedef struct { + _sg_str_t name; +} _sg_gl_shader_attr_t; + +typedef struct { + _sg_gl_uniform_block_t uniform_blocks[SG_MAX_SHADERSTAGE_UBS]; + _sg_gl_shader_image_t images[SG_MAX_SHADERSTAGE_IMAGES]; +} _sg_gl_shader_stage_t; + +typedef struct { + _sg_slot_t slot; + _sg_shader_common_t cmn; + struct { + GLuint prog; + _sg_gl_shader_attr_t attrs[SG_MAX_VERTEX_ATTRIBUTES]; + _sg_gl_shader_stage_t stage[SG_NUM_SHADER_STAGES]; + } gl; +} _sg_gl_shader_t; +typedef _sg_gl_shader_t _sg_shader_t; + +typedef struct { + int8_t vb_index; /* -1 if attr is not enabled */ + int8_t divisor; /* -1 if not initialized */ + uint8_t stride; + uint8_t size; + uint8_t normalized; + int offset; + GLenum type; +} _sg_gl_attr_t; + +typedef struct { + _sg_slot_t slot; + _sg_pipeline_common_t cmn; + _sg_shader_t* shader; + struct { + _sg_gl_attr_t attrs[SG_MAX_VERTEX_ATTRIBUTES]; + sg_depth_state depth; + sg_stencil_state stencil; + sg_primitive_type primitive_type; + sg_blend_state blend; + sg_color_mask color_write_mask[SG_MAX_COLOR_ATTACHMENTS]; + sg_cull_mode cull_mode; + sg_face_winding face_winding; + int sample_count; + bool 
alpha_to_coverage_enabled; + } gl; +} _sg_gl_pipeline_t; +typedef _sg_gl_pipeline_t _sg_pipeline_t; + +typedef struct { + _sg_image_t* image; + GLuint gl_msaa_resolve_buffer; +} _sg_gl_attachment_t; + +typedef struct { + _sg_slot_t slot; + _sg_pass_common_t cmn; + struct { + GLuint fb; + _sg_gl_attachment_t color_atts[SG_MAX_COLOR_ATTACHMENTS]; + _sg_gl_attachment_t ds_att; + } gl; +} _sg_gl_pass_t; +typedef _sg_gl_pass_t _sg_pass_t; +typedef _sg_pass_attachment_common_t _sg_pass_attachment_t; + +typedef struct { + _sg_slot_t slot; + #if !defined(SOKOL_GLES2) + GLuint vao; + #endif + GLuint default_framebuffer; +} _sg_gl_context_t; +typedef _sg_gl_context_t _sg_context_t; + +typedef struct { + _sg_gl_attr_t gl_attr; + GLuint gl_vbuf; +} _sg_gl_cache_attr_t; + +typedef struct { + GLenum target; + GLuint texture; +} _sg_gl_texture_bind_slot; + +typedef struct { + sg_depth_state depth; + sg_stencil_state stencil; + sg_blend_state blend; + sg_color_mask color_write_mask[SG_MAX_COLOR_ATTACHMENTS]; + sg_cull_mode cull_mode; + sg_face_winding face_winding; + bool polygon_offset_enabled; + int sample_count; + sg_color blend_color; + bool alpha_to_coverage_enabled; + _sg_gl_cache_attr_t attrs[SG_MAX_VERTEX_ATTRIBUTES]; + GLuint vertex_buffer; + GLuint index_buffer; + GLuint stored_vertex_buffer; + GLuint stored_index_buffer; + GLuint prog; + _sg_gl_texture_bind_slot textures[SG_MAX_SHADERSTAGE_IMAGES]; + _sg_gl_texture_bind_slot stored_texture; + int cur_ib_offset; + GLenum cur_primitive_type; + GLenum cur_index_type; + GLenum cur_active_texture; + _sg_pipeline_t* cur_pipeline; + sg_pipeline cur_pipeline_id; +} _sg_gl_state_cache_t; + +typedef struct { + bool valid; + bool gles2; + bool in_pass; + int cur_pass_width; + int cur_pass_height; + _sg_context_t* cur_context; + _sg_pass_t* cur_pass; + sg_pass cur_pass_id; + _sg_gl_state_cache_t cache; + bool ext_anisotropic; + GLint max_anisotropy; + GLint max_combined_texture_image_units; + #if _SOKOL_USE_WIN32_GL_LOADER + HINSTANCE opengl32_dll; + #endif +} _sg_gl_backend_t; + +/*== D3D11 BACKEND DECLARATIONS ==============================================*/ +#elif defined(SOKOL_D3D11) + +typedef struct { + _sg_slot_t slot; + _sg_buffer_common_t cmn; + struct { + ID3D11Buffer* buf; + } d3d11; +} _sg_d3d11_buffer_t; +typedef _sg_d3d11_buffer_t _sg_buffer_t; + +typedef struct { + _sg_slot_t slot; + _sg_image_common_t cmn; + struct { + DXGI_FORMAT format; + ID3D11Texture2D* tex2d; + ID3D11Texture3D* tex3d; + ID3D11Texture2D* texds; + ID3D11Texture2D* texmsaa; + ID3D11ShaderResourceView* srv; + ID3D11SamplerState* smp; + } d3d11; +} _sg_d3d11_image_t; +typedef _sg_d3d11_image_t _sg_image_t; + +typedef struct { + _sg_str_t sem_name; + int sem_index; +} _sg_d3d11_shader_attr_t; + +typedef struct { + ID3D11Buffer* cbufs[SG_MAX_SHADERSTAGE_UBS]; +} _sg_d3d11_shader_stage_t; + +typedef struct { + _sg_slot_t slot; + _sg_shader_common_t cmn; + struct { + _sg_d3d11_shader_attr_t attrs[SG_MAX_VERTEX_ATTRIBUTES]; + _sg_d3d11_shader_stage_t stage[SG_NUM_SHADER_STAGES]; + ID3D11VertexShader* vs; + ID3D11PixelShader* fs; + void* vs_blob; + size_t vs_blob_length; + } d3d11; +} _sg_d3d11_shader_t; +typedef _sg_d3d11_shader_t _sg_shader_t; + +typedef struct { + _sg_slot_t slot; + _sg_pipeline_common_t cmn; + _sg_shader_t* shader; + struct { + UINT stencil_ref; + UINT vb_strides[SG_MAX_SHADERSTAGE_BUFFERS]; + D3D_PRIMITIVE_TOPOLOGY topology; + DXGI_FORMAT index_format; + ID3D11InputLayout* il; + ID3D11RasterizerState* rs; + ID3D11DepthStencilState* dss; + ID3D11BlendState* 
bs; + } d3d11; +} _sg_d3d11_pipeline_t; +typedef _sg_d3d11_pipeline_t _sg_pipeline_t; + +typedef struct { + _sg_image_t* image; + ID3D11RenderTargetView* rtv; +} _sg_d3d11_color_attachment_t; + +typedef struct { + _sg_image_t* image; + ID3D11DepthStencilView* dsv; +} _sg_d3d11_ds_attachment_t; + +typedef struct { + _sg_slot_t slot; + _sg_pass_common_t cmn; + struct { + _sg_d3d11_color_attachment_t color_atts[SG_MAX_COLOR_ATTACHMENTS]; + _sg_d3d11_ds_attachment_t ds_att; + } d3d11; +} _sg_d3d11_pass_t; +typedef _sg_d3d11_pass_t _sg_pass_t; +typedef _sg_pass_attachment_common_t _sg_pass_attachment_t; + +typedef struct { + _sg_slot_t slot; +} _sg_d3d11_context_t; +typedef _sg_d3d11_context_t _sg_context_t; + +typedef struct { + bool valid; + ID3D11Device* dev; + ID3D11DeviceContext* ctx; + const void* (*rtv_cb)(void); + const void* (*rtv_userdata_cb)(void*); + const void* (*dsv_cb)(void); + const void* (*dsv_userdata_cb)(void*); + void* user_data; + bool in_pass; + bool use_indexed_draw; + int cur_width; + int cur_height; + int num_rtvs; + _sg_pass_t* cur_pass; + sg_pass cur_pass_id; + _sg_pipeline_t* cur_pipeline; + sg_pipeline cur_pipeline_id; + ID3D11RenderTargetView* cur_rtvs[SG_MAX_COLOR_ATTACHMENTS]; + ID3D11DepthStencilView* cur_dsv; + /* on-demand loaded d3dcompiler_47.dll handles */ + HINSTANCE d3dcompiler_dll; + bool d3dcompiler_dll_load_failed; + pD3DCompile D3DCompile_func; + /* the following arrays are used for unbinding resources, they will always contain zeroes */ + ID3D11RenderTargetView* zero_rtvs[SG_MAX_COLOR_ATTACHMENTS]; + ID3D11Buffer* zero_vbs[SG_MAX_SHADERSTAGE_BUFFERS]; + UINT zero_vb_offsets[SG_MAX_SHADERSTAGE_BUFFERS]; + UINT zero_vb_strides[SG_MAX_SHADERSTAGE_BUFFERS]; + ID3D11Buffer* zero_cbs[SG_MAX_SHADERSTAGE_UBS]; + ID3D11ShaderResourceView* zero_srvs[SG_MAX_SHADERSTAGE_IMAGES]; + ID3D11SamplerState* zero_smps[SG_MAX_SHADERSTAGE_IMAGES]; + /* global subresourcedata array for texture updates */ + D3D11_SUBRESOURCE_DATA subres_data[SG_MAX_MIPMAPS * SG_MAX_TEXTUREARRAY_LAYERS]; +} _sg_d3d11_backend_t; + +/*=== METAL BACKEND DECLARATIONS =============================================*/ +#elif defined(SOKOL_METAL) + +#if defined(_SG_TARGET_MACOS) || defined(_SG_TARGET_IOS_SIMULATOR) +#define _SG_MTL_UB_ALIGN (256) +#else +#define _SG_MTL_UB_ALIGN (16) +#endif +#define _SG_MTL_INVALID_SLOT_INDEX (0) + +typedef struct { + uint32_t frame_index; /* frame index at which it is safe to release this resource */ + int slot_index; +} _sg_mtl_release_item_t; + +typedef struct { + NSMutableArray* pool; + int num_slots; + int free_queue_top; + int* free_queue; + int release_queue_front; + int release_queue_back; + _sg_mtl_release_item_t* release_queue; +} _sg_mtl_idpool_t; + +typedef struct { + _sg_slot_t slot; + _sg_buffer_common_t cmn; + struct { + int buf[SG_NUM_INFLIGHT_FRAMES]; /* index into _sg_mtl_pool */ + } mtl; +} _sg_mtl_buffer_t; +typedef _sg_mtl_buffer_t _sg_buffer_t; + +typedef struct { + _sg_slot_t slot; + _sg_image_common_t cmn; + struct { + int tex[SG_NUM_INFLIGHT_FRAMES]; + int depth_tex; + int msaa_tex; + int sampler_state; + } mtl; +} _sg_mtl_image_t; +typedef _sg_mtl_image_t _sg_image_t; + +typedef struct { + int mtl_lib; + int mtl_func; +} _sg_mtl_shader_stage_t; + +typedef struct { + _sg_slot_t slot; + _sg_shader_common_t cmn; + struct { + _sg_mtl_shader_stage_t stage[SG_NUM_SHADER_STAGES]; + } mtl; +} _sg_mtl_shader_t; +typedef _sg_mtl_shader_t _sg_shader_t; + +typedef struct { + _sg_slot_t slot; + _sg_pipeline_common_t cmn; + _sg_shader_t* shader; + struct 
{ + MTLPrimitiveType prim_type; + int index_size; + MTLIndexType index_type; + MTLCullMode cull_mode; + MTLWinding winding; + uint32_t stencil_ref; + int rps; + int dss; + } mtl; +} _sg_mtl_pipeline_t; +typedef _sg_mtl_pipeline_t _sg_pipeline_t; + +typedef struct { + _sg_image_t* image; +} _sg_mtl_attachment_t; + +typedef struct { + _sg_slot_t slot; + _sg_pass_common_t cmn; + struct { + _sg_mtl_attachment_t color_atts[SG_MAX_COLOR_ATTACHMENTS]; + _sg_mtl_attachment_t ds_att; + } mtl; +} _sg_mtl_pass_t; +typedef _sg_mtl_pass_t _sg_pass_t; +typedef _sg_pass_attachment_common_t _sg_pass_attachment_t; + +typedef struct { + _sg_slot_t slot; +} _sg_mtl_context_t; +typedef _sg_mtl_context_t _sg_context_t; + +/* resource binding state cache */ +typedef struct { + const _sg_pipeline_t* cur_pipeline; + sg_pipeline cur_pipeline_id; + const _sg_buffer_t* cur_indexbuffer; + int cur_indexbuffer_offset; + sg_buffer cur_indexbuffer_id; + const _sg_buffer_t* cur_vertexbuffers[SG_MAX_SHADERSTAGE_BUFFERS]; + int cur_vertexbuffer_offsets[SG_MAX_SHADERSTAGE_BUFFERS]; + sg_buffer cur_vertexbuffer_ids[SG_MAX_SHADERSTAGE_BUFFERS]; + const _sg_image_t* cur_vs_images[SG_MAX_SHADERSTAGE_IMAGES]; + sg_image cur_vs_image_ids[SG_MAX_SHADERSTAGE_IMAGES]; + const _sg_image_t* cur_fs_images[SG_MAX_SHADERSTAGE_IMAGES]; + sg_image cur_fs_image_ids[SG_MAX_SHADERSTAGE_IMAGES]; +} _sg_mtl_state_cache_t; + +typedef struct { + bool valid; + const void*(*renderpass_descriptor_cb)(void); + const void*(*renderpass_descriptor_userdata_cb)(void*); + const void*(*drawable_cb)(void); + const void*(*drawable_userdata_cb)(void*); + void* user_data; + uint32_t frame_index; + uint32_t cur_frame_rotate_index; + int ub_size; + int cur_ub_offset; + uint8_t* cur_ub_base_ptr; + bool in_pass; + bool pass_valid; + int cur_width; + int cur_height; + _sg_mtl_state_cache_t state_cache; + _sg_sampler_cache_t sampler_cache; + _sg_mtl_idpool_t idpool; + dispatch_semaphore_t sem; + id<MTLDevice> device; + id<MTLCommandQueue> cmd_queue; + id<MTLCommandBuffer> cmd_buffer; + id<MTLRenderCommandEncoder> cmd_encoder; + id<MTLBuffer> uniform_buffers[SG_NUM_INFLIGHT_FRAMES]; +} _sg_mtl_backend_t; + +/*=== WGPU BACKEND DECLARATIONS ==============================================*/ +#elif defined(SOKOL_WGPU) + +#define _SG_WGPU_STAGING_ALIGN (256) +#define _SG_WGPU_STAGING_PIPELINE_SIZE (8) +#define _SG_WGPU_ROWPITCH_ALIGN (256) +#define _SG_WGPU_MAX_SHADERSTAGE_IMAGES (8) +#define _SG_WGPU_MAX_UNIFORM_UPDATE_SIZE (1<<16) + +typedef struct { + _sg_slot_t slot; + _sg_buffer_common_t cmn; + struct { + WGPUBuffer buf; + } wgpu; +} _sg_wgpu_buffer_t; +typedef _sg_wgpu_buffer_t _sg_buffer_t; + +typedef struct { + _sg_slot_t slot; + _sg_image_common_t cmn; + struct { + WGPUTexture tex; + WGPUTextureView tex_view; + WGPUTexture msaa_tex; + WGPUSampler sampler; + } wgpu; +} _sg_wgpu_image_t; +typedef _sg_wgpu_image_t _sg_image_t; + +typedef struct { + WGPUShaderModule module; + WGPUBindGroupLayout bind_group_layout; + _sg_str_t entry; +} _sg_wgpu_shader_stage_t; + +typedef struct { + _sg_slot_t slot; + _sg_shader_common_t cmn; + struct { + _sg_wgpu_shader_stage_t stage[SG_NUM_SHADER_STAGES]; + } wgpu; +} _sg_wgpu_shader_t; +typedef _sg_wgpu_shader_t _sg_shader_t; + +typedef struct { + _sg_slot_t slot; + _sg_pipeline_common_t cmn; + _sg_shader_t* shader; + struct { + WGPURenderPipeline pip; + uint32_t stencil_ref; + } wgpu; +} _sg_wgpu_pipeline_t; +typedef _sg_wgpu_pipeline_t _sg_pipeline_t; + +typedef struct { + _sg_image_t* image; + WGPUTextureView render_tex_view; + WGPUTextureView resolve_tex_view; +} _sg_wgpu_attachment_t; + +typedef struct { +
_sg_slot_t slot; + _sg_pass_common_t cmn; + struct { + _sg_wgpu_attachment_t color_atts[SG_MAX_COLOR_ATTACHMENTS]; + _sg_wgpu_attachment_t ds_att; + } wgpu; +} _sg_wgpu_pass_t; +typedef _sg_wgpu_pass_t _sg_pass_t; +typedef _sg_pass_attachment_common_t _sg_pass_attachment_t; + +typedef struct { + _sg_slot_t slot; +} _sg_wgpu_context_t; +typedef _sg_wgpu_context_t _sg_context_t; + +/* a pool of per-frame uniform buffers */ +typedef struct { + WGPUBindGroupLayout bindgroup_layout; + uint32_t num_bytes; + uint32_t offset; /* current offset into current frame's mapped uniform buffer */ + uint32_t bind_offsets[SG_NUM_SHADER_STAGES][SG_MAX_SHADERSTAGE_UBS]; + WGPUBuffer buf; /* the GPU-side uniform buffer */ + WGPUBindGroup bindgroup; + struct { + int num; + int cur; + WGPUBuffer buf[_SG_WGPU_STAGING_PIPELINE_SIZE]; /* CPU-side staging buffers */ + uint8_t* ptr[_SG_WGPU_STAGING_PIPELINE_SIZE]; /* if != 0, staging buffer currently mapped */ + } stage; +} _sg_wgpu_ubpool_t; + +/* ...a similar pool (like uniform buffer pool) of dynamic-resource staging buffers */ +typedef struct { + uint32_t num_bytes; + uint32_t offset; /* current offset into current frame's staging buffer */ + int num; /* number of staging buffers */ + int cur; /* this frame's staging buffer */ + WGPUBuffer buf[_SG_WGPU_STAGING_PIPELINE_SIZE]; /* CPU-side staging buffers */ + uint8_t* ptr[_SG_WGPU_STAGING_PIPELINE_SIZE]; /* if != 0, staging buffer currently mapped */ +} _sg_wgpu_stagingpool_t; + +/* the WGPU backend state */ +typedef struct { + bool valid; + bool in_pass; + bool draw_indexed; + int cur_width; + int cur_height; + WGPUDevice dev; + WGPUTextureView (*render_view_cb)(void); + WGPUTextureView (*render_view_userdata_cb)(void*); + WGPUTextureView (*resolve_view_cb)(void); + WGPUTextureView (*resolve_view_userdata_cb)(void*); + WGPUTextureView (*depth_stencil_view_cb)(void); + WGPUTextureView (*depth_stencil_view_userdata_cb)(void*); + void* user_data; + WGPUQueue queue; + WGPUCommandEncoder render_cmd_enc; + WGPUCommandEncoder staging_cmd_enc; + WGPURenderPassEncoder pass_enc; + WGPUBindGroup empty_bind_group; + const _sg_pipeline_t* cur_pipeline; + sg_pipeline cur_pipeline_id; + _sg_sampler_cache_t sampler_cache; + _sg_wgpu_ubpool_t ub; + _sg_wgpu_stagingpool_t staging; +} _sg_wgpu_backend_t; +#endif + +/*=== RESOURCE POOL DECLARATIONS =============================================*/ + +/* this *MUST* remain 0 */ +#define _SG_INVALID_SLOT_INDEX (0) + +typedef struct { + int size; + int queue_top; + uint32_t* gen_ctrs; + int* free_queue; +} _sg_pool_t; + +typedef struct { + _sg_pool_t buffer_pool; + _sg_pool_t image_pool; + _sg_pool_t shader_pool; + _sg_pool_t pipeline_pool; + _sg_pool_t pass_pool; + _sg_pool_t context_pool; + _sg_buffer_t* buffers; + _sg_image_t* images; + _sg_shader_t* shaders; + _sg_pipeline_t* pipelines; + _sg_pass_t* passes; + _sg_context_t* contexts; +} _sg_pools_t; + +/*=== VALIDATION LAYER DECLARATIONS ==========================================*/ +typedef enum { + /* special case 'validation was successful' */ + _SG_VALIDATE_SUCCESS, + + /* buffer creation */ + _SG_VALIDATE_BUFFERDESC_CANARY, + _SG_VALIDATE_BUFFERDESC_SIZE, + _SG_VALIDATE_BUFFERDESC_DATA, + _SG_VALIDATE_BUFFERDESC_DATA_SIZE, + _SG_VALIDATE_BUFFERDESC_NO_DATA, + + /* image creation */ + _SG_VALIDATE_IMAGEDESC_CANARY, + _SG_VALIDATE_IMAGEDESC_WIDTH, + _SG_VALIDATE_IMAGEDESC_HEIGHT, + _SG_VALIDATE_IMAGEDESC_RT_PIXELFORMAT, + _SG_VALIDATE_IMAGEDESC_NONRT_PIXELFORMAT, + _SG_VALIDATE_IMAGEDESC_MSAA_BUT_NO_RT, + 
_SG_VALIDATE_IMAGEDESC_NO_MSAA_RT_SUPPORT, + _SG_VALIDATE_IMAGEDESC_RT_IMMUTABLE, + _SG_VALIDATE_IMAGEDESC_RT_NO_DATA, + _SG_VALIDATE_IMAGEDESC_DATA, + _SG_VALIDATE_IMAGEDESC_NO_DATA, + + /* shader creation */ + _SG_VALIDATE_SHADERDESC_CANARY, + _SG_VALIDATE_SHADERDESC_SOURCE, + _SG_VALIDATE_SHADERDESC_BYTECODE, + _SG_VALIDATE_SHADERDESC_SOURCE_OR_BYTECODE, + _SG_VALIDATE_SHADERDESC_NO_BYTECODE_SIZE, + _SG_VALIDATE_SHADERDESC_NO_CONT_UBS, + _SG_VALIDATE_SHADERDESC_NO_CONT_IMGS, + _SG_VALIDATE_SHADERDESC_NO_CONT_UB_MEMBERS, + _SG_VALIDATE_SHADERDESC_NO_UB_MEMBERS, + _SG_VALIDATE_SHADERDESC_UB_MEMBER_NAME, + _SG_VALIDATE_SHADERDESC_UB_SIZE_MISMATCH, + _SG_VALIDATE_SHADERDESC_IMG_NAME, + _SG_VALIDATE_SHADERDESC_ATTR_NAMES, + _SG_VALIDATE_SHADERDESC_ATTR_SEMANTICS, + _SG_VALIDATE_SHADERDESC_ATTR_STRING_TOO_LONG, + + /* pipeline creation */ + _SG_VALIDATE_PIPELINEDESC_CANARY, + _SG_VALIDATE_PIPELINEDESC_SHADER, + _SG_VALIDATE_PIPELINEDESC_NO_ATTRS, + _SG_VALIDATE_PIPELINEDESC_LAYOUT_STRIDE4, + _SG_VALIDATE_PIPELINEDESC_ATTR_NAME, + _SG_VALIDATE_PIPELINEDESC_ATTR_SEMANTICS, + + /* pass creation */ + _SG_VALIDATE_PASSDESC_CANARY, + _SG_VALIDATE_PASSDESC_NO_COLOR_ATTS, + _SG_VALIDATE_PASSDESC_NO_CONT_COLOR_ATTS, + _SG_VALIDATE_PASSDESC_IMAGE, + _SG_VALIDATE_PASSDESC_MIPLEVEL, + _SG_VALIDATE_PASSDESC_FACE, + _SG_VALIDATE_PASSDESC_LAYER, + _SG_VALIDATE_PASSDESC_SLICE, + _SG_VALIDATE_PASSDESC_IMAGE_NO_RT, + _SG_VALIDATE_PASSDESC_COLOR_INV_PIXELFORMAT, + _SG_VALIDATE_PASSDESC_DEPTH_INV_PIXELFORMAT, + _SG_VALIDATE_PASSDESC_IMAGE_SIZES, + _SG_VALIDATE_PASSDESC_IMAGE_SAMPLE_COUNTS, + + /* sg_begin_pass validation */ + _SG_VALIDATE_BEGINPASS_PASS, + _SG_VALIDATE_BEGINPASS_IMAGE, + + /* sg_apply_pipeline validation */ + _SG_VALIDATE_APIP_PIPELINE_VALID_ID, + _SG_VALIDATE_APIP_PIPELINE_EXISTS, + _SG_VALIDATE_APIP_PIPELINE_VALID, + _SG_VALIDATE_APIP_SHADER_EXISTS, + _SG_VALIDATE_APIP_SHADER_VALID, + _SG_VALIDATE_APIP_ATT_COUNT, + _SG_VALIDATE_APIP_COLOR_FORMAT, + _SG_VALIDATE_APIP_DEPTH_FORMAT, + _SG_VALIDATE_APIP_SAMPLE_COUNT, + + /* sg_apply_bindings validation */ + _SG_VALIDATE_ABND_PIPELINE, + _SG_VALIDATE_ABND_PIPELINE_EXISTS, + _SG_VALIDATE_ABND_PIPELINE_VALID, + _SG_VALIDATE_ABND_VBS, + _SG_VALIDATE_ABND_VB_EXISTS, + _SG_VALIDATE_ABND_VB_TYPE, + _SG_VALIDATE_ABND_VB_OVERFLOW, + _SG_VALIDATE_ABND_NO_IB, + _SG_VALIDATE_ABND_IB, + _SG_VALIDATE_ABND_IB_EXISTS, + _SG_VALIDATE_ABND_IB_TYPE, + _SG_VALIDATE_ABND_IB_OVERFLOW, + _SG_VALIDATE_ABND_VS_IMGS, + _SG_VALIDATE_ABND_VS_IMG_EXISTS, + _SG_VALIDATE_ABND_VS_IMG_TYPES, + _SG_VALIDATE_ABND_FS_IMGS, + _SG_VALIDATE_ABND_FS_IMG_EXISTS, + _SG_VALIDATE_ABND_FS_IMG_TYPES, + + /* sg_apply_uniforms validation */ + _SG_VALIDATE_AUB_NO_PIPELINE, + _SG_VALIDATE_AUB_NO_UB_AT_SLOT, + _SG_VALIDATE_AUB_SIZE, + + /* sg_update_buffer validation */ + _SG_VALIDATE_UPDATEBUF_USAGE, + _SG_VALIDATE_UPDATEBUF_SIZE, + _SG_VALIDATE_UPDATEBUF_ONCE, + _SG_VALIDATE_UPDATEBUF_APPEND, + + /* sg_append_buffer validation */ + _SG_VALIDATE_APPENDBUF_USAGE, + _SG_VALIDATE_APPENDBUF_SIZE, + _SG_VALIDATE_APPENDBUF_UPDATE, + + /* sg_update_image validation */ + _SG_VALIDATE_UPDIMG_USAGE, + _SG_VALIDATE_UPDIMG_NOTENOUGHDATA, + _SG_VALIDATE_UPDIMG_SIZE, + _SG_VALIDATE_UPDIMG_COMPRESSED, + _SG_VALIDATE_UPDIMG_ONCE +} _sg_validate_error_t; + +/*=== GENERIC BACKEND STATE ==================================================*/ + +typedef struct { + bool valid; + sg_desc desc; /* original desc with default values patched in */ + uint32_t frame_index; + sg_context active_context; + sg_pass cur_pass; + 
sg_pipeline cur_pipeline; + bool pass_valid; + bool bindings_valid; + bool next_draw_valid; + #if defined(SOKOL_DEBUG) + _sg_validate_error_t validate_error; + #endif + _sg_pools_t pools; + sg_backend backend; + sg_features features; + sg_limits limits; + sg_pixelformat_info formats[_SG_PIXELFORMAT_NUM]; + #if defined(_SOKOL_ANY_GL) + _sg_gl_backend_t gl; + #elif defined(SOKOL_METAL) + _sg_mtl_backend_t mtl; + #elif defined(SOKOL_D3D11) + _sg_d3d11_backend_t d3d11; + #elif defined(SOKOL_WGPU) + _sg_wgpu_backend_t wgpu; + #endif + #if defined(SOKOL_TRACE_HOOKS) + sg_trace_hooks hooks; + #endif +} _sg_state_t; +static _sg_state_t _sg; + +/*-- helper functions --------------------------------------------------------*/ + +_SOKOL_PRIVATE bool _sg_strempty(const _sg_str_t* str) { + return 0 == str->buf[0]; +} + +_SOKOL_PRIVATE const char* _sg_strptr(const _sg_str_t* str) { + return &str->buf[0]; +} + +_SOKOL_PRIVATE void _sg_strcpy(_sg_str_t* dst, const char* src) { + SOKOL_ASSERT(dst); + if (src) { + #if defined(_MSC_VER) + strncpy_s(dst->buf, _SG_STRING_SIZE, src, (_SG_STRING_SIZE-1)); + #else + strncpy(dst->buf, src, _SG_STRING_SIZE); + #endif + dst->buf[_SG_STRING_SIZE-1] = 0; + } + else { + memset(dst->buf, 0, _SG_STRING_SIZE); + } +} + +/* return byte size of a vertex format */ +_SOKOL_PRIVATE int _sg_vertexformat_bytesize(sg_vertex_format fmt) { + switch (fmt) { + case SG_VERTEXFORMAT_FLOAT: return 4; + case SG_VERTEXFORMAT_FLOAT2: return 8; + case SG_VERTEXFORMAT_FLOAT3: return 12; + case SG_VERTEXFORMAT_FLOAT4: return 16; + case SG_VERTEXFORMAT_BYTE4: return 4; + case SG_VERTEXFORMAT_BYTE4N: return 4; + case SG_VERTEXFORMAT_UBYTE4: return 4; + case SG_VERTEXFORMAT_UBYTE4N: return 4; + case SG_VERTEXFORMAT_SHORT2: return 4; + case SG_VERTEXFORMAT_SHORT2N: return 4; + case SG_VERTEXFORMAT_USHORT2N: return 4; + case SG_VERTEXFORMAT_SHORT4: return 8; + case SG_VERTEXFORMAT_SHORT4N: return 8; + case SG_VERTEXFORMAT_USHORT4N: return 8; + case SG_VERTEXFORMAT_UINT10_N2: return 4; + case SG_VERTEXFORMAT_INVALID: return 0; + default: + SOKOL_UNREACHABLE; + return -1; + } +} + +/* return the byte size of a shader uniform */ +_SOKOL_PRIVATE int _sg_uniform_size(sg_uniform_type type, int count) { + switch (type) { + case SG_UNIFORMTYPE_INVALID: return 0; + case SG_UNIFORMTYPE_FLOAT: return 4 * count; + case SG_UNIFORMTYPE_FLOAT2: return 8 * count; + case SG_UNIFORMTYPE_FLOAT3: return 12 * count; /* FIXME: std140??? 
*/ + case SG_UNIFORMTYPE_FLOAT4: return 16 * count; + case SG_UNIFORMTYPE_MAT4: return 64 * count; + default: + SOKOL_UNREACHABLE; + return -1; + } +} + +/* return true if pixel format is a compressed format */ +_SOKOL_PRIVATE bool _sg_is_compressed_pixel_format(sg_pixel_format fmt) { + switch (fmt) { + case SG_PIXELFORMAT_BC1_RGBA: + case SG_PIXELFORMAT_BC2_RGBA: + case SG_PIXELFORMAT_BC3_RGBA: + case SG_PIXELFORMAT_BC4_R: + case SG_PIXELFORMAT_BC4_RSN: + case SG_PIXELFORMAT_BC5_RG: + case SG_PIXELFORMAT_BC5_RGSN: + case SG_PIXELFORMAT_BC6H_RGBF: + case SG_PIXELFORMAT_BC6H_RGBUF: + case SG_PIXELFORMAT_BC7_RGBA: + case SG_PIXELFORMAT_PVRTC_RGB_2BPP: + case SG_PIXELFORMAT_PVRTC_RGB_4BPP: + case SG_PIXELFORMAT_PVRTC_RGBA_2BPP: + case SG_PIXELFORMAT_PVRTC_RGBA_4BPP: + case SG_PIXELFORMAT_ETC2_RGB8: + case SG_PIXELFORMAT_ETC2_RGB8A1: + case SG_PIXELFORMAT_ETC2_RGBA8: + case SG_PIXELFORMAT_ETC2_RG11: + case SG_PIXELFORMAT_ETC2_RG11SN: + return true; + default: + return false; + } +} + +/* return true if pixel format is a valid render target format */ +_SOKOL_PRIVATE bool _sg_is_valid_rendertarget_color_format(sg_pixel_format fmt) { + const int fmt_index = (int) fmt; + SOKOL_ASSERT((fmt_index >= 0) && (fmt_index < _SG_PIXELFORMAT_NUM)); + return _sg.formats[fmt_index].render && !_sg.formats[fmt_index].depth; +} + +/* return true if pixel format is a valid depth format */ +_SOKOL_PRIVATE bool _sg_is_valid_rendertarget_depth_format(sg_pixel_format fmt) { + const int fmt_index = (int) fmt; + SOKOL_ASSERT((fmt_index >= 0) && (fmt_index < _SG_PIXELFORMAT_NUM)); + return _sg.formats[fmt_index].render && _sg.formats[fmt_index].depth; +} + +/* return true if pixel format is a depth-stencil format */ +_SOKOL_PRIVATE bool _sg_is_depth_stencil_format(sg_pixel_format fmt) { + return (SG_PIXELFORMAT_DEPTH_STENCIL == fmt); +} + +/* return the bytes-per-pixel for a pixel format */ +_SOKOL_PRIVATE int _sg_pixelformat_bytesize(sg_pixel_format fmt) { + switch (fmt) { + case SG_PIXELFORMAT_R8: + case SG_PIXELFORMAT_R8SN: + case SG_PIXELFORMAT_R8UI: + case SG_PIXELFORMAT_R8SI: + return 1; + + case SG_PIXELFORMAT_R16: + case SG_PIXELFORMAT_R16SN: + case SG_PIXELFORMAT_R16UI: + case SG_PIXELFORMAT_R16SI: + case SG_PIXELFORMAT_R16F: + case SG_PIXELFORMAT_RG8: + case SG_PIXELFORMAT_RG8SN: + case SG_PIXELFORMAT_RG8UI: + case SG_PIXELFORMAT_RG8SI: + return 2; + + case SG_PIXELFORMAT_R32UI: + case SG_PIXELFORMAT_R32SI: + case SG_PIXELFORMAT_R32F: + case SG_PIXELFORMAT_RG16: + case SG_PIXELFORMAT_RG16SN: + case SG_PIXELFORMAT_RG16UI: + case SG_PIXELFORMAT_RG16SI: + case SG_PIXELFORMAT_RG16F: + case SG_PIXELFORMAT_RGBA8: + case SG_PIXELFORMAT_RGBA8SN: + case SG_PIXELFORMAT_RGBA8UI: + case SG_PIXELFORMAT_RGBA8SI: + case SG_PIXELFORMAT_BGRA8: + case SG_PIXELFORMAT_RGB10A2: + case SG_PIXELFORMAT_RG11B10F: + return 4; + + case SG_PIXELFORMAT_RG32UI: + case SG_PIXELFORMAT_RG32SI: + case SG_PIXELFORMAT_RG32F: + case SG_PIXELFORMAT_RGBA16: + case SG_PIXELFORMAT_RGBA16SN: + case SG_PIXELFORMAT_RGBA16UI: + case SG_PIXELFORMAT_RGBA16SI: + case SG_PIXELFORMAT_RGBA16F: + return 8; + + case SG_PIXELFORMAT_RGBA32UI: + case SG_PIXELFORMAT_RGBA32SI: + case SG_PIXELFORMAT_RGBA32F: + return 16; + + default: + SOKOL_UNREACHABLE; + return 0; + } +} + +_SOKOL_PRIVATE int _sg_roundup(int val, int round_to) { + return (val+(round_to-1)) & ~(round_to-1); +} + +/* return row pitch for an image + see ComputePitch in https://github.com/microsoft/DirectXTex/blob/master/DirectXTex/DirectXTexUtil.cpp +*/ +_SOKOL_PRIVATE int _sg_row_pitch(sg_pixel_format 
fmt, int width, int row_align) { + int pitch; + switch (fmt) { + case SG_PIXELFORMAT_BC1_RGBA: + case SG_PIXELFORMAT_BC4_R: + case SG_PIXELFORMAT_BC4_RSN: + case SG_PIXELFORMAT_ETC2_RGB8: + case SG_PIXELFORMAT_ETC2_RGB8A1: + pitch = ((width + 3) / 4) * 8; + pitch = pitch < 8 ? 8 : pitch; + break; + case SG_PIXELFORMAT_BC2_RGBA: + case SG_PIXELFORMAT_BC3_RGBA: + case SG_PIXELFORMAT_BC5_RG: + case SG_PIXELFORMAT_BC5_RGSN: + case SG_PIXELFORMAT_BC6H_RGBF: + case SG_PIXELFORMAT_BC6H_RGBUF: + case SG_PIXELFORMAT_BC7_RGBA: + case SG_PIXELFORMAT_ETC2_RGBA8: + case SG_PIXELFORMAT_ETC2_RG11: + case SG_PIXELFORMAT_ETC2_RG11SN: + pitch = ((width + 3) / 4) * 16; + pitch = pitch < 16 ? 16 : pitch; + break; + case SG_PIXELFORMAT_PVRTC_RGB_4BPP: + case SG_PIXELFORMAT_PVRTC_RGBA_4BPP: + { + const int block_size = 4*4; + const int bpp = 4; + int width_blocks = width / 4; + width_blocks = width_blocks < 2 ? 2 : width_blocks; + pitch = width_blocks * ((block_size * bpp) / 8); + } + break; + case SG_PIXELFORMAT_PVRTC_RGB_2BPP: + case SG_PIXELFORMAT_PVRTC_RGBA_2BPP: + { + const int block_size = 8*4; + const int bpp = 2; + int width_blocks = width / 4; + width_blocks = width_blocks < 2 ? 2 : width_blocks; + pitch = width_blocks * ((block_size * bpp) / 8); + } + break; + default: + pitch = width * _sg_pixelformat_bytesize(fmt); + break; + } + pitch = _sg_roundup(pitch, row_align); + return pitch; +} + +/* compute the number of rows in a surface depending on pixel format */ +_SOKOL_PRIVATE int _sg_num_rows(sg_pixel_format fmt, int height) { + int num_rows; + switch (fmt) { + case SG_PIXELFORMAT_BC1_RGBA: + case SG_PIXELFORMAT_BC4_R: + case SG_PIXELFORMAT_BC4_RSN: + case SG_PIXELFORMAT_ETC2_RGB8: + case SG_PIXELFORMAT_ETC2_RGB8A1: + case SG_PIXELFORMAT_ETC2_RGBA8: + case SG_PIXELFORMAT_ETC2_RG11: + case SG_PIXELFORMAT_ETC2_RG11SN: + case SG_PIXELFORMAT_BC2_RGBA: + case SG_PIXELFORMAT_BC3_RGBA: + case SG_PIXELFORMAT_BC5_RG: + case SG_PIXELFORMAT_BC5_RGSN: + case SG_PIXELFORMAT_BC6H_RGBF: + case SG_PIXELFORMAT_BC6H_RGBUF: + case SG_PIXELFORMAT_BC7_RGBA: + case SG_PIXELFORMAT_PVRTC_RGB_4BPP: + case SG_PIXELFORMAT_PVRTC_RGBA_4BPP: + case SG_PIXELFORMAT_PVRTC_RGB_2BPP: + case SG_PIXELFORMAT_PVRTC_RGBA_2BPP: + num_rows = ((height + 3) / 4); + break; + default: + num_rows = height; + break; + } + if (num_rows < 1) { + num_rows = 1; + } + return num_rows; +} + +/* return pitch of a 2D subimage / texture slice + see ComputePitch in https://github.com/microsoft/DirectXTex/blob/master/DirectXTex/DirectXTexUtil.cpp +*/ +_SOKOL_PRIVATE int _sg_surface_pitch(sg_pixel_format fmt, int width, int height, int row_align) { + int num_rows = _sg_num_rows(fmt, height); + return num_rows * _sg_row_pitch(fmt, width, row_align); +} + +/* capability table pixel format helper functions */ +_SOKOL_PRIVATE void _sg_pixelformat_all(sg_pixelformat_info* pfi) { + pfi->sample = true; + pfi->filter = true; + pfi->blend = true; + pfi->render = true; + pfi->msaa = true; +} + +_SOKOL_PRIVATE void _sg_pixelformat_s(sg_pixelformat_info* pfi) { + pfi->sample = true; +} + +_SOKOL_PRIVATE void _sg_pixelformat_sf(sg_pixelformat_info* pfi) { + pfi->sample = true; + pfi->filter = true; +} + +_SOKOL_PRIVATE void _sg_pixelformat_sr(sg_pixelformat_info* pfi) { + pfi->sample = true; + pfi->render = true; +} + +_SOKOL_PRIVATE void _sg_pixelformat_srmd(sg_pixelformat_info* pfi) { + pfi->sample = true; + pfi->render = true; + pfi->msaa = true; + pfi->depth = true; +} + +_SOKOL_PRIVATE void _sg_pixelformat_srm(sg_pixelformat_info* pfi) { + pfi->sample = true; + 
pfi->render = true; + pfi->msaa = true; +} + +_SOKOL_PRIVATE void _sg_pixelformat_sfrm(sg_pixelformat_info* pfi) { + pfi->sample = true; + pfi->filter = true; + pfi->render = true; + pfi->msaa = true; +} +_SOKOL_PRIVATE void _sg_pixelformat_sbrm(sg_pixelformat_info* pfi) { + pfi->sample = true; + pfi->blend = true; + pfi->render = true; + pfi->msaa = true; +} + +_SOKOL_PRIVATE void _sg_pixelformat_sbr(sg_pixelformat_info* pfi) { + pfi->sample = true; + pfi->blend = true; + pfi->render = true; +} + +_SOKOL_PRIVATE void _sg_pixelformat_sfbr(sg_pixelformat_info* pfi) { + pfi->sample = true; + pfi->filter = true; + pfi->blend = true; + pfi->render = true; +} + +/* resolve pass action defaults into a new pass action struct */ +_SOKOL_PRIVATE void _sg_resolve_default_pass_action(const sg_pass_action* from, sg_pass_action* to) { + SOKOL_ASSERT(from && to); + *to = *from; + for (int i = 0; i < SG_MAX_COLOR_ATTACHMENTS; i++) { + if (to->colors[i].action == _SG_ACTION_DEFAULT) { + to->colors[i].action = SG_ACTION_CLEAR; + to->colors[i].value.r = SG_DEFAULT_CLEAR_RED; + to->colors[i].value.g = SG_DEFAULT_CLEAR_GREEN; + to->colors[i].value.b = SG_DEFAULT_CLEAR_BLUE; + to->colors[i].value.a = SG_DEFAULT_CLEAR_ALPHA; + } + } + if (to->depth.action == _SG_ACTION_DEFAULT) { + to->depth.action = SG_ACTION_CLEAR; + to->depth.value = SG_DEFAULT_CLEAR_DEPTH; + } + if (to->stencil.action == _SG_ACTION_DEFAULT) { + to->stencil.action = SG_ACTION_CLEAR; + to->stencil.value = SG_DEFAULT_CLEAR_STENCIL; + } +} + +/*== DUMMY BACKEND IMPL ======================================================*/ +#if defined(SOKOL_DUMMY_BACKEND) + +_SOKOL_PRIVATE void _sg_dummy_setup_backend(const sg_desc* desc) { + SOKOL_ASSERT(desc); + _SOKOL_UNUSED(desc); + _sg.backend = SG_BACKEND_DUMMY; + for (int i = SG_PIXELFORMAT_R8; i < SG_PIXELFORMAT_BC1_RGBA; i++) { + _sg.formats[i].sample = true; + _sg.formats[i].filter = true; + _sg.formats[i].render = true; + _sg.formats[i].blend = true; + _sg.formats[i].msaa = true; + } + _sg.formats[SG_PIXELFORMAT_DEPTH].depth = true; + _sg.formats[SG_PIXELFORMAT_DEPTH_STENCIL].depth = true; +} + +_SOKOL_PRIVATE void _sg_dummy_discard_backend(void) { + /* empty */ +} + +_SOKOL_PRIVATE void _sg_dummy_reset_state_cache(void) { + /* empty*/ +} + +_SOKOL_PRIVATE sg_resource_state _sg_dummy_create_context(_sg_context_t* ctx) { + SOKOL_ASSERT(ctx); + _SOKOL_UNUSED(ctx); + return SG_RESOURCESTATE_VALID; +} + +_SOKOL_PRIVATE void _sg_dummy_destroy_context(_sg_context_t* ctx) { + SOKOL_ASSERT(ctx); + _SOKOL_UNUSED(ctx); +} + +_SOKOL_PRIVATE void _sg_dummy_activate_context(_sg_context_t* ctx) { + SOKOL_ASSERT(ctx); + _SOKOL_UNUSED(ctx); +} + +_SOKOL_PRIVATE sg_resource_state _sg_dummy_create_buffer(_sg_buffer_t* buf, const sg_buffer_desc* desc) { + SOKOL_ASSERT(buf && desc); + _sg_buffer_common_init(&buf->cmn, desc); + return SG_RESOURCESTATE_VALID; +} + +_SOKOL_PRIVATE void _sg_dummy_destroy_buffer(_sg_buffer_t* buf) { + SOKOL_ASSERT(buf); + _SOKOL_UNUSED(buf); +} + +_SOKOL_PRIVATE sg_resource_state _sg_dummy_create_image(_sg_image_t* img, const sg_image_desc* desc) { + SOKOL_ASSERT(img && desc); + _sg_image_common_init(&img->cmn, desc); + return SG_RESOURCESTATE_VALID; +} + +_SOKOL_PRIVATE void _sg_dummy_destroy_image(_sg_image_t* img) { + SOKOL_ASSERT(img); + _SOKOL_UNUSED(img); +} + +_SOKOL_PRIVATE sg_resource_state _sg_dummy_create_shader(_sg_shader_t* shd, const sg_shader_desc* desc) { + SOKOL_ASSERT(shd && desc); + _sg_shader_common_init(&shd->cmn, desc); + return SG_RESOURCESTATE_VALID; +} + 
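/* Editor's sketch, not part of the patched sokol_gfx.h: the _sg_buffer_common_t /
   _sg_image_common_t code earlier in this hunk keeps one data slot for immutable
   resources and SG_NUM_INFLIGHT_FRAMES slots for dynamic ones, and each CPU-side
   update advances active_slot with a wrap-around (see _sg_dummy_update_buffer and
   _sg_dummy_update_image further below). The standalone program below illustrates
   only that rotation; the names and the inflight-frame count of 2 are assumptions
   for illustration. */
#include <stdio.h>

#define NUM_INFLIGHT_FRAMES (2) /* assumed value, mirrors the frames-in-flight idea */

typedef struct {
    int num_slots;   /* 1 for immutable resources, NUM_INFLIGHT_FRAMES otherwise */
    int active_slot; /* slot that the next CPU-side update writes to */
} slot_rotation_t;

static void rotate_slot(slot_rotation_t* r) {
    /* same wrap-around pattern as the dummy backend's update/append functions */
    if (++r->active_slot >= r->num_slots) {
        r->active_slot = 0;
    }
}

int main(void) {
    slot_rotation_t dyn = { NUM_INFLIGHT_FRAMES, 0 };
    for (int frame = 0; frame < 4; frame++) {
        printf("frame %d writes slot %d\n", frame, dyn.active_slot);
        rotate_slot(&dyn); /* the GPU can still be reading the other slot */
    }
    return 0;
}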
+_SOKOL_PRIVATE void _sg_dummy_destroy_shader(_sg_shader_t* shd) { + SOKOL_ASSERT(shd); + _SOKOL_UNUSED(shd); +} + +_SOKOL_PRIVATE sg_resource_state _sg_dummy_create_pipeline(_sg_pipeline_t* pip, _sg_shader_t* shd, const sg_pipeline_desc* desc) { + SOKOL_ASSERT(pip && desc); + pip->shader = shd; + _sg_pipeline_common_init(&pip->cmn, desc); + for (int attr_index = 0; attr_index < SG_MAX_VERTEX_ATTRIBUTES; attr_index++) { + const sg_vertex_attr_desc* a_desc = &desc->layout.attrs[attr_index]; + if (a_desc->format == SG_VERTEXFORMAT_INVALID) { + break; + } + SOKOL_ASSERT(a_desc->buffer_index < SG_MAX_SHADERSTAGE_BUFFERS); + pip->cmn.vertex_layout_valid[a_desc->buffer_index] = true; + } + return SG_RESOURCESTATE_VALID; +} + +_SOKOL_PRIVATE void _sg_dummy_destroy_pipeline(_sg_pipeline_t* pip) { + SOKOL_ASSERT(pip); + _SOKOL_UNUSED(pip); +} + +_SOKOL_PRIVATE sg_resource_state _sg_dummy_create_pass(_sg_pass_t* pass, _sg_image_t** att_images, const sg_pass_desc* desc) { + SOKOL_ASSERT(pass && desc); + SOKOL_ASSERT(att_images && att_images[0]); + + _sg_pass_common_init(&pass->cmn, desc); + + const sg_pass_attachment_desc* att_desc; + for (int i = 0; i < pass->cmn.num_color_atts; i++) { + att_desc = &desc->color_attachments[i]; + SOKOL_ASSERT(att_desc->image.id != SG_INVALID_ID); + SOKOL_ASSERT(0 == pass->dmy.color_atts[i].image); + SOKOL_ASSERT(att_images[i] && (att_images[i]->slot.id == att_desc->image.id)); + SOKOL_ASSERT(_sg_is_valid_rendertarget_color_format(att_images[i]->cmn.pixel_format)); + pass->dmy.color_atts[i].image = att_images[i]; + } + + SOKOL_ASSERT(0 == pass->dmy.ds_att.image); + att_desc = &desc->depth_stencil_attachment; + if (att_desc->image.id != SG_INVALID_ID) { + const int ds_img_index = SG_MAX_COLOR_ATTACHMENTS; + SOKOL_ASSERT(att_images[ds_img_index] && (att_images[ds_img_index]->slot.id == att_desc->image.id)); + SOKOL_ASSERT(_sg_is_valid_rendertarget_depth_format(att_images[ds_img_index]->cmn.pixel_format)); + pass->dmy.ds_att.image = att_images[ds_img_index]; + } + return SG_RESOURCESTATE_VALID; +} + +_SOKOL_PRIVATE void _sg_dummy_destroy_pass(_sg_pass_t* pass) { + SOKOL_ASSERT(pass); + _SOKOL_UNUSED(pass); +} + +_SOKOL_PRIVATE _sg_image_t* _sg_dummy_pass_color_image(const _sg_pass_t* pass, int index) { + SOKOL_ASSERT(pass && (index >= 0) && (index < SG_MAX_COLOR_ATTACHMENTS)); + /* NOTE: may return null */ + return pass->dmy.color_atts[index].image; +} + +_SOKOL_PRIVATE _sg_image_t* _sg_dummy_pass_ds_image(const _sg_pass_t* pass) { + /* NOTE: may return null */ + SOKOL_ASSERT(pass); + return pass->dmy.ds_att.image; +} + +_SOKOL_PRIVATE void _sg_dummy_begin_pass(_sg_pass_t* pass, const sg_pass_action* action, int w, int h) { + SOKOL_ASSERT(action); + _SOKOL_UNUSED(pass); + _SOKOL_UNUSED(action); + _SOKOL_UNUSED(w); + _SOKOL_UNUSED(h); +} + +_SOKOL_PRIVATE void _sg_dummy_end_pass(void) { + /* empty */ +} + +_SOKOL_PRIVATE void _sg_dummy_commit(void) { + /* empty */ +} + +_SOKOL_PRIVATE void _sg_dummy_apply_viewport(int x, int y, int w, int h, bool origin_top_left) { + _SOKOL_UNUSED(x); + _SOKOL_UNUSED(y); + _SOKOL_UNUSED(w); + _SOKOL_UNUSED(h); + _SOKOL_UNUSED(origin_top_left); +} + +_SOKOL_PRIVATE void _sg_dummy_apply_scissor_rect(int x, int y, int w, int h, bool origin_top_left) { + _SOKOL_UNUSED(x); + _SOKOL_UNUSED(y); + _SOKOL_UNUSED(w); + _SOKOL_UNUSED(h); + _SOKOL_UNUSED(origin_top_left); +} + +_SOKOL_PRIVATE void _sg_dummy_apply_pipeline(_sg_pipeline_t* pip) { + SOKOL_ASSERT(pip); + _SOKOL_UNUSED(pip); +} + +_SOKOL_PRIVATE void _sg_dummy_apply_bindings( + 
_sg_pipeline_t* pip, + _sg_buffer_t** vbs, const int* vb_offsets, int num_vbs, + _sg_buffer_t* ib, int ib_offset, + _sg_image_t** vs_imgs, int num_vs_imgs, + _sg_image_t** fs_imgs, int num_fs_imgs) +{ + SOKOL_ASSERT(pip); + SOKOL_ASSERT(vbs && vb_offsets); + SOKOL_ASSERT(vs_imgs); + SOKOL_ASSERT(fs_imgs); + _SOKOL_UNUSED(pip); + _SOKOL_UNUSED(vbs); _SOKOL_UNUSED(vb_offsets); _SOKOL_UNUSED(num_vbs); + _SOKOL_UNUSED(ib); _SOKOL_UNUSED(ib_offset); + _SOKOL_UNUSED(vs_imgs); _SOKOL_UNUSED(num_vs_imgs); + _SOKOL_UNUSED(fs_imgs); _SOKOL_UNUSED(num_fs_imgs); +} + +_SOKOL_PRIVATE void _sg_dummy_apply_uniforms(sg_shader_stage stage_index, int ub_index, const sg_range* data) { + _SOKOL_UNUSED(stage_index); + _SOKOL_UNUSED(ub_index); + _SOKOL_UNUSED(data); +} + +_SOKOL_PRIVATE void _sg_dummy_draw(int base_element, int num_elements, int num_instances) { + _SOKOL_UNUSED(base_element); + _SOKOL_UNUSED(num_elements); + _SOKOL_UNUSED(num_instances); +} + +_SOKOL_PRIVATE void _sg_dummy_update_buffer(_sg_buffer_t* buf, const sg_range* data) { + SOKOL_ASSERT(buf && data && data->ptr && (data->size > 0)); + _SOKOL_UNUSED(data); + if (++buf->cmn.active_slot >= buf->cmn.num_slots) { + buf->cmn.active_slot = 0; + } +} + +_SOKOL_PRIVATE int _sg_dummy_append_buffer(_sg_buffer_t* buf, const sg_range* data, bool new_frame) { + SOKOL_ASSERT(buf && data && data->ptr && (data->size > 0)); + _SOKOL_UNUSED(data); + if (new_frame) { + if (++buf->cmn.active_slot >= buf->cmn.num_slots) { + buf->cmn.active_slot = 0; + } + } + /* NOTE: this is a requirement from WebGPU, but we want identical behaviour across all backend */ + return _sg_roundup((int)data->size, 4); +} + +_SOKOL_PRIVATE void _sg_dummy_update_image(_sg_image_t* img, const sg_image_data* data) { + SOKOL_ASSERT(img && data); + _SOKOL_UNUSED(data); + if (++img->cmn.active_slot >= img->cmn.num_slots) { + img->cmn.active_slot = 0; + } +} + +/*== GL BACKEND ==============================================================*/ +#elif defined(_SOKOL_ANY_GL) + +/*=== OPTIONAL GL LOADER FOR WIN32 ===========================================*/ +#if defined(_SOKOL_USE_WIN32_GL_LOADER) + +// X Macro list of GL function names and signatures +#define _SG_GL_FUNCS \ + _SG_XMACRO(glBindVertexArray, void, (GLuint array)) \ + _SG_XMACRO(glFramebufferTextureLayer, void, (GLenum target, GLenum attachment, GLuint texture, GLint level, GLint layer)) \ + _SG_XMACRO(glGenFramebuffers, void, (GLsizei n, GLuint * framebuffers)) \ + _SG_XMACRO(glBindFramebuffer, void, (GLenum target, GLuint framebuffer)) \ + _SG_XMACRO(glBindRenderbuffer, void, (GLenum target, GLuint renderbuffer)) \ + _SG_XMACRO(glGetStringi, const GLubyte *, (GLenum name, GLuint index)) \ + _SG_XMACRO(glClearBufferfi, void, (GLenum buffer, GLint drawbuffer, GLfloat depth, GLint stencil)) \ + _SG_XMACRO(glClearBufferfv, void, (GLenum buffer, GLint drawbuffer, const GLfloat * value)) \ + _SG_XMACRO(glClearBufferuiv, void, (GLenum buffer, GLint drawbuffer, const GLuint * value)) \ + _SG_XMACRO(glClearBufferiv, void, (GLenum buffer, GLint drawbuffer, const GLint * value)) \ + _SG_XMACRO(glDeleteRenderbuffers, void, (GLsizei n, const GLuint * renderbuffers)) \ + _SG_XMACRO(glUniform4fv, void, (GLint location, GLsizei count, const GLfloat * value)) \ + _SG_XMACRO(glUniform2fv, void, (GLint location, GLsizei count, const GLfloat * value)) \ + _SG_XMACRO(glUseProgram, void, (GLuint program)) \ + _SG_XMACRO(glShaderSource, void, (GLuint shader, GLsizei count, const GLchar *const* string, const GLint * length)) \ + 
_SG_XMACRO(glLinkProgram, void, (GLuint program)) \ + _SG_XMACRO(glGetUniformLocation, GLint, (GLuint program, const GLchar * name)) \ + _SG_XMACRO(glGetShaderiv, void, (GLuint shader, GLenum pname, GLint * params)) \ + _SG_XMACRO(glGetProgramInfoLog, void, (GLuint program, GLsizei bufSize, GLsizei * length, GLchar * infoLog)) \ + _SG_XMACRO(glGetAttribLocation, GLint, (GLuint program, const GLchar * name)) \ + _SG_XMACRO(glDisableVertexAttribArray, void, (GLuint index)) \ + _SG_XMACRO(glDeleteShader, void, (GLuint shader)) \ + _SG_XMACRO(glDeleteProgram, void, (GLuint program)) \ + _SG_XMACRO(glCompileShader, void, (GLuint shader)) \ + _SG_XMACRO(glStencilFuncSeparate, void, (GLenum face, GLenum func, GLint ref, GLuint mask)) \ + _SG_XMACRO(glStencilOpSeparate, void, (GLenum face, GLenum sfail, GLenum dpfail, GLenum dppass)) \ + _SG_XMACRO(glRenderbufferStorageMultisample, void, (GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height)) \ + _SG_XMACRO(glDrawBuffers, void, (GLsizei n, const GLenum * bufs)) \ + _SG_XMACRO(glVertexAttribDivisor, void, (GLuint index, GLuint divisor)) \ + _SG_XMACRO(glBufferSubData, void, (GLenum target, GLintptr offset, GLsizeiptr size, const void * data)) \ + _SG_XMACRO(glGenBuffers, void, (GLsizei n, GLuint * buffers)) \ + _SG_XMACRO(glCheckFramebufferStatus, GLenum, (GLenum target)) \ + _SG_XMACRO(glFramebufferRenderbuffer, void, (GLenum target, GLenum attachment, GLenum renderbuffertarget, GLuint renderbuffer)) \ + _SG_XMACRO(glCompressedTexImage2D, void, (GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLint border, GLsizei imageSize, const void * data)) \ + _SG_XMACRO(glCompressedTexImage3D, void, (GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth, GLint border, GLsizei imageSize, const void * data)) \ + _SG_XMACRO(glActiveTexture, void, (GLenum texture)) \ + _SG_XMACRO(glTexSubImage3D, void, (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLenum type, const void * pixels)) \ + _SG_XMACRO(glUniformMatrix4fv, void, (GLint location, GLsizei count, GLboolean transpose, const GLfloat * value)) \ + _SG_XMACRO(glRenderbufferStorage, void, (GLenum target, GLenum internalformat, GLsizei width, GLsizei height)) \ + _SG_XMACRO(glGenTextures, void, (GLsizei n, GLuint * textures)) \ + _SG_XMACRO(glPolygonOffset, void, (GLfloat factor, GLfloat units)) \ + _SG_XMACRO(glDrawElements, void, (GLenum mode, GLsizei count, GLenum type, const void * indices)) \ + _SG_XMACRO(glDeleteFramebuffers, void, (GLsizei n, const GLuint * framebuffers)) \ + _SG_XMACRO(glBlendEquationSeparate, void, (GLenum modeRGB, GLenum modeAlpha)) \ + _SG_XMACRO(glDeleteTextures, void, (GLsizei n, const GLuint * textures)) \ + _SG_XMACRO(glGetProgramiv, void, (GLuint program, GLenum pname, GLint * params)) \ + _SG_XMACRO(glBindTexture, void, (GLenum target, GLuint texture)) \ + _SG_XMACRO(glTexImage3D, void, (GLenum target, GLint level, GLint internalformat, GLsizei width, GLsizei height, GLsizei depth, GLint border, GLenum format, GLenum type, const void * pixels)) \ + _SG_XMACRO(glCreateShader, GLuint, (GLenum type)) \ + _SG_XMACRO(glTexSubImage2D, void, (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLsizei width, GLsizei height, GLenum format, GLenum type, const void * pixels)) \ + _SG_XMACRO(glClearDepth, void, (GLdouble depth)) \ + _SG_XMACRO(glFramebufferTexture2D, void, (GLenum target, 
GLenum attachment, GLenum textarget, GLuint texture, GLint level)) \ + _SG_XMACRO(glCreateProgram, GLuint, (void)) \ + _SG_XMACRO(glViewport, void, (GLint x, GLint y, GLsizei width, GLsizei height)) \ + _SG_XMACRO(glDeleteBuffers, void, (GLsizei n, const GLuint * buffers)) \ + _SG_XMACRO(glDrawArrays, void, (GLenum mode, GLint first, GLsizei count)) \ + _SG_XMACRO(glDrawElementsInstanced, void, (GLenum mode, GLsizei count, GLenum type, const void * indices, GLsizei instancecount)) \ + _SG_XMACRO(glVertexAttribPointer, void, (GLuint index, GLint size, GLenum type, GLboolean normalized, GLsizei stride, const void * pointer)) \ + _SG_XMACRO(glUniform1i, void, (GLint location, GLint v0)) \ + _SG_XMACRO(glDisable, void, (GLenum cap)) \ + _SG_XMACRO(glColorMask, void, (GLboolean red, GLboolean green, GLboolean blue, GLboolean alpha)) \ + _SG_XMACRO(glColorMaski, void, (GLuint buf, GLboolean red, GLboolean green, GLboolean blue, GLboolean alpha)) \ + _SG_XMACRO(glBindBuffer, void, (GLenum target, GLuint buffer)) \ + _SG_XMACRO(glDeleteVertexArrays, void, (GLsizei n, const GLuint * arrays)) \ + _SG_XMACRO(glDepthMask, void, (GLboolean flag)) \ + _SG_XMACRO(glDrawArraysInstanced, void, (GLenum mode, GLint first, GLsizei count, GLsizei instancecount)) \ + _SG_XMACRO(glClearStencil, void, (GLint s)) \ + _SG_XMACRO(glScissor, void, (GLint x, GLint y, GLsizei width, GLsizei height)) \ + _SG_XMACRO(glUniform3fv, void, (GLint location, GLsizei count, const GLfloat * value)) \ + _SG_XMACRO(glGenRenderbuffers, void, (GLsizei n, GLuint * renderbuffers)) \ + _SG_XMACRO(glBufferData, void, (GLenum target, GLsizeiptr size, const void * data, GLenum usage)) \ + _SG_XMACRO(glBlendFuncSeparate, void, (GLenum sfactorRGB, GLenum dfactorRGB, GLenum sfactorAlpha, GLenum dfactorAlpha)) \ + _SG_XMACRO(glTexParameteri, void, (GLenum target, GLenum pname, GLint param)) \ + _SG_XMACRO(glGetIntegerv, void, (GLenum pname, GLint * data)) \ + _SG_XMACRO(glEnable, void, (GLenum cap)) \ + _SG_XMACRO(glBlitFramebuffer, void, (GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1, GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1, GLbitfield mask, GLenum filter)) \ + _SG_XMACRO(glStencilMask, void, (GLuint mask)) \ + _SG_XMACRO(glAttachShader, void, (GLuint program, GLuint shader)) \ + _SG_XMACRO(glGetError, GLenum, (void)) \ + _SG_XMACRO(glClearColor, void, (GLfloat red, GLfloat green, GLfloat blue, GLfloat alpha)) \ + _SG_XMACRO(glBlendColor, void, (GLfloat red, GLfloat green, GLfloat blue, GLfloat alpha)) \ + _SG_XMACRO(glTexParameterf, void, (GLenum target, GLenum pname, GLfloat param)) \ + _SG_XMACRO(glTexParameterfv, void, (GLenum target, GLenum pname, GLfloat* params)) \ + _SG_XMACRO(glGetShaderInfoLog, void, (GLuint shader, GLsizei bufSize, GLsizei * length, GLchar * infoLog)) \ + _SG_XMACRO(glDepthFunc, void, (GLenum func)) \ + _SG_XMACRO(glStencilOp , void, (GLenum fail, GLenum zfail, GLenum zpass)) \ + _SG_XMACRO(glStencilFunc, void, (GLenum func, GLint ref, GLuint mask)) \ + _SG_XMACRO(glEnableVertexAttribArray, void, (GLuint index)) \ + _SG_XMACRO(glBlendFunc, void, (GLenum sfactor, GLenum dfactor)) \ + _SG_XMACRO(glUniform1fv, void, (GLint location, GLsizei count, const GLfloat * value)) \ + _SG_XMACRO(glReadBuffer, void, (GLenum src)) \ + _SG_XMACRO(glClear, void, (GLbitfield mask)) \ + _SG_XMACRO(glTexImage2D, void, (GLenum target, GLint level, GLint internalformat, GLsizei width, GLsizei height, GLint border, GLenum format, GLenum type, const void * pixels)) \ + _SG_XMACRO(glGenVertexArrays, void, (GLsizei n, 
GLuint * arrays)) \ + _SG_XMACRO(glFrontFace, void, (GLenum mode)) \ + _SG_XMACRO(glCullFace, void, (GLenum mode)) + +// generate GL function pointer typedefs +#define _SG_XMACRO(name, ret, args) typedef ret (GL_APIENTRY* PFN_ ## name) args; +_SG_GL_FUNCS +#undef _SG_XMACRO + +// generate GL function pointers +#define _SG_XMACRO(name, ret, args) static PFN_ ## name name; +_SG_GL_FUNCS +#undef _SG_XMACRO + +// helper function to lookup GL functions in GL DLL +typedef PROC (WINAPI * _sg_wglGetProcAddress)(LPCSTR); +_SOKOL_PRIVATE void* _sg_gl_getprocaddr(const char* name, _sg_wglGetProcAddress wgl_getprocaddress) { + void* proc_addr = (void*) wgl_getprocaddress(name); + if (0 == proc_addr) { + proc_addr = (void*) GetProcAddress(_sg.gl.opengl32_dll, name); + } + SOKOL_ASSERT(proc_addr); + return proc_addr; +} + +// populate GL function pointers +_SOKOL_PRIVATE void _sg_gl_load_opengl(void) { + SOKOL_ASSERT(0 == _sg.gl.opengl32_dll); + _sg.gl.opengl32_dll = LoadLibraryA("opengl32.dll"); + SOKOL_ASSERT(_sg.gl.opengl32_dll); + _sg_wglGetProcAddress wgl_getprocaddress = (_sg_wglGetProcAddress) GetProcAddress(_sg.gl.opengl32_dll, "wglGetProcAddress"); + SOKOL_ASSERT(wgl_getprocaddress); + #define _SG_XMACRO(name, ret, args) name = (PFN_ ## name) _sg_gl_getprocaddr(#name, wgl_getprocaddress); + _SG_GL_FUNCS + #undef _SG_XMACRO +} + +_SOKOL_PRIVATE void _sg_gl_unload_opengl(void) { + SOKOL_ASSERT(_sg.gl.opengl32_dll); + FreeLibrary(_sg.gl.opengl32_dll); + _sg.gl.opengl32_dll = 0; +} +#endif // _SOKOL_USE_WIN32_GL_LOADER + +/*-- type translation --------------------------------------------------------*/ +_SOKOL_PRIVATE GLenum _sg_gl_buffer_target(sg_buffer_type t) { + switch (t) { + case SG_BUFFERTYPE_VERTEXBUFFER: return GL_ARRAY_BUFFER; + case SG_BUFFERTYPE_INDEXBUFFER: return GL_ELEMENT_ARRAY_BUFFER; + default: SOKOL_UNREACHABLE; return 0; + } +} + +_SOKOL_PRIVATE GLenum _sg_gl_texture_target(sg_image_type t) { + switch (t) { + case SG_IMAGETYPE_2D: return GL_TEXTURE_2D; + case SG_IMAGETYPE_CUBE: return GL_TEXTURE_CUBE_MAP; + #if !defined(SOKOL_GLES2) + case SG_IMAGETYPE_3D: return GL_TEXTURE_3D; + case SG_IMAGETYPE_ARRAY: return GL_TEXTURE_2D_ARRAY; + #endif + default: SOKOL_UNREACHABLE; return 0; + } +} + +_SOKOL_PRIVATE GLenum _sg_gl_usage(sg_usage u) { + switch (u) { + case SG_USAGE_IMMUTABLE: return GL_STATIC_DRAW; + case SG_USAGE_DYNAMIC: return GL_DYNAMIC_DRAW; + case SG_USAGE_STREAM: return GL_STREAM_DRAW; + default: SOKOL_UNREACHABLE; return 0; + } +} + +_SOKOL_PRIVATE GLenum _sg_gl_shader_stage(sg_shader_stage stage) { + switch (stage) { + case SG_SHADERSTAGE_VS: return GL_VERTEX_SHADER; + case SG_SHADERSTAGE_FS: return GL_FRAGMENT_SHADER; + default: SOKOL_UNREACHABLE; return 0; + } +} + +_SOKOL_PRIVATE GLint _sg_gl_vertexformat_size(sg_vertex_format fmt) { + switch (fmt) { + case SG_VERTEXFORMAT_FLOAT: return 1; + case SG_VERTEXFORMAT_FLOAT2: return 2; + case SG_VERTEXFORMAT_FLOAT3: return 3; + case SG_VERTEXFORMAT_FLOAT4: return 4; + case SG_VERTEXFORMAT_BYTE4: return 4; + case SG_VERTEXFORMAT_BYTE4N: return 4; + case SG_VERTEXFORMAT_UBYTE4: return 4; + case SG_VERTEXFORMAT_UBYTE4N: return 4; + case SG_VERTEXFORMAT_SHORT2: return 2; + case SG_VERTEXFORMAT_SHORT2N: return 2; + case SG_VERTEXFORMAT_USHORT2N: return 2; + case SG_VERTEXFORMAT_SHORT4: return 4; + case SG_VERTEXFORMAT_SHORT4N: return 4; + case SG_VERTEXFORMAT_USHORT4N: return 4; + case SG_VERTEXFORMAT_UINT10_N2: return 4; + default: SOKOL_UNREACHABLE; return 0; + } +} + +_SOKOL_PRIVATE GLenum 
_sg_gl_vertexformat_type(sg_vertex_format fmt) { + switch (fmt) { + case SG_VERTEXFORMAT_FLOAT: + case SG_VERTEXFORMAT_FLOAT2: + case SG_VERTEXFORMAT_FLOAT3: + case SG_VERTEXFORMAT_FLOAT4: + return GL_FLOAT; + case SG_VERTEXFORMAT_BYTE4: + case SG_VERTEXFORMAT_BYTE4N: + return GL_BYTE; + case SG_VERTEXFORMAT_UBYTE4: + case SG_VERTEXFORMAT_UBYTE4N: + return GL_UNSIGNED_BYTE; + case SG_VERTEXFORMAT_SHORT2: + case SG_VERTEXFORMAT_SHORT2N: + case SG_VERTEXFORMAT_SHORT4: + case SG_VERTEXFORMAT_SHORT4N: + return GL_SHORT; + case SG_VERTEXFORMAT_USHORT2N: + case SG_VERTEXFORMAT_USHORT4N: + return GL_UNSIGNED_SHORT; + case SG_VERTEXFORMAT_UINT10_N2: + return GL_UNSIGNED_INT_2_10_10_10_REV; + default: + SOKOL_UNREACHABLE; return 0; + } +} + +_SOKOL_PRIVATE GLboolean _sg_gl_vertexformat_normalized(sg_vertex_format fmt) { + switch (fmt) { + case SG_VERTEXFORMAT_BYTE4N: + case SG_VERTEXFORMAT_UBYTE4N: + case SG_VERTEXFORMAT_SHORT2N: + case SG_VERTEXFORMAT_USHORT2N: + case SG_VERTEXFORMAT_SHORT4N: + case SG_VERTEXFORMAT_USHORT4N: + case SG_VERTEXFORMAT_UINT10_N2: + return GL_TRUE; + default: + return GL_FALSE; + } +} + +_SOKOL_PRIVATE GLenum _sg_gl_primitive_type(sg_primitive_type t) { + switch (t) { + case SG_PRIMITIVETYPE_POINTS: return GL_POINTS; + case SG_PRIMITIVETYPE_LINES: return GL_LINES; + case SG_PRIMITIVETYPE_LINE_STRIP: return GL_LINE_STRIP; + case SG_PRIMITIVETYPE_TRIANGLES: return GL_TRIANGLES; + case SG_PRIMITIVETYPE_TRIANGLE_STRIP: return GL_TRIANGLE_STRIP; + default: SOKOL_UNREACHABLE; return 0; + } +} + +_SOKOL_PRIVATE GLenum _sg_gl_index_type(sg_index_type t) { + switch (t) { + case SG_INDEXTYPE_NONE: return 0; + case SG_INDEXTYPE_UINT16: return GL_UNSIGNED_SHORT; + case SG_INDEXTYPE_UINT32: return GL_UNSIGNED_INT; + default: SOKOL_UNREACHABLE; return 0; + } +} + +_SOKOL_PRIVATE GLenum _sg_gl_compare_func(sg_compare_func cmp) { + switch (cmp) { + case SG_COMPAREFUNC_NEVER: return GL_NEVER; + case SG_COMPAREFUNC_LESS: return GL_LESS; + case SG_COMPAREFUNC_EQUAL: return GL_EQUAL; + case SG_COMPAREFUNC_LESS_EQUAL: return GL_LEQUAL; + case SG_COMPAREFUNC_GREATER: return GL_GREATER; + case SG_COMPAREFUNC_NOT_EQUAL: return GL_NOTEQUAL; + case SG_COMPAREFUNC_GREATER_EQUAL: return GL_GEQUAL; + case SG_COMPAREFUNC_ALWAYS: return GL_ALWAYS; + default: SOKOL_UNREACHABLE; return 0; + } +} + +_SOKOL_PRIVATE GLenum _sg_gl_stencil_op(sg_stencil_op op) { + switch (op) { + case SG_STENCILOP_KEEP: return GL_KEEP; + case SG_STENCILOP_ZERO: return GL_ZERO; + case SG_STENCILOP_REPLACE: return GL_REPLACE; + case SG_STENCILOP_INCR_CLAMP: return GL_INCR; + case SG_STENCILOP_DECR_CLAMP: return GL_DECR; + case SG_STENCILOP_INVERT: return GL_INVERT; + case SG_STENCILOP_INCR_WRAP: return GL_INCR_WRAP; + case SG_STENCILOP_DECR_WRAP: return GL_DECR_WRAP; + default: SOKOL_UNREACHABLE; return 0; + } +} + +_SOKOL_PRIVATE GLenum _sg_gl_blend_factor(sg_blend_factor f) { + switch (f) { + case SG_BLENDFACTOR_ZERO: return GL_ZERO; + case SG_BLENDFACTOR_ONE: return GL_ONE; + case SG_BLENDFACTOR_SRC_COLOR: return GL_SRC_COLOR; + case SG_BLENDFACTOR_ONE_MINUS_SRC_COLOR: return GL_ONE_MINUS_SRC_COLOR; + case SG_BLENDFACTOR_SRC_ALPHA: return GL_SRC_ALPHA; + case SG_BLENDFACTOR_ONE_MINUS_SRC_ALPHA: return GL_ONE_MINUS_SRC_ALPHA; + case SG_BLENDFACTOR_DST_COLOR: return GL_DST_COLOR; + case SG_BLENDFACTOR_ONE_MINUS_DST_COLOR: return GL_ONE_MINUS_DST_COLOR; + case SG_BLENDFACTOR_DST_ALPHA: return GL_DST_ALPHA; + case SG_BLENDFACTOR_ONE_MINUS_DST_ALPHA: return GL_ONE_MINUS_DST_ALPHA; + case SG_BLENDFACTOR_SRC_ALPHA_SATURATED: 
return GL_SRC_ALPHA_SATURATE; + case SG_BLENDFACTOR_BLEND_COLOR: return GL_CONSTANT_COLOR; + case SG_BLENDFACTOR_ONE_MINUS_BLEND_COLOR: return GL_ONE_MINUS_CONSTANT_COLOR; + case SG_BLENDFACTOR_BLEND_ALPHA: return GL_CONSTANT_ALPHA; + case SG_BLENDFACTOR_ONE_MINUS_BLEND_ALPHA: return GL_ONE_MINUS_CONSTANT_ALPHA; + default: SOKOL_UNREACHABLE; return 0; + } +} + +_SOKOL_PRIVATE GLenum _sg_gl_blend_op(sg_blend_op op) { + switch (op) { + case SG_BLENDOP_ADD: return GL_FUNC_ADD; + case SG_BLENDOP_SUBTRACT: return GL_FUNC_SUBTRACT; + case SG_BLENDOP_REVERSE_SUBTRACT: return GL_FUNC_REVERSE_SUBTRACT; + default: SOKOL_UNREACHABLE; return 0; + } +} + +_SOKOL_PRIVATE GLenum _sg_gl_filter(sg_filter f) { + switch (f) { + case SG_FILTER_NEAREST: return GL_NEAREST; + case SG_FILTER_LINEAR: return GL_LINEAR; + case SG_FILTER_NEAREST_MIPMAP_NEAREST: return GL_NEAREST_MIPMAP_NEAREST; + case SG_FILTER_NEAREST_MIPMAP_LINEAR: return GL_NEAREST_MIPMAP_LINEAR; + case SG_FILTER_LINEAR_MIPMAP_NEAREST: return GL_LINEAR_MIPMAP_NEAREST; + case SG_FILTER_LINEAR_MIPMAP_LINEAR: return GL_LINEAR_MIPMAP_LINEAR; + default: SOKOL_UNREACHABLE; return 0; + } +} + +_SOKOL_PRIVATE GLenum _sg_gl_wrap(sg_wrap w) { + switch (w) { + case SG_WRAP_CLAMP_TO_EDGE: return GL_CLAMP_TO_EDGE; + #if defined(SOKOL_GLCORE33) + case SG_WRAP_CLAMP_TO_BORDER: return GL_CLAMP_TO_BORDER; + #else + case SG_WRAP_CLAMP_TO_BORDER: return GL_CLAMP_TO_EDGE; + #endif + case SG_WRAP_REPEAT: return GL_REPEAT; + case SG_WRAP_MIRRORED_REPEAT: return GL_MIRRORED_REPEAT; + default: SOKOL_UNREACHABLE; return 0; + } +} + +_SOKOL_PRIVATE GLenum _sg_gl_teximage_type(sg_pixel_format fmt) { + switch (fmt) { + case SG_PIXELFORMAT_R8: + case SG_PIXELFORMAT_R8UI: + case SG_PIXELFORMAT_RG8: + case SG_PIXELFORMAT_RG8UI: + case SG_PIXELFORMAT_RGBA8: + case SG_PIXELFORMAT_RGBA8UI: + case SG_PIXELFORMAT_BGRA8: + return GL_UNSIGNED_BYTE; + case SG_PIXELFORMAT_R8SN: + case SG_PIXELFORMAT_R8SI: + case SG_PIXELFORMAT_RG8SN: + case SG_PIXELFORMAT_RG8SI: + case SG_PIXELFORMAT_RGBA8SN: + case SG_PIXELFORMAT_RGBA8SI: + return GL_BYTE; + case SG_PIXELFORMAT_R16: + case SG_PIXELFORMAT_R16UI: + case SG_PIXELFORMAT_RG16: + case SG_PIXELFORMAT_RG16UI: + case SG_PIXELFORMAT_RGBA16: + case SG_PIXELFORMAT_RGBA16UI: + return GL_UNSIGNED_SHORT; + case SG_PIXELFORMAT_R16SN: + case SG_PIXELFORMAT_R16SI: + case SG_PIXELFORMAT_RG16SN: + case SG_PIXELFORMAT_RG16SI: + case SG_PIXELFORMAT_RGBA16SN: + case SG_PIXELFORMAT_RGBA16SI: + return GL_SHORT; + case SG_PIXELFORMAT_R16F: + case SG_PIXELFORMAT_RG16F: + case SG_PIXELFORMAT_RGBA16F: + return GL_HALF_FLOAT; + case SG_PIXELFORMAT_R32UI: + case SG_PIXELFORMAT_RG32UI: + case SG_PIXELFORMAT_RGBA32UI: + return GL_UNSIGNED_INT; + case SG_PIXELFORMAT_R32SI: + case SG_PIXELFORMAT_RG32SI: + case SG_PIXELFORMAT_RGBA32SI: + return GL_INT; + case SG_PIXELFORMAT_R32F: + case SG_PIXELFORMAT_RG32F: + case SG_PIXELFORMAT_RGBA32F: + return GL_FLOAT; + #if !defined(SOKOL_GLES2) + case SG_PIXELFORMAT_RGB10A2: + return GL_UNSIGNED_INT_2_10_10_10_REV; + case SG_PIXELFORMAT_RG11B10F: + return GL_UNSIGNED_INT_10F_11F_11F_REV; + #endif + case SG_PIXELFORMAT_DEPTH: + return GL_UNSIGNED_SHORT; + case SG_PIXELFORMAT_DEPTH_STENCIL: + return GL_UNSIGNED_INT_24_8; + default: + SOKOL_UNREACHABLE; return 0; + } +} + +_SOKOL_PRIVATE GLenum _sg_gl_teximage_format(sg_pixel_format fmt) { + switch (fmt) { + case SG_PIXELFORMAT_R8: + case SG_PIXELFORMAT_R8SN: + case SG_PIXELFORMAT_R16: + case SG_PIXELFORMAT_R16SN: + case SG_PIXELFORMAT_R16F: + case SG_PIXELFORMAT_R32F: + #if 
defined(SOKOL_GLES2) + return GL_LUMINANCE; + #else + if (_sg.gl.gles2) { + return GL_LUMINANCE; + } + else { + return GL_RED; + } + #endif + #if !defined(SOKOL_GLES2) + case SG_PIXELFORMAT_R8UI: + case SG_PIXELFORMAT_R8SI: + case SG_PIXELFORMAT_R16UI: + case SG_PIXELFORMAT_R16SI: + case SG_PIXELFORMAT_R32UI: + case SG_PIXELFORMAT_R32SI: + return GL_RED_INTEGER; + case SG_PIXELFORMAT_RG8: + case SG_PIXELFORMAT_RG8SN: + case SG_PIXELFORMAT_RG16: + case SG_PIXELFORMAT_RG16SN: + case SG_PIXELFORMAT_RG16F: + case SG_PIXELFORMAT_RG32F: + return GL_RG; + case SG_PIXELFORMAT_RG8UI: + case SG_PIXELFORMAT_RG8SI: + case SG_PIXELFORMAT_RG16UI: + case SG_PIXELFORMAT_RG16SI: + case SG_PIXELFORMAT_RG32UI: + case SG_PIXELFORMAT_RG32SI: + return GL_RG_INTEGER; + #endif + case SG_PIXELFORMAT_RGBA8: + case SG_PIXELFORMAT_RGBA8SN: + case SG_PIXELFORMAT_RGBA16: + case SG_PIXELFORMAT_RGBA16SN: + case SG_PIXELFORMAT_RGBA16F: + case SG_PIXELFORMAT_RGBA32F: + case SG_PIXELFORMAT_RGB10A2: + return GL_RGBA; + #if !defined(SOKOL_GLES2) + case SG_PIXELFORMAT_RGBA8UI: + case SG_PIXELFORMAT_RGBA8SI: + case SG_PIXELFORMAT_RGBA16UI: + case SG_PIXELFORMAT_RGBA16SI: + case SG_PIXELFORMAT_RGBA32UI: + case SG_PIXELFORMAT_RGBA32SI: + return GL_RGBA_INTEGER; + #endif + case SG_PIXELFORMAT_RG11B10F: + return GL_RGB; + case SG_PIXELFORMAT_DEPTH: + return GL_DEPTH_COMPONENT; + case SG_PIXELFORMAT_DEPTH_STENCIL: + return GL_DEPTH_STENCIL; + case SG_PIXELFORMAT_BC1_RGBA: + return GL_COMPRESSED_RGBA_S3TC_DXT1_EXT; + case SG_PIXELFORMAT_BC2_RGBA: + return GL_COMPRESSED_RGBA_S3TC_DXT3_EXT; + case SG_PIXELFORMAT_BC3_RGBA: + return GL_COMPRESSED_RGBA_S3TC_DXT5_EXT; + case SG_PIXELFORMAT_BC4_R: + return GL_COMPRESSED_RED_RGTC1; + case SG_PIXELFORMAT_BC4_RSN: + return GL_COMPRESSED_SIGNED_RED_RGTC1; + case SG_PIXELFORMAT_BC5_RG: + return GL_COMPRESSED_RED_GREEN_RGTC2; + case SG_PIXELFORMAT_BC5_RGSN: + return GL_COMPRESSED_SIGNED_RED_GREEN_RGTC2; + case SG_PIXELFORMAT_BC6H_RGBF: + return GL_COMPRESSED_RGB_BPTC_SIGNED_FLOAT_ARB; + case SG_PIXELFORMAT_BC6H_RGBUF: + return GL_COMPRESSED_RGB_BPTC_UNSIGNED_FLOAT_ARB; + case SG_PIXELFORMAT_BC7_RGBA: + return GL_COMPRESSED_RGBA_BPTC_UNORM_ARB; + case SG_PIXELFORMAT_PVRTC_RGB_2BPP: + return GL_COMPRESSED_RGB_PVRTC_2BPPV1_IMG; + case SG_PIXELFORMAT_PVRTC_RGB_4BPP: + return GL_COMPRESSED_RGB_PVRTC_4BPPV1_IMG; + case SG_PIXELFORMAT_PVRTC_RGBA_2BPP: + return GL_COMPRESSED_RGBA_PVRTC_2BPPV1_IMG; + case SG_PIXELFORMAT_PVRTC_RGBA_4BPP: + return GL_COMPRESSED_RGBA_PVRTC_4BPPV1_IMG; + case SG_PIXELFORMAT_ETC2_RGB8: + return GL_COMPRESSED_RGB8_ETC2; + case SG_PIXELFORMAT_ETC2_RGB8A1: + return GL_COMPRESSED_RGB8_PUNCHTHROUGH_ALPHA1_ETC2; + case SG_PIXELFORMAT_ETC2_RGBA8: + return GL_COMPRESSED_RGBA8_ETC2_EAC; + case SG_PIXELFORMAT_ETC2_RG11: + return GL_COMPRESSED_RG11_EAC; + case SG_PIXELFORMAT_ETC2_RG11SN: + return GL_COMPRESSED_SIGNED_RG11_EAC; + default: + SOKOL_UNREACHABLE; return 0; + } +} + +_SOKOL_PRIVATE GLenum _sg_gl_teximage_internal_format(sg_pixel_format fmt) { + #if defined(SOKOL_GLES2) + return _sg_gl_teximage_format(fmt); + #else + if (_sg.gl.gles2) { + return _sg_gl_teximage_format(fmt); + } + else { + switch (fmt) { + case SG_PIXELFORMAT_R8: return GL_R8; + case SG_PIXELFORMAT_R8SN: return GL_R8_SNORM; + case SG_PIXELFORMAT_R8UI: return GL_R8UI; + case SG_PIXELFORMAT_R8SI: return GL_R8I; + #if !defined(SOKOL_GLES3) + case SG_PIXELFORMAT_R16: return GL_R16; + case SG_PIXELFORMAT_R16SN: return GL_R16_SNORM; + #endif + case SG_PIXELFORMAT_R16UI: return GL_R16UI; + case SG_PIXELFORMAT_R16SI: 
return GL_R16I; + case SG_PIXELFORMAT_R16F: return GL_R16F; + case SG_PIXELFORMAT_RG8: return GL_RG8; + case SG_PIXELFORMAT_RG8SN: return GL_RG8_SNORM; + case SG_PIXELFORMAT_RG8UI: return GL_RG8UI; + case SG_PIXELFORMAT_RG8SI: return GL_RG8I; + case SG_PIXELFORMAT_R32UI: return GL_R32UI; + case SG_PIXELFORMAT_R32SI: return GL_R32I; + case SG_PIXELFORMAT_R32F: return GL_R32F; + #if !defined(SOKOL_GLES3) + case SG_PIXELFORMAT_RG16: return GL_RG16; + case SG_PIXELFORMAT_RG16SN: return GL_RG16_SNORM; + #endif + case SG_PIXELFORMAT_RG16UI: return GL_RG16UI; + case SG_PIXELFORMAT_RG16SI: return GL_RG16I; + case SG_PIXELFORMAT_RG16F: return GL_RG16F; + case SG_PIXELFORMAT_RGBA8: return GL_RGBA8; + case SG_PIXELFORMAT_RGBA8SN: return GL_RGBA8_SNORM; + case SG_PIXELFORMAT_RGBA8UI: return GL_RGBA8UI; + case SG_PIXELFORMAT_RGBA8SI: return GL_RGBA8I; + case SG_PIXELFORMAT_RGB10A2: return GL_RGB10_A2; + case SG_PIXELFORMAT_RG11B10F: return GL_R11F_G11F_B10F; + case SG_PIXELFORMAT_RG32UI: return GL_RG32UI; + case SG_PIXELFORMAT_RG32SI: return GL_RG32I; + case SG_PIXELFORMAT_RG32F: return GL_RG32F; + #if !defined(SOKOL_GLES3) + case SG_PIXELFORMAT_RGBA16: return GL_RGBA16; + case SG_PIXELFORMAT_RGBA16SN: return GL_RGBA16_SNORM; + #endif + case SG_PIXELFORMAT_RGBA16UI: return GL_RGBA16UI; + case SG_PIXELFORMAT_RGBA16SI: return GL_RGBA16I; + case SG_PIXELFORMAT_RGBA16F: return GL_RGBA16F; + case SG_PIXELFORMAT_RGBA32UI: return GL_RGBA32UI; + case SG_PIXELFORMAT_RGBA32SI: return GL_RGBA32I; + case SG_PIXELFORMAT_RGBA32F: return GL_RGBA32F; + case SG_PIXELFORMAT_DEPTH: return GL_DEPTH_COMPONENT16; + case SG_PIXELFORMAT_DEPTH_STENCIL: return GL_DEPTH24_STENCIL8; + case SG_PIXELFORMAT_BC1_RGBA: return GL_COMPRESSED_RGBA_S3TC_DXT1_EXT; + case SG_PIXELFORMAT_BC2_RGBA: return GL_COMPRESSED_RGBA_S3TC_DXT3_EXT; + case SG_PIXELFORMAT_BC3_RGBA: return GL_COMPRESSED_RGBA_S3TC_DXT5_EXT; + case SG_PIXELFORMAT_BC4_R: return GL_COMPRESSED_RED_RGTC1; + case SG_PIXELFORMAT_BC4_RSN: return GL_COMPRESSED_SIGNED_RED_RGTC1; + case SG_PIXELFORMAT_BC5_RG: return GL_COMPRESSED_RED_GREEN_RGTC2; + case SG_PIXELFORMAT_BC5_RGSN: return GL_COMPRESSED_SIGNED_RED_GREEN_RGTC2; + case SG_PIXELFORMAT_BC6H_RGBF: return GL_COMPRESSED_RGB_BPTC_SIGNED_FLOAT_ARB; + case SG_PIXELFORMAT_BC6H_RGBUF: return GL_COMPRESSED_RGB_BPTC_UNSIGNED_FLOAT_ARB; + case SG_PIXELFORMAT_BC7_RGBA: return GL_COMPRESSED_RGBA_BPTC_UNORM_ARB; + case SG_PIXELFORMAT_PVRTC_RGB_2BPP: return GL_COMPRESSED_RGB_PVRTC_2BPPV1_IMG; + case SG_PIXELFORMAT_PVRTC_RGB_4BPP: return GL_COMPRESSED_RGB_PVRTC_4BPPV1_IMG; + case SG_PIXELFORMAT_PVRTC_RGBA_2BPP: return GL_COMPRESSED_RGBA_PVRTC_2BPPV1_IMG; + case SG_PIXELFORMAT_PVRTC_RGBA_4BPP: return GL_COMPRESSED_RGBA_PVRTC_4BPPV1_IMG; + case SG_PIXELFORMAT_ETC2_RGB8: return GL_COMPRESSED_RGB8_ETC2; + case SG_PIXELFORMAT_ETC2_RGB8A1: return GL_COMPRESSED_RGB8_PUNCHTHROUGH_ALPHA1_ETC2; + case SG_PIXELFORMAT_ETC2_RGBA8: return GL_COMPRESSED_RGBA8_ETC2_EAC; + case SG_PIXELFORMAT_ETC2_RG11: return GL_COMPRESSED_RG11_EAC; + case SG_PIXELFORMAT_ETC2_RG11SN: return GL_COMPRESSED_SIGNED_RG11_EAC; + default: SOKOL_UNREACHABLE; return 0; + } + } + #endif +} + +_SOKOL_PRIVATE GLenum _sg_gl_cubeface_target(int face_index) { + switch (face_index) { + case 0: return GL_TEXTURE_CUBE_MAP_POSITIVE_X; + case 1: return GL_TEXTURE_CUBE_MAP_NEGATIVE_X; + case 2: return GL_TEXTURE_CUBE_MAP_POSITIVE_Y; + case 3: return GL_TEXTURE_CUBE_MAP_NEGATIVE_Y; + case 4: return GL_TEXTURE_CUBE_MAP_POSITIVE_Z; + case 5: return GL_TEXTURE_CUBE_MAP_NEGATIVE_Z; + default: 
SOKOL_UNREACHABLE; return 0; + } +} + +_SOKOL_PRIVATE GLenum _sg_gl_depth_attachment_format(sg_pixel_format fmt) { + switch (fmt) { + case SG_PIXELFORMAT_DEPTH: return GL_DEPTH_COMPONENT16; + case SG_PIXELFORMAT_DEPTH_STENCIL: return GL_DEPTH24_STENCIL8; + default: SOKOL_UNREACHABLE; return 0; + } +} + +/* see: https://www.khronos.org/registry/OpenGL-Refpages/es3.0/html/glTexImage2D.xhtml */ +_SOKOL_PRIVATE void _sg_gl_init_pixelformats(bool has_bgra) { + #if !defined(SOKOL_GLES2) + if (!_sg.gl.gles2) { + _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_R8]); + } + else { + _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_R8]); + } + #else + _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_R8]); + #endif + #if !defined(SOKOL_GLES2) + if (!_sg.gl.gles2) { + _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_R8SN]); + _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_R8UI]); + _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_R8SI]); + #if !defined(SOKOL_GLES3) + _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_R16]); + _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_R16SN]); + #endif + _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_R16UI]); + _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_R16SI]); + _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_RG8]); + _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_RG8SN]); + _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_RG8UI]); + _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_RG8SI]); + _sg_pixelformat_sr(&_sg.formats[SG_PIXELFORMAT_R32UI]); + _sg_pixelformat_sr(&_sg.formats[SG_PIXELFORMAT_R32SI]); + #if !defined(SOKOL_GLES3) + _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_RG16]); + _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_RG16SN]); + #endif + _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_RG16UI]); + _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_RG16SI]); + } + #endif + _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_RGBA8]); + #if !defined(SOKOL_GLES2) + if (!_sg.gl.gles2) { + _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_RGBA8SN]); + _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_RGBA8UI]); + _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_RGBA8SI]); + } + #endif + if (has_bgra) { + _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_BGRA8]); + } + #if !defined(SOKOL_GLES2) + if (!_sg.gl.gles2) { + _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_RGB10A2]); + _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_RG11B10F]); + _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_RG32UI]); + _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_RG32SI]); + #if !defined(SOKOL_GLES3) + _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_RGBA16]); + _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_RGBA16SN]); + #endif + _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_RGBA16UI]); + _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_RGBA16SI]); + _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_RGBA32UI]); + _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_RGBA32SI]); + } + #endif + // FIXME: WEBGL_depth_texture extension? 
+ _sg_pixelformat_srmd(&_sg.formats[SG_PIXELFORMAT_DEPTH]); + _sg_pixelformat_srmd(&_sg.formats[SG_PIXELFORMAT_DEPTH_STENCIL]); +} + +/* FIXME: OES_half_float_blend */ +_SOKOL_PRIVATE void _sg_gl_init_pixelformats_half_float(bool has_colorbuffer_half_float, bool has_texture_half_float_linear) { + #if !defined(SOKOL_GLES2) + if (!_sg.gl.gles2) { + if (has_texture_half_float_linear) { + if (has_colorbuffer_half_float) { + _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_R16F]); + _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_RG16F]); + _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_RGBA16F]); + } + else { + _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_R16F]); + _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_RG16F]); + _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_RGBA16F]); + } + } + else { + if (has_colorbuffer_half_float) { + _sg_pixelformat_sbrm(&_sg.formats[SG_PIXELFORMAT_R16F]); + _sg_pixelformat_sbrm(&_sg.formats[SG_PIXELFORMAT_RG16F]); + _sg_pixelformat_sbrm(&_sg.formats[SG_PIXELFORMAT_RGBA16F]); + } + else { + _sg_pixelformat_s(&_sg.formats[SG_PIXELFORMAT_R16F]); + _sg_pixelformat_s(&_sg.formats[SG_PIXELFORMAT_RG16F]); + _sg_pixelformat_s(&_sg.formats[SG_PIXELFORMAT_RGBA16F]); + } + } + } + else { + #endif + /* GLES2 can only render to RGBA, and there's no RG format */ + if (has_texture_half_float_linear) { + if (has_colorbuffer_half_float) { + _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_RGBA16F]); + } + else { + _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_RGBA16F]); + } + _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_R16F]); + } + else { + if (has_colorbuffer_half_float) { + _sg_pixelformat_sbrm(&_sg.formats[SG_PIXELFORMAT_RGBA16F]); + } + else { + _sg_pixelformat_s(&_sg.formats[SG_PIXELFORMAT_RGBA16F]); + } + _sg_pixelformat_s(&_sg.formats[SG_PIXELFORMAT_R16F]); + } + #if !defined(SOKOL_GLES2) + } + #endif +} + +_SOKOL_PRIVATE void _sg_gl_init_pixelformats_float(bool has_colorbuffer_float, bool has_texture_float_linear, bool has_float_blend) { + #if !defined(SOKOL_GLES2) + if (!_sg.gl.gles2) { + if (has_texture_float_linear) { + if (has_colorbuffer_float) { + if (has_float_blend) { + _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_R32F]); + _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_RG32F]); + _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_RGBA32F]); + } + else { + _sg_pixelformat_sfrm(&_sg.formats[SG_PIXELFORMAT_R32F]); + _sg_pixelformat_sfrm(&_sg.formats[SG_PIXELFORMAT_RG32F]); + _sg_pixelformat_sfrm(&_sg.formats[SG_PIXELFORMAT_RGBA32F]); + } + } + else { + _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_R32F]); + _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_RG32F]); + _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_RGBA32F]); + } + } + else { + if (has_colorbuffer_float) { + _sg_pixelformat_sbrm(&_sg.formats[SG_PIXELFORMAT_R32F]); + _sg_pixelformat_sbrm(&_sg.formats[SG_PIXELFORMAT_RG32F]); + _sg_pixelformat_sbrm(&_sg.formats[SG_PIXELFORMAT_RGBA32F]); + } + else { + _sg_pixelformat_s(&_sg.formats[SG_PIXELFORMAT_R32F]); + _sg_pixelformat_s(&_sg.formats[SG_PIXELFORMAT_RG32F]); + _sg_pixelformat_s(&_sg.formats[SG_PIXELFORMAT_RGBA32F]); + } + } + } + else { + #endif + /* GLES2 can only render to RGBA, and there's no RG format */ + if (has_texture_float_linear) { + if (has_colorbuffer_float) { + if (has_float_blend) { + _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_RGBA32F]); + } + else { + _sg_pixelformat_sfrm(&_sg.formats[SG_PIXELFORMAT_RGBA32F]); + } + } + else { + _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_RGBA32F]); + } + 
_sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_R32F]); + } + else { + if (has_colorbuffer_float) { + _sg_pixelformat_sbrm(&_sg.formats[SG_PIXELFORMAT_RGBA32F]); + } + else { + _sg_pixelformat_s(&_sg.formats[SG_PIXELFORMAT_RGBA32F]); + } + _sg_pixelformat_s(&_sg.formats[SG_PIXELFORMAT_R32F]); + } + #if !defined(SOKOL_GLES2) + } + #endif +} + +_SOKOL_PRIVATE void _sg_gl_init_pixelformats_s3tc(void) { + _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_BC1_RGBA]); + _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_BC2_RGBA]); + _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_BC3_RGBA]); +} + +_SOKOL_PRIVATE void _sg_gl_init_pixelformats_rgtc(void) { + _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_BC4_R]); + _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_BC4_RSN]); + _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_BC5_RG]); + _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_BC5_RGSN]); +} + +_SOKOL_PRIVATE void _sg_gl_init_pixelformats_bptc(void) { + _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_BC6H_RGBF]); + _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_BC6H_RGBUF]); + _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_BC7_RGBA]); +} + +_SOKOL_PRIVATE void _sg_gl_init_pixelformats_pvrtc(void) { + _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_PVRTC_RGB_2BPP]); + _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_PVRTC_RGB_4BPP]); + _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_PVRTC_RGBA_2BPP]); + _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_PVRTC_RGBA_4BPP]); +} + +_SOKOL_PRIVATE void _sg_gl_init_pixelformats_etc2(void) { + _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_ETC2_RGB8]); + _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_ETC2_RGB8A1]); + _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_ETC2_RGBA8]); + _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_ETC2_RG11]); + _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_ETC2_RG11SN]); +} + +_SOKOL_PRIVATE void _sg_gl_init_limits(void) { + _SG_GL_CHECK_ERROR(); + GLint gl_int; + glGetIntegerv(GL_MAX_TEXTURE_SIZE, &gl_int); + _SG_GL_CHECK_ERROR(); + _sg.limits.max_image_size_2d = gl_int; + _sg.limits.max_image_size_array = gl_int; + glGetIntegerv(GL_MAX_CUBE_MAP_TEXTURE_SIZE, &gl_int); + _SG_GL_CHECK_ERROR(); + _sg.limits.max_image_size_cube = gl_int; + glGetIntegerv(GL_MAX_VERTEX_ATTRIBS, &gl_int); + _SG_GL_CHECK_ERROR(); + if (gl_int > SG_MAX_VERTEX_ATTRIBUTES) { + gl_int = SG_MAX_VERTEX_ATTRIBUTES; + } + _sg.limits.max_vertex_attrs = gl_int; + #if !defined(SOKOL_GLES2) + if (!_sg.gl.gles2) { + glGetIntegerv(GL_MAX_3D_TEXTURE_SIZE, &gl_int); + _SG_GL_CHECK_ERROR(); + _sg.limits.max_image_size_3d = gl_int; + glGetIntegerv(GL_MAX_ARRAY_TEXTURE_LAYERS, &gl_int); + _SG_GL_CHECK_ERROR(); + _sg.limits.max_image_array_layers = gl_int; + } + #endif + if (_sg.gl.ext_anisotropic) { + glGetIntegerv(GL_MAX_TEXTURE_MAX_ANISOTROPY_EXT, &gl_int); + _SG_GL_CHECK_ERROR(); + _sg.gl.max_anisotropy = gl_int; + } + else { + _sg.gl.max_anisotropy = 1; + } + glGetIntegerv(GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS, &gl_int); + _SG_GL_CHECK_ERROR(); + _sg.gl.max_combined_texture_image_units = gl_int; +} + +#if defined(SOKOL_GLCORE33) +_SOKOL_PRIVATE void _sg_gl_init_caps_glcore33(void) { + _sg.backend = SG_BACKEND_GLCORE33; + + _sg.features.origin_top_left = false; + _sg.features.instancing = true; + _sg.features.multiple_render_targets = true; + _sg.features.msaa_render_targets = true; + _sg.features.imagetype_3d = true; + _sg.features.imagetype_array = true; + _sg.features.image_clamp_to_border = true; + _sg.features.mrt_independent_blend_state = false; + 
_sg.features.mrt_independent_write_mask = true; + + /* scan extensions */ + bool has_s3tc = false; /* BC1..BC3 */ + bool has_rgtc = false; /* BC4 and BC5 */ + bool has_bptc = false; /* BC6H and BC7 */ + bool has_pvrtc = false; + bool has_etc2 = false; + GLint num_ext = 0; + glGetIntegerv(GL_NUM_EXTENSIONS, &num_ext); + for (int i = 0; i < num_ext; i++) { + const char* ext = (const char*) glGetStringi(GL_EXTENSIONS, (GLuint)i); + if (ext) { + if (strstr(ext, "_texture_compression_s3tc")) { + has_s3tc = true; + } + else if (strstr(ext, "_texture_compression_rgtc")) { + has_rgtc = true; + } + else if (strstr(ext, "_texture_compression_bptc")) { + has_bptc = true; + } + else if (strstr(ext, "_texture_compression_pvrtc")) { + has_pvrtc = true; + } + else if (strstr(ext, "_ES3_compatibility")) { + has_etc2 = true; + } + else if (strstr(ext, "_texture_filter_anisotropic")) { + _sg.gl.ext_anisotropic = true; + } + } + } + + /* limits */ + _sg_gl_init_limits(); + + /* pixel formats */ + const bool has_bgra = false; /* not a bug */ + const bool has_colorbuffer_float = true; + const bool has_colorbuffer_half_float = true; + const bool has_texture_float_linear = true; /* FIXME??? */ + const bool has_texture_half_float_linear = true; + const bool has_float_blend = true; + _sg_gl_init_pixelformats(has_bgra); + _sg_gl_init_pixelformats_float(has_colorbuffer_float, has_texture_float_linear, has_float_blend); + _sg_gl_init_pixelformats_half_float(has_colorbuffer_half_float, has_texture_half_float_linear); + if (has_s3tc) { + _sg_gl_init_pixelformats_s3tc(); + } + if (has_rgtc) { + _sg_gl_init_pixelformats_rgtc(); + } + if (has_bptc) { + _sg_gl_init_pixelformats_bptc(); + } + if (has_pvrtc) { + _sg_gl_init_pixelformats_pvrtc(); + } + if (has_etc2) { + _sg_gl_init_pixelformats_etc2(); + } +} +#endif + +#if defined(SOKOL_GLES3) +_SOKOL_PRIVATE void _sg_gl_init_caps_gles3(void) { + _sg.backend = SG_BACKEND_GLES3; + + _sg.features.origin_top_left = false; + _sg.features.instancing = true; + _sg.features.multiple_render_targets = true; + _sg.features.msaa_render_targets = true; + _sg.features.imagetype_3d = true; + _sg.features.imagetype_array = true; + _sg.features.image_clamp_to_border = false; + _sg.features.mrt_independent_blend_state = false; + _sg.features.mrt_independent_write_mask = false; + + bool has_s3tc = false; /* BC1..BC3 */ + bool has_rgtc = false; /* BC4 and BC5 */ + bool has_bptc = false; /* BC6H and BC7 */ + bool has_pvrtc = false; + #if defined(__EMSCRIPTEN__) + bool has_etc2 = false; + #else + bool has_etc2 = true; + #endif + bool has_colorbuffer_float = false; + bool has_colorbuffer_half_float = false; + bool has_texture_float_linear = false; + bool has_float_blend = false; + GLint num_ext = 0; + glGetIntegerv(GL_NUM_EXTENSIONS, &num_ext); + for (int i = 0; i < num_ext; i++) { + const char* ext = (const char*) glGetStringi(GL_EXTENSIONS, (GLuint)i); + if (ext) { + if (strstr(ext, "_texture_compression_s3tc")) { + has_s3tc = true; + } + else if (strstr(ext, "_compressed_texture_s3tc")) { + has_s3tc = true; + } + else if (strstr(ext, "_texture_compression_rgtc")) { + has_rgtc = true; + } + else if (strstr(ext, "_texture_compression_bptc")) { + has_bptc = true; + } + else if (strstr(ext, "_texture_compression_pvrtc")) { + has_pvrtc = true; + } + else if (strstr(ext, "_compressed_texture_pvrtc")) { + has_pvrtc = true; + } + else if (strstr(ext, "_compressed_texture_etc")) { + has_etc2 = true; + } + else if (strstr(ext, "_color_buffer_float")) { + has_colorbuffer_float = true; + } + else if 
(strstr(ext, "_color_buffer_half_float")) { + has_colorbuffer_half_float = true; + } + else if (strstr(ext, "_texture_float_linear")) { + has_texture_float_linear = true; + } + else if (strstr(ext, "_float_blend")) { + has_float_blend = true; + } + else if (strstr(ext, "_texture_filter_anisotropic")) { + _sg.gl.ext_anisotropic = true; + } + } + } + + /* on WebGL2, color_buffer_float also includes 16-bit formats + see: https://developer.mozilla.org/en-US/docs/Web/API/EXT_color_buffer_float + */ + #if defined(__EMSCRIPTEN__) + has_colorbuffer_half_float = has_colorbuffer_float; + #endif + + /* limits */ + _sg_gl_init_limits(); + + /* pixel formats */ + const bool has_texture_half_float_linear = true; + const bool has_bgra = false; /* not a bug */ + _sg_gl_init_pixelformats(has_bgra); + _sg_gl_init_pixelformats_float(has_colorbuffer_float, has_texture_float_linear, has_float_blend); + _sg_gl_init_pixelformats_half_float(has_colorbuffer_half_float, has_texture_half_float_linear); + if (has_s3tc) { + _sg_gl_init_pixelformats_s3tc(); + } + if (has_rgtc) { + _sg_gl_init_pixelformats_rgtc(); + } + if (has_bptc) { + _sg_gl_init_pixelformats_bptc(); + } + if (has_pvrtc) { + _sg_gl_init_pixelformats_pvrtc(); + } + if (has_etc2) { + _sg_gl_init_pixelformats_etc2(); + } +} +#endif + +#if defined(SOKOL_GLES3) || defined(SOKOL_GLES2) +_SOKOL_PRIVATE void _sg_gl_init_caps_gles2(void) { + _sg.backend = SG_BACKEND_GLES2; + + bool has_s3tc = false; /* BC1..BC3 */ + bool has_rgtc = false; /* BC4 and BC5 */ + bool has_bptc = false; /* BC6H and BC7 */ + bool has_pvrtc = false; + bool has_etc2 = false; + bool has_texture_float = false; + bool has_texture_float_linear = false; + bool has_colorbuffer_float = false; + bool has_float_blend = false; + bool has_instancing = false; + const char* ext = (const char*) glGetString(GL_EXTENSIONS); + if (ext) { + has_s3tc = strstr(ext, "_texture_compression_s3tc") || strstr(ext, "_compressed_texture_s3tc"); + has_rgtc = strstr(ext, "_texture_compression_rgtc"); + has_bptc = strstr(ext, "_texture_compression_bptc"); + has_pvrtc = strstr(ext, "_texture_compression_pvrtc") || strstr(ext, "_compressed_texture_pvrtc"); + has_etc2 = strstr(ext, "_compressed_texture_etc"); + has_texture_float = strstr(ext, "_texture_float"); + has_texture_float_linear = strstr(ext, "_texture_float_linear"); + has_colorbuffer_float = strstr(ext, "_color_buffer_float"); + has_float_blend = strstr(ext, "_float_blend"); + /* don't bother with half_float support on WebGL1 + has_texture_half_float = strstr(ext, "_texture_half_float"); + has_texture_half_float_linear = strstr(ext, "_texture_half_float_linear"); + has_colorbuffer_half_float = strstr(ext, "_color_buffer_half_float"); + */ + has_instancing = strstr(ext, "_instanced_arrays"); + _sg.gl.ext_anisotropic = strstr(ext, "ext_anisotropic"); + } + + _sg.features.origin_top_left = false; + #if defined(_SOKOL_GL_INSTANCING_ENABLED) + _sg.features.instancing = has_instancing; + #endif + _sg.features.multiple_render_targets = false; + _sg.features.msaa_render_targets = false; + _sg.features.imagetype_3d = false; + _sg.features.imagetype_array = false; + _sg.features.image_clamp_to_border = false; + _sg.features.mrt_independent_blend_state = false; + _sg.features.mrt_independent_write_mask = false; + + /* limits */ + _sg_gl_init_limits(); + + /* pixel formats */ + const bool has_bgra = false; /* not a bug */ + const bool has_texture_half_float = false; + const bool has_texture_half_float_linear = false; + const bool has_colorbuffer_half_float = false; + 
_sg_gl_init_pixelformats(has_bgra); + if (has_texture_float) { + _sg_gl_init_pixelformats_float(has_colorbuffer_float, has_texture_float_linear, has_float_blend); + } + if (has_texture_half_float) { + _sg_gl_init_pixelformats_half_float(has_colorbuffer_half_float, has_texture_half_float_linear); + } + if (has_s3tc) { + _sg_gl_init_pixelformats_s3tc(); + } + if (has_rgtc) { + _sg_gl_init_pixelformats_rgtc(); + } + if (has_bptc) { + _sg_gl_init_pixelformats_bptc(); + } + if (has_pvrtc) { + _sg_gl_init_pixelformats_pvrtc(); + } + if (has_etc2) { + _sg_gl_init_pixelformats_etc2(); + } + /* GLES2 doesn't allow multi-sampled render targets at all */ + for (int i = 0; i < _SG_PIXELFORMAT_NUM; i++) { + _sg.formats[i].msaa = false; + } +} +#endif + +/*-- state cache implementation ----------------------------------------------*/ +_SOKOL_PRIVATE void _sg_gl_cache_clear_buffer_bindings(bool force) { + if (force || (_sg.gl.cache.vertex_buffer != 0)) { + glBindBuffer(GL_ARRAY_BUFFER, 0); + _sg.gl.cache.vertex_buffer = 0; + } + if (force || (_sg.gl.cache.index_buffer != 0)) { + glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0); + _sg.gl.cache.index_buffer = 0; + } +} + +_SOKOL_PRIVATE void _sg_gl_cache_bind_buffer(GLenum target, GLuint buffer) { + SOKOL_ASSERT((GL_ARRAY_BUFFER == target) || (GL_ELEMENT_ARRAY_BUFFER == target)); + if (target == GL_ARRAY_BUFFER) { + if (_sg.gl.cache.vertex_buffer != buffer) { + _sg.gl.cache.vertex_buffer = buffer; + glBindBuffer(target, buffer); + } + } + else { + if (_sg.gl.cache.index_buffer != buffer) { + _sg.gl.cache.index_buffer = buffer; + glBindBuffer(target, buffer); + } + } +} + +_SOKOL_PRIVATE void _sg_gl_cache_store_buffer_binding(GLenum target) { + if (target == GL_ARRAY_BUFFER) { + _sg.gl.cache.stored_vertex_buffer = _sg.gl.cache.vertex_buffer; + } + else { + _sg.gl.cache.stored_index_buffer = _sg.gl.cache.index_buffer; + } +} + +_SOKOL_PRIVATE void _sg_gl_cache_restore_buffer_binding(GLenum target) { + if (target == GL_ARRAY_BUFFER) { + if (_sg.gl.cache.stored_vertex_buffer != 0) { + /* we only care restoring valid ids */ + _sg_gl_cache_bind_buffer(target, _sg.gl.cache.stored_vertex_buffer); + _sg.gl.cache.stored_vertex_buffer = 0; + } + } + else { + if (_sg.gl.cache.stored_index_buffer != 0) { + /* we only care restoring valid ids */ + _sg_gl_cache_bind_buffer(target, _sg.gl.cache.stored_index_buffer); + _sg.gl.cache.stored_index_buffer = 0; + } + } +} + +/* called when from _sg_gl_destroy_buffer() */ +_SOKOL_PRIVATE void _sg_gl_cache_invalidate_buffer(GLuint buf) { + if (buf == _sg.gl.cache.vertex_buffer) { + _sg.gl.cache.vertex_buffer = 0; + glBindBuffer(GL_ARRAY_BUFFER, 0); + } + if (buf == _sg.gl.cache.index_buffer) { + _sg.gl.cache.index_buffer = 0; + glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0); + } + if (buf == _sg.gl.cache.stored_vertex_buffer) { + _sg.gl.cache.stored_vertex_buffer = 0; + } + if (buf == _sg.gl.cache.stored_index_buffer) { + _sg.gl.cache.stored_index_buffer = 0; + } + for (int i = 0; i < SG_MAX_VERTEX_ATTRIBUTES; i++) { + if (buf == _sg.gl.cache.attrs[i].gl_vbuf) { + _sg.gl.cache.attrs[i].gl_vbuf = 0; + } + } +} + +_SOKOL_PRIVATE void _sg_gl_cache_active_texture(GLenum texture) { + if (_sg.gl.cache.cur_active_texture != texture) { + _sg.gl.cache.cur_active_texture = texture; + glActiveTexture(texture); + } +} + +_SOKOL_PRIVATE void _sg_gl_cache_clear_texture_bindings(bool force) { + for (int i = 0; (i < SG_MAX_SHADERSTAGE_IMAGES) && (i < _sg.gl.max_combined_texture_image_units); i++) { + if (force || (_sg.gl.cache.textures[i].texture != 0)) { + 
GLenum gl_texture_slot = (GLenum) (GL_TEXTURE0 + i); + glActiveTexture(gl_texture_slot); + glBindTexture(GL_TEXTURE_2D, 0); + glBindTexture(GL_TEXTURE_CUBE_MAP, 0); + #if !defined(SOKOL_GLES2) + if (!_sg.gl.gles2) { + glBindTexture(GL_TEXTURE_3D, 0); + glBindTexture(GL_TEXTURE_2D_ARRAY, 0); + } + #endif + _sg.gl.cache.textures[i].target = 0; + _sg.gl.cache.textures[i].texture = 0; + _sg.gl.cache.cur_active_texture = gl_texture_slot; + } + } +} + +_SOKOL_PRIVATE void _sg_gl_cache_bind_texture(int slot_index, GLenum target, GLuint texture) { + /* it's valid to call this function with target=0 and/or texture=0 + target=0 will unbind the previous binding, texture=0 will clear + the new binding + */ + SOKOL_ASSERT(slot_index < SG_MAX_SHADERSTAGE_IMAGES); + if (slot_index >= _sg.gl.max_combined_texture_image_units) { + return; + } + _sg_gl_texture_bind_slot* slot = &_sg.gl.cache.textures[slot_index]; + if ((slot->target != target) || (slot->texture != texture)) { + _sg_gl_cache_active_texture((GLenum)(GL_TEXTURE0 + slot_index)); + /* if the target has changed, clear the previous binding on that target */ + if ((target != slot->target) && (slot->target != 0)) { + glBindTexture(slot->target, 0); + } + /* apply new binding (texture can be 0 to unbind) */ + if (target != 0) { + glBindTexture(target, texture); + } + slot->target = target; + slot->texture = texture; + } +} + +_SOKOL_PRIVATE void _sg_gl_cache_store_texture_binding(int slot_index) { + SOKOL_ASSERT(slot_index < SG_MAX_SHADERSTAGE_IMAGES); + _sg.gl.cache.stored_texture = _sg.gl.cache.textures[slot_index]; +} + +_SOKOL_PRIVATE void _sg_gl_cache_restore_texture_binding(int slot_index) { + SOKOL_ASSERT(slot_index < SG_MAX_SHADERSTAGE_IMAGES); + _sg_gl_texture_bind_slot* slot = &_sg.gl.cache.stored_texture; + if (slot->texture != 0) { + /* we only care restoring valid ids */ + SOKOL_ASSERT(slot->target != 0); + _sg_gl_cache_bind_texture(slot_index, slot->target, slot->texture); + slot->target = 0; + slot->texture = 0; + } +} + +/* called from _sg_gl_destroy_texture() */ +_SOKOL_PRIVATE void _sg_gl_cache_invalidate_texture(GLuint tex) { + for (int i = 0; i < SG_MAX_SHADERSTAGE_IMAGES; i++) { + _sg_gl_texture_bind_slot* slot = &_sg.gl.cache.textures[i]; + if (tex == slot->texture) { + _sg_gl_cache_active_texture((GLenum)(GL_TEXTURE0 + i)); + glBindTexture(slot->target, 0); + slot->target = 0; + slot->texture = 0; + } + } + if (tex == _sg.gl.cache.stored_texture.texture) { + _sg.gl.cache.stored_texture.target = 0; + _sg.gl.cache.stored_texture.texture = 0; + } +} + +/* called from _sg_gl_destroy_shader() */ +_SOKOL_PRIVATE void _sg_gl_cache_invalidate_program(GLuint prog) { + if (prog == _sg.gl.cache.prog) { + _sg.gl.cache.prog = 0; + glUseProgram(0); + } +} + +_SOKOL_PRIVATE void _sg_gl_reset_state_cache(void) { + if (_sg.gl.cur_context) { + _SG_GL_CHECK_ERROR(); + #if !defined(SOKOL_GLES2) + if (!_sg.gl.gles2) { + glBindVertexArray(_sg.gl.cur_context->vao); + _SG_GL_CHECK_ERROR(); + } + #endif + memset(&_sg.gl.cache, 0, sizeof(_sg.gl.cache)); + _sg_gl_cache_clear_buffer_bindings(true); + _SG_GL_CHECK_ERROR(); + _sg_gl_cache_clear_texture_bindings(true); + _SG_GL_CHECK_ERROR(); + for (int i = 0; i < _sg.limits.max_vertex_attrs; i++) { + _sg_gl_attr_t* attr = &_sg.gl.cache.attrs[i].gl_attr; + attr->vb_index = -1; + attr->divisor = -1; + glDisableVertexAttribArray((GLuint)i); + _SG_GL_CHECK_ERROR(); + } + _sg.gl.cache.cur_primitive_type = GL_TRIANGLES; + + /* shader program */ + glGetIntegerv(GL_CURRENT_PROGRAM, (GLint*)&_sg.gl.cache.prog); + 
_SG_GL_CHECK_ERROR(); + + /* depth and stencil state */ + _sg.gl.cache.depth.compare = SG_COMPAREFUNC_ALWAYS; + _sg.gl.cache.stencil.front.compare = SG_COMPAREFUNC_ALWAYS; + _sg.gl.cache.stencil.front.fail_op = SG_STENCILOP_KEEP; + _sg.gl.cache.stencil.front.depth_fail_op = SG_STENCILOP_KEEP; + _sg.gl.cache.stencil.front.pass_op = SG_STENCILOP_KEEP; + _sg.gl.cache.stencil.back.compare = SG_COMPAREFUNC_ALWAYS; + _sg.gl.cache.stencil.back.fail_op = SG_STENCILOP_KEEP; + _sg.gl.cache.stencil.back.depth_fail_op = SG_STENCILOP_KEEP; + _sg.gl.cache.stencil.back.pass_op = SG_STENCILOP_KEEP; + glEnable(GL_DEPTH_TEST); + glDepthFunc(GL_ALWAYS); + glDepthMask(GL_FALSE); + glDisable(GL_STENCIL_TEST); + glStencilFunc(GL_ALWAYS, 0, 0); + glStencilOp(GL_KEEP, GL_KEEP, GL_KEEP); + glStencilMask(0); + + /* blend state */ + _sg.gl.cache.blend.src_factor_rgb = SG_BLENDFACTOR_ONE; + _sg.gl.cache.blend.dst_factor_rgb = SG_BLENDFACTOR_ZERO; + _sg.gl.cache.blend.op_rgb = SG_BLENDOP_ADD; + _sg.gl.cache.blend.src_factor_alpha = SG_BLENDFACTOR_ONE; + _sg.gl.cache.blend.dst_factor_alpha = SG_BLENDFACTOR_ZERO; + _sg.gl.cache.blend.op_alpha = SG_BLENDOP_ADD; + glDisable(GL_BLEND); + glBlendFuncSeparate(GL_ONE, GL_ZERO, GL_ONE, GL_ZERO); + glBlendEquationSeparate(GL_FUNC_ADD, GL_FUNC_ADD); + glBlendColor(0.0f, 0.0f, 0.0f, 0.0f); + + /* standalone state */ + for (int i = 0; i < SG_MAX_COLOR_ATTACHMENTS; i++) { + _sg.gl.cache.color_write_mask[i] = SG_COLORMASK_RGBA; + } + _sg.gl.cache.cull_mode = SG_CULLMODE_NONE; + _sg.gl.cache.face_winding = SG_FACEWINDING_CW; + _sg.gl.cache.sample_count = 1; + glColorMask(GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE); + glPolygonOffset(0.0f, 0.0f); + glDisable(GL_POLYGON_OFFSET_FILL); + glDisable(GL_CULL_FACE); + glFrontFace(GL_CW); + glCullFace(GL_BACK); + glEnable(GL_SCISSOR_TEST); + glDisable(GL_SAMPLE_ALPHA_TO_COVERAGE); + glEnable(GL_DITHER); + glDisable(GL_POLYGON_OFFSET_FILL); + #if defined(SOKOL_GLCORE33) + glEnable(GL_MULTISAMPLE); + glEnable(GL_PROGRAM_POINT_SIZE); + #endif + } +} + +_SOKOL_PRIVATE void _sg_gl_setup_backend(const sg_desc* desc) { + /* assumes that _sg.gl is already zero-initialized */ + _sg.gl.valid = true; + #if defined(SOKOL_GLES2) || defined(SOKOL_GLES3) + _sg.gl.gles2 = desc->context.gl.force_gles2; + #else + _SOKOL_UNUSED(desc); + _sg.gl.gles2 = false; + #endif + + #if defined(_SOKOL_USE_WIN32_GL_LOADER) + _sg_gl_load_opengl(); + #endif + + /* clear initial GL error state */ + #if defined(SOKOL_DEBUG) + while (glGetError() != GL_NO_ERROR); + #endif + #if defined(SOKOL_GLCORE33) + _sg_gl_init_caps_glcore33(); + #elif defined(SOKOL_GLES3) + if (_sg.gl.gles2) { + _sg_gl_init_caps_gles2(); + } + else { + _sg_gl_init_caps_gles3(); + } + #else + _sg_gl_init_caps_gles2(); + #endif +} + +_SOKOL_PRIVATE void _sg_gl_discard_backend(void) { + SOKOL_ASSERT(_sg.gl.valid); + _sg.gl.valid = false; + #if defined(_SOKOL_USE_WIN32_GL_LOADER) + _sg_gl_unload_opengl(); + #endif +} + +_SOKOL_PRIVATE void _sg_gl_activate_context(_sg_context_t* ctx) { + SOKOL_ASSERT(_sg.gl.valid); + /* NOTE: ctx can be 0 to unset the current context */ + _sg.gl.cur_context = ctx; + _sg_gl_reset_state_cache(); +} + +/*-- GL backend resource creation and destruction ----------------------------*/ +_SOKOL_PRIVATE sg_resource_state _sg_gl_create_context(_sg_context_t* ctx) { + SOKOL_ASSERT(ctx); + SOKOL_ASSERT(0 == ctx->default_framebuffer); + _SG_GL_CHECK_ERROR(); + glGetIntegerv(GL_FRAMEBUFFER_BINDING, (GLint*)&ctx->default_framebuffer); + _SG_GL_CHECK_ERROR(); + #if !defined(SOKOL_GLES2) + if 
(!_sg.gl.gles2) { + SOKOL_ASSERT(0 == ctx->vao); + glGenVertexArrays(1, &ctx->vao); + glBindVertexArray(ctx->vao); + _SG_GL_CHECK_ERROR(); + } + #endif + return SG_RESOURCESTATE_VALID; +} + +_SOKOL_PRIVATE void _sg_gl_destroy_context(_sg_context_t* ctx) { + SOKOL_ASSERT(ctx); + #if !defined(SOKOL_GLES2) + if (!_sg.gl.gles2) { + if (ctx->vao) { + glDeleteVertexArrays(1, &ctx->vao); + } + _SG_GL_CHECK_ERROR(); + } + #else + _SOKOL_UNUSED(ctx); + #endif +} + +_SOKOL_PRIVATE sg_resource_state _sg_gl_create_buffer(_sg_buffer_t* buf, const sg_buffer_desc* desc) { + SOKOL_ASSERT(buf && desc); + _SG_GL_CHECK_ERROR(); + _sg_buffer_common_init(&buf->cmn, desc); + buf->gl.ext_buffers = (0 != desc->gl_buffers[0]); + GLenum gl_target = _sg_gl_buffer_target(buf->cmn.type); + GLenum gl_usage = _sg_gl_usage(buf->cmn.usage); + for (int slot = 0; slot < buf->cmn.num_slots; slot++) { + GLuint gl_buf = 0; + if (buf->gl.ext_buffers) { + SOKOL_ASSERT(desc->gl_buffers[slot]); + gl_buf = desc->gl_buffers[slot]; + } + else { + glGenBuffers(1, &gl_buf); + _sg_gl_cache_store_buffer_binding(gl_target); + _sg_gl_cache_bind_buffer(gl_target, gl_buf); + glBufferData(gl_target, buf->cmn.size, 0, gl_usage); + if (buf->cmn.usage == SG_USAGE_IMMUTABLE) { + SOKOL_ASSERT(desc->data.ptr); + glBufferSubData(gl_target, 0, buf->cmn.size, desc->data.ptr); + } + _sg_gl_cache_restore_buffer_binding(gl_target); + } + buf->gl.buf[slot] = gl_buf; + } + _SG_GL_CHECK_ERROR(); + return SG_RESOURCESTATE_VALID; +} + +_SOKOL_PRIVATE void _sg_gl_destroy_buffer(_sg_buffer_t* buf) { + SOKOL_ASSERT(buf); + _SG_GL_CHECK_ERROR(); + for (int slot = 0; slot < buf->cmn.num_slots; slot++) { + if (buf->gl.buf[slot]) { + _sg_gl_cache_invalidate_buffer(buf->gl.buf[slot]); + if (!buf->gl.ext_buffers) { + glDeleteBuffers(1, &buf->gl.buf[slot]); + } + } + } + _SG_GL_CHECK_ERROR(); +} + +_SOKOL_PRIVATE bool _sg_gl_supported_texture_format(sg_pixel_format fmt) { + const int fmt_index = (int) fmt; + SOKOL_ASSERT((fmt_index > SG_PIXELFORMAT_NONE) && (fmt_index < _SG_PIXELFORMAT_NUM)); + return _sg.formats[fmt_index].sample; +} + +_SOKOL_PRIVATE sg_resource_state _sg_gl_create_image(_sg_image_t* img, const sg_image_desc* desc) { + SOKOL_ASSERT(img && desc); + _SG_GL_CHECK_ERROR(); + _sg_image_common_init(&img->cmn, desc); + img->gl.ext_textures = (0 != desc->gl_textures[0]); + + /* check if texture format is support */ + if (!_sg_gl_supported_texture_format(img->cmn.pixel_format)) { + SOKOL_LOG("texture format not supported by GL context\n"); + return SG_RESOURCESTATE_FAILED; + } + /* check for optional texture types */ + if ((img->cmn.type == SG_IMAGETYPE_3D) && !_sg.features.imagetype_3d) { + SOKOL_LOG("3D textures not supported by GL context\n"); + return SG_RESOURCESTATE_FAILED; + } + if ((img->cmn.type == SG_IMAGETYPE_ARRAY) && !_sg.features.imagetype_array) { + SOKOL_LOG("array textures not supported by GL context\n"); + return SG_RESOURCESTATE_FAILED; + } + + #if !defined(SOKOL_GLES2) + bool msaa = false; + if (!_sg.gl.gles2) { + msaa = (img->cmn.sample_count > 1) && (_sg.features.msaa_render_targets); + } + #endif + + if (_sg_is_valid_rendertarget_depth_format(img->cmn.pixel_format)) { + /* special case depth-stencil-buffer? 
*/ + SOKOL_ASSERT((img->cmn.usage == SG_USAGE_IMMUTABLE) && (img->cmn.num_slots == 1)); + SOKOL_ASSERT(!img->gl.ext_textures); /* cannot provide external texture for depth images */ + glGenRenderbuffers(1, &img->gl.depth_render_buffer); + glBindRenderbuffer(GL_RENDERBUFFER, img->gl.depth_render_buffer); + GLenum gl_depth_format = _sg_gl_depth_attachment_format(img->cmn.pixel_format); + #if !defined(SOKOL_GLES2) + if (!_sg.gl.gles2 && msaa) { + glRenderbufferStorageMultisample(GL_RENDERBUFFER, img->cmn.sample_count, gl_depth_format, img->cmn.width, img->cmn.height); + } + else + #endif + { + glRenderbufferStorage(GL_RENDERBUFFER, gl_depth_format, img->cmn.width, img->cmn.height); + } + } + else { + /* regular color texture */ + img->gl.target = _sg_gl_texture_target(img->cmn.type); + const GLenum gl_internal_format = _sg_gl_teximage_internal_format(img->cmn.pixel_format); + + /* if this is a MSAA render target, need to create a separate render buffer */ + #if !defined(SOKOL_GLES2) + if (!_sg.gl.gles2 && img->cmn.render_target && msaa) { + glGenRenderbuffers(1, &img->gl.msaa_render_buffer); + glBindRenderbuffer(GL_RENDERBUFFER, img->gl.msaa_render_buffer); + glRenderbufferStorageMultisample(GL_RENDERBUFFER, img->cmn.sample_count, gl_internal_format, img->cmn.width, img->cmn.height); + } + #endif + + if (img->gl.ext_textures) { + /* inject externally GL textures */ + for (int slot = 0; slot < img->cmn.num_slots; slot++) { + SOKOL_ASSERT(desc->gl_textures[slot]); + img->gl.tex[slot] = desc->gl_textures[slot]; + } + if (desc->gl_texture_target) { + img->gl.target = (GLenum)desc->gl_texture_target; + } + } + else { + /* create our own GL texture(s) */ + const GLenum gl_format = _sg_gl_teximage_format(img->cmn.pixel_format); + const bool is_compressed = _sg_is_compressed_pixel_format(img->cmn.pixel_format); + for (int slot = 0; slot < img->cmn.num_slots; slot++) { + glGenTextures(1, &img->gl.tex[slot]); + SOKOL_ASSERT(img->gl.tex[slot]); + _sg_gl_cache_store_texture_binding(0); + _sg_gl_cache_bind_texture(0, img->gl.target, img->gl.tex[slot]); + GLenum gl_min_filter = _sg_gl_filter(img->cmn.min_filter); + GLenum gl_mag_filter = _sg_gl_filter(img->cmn.mag_filter); + glTexParameteri(img->gl.target, GL_TEXTURE_MIN_FILTER, (GLint)gl_min_filter); + glTexParameteri(img->gl.target, GL_TEXTURE_MAG_FILTER, (GLint)gl_mag_filter); + if (_sg.gl.ext_anisotropic && (img->cmn.max_anisotropy > 1)) { + GLint max_aniso = (GLint) img->cmn.max_anisotropy; + if (max_aniso > _sg.gl.max_anisotropy) { + max_aniso = _sg.gl.max_anisotropy; + } + glTexParameteri(img->gl.target, GL_TEXTURE_MAX_ANISOTROPY_EXT, max_aniso); + } + if (img->cmn.type == SG_IMAGETYPE_CUBE) { + glTexParameteri(img->gl.target, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE); + glTexParameteri(img->gl.target, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE); + } + else { + glTexParameteri(img->gl.target, GL_TEXTURE_WRAP_S, (GLint)_sg_gl_wrap(img->cmn.wrap_u)); + glTexParameteri(img->gl.target, GL_TEXTURE_WRAP_T, (GLint)_sg_gl_wrap(img->cmn.wrap_v)); + #if !defined(SOKOL_GLES2) + if (!_sg.gl.gles2 && (img->cmn.type == SG_IMAGETYPE_3D)) { + glTexParameteri(img->gl.target, GL_TEXTURE_WRAP_R, (GLint)_sg_gl_wrap(img->cmn.wrap_w)); + } + #endif + #if defined(SOKOL_GLCORE33) + float border[4]; + switch (img->cmn.border_color) { + case SG_BORDERCOLOR_TRANSPARENT_BLACK: + border[0] = 0.0f; border[1] = 0.0f; border[2] = 0.0f; border[3] = 0.0f; + break; + case SG_BORDERCOLOR_OPAQUE_WHITE: + border[0] = 1.0f; border[1] = 1.0f; border[2] = 1.0f; border[3] = 1.0f; + break; + default: + 
border[0] = 0.0f; border[1] = 0.0f; border[2] = 0.0f; border[3] = 1.0f; + break; + } + glTexParameterfv(img->gl.target, GL_TEXTURE_BORDER_COLOR, border); + #endif + } + #if !defined(SOKOL_GLES2) + if (!_sg.gl.gles2) { + /* GL spec has strange defaults for mipmap min/max lod: -1000 to +1000 */ + const float min_lod = _sg_clamp(desc->min_lod, 0.0f, 1000.0f); + const float max_lod = _sg_clamp(desc->max_lod, 0.0f, 1000.0f); + glTexParameterf(img->gl.target, GL_TEXTURE_MIN_LOD, min_lod); + glTexParameterf(img->gl.target, GL_TEXTURE_MAX_LOD, max_lod); + } + #endif + const int num_faces = img->cmn.type == SG_IMAGETYPE_CUBE ? 6 : 1; + int data_index = 0; + for (int face_index = 0; face_index < num_faces; face_index++) { + for (int mip_index = 0; mip_index < img->cmn.num_mipmaps; mip_index++, data_index++) { + GLenum gl_img_target = img->gl.target; + if (SG_IMAGETYPE_CUBE == img->cmn.type) { + gl_img_target = _sg_gl_cubeface_target(face_index); + } + const GLvoid* data_ptr = desc->data.subimage[face_index][mip_index].ptr; + const GLsizei data_size = (GLsizei) desc->data.subimage[face_index][mip_index].size; + int mip_width = img->cmn.width >> mip_index; + if (mip_width == 0) { + mip_width = 1; + } + int mip_height = img->cmn.height >> mip_index; + if (mip_height == 0) { + mip_height = 1; + } + if ((SG_IMAGETYPE_2D == img->cmn.type) || (SG_IMAGETYPE_CUBE == img->cmn.type)) { + if (is_compressed) { + glCompressedTexImage2D(gl_img_target, mip_index, gl_internal_format, + mip_width, mip_height, 0, data_size, data_ptr); + } + else { + const GLenum gl_type = _sg_gl_teximage_type(img->cmn.pixel_format); + glTexImage2D(gl_img_target, mip_index, (GLint)gl_internal_format, + mip_width, mip_height, 0, gl_format, gl_type, data_ptr); + } + } + #if !defined(SOKOL_GLES2) + else if (!_sg.gl.gles2 && ((SG_IMAGETYPE_3D == img->cmn.type) || (SG_IMAGETYPE_ARRAY == img->cmn.type))) { + int mip_depth = img->cmn.num_slices; + if (SG_IMAGETYPE_3D == img->cmn.type) { + mip_depth >>= mip_index; + } + if (mip_depth == 0) { + mip_depth = 1; + } + if (is_compressed) { + glCompressedTexImage3D(gl_img_target, mip_index, gl_internal_format, + mip_width, mip_height, mip_depth, 0, data_size, data_ptr); + } + else { + const GLenum gl_type = _sg_gl_teximage_type(img->cmn.pixel_format); + glTexImage3D(gl_img_target, mip_index, (GLint)gl_internal_format, + mip_width, mip_height, mip_depth, 0, gl_format, gl_type, data_ptr); + } + } + #endif + } + } + _sg_gl_cache_restore_texture_binding(0); + } + } + } + _SG_GL_CHECK_ERROR(); + return SG_RESOURCESTATE_VALID; +} + +_SOKOL_PRIVATE void _sg_gl_destroy_image(_sg_image_t* img) { + SOKOL_ASSERT(img); + _SG_GL_CHECK_ERROR(); + for (int slot = 0; slot < img->cmn.num_slots; slot++) { + if (img->gl.tex[slot]) { + _sg_gl_cache_invalidate_texture(img->gl.tex[slot]); + if (!img->gl.ext_textures) { + glDeleteTextures(1, &img->gl.tex[slot]); + } + } + } + if (img->gl.depth_render_buffer) { + glDeleteRenderbuffers(1, &img->gl.depth_render_buffer); + } + if (img->gl.msaa_render_buffer) { + glDeleteRenderbuffers(1, &img->gl.msaa_render_buffer); + } + _SG_GL_CHECK_ERROR(); +} + +_SOKOL_PRIVATE GLuint _sg_gl_compile_shader(sg_shader_stage stage, const char* src) { + SOKOL_ASSERT(src); + _SG_GL_CHECK_ERROR(); + GLuint gl_shd = glCreateShader(_sg_gl_shader_stage(stage)); + glShaderSource(gl_shd, 1, &src, 0); + glCompileShader(gl_shd); + GLint compile_status = 0; + glGetShaderiv(gl_shd, GL_COMPILE_STATUS, &compile_status); + if (!compile_status) { + /* compilation failed, log error and delete shader */ + 
GLint log_len = 0; + glGetShaderiv(gl_shd, GL_INFO_LOG_LENGTH, &log_len); + if (log_len > 0) { + GLchar* log_buf = (GLchar*) SOKOL_MALLOC((size_t)log_len); + glGetShaderInfoLog(gl_shd, log_len, &log_len, log_buf); + SOKOL_LOG(log_buf); + SOKOL_FREE(log_buf); + } + glDeleteShader(gl_shd); + gl_shd = 0; + } + _SG_GL_CHECK_ERROR(); + return gl_shd; +} + +_SOKOL_PRIVATE sg_resource_state _sg_gl_create_shader(_sg_shader_t* shd, const sg_shader_desc* desc) { + SOKOL_ASSERT(shd && desc); + SOKOL_ASSERT(!shd->gl.prog); + _SG_GL_CHECK_ERROR(); + + _sg_shader_common_init(&shd->cmn, desc); + + /* copy vertex attribute names over, these are required for GLES2, and optional for GLES3 and GL3.x */ + for (int i = 0; i < SG_MAX_VERTEX_ATTRIBUTES; i++) { + _sg_strcpy(&shd->gl.attrs[i].name, desc->attrs[i].name); + } + + GLuint gl_vs = _sg_gl_compile_shader(SG_SHADERSTAGE_VS, desc->vs.source); + GLuint gl_fs = _sg_gl_compile_shader(SG_SHADERSTAGE_FS, desc->fs.source); + if (!(gl_vs && gl_fs)) { + return SG_RESOURCESTATE_FAILED; + } + GLuint gl_prog = glCreateProgram(); + glAttachShader(gl_prog, gl_vs); + glAttachShader(gl_prog, gl_fs); + glLinkProgram(gl_prog); + glDeleteShader(gl_vs); + glDeleteShader(gl_fs); + _SG_GL_CHECK_ERROR(); + + GLint link_status; + glGetProgramiv(gl_prog, GL_LINK_STATUS, &link_status); + if (!link_status) { + GLint log_len = 0; + glGetProgramiv(gl_prog, GL_INFO_LOG_LENGTH, &log_len); + if (log_len > 0) { + GLchar* log_buf = (GLchar*) SOKOL_MALLOC((size_t)log_len); + glGetProgramInfoLog(gl_prog, log_len, &log_len, log_buf); + SOKOL_LOG(log_buf); + SOKOL_FREE(log_buf); + } + glDeleteProgram(gl_prog); + return SG_RESOURCESTATE_FAILED; + } + shd->gl.prog = gl_prog; + + /* resolve uniforms */ + _SG_GL_CHECK_ERROR(); + for (int stage_index = 0; stage_index < SG_NUM_SHADER_STAGES; stage_index++) { + const sg_shader_stage_desc* stage_desc = (stage_index == SG_SHADERSTAGE_VS)? &desc->vs : &desc->fs; + _sg_gl_shader_stage_t* gl_stage = &shd->gl.stage[stage_index]; + for (int ub_index = 0; ub_index < shd->cmn.stage[stage_index].num_uniform_blocks; ub_index++) { + const sg_shader_uniform_block_desc* ub_desc = &stage_desc->uniform_blocks[ub_index]; + SOKOL_ASSERT(ub_desc->size > 0); + _sg_gl_uniform_block_t* ub = &gl_stage->uniform_blocks[ub_index]; + SOKOL_ASSERT(ub->num_uniforms == 0); + int cur_uniform_offset = 0; + for (int u_index = 0; u_index < SG_MAX_UB_MEMBERS; u_index++) { + const sg_shader_uniform_desc* u_desc = &ub_desc->uniforms[u_index]; + if (u_desc->type == SG_UNIFORMTYPE_INVALID) { + break; + } + _sg_gl_uniform_t* u = &ub->uniforms[u_index]; + u->type = u_desc->type; + u->count = (uint8_t) u_desc->array_count; + u->offset = (uint16_t) cur_uniform_offset; + cur_uniform_offset += _sg_uniform_size(u->type, u->count); + if (u_desc->name) { + u->gl_loc = glGetUniformLocation(gl_prog, u_desc->name); + } + else { + u->gl_loc = u_index; + } + ub->num_uniforms++; + } + SOKOL_ASSERT(ub_desc->size == (size_t)cur_uniform_offset); + } + } + + /* resolve image locations */ + _SG_GL_CHECK_ERROR(); + GLuint cur_prog = 0; + glGetIntegerv(GL_CURRENT_PROGRAM, (GLint*)&cur_prog); + glUseProgram(gl_prog); + int gl_tex_slot = 0; + for (int stage_index = 0; stage_index < SG_NUM_SHADER_STAGES; stage_index++) { + const sg_shader_stage_desc* stage_desc = (stage_index == SG_SHADERSTAGE_VS)? 
&desc->vs : &desc->fs; + _sg_gl_shader_stage_t* gl_stage = &shd->gl.stage[stage_index]; + for (int img_index = 0; img_index < shd->cmn.stage[stage_index].num_images; img_index++) { + const sg_shader_image_desc* img_desc = &stage_desc->images[img_index]; + SOKOL_ASSERT(img_desc->image_type != _SG_IMAGETYPE_DEFAULT); + _sg_gl_shader_image_t* gl_img = &gl_stage->images[img_index]; + GLint gl_loc = img_index; + if (img_desc->name) { + gl_loc = glGetUniformLocation(gl_prog, img_desc->name); + } + if (gl_loc != -1) { + gl_img->gl_tex_slot = gl_tex_slot++; + glUniform1i(gl_loc, gl_img->gl_tex_slot); + } + else { + gl_img->gl_tex_slot = -1; + } + } + } + /* it's legal to call glUseProgram with 0 */ + glUseProgram(cur_prog); + _SG_GL_CHECK_ERROR(); + return SG_RESOURCESTATE_VALID; +} + +_SOKOL_PRIVATE void _sg_gl_destroy_shader(_sg_shader_t* shd) { + SOKOL_ASSERT(shd); + _SG_GL_CHECK_ERROR(); + if (shd->gl.prog) { + _sg_gl_cache_invalidate_program(shd->gl.prog); + glDeleteProgram(shd->gl.prog); + } + _SG_GL_CHECK_ERROR(); +} + +_SOKOL_PRIVATE sg_resource_state _sg_gl_create_pipeline(_sg_pipeline_t* pip, _sg_shader_t* shd, const sg_pipeline_desc* desc) { + SOKOL_ASSERT(pip && shd && desc); + SOKOL_ASSERT(!pip->shader && pip->cmn.shader_id.id == SG_INVALID_ID); + SOKOL_ASSERT(desc->shader.id == shd->slot.id); + SOKOL_ASSERT(shd->gl.prog); + pip->shader = shd; + _sg_pipeline_common_init(&pip->cmn, desc); + pip->gl.primitive_type = desc->primitive_type; + pip->gl.depth = desc->depth; + pip->gl.stencil = desc->stencil; + // FIXME: blend color and write mask per draw-buffer-attachment (requires GL4) + pip->gl.blend = desc->colors[0].blend; + for (int i = 0; i < SG_MAX_COLOR_ATTACHMENTS; i++) { + pip->gl.color_write_mask[i] = desc->colors[i].write_mask; + } + pip->gl.cull_mode = desc->cull_mode; + pip->gl.face_winding = desc->face_winding; + pip->gl.sample_count = desc->sample_count; + pip->gl.alpha_to_coverage_enabled = desc->alpha_to_coverage_enabled; + + /* resolve vertex attributes */ + for (int attr_index = 0; attr_index < SG_MAX_VERTEX_ATTRIBUTES; attr_index++) { + pip->gl.attrs[attr_index].vb_index = -1; + } + for (int attr_index = 0; attr_index < _sg.limits.max_vertex_attrs; attr_index++) { + const sg_vertex_attr_desc* a_desc = &desc->layout.attrs[attr_index]; + if (a_desc->format == SG_VERTEXFORMAT_INVALID) { + break; + } + SOKOL_ASSERT(a_desc->buffer_index < SG_MAX_SHADERSTAGE_BUFFERS); + const sg_buffer_layout_desc* l_desc = &desc->layout.buffers[a_desc->buffer_index]; + const sg_vertex_step step_func = l_desc->step_func; + const int step_rate = l_desc->step_rate; + GLint attr_loc = attr_index; + if (!_sg_strempty(&shd->gl.attrs[attr_index].name)) { + attr_loc = glGetAttribLocation(pip->shader->gl.prog, _sg_strptr(&shd->gl.attrs[attr_index].name)); + } + SOKOL_ASSERT(attr_loc < (GLint)_sg.limits.max_vertex_attrs); + if (attr_loc != -1) { + _sg_gl_attr_t* gl_attr = &pip->gl.attrs[attr_loc]; + SOKOL_ASSERT(gl_attr->vb_index == -1); + gl_attr->vb_index = (int8_t) a_desc->buffer_index; + if (step_func == SG_VERTEXSTEP_PER_VERTEX) { + gl_attr->divisor = 0; + } + else { + gl_attr->divisor = (int8_t) step_rate; + } + SOKOL_ASSERT(l_desc->stride > 0); + gl_attr->stride = (uint8_t) l_desc->stride; + gl_attr->offset = a_desc->offset; + gl_attr->size = (uint8_t) _sg_gl_vertexformat_size(a_desc->format); + gl_attr->type = _sg_gl_vertexformat_type(a_desc->format); + gl_attr->normalized = _sg_gl_vertexformat_normalized(a_desc->format); + pip->cmn.vertex_layout_valid[a_desc->buffer_index] = true; + } + else { 
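+            /* attribute not found in the linked program: leave it disabled
+               (vb_index stays -1) and only log a warning */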
+ SOKOL_LOG("Vertex attribute not found in shader: "); + SOKOL_LOG(_sg_strptr(&shd->gl.attrs[attr_index].name)); + } + } + return SG_RESOURCESTATE_VALID; +} + +_SOKOL_PRIVATE void _sg_gl_destroy_pipeline(_sg_pipeline_t* pip) { + SOKOL_ASSERT(pip); + _SOKOL_UNUSED(pip); + /* empty */ +} + +/* + _sg_create_pass + + att_imgs must point to a _sg_image* att_imgs[SG_MAX_COLOR_ATTACHMENTS+1] array, + first entries are the color attachment images (or nullptr), last entry + is the depth-stencil image (or nullptr). +*/ +_SOKOL_PRIVATE sg_resource_state _sg_gl_create_pass(_sg_pass_t* pass, _sg_image_t** att_images, const sg_pass_desc* desc) { + SOKOL_ASSERT(pass && att_images && desc); + SOKOL_ASSERT(att_images && att_images[0]); + _SG_GL_CHECK_ERROR(); + + _sg_pass_common_init(&pass->cmn, desc); + + /* copy image pointers */ + const sg_pass_attachment_desc* att_desc; + for (int i = 0; i < pass->cmn.num_color_atts; i++) { + att_desc = &desc->color_attachments[i]; + SOKOL_ASSERT(att_desc->image.id != SG_INVALID_ID); + SOKOL_ASSERT(0 == pass->gl.color_atts[i].image); + SOKOL_ASSERT(att_images[i] && (att_images[i]->slot.id == att_desc->image.id)); + SOKOL_ASSERT(_sg_is_valid_rendertarget_color_format(att_images[i]->cmn.pixel_format)); + pass->gl.color_atts[i].image = att_images[i]; + } + SOKOL_ASSERT(0 == pass->gl.ds_att.image); + att_desc = &desc->depth_stencil_attachment; + if (att_desc->image.id != SG_INVALID_ID) { + const int ds_img_index = SG_MAX_COLOR_ATTACHMENTS; + SOKOL_ASSERT(att_images[ds_img_index] && (att_images[ds_img_index]->slot.id == att_desc->image.id)); + SOKOL_ASSERT(_sg_is_valid_rendertarget_depth_format(att_images[ds_img_index]->cmn.pixel_format)); + pass->gl.ds_att.image = att_images[ds_img_index]; + } + + /* store current framebuffer binding (restored at end of function) */ + GLuint gl_orig_fb; + glGetIntegerv(GL_FRAMEBUFFER_BINDING, (GLint*)&gl_orig_fb); + + /* create a framebuffer object */ + glGenFramebuffers(1, &pass->gl.fb); + glBindFramebuffer(GL_FRAMEBUFFER, pass->gl.fb); + + /* attach msaa render buffer or textures */ + const bool is_msaa = (0 != att_images[0]->gl.msaa_render_buffer); + if (is_msaa) { + for (int i = 0; i < SG_MAX_COLOR_ATTACHMENTS; i++) { + const _sg_image_t* att_img = pass->gl.color_atts[i].image; + if (att_img) { + const GLuint gl_render_buffer = att_img->gl.msaa_render_buffer; + SOKOL_ASSERT(gl_render_buffer); + glFramebufferRenderbuffer(GL_FRAMEBUFFER, (GLenum)(GL_COLOR_ATTACHMENT0+i), GL_RENDERBUFFER, gl_render_buffer); + } + } + } + else { + for (int i = 0; i < SG_MAX_COLOR_ATTACHMENTS; i++) { + const _sg_image_t* att_img = pass->gl.color_atts[i].image; + const int mip_level = pass->cmn.color_atts[i].mip_level; + const int slice = pass->cmn.color_atts[i].slice; + if (att_img) { + const GLuint gl_tex = att_img->gl.tex[0]; + SOKOL_ASSERT(gl_tex); + const GLenum gl_att = (GLenum)(GL_COLOR_ATTACHMENT0 + i); + switch (att_img->cmn.type) { + case SG_IMAGETYPE_2D: + glFramebufferTexture2D(GL_FRAMEBUFFER, gl_att, GL_TEXTURE_2D, gl_tex, mip_level); + break; + case SG_IMAGETYPE_CUBE: + glFramebufferTexture2D(GL_FRAMEBUFFER, gl_att, _sg_gl_cubeface_target(slice), gl_tex, mip_level); + break; + default: + /* 3D- or array-texture */ + #if !defined(SOKOL_GLES2) + if (!_sg.gl.gles2) { + glFramebufferTextureLayer(GL_FRAMEBUFFER, gl_att, gl_tex, mip_level, slice); + } + #endif + break; + } + } + } + } + + /* attach depth-stencil buffer to framebuffer */ + if (pass->gl.ds_att.image) { + const GLuint gl_render_buffer = pass->gl.ds_att.image->gl.depth_render_buffer; + 
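+        /* a combined depth-stencil pixel format is attached to both the depth
+           and the stencil attachment point of the framebuffer */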
SOKOL_ASSERT(gl_render_buffer); + glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_RENDERBUFFER, gl_render_buffer); + if (_sg_is_depth_stencil_format(pass->gl.ds_att.image->cmn.pixel_format)) { + glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_STENCIL_ATTACHMENT, GL_RENDERBUFFER, gl_render_buffer); + } + } + + /* check if framebuffer is complete */ + if (glCheckFramebufferStatus(GL_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE) { + SOKOL_LOG("Framebuffer completeness check failed!\n"); + return SG_RESOURCESTATE_FAILED; + } + + /* setup color attachments for the framebuffer */ + #if !defined(SOKOL_GLES2) + if (!_sg.gl.gles2) { + GLenum att[SG_MAX_COLOR_ATTACHMENTS] = { + GL_COLOR_ATTACHMENT0, + GL_COLOR_ATTACHMENT1, + GL_COLOR_ATTACHMENT2, + GL_COLOR_ATTACHMENT3 + }; + glDrawBuffers(pass->cmn.num_color_atts, att); + } + #endif + + /* create MSAA resolve framebuffers if necessary */ + if (is_msaa) { + for (int i = 0; i < SG_MAX_COLOR_ATTACHMENTS; i++) { + _sg_gl_attachment_t* gl_att = &pass->gl.color_atts[i]; + _sg_pass_attachment_t* cmn_att = &pass->cmn.color_atts[i]; + if (gl_att->image) { + SOKOL_ASSERT(0 == gl_att->gl_msaa_resolve_buffer); + glGenFramebuffers(1, &gl_att->gl_msaa_resolve_buffer); + glBindFramebuffer(GL_FRAMEBUFFER, gl_att->gl_msaa_resolve_buffer); + const GLuint gl_tex = gl_att->image->gl.tex[0]; + SOKOL_ASSERT(gl_tex); + switch (gl_att->image->cmn.type) { + case SG_IMAGETYPE_2D: + glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, + GL_TEXTURE_2D, gl_tex, cmn_att->mip_level); + break; + case SG_IMAGETYPE_CUBE: + glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, + _sg_gl_cubeface_target(cmn_att->slice), gl_tex, cmn_att->mip_level); + break; + default: + #if !defined(SOKOL_GLES2) + if (!_sg.gl.gles2) { + glFramebufferTextureLayer(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, gl_tex, cmn_att->mip_level, cmn_att->slice); + } + #endif + break; + } + /* check if framebuffer is complete */ + if (glCheckFramebufferStatus(GL_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE) { + SOKOL_LOG("Framebuffer completeness check failed (msaa resolve buffer)!\n"); + return SG_RESOURCESTATE_FAILED; + } + /* setup color attachments for the framebuffer */ + #if !defined(SOKOL_GLES2) + if (!_sg.gl.gles2) { + const GLenum gl_draw_bufs = GL_COLOR_ATTACHMENT0; + glDrawBuffers(1, &gl_draw_bufs); + } + #endif + } + } + } + + /* restore original framebuffer binding */ + glBindFramebuffer(GL_FRAMEBUFFER, gl_orig_fb); + _SG_GL_CHECK_ERROR(); + return SG_RESOURCESTATE_VALID; +} + +_SOKOL_PRIVATE void _sg_gl_destroy_pass(_sg_pass_t* pass) { + SOKOL_ASSERT(pass); + _SG_GL_CHECK_ERROR(); + if (0 != pass->gl.fb) { + glDeleteFramebuffers(1, &pass->gl.fb); + } + for (int i = 0; i < SG_MAX_COLOR_ATTACHMENTS; i++) { + if (pass->gl.color_atts[i].gl_msaa_resolve_buffer) { + glDeleteFramebuffers(1, &pass->gl.color_atts[i].gl_msaa_resolve_buffer); + } + } + if (pass->gl.ds_att.gl_msaa_resolve_buffer) { + glDeleteFramebuffers(1, &pass->gl.ds_att.gl_msaa_resolve_buffer); + } + _SG_GL_CHECK_ERROR(); +} + +_SOKOL_PRIVATE _sg_image_t* _sg_gl_pass_color_image(const _sg_pass_t* pass, int index) { + SOKOL_ASSERT(pass && (index >= 0) && (index < SG_MAX_COLOR_ATTACHMENTS)); + /* NOTE: may return null */ + return pass->gl.color_atts[index].image; +} + +_SOKOL_PRIVATE _sg_image_t* _sg_gl_pass_ds_image(const _sg_pass_t* pass) { + /* NOTE: may return null */ + SOKOL_ASSERT(pass); + return pass->gl.ds_att.image; +} + +_SOKOL_PRIVATE void _sg_gl_begin_pass(_sg_pass_t* pass, const sg_pass_action* action, int w, int h) { 
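+    /* binds the offscreen pass framebuffer (or the context default framebuffer),
+       sets viewport and scissor to the full pass size, and clears the
+       attachments as requested by the pass action */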
+ /* FIXME: what if a texture used as render target is still bound, should we + unbind all currently bound textures in begin pass? */ + SOKOL_ASSERT(action); + SOKOL_ASSERT(!_sg.gl.in_pass); + _SG_GL_CHECK_ERROR(); + _sg.gl.in_pass = true; + _sg.gl.cur_pass = pass; /* can be 0 */ + if (pass) { + _sg.gl.cur_pass_id.id = pass->slot.id; + } + else { + _sg.gl.cur_pass_id.id = SG_INVALID_ID; + } + _sg.gl.cur_pass_width = w; + _sg.gl.cur_pass_height = h; + + /* number of color attachments */ + const int num_color_atts = pass ? pass->cmn.num_color_atts : 1; + + /* bind the render pass framebuffer */ + if (pass) { + /* offscreen pass */ + SOKOL_ASSERT(pass->gl.fb); + glBindFramebuffer(GL_FRAMEBUFFER, pass->gl.fb); + } + else { + /* default pass */ + SOKOL_ASSERT(_sg.gl.cur_context); + glBindFramebuffer(GL_FRAMEBUFFER, _sg.gl.cur_context->default_framebuffer); + } + glViewport(0, 0, w, h); + glScissor(0, 0, w, h); + + /* clear color and depth-stencil attachments if needed */ + bool clear_color = false; + for (int i = 0; i < num_color_atts; i++) { + if (SG_ACTION_CLEAR == action->colors[i].action) { + clear_color = true; + break; + } + } + const bool clear_depth = (action->depth.action == SG_ACTION_CLEAR); + const bool clear_stencil = (action->stencil.action == SG_ACTION_CLEAR); + + bool need_pip_cache_flush = false; + if (clear_color) { + bool need_color_mask_flush = false; + // NOTE: not a bug to iterate over all possible color attachments + for (int i = 0; i < SG_MAX_COLOR_ATTACHMENTS; i++) { + if (SG_COLORMASK_RGBA != _sg.gl.cache.color_write_mask[i]) { + need_pip_cache_flush = true; + need_color_mask_flush = true; + _sg.gl.cache.color_write_mask[i] = SG_COLORMASK_RGBA; + } + } + if (need_color_mask_flush) { + glColorMask(GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE); + } + } + if (clear_depth) { + if (!_sg.gl.cache.depth.write_enabled) { + need_pip_cache_flush = true; + _sg.gl.cache.depth.write_enabled = true; + glDepthMask(GL_TRUE); + } + if (_sg.gl.cache.depth.compare != SG_COMPAREFUNC_ALWAYS) { + need_pip_cache_flush = true; + _sg.gl.cache.depth.compare = SG_COMPAREFUNC_ALWAYS; + glDepthFunc(GL_ALWAYS); + } + } + if (clear_stencil) { + if (_sg.gl.cache.stencil.write_mask != 0xFF) { + need_pip_cache_flush = true; + _sg.gl.cache.stencil.write_mask = 0xFF; + glStencilMask(0xFF); + } + } + if (need_pip_cache_flush) { + /* we messed with the state cache directly, need to clear cached + pipeline to force re-evaluation in next sg_apply_pipeline() */ + _sg.gl.cache.cur_pipeline = 0; + _sg.gl.cache.cur_pipeline_id.id = SG_INVALID_ID; + } + bool use_mrt_clear = (0 != pass); + #if defined(SOKOL_GLES2) + use_mrt_clear = false; + #else + if (_sg.gl.gles2) { + use_mrt_clear = false; + } + #endif + if (!use_mrt_clear) { + GLbitfield clear_mask = 0; + if (clear_color) { + clear_mask |= GL_COLOR_BUFFER_BIT; + const sg_color c = action->colors[0].value; + glClearColor(c.r, c.g, c.b, c.a); + } + if (clear_depth) { + clear_mask |= GL_DEPTH_BUFFER_BIT; + #ifdef SOKOL_GLCORE33 + glClearDepth(action->depth.value); + #else + glClearDepthf(action->depth.value); + #endif + } + if (clear_stencil) { + clear_mask |= GL_STENCIL_BUFFER_BIT; + glClearStencil(action->stencil.value); + } + if (0 != clear_mask) { + glClear(clear_mask); + } + } + #if !defined SOKOL_GLES2 + else { + SOKOL_ASSERT(pass); + for (int i = 0; i < num_color_atts; i++) { + if (action->colors[i].action == SG_ACTION_CLEAR) { + glClearBufferfv(GL_COLOR, i, &action->colors[i].value.r); + } + } + if (pass->gl.ds_att.image) { + if (clear_depth && clear_stencil) { + 
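+                        /* clear depth and stencil together in a single call */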
glClearBufferfi(GL_DEPTH_STENCIL, 0, action->depth.value, action->stencil.value); + } + else if (clear_depth) { + glClearBufferfv(GL_DEPTH, 0, &action->depth.value); + } + else if (clear_stencil) { + GLint val = (GLint) action->stencil.value; + glClearBufferiv(GL_STENCIL, 0, &val); + } + } + } + #endif + _SG_GL_CHECK_ERROR(); +} + +_SOKOL_PRIVATE void _sg_gl_end_pass(void) { + SOKOL_ASSERT(_sg.gl.in_pass); + _SG_GL_CHECK_ERROR(); + + /* if this was an offscreen pass, and MSAA rendering was used, need + to resolve into the pass images */ + #if !defined(SOKOL_GLES2) + if (!_sg.gl.gles2 && _sg.gl.cur_pass) { + /* check if the pass object is still valid */ + const _sg_pass_t* pass = _sg.gl.cur_pass; + SOKOL_ASSERT(pass->slot.id == _sg.gl.cur_pass_id.id); + bool is_msaa = (0 != _sg.gl.cur_pass->gl.color_atts[0].gl_msaa_resolve_buffer); + if (is_msaa) { + SOKOL_ASSERT(pass->gl.fb); + glBindFramebuffer(GL_READ_FRAMEBUFFER, pass->gl.fb); + SOKOL_ASSERT(pass->gl.color_atts[0].image); + const int w = pass->gl.color_atts[0].image->cmn.width; + const int h = pass->gl.color_atts[0].image->cmn.height; + for (int att_index = 0; att_index < SG_MAX_COLOR_ATTACHMENTS; att_index++) { + const _sg_gl_attachment_t* gl_att = &pass->gl.color_atts[att_index]; + if (gl_att->image) { + SOKOL_ASSERT(gl_att->gl_msaa_resolve_buffer); + glBindFramebuffer(GL_DRAW_FRAMEBUFFER, gl_att->gl_msaa_resolve_buffer); + glReadBuffer((GLenum)(GL_COLOR_ATTACHMENT0 + att_index)); + glBlitFramebuffer(0, 0, w, h, 0, 0, w, h, GL_COLOR_BUFFER_BIT, GL_NEAREST); + } + else { + break; + } + } + } + } + #endif + _sg.gl.cur_pass = 0; + _sg.gl.cur_pass_id.id = SG_INVALID_ID; + _sg.gl.cur_pass_width = 0; + _sg.gl.cur_pass_height = 0; + + SOKOL_ASSERT(_sg.gl.cur_context); + glBindFramebuffer(GL_FRAMEBUFFER, _sg.gl.cur_context->default_framebuffer); + _sg.gl.in_pass = false; + _SG_GL_CHECK_ERROR(); +} + +_SOKOL_PRIVATE void _sg_gl_apply_viewport(int x, int y, int w, int h, bool origin_top_left) { + SOKOL_ASSERT(_sg.gl.in_pass); + y = origin_top_left ? (_sg.gl.cur_pass_height - (y+h)) : y; + glViewport(x, y, w, h); +} + +_SOKOL_PRIVATE void _sg_gl_apply_scissor_rect(int x, int y, int w, int h, bool origin_top_left) { + SOKOL_ASSERT(_sg.gl.in_pass); + y = origin_top_left ? 
(_sg.gl.cur_pass_height - (y+h)) : y; + glScissor(x, y, w, h); +} + +_SOKOL_PRIVATE void _sg_gl_apply_pipeline(_sg_pipeline_t* pip) { + SOKOL_ASSERT(pip); + SOKOL_ASSERT(pip->shader && (pip->cmn.shader_id.id == pip->shader->slot.id)); + _SG_GL_CHECK_ERROR(); + if ((_sg.gl.cache.cur_pipeline != pip) || (_sg.gl.cache.cur_pipeline_id.id != pip->slot.id)) { + _sg.gl.cache.cur_pipeline = pip; + _sg.gl.cache.cur_pipeline_id.id = pip->slot.id; + _sg.gl.cache.cur_primitive_type = _sg_gl_primitive_type(pip->gl.primitive_type); + _sg.gl.cache.cur_index_type = _sg_gl_index_type(pip->cmn.index_type); + + /* update depth state */ + { + const sg_depth_state* state_ds = &pip->gl.depth; + sg_depth_state* cache_ds = &_sg.gl.cache.depth; + if (state_ds->compare != cache_ds->compare) { + cache_ds->compare = state_ds->compare; + glDepthFunc(_sg_gl_compare_func(state_ds->compare)); + } + if (state_ds->write_enabled != cache_ds->write_enabled) { + cache_ds->write_enabled = state_ds->write_enabled; + glDepthMask(state_ds->write_enabled); + } + if (!_sg_fequal(state_ds->bias, cache_ds->bias, 0.000001f) || + !_sg_fequal(state_ds->bias_slope_scale, cache_ds->bias_slope_scale, 0.000001f)) + { + /* according to ANGLE's D3D11 backend: + D3D11 SlopeScaledDepthBias ==> GL polygonOffsetFactor + D3D11 DepthBias ==> GL polygonOffsetUnits + DepthBiasClamp has no meaning on GL + */ + cache_ds->bias = state_ds->bias; + cache_ds->bias_slope_scale = state_ds->bias_slope_scale; + glPolygonOffset(state_ds->bias_slope_scale, state_ds->bias); + bool po_enabled = true; + if (_sg_fequal(state_ds->bias, 0.0f, 0.000001f) && + _sg_fequal(state_ds->bias_slope_scale, 0.0f, 0.000001f)) + { + po_enabled = false; + } + if (po_enabled != _sg.gl.cache.polygon_offset_enabled) { + _sg.gl.cache.polygon_offset_enabled = po_enabled; + if (po_enabled) { + glEnable(GL_POLYGON_OFFSET_FILL); + } + else { + glDisable(GL_POLYGON_OFFSET_FILL); + } + } + } + } + + /* update stencil state */ + { + const sg_stencil_state* state_ss = &pip->gl.stencil; + sg_stencil_state* cache_ss = &_sg.gl.cache.stencil; + if (state_ss->enabled != cache_ss->enabled) { + cache_ss->enabled = state_ss->enabled; + if (state_ss->enabled) { + glEnable(GL_STENCIL_TEST); + } + else { + glDisable(GL_STENCIL_TEST); + } + } + if (state_ss->write_mask != cache_ss->write_mask) { + cache_ss->write_mask = state_ss->write_mask; + glStencilMask(state_ss->write_mask); + } + for (int i = 0; i < 2; i++) { + const sg_stencil_face_state* state_sfs = (i==0)? &state_ss->front : &state_ss->back; + sg_stencil_face_state* cache_sfs = (i==0)? &cache_ss->front : &cache_ss->back; + GLenum gl_face = (i==0)? 
GL_FRONT : GL_BACK; + if ((state_sfs->compare != cache_sfs->compare) || + (state_ss->read_mask != cache_ss->read_mask) || + (state_ss->ref != cache_ss->ref)) + { + cache_sfs->compare = state_sfs->compare; + glStencilFuncSeparate(gl_face, + _sg_gl_compare_func(state_sfs->compare), + state_ss->ref, + state_ss->read_mask); + } + if ((state_sfs->fail_op != cache_sfs->fail_op) || + (state_sfs->depth_fail_op != cache_sfs->depth_fail_op) || + (state_sfs->pass_op != cache_sfs->pass_op)) + { + cache_sfs->fail_op = state_sfs->fail_op; + cache_sfs->depth_fail_op = state_sfs->depth_fail_op; + cache_sfs->pass_op = state_sfs->pass_op; + glStencilOpSeparate(gl_face, + _sg_gl_stencil_op(state_sfs->fail_op), + _sg_gl_stencil_op(state_sfs->depth_fail_op), + _sg_gl_stencil_op(state_sfs->pass_op)); + } + } + cache_ss->read_mask = state_ss->read_mask; + cache_ss->ref = state_ss->ref; + } + + /* update blend state + FIXME: separate blend state per color attachment not support, needs GL4 + */ + { + const sg_blend_state* state_bs = &pip->gl.blend; + sg_blend_state* cache_bs = &_sg.gl.cache.blend; + if (state_bs->enabled != cache_bs->enabled) { + cache_bs->enabled = state_bs->enabled; + if (state_bs->enabled) { + glEnable(GL_BLEND); + } + else { + glDisable(GL_BLEND); + } + } + if ((state_bs->src_factor_rgb != cache_bs->src_factor_rgb) || + (state_bs->dst_factor_rgb != cache_bs->dst_factor_rgb) || + (state_bs->src_factor_alpha != cache_bs->src_factor_alpha) || + (state_bs->dst_factor_alpha != cache_bs->dst_factor_alpha)) + { + cache_bs->src_factor_rgb = state_bs->src_factor_rgb; + cache_bs->dst_factor_rgb = state_bs->dst_factor_rgb; + cache_bs->src_factor_alpha = state_bs->src_factor_alpha; + cache_bs->dst_factor_alpha = state_bs->dst_factor_alpha; + glBlendFuncSeparate(_sg_gl_blend_factor(state_bs->src_factor_rgb), + _sg_gl_blend_factor(state_bs->dst_factor_rgb), + _sg_gl_blend_factor(state_bs->src_factor_alpha), + _sg_gl_blend_factor(state_bs->dst_factor_alpha)); + } + if ((state_bs->op_rgb != cache_bs->op_rgb) || (state_bs->op_alpha != cache_bs->op_alpha)) { + cache_bs->op_rgb = state_bs->op_rgb; + cache_bs->op_alpha = state_bs->op_alpha; + glBlendEquationSeparate(_sg_gl_blend_op(state_bs->op_rgb), _sg_gl_blend_op(state_bs->op_alpha)); + } + } + + /* standalone state */ + for (GLuint i = 0; i < (GLuint)pip->cmn.color_attachment_count; i++) { + if (pip->gl.color_write_mask[i] != _sg.gl.cache.color_write_mask[i]) { + const sg_color_mask cm = pip->gl.color_write_mask[i]; + _sg.gl.cache.color_write_mask[i] = cm; + #ifdef SOKOL_GLCORE33 + glColorMaski(i, + (cm & SG_COLORMASK_R) != 0, + (cm & SG_COLORMASK_G) != 0, + (cm & SG_COLORMASK_B) != 0, + (cm & SG_COLORMASK_A) != 0); + #else + if (0 == i) { + glColorMask((cm & SG_COLORMASK_R) != 0, + (cm & SG_COLORMASK_G) != 0, + (cm & SG_COLORMASK_B) != 0, + (cm & SG_COLORMASK_A) != 0); + } + #endif + } + } + + if (!_sg_fequal(pip->cmn.blend_color.r, _sg.gl.cache.blend_color.r, 0.0001f) || + !_sg_fequal(pip->cmn.blend_color.g, _sg.gl.cache.blend_color.g, 0.0001f) || + !_sg_fequal(pip->cmn.blend_color.b, _sg.gl.cache.blend_color.b, 0.0001f) || + !_sg_fequal(pip->cmn.blend_color.a, _sg.gl.cache.blend_color.a, 0.0001f)) + { + sg_color c = pip->cmn.blend_color; + _sg.gl.cache.blend_color = c; + glBlendColor(c.r, c.g, c.b, c.a); + } + if (pip->gl.cull_mode != _sg.gl.cache.cull_mode) { + _sg.gl.cache.cull_mode = pip->gl.cull_mode; + if (SG_CULLMODE_NONE == pip->gl.cull_mode) { + glDisable(GL_CULL_FACE); + } + else { + glEnable(GL_CULL_FACE); + GLenum gl_mode = (SG_CULLMODE_FRONT == 
pip->gl.cull_mode) ? GL_FRONT : GL_BACK; + glCullFace(gl_mode); + } + } + if (pip->gl.face_winding != _sg.gl.cache.face_winding) { + _sg.gl.cache.face_winding = pip->gl.face_winding; + GLenum gl_winding = (SG_FACEWINDING_CW == pip->gl.face_winding) ? GL_CW : GL_CCW; + glFrontFace(gl_winding); + } + if (pip->gl.alpha_to_coverage_enabled != _sg.gl.cache.alpha_to_coverage_enabled) { + _sg.gl.cache.alpha_to_coverage_enabled = pip->gl.alpha_to_coverage_enabled; + if (pip->gl.alpha_to_coverage_enabled) { + glEnable(GL_SAMPLE_ALPHA_TO_COVERAGE); + } + else { + glDisable(GL_SAMPLE_ALPHA_TO_COVERAGE); + } + } + #ifdef SOKOL_GLCORE33 + if (pip->gl.sample_count != _sg.gl.cache.sample_count) { + _sg.gl.cache.sample_count = pip->gl.sample_count; + if (pip->gl.sample_count > 1) { + glEnable(GL_MULTISAMPLE); + } + else { + glDisable(GL_MULTISAMPLE); + } + } + #endif + + /* bind shader program */ + if (pip->shader->gl.prog != _sg.gl.cache.prog) { + _sg.gl.cache.prog = pip->shader->gl.prog; + glUseProgram(pip->shader->gl.prog); + } + } + _SG_GL_CHECK_ERROR(); +} + +_SOKOL_PRIVATE void _sg_gl_apply_bindings( + _sg_pipeline_t* pip, + _sg_buffer_t** vbs, const int* vb_offsets, int num_vbs, + _sg_buffer_t* ib, int ib_offset, + _sg_image_t** vs_imgs, int num_vs_imgs, + _sg_image_t** fs_imgs, int num_fs_imgs) +{ + SOKOL_ASSERT(pip); + _SOKOL_UNUSED(num_fs_imgs); + _SOKOL_UNUSED(num_vs_imgs); + _SOKOL_UNUSED(num_vbs); + _SG_GL_CHECK_ERROR(); + + /* bind textures */ + _SG_GL_CHECK_ERROR(); + for (int stage_index = 0; stage_index < SG_NUM_SHADER_STAGES; stage_index++) { + const _sg_shader_stage_t* stage = &pip->shader->cmn.stage[stage_index]; + const _sg_gl_shader_stage_t* gl_stage = &pip->shader->gl.stage[stage_index]; + _sg_image_t** imgs = (stage_index == SG_SHADERSTAGE_VS)? vs_imgs : fs_imgs; + SOKOL_ASSERT(((stage_index == SG_SHADERSTAGE_VS)? num_vs_imgs : num_fs_imgs) == stage->num_images); + for (int img_index = 0; img_index < stage->num_images; img_index++) { + const _sg_gl_shader_image_t* gl_shd_img = &gl_stage->images[img_index]; + if (gl_shd_img->gl_tex_slot != -1) { + _sg_image_t* img = imgs[img_index]; + const GLuint gl_tex = img->gl.tex[img->cmn.active_slot]; + SOKOL_ASSERT(img && img->gl.target); + SOKOL_ASSERT((gl_shd_img->gl_tex_slot != -1) && gl_tex); + _sg_gl_cache_bind_texture(gl_shd_img->gl_tex_slot, img->gl.target, gl_tex); + } + } + } + _SG_GL_CHECK_ERROR(); + + /* index buffer (can be 0) */ + const GLuint gl_ib = ib ? 
ib->gl.buf[ib->cmn.active_slot] : 0; + _sg_gl_cache_bind_buffer(GL_ELEMENT_ARRAY_BUFFER, gl_ib); + _sg.gl.cache.cur_ib_offset = ib_offset; + + /* vertex attributes */ + for (GLuint attr_index = 0; attr_index < (GLuint)_sg.limits.max_vertex_attrs; attr_index++) { + _sg_gl_attr_t* attr = &pip->gl.attrs[attr_index]; + _sg_gl_cache_attr_t* cache_attr = &_sg.gl.cache.attrs[attr_index]; + bool cache_attr_dirty = false; + int vb_offset = 0; + GLuint gl_vb = 0; + if (attr->vb_index >= 0) { + /* attribute is enabled */ + SOKOL_ASSERT(attr->vb_index < num_vbs); + _sg_buffer_t* vb = vbs[attr->vb_index]; + SOKOL_ASSERT(vb); + gl_vb = vb->gl.buf[vb->cmn.active_slot]; + vb_offset = vb_offsets[attr->vb_index] + attr->offset; + if ((gl_vb != cache_attr->gl_vbuf) || + (attr->size != cache_attr->gl_attr.size) || + (attr->type != cache_attr->gl_attr.type) || + (attr->normalized != cache_attr->gl_attr.normalized) || + (attr->stride != cache_attr->gl_attr.stride) || + (vb_offset != cache_attr->gl_attr.offset) || + (cache_attr->gl_attr.divisor != attr->divisor)) + { + _sg_gl_cache_bind_buffer(GL_ARRAY_BUFFER, gl_vb); + glVertexAttribPointer(attr_index, attr->size, attr->type, + attr->normalized, attr->stride, + (const GLvoid*)(GLintptr)vb_offset); + #if defined(_SOKOL_GL_INSTANCING_ENABLED) + if (_sg.features.instancing) { + glVertexAttribDivisor(attr_index, (GLuint)attr->divisor); + } + #endif + cache_attr_dirty = true; + } + if (cache_attr->gl_attr.vb_index == -1) { + glEnableVertexAttribArray(attr_index); + cache_attr_dirty = true; + } + } + else { + /* attribute is disabled */ + if (cache_attr->gl_attr.vb_index != -1) { + glDisableVertexAttribArray(attr_index); + cache_attr_dirty = true; + } + } + if (cache_attr_dirty) { + cache_attr->gl_attr = *attr; + cache_attr->gl_attr.offset = vb_offset; + cache_attr->gl_vbuf = gl_vb; + } + } + _SG_GL_CHECK_ERROR(); +} + +_SOKOL_PRIVATE void _sg_gl_apply_uniforms(sg_shader_stage stage_index, int ub_index, const sg_range* data) { + SOKOL_ASSERT(_sg.gl.cache.cur_pipeline); + SOKOL_ASSERT(_sg.gl.cache.cur_pipeline->slot.id == _sg.gl.cache.cur_pipeline_id.id); + SOKOL_ASSERT(_sg.gl.cache.cur_pipeline->shader->slot.id == _sg.gl.cache.cur_pipeline->cmn.shader_id.id); + SOKOL_ASSERT(_sg.gl.cache.cur_pipeline->shader->cmn.stage[stage_index].num_uniform_blocks > ub_index); + SOKOL_ASSERT(_sg.gl.cache.cur_pipeline->shader->cmn.stage[stage_index].uniform_blocks[ub_index].size == data->size); + const _sg_gl_shader_stage_t* gl_stage = &_sg.gl.cache.cur_pipeline->shader->gl.stage[stage_index]; + const _sg_gl_uniform_block_t* gl_ub = &gl_stage->uniform_blocks[ub_index]; + for (int u_index = 0; u_index < gl_ub->num_uniforms; u_index++) { + const _sg_gl_uniform_t* u = &gl_ub->uniforms[u_index]; + SOKOL_ASSERT(u->type != SG_UNIFORMTYPE_INVALID); + if (u->gl_loc == -1) { + continue; + } + GLfloat* ptr = (GLfloat*) (((uint8_t*)data->ptr) + u->offset); + switch (u->type) { + case SG_UNIFORMTYPE_INVALID: + break; + case SG_UNIFORMTYPE_FLOAT: + glUniform1fv(u->gl_loc, u->count, ptr); + break; + case SG_UNIFORMTYPE_FLOAT2: + glUniform2fv(u->gl_loc, u->count, ptr); + break; + case SG_UNIFORMTYPE_FLOAT3: + glUniform3fv(u->gl_loc, u->count, ptr); + break; + case SG_UNIFORMTYPE_FLOAT4: + glUniform4fv(u->gl_loc, u->count, ptr); + break; + case SG_UNIFORMTYPE_MAT4: + glUniformMatrix4fv(u->gl_loc, u->count, GL_FALSE, ptr); + break; + default: + SOKOL_UNREACHABLE; + break; + } + } +} + +_SOKOL_PRIVATE void _sg_gl_draw(int base_element, int num_elements, int num_instances) { + const GLenum i_type = 
_sg.gl.cache.cur_index_type; + const GLenum p_type = _sg.gl.cache.cur_primitive_type; + if (0 != i_type) { + /* indexed rendering */ + const int i_size = (i_type == GL_UNSIGNED_SHORT) ? 2 : 4; + const int ib_offset = _sg.gl.cache.cur_ib_offset; + const GLvoid* indices = (const GLvoid*)(GLintptr)(base_element*i_size+ib_offset); + if (num_instances == 1) { + glDrawElements(p_type, num_elements, i_type, indices); + } + else { + if (_sg.features.instancing) { + glDrawElementsInstanced(p_type, num_elements, i_type, indices, num_instances); + } + } + } + else { + /* non-indexed rendering */ + if (num_instances == 1) { + glDrawArrays(p_type, base_element, num_elements); + } + else { + if (_sg.features.instancing) { + glDrawArraysInstanced(p_type, base_element, num_elements, num_instances); + } + } + } +} + +_SOKOL_PRIVATE void _sg_gl_commit(void) { + SOKOL_ASSERT(!_sg.gl.in_pass); + /* "soft" clear bindings (only those that are actually bound) */ + _sg_gl_cache_clear_buffer_bindings(false); + _sg_gl_cache_clear_texture_bindings(false); +} + +_SOKOL_PRIVATE void _sg_gl_update_buffer(_sg_buffer_t* buf, const sg_range* data) { + SOKOL_ASSERT(buf && data && data->ptr && (data->size > 0)); + /* only one update per buffer per frame allowed */ + if (++buf->cmn.active_slot >= buf->cmn.num_slots) { + buf->cmn.active_slot = 0; + } + GLenum gl_tgt = _sg_gl_buffer_target(buf->cmn.type); + SOKOL_ASSERT(buf->cmn.active_slot < SG_NUM_INFLIGHT_FRAMES); + GLuint gl_buf = buf->gl.buf[buf->cmn.active_slot]; + SOKOL_ASSERT(gl_buf); + _SG_GL_CHECK_ERROR(); + _sg_gl_cache_store_buffer_binding(gl_tgt); + _sg_gl_cache_bind_buffer(gl_tgt, gl_buf); + glBufferSubData(gl_tgt, 0, (GLsizeiptr)data->size, data->ptr); + _sg_gl_cache_restore_buffer_binding(gl_tgt); + _SG_GL_CHECK_ERROR(); +} + +_SOKOL_PRIVATE int _sg_gl_append_buffer(_sg_buffer_t* buf, const sg_range* data, bool new_frame) { + SOKOL_ASSERT(buf && data && data->ptr && (data->size > 0)); + if (new_frame) { + if (++buf->cmn.active_slot >= buf->cmn.num_slots) { + buf->cmn.active_slot = 0; + } + } + GLenum gl_tgt = _sg_gl_buffer_target(buf->cmn.type); + SOKOL_ASSERT(buf->cmn.active_slot < SG_NUM_INFLIGHT_FRAMES); + GLuint gl_buf = buf->gl.buf[buf->cmn.active_slot]; + SOKOL_ASSERT(gl_buf); + _SG_GL_CHECK_ERROR(); + _sg_gl_cache_store_buffer_binding(gl_tgt); + _sg_gl_cache_bind_buffer(gl_tgt, gl_buf); + glBufferSubData(gl_tgt, buf->cmn.append_pos, (GLsizeiptr)data->size, data->ptr); + _sg_gl_cache_restore_buffer_binding(gl_tgt); + _SG_GL_CHECK_ERROR(); + /* NOTE: this is a requirement from WebGPU, but we want identical behaviour across all backend */ + return _sg_roundup((int)data->size, 4); +} + +_SOKOL_PRIVATE void _sg_gl_update_image(_sg_image_t* img, const sg_image_data* data) { + SOKOL_ASSERT(img && data); + /* only one update per image per frame allowed */ + if (++img->cmn.active_slot >= img->cmn.num_slots) { + img->cmn.active_slot = 0; + } + SOKOL_ASSERT(img->cmn.active_slot < SG_NUM_INFLIGHT_FRAMES); + SOKOL_ASSERT(0 != img->gl.tex[img->cmn.active_slot]); + _sg_gl_cache_store_texture_binding(0); + _sg_gl_cache_bind_texture(0, img->gl.target, img->gl.tex[img->cmn.active_slot]); + const GLenum gl_img_format = _sg_gl_teximage_format(img->cmn.pixel_format); + const GLenum gl_img_type = _sg_gl_teximage_type(img->cmn.pixel_format); + const int num_faces = img->cmn.type == SG_IMAGETYPE_CUBE ? 
6 : 1; + const int num_mips = img->cmn.num_mipmaps; + for (int face_index = 0; face_index < num_faces; face_index++) { + for (int mip_index = 0; mip_index < num_mips; mip_index++) { + GLenum gl_img_target = img->gl.target; + if (SG_IMAGETYPE_CUBE == img->cmn.type) { + gl_img_target = _sg_gl_cubeface_target(face_index); + } + const GLvoid* data_ptr = data->subimage[face_index][mip_index].ptr; + int mip_width = img->cmn.width >> mip_index; + if (mip_width == 0) { + mip_width = 1; + } + int mip_height = img->cmn.height >> mip_index; + if (mip_height == 0) { + mip_height = 1; + } + if ((SG_IMAGETYPE_2D == img->cmn.type) || (SG_IMAGETYPE_CUBE == img->cmn.type)) { + glTexSubImage2D(gl_img_target, mip_index, + 0, 0, + mip_width, mip_height, + gl_img_format, gl_img_type, + data_ptr); + } + #if !defined(SOKOL_GLES2) + else if (!_sg.gl.gles2 && ((SG_IMAGETYPE_3D == img->cmn.type) || (SG_IMAGETYPE_ARRAY == img->cmn.type))) { + int mip_depth = img->cmn.num_slices >> mip_index; + if (mip_depth == 0) { + mip_depth = 1; + } + glTexSubImage3D(gl_img_target, mip_index, + 0, 0, 0, + mip_width, mip_height, mip_depth, + gl_img_format, gl_img_type, + data_ptr); + + } + #endif + } + } + _sg_gl_cache_restore_texture_binding(0); +} + +/*== D3D11 BACKEND IMPLEMENTATION ============================================*/ +#elif defined(SOKOL_D3D11) + +#if defined(__cplusplus) +#define _sg_d3d11_AddRef(self) (self)->AddRef() +#else +#define _sg_d3d11_AddRef(self) (self)->lpVtbl->AddRef(self) +#endif + +#if defined(__cplusplus) +#define _sg_d3d11_Release(self) (self)->Release() +#else +#define _sg_d3d11_Release(self) (self)->lpVtbl->Release(self) +#endif + +/*-- D3D11 C/C++ wrappers ----------------------------------------------------*/ +static inline HRESULT _sg_d3d11_CheckFormatSupport(ID3D11Device* self, DXGI_FORMAT Format, UINT* pFormatSupport) { + #if defined(__cplusplus) + return self->CheckFormatSupport(Format, pFormatSupport); + #else + return self->lpVtbl->CheckFormatSupport(self, Format, pFormatSupport); + #endif +} + +static inline void _sg_d3d11_OMSetRenderTargets(ID3D11DeviceContext* self, UINT NumViews, ID3D11RenderTargetView* const* ppRenderTargetViews, ID3D11DepthStencilView *pDepthStencilView) { + #if defined(__cplusplus) + self->OMSetRenderTargets(NumViews, ppRenderTargetViews, pDepthStencilView); + #else + self->lpVtbl->OMSetRenderTargets(self, NumViews, ppRenderTargetViews, pDepthStencilView); + #endif +} + +static inline void _sg_d3d11_RSSetState(ID3D11DeviceContext* self, ID3D11RasterizerState* pRasterizerState) { + #if defined(__cplusplus) + self->RSSetState(pRasterizerState); + #else + self->lpVtbl->RSSetState(self, pRasterizerState); + #endif +} + +static inline void _sg_d3d11_OMSetDepthStencilState(ID3D11DeviceContext* self, ID3D11DepthStencilState* pDepthStencilState, UINT StencilRef) { + #if defined(__cplusplus) + self->OMSetDepthStencilState(pDepthStencilState, StencilRef); + #else + self->lpVtbl->OMSetDepthStencilState(self, pDepthStencilState, StencilRef); + #endif +} + +static inline void _sg_d3d11_OMSetBlendState(ID3D11DeviceContext* self, ID3D11BlendState* pBlendState, const FLOAT BlendFactor[4], UINT SampleMask) { + #if defined(__cplusplus) + self->OMSetBlendState(pBlendState, BlendFactor, SampleMask); + #else + self->lpVtbl->OMSetBlendState(self, pBlendState, BlendFactor, SampleMask); + #endif +} + +static inline void _sg_d3d11_IASetVertexBuffers(ID3D11DeviceContext* self, UINT StartSlot, UINT NumBuffers, ID3D11Buffer* const* ppVertexBuffers, const UINT* pStrides, const UINT* pOffsets) 
{ + #if defined(__cplusplus) + self->IASetVertexBuffers(StartSlot, NumBuffers, ppVertexBuffers, pStrides, pOffsets); + #else + self->lpVtbl->IASetVertexBuffers(self, StartSlot, NumBuffers, ppVertexBuffers, pStrides, pOffsets); + #endif +} + +static inline void _sg_d3d11_IASetIndexBuffer(ID3D11DeviceContext* self, ID3D11Buffer* pIndexBuffer, DXGI_FORMAT Format, UINT Offset) { + #if defined(__cplusplus) + self->IASetIndexBuffer(pIndexBuffer, Format, Offset); + #else + self->lpVtbl->IASetIndexBuffer(self, pIndexBuffer, Format, Offset); + #endif +} + +static inline void _sg_d3d11_IASetInputLayout(ID3D11DeviceContext* self, ID3D11InputLayout* pInputLayout) { + #if defined(__cplusplus) + self->IASetInputLayout(pInputLayout); + #else + self->lpVtbl->IASetInputLayout(self, pInputLayout); + #endif +} + +static inline void _sg_d3d11_VSSetShader(ID3D11DeviceContext* self, ID3D11VertexShader* pVertexShader, ID3D11ClassInstance* const* ppClassInstances, UINT NumClassInstances) { + #if defined(__cplusplus) + self->VSSetShader(pVertexShader, ppClassInstances, NumClassInstances); + #else + self->lpVtbl->VSSetShader(self, pVertexShader, ppClassInstances, NumClassInstances); + #endif +} + +static inline void _sg_d3d11_PSSetShader(ID3D11DeviceContext* self, ID3D11PixelShader* pPixelShader, ID3D11ClassInstance* const* ppClassInstances, UINT NumClassInstances) { + #if defined(__cplusplus) + self->PSSetShader(pPixelShader, ppClassInstances, NumClassInstances); + #else + self->lpVtbl->PSSetShader(self, pPixelShader, ppClassInstances, NumClassInstances); + #endif +} + +static inline void _sg_d3d11_VSSetConstantBuffers(ID3D11DeviceContext* self, UINT StartSlot, UINT NumBuffers, ID3D11Buffer* const* ppConstantBuffers) { + #if defined(__cplusplus) + self->VSSetConstantBuffers(StartSlot, NumBuffers, ppConstantBuffers); + #else + self->lpVtbl->VSSetConstantBuffers(self, StartSlot, NumBuffers, ppConstantBuffers); + #endif +} + +static inline void _sg_d3d11_PSSetConstantBuffers(ID3D11DeviceContext* self, UINT StartSlot, UINT NumBuffers, ID3D11Buffer* const* ppConstantBuffers) { + #if defined(__cplusplus) + self->PSSetConstantBuffers(StartSlot, NumBuffers, ppConstantBuffers); + #else + self->lpVtbl->PSSetConstantBuffers(self, StartSlot, NumBuffers, ppConstantBuffers); + #endif +} + +static inline void _sg_d3d11_VSSetShaderResources(ID3D11DeviceContext* self, UINT StartSlot, UINT NumViews, ID3D11ShaderResourceView* const* ppShaderResourceViews) { + #if defined(__cplusplus) + self->VSSetShaderResources(StartSlot, NumViews, ppShaderResourceViews); + #else + self->lpVtbl->VSSetShaderResources(self, StartSlot, NumViews, ppShaderResourceViews); + #endif +} + +static inline void _sg_d3d11_PSSetShaderResources(ID3D11DeviceContext* self, UINT StartSlot, UINT NumViews, ID3D11ShaderResourceView* const* ppShaderResourceViews) { + #if defined(__cplusplus) + self->PSSetShaderResources(StartSlot, NumViews, ppShaderResourceViews); + #else + self->lpVtbl->PSSetShaderResources(self, StartSlot, NumViews, ppShaderResourceViews); + #endif +} + +static inline void _sg_d3d11_VSSetSamplers(ID3D11DeviceContext* self, UINT StartSlot, UINT NumSamplers, ID3D11SamplerState* const* ppSamplers) { + #if defined(__cplusplus) + self->VSSetSamplers(StartSlot, NumSamplers, ppSamplers); + #else + self->lpVtbl->VSSetSamplers(self, StartSlot, NumSamplers, ppSamplers); + #endif +} + +static inline void _sg_d3d11_PSSetSamplers(ID3D11DeviceContext* self, UINT StartSlot, UINT NumSamplers, ID3D11SamplerState* const* ppSamplers) { + #if defined(__cplusplus) + 
self->PSSetSamplers(StartSlot, NumSamplers, ppSamplers); + #else + self->lpVtbl->PSSetSamplers(self, StartSlot, NumSamplers, ppSamplers); + #endif +} + +static inline HRESULT _sg_d3d11_CreateBuffer(ID3D11Device* self, const D3D11_BUFFER_DESC* pDesc, const D3D11_SUBRESOURCE_DATA* pInitialData, ID3D11Buffer** ppBuffer) { + #if defined(__cplusplus) + return self->CreateBuffer(pDesc, pInitialData, ppBuffer); + #else + return self->lpVtbl->CreateBuffer(self, pDesc, pInitialData, ppBuffer); + #endif +} + +static inline HRESULT _sg_d3d11_CreateTexture2D(ID3D11Device* self, const D3D11_TEXTURE2D_DESC* pDesc, const D3D11_SUBRESOURCE_DATA* pInitialData, ID3D11Texture2D** ppTexture2D) { + #if defined(__cplusplus) + return self->CreateTexture2D(pDesc, pInitialData, ppTexture2D); + #else + return self->lpVtbl->CreateTexture2D(self, pDesc, pInitialData, ppTexture2D); + #endif +} + +static inline HRESULT _sg_d3d11_CreateShaderResourceView(ID3D11Device* self, ID3D11Resource* pResource, const D3D11_SHADER_RESOURCE_VIEW_DESC* pDesc, ID3D11ShaderResourceView** ppSRView) { + #if defined(__cplusplus) + return self->CreateShaderResourceView(pResource, pDesc, ppSRView); + #else + return self->lpVtbl->CreateShaderResourceView(self, pResource, pDesc, ppSRView); + #endif +} + +static inline void _sg_d3d11_GetResource(ID3D11View* self, ID3D11Resource** ppResource) { + #if defined(__cplusplus) + self->GetResource(ppResource); + #else + self->lpVtbl->GetResource(self, ppResource); + #endif +} + +static inline HRESULT _sg_d3d11_CreateTexture3D(ID3D11Device* self, const D3D11_TEXTURE3D_DESC* pDesc, const D3D11_SUBRESOURCE_DATA* pInitialData, ID3D11Texture3D** ppTexture3D) { + #if defined(__cplusplus) + return self->CreateTexture3D(pDesc, pInitialData, ppTexture3D); + #else + return self->lpVtbl->CreateTexture3D(self, pDesc, pInitialData, ppTexture3D); + #endif +} + +static inline HRESULT _sg_d3d11_CreateSamplerState(ID3D11Device* self, const D3D11_SAMPLER_DESC* pSamplerDesc, ID3D11SamplerState** ppSamplerState) { + #if defined(__cplusplus) + return self->CreateSamplerState(pSamplerDesc, ppSamplerState); + #else + return self->lpVtbl->CreateSamplerState(self, pSamplerDesc, ppSamplerState); + #endif +} + +static inline LPVOID _sg_d3d11_GetBufferPointer(ID3D10Blob* self) { + #if defined(__cplusplus) + return self->GetBufferPointer(); + #else + return self->lpVtbl->GetBufferPointer(self); + #endif +} + +static inline SIZE_T _sg_d3d11_GetBufferSize(ID3D10Blob* self) { + #if defined(__cplusplus) + return self->GetBufferSize(); + #else + return self->lpVtbl->GetBufferSize(self); + #endif +} + +static inline HRESULT _sg_d3d11_CreateVertexShader(ID3D11Device* self, const void* pShaderBytecode, SIZE_T BytecodeLength, ID3D11ClassLinkage* pClassLinkage, ID3D11VertexShader** ppVertexShader) { + #if defined(__cplusplus) + return self->CreateVertexShader(pShaderBytecode, BytecodeLength, pClassLinkage, ppVertexShader); + #else + return self->lpVtbl->CreateVertexShader(self, pShaderBytecode, BytecodeLength, pClassLinkage, ppVertexShader); + #endif +} + +static inline HRESULT _sg_d3d11_CreatePixelShader(ID3D11Device* self, const void* pShaderBytecode, SIZE_T BytecodeLength, ID3D11ClassLinkage* pClassLinkage, ID3D11PixelShader** ppPixelShader) { + #if defined(__cplusplus) + return self->CreatePixelShader(pShaderBytecode, BytecodeLength, pClassLinkage, ppPixelShader); + #else + return self->lpVtbl->CreatePixelShader(self, pShaderBytecode, BytecodeLength, pClassLinkage, ppPixelShader); + #endif +} + +static inline HRESULT 
_sg_d3d11_CreateInputLayout(ID3D11Device* self, const D3D11_INPUT_ELEMENT_DESC* pInputElementDescs, UINT NumElements, const void* pShaderBytecodeWithInputSignature, SIZE_T BytecodeLength, ID3D11InputLayout **ppInputLayout) { + #if defined(__cplusplus) + return self->CreateInputLayout(pInputElementDescs, NumElements, pShaderBytecodeWithInputSignature, BytecodeLength, ppInputLayout); + #else + return self->lpVtbl->CreateInputLayout(self, pInputElementDescs, NumElements, pShaderBytecodeWithInputSignature, BytecodeLength, ppInputLayout); + #endif +} + +static inline HRESULT _sg_d3d11_CreateRasterizerState(ID3D11Device* self, const D3D11_RASTERIZER_DESC* pRasterizerDesc, ID3D11RasterizerState** ppRasterizerState) { + #if defined(__cplusplus) + return self->CreateRasterizerState(pRasterizerDesc, ppRasterizerState); + #else + return self->lpVtbl->CreateRasterizerState(self, pRasterizerDesc, ppRasterizerState); + #endif +} + +static inline HRESULT _sg_d3d11_CreateDepthStencilState(ID3D11Device* self, const D3D11_DEPTH_STENCIL_DESC* pDepthStencilDesc, ID3D11DepthStencilState** ppDepthStencilState) { + #if defined(__cplusplus) + return self->CreateDepthStencilState(pDepthStencilDesc, ppDepthStencilState); + #else + return self->lpVtbl->CreateDepthStencilState(self, pDepthStencilDesc, ppDepthStencilState); + #endif +} + +static inline HRESULT _sg_d3d11_CreateBlendState(ID3D11Device* self, const D3D11_BLEND_DESC* pBlendStateDesc, ID3D11BlendState** ppBlendState) { + #if defined(__cplusplus) + return self->CreateBlendState(pBlendStateDesc, ppBlendState); + #else + return self->lpVtbl->CreateBlendState(self, pBlendStateDesc, ppBlendState); + #endif +} + +static inline HRESULT _sg_d3d11_CreateRenderTargetView(ID3D11Device* self, ID3D11Resource *pResource, const D3D11_RENDER_TARGET_VIEW_DESC* pDesc, ID3D11RenderTargetView** ppRTView) { + #if defined(__cplusplus) + return self->CreateRenderTargetView(pResource, pDesc, ppRTView); + #else + return self->lpVtbl->CreateRenderTargetView(self, pResource, pDesc, ppRTView); + #endif +} + +static inline HRESULT _sg_d3d11_CreateDepthStencilView(ID3D11Device* self, ID3D11Resource* pResource, const D3D11_DEPTH_STENCIL_VIEW_DESC* pDesc, ID3D11DepthStencilView** ppDepthStencilView) { + #if defined(__cplusplus) + return self->CreateDepthStencilView(pResource, pDesc, ppDepthStencilView); + #else + return self->lpVtbl->CreateDepthStencilView(self, pResource, pDesc, ppDepthStencilView); + #endif +} + +static inline void _sg_d3d11_RSSetViewports(ID3D11DeviceContext* self, UINT NumViewports, const D3D11_VIEWPORT* pViewports) { + #if defined(__cplusplus) + self->RSSetViewports(NumViewports, pViewports); + #else + self->lpVtbl->RSSetViewports(self, NumViewports, pViewports); + #endif +} + +static inline void _sg_d3d11_RSSetScissorRects(ID3D11DeviceContext* self, UINT NumRects, const D3D11_RECT* pRects) { + #if defined(__cplusplus) + self->RSSetScissorRects(NumRects, pRects); + #else + self->lpVtbl->RSSetScissorRects(self, NumRects, pRects); + #endif +} + +static inline void _sg_d3d11_ClearRenderTargetView(ID3D11DeviceContext* self, ID3D11RenderTargetView* pRenderTargetView, const FLOAT ColorRGBA[4]) { + #if defined(__cplusplus) + self->ClearRenderTargetView(pRenderTargetView, ColorRGBA); + #else + self->lpVtbl->ClearRenderTargetView(self, pRenderTargetView, ColorRGBA); + #endif +} + +static inline void _sg_d3d11_ClearDepthStencilView(ID3D11DeviceContext* self, ID3D11DepthStencilView* pDepthStencilView, UINT ClearFlags, FLOAT Depth, UINT8 Stencil) { + #if defined(__cplusplus) + 
self->ClearDepthStencilView(pDepthStencilView, ClearFlags, Depth, Stencil); + #else + self->lpVtbl->ClearDepthStencilView(self, pDepthStencilView, ClearFlags, Depth, Stencil); + #endif +} + +static inline void _sg_d3d11_ResolveSubresource(ID3D11DeviceContext* self, ID3D11Resource* pDstResource, UINT DstSubresource, ID3D11Resource* pSrcResource, UINT SrcSubresource, DXGI_FORMAT Format) { + #if defined(__cplusplus) + self->ResolveSubresource(pDstResource, DstSubresource, pSrcResource, SrcSubresource, Format); + #else + self->lpVtbl->ResolveSubresource(self, pDstResource, DstSubresource, pSrcResource, SrcSubresource, Format); + #endif +} + +static inline void _sg_d3d11_IASetPrimitiveTopology(ID3D11DeviceContext* self, D3D11_PRIMITIVE_TOPOLOGY Topology) { + #if defined(__cplusplus) + self->IASetPrimitiveTopology(Topology); + #else + self->lpVtbl->IASetPrimitiveTopology(self, Topology); + #endif +} + +static inline void _sg_d3d11_UpdateSubresource(ID3D11DeviceContext* self, ID3D11Resource* pDstResource, UINT DstSubresource, const D3D11_BOX* pDstBox, const void* pSrcData, UINT SrcRowPitch, UINT SrcDepthPitch) { + #if defined(__cplusplus) + self->UpdateSubresource(pDstResource, DstSubresource, pDstBox, pSrcData, SrcRowPitch, SrcDepthPitch); + #else + self->lpVtbl->UpdateSubresource(self, pDstResource, DstSubresource, pDstBox, pSrcData, SrcRowPitch, SrcDepthPitch); + #endif +} + +static inline void _sg_d3d11_DrawIndexed(ID3D11DeviceContext* self, UINT IndexCount, UINT StartIndexLocation, INT BaseVertexLocation) { + #if defined(__cplusplus) + self->DrawIndexed(IndexCount, StartIndexLocation, BaseVertexLocation); + #else + self->lpVtbl->DrawIndexed(self, IndexCount, StartIndexLocation, BaseVertexLocation); + #endif +} + +static inline void _sg_d3d11_DrawIndexedInstanced(ID3D11DeviceContext* self, UINT IndexCountPerInstance, UINT InstanceCount, UINT StartIndexLocation, INT BaseVertexLocation, UINT StartInstanceLocation) { + #if defined(__cplusplus) + self->DrawIndexedInstanced(IndexCountPerInstance, InstanceCount, StartIndexLocation, BaseVertexLocation, StartInstanceLocation); + #else + self->lpVtbl->DrawIndexedInstanced(self, IndexCountPerInstance, InstanceCount, StartIndexLocation, BaseVertexLocation, StartInstanceLocation); + #endif +} + +static inline void _sg_d3d11_Draw(ID3D11DeviceContext* self, UINT VertexCount, UINT StartVertexLocation) { + #if defined(__cplusplus) + self->Draw(VertexCount, StartVertexLocation); + #else + self->lpVtbl->Draw(self, VertexCount, StartVertexLocation); + #endif +} + +static inline void _sg_d3d11_DrawInstanced(ID3D11DeviceContext* self, UINT VertexCountPerInstance, UINT InstanceCount, UINT StartVertexLocation, UINT StartInstanceLocation) { + #if defined(__cplusplus) + self->DrawInstanced(VertexCountPerInstance, InstanceCount, StartVertexLocation, StartInstanceLocation); + #else + self->lpVtbl->DrawInstanced(self, VertexCountPerInstance, InstanceCount, StartVertexLocation, StartInstanceLocation); + #endif +} + +static inline HRESULT _sg_d3d11_Map(ID3D11DeviceContext* self, ID3D11Resource* pResource, UINT Subresource, D3D11_MAP MapType, UINT MapFlags, D3D11_MAPPED_SUBRESOURCE* pMappedResource) { + #if defined(__cplusplus) + return self->Map(pResource, Subresource, MapType, MapFlags, pMappedResource); + #else + return self->lpVtbl->Map(self, pResource, Subresource, MapType, MapFlags, pMappedResource); + #endif +} + +static inline void _sg_d3d11_Unmap(ID3D11DeviceContext* self, ID3D11Resource* pResource, UINT Subresource) { + #if defined(__cplusplus) + 
self->Unmap(pResource, Subresource); + #else + self->lpVtbl->Unmap(self, pResource, Subresource); + #endif +} + +/*-- enum translation functions ----------------------------------------------*/ +_SOKOL_PRIVATE D3D11_USAGE _sg_d3d11_usage(sg_usage usg) { + switch (usg) { + case SG_USAGE_IMMUTABLE: + return D3D11_USAGE_IMMUTABLE; + case SG_USAGE_DYNAMIC: + case SG_USAGE_STREAM: + return D3D11_USAGE_DYNAMIC; + default: + SOKOL_UNREACHABLE; + return (D3D11_USAGE) 0; + } +} + +_SOKOL_PRIVATE UINT _sg_d3d11_cpu_access_flags(sg_usage usg) { + switch (usg) { + case SG_USAGE_IMMUTABLE: + return 0; + case SG_USAGE_DYNAMIC: + case SG_USAGE_STREAM: + return D3D11_CPU_ACCESS_WRITE; + default: + SOKOL_UNREACHABLE; + return 0; + } +} + +_SOKOL_PRIVATE DXGI_FORMAT _sg_d3d11_pixel_format(sg_pixel_format fmt) { + switch (fmt) { + case SG_PIXELFORMAT_R8: return DXGI_FORMAT_R8_UNORM; + case SG_PIXELFORMAT_R8SN: return DXGI_FORMAT_R8_SNORM; + case SG_PIXELFORMAT_R8UI: return DXGI_FORMAT_R8_UINT; + case SG_PIXELFORMAT_R8SI: return DXGI_FORMAT_R8_SINT; + case SG_PIXELFORMAT_R16: return DXGI_FORMAT_R16_UNORM; + case SG_PIXELFORMAT_R16SN: return DXGI_FORMAT_R16_SNORM; + case SG_PIXELFORMAT_R16UI: return DXGI_FORMAT_R16_UINT; + case SG_PIXELFORMAT_R16SI: return DXGI_FORMAT_R16_SINT; + case SG_PIXELFORMAT_R16F: return DXGI_FORMAT_R16_FLOAT; + case SG_PIXELFORMAT_RG8: return DXGI_FORMAT_R8G8_UNORM; + case SG_PIXELFORMAT_RG8SN: return DXGI_FORMAT_R8G8_SNORM; + case SG_PIXELFORMAT_RG8UI: return DXGI_FORMAT_R8G8_UINT; + case SG_PIXELFORMAT_RG8SI: return DXGI_FORMAT_R8G8_SINT; + case SG_PIXELFORMAT_R32UI: return DXGI_FORMAT_R32_UINT; + case SG_PIXELFORMAT_R32SI: return DXGI_FORMAT_R32_SINT; + case SG_PIXELFORMAT_R32F: return DXGI_FORMAT_R32_FLOAT; + case SG_PIXELFORMAT_RG16: return DXGI_FORMAT_R16G16_UNORM; + case SG_PIXELFORMAT_RG16SN: return DXGI_FORMAT_R16G16_SNORM; + case SG_PIXELFORMAT_RG16UI: return DXGI_FORMAT_R16G16_UINT; + case SG_PIXELFORMAT_RG16SI: return DXGI_FORMAT_R16G16_SINT; + case SG_PIXELFORMAT_RG16F: return DXGI_FORMAT_R16G16_FLOAT; + case SG_PIXELFORMAT_RGBA8: return DXGI_FORMAT_R8G8B8A8_UNORM; + case SG_PIXELFORMAT_RGBA8SN: return DXGI_FORMAT_R8G8B8A8_SNORM; + case SG_PIXELFORMAT_RGBA8UI: return DXGI_FORMAT_R8G8B8A8_UINT; + case SG_PIXELFORMAT_RGBA8SI: return DXGI_FORMAT_R8G8B8A8_SINT; + case SG_PIXELFORMAT_BGRA8: return DXGI_FORMAT_B8G8R8A8_UNORM; + case SG_PIXELFORMAT_RGB10A2: return DXGI_FORMAT_R10G10B10A2_UNORM; + case SG_PIXELFORMAT_RG11B10F: return DXGI_FORMAT_R11G11B10_FLOAT; + case SG_PIXELFORMAT_RG32UI: return DXGI_FORMAT_R32G32_UINT; + case SG_PIXELFORMAT_RG32SI: return DXGI_FORMAT_R32G32_SINT; + case SG_PIXELFORMAT_RG32F: return DXGI_FORMAT_R32G32_FLOAT; + case SG_PIXELFORMAT_RGBA16: return DXGI_FORMAT_R16G16B16A16_UNORM; + case SG_PIXELFORMAT_RGBA16SN: return DXGI_FORMAT_R16G16B16A16_SNORM; + case SG_PIXELFORMAT_RGBA16UI: return DXGI_FORMAT_R16G16B16A16_UINT; + case SG_PIXELFORMAT_RGBA16SI: return DXGI_FORMAT_R16G16B16A16_SINT; + case SG_PIXELFORMAT_RGBA16F: return DXGI_FORMAT_R16G16B16A16_FLOAT; + case SG_PIXELFORMAT_RGBA32UI: return DXGI_FORMAT_R32G32B32A32_UINT; + case SG_PIXELFORMAT_RGBA32SI: return DXGI_FORMAT_R32G32B32A32_SINT; + case SG_PIXELFORMAT_RGBA32F: return DXGI_FORMAT_R32G32B32A32_FLOAT; + case SG_PIXELFORMAT_DEPTH: return DXGI_FORMAT_D32_FLOAT; + case SG_PIXELFORMAT_DEPTH_STENCIL: return DXGI_FORMAT_D24_UNORM_S8_UINT; + case SG_PIXELFORMAT_BC1_RGBA: return DXGI_FORMAT_BC1_UNORM; + case SG_PIXELFORMAT_BC2_RGBA: return DXGI_FORMAT_BC2_UNORM; + case SG_PIXELFORMAT_BC3_RGBA: 
return DXGI_FORMAT_BC3_UNORM; + case SG_PIXELFORMAT_BC4_R: return DXGI_FORMAT_BC4_UNORM; + case SG_PIXELFORMAT_BC4_RSN: return DXGI_FORMAT_BC4_SNORM; + case SG_PIXELFORMAT_BC5_RG: return DXGI_FORMAT_BC5_UNORM; + case SG_PIXELFORMAT_BC5_RGSN: return DXGI_FORMAT_BC5_SNORM; + case SG_PIXELFORMAT_BC6H_RGBF: return DXGI_FORMAT_BC6H_SF16; + case SG_PIXELFORMAT_BC6H_RGBUF: return DXGI_FORMAT_BC6H_UF16; + case SG_PIXELFORMAT_BC7_RGBA: return DXGI_FORMAT_BC7_UNORM; + default: return DXGI_FORMAT_UNKNOWN; + }; +} + +_SOKOL_PRIVATE D3D11_PRIMITIVE_TOPOLOGY _sg_d3d11_primitive_topology(sg_primitive_type prim_type) { + switch (prim_type) { + case SG_PRIMITIVETYPE_POINTS: return D3D11_PRIMITIVE_TOPOLOGY_POINTLIST; + case SG_PRIMITIVETYPE_LINES: return D3D11_PRIMITIVE_TOPOLOGY_LINELIST; + case SG_PRIMITIVETYPE_LINE_STRIP: return D3D11_PRIMITIVE_TOPOLOGY_LINESTRIP; + case SG_PRIMITIVETYPE_TRIANGLES: return D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST; + case SG_PRIMITIVETYPE_TRIANGLE_STRIP: return D3D11_PRIMITIVE_TOPOLOGY_TRIANGLESTRIP; + default: SOKOL_UNREACHABLE; return (D3D11_PRIMITIVE_TOPOLOGY) 0; + } +} + +_SOKOL_PRIVATE DXGI_FORMAT _sg_d3d11_index_format(sg_index_type index_type) { + switch (index_type) { + case SG_INDEXTYPE_NONE: return DXGI_FORMAT_UNKNOWN; + case SG_INDEXTYPE_UINT16: return DXGI_FORMAT_R16_UINT; + case SG_INDEXTYPE_UINT32: return DXGI_FORMAT_R32_UINT; + default: SOKOL_UNREACHABLE; return (DXGI_FORMAT) 0; + } +} + +_SOKOL_PRIVATE D3D11_FILTER _sg_d3d11_filter(sg_filter min_f, sg_filter mag_f, uint32_t max_anisotropy) { + if (max_anisotropy > 1) { + return D3D11_FILTER_ANISOTROPIC; + } + else if (mag_f == SG_FILTER_NEAREST) { + switch (min_f) { + case SG_FILTER_NEAREST: + case SG_FILTER_NEAREST_MIPMAP_NEAREST: + return D3D11_FILTER_MIN_MAG_MIP_POINT; + case SG_FILTER_LINEAR: + case SG_FILTER_LINEAR_MIPMAP_NEAREST: + return D3D11_FILTER_MIN_LINEAR_MAG_MIP_POINT; + case SG_FILTER_NEAREST_MIPMAP_LINEAR: + return D3D11_FILTER_MIN_MAG_POINT_MIP_LINEAR; + case SG_FILTER_LINEAR_MIPMAP_LINEAR: + return D3D11_FILTER_MIN_LINEAR_MAG_POINT_MIP_LINEAR; + default: + SOKOL_UNREACHABLE; break; + } + } + else if (mag_f == SG_FILTER_LINEAR) { + switch (min_f) { + case SG_FILTER_NEAREST: + case SG_FILTER_NEAREST_MIPMAP_NEAREST: + return D3D11_FILTER_MIN_POINT_MAG_LINEAR_MIP_POINT; + case SG_FILTER_LINEAR: + case SG_FILTER_LINEAR_MIPMAP_NEAREST: + return D3D11_FILTER_MIN_MAG_LINEAR_MIP_POINT; + case SG_FILTER_NEAREST_MIPMAP_LINEAR: + return D3D11_FILTER_MIN_POINT_MAG_MIP_LINEAR; + case SG_FILTER_LINEAR_MIPMAP_LINEAR: + return D3D11_FILTER_MIN_MAG_MIP_LINEAR; + default: + SOKOL_UNREACHABLE; break; + } + } + /* invalid value for mag filter */ + SOKOL_UNREACHABLE; + return D3D11_FILTER_MIN_MAG_MIP_POINT; +} + +_SOKOL_PRIVATE D3D11_TEXTURE_ADDRESS_MODE _sg_d3d11_address_mode(sg_wrap m) { + switch (m) { + case SG_WRAP_REPEAT: return D3D11_TEXTURE_ADDRESS_WRAP; + case SG_WRAP_CLAMP_TO_EDGE: return D3D11_TEXTURE_ADDRESS_CLAMP; + case SG_WRAP_CLAMP_TO_BORDER: return D3D11_TEXTURE_ADDRESS_BORDER; + case SG_WRAP_MIRRORED_REPEAT: return D3D11_TEXTURE_ADDRESS_MIRROR; + default: SOKOL_UNREACHABLE; return (D3D11_TEXTURE_ADDRESS_MODE) 0; + } +} + +_SOKOL_PRIVATE DXGI_FORMAT _sg_d3d11_vertex_format(sg_vertex_format fmt) { + switch (fmt) { + case SG_VERTEXFORMAT_FLOAT: return DXGI_FORMAT_R32_FLOAT; + case SG_VERTEXFORMAT_FLOAT2: return DXGI_FORMAT_R32G32_FLOAT; + case SG_VERTEXFORMAT_FLOAT3: return DXGI_FORMAT_R32G32B32_FLOAT; + case SG_VERTEXFORMAT_FLOAT4: return DXGI_FORMAT_R32G32B32A32_FLOAT; + case SG_VERTEXFORMAT_BYTE4: 
return DXGI_FORMAT_R8G8B8A8_SINT; + case SG_VERTEXFORMAT_BYTE4N: return DXGI_FORMAT_R8G8B8A8_SNORM; + case SG_VERTEXFORMAT_UBYTE4: return DXGI_FORMAT_R8G8B8A8_UINT; + case SG_VERTEXFORMAT_UBYTE4N: return DXGI_FORMAT_R8G8B8A8_UNORM; + case SG_VERTEXFORMAT_SHORT2: return DXGI_FORMAT_R16G16_SINT; + case SG_VERTEXFORMAT_SHORT2N: return DXGI_FORMAT_R16G16_SNORM; + case SG_VERTEXFORMAT_USHORT2N: return DXGI_FORMAT_R16G16_UNORM; + case SG_VERTEXFORMAT_SHORT4: return DXGI_FORMAT_R16G16B16A16_SINT; + case SG_VERTEXFORMAT_SHORT4N: return DXGI_FORMAT_R16G16B16A16_SNORM; + case SG_VERTEXFORMAT_USHORT4N: return DXGI_FORMAT_R16G16B16A16_UNORM; + case SG_VERTEXFORMAT_UINT10_N2: return DXGI_FORMAT_R10G10B10A2_UNORM; + default: SOKOL_UNREACHABLE; return (DXGI_FORMAT) 0; + } +} + +_SOKOL_PRIVATE D3D11_INPUT_CLASSIFICATION _sg_d3d11_input_classification(sg_vertex_step step) { + switch (step) { + case SG_VERTEXSTEP_PER_VERTEX: return D3D11_INPUT_PER_VERTEX_DATA; + case SG_VERTEXSTEP_PER_INSTANCE: return D3D11_INPUT_PER_INSTANCE_DATA; + default: SOKOL_UNREACHABLE; return (D3D11_INPUT_CLASSIFICATION) 0; + } +} + +_SOKOL_PRIVATE D3D11_CULL_MODE _sg_d3d11_cull_mode(sg_cull_mode m) { + switch (m) { + case SG_CULLMODE_NONE: return D3D11_CULL_NONE; + case SG_CULLMODE_FRONT: return D3D11_CULL_FRONT; + case SG_CULLMODE_BACK: return D3D11_CULL_BACK; + default: SOKOL_UNREACHABLE; return (D3D11_CULL_MODE) 0; + } +} + +_SOKOL_PRIVATE D3D11_COMPARISON_FUNC _sg_d3d11_compare_func(sg_compare_func f) { + switch (f) { + case SG_COMPAREFUNC_NEVER: return D3D11_COMPARISON_NEVER; + case SG_COMPAREFUNC_LESS: return D3D11_COMPARISON_LESS; + case SG_COMPAREFUNC_EQUAL: return D3D11_COMPARISON_EQUAL; + case SG_COMPAREFUNC_LESS_EQUAL: return D3D11_COMPARISON_LESS_EQUAL; + case SG_COMPAREFUNC_GREATER: return D3D11_COMPARISON_GREATER; + case SG_COMPAREFUNC_NOT_EQUAL: return D3D11_COMPARISON_NOT_EQUAL; + case SG_COMPAREFUNC_GREATER_EQUAL: return D3D11_COMPARISON_GREATER_EQUAL; + case SG_COMPAREFUNC_ALWAYS: return D3D11_COMPARISON_ALWAYS; + default: SOKOL_UNREACHABLE; return (D3D11_COMPARISON_FUNC) 0; + } +} + +_SOKOL_PRIVATE D3D11_STENCIL_OP _sg_d3d11_stencil_op(sg_stencil_op op) { + switch (op) { + case SG_STENCILOP_KEEP: return D3D11_STENCIL_OP_KEEP; + case SG_STENCILOP_ZERO: return D3D11_STENCIL_OP_ZERO; + case SG_STENCILOP_REPLACE: return D3D11_STENCIL_OP_REPLACE; + case SG_STENCILOP_INCR_CLAMP: return D3D11_STENCIL_OP_INCR_SAT; + case SG_STENCILOP_DECR_CLAMP: return D3D11_STENCIL_OP_DECR_SAT; + case SG_STENCILOP_INVERT: return D3D11_STENCIL_OP_INVERT; + case SG_STENCILOP_INCR_WRAP: return D3D11_STENCIL_OP_INCR; + case SG_STENCILOP_DECR_WRAP: return D3D11_STENCIL_OP_DECR; + default: SOKOL_UNREACHABLE; return (D3D11_STENCIL_OP) 0; + } +} + +_SOKOL_PRIVATE D3D11_BLEND _sg_d3d11_blend_factor(sg_blend_factor f) { + switch (f) { + case SG_BLENDFACTOR_ZERO: return D3D11_BLEND_ZERO; + case SG_BLENDFACTOR_ONE: return D3D11_BLEND_ONE; + case SG_BLENDFACTOR_SRC_COLOR: return D3D11_BLEND_SRC_COLOR; + case SG_BLENDFACTOR_ONE_MINUS_SRC_COLOR: return D3D11_BLEND_INV_SRC_COLOR; + case SG_BLENDFACTOR_SRC_ALPHA: return D3D11_BLEND_SRC_ALPHA; + case SG_BLENDFACTOR_ONE_MINUS_SRC_ALPHA: return D3D11_BLEND_INV_SRC_ALPHA; + case SG_BLENDFACTOR_DST_COLOR: return D3D11_BLEND_DEST_COLOR; + case SG_BLENDFACTOR_ONE_MINUS_DST_COLOR: return D3D11_BLEND_INV_DEST_COLOR; + case SG_BLENDFACTOR_DST_ALPHA: return D3D11_BLEND_DEST_ALPHA; + case SG_BLENDFACTOR_ONE_MINUS_DST_ALPHA: return D3D11_BLEND_INV_DEST_ALPHA; + case SG_BLENDFACTOR_SRC_ALPHA_SATURATED: return 
D3D11_BLEND_SRC_ALPHA_SAT; + case SG_BLENDFACTOR_BLEND_COLOR: return D3D11_BLEND_BLEND_FACTOR; + case SG_BLENDFACTOR_ONE_MINUS_BLEND_COLOR: return D3D11_BLEND_INV_BLEND_FACTOR; + case SG_BLENDFACTOR_BLEND_ALPHA: return D3D11_BLEND_BLEND_FACTOR; + case SG_BLENDFACTOR_ONE_MINUS_BLEND_ALPHA: return D3D11_BLEND_INV_BLEND_FACTOR; + default: SOKOL_UNREACHABLE; return (D3D11_BLEND) 0; + } +} + +_SOKOL_PRIVATE D3D11_BLEND_OP _sg_d3d11_blend_op(sg_blend_op op) { + switch (op) { + case SG_BLENDOP_ADD: return D3D11_BLEND_OP_ADD; + case SG_BLENDOP_SUBTRACT: return D3D11_BLEND_OP_SUBTRACT; + case SG_BLENDOP_REVERSE_SUBTRACT: return D3D11_BLEND_OP_REV_SUBTRACT; + default: SOKOL_UNREACHABLE; return (D3D11_BLEND_OP) 0; + } +} + +_SOKOL_PRIVATE UINT8 _sg_d3d11_color_write_mask(sg_color_mask m) { + UINT8 res = 0; + if (m & SG_COLORMASK_R) { + res |= D3D11_COLOR_WRITE_ENABLE_RED; + } + if (m & SG_COLORMASK_G) { + res |= D3D11_COLOR_WRITE_ENABLE_GREEN; + } + if (m & SG_COLORMASK_B) { + res |= D3D11_COLOR_WRITE_ENABLE_BLUE; + } + if (m & SG_COLORMASK_A) { + res |= D3D11_COLOR_WRITE_ENABLE_ALPHA; + } + return res; +} + +/* see: https://docs.microsoft.com/en-us/windows/win32/direct3d11/overviews-direct3d-11-resources-limits#resource-limits-for-feature-level-11-hardware */ +_SOKOL_PRIVATE void _sg_d3d11_init_caps(void) { + _sg.backend = SG_BACKEND_D3D11; + + _sg.features.instancing = true; + _sg.features.origin_top_left = true; + _sg.features.multiple_render_targets = true; + _sg.features.msaa_render_targets = true; + _sg.features.imagetype_3d = true; + _sg.features.imagetype_array = true; + _sg.features.image_clamp_to_border = true; + _sg.features.mrt_independent_blend_state = true; + _sg.features.mrt_independent_write_mask = true; + + _sg.limits.max_image_size_2d = 16 * 1024; + _sg.limits.max_image_size_cube = 16 * 1024; + _sg.limits.max_image_size_3d = 2 * 1024; + _sg.limits.max_image_size_array = 16 * 1024; + _sg.limits.max_image_array_layers = 2 * 1024; + _sg.limits.max_vertex_attrs = SG_MAX_VERTEX_ATTRIBUTES; + + /* see: https://docs.microsoft.com/en-us/windows/win32/api/d3d11/ne-d3d11-d3d11_format_support */ + for (int fmt = (SG_PIXELFORMAT_NONE+1); fmt < _SG_PIXELFORMAT_NUM; fmt++) { + UINT dxgi_fmt_caps = 0; + const DXGI_FORMAT dxgi_fmt = _sg_d3d11_pixel_format((sg_pixel_format)fmt); + if (dxgi_fmt != DXGI_FORMAT_UNKNOWN) { + HRESULT hr = _sg_d3d11_CheckFormatSupport(_sg.d3d11.dev, dxgi_fmt, &dxgi_fmt_caps); + SOKOL_ASSERT(SUCCEEDED(hr) || (E_FAIL == hr)); + if (!SUCCEEDED(hr)) { + dxgi_fmt_caps = 0; + } + } + sg_pixelformat_info* info = &_sg.formats[fmt]; + info->sample = 0 != (dxgi_fmt_caps & D3D11_FORMAT_SUPPORT_TEXTURE2D); + info->filter = 0 != (dxgi_fmt_caps & D3D11_FORMAT_SUPPORT_SHADER_SAMPLE); + info->render = 0 != (dxgi_fmt_caps & D3D11_FORMAT_SUPPORT_RENDER_TARGET); + info->blend = 0 != (dxgi_fmt_caps & D3D11_FORMAT_SUPPORT_BLENDABLE); + info->msaa = 0 != (dxgi_fmt_caps & D3D11_FORMAT_SUPPORT_MULTISAMPLE_RENDERTARGET); + info->depth = 0 != (dxgi_fmt_caps & D3D11_FORMAT_SUPPORT_DEPTH_STENCIL); + if (info->depth) { + info->render = true; + } + } +} + +_SOKOL_PRIVATE void _sg_d3d11_setup_backend(const sg_desc* desc) { + /* assume _sg.d3d11 already is zero-initialized */ + SOKOL_ASSERT(desc); + SOKOL_ASSERT(desc->context.d3d11.device); + SOKOL_ASSERT(desc->context.d3d11.device_context); + SOKOL_ASSERT(desc->context.d3d11.render_target_view_cb || desc->context.d3d11.render_target_view_userdata_cb); + SOKOL_ASSERT(desc->context.d3d11.depth_stencil_view_cb || 
desc->context.d3d11.depth_stencil_view_userdata_cb); + _sg.d3d11.valid = true; + _sg.d3d11.dev = (ID3D11Device*) desc->context.d3d11.device; + _sg.d3d11.ctx = (ID3D11DeviceContext*) desc->context.d3d11.device_context; + _sg.d3d11.rtv_cb = desc->context.d3d11.render_target_view_cb; + _sg.d3d11.rtv_userdata_cb = desc->context.d3d11.render_target_view_userdata_cb; + _sg.d3d11.dsv_cb = desc->context.d3d11.depth_stencil_view_cb; + _sg.d3d11.dsv_userdata_cb = desc->context.d3d11.depth_stencil_view_userdata_cb; + _sg.d3d11.user_data = desc->context.d3d11.user_data; + _sg_d3d11_init_caps(); +} + +_SOKOL_PRIVATE void _sg_d3d11_discard_backend(void) { + SOKOL_ASSERT(_sg.d3d11.valid); + _sg.d3d11.valid = false; +} + +_SOKOL_PRIVATE void _sg_d3d11_clear_state(void) { + /* clear all the device context state, so that resource refs don't keep stuck in the d3d device context */ + _sg_d3d11_OMSetRenderTargets(_sg.d3d11.ctx, SG_MAX_COLOR_ATTACHMENTS, _sg.d3d11.zero_rtvs, NULL); + _sg_d3d11_RSSetState(_sg.d3d11.ctx, NULL); + _sg_d3d11_OMSetDepthStencilState(_sg.d3d11.ctx, NULL, 0); + _sg_d3d11_OMSetBlendState(_sg.d3d11.ctx, NULL, NULL, 0xFFFFFFFF); + _sg_d3d11_IASetVertexBuffers(_sg.d3d11.ctx, 0, SG_MAX_SHADERSTAGE_BUFFERS, _sg.d3d11.zero_vbs, _sg.d3d11.zero_vb_strides, _sg.d3d11.zero_vb_offsets); + _sg_d3d11_IASetIndexBuffer(_sg.d3d11.ctx, NULL, DXGI_FORMAT_UNKNOWN, 0); + _sg_d3d11_IASetInputLayout(_sg.d3d11.ctx, NULL); + _sg_d3d11_VSSetShader(_sg.d3d11.ctx, NULL, NULL, 0); + _sg_d3d11_PSSetShader(_sg.d3d11.ctx, NULL, NULL, 0); + _sg_d3d11_VSSetConstantBuffers(_sg.d3d11.ctx, 0, SG_MAX_SHADERSTAGE_UBS, _sg.d3d11.zero_cbs); + _sg_d3d11_PSSetConstantBuffers(_sg.d3d11.ctx, 0, SG_MAX_SHADERSTAGE_UBS, _sg.d3d11.zero_cbs); + _sg_d3d11_VSSetShaderResources(_sg.d3d11.ctx, 0, SG_MAX_SHADERSTAGE_IMAGES, _sg.d3d11.zero_srvs); + _sg_d3d11_PSSetShaderResources(_sg.d3d11.ctx, 0, SG_MAX_SHADERSTAGE_IMAGES, _sg.d3d11.zero_srvs); + _sg_d3d11_VSSetSamplers(_sg.d3d11.ctx, 0, SG_MAX_SHADERSTAGE_IMAGES, _sg.d3d11.zero_smps); + _sg_d3d11_PSSetSamplers(_sg.d3d11.ctx, 0, SG_MAX_SHADERSTAGE_IMAGES, _sg.d3d11.zero_smps); +} + +_SOKOL_PRIVATE void _sg_d3d11_reset_state_cache(void) { + /* just clear the d3d11 device context state */ + _sg_d3d11_clear_state(); +} + +_SOKOL_PRIVATE void _sg_d3d11_activate_context(_sg_context_t* ctx) { + _SOKOL_UNUSED(ctx); + _sg_d3d11_clear_state(); +} + +_SOKOL_PRIVATE sg_resource_state _sg_d3d11_create_context(_sg_context_t* ctx) { + SOKOL_ASSERT(ctx); + _SOKOL_UNUSED(ctx); + return SG_RESOURCESTATE_VALID; +} + +_SOKOL_PRIVATE void _sg_d3d11_destroy_context(_sg_context_t* ctx) { + SOKOL_ASSERT(ctx); + _SOKOL_UNUSED(ctx); + /* empty */ +} + +_SOKOL_PRIVATE sg_resource_state _sg_d3d11_create_buffer(_sg_buffer_t* buf, const sg_buffer_desc* desc) { + SOKOL_ASSERT(buf && desc); + SOKOL_ASSERT(!buf->d3d11.buf); + _sg_buffer_common_init(&buf->cmn, desc); + const bool injected = (0 != desc->d3d11_buffer); + if (injected) { + buf->d3d11.buf = (ID3D11Buffer*) desc->d3d11_buffer; + _sg_d3d11_AddRef(buf->d3d11.buf); + } + else { + D3D11_BUFFER_DESC d3d11_desc; + memset(&d3d11_desc, 0, sizeof(d3d11_desc)); + d3d11_desc.ByteWidth = (UINT)buf->cmn.size; + d3d11_desc.Usage = _sg_d3d11_usage(buf->cmn.usage); + d3d11_desc.BindFlags = buf->cmn.type == SG_BUFFERTYPE_VERTEXBUFFER ? 
D3D11_BIND_VERTEX_BUFFER : D3D11_BIND_INDEX_BUFFER; + d3d11_desc.CPUAccessFlags = _sg_d3d11_cpu_access_flags(buf->cmn.usage); + D3D11_SUBRESOURCE_DATA* init_data_ptr = 0; + D3D11_SUBRESOURCE_DATA init_data; + memset(&init_data, 0, sizeof(init_data)); + if (buf->cmn.usage == SG_USAGE_IMMUTABLE) { + SOKOL_ASSERT(desc->data.ptr); + init_data.pSysMem = desc->data.ptr; + init_data_ptr = &init_data; + } + HRESULT hr = _sg_d3d11_CreateBuffer(_sg.d3d11.dev, &d3d11_desc, init_data_ptr, &buf->d3d11.buf); + _SOKOL_UNUSED(hr); + SOKOL_ASSERT(SUCCEEDED(hr) && buf->d3d11.buf); + } + return SG_RESOURCESTATE_VALID; +} + +_SOKOL_PRIVATE void _sg_d3d11_destroy_buffer(_sg_buffer_t* buf) { + SOKOL_ASSERT(buf); + if (buf->d3d11.buf) { + _sg_d3d11_Release(buf->d3d11.buf); + } +} + +_SOKOL_PRIVATE void _sg_d3d11_fill_subres_data(const _sg_image_t* img, const sg_image_data* data) { + const int num_faces = (img->cmn.type == SG_IMAGETYPE_CUBE) ? 6:1; + const int num_slices = (img->cmn.type == SG_IMAGETYPE_ARRAY) ? img->cmn.num_slices:1; + int subres_index = 0; + for (int face_index = 0; face_index < num_faces; face_index++) { + for (int slice_index = 0; slice_index < num_slices; slice_index++) { + for (int mip_index = 0; mip_index < img->cmn.num_mipmaps; mip_index++, subres_index++) { + SOKOL_ASSERT(subres_index < (SG_MAX_MIPMAPS * SG_MAX_TEXTUREARRAY_LAYERS)); + D3D11_SUBRESOURCE_DATA* subres_data = &_sg.d3d11.subres_data[subres_index]; + const int mip_width = ((img->cmn.width>>mip_index)>0) ? img->cmn.width>>mip_index : 1; + const int mip_height = ((img->cmn.height>>mip_index)>0) ? img->cmn.height>>mip_index : 1; + const sg_range* subimg_data = &(data->subimage[face_index][mip_index]); + const size_t slice_size = subimg_data->size / (size_t)num_slices; + const size_t slice_offset = slice_size * (size_t)slice_index; + const uint8_t* ptr = (const uint8_t*) subimg_data->ptr; + subres_data->pSysMem = ptr + slice_offset; + subres_data->SysMemPitch = (UINT)_sg_row_pitch(img->cmn.pixel_format, mip_width, 1); + if (img->cmn.type == SG_IMAGETYPE_3D) { + /* FIXME? const int mip_depth = ((img->depth>>mip_index)>0) ? img->depth>>mip_index : 1; */ + subres_data->SysMemSlicePitch = (UINT)_sg_surface_pitch(img->cmn.pixel_format, mip_width, mip_height, 1); + } + else { + subres_data->SysMemSlicePitch = 0; + } + } + } + } +} + +_SOKOL_PRIVATE sg_resource_state _sg_d3d11_create_image(_sg_image_t* img, const sg_image_desc* desc) { + SOKOL_ASSERT(img && desc); + SOKOL_ASSERT(!img->d3d11.tex2d && !img->d3d11.tex3d && !img->d3d11.texds && !img->d3d11.texmsaa); + SOKOL_ASSERT(!img->d3d11.srv && !img->d3d11.smp); + HRESULT hr; + _SOKOL_UNUSED(hr); + + _sg_image_common_init(&img->cmn, desc); + const bool injected = (0 != desc->d3d11_texture) || (0 != desc->d3d11_shader_resource_view); + const bool msaa = (img->cmn.sample_count > 1); + img->d3d11.format = _sg_d3d11_pixel_format(img->cmn.pixel_format); + + /* special case depth-stencil buffer? 
*/ + if (_sg_is_valid_rendertarget_depth_format(img->cmn.pixel_format)) { + /* create only a depth-texture */ + SOKOL_ASSERT(!injected); + if (img->d3d11.format == DXGI_FORMAT_UNKNOWN) { + SOKOL_LOG("trying to create a D3D11 depth-texture with unsupported pixel format\n"); + return SG_RESOURCESTATE_FAILED; + } + D3D11_TEXTURE2D_DESC d3d11_desc; + memset(&d3d11_desc, 0, sizeof(d3d11_desc)); + d3d11_desc.Width = (UINT)img->cmn.width; + d3d11_desc.Height = (UINT)img->cmn.height; + d3d11_desc.MipLevels = 1; + d3d11_desc.ArraySize = 1; + d3d11_desc.Format = img->d3d11.format; + d3d11_desc.Usage = D3D11_USAGE_DEFAULT; + d3d11_desc.BindFlags = D3D11_BIND_DEPTH_STENCIL; + d3d11_desc.SampleDesc.Count = (UINT)img->cmn.sample_count; + d3d11_desc.SampleDesc.Quality = (UINT) (msaa ? D3D11_STANDARD_MULTISAMPLE_PATTERN : 0); + hr = _sg_d3d11_CreateTexture2D(_sg.d3d11.dev, &d3d11_desc, NULL, &img->d3d11.texds); + SOKOL_ASSERT(SUCCEEDED(hr) && img->d3d11.texds); + } + else { + /* create (or inject) color texture and shader-resource-view */ + + /* prepare initial content pointers */ + D3D11_SUBRESOURCE_DATA* init_data = 0; + if (!injected && (img->cmn.usage == SG_USAGE_IMMUTABLE) && !img->cmn.render_target) { + _sg_d3d11_fill_subres_data(img, &desc->data); + init_data = _sg.d3d11.subres_data; + } + if (img->cmn.type != SG_IMAGETYPE_3D) { + /* 2D-, cube- or array-texture */ + /* if this is an MSAA render target, the following texture will be the 'resolve-texture' */ + + /* first check for injected texture and/or resource view */ + if (injected) { + img->d3d11.tex2d = (ID3D11Texture2D*) desc->d3d11_texture; + img->d3d11.srv = (ID3D11ShaderResourceView*) desc->d3d11_shader_resource_view; + if (img->d3d11.tex2d) { + _sg_d3d11_AddRef(img->d3d11.tex2d); + } + else { + /* if only a shader-resource-view was provided, but no texture, lookup + the texture from the shader-resource-view, this also bumps the refcount + */ + SOKOL_ASSERT(img->d3d11.srv); + _sg_d3d11_GetResource((ID3D11View*)img->d3d11.srv, (ID3D11Resource**)&img->d3d11.tex2d); + SOKOL_ASSERT(img->d3d11.tex2d); + } + if (img->d3d11.srv) { + _sg_d3d11_AddRef(img->d3d11.srv); + } + } + + /* if not injected, create texture */ + if (0 == img->d3d11.tex2d) { + D3D11_TEXTURE2D_DESC d3d11_tex_desc; + memset(&d3d11_tex_desc, 0, sizeof(d3d11_tex_desc)); + d3d11_tex_desc.Width = (UINT)img->cmn.width; + d3d11_tex_desc.Height = (UINT)img->cmn.height; + d3d11_tex_desc.MipLevels = (UINT)img->cmn.num_mipmaps; + switch (img->cmn.type) { + case SG_IMAGETYPE_ARRAY: d3d11_tex_desc.ArraySize = (UINT)img->cmn.num_slices; break; + case SG_IMAGETYPE_CUBE: d3d11_tex_desc.ArraySize = 6; break; + default: d3d11_tex_desc.ArraySize = 1; break; + } + d3d11_tex_desc.BindFlags = D3D11_BIND_SHADER_RESOURCE; + d3d11_tex_desc.Format = img->d3d11.format; + if (img->cmn.render_target) { + d3d11_tex_desc.Usage = D3D11_USAGE_DEFAULT; + if (!msaa) { + d3d11_tex_desc.BindFlags |= D3D11_BIND_RENDER_TARGET; + } + d3d11_tex_desc.CPUAccessFlags = 0; + } + else { + d3d11_tex_desc.Usage = _sg_d3d11_usage(img->cmn.usage); + d3d11_tex_desc.CPUAccessFlags = _sg_d3d11_cpu_access_flags(img->cmn.usage); + } + if (img->d3d11.format == DXGI_FORMAT_UNKNOWN) { + /* trying to create a texture format that's not supported by D3D */ + SOKOL_LOG("trying to create a D3D11 texture with unsupported pixel format\n"); + return SG_RESOURCESTATE_FAILED; + } + d3d11_tex_desc.SampleDesc.Count = 1; + d3d11_tex_desc.SampleDesc.Quality = 0; + d3d11_tex_desc.MiscFlags = (img->cmn.type == SG_IMAGETYPE_CUBE) ? 
D3D11_RESOURCE_MISC_TEXTURECUBE : 0; + + hr = _sg_d3d11_CreateTexture2D(_sg.d3d11.dev, &d3d11_tex_desc, init_data, &img->d3d11.tex2d); + SOKOL_ASSERT(SUCCEEDED(hr) && img->d3d11.tex2d); + } + + /* ...and similar, if not injected, create shader-resource-view */ + if (0 == img->d3d11.srv) { + D3D11_SHADER_RESOURCE_VIEW_DESC d3d11_srv_desc; + memset(&d3d11_srv_desc, 0, sizeof(d3d11_srv_desc)); + d3d11_srv_desc.Format = img->d3d11.format; + switch (img->cmn.type) { + case SG_IMAGETYPE_2D: + d3d11_srv_desc.ViewDimension = D3D11_SRV_DIMENSION_TEXTURE2D; + d3d11_srv_desc.Texture2D.MipLevels = (UINT)img->cmn.num_mipmaps; + break; + case SG_IMAGETYPE_CUBE: + d3d11_srv_desc.ViewDimension = D3D11_SRV_DIMENSION_TEXTURECUBE; + d3d11_srv_desc.TextureCube.MipLevels = (UINT)img->cmn.num_mipmaps; + break; + case SG_IMAGETYPE_ARRAY: + d3d11_srv_desc.ViewDimension = D3D11_SRV_DIMENSION_TEXTURE2DARRAY; + d3d11_srv_desc.Texture2DArray.MipLevels = (UINT)img->cmn.num_mipmaps; + d3d11_srv_desc.Texture2DArray.ArraySize = (UINT)img->cmn.num_slices; + break; + default: + SOKOL_UNREACHABLE; break; + } + hr = _sg_d3d11_CreateShaderResourceView(_sg.d3d11.dev, (ID3D11Resource*)img->d3d11.tex2d, &d3d11_srv_desc, &img->d3d11.srv); + SOKOL_ASSERT(SUCCEEDED(hr) && img->d3d11.srv); + } + } + else { + /* 3D texture - same procedure, first check if injected, than create non-injected */ + if (injected) { + img->d3d11.tex3d = (ID3D11Texture3D*) desc->d3d11_texture; + img->d3d11.srv = (ID3D11ShaderResourceView*) desc->d3d11_shader_resource_view; + if (img->d3d11.tex3d) { + _sg_d3d11_AddRef(img->d3d11.tex3d); + } + else { + SOKOL_ASSERT(img->d3d11.srv); + _sg_d3d11_GetResource((ID3D11View*)img->d3d11.srv, (ID3D11Resource**)&img->d3d11.tex3d); + SOKOL_ASSERT(img->d3d11.tex3d); + } + if (img->d3d11.srv) { + _sg_d3d11_AddRef(img->d3d11.srv); + } + } + + if (0 == img->d3d11.tex3d) { + D3D11_TEXTURE3D_DESC d3d11_tex_desc; + memset(&d3d11_tex_desc, 0, sizeof(d3d11_tex_desc)); + d3d11_tex_desc.Width = (UINT)img->cmn.width; + d3d11_tex_desc.Height = (UINT)img->cmn.height; + d3d11_tex_desc.Depth = (UINT)img->cmn.num_slices; + d3d11_tex_desc.MipLevels = (UINT)img->cmn.num_mipmaps; + d3d11_tex_desc.BindFlags = D3D11_BIND_SHADER_RESOURCE; + d3d11_tex_desc.Format = img->d3d11.format; + if (img->cmn.render_target) { + d3d11_tex_desc.Usage = D3D11_USAGE_DEFAULT; + if (!msaa) { + d3d11_tex_desc.BindFlags |= D3D11_BIND_RENDER_TARGET; + } + d3d11_tex_desc.CPUAccessFlags = 0; + } + else { + d3d11_tex_desc.Usage = _sg_d3d11_usage(img->cmn.usage); + d3d11_tex_desc.CPUAccessFlags = _sg_d3d11_cpu_access_flags(img->cmn.usage); + } + if (img->d3d11.format == DXGI_FORMAT_UNKNOWN) { + /* trying to create a texture format that's not supported by D3D */ + SOKOL_LOG("trying to create a D3D11 texture with unsupported pixel format\n"); + return SG_RESOURCESTATE_FAILED; + } + hr = _sg_d3d11_CreateTexture3D(_sg.d3d11.dev, &d3d11_tex_desc, init_data, &img->d3d11.tex3d); + SOKOL_ASSERT(SUCCEEDED(hr) && img->d3d11.tex3d); + } + + if (0 == img->d3d11.srv) { + D3D11_SHADER_RESOURCE_VIEW_DESC d3d11_srv_desc; + memset(&d3d11_srv_desc, 0, sizeof(d3d11_srv_desc)); + d3d11_srv_desc.Format = img->d3d11.format; + d3d11_srv_desc.ViewDimension = D3D11_SRV_DIMENSION_TEXTURE3D; + d3d11_srv_desc.Texture3D.MipLevels = (UINT)img->cmn.num_mipmaps; + hr = _sg_d3d11_CreateShaderResourceView(_sg.d3d11.dev, (ID3D11Resource*)img->d3d11.tex3d, &d3d11_srv_desc, &img->d3d11.srv); + SOKOL_ASSERT(SUCCEEDED(hr) && img->d3d11.srv); + } + } + + /* also need to create a separate MSAA render target 
texture? */ + if (msaa) { + D3D11_TEXTURE2D_DESC d3d11_tex_desc; + memset(&d3d11_tex_desc, 0, sizeof(d3d11_tex_desc)); + d3d11_tex_desc.Width = (UINT)img->cmn.width; + d3d11_tex_desc.Height = (UINT)img->cmn.height; + d3d11_tex_desc.MipLevels = 1; + d3d11_tex_desc.ArraySize = 1; + d3d11_tex_desc.Format = img->d3d11.format; + d3d11_tex_desc.Usage = D3D11_USAGE_DEFAULT; + d3d11_tex_desc.BindFlags = D3D11_BIND_RENDER_TARGET; + d3d11_tex_desc.CPUAccessFlags = 0; + d3d11_tex_desc.SampleDesc.Count = (UINT)img->cmn.sample_count; + d3d11_tex_desc.SampleDesc.Quality = (UINT)D3D11_STANDARD_MULTISAMPLE_PATTERN; + hr = _sg_d3d11_CreateTexture2D(_sg.d3d11.dev, &d3d11_tex_desc, NULL, &img->d3d11.texmsaa); + SOKOL_ASSERT(SUCCEEDED(hr) && img->d3d11.texmsaa); + } + + /* sampler state object, note D3D11 implements an internal shared-pool for sampler objects */ + D3D11_SAMPLER_DESC d3d11_smp_desc; + memset(&d3d11_smp_desc, 0, sizeof(d3d11_smp_desc)); + d3d11_smp_desc.Filter = _sg_d3d11_filter(img->cmn.min_filter, img->cmn.mag_filter, img->cmn.max_anisotropy); + d3d11_smp_desc.AddressU = _sg_d3d11_address_mode(img->cmn.wrap_u); + d3d11_smp_desc.AddressV = _sg_d3d11_address_mode(img->cmn.wrap_v); + d3d11_smp_desc.AddressW = _sg_d3d11_address_mode(img->cmn.wrap_w); + switch (img->cmn.border_color) { + case SG_BORDERCOLOR_TRANSPARENT_BLACK: + /* all 0.0f */ + break; + case SG_BORDERCOLOR_OPAQUE_WHITE: + for (int i = 0; i < 4; i++) { + d3d11_smp_desc.BorderColor[i] = 1.0f; + } + break; + default: + /* opaque black */ + d3d11_smp_desc.BorderColor[3] = 1.0f; + break; + } + d3d11_smp_desc.MaxAnisotropy = img->cmn.max_anisotropy; + d3d11_smp_desc.ComparisonFunc = D3D11_COMPARISON_NEVER; + d3d11_smp_desc.MinLOD = desc->min_lod; + d3d11_smp_desc.MaxLOD = desc->max_lod; + hr = _sg_d3d11_CreateSamplerState(_sg.d3d11.dev, &d3d11_smp_desc, &img->d3d11.smp); + SOKOL_ASSERT(SUCCEEDED(hr) && img->d3d11.smp); + } + return SG_RESOURCESTATE_VALID; +} + +_SOKOL_PRIVATE void _sg_d3d11_destroy_image(_sg_image_t* img) { + SOKOL_ASSERT(img); + if (img->d3d11.tex2d) { + _sg_d3d11_Release(img->d3d11.tex2d); + } + if (img->d3d11.tex3d) { + _sg_d3d11_Release(img->d3d11.tex3d); + } + if (img->d3d11.texds) { + _sg_d3d11_Release(img->d3d11.texds); + } + if (img->d3d11.texmsaa) { + _sg_d3d11_Release(img->d3d11.texmsaa); + } + if (img->d3d11.srv) { + _sg_d3d11_Release(img->d3d11.srv); + } + if (img->d3d11.smp) { + _sg_d3d11_Release(img->d3d11.smp); + } +} + +_SOKOL_PRIVATE bool _sg_d3d11_load_d3dcompiler_dll(void) { + /* on UWP, don't do anything (not tested) */ + #if (defined(WINAPI_FAMILY_PARTITION) && !WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP)) + return true; + #else + /* load DLL on demand */ + if ((0 == _sg.d3d11.d3dcompiler_dll) && !_sg.d3d11.d3dcompiler_dll_load_failed) { + _sg.d3d11.d3dcompiler_dll = LoadLibraryA("d3dcompiler_47.dll"); + if (0 == _sg.d3d11.d3dcompiler_dll) { + /* don't attempt to load missing DLL in the future */ + SOKOL_LOG("failed to load d3dcompiler_47.dll!\n"); + _sg.d3d11.d3dcompiler_dll_load_failed = true; + return false; + } + /* look up function pointers */ + _sg.d3d11.D3DCompile_func = (pD3DCompile)(void*) GetProcAddress(_sg.d3d11.d3dcompiler_dll, "D3DCompile"); + SOKOL_ASSERT(_sg.d3d11.D3DCompile_func); + } + return 0 != _sg.d3d11.d3dcompiler_dll; + #endif +} + +#if (defined(WINAPI_FAMILY_PARTITION) && !WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP)) +#define _sg_d3d11_D3DCompile D3DCompile +#else +#define _sg_d3d11_D3DCompile _sg.d3d11.D3DCompile_func +#endif + +_SOKOL_PRIVATE ID3DBlob* 
_sg_d3d11_compile_shader(const sg_shader_stage_desc* stage_desc) { + if (!_sg_d3d11_load_d3dcompiler_dll()) { + return NULL; + } + SOKOL_ASSERT(stage_desc->d3d11_target); + ID3DBlob* output = NULL; + ID3DBlob* errors_or_warnings = NULL; + HRESULT hr = _sg_d3d11_D3DCompile( + stage_desc->source, /* pSrcData */ + strlen(stage_desc->source), /* SrcDataSize */ + NULL, /* pSourceName */ + NULL, /* pDefines */ + NULL, /* pInclude */ + stage_desc->entry ? stage_desc->entry : "main", /* pEntryPoint */ + stage_desc->d3d11_target, /* pTarget (vs_5_0 or ps_5_0) */ + D3DCOMPILE_PACK_MATRIX_COLUMN_MAJOR | D3DCOMPILE_OPTIMIZATION_LEVEL3, /* Flags1 */ + 0, /* Flags2 */ + &output, /* ppCode */ + &errors_or_warnings); /* ppErrorMsgs */ + if (errors_or_warnings) { + SOKOL_LOG((LPCSTR)_sg_d3d11_GetBufferPointer(errors_or_warnings)); + _sg_d3d11_Release(errors_or_warnings); errors_or_warnings = NULL; + } + if (FAILED(hr)) { + /* just in case, usually output is NULL here */ + if (output) { + _sg_d3d11_Release(output); + output = NULL; + } + } + return output; +} + +_SOKOL_PRIVATE sg_resource_state _sg_d3d11_create_shader(_sg_shader_t* shd, const sg_shader_desc* desc) { + SOKOL_ASSERT(shd && desc); + SOKOL_ASSERT(!shd->d3d11.vs && !shd->d3d11.fs && !shd->d3d11.vs_blob); + HRESULT hr; + _SOKOL_UNUSED(hr); + + _sg_shader_common_init(&shd->cmn, desc); + + /* copy vertex attribute semantic names and indices */ + for (int i = 0; i < SG_MAX_VERTEX_ATTRIBUTES; i++) { + _sg_strcpy(&shd->d3d11.attrs[i].sem_name, desc->attrs[i].sem_name); + shd->d3d11.attrs[i].sem_index = desc->attrs[i].sem_index; + } + + /* shader stage uniform blocks and image slots */ + for (int stage_index = 0; stage_index < SG_NUM_SHADER_STAGES; stage_index++) { + _sg_shader_stage_t* cmn_stage = &shd->cmn.stage[stage_index]; + _sg_d3d11_shader_stage_t* d3d11_stage = &shd->d3d11.stage[stage_index]; + for (int ub_index = 0; ub_index < cmn_stage->num_uniform_blocks; ub_index++) { + const _sg_uniform_block_t* ub = &cmn_stage->uniform_blocks[ub_index]; + + /* create a D3D constant buffer for each uniform block */ + SOKOL_ASSERT(0 == d3d11_stage->cbufs[ub_index]); + D3D11_BUFFER_DESC cb_desc; + memset(&cb_desc, 0, sizeof(cb_desc)); + cb_desc.ByteWidth = (UINT)_sg_roundup((int)ub->size, 16); + cb_desc.Usage = D3D11_USAGE_DEFAULT; + cb_desc.BindFlags = D3D11_BIND_CONSTANT_BUFFER; + hr = _sg_d3d11_CreateBuffer(_sg.d3d11.dev, &cb_desc, NULL, &d3d11_stage->cbufs[ub_index]); + SOKOL_ASSERT(SUCCEEDED(hr) && d3d11_stage->cbufs[ub_index]); + } + } + + const void* vs_ptr = 0, *fs_ptr = 0; + SIZE_T vs_length = 0, fs_length = 0; + ID3DBlob* vs_blob = 0, *fs_blob = 0; + if (desc->vs.bytecode.ptr && desc->fs.bytecode.ptr) { + /* create from shader byte code */ + vs_ptr = desc->vs.bytecode.ptr; + fs_ptr = desc->fs.bytecode.ptr; + vs_length = desc->vs.bytecode.size; + fs_length = desc->fs.bytecode.size; + } + else { + /* compile from shader source code */ + vs_blob = _sg_d3d11_compile_shader(&desc->vs); + fs_blob = _sg_d3d11_compile_shader(&desc->fs); + if (vs_blob && fs_blob) { + vs_ptr = _sg_d3d11_GetBufferPointer(vs_blob); + vs_length = _sg_d3d11_GetBufferSize(vs_blob); + fs_ptr = _sg_d3d11_GetBufferPointer(fs_blob); + fs_length = _sg_d3d11_GetBufferSize(fs_blob); + } + } + sg_resource_state result = SG_RESOURCESTATE_FAILED; + if (vs_ptr && fs_ptr && (vs_length > 0) && (fs_length > 0)) { + /* create the D3D vertex- and pixel-shader objects */ + hr = _sg_d3d11_CreateVertexShader(_sg.d3d11.dev, vs_ptr, vs_length, NULL, &shd->d3d11.vs); + bool vs_succeeded = 
SUCCEEDED(hr) && shd->d3d11.vs; + hr = _sg_d3d11_CreatePixelShader(_sg.d3d11.dev, fs_ptr, fs_length, NULL, &shd->d3d11.fs); + bool fs_succeeded = SUCCEEDED(hr) && shd->d3d11.fs; + + /* need to store the vertex shader byte code, this is needed later in sg_create_pipeline */ + if (vs_succeeded && fs_succeeded) { + shd->d3d11.vs_blob_length = vs_length; + shd->d3d11.vs_blob = SOKOL_MALLOC((size_t)vs_length); + SOKOL_ASSERT(shd->d3d11.vs_blob); + memcpy(shd->d3d11.vs_blob, vs_ptr, vs_length); + result = SG_RESOURCESTATE_VALID; + } + } + if (vs_blob) { + _sg_d3d11_Release(vs_blob); vs_blob = 0; + } + if (fs_blob) { + _sg_d3d11_Release(fs_blob); fs_blob = 0; + } + return result; +} + +_SOKOL_PRIVATE void _sg_d3d11_destroy_shader(_sg_shader_t* shd) { + SOKOL_ASSERT(shd); + if (shd->d3d11.vs) { + _sg_d3d11_Release(shd->d3d11.vs); + } + if (shd->d3d11.fs) { + _sg_d3d11_Release(shd->d3d11.fs); + } + if (shd->d3d11.vs_blob) { + SOKOL_FREE(shd->d3d11.vs_blob); + } + for (int stage_index = 0; stage_index < SG_NUM_SHADER_STAGES; stage_index++) { + _sg_shader_stage_t* cmn_stage = &shd->cmn.stage[stage_index]; + _sg_d3d11_shader_stage_t* d3d11_stage = &shd->d3d11.stage[stage_index]; + for (int ub_index = 0; ub_index < cmn_stage->num_uniform_blocks; ub_index++) { + if (d3d11_stage->cbufs[ub_index]) { + _sg_d3d11_Release(d3d11_stage->cbufs[ub_index]); + } + } + } +} + +_SOKOL_PRIVATE sg_resource_state _sg_d3d11_create_pipeline(_sg_pipeline_t* pip, _sg_shader_t* shd, const sg_pipeline_desc* desc) { + SOKOL_ASSERT(pip && shd && desc); + SOKOL_ASSERT(desc->shader.id == shd->slot.id); + SOKOL_ASSERT(shd->slot.state == SG_RESOURCESTATE_VALID); + SOKOL_ASSERT(shd->d3d11.vs_blob && shd->d3d11.vs_blob_length > 0); + SOKOL_ASSERT(!pip->d3d11.il && !pip->d3d11.rs && !pip->d3d11.dss && !pip->d3d11.bs); + + pip->shader = shd; + _sg_pipeline_common_init(&pip->cmn, desc); + pip->d3d11.index_format = _sg_d3d11_index_format(pip->cmn.index_type); + pip->d3d11.topology = _sg_d3d11_primitive_topology(desc->primitive_type); + pip->d3d11.stencil_ref = desc->stencil.ref; + + /* create input layout object */ + HRESULT hr; + _SOKOL_UNUSED(hr); + D3D11_INPUT_ELEMENT_DESC d3d11_comps[SG_MAX_VERTEX_ATTRIBUTES]; + memset(d3d11_comps, 0, sizeof(d3d11_comps)); + int attr_index = 0; + for (; attr_index < SG_MAX_VERTEX_ATTRIBUTES; attr_index++) { + const sg_vertex_attr_desc* a_desc = &desc->layout.attrs[attr_index]; + if (a_desc->format == SG_VERTEXFORMAT_INVALID) { + break; + } + SOKOL_ASSERT(a_desc->buffer_index < SG_MAX_SHADERSTAGE_BUFFERS); + const sg_buffer_layout_desc* l_desc = &desc->layout.buffers[a_desc->buffer_index]; + const sg_vertex_step step_func = l_desc->step_func; + const int step_rate = l_desc->step_rate; + D3D11_INPUT_ELEMENT_DESC* d3d11_comp = &d3d11_comps[attr_index]; + d3d11_comp->SemanticName = _sg_strptr(&shd->d3d11.attrs[attr_index].sem_name); + d3d11_comp->SemanticIndex = (UINT)shd->d3d11.attrs[attr_index].sem_index; + d3d11_comp->Format = _sg_d3d11_vertex_format(a_desc->format); + d3d11_comp->InputSlot = (UINT)a_desc->buffer_index; + d3d11_comp->AlignedByteOffset = (UINT)a_desc->offset; + d3d11_comp->InputSlotClass = _sg_d3d11_input_classification(step_func); + if (SG_VERTEXSTEP_PER_INSTANCE == step_func) { + d3d11_comp->InstanceDataStepRate = (UINT)step_rate; + } + pip->cmn.vertex_layout_valid[a_desc->buffer_index] = true; + } + for (int layout_index = 0; layout_index < SG_MAX_SHADERSTAGE_BUFFERS; layout_index++) { + if (pip->cmn.vertex_layout_valid[layout_index]) { + const sg_buffer_layout_desc* l_desc = 
&desc->layout.buffers[layout_index]; + SOKOL_ASSERT(l_desc->stride > 0); + pip->d3d11.vb_strides[layout_index] = (UINT)l_desc->stride; + } + else { + pip->d3d11.vb_strides[layout_index] = 0; + } + } + hr = _sg_d3d11_CreateInputLayout(_sg.d3d11.dev, + d3d11_comps, /* pInputElementDesc */ + (UINT)attr_index, /* NumElements */ + shd->d3d11.vs_blob, /* pShaderByteCodeWithInputSignature */ + shd->d3d11.vs_blob_length, /* BytecodeLength */ + &pip->d3d11.il); + SOKOL_ASSERT(SUCCEEDED(hr) && pip->d3d11.il); + + /* create rasterizer state */ + D3D11_RASTERIZER_DESC rs_desc; + memset(&rs_desc, 0, sizeof(rs_desc)); + rs_desc.FillMode = D3D11_FILL_SOLID; + rs_desc.CullMode = _sg_d3d11_cull_mode(desc->cull_mode); + rs_desc.FrontCounterClockwise = desc->face_winding == SG_FACEWINDING_CCW; + rs_desc.DepthBias = (INT) pip->cmn.depth_bias; + rs_desc.DepthBiasClamp = pip->cmn.depth_bias_clamp; + rs_desc.SlopeScaledDepthBias = pip->cmn.depth_bias_slope_scale; + rs_desc.DepthClipEnable = TRUE; + rs_desc.ScissorEnable = TRUE; + rs_desc.MultisampleEnable = desc->sample_count > 1; + rs_desc.AntialiasedLineEnable = FALSE; + hr = _sg_d3d11_CreateRasterizerState(_sg.d3d11.dev, &rs_desc, &pip->d3d11.rs); + SOKOL_ASSERT(SUCCEEDED(hr) && pip->d3d11.rs); + + /* create depth-stencil state */ + D3D11_DEPTH_STENCIL_DESC dss_desc; + memset(&dss_desc, 0, sizeof(dss_desc)); + dss_desc.DepthEnable = TRUE; + dss_desc.DepthWriteMask = desc->depth.write_enabled ? D3D11_DEPTH_WRITE_MASK_ALL : D3D11_DEPTH_WRITE_MASK_ZERO; + dss_desc.DepthFunc = _sg_d3d11_compare_func(desc->depth.compare); + dss_desc.StencilEnable = desc->stencil.enabled; + dss_desc.StencilReadMask = desc->stencil.read_mask; + dss_desc.StencilWriteMask = desc->stencil.write_mask; + const sg_stencil_face_state* sf = &desc->stencil.front; + dss_desc.FrontFace.StencilFailOp = _sg_d3d11_stencil_op(sf->fail_op); + dss_desc.FrontFace.StencilDepthFailOp = _sg_d3d11_stencil_op(sf->depth_fail_op); + dss_desc.FrontFace.StencilPassOp = _sg_d3d11_stencil_op(sf->pass_op); + dss_desc.FrontFace.StencilFunc = _sg_d3d11_compare_func(sf->compare); + const sg_stencil_face_state* sb = &desc->stencil.back; + dss_desc.BackFace.StencilFailOp = _sg_d3d11_stencil_op(sb->fail_op); + dss_desc.BackFace.StencilDepthFailOp = _sg_d3d11_stencil_op(sb->depth_fail_op); + dss_desc.BackFace.StencilPassOp = _sg_d3d11_stencil_op(sb->pass_op); + dss_desc.BackFace.StencilFunc = _sg_d3d11_compare_func(sb->compare); + hr = _sg_d3d11_CreateDepthStencilState(_sg.d3d11.dev, &dss_desc, &pip->d3d11.dss); + SOKOL_ASSERT(SUCCEEDED(hr) && pip->d3d11.dss); + + /* create blend state */ + D3D11_BLEND_DESC bs_desc; + memset(&bs_desc, 0, sizeof(bs_desc)); + bs_desc.AlphaToCoverageEnable = desc->alpha_to_coverage_enabled; + bs_desc.IndependentBlendEnable = TRUE; + { + int i = 0; + for (i = 0; i < desc->color_count; i++) { + const sg_blend_state* src = &desc->colors[i].blend; + D3D11_RENDER_TARGET_BLEND_DESC* dst = &bs_desc.RenderTarget[i]; + dst->BlendEnable = src->enabled; + dst->SrcBlend = _sg_d3d11_blend_factor(src->src_factor_rgb); + dst->DestBlend = _sg_d3d11_blend_factor(src->dst_factor_rgb); + dst->BlendOp = _sg_d3d11_blend_op(src->op_rgb); + dst->SrcBlendAlpha = _sg_d3d11_blend_factor(src->src_factor_alpha); + dst->DestBlendAlpha = _sg_d3d11_blend_factor(src->dst_factor_alpha); + dst->BlendOpAlpha = _sg_d3d11_blend_op(src->op_alpha); + dst->RenderTargetWriteMask = _sg_d3d11_color_write_mask(desc->colors[i].write_mask); + } + for (; i < 8; i++) { + D3D11_RENDER_TARGET_BLEND_DESC* dst = &bs_desc.RenderTarget[i]; + 
dst->BlendEnable = FALSE; + dst->SrcBlend = dst->SrcBlendAlpha = D3D11_BLEND_ONE; + dst->DestBlend = dst->DestBlendAlpha = D3D11_BLEND_ZERO; + dst->BlendOp = dst->BlendOpAlpha = D3D11_BLEND_OP_ADD; + dst->RenderTargetWriteMask = D3D11_COLOR_WRITE_ENABLE_ALL; + } + } + hr = _sg_d3d11_CreateBlendState(_sg.d3d11.dev, &bs_desc, &pip->d3d11.bs); + SOKOL_ASSERT(SUCCEEDED(hr) && pip->d3d11.bs); + + return SG_RESOURCESTATE_VALID; +} + +_SOKOL_PRIVATE void _sg_d3d11_destroy_pipeline(_sg_pipeline_t* pip) { + SOKOL_ASSERT(pip); + if (pip->d3d11.il) { + _sg_d3d11_Release(pip->d3d11.il); + } + if (pip->d3d11.rs) { + _sg_d3d11_Release(pip->d3d11.rs); + } + if (pip->d3d11.dss) { + _sg_d3d11_Release(pip->d3d11.dss); + } + if (pip->d3d11.bs) { + _sg_d3d11_Release(pip->d3d11.bs); + } +} + +_SOKOL_PRIVATE sg_resource_state _sg_d3d11_create_pass(_sg_pass_t* pass, _sg_image_t** att_images, const sg_pass_desc* desc) { + SOKOL_ASSERT(pass && desc); + SOKOL_ASSERT(att_images && att_images[0]); + SOKOL_ASSERT(_sg.d3d11.dev); + + _sg_pass_common_init(&pass->cmn, desc); + + for (int i = 0; i < pass->cmn.num_color_atts; i++) { + const sg_pass_attachment_desc* att_desc = &desc->color_attachments[i]; + _SOKOL_UNUSED(att_desc); + SOKOL_ASSERT(att_desc->image.id != SG_INVALID_ID); + _sg_image_t* att_img = att_images[i]; + SOKOL_ASSERT(att_img && (att_img->slot.id == att_desc->image.id)); + SOKOL_ASSERT(_sg_is_valid_rendertarget_color_format(att_img->cmn.pixel_format)); + SOKOL_ASSERT(0 == pass->d3d11.color_atts[i].image); + pass->d3d11.color_atts[i].image = att_img; + + /* create D3D11 render-target-view */ + const _sg_pass_attachment_t* cmn_att = &pass->cmn.color_atts[i]; + SOKOL_ASSERT(0 == pass->d3d11.color_atts[i].rtv); + ID3D11Resource* d3d11_res = 0; + const bool is_msaa = att_img->cmn.sample_count > 1; + D3D11_RENDER_TARGET_VIEW_DESC d3d11_rtv_desc; + memset(&d3d11_rtv_desc, 0, sizeof(d3d11_rtv_desc)); + d3d11_rtv_desc.Format = att_img->d3d11.format; + if ((att_img->cmn.type == SG_IMAGETYPE_2D) || is_msaa) { + if (is_msaa) { + d3d11_res = (ID3D11Resource*) att_img->d3d11.texmsaa; + d3d11_rtv_desc.ViewDimension = D3D11_RTV_DIMENSION_TEXTURE2DMS; + } + else { + d3d11_res = (ID3D11Resource*) att_img->d3d11.tex2d; + d3d11_rtv_desc.ViewDimension = D3D11_RTV_DIMENSION_TEXTURE2D; + d3d11_rtv_desc.Texture2D.MipSlice = (UINT)cmn_att->mip_level; + } + } + else if ((att_img->cmn.type == SG_IMAGETYPE_CUBE) || (att_img->cmn.type == SG_IMAGETYPE_ARRAY)) { + d3d11_res = (ID3D11Resource*) att_img->d3d11.tex2d; + d3d11_rtv_desc.ViewDimension = D3D11_RTV_DIMENSION_TEXTURE2DARRAY; + d3d11_rtv_desc.Texture2DArray.MipSlice = (UINT)cmn_att->mip_level; + d3d11_rtv_desc.Texture2DArray.FirstArraySlice = (UINT)cmn_att->slice; + d3d11_rtv_desc.Texture2DArray.ArraySize = 1; + } + else { + SOKOL_ASSERT(att_img->cmn.type == SG_IMAGETYPE_3D); + d3d11_res = (ID3D11Resource*) att_img->d3d11.tex3d; + d3d11_rtv_desc.ViewDimension = D3D11_RTV_DIMENSION_TEXTURE3D; + d3d11_rtv_desc.Texture3D.MipSlice = (UINT)cmn_att->mip_level; + d3d11_rtv_desc.Texture3D.FirstWSlice = (UINT)cmn_att->slice; + d3d11_rtv_desc.Texture3D.WSize = 1; + } + SOKOL_ASSERT(d3d11_res); + HRESULT hr = _sg_d3d11_CreateRenderTargetView(_sg.d3d11.dev, d3d11_res, &d3d11_rtv_desc, &pass->d3d11.color_atts[i].rtv); + _SOKOL_UNUSED(hr); + SOKOL_ASSERT(SUCCEEDED(hr) && pass->d3d11.color_atts[i].rtv); + } + + /* optional depth-stencil image */ + SOKOL_ASSERT(0 == pass->d3d11.ds_att.image); + SOKOL_ASSERT(0 == pass->d3d11.ds_att.dsv); + if (desc->depth_stencil_attachment.image.id != 
SG_INVALID_ID) { + const int ds_img_index = SG_MAX_COLOR_ATTACHMENTS; + const sg_pass_attachment_desc* att_desc = &desc->depth_stencil_attachment; + _SOKOL_UNUSED(att_desc); + _sg_image_t* att_img = att_images[ds_img_index]; + SOKOL_ASSERT(att_img && (att_img->slot.id == att_desc->image.id)); + SOKOL_ASSERT(_sg_is_valid_rendertarget_depth_format(att_img->cmn.pixel_format)); + SOKOL_ASSERT(0 == pass->d3d11.ds_att.image); + pass->d3d11.ds_att.image = att_img; + + /* create D3D11 depth-stencil-view */ + D3D11_DEPTH_STENCIL_VIEW_DESC d3d11_dsv_desc; + memset(&d3d11_dsv_desc, 0, sizeof(d3d11_dsv_desc)); + d3d11_dsv_desc.Format = att_img->d3d11.format; + const bool is_msaa = att_img->cmn.sample_count > 1; + if (is_msaa) { + d3d11_dsv_desc.ViewDimension = D3D11_DSV_DIMENSION_TEXTURE2DMS; + } + else { + d3d11_dsv_desc.ViewDimension = D3D11_DSV_DIMENSION_TEXTURE2D; + } + ID3D11Resource* d3d11_res = (ID3D11Resource*) att_img->d3d11.texds; + SOKOL_ASSERT(d3d11_res); + HRESULT hr = _sg_d3d11_CreateDepthStencilView(_sg.d3d11.dev, d3d11_res, &d3d11_dsv_desc, &pass->d3d11.ds_att.dsv); + _SOKOL_UNUSED(hr); + SOKOL_ASSERT(SUCCEEDED(hr) && pass->d3d11.ds_att.dsv); + } + return SG_RESOURCESTATE_VALID; +} + +_SOKOL_PRIVATE void _sg_d3d11_destroy_pass(_sg_pass_t* pass) { + SOKOL_ASSERT(pass); + for (int i = 0; i < SG_MAX_COLOR_ATTACHMENTS; i++) { + if (pass->d3d11.color_atts[i].rtv) { + _sg_d3d11_Release(pass->d3d11.color_atts[i].rtv); + } + } + if (pass->d3d11.ds_att.dsv) { + _sg_d3d11_Release(pass->d3d11.ds_att.dsv); + } +} + +_SOKOL_PRIVATE _sg_image_t* _sg_d3d11_pass_color_image(const _sg_pass_t* pass, int index) { + SOKOL_ASSERT(pass && (index >= 0) && (index < SG_MAX_COLOR_ATTACHMENTS)); + /* NOTE: may return null */ + return pass->d3d11.color_atts[index].image; +} + +_SOKOL_PRIVATE _sg_image_t* _sg_d3d11_pass_ds_image(const _sg_pass_t* pass) { + /* NOTE: may return null */ + SOKOL_ASSERT(pass); + return pass->d3d11.ds_att.image; +} + +_SOKOL_PRIVATE void _sg_d3d11_begin_pass(_sg_pass_t* pass, const sg_pass_action* action, int w, int h) { + SOKOL_ASSERT(action); + SOKOL_ASSERT(!_sg.d3d11.in_pass); + SOKOL_ASSERT(_sg.d3d11.rtv_cb || _sg.d3d11.rtv_userdata_cb); + SOKOL_ASSERT(_sg.d3d11.dsv_cb || _sg.d3d11.dsv_userdata_cb); + _sg.d3d11.in_pass = true; + _sg.d3d11.cur_width = w; + _sg.d3d11.cur_height = h; + if (pass) { + _sg.d3d11.cur_pass = pass; + _sg.d3d11.cur_pass_id.id = pass->slot.id; + _sg.d3d11.num_rtvs = 0; + for (int i = 0; i < SG_MAX_COLOR_ATTACHMENTS; i++) { + _sg.d3d11.cur_rtvs[i] = pass->d3d11.color_atts[i].rtv; + if (_sg.d3d11.cur_rtvs[i]) { + _sg.d3d11.num_rtvs++; + } + } + _sg.d3d11.cur_dsv = pass->d3d11.ds_att.dsv; + } + else { + /* render to default frame buffer */ + _sg.d3d11.cur_pass = 0; + _sg.d3d11.cur_pass_id.id = SG_INVALID_ID; + _sg.d3d11.num_rtvs = 1; + if (_sg.d3d11.rtv_cb) { + _sg.d3d11.cur_rtvs[0] = (ID3D11RenderTargetView*) _sg.d3d11.rtv_cb(); + } + else { + _sg.d3d11.cur_rtvs[0] = (ID3D11RenderTargetView*) _sg.d3d11.rtv_userdata_cb(_sg.d3d11.user_data); + } + for (int i = 1; i < SG_MAX_COLOR_ATTACHMENTS; i++) { + _sg.d3d11.cur_rtvs[i] = 0; + } + if (_sg.d3d11.dsv_cb) { + _sg.d3d11.cur_dsv = (ID3D11DepthStencilView*) _sg.d3d11.dsv_cb(); + } + else { + _sg.d3d11.cur_dsv = (ID3D11DepthStencilView*) _sg.d3d11.dsv_userdata_cb(_sg.d3d11.user_data); + } + SOKOL_ASSERT(_sg.d3d11.cur_rtvs[0] && _sg.d3d11.cur_dsv); + } + /* apply the render-target- and depth-stencil-views */ + _sg_d3d11_OMSetRenderTargets(_sg.d3d11.ctx, SG_MAX_COLOR_ATTACHMENTS, _sg.d3d11.cur_rtvs, _sg.d3d11.cur_dsv); 
+ + /* set viewport and scissor rect to cover whole screen */ + D3D11_VIEWPORT vp; + memset(&vp, 0, sizeof(vp)); + vp.Width = (FLOAT) w; + vp.Height = (FLOAT) h; + vp.MaxDepth = 1.0f; + _sg_d3d11_RSSetViewports(_sg.d3d11.ctx, 1, &vp); + D3D11_RECT rect; + rect.left = 0; + rect.top = 0; + rect.right = w; + rect.bottom = h; + _sg_d3d11_RSSetScissorRects(_sg.d3d11.ctx, 1, &rect); + + /* perform clear action */ + for (int i = 0; i < _sg.d3d11.num_rtvs; i++) { + if (action->colors[i].action == SG_ACTION_CLEAR) { + _sg_d3d11_ClearRenderTargetView(_sg.d3d11.ctx, _sg.d3d11.cur_rtvs[i], &action->colors[i].value.r); + } + } + UINT ds_flags = 0; + if (action->depth.action == SG_ACTION_CLEAR) { + ds_flags |= D3D11_CLEAR_DEPTH; + } + if (action->stencil.action == SG_ACTION_CLEAR) { + ds_flags |= D3D11_CLEAR_STENCIL; + } + if ((0 != ds_flags) && _sg.d3d11.cur_dsv) { + _sg_d3d11_ClearDepthStencilView(_sg.d3d11.ctx, _sg.d3d11.cur_dsv, ds_flags, action->depth.value, action->stencil.value); + } +} + +/* D3D11CalcSubresource only exists for C++ */ +_SOKOL_PRIVATE UINT _sg_d3d11_calcsubresource(UINT mip_slice, UINT array_slice, UINT mip_levels) { + return mip_slice + array_slice * mip_levels; +} + +_SOKOL_PRIVATE void _sg_d3d11_end_pass(void) { + SOKOL_ASSERT(_sg.d3d11.in_pass && _sg.d3d11.ctx); + _sg.d3d11.in_pass = false; + + /* need to resolve MSAA render target into texture? */ + if (_sg.d3d11.cur_pass) { + SOKOL_ASSERT(_sg.d3d11.cur_pass->slot.id == _sg.d3d11.cur_pass_id.id); + for (int i = 0; i < _sg.d3d11.num_rtvs; i++) { + _sg_pass_attachment_t* cmn_att = &_sg.d3d11.cur_pass->cmn.color_atts[i]; + _sg_image_t* att_img = _sg.d3d11.cur_pass->d3d11.color_atts[i].image; + SOKOL_ASSERT(att_img && (att_img->slot.id == cmn_att->image_id.id)); + if (att_img->cmn.sample_count > 1) { + /* FIXME: support MSAA resolve into 3D texture */ + SOKOL_ASSERT(att_img->d3d11.tex2d && att_img->d3d11.texmsaa && !att_img->d3d11.tex3d); + SOKOL_ASSERT(DXGI_FORMAT_UNKNOWN != att_img->d3d11.format); + UINT dst_subres = _sg_d3d11_calcsubresource((UINT)cmn_att->mip_level, (UINT)cmn_att->slice, (UINT)att_img->cmn.num_mipmaps); + _sg_d3d11_ResolveSubresource(_sg.d3d11.ctx, + (ID3D11Resource*) att_img->d3d11.tex2d, /* pDstResource */ + dst_subres, /* DstSubresource */ + (ID3D11Resource*) att_img->d3d11.texmsaa, /* pSrcResource */ + 0, /* SrcSubresource */ + att_img->d3d11.format); + } + } + } + _sg.d3d11.cur_pass = 0; + _sg.d3d11.cur_pass_id.id = SG_INVALID_ID; + _sg.d3d11.cur_pipeline = 0; + _sg.d3d11.cur_pipeline_id.id = SG_INVALID_ID; + for (int i = 0; i < SG_MAX_COLOR_ATTACHMENTS; i++) { + _sg.d3d11.cur_rtvs[i] = 0; + } + _sg.d3d11.cur_dsv = 0; + _sg_d3d11_clear_state(); +} + +_SOKOL_PRIVATE void _sg_d3d11_apply_viewport(int x, int y, int w, int h, bool origin_top_left) { + SOKOL_ASSERT(_sg.d3d11.ctx); + SOKOL_ASSERT(_sg.d3d11.in_pass); + D3D11_VIEWPORT vp; + vp.TopLeftX = (FLOAT) x; + vp.TopLeftY = (FLOAT) (origin_top_left ? y : (_sg.d3d11.cur_height - (y + h))); + vp.Width = (FLOAT) w; + vp.Height = (FLOAT) h; + vp.MinDepth = 0.0f; + vp.MaxDepth = 1.0f; + _sg_d3d11_RSSetViewports(_sg.d3d11.ctx, 1, &vp); +} + +_SOKOL_PRIVATE void _sg_d3d11_apply_scissor_rect(int x, int y, int w, int h, bool origin_top_left) { + SOKOL_ASSERT(_sg.d3d11.ctx); + SOKOL_ASSERT(_sg.d3d11.in_pass); + D3D11_RECT rect; + rect.left = x; + rect.top = (origin_top_left ? y : (_sg.d3d11.cur_height - (y + h))); + rect.right = x + w; + rect.bottom = origin_top_left ? 
(y + h) : (_sg.d3d11.cur_height - y); + _sg_d3d11_RSSetScissorRects(_sg.d3d11.ctx, 1, &rect); +} + +_SOKOL_PRIVATE void _sg_d3d11_apply_pipeline(_sg_pipeline_t* pip) { + SOKOL_ASSERT(pip); + SOKOL_ASSERT(pip->shader && (pip->cmn.shader_id.id == pip->shader->slot.id)); + SOKOL_ASSERT(_sg.d3d11.ctx); + SOKOL_ASSERT(_sg.d3d11.in_pass); + SOKOL_ASSERT(pip->d3d11.rs && pip->d3d11.bs && pip->d3d11.dss && pip->d3d11.il); + + _sg.d3d11.cur_pipeline = pip; + _sg.d3d11.cur_pipeline_id.id = pip->slot.id; + _sg.d3d11.use_indexed_draw = (pip->d3d11.index_format != DXGI_FORMAT_UNKNOWN); + + _sg_d3d11_RSSetState(_sg.d3d11.ctx, pip->d3d11.rs); + _sg_d3d11_OMSetDepthStencilState(_sg.d3d11.ctx, pip->d3d11.dss, pip->d3d11.stencil_ref); + _sg_d3d11_OMSetBlendState(_sg.d3d11.ctx, pip->d3d11.bs, &pip->cmn.blend_color.r, 0xFFFFFFFF); + _sg_d3d11_IASetPrimitiveTopology(_sg.d3d11.ctx, pip->d3d11.topology); + _sg_d3d11_IASetInputLayout(_sg.d3d11.ctx, pip->d3d11.il); + _sg_d3d11_VSSetShader(_sg.d3d11.ctx, pip->shader->d3d11.vs, NULL, 0); + _sg_d3d11_VSSetConstantBuffers(_sg.d3d11.ctx, 0, SG_MAX_SHADERSTAGE_UBS, pip->shader->d3d11.stage[SG_SHADERSTAGE_VS].cbufs); + _sg_d3d11_PSSetShader(_sg.d3d11.ctx, pip->shader->d3d11.fs, NULL, 0); + _sg_d3d11_PSSetConstantBuffers(_sg.d3d11.ctx, 0, SG_MAX_SHADERSTAGE_UBS, pip->shader->d3d11.stage[SG_SHADERSTAGE_FS].cbufs); +} + +_SOKOL_PRIVATE void _sg_d3d11_apply_bindings( + _sg_pipeline_t* pip, + _sg_buffer_t** vbs, const int* vb_offsets, int num_vbs, + _sg_buffer_t* ib, int ib_offset, + _sg_image_t** vs_imgs, int num_vs_imgs, + _sg_image_t** fs_imgs, int num_fs_imgs) +{ + SOKOL_ASSERT(pip); + SOKOL_ASSERT(_sg.d3d11.ctx); + SOKOL_ASSERT(_sg.d3d11.in_pass); + + /* gather all the D3D11 resources into arrays */ + ID3D11Buffer* d3d11_ib = ib ? 
ib->d3d11.buf : 0; + ID3D11Buffer* d3d11_vbs[SG_MAX_SHADERSTAGE_BUFFERS]; + UINT d3d11_vb_offsets[SG_MAX_SHADERSTAGE_BUFFERS]; + ID3D11ShaderResourceView* d3d11_vs_srvs[SG_MAX_SHADERSTAGE_IMAGES]; + ID3D11SamplerState* d3d11_vs_smps[SG_MAX_SHADERSTAGE_IMAGES]; + ID3D11ShaderResourceView* d3d11_fs_srvs[SG_MAX_SHADERSTAGE_IMAGES]; + ID3D11SamplerState* d3d11_fs_smps[SG_MAX_SHADERSTAGE_IMAGES]; + int i; + for (i = 0; i < num_vbs; i++) { + SOKOL_ASSERT(vbs[i]->d3d11.buf); + d3d11_vbs[i] = vbs[i]->d3d11.buf; + d3d11_vb_offsets[i] = (UINT)vb_offsets[i]; + } + for (; i < SG_MAX_SHADERSTAGE_BUFFERS; i++) { + d3d11_vbs[i] = 0; + d3d11_vb_offsets[i] = 0; + } + for (i = 0; i < num_vs_imgs; i++) { + SOKOL_ASSERT(vs_imgs[i]->d3d11.srv); + SOKOL_ASSERT(vs_imgs[i]->d3d11.smp); + d3d11_vs_srvs[i] = vs_imgs[i]->d3d11.srv; + d3d11_vs_smps[i] = vs_imgs[i]->d3d11.smp; + } + for (; i < SG_MAX_SHADERSTAGE_IMAGES; i++) { + d3d11_vs_srvs[i] = 0; + d3d11_vs_smps[i] = 0; + } + for (i = 0; i < num_fs_imgs; i++) { + SOKOL_ASSERT(fs_imgs[i]->d3d11.srv); + SOKOL_ASSERT(fs_imgs[i]->d3d11.smp); + d3d11_fs_srvs[i] = fs_imgs[i]->d3d11.srv; + d3d11_fs_smps[i] = fs_imgs[i]->d3d11.smp; + } + for (; i < SG_MAX_SHADERSTAGE_IMAGES; i++) { + d3d11_fs_srvs[i] = 0; + d3d11_fs_smps[i] = 0; + } + + _sg_d3d11_IASetVertexBuffers(_sg.d3d11.ctx, 0, SG_MAX_SHADERSTAGE_BUFFERS, d3d11_vbs, pip->d3d11.vb_strides, d3d11_vb_offsets); + _sg_d3d11_IASetIndexBuffer(_sg.d3d11.ctx, d3d11_ib, pip->d3d11.index_format, (UINT)ib_offset); + _sg_d3d11_VSSetShaderResources(_sg.d3d11.ctx, 0, SG_MAX_SHADERSTAGE_IMAGES, d3d11_vs_srvs); + _sg_d3d11_VSSetSamplers(_sg.d3d11.ctx, 0, SG_MAX_SHADERSTAGE_IMAGES, d3d11_vs_smps); + _sg_d3d11_PSSetShaderResources(_sg.d3d11.ctx, 0, SG_MAX_SHADERSTAGE_IMAGES, d3d11_fs_srvs); + _sg_d3d11_PSSetSamplers(_sg.d3d11.ctx, 0, SG_MAX_SHADERSTAGE_IMAGES, d3d11_fs_smps); +} + +_SOKOL_PRIVATE void _sg_d3d11_apply_uniforms(sg_shader_stage stage_index, int ub_index, const sg_range* data) { + SOKOL_ASSERT(_sg.d3d11.ctx && _sg.d3d11.in_pass); + SOKOL_ASSERT(_sg.d3d11.cur_pipeline && _sg.d3d11.cur_pipeline->slot.id == _sg.d3d11.cur_pipeline_id.id); + SOKOL_ASSERT(_sg.d3d11.cur_pipeline->shader && _sg.d3d11.cur_pipeline->shader->slot.id == _sg.d3d11.cur_pipeline->cmn.shader_id.id); + SOKOL_ASSERT(ub_index < _sg.d3d11.cur_pipeline->shader->cmn.stage[stage_index].num_uniform_blocks); + SOKOL_ASSERT(data->size == _sg.d3d11.cur_pipeline->shader->cmn.stage[stage_index].uniform_blocks[ub_index].size); + ID3D11Buffer* cb = _sg.d3d11.cur_pipeline->shader->d3d11.stage[stage_index].cbufs[ub_index]; + SOKOL_ASSERT(cb); + _sg_d3d11_UpdateSubresource(_sg.d3d11.ctx, (ID3D11Resource*)cb, 0, NULL, data->ptr, 0, 0); +} + +_SOKOL_PRIVATE void _sg_d3d11_draw(int base_element, int num_elements, int num_instances) { + SOKOL_ASSERT(_sg.d3d11.in_pass); + if (_sg.d3d11.use_indexed_draw) { + if (1 == num_instances) { + _sg_d3d11_DrawIndexed(_sg.d3d11.ctx, (UINT)num_elements, (UINT)base_element, 0); + } + else { + _sg_d3d11_DrawIndexedInstanced(_sg.d3d11.ctx, (UINT)num_elements, (UINT)num_instances, (UINT)base_element, 0, 0); + } + } + else { + if (1 == num_instances) { + _sg_d3d11_Draw(_sg.d3d11.ctx, (UINT)num_elements, (UINT)base_element); + } + else { + _sg_d3d11_DrawInstanced(_sg.d3d11.ctx, (UINT)num_elements, (UINT)num_instances, (UINT)base_element, 0); + } + } +} + +_SOKOL_PRIVATE void _sg_d3d11_commit(void) { + SOKOL_ASSERT(!_sg.d3d11.in_pass); +} + +_SOKOL_PRIVATE void _sg_d3d11_update_buffer(_sg_buffer_t* buf, const sg_range* data) { + SOKOL_ASSERT(buf 
&& data && data->ptr && (data->size > 0)); + SOKOL_ASSERT(_sg.d3d11.ctx); + SOKOL_ASSERT(buf->d3d11.buf); + D3D11_MAPPED_SUBRESOURCE d3d11_msr; + HRESULT hr = _sg_d3d11_Map(_sg.d3d11.ctx, (ID3D11Resource*)buf->d3d11.buf, 0, D3D11_MAP_WRITE_DISCARD, 0, &d3d11_msr); + _SOKOL_UNUSED(hr); + SOKOL_ASSERT(SUCCEEDED(hr)); + memcpy(d3d11_msr.pData, data->ptr, data->size); + _sg_d3d11_Unmap(_sg.d3d11.ctx, (ID3D11Resource*)buf->d3d11.buf, 0); +} + +_SOKOL_PRIVATE int _sg_d3d11_append_buffer(_sg_buffer_t* buf, const sg_range* data, bool new_frame) { + SOKOL_ASSERT(buf && data && data->ptr && (data->size > 0)); + SOKOL_ASSERT(_sg.d3d11.ctx); + SOKOL_ASSERT(buf->d3d11.buf); + D3D11_MAP map_type = new_frame ? D3D11_MAP_WRITE_DISCARD : D3D11_MAP_WRITE_NO_OVERWRITE; + D3D11_MAPPED_SUBRESOURCE d3d11_msr; + HRESULT hr = _sg_d3d11_Map(_sg.d3d11.ctx, (ID3D11Resource*)buf->d3d11.buf, 0, map_type, 0, &d3d11_msr); + _SOKOL_UNUSED(hr); + SOKOL_ASSERT(SUCCEEDED(hr)); + uint8_t* dst_ptr = (uint8_t*)d3d11_msr.pData + buf->cmn.append_pos; + memcpy(dst_ptr, data->ptr, data->size); + _sg_d3d11_Unmap(_sg.d3d11.ctx, (ID3D11Resource*)buf->d3d11.buf, 0); + /* NOTE: this is a requirement from WebGPU, but we want identical behaviour across all backend */ + return _sg_roundup((int)data->size, 4); +} + +_SOKOL_PRIVATE void _sg_d3d11_update_image(_sg_image_t* img, const sg_image_data* data) { + SOKOL_ASSERT(img && data); + SOKOL_ASSERT(_sg.d3d11.ctx); + SOKOL_ASSERT(img->d3d11.tex2d || img->d3d11.tex3d); + ID3D11Resource* d3d11_res = 0; + if (img->d3d11.tex3d) { + d3d11_res = (ID3D11Resource*) img->d3d11.tex3d; + } + else { + d3d11_res = (ID3D11Resource*) img->d3d11.tex2d; + } + SOKOL_ASSERT(d3d11_res); + const int num_faces = (img->cmn.type == SG_IMAGETYPE_CUBE) ? 6:1; + const int num_slices = (img->cmn.type == SG_IMAGETYPE_ARRAY) ? img->cmn.num_slices:1; + UINT subres_index = 0; + HRESULT hr; + _SOKOL_UNUSED(hr); + D3D11_MAPPED_SUBRESOURCE d3d11_msr; + for (int face_index = 0; face_index < num_faces; face_index++) { + for (int slice_index = 0; slice_index < num_slices; slice_index++) { + for (int mip_index = 0; mip_index < img->cmn.num_mipmaps; mip_index++, subres_index++) { + SOKOL_ASSERT(subres_index < (SG_MAX_MIPMAPS * SG_MAX_TEXTUREARRAY_LAYERS)); + const int mip_width = ((img->cmn.width>>mip_index)>0) ? img->cmn.width>>mip_index : 1; + const int mip_height = ((img->cmn.height>>mip_index)>0) ? img->cmn.height>>mip_index : 1; + const int src_pitch = _sg_row_pitch(img->cmn.pixel_format, mip_width, 1); + const sg_range* subimg_data = &(data->subimage[face_index][mip_index]); + const size_t slice_size = subimg_data->size / (size_t)num_slices; + const size_t slice_offset = slice_size * (size_t)slice_index; + const uint8_t* slice_ptr = ((const uint8_t*)subimg_data->ptr) + slice_offset; + hr = _sg_d3d11_Map(_sg.d3d11.ctx, d3d11_res, subres_index, D3D11_MAP_WRITE_DISCARD, 0, &d3d11_msr); + SOKOL_ASSERT(SUCCEEDED(hr)); + /* FIXME: need to handle difference in depth-pitch for 3D textures as well! 
*/ + if (src_pitch == (int)d3d11_msr.RowPitch) { + memcpy(d3d11_msr.pData, slice_ptr, slice_size); + } + else { + SOKOL_ASSERT(src_pitch < (int)d3d11_msr.RowPitch); + const uint8_t* src_ptr = slice_ptr; + uint8_t* dst_ptr = (uint8_t*) d3d11_msr.pData; + for (int row_index = 0; row_index < mip_height; row_index++) { + memcpy(dst_ptr, src_ptr, (size_t)src_pitch); + src_ptr += src_pitch; + dst_ptr += d3d11_msr.RowPitch; + } + } + _sg_d3d11_Unmap(_sg.d3d11.ctx, d3d11_res, subres_index); + } + } + } +} + +/*== METAL BACKEND IMPLEMENTATION ============================================*/ +#elif defined(SOKOL_METAL) + +#if __has_feature(objc_arc) +#define _SG_OBJC_RETAIN(obj) { } +#define _SG_OBJC_RELEASE(obj) { obj = nil; } +#define _SG_OBJC_RELEASE_WITH_NULL(obj) { obj = [NSNull null]; } +#else +#define _SG_OBJC_RETAIN(obj) { [obj retain]; } +#define _SG_OBJC_RELEASE(obj) { [obj release]; obj = nil; } +#define _SG_OBJC_RELEASE_WITH_NULL(obj) { [obj release]; obj = [NSNull null]; } +#endif + +/*-- enum translation functions ----------------------------------------------*/ +_SOKOL_PRIVATE MTLLoadAction _sg_mtl_load_action(sg_action a) { + switch (a) { + case SG_ACTION_CLEAR: return MTLLoadActionClear; + case SG_ACTION_LOAD: return MTLLoadActionLoad; + case SG_ACTION_DONTCARE: return MTLLoadActionDontCare; + default: SOKOL_UNREACHABLE; return (MTLLoadAction)0; + } +} + +_SOKOL_PRIVATE MTLResourceOptions _sg_mtl_buffer_resource_options(sg_usage usg) { + switch (usg) { + case SG_USAGE_IMMUTABLE: + return MTLResourceStorageModeShared; + case SG_USAGE_DYNAMIC: + case SG_USAGE_STREAM: + #if defined(_SG_TARGET_MACOS) + return MTLCPUCacheModeWriteCombined|MTLResourceStorageModeManaged; + #else + return MTLCPUCacheModeWriteCombined; + #endif + default: + SOKOL_UNREACHABLE; + return 0; + } +} + +_SOKOL_PRIVATE MTLVertexStepFunction _sg_mtl_step_function(sg_vertex_step step) { + switch (step) { + case SG_VERTEXSTEP_PER_VERTEX: return MTLVertexStepFunctionPerVertex; + case SG_VERTEXSTEP_PER_INSTANCE: return MTLVertexStepFunctionPerInstance; + default: SOKOL_UNREACHABLE; return (MTLVertexStepFunction)0; + } +} + +_SOKOL_PRIVATE MTLVertexFormat _sg_mtl_vertex_format(sg_vertex_format fmt) { + switch (fmt) { + case SG_VERTEXFORMAT_FLOAT: return MTLVertexFormatFloat; + case SG_VERTEXFORMAT_FLOAT2: return MTLVertexFormatFloat2; + case SG_VERTEXFORMAT_FLOAT3: return MTLVertexFormatFloat3; + case SG_VERTEXFORMAT_FLOAT4: return MTLVertexFormatFloat4; + case SG_VERTEXFORMAT_BYTE4: return MTLVertexFormatChar4; + case SG_VERTEXFORMAT_BYTE4N: return MTLVertexFormatChar4Normalized; + case SG_VERTEXFORMAT_UBYTE4: return MTLVertexFormatUChar4; + case SG_VERTEXFORMAT_UBYTE4N: return MTLVertexFormatUChar4Normalized; + case SG_VERTEXFORMAT_SHORT2: return MTLVertexFormatShort2; + case SG_VERTEXFORMAT_SHORT2N: return MTLVertexFormatShort2Normalized; + case SG_VERTEXFORMAT_USHORT2N: return MTLVertexFormatUShort2Normalized; + case SG_VERTEXFORMAT_SHORT4: return MTLVertexFormatShort4; + case SG_VERTEXFORMAT_SHORT4N: return MTLVertexFormatShort4Normalized; + case SG_VERTEXFORMAT_USHORT4N: return MTLVertexFormatUShort4Normalized; + case SG_VERTEXFORMAT_UINT10_N2: return MTLVertexFormatUInt1010102Normalized; + default: SOKOL_UNREACHABLE; return (MTLVertexFormat)0; + } +} + +_SOKOL_PRIVATE MTLPrimitiveType _sg_mtl_primitive_type(sg_primitive_type t) { + switch (t) { + case SG_PRIMITIVETYPE_POINTS: return MTLPrimitiveTypePoint; + case SG_PRIMITIVETYPE_LINES: return MTLPrimitiveTypeLine; + case SG_PRIMITIVETYPE_LINE_STRIP: return 
MTLPrimitiveTypeLineStrip; + case SG_PRIMITIVETYPE_TRIANGLES: return MTLPrimitiveTypeTriangle; + case SG_PRIMITIVETYPE_TRIANGLE_STRIP: return MTLPrimitiveTypeTriangleStrip; + default: SOKOL_UNREACHABLE; return (MTLPrimitiveType)0; + } +} + +_SOKOL_PRIVATE MTLPixelFormat _sg_mtl_pixel_format(sg_pixel_format fmt) { + switch (fmt) { + case SG_PIXELFORMAT_R8: return MTLPixelFormatR8Unorm; + case SG_PIXELFORMAT_R8SN: return MTLPixelFormatR8Snorm; + case SG_PIXELFORMAT_R8UI: return MTLPixelFormatR8Uint; + case SG_PIXELFORMAT_R8SI: return MTLPixelFormatR8Sint; + case SG_PIXELFORMAT_R16: return MTLPixelFormatR16Unorm; + case SG_PIXELFORMAT_R16SN: return MTLPixelFormatR16Snorm; + case SG_PIXELFORMAT_R16UI: return MTLPixelFormatR16Uint; + case SG_PIXELFORMAT_R16SI: return MTLPixelFormatR16Sint; + case SG_PIXELFORMAT_R16F: return MTLPixelFormatR16Float; + case SG_PIXELFORMAT_RG8: return MTLPixelFormatRG8Unorm; + case SG_PIXELFORMAT_RG8SN: return MTLPixelFormatRG8Snorm; + case SG_PIXELFORMAT_RG8UI: return MTLPixelFormatRG8Uint; + case SG_PIXELFORMAT_RG8SI: return MTLPixelFormatRG8Sint; + case SG_PIXELFORMAT_R32UI: return MTLPixelFormatR32Uint; + case SG_PIXELFORMAT_R32SI: return MTLPixelFormatR32Sint; + case SG_PIXELFORMAT_R32F: return MTLPixelFormatR32Float; + case SG_PIXELFORMAT_RG16: return MTLPixelFormatRG16Unorm; + case SG_PIXELFORMAT_RG16SN: return MTLPixelFormatRG16Snorm; + case SG_PIXELFORMAT_RG16UI: return MTLPixelFormatRG16Uint; + case SG_PIXELFORMAT_RG16SI: return MTLPixelFormatRG16Sint; + case SG_PIXELFORMAT_RG16F: return MTLPixelFormatRG16Float; + case SG_PIXELFORMAT_RGBA8: return MTLPixelFormatRGBA8Unorm; + case SG_PIXELFORMAT_RGBA8SN: return MTLPixelFormatRGBA8Snorm; + case SG_PIXELFORMAT_RGBA8UI: return MTLPixelFormatRGBA8Uint; + case SG_PIXELFORMAT_RGBA8SI: return MTLPixelFormatRGBA8Sint; + case SG_PIXELFORMAT_BGRA8: return MTLPixelFormatBGRA8Unorm; + case SG_PIXELFORMAT_RGB10A2: return MTLPixelFormatRGB10A2Unorm; + case SG_PIXELFORMAT_RG11B10F: return MTLPixelFormatRG11B10Float; + case SG_PIXELFORMAT_RG32UI: return MTLPixelFormatRG32Uint; + case SG_PIXELFORMAT_RG32SI: return MTLPixelFormatRG32Sint; + case SG_PIXELFORMAT_RG32F: return MTLPixelFormatRG32Float; + case SG_PIXELFORMAT_RGBA16: return MTLPixelFormatRGBA16Unorm; + case SG_PIXELFORMAT_RGBA16SN: return MTLPixelFormatRGBA16Snorm; + case SG_PIXELFORMAT_RGBA16UI: return MTLPixelFormatRGBA16Uint; + case SG_PIXELFORMAT_RGBA16SI: return MTLPixelFormatRGBA16Sint; + case SG_PIXELFORMAT_RGBA16F: return MTLPixelFormatRGBA16Float; + case SG_PIXELFORMAT_RGBA32UI: return MTLPixelFormatRGBA32Uint; + case SG_PIXELFORMAT_RGBA32SI: return MTLPixelFormatRGBA32Sint; + case SG_PIXELFORMAT_RGBA32F: return MTLPixelFormatRGBA32Float; + case SG_PIXELFORMAT_DEPTH: return MTLPixelFormatDepth32Float; + case SG_PIXELFORMAT_DEPTH_STENCIL: return MTLPixelFormatDepth32Float_Stencil8; + #if defined(_SG_TARGET_MACOS) + case SG_PIXELFORMAT_BC1_RGBA: return MTLPixelFormatBC1_RGBA; + case SG_PIXELFORMAT_BC2_RGBA: return MTLPixelFormatBC2_RGBA; + case SG_PIXELFORMAT_BC3_RGBA: return MTLPixelFormatBC3_RGBA; + case SG_PIXELFORMAT_BC4_R: return MTLPixelFormatBC4_RUnorm; + case SG_PIXELFORMAT_BC4_RSN: return MTLPixelFormatBC4_RSnorm; + case SG_PIXELFORMAT_BC5_RG: return MTLPixelFormatBC5_RGUnorm; + case SG_PIXELFORMAT_BC5_RGSN: return MTLPixelFormatBC5_RGSnorm; + case SG_PIXELFORMAT_BC6H_RGBF: return MTLPixelFormatBC6H_RGBFloat; + case SG_PIXELFORMAT_BC6H_RGBUF: return MTLPixelFormatBC6H_RGBUfloat; + case SG_PIXELFORMAT_BC7_RGBA: return MTLPixelFormatBC7_RGBAUnorm; + 
#else + case SG_PIXELFORMAT_PVRTC_RGB_2BPP: return MTLPixelFormatPVRTC_RGB_2BPP; + case SG_PIXELFORMAT_PVRTC_RGB_4BPP: return MTLPixelFormatPVRTC_RGB_4BPP; + case SG_PIXELFORMAT_PVRTC_RGBA_2BPP: return MTLPixelFormatPVRTC_RGBA_2BPP; + case SG_PIXELFORMAT_PVRTC_RGBA_4BPP: return MTLPixelFormatPVRTC_RGBA_4BPP; + case SG_PIXELFORMAT_ETC2_RGB8: return MTLPixelFormatETC2_RGB8; + case SG_PIXELFORMAT_ETC2_RGB8A1: return MTLPixelFormatETC2_RGB8A1; + case SG_PIXELFORMAT_ETC2_RGBA8: return MTLPixelFormatEAC_RGBA8; + case SG_PIXELFORMAT_ETC2_RG11: return MTLPixelFormatEAC_RG11Unorm; + case SG_PIXELFORMAT_ETC2_RG11SN: return MTLPixelFormatEAC_RG11Snorm; + #endif + default: return MTLPixelFormatInvalid; + } +} + +_SOKOL_PRIVATE MTLColorWriteMask _sg_mtl_color_write_mask(sg_color_mask m) { + MTLColorWriteMask mtl_mask = MTLColorWriteMaskNone; + if (m & SG_COLORMASK_R) { + mtl_mask |= MTLColorWriteMaskRed; + } + if (m & SG_COLORMASK_G) { + mtl_mask |= MTLColorWriteMaskGreen; + } + if (m & SG_COLORMASK_B) { + mtl_mask |= MTLColorWriteMaskBlue; + } + if (m & SG_COLORMASK_A) { + mtl_mask |= MTLColorWriteMaskAlpha; + } + return mtl_mask; +} + +_SOKOL_PRIVATE MTLBlendOperation _sg_mtl_blend_op(sg_blend_op op) { + switch (op) { + case SG_BLENDOP_ADD: return MTLBlendOperationAdd; + case SG_BLENDOP_SUBTRACT: return MTLBlendOperationSubtract; + case SG_BLENDOP_REVERSE_SUBTRACT: return MTLBlendOperationReverseSubtract; + default: SOKOL_UNREACHABLE; return (MTLBlendOperation)0; + } +} + +_SOKOL_PRIVATE MTLBlendFactor _sg_mtl_blend_factor(sg_blend_factor f) { + switch (f) { + case SG_BLENDFACTOR_ZERO: return MTLBlendFactorZero; + case SG_BLENDFACTOR_ONE: return MTLBlendFactorOne; + case SG_BLENDFACTOR_SRC_COLOR: return MTLBlendFactorSourceColor; + case SG_BLENDFACTOR_ONE_MINUS_SRC_COLOR: return MTLBlendFactorOneMinusSourceColor; + case SG_BLENDFACTOR_SRC_ALPHA: return MTLBlendFactorSourceAlpha; + case SG_BLENDFACTOR_ONE_MINUS_SRC_ALPHA: return MTLBlendFactorOneMinusSourceAlpha; + case SG_BLENDFACTOR_DST_COLOR: return MTLBlendFactorDestinationColor; + case SG_BLENDFACTOR_ONE_MINUS_DST_COLOR: return MTLBlendFactorOneMinusDestinationColor; + case SG_BLENDFACTOR_DST_ALPHA: return MTLBlendFactorDestinationAlpha; + case SG_BLENDFACTOR_ONE_MINUS_DST_ALPHA: return MTLBlendFactorOneMinusDestinationAlpha; + case SG_BLENDFACTOR_SRC_ALPHA_SATURATED: return MTLBlendFactorSourceAlphaSaturated; + case SG_BLENDFACTOR_BLEND_COLOR: return MTLBlendFactorBlendColor; + case SG_BLENDFACTOR_ONE_MINUS_BLEND_COLOR: return MTLBlendFactorOneMinusBlendColor; + case SG_BLENDFACTOR_BLEND_ALPHA: return MTLBlendFactorBlendAlpha; + case SG_BLENDFACTOR_ONE_MINUS_BLEND_ALPHA: return MTLBlendFactorOneMinusBlendAlpha; + default: SOKOL_UNREACHABLE; return (MTLBlendFactor)0; + } +} + +_SOKOL_PRIVATE MTLCompareFunction _sg_mtl_compare_func(sg_compare_func f) { + switch (f) { + case SG_COMPAREFUNC_NEVER: return MTLCompareFunctionNever; + case SG_COMPAREFUNC_LESS: return MTLCompareFunctionLess; + case SG_COMPAREFUNC_EQUAL: return MTLCompareFunctionEqual; + case SG_COMPAREFUNC_LESS_EQUAL: return MTLCompareFunctionLessEqual; + case SG_COMPAREFUNC_GREATER: return MTLCompareFunctionGreater; + case SG_COMPAREFUNC_NOT_EQUAL: return MTLCompareFunctionNotEqual; + case SG_COMPAREFUNC_GREATER_EQUAL: return MTLCompareFunctionGreaterEqual; + case SG_COMPAREFUNC_ALWAYS: return MTLCompareFunctionAlways; + default: SOKOL_UNREACHABLE; return (MTLCompareFunction)0; + } +} + +_SOKOL_PRIVATE MTLStencilOperation _sg_mtl_stencil_op(sg_stencil_op op) { + switch (op) { + case 
SG_STENCILOP_KEEP: return MTLStencilOperationKeep; + case SG_STENCILOP_ZERO: return MTLStencilOperationZero; + case SG_STENCILOP_REPLACE: return MTLStencilOperationReplace; + case SG_STENCILOP_INCR_CLAMP: return MTLStencilOperationIncrementClamp; + case SG_STENCILOP_DECR_CLAMP: return MTLStencilOperationDecrementClamp; + case SG_STENCILOP_INVERT: return MTLStencilOperationInvert; + case SG_STENCILOP_INCR_WRAP: return MTLStencilOperationIncrementWrap; + case SG_STENCILOP_DECR_WRAP: return MTLStencilOperationDecrementWrap; + default: SOKOL_UNREACHABLE; return (MTLStencilOperation)0; + } +} + +_SOKOL_PRIVATE MTLCullMode _sg_mtl_cull_mode(sg_cull_mode m) { + switch (m) { + case SG_CULLMODE_NONE: return MTLCullModeNone; + case SG_CULLMODE_FRONT: return MTLCullModeFront; + case SG_CULLMODE_BACK: return MTLCullModeBack; + default: SOKOL_UNREACHABLE; return (MTLCullMode)0; + } +} + +_SOKOL_PRIVATE MTLWinding _sg_mtl_winding(sg_face_winding w) { + switch (w) { + case SG_FACEWINDING_CW: return MTLWindingClockwise; + case SG_FACEWINDING_CCW: return MTLWindingCounterClockwise; + default: SOKOL_UNREACHABLE; return (MTLWinding)0; + } +} + +_SOKOL_PRIVATE MTLIndexType _sg_mtl_index_type(sg_index_type t) { + switch (t) { + case SG_INDEXTYPE_UINT16: return MTLIndexTypeUInt16; + case SG_INDEXTYPE_UINT32: return MTLIndexTypeUInt32; + default: SOKOL_UNREACHABLE; return (MTLIndexType)0; + } +} + +_SOKOL_PRIVATE int _sg_mtl_index_size(sg_index_type t) { + switch (t) { + case SG_INDEXTYPE_NONE: return 0; + case SG_INDEXTYPE_UINT16: return 2; + case SG_INDEXTYPE_UINT32: return 4; + default: SOKOL_UNREACHABLE; return 0; + } +} + +_SOKOL_PRIVATE MTLTextureType _sg_mtl_texture_type(sg_image_type t) { + switch (t) { + case SG_IMAGETYPE_2D: return MTLTextureType2D; + case SG_IMAGETYPE_CUBE: return MTLTextureTypeCube; + case SG_IMAGETYPE_3D: return MTLTextureType3D; + case SG_IMAGETYPE_ARRAY: return MTLTextureType2DArray; + default: SOKOL_UNREACHABLE; return (MTLTextureType)0; + } +} + +_SOKOL_PRIVATE bool _sg_mtl_is_pvrtc(sg_pixel_format fmt) { + switch (fmt) { + case SG_PIXELFORMAT_PVRTC_RGB_2BPP: + case SG_PIXELFORMAT_PVRTC_RGB_4BPP: + case SG_PIXELFORMAT_PVRTC_RGBA_2BPP: + case SG_PIXELFORMAT_PVRTC_RGBA_4BPP: + return true; + default: + return false; + } +} + +_SOKOL_PRIVATE MTLSamplerAddressMode _sg_mtl_address_mode(sg_wrap w) { + switch (w) { + case SG_WRAP_REPEAT: return MTLSamplerAddressModeRepeat; + case SG_WRAP_CLAMP_TO_EDGE: return MTLSamplerAddressModeClampToEdge; + #if defined(_SG_TARGET_MACOS) + case SG_WRAP_CLAMP_TO_BORDER: return MTLSamplerAddressModeClampToBorderColor; + #else + /* clamp-to-border not supported on iOS, fall back to clamp-to-edge */ + case SG_WRAP_CLAMP_TO_BORDER: return MTLSamplerAddressModeClampToEdge; + #endif + case SG_WRAP_MIRRORED_REPEAT: return MTLSamplerAddressModeMirrorRepeat; + default: SOKOL_UNREACHABLE; return (MTLSamplerAddressMode)0; + } +} + +#if defined(_SG_TARGET_MACOS) +_SOKOL_PRIVATE MTLSamplerBorderColor _sg_mtl_border_color(sg_border_color c) { + switch (c) { + case SG_BORDERCOLOR_TRANSPARENT_BLACK: return MTLSamplerBorderColorTransparentBlack; + case SG_BORDERCOLOR_OPAQUE_BLACK: return MTLSamplerBorderColorOpaqueBlack; + case SG_BORDERCOLOR_OPAQUE_WHITE: return MTLSamplerBorderColorOpaqueWhite; + default: SOKOL_UNREACHABLE; return (MTLSamplerBorderColor)0; + } +} +#endif + +_SOKOL_PRIVATE MTLSamplerMinMagFilter _sg_mtl_minmag_filter(sg_filter f) { + switch (f) { + case SG_FILTER_NEAREST: + case SG_FILTER_NEAREST_MIPMAP_NEAREST: + case 
SG_FILTER_NEAREST_MIPMAP_LINEAR: + return MTLSamplerMinMagFilterNearest; + case SG_FILTER_LINEAR: + case SG_FILTER_LINEAR_MIPMAP_NEAREST: + case SG_FILTER_LINEAR_MIPMAP_LINEAR: + return MTLSamplerMinMagFilterLinear; + default: + SOKOL_UNREACHABLE; return (MTLSamplerMinMagFilter)0; + } +} + +_SOKOL_PRIVATE MTLSamplerMipFilter _sg_mtl_mip_filter(sg_filter f) { + switch (f) { + case SG_FILTER_NEAREST: + case SG_FILTER_LINEAR: + return MTLSamplerMipFilterNotMipmapped; + case SG_FILTER_NEAREST_MIPMAP_NEAREST: + case SG_FILTER_LINEAR_MIPMAP_NEAREST: + return MTLSamplerMipFilterNearest; + case SG_FILTER_NEAREST_MIPMAP_LINEAR: + case SG_FILTER_LINEAR_MIPMAP_LINEAR: + return MTLSamplerMipFilterLinear; + default: + SOKOL_UNREACHABLE; return (MTLSamplerMipFilter)0; + } +} + +/*-- a pool for all Metal resource objects, with deferred release queue -------*/ + +_SOKOL_PRIVATE void _sg_mtl_init_pool(const sg_desc* desc) { + _sg.mtl.idpool.num_slots = 2 * + ( + 2 * desc->buffer_pool_size + + 5 * desc->image_pool_size + + 4 * desc->shader_pool_size + + 2 * desc->pipeline_pool_size + + desc->pass_pool_size + ); + _sg.mtl.idpool.pool = [NSMutableArray arrayWithCapacity:(NSUInteger)_sg.mtl.idpool.num_slots]; + _SG_OBJC_RETAIN(_sg.mtl.idpool.pool); + NSNull* null = [NSNull null]; + for (int i = 0; i < _sg.mtl.idpool.num_slots; i++) { + [_sg.mtl.idpool.pool addObject:null]; + } + SOKOL_ASSERT([_sg.mtl.idpool.pool count] == (NSUInteger)_sg.mtl.idpool.num_slots); + /* a queue of currently free slot indices */ + _sg.mtl.idpool.free_queue_top = 0; + _sg.mtl.idpool.free_queue = (int*)SOKOL_MALLOC((size_t)_sg.mtl.idpool.num_slots * sizeof(int)); + /* pool slot 0 is reserved! */ + for (int i = _sg.mtl.idpool.num_slots-1; i >= 1; i--) { + _sg.mtl.idpool.free_queue[_sg.mtl.idpool.free_queue_top++] = i; + } + /* a circular queue which holds release items (frame index + when a resource is to be released, and the resource's + pool index + */ + _sg.mtl.idpool.release_queue_front = 0; + _sg.mtl.idpool.release_queue_back = 0; + _sg.mtl.idpool.release_queue = (_sg_mtl_release_item_t*)SOKOL_MALLOC((size_t)_sg.mtl.idpool.num_slots * sizeof(_sg_mtl_release_item_t)); + for (int i = 0; i < _sg.mtl.idpool.num_slots; i++) { + _sg.mtl.idpool.release_queue[i].frame_index = 0; + _sg.mtl.idpool.release_queue[i].slot_index = _SG_MTL_INVALID_SLOT_INDEX; + } +} + +_SOKOL_PRIVATE void _sg_mtl_destroy_pool(void) { + SOKOL_FREE(_sg.mtl.idpool.release_queue); _sg.mtl.idpool.release_queue = 0; + SOKOL_FREE(_sg.mtl.idpool.free_queue); _sg.mtl.idpool.free_queue = 0; + _SG_OBJC_RELEASE(_sg.mtl.idpool.pool); +} + +/* get a new free resource pool slot */ +_SOKOL_PRIVATE int _sg_mtl_alloc_pool_slot(void) { + SOKOL_ASSERT(_sg.mtl.idpool.free_queue_top > 0); + const int slot_index = _sg.mtl.idpool.free_queue[--_sg.mtl.idpool.free_queue_top]; + SOKOL_ASSERT((slot_index > 0) && (slot_index < _sg.mtl.idpool.num_slots)); + return slot_index; +} + +/* put a free resource pool slot back into the free-queue */ +_SOKOL_PRIVATE void _sg_mtl_free_pool_slot(int slot_index) { + SOKOL_ASSERT(_sg.mtl.idpool.free_queue_top < _sg.mtl.idpool.num_slots); + SOKOL_ASSERT((slot_index > 0) && (slot_index < _sg.mtl.idpool.num_slots)); + _sg.mtl.idpool.free_queue[_sg.mtl.idpool.free_queue_top++] = slot_index; +} + +/* add an MTLResource to the pool, return pool index or 0 if input was 'nil' */ +_SOKOL_PRIVATE int _sg_mtl_add_resource(id res) { + if (nil == res) { + return _SG_MTL_INVALID_SLOT_INDEX; + } + const int slot_index = _sg_mtl_alloc_pool_slot(); + SOKOL_ASSERT([NSNull 
null] == _sg.mtl.idpool.pool[(NSUInteger)slot_index]); + _sg.mtl.idpool.pool[(NSUInteger)slot_index] = res; + return slot_index; +} + +/* mark an MTLResource for release, this will put the resource into the + deferred-release queue, and the resource will then be released N frames later, + the special pool index 0 will be ignored (this means that a nil + value was provided to _sg_mtl_add_resource() +*/ +_SOKOL_PRIVATE void _sg_mtl_release_resource(uint32_t frame_index, int slot_index) { + if (slot_index == _SG_MTL_INVALID_SLOT_INDEX) { + return; + } + SOKOL_ASSERT((slot_index > 0) && (slot_index < _sg.mtl.idpool.num_slots)); + SOKOL_ASSERT([NSNull null] != _sg.mtl.idpool.pool[(NSUInteger)slot_index]); + int release_index = _sg.mtl.idpool.release_queue_front++; + if (_sg.mtl.idpool.release_queue_front >= _sg.mtl.idpool.num_slots) { + /* wrap-around */ + _sg.mtl.idpool.release_queue_front = 0; + } + /* release queue full? */ + SOKOL_ASSERT(_sg.mtl.idpool.release_queue_front != _sg.mtl.idpool.release_queue_back); + SOKOL_ASSERT(0 == _sg.mtl.idpool.release_queue[release_index].frame_index); + const uint32_t safe_to_release_frame_index = frame_index + SG_NUM_INFLIGHT_FRAMES + 1; + _sg.mtl.idpool.release_queue[release_index].frame_index = safe_to_release_frame_index; + _sg.mtl.idpool.release_queue[release_index].slot_index = slot_index; +} + +/* run garbage-collection pass on all resources in the release-queue */ +_SOKOL_PRIVATE void _sg_mtl_garbage_collect(uint32_t frame_index) { + while (_sg.mtl.idpool.release_queue_back != _sg.mtl.idpool.release_queue_front) { + if (frame_index < _sg.mtl.idpool.release_queue[_sg.mtl.idpool.release_queue_back].frame_index) { + /* don't need to check further, release-items past this are too young */ + break; + } + /* safe to release this resource */ + const int slot_index = _sg.mtl.idpool.release_queue[_sg.mtl.idpool.release_queue_back].slot_index; + SOKOL_ASSERT((slot_index > 0) && (slot_index < _sg.mtl.idpool.num_slots)); + SOKOL_ASSERT(_sg.mtl.idpool.pool[(NSUInteger)slot_index] != [NSNull null]); + _SG_OBJC_RELEASE_WITH_NULL(_sg.mtl.idpool.pool[(NSUInteger)slot_index]); + /* put the now free pool index back on the free queue */ + _sg_mtl_free_pool_slot(slot_index); + /* reset the release queue slot and advance the back index */ + _sg.mtl.idpool.release_queue[_sg.mtl.idpool.release_queue_back].frame_index = 0; + _sg.mtl.idpool.release_queue[_sg.mtl.idpool.release_queue_back].slot_index = _SG_MTL_INVALID_SLOT_INDEX; + _sg.mtl.idpool.release_queue_back++; + if (_sg.mtl.idpool.release_queue_back >= _sg.mtl.idpool.num_slots) { + /* wrap-around */ + _sg.mtl.idpool.release_queue_back = 0; + } + } +} + +_SOKOL_PRIVATE id _sg_mtl_id(int slot_index) { + return _sg.mtl.idpool.pool[(NSUInteger)slot_index]; +} + +_SOKOL_PRIVATE void _sg_mtl_init_sampler_cache(const sg_desc* desc) { + SOKOL_ASSERT(desc->sampler_cache_size > 0); + _sg_smpcache_init(&_sg.mtl.sampler_cache, desc->sampler_cache_size); +} + +/* destroy the sampler cache, and release all sampler objects */ +_SOKOL_PRIVATE void _sg_mtl_destroy_sampler_cache(uint32_t frame_index) { + SOKOL_ASSERT(_sg.mtl.sampler_cache.items); + SOKOL_ASSERT(_sg.mtl.sampler_cache.num_items <= _sg.mtl.sampler_cache.capacity); + for (int i = 0; i < _sg.mtl.sampler_cache.num_items; i++) { + _sg_mtl_release_resource(frame_index, (int)_sg_smpcache_sampler(&_sg.mtl.sampler_cache, i)); + } + _sg_smpcache_discard(&_sg.mtl.sampler_cache); +} + +/* + create and add an MTLSamplerStateObject and return its resource pool index, + reuse 
identical sampler state if one exists +*/ +_SOKOL_PRIVATE int _sg_mtl_create_sampler(id mtl_device, const sg_image_desc* img_desc) { + SOKOL_ASSERT(img_desc); + int index = _sg_smpcache_find_item(&_sg.mtl.sampler_cache, img_desc); + if (index >= 0) { + /* reuse existing sampler */ + return (int)_sg_smpcache_sampler(&_sg.mtl.sampler_cache, index); + } + else { + /* create a new Metal sampler state object and add to sampler cache */ + MTLSamplerDescriptor* mtl_desc = [[MTLSamplerDescriptor alloc] init]; + mtl_desc.sAddressMode = _sg_mtl_address_mode(img_desc->wrap_u); + mtl_desc.tAddressMode = _sg_mtl_address_mode(img_desc->wrap_v); + if (SG_IMAGETYPE_3D == img_desc->type) { + mtl_desc.rAddressMode = _sg_mtl_address_mode(img_desc->wrap_w); + } + #if defined(_SG_TARGET_MACOS) + mtl_desc.borderColor = _sg_mtl_border_color(img_desc->border_color); + #endif + mtl_desc.minFilter = _sg_mtl_minmag_filter(img_desc->min_filter); + mtl_desc.magFilter = _sg_mtl_minmag_filter(img_desc->mag_filter); + mtl_desc.mipFilter = _sg_mtl_mip_filter(img_desc->min_filter); + mtl_desc.lodMinClamp = img_desc->min_lod; + mtl_desc.lodMaxClamp = img_desc->max_lod; + mtl_desc.maxAnisotropy = img_desc->max_anisotropy; + mtl_desc.normalizedCoordinates = YES; + id mtl_sampler = [mtl_device newSamplerStateWithDescriptor:mtl_desc]; + _SG_OBJC_RELEASE(mtl_desc); + int sampler_handle = _sg_mtl_add_resource(mtl_sampler); + _sg_smpcache_add_item(&_sg.mtl.sampler_cache, img_desc, (uintptr_t)sampler_handle); + return sampler_handle; + } +} + +_SOKOL_PRIVATE void _sg_mtl_clear_state_cache(void) { + memset(&_sg.mtl.state_cache, 0, sizeof(_sg.mtl.state_cache)); +} + +/* https://developer.apple.com/metal/Metal-Feature-Set-Tables.pdf */ +_SOKOL_PRIVATE void _sg_mtl_init_caps(void) { + #if defined(_SG_TARGET_MACOS) + _sg.backend = SG_BACKEND_METAL_MACOS; + #elif defined(_SG_TARGET_IOS) + #if defined(_SG_TARGET_IOS_SIMULATOR) + _sg.backend = SG_BACKEND_METAL_SIMULATOR; + #else + _sg.backend = SG_BACKEND_METAL_IOS; + #endif + #endif + _sg.features.instancing = true; + _sg.features.origin_top_left = true; + _sg.features.multiple_render_targets = true; + _sg.features.msaa_render_targets = true; + _sg.features.imagetype_3d = true; + _sg.features.imagetype_array = true; + #if defined(_SG_TARGET_MACOS) + _sg.features.image_clamp_to_border = true; + #else + _sg.features.image_clamp_to_border = false; + #endif + _sg.features.mrt_independent_blend_state = true; + _sg.features.mrt_independent_write_mask = true; + + #if defined(_SG_TARGET_MACOS) + _sg.limits.max_image_size_2d = 16 * 1024; + _sg.limits.max_image_size_cube = 16 * 1024; + _sg.limits.max_image_size_3d = 2 * 1024; + _sg.limits.max_image_size_array = 16 * 1024; + _sg.limits.max_image_array_layers = 2 * 1024; + #else + /* newer iOS devices support 16k textures */ + _sg.limits.max_image_size_2d = 8 * 1024; + _sg.limits.max_image_size_cube = 8 * 1024; + _sg.limits.max_image_size_3d = 2 * 1024; + _sg.limits.max_image_size_array = 8 * 1024; + _sg.limits.max_image_array_layers = 2 * 1024; + #endif + _sg.limits.max_vertex_attrs = SG_MAX_VERTEX_ATTRIBUTES; + + _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_R8]); + _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_R8SN]); + _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_R8UI]); + _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_R8SI]); + #if defined(_SG_TARGET_MACOS) + _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_R16]); + _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_R16SN]); + #else + _sg_pixelformat_sfbr(&_sg.formats[SG_PIXELFORMAT_R16]); + 
_sg_pixelformat_sfbr(&_sg.formats[SG_PIXELFORMAT_R16SN]); + #endif + _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_R16UI]); + _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_R16SI]); + _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_R16F]); + _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_RG8]); + _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_RG8SN]); + _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_RG8UI]); + _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_RG8SI]); + _sg_pixelformat_sr(&_sg.formats[SG_PIXELFORMAT_R32UI]); + _sg_pixelformat_sr(&_sg.formats[SG_PIXELFORMAT_R32SI]); + #if defined(_SG_TARGET_MACOS) + _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_R32F]); + #else + _sg_pixelformat_sbr(&_sg.formats[SG_PIXELFORMAT_R32F]); + #endif + #if defined(_SG_TARGET_MACOS) + _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_RG16]); + _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_RG16SN]); + #else + _sg_pixelformat_sfbr(&_sg.formats[SG_PIXELFORMAT_RG16]); + _sg_pixelformat_sfbr(&_sg.formats[SG_PIXELFORMAT_RG16SN]); + #endif + _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_RG16UI]); + _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_RG16SI]); + _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_RG16F]); + _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_RGBA8]); + _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_RGBA8SN]); + _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_RGBA8UI]); + _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_RGBA8SI]); + _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_BGRA8]); + _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_RGB10A2]); + _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_RG11B10F]); + #if defined(_SG_TARGET_MACOS) + _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_RG32UI]); + _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_RG32SI]); + #else + _sg_pixelformat_sr(&_sg.formats[SG_PIXELFORMAT_RG32UI]); + _sg_pixelformat_sr(&_sg.formats[SG_PIXELFORMAT_RG32SI]); + #endif + #if defined(_SG_TARGET_MACOS) + _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_RG32F]); + #else + _sg_pixelformat_sbr(&_sg.formats[SG_PIXELFORMAT_RG32F]); + #endif + #if defined(_SG_TARGET_MACOS) + _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_RGBA16]); + _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_RGBA16SN]); + #else + _sg_pixelformat_sfbr(&_sg.formats[SG_PIXELFORMAT_RGBA16]); + _sg_pixelformat_sfbr(&_sg.formats[SG_PIXELFORMAT_RGBA16SN]); + #endif + _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_RGBA16UI]); + _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_RGBA16SI]); + _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_RGBA16F]); + #if defined(_SG_TARGET_MACOS) + _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_RGBA32UI]); + _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_RGBA32SI]); + _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_RGBA32F]); + #else + _sg_pixelformat_sr(&_sg.formats[SG_PIXELFORMAT_RGBA32UI]); + _sg_pixelformat_sr(&_sg.formats[SG_PIXELFORMAT_RGBA32SI]); + _sg_pixelformat_sr(&_sg.formats[SG_PIXELFORMAT_RGBA32F]); + #endif + _sg_pixelformat_srmd(&_sg.formats[SG_PIXELFORMAT_DEPTH]); + _sg_pixelformat_srmd(&_sg.formats[SG_PIXELFORMAT_DEPTH_STENCIL]); + #if defined(_SG_TARGET_MACOS) + _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_BC1_RGBA]); + _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_BC2_RGBA]); + _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_BC3_RGBA]); + _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_BC4_R]); + _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_BC4_RSN]); + _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_BC5_RG]); + 
_sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_BC5_RGSN]); + _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_BC6H_RGBF]); + _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_BC6H_RGBUF]); + _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_BC7_RGBA]); + #else + _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_PVRTC_RGB_2BPP]); + _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_PVRTC_RGB_4BPP]); + _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_PVRTC_RGBA_2BPP]); + _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_PVRTC_RGBA_4BPP]); + _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_ETC2_RGB8]); + _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_ETC2_RGB8A1]); + _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_ETC2_RGBA8]); + _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_ETC2_RG11]); + _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_ETC2_RG11SN]); + #endif +} + +/*-- main Metal backend state and functions ----------------------------------*/ +_SOKOL_PRIVATE void _sg_mtl_setup_backend(const sg_desc* desc) { + /* assume already zero-initialized */ + SOKOL_ASSERT(desc); + SOKOL_ASSERT(desc->context.metal.device); + SOKOL_ASSERT(desc->context.metal.renderpass_descriptor_cb || desc->context.metal.renderpass_descriptor_userdata_cb); + SOKOL_ASSERT(desc->context.metal.drawable_cb || desc->context.metal.drawable_userdata_cb); + SOKOL_ASSERT(desc->uniform_buffer_size > 0); + _sg_mtl_init_pool(desc); + _sg_mtl_init_sampler_cache(desc); + _sg_mtl_clear_state_cache(); + _sg.mtl.valid = true; + _sg.mtl.renderpass_descriptor_cb = desc->context.metal.renderpass_descriptor_cb; + _sg.mtl.renderpass_descriptor_userdata_cb = desc->context.metal.renderpass_descriptor_userdata_cb; + _sg.mtl.drawable_cb = desc->context.metal.drawable_cb; + _sg.mtl.drawable_userdata_cb = desc->context.metal.drawable_userdata_cb; + _sg.mtl.user_data = desc->context.metal.user_data; + _sg.mtl.frame_index = 1; + _sg.mtl.ub_size = desc->uniform_buffer_size; + _sg.mtl.sem = dispatch_semaphore_create(SG_NUM_INFLIGHT_FRAMES); + _sg.mtl.device = (__bridge id) desc->context.metal.device; + _sg.mtl.cmd_queue = [_sg.mtl.device newCommandQueue]; + MTLResourceOptions res_opts = MTLResourceCPUCacheModeWriteCombined; + #if defined(_SG_TARGET_MACOS) + res_opts |= MTLResourceStorageModeManaged; + #endif + for (int i = 0; i < SG_NUM_INFLIGHT_FRAMES; i++) { + _sg.mtl.uniform_buffers[i] = [_sg.mtl.device + newBufferWithLength:(NSUInteger)_sg.mtl.ub_size + options:res_opts + ]; + } + _sg_mtl_init_caps(); +} + +_SOKOL_PRIVATE void _sg_mtl_discard_backend(void) { + SOKOL_ASSERT(_sg.mtl.valid); + /* wait for the last frame to finish */ + for (int i = 0; i < SG_NUM_INFLIGHT_FRAMES; i++) { + dispatch_semaphore_wait(_sg.mtl.sem, DISPATCH_TIME_FOREVER); + } + /* semaphore must be "relinquished" before destruction */ + for (int i = 0; i < SG_NUM_INFLIGHT_FRAMES; i++) { + dispatch_semaphore_signal(_sg.mtl.sem); + } + _sg_mtl_destroy_sampler_cache(_sg.mtl.frame_index); + _sg_mtl_garbage_collect(_sg.mtl.frame_index + SG_NUM_INFLIGHT_FRAMES + 2); + _sg_mtl_destroy_pool(); + _sg.mtl.valid = false; + + _SG_OBJC_RELEASE(_sg.mtl.sem); + _SG_OBJC_RELEASE(_sg.mtl.device); + _SG_OBJC_RELEASE(_sg.mtl.cmd_queue); + for (int i = 0; i < SG_NUM_INFLIGHT_FRAMES; i++) { + _SG_OBJC_RELEASE(_sg.mtl.uniform_buffers[i]); + } + /* NOTE: MTLCommandBuffer and MTLRenderCommandEncoder are auto-released */ + _sg.mtl.cmd_buffer = nil; + _sg.mtl.cmd_encoder = nil; +} + +_SOKOL_PRIVATE void _sg_mtl_bind_uniform_buffers(void) { + SOKOL_ASSERT(nil != _sg.mtl.cmd_encoder); + for (int slot = 0; slot < 
SG_MAX_SHADERSTAGE_UBS; slot++) { + [_sg.mtl.cmd_encoder + setVertexBuffer:_sg.mtl.uniform_buffers[_sg.mtl.cur_frame_rotate_index] + offset:0 + atIndex:(NSUInteger)slot]; + [_sg.mtl.cmd_encoder + setFragmentBuffer:_sg.mtl.uniform_buffers[_sg.mtl.cur_frame_rotate_index] + offset:0 + atIndex:(NSUInteger)slot]; + } +} + +_SOKOL_PRIVATE void _sg_mtl_reset_state_cache(void) { + _sg_mtl_clear_state_cache(); + + /* need to restore the uniform buffer binding (normally happens in + _sg_mtl_begin_pass() + */ + if (nil != _sg.mtl.cmd_encoder) { + _sg_mtl_bind_uniform_buffers(); + } +} + +_SOKOL_PRIVATE sg_resource_state _sg_mtl_create_context(_sg_context_t* ctx) { + SOKOL_ASSERT(ctx); + _SOKOL_UNUSED(ctx); + return SG_RESOURCESTATE_VALID; +} + +_SOKOL_PRIVATE void _sg_mtl_destroy_context(_sg_context_t* ctx) { + SOKOL_ASSERT(ctx); + _SOKOL_UNUSED(ctx); + /* empty */ +} + +_SOKOL_PRIVATE void _sg_mtl_activate_context(_sg_context_t* ctx) { + _SOKOL_UNUSED(ctx); + _sg_mtl_clear_state_cache(); +} + +_SOKOL_PRIVATE sg_resource_state _sg_mtl_create_buffer(_sg_buffer_t* buf, const sg_buffer_desc* desc) { + SOKOL_ASSERT(buf && desc); + _sg_buffer_common_init(&buf->cmn, desc); + const bool injected = (0 != desc->mtl_buffers[0]); + MTLResourceOptions mtl_options = _sg_mtl_buffer_resource_options(buf->cmn.usage); + for (int slot = 0; slot < buf->cmn.num_slots; slot++) { + id mtl_buf; + if (injected) { + SOKOL_ASSERT(desc->mtl_buffers[slot]); + mtl_buf = (__bridge id) desc->mtl_buffers[slot]; + } + else { + if (buf->cmn.usage == SG_USAGE_IMMUTABLE) { + SOKOL_ASSERT(desc->data.ptr); + mtl_buf = [_sg.mtl.device newBufferWithBytes:desc->data.ptr length:(NSUInteger)buf->cmn.size options:mtl_options]; + } + else { + mtl_buf = [_sg.mtl.device newBufferWithLength:(NSUInteger)buf->cmn.size options:mtl_options]; + } + } + buf->mtl.buf[slot] = _sg_mtl_add_resource(mtl_buf); + } + return SG_RESOURCESTATE_VALID; +} + +_SOKOL_PRIVATE void _sg_mtl_destroy_buffer(_sg_buffer_t* buf) { + SOKOL_ASSERT(buf); + for (int slot = 0; slot < buf->cmn.num_slots; slot++) { + /* it's valid to call release resource with '0' */ + _sg_mtl_release_resource(_sg.mtl.frame_index, buf->mtl.buf[slot]); + } +} + +_SOKOL_PRIVATE void _sg_mtl_copy_image_data(const _sg_image_t* img, __unsafe_unretained id mtl_tex, const sg_image_data* data) { + const int num_faces = (img->cmn.type == SG_IMAGETYPE_CUBE) ? 6:1; + const int num_slices = (img->cmn.type == SG_IMAGETYPE_ARRAY) ? 
img->cmn.num_slices : 1; + for (int face_index = 0; face_index < num_faces; face_index++) { + for (int mip_index = 0; mip_index < img->cmn.num_mipmaps; mip_index++) { + SOKOL_ASSERT(data->subimage[face_index][mip_index].ptr); + SOKOL_ASSERT(data->subimage[face_index][mip_index].size > 0); + const uint8_t* data_ptr = (const uint8_t*)data->subimage[face_index][mip_index].ptr; + const int mip_width = _sg_max(img->cmn.width >> mip_index, 1); + const int mip_height = _sg_max(img->cmn.height >> mip_index, 1); + /* special case PVRTC formats: bytePerRow must be 0 */ + int bytes_per_row = 0; + int bytes_per_slice = _sg_surface_pitch(img->cmn.pixel_format, mip_width, mip_height, 1); + if (!_sg_mtl_is_pvrtc(img->cmn.pixel_format)) { + bytes_per_row = _sg_row_pitch(img->cmn.pixel_format, mip_width, 1); + } + MTLRegion region; + if (img->cmn.type == SG_IMAGETYPE_3D) { + const int mip_depth = _sg_max(img->cmn.num_slices >> mip_index, 1); + region = MTLRegionMake3D(0, 0, 0, (NSUInteger)mip_width, (NSUInteger)mip_height, (NSUInteger)mip_depth); + /* FIXME: apparently the minimal bytes_per_image size for 3D texture + is 4 KByte... somehow need to handle this */ + } + else { + region = MTLRegionMake2D(0, 0, (NSUInteger)mip_width, (NSUInteger)mip_height); + } + for (int slice_index = 0; slice_index < num_slices; slice_index++) { + const int mtl_slice_index = (img->cmn.type == SG_IMAGETYPE_CUBE) ? face_index : slice_index; + const int slice_offset = slice_index * bytes_per_slice; + SOKOL_ASSERT((slice_offset + bytes_per_slice) <= (int)data->subimage[face_index][mip_index].size); + [mtl_tex replaceRegion:region + mipmapLevel:(NSUInteger)mip_index + slice:(NSUInteger)mtl_slice_index + withBytes:data_ptr + slice_offset + bytesPerRow:(NSUInteger)bytes_per_row + bytesPerImage:(NSUInteger)bytes_per_slice]; + } + } + } +} + +/* + FIXME: METAL RESOURCE STORAGE MODE FOR macOS AND iOS + + For immutable textures on macOS, the recommended procedure is to create + a MTLStorageModeManaged texture with the immutable content first, + and then use the GPU to blit the content into a MTLStorageModePrivate + texture before the first use. + + On iOS use the same one-time-blit procedure, but from a + MTLStorageModeShared to a MTLStorageModePrivate texture. + + It probably makes sense to handle this in a separate 'resource manager' + with a recycable pool of blit-source-textures? 
+*/ + +/* initialize MTLTextureDescritor with common attributes */ +_SOKOL_PRIVATE bool _sg_mtl_init_texdesc_common(MTLTextureDescriptor* mtl_desc, _sg_image_t* img) { + mtl_desc.textureType = _sg_mtl_texture_type(img->cmn.type); + mtl_desc.pixelFormat = _sg_mtl_pixel_format(img->cmn.pixel_format); + if (MTLPixelFormatInvalid == mtl_desc.pixelFormat) { + SOKOL_LOG("Unsupported texture pixel format!\n"); + return false; + } + mtl_desc.width = (NSUInteger)img->cmn.width; + mtl_desc.height = (NSUInteger)img->cmn.height; + if (SG_IMAGETYPE_3D == img->cmn.type) { + mtl_desc.depth = (NSUInteger)img->cmn.num_slices; + } + else { + mtl_desc.depth = 1; + } + mtl_desc.mipmapLevelCount = (NSUInteger)img->cmn.num_mipmaps; + if (SG_IMAGETYPE_ARRAY == img->cmn.type) { + mtl_desc.arrayLength = (NSUInteger)img->cmn.num_slices; + } + else { + mtl_desc.arrayLength = 1; + } + mtl_desc.usage = MTLTextureUsageShaderRead; + if (img->cmn.usage != SG_USAGE_IMMUTABLE) { + mtl_desc.cpuCacheMode = MTLCPUCacheModeWriteCombined; + } + #if defined(_SG_TARGET_MACOS) + /* macOS: use managed textures */ + mtl_desc.resourceOptions = MTLResourceStorageModeManaged; + mtl_desc.storageMode = MTLStorageModeManaged; + #else + /* iOS: use CPU/GPU shared memory */ + mtl_desc.resourceOptions = MTLResourceStorageModeShared; + mtl_desc.storageMode = MTLStorageModeShared; + #endif + return true; +} + +/* initialize MTLTextureDescritor with rendertarget attributes */ +_SOKOL_PRIVATE void _sg_mtl_init_texdesc_rt(MTLTextureDescriptor* mtl_desc, _sg_image_t* img) { + SOKOL_ASSERT(img->cmn.render_target); + _SOKOL_UNUSED(img); + /* reset the cpuCacheMode to 'default' */ + mtl_desc.cpuCacheMode = MTLCPUCacheModeDefaultCache; + /* render targets are only visible to the GPU */ + mtl_desc.resourceOptions = MTLResourceStorageModePrivate; + mtl_desc.storageMode = MTLStorageModePrivate; + /* non-MSAA render targets are shader-readable */ + mtl_desc.usage = MTLTextureUsageShaderRead | MTLTextureUsageRenderTarget; +} + +/* initialize MTLTextureDescritor with MSAA attributes */ +_SOKOL_PRIVATE void _sg_mtl_init_texdesc_rt_msaa(MTLTextureDescriptor* mtl_desc, _sg_image_t* img) { + SOKOL_ASSERT(img->cmn.sample_count > 1); + /* reset the cpuCacheMode to 'default' */ + mtl_desc.cpuCacheMode = MTLCPUCacheModeDefaultCache; + /* render targets are only visible to the GPU */ + mtl_desc.resourceOptions = MTLResourceStorageModePrivate; + mtl_desc.storageMode = MTLStorageModePrivate; + /* MSAA render targets are not shader-readable (instead they are resolved) */ + mtl_desc.usage = MTLTextureUsageRenderTarget; + mtl_desc.textureType = MTLTextureType2DMultisample; + mtl_desc.depth = 1; + mtl_desc.arrayLength = 1; + mtl_desc.mipmapLevelCount = 1; + mtl_desc.sampleCount = (NSUInteger)img->cmn.sample_count; +} + +_SOKOL_PRIVATE sg_resource_state _sg_mtl_create_image(_sg_image_t* img, const sg_image_desc* desc) { + SOKOL_ASSERT(img && desc); + _sg_image_common_init(&img->cmn, desc); + const bool injected = (0 != desc->mtl_textures[0]); + const bool msaa = (img->cmn.sample_count > 1); + + /* first initialize all Metal resource pool slots to 'empty' */ + for (int i = 0; i < SG_NUM_INFLIGHT_FRAMES; i++) { + img->mtl.tex[i] = _sg_mtl_add_resource(nil); + } + img->mtl.sampler_state = _sg_mtl_add_resource(nil); + img->mtl.depth_tex = _sg_mtl_add_resource(nil); + img->mtl.msaa_tex = _sg_mtl_add_resource(nil); + + /* initialize a Metal texture descriptor with common attributes */ + MTLTextureDescriptor* mtl_desc = [[MTLTextureDescriptor alloc] init]; + if 
(!_sg_mtl_init_texdesc_common(mtl_desc, img)) { + _SG_OBJC_RELEASE(mtl_desc); + return SG_RESOURCESTATE_FAILED; + } + + /* special case depth-stencil-buffer? */ + if (_sg_is_valid_rendertarget_depth_format(img->cmn.pixel_format)) { + /* depth-stencil buffer texture must always be a render target */ + SOKOL_ASSERT(img->cmn.render_target); + SOKOL_ASSERT(img->cmn.type == SG_IMAGETYPE_2D); + SOKOL_ASSERT(img->cmn.num_mipmaps == 1); + SOKOL_ASSERT(!injected); + if (msaa) { + _sg_mtl_init_texdesc_rt_msaa(mtl_desc, img); + } + else { + _sg_mtl_init_texdesc_rt(mtl_desc, img); + } + id tex = [_sg.mtl.device newTextureWithDescriptor:mtl_desc]; + SOKOL_ASSERT(nil != tex); + img->mtl.depth_tex = _sg_mtl_add_resource(tex); + } + else { + /* create the color texture + In case this is a render target without MSAA, add the relevant + render-target descriptor attributes. + In case this is a render target *with* MSAA, the color texture + will serve as MSAA-resolve target (not as render target), and rendering + will go into a separate render target texture of type + MTLTextureType2DMultisample. + */ + if (img->cmn.render_target && !msaa) { + _sg_mtl_init_texdesc_rt(mtl_desc, img); + } + for (int slot = 0; slot < img->cmn.num_slots; slot++) { + id tex; + if (injected) { + SOKOL_ASSERT(desc->mtl_textures[slot]); + tex = (__bridge id) desc->mtl_textures[slot]; + } + else { + tex = [_sg.mtl.device newTextureWithDescriptor:mtl_desc]; + if ((img->cmn.usage == SG_USAGE_IMMUTABLE) && !img->cmn.render_target) { + _sg_mtl_copy_image_data(img, tex, &desc->data); + } + } + img->mtl.tex[slot] = _sg_mtl_add_resource(tex); + } + + /* if MSAA color render target, create an additional MSAA render-surface texture */ + if (img->cmn.render_target && msaa) { + _sg_mtl_init_texdesc_rt_msaa(mtl_desc, img); + id tex = [_sg.mtl.device newTextureWithDescriptor:mtl_desc]; + img->mtl.msaa_tex = _sg_mtl_add_resource(tex); + } + + /* create (possibly shared) sampler state */ + img->mtl.sampler_state = _sg_mtl_create_sampler(_sg.mtl.device, desc); + } + _SG_OBJC_RELEASE(mtl_desc); + return SG_RESOURCESTATE_VALID; +} + +_SOKOL_PRIVATE void _sg_mtl_destroy_image(_sg_image_t* img) { + SOKOL_ASSERT(img); + /* it's valid to call release resource with a 'null resource' */ + for (int slot = 0; slot < img->cmn.num_slots; slot++) { + _sg_mtl_release_resource(_sg.mtl.frame_index, img->mtl.tex[slot]); + } + _sg_mtl_release_resource(_sg.mtl.frame_index, img->mtl.depth_tex); + _sg_mtl_release_resource(_sg.mtl.frame_index, img->mtl.msaa_tex); + /* NOTE: sampler state objects are shared and not released until shutdown */ +} + +_SOKOL_PRIVATE id _sg_mtl_compile_library(const char* src) { + NSError* err = NULL; + id lib = [_sg.mtl.device + newLibraryWithSource:[NSString stringWithUTF8String:src] + options:nil + error:&err + ]; + if (err) { + SOKOL_LOG([err.localizedDescription UTF8String]); + } + return lib; +} + +_SOKOL_PRIVATE id _sg_mtl_library_from_bytecode(const void* ptr, size_t num_bytes) { + NSError* err = NULL; + dispatch_data_t lib_data = dispatch_data_create(ptr, num_bytes, NULL, DISPATCH_DATA_DESTRUCTOR_DEFAULT); + id lib = [_sg.mtl.device newLibraryWithData:lib_data error:&err]; + if (err) { + SOKOL_LOG([err.localizedDescription UTF8String]); + } + _SG_OBJC_RELEASE(lib_data); + return lib; +} + +_SOKOL_PRIVATE sg_resource_state _sg_mtl_create_shader(_sg_shader_t* shd, const sg_shader_desc* desc) { + SOKOL_ASSERT(shd && desc); + + _sg_shader_common_init(&shd->cmn, desc); + + /* create metal libray objects and lookup entry functions */ + id 
vs_lib; + id fs_lib; + id vs_func; + id fs_func; + const char* vs_entry = desc->vs.entry; + const char* fs_entry = desc->fs.entry; + if (desc->vs.bytecode.ptr && desc->fs.bytecode.ptr) { + /* separate byte code provided */ + vs_lib = _sg_mtl_library_from_bytecode(desc->vs.bytecode.ptr, desc->vs.bytecode.size); + fs_lib = _sg_mtl_library_from_bytecode(desc->fs.bytecode.ptr, desc->fs.bytecode.size); + if (nil == vs_lib || nil == fs_lib) { + return SG_RESOURCESTATE_FAILED; + } + vs_func = [vs_lib newFunctionWithName:[NSString stringWithUTF8String:vs_entry]]; + fs_func = [fs_lib newFunctionWithName:[NSString stringWithUTF8String:fs_entry]]; + } + else if (desc->vs.source && desc->fs.source) { + /* separate sources provided */ + vs_lib = _sg_mtl_compile_library(desc->vs.source); + fs_lib = _sg_mtl_compile_library(desc->fs.source); + if (nil == vs_lib || nil == fs_lib) { + return SG_RESOURCESTATE_FAILED; + } + vs_func = [vs_lib newFunctionWithName:[NSString stringWithUTF8String:vs_entry]]; + fs_func = [fs_lib newFunctionWithName:[NSString stringWithUTF8String:fs_entry]]; + } + else { + return SG_RESOURCESTATE_FAILED; + } + if (nil == vs_func) { + SOKOL_LOG("vertex shader entry function not found\n"); + return SG_RESOURCESTATE_FAILED; + } + if (nil == fs_func) { + SOKOL_LOG("fragment shader entry function not found\n"); + return SG_RESOURCESTATE_FAILED; + } + /* it is legal to call _sg_mtl_add_resource with a nil value, this will return a special 0xFFFFFFFF index */ + shd->mtl.stage[SG_SHADERSTAGE_VS].mtl_lib = _sg_mtl_add_resource(vs_lib); + shd->mtl.stage[SG_SHADERSTAGE_FS].mtl_lib = _sg_mtl_add_resource(fs_lib); + shd->mtl.stage[SG_SHADERSTAGE_VS].mtl_func = _sg_mtl_add_resource(vs_func); + shd->mtl.stage[SG_SHADERSTAGE_FS].mtl_func = _sg_mtl_add_resource(fs_func); + return SG_RESOURCESTATE_VALID; +} + +_SOKOL_PRIVATE void _sg_mtl_destroy_shader(_sg_shader_t* shd) { + SOKOL_ASSERT(shd); + /* it is valid to call _sg_mtl_release_resource with a 'null resource' */ + _sg_mtl_release_resource(_sg.mtl.frame_index, shd->mtl.stage[SG_SHADERSTAGE_VS].mtl_func); + _sg_mtl_release_resource(_sg.mtl.frame_index, shd->mtl.stage[SG_SHADERSTAGE_VS].mtl_lib); + _sg_mtl_release_resource(_sg.mtl.frame_index, shd->mtl.stage[SG_SHADERSTAGE_FS].mtl_func); + _sg_mtl_release_resource(_sg.mtl.frame_index, shd->mtl.stage[SG_SHADERSTAGE_FS].mtl_lib); +} + +_SOKOL_PRIVATE sg_resource_state _sg_mtl_create_pipeline(_sg_pipeline_t* pip, _sg_shader_t* shd, const sg_pipeline_desc* desc) { + SOKOL_ASSERT(pip && shd && desc); + SOKOL_ASSERT(desc->shader.id == shd->slot.id); + + pip->shader = shd; + _sg_pipeline_common_init(&pip->cmn, desc); + + sg_primitive_type prim_type = desc->primitive_type; + pip->mtl.prim_type = _sg_mtl_primitive_type(prim_type); + pip->mtl.index_size = _sg_mtl_index_size(pip->cmn.index_type); + if (SG_INDEXTYPE_NONE != pip->cmn.index_type) { + pip->mtl.index_type = _sg_mtl_index_type(pip->cmn.index_type); + } + pip->mtl.cull_mode = _sg_mtl_cull_mode(desc->cull_mode); + pip->mtl.winding = _sg_mtl_winding(desc->face_winding); + pip->mtl.stencil_ref = desc->stencil.ref; + + /* create vertex-descriptor */ + MTLVertexDescriptor* vtx_desc = [MTLVertexDescriptor vertexDescriptor]; + for (NSUInteger attr_index = 0; attr_index < SG_MAX_VERTEX_ATTRIBUTES; attr_index++) { + const sg_vertex_attr_desc* a_desc = &desc->layout.attrs[attr_index]; + if (a_desc->format == SG_VERTEXFORMAT_INVALID) { + break; + } + SOKOL_ASSERT(a_desc->buffer_index < SG_MAX_SHADERSTAGE_BUFFERS); + vtx_desc.attributes[attr_index].format = 
_sg_mtl_vertex_format(a_desc->format); + vtx_desc.attributes[attr_index].offset = (NSUInteger)a_desc->offset; + vtx_desc.attributes[attr_index].bufferIndex = (NSUInteger)(a_desc->buffer_index + SG_MAX_SHADERSTAGE_UBS); + pip->cmn.vertex_layout_valid[a_desc->buffer_index] = true; + } + for (NSUInteger layout_index = 0; layout_index < SG_MAX_SHADERSTAGE_BUFFERS; layout_index++) { + if (pip->cmn.vertex_layout_valid[layout_index]) { + const sg_buffer_layout_desc* l_desc = &desc->layout.buffers[layout_index]; + const NSUInteger mtl_vb_slot = layout_index + SG_MAX_SHADERSTAGE_UBS; + SOKOL_ASSERT(l_desc->stride > 0); + vtx_desc.layouts[mtl_vb_slot].stride = (NSUInteger)l_desc->stride; + vtx_desc.layouts[mtl_vb_slot].stepFunction = _sg_mtl_step_function(l_desc->step_func); + vtx_desc.layouts[mtl_vb_slot].stepRate = (NSUInteger)l_desc->step_rate; + } + } + + /* render-pipeline descriptor */ + MTLRenderPipelineDescriptor* rp_desc = [[MTLRenderPipelineDescriptor alloc] init]; + rp_desc.vertexDescriptor = vtx_desc; + SOKOL_ASSERT(shd->mtl.stage[SG_SHADERSTAGE_VS].mtl_func != _SG_MTL_INVALID_SLOT_INDEX); + rp_desc.vertexFunction = _sg_mtl_id(shd->mtl.stage[SG_SHADERSTAGE_VS].mtl_func); + SOKOL_ASSERT(shd->mtl.stage[SG_SHADERSTAGE_FS].mtl_func != _SG_MTL_INVALID_SLOT_INDEX); + rp_desc.fragmentFunction = _sg_mtl_id(shd->mtl.stage[SG_SHADERSTAGE_FS].mtl_func); + rp_desc.sampleCount = (NSUInteger)desc->sample_count; + rp_desc.alphaToCoverageEnabled = desc->alpha_to_coverage_enabled; + rp_desc.alphaToOneEnabled = NO; + rp_desc.rasterizationEnabled = YES; + rp_desc.depthAttachmentPixelFormat = _sg_mtl_pixel_format(desc->depth.pixel_format); + if (desc->depth.pixel_format == SG_PIXELFORMAT_DEPTH_STENCIL) { + rp_desc.stencilAttachmentPixelFormat = _sg_mtl_pixel_format(desc->depth.pixel_format); + } + /* FIXME: this only works on macOS 10.13! 
+ for (int i = 0; i < (SG_MAX_SHADERSTAGE_UBS+SG_MAX_SHADERSTAGE_BUFFERS); i++) { + rp_desc.vertexBuffers[i].mutability = MTLMutabilityImmutable; + } + for (int i = 0; i < SG_MAX_SHADERSTAGE_UBS; i++) { + rp_desc.fragmentBuffers[i].mutability = MTLMutabilityImmutable; + } + */ + for (NSUInteger i = 0; i < (NSUInteger)desc->color_count; i++) { + SOKOL_ASSERT(i < SG_MAX_COLOR_ATTACHMENTS); + const sg_color_state* cs = &desc->colors[i]; + rp_desc.colorAttachments[i].pixelFormat = _sg_mtl_pixel_format(cs->pixel_format); + rp_desc.colorAttachments[i].writeMask = _sg_mtl_color_write_mask(cs->write_mask); + rp_desc.colorAttachments[i].blendingEnabled = cs->blend.enabled; + rp_desc.colorAttachments[i].alphaBlendOperation = _sg_mtl_blend_op(cs->blend.op_alpha); + rp_desc.colorAttachments[i].rgbBlendOperation = _sg_mtl_blend_op(cs->blend.op_rgb); + rp_desc.colorAttachments[i].destinationAlphaBlendFactor = _sg_mtl_blend_factor(cs->blend.dst_factor_alpha); + rp_desc.colorAttachments[i].destinationRGBBlendFactor = _sg_mtl_blend_factor(cs->blend.dst_factor_rgb); + rp_desc.colorAttachments[i].sourceAlphaBlendFactor = _sg_mtl_blend_factor(cs->blend.src_factor_alpha); + rp_desc.colorAttachments[i].sourceRGBBlendFactor = _sg_mtl_blend_factor(cs->blend.src_factor_rgb); + } + NSError* err = NULL; + id mtl_rps = [_sg.mtl.device newRenderPipelineStateWithDescriptor:rp_desc error:&err]; + _SG_OBJC_RELEASE(rp_desc); + if (nil == mtl_rps) { + SOKOL_ASSERT(err); + SOKOL_LOG([err.localizedDescription UTF8String]); + return SG_RESOURCESTATE_FAILED; + } + + /* depth-stencil-state */ + MTLDepthStencilDescriptor* ds_desc = [[MTLDepthStencilDescriptor alloc] init]; + ds_desc.depthCompareFunction = _sg_mtl_compare_func(desc->depth.compare); + ds_desc.depthWriteEnabled = desc->depth.write_enabled; + if (desc->stencil.enabled) { + const sg_stencil_face_state* sb = &desc->stencil.back; + ds_desc.backFaceStencil = [[MTLStencilDescriptor alloc] init]; + ds_desc.backFaceStencil.stencilFailureOperation = _sg_mtl_stencil_op(sb->fail_op); + ds_desc.backFaceStencil.depthFailureOperation = _sg_mtl_stencil_op(sb->depth_fail_op); + ds_desc.backFaceStencil.depthStencilPassOperation = _sg_mtl_stencil_op(sb->pass_op); + ds_desc.backFaceStencil.stencilCompareFunction = _sg_mtl_compare_func(sb->compare); + ds_desc.backFaceStencil.readMask = desc->stencil.read_mask; + ds_desc.backFaceStencil.writeMask = desc->stencil.write_mask; + const sg_stencil_face_state* sf = &desc->stencil.front; + ds_desc.frontFaceStencil = [[MTLStencilDescriptor alloc] init]; + ds_desc.frontFaceStencil.stencilFailureOperation = _sg_mtl_stencil_op(sf->fail_op); + ds_desc.frontFaceStencil.depthFailureOperation = _sg_mtl_stencil_op(sf->depth_fail_op); + ds_desc.frontFaceStencil.depthStencilPassOperation = _sg_mtl_stencil_op(sf->pass_op); + ds_desc.frontFaceStencil.stencilCompareFunction = _sg_mtl_compare_func(sf->compare); + ds_desc.frontFaceStencil.readMask = desc->stencil.read_mask; + ds_desc.frontFaceStencil.writeMask = desc->stencil.write_mask; + } + id mtl_dss = [_sg.mtl.device newDepthStencilStateWithDescriptor:ds_desc]; + _SG_OBJC_RELEASE(ds_desc); + pip->mtl.rps = _sg_mtl_add_resource(mtl_rps); + pip->mtl.dss = _sg_mtl_add_resource(mtl_dss); + return SG_RESOURCESTATE_VALID; +} + +_SOKOL_PRIVATE void _sg_mtl_destroy_pipeline(_sg_pipeline_t* pip) { + SOKOL_ASSERT(pip); + /* it's valid to call release resource with a 'null resource' */ + _sg_mtl_release_resource(_sg.mtl.frame_index, pip->mtl.rps); + _sg_mtl_release_resource(_sg.mtl.frame_index, pip->mtl.dss); +} + 
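+/* NOTE: a minimal application-side sketch for orientation (not part of the backend
+   itself; 'pip', 'bind', 'vs_params', 'width' and 'height' are assumed to exist in
+   the calling code). The per-frame sequence below is what drives the backend
+   functions in this file (_sg_mtl_begin_pass, _sg_mtl_apply_pipeline,
+   _sg_mtl_apply_bindings, _sg_mtl_apply_uniforms, _sg_mtl_draw, _sg_mtl_end_pass
+   and _sg_mtl_commit further below):
+
+       sg_pass_action pass_action = {
+           .colors[0] = { .action = SG_ACTION_CLEAR, .value = { 0.0f, 0.0f, 0.0f, 1.0f } }
+       };
+       sg_begin_default_pass(&pass_action, width, height);
+       sg_apply_pipeline(pip);
+       sg_apply_bindings(&bind);
+       sg_apply_uniforms(SG_SHADERSTAGE_VS, 0, &SG_RANGE(vs_params));
+       sg_draw(0, 6, 1);
+       sg_end_pass();
+       sg_commit();
+
+   sg_commit() ends up in _sg_mtl_commit(), which presents the drawable, rotates the
+   per-frame uniform buffer slot (cur_frame_rotate_index), and runs
+   _sg_mtl_garbage_collect() so that resources queued via _sg_mtl_release_resource()
+   are only released once their deferred-release frame index has safely passed.
+*/
+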
+_SOKOL_PRIVATE sg_resource_state _sg_mtl_create_pass(_sg_pass_t* pass, _sg_image_t** att_images, const sg_pass_desc* desc) { + SOKOL_ASSERT(pass && desc); + SOKOL_ASSERT(att_images && att_images[0]); + + _sg_pass_common_init(&pass->cmn, desc); + + /* copy image pointers */ + const sg_pass_attachment_desc* att_desc; + for (int i = 0; i < pass->cmn.num_color_atts; i++) { + att_desc = &desc->color_attachments[i]; + if (att_desc->image.id != SG_INVALID_ID) { + SOKOL_ASSERT(att_desc->image.id != SG_INVALID_ID); + SOKOL_ASSERT(0 == pass->mtl.color_atts[i].image); + SOKOL_ASSERT(att_images[i] && (att_images[i]->slot.id == att_desc->image.id)); + SOKOL_ASSERT(_sg_is_valid_rendertarget_color_format(att_images[i]->cmn.pixel_format)); + pass->mtl.color_atts[i].image = att_images[i]; + } + } + SOKOL_ASSERT(0 == pass->mtl.ds_att.image); + att_desc = &desc->depth_stencil_attachment; + if (att_desc->image.id != SG_INVALID_ID) { + const int ds_img_index = SG_MAX_COLOR_ATTACHMENTS; + SOKOL_ASSERT(att_images[ds_img_index] && (att_images[ds_img_index]->slot.id == att_desc->image.id)); + SOKOL_ASSERT(_sg_is_valid_rendertarget_depth_format(att_images[ds_img_index]->cmn.pixel_format)); + pass->mtl.ds_att.image = att_images[ds_img_index]; + } + return SG_RESOURCESTATE_VALID; +} + +_SOKOL_PRIVATE void _sg_mtl_destroy_pass(_sg_pass_t* pass) { + SOKOL_ASSERT(pass); + _SOKOL_UNUSED(pass); +} + +_SOKOL_PRIVATE _sg_image_t* _sg_mtl_pass_color_image(const _sg_pass_t* pass, int index) { + SOKOL_ASSERT(pass && (index >= 0) && (index < SG_MAX_COLOR_ATTACHMENTS)); + /* NOTE: may return null */ + return pass->mtl.color_atts[index].image; +} + +_SOKOL_PRIVATE _sg_image_t* _sg_mtl_pass_ds_image(const _sg_pass_t* pass) { + /* NOTE: may return null */ + SOKOL_ASSERT(pass); + return pass->mtl.ds_att.image; +} + +_SOKOL_PRIVATE void _sg_mtl_begin_pass(_sg_pass_t* pass, const sg_pass_action* action, int w, int h) { + SOKOL_ASSERT(action); + SOKOL_ASSERT(!_sg.mtl.in_pass); + SOKOL_ASSERT(_sg.mtl.cmd_queue); + SOKOL_ASSERT(nil == _sg.mtl.cmd_encoder); + SOKOL_ASSERT(_sg.mtl.renderpass_descriptor_cb || _sg.mtl.renderpass_descriptor_userdata_cb); + _sg.mtl.in_pass = true; + _sg.mtl.cur_width = w; + _sg.mtl.cur_height = h; + _sg_mtl_clear_state_cache(); + + /* if this is the first pass in the frame, create a command buffer */ + if (nil == _sg.mtl.cmd_buffer) { + /* block until the oldest frame in flight has finished */ + dispatch_semaphore_wait(_sg.mtl.sem, DISPATCH_TIME_FOREVER); + _sg.mtl.cmd_buffer = [_sg.mtl.cmd_queue commandBufferWithUnretainedReferences]; + } + + /* if this is first pass in frame, get uniform buffer base pointer */ + if (0 == _sg.mtl.cur_ub_base_ptr) { + _sg.mtl.cur_ub_base_ptr = (uint8_t*)[_sg.mtl.uniform_buffers[_sg.mtl.cur_frame_rotate_index] contents]; + } + + /* initialize a render pass descriptor */ + MTLRenderPassDescriptor* pass_desc = nil; + if (pass) { + /* offscreen render pass */ + pass_desc = [MTLRenderPassDescriptor renderPassDescriptor]; + } + else { + /* default render pass, call user-provided callback to provide render pass descriptor */ + if (_sg.mtl.renderpass_descriptor_cb) { + pass_desc = (__bridge MTLRenderPassDescriptor*) _sg.mtl.renderpass_descriptor_cb(); + } + else { + pass_desc = (__bridge MTLRenderPassDescriptor*) _sg.mtl.renderpass_descriptor_userdata_cb(_sg.mtl.user_data); + } + + } + if (pass_desc) { + _sg.mtl.pass_valid = true; + } + else { + /* default pass descriptor will not be valid if window is minimized, + don't do any rendering in this case */ + _sg.mtl.pass_valid = false; 
+ return; + } + if (pass) { + /* setup pass descriptor for offscreen rendering */ + SOKOL_ASSERT(pass->slot.state == SG_RESOURCESTATE_VALID); + for (NSUInteger i = 0; i < (NSUInteger)pass->cmn.num_color_atts; i++) { + const _sg_pass_attachment_t* cmn_att = &pass->cmn.color_atts[i]; + const _sg_mtl_attachment_t* mtl_att = &pass->mtl.color_atts[i]; + const _sg_image_t* att_img = mtl_att->image; + SOKOL_ASSERT(att_img->slot.state == SG_RESOURCESTATE_VALID); + SOKOL_ASSERT(att_img->slot.id == cmn_att->image_id.id); + const bool is_msaa = (att_img->cmn.sample_count > 1); + pass_desc.colorAttachments[i].loadAction = _sg_mtl_load_action(action->colors[i].action); + pass_desc.colorAttachments[i].storeAction = is_msaa ? MTLStoreActionMultisampleResolve : MTLStoreActionStore; + sg_color c = action->colors[i].value; + pass_desc.colorAttachments[i].clearColor = MTLClearColorMake(c.r, c.g, c.b, c.a); + if (is_msaa) { + SOKOL_ASSERT(att_img->mtl.msaa_tex != _SG_MTL_INVALID_SLOT_INDEX); + SOKOL_ASSERT(att_img->mtl.tex[mtl_att->image->cmn.active_slot] != _SG_MTL_INVALID_SLOT_INDEX); + pass_desc.colorAttachments[i].texture = _sg_mtl_id(att_img->mtl.msaa_tex); + pass_desc.colorAttachments[i].resolveTexture = _sg_mtl_id(att_img->mtl.tex[att_img->cmn.active_slot]); + pass_desc.colorAttachments[i].resolveLevel = (NSUInteger)cmn_att->mip_level; + switch (att_img->cmn.type) { + case SG_IMAGETYPE_CUBE: + case SG_IMAGETYPE_ARRAY: + pass_desc.colorAttachments[i].resolveSlice = (NSUInteger)cmn_att->slice; + break; + case SG_IMAGETYPE_3D: + pass_desc.colorAttachments[i].resolveDepthPlane = (NSUInteger)cmn_att->slice; + break; + default: break; + } + } + else { + SOKOL_ASSERT(att_img->mtl.tex[att_img->cmn.active_slot] != _SG_MTL_INVALID_SLOT_INDEX); + pass_desc.colorAttachments[i].texture = _sg_mtl_id(att_img->mtl.tex[att_img->cmn.active_slot]); + pass_desc.colorAttachments[i].level = (NSUInteger)cmn_att->mip_level; + switch (att_img->cmn.type) { + case SG_IMAGETYPE_CUBE: + case SG_IMAGETYPE_ARRAY: + pass_desc.colorAttachments[i].slice = (NSUInteger)cmn_att->slice; + break; + case SG_IMAGETYPE_3D: + pass_desc.colorAttachments[i].depthPlane = (NSUInteger)cmn_att->slice; + break; + default: break; + } + } + } + const _sg_image_t* ds_att_img = pass->mtl.ds_att.image; + if (0 != ds_att_img) { + SOKOL_ASSERT(ds_att_img->slot.state == SG_RESOURCESTATE_VALID); + SOKOL_ASSERT(ds_att_img->slot.id == pass->cmn.ds_att.image_id.id); + SOKOL_ASSERT(ds_att_img->mtl.depth_tex != _SG_MTL_INVALID_SLOT_INDEX); + pass_desc.depthAttachment.texture = _sg_mtl_id(ds_att_img->mtl.depth_tex); + pass_desc.depthAttachment.loadAction = _sg_mtl_load_action(action->depth.action); + pass_desc.depthAttachment.clearDepth = action->depth.value; + if (_sg_is_depth_stencil_format(ds_att_img->cmn.pixel_format)) { + pass_desc.stencilAttachment.texture = _sg_mtl_id(ds_att_img->mtl.depth_tex); + pass_desc.stencilAttachment.loadAction = _sg_mtl_load_action(action->stencil.action); + pass_desc.stencilAttachment.clearStencil = action->stencil.value; + } + } + } + else { + /* setup pass descriptor for default rendering */ + pass_desc.colorAttachments[0].loadAction = _sg_mtl_load_action(action->colors[0].action); + sg_color c = action->colors[0].value; + pass_desc.colorAttachments[0].clearColor = MTLClearColorMake(c.r, c.g, c.b, c.a); + pass_desc.depthAttachment.loadAction = _sg_mtl_load_action(action->depth.action); + pass_desc.depthAttachment.clearDepth = action->depth.value; + pass_desc.stencilAttachment.loadAction = 
_sg_mtl_load_action(action->stencil.action); + pass_desc.stencilAttachment.clearStencil = action->stencil.value; + } + + /* create a render command encoder, this might return nil if window is minimized */ + _sg.mtl.cmd_encoder = [_sg.mtl.cmd_buffer renderCommandEncoderWithDescriptor:pass_desc]; + if (nil == _sg.mtl.cmd_encoder) { + _sg.mtl.pass_valid = false; + return; + } + + /* bind the global uniform buffer, this only happens once per pass */ + _sg_mtl_bind_uniform_buffers(); +} + +_SOKOL_PRIVATE void _sg_mtl_end_pass(void) { + SOKOL_ASSERT(_sg.mtl.in_pass); + _sg.mtl.in_pass = false; + _sg.mtl.pass_valid = false; + if (nil != _sg.mtl.cmd_encoder) { + [_sg.mtl.cmd_encoder endEncoding]; + /* NOTE: MTLRenderCommandEncoder is autoreleased */ + _sg.mtl.cmd_encoder = nil; + } +} + +_SOKOL_PRIVATE void _sg_mtl_commit(void) { + SOKOL_ASSERT(!_sg.mtl.in_pass); + SOKOL_ASSERT(!_sg.mtl.pass_valid); + SOKOL_ASSERT(_sg.mtl.drawable_cb || _sg.mtl.drawable_userdata_cb); + SOKOL_ASSERT(nil == _sg.mtl.cmd_encoder); + SOKOL_ASSERT(nil != _sg.mtl.cmd_buffer); + + #if defined(_SG_TARGET_MACOS) + [_sg.mtl.uniform_buffers[_sg.mtl.cur_frame_rotate_index] didModifyRange:NSMakeRange(0, (NSUInteger)_sg.mtl.cur_ub_offset)]; + #endif + + /* present, commit and signal semaphore when done */ + id cur_drawable = nil; + if (_sg.mtl.drawable_cb) { + cur_drawable = (__bridge id) _sg.mtl.drawable_cb(); + } + else { + cur_drawable = (__bridge id) _sg.mtl.drawable_userdata_cb(_sg.mtl.user_data); + } + [_sg.mtl.cmd_buffer presentDrawable:cur_drawable]; + [_sg.mtl.cmd_buffer addCompletedHandler:^(id cmd_buffer) { + _SOKOL_UNUSED(cmd_buffer); + dispatch_semaphore_signal(_sg.mtl.sem); + }]; + [_sg.mtl.cmd_buffer commit]; + + /* garbage-collect resources pending for release */ + _sg_mtl_garbage_collect(_sg.mtl.frame_index); + + /* rotate uniform buffer slot */ + if (++_sg.mtl.cur_frame_rotate_index >= SG_NUM_INFLIGHT_FRAMES) { + _sg.mtl.cur_frame_rotate_index = 0; + } + _sg.mtl.frame_index++; + _sg.mtl.cur_ub_offset = 0; + _sg.mtl.cur_ub_base_ptr = 0; + /* NOTE: MTLCommandBuffer is autoreleased */ + _sg.mtl.cmd_buffer = nil; +} + +_SOKOL_PRIVATE void _sg_mtl_apply_viewport(int x, int y, int w, int h, bool origin_top_left) { + SOKOL_ASSERT(_sg.mtl.in_pass); + if (!_sg.mtl.pass_valid) { + return; + } + SOKOL_ASSERT(nil != _sg.mtl.cmd_encoder); + MTLViewport vp; + vp.originX = (double) x; + vp.originY = (double) (origin_top_left ? y : (_sg.mtl.cur_height - (y + h))); + vp.width = (double) w; + vp.height = (double) h; + vp.znear = 0.0; + vp.zfar = 1.0; + [_sg.mtl.cmd_encoder setViewport:vp]; +} + +_SOKOL_PRIVATE void _sg_mtl_apply_scissor_rect(int x, int y, int w, int h, bool origin_top_left) { + SOKOL_ASSERT(_sg.mtl.in_pass); + if (!_sg.mtl.pass_valid) { + return; + } + SOKOL_ASSERT(nil != _sg.mtl.cmd_encoder); + /* clip against framebuffer rect */ + x = _sg_min(_sg_max(0, x), _sg.mtl.cur_width-1); + y = _sg_min(_sg_max(0, y), _sg.mtl.cur_height-1); + if ((x + w) > _sg.mtl.cur_width) { + w = _sg.mtl.cur_width - x; + } + if ((y + h) > _sg.mtl.cur_height) { + h = _sg.mtl.cur_height - y; + } + w = _sg_max(w, 1); + h = _sg_max(h, 1); + + MTLScissorRect r; + r.x = (NSUInteger)x; + r.y = (NSUInteger) (origin_top_left ? 
y : (_sg.mtl.cur_height - (y + h))); + r.width = (NSUInteger)w; + r.height = (NSUInteger)h; + [_sg.mtl.cmd_encoder setScissorRect:r]; +} + +_SOKOL_PRIVATE void _sg_mtl_apply_pipeline(_sg_pipeline_t* pip) { + SOKOL_ASSERT(pip); + SOKOL_ASSERT(pip->shader && (pip->cmn.shader_id.id == pip->shader->slot.id)); + SOKOL_ASSERT(_sg.mtl.in_pass); + if (!_sg.mtl.pass_valid) { + return; + } + SOKOL_ASSERT(nil != _sg.mtl.cmd_encoder); + + if ((_sg.mtl.state_cache.cur_pipeline != pip) || (_sg.mtl.state_cache.cur_pipeline_id.id != pip->slot.id)) { + _sg.mtl.state_cache.cur_pipeline = pip; + _sg.mtl.state_cache.cur_pipeline_id.id = pip->slot.id; + sg_color c = pip->cmn.blend_color; + [_sg.mtl.cmd_encoder setBlendColorRed:c.r green:c.g blue:c.b alpha:c.a]; + [_sg.mtl.cmd_encoder setCullMode:pip->mtl.cull_mode]; + [_sg.mtl.cmd_encoder setFrontFacingWinding:pip->mtl.winding]; + [_sg.mtl.cmd_encoder setStencilReferenceValue:pip->mtl.stencil_ref]; + [_sg.mtl.cmd_encoder setDepthBias:pip->cmn.depth_bias slopeScale:pip->cmn.depth_bias_slope_scale clamp:pip->cmn.depth_bias_clamp]; + SOKOL_ASSERT(pip->mtl.rps != _SG_MTL_INVALID_SLOT_INDEX); + [_sg.mtl.cmd_encoder setRenderPipelineState:_sg_mtl_id(pip->mtl.rps)]; + SOKOL_ASSERT(pip->mtl.dss != _SG_MTL_INVALID_SLOT_INDEX); + [_sg.mtl.cmd_encoder setDepthStencilState:_sg_mtl_id(pip->mtl.dss)]; + } +} + +_SOKOL_PRIVATE void _sg_mtl_apply_bindings( + _sg_pipeline_t* pip, + _sg_buffer_t** vbs, const int* vb_offsets, int num_vbs, + _sg_buffer_t* ib, int ib_offset, + _sg_image_t** vs_imgs, int num_vs_imgs, + _sg_image_t** fs_imgs, int num_fs_imgs) +{ + _SOKOL_UNUSED(pip); + SOKOL_ASSERT(_sg.mtl.in_pass); + if (!_sg.mtl.pass_valid) { + return; + } + SOKOL_ASSERT(nil != _sg.mtl.cmd_encoder); + + /* store index buffer binding, this will be needed later in sg_draw() */ + _sg.mtl.state_cache.cur_indexbuffer = ib; + _sg.mtl.state_cache.cur_indexbuffer_offset = ib_offset; + if (ib) { + SOKOL_ASSERT(pip->cmn.index_type != SG_INDEXTYPE_NONE); + _sg.mtl.state_cache.cur_indexbuffer_id.id = ib->slot.id; + } + else { + SOKOL_ASSERT(pip->cmn.index_type == SG_INDEXTYPE_NONE); + _sg.mtl.state_cache.cur_indexbuffer_id.id = SG_INVALID_ID; + } + + /* apply vertex buffers */ + NSUInteger slot; + for (slot = 0; slot < (NSUInteger)num_vbs; slot++) { + const _sg_buffer_t* vb = vbs[slot]; + if ((_sg.mtl.state_cache.cur_vertexbuffers[slot] != vb) || + (_sg.mtl.state_cache.cur_vertexbuffer_offsets[slot] != vb_offsets[slot]) || + (_sg.mtl.state_cache.cur_vertexbuffer_ids[slot].id != vb->slot.id)) + { + _sg.mtl.state_cache.cur_vertexbuffers[slot] = vb; + _sg.mtl.state_cache.cur_vertexbuffer_offsets[slot] = vb_offsets[slot]; + _sg.mtl.state_cache.cur_vertexbuffer_ids[slot].id = vb->slot.id; + const NSUInteger mtl_slot = SG_MAX_SHADERSTAGE_UBS + slot; + SOKOL_ASSERT(vb->mtl.buf[vb->cmn.active_slot] != _SG_MTL_INVALID_SLOT_INDEX); + [_sg.mtl.cmd_encoder setVertexBuffer:_sg_mtl_id(vb->mtl.buf[vb->cmn.active_slot]) + offset:(NSUInteger)vb_offsets[slot] + atIndex:mtl_slot]; + } + } + + /* apply vertex shader images */ + for (slot = 0; slot < (NSUInteger)num_vs_imgs; slot++) { + const _sg_image_t* img = vs_imgs[slot]; + if ((_sg.mtl.state_cache.cur_vs_images[slot] != img) || (_sg.mtl.state_cache.cur_vs_image_ids[slot].id != img->slot.id)) { + _sg.mtl.state_cache.cur_vs_images[slot] = img; + _sg.mtl.state_cache.cur_vs_image_ids[slot].id = img->slot.id; + SOKOL_ASSERT(img->mtl.tex[img->cmn.active_slot] != _SG_MTL_INVALID_SLOT_INDEX); + [_sg.mtl.cmd_encoder 
setVertexTexture:_sg_mtl_id(img->mtl.tex[img->cmn.active_slot]) atIndex:slot]; + SOKOL_ASSERT(img->mtl.sampler_state != _SG_MTL_INVALID_SLOT_INDEX); + [_sg.mtl.cmd_encoder setVertexSamplerState:_sg_mtl_id(img->mtl.sampler_state) atIndex:slot]; + } + } + + /* apply fragment shader images */ + for (slot = 0; slot < (NSUInteger)num_fs_imgs; slot++) { + const _sg_image_t* img = fs_imgs[slot]; + if ((_sg.mtl.state_cache.cur_fs_images[slot] != img) || (_sg.mtl.state_cache.cur_fs_image_ids[slot].id != img->slot.id)) { + _sg.mtl.state_cache.cur_fs_images[slot] = img; + _sg.mtl.state_cache.cur_fs_image_ids[slot].id = img->slot.id; + SOKOL_ASSERT(img->mtl.tex[img->cmn.active_slot] != _SG_MTL_INVALID_SLOT_INDEX); + [_sg.mtl.cmd_encoder setFragmentTexture:_sg_mtl_id(img->mtl.tex[img->cmn.active_slot]) atIndex:slot]; + SOKOL_ASSERT(img->mtl.sampler_state != _SG_MTL_INVALID_SLOT_INDEX); + [_sg.mtl.cmd_encoder setFragmentSamplerState:_sg_mtl_id(img->mtl.sampler_state) atIndex:slot]; + } + } +} + +_SOKOL_PRIVATE void _sg_mtl_apply_uniforms(sg_shader_stage stage_index, int ub_index, const sg_range* data) { + SOKOL_ASSERT(_sg.mtl.in_pass); + if (!_sg.mtl.pass_valid) { + return; + } + SOKOL_ASSERT(nil != _sg.mtl.cmd_encoder); + SOKOL_ASSERT(((size_t)_sg.mtl.cur_ub_offset + data->size) <= (size_t)_sg.mtl.ub_size); + SOKOL_ASSERT((_sg.mtl.cur_ub_offset & (_SG_MTL_UB_ALIGN-1)) == 0); + SOKOL_ASSERT(_sg.mtl.state_cache.cur_pipeline && _sg.mtl.state_cache.cur_pipeline->shader); + SOKOL_ASSERT(_sg.mtl.state_cache.cur_pipeline->slot.id == _sg.mtl.state_cache.cur_pipeline_id.id); + SOKOL_ASSERT(_sg.mtl.state_cache.cur_pipeline->shader->slot.id == _sg.mtl.state_cache.cur_pipeline->cmn.shader_id.id); + SOKOL_ASSERT(ub_index < _sg.mtl.state_cache.cur_pipeline->shader->cmn.stage[stage_index].num_uniform_blocks); + SOKOL_ASSERT(data->size <= _sg.mtl.state_cache.cur_pipeline->shader->cmn.stage[stage_index].uniform_blocks[ub_index].size); + + /* copy to global uniform buffer, record offset into cmd encoder, and advance offset */ + uint8_t* dst = &_sg.mtl.cur_ub_base_ptr[_sg.mtl.cur_ub_offset]; + memcpy(dst, data->ptr, data->size); + if (stage_index == SG_SHADERSTAGE_VS) { + [_sg.mtl.cmd_encoder setVertexBufferOffset:(NSUInteger)_sg.mtl.cur_ub_offset atIndex:(NSUInteger)ub_index]; + } + else { + [_sg.mtl.cmd_encoder setFragmentBufferOffset:(NSUInteger)_sg.mtl.cur_ub_offset atIndex:(NSUInteger)ub_index]; + } + _sg.mtl.cur_ub_offset = _sg_roundup(_sg.mtl.cur_ub_offset + (int)data->size, _SG_MTL_UB_ALIGN); +} + +_SOKOL_PRIVATE void _sg_mtl_draw(int base_element, int num_elements, int num_instances) { + SOKOL_ASSERT(_sg.mtl.in_pass); + if (!_sg.mtl.pass_valid) { + return; + } + SOKOL_ASSERT(nil != _sg.mtl.cmd_encoder); + SOKOL_ASSERT(_sg.mtl.state_cache.cur_pipeline && (_sg.mtl.state_cache.cur_pipeline->slot.id == _sg.mtl.state_cache.cur_pipeline_id.id)); + if (SG_INDEXTYPE_NONE != _sg.mtl.state_cache.cur_pipeline->cmn.index_type) { + /* indexed rendering */ + SOKOL_ASSERT(_sg.mtl.state_cache.cur_indexbuffer && (_sg.mtl.state_cache.cur_indexbuffer->slot.id == _sg.mtl.state_cache.cur_indexbuffer_id.id)); + const _sg_buffer_t* ib = _sg.mtl.state_cache.cur_indexbuffer; + SOKOL_ASSERT(ib->mtl.buf[ib->cmn.active_slot] != _SG_MTL_INVALID_SLOT_INDEX); + const NSUInteger index_buffer_offset = (NSUInteger) (_sg.mtl.state_cache.cur_indexbuffer_offset + base_element * _sg.mtl.state_cache.cur_pipeline->mtl.index_size); + [_sg.mtl.cmd_encoder drawIndexedPrimitives:_sg.mtl.state_cache.cur_pipeline->mtl.prim_type + 
indexCount:(NSUInteger)num_elements + indexType:_sg.mtl.state_cache.cur_pipeline->mtl.index_type + indexBuffer:_sg_mtl_id(ib->mtl.buf[ib->cmn.active_slot]) + indexBufferOffset:index_buffer_offset + instanceCount:(NSUInteger)num_instances]; + } + else { + /* non-indexed rendering */ + [_sg.mtl.cmd_encoder drawPrimitives:_sg.mtl.state_cache.cur_pipeline->mtl.prim_type + vertexStart:(NSUInteger)base_element + vertexCount:(NSUInteger)num_elements + instanceCount:(NSUInteger)num_instances]; + } +} + +_SOKOL_PRIVATE void _sg_mtl_update_buffer(_sg_buffer_t* buf, const sg_range* data) { + SOKOL_ASSERT(buf && data && data->ptr && (data->size > 0)); + if (++buf->cmn.active_slot >= buf->cmn.num_slots) { + buf->cmn.active_slot = 0; + } + __unsafe_unretained id mtl_buf = _sg_mtl_id(buf->mtl.buf[buf->cmn.active_slot]); + void* dst_ptr = [mtl_buf contents]; + memcpy(dst_ptr, data->ptr, data->size); + #if defined(_SG_TARGET_MACOS) + [mtl_buf didModifyRange:NSMakeRange(0, data->size)]; + #endif +} + +_SOKOL_PRIVATE int _sg_mtl_append_buffer(_sg_buffer_t* buf, const sg_range* data, bool new_frame) { + SOKOL_ASSERT(buf && data && data->ptr && (data->size > 0)); + if (new_frame) { + if (++buf->cmn.active_slot >= buf->cmn.num_slots) { + buf->cmn.active_slot = 0; + } + } + __unsafe_unretained id mtl_buf = _sg_mtl_id(buf->mtl.buf[buf->cmn.active_slot]); + uint8_t* dst_ptr = (uint8_t*) [mtl_buf contents]; + dst_ptr += buf->cmn.append_pos; + memcpy(dst_ptr, data->ptr, data->size); + #if defined(_SG_TARGET_MACOS) + [mtl_buf didModifyRange:NSMakeRange((NSUInteger)buf->cmn.append_pos, (NSUInteger)data->size)]; + #endif + /* NOTE: this is a requirement from WebGPU, but we want identical behaviour across all backends */ + return _sg_roundup((int)data->size, 4); +} + +_SOKOL_PRIVATE void _sg_mtl_update_image(_sg_image_t* img, const sg_image_data* data) { + SOKOL_ASSERT(img && data); + if (++img->cmn.active_slot >= img->cmn.num_slots) { + img->cmn.active_slot = 0; + } + __unsafe_unretained id mtl_tex = _sg_mtl_id(img->mtl.tex[img->cmn.active_slot]); + _sg_mtl_copy_image_data(img, mtl_tex, data); +} + +/*== WEBGPU BACKEND IMPLEMENTATION ===========================================*/ +#elif defined(SOKOL_WGPU) + +_SOKOL_PRIVATE WGPUBufferUsageFlags _sg_wgpu_buffer_usage(sg_buffer_type t, sg_usage u) { + WGPUBufferUsageFlags res = 0; + if (SG_BUFFERTYPE_VERTEXBUFFER == t) { + res |= WGPUBufferUsage_Vertex; + } + else { + res |= WGPUBufferUsage_Index; + } + if (SG_USAGE_IMMUTABLE != u) { + res |= WGPUBufferUsage_CopyDst; + } + return res; +} + +_SOKOL_PRIVATE WGPULoadOp _sg_wgpu_load_op(sg_action a) { + switch (a) { + case SG_ACTION_CLEAR: + case SG_ACTION_DONTCARE: + return WGPULoadOp_Clear; + case SG_ACTION_LOAD: + return WGPULoadOp_Load; + default: + SOKOL_UNREACHABLE; + return (WGPULoadOp)0; + } +} + +_SOKOL_PRIVATE WGPUTextureViewDimension _sg_wgpu_tex_viewdim(sg_image_type t) { + switch (t) { + case SG_IMAGETYPE_2D: return WGPUTextureViewDimension_2D; + case SG_IMAGETYPE_CUBE: return WGPUTextureViewDimension_Cube; + case SG_IMAGETYPE_3D: return WGPUTextureViewDimension_3D; + case SG_IMAGETYPE_ARRAY: return WGPUTextureViewDimension_2DArray; + default: SOKOL_UNREACHABLE; return WGPUTextureViewDimension_Force32; + } +} + +_SOKOL_PRIVATE WGPUTextureComponentType _sg_wgpu_tex_comptype(sg_sampler_type t) { + switch (t) { + case SG_SAMPLERTYPE_FLOAT: return WGPUTextureComponentType_Float; + case SG_SAMPLERTYPE_SINT: return WGPUTextureComponentType_Sint; + case SG_SAMPLERTYPE_UINT: return WGPUTextureComponentType_Uint; + 
default: SOKOL_UNREACHABLE; return WGPUTextureComponentType_Force32; + } +} + +_SOKOL_PRIVATE WGPUTextureDimension _sg_wgpu_tex_dim(sg_image_type t) { + if (SG_IMAGETYPE_3D == t) { + return WGPUTextureDimension_3D; + } + else { + return WGPUTextureDimension_2D; + } +} + +_SOKOL_PRIVATE WGPUAddressMode _sg_wgpu_sampler_addrmode(sg_wrap m) { + switch (m) { + case SG_WRAP_REPEAT: + return WGPUAddressMode_Repeat; + case SG_WRAP_CLAMP_TO_EDGE: + case SG_WRAP_CLAMP_TO_BORDER: + return WGPUAddressMode_ClampToEdge; + case SG_WRAP_MIRRORED_REPEAT: + return WGPUAddressMode_MirrorRepeat; + default: + SOKOL_UNREACHABLE; + return WGPUAddressMode_Force32; + } +} + +_SOKOL_PRIVATE WGPUFilterMode _sg_wgpu_sampler_minmagfilter(sg_filter f) { + switch (f) { + case SG_FILTER_NEAREST: + case SG_FILTER_NEAREST_MIPMAP_NEAREST: + case SG_FILTER_NEAREST_MIPMAP_LINEAR: + return WGPUFilterMode_Nearest; + case SG_FILTER_LINEAR: + case SG_FILTER_LINEAR_MIPMAP_NEAREST: + case SG_FILTER_LINEAR_MIPMAP_LINEAR: + return WGPUFilterMode_Linear; + default: + SOKOL_UNREACHABLE; + return WGPUFilterMode_Force32; + } +} + +_SOKOL_PRIVATE WGPUFilterMode _sg_wgpu_sampler_mipfilter(sg_filter f) { + switch (f) { + case SG_FILTER_NEAREST: + case SG_FILTER_LINEAR: + case SG_FILTER_NEAREST_MIPMAP_NEAREST: + case SG_FILTER_LINEAR_MIPMAP_NEAREST: + return WGPUFilterMode_Nearest; + case SG_FILTER_NEAREST_MIPMAP_LINEAR: + case SG_FILTER_LINEAR_MIPMAP_LINEAR: + return WGPUFilterMode_Linear; + default: + SOKOL_UNREACHABLE; + return WGPUFilterMode_Force32; + } +} + +_SOKOL_PRIVATE WGPUIndexFormat _sg_wgpu_indexformat(sg_index_type t) { + /* NOTE: there's no WGPUIndexFormat_None */ + return (t == SG_INDEXTYPE_UINT16) ? WGPUIndexFormat_Uint16 : WGPUIndexFormat_Uint32; +} + +_SOKOL_PRIVATE WGPUInputStepMode _sg_wgpu_stepmode(sg_vertex_step s) { + return (s == SG_VERTEXSTEP_PER_VERTEX) ? WGPUInputStepMode_Vertex : WGPUInputStepMode_Instance; +} + +_SOKOL_PRIVATE WGPUVertexFormat _sg_wgpu_vertexformat(sg_vertex_format f) { + switch (f) { + case SG_VERTEXFORMAT_FLOAT: return WGPUVertexFormat_Float; + case SG_VERTEXFORMAT_FLOAT2: return WGPUVertexFormat_Float2; + case SG_VERTEXFORMAT_FLOAT3: return WGPUVertexFormat_Float3; + case SG_VERTEXFORMAT_FLOAT4: return WGPUVertexFormat_Float4; + case SG_VERTEXFORMAT_BYTE4: return WGPUVertexFormat_Char4; + case SG_VERTEXFORMAT_BYTE4N: return WGPUVertexFormat_Char4Norm; + case SG_VERTEXFORMAT_UBYTE4: return WGPUVertexFormat_UChar4; + case SG_VERTEXFORMAT_UBYTE4N: return WGPUVertexFormat_UChar4Norm; + case SG_VERTEXFORMAT_SHORT2: return WGPUVertexFormat_Short2; + case SG_VERTEXFORMAT_SHORT2N: return WGPUVertexFormat_Short2Norm; + case SG_VERTEXFORMAT_USHORT2N: return WGPUVertexFormat_UShort2Norm; + case SG_VERTEXFORMAT_SHORT4: return WGPUVertexFormat_Short4; + case SG_VERTEXFORMAT_SHORT4N: return WGPUVertexFormat_Short4Norm; + case SG_VERTEXFORMAT_USHORT4N: return WGPUVertexFormat_UShort4Norm; + /* FIXME! 
UINT10_N2 */ + case SG_VERTEXFORMAT_UINT10_N2: + default: + SOKOL_UNREACHABLE; + return WGPUVertexFormat_Force32; + } +} + +_SOKOL_PRIVATE WGPUPrimitiveTopology _sg_wgpu_topology(sg_primitive_type t) { + switch (t) { + case SG_PRIMITIVETYPE_POINTS: return WGPUPrimitiveTopology_PointList; + case SG_PRIMITIVETYPE_LINES: return WGPUPrimitiveTopology_LineList; + case SG_PRIMITIVETYPE_LINE_STRIP: return WGPUPrimitiveTopology_LineStrip; + case SG_PRIMITIVETYPE_TRIANGLES: return WGPUPrimitiveTopology_TriangleList; + case SG_PRIMITIVETYPE_TRIANGLE_STRIP: return WGPUPrimitiveTopology_TriangleStrip; + default: SOKOL_UNREACHABLE; return WGPUPrimitiveTopology_Force32; + } +} + +_SOKOL_PRIVATE WGPUFrontFace _sg_wgpu_frontface(sg_face_winding fw) { + return (fw == SG_FACEWINDING_CCW) ? WGPUFrontFace_CCW : WGPUFrontFace_CW; +} + +_SOKOL_PRIVATE WGPUCullMode _sg_wgpu_cullmode(sg_cull_mode cm) { + switch (cm) { + case SG_CULLMODE_NONE: return WGPUCullMode_None; + case SG_CULLMODE_FRONT: return WGPUCullMode_Front; + case SG_CULLMODE_BACK: return WGPUCullMode_Back; + default: SOKOL_UNREACHABLE; return WGPUCullMode_Force32; + } +} + +_SOKOL_PRIVATE WGPUTextureFormat _sg_wgpu_textureformat(sg_pixel_format p) { + switch (p) { + case SG_PIXELFORMAT_NONE: return WGPUTextureFormat_Undefined; + case SG_PIXELFORMAT_R8: return WGPUTextureFormat_R8Unorm; + case SG_PIXELFORMAT_R8SN: return WGPUTextureFormat_R8Snorm; + case SG_PIXELFORMAT_R8UI: return WGPUTextureFormat_R8Uint; + case SG_PIXELFORMAT_R8SI: return WGPUTextureFormat_R8Sint; + case SG_PIXELFORMAT_R16UI: return WGPUTextureFormat_R16Uint; + case SG_PIXELFORMAT_R16SI: return WGPUTextureFormat_R16Sint; + case SG_PIXELFORMAT_R16F: return WGPUTextureFormat_R16Float; + case SG_PIXELFORMAT_RG8: return WGPUTextureFormat_RG8Unorm; + case SG_PIXELFORMAT_RG8SN: return WGPUTextureFormat_RG8Snorm; + case SG_PIXELFORMAT_RG8UI: return WGPUTextureFormat_RG8Uint; + case SG_PIXELFORMAT_RG8SI: return WGPUTextureFormat_RG8Sint; + case SG_PIXELFORMAT_R32UI: return WGPUTextureFormat_R32Uint; + case SG_PIXELFORMAT_R32SI: return WGPUTextureFormat_R32Sint; + case SG_PIXELFORMAT_R32F: return WGPUTextureFormat_R32Float; + case SG_PIXELFORMAT_RG16UI: return WGPUTextureFormat_RG16Uint; + case SG_PIXELFORMAT_RG16SI: return WGPUTextureFormat_RG16Sint; + case SG_PIXELFORMAT_RG16F: return WGPUTextureFormat_RG16Float; + case SG_PIXELFORMAT_RGBA8: return WGPUTextureFormat_RGBA8Unorm; + case SG_PIXELFORMAT_RGBA8SN: return WGPUTextureFormat_RGBA8Snorm; + case SG_PIXELFORMAT_RGBA8UI: return WGPUTextureFormat_RGBA8Uint; + case SG_PIXELFORMAT_RGBA8SI: return WGPUTextureFormat_RGBA8Sint; + case SG_PIXELFORMAT_BGRA8: return WGPUTextureFormat_BGRA8Unorm; + case SG_PIXELFORMAT_RGB10A2: return WGPUTextureFormat_RGB10A2Unorm; + case SG_PIXELFORMAT_RG11B10F: return WGPUTextureFormat_RG11B10Float; + case SG_PIXELFORMAT_RG32UI: return WGPUTextureFormat_RG32Uint; + case SG_PIXELFORMAT_RG32SI: return WGPUTextureFormat_RG32Sint; + case SG_PIXELFORMAT_RG32F: return WGPUTextureFormat_RG32Float; + case SG_PIXELFORMAT_RGBA16UI: return WGPUTextureFormat_RGBA16Uint; + case SG_PIXELFORMAT_RGBA16SI: return WGPUTextureFormat_RGBA16Sint; + case SG_PIXELFORMAT_RGBA16F: return WGPUTextureFormat_RGBA16Float; + case SG_PIXELFORMAT_RGBA32UI: return WGPUTextureFormat_RGBA32Uint; + case SG_PIXELFORMAT_RGBA32SI: return WGPUTextureFormat_RGBA32Sint; + case SG_PIXELFORMAT_RGBA32F: return WGPUTextureFormat_RGBA32Float; + case SG_PIXELFORMAT_DEPTH: return WGPUTextureFormat_Depth24Plus; + case SG_PIXELFORMAT_DEPTH_STENCIL: return 
WGPUTextureFormat_Depth24PlusStencil8; + case SG_PIXELFORMAT_BC1_RGBA: return WGPUTextureFormat_BC1RGBAUnorm; + case SG_PIXELFORMAT_BC2_RGBA: return WGPUTextureFormat_BC2RGBAUnorm; + case SG_PIXELFORMAT_BC3_RGBA: return WGPUTextureFormat_BC3RGBAUnorm; + case SG_PIXELFORMAT_BC4_R: return WGPUTextureFormat_BC4RUnorm; + case SG_PIXELFORMAT_BC4_RSN: return WGPUTextureFormat_BC4RSnorm; + case SG_PIXELFORMAT_BC5_RG: return WGPUTextureFormat_BC5RGUnorm; + case SG_PIXELFORMAT_BC5_RGSN: return WGPUTextureFormat_BC5RGSnorm; + case SG_PIXELFORMAT_BC6H_RGBF: return WGPUTextureFormat_BC6HRGBSfloat; + case SG_PIXELFORMAT_BC6H_RGBUF: return WGPUTextureFormat_BC6HRGBUfloat; + case SG_PIXELFORMAT_BC7_RGBA: return WGPUTextureFormat_BC7RGBAUnorm; + + /* NOT SUPPORTED */ + case SG_PIXELFORMAT_R16: + case SG_PIXELFORMAT_R16SN: + case SG_PIXELFORMAT_RG16: + case SG_PIXELFORMAT_RG16SN: + case SG_PIXELFORMAT_RGBA16: + case SG_PIXELFORMAT_RGBA16SN: + case SG_PIXELFORMAT_PVRTC_RGB_2BPP: + case SG_PIXELFORMAT_PVRTC_RGB_4BPP: + case SG_PIXELFORMAT_PVRTC_RGBA_2BPP: + case SG_PIXELFORMAT_PVRTC_RGBA_4BPP: + case SG_PIXELFORMAT_ETC2_RGB8: + case SG_PIXELFORMAT_ETC2_RGB8A1: + case SG_PIXELFORMAT_ETC2_RGBA8: + case SG_PIXELFORMAT_ETC2_RG11: + case SG_PIXELFORMAT_ETC2_RG11SN: + default: + SOKOL_UNREACHABLE; + return WGPUTextureFormat_Force32; + } +} + +/* +FIXME ??? this isn't needed anywhere? +_SOKOL_PRIVATE WGPUTextureAspect _sg_wgpu_texture_aspect(sg_pixel_format fmt) { + if (_sg_is_valid_rendertarget_depth_format(fmt)) { + if (!_sg_is_depth_stencil_format(fmt)) { + return WGPUTextureAspect_DepthOnly; + } + } + return WGPUTextureAspect_All; +} +*/ + +_SOKOL_PRIVATE WGPUCompareFunction _sg_wgpu_comparefunc(sg_compare_func f) { + switch (f) { + case SG_COMPAREFUNC_NEVER: return WGPUCompareFunction_Never; + case SG_COMPAREFUNC_LESS: return WGPUCompareFunction_Less; + case SG_COMPAREFUNC_EQUAL: return WGPUCompareFunction_Equal; + case SG_COMPAREFUNC_LESS_EQUAL: return WGPUCompareFunction_LessEqual; + case SG_COMPAREFUNC_GREATER: return WGPUCompareFunction_Greater; + case SG_COMPAREFUNC_NOT_EQUAL: return WGPUCompareFunction_NotEqual; + case SG_COMPAREFUNC_GREATER_EQUAL: return WGPUCompareFunction_GreaterEqual; + case SG_COMPAREFUNC_ALWAYS: return WGPUCompareFunction_Always; + default: SOKOL_UNREACHABLE; return WGPUCompareFunction_Force32; + } +} + +_SOKOL_PRIVATE WGPUStencilOperation _sg_wgpu_stencilop(sg_stencil_op op) { + switch (op) { + case SG_STENCILOP_KEEP: return WGPUStencilOperation_Keep; + case SG_STENCILOP_ZERO: return WGPUStencilOperation_Zero; + case SG_STENCILOP_REPLACE: return WGPUStencilOperation_Replace; + case SG_STENCILOP_INCR_CLAMP: return WGPUStencilOperation_IncrementClamp; + case SG_STENCILOP_DECR_CLAMP: return WGPUStencilOperation_DecrementClamp; + case SG_STENCILOP_INVERT: return WGPUStencilOperation_Invert; + case SG_STENCILOP_INCR_WRAP: return WGPUStencilOperation_IncrementWrap; + case SG_STENCILOP_DECR_WRAP: return WGPUStencilOperation_DecrementWrap; + default: SOKOL_UNREACHABLE; return WGPUStencilOperation_Force32; + } +} + +_SOKOL_PRIVATE WGPUBlendOperation _sg_wgpu_blendop(sg_blend_op op) { + switch (op) { + case SG_BLENDOP_ADD: return WGPUBlendOperation_Add; + case SG_BLENDOP_SUBTRACT: return WGPUBlendOperation_Subtract; + case SG_BLENDOP_REVERSE_SUBTRACT: return WGPUBlendOperation_ReverseSubtract; + default: SOKOL_UNREACHABLE; return WGPUBlendOperation_Force32; + } +} + +_SOKOL_PRIVATE WGPUBlendFactor _sg_wgpu_blendfactor(sg_blend_factor f) { + switch (f) { + case SG_BLENDFACTOR_ZERO: return 
WGPUBlendFactor_Zero; + case SG_BLENDFACTOR_ONE: return WGPUBlendFactor_One; + case SG_BLENDFACTOR_SRC_COLOR: return WGPUBlendFactor_SrcColor; + case SG_BLENDFACTOR_ONE_MINUS_SRC_COLOR: return WGPUBlendFactor_OneMinusSrcColor; + case SG_BLENDFACTOR_SRC_ALPHA: return WGPUBlendFactor_SrcAlpha; + case SG_BLENDFACTOR_ONE_MINUS_SRC_ALPHA: return WGPUBlendFactor_OneMinusSrcAlpha; + case SG_BLENDFACTOR_DST_COLOR: return WGPUBlendFactor_DstColor; + case SG_BLENDFACTOR_ONE_MINUS_DST_COLOR: return WGPUBlendFactor_OneMinusDstColor; + case SG_BLENDFACTOR_DST_ALPHA: return WGPUBlendFactor_DstAlpha; + case SG_BLENDFACTOR_ONE_MINUS_DST_ALPHA: return WGPUBlendFactor_OneMinusDstAlpha; + case SG_BLENDFACTOR_SRC_ALPHA_SATURATED: return WGPUBlendFactor_SrcAlphaSaturated; + case SG_BLENDFACTOR_BLEND_COLOR: return WGPUBlendFactor_BlendColor; + case SG_BLENDFACTOR_ONE_MINUS_BLEND_COLOR: return WGPUBlendFactor_OneMinusBlendColor; + /* FIXME: separate blend alpha value not supported? */ + case SG_BLENDFACTOR_BLEND_ALPHA: return WGPUBlendFactor_BlendColor; + case SG_BLENDFACTOR_ONE_MINUS_BLEND_ALPHA: return WGPUBlendFactor_OneMinusBlendColor; + default: + SOKOL_UNREACHABLE; return WGPUBlendFactor_Force32; + } +} + +_SOKOL_PRIVATE WGPUColorWriteMaskFlags _sg_wgpu_colorwritemask(uint8_t m) { + WGPUColorWriteMaskFlags res = 0; + if (0 != (m & SG_COLORMASK_R)) { + res |= WGPUColorWriteMask_Red; + } + if (0 != (m & SG_COLORMASK_G)) { + res |= WGPUColorWriteMask_Green; + } + if (0 != (m & SG_COLORMASK_B)) { + res |= WGPUColorWriteMask_Blue; + } + if (0 != (m & SG_COLORMASK_A)) { + res |= WGPUColorWriteMask_Alpha; + } + return res; +} + +_SOKOL_PRIVATE void _sg_wgpu_init_caps(void) { + _sg.backend = SG_BACKEND_WGPU; + _sg.features.instancing = true; + _sg.features.origin_top_left = true; + _sg.features.multiple_render_targets = true; + _sg.features.msaa_render_targets = true; + _sg.features.imagetype_3d = true; + _sg.features.imagetype_array = true; + _sg.features.image_clamp_to_border = false; + _sg.features.mrt_independent_blend_state = true; + _sg.features.mrt_independent_write_mask = true; + + /* FIXME: max images size??? 
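+
+       [editor's note] These caps are hard-coded pending a WebGPU query API; an
+       application can still read them back through the regular public calls.
+       Minimal sketch (sg_query_features(), sg_query_limits() and
+       sg_query_pixelformat() are existing sokol_gfx.h API, the checks themselves
+       are only an illustration):
+
+           sg_limits lim = sg_query_limits();
+           if (sg_query_features().imagetype_3d && (lim.max_image_size_3d >= 256)) {
+               // a 256x256x256 volume texture fits within the reported limits
+           }
+           if (sg_query_pixelformat(SG_PIXELFORMAT_RGBA32F).render) {
+               // RGBA32F is usable as a render target on this backend
+           }
+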
*/ + _sg.limits.max_image_size_2d = 8 * 1024; + _sg.limits.max_image_size_cube = 8 * 1024; + _sg.limits.max_image_size_3d = 2 * 1024; + _sg.limits.max_image_size_array = 8 * 1024; + _sg.limits.max_image_array_layers = 2 * 1024; + _sg.limits.max_vertex_attrs = SG_MAX_VERTEX_ATTRIBUTES; + + _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_R8]); + _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_R8SN]); + _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_R8UI]); + _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_R8SI]); + _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_R16UI]); + _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_R16SI]); + _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_R16F]); + _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_RG8]); + _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_RG8SN]); + _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_RG8UI]); + _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_RG8SI]); + _sg_pixelformat_sr(&_sg.formats[SG_PIXELFORMAT_R32UI]); + _sg_pixelformat_sr(&_sg.formats[SG_PIXELFORMAT_R32SI]); + _sg_pixelformat_sbr(&_sg.formats[SG_PIXELFORMAT_R32F]); + _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_RG16UI]); + _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_RG16SI]); + _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_RG16F]); + _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_RGBA8]); + _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_RGBA8SN]); + _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_RGBA8UI]); + _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_RGBA8SI]); + _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_BGRA8]); + _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_RGB10A2]); + _sg_pixelformat_sr(&_sg.formats[SG_PIXELFORMAT_RG32UI]); + _sg_pixelformat_sr(&_sg.formats[SG_PIXELFORMAT_RG32SI]); + _sg_pixelformat_sbr(&_sg.formats[SG_PIXELFORMAT_RG32F]); + _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_RGBA16UI]); + _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_RGBA16SI]); + _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_RGBA16F]); + _sg_pixelformat_sr(&_sg.formats[SG_PIXELFORMAT_RGBA32UI]); + _sg_pixelformat_sr(&_sg.formats[SG_PIXELFORMAT_RGBA32SI]); + _sg_pixelformat_sr(&_sg.formats[SG_PIXELFORMAT_RGBA32F]); + _sg_pixelformat_srmd(&_sg.formats[SG_PIXELFORMAT_DEPTH]); + _sg_pixelformat_srmd(&_sg.formats[SG_PIXELFORMAT_DEPTH_STENCIL]); + + /* FIXME FIXME FIXME: need to check if BC texture compression is + actually supported, currently the WebGPU C-API doesn't allow this + */ + _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_BC1_RGBA]); + _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_BC2_RGBA]); + _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_BC3_RGBA]); + _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_BC4_R]); + _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_BC4_RSN]); + _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_BC5_RG]); + _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_BC5_RGSN]); + _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_BC6H_RGBF]); + _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_BC6H_RGBUF]); + _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_BC7_RGBA]); +} + +/* + WGPU uniform buffer pool implementation: + + At start of frame, a mapped buffer is grabbed from the pool, + or a new buffer is created if there is no mapped buffer available. + + At end of frame, the current buffer is unmapped before queue submit, + and async-mapped immediately again. + + UNIFORM BUFFER FIXME: + + - As per WebGPU spec, it should be possible to create a Uniform|MapWrite + buffer, but this isn't currently allowed in Dawn. 
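+
+      [editor's note] Condensed flow of the pool below, added for orientation only
+      (the real logic lives in _sg_wgpu_ubpool_next_frame() and _sg_wgpu_ubpool_flush()):
+
+          next_frame:  request wgpuBufferMapWriteAsync() for last frame's staging
+                       buffer, rewind the per-frame offsets, then pick any staging
+                       buffer that is already mapped, or create a fresh one with
+                       wgpuDeviceCreateBufferMapped()
+          flush:       wgpuBufferUnmap() the current staging buffer and record a
+                       wgpuCommandEncoderCopyBufferToBuffer() into the GPU-side
+                       uniform buffer before the queue submit
+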
+*/ +_SOKOL_PRIVATE void _sg_wgpu_ubpool_init(const sg_desc* desc) { + + /* Add the max-uniform-update size (64 KB) to the requested buffer size, + this is to prevent validation errors in the WebGPU implementation + if the entire buffer size is used per frame. 64 KB is the allowed + max uniform update size on NVIDIA + */ + _sg.wgpu.ub.num_bytes = desc->uniform_buffer_size + _SG_WGPU_MAX_UNIFORM_UPDATE_SIZE; + + WGPUBufferDescriptor ub_desc; + memset(&ub_desc, 0, sizeof(ub_desc)); + ub_desc.size = _sg.wgpu.ub.num_bytes; + ub_desc.usage = WGPUBufferUsage_Uniform|WGPUBufferUsage_CopyDst; + _sg.wgpu.ub.buf = wgpuDeviceCreateBuffer(_sg.wgpu.dev, &ub_desc); + SOKOL_ASSERT(_sg.wgpu.ub.buf); + + WGPUBindGroupLayoutBinding ub_bglb_desc[SG_NUM_SHADER_STAGES][SG_MAX_SHADERSTAGE_UBS]; + memset(ub_bglb_desc, 0, sizeof(ub_bglb_desc)); + for (int stage_index = 0; stage_index < SG_NUM_SHADER_STAGES; stage_index++) { + WGPUShaderStage vis = (stage_index == SG_SHADERSTAGE_VS) ? WGPUShaderStage_Vertex : WGPUShaderStage_Fragment; + for (int ub_index = 0; ub_index < SG_MAX_SHADERSTAGE_UBS; ub_index++) { + int bind_index = stage_index * SG_MAX_SHADERSTAGE_UBS + ub_index; + ub_bglb_desc[stage_index][ub_index].binding = bind_index; + ub_bglb_desc[stage_index][ub_index].visibility = vis; + ub_bglb_desc[stage_index][ub_index].type = WGPUBindingType_UniformBuffer; + ub_bglb_desc[stage_index][ub_index].hasDynamicOffset = true; + } + } + + WGPUBindGroupLayoutDescriptor ub_bgl_desc; + memset(&ub_bgl_desc, 0, sizeof(ub_bgl_desc)); + ub_bgl_desc.bindingCount = SG_NUM_SHADER_STAGES * SG_MAX_SHADERSTAGE_UBS; + ub_bgl_desc.bindings = &ub_bglb_desc[0][0]; + _sg.wgpu.ub.bindgroup_layout = wgpuDeviceCreateBindGroupLayout(_sg.wgpu.dev, &ub_bgl_desc); + SOKOL_ASSERT(_sg.wgpu.ub.bindgroup_layout); + + WGPUBindGroupBinding ub_bgb[SG_NUM_SHADER_STAGES][SG_MAX_SHADERSTAGE_UBS]; + memset(ub_bgb, 0, sizeof(ub_bgb)); + for (int stage_index = 0; stage_index < SG_NUM_SHADER_STAGES; stage_index++) { + for (int ub_index = 0; ub_index < SG_MAX_SHADERSTAGE_UBS; ub_index++) { + int bind_index = stage_index * SG_MAX_SHADERSTAGE_UBS + ub_index; + ub_bgb[stage_index][ub_index].binding = bind_index; + ub_bgb[stage_index][ub_index].buffer = _sg.wgpu.ub.buf; + // FIXME FIXME FIXME FIXME: HACK FOR VALIDATION BUG IN DAWN + ub_bgb[stage_index][ub_index].size = (1<<16); + } + } + WGPUBindGroupDescriptor bg_desc; + memset(&bg_desc, 0, sizeof(bg_desc)); + bg_desc.layout = _sg.wgpu.ub.bindgroup_layout; + bg_desc.bindingCount = SG_NUM_SHADER_STAGES * SG_MAX_SHADERSTAGE_UBS; + bg_desc.bindings = &ub_bgb[0][0]; + _sg.wgpu.ub.bindgroup = wgpuDeviceCreateBindGroup(_sg.wgpu.dev, &bg_desc); + SOKOL_ASSERT(_sg.wgpu.ub.bindgroup); +} + +_SOKOL_PRIVATE void _sg_wgpu_ubpool_discard(void) { + if (_sg.wgpu.ub.buf) { + wgpuBufferRelease(_sg.wgpu.ub.buf); + _sg.wgpu.ub.buf = 0; + } + if (_sg.wgpu.ub.bindgroup) { + wgpuBindGroupRelease(_sg.wgpu.ub.bindgroup); + _sg.wgpu.ub.bindgroup = 0; + } + if (_sg.wgpu.ub.bindgroup_layout) { + wgpuBindGroupLayoutRelease(_sg.wgpu.ub.bindgroup_layout); + _sg.wgpu.ub.bindgroup_layout = 0; + } + for (int i = 0; i < _sg.wgpu.ub.stage.num; i++) { + if (_sg.wgpu.ub.stage.buf[i]) { + wgpuBufferRelease(_sg.wgpu.ub.stage.buf[i]); + _sg.wgpu.ub.stage.buf[i] = 0; + _sg.wgpu.ub.stage.ptr[i] = 0; + } + } +} + +_SOKOL_PRIVATE void _sg_wgpu_ubpool_mapped_callback(WGPUBufferMapAsyncStatus status, void* data, uint64_t data_len, void* user_data) { + if (!_sg.wgpu.valid) { + return; + } + /* FIXME: better handling for this */ + if 
(WGPUBufferMapAsyncStatus_Success != status) { + SOKOL_LOG("Mapping uniform buffer failed!\n"); + SOKOL_ASSERT(false); + } + SOKOL_ASSERT(data && (data_len == _sg.wgpu.ub.num_bytes)); + int index = (int)(intptr_t) user_data; + SOKOL_ASSERT(index < _sg.wgpu.ub.stage.num); + SOKOL_ASSERT(0 == _sg.wgpu.ub.stage.ptr[index]); + _sg.wgpu.ub.stage.ptr[index] = (uint8_t*) data; +} + +_SOKOL_PRIVATE void _sg_wgpu_ubpool_next_frame(bool first_frame) { + + /* immediately request a new mapping for the last frame's current staging buffer */ + if (!first_frame) { + WGPUBuffer ub_src = _sg.wgpu.ub.stage.buf[_sg.wgpu.ub.stage.cur]; + wgpuBufferMapWriteAsync(ub_src, _sg_wgpu_ubpool_mapped_callback, (void*)(intptr_t)_sg.wgpu.ub.stage.cur); + } + + /* rewind per-frame offsets */ + _sg.wgpu.ub.offset = 0; + memset(&_sg.wgpu.ub.bind_offsets, 0, sizeof(_sg.wgpu.ub.bind_offsets)); + + /* check if a mapped staging buffer is available, otherwise create one */ + for (int i = 0; i < _sg.wgpu.ub.stage.num; i++) { + if (_sg.wgpu.ub.stage.ptr[i]) { + _sg.wgpu.ub.stage.cur = i; + return; + } + } + + /* no mapped uniform buffer available, create one */ + SOKOL_ASSERT(_sg.wgpu.ub.stage.num < _SG_WGPU_STAGING_PIPELINE_SIZE); + _sg.wgpu.ub.stage.cur = _sg.wgpu.ub.stage.num++; + const int cur = _sg.wgpu.ub.stage.cur; + + WGPUBufferDescriptor desc; + memset(&desc, 0, sizeof(desc)); + desc.size = _sg.wgpu.ub.num_bytes; + desc.usage = WGPUBufferUsage_CopySrc|WGPUBufferUsage_MapWrite; + WGPUCreateBufferMappedResult res = wgpuDeviceCreateBufferMapped(_sg.wgpu.dev, &desc); + _sg.wgpu.ub.stage.buf[cur] = res.buffer; + _sg.wgpu.ub.stage.ptr[cur] = (uint8_t*) res.data; + SOKOL_ASSERT(_sg.wgpu.ub.stage.buf[cur]); + SOKOL_ASSERT(_sg.wgpu.ub.stage.ptr[cur]); + SOKOL_ASSERT(res.dataLength == _sg.wgpu.ub.num_bytes); +} + +_SOKOL_PRIVATE void _sg_wgpu_ubpool_flush(void) { + /* unmap staging buffer and copy to uniform buffer */ + const int cur = _sg.wgpu.ub.stage.cur; + SOKOL_ASSERT(_sg.wgpu.ub.stage.ptr[cur]); + _sg.wgpu.ub.stage.ptr[cur] = 0; + WGPUBuffer src_buf = _sg.wgpu.ub.stage.buf[cur]; + wgpuBufferUnmap(src_buf); + if (_sg.wgpu.ub.offset > 0) { + WGPUBuffer dst_buf = _sg.wgpu.ub.buf; + wgpuCommandEncoderCopyBufferToBuffer(_sg.wgpu.render_cmd_enc, src_buf, 0, dst_buf, 0, _sg.wgpu.ub.offset); + } +} + +/* helper function to compute number of bytes needed in staging buffer to copy image data */ +_SOKOL_PRIVATE uint32_t _sg_wgpu_image_data_buffer_size(const _sg_image_t* img) { + uint32_t num_bytes = 0; + const uint32_t num_faces = (img->cmn.type == SG_IMAGETYPE_CUBE) ? 6:1; + const uint32_t num_slices = (img->cmn.type == SG_IMAGETYPE_ARRAY) ? 
img->cmn.num_slices : 1; + for (int mip_index = 0; mip_index < img->cmn.num_mipmaps; mip_index++) { + const uint32_t mip_width = _sg_max(img->cmn.width >> mip_index, 1); + const uint32_t mip_height = _sg_max(img->cmn.height >> mip_index, 1); + /* row-pitch must be 256-aligend */ + const uint32_t bytes_per_slice = _sg_surface_pitch(img->cmn.pixel_format, mip_width, mip_height, _SG_WGPU_ROWPITCH_ALIGN); + num_bytes += bytes_per_slice * num_slices * num_faces; + } + return num_bytes; +} + +/* helper function to copy image data into a texture via a staging buffer, returns number of + bytes copied +*/ +_SOKOL_PRIVATE uint32_t _sg_wgpu_copy_image_data(WGPUBuffer stg_buf, uint8_t* stg_base_ptr, uint32_t stg_base_offset, _sg_image_t* img, const sg_image_data* data) { + SOKOL_ASSERT(_sg.wgpu.staging_cmd_enc); + SOKOL_ASSERT(stg_buf && stg_base_ptr); + SOKOL_ASSERT(img); + SOKOL_ASSERT(data); + uint32_t stg_offset = stg_base_offset; + const uint32_t num_faces = (img->cmn.type == SG_IMAGETYPE_CUBE) ? 6:1; + const uint32_t num_slices = (img->cmn.type == SG_IMAGETYPE_ARRAY) ? img->cmn.num_slices : 1; + const sg_pixel_format fmt = img->cmn.pixel_format; + WGPUBufferCopyView src_view; + memset(&src_view, 0, sizeof(src_view)); + src_view.buffer = stg_buf; + WGPUTextureCopyView dst_view; + memset(&dst_view, 0, sizeof(dst_view)); + dst_view.texture = img->wgpu.tex; + WGPUExtent3D extent; + memset(&extent, 0, sizeof(extent)); + + for (uint32_t face_index = 0; face_index < num_faces; face_index++) { + for (uint32_t mip_index = 0; mip_index < (uint32_t)img->cmn.num_mipmaps; mip_index++) { + SOKOL_ASSERT(data->subimage[face_index][mip_index].ptr); + SOKOL_ASSERT(data->subimage[face_index][mip_index].size > 0); + const uint8_t* src_base_ptr = (const uint8_t*)data->subimage[face_index][mip_index].ptr; + SOKOL_ASSERT(src_base_ptr); + uint8_t* dst_base_ptr = stg_base_ptr + stg_offset; + + const uint32_t mip_width = _sg_max(img->cmn.width >> mip_index, 1); + const uint32_t mip_height = _sg_max(img->cmn.height >> mip_index, 1); + const uint32_t mip_depth = (img->cmn.type == SG_IMAGETYPE_3D) ? 
_sg_max(img->cmn.num_slices >> mip_index, 1) : 1; + const uint32_t num_rows = _sg_num_rows(fmt, mip_height); + const uint32_t src_bytes_per_row = _sg_row_pitch(fmt, mip_width, 1); + const uint32_t dst_bytes_per_row = _sg_row_pitch(fmt, mip_width, _SG_WGPU_ROWPITCH_ALIGN); + const uint32_t src_bytes_per_slice = _sg_surface_pitch(fmt, mip_width, mip_height, 1); + const uint32_t dst_bytes_per_slice = _sg_surface_pitch(fmt, mip_width, mip_height, _SG_WGPU_ROWPITCH_ALIGN); + SOKOL_ASSERT((uint32_t)data->subimage[face_index][mip_index].size == (src_bytes_per_slice * num_slices)); + SOKOL_ASSERT(src_bytes_per_row <= dst_bytes_per_row); + SOKOL_ASSERT(src_bytes_per_slice == (src_bytes_per_row * num_rows)); + SOKOL_ASSERT(dst_bytes_per_slice == (dst_bytes_per_row * num_rows)); + _SOKOL_UNUSED(src_bytes_per_slice); + + /* copy data into mapped staging buffer */ + if (src_bytes_per_row == dst_bytes_per_row) { + /* can do a single memcpy */ + uint32_t num_bytes = data->subimage[face_index][mip_index].size; + memcpy(dst_base_ptr, src_base_ptr, num_bytes); + } + else { + /* src/dst pitch doesn't match, need to copy row by row */ + uint8_t* dst_ptr = dst_base_ptr; + const uint8_t* src_ptr = src_base_ptr; + for (uint32_t slice_index = 0; slice_index < num_slices; slice_index++) { + SOKOL_ASSERT(dst_ptr == dst_base_ptr + slice_index * dst_bytes_per_slice); + for (uint32_t row_index = 0; row_index < num_rows; row_index++) { + memcpy(dst_ptr, src_ptr, src_bytes_per_row); + src_ptr += src_bytes_per_row; + dst_ptr += dst_bytes_per_row; + } + } + } + + /* record the staging copy operation into command encoder */ + src_view.imageHeight = mip_height; + src_view.rowPitch = dst_bytes_per_row; + dst_view.mipLevel = mip_index; + extent.width = mip_width; + extent.height = mip_height; + extent.depth = mip_depth; + SOKOL_ASSERT((img->cmn.type != SG_IMAGETYPE_CUBE) || (num_slices == 1)); + for (uint32_t slice_index = 0; slice_index < num_slices; slice_index++) { + const uint32_t layer_index = (img->cmn.type == SG_IMAGETYPE_ARRAY) ? slice_index : face_index; + src_view.offset = stg_offset; + dst_view.arrayLayer = layer_index; + wgpuCommandEncoderCopyBufferToTexture(_sg.wgpu.staging_cmd_enc, &src_view, &dst_view, &extent); + stg_offset += dst_bytes_per_slice; + SOKOL_ASSERT(stg_offset <= _sg.wgpu.staging.num_bytes); + } + } + } + SOKOL_ASSERT(stg_offset >= stg_base_offset); + return (stg_offset - stg_base_offset); +} + +/* + The WGPU staging buffer implementation: + + Very similar to the uniform buffer pool, there's a pool of big + per-frame staging buffers, each must be big enough to hold + all data uploaded to dynamic resources for one frame. + + Staging buffers are created on demand and reused, because the + 'frame pipeline depth' of WGPU isn't predictable. + + The difference to the uniform buffer system is that there isn't + a 1:1 relationship for source- and destination for the + data-copy operation. There's always one staging buffer as copy-source + per frame, but many copy-destinations (regular vertex/index buffers + or images). Instead of one big copy-operation at the end of the frame, + multiple copy-operations will be written throughout the frame. 
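+
+    [editor's note] Per-frame flow, summarized for orientation (the function names
+    are the ones implemented below, the numbered steps are an editorial summary):
+
+        1. _sg_wgpu_staging_next_frame(): async-remap last frame's buffer, then
+           reuse an already-mapped staging buffer, or create a new mapped one via
+           wgpuDeviceCreateBufferMapped().
+        2. during the frame, buffer and image updates memcpy into the mapped
+           staging memory and record wgpuCommandEncoderCopyBufferToBuffer() /
+           ...CopyBufferToTexture() blits through _sg_wgpu_staging_copy_to_buffer()
+           and _sg_wgpu_staging_copy_to_texture().
+        3. _sg_wgpu_staging_unmap(): unmap the current staging buffer right before
+           the queue submit so the recorded copy operations can execute.
+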
+*/ +_SOKOL_PRIVATE void _sg_wgpu_staging_init(const sg_desc* desc) { + SOKOL_ASSERT(desc && (desc->staging_buffer_size > 0)); + _sg.wgpu.staging.num_bytes = desc->staging_buffer_size; + /* there's actually nothing more to do here */ +} + +_SOKOL_PRIVATE void _sg_wgpu_staging_discard(void) { + for (int i = 0; i < _sg.wgpu.staging.num; i++) { + if (_sg.wgpu.staging.buf[i]) { + wgpuBufferRelease(_sg.wgpu.staging.buf[i]); + _sg.wgpu.staging.buf[i] = 0; + _sg.wgpu.staging.ptr[i] = 0; + } + } +} + +_SOKOL_PRIVATE void _sg_wgpu_staging_mapped_callback(WGPUBufferMapAsyncStatus status, void* data, uint64_t data_len, void* user_data) { + if (!_sg.wgpu.valid) { + return; + } + /* FIXME: better handling for this */ + if (WGPUBufferMapAsyncStatus_Success != status) { + SOKOL_ASSERT("Mapping staging buffer failed!\n"); + SOKOL_ASSERT(false); + } + SOKOL_ASSERT(data && (data_len == _sg.wgpu.staging.num_bytes)); + int index = (int)(intptr_t) user_data; + SOKOL_ASSERT(index < _sg.wgpu.staging.num); + SOKOL_ASSERT(0 == _sg.wgpu.staging.ptr[index]); + _sg.wgpu.staging.ptr[index] = (uint8_t*) data; +} + +_SOKOL_PRIVATE void _sg_wgpu_staging_next_frame(bool first_frame) { + + /* immediately request a new mapping for the last frame's current staging buffer */ + if (!first_frame) { + WGPUBuffer cur_buf = _sg.wgpu.staging.buf[_sg.wgpu.staging.cur]; + wgpuBufferMapWriteAsync(cur_buf, _sg_wgpu_staging_mapped_callback, (void*)(intptr_t)_sg.wgpu.staging.cur); + } + + /* rewind staging-buffer offset */ + _sg.wgpu.staging.offset = 0; + + /* check if mapped staging buffer is available, otherwise create one */ + for (int i = 0; i < _sg.wgpu.staging.num; i++) { + if (_sg.wgpu.staging.ptr[i]) { + _sg.wgpu.staging.cur = i; + return; + } + } + + /* no mapped buffer available, create one */ + SOKOL_ASSERT(_sg.wgpu.staging.num < _SG_WGPU_STAGING_PIPELINE_SIZE); + _sg.wgpu.staging.cur = _sg.wgpu.staging.num++; + const int cur = _sg.wgpu.staging.cur; + + WGPUBufferDescriptor desc; + memset(&desc, 0, sizeof(desc)); + desc.size = _sg.wgpu.staging.num_bytes; + desc.usage = WGPUBufferUsage_CopySrc|WGPUBufferUsage_MapWrite; + WGPUCreateBufferMappedResult res = wgpuDeviceCreateBufferMapped(_sg.wgpu.dev, &desc); + _sg.wgpu.staging.buf[cur] = res.buffer; + _sg.wgpu.staging.ptr[cur] = (uint8_t*) res.data; + SOKOL_ASSERT(_sg.wgpu.staging.buf[cur]); + SOKOL_ASSERT(_sg.wgpu.staging.ptr[cur]); + SOKOL_ASSERT(res.dataLength == _sg.wgpu.staging.num_bytes); +} + +_SOKOL_PRIVATE uint32_t _sg_wgpu_staging_copy_to_buffer(WGPUBuffer dst_buf, uint32_t dst_buf_offset, const void* data, uint32_t data_num_bytes) { + /* Copy a chunk of data into the staging buffer, and record a blit-operation into + the command encoder, bump the offset for the next data chunk, return 0 if there + was not enough room in the staging buffer, return the number of actually + copied bytes on success. + + NOTE: that the number of staging bytes to be copied must be a multiple of 4. 
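+
+      [editor's note] Worked example of the 4-byte contract (illustrative):
+      appending 10 bytes copies _sg_roundup(10, 4) == 12 bytes out of the staging
+      buffer and returns 12, so the caller advances its append position by 12 and
+      every following copy stays 4-byte aligned; a return value of 0 means the
+      per-frame staging buffer was full and nothing was copied.
+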
+ + */ + SOKOL_ASSERT(_sg.wgpu.staging_cmd_enc); + SOKOL_ASSERT((dst_buf_offset & 3) == 0); + SOKOL_ASSERT(data_num_bytes > 0); + uint32_t copy_num_bytes = _sg_roundup(data_num_bytes, 4); + if ((_sg.wgpu.staging.offset + copy_num_bytes) >= _sg.wgpu.staging.num_bytes) { + SOKOL_LOG("WGPU: Per frame staging buffer full (in _sg_wgpu_staging_copy_to_buffer())!\n"); + return false; + } + const int cur = _sg.wgpu.staging.cur; + SOKOL_ASSERT(_sg.wgpu.staging.ptr[cur]); + uint32_t stg_buf_offset = _sg.wgpu.staging.offset; + uint8_t* stg_ptr = _sg.wgpu.staging.ptr[cur] + stg_buf_offset; + memcpy(stg_ptr, data, data_num_bytes); + WGPUBuffer stg_buf = _sg.wgpu.staging.buf[cur]; + wgpuCommandEncoderCopyBufferToBuffer(_sg.wgpu.staging_cmd_enc, stg_buf, stg_buf_offset, dst_buf, dst_buf_offset, copy_num_bytes); + _sg.wgpu.staging.offset = stg_buf_offset + copy_num_bytes; + return copy_num_bytes; +} + +_SOKOL_PRIVATE bool _sg_wgpu_staging_copy_to_texture(_sg_image_t* img, const sg_image_data* data) { + /* similar to _sg_wgpu_staging_copy_to_buffer(), but with image data instead */ + SOKOL_ASSERT(_sg.wgpu.staging_cmd_enc); + uint32_t num_bytes = _sg_wgpu_image_data_buffer_size(img); + if ((_sg.wgpu.staging.offset + num_bytes) >= _sg.wgpu.staging.num_bytes) { + SOKOL_LOG("WGPU: Per frame staging buffer full (in _sg_wgpu_staging_copy_to_texture)!\n"); + return false; + } + const int cur = _sg.wgpu.staging.cur; + SOKOL_ASSERT(_sg.wgpu.staging.ptr[cur]); + uint32_t stg_offset = _sg.wgpu.staging.offset; + uint8_t* stg_ptr = _sg.wgpu.staging.ptr[cur]; + WGPUBuffer stg_buf = _sg.wgpu.staging.buf[cur]; + uint32_t bytes_copied = _sg_wgpu_copy_image_data(stg_buf, stg_ptr, stg_offset, img, data); + _SOKOL_UNUSED(bytes_copied); + SOKOL_ASSERT(bytes_copied == num_bytes); + _sg.wgpu.staging.offset = _sg_roundup(stg_offset + num_bytes, _SG_WGPU_STAGING_ALIGN); + return true; +} + +_SOKOL_PRIVATE void _sg_wgpu_staging_unmap(void) { + /* called at end of frame before queue-submit */ + const int cur = _sg.wgpu.staging.cur; + SOKOL_ASSERT(_sg.wgpu.staging.ptr[cur]); + _sg.wgpu.staging.ptr[cur] = 0; + wgpuBufferUnmap(_sg.wgpu.staging.buf[cur]); +} + +/*--- WGPU sampler cache functions ---*/ +_SOKOL_PRIVATE void _sg_wgpu_init_sampler_cache(const sg_desc* desc) { + SOKOL_ASSERT(desc->sampler_cache_size > 0); + _sg_smpcache_init(&_sg.wgpu.sampler_cache, desc->sampler_cache_size); +} + +_SOKOL_PRIVATE void _sg_wgpu_destroy_sampler_cache(void) { + SOKOL_ASSERT(_sg.wgpu.sampler_cache.items); + SOKOL_ASSERT(_sg.wgpu.sampler_cache.num_items <= _sg.wgpu.sampler_cache.capacity); + for (int i = 0; i < _sg.wgpu.sampler_cache.num_items; i++) { + wgpuSamplerRelease((WGPUSampler)_sg_smpcache_sampler(&_sg.wgpu.sampler_cache, i)); + } + _sg_smpcache_discard(&_sg.wgpu.sampler_cache); +} + +_SOKOL_PRIVATE WGPUSampler _sg_wgpu_create_sampler(const sg_image_desc* img_desc) { + SOKOL_ASSERT(img_desc); + int index = _sg_smpcache_find_item(&_sg.wgpu.sampler_cache, img_desc); + if (index >= 0) { + /* reuse existing sampler */ + return (WGPUSampler) _sg_smpcache_sampler(&_sg.wgpu.sampler_cache, index); + } + else { + /* create a new WGPU sampler and add to sampler cache */ + /* FIXME: anisotropic filtering not supported? 
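+
+           [editor's note] Illustration of the shared sampler cache above, not
+           upstream code: two images created with identical sampler-related fields
+           (filters, wrap modes, LOD range) should end up sharing one WGPUSampler,
+           e.g.
+
+               sg_image_desc d = {
+                   .width = 16, .height = 16, .usage = SG_USAGE_DYNAMIC,
+                   .min_filter = SG_FILTER_LINEAR, .mag_filter = SG_FILTER_LINEAR,
+                   .wrap_u = SG_WRAP_REPEAT, .wrap_v = SG_WRAP_REPEAT,
+               };
+               sg_image a = sg_make_image(&d);  // creates and caches a sampler
+               sg_image b = sg_make_image(&d);  // hits _sg_smpcache_find_item() and reuses it
+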
*/ + WGPUSamplerDescriptor smp_desc; + memset(&smp_desc, 0, sizeof(smp_desc)); + smp_desc.addressModeU = _sg_wgpu_sampler_addrmode(img_desc->wrap_u); + smp_desc.addressModeV = _sg_wgpu_sampler_addrmode(img_desc->wrap_v); + smp_desc.addressModeW = _sg_wgpu_sampler_addrmode(img_desc->wrap_w); + smp_desc.magFilter = _sg_wgpu_sampler_minmagfilter(img_desc->mag_filter); + smp_desc.minFilter = _sg_wgpu_sampler_minmagfilter(img_desc->min_filter); + smp_desc.mipmapFilter = _sg_wgpu_sampler_mipfilter(img_desc->min_filter); + smp_desc.lodMinClamp = img_desc->min_lod; + smp_desc.lodMaxClamp = img_desc->max_lod; + WGPUSampler smp = wgpuDeviceCreateSampler(_sg.wgpu.dev, &smp_desc); + SOKOL_ASSERT(smp); + _sg_smpcache_add_item(&_sg.wgpu.sampler_cache, img_desc, (uintptr_t)smp); + return smp; + } +} + +/*--- WGPU backend API functions ---*/ +_SOKOL_PRIVATE void _sg_wgpu_setup_backend(const sg_desc* desc) { + SOKOL_ASSERT(desc); + SOKOL_ASSERT(desc->context.wgpu.device); + SOKOL_ASSERT(desc->context.wgpu.render_view_cb || desc->context.wgpu.render_view_userdata_cb); + SOKOL_ASSERT(desc->context.wgpu.resolve_view_cb || desc->context.wgpu.resolve_view_userdata_cb); + SOKOL_ASSERT(desc->context.wgpu.depth_stencil_view_cb || desc->context.wgpu.depth_stencil_view_userdata_cb); + SOKOL_ASSERT(desc->uniform_buffer_size > 0); + SOKOL_ASSERT(desc->staging_buffer_size > 0); + _sg.backend = SG_BACKEND_WGPU; + _sg.wgpu.valid = true; + _sg.wgpu.dev = (WGPUDevice) desc->context.wgpu.device; + _sg.wgpu.render_view_cb = (WGPUTextureView(*)(void)) desc->context.wgpu.render_view_cb; + _sg.wgpu.render_view_userdata_cb = (WGPUTextureView(*)(void*)) desc->context.wgpu.render_view_userdata_cb; + _sg.wgpu.resolve_view_cb = (WGPUTextureView(*)(void)) desc->context.wgpu.resolve_view_cb; + _sg.wgpu.resolve_view_userdata_cb = (WGPUTextureView(*)(void*)) desc->context.wgpu.resolve_view_userdata_cb; + _sg.wgpu.depth_stencil_view_cb = (WGPUTextureView(*)(void)) desc->context.wgpu.depth_stencil_view_cb; + _sg.wgpu.depth_stencil_view_userdata_cb = (WGPUTextureView(*)(void*)) desc->context.wgpu.depth_stencil_view_userdata_cb; + _sg.wgpu.user_data = desc->context.wgpu.user_data; + _sg.wgpu.queue = wgpuDeviceCreateQueue(_sg.wgpu.dev); + SOKOL_ASSERT(_sg.wgpu.queue); + + /* setup WebGPU features and limits */ + _sg_wgpu_init_caps(); + + /* setup the sampler cache, uniform and staging buffer pools */ + _sg_wgpu_init_sampler_cache(&_sg.desc); + _sg_wgpu_ubpool_init(desc); + _sg_wgpu_ubpool_next_frame(true); + _sg_wgpu_staging_init(desc); + _sg_wgpu_staging_next_frame(true); + + /* create an empty bind group for shader stages without bound images */ + WGPUBindGroupLayoutDescriptor bgl_desc; + memset(&bgl_desc, 0, sizeof(bgl_desc)); + WGPUBindGroupLayout empty_bgl = wgpuDeviceCreateBindGroupLayout(_sg.wgpu.dev, &bgl_desc); + SOKOL_ASSERT(empty_bgl); + WGPUBindGroupDescriptor bg_desc; + memset(&bg_desc, 0, sizeof(bg_desc)); + bg_desc.layout = empty_bgl; + _sg.wgpu.empty_bind_group = wgpuDeviceCreateBindGroup(_sg.wgpu.dev, &bg_desc); + SOKOL_ASSERT(_sg.wgpu.empty_bind_group); + wgpuBindGroupLayoutRelease(empty_bgl); + + /* create initial per-frame command encoders */ + WGPUCommandEncoderDescriptor cmd_enc_desc; + memset(&cmd_enc_desc, 0, sizeof(cmd_enc_desc)); + _sg.wgpu.render_cmd_enc = wgpuDeviceCreateCommandEncoder(_sg.wgpu.dev, &cmd_enc_desc); + SOKOL_ASSERT(_sg.wgpu.render_cmd_enc); + _sg.wgpu.staging_cmd_enc = wgpuDeviceCreateCommandEncoder(_sg.wgpu.dev, &cmd_enc_desc); + SOKOL_ASSERT(_sg.wgpu.staging_cmd_enc); +} + +_SOKOL_PRIVATE void 
_sg_wgpu_discard_backend(void) { + SOKOL_ASSERT(_sg.wgpu.valid); + SOKOL_ASSERT(_sg.wgpu.render_cmd_enc); + SOKOL_ASSERT(_sg.wgpu.staging_cmd_enc); + _sg.wgpu.valid = false; + _sg_wgpu_ubpool_discard(); + _sg_wgpu_staging_discard(); + _sg_wgpu_destroy_sampler_cache(); + wgpuBindGroupRelease(_sg.wgpu.empty_bind_group); + wgpuCommandEncoderRelease(_sg.wgpu.render_cmd_enc); + _sg.wgpu.render_cmd_enc = 0; + wgpuCommandEncoderRelease(_sg.wgpu.staging_cmd_enc); + _sg.wgpu.staging_cmd_enc = 0; + if (_sg.wgpu.queue) { + wgpuQueueRelease(_sg.wgpu.queue); + _sg.wgpu.queue = 0; + } +} + +_SOKOL_PRIVATE void _sg_wgpu_reset_state_cache(void) { + SOKOL_LOG("_sg_wgpu_reset_state_cache: FIXME\n"); +} + +_SOKOL_PRIVATE sg_resource_state _sg_wgpu_create_context(_sg_context_t* ctx) { + SOKOL_ASSERT(ctx); + _SOKOL_UNUSED(ctx); + return SG_RESOURCESTATE_VALID; +} + +_SOKOL_PRIVATE void _sg_wgpu_destroy_context(_sg_context_t* ctx) { + SOKOL_ASSERT(ctx); + _SOKOL_UNUSED(ctx); +} + +_SOKOL_PRIVATE void _sg_wgpu_activate_context(_sg_context_t* ctx) { + (void)ctx; + SOKOL_LOG("_sg_wgpu_activate_context: FIXME\n"); +} + +_SOKOL_PRIVATE sg_resource_state _sg_wgpu_create_buffer(_sg_buffer_t* buf, const sg_buffer_desc* desc) { + SOKOL_ASSERT(buf && desc); + const bool injected = (0 != desc->wgpu_buffer); + _sg_buffer_common_init(&buf->cmn, desc); + if (injected) { + buf->wgpu.buf = (WGPUBuffer) desc->wgpu_buffer; + wgpuBufferReference(buf->wgpu.buf); + } + else { + WGPUBufferDescriptor wgpu_buf_desc; + memset(&wgpu_buf_desc, 0, sizeof(wgpu_buf_desc)); + wgpu_buf_desc.usage = _sg_wgpu_buffer_usage(buf->cmn.type, buf->cmn.usage); + wgpu_buf_desc.size = buf->cmn.size; + if (SG_USAGE_IMMUTABLE == buf->cmn.usage) { + SOKOL_ASSERT(desc->data.ptr); + WGPUCreateBufferMappedResult res = wgpuDeviceCreateBufferMapped(_sg.wgpu.dev, &wgpu_buf_desc); + buf->wgpu.buf = res.buffer; + SOKOL_ASSERT(res.data && (res.dataLength == buf->cmn.size)); + memcpy(res.data, desc->data.ptr, buf->cmn.size); + wgpuBufferUnmap(res.buffer); + } + else { + buf->wgpu.buf = wgpuDeviceCreateBuffer(_sg.wgpu.dev, &wgpu_buf_desc); + } + } + return SG_RESOURCESTATE_VALID; +} + +_SOKOL_PRIVATE void _sg_wgpu_destroy_buffer(_sg_buffer_t* buf) { + SOKOL_ASSERT(buf); + WGPUBuffer wgpu_buf = buf->wgpu.buf; + if (0 != wgpu_buf) { + wgpuBufferRelease(wgpu_buf); + } +} + +_SOKOL_PRIVATE void _sg_wgpu_init_texdesc_common(WGPUTextureDescriptor* wgpu_tex_desc, const sg_image_desc* desc) { + wgpu_tex_desc->usage = WGPUTextureUsage_Sampled|WGPUTextureUsage_CopyDst; + wgpu_tex_desc->dimension = _sg_wgpu_tex_dim(desc->type); + wgpu_tex_desc->size.width = desc->width; + wgpu_tex_desc->size.height = desc->height; + if (desc->type == SG_IMAGETYPE_3D) { + wgpu_tex_desc->size.depth = desc->num_slices; + wgpu_tex_desc->arrayLayerCount = 1; + } + else if (desc->type == SG_IMAGETYPE_CUBE) { + wgpu_tex_desc->size.depth = 1; + wgpu_tex_desc->arrayLayerCount = 6; + } + else { + wgpu_tex_desc->size.depth = 1; + wgpu_tex_desc->arrayLayerCount = desc->num_slices; + } + wgpu_tex_desc->format = _sg_wgpu_textureformat(desc->pixel_format); + wgpu_tex_desc->mipLevelCount = desc->num_mipmaps; + wgpu_tex_desc->sampleCount = 1; +} + +_SOKOL_PRIVATE sg_resource_state _sg_wgpu_create_image(_sg_image_t* img, const sg_image_desc* desc) { + SOKOL_ASSERT(img && desc); + SOKOL_ASSERT(_sg.wgpu.dev); + SOKOL_ASSERT(_sg.wgpu.staging_cmd_enc); + + _sg_image_common_init(&img->cmn, desc); + + const bool injected = (0 != desc->wgpu_texture); + const bool is_msaa = desc->sample_count > 1; + 
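+
+    /* [editor's note] Editorial overview of the branches below, not upstream
+       documentation: a valid depth/stencil render-target format takes the first
+       branch and only gets a plain OutputAttachment texture (never MSAA-resolved);
+       otherwise the texture is either injected through desc->wgpu_texture or
+       created here, immutable non-render-target content is uploaded through a
+       throw-away mapped staging buffer, and an MSAA render target additionally
+       gets a separate multisampled texture that is resolved at the end of the
+       offscreen pass. */
+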
WGPUTextureDescriptor wgpu_tex_desc; + memset(&wgpu_tex_desc, 0, sizeof(wgpu_tex_desc)); + _sg_wgpu_init_texdesc_common(&wgpu_tex_desc, desc); + if (_sg_is_valid_rendertarget_depth_format(img->cmn.pixel_format)) { + SOKOL_ASSERT(img->cmn.render_target); + SOKOL_ASSERT(img->cmn.type == SG_IMAGETYPE_2D); + SOKOL_ASSERT(img->cmn.num_mipmaps == 1); + SOKOL_ASSERT(!injected); + /* NOTE: a depth-stencil texture will never be MSAA-resolved, so there + won't be a separate MSAA- and resolve-texture + */ + wgpu_tex_desc.usage = WGPUTextureUsage_OutputAttachment; + wgpu_tex_desc.sampleCount = desc->sample_count; + img->wgpu.tex = wgpuDeviceCreateTexture(_sg.wgpu.dev, &wgpu_tex_desc); + SOKOL_ASSERT(img->wgpu.tex); + } + else { + if (injected) { + img->wgpu.tex = (WGPUTexture) desc->wgpu_texture; + wgpuTextureReference(img->wgpu.tex); + } + else { + /* NOTE: in the MSAA-rendertarget case, both the MSAA texture *and* + the resolve texture need OutputAttachment usage + */ + if (img->cmn.render_target) { + wgpu_tex_desc.usage = WGPUTextureUsage_Sampled|WGPUTextureUsage_OutputAttachment; + } + img->wgpu.tex = wgpuDeviceCreateTexture(_sg.wgpu.dev, &wgpu_tex_desc); + SOKOL_ASSERT(img->wgpu.tex); + + /* copy content into texture via a throw-away staging buffer */ + if (desc->usage == SG_USAGE_IMMUTABLE && !desc->render_target) { + WGPUBufferDescriptor wgpu_buf_desc; + memset(&wgpu_buf_desc, 0, sizeof(wgpu_buf_desc)); + wgpu_buf_desc.size = _sg_wgpu_image_data_buffer_size(img); + wgpu_buf_desc.usage = WGPUBufferUsage_CopySrc|WGPUBufferUsage_CopyDst; + WGPUCreateBufferMappedResult map = wgpuDeviceCreateBufferMapped(_sg.wgpu.dev, &wgpu_buf_desc); + SOKOL_ASSERT(map.buffer && map.data); + uint32_t num_bytes = _sg_wgpu_copy_image_data(map.buffer, (uint8_t*)map.data, 0, img, &desc->data); + _SOKOL_UNUSED(num_bytes); + SOKOL_ASSERT(num_bytes == wgpu_buf_desc.size); + wgpuBufferUnmap(map.buffer); + wgpuBufferRelease(map.buffer); + } + } + + /* create texture view object */ + WGPUTextureViewDescriptor wgpu_view_desc; + memset(&wgpu_view_desc, 0, sizeof(wgpu_view_desc)); + wgpu_view_desc.dimension = _sg_wgpu_tex_viewdim(desc->type); + img->wgpu.tex_view = wgpuTextureCreateView(img->wgpu.tex, &wgpu_view_desc); + + /* if render target and MSAA, then a separate texture in MSAA format is needed + which will be resolved into the regular texture at the end of the + offscreen-render pass + */ + if (desc->render_target && is_msaa) { + wgpu_tex_desc.dimension = WGPUTextureDimension_2D; + wgpu_tex_desc.size.depth = 1; + wgpu_tex_desc.arrayLayerCount = 1; + wgpu_tex_desc.mipLevelCount = 1; + wgpu_tex_desc.usage = WGPUTextureUsage_OutputAttachment; + wgpu_tex_desc.sampleCount = desc->sample_count; + img->wgpu.msaa_tex = wgpuDeviceCreateTexture(_sg.wgpu.dev, &wgpu_tex_desc); + SOKOL_ASSERT(img->wgpu.msaa_tex); + } + + /* create sampler via shared-sampler-cache */ + img->wgpu.sampler = _sg_wgpu_create_sampler(desc); + SOKOL_ASSERT(img->wgpu.sampler); + } + return SG_RESOURCESTATE_VALID; +} + +_SOKOL_PRIVATE void _sg_wgpu_destroy_image(_sg_image_t* img) { + SOKOL_ASSERT(img); + if (img->wgpu.tex) { + wgpuTextureRelease(img->wgpu.tex); + img->wgpu.tex = 0; + } + if (img->wgpu.tex_view) { + wgpuTextureViewRelease(img->wgpu.tex_view); + img->wgpu.tex_view = 0; + } + if (img->wgpu.msaa_tex) { + wgpuTextureRelease(img->wgpu.msaa_tex); + img->wgpu.msaa_tex = 0; + } + /* NOTE: do *not* destroy the sampler from the shared-sampler-cache */ + img->wgpu.sampler = 0; +} + +/* + How BindGroups work in WebGPU: + + - up to 4 bind groups can be 
bound simultaneously + - up to 16 bindings per bind group + - 'binding' slots are local per bind group + - in the shader: + layout(set=0, binding=1) corresponds to bind group 0, binding 1 + + Now how to map this to sokol-gfx's bind model: + + Reduce SG_MAX_SHADERSTAGE_IMAGES to 8, then: + + 1 bind group for all 8 uniform buffers + 1 bind group for vertex shader textures + samplers + 1 bind group for fragment shader textures + samples + + Alternatively: + + 1 bind group for 8 uniform buffer slots + 1 bind group for 8 vs images + 8 vs samplers + 1 bind group for 12 fs images + 1 bind group for 12 fs samplers + + I guess this means that we need to create BindGroups on the + fly during sg_apply_bindings() :/ +*/ +_SOKOL_PRIVATE sg_resource_state _sg_wgpu_create_shader(_sg_shader_t* shd, const sg_shader_desc* desc) { + SOKOL_ASSERT(shd && desc); + SOKOL_ASSERT(desc->vs.bytecode.ptr && desc->fs.bytecode.ptr); + _sg_shader_common_init(&shd->cmn, desc); + + bool success = true; + for (int stage_index = 0; stage_index < SG_NUM_SHADER_STAGES; stage_index++) { + const sg_shader_stage_desc* stage_desc = (stage_index == SG_SHADERSTAGE_VS) ? &desc->vs : &desc->fs; + SOKOL_ASSERT((stage_desc->bytecode.size & 3) == 0); + + _sg_shader_stage_t* cmn_stage = &shd->cmn.stage[stage_index]; + _sg_wgpu_shader_stage_t* wgpu_stage = &shd->wgpu.stage[stage_index]; + + _sg_strcpy(&wgpu_stage->entry, stage_desc->entry); + WGPUShaderModuleDescriptor wgpu_shdmod_desc; + memset(&wgpu_shdmod_desc, 0, sizeof(wgpu_shdmod_desc)); + wgpu_shdmod_desc.codeSize = stage_desc->bytecode.size >> 2; + wgpu_shdmod_desc.code = (const uint32_t*) stage_desc->bytecode.ptr; + wgpu_stage->module = wgpuDeviceCreateShaderModule(_sg.wgpu.dev, &wgpu_shdmod_desc); + if (0 == wgpu_stage->module) { + success = false; + } + + /* create image/sampler bind group for the shader stage */ + WGPUShaderStage vis = (stage_index == SG_SHADERSTAGE_VS) ? WGPUShaderStage_Vertex : WGPUShaderStage_Fragment; + int num_imgs = cmn_stage->num_images; + if (num_imgs > _SG_WGPU_MAX_SHADERSTAGE_IMAGES) { + num_imgs = _SG_WGPU_MAX_SHADERSTAGE_IMAGES; + } + WGPUBindGroupLayoutBinding bglb_desc[_SG_WGPU_MAX_SHADERSTAGE_IMAGES * 2]; + memset(bglb_desc, 0, sizeof(bglb_desc)); + for (int img_index = 0; img_index < num_imgs; img_index++) { + /* texture- and sampler-bindings */ + WGPUBindGroupLayoutBinding* tex_desc = &bglb_desc[img_index*2 + 0]; + WGPUBindGroupLayoutBinding* smp_desc = &bglb_desc[img_index*2 + 1]; + + tex_desc->binding = img_index; + tex_desc->visibility = vis; + tex_desc->type = WGPUBindingType_SampledTexture; + tex_desc->textureDimension = _sg_wgpu_tex_viewdim(cmn_stage->images[img_index].image_type); + tex_desc->textureComponentType = _sg_wgpu_tex_comptype(cmn_stage->images[img_index].sampler_type); + + smp_desc->binding = img_index + _SG_WGPU_MAX_SHADERSTAGE_IMAGES; + smp_desc->visibility = vis; + smp_desc->type = WGPUBindingType_Sampler; + } + WGPUBindGroupLayoutDescriptor img_bgl_desc; + memset(&img_bgl_desc, 0, sizeof(img_bgl_desc)); + img_bgl_desc.bindingCount = num_imgs * 2; + img_bgl_desc.bindings = &bglb_desc[0]; + wgpu_stage->bind_group_layout = wgpuDeviceCreateBindGroupLayout(_sg.wgpu.dev, &img_bgl_desc); + SOKOL_ASSERT(wgpu_stage->bind_group_layout); + } + return success ? 
SG_RESOURCESTATE_VALID : SG_RESOURCESTATE_FAILED; +} + +_SOKOL_PRIVATE void _sg_wgpu_destroy_shader(_sg_shader_t* shd) { + SOKOL_ASSERT(shd); + for (int stage_index = 0; stage_index < SG_NUM_SHADER_STAGES; stage_index++) { + _sg_wgpu_shader_stage_t* wgpu_stage = &shd->wgpu.stage[stage_index]; + if (wgpu_stage->module) { + wgpuShaderModuleRelease(wgpu_stage->module); + wgpu_stage->module = 0; + } + if (wgpu_stage->bind_group_layout) { + wgpuBindGroupLayoutRelease(wgpu_stage->bind_group_layout); + wgpu_stage->bind_group_layout = 0; + } + } +} + +_SOKOL_PRIVATE sg_resource_state _sg_wgpu_create_pipeline(_sg_pipeline_t* pip, _sg_shader_t* shd, const sg_pipeline_desc* desc) { + SOKOL_ASSERT(pip && shd && desc); + SOKOL_ASSERT(desc->shader.id == shd->slot.id); + SOKOL_ASSERT(shd->wgpu.stage[SG_SHADERSTAGE_VS].bind_group_layout); + SOKOL_ASSERT(shd->wgpu.stage[SG_SHADERSTAGE_FS].bind_group_layout); + pip->shader = shd; + _sg_pipeline_common_init(&pip->cmn, desc); + pip->wgpu.stencil_ref = (uint32_t) desc->stencil.ref; + + WGPUBindGroupLayout pip_bgl[3] = { + _sg.wgpu.ub.bindgroup_layout, + shd->wgpu.stage[SG_SHADERSTAGE_VS].bind_group_layout, + shd->wgpu.stage[SG_SHADERSTAGE_FS].bind_group_layout + }; + WGPUPipelineLayoutDescriptor pl_desc; + memset(&pl_desc, 0, sizeof(pl_desc)); + pl_desc.bindGroupLayoutCount = 3; + pl_desc.bindGroupLayouts = &pip_bgl[0]; + WGPUPipelineLayout pip_layout = wgpuDeviceCreatePipelineLayout(_sg.wgpu.dev, &pl_desc); + + WGPUVertexBufferLayoutDescriptor vb_desc[SG_MAX_SHADERSTAGE_BUFFERS]; + memset(&vb_desc, 0, sizeof(vb_desc)); + WGPUVertexAttributeDescriptor va_desc[SG_MAX_SHADERSTAGE_BUFFERS][SG_MAX_VERTEX_ATTRIBUTES]; + memset(&va_desc, 0, sizeof(va_desc)); + int vb_idx = 0; + for (; vb_idx < SG_MAX_SHADERSTAGE_BUFFERS; vb_idx++) { + const sg_buffer_layout_desc* src_vb_desc = &desc->layout.buffers[vb_idx]; + if (0 == src_vb_desc->stride) { + break; + } + vb_desc[vb_idx].arrayStride = src_vb_desc->stride; + vb_desc[vb_idx].stepMode = _sg_wgpu_stepmode(src_vb_desc->step_func); + /* NOTE: WebGPU has no support for vertex step rate (because that's + not supported by Core Vulkan + */ + int va_idx = 0; + for (int va_loc = 0; va_loc < SG_MAX_VERTEX_ATTRIBUTES; va_loc++) { + const sg_vertex_attr_desc* src_va_desc = &desc->layout.attrs[va_loc]; + if (SG_VERTEXFORMAT_INVALID == src_va_desc->format) { + break; + } + pip->cmn.vertex_layout_valid[src_va_desc->buffer_index] = true; + if (vb_idx == src_va_desc->buffer_index) { + va_desc[vb_idx][va_idx].format = _sg_wgpu_vertexformat(src_va_desc->format); + va_desc[vb_idx][va_idx].offset = src_va_desc->offset; + va_desc[vb_idx][va_idx].shaderLocation = va_loc; + va_idx++; + } + } + vb_desc[vb_idx].attributeCount = va_idx; + vb_desc[vb_idx].attributes = &va_desc[vb_idx][0]; + } + WGPUVertexStateDescriptor vx_state_desc; + memset(&vx_state_desc, 0, sizeof(vx_state_desc)); + vx_state_desc.indexFormat = _sg_wgpu_indexformat(desc->index_type); + vx_state_desc.vertexBufferCount = vb_idx; + vx_state_desc.vertexBuffers = vb_desc; + + WGPURasterizationStateDescriptor rs_desc; + memset(&rs_desc, 0, sizeof(rs_desc)); + rs_desc.frontFace = _sg_wgpu_frontface(desc->face_winding); + rs_desc.cullMode = _sg_wgpu_cullmode(desc->cull_mode); + rs_desc.depthBias = (int32_t) desc->depth.bias; + rs_desc.depthBiasClamp = desc->depth.bias_clamp; + rs_desc.depthBiasSlopeScale = desc->depth.bias_slope_scale; + + WGPUDepthStencilStateDescriptor ds_desc; + memset(&ds_desc, 0, sizeof(ds_desc)); + ds_desc.format = 
_sg_wgpu_textureformat(desc->depth.pixel_format); + ds_desc.depthWriteEnabled = desc->depth.write_enabled; + ds_desc.depthCompare = _sg_wgpu_comparefunc(desc->depth.compare); + ds_desc.stencilReadMask = desc->stencil.read_mask; + ds_desc.stencilWriteMask = desc->stencil.write_mask; + ds_desc.stencilFront.compare = _sg_wgpu_comparefunc(desc->stencil.front.compare); + ds_desc.stencilFront.failOp = _sg_wgpu_stencilop(desc->stencil.front.fail_op); + ds_desc.stencilFront.depthFailOp = _sg_wgpu_stencilop(desc->stencil.front.depth_fail_op); + ds_desc.stencilFront.passOp = _sg_wgpu_stencilop(desc->stencil.front.pass_op); + ds_desc.stencilBack.compare = _sg_wgpu_comparefunc(desc->stencil.back.compare); + ds_desc.stencilBack.failOp = _sg_wgpu_stencilop(desc->stencil.back.fail_op); + ds_desc.stencilBack.depthFailOp = _sg_wgpu_stencilop(desc->stencil.back.depth_fail_op); + ds_desc.stencilBack.passOp = _sg_wgpu_stencilop(desc->stencil.back.pass_op); + + WGPUProgrammableStageDescriptor fs_desc; + memset(&fs_desc, 0, sizeof(fs_desc)); + fs_desc.module = shd->wgpu.stage[SG_SHADERSTAGE_FS].module; + fs_desc.entryPoint = shd->wgpu.stage[SG_SHADERSTAGE_VS].entry.buf; + + WGPUColorStateDescriptor cs_desc[SG_MAX_COLOR_ATTACHMENTS]; + memset(cs_desc, 0, sizeof(cs_desc)); + for (uint32_t i = 0; i < desc->color_count; i++) { + SOKOL_ASSERT(i < SG_MAX_COLOR_ATTACHMENTS); + cs_desc[i].format = _sg_wgpu_textureformat(desc->colors[i].pixel_format); + cs_desc[i].colorBlend.operation = _sg_wgpu_blendop(desc->colors[i].blend.op_rgb); + cs_desc[i].colorBlend.srcFactor = _sg_wgpu_blendfactor(desc->colors[i].blend.src_factor_rgb); + cs_desc[i].colorBlend.dstFactor = _sg_wgpu_blendfactor(desc->colors[i].blend.dst_factor_rgb); + cs_desc[i].alphaBlend.operation = _sg_wgpu_blendop(desc->colors[i].blend.op_alpha); + cs_desc[i].alphaBlend.srcFactor = _sg_wgpu_blendfactor(desc->colors[i].blend.src_factor_alpha); + cs_desc[i].alphaBlend.dstFactor = _sg_wgpu_blendfactor(desc->colors[i].blend.dst_factor_alpha); + cs_desc[i].writeMask = _sg_wgpu_colorwritemask(desc->colors[i].write_mask); + } + + WGPURenderPipelineDescriptor pip_desc; + memset(&pip_desc, 0, sizeof(pip_desc)); + pip_desc.layout = pip_layout; + pip_desc.vertexStage.module = shd->wgpu.stage[SG_SHADERSTAGE_VS].module; + pip_desc.vertexStage.entryPoint = shd->wgpu.stage[SG_SHADERSTAGE_VS].entry.buf; + pip_desc.fragmentStage = &fs_desc; + pip_desc.vertexState = &vx_state_desc; + pip_desc.primitiveTopology = _sg_wgpu_topology(desc->primitive_type); + pip_desc.rasterizationState = &rs_desc; + pip_desc.sampleCount = desc->sample_count; + if (SG_PIXELFORMAT_NONE != desc->depth.pixel_format) { + pip_desc.depthStencilState = &ds_desc; + } + pip_desc.colorStateCount = desc->color_count; + pip_desc.colorStates = cs_desc; + pip_desc.sampleMask = 0xFFFFFFFF; /* FIXME: ??? 
*/ + pip->wgpu.pip = wgpuDeviceCreateRenderPipeline(_sg.wgpu.dev, &pip_desc); + SOKOL_ASSERT(0 != pip->wgpu.pip); + wgpuPipelineLayoutRelease(pip_layout); + + return SG_RESOURCESTATE_VALID; +} + +_SOKOL_PRIVATE void _sg_wgpu_destroy_pipeline(_sg_pipeline_t* pip) { + SOKOL_ASSERT(pip); + if (pip->wgpu.pip) { + wgpuRenderPipelineRelease(pip->wgpu.pip); + pip->wgpu.pip = 0; + } +} + +_SOKOL_PRIVATE sg_resource_state _sg_wgpu_create_pass(_sg_pass_t* pass, _sg_image_t** att_images, const sg_pass_desc* desc) { + SOKOL_ASSERT(pass && desc); + SOKOL_ASSERT(att_images && att_images[0]); + _sg_pass_common_init(&pass->cmn, desc); + + /* copy image pointers and create render-texture views */ + const sg_pass_attachment_desc* att_desc; + for (uint32_t i = 0; i < pass->cmn.num_color_atts; i++) { + att_desc = &desc->color_attachments[i]; + if (att_desc->image.id != SG_INVALID_ID) { + SOKOL_ASSERT(att_desc->image.id != SG_INVALID_ID); + SOKOL_ASSERT(0 == pass->wgpu.color_atts[i].image); + _sg_image_t* img = att_images[i]; + SOKOL_ASSERT(img && (img->slot.id == att_desc->image.id)); + SOKOL_ASSERT(_sg_is_valid_rendertarget_color_format(img->cmn.pixel_format)); + pass->wgpu.color_atts[i].image = img; + /* create a render-texture-view to render into the right sub-surface */ + const bool is_msaa = img->cmn.sample_count > 1; + WGPUTextureViewDescriptor view_desc; + memset(&view_desc, 0, sizeof(view_desc)); + view_desc.baseMipLevel = is_msaa ? 0 : att_desc->mip_level; + view_desc.mipLevelCount = 1; + view_desc.baseArrayLayer = is_msaa ? 0 : att_desc->slice; + view_desc.arrayLayerCount = 1; + WGPUTexture wgpu_tex = is_msaa ? img->wgpu.msaa_tex : img->wgpu.tex; + SOKOL_ASSERT(wgpu_tex); + pass->wgpu.color_atts[i].render_tex_view = wgpuTextureCreateView(wgpu_tex, &view_desc); + SOKOL_ASSERT(pass->wgpu.color_atts[i].render_tex_view); + /* ... 
and if needed a separate resolve texture view */ + if (is_msaa) { + view_desc.baseMipLevel = att_desc->mip_level; + view_desc.baseArrayLayer = att_desc->slice; + WGPUTexture wgpu_tex = img->wgpu.tex; + pass->wgpu.color_atts[i].resolve_tex_view = wgpuTextureCreateView(wgpu_tex, &view_desc); + SOKOL_ASSERT(pass->wgpu.color_atts[i].resolve_tex_view); + } + } + } + SOKOL_ASSERT(0 == pass->wgpu.ds_att.image); + att_desc = &desc->depth_stencil_attachment; + if (att_desc->image.id != SG_INVALID_ID) { + const int ds_img_index = SG_MAX_COLOR_ATTACHMENTS; + SOKOL_ASSERT(att_images[ds_img_index] && (att_images[ds_img_index]->slot.id == att_desc->image.id)); + SOKOL_ASSERT(_sg_is_valid_rendertarget_depth_format(att_images[ds_img_index]->cmn.pixel_format)); + _sg_image_t* ds_img = att_images[ds_img_index]; + pass->wgpu.ds_att.image = ds_img; + /* create a render-texture view */ + SOKOL_ASSERT(0 == att_desc->mip_level); + SOKOL_ASSERT(0 == att_desc->slice); + WGPUTextureViewDescriptor view_desc; + memset(&view_desc, 0, sizeof(view_desc)); + WGPUTexture wgpu_tex = ds_img->wgpu.tex; + SOKOL_ASSERT(wgpu_tex); + pass->wgpu.ds_att.render_tex_view = wgpuTextureCreateView(wgpu_tex, &view_desc); + SOKOL_ASSERT(pass->wgpu.ds_att.render_tex_view); + } + return SG_RESOURCESTATE_VALID; +} + +_SOKOL_PRIVATE void _sg_wgpu_destroy_pass(_sg_pass_t* pass) { + SOKOL_ASSERT(pass); + for (uint32_t i = 0; i < pass->cmn.num_color_atts; i++) { + if (pass->wgpu.color_atts[i].render_tex_view) { + wgpuTextureViewRelease(pass->wgpu.color_atts[i].render_tex_view); + pass->wgpu.color_atts[i].render_tex_view = 0; + } + if (pass->wgpu.color_atts[i].resolve_tex_view) { + wgpuTextureViewRelease(pass->wgpu.color_atts[i].resolve_tex_view); + pass->wgpu.color_atts[i].resolve_tex_view = 0; + } + } + if (pass->wgpu.ds_att.render_tex_view) { + wgpuTextureViewRelease(pass->wgpu.ds_att.render_tex_view); + pass->wgpu.ds_att.render_tex_view = 0; + } +} + +_SOKOL_PRIVATE _sg_image_t* _sg_wgpu_pass_color_image(const _sg_pass_t* pass, int index) { + SOKOL_ASSERT(pass && (index >= 0) && (index < SG_MAX_COLOR_ATTACHMENTS)); + /* NOTE: may return null */ + return pass->wgpu.color_atts[index].image; +} + +_SOKOL_PRIVATE _sg_image_t* _sg_wgpu_pass_ds_image(const _sg_pass_t* pass) { + /* NOTE: may return null */ + SOKOL_ASSERT(pass); + return pass->wgpu.ds_att.image; +} + +_SOKOL_PRIVATE void _sg_wgpu_begin_pass(_sg_pass_t* pass, const sg_pass_action* action, int w, int h) { + SOKOL_ASSERT(action); + SOKOL_ASSERT(!_sg.wgpu.in_pass); + SOKOL_ASSERT(_sg.wgpu.render_cmd_enc); + SOKOL_ASSERT(_sg.wgpu.dev); + SOKOL_ASSERT(_sg.wgpu.render_view_cb || _sg.wgpu.render_view_userdata_cb); + SOKOL_ASSERT(_sg.wgpu.resolve_view_cb || _sg.wgpu.resolve_view_userdata_cb); + SOKOL_ASSERT(_sg.wgpu.depth_stencil_view_cb || _sg.wgpu.depth_stencil_view_userdata_cb); + _sg.wgpu.in_pass = true; + _sg.wgpu.cur_width = w; + _sg.wgpu.cur_height = h; + _sg.wgpu.cur_pipeline = 0; + _sg.wgpu.cur_pipeline_id.id = SG_INVALID_ID; + + SOKOL_ASSERT(_sg.wgpu.render_cmd_enc); + if (pass) { + WGPURenderPassDescriptor wgpu_pass_desc; + memset(&wgpu_pass_desc, 0, sizeof(wgpu_pass_desc)); + WGPURenderPassColorAttachmentDescriptor wgpu_color_att_desc[SG_MAX_COLOR_ATTACHMENTS]; + memset(&wgpu_color_att_desc, 0, sizeof(wgpu_color_att_desc)); + SOKOL_ASSERT(pass->slot.state == SG_RESOURCESTATE_VALID); + for (uint32_t i = 0; i < pass->cmn.num_color_atts; i++) { + const _sg_wgpu_attachment_t* wgpu_att = &pass->wgpu.color_atts[i]; + wgpu_color_att_desc[i].loadOp = 
_sg_wgpu_load_op(action->colors[i].action); + wgpu_color_att_desc[i].storeOp = WGPUStoreOp_Store; + wgpu_color_att_desc[i].clearColor.r = action->colors[i].value.r; + wgpu_color_att_desc[i].clearColor.g = action->colors[i].value.g; + wgpu_color_att_desc[i].clearColor.b = action->colors[i].value.b; + wgpu_color_att_desc[i].clearColor.a = action->colors[i].value.a; + wgpu_color_att_desc[i].attachment = wgpu_att->render_tex_view; + if (wgpu_att->image->cmn.sample_count > 1) { + wgpu_color_att_desc[i].resolveTarget = wgpu_att->resolve_tex_view; + } + } + wgpu_pass_desc.colorAttachmentCount = pass->cmn.num_color_atts; + wgpu_pass_desc.colorAttachments = &wgpu_color_att_desc[0]; + if (pass->wgpu.ds_att.image) { + WGPURenderPassDepthStencilAttachmentDescriptor wgpu_ds_att_desc; + memset(&wgpu_ds_att_desc, 0, sizeof(wgpu_ds_att_desc)); + wgpu_ds_att_desc.depthLoadOp = _sg_wgpu_load_op(action->depth.action); + wgpu_ds_att_desc.clearDepth = action->depth.value; + wgpu_ds_att_desc.stencilLoadOp = _sg_wgpu_load_op(action->stencil.action); + wgpu_ds_att_desc.clearStencil = action->stencil.value; + wgpu_ds_att_desc.attachment = pass->wgpu.ds_att.render_tex_view; + wgpu_pass_desc.depthStencilAttachment = &wgpu_ds_att_desc; + _sg.wgpu.pass_enc = wgpuCommandEncoderBeginRenderPass(_sg.wgpu.render_cmd_enc, &wgpu_pass_desc); + } + } + else { + /* default render pass */ + WGPUTextureView wgpu_render_view = _sg.wgpu.render_view_cb ? _sg.wgpu.render_view_cb() : _sg.wgpu.render_view_userdata_cb(_sg.wgpu.user_data); + WGPUTextureView wgpu_resolve_view = _sg.wgpu.resolve_view_cb ? _sg.wgpu.resolve_view_cb() : _sg.wgpu.resolve_view_userdata_cb(_sg.wgpu.user_data); + WGPUTextureView wgpu_depth_stencil_view = _sg.wgpu.depth_stencil_view_cb ? _sg.wgpu.depth_stencil_view_cb() : _sg.wgpu.depth_stencil_view_userdata_cb(_sg.wgpu.user_data); + + WGPURenderPassDescriptor pass_desc; + memset(&pass_desc, 0, sizeof(pass_desc)); + WGPURenderPassColorAttachmentDescriptor color_att_desc; + memset(&color_att_desc, 0, sizeof(color_att_desc)); + color_att_desc.loadOp = _sg_wgpu_load_op(action->colors[0].action); + color_att_desc.clearColor.r = action->colors[0].value.r; + color_att_desc.clearColor.g = action->colors[0].value.g; + color_att_desc.clearColor.b = action->colors[0].value.b; + color_att_desc.clearColor.a = action->colors[0].value.a; + color_att_desc.attachment = wgpu_render_view; + color_att_desc.resolveTarget = wgpu_resolve_view; /* null if no MSAA rendering */ + pass_desc.colorAttachmentCount = 1; + pass_desc.colorAttachments = &color_att_desc; + WGPURenderPassDepthStencilAttachmentDescriptor ds_att_desc; + memset(&ds_att_desc, 0, sizeof(ds_att_desc)); + ds_att_desc.attachment = wgpu_depth_stencil_view; + SOKOL_ASSERT(0 != ds_att_desc.attachment); + ds_att_desc.depthLoadOp = _sg_wgpu_load_op(action->depth.action); + ds_att_desc.clearDepth = action->depth.value; + ds_att_desc.stencilLoadOp = _sg_wgpu_load_op(action->stencil.action); + ds_att_desc.clearStencil = action->stencil.value; + pass_desc.depthStencilAttachment = &ds_att_desc; + _sg.wgpu.pass_enc = wgpuCommandEncoderBeginRenderPass(_sg.wgpu.render_cmd_enc, &pass_desc); + } + SOKOL_ASSERT(_sg.wgpu.pass_enc); + + /* initial uniform buffer binding (required even if no uniforms are set in the frame) */ + wgpuRenderPassEncoderSetBindGroup(_sg.wgpu.pass_enc, + 0, /* groupIndex 0 is reserved for uniform buffers */ + _sg.wgpu.ub.bindgroup, + SG_NUM_SHADER_STAGES * SG_MAX_SHADERSTAGE_UBS, + &_sg.wgpu.ub.bind_offsets[0][0]); +} + +_SOKOL_PRIVATE void _sg_wgpu_end_pass(void) 
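+/* _sg_wgpu_end_pass() only finishes and releases the render-pass encoder;
+   the underlying command buffers are submitted later in _sg_wgpu_commit().
+   At the public API level the per-frame sequence is roughly (illustrative
+   sketch only, variable names are placeholders):
+
+       sg_begin_default_pass(&pass_action, width, height);
+       sg_apply_pipeline(pip);
+       sg_apply_bindings(&bind);
+       sg_draw(0, num_elements, 1);
+       sg_end_pass();
+       sg_commit();
+*/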
{ + SOKOL_ASSERT(_sg.wgpu.in_pass); + SOKOL_ASSERT(_sg.wgpu.pass_enc); + _sg.wgpu.in_pass = false; + wgpuRenderPassEncoderEndPass(_sg.wgpu.pass_enc); + wgpuRenderPassEncoderRelease(_sg.wgpu.pass_enc); + _sg.wgpu.pass_enc = 0; +} + +_SOKOL_PRIVATE void _sg_wgpu_commit(void) { + SOKOL_ASSERT(!_sg.wgpu.in_pass); + SOKOL_ASSERT(_sg.wgpu.queue); + SOKOL_ASSERT(_sg.wgpu.render_cmd_enc); + SOKOL_ASSERT(_sg.wgpu.staging_cmd_enc); + + /* finish and submit this frame's work */ + _sg_wgpu_ubpool_flush(); + _sg_wgpu_staging_unmap(); + + WGPUCommandBuffer cmd_bufs[2]; + + WGPUCommandBufferDescriptor cmd_buf_desc; + memset(&cmd_buf_desc, 0, sizeof(cmd_buf_desc)); + cmd_bufs[0] = wgpuCommandEncoderFinish(_sg.wgpu.staging_cmd_enc, &cmd_buf_desc); + SOKOL_ASSERT(cmd_bufs[0]); + wgpuCommandEncoderRelease(_sg.wgpu.staging_cmd_enc); + _sg.wgpu.staging_cmd_enc = 0; + + cmd_bufs[1] = wgpuCommandEncoderFinish(_sg.wgpu.render_cmd_enc, &cmd_buf_desc); + SOKOL_ASSERT(cmd_bufs[1]); + wgpuCommandEncoderRelease(_sg.wgpu.render_cmd_enc); + _sg.wgpu.render_cmd_enc = 0; + + wgpuQueueSubmit(_sg.wgpu.queue, 2, &cmd_bufs[0]); + + wgpuCommandBufferRelease(cmd_bufs[0]); + wgpuCommandBufferRelease(cmd_bufs[1]); + + /* create a new render- and staging-command-encoders for next frame */ + WGPUCommandEncoderDescriptor cmd_enc_desc; + memset(&cmd_enc_desc, 0, sizeof(cmd_enc_desc)); + _sg.wgpu.staging_cmd_enc = wgpuDeviceCreateCommandEncoder(_sg.wgpu.dev, &cmd_enc_desc); + _sg.wgpu.render_cmd_enc = wgpuDeviceCreateCommandEncoder(_sg.wgpu.dev, &cmd_enc_desc); + + /* grab new staging buffers for uniform- and vertex/image-updates */ + _sg_wgpu_ubpool_next_frame(false); + _sg_wgpu_staging_next_frame(false); +} + +_SOKOL_PRIVATE void _sg_wgpu_apply_viewport(int x, int y, int w, int h, bool origin_top_left) { + SOKOL_ASSERT(_sg.wgpu.in_pass); + SOKOL_ASSERT(_sg.wgpu.pass_enc); + float xf = (float) x; + float yf = (float) (origin_top_left ? y : (_sg.wgpu.cur_height - (y + h))); + float wf = (float) w; + float hf = (float) h; + wgpuRenderPassEncoderSetViewport(_sg.wgpu.pass_enc, xf, yf, wf, hf, 0.0f, 1.0f); +} + +_SOKOL_PRIVATE void _sg_wgpu_apply_scissor_rect(int x, int y, int w, int h, bool origin_top_left) { + SOKOL_ASSERT(_sg.wgpu.in_pass); + SOKOL_ASSERT(_sg.wgpu.pass_enc); + SOKOL_ASSERT(_sg.wgpu.in_pass); + SOKOL_ASSERT(_sg.wgpu.pass_enc); + + /* clip against framebuffer rect */ + x = _sg_min(_sg_max(0, x), _sg.wgpu.cur_width-1); + y = _sg_min(_sg_max(0, y), _sg.wgpu.cur_height-1); + if ((x + w) > _sg.wgpu.cur_width) { + w = _sg.wgpu.cur_width - x; + } + if ((y + h) > _sg.wgpu.cur_height) { + h = _sg.wgpu.cur_height - y; + } + w = _sg_max(w, 1); + h = _sg_max(h, 1); + + uint32_t sx = (uint32_t) x; + uint32_t sy = origin_top_left ? 
y : (_sg.wgpu.cur_height - (y + h)); + uint32_t sw = w; + uint32_t sh = h; + wgpuRenderPassEncoderSetScissorRect(_sg.wgpu.pass_enc, sx, sy, sw, sh); +} + +_SOKOL_PRIVATE void _sg_wgpu_apply_pipeline(_sg_pipeline_t* pip) { + SOKOL_ASSERT(pip); + SOKOL_ASSERT(pip->wgpu.pip); + SOKOL_ASSERT(_sg.wgpu.in_pass); + SOKOL_ASSERT(_sg.wgpu.pass_enc); + _sg.wgpu.draw_indexed = (pip->cmn.index_type != SG_INDEXTYPE_NONE); + _sg.wgpu.cur_pipeline = pip; + _sg.wgpu.cur_pipeline_id.id = pip->slot.id; + wgpuRenderPassEncoderSetPipeline(_sg.wgpu.pass_enc, pip->wgpu.pip); + wgpuRenderPassEncoderSetBlendColor(_sg.wgpu.pass_enc, (WGPUColor*)&pip->cmn.blend_color); + wgpuRenderPassEncoderSetStencilReference(_sg.wgpu.pass_enc, pip->wgpu.stencil_ref); +} + +_SOKOL_PRIVATE WGPUBindGroup _sg_wgpu_create_images_bindgroup(WGPUBindGroupLayout bgl, _sg_image_t** imgs, int num_imgs) { + SOKOL_ASSERT(_sg.wgpu.dev); + SOKOL_ASSERT(num_imgs <= _SG_WGPU_MAX_SHADERSTAGE_IMAGES); + WGPUBindGroupBinding img_bgb[_SG_WGPU_MAX_SHADERSTAGE_IMAGES * 2]; + memset(&img_bgb, 0, sizeof(img_bgb)); + for (int img_index = 0; img_index < num_imgs; img_index++) { + WGPUBindGroupBinding* tex_bdg = &img_bgb[img_index*2 + 0]; + WGPUBindGroupBinding* smp_bdg = &img_bgb[img_index*2 + 1]; + tex_bdg->binding = img_index; + tex_bdg->textureView = imgs[img_index]->wgpu.tex_view; + smp_bdg->binding = img_index + _SG_WGPU_MAX_SHADERSTAGE_IMAGES; + smp_bdg->sampler = imgs[img_index]->wgpu.sampler; + } + WGPUBindGroupDescriptor bg_desc; + memset(&bg_desc, 0, sizeof(bg_desc)); + bg_desc.layout = bgl; + bg_desc.bindingCount = 2 * num_imgs; + bg_desc.bindings = &img_bgb[0]; + WGPUBindGroup bg = wgpuDeviceCreateBindGroup(_sg.wgpu.dev, &bg_desc); + SOKOL_ASSERT(bg); + return bg; +} + +_SOKOL_PRIVATE void _sg_wgpu_apply_bindings( + _sg_pipeline_t* pip, + _sg_buffer_t** vbs, const int* vb_offsets, int num_vbs, + _sg_buffer_t* ib, int ib_offset, + _sg_image_t** vs_imgs, int num_vs_imgs, + _sg_image_t** fs_imgs, int num_fs_imgs) +{ + SOKOL_ASSERT(_sg.wgpu.in_pass); + SOKOL_ASSERT(_sg.wgpu.pass_enc); + SOKOL_ASSERT(pip->shader && (pip->cmn.shader_id.id == pip->shader->slot.id)); + + /* index buffer */ + if (ib) { + wgpuRenderPassEncoderSetIndexBuffer(_sg.wgpu.pass_enc, ib->wgpu.buf, ib_offset); + } + + /* vertex buffers */ + for (uint32_t slot = 0; slot < (uint32_t)num_vbs; slot++) { + wgpuRenderPassEncoderSetVertexBuffer(_sg.wgpu.pass_enc, slot, vbs[slot]->wgpu.buf, (uint64_t)vb_offsets[slot]); + } + + /* need to create throw-away bind groups for images */ + if (num_vs_imgs > 0) { + if (num_vs_imgs > _SG_WGPU_MAX_SHADERSTAGE_IMAGES) { + num_vs_imgs = _SG_WGPU_MAX_SHADERSTAGE_IMAGES; + } + WGPUBindGroupLayout vs_bgl = pip->shader->wgpu.stage[SG_SHADERSTAGE_VS].bind_group_layout; + SOKOL_ASSERT(vs_bgl); + WGPUBindGroup vs_img_bg = _sg_wgpu_create_images_bindgroup(vs_bgl, vs_imgs, num_vs_imgs); + wgpuRenderPassEncoderSetBindGroup(_sg.wgpu.pass_enc, 1, vs_img_bg, 0, 0); + wgpuBindGroupRelease(vs_img_bg); + } + else { + wgpuRenderPassEncoderSetBindGroup(_sg.wgpu.pass_enc, 1, _sg.wgpu.empty_bind_group, 0, 0); + } + if (num_fs_imgs > 0) { + if (num_fs_imgs > _SG_WGPU_MAX_SHADERSTAGE_IMAGES) { + num_fs_imgs = _SG_WGPU_MAX_SHADERSTAGE_IMAGES; + } + WGPUBindGroupLayout fs_bgl = pip->shader->wgpu.stage[SG_SHADERSTAGE_FS].bind_group_layout; + SOKOL_ASSERT(fs_bgl); + WGPUBindGroup fs_img_bg = _sg_wgpu_create_images_bindgroup(fs_bgl, fs_imgs, num_fs_imgs); + wgpuRenderPassEncoderSetBindGroup(_sg.wgpu.pass_enc, 2, fs_img_bg, 0, 0); + wgpuBindGroupRelease(fs_img_bg); + } + else 
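+        /* no fragment-shader images for this draw: keep bind group slot 2
+           occupied with the shared empty bind group, so the fixed group
+           layout (0: uniform buffers, 1: vs images, 2: fs images) stays
+           complete */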
{ + wgpuRenderPassEncoderSetBindGroup(_sg.wgpu.pass_enc, 2, _sg.wgpu.empty_bind_group, 0, 0); + } +} + +_SOKOL_PRIVATE void _sg_wgpu_apply_uniforms(sg_shader_stage stage_index, int ub_index, const sg_range* data) { + SOKOL_ASSERT(_sg.wgpu.in_pass); + SOKOL_ASSERT(_sg.wgpu.pass_enc); + SOKOL_ASSERT((_sg.wgpu.ub.offset + data->size) <= _sg.wgpu.ub.num_bytes); + SOKOL_ASSERT((_sg.wgpu.ub.offset & (_SG_WGPU_STAGING_ALIGN-1)) == 0); + SOKOL_ASSERT(_sg.wgpu.cur_pipeline && _sg.wgpu.cur_pipeline->shader); + SOKOL_ASSERT(_sg.wgpu.cur_pipeline->slot.id == _sg.wgpu.cur_pipeline_id.id); + SOKOL_ASSERT(_sg.wgpu.cur_pipeline->shader->slot.id == _sg.wgpu.cur_pipeline->cmn.shader_id.id); + SOKOL_ASSERT(ub_index < _sg.wgpu.cur_pipeline->shader->cmn.stage[stage_index].num_uniform_blocks); + SOKOL_ASSERT(data->size <= _sg.wgpu.cur_pipeline->shader->cmn.stage[stage_index].uniform_blocks[ub_index].size); + SOKOL_ASSERT(data->size <= _SG_WGPU_MAX_UNIFORM_UPDATE_SIZE); + SOKOL_ASSERT(0 != _sg.wgpu.ub.stage.ptr[_sg.wgpu.ub.stage.cur]); + + uint8_t* dst_ptr = _sg.wgpu.ub.stage.ptr[_sg.wgpu.ub.stage.cur] + _sg.wgpu.ub.offset; + memcpy(dst_ptr, data->ptr, data->size); + _sg.wgpu.ub.bind_offsets[stage_index][ub_index] = _sg.wgpu.ub.offset; + wgpuRenderPassEncoderSetBindGroup(_sg.wgpu.pass_enc, + 0, /* groupIndex 0 is reserved for uniform buffers */ + _sg.wgpu.ub.bindgroup, + SG_NUM_SHADER_STAGES * SG_MAX_SHADERSTAGE_UBS, + &_sg.wgpu.ub.bind_offsets[0][0]); + _sg.wgpu.ub.offset = _sg_roundup(_sg.wgpu.ub.offset + data->size, _SG_WGPU_STAGING_ALIGN); +} + +_SOKOL_PRIVATE void _sg_wgpu_draw(int base_element, int num_elements, int num_instances) { + SOKOL_ASSERT(_sg.wgpu.in_pass); + SOKOL_ASSERT(_sg.wgpu.pass_enc); + if (_sg.wgpu.draw_indexed) { + wgpuRenderPassEncoderDrawIndexed(_sg.wgpu.pass_enc, num_elements, num_instances, base_element, 0, 0); + } + else { + wgpuRenderPassEncoderDraw(_sg.wgpu.pass_enc, num_elements, num_instances, base_element, 0); + } +} + +_SOKOL_PRIVATE void _sg_wgpu_update_buffer(_sg_buffer_t* buf, const sg_range* data) { + SOKOL_ASSERT(buf && data && data->ptr && (data->size > 0)); + uint32_t copied_num_bytes = _sg_wgpu_staging_copy_to_buffer(buf->wgpu.buf, 0, data->ptr, data->size); + SOKOL_ASSERT(copied_num_bytes > 0); _SOKOL_UNUSED(copied_num_bytes); +} + +_SOKOL_PRIVATE int _sg_wgpu_append_buffer(_sg_buffer_t* buf, const sg_range* data, bool new_frame) { + SOKOL_ASSERT(buf && data && data->ptr && (data->size > 0)); + _SOKOL_UNUSED(new_frame); + uint32_t copied_num_bytes = _sg_wgpu_staging_copy_to_buffer(buf->wgpu.buf, buf->cmn.append_pos, data->ptr, data->size); + SOKOL_ASSERT(copied_num_bytes > 0); _SOKOL_UNUSED(copied_num_bytes); + return (int)copied_num_bytes; +} + +_SOKOL_PRIVATE void _sg_wgpu_update_image(_sg_image_t* img, const sg_image_data* data) { + SOKOL_ASSERT(img && data); + bool success = _sg_wgpu_staging_copy_to_texture(img, data); + SOKOL_ASSERT(success); + _SOKOL_UNUSED(success); +} +#endif + +/*== BACKEND API WRAPPERS ====================================================*/ +static inline void _sg_setup_backend(const sg_desc* desc) { + #if defined(_SOKOL_ANY_GL) + _sg_gl_setup_backend(desc); + #elif defined(SOKOL_METAL) + _sg_mtl_setup_backend(desc); + #elif defined(SOKOL_D3D11) + _sg_d3d11_setup_backend(desc); + #elif defined(SOKOL_WGPU) + _sg_wgpu_setup_backend(desc); + #elif defined(SOKOL_DUMMY_BACKEND) + _sg_dummy_setup_backend(desc); + #else + #error("INVALID BACKEND"); + #endif +} + +static inline void _sg_discard_backend(void) { + #if defined(_SOKOL_ANY_GL) + 
_sg_gl_discard_backend(); + #elif defined(SOKOL_METAL) + _sg_mtl_discard_backend(); + #elif defined(SOKOL_D3D11) + _sg_d3d11_discard_backend(); + #elif defined(SOKOL_WGPU) + _sg_wgpu_discard_backend(); + #elif defined(SOKOL_DUMMY_BACKEND) + _sg_dummy_discard_backend(); + #else + #error("INVALID BACKEND"); + #endif +} + +static inline void _sg_reset_state_cache(void) { + #if defined(_SOKOL_ANY_GL) + _sg_gl_reset_state_cache(); + #elif defined(SOKOL_METAL) + _sg_mtl_reset_state_cache(); + #elif defined(SOKOL_D3D11) + _sg_d3d11_reset_state_cache(); + #elif defined(SOKOL_WGPU) + _sg_wgpu_reset_state_cache(); + #elif defined(SOKOL_DUMMY_BACKEND) + _sg_dummy_reset_state_cache(); + #else + #error("INVALID BACKEND"); + #endif +} + +static inline void _sg_activate_context(_sg_context_t* ctx) { + #if defined(_SOKOL_ANY_GL) + _sg_gl_activate_context(ctx); + #elif defined(SOKOL_METAL) + _sg_mtl_activate_context(ctx); + #elif defined(SOKOL_D3D11) + _sg_d3d11_activate_context(ctx); + #elif defined(SOKOL_WGPU) + _sg_wgpu_activate_context(ctx); + #elif defined(SOKOL_DUMMY_BACKEND) + _sg_dummy_activate_context(ctx); + #else + #error("INVALID BACKEND"); + #endif +} + +static inline sg_resource_state _sg_create_context(_sg_context_t* ctx) { + #if defined(_SOKOL_ANY_GL) + return _sg_gl_create_context(ctx); + #elif defined(SOKOL_METAL) + return _sg_mtl_create_context(ctx); + #elif defined(SOKOL_D3D11) + return _sg_d3d11_create_context(ctx); + #elif defined(SOKOL_WGPU) + return _sg_wgpu_create_context(ctx); + #elif defined(SOKOL_DUMMY_BACKEND) + return _sg_dummy_create_context(ctx); + #else + #error("INVALID BACKEND"); + #endif +} + +static inline void _sg_destroy_context(_sg_context_t* ctx) { + #if defined(_SOKOL_ANY_GL) + _sg_gl_destroy_context(ctx); + #elif defined(SOKOL_METAL) + _sg_mtl_destroy_context(ctx); + #elif defined(SOKOL_D3D11) + _sg_d3d11_destroy_context(ctx); + #elif defined(SOKOL_WGPU) + _sg_wgpu_destroy_context(ctx); + #elif defined(SOKOL_DUMMY_BACKEND) + _sg_dummy_destroy_context(ctx); + #else + #error("INVALID BACKEND"); + #endif +} + +static inline sg_resource_state _sg_create_buffer(_sg_buffer_t* buf, const sg_buffer_desc* desc) { + #if defined(_SOKOL_ANY_GL) + return _sg_gl_create_buffer(buf, desc); + #elif defined(SOKOL_METAL) + return _sg_mtl_create_buffer(buf, desc); + #elif defined(SOKOL_D3D11) + return _sg_d3d11_create_buffer(buf, desc); + #elif defined(SOKOL_WGPU) + return _sg_wgpu_create_buffer(buf, desc); + #elif defined(SOKOL_DUMMY_BACKEND) + return _sg_dummy_create_buffer(buf, desc); + #else + #error("INVALID BACKEND"); + #endif +} + +static inline void _sg_destroy_buffer(_sg_buffer_t* buf) { + #if defined(_SOKOL_ANY_GL) + _sg_gl_destroy_buffer(buf); + #elif defined(SOKOL_METAL) + _sg_mtl_destroy_buffer(buf); + #elif defined(SOKOL_D3D11) + _sg_d3d11_destroy_buffer(buf); + #elif defined(SOKOL_WGPU) + _sg_wgpu_destroy_buffer(buf); + #elif defined(SOKOL_DUMMY_BACKEND) + _sg_dummy_destroy_buffer(buf); + #else + #error("INVALID BACKEND"); + #endif +} + +static inline sg_resource_state _sg_create_image(_sg_image_t* img, const sg_image_desc* desc) { + #if defined(_SOKOL_ANY_GL) + return _sg_gl_create_image(img, desc); + #elif defined(SOKOL_METAL) + return _sg_mtl_create_image(img, desc); + #elif defined(SOKOL_D3D11) + return _sg_d3d11_create_image(img, desc); + #elif defined(SOKOL_WGPU) + return _sg_wgpu_create_image(img, desc); + #elif defined(SOKOL_DUMMY_BACKEND) + return _sg_dummy_create_image(img, desc); + #else + #error("INVALID BACKEND"); + #endif +} + +static inline void 
_sg_destroy_image(_sg_image_t* img) { + #if defined(_SOKOL_ANY_GL) + _sg_gl_destroy_image(img); + #elif defined(SOKOL_METAL) + _sg_mtl_destroy_image(img); + #elif defined(SOKOL_D3D11) + _sg_d3d11_destroy_image(img); + #elif defined(SOKOL_WGPU) + _sg_wgpu_destroy_image(img); + #elif defined(SOKOL_DUMMY_BACKEND) + _sg_dummy_destroy_image(img); + #else + #error("INVALID BACKEND"); + #endif +} + +static inline sg_resource_state _sg_create_shader(_sg_shader_t* shd, const sg_shader_desc* desc) { + #if defined(_SOKOL_ANY_GL) + return _sg_gl_create_shader(shd, desc); + #elif defined(SOKOL_METAL) + return _sg_mtl_create_shader(shd, desc); + #elif defined(SOKOL_D3D11) + return _sg_d3d11_create_shader(shd, desc); + #elif defined(SOKOL_WGPU) + return _sg_wgpu_create_shader(shd, desc); + #elif defined(SOKOL_DUMMY_BACKEND) + return _sg_dummy_create_shader(shd, desc); + #else + #error("INVALID BACKEND"); + #endif +} + +static inline void _sg_destroy_shader(_sg_shader_t* shd) { + #if defined(_SOKOL_ANY_GL) + _sg_gl_destroy_shader(shd); + #elif defined(SOKOL_METAL) + _sg_mtl_destroy_shader(shd); + #elif defined(SOKOL_D3D11) + _sg_d3d11_destroy_shader(shd); + #elif defined(SOKOL_WGPU) + _sg_wgpu_destroy_shader(shd); + #elif defined(SOKOL_DUMMY_BACKEND) + _sg_dummy_destroy_shader(shd); + #else + #error("INVALID BACKEND"); + #endif +} + +static inline sg_resource_state _sg_create_pipeline(_sg_pipeline_t* pip, _sg_shader_t* shd, const sg_pipeline_desc* desc) { + #if defined(_SOKOL_ANY_GL) + return _sg_gl_create_pipeline(pip, shd, desc); + #elif defined(SOKOL_METAL) + return _sg_mtl_create_pipeline(pip, shd, desc); + #elif defined(SOKOL_D3D11) + return _sg_d3d11_create_pipeline(pip, shd, desc); + #elif defined(SOKOL_WGPU) + return _sg_wgpu_create_pipeline(pip, shd, desc); + #elif defined(SOKOL_DUMMY_BACKEND) + return _sg_dummy_create_pipeline(pip, shd, desc); + #else + #error("INVALID BACKEND"); + #endif +} + +static inline void _sg_destroy_pipeline(_sg_pipeline_t* pip) { + #if defined(_SOKOL_ANY_GL) + _sg_gl_destroy_pipeline(pip); + #elif defined(SOKOL_METAL) + _sg_mtl_destroy_pipeline(pip); + #elif defined(SOKOL_D3D11) + _sg_d3d11_destroy_pipeline(pip); + #elif defined(SOKOL_WGPU) + _sg_wgpu_destroy_pipeline(pip); + #elif defined(SOKOL_DUMMY_BACKEND) + _sg_dummy_destroy_pipeline(pip); + #else + #error("INVALID BACKEND"); + #endif +} + +static inline sg_resource_state _sg_create_pass(_sg_pass_t* pass, _sg_image_t** att_images, const sg_pass_desc* desc) { + #if defined(_SOKOL_ANY_GL) + return _sg_gl_create_pass(pass, att_images, desc); + #elif defined(SOKOL_METAL) + return _sg_mtl_create_pass(pass, att_images, desc); + #elif defined(SOKOL_D3D11) + return _sg_d3d11_create_pass(pass, att_images, desc); + #elif defined(SOKOL_WGPU) + return _sg_wgpu_create_pass(pass, att_images, desc); + #elif defined(SOKOL_DUMMY_BACKEND) + return _sg_dummy_create_pass(pass, att_images, desc); + #else + #error("INVALID BACKEND"); + #endif +} + +static inline void _sg_destroy_pass(_sg_pass_t* pass) { + #if defined(_SOKOL_ANY_GL) + _sg_gl_destroy_pass(pass); + #elif defined(SOKOL_METAL) + _sg_mtl_destroy_pass(pass); + #elif defined(SOKOL_D3D11) + _sg_d3d11_destroy_pass(pass); + #elif defined(SOKOL_WGPU) + return _sg_wgpu_destroy_pass(pass); + #elif defined(SOKOL_DUMMY_BACKEND) + _sg_dummy_destroy_pass(pass); + #else + #error("INVALID BACKEND"); + #endif +} + +static inline _sg_image_t* _sg_pass_color_image(const _sg_pass_t* pass, int index) { + #if defined(_SOKOL_ANY_GL) + return _sg_gl_pass_color_image(pass, index); + #elif 
defined(SOKOL_METAL) + return _sg_mtl_pass_color_image(pass, index); + #elif defined(SOKOL_D3D11) + return _sg_d3d11_pass_color_image(pass, index); + #elif defined(SOKOL_WGPU) + return _sg_wgpu_pass_color_image(pass, index); + #elif defined(SOKOL_DUMMY_BACKEND) + return _sg_dummy_pass_color_image(pass, index); + #else + #error("INVALID BACKEND"); + #endif +} + +static inline _sg_image_t* _sg_pass_ds_image(const _sg_pass_t* pass) { + #if defined(_SOKOL_ANY_GL) + return _sg_gl_pass_ds_image(pass); + #elif defined(SOKOL_METAL) + return _sg_mtl_pass_ds_image(pass); + #elif defined(SOKOL_D3D11) + return _sg_d3d11_pass_ds_image(pass); + #elif defined(SOKOL_WGPU) + return _sg_wgpu_pass_ds_image(pass); + #elif defined(SOKOL_DUMMY_BACKEND) + return _sg_dummy_pass_ds_image(pass); + #else + #error("INVALID BACKEND"); + #endif +} + +static inline void _sg_begin_pass(_sg_pass_t* pass, const sg_pass_action* action, int w, int h) { + #if defined(_SOKOL_ANY_GL) + _sg_gl_begin_pass(pass, action, w, h); + #elif defined(SOKOL_METAL) + _sg_mtl_begin_pass(pass, action, w, h); + #elif defined(SOKOL_D3D11) + _sg_d3d11_begin_pass(pass, action, w, h); + #elif defined(SOKOL_WGPU) + _sg_wgpu_begin_pass(pass, action, w, h); + #elif defined(SOKOL_DUMMY_BACKEND) + _sg_dummy_begin_pass(pass, action, w, h); + #else + #error("INVALID BACKEND"); + #endif +} + +static inline void _sg_end_pass(void) { + #if defined(_SOKOL_ANY_GL) + _sg_gl_end_pass(); + #elif defined(SOKOL_METAL) + _sg_mtl_end_pass(); + #elif defined(SOKOL_D3D11) + _sg_d3d11_end_pass(); + #elif defined(SOKOL_WGPU) + _sg_wgpu_end_pass(); + #elif defined(SOKOL_DUMMY_BACKEND) + _sg_dummy_end_pass(); + #else + #error("INVALID BACKEND"); + #endif +} + +static inline void _sg_apply_viewport(int x, int y, int w, int h, bool origin_top_left) { + #if defined(_SOKOL_ANY_GL) + _sg_gl_apply_viewport(x, y, w, h, origin_top_left); + #elif defined(SOKOL_METAL) + _sg_mtl_apply_viewport(x, y, w, h, origin_top_left); + #elif defined(SOKOL_D3D11) + _sg_d3d11_apply_viewport(x, y, w, h, origin_top_left); + #elif defined(SOKOL_WGPU) + _sg_wgpu_apply_viewport(x, y, w, h, origin_top_left); + #elif defined(SOKOL_DUMMY_BACKEND) + _sg_dummy_apply_viewport(x, y, w, h, origin_top_left); + #else + #error("INVALID BACKEND"); + #endif +} + +static inline void _sg_apply_scissor_rect(int x, int y, int w, int h, bool origin_top_left) { + #if defined(_SOKOL_ANY_GL) + _sg_gl_apply_scissor_rect(x, y, w, h, origin_top_left); + #elif defined(SOKOL_METAL) + _sg_mtl_apply_scissor_rect(x, y, w, h, origin_top_left); + #elif defined(SOKOL_D3D11) + _sg_d3d11_apply_scissor_rect(x, y, w, h, origin_top_left); + #elif defined(SOKOL_WGPU) + _sg_wgpu_apply_scissor_rect(x, y, w, h, origin_top_left); + #elif defined(SOKOL_DUMMY_BACKEND) + _sg_dummy_apply_scissor_rect(x, y, w, h, origin_top_left); + #else + #error("INVALID BACKEND"); + #endif +} + +static inline void _sg_apply_pipeline(_sg_pipeline_t* pip) { + #if defined(_SOKOL_ANY_GL) + _sg_gl_apply_pipeline(pip); + #elif defined(SOKOL_METAL) + _sg_mtl_apply_pipeline(pip); + #elif defined(SOKOL_D3D11) + _sg_d3d11_apply_pipeline(pip); + #elif defined(SOKOL_WGPU) + _sg_wgpu_apply_pipeline(pip); + #elif defined(SOKOL_DUMMY_BACKEND) + _sg_dummy_apply_pipeline(pip); + #else + #error("INVALID BACKEND"); + #endif +} + +static inline void _sg_apply_bindings( + _sg_pipeline_t* pip, + _sg_buffer_t** vbs, const int* vb_offsets, int num_vbs, + _sg_buffer_t* ib, int ib_offset, + _sg_image_t** vs_imgs, int num_vs_imgs, + _sg_image_t** fs_imgs, int num_fs_imgs) +{ + #if 
defined(_SOKOL_ANY_GL) + _sg_gl_apply_bindings(pip, vbs, vb_offsets, num_vbs, ib, ib_offset, vs_imgs, num_vs_imgs, fs_imgs, num_fs_imgs); + #elif defined(SOKOL_METAL) + _sg_mtl_apply_bindings(pip, vbs, vb_offsets, num_vbs, ib, ib_offset, vs_imgs, num_vs_imgs, fs_imgs, num_fs_imgs); + #elif defined(SOKOL_D3D11) + _sg_d3d11_apply_bindings(pip, vbs, vb_offsets, num_vbs, ib, ib_offset, vs_imgs, num_vs_imgs, fs_imgs, num_fs_imgs); + #elif defined(SOKOL_WGPU) + _sg_wgpu_apply_bindings(pip, vbs, vb_offsets, num_vbs, ib, ib_offset, vs_imgs, num_vs_imgs, fs_imgs, num_fs_imgs); + #elif defined(SOKOL_DUMMY_BACKEND) + _sg_dummy_apply_bindings(pip, vbs, vb_offsets, num_vbs, ib, ib_offset, vs_imgs, num_vs_imgs, fs_imgs, num_fs_imgs); + #else + #error("INVALID BACKEND"); + #endif +} + +static inline void _sg_apply_uniforms(sg_shader_stage stage_index, int ub_index, const sg_range* data) { + #if defined(_SOKOL_ANY_GL) + _sg_gl_apply_uniforms(stage_index, ub_index, data); + #elif defined(SOKOL_METAL) + _sg_mtl_apply_uniforms(stage_index, ub_index, data); + #elif defined(SOKOL_D3D11) + _sg_d3d11_apply_uniforms(stage_index, ub_index, data); + #elif defined(SOKOL_WGPU) + _sg_wgpu_apply_uniforms(stage_index, ub_index, data); + #elif defined(SOKOL_DUMMY_BACKEND) + _sg_dummy_apply_uniforms(stage_index, ub_index, data); + #else + #error("INVALID BACKEND"); + #endif +} + +static inline void _sg_draw(int base_element, int num_elements, int num_instances) { + #if defined(_SOKOL_ANY_GL) + _sg_gl_draw(base_element, num_elements, num_instances); + #elif defined(SOKOL_METAL) + _sg_mtl_draw(base_element, num_elements, num_instances); + #elif defined(SOKOL_D3D11) + _sg_d3d11_draw(base_element, num_elements, num_instances); + #elif defined(SOKOL_WGPU) + _sg_wgpu_draw(base_element, num_elements, num_instances); + #elif defined(SOKOL_DUMMY_BACKEND) + _sg_dummy_draw(base_element, num_elements, num_instances); + #else + #error("INVALID BACKEND"); + #endif +} + +static inline void _sg_commit(void) { + #if defined(_SOKOL_ANY_GL) + _sg_gl_commit(); + #elif defined(SOKOL_METAL) + _sg_mtl_commit(); + #elif defined(SOKOL_D3D11) + _sg_d3d11_commit(); + #elif defined(SOKOL_WGPU) + _sg_wgpu_commit(); + #elif defined(SOKOL_DUMMY_BACKEND) + _sg_dummy_commit(); + #else + #error("INVALID BACKEND"); + #endif +} + +static inline void _sg_update_buffer(_sg_buffer_t* buf, const sg_range* data) { + #if defined(_SOKOL_ANY_GL) + _sg_gl_update_buffer(buf, data); + #elif defined(SOKOL_METAL) + _sg_mtl_update_buffer(buf, data); + #elif defined(SOKOL_D3D11) + _sg_d3d11_update_buffer(buf, data); + #elif defined(SOKOL_WGPU) + _sg_wgpu_update_buffer(buf, data); + #elif defined(SOKOL_DUMMY_BACKEND) + _sg_dummy_update_buffer(buf, data); + #else + #error("INVALID BACKEND"); + #endif +} + +static inline int _sg_append_buffer(_sg_buffer_t* buf, const sg_range* data, bool new_frame) { + #if defined(_SOKOL_ANY_GL) + return _sg_gl_append_buffer(buf, data, new_frame); + #elif defined(SOKOL_METAL) + return _sg_mtl_append_buffer(buf, data, new_frame); + #elif defined(SOKOL_D3D11) + return _sg_d3d11_append_buffer(buf, data, new_frame); + #elif defined(SOKOL_WGPU) + return _sg_wgpu_append_buffer(buf, data, new_frame); + #elif defined(SOKOL_DUMMY_BACKEND) + return _sg_dummy_append_buffer(buf, data, new_frame); + #else + #error("INVALID BACKEND"); + #endif +} + +static inline void _sg_update_image(_sg_image_t* img, const sg_image_data* data) { + #if defined(_SOKOL_ANY_GL) + _sg_gl_update_image(img, data); + #elif defined(SOKOL_METAL) + _sg_mtl_update_image(img, data); 
+ #elif defined(SOKOL_D3D11) + _sg_d3d11_update_image(img, data); + #elif defined(SOKOL_WGPU) + _sg_wgpu_update_image(img, data); + #elif defined(SOKOL_DUMMY_BACKEND) + _sg_dummy_update_image(img, data); + #else + #error("INVALID BACKEND"); + #endif +} + +/*== RESOURCE POOLS ==========================================================*/ + +_SOKOL_PRIVATE void _sg_init_pool(_sg_pool_t* pool, int num) { + SOKOL_ASSERT(pool && (num >= 1)); + /* slot 0 is reserved for the 'invalid id', so bump the pool size by 1 */ + pool->size = num + 1; + pool->queue_top = 0; + /* generation counters indexable by pool slot index, slot 0 is reserved */ + size_t gen_ctrs_size = sizeof(uint32_t) * (size_t)pool->size; + pool->gen_ctrs = (uint32_t*) SOKOL_MALLOC(gen_ctrs_size); + SOKOL_ASSERT(pool->gen_ctrs); + memset(pool->gen_ctrs, 0, gen_ctrs_size); + /* it's not a bug to only reserve 'num' here */ + pool->free_queue = (int*) SOKOL_MALLOC(sizeof(int) * (size_t)num); + SOKOL_ASSERT(pool->free_queue); + /* never allocate the zero-th pool item since the invalid id is 0 */ + for (int i = pool->size-1; i >= 1; i--) { + pool->free_queue[pool->queue_top++] = i; + } +} + +_SOKOL_PRIVATE void _sg_discard_pool(_sg_pool_t* pool) { + SOKOL_ASSERT(pool); + SOKOL_ASSERT(pool->free_queue); + SOKOL_FREE(pool->free_queue); + pool->free_queue = 0; + SOKOL_ASSERT(pool->gen_ctrs); + SOKOL_FREE(pool->gen_ctrs); + pool->gen_ctrs = 0; + pool->size = 0; + pool->queue_top = 0; +} + +_SOKOL_PRIVATE int _sg_pool_alloc_index(_sg_pool_t* pool) { + SOKOL_ASSERT(pool); + SOKOL_ASSERT(pool->free_queue); + if (pool->queue_top > 0) { + int slot_index = pool->free_queue[--pool->queue_top]; + SOKOL_ASSERT((slot_index > 0) && (slot_index < pool->size)); + return slot_index; + } + else { + /* pool exhausted */ + return _SG_INVALID_SLOT_INDEX; + } +} + +_SOKOL_PRIVATE void _sg_pool_free_index(_sg_pool_t* pool, int slot_index) { + SOKOL_ASSERT((slot_index > _SG_INVALID_SLOT_INDEX) && (slot_index < pool->size)); + SOKOL_ASSERT(pool); + SOKOL_ASSERT(pool->free_queue); + SOKOL_ASSERT(pool->queue_top < pool->size); + #ifdef SOKOL_DEBUG + /* debug check against double-free */ + for (int i = 0; i < pool->queue_top; i++) { + SOKOL_ASSERT(pool->free_queue[i] != slot_index); + } + #endif + pool->free_queue[pool->queue_top++] = slot_index; + SOKOL_ASSERT(pool->queue_top <= (pool->size-1)); +} + +_SOKOL_PRIVATE void _sg_reset_slot(_sg_slot_t* slot) { + SOKOL_ASSERT(slot); + memset(slot, 0, sizeof(_sg_slot_t)); +} + +_SOKOL_PRIVATE void _sg_reset_buffer(_sg_buffer_t* buf) { + SOKOL_ASSERT(buf); + _sg_slot_t slot = buf->slot; + memset(buf, 0, sizeof(_sg_buffer_t)); + buf->slot = slot; + buf->slot.state = SG_RESOURCESTATE_ALLOC; +} + +_SOKOL_PRIVATE void _sg_reset_image(_sg_image_t* img) { + SOKOL_ASSERT(img); + _sg_slot_t slot = img->slot; + memset(img, 0, sizeof(_sg_image_t)); + img->slot = slot; + img->slot.state = SG_RESOURCESTATE_ALLOC; +} + +_SOKOL_PRIVATE void _sg_reset_shader(_sg_shader_t* shd) { + SOKOL_ASSERT(shd); + _sg_slot_t slot = shd->slot; + memset(shd, 0, sizeof(_sg_shader_t)); + shd->slot = slot; + shd->slot.state = SG_RESOURCESTATE_ALLOC; +} + +_SOKOL_PRIVATE void _sg_reset_pipeline(_sg_pipeline_t* pip) { + SOKOL_ASSERT(pip); + _sg_slot_t slot = pip->slot; + memset(pip, 0, sizeof(_sg_pipeline_t)); + pip->slot = slot; + pip->slot.state = SG_RESOURCESTATE_ALLOC; +} + +_SOKOL_PRIVATE void _sg_reset_pass(_sg_pass_t* pass) { + SOKOL_ASSERT(pass); + _sg_slot_t slot = pass->slot; + memset(pass, 0, sizeof(_sg_pass_t)); + pass->slot = slot; + 
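+    /* note: the _sg_reset_*() helpers all follow the same pattern: clear the
+       resource struct, but keep the slot (id and ctx_id) intact and move it
+       back into the ALLOC state, so the existing handle can be initialized
+       again */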
pass->slot.state = SG_RESOURCESTATE_ALLOC; +} + +_SOKOL_PRIVATE void _sg_reset_context(_sg_context_t* ctx) { + SOKOL_ASSERT(ctx); + _sg_slot_t slot = ctx->slot; + memset(ctx, 0, sizeof(_sg_context_t)); + ctx->slot = slot; + ctx->slot.state = SG_RESOURCESTATE_ALLOC; +} + +_SOKOL_PRIVATE void _sg_setup_pools(_sg_pools_t* p, const sg_desc* desc) { + SOKOL_ASSERT(p); + SOKOL_ASSERT(desc); + /* note: the pools here will have an additional item, since slot 0 is reserved */ + SOKOL_ASSERT((desc->buffer_pool_size > 0) && (desc->buffer_pool_size < _SG_MAX_POOL_SIZE)); + _sg_init_pool(&p->buffer_pool, desc->buffer_pool_size); + size_t buffer_pool_byte_size = sizeof(_sg_buffer_t) * (size_t)p->buffer_pool.size; + p->buffers = (_sg_buffer_t*) SOKOL_MALLOC(buffer_pool_byte_size); + SOKOL_ASSERT(p->buffers); + memset(p->buffers, 0, buffer_pool_byte_size); + + SOKOL_ASSERT((desc->image_pool_size > 0) && (desc->image_pool_size < _SG_MAX_POOL_SIZE)); + _sg_init_pool(&p->image_pool, desc->image_pool_size); + size_t image_pool_byte_size = sizeof(_sg_image_t) * (size_t)p->image_pool.size; + p->images = (_sg_image_t*) SOKOL_MALLOC(image_pool_byte_size); + SOKOL_ASSERT(p->images); + memset(p->images, 0, image_pool_byte_size); + + SOKOL_ASSERT((desc->shader_pool_size > 0) && (desc->shader_pool_size < _SG_MAX_POOL_SIZE)); + _sg_init_pool(&p->shader_pool, desc->shader_pool_size); + size_t shader_pool_byte_size = sizeof(_sg_shader_t) * (size_t)p->shader_pool.size; + p->shaders = (_sg_shader_t*) SOKOL_MALLOC(shader_pool_byte_size); + SOKOL_ASSERT(p->shaders); + memset(p->shaders, 0, shader_pool_byte_size); + + SOKOL_ASSERT((desc->pipeline_pool_size > 0) && (desc->pipeline_pool_size < _SG_MAX_POOL_SIZE)); + _sg_init_pool(&p->pipeline_pool, desc->pipeline_pool_size); + size_t pipeline_pool_byte_size = sizeof(_sg_pipeline_t) * (size_t)p->pipeline_pool.size; + p->pipelines = (_sg_pipeline_t*) SOKOL_MALLOC(pipeline_pool_byte_size); + SOKOL_ASSERT(p->pipelines); + memset(p->pipelines, 0, pipeline_pool_byte_size); + + SOKOL_ASSERT((desc->pass_pool_size > 0) && (desc->pass_pool_size < _SG_MAX_POOL_SIZE)); + _sg_init_pool(&p->pass_pool, desc->pass_pool_size); + size_t pass_pool_byte_size = sizeof(_sg_pass_t) * (size_t)p->pass_pool.size; + p->passes = (_sg_pass_t*) SOKOL_MALLOC(pass_pool_byte_size); + SOKOL_ASSERT(p->passes); + memset(p->passes, 0, pass_pool_byte_size); + + SOKOL_ASSERT((desc->context_pool_size > 0) && (desc->context_pool_size < _SG_MAX_POOL_SIZE)); + _sg_init_pool(&p->context_pool, desc->context_pool_size); + size_t context_pool_byte_size = sizeof(_sg_context_t) * (size_t)p->context_pool.size; + p->contexts = (_sg_context_t*) SOKOL_MALLOC(context_pool_byte_size); + SOKOL_ASSERT(p->contexts); + memset(p->contexts, 0, context_pool_byte_size); +} + +_SOKOL_PRIVATE void _sg_discard_pools(_sg_pools_t* p) { + SOKOL_ASSERT(p); + SOKOL_FREE(p->contexts); p->contexts = 0; + SOKOL_FREE(p->passes); p->passes = 0; + SOKOL_FREE(p->pipelines); p->pipelines = 0; + SOKOL_FREE(p->shaders); p->shaders = 0; + SOKOL_FREE(p->images); p->images = 0; + SOKOL_FREE(p->buffers); p->buffers = 0; + _sg_discard_pool(&p->context_pool); + _sg_discard_pool(&p->pass_pool); + _sg_discard_pool(&p->pipeline_pool); + _sg_discard_pool(&p->shader_pool); + _sg_discard_pool(&p->image_pool); + _sg_discard_pool(&p->buffer_pool); +} + +/* allocate the slot at slot_index: + - bump the slot's generation counter + - create a resource id from the generation counter and slot index + - set the slot's id to this id + - set the slot's state to ALLOC + - return 
the resource id +*/ +_SOKOL_PRIVATE uint32_t _sg_slot_alloc(_sg_pool_t* pool, _sg_slot_t* slot, int slot_index) { + /* FIXME: add handling for an overflowing generation counter, + for now, just overflow (another option is to disable + the slot) + */ + SOKOL_ASSERT(pool && pool->gen_ctrs); + SOKOL_ASSERT((slot_index > _SG_INVALID_SLOT_INDEX) && (slot_index < pool->size)); + SOKOL_ASSERT((slot->state == SG_RESOURCESTATE_INITIAL) && (slot->id == SG_INVALID_ID)); + uint32_t ctr = ++pool->gen_ctrs[slot_index]; + slot->id = (ctr<<_SG_SLOT_SHIFT)|(slot_index & _SG_SLOT_MASK); + slot->state = SG_RESOURCESTATE_ALLOC; + return slot->id; +} + +/* extract slot index from id */ +_SOKOL_PRIVATE int _sg_slot_index(uint32_t id) { + int slot_index = (int) (id & _SG_SLOT_MASK); + SOKOL_ASSERT(_SG_INVALID_SLOT_INDEX != slot_index); + return slot_index; +} + +/* returns pointer to resource by id without matching id check */ +_SOKOL_PRIVATE _sg_buffer_t* _sg_buffer_at(const _sg_pools_t* p, uint32_t buf_id) { + SOKOL_ASSERT(p && (SG_INVALID_ID != buf_id)); + int slot_index = _sg_slot_index(buf_id); + SOKOL_ASSERT((slot_index > _SG_INVALID_SLOT_INDEX) && (slot_index < p->buffer_pool.size)); + return &p->buffers[slot_index]; +} + +_SOKOL_PRIVATE _sg_image_t* _sg_image_at(const _sg_pools_t* p, uint32_t img_id) { + SOKOL_ASSERT(p && (SG_INVALID_ID != img_id)); + int slot_index = _sg_slot_index(img_id); + SOKOL_ASSERT((slot_index > _SG_INVALID_SLOT_INDEX) && (slot_index < p->image_pool.size)); + return &p->images[slot_index]; +} + +_SOKOL_PRIVATE _sg_shader_t* _sg_shader_at(const _sg_pools_t* p, uint32_t shd_id) { + SOKOL_ASSERT(p && (SG_INVALID_ID != shd_id)); + int slot_index = _sg_slot_index(shd_id); + SOKOL_ASSERT((slot_index > _SG_INVALID_SLOT_INDEX) && (slot_index < p->shader_pool.size)); + return &p->shaders[slot_index]; +} + +_SOKOL_PRIVATE _sg_pipeline_t* _sg_pipeline_at(const _sg_pools_t* p, uint32_t pip_id) { + SOKOL_ASSERT(p && (SG_INVALID_ID != pip_id)); + int slot_index = _sg_slot_index(pip_id); + SOKOL_ASSERT((slot_index > _SG_INVALID_SLOT_INDEX) && (slot_index < p->pipeline_pool.size)); + return &p->pipelines[slot_index]; +} + +_SOKOL_PRIVATE _sg_pass_t* _sg_pass_at(const _sg_pools_t* p, uint32_t pass_id) { + SOKOL_ASSERT(p && (SG_INVALID_ID != pass_id)); + int slot_index = _sg_slot_index(pass_id); + SOKOL_ASSERT((slot_index > _SG_INVALID_SLOT_INDEX) && (slot_index < p->pass_pool.size)); + return &p->passes[slot_index]; +} + +_SOKOL_PRIVATE _sg_context_t* _sg_context_at(const _sg_pools_t* p, uint32_t context_id) { + SOKOL_ASSERT(p && (SG_INVALID_ID != context_id)); + int slot_index = _sg_slot_index(context_id); + SOKOL_ASSERT((slot_index > _SG_INVALID_SLOT_INDEX) && (slot_index < p->context_pool.size)); + return &p->contexts[slot_index]; +} + +/* returns pointer to resource with matching id check, may return 0 */ +_SOKOL_PRIVATE _sg_buffer_t* _sg_lookup_buffer(const _sg_pools_t* p, uint32_t buf_id) { + if (SG_INVALID_ID != buf_id) { + _sg_buffer_t* buf = _sg_buffer_at(p, buf_id); + if (buf->slot.id == buf_id) { + return buf; + } + } + return 0; +} + +_SOKOL_PRIVATE _sg_image_t* _sg_lookup_image(const _sg_pools_t* p, uint32_t img_id) { + if (SG_INVALID_ID != img_id) { + _sg_image_t* img = _sg_image_at(p, img_id); + if (img->slot.id == img_id) { + return img; + } + } + return 0; +} + +_SOKOL_PRIVATE _sg_shader_t* _sg_lookup_shader(const _sg_pools_t* p, uint32_t shd_id) { + SOKOL_ASSERT(p); + if (SG_INVALID_ID != shd_id) { + _sg_shader_t* shd = _sg_shader_at(p, shd_id); + if (shd->slot.id == shd_id) { + 
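+                /* the stored id matches the requested id, so the handle is
+                   still alive; a dangling handle would differ in the
+                   generation-counter bits of the id. Illustrative encoding
+                   (assuming the default _SG_SLOT_SHIFT of 16):
+                       id 0x0003000A => generation counter 3, pool slot index 10
+                */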
return shd; + } + } + return 0; +} + +_SOKOL_PRIVATE _sg_pipeline_t* _sg_lookup_pipeline(const _sg_pools_t* p, uint32_t pip_id) { + SOKOL_ASSERT(p); + if (SG_INVALID_ID != pip_id) { + _sg_pipeline_t* pip = _sg_pipeline_at(p, pip_id); + if (pip->slot.id == pip_id) { + return pip; + } + } + return 0; +} + +_SOKOL_PRIVATE _sg_pass_t* _sg_lookup_pass(const _sg_pools_t* p, uint32_t pass_id) { + SOKOL_ASSERT(p); + if (SG_INVALID_ID != pass_id) { + _sg_pass_t* pass = _sg_pass_at(p, pass_id); + if (pass->slot.id == pass_id) { + return pass; + } + } + return 0; +} + +_SOKOL_PRIVATE _sg_context_t* _sg_lookup_context(const _sg_pools_t* p, uint32_t ctx_id) { + SOKOL_ASSERT(p); + if (SG_INVALID_ID != ctx_id) { + _sg_context_t* ctx = _sg_context_at(p, ctx_id); + if (ctx->slot.id == ctx_id) { + return ctx; + } + } + return 0; +} + +_SOKOL_PRIVATE void _sg_destroy_all_resources(_sg_pools_t* p, uint32_t ctx_id) { + /* this is a bit dumb since it loops over all pool slots to + find the occupied slots, on the other hand it is only ever + executed at shutdown + NOTE: ONLY EXECUTE THIS AT SHUTDOWN + ...because the free queues will not be reset + and the resource slots not be cleared! + */ + for (int i = 1; i < p->buffer_pool.size; i++) { + if (p->buffers[i].slot.ctx_id == ctx_id) { + sg_resource_state state = p->buffers[i].slot.state; + if ((state == SG_RESOURCESTATE_VALID) || (state == SG_RESOURCESTATE_FAILED)) { + _sg_destroy_buffer(&p->buffers[i]); + } + } + } + for (int i = 1; i < p->image_pool.size; i++) { + if (p->images[i].slot.ctx_id == ctx_id) { + sg_resource_state state = p->images[i].slot.state; + if ((state == SG_RESOURCESTATE_VALID) || (state == SG_RESOURCESTATE_FAILED)) { + _sg_destroy_image(&p->images[i]); + } + } + } + for (int i = 1; i < p->shader_pool.size; i++) { + if (p->shaders[i].slot.ctx_id == ctx_id) { + sg_resource_state state = p->shaders[i].slot.state; + if ((state == SG_RESOURCESTATE_VALID) || (state == SG_RESOURCESTATE_FAILED)) { + _sg_destroy_shader(&p->shaders[i]); + } + } + } + for (int i = 1; i < p->pipeline_pool.size; i++) { + if (p->pipelines[i].slot.ctx_id == ctx_id) { + sg_resource_state state = p->pipelines[i].slot.state; + if ((state == SG_RESOURCESTATE_VALID) || (state == SG_RESOURCESTATE_FAILED)) { + _sg_destroy_pipeline(&p->pipelines[i]); + } + } + } + for (int i = 1; i < p->pass_pool.size; i++) { + if (p->passes[i].slot.ctx_id == ctx_id) { + sg_resource_state state = p->passes[i].slot.state; + if ((state == SG_RESOURCESTATE_VALID) || (state == SG_RESOURCESTATE_FAILED)) { + _sg_destroy_pass(&p->passes[i]); + } + } + } +} + +/*== VALIDATION LAYER ========================================================*/ +#if defined(SOKOL_DEBUG) +/* return a human readable string for an _sg_validate_error */ +_SOKOL_PRIVATE const char* _sg_validate_string(_sg_validate_error_t err) { + switch (err) { + /* buffer creation validation errors */ + case _SG_VALIDATE_BUFFERDESC_CANARY: return "sg_buffer_desc not initialized"; + case _SG_VALIDATE_BUFFERDESC_SIZE: return "sg_buffer_desc.size cannot be 0"; + case _SG_VALIDATE_BUFFERDESC_DATA: return "immutable buffers must be initialized with data (sg_buffer_desc.data.ptr and sg_buffer_desc.data.size)"; + case _SG_VALIDATE_BUFFERDESC_DATA_SIZE: return "immutable buffer data size differs from buffer size"; + case _SG_VALIDATE_BUFFERDESC_NO_DATA: return "dynamic/stream usage buffers cannot be initialized with data"; + + /* image creation validation errros */ + case _SG_VALIDATE_IMAGEDESC_CANARY: return "sg_image_desc not initialized"; + case 
_SG_VALIDATE_IMAGEDESC_WIDTH: return "sg_image_desc.width must be > 0"; + case _SG_VALIDATE_IMAGEDESC_HEIGHT: return "sg_image_desc.height must be > 0"; + case _SG_VALIDATE_IMAGEDESC_RT_PIXELFORMAT: return "invalid pixel format for render-target image"; + case _SG_VALIDATE_IMAGEDESC_NONRT_PIXELFORMAT: return "invalid pixel format for non-render-target image"; + case _SG_VALIDATE_IMAGEDESC_MSAA_BUT_NO_RT: return "non-render-target images cannot be multisampled"; + case _SG_VALIDATE_IMAGEDESC_NO_MSAA_RT_SUPPORT: return "MSAA not supported for this pixel format"; + case _SG_VALIDATE_IMAGEDESC_RT_IMMUTABLE: return "render target images must be SG_USAGE_IMMUTABLE"; + case _SG_VALIDATE_IMAGEDESC_RT_NO_DATA: return "render target images cannot be initialized with data"; + case _SG_VALIDATE_IMAGEDESC_DATA: return "missing or invalid data for immutable image"; + case _SG_VALIDATE_IMAGEDESC_NO_DATA: return "dynamic/stream usage images cannot be initialized with data"; + + /* shader creation */ + case _SG_VALIDATE_SHADERDESC_CANARY: return "sg_shader_desc not initialized"; + case _SG_VALIDATE_SHADERDESC_SOURCE: return "shader source code required"; + case _SG_VALIDATE_SHADERDESC_BYTECODE: return "shader byte code required"; + case _SG_VALIDATE_SHADERDESC_SOURCE_OR_BYTECODE: return "shader source or byte code required"; + case _SG_VALIDATE_SHADERDESC_NO_BYTECODE_SIZE: return "shader byte code length (in bytes) required"; + case _SG_VALIDATE_SHADERDESC_NO_CONT_UBS: return "shader uniform blocks must occupy continuous slots"; + case _SG_VALIDATE_SHADERDESC_NO_CONT_UB_MEMBERS: return "uniform block members must occupy continuous slots"; + case _SG_VALIDATE_SHADERDESC_NO_UB_MEMBERS: return "GL backend requires uniform block member declarations"; + case _SG_VALIDATE_SHADERDESC_UB_MEMBER_NAME: return "uniform block member name missing"; + case _SG_VALIDATE_SHADERDESC_UB_SIZE_MISMATCH: return "size of uniform block members doesn't match uniform block size"; + case _SG_VALIDATE_SHADERDESC_NO_CONT_IMGS: return "shader images must occupy continuous slots"; + case _SG_VALIDATE_SHADERDESC_IMG_NAME: return "GL backend requires uniform block member names"; + case _SG_VALIDATE_SHADERDESC_ATTR_NAMES: return "GLES2 backend requires vertex attribute names"; + case _SG_VALIDATE_SHADERDESC_ATTR_SEMANTICS: return "D3D11 backend requires vertex attribute semantics"; + case _SG_VALIDATE_SHADERDESC_ATTR_STRING_TOO_LONG: return "vertex attribute name/semantic string too long (max len 16)"; + + /* pipeline creation */ + case _SG_VALIDATE_PIPELINEDESC_CANARY: return "sg_pipeline_desc not initialized"; + case _SG_VALIDATE_PIPELINEDESC_SHADER: return "sg_pipeline_desc.shader missing or invalid"; + case _SG_VALIDATE_PIPELINEDESC_NO_ATTRS: return "sg_pipeline_desc.layout.attrs is empty or not continuous"; + case _SG_VALIDATE_PIPELINEDESC_LAYOUT_STRIDE4: return "sg_pipeline_desc.layout.buffers[].stride must be multiple of 4"; + case _SG_VALIDATE_PIPELINEDESC_ATTR_NAME: return "GLES2/WebGL missing vertex attribute name in shader"; + case _SG_VALIDATE_PIPELINEDESC_ATTR_SEMANTICS: return "D3D11 missing vertex attribute semantics in shader"; + + /* pass creation */ + case _SG_VALIDATE_PASSDESC_CANARY: return "sg_pass_desc not initialized"; + case _SG_VALIDATE_PASSDESC_NO_COLOR_ATTS: return "sg_pass_desc.color_attachments[0] must be valid"; + case _SG_VALIDATE_PASSDESC_NO_CONT_COLOR_ATTS: return "color attachments must occupy continuous slots"; + case _SG_VALIDATE_PASSDESC_IMAGE: return "pass attachment image is not valid"; + case 
_SG_VALIDATE_PASSDESC_MIPLEVEL: return "pass attachment mip level is bigger than image has mipmaps"; + case _SG_VALIDATE_PASSDESC_FACE: return "pass attachment image is cubemap, but face index is too big"; + case _SG_VALIDATE_PASSDESC_LAYER: return "pass attachment image is array texture, but layer index is too big"; + case _SG_VALIDATE_PASSDESC_SLICE: return "pass attachment image is 3d texture, but slice value is too big"; + case _SG_VALIDATE_PASSDESC_IMAGE_NO_RT: return "pass attachment image must be render targets"; + case _SG_VALIDATE_PASSDESC_COLOR_INV_PIXELFORMAT: return "pass color-attachment images must have a renderable pixel format"; + case _SG_VALIDATE_PASSDESC_DEPTH_INV_PIXELFORMAT: return "pass depth-attachment image must have depth pixel format"; + case _SG_VALIDATE_PASSDESC_IMAGE_SIZES: return "all pass attachments must have the same size"; + case _SG_VALIDATE_PASSDESC_IMAGE_SAMPLE_COUNTS: return "all pass attachments must have the same sample count"; + + /* sg_begin_pass */ + case _SG_VALIDATE_BEGINPASS_PASS: return "sg_begin_pass: pass must be valid"; + case _SG_VALIDATE_BEGINPASS_IMAGE: return "sg_begin_pass: one or more attachment images are not valid"; + + /* sg_apply_pipeline */ + case _SG_VALIDATE_APIP_PIPELINE_VALID_ID: return "sg_apply_pipeline: invalid pipeline id provided"; + case _SG_VALIDATE_APIP_PIPELINE_EXISTS: return "sg_apply_pipeline: pipeline object no longer alive"; + case _SG_VALIDATE_APIP_PIPELINE_VALID: return "sg_apply_pipeline: pipeline object not in valid state"; + case _SG_VALIDATE_APIP_SHADER_EXISTS: return "sg_apply_pipeline: shader object no longer alive"; + case _SG_VALIDATE_APIP_SHADER_VALID: return "sg_apply_pipeline: shader object not in valid state"; + case _SG_VALIDATE_APIP_ATT_COUNT: return "sg_apply_pipeline: number of pipeline color attachments doesn't match number of pass color attachments"; + case _SG_VALIDATE_APIP_COLOR_FORMAT: return "sg_apply_pipeline: pipeline color attachment pixel format doesn't match pass color attachment pixel format"; + case _SG_VALIDATE_APIP_DEPTH_FORMAT: return "sg_apply_pipeline: pipeline depth pixel_format doesn't match pass depth attachment pixel format"; + case _SG_VALIDATE_APIP_SAMPLE_COUNT: return "sg_apply_pipeline: pipeline MSAA sample count doesn't match render pass attachment sample count"; + + /* sg_apply_bindings */ + case _SG_VALIDATE_ABND_PIPELINE: return "sg_apply_bindings: must be called after sg_apply_pipeline"; + case _SG_VALIDATE_ABND_PIPELINE_EXISTS: return "sg_apply_bindings: currently applied pipeline object no longer alive"; + case _SG_VALIDATE_ABND_PIPELINE_VALID: return "sg_apply_bindings: currently applied pipeline object not in valid state"; + case _SG_VALIDATE_ABND_VBS: return "sg_apply_bindings: number of vertex buffers doesn't match number of pipeline vertex layouts"; + case _SG_VALIDATE_ABND_VB_EXISTS: return "sg_apply_bindings: vertex buffer no longer alive"; + case _SG_VALIDATE_ABND_VB_TYPE: return "sg_apply_bindings: buffer in vertex buffer slot is not a SG_BUFFERTYPE_VERTEXBUFFER"; + case _SG_VALIDATE_ABND_VB_OVERFLOW: return "sg_apply_bindings: buffer in vertex buffer slot is overflown"; + case _SG_VALIDATE_ABND_NO_IB: return "sg_apply_bindings: pipeline object defines indexed rendering, but no index buffer provided"; + case _SG_VALIDATE_ABND_IB: return "sg_apply_bindings: pipeline object defines non-indexed rendering, but index buffer provided"; + case _SG_VALIDATE_ABND_IB_EXISTS: return "sg_apply_bindings: index buffer no longer alive"; + case 
_SG_VALIDATE_ABND_IB_TYPE: return "sg_apply_bindings: buffer in index buffer slot is not a SG_BUFFERTYPE_INDEXBUFFER"; + case _SG_VALIDATE_ABND_IB_OVERFLOW: return "sg_apply_bindings: buffer in index buffer slot is overflown"; + case _SG_VALIDATE_ABND_VS_IMGS: return "sg_apply_bindings: vertex shader image count doesn't match sg_shader_desc"; + case _SG_VALIDATE_ABND_VS_IMG_EXISTS: return "sg_apply_bindings: vertex shader image no longer alive"; + case _SG_VALIDATE_ABND_VS_IMG_TYPES: return "sg_apply_bindings: one or more vertex shader image types don't match sg_shader_desc"; + case _SG_VALIDATE_ABND_FS_IMGS: return "sg_apply_bindings: fragment shader image count doesn't match sg_shader_desc"; + case _SG_VALIDATE_ABND_FS_IMG_EXISTS: return "sg_apply_bindings: fragment shader image no longer alive"; + case _SG_VALIDATE_ABND_FS_IMG_TYPES: return "sg_apply_bindings: one or more fragment shader image types don't match sg_shader_desc"; + + /* sg_apply_uniforms */ + case _SG_VALIDATE_AUB_NO_PIPELINE: return "sg_apply_uniforms: must be called after sg_apply_pipeline()"; + case _SG_VALIDATE_AUB_NO_UB_AT_SLOT: return "sg_apply_uniforms: no uniform block declaration at this shader stage UB slot"; + case _SG_VALIDATE_AUB_SIZE: return "sg_apply_uniforms: data size exceeds declared uniform block size"; + + /* sg_update_buffer */ + case _SG_VALIDATE_UPDATEBUF_USAGE: return "sg_update_buffer: cannot update immutable buffer"; + case _SG_VALIDATE_UPDATEBUF_SIZE: return "sg_update_buffer: update size is bigger than buffer size"; + case _SG_VALIDATE_UPDATEBUF_ONCE: return "sg_update_buffer: only one update allowed per buffer and frame"; + case _SG_VALIDATE_UPDATEBUF_APPEND: return "sg_update_buffer: cannot call sg_update_buffer and sg_append_buffer in same frame"; + + /* sg_append_buffer */ + case _SG_VALIDATE_APPENDBUF_USAGE: return "sg_append_buffer: cannot append to immutable buffer"; + case _SG_VALIDATE_APPENDBUF_SIZE: return "sg_append_buffer: overall appended size is bigger than buffer size"; + case _SG_VALIDATE_APPENDBUF_UPDATE: return "sg_append_buffer: cannot call sg_append_buffer and sg_update_buffer in same frame"; + + /* sg_update_image */ + case _SG_VALIDATE_UPDIMG_USAGE: return "sg_update_image: cannot update immutable image"; + case _SG_VALIDATE_UPDIMG_NOTENOUGHDATA: return "sg_update_image: not enough subimage data provided"; + case _SG_VALIDATE_UPDIMG_SIZE: return "sg_update_image: provided subimage data size too big"; + case _SG_VALIDATE_UPDIMG_COMPRESSED: return "sg_update_image: cannot update images with compressed format"; + case _SG_VALIDATE_UPDIMG_ONCE: return "sg_update_image: only one update allowed per image and frame"; + + default: return "unknown validation error"; + } +} +#endif /* defined(SOKOL_DEBUG) */ + +/*-- validation checks -------------------------------------------------------*/ +#if defined(SOKOL_DEBUG) +_SOKOL_PRIVATE void _sg_validate_begin(void) { + _sg.validate_error = _SG_VALIDATE_SUCCESS; +} + +_SOKOL_PRIVATE void _sg_validate(bool cond, _sg_validate_error_t err) { + if (!cond) { + _sg.validate_error = err; + SOKOL_LOG(_sg_validate_string(err)); + } +} + +_SOKOL_PRIVATE bool _sg_validate_end(void) { + if (_sg.validate_error != _SG_VALIDATE_SUCCESS) { + #if !defined(SOKOL_VALIDATE_NON_FATAL) + SOKOL_LOG("^^^^ VALIDATION FAILED, TERMINATING ^^^^"); + SOKOL_ASSERT(false); + #endif + return false; + } + else { + return true; + } +} +#endif + +_SOKOL_PRIVATE bool _sg_validate_buffer_desc(const sg_buffer_desc* desc) { + #if !defined(SOKOL_DEBUG) + _SOKOL_UNUSED(desc); + 
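/* A minimal sketch, assuming a hypothetical local `vertices` array, of an immutable vertex buffer desc that satisfies the checks in the SOKOL_DEBUG branch below: + sg_buffer_desc buf_desc = {0}; + buf_desc.usage = SG_USAGE_IMMUTABLE; // the default + buf_desc.data.ptr = vertices; + buf_desc.data.size = sizeof(vertices); // buf_desc.size may stay 0, it then defaults to data.size + sg_buffer buf = sg_make_buffer(&buf_desc); + */ +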
return true; + #else + SOKOL_ASSERT(desc); + SOKOL_VALIDATE_BEGIN(); + SOKOL_VALIDATE(desc->_start_canary == 0, _SG_VALIDATE_BUFFERDESC_CANARY); + SOKOL_VALIDATE(desc->_end_canary == 0, _SG_VALIDATE_BUFFERDESC_CANARY); + SOKOL_VALIDATE(desc->size > 0, _SG_VALIDATE_BUFFERDESC_SIZE); + bool injected = (0 != desc->gl_buffers[0]) || + (0 != desc->mtl_buffers[0]) || + (0 != desc->d3d11_buffer) || + (0 != desc->wgpu_buffer); + if (!injected && (desc->usage == SG_USAGE_IMMUTABLE)) { + SOKOL_VALIDATE((0 != desc->data.ptr) && (desc->data.size > 0), _SG_VALIDATE_BUFFERDESC_DATA); + SOKOL_VALIDATE(desc->size == desc->data.size, _SG_VALIDATE_BUFFERDESC_DATA_SIZE); + } + else { + SOKOL_VALIDATE(0 == desc->data.ptr, _SG_VALIDATE_BUFFERDESC_NO_DATA); + } + return SOKOL_VALIDATE_END(); + #endif +} + +_SOKOL_PRIVATE bool _sg_validate_image_desc(const sg_image_desc* desc) { + #if !defined(SOKOL_DEBUG) + _SOKOL_UNUSED(desc); + return true; + #else + SOKOL_ASSERT(desc); + SOKOL_VALIDATE_BEGIN(); + SOKOL_VALIDATE(desc->_start_canary == 0, _SG_VALIDATE_IMAGEDESC_CANARY); + SOKOL_VALIDATE(desc->_end_canary == 0, _SG_VALIDATE_IMAGEDESC_CANARY); + SOKOL_VALIDATE(desc->width > 0, _SG_VALIDATE_IMAGEDESC_WIDTH); + SOKOL_VALIDATE(desc->height > 0, _SG_VALIDATE_IMAGEDESC_HEIGHT); + const sg_pixel_format fmt = desc->pixel_format; + const sg_usage usage = desc->usage; + const bool injected = (0 != desc->gl_textures[0]) || + (0 != desc->mtl_textures[0]) || + (0 != desc->d3d11_texture) || + (0 != desc->wgpu_texture); + if (desc->render_target) { + SOKOL_ASSERT(((int)fmt >= 0) && ((int)fmt < _SG_PIXELFORMAT_NUM)); + SOKOL_VALIDATE(_sg.formats[fmt].render, _SG_VALIDATE_IMAGEDESC_RT_PIXELFORMAT); + /* on GLES2, sample count for render targets is completely ignored */ + #if defined(SOKOL_GLES2) || defined(SOKOL_GLES3) + if (!_sg.gl.gles2) { + #endif + if (desc->sample_count > 1) { + SOKOL_VALIDATE(_sg.features.msaa_render_targets && _sg.formats[fmt].msaa, _SG_VALIDATE_IMAGEDESC_NO_MSAA_RT_SUPPORT); + } + #if defined(SOKOL_GLES2) || defined(SOKOL_GLES3) + } + #endif + SOKOL_VALIDATE(usage == SG_USAGE_IMMUTABLE, _SG_VALIDATE_IMAGEDESC_RT_IMMUTABLE); + SOKOL_VALIDATE(desc->data.subimage[0][0].ptr==0, _SG_VALIDATE_IMAGEDESC_RT_NO_DATA); + } + else { + SOKOL_VALIDATE(desc->sample_count <= 1, _SG_VALIDATE_IMAGEDESC_MSAA_BUT_NO_RT); + const bool valid_nonrt_fmt = !_sg_is_valid_rendertarget_depth_format(fmt); + SOKOL_VALIDATE(valid_nonrt_fmt, _SG_VALIDATE_IMAGEDESC_NONRT_PIXELFORMAT); + /* FIXME: should use the same "expected size" computation as in _sg_validate_update_image() here */ + if (!injected && (usage == SG_USAGE_IMMUTABLE)) { + const int num_faces = desc->type == SG_IMAGETYPE_CUBE ? 
6:1; + const int num_mips = desc->num_mipmaps; + for (int face_index = 0; face_index < num_faces; face_index++) { + for (int mip_index = 0; mip_index < num_mips; mip_index++) { + const bool has_data = desc->data.subimage[face_index][mip_index].ptr != 0; + const bool has_size = desc->data.subimage[face_index][mip_index].size > 0; + SOKOL_VALIDATE(has_data && has_size, _SG_VALIDATE_IMAGEDESC_DATA); + } + } + } + else { + for (int face_index = 0; face_index < SG_CUBEFACE_NUM; face_index++) { + for (int mip_index = 0; mip_index < SG_MAX_MIPMAPS; mip_index++) { + const bool no_data = 0 == desc->data.subimage[face_index][mip_index].ptr; + const bool no_size = 0 == desc->data.subimage[face_index][mip_index].size; + SOKOL_VALIDATE(no_data && no_size, _SG_VALIDATE_IMAGEDESC_NO_DATA); + } + } + } + } + return SOKOL_VALIDATE_END(); + #endif +} + +_SOKOL_PRIVATE bool _sg_validate_shader_desc(const sg_shader_desc* desc) { + #if !defined(SOKOL_DEBUG) + _SOKOL_UNUSED(desc); + return true; + #else + SOKOL_ASSERT(desc); + SOKOL_VALIDATE_BEGIN(); + SOKOL_VALIDATE(desc->_start_canary == 0, _SG_VALIDATE_SHADERDESC_CANARY); + SOKOL_VALIDATE(desc->_end_canary == 0, _SG_VALIDATE_SHADERDESC_CANARY); + #if defined(SOKOL_GLES2) + SOKOL_VALIDATE(0 != desc->attrs[0].name, _SG_VALIDATE_SHADERDESC_ATTR_NAMES); + #elif defined(SOKOL_D3D11) + SOKOL_VALIDATE(0 != desc->attrs[0].sem_name, _SG_VALIDATE_SHADERDESC_ATTR_SEMANTICS); + #endif + #if defined(SOKOL_GLCORE33) || defined(SOKOL_GLES2) || defined(SOKOL_GLES3) + /* on GL, must provide shader source code */ + SOKOL_VALIDATE(0 != desc->vs.source, _SG_VALIDATE_SHADERDESC_SOURCE); + SOKOL_VALIDATE(0 != desc->fs.source, _SG_VALIDATE_SHADERDESC_SOURCE); + #elif defined(SOKOL_METAL) || defined(SOKOL_D3D11) + /* on Metal or D3D11, must provide shader source code or byte code */ + SOKOL_VALIDATE((0 != desc->vs.source)||(0 != desc->vs.bytecode.ptr), _SG_VALIDATE_SHADERDESC_SOURCE_OR_BYTECODE); + SOKOL_VALIDATE((0 != desc->fs.source)||(0 != desc->fs.bytecode.ptr), _SG_VALIDATE_SHADERDESC_SOURCE_OR_BYTECODE); + #elif defined(SOKOL_WGPU) + /* on WGPU byte code must be provided */ + SOKOL_VALIDATE((0 != desc->vs.bytecode.ptr), _SG_VALIDATE_SHADERDESC_BYTECODE); + SOKOL_VALIDATE((0 != desc->fs.bytecode.ptr), _SG_VALIDATE_SHADERDESC_BYTECODE); + #else + /* Dummy Backend, don't require source or bytecode */ + #endif + for (int i = 0; i < SG_MAX_VERTEX_ATTRIBUTES; i++) { + if (desc->attrs[i].name) { + SOKOL_VALIDATE(strlen(desc->attrs[i].name) < _SG_STRING_SIZE, _SG_VALIDATE_SHADERDESC_ATTR_STRING_TOO_LONG); + } + if (desc->attrs[i].sem_name) { + SOKOL_VALIDATE(strlen(desc->attrs[i].sem_name) < _SG_STRING_SIZE, _SG_VALIDATE_SHADERDESC_ATTR_STRING_TOO_LONG); + } + } + /* if shader byte code, the size must also be provided */ + if (0 != desc->vs.bytecode.ptr) { + SOKOL_VALIDATE(desc->vs.bytecode.size > 0, _SG_VALIDATE_SHADERDESC_NO_BYTECODE_SIZE); + } + if (0 != desc->fs.bytecode.ptr) { + SOKOL_VALIDATE(desc->fs.bytecode.size > 0, _SG_VALIDATE_SHADERDESC_NO_BYTECODE_SIZE); + } + for (int stage_index = 0; stage_index < SG_NUM_SHADER_STAGES; stage_index++) { + const sg_shader_stage_desc* stage_desc = (stage_index == 0)? 
&desc->vs : &desc->fs; + bool uniform_blocks_continuous = true; + for (int ub_index = 0; ub_index < SG_MAX_SHADERSTAGE_UBS; ub_index++) { + const sg_shader_uniform_block_desc* ub_desc = &stage_desc->uniform_blocks[ub_index]; + if (ub_desc->size > 0) { + SOKOL_VALIDATE(uniform_blocks_continuous, _SG_VALIDATE_SHADERDESC_NO_CONT_UBS); + bool uniforms_continuous = true; + int uniform_offset = 0; + int num_uniforms = 0; + for (int u_index = 0; u_index < SG_MAX_UB_MEMBERS; u_index++) { + const sg_shader_uniform_desc* u_desc = &ub_desc->uniforms[u_index]; + if (u_desc->type != SG_UNIFORMTYPE_INVALID) { + SOKOL_VALIDATE(uniforms_continuous, _SG_VALIDATE_SHADERDESC_NO_CONT_UB_MEMBERS); + #if defined(SOKOL_GLES2) || defined(SOKOL_GLES3) + SOKOL_VALIDATE(0 != u_desc->name, _SG_VALIDATE_SHADERDESC_UB_MEMBER_NAME); + #endif + const int array_count = u_desc->array_count; + uniform_offset += _sg_uniform_size(u_desc->type, array_count); + num_uniforms++; + } + else { + uniforms_continuous = false; + } + } + #if defined(SOKOL_GLCORE33) || defined(SOKOL_GLES2) || defined(SOKOL_GLES3) + SOKOL_VALIDATE((size_t)uniform_offset == ub_desc->size, _SG_VALIDATE_SHADERDESC_UB_SIZE_MISMATCH); + SOKOL_VALIDATE(num_uniforms > 0, _SG_VALIDATE_SHADERDESC_NO_UB_MEMBERS); + #endif + } + else { + uniform_blocks_continuous = false; + } + } + bool images_continuous = true; + for (int img_index = 0; img_index < SG_MAX_SHADERSTAGE_IMAGES; img_index++) { + const sg_shader_image_desc* img_desc = &stage_desc->images[img_index]; + if (img_desc->image_type != _SG_IMAGETYPE_DEFAULT) { + SOKOL_VALIDATE(images_continuous, _SG_VALIDATE_SHADERDESC_NO_CONT_IMGS); + #if defined(SOKOL_GLES2) + SOKOL_VALIDATE(0 != img_desc->name, _SG_VALIDATE_SHADERDESC_IMG_NAME); + #endif + } + else { + images_continuous = false; + } + } + } + return SOKOL_VALIDATE_END(); + #endif +} + +_SOKOL_PRIVATE bool _sg_validate_pipeline_desc(const sg_pipeline_desc* desc) { + #if !defined(SOKOL_DEBUG) + _SOKOL_UNUSED(desc); + return true; + #else + SOKOL_ASSERT(desc); + SOKOL_VALIDATE_BEGIN(); + SOKOL_VALIDATE(desc->_start_canary == 0, _SG_VALIDATE_PIPELINEDESC_CANARY); + SOKOL_VALIDATE(desc->_end_canary == 0, _SG_VALIDATE_PIPELINEDESC_CANARY); + SOKOL_VALIDATE(desc->shader.id != SG_INVALID_ID, _SG_VALIDATE_PIPELINEDESC_SHADER); + for (int buf_index = 0; buf_index < SG_MAX_SHADERSTAGE_BUFFERS; buf_index++) { + const sg_buffer_layout_desc* l_desc = &desc->layout.buffers[buf_index]; + if (l_desc->stride == 0) { + continue; + } + SOKOL_VALIDATE((l_desc->stride & 3) == 0, _SG_VALIDATE_PIPELINEDESC_LAYOUT_STRIDE4); + } + SOKOL_VALIDATE(desc->layout.attrs[0].format != SG_VERTEXFORMAT_INVALID, _SG_VALIDATE_PIPELINEDESC_NO_ATTRS); + const _sg_shader_t* shd = _sg_lookup_shader(&_sg.pools, desc->shader.id); + SOKOL_VALIDATE(0 != shd, _SG_VALIDATE_PIPELINEDESC_SHADER); + if (shd) { + SOKOL_VALIDATE(shd->slot.state == SG_RESOURCESTATE_VALID, _SG_VALIDATE_PIPELINEDESC_SHADER); + bool attrs_cont = true; + for (int attr_index = 0; attr_index < SG_MAX_VERTEX_ATTRIBUTES; attr_index++) { + const sg_vertex_attr_desc* a_desc = &desc->layout.attrs[attr_index]; + if (a_desc->format == SG_VERTEXFORMAT_INVALID) { + attrs_cont = false; + continue; + } + SOKOL_VALIDATE(attrs_cont, _SG_VALIDATE_PIPELINEDESC_NO_ATTRS); + SOKOL_ASSERT(a_desc->buffer_index < SG_MAX_SHADERSTAGE_BUFFERS); + #if defined(SOKOL_GLES2) + /* on GLES2, vertex attribute names must be provided */ + SOKOL_VALIDATE(!_sg_strempty(&shd->gl.attrs[attr_index].name), _SG_VALIDATE_PIPELINEDESC_ATTR_NAME); + #elif 
defined(SOKOL_D3D11) + /* on D3D11, semantic names (and semantic indices) must be provided */ + SOKOL_VALIDATE(!_sg_strempty(&shd->d3d11.attrs[attr_index].sem_name), _SG_VALIDATE_PIPELINEDESC_ATTR_SEMANTICS); + #endif + } + } + return SOKOL_VALIDATE_END(); + #endif +} + +_SOKOL_PRIVATE bool _sg_validate_pass_desc(const sg_pass_desc* desc) { + #if !defined(SOKOL_DEBUG) + _SOKOL_UNUSED(desc); + return true; + #else + SOKOL_ASSERT(desc); + SOKOL_VALIDATE_BEGIN(); + SOKOL_VALIDATE(desc->_start_canary == 0, _SG_VALIDATE_PASSDESC_CANARY); + SOKOL_VALIDATE(desc->_end_canary == 0, _SG_VALIDATE_PASSDESC_CANARY); + bool atts_cont = true; + int width = -1, height = -1, sample_count = -1; + for (int att_index = 0; att_index < SG_MAX_COLOR_ATTACHMENTS; att_index++) { + const sg_pass_attachment_desc* att = &desc->color_attachments[att_index]; + if (att->image.id == SG_INVALID_ID) { + SOKOL_VALIDATE(att_index > 0, _SG_VALIDATE_PASSDESC_NO_COLOR_ATTS); + atts_cont = false; + continue; + } + SOKOL_VALIDATE(atts_cont, _SG_VALIDATE_PASSDESC_NO_CONT_COLOR_ATTS); + const _sg_image_t* img = _sg_lookup_image(&_sg.pools, att->image.id); + SOKOL_ASSERT(img); + SOKOL_VALIDATE(img->slot.state == SG_RESOURCESTATE_VALID, _SG_VALIDATE_PASSDESC_IMAGE); + SOKOL_VALIDATE(att->mip_level < img->cmn.num_mipmaps, _SG_VALIDATE_PASSDESC_MIPLEVEL); + if (img->cmn.type == SG_IMAGETYPE_CUBE) { + SOKOL_VALIDATE(att->slice < 6, _SG_VALIDATE_PASSDESC_FACE); + } + else if (img->cmn.type == SG_IMAGETYPE_ARRAY) { + SOKOL_VALIDATE(att->slice < img->cmn.num_slices, _SG_VALIDATE_PASSDESC_LAYER); + } + else if (img->cmn.type == SG_IMAGETYPE_3D) { + SOKOL_VALIDATE(att->slice < img->cmn.num_slices, _SG_VALIDATE_PASSDESC_SLICE); + } + SOKOL_VALIDATE(img->cmn.render_target, _SG_VALIDATE_PASSDESC_IMAGE_NO_RT); + if (att_index == 0) { + width = img->cmn.width >> att->mip_level; + height = img->cmn.height >> att->mip_level; + sample_count = img->cmn.sample_count; + } + else { + SOKOL_VALIDATE(width == img->cmn.width >> att->mip_level, _SG_VALIDATE_PASSDESC_IMAGE_SIZES); + SOKOL_VALIDATE(height == img->cmn.height >> att->mip_level, _SG_VALIDATE_PASSDESC_IMAGE_SIZES); + SOKOL_VALIDATE(sample_count == img->cmn.sample_count, _SG_VALIDATE_PASSDESC_IMAGE_SAMPLE_COUNTS); + } + SOKOL_VALIDATE(_sg_is_valid_rendertarget_color_format(img->cmn.pixel_format), _SG_VALIDATE_PASSDESC_COLOR_INV_PIXELFORMAT); + } + if (desc->depth_stencil_attachment.image.id != SG_INVALID_ID) { + const sg_pass_attachment_desc* att = &desc->depth_stencil_attachment; + const _sg_image_t* img = _sg_lookup_image(&_sg.pools, att->image.id); + SOKOL_ASSERT(img); + SOKOL_VALIDATE(img->slot.state == SG_RESOURCESTATE_VALID, _SG_VALIDATE_PASSDESC_IMAGE); + SOKOL_VALIDATE(att->mip_level < img->cmn.num_mipmaps, _SG_VALIDATE_PASSDESC_MIPLEVEL); + if (img->cmn.type == SG_IMAGETYPE_CUBE) { + SOKOL_VALIDATE(att->slice < 6, _SG_VALIDATE_PASSDESC_FACE); + } + else if (img->cmn.type == SG_IMAGETYPE_ARRAY) { + SOKOL_VALIDATE(att->slice < img->cmn.num_slices, _SG_VALIDATE_PASSDESC_LAYER); + } + else if (img->cmn.type == SG_IMAGETYPE_3D) { + SOKOL_VALIDATE(att->slice < img->cmn.num_slices, _SG_VALIDATE_PASSDESC_SLICE); + } + SOKOL_VALIDATE(img->cmn.render_target, _SG_VALIDATE_PASSDESC_IMAGE_NO_RT); + SOKOL_VALIDATE(width == img->cmn.width >> att->mip_level, _SG_VALIDATE_PASSDESC_IMAGE_SIZES); + SOKOL_VALIDATE(height == img->cmn.height >> att->mip_level, _SG_VALIDATE_PASSDESC_IMAGE_SIZES); + SOKOL_VALIDATE(sample_count == img->cmn.sample_count, _SG_VALIDATE_PASSDESC_IMAGE_SAMPLE_COUNTS); + 
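/* unlike the color attachments above (which must use a renderable color pixel format), the depth-stencil attachment must use a depth or depth-stencil pixel format: */ +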
SOKOL_VALIDATE(_sg_is_valid_rendertarget_depth_format(img->cmn.pixel_format), _SG_VALIDATE_PASSDESC_DEPTH_INV_PIXELFORMAT); + } + return SOKOL_VALIDATE_END(); + #endif +} + +_SOKOL_PRIVATE bool _sg_validate_begin_pass(_sg_pass_t* pass) { + #if !defined(SOKOL_DEBUG) + _SOKOL_UNUSED(pass); + return true; + #else + SOKOL_VALIDATE_BEGIN(); + SOKOL_VALIDATE(pass->slot.state == SG_RESOURCESTATE_VALID, _SG_VALIDATE_BEGINPASS_PASS); + + for (int i = 0; i < SG_MAX_COLOR_ATTACHMENTS; i++) { + const _sg_pass_attachment_t* att = &pass->cmn.color_atts[i]; + const _sg_image_t* img = _sg_pass_color_image(pass, i); + if (img) { + SOKOL_VALIDATE(img->slot.state == SG_RESOURCESTATE_VALID, _SG_VALIDATE_BEGINPASS_IMAGE); + SOKOL_VALIDATE(img->slot.id == att->image_id.id, _SG_VALIDATE_BEGINPASS_IMAGE); + } + } + const _sg_image_t* ds_img = _sg_pass_ds_image(pass); + if (ds_img) { + const _sg_pass_attachment_t* att = &pass->cmn.ds_att; + SOKOL_VALIDATE(ds_img->slot.state == SG_RESOURCESTATE_VALID, _SG_VALIDATE_BEGINPASS_IMAGE); + SOKOL_VALIDATE(ds_img->slot.id == att->image_id.id, _SG_VALIDATE_BEGINPASS_IMAGE); + } + return SOKOL_VALIDATE_END(); + #endif +} + +_SOKOL_PRIVATE bool _sg_validate_apply_pipeline(sg_pipeline pip_id) { + #if !defined(SOKOL_DEBUG) + _SOKOL_UNUSED(pip_id); + return true; + #else + SOKOL_VALIDATE_BEGIN(); + /* the pipeline object must be alive and valid */ + SOKOL_VALIDATE(pip_id.id != SG_INVALID_ID, _SG_VALIDATE_APIP_PIPELINE_VALID_ID); + const _sg_pipeline_t* pip = _sg_lookup_pipeline(&_sg.pools, pip_id.id); + SOKOL_VALIDATE(pip != 0, _SG_VALIDATE_APIP_PIPELINE_EXISTS); + if (!pip) { + return SOKOL_VALIDATE_END(); + } + SOKOL_VALIDATE(pip->slot.state == SG_RESOURCESTATE_VALID, _SG_VALIDATE_APIP_PIPELINE_VALID); + /* the pipeline's shader must be alive and valid */ + SOKOL_ASSERT(pip->shader); + SOKOL_VALIDATE(pip->shader->slot.id == pip->cmn.shader_id.id, _SG_VALIDATE_APIP_SHADER_EXISTS); + SOKOL_VALIDATE(pip->shader->slot.state == SG_RESOURCESTATE_VALID, _SG_VALIDATE_APIP_SHADER_VALID); + /* check that pipeline attributes match current pass attributes */ + const _sg_pass_t* pass = _sg_lookup_pass(&_sg.pools, _sg.cur_pass.id); + if (pass) { + /* an offscreen pass */ + SOKOL_VALIDATE(pip->cmn.color_attachment_count == pass->cmn.num_color_atts, _SG_VALIDATE_APIP_ATT_COUNT); + for (int i = 0; i < pip->cmn.color_attachment_count; i++) { + const _sg_image_t* att_img = _sg_pass_color_image(pass, i); + SOKOL_VALIDATE(pip->cmn.color_formats[i] == att_img->cmn.pixel_format, _SG_VALIDATE_APIP_COLOR_FORMAT); + SOKOL_VALIDATE(pip->cmn.sample_count == att_img->cmn.sample_count, _SG_VALIDATE_APIP_SAMPLE_COUNT); + } + const _sg_image_t* att_dsimg = _sg_pass_ds_image(pass); + if (att_dsimg) { + SOKOL_VALIDATE(pip->cmn.depth_format == att_dsimg->cmn.pixel_format, _SG_VALIDATE_APIP_DEPTH_FORMAT); + } + else { + SOKOL_VALIDATE(pip->cmn.depth_format == SG_PIXELFORMAT_NONE, _SG_VALIDATE_APIP_DEPTH_FORMAT); + } + } + else { + /* default pass */ + SOKOL_VALIDATE(pip->cmn.color_attachment_count == 1, _SG_VALIDATE_APIP_ATT_COUNT); + SOKOL_VALIDATE(pip->cmn.color_formats[0] == _sg.desc.context.color_format, _SG_VALIDATE_APIP_COLOR_FORMAT); + SOKOL_VALIDATE(pip->cmn.depth_format == _sg.desc.context.depth_format, _SG_VALIDATE_APIP_DEPTH_FORMAT); + SOKOL_VALIDATE(pip->cmn.sample_count == _sg.desc.context.sample_count, _SG_VALIDATE_APIP_SAMPLE_COUNT); + } + return SOKOL_VALIDATE_END(); + #endif +} + +_SOKOL_PRIVATE bool _sg_validate_apply_bindings(const sg_bindings* bindings) { + #if !defined(SOKOL_DEBUG) + 
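/* A minimal sketch of bindings that would pass the checks in the SOKOL_DEBUG branch below, assuming a pipeline with one vertex buffer layout, indexed rendering and one fragment shader image; `vbuf`, `ibuf` and `tex` are hypothetical handles created elsewhere: + sg_bindings bind = {0}; + bind.vertex_buffers[0] = vbuf; // must be a SG_BUFFERTYPE_VERTEXBUFFER buffer + bind.index_buffer = ibuf; // required because the pipeline's index_type != SG_INDEXTYPE_NONE + bind.fs_images[0] = tex; // image count and types must match the shader desc + sg_apply_bindings(&bind); + */ +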
_SOKOL_UNUSED(bindings); + return true; + #else + SOKOL_VALIDATE_BEGIN(); + + /* a pipeline object must have been applied */ + SOKOL_VALIDATE(_sg.cur_pipeline.id != SG_INVALID_ID, _SG_VALIDATE_ABND_PIPELINE); + const _sg_pipeline_t* pip = _sg_lookup_pipeline(&_sg.pools, _sg.cur_pipeline.id); + SOKOL_VALIDATE(pip != 0, _SG_VALIDATE_ABND_PIPELINE_EXISTS); + if (!pip) { + return SOKOL_VALIDATE_END(); + } + SOKOL_VALIDATE(pip->slot.state == SG_RESOURCESTATE_VALID, _SG_VALIDATE_ABND_PIPELINE_VALID); + SOKOL_ASSERT(pip->shader && (pip->cmn.shader_id.id == pip->shader->slot.id)); + + /* has expected vertex buffers, and vertex buffers still exist */ + for (int i = 0; i < SG_MAX_SHADERSTAGE_BUFFERS; i++) { + if (bindings->vertex_buffers[i].id != SG_INVALID_ID) { + SOKOL_VALIDATE(pip->cmn.vertex_layout_valid[i], _SG_VALIDATE_ABND_VBS); + /* buffers in vertex-buffer-slots must be of type SG_BUFFERTYPE_VERTEXBUFFER */ + const _sg_buffer_t* buf = _sg_lookup_buffer(&_sg.pools, bindings->vertex_buffers[i].id); + SOKOL_VALIDATE(buf != 0, _SG_VALIDATE_ABND_VB_EXISTS); + if (buf && buf->slot.state == SG_RESOURCESTATE_VALID) { + SOKOL_VALIDATE(SG_BUFFERTYPE_VERTEXBUFFER == buf->cmn.type, _SG_VALIDATE_ABND_VB_TYPE); + SOKOL_VALIDATE(!buf->cmn.append_overflow, _SG_VALIDATE_ABND_VB_OVERFLOW); + } + } + else { + /* vertex buffer provided in a slot which has no vertex layout in pipeline */ + SOKOL_VALIDATE(!pip->cmn.vertex_layout_valid[i], _SG_VALIDATE_ABND_VBS); + } + } + + /* index buffer expected or not, and index buffer still exists */ + if (pip->cmn.index_type == SG_INDEXTYPE_NONE) { + /* pipeline defines non-indexed rendering, but index buffer provided */ + SOKOL_VALIDATE(bindings->index_buffer.id == SG_INVALID_ID, _SG_VALIDATE_ABND_IB); + } + else { + /* pipeline defines indexed rendering, but no index buffer provided */ + SOKOL_VALIDATE(bindings->index_buffer.id != SG_INVALID_ID, _SG_VALIDATE_ABND_NO_IB); + } + if (bindings->index_buffer.id != SG_INVALID_ID) { + /* buffer in index-buffer-slot must be of type SG_BUFFERTYPE_INDEXBUFFER */ + const _sg_buffer_t* buf = _sg_lookup_buffer(&_sg.pools, bindings->index_buffer.id); + SOKOL_VALIDATE(buf != 0, _SG_VALIDATE_ABND_IB_EXISTS); + if (buf && buf->slot.state == SG_RESOURCESTATE_VALID) { + SOKOL_VALIDATE(SG_BUFFERTYPE_INDEXBUFFER == buf->cmn.type, _SG_VALIDATE_ABND_IB_TYPE); + SOKOL_VALIDATE(!buf->cmn.append_overflow, _SG_VALIDATE_ABND_IB_OVERFLOW); + } + } + + /* has expected vertex shader images */ + for (int i = 0; i < SG_MAX_SHADERSTAGE_IMAGES; i++) { + _sg_shader_stage_t* stage = &pip->shader->cmn.stage[SG_SHADERSTAGE_VS]; + if (bindings->vs_images[i].id != SG_INVALID_ID) { + SOKOL_VALIDATE(i < stage->num_images, _SG_VALIDATE_ABND_VS_IMGS); + const _sg_image_t* img = _sg_lookup_image(&_sg.pools, bindings->vs_images[i].id); + SOKOL_VALIDATE(img != 0, _SG_VALIDATE_ABND_VS_IMG_EXISTS); + if (img && img->slot.state == SG_RESOURCESTATE_VALID) { + SOKOL_VALIDATE(img->cmn.type == stage->images[i].image_type, _SG_VALIDATE_ABND_VS_IMG_TYPES); + } + } + else { + SOKOL_VALIDATE(i >= stage->num_images, _SG_VALIDATE_ABND_VS_IMGS); + } + } + + /* has expected fragment shader images */ + for (int i = 0; i < SG_MAX_SHADERSTAGE_IMAGES; i++) { + _sg_shader_stage_t* stage = &pip->shader->cmn.stage[SG_SHADERSTAGE_FS]; + if (bindings->fs_images[i].id != SG_INVALID_ID) { + SOKOL_VALIDATE(i < stage->num_images, _SG_VALIDATE_ABND_FS_IMGS); + const _sg_image_t* img = _sg_lookup_image(&_sg.pools, bindings->fs_images[i].id); + SOKOL_VALIDATE(img != 0, 
_SG_VALIDATE_ABND_FS_IMG_EXISTS); + if (img && img->slot.state == SG_RESOURCESTATE_VALID) { + SOKOL_VALIDATE(img->cmn.type == stage->images[i].image_type, _SG_VALIDATE_ABND_FS_IMG_TYPES); + } + } + else { + SOKOL_VALIDATE(i >= stage->num_images, _SG_VALIDATE_ABND_FS_IMGS); + } + } + return SOKOL_VALIDATE_END(); + #endif +} + +_SOKOL_PRIVATE bool _sg_validate_apply_uniforms(sg_shader_stage stage_index, int ub_index, const sg_range* data) { + #if !defined(SOKOL_DEBUG) + _SOKOL_UNUSED(stage_index); + _SOKOL_UNUSED(ub_index); + _SOKOL_UNUSED(data); + return true; + #else + SOKOL_ASSERT((stage_index == SG_SHADERSTAGE_VS) || (stage_index == SG_SHADERSTAGE_FS)); + SOKOL_ASSERT((ub_index >= 0) && (ub_index < SG_MAX_SHADERSTAGE_UBS)); + SOKOL_VALIDATE_BEGIN(); + SOKOL_VALIDATE(_sg.cur_pipeline.id != SG_INVALID_ID, _SG_VALIDATE_AUB_NO_PIPELINE); + const _sg_pipeline_t* pip = _sg_lookup_pipeline(&_sg.pools, _sg.cur_pipeline.id); + SOKOL_ASSERT(pip && (pip->slot.id == _sg.cur_pipeline.id)); + SOKOL_ASSERT(pip->shader && (pip->shader->slot.id == pip->cmn.shader_id.id)); + + /* check that there is a uniform block at 'stage' and 'ub_index' */ + const _sg_shader_stage_t* stage = &pip->shader->cmn.stage[stage_index]; + SOKOL_VALIDATE(ub_index < stage->num_uniform_blocks, _SG_VALIDATE_AUB_NO_UB_AT_SLOT); + + /* check that the provided data size doesn't exceed the uniform block size */ + SOKOL_VALIDATE(data->size <= stage->uniform_blocks[ub_index].size, _SG_VALIDATE_AUB_SIZE); + + return SOKOL_VALIDATE_END(); + #endif +} + +_SOKOL_PRIVATE bool _sg_validate_update_buffer(const _sg_buffer_t* buf, const sg_range* data) { + #if !defined(SOKOL_DEBUG) + _SOKOL_UNUSED(buf); + _SOKOL_UNUSED(data); + return true; + #else + SOKOL_ASSERT(buf && data && data->ptr); + SOKOL_VALIDATE_BEGIN(); + SOKOL_VALIDATE(buf->cmn.usage != SG_USAGE_IMMUTABLE, _SG_VALIDATE_UPDATEBUF_USAGE); + SOKOL_VALIDATE(buf->cmn.size >= (int)data->size, _SG_VALIDATE_UPDATEBUF_SIZE); + SOKOL_VALIDATE(buf->cmn.update_frame_index != _sg.frame_index, _SG_VALIDATE_UPDATEBUF_ONCE); + SOKOL_VALIDATE(buf->cmn.append_frame_index != _sg.frame_index, _SG_VALIDATE_UPDATEBUF_APPEND); + return SOKOL_VALIDATE_END(); + #endif +} + +_SOKOL_PRIVATE bool _sg_validate_append_buffer(const _sg_buffer_t* buf, const sg_range* data) { + #if !defined(SOKOL_DEBUG) + _SOKOL_UNUSED(buf); + _SOKOL_UNUSED(data); + return true; + #else + SOKOL_ASSERT(buf && data && data->ptr); + SOKOL_VALIDATE_BEGIN(); + SOKOL_VALIDATE(buf->cmn.usage != SG_USAGE_IMMUTABLE, _SG_VALIDATE_APPENDBUF_USAGE); + SOKOL_VALIDATE(buf->cmn.size >= (buf->cmn.append_pos + (int)data->size), _SG_VALIDATE_APPENDBUF_SIZE); + SOKOL_VALIDATE(buf->cmn.update_frame_index != _sg.frame_index, _SG_VALIDATE_APPENDBUF_UPDATE); + return SOKOL_VALIDATE_END(); + #endif +} + +_SOKOL_PRIVATE bool _sg_validate_update_image(const _sg_image_t* img, const sg_image_data* data) { + #if !defined(SOKOL_DEBUG) + _SOKOL_UNUSED(img); + _SOKOL_UNUSED(data); + return true; + #else + SOKOL_ASSERT(img && data); + SOKOL_VALIDATE_BEGIN(); + SOKOL_VALIDATE(img->cmn.usage != SG_USAGE_IMMUTABLE, _SG_VALIDATE_UPDIMG_USAGE); + SOKOL_VALIDATE(img->cmn.upd_frame_index != _sg.frame_index, _SG_VALIDATE_UPDIMG_ONCE); + SOKOL_VALIDATE(!_sg_is_compressed_pixel_format(img->cmn.pixel_format), _SG_VALIDATE_UPDIMG_COMPRESSED); + const int num_faces = (img->cmn.type == SG_IMAGETYPE_CUBE) ? 
6 : 1; + const int num_mips = img->cmn.num_mipmaps; + for (int face_index = 0; face_index < num_faces; face_index++) { + for (int mip_index = 0; mip_index < num_mips; mip_index++) { + SOKOL_VALIDATE(0 != data->subimage[face_index][mip_index].ptr, _SG_VALIDATE_UPDIMG_NOTENOUGHDATA); + const int mip_width = _sg_max(img->cmn.width >> mip_index, 1); + const int mip_height = _sg_max(img->cmn.height >> mip_index, 1); + const int bytes_per_slice = _sg_surface_pitch(img->cmn.pixel_format, mip_width, mip_height, 1); + const int expected_size = bytes_per_slice * img->cmn.num_slices; + SOKOL_VALIDATE(data->subimage[face_index][mip_index].size <= (size_t)expected_size, _SG_VALIDATE_UPDIMG_SIZE); + } + } + return SOKOL_VALIDATE_END(); + #endif +} + +/*== fill in desc default values =============================================*/ +_SOKOL_PRIVATE sg_buffer_desc _sg_buffer_desc_defaults(const sg_buffer_desc* desc) { + sg_buffer_desc def = *desc; + def.type = _sg_def(def.type, SG_BUFFERTYPE_VERTEXBUFFER); + def.usage = _sg_def(def.usage, SG_USAGE_IMMUTABLE); + if (def.size == 0) { + def.size = def.data.size; + } + else if (def.data.size == 0) { + def.data.size = def.size; + } + return def; +} + +_SOKOL_PRIVATE sg_image_desc _sg_image_desc_defaults(const sg_image_desc* desc) { + sg_image_desc def = *desc; + def.type = _sg_def(def.type, SG_IMAGETYPE_2D); + def.num_slices = _sg_def(def.num_slices, 1); + def.num_mipmaps = _sg_def(def.num_mipmaps, 1); + def.usage = _sg_def(def.usage, SG_USAGE_IMMUTABLE); + if (desc->render_target) { + def.pixel_format = _sg_def(def.pixel_format, _sg.desc.context.color_format); + def.sample_count = _sg_def(def.sample_count, _sg.desc.context.sample_count); + } + else { + def.pixel_format = _sg_def(def.pixel_format, SG_PIXELFORMAT_RGBA8); + def.sample_count = _sg_def(def.sample_count, 1); + } + def.min_filter = _sg_def(def.min_filter, SG_FILTER_NEAREST); + def.mag_filter = _sg_def(def.mag_filter, SG_FILTER_NEAREST); + def.wrap_u = _sg_def(def.wrap_u, SG_WRAP_REPEAT); + def.wrap_v = _sg_def(def.wrap_v, SG_WRAP_REPEAT); + def.wrap_w = _sg_def(def.wrap_w, SG_WRAP_REPEAT); + def.border_color = _sg_def(def.border_color, SG_BORDERCOLOR_OPAQUE_BLACK); + def.max_anisotropy = _sg_def(def.max_anisotropy, 1); + def.max_lod = _sg_def_flt(def.max_lod, FLT_MAX); + return def; +} + +_SOKOL_PRIVATE sg_shader_desc _sg_shader_desc_defaults(const sg_shader_desc* desc) { + sg_shader_desc def = *desc; + #if defined(SOKOL_METAL) + def.vs.entry = _sg_def(def.vs.entry, "_main"); + def.fs.entry = _sg_def(def.fs.entry, "_main"); + #else + def.vs.entry = _sg_def(def.vs.entry, "main"); + def.fs.entry = _sg_def(def.fs.entry, "main"); + #endif + #if defined(SOKOL_D3D11) + if (def.vs.source) { + def.vs.d3d11_target = _sg_def(def.vs.d3d11_target, "vs_4_0"); + } + if (def.fs.source) { + def.fs.d3d11_target = _sg_def(def.fs.d3d11_target, "ps_4_0"); + } + #endif + for (int stage_index = 0; stage_index < SG_NUM_SHADER_STAGES; stage_index++) { + sg_shader_stage_desc* stage_desc = (stage_index == SG_SHADERSTAGE_VS)? 
&def.vs : &def.fs; + for (int ub_index = 0; ub_index < SG_MAX_SHADERSTAGE_UBS; ub_index++) { + sg_shader_uniform_block_desc* ub_desc = &stage_desc->uniform_blocks[ub_index]; + if (0 == ub_desc->size) { + break; + } + for (int u_index = 0; u_index < SG_MAX_UB_MEMBERS; u_index++) { + sg_shader_uniform_desc* u_desc = &ub_desc->uniforms[u_index]; + if (u_desc->type == SG_UNIFORMTYPE_INVALID) { + break; + } + u_desc->array_count = _sg_def(u_desc->array_count, 1); + } + } + for (int img_index = 0; img_index < SG_MAX_SHADERSTAGE_IMAGES; img_index++) { + sg_shader_image_desc* img_desc = &stage_desc->images[img_index]; + if (img_desc->image_type == _SG_IMAGETYPE_DEFAULT) { + break; + } + img_desc->sampler_type = _sg_def(img_desc->sampler_type, SG_SAMPLERTYPE_FLOAT); + } + } + return def; +} + +_SOKOL_PRIVATE sg_pipeline_desc _sg_pipeline_desc_defaults(const sg_pipeline_desc* desc) { + sg_pipeline_desc def = *desc; + + def.primitive_type = _sg_def(def.primitive_type, SG_PRIMITIVETYPE_TRIANGLES); + def.index_type = _sg_def(def.index_type, SG_INDEXTYPE_NONE); + def.cull_mode = _sg_def(def.cull_mode, SG_CULLMODE_NONE); + def.face_winding = _sg_def(def.face_winding, SG_FACEWINDING_CW); + def.sample_count = _sg_def(def.sample_count, _sg.desc.context.sample_count); + + def.stencil.front.compare = _sg_def(def.stencil.front.compare, SG_COMPAREFUNC_ALWAYS); + def.stencil.front.fail_op = _sg_def(def.stencil.front.fail_op, SG_STENCILOP_KEEP); + def.stencil.front.depth_fail_op = _sg_def(def.stencil.front.depth_fail_op, SG_STENCILOP_KEEP); + def.stencil.front.pass_op = _sg_def(def.stencil.front.pass_op, SG_STENCILOP_KEEP); + def.stencil.back.compare = _sg_def(def.stencil.back.compare, SG_COMPAREFUNC_ALWAYS); + def.stencil.back.fail_op = _sg_def(def.stencil.back.fail_op, SG_STENCILOP_KEEP); + def.stencil.back.depth_fail_op = _sg_def(def.stencil.back.depth_fail_op, SG_STENCILOP_KEEP); + def.stencil.back.pass_op = _sg_def(def.stencil.back.pass_op, SG_STENCILOP_KEEP); + + def.depth.compare = _sg_def(def.depth.compare, SG_COMPAREFUNC_ALWAYS); + def.depth.pixel_format = _sg_def(def.depth.pixel_format, _sg.desc.context.depth_format); + def.color_count = _sg_def(def.color_count, 1); + if (def.color_count > SG_MAX_COLOR_ATTACHMENTS) { + def.color_count = SG_MAX_COLOR_ATTACHMENTS; + } + for (int i = 0; i < def.color_count; i++) { + sg_color_state* cs = &def.colors[i]; + cs->pixel_format = _sg_def(cs->pixel_format, _sg.desc.context.color_format); + cs->write_mask = _sg_def(cs->write_mask, SG_COLORMASK_RGBA); + sg_blend_state* bs = &def.colors[i].blend; + bs->src_factor_rgb = _sg_def(bs->src_factor_rgb, SG_BLENDFACTOR_ONE); + bs->dst_factor_rgb = _sg_def(bs->dst_factor_rgb, SG_BLENDFACTOR_ZERO); + bs->op_rgb = _sg_def(bs->op_rgb, SG_BLENDOP_ADD); + bs->src_factor_alpha = _sg_def(bs->src_factor_alpha, SG_BLENDFACTOR_ONE); + bs->dst_factor_alpha = _sg_def(bs->dst_factor_alpha, SG_BLENDFACTOR_ZERO); + bs->op_alpha = _sg_def(bs->op_alpha, SG_BLENDOP_ADD); + } + + for (int attr_index = 0; attr_index < SG_MAX_VERTEX_ATTRIBUTES; attr_index++) { + sg_vertex_attr_desc* a_desc = &def.layout.attrs[attr_index]; + if (a_desc->format == SG_VERTEXFORMAT_INVALID) { + break; + } + SOKOL_ASSERT(a_desc->buffer_index < SG_MAX_SHADERSTAGE_BUFFERS); + sg_buffer_layout_desc* b_desc = &def.layout.buffers[a_desc->buffer_index]; + b_desc->step_func = _sg_def(b_desc->step_func, SG_VERTEXSTEP_PER_VERTEX); + b_desc->step_rate = _sg_def(b_desc->step_rate, 1); + } + + /* resolve vertex layout strides and offsets */ + int 
auto_offset[SG_MAX_SHADERSTAGE_BUFFERS]; + memset(auto_offset, 0, sizeof(auto_offset)); + bool use_auto_offset = true; + for (int attr_index = 0; attr_index < SG_MAX_VERTEX_ATTRIBUTES; attr_index++) { + /* to use computed offsets, *all* attr offsets must be 0 */ + if (def.layout.attrs[attr_index].offset != 0) { + use_auto_offset = false; + } + } + for (int attr_index = 0; attr_index < SG_MAX_VERTEX_ATTRIBUTES; attr_index++) { + sg_vertex_attr_desc* a_desc = &def.layout.attrs[attr_index]; + if (a_desc->format == SG_VERTEXFORMAT_INVALID) { + break; + } + SOKOL_ASSERT(a_desc->buffer_index < SG_MAX_SHADERSTAGE_BUFFERS); + if (use_auto_offset) { + a_desc->offset = auto_offset[a_desc->buffer_index]; + } + auto_offset[a_desc->buffer_index] += _sg_vertexformat_bytesize(a_desc->format); + } + /* compute vertex strides if needed */ + for (int buf_index = 0; buf_index < SG_MAX_SHADERSTAGE_BUFFERS; buf_index++) { + sg_buffer_layout_desc* l_desc = &def.layout.buffers[buf_index]; + if (l_desc->stride == 0) { + l_desc->stride = auto_offset[buf_index]; + } + } + + return def; +} + +_SOKOL_PRIVATE sg_pass_desc _sg_pass_desc_defaults(const sg_pass_desc* desc) { + /* FIXME: no values to replace in sg_pass_desc? */ + sg_pass_desc def = *desc; + return def; +} + +/*== allocate/initialize resource private functions ==========================*/ +_SOKOL_PRIVATE sg_buffer _sg_alloc_buffer(void) { + sg_buffer res; + int slot_index = _sg_pool_alloc_index(&_sg.pools.buffer_pool); + if (_SG_INVALID_SLOT_INDEX != slot_index) { + res.id = _sg_slot_alloc(&_sg.pools.buffer_pool, &_sg.pools.buffers[slot_index].slot, slot_index); + } + else { + /* pool is exhausted */ + res.id = SG_INVALID_ID; + } + return res; +} + +_SOKOL_PRIVATE sg_image _sg_alloc_image(void) { + sg_image res; + int slot_index = _sg_pool_alloc_index(&_sg.pools.image_pool); + if (_SG_INVALID_SLOT_INDEX != slot_index) { + res.id = _sg_slot_alloc(&_sg.pools.image_pool, &_sg.pools.images[slot_index].slot, slot_index); + } + else { + /* pool is exhausted */ + res.id = SG_INVALID_ID; + } + return res; +} + +_SOKOL_PRIVATE sg_shader _sg_alloc_shader(void) { + sg_shader res; + int slot_index = _sg_pool_alloc_index(&_sg.pools.shader_pool); + if (_SG_INVALID_SLOT_INDEX != slot_index) { + res.id = _sg_slot_alloc(&_sg.pools.shader_pool, &_sg.pools.shaders[slot_index].slot, slot_index); + } + else { + /* pool is exhausted */ + res.id = SG_INVALID_ID; + } + return res; +} + +_SOKOL_PRIVATE sg_pipeline _sg_alloc_pipeline(void) { + sg_pipeline res; + int slot_index = _sg_pool_alloc_index(&_sg.pools.pipeline_pool); + if (_SG_INVALID_SLOT_INDEX != slot_index) { + res.id =_sg_slot_alloc(&_sg.pools.pipeline_pool, &_sg.pools.pipelines[slot_index].slot, slot_index); + } + else { + /* pool is exhausted */ + res.id = SG_INVALID_ID; + } + return res; +} + +_SOKOL_PRIVATE sg_pass _sg_alloc_pass(void) { + sg_pass res; + int slot_index = _sg_pool_alloc_index(&_sg.pools.pass_pool); + if (_SG_INVALID_SLOT_INDEX != slot_index) { + res.id = _sg_slot_alloc(&_sg.pools.pass_pool, &_sg.pools.passes[slot_index].slot, slot_index); + } + else { + /* pool is exhausted */ + res.id = SG_INVALID_ID; + } + return res; +} + +_SOKOL_PRIVATE void _sg_dealloc_buffer(sg_buffer buf_id) { + SOKOL_ASSERT(buf_id.id != SG_INVALID_ID); + _sg_buffer_t* buf = _sg_lookup_buffer(&_sg.pools, buf_id.id); + SOKOL_ASSERT(buf && buf->slot.state == SG_RESOURCESTATE_ALLOC); + _sg_reset_slot(&buf->slot); + _sg_pool_free_index(&_sg.pools.buffer_pool, _sg_slot_index(buf_id.id)); +} + +_SOKOL_PRIVATE void 
_sg_dealloc_image(sg_image img_id) { + SOKOL_ASSERT(img_id.id != SG_INVALID_ID); + _sg_image_t* img = _sg_lookup_image(&_sg.pools, img_id.id); + SOKOL_ASSERT(img && img->slot.state == SG_RESOURCESTATE_ALLOC); + _sg_reset_slot(&img->slot); + _sg_pool_free_index(&_sg.pools.image_pool, _sg_slot_index(img_id.id)); +} + +_SOKOL_PRIVATE void _sg_dealloc_shader(sg_shader shd_id) { + SOKOL_ASSERT(shd_id.id != SG_INVALID_ID); + _sg_shader_t* shd = _sg_lookup_shader(&_sg.pools, shd_id.id); + SOKOL_ASSERT(shd && shd->slot.state == SG_RESOURCESTATE_ALLOC); + _sg_reset_slot(&shd->slot); + _sg_pool_free_index(&_sg.pools.shader_pool, _sg_slot_index(shd_id.id)); +} + +_SOKOL_PRIVATE void _sg_dealloc_pipeline(sg_pipeline pip_id) { + SOKOL_ASSERT(pip_id.id != SG_INVALID_ID); + _sg_pipeline_t* pip = _sg_lookup_pipeline(&_sg.pools, pip_id.id); + SOKOL_ASSERT(pip && pip->slot.state == SG_RESOURCESTATE_ALLOC); + _sg_reset_slot(&pip->slot); + _sg_pool_free_index(&_sg.pools.pipeline_pool, _sg_slot_index(pip_id.id)); +} + +_SOKOL_PRIVATE void _sg_dealloc_pass(sg_pass pass_id) { + SOKOL_ASSERT(pass_id.id != SG_INVALID_ID); + _sg_pass_t* pass = _sg_lookup_pass(&_sg.pools, pass_id.id); + SOKOL_ASSERT(pass && pass->slot.state == SG_RESOURCESTATE_ALLOC); + _sg_reset_slot(&pass->slot); + _sg_pool_free_index(&_sg.pools.pass_pool, _sg_slot_index(pass_id.id)); +} + +_SOKOL_PRIVATE void _sg_init_buffer(sg_buffer buf_id, const sg_buffer_desc* desc) { + SOKOL_ASSERT(buf_id.id != SG_INVALID_ID && desc); + _sg_buffer_t* buf = _sg_lookup_buffer(&_sg.pools, buf_id.id); + SOKOL_ASSERT(buf && buf->slot.state == SG_RESOURCESTATE_ALLOC); + buf->slot.ctx_id = _sg.active_context.id; + if (_sg_validate_buffer_desc(desc)) { + buf->slot.state = _sg_create_buffer(buf, desc); + } + else { + buf->slot.state = SG_RESOURCESTATE_FAILED; + } + SOKOL_ASSERT((buf->slot.state == SG_RESOURCESTATE_VALID)||(buf->slot.state == SG_RESOURCESTATE_FAILED)); +} + +_SOKOL_PRIVATE void _sg_init_image(sg_image img_id, const sg_image_desc* desc) { + SOKOL_ASSERT(img_id.id != SG_INVALID_ID && desc); + _sg_image_t* img = _sg_lookup_image(&_sg.pools, img_id.id); + SOKOL_ASSERT(img && img->slot.state == SG_RESOURCESTATE_ALLOC); + img->slot.ctx_id = _sg.active_context.id; + if (_sg_validate_image_desc(desc)) { + img->slot.state = _sg_create_image(img, desc); + } + else { + img->slot.state = SG_RESOURCESTATE_FAILED; + } + SOKOL_ASSERT((img->slot.state == SG_RESOURCESTATE_VALID)||(img->slot.state == SG_RESOURCESTATE_FAILED)); +} + +_SOKOL_PRIVATE void _sg_init_shader(sg_shader shd_id, const sg_shader_desc* desc) { + SOKOL_ASSERT(shd_id.id != SG_INVALID_ID && desc); + _sg_shader_t* shd = _sg_lookup_shader(&_sg.pools, shd_id.id); + SOKOL_ASSERT(shd && shd->slot.state == SG_RESOURCESTATE_ALLOC); + shd->slot.ctx_id = _sg.active_context.id; + if (_sg_validate_shader_desc(desc)) { + shd->slot.state = _sg_create_shader(shd, desc); + } + else { + shd->slot.state = SG_RESOURCESTATE_FAILED; + } + SOKOL_ASSERT((shd->slot.state == SG_RESOURCESTATE_VALID)||(shd->slot.state == SG_RESOURCESTATE_FAILED)); +} + +_SOKOL_PRIVATE void _sg_init_pipeline(sg_pipeline pip_id, const sg_pipeline_desc* desc) { + SOKOL_ASSERT(pip_id.id != SG_INVALID_ID && desc); + _sg_pipeline_t* pip = _sg_lookup_pipeline(&_sg.pools, pip_id.id); + SOKOL_ASSERT(pip && pip->slot.state == SG_RESOURCESTATE_ALLOC); + pip->slot.ctx_id = _sg.active_context.id; + if (_sg_validate_pipeline_desc(desc)) { + _sg_shader_t* shd = _sg_lookup_shader(&_sg.pools, desc->shader.id); + if (shd && (shd->slot.state == 
SG_RESOURCESTATE_VALID)) { + pip->slot.state = _sg_create_pipeline(pip, shd, desc); + } + else { + pip->slot.state = SG_RESOURCESTATE_FAILED; + } + } + else { + pip->slot.state = SG_RESOURCESTATE_FAILED; + } + SOKOL_ASSERT((pip->slot.state == SG_RESOURCESTATE_VALID)||(pip->slot.state == SG_RESOURCESTATE_FAILED)); +} + +_SOKOL_PRIVATE void _sg_init_pass(sg_pass pass_id, const sg_pass_desc* desc) { + SOKOL_ASSERT(pass_id.id != SG_INVALID_ID && desc); + _sg_pass_t* pass = _sg_lookup_pass(&_sg.pools, pass_id.id); + SOKOL_ASSERT(pass && pass->slot.state == SG_RESOURCESTATE_ALLOC); + pass->slot.ctx_id = _sg.active_context.id; + if (_sg_validate_pass_desc(desc)) { + /* lookup pass attachment image pointers */ + _sg_image_t* att_imgs[SG_MAX_COLOR_ATTACHMENTS + 1]; + for (int i = 0; i < SG_MAX_COLOR_ATTACHMENTS; i++) { + if (desc->color_attachments[i].image.id) { + att_imgs[i] = _sg_lookup_image(&_sg.pools, desc->color_attachments[i].image.id); + /* FIXME: this shouldn't be an assertion, but result in a SG_RESOURCESTATE_FAILED pass */ + SOKOL_ASSERT(att_imgs[i] && att_imgs[i]->slot.state == SG_RESOURCESTATE_VALID); + } + else { + att_imgs[i] = 0; + } + } + const int ds_att_index = SG_MAX_COLOR_ATTACHMENTS; + if (desc->depth_stencil_attachment.image.id) { + att_imgs[ds_att_index] = _sg_lookup_image(&_sg.pools, desc->depth_stencil_attachment.image.id); + /* FIXME: this shouldn't be an assertion, but result in a SG_RESOURCESTATE_FAILED pass */ + SOKOL_ASSERT(att_imgs[ds_att_index] && att_imgs[ds_att_index]->slot.state == SG_RESOURCESTATE_VALID); + } + else { + att_imgs[ds_att_index] = 0; + } + pass->slot.state = _sg_create_pass(pass, att_imgs, desc); + } + else { + pass->slot.state = SG_RESOURCESTATE_FAILED; + } + SOKOL_ASSERT((pass->slot.state == SG_RESOURCESTATE_VALID)||(pass->slot.state == SG_RESOURCESTATE_FAILED)); +} + +_SOKOL_PRIVATE bool _sg_uninit_buffer(sg_buffer buf_id) { + _sg_buffer_t* buf = _sg_lookup_buffer(&_sg.pools, buf_id.id); + if (buf) { + if (buf->slot.ctx_id == _sg.active_context.id) { + _sg_destroy_buffer(buf); + _sg_reset_buffer(buf); + return true; + } + else { + SOKOL_LOG("_sg_uninit_buffer: active context mismatch (must be same as for creation)"); + _SG_TRACE_NOARGS(err_context_mismatch); + } + } + return false; +} + +_SOKOL_PRIVATE bool _sg_uninit_image(sg_image img_id) { + _sg_image_t* img = _sg_lookup_image(&_sg.pools, img_id.id); + if (img) { + if (img->slot.ctx_id == _sg.active_context.id) { + _sg_destroy_image(img); + _sg_reset_image(img); + return true; + } + else { + SOKOL_LOG("_sg_uninit_image: active context mismatch (must be same as for creation)"); + _SG_TRACE_NOARGS(err_context_mismatch); + } + } + return false; +} + +_SOKOL_PRIVATE bool _sg_uninit_shader(sg_shader shd_id) { + _sg_shader_t* shd = _sg_lookup_shader(&_sg.pools, shd_id.id); + if (shd) { + if (shd->slot.ctx_id == _sg.active_context.id) { + _sg_destroy_shader(shd); + _sg_reset_shader(shd); + return true; + } + else { + SOKOL_LOG("_sg_uninit_shader: active context mismatch (must be same as for creation)"); + _SG_TRACE_NOARGS(err_context_mismatch); + } + } + return false; +} + +_SOKOL_PRIVATE bool _sg_uninit_pipeline(sg_pipeline pip_id) { + _sg_pipeline_t* pip = _sg_lookup_pipeline(&_sg.pools, pip_id.id); + if (pip) { + if (pip->slot.ctx_id == _sg.active_context.id) { + _sg_destroy_pipeline(pip); + _sg_reset_pipeline(pip); + return true; + } + else { + SOKOL_LOG("_sg_uninit_pipeline: active context mismatch (must be same as for creation)"); + _SG_TRACE_NOARGS(err_context_mismatch); + } + } + return 
false; +} + +_SOKOL_PRIVATE bool _sg_uninit_pass(sg_pass pass_id) { + _sg_pass_t* pass = _sg_lookup_pass(&_sg.pools, pass_id.id); + if (pass) { + if (pass->slot.ctx_id == _sg.active_context.id) { + _sg_destroy_pass(pass); + _sg_reset_pass(pass); + return true; + } + else { + SOKOL_LOG("_sg_uninit_pass: active context mismatch (must be same as for creation)"); + _SG_TRACE_NOARGS(err_context_mismatch); + } + } + return false; +} + +/*== PUBLIC API FUNCTIONS ====================================================*/ + +#if defined(SOKOL_METAL) + // this is ARC compatible + #if defined(__cplusplus) + #define _SG_CLEAR(type, item) { item = (type) { }; } + #else + #define _SG_CLEAR(type, item) { item = (type) { 0 }; } + #endif +#else + #define _SG_CLEAR(type, item) { memset(&item, 0, sizeof(item)); } +#endif + +SOKOL_API_IMPL void sg_setup(const sg_desc* desc) { + SOKOL_ASSERT(desc); + SOKOL_ASSERT((desc->_start_canary == 0) && (desc->_end_canary == 0)); + _SG_CLEAR(_sg_state_t, _sg); + _sg.desc = *desc; + + /* replace zero-init items with their default values + NOTE: on WebGPU, the default color pixel format MUST be provided, + cannot be a default compile-time constant. + */ + #if defined(SOKOL_WGPU) + SOKOL_ASSERT(SG_PIXELFORMAT_NONE != _sg.desc.context.color_format); + #elif defined(SOKOL_METAL) || defined(SOKOL_D3D11) + _sg.desc.context.color_format = _sg_def(_sg.desc.context.color_format, SG_PIXELFORMAT_BGRA8); + #else + _sg.desc.context.color_format = _sg_def(_sg.desc.context.color_format, SG_PIXELFORMAT_RGBA8); + #endif + _sg.desc.context.depth_format = _sg_def(_sg.desc.context.depth_format, SG_PIXELFORMAT_DEPTH_STENCIL); + _sg.desc.context.sample_count = _sg_def(_sg.desc.context.sample_count, 1); + _sg.desc.buffer_pool_size = _sg_def(_sg.desc.buffer_pool_size, _SG_DEFAULT_BUFFER_POOL_SIZE); + _sg.desc.image_pool_size = _sg_def(_sg.desc.image_pool_size, _SG_DEFAULT_IMAGE_POOL_SIZE); + _sg.desc.shader_pool_size = _sg_def(_sg.desc.shader_pool_size, _SG_DEFAULT_SHADER_POOL_SIZE); + _sg.desc.pipeline_pool_size = _sg_def(_sg.desc.pipeline_pool_size, _SG_DEFAULT_PIPELINE_POOL_SIZE); + _sg.desc.pass_pool_size = _sg_def(_sg.desc.pass_pool_size, _SG_DEFAULT_PASS_POOL_SIZE); + _sg.desc.context_pool_size = _sg_def(_sg.desc.context_pool_size, _SG_DEFAULT_CONTEXT_POOL_SIZE); + _sg.desc.uniform_buffer_size = _sg_def(_sg.desc.uniform_buffer_size, _SG_DEFAULT_UB_SIZE); + _sg.desc.staging_buffer_size = _sg_def(_sg.desc.staging_buffer_size, _SG_DEFAULT_STAGING_SIZE); + _sg.desc.sampler_cache_size = _sg_def(_sg.desc.sampler_cache_size, _SG_DEFAULT_SAMPLER_CACHE_CAPACITY); + + _sg_setup_pools(&_sg.pools, &_sg.desc); + _sg.frame_index = 1; + _sg_setup_backend(&_sg.desc); + _sg.valid = true; + sg_setup_context(); +} + +SOKOL_API_IMPL void sg_shutdown(void) { + /* can only delete resources for the currently set context here, if multiple + contexts are used, the app code must take care of properly releasing them + (since only the app code can switch between 3D-API contexts) + */ + if (_sg.active_context.id != SG_INVALID_ID) { + _sg_context_t* ctx = _sg_lookup_context(&_sg.pools, _sg.active_context.id); + if (ctx) { + _sg_destroy_all_resources(&_sg.pools, _sg.active_context.id); + _sg_destroy_context(ctx); + } + } + _sg_discard_backend(); + _sg_discard_pools(&_sg.pools); + _sg.valid = false; +} + +SOKOL_API_IMPL bool sg_isvalid(void) { + return _sg.valid; +} + +SOKOL_API_IMPL sg_desc sg_query_desc(void) { + SOKOL_ASSERT(_sg.valid); + return _sg.desc; +} + +SOKOL_API_IMPL sg_backend sg_query_backend(void) { + 
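/* returns the backend selected at compile time (e.g. SG_BACKEND_GLCORE33, SG_BACKEND_D3D11, SG_BACKEND_METAL_MACOS or SG_BACKEND_DUMMY); _sg.backend is filled in during backend setup */ +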
SOKOL_ASSERT(_sg.valid); + return _sg.backend; +} + +SOKOL_API_IMPL sg_features sg_query_features(void) { + SOKOL_ASSERT(_sg.valid); + return _sg.features; +} + +SOKOL_API_IMPL sg_limits sg_query_limits(void) { + SOKOL_ASSERT(_sg.valid); + return _sg.limits; +} + +SOKOL_API_IMPL sg_pixelformat_info sg_query_pixelformat(sg_pixel_format fmt) { + SOKOL_ASSERT(_sg.valid); + int fmt_index = (int) fmt; + SOKOL_ASSERT((fmt_index > SG_PIXELFORMAT_NONE) && (fmt_index < _SG_PIXELFORMAT_NUM)); + return _sg.formats[fmt_index]; +} + +SOKOL_API_IMPL sg_context sg_setup_context(void) { + SOKOL_ASSERT(_sg.valid); + sg_context res; + int slot_index = _sg_pool_alloc_index(&_sg.pools.context_pool); + if (_SG_INVALID_SLOT_INDEX != slot_index) { + res.id = _sg_slot_alloc(&_sg.pools.context_pool, &_sg.pools.contexts[slot_index].slot, slot_index); + _sg_context_t* ctx = _sg_context_at(&_sg.pools, res.id); + ctx->slot.state = _sg_create_context(ctx); + SOKOL_ASSERT(ctx->slot.state == SG_RESOURCESTATE_VALID); + _sg_activate_context(ctx); + } + else { + /* pool is exhausted */ + res.id = SG_INVALID_ID; + } + _sg.active_context = res; + return res; +} + +SOKOL_API_IMPL void sg_discard_context(sg_context ctx_id) { + SOKOL_ASSERT(_sg.valid); + _sg_destroy_all_resources(&_sg.pools, ctx_id.id); + _sg_context_t* ctx = _sg_lookup_context(&_sg.pools, ctx_id.id); + if (ctx) { + _sg_destroy_context(ctx); + _sg_reset_context(ctx); + _sg_reset_slot(&ctx->slot); + _sg_pool_free_index(&_sg.pools.context_pool, _sg_slot_index(ctx_id.id)); + } + _sg.active_context.id = SG_INVALID_ID; + _sg_activate_context(0); +} + +SOKOL_API_IMPL void sg_activate_context(sg_context ctx_id) { + SOKOL_ASSERT(_sg.valid); + _sg.active_context = ctx_id; + _sg_context_t* ctx = _sg_lookup_context(&_sg.pools, ctx_id.id); + /* NOTE: ctx can be 0 here if the context is no longer valid */ + _sg_activate_context(ctx); +} + +SOKOL_API_IMPL sg_trace_hooks sg_install_trace_hooks(const sg_trace_hooks* trace_hooks) { + SOKOL_ASSERT(_sg.valid); + SOKOL_ASSERT(trace_hooks); + _SOKOL_UNUSED(trace_hooks); + #if defined(SOKOL_TRACE_HOOKS) + sg_trace_hooks old_hooks = _sg.hooks; + _sg.hooks = *trace_hooks; + #else + static sg_trace_hooks old_hooks; + SOKOL_LOG("sg_install_trace_hooks() called, but SG_TRACE_HOOKS is not defined!"); + #endif + return old_hooks; +} + +SOKOL_API_IMPL sg_buffer sg_alloc_buffer(void) { + SOKOL_ASSERT(_sg.valid); + sg_buffer res = _sg_alloc_buffer(); + _SG_TRACE_ARGS(alloc_buffer, res); + return res; +} + +SOKOL_API_IMPL sg_image sg_alloc_image(void) { + SOKOL_ASSERT(_sg.valid); + sg_image res = _sg_alloc_image(); + _SG_TRACE_ARGS(alloc_image, res); + return res; +} + +SOKOL_API_IMPL sg_shader sg_alloc_shader(void) { + SOKOL_ASSERT(_sg.valid); + sg_shader res = _sg_alloc_shader(); + _SG_TRACE_ARGS(alloc_shader, res); + return res; +} + +SOKOL_API_IMPL sg_pipeline sg_alloc_pipeline(void) { + SOKOL_ASSERT(_sg.valid); + sg_pipeline res = _sg_alloc_pipeline(); + _SG_TRACE_ARGS(alloc_pipeline, res); + return res; +} + +SOKOL_API_IMPL sg_pass sg_alloc_pass(void) { + SOKOL_ASSERT(_sg.valid); + sg_pass res = _sg_alloc_pass(); + _SG_TRACE_ARGS(alloc_pass, res); + return res; +} + +SOKOL_API_IMPL void sg_dealloc_buffer(sg_buffer buf_id) { + SOKOL_ASSERT(_sg.valid); + _sg_dealloc_buffer(buf_id); + _SG_TRACE_ARGS(dealloc_buffer, buf_id); +} + +SOKOL_API_IMPL void sg_dealloc_image(sg_image img_id) { + SOKOL_ASSERT(_sg.valid); + _sg_dealloc_image(img_id); + _SG_TRACE_ARGS(dealloc_image, img_id); +} + +SOKOL_API_IMPL void sg_dealloc_shader(sg_shader shd_id) 
{ + SOKOL_ASSERT(_sg.valid); + _sg_dealloc_shader(shd_id); + _SG_TRACE_ARGS(dealloc_shader, shd_id); +} + +SOKOL_API_IMPL void sg_dealloc_pipeline(sg_pipeline pip_id) { + SOKOL_ASSERT(_sg.valid); + _sg_dealloc_pipeline(pip_id); + _SG_TRACE_ARGS(dealloc_pipeline, pip_id); +} + +SOKOL_API_IMPL void sg_dealloc_pass(sg_pass pass_id) { + SOKOL_ASSERT(_sg.valid); + _sg_dealloc_pass(pass_id); + _SG_TRACE_ARGS(dealloc_pass, pass_id); +} + +SOKOL_API_IMPL void sg_init_buffer(sg_buffer buf_id, const sg_buffer_desc* desc) { + SOKOL_ASSERT(_sg.valid); + sg_buffer_desc desc_def = _sg_buffer_desc_defaults(desc); + _sg_init_buffer(buf_id, &desc_def); + _SG_TRACE_ARGS(init_buffer, buf_id, &desc_def); +} + +SOKOL_API_IMPL void sg_init_image(sg_image img_id, const sg_image_desc* desc) { + SOKOL_ASSERT(_sg.valid); + sg_image_desc desc_def = _sg_image_desc_defaults(desc); + _sg_init_image(img_id, &desc_def); + _SG_TRACE_ARGS(init_image, img_id, &desc_def); +} + +SOKOL_API_IMPL void sg_init_shader(sg_shader shd_id, const sg_shader_desc* desc) { + SOKOL_ASSERT(_sg.valid); + sg_shader_desc desc_def = _sg_shader_desc_defaults(desc); + _sg_init_shader(shd_id, &desc_def); + _SG_TRACE_ARGS(init_shader, shd_id, &desc_def); +} + +SOKOL_API_IMPL void sg_init_pipeline(sg_pipeline pip_id, const sg_pipeline_desc* desc) { + SOKOL_ASSERT(_sg.valid); + sg_pipeline_desc desc_def = _sg_pipeline_desc_defaults(desc); + _sg_init_pipeline(pip_id, &desc_def); + _SG_TRACE_ARGS(init_pipeline, pip_id, &desc_def); +} + +SOKOL_API_IMPL void sg_init_pass(sg_pass pass_id, const sg_pass_desc* desc) { + SOKOL_ASSERT(_sg.valid); + sg_pass_desc desc_def = _sg_pass_desc_defaults(desc); + _sg_init_pass(pass_id, &desc_def); + _SG_TRACE_ARGS(init_pass, pass_id, &desc_def); +} + +SOKOL_API_IMPL bool sg_uninit_buffer(sg_buffer buf_id) { + SOKOL_ASSERT(_sg.valid); + bool res = _sg_uninit_buffer(buf_id); + _SG_TRACE_ARGS(uninit_buffer, buf_id); + return res; +} + +SOKOL_API_IMPL bool sg_uninit_image(sg_image img_id) { + SOKOL_ASSERT(_sg.valid); + bool res = _sg_uninit_image(img_id); + _SG_TRACE_ARGS(uninit_image, img_id); + return res; +} + +SOKOL_API_IMPL bool sg_uninit_shader(sg_shader shd_id) { + SOKOL_ASSERT(_sg.valid); + bool res = _sg_uninit_shader(shd_id); + _SG_TRACE_ARGS(uninit_shader, shd_id); + return res; +} + +SOKOL_API_IMPL bool sg_uninit_pipeline(sg_pipeline pip_id) { + SOKOL_ASSERT(_sg.valid); + bool res = _sg_uninit_pipeline(pip_id); + _SG_TRACE_ARGS(uninit_pipeline, pip_id); + return res; +} + +SOKOL_API_IMPL bool sg_uninit_pass(sg_pass pass_id) { + SOKOL_ASSERT(_sg.valid); + bool res = _sg_uninit_pass(pass_id); + _SG_TRACE_ARGS(uninit_pass, pass_id); + return res; +} + +/*-- set allocated resource to failed state ----------------------------------*/ +SOKOL_API_IMPL void sg_fail_buffer(sg_buffer buf_id) { + SOKOL_ASSERT(_sg.valid); + SOKOL_ASSERT(buf_id.id != SG_INVALID_ID); + _sg_buffer_t* buf = _sg_lookup_buffer(&_sg.pools, buf_id.id); + SOKOL_ASSERT(buf && buf->slot.state == SG_RESOURCESTATE_ALLOC); + buf->slot.ctx_id = _sg.active_context.id; + buf->slot.state = SG_RESOURCESTATE_FAILED; + _SG_TRACE_ARGS(fail_buffer, buf_id); +} + +SOKOL_API_IMPL void sg_fail_image(sg_image img_id) { + SOKOL_ASSERT(_sg.valid); + SOKOL_ASSERT(img_id.id != SG_INVALID_ID); + _sg_image_t* img = _sg_lookup_image(&_sg.pools, img_id.id); + SOKOL_ASSERT(img && img->slot.state == SG_RESOURCESTATE_ALLOC); + img->slot.ctx_id = _sg.active_context.id; + img->slot.state = SG_RESOURCESTATE_FAILED; + _SG_TRACE_ARGS(fail_image, img_id); +} + +SOKOL_API_IMPL void 
sg_fail_shader(sg_shader shd_id) { + SOKOL_ASSERT(_sg.valid); + SOKOL_ASSERT(shd_id.id != SG_INVALID_ID); + _sg_shader_t* shd = _sg_lookup_shader(&_sg.pools, shd_id.id); + SOKOL_ASSERT(shd && shd->slot.state == SG_RESOURCESTATE_ALLOC); + shd->slot.ctx_id = _sg.active_context.id; + shd->slot.state = SG_RESOURCESTATE_FAILED; + _SG_TRACE_ARGS(fail_shader, shd_id); +} + +SOKOL_API_IMPL void sg_fail_pipeline(sg_pipeline pip_id) { + SOKOL_ASSERT(_sg.valid); + SOKOL_ASSERT(pip_id.id != SG_INVALID_ID); + _sg_pipeline_t* pip = _sg_lookup_pipeline(&_sg.pools, pip_id.id); + SOKOL_ASSERT(pip && pip->slot.state == SG_RESOURCESTATE_ALLOC); + pip->slot.ctx_id = _sg.active_context.id; + pip->slot.state = SG_RESOURCESTATE_FAILED; + _SG_TRACE_ARGS(fail_pipeline, pip_id); +} + +SOKOL_API_IMPL void sg_fail_pass(sg_pass pass_id) { + SOKOL_ASSERT(_sg.valid); + SOKOL_ASSERT(pass_id.id != SG_INVALID_ID); + _sg_pass_t* pass = _sg_lookup_pass(&_sg.pools, pass_id.id); + SOKOL_ASSERT(pass && pass->slot.state == SG_RESOURCESTATE_ALLOC); + pass->slot.ctx_id = _sg.active_context.id; + pass->slot.state = SG_RESOURCESTATE_FAILED; + _SG_TRACE_ARGS(fail_pass, pass_id); +} + +/*-- get resource state */ +SOKOL_API_IMPL sg_resource_state sg_query_buffer_state(sg_buffer buf_id) { + SOKOL_ASSERT(_sg.valid); + _sg_buffer_t* buf = _sg_lookup_buffer(&_sg.pools, buf_id.id); + sg_resource_state res = buf ? buf->slot.state : SG_RESOURCESTATE_INVALID; + return res; +} + +SOKOL_API_IMPL sg_resource_state sg_query_image_state(sg_image img_id) { + SOKOL_ASSERT(_sg.valid); + _sg_image_t* img = _sg_lookup_image(&_sg.pools, img_id.id); + sg_resource_state res = img ? img->slot.state : SG_RESOURCESTATE_INVALID; + return res; +} + +SOKOL_API_IMPL sg_resource_state sg_query_shader_state(sg_shader shd_id) { + SOKOL_ASSERT(_sg.valid); + _sg_shader_t* shd = _sg_lookup_shader(&_sg.pools, shd_id.id); + sg_resource_state res = shd ? shd->slot.state : SG_RESOURCESTATE_INVALID; + return res; +} + +SOKOL_API_IMPL sg_resource_state sg_query_pipeline_state(sg_pipeline pip_id) { + SOKOL_ASSERT(_sg.valid); + _sg_pipeline_t* pip = _sg_lookup_pipeline(&_sg.pools, pip_id.id); + sg_resource_state res = pip ? pip->slot.state : SG_RESOURCESTATE_INVALID; + return res; +} + +SOKOL_API_IMPL sg_resource_state sg_query_pass_state(sg_pass pass_id) { + SOKOL_ASSERT(_sg.valid); + _sg_pass_t* pass = _sg_lookup_pass(&_sg.pools, pass_id.id); + sg_resource_state res = pass ? 
pass->slot.state : SG_RESOURCESTATE_INVALID; + return res; +} + +/*-- allocate and initialize resource ----------------------------------------*/ +SOKOL_API_IMPL sg_buffer sg_make_buffer(const sg_buffer_desc* desc) { + SOKOL_ASSERT(_sg.valid); + SOKOL_ASSERT(desc); + sg_buffer_desc desc_def = _sg_buffer_desc_defaults(desc); + sg_buffer buf_id = _sg_alloc_buffer(); + if (buf_id.id != SG_INVALID_ID) { + _sg_init_buffer(buf_id, &desc_def); + } + else { + SOKOL_LOG("buffer pool exhausted!"); + _SG_TRACE_NOARGS(err_buffer_pool_exhausted); + } + _SG_TRACE_ARGS(make_buffer, &desc_def, buf_id); + return buf_id; +} + +SOKOL_API_IMPL sg_image sg_make_image(const sg_image_desc* desc) { + SOKOL_ASSERT(_sg.valid); + SOKOL_ASSERT(desc); + sg_image_desc desc_def = _sg_image_desc_defaults(desc); + sg_image img_id = _sg_alloc_image(); + if (img_id.id != SG_INVALID_ID) { + _sg_init_image(img_id, &desc_def); + } + else { + SOKOL_LOG("image pool exhausted!"); + _SG_TRACE_NOARGS(err_image_pool_exhausted); + } + _SG_TRACE_ARGS(make_image, &desc_def, img_id); + return img_id; +} + +SOKOL_API_IMPL sg_shader sg_make_shader(const sg_shader_desc* desc) { + SOKOL_ASSERT(_sg.valid); + SOKOL_ASSERT(desc); + sg_shader_desc desc_def = _sg_shader_desc_defaults(desc); + sg_shader shd_id = _sg_alloc_shader(); + if (shd_id.id != SG_INVALID_ID) { + _sg_init_shader(shd_id, &desc_def); + } + else { + SOKOL_LOG("shader pool exhausted!"); + _SG_TRACE_NOARGS(err_shader_pool_exhausted); + } + _SG_TRACE_ARGS(make_shader, &desc_def, shd_id); + return shd_id; +} + +SOKOL_API_IMPL sg_pipeline sg_make_pipeline(const sg_pipeline_desc* desc) { + SOKOL_ASSERT(_sg.valid); + SOKOL_ASSERT(desc); + sg_pipeline_desc desc_def = _sg_pipeline_desc_defaults(desc); + sg_pipeline pip_id = _sg_alloc_pipeline(); + if (pip_id.id != SG_INVALID_ID) { + _sg_init_pipeline(pip_id, &desc_def); + } + else { + SOKOL_LOG("pipeline pool exhausted!"); + _SG_TRACE_NOARGS(err_pipeline_pool_exhausted); + } + _SG_TRACE_ARGS(make_pipeline, &desc_def, pip_id); + return pip_id; +} + +SOKOL_API_IMPL sg_pass sg_make_pass(const sg_pass_desc* desc) { + SOKOL_ASSERT(_sg.valid); + SOKOL_ASSERT(desc); + sg_pass_desc desc_def = _sg_pass_desc_defaults(desc); + sg_pass pass_id = _sg_alloc_pass(); + if (pass_id.id != SG_INVALID_ID) { + _sg_init_pass(pass_id, &desc_def); + } + else { + SOKOL_LOG("pass pool exhausted!"); + _SG_TRACE_NOARGS(err_pass_pool_exhausted); + } + _SG_TRACE_ARGS(make_pass, &desc_def, pass_id); + return pass_id; +} + +/*-- destroy resource --------------------------------------------------------*/ +SOKOL_API_IMPL void sg_destroy_buffer(sg_buffer buf_id) { + SOKOL_ASSERT(_sg.valid); + _SG_TRACE_ARGS(destroy_buffer, buf_id); + if (_sg_uninit_buffer(buf_id)) { + _sg_dealloc_buffer(buf_id); + } +} + +SOKOL_API_IMPL void sg_destroy_image(sg_image img_id) { + SOKOL_ASSERT(_sg.valid); + _SG_TRACE_ARGS(destroy_image, img_id); + if (_sg_uninit_image(img_id)) { + _sg_dealloc_image(img_id); + } +} + +SOKOL_API_IMPL void sg_destroy_shader(sg_shader shd_id) { + SOKOL_ASSERT(_sg.valid); + _SG_TRACE_ARGS(destroy_shader, shd_id); + if (_sg_uninit_shader(shd_id)) { + _sg_dealloc_shader(shd_id); + } +} + +SOKOL_API_IMPL void sg_destroy_pipeline(sg_pipeline pip_id) { + SOKOL_ASSERT(_sg.valid); + _SG_TRACE_ARGS(destroy_pipeline, pip_id); + if (_sg_uninit_pipeline(pip_id)) { + _sg_dealloc_pipeline(pip_id); + } +} + +SOKOL_API_IMPL void sg_destroy_pass(sg_pass pass_id) { + SOKOL_ASSERT(_sg.valid); + _SG_TRACE_ARGS(destroy_pass, pass_id); + if (_sg_uninit_pass(pass_id)) { + 
_sg_dealloc_pass(pass_id); + } +} + +SOKOL_API_IMPL void sg_begin_default_pass(const sg_pass_action* pass_action, int width, int height) { + SOKOL_ASSERT(_sg.valid); + SOKOL_ASSERT(pass_action); + SOKOL_ASSERT((pass_action->_start_canary == 0) && (pass_action->_end_canary == 0)); + sg_pass_action pa; + _sg_resolve_default_pass_action(pass_action, &pa); + _sg.cur_pass.id = SG_INVALID_ID; + _sg.pass_valid = true; + _sg_begin_pass(0, &pa, width, height); + _SG_TRACE_ARGS(begin_default_pass, pass_action, width, height); +} + +SOKOL_API_IMPL void sg_begin_default_passf(const sg_pass_action* pass_action, float width, float height) { + sg_begin_default_pass(pass_action, (int)width, (int)height); +} + +SOKOL_API_IMPL void sg_begin_pass(sg_pass pass_id, const sg_pass_action* pass_action) { + SOKOL_ASSERT(_sg.valid); + SOKOL_ASSERT(pass_action); + SOKOL_ASSERT((pass_action->_start_canary == 0) && (pass_action->_end_canary == 0)); + _sg.cur_pass = pass_id; + _sg_pass_t* pass = _sg_lookup_pass(&_sg.pools, pass_id.id); + if (pass && _sg_validate_begin_pass(pass)) { + _sg.pass_valid = true; + sg_pass_action pa; + _sg_resolve_default_pass_action(pass_action, &pa); + const _sg_image_t* img = _sg_pass_color_image(pass, 0); + SOKOL_ASSERT(img); + const int w = img->cmn.width; + const int h = img->cmn.height; + _sg_begin_pass(pass, &pa, w, h); + _SG_TRACE_ARGS(begin_pass, pass_id, pass_action); + } + else { + _sg.pass_valid = false; + _SG_TRACE_NOARGS(err_pass_invalid); + } +} + +SOKOL_API_IMPL void sg_apply_viewport(int x, int y, int width, int height, bool origin_top_left) { + SOKOL_ASSERT(_sg.valid); + if (!_sg.pass_valid) { + _SG_TRACE_NOARGS(err_pass_invalid); + return; + } + _sg_apply_viewport(x, y, width, height, origin_top_left); + _SG_TRACE_ARGS(apply_viewport, x, y, width, height, origin_top_left); +} + +SOKOL_API_IMPL void sg_apply_viewportf(float x, float y, float width, float height, bool origin_top_left) { + sg_apply_viewport((int)x, (int)y, (int)width, (int)height, origin_top_left); +} + +SOKOL_API_IMPL void sg_apply_scissor_rect(int x, int y, int width, int height, bool origin_top_left) { + SOKOL_ASSERT(_sg.valid); + if (!_sg.pass_valid) { + _SG_TRACE_NOARGS(err_pass_invalid); + return; + } + _sg_apply_scissor_rect(x, y, width, height, origin_top_left); + _SG_TRACE_ARGS(apply_scissor_rect, x, y, width, height, origin_top_left); +} + +SOKOL_API_IMPL void sg_apply_scissor_rectf(float x, float y, float width, float height, bool origin_top_left) { + sg_apply_scissor_rect((int)x, (int)y, (int)width, (int)height, origin_top_left); +} + +SOKOL_API_IMPL void sg_apply_pipeline(sg_pipeline pip_id) { + SOKOL_ASSERT(_sg.valid); + _sg.bindings_valid = false; + if (!_sg_validate_apply_pipeline(pip_id)) { + _sg.next_draw_valid = false; + _SG_TRACE_NOARGS(err_draw_invalid); + return; + } + if (!_sg.pass_valid) { + _SG_TRACE_NOARGS(err_pass_invalid); + return; + } + _sg.cur_pipeline = pip_id; + _sg_pipeline_t* pip = _sg_lookup_pipeline(&_sg.pools, pip_id.id); + SOKOL_ASSERT(pip); + _sg.next_draw_valid = (SG_RESOURCESTATE_VALID == pip->slot.state); + SOKOL_ASSERT(pip->shader && (pip->shader->slot.id == pip->cmn.shader_id.id)); + _sg_apply_pipeline(pip); + _SG_TRACE_ARGS(apply_pipeline, pip_id); +} + +SOKOL_API_IMPL void sg_apply_bindings(const sg_bindings* bindings) { + SOKOL_ASSERT(_sg.valid); + SOKOL_ASSERT(bindings); + SOKOL_ASSERT((bindings->_start_canary == 0) && (bindings->_end_canary==0)); + if (!_sg_validate_apply_bindings(bindings)) { + _sg.next_draw_valid = false; + 
_SG_TRACE_NOARGS(err_draw_invalid); + return; + } + _sg.bindings_valid = true; + + _sg_pipeline_t* pip = _sg_lookup_pipeline(&_sg.pools, _sg.cur_pipeline.id); + SOKOL_ASSERT(pip); + + _sg_buffer_t* vbs[SG_MAX_SHADERSTAGE_BUFFERS] = { 0 }; + int num_vbs = 0; + for (int i = 0; i < SG_MAX_SHADERSTAGE_BUFFERS; i++, num_vbs++) { + if (bindings->vertex_buffers[i].id) { + vbs[i] = _sg_lookup_buffer(&_sg.pools, bindings->vertex_buffers[i].id); + SOKOL_ASSERT(vbs[i]); + _sg.next_draw_valid &= (SG_RESOURCESTATE_VALID == vbs[i]->slot.state); + _sg.next_draw_valid &= !vbs[i]->cmn.append_overflow; + } + else { + break; + } + } + + _sg_buffer_t* ib = 0; + if (bindings->index_buffer.id) { + ib = _sg_lookup_buffer(&_sg.pools, bindings->index_buffer.id); + SOKOL_ASSERT(ib); + _sg.next_draw_valid &= (SG_RESOURCESTATE_VALID == ib->slot.state); + _sg.next_draw_valid &= !ib->cmn.append_overflow; + } + + _sg_image_t* vs_imgs[SG_MAX_SHADERSTAGE_IMAGES] = { 0 }; + int num_vs_imgs = 0; + for (int i = 0; i < SG_MAX_SHADERSTAGE_IMAGES; i++, num_vs_imgs++) { + if (bindings->vs_images[i].id) { + vs_imgs[i] = _sg_lookup_image(&_sg.pools, bindings->vs_images[i].id); + SOKOL_ASSERT(vs_imgs[i]); + _sg.next_draw_valid &= (SG_RESOURCESTATE_VALID == vs_imgs[i]->slot.state); + } + else { + break; + } + } + + _sg_image_t* fs_imgs[SG_MAX_SHADERSTAGE_IMAGES] = { 0 }; + int num_fs_imgs = 0; + for (int i = 0; i < SG_MAX_SHADERSTAGE_IMAGES; i++, num_fs_imgs++) { + if (bindings->fs_images[i].id) { + fs_imgs[i] = _sg_lookup_image(&_sg.pools, bindings->fs_images[i].id); + SOKOL_ASSERT(fs_imgs[i]); + _sg.next_draw_valid &= (SG_RESOURCESTATE_VALID == fs_imgs[i]->slot.state); + } + else { + break; + } + } + if (_sg.next_draw_valid) { + const int* vb_offsets = bindings->vertex_buffer_offsets; + int ib_offset = bindings->index_buffer_offset; + _sg_apply_bindings(pip, vbs, vb_offsets, num_vbs, ib, ib_offset, vs_imgs, num_vs_imgs, fs_imgs, num_fs_imgs); + _SG_TRACE_ARGS(apply_bindings, bindings); + } + else { + _SG_TRACE_NOARGS(err_draw_invalid); + } +} + +SOKOL_API_IMPL void sg_apply_uniforms(sg_shader_stage stage, int ub_index, const sg_range* data) { + SOKOL_ASSERT(_sg.valid); + SOKOL_ASSERT((stage == SG_SHADERSTAGE_VS) || (stage == SG_SHADERSTAGE_FS)); + SOKOL_ASSERT((ub_index >= 0) && (ub_index < SG_MAX_SHADERSTAGE_UBS)); + SOKOL_ASSERT(data && data->ptr && (data->size > 0)); + if (!_sg_validate_apply_uniforms(stage, ub_index, data)) { + _sg.next_draw_valid = false; + _SG_TRACE_NOARGS(err_draw_invalid); + return; + } + if (!_sg.pass_valid) { + _SG_TRACE_NOARGS(err_pass_invalid); + return; + } + if (!_sg.next_draw_valid) { + _SG_TRACE_NOARGS(err_draw_invalid); + } + _sg_apply_uniforms(stage, ub_index, data); + _SG_TRACE_ARGS(apply_uniforms, stage, ub_index, data); +} + +SOKOL_API_IMPL void sg_draw(int base_element, int num_elements, int num_instances) { + SOKOL_ASSERT(_sg.valid); + SOKOL_ASSERT(base_element >= 0); + SOKOL_ASSERT(num_elements >= 0); + SOKOL_ASSERT(num_instances >= 0); + #if defined(SOKOL_DEBUG) + if (!_sg.bindings_valid) { + SOKOL_LOG("attempting to draw without resource bindings"); + } + #endif + if (!_sg.pass_valid) { + _SG_TRACE_NOARGS(err_pass_invalid); + return; + } + if (!_sg.next_draw_valid) { + _SG_TRACE_NOARGS(err_draw_invalid); + return; + } + if (!_sg.bindings_valid) { + _SG_TRACE_NOARGS(err_bindings_invalid); + return; + } + /* attempting to draw with zero elements or instances is not technically an + error, but might be handled as an error in the backend API (e.g. 
on Metal) + */ + if ((0 == num_elements) || (0 == num_instances)) { + _SG_TRACE_NOARGS(err_draw_invalid); + return; + } + _sg_draw(base_element, num_elements, num_instances); + _SG_TRACE_ARGS(draw, base_element, num_elements, num_instances); +} + +SOKOL_API_IMPL void sg_end_pass(void) { + SOKOL_ASSERT(_sg.valid); + if (!_sg.pass_valid) { + _SG_TRACE_NOARGS(err_pass_invalid); + return; + } + _sg_end_pass(); + _sg.cur_pass.id = SG_INVALID_ID; + _sg.cur_pipeline.id = SG_INVALID_ID; + _sg.pass_valid = false; + _SG_TRACE_NOARGS(end_pass); +} + +SOKOL_API_IMPL void sg_commit(void) { + SOKOL_ASSERT(_sg.valid); + _sg_commit(); + _SG_TRACE_NOARGS(commit); + _sg.frame_index++; +} + +SOKOL_API_IMPL void sg_reset_state_cache(void) { + SOKOL_ASSERT(_sg.valid); + _sg_reset_state_cache(); + _SG_TRACE_NOARGS(reset_state_cache); +} + +SOKOL_API_IMPL void sg_update_buffer(sg_buffer buf_id, const sg_range* data) { + SOKOL_ASSERT(_sg.valid); + SOKOL_ASSERT(data && data->ptr && (data->size > 0)); + _sg_buffer_t* buf = _sg_lookup_buffer(&_sg.pools, buf_id.id); + if ((data->size > 0) && buf && (buf->slot.state == SG_RESOURCESTATE_VALID)) { + if (_sg_validate_update_buffer(buf, data)) { + SOKOL_ASSERT(data->size <= (size_t)buf->cmn.size); + /* only one update allowed per buffer and frame */ + SOKOL_ASSERT(buf->cmn.update_frame_index != _sg.frame_index); + /* update and append on same buffer in same frame not allowed */ + SOKOL_ASSERT(buf->cmn.append_frame_index != _sg.frame_index); + _sg_update_buffer(buf, data); + buf->cmn.update_frame_index = _sg.frame_index; + } + } + _SG_TRACE_ARGS(update_buffer, buf_id, data); +} + +SOKOL_API_IMPL int sg_append_buffer(sg_buffer buf_id, const sg_range* data) { + SOKOL_ASSERT(_sg.valid); + SOKOL_ASSERT(data && data->ptr); + _sg_buffer_t* buf = _sg_lookup_buffer(&_sg.pools, buf_id.id); + int result; + if (buf) { + /* rewind append cursor in a new frame */ + if (buf->cmn.append_frame_index != _sg.frame_index) { + buf->cmn.append_pos = 0; + buf->cmn.append_overflow = false; + } + if ((buf->cmn.append_pos + _sg_roundup((int)data->size, 4)) > buf->cmn.size) { + buf->cmn.append_overflow = true; + } + const int start_pos = buf->cmn.append_pos; + if (buf->slot.state == SG_RESOURCESTATE_VALID) { + if (_sg_validate_append_buffer(buf, data)) { + if (!buf->cmn.append_overflow && (data->size > 0)) { + /* update and append on same buffer in same frame not allowed */ + SOKOL_ASSERT(buf->cmn.update_frame_index != _sg.frame_index); + int copied_num_bytes = _sg_append_buffer(buf, data, buf->cmn.append_frame_index != _sg.frame_index); + buf->cmn.append_pos += copied_num_bytes; + buf->cmn.append_frame_index = _sg.frame_index; + } + } + } + result = start_pos; + } + else { + /* FIXME: should we return -1 here? */ + result = 0; + } + _SG_TRACE_ARGS(append_buffer, buf_id, data, result); + return result; +} + +SOKOL_API_IMPL bool sg_query_buffer_overflow(sg_buffer buf_id) { + SOKOL_ASSERT(_sg.valid); + _sg_buffer_t* buf = _sg_lookup_buffer(&_sg.pools, buf_id.id); + bool result = buf ? 
buf->cmn.append_overflow : false; + return result; +} + +SOKOL_API_IMPL void sg_update_image(sg_image img_id, const sg_image_data* data) { + SOKOL_ASSERT(_sg.valid); + _sg_image_t* img = _sg_lookup_image(&_sg.pools, img_id.id); + if (img && img->slot.state == SG_RESOURCESTATE_VALID) { + if (_sg_validate_update_image(img, data)) { + SOKOL_ASSERT(img->cmn.upd_frame_index != _sg.frame_index); + _sg_update_image(img, data); + img->cmn.upd_frame_index = _sg.frame_index; + } + } + _SG_TRACE_ARGS(update_image, img_id, data); +} + +SOKOL_API_IMPL void sg_push_debug_group(const char* name) { + SOKOL_ASSERT(_sg.valid); + SOKOL_ASSERT(name); + _SOKOL_UNUSED(name); + _SG_TRACE_ARGS(push_debug_group, name); +} + +SOKOL_API_IMPL void sg_pop_debug_group(void) { + SOKOL_ASSERT(_sg.valid); + _SG_TRACE_NOARGS(pop_debug_group); +} + +SOKOL_API_IMPL sg_buffer_info sg_query_buffer_info(sg_buffer buf_id) { + SOKOL_ASSERT(_sg.valid); + sg_buffer_info info; + memset(&info, 0, sizeof(info)); + const _sg_buffer_t* buf = _sg_lookup_buffer(&_sg.pools, buf_id.id); + if (buf) { + info.slot.state = buf->slot.state; + info.slot.res_id = buf->slot.id; + info.slot.ctx_id = buf->slot.ctx_id; + info.update_frame_index = buf->cmn.update_frame_index; + info.append_frame_index = buf->cmn.append_frame_index; + info.append_pos = buf->cmn.append_pos; + info.append_overflow = buf->cmn.append_overflow; + #if defined(SOKOL_D3D11) + info.num_slots = 1; + info.active_slot = 0; + #else + info.num_slots = buf->cmn.num_slots; + info.active_slot = buf->cmn.active_slot; + #endif + } + return info; +} + +SOKOL_API_IMPL sg_image_info sg_query_image_info(sg_image img_id) { + SOKOL_ASSERT(_sg.valid); + sg_image_info info; + memset(&info, 0, sizeof(info)); + const _sg_image_t* img = _sg_lookup_image(&_sg.pools, img_id.id); + if (img) { + info.slot.state = img->slot.state; + info.slot.res_id = img->slot.id; + info.slot.ctx_id = img->slot.ctx_id; + #if defined(SOKOL_D3D11) + info.num_slots = 1; + info.active_slot = 0; + #else + info.num_slots = img->cmn.num_slots; + info.active_slot = img->cmn.active_slot; + #endif + info.width = img->cmn.width; + info.height = img->cmn.height; + } + return info; +} + +SOKOL_API_IMPL sg_shader_info sg_query_shader_info(sg_shader shd_id) { + SOKOL_ASSERT(_sg.valid); + sg_shader_info info; + memset(&info, 0, sizeof(info)); + const _sg_shader_t* shd = _sg_lookup_shader(&_sg.pools, shd_id.id); + if (shd) { + info.slot.state = shd->slot.state; + info.slot.res_id = shd->slot.id; + info.slot.ctx_id = shd->slot.ctx_id; + } + return info; +} + +SOKOL_API_IMPL sg_pipeline_info sg_query_pipeline_info(sg_pipeline pip_id) { + SOKOL_ASSERT(_sg.valid); + sg_pipeline_info info; + memset(&info, 0, sizeof(info)); + const _sg_pipeline_t* pip = _sg_lookup_pipeline(&_sg.pools, pip_id.id); + if (pip) { + info.slot.state = pip->slot.state; + info.slot.res_id = pip->slot.id; + info.slot.ctx_id = pip->slot.ctx_id; + } + return info; +} + +SOKOL_API_IMPL sg_pass_info sg_query_pass_info(sg_pass pass_id) { + SOKOL_ASSERT(_sg.valid); + sg_pass_info info; + memset(&info, 0, sizeof(info)); + const _sg_pass_t* pass = _sg_lookup_pass(&_sg.pools, pass_id.id); + if (pass) { + info.slot.state = pass->slot.state; + info.slot.res_id = pass->slot.id; + info.slot.ctx_id = pass->slot.ctx_id; + } + return info; +} + +SOKOL_API_IMPL sg_buffer_desc sg_query_buffer_defaults(const sg_buffer_desc* desc) { + SOKOL_ASSERT(_sg.valid && desc); + return _sg_buffer_desc_defaults(desc); +} + +SOKOL_API_IMPL sg_image_desc sg_query_image_defaults(const sg_image_desc* 
desc) { + SOKOL_ASSERT(_sg.valid && desc); + return _sg_image_desc_defaults(desc); +} + +SOKOL_API_IMPL sg_shader_desc sg_query_shader_defaults(const sg_shader_desc* desc) { + SOKOL_ASSERT(_sg.valid && desc); + return _sg_shader_desc_defaults(desc); +} + +SOKOL_API_IMPL sg_pipeline_desc sg_query_pipeline_defaults(const sg_pipeline_desc* desc) { + SOKOL_ASSERT(_sg.valid && desc); + return _sg_pipeline_desc_defaults(desc); +} + +SOKOL_API_IMPL sg_pass_desc sg_query_pass_defaults(const sg_pass_desc* desc) { + SOKOL_ASSERT(_sg.valid && desc); + return _sg_pass_desc_defaults(desc); +} + +SOKOL_API_IMPL const void* sg_d3d11_device(void) { +#if defined(SOKOL_D3D11) + return (const void*) _sg.d3d11.dev; +#else + return 0; +#endif +} + +SOKOL_API_IMPL const void* sg_mtl_device(void) { +#if defined(SOKOL_METAL) + if (nil != _sg.mtl.device) { + return (__bridge const void*) _sg.mtl.device; + } + else { + return 0; + } +#else + return 0; +#endif +} + +SOKOL_API_IMPL const void* sg_mtl_render_command_encoder(void) { + #if defined(SOKOL_METAL) + if (nil != _sg.mtl.cmd_encoder) { + return (__bridge const void*) _sg.mtl.cmd_encoder; + } + else { + return 0; + } + #else + return 0; + #endif +} + +#ifdef _MSC_VER +#pragma warning(pop) +#endif + +#endif /* SOKOL_GFX_IMPL */ diff --git a/v_windows/v/old/thirdparty/sokol/sokol_v.h b/v_windows/v/old/thirdparty/sokol/sokol_v.h new file mode 100644 index 0000000..cc08678 --- /dev/null +++ b/v_windows/v/old/thirdparty/sokol/sokol_v.h @@ -0,0 +1,20 @@ +#if defined(__ANDROID__) + // Adapted from https://stackoverflow.com/a/196018/1904615 + #define V_ANDROID_LOG_STR_VALUE(arg) #arg + #define V_ANDROID_LOG_NAME(tag_name) V_ANDROID_LOG_STR_VALUE(tag_name) + + #ifndef V_ANDROID_LOG_TAG + #if defined(APPNAME) + #define V_ANDROID_LOG_TAG APPNAME + #else + #define V_ANDROID_LOG_TAG "V_ANDROID" + #endif + #endif + + #define V_ANDROID_LOG_TAG_NAME V_ANDROID_LOG_NAME(V_ANDROID_LOG_TAG) + + #include + #define printf(...) __android_log_print(ANDROID_LOG_INFO, V_ANDROID_LOG_TAG_NAME, __VA_ARGS__) + #define fprintf(a, ...) __android_log_print(ANDROID_LOG_ERROR, V_ANDROID_LOG_TAG_NAME, __VA_ARGS__) +#endif + diff --git a/v_windows/v/old/thirdparty/sokol/util/sokol_fontstash.h b/v_windows/v/old/thirdparty/sokol/util/sokol_fontstash.h new file mode 100644 index 0000000..9bb915f --- /dev/null +++ b/v_windows/v/old/thirdparty/sokol/util/sokol_fontstash.h @@ -0,0 +1,1785 @@ +#if defined(SOKOL_IMPL) && !defined(SOKOL_FONTSTASH_IMPL) +#define SOKOL_FONTSTASH_IMPL +#endif +#ifndef SOKOL_FONTSTASH_INCLUDED +/* + sokol_fontstash.h -- renderer for https://github.com/memononen/fontstash + on top of sokol_gl.h + + Project URL: https://github.com/floooh/sokol + + Do this: + #define SOKOL_IMPL or + #define SOKOL_FONTSTASH_IMPL + + before you include this file in *one* C or C++ file to create the + implementation. 
+ + The following defines are used by the implementation to select the + platform-specific embedded shader code (these are the same defines as + used by sokol_gfx.h and sokol_app.h): + + SOKOL_GLCORE33 + SOKOL_GLES2 + SOKOL_GLES3 + SOKOL_D3D11 + SOKOL_METAL + + ...optionally provide the following macros to override defaults: + + SOKOL_ASSERT(c) - your own assert macro (default: assert(c)) + SOKOL_MALLOC(s) - your own malloc function (default: malloc(s)) + SOKOL_FREE(p) - your own free function (default: free(p)) + SOKOL_FONTSTASH_API_DECL - public function declaration prefix (default: extern) + SOKOL_API_DECL - same as SOKOL_FONTSTASH_API_DECL + SOKOL_API_IMPL - public function implementation prefix (default: -) + SOKOL_LOG(msg) - your own logging function (default: puts(msg)) + SOKOL_UNREACHABLE() - a guard macro for unreachable code (default: assert(false)) + + Include the following headers before including sokol_fontstash.h: + + sokol_gfx.h + + Additionally include the following headers for including the sokol_fontstash.h + implementation: + + sokol_gl.h + + HOW TO + ====== + --- First initialize sokol-gfx and sokol-gl as usual: + + sg_setup(&(sg_desc){...}); + sgl_setup(&(sgl_desc){...}); + + --- Create at least one fontstash context with sfons_create() (this replaces + glfonsCreate() from fontstash.h's example GL renderer: + + FONScontext* ctx = sfons_create(atlas_width, atlas_height, FONS_ZERO_TOPLEFT); + + Each FONScontext manages one font atlas texture which can hold rasterized + glyphs for multiple fonts. + + --- From here on, use fontstash.h's functions "as usual" to add TTF + font data and draw text. Note that (just like with sokol-gl), text + rendering can happen anywhere in the frame, not only inside + a sokol-gfx rendering pass. + + --- You can use the helper function + + uint32_t sfons_rgba(uint8_t r, uint8_t g, uint8_t b, uint8_t a) + + To convert a 0..255 RGBA color into a packed uint32_t color value + expected by fontstash.h. + + --- Once per frame before calling sgl_draw(), call: + + sfons_flush(FONScontext* ctx) + + ...this will update the dynamic sokol-gfx texture with the latest font + atlas content. + + --- To actually render the text (and any other sokol-gl draw commands), + call sgl_draw() inside a sokol-gfx frame. + + --- NOTE that you can mix fontstash.h calls with sokol-gl calls to mix + text rendering with sokol-gl rendering. You can also use + sokol-gl's matrix stack to position fontstash.h text in 3D. 
+ + --- finally on application shutdown, call: + + sfons_shutdown() + + before sgl_shutdown() and sg_shutdown() + + + WHAT HAPPENS UNDER THE HOOD: + ============================ + + sfons_create(): + - creates a sokol-gfx shader compatible with sokol-gl + - creates an sgl_pipeline object with alpha-blending using + this shader + - creates a 1-byte-per-pixel font atlas texture via sokol-gfx + (pixel format SG_PIXELFORMAT_R8) + + fonsDrawText(): + - this will call the following sequence of sokol-gl functions: + + sgl_enable_texture(); + sgl_texture(...); + sgl_push_pipeline(); + sgl_load_pipeline(...); + sgl_begin_triangles(); + for each vertex: + sg_v2f_t2f_c1i(...); + sgl_end(); + sgl_pop_pipeline(); + sgl_disable_texture(); + + - note that sokol-gl will merge several sgl_*_begin/sgl_end pairs + into a single draw call if no relevant state has changed, typically + all calls to fonsDrawText() will be merged into a single draw call + as long as all calls use the same FONScontext + + sfons_flush(): + - this will call sg_update_image() on the font atlas texture + if fontstash.h has added any rasterized glyphs since the last + frame + + sfons_shutdown(): + - destroy the font atlas texture, sgl_pipeline and sg_shader objects + + LICENSE + ======= + zlib/libpng license + + Copyright (c) 2018 Andre Weissflog + + This software is provided 'as-is', without any express or implied warranty. + In no event will the authors be held liable for any damages arising from the + use of this software. + + Permission is granted to anyone to use this software for any purpose, + including commercial applications, and to alter it and redistribute it + freely, subject to the following restrictions: + + 1. The origin of this software must not be misrepresented; you must not + claim that you wrote the original software. If you use this software in a + product, an acknowledgment in the product documentation would be + appreciated but is not required. + + 2. Altered source versions must be plainly marked as such, and must not + be misrepresented as being the original software. + + 3. This notice may not be removed or altered from any source + distribution. 
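+
+    Putting the HOW TO steps above together, a minimal usage sketch could look
+    like the following (atlas size, font data, coordinates and the pass setup
+    are placeholders; fonsAddFontMem(), fonsSetFont(), fonsSetSize(),
+    fonsSetColor() and fonsDrawText() are fontstash.h functions and are shown
+    here only for illustration -- see fontstash.h for the authoritative API):
+
+        FONScontext* fons = sfons_create(512, 512, FONS_ZERO_TOPLEFT);
+        int font = fonsAddFontMem(fons, "sans", ttf_data, ttf_size, 0);
+
+        // once per frame:
+        fonsSetFont(fons, font);
+        fonsSetSize(fons, 24.0f);
+        fonsSetColor(fons, sfons_rgba(255, 255, 255, 255));
+        fonsDrawText(fons, x, y, "Hello, world!", 0);
+        sfons_flush(fons);      // update the font atlas texture
+        sg_begin_default_pass(&pass_action, width, height);
+        sgl_draw();             // render the accumulated text
+        sg_end_pass();
+        sg_commit();
+
+        // on shutdown, before sgl_shutdown() and sg_shutdown()
+        // (declared below as sfons_destroy()):
+        sfons_destroy(fons);
+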
+*/ +#define SOKOL_FONTSTASH_INCLUDED (1) +#include +#include + +#if !defined(SOKOL_GFX_INCLUDED) +#error "Please include sokol_gfx.h before sokol_fontstash.h" +#endif + +#if defined(SOKOL_API_DECL) && !defined(SOKOL_FONTSTASH_API_DECL) +#define SOKOL_FONTSTASH_API_DECL SOKOL_API_DECL +#endif +#ifndef SOKOL_FONTSTASH_API_DECL +#if defined(_WIN32) && defined(SOKOL_DLL) && defined(SOKOL_FONTSTASH_IMPL) +#define SOKOL_FONTSTASH_API_DECL __declspec(dllexport) +#elif defined(_WIN32) && defined(SOKOL_DLL) +#define SOKOL_FONTSTASH_API_DECL __declspec(dllimport) +#else +#define SOKOL_FONTSTASH_API_DECL extern +#endif +#endif +#ifdef __cplusplus +extern "C" { +#endif + +SOKOL_FONTSTASH_API_DECL FONScontext* sfons_create(int width, int height, int flags); +SOKOL_FONTSTASH_API_DECL void sfons_destroy(FONScontext* ctx); +SOKOL_FONTSTASH_API_DECL void sfons_flush(FONScontext* ctx); +SOKOL_FONTSTASH_API_DECL uint32_t sfons_rgba(uint8_t r, uint8_t g, uint8_t b, uint8_t a); + +#ifdef __cplusplus +} /* extern "C" */ +#endif +#endif /* SOKOL_FONTSTASH_INCLUDED */ + +/*-- IMPLEMENTATION ----------------------------------------------------------*/ +#ifdef SOKOL_FONTSTASH_IMPL +#define SOKOL_FONTSTASH_IMPL_INCLUDED (1) +#include /* memset, memcpy */ + +#if !defined(SOKOL_GL_INCLUDED) +#error "Please include sokol_gl.h before sokol_fontstash.h" +#endif +#if !defined(FONS_H) +#error "Please include fontstash.h before sokol_fontstash.h" +#endif + +#ifndef SOKOL_API_IMPL + #define SOKOL_API_IMPL +#endif +#ifndef SOKOL_DEBUG + #ifndef NDEBUG + #define SOKOL_DEBUG (1) + #endif +#endif +#ifndef SOKOL_ASSERT + #include + #define SOKOL_ASSERT(c) assert(c) +#endif +#ifndef SOKOL_MALLOC + #include + #define SOKOL_MALLOC(s) malloc(s) + #define SOKOL_FREE(p) free(p) +#endif +#ifndef SOKOL_LOG + #ifdef SOKOL_DEBUG + #include + #define SOKOL_LOG(s) { SOKOL_ASSERT(s); puts(s); } + #else + #define SOKOL_LOG(s) + #endif +#endif +#ifndef SOKOL_UNREACHABLE + #define SOKOL_UNREACHABLE SOKOL_ASSERT(false) +#endif +#ifndef _SOKOL_UNUSED + #define _SOKOL_UNUSED(x) (void)(x) +#endif + +#if defined(SOKOL_GLCORE33) +/* + Embedded source code compiled with: + + sokol-shdc -i sfons.glsl -o sfons.h -l glsl330:glsl100:hlsl4:metal_macos:metal_ios:metal_sim:wgpu -b + + (not that for Metal and D3D11 byte code, sokol-shdc must be run + on macOS and Windows) + + @vs vs + uniform vs_params { + uniform mat4 mvp; + uniform mat4 tm; + }; + in vec4 position; + in vec2 texcoord0; + in vec4 color0; + out vec4 uv; + out vec4 color; + void main() { + gl_Position = mvp * position; + uv = tm * vec4(texcoord0, 0.0, 1.0); + color = color0; + } + @end + + @fs fs + uniform sampler2D tex; + in vec4 uv; + in vec4 color; + out vec4 frag_color; + void main() { + frag_color = vec4(1.0, 1.0, 1.0, texture(tex, uv.xy).r) * color; + } + @end + + @program sfontstash vs fs +*/ +static const char _sfons_vs_source_glsl330[415] = { + 0x23,0x76,0x65,0x72,0x73,0x69,0x6f,0x6e,0x20,0x33,0x33,0x30,0x0a,0x0a,0x75,0x6e, + 0x69,0x66,0x6f,0x72,0x6d,0x20,0x76,0x65,0x63,0x34,0x20,0x76,0x73,0x5f,0x70,0x61, + 0x72,0x61,0x6d,0x73,0x5b,0x38,0x5d,0x3b,0x0a,0x6c,0x61,0x79,0x6f,0x75,0x74,0x28, + 0x6c,0x6f,0x63,0x61,0x74,0x69,0x6f,0x6e,0x20,0x3d,0x20,0x30,0x29,0x20,0x69,0x6e, + 0x20,0x76,0x65,0x63,0x34,0x20,0x70,0x6f,0x73,0x69,0x74,0x69,0x6f,0x6e,0x3b,0x0a, + 0x6f,0x75,0x74,0x20,0x76,0x65,0x63,0x34,0x20,0x75,0x76,0x3b,0x0a,0x6c,0x61,0x79, + 0x6f,0x75,0x74,0x28,0x6c,0x6f,0x63,0x61,0x74,0x69,0x6f,0x6e,0x20,0x3d,0x20,0x31, + 
0x29,0x20,0x69,0x6e,0x20,0x76,0x65,0x63,0x32,0x20,0x74,0x65,0x78,0x63,0x6f,0x6f, + 0x72,0x64,0x30,0x3b,0x0a,0x6f,0x75,0x74,0x20,0x76,0x65,0x63,0x34,0x20,0x63,0x6f, + 0x6c,0x6f,0x72,0x3b,0x0a,0x6c,0x61,0x79,0x6f,0x75,0x74,0x28,0x6c,0x6f,0x63,0x61, + 0x74,0x69,0x6f,0x6e,0x20,0x3d,0x20,0x32,0x29,0x20,0x69,0x6e,0x20,0x76,0x65,0x63, + 0x34,0x20,0x63,0x6f,0x6c,0x6f,0x72,0x30,0x3b,0x0a,0x0a,0x76,0x6f,0x69,0x64,0x20, + 0x6d,0x61,0x69,0x6e,0x28,0x29,0x0a,0x7b,0x0a,0x20,0x20,0x20,0x20,0x67,0x6c,0x5f, + 0x50,0x6f,0x73,0x69,0x74,0x69,0x6f,0x6e,0x20,0x3d,0x20,0x6d,0x61,0x74,0x34,0x28, + 0x76,0x73,0x5f,0x70,0x61,0x72,0x61,0x6d,0x73,0x5b,0x30,0x5d,0x2c,0x20,0x76,0x73, + 0x5f,0x70,0x61,0x72,0x61,0x6d,0x73,0x5b,0x31,0x5d,0x2c,0x20,0x76,0x73,0x5f,0x70, + 0x61,0x72,0x61,0x6d,0x73,0x5b,0x32,0x5d,0x2c,0x20,0x76,0x73,0x5f,0x70,0x61,0x72, + 0x61,0x6d,0x73,0x5b,0x33,0x5d,0x29,0x20,0x2a,0x20,0x70,0x6f,0x73,0x69,0x74,0x69, + 0x6f,0x6e,0x3b,0x0a,0x20,0x20,0x20,0x20,0x75,0x76,0x20,0x3d,0x20,0x6d,0x61,0x74, + 0x34,0x28,0x76,0x73,0x5f,0x70,0x61,0x72,0x61,0x6d,0x73,0x5b,0x34,0x5d,0x2c,0x20, + 0x76,0x73,0x5f,0x70,0x61,0x72,0x61,0x6d,0x73,0x5b,0x35,0x5d,0x2c,0x20,0x76,0x73, + 0x5f,0x70,0x61,0x72,0x61,0x6d,0x73,0x5b,0x36,0x5d,0x2c,0x20,0x76,0x73,0x5f,0x70, + 0x61,0x72,0x61,0x6d,0x73,0x5b,0x37,0x5d,0x29,0x20,0x2a,0x20,0x76,0x65,0x63,0x34, + 0x28,0x74,0x65,0x78,0x63,0x6f,0x6f,0x72,0x64,0x30,0x2c,0x20,0x30,0x2e,0x30,0x2c, + 0x20,0x31,0x2e,0x30,0x29,0x3b,0x0a,0x20,0x20,0x20,0x20,0x63,0x6f,0x6c,0x6f,0x72, + 0x20,0x3d,0x20,0x63,0x6f,0x6c,0x6f,0x72,0x30,0x3b,0x0a,0x7d,0x0a,0x0a,0x00, +}; +static const char _sfons_fs_source_glsl330[195] = { + 0x23,0x76,0x65,0x72,0x73,0x69,0x6f,0x6e,0x20,0x33,0x33,0x30,0x0a,0x0a,0x75,0x6e, + 0x69,0x66,0x6f,0x72,0x6d,0x20,0x73,0x61,0x6d,0x70,0x6c,0x65,0x72,0x32,0x44,0x20, + 0x74,0x65,0x78,0x3b,0x0a,0x0a,0x6c,0x61,0x79,0x6f,0x75,0x74,0x28,0x6c,0x6f,0x63, + 0x61,0x74,0x69,0x6f,0x6e,0x20,0x3d,0x20,0x30,0x29,0x20,0x6f,0x75,0x74,0x20,0x76, + 0x65,0x63,0x34,0x20,0x66,0x72,0x61,0x67,0x5f,0x63,0x6f,0x6c,0x6f,0x72,0x3b,0x0a, + 0x69,0x6e,0x20,0x76,0x65,0x63,0x34,0x20,0x75,0x76,0x3b,0x0a,0x69,0x6e,0x20,0x76, + 0x65,0x63,0x34,0x20,0x63,0x6f,0x6c,0x6f,0x72,0x3b,0x0a,0x0a,0x76,0x6f,0x69,0x64, + 0x20,0x6d,0x61,0x69,0x6e,0x28,0x29,0x0a,0x7b,0x0a,0x20,0x20,0x20,0x20,0x66,0x72, + 0x61,0x67,0x5f,0x63,0x6f,0x6c,0x6f,0x72,0x20,0x3d,0x20,0x76,0x65,0x63,0x34,0x28, + 0x31,0x2e,0x30,0x2c,0x20,0x31,0x2e,0x30,0x2c,0x20,0x31,0x2e,0x30,0x2c,0x20,0x74, + 0x65,0x78,0x74,0x75,0x72,0x65,0x28,0x74,0x65,0x78,0x2c,0x20,0x75,0x76,0x2e,0x78, + 0x79,0x29,0x2e,0x78,0x29,0x20,0x2a,0x20,0x63,0x6f,0x6c,0x6f,0x72,0x3b,0x0a,0x7d, + 0x0a,0x0a,0x00, +}; +#elif defined(SOKOL_GLES2) || defined(SOKOL_GLES3) +static const char _sfons_vs_source_glsl100[381] = { + 0x23,0x76,0x65,0x72,0x73,0x69,0x6f,0x6e,0x20,0x31,0x30,0x30,0x0a,0x0a,0x75,0x6e, + 0x69,0x66,0x6f,0x72,0x6d,0x20,0x76,0x65,0x63,0x34,0x20,0x76,0x73,0x5f,0x70,0x61, + 0x72,0x61,0x6d,0x73,0x5b,0x38,0x5d,0x3b,0x0a,0x61,0x74,0x74,0x72,0x69,0x62,0x75, + 0x74,0x65,0x20,0x76,0x65,0x63,0x34,0x20,0x70,0x6f,0x73,0x69,0x74,0x69,0x6f,0x6e, + 0x3b,0x0a,0x76,0x61,0x72,0x79,0x69,0x6e,0x67,0x20,0x76,0x65,0x63,0x34,0x20,0x75, + 0x76,0x3b,0x0a,0x61,0x74,0x74,0x72,0x69,0x62,0x75,0x74,0x65,0x20,0x76,0x65,0x63, + 0x32,0x20,0x74,0x65,0x78,0x63,0x6f,0x6f,0x72,0x64,0x30,0x3b,0x0a,0x76,0x61,0x72, + 0x79,0x69,0x6e,0x67,0x20,0x76,0x65,0x63,0x34,0x20,0x63,0x6f,0x6c,0x6f,0x72,0x3b, + 0x0a,0x61,0x74,0x74,0x72,0x69,0x62,0x75,0x74,0x65,0x20,0x76,0x65,0x63,0x34,0x20, + 
0x63,0x6f,0x6c,0x6f,0x72,0x30,0x3b,0x0a,0x0a,0x76,0x6f,0x69,0x64,0x20,0x6d,0x61, + 0x69,0x6e,0x28,0x29,0x0a,0x7b,0x0a,0x20,0x20,0x20,0x20,0x67,0x6c,0x5f,0x50,0x6f, + 0x73,0x69,0x74,0x69,0x6f,0x6e,0x20,0x3d,0x20,0x6d,0x61,0x74,0x34,0x28,0x76,0x73, + 0x5f,0x70,0x61,0x72,0x61,0x6d,0x73,0x5b,0x30,0x5d,0x2c,0x20,0x76,0x73,0x5f,0x70, + 0x61,0x72,0x61,0x6d,0x73,0x5b,0x31,0x5d,0x2c,0x20,0x76,0x73,0x5f,0x70,0x61,0x72, + 0x61,0x6d,0x73,0x5b,0x32,0x5d,0x2c,0x20,0x76,0x73,0x5f,0x70,0x61,0x72,0x61,0x6d, + 0x73,0x5b,0x33,0x5d,0x29,0x20,0x2a,0x20,0x70,0x6f,0x73,0x69,0x74,0x69,0x6f,0x6e, + 0x3b,0x0a,0x20,0x20,0x20,0x20,0x75,0x76,0x20,0x3d,0x20,0x6d,0x61,0x74,0x34,0x28, + 0x76,0x73,0x5f,0x70,0x61,0x72,0x61,0x6d,0x73,0x5b,0x34,0x5d,0x2c,0x20,0x76,0x73, + 0x5f,0x70,0x61,0x72,0x61,0x6d,0x73,0x5b,0x35,0x5d,0x2c,0x20,0x76,0x73,0x5f,0x70, + 0x61,0x72,0x61,0x6d,0x73,0x5b,0x36,0x5d,0x2c,0x20,0x76,0x73,0x5f,0x70,0x61,0x72, + 0x61,0x6d,0x73,0x5b,0x37,0x5d,0x29,0x20,0x2a,0x20,0x76,0x65,0x63,0x34,0x28,0x74, + 0x65,0x78,0x63,0x6f,0x6f,0x72,0x64,0x30,0x2c,0x20,0x30,0x2e,0x30,0x2c,0x20,0x31, + 0x2e,0x30,0x29,0x3b,0x0a,0x20,0x20,0x20,0x20,0x63,0x6f,0x6c,0x6f,0x72,0x20,0x3d, + 0x20,0x63,0x6f,0x6c,0x6f,0x72,0x30,0x3b,0x0a,0x7d,0x0a,0x0a,0x00, +}; +static const char _sfons_fs_source_glsl100[233] = { + 0x23,0x76,0x65,0x72,0x73,0x69,0x6f,0x6e,0x20,0x31,0x30,0x30,0x0a,0x70,0x72,0x65, + 0x63,0x69,0x73,0x69,0x6f,0x6e,0x20,0x6d,0x65,0x64,0x69,0x75,0x6d,0x70,0x20,0x66, + 0x6c,0x6f,0x61,0x74,0x3b,0x0a,0x70,0x72,0x65,0x63,0x69,0x73,0x69,0x6f,0x6e,0x20, + 0x68,0x69,0x67,0x68,0x70,0x20,0x69,0x6e,0x74,0x3b,0x0a,0x0a,0x75,0x6e,0x69,0x66, + 0x6f,0x72,0x6d,0x20,0x68,0x69,0x67,0x68,0x70,0x20,0x73,0x61,0x6d,0x70,0x6c,0x65, + 0x72,0x32,0x44,0x20,0x74,0x65,0x78,0x3b,0x0a,0x0a,0x76,0x61,0x72,0x79,0x69,0x6e, + 0x67,0x20,0x68,0x69,0x67,0x68,0x70,0x20,0x76,0x65,0x63,0x34,0x20,0x75,0x76,0x3b, + 0x0a,0x76,0x61,0x72,0x79,0x69,0x6e,0x67,0x20,0x68,0x69,0x67,0x68,0x70,0x20,0x76, + 0x65,0x63,0x34,0x20,0x63,0x6f,0x6c,0x6f,0x72,0x3b,0x0a,0x0a,0x76,0x6f,0x69,0x64, + 0x20,0x6d,0x61,0x69,0x6e,0x28,0x29,0x0a,0x7b,0x0a,0x20,0x20,0x20,0x20,0x67,0x6c, + 0x5f,0x46,0x72,0x61,0x67,0x44,0x61,0x74,0x61,0x5b,0x30,0x5d,0x20,0x3d,0x20,0x76, + 0x65,0x63,0x34,0x28,0x31,0x2e,0x30,0x2c,0x20,0x31,0x2e,0x30,0x2c,0x20,0x31,0x2e, + 0x30,0x2c,0x20,0x74,0x65,0x78,0x74,0x75,0x72,0x65,0x32,0x44,0x28,0x74,0x65,0x78, + 0x2c,0x20,0x75,0x76,0x2e,0x78,0x79,0x29,0x2e,0x78,0x29,0x20,0x2a,0x20,0x63,0x6f, + 0x6c,0x6f,0x72,0x3b,0x0a,0x7d,0x0a,0x0a,0x00, +}; +#elif defined(SOKOL_METAL) +static const uint8_t _sfons_vs_bytecode_metal_macos[3360] = { + 0x4d,0x54,0x4c,0x42,0x01,0x80,0x02,0x00,0x04,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x20,0x0d,0x00,0x00,0x00,0x00,0x00,0x00,0x58,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x6d,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xcd,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x3b,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x08,0x01,0x00,0x00,0x00,0x00,0x00,0x00, + 0x08,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x10,0x01,0x00,0x00,0x00,0x00,0x00,0x00, + 0x10,0x0c,0x00,0x00,0x00,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x6d,0x00,0x00,0x00, + 0x4e,0x41,0x4d,0x45,0x06,0x00,0x6d,0x61,0x69,0x6e,0x30,0x00,0x54,0x59,0x50,0x45, + 0x01,0x00,0x00,0x48,0x41,0x53,0x48,0x20,0x00,0x54,0xec,0xec,0xa8,0x23,0x83,0x8d, + 0xc7,0xcf,0x6e,0x67,0x94,0x95,0x54,0x94,0xeb,0x98,0x15,0x2a,0x2c,0x54,0xa0,0x4b, + 0x24,0x6f,0x5e,0xed,0x6e,0x76,0x45,0xb7,0x5e,0x4f,0x46,0x46,0x54,0x18,0x00,0x00, + 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x56,0x45,0x52,0x53,0x08,0x00,0x01,0x00,0x08, + 0x00,0x01,0x00,0x01,0x00,0x45,0x4e,0x44,0x54,0x45,0x4e,0x44,0x54,0x37,0x00,0x00, + 0x00,0x56,0x41,0x54,0x54,0x22,0x00,0x03,0x00,0x70,0x6f,0x73,0x69,0x74,0x69,0x6f, + 0x6e,0x00,0x00,0x80,0x74,0x65,0x78,0x63,0x6f,0x6f,0x72,0x64,0x30,0x00,0x01,0x80, + 0x63,0x6f,0x6c,0x6f,0x72,0x30,0x00,0x02,0x80,0x56,0x41,0x54,0x59,0x05,0x00,0x03, + 0x00,0x06,0x04,0x06,0x45,0x4e,0x44,0x54,0x04,0x00,0x00,0x00,0x45,0x4e,0x44,0x54, + 0xde,0xc0,0x17,0x0b,0x00,0x00,0x00,0x00,0x14,0x00,0x00,0x00,0xf0,0x0b,0x00,0x00, + 0xff,0xff,0xff,0xff,0x42,0x43,0xc0,0xde,0x21,0x0c,0x00,0x00,0xf9,0x02,0x00,0x00, + 0x0b,0x82,0x20,0x00,0x02,0x00,0x00,0x00,0x12,0x00,0x00,0x00,0x07,0x81,0x23,0x91, + 0x41,0xc8,0x04,0x49,0x06,0x10,0x32,0x39,0x92,0x01,0x84,0x0c,0x25,0x05,0x08,0x19, + 0x1e,0x04,0x8b,0x62,0x80,0x14,0x45,0x02,0x42,0x92,0x0b,0x42,0xa4,0x10,0x32,0x14, + 0x38,0x08,0x18,0x49,0x0a,0x32,0x44,0x24,0x48,0x0a,0x90,0x21,0x23,0xc4,0x52,0x80, + 0x0c,0x19,0x21,0x72,0x24,0x07,0xc8,0x48,0x11,0x62,0xa8,0xa0,0xa8,0x40,0xc6,0xf0, + 0x01,0x00,0x00,0x00,0x51,0x18,0x00,0x00,0x81,0x00,0x00,0x00,0x1b,0xc8,0x25,0xf8, + 0xff,0xff,0xff,0xff,0x01,0x90,0x80,0x8a,0x18,0x87,0x77,0x90,0x07,0x79,0x28,0x87, + 0x71,0xa0,0x07,0x76,0xc8,0x87,0x36,0x90,0x87,0x77,0xa8,0x07,0x77,0x20,0x87,0x72, + 0x20,0x87,0x36,0x20,0x87,0x74,0xb0,0x87,0x74,0x20,0x87,0x72,0x68,0x83,0x79,0x88, + 0x07,0x79,0xa0,0x87,0x36,0x30,0x07,0x78,0x68,0x83,0x76,0x08,0x07,0x7a,0x40,0x07, + 0xc0,0x1c,0xc2,0x81,0x1d,0xe6,0xa1,0x1c,0x00,0x82,0x1c,0xd2,0x61,0x1e,0xc2,0x41, + 0x1c,0xd8,0xa1,0x1c,0xda,0x80,0x1e,0xc2,0x21,0x1d,0xd8,0xa1,0x0d,0xc6,0x21,0x1c, + 0xd8,0x81,0x1d,0xe6,0x01,0x30,0x87,0x70,0x60,0x87,0x79,0x28,0x07,0x80,0x60,0x87, + 0x72,0x98,0x87,0x79,0x68,0x03,0x78,0x90,0x87,0x72,0x18,0x87,0x74,0x98,0x87,0x72, + 0x68,0x03,0x73,0x80,0x87,0x76,0x08,0x07,0x72,0x00,0xcc,0x21,0x1c,0xd8,0x61,0x1e, + 0xca,0x01,0x20,0xdc,0xe1,0x1d,0xda,0xc0,0x1c,0xe4,0x21,0x1c,0xda,0xa1,0x1c,0xda, + 0x00,0x1e,0xde,0x21,0x1d,0xdc,0x81,0x1e,0xca,0x41,0x1e,0xda,0xa0,0x1c,0xd8,0x21, + 0x1d,0xda,0x01,0xa0,0x07,0x79,0xa8,0x87,0x72,0x00,0x06,0x77,0x78,0x87,0x36,0x30, + 0x07,0x79,0x08,0x87,0x76,0x28,0x87,0x36,0x80,0x87,0x77,0x48,0x07,0x77,0xa0,0x87, + 0x72,0x90,0x87,0x36,0x28,0x07,0x76,0x48,0x87,0x76,0x68,0x03,0x77,0x78,0x07,0x77, + 0x68,0x03,0x76,0x28,0x87,0x70,0x30,0x07,0x80,0x70,0x87,0x77,0x68,0x83,0x74,0x70, + 0x07,0x73,0x98,0x87,0x36,0x30,0x07,0x78,0x68,0x83,0x76,0x08,0x07,0x7a,0x40,0x07, + 0x80,0x1e,0xe4,0xa1,0x1e,0xca,0x01,0x20,0xdc,0xe1,0x1d,0xda,0x40,0x1d,0xea,0xa1, + 0x1d,0xe0,0xa1,0x0d,0xe8,0x21,0x1c,0xc4,0x81,0x1d,0xca,0x61,0x1e,0x00,0x73,0x08, + 0x07,0x76,0x98,0x87,0x72,0x00,0x08,0x77,0x78,0x87,0x36,0x70,0x87,0x70,0x70,0x87, + 0x79,0x68,0x03,0x73,0x80,0x87,0x36,0x68,0x87,0x70,0xa0,0x07,0x74,0x00,0xe8,0x41, + 0x1e,0xea,0xa1,0x1c,0x00,0xc2,0x1d,0xde,0xa1,0x0d,0xe6,0x21,0x1d,0xce,0xc1,0x1d, + 0xca,0x81,0x1c,0xda,0x40,0x1f,0xca,0x41,0x1e,0xde,0x61,0x1e,0xda,0xc0,0x1c,0xe0, + 0xa1,0x0d,0xda,0x21,0x1c,0xe8,0x01,0x1d,0x00,0x7a,0x90,0x87,0x7a,0x28,0x07,0x80, + 0x70,0x87,0x77,0x68,0x03,0x7a,0x90,0x87,0x70,0x80,0x07,0x78,0x48,0x07,0x77,0x38, + 0x87,0x36,0x68,0x87,0x70,0xa0,0x07,0x74,0x00,0xe8,0x41,0x1e,0xea,0xa1,0x1c,0x00, + 0x62,0x1e,0xe8,0x21,0x1c,0xc6,0x61,0x1d,0xda,0x00,0x1e,0xe4,0xe1,0x1d,0xe8,0xa1, + 0x1c,0xc6,0x81,0x1e,0xde,0x41,0x1e,0xda,0x40,0x1c,0xea,0xc1,0x1c,0xcc,0xa1,0x1c, + 0xe4,0xa1,0x0d,0xe6,0x21,0x1d,0xf4,0xa1,0x1c,0x00,0x3c,0x00,0x88,0x7a,0x70,0x87, + 
0x79,0x08,0x07,0x73,0x28,0x87,0x36,0x30,0x07,0x78,0x68,0x83,0x76,0x08,0x07,0x7a, + 0x40,0x07,0x80,0x1e,0xe4,0xa1,0x1e,0xca,0x01,0x20,0xea,0x61,0x1e,0xca,0xa1,0x0d, + 0xe6,0xe1,0x1d,0xcc,0x81,0x1e,0xda,0xc0,0x1c,0xd8,0xe1,0x1d,0xc2,0x81,0x1e,0x00, + 0x73,0x08,0x07,0x76,0x98,0x87,0x72,0x00,0x36,0x18,0x02,0x01,0x2c,0x40,0x05,0x00, + 0x49,0x18,0x00,0x00,0x01,0x00,0x00,0x00,0x13,0x84,0x40,0x00,0x89,0x20,0x00,0x00, + 0x23,0x00,0x00,0x00,0x32,0x22,0x48,0x09,0x20,0x64,0x85,0x04,0x93,0x22,0xa4,0x84, + 0x04,0x93,0x22,0xe3,0x84,0xa1,0x90,0x14,0x12,0x4c,0x8a,0x8c,0x0b,0x84,0xa4,0x4c, + 0x10,0x44,0x33,0x00,0xc3,0x08,0x04,0x70,0x90,0x34,0x45,0x94,0x30,0xf9,0x0c,0x80, + 0x34,0xf4,0xef,0x50,0x13,0x1a,0x42,0x08,0xc3,0x08,0x02,0x90,0x04,0x61,0x26,0x6a, + 0x1e,0xe8,0x41,0x1e,0xea,0x61,0x1c,0xe8,0xc1,0x0d,0xda,0xa1,0x1c,0xe8,0x21,0x1c, + 0xd8,0x41,0x0f,0xf4,0xa0,0x1d,0xc2,0x81,0x1e,0xe4,0x21,0x1d,0xf0,0x01,0x05,0xe4, + 0x20,0x69,0x8a,0x28,0x61,0xf2,0x2b,0xe9,0x7f,0x80,0x08,0x60,0x24,0x24,0x94,0x32, + 0x88,0x60,0x08,0xa5,0x10,0x61,0x84,0x43,0x68,0x20,0x60,0x8e,0x00,0x0c,0x72,0x60, + 0xcd,0x11,0x80,0xc2,0x20,0x42,0x20,0x0c,0x23,0x10,0xcb,0x08,0x00,0x00,0x00,0x00, + 0x13,0xb2,0x70,0x48,0x07,0x79,0xb0,0x03,0x3a,0x68,0x83,0x70,0x80,0x07,0x78,0x60, + 0x87,0x72,0x68,0x83,0x76,0x08,0x87,0x71,0x78,0x87,0x79,0xc0,0x87,0x38,0x80,0x03, + 0x37,0x88,0x83,0x38,0x70,0x03,0x38,0xd8,0x70,0x1b,0xe5,0xd0,0x06,0xf0,0xa0,0x07, + 0x76,0x40,0x07,0x7a,0x60,0x07,0x74,0xa0,0x07,0x76,0x40,0x07,0x6d,0x90,0x0e,0x71, + 0xa0,0x07,0x78,0xa0,0x07,0x78,0xd0,0x06,0xe9,0x80,0x07,0x7a,0x80,0x07,0x7a,0x80, + 0x07,0x6d,0x90,0x0e,0x71,0x60,0x07,0x7a,0x10,0x07,0x76,0xa0,0x07,0x71,0x60,0x07, + 0x6d,0x90,0x0e,0x73,0x20,0x07,0x7a,0x30,0x07,0x72,0xa0,0x07,0x73,0x20,0x07,0x6d, + 0x90,0x0e,0x76,0x40,0x07,0x7a,0x60,0x07,0x74,0xa0,0x07,0x76,0x40,0x07,0x6d,0x60, + 0x0e,0x73,0x20,0x07,0x7a,0x30,0x07,0x72,0xa0,0x07,0x73,0x20,0x07,0x6d,0x60,0x0e, + 0x76,0x40,0x07,0x7a,0x60,0x07,0x74,0xa0,0x07,0x76,0x40,0x07,0x6d,0x60,0x0f,0x71, + 0x60,0x07,0x7a,0x10,0x07,0x76,0xa0,0x07,0x71,0x60,0x07,0x6d,0x60,0x0f,0x72,0x40, + 0x07,0x7a,0x30,0x07,0x72,0xa0,0x07,0x73,0x20,0x07,0x6d,0x60,0x0f,0x73,0x20,0x07, + 0x7a,0x30,0x07,0x72,0xa0,0x07,0x73,0x20,0x07,0x6d,0x60,0x0f,0x74,0x80,0x07,0x7a, + 0x60,0x07,0x74,0xa0,0x07,0x76,0x40,0x07,0x6d,0x60,0x0f,0x76,0x40,0x07,0x7a,0x60, + 0x07,0x74,0xa0,0x07,0x76,0x40,0x07,0x6d,0x60,0x0f,0x79,0x60,0x07,0x7a,0x10,0x07, + 0x72,0x80,0x07,0x7a,0x10,0x07,0x72,0x80,0x07,0x6d,0x60,0x0f,0x71,0x20,0x07,0x78, + 0xa0,0x07,0x71,0x20,0x07,0x78,0xa0,0x07,0x71,0x20,0x07,0x78,0xd0,0x06,0xf6,0x10, + 0x07,0x79,0x20,0x07,0x7a,0x20,0x07,0x75,0x60,0x07,0x7a,0x20,0x07,0x75,0x60,0x07, + 0x6d,0x60,0x0f,0x72,0x50,0x07,0x76,0xa0,0x07,0x72,0x50,0x07,0x76,0xa0,0x07,0x72, + 0x50,0x07,0x76,0xd0,0x06,0xf6,0x50,0x07,0x71,0x20,0x07,0x7a,0x50,0x07,0x71,0x20, + 0x07,0x7a,0x50,0x07,0x71,0x20,0x07,0x6d,0x60,0x0f,0x71,0x00,0x07,0x72,0x40,0x07, + 0x7a,0x10,0x07,0x70,0x20,0x07,0x74,0xa0,0x07,0x71,0x00,0x07,0x72,0x40,0x07,0x6d, + 0xe0,0x0e,0x78,0xa0,0x07,0x71,0x60,0x07,0x7a,0x30,0x07,0x72,0x30,0x84,0x49,0x00, + 0x00,0x08,0x00,0x00,0x00,0x00,0x00,0xc8,0x02,0x01,0x00,0x00,0x0b,0x00,0x00,0x00, + 0x32,0x1e,0x98,0x10,0x19,0x11,0x4c,0x90,0x8c,0x09,0x26,0x47,0xc6,0x04,0x43,0x5a, + 0x25,0x30,0x02,0x50,0x04,0x05,0x18,0x50,0x80,0x02,0x85,0x50,0x10,0x65,0x50,0x20, + 0xd4,0x46,0x00,0x88,0x8d,0x35,0x28,0x0f,0x01,0x00,0x00,0x00,0x79,0x18,0x00,0x00, + 0x19,0x01,0x00,0x00,0x1a,0x03,0x4c,0x10,0x97,0x29,0xa2,0x25,0x10,0xab,0x32,0xb9, + 
0xb9,0xb4,0x37,0xb7,0x21,0xc6,0x32,0x28,0x00,0xb3,0x50,0xb9,0x1b,0x43,0x0b,0x93, + 0xfb,0x9a,0x4b,0xd3,0x2b,0x1b,0x62,0x2c,0x81,0x22,0x2c,0x06,0xd9,0x20,0x08,0x0e, + 0x8e,0xad,0x0c,0x84,0x89,0xc9,0xaa,0x09,0xc4,0xae,0x4c,0x6e,0x2e,0xed,0xcd,0x0d, + 0x24,0x07,0x46,0xc6,0x25,0x86,0x06,0x04,0xa5,0xad,0x8c,0x2e,0x8c,0xcd,0xac,0xac, + 0x25,0x07,0x46,0xc6,0x25,0x86,0xc6,0x25,0x27,0x65,0x88,0xa0,0x10,0x43,0x8c,0x25, + 0x58,0x8e,0x45,0x60,0xd1,0x54,0x46,0x17,0xc6,0x36,0x04,0x51,0x8e,0x25,0x58,0x82, + 0x45,0xe0,0x16,0x96,0x26,0xe7,0x32,0xf6,0xd6,0x06,0x97,0xc6,0x56,0xe6,0x42,0x56, + 0xe6,0xf6,0x26,0xd7,0x36,0xf7,0x45,0x96,0x36,0x17,0x26,0xc6,0x56,0x36,0x44,0x50, + 0x12,0x72,0x61,0x69,0x72,0x2e,0x63,0x6f,0x6d,0x70,0x69,0x6c,0x65,0x2e,0x66,0x61, + 0x73,0x74,0x5f,0x6d,0x61,0x74,0x68,0x5f,0x65,0x6e,0x61,0x62,0x6c,0x65,0x43,0x04, + 0x65,0x61,0x19,0x84,0xa5,0xc9,0xb9,0x8c,0xbd,0xb5,0xc1,0xa5,0xb1,0x95,0xb9,0x98, + 0xc9,0x85,0xb5,0x95,0x89,0xd5,0x99,0x99,0x95,0xc9,0x7d,0x99,0x95,0xd1,0x8d,0xa1, + 0x7d,0x91,0xa5,0xcd,0x85,0x89,0xb1,0x95,0x0d,0x11,0x94,0x86,0x61,0x10,0x96,0x26, + 0xe7,0x32,0xf6,0xd6,0x06,0x97,0xc6,0x56,0xe6,0xe2,0x16,0x46,0x97,0x66,0x57,0xf6, + 0x45,0xf6,0x56,0x27,0xc6,0x56,0xf6,0x45,0x96,0x36,0x17,0x26,0xc6,0x56,0x36,0x44, + 0x50,0x1e,0x92,0x41,0x58,0x9a,0x9c,0xcb,0xd8,0x5b,0x1b,0x5c,0x1a,0x5b,0x99,0x8b, + 0x5b,0x18,0x5d,0x9a,0x5d,0xd9,0x17,0xdb,0x9b,0xdb,0xd9,0x17,0xdb,0x9b,0xdb,0xd9, + 0x17,0x59,0xda,0x5c,0x98,0x18,0x5b,0xd9,0x10,0x41,0x89,0x78,0x06,0x61,0x69,0x72, + 0x2e,0x63,0x6f,0x6d,0x70,0x69,0x6c,0x65,0x2e,0x6e,0x61,0x74,0x69,0x76,0x65,0x5f, + 0x77,0x69,0x64,0x65,0x5f,0x76,0x65,0x63,0x74,0x6f,0x72,0x73,0x5f,0x64,0x69,0x73, + 0x61,0x62,0x6c,0x65,0x43,0x04,0x65,0x62,0x14,0x96,0x26,0xe7,0x62,0x57,0x26,0x47, + 0x57,0x86,0xf7,0xf5,0x56,0x47,0x07,0x57,0x47,0xc7,0xa5,0x6e,0xae,0x4c,0x0e,0x85, + 0xed,0x6d,0xcc,0x0d,0x26,0x85,0x51,0x58,0x9a,0x9c,0x4b,0x98,0xdc,0xd9,0x17,0x5d, + 0x1e,0x5c,0xd9,0x97,0x5b,0x58,0x5b,0x19,0x0d,0x33,0xb6,0xb7,0x30,0x3a,0x1a,0x32, + 0x61,0x69,0x72,0x2e,0x61,0x72,0x67,0x5f,0x6e,0x61,0x6d,0x65,0x14,0xea,0xec,0x86, + 0x30,0x4a,0xa5,0x58,0xca,0xa5,0x60,0x4a,0xa6,0x68,0x5c,0xea,0xe6,0xca,0xe4,0x50, + 0xd8,0xde,0xc6,0xdc,0x62,0x52,0x58,0x8c,0xbd,0xb1,0xbd,0xc9,0x0d,0x61,0x94,0x4a, + 0xe1,0x94,0x4b,0xc1,0x94,0x4c,0xe9,0xc8,0x84,0xa5,0xc9,0xb9,0xc0,0xbd,0xcd,0xa5, + 0xd1,0xa5,0xbd,0xb9,0x71,0x39,0x63,0xfb,0x82,0x7a,0x9b,0x4b,0xa3,0x4b,0x7b,0x73, + 0x1b,0xa2,0x28,0x9f,0x72,0x29,0x98,0x92,0x29,0x60,0x30,0xc4,0x50,0x36,0xc5,0x53, + 0xc2,0x80,0x50,0x58,0x9a,0x9c,0x8b,0x5d,0x99,0x1c,0x5d,0x19,0xde,0x57,0x9a,0x1b, + 0x5c,0x1d,0x1d,0xa5,0xb0,0x34,0x39,0x17,0xb6,0xb7,0xb1,0x30,0xba,0xb4,0x37,0xb7, + 0xaf,0x34,0x37,0xb2,0x32,0x3c,0x66,0x67,0x65,0x6e,0x65,0x72,0x61,0x74,0x65,0x64, + 0x28,0x38,0x70,0x6f,0x73,0x69,0x74,0x69,0x6f,0x6e,0x44,0x76,0x34,0x5f,0x66,0x29, + 0x44,0xe0,0xde,0xe6,0xd2,0xe8,0xd2,0xde,0xdc,0x86,0x50,0x8b,0xa0,0x8c,0x81,0x42, + 0x06,0x8b,0xb0,0x04,0x4a,0x19,0x28,0x97,0x82,0x29,0x99,0x62,0x06,0xd4,0xce,0xca, + 0xdc,0xca,0xe4,0xc2,0xe8,0xca,0xc8,0x50,0x72,0xe8,0xca,0xf0,0xc6,0xde,0xde,0xe4, + 0xc8,0x60,0x88,0xec,0x64,0xbe,0xcc,0x52,0x68,0x98,0xb1,0xbd,0x85,0xd1,0xc9,0x30, + 0xa1,0x2b,0xc3,0x1b,0x7b,0x7b,0x93,0x23,0x83,0x19,0x42,0x2d,0x81,0x32,0x06,0x0a, + 0x19,0x2c,0xc1,0x12,0x28,0x68,0xa0,0x5c,0x4a,0x1a,0x28,0x99,0xa2,0x06,0xbc,0xce, + 0xca,0xdc,0xca,0xe4,0xc2,0xe8,0xca,0xc8,0x50,0x6c,0xc6,0xde,0xd8,0xde,0xe4,0x60, + 0x88,0xec,0x68,0xbe,0xcc,0x52,0x68,0x8c,0xbd,0xb1,0xbd,0xc9,0xc1,0x0c,0xa1,0x96, + 
0x41,0x19,0x03,0x85,0x0c,0x96,0x61,0x09,0x14,0x36,0x50,0x2e,0x05,0x53,0x32,0xa5, + 0x0d,0xa8,0x84,0xa5,0xc9,0xb9,0x88,0xd5,0x99,0x99,0x95,0xc9,0xf1,0x09,0x4b,0x93, + 0x73,0x11,0xab,0x33,0x33,0x2b,0x93,0xfb,0x9a,0x4b,0xd3,0x2b,0x23,0x12,0x96,0x26, + 0xe7,0x22,0x57,0x16,0x46,0x46,0x2a,0x2c,0x4d,0xce,0x65,0x8e,0x4e,0xae,0x6e,0x8c, + 0xee,0x8b,0x2e,0x0f,0xae,0xec,0x2b,0xcd,0xcd,0xec,0x8d,0x88,0x19,0xdb,0x5b,0x18, + 0x1d,0x0d,0x1e,0x0d,0x87,0x36,0x3b,0x38,0x0a,0x74,0x6d,0x43,0xa8,0x45,0x58,0x88, + 0x45,0x50,0xe6,0x40,0xa1,0x83,0x85,0x58,0x88,0x45,0x50,0xe6,0x40,0xa9,0x03,0x46, + 0x61,0x69,0x72,0x2e,0x61,0x72,0x67,0x5f,0x74,0x79,0x70,0x65,0x5f,0x73,0x69,0x7a, + 0x65,0xbc,0xc2,0xd2,0xe4,0x5c,0xc2,0xe4,0xce,0xbe,0xe8,0xf2,0xe0,0xca,0xbe,0xc2, + 0xd8,0xd2,0xce,0xdc,0xbe,0xe6,0xd2,0xf4,0xca,0x98,0xd8,0xcd,0x7d,0xc1,0x85,0xc9, + 0x85,0xb5,0xcd,0x71,0xf8,0x92,0x81,0x19,0x42,0x06,0x0b,0xa2,0xbc,0x81,0x02,0x07, + 0x4b,0xa1,0x90,0xc1,0x22,0x2c,0x81,0x12,0x07,0x8a,0x1c,0x28,0x76,0xa0,0xdc,0xc1, + 0x52,0x28,0x78,0xb0,0x24,0xca,0xa5,0xe4,0x81,0x92,0x29,0x7a,0x30,0x04,0x51,0xce, + 0x40,0x59,0x03,0xc5,0x0d,0x94,0x3d,0x18,0x62,0x24,0x80,0x22,0x06,0x0a,0x1f,0xf0, + 0x79,0x6b,0x73,0x4b,0x83,0x7b,0xa3,0x2b,0x73,0xa3,0x03,0x19,0x43,0x0b,0x93,0xe3, + 0x33,0x95,0xd6,0x06,0xc7,0x56,0x06,0x32,0xb4,0xb2,0x02,0x42,0x25,0x14,0x14,0x34, + 0x44,0x50,0xfe,0x60,0x88,0xa1,0xf8,0x81,0x02,0x0a,0x8d,0x32,0xc4,0x50,0x42,0x41, + 0x09,0x85,0x46,0x19,0x11,0xb1,0x03,0x3b,0xd8,0x43,0x3b,0xb8,0x41,0x3b,0xbc,0x03, + 0x39,0xd4,0x03,0x3b,0x94,0x83,0x1b,0x98,0x03,0x3b,0x84,0xc3,0x39,0xcc,0xc3,0x14, + 0x21,0x18,0x46,0x28,0xec,0xc0,0x0e,0xf6,0xd0,0x0e,0x6e,0x90,0x0e,0xe4,0x50,0x0e, + 0xee,0x40,0x0f,0x53,0x82,0x62,0xc4,0x12,0x0e,0xe9,0x20,0x0f,0x6e,0x60,0x0f,0xe5, + 0x20,0x0f,0xf3,0x90,0x0e,0xef,0xe0,0x0e,0x53,0x02,0x63,0x04,0x15,0x0e,0xe9,0x20, + 0x0f,0x6e,0xc0,0x0e,0xe1,0xe0,0x0e,0xe7,0x50,0x0f,0xe1,0x70,0x0e,0xe5,0xf0,0x0b, + 0xf6,0x50,0x0e,0xf2,0x30,0x0f,0xe9,0xf0,0x0e,0xee,0x30,0x25,0x40,0x46,0x4c,0xe1, + 0x90,0x0e,0xf2,0xe0,0x06,0xe3,0xf0,0x0e,0xed,0x00,0x0f,0xe9,0xc0,0x0e,0xe5,0xf0, + 0x0b,0xef,0x00,0x0f,0xf4,0x90,0x0e,0xef,0xe0,0x0e,0xf3,0x30,0xc5,0x50,0x18,0x07, + 0x92,0xa8,0x11,0x4a,0x38,0xa4,0x83,0x3c,0xb8,0x81,0x3d,0x94,0x83,0x3c,0xd0,0x43, + 0x39,0xe0,0xc3,0x94,0xa0,0x0f,0x00,0x00,0x79,0x18,0x00,0x00,0x6d,0x00,0x00,0x00, + 0x33,0x08,0x80,0x1c,0xc4,0xe1,0x1c,0x66,0x14,0x01,0x3d,0x88,0x43,0x38,0x84,0xc3, + 0x8c,0x42,0x80,0x07,0x79,0x78,0x07,0x73,0x98,0x71,0x0c,0xe6,0x00,0x0f,0xed,0x10, + 0x0e,0xf4,0x80,0x0e,0x33,0x0c,0x42,0x1e,0xc2,0xc1,0x1d,0xce,0xa1,0x1c,0x66,0x30, + 0x05,0x3d,0x88,0x43,0x38,0x84,0x83,0x1b,0xcc,0x03,0x3d,0xc8,0x43,0x3d,0x8c,0x03, + 0x3d,0xcc,0x78,0x8c,0x74,0x70,0x07,0x7b,0x08,0x07,0x79,0x48,0x87,0x70,0x70,0x07, + 0x7a,0x70,0x03,0x76,0x78,0x87,0x70,0x20,0x87,0x19,0xcc,0x11,0x0e,0xec,0x90,0x0e, + 0xe1,0x30,0x0f,0x6e,0x30,0x0f,0xe3,0xf0,0x0e,0xf0,0x50,0x0e,0x33,0x10,0xc4,0x1d, + 0xde,0x21,0x1c,0xd8,0x21,0x1d,0xc2,0x61,0x1e,0x66,0x30,0x89,0x3b,0xbc,0x83,0x3b, + 0xd0,0x43,0x39,0xb4,0x03,0x3c,0xbc,0x83,0x3c,0x84,0x03,0x3b,0xcc,0xf0,0x14,0x76, + 0x60,0x07,0x7b,0x68,0x07,0x37,0x68,0x87,0x72,0x68,0x07,0x37,0x80,0x87,0x70,0x90, + 0x87,0x70,0x60,0x07,0x76,0x28,0x07,0x76,0xf8,0x05,0x76,0x78,0x87,0x77,0x80,0x87, + 0x5f,0x08,0x87,0x71,0x18,0x87,0x72,0x98,0x87,0x79,0x98,0x81,0x2c,0xee,0xf0,0x0e, + 0xee,0xe0,0x0e,0xf5,0xc0,0x0e,0xec,0x30,0x03,0x62,0xc8,0xa1,0x1c,0xe4,0xa1,0x1c, + 0xcc,0xa1,0x1c,0xe4,0xa1,0x1c,0xdc,0x61,0x1c,0xca,0x21,0x1c,0xc4,0x81,0x1d,0xca, + 
0x61,0x06,0xd6,0x90,0x43,0x39,0xc8,0x43,0x39,0x98,0x43,0x39,0xc8,0x43,0x39,0xb8, + 0xc3,0x38,0x94,0x43,0x38,0x88,0x03,0x3b,0x94,0xc3,0x2f,0xbc,0x83,0x3c,0xfc,0x82, + 0x3b,0xd4,0x03,0x3b,0xb0,0xc3,0x0c,0xc7,0x69,0x87,0x70,0x58,0x87,0x72,0x70,0x83, + 0x74,0x68,0x07,0x78,0x60,0x87,0x74,0x18,0x87,0x74,0xa0,0x87,0x19,0xce,0x53,0x0f, + 0xee,0x00,0x0f,0xf2,0x50,0x0e,0xe4,0x90,0x0e,0xe3,0x40,0x0f,0xe1,0x20,0x0e,0xec, + 0x50,0x0e,0x33,0x20,0x28,0x1d,0xdc,0xc1,0x1e,0xc2,0x41,0x1e,0xd2,0x21,0x1c,0xdc, + 0x81,0x1e,0xdc,0xe0,0x1c,0xe4,0xe1,0x1d,0xea,0x01,0x1e,0x66,0x18,0x51,0x38,0xb0, + 0x43,0x3a,0x9c,0x83,0x3b,0xcc,0x50,0x24,0x76,0x60,0x07,0x7b,0x68,0x07,0x37,0x60, + 0x87,0x77,0x78,0x07,0x78,0x98,0x51,0x4c,0xf4,0x90,0x0f,0xf0,0x50,0x0e,0x33,0x1e, + 0x6a,0x1e,0xca,0x61,0x1c,0xe8,0x21,0x1d,0xde,0xc1,0x1d,0x7e,0x01,0x1e,0xe4,0xa1, + 0x1c,0xcc,0x21,0x1d,0xf0,0x61,0x06,0x54,0x85,0x83,0x38,0xcc,0xc3,0x3b,0xb0,0x43, + 0x3d,0xd0,0x43,0x39,0xfc,0xc2,0x3c,0xe4,0x43,0x3b,0x88,0xc3,0x3b,0xb0,0xc3,0x8c, + 0xc5,0x0a,0x87,0x79,0x98,0x87,0x77,0x18,0x87,0x74,0x08,0x07,0x7a,0x28,0x07,0x72, + 0x00,0x00,0x00,0x00,0x71,0x20,0x00,0x00,0x02,0x00,0x00,0x00,0x06,0x50,0x30,0x00, + 0xd2,0xd0,0x00,0x00,0x61,0x20,0x00,0x00,0x3d,0x00,0x00,0x00,0x13,0x04,0x41,0x2c, + 0x10,0x00,0x00,0x00,0x09,0x00,0x00,0x00,0xf4,0xc6,0x22,0x86,0x61,0x18,0xc6,0x22, + 0x04,0x41,0x10,0xc6,0x22,0x82,0x20,0x08,0xa8,0x95,0x40,0x19,0x14,0x01,0xbd,0x11, + 0x00,0x1a,0x33,0x00,0x24,0x66,0x00,0x28,0xcc,0x00,0x00,0x00,0xe3,0x15,0x0b,0x84, + 0x61,0x10,0x05,0x65,0x90,0x01,0x1a,0x0c,0x13,0x02,0xf9,0x8c,0x57,0x3c,0x14,0xc7, + 0x2d,0x14,0x94,0x41,0x06,0xea,0x70,0x4c,0x08,0xe4,0x63,0x41,0x01,0x9f,0xf1,0x0a, + 0x2a,0x0b,0x83,0x30,0x70,0x28,0x28,0x83,0x0c,0x19,0x43,0x99,0x10,0xc8,0xc7,0x8a, + 0x00,0x3e,0xe3,0x15,0x99,0x67,0x06,0x66,0x40,0x51,0x50,0x06,0x19,0xbc,0x48,0x33, + 0x21,0x90,0x8f,0x15,0x01,0x7c,0xc6,0x2b,0xbc,0x31,0x60,0x83,0x35,0x18,0x03,0x0a, + 0xca,0x20,0x83,0x18,0x60,0x99,0x09,0x81,0x7c,0xc6,0x2b,0xc4,0xe0,0x0c,0xe0,0xe0, + 0x0d,0x3c,0x0a,0xca,0x20,0x83,0x19,0x70,0x61,0x60,0x42,0x20,0x1f,0x0b,0x0a,0xf8, + 0x8c,0x57,0x9c,0x01,0x1b,0xd4,0x01,0x1d,0x88,0x01,0x05,0xc5,0x86,0x00,0x3e,0xb3, + 0x0d,0x61,0x10,0x00,0xb3,0x0d,0x01,0x1b,0x04,0xb3,0x0d,0xc1,0x23,0x64,0x10,0x10, + 0x03,0x00,0x00,0x00,0x09,0x00,0x00,0x00,0x5b,0x86,0x20,0x10,0x85,0x2d,0x43,0x11, + 0x88,0xc2,0x96,0x41,0x09,0x44,0x61,0xcb,0xf0,0x04,0xa2,0xb0,0x65,0xa0,0x02,0x51, + 0xd8,0x32,0x60,0x81,0x28,0x6c,0x19,0xba,0x40,0x14,0x00,0x00,0x00,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +}; +static const uint8_t _sfons_fs_bytecode_metal_macos[2941] = { + 0x4d,0x54,0x4c,0x42,0x01,0x80,0x02,0x00,0x04,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x7d,0x0b,0x00,0x00,0x00,0x00,0x00,0x00,0x58,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x6d,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xcd,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x08,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xd5,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x08,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xdd,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0xa0,0x0a,0x00,0x00,0x00,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x6d,0x00,0x00,0x00, + 0x4e,0x41,0x4d,0x45,0x06,0x00,0x6d,0x61,0x69,0x6e,0x30,0x00,0x54,0x59,0x50,0x45, + 0x01,0x00,0x01,0x48,0x41,0x53,0x48,0x20,0x00,0xfd,0x3a,0x06,0x83,0x09,0xe9,0xe7, + 0xd2,0xb5,0xf9,0x1e,0x86,0x99,0xb0,0x0c,0x8c,0xbe,0xe0,0xd3,0x2d,0x9b,0x8c,0xe9, + 0x2a,0x92,0xa3,0xeb,0x0b,0xf7,0x98,0x18,0x30,0x4f,0x46,0x46,0x54,0x18,0x00,0x00, + 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x56,0x45,0x52,0x53,0x08,0x00,0x01,0x00,0x08, + 0x00,0x01,0x00,0x01,0x00,0x45,0x4e,0x44,0x54,0x45,0x4e,0x44,0x54,0x04,0x00,0x00, + 0x00,0x45,0x4e,0x44,0x54,0x04,0x00,0x00,0x00,0x45,0x4e,0x44,0x54,0xde,0xc0,0x17, + 0x0b,0x00,0x00,0x00,0x00,0x14,0x00,0x00,0x00,0x84,0x0a,0x00,0x00,0xff,0xff,0xff, + 0xff,0x42,0x43,0xc0,0xde,0x21,0x0c,0x00,0x00,0x9e,0x02,0x00,0x00,0x0b,0x82,0x20, + 0x00,0x02,0x00,0x00,0x00,0x12,0x00,0x00,0x00,0x07,0x81,0x23,0x91,0x41,0xc8,0x04, + 0x49,0x06,0x10,0x32,0x39,0x92,0x01,0x84,0x0c,0x25,0x05,0x08,0x19,0x1e,0x04,0x8b, + 0x62,0x80,0x14,0x45,0x02,0x42,0x92,0x0b,0x42,0xa4,0x10,0x32,0x14,0x38,0x08,0x18, + 0x49,0x0a,0x32,0x44,0x24,0x48,0x0a,0x90,0x21,0x23,0xc4,0x52,0x80,0x0c,0x19,0x21, + 0x72,0x24,0x07,0xc8,0x48,0x11,0x62,0xa8,0xa0,0xa8,0x40,0xc6,0xf0,0x01,0x00,0x00, + 0x00,0x51,0x18,0x00,0x00,0x89,0x00,0x00,0x00,0x1b,0xcc,0x25,0xf8,0xff,0xff,0xff, + 0xff,0x01,0x60,0x00,0x09,0xa8,0x88,0x71,0x78,0x07,0x79,0x90,0x87,0x72,0x18,0x07, + 0x7a,0x60,0x87,0x7c,0x68,0x03,0x79,0x78,0x87,0x7a,0x70,0x07,0x72,0x28,0x07,0x72, + 0x68,0x03,0x72,0x48,0x07,0x7b,0x48,0x07,0x72,0x28,0x87,0x36,0x98,0x87,0x78,0x90, + 0x07,0x7a,0x68,0x03,0x73,0x80,0x87,0x36,0x68,0x87,0x70,0xa0,0x07,0x74,0x00,0xcc, + 0x21,0x1c,0xd8,0x61,0x1e,0xca,0x01,0x20,0xc8,0x21,0x1d,0xe6,0x21,0x1c,0xc4,0x81, + 0x1d,0xca,0xa1,0x0d,0xe8,0x21,0x1c,0xd2,0x81,0x1d,0xda,0x60,0x1c,0xc2,0x81,0x1d, + 0xd8,0x61,0x1e,0x00,0x73,0x08,0x07,0x76,0x98,0x87,0x72,0x00,0x08,0x76,0x28,0x87, + 0x79,0x98,0x87,0x36,0x80,0x07,0x79,0x28,0x87,0x71,0x48,0x87,0x79,0x28,0x87,0x36, + 0x30,0x07,0x78,0x68,0x87,0x70,0x20,0x07,0xc0,0x1c,0xc2,0x81,0x1d,0xe6,0xa1,0x1c, + 0x00,0xc2,0x1d,0xde,0xa1,0x0d,0xcc,0x41,0x1e,0xc2,0xa1,0x1d,0xca,0xa1,0x0d,0xe0, + 0xe1,0x1d,0xd2,0xc1,0x1d,0xe8,0xa1,0x1c,0xe4,0xa1,0x0d,0xca,0x81,0x1d,0xd2,0xa1, + 0x1d,0x00,0x7a,0x90,0x87,0x7a,0x28,0x07,0x60,0x70,0x87,0x77,0x68,0x03,0x73,0x90, + 0x87,0x70,0x68,0x87,0x72,0x68,0x03,0x78,0x78,0x87,0x74,0x70,0x07,0x7a,0x28,0x07, + 0x79,0x68,0x83,0x72,0x60,0x87,0x74,0x68,0x87,0x36,0x70,0x87,0x77,0x70,0x87,0x36, + 0x60,0x87,0x72,0x08,0x07,0x73,0x00,0x08,0x77,0x78,0x87,0x36,0x48,0x07,0x77,0x30, + 0x87,0x79,0x68,0x03,0x73,0x80,0x87,0x36,0x68,0x87,0x70,0xa0,0x07,0x74,0x00,0xe8, + 0x41,0x1e,0xea,0xa1,0x1c,0x00,0xc2,0x1d,0xde,0xa1,0x0d,0xd4,0xa1,0x1e,0xda,0x01, + 0x1e,0xda,0x80,0x1e,0xc2,0x41,0x1c,0xd8,0xa1,0x1c,0xe6,0x01,0x30,0x87,0x70,0x60, + 0x87,0x79,0x28,0x07,0x80,0x70,0x87,0x77,0x68,0x03,0x77,0x08,0x07,0x77,0x98,0x87, + 0x36,0x30,0x07,0x78,0x68,0x83,0x76,0x08,0x07,0x7a,0x40,0x07,0x80,0x1e,0xe4,0xa1, + 0x1e,0xca,0x01,0x20,0xdc,0xe1,0x1d,0xda,0x60,0x1e,0xd2,0xe1,0x1c,0xdc,0xa1,0x1c, + 0xc8,0xa1,0x0d,0xf4,0xa1,0x1c,0xe4,0xe1,0x1d,0xe6,0xa1,0x0d,0xcc,0x01,0x1e,0xda, + 0xa0,0x1d,0xc2,0x81,0x1e,0xd0,0x01,0xa0,0x07,0x79,0xa8,0x87,0x72,0x00,0x08,0x77, + 0x78,0x87,0x36,0xa0,0x07,0x79,0x08,0x07,0x78,0x80,0x87,0x74,0x70,0x87,0x73,0x68, + 0x83,0x76,0x08,0x07,0x7a,0x40,0x07,0x80,0x1e,0xe4,0xa1,0x1e,0xca,0x01,0x20,0xe6, + 0x81,0x1e,0xc2,0x61,0x1c,0xd6,0xa1,0x0d,0xe0,0x41,0x1e,0xde,0x81,0x1e,0xca,0x61, + 0x1c,0xe8,0xe1,0x1d,0xe4,0xa1,0x0d,0xc4,0xa1,0x1e,0xcc,0xc1,0x1c,0xca,0x41,0x1e, + 0xda,0x60,0x1e,0xd2,0x41,0x1f,0xca,0x01,0xc0,0x03,0x80,0xa8,0x07,0x77,0x98,0x87, + 0x70,0x30,0x87,0x72,0x68,0x03,0x73,0x80,0x87,0x36,0x68,0x87,0x70,0xa0,0x07,0x74, + 0x00,0xe8,0x41,0x1e,0xea,0xa1,0x1c,0x00,0xa2,0x1e,0xe6,0xa1,0x1c,0xda,0x60,0x1e, + 0xde,0xc1,0x1c,0xe8,0xa1,0x0d,0xcc,0x81,0x1d,0xde,0x21,0x1c,0xe8,0x01,0x30,0x87, + 
0x70,0x60,0x87,0x79,0x28,0x07,0x60,0x83,0x21,0x0c,0xc0,0x02,0x54,0x1b,0x8c,0x81, + 0x00,0x16,0xa0,0xda,0x80,0x10,0xff,0xff,0xff,0xff,0x3f,0x00,0x0c,0x20,0x01,0xd5, + 0x06,0xa3,0x08,0x80,0x05,0xa8,0x36,0x18,0x86,0x00,0x2c,0x40,0x05,0x49,0x18,0x00, + 0x00,0x03,0x00,0x00,0x00,0x13,0x86,0x40,0x18,0x26,0x0c,0x44,0x61,0x00,0x00,0x00, + 0x00,0x89,0x20,0x00,0x00,0x21,0x00,0x00,0x00,0x32,0x22,0x48,0x09,0x20,0x64,0x85, + 0x04,0x93,0x22,0xa4,0x84,0x04,0x93,0x22,0xe3,0x84,0xa1,0x90,0x14,0x12,0x4c,0x8a, + 0x8c,0x0b,0x84,0xa4,0x4c,0x10,0x4c,0x33,0x00,0xc3,0x08,0x04,0x70,0x90,0x34,0x45, + 0x94,0x30,0xf9,0x0c,0x80,0x34,0xf4,0xef,0x50,0x13,0x0a,0xc2,0x51,0xd2,0x14,0x51, + 0xc2,0xe4,0xff,0x13,0x71,0x4d,0x54,0x44,0xfc,0xf6,0xf0,0x4f,0x63,0x04,0xc0,0x20, + 0xc2,0x10,0x5c,0x24,0x4d,0x11,0x25,0x4c,0xfe,0x2f,0x01,0xcc,0xb3,0x10,0xd1,0x3f, + 0x8d,0x11,0x00,0x83,0x08,0x85,0x50,0x0a,0x11,0x02,0x31,0x74,0x86,0x11,0x04,0x60, + 0x8e,0x20,0x98,0x23,0x00,0x83,0x61,0x04,0x61,0x29,0x48,0x20,0x26,0x29,0xa6,0x00, + 0xb5,0x81,0x80,0x1c,0x58,0xc3,0x08,0xc4,0x32,0x02,0x00,0x00,0x00,0x13,0xb2,0x70, + 0x48,0x07,0x79,0xb0,0x03,0x3a,0x68,0x83,0x70,0x80,0x07,0x78,0x60,0x87,0x72,0x68, + 0x83,0x76,0x08,0x87,0x71,0x78,0x87,0x79,0xc0,0x87,0x38,0x80,0x03,0x37,0x88,0x83, + 0x38,0x70,0x03,0x38,0xd8,0x70,0x1b,0xe5,0xd0,0x06,0xf0,0xa0,0x07,0x76,0x40,0x07, + 0x7a,0x60,0x07,0x74,0xa0,0x07,0x76,0x40,0x07,0x6d,0x90,0x0e,0x71,0xa0,0x07,0x78, + 0xa0,0x07,0x78,0xd0,0x06,0xe9,0x80,0x07,0x7a,0x80,0x07,0x7a,0x80,0x07,0x6d,0x90, + 0x0e,0x71,0x60,0x07,0x7a,0x10,0x07,0x76,0xa0,0x07,0x71,0x60,0x07,0x6d,0x90,0x0e, + 0x73,0x20,0x07,0x7a,0x30,0x07,0x72,0xa0,0x07,0x73,0x20,0x07,0x6d,0x90,0x0e,0x76, + 0x40,0x07,0x7a,0x60,0x07,0x74,0xa0,0x07,0x76,0x40,0x07,0x6d,0x60,0x0e,0x73,0x20, + 0x07,0x7a,0x30,0x07,0x72,0xa0,0x07,0x73,0x20,0x07,0x6d,0x60,0x0e,0x76,0x40,0x07, + 0x7a,0x60,0x07,0x74,0xa0,0x07,0x76,0x40,0x07,0x6d,0x60,0x0f,0x71,0x60,0x07,0x7a, + 0x10,0x07,0x76,0xa0,0x07,0x71,0x60,0x07,0x6d,0x60,0x0f,0x72,0x40,0x07,0x7a,0x30, + 0x07,0x72,0xa0,0x07,0x73,0x20,0x07,0x6d,0x60,0x0f,0x73,0x20,0x07,0x7a,0x30,0x07, + 0x72,0xa0,0x07,0x73,0x20,0x07,0x6d,0x60,0x0f,0x74,0x80,0x07,0x7a,0x60,0x07,0x74, + 0xa0,0x07,0x76,0x40,0x07,0x6d,0x60,0x0f,0x76,0x40,0x07,0x7a,0x60,0x07,0x74,0xa0, + 0x07,0x76,0x40,0x07,0x6d,0x60,0x0f,0x79,0x60,0x07,0x7a,0x10,0x07,0x72,0x80,0x07, + 0x7a,0x10,0x07,0x72,0x80,0x07,0x6d,0x60,0x0f,0x71,0x20,0x07,0x78,0xa0,0x07,0x71, + 0x20,0x07,0x78,0xa0,0x07,0x71,0x20,0x07,0x78,0xd0,0x06,0xf6,0x10,0x07,0x79,0x20, + 0x07,0x7a,0x20,0x07,0x75,0x60,0x07,0x7a,0x20,0x07,0x75,0x60,0x07,0x6d,0x60,0x0f, + 0x72,0x50,0x07,0x76,0xa0,0x07,0x72,0x50,0x07,0x76,0xa0,0x07,0x72,0x50,0x07,0x76, + 0xd0,0x06,0xf6,0x50,0x07,0x71,0x20,0x07,0x7a,0x50,0x07,0x71,0x20,0x07,0x7a,0x50, + 0x07,0x71,0x20,0x07,0x6d,0x60,0x0f,0x71,0x00,0x07,0x72,0x40,0x07,0x7a,0x10,0x07, + 0x70,0x20,0x07,0x74,0xa0,0x07,0x71,0x00,0x07,0x72,0x40,0x07,0x6d,0xe0,0x0e,0x78, + 0xa0,0x07,0x71,0x60,0x07,0x7a,0x30,0x07,0x72,0x30,0x84,0x41,0x00,0x00,0x08,0x00, + 0x00,0x00,0x00,0x00,0x18,0xc2,0x38,0x40,0x00,0x08,0x00,0x00,0x00,0x00,0x00,0x64, + 0x81,0x00,0x00,0x00,0x00,0x08,0x00,0x00,0x00,0x32,0x1e,0x98,0x10,0x19,0x11,0x4c, + 0x90,0x8c,0x09,0x26,0x47,0xc6,0x04,0x43,0x5a,0x25,0x30,0x02,0x50,0x04,0x85,0x50, + 0x10,0x65,0x40,0x70,0xac,0x41,0x79,0x08,0x00,0x79,0x18,0x00,0x00,0xd7,0x00,0x00, + 0x00,0x1a,0x03,0x4c,0x10,0x97,0x29,0xa2,0x25,0x10,0xab,0x32,0xb9,0xb9,0xb4,0x37, + 0xb7,0x21,0xc6,0x42,0x3c,0x00,0x84,0x50,0xb9,0x1b,0x43,0x0b,0x93,0xfb,0x9a,0x4b, + 
0xd3,0x2b,0x1b,0x62,0x2c,0xc2,0x23,0x2c,0x05,0xd9,0x20,0x08,0x0e,0x8e,0xad,0x0c, + 0x84,0x89,0xc9,0xaa,0x09,0xc4,0xae,0x4c,0x6e,0x2e,0xed,0xcd,0x0d,0x24,0x07,0x46, + 0xc6,0x25,0x86,0x06,0x04,0xa5,0xad,0x8c,0x2e,0x8c,0xcd,0xac,0xac,0x25,0x07,0x46, + 0xc6,0x25,0x86,0xc6,0x25,0x27,0x65,0x88,0xf0,0x10,0x43,0x8c,0x45,0x58,0x8c,0x65, + 0x60,0xd1,0x54,0x46,0x17,0xc6,0x36,0x04,0x79,0x8e,0x45,0x58,0x84,0x65,0xe0,0x16, + 0x96,0x26,0xe7,0x32,0xf6,0xd6,0x06,0x97,0xc6,0x56,0xe6,0x42,0x56,0xe6,0xf6,0x26, + 0xd7,0x36,0xf7,0x45,0x96,0x36,0x17,0x26,0xc6,0x56,0x36,0x44,0x78,0x12,0x72,0x61, + 0x69,0x72,0x2e,0x63,0x6f,0x6d,0x70,0x69,0x6c,0x65,0x2e,0x66,0x61,0x73,0x74,0x5f, + 0x6d,0x61,0x74,0x68,0x5f,0x65,0x6e,0x61,0x62,0x6c,0x65,0x43,0x84,0x67,0x61,0x19, + 0x84,0xa5,0xc9,0xb9,0x8c,0xbd,0xb5,0xc1,0xa5,0xb1,0x95,0xb9,0x98,0xc9,0x85,0xb5, + 0x95,0x89,0xd5,0x99,0x99,0x95,0xc9,0x7d,0x99,0x95,0xd1,0x8d,0xa1,0x7d,0x91,0xa5, + 0xcd,0x85,0x89,0xb1,0x95,0x0d,0x11,0x9e,0x86,0x61,0x10,0x96,0x26,0xe7,0x32,0xf6, + 0xd6,0x06,0x97,0xc6,0x56,0xe6,0xe2,0x16,0x46,0x97,0x66,0x57,0xf6,0x45,0xf6,0x56, + 0x27,0xc6,0x56,0xf6,0x45,0x96,0x36,0x17,0x26,0xc6,0x56,0x36,0x44,0x78,0x1e,0x92, + 0x41,0x58,0x9a,0x9c,0xcb,0xd8,0x5b,0x1b,0x5c,0x1a,0x5b,0x99,0x8b,0x5b,0x18,0x5d, + 0x9a,0x5d,0xd9,0x17,0xdb,0x9b,0xdb,0xd9,0x17,0xdb,0x9b,0xdb,0xd9,0x17,0x59,0xda, + 0x5c,0x98,0x18,0x5b,0xd9,0x10,0xe1,0x89,0x78,0x06,0x61,0x69,0x72,0x2e,0x63,0x6f, + 0x6d,0x70,0x69,0x6c,0x65,0x2e,0x6e,0x61,0x74,0x69,0x76,0x65,0x5f,0x77,0x69,0x64, + 0x65,0x5f,0x76,0x65,0x63,0x74,0x6f,0x72,0x73,0x5f,0x64,0x69,0x73,0x61,0x62,0x6c, + 0x65,0x43,0x84,0x67,0x62,0x14,0x96,0x26,0xe7,0x22,0x57,0xe6,0x46,0x56,0x26,0xf7, + 0x45,0x17,0x26,0x77,0x56,0x46,0xc7,0x28,0x2c,0x4d,0xce,0x25,0x4c,0xee,0xec,0x8b, + 0x2e,0x0f,0xae,0xec,0xcb,0x2d,0xac,0xad,0x8c,0x86,0x19,0xdb,0x5b,0x18,0x1d,0x0d, + 0x99,0xb0,0x34,0x39,0x97,0x30,0xb9,0xb3,0x2f,0xb7,0xb0,0xb6,0x32,0x2a,0x66,0x72, + 0x61,0x67,0x5f,0x63,0x6f,0x6c,0x6f,0x72,0x43,0x98,0xa7,0x5a,0x86,0xc7,0x7a,0xae, + 0x07,0x7b,0xb2,0x21,0xc2,0xa3,0x51,0x0a,0x4b,0x93,0x73,0x31,0x93,0x0b,0x3b,0x6b, + 0x2b,0x73,0xa3,0xfb,0x4a,0x73,0x83,0xab,0xa3,0xe3,0x52,0x37,0x57,0x26,0x87,0xc2, + 0xf6,0x36,0xe6,0x06,0x93,0x42,0x25,0x2c,0x4d,0xce,0x65,0xac,0xcc,0x8d,0xae,0x4c, + 0x8e,0x4f,0x58,0x9a,0x9c,0x0b,0x5c,0x99,0xdc,0x1c,0x5c,0xd9,0x18,0x5d,0x9a,0x5d, + 0x19,0x85,0x3a,0xbb,0x21,0xd2,0x32,0x3c,0xdc,0xd3,0x3d,0xde,0xf3,0x3d,0xd6,0x73, + 0x3d,0xd8,0x03,0x06,0x5c,0xea,0xe6,0xca,0xe4,0x50,0xd8,0xde,0xc6,0xdc,0x62,0x52, + 0x58,0x8c,0xbd,0xb1,0xbd,0xc9,0x0d,0x91,0x16,0xe1,0xe1,0x1e,0x31,0x78,0xbc,0xe7, + 0x7b,0xac,0xe7,0x7a,0xb0,0x67,0x0c,0xb8,0x84,0xa5,0xc9,0xb9,0xd0,0x95,0xe1,0xd1, + 0xd5,0xc9,0x95,0x51,0x0a,0x4b,0x93,0x73,0x61,0x7b,0x1b,0x0b,0xa3,0x4b,0x7b,0x73, + 0xfb,0x4a,0x73,0x23,0x2b,0xc3,0xa3,0x12,0x96,0x26,0xe7,0x32,0x17,0xd6,0x06,0xc7, + 0x56,0x46,0x8c,0xae,0x0c,0x8f,0xae,0x4e,0xae,0x4c,0x86,0x8c,0xc7,0x8c,0xed,0x2d, + 0x8c,0x8e,0x05,0x64,0x2e,0xac,0x0d,0x8e,0xad,0xcc,0x87,0x03,0x5d,0x19,0xde,0x10, + 0x6a,0x21,0x9e,0x32,0x78,0xcc,0x60,0x19,0x16,0xe1,0x39,0x83,0xc7,0x7a,0xd0,0xe0, + 0xc1,0x9e,0x34,0xe0,0x12,0x96,0x26,0xe7,0x32,0x17,0xd6,0x06,0xc7,0x56,0x26,0xc7, + 0x63,0x2e,0xac,0x0d,0x8e,0xad,0x4c,0x8e,0x08,0x5d,0x19,0xde,0x54,0x1b,0x1c,0x9b, + 0xdc,0x10,0x69,0x39,0x9e,0x35,0x78,0xcc,0x60,0x19,0x16,0xe1,0xb1,0x1e,0x36,0x78, + 0xb0,0xa7,0x0d,0x86,0x20,0x4f,0x18,0x3c,0x64,0xf0,0xa8,0xc1,0xe3,0x06,0x43,0x0c, + 0x04,0x78,0xb6,0xe7,0x0d,0x46,0x44,0xec,0xc0,0x0e,0xf6,0xd0,0x0e,0x6e,0xd0,0x0e, + 
0xef,0x40,0x0e,0xf5,0xc0,0x0e,0xe5,0xe0,0x06,0xe6,0xc0,0x0e,0xe1,0x70,0x0e,0xf3, + 0x30,0x45,0x08,0x86,0x11,0x0a,0x3b,0xb0,0x83,0x3d,0xb4,0x83,0x1b,0xa4,0x03,0x39, + 0x94,0x83,0x3b,0xd0,0xc3,0x94,0xa0,0x18,0xb1,0x84,0x43,0x3a,0xc8,0x83,0x1b,0xd8, + 0x43,0x39,0xc8,0xc3,0x3c,0xa4,0xc3,0x3b,0xb8,0xc3,0x94,0xc0,0x18,0x41,0x85,0x43, + 0x3a,0xc8,0x83,0x1b,0xb0,0x43,0x38,0xb8,0xc3,0x39,0xd4,0x43,0x38,0x9c,0x43,0x39, + 0xfc,0x82,0x3d,0x94,0x83,0x3c,0xcc,0x43,0x3a,0xbc,0x83,0x3b,0x4c,0x09,0x90,0x11, + 0x53,0x38,0xa4,0x83,0x3c,0xb8,0xc1,0x38,0xbc,0x43,0x3b,0xc0,0x43,0x3a,0xb0,0x43, + 0x39,0xfc,0xc2,0x3b,0xc0,0x03,0x3d,0xa4,0xc3,0x3b,0xb8,0xc3,0x3c,0x4c,0x31,0x14, + 0xc6,0x81,0x24,0x6a,0x04,0x13,0x0e,0xe9,0x20,0x0f,0x6e,0x60,0x0e,0xf2,0x10,0x0e, + 0xe7,0xd0,0x0e,0xe5,0xe0,0x0e,0xf4,0x30,0x25,0x80,0x03,0x00,0x00,0x79,0x18,0x00, + 0x00,0x6d,0x00,0x00,0x00,0x33,0x08,0x80,0x1c,0xc4,0xe1,0x1c,0x66,0x14,0x01,0x3d, + 0x88,0x43,0x38,0x84,0xc3,0x8c,0x42,0x80,0x07,0x79,0x78,0x07,0x73,0x98,0x71,0x0c, + 0xe6,0x00,0x0f,0xed,0x10,0x0e,0xf4,0x80,0x0e,0x33,0x0c,0x42,0x1e,0xc2,0xc1,0x1d, + 0xce,0xa1,0x1c,0x66,0x30,0x05,0x3d,0x88,0x43,0x38,0x84,0x83,0x1b,0xcc,0x03,0x3d, + 0xc8,0x43,0x3d,0x8c,0x03,0x3d,0xcc,0x78,0x8c,0x74,0x70,0x07,0x7b,0x08,0x07,0x79, + 0x48,0x87,0x70,0x70,0x07,0x7a,0x70,0x03,0x76,0x78,0x87,0x70,0x20,0x87,0x19,0xcc, + 0x11,0x0e,0xec,0x90,0x0e,0xe1,0x30,0x0f,0x6e,0x30,0x0f,0xe3,0xf0,0x0e,0xf0,0x50, + 0x0e,0x33,0x10,0xc4,0x1d,0xde,0x21,0x1c,0xd8,0x21,0x1d,0xc2,0x61,0x1e,0x66,0x30, + 0x89,0x3b,0xbc,0x83,0x3b,0xd0,0x43,0x39,0xb4,0x03,0x3c,0xbc,0x83,0x3c,0x84,0x03, + 0x3b,0xcc,0xf0,0x14,0x76,0x60,0x07,0x7b,0x68,0x07,0x37,0x68,0x87,0x72,0x68,0x07, + 0x37,0x80,0x87,0x70,0x90,0x87,0x70,0x60,0x07,0x76,0x28,0x07,0x76,0xf8,0x05,0x76, + 0x78,0x87,0x77,0x80,0x87,0x5f,0x08,0x87,0x71,0x18,0x87,0x72,0x98,0x87,0x79,0x98, + 0x81,0x2c,0xee,0xf0,0x0e,0xee,0xe0,0x0e,0xf5,0xc0,0x0e,0xec,0x30,0x03,0x62,0xc8, + 0xa1,0x1c,0xe4,0xa1,0x1c,0xcc,0xa1,0x1c,0xe4,0xa1,0x1c,0xdc,0x61,0x1c,0xca,0x21, + 0x1c,0xc4,0x81,0x1d,0xca,0x61,0x06,0xd6,0x90,0x43,0x39,0xc8,0x43,0x39,0x98,0x43, + 0x39,0xc8,0x43,0x39,0xb8,0xc3,0x38,0x94,0x43,0x38,0x88,0x03,0x3b,0x94,0xc3,0x2f, + 0xbc,0x83,0x3c,0xfc,0x82,0x3b,0xd4,0x03,0x3b,0xb0,0xc3,0x0c,0xc7,0x69,0x87,0x70, + 0x58,0x87,0x72,0x70,0x83,0x74,0x68,0x07,0x78,0x60,0x87,0x74,0x18,0x87,0x74,0xa0, + 0x87,0x19,0xce,0x53,0x0f,0xee,0x00,0x0f,0xf2,0x50,0x0e,0xe4,0x90,0x0e,0xe3,0x40, + 0x0f,0xe1,0x20,0x0e,0xec,0x50,0x0e,0x33,0x20,0x28,0x1d,0xdc,0xc1,0x1e,0xc2,0x41, + 0x1e,0xd2,0x21,0x1c,0xdc,0x81,0x1e,0xdc,0xe0,0x1c,0xe4,0xe1,0x1d,0xea,0x01,0x1e, + 0x66,0x18,0x51,0x38,0xb0,0x43,0x3a,0x9c,0x83,0x3b,0xcc,0x50,0x24,0x76,0x60,0x07, + 0x7b,0x68,0x07,0x37,0x60,0x87,0x77,0x78,0x07,0x78,0x98,0x51,0x4c,0xf4,0x90,0x0f, + 0xf0,0x50,0x0e,0x33,0x1e,0x6a,0x1e,0xca,0x61,0x1c,0xe8,0x21,0x1d,0xde,0xc1,0x1d, + 0x7e,0x01,0x1e,0xe4,0xa1,0x1c,0xcc,0x21,0x1d,0xf0,0x61,0x06,0x54,0x85,0x83,0x38, + 0xcc,0xc3,0x3b,0xb0,0x43,0x3d,0xd0,0x43,0x39,0xfc,0xc2,0x3c,0xe4,0x43,0x3b,0x88, + 0xc3,0x3b,0xb0,0xc3,0x8c,0xc5,0x0a,0x87,0x79,0x98,0x87,0x77,0x18,0x87,0x74,0x08, + 0x07,0x7a,0x28,0x07,0x72,0x00,0x00,0x00,0x00,0x71,0x20,0x00,0x00,0x08,0x00,0x00, + 0x00,0x16,0xb0,0x01,0x48,0xe4,0x4b,0x00,0xf3,0x2c,0xc4,0x3f,0x11,0xd7,0x44,0x45, + 0xc4,0x6f,0x0f,0x7e,0x85,0x17,0xb7,0x6d,0x00,0x05,0x03,0x20,0x0d,0x0d,0x00,0x00, + 0x00,0x61,0x20,0x00,0x00,0x16,0x00,0x00,0x00,0x13,0x04,0x41,0x2c,0x10,0x00,0x00, + 0x00,0x0b,0x00,0x00,0x00,0x14,0xc7,0x22,0x80,0x40,0x20,0x88,0x8d,0x00,0x8c,0x25, + 
0x00,0x01,0xa9,0x11,0x80,0x1a,0x20,0x31,0x03,0x40,0x61,0x0e,0xe2,0xba,0xae,0x6a, + 0x06,0x80,0xc0,0x0c,0xc0,0x08,0xc0,0x18,0x01,0x08,0x82,0x20,0xfe,0x01,0x00,0x00, + 0x00,0x83,0x0c,0x0f,0x91,0x8c,0x18,0x28,0x42,0x80,0x39,0x4d,0x80,0x2c,0xc9,0x30, + 0xc8,0x70,0x04,0x8d,0x05,0x91,0x7c,0x66,0x1b,0x94,0x00,0xc8,0x00,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +}; +static const uint8_t _sfons_vs_bytecode_metal_ios[3344] = { + 0x4d,0x54,0x4c,0x42,0x01,0x00,0x02,0x00,0x04,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x10,0x0d,0x00,0x00,0x00,0x00,0x00,0x00,0x58,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x6d,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xcd,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x3b,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x08,0x01,0x00,0x00,0x00,0x00,0x00,0x00, + 0x08,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x10,0x01,0x00,0x00,0x00,0x00,0x00,0x00, + 0x00,0x0c,0x00,0x00,0x00,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x6d,0x00,0x00,0x00, + 0x4e,0x41,0x4d,0x45,0x06,0x00,0x6d,0x61,0x69,0x6e,0x30,0x00,0x54,0x59,0x50,0x45, + 0x01,0x00,0x00,0x48,0x41,0x53,0x48,0x20,0x00,0x58,0xff,0x12,0x36,0x80,0x88,0xb5, + 0xf7,0x1c,0xc3,0xd5,0x31,0x57,0x18,0x26,0x26,0xf0,0x8c,0xd4,0x15,0xa2,0x55,0x8b, + 0xd5,0x3c,0x6b,0x11,0xbd,0x17,0x49,0x4a,0x02,0x4f,0x46,0x46,0x54,0x18,0x00,0x00, + 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x56,0x45,0x52,0x53,0x08,0x00,0x01,0x00,0x08, + 0x00,0x01,0x00,0x00,0x00,0x45,0x4e,0x44,0x54,0x45,0x4e,0x44,0x54,0x37,0x00,0x00, + 0x00,0x56,0x41,0x54,0x54,0x22,0x00,0x03,0x00,0x70,0x6f,0x73,0x69,0x74,0x69,0x6f, + 0x6e,0x00,0x00,0x80,0x74,0x65,0x78,0x63,0x6f,0x6f,0x72,0x64,0x30,0x00,0x01,0x80, + 0x63,0x6f,0x6c,0x6f,0x72,0x30,0x00,0x02,0x80,0x56,0x41,0x54,0x59,0x05,0x00,0x03, + 0x00,0x06,0x04,0x06,0x45,0x4e,0x44,0x54,0x04,0x00,0x00,0x00,0x45,0x4e,0x44,0x54, + 0xde,0xc0,0x17,0x0b,0x00,0x00,0x00,0x00,0x14,0x00,0x00,0x00,0xec,0x0b,0x00,0x00, + 0xff,0xff,0xff,0xff,0x42,0x43,0xc0,0xde,0x21,0x0c,0x00,0x00,0xf8,0x02,0x00,0x00, + 0x0b,0x82,0x20,0x00,0x02,0x00,0x00,0x00,0x12,0x00,0x00,0x00,0x07,0x81,0x23,0x91, + 0x41,0xc8,0x04,0x49,0x06,0x10,0x32,0x39,0x92,0x01,0x84,0x0c,0x25,0x05,0x08,0x19, + 0x1e,0x04,0x8b,0x62,0x80,0x14,0x45,0x02,0x42,0x92,0x0b,0x42,0xa4,0x10,0x32,0x14, + 0x38,0x08,0x18,0x49,0x0a,0x32,0x44,0x24,0x48,0x0a,0x90,0x21,0x23,0xc4,0x52,0x80, + 0x0c,0x19,0x21,0x72,0x24,0x07,0xc8,0x48,0x11,0x62,0xa8,0xa0,0xa8,0x40,0xc6,0xf0, + 0x01,0x00,0x00,0x00,0x51,0x18,0x00,0x00,0x82,0x00,0x00,0x00,0x1b,0xc8,0x25,0xf8, + 0xff,0xff,0xff,0xff,0x01,0x90,0x80,0x8a,0x18,0x87,0x77,0x90,0x07,0x79,0x28,0x87, + 0x71,0xa0,0x07,0x76,0xc8,0x87,0x36,0x90,0x87,0x77,0xa8,0x07,0x77,0x20,0x87,0x72, + 0x20,0x87,0x36,0x20,0x87,0x74,0xb0,0x87,0x74,0x20,0x87,0x72,0x68,0x83,0x79,0x88, + 0x07,0x79,0xa0,0x87,0x36,0x30,0x07,0x78,0x68,0x83,0x76,0x08,0x07,0x7a,0x40,0x07, + 0xc0,0x1c,0xc2,0x81,0x1d,0xe6,0xa1,0x1c,0x00,0x82,0x1c,0xd2,0x61,0x1e,0xc2,0x41, + 0x1c,0xd8,0xa1,0x1c,0xda,0x80,0x1e,0xc2,0x21,0x1d,0xd8,0xa1,0x0d,0xc6,0x21,0x1c, + 0xd8,0x81,0x1d,0xe6,0x01,0x30,0x87,0x70,0x60,0x87,0x79,0x28,0x07,0x80,0x60,0x87, + 0x72,0x98,0x87,0x79,0x68,0x03,0x78,0x90,0x87,0x72,0x18,0x87,0x74,0x98,0x87,0x72, + 0x68,0x03,0x73,0x80,0x87,0x76,0x08,0x07,0x72,0x00,0xcc,0x21,0x1c,0xd8,0x61,0x1e, + 0xca,0x01,0x20,0xdc,0xe1,0x1d,0xda,0xc0,0x1c,0xe4,0x21,0x1c,0xda,0xa1,0x1c,0xda, + 0x00,0x1e,0xde,0x21,0x1d,0xdc,0x81,0x1e,0xca,0x41,0x1e,0xda,0xa0,0x1c,0xd8,0x21, + 0x1d,0xda,0x01,0xa0,0x07,0x79,0xa8,0x87,0x72,0x00,0x06,0x77,0x78,0x87,0x36,0x30, + 
0x07,0x79,0x08,0x87,0x76,0x28,0x87,0x36,0x80,0x87,0x77,0x48,0x07,0x77,0xa0,0x87, + 0x72,0x90,0x87,0x36,0x28,0x07,0x76,0x48,0x87,0x76,0x68,0x03,0x77,0x78,0x07,0x77, + 0x68,0x03,0x76,0x28,0x87,0x70,0x30,0x07,0x80,0x70,0x87,0x77,0x68,0x83,0x74,0x70, + 0x07,0x73,0x98,0x87,0x36,0x30,0x07,0x78,0x68,0x83,0x76,0x08,0x07,0x7a,0x40,0x07, + 0x80,0x1e,0xe4,0xa1,0x1e,0xca,0x01,0x20,0xdc,0xe1,0x1d,0xda,0x40,0x1d,0xea,0xa1, + 0x1d,0xe0,0xa1,0x0d,0xe8,0x21,0x1c,0xc4,0x81,0x1d,0xca,0x61,0x1e,0x00,0x73,0x08, + 0x07,0x76,0x98,0x87,0x72,0x00,0x08,0x77,0x78,0x87,0x36,0x70,0x87,0x70,0x70,0x87, + 0x79,0x68,0x03,0x73,0x80,0x87,0x36,0x68,0x87,0x70,0xa0,0x07,0x74,0x00,0xe8,0x41, + 0x1e,0xea,0xa1,0x1c,0x00,0xc2,0x1d,0xde,0xa1,0x0d,0xe6,0x21,0x1d,0xce,0xc1,0x1d, + 0xca,0x81,0x1c,0xda,0x40,0x1f,0xca,0x41,0x1e,0xde,0x61,0x1e,0xda,0xc0,0x1c,0xe0, + 0xa1,0x0d,0xda,0x21,0x1c,0xe8,0x01,0x1d,0x00,0x7a,0x90,0x87,0x7a,0x28,0x07,0x80, + 0x70,0x87,0x77,0x68,0x03,0x7a,0x90,0x87,0x70,0x80,0x07,0x78,0x48,0x07,0x77,0x38, + 0x87,0x36,0x68,0x87,0x70,0xa0,0x07,0x74,0x00,0xe8,0x41,0x1e,0xea,0xa1,0x1c,0x00, + 0x62,0x1e,0xe8,0x21,0x1c,0xc6,0x61,0x1d,0xda,0x00,0x1e,0xe4,0xe1,0x1d,0xe8,0xa1, + 0x1c,0xc6,0x81,0x1e,0xde,0x41,0x1e,0xda,0x40,0x1c,0xea,0xc1,0x1c,0xcc,0xa1,0x1c, + 0xe4,0xa1,0x0d,0xe6,0x21,0x1d,0xf4,0xa1,0x1c,0x00,0x3c,0x00,0x88,0x7a,0x70,0x87, + 0x79,0x08,0x07,0x73,0x28,0x87,0x36,0x30,0x07,0x78,0x68,0x83,0x76,0x08,0x07,0x7a, + 0x40,0x07,0x80,0x1e,0xe4,0xa1,0x1e,0xca,0x01,0x20,0xea,0x61,0x1e,0xca,0xa1,0x0d, + 0xe6,0xe1,0x1d,0xcc,0x81,0x1e,0xda,0xc0,0x1c,0xd8,0xe1,0x1d,0xc2,0x81,0x1e,0x00, + 0x73,0x08,0x07,0x76,0x98,0x87,0x72,0x00,0x36,0x20,0x02,0x01,0x24,0xc0,0x02,0x54, + 0x00,0x00,0x00,0x00,0x49,0x18,0x00,0x00,0x01,0x00,0x00,0x00,0x13,0x84,0x40,0x00, + 0x89,0x20,0x00,0x00,0x23,0x00,0x00,0x00,0x32,0x22,0x48,0x09,0x20,0x64,0x85,0x04, + 0x93,0x22,0xa4,0x84,0x04,0x93,0x22,0xe3,0x84,0xa1,0x90,0x14,0x12,0x4c,0x8a,0x8c, + 0x0b,0x84,0xa4,0x4c,0x10,0x44,0x33,0x00,0xc3,0x08,0x04,0x70,0x90,0x34,0x45,0x94, + 0x30,0xf9,0x0c,0x80,0x34,0xf4,0xef,0x50,0x13,0x1a,0x42,0x08,0xc3,0x08,0x02,0x90, + 0x04,0x61,0x26,0x6a,0x1e,0xe8,0x41,0x1e,0xea,0x61,0x1c,0xe8,0xc1,0x0d,0xda,0xa1, + 0x1c,0xe8,0x21,0x1c,0xd8,0x41,0x0f,0xf4,0xa0,0x1d,0xc2,0x81,0x1e,0xe4,0x21,0x1d, + 0xf0,0x01,0x05,0xe4,0x20,0x69,0x8a,0x28,0x61,0xf2,0x2b,0xe9,0x7f,0x80,0x08,0x60, + 0x24,0x24,0x94,0x32,0x88,0x60,0x08,0xa5,0x10,0x61,0x84,0x43,0x68,0x20,0x60,0x8e, + 0x00,0x0c,0x52,0x60,0xcd,0x11,0x80,0xc2,0x20,0x42,0x20,0x0c,0x23,0x10,0xcb,0x08, + 0x00,0x00,0x00,0x00,0x13,0xa8,0x70,0x48,0x07,0x79,0xb0,0x03,0x3a,0x68,0x83,0x70, + 0x80,0x07,0x78,0x60,0x87,0x72,0x68,0x83,0x74,0x78,0x87,0x79,0xc8,0x03,0x37,0x80, + 0x03,0x37,0x80,0x83,0x0d,0xb7,0x51,0x0e,0x6d,0x00,0x0f,0x7a,0x60,0x07,0x74,0xa0, + 0x07,0x76,0x40,0x07,0x7a,0x60,0x07,0x74,0xd0,0x06,0xe9,0x10,0x07,0x7a,0x80,0x07, + 0x7a,0x80,0x07,0x6d,0x90,0x0e,0x78,0xa0,0x07,0x78,0xa0,0x07,0x78,0xd0,0x06,0xe9, + 0x10,0x07,0x76,0xa0,0x07,0x71,0x60,0x07,0x7a,0x10,0x07,0x76,0xd0,0x06,0xe9,0x30, + 0x07,0x72,0xa0,0x07,0x73,0x20,0x07,0x7a,0x30,0x07,0x72,0xd0,0x06,0xe9,0x60,0x07, + 0x74,0xa0,0x07,0x76,0x40,0x07,0x7a,0x60,0x07,0x74,0xd0,0x06,0xe6,0x30,0x07,0x72, + 0xa0,0x07,0x73,0x20,0x07,0x7a,0x30,0x07,0x72,0xd0,0x06,0xe6,0x60,0x07,0x74,0xa0, + 0x07,0x76,0x40,0x07,0x7a,0x60,0x07,0x74,0xd0,0x06,0xf6,0x10,0x07,0x76,0xa0,0x07, + 0x71,0x60,0x07,0x7a,0x10,0x07,0x76,0xd0,0x06,0xf6,0x20,0x07,0x74,0xa0,0x07,0x73, + 0x20,0x07,0x7a,0x30,0x07,0x72,0xd0,0x06,0xf6,0x30,0x07,0x72,0xa0,0x07,0x73,0x20, + 
0x07,0x7a,0x30,0x07,0x72,0xd0,0x06,0xf6,0x40,0x07,0x78,0xa0,0x07,0x76,0x40,0x07, + 0x7a,0x60,0x07,0x74,0xd0,0x06,0xf6,0x60,0x07,0x74,0xa0,0x07,0x76,0x40,0x07,0x7a, + 0x60,0x07,0x74,0xd0,0x06,0xf6,0x90,0x07,0x76,0xa0,0x07,0x71,0x20,0x07,0x78,0xa0, + 0x07,0x71,0x20,0x07,0x78,0xd0,0x06,0xf6,0x10,0x07,0x72,0x80,0x07,0x7a,0x10,0x07, + 0x72,0x80,0x07,0x7a,0x10,0x07,0x72,0x80,0x07,0x6d,0x60,0x0f,0x71,0x90,0x07,0x72, + 0xa0,0x07,0x72,0x50,0x07,0x76,0xa0,0x07,0x72,0x50,0x07,0x76,0xd0,0x06,0xf6,0x20, + 0x07,0x75,0x60,0x07,0x7a,0x20,0x07,0x75,0x60,0x07,0x7a,0x20,0x07,0x75,0x60,0x07, + 0x6d,0x60,0x0f,0x75,0x10,0x07,0x72,0xa0,0x07,0x75,0x10,0x07,0x72,0xa0,0x07,0x75, + 0x10,0x07,0x72,0xd0,0x06,0xf6,0x10,0x07,0x70,0x20,0x07,0x74,0xa0,0x07,0x71,0x00, + 0x07,0x72,0x40,0x07,0x7a,0x10,0x07,0x70,0x20,0x07,0x74,0xd0,0x06,0xee,0x80,0x07, + 0x7a,0x10,0x07,0x76,0xa0,0x07,0x73,0x20,0x07,0x43,0x98,0x04,0x00,0x80,0x00,0x00, + 0x00,0x00,0x00,0x80,0x2c,0x10,0x00,0x00,0x0b,0x00,0x00,0x00,0x32,0x1e,0x98,0x10, + 0x19,0x11,0x4c,0x90,0x8c,0x09,0x26,0x47,0xc6,0x04,0x43,0x5a,0x25,0x30,0x02,0x50, + 0x04,0x05,0x18,0x50,0x80,0x02,0x85,0x50,0x10,0x65,0x50,0x20,0xd4,0x46,0x00,0x88, + 0x8d,0x25,0x34,0x05,0x00,0x00,0x00,0x00,0x79,0x18,0x00,0x00,0x19,0x01,0x00,0x00, + 0x1a,0x03,0x4c,0x10,0x97,0x29,0xa2,0x25,0x10,0xab,0x32,0xb9,0xb9,0xb4,0x37,0xb7, + 0x21,0xc6,0x32,0x28,0x00,0xb3,0x50,0xb9,0x1b,0x43,0x0b,0x93,0xfb,0x9a,0x4b,0xd3, + 0x2b,0x1b,0x62,0x2c,0x81,0x22,0x2c,0x06,0xd9,0x20,0x08,0x0e,0x8e,0xad,0x0c,0x84, + 0x89,0xc9,0xaa,0x09,0xc4,0xae,0x4c,0x6e,0x2e,0xed,0xcd,0x0d,0x64,0x26,0x07,0x46, + 0xc6,0xc5,0xe6,0x06,0x04,0xa5,0xad,0x8c,0x2e,0x8c,0xcd,0xac,0xac,0x65,0x26,0x07, + 0x46,0xc6,0xc5,0xe6,0x26,0x65,0x88,0xa0,0x10,0x43,0x8c,0x25,0x58,0x8e,0x45,0x60, + 0xd1,0x54,0x46,0x17,0xc6,0x36,0x04,0x51,0x8e,0x25,0x58,0x84,0x45,0xe0,0x16,0x96, + 0x26,0xe7,0x32,0xf6,0xd6,0x06,0x97,0xc6,0x56,0xe6,0x42,0x56,0xe6,0xf6,0x26,0xd7, + 0x36,0xf7,0x45,0x96,0x36,0x17,0x26,0xc6,0x56,0x36,0x44,0x50,0x12,0x72,0x61,0x69, + 0x72,0x2e,0x63,0x6f,0x6d,0x70,0x69,0x6c,0x65,0x2e,0x66,0x61,0x73,0x74,0x5f,0x6d, + 0x61,0x74,0x68,0x5f,0x65,0x6e,0x61,0x62,0x6c,0x65,0x43,0x04,0x65,0x21,0x19,0x84, + 0xa5,0xc9,0xb9,0x8c,0xbd,0xb5,0xc1,0xa5,0xb1,0x95,0xb9,0x98,0xc9,0x85,0xb5,0x95, + 0x89,0xd5,0x99,0x99,0x95,0xc9,0x7d,0x99,0x95,0xd1,0x8d,0xa1,0x7d,0x95,0xb9,0x85, + 0x89,0xb1,0x95,0x0d,0x11,0x94,0x86,0x61,0x10,0x96,0x26,0xe7,0x32,0xf6,0xd6,0x06, + 0x97,0xc6,0x56,0xe6,0xe2,0x16,0x46,0x97,0x66,0x57,0xf6,0x45,0xf6,0x56,0x27,0xc6, + 0x56,0xf6,0x45,0x96,0x36,0x17,0x26,0xc6,0x56,0x36,0x44,0x50,0x1e,0x92,0x41,0x58, + 0x9a,0x9c,0xcb,0xd8,0x5b,0x1b,0x5c,0x1a,0x5b,0x99,0x8b,0x5b,0x18,0x5d,0x9a,0x5d, + 0xd9,0x17,0xdb,0x9b,0xdb,0xd9,0x17,0xdb,0x9b,0xdb,0xd9,0x17,0x59,0xda,0x5c,0x98, + 0x18,0x5b,0xd9,0x10,0x41,0x89,0x78,0x06,0x61,0x69,0x72,0x2e,0x63,0x6f,0x6d,0x70, + 0x69,0x6c,0x65,0x2e,0x6e,0x61,0x74,0x69,0x76,0x65,0x5f,0x77,0x69,0x64,0x65,0x5f, + 0x76,0x65,0x63,0x74,0x6f,0x72,0x73,0x5f,0x64,0x69,0x73,0x61,0x62,0x6c,0x65,0x43, + 0x04,0x65,0x62,0x14,0x96,0x26,0xe7,0x62,0x57,0x26,0x47,0x57,0x86,0xf7,0xf5,0x56, + 0x47,0x07,0x57,0x47,0xc7,0xa5,0x6e,0xae,0x4c,0x0e,0x85,0xed,0x6d,0xcc,0x0d,0x26, + 0x85,0x51,0x58,0x9a,0x9c,0x4b,0x98,0xdc,0xd9,0x17,0x5d,0x1e,0x5c,0xd9,0x97,0x5b, + 0x58,0x5b,0x19,0x0d,0x33,0xb6,0xb7,0x30,0x3a,0x1a,0x32,0x61,0x69,0x72,0x2e,0x61, + 0x72,0x67,0x5f,0x6e,0x61,0x6d,0x65,0x14,0xea,0xec,0x86,0x30,0x4a,0xa5,0x58,0xca, + 0xa5,0x60,0x4a,0xa6,0x68,0x5c,0xea,0xe6,0xca,0xe4,0x50,0xd8,0xde,0xc6,0xdc,0x62, + 
0x52,0x58,0x8c,0xbd,0xb1,0xbd,0xc9,0x0d,0x61,0x94,0x4a,0xe1,0x94,0x4b,0xc1,0x94, + 0x4c,0xe9,0xc8,0x84,0xa5,0xc9,0xb9,0xc0,0xbd,0xcd,0xa5,0xd1,0xa5,0xbd,0xb9,0x71, + 0x39,0x63,0xfb,0x82,0x7a,0x9b,0x4b,0xa3,0x4b,0x7b,0x73,0x1b,0xa2,0x28,0x9f,0x72, + 0x29,0x98,0x92,0x29,0x60,0x30,0xc4,0x50,0x36,0xc5,0x53,0xc2,0x80,0x50,0x58,0x9a, + 0x9c,0x8b,0x5d,0x99,0x1c,0x5d,0x19,0xde,0x57,0x9a,0x1b,0x5c,0x1d,0x1d,0xa5,0xb0, + 0x34,0x39,0x17,0xb6,0xb7,0xb1,0x30,0xba,0xb4,0x37,0xb7,0xaf,0x34,0x37,0xb2,0x32, + 0x3c,0x66,0x67,0x65,0x6e,0x65,0x72,0x61,0x74,0x65,0x64,0x28,0x38,0x70,0x6f,0x73, + 0x69,0x74,0x69,0x6f,0x6e,0x44,0x76,0x34,0x5f,0x66,0x29,0x44,0xe0,0xde,0xe6,0xd2, + 0xe8,0xd2,0xde,0xdc,0x86,0x50,0x8b,0xa0,0x8c,0x81,0x42,0x06,0x8b,0xb0,0x04,0x4a, + 0x19,0x28,0x97,0x82,0x29,0x99,0x62,0x06,0xd4,0xce,0xca,0xdc,0xca,0xe4,0xc2,0xe8, + 0xca,0xc8,0x50,0x72,0xe8,0xca,0xf0,0xc6,0xde,0xde,0xe4,0xc8,0x60,0x88,0xec,0x64, + 0xbe,0xcc,0x52,0x68,0x98,0xb1,0xbd,0x85,0xd1,0xc9,0x30,0xa1,0x2b,0xc3,0x1b,0x7b, + 0x7b,0x93,0x23,0x83,0x19,0x42,0x2d,0x81,0x32,0x06,0x0a,0x19,0x2c,0xc1,0x12,0x28, + 0x68,0xa0,0x5c,0x4a,0x1a,0x28,0x99,0xa2,0x06,0xbc,0xce,0xca,0xdc,0xca,0xe4,0xc2, + 0xe8,0xca,0xc8,0x50,0x6c,0xc6,0xde,0xd8,0xde,0xe4,0x60,0x88,0xec,0x68,0xbe,0xcc, + 0x52,0x68,0x8c,0xbd,0xb1,0xbd,0xc9,0xc1,0x0c,0xa1,0x96,0x41,0x19,0x03,0x85,0x0c, + 0x96,0x61,0x09,0x14,0x36,0x50,0x2e,0x05,0x53,0x32,0xa5,0x0d,0xa8,0x84,0xa5,0xc9, + 0xb9,0x88,0xd5,0x99,0x99,0x95,0xc9,0xf1,0x09,0x4b,0x93,0x73,0x11,0xab,0x33,0x33, + 0x2b,0x93,0xfb,0x9a,0x4b,0xd3,0x2b,0x23,0x12,0x96,0x26,0xe7,0x22,0x57,0x16,0x46, + 0x46,0x2a,0x2c,0x4d,0xce,0x65,0x8e,0x4e,0xae,0x6e,0x8c,0xee,0x8b,0x2e,0x0f,0xae, + 0xec,0x2b,0xcd,0xcd,0xec,0x8d,0x88,0x19,0xdb,0x5b,0x18,0x1d,0x0d,0x1e,0x0d,0x87, + 0x36,0x3b,0x38,0x0a,0x74,0x6d,0x43,0xa8,0x45,0x58,0x88,0x45,0x50,0xe6,0x40,0xa1, + 0x83,0x85,0x58,0x88,0x45,0x50,0xe6,0x40,0xa9,0x03,0x46,0x61,0x69,0x72,0x2e,0x61, + 0x72,0x67,0x5f,0x74,0x79,0x70,0x65,0x5f,0x73,0x69,0x7a,0x65,0xbc,0xc2,0xd2,0xe4, + 0x5c,0xc2,0xe4,0xce,0xbe,0xe8,0xf2,0xe0,0xca,0xbe,0xc2,0xd8,0xd2,0xce,0xdc,0xbe, + 0xe6,0xd2,0xf4,0xca,0x98,0xd8,0xcd,0x7d,0xc1,0x85,0xc9,0x85,0xb5,0xcd,0x71,0xf8, + 0x92,0x81,0x19,0x42,0x06,0x0b,0xa2,0xbc,0x81,0x02,0x07,0x4b,0xa1,0x90,0xc1,0x22, + 0x2c,0x81,0x12,0x07,0x8a,0x1c,0x28,0x76,0xa0,0xdc,0xc1,0x52,0x28,0x78,0xb0,0x24, + 0xca,0xa5,0xe4,0x81,0x92,0x29,0x7a,0x30,0x04,0x51,0xce,0x40,0x59,0x03,0xc5,0x0d, + 0x94,0x3d,0x18,0x62,0x24,0x80,0x22,0x06,0x0a,0x1f,0xf0,0x79,0x6b,0x73,0x4b,0x83, + 0x7b,0xa3,0x2b,0x73,0xa3,0x03,0x19,0x43,0x0b,0x93,0xe3,0x33,0x95,0xd6,0x06,0xc7, + 0x56,0x06,0x32,0xb4,0xb2,0x02,0x42,0x25,0x14,0x14,0x34,0x44,0x50,0xfe,0x60,0x88, + 0xa1,0xf8,0x81,0x02,0x0a,0x8d,0x32,0xc4,0x50,0x42,0x41,0x09,0x85,0x46,0x19,0x11, + 0xb1,0x03,0x3b,0xd8,0x43,0x3b,0xb8,0x41,0x3b,0xbc,0x03,0x39,0xd4,0x03,0x3b,0x94, + 0x83,0x1b,0x98,0x03,0x3b,0x84,0xc3,0x39,0xcc,0xc3,0x14,0x21,0x18,0x46,0x28,0xec, + 0xc0,0x0e,0xf6,0xd0,0x0e,0x6e,0x90,0x0e,0xe4,0x50,0x0e,0xee,0x40,0x0f,0x53,0x82, + 0x62,0xc4,0x12,0x0e,0xe9,0x20,0x0f,0x6e,0x60,0x0f,0xe5,0x20,0x0f,0xf3,0x90,0x0e, + 0xef,0xe0,0x0e,0x53,0x02,0x63,0x04,0x15,0x0e,0xe9,0x20,0x0f,0x6e,0xc0,0x0e,0xe1, + 0xe0,0x0e,0xe7,0x50,0x0f,0xe1,0x70,0x0e,0xe5,0xf0,0x0b,0xf6,0x50,0x0e,0xf2,0x30, + 0x0f,0xe9,0xf0,0x0e,0xee,0x30,0x25,0x40,0x46,0x4c,0xe1,0x90,0x0e,0xf2,0xe0,0x06, + 0xe3,0xf0,0x0e,0xed,0x00,0x0f,0xe9,0xc0,0x0e,0xe5,0xf0,0x0b,0xef,0x00,0x0f,0xf4, + 0x90,0x0e,0xef,0xe0,0x0e,0xf3,0x30,0xc5,0x50,0x18,0x07,0x92,0xa8,0x11,0x4a,0x38, + 
0xa4,0x83,0x3c,0xb8,0x81,0x3d,0x94,0x83,0x3c,0xd0,0x43,0x39,0xe0,0xc3,0x94,0xa0, + 0x0f,0x00,0x00,0x00,0x79,0x18,0x00,0x00,0x6d,0x00,0x00,0x00,0x33,0x08,0x80,0x1c, + 0xc4,0xe1,0x1c,0x66,0x14,0x01,0x3d,0x88,0x43,0x38,0x84,0xc3,0x8c,0x42,0x80,0x07, + 0x79,0x78,0x07,0x73,0x98,0x71,0x0c,0xe6,0x00,0x0f,0xed,0x10,0x0e,0xf4,0x80,0x0e, + 0x33,0x0c,0x42,0x1e,0xc2,0xc1,0x1d,0xce,0xa1,0x1c,0x66,0x30,0x05,0x3d,0x88,0x43, + 0x38,0x84,0x83,0x1b,0xcc,0x03,0x3d,0xc8,0x43,0x3d,0x8c,0x03,0x3d,0xcc,0x78,0x8c, + 0x74,0x70,0x07,0x7b,0x08,0x07,0x79,0x48,0x87,0x70,0x70,0x07,0x7a,0x70,0x03,0x76, + 0x78,0x87,0x70,0x20,0x87,0x19,0xcc,0x11,0x0e,0xec,0x90,0x0e,0xe1,0x30,0x0f,0x6e, + 0x30,0x0f,0xe3,0xf0,0x0e,0xf0,0x50,0x0e,0x33,0x10,0xc4,0x1d,0xde,0x21,0x1c,0xd8, + 0x21,0x1d,0xc2,0x61,0x1e,0x66,0x30,0x89,0x3b,0xbc,0x83,0x3b,0xd0,0x43,0x39,0xb4, + 0x03,0x3c,0xbc,0x83,0x3c,0x84,0x03,0x3b,0xcc,0xf0,0x14,0x76,0x60,0x07,0x7b,0x68, + 0x07,0x37,0x68,0x87,0x72,0x68,0x07,0x37,0x80,0x87,0x70,0x90,0x87,0x70,0x60,0x07, + 0x76,0x28,0x07,0x76,0xf8,0x05,0x76,0x78,0x87,0x77,0x80,0x87,0x5f,0x08,0x87,0x71, + 0x18,0x87,0x72,0x98,0x87,0x79,0x98,0x81,0x2c,0xee,0xf0,0x0e,0xee,0xe0,0x0e,0xf5, + 0xc0,0x0e,0xec,0x30,0x03,0x62,0xc8,0xa1,0x1c,0xe4,0xa1,0x1c,0xcc,0xa1,0x1c,0xe4, + 0xa1,0x1c,0xdc,0x61,0x1c,0xca,0x21,0x1c,0xc4,0x81,0x1d,0xca,0x61,0x06,0xd6,0x90, + 0x43,0x39,0xc8,0x43,0x39,0x98,0x43,0x39,0xc8,0x43,0x39,0xb8,0xc3,0x38,0x94,0x43, + 0x38,0x88,0x03,0x3b,0x94,0xc3,0x2f,0xbc,0x83,0x3c,0xfc,0x82,0x3b,0xd4,0x03,0x3b, + 0xb0,0xc3,0x0c,0xc7,0x69,0x87,0x70,0x58,0x87,0x72,0x70,0x83,0x74,0x68,0x07,0x78, + 0x60,0x87,0x74,0x18,0x87,0x74,0xa0,0x87,0x19,0xce,0x53,0x0f,0xee,0x00,0x0f,0xf2, + 0x50,0x0e,0xe4,0x90,0x0e,0xe3,0x40,0x0f,0xe1,0x20,0x0e,0xec,0x50,0x0e,0x33,0x20, + 0x28,0x1d,0xdc,0xc1,0x1e,0xc2,0x41,0x1e,0xd2,0x21,0x1c,0xdc,0x81,0x1e,0xdc,0xe0, + 0x1c,0xe4,0xe1,0x1d,0xea,0x01,0x1e,0x66,0x18,0x51,0x38,0xb0,0x43,0x3a,0x9c,0x83, + 0x3b,0xcc,0x50,0x24,0x76,0x60,0x07,0x7b,0x68,0x07,0x37,0x60,0x87,0x77,0x78,0x07, + 0x78,0x98,0x51,0x4c,0xf4,0x90,0x0f,0xf0,0x50,0x0e,0x33,0x1e,0x6a,0x1e,0xca,0x61, + 0x1c,0xe8,0x21,0x1d,0xde,0xc1,0x1d,0x7e,0x01,0x1e,0xe4,0xa1,0x1c,0xcc,0x21,0x1d, + 0xf0,0x61,0x06,0x54,0x85,0x83,0x38,0xcc,0xc3,0x3b,0xb0,0x43,0x3d,0xd0,0x43,0x39, + 0xfc,0xc2,0x3c,0xe4,0x43,0x3b,0x88,0xc3,0x3b,0xb0,0xc3,0x8c,0xc5,0x0a,0x87,0x79, + 0x98,0x87,0x77,0x18,0x87,0x74,0x08,0x07,0x7a,0x28,0x07,0x72,0x00,0x00,0x00,0x00, + 0x71,0x20,0x00,0x00,0x02,0x00,0x00,0x00,0x06,0x50,0x30,0x00,0xd2,0xd0,0x00,0x00, + 0x61,0x20,0x00,0x00,0x3d,0x00,0x00,0x00,0x13,0x04,0x41,0x2c,0x10,0x00,0x00,0x00, + 0x09,0x00,0x00,0x00,0xf4,0xc6,0x22,0x86,0x61,0x18,0xc6,0x22,0x04,0x41,0x10,0xc6, + 0x22,0x82,0x20,0x08,0xa8,0x95,0x40,0x19,0x14,0x01,0xbd,0x11,0x00,0x1a,0x33,0x00, + 0x24,0x66,0x00,0x28,0xcc,0x00,0x00,0x00,0xe3,0x15,0x0b,0x84,0x61,0x10,0x05,0x65, + 0x90,0x01,0x1a,0x0c,0x13,0x02,0xf9,0x8c,0x57,0x3c,0x14,0xc7,0x2d,0x14,0x94,0x41, + 0x06,0xea,0x70,0x4c,0x08,0xe4,0x63,0x41,0x01,0x9f,0xf1,0x0a,0x2a,0x0b,0x83,0x30, + 0x70,0x28,0x28,0x83,0x0c,0x19,0x43,0x99,0x10,0xc8,0xc7,0x8a,0x00,0x3e,0xe3,0x15, + 0x99,0x67,0x06,0x66,0x40,0x51,0x50,0x06,0x19,0xbc,0x48,0x33,0x21,0x90,0x8f,0x15, + 0x01,0x7c,0xc6,0x2b,0xbc,0x31,0x60,0x83,0x35,0x18,0x03,0x0a,0xca,0x20,0x83,0x18, + 0x60,0x99,0x09,0x81,0x7c,0xc6,0x2b,0xc4,0xe0,0x0c,0xe0,0xe0,0x0d,0x3c,0x0a,0xca, + 0x20,0x83,0x19,0x70,0x61,0x60,0x42,0x20,0x1f,0x0b,0x0a,0xf8,0x8c,0x57,0x9c,0x01, + 0x1b,0xd4,0x01,0x1d,0x88,0x01,0x05,0xc5,0x86,0x00,0x3e,0xb3,0x0d,0x61,0x10,0x00, + 
0xb3,0x0d,0x01,0x1b,0x04,0xb3,0x0d,0xc1,0x23,0x64,0x10,0x10,0x03,0x00,0x00,0x00, + 0x09,0x00,0x00,0x00,0x5b,0x86,0x20,0x10,0x85,0x2d,0x43,0x11,0x88,0xc2,0x96,0x41, + 0x09,0x44,0x61,0xcb,0xf0,0x04,0xa2,0xb0,0x65,0xa0,0x02,0x51,0xd8,0x32,0x60,0x81, + 0x28,0x6c,0x19,0xba,0x40,0x14,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +}; +static const uint8_t _sfons_fs_bytecode_metal_ios[2925] = { + 0x4d,0x54,0x4c,0x42,0x01,0x00,0x02,0x00,0x04,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x6d,0x0b,0x00,0x00,0x00,0x00,0x00,0x00,0x58,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x6d,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xcd,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x08,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xd5,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x08,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xdd,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x90,0x0a,0x00,0x00,0x00,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x6d,0x00,0x00,0x00, + 0x4e,0x41,0x4d,0x45,0x06,0x00,0x6d,0x61,0x69,0x6e,0x30,0x00,0x54,0x59,0x50,0x45, + 0x01,0x00,0x01,0x48,0x41,0x53,0x48,0x20,0x00,0x1b,0x2c,0xa9,0x59,0x2d,0x7d,0xde, + 0xcd,0x5c,0x34,0x76,0x1e,0x63,0x4d,0x0c,0xdc,0xfc,0x74,0x70,0x26,0x76,0xe8,0xde, + 0xed,0xf7,0xc3,0xf7,0xe9,0x62,0x28,0xfa,0xd6,0x4f,0x46,0x46,0x54,0x18,0x00,0x00, + 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x56,0x45,0x52,0x53,0x08,0x00,0x01,0x00,0x08, + 0x00,0x01,0x00,0x00,0x00,0x45,0x4e,0x44,0x54,0x45,0x4e,0x44,0x54,0x04,0x00,0x00, + 0x00,0x45,0x4e,0x44,0x54,0x04,0x00,0x00,0x00,0x45,0x4e,0x44,0x54,0xde,0xc0,0x17, + 0x0b,0x00,0x00,0x00,0x00,0x14,0x00,0x00,0x00,0x7c,0x0a,0x00,0x00,0xff,0xff,0xff, + 0xff,0x42,0x43,0xc0,0xde,0x21,0x0c,0x00,0x00,0x9c,0x02,0x00,0x00,0x0b,0x82,0x20, + 0x00,0x02,0x00,0x00,0x00,0x12,0x00,0x00,0x00,0x07,0x81,0x23,0x91,0x41,0xc8,0x04, + 0x49,0x06,0x10,0x32,0x39,0x92,0x01,0x84,0x0c,0x25,0x05,0x08,0x19,0x1e,0x04,0x8b, + 0x62,0x80,0x14,0x45,0x02,0x42,0x92,0x0b,0x42,0xa4,0x10,0x32,0x14,0x38,0x08,0x18, + 0x49,0x0a,0x32,0x44,0x24,0x48,0x0a,0x90,0x21,0x23,0xc4,0x52,0x80,0x0c,0x19,0x21, + 0x72,0x24,0x07,0xc8,0x48,0x11,0x62,0xa8,0xa0,0xa8,0x40,0xc6,0xf0,0x01,0x00,0x00, + 0x00,0x51,0x18,0x00,0x00,0x89,0x00,0x00,0x00,0x1b,0xcc,0x25,0xf8,0xff,0xff,0xff, + 0xff,0x01,0x60,0x00,0x09,0xa8,0x88,0x71,0x78,0x07,0x79,0x90,0x87,0x72,0x18,0x07, + 0x7a,0x60,0x87,0x7c,0x68,0x03,0x79,0x78,0x87,0x7a,0x70,0x07,0x72,0x28,0x07,0x72, + 0x68,0x03,0x72,0x48,0x07,0x7b,0x48,0x07,0x72,0x28,0x87,0x36,0x98,0x87,0x78,0x90, + 0x07,0x7a,0x68,0x03,0x73,0x80,0x87,0x36,0x68,0x87,0x70,0xa0,0x07,0x74,0x00,0xcc, + 0x21,0x1c,0xd8,0x61,0x1e,0xca,0x01,0x20,0xc8,0x21,0x1d,0xe6,0x21,0x1c,0xc4,0x81, + 0x1d,0xca,0xa1,0x0d,0xe8,0x21,0x1c,0xd2,0x81,0x1d,0xda,0x60,0x1c,0xc2,0x81,0x1d, + 0xd8,0x61,0x1e,0x00,0x73,0x08,0x07,0x76,0x98,0x87,0x72,0x00,0x08,0x76,0x28,0x87, + 0x79,0x98,0x87,0x36,0x80,0x07,0x79,0x28,0x87,0x71,0x48,0x87,0x79,0x28,0x87,0x36, + 0x30,0x07,0x78,0x68,0x87,0x70,0x20,0x07,0xc0,0x1c,0xc2,0x81,0x1d,0xe6,0xa1,0x1c, + 0x00,0xc2,0x1d,0xde,0xa1,0x0d,0xcc,0x41,0x1e,0xc2,0xa1,0x1d,0xca,0xa1,0x0d,0xe0, + 0xe1,0x1d,0xd2,0xc1,0x1d,0xe8,0xa1,0x1c,0xe4,0xa1,0x0d,0xca,0x81,0x1d,0xd2,0xa1, + 0x1d,0x00,0x7a,0x90,0x87,0x7a,0x28,0x07,0x60,0x70,0x87,0x77,0x68,0x03,0x73,0x90, + 0x87,0x70,0x68,0x87,0x72,0x68,0x03,0x78,0x78,0x87,0x74,0x70,0x07,0x7a,0x28,0x07, + 0x79,0x68,0x83,0x72,0x60,0x87,0x74,0x68,0x87,0x36,0x70,0x87,0x77,0x70,0x87,0x36, + 0x60,0x87,0x72,0x08,0x07,0x73,0x00,0x08,0x77,0x78,0x87,0x36,0x48,0x07,0x77,0x30, + 0x87,0x79,0x68,0x03,0x73,0x80,0x87,0x36,0x68,0x87,0x70,0xa0,0x07,0x74,0x00,0xe8, + 
0x41,0x1e,0xea,0xa1,0x1c,0x00,0xc2,0x1d,0xde,0xa1,0x0d,0xd4,0xa1,0x1e,0xda,0x01, + 0x1e,0xda,0x80,0x1e,0xc2,0x41,0x1c,0xd8,0xa1,0x1c,0xe6,0x01,0x30,0x87,0x70,0x60, + 0x87,0x79,0x28,0x07,0x80,0x70,0x87,0x77,0x68,0x03,0x77,0x08,0x07,0x77,0x98,0x87, + 0x36,0x30,0x07,0x78,0x68,0x83,0x76,0x08,0x07,0x7a,0x40,0x07,0x80,0x1e,0xe4,0xa1, + 0x1e,0xca,0x01,0x20,0xdc,0xe1,0x1d,0xda,0x60,0x1e,0xd2,0xe1,0x1c,0xdc,0xa1,0x1c, + 0xc8,0xa1,0x0d,0xf4,0xa1,0x1c,0xe4,0xe1,0x1d,0xe6,0xa1,0x0d,0xcc,0x01,0x1e,0xda, + 0xa0,0x1d,0xc2,0x81,0x1e,0xd0,0x01,0xa0,0x07,0x79,0xa8,0x87,0x72,0x00,0x08,0x77, + 0x78,0x87,0x36,0xa0,0x07,0x79,0x08,0x07,0x78,0x80,0x87,0x74,0x70,0x87,0x73,0x68, + 0x83,0x76,0x08,0x07,0x7a,0x40,0x07,0x80,0x1e,0xe4,0xa1,0x1e,0xca,0x01,0x20,0xe6, + 0x81,0x1e,0xc2,0x61,0x1c,0xd6,0xa1,0x0d,0xe0,0x41,0x1e,0xde,0x81,0x1e,0xca,0x61, + 0x1c,0xe8,0xe1,0x1d,0xe4,0xa1,0x0d,0xc4,0xa1,0x1e,0xcc,0xc1,0x1c,0xca,0x41,0x1e, + 0xda,0x60,0x1e,0xd2,0x41,0x1f,0xca,0x01,0xc0,0x03,0x80,0xa8,0x07,0x77,0x98,0x87, + 0x70,0x30,0x87,0x72,0x68,0x03,0x73,0x80,0x87,0x36,0x68,0x87,0x70,0xa0,0x07,0x74, + 0x00,0xe8,0x41,0x1e,0xea,0xa1,0x1c,0x00,0xa2,0x1e,0xe6,0xa1,0x1c,0xda,0x60,0x1e, + 0xde,0xc1,0x1c,0xe8,0xa1,0x0d,0xcc,0x81,0x1d,0xde,0x21,0x1c,0xe8,0x01,0x30,0x87, + 0x70,0x60,0x87,0x79,0x28,0x07,0x60,0x83,0x21,0x0c,0xc0,0x02,0x54,0x1b,0x8c,0x81, + 0x00,0x16,0xa0,0xda,0x80,0x10,0xff,0xff,0xff,0xff,0x3f,0x00,0x0c,0x20,0x01,0xd5, + 0x06,0xa3,0x08,0x80,0x05,0xa8,0x36,0x18,0x86,0x00,0x2c,0x40,0x05,0x49,0x18,0x00, + 0x00,0x03,0x00,0x00,0x00,0x13,0x86,0x40,0x18,0x26,0x0c,0x44,0x61,0x00,0x00,0x00, + 0x00,0x89,0x20,0x00,0x00,0x21,0x00,0x00,0x00,0x32,0x22,0x48,0x09,0x20,0x64,0x85, + 0x04,0x93,0x22,0xa4,0x84,0x04,0x93,0x22,0xe3,0x84,0xa1,0x90,0x14,0x12,0x4c,0x8a, + 0x8c,0x0b,0x84,0xa4,0x4c,0x10,0x4c,0x33,0x00,0xc3,0x08,0x04,0x70,0x90,0x34,0x45, + 0x94,0x30,0xf9,0x0c,0x80,0x34,0xf4,0xef,0x50,0x13,0x0a,0xc2,0x51,0xd2,0x14,0x51, + 0xc2,0xe4,0xff,0x13,0x71,0x4d,0x54,0x44,0xfc,0xf6,0xf0,0x4f,0x63,0x04,0xc0,0x20, + 0xc2,0x10,0x5c,0x24,0x4d,0x11,0x25,0x4c,0xfe,0x2f,0x01,0xcc,0xb3,0x10,0xd1,0x3f, + 0x8d,0x11,0x00,0x83,0x08,0x85,0x50,0x0a,0x11,0x02,0x31,0x74,0x86,0x11,0x04,0x60, + 0x8e,0x20,0x98,0x23,0x00,0x83,0x61,0x04,0x61,0x29,0x48,0x20,0x26,0x29,0xa6,0x00, + 0xb5,0x81,0x80,0x14,0x58,0xc3,0x08,0xc4,0x32,0x02,0x00,0x00,0x00,0x13,0xa8,0x70, + 0x48,0x07,0x79,0xb0,0x03,0x3a,0x68,0x83,0x70,0x80,0x07,0x78,0x60,0x87,0x72,0x68, + 0x83,0x74,0x78,0x87,0x79,0xc8,0x03,0x37,0x80,0x03,0x37,0x80,0x83,0x0d,0xb7,0x51, + 0x0e,0x6d,0x00,0x0f,0x7a,0x60,0x07,0x74,0xa0,0x07,0x76,0x40,0x07,0x7a,0x60,0x07, + 0x74,0xd0,0x06,0xe9,0x10,0x07,0x7a,0x80,0x07,0x7a,0x80,0x07,0x6d,0x90,0x0e,0x78, + 0xa0,0x07,0x78,0xa0,0x07,0x78,0xd0,0x06,0xe9,0x10,0x07,0x76,0xa0,0x07,0x71,0x60, + 0x07,0x7a,0x10,0x07,0x76,0xd0,0x06,0xe9,0x30,0x07,0x72,0xa0,0x07,0x73,0x20,0x07, + 0x7a,0x30,0x07,0x72,0xd0,0x06,0xe9,0x60,0x07,0x74,0xa0,0x07,0x76,0x40,0x07,0x7a, + 0x60,0x07,0x74,0xd0,0x06,0xe6,0x30,0x07,0x72,0xa0,0x07,0x73,0x20,0x07,0x7a,0x30, + 0x07,0x72,0xd0,0x06,0xe6,0x60,0x07,0x74,0xa0,0x07,0x76,0x40,0x07,0x7a,0x60,0x07, + 0x74,0xd0,0x06,0xf6,0x10,0x07,0x76,0xa0,0x07,0x71,0x60,0x07,0x7a,0x10,0x07,0x76, + 0xd0,0x06,0xf6,0x20,0x07,0x74,0xa0,0x07,0x73,0x20,0x07,0x7a,0x30,0x07,0x72,0xd0, + 0x06,0xf6,0x30,0x07,0x72,0xa0,0x07,0x73,0x20,0x07,0x7a,0x30,0x07,0x72,0xd0,0x06, + 0xf6,0x40,0x07,0x78,0xa0,0x07,0x76,0x40,0x07,0x7a,0x60,0x07,0x74,0xd0,0x06,0xf6, + 0x60,0x07,0x74,0xa0,0x07,0x76,0x40,0x07,0x7a,0x60,0x07,0x74,0xd0,0x06,0xf6,0x90, + 
0x07,0x76,0xa0,0x07,0x71,0x20,0x07,0x78,0xa0,0x07,0x71,0x20,0x07,0x78,0xd0,0x06, + 0xf6,0x10,0x07,0x72,0x80,0x07,0x7a,0x10,0x07,0x72,0x80,0x07,0x7a,0x10,0x07,0x72, + 0x80,0x07,0x6d,0x60,0x0f,0x71,0x90,0x07,0x72,0xa0,0x07,0x72,0x50,0x07,0x76,0xa0, + 0x07,0x72,0x50,0x07,0x76,0xd0,0x06,0xf6,0x20,0x07,0x75,0x60,0x07,0x7a,0x20,0x07, + 0x75,0x60,0x07,0x7a,0x20,0x07,0x75,0x60,0x07,0x6d,0x60,0x0f,0x75,0x10,0x07,0x72, + 0xa0,0x07,0x75,0x10,0x07,0x72,0xa0,0x07,0x75,0x10,0x07,0x72,0xd0,0x06,0xf6,0x10, + 0x07,0x70,0x20,0x07,0x74,0xa0,0x07,0x71,0x00,0x07,0x72,0x40,0x07,0x7a,0x10,0x07, + 0x70,0x20,0x07,0x74,0xd0,0x06,0xee,0x80,0x07,0x7a,0x10,0x07,0x76,0xa0,0x07,0x73, + 0x20,0x07,0x43,0x18,0x04,0x00,0x80,0x00,0x00,0x00,0x00,0x00,0x80,0x21,0x8c,0x03, + 0x04,0x80,0x00,0x00,0x00,0x00,0x00,0x40,0x16,0x08,0x00,0x00,0x00,0x08,0x00,0x00, + 0x00,0x32,0x1e,0x98,0x10,0x19,0x11,0x4c,0x90,0x8c,0x09,0x26,0x47,0xc6,0x04,0x43, + 0x5a,0x23,0x00,0x25,0x50,0x04,0x85,0x50,0x10,0x65,0x40,0x70,0x2c,0xa1,0x29,0x00, + 0x00,0x79,0x18,0x00,0x00,0xd7,0x00,0x00,0x00,0x1a,0x03,0x4c,0x10,0x97,0x29,0xa2, + 0x25,0x10,0xab,0x32,0xb9,0xb9,0xb4,0x37,0xb7,0x21,0xc6,0x42,0x3c,0x00,0x84,0x50, + 0xb9,0x1b,0x43,0x0b,0x93,0xfb,0x9a,0x4b,0xd3,0x2b,0x1b,0x62,0x2c,0xc3,0x23,0x2c, + 0x05,0xd9,0x20,0x08,0x0e,0x8e,0xad,0x0c,0x84,0x89,0xc9,0xaa,0x09,0xc4,0xae,0x4c, + 0x6e,0x2e,0xed,0xcd,0x0d,0x64,0x26,0x07,0x46,0xc6,0xc5,0xe6,0x06,0x04,0xa5,0xad, + 0x8c,0x2e,0x8c,0xcd,0xac,0xac,0x65,0x26,0x07,0x46,0xc6,0xc5,0xe6,0x26,0x65,0x88, + 0xf0,0x10,0x43,0x8c,0x65,0x58,0x8c,0x45,0x60,0xd1,0x54,0x46,0x17,0xc6,0x36,0x04, + 0x79,0x8e,0x65,0x58,0x84,0x45,0xe0,0x16,0x96,0x26,0xe7,0x32,0xf6,0xd6,0x06,0x97, + 0xc6,0x56,0xe6,0x42,0x56,0xe6,0xf6,0x26,0xd7,0x36,0xf7,0x45,0x96,0x36,0x17,0x26, + 0xc6,0x56,0x36,0x44,0x78,0x12,0x72,0x61,0x69,0x72,0x2e,0x63,0x6f,0x6d,0x70,0x69, + 0x6c,0x65,0x2e,0x66,0x61,0x73,0x74,0x5f,0x6d,0x61,0x74,0x68,0x5f,0x65,0x6e,0x61, + 0x62,0x6c,0x65,0x43,0x84,0x67,0x21,0x19,0x84,0xa5,0xc9,0xb9,0x8c,0xbd,0xb5,0xc1, + 0xa5,0xb1,0x95,0xb9,0x98,0xc9,0x85,0xb5,0x95,0x89,0xd5,0x99,0x99,0x95,0xc9,0x7d, + 0x99,0x95,0xd1,0x8d,0xa1,0x7d,0x95,0xb9,0x85,0x89,0xb1,0x95,0x0d,0x11,0x9e,0x86, + 0x61,0x10,0x96,0x26,0xe7,0x32,0xf6,0xd6,0x06,0x97,0xc6,0x56,0xe6,0xe2,0x16,0x46, + 0x97,0x66,0x57,0xf6,0x45,0xf6,0x56,0x27,0xc6,0x56,0xf6,0x45,0x96,0x36,0x17,0x26, + 0xc6,0x56,0x36,0x44,0x78,0x1e,0x92,0x41,0x58,0x9a,0x9c,0xcb,0xd8,0x5b,0x1b,0x5c, + 0x1a,0x5b,0x99,0x8b,0x5b,0x18,0x5d,0x9a,0x5d,0xd9,0x17,0xdb,0x9b,0xdb,0xd9,0x17, + 0xdb,0x9b,0xdb,0xd9,0x17,0x59,0xda,0x5c,0x98,0x18,0x5b,0xd9,0x10,0xe1,0x89,0x78, + 0x06,0x61,0x69,0x72,0x2e,0x63,0x6f,0x6d,0x70,0x69,0x6c,0x65,0x2e,0x6e,0x61,0x74, + 0x69,0x76,0x65,0x5f,0x77,0x69,0x64,0x65,0x5f,0x76,0x65,0x63,0x74,0x6f,0x72,0x73, + 0x5f,0x64,0x69,0x73,0x61,0x62,0x6c,0x65,0x43,0x84,0x67,0x62,0x14,0x96,0x26,0xe7, + 0x22,0x57,0xe6,0x46,0x56,0x26,0xf7,0x45,0x17,0x26,0x77,0x56,0x46,0xc7,0x28,0x2c, + 0x4d,0xce,0x25,0x4c,0xee,0xec,0x8b,0x2e,0x0f,0xae,0xec,0xcb,0x2d,0xac,0xad,0x8c, + 0x86,0x19,0xdb,0x5b,0x18,0x1d,0x0d,0x99,0xb0,0x34,0x39,0x97,0x30,0xb9,0xb3,0x2f, + 0xb7,0xb0,0xb6,0x32,0x2a,0x66,0x72,0x61,0x67,0x5f,0x63,0x6f,0x6c,0x6f,0x72,0x43, + 0x98,0xa7,0x5a,0x84,0xc7,0x7a,0xae,0x07,0x7b,0xb2,0x21,0xc2,0xa3,0x51,0x0a,0x4b, + 0x93,0x73,0x31,0x93,0x0b,0x3b,0x6b,0x2b,0x73,0xa3,0xfb,0x4a,0x73,0x83,0xab,0xa3, + 0xe3,0x52,0x37,0x57,0x26,0x87,0xc2,0xf6,0x36,0xe6,0x06,0x93,0x42,0x25,0x2c,0x4d, + 0xce,0x65,0xac,0xcc,0x8d,0xae,0x4c,0x8e,0x4f,0x58,0x9a,0x9c,0x0b,0x5c,0x99,0xdc, + 
0x1c,0x5c,0xd9,0x18,0x5d,0x9a,0x5d,0x19,0x85,0x3a,0xbb,0x21,0xd2,0x22,0x3c,0xdc, + 0xd3,0x3d,0xde,0xf3,0x3d,0xd6,0x73,0x3d,0xd8,0x03,0x06,0x5c,0xea,0xe6,0xca,0xe4, + 0x50,0xd8,0xde,0xc6,0xdc,0x62,0x52,0x58,0x8c,0xbd,0xb1,0xbd,0xc9,0x0d,0x91,0x96, + 0xe1,0xe1,0x1e,0x31,0x78,0xbc,0xe7,0x7b,0xac,0xe7,0x7a,0xb0,0x67,0x0c,0xb8,0x84, + 0xa5,0xc9,0xb9,0xd0,0x95,0xe1,0xd1,0xd5,0xc9,0x95,0x51,0x0a,0x4b,0x93,0x73,0x61, + 0x7b,0x1b,0x0b,0xa3,0x4b,0x7b,0x73,0xfb,0x4a,0x73,0x23,0x2b,0xc3,0xa3,0x12,0x96, + 0x26,0xe7,0x32,0x17,0xd6,0x06,0xc7,0x56,0x46,0x8c,0xae,0x0c,0x8f,0xae,0x4e,0xae, + 0x4c,0x86,0x8c,0xc7,0x8c,0xed,0x2d,0x8c,0x8e,0x05,0x64,0x2e,0xac,0x0d,0x8e,0xad, + 0xcc,0x87,0x03,0x5d,0x19,0xde,0x10,0x6a,0x21,0x9e,0x32,0x78,0xcc,0x60,0x11,0x96, + 0xe1,0x39,0x83,0xc7,0x7a,0xd0,0xe0,0xc1,0x9e,0x34,0xe0,0x12,0x96,0x26,0xe7,0x32, + 0x17,0xd6,0x06,0xc7,0x56,0x26,0xc7,0x63,0x2e,0xac,0x0d,0x8e,0xad,0x4c,0x8e,0x08, + 0x5d,0x19,0xde,0x54,0x1b,0x1c,0x9b,0xdc,0x10,0x69,0x39,0x9e,0x35,0x78,0xcc,0x60, + 0x11,0x96,0xe1,0xb1,0x1e,0x36,0x78,0xb0,0xa7,0x0d,0x86,0x20,0x4f,0x18,0x3c,0x64, + 0xf0,0xa8,0xc1,0xe3,0x06,0x43,0x0c,0x04,0x78,0xb6,0xe7,0x0d,0x46,0x44,0xec,0xc0, + 0x0e,0xf6,0xd0,0x0e,0x6e,0xd0,0x0e,0xef,0x40,0x0e,0xf5,0xc0,0x0e,0xe5,0xe0,0x06, + 0xe6,0xc0,0x0e,0xe1,0x70,0x0e,0xf3,0x30,0x45,0x08,0x86,0x11,0x0a,0x3b,0xb0,0x83, + 0x3d,0xb4,0x83,0x1b,0xa4,0x03,0x39,0x94,0x83,0x3b,0xd0,0xc3,0x94,0xa0,0x18,0xb1, + 0x84,0x43,0x3a,0xc8,0x83,0x1b,0xd8,0x43,0x39,0xc8,0xc3,0x3c,0xa4,0xc3,0x3b,0xb8, + 0xc3,0x94,0xc0,0x18,0x41,0x85,0x43,0x3a,0xc8,0x83,0x1b,0xb0,0x43,0x38,0xb8,0xc3, + 0x39,0xd4,0x43,0x38,0x9c,0x43,0x39,0xfc,0x82,0x3d,0x94,0x83,0x3c,0xcc,0x43,0x3a, + 0xbc,0x83,0x3b,0x4c,0x09,0x90,0x11,0x53,0x38,0xa4,0x83,0x3c,0xb8,0xc1,0x38,0xbc, + 0x43,0x3b,0xc0,0x43,0x3a,0xb0,0x43,0x39,0xfc,0xc2,0x3b,0xc0,0x03,0x3d,0xa4,0xc3, + 0x3b,0xb8,0xc3,0x3c,0x4c,0x31,0x14,0xc6,0x81,0x24,0x6a,0x04,0x13,0x0e,0xe9,0x20, + 0x0f,0x6e,0x60,0x0e,0xf2,0x10,0x0e,0xe7,0xd0,0x0e,0xe5,0xe0,0x0e,0xf4,0x30,0x25, + 0x80,0x03,0x00,0x00,0x00,0x79,0x18,0x00,0x00,0x6d,0x00,0x00,0x00,0x33,0x08,0x80, + 0x1c,0xc4,0xe1,0x1c,0x66,0x14,0x01,0x3d,0x88,0x43,0x38,0x84,0xc3,0x8c,0x42,0x80, + 0x07,0x79,0x78,0x07,0x73,0x98,0x71,0x0c,0xe6,0x00,0x0f,0xed,0x10,0x0e,0xf4,0x80, + 0x0e,0x33,0x0c,0x42,0x1e,0xc2,0xc1,0x1d,0xce,0xa1,0x1c,0x66,0x30,0x05,0x3d,0x88, + 0x43,0x38,0x84,0x83,0x1b,0xcc,0x03,0x3d,0xc8,0x43,0x3d,0x8c,0x03,0x3d,0xcc,0x78, + 0x8c,0x74,0x70,0x07,0x7b,0x08,0x07,0x79,0x48,0x87,0x70,0x70,0x07,0x7a,0x70,0x03, + 0x76,0x78,0x87,0x70,0x20,0x87,0x19,0xcc,0x11,0x0e,0xec,0x90,0x0e,0xe1,0x30,0x0f, + 0x6e,0x30,0x0f,0xe3,0xf0,0x0e,0xf0,0x50,0x0e,0x33,0x10,0xc4,0x1d,0xde,0x21,0x1c, + 0xd8,0x21,0x1d,0xc2,0x61,0x1e,0x66,0x30,0x89,0x3b,0xbc,0x83,0x3b,0xd0,0x43,0x39, + 0xb4,0x03,0x3c,0xbc,0x83,0x3c,0x84,0x03,0x3b,0xcc,0xf0,0x14,0x76,0x60,0x07,0x7b, + 0x68,0x07,0x37,0x68,0x87,0x72,0x68,0x07,0x37,0x80,0x87,0x70,0x90,0x87,0x70,0x60, + 0x07,0x76,0x28,0x07,0x76,0xf8,0x05,0x76,0x78,0x87,0x77,0x80,0x87,0x5f,0x08,0x87, + 0x71,0x18,0x87,0x72,0x98,0x87,0x79,0x98,0x81,0x2c,0xee,0xf0,0x0e,0xee,0xe0,0x0e, + 0xf5,0xc0,0x0e,0xec,0x30,0x03,0x62,0xc8,0xa1,0x1c,0xe4,0xa1,0x1c,0xcc,0xa1,0x1c, + 0xe4,0xa1,0x1c,0xdc,0x61,0x1c,0xca,0x21,0x1c,0xc4,0x81,0x1d,0xca,0x61,0x06,0xd6, + 0x90,0x43,0x39,0xc8,0x43,0x39,0x98,0x43,0x39,0xc8,0x43,0x39,0xb8,0xc3,0x38,0x94, + 0x43,0x38,0x88,0x03,0x3b,0x94,0xc3,0x2f,0xbc,0x83,0x3c,0xfc,0x82,0x3b,0xd4,0x03, + 0x3b,0xb0,0xc3,0x0c,0xc7,0x69,0x87,0x70,0x58,0x87,0x72,0x70,0x83,0x74,0x68,0x07, + 
0x78,0x60,0x87,0x74,0x18,0x87,0x74,0xa0,0x87,0x19,0xce,0x53,0x0f,0xee,0x00,0x0f, + 0xf2,0x50,0x0e,0xe4,0x90,0x0e,0xe3,0x40,0x0f,0xe1,0x20,0x0e,0xec,0x50,0x0e,0x33, + 0x20,0x28,0x1d,0xdc,0xc1,0x1e,0xc2,0x41,0x1e,0xd2,0x21,0x1c,0xdc,0x81,0x1e,0xdc, + 0xe0,0x1c,0xe4,0xe1,0x1d,0xea,0x01,0x1e,0x66,0x18,0x51,0x38,0xb0,0x43,0x3a,0x9c, + 0x83,0x3b,0xcc,0x50,0x24,0x76,0x60,0x07,0x7b,0x68,0x07,0x37,0x60,0x87,0x77,0x78, + 0x07,0x78,0x98,0x51,0x4c,0xf4,0x90,0x0f,0xf0,0x50,0x0e,0x33,0x1e,0x6a,0x1e,0xca, + 0x61,0x1c,0xe8,0x21,0x1d,0xde,0xc1,0x1d,0x7e,0x01,0x1e,0xe4,0xa1,0x1c,0xcc,0x21, + 0x1d,0xf0,0x61,0x06,0x54,0x85,0x83,0x38,0xcc,0xc3,0x3b,0xb0,0x43,0x3d,0xd0,0x43, + 0x39,0xfc,0xc2,0x3c,0xe4,0x43,0x3b,0x88,0xc3,0x3b,0xb0,0xc3,0x8c,0xc5,0x0a,0x87, + 0x79,0x98,0x87,0x77,0x18,0x87,0x74,0x08,0x07,0x7a,0x28,0x07,0x72,0x00,0x00,0x00, + 0x00,0x71,0x20,0x00,0x00,0x08,0x00,0x00,0x00,0x16,0xb0,0x01,0x48,0xe4,0x4b,0x00, + 0xf3,0x2c,0xc4,0x3f,0x11,0xd7,0x44,0x45,0xc4,0x6f,0x0f,0x7e,0x85,0x17,0xb7,0x6d, + 0x00,0x05,0x03,0x20,0x0d,0x0d,0x00,0x00,0x00,0x61,0x20,0x00,0x00,0x16,0x00,0x00, + 0x00,0x13,0x04,0x41,0x2c,0x10,0x00,0x00,0x00,0x0b,0x00,0x00,0x00,0x14,0xc7,0x22, + 0x80,0x40,0x20,0x88,0x8d,0x00,0x8c,0x25,0x00,0x01,0xa9,0x11,0x80,0x1a,0x20,0x31, + 0x03,0x40,0x61,0x0e,0xe2,0xba,0xae,0x6a,0x06,0x80,0xc0,0x0c,0xc0,0x08,0xc0,0x18, + 0x01,0x08,0x82,0x20,0xfe,0x01,0x00,0x00,0x00,0x83,0x0c,0x0f,0x91,0x8c,0x18,0x28, + 0x42,0x80,0x39,0x4d,0x80,0x2c,0xc9,0x30,0xc8,0x70,0x04,0x8d,0x05,0x91,0x7c,0x66, + 0x1b,0x94,0x00,0xc8,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +}; +static const char _sfons_vs_source_metal_sim[698] = { + 0x23,0x69,0x6e,0x63,0x6c,0x75,0x64,0x65,0x20,0x3c,0x6d,0x65,0x74,0x61,0x6c,0x5f, + 0x73,0x74,0x64,0x6c,0x69,0x62,0x3e,0x0a,0x23,0x69,0x6e,0x63,0x6c,0x75,0x64,0x65, + 0x20,0x3c,0x73,0x69,0x6d,0x64,0x2f,0x73,0x69,0x6d,0x64,0x2e,0x68,0x3e,0x0a,0x0a, + 0x75,0x73,0x69,0x6e,0x67,0x20,0x6e,0x61,0x6d,0x65,0x73,0x70,0x61,0x63,0x65,0x20, + 0x6d,0x65,0x74,0x61,0x6c,0x3b,0x0a,0x0a,0x73,0x74,0x72,0x75,0x63,0x74,0x20,0x76, + 0x73,0x5f,0x70,0x61,0x72,0x61,0x6d,0x73,0x0a,0x7b,0x0a,0x20,0x20,0x20,0x20,0x66, + 0x6c,0x6f,0x61,0x74,0x34,0x78,0x34,0x20,0x6d,0x76,0x70,0x3b,0x0a,0x20,0x20,0x20, + 0x20,0x66,0x6c,0x6f,0x61,0x74,0x34,0x78,0x34,0x20,0x74,0x6d,0x3b,0x0a,0x7d,0x3b, + 0x0a,0x0a,0x73,0x74,0x72,0x75,0x63,0x74,0x20,0x6d,0x61,0x69,0x6e,0x30,0x5f,0x6f, + 0x75,0x74,0x0a,0x7b,0x0a,0x20,0x20,0x20,0x20,0x66,0x6c,0x6f,0x61,0x74,0x34,0x20, + 0x75,0x76,0x20,0x5b,0x5b,0x75,0x73,0x65,0x72,0x28,0x6c,0x6f,0x63,0x6e,0x30,0x29, + 0x5d,0x5d,0x3b,0x0a,0x20,0x20,0x20,0x20,0x66,0x6c,0x6f,0x61,0x74,0x34,0x20,0x63, + 0x6f,0x6c,0x6f,0x72,0x20,0x5b,0x5b,0x75,0x73,0x65,0x72,0x28,0x6c,0x6f,0x63,0x6e, + 0x31,0x29,0x5d,0x5d,0x3b,0x0a,0x20,0x20,0x20,0x20,0x66,0x6c,0x6f,0x61,0x74,0x34, + 0x20,0x67,0x6c,0x5f,0x50,0x6f,0x73,0x69,0x74,0x69,0x6f,0x6e,0x20,0x5b,0x5b,0x70, + 0x6f,0x73,0x69,0x74,0x69,0x6f,0x6e,0x5d,0x5d,0x3b,0x0a,0x7d,0x3b,0x0a,0x0a,0x73, + 0x74,0x72,0x75,0x63,0x74,0x20,0x6d,0x61,0x69,0x6e,0x30,0x5f,0x69,0x6e,0x0a,0x7b, + 0x0a,0x20,0x20,0x20,0x20,0x66,0x6c,0x6f,0x61,0x74,0x34,0x20,0x70,0x6f,0x73,0x69, + 0x74,0x69,0x6f,0x6e,0x20,0x5b,0x5b,0x61,0x74,0x74,0x72,0x69,0x62,0x75,0x74,0x65, + 0x28,0x30,0x29,0x5d,0x5d,0x3b,0x0a,0x20,0x20,0x20,0x20,0x66,0x6c,0x6f,0x61,0x74, + 0x32,0x20,0x74,0x65,0x78,0x63,0x6f,0x6f,0x72,0x64,0x30,0x20,0x5b,0x5b,0x61,0x74, + 0x74,0x72,0x69,0x62,0x75,0x74,0x65,0x28,0x31,0x29,0x5d,0x5d,0x3b,0x0a,0x20,0x20, + 0x20,0x20,0x66,0x6c,0x6f,0x61,0x74,0x34,0x20,0x63,0x6f,0x6c,0x6f,0x72,0x30,0x20, + 
0x5b,0x5b,0x61,0x74,0x74,0x72,0x69,0x62,0x75,0x74,0x65,0x28,0x32,0x29,0x5d,0x5d, + 0x3b,0x0a,0x7d,0x3b,0x0a,0x0a,0x23,0x6c,0x69,0x6e,0x65,0x20,0x31,0x36,0x20,0x22, + 0x22,0x0a,0x76,0x65,0x72,0x74,0x65,0x78,0x20,0x6d,0x61,0x69,0x6e,0x30,0x5f,0x6f, + 0x75,0x74,0x20,0x6d,0x61,0x69,0x6e,0x30,0x28,0x6d,0x61,0x69,0x6e,0x30,0x5f,0x69, + 0x6e,0x20,0x69,0x6e,0x20,0x5b,0x5b,0x73,0x74,0x61,0x67,0x65,0x5f,0x69,0x6e,0x5d, + 0x5d,0x2c,0x20,0x63,0x6f,0x6e,0x73,0x74,0x61,0x6e,0x74,0x20,0x76,0x73,0x5f,0x70, + 0x61,0x72,0x61,0x6d,0x73,0x26,0x20,0x5f,0x32,0x30,0x20,0x5b,0x5b,0x62,0x75,0x66, + 0x66,0x65,0x72,0x28,0x30,0x29,0x5d,0x5d,0x29,0x0a,0x7b,0x0a,0x20,0x20,0x20,0x20, + 0x6d,0x61,0x69,0x6e,0x30,0x5f,0x6f,0x75,0x74,0x20,0x6f,0x75,0x74,0x20,0x3d,0x20, + 0x7b,0x7d,0x3b,0x0a,0x23,0x6c,0x69,0x6e,0x65,0x20,0x31,0x36,0x20,0x22,0x22,0x0a, + 0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x2e,0x67,0x6c,0x5f,0x50,0x6f,0x73,0x69,0x74, + 0x69,0x6f,0x6e,0x20,0x3d,0x20,0x5f,0x32,0x30,0x2e,0x6d,0x76,0x70,0x20,0x2a,0x20, + 0x69,0x6e,0x2e,0x70,0x6f,0x73,0x69,0x74,0x69,0x6f,0x6e,0x3b,0x0a,0x23,0x6c,0x69, + 0x6e,0x65,0x20,0x31,0x37,0x20,0x22,0x22,0x0a,0x20,0x20,0x20,0x20,0x6f,0x75,0x74, + 0x2e,0x75,0x76,0x20,0x3d,0x20,0x5f,0x32,0x30,0x2e,0x74,0x6d,0x20,0x2a,0x20,0x66, + 0x6c,0x6f,0x61,0x74,0x34,0x28,0x69,0x6e,0x2e,0x74,0x65,0x78,0x63,0x6f,0x6f,0x72, + 0x64,0x30,0x2c,0x20,0x30,0x2e,0x30,0x2c,0x20,0x31,0x2e,0x30,0x29,0x3b,0x0a,0x23, + 0x6c,0x69,0x6e,0x65,0x20,0x31,0x38,0x20,0x22,0x22,0x0a,0x20,0x20,0x20,0x20,0x6f, + 0x75,0x74,0x2e,0x63,0x6f,0x6c,0x6f,0x72,0x20,0x3d,0x20,0x69,0x6e,0x2e,0x63,0x6f, + 0x6c,0x6f,0x72,0x30,0x3b,0x0a,0x20,0x20,0x20,0x20,0x72,0x65,0x74,0x75,0x72,0x6e, + 0x20,0x6f,0x75,0x74,0x3b,0x0a,0x7d,0x0a,0x0a,0x00, +}; +static const char _sfons_fs_source_metal_sim[498] = { + 0x23,0x69,0x6e,0x63,0x6c,0x75,0x64,0x65,0x20,0x3c,0x6d,0x65,0x74,0x61,0x6c,0x5f, + 0x73,0x74,0x64,0x6c,0x69,0x62,0x3e,0x0a,0x23,0x69,0x6e,0x63,0x6c,0x75,0x64,0x65, + 0x20,0x3c,0x73,0x69,0x6d,0x64,0x2f,0x73,0x69,0x6d,0x64,0x2e,0x68,0x3e,0x0a,0x0a, + 0x75,0x73,0x69,0x6e,0x67,0x20,0x6e,0x61,0x6d,0x65,0x73,0x70,0x61,0x63,0x65,0x20, + 0x6d,0x65,0x74,0x61,0x6c,0x3b,0x0a,0x0a,0x73,0x74,0x72,0x75,0x63,0x74,0x20,0x6d, + 0x61,0x69,0x6e,0x30,0x5f,0x6f,0x75,0x74,0x0a,0x7b,0x0a,0x20,0x20,0x20,0x20,0x66, + 0x6c,0x6f,0x61,0x74,0x34,0x20,0x66,0x72,0x61,0x67,0x5f,0x63,0x6f,0x6c,0x6f,0x72, + 0x20,0x5b,0x5b,0x63,0x6f,0x6c,0x6f,0x72,0x28,0x30,0x29,0x5d,0x5d,0x3b,0x0a,0x7d, + 0x3b,0x0a,0x0a,0x73,0x74,0x72,0x75,0x63,0x74,0x20,0x6d,0x61,0x69,0x6e,0x30,0x5f, + 0x69,0x6e,0x0a,0x7b,0x0a,0x20,0x20,0x20,0x20,0x66,0x6c,0x6f,0x61,0x74,0x34,0x20, + 0x75,0x76,0x20,0x5b,0x5b,0x75,0x73,0x65,0x72,0x28,0x6c,0x6f,0x63,0x6e,0x30,0x29, + 0x5d,0x5d,0x3b,0x0a,0x20,0x20,0x20,0x20,0x66,0x6c,0x6f,0x61,0x74,0x34,0x20,0x63, + 0x6f,0x6c,0x6f,0x72,0x20,0x5b,0x5b,0x75,0x73,0x65,0x72,0x28,0x6c,0x6f,0x63,0x6e, + 0x31,0x29,0x5d,0x5d,0x3b,0x0a,0x7d,0x3b,0x0a,0x0a,0x23,0x6c,0x69,0x6e,0x65,0x20, + 0x31,0x31,0x20,0x22,0x22,0x0a,0x66,0x72,0x61,0x67,0x6d,0x65,0x6e,0x74,0x20,0x6d, + 0x61,0x69,0x6e,0x30,0x5f,0x6f,0x75,0x74,0x20,0x6d,0x61,0x69,0x6e,0x30,0x28,0x6d, + 0x61,0x69,0x6e,0x30,0x5f,0x69,0x6e,0x20,0x69,0x6e,0x20,0x5b,0x5b,0x73,0x74,0x61, + 0x67,0x65,0x5f,0x69,0x6e,0x5d,0x5d,0x2c,0x20,0x74,0x65,0x78,0x74,0x75,0x72,0x65, + 0x32,0x64,0x3c,0x66,0x6c,0x6f,0x61,0x74,0x3e,0x20,0x74,0x65,0x78,0x20,0x5b,0x5b, + 0x74,0x65,0x78,0x74,0x75,0x72,0x65,0x28,0x30,0x29,0x5d,0x5d,0x2c,0x20,0x73,0x61, + 0x6d,0x70,0x6c,0x65,0x72,0x20,0x74,0x65,0x78,0x53,0x6d,0x70,0x6c,0x72,0x20,0x5b, + 
0x5b,0x73,0x61,0x6d,0x70,0x6c,0x65,0x72,0x28,0x30,0x29,0x5d,0x5d,0x29,0x0a,0x7b, + 0x0a,0x20,0x20,0x20,0x20,0x6d,0x61,0x69,0x6e,0x30,0x5f,0x6f,0x75,0x74,0x20,0x6f, + 0x75,0x74,0x20,0x3d,0x20,0x7b,0x7d,0x3b,0x0a,0x23,0x6c,0x69,0x6e,0x65,0x20,0x31, + 0x31,0x20,0x22,0x22,0x0a,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x2e,0x66,0x72,0x61, + 0x67,0x5f,0x63,0x6f,0x6c,0x6f,0x72,0x20,0x3d,0x20,0x66,0x6c,0x6f,0x61,0x74,0x34, + 0x28,0x31,0x2e,0x30,0x2c,0x20,0x31,0x2e,0x30,0x2c,0x20,0x31,0x2e,0x30,0x2c,0x20, + 0x74,0x65,0x78,0x2e,0x73,0x61,0x6d,0x70,0x6c,0x65,0x28,0x74,0x65,0x78,0x53,0x6d, + 0x70,0x6c,0x72,0x2c,0x20,0x69,0x6e,0x2e,0x75,0x76,0x2e,0x78,0x79,0x29,0x2e,0x78, + 0x29,0x20,0x2a,0x20,0x69,0x6e,0x2e,0x63,0x6f,0x6c,0x6f,0x72,0x3b,0x0a,0x20,0x20, + 0x20,0x20,0x72,0x65,0x74,0x75,0x72,0x6e,0x20,0x6f,0x75,0x74,0x3b,0x0a,0x7d,0x0a, + 0x0a,0x00, +}; +#elif defined(SOKOL_D3D11) +static const uint8_t _sfons_vs_bytecode_hlsl4[1008] = { + 0x44,0x58,0x42,0x43,0xf3,0xd9,0xf4,0x58,0x44,0xc2,0xb9,0x96,0x56,0x7c,0xb3,0x71, + 0x84,0xc5,0xde,0x61,0x01,0x00,0x00,0x00,0xf0,0x03,0x00,0x00,0x05,0x00,0x00,0x00, + 0x34,0x00,0x00,0x00,0x14,0x01,0x00,0x00,0x78,0x01,0x00,0x00,0xe8,0x01,0x00,0x00, + 0x74,0x03,0x00,0x00,0x52,0x44,0x45,0x46,0xd8,0x00,0x00,0x00,0x01,0x00,0x00,0x00, + 0x48,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x1c,0x00,0x00,0x00,0x00,0x04,0xfe,0xff, + 0x10,0x81,0x00,0x00,0xaf,0x00,0x00,0x00,0x3c,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x01,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x76,0x73,0x5f,0x70,0x61,0x72,0x61,0x6d, + 0x73,0x00,0xab,0xab,0x3c,0x00,0x00,0x00,0x02,0x00,0x00,0x00,0x60,0x00,0x00,0x00, + 0x80,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x90,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x40,0x00,0x00,0x00,0x02,0x00,0x00,0x00,0x98,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0xa8,0x00,0x00,0x00,0x40,0x00,0x00,0x00,0x40,0x00,0x00,0x00, + 0x02,0x00,0x00,0x00,0x98,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x5f,0x32,0x30,0x5f, + 0x6d,0x76,0x70,0x00,0x02,0x00,0x03,0x00,0x04,0x00,0x04,0x00,0x00,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x5f,0x32,0x30,0x5f,0x74,0x6d,0x00,0x4d,0x69,0x63,0x72,0x6f, + 0x73,0x6f,0x66,0x74,0x20,0x28,0x52,0x29,0x20,0x48,0x4c,0x53,0x4c,0x20,0x53,0x68, + 0x61,0x64,0x65,0x72,0x20,0x43,0x6f,0x6d,0x70,0x69,0x6c,0x65,0x72,0x20,0x31,0x30, + 0x2e,0x31,0x00,0xab,0x49,0x53,0x47,0x4e,0x5c,0x00,0x00,0x00,0x03,0x00,0x00,0x00, + 0x08,0x00,0x00,0x00,0x50,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x03,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x0f,0x0f,0x00,0x00,0x50,0x00,0x00,0x00, + 0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x03,0x00,0x00,0x00,0x01,0x00,0x00,0x00, + 0x03,0x03,0x00,0x00,0x50,0x00,0x00,0x00,0x02,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x03,0x00,0x00,0x00,0x02,0x00,0x00,0x00,0x0f,0x0f,0x00,0x00,0x54,0x45,0x58,0x43, + 0x4f,0x4f,0x52,0x44,0x00,0xab,0xab,0xab,0x4f,0x53,0x47,0x4e,0x68,0x00,0x00,0x00, + 0x03,0x00,0x00,0x00,0x08,0x00,0x00,0x00,0x50,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x03,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x0f,0x00,0x00,0x00, + 0x50,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x03,0x00,0x00,0x00, + 0x01,0x00,0x00,0x00,0x0f,0x00,0x00,0x00,0x59,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x01,0x00,0x00,0x00,0x03,0x00,0x00,0x00,0x02,0x00,0x00,0x00,0x0f,0x00,0x00,0x00, + 0x54,0x45,0x58,0x43,0x4f,0x4f,0x52,0x44,0x00,0x53,0x56,0x5f,0x50,0x6f,0x73,0x69, + 0x74,0x69,0x6f,0x6e,0x00,0xab,0xab,0xab,0x53,0x48,0x44,0x52,0x84,0x01,0x00,0x00, + 
0x40,0x00,0x01,0x00,0x61,0x00,0x00,0x00,0x59,0x00,0x00,0x04,0x46,0x8e,0x20,0x00, + 0x00,0x00,0x00,0x00,0x08,0x00,0x00,0x00,0x5f,0x00,0x00,0x03,0xf2,0x10,0x10,0x00, + 0x00,0x00,0x00,0x00,0x5f,0x00,0x00,0x03,0x32,0x10,0x10,0x00,0x01,0x00,0x00,0x00, + 0x5f,0x00,0x00,0x03,0xf2,0x10,0x10,0x00,0x02,0x00,0x00,0x00,0x65,0x00,0x00,0x03, + 0xf2,0x20,0x10,0x00,0x00,0x00,0x00,0x00,0x65,0x00,0x00,0x03,0xf2,0x20,0x10,0x00, + 0x01,0x00,0x00,0x00,0x67,0x00,0x00,0x04,0xf2,0x20,0x10,0x00,0x02,0x00,0x00,0x00, + 0x01,0x00,0x00,0x00,0x68,0x00,0x00,0x02,0x01,0x00,0x00,0x00,0x38,0x00,0x00,0x08, + 0xf2,0x00,0x10,0x00,0x00,0x00,0x00,0x00,0x56,0x15,0x10,0x00,0x01,0x00,0x00,0x00, + 0x46,0x8e,0x20,0x00,0x00,0x00,0x00,0x00,0x05,0x00,0x00,0x00,0x32,0x00,0x00,0x0a, + 0xf2,0x00,0x10,0x00,0x00,0x00,0x00,0x00,0x06,0x10,0x10,0x00,0x01,0x00,0x00,0x00, + 0x46,0x8e,0x20,0x00,0x00,0x00,0x00,0x00,0x04,0x00,0x00,0x00,0x46,0x0e,0x10,0x00, + 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x08,0xf2,0x20,0x10,0x00,0x00,0x00,0x00,0x00, + 0x46,0x0e,0x10,0x00,0x00,0x00,0x00,0x00,0x46,0x8e,0x20,0x00,0x00,0x00,0x00,0x00, + 0x07,0x00,0x00,0x00,0x36,0x00,0x00,0x05,0xf2,0x20,0x10,0x00,0x01,0x00,0x00,0x00, + 0x46,0x1e,0x10,0x00,0x02,0x00,0x00,0x00,0x38,0x00,0x00,0x08,0xf2,0x00,0x10,0x00, + 0x00,0x00,0x00,0x00,0x56,0x15,0x10,0x00,0x00,0x00,0x00,0x00,0x46,0x8e,0x20,0x00, + 0x00,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x32,0x00,0x00,0x0a,0xf2,0x00,0x10,0x00, + 0x00,0x00,0x00,0x00,0x06,0x10,0x10,0x00,0x00,0x00,0x00,0x00,0x46,0x8e,0x20,0x00, + 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x46,0x0e,0x10,0x00,0x00,0x00,0x00,0x00, + 0x32,0x00,0x00,0x0a,0xf2,0x00,0x10,0x00,0x00,0x00,0x00,0x00,0xa6,0x1a,0x10,0x00, + 0x00,0x00,0x00,0x00,0x46,0x8e,0x20,0x00,0x00,0x00,0x00,0x00,0x02,0x00,0x00,0x00, + 0x46,0x0e,0x10,0x00,0x00,0x00,0x00,0x00,0x32,0x00,0x00,0x0a,0xf2,0x20,0x10,0x00, + 0x02,0x00,0x00,0x00,0xf6,0x1f,0x10,0x00,0x00,0x00,0x00,0x00,0x46,0x8e,0x20,0x00, + 0x00,0x00,0x00,0x00,0x03,0x00,0x00,0x00,0x46,0x0e,0x10,0x00,0x00,0x00,0x00,0x00, + 0x3e,0x00,0x00,0x01,0x53,0x54,0x41,0x54,0x74,0x00,0x00,0x00,0x09,0x00,0x00,0x00, + 0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x06,0x00,0x00,0x00,0x07,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + +}; +static const uint8_t _sfons_fs_bytecode_hlsl4[640] = { + 0x44,0x58,0x42,0x43,0x5e,0xbc,0x77,0xd9,0xc9,0xdf,0x7d,0x8e,0x0c,0x24,0x18,0x66, + 0x36,0xd3,0xf4,0x78,0x01,0x00,0x00,0x00,0x80,0x02,0x00,0x00,0x05,0x00,0x00,0x00, + 0x34,0x00,0x00,0x00,0xd4,0x00,0x00,0x00,0x20,0x01,0x00,0x00,0x54,0x01,0x00,0x00, + 0x04,0x02,0x00,0x00,0x52,0x44,0x45,0x46,0x98,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x02,0x00,0x00,0x00,0x1c,0x00,0x00,0x00,0x00,0x04,0xff,0xff, + 0x10,0x81,0x00,0x00,0x6d,0x00,0x00,0x00,0x5c,0x00,0x00,0x00,0x03,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x01,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x69,0x00,0x00,0x00,0x02,0x00,0x00,0x00, + 0x05,0x00,0x00,0x00,0x04,0x00,0x00,0x00,0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00, + 0x01,0x00,0x00,0x00,0x0d,0x00,0x00,0x00,0x5f,0x74,0x65,0x78,0x5f,0x73,0x61,0x6d, + 
0x70,0x6c,0x65,0x72,0x00,0x74,0x65,0x78,0x00,0x4d,0x69,0x63,0x72,0x6f,0x73,0x6f, + 0x66,0x74,0x20,0x28,0x52,0x29,0x20,0x48,0x4c,0x53,0x4c,0x20,0x53,0x68,0x61,0x64, + 0x65,0x72,0x20,0x43,0x6f,0x6d,0x70,0x69,0x6c,0x65,0x72,0x20,0x31,0x30,0x2e,0x31, + 0x00,0xab,0xab,0xab,0x49,0x53,0x47,0x4e,0x44,0x00,0x00,0x00,0x02,0x00,0x00,0x00, + 0x08,0x00,0x00,0x00,0x38,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x03,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x0f,0x03,0x00,0x00,0x38,0x00,0x00,0x00, + 0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x03,0x00,0x00,0x00,0x01,0x00,0x00,0x00, + 0x0f,0x0f,0x00,0x00,0x54,0x45,0x58,0x43,0x4f,0x4f,0x52,0x44,0x00,0xab,0xab,0xab, + 0x4f,0x53,0x47,0x4e,0x2c,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x08,0x00,0x00,0x00, + 0x20,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x03,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x0f,0x00,0x00,0x00,0x53,0x56,0x5f,0x54,0x61,0x72,0x67,0x65, + 0x74,0x00,0xab,0xab,0x53,0x48,0x44,0x52,0xa8,0x00,0x00,0x00,0x40,0x00,0x00,0x00, + 0x2a,0x00,0x00,0x00,0x5a,0x00,0x00,0x03,0x00,0x60,0x10,0x00,0x00,0x00,0x00,0x00, + 0x58,0x18,0x00,0x04,0x00,0x70,0x10,0x00,0x00,0x00,0x00,0x00,0x55,0x55,0x00,0x00, + 0x62,0x10,0x00,0x03,0x32,0x10,0x10,0x00,0x00,0x00,0x00,0x00,0x62,0x10,0x00,0x03, + 0xf2,0x10,0x10,0x00,0x01,0x00,0x00,0x00,0x65,0x00,0x00,0x03,0xf2,0x20,0x10,0x00, + 0x00,0x00,0x00,0x00,0x68,0x00,0x00,0x02,0x01,0x00,0x00,0x00,0x45,0x00,0x00,0x09, + 0xf2,0x00,0x10,0x00,0x00,0x00,0x00,0x00,0x46,0x10,0x10,0x00,0x00,0x00,0x00,0x00, + 0x96,0x73,0x10,0x00,0x00,0x00,0x00,0x00,0x00,0x60,0x10,0x00,0x00,0x00,0x00,0x00, + 0x36,0x00,0x00,0x05,0x12,0x00,0x10,0x00,0x00,0x00,0x00,0x00,0x01,0x40,0x00,0x00, + 0x00,0x00,0x80,0x3f,0x38,0x00,0x00,0x07,0xf2,0x20,0x10,0x00,0x00,0x00,0x00,0x00, + 0x06,0x0c,0x10,0x00,0x00,0x00,0x00,0x00,0x46,0x1e,0x10,0x00,0x01,0x00,0x00,0x00, + 0x3e,0x00,0x00,0x01,0x53,0x54,0x41,0x54,0x74,0x00,0x00,0x00,0x04,0x00,0x00,0x00, + 0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x03,0x00,0x00,0x00,0x01,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + +}; +#elif defined(SOKOL_WGPU) +/* + WebGPU shader blobs: + + Vertex shader source: + + #version 450 + + layout(set = 0, binding = 0, std140) uniform vs_params + { + mat4 mvp; + mat4 tm; + } _20; + + layout(location = 0) in vec4 position; + layout(location = 0) out vec4 uv; + layout(location = 1) in vec2 texcoord0; + layout(location = 1) out vec4 color; + layout(location = 2) in vec4 color0; + + void main() + { + gl_Position = _20.mvp * position; + uv = _20.tm * vec4(texcoord0, 0.0, 1.0); + color = color0; + } + + Fragment shader source: + + #version 450 + + layout(location = 0, set = 2, binding = 0) uniform sampler2D tex; + + layout(location = 0) out vec4 frag_color; + layout(location = 0) in vec4 uv; + layout(location = 1) in vec4 color; + + void main() + { + frag_color = vec4(1.0, 1.0, 1.0, texture(tex, uv.xy).x) * color; + } +*/ +static const uint8_t _sfons_vs_bytecode_wgpu[1932] = { + 0x03,0x02,0x23,0x07,0x00,0x00,0x01,0x00,0x08,0x00,0x08,0x00,0x32,0x00,0x00,0x00, + 
0x00,0x00,0x00,0x00,0x11,0x00,0x02,0x00,0x01,0x00,0x00,0x00,0x0b,0x00,0x06,0x00, + 0x02,0x00,0x00,0x00,0x47,0x4c,0x53,0x4c,0x2e,0x73,0x74,0x64,0x2e,0x34,0x35,0x30, + 0x00,0x00,0x00,0x00,0x0e,0x00,0x03,0x00,0x00,0x00,0x00,0x00,0x01,0x00,0x00,0x00, + 0x0f,0x00,0x0d,0x00,0x00,0x00,0x00,0x00,0x05,0x00,0x00,0x00,0x6d,0x61,0x69,0x6e, + 0x00,0x00,0x00,0x00,0x0e,0x00,0x00,0x00,0x19,0x00,0x00,0x00,0x1e,0x00,0x00,0x00, + 0x24,0x00,0x00,0x00,0x2c,0x00,0x00,0x00,0x2d,0x00,0x00,0x00,0x30,0x00,0x00,0x00, + 0x31,0x00,0x00,0x00,0x07,0x00,0x03,0x00,0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x03,0x00,0x37,0x00,0x02,0x00,0x00,0x00,0xc2,0x01,0x00,0x00,0x01,0x00,0x00,0x00, + 0x2f,0x2f,0x20,0x4f,0x70,0x4d,0x6f,0x64,0x75,0x6c,0x65,0x50,0x72,0x6f,0x63,0x65, + 0x73,0x73,0x65,0x64,0x20,0x63,0x6c,0x69,0x65,0x6e,0x74,0x20,0x76,0x75,0x6c,0x6b, + 0x61,0x6e,0x31,0x30,0x30,0x0a,0x2f,0x2f,0x20,0x4f,0x70,0x4d,0x6f,0x64,0x75,0x6c, + 0x65,0x50,0x72,0x6f,0x63,0x65,0x73,0x73,0x65,0x64,0x20,0x63,0x6c,0x69,0x65,0x6e, + 0x74,0x20,0x6f,0x70,0x65,0x6e,0x67,0x6c,0x31,0x30,0x30,0x0a,0x2f,0x2f,0x20,0x4f, + 0x70,0x4d,0x6f,0x64,0x75,0x6c,0x65,0x50,0x72,0x6f,0x63,0x65,0x73,0x73,0x65,0x64, + 0x20,0x74,0x61,0x72,0x67,0x65,0x74,0x2d,0x65,0x6e,0x76,0x20,0x76,0x75,0x6c,0x6b, + 0x61,0x6e,0x31,0x2e,0x30,0x0a,0x2f,0x2f,0x20,0x4f,0x70,0x4d,0x6f,0x64,0x75,0x6c, + 0x65,0x50,0x72,0x6f,0x63,0x65,0x73,0x73,0x65,0x64,0x20,0x74,0x61,0x72,0x67,0x65, + 0x74,0x2d,0x65,0x6e,0x76,0x20,0x6f,0x70,0x65,0x6e,0x67,0x6c,0x0a,0x2f,0x2f,0x20, + 0x4f,0x70,0x4d,0x6f,0x64,0x75,0x6c,0x65,0x50,0x72,0x6f,0x63,0x65,0x73,0x73,0x65, + 0x64,0x20,0x65,0x6e,0x74,0x72,0x79,0x2d,0x70,0x6f,0x69,0x6e,0x74,0x20,0x6d,0x61, + 0x69,0x6e,0x0a,0x23,0x6c,0x69,0x6e,0x65,0x20,0x31,0x0a,0x00,0x05,0x00,0x04,0x00, + 0x05,0x00,0x00,0x00,0x6d,0x61,0x69,0x6e,0x00,0x00,0x00,0x00,0x05,0x00,0x06,0x00, + 0x0c,0x00,0x00,0x00,0x67,0x6c,0x5f,0x50,0x65,0x72,0x56,0x65,0x72,0x74,0x65,0x78, + 0x00,0x00,0x00,0x00,0x06,0x00,0x06,0x00,0x0c,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x67,0x6c,0x5f,0x50,0x6f,0x73,0x69,0x74,0x69,0x6f,0x6e,0x00,0x06,0x00,0x07,0x00, + 0x0c,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x67,0x6c,0x5f,0x50,0x6f,0x69,0x6e,0x74, + 0x53,0x69,0x7a,0x65,0x00,0x00,0x00,0x00,0x06,0x00,0x07,0x00,0x0c,0x00,0x00,0x00, + 0x02,0x00,0x00,0x00,0x67,0x6c,0x5f,0x43,0x6c,0x69,0x70,0x44,0x69,0x73,0x74,0x61, + 0x6e,0x63,0x65,0x00,0x06,0x00,0x07,0x00,0x0c,0x00,0x00,0x00,0x03,0x00,0x00,0x00, + 0x67,0x6c,0x5f,0x43,0x75,0x6c,0x6c,0x44,0x69,0x73,0x74,0x61,0x6e,0x63,0x65,0x00, + 0x05,0x00,0x03,0x00,0x0e,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x05,0x00,0x05,0x00, + 0x12,0x00,0x00,0x00,0x76,0x73,0x5f,0x70,0x61,0x72,0x61,0x6d,0x73,0x00,0x00,0x00, + 0x06,0x00,0x04,0x00,0x12,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x6d,0x76,0x70,0x00, + 0x06,0x00,0x04,0x00,0x12,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x74,0x6d,0x00,0x00, + 0x05,0x00,0x03,0x00,0x14,0x00,0x00,0x00,0x5f,0x32,0x30,0x00,0x05,0x00,0x05,0x00, + 0x19,0x00,0x00,0x00,0x70,0x6f,0x73,0x69,0x74,0x69,0x6f,0x6e,0x00,0x00,0x00,0x00, + 0x05,0x00,0x03,0x00,0x1e,0x00,0x00,0x00,0x75,0x76,0x00,0x00,0x05,0x00,0x05,0x00, + 0x24,0x00,0x00,0x00,0x74,0x65,0x78,0x63,0x6f,0x6f,0x72,0x64,0x30,0x00,0x00,0x00, + 0x05,0x00,0x04,0x00,0x2c,0x00,0x00,0x00,0x63,0x6f,0x6c,0x6f,0x72,0x00,0x00,0x00, + 0x05,0x00,0x04,0x00,0x2d,0x00,0x00,0x00,0x63,0x6f,0x6c,0x6f,0x72,0x30,0x00,0x00, + 0x05,0x00,0x05,0x00,0x30,0x00,0x00,0x00,0x67,0x6c,0x5f,0x56,0x65,0x72,0x74,0x65, + 0x78,0x49,0x44,0x00,0x05,0x00,0x06,0x00,0x31,0x00,0x00,0x00,0x67,0x6c,0x5f,0x49, + 
0x6e,0x73,0x74,0x61,0x6e,0x63,0x65,0x49,0x44,0x00,0x00,0x00,0x48,0x00,0x05,0x00, + 0x0c,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x0b,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x48,0x00,0x05,0x00,0x0c,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x0b,0x00,0x00,0x00, + 0x01,0x00,0x00,0x00,0x48,0x00,0x05,0x00,0x0c,0x00,0x00,0x00,0x02,0x00,0x00,0x00, + 0x0b,0x00,0x00,0x00,0x03,0x00,0x00,0x00,0x48,0x00,0x05,0x00,0x0c,0x00,0x00,0x00, + 0x03,0x00,0x00,0x00,0x0b,0x00,0x00,0x00,0x04,0x00,0x00,0x00,0x47,0x00,0x03,0x00, + 0x0c,0x00,0x00,0x00,0x02,0x00,0x00,0x00,0x48,0x00,0x04,0x00,0x12,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x05,0x00,0x00,0x00,0x48,0x00,0x05,0x00,0x12,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x23,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x48,0x00,0x05,0x00, + 0x12,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x07,0x00,0x00,0x00,0x10,0x00,0x00,0x00, + 0x48,0x00,0x04,0x00,0x12,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x05,0x00,0x00,0x00, + 0x48,0x00,0x05,0x00,0x12,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x23,0x00,0x00,0x00, + 0x40,0x00,0x00,0x00,0x48,0x00,0x05,0x00,0x12,0x00,0x00,0x00,0x01,0x00,0x00,0x00, + 0x07,0x00,0x00,0x00,0x10,0x00,0x00,0x00,0x47,0x00,0x03,0x00,0x12,0x00,0x00,0x00, + 0x02,0x00,0x00,0x00,0x47,0x00,0x04,0x00,0x14,0x00,0x00,0x00,0x22,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x47,0x00,0x04,0x00,0x14,0x00,0x00,0x00,0x21,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x47,0x00,0x04,0x00,0x19,0x00,0x00,0x00,0x1e,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x47,0x00,0x04,0x00,0x1e,0x00,0x00,0x00,0x1e,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x47,0x00,0x04,0x00,0x24,0x00,0x00,0x00,0x1e,0x00,0x00,0x00, + 0x01,0x00,0x00,0x00,0x47,0x00,0x04,0x00,0x2c,0x00,0x00,0x00,0x1e,0x00,0x00,0x00, + 0x01,0x00,0x00,0x00,0x47,0x00,0x04,0x00,0x2d,0x00,0x00,0x00,0x1e,0x00,0x00,0x00, + 0x02,0x00,0x00,0x00,0x47,0x00,0x04,0x00,0x30,0x00,0x00,0x00,0x0b,0x00,0x00,0x00, + 0x05,0x00,0x00,0x00,0x47,0x00,0x04,0x00,0x31,0x00,0x00,0x00,0x0b,0x00,0x00,0x00, + 0x06,0x00,0x00,0x00,0x13,0x00,0x02,0x00,0x03,0x00,0x00,0x00,0x21,0x00,0x03,0x00, + 0x04,0x00,0x00,0x00,0x03,0x00,0x00,0x00,0x16,0x00,0x03,0x00,0x07,0x00,0x00,0x00, + 0x20,0x00,0x00,0x00,0x17,0x00,0x04,0x00,0x08,0x00,0x00,0x00,0x07,0x00,0x00,0x00, + 0x04,0x00,0x00,0x00,0x15,0x00,0x04,0x00,0x09,0x00,0x00,0x00,0x20,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x2b,0x00,0x04,0x00,0x09,0x00,0x00,0x00,0x0a,0x00,0x00,0x00, + 0x01,0x00,0x00,0x00,0x1c,0x00,0x04,0x00,0x0b,0x00,0x00,0x00,0x07,0x00,0x00,0x00, + 0x0a,0x00,0x00,0x00,0x1e,0x00,0x06,0x00,0x0c,0x00,0x00,0x00,0x08,0x00,0x00,0x00, + 0x07,0x00,0x00,0x00,0x0b,0x00,0x00,0x00,0x0b,0x00,0x00,0x00,0x20,0x00,0x04,0x00, + 0x0d,0x00,0x00,0x00,0x03,0x00,0x00,0x00,0x0c,0x00,0x00,0x00,0x3b,0x00,0x04,0x00, + 0x0d,0x00,0x00,0x00,0x0e,0x00,0x00,0x00,0x03,0x00,0x00,0x00,0x15,0x00,0x04,0x00, + 0x0f,0x00,0x00,0x00,0x20,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x2b,0x00,0x04,0x00, + 0x0f,0x00,0x00,0x00,0x10,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x18,0x00,0x04,0x00, + 0x11,0x00,0x00,0x00,0x08,0x00,0x00,0x00,0x04,0x00,0x00,0x00,0x1e,0x00,0x04,0x00, + 0x12,0x00,0x00,0x00,0x11,0x00,0x00,0x00,0x11,0x00,0x00,0x00,0x20,0x00,0x04,0x00, + 0x13,0x00,0x00,0x00,0x02,0x00,0x00,0x00,0x12,0x00,0x00,0x00,0x3b,0x00,0x04,0x00, + 0x13,0x00,0x00,0x00,0x14,0x00,0x00,0x00,0x02,0x00,0x00,0x00,0x20,0x00,0x04,0x00, + 0x15,0x00,0x00,0x00,0x02,0x00,0x00,0x00,0x11,0x00,0x00,0x00,0x20,0x00,0x04,0x00, + 0x18,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x08,0x00,0x00,0x00,0x3b,0x00,0x04,0x00, + 0x18,0x00,0x00,0x00,0x19,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x20,0x00,0x04,0x00, + 
0x1c,0x00,0x00,0x00,0x03,0x00,0x00,0x00,0x08,0x00,0x00,0x00,0x3b,0x00,0x04,0x00, + 0x1c,0x00,0x00,0x00,0x1e,0x00,0x00,0x00,0x03,0x00,0x00,0x00,0x2b,0x00,0x04,0x00, + 0x0f,0x00,0x00,0x00,0x1f,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x17,0x00,0x04,0x00, + 0x22,0x00,0x00,0x00,0x07,0x00,0x00,0x00,0x02,0x00,0x00,0x00,0x20,0x00,0x04,0x00, + 0x23,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x22,0x00,0x00,0x00,0x3b,0x00,0x04,0x00, + 0x23,0x00,0x00,0x00,0x24,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x2b,0x00,0x04,0x00, + 0x07,0x00,0x00,0x00,0x26,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x2b,0x00,0x04,0x00, + 0x07,0x00,0x00,0x00,0x27,0x00,0x00,0x00,0x00,0x00,0x80,0x3f,0x3b,0x00,0x04,0x00, + 0x1c,0x00,0x00,0x00,0x2c,0x00,0x00,0x00,0x03,0x00,0x00,0x00,0x3b,0x00,0x04,0x00, + 0x18,0x00,0x00,0x00,0x2d,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x20,0x00,0x04,0x00, + 0x2f,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x0f,0x00,0x00,0x00,0x3b,0x00,0x04,0x00, + 0x2f,0x00,0x00,0x00,0x30,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x3b,0x00,0x04,0x00, + 0x2f,0x00,0x00,0x00,0x31,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x36,0x00,0x05,0x00, + 0x03,0x00,0x00,0x00,0x05,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x04,0x00,0x00,0x00, + 0xf8,0x00,0x02,0x00,0x06,0x00,0x00,0x00,0x08,0x00,0x04,0x00,0x01,0x00,0x00,0x00, + 0x11,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x41,0x00,0x05,0x00,0x15,0x00,0x00,0x00, + 0x16,0x00,0x00,0x00,0x14,0x00,0x00,0x00,0x10,0x00,0x00,0x00,0x3d,0x00,0x04,0x00, + 0x11,0x00,0x00,0x00,0x17,0x00,0x00,0x00,0x16,0x00,0x00,0x00,0x3d,0x00,0x04,0x00, + 0x08,0x00,0x00,0x00,0x1a,0x00,0x00,0x00,0x19,0x00,0x00,0x00,0x91,0x00,0x05,0x00, + 0x08,0x00,0x00,0x00,0x1b,0x00,0x00,0x00,0x17,0x00,0x00,0x00,0x1a,0x00,0x00,0x00, + 0x41,0x00,0x05,0x00,0x1c,0x00,0x00,0x00,0x1d,0x00,0x00,0x00,0x0e,0x00,0x00,0x00, + 0x10,0x00,0x00,0x00,0x3e,0x00,0x03,0x00,0x1d,0x00,0x00,0x00,0x1b,0x00,0x00,0x00, + 0x08,0x00,0x04,0x00,0x01,0x00,0x00,0x00,0x12,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x41,0x00,0x05,0x00,0x15,0x00,0x00,0x00,0x20,0x00,0x00,0x00,0x14,0x00,0x00,0x00, + 0x1f,0x00,0x00,0x00,0x3d,0x00,0x04,0x00,0x11,0x00,0x00,0x00,0x21,0x00,0x00,0x00, + 0x20,0x00,0x00,0x00,0x3d,0x00,0x04,0x00,0x22,0x00,0x00,0x00,0x25,0x00,0x00,0x00, + 0x24,0x00,0x00,0x00,0x51,0x00,0x05,0x00,0x07,0x00,0x00,0x00,0x28,0x00,0x00,0x00, + 0x25,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x51,0x00,0x05,0x00,0x07,0x00,0x00,0x00, + 0x29,0x00,0x00,0x00,0x25,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x50,0x00,0x07,0x00, + 0x08,0x00,0x00,0x00,0x2a,0x00,0x00,0x00,0x28,0x00,0x00,0x00,0x29,0x00,0x00,0x00, + 0x26,0x00,0x00,0x00,0x27,0x00,0x00,0x00,0x91,0x00,0x05,0x00,0x08,0x00,0x00,0x00, + 0x2b,0x00,0x00,0x00,0x21,0x00,0x00,0x00,0x2a,0x00,0x00,0x00,0x3e,0x00,0x03,0x00, + 0x1e,0x00,0x00,0x00,0x2b,0x00,0x00,0x00,0x08,0x00,0x04,0x00,0x01,0x00,0x00,0x00, + 0x13,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x3d,0x00,0x04,0x00,0x08,0x00,0x00,0x00, + 0x2e,0x00,0x00,0x00,0x2d,0x00,0x00,0x00,0x3e,0x00,0x03,0x00,0x2c,0x00,0x00,0x00, + 0x2e,0x00,0x00,0x00,0xfd,0x00,0x01,0x00,0x38,0x00,0x01,0x00, +}; +static const uint8_t fs_bytecode_wgpu[980] = { + 0x03,0x02,0x23,0x07,0x00,0x00,0x01,0x00,0x08,0x00,0x08,0x00,0x1e,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x11,0x00,0x02,0x00,0x01,0x00,0x00,0x00,0x0b,0x00,0x06,0x00, + 0x02,0x00,0x00,0x00,0x47,0x4c,0x53,0x4c,0x2e,0x73,0x74,0x64,0x2e,0x34,0x35,0x30, + 0x00,0x00,0x00,0x00,0x0e,0x00,0x03,0x00,0x00,0x00,0x00,0x00,0x01,0x00,0x00,0x00, + 0x0f,0x00,0x08,0x00,0x04,0x00,0x00,0x00,0x05,0x00,0x00,0x00,0x6d,0x61,0x69,0x6e, + 0x00,0x00,0x00,0x00,0x0a,0x00,0x00,0x00,0x12,0x00,0x00,0x00,0x1b,0x00,0x00,0x00, + 
0x10,0x00,0x03,0x00,0x05,0x00,0x00,0x00,0x07,0x00,0x00,0x00,0x07,0x00,0x03,0x00, + 0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x03,0x00,0x37,0x00,0x02,0x00,0x00,0x00, + 0xc2,0x01,0x00,0x00,0x01,0x00,0x00,0x00,0x2f,0x2f,0x20,0x4f,0x70,0x4d,0x6f,0x64, + 0x75,0x6c,0x65,0x50,0x72,0x6f,0x63,0x65,0x73,0x73,0x65,0x64,0x20,0x63,0x6c,0x69, + 0x65,0x6e,0x74,0x20,0x76,0x75,0x6c,0x6b,0x61,0x6e,0x31,0x30,0x30,0x0a,0x2f,0x2f, + 0x20,0x4f,0x70,0x4d,0x6f,0x64,0x75,0x6c,0x65,0x50,0x72,0x6f,0x63,0x65,0x73,0x73, + 0x65,0x64,0x20,0x63,0x6c,0x69,0x65,0x6e,0x74,0x20,0x6f,0x70,0x65,0x6e,0x67,0x6c, + 0x31,0x30,0x30,0x0a,0x2f,0x2f,0x20,0x4f,0x70,0x4d,0x6f,0x64,0x75,0x6c,0x65,0x50, + 0x72,0x6f,0x63,0x65,0x73,0x73,0x65,0x64,0x20,0x74,0x61,0x72,0x67,0x65,0x74,0x2d, + 0x65,0x6e,0x76,0x20,0x76,0x75,0x6c,0x6b,0x61,0x6e,0x31,0x2e,0x30,0x0a,0x2f,0x2f, + 0x20,0x4f,0x70,0x4d,0x6f,0x64,0x75,0x6c,0x65,0x50,0x72,0x6f,0x63,0x65,0x73,0x73, + 0x65,0x64,0x20,0x74,0x61,0x72,0x67,0x65,0x74,0x2d,0x65,0x6e,0x76,0x20,0x6f,0x70, + 0x65,0x6e,0x67,0x6c,0x0a,0x2f,0x2f,0x20,0x4f,0x70,0x4d,0x6f,0x64,0x75,0x6c,0x65, + 0x50,0x72,0x6f,0x63,0x65,0x73,0x73,0x65,0x64,0x20,0x65,0x6e,0x74,0x72,0x79,0x2d, + 0x70,0x6f,0x69,0x6e,0x74,0x20,0x6d,0x61,0x69,0x6e,0x0a,0x23,0x6c,0x69,0x6e,0x65, + 0x20,0x31,0x0a,0x00,0x05,0x00,0x04,0x00,0x05,0x00,0x00,0x00,0x6d,0x61,0x69,0x6e, + 0x00,0x00,0x00,0x00,0x05,0x00,0x05,0x00,0x0a,0x00,0x00,0x00,0x66,0x72,0x61,0x67, + 0x5f,0x63,0x6f,0x6c,0x6f,0x72,0x00,0x00,0x05,0x00,0x03,0x00,0x0f,0x00,0x00,0x00, + 0x74,0x65,0x78,0x00,0x05,0x00,0x03,0x00,0x12,0x00,0x00,0x00,0x75,0x76,0x00,0x00, + 0x05,0x00,0x04,0x00,0x1b,0x00,0x00,0x00,0x63,0x6f,0x6c,0x6f,0x72,0x00,0x00,0x00, + 0x47,0x00,0x04,0x00,0x0a,0x00,0x00,0x00,0x1e,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x47,0x00,0x04,0x00,0x0f,0x00,0x00,0x00,0x1e,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x47,0x00,0x04,0x00,0x0f,0x00,0x00,0x00,0x22,0x00,0x00,0x00,0x02,0x00,0x00,0x00, + 0x47,0x00,0x04,0x00,0x0f,0x00,0x00,0x00,0x21,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x47,0x00,0x04,0x00,0x12,0x00,0x00,0x00,0x1e,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x47,0x00,0x04,0x00,0x1b,0x00,0x00,0x00,0x1e,0x00,0x00,0x00,0x01,0x00,0x00,0x00, + 0x13,0x00,0x02,0x00,0x03,0x00,0x00,0x00,0x21,0x00,0x03,0x00,0x04,0x00,0x00,0x00, + 0x03,0x00,0x00,0x00,0x16,0x00,0x03,0x00,0x07,0x00,0x00,0x00,0x20,0x00,0x00,0x00, + 0x17,0x00,0x04,0x00,0x08,0x00,0x00,0x00,0x07,0x00,0x00,0x00,0x04,0x00,0x00,0x00, + 0x20,0x00,0x04,0x00,0x09,0x00,0x00,0x00,0x03,0x00,0x00,0x00,0x08,0x00,0x00,0x00, + 0x3b,0x00,0x04,0x00,0x09,0x00,0x00,0x00,0x0a,0x00,0x00,0x00,0x03,0x00,0x00,0x00, + 0x2b,0x00,0x04,0x00,0x07,0x00,0x00,0x00,0x0b,0x00,0x00,0x00,0x00,0x00,0x80,0x3f, + 0x19,0x00,0x09,0x00,0x0c,0x00,0x00,0x00,0x07,0x00,0x00,0x00,0x01,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x01,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x1b,0x00,0x03,0x00,0x0d,0x00,0x00,0x00,0x0c,0x00,0x00,0x00, + 0x20,0x00,0x04,0x00,0x0e,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x0d,0x00,0x00,0x00, + 0x3b,0x00,0x04,0x00,0x0e,0x00,0x00,0x00,0x0f,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x20,0x00,0x04,0x00,0x11,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x08,0x00,0x00,0x00, + 0x3b,0x00,0x04,0x00,0x11,0x00,0x00,0x00,0x12,0x00,0x00,0x00,0x01,0x00,0x00,0x00, + 0x17,0x00,0x04,0x00,0x13,0x00,0x00,0x00,0x07,0x00,0x00,0x00,0x02,0x00,0x00,0x00, + 0x3b,0x00,0x04,0x00,0x11,0x00,0x00,0x00,0x1b,0x00,0x00,0x00,0x01,0x00,0x00,0x00, + 0x36,0x00,0x05,0x00,0x03,0x00,0x00,0x00,0x05,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 
0x04,0x00,0x00,0x00,0xf8,0x00,0x02,0x00,0x06,0x00,0x00,0x00,0x08,0x00,0x04,0x00, + 0x01,0x00,0x00,0x00,0x0b,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x3d,0x00,0x04,0x00, + 0x0d,0x00,0x00,0x00,0x10,0x00,0x00,0x00,0x0f,0x00,0x00,0x00,0x3d,0x00,0x04,0x00, + 0x08,0x00,0x00,0x00,0x14,0x00,0x00,0x00,0x12,0x00,0x00,0x00,0x4f,0x00,0x07,0x00, + 0x13,0x00,0x00,0x00,0x15,0x00,0x00,0x00,0x14,0x00,0x00,0x00,0x14,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x57,0x00,0x05,0x00,0x08,0x00,0x00,0x00, + 0x16,0x00,0x00,0x00,0x10,0x00,0x00,0x00,0x15,0x00,0x00,0x00,0x51,0x00,0x05,0x00, + 0x07,0x00,0x00,0x00,0x19,0x00,0x00,0x00,0x16,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x50,0x00,0x07,0x00,0x08,0x00,0x00,0x00,0x1a,0x00,0x00,0x00,0x0b,0x00,0x00,0x00, + 0x0b,0x00,0x00,0x00,0x0b,0x00,0x00,0x00,0x19,0x00,0x00,0x00,0x3d,0x00,0x04,0x00, + 0x08,0x00,0x00,0x00,0x1c,0x00,0x00,0x00,0x1b,0x00,0x00,0x00,0x85,0x00,0x05,0x00, + 0x08,0x00,0x00,0x00,0x1d,0x00,0x00,0x00,0x1a,0x00,0x00,0x00,0x1c,0x00,0x00,0x00, + 0x3e,0x00,0x03,0x00,0x0a,0x00,0x00,0x00,0x1d,0x00,0x00,0x00,0xfd,0x00,0x01,0x00, + 0x38,0x00,0x01,0x00, +}; +#elif defined(SOKOL_DUMMY_BACKEND) +static const char* _sfons_vs_source_dummy = ""; +static const char* _sfons_fs_source_dummy = ""; +#else +#error "Please define one of SOKOL_GLCORE33, SOKOL_GLES2, SOKOL_GLES3, SOKOL_D3D11, SOKOL_METAL, SOKOL_WGPU or SOKOL_DUMMY_BACKEND!" +#endif + +typedef struct _sfons_t { + sg_shader shd; + sgl_pipeline pip; + sg_image img; + int width, height; + bool img_dirty; +} _sfons_t; + +static int _sfons_render_create(void* user_ptr, int width, int height) { + SOKOL_ASSERT(user_ptr && (width > 8) && (height > 8)); + _sfons_t* sfons = (_sfons_t*) user_ptr; + + /* sokol-gl compatible shader which treats RED channel as alpha */ + if (sfons->shd.id == SG_INVALID_ID) { + sg_shader_desc shd_desc; + memset(&shd_desc, 0, sizeof(shd_desc)); + shd_desc.attrs[0].name = "position"; + shd_desc.attrs[1].name = "texcoord0"; + shd_desc.attrs[2].name = "color0"; + shd_desc.attrs[0].sem_name = "TEXCOORD"; + shd_desc.attrs[0].sem_index = 0; + shd_desc.attrs[1].sem_name = "TEXCOORD"; + shd_desc.attrs[1].sem_index = 1; + shd_desc.attrs[2].sem_name = "TEXCOORD"; + shd_desc.attrs[2].sem_index = 2; + sg_shader_uniform_block_desc* ub = &shd_desc.vs.uniform_blocks[0]; + ub->size = 128; + ub->uniforms[0].name = "vs_params"; + ub->uniforms[0].type = SG_UNIFORMTYPE_FLOAT4; + ub->uniforms[0].array_count = 8; + shd_desc.fs.images[0].name = "tex"; + shd_desc.fs.images[0].image_type = SG_IMAGETYPE_2D; + shd_desc.fs.images[0].sampler_type = SG_SAMPLERTYPE_FLOAT; + shd_desc.label = "sokol-fontstash-shader"; + #if defined(SOKOL_GLCORE33) + shd_desc.vs.source = _sfons_vs_source_glsl330; + shd_desc.fs.source = _sfons_fs_source_glsl330; + #elif defined(SOKOL_GLES2) || defined(SOKOL_GLES3) + shd_desc.vs.source = _sfons_vs_source_glsl100; + shd_desc.fs.source = _sfons_fs_source_glsl100; + #elif defined(SOKOL_METAL) + shd_desc.vs.entry = "main0"; + shd_desc.fs.entry = "main0"; + switch (sg_query_backend()) { + case SG_BACKEND_METAL_MACOS: + shd_desc.vs.bytecode = SG_RANGE(_sfons_vs_bytecode_metal_macos); + shd_desc.fs.bytecode = SG_RANGE(_sfons_fs_bytecode_metal_macos); + break; + case SG_BACKEND_METAL_IOS: + shd_desc.vs.bytecode = SG_RANGE(_sfons_vs_bytecode_metal_ios); + shd_desc.fs.bytecode = SG_RANGE(_sfons_fs_bytecode_metal_ios); + break; + default: + shd_desc.vs.source = _sfons_vs_source_metal_sim; + shd_desc.fs.source = _sfons_fs_source_metal_sim; + break; + } + #elif defined(SOKOL_D3D11) + shd_desc.vs.bytecode 
= SG_RANGE(_sfons_vs_bytecode_hlsl4); + shd_desc.fs.bytecode = SG_RANGE(_sfons_fs_bytecode_hlsl4); + #elif defined(SOKOL_WGPU) + shd_desc.vs.byte_code = _sfons_vs_bytecode_wgpu; + shd_desc.vs.byte_code_size = sizeof(_sfons_vs_bytecode_wgpu); + shd_desc.fs.byte_code = _sfons_fs_bytecode_wgpu; + shd_desc.fs.byte_code_size = sizeof(_sfons_fs_bytecode_wgpu); + #else + shd_desc.vs.source = _sfons_vs_src_dummy; + shd_desc.fs.source = _sfons_fs_src_dummy; + #endif + shd_desc.label = "sfons-shader"; + sfons->shd = sg_make_shader(&shd_desc); + } + + /* sokol-gl pipeline object */ + if (sfons->pip.id == SG_INVALID_ID) { + sg_pipeline_desc pip_desc; + memset(&pip_desc, 0, sizeof(pip_desc)); + pip_desc.shader = sfons->shd; + pip_desc.colors[0].blend.enabled = true; + pip_desc.colors[0].blend.src_factor_rgb = SG_BLENDFACTOR_SRC_ALPHA; + pip_desc.colors[0].blend.dst_factor_rgb = SG_BLENDFACTOR_ONE_MINUS_SRC_ALPHA; + sfons->pip = sgl_make_pipeline(&pip_desc); + } + + /* create or re-create font atlas texture */ + if (sfons->img.id != SG_INVALID_ID) { + sg_destroy_image(sfons->img); + sfons->img.id = SG_INVALID_ID; + } + sfons->width = width; + sfons->height = height; + + SOKOL_ASSERT(sfons->img.id == SG_INVALID_ID); + sg_image_desc img_desc; + memset(&img_desc, 0, sizeof(img_desc)); + img_desc.width = sfons->width; + img_desc.height = sfons->height; + img_desc.min_filter = SG_FILTER_LINEAR; + img_desc.mag_filter = SG_FILTER_LINEAR; + img_desc.usage = SG_USAGE_DYNAMIC; + img_desc.pixel_format = SG_PIXELFORMAT_R8; + sfons->img = sg_make_image(&img_desc); + return 1; +} + +static int _sfons_render_resize(void* user_ptr, int width, int height) { + return _sfons_render_create(user_ptr, width, height); +} + +static void _sfons_render_update(void* user_ptr, int* rect, const unsigned char* data) { + SOKOL_ASSERT(user_ptr && rect && data); + _SOKOL_UNUSED(rect); + _SOKOL_UNUSED(data); + _sfons_t* sfons = (_sfons_t*) user_ptr; + sfons->img_dirty = true; +} + +static void _sfons_render_draw(void* user_ptr, const float* verts, const float* tcoords, const unsigned int* colors, int nverts) { + SOKOL_ASSERT(user_ptr && verts && tcoords && colors && (nverts > 0)); + _sfons_t* sfons = (_sfons_t*) user_ptr; + sgl_enable_texture(); + sgl_texture(sfons->img); + sgl_push_pipeline(); + sgl_load_pipeline(sfons->pip); + sgl_begin_triangles(); + for (int i = 0; i < nverts; i++) { + sgl_v2f_t2f_c1i(verts[2*i+0], verts[2*i+1], tcoords[2*i+0], tcoords[2*i+1], colors[i]); + } + sgl_end(); + sgl_pop_pipeline(); + sgl_disable_texture(); +} + +static void _sfons_render_delete(void* user_ptr) { + SOKOL_ASSERT(user_ptr); + _sfons_t* sfons = (_sfons_t*) user_ptr; + if (sfons->img.id != SG_INVALID_ID) { + sg_destroy_image(sfons->img); + sfons->img.id = SG_INVALID_ID; + } + if (sfons->pip.id != SG_INVALID_ID) { + sgl_destroy_pipeline(sfons->pip); + sfons->pip.id = SG_INVALID_ID; + } + if (sfons->shd.id != SG_INVALID_ID) { + sg_destroy_shader(sfons->shd); + sfons->shd.id = SG_INVALID_ID; + } + SOKOL_FREE(sfons); +} + +// NOTE clang analyzer will report a potential memory leak for the call +// to SOKOL_MALLOC in the sfons_create() function, this is a false positive +// (the freeing happens in _sfons_render_delete()). 
The following macro
+// silences the false positive when compilation happens with the analyzer active
+#if __clang_analyzer__
+#define _SFONS_CLANG_ANALYZER_SILENCE_POTENTIAL_LEAK_FALSE_POSITIVE(x) SOKOL_FREE(x)
+#else
+#define _SFONS_CLANG_ANALYZER_SILENCE_POTENTIAL_LEAK_FALSE_POSITIVE(x)
+#endif
+
+SOKOL_API_IMPL FONScontext* sfons_create(int width, int height, int flags) {
+ SOKOL_ASSERT((width > 0) && (height > 0));
+ FONSparams params;
+ _sfons_t* sfons = (_sfons_t*) SOKOL_MALLOC(sizeof(_sfons_t));
+ memset(sfons, 0, sizeof(_sfons_t));
+ memset(&params, 0, sizeof(params));
+ params.width = width;
+ params.height = height;
+ params.flags = (unsigned char) flags;
+ params.renderCreate = _sfons_render_create;
+ params.renderResize = _sfons_render_resize;
+ params.renderUpdate = _sfons_render_update;
+ params.renderDraw = _sfons_render_draw;
+ params.renderDelete = _sfons_render_delete;
+ params.userPtr = sfons;
+ FONScontext* ctx = fonsCreateInternal(&params);
+ _SFONS_CLANG_ANALYZER_SILENCE_POTENTIAL_LEAK_FALSE_POSITIVE(sfons);
+ return ctx;
+}
+
+SOKOL_API_IMPL void sfons_destroy(FONScontext* ctx) {
+ SOKOL_ASSERT(ctx);
+ fonsDeleteInternal(ctx);
+}
+
+SOKOL_API_IMPL void sfons_flush(FONScontext* ctx) {
+ SOKOL_ASSERT(ctx && ctx->params.userPtr);
+ _sfons_t* sfons = (_sfons_t*) ctx->params.userPtr;
+ if (sfons->img_dirty) {
+ sfons->img_dirty = false;
+ sg_image_data data;
+ memset(&data, 0, sizeof(data));
+ data.subimage[0][0].ptr = ctx->texData;
+ data.subimage[0][0].size = (size_t) (sfons->width * sfons->height);
+ sg_update_image(sfons->img, &data);
+ }
+}
+
+SOKOL_API_IMPL uint32_t sfons_rgba(uint8_t r, uint8_t g, uint8_t b, uint8_t a) {
+ return ((uint32_t)r) | ((uint32_t)g<<8) | ((uint32_t)b<<16) | ((uint32_t)a<<24);
+}
+
+#endif /* SOKOL_FONTSTASH_IMPL */
diff --git a/v_windows/v/old/thirdparty/sokol/util/sokol_gl.h b/v_windows/v/old/thirdparty/sokol/util/sokol_gl.h
new file mode 100644
index 0000000..e41fc06
--- /dev/null
+++ b/v_windows/v/old/thirdparty/sokol/util/sokol_gl.h
@@ -0,0 +1,3297 @@
+#if defined(SOKOL_IMPL) && !defined(SOKOL_GL_IMPL)
+#define SOKOL_GL_IMPL
+#endif
+#ifndef SOKOL_GL_INCLUDED
+/*
+ sokol_gl.h -- OpenGL 1.x style rendering on top of sokol_gfx.h
+
+ Project URL: https://github.com/floooh/sokol
+
+ Do this:
+ #define SOKOL_IMPL or
+ #define SOKOL_GL_IMPL
+ before you include this file in *one* C or C++ file to create the
+ implementation.
+
+ The following defines are used by the implementation to select the
+ platform-specific embedded shader code (these are the same defines as
+ used by sokol_gfx.h and sokol_app.h):
+
+ SOKOL_GLCORE33
+ SOKOL_GLES2
+ SOKOL_GLES3
+ SOKOL_D3D11
+ SOKOL_METAL
+ SOKOL_WGPU
+
+ ...optionally provide the following macros to override defaults:
+
+ SOKOL_ASSERT(c) - your own assert macro (default: assert(c))
+ SOKOL_MALLOC(s) - your own malloc function (default: malloc(s))
+ SOKOL_FREE(p) - your own free function (default: free(p))
+ SOKOL_GL_API_DECL - public function declaration prefix (default: extern)
+ SOKOL_API_DECL - same as SOKOL_GL_API_DECL
+ SOKOL_API_IMPL - public function implementation prefix (default: -)
+ SOKOL_LOG(msg) - your own logging function (default: puts(msg))
+ SOKOL_UNREACHABLE() - a guard macro for unreachable code (default: assert(false))
+
+ If sokol_gl.h is compiled as a DLL, define the following before
+ including the declaration or implementation:
+
+ SOKOL_DLL
+
+ On Windows, SOKOL_DLL will define SOKOL_GL_API_DECL as __declspec(dllexport)
+ or __declspec(dllimport) as needed.
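+
+ For illustration only (an editorial sketch, not part of the upstream sokol_gl.h
+ documentation; the file name sokol_impl.c and the GLCORE33 backend are
+ arbitrary example choices), a dedicated implementation translation unit
+ could look like this:
+
+     // sokol_impl.c (hypothetical)
+     #define SOKOL_IMPL        // compile the sokol implementations in this one file
+     #define SOKOL_GLCORE33    // pick exactly one backend define from the list above
+     #include "sokol_gfx.h"    // sokol_gfx.h has to come first (see the note below)
+     #include "sokol_gl.h"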
+ + Include the following headers before including sokol_gl.h: + + sokol_gfx.h + + Matrix functions have been taken from MESA and Regal. + + FEATURE OVERVIEW: + ================= + sokol_gl.h implements a subset of the OpenGLES 1.x feature set useful for + when you just want to quickly render a bunch of colored triangles or + lines without having to mess with buffers and + shaders. + + The current feature set is mostly useful for debug visualizations + and simple UI-style 2D rendering: + + What's implemented: + - vertex components: + - position (x, y, z) + - 2D texture coords (u, v) + - color (r, g, b, a) + - primitive types: + - triangle list and strip + - line list and strip + - quad list (TODO: quad strips) + - point list (TODO: point size) + - one texture layer (no multi-texturing) + - viewport and scissor-rect with selectable origin (top-left or bottom-left) + - all GL 1.x matrix stack functions, and additionally equivalent + functions for gluPerspective and gluLookat + + Notable GLES 1.x features that are *NOT* implemented: + - vertex lighting (this is the most likely GL feature that might be added later) + - vertex arrays (although providing whole chunks of vertex data at once + might be a useful feature for a later version) + - texture coordinate generation + - point size and line width + - all pixel store functions + - no ALPHA_TEST + - no clear functions (clearing is handled by the sokol-gfx render pass) + - fog + + Notable differences to GL: + - No "enum soup" for render states etc, instead there's a + 'pipeline stack', this is similar to GL's matrix stack, + but for pipeline-state-objects. The pipeline object at + the top of the pipeline stack defines the active set of render states + - All angles are in radians, not degrees (note the sgl_rad() and + sgl_deg() conversion functions) + - No enable/disable state for scissor test, this is always enabled + + STEP BY STEP: + ============= + --- To initialize sokol-gl, call: + + sgl_setup(const sgl_desc_t* desc) + + NOTE that sgl_setup() must be called *after* initializing sokol-gfx + (via sg_setup). This is because sgl_setup() needs to create + sokol-gfx resource objects. + + sgl_setup() needs to know the attributes of the sokol-gfx render pass + where sokol-gl rendering will happen through the passed-in sgl_desc_t + struct: + + sg_pixel_format color_format - color pixel format of render pass + sg_pixel_format depth_format - depth pixel format of render pass + int sample_count - MSAA sample count of render pass + + These values have the same defaults as sokol_gfx.h and sokol_app.h, + to use the default values, leave them zero-initialized. + + You can adjust the maximum number of vertices and drawing commands + per frame through the members: + + int max_vertices - default is 65536 + int max_commands - default is 16384 + + You can adjust the size of the internal pipeline state object pool + with: + + int pipeline_pool_size - default is 64 + + Finally you can change the face winding for front-facing triangles + and quads: + + sg_face_winding face_winding - default is SG_FACEWINDING_CCW + + The default winding for front faces is counter-clock-wise. This is + the same as OpenGL's default, but different from sokol-gfx. + + --- Optionally create pipeline-state-objects if you need render state + that differs from sokol-gl's default state: + + sgl_pipeline pip = sgl_make_pipeline(const sg_pipeline_desc* desc) + + The similarity with sokol_gfx.h's sg_pipeline type and sg_make_pipeline() + function is intended. 
sgl_make_pipeline() also takes a standard + sokol-gfx sg_pipeline_desc object to describe the render state, but + without: + - shader + - vertex layout + - color- and depth-pixel-formats + - primitive type (lines, triangles, ...) + - MSAA sample count + Those will be filled in by sgl_make_pipeline(). Note that each + call to sgl_make_pipeline() needs to create several sokol-gfx + pipeline objects (one for each primitive type). + + --- if you need to destroy sgl_pipeline objects before sgl_shutdown(): + + sgl_destroy_pipeline(sgl_pipeline pip) + + --- After sgl_setup() you can call any of the sokol-gl functions anywhere + in a frame, *except* sgl_draw(). The 'vanilla' functions + will only change internal sokol-gl state, and not call any sokol-gfx + functions. + + --- Unlike OpenGL, sokol-gl has a function to reset internal state to + a known default. This is useful at the start of a sequence of + rendering operations: + + void sgl_defaults(void) + + This will set the following default state: + + - current texture coordinate to u=0.0f, v=0.0f + - current color to white (rgba all 1.0f) + - unbind the current texture and texturing will be disabled + - *all* matrices will be set to identity (also the projection matrix) + - the default render state will be set by loading the 'default pipeline' + into the top of the pipeline stack + + The current matrix- and pipeline-stack-depths will not be changed by + sgl_defaults(). + + --- change the currently active renderstate through the + pipeline-stack functions, this works similar to the + traditional GL matrix stack: + + ...load the default pipeline state on the top of the pipeline stack: + + sgl_default_pipeline() + + ...load a specific pipeline on the top of the pipeline stack: + + sgl_load_pipeline(sgl_pipeline pip) + + ...push and pop the pipeline stack: + sgl_push_pipeline() + sgl_pop_pipeline() + + --- control texturing with: + + sgl_enable_texture() + sgl_disable_texture() + sgl_texture(sg_image img) + + --- set the current viewport and scissor rect with: + + sgl_viewport(int x, int y, int w, int h, bool origin_top_left) + sgl_scissor_rect(int x, int y, int w, int h, bool origin_top_left) + + ...or call these alternatives which take float arguments (this might allow + to avoid casting between float and integer in more strongly typed languages + when floating point pixel coordinates are used): + + sgl_viewportf(float x, float y, float w, float h, bool origin_top_left) + sgl_scissor_rectf(float x, float y, float w, float h, bool origin_top_left) + + ...these calls add a new command to the internal command queue, so + that the viewport or scissor rect are set at the right time relative + to other sokol-gl calls. + + --- adjust the transform matrices, matrix manipulation works just like + the OpenGL matrix stack: + + ...set the current matrix mode: + + sgl_matrix_mode_modelview() + sgl_matrix_mode_projection() + sgl_matrix_mode_texture() + + ...load the identity matrix into the current matrix: + + sgl_load_identity() + + ...translate, rotate and scale the current matrix: + + sgl_translate(float x, float y, float z) + sgl_rotate(float angle_rad, float x, float y, float z) + sgl_scale(float x, float y, float z) + + NOTE that all angles in sokol-gl are in radians, not in degree. 
+ Convert between radians and degree with the helper functions: + + float sgl_rad(float deg) - degrees to radians + float sgl_deg(float rad) - radians to degrees + + ...directly load the current matrix from a float[16] array: + + sgl_load_matrix(const float m[16]) + sgl_load_transpose_matrix(const float m[16]) + + ...directly multiply the current matrix from a float[16] array: + + sgl_mult_matrix(const float m[16]) + sgl_mult_transpose_matrix(const float m[16]) + + The memory layout of those float[16] arrays is the same as in OpenGL. + + ...more matrix functions: + + sgl_frustum(float left, float right, float bottom, float top, float near, float far) + sgl_ortho(float left, float right, float bottom, float top, float near, float far) + sgl_perspective(float fov_y, float aspect, float near, float far) + sgl_lookat(float eye_x, float eye_y, float eye_z, float center_x, float center_y, float center_z, float up_x, float up_y, float up_z) + + These functions work the same as glFrustum(), glOrtho(), gluPerspective() + and gluLookAt(). + + ...and finally to push / pop the current matrix stack: + + sgl_push_matrix(void) + sgl_pop_matrix(void) + + Again, these work the same as glPushMatrix() and glPopMatrix(). + + --- perform primitive rendering: + + ...set the current texture coordinate and color 'registers' with: + + sgl_t2f(float u, float v) - set current texture coordinate + sgl_c*(...) - set current color + + There are several functions for setting the color (as float values, + unsigned byte values, packed as unsigned 32-bit integer, with + and without alpha). + + NOTE that these are the only functions that can be called both inside + sgl_begin_*() / sgl_end() and outside. + + ...start a primitive vertex sequence with: + + sgl_begin_points() + sgl_begin_lines() + sgl_begin_line_strip() + sgl_begin_triangles() + sgl_begin_triangle_strip() + sgl_begin_quads() + + ...after sgl_begin_*() specify vertices: + + sgl_v*(...) + sgl_v*_t*(...) + sgl_v*_c*(...) + sgl_v*_t*_c*(...) + + These functions write a new vertex to sokol-gl's internal vertex buffer, + optionally with texture-coords and color. If the texture coordinate + and/or color is missing, it will be taken from the current texture-coord + and color 'register'. + + ...finally, after specifying vertices, call: + + sgl_end() + + This will record a new draw command in sokol-gl's internal command + list, or it will extend the previous draw command if no relevant + state has changed since the last sgl_begin/end pair. + + --- inside a sokol-gfx rendering pass, call: + + sgl_draw() + + This will render everything that has been recorded since the last + call to sgl_draw() through sokol-gfx, and will 'rewind' the internal + vertex-, uniform- and command-buffers. + + --- sokol-gl tracks a single internal error code which can be + queried with + + sgl_error_t sgl_error(void) + + ...which can return the following error codes: + + SGL_NO_ERROR - all OK, no error occurred since last sgl_draw() + SGL_ERROR_VERTICES_FULL - internal vertex buffer is full (checked in sgl_end()) + SGL_ERROR_UNIFORMS_FULL - the internal uniforms buffer is full (checked in sgl_end()) + SGL_ERROR_COMMANDS_FULL - the internal command buffer is full (checked in sgl_end()) + SGL_ERROR_STACK_OVERFLOW - matrix- or pipeline-stack overflow + SGL_ERROR_STACK_UNDERFLOW - matrix- or pipeline-stack underflow + + ...if sokol-gl is in an error-state, sgl_draw() will skip any rendering, + and reset the error code to SGL_NO_ERROR. 
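+
+ A minimal per-frame sketch that ties the steps above together (an editorial
+ illustration only; the function name, the ortho ranges and the vertex values
+ are made up, the sgl_* calls are the ones documented above):
+
+     void draw_frame(void) {
+         sgl_defaults();
+         sgl_matrix_mode_projection();
+         sgl_ortho(0.0f, 640.0f, 480.0f, 0.0f, -1.0f, 1.0f);
+         sgl_begin_triangles();
+         sgl_v2f_c3f(320.0f, 100.0f, 1.0f, 0.0f, 0.0f);
+         sgl_v2f_c3f(160.0f, 380.0f, 0.0f, 1.0f, 0.0f);
+         sgl_v2f_c3f(480.0f, 380.0f, 0.0f, 0.0f, 1.0f);
+         sgl_end();
+         // ...later, inside the sokol-gfx render pass:
+         sgl_draw();
+     }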
+ + UNDER THE HOOD: + =============== + sokol_gl.h works by recording vertex data and rendering commands into + memory buffers, and then drawing the recorded commands via sokol_gfx.h + + The only functions which call into sokol_gfx.h are: + - sgl_setup() + - sgl_shutdown() + - sgl_draw() + + sgl_setup() must be called after initializing sokol-gfx. + sgl_shutdown() must be called before shutting down sokol-gfx. + sgl_draw() must be called once per frame inside a sokol-gfx render pass. + + All other sokol-gl function can be called anywhere in a frame, since + they just record data into memory buffers owned by sokol-gl. + + What happens in: + + sgl_setup(): + - 3 memory buffers are allocated, one for vertex data, + one for uniform data, and one for commands + - sokol-gfx resources are created: a (dynamic) vertex buffer, + a shader object (using embedded shader source or byte code), + and an 8x8 all-white default texture + + One vertex is 24 bytes: + - float3 position + - float2 texture coords + - uint32_t color + + One uniform block is 128 bytes: + - mat4 model-view-projection matrix + - mat4 texture matrix + + One draw command is ca. 24 bytes for the actual + command code plus command arguments. + + Each sgl_end() consumes one command, and one uniform block + (only when the matrices have changed). + The required size for one sgl_begin/end pair is (at most): + + (152 + 24 * num_verts) bytes + + sgl_shutdown(): + - all sokol-gfx resources (buffer, shader, default-texture and + all pipeline objects) are destroyed + - the 3 memory buffers are freed + + sgl_draw(): + - copy all recorded vertex data into the dynamic sokol-gfx buffer + via a call to sg_update_buffer() + - for each recorded command: + - if it's a viewport command, call sg_apply_viewport() + - if it's a scissor-rect command, call sg_apply_scissor_rect() + - if it's a draw command: + - depending on what has changed since the last draw command, + call sg_apply_pipeline(), sg_apply_bindings() and + sg_apply_uniforms() + - finally call sg_draw() + + All other functions only modify the internally tracked state, add + data to the vertex, uniform and command buffers, or manipulate + the matrix stack. + + ON DRAW COMMAND MERGING + ======================= + Not every call to sgl_end() will automatically record a new draw command. + If possible, the previous draw command will simply be extended, + resulting in fewer actual draw calls later in sgl_draw(). + + A draw command will be merged with the previous command if "no relevant + state has changed" since the last sgl_end(), meaning: + + - no calls to sgl_apply_viewport() and sgl_apply_scissor_rect() + - the primitive type hasn't changed + - the primitive type isn't a 'strip type' (no line or triangle strip) + - the pipeline state object hasn't changed + - none of the matrices has changed + - none of the texture state has changed + + Merging a draw command simply means that the number of vertices + to render in the previous draw command will be incremented by the + number of vertices in the new draw command. + + LICENSE + ======= + zlib/libpng license + + Copyright (c) 2018 Andre Weissflog + + This software is provided 'as-is', without any express or implied warranty. + In no event will the authors be held liable for any damages arising from the + use of this software. + + Permission is granted to anyone to use this software for any purpose, + including commercial applications, and to alter it and redistribute it + freely, subject to the following restrictions: + + 1. 
The origin of this software must not be misrepresented; you must not
+ claim that you wrote the original software. If you use this software in a
+ product, an acknowledgment in the product documentation would be
+ appreciated but is not required.
+
+ 2. Altered source versions must be plainly marked as such, and must not
+ be misrepresented as being the original software.
+
+ 3. This notice may not be removed or altered from any source
+ distribution.
+*/
+#define SOKOL_GL_INCLUDED (1)
+#include <stdint.h>
+#include <stdbool.h>
+
+#if !defined(SOKOL_GFX_INCLUDED)
+#error "Please include sokol_gfx.h before sokol_gl.h"
+#endif
+
+#if defined(SOKOL_API_DECL) && !defined(SOKOL_GL_API_DECL)
+#define SOKOL_GL_API_DECL SOKOL_API_DECL
+#endif
+#ifndef SOKOL_GL_API_DECL
+#if defined(_WIN32) && defined(SOKOL_DLL) && defined(SOKOL_GL_IMPL)
+#define SOKOL_GL_API_DECL __declspec(dllexport)
+#elif defined(_WIN32) && defined(SOKOL_DLL)
+#define SOKOL_GL_API_DECL __declspec(dllimport)
+#else
+#define SOKOL_GL_API_DECL extern
+#endif
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* sokol_gl pipeline handle (created with sgl_make_pipeline()) */
+typedef struct sgl_pipeline { uint32_t id; } sgl_pipeline;
+
+/*
+ sgl_error_t
+
+ Errors are reset each frame after calling sgl_draw(),
+ get the last error code with sgl_error()
+*/
+typedef enum sgl_error_t {
+ SGL_NO_ERROR = 0,
+ SGL_ERROR_VERTICES_FULL,
+ SGL_ERROR_UNIFORMS_FULL,
+ SGL_ERROR_COMMANDS_FULL,
+ SGL_ERROR_STACK_OVERFLOW,
+ SGL_ERROR_STACK_UNDERFLOW,
+} sgl_error_t;
+
+typedef struct sgl_desc_t {
+ int max_vertices; /* size for vertex buffer */
+ int max_commands; /* size of uniform- and command-buffers */
+ int pipeline_pool_size; /* size of the internal pipeline pool, default is 64 */
+ sg_pixel_format color_format;
+ sg_pixel_format depth_format;
+ int sample_count;
+ sg_face_winding face_winding; /* default front face winding is CCW */
+} sgl_desc_t;
+
+/* setup/shutdown/misc */
+SOKOL_GL_API_DECL void sgl_setup(const sgl_desc_t* desc);
+SOKOL_GL_API_DECL void sgl_shutdown(void);
+SOKOL_GL_API_DECL sgl_error_t sgl_error(void);
+SOKOL_GL_API_DECL void sgl_defaults(void);
+SOKOL_GL_API_DECL float sgl_rad(float deg);
+SOKOL_GL_API_DECL float sgl_deg(float rad);
+
+/* create and destroy pipeline objects */
+SOKOL_GL_API_DECL sgl_pipeline sgl_make_pipeline(const sg_pipeline_desc* desc);
+SOKOL_GL_API_DECL void sgl_destroy_pipeline(sgl_pipeline pip);
+
+/* render state functions */
+SOKOL_GL_API_DECL void sgl_viewport(int x, int y, int w, int h, bool origin_top_left);
+SOKOL_GL_API_DECL void sgl_viewportf(float x, float y, float w, float h, bool origin_top_left);
+SOKOL_GL_API_DECL void sgl_scissor_rect(int x, int y, int w, int h, bool origin_top_left);
+SOKOL_GL_API_DECL void sgl_scissor_rectf(float x, float y, float w, float h, bool origin_top_left);
+SOKOL_GL_API_DECL void sgl_enable_texture(void);
+SOKOL_GL_API_DECL void sgl_disable_texture(void);
+SOKOL_GL_API_DECL void sgl_texture(sg_image img);
+
+/* pipeline stack functions */
+SOKOL_GL_API_DECL void sgl_default_pipeline(void);
+SOKOL_GL_API_DECL void sgl_load_pipeline(sgl_pipeline pip);
+SOKOL_GL_API_DECL void sgl_push_pipeline(void);
+SOKOL_GL_API_DECL void sgl_pop_pipeline(void);
+
+/* matrix stack functions */
+SOKOL_GL_API_DECL void sgl_matrix_mode_modelview(void);
+SOKOL_GL_API_DECL void sgl_matrix_mode_projection(void);
+SOKOL_GL_API_DECL void sgl_matrix_mode_texture(void);
+SOKOL_GL_API_DECL void sgl_load_identity(void);
+SOKOL_GL_API_DECL void sgl_load_matrix(const float m[16]);
+SOKOL_GL_API_DECL
void sgl_load_transpose_matrix(const float m[16]); +SOKOL_GL_API_DECL void sgl_mult_matrix(const float m[16]); +SOKOL_GL_API_DECL void sgl_mult_transpose_matrix(const float m[16]); +SOKOL_GL_API_DECL void sgl_rotate(float angle_rad, float x, float y, float z); +SOKOL_GL_API_DECL void sgl_scale(float x, float y, float z); +SOKOL_GL_API_DECL void sgl_translate(float x, float y, float z); +SOKOL_GL_API_DECL void sgl_frustum(float l, float r, float b, float t, float n, float f); +SOKOL_GL_API_DECL void sgl_ortho(float l, float r, float b, float t, float n, float f); +SOKOL_GL_API_DECL void sgl_perspective(float fov_y, float aspect, float z_near, float z_far); +SOKOL_GL_API_DECL void sgl_lookat(float eye_x, float eye_y, float eye_z, float center_x, float center_y, float center_z, float up_x, float up_y, float up_z); +SOKOL_GL_API_DECL void sgl_push_matrix(void); +SOKOL_GL_API_DECL void sgl_pop_matrix(void); + +/* these functions only set the internal 'current texcoord / color' (valid inside or outside begin/end) */ +SOKOL_GL_API_DECL void sgl_t2f(float u, float v); +SOKOL_GL_API_DECL void sgl_c3f(float r, float g, float b); +SOKOL_GL_API_DECL void sgl_c4f(float r, float g, float b, float a); +SOKOL_GL_API_DECL void sgl_c3b(uint8_t r, uint8_t g, uint8_t b); +SOKOL_GL_API_DECL void sgl_c4b(uint8_t r, uint8_t g, uint8_t b, uint8_t a); +SOKOL_GL_API_DECL void sgl_c1i(uint32_t rgba); + +/* define primitives, each begin/end is one draw command */ +SOKOL_GL_API_DECL void sgl_begin_points(void); +SOKOL_GL_API_DECL void sgl_begin_lines(void); +SOKOL_GL_API_DECL void sgl_begin_line_strip(void); +SOKOL_GL_API_DECL void sgl_begin_triangles(void); +SOKOL_GL_API_DECL void sgl_begin_triangle_strip(void); +SOKOL_GL_API_DECL void sgl_begin_quads(void); +SOKOL_GL_API_DECL void sgl_v2f(float x, float y); +SOKOL_GL_API_DECL void sgl_v3f(float x, float y, float z); +SOKOL_GL_API_DECL void sgl_v2f_t2f(float x, float y, float u, float v); +SOKOL_GL_API_DECL void sgl_v3f_t2f(float x, float y, float z, float u, float v); +SOKOL_GL_API_DECL void sgl_v2f_c3f(float x, float y, float r, float g, float b); +SOKOL_GL_API_DECL void sgl_v2f_c3b(float x, float y, uint8_t r, uint8_t g, uint8_t b); +SOKOL_GL_API_DECL void sgl_v2f_c4f(float x, float y, float r, float g, float b, float a); +SOKOL_GL_API_DECL void sgl_v2f_c4b(float x, float y, uint8_t r, uint8_t g, uint8_t b, uint8_t a); +SOKOL_GL_API_DECL void sgl_v2f_c1i(float x, float y, uint32_t rgba); +SOKOL_GL_API_DECL void sgl_v3f_c3f(float x, float y, float z, float r, float g, float b); +SOKOL_GL_API_DECL void sgl_v3f_c3b(float x, float y, float z, uint8_t r, uint8_t g, uint8_t b); +SOKOL_GL_API_DECL void sgl_v3f_c4f(float x, float y, float z, float r, float g, float b, float a); +SOKOL_GL_API_DECL void sgl_v3f_c4b(float x, float y, float z, uint8_t r, uint8_t g, uint8_t b, uint8_t a); +SOKOL_GL_API_DECL void sgl_v3f_c1i(float x, float y, float z, uint32_t rgba); +SOKOL_GL_API_DECL void sgl_v2f_t2f_c3f(float x, float y, float u, float v, float r, float g, float b); +SOKOL_GL_API_DECL void sgl_v2f_t2f_c3b(float x, float y, float u, float v, uint8_t r, uint8_t g, uint8_t b); +SOKOL_GL_API_DECL void sgl_v2f_t2f_c4f(float x, float y, float u, float v, float r, float g, float b, float a); +SOKOL_GL_API_DECL void sgl_v2f_t2f_c4b(float x, float y, float u, float v, uint8_t r, uint8_t g, uint8_t b, uint8_t a); +SOKOL_GL_API_DECL void sgl_v2f_t2f_c1i(float x, float y, float u, float v, uint32_t rgba); +SOKOL_GL_API_DECL void sgl_v3f_t2f_c3f(float x, float y, float z, float u, float 
v, float r, float g, float b);
+SOKOL_GL_API_DECL void sgl_v3f_t2f_c3b(float x, float y, float z, float u, float v, uint8_t r, uint8_t g, uint8_t b);
+SOKOL_GL_API_DECL void sgl_v3f_t2f_c4f(float x, float y, float z, float u, float v, float r, float g, float b, float a);
+SOKOL_GL_API_DECL void sgl_v3f_t2f_c4b(float x, float y, float z, float u, float v, uint8_t r, uint8_t g, uint8_t b, uint8_t a);
+SOKOL_GL_API_DECL void sgl_v3f_t2f_c1i(float x, float y, float z, float u, float v, uint32_t rgba);
+SOKOL_GL_API_DECL void sgl_end(void);
+
+/* render everything */
+SOKOL_GL_API_DECL void sgl_draw(void);
+
+#ifdef __cplusplus
+} /* extern "C" */
+
+/* reference-based equivalents for C++ */
+inline void sgl_setup(const sgl_desc_t& desc) { return sgl_setup(&desc); }
+inline sgl_pipeline sgl_make_pipeline(const sg_pipeline_desc& desc) { return sgl_make_pipeline(&desc); }
+#endif
+#endif /* SOKOL_GL_INCLUDED */
+
+/*-- IMPLEMENTATION ----------------------------------------------------------*/
+#ifdef SOKOL_GL_IMPL
+#define SOKOL_GL_IMPL_INCLUDED (1)
+
+#include <stddef.h> /* offsetof */
+#include <string.h> /* memset */
+#include <math.h> /* M_PI, sqrtf, sinf, cosf */
+
+#ifndef M_PI
+#define M_PI 3.14159265358979323846264338327
+#endif
+
+#ifndef SOKOL_API_IMPL
+ #define SOKOL_API_IMPL
+#endif
+#ifndef SOKOL_DEBUG
+ #ifndef NDEBUG
+ #define SOKOL_DEBUG (1)
+ #endif
+#endif
+#ifndef SOKOL_ASSERT
+ #include <assert.h>
+ #define SOKOL_ASSERT(c) assert(c)
+#endif
+#ifndef SOKOL_MALLOC
+ #include <stdlib.h>
+ #define SOKOL_MALLOC(s) malloc(s)
+ #define SOKOL_FREE(p) free(p)
+#endif
+#ifndef SOKOL_LOG
+ #ifdef SOKOL_DEBUG
+ #include <stdio.h>
+ #define SOKOL_LOG(s) { SOKOL_ASSERT(s); puts(s); }
+ #else
+ #define SOKOL_LOG(s)
+ #endif
+#endif
+#ifndef SOKOL_UNREACHABLE
+ #define SOKOL_UNREACHABLE SOKOL_ASSERT(false)
+#endif
+
+#define _sgl_def(val, def) (((val) == 0) ?
(def) : (val)) +#define _SGL_INIT_COOKIE (0xABCDABCD) + +/* + Embedded source code compiled with: + + sokol-shdc -i sgl.glsl -o sgl.h -l glsl330:glsl100:hlsl4:metal_macos:metal_ios:metal_sim:wgpu -b + + (not that for Metal and D3D11 byte code, sokol-shdc must be run + on macOS and Windows) + + @vs vs + uniform vs_params { + mat4 mvp; + mat4 tm; + }; + in vec4 position; + in vec2 texcoord0; + in vec4 color0; + out vec4 uv; + out vec4 color; + void main() { + gl_Position = mvp * position; + uv = tm * vec4(texcoord0, 0.0, 1.0); + color = color0; + } + @end + + @fs fs + uniform sampler2D tex; + in vec4 uv; + in vec4 color; + out vec4 frag_color; + void main() { + frag_color = texture(tex, uv.xy) * color; + } + @end + + @program sgl vs fs +*/ + +#if defined(SOKOL_GLCORE33) +static const char _sgl_vs_source_glsl330[415] = { + 0x23,0x76,0x65,0x72,0x73,0x69,0x6f,0x6e,0x20,0x33,0x33,0x30,0x0a,0x0a,0x75,0x6e, + 0x69,0x66,0x6f,0x72,0x6d,0x20,0x76,0x65,0x63,0x34,0x20,0x76,0x73,0x5f,0x70,0x61, + 0x72,0x61,0x6d,0x73,0x5b,0x38,0x5d,0x3b,0x0a,0x6c,0x61,0x79,0x6f,0x75,0x74,0x28, + 0x6c,0x6f,0x63,0x61,0x74,0x69,0x6f,0x6e,0x20,0x3d,0x20,0x30,0x29,0x20,0x69,0x6e, + 0x20,0x76,0x65,0x63,0x34,0x20,0x70,0x6f,0x73,0x69,0x74,0x69,0x6f,0x6e,0x3b,0x0a, + 0x6f,0x75,0x74,0x20,0x76,0x65,0x63,0x34,0x20,0x75,0x76,0x3b,0x0a,0x6c,0x61,0x79, + 0x6f,0x75,0x74,0x28,0x6c,0x6f,0x63,0x61,0x74,0x69,0x6f,0x6e,0x20,0x3d,0x20,0x31, + 0x29,0x20,0x69,0x6e,0x20,0x76,0x65,0x63,0x32,0x20,0x74,0x65,0x78,0x63,0x6f,0x6f, + 0x72,0x64,0x30,0x3b,0x0a,0x6f,0x75,0x74,0x20,0x76,0x65,0x63,0x34,0x20,0x63,0x6f, + 0x6c,0x6f,0x72,0x3b,0x0a,0x6c,0x61,0x79,0x6f,0x75,0x74,0x28,0x6c,0x6f,0x63,0x61, + 0x74,0x69,0x6f,0x6e,0x20,0x3d,0x20,0x32,0x29,0x20,0x69,0x6e,0x20,0x76,0x65,0x63, + 0x34,0x20,0x63,0x6f,0x6c,0x6f,0x72,0x30,0x3b,0x0a,0x0a,0x76,0x6f,0x69,0x64,0x20, + 0x6d,0x61,0x69,0x6e,0x28,0x29,0x0a,0x7b,0x0a,0x20,0x20,0x20,0x20,0x67,0x6c,0x5f, + 0x50,0x6f,0x73,0x69,0x74,0x69,0x6f,0x6e,0x20,0x3d,0x20,0x6d,0x61,0x74,0x34,0x28, + 0x76,0x73,0x5f,0x70,0x61,0x72,0x61,0x6d,0x73,0x5b,0x30,0x5d,0x2c,0x20,0x76,0x73, + 0x5f,0x70,0x61,0x72,0x61,0x6d,0x73,0x5b,0x31,0x5d,0x2c,0x20,0x76,0x73,0x5f,0x70, + 0x61,0x72,0x61,0x6d,0x73,0x5b,0x32,0x5d,0x2c,0x20,0x76,0x73,0x5f,0x70,0x61,0x72, + 0x61,0x6d,0x73,0x5b,0x33,0x5d,0x29,0x20,0x2a,0x20,0x70,0x6f,0x73,0x69,0x74,0x69, + 0x6f,0x6e,0x3b,0x0a,0x20,0x20,0x20,0x20,0x75,0x76,0x20,0x3d,0x20,0x6d,0x61,0x74, + 0x34,0x28,0x76,0x73,0x5f,0x70,0x61,0x72,0x61,0x6d,0x73,0x5b,0x34,0x5d,0x2c,0x20, + 0x76,0x73,0x5f,0x70,0x61,0x72,0x61,0x6d,0x73,0x5b,0x35,0x5d,0x2c,0x20,0x76,0x73, + 0x5f,0x70,0x61,0x72,0x61,0x6d,0x73,0x5b,0x36,0x5d,0x2c,0x20,0x76,0x73,0x5f,0x70, + 0x61,0x72,0x61,0x6d,0x73,0x5b,0x37,0x5d,0x29,0x20,0x2a,0x20,0x76,0x65,0x63,0x34, + 0x28,0x74,0x65,0x78,0x63,0x6f,0x6f,0x72,0x64,0x30,0x2c,0x20,0x30,0x2e,0x30,0x2c, + 0x20,0x31,0x2e,0x30,0x29,0x3b,0x0a,0x20,0x20,0x20,0x20,0x63,0x6f,0x6c,0x6f,0x72, + 0x20,0x3d,0x20,0x63,0x6f,0x6c,0x6f,0x72,0x30,0x3b,0x0a,0x7d,0x0a,0x0a,0x00, +}; +static const char _sgl_fs_source_glsl330[172] = { + 0x23,0x76,0x65,0x72,0x73,0x69,0x6f,0x6e,0x20,0x33,0x33,0x30,0x0a,0x0a,0x75,0x6e, + 0x69,0x66,0x6f,0x72,0x6d,0x20,0x73,0x61,0x6d,0x70,0x6c,0x65,0x72,0x32,0x44,0x20, + 0x74,0x65,0x78,0x3b,0x0a,0x0a,0x6c,0x61,0x79,0x6f,0x75,0x74,0x28,0x6c,0x6f,0x63, + 0x61,0x74,0x69,0x6f,0x6e,0x20,0x3d,0x20,0x30,0x29,0x20,0x6f,0x75,0x74,0x20,0x76, + 0x65,0x63,0x34,0x20,0x66,0x72,0x61,0x67,0x5f,0x63,0x6f,0x6c,0x6f,0x72,0x3b,0x0a, + 0x69,0x6e,0x20,0x76,0x65,0x63,0x34,0x20,0x75,0x76,0x3b,0x0a,0x69,0x6e,0x20,0x76, + 
0x65,0x63,0x34,0x20,0x63,0x6f,0x6c,0x6f,0x72,0x3b,0x0a,0x0a,0x76,0x6f,0x69,0x64, + 0x20,0x6d,0x61,0x69,0x6e,0x28,0x29,0x0a,0x7b,0x0a,0x20,0x20,0x20,0x20,0x66,0x72, + 0x61,0x67,0x5f,0x63,0x6f,0x6c,0x6f,0x72,0x20,0x3d,0x20,0x74,0x65,0x78,0x74,0x75, + 0x72,0x65,0x28,0x74,0x65,0x78,0x2c,0x20,0x75,0x76,0x2e,0x78,0x79,0x29,0x20,0x2a, + 0x20,0x63,0x6f,0x6c,0x6f,0x72,0x3b,0x0a,0x7d,0x0a,0x0a,0x00, +}; +#elif defined(SOKOL_GLES2) || defined(SOKOL_GLES3) +static const char _sgl_vs_source_glsl100[381] = { + 0x23,0x76,0x65,0x72,0x73,0x69,0x6f,0x6e,0x20,0x31,0x30,0x30,0x0a,0x0a,0x75,0x6e, + 0x69,0x66,0x6f,0x72,0x6d,0x20,0x76,0x65,0x63,0x34,0x20,0x76,0x73,0x5f,0x70,0x61, + 0x72,0x61,0x6d,0x73,0x5b,0x38,0x5d,0x3b,0x0a,0x61,0x74,0x74,0x72,0x69,0x62,0x75, + 0x74,0x65,0x20,0x76,0x65,0x63,0x34,0x20,0x70,0x6f,0x73,0x69,0x74,0x69,0x6f,0x6e, + 0x3b,0x0a,0x76,0x61,0x72,0x79,0x69,0x6e,0x67,0x20,0x76,0x65,0x63,0x34,0x20,0x75, + 0x76,0x3b,0x0a,0x61,0x74,0x74,0x72,0x69,0x62,0x75,0x74,0x65,0x20,0x76,0x65,0x63, + 0x32,0x20,0x74,0x65,0x78,0x63,0x6f,0x6f,0x72,0x64,0x30,0x3b,0x0a,0x76,0x61,0x72, + 0x79,0x69,0x6e,0x67,0x20,0x76,0x65,0x63,0x34,0x20,0x63,0x6f,0x6c,0x6f,0x72,0x3b, + 0x0a,0x61,0x74,0x74,0x72,0x69,0x62,0x75,0x74,0x65,0x20,0x76,0x65,0x63,0x34,0x20, + 0x63,0x6f,0x6c,0x6f,0x72,0x30,0x3b,0x0a,0x0a,0x76,0x6f,0x69,0x64,0x20,0x6d,0x61, + 0x69,0x6e,0x28,0x29,0x0a,0x7b,0x0a,0x20,0x20,0x20,0x20,0x67,0x6c,0x5f,0x50,0x6f, + 0x73,0x69,0x74,0x69,0x6f,0x6e,0x20,0x3d,0x20,0x6d,0x61,0x74,0x34,0x28,0x76,0x73, + 0x5f,0x70,0x61,0x72,0x61,0x6d,0x73,0x5b,0x30,0x5d,0x2c,0x20,0x76,0x73,0x5f,0x70, + 0x61,0x72,0x61,0x6d,0x73,0x5b,0x31,0x5d,0x2c,0x20,0x76,0x73,0x5f,0x70,0x61,0x72, + 0x61,0x6d,0x73,0x5b,0x32,0x5d,0x2c,0x20,0x76,0x73,0x5f,0x70,0x61,0x72,0x61,0x6d, + 0x73,0x5b,0x33,0x5d,0x29,0x20,0x2a,0x20,0x70,0x6f,0x73,0x69,0x74,0x69,0x6f,0x6e, + 0x3b,0x0a,0x20,0x20,0x20,0x20,0x75,0x76,0x20,0x3d,0x20,0x6d,0x61,0x74,0x34,0x28, + 0x76,0x73,0x5f,0x70,0x61,0x72,0x61,0x6d,0x73,0x5b,0x34,0x5d,0x2c,0x20,0x76,0x73, + 0x5f,0x70,0x61,0x72,0x61,0x6d,0x73,0x5b,0x35,0x5d,0x2c,0x20,0x76,0x73,0x5f,0x70, + 0x61,0x72,0x61,0x6d,0x73,0x5b,0x36,0x5d,0x2c,0x20,0x76,0x73,0x5f,0x70,0x61,0x72, + 0x61,0x6d,0x73,0x5b,0x37,0x5d,0x29,0x20,0x2a,0x20,0x76,0x65,0x63,0x34,0x28,0x74, + 0x65,0x78,0x63,0x6f,0x6f,0x72,0x64,0x30,0x2c,0x20,0x30,0x2e,0x30,0x2c,0x20,0x31, + 0x2e,0x30,0x29,0x3b,0x0a,0x20,0x20,0x20,0x20,0x63,0x6f,0x6c,0x6f,0x72,0x20,0x3d, + 0x20,0x63,0x6f,0x6c,0x6f,0x72,0x30,0x3b,0x0a,0x7d,0x0a,0x0a,0x00, +}; +static const char _sgl_fs_source_glsl100[210] = { + 0x23,0x76,0x65,0x72,0x73,0x69,0x6f,0x6e,0x20,0x31,0x30,0x30,0x0a,0x70,0x72,0x65, + 0x63,0x69,0x73,0x69,0x6f,0x6e,0x20,0x6d,0x65,0x64,0x69,0x75,0x6d,0x70,0x20,0x66, + 0x6c,0x6f,0x61,0x74,0x3b,0x0a,0x70,0x72,0x65,0x63,0x69,0x73,0x69,0x6f,0x6e,0x20, + 0x68,0x69,0x67,0x68,0x70,0x20,0x69,0x6e,0x74,0x3b,0x0a,0x0a,0x75,0x6e,0x69,0x66, + 0x6f,0x72,0x6d,0x20,0x68,0x69,0x67,0x68,0x70,0x20,0x73,0x61,0x6d,0x70,0x6c,0x65, + 0x72,0x32,0x44,0x20,0x74,0x65,0x78,0x3b,0x0a,0x0a,0x76,0x61,0x72,0x79,0x69,0x6e, + 0x67,0x20,0x68,0x69,0x67,0x68,0x70,0x20,0x76,0x65,0x63,0x34,0x20,0x75,0x76,0x3b, + 0x0a,0x76,0x61,0x72,0x79,0x69,0x6e,0x67,0x20,0x68,0x69,0x67,0x68,0x70,0x20,0x76, + 0x65,0x63,0x34,0x20,0x63,0x6f,0x6c,0x6f,0x72,0x3b,0x0a,0x0a,0x76,0x6f,0x69,0x64, + 0x20,0x6d,0x61,0x69,0x6e,0x28,0x29,0x0a,0x7b,0x0a,0x20,0x20,0x20,0x20,0x67,0x6c, + 0x5f,0x46,0x72,0x61,0x67,0x44,0x61,0x74,0x61,0x5b,0x30,0x5d,0x20,0x3d,0x20,0x74, + 0x65,0x78,0x74,0x75,0x72,0x65,0x32,0x44,0x28,0x74,0x65,0x78,0x2c,0x20,0x75,0x76, + 
0x2e,0x78,0x79,0x29,0x20,0x2a,0x20,0x63,0x6f,0x6c,0x6f,0x72,0x3b,0x0a,0x7d,0x0a, + 0x0a,0x00, +}; +#elif defined(SOKOL_METAL) +static const uint8_t _sgl_vs_bytecode_metal_macos[3360] = { + 0x4d,0x54,0x4c,0x42,0x01,0x80,0x02,0x00,0x04,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x20,0x0d,0x00,0x00,0x00,0x00,0x00,0x00,0x58,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x6d,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xcd,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x3b,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x08,0x01,0x00,0x00,0x00,0x00,0x00,0x00, + 0x08,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x10,0x01,0x00,0x00,0x00,0x00,0x00,0x00, + 0x10,0x0c,0x00,0x00,0x00,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x6d,0x00,0x00,0x00, + 0x4e,0x41,0x4d,0x45,0x06,0x00,0x6d,0x61,0x69,0x6e,0x30,0x00,0x54,0x59,0x50,0x45, + 0x01,0x00,0x00,0x48,0x41,0x53,0x48,0x20,0x00,0x54,0xec,0xec,0xa8,0x23,0x83,0x8d, + 0xc7,0xcf,0x6e,0x67,0x94,0x95,0x54,0x94,0xeb,0x98,0x15,0x2a,0x2c,0x54,0xa0,0x4b, + 0x24,0x6f,0x5e,0xed,0x6e,0x76,0x45,0xb7,0x5e,0x4f,0x46,0x46,0x54,0x18,0x00,0x00, + 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x56,0x45,0x52,0x53,0x08,0x00,0x01,0x00,0x08, + 0x00,0x01,0x00,0x01,0x00,0x45,0x4e,0x44,0x54,0x45,0x4e,0x44,0x54,0x37,0x00,0x00, + 0x00,0x56,0x41,0x54,0x54,0x22,0x00,0x03,0x00,0x70,0x6f,0x73,0x69,0x74,0x69,0x6f, + 0x6e,0x00,0x00,0x80,0x74,0x65,0x78,0x63,0x6f,0x6f,0x72,0x64,0x30,0x00,0x01,0x80, + 0x63,0x6f,0x6c,0x6f,0x72,0x30,0x00,0x02,0x80,0x56,0x41,0x54,0x59,0x05,0x00,0x03, + 0x00,0x06,0x04,0x06,0x45,0x4e,0x44,0x54,0x04,0x00,0x00,0x00,0x45,0x4e,0x44,0x54, + 0xde,0xc0,0x17,0x0b,0x00,0x00,0x00,0x00,0x14,0x00,0x00,0x00,0xf0,0x0b,0x00,0x00, + 0xff,0xff,0xff,0xff,0x42,0x43,0xc0,0xde,0x21,0x0c,0x00,0x00,0xf9,0x02,0x00,0x00, + 0x0b,0x82,0x20,0x00,0x02,0x00,0x00,0x00,0x12,0x00,0x00,0x00,0x07,0x81,0x23,0x91, + 0x41,0xc8,0x04,0x49,0x06,0x10,0x32,0x39,0x92,0x01,0x84,0x0c,0x25,0x05,0x08,0x19, + 0x1e,0x04,0x8b,0x62,0x80,0x14,0x45,0x02,0x42,0x92,0x0b,0x42,0xa4,0x10,0x32,0x14, + 0x38,0x08,0x18,0x49,0x0a,0x32,0x44,0x24,0x48,0x0a,0x90,0x21,0x23,0xc4,0x52,0x80, + 0x0c,0x19,0x21,0x72,0x24,0x07,0xc8,0x48,0x11,0x62,0xa8,0xa0,0xa8,0x40,0xc6,0xf0, + 0x01,0x00,0x00,0x00,0x51,0x18,0x00,0x00,0x81,0x00,0x00,0x00,0x1b,0xc8,0x25,0xf8, + 0xff,0xff,0xff,0xff,0x01,0x90,0x80,0x8a,0x18,0x87,0x77,0x90,0x07,0x79,0x28,0x87, + 0x71,0xa0,0x07,0x76,0xc8,0x87,0x36,0x90,0x87,0x77,0xa8,0x07,0x77,0x20,0x87,0x72, + 0x20,0x87,0x36,0x20,0x87,0x74,0xb0,0x87,0x74,0x20,0x87,0x72,0x68,0x83,0x79,0x88, + 0x07,0x79,0xa0,0x87,0x36,0x30,0x07,0x78,0x68,0x83,0x76,0x08,0x07,0x7a,0x40,0x07, + 0xc0,0x1c,0xc2,0x81,0x1d,0xe6,0xa1,0x1c,0x00,0x82,0x1c,0xd2,0x61,0x1e,0xc2,0x41, + 0x1c,0xd8,0xa1,0x1c,0xda,0x80,0x1e,0xc2,0x21,0x1d,0xd8,0xa1,0x0d,0xc6,0x21,0x1c, + 0xd8,0x81,0x1d,0xe6,0x01,0x30,0x87,0x70,0x60,0x87,0x79,0x28,0x07,0x80,0x60,0x87, + 0x72,0x98,0x87,0x79,0x68,0x03,0x78,0x90,0x87,0x72,0x18,0x87,0x74,0x98,0x87,0x72, + 0x68,0x03,0x73,0x80,0x87,0x76,0x08,0x07,0x72,0x00,0xcc,0x21,0x1c,0xd8,0x61,0x1e, + 0xca,0x01,0x20,0xdc,0xe1,0x1d,0xda,0xc0,0x1c,0xe4,0x21,0x1c,0xda,0xa1,0x1c,0xda, + 0x00,0x1e,0xde,0x21,0x1d,0xdc,0x81,0x1e,0xca,0x41,0x1e,0xda,0xa0,0x1c,0xd8,0x21, + 0x1d,0xda,0x01,0xa0,0x07,0x79,0xa8,0x87,0x72,0x00,0x06,0x77,0x78,0x87,0x36,0x30, + 0x07,0x79,0x08,0x87,0x76,0x28,0x87,0x36,0x80,0x87,0x77,0x48,0x07,0x77,0xa0,0x87, + 0x72,0x90,0x87,0x36,0x28,0x07,0x76,0x48,0x87,0x76,0x68,0x03,0x77,0x78,0x07,0x77, + 0x68,0x03,0x76,0x28,0x87,0x70,0x30,0x07,0x80,0x70,0x87,0x77,0x68,0x83,0x74,0x70, + 
0x07,0x73,0x98,0x87,0x36,0x30,0x07,0x78,0x68,0x83,0x76,0x08,0x07,0x7a,0x40,0x07, + 0x80,0x1e,0xe4,0xa1,0x1e,0xca,0x01,0x20,0xdc,0xe1,0x1d,0xda,0x40,0x1d,0xea,0xa1, + 0x1d,0xe0,0xa1,0x0d,0xe8,0x21,0x1c,0xc4,0x81,0x1d,0xca,0x61,0x1e,0x00,0x73,0x08, + 0x07,0x76,0x98,0x87,0x72,0x00,0x08,0x77,0x78,0x87,0x36,0x70,0x87,0x70,0x70,0x87, + 0x79,0x68,0x03,0x73,0x80,0x87,0x36,0x68,0x87,0x70,0xa0,0x07,0x74,0x00,0xe8,0x41, + 0x1e,0xea,0xa1,0x1c,0x00,0xc2,0x1d,0xde,0xa1,0x0d,0xe6,0x21,0x1d,0xce,0xc1,0x1d, + 0xca,0x81,0x1c,0xda,0x40,0x1f,0xca,0x41,0x1e,0xde,0x61,0x1e,0xda,0xc0,0x1c,0xe0, + 0xa1,0x0d,0xda,0x21,0x1c,0xe8,0x01,0x1d,0x00,0x7a,0x90,0x87,0x7a,0x28,0x07,0x80, + 0x70,0x87,0x77,0x68,0x03,0x7a,0x90,0x87,0x70,0x80,0x07,0x78,0x48,0x07,0x77,0x38, + 0x87,0x36,0x68,0x87,0x70,0xa0,0x07,0x74,0x00,0xe8,0x41,0x1e,0xea,0xa1,0x1c,0x00, + 0x62,0x1e,0xe8,0x21,0x1c,0xc6,0x61,0x1d,0xda,0x00,0x1e,0xe4,0xe1,0x1d,0xe8,0xa1, + 0x1c,0xc6,0x81,0x1e,0xde,0x41,0x1e,0xda,0x40,0x1c,0xea,0xc1,0x1c,0xcc,0xa1,0x1c, + 0xe4,0xa1,0x0d,0xe6,0x21,0x1d,0xf4,0xa1,0x1c,0x00,0x3c,0x00,0x88,0x7a,0x70,0x87, + 0x79,0x08,0x07,0x73,0x28,0x87,0x36,0x30,0x07,0x78,0x68,0x83,0x76,0x08,0x07,0x7a, + 0x40,0x07,0x80,0x1e,0xe4,0xa1,0x1e,0xca,0x01,0x20,0xea,0x61,0x1e,0xca,0xa1,0x0d, + 0xe6,0xe1,0x1d,0xcc,0x81,0x1e,0xda,0xc0,0x1c,0xd8,0xe1,0x1d,0xc2,0x81,0x1e,0x00, + 0x73,0x08,0x07,0x76,0x98,0x87,0x72,0x00,0x36,0x18,0x02,0x01,0x2c,0x40,0x05,0x00, + 0x49,0x18,0x00,0x00,0x01,0x00,0x00,0x00,0x13,0x84,0x40,0x00,0x89,0x20,0x00,0x00, + 0x23,0x00,0x00,0x00,0x32,0x22,0x48,0x09,0x20,0x64,0x85,0x04,0x93,0x22,0xa4,0x84, + 0x04,0x93,0x22,0xe3,0x84,0xa1,0x90,0x14,0x12,0x4c,0x8a,0x8c,0x0b,0x84,0xa4,0x4c, + 0x10,0x44,0x33,0x00,0xc3,0x08,0x04,0x70,0x90,0x34,0x45,0x94,0x30,0xf9,0x0c,0x80, + 0x34,0xf4,0xef,0x50,0x13,0x1a,0x42,0x08,0xc3,0x08,0x02,0x90,0x04,0x61,0x26,0x6a, + 0x1e,0xe8,0x41,0x1e,0xea,0x61,0x1c,0xe8,0xc1,0x0d,0xda,0xa1,0x1c,0xe8,0x21,0x1c, + 0xd8,0x41,0x0f,0xf4,0xa0,0x1d,0xc2,0x81,0x1e,0xe4,0x21,0x1d,0xf0,0x01,0x05,0xe4, + 0x20,0x69,0x8a,0x28,0x61,0xf2,0x2b,0xe9,0x7f,0x80,0x08,0x60,0x24,0x24,0x94,0x32, + 0x88,0x60,0x08,0xa5,0x10,0x61,0x84,0x43,0x68,0x20,0x60,0x8e,0x00,0x0c,0x72,0x60, + 0xcd,0x11,0x80,0xc2,0x20,0x42,0x20,0x0c,0x23,0x10,0xcb,0x08,0x00,0x00,0x00,0x00, + 0x13,0xb2,0x70,0x48,0x07,0x79,0xb0,0x03,0x3a,0x68,0x83,0x70,0x80,0x07,0x78,0x60, + 0x87,0x72,0x68,0x83,0x76,0x08,0x87,0x71,0x78,0x87,0x79,0xc0,0x87,0x38,0x80,0x03, + 0x37,0x88,0x83,0x38,0x70,0x03,0x38,0xd8,0x70,0x1b,0xe5,0xd0,0x06,0xf0,0xa0,0x07, + 0x76,0x40,0x07,0x7a,0x60,0x07,0x74,0xa0,0x07,0x76,0x40,0x07,0x6d,0x90,0x0e,0x71, + 0xa0,0x07,0x78,0xa0,0x07,0x78,0xd0,0x06,0xe9,0x80,0x07,0x7a,0x80,0x07,0x7a,0x80, + 0x07,0x6d,0x90,0x0e,0x71,0x60,0x07,0x7a,0x10,0x07,0x76,0xa0,0x07,0x71,0x60,0x07, + 0x6d,0x90,0x0e,0x73,0x20,0x07,0x7a,0x30,0x07,0x72,0xa0,0x07,0x73,0x20,0x07,0x6d, + 0x90,0x0e,0x76,0x40,0x07,0x7a,0x60,0x07,0x74,0xa0,0x07,0x76,0x40,0x07,0x6d,0x60, + 0x0e,0x73,0x20,0x07,0x7a,0x30,0x07,0x72,0xa0,0x07,0x73,0x20,0x07,0x6d,0x60,0x0e, + 0x76,0x40,0x07,0x7a,0x60,0x07,0x74,0xa0,0x07,0x76,0x40,0x07,0x6d,0x60,0x0f,0x71, + 0x60,0x07,0x7a,0x10,0x07,0x76,0xa0,0x07,0x71,0x60,0x07,0x6d,0x60,0x0f,0x72,0x40, + 0x07,0x7a,0x30,0x07,0x72,0xa0,0x07,0x73,0x20,0x07,0x6d,0x60,0x0f,0x73,0x20,0x07, + 0x7a,0x30,0x07,0x72,0xa0,0x07,0x73,0x20,0x07,0x6d,0x60,0x0f,0x74,0x80,0x07,0x7a, + 0x60,0x07,0x74,0xa0,0x07,0x76,0x40,0x07,0x6d,0x60,0x0f,0x76,0x40,0x07,0x7a,0x60, + 0x07,0x74,0xa0,0x07,0x76,0x40,0x07,0x6d,0x60,0x0f,0x79,0x60,0x07,0x7a,0x10,0x07, + 
0x72,0x80,0x07,0x7a,0x10,0x07,0x72,0x80,0x07,0x6d,0x60,0x0f,0x71,0x20,0x07,0x78, + 0xa0,0x07,0x71,0x20,0x07,0x78,0xa0,0x07,0x71,0x20,0x07,0x78,0xd0,0x06,0xf6,0x10, + 0x07,0x79,0x20,0x07,0x7a,0x20,0x07,0x75,0x60,0x07,0x7a,0x20,0x07,0x75,0x60,0x07, + 0x6d,0x60,0x0f,0x72,0x50,0x07,0x76,0xa0,0x07,0x72,0x50,0x07,0x76,0xa0,0x07,0x72, + 0x50,0x07,0x76,0xd0,0x06,0xf6,0x50,0x07,0x71,0x20,0x07,0x7a,0x50,0x07,0x71,0x20, + 0x07,0x7a,0x50,0x07,0x71,0x20,0x07,0x6d,0x60,0x0f,0x71,0x00,0x07,0x72,0x40,0x07, + 0x7a,0x10,0x07,0x70,0x20,0x07,0x74,0xa0,0x07,0x71,0x00,0x07,0x72,0x40,0x07,0x6d, + 0xe0,0x0e,0x78,0xa0,0x07,0x71,0x60,0x07,0x7a,0x30,0x07,0x72,0x30,0x84,0x49,0x00, + 0x00,0x08,0x00,0x00,0x00,0x00,0x00,0xc8,0x02,0x01,0x00,0x00,0x0b,0x00,0x00,0x00, + 0x32,0x1e,0x98,0x10,0x19,0x11,0x4c,0x90,0x8c,0x09,0x26,0x47,0xc6,0x04,0x43,0x5a, + 0x25,0x30,0x02,0x50,0x04,0x05,0x18,0x50,0x80,0x02,0x85,0x50,0x10,0x65,0x50,0x20, + 0xd4,0x46,0x00,0x88,0x8d,0x35,0x28,0x0f,0x01,0x00,0x00,0x00,0x79,0x18,0x00,0x00, + 0x19,0x01,0x00,0x00,0x1a,0x03,0x4c,0x10,0x97,0x29,0xa2,0x25,0x10,0xab,0x32,0xb9, + 0xb9,0xb4,0x37,0xb7,0x21,0xc6,0x32,0x28,0x00,0xb3,0x50,0xb9,0x1b,0x43,0x0b,0x93, + 0xfb,0x9a,0x4b,0xd3,0x2b,0x1b,0x62,0x2c,0x81,0x22,0x2c,0x06,0xd9,0x20,0x08,0x0e, + 0x8e,0xad,0x0c,0x84,0x89,0xc9,0xaa,0x09,0xc4,0xae,0x4c,0x6e,0x2e,0xed,0xcd,0x0d, + 0x24,0x07,0x46,0xc6,0x25,0x86,0x06,0x04,0xa5,0xad,0x8c,0x2e,0x8c,0xcd,0xac,0xac, + 0x25,0x07,0x46,0xc6,0x25,0x86,0xc6,0x25,0x27,0x65,0x88,0xa0,0x10,0x43,0x8c,0x25, + 0x58,0x8e,0x45,0x60,0xd1,0x54,0x46,0x17,0xc6,0x36,0x04,0x51,0x8e,0x25,0x58,0x82, + 0x45,0xe0,0x16,0x96,0x26,0xe7,0x32,0xf6,0xd6,0x06,0x97,0xc6,0x56,0xe6,0x42,0x56, + 0xe6,0xf6,0x26,0xd7,0x36,0xf7,0x45,0x96,0x36,0x17,0x26,0xc6,0x56,0x36,0x44,0x50, + 0x12,0x72,0x61,0x69,0x72,0x2e,0x63,0x6f,0x6d,0x70,0x69,0x6c,0x65,0x2e,0x66,0x61, + 0x73,0x74,0x5f,0x6d,0x61,0x74,0x68,0x5f,0x65,0x6e,0x61,0x62,0x6c,0x65,0x43,0x04, + 0x65,0x61,0x19,0x84,0xa5,0xc9,0xb9,0x8c,0xbd,0xb5,0xc1,0xa5,0xb1,0x95,0xb9,0x98, + 0xc9,0x85,0xb5,0x95,0x89,0xd5,0x99,0x99,0x95,0xc9,0x7d,0x99,0x95,0xd1,0x8d,0xa1, + 0x7d,0x91,0xa5,0xcd,0x85,0x89,0xb1,0x95,0x0d,0x11,0x94,0x86,0x61,0x10,0x96,0x26, + 0xe7,0x32,0xf6,0xd6,0x06,0x97,0xc6,0x56,0xe6,0xe2,0x16,0x46,0x97,0x66,0x57,0xf6, + 0x45,0xf6,0x56,0x27,0xc6,0x56,0xf6,0x45,0x96,0x36,0x17,0x26,0xc6,0x56,0x36,0x44, + 0x50,0x1e,0x92,0x41,0x58,0x9a,0x9c,0xcb,0xd8,0x5b,0x1b,0x5c,0x1a,0x5b,0x99,0x8b, + 0x5b,0x18,0x5d,0x9a,0x5d,0xd9,0x17,0xdb,0x9b,0xdb,0xd9,0x17,0xdb,0x9b,0xdb,0xd9, + 0x17,0x59,0xda,0x5c,0x98,0x18,0x5b,0xd9,0x10,0x41,0x89,0x78,0x06,0x61,0x69,0x72, + 0x2e,0x63,0x6f,0x6d,0x70,0x69,0x6c,0x65,0x2e,0x6e,0x61,0x74,0x69,0x76,0x65,0x5f, + 0x77,0x69,0x64,0x65,0x5f,0x76,0x65,0x63,0x74,0x6f,0x72,0x73,0x5f,0x64,0x69,0x73, + 0x61,0x62,0x6c,0x65,0x43,0x04,0x65,0x62,0x14,0x96,0x26,0xe7,0x62,0x57,0x26,0x47, + 0x57,0x86,0xf7,0xf5,0x56,0x47,0x07,0x57,0x47,0xc7,0xa5,0x6e,0xae,0x4c,0x0e,0x85, + 0xed,0x6d,0xcc,0x0d,0x26,0x85,0x51,0x58,0x9a,0x9c,0x4b,0x98,0xdc,0xd9,0x17,0x5d, + 0x1e,0x5c,0xd9,0x97,0x5b,0x58,0x5b,0x19,0x0d,0x33,0xb6,0xb7,0x30,0x3a,0x1a,0x32, + 0x61,0x69,0x72,0x2e,0x61,0x72,0x67,0x5f,0x6e,0x61,0x6d,0x65,0x14,0xea,0xec,0x86, + 0x30,0x4a,0xa5,0x58,0xca,0xa5,0x60,0x4a,0xa6,0x68,0x5c,0xea,0xe6,0xca,0xe4,0x50, + 0xd8,0xde,0xc6,0xdc,0x62,0x52,0x58,0x8c,0xbd,0xb1,0xbd,0xc9,0x0d,0x61,0x94,0x4a, + 0xe1,0x94,0x4b,0xc1,0x94,0x4c,0xe9,0xc8,0x84,0xa5,0xc9,0xb9,0xc0,0xbd,0xcd,0xa5, + 0xd1,0xa5,0xbd,0xb9,0x71,0x39,0x63,0xfb,0x82,0x7a,0x9b,0x4b,0xa3,0x4b,0x7b,0x73, + 
0x1b,0xa2,0x28,0x9f,0x72,0x29,0x98,0x92,0x29,0x60,0x30,0xc4,0x50,0x36,0xc5,0x53, + 0xc2,0x80,0x50,0x58,0x9a,0x9c,0x8b,0x5d,0x99,0x1c,0x5d,0x19,0xde,0x57,0x9a,0x1b, + 0x5c,0x1d,0x1d,0xa5,0xb0,0x34,0x39,0x17,0xb6,0xb7,0xb1,0x30,0xba,0xb4,0x37,0xb7, + 0xaf,0x34,0x37,0xb2,0x32,0x3c,0x66,0x67,0x65,0x6e,0x65,0x72,0x61,0x74,0x65,0x64, + 0x28,0x38,0x70,0x6f,0x73,0x69,0x74,0x69,0x6f,0x6e,0x44,0x76,0x34,0x5f,0x66,0x29, + 0x44,0xe0,0xde,0xe6,0xd2,0xe8,0xd2,0xde,0xdc,0x86,0x50,0x8b,0xa0,0x8c,0x81,0x42, + 0x06,0x8b,0xb0,0x04,0x4a,0x19,0x28,0x97,0x82,0x29,0x99,0x62,0x06,0xd4,0xce,0xca, + 0xdc,0xca,0xe4,0xc2,0xe8,0xca,0xc8,0x50,0x72,0xe8,0xca,0xf0,0xc6,0xde,0xde,0xe4, + 0xc8,0x60,0x88,0xec,0x64,0xbe,0xcc,0x52,0x68,0x98,0xb1,0xbd,0x85,0xd1,0xc9,0x30, + 0xa1,0x2b,0xc3,0x1b,0x7b,0x7b,0x93,0x23,0x83,0x19,0x42,0x2d,0x81,0x32,0x06,0x0a, + 0x19,0x2c,0xc1,0x12,0x28,0x68,0xa0,0x5c,0x4a,0x1a,0x28,0x99,0xa2,0x06,0xbc,0xce, + 0xca,0xdc,0xca,0xe4,0xc2,0xe8,0xca,0xc8,0x50,0x6c,0xc6,0xde,0xd8,0xde,0xe4,0x60, + 0x88,0xec,0x68,0xbe,0xcc,0x52,0x68,0x8c,0xbd,0xb1,0xbd,0xc9,0xc1,0x0c,0xa1,0x96, + 0x41,0x19,0x03,0x85,0x0c,0x96,0x61,0x09,0x14,0x36,0x50,0x2e,0x05,0x53,0x32,0xa5, + 0x0d,0xa8,0x84,0xa5,0xc9,0xb9,0x88,0xd5,0x99,0x99,0x95,0xc9,0xf1,0x09,0x4b,0x93, + 0x73,0x11,0xab,0x33,0x33,0x2b,0x93,0xfb,0x9a,0x4b,0xd3,0x2b,0x23,0x12,0x96,0x26, + 0xe7,0x22,0x57,0x16,0x46,0x46,0x2a,0x2c,0x4d,0xce,0x65,0x8e,0x4e,0xae,0x6e,0x8c, + 0xee,0x8b,0x2e,0x0f,0xae,0xec,0x2b,0xcd,0xcd,0xec,0x8d,0x88,0x19,0xdb,0x5b,0x18, + 0x1d,0x0d,0x1e,0x0d,0x87,0x36,0x3b,0x38,0x0a,0x74,0x6d,0x43,0xa8,0x45,0x58,0x88, + 0x45,0x50,0xe6,0x40,0xa1,0x83,0x85,0x58,0x88,0x45,0x50,0xe6,0x40,0xa9,0x03,0x46, + 0x61,0x69,0x72,0x2e,0x61,0x72,0x67,0x5f,0x74,0x79,0x70,0x65,0x5f,0x73,0x69,0x7a, + 0x65,0xbc,0xc2,0xd2,0xe4,0x5c,0xc2,0xe4,0xce,0xbe,0xe8,0xf2,0xe0,0xca,0xbe,0xc2, + 0xd8,0xd2,0xce,0xdc,0xbe,0xe6,0xd2,0xf4,0xca,0x98,0xd8,0xcd,0x7d,0xc1,0x85,0xc9, + 0x85,0xb5,0xcd,0x71,0xf8,0x92,0x81,0x19,0x42,0x06,0x0b,0xa2,0xbc,0x81,0x02,0x07, + 0x4b,0xa1,0x90,0xc1,0x22,0x2c,0x81,0x12,0x07,0x8a,0x1c,0x28,0x76,0xa0,0xdc,0xc1, + 0x52,0x28,0x78,0xb0,0x24,0xca,0xa5,0xe4,0x81,0x92,0x29,0x7a,0x30,0x04,0x51,0xce, + 0x40,0x59,0x03,0xc5,0x0d,0x94,0x3d,0x18,0x62,0x24,0x80,0x22,0x06,0x0a,0x1f,0xf0, + 0x79,0x6b,0x73,0x4b,0x83,0x7b,0xa3,0x2b,0x73,0xa3,0x03,0x19,0x43,0x0b,0x93,0xe3, + 0x33,0x95,0xd6,0x06,0xc7,0x56,0x06,0x32,0xb4,0xb2,0x02,0x42,0x25,0x14,0x14,0x34, + 0x44,0x50,0xfe,0x60,0x88,0xa1,0xf8,0x81,0x02,0x0a,0x8d,0x32,0xc4,0x50,0x42,0x41, + 0x09,0x85,0x46,0x19,0x11,0xb1,0x03,0x3b,0xd8,0x43,0x3b,0xb8,0x41,0x3b,0xbc,0x03, + 0x39,0xd4,0x03,0x3b,0x94,0x83,0x1b,0x98,0x03,0x3b,0x84,0xc3,0x39,0xcc,0xc3,0x14, + 0x21,0x18,0x46,0x28,0xec,0xc0,0x0e,0xf6,0xd0,0x0e,0x6e,0x90,0x0e,0xe4,0x50,0x0e, + 0xee,0x40,0x0f,0x53,0x82,0x62,0xc4,0x12,0x0e,0xe9,0x20,0x0f,0x6e,0x60,0x0f,0xe5, + 0x20,0x0f,0xf3,0x90,0x0e,0xef,0xe0,0x0e,0x53,0x02,0x63,0x04,0x15,0x0e,0xe9,0x20, + 0x0f,0x6e,0xc0,0x0e,0xe1,0xe0,0x0e,0xe7,0x50,0x0f,0xe1,0x70,0x0e,0xe5,0xf0,0x0b, + 0xf6,0x50,0x0e,0xf2,0x30,0x0f,0xe9,0xf0,0x0e,0xee,0x30,0x25,0x40,0x46,0x4c,0xe1, + 0x90,0x0e,0xf2,0xe0,0x06,0xe3,0xf0,0x0e,0xed,0x00,0x0f,0xe9,0xc0,0x0e,0xe5,0xf0, + 0x0b,0xef,0x00,0x0f,0xf4,0x90,0x0e,0xef,0xe0,0x0e,0xf3,0x30,0xc5,0x50,0x18,0x07, + 0x92,0xa8,0x11,0x4a,0x38,0xa4,0x83,0x3c,0xb8,0x81,0x3d,0x94,0x83,0x3c,0xd0,0x43, + 0x39,0xe0,0xc3,0x94,0xa0,0x0f,0x00,0x00,0x79,0x18,0x00,0x00,0x6d,0x00,0x00,0x00, + 0x33,0x08,0x80,0x1c,0xc4,0xe1,0x1c,0x66,0x14,0x01,0x3d,0x88,0x43,0x38,0x84,0xc3, + 
0x8c,0x42,0x80,0x07,0x79,0x78,0x07,0x73,0x98,0x71,0x0c,0xe6,0x00,0x0f,0xed,0x10, + 0x0e,0xf4,0x80,0x0e,0x33,0x0c,0x42,0x1e,0xc2,0xc1,0x1d,0xce,0xa1,0x1c,0x66,0x30, + 0x05,0x3d,0x88,0x43,0x38,0x84,0x83,0x1b,0xcc,0x03,0x3d,0xc8,0x43,0x3d,0x8c,0x03, + 0x3d,0xcc,0x78,0x8c,0x74,0x70,0x07,0x7b,0x08,0x07,0x79,0x48,0x87,0x70,0x70,0x07, + 0x7a,0x70,0x03,0x76,0x78,0x87,0x70,0x20,0x87,0x19,0xcc,0x11,0x0e,0xec,0x90,0x0e, + 0xe1,0x30,0x0f,0x6e,0x30,0x0f,0xe3,0xf0,0x0e,0xf0,0x50,0x0e,0x33,0x10,0xc4,0x1d, + 0xde,0x21,0x1c,0xd8,0x21,0x1d,0xc2,0x61,0x1e,0x66,0x30,0x89,0x3b,0xbc,0x83,0x3b, + 0xd0,0x43,0x39,0xb4,0x03,0x3c,0xbc,0x83,0x3c,0x84,0x03,0x3b,0xcc,0xf0,0x14,0x76, + 0x60,0x07,0x7b,0x68,0x07,0x37,0x68,0x87,0x72,0x68,0x07,0x37,0x80,0x87,0x70,0x90, + 0x87,0x70,0x60,0x07,0x76,0x28,0x07,0x76,0xf8,0x05,0x76,0x78,0x87,0x77,0x80,0x87, + 0x5f,0x08,0x87,0x71,0x18,0x87,0x72,0x98,0x87,0x79,0x98,0x81,0x2c,0xee,0xf0,0x0e, + 0xee,0xe0,0x0e,0xf5,0xc0,0x0e,0xec,0x30,0x03,0x62,0xc8,0xa1,0x1c,0xe4,0xa1,0x1c, + 0xcc,0xa1,0x1c,0xe4,0xa1,0x1c,0xdc,0x61,0x1c,0xca,0x21,0x1c,0xc4,0x81,0x1d,0xca, + 0x61,0x06,0xd6,0x90,0x43,0x39,0xc8,0x43,0x39,0x98,0x43,0x39,0xc8,0x43,0x39,0xb8, + 0xc3,0x38,0x94,0x43,0x38,0x88,0x03,0x3b,0x94,0xc3,0x2f,0xbc,0x83,0x3c,0xfc,0x82, + 0x3b,0xd4,0x03,0x3b,0xb0,0xc3,0x0c,0xc7,0x69,0x87,0x70,0x58,0x87,0x72,0x70,0x83, + 0x74,0x68,0x07,0x78,0x60,0x87,0x74,0x18,0x87,0x74,0xa0,0x87,0x19,0xce,0x53,0x0f, + 0xee,0x00,0x0f,0xf2,0x50,0x0e,0xe4,0x90,0x0e,0xe3,0x40,0x0f,0xe1,0x20,0x0e,0xec, + 0x50,0x0e,0x33,0x20,0x28,0x1d,0xdc,0xc1,0x1e,0xc2,0x41,0x1e,0xd2,0x21,0x1c,0xdc, + 0x81,0x1e,0xdc,0xe0,0x1c,0xe4,0xe1,0x1d,0xea,0x01,0x1e,0x66,0x18,0x51,0x38,0xb0, + 0x43,0x3a,0x9c,0x83,0x3b,0xcc,0x50,0x24,0x76,0x60,0x07,0x7b,0x68,0x07,0x37,0x60, + 0x87,0x77,0x78,0x07,0x78,0x98,0x51,0x4c,0xf4,0x90,0x0f,0xf0,0x50,0x0e,0x33,0x1e, + 0x6a,0x1e,0xca,0x61,0x1c,0xe8,0x21,0x1d,0xde,0xc1,0x1d,0x7e,0x01,0x1e,0xe4,0xa1, + 0x1c,0xcc,0x21,0x1d,0xf0,0x61,0x06,0x54,0x85,0x83,0x38,0xcc,0xc3,0x3b,0xb0,0x43, + 0x3d,0xd0,0x43,0x39,0xfc,0xc2,0x3c,0xe4,0x43,0x3b,0x88,0xc3,0x3b,0xb0,0xc3,0x8c, + 0xc5,0x0a,0x87,0x79,0x98,0x87,0x77,0x18,0x87,0x74,0x08,0x07,0x7a,0x28,0x07,0x72, + 0x00,0x00,0x00,0x00,0x71,0x20,0x00,0x00,0x02,0x00,0x00,0x00,0x06,0x50,0x30,0x00, + 0xd2,0xd0,0x00,0x00,0x61,0x20,0x00,0x00,0x3d,0x00,0x00,0x00,0x13,0x04,0x41,0x2c, + 0x10,0x00,0x00,0x00,0x09,0x00,0x00,0x00,0xf4,0xc6,0x22,0x86,0x61,0x18,0xc6,0x22, + 0x04,0x41,0x10,0xc6,0x22,0x82,0x20,0x08,0xa8,0x95,0x40,0x19,0x14,0x01,0xbd,0x11, + 0x00,0x1a,0x33,0x00,0x24,0x66,0x00,0x28,0xcc,0x00,0x00,0x00,0xe3,0x15,0x0b,0x84, + 0x61,0x10,0x05,0x65,0x90,0x01,0x1a,0x0c,0x13,0x02,0xf9,0x8c,0x57,0x3c,0x14,0xc7, + 0x2d,0x14,0x94,0x41,0x06,0xea,0x70,0x4c,0x08,0xe4,0x63,0x41,0x01,0x9f,0xf1,0x0a, + 0x2a,0x0b,0x83,0x30,0x70,0x28,0x28,0x83,0x0c,0x19,0x43,0x99,0x10,0xc8,0xc7,0x8a, + 0x00,0x3e,0xe3,0x15,0x99,0x67,0x06,0x66,0x40,0x51,0x50,0x06,0x19,0xbc,0x48,0x33, + 0x21,0x90,0x8f,0x15,0x01,0x7c,0xc6,0x2b,0xbc,0x31,0x60,0x83,0x35,0x18,0x03,0x0a, + 0xca,0x20,0x83,0x18,0x60,0x99,0x09,0x81,0x7c,0xc6,0x2b,0xc4,0xe0,0x0c,0xe0,0xe0, + 0x0d,0x3c,0x0a,0xca,0x20,0x83,0x19,0x70,0x61,0x60,0x42,0x20,0x1f,0x0b,0x0a,0xf8, + 0x8c,0x57,0x9c,0x01,0x1b,0xd4,0x01,0x1d,0x88,0x01,0x05,0xc5,0x86,0x00,0x3e,0xb3, + 0x0d,0x61,0x10,0x00,0xb3,0x0d,0x01,0x1b,0x04,0xb3,0x0d,0xc1,0x23,0x64,0x10,0x10, + 0x03,0x00,0x00,0x00,0x09,0x00,0x00,0x00,0x5b,0x86,0x20,0x10,0x85,0x2d,0x43,0x11, + 0x88,0xc2,0x96,0x41,0x09,0x44,0x61,0xcb,0xf0,0x04,0xa2,0xb0,0x65,0xa0,0x02,0x51, + 
0xd8,0x32,0x60,0x81,0x28,0x6c,0x19,0xba,0x40,0x14,0x00,0x00,0x00,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +}; +static const uint8_t _sgl_fs_bytecode_metal_macos[2909] = { + 0x4d,0x54,0x4c,0x42,0x01,0x80,0x02,0x00,0x04,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x5d,0x0b,0x00,0x00,0x00,0x00,0x00,0x00,0x58,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x6d,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xcd,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x08,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xd5,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x08,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xdd,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x80,0x0a,0x00,0x00,0x00,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x6d,0x00,0x00,0x00, + 0x4e,0x41,0x4d,0x45,0x06,0x00,0x6d,0x61,0x69,0x6e,0x30,0x00,0x54,0x59,0x50,0x45, + 0x01,0x00,0x01,0x48,0x41,0x53,0x48,0x20,0x00,0xc3,0xf2,0x7e,0x47,0xcc,0xf2,0x87, + 0xc2,0x27,0x51,0x05,0xdb,0xac,0xc5,0x2d,0x03,0xfc,0xe6,0x91,0x1c,0x38,0xc9,0xd8, + 0x34,0x4c,0xa7,0xd1,0x68,0xc2,0x62,0x41,0xf6,0x4f,0x46,0x46,0x54,0x18,0x00,0x00, + 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x56,0x45,0x52,0x53,0x08,0x00,0x01,0x00,0x08, + 0x00,0x01,0x00,0x01,0x00,0x45,0x4e,0x44,0x54,0x45,0x4e,0x44,0x54,0x04,0x00,0x00, + 0x00,0x45,0x4e,0x44,0x54,0x04,0x00,0x00,0x00,0x45,0x4e,0x44,0x54,0xde,0xc0,0x17, + 0x0b,0x00,0x00,0x00,0x00,0x14,0x00,0x00,0x00,0x64,0x0a,0x00,0x00,0xff,0xff,0xff, + 0xff,0x42,0x43,0xc0,0xde,0x21,0x0c,0x00,0x00,0x96,0x02,0x00,0x00,0x0b,0x82,0x20, + 0x00,0x02,0x00,0x00,0x00,0x12,0x00,0x00,0x00,0x07,0x81,0x23,0x91,0x41,0xc8,0x04, + 0x49,0x06,0x10,0x32,0x39,0x92,0x01,0x84,0x0c,0x25,0x05,0x08,0x19,0x1e,0x04,0x8b, + 0x62,0x80,0x14,0x45,0x02,0x42,0x92,0x0b,0x42,0xa4,0x10,0x32,0x14,0x38,0x08,0x18, + 0x49,0x0a,0x32,0x44,0x24,0x48,0x0a,0x90,0x21,0x23,0xc4,0x52,0x80,0x0c,0x19,0x21, + 0x72,0x24,0x07,0xc8,0x48,0x11,0x62,0xa8,0xa0,0xa8,0x40,0xc6,0xf0,0x01,0x00,0x00, + 0x00,0x51,0x18,0x00,0x00,0x89,0x00,0x00,0x00,0x1b,0xcc,0x25,0xf8,0xff,0xff,0xff, + 0xff,0x01,0x60,0x00,0x09,0xa8,0x88,0x71,0x78,0x07,0x79,0x90,0x87,0x72,0x18,0x07, + 0x7a,0x60,0x87,0x7c,0x68,0x03,0x79,0x78,0x87,0x7a,0x70,0x07,0x72,0x28,0x07,0x72, + 0x68,0x03,0x72,0x48,0x07,0x7b,0x48,0x07,0x72,0x28,0x87,0x36,0x98,0x87,0x78,0x90, + 0x07,0x7a,0x68,0x03,0x73,0x80,0x87,0x36,0x68,0x87,0x70,0xa0,0x07,0x74,0x00,0xcc, + 0x21,0x1c,0xd8,0x61,0x1e,0xca,0x01,0x20,0xc8,0x21,0x1d,0xe6,0x21,0x1c,0xc4,0x81, + 0x1d,0xca,0xa1,0x0d,0xe8,0x21,0x1c,0xd2,0x81,0x1d,0xda,0x60,0x1c,0xc2,0x81,0x1d, + 0xd8,0x61,0x1e,0x00,0x73,0x08,0x07,0x76,0x98,0x87,0x72,0x00,0x08,0x76,0x28,0x87, + 0x79,0x98,0x87,0x36,0x80,0x07,0x79,0x28,0x87,0x71,0x48,0x87,0x79,0x28,0x87,0x36, + 0x30,0x07,0x78,0x68,0x87,0x70,0x20,0x07,0xc0,0x1c,0xc2,0x81,0x1d,0xe6,0xa1,0x1c, + 0x00,0xc2,0x1d,0xde,0xa1,0x0d,0xcc,0x41,0x1e,0xc2,0xa1,0x1d,0xca,0xa1,0x0d,0xe0, + 0xe1,0x1d,0xd2,0xc1,0x1d,0xe8,0xa1,0x1c,0xe4,0xa1,0x0d,0xca,0x81,0x1d,0xd2,0xa1, + 0x1d,0x00,0x7a,0x90,0x87,0x7a,0x28,0x07,0x60,0x70,0x87,0x77,0x68,0x03,0x73,0x90, + 0x87,0x70,0x68,0x87,0x72,0x68,0x03,0x78,0x78,0x87,0x74,0x70,0x07,0x7a,0x28,0x07, + 0x79,0x68,0x83,0x72,0x60,0x87,0x74,0x68,0x87,0x36,0x70,0x87,0x77,0x70,0x87,0x36, + 0x60,0x87,0x72,0x08,0x07,0x73,0x00,0x08,0x77,0x78,0x87,0x36,0x48,0x07,0x77,0x30, + 0x87,0x79,0x68,0x03,0x73,0x80,0x87,0x36,0x68,0x87,0x70,0xa0,0x07,0x74,0x00,0xe8, + 0x41,0x1e,0xea,0xa1,0x1c,0x00,0xc2,0x1d,0xde,0xa1,0x0d,0xd4,0xa1,0x1e,0xda,0x01, + 0x1e,0xda,0x80,0x1e,0xc2,0x41,0x1c,0xd8,0xa1,0x1c,0xe6,0x01,0x30,0x87,0x70,0x60, + 
0x87,0x79,0x28,0x07,0x80,0x70,0x87,0x77,0x68,0x03,0x77,0x08,0x07,0x77,0x98,0x87, + 0x36,0x30,0x07,0x78,0x68,0x83,0x76,0x08,0x07,0x7a,0x40,0x07,0x80,0x1e,0xe4,0xa1, + 0x1e,0xca,0x01,0x20,0xdc,0xe1,0x1d,0xda,0x60,0x1e,0xd2,0xe1,0x1c,0xdc,0xa1,0x1c, + 0xc8,0xa1,0x0d,0xf4,0xa1,0x1c,0xe4,0xe1,0x1d,0xe6,0xa1,0x0d,0xcc,0x01,0x1e,0xda, + 0xa0,0x1d,0xc2,0x81,0x1e,0xd0,0x01,0xa0,0x07,0x79,0xa8,0x87,0x72,0x00,0x08,0x77, + 0x78,0x87,0x36,0xa0,0x07,0x79,0x08,0x07,0x78,0x80,0x87,0x74,0x70,0x87,0x73,0x68, + 0x83,0x76,0x08,0x07,0x7a,0x40,0x07,0x80,0x1e,0xe4,0xa1,0x1e,0xca,0x01,0x20,0xe6, + 0x81,0x1e,0xc2,0x61,0x1c,0xd6,0xa1,0x0d,0xe0,0x41,0x1e,0xde,0x81,0x1e,0xca,0x61, + 0x1c,0xe8,0xe1,0x1d,0xe4,0xa1,0x0d,0xc4,0xa1,0x1e,0xcc,0xc1,0x1c,0xca,0x41,0x1e, + 0xda,0x60,0x1e,0xd2,0x41,0x1f,0xca,0x01,0xc0,0x03,0x80,0xa8,0x07,0x77,0x98,0x87, + 0x70,0x30,0x87,0x72,0x68,0x03,0x73,0x80,0x87,0x36,0x68,0x87,0x70,0xa0,0x07,0x74, + 0x00,0xe8,0x41,0x1e,0xea,0xa1,0x1c,0x00,0xa2,0x1e,0xe6,0xa1,0x1c,0xda,0x60,0x1e, + 0xde,0xc1,0x1c,0xe8,0xa1,0x0d,0xcc,0x81,0x1d,0xde,0x21,0x1c,0xe8,0x01,0x30,0x87, + 0x70,0x60,0x87,0x79,0x28,0x07,0x60,0x83,0x21,0x0c,0xc0,0x02,0x54,0x1b,0x8c,0x81, + 0x00,0x16,0xa0,0xda,0x80,0x10,0xff,0xff,0xff,0xff,0x3f,0x00,0x0c,0x20,0x01,0xd5, + 0x06,0xa3,0x08,0x80,0x05,0xa8,0x36,0x18,0x86,0x00,0x2c,0x40,0x05,0x49,0x18,0x00, + 0x00,0x03,0x00,0x00,0x00,0x13,0x86,0x40,0x18,0x26,0x0c,0x44,0x61,0x00,0x00,0x00, + 0x00,0x89,0x20,0x00,0x00,0x20,0x00,0x00,0x00,0x32,0x22,0x48,0x09,0x20,0x64,0x85, + 0x04,0x93,0x22,0xa4,0x84,0x04,0x93,0x22,0xe3,0x84,0xa1,0x90,0x14,0x12,0x4c,0x8a, + 0x8c,0x0b,0x84,0xa4,0x4c,0x10,0x48,0x33,0x00,0xc3,0x08,0x04,0x70,0x90,0x34,0x45, + 0x94,0x30,0xf9,0x0c,0x80,0x34,0xf4,0xef,0x50,0x13,0x0a,0xc2,0x51,0xd2,0x14,0x51, + 0xc2,0xe4,0xff,0x13,0x71,0x4d,0x54,0x44,0xfc,0xf6,0xf0,0x4f,0x63,0x04,0xc0,0x20, + 0xc2,0x10,0x5c,0x24,0x4d,0x11,0x25,0x4c,0xfe,0x2f,0x01,0xcc,0xb3,0x10,0xd1,0x3f, + 0x8d,0x11,0x00,0x83,0x08,0x85,0x50,0x0a,0x11,0x02,0x31,0x74,0x86,0x11,0x04,0x60, + 0x8e,0x20,0x98,0x23,0x00,0x83,0x61,0x04,0x61,0x29,0x48,0x20,0x26,0x29,0xa6,0x00, + 0xb5,0x81,0x80,0x1c,0x58,0x23,0x00,0x00,0x00,0x13,0xb2,0x70,0x48,0x07,0x79,0xb0, + 0x03,0x3a,0x68,0x83,0x70,0x80,0x07,0x78,0x60,0x87,0x72,0x68,0x83,0x76,0x08,0x87, + 0x71,0x78,0x87,0x79,0xc0,0x87,0x38,0x80,0x03,0x37,0x88,0x83,0x38,0x70,0x03,0x38, + 0xd8,0x70,0x1b,0xe5,0xd0,0x06,0xf0,0xa0,0x07,0x76,0x40,0x07,0x7a,0x60,0x07,0x74, + 0xa0,0x07,0x76,0x40,0x07,0x6d,0x90,0x0e,0x71,0xa0,0x07,0x78,0xa0,0x07,0x78,0xd0, + 0x06,0xe9,0x80,0x07,0x7a,0x80,0x07,0x7a,0x80,0x07,0x6d,0x90,0x0e,0x71,0x60,0x07, + 0x7a,0x10,0x07,0x76,0xa0,0x07,0x71,0x60,0x07,0x6d,0x90,0x0e,0x73,0x20,0x07,0x7a, + 0x30,0x07,0x72,0xa0,0x07,0x73,0x20,0x07,0x6d,0x90,0x0e,0x76,0x40,0x07,0x7a,0x60, + 0x07,0x74,0xa0,0x07,0x76,0x40,0x07,0x6d,0x60,0x0e,0x73,0x20,0x07,0x7a,0x30,0x07, + 0x72,0xa0,0x07,0x73,0x20,0x07,0x6d,0x60,0x0e,0x76,0x40,0x07,0x7a,0x60,0x07,0x74, + 0xa0,0x07,0x76,0x40,0x07,0x6d,0x60,0x0f,0x71,0x60,0x07,0x7a,0x10,0x07,0x76,0xa0, + 0x07,0x71,0x60,0x07,0x6d,0x60,0x0f,0x72,0x40,0x07,0x7a,0x30,0x07,0x72,0xa0,0x07, + 0x73,0x20,0x07,0x6d,0x60,0x0f,0x73,0x20,0x07,0x7a,0x30,0x07,0x72,0xa0,0x07,0x73, + 0x20,0x07,0x6d,0x60,0x0f,0x74,0x80,0x07,0x7a,0x60,0x07,0x74,0xa0,0x07,0x76,0x40, + 0x07,0x6d,0x60,0x0f,0x76,0x40,0x07,0x7a,0x60,0x07,0x74,0xa0,0x07,0x76,0x40,0x07, + 0x6d,0x60,0x0f,0x79,0x60,0x07,0x7a,0x10,0x07,0x72,0x80,0x07,0x7a,0x10,0x07,0x72, + 0x80,0x07,0x6d,0x60,0x0f,0x71,0x20,0x07,0x78,0xa0,0x07,0x71,0x20,0x07,0x78,0xa0, + 
0x07,0x71,0x20,0x07,0x78,0xd0,0x06,0xf6,0x10,0x07,0x79,0x20,0x07,0x7a,0x20,0x07, + 0x75,0x60,0x07,0x7a,0x20,0x07,0x75,0x60,0x07,0x6d,0x60,0x0f,0x72,0x50,0x07,0x76, + 0xa0,0x07,0x72,0x50,0x07,0x76,0xa0,0x07,0x72,0x50,0x07,0x76,0xd0,0x06,0xf6,0x50, + 0x07,0x71,0x20,0x07,0x7a,0x50,0x07,0x71,0x20,0x07,0x7a,0x50,0x07,0x71,0x20,0x07, + 0x6d,0x60,0x0f,0x71,0x00,0x07,0x72,0x40,0x07,0x7a,0x10,0x07,0x70,0x20,0x07,0x74, + 0xa0,0x07,0x71,0x00,0x07,0x72,0x40,0x07,0x6d,0xe0,0x0e,0x78,0xa0,0x07,0x71,0x60, + 0x07,0x7a,0x30,0x07,0x72,0x30,0x84,0x41,0x00,0x00,0x08,0x00,0x00,0x00,0x00,0x00, + 0x18,0xc2,0x38,0x40,0x00,0x08,0x00,0x00,0x00,0x00,0x00,0x64,0x81,0x00,0x00,0x00, + 0x00,0x08,0x00,0x00,0x00,0x32,0x1e,0x98,0x10,0x19,0x11,0x4c,0x90,0x8c,0x09,0x26, + 0x47,0xc6,0x04,0x43,0x5a,0x25,0x30,0x02,0x50,0x04,0x85,0x50,0x10,0x65,0x40,0x70, + 0xac,0x41,0x79,0x08,0x00,0x79,0x18,0x00,0x00,0xd7,0x00,0x00,0x00,0x1a,0x03,0x4c, + 0x10,0x97,0x29,0xa2,0x25,0x10,0xab,0x32,0xb9,0xb9,0xb4,0x37,0xb7,0x21,0xc6,0x42, + 0x3c,0x00,0x84,0x50,0xb9,0x1b,0x43,0x0b,0x93,0xfb,0x9a,0x4b,0xd3,0x2b,0x1b,0x62, + 0x2c,0xc2,0x23,0x2c,0x05,0xd9,0x20,0x08,0x0e,0x8e,0xad,0x0c,0x84,0x89,0xc9,0xaa, + 0x09,0xc4,0xae,0x4c,0x6e,0x2e,0xed,0xcd,0x0d,0x24,0x07,0x46,0xc6,0x25,0x86,0x06, + 0x04,0xa5,0xad,0x8c,0x2e,0x8c,0xcd,0xac,0xac,0x25,0x07,0x46,0xc6,0x25,0x86,0xc6, + 0x25,0x27,0x65,0x88,0xf0,0x10,0x43,0x8c,0x45,0x58,0x8c,0x65,0x60,0xd1,0x54,0x46, + 0x17,0xc6,0x36,0x04,0x79,0x8e,0x45,0x58,0x84,0x65,0xe0,0x16,0x96,0x26,0xe7,0x32, + 0xf6,0xd6,0x06,0x97,0xc6,0x56,0xe6,0x42,0x56,0xe6,0xf6,0x26,0xd7,0x36,0xf7,0x45, + 0x96,0x36,0x17,0x26,0xc6,0x56,0x36,0x44,0x78,0x12,0x72,0x61,0x69,0x72,0x2e,0x63, + 0x6f,0x6d,0x70,0x69,0x6c,0x65,0x2e,0x66,0x61,0x73,0x74,0x5f,0x6d,0x61,0x74,0x68, + 0x5f,0x65,0x6e,0x61,0x62,0x6c,0x65,0x43,0x84,0x67,0x61,0x19,0x84,0xa5,0xc9,0xb9, + 0x8c,0xbd,0xb5,0xc1,0xa5,0xb1,0x95,0xb9,0x98,0xc9,0x85,0xb5,0x95,0x89,0xd5,0x99, + 0x99,0x95,0xc9,0x7d,0x99,0x95,0xd1,0x8d,0xa1,0x7d,0x91,0xa5,0xcd,0x85,0x89,0xb1, + 0x95,0x0d,0x11,0x9e,0x86,0x61,0x10,0x96,0x26,0xe7,0x32,0xf6,0xd6,0x06,0x97,0xc6, + 0x56,0xe6,0xe2,0x16,0x46,0x97,0x66,0x57,0xf6,0x45,0xf6,0x56,0x27,0xc6,0x56,0xf6, + 0x45,0x96,0x36,0x17,0x26,0xc6,0x56,0x36,0x44,0x78,0x1e,0x92,0x41,0x58,0x9a,0x9c, + 0xcb,0xd8,0x5b,0x1b,0x5c,0x1a,0x5b,0x99,0x8b,0x5b,0x18,0x5d,0x9a,0x5d,0xd9,0x17, + 0xdb,0x9b,0xdb,0xd9,0x17,0xdb,0x9b,0xdb,0xd9,0x17,0x59,0xda,0x5c,0x98,0x18,0x5b, + 0xd9,0x10,0xe1,0x89,0x78,0x06,0x61,0x69,0x72,0x2e,0x63,0x6f,0x6d,0x70,0x69,0x6c, + 0x65,0x2e,0x6e,0x61,0x74,0x69,0x76,0x65,0x5f,0x77,0x69,0x64,0x65,0x5f,0x76,0x65, + 0x63,0x74,0x6f,0x72,0x73,0x5f,0x64,0x69,0x73,0x61,0x62,0x6c,0x65,0x43,0x84,0x67, + 0x62,0x14,0x96,0x26,0xe7,0x22,0x57,0xe6,0x46,0x56,0x26,0xf7,0x45,0x17,0x26,0x77, + 0x56,0x46,0xc7,0x28,0x2c,0x4d,0xce,0x25,0x4c,0xee,0xec,0x8b,0x2e,0x0f,0xae,0xec, + 0xcb,0x2d,0xac,0xad,0x8c,0x86,0x19,0xdb,0x5b,0x18,0x1d,0x0d,0x99,0xb0,0x34,0x39, + 0x97,0x30,0xb9,0xb3,0x2f,0xb7,0xb0,0xb6,0x32,0x2a,0x66,0x72,0x61,0x67,0x5f,0x63, + 0x6f,0x6c,0x6f,0x72,0x43,0x98,0xa7,0x5a,0x86,0xc7,0x7a,0xae,0x07,0x7b,0xb2,0x21, + 0xc2,0xa3,0x51,0x0a,0x4b,0x93,0x73,0x31,0x93,0x0b,0x3b,0x6b,0x2b,0x73,0xa3,0xfb, + 0x4a,0x73,0x83,0xab,0xa3,0xe3,0x52,0x37,0x57,0x26,0x87,0xc2,0xf6,0x36,0xe6,0x06, + 0x93,0x42,0x25,0x2c,0x4d,0xce,0x65,0xac,0xcc,0x8d,0xae,0x4c,0x8e,0x4f,0x58,0x9a, + 0x9c,0x0b,0x5c,0x99,0xdc,0x1c,0x5c,0xd9,0x18,0x5d,0x9a,0x5d,0x19,0x85,0x3a,0xbb, + 0x21,0xd2,0x32,0x3c,0xdc,0xd3,0x3d,0xde,0xf3,0x3d,0xd6,0x73,0x3d,0xd8,0x03,0x06, + 
0x5c,0xea,0xe6,0xca,0xe4,0x50,0xd8,0xde,0xc6,0xdc,0x62,0x52,0x58,0x8c,0xbd,0xb1, + 0xbd,0xc9,0x0d,0x91,0x16,0xe1,0xe1,0x1e,0x31,0x78,0xbc,0xe7,0x7b,0xac,0xe7,0x7a, + 0xb0,0x67,0x0c,0xb8,0x84,0xa5,0xc9,0xb9,0xd0,0x95,0xe1,0xd1,0xd5,0xc9,0x95,0x51, + 0x0a,0x4b,0x93,0x73,0x61,0x7b,0x1b,0x0b,0xa3,0x4b,0x7b,0x73,0xfb,0x4a,0x73,0x23, + 0x2b,0xc3,0xa3,0x12,0x96,0x26,0xe7,0x32,0x17,0xd6,0x06,0xc7,0x56,0x46,0x8c,0xae, + 0x0c,0x8f,0xae,0x4e,0xae,0x4c,0x86,0x8c,0xc7,0x8c,0xed,0x2d,0x8c,0x8e,0x05,0x64, + 0x2e,0xac,0x0d,0x8e,0xad,0xcc,0x87,0x03,0x5d,0x19,0xde,0x10,0x6a,0x21,0x9e,0x32, + 0x78,0xcc,0x60,0x19,0x16,0xe1,0x39,0x83,0xc7,0x7a,0xd0,0xe0,0xc1,0x9e,0x34,0xe0, + 0x12,0x96,0x26,0xe7,0x32,0x17,0xd6,0x06,0xc7,0x56,0x26,0xc7,0x63,0x2e,0xac,0x0d, + 0x8e,0xad,0x4c,0x8e,0x08,0x5d,0x19,0xde,0x54,0x1b,0x1c,0x9b,0xdc,0x10,0x69,0x39, + 0x9e,0x35,0x78,0xcc,0x60,0x19,0x16,0xe1,0xb1,0x1e,0x36,0x78,0xb0,0xa7,0x0d,0x86, + 0x20,0x4f,0x18,0x3c,0x64,0xf0,0xa8,0xc1,0xe3,0x06,0x43,0x0c,0x04,0x78,0xb6,0xe7, + 0x0d,0x46,0x44,0xec,0xc0,0x0e,0xf6,0xd0,0x0e,0x6e,0xd0,0x0e,0xef,0x40,0x0e,0xf5, + 0xc0,0x0e,0xe5,0xe0,0x06,0xe6,0xc0,0x0e,0xe1,0x70,0x0e,0xf3,0x30,0x45,0x08,0x86, + 0x11,0x0a,0x3b,0xb0,0x83,0x3d,0xb4,0x83,0x1b,0xa4,0x03,0x39,0x94,0x83,0x3b,0xd0, + 0xc3,0x94,0xa0,0x18,0xb1,0x84,0x43,0x3a,0xc8,0x83,0x1b,0xd8,0x43,0x39,0xc8,0xc3, + 0x3c,0xa4,0xc3,0x3b,0xb8,0xc3,0x94,0xc0,0x18,0x41,0x85,0x43,0x3a,0xc8,0x83,0x1b, + 0xb0,0x43,0x38,0xb8,0xc3,0x39,0xd4,0x43,0x38,0x9c,0x43,0x39,0xfc,0x82,0x3d,0x94, + 0x83,0x3c,0xcc,0x43,0x3a,0xbc,0x83,0x3b,0x4c,0x09,0x90,0x11,0x53,0x38,0xa4,0x83, + 0x3c,0xb8,0xc1,0x38,0xbc,0x43,0x3b,0xc0,0x43,0x3a,0xb0,0x43,0x39,0xfc,0xc2,0x3b, + 0xc0,0x03,0x3d,0xa4,0xc3,0x3b,0xb8,0xc3,0x3c,0x4c,0x31,0x14,0xc6,0x81,0x24,0x6a, + 0x04,0x13,0x0e,0xe9,0x20,0x0f,0x6e,0x60,0x0e,0xf2,0x10,0x0e,0xe7,0xd0,0x0e,0xe5, + 0xe0,0x0e,0xf4,0x30,0x25,0x80,0x03,0x00,0x00,0x79,0x18,0x00,0x00,0x6d,0x00,0x00, + 0x00,0x33,0x08,0x80,0x1c,0xc4,0xe1,0x1c,0x66,0x14,0x01,0x3d,0x88,0x43,0x38,0x84, + 0xc3,0x8c,0x42,0x80,0x07,0x79,0x78,0x07,0x73,0x98,0x71,0x0c,0xe6,0x00,0x0f,0xed, + 0x10,0x0e,0xf4,0x80,0x0e,0x33,0x0c,0x42,0x1e,0xc2,0xc1,0x1d,0xce,0xa1,0x1c,0x66, + 0x30,0x05,0x3d,0x88,0x43,0x38,0x84,0x83,0x1b,0xcc,0x03,0x3d,0xc8,0x43,0x3d,0x8c, + 0x03,0x3d,0xcc,0x78,0x8c,0x74,0x70,0x07,0x7b,0x08,0x07,0x79,0x48,0x87,0x70,0x70, + 0x07,0x7a,0x70,0x03,0x76,0x78,0x87,0x70,0x20,0x87,0x19,0xcc,0x11,0x0e,0xec,0x90, + 0x0e,0xe1,0x30,0x0f,0x6e,0x30,0x0f,0xe3,0xf0,0x0e,0xf0,0x50,0x0e,0x33,0x10,0xc4, + 0x1d,0xde,0x21,0x1c,0xd8,0x21,0x1d,0xc2,0x61,0x1e,0x66,0x30,0x89,0x3b,0xbc,0x83, + 0x3b,0xd0,0x43,0x39,0xb4,0x03,0x3c,0xbc,0x83,0x3c,0x84,0x03,0x3b,0xcc,0xf0,0x14, + 0x76,0x60,0x07,0x7b,0x68,0x07,0x37,0x68,0x87,0x72,0x68,0x07,0x37,0x80,0x87,0x70, + 0x90,0x87,0x70,0x60,0x07,0x76,0x28,0x07,0x76,0xf8,0x05,0x76,0x78,0x87,0x77,0x80, + 0x87,0x5f,0x08,0x87,0x71,0x18,0x87,0x72,0x98,0x87,0x79,0x98,0x81,0x2c,0xee,0xf0, + 0x0e,0xee,0xe0,0x0e,0xf5,0xc0,0x0e,0xec,0x30,0x03,0x62,0xc8,0xa1,0x1c,0xe4,0xa1, + 0x1c,0xcc,0xa1,0x1c,0xe4,0xa1,0x1c,0xdc,0x61,0x1c,0xca,0x21,0x1c,0xc4,0x81,0x1d, + 0xca,0x61,0x06,0xd6,0x90,0x43,0x39,0xc8,0x43,0x39,0x98,0x43,0x39,0xc8,0x43,0x39, + 0xb8,0xc3,0x38,0x94,0x43,0x38,0x88,0x03,0x3b,0x94,0xc3,0x2f,0xbc,0x83,0x3c,0xfc, + 0x82,0x3b,0xd4,0x03,0x3b,0xb0,0xc3,0x0c,0xc7,0x69,0x87,0x70,0x58,0x87,0x72,0x70, + 0x83,0x74,0x68,0x07,0x78,0x60,0x87,0x74,0x18,0x87,0x74,0xa0,0x87,0x19,0xce,0x53, + 0x0f,0xee,0x00,0x0f,0xf2,0x50,0x0e,0xe4,0x90,0x0e,0xe3,0x40,0x0f,0xe1,0x20,0x0e, + 
0xec,0x50,0x0e,0x33,0x20,0x28,0x1d,0xdc,0xc1,0x1e,0xc2,0x41,0x1e,0xd2,0x21,0x1c, + 0xdc,0x81,0x1e,0xdc,0xe0,0x1c,0xe4,0xe1,0x1d,0xea,0x01,0x1e,0x66,0x18,0x51,0x38, + 0xb0,0x43,0x3a,0x9c,0x83,0x3b,0xcc,0x50,0x24,0x76,0x60,0x07,0x7b,0x68,0x07,0x37, + 0x60,0x87,0x77,0x78,0x07,0x78,0x98,0x51,0x4c,0xf4,0x90,0x0f,0xf0,0x50,0x0e,0x33, + 0x1e,0x6a,0x1e,0xca,0x61,0x1c,0xe8,0x21,0x1d,0xde,0xc1,0x1d,0x7e,0x01,0x1e,0xe4, + 0xa1,0x1c,0xcc,0x21,0x1d,0xf0,0x61,0x06,0x54,0x85,0x83,0x38,0xcc,0xc3,0x3b,0xb0, + 0x43,0x3d,0xd0,0x43,0x39,0xfc,0xc2,0x3c,0xe4,0x43,0x3b,0x88,0xc3,0x3b,0xb0,0xc3, + 0x8c,0xc5,0x0a,0x87,0x79,0x98,0x87,0x77,0x18,0x87,0x74,0x08,0x07,0x7a,0x28,0x07, + 0x72,0x00,0x00,0x00,0x00,0x71,0x20,0x00,0x00,0x08,0x00,0x00,0x00,0x16,0xb0,0x01, + 0x48,0xe4,0x4b,0x00,0xf3,0x2c,0xc4,0x3f,0x11,0xd7,0x44,0x45,0xc4,0x6f,0x0f,0x7e, + 0x85,0x17,0xb7,0x6d,0x00,0x05,0x03,0x20,0x0d,0x0d,0x00,0x00,0x00,0x61,0x20,0x00, + 0x00,0x0f,0x00,0x00,0x00,0x13,0x04,0x41,0x2c,0x10,0x00,0x00,0x00,0x06,0x00,0x00, + 0x00,0xc4,0x46,0x00,0xc6,0x12,0x80,0x80,0xd4,0x08,0x40,0x0d,0x90,0x98,0x01,0xa0, + 0x30,0x03,0x40,0x60,0x04,0x00,0x00,0x00,0x00,0x83,0x0c,0x8b,0x60,0x8c,0x18,0x28, + 0x42,0x40,0x29,0x49,0x50,0x20,0x86,0x60,0x01,0x23,0x9f,0xd9,0x06,0x23,0x00,0x32, + 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +}; +static const uint8_t _sgl_vs_bytecode_metal_ios[3344] = { + 0x4d,0x54,0x4c,0x42,0x01,0x00,0x02,0x00,0x04,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x10,0x0d,0x00,0x00,0x00,0x00,0x00,0x00,0x58,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x6d,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xcd,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x3b,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x08,0x01,0x00,0x00,0x00,0x00,0x00,0x00, + 0x08,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x10,0x01,0x00,0x00,0x00,0x00,0x00,0x00, + 0x00,0x0c,0x00,0x00,0x00,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x6d,0x00,0x00,0x00, + 0x4e,0x41,0x4d,0x45,0x06,0x00,0x6d,0x61,0x69,0x6e,0x30,0x00,0x54,0x59,0x50,0x45, + 0x01,0x00,0x00,0x48,0x41,0x53,0x48,0x20,0x00,0x58,0xff,0x12,0x36,0x80,0x88,0xb5, + 0xf7,0x1c,0xc3,0xd5,0x31,0x57,0x18,0x26,0x26,0xf0,0x8c,0xd4,0x15,0xa2,0x55,0x8b, + 0xd5,0x3c,0x6b,0x11,0xbd,0x17,0x49,0x4a,0x02,0x4f,0x46,0x46,0x54,0x18,0x00,0x00, + 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x56,0x45,0x52,0x53,0x08,0x00,0x01,0x00,0x08, + 0x00,0x01,0x00,0x00,0x00,0x45,0x4e,0x44,0x54,0x45,0x4e,0x44,0x54,0x37,0x00,0x00, + 0x00,0x56,0x41,0x54,0x54,0x22,0x00,0x03,0x00,0x70,0x6f,0x73,0x69,0x74,0x69,0x6f, + 0x6e,0x00,0x00,0x80,0x74,0x65,0x78,0x63,0x6f,0x6f,0x72,0x64,0x30,0x00,0x01,0x80, + 0x63,0x6f,0x6c,0x6f,0x72,0x30,0x00,0x02,0x80,0x56,0x41,0x54,0x59,0x05,0x00,0x03, + 0x00,0x06,0x04,0x06,0x45,0x4e,0x44,0x54,0x04,0x00,0x00,0x00,0x45,0x4e,0x44,0x54, + 0xde,0xc0,0x17,0x0b,0x00,0x00,0x00,0x00,0x14,0x00,0x00,0x00,0xec,0x0b,0x00,0x00, + 0xff,0xff,0xff,0xff,0x42,0x43,0xc0,0xde,0x21,0x0c,0x00,0x00,0xf8,0x02,0x00,0x00, + 0x0b,0x82,0x20,0x00,0x02,0x00,0x00,0x00,0x12,0x00,0x00,0x00,0x07,0x81,0x23,0x91, + 0x41,0xc8,0x04,0x49,0x06,0x10,0x32,0x39,0x92,0x01,0x84,0x0c,0x25,0x05,0x08,0x19, + 0x1e,0x04,0x8b,0x62,0x80,0x14,0x45,0x02,0x42,0x92,0x0b,0x42,0xa4,0x10,0x32,0x14, + 0x38,0x08,0x18,0x49,0x0a,0x32,0x44,0x24,0x48,0x0a,0x90,0x21,0x23,0xc4,0x52,0x80, + 0x0c,0x19,0x21,0x72,0x24,0x07,0xc8,0x48,0x11,0x62,0xa8,0xa0,0xa8,0x40,0xc6,0xf0, + 0x01,0x00,0x00,0x00,0x51,0x18,0x00,0x00,0x82,0x00,0x00,0x00,0x1b,0xc8,0x25,0xf8, + 0xff,0xff,0xff,0xff,0x01,0x90,0x80,0x8a,0x18,0x87,0x77,0x90,0x07,0x79,0x28,0x87, + 
0x71,0xa0,0x07,0x76,0xc8,0x87,0x36,0x90,0x87,0x77,0xa8,0x07,0x77,0x20,0x87,0x72, + 0x20,0x87,0x36,0x20,0x87,0x74,0xb0,0x87,0x74,0x20,0x87,0x72,0x68,0x83,0x79,0x88, + 0x07,0x79,0xa0,0x87,0x36,0x30,0x07,0x78,0x68,0x83,0x76,0x08,0x07,0x7a,0x40,0x07, + 0xc0,0x1c,0xc2,0x81,0x1d,0xe6,0xa1,0x1c,0x00,0x82,0x1c,0xd2,0x61,0x1e,0xc2,0x41, + 0x1c,0xd8,0xa1,0x1c,0xda,0x80,0x1e,0xc2,0x21,0x1d,0xd8,0xa1,0x0d,0xc6,0x21,0x1c, + 0xd8,0x81,0x1d,0xe6,0x01,0x30,0x87,0x70,0x60,0x87,0x79,0x28,0x07,0x80,0x60,0x87, + 0x72,0x98,0x87,0x79,0x68,0x03,0x78,0x90,0x87,0x72,0x18,0x87,0x74,0x98,0x87,0x72, + 0x68,0x03,0x73,0x80,0x87,0x76,0x08,0x07,0x72,0x00,0xcc,0x21,0x1c,0xd8,0x61,0x1e, + 0xca,0x01,0x20,0xdc,0xe1,0x1d,0xda,0xc0,0x1c,0xe4,0x21,0x1c,0xda,0xa1,0x1c,0xda, + 0x00,0x1e,0xde,0x21,0x1d,0xdc,0x81,0x1e,0xca,0x41,0x1e,0xda,0xa0,0x1c,0xd8,0x21, + 0x1d,0xda,0x01,0xa0,0x07,0x79,0xa8,0x87,0x72,0x00,0x06,0x77,0x78,0x87,0x36,0x30, + 0x07,0x79,0x08,0x87,0x76,0x28,0x87,0x36,0x80,0x87,0x77,0x48,0x07,0x77,0xa0,0x87, + 0x72,0x90,0x87,0x36,0x28,0x07,0x76,0x48,0x87,0x76,0x68,0x03,0x77,0x78,0x07,0x77, + 0x68,0x03,0x76,0x28,0x87,0x70,0x30,0x07,0x80,0x70,0x87,0x77,0x68,0x83,0x74,0x70, + 0x07,0x73,0x98,0x87,0x36,0x30,0x07,0x78,0x68,0x83,0x76,0x08,0x07,0x7a,0x40,0x07, + 0x80,0x1e,0xe4,0xa1,0x1e,0xca,0x01,0x20,0xdc,0xe1,0x1d,0xda,0x40,0x1d,0xea,0xa1, + 0x1d,0xe0,0xa1,0x0d,0xe8,0x21,0x1c,0xc4,0x81,0x1d,0xca,0x61,0x1e,0x00,0x73,0x08, + 0x07,0x76,0x98,0x87,0x72,0x00,0x08,0x77,0x78,0x87,0x36,0x70,0x87,0x70,0x70,0x87, + 0x79,0x68,0x03,0x73,0x80,0x87,0x36,0x68,0x87,0x70,0xa0,0x07,0x74,0x00,0xe8,0x41, + 0x1e,0xea,0xa1,0x1c,0x00,0xc2,0x1d,0xde,0xa1,0x0d,0xe6,0x21,0x1d,0xce,0xc1,0x1d, + 0xca,0x81,0x1c,0xda,0x40,0x1f,0xca,0x41,0x1e,0xde,0x61,0x1e,0xda,0xc0,0x1c,0xe0, + 0xa1,0x0d,0xda,0x21,0x1c,0xe8,0x01,0x1d,0x00,0x7a,0x90,0x87,0x7a,0x28,0x07,0x80, + 0x70,0x87,0x77,0x68,0x03,0x7a,0x90,0x87,0x70,0x80,0x07,0x78,0x48,0x07,0x77,0x38, + 0x87,0x36,0x68,0x87,0x70,0xa0,0x07,0x74,0x00,0xe8,0x41,0x1e,0xea,0xa1,0x1c,0x00, + 0x62,0x1e,0xe8,0x21,0x1c,0xc6,0x61,0x1d,0xda,0x00,0x1e,0xe4,0xe1,0x1d,0xe8,0xa1, + 0x1c,0xc6,0x81,0x1e,0xde,0x41,0x1e,0xda,0x40,0x1c,0xea,0xc1,0x1c,0xcc,0xa1,0x1c, + 0xe4,0xa1,0x0d,0xe6,0x21,0x1d,0xf4,0xa1,0x1c,0x00,0x3c,0x00,0x88,0x7a,0x70,0x87, + 0x79,0x08,0x07,0x73,0x28,0x87,0x36,0x30,0x07,0x78,0x68,0x83,0x76,0x08,0x07,0x7a, + 0x40,0x07,0x80,0x1e,0xe4,0xa1,0x1e,0xca,0x01,0x20,0xea,0x61,0x1e,0xca,0xa1,0x0d, + 0xe6,0xe1,0x1d,0xcc,0x81,0x1e,0xda,0xc0,0x1c,0xd8,0xe1,0x1d,0xc2,0x81,0x1e,0x00, + 0x73,0x08,0x07,0x76,0x98,0x87,0x72,0x00,0x36,0x20,0x02,0x01,0x24,0xc0,0x02,0x54, + 0x00,0x00,0x00,0x00,0x49,0x18,0x00,0x00,0x01,0x00,0x00,0x00,0x13,0x84,0x40,0x00, + 0x89,0x20,0x00,0x00,0x23,0x00,0x00,0x00,0x32,0x22,0x48,0x09,0x20,0x64,0x85,0x04, + 0x93,0x22,0xa4,0x84,0x04,0x93,0x22,0xe3,0x84,0xa1,0x90,0x14,0x12,0x4c,0x8a,0x8c, + 0x0b,0x84,0xa4,0x4c,0x10,0x44,0x33,0x00,0xc3,0x08,0x04,0x70,0x90,0x34,0x45,0x94, + 0x30,0xf9,0x0c,0x80,0x34,0xf4,0xef,0x50,0x13,0x1a,0x42,0x08,0xc3,0x08,0x02,0x90, + 0x04,0x61,0x26,0x6a,0x1e,0xe8,0x41,0x1e,0xea,0x61,0x1c,0xe8,0xc1,0x0d,0xda,0xa1, + 0x1c,0xe8,0x21,0x1c,0xd8,0x41,0x0f,0xf4,0xa0,0x1d,0xc2,0x81,0x1e,0xe4,0x21,0x1d, + 0xf0,0x01,0x05,0xe4,0x20,0x69,0x8a,0x28,0x61,0xf2,0x2b,0xe9,0x7f,0x80,0x08,0x60, + 0x24,0x24,0x94,0x32,0x88,0x60,0x08,0xa5,0x10,0x61,0x84,0x43,0x68,0x20,0x60,0x8e, + 0x00,0x0c,0x52,0x60,0xcd,0x11,0x80,0xc2,0x20,0x42,0x20,0x0c,0x23,0x10,0xcb,0x08, + 0x00,0x00,0x00,0x00,0x13,0xa8,0x70,0x48,0x07,0x79,0xb0,0x03,0x3a,0x68,0x83,0x70, + 
0x80,0x07,0x78,0x60,0x87,0x72,0x68,0x83,0x74,0x78,0x87,0x79,0xc8,0x03,0x37,0x80, + 0x03,0x37,0x80,0x83,0x0d,0xb7,0x51,0x0e,0x6d,0x00,0x0f,0x7a,0x60,0x07,0x74,0xa0, + 0x07,0x76,0x40,0x07,0x7a,0x60,0x07,0x74,0xd0,0x06,0xe9,0x10,0x07,0x7a,0x80,0x07, + 0x7a,0x80,0x07,0x6d,0x90,0x0e,0x78,0xa0,0x07,0x78,0xa0,0x07,0x78,0xd0,0x06,0xe9, + 0x10,0x07,0x76,0xa0,0x07,0x71,0x60,0x07,0x7a,0x10,0x07,0x76,0xd0,0x06,0xe9,0x30, + 0x07,0x72,0xa0,0x07,0x73,0x20,0x07,0x7a,0x30,0x07,0x72,0xd0,0x06,0xe9,0x60,0x07, + 0x74,0xa0,0x07,0x76,0x40,0x07,0x7a,0x60,0x07,0x74,0xd0,0x06,0xe6,0x30,0x07,0x72, + 0xa0,0x07,0x73,0x20,0x07,0x7a,0x30,0x07,0x72,0xd0,0x06,0xe6,0x60,0x07,0x74,0xa0, + 0x07,0x76,0x40,0x07,0x7a,0x60,0x07,0x74,0xd0,0x06,0xf6,0x10,0x07,0x76,0xa0,0x07, + 0x71,0x60,0x07,0x7a,0x10,0x07,0x76,0xd0,0x06,0xf6,0x20,0x07,0x74,0xa0,0x07,0x73, + 0x20,0x07,0x7a,0x30,0x07,0x72,0xd0,0x06,0xf6,0x30,0x07,0x72,0xa0,0x07,0x73,0x20, + 0x07,0x7a,0x30,0x07,0x72,0xd0,0x06,0xf6,0x40,0x07,0x78,0xa0,0x07,0x76,0x40,0x07, + 0x7a,0x60,0x07,0x74,0xd0,0x06,0xf6,0x60,0x07,0x74,0xa0,0x07,0x76,0x40,0x07,0x7a, + 0x60,0x07,0x74,0xd0,0x06,0xf6,0x90,0x07,0x76,0xa0,0x07,0x71,0x20,0x07,0x78,0xa0, + 0x07,0x71,0x20,0x07,0x78,0xd0,0x06,0xf6,0x10,0x07,0x72,0x80,0x07,0x7a,0x10,0x07, + 0x72,0x80,0x07,0x7a,0x10,0x07,0x72,0x80,0x07,0x6d,0x60,0x0f,0x71,0x90,0x07,0x72, + 0xa0,0x07,0x72,0x50,0x07,0x76,0xa0,0x07,0x72,0x50,0x07,0x76,0xd0,0x06,0xf6,0x20, + 0x07,0x75,0x60,0x07,0x7a,0x20,0x07,0x75,0x60,0x07,0x7a,0x20,0x07,0x75,0x60,0x07, + 0x6d,0x60,0x0f,0x75,0x10,0x07,0x72,0xa0,0x07,0x75,0x10,0x07,0x72,0xa0,0x07,0x75, + 0x10,0x07,0x72,0xd0,0x06,0xf6,0x10,0x07,0x70,0x20,0x07,0x74,0xa0,0x07,0x71,0x00, + 0x07,0x72,0x40,0x07,0x7a,0x10,0x07,0x70,0x20,0x07,0x74,0xd0,0x06,0xee,0x80,0x07, + 0x7a,0x10,0x07,0x76,0xa0,0x07,0x73,0x20,0x07,0x43,0x98,0x04,0x00,0x80,0x00,0x00, + 0x00,0x00,0x00,0x80,0x2c,0x10,0x00,0x00,0x0b,0x00,0x00,0x00,0x32,0x1e,0x98,0x10, + 0x19,0x11,0x4c,0x90,0x8c,0x09,0x26,0x47,0xc6,0x04,0x43,0x5a,0x25,0x30,0x02,0x50, + 0x04,0x05,0x18,0x50,0x80,0x02,0x85,0x50,0x10,0x65,0x50,0x20,0xd4,0x46,0x00,0x88, + 0x8d,0x25,0x34,0x05,0x00,0x00,0x00,0x00,0x79,0x18,0x00,0x00,0x19,0x01,0x00,0x00, + 0x1a,0x03,0x4c,0x10,0x97,0x29,0xa2,0x25,0x10,0xab,0x32,0xb9,0xb9,0xb4,0x37,0xb7, + 0x21,0xc6,0x32,0x28,0x00,0xb3,0x50,0xb9,0x1b,0x43,0x0b,0x93,0xfb,0x9a,0x4b,0xd3, + 0x2b,0x1b,0x62,0x2c,0x81,0x22,0x2c,0x06,0xd9,0x20,0x08,0x0e,0x8e,0xad,0x0c,0x84, + 0x89,0xc9,0xaa,0x09,0xc4,0xae,0x4c,0x6e,0x2e,0xed,0xcd,0x0d,0x64,0x26,0x07,0x46, + 0xc6,0xc5,0xe6,0x06,0x04,0xa5,0xad,0x8c,0x2e,0x8c,0xcd,0xac,0xac,0x65,0x26,0x07, + 0x46,0xc6,0xc5,0xe6,0x26,0x65,0x88,0xa0,0x10,0x43,0x8c,0x25,0x58,0x8e,0x45,0x60, + 0xd1,0x54,0x46,0x17,0xc6,0x36,0x04,0x51,0x8e,0x25,0x58,0x84,0x45,0xe0,0x16,0x96, + 0x26,0xe7,0x32,0xf6,0xd6,0x06,0x97,0xc6,0x56,0xe6,0x42,0x56,0xe6,0xf6,0x26,0xd7, + 0x36,0xf7,0x45,0x96,0x36,0x17,0x26,0xc6,0x56,0x36,0x44,0x50,0x12,0x72,0x61,0x69, + 0x72,0x2e,0x63,0x6f,0x6d,0x70,0x69,0x6c,0x65,0x2e,0x66,0x61,0x73,0x74,0x5f,0x6d, + 0x61,0x74,0x68,0x5f,0x65,0x6e,0x61,0x62,0x6c,0x65,0x43,0x04,0x65,0x21,0x19,0x84, + 0xa5,0xc9,0xb9,0x8c,0xbd,0xb5,0xc1,0xa5,0xb1,0x95,0xb9,0x98,0xc9,0x85,0xb5,0x95, + 0x89,0xd5,0x99,0x99,0x95,0xc9,0x7d,0x99,0x95,0xd1,0x8d,0xa1,0x7d,0x95,0xb9,0x85, + 0x89,0xb1,0x95,0x0d,0x11,0x94,0x86,0x61,0x10,0x96,0x26,0xe7,0x32,0xf6,0xd6,0x06, + 0x97,0xc6,0x56,0xe6,0xe2,0x16,0x46,0x97,0x66,0x57,0xf6,0x45,0xf6,0x56,0x27,0xc6, + 0x56,0xf6,0x45,0x96,0x36,0x17,0x26,0xc6,0x56,0x36,0x44,0x50,0x1e,0x92,0x41,0x58, + 
0x9a,0x9c,0xcb,0xd8,0x5b,0x1b,0x5c,0x1a,0x5b,0x99,0x8b,0x5b,0x18,0x5d,0x9a,0x5d, + 0xd9,0x17,0xdb,0x9b,0xdb,0xd9,0x17,0xdb,0x9b,0xdb,0xd9,0x17,0x59,0xda,0x5c,0x98, + 0x18,0x5b,0xd9,0x10,0x41,0x89,0x78,0x06,0x61,0x69,0x72,0x2e,0x63,0x6f,0x6d,0x70, + 0x69,0x6c,0x65,0x2e,0x6e,0x61,0x74,0x69,0x76,0x65,0x5f,0x77,0x69,0x64,0x65,0x5f, + 0x76,0x65,0x63,0x74,0x6f,0x72,0x73,0x5f,0x64,0x69,0x73,0x61,0x62,0x6c,0x65,0x43, + 0x04,0x65,0x62,0x14,0x96,0x26,0xe7,0x62,0x57,0x26,0x47,0x57,0x86,0xf7,0xf5,0x56, + 0x47,0x07,0x57,0x47,0xc7,0xa5,0x6e,0xae,0x4c,0x0e,0x85,0xed,0x6d,0xcc,0x0d,0x26, + 0x85,0x51,0x58,0x9a,0x9c,0x4b,0x98,0xdc,0xd9,0x17,0x5d,0x1e,0x5c,0xd9,0x97,0x5b, + 0x58,0x5b,0x19,0x0d,0x33,0xb6,0xb7,0x30,0x3a,0x1a,0x32,0x61,0x69,0x72,0x2e,0x61, + 0x72,0x67,0x5f,0x6e,0x61,0x6d,0x65,0x14,0xea,0xec,0x86,0x30,0x4a,0xa5,0x58,0xca, + 0xa5,0x60,0x4a,0xa6,0x68,0x5c,0xea,0xe6,0xca,0xe4,0x50,0xd8,0xde,0xc6,0xdc,0x62, + 0x52,0x58,0x8c,0xbd,0xb1,0xbd,0xc9,0x0d,0x61,0x94,0x4a,0xe1,0x94,0x4b,0xc1,0x94, + 0x4c,0xe9,0xc8,0x84,0xa5,0xc9,0xb9,0xc0,0xbd,0xcd,0xa5,0xd1,0xa5,0xbd,0xb9,0x71, + 0x39,0x63,0xfb,0x82,0x7a,0x9b,0x4b,0xa3,0x4b,0x7b,0x73,0x1b,0xa2,0x28,0x9f,0x72, + 0x29,0x98,0x92,0x29,0x60,0x30,0xc4,0x50,0x36,0xc5,0x53,0xc2,0x80,0x50,0x58,0x9a, + 0x9c,0x8b,0x5d,0x99,0x1c,0x5d,0x19,0xde,0x57,0x9a,0x1b,0x5c,0x1d,0x1d,0xa5,0xb0, + 0x34,0x39,0x17,0xb6,0xb7,0xb1,0x30,0xba,0xb4,0x37,0xb7,0xaf,0x34,0x37,0xb2,0x32, + 0x3c,0x66,0x67,0x65,0x6e,0x65,0x72,0x61,0x74,0x65,0x64,0x28,0x38,0x70,0x6f,0x73, + 0x69,0x74,0x69,0x6f,0x6e,0x44,0x76,0x34,0x5f,0x66,0x29,0x44,0xe0,0xde,0xe6,0xd2, + 0xe8,0xd2,0xde,0xdc,0x86,0x50,0x8b,0xa0,0x8c,0x81,0x42,0x06,0x8b,0xb0,0x04,0x4a, + 0x19,0x28,0x97,0x82,0x29,0x99,0x62,0x06,0xd4,0xce,0xca,0xdc,0xca,0xe4,0xc2,0xe8, + 0xca,0xc8,0x50,0x72,0xe8,0xca,0xf0,0xc6,0xde,0xde,0xe4,0xc8,0x60,0x88,0xec,0x64, + 0xbe,0xcc,0x52,0x68,0x98,0xb1,0xbd,0x85,0xd1,0xc9,0x30,0xa1,0x2b,0xc3,0x1b,0x7b, + 0x7b,0x93,0x23,0x83,0x19,0x42,0x2d,0x81,0x32,0x06,0x0a,0x19,0x2c,0xc1,0x12,0x28, + 0x68,0xa0,0x5c,0x4a,0x1a,0x28,0x99,0xa2,0x06,0xbc,0xce,0xca,0xdc,0xca,0xe4,0xc2, + 0xe8,0xca,0xc8,0x50,0x6c,0xc6,0xde,0xd8,0xde,0xe4,0x60,0x88,0xec,0x68,0xbe,0xcc, + 0x52,0x68,0x8c,0xbd,0xb1,0xbd,0xc9,0xc1,0x0c,0xa1,0x96,0x41,0x19,0x03,0x85,0x0c, + 0x96,0x61,0x09,0x14,0x36,0x50,0x2e,0x05,0x53,0x32,0xa5,0x0d,0xa8,0x84,0xa5,0xc9, + 0xb9,0x88,0xd5,0x99,0x99,0x95,0xc9,0xf1,0x09,0x4b,0x93,0x73,0x11,0xab,0x33,0x33, + 0x2b,0x93,0xfb,0x9a,0x4b,0xd3,0x2b,0x23,0x12,0x96,0x26,0xe7,0x22,0x57,0x16,0x46, + 0x46,0x2a,0x2c,0x4d,0xce,0x65,0x8e,0x4e,0xae,0x6e,0x8c,0xee,0x8b,0x2e,0x0f,0xae, + 0xec,0x2b,0xcd,0xcd,0xec,0x8d,0x88,0x19,0xdb,0x5b,0x18,0x1d,0x0d,0x1e,0x0d,0x87, + 0x36,0x3b,0x38,0x0a,0x74,0x6d,0x43,0xa8,0x45,0x58,0x88,0x45,0x50,0xe6,0x40,0xa1, + 0x83,0x85,0x58,0x88,0x45,0x50,0xe6,0x40,0xa9,0x03,0x46,0x61,0x69,0x72,0x2e,0x61, + 0x72,0x67,0x5f,0x74,0x79,0x70,0x65,0x5f,0x73,0x69,0x7a,0x65,0xbc,0xc2,0xd2,0xe4, + 0x5c,0xc2,0xe4,0xce,0xbe,0xe8,0xf2,0xe0,0xca,0xbe,0xc2,0xd8,0xd2,0xce,0xdc,0xbe, + 0xe6,0xd2,0xf4,0xca,0x98,0xd8,0xcd,0x7d,0xc1,0x85,0xc9,0x85,0xb5,0xcd,0x71,0xf8, + 0x92,0x81,0x19,0x42,0x06,0x0b,0xa2,0xbc,0x81,0x02,0x07,0x4b,0xa1,0x90,0xc1,0x22, + 0x2c,0x81,0x12,0x07,0x8a,0x1c,0x28,0x76,0xa0,0xdc,0xc1,0x52,0x28,0x78,0xb0,0x24, + 0xca,0xa5,0xe4,0x81,0x92,0x29,0x7a,0x30,0x04,0x51,0xce,0x40,0x59,0x03,0xc5,0x0d, + 0x94,0x3d,0x18,0x62,0x24,0x80,0x22,0x06,0x0a,0x1f,0xf0,0x79,0x6b,0x73,0x4b,0x83, + 0x7b,0xa3,0x2b,0x73,0xa3,0x03,0x19,0x43,0x0b,0x93,0xe3,0x33,0x95,0xd6,0x06,0xc7, + 
0x56,0x06,0x32,0xb4,0xb2,0x02,0x42,0x25,0x14,0x14,0x34,0x44,0x50,0xfe,0x60,0x88, + 0xa1,0xf8,0x81,0x02,0x0a,0x8d,0x32,0xc4,0x50,0x42,0x41,0x09,0x85,0x46,0x19,0x11, + 0xb1,0x03,0x3b,0xd8,0x43,0x3b,0xb8,0x41,0x3b,0xbc,0x03,0x39,0xd4,0x03,0x3b,0x94, + 0x83,0x1b,0x98,0x03,0x3b,0x84,0xc3,0x39,0xcc,0xc3,0x14,0x21,0x18,0x46,0x28,0xec, + 0xc0,0x0e,0xf6,0xd0,0x0e,0x6e,0x90,0x0e,0xe4,0x50,0x0e,0xee,0x40,0x0f,0x53,0x82, + 0x62,0xc4,0x12,0x0e,0xe9,0x20,0x0f,0x6e,0x60,0x0f,0xe5,0x20,0x0f,0xf3,0x90,0x0e, + 0xef,0xe0,0x0e,0x53,0x02,0x63,0x04,0x15,0x0e,0xe9,0x20,0x0f,0x6e,0xc0,0x0e,0xe1, + 0xe0,0x0e,0xe7,0x50,0x0f,0xe1,0x70,0x0e,0xe5,0xf0,0x0b,0xf6,0x50,0x0e,0xf2,0x30, + 0x0f,0xe9,0xf0,0x0e,0xee,0x30,0x25,0x40,0x46,0x4c,0xe1,0x90,0x0e,0xf2,0xe0,0x06, + 0xe3,0xf0,0x0e,0xed,0x00,0x0f,0xe9,0xc0,0x0e,0xe5,0xf0,0x0b,0xef,0x00,0x0f,0xf4, + 0x90,0x0e,0xef,0xe0,0x0e,0xf3,0x30,0xc5,0x50,0x18,0x07,0x92,0xa8,0x11,0x4a,0x38, + 0xa4,0x83,0x3c,0xb8,0x81,0x3d,0x94,0x83,0x3c,0xd0,0x43,0x39,0xe0,0xc3,0x94,0xa0, + 0x0f,0x00,0x00,0x00,0x79,0x18,0x00,0x00,0x6d,0x00,0x00,0x00,0x33,0x08,0x80,0x1c, + 0xc4,0xe1,0x1c,0x66,0x14,0x01,0x3d,0x88,0x43,0x38,0x84,0xc3,0x8c,0x42,0x80,0x07, + 0x79,0x78,0x07,0x73,0x98,0x71,0x0c,0xe6,0x00,0x0f,0xed,0x10,0x0e,0xf4,0x80,0x0e, + 0x33,0x0c,0x42,0x1e,0xc2,0xc1,0x1d,0xce,0xa1,0x1c,0x66,0x30,0x05,0x3d,0x88,0x43, + 0x38,0x84,0x83,0x1b,0xcc,0x03,0x3d,0xc8,0x43,0x3d,0x8c,0x03,0x3d,0xcc,0x78,0x8c, + 0x74,0x70,0x07,0x7b,0x08,0x07,0x79,0x48,0x87,0x70,0x70,0x07,0x7a,0x70,0x03,0x76, + 0x78,0x87,0x70,0x20,0x87,0x19,0xcc,0x11,0x0e,0xec,0x90,0x0e,0xe1,0x30,0x0f,0x6e, + 0x30,0x0f,0xe3,0xf0,0x0e,0xf0,0x50,0x0e,0x33,0x10,0xc4,0x1d,0xde,0x21,0x1c,0xd8, + 0x21,0x1d,0xc2,0x61,0x1e,0x66,0x30,0x89,0x3b,0xbc,0x83,0x3b,0xd0,0x43,0x39,0xb4, + 0x03,0x3c,0xbc,0x83,0x3c,0x84,0x03,0x3b,0xcc,0xf0,0x14,0x76,0x60,0x07,0x7b,0x68, + 0x07,0x37,0x68,0x87,0x72,0x68,0x07,0x37,0x80,0x87,0x70,0x90,0x87,0x70,0x60,0x07, + 0x76,0x28,0x07,0x76,0xf8,0x05,0x76,0x78,0x87,0x77,0x80,0x87,0x5f,0x08,0x87,0x71, + 0x18,0x87,0x72,0x98,0x87,0x79,0x98,0x81,0x2c,0xee,0xf0,0x0e,0xee,0xe0,0x0e,0xf5, + 0xc0,0x0e,0xec,0x30,0x03,0x62,0xc8,0xa1,0x1c,0xe4,0xa1,0x1c,0xcc,0xa1,0x1c,0xe4, + 0xa1,0x1c,0xdc,0x61,0x1c,0xca,0x21,0x1c,0xc4,0x81,0x1d,0xca,0x61,0x06,0xd6,0x90, + 0x43,0x39,0xc8,0x43,0x39,0x98,0x43,0x39,0xc8,0x43,0x39,0xb8,0xc3,0x38,0x94,0x43, + 0x38,0x88,0x03,0x3b,0x94,0xc3,0x2f,0xbc,0x83,0x3c,0xfc,0x82,0x3b,0xd4,0x03,0x3b, + 0xb0,0xc3,0x0c,0xc7,0x69,0x87,0x70,0x58,0x87,0x72,0x70,0x83,0x74,0x68,0x07,0x78, + 0x60,0x87,0x74,0x18,0x87,0x74,0xa0,0x87,0x19,0xce,0x53,0x0f,0xee,0x00,0x0f,0xf2, + 0x50,0x0e,0xe4,0x90,0x0e,0xe3,0x40,0x0f,0xe1,0x20,0x0e,0xec,0x50,0x0e,0x33,0x20, + 0x28,0x1d,0xdc,0xc1,0x1e,0xc2,0x41,0x1e,0xd2,0x21,0x1c,0xdc,0x81,0x1e,0xdc,0xe0, + 0x1c,0xe4,0xe1,0x1d,0xea,0x01,0x1e,0x66,0x18,0x51,0x38,0xb0,0x43,0x3a,0x9c,0x83, + 0x3b,0xcc,0x50,0x24,0x76,0x60,0x07,0x7b,0x68,0x07,0x37,0x60,0x87,0x77,0x78,0x07, + 0x78,0x98,0x51,0x4c,0xf4,0x90,0x0f,0xf0,0x50,0x0e,0x33,0x1e,0x6a,0x1e,0xca,0x61, + 0x1c,0xe8,0x21,0x1d,0xde,0xc1,0x1d,0x7e,0x01,0x1e,0xe4,0xa1,0x1c,0xcc,0x21,0x1d, + 0xf0,0x61,0x06,0x54,0x85,0x83,0x38,0xcc,0xc3,0x3b,0xb0,0x43,0x3d,0xd0,0x43,0x39, + 0xfc,0xc2,0x3c,0xe4,0x43,0x3b,0x88,0xc3,0x3b,0xb0,0xc3,0x8c,0xc5,0x0a,0x87,0x79, + 0x98,0x87,0x77,0x18,0x87,0x74,0x08,0x07,0x7a,0x28,0x07,0x72,0x00,0x00,0x00,0x00, + 0x71,0x20,0x00,0x00,0x02,0x00,0x00,0x00,0x06,0x50,0x30,0x00,0xd2,0xd0,0x00,0x00, + 0x61,0x20,0x00,0x00,0x3d,0x00,0x00,0x00,0x13,0x04,0x41,0x2c,0x10,0x00,0x00,0x00, + 
0x09,0x00,0x00,0x00,0xf4,0xc6,0x22,0x86,0x61,0x18,0xc6,0x22,0x04,0x41,0x10,0xc6, + 0x22,0x82,0x20,0x08,0xa8,0x95,0x40,0x19,0x14,0x01,0xbd,0x11,0x00,0x1a,0x33,0x00, + 0x24,0x66,0x00,0x28,0xcc,0x00,0x00,0x00,0xe3,0x15,0x0b,0x84,0x61,0x10,0x05,0x65, + 0x90,0x01,0x1a,0x0c,0x13,0x02,0xf9,0x8c,0x57,0x3c,0x14,0xc7,0x2d,0x14,0x94,0x41, + 0x06,0xea,0x70,0x4c,0x08,0xe4,0x63,0x41,0x01,0x9f,0xf1,0x0a,0x2a,0x0b,0x83,0x30, + 0x70,0x28,0x28,0x83,0x0c,0x19,0x43,0x99,0x10,0xc8,0xc7,0x8a,0x00,0x3e,0xe3,0x15, + 0x99,0x67,0x06,0x66,0x40,0x51,0x50,0x06,0x19,0xbc,0x48,0x33,0x21,0x90,0x8f,0x15, + 0x01,0x7c,0xc6,0x2b,0xbc,0x31,0x60,0x83,0x35,0x18,0x03,0x0a,0xca,0x20,0x83,0x18, + 0x60,0x99,0x09,0x81,0x7c,0xc6,0x2b,0xc4,0xe0,0x0c,0xe0,0xe0,0x0d,0x3c,0x0a,0xca, + 0x20,0x83,0x19,0x70,0x61,0x60,0x42,0x20,0x1f,0x0b,0x0a,0xf8,0x8c,0x57,0x9c,0x01, + 0x1b,0xd4,0x01,0x1d,0x88,0x01,0x05,0xc5,0x86,0x00,0x3e,0xb3,0x0d,0x61,0x10,0x00, + 0xb3,0x0d,0x01,0x1b,0x04,0xb3,0x0d,0xc1,0x23,0x64,0x10,0x10,0x03,0x00,0x00,0x00, + 0x09,0x00,0x00,0x00,0x5b,0x86,0x20,0x10,0x85,0x2d,0x43,0x11,0x88,0xc2,0x96,0x41, + 0x09,0x44,0x61,0xcb,0xf0,0x04,0xa2,0xb0,0x65,0xa0,0x02,0x51,0xd8,0x32,0x60,0x81, + 0x28,0x6c,0x19,0xba,0x40,0x14,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +}; +static const uint8_t _sgl_fs_bytecode_metal_ios[2893] = { + 0x4d,0x54,0x4c,0x42,0x01,0x00,0x02,0x00,0x04,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x4d,0x0b,0x00,0x00,0x00,0x00,0x00,0x00,0x58,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x6d,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xcd,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x08,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xd5,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x08,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xdd,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x70,0x0a,0x00,0x00,0x00,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x6d,0x00,0x00,0x00, + 0x4e,0x41,0x4d,0x45,0x06,0x00,0x6d,0x61,0x69,0x6e,0x30,0x00,0x54,0x59,0x50,0x45, + 0x01,0x00,0x01,0x48,0x41,0x53,0x48,0x20,0x00,0xa8,0xe6,0x5c,0x09,0x1c,0x98,0x6d, + 0xf9,0x59,0x27,0x6f,0xf0,0xd6,0x41,0x4d,0xf9,0x93,0x2f,0x51,0x99,0xc5,0xc8,0xc7, + 0xad,0x1e,0x89,0xca,0xfe,0x25,0x89,0x6a,0x2a,0x4f,0x46,0x46,0x54,0x18,0x00,0x00, + 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x56,0x45,0x52,0x53,0x08,0x00,0x01,0x00,0x08, + 0x00,0x01,0x00,0x00,0x00,0x45,0x4e,0x44,0x54,0x45,0x4e,0x44,0x54,0x04,0x00,0x00, + 0x00,0x45,0x4e,0x44,0x54,0x04,0x00,0x00,0x00,0x45,0x4e,0x44,0x54,0xde,0xc0,0x17, + 0x0b,0x00,0x00,0x00,0x00,0x14,0x00,0x00,0x00,0x5c,0x0a,0x00,0x00,0xff,0xff,0xff, + 0xff,0x42,0x43,0xc0,0xde,0x21,0x0c,0x00,0x00,0x94,0x02,0x00,0x00,0x0b,0x82,0x20, + 0x00,0x02,0x00,0x00,0x00,0x12,0x00,0x00,0x00,0x07,0x81,0x23,0x91,0x41,0xc8,0x04, + 0x49,0x06,0x10,0x32,0x39,0x92,0x01,0x84,0x0c,0x25,0x05,0x08,0x19,0x1e,0x04,0x8b, + 0x62,0x80,0x14,0x45,0x02,0x42,0x92,0x0b,0x42,0xa4,0x10,0x32,0x14,0x38,0x08,0x18, + 0x49,0x0a,0x32,0x44,0x24,0x48,0x0a,0x90,0x21,0x23,0xc4,0x52,0x80,0x0c,0x19,0x21, + 0x72,0x24,0x07,0xc8,0x48,0x11,0x62,0xa8,0xa0,0xa8,0x40,0xc6,0xf0,0x01,0x00,0x00, + 0x00,0x51,0x18,0x00,0x00,0x89,0x00,0x00,0x00,0x1b,0xcc,0x25,0xf8,0xff,0xff,0xff, + 0xff,0x01,0x60,0x00,0x09,0xa8,0x88,0x71,0x78,0x07,0x79,0x90,0x87,0x72,0x18,0x07, + 0x7a,0x60,0x87,0x7c,0x68,0x03,0x79,0x78,0x87,0x7a,0x70,0x07,0x72,0x28,0x07,0x72, + 0x68,0x03,0x72,0x48,0x07,0x7b,0x48,0x07,0x72,0x28,0x87,0x36,0x98,0x87,0x78,0x90, + 0x07,0x7a,0x68,0x03,0x73,0x80,0x87,0x36,0x68,0x87,0x70,0xa0,0x07,0x74,0x00,0xcc, + 0x21,0x1c,0xd8,0x61,0x1e,0xca,0x01,0x20,0xc8,0x21,0x1d,0xe6,0x21,0x1c,0xc4,0x81, + 
0x1d,0xca,0xa1,0x0d,0xe8,0x21,0x1c,0xd2,0x81,0x1d,0xda,0x60,0x1c,0xc2,0x81,0x1d, + 0xd8,0x61,0x1e,0x00,0x73,0x08,0x07,0x76,0x98,0x87,0x72,0x00,0x08,0x76,0x28,0x87, + 0x79,0x98,0x87,0x36,0x80,0x07,0x79,0x28,0x87,0x71,0x48,0x87,0x79,0x28,0x87,0x36, + 0x30,0x07,0x78,0x68,0x87,0x70,0x20,0x07,0xc0,0x1c,0xc2,0x81,0x1d,0xe6,0xa1,0x1c, + 0x00,0xc2,0x1d,0xde,0xa1,0x0d,0xcc,0x41,0x1e,0xc2,0xa1,0x1d,0xca,0xa1,0x0d,0xe0, + 0xe1,0x1d,0xd2,0xc1,0x1d,0xe8,0xa1,0x1c,0xe4,0xa1,0x0d,0xca,0x81,0x1d,0xd2,0xa1, + 0x1d,0x00,0x7a,0x90,0x87,0x7a,0x28,0x07,0x60,0x70,0x87,0x77,0x68,0x03,0x73,0x90, + 0x87,0x70,0x68,0x87,0x72,0x68,0x03,0x78,0x78,0x87,0x74,0x70,0x07,0x7a,0x28,0x07, + 0x79,0x68,0x83,0x72,0x60,0x87,0x74,0x68,0x87,0x36,0x70,0x87,0x77,0x70,0x87,0x36, + 0x60,0x87,0x72,0x08,0x07,0x73,0x00,0x08,0x77,0x78,0x87,0x36,0x48,0x07,0x77,0x30, + 0x87,0x79,0x68,0x03,0x73,0x80,0x87,0x36,0x68,0x87,0x70,0xa0,0x07,0x74,0x00,0xe8, + 0x41,0x1e,0xea,0xa1,0x1c,0x00,0xc2,0x1d,0xde,0xa1,0x0d,0xd4,0xa1,0x1e,0xda,0x01, + 0x1e,0xda,0x80,0x1e,0xc2,0x41,0x1c,0xd8,0xa1,0x1c,0xe6,0x01,0x30,0x87,0x70,0x60, + 0x87,0x79,0x28,0x07,0x80,0x70,0x87,0x77,0x68,0x03,0x77,0x08,0x07,0x77,0x98,0x87, + 0x36,0x30,0x07,0x78,0x68,0x83,0x76,0x08,0x07,0x7a,0x40,0x07,0x80,0x1e,0xe4,0xa1, + 0x1e,0xca,0x01,0x20,0xdc,0xe1,0x1d,0xda,0x60,0x1e,0xd2,0xe1,0x1c,0xdc,0xa1,0x1c, + 0xc8,0xa1,0x0d,0xf4,0xa1,0x1c,0xe4,0xe1,0x1d,0xe6,0xa1,0x0d,0xcc,0x01,0x1e,0xda, + 0xa0,0x1d,0xc2,0x81,0x1e,0xd0,0x01,0xa0,0x07,0x79,0xa8,0x87,0x72,0x00,0x08,0x77, + 0x78,0x87,0x36,0xa0,0x07,0x79,0x08,0x07,0x78,0x80,0x87,0x74,0x70,0x87,0x73,0x68, + 0x83,0x76,0x08,0x07,0x7a,0x40,0x07,0x80,0x1e,0xe4,0xa1,0x1e,0xca,0x01,0x20,0xe6, + 0x81,0x1e,0xc2,0x61,0x1c,0xd6,0xa1,0x0d,0xe0,0x41,0x1e,0xde,0x81,0x1e,0xca,0x61, + 0x1c,0xe8,0xe1,0x1d,0xe4,0xa1,0x0d,0xc4,0xa1,0x1e,0xcc,0xc1,0x1c,0xca,0x41,0x1e, + 0xda,0x60,0x1e,0xd2,0x41,0x1f,0xca,0x01,0xc0,0x03,0x80,0xa8,0x07,0x77,0x98,0x87, + 0x70,0x30,0x87,0x72,0x68,0x03,0x73,0x80,0x87,0x36,0x68,0x87,0x70,0xa0,0x07,0x74, + 0x00,0xe8,0x41,0x1e,0xea,0xa1,0x1c,0x00,0xa2,0x1e,0xe6,0xa1,0x1c,0xda,0x60,0x1e, + 0xde,0xc1,0x1c,0xe8,0xa1,0x0d,0xcc,0x81,0x1d,0xde,0x21,0x1c,0xe8,0x01,0x30,0x87, + 0x70,0x60,0x87,0x79,0x28,0x07,0x60,0x83,0x21,0x0c,0xc0,0x02,0x54,0x1b,0x8c,0x81, + 0x00,0x16,0xa0,0xda,0x80,0x10,0xff,0xff,0xff,0xff,0x3f,0x00,0x0c,0x20,0x01,0xd5, + 0x06,0xa3,0x08,0x80,0x05,0xa8,0x36,0x18,0x86,0x00,0x2c,0x40,0x05,0x49,0x18,0x00, + 0x00,0x03,0x00,0x00,0x00,0x13,0x86,0x40,0x18,0x26,0x0c,0x44,0x61,0x00,0x00,0x00, + 0x00,0x89,0x20,0x00,0x00,0x20,0x00,0x00,0x00,0x32,0x22,0x48,0x09,0x20,0x64,0x85, + 0x04,0x93,0x22,0xa4,0x84,0x04,0x93,0x22,0xe3,0x84,0xa1,0x90,0x14,0x12,0x4c,0x8a, + 0x8c,0x0b,0x84,0xa4,0x4c,0x10,0x48,0x33,0x00,0xc3,0x08,0x04,0x70,0x90,0x34,0x45, + 0x94,0x30,0xf9,0x0c,0x80,0x34,0xf4,0xef,0x50,0x13,0x0a,0xc2,0x51,0xd2,0x14,0x51, + 0xc2,0xe4,0xff,0x13,0x71,0x4d,0x54,0x44,0xfc,0xf6,0xf0,0x4f,0x63,0x04,0xc0,0x20, + 0xc2,0x10,0x5c,0x24,0x4d,0x11,0x25,0x4c,0xfe,0x2f,0x01,0xcc,0xb3,0x10,0xd1,0x3f, + 0x8d,0x11,0x00,0x83,0x08,0x85,0x50,0x0a,0x11,0x02,0x31,0x74,0x86,0x11,0x04,0x60, + 0x8e,0x20,0x98,0x23,0x00,0x83,0x61,0x04,0x61,0x29,0x48,0x20,0x26,0x29,0xa6,0x00, + 0xb5,0x81,0x80,0x14,0x58,0x23,0x00,0x00,0x00,0x13,0xa8,0x70,0x48,0x07,0x79,0xb0, + 0x03,0x3a,0x68,0x83,0x70,0x80,0x07,0x78,0x60,0x87,0x72,0x68,0x83,0x74,0x78,0x87, + 0x79,0xc8,0x03,0x37,0x80,0x03,0x37,0x80,0x83,0x0d,0xb7,0x51,0x0e,0x6d,0x00,0x0f, + 0x7a,0x60,0x07,0x74,0xa0,0x07,0x76,0x40,0x07,0x7a,0x60,0x07,0x74,0xd0,0x06,0xe9, + 
0x10,0x07,0x7a,0x80,0x07,0x7a,0x80,0x07,0x6d,0x90,0x0e,0x78,0xa0,0x07,0x78,0xa0, + 0x07,0x78,0xd0,0x06,0xe9,0x10,0x07,0x76,0xa0,0x07,0x71,0x60,0x07,0x7a,0x10,0x07, + 0x76,0xd0,0x06,0xe9,0x30,0x07,0x72,0xa0,0x07,0x73,0x20,0x07,0x7a,0x30,0x07,0x72, + 0xd0,0x06,0xe9,0x60,0x07,0x74,0xa0,0x07,0x76,0x40,0x07,0x7a,0x60,0x07,0x74,0xd0, + 0x06,0xe6,0x30,0x07,0x72,0xa0,0x07,0x73,0x20,0x07,0x7a,0x30,0x07,0x72,0xd0,0x06, + 0xe6,0x60,0x07,0x74,0xa0,0x07,0x76,0x40,0x07,0x7a,0x60,0x07,0x74,0xd0,0x06,0xf6, + 0x10,0x07,0x76,0xa0,0x07,0x71,0x60,0x07,0x7a,0x10,0x07,0x76,0xd0,0x06,0xf6,0x20, + 0x07,0x74,0xa0,0x07,0x73,0x20,0x07,0x7a,0x30,0x07,0x72,0xd0,0x06,0xf6,0x30,0x07, + 0x72,0xa0,0x07,0x73,0x20,0x07,0x7a,0x30,0x07,0x72,0xd0,0x06,0xf6,0x40,0x07,0x78, + 0xa0,0x07,0x76,0x40,0x07,0x7a,0x60,0x07,0x74,0xd0,0x06,0xf6,0x60,0x07,0x74,0xa0, + 0x07,0x76,0x40,0x07,0x7a,0x60,0x07,0x74,0xd0,0x06,0xf6,0x90,0x07,0x76,0xa0,0x07, + 0x71,0x20,0x07,0x78,0xa0,0x07,0x71,0x20,0x07,0x78,0xd0,0x06,0xf6,0x10,0x07,0x72, + 0x80,0x07,0x7a,0x10,0x07,0x72,0x80,0x07,0x7a,0x10,0x07,0x72,0x80,0x07,0x6d,0x60, + 0x0f,0x71,0x90,0x07,0x72,0xa0,0x07,0x72,0x50,0x07,0x76,0xa0,0x07,0x72,0x50,0x07, + 0x76,0xd0,0x06,0xf6,0x20,0x07,0x75,0x60,0x07,0x7a,0x20,0x07,0x75,0x60,0x07,0x7a, + 0x20,0x07,0x75,0x60,0x07,0x6d,0x60,0x0f,0x75,0x10,0x07,0x72,0xa0,0x07,0x75,0x10, + 0x07,0x72,0xa0,0x07,0x75,0x10,0x07,0x72,0xd0,0x06,0xf6,0x10,0x07,0x70,0x20,0x07, + 0x74,0xa0,0x07,0x71,0x00,0x07,0x72,0x40,0x07,0x7a,0x10,0x07,0x70,0x20,0x07,0x74, + 0xd0,0x06,0xee,0x80,0x07,0x7a,0x10,0x07,0x76,0xa0,0x07,0x73,0x20,0x07,0x43,0x18, + 0x04,0x00,0x80,0x00,0x00,0x00,0x00,0x00,0x80,0x21,0x8c,0x03,0x04,0x80,0x00,0x00, + 0x00,0x00,0x00,0x40,0x16,0x08,0x00,0x00,0x00,0x08,0x00,0x00,0x00,0x32,0x1e,0x98, + 0x10,0x19,0x11,0x4c,0x90,0x8c,0x09,0x26,0x47,0xc6,0x04,0x43,0x5a,0x23,0x00,0x25, + 0x50,0x04,0x85,0x50,0x10,0x65,0x40,0x70,0x2c,0xa1,0x29,0x00,0x00,0x79,0x18,0x00, + 0x00,0xd7,0x00,0x00,0x00,0x1a,0x03,0x4c,0x10,0x97,0x29,0xa2,0x25,0x10,0xab,0x32, + 0xb9,0xb9,0xb4,0x37,0xb7,0x21,0xc6,0x42,0x3c,0x00,0x84,0x50,0xb9,0x1b,0x43,0x0b, + 0x93,0xfb,0x9a,0x4b,0xd3,0x2b,0x1b,0x62,0x2c,0xc3,0x23,0x2c,0x05,0xd9,0x20,0x08, + 0x0e,0x8e,0xad,0x0c,0x84,0x89,0xc9,0xaa,0x09,0xc4,0xae,0x4c,0x6e,0x2e,0xed,0xcd, + 0x0d,0x64,0x26,0x07,0x46,0xc6,0xc5,0xe6,0x06,0x04,0xa5,0xad,0x8c,0x2e,0x8c,0xcd, + 0xac,0xac,0x65,0x26,0x07,0x46,0xc6,0xc5,0xe6,0x26,0x65,0x88,0xf0,0x10,0x43,0x8c, + 0x65,0x58,0x8c,0x45,0x60,0xd1,0x54,0x46,0x17,0xc6,0x36,0x04,0x79,0x8e,0x65,0x58, + 0x84,0x45,0xe0,0x16,0x96,0x26,0xe7,0x32,0xf6,0xd6,0x06,0x97,0xc6,0x56,0xe6,0x42, + 0x56,0xe6,0xf6,0x26,0xd7,0x36,0xf7,0x45,0x96,0x36,0x17,0x26,0xc6,0x56,0x36,0x44, + 0x78,0x12,0x72,0x61,0x69,0x72,0x2e,0x63,0x6f,0x6d,0x70,0x69,0x6c,0x65,0x2e,0x66, + 0x61,0x73,0x74,0x5f,0x6d,0x61,0x74,0x68,0x5f,0x65,0x6e,0x61,0x62,0x6c,0x65,0x43, + 0x84,0x67,0x21,0x19,0x84,0xa5,0xc9,0xb9,0x8c,0xbd,0xb5,0xc1,0xa5,0xb1,0x95,0xb9, + 0x98,0xc9,0x85,0xb5,0x95,0x89,0xd5,0x99,0x99,0x95,0xc9,0x7d,0x99,0x95,0xd1,0x8d, + 0xa1,0x7d,0x95,0xb9,0x85,0x89,0xb1,0x95,0x0d,0x11,0x9e,0x86,0x61,0x10,0x96,0x26, + 0xe7,0x32,0xf6,0xd6,0x06,0x97,0xc6,0x56,0xe6,0xe2,0x16,0x46,0x97,0x66,0x57,0xf6, + 0x45,0xf6,0x56,0x27,0xc6,0x56,0xf6,0x45,0x96,0x36,0x17,0x26,0xc6,0x56,0x36,0x44, + 0x78,0x1e,0x92,0x41,0x58,0x9a,0x9c,0xcb,0xd8,0x5b,0x1b,0x5c,0x1a,0x5b,0x99,0x8b, + 0x5b,0x18,0x5d,0x9a,0x5d,0xd9,0x17,0xdb,0x9b,0xdb,0xd9,0x17,0xdb,0x9b,0xdb,0xd9, + 0x17,0x59,0xda,0x5c,0x98,0x18,0x5b,0xd9,0x10,0xe1,0x89,0x78,0x06,0x61,0x69,0x72, + 
0x2e,0x63,0x6f,0x6d,0x70,0x69,0x6c,0x65,0x2e,0x6e,0x61,0x74,0x69,0x76,0x65,0x5f, + 0x77,0x69,0x64,0x65,0x5f,0x76,0x65,0x63,0x74,0x6f,0x72,0x73,0x5f,0x64,0x69,0x73, + 0x61,0x62,0x6c,0x65,0x43,0x84,0x67,0x62,0x14,0x96,0x26,0xe7,0x22,0x57,0xe6,0x46, + 0x56,0x26,0xf7,0x45,0x17,0x26,0x77,0x56,0x46,0xc7,0x28,0x2c,0x4d,0xce,0x25,0x4c, + 0xee,0xec,0x8b,0x2e,0x0f,0xae,0xec,0xcb,0x2d,0xac,0xad,0x8c,0x86,0x19,0xdb,0x5b, + 0x18,0x1d,0x0d,0x99,0xb0,0x34,0x39,0x97,0x30,0xb9,0xb3,0x2f,0xb7,0xb0,0xb6,0x32, + 0x2a,0x66,0x72,0x61,0x67,0x5f,0x63,0x6f,0x6c,0x6f,0x72,0x43,0x98,0xa7,0x5a,0x84, + 0xc7,0x7a,0xae,0x07,0x7b,0xb2,0x21,0xc2,0xa3,0x51,0x0a,0x4b,0x93,0x73,0x31,0x93, + 0x0b,0x3b,0x6b,0x2b,0x73,0xa3,0xfb,0x4a,0x73,0x83,0xab,0xa3,0xe3,0x52,0x37,0x57, + 0x26,0x87,0xc2,0xf6,0x36,0xe6,0x06,0x93,0x42,0x25,0x2c,0x4d,0xce,0x65,0xac,0xcc, + 0x8d,0xae,0x4c,0x8e,0x4f,0x58,0x9a,0x9c,0x0b,0x5c,0x99,0xdc,0x1c,0x5c,0xd9,0x18, + 0x5d,0x9a,0x5d,0x19,0x85,0x3a,0xbb,0x21,0xd2,0x22,0x3c,0xdc,0xd3,0x3d,0xde,0xf3, + 0x3d,0xd6,0x73,0x3d,0xd8,0x03,0x06,0x5c,0xea,0xe6,0xca,0xe4,0x50,0xd8,0xde,0xc6, + 0xdc,0x62,0x52,0x58,0x8c,0xbd,0xb1,0xbd,0xc9,0x0d,0x91,0x96,0xe1,0xe1,0x1e,0x31, + 0x78,0xbc,0xe7,0x7b,0xac,0xe7,0x7a,0xb0,0x67,0x0c,0xb8,0x84,0xa5,0xc9,0xb9,0xd0, + 0x95,0xe1,0xd1,0xd5,0xc9,0x95,0x51,0x0a,0x4b,0x93,0x73,0x61,0x7b,0x1b,0x0b,0xa3, + 0x4b,0x7b,0x73,0xfb,0x4a,0x73,0x23,0x2b,0xc3,0xa3,0x12,0x96,0x26,0xe7,0x32,0x17, + 0xd6,0x06,0xc7,0x56,0x46,0x8c,0xae,0x0c,0x8f,0xae,0x4e,0xae,0x4c,0x86,0x8c,0xc7, + 0x8c,0xed,0x2d,0x8c,0x8e,0x05,0x64,0x2e,0xac,0x0d,0x8e,0xad,0xcc,0x87,0x03,0x5d, + 0x19,0xde,0x10,0x6a,0x21,0x9e,0x32,0x78,0xcc,0x60,0x11,0x96,0xe1,0x39,0x83,0xc7, + 0x7a,0xd0,0xe0,0xc1,0x9e,0x34,0xe0,0x12,0x96,0x26,0xe7,0x32,0x17,0xd6,0x06,0xc7, + 0x56,0x26,0xc7,0x63,0x2e,0xac,0x0d,0x8e,0xad,0x4c,0x8e,0x08,0x5d,0x19,0xde,0x54, + 0x1b,0x1c,0x9b,0xdc,0x10,0x69,0x39,0x9e,0x35,0x78,0xcc,0x60,0x11,0x96,0xe1,0xb1, + 0x1e,0x36,0x78,0xb0,0xa7,0x0d,0x86,0x20,0x4f,0x18,0x3c,0x64,0xf0,0xa8,0xc1,0xe3, + 0x06,0x43,0x0c,0x04,0x78,0xb6,0xe7,0x0d,0x46,0x44,0xec,0xc0,0x0e,0xf6,0xd0,0x0e, + 0x6e,0xd0,0x0e,0xef,0x40,0x0e,0xf5,0xc0,0x0e,0xe5,0xe0,0x06,0xe6,0xc0,0x0e,0xe1, + 0x70,0x0e,0xf3,0x30,0x45,0x08,0x86,0x11,0x0a,0x3b,0xb0,0x83,0x3d,0xb4,0x83,0x1b, + 0xa4,0x03,0x39,0x94,0x83,0x3b,0xd0,0xc3,0x94,0xa0,0x18,0xb1,0x84,0x43,0x3a,0xc8, + 0x83,0x1b,0xd8,0x43,0x39,0xc8,0xc3,0x3c,0xa4,0xc3,0x3b,0xb8,0xc3,0x94,0xc0,0x18, + 0x41,0x85,0x43,0x3a,0xc8,0x83,0x1b,0xb0,0x43,0x38,0xb8,0xc3,0x39,0xd4,0x43,0x38, + 0x9c,0x43,0x39,0xfc,0x82,0x3d,0x94,0x83,0x3c,0xcc,0x43,0x3a,0xbc,0x83,0x3b,0x4c, + 0x09,0x90,0x11,0x53,0x38,0xa4,0x83,0x3c,0xb8,0xc1,0x38,0xbc,0x43,0x3b,0xc0,0x43, + 0x3a,0xb0,0x43,0x39,0xfc,0xc2,0x3b,0xc0,0x03,0x3d,0xa4,0xc3,0x3b,0xb8,0xc3,0x3c, + 0x4c,0x31,0x14,0xc6,0x81,0x24,0x6a,0x04,0x13,0x0e,0xe9,0x20,0x0f,0x6e,0x60,0x0e, + 0xf2,0x10,0x0e,0xe7,0xd0,0x0e,0xe5,0xe0,0x0e,0xf4,0x30,0x25,0x80,0x03,0x00,0x00, + 0x00,0x79,0x18,0x00,0x00,0x6d,0x00,0x00,0x00,0x33,0x08,0x80,0x1c,0xc4,0xe1,0x1c, + 0x66,0x14,0x01,0x3d,0x88,0x43,0x38,0x84,0xc3,0x8c,0x42,0x80,0x07,0x79,0x78,0x07, + 0x73,0x98,0x71,0x0c,0xe6,0x00,0x0f,0xed,0x10,0x0e,0xf4,0x80,0x0e,0x33,0x0c,0x42, + 0x1e,0xc2,0xc1,0x1d,0xce,0xa1,0x1c,0x66,0x30,0x05,0x3d,0x88,0x43,0x38,0x84,0x83, + 0x1b,0xcc,0x03,0x3d,0xc8,0x43,0x3d,0x8c,0x03,0x3d,0xcc,0x78,0x8c,0x74,0x70,0x07, + 0x7b,0x08,0x07,0x79,0x48,0x87,0x70,0x70,0x07,0x7a,0x70,0x03,0x76,0x78,0x87,0x70, + 0x20,0x87,0x19,0xcc,0x11,0x0e,0xec,0x90,0x0e,0xe1,0x30,0x0f,0x6e,0x30,0x0f,0xe3, + 
0xf0,0x0e,0xf0,0x50,0x0e,0x33,0x10,0xc4,0x1d,0xde,0x21,0x1c,0xd8,0x21,0x1d,0xc2, + 0x61,0x1e,0x66,0x30,0x89,0x3b,0xbc,0x83,0x3b,0xd0,0x43,0x39,0xb4,0x03,0x3c,0xbc, + 0x83,0x3c,0x84,0x03,0x3b,0xcc,0xf0,0x14,0x76,0x60,0x07,0x7b,0x68,0x07,0x37,0x68, + 0x87,0x72,0x68,0x07,0x37,0x80,0x87,0x70,0x90,0x87,0x70,0x60,0x07,0x76,0x28,0x07, + 0x76,0xf8,0x05,0x76,0x78,0x87,0x77,0x80,0x87,0x5f,0x08,0x87,0x71,0x18,0x87,0x72, + 0x98,0x87,0x79,0x98,0x81,0x2c,0xee,0xf0,0x0e,0xee,0xe0,0x0e,0xf5,0xc0,0x0e,0xec, + 0x30,0x03,0x62,0xc8,0xa1,0x1c,0xe4,0xa1,0x1c,0xcc,0xa1,0x1c,0xe4,0xa1,0x1c,0xdc, + 0x61,0x1c,0xca,0x21,0x1c,0xc4,0x81,0x1d,0xca,0x61,0x06,0xd6,0x90,0x43,0x39,0xc8, + 0x43,0x39,0x98,0x43,0x39,0xc8,0x43,0x39,0xb8,0xc3,0x38,0x94,0x43,0x38,0x88,0x03, + 0x3b,0x94,0xc3,0x2f,0xbc,0x83,0x3c,0xfc,0x82,0x3b,0xd4,0x03,0x3b,0xb0,0xc3,0x0c, + 0xc7,0x69,0x87,0x70,0x58,0x87,0x72,0x70,0x83,0x74,0x68,0x07,0x78,0x60,0x87,0x74, + 0x18,0x87,0x74,0xa0,0x87,0x19,0xce,0x53,0x0f,0xee,0x00,0x0f,0xf2,0x50,0x0e,0xe4, + 0x90,0x0e,0xe3,0x40,0x0f,0xe1,0x20,0x0e,0xec,0x50,0x0e,0x33,0x20,0x28,0x1d,0xdc, + 0xc1,0x1e,0xc2,0x41,0x1e,0xd2,0x21,0x1c,0xdc,0x81,0x1e,0xdc,0xe0,0x1c,0xe4,0xe1, + 0x1d,0xea,0x01,0x1e,0x66,0x18,0x51,0x38,0xb0,0x43,0x3a,0x9c,0x83,0x3b,0xcc,0x50, + 0x24,0x76,0x60,0x07,0x7b,0x68,0x07,0x37,0x60,0x87,0x77,0x78,0x07,0x78,0x98,0x51, + 0x4c,0xf4,0x90,0x0f,0xf0,0x50,0x0e,0x33,0x1e,0x6a,0x1e,0xca,0x61,0x1c,0xe8,0x21, + 0x1d,0xde,0xc1,0x1d,0x7e,0x01,0x1e,0xe4,0xa1,0x1c,0xcc,0x21,0x1d,0xf0,0x61,0x06, + 0x54,0x85,0x83,0x38,0xcc,0xc3,0x3b,0xb0,0x43,0x3d,0xd0,0x43,0x39,0xfc,0xc2,0x3c, + 0xe4,0x43,0x3b,0x88,0xc3,0x3b,0xb0,0xc3,0x8c,0xc5,0x0a,0x87,0x79,0x98,0x87,0x77, + 0x18,0x87,0x74,0x08,0x07,0x7a,0x28,0x07,0x72,0x00,0x00,0x00,0x00,0x71,0x20,0x00, + 0x00,0x08,0x00,0x00,0x00,0x16,0xb0,0x01,0x48,0xe4,0x4b,0x00,0xf3,0x2c,0xc4,0x3f, + 0x11,0xd7,0x44,0x45,0xc4,0x6f,0x0f,0x7e,0x85,0x17,0xb7,0x6d,0x00,0x05,0x03,0x20, + 0x0d,0x0d,0x00,0x00,0x00,0x61,0x20,0x00,0x00,0x0f,0x00,0x00,0x00,0x13,0x04,0x41, + 0x2c,0x10,0x00,0x00,0x00,0x06,0x00,0x00,0x00,0xc4,0x46,0x00,0xc6,0x12,0x80,0x80, + 0xd4,0x08,0x40,0x0d,0x90,0x98,0x01,0xa0,0x30,0x03,0x40,0x60,0x04,0x00,0x00,0x00, + 0x00,0x83,0x0c,0x8b,0x60,0x8c,0x18,0x28,0x42,0x40,0x29,0x49,0x50,0x20,0x86,0x60, + 0x01,0x23,0x9f,0xd9,0x06,0x23,0x00,0x32,0x00,0x00,0x00,0x00,0x00, +}; +static const char _sgl_vs_source_metal_sim[698] = { + 0x23,0x69,0x6e,0x63,0x6c,0x75,0x64,0x65,0x20,0x3c,0x6d,0x65,0x74,0x61,0x6c,0x5f, + 0x73,0x74,0x64,0x6c,0x69,0x62,0x3e,0x0a,0x23,0x69,0x6e,0x63,0x6c,0x75,0x64,0x65, + 0x20,0x3c,0x73,0x69,0x6d,0x64,0x2f,0x73,0x69,0x6d,0x64,0x2e,0x68,0x3e,0x0a,0x0a, + 0x75,0x73,0x69,0x6e,0x67,0x20,0x6e,0x61,0x6d,0x65,0x73,0x70,0x61,0x63,0x65,0x20, + 0x6d,0x65,0x74,0x61,0x6c,0x3b,0x0a,0x0a,0x73,0x74,0x72,0x75,0x63,0x74,0x20,0x76, + 0x73,0x5f,0x70,0x61,0x72,0x61,0x6d,0x73,0x0a,0x7b,0x0a,0x20,0x20,0x20,0x20,0x66, + 0x6c,0x6f,0x61,0x74,0x34,0x78,0x34,0x20,0x6d,0x76,0x70,0x3b,0x0a,0x20,0x20,0x20, + 0x20,0x66,0x6c,0x6f,0x61,0x74,0x34,0x78,0x34,0x20,0x74,0x6d,0x3b,0x0a,0x7d,0x3b, + 0x0a,0x0a,0x73,0x74,0x72,0x75,0x63,0x74,0x20,0x6d,0x61,0x69,0x6e,0x30,0x5f,0x6f, + 0x75,0x74,0x0a,0x7b,0x0a,0x20,0x20,0x20,0x20,0x66,0x6c,0x6f,0x61,0x74,0x34,0x20, + 0x75,0x76,0x20,0x5b,0x5b,0x75,0x73,0x65,0x72,0x28,0x6c,0x6f,0x63,0x6e,0x30,0x29, + 0x5d,0x5d,0x3b,0x0a,0x20,0x20,0x20,0x20,0x66,0x6c,0x6f,0x61,0x74,0x34,0x20,0x63, + 0x6f,0x6c,0x6f,0x72,0x20,0x5b,0x5b,0x75,0x73,0x65,0x72,0x28,0x6c,0x6f,0x63,0x6e, + 0x31,0x29,0x5d,0x5d,0x3b,0x0a,0x20,0x20,0x20,0x20,0x66,0x6c,0x6f,0x61,0x74,0x34, + 
0x20,0x67,0x6c,0x5f,0x50,0x6f,0x73,0x69,0x74,0x69,0x6f,0x6e,0x20,0x5b,0x5b,0x70, + 0x6f,0x73,0x69,0x74,0x69,0x6f,0x6e,0x5d,0x5d,0x3b,0x0a,0x7d,0x3b,0x0a,0x0a,0x73, + 0x74,0x72,0x75,0x63,0x74,0x20,0x6d,0x61,0x69,0x6e,0x30,0x5f,0x69,0x6e,0x0a,0x7b, + 0x0a,0x20,0x20,0x20,0x20,0x66,0x6c,0x6f,0x61,0x74,0x34,0x20,0x70,0x6f,0x73,0x69, + 0x74,0x69,0x6f,0x6e,0x20,0x5b,0x5b,0x61,0x74,0x74,0x72,0x69,0x62,0x75,0x74,0x65, + 0x28,0x30,0x29,0x5d,0x5d,0x3b,0x0a,0x20,0x20,0x20,0x20,0x66,0x6c,0x6f,0x61,0x74, + 0x32,0x20,0x74,0x65,0x78,0x63,0x6f,0x6f,0x72,0x64,0x30,0x20,0x5b,0x5b,0x61,0x74, + 0x74,0x72,0x69,0x62,0x75,0x74,0x65,0x28,0x31,0x29,0x5d,0x5d,0x3b,0x0a,0x20,0x20, + 0x20,0x20,0x66,0x6c,0x6f,0x61,0x74,0x34,0x20,0x63,0x6f,0x6c,0x6f,0x72,0x30,0x20, + 0x5b,0x5b,0x61,0x74,0x74,0x72,0x69,0x62,0x75,0x74,0x65,0x28,0x32,0x29,0x5d,0x5d, + 0x3b,0x0a,0x7d,0x3b,0x0a,0x0a,0x23,0x6c,0x69,0x6e,0x65,0x20,0x31,0x36,0x20,0x22, + 0x22,0x0a,0x76,0x65,0x72,0x74,0x65,0x78,0x20,0x6d,0x61,0x69,0x6e,0x30,0x5f,0x6f, + 0x75,0x74,0x20,0x6d,0x61,0x69,0x6e,0x30,0x28,0x6d,0x61,0x69,0x6e,0x30,0x5f,0x69, + 0x6e,0x20,0x69,0x6e,0x20,0x5b,0x5b,0x73,0x74,0x61,0x67,0x65,0x5f,0x69,0x6e,0x5d, + 0x5d,0x2c,0x20,0x63,0x6f,0x6e,0x73,0x74,0x61,0x6e,0x74,0x20,0x76,0x73,0x5f,0x70, + 0x61,0x72,0x61,0x6d,0x73,0x26,0x20,0x5f,0x32,0x30,0x20,0x5b,0x5b,0x62,0x75,0x66, + 0x66,0x65,0x72,0x28,0x30,0x29,0x5d,0x5d,0x29,0x0a,0x7b,0x0a,0x20,0x20,0x20,0x20, + 0x6d,0x61,0x69,0x6e,0x30,0x5f,0x6f,0x75,0x74,0x20,0x6f,0x75,0x74,0x20,0x3d,0x20, + 0x7b,0x7d,0x3b,0x0a,0x23,0x6c,0x69,0x6e,0x65,0x20,0x31,0x36,0x20,0x22,0x22,0x0a, + 0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x2e,0x67,0x6c,0x5f,0x50,0x6f,0x73,0x69,0x74, + 0x69,0x6f,0x6e,0x20,0x3d,0x20,0x5f,0x32,0x30,0x2e,0x6d,0x76,0x70,0x20,0x2a,0x20, + 0x69,0x6e,0x2e,0x70,0x6f,0x73,0x69,0x74,0x69,0x6f,0x6e,0x3b,0x0a,0x23,0x6c,0x69, + 0x6e,0x65,0x20,0x31,0x37,0x20,0x22,0x22,0x0a,0x20,0x20,0x20,0x20,0x6f,0x75,0x74, + 0x2e,0x75,0x76,0x20,0x3d,0x20,0x5f,0x32,0x30,0x2e,0x74,0x6d,0x20,0x2a,0x20,0x66, + 0x6c,0x6f,0x61,0x74,0x34,0x28,0x69,0x6e,0x2e,0x74,0x65,0x78,0x63,0x6f,0x6f,0x72, + 0x64,0x30,0x2c,0x20,0x30,0x2e,0x30,0x2c,0x20,0x31,0x2e,0x30,0x29,0x3b,0x0a,0x23, + 0x6c,0x69,0x6e,0x65,0x20,0x31,0x38,0x20,0x22,0x22,0x0a,0x20,0x20,0x20,0x20,0x6f, + 0x75,0x74,0x2e,0x63,0x6f,0x6c,0x6f,0x72,0x20,0x3d,0x20,0x69,0x6e,0x2e,0x63,0x6f, + 0x6c,0x6f,0x72,0x30,0x3b,0x0a,0x20,0x20,0x20,0x20,0x72,0x65,0x74,0x75,0x72,0x6e, + 0x20,0x6f,0x75,0x74,0x3b,0x0a,0x7d,0x0a,0x0a,0x00, +}; +static const char _sgl_fs_source_metal_sim[473] = { + 0x23,0x69,0x6e,0x63,0x6c,0x75,0x64,0x65,0x20,0x3c,0x6d,0x65,0x74,0x61,0x6c,0x5f, + 0x73,0x74,0x64,0x6c,0x69,0x62,0x3e,0x0a,0x23,0x69,0x6e,0x63,0x6c,0x75,0x64,0x65, + 0x20,0x3c,0x73,0x69,0x6d,0x64,0x2f,0x73,0x69,0x6d,0x64,0x2e,0x68,0x3e,0x0a,0x0a, + 0x75,0x73,0x69,0x6e,0x67,0x20,0x6e,0x61,0x6d,0x65,0x73,0x70,0x61,0x63,0x65,0x20, + 0x6d,0x65,0x74,0x61,0x6c,0x3b,0x0a,0x0a,0x73,0x74,0x72,0x75,0x63,0x74,0x20,0x6d, + 0x61,0x69,0x6e,0x30,0x5f,0x6f,0x75,0x74,0x0a,0x7b,0x0a,0x20,0x20,0x20,0x20,0x66, + 0x6c,0x6f,0x61,0x74,0x34,0x20,0x66,0x72,0x61,0x67,0x5f,0x63,0x6f,0x6c,0x6f,0x72, + 0x20,0x5b,0x5b,0x63,0x6f,0x6c,0x6f,0x72,0x28,0x30,0x29,0x5d,0x5d,0x3b,0x0a,0x7d, + 0x3b,0x0a,0x0a,0x73,0x74,0x72,0x75,0x63,0x74,0x20,0x6d,0x61,0x69,0x6e,0x30,0x5f, + 0x69,0x6e,0x0a,0x7b,0x0a,0x20,0x20,0x20,0x20,0x66,0x6c,0x6f,0x61,0x74,0x34,0x20, + 0x75,0x76,0x20,0x5b,0x5b,0x75,0x73,0x65,0x72,0x28,0x6c,0x6f,0x63,0x6e,0x30,0x29, + 0x5d,0x5d,0x3b,0x0a,0x20,0x20,0x20,0x20,0x66,0x6c,0x6f,0x61,0x74,0x34,0x20,0x63, + 
0x6f,0x6c,0x6f,0x72,0x20,0x5b,0x5b,0x75,0x73,0x65,0x72,0x28,0x6c,0x6f,0x63,0x6e, + 0x31,0x29,0x5d,0x5d,0x3b,0x0a,0x7d,0x3b,0x0a,0x0a,0x23,0x6c,0x69,0x6e,0x65,0x20, + 0x31,0x31,0x20,0x22,0x22,0x0a,0x66,0x72,0x61,0x67,0x6d,0x65,0x6e,0x74,0x20,0x6d, + 0x61,0x69,0x6e,0x30,0x5f,0x6f,0x75,0x74,0x20,0x6d,0x61,0x69,0x6e,0x30,0x28,0x6d, + 0x61,0x69,0x6e,0x30,0x5f,0x69,0x6e,0x20,0x69,0x6e,0x20,0x5b,0x5b,0x73,0x74,0x61, + 0x67,0x65,0x5f,0x69,0x6e,0x5d,0x5d,0x2c,0x20,0x74,0x65,0x78,0x74,0x75,0x72,0x65, + 0x32,0x64,0x3c,0x66,0x6c,0x6f,0x61,0x74,0x3e,0x20,0x74,0x65,0x78,0x20,0x5b,0x5b, + 0x74,0x65,0x78,0x74,0x75,0x72,0x65,0x28,0x30,0x29,0x5d,0x5d,0x2c,0x20,0x73,0x61, + 0x6d,0x70,0x6c,0x65,0x72,0x20,0x74,0x65,0x78,0x53,0x6d,0x70,0x6c,0x72,0x20,0x5b, + 0x5b,0x73,0x61,0x6d,0x70,0x6c,0x65,0x72,0x28,0x30,0x29,0x5d,0x5d,0x29,0x0a,0x7b, + 0x0a,0x20,0x20,0x20,0x20,0x6d,0x61,0x69,0x6e,0x30,0x5f,0x6f,0x75,0x74,0x20,0x6f, + 0x75,0x74,0x20,0x3d,0x20,0x7b,0x7d,0x3b,0x0a,0x23,0x6c,0x69,0x6e,0x65,0x20,0x31, + 0x31,0x20,0x22,0x22,0x0a,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x2e,0x66,0x72,0x61, + 0x67,0x5f,0x63,0x6f,0x6c,0x6f,0x72,0x20,0x3d,0x20,0x74,0x65,0x78,0x2e,0x73,0x61, + 0x6d,0x70,0x6c,0x65,0x28,0x74,0x65,0x78,0x53,0x6d,0x70,0x6c,0x72,0x2c,0x20,0x69, + 0x6e,0x2e,0x75,0x76,0x2e,0x78,0x79,0x29,0x20,0x2a,0x20,0x69,0x6e,0x2e,0x63,0x6f, + 0x6c,0x6f,0x72,0x3b,0x0a,0x20,0x20,0x20,0x20,0x72,0x65,0x74,0x75,0x72,0x6e,0x20, + 0x6f,0x75,0x74,0x3b,0x0a,0x7d,0x0a,0x0a,0x00, +}; +#elif defined(SOKOL_D3D11) +static const uint8_t _sgl_vs_bytecode_hlsl4[1008] = { + 0x44,0x58,0x42,0x43,0xf3,0xd9,0xf4,0x58,0x44,0xc2,0xb9,0x96,0x56,0x7c,0xb3,0x71, + 0x84,0xc5,0xde,0x61,0x01,0x00,0x00,0x00,0xf0,0x03,0x00,0x00,0x05,0x00,0x00,0x00, + 0x34,0x00,0x00,0x00,0x14,0x01,0x00,0x00,0x78,0x01,0x00,0x00,0xe8,0x01,0x00,0x00, + 0x74,0x03,0x00,0x00,0x52,0x44,0x45,0x46,0xd8,0x00,0x00,0x00,0x01,0x00,0x00,0x00, + 0x48,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x1c,0x00,0x00,0x00,0x00,0x04,0xfe,0xff, + 0x10,0x81,0x00,0x00,0xaf,0x00,0x00,0x00,0x3c,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x01,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x76,0x73,0x5f,0x70,0x61,0x72,0x61,0x6d, + 0x73,0x00,0xab,0xab,0x3c,0x00,0x00,0x00,0x02,0x00,0x00,0x00,0x60,0x00,0x00,0x00, + 0x80,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x90,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x40,0x00,0x00,0x00,0x02,0x00,0x00,0x00,0x98,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0xa8,0x00,0x00,0x00,0x40,0x00,0x00,0x00,0x40,0x00,0x00,0x00, + 0x02,0x00,0x00,0x00,0x98,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x5f,0x32,0x30,0x5f, + 0x6d,0x76,0x70,0x00,0x02,0x00,0x03,0x00,0x04,0x00,0x04,0x00,0x00,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x5f,0x32,0x30,0x5f,0x74,0x6d,0x00,0x4d,0x69,0x63,0x72,0x6f, + 0x73,0x6f,0x66,0x74,0x20,0x28,0x52,0x29,0x20,0x48,0x4c,0x53,0x4c,0x20,0x53,0x68, + 0x61,0x64,0x65,0x72,0x20,0x43,0x6f,0x6d,0x70,0x69,0x6c,0x65,0x72,0x20,0x31,0x30, + 0x2e,0x31,0x00,0xab,0x49,0x53,0x47,0x4e,0x5c,0x00,0x00,0x00,0x03,0x00,0x00,0x00, + 0x08,0x00,0x00,0x00,0x50,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x03,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x0f,0x0f,0x00,0x00,0x50,0x00,0x00,0x00, + 0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x03,0x00,0x00,0x00,0x01,0x00,0x00,0x00, + 0x03,0x03,0x00,0x00,0x50,0x00,0x00,0x00,0x02,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x03,0x00,0x00,0x00,0x02,0x00,0x00,0x00,0x0f,0x0f,0x00,0x00,0x54,0x45,0x58,0x43, + 0x4f,0x4f,0x52,0x44,0x00,0xab,0xab,0xab,0x4f,0x53,0x47,0x4e,0x68,0x00,0x00,0x00, + 
0x03,0x00,0x00,0x00,0x08,0x00,0x00,0x00,0x50,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x03,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x0f,0x00,0x00,0x00, + 0x50,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x03,0x00,0x00,0x00, + 0x01,0x00,0x00,0x00,0x0f,0x00,0x00,0x00,0x59,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x01,0x00,0x00,0x00,0x03,0x00,0x00,0x00,0x02,0x00,0x00,0x00,0x0f,0x00,0x00,0x00, + 0x54,0x45,0x58,0x43,0x4f,0x4f,0x52,0x44,0x00,0x53,0x56,0x5f,0x50,0x6f,0x73,0x69, + 0x74,0x69,0x6f,0x6e,0x00,0xab,0xab,0xab,0x53,0x48,0x44,0x52,0x84,0x01,0x00,0x00, + 0x40,0x00,0x01,0x00,0x61,0x00,0x00,0x00,0x59,0x00,0x00,0x04,0x46,0x8e,0x20,0x00, + 0x00,0x00,0x00,0x00,0x08,0x00,0x00,0x00,0x5f,0x00,0x00,0x03,0xf2,0x10,0x10,0x00, + 0x00,0x00,0x00,0x00,0x5f,0x00,0x00,0x03,0x32,0x10,0x10,0x00,0x01,0x00,0x00,0x00, + 0x5f,0x00,0x00,0x03,0xf2,0x10,0x10,0x00,0x02,0x00,0x00,0x00,0x65,0x00,0x00,0x03, + 0xf2,0x20,0x10,0x00,0x00,0x00,0x00,0x00,0x65,0x00,0x00,0x03,0xf2,0x20,0x10,0x00, + 0x01,0x00,0x00,0x00,0x67,0x00,0x00,0x04,0xf2,0x20,0x10,0x00,0x02,0x00,0x00,0x00, + 0x01,0x00,0x00,0x00,0x68,0x00,0x00,0x02,0x01,0x00,0x00,0x00,0x38,0x00,0x00,0x08, + 0xf2,0x00,0x10,0x00,0x00,0x00,0x00,0x00,0x56,0x15,0x10,0x00,0x01,0x00,0x00,0x00, + 0x46,0x8e,0x20,0x00,0x00,0x00,0x00,0x00,0x05,0x00,0x00,0x00,0x32,0x00,0x00,0x0a, + 0xf2,0x00,0x10,0x00,0x00,0x00,0x00,0x00,0x06,0x10,0x10,0x00,0x01,0x00,0x00,0x00, + 0x46,0x8e,0x20,0x00,0x00,0x00,0x00,0x00,0x04,0x00,0x00,0x00,0x46,0x0e,0x10,0x00, + 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x08,0xf2,0x20,0x10,0x00,0x00,0x00,0x00,0x00, + 0x46,0x0e,0x10,0x00,0x00,0x00,0x00,0x00,0x46,0x8e,0x20,0x00,0x00,0x00,0x00,0x00, + 0x07,0x00,0x00,0x00,0x36,0x00,0x00,0x05,0xf2,0x20,0x10,0x00,0x01,0x00,0x00,0x00, + 0x46,0x1e,0x10,0x00,0x02,0x00,0x00,0x00,0x38,0x00,0x00,0x08,0xf2,0x00,0x10,0x00, + 0x00,0x00,0x00,0x00,0x56,0x15,0x10,0x00,0x00,0x00,0x00,0x00,0x46,0x8e,0x20,0x00, + 0x00,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x32,0x00,0x00,0x0a,0xf2,0x00,0x10,0x00, + 0x00,0x00,0x00,0x00,0x06,0x10,0x10,0x00,0x00,0x00,0x00,0x00,0x46,0x8e,0x20,0x00, + 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x46,0x0e,0x10,0x00,0x00,0x00,0x00,0x00, + 0x32,0x00,0x00,0x0a,0xf2,0x00,0x10,0x00,0x00,0x00,0x00,0x00,0xa6,0x1a,0x10,0x00, + 0x00,0x00,0x00,0x00,0x46,0x8e,0x20,0x00,0x00,0x00,0x00,0x00,0x02,0x00,0x00,0x00, + 0x46,0x0e,0x10,0x00,0x00,0x00,0x00,0x00,0x32,0x00,0x00,0x0a,0xf2,0x20,0x10,0x00, + 0x02,0x00,0x00,0x00,0xf6,0x1f,0x10,0x00,0x00,0x00,0x00,0x00,0x46,0x8e,0x20,0x00, + 0x00,0x00,0x00,0x00,0x03,0x00,0x00,0x00,0x46,0x0e,0x10,0x00,0x00,0x00,0x00,0x00, + 0x3e,0x00,0x00,0x01,0x53,0x54,0x41,0x54,0x74,0x00,0x00,0x00,0x09,0x00,0x00,0x00, + 0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x06,0x00,0x00,0x00,0x07,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + +}; +static const uint8_t _sgl_fs_bytecode_hlsl4[620] = { + 0x44,0x58,0x42,0x43,0x4f,0x7a,0xe1,0xcf,0x0c,0x89,0xc0,0x09,0x50,0x5a,0xca,0xe9, + 0x75,0x77,0xd1,0x26,0x01,0x00,0x00,0x00,0x6c,0x02,0x00,0x00,0x05,0x00,0x00,0x00, + 0x34,0x00,0x00,0x00,0xd4,0x00,0x00,0x00,0x20,0x01,0x00,0x00,0x54,0x01,0x00,0x00, + 
0xf0,0x01,0x00,0x00,0x52,0x44,0x45,0x46,0x98,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x02,0x00,0x00,0x00,0x1c,0x00,0x00,0x00,0x00,0x04,0xff,0xff, + 0x10,0x81,0x00,0x00,0x6d,0x00,0x00,0x00,0x5c,0x00,0x00,0x00,0x03,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x01,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x69,0x00,0x00,0x00,0x02,0x00,0x00,0x00, + 0x05,0x00,0x00,0x00,0x04,0x00,0x00,0x00,0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00, + 0x01,0x00,0x00,0x00,0x0d,0x00,0x00,0x00,0x5f,0x74,0x65,0x78,0x5f,0x73,0x61,0x6d, + 0x70,0x6c,0x65,0x72,0x00,0x74,0x65,0x78,0x00,0x4d,0x69,0x63,0x72,0x6f,0x73,0x6f, + 0x66,0x74,0x20,0x28,0x52,0x29,0x20,0x48,0x4c,0x53,0x4c,0x20,0x53,0x68,0x61,0x64, + 0x65,0x72,0x20,0x43,0x6f,0x6d,0x70,0x69,0x6c,0x65,0x72,0x20,0x31,0x30,0x2e,0x31, + 0x00,0xab,0xab,0xab,0x49,0x53,0x47,0x4e,0x44,0x00,0x00,0x00,0x02,0x00,0x00,0x00, + 0x08,0x00,0x00,0x00,0x38,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x03,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x0f,0x03,0x00,0x00,0x38,0x00,0x00,0x00, + 0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x03,0x00,0x00,0x00,0x01,0x00,0x00,0x00, + 0x0f,0x0f,0x00,0x00,0x54,0x45,0x58,0x43,0x4f,0x4f,0x52,0x44,0x00,0xab,0xab,0xab, + 0x4f,0x53,0x47,0x4e,0x2c,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x08,0x00,0x00,0x00, + 0x20,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x03,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x0f,0x00,0x00,0x00,0x53,0x56,0x5f,0x54,0x61,0x72,0x67,0x65, + 0x74,0x00,0xab,0xab,0x53,0x48,0x44,0x52,0x94,0x00,0x00,0x00,0x40,0x00,0x00,0x00, + 0x25,0x00,0x00,0x00,0x5a,0x00,0x00,0x03,0x00,0x60,0x10,0x00,0x00,0x00,0x00,0x00, + 0x58,0x18,0x00,0x04,0x00,0x70,0x10,0x00,0x00,0x00,0x00,0x00,0x55,0x55,0x00,0x00, + 0x62,0x10,0x00,0x03,0x32,0x10,0x10,0x00,0x00,0x00,0x00,0x00,0x62,0x10,0x00,0x03, + 0xf2,0x10,0x10,0x00,0x01,0x00,0x00,0x00,0x65,0x00,0x00,0x03,0xf2,0x20,0x10,0x00, + 0x00,0x00,0x00,0x00,0x68,0x00,0x00,0x02,0x01,0x00,0x00,0x00,0x45,0x00,0x00,0x09, + 0xf2,0x00,0x10,0x00,0x00,0x00,0x00,0x00,0x46,0x10,0x10,0x00,0x00,0x00,0x00,0x00, + 0x46,0x7e,0x10,0x00,0x00,0x00,0x00,0x00,0x00,0x60,0x10,0x00,0x00,0x00,0x00,0x00, + 0x38,0x00,0x00,0x07,0xf2,0x20,0x10,0x00,0x00,0x00,0x00,0x00,0x46,0x0e,0x10,0x00, + 0x00,0x00,0x00,0x00,0x46,0x1e,0x10,0x00,0x01,0x00,0x00,0x00,0x3e,0x00,0x00,0x01, + 0x53,0x54,0x41,0x54,0x74,0x00,0x00,0x00,0x03,0x00,0x00,0x00,0x01,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x03,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +}; +#elif defined(SOKOL_WGPU) +static const uint8_t _sgl_vs_bytecode_wgpu[1932] = { + 0x03,0x02,0x23,0x07,0x00,0x00,0x01,0x00,0x08,0x00,0x08,0x00,0x32,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x11,0x00,0x02,0x00,0x01,0x00,0x00,0x00,0x0b,0x00,0x06,0x00, + 0x02,0x00,0x00,0x00,0x47,0x4c,0x53,0x4c,0x2e,0x73,0x74,0x64,0x2e,0x34,0x35,0x30, + 0x00,0x00,0x00,0x00,0x0e,0x00,0x03,0x00,0x00,0x00,0x00,0x00,0x01,0x00,0x00,0x00, + 0x0f,0x00,0x0d,0x00,0x00,0x00,0x00,0x00,0x05,0x00,0x00,0x00,0x6d,0x61,0x69,0x6e, + 0x00,0x00,0x00,0x00,0x0e,0x00,0x00,0x00,0x19,0x00,0x00,0x00,0x1e,0x00,0x00,0x00, + 
0x24,0x00,0x00,0x00,0x2c,0x00,0x00,0x00,0x2d,0x00,0x00,0x00,0x30,0x00,0x00,0x00, + 0x31,0x00,0x00,0x00,0x07,0x00,0x03,0x00,0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x03,0x00,0x37,0x00,0x02,0x00,0x00,0x00,0xc2,0x01,0x00,0x00,0x01,0x00,0x00,0x00, + 0x2f,0x2f,0x20,0x4f,0x70,0x4d,0x6f,0x64,0x75,0x6c,0x65,0x50,0x72,0x6f,0x63,0x65, + 0x73,0x73,0x65,0x64,0x20,0x63,0x6c,0x69,0x65,0x6e,0x74,0x20,0x76,0x75,0x6c,0x6b, + 0x61,0x6e,0x31,0x30,0x30,0x0a,0x2f,0x2f,0x20,0x4f,0x70,0x4d,0x6f,0x64,0x75,0x6c, + 0x65,0x50,0x72,0x6f,0x63,0x65,0x73,0x73,0x65,0x64,0x20,0x63,0x6c,0x69,0x65,0x6e, + 0x74,0x20,0x6f,0x70,0x65,0x6e,0x67,0x6c,0x31,0x30,0x30,0x0a,0x2f,0x2f,0x20,0x4f, + 0x70,0x4d,0x6f,0x64,0x75,0x6c,0x65,0x50,0x72,0x6f,0x63,0x65,0x73,0x73,0x65,0x64, + 0x20,0x74,0x61,0x72,0x67,0x65,0x74,0x2d,0x65,0x6e,0x76,0x20,0x76,0x75,0x6c,0x6b, + 0x61,0x6e,0x31,0x2e,0x30,0x0a,0x2f,0x2f,0x20,0x4f,0x70,0x4d,0x6f,0x64,0x75,0x6c, + 0x65,0x50,0x72,0x6f,0x63,0x65,0x73,0x73,0x65,0x64,0x20,0x74,0x61,0x72,0x67,0x65, + 0x74,0x2d,0x65,0x6e,0x76,0x20,0x6f,0x70,0x65,0x6e,0x67,0x6c,0x0a,0x2f,0x2f,0x20, + 0x4f,0x70,0x4d,0x6f,0x64,0x75,0x6c,0x65,0x50,0x72,0x6f,0x63,0x65,0x73,0x73,0x65, + 0x64,0x20,0x65,0x6e,0x74,0x72,0x79,0x2d,0x70,0x6f,0x69,0x6e,0x74,0x20,0x6d,0x61, + 0x69,0x6e,0x0a,0x23,0x6c,0x69,0x6e,0x65,0x20,0x31,0x0a,0x00,0x05,0x00,0x04,0x00, + 0x05,0x00,0x00,0x00,0x6d,0x61,0x69,0x6e,0x00,0x00,0x00,0x00,0x05,0x00,0x06,0x00, + 0x0c,0x00,0x00,0x00,0x67,0x6c,0x5f,0x50,0x65,0x72,0x56,0x65,0x72,0x74,0x65,0x78, + 0x00,0x00,0x00,0x00,0x06,0x00,0x06,0x00,0x0c,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x67,0x6c,0x5f,0x50,0x6f,0x73,0x69,0x74,0x69,0x6f,0x6e,0x00,0x06,0x00,0x07,0x00, + 0x0c,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x67,0x6c,0x5f,0x50,0x6f,0x69,0x6e,0x74, + 0x53,0x69,0x7a,0x65,0x00,0x00,0x00,0x00,0x06,0x00,0x07,0x00,0x0c,0x00,0x00,0x00, + 0x02,0x00,0x00,0x00,0x67,0x6c,0x5f,0x43,0x6c,0x69,0x70,0x44,0x69,0x73,0x74,0x61, + 0x6e,0x63,0x65,0x00,0x06,0x00,0x07,0x00,0x0c,0x00,0x00,0x00,0x03,0x00,0x00,0x00, + 0x67,0x6c,0x5f,0x43,0x75,0x6c,0x6c,0x44,0x69,0x73,0x74,0x61,0x6e,0x63,0x65,0x00, + 0x05,0x00,0x03,0x00,0x0e,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x05,0x00,0x05,0x00, + 0x12,0x00,0x00,0x00,0x76,0x73,0x5f,0x70,0x61,0x72,0x61,0x6d,0x73,0x00,0x00,0x00, + 0x06,0x00,0x04,0x00,0x12,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x6d,0x76,0x70,0x00, + 0x06,0x00,0x04,0x00,0x12,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x74,0x6d,0x00,0x00, + 0x05,0x00,0x03,0x00,0x14,0x00,0x00,0x00,0x5f,0x32,0x30,0x00,0x05,0x00,0x05,0x00, + 0x19,0x00,0x00,0x00,0x70,0x6f,0x73,0x69,0x74,0x69,0x6f,0x6e,0x00,0x00,0x00,0x00, + 0x05,0x00,0x03,0x00,0x1e,0x00,0x00,0x00,0x75,0x76,0x00,0x00,0x05,0x00,0x05,0x00, + 0x24,0x00,0x00,0x00,0x74,0x65,0x78,0x63,0x6f,0x6f,0x72,0x64,0x30,0x00,0x00,0x00, + 0x05,0x00,0x04,0x00,0x2c,0x00,0x00,0x00,0x63,0x6f,0x6c,0x6f,0x72,0x00,0x00,0x00, + 0x05,0x00,0x04,0x00,0x2d,0x00,0x00,0x00,0x63,0x6f,0x6c,0x6f,0x72,0x30,0x00,0x00, + 0x05,0x00,0x05,0x00,0x30,0x00,0x00,0x00,0x67,0x6c,0x5f,0x56,0x65,0x72,0x74,0x65, + 0x78,0x49,0x44,0x00,0x05,0x00,0x06,0x00,0x31,0x00,0x00,0x00,0x67,0x6c,0x5f,0x49, + 0x6e,0x73,0x74,0x61,0x6e,0x63,0x65,0x49,0x44,0x00,0x00,0x00,0x48,0x00,0x05,0x00, + 0x0c,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x0b,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x48,0x00,0x05,0x00,0x0c,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x0b,0x00,0x00,0x00, + 0x01,0x00,0x00,0x00,0x48,0x00,0x05,0x00,0x0c,0x00,0x00,0x00,0x02,0x00,0x00,0x00, + 0x0b,0x00,0x00,0x00,0x03,0x00,0x00,0x00,0x48,0x00,0x05,0x00,0x0c,0x00,0x00,0x00, + 
0x03,0x00,0x00,0x00,0x0b,0x00,0x00,0x00,0x04,0x00,0x00,0x00,0x47,0x00,0x03,0x00, + 0x0c,0x00,0x00,0x00,0x02,0x00,0x00,0x00,0x48,0x00,0x04,0x00,0x12,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x05,0x00,0x00,0x00,0x48,0x00,0x05,0x00,0x12,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x23,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x48,0x00,0x05,0x00, + 0x12,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x07,0x00,0x00,0x00,0x10,0x00,0x00,0x00, + 0x48,0x00,0x04,0x00,0x12,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x05,0x00,0x00,0x00, + 0x48,0x00,0x05,0x00,0x12,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x23,0x00,0x00,0x00, + 0x40,0x00,0x00,0x00,0x48,0x00,0x05,0x00,0x12,0x00,0x00,0x00,0x01,0x00,0x00,0x00, + 0x07,0x00,0x00,0x00,0x10,0x00,0x00,0x00,0x47,0x00,0x03,0x00,0x12,0x00,0x00,0x00, + 0x02,0x00,0x00,0x00,0x47,0x00,0x04,0x00,0x14,0x00,0x00,0x00,0x22,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x47,0x00,0x04,0x00,0x14,0x00,0x00,0x00,0x21,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x47,0x00,0x04,0x00,0x19,0x00,0x00,0x00,0x1e,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x47,0x00,0x04,0x00,0x1e,0x00,0x00,0x00,0x1e,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x47,0x00,0x04,0x00,0x24,0x00,0x00,0x00,0x1e,0x00,0x00,0x00, + 0x01,0x00,0x00,0x00,0x47,0x00,0x04,0x00,0x2c,0x00,0x00,0x00,0x1e,0x00,0x00,0x00, + 0x01,0x00,0x00,0x00,0x47,0x00,0x04,0x00,0x2d,0x00,0x00,0x00,0x1e,0x00,0x00,0x00, + 0x02,0x00,0x00,0x00,0x47,0x00,0x04,0x00,0x30,0x00,0x00,0x00,0x0b,0x00,0x00,0x00, + 0x05,0x00,0x00,0x00,0x47,0x00,0x04,0x00,0x31,0x00,0x00,0x00,0x0b,0x00,0x00,0x00, + 0x06,0x00,0x00,0x00,0x13,0x00,0x02,0x00,0x03,0x00,0x00,0x00,0x21,0x00,0x03,0x00, + 0x04,0x00,0x00,0x00,0x03,0x00,0x00,0x00,0x16,0x00,0x03,0x00,0x07,0x00,0x00,0x00, + 0x20,0x00,0x00,0x00,0x17,0x00,0x04,0x00,0x08,0x00,0x00,0x00,0x07,0x00,0x00,0x00, + 0x04,0x00,0x00,0x00,0x15,0x00,0x04,0x00,0x09,0x00,0x00,0x00,0x20,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x2b,0x00,0x04,0x00,0x09,0x00,0x00,0x00,0x0a,0x00,0x00,0x00, + 0x01,0x00,0x00,0x00,0x1c,0x00,0x04,0x00,0x0b,0x00,0x00,0x00,0x07,0x00,0x00,0x00, + 0x0a,0x00,0x00,0x00,0x1e,0x00,0x06,0x00,0x0c,0x00,0x00,0x00,0x08,0x00,0x00,0x00, + 0x07,0x00,0x00,0x00,0x0b,0x00,0x00,0x00,0x0b,0x00,0x00,0x00,0x20,0x00,0x04,0x00, + 0x0d,0x00,0x00,0x00,0x03,0x00,0x00,0x00,0x0c,0x00,0x00,0x00,0x3b,0x00,0x04,0x00, + 0x0d,0x00,0x00,0x00,0x0e,0x00,0x00,0x00,0x03,0x00,0x00,0x00,0x15,0x00,0x04,0x00, + 0x0f,0x00,0x00,0x00,0x20,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x2b,0x00,0x04,0x00, + 0x0f,0x00,0x00,0x00,0x10,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x18,0x00,0x04,0x00, + 0x11,0x00,0x00,0x00,0x08,0x00,0x00,0x00,0x04,0x00,0x00,0x00,0x1e,0x00,0x04,0x00, + 0x12,0x00,0x00,0x00,0x11,0x00,0x00,0x00,0x11,0x00,0x00,0x00,0x20,0x00,0x04,0x00, + 0x13,0x00,0x00,0x00,0x02,0x00,0x00,0x00,0x12,0x00,0x00,0x00,0x3b,0x00,0x04,0x00, + 0x13,0x00,0x00,0x00,0x14,0x00,0x00,0x00,0x02,0x00,0x00,0x00,0x20,0x00,0x04,0x00, + 0x15,0x00,0x00,0x00,0x02,0x00,0x00,0x00,0x11,0x00,0x00,0x00,0x20,0x00,0x04,0x00, + 0x18,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x08,0x00,0x00,0x00,0x3b,0x00,0x04,0x00, + 0x18,0x00,0x00,0x00,0x19,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x20,0x00,0x04,0x00, + 0x1c,0x00,0x00,0x00,0x03,0x00,0x00,0x00,0x08,0x00,0x00,0x00,0x3b,0x00,0x04,0x00, + 0x1c,0x00,0x00,0x00,0x1e,0x00,0x00,0x00,0x03,0x00,0x00,0x00,0x2b,0x00,0x04,0x00, + 0x0f,0x00,0x00,0x00,0x1f,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x17,0x00,0x04,0x00, + 0x22,0x00,0x00,0x00,0x07,0x00,0x00,0x00,0x02,0x00,0x00,0x00,0x20,0x00,0x04,0x00, + 0x23,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x22,0x00,0x00,0x00,0x3b,0x00,0x04,0x00, + 
0x23,0x00,0x00,0x00,0x24,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x2b,0x00,0x04,0x00, + 0x07,0x00,0x00,0x00,0x26,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x2b,0x00,0x04,0x00, + 0x07,0x00,0x00,0x00,0x27,0x00,0x00,0x00,0x00,0x00,0x80,0x3f,0x3b,0x00,0x04,0x00, + 0x1c,0x00,0x00,0x00,0x2c,0x00,0x00,0x00,0x03,0x00,0x00,0x00,0x3b,0x00,0x04,0x00, + 0x18,0x00,0x00,0x00,0x2d,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x20,0x00,0x04,0x00, + 0x2f,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x0f,0x00,0x00,0x00,0x3b,0x00,0x04,0x00, + 0x2f,0x00,0x00,0x00,0x30,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x3b,0x00,0x04,0x00, + 0x2f,0x00,0x00,0x00,0x31,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x36,0x00,0x05,0x00, + 0x03,0x00,0x00,0x00,0x05,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x04,0x00,0x00,0x00, + 0xf8,0x00,0x02,0x00,0x06,0x00,0x00,0x00,0x08,0x00,0x04,0x00,0x01,0x00,0x00,0x00, + 0x11,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x41,0x00,0x05,0x00,0x15,0x00,0x00,0x00, + 0x16,0x00,0x00,0x00,0x14,0x00,0x00,0x00,0x10,0x00,0x00,0x00,0x3d,0x00,0x04,0x00, + 0x11,0x00,0x00,0x00,0x17,0x00,0x00,0x00,0x16,0x00,0x00,0x00,0x3d,0x00,0x04,0x00, + 0x08,0x00,0x00,0x00,0x1a,0x00,0x00,0x00,0x19,0x00,0x00,0x00,0x91,0x00,0x05,0x00, + 0x08,0x00,0x00,0x00,0x1b,0x00,0x00,0x00,0x17,0x00,0x00,0x00,0x1a,0x00,0x00,0x00, + 0x41,0x00,0x05,0x00,0x1c,0x00,0x00,0x00,0x1d,0x00,0x00,0x00,0x0e,0x00,0x00,0x00, + 0x10,0x00,0x00,0x00,0x3e,0x00,0x03,0x00,0x1d,0x00,0x00,0x00,0x1b,0x00,0x00,0x00, + 0x08,0x00,0x04,0x00,0x01,0x00,0x00,0x00,0x12,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x41,0x00,0x05,0x00,0x15,0x00,0x00,0x00,0x20,0x00,0x00,0x00,0x14,0x00,0x00,0x00, + 0x1f,0x00,0x00,0x00,0x3d,0x00,0x04,0x00,0x11,0x00,0x00,0x00,0x21,0x00,0x00,0x00, + 0x20,0x00,0x00,0x00,0x3d,0x00,0x04,0x00,0x22,0x00,0x00,0x00,0x25,0x00,0x00,0x00, + 0x24,0x00,0x00,0x00,0x51,0x00,0x05,0x00,0x07,0x00,0x00,0x00,0x28,0x00,0x00,0x00, + 0x25,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x51,0x00,0x05,0x00,0x07,0x00,0x00,0x00, + 0x29,0x00,0x00,0x00,0x25,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x50,0x00,0x07,0x00, + 0x08,0x00,0x00,0x00,0x2a,0x00,0x00,0x00,0x28,0x00,0x00,0x00,0x29,0x00,0x00,0x00, + 0x26,0x00,0x00,0x00,0x27,0x00,0x00,0x00,0x91,0x00,0x05,0x00,0x08,0x00,0x00,0x00, + 0x2b,0x00,0x00,0x00,0x21,0x00,0x00,0x00,0x2a,0x00,0x00,0x00,0x3e,0x00,0x03,0x00, + 0x1e,0x00,0x00,0x00,0x2b,0x00,0x00,0x00,0x08,0x00,0x04,0x00,0x01,0x00,0x00,0x00, + 0x13,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x3d,0x00,0x04,0x00,0x08,0x00,0x00,0x00, + 0x2e,0x00,0x00,0x00,0x2d,0x00,0x00,0x00,0x3e,0x00,0x03,0x00,0x2c,0x00,0x00,0x00, + 0x2e,0x00,0x00,0x00,0xfd,0x00,0x01,0x00,0x38,0x00,0x01,0x00, +}; +static const uint8_t _sgl_fs_bytecode_wgpu[916] = { + 0x03,0x02,0x23,0x07,0x00,0x00,0x01,0x00,0x08,0x00,0x08,0x00,0x19,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x11,0x00,0x02,0x00,0x01,0x00,0x00,0x00,0x0b,0x00,0x06,0x00, + 0x02,0x00,0x00,0x00,0x47,0x4c,0x53,0x4c,0x2e,0x73,0x74,0x64,0x2e,0x34,0x35,0x30, + 0x00,0x00,0x00,0x00,0x0e,0x00,0x03,0x00,0x00,0x00,0x00,0x00,0x01,0x00,0x00,0x00, + 0x0f,0x00,0x08,0x00,0x04,0x00,0x00,0x00,0x05,0x00,0x00,0x00,0x6d,0x61,0x69,0x6e, + 0x00,0x00,0x00,0x00,0x0a,0x00,0x00,0x00,0x11,0x00,0x00,0x00,0x16,0x00,0x00,0x00, + 0x10,0x00,0x03,0x00,0x05,0x00,0x00,0x00,0x07,0x00,0x00,0x00,0x07,0x00,0x03,0x00, + 0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x03,0x00,0x37,0x00,0x02,0x00,0x00,0x00, + 0xc2,0x01,0x00,0x00,0x01,0x00,0x00,0x00,0x2f,0x2f,0x20,0x4f,0x70,0x4d,0x6f,0x64, + 0x75,0x6c,0x65,0x50,0x72,0x6f,0x63,0x65,0x73,0x73,0x65,0x64,0x20,0x63,0x6c,0x69, + 0x65,0x6e,0x74,0x20,0x76,0x75,0x6c,0x6b,0x61,0x6e,0x31,0x30,0x30,0x0a,0x2f,0x2f, + 
0x20,0x4f,0x70,0x4d,0x6f,0x64,0x75,0x6c,0x65,0x50,0x72,0x6f,0x63,0x65,0x73,0x73, + 0x65,0x64,0x20,0x63,0x6c,0x69,0x65,0x6e,0x74,0x20,0x6f,0x70,0x65,0x6e,0x67,0x6c, + 0x31,0x30,0x30,0x0a,0x2f,0x2f,0x20,0x4f,0x70,0x4d,0x6f,0x64,0x75,0x6c,0x65,0x50, + 0x72,0x6f,0x63,0x65,0x73,0x73,0x65,0x64,0x20,0x74,0x61,0x72,0x67,0x65,0x74,0x2d, + 0x65,0x6e,0x76,0x20,0x76,0x75,0x6c,0x6b,0x61,0x6e,0x31,0x2e,0x30,0x0a,0x2f,0x2f, + 0x20,0x4f,0x70,0x4d,0x6f,0x64,0x75,0x6c,0x65,0x50,0x72,0x6f,0x63,0x65,0x73,0x73, + 0x65,0x64,0x20,0x74,0x61,0x72,0x67,0x65,0x74,0x2d,0x65,0x6e,0x76,0x20,0x6f,0x70, + 0x65,0x6e,0x67,0x6c,0x0a,0x2f,0x2f,0x20,0x4f,0x70,0x4d,0x6f,0x64,0x75,0x6c,0x65, + 0x50,0x72,0x6f,0x63,0x65,0x73,0x73,0x65,0x64,0x20,0x65,0x6e,0x74,0x72,0x79,0x2d, + 0x70,0x6f,0x69,0x6e,0x74,0x20,0x6d,0x61,0x69,0x6e,0x0a,0x23,0x6c,0x69,0x6e,0x65, + 0x20,0x31,0x0a,0x00,0x05,0x00,0x04,0x00,0x05,0x00,0x00,0x00,0x6d,0x61,0x69,0x6e, + 0x00,0x00,0x00,0x00,0x05,0x00,0x05,0x00,0x0a,0x00,0x00,0x00,0x66,0x72,0x61,0x67, + 0x5f,0x63,0x6f,0x6c,0x6f,0x72,0x00,0x00,0x05,0x00,0x03,0x00,0x0e,0x00,0x00,0x00, + 0x74,0x65,0x78,0x00,0x05,0x00,0x03,0x00,0x11,0x00,0x00,0x00,0x75,0x76,0x00,0x00, + 0x05,0x00,0x04,0x00,0x16,0x00,0x00,0x00,0x63,0x6f,0x6c,0x6f,0x72,0x00,0x00,0x00, + 0x47,0x00,0x04,0x00,0x0a,0x00,0x00,0x00,0x1e,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x47,0x00,0x04,0x00,0x0e,0x00,0x00,0x00,0x1e,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x47,0x00,0x04,0x00,0x0e,0x00,0x00,0x00,0x22,0x00,0x00,0x00,0x02,0x00,0x00,0x00, + 0x47,0x00,0x04,0x00,0x0e,0x00,0x00,0x00,0x21,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x47,0x00,0x04,0x00,0x11,0x00,0x00,0x00,0x1e,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x47,0x00,0x04,0x00,0x16,0x00,0x00,0x00,0x1e,0x00,0x00,0x00,0x01,0x00,0x00,0x00, + 0x13,0x00,0x02,0x00,0x03,0x00,0x00,0x00,0x21,0x00,0x03,0x00,0x04,0x00,0x00,0x00, + 0x03,0x00,0x00,0x00,0x16,0x00,0x03,0x00,0x07,0x00,0x00,0x00,0x20,0x00,0x00,0x00, + 0x17,0x00,0x04,0x00,0x08,0x00,0x00,0x00,0x07,0x00,0x00,0x00,0x04,0x00,0x00,0x00, + 0x20,0x00,0x04,0x00,0x09,0x00,0x00,0x00,0x03,0x00,0x00,0x00,0x08,0x00,0x00,0x00, + 0x3b,0x00,0x04,0x00,0x09,0x00,0x00,0x00,0x0a,0x00,0x00,0x00,0x03,0x00,0x00,0x00, + 0x19,0x00,0x09,0x00,0x0b,0x00,0x00,0x00,0x07,0x00,0x00,0x00,0x01,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x01,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x1b,0x00,0x03,0x00,0x0c,0x00,0x00,0x00,0x0b,0x00,0x00,0x00, + 0x20,0x00,0x04,0x00,0x0d,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x0c,0x00,0x00,0x00, + 0x3b,0x00,0x04,0x00,0x0d,0x00,0x00,0x00,0x0e,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x20,0x00,0x04,0x00,0x10,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x08,0x00,0x00,0x00, + 0x3b,0x00,0x04,0x00,0x10,0x00,0x00,0x00,0x11,0x00,0x00,0x00,0x01,0x00,0x00,0x00, + 0x17,0x00,0x04,0x00,0x12,0x00,0x00,0x00,0x07,0x00,0x00,0x00,0x02,0x00,0x00,0x00, + 0x3b,0x00,0x04,0x00,0x10,0x00,0x00,0x00,0x16,0x00,0x00,0x00,0x01,0x00,0x00,0x00, + 0x36,0x00,0x05,0x00,0x03,0x00,0x00,0x00,0x05,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x04,0x00,0x00,0x00,0xf8,0x00,0x02,0x00,0x06,0x00,0x00,0x00,0x08,0x00,0x04,0x00, + 0x01,0x00,0x00,0x00,0x0b,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x3d,0x00,0x04,0x00, + 0x0c,0x00,0x00,0x00,0x0f,0x00,0x00,0x00,0x0e,0x00,0x00,0x00,0x3d,0x00,0x04,0x00, + 0x08,0x00,0x00,0x00,0x13,0x00,0x00,0x00,0x11,0x00,0x00,0x00,0x4f,0x00,0x07,0x00, + 0x12,0x00,0x00,0x00,0x14,0x00,0x00,0x00,0x13,0x00,0x00,0x00,0x13,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x57,0x00,0x05,0x00,0x08,0x00,0x00,0x00, + 
0x15,0x00,0x00,0x00,0x0f,0x00,0x00,0x00,0x14,0x00,0x00,0x00,0x3d,0x00,0x04,0x00, + 0x08,0x00,0x00,0x00,0x17,0x00,0x00,0x00,0x16,0x00,0x00,0x00,0x85,0x00,0x05,0x00, + 0x08,0x00,0x00,0x00,0x18,0x00,0x00,0x00,0x15,0x00,0x00,0x00,0x17,0x00,0x00,0x00, + 0x3e,0x00,0x03,0x00,0x0a,0x00,0x00,0x00,0x18,0x00,0x00,0x00,0xfd,0x00,0x01,0x00, + 0x38,0x00,0x01,0x00, +}; +#elif defined(SOKOL_DUMMY_BACKEND) +static const char* _sgl_vs_source_dummy = ""; +static const char* _sgl_fs_source_dummy = ""; +#else +#error "Please define one of SOKOL_GLCORE33, SOKOL_GLES2, SOKOL_GLES3, SOKOL_D3D11, SOKOL_METAL, SOKOL_WGPU or SOKOL_DUMMY_BACKEND!" +#endif + +typedef enum { + SGL_PRIMITIVETYPE_POINTS = 0, + SGL_PRIMITIVETYPE_LINES, + SGL_PRIMITIVETYPE_LINE_STRIP, + SGL_PRIMITIVETYPE_TRIANGLES, + SGL_PRIMITIVETYPE_TRIANGLE_STRIP, + SGL_PRIMITIVETYPE_QUADS, + SGL_NUM_PRIMITIVE_TYPES, +} _sgl_primitive_type_t; + +typedef struct { + uint32_t id; + sg_resource_state state; +} _sgl_slot_t; + +typedef struct { + int size; + int queue_top; + uint32_t* gen_ctrs; + int* free_queue; +} _sgl_pool_t; + +typedef struct { + _sgl_slot_t slot; + sg_pipeline pip[SGL_NUM_PRIMITIVE_TYPES]; +} _sgl_pipeline_t; + +typedef struct { + _sgl_pool_t pool; + _sgl_pipeline_t* pips; +} _sgl_pipeline_pool_t; + +typedef enum { + SGL_MATRIXMODE_MODELVIEW, + SGL_MATRIXMODE_PROJECTION, + SGL_MATRIXMODE_TEXTURE, + SGL_NUM_MATRIXMODES +} _sgl_matrix_mode_t; + +typedef struct { + float pos[3]; + float uv[2]; + uint32_t rgba; +} _sgl_vertex_t; + +typedef struct { + float v[4][4]; +} _sgl_matrix_t; + +typedef struct { + _sgl_matrix_t mvp; /* model-view-projection matrix */ + _sgl_matrix_t tm; /* texture matrix */ +} _sgl_uniform_t; + +typedef enum { + SGL_COMMAND_DRAW, + SGL_COMMAND_VIEWPORT, + SGL_COMMAND_SCISSOR_RECT, +} _sgl_command_type_t; + +typedef struct { + sg_pipeline pip; + sg_image img; + int base_vertex; + int num_vertices; + int uniform_index; +} _sgl_draw_args_t; + +typedef struct { + int x, y, w, h; + bool origin_top_left; +} _sgl_viewport_args_t; + +typedef struct { + int x, y, w, h; + bool origin_top_left; +} _sgl_scissor_rect_args_t; + +typedef union { + _sgl_draw_args_t draw; + _sgl_viewport_args_t viewport; + _sgl_scissor_rect_args_t scissor_rect; +} _sgl_args_t; + +typedef struct { + _sgl_command_type_t cmd; + _sgl_args_t args; +} _sgl_command_t; + +#define _SGL_INVALID_SLOT_INDEX (0) +#define _SGL_MAX_STACK_DEPTH (64) +#define _SGL_DEFAULT_PIPELINE_POOL_SIZE (64) +#define _SGL_DEFAULT_MAX_VERTICES (1<<16) +#define _SGL_DEFAULT_MAX_COMMANDS (1<<14) +#define _SGL_SLOT_SHIFT (16) +#define _SGL_MAX_POOL_SIZE (1<<_SGL_SLOT_SHIFT) +#define _SGL_SLOT_MASK (_SGL_MAX_POOL_SIZE-1) + +typedef struct { + uint32_t init_cookie; + sgl_desc_t desc; + + int num_vertices; + int num_uniforms; + int num_commands; + int cur_vertex; + int cur_uniform; + int cur_command; + _sgl_vertex_t* vertices; + _sgl_uniform_t* uniforms; + _sgl_command_t* commands; + + /* state tracking */ + int base_vertex; + int vtx_count; /* number of times vtx function has been called, used for non-triangle primitives */ + sgl_error_t error; + bool in_begin; + float u, v; + uint32_t rgba; + _sgl_primitive_type_t cur_prim_type; + sg_image cur_img; + bool texturing_enabled; + bool matrix_dirty; /* reset in sgl_end(), set in any of the matrix stack functions */ + + /* sokol-gfx resources */ + sg_buffer vbuf; + sg_image def_img; /* a default white texture */ + sg_shader shd; + sg_bindings bind; + sgl_pipeline def_pip; + _sgl_pipeline_pool_t pip_pool; + + /* pipeline stack */ + int 
pip_tos; + sgl_pipeline pip_stack[_SGL_MAX_STACK_DEPTH]; + + /* matrix stacks */ + _sgl_matrix_mode_t cur_matrix_mode; + int matrix_tos[SGL_NUM_MATRIXMODES]; + _sgl_matrix_t matrix_stack[SGL_NUM_MATRIXMODES][_SGL_MAX_STACK_DEPTH]; +} _sgl_t; +static _sgl_t _sgl; + +/*== PRIVATE FUNCTIONS =======================================================*/ + +static void _sgl_init_pool(_sgl_pool_t* pool, int num) { + SOKOL_ASSERT(pool && (num >= 1)); + /* slot 0 is reserved for the 'invalid id', so bump the pool size by 1 */ + pool->size = num + 1; + pool->queue_top = 0; + /* generation counters indexable by pool slot index, slot 0 is reserved */ + size_t gen_ctrs_size = sizeof(uint32_t) * (size_t)pool->size; + pool->gen_ctrs = (uint32_t*) SOKOL_MALLOC(gen_ctrs_size); + SOKOL_ASSERT(pool->gen_ctrs); + memset(pool->gen_ctrs, 0, gen_ctrs_size); + /* it's not a bug to only reserve 'num' here */ + pool->free_queue = (int*) SOKOL_MALLOC(sizeof(int) * (size_t)num); + SOKOL_ASSERT(pool->free_queue); + /* never allocate the zero-th pool item since the invalid id is 0 */ + for (int i = pool->size-1; i >= 1; i--) { + pool->free_queue[pool->queue_top++] = i; + } +} + +static void _sgl_discard_pool(_sgl_pool_t* pool) { + SOKOL_ASSERT(pool); + SOKOL_ASSERT(pool->free_queue); + SOKOL_FREE(pool->free_queue); + pool->free_queue = 0; + SOKOL_ASSERT(pool->gen_ctrs); + SOKOL_FREE(pool->gen_ctrs); + pool->gen_ctrs = 0; + pool->size = 0; + pool->queue_top = 0; +} + +static int _sgl_pool_alloc_index(_sgl_pool_t* pool) { + SOKOL_ASSERT(pool); + SOKOL_ASSERT(pool->free_queue); + if (pool->queue_top > 0) { + int slot_index = pool->free_queue[--pool->queue_top]; + SOKOL_ASSERT((slot_index > 0) && (slot_index < pool->size)); + return slot_index; + } + else { + /* pool exhausted */ + return _SGL_INVALID_SLOT_INDEX; + } +} + +static void _sgl_pool_free_index(_sgl_pool_t* pool, int slot_index) { + SOKOL_ASSERT((slot_index > _SGL_INVALID_SLOT_INDEX) && (slot_index < pool->size)); + SOKOL_ASSERT(pool); + SOKOL_ASSERT(pool->free_queue); + SOKOL_ASSERT(pool->queue_top < pool->size); + #ifdef SOKOL_DEBUG + /* debug check against double-free */ + for (int i = 0; i < pool->queue_top; i++) { + SOKOL_ASSERT(pool->free_queue[i] != slot_index); + } + #endif + pool->free_queue[pool->queue_top++] = slot_index; + SOKOL_ASSERT(pool->queue_top <= (pool->size-1)); +} + +static void _sgl_reset_pipeline(_sgl_pipeline_t* pip) { + SOKOL_ASSERT(pip); + memset(pip, 0, sizeof(_sgl_pipeline_t)); +} + +static void _sgl_setup_pipeline_pool(const sgl_desc_t* desc) { + SOKOL_ASSERT(desc); + /* note: the pools here will have an additional item, since slot 0 is reserved */ + SOKOL_ASSERT((desc->pipeline_pool_size > 0) && (desc->pipeline_pool_size < _SGL_MAX_POOL_SIZE)); + _sgl_init_pool(&_sgl.pip_pool.pool, desc->pipeline_pool_size); + size_t pool_byte_size = sizeof(_sgl_pipeline_t) * (size_t)_sgl.pip_pool.pool.size; + _sgl.pip_pool.pips = (_sgl_pipeline_t*) SOKOL_MALLOC(pool_byte_size); + SOKOL_ASSERT(_sgl.pip_pool.pips); + memset(_sgl.pip_pool.pips, 0, pool_byte_size); +} + +static void _sgl_discard_pipeline_pool(void) { + SOKOL_FREE(_sgl.pip_pool.pips); _sgl.pip_pool.pips = 0; + _sgl_discard_pool(&_sgl.pip_pool.pool); +} + +/* allocate the slot at slot_index: + - bump the slot's generation counter + - create a resource id from the generation counter and slot index + - set the slot's id to this id + - set the slot's state to ALLOC + - return the resource id +*/ +static uint32_t _sgl_slot_alloc(_sgl_pool_t* pool, _sgl_slot_t* slot, int slot_index) { + /* 
FIXME: add handling for an overflowing generation counter, + for now, just overflow (another option is to disable + the slot) + */ + SOKOL_ASSERT(pool && pool->gen_ctrs); + SOKOL_ASSERT((slot_index > _SGL_INVALID_SLOT_INDEX) && (slot_index < pool->size)); + SOKOL_ASSERT((slot->state == SG_RESOURCESTATE_INITIAL) && (slot->id == SG_INVALID_ID)); + uint32_t ctr = ++pool->gen_ctrs[slot_index]; + slot->id = (ctr<<_SGL_SLOT_SHIFT)|(slot_index & _SGL_SLOT_MASK); + slot->state = SG_RESOURCESTATE_ALLOC; + return slot->id; +} + +/* extract slot index from id */ +static int _sgl_slot_index(uint32_t id) { + int slot_index = (int) (id & _SGL_SLOT_MASK); + SOKOL_ASSERT(_SGL_INVALID_SLOT_INDEX != slot_index); + return slot_index; +} + +/* get pipeline pointer without id-check */ +static _sgl_pipeline_t* _sgl_pipeline_at(uint32_t pip_id) { + SOKOL_ASSERT(SG_INVALID_ID != pip_id); + int slot_index = _sgl_slot_index(pip_id); + SOKOL_ASSERT((slot_index > _SGL_INVALID_SLOT_INDEX) && (slot_index < _sgl.pip_pool.pool.size)); + return &_sgl.pip_pool.pips[slot_index]; +} + +/* get pipeline pointer with id-check, returns 0 if no match */ +static _sgl_pipeline_t* _sgl_lookup_pipeline(uint32_t pip_id) { + if (SG_INVALID_ID != pip_id) { + _sgl_pipeline_t* pip = _sgl_pipeline_at(pip_id); + if (pip->slot.id == pip_id) { + return pip; + } + } + return 0; +} + +/* make pipeline id from uint32_t id */ +static sgl_pipeline _sgl_make_pip_id(uint32_t pip_id) { + sgl_pipeline pip; + pip.id = pip_id; + return pip; +} + +static sgl_pipeline _sgl_alloc_pipeline(void) { + sgl_pipeline res; + int slot_index = _sgl_pool_alloc_index(&_sgl.pip_pool.pool); + if (_SGL_INVALID_SLOT_INDEX != slot_index) { + res = _sgl_make_pip_id(_sgl_slot_alloc(&_sgl.pip_pool.pool, &_sgl.pip_pool.pips[slot_index].slot, slot_index)); + } + else { + /* pool is exhausted */ + res = _sgl_make_pip_id(SG_INVALID_ID); + } + return res; +} + +static void _sgl_init_pipeline(sgl_pipeline pip_id, const sg_pipeline_desc* in_desc) { + SOKOL_ASSERT((pip_id.id != SG_INVALID_ID) && in_desc); + + /* create a new desc with 'patched' shader and pixel format state */ + sg_pipeline_desc desc = *in_desc; + desc.layout.buffers[0].stride = sizeof(_sgl_vertex_t); + { + sg_vertex_attr_desc* pos = &desc.layout.attrs[0]; + pos->offset = offsetof(_sgl_vertex_t, pos); + pos->format = SG_VERTEXFORMAT_FLOAT3; + } + { + sg_vertex_attr_desc* uv = &desc.layout.attrs[1]; + uv->offset = offsetof(_sgl_vertex_t, uv); + uv->format = SG_VERTEXFORMAT_FLOAT2; + } + { + sg_vertex_attr_desc* rgba = &desc.layout.attrs[2]; + rgba->offset = offsetof(_sgl_vertex_t, rgba); + rgba->format = SG_VERTEXFORMAT_UBYTE4N; + } + if (in_desc->shader.id == SG_INVALID_ID) { + desc.shader = _sgl.shd; + } + desc.index_type = SG_INDEXTYPE_NONE; + desc.sample_count = _sgl.desc.sample_count; + if (desc.face_winding == _SG_FACEWINDING_DEFAULT) { + desc.face_winding = _sgl.desc.face_winding; + } + desc.depth.pixel_format = _sgl.desc.depth_format; + desc.colors[0].pixel_format = _sgl.desc.color_format; + if (desc.colors[0].write_mask == _SG_COLORMASK_DEFAULT) { + desc.colors[0].write_mask = SG_COLORMASK_RGB; + } + + _sgl_pipeline_t* pip = _sgl_lookup_pipeline(pip_id.id); + SOKOL_ASSERT(pip && (pip->slot.state == SG_RESOURCESTATE_ALLOC)); + pip->slot.state = SG_RESOURCESTATE_VALID; + for (int i = 0; i < SGL_NUM_PRIMITIVE_TYPES; i++) { + switch (i) { + case SGL_PRIMITIVETYPE_POINTS: + desc.primitive_type = SG_PRIMITIVETYPE_POINTS; + break; + case SGL_PRIMITIVETYPE_LINES: + desc.primitive_type = SG_PRIMITIVETYPE_LINES; + 
break; + case SGL_PRIMITIVETYPE_LINE_STRIP: + desc.primitive_type = SG_PRIMITIVETYPE_LINE_STRIP; + break; + case SGL_PRIMITIVETYPE_TRIANGLES: + desc.primitive_type = SG_PRIMITIVETYPE_TRIANGLES; + break; + case SGL_PRIMITIVETYPE_TRIANGLE_STRIP: + case SGL_PRIMITIVETYPE_QUADS: + desc.primitive_type = SG_PRIMITIVETYPE_TRIANGLE_STRIP; + break; + } + if (SGL_PRIMITIVETYPE_QUADS == i) { + /* quads are emulated via triangles, use the same pipeline object */ + pip->pip[i] = pip->pip[SGL_PRIMITIVETYPE_TRIANGLES]; + } + else { + pip->pip[i] = sg_make_pipeline(&desc); + if (pip->pip[i].id == SG_INVALID_ID) { + SOKOL_LOG("sokol_gl.h: failed to create pipeline object"); + pip->slot.state = SG_RESOURCESTATE_FAILED; + } + } + } +} + +static sgl_pipeline _sgl_make_pipeline(const sg_pipeline_desc* desc) { + SOKOL_ASSERT(desc); + sgl_pipeline pip_id = _sgl_alloc_pipeline(); + if (pip_id.id != SG_INVALID_ID) { + _sgl_init_pipeline(pip_id, desc); + } + else { + SOKOL_LOG("sokol_gl.h: pipeline pool exhausted!"); + } + return pip_id; +} + +static void _sgl_destroy_pipeline(sgl_pipeline pip_id) { + _sgl_pipeline_t* pip = _sgl_lookup_pipeline(pip_id.id); + if (pip) { + for (int i = 0; i < SGL_NUM_PRIMITIVE_TYPES; i++) { + if (i != SGL_PRIMITIVETYPE_QUADS) { + sg_destroy_pipeline(pip->pip[i]); + } + } + _sgl_reset_pipeline(pip); + _sgl_pool_free_index(&_sgl.pip_pool.pool, _sgl_slot_index(pip_id.id)); + } +} + +static sg_pipeline _sgl_get_pipeline(sgl_pipeline pip_id, _sgl_primitive_type_t prim_type) { + _sgl_pipeline_t* pip = _sgl_lookup_pipeline(pip_id.id); + if (pip) { + return pip->pip[prim_type]; + } + else { + sg_pipeline dummy_id = { SG_INVALID_ID }; + return dummy_id; + } +} + +static inline void _sgl_begin(_sgl_primitive_type_t mode) { + _sgl.in_begin = true; + _sgl.base_vertex = _sgl.cur_vertex; + _sgl.vtx_count = 0; + _sgl.cur_prim_type = mode; +} + +static void _sgl_rewind(void) { + _sgl.base_vertex = 0; + _sgl.cur_vertex = 0; + _sgl.cur_uniform = 0; + _sgl.cur_command = 0; + _sgl.error = SGL_NO_ERROR; + _sgl.matrix_dirty = true; +} + +static inline _sgl_vertex_t* _sgl_next_vertex(void) { + if (_sgl.cur_vertex < _sgl.num_vertices) { + return &_sgl.vertices[_sgl.cur_vertex++]; + } + else { + _sgl.error = SGL_ERROR_VERTICES_FULL; + return 0; + } +} + +static inline _sgl_uniform_t* _sgl_next_uniform(void) { + if (_sgl.cur_uniform < _sgl.num_uniforms) { + return &_sgl.uniforms[_sgl.cur_uniform++]; + } + else { + _sgl.error = SGL_ERROR_UNIFORMS_FULL; + return 0; + } +} + +static inline _sgl_command_t* _sgl_prev_command(void) { + if (_sgl.cur_command > 0) { + return &_sgl.commands[_sgl.cur_command - 1]; + } + else { + return 0; + } +} + +static inline _sgl_command_t* _sgl_next_command(void) { + if (_sgl.cur_command < _sgl.num_commands) { + return &_sgl.commands[_sgl.cur_command++]; + } + else { + _sgl.error = SGL_ERROR_COMMANDS_FULL; + return 0; + } +} + +static inline uint32_t _sgl_pack_rgbab(uint8_t r, uint8_t g, uint8_t b, uint8_t a) { + return (uint32_t)(((uint32_t)a<<24)|((uint32_t)b<<16)|((uint32_t)g<<8)|r); +} + +static inline float _sgl_clamp(float v, float lo, float hi) { + if (v < lo) return lo; + else if (v > hi) return hi; + else return v; +} + +static inline uint32_t _sgl_pack_rgbaf(float r, float g, float b, float a) { + uint8_t r_u8 = (uint8_t) (_sgl_clamp(r, 0.0f, 1.0f) * 255.0f); + uint8_t g_u8 = (uint8_t) (_sgl_clamp(g, 0.0f, 1.0f) * 255.0f); + uint8_t b_u8 = (uint8_t) (_sgl_clamp(b, 0.0f, 1.0f) * 255.0f); + uint8_t a_u8 = (uint8_t) (_sgl_clamp(a, 0.0f, 1.0f) * 255.0f); + return 
_sgl_pack_rgbab(r_u8, g_u8, b_u8, a_u8); +} + +static inline void _sgl_vtx(float x, float y, float z, float u, float v, uint32_t rgba) { + SOKOL_ASSERT(_sgl.in_begin); + _sgl_vertex_t* vtx; + /* handle non-native primitive types */ + if ((_sgl.cur_prim_type == SGL_PRIMITIVETYPE_QUADS) && ((_sgl.vtx_count & 3) == 3)) { + /* for quads, before writing the last quad vertex, reuse + the first and third vertex to start the second triangle in the quad + */ + vtx = _sgl_next_vertex(); + if (vtx) { *vtx = *(vtx - 3); } + vtx = _sgl_next_vertex(); + if (vtx) { *vtx = *(vtx - 2); } + } + vtx = _sgl_next_vertex(); + if (vtx) { + vtx->pos[0] = x; vtx->pos[1] = y; vtx->pos[2] = z; + vtx->uv[0] = u; vtx->uv[1] = v; + vtx->rgba = rgba; + } + _sgl.vtx_count++; +} + +static void _sgl_identity(_sgl_matrix_t* m) { + for (int c = 0; c < 4; c++) { + for (int r = 0; r < 4; r++) { + m->v[c][r] = (r == c) ? 1.0f : 0.0f; + } + } +} + +static void _sgl_transpose(_sgl_matrix_t* dst, const _sgl_matrix_t* m) { + SOKOL_ASSERT(dst != m); + for (int c = 0; c < 4; c++) { + for (int r = 0; r < 4; r++) { + dst->v[r][c] = m->v[c][r]; + } + } +} + +/* _sgl_rotate, _sgl_frustum, _sgl_ortho from MESA m_matric.c */ +static void _sgl_matmul4(_sgl_matrix_t* p, const _sgl_matrix_t* a, const _sgl_matrix_t* b) { + for (int r = 0; r < 4; r++) { + float ai0=a->v[0][r], ai1=a->v[1][r], ai2=a->v[2][r], ai3=a->v[3][r]; + p->v[0][r] = ai0*b->v[0][0] + ai1*b->v[0][1] + ai2*b->v[0][2] + ai3*b->v[0][3]; + p->v[1][r] = ai0*b->v[1][0] + ai1*b->v[1][1] + ai2*b->v[1][2] + ai3*b->v[1][3]; + p->v[2][r] = ai0*b->v[2][0] + ai1*b->v[2][1] + ai2*b->v[2][2] + ai3*b->v[2][3]; + p->v[3][r] = ai0*b->v[3][0] + ai1*b->v[3][1] + ai2*b->v[3][2] + ai3*b->v[3][3]; + } +} + +static void _sgl_mul(_sgl_matrix_t* dst, const _sgl_matrix_t* m) { + _sgl_matmul4(dst, dst, m); +} + +static void _sgl_rotate(_sgl_matrix_t* dst, float a, float x, float y, float z) { + + float s = sinf(a); + float c = cosf(a); + + float mag = sqrtf(x*x + y*y + z*z); + if (mag < 1.0e-4F) { + return; + } + x /= mag; + y /= mag; + z /= mag; + float xx = x * x; + float yy = y * y; + float zz = z * z; + float xy = x * y; + float yz = y * z; + float zx = z * x; + float xs = x * s; + float ys = y * s; + float zs = z * s; + float one_c = 1.0f - c; + + _sgl_matrix_t m; + m.v[0][0] = (one_c * xx) + c; + m.v[1][0] = (one_c * xy) - zs; + m.v[2][0] = (one_c * zx) + ys; + m.v[3][0] = 0.0f; + m.v[0][1] = (one_c * xy) + zs; + m.v[1][1] = (one_c * yy) + c; + m.v[2][1] = (one_c * yz) - xs; + m.v[3][1] = 0.0f; + m.v[0][2] = (one_c * zx) - ys; + m.v[1][2] = (one_c * yz) + xs; + m.v[2][2] = (one_c * zz) + c; + m.v[3][2] = 0.0f; + m.v[0][3] = 0.0f; + m.v[1][3] = 0.0f; + m.v[2][3] = 0.0f; + m.v[3][3] = 1.0f; + _sgl_mul(dst, &m); +} + +static void _sgl_scale(_sgl_matrix_t* dst, float x, float y, float z) { + for (int r = 0; r < 4; r++) { + dst->v[0][r] *= x; + dst->v[1][r] *= y; + dst->v[2][r] *= z; + } +} + +static void _sgl_translate(_sgl_matrix_t* dst, float x, float y, float z) { + for (int r = 0; r < 4; r++) { + dst->v[3][r] = dst->v[0][r]*x + dst->v[1][r]*y + dst->v[2][r]*z + dst->v[3][r]; + } +} + +static void _sgl_frustum(_sgl_matrix_t* dst, float left, float right, float bottom, float top, float znear, float zfar) { + float x = (2.0f * znear) / (right - left); + float y = (2.0f * znear) / (top - bottom); + float a = (right + left) / (right - left); + float b = (top + bottom) / (top - bottom); + float c = -(zfar + znear) / (zfar - znear); + float d = -(2.0f * zfar * znear) / (zfar - znear); + _sgl_matrix_t 
m; + m.v[0][0] = x; m.v[0][1] = 0.0f; m.v[0][2] = 0.0f; m.v[0][3] = 0.0f; + m.v[1][0] = 0.0f; m.v[1][1] = y; m.v[1][2] = 0.0f; m.v[1][3] = 0.0f; + m.v[2][0] = a; m.v[2][1] = b; m.v[2][2] = c; m.v[2][3] = -1.0f; + m.v[3][0] = 0.0f; m.v[3][1] = 0.0f; m.v[3][2] = d; m.v[3][3] = 0.0f; + _sgl_mul(dst, &m); +} + +static void _sgl_ortho(_sgl_matrix_t* dst, float left, float right, float bottom, float top, float znear, float zfar) { + _sgl_matrix_t m; + m.v[0][0] = 2.0f / (right - left); + m.v[1][0] = 0.0f; + m.v[2][0] = 0.0f; + m.v[3][0] = -(right + left) / (right - left); + m.v[0][1] = 0.0f; + m.v[1][1] = 2.0f / (top - bottom); + m.v[2][1] = 0.0f; + m.v[3][1] = -(top + bottom) / (top - bottom); + m.v[0][2] = 0.0f; + m.v[1][2] = 0.0f; + m.v[2][2] = -2.0f / (zfar - znear); + m.v[3][2] = -(zfar + znear) / (zfar - znear); + m.v[0][3] = 0.0f; + m.v[1][3] = 0.0f; + m.v[2][3] = 0.0f; + m.v[3][3] = 1.0f; + + _sgl_mul(dst, &m); +} + +/* _sgl_perspective, _sgl_lookat from Regal project.c */ +static void _sgl_perspective(_sgl_matrix_t* dst, float fovy, float aspect, float znear, float zfar) { + float sine = sinf(fovy / 2.0f); + float delta_z = zfar - znear; + if ((delta_z == 0.0f) || (sine == 0.0f) || (aspect == 0.0f)) { + return; + } + float cotan = cosf(fovy / 2.0f) / sine; + _sgl_matrix_t m; + _sgl_identity(&m); + m.v[0][0] = cotan / aspect; + m.v[1][1] = cotan; + m.v[2][2] = -(zfar + znear) / delta_z; + m.v[2][3] = -1.0f; + m.v[3][2] = -2.0f * znear * zfar / delta_z; + m.v[3][3] = 0.0f; + _sgl_mul(dst, &m); +} + +static void _sgl_normalize(float v[3]) { + float r = sqrtf(v[0]*v[0] + v[1]*v[1] + v[2]*v[2]); + if (r == 0.0f) { + return; + } + v[0] /= r; + v[1] /= r; + v[2] /= r; +} + +static void _sgl_cross(float v1[3], float v2[3], float res[3]) { + res[0] = v1[1]*v2[2] - v1[2]*v2[1]; + res[1] = v1[2]*v2[0] - v1[0]*v2[2]; + res[2] = v1[0]*v2[1] - v1[1]*v2[0]; +} + +static void _sgl_lookat(_sgl_matrix_t* dst, + float eye_x, float eye_y, float eye_z, + float center_x, float center_y, float center_z, + float up_x, float up_y, float up_z) +{ + float fwd[3], side[3], up[3]; + + fwd[0] = center_x - eye_x; fwd[1] = center_y - eye_y; fwd[2] = center_z - eye_z; + up[0] = up_x; up[1] = up_y; up[2] = up_z; + _sgl_normalize(fwd); + _sgl_cross(fwd, up, side); + _sgl_normalize(side); + _sgl_cross(side, fwd, up); + + _sgl_matrix_t m; + _sgl_identity(&m); + m.v[0][0] = side[0]; + m.v[1][0] = side[1]; + m.v[2][0] = side[2]; + m.v[0][1] = up[0]; + m.v[1][1] = up[1]; + m.v[2][1] = up[2]; + m.v[0][2] = -fwd[0]; + m.v[1][2] = -fwd[1]; + m.v[2][2] = -fwd[2]; + _sgl_mul(dst, &m); + _sgl_translate(dst, -eye_x, -eye_y, -eye_z); +} + +/* current top-of-stack projection matrix */ +static inline _sgl_matrix_t* _sgl_matrix_projection(void) { + return &_sgl.matrix_stack[SGL_MATRIXMODE_PROJECTION][_sgl.matrix_tos[SGL_MATRIXMODE_PROJECTION]]; +} + +/* get top-of-stack modelview matrix */ +static inline _sgl_matrix_t* _sgl_matrix_modelview(void) { + return &_sgl.matrix_stack[SGL_MATRIXMODE_MODELVIEW][_sgl.matrix_tos[SGL_MATRIXMODE_MODELVIEW]]; +} + +/* get top-of-stack texture matrix */ +static inline _sgl_matrix_t* _sgl_matrix_texture(void) { + return &_sgl.matrix_stack[SGL_MATRIXMODE_TEXTURE][_sgl.matrix_tos[SGL_MATRIXMODE_TEXTURE]]; +} + +/* get pointer to current top-of-stack of current matrix mode */ +static inline _sgl_matrix_t* _sgl_matrix(void) { + return &_sgl.matrix_stack[_sgl.cur_matrix_mode][_sgl.matrix_tos[_sgl.cur_matrix_mode]]; +} + +/*== PUBLIC FUNCTIONS ========================================================*/ 
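+/* Usage sketch (illustrative only): how the public API below drives the private
+   machinery above. A single sgl_begin_quads()/sgl_end() pair records vertices
+   through _sgl_vtx() (quads are expanded into two triangles, so four vertex calls
+   become six recorded vertices) and then appends, or merges into, a draw command
+   via _sgl_next_command()/_sgl_prev_command(). The SOKOL_GL_USAGE_SKETCH guard is
+   hypothetical and defined nowhere in this header, so the block is never compiled;
+   it assumes sgl_draw() (the flush function implemented further down in this
+   header) is called once per frame inside a sokol-gfx render pass. */
+#if defined(SOKOL_GL_USAGE_SKETCH)
+static void _sgl_usage_sketch(void) {
+    /* one-time setup: a zero-initialized desc picks up the _SGL_DEFAULT_* values */
+    sgl_desc_t desc;
+    memset(&desc, 0, sizeof(desc));
+    sgl_setup(&desc);
+
+    /* per frame: reset the sgl state, then record one flat-colored quad */
+    sgl_defaults();
+    sgl_viewport(0, 0, 640, 480, true);
+    sgl_begin_quads();
+    sgl_c3f(1.0f, 0.0f, 0.0f);
+    sgl_v2f(-0.5f, -0.5f);
+    sgl_v2f( 0.5f, -0.5f);
+    sgl_v2f( 0.5f,  0.5f);
+    sgl_v2f(-0.5f,  0.5f);
+    sgl_end();
+    /* inside the sokol-gfx default render pass: sgl_draw(); */
+
+    /* on application shutdown */
+    sgl_shutdown();
+}
+#endif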
+SOKOL_API_IMPL void sgl_setup(const sgl_desc_t* desc) { + SOKOL_ASSERT(desc); + memset(&_sgl, 0, sizeof(_sgl)); + _sgl.init_cookie = _SGL_INIT_COOKIE; + _sgl.desc = *desc; + _sgl.desc.pipeline_pool_size = _sgl_def(_sgl.desc.pipeline_pool_size, _SGL_DEFAULT_PIPELINE_POOL_SIZE); + _sgl.desc.max_vertices = _sgl_def(_sgl.desc.max_vertices, _SGL_DEFAULT_MAX_VERTICES); + _sgl.desc.max_commands = _sgl_def(_sgl.desc.max_commands, _SGL_DEFAULT_MAX_COMMANDS); + _sgl.desc.face_winding = _sgl_def(_sgl.desc.face_winding, SG_FACEWINDING_CCW); + + /* allocate buffers and pools */ + _sgl.num_vertices = _sgl.desc.max_vertices; + _sgl.num_uniforms = _sgl.desc.max_commands; + _sgl.num_commands = _sgl.num_uniforms; + _sgl.vertices = (_sgl_vertex_t*) SOKOL_MALLOC((size_t)_sgl.num_vertices * sizeof(_sgl_vertex_t)); + SOKOL_ASSERT(_sgl.vertices); + _sgl.uniforms = (_sgl_uniform_t*) SOKOL_MALLOC((size_t)_sgl.num_uniforms * sizeof(_sgl_uniform_t)); + SOKOL_ASSERT(_sgl.uniforms); + _sgl.commands = (_sgl_command_t*) SOKOL_MALLOC((size_t)_sgl.num_commands * sizeof(_sgl_command_t)); + SOKOL_ASSERT(_sgl.commands); + _sgl_setup_pipeline_pool(&_sgl.desc); + + /* create sokol-gfx resource objects */ + sg_push_debug_group("sokol-gl"); + + sg_buffer_desc vbuf_desc; + memset(&vbuf_desc, 0, sizeof(vbuf_desc)); + vbuf_desc.size = (size_t)_sgl.num_vertices * sizeof(_sgl_vertex_t); + vbuf_desc.type = SG_BUFFERTYPE_VERTEXBUFFER; + vbuf_desc.usage = SG_USAGE_STREAM; + vbuf_desc.label = "sgl-vertex-buffer"; + _sgl.vbuf = sg_make_buffer(&vbuf_desc); + SOKOL_ASSERT(SG_INVALID_ID != _sgl.vbuf.id); + + uint32_t pixels[64]; + for (int i = 0; i < 64; i++) { + pixels[i] = 0xFFFFFFFF; + } + sg_image_desc img_desc; + memset(&img_desc, 0, sizeof(img_desc)); + img_desc.type = SG_IMAGETYPE_2D; + img_desc.width = 8; + img_desc.height = 8; + img_desc.num_mipmaps = 1; + img_desc.pixel_format = SG_PIXELFORMAT_RGBA8; + img_desc.min_filter = SG_FILTER_NEAREST; + img_desc.mag_filter = SG_FILTER_NEAREST; + img_desc.data.subimage[0][0] = SG_RANGE(pixels); + img_desc.label = "sgl-default-texture"; + _sgl.def_img = sg_make_image(&img_desc); + SOKOL_ASSERT(SG_INVALID_ID != _sgl.def_img.id); + _sgl.cur_img = _sgl.def_img; + + sg_shader_desc shd_desc; + memset(&shd_desc, 0, sizeof(shd_desc)); + shd_desc.attrs[0].name = "position"; + shd_desc.attrs[1].name = "texcoord0"; + shd_desc.attrs[2].name = "color0"; + shd_desc.attrs[0].sem_name = "TEXCOORD"; + shd_desc.attrs[0].sem_index = 0; + shd_desc.attrs[1].sem_name = "TEXCOORD"; + shd_desc.attrs[1].sem_index = 1; + shd_desc.attrs[2].sem_name = "TEXCOORD"; + shd_desc.attrs[2].sem_index = 2; + sg_shader_uniform_block_desc* ub = &shd_desc.vs.uniform_blocks[0]; + ub->size = sizeof(_sgl_uniform_t); + ub->uniforms[0].name = "vs_params"; + ub->uniforms[0].type = SG_UNIFORMTYPE_FLOAT4; + ub->uniforms[0].array_count = 8; + shd_desc.fs.images[0].name = "tex"; + shd_desc.fs.images[0].image_type = SG_IMAGETYPE_2D; + shd_desc.fs.images[0].sampler_type = SG_SAMPLERTYPE_FLOAT; + shd_desc.label = "sgl-shader"; + #if defined(SOKOL_GLCORE33) + shd_desc.vs.source = _sgl_vs_source_glsl330; + shd_desc.fs.source = _sgl_fs_source_glsl330; + #elif defined(SOKOL_GLES2) || defined(SOKOL_GLES3) + shd_desc.vs.source = _sgl_vs_source_glsl100; + shd_desc.fs.source = _sgl_fs_source_glsl100; + #elif defined(SOKOL_METAL) + shd_desc.vs.entry = "main0"; + shd_desc.fs.entry = "main0"; + switch (sg_query_backend()) { + case SG_BACKEND_METAL_MACOS: + shd_desc.vs.bytecode = SG_RANGE(_sgl_vs_bytecode_metal_macos); + shd_desc.fs.bytecode = 
SG_RANGE(_sgl_fs_bytecode_metal_macos); + break; + case SG_BACKEND_METAL_IOS: + shd_desc.vs.bytecode = SG_RANGE(_sgl_vs_bytecode_metal_ios); + shd_desc.fs.bytecode = SG_RANGE(_sgl_fs_bytecode_metal_ios); + break; + default: + shd_desc.vs.source = _sgl_vs_source_metal_sim; + shd_desc.fs.source = _sgl_fs_source_metal_sim; + break; + } + #elif defined(SOKOL_D3D11) + shd_desc.vs.bytecode = SG_RANGE(_sgl_vs_bytecode_hlsl4); + shd_desc.fs.bytecode = SG_RANGE(_sgl_fs_bytecode_hlsl4); + #elif defined(SOKOL_WGPU) + shd_desc.vs.bytecode = SG_RANGE(_sgl_vs_bytecode_wgpu); + shd_desc.fs.bytecode = SG_RANGE(_sgl_fs_bytecode_wgpu); + #else + shd_desc.vs.source = _sgl_vs_src_dummy; + shd_desc.fs.source = _sgl_fs_src_dummy; + #endif + _sgl.shd = sg_make_shader(&shd_desc); + SOKOL_ASSERT(SG_INVALID_ID != _sgl.shd.id); + + /* create default pipeline object */ + sg_pipeline_desc def_pip_desc; + memset(&def_pip_desc, 0, sizeof(def_pip_desc)); + def_pip_desc.depth.write_enabled = true; + _sgl.def_pip = _sgl_make_pipeline(&def_pip_desc); + sg_pop_debug_group(); + + /* default state */ + _sgl.rgba = 0xFFFFFFFF; + for (int i = 0; i < SGL_NUM_MATRIXMODES; i++) { + _sgl_identity(&_sgl.matrix_stack[i][0]); + } + _sgl.pip_stack[0] = _sgl.def_pip; + _sgl.matrix_dirty = true; +} + +SOKOL_API_IMPL void sgl_shutdown(void) { + SOKOL_ASSERT(_SGL_INIT_COOKIE == _sgl.init_cookie); + SOKOL_FREE(_sgl.vertices); _sgl.vertices = 0; + SOKOL_FREE(_sgl.uniforms); _sgl.uniforms = 0; + SOKOL_FREE(_sgl.commands); _sgl.commands = 0; + sg_push_debug_group("sokol-gl"); + sg_destroy_buffer(_sgl.vbuf); + sg_destroy_image(_sgl.def_img); + sg_destroy_shader(_sgl.shd); + for (int i = 0; i < _sgl.pip_pool.pool.size; i++) { + _sgl_pipeline_t* pip = &_sgl.pip_pool.pips[i]; + _sgl_destroy_pipeline(_sgl_make_pip_id(pip->slot.id)); + } + sg_pop_debug_group(); + _sgl_discard_pipeline_pool(); + _sgl.init_cookie = 0; +} + +SOKOL_API_IMPL sgl_error_t sgl_error(void) { + return _sgl.error; +} + +SOKOL_API_IMPL float sgl_rad(float deg) { + return (deg * (float)M_PI) / 180.0f; +} + +SOKOL_API_IMPL float sgl_deg(float rad) { + return (rad * 180.0f) / (float)M_PI; +} + +SOKOL_API_IMPL sgl_pipeline sgl_make_pipeline(const sg_pipeline_desc* desc) { + SOKOL_ASSERT(_SGL_INIT_COOKIE == _sgl.init_cookie); + return _sgl_make_pipeline(desc); +} + +SOKOL_API_IMPL void sgl_destroy_pipeline(sgl_pipeline pip_id) { + SOKOL_ASSERT(_SGL_INIT_COOKIE == _sgl.init_cookie); + _sgl_destroy_pipeline(pip_id); +} + +SOKOL_API_IMPL void sgl_load_pipeline(sgl_pipeline pip_id) { + SOKOL_ASSERT(_SGL_INIT_COOKIE == _sgl.init_cookie); + SOKOL_ASSERT((_sgl.pip_tos >= 0) && (_sgl.pip_tos < _SGL_MAX_STACK_DEPTH)); + _sgl.pip_stack[_sgl.pip_tos] = pip_id; +} + +SOKOL_API_IMPL void sgl_default_pipeline(void) { + SOKOL_ASSERT(_SGL_INIT_COOKIE == _sgl.init_cookie); + SOKOL_ASSERT((_sgl.pip_tos >= 0) && (_sgl.pip_tos < _SGL_MAX_STACK_DEPTH)); + _sgl.pip_stack[_sgl.pip_tos] = _sgl.def_pip; +} + +SOKOL_API_IMPL void sgl_push_pipeline(void) { + SOKOL_ASSERT(_SGL_INIT_COOKIE == _sgl.init_cookie); + if (_sgl.pip_tos < (_SGL_MAX_STACK_DEPTH - 1)) { + _sgl.pip_tos++; + _sgl.pip_stack[_sgl.pip_tos] = _sgl.pip_stack[_sgl.pip_tos-1]; + } + else { + _sgl.error = SGL_ERROR_STACK_OVERFLOW; + } +} + +SOKOL_API_IMPL void sgl_pop_pipeline(void) { + SOKOL_ASSERT(_SGL_INIT_COOKIE == _sgl.init_cookie); + if (_sgl.pip_tos > 0) { + _sgl.pip_tos--; + } + else { + _sgl.error = SGL_ERROR_STACK_UNDERFLOW; + } +} + +SOKOL_API_IMPL void sgl_defaults(void) { + SOKOL_ASSERT(_SGL_INIT_COOKIE == _sgl.init_cookie); + 
SOKOL_ASSERT(!_sgl.in_begin); + _sgl.u = 0.0f; _sgl.v = 0.0f; + _sgl.rgba = 0xFFFFFFFF; + _sgl.texturing_enabled = false; + _sgl.cur_img = _sgl.def_img; + sgl_default_pipeline(); + _sgl_identity(_sgl_matrix_texture()); + _sgl_identity(_sgl_matrix_modelview()); + _sgl_identity(_sgl_matrix_projection()); + _sgl.cur_matrix_mode = SGL_MATRIXMODE_MODELVIEW; + _sgl.matrix_dirty = true; +} + +SOKOL_API_IMPL void sgl_viewport(int x, int y, int w, int h, bool origin_top_left) { + SOKOL_ASSERT(_SGL_INIT_COOKIE == _sgl.init_cookie); + SOKOL_ASSERT(!_sgl.in_begin); + _sgl_command_t* cmd = _sgl_next_command(); + if (cmd) { + cmd->cmd = SGL_COMMAND_VIEWPORT; + cmd->args.viewport.x = x; + cmd->args.viewport.y = y; + cmd->args.viewport.w = w; + cmd->args.viewport.h = h; + cmd->args.viewport.origin_top_left = origin_top_left; + } +} + +SOKOL_API_IMPL void sgl_viewportf(float x, float y, float w, float h, bool origin_top_left) { + sgl_viewport((int)x, (int)y, (int)w, (int)h, origin_top_left); +} + +SOKOL_API_IMPL void sgl_scissor_rect(int x, int y, int w, int h, bool origin_top_left) { + SOKOL_ASSERT(_SGL_INIT_COOKIE == _sgl.init_cookie); + SOKOL_ASSERT(!_sgl.in_begin); + _sgl_command_t* cmd = _sgl_next_command(); + if (cmd) { + cmd->cmd = SGL_COMMAND_SCISSOR_RECT; + cmd->args.scissor_rect.x = x; + cmd->args.scissor_rect.y = y; + cmd->args.scissor_rect.w = w; + cmd->args.scissor_rect.h = h; + cmd->args.scissor_rect.origin_top_left = origin_top_left; + } +} + +SOKOL_API_IMPL void sgl_scissor_rectf(float x, float y, float w, float h, bool origin_top_left) { + sgl_scissor_rect((int)x, (int)y, (int)w, (int)h, origin_top_left); +} + +SOKOL_API_IMPL void sgl_enable_texture(void) { + SOKOL_ASSERT(_SGL_INIT_COOKIE == _sgl.init_cookie); + SOKOL_ASSERT(!_sgl.in_begin); + _sgl.texturing_enabled = true; +} + +SOKOL_API_IMPL void sgl_disable_texture(void) { + SOKOL_ASSERT(_SGL_INIT_COOKIE == _sgl.init_cookie); + SOKOL_ASSERT(!_sgl.in_begin); + _sgl.texturing_enabled = false; +} + +SOKOL_API_IMPL void sgl_texture(sg_image img) { + SOKOL_ASSERT(_SGL_INIT_COOKIE == _sgl.init_cookie); + SOKOL_ASSERT(!_sgl.in_begin); + if (SG_INVALID_ID != img.id) { + _sgl.cur_img = img; + } + else { + _sgl.cur_img = _sgl.def_img; + } +} + +SOKOL_API_IMPL void sgl_begin_points(void) { + SOKOL_ASSERT(_SGL_INIT_COOKIE == _sgl.init_cookie); + SOKOL_ASSERT(!_sgl.in_begin); + _sgl_begin(SGL_PRIMITIVETYPE_POINTS); +} + +SOKOL_API_IMPL void sgl_begin_lines(void) { + SOKOL_ASSERT(_SGL_INIT_COOKIE == _sgl.init_cookie); + SOKOL_ASSERT(!_sgl.in_begin); + _sgl_begin(SGL_PRIMITIVETYPE_LINES); +} + +SOKOL_API_IMPL void sgl_begin_line_strip(void) { + SOKOL_ASSERT(_SGL_INIT_COOKIE == _sgl.init_cookie); + SOKOL_ASSERT(!_sgl.in_begin); + _sgl_begin(SGL_PRIMITIVETYPE_LINE_STRIP); +} + +SOKOL_API_IMPL void sgl_begin_triangles(void) { + SOKOL_ASSERT(_SGL_INIT_COOKIE == _sgl.init_cookie); + SOKOL_ASSERT(!_sgl.in_begin); + _sgl_begin(SGL_PRIMITIVETYPE_TRIANGLES); +} + +SOKOL_API_IMPL void sgl_begin_triangle_strip(void) { + SOKOL_ASSERT(_SGL_INIT_COOKIE == _sgl.init_cookie); + SOKOL_ASSERT(!_sgl.in_begin); + _sgl_begin(SGL_PRIMITIVETYPE_TRIANGLE_STRIP); +} + +SOKOL_API_IMPL void sgl_begin_quads(void) { + SOKOL_ASSERT(_SGL_INIT_COOKIE == _sgl.init_cookie); + SOKOL_ASSERT(!_sgl.in_begin); + _sgl_begin(SGL_PRIMITIVETYPE_QUADS); +} + +SOKOL_API_IMPL void sgl_end(void) { + SOKOL_ASSERT(_SGL_INIT_COOKIE == _sgl.init_cookie); + SOKOL_ASSERT(_sgl.in_begin); + SOKOL_ASSERT(_sgl.cur_vertex >= _sgl.base_vertex); + _sgl.in_begin = false; + bool matrix_dirty = 
_sgl.matrix_dirty; + if (matrix_dirty) { + _sgl.matrix_dirty = false; + _sgl_uniform_t* uni = _sgl_next_uniform(); + if (uni) { + _sgl_matmul4(&uni->mvp, _sgl_matrix_projection(), _sgl_matrix_modelview()); + uni->tm = *_sgl_matrix_texture(); + } + } + /* check if command can be merged with previous command */ + sg_pipeline pip = _sgl_get_pipeline(_sgl.pip_stack[_sgl.pip_tos], _sgl.cur_prim_type); + sg_image img = _sgl.texturing_enabled ? _sgl.cur_img : _sgl.def_img; + _sgl_command_t* prev_cmd = _sgl_prev_command(); + bool merge_cmd = false; + if (prev_cmd) { + if ((prev_cmd->cmd == SGL_COMMAND_DRAW) && + (_sgl.cur_prim_type != SGL_PRIMITIVETYPE_LINE_STRIP) && + (_sgl.cur_prim_type != SGL_PRIMITIVETYPE_TRIANGLE_STRIP) && + !matrix_dirty && + (prev_cmd->args.draw.img.id == img.id) && + (prev_cmd->args.draw.pip.id == pip.id)) + { + merge_cmd = true; + } + } + if (merge_cmd) { + /* draw command can be merged with the previous command */ + prev_cmd->args.draw.num_vertices += _sgl.cur_vertex - _sgl.base_vertex; + } + else { + /* append a new draw command */ + _sgl_command_t* cmd = _sgl_next_command(); + if (cmd) { + SOKOL_ASSERT(_sgl.cur_uniform > 0); + cmd->cmd = SGL_COMMAND_DRAW; + cmd->args.draw.img = img; + cmd->args.draw.pip = _sgl_get_pipeline(_sgl.pip_stack[_sgl.pip_tos], _sgl.cur_prim_type); + cmd->args.draw.base_vertex = _sgl.base_vertex; + cmd->args.draw.num_vertices = _sgl.cur_vertex - _sgl.base_vertex; + cmd->args.draw.uniform_index = _sgl.cur_uniform - 1; + } + } +} + +SOKOL_API_IMPL void sgl_t2f(float u, float v) { + _sgl.u = u; _sgl.v = v; +} + +SOKOL_API_IMPL void sgl_c3f(float r, float g, float b) { + _sgl.rgba = _sgl_pack_rgbaf(r, g, b, 1.0f); +} + +SOKOL_API_IMPL void sgl_c4f(float r, float g, float b, float a) { + _sgl.rgba = _sgl_pack_rgbaf(r, g, b, a); +} + +SOKOL_API_IMPL void sgl_c3b(uint8_t r, uint8_t g, uint8_t b) { + _sgl.rgba = _sgl_pack_rgbab(r, g, b, 255); +} + +SOKOL_API_IMPL void sgl_c4b(uint8_t r, uint8_t g, uint8_t b, uint8_t a) { + _sgl.rgba = _sgl_pack_rgbab(r, g, b, a); +} + +SOKOL_API_IMPL void sgl_c1i(uint32_t rgba) { + _sgl.rgba = rgba; +} + +SOKOL_API_IMPL void sgl_v2f(float x, float y) { + _sgl_vtx(x, y, 0.0f, _sgl.u, _sgl.v, _sgl.rgba); +} + +SOKOL_API_IMPL void sgl_v3f(float x, float y, float z) { + _sgl_vtx(x, y, z, _sgl.u, _sgl.v, _sgl.rgba); +} + +SOKOL_API_IMPL void sgl_v2f_t2f(float x, float y, float u, float v) { + _sgl_vtx(x, y, 0.0f, u, v, _sgl.rgba); +} + +SOKOL_API_IMPL void sgl_v3f_t2f(float x, float y, float z, float u, float v) { + _sgl_vtx(x, y, z, u, v, _sgl.rgba); +} + +SOKOL_API_IMPL void sgl_v2f_c3f(float x, float y, float r, float g, float b) { + _sgl_vtx(x, y, 0.0f, _sgl.u, _sgl.v, _sgl_pack_rgbaf(r, g, b, 1.0f)); +} + +SOKOL_API_IMPL void sgl_v2f_c3b(float x, float y, uint8_t r, uint8_t g, uint8_t b) { + _sgl_vtx(x, y, 0.0f, _sgl.u, _sgl.v, _sgl_pack_rgbab(r, g, b, 255)); +} + +SOKOL_API_IMPL void sgl_v2f_c4f(float x, float y, float r, float g, float b, float a) { + _sgl_vtx(x, y, 0.0f, _sgl.u, _sgl.v, _sgl_pack_rgbaf(r, g, b, a)); +} + +SOKOL_API_IMPL void sgl_v2f_c4b(float x, float y, uint8_t r, uint8_t g, uint8_t b, uint8_t a) { + _sgl_vtx(x, y, 0.0f, _sgl.u, _sgl.v, _sgl_pack_rgbab(r, g, b, a)); +} + +SOKOL_API_IMPL void sgl_v2f_c1i(float x, float y, uint32_t rgba) { + _sgl_vtx(x, y, 0.0f, _sgl.u, _sgl.v, rgba); +} + +SOKOL_API_IMPL void sgl_v3f_c3f(float x, float y, float z, float r, float g, float b) { + _sgl_vtx(x, y, z, _sgl.u, _sgl.v, _sgl_pack_rgbaf(r, g, b, 1.0f)); +} + +SOKOL_API_IMPL void sgl_v3f_c3b(float x, float y, 
float z, uint8_t r, uint8_t g, uint8_t b) { + _sgl_vtx(x, y, z, _sgl.u, _sgl.v, _sgl_pack_rgbab(r, g, b, 255)); +} + +SOKOL_API_IMPL void sgl_v3f_c4f(float x, float y, float z, float r, float g, float b, float a) { + _sgl_vtx(x, y, z, _sgl.u, _sgl.v, _sgl_pack_rgbaf(r, g, b, a)); +} + +SOKOL_API_IMPL void sgl_v3f_c4b(float x, float y, float z, uint8_t r, uint8_t g, uint8_t b, uint8_t a) { + _sgl_vtx(x, y, z, _sgl.u, _sgl.v, _sgl_pack_rgbab(r, g, b, a)); +} + +SOKOL_API_IMPL void sgl_v3f_c1i(float x, float y, float z, uint32_t rgba) { + _sgl_vtx(x, y, z, _sgl.u, _sgl.v, rgba); +} + +SOKOL_API_IMPL void sgl_v2f_t2f_c3f(float x, float y, float u, float v, float r, float g, float b) { + _sgl_vtx(x, y, 0.0f, u, v, _sgl_pack_rgbaf(r, g, b, 1.0f)); +} + +SOKOL_API_IMPL void sgl_v2f_t2f_c3b(float x, float y, float u, float v, uint8_t r, uint8_t g, uint8_t b) { + _sgl_vtx(x, y, 0.0f, u, v, _sgl_pack_rgbab(r, g, b, 255)); +} + +SOKOL_API_IMPL void sgl_v2f_t2f_c4f(float x, float y, float u, float v, float r, float g, float b, float a) { + _sgl_vtx(x, y, 0.0f, u, v, _sgl_pack_rgbaf(r, g, b, a)); +} + +SOKOL_API_IMPL void sgl_v2f_t2f_c4b(float x, float y, float u, float v, uint8_t r, uint8_t g, uint8_t b, uint8_t a) { + _sgl_vtx(x, y, 0.0f, u, v, _sgl_pack_rgbab(r, g, b, a)); +} + +SOKOL_API_IMPL void sgl_v2f_t2f_c1i(float x, float y, float u, float v, uint32_t rgba) { + _sgl_vtx(x, y, 0.0f, u, v, rgba); +} + +SOKOL_API_IMPL void sgl_v3f_t2f_c3f(float x, float y, float z, float u, float v, float r, float g, float b) { + _sgl_vtx(x, y, z, u, v, _sgl_pack_rgbaf(r, g, b, 1.0f)); +} + +SOKOL_API_IMPL void sgl_v3f_t2f_c3b(float x, float y, float z, float u, float v, uint8_t r, uint8_t g, uint8_t b) { + _sgl_vtx(x, y, z, u, v, _sgl_pack_rgbab(r, g, b, 255)); +} + +SOKOL_API_IMPL void sgl_v3f_t2f_c4f(float x, float y, float z, float u, float v, float r, float g, float b, float a) { + _sgl_vtx(x, y, z, u, v, _sgl_pack_rgbaf(r, g, b, a)); +} + +SOKOL_API_IMPL void sgl_v3f_t2f_c4b(float x, float y, float z, float u, float v, uint8_t r, uint8_t g, uint8_t b, uint8_t a) { + _sgl_vtx(x, y, z, u, v, _sgl_pack_rgbab(r, g, b, a)); +} + +SOKOL_API_IMPL void sgl_v3f_t2f_c1i(float x, float y, float z, float u, float v, uint32_t rgba) { + _sgl_vtx(x, y, z, u, v, rgba); +} + +SOKOL_API_IMPL void sgl_matrix_mode_modelview(void) { + SOKOL_ASSERT(_SGL_INIT_COOKIE == _sgl.init_cookie); + _sgl.cur_matrix_mode = SGL_MATRIXMODE_MODELVIEW; +} + +SOKOL_API_IMPL void sgl_matrix_mode_projection(void) { + SOKOL_ASSERT(_SGL_INIT_COOKIE == _sgl.init_cookie); + _sgl.cur_matrix_mode = SGL_MATRIXMODE_PROJECTION; +} + +SOKOL_API_IMPL void sgl_matrix_mode_texture(void) { + SOKOL_ASSERT(_SGL_INIT_COOKIE == _sgl.init_cookie); + _sgl.cur_matrix_mode = SGL_MATRIXMODE_TEXTURE; +} + +SOKOL_API_IMPL void sgl_load_identity(void) { + SOKOL_ASSERT(_SGL_INIT_COOKIE == _sgl.init_cookie); + _sgl.matrix_dirty = true; + _sgl_identity(_sgl_matrix()); +} + +SOKOL_API_IMPL void sgl_load_matrix(const float m[16]) { + SOKOL_ASSERT(_SGL_INIT_COOKIE == _sgl.init_cookie); + _sgl.matrix_dirty = true; + memcpy(&_sgl_matrix()->v[0][0], &m[0], 64); +} + +SOKOL_API_IMPL void sgl_load_transpose_matrix(const float m[16]) { + SOKOL_ASSERT(_SGL_INIT_COOKIE == _sgl.init_cookie); + _sgl.matrix_dirty = true; + _sgl_transpose(_sgl_matrix(), (const _sgl_matrix_t*) &m[0]); +} + +SOKOL_API_IMPL void sgl_mult_matrix(const float m[16]) { + SOKOL_ASSERT(_SGL_INIT_COOKIE == _sgl.init_cookie); + _sgl.matrix_dirty = true; + const _sgl_matrix_t* m0 = (const _sgl_matrix_t*) &m[0]; + 
_sgl_mul(_sgl_matrix(), m0); +} + +SOKOL_API_IMPL void sgl_mult_transpose_matrix(const float m[16]) { + SOKOL_ASSERT(_SGL_INIT_COOKIE == _sgl.init_cookie); + _sgl.matrix_dirty = true; + _sgl_matrix_t m0; + _sgl_transpose(&m0, (const _sgl_matrix_t*) &m[0]); + _sgl_mul(_sgl_matrix(), &m0); +} + +SOKOL_API_IMPL void sgl_rotate(float angle_rad, float x, float y, float z) { + SOKOL_ASSERT(_SGL_INIT_COOKIE == _sgl.init_cookie); + _sgl.matrix_dirty = true; + _sgl_rotate(_sgl_matrix(), angle_rad, x, y, z); +} + +SOKOL_API_IMPL void sgl_scale(float x, float y, float z) { + SOKOL_ASSERT(_SGL_INIT_COOKIE == _sgl.init_cookie); + _sgl.matrix_dirty = true; + _sgl_scale(_sgl_matrix(), x, y, z); +} + +SOKOL_API_IMPL void sgl_translate(float x, float y, float z) { + SOKOL_ASSERT(_SGL_INIT_COOKIE == _sgl.init_cookie); + _sgl.matrix_dirty = true; + _sgl_translate(_sgl_matrix(), x, y, z); +} + +SOKOL_API_IMPL void sgl_frustum(float l, float r, float b, float t, float n, float f) { + SOKOL_ASSERT(_SGL_INIT_COOKIE == _sgl.init_cookie); + _sgl.matrix_dirty = true; + _sgl_frustum(_sgl_matrix(), l, r, b, t, n, f); +} + +SOKOL_API_IMPL void sgl_ortho(float l, float r, float b, float t, float n, float f) { + SOKOL_ASSERT(_SGL_INIT_COOKIE == _sgl.init_cookie); + _sgl.matrix_dirty = true; + _sgl_ortho(_sgl_matrix(), l, r, b, t, n, f); +} + +SOKOL_API_IMPL void sgl_perspective(float fov_y, float aspect, float z_near, float z_far) { + SOKOL_ASSERT(_SGL_INIT_COOKIE == _sgl.init_cookie); + _sgl.matrix_dirty = true; + _sgl_perspective(_sgl_matrix(), fov_y, aspect, z_near, z_far); +} + +SOKOL_API_IMPL void sgl_lookat(float eye_x, float eye_y, float eye_z, float center_x, float center_y, float center_z, float up_x, float up_y, float up_z) { + SOKOL_ASSERT(_SGL_INIT_COOKIE == _sgl.init_cookie); + _sgl.matrix_dirty = true; + _sgl_lookat(_sgl_matrix(), eye_x, eye_y, eye_z, center_x, center_y, center_z, up_x, up_y, up_z); +} + +SOKOL_GL_API_DECL void sgl_push_matrix(void) { + SOKOL_ASSERT(_SGL_INIT_COOKIE == _sgl.init_cookie); + SOKOL_ASSERT((_sgl.cur_matrix_mode >= 0) && (_sgl.cur_matrix_mode < SGL_NUM_MATRIXMODES)); + _sgl.matrix_dirty = true; + if (_sgl.matrix_tos[_sgl.cur_matrix_mode] < (_SGL_MAX_STACK_DEPTH - 1)) { + const _sgl_matrix_t* src = _sgl_matrix(); + _sgl.matrix_tos[_sgl.cur_matrix_mode]++; + _sgl_matrix_t* dst = _sgl_matrix(); + *dst = *src; + } + else { + _sgl.error = SGL_ERROR_STACK_OVERFLOW; + } +} + +SOKOL_GL_API_DECL void sgl_pop_matrix(void) { + SOKOL_ASSERT(_SGL_INIT_COOKIE == _sgl.init_cookie); + SOKOL_ASSERT((_sgl.cur_matrix_mode >= 0) && (_sgl.cur_matrix_mode < SGL_NUM_MATRIXMODES)); + _sgl.matrix_dirty = true; + if (_sgl.matrix_tos[_sgl.cur_matrix_mode] > 0) { + _sgl.matrix_tos[_sgl.cur_matrix_mode]--; + } + else { + _sgl.error = SGL_ERROR_STACK_UNDERFLOW; + } +} + +/* this renders the accumulated draw commands via sokol-gfx */ +SOKOL_API_IMPL void sgl_draw(void) { + SOKOL_ASSERT(_SGL_INIT_COOKIE == _sgl.init_cookie); + if ((_sgl.error == SGL_NO_ERROR) && (_sgl.cur_vertex > 0) && (_sgl.cur_command > 0)) { + uint32_t cur_pip_id = SG_INVALID_ID; + uint32_t cur_img_id = SG_INVALID_ID; + int cur_uniform_index = -1; + sg_push_debug_group("sokol-gl"); + const sg_range range = { _sgl.vertices, (size_t)_sgl.cur_vertex * sizeof(_sgl_vertex_t) }; + sg_update_buffer(_sgl.vbuf, &range); + _sgl.bind.vertex_buffers[0] = _sgl.vbuf; + for (int i = 0; i < _sgl.cur_command; i++) { + const _sgl_command_t* cmd = &_sgl.commands[i]; + switch (cmd->cmd) { + case SGL_COMMAND_VIEWPORT: + { + const _sgl_viewport_args_t* args = 
&cmd->args.viewport; + sg_apply_viewport(args->x, args->y, args->w, args->h, args->origin_top_left); + } + break; + case SGL_COMMAND_SCISSOR_RECT: + { + const _sgl_scissor_rect_args_t* args = &cmd->args.scissor_rect; + sg_apply_scissor_rect(args->x, args->y, args->w, args->h, args->origin_top_left); + } + break; + case SGL_COMMAND_DRAW: + { + const _sgl_draw_args_t* args = &cmd->args.draw; + if (args->pip.id != cur_pip_id) { + sg_apply_pipeline(args->pip); + cur_pip_id = args->pip.id; + /* when pipeline changes, also need to re-apply uniforms and bindings */ + cur_img_id = SG_INVALID_ID; + cur_uniform_index = -1; + } + if (cur_img_id != args->img.id) { + _sgl.bind.fs_images[0] = args->img; + sg_apply_bindings(&_sgl.bind); + cur_img_id = args->img.id; + } + if (cur_uniform_index != args->uniform_index) { + const sg_range ub_range = { &_sgl.uniforms[args->uniform_index], sizeof(_sgl_uniform_t) }; + sg_apply_uniforms(SG_SHADERSTAGE_VS, 0, &ub_range); + cur_uniform_index = args->uniform_index; + } + /* FIXME: what if number of vertices doesn't match the primitive type? */ + if (args->num_vertices > 0) { + sg_draw(args->base_vertex, args->num_vertices, 1); + } + } + break; + } + } + sg_pop_debug_group(); + } + _sgl_rewind(); +} +#endif /* SOKOL_GL_IMPL */ diff --git a/v_windows/v/old/thirdparty/stb_image/stb_image.h b/v_windows/v/old/thirdparty/stb_image/stb_image.h new file mode 100644 index 0000000..816511c --- /dev/null +++ b/v_windows/v/old/thirdparty/stb_image/stb_image.h @@ -0,0 +1,7766 @@ +/* stb_image - v2.26 - public domain image loader - http://nothings.org/stb + no warranty implied; use at your own risk + + Do this: + #define STB_IMAGE_IMPLEMENTATION + before you include this file in *one* C or C++ file to create the implementation. + + // i.e. it should look like this: + #include ... + #include ... + #include ... + #define STB_IMAGE_IMPLEMENTATION + #include "stb_image.h" + + You can #define STBI_ASSERT(x) before the #include to avoid using assert.h. + And #define STBI_MALLOC, STBI_REALLOC, and STBI_FREE to avoid using malloc,realloc,free + + + QUICK NOTES: + Primarily of interest to game developers and other people who can + avoid problematic images and only need the trivial interface + + JPEG baseline & progressive (12 bpc/arithmetic not supported, same as stock IJG lib) + PNG 1/2/4/8/16-bit-per-channel + + TGA (not sure what subset, if a subset) + BMP non-1bpp, non-RLE + PSD (composited view only, no extra channels, 8/16 bit-per-channel) + + GIF (*comp always reports as 4-channel) + HDR (radiance rgbE format) + PIC (Softimage PIC) + PNM (PPM and PGM binary only) + + Animated GIF still needs a proper API, but here's one way to do it: + http://gist.github.com/urraka/685d9a6340b26b830d49 + + - decode from memory or through FILE (define STBI_NO_STDIO to remove code) + - decode from arbitrary I/O callbacks + - SIMD acceleration on x86/x64 (SSE2) and ARM (NEON) + + Full documentation under "DOCUMENTATION" below. + + +LICENSE + + See end of file for license information. 
+ +RECENT REVISION HISTORY: + + 2.26 (2020-07-13) many minor fixes + 2.25 (2020-02-02) fix warnings + 2.24 (2020-02-02) fix warnings; thread-local failure_reason and flip_vertically + 2.23 (2019-08-11) fix clang static analysis warning + 2.22 (2019-03-04) gif fixes, fix warnings + 2.21 (2019-02-25) fix typo in comment + 2.20 (2019-02-07) support utf8 filenames in Windows; fix warnings and platform ifdefs + 2.19 (2018-02-11) fix warning + 2.18 (2018-01-30) fix warnings + 2.17 (2018-01-29) bugfix, 1-bit BMP, 16-bitness query, fix warnings + 2.16 (2017-07-23) all functions have 16-bit variants; optimizations; bugfixes + 2.15 (2017-03-18) fix png-1,2,4; all Imagenet JPGs; no runtime SSE detection on GCC + 2.14 (2017-03-03) remove deprecated STBI_JPEG_OLD; fixes for Imagenet JPGs + 2.13 (2016-12-04) experimental 16-bit API, only for PNG so far; fixes + 2.12 (2016-04-02) fix typo in 2.11 PSD fix that caused crashes + 2.11 (2016-04-02) 16-bit PNGS; enable SSE2 in non-gcc x64 + RGB-format JPEG; remove white matting in PSD; + allocate large structures on the stack; + correct channel count for PNG & BMP + 2.10 (2016-01-22) avoid warning introduced in 2.09 + 2.09 (2016-01-16) 16-bit TGA; comments in PNM files; STBI_REALLOC_SIZED + + See end of file for full revision history. + + + ============================ Contributors ========================= + + Image formats Extensions, features + Sean Barrett (jpeg, png, bmp) Jetro Lauha (stbi_info) + Nicolas Schulz (hdr, psd) Martin "SpartanJ" Golini (stbi_info) + Jonathan Dummer (tga) James "moose2000" Brown (iPhone PNG) + Jean-Marc Lienher (gif) Ben "Disch" Wenger (io callbacks) + Tom Seddon (pic) Omar Cornut (1/2/4-bit PNG) + Thatcher Ulrich (psd) Nicolas Guillemot (vertical flip) + Ken Miller (pgm, ppm) Richard Mitton (16-bit PSD) + github:urraka (animated gif) Junggon Kim (PNM comments) + Christopher Forseth (animated gif) Daniel Gibson (16-bit TGA) + socks-the-fox (16-bit PNG) + Jeremy Sawicki (handle all ImageNet JPGs) + Optimizations & bugfixes Mikhail Morozov (1-bit BMP) + Fabian "ryg" Giesen Anael Seghezzi (is-16-bit query) + Arseny Kapoulkine + John-Mark Allen + Carmelo J Fdez-Aguera + + Bug & warning fixes + Marc LeBlanc David Woo Guillaume George Martins Mozeiko + Christpher Lloyd Jerry Jansson Joseph Thomson Blazej Dariusz Roszkowski + Phil Jordan Dave Moore Roy Eltham + Hayaki Saito Nathan Reed Won Chun + Luke Graham Johan Duparc Nick Verigakis the Horde3D community + Thomas Ruf Ronny Chevalier github:rlyeh + Janez Zemva John Bartholomew Michal Cichon github:romigrou + Jonathan Blow Ken Hamada Tero Hanninen github:svdijk + Laurent Gomila Cort Stratton github:snagar + Aruelien Pocheville Sergio Gonzalez Thibault Reuille github:Zelex + Cass Everitt Ryamond Barbiero github:grim210 + Paul Du Bois Engin Manap Aldo Culquicondor github:sammyhw + Philipp Wiesemann Dale Weiler Oriol Ferrer Mesia github:phprus + Josh Tobin Matthew Gregan github:poppolopoppo + Julian Raschke Gregory Mullen Christian Floisand github:darealshinji + Baldur Karlsson Kevin Schmidt JR Smith github:Michaelangel007 + Brad Weinberger Matvey Cherevko [reserved] + Luca Sas Alexander Veselov Zack Middleton [reserved] + Ryan C. Gordon [reserved] [reserved] + DO NOT ADD YOUR NAME HERE + + To add your name to the credits, pick a random blank space in the middle and fill it. + 80% of merge conflicts on stb PRs are due to people adding their name at the end + of the credits. 
+*/ + +#ifndef STBI_INCLUDE_STB_IMAGE_H +#define STBI_INCLUDE_STB_IMAGE_H + +// DOCUMENTATION +// +// Limitations: +// - no 12-bit-per-channel JPEG +// - no JPEGs with arithmetic coding +// - GIF always returns *comp=4 +// +// Basic usage (see HDR discussion below for HDR usage): +// int x,y,n; +// unsigned char *data = stbi_load(filename, &x, &y, &n, 0); +// // ... process data if not NULL ... +// // ... x = width, y = height, n = # 8-bit components per pixel ... +// // ... replace '0' with '1'..'4' to force that many components per pixel +// // ... but 'n' will always be the number that it would have been if you said 0 +// stbi_image_free(data) +// +// Standard parameters: +// int *x -- outputs image width in pixels +// int *y -- outputs image height in pixels +// int *channels_in_file -- outputs # of image components in image file +// int desired_channels -- if non-zero, # of image components requested in result +// +// The return value from an image loader is an 'unsigned char *' which points +// to the pixel data, or NULL on an allocation failure or if the image is +// corrupt or invalid. The pixel data consists of *y scanlines of *x pixels, +// with each pixel consisting of N interleaved 8-bit components; the first +// pixel pointed to is top-left-most in the image. There is no padding between +// image scanlines or between pixels, regardless of format. The number of +// components N is 'desired_channels' if desired_channels is non-zero, or +// *channels_in_file otherwise. If desired_channels is non-zero, +// *channels_in_file has the number of components that _would_ have been +// output otherwise. E.g. if you set desired_channels to 4, you will always +// get RGBA output, but you can check *channels_in_file to see if it's trivially +// opaque because e.g. there were only 3 channels in the source image. +// +// An output image with N components has the following components interleaved +// in this order in each pixel: +// +// N=#comp components +// 1 grey +// 2 grey, alpha +// 3 red, green, blue +// 4 red, green, blue, alpha +// +// If image loading fails for any reason, the return value will be NULL, +// and *x, *y, *channels_in_file will be unchanged. The function +// stbi_failure_reason() can be queried for an extremely brief, end-user +// unfriendly explanation of why the load failed. Define STBI_NO_FAILURE_STRINGS +// to avoid compiling these strings at all, and STBI_FAILURE_USERMSG to get slightly +// more user-friendly ones. +// +// Paletted PNG, BMP, GIF, and PIC images are automatically depalettized. +// +// =========================================================================== +// +// UNICODE: +// +// If compiling for Windows and you wish to use Unicode filenames, compile +// with +// #define STBI_WINDOWS_UTF8 +// and pass utf8-encoded filenames. Call stbi_convert_wchar_to_utf8 to convert +// Windows wchar_t filenames to utf8. +// +// =========================================================================== +// +// Philosophy +// +// stb libraries are designed with the following priorities: +// +// 1. easy to use +// 2. easy to maintain +// 3. good performance +// +// Sometimes I let "good performance" creep up in priority over "easy to maintain", +// and for best performance I may provide less-easy-to-use APIs that give higher +// performance, in addition to the easy-to-use ones. 
Nevertheless, it's important +// to keep in mind that from the standpoint of you, a client of this library, +// all you care about is #1 and #3, and stb libraries DO NOT emphasize #3 above all. +// +// Some secondary priorities arise directly from the first two, some of which +// provide more explicit reasons why performance can't be emphasized. +// +// - Portable ("ease of use") +// - Small source code footprint ("easy to maintain") +// - No dependencies ("ease of use") +// +// =========================================================================== +// +// I/O callbacks +// +// I/O callbacks allow you to read from arbitrary sources, like packaged +// files or some other source. Data read from callbacks are processed +// through a small internal buffer (currently 128 bytes) to try to reduce +// overhead. +// +// The three functions you must define are "read" (reads some bytes of data), +// "skip" (skips some bytes of data), "eof" (reports if the stream is at the end). +// +// =========================================================================== +// +// SIMD support +// +// The JPEG decoder will try to automatically use SIMD kernels on x86 when +// supported by the compiler. For ARM Neon support, you must explicitly +// request it. +// +// (The old do-it-yourself SIMD API is no longer supported in the current +// code.) +// +// On x86, SSE2 will automatically be used when available based on a run-time +// test; if not, the generic C versions are used as a fall-back. On ARM targets, +// the typical path is to have separate builds for NEON and non-NEON devices +// (at least this is true for iOS and Android). Therefore, the NEON support is +// toggled by a build flag: define STBI_NEON to get NEON loops. +// +// If for some reason you do not want to use any of SIMD code, or if +// you have issues compiling it, you can disable it entirely by +// defining STBI_NO_SIMD. +// +// =========================================================================== +// +// HDR image support (disable by defining STBI_NO_HDR) +// +// stb_image supports loading HDR images in general, and currently the Radiance +// .HDR file format specifically. You can still load any file through the existing +// interface; if you attempt to load an HDR file, it will be automatically remapped +// to LDR, assuming gamma 2.2 and an arbitrary scale factor defaulting to 1; +// both of these constants can be reconfigured through this interface: +// +// stbi_hdr_to_ldr_gamma(2.2f); +// stbi_hdr_to_ldr_scale(1.0f); +// +// (note, do not use _inverse_ constants; stbi_image will invert them +// appropriately). 
+// +// Additionally, there is a new, parallel interface for loading files as +// (linear) floats to preserve the full dynamic range: +// +// float *data = stbi_loadf(filename, &x, &y, &n, 0); +// +// If you load LDR images through this interface, those images will +// be promoted to floating point values, run through the inverse of +// constants corresponding to the above: +// +// stbi_ldr_to_hdr_scale(1.0f); +// stbi_ldr_to_hdr_gamma(2.2f); +// +// Finally, given a filename (or an open file or memory block--see header +// file for details) containing image data, you can query for the "most +// appropriate" interface to use (that is, whether the image is HDR or +// not), using: +// +// stbi_is_hdr(char *filename); +// +// =========================================================================== +// +// iPhone PNG support: +// +// By default we convert iphone-formatted PNGs back to RGB, even though +// they are internally encoded differently. You can disable this conversion +// by calling stbi_convert_iphone_png_to_rgb(0), in which case +// you will always just get the native iphone "format" through (which +// is BGR stored in RGB). +// +// Call stbi_set_unpremultiply_on_load(1) as well to force a divide per +// pixel to remove any premultiplied alpha *only* if the image file explicitly +// says there's premultiplied data (currently only happens in iPhone images, +// and only if iPhone convert-to-rgb processing is on). +// +// =========================================================================== +// +// ADDITIONAL CONFIGURATION +// +// - You can suppress implementation of any of the decoders to reduce +// your code footprint by #defining one or more of the following +// symbols before creating the implementation. +// +// STBI_NO_JPEG +// STBI_NO_PNG +// STBI_NO_BMP +// STBI_NO_PSD +// STBI_NO_TGA +// STBI_NO_GIF +// STBI_NO_HDR +// STBI_NO_PIC +// STBI_NO_PNM (.ppm and .pgm) +// +// - You can request *only* certain decoders and suppress all other ones +// (this will be more forward-compatible, as addition of new decoders +// doesn't require you to disable them explicitly): +// +// STBI_ONLY_JPEG +// STBI_ONLY_PNG +// STBI_ONLY_BMP +// STBI_ONLY_PSD +// STBI_ONLY_TGA +// STBI_ONLY_GIF +// STBI_ONLY_HDR +// STBI_ONLY_PIC +// STBI_ONLY_PNM (.ppm and .pgm) +// +// - If you use STBI_NO_PNG (or _ONLY_ without PNG), and you still +// want the zlib decoder to be available, #define STBI_SUPPORT_ZLIB +// +// - If you define STBI_MAX_DIMENSIONS, stb_image will reject images greater +// than that size (in either width or height) without further processing. +// This is to let programs in the wild set an upper bound to prevent +// denial-of-service attacks on untrusted data, as one could generate a +// valid image of gigantic dimensions and force stb_image to allocate a +// huge block of memory and spend disproportionate time decoding it. By +// default this is set to (1 << 24), which is 16777216, but that's still +// very big. 
+
+#ifdef __TINYC__
+#define STBI_NO_SIMD
+#endif
+
+#ifndef STBI_NO_STDIO
+#include <stdio.h>
+#endif // STBI_NO_STDIO
+
+#define STBI_VERSION 1
+
+enum
+{
+   STBI_default = 0, // only used for desired_channels
+
+   STBI_grey       = 1,
+   STBI_grey_alpha = 2,
+   STBI_rgb        = 3,
+   STBI_rgb_alpha  = 4
+};
+
+#include <stdlib.h>
+typedef unsigned char stbi_uc;
+typedef unsigned short stbi_us;
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef STBIDEF
+#ifdef STB_IMAGE_STATIC
+#define STBIDEF static
+#else
+#define STBIDEF extern
+#endif
+#endif
+
+//////////////////////////////////////////////////////////////////////////////
+//
+// PRIMARY API - works on images of any type
+//
+
+//
+// load image by filename, open file, or memory buffer
+//
+
+typedef struct
+{
+   int  (*read) (void *user,char *data,int size); // fill 'data' with 'size' bytes. return number of bytes actually read
+   void (*skip) (void *user,int n);               // skip the next 'n' bytes, or 'unget' the last -n bytes if negative
+   int  (*eof)  (void *user);                     // returns nonzero if we are at end of file/data
+} stbi_io_callbacks;
+
+////////////////////////////////////
+//
+// 8-bits-per-channel interface
+//
+
+STBIDEF stbi_uc *stbi_load_from_memory   (stbi_uc const *buffer, int len, int *x, int *y, int *channels_in_file, int desired_channels);
+STBIDEF stbi_uc *stbi_load_from_callbacks(stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *channels_in_file, int desired_channels);
+
+#ifndef STBI_NO_STDIO
+STBIDEF stbi_uc *stbi_load          (char const *filename, int *x, int *y, int *channels_in_file, int desired_channels);
+STBIDEF stbi_uc *stbi_load_from_file(FILE *f, int *x, int *y, int *channels_in_file, int desired_channels);
+// for stbi_load_from_file, file pointer is left pointing immediately after image
+#endif
+
+#ifndef STBI_NO_GIF
+STBIDEF stbi_uc *stbi_load_gif_from_memory(stbi_uc const *buffer, int len, int **delays, int *x, int *y, int *z, int *comp, int req_comp);
+#endif
+
+#ifdef STBI_WINDOWS_UTF8
+STBIDEF int stbi_convert_wchar_to_utf8(char *buffer, size_t bufferlen, const wchar_t* input);
+#endif
+
+////////////////////////////////////
+//
+// 16-bits-per-channel interface
+//
+
+STBIDEF stbi_us *stbi_load_16_from_memory   (stbi_uc const *buffer, int len, int *x, int *y, int *channels_in_file, int desired_channels);
+STBIDEF stbi_us *stbi_load_16_from_callbacks(stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *channels_in_file, int desired_channels);
+
+#ifndef STBI_NO_STDIO
+STBIDEF stbi_us *stbi_load_16          (char const *filename, int *x, int *y, int *channels_in_file, int desired_channels);
+STBIDEF stbi_us *stbi_load_from_file_16(FILE *f, int *x, int *y, int *channels_in_file, int desired_channels);
+#endif
+
+////////////////////////////////////
+//
+// float-per-channel interface
+//
+#ifndef STBI_NO_LINEAR
+   STBIDEF float *stbi_loadf_from_memory   (stbi_uc const *buffer, int len, int *x, int *y, int *channels_in_file, int desired_channels);
+   STBIDEF float *stbi_loadf_from_callbacks(stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *channels_in_file, int desired_channels);
+
+   #ifndef STBI_NO_STDIO
+   STBIDEF float *stbi_loadf          (char const *filename, int *x, int *y, int *channels_in_file, int desired_channels);
+   STBIDEF float *stbi_loadf_from_file(FILE *f, int *x, int *y, int *channels_in_file, int desired_channels);
+   #endif
+#endif
+
+#ifndef STBI_NO_HDR
+   STBIDEF void stbi_hdr_to_ldr_gamma(float gamma);
+   STBIDEF void stbi_hdr_to_ldr_scale(float scale);
+#endif // STBI_NO_HDR
+
+#ifndef STBI_NO_LINEAR
+   STBIDEF void
stbi_ldr_to_hdr_gamma(float gamma); + STBIDEF void stbi_ldr_to_hdr_scale(float scale); +#endif // STBI_NO_LINEAR + +// stbi_is_hdr is always defined, but always returns false if STBI_NO_HDR +STBIDEF int stbi_is_hdr_from_callbacks(stbi_io_callbacks const *clbk, void *user); +STBIDEF int stbi_is_hdr_from_memory(stbi_uc const *buffer, int len); +#ifndef STBI_NO_STDIO +STBIDEF int stbi_is_hdr (char const *filename); +STBIDEF int stbi_is_hdr_from_file(FILE *f); +#endif // STBI_NO_STDIO + + +// get a VERY brief reason for failure +// on most compilers (and ALL modern mainstream compilers) this is threadsafe +STBIDEF const char *stbi_failure_reason (void); + +// free the loaded image -- this is just free() +STBIDEF void stbi_image_free (void *retval_from_stbi_load); + +// get image dimensions & components without fully decoding +STBIDEF int stbi_info_from_memory(stbi_uc const *buffer, int len, int *x, int *y, int *comp); +STBIDEF int stbi_info_from_callbacks(stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *comp); +STBIDEF int stbi_is_16_bit_from_memory(stbi_uc const *buffer, int len); +STBIDEF int stbi_is_16_bit_from_callbacks(stbi_io_callbacks const *clbk, void *user); + +#ifndef STBI_NO_STDIO +STBIDEF int stbi_info (char const *filename, int *x, int *y, int *comp); +STBIDEF int stbi_info_from_file (FILE *f, int *x, int *y, int *comp); +STBIDEF int stbi_is_16_bit (char const *filename); +STBIDEF int stbi_is_16_bit_from_file(FILE *f); +#endif + + + +// for image formats that explicitly notate that they have premultiplied alpha, +// we just return the colors as stored in the file. set this flag to force +// unpremultiplication. results are undefined if the unpremultiply overflow. +STBIDEF void stbi_set_unpremultiply_on_load(int flag_true_if_should_unpremultiply); + +// indicate whether we should process iphone images back to canonical format, +// or just pass them through "as-is" +STBIDEF void stbi_convert_iphone_png_to_rgb(int flag_true_if_should_convert); + +// flip the image vertically, so the first pixel in the output array is the bottom left +STBIDEF void stbi_set_flip_vertically_on_load(int flag_true_if_should_flip); + +// as above, but only applies to images loaded on the thread that calls the function +// this function is only available if your compiler supports thread-local variables; +// calling it will fail to link if your compiler doesn't +STBIDEF void stbi_set_flip_vertically_on_load_thread(int flag_true_if_should_flip); + +// ZLIB client - used by PNG, available for other purposes + +STBIDEF char *stbi_zlib_decode_malloc_guesssize(const char *buffer, int len, int initial_size, int *outlen); +STBIDEF char *stbi_zlib_decode_malloc_guesssize_headerflag(const char *buffer, int len, int initial_size, int *outlen, int parse_header); +STBIDEF char *stbi_zlib_decode_malloc(const char *buffer, int len, int *outlen); +STBIDEF int stbi_zlib_decode_buffer(char *obuffer, int olen, const char *ibuffer, int ilen); + +STBIDEF char *stbi_zlib_decode_noheader_malloc(const char *buffer, int len, int *outlen); +STBIDEF int stbi_zlib_decode_noheader_buffer(char *obuffer, int olen, const char *ibuffer, int ilen); + + +#ifdef __cplusplus +} +#endif + +// +// +//// end header file ///////////////////////////////////////////////////// +#endif // STBI_INCLUDE_STB_IMAGE_H + +#ifdef STB_IMAGE_IMPLEMENTATION + +#if defined(STBI_ONLY_JPEG) || defined(STBI_ONLY_PNG) || defined(STBI_ONLY_BMP) \ + || defined(STBI_ONLY_TGA) || defined(STBI_ONLY_GIF) || defined(STBI_ONLY_PSD) \ + || 
defined(STBI_ONLY_HDR) || defined(STBI_ONLY_PIC) || defined(STBI_ONLY_PNM) \
+  || defined(STBI_ONLY_ZLIB)
+   #ifndef STBI_ONLY_JPEG
+   #define STBI_NO_JPEG
+   #endif
+   #ifndef STBI_ONLY_PNG
+   #define STBI_NO_PNG
+   #endif
+   #ifndef STBI_ONLY_BMP
+   #define STBI_NO_BMP
+   #endif
+   #ifndef STBI_ONLY_PSD
+   #define STBI_NO_PSD
+   #endif
+   #ifndef STBI_ONLY_TGA
+   #define STBI_NO_TGA
+   #endif
+   #ifndef STBI_ONLY_GIF
+   #define STBI_NO_GIF
+   #endif
+   #ifndef STBI_ONLY_HDR
+   #define STBI_NO_HDR
+   #endif
+   #ifndef STBI_ONLY_PIC
+   #define STBI_NO_PIC
+   #endif
+   #ifndef STBI_ONLY_PNM
+   #define STBI_NO_PNM
+   #endif
+#endif
+
+#if defined(STBI_NO_PNG) && !defined(STBI_SUPPORT_ZLIB) && !defined(STBI_NO_ZLIB)
+#define STBI_NO_ZLIB
+#endif
+
+
+#include <stdarg.h>
+#include <stddef.h> // ptrdiff_t on osx
+#include <stdlib.h>
+#include <string.h>
+#include <limits.h>
+
+#if !defined(STBI_NO_LINEAR) || !defined(STBI_NO_HDR)
+#include <math.h>  // ldexp, pow
+#endif
+
+#ifndef STBI_NO_STDIO
+#include <stdio.h>
+#endif
+
+#ifndef STBI_ASSERT
+#include <assert.h>
+#define STBI_ASSERT(x) assert(x)
+#endif
+
+#ifdef __cplusplus
+#define STBI_EXTERN extern "C"
+#else
+#define STBI_EXTERN extern
+#endif
+
+
+#ifndef _MSC_VER
+   #ifdef __cplusplus
+   #define stbi_inline inline
+   #else
+   #define stbi_inline
+   #endif
+#else
+   #define stbi_inline __forceinline
+#endif
+
+#ifndef STBI_NO_THREAD_LOCALS
+   #if defined(__cplusplus) && __cplusplus >= 201103L
+      #define STBI_THREAD_LOCAL thread_local
+   #elif defined(__GNUC__) && __GNUC__ < 5
+      #define STBI_THREAD_LOCAL __thread
+   #elif defined(_MSC_VER)
+      #define STBI_THREAD_LOCAL __declspec(thread)
+   #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 201112L && !defined(__STDC_NO_THREADS__)
+      #define STBI_THREAD_LOCAL _Thread_local
+   #endif
+
+   #ifndef STBI_THREAD_LOCAL
+      #if defined(__GNUC__)
+      #define STBI_THREAD_LOCAL __thread
+      #endif
+   #endif
+#endif
+
+#ifdef _MSC_VER
+typedef unsigned short stbi__uint16;
+typedef   signed short stbi__int16;
+typedef unsigned int   stbi__uint32;
+typedef   signed int   stbi__int32;
+#else
+#include <stdint.h>
+typedef uint16_t stbi__uint16;
+typedef int16_t  stbi__int16;
+typedef uint32_t stbi__uint32;
+typedef int32_t  stbi__int32;
+#endif
+
+// should produce compiler error if size is wrong
+typedef unsigned char validate_uint32[sizeof(stbi__uint32)==4 ? 1 : -1];
+
+#ifdef _MSC_VER
+#define STBI_NOTUSED(v)  (void)(v)
+#else
+#define STBI_NOTUSED(v)  (void)sizeof(v)
+#endif
+
+#ifdef _MSC_VER
+#define STBI_HAS_LROTL
+#endif
+
+#ifdef STBI_HAS_LROTL
+   #define stbi_lrot(x,y)  _lrotl(x,y)
+#else
+   #define stbi_lrot(x,y)  (((x) << (y)) | ((x) >> (32 - (y))))
+#endif
+
+#if defined(STBI_MALLOC) && defined(STBI_FREE) && (defined(STBI_REALLOC) || defined(STBI_REALLOC_SIZED))
+// ok
+#elif !defined(STBI_MALLOC) && !defined(STBI_FREE) && !defined(STBI_REALLOC) && !defined(STBI_REALLOC_SIZED)
+// ok
+#else
+#error "Must define all or none of STBI_MALLOC, STBI_FREE, and STBI_REALLOC (or STBI_REALLOC_SIZED)."
+#endif
+
+#ifndef STBI_MALLOC
+#define STBI_MALLOC(sz)       malloc(sz)
+#define STBI_REALLOC(p,newsz) realloc(p,newsz)
+#define STBI_FREE(p)          free(p)
+#endif
+
+#ifndef STBI_REALLOC_SIZED
+#define STBI_REALLOC_SIZED(p,oldsz,newsz) STBI_REALLOC(p,newsz)
+#endif
+
+// x86/x64 detection
+#if defined(__x86_64__) || defined(_M_X64)
+#define STBI__X64_TARGET
+#elif defined(__i386) || defined(_M_IX86)
+#define STBI__X86_TARGET
+#endif
+
+#if defined(__GNUC__) && defined(STBI__X86_TARGET) && !defined(__SSE2__) && !defined(STBI_NO_SIMD)
+// gcc doesn't support sse2 intrinsics unless you compile with -msse2,
+// which in turn means it gets to use SSE2 everywhere. This is unfortunate,
+// but previous attempts to provide the SSE2 functions with runtime
+// detection caused numerous issues. The way architecture extensions are
+// exposed in GCC/Clang is, sadly, not really suited for one-file libs.
+// New behavior: if compiled with -msse2, we use SSE2 without any
+// detection; if not, we don't use it at all.
+#define STBI_NO_SIMD
+#endif
+
+#if defined(__MINGW32__) && defined(STBI__X86_TARGET) && !defined(STBI_MINGW_ENABLE_SSE2) && !defined(STBI_NO_SIMD)
+// Note that __MINGW32__ doesn't actually mean 32-bit, so we have to avoid STBI__X64_TARGET
+//
+// 32-bit MinGW wants ESP to be 16-byte aligned, but this is not in the
+// Windows ABI and VC++ as well as Windows DLLs don't maintain that invariant.
+// As a result, enabling SSE2 on 32-bit MinGW is dangerous when not
+// simultaneously enabling "-mstackrealign".
+//
+// See https://github.com/nothings/stb/issues/81 for more information.
+//
+// So default to no SSE2 on 32-bit MinGW. If you've read this far and added
+// -mstackrealign to your build settings, feel free to #define STBI_MINGW_ENABLE_SSE2.
+#define STBI_NO_SIMD
+#endif
+
+#if !defined(STBI_NO_SIMD) && (defined(STBI__X86_TARGET) || defined(STBI__X64_TARGET))
+#define STBI_SSE2
+#include <emmintrin.h>
+
+#ifdef _MSC_VER
+
+#if _MSC_VER >= 1400  // not VC6
+#include <intrin.h> // __cpuid
+static int stbi__cpuid3(void)
+{
+   int info[4];
+   __cpuid(info,1);
+   return info[3];
+}
+#else
+static int stbi__cpuid3(void)
+{
+   int res;
+   __asm {
+      mov  eax,1
+      cpuid
+      mov  res,edx
+   }
+   return res;
+}
+#endif
+
+#define STBI_SIMD_ALIGN(type, name) __declspec(align(16)) type name
+
+#if !defined(STBI_NO_JPEG) && defined(STBI_SSE2)
+static int stbi__sse2_available(void)
+{
+   int info3 = stbi__cpuid3();
+   return ((info3 >> 26) & 1) != 0;
+}
+#endif
+
+#else // assume GCC-style if not VC++
+#define STBI_SIMD_ALIGN(type, name) type name __attribute__((aligned(16)))
+
+#if !defined(STBI_NO_JPEG) && defined(STBI_SSE2)
+static int stbi__sse2_available(void)
+{
+   // If we're even attempting to compile this on GCC/Clang, that means
+   // -msse2 is on, which means the compiler is allowed to use SSE2
+   // instructions at will, and so are we.
+   return 1;
+}
+#endif
+
+#endif
+#endif
+
+// ARM NEON
+#if defined(STBI_NO_SIMD) && defined(STBI_NEON)
+#undef STBI_NEON
+#endif
+
+#ifdef STBI_NEON
+#include <arm_neon.h>
+// assume GCC or Clang on ARM targets
+#define STBI_SIMD_ALIGN(type, name) type name __attribute__((aligned(16)))
+#endif
+
+#ifndef STBI_SIMD_ALIGN
+#define STBI_SIMD_ALIGN(type, name) type name
+#endif
+
+#ifndef STBI_MAX_DIMENSIONS
+#define STBI_MAX_DIMENSIONS (1 << 24)
+#endif
+
+///////////////////////////////////////////////
+//
+//  stbi__context struct and start_xxx functions
+
+// stbi__context structure is our basic context used by all images, so it
+// contains all the IO context, plus some basic image information
+typedef struct
+{
+   stbi__uint32 img_x, img_y;
+   int img_n, img_out_n;
+
+   stbi_io_callbacks io;
+   void *io_user_data;
+
+   int read_from_callbacks;
+   int buflen;
+   stbi_uc buffer_start[128];
+   int callback_already_read;
+
+   stbi_uc *img_buffer, *img_buffer_end;
+   stbi_uc *img_buffer_original, *img_buffer_original_end;
+} stbi__context;
+
+
+static void stbi__refill_buffer(stbi__context *s);
+
+// initialize a memory-decode context
+static void stbi__start_mem(stbi__context *s, stbi_uc const *buffer, int len)
+{
+   s->io.read = NULL;
+   s->read_from_callbacks = 0;
+   s->callback_already_read = 0;
+   s->img_buffer = s->img_buffer_original = (stbi_uc *) buffer;
+   s->img_buffer_end = s->img_buffer_original_end = (stbi_uc *) buffer+len;
+}
+
+// initialize a callback-based context
+static void stbi__start_callbacks(stbi__context *s, stbi_io_callbacks *c, void *user)
+{
+   s->io = *c;
+   s->io_user_data = user;
+   s->buflen = sizeof(s->buffer_start);
+   s->read_from_callbacks = 1;
+   s->callback_already_read = 0;
+   s->img_buffer = s->img_buffer_original = s->buffer_start;
+   stbi__refill_buffer(s);
+   s->img_buffer_original_end = s->img_buffer_end;
+}
+
+#ifndef STBI_NO_STDIO
+
+static int stbi__stdio_read(void *user, char *data, int size)
+{
+   return (int) fread(data,1,size,(FILE*) user);
+}
+
+static void stbi__stdio_skip(void *user, int n)
+{
+   int ch;
+   fseek((FILE*) user, n, SEEK_CUR);
+   ch = fgetc((FILE*) user);  /* have to read a byte to reset feof()'s flag */
+   if (ch != EOF) {
+      ungetc(ch, (FILE *) user);  /* push byte back onto stream if valid.
*/ + } +} + +static int stbi__stdio_eof(void *user) +{ + return feof((FILE*) user) || ferror((FILE *) user); +} + +static stbi_io_callbacks stbi__stdio_callbacks = +{ + stbi__stdio_read, + stbi__stdio_skip, + stbi__stdio_eof, +}; + +static void stbi__start_file(stbi__context *s, FILE *f) +{ + stbi__start_callbacks(s, &stbi__stdio_callbacks, (void *) f); +} + +//static void stop_file(stbi__context *s) { } + +#endif // !STBI_NO_STDIO + +static void stbi__rewind(stbi__context *s) +{ + // conceptually rewind SHOULD rewind to the beginning of the stream, + // but we just rewind to the beginning of the initial buffer, because + // we only use it after doing 'test', which only ever looks at at most 92 bytes + s->img_buffer = s->img_buffer_original; + s->img_buffer_end = s->img_buffer_original_end; +} + +enum +{ + STBI_ORDER_RGB, + STBI_ORDER_BGR +}; + +typedef struct +{ + int bits_per_channel; + int num_channels; + int channel_order; +} stbi__result_info; + +#ifndef STBI_NO_JPEG +static int stbi__jpeg_test(stbi__context *s); +static void *stbi__jpeg_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); +static int stbi__jpeg_info(stbi__context *s, int *x, int *y, int *comp); +#endif + +#ifndef STBI_NO_PNG +static int stbi__png_test(stbi__context *s); +static void *stbi__png_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); +static int stbi__png_info(stbi__context *s, int *x, int *y, int *comp); +static int stbi__png_is16(stbi__context *s); +#endif + +#ifndef STBI_NO_BMP +static int stbi__bmp_test(stbi__context *s); +static void *stbi__bmp_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); +static int stbi__bmp_info(stbi__context *s, int *x, int *y, int *comp); +#endif + +#ifndef STBI_NO_TGA +static int stbi__tga_test(stbi__context *s); +static void *stbi__tga_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); +static int stbi__tga_info(stbi__context *s, int *x, int *y, int *comp); +#endif + +#ifndef STBI_NO_PSD +static int stbi__psd_test(stbi__context *s); +static void *stbi__psd_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri, int bpc); +static int stbi__psd_info(stbi__context *s, int *x, int *y, int *comp); +static int stbi__psd_is16(stbi__context *s); +#endif + +#ifndef STBI_NO_HDR +static int stbi__hdr_test(stbi__context *s); +static float *stbi__hdr_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); +static int stbi__hdr_info(stbi__context *s, int *x, int *y, int *comp); +#endif + +#ifndef STBI_NO_PIC +static int stbi__pic_test(stbi__context *s); +static void *stbi__pic_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); +static int stbi__pic_info(stbi__context *s, int *x, int *y, int *comp); +#endif + +#ifndef STBI_NO_GIF +static int stbi__gif_test(stbi__context *s); +static void *stbi__gif_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); +static void *stbi__load_gif_main(stbi__context *s, int **delays, int *x, int *y, int *z, int *comp, int req_comp); +static int stbi__gif_info(stbi__context *s, int *x, int *y, int *comp); +#endif + +#ifndef STBI_NO_PNM +static int stbi__pnm_test(stbi__context *s); +static void *stbi__pnm_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); +static int stbi__pnm_info(stbi__context *s, int *x, int *y, int *comp); +#endif + +static 
+#ifdef STBI_THREAD_LOCAL +STBI_THREAD_LOCAL +#endif +const char *stbi__g_failure_reason; + +STBIDEF const char *stbi_failure_reason(void) +{ + return stbi__g_failure_reason; +} + +#ifndef STBI_NO_FAILURE_STRINGS +static int stbi__err(const char *str) +{ + stbi__g_failure_reason = str; + return 0; +} +#endif + +static void *stbi__malloc(size_t size) +{ + return STBI_MALLOC(size); +} + +// stb_image uses ints pervasively, including for offset calculations. +// therefore the largest decoded image size we can support with the +// current code, even on 64-bit targets, is INT_MAX. this is not a +// significant limitation for the intended use case. +// +// we do, however, need to make sure our size calculations don't +// overflow. hence a few helper functions for size calculations that +// multiply integers together, making sure that they're non-negative +// and no overflow occurs. + +// return 1 if the sum is valid, 0 on overflow. +// negative terms are considered invalid. +static int stbi__addsizes_valid(int a, int b) +{ + if (b < 0) return 0; + // now 0 <= b <= INT_MAX, hence also + // 0 <= INT_MAX - b <= INTMAX. + // And "a + b <= INT_MAX" (which might overflow) is the + // same as a <= INT_MAX - b (no overflow) + return a <= INT_MAX - b; +} + +// returns 1 if the product is valid, 0 on overflow. +// negative factors are considered invalid. +static int stbi__mul2sizes_valid(int a, int b) +{ + if (a < 0 || b < 0) return 0; + if (b == 0) return 1; // mul-by-0 is always safe + // portable way to check for no overflows in a*b + return a <= INT_MAX/b; +} + +#if !defined(STBI_NO_JPEG) || !defined(STBI_NO_PNG) || !defined(STBI_NO_TGA) || !defined(STBI_NO_HDR) +// returns 1 if "a*b + add" has no negative terms/factors and doesn't overflow +static int stbi__mad2sizes_valid(int a, int b, int add) +{ + return stbi__mul2sizes_valid(a, b) && stbi__addsizes_valid(a*b, add); +} +#endif + +// returns 1 if "a*b*c + add" has no negative terms/factors and doesn't overflow +static int stbi__mad3sizes_valid(int a, int b, int c, int add) +{ + return stbi__mul2sizes_valid(a, b) && stbi__mul2sizes_valid(a*b, c) && + stbi__addsizes_valid(a*b*c, add); +} + +// returns 1 if "a*b*c*d + add" has no negative terms/factors and doesn't overflow +#if !defined(STBI_NO_LINEAR) || !defined(STBI_NO_HDR) +static int stbi__mad4sizes_valid(int a, int b, int c, int d, int add) +{ + return stbi__mul2sizes_valid(a, b) && stbi__mul2sizes_valid(a*b, c) && + stbi__mul2sizes_valid(a*b*c, d) && stbi__addsizes_valid(a*b*c*d, add); +} +#endif + +#if !defined(STBI_NO_JPEG) || !defined(STBI_NO_PNG) || !defined(STBI_NO_TGA) || !defined(STBI_NO_HDR) +// mallocs with size overflow checking +static void *stbi__malloc_mad2(int a, int b, int add) +{ + if (!stbi__mad2sizes_valid(a, b, add)) return NULL; + return stbi__malloc(a*b + add); +} +#endif + +static void *stbi__malloc_mad3(int a, int b, int c, int add) +{ + if (!stbi__mad3sizes_valid(a, b, c, add)) return NULL; + return stbi__malloc(a*b*c + add); +} + +#if !defined(STBI_NO_LINEAR) || !defined(STBI_NO_HDR) +static void *stbi__malloc_mad4(int a, int b, int c, int d, int add) +{ + if (!stbi__mad4sizes_valid(a, b, c, d, add)) return NULL; + return stbi__malloc(a*b*c*d + add); +} +#endif + +// stbi__err - error +// stbi__errpf - error returning pointer to float +// stbi__errpuc - error returning pointer to unsigned char + +#ifdef STBI_NO_FAILURE_STRINGS + #define stbi__err(x,y) 0 +#elif defined(STBI_FAILURE_USERMSG) + #define stbi__err(x,y) stbi__err(y) +#else + #define stbi__err(x,y) 
stbi__err(x) +#endif + +#define stbi__errpf(x,y) ((float *)(size_t) (stbi__err(x,y)?NULL:NULL)) +#define stbi__errpuc(x,y) ((unsigned char *)(size_t) (stbi__err(x,y)?NULL:NULL)) + +STBIDEF void stbi_image_free(void *retval_from_stbi_load) +{ + STBI_FREE(retval_from_stbi_load); +} + +#ifndef STBI_NO_LINEAR +static float *stbi__ldr_to_hdr(stbi_uc *data, int x, int y, int comp); +#endif + +#ifndef STBI_NO_HDR +static stbi_uc *stbi__hdr_to_ldr(float *data, int x, int y, int comp); +#endif + +static int stbi__vertically_flip_on_load_global = 0; + +STBIDEF void stbi_set_flip_vertically_on_load(int flag_true_if_should_flip) +{ + stbi__vertically_flip_on_load_global = flag_true_if_should_flip; +} + +#ifndef STBI_THREAD_LOCAL +#define stbi__vertically_flip_on_load stbi__vertically_flip_on_load_global +#else +static STBI_THREAD_LOCAL int stbi__vertically_flip_on_load_local, stbi__vertically_flip_on_load_set; + +STBIDEF void stbi_set_flip_vertically_on_load_thread(int flag_true_if_should_flip) +{ + stbi__vertically_flip_on_load_local = flag_true_if_should_flip; + stbi__vertically_flip_on_load_set = 1; +} + +#define stbi__vertically_flip_on_load (stbi__vertically_flip_on_load_set \ + ? stbi__vertically_flip_on_load_local \ + : stbi__vertically_flip_on_load_global) +#endif // STBI_THREAD_LOCAL + +static void *stbi__load_main(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri, int bpc) +{ + memset(ri, 0, sizeof(*ri)); // make sure it's initialized if we add new fields + ri->bits_per_channel = 8; // default is 8 so most paths don't have to be changed + ri->channel_order = STBI_ORDER_RGB; // all current input & output are this, but this is here so we can add BGR order + ri->num_channels = 0; + + #ifndef STBI_NO_JPEG + if (stbi__jpeg_test(s)) return stbi__jpeg_load(s,x,y,comp,req_comp, ri); + #endif + #ifndef STBI_NO_PNG + if (stbi__png_test(s)) return stbi__png_load(s,x,y,comp,req_comp, ri); + #endif + #ifndef STBI_NO_BMP + if (stbi__bmp_test(s)) return stbi__bmp_load(s,x,y,comp,req_comp, ri); + #endif + #ifndef STBI_NO_GIF + if (stbi__gif_test(s)) return stbi__gif_load(s,x,y,comp,req_comp, ri); + #endif + #ifndef STBI_NO_PSD + if (stbi__psd_test(s)) return stbi__psd_load(s,x,y,comp,req_comp, ri, bpc); + #else + STBI_NOTUSED(bpc); + #endif + #ifndef STBI_NO_PIC + if (stbi__pic_test(s)) return stbi__pic_load(s,x,y,comp,req_comp, ri); + #endif + #ifndef STBI_NO_PNM + if (stbi__pnm_test(s)) return stbi__pnm_load(s,x,y,comp,req_comp, ri); + #endif + + #ifndef STBI_NO_HDR + if (stbi__hdr_test(s)) { + float *hdr = stbi__hdr_load(s, x,y,comp,req_comp, ri); + return stbi__hdr_to_ldr(hdr, *x, *y, req_comp ? req_comp : *comp); + } + #endif + + #ifndef STBI_NO_TGA + // test tga last because it's a crappy test! 
+ if (stbi__tga_test(s)) + return stbi__tga_load(s,x,y,comp,req_comp, ri); + #endif + + return stbi__errpuc("unknown image type", "Image not of any known type, or corrupt"); +} + +static stbi_uc *stbi__convert_16_to_8(stbi__uint16 *orig, int w, int h, int channels) +{ + int i; + int img_len = w * h * channels; + stbi_uc *reduced; + + reduced = (stbi_uc *) stbi__malloc(img_len); + if (reduced == NULL) return stbi__errpuc("outofmem", "Out of memory"); + + for (i = 0; i < img_len; ++i) + reduced[i] = (stbi_uc)((orig[i] >> 8) & 0xFF); // top half of each byte is sufficient approx of 16->8 bit scaling + + STBI_FREE(orig); + return reduced; +} + +static stbi__uint16 *stbi__convert_8_to_16(stbi_uc *orig, int w, int h, int channels) +{ + int i; + int img_len = w * h * channels; + stbi__uint16 *enlarged; + + enlarged = (stbi__uint16 *) stbi__malloc(img_len*2); + if (enlarged == NULL) return (stbi__uint16 *) stbi__errpuc("outofmem", "Out of memory"); + + for (i = 0; i < img_len; ++i) + enlarged[i] = (stbi__uint16)((orig[i] << 8) + orig[i]); // replicate to high and low byte, maps 0->0, 255->0xffff + + STBI_FREE(orig); + return enlarged; +} + +static void stbi__vertical_flip(void *image, int w, int h, int bytes_per_pixel) +{ + int row; + size_t bytes_per_row = (size_t)w * bytes_per_pixel; + stbi_uc temp[2048]; + stbi_uc *bytes = (stbi_uc *)image; + + for (row = 0; row < (h>>1); row++) { + stbi_uc *row0 = bytes + row*bytes_per_row; + stbi_uc *row1 = bytes + (h - row - 1)*bytes_per_row; + // swap row0 with row1 + size_t bytes_left = bytes_per_row; + while (bytes_left) { + size_t bytes_copy = (bytes_left < sizeof(temp)) ? bytes_left : sizeof(temp); + memcpy(temp, row0, bytes_copy); + memcpy(row0, row1, bytes_copy); + memcpy(row1, temp, bytes_copy); + row0 += bytes_copy; + row1 += bytes_copy; + bytes_left -= bytes_copy; + } + } +} + +#ifndef STBI_NO_GIF +static void stbi__vertical_flip_slices(void *image, int w, int h, int z, int bytes_per_pixel) +{ + int slice; + int slice_size = w * h * bytes_per_pixel; + + stbi_uc *bytes = (stbi_uc *)image; + for (slice = 0; slice < z; ++slice) { + stbi__vertical_flip(bytes, w, h, bytes_per_pixel); + bytes += slice_size; + } +} +#endif + +static unsigned char *stbi__load_and_postprocess_8bit(stbi__context *s, int *x, int *y, int *comp, int req_comp) +{ + stbi__result_info ri; + void *result = stbi__load_main(s, x, y, comp, req_comp, &ri, 8); + + if (result == NULL) + return NULL; + + // it is the responsibility of the loaders to make sure we get either 8 or 16 bit. + STBI_ASSERT(ri.bits_per_channel == 8 || ri.bits_per_channel == 16); + + if (ri.bits_per_channel != 8) { + result = stbi__convert_16_to_8((stbi__uint16 *) result, *x, *y, req_comp == 0 ? *comp : req_comp); + ri.bits_per_channel = 8; + } + + // @TODO: move stbi__convert_format to here + + if (stbi__vertically_flip_on_load) { + int channels = req_comp ? req_comp : *comp; + stbi__vertical_flip(result, *x, *y, channels * sizeof(stbi_uc)); + } + + return (unsigned char *) result; +} + +static stbi__uint16 *stbi__load_and_postprocess_16bit(stbi__context *s, int *x, int *y, int *comp, int req_comp) +{ + stbi__result_info ri; + void *result = stbi__load_main(s, x, y, comp, req_comp, &ri, 16); + + if (result == NULL) + return NULL; + + // it is the responsibility of the loaders to make sure we get either 8 or 16 bit. + STBI_ASSERT(ri.bits_per_channel == 8 || ri.bits_per_channel == 16); + + if (ri.bits_per_channel != 16) { + result = stbi__convert_8_to_16((stbi_uc *) result, *x, *y, req_comp == 0 ? 
*comp : req_comp); + ri.bits_per_channel = 16; + } + + // @TODO: move stbi__convert_format16 to here + // @TODO: special case RGB-to-Y (and RGBA-to-YA) for 8-bit-to-16-bit case to keep more precision + + if (stbi__vertically_flip_on_load) { + int channels = req_comp ? req_comp : *comp; + stbi__vertical_flip(result, *x, *y, channels * sizeof(stbi__uint16)); + } + + return (stbi__uint16 *) result; +} + +#if !defined(STBI_NO_HDR) && !defined(STBI_NO_LINEAR) +static void stbi__float_postprocess(float *result, int *x, int *y, int *comp, int req_comp) +{ + if (stbi__vertically_flip_on_load && result != NULL) { + int channels = req_comp ? req_comp : *comp; + stbi__vertical_flip(result, *x, *y, channels * sizeof(float)); + } +} +#endif + +#ifndef STBI_NO_STDIO + +#if defined(_MSC_VER) && defined(STBI_WINDOWS_UTF8) +STBI_EXTERN __declspec(dllimport) int __stdcall MultiByteToWideChar(unsigned int cp, unsigned long flags, const char *str, int cbmb, wchar_t *widestr, int cchwide); +STBI_EXTERN __declspec(dllimport) int __stdcall WideCharToMultiByte(unsigned int cp, unsigned long flags, const wchar_t *widestr, int cchwide, char *str, int cbmb, const char *defchar, int *used_default); +#endif + +#if defined(_MSC_VER) && defined(STBI_WINDOWS_UTF8) +STBIDEF int stbi_convert_wchar_to_utf8(char *buffer, size_t bufferlen, const wchar_t* input) +{ + return WideCharToMultiByte(65001 /* UTF8 */, 0, input, -1, buffer, (int) bufferlen, NULL, NULL); +} +#endif + +static FILE *stbi__fopen(char const *filename, char const *mode) +{ + FILE *f; +#if defined(_MSC_VER) && defined(STBI_WINDOWS_UTF8) + wchar_t wMode[64]; + wchar_t wFilename[1024]; + if (0 == MultiByteToWideChar(65001 /* UTF8 */, 0, filename, -1, wFilename, sizeof(wFilename))) + return 0; + + if (0 == MultiByteToWideChar(65001 /* UTF8 */, 0, mode, -1, wMode, sizeof(wMode))) + return 0; + +#if _MSC_VER >= 1400 + if (0 != _wfopen_s(&f, wFilename, wMode)) + f = 0; +#else + f = _wfopen(wFilename, wMode); +#endif + +#elif defined(_MSC_VER) && _MSC_VER >= 1400 + if (0 != fopen_s(&f, filename, mode)) + f=0; +#else + f = fopen(filename, mode); +#endif + return f; +} + + +STBIDEF stbi_uc *stbi_load(char const *filename, int *x, int *y, int *comp, int req_comp) +{ + FILE *f = stbi__fopen(filename, "rb"); + unsigned char *result; + if (!f) return stbi__errpuc("can't fopen", "Unable to open file"); + result = stbi_load_from_file(f,x,y,comp,req_comp); + fclose(f); + return result; +} + +STBIDEF stbi_uc *stbi_load_from_file(FILE *f, int *x, int *y, int *comp, int req_comp) +{ + unsigned char *result; + stbi__context s; + stbi__start_file(&s,f); + result = stbi__load_and_postprocess_8bit(&s,x,y,comp,req_comp); + if (result) { + // need to 'unget' all the characters in the IO buffer + fseek(f, - (int) (s.img_buffer_end - s.img_buffer), SEEK_CUR); + } + return result; +} + +STBIDEF stbi__uint16 *stbi_load_from_file_16(FILE *f, int *x, int *y, int *comp, int req_comp) +{ + stbi__uint16 *result; + stbi__context s; + stbi__start_file(&s,f); + result = stbi__load_and_postprocess_16bit(&s,x,y,comp,req_comp); + if (result) { + // need to 'unget' all the characters in the IO buffer + fseek(f, - (int) (s.img_buffer_end - s.img_buffer), SEEK_CUR); + } + return result; +} + +STBIDEF stbi_us *stbi_load_16(char const *filename, int *x, int *y, int *comp, int req_comp) +{ + FILE *f = stbi__fopen(filename, "rb"); + stbi__uint16 *result; + if (!f) return (stbi_us *) stbi__errpuc("can't fopen", "Unable to open file"); + result = stbi_load_from_file_16(f,x,y,comp,req_comp); + fclose(f); + 
return result; +} + + +#endif //!STBI_NO_STDIO + +STBIDEF stbi_us *stbi_load_16_from_memory(stbi_uc const *buffer, int len, int *x, int *y, int *channels_in_file, int desired_channels) +{ + stbi__context s; + stbi__start_mem(&s,buffer,len); + return stbi__load_and_postprocess_16bit(&s,x,y,channels_in_file,desired_channels); +} + +STBIDEF stbi_us *stbi_load_16_from_callbacks(stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *channels_in_file, int desired_channels) +{ + stbi__context s; + stbi__start_callbacks(&s, (stbi_io_callbacks *)clbk, user); + return stbi__load_and_postprocess_16bit(&s,x,y,channels_in_file,desired_channels); +} + +STBIDEF stbi_uc *stbi_load_from_memory(stbi_uc const *buffer, int len, int *x, int *y, int *comp, int req_comp) +{ + stbi__context s; + stbi__start_mem(&s,buffer,len); + return stbi__load_and_postprocess_8bit(&s,x,y,comp,req_comp); +} + +STBIDEF stbi_uc *stbi_load_from_callbacks(stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *comp, int req_comp) +{ + stbi__context s; + stbi__start_callbacks(&s, (stbi_io_callbacks *) clbk, user); + return stbi__load_and_postprocess_8bit(&s,x,y,comp,req_comp); +} + +#ifndef STBI_NO_GIF +STBIDEF stbi_uc *stbi_load_gif_from_memory(stbi_uc const *buffer, int len, int **delays, int *x, int *y, int *z, int *comp, int req_comp) +{ + unsigned char *result; + stbi__context s; + stbi__start_mem(&s,buffer,len); + + result = (unsigned char*) stbi__load_gif_main(&s, delays, x, y, z, comp, req_comp); + if (stbi__vertically_flip_on_load) { + stbi__vertical_flip_slices( result, *x, *y, *z, *comp ); + } + + return result; +} +#endif + +#ifndef STBI_NO_LINEAR +static float *stbi__loadf_main(stbi__context *s, int *x, int *y, int *comp, int req_comp) +{ + unsigned char *data; + #ifndef STBI_NO_HDR + if (stbi__hdr_test(s)) { + stbi__result_info ri; + float *hdr_data = stbi__hdr_load(s,x,y,comp,req_comp, &ri); + if (hdr_data) + stbi__float_postprocess(hdr_data,x,y,comp,req_comp); + return hdr_data; + } + #endif + data = stbi__load_and_postprocess_8bit(s, x, y, comp, req_comp); + if (data) + return stbi__ldr_to_hdr(data, *x, *y, req_comp ? req_comp : *comp); + return stbi__errpf("unknown image type", "Image not of any known type, or corrupt"); +} + +STBIDEF float *stbi_loadf_from_memory(stbi_uc const *buffer, int len, int *x, int *y, int *comp, int req_comp) +{ + stbi__context s; + stbi__start_mem(&s,buffer,len); + return stbi__loadf_main(&s,x,y,comp,req_comp); +} + +STBIDEF float *stbi_loadf_from_callbacks(stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *comp, int req_comp) +{ + stbi__context s; + stbi__start_callbacks(&s, (stbi_io_callbacks *) clbk, user); + return stbi__loadf_main(&s,x,y,comp,req_comp); +} + +#ifndef STBI_NO_STDIO +STBIDEF float *stbi_loadf(char const *filename, int *x, int *y, int *comp, int req_comp) +{ + float *result; + FILE *f = stbi__fopen(filename, "rb"); + if (!f) return stbi__errpf("can't fopen", "Unable to open file"); + result = stbi_loadf_from_file(f,x,y,comp,req_comp); + fclose(f); + return result; +} + +STBIDEF float *stbi_loadf_from_file(FILE *f, int *x, int *y, int *comp, int req_comp) +{ + stbi__context s; + stbi__start_file(&s,f); + return stbi__loadf_main(&s,x,y,comp,req_comp); +} +#endif // !STBI_NO_STDIO + +#endif // !STBI_NO_LINEAR + +// these is-hdr-or-not is defined independent of whether STBI_NO_LINEAR is +// defined, for API simplicity; if STBI_NO_LINEAR is defined, it always +// reports false! 
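+
+// The loaders above (stbi_load*, stbi_loadf*) and the stbi_is_hdr* queries below
+// all follow the same calling pattern. The sketch below is purely illustrative
+// and is not compiled into the library; the file names and the wrapper function
+// are hypothetical, and it assumes STBI_NO_STDIO, STBI_NO_LINEAR and STBI_NO_HDR
+// are not defined.
+#if 0
+#include <stdio.h>
+static void example_usage(void)
+{
+   int w, h, n;
+   unsigned char *pixels;
+   stbi_set_flip_vertically_on_load(1);            // optional: flip rows so the origin is bottom-left (GL-style)
+   pixels = stbi_load("photo.png", &w, &h, &n, 0); // req_comp==0 keeps the file's own channel count
+   if (pixels) {
+      printf("loaded %dx%d image with %d channels\n", w, h, n);
+      stbi_image_free(pixels);                     // release with stbi_image_free (maps to STBI_FREE)
+   }
+   if (stbi_is_hdr("photo.hdr")) {                 // Radiance .hdr detected: load as linear floats instead
+      float *linear = stbi_loadf("photo.hdr", &w, &h, &n, 0);
+      if (linear) stbi_image_free(linear);
+   }
+}
+#endif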
+ +STBIDEF int stbi_is_hdr_from_memory(stbi_uc const *buffer, int len) +{ + #ifndef STBI_NO_HDR + stbi__context s; + stbi__start_mem(&s,buffer,len); + return stbi__hdr_test(&s); + #else + STBI_NOTUSED(buffer); + STBI_NOTUSED(len); + return 0; + #endif +} + +#ifndef STBI_NO_STDIO +STBIDEF int stbi_is_hdr (char const *filename) +{ + FILE *f = stbi__fopen(filename, "rb"); + int result=0; + if (f) { + result = stbi_is_hdr_from_file(f); + fclose(f); + } + return result; +} + +STBIDEF int stbi_is_hdr_from_file(FILE *f) +{ + #ifndef STBI_NO_HDR + long pos = ftell(f); + int res; + stbi__context s; + stbi__start_file(&s,f); + res = stbi__hdr_test(&s); + fseek(f, pos, SEEK_SET); + return res; + #else + STBI_NOTUSED(f); + return 0; + #endif +} +#endif // !STBI_NO_STDIO + +STBIDEF int stbi_is_hdr_from_callbacks(stbi_io_callbacks const *clbk, void *user) +{ + #ifndef STBI_NO_HDR + stbi__context s; + stbi__start_callbacks(&s, (stbi_io_callbacks *) clbk, user); + return stbi__hdr_test(&s); + #else + STBI_NOTUSED(clbk); + STBI_NOTUSED(user); + return 0; + #endif +} + +#ifndef STBI_NO_LINEAR +static float stbi__l2h_gamma=2.2f, stbi__l2h_scale=1.0f; + +STBIDEF void stbi_ldr_to_hdr_gamma(float gamma) { stbi__l2h_gamma = gamma; } +STBIDEF void stbi_ldr_to_hdr_scale(float scale) { stbi__l2h_scale = scale; } +#endif + +static float stbi__h2l_gamma_i=1.0f/2.2f, stbi__h2l_scale_i=1.0f; + +STBIDEF void stbi_hdr_to_ldr_gamma(float gamma) { stbi__h2l_gamma_i = 1/gamma; } +STBIDEF void stbi_hdr_to_ldr_scale(float scale) { stbi__h2l_scale_i = 1/scale; } + + +////////////////////////////////////////////////////////////////////////////// +// +// Common code used by all image loaders +// + +enum +{ + STBI__SCAN_load=0, + STBI__SCAN_type, + STBI__SCAN_header +}; + +static void stbi__refill_buffer(stbi__context *s) +{ + int n = (s->io.read)(s->io_user_data,(char*)s->buffer_start,s->buflen); + s->callback_already_read += (int) (s->img_buffer - s->img_buffer_original); + if (n == 0) { + // at end of file, treat same as if from memory, but need to handle case + // where s->img_buffer isn't pointing to safe memory, e.g. 0-byte file + s->read_from_callbacks = 0; + s->img_buffer = s->buffer_start; + s->img_buffer_end = s->buffer_start+1; + *s->img_buffer = 0; + } else { + s->img_buffer = s->buffer_start; + s->img_buffer_end = s->buffer_start + n; + } +} + +stbi_inline static stbi_uc stbi__get8(stbi__context *s) +{ + if (s->img_buffer < s->img_buffer_end) + return *s->img_buffer++; + if (s->read_from_callbacks) { + stbi__refill_buffer(s); + return *s->img_buffer++; + } + return 0; +} + +#if defined(STBI_NO_JPEG) && defined(STBI_NO_HDR) && defined(STBI_NO_PIC) && defined(STBI_NO_PNM) +// nothing +#else +stbi_inline static int stbi__at_eof(stbi__context *s) +{ + if (s->io.read) { + if (!(s->io.eof)(s->io_user_data)) return 0; + // if feof() is true, check if buffer = end + // special case: we've only got the special 0 character at the end + if (s->read_from_callbacks == 0) return 1; + } + + return s->img_buffer >= s->img_buffer_end; +} +#endif + +#if defined(STBI_NO_JPEG) && defined(STBI_NO_PNG) && defined(STBI_NO_BMP) && defined(STBI_NO_PSD) && defined(STBI_NO_TGA) && defined(STBI_NO_GIF) && defined(STBI_NO_PIC) +// nothing +#else +static void stbi__skip(stbi__context *s, int n) +{ + if (n == 0) return; // already there! 
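+   // n < 0 presumably means a corrupt file produced a bogus length; the code
+   // below clamps to the end of the buffer instead of trying to seek backwards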
+ if (n < 0) { + s->img_buffer = s->img_buffer_end; + return; + } + if (s->io.read) { + int blen = (int) (s->img_buffer_end - s->img_buffer); + if (blen < n) { + s->img_buffer = s->img_buffer_end; + (s->io.skip)(s->io_user_data, n - blen); + return; + } + } + s->img_buffer += n; +} +#endif + +#if defined(STBI_NO_PNG) && defined(STBI_NO_TGA) && defined(STBI_NO_HDR) && defined(STBI_NO_PNM) +// nothing +#else +static int stbi__getn(stbi__context *s, stbi_uc *buffer, int n) +{ + if (s->io.read) { + int blen = (int) (s->img_buffer_end - s->img_buffer); + if (blen < n) { + int res, count; + + memcpy(buffer, s->img_buffer, blen); + + count = (s->io.read)(s->io_user_data, (char*) buffer + blen, n - blen); + res = (count == (n-blen)); + s->img_buffer = s->img_buffer_end; + return res; + } + } + + if (s->img_buffer+n <= s->img_buffer_end) { + memcpy(buffer, s->img_buffer, n); + s->img_buffer += n; + return 1; + } else + return 0; +} +#endif + +#if defined(STBI_NO_JPEG) && defined(STBI_NO_PNG) && defined(STBI_NO_PSD) && defined(STBI_NO_PIC) +// nothing +#else +static int stbi__get16be(stbi__context *s) +{ + int z = stbi__get8(s); + return (z << 8) + stbi__get8(s); +} +#endif + +#if defined(STBI_NO_PNG) && defined(STBI_NO_PSD) && defined(STBI_NO_PIC) +// nothing +#else +static stbi__uint32 stbi__get32be(stbi__context *s) +{ + stbi__uint32 z = stbi__get16be(s); + return (z << 16) + stbi__get16be(s); +} +#endif + +#if defined(STBI_NO_BMP) && defined(STBI_NO_TGA) && defined(STBI_NO_GIF) +// nothing +#else +static int stbi__get16le(stbi__context *s) +{ + int z = stbi__get8(s); + return z + (stbi__get8(s) << 8); +} +#endif + +#ifndef STBI_NO_BMP +static stbi__uint32 stbi__get32le(stbi__context *s) +{ + stbi__uint32 z = stbi__get16le(s); + return z + (stbi__get16le(s) << 16); +} +#endif + +#define STBI__BYTECAST(x) ((stbi_uc) ((x) & 255)) // truncate int to byte without warnings + +#if defined(STBI_NO_JPEG) && defined(STBI_NO_PNG) && defined(STBI_NO_BMP) && defined(STBI_NO_PSD) && defined(STBI_NO_TGA) && defined(STBI_NO_GIF) && defined(STBI_NO_PIC) && defined(STBI_NO_PNM) +// nothing +#else +////////////////////////////////////////////////////////////////////////////// +// +// generic converter from built-in img_n to req_comp +// individual types do this automatically as much as possible (e.g. jpeg +// does all cases internally since it needs to colorspace convert anyway, +// and it never has alpha, so very few cases ). 
png can automatically +// interleave an alpha=255 channel, but falls back to this for other cases +// +// assume data buffer is malloced, so malloc a new one and free that one +// only failure mode is malloc failing + +static stbi_uc stbi__compute_y(int r, int g, int b) +{ + return (stbi_uc) (((r*77) + (g*150) + (29*b)) >> 8); +} +#endif + +#if defined(STBI_NO_PNG) && defined(STBI_NO_BMP) && defined(STBI_NO_PSD) && defined(STBI_NO_TGA) && defined(STBI_NO_GIF) && defined(STBI_NO_PIC) && defined(STBI_NO_PNM) +// nothing +#else +static unsigned char *stbi__convert_format(unsigned char *data, int img_n, int req_comp, unsigned int x, unsigned int y) +{ + int i,j; + unsigned char *good; + + if (req_comp == img_n) return data; + STBI_ASSERT(req_comp >= 1 && req_comp <= 4); + + good = (unsigned char *) stbi__malloc_mad3(req_comp, x, y, 0); + if (good == NULL) { + STBI_FREE(data); + return stbi__errpuc("outofmem", "Out of memory"); + } + + for (j=0; j < (int) y; ++j) { + unsigned char *src = data + j * x * img_n ; + unsigned char *dest = good + j * x * req_comp; + + #define STBI__COMBO(a,b) ((a)*8+(b)) + #define STBI__CASE(a,b) case STBI__COMBO(a,b): for(i=x-1; i >= 0; --i, src += a, dest += b) + // convert source image with img_n components to one with req_comp components; + // avoid switch per pixel, so use switch per scanline and massive macros + switch (STBI__COMBO(img_n, req_comp)) { + STBI__CASE(1,2) { dest[0]=src[0]; dest[1]=255; } break; + STBI__CASE(1,3) { dest[0]=dest[1]=dest[2]=src[0]; } break; + STBI__CASE(1,4) { dest[0]=dest[1]=dest[2]=src[0]; dest[3]=255; } break; + STBI__CASE(2,1) { dest[0]=src[0]; } break; + STBI__CASE(2,3) { dest[0]=dest[1]=dest[2]=src[0]; } break; + STBI__CASE(2,4) { dest[0]=dest[1]=dest[2]=src[0]; dest[3]=src[1]; } break; + STBI__CASE(3,4) { dest[0]=src[0];dest[1]=src[1];dest[2]=src[2];dest[3]=255; } break; + STBI__CASE(3,1) { dest[0]=stbi__compute_y(src[0],src[1],src[2]); } break; + STBI__CASE(3,2) { dest[0]=stbi__compute_y(src[0],src[1],src[2]); dest[1] = 255; } break; + STBI__CASE(4,1) { dest[0]=stbi__compute_y(src[0],src[1],src[2]); } break; + STBI__CASE(4,2) { dest[0]=stbi__compute_y(src[0],src[1],src[2]); dest[1] = src[3]; } break; + STBI__CASE(4,3) { dest[0]=src[0];dest[1]=src[1];dest[2]=src[2]; } break; + default: STBI_ASSERT(0); STBI_FREE(data); STBI_FREE(good); return stbi__errpuc("unsupported", "Unsupported format conversion"); + } + #undef STBI__CASE + } + + STBI_FREE(data); + return good; +} +#endif + +#if defined(STBI_NO_PNG) && defined(STBI_NO_PSD) +// nothing +#else +static stbi__uint16 stbi__compute_y_16(int r, int g, int b) +{ + return (stbi__uint16) (((r*77) + (g*150) + (29*b)) >> 8); +} +#endif + +#if defined(STBI_NO_PNG) && defined(STBI_NO_PSD) +// nothing +#else +static stbi__uint16 *stbi__convert_format16(stbi__uint16 *data, int img_n, int req_comp, unsigned int x, unsigned int y) +{ + int i,j; + stbi__uint16 *good; + + if (req_comp == img_n) return data; + STBI_ASSERT(req_comp >= 1 && req_comp <= 4); + + good = (stbi__uint16 *) stbi__malloc(req_comp * x * y * 2); + if (good == NULL) { + STBI_FREE(data); + return (stbi__uint16 *) stbi__errpuc("outofmem", "Out of memory"); + } + + for (j=0; j < (int) y; ++j) { + stbi__uint16 *src = data + j * x * img_n ; + stbi__uint16 *dest = good + j * x * req_comp; + + #define STBI__COMBO(a,b) ((a)*8+(b)) + #define STBI__CASE(a,b) case STBI__COMBO(a,b): for(i=x-1; i >= 0; --i, src += a, dest += b) + // convert source image with img_n components to one with req_comp components; + // avoid switch per pixel, 
so use switch per scanline and massive macros + switch (STBI__COMBO(img_n, req_comp)) { + STBI__CASE(1,2) { dest[0]=src[0]; dest[1]=0xffff; } break; + STBI__CASE(1,3) { dest[0]=dest[1]=dest[2]=src[0]; } break; + STBI__CASE(1,4) { dest[0]=dest[1]=dest[2]=src[0]; dest[3]=0xffff; } break; + STBI__CASE(2,1) { dest[0]=src[0]; } break; + STBI__CASE(2,3) { dest[0]=dest[1]=dest[2]=src[0]; } break; + STBI__CASE(2,4) { dest[0]=dest[1]=dest[2]=src[0]; dest[3]=src[1]; } break; + STBI__CASE(3,4) { dest[0]=src[0];dest[1]=src[1];dest[2]=src[2];dest[3]=0xffff; } break; + STBI__CASE(3,1) { dest[0]=stbi__compute_y_16(src[0],src[1],src[2]); } break; + STBI__CASE(3,2) { dest[0]=stbi__compute_y_16(src[0],src[1],src[2]); dest[1] = 0xffff; } break; + STBI__CASE(4,1) { dest[0]=stbi__compute_y_16(src[0],src[1],src[2]); } break; + STBI__CASE(4,2) { dest[0]=stbi__compute_y_16(src[0],src[1],src[2]); dest[1] = src[3]; } break; + STBI__CASE(4,3) { dest[0]=src[0];dest[1]=src[1];dest[2]=src[2]; } break; + default: STBI_ASSERT(0); STBI_FREE(data); STBI_FREE(good); return (stbi__uint16*) stbi__errpuc("unsupported", "Unsupported format conversion"); + } + #undef STBI__CASE + } + + STBI_FREE(data); + return good; +} +#endif + +#ifndef STBI_NO_LINEAR +static float *stbi__ldr_to_hdr(stbi_uc *data, int x, int y, int comp) +{ + int i,k,n; + float *output; + if (!data) return NULL; + output = (float *) stbi__malloc_mad4(x, y, comp, sizeof(float), 0); + if (output == NULL) { STBI_FREE(data); return stbi__errpf("outofmem", "Out of memory"); } + // compute number of non-alpha components + if (comp & 1) n = comp; else n = comp-1; + for (i=0; i < x*y; ++i) { + for (k=0; k < n; ++k) { + output[i*comp + k] = (float) (pow(data[i*comp+k]/255.0f, stbi__l2h_gamma) * stbi__l2h_scale); + } + } + if (n < comp) { + for (i=0; i < x*y; ++i) { + output[i*comp + n] = data[i*comp + n]/255.0f; + } + } + STBI_FREE(data); + return output; +} +#endif + +#ifndef STBI_NO_HDR +#define stbi__float2int(x) ((int) (x)) +static stbi_uc *stbi__hdr_to_ldr(float *data, int x, int y, int comp) +{ + int i,k,n; + stbi_uc *output; + if (!data) return NULL; + output = (stbi_uc *) stbi__malloc_mad3(x, y, comp, 0); + if (output == NULL) { STBI_FREE(data); return stbi__errpuc("outofmem", "Out of memory"); } + // compute number of non-alpha components + if (comp & 1) n = comp; else n = comp-1; + for (i=0; i < x*y; ++i) { + for (k=0; k < n; ++k) { + float z = (float) pow(data[i*comp+k]*stbi__h2l_scale_i, stbi__h2l_gamma_i) * 255 + 0.5f; + if (z < 0) z = 0; + if (z > 255) z = 255; + output[i*comp + k] = (stbi_uc) stbi__float2int(z); + } + if (k < comp) { + float z = data[i*comp+k] * 255 + 0.5f; + if (z < 0) z = 0; + if (z > 255) z = 255; + output[i*comp + k] = (stbi_uc) stbi__float2int(z); + } + } + STBI_FREE(data); + return output; +} +#endif + +////////////////////////////////////////////////////////////////////////////// +// +// "baseline" JPEG/JFIF decoder +// +// simple implementation +// - doesn't support delayed output of y-dimension +// - simple interface (only one output format: 8-bit interleaved RGB) +// - doesn't try to recover corrupt jpegs +// - doesn't allow partial loading, loading multiple at once +// - still fast on x86 (copying globals into locals doesn't help x86) +// - allocates lots of intermediate memory (full size of all components) +// - non-interleaved case requires this anyway +// - allows good upsampling (see next) +// high-quality +// - upsampled channels are bilinearly interpolated, even across blocks +// - quality integer IDCT derived from IJG's 
'slow' +// performance +// - fast huffman; reasonable integer IDCT +// - some SIMD kernels for common paths on targets with SSE2/NEON +// - uses a lot of intermediate memory, could cache poorly + +#ifndef STBI_NO_JPEG + +// huffman decoding acceleration +#define FAST_BITS 9 // larger handles more cases; smaller stomps less cache + +typedef struct +{ + stbi_uc fast[1 << FAST_BITS]; + // weirdly, repacking this into AoS is a 10% speed loss, instead of a win + stbi__uint16 code[256]; + stbi_uc values[256]; + stbi_uc size[257]; + unsigned int maxcode[18]; + int delta[17]; // old 'firstsymbol' - old 'firstcode' +} stbi__huffman; + +typedef struct +{ + stbi__context *s; + stbi__huffman huff_dc[4]; + stbi__huffman huff_ac[4]; + stbi__uint16 dequant[4][64]; + stbi__int16 fast_ac[4][1 << FAST_BITS]; + +// sizes for components, interleaved MCUs + int img_h_max, img_v_max; + int img_mcu_x, img_mcu_y; + int img_mcu_w, img_mcu_h; + +// definition of jpeg image component + struct + { + int id; + int h,v; + int tq; + int hd,ha; + int dc_pred; + + int x,y,w2,h2; + stbi_uc *data; + void *raw_data, *raw_coeff; + stbi_uc *linebuf; + short *coeff; // progressive only + int coeff_w, coeff_h; // number of 8x8 coefficient blocks + } img_comp[4]; + + stbi__uint32 code_buffer; // jpeg entropy-coded buffer + int code_bits; // number of valid bits + unsigned char marker; // marker seen while filling entropy buffer + int nomore; // flag if we saw a marker so must stop + + int progressive; + int spec_start; + int spec_end; + int succ_high; + int succ_low; + int eob_run; + int jfif; + int app14_color_transform; // Adobe APP14 tag + int rgb; + + int scan_n, order[4]; + int restart_interval, todo; + +// kernels + void (*idct_block_kernel)(stbi_uc *out, int out_stride, short data[64]); + void (*YCbCr_to_RGB_kernel)(stbi_uc *out, const stbi_uc *y, const stbi_uc *pcb, const stbi_uc *pcr, int count, int step); + stbi_uc *(*resample_row_hv_2_kernel)(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs); +} stbi__jpeg; + +static int stbi__build_huffman(stbi__huffman *h, int *count) +{ + int i,j,k=0; + unsigned int code; + // build size list for each symbol (from JPEG spec) + for (i=0; i < 16; ++i) + for (j=0; j < count[i]; ++j) + h->size[k++] = (stbi_uc) (i+1); + h->size[k] = 0; + + // compute actual symbols (from jpeg spec) + code = 0; + k = 0; + for(j=1; j <= 16; ++j) { + // compute delta to add to code to compute symbol id + h->delta[j] = k - code; + if (h->size[k] == j) { + while (h->size[k] == j) + h->code[k++] = (stbi__uint16) (code++); + if (code-1 >= (1u << j)) return stbi__err("bad code lengths","Corrupt JPEG"); + } + // compute largest code + 1 for this size, preshifted as needed later + h->maxcode[j] = code << (16-j); + code <<= 1; + } + h->maxcode[j] = 0xffffffff; + + // build non-spec acceleration table; 255 is flag for not-accelerated + memset(h->fast, 255, 1 << FAST_BITS); + for (i=0; i < k; ++i) { + int s = h->size[i]; + if (s <= FAST_BITS) { + int c = h->code[i] << (FAST_BITS-s); + int m = 1 << (FAST_BITS-s); + for (j=0; j < m; ++j) { + h->fast[c+j] = (stbi_uc) i; + } + } + } + return 1; +} + +// build a table that decodes both magnitude and value of small ACs in +// one go. 
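+// As an illustrative worked example of the packing built below (numbers chosen
+// arbitrarily, not taken from a real stream): suppose the top FAST_BITS of the
+// bit buffer begin with a Huffman code of length len=3 whose symbol is rs=0x21,
+// i.e. run=2 and magbits=1, and the single magnitude bit following the code is
+// 1, so the decoded coefficient value is k=+1. The table entry is then
+//    fast_ac[i] = k*256 + run*16 + (len+magbits) = 256 + 32 + 4 = 292
+// which stbi__jpeg_decode_block unpacks as value = 292>>8 = 1,
+// run = (292>>4)&15 = 2, and bits consumed = 292&15 = 4 -- no second Huffman
+// decode is needed for the coefficient value.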
+static void stbi__build_fast_ac(stbi__int16 *fast_ac, stbi__huffman *h) +{ + int i; + for (i=0; i < (1 << FAST_BITS); ++i) { + stbi_uc fast = h->fast[i]; + fast_ac[i] = 0; + if (fast < 255) { + int rs = h->values[fast]; + int run = (rs >> 4) & 15; + int magbits = rs & 15; + int len = h->size[fast]; + + if (magbits && len + magbits <= FAST_BITS) { + // magnitude code followed by receive_extend code + int k = ((i << len) & ((1 << FAST_BITS) - 1)) >> (FAST_BITS - magbits); + int m = 1 << (magbits - 1); + if (k < m) k += (~0U << magbits) + 1; + // if the result is small enough, we can fit it in fast_ac table + if (k >= -128 && k <= 127) + fast_ac[i] = (stbi__int16) ((k * 256) + (run * 16) + (len + magbits)); + } + } + } +} + +static void stbi__grow_buffer_unsafe(stbi__jpeg *j) +{ + do { + unsigned int b = j->nomore ? 0 : stbi__get8(j->s); + if (b == 0xff) { + int c = stbi__get8(j->s); + while (c == 0xff) c = stbi__get8(j->s); // consume fill bytes + if (c != 0) { + j->marker = (unsigned char) c; + j->nomore = 1; + return; + } + } + j->code_buffer |= b << (24 - j->code_bits); + j->code_bits += 8; + } while (j->code_bits <= 24); +} + +// (1 << n) - 1 +static const stbi__uint32 stbi__bmask[17]={0,1,3,7,15,31,63,127,255,511,1023,2047,4095,8191,16383,32767,65535}; + +// decode a jpeg huffman value from the bitstream +stbi_inline static int stbi__jpeg_huff_decode(stbi__jpeg *j, stbi__huffman *h) +{ + unsigned int temp; + int c,k; + + if (j->code_bits < 16) stbi__grow_buffer_unsafe(j); + + // look at the top FAST_BITS and determine what symbol ID it is, + // if the code is <= FAST_BITS + c = (j->code_buffer >> (32 - FAST_BITS)) & ((1 << FAST_BITS)-1); + k = h->fast[c]; + if (k < 255) { + int s = h->size[k]; + if (s > j->code_bits) + return -1; + j->code_buffer <<= s; + j->code_bits -= s; + return h->values[k]; + } + + // naive test is to shift the code_buffer down so k bits are + // valid, then test against maxcode. To speed this up, we've + // preshifted maxcode left so that it has (16-k) 0s at the + // end; in other words, regardless of the number of bits, it + // wants to be compared against something shifted to have 16; + // that way we don't need to shift inside the loop. + temp = j->code_buffer >> 16; + for (k=FAST_BITS+1 ; ; ++k) + if (temp < h->maxcode[k]) + break; + if (k == 17) { + // error! 
code not found + j->code_bits -= 16; + return -1; + } + + if (k > j->code_bits) + return -1; + + // convert the huffman code to the symbol id + c = ((j->code_buffer >> (32 - k)) & stbi__bmask[k]) + h->delta[k]; + STBI_ASSERT((((j->code_buffer) >> (32 - h->size[c])) & stbi__bmask[h->size[c]]) == h->code[c]); + + // convert the id to a symbol + j->code_bits -= k; + j->code_buffer <<= k; + return h->values[c]; +} + +// bias[n] = (-1<<n) + 1 +static const int stbi__jbias[16] = {0,-1,-3,-7,-15,-31,-63,-127,-255,-511,-1023,-2047,-4095,-8191,-16383,-32767}; + +// combined JPEG 'receive' and JPEG 'extend', since baseline +// always extends everything it receives. +stbi_inline static int stbi__extend_receive(stbi__jpeg *j, int n) +{ + unsigned int k; + int sgn; + if (j->code_bits < n) stbi__grow_buffer_unsafe(j); + + sgn = (stbi__int32)j->code_buffer >> 31; // sign bit is always in MSB + k = stbi_lrot(j->code_buffer, n); + if (n < 0 || n >= (int) (sizeof(stbi__bmask)/sizeof(*stbi__bmask))) return 0; + j->code_buffer = k & ~stbi__bmask[n]; + k &= stbi__bmask[n]; + j->code_bits -= n; + return k + (stbi__jbias[n] & ~sgn); +} + +// get some unsigned bits +stbi_inline static int stbi__jpeg_get_bits(stbi__jpeg *j, int n) +{ + unsigned int k; + if (j->code_bits < n) stbi__grow_buffer_unsafe(j); + k = stbi_lrot(j->code_buffer, n); + j->code_buffer = k & ~stbi__bmask[n]; + k &= stbi__bmask[n]; + j->code_bits -= n; + return k; +} + +stbi_inline static int stbi__jpeg_get_bit(stbi__jpeg *j) +{ + unsigned int k; + if (j->code_bits < 1) stbi__grow_buffer_unsafe(j); + k = j->code_buffer; + j->code_buffer <<= 1; + --j->code_bits; + return k & 0x80000000; +} + +// given a value that's at position X in the zigzag stream, +// where does it appear in the 8x8 matrix coded as row-major? +static const stbi_uc stbi__jpeg_dezigzag[64+15] = +{ + 0, 1, 8, 16, 9, 2, 3, 10, + 17, 24, 32, 25, 18, 11, 4, 5, + 12, 19, 26, 33, 40, 48, 41, 34, + 27, 20, 13, 6, 7, 14, 21, 28, + 35, 42, 49, 56, 57, 50, 43, 36, + 29, 22, 15, 23, 30, 37, 44, 51, + 58, 59, 52, 45, 38, 31, 39, 46, + 53, 60, 61, 54, 47, 55, 62, 63, + // let corrupt input sample past end + 63, 63, 63, 63, 63, 63, 63, 63, + 63, 63, 63, 63, 63, 63, 63 +}; + +// decode one 64-entry block-- +static int stbi__jpeg_decode_block(stbi__jpeg *j, short data[64], stbi__huffman *hdc, stbi__huffman *hac, stbi__int16 *fac, int b, stbi__uint16 *dequant) +{ + int diff,dc,k; + int t; + + if (j->code_bits < 16) stbi__grow_buffer_unsafe(j); + t = stbi__jpeg_huff_decode(j, hdc); + if (t < 0) return stbi__err("bad huffman code","Corrupt JPEG"); + + // 0 all the ac values now so we can do it 32-bits at a time + memset(data,0,64*sizeof(data[0])); + + diff = t ?
stbi__extend_receive(j, t) : 0; + dc = j->img_comp[b].dc_pred + diff; + j->img_comp[b].dc_pred = dc; + data[0] = (short) (dc * dequant[0]); + + // decode AC components, see JPEG spec + k = 1; + do { + unsigned int zig; + int c,r,s; + if (j->code_bits < 16) stbi__grow_buffer_unsafe(j); + c = (j->code_buffer >> (32 - FAST_BITS)) & ((1 << FAST_BITS)-1); + r = fac[c]; + if (r) { // fast-AC path + k += (r >> 4) & 15; // run + s = r & 15; // combined length + j->code_buffer <<= s; + j->code_bits -= s; + // decode into unzigzag'd location + zig = stbi__jpeg_dezigzag[k++]; + data[zig] = (short) ((r >> 8) * dequant[zig]); + } else { + int rs = stbi__jpeg_huff_decode(j, hac); + if (rs < 0) return stbi__err("bad huffman code","Corrupt JPEG"); + s = rs & 15; + r = rs >> 4; + if (s == 0) { + if (rs != 0xf0) break; // end block + k += 16; + } else { + k += r; + // decode into unzigzag'd location + zig = stbi__jpeg_dezigzag[k++]; + data[zig] = (short) (stbi__extend_receive(j,s) * dequant[zig]); + } + } + } while (k < 64); + return 1; +} + +static int stbi__jpeg_decode_block_prog_dc(stbi__jpeg *j, short data[64], stbi__huffman *hdc, int b) +{ + int diff,dc; + int t; + if (j->spec_end != 0) return stbi__err("can't merge dc and ac", "Corrupt JPEG"); + + if (j->code_bits < 16) stbi__grow_buffer_unsafe(j); + + if (j->succ_high == 0) { + // first scan for DC coefficient, must be first + memset(data,0,64*sizeof(data[0])); // 0 all the ac values now + t = stbi__jpeg_huff_decode(j, hdc); + if (t == -1) return stbi__err("can't merge dc and ac", "Corrupt JPEG"); + diff = t ? stbi__extend_receive(j, t) : 0; + + dc = j->img_comp[b].dc_pred + diff; + j->img_comp[b].dc_pred = dc; + data[0] = (short) (dc << j->succ_low); + } else { + // refinement scan for DC coefficient + if (stbi__jpeg_get_bit(j)) + data[0] += (short) (1 << j->succ_low); + } + return 1; +} + +// @OPTIMIZE: store non-zigzagged during the decode passes, +// and only de-zigzag when dequantizing +static int stbi__jpeg_decode_block_prog_ac(stbi__jpeg *j, short data[64], stbi__huffman *hac, stbi__int16 *fac) +{ + int k; + if (j->spec_start == 0) return stbi__err("can't merge dc and ac", "Corrupt JPEG"); + + if (j->succ_high == 0) { + int shift = j->succ_low; + + if (j->eob_run) { + --j->eob_run; + return 1; + } + + k = j->spec_start; + do { + unsigned int zig; + int c,r,s; + if (j->code_bits < 16) stbi__grow_buffer_unsafe(j); + c = (j->code_buffer >> (32 - FAST_BITS)) & ((1 << FAST_BITS)-1); + r = fac[c]; + if (r) { // fast-AC path + k += (r >> 4) & 15; // run + s = r & 15; // combined length + j->code_buffer <<= s; + j->code_bits -= s; + zig = stbi__jpeg_dezigzag[k++]; + data[zig] = (short) ((r >> 8) << shift); + } else { + int rs = stbi__jpeg_huff_decode(j, hac); + if (rs < 0) return stbi__err("bad huffman code","Corrupt JPEG"); + s = rs & 15; + r = rs >> 4; + if (s == 0) { + if (r < 15) { + j->eob_run = (1 << r); + if (r) + j->eob_run += stbi__jpeg_get_bits(j, r); + --j->eob_run; + break; + } + k += 16; + } else { + k += r; + zig = stbi__jpeg_dezigzag[k++]; + data[zig] = (short) (stbi__extend_receive(j,s) << shift); + } + } + } while (k <= j->spec_end); + } else { + // refinement scan for these AC coefficients + + short bit = (short) (1 << j->succ_low); + + if (j->eob_run) { + --j->eob_run; + for (k = j->spec_start; k <= j->spec_end; ++k) { + short *p = &data[stbi__jpeg_dezigzag[k]]; + if (*p != 0) + if (stbi__jpeg_get_bit(j)) + if ((*p & bit)==0) { + if (*p > 0) + *p += bit; + else + *p -= bit; + } + } + } else { + k = j->spec_start; + do { + int r,s; + 
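+               // Each iteration reads one run/size pair: s==0 with r<15 starts an
+               // end-of-band run (r==15 is simply a run of 16 zeros), while s==1
+               // creates a new +/-bit coefficient whose sign comes from the next
+               // bit; the "advance by r" loop then skips r zero coefficients,
+               // giving every already-nonzero coefficient it crosses one
+               // refinement bit along the way.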
int rs = stbi__jpeg_huff_decode(j, hac); // @OPTIMIZE see if we can use the fast path here, advance-by-r is so slow, eh + if (rs < 0) return stbi__err("bad huffman code","Corrupt JPEG"); + s = rs & 15; + r = rs >> 4; + if (s == 0) { + if (r < 15) { + j->eob_run = (1 << r) - 1; + if (r) + j->eob_run += stbi__jpeg_get_bits(j, r); + r = 64; // force end of block + } else { + // r=15 s=0 should write 16 0s, so we just do + // a run of 15 0s and then write s (which is 0), + // so we don't have to do anything special here + } + } else { + if (s != 1) return stbi__err("bad huffman code", "Corrupt JPEG"); + // sign bit + if (stbi__jpeg_get_bit(j)) + s = bit; + else + s = -bit; + } + + // advance by r + while (k <= j->spec_end) { + short *p = &data[stbi__jpeg_dezigzag[k++]]; + if (*p != 0) { + if (stbi__jpeg_get_bit(j)) + if ((*p & bit)==0) { + if (*p > 0) + *p += bit; + else + *p -= bit; + } + } else { + if (r == 0) { + *p = (short) s; + break; + } + --r; + } + } + } while (k <= j->spec_end); + } + } + return 1; +} + +// take a -128..127 value and stbi__clamp it and convert to 0..255 +stbi_inline static stbi_uc stbi__clamp(int x) +{ + // trick to use a single test to catch both cases + if ((unsigned int) x > 255) { + if (x < 0) return 0; + if (x > 255) return 255; + } + return (stbi_uc) x; +} + +#define stbi__f2f(x) ((int) (((x) * 4096 + 0.5))) +#define stbi__fsh(x) ((x) * 4096) + +// derived from jidctint -- DCT_ISLOW +#define STBI__IDCT_1D(s0,s1,s2,s3,s4,s5,s6,s7) \ + int t0,t1,t2,t3,p1,p2,p3,p4,p5,x0,x1,x2,x3; \ + p2 = s2; \ + p3 = s6; \ + p1 = (p2+p3) * stbi__f2f(0.5411961f); \ + t2 = p1 + p3*stbi__f2f(-1.847759065f); \ + t3 = p1 + p2*stbi__f2f( 0.765366865f); \ + p2 = s0; \ + p3 = s4; \ + t0 = stbi__fsh(p2+p3); \ + t1 = stbi__fsh(p2-p3); \ + x0 = t0+t3; \ + x3 = t0-t3; \ + x1 = t1+t2; \ + x2 = t1-t2; \ + t0 = s7; \ + t1 = s5; \ + t2 = s3; \ + t3 = s1; \ + p3 = t0+t2; \ + p4 = t1+t3; \ + p1 = t0+t3; \ + p2 = t1+t2; \ + p5 = (p3+p4)*stbi__f2f( 1.175875602f); \ + t0 = t0*stbi__f2f( 0.298631336f); \ + t1 = t1*stbi__f2f( 2.053119869f); \ + t2 = t2*stbi__f2f( 3.072711026f); \ + t3 = t3*stbi__f2f( 1.501321110f); \ + p1 = p5 + p1*stbi__f2f(-0.899976223f); \ + p2 = p5 + p2*stbi__f2f(-2.562915447f); \ + p3 = p3*stbi__f2f(-1.961570560f); \ + p4 = p4*stbi__f2f(-0.390180644f); \ + t3 += p1+p4; \ + t2 += p2+p3; \ + t1 += p2+p4; \ + t0 += p1+p3; + +static void stbi__idct_block(stbi_uc *out, int out_stride, short data[64]) +{ + int i,val[64],*v=val; + stbi_uc *o; + short *d = data; + + // columns + for (i=0; i < 8; ++i,++d, ++v) { + // if all zeroes, shortcut -- this avoids dequantizing 0s and IDCTing + if (d[ 8]==0 && d[16]==0 && d[24]==0 && d[32]==0 + && d[40]==0 && d[48]==0 && d[56]==0) { + // no shortcut 0 seconds + // (1|2|3|4|5|6|7)==0 0 seconds + // all separate -0.047 seconds + // 1 && 2|3 && 4|5 && 6|7: -0.047 seconds + int dcterm = d[0]*4; + v[0] = v[8] = v[16] = v[24] = v[32] = v[40] = v[48] = v[56] = dcterm; + } else { + STBI__IDCT_1D(d[ 0],d[ 8],d[16],d[24],d[32],d[40],d[48],d[56]) + // constants scaled things up by 1<<12; let's bring them back + // down, but keep 2 extra bits of precision + x0 += 512; x1 += 512; x2 += 512; x3 += 512; + v[ 0] = (x0+t3) >> 10; + v[56] = (x0-t3) >> 10; + v[ 8] = (x1+t2) >> 10; + v[48] = (x1-t2) >> 10; + v[16] = (x2+t1) >> 10; + v[40] = (x2-t1) >> 10; + v[24] = (x3+t0) >> 10; + v[32] = (x3-t0) >> 10; + } + } + + for (i=0, v=val, o=out; i < 8; ++i,v+=8,o+=out_stride) { + // no fast case since the first 1D IDCT spread components out + 
STBI__IDCT_1D(v[0],v[1],v[2],v[3],v[4],v[5],v[6],v[7]) + // constants scaled things up by 1<<12, plus we had 1<<2 from first + // loop, plus horizontal and vertical each scale by sqrt(8) so together + // we've got an extra 1<<3, so 1<<17 total we need to remove. + // so we want to round that, which means adding 0.5 * 1<<17, + // aka 65536. Also, we'll end up with -128 to 127 that we want + // to encode as 0..255 by adding 128, so we'll add that before the shift + x0 += 65536 + (128<<17); + x1 += 65536 + (128<<17); + x2 += 65536 + (128<<17); + x3 += 65536 + (128<<17); + // tried computing the shifts into temps, or'ing the temps to see + // if any were out of range, but that was slower + o[0] = stbi__clamp((x0+t3) >> 17); + o[7] = stbi__clamp((x0-t3) >> 17); + o[1] = stbi__clamp((x1+t2) >> 17); + o[6] = stbi__clamp((x1-t2) >> 17); + o[2] = stbi__clamp((x2+t1) >> 17); + o[5] = stbi__clamp((x2-t1) >> 17); + o[3] = stbi__clamp((x3+t0) >> 17); + o[4] = stbi__clamp((x3-t0) >> 17); + } +} + +#ifdef STBI_SSE2 +// sse2 integer IDCT. not the fastest possible implementation but it +// produces bit-identical results to the generic C version so it's +// fully "transparent". +static void stbi__idct_simd(stbi_uc *out, int out_stride, short data[64]) +{ + // This is constructed to match our regular (generic) integer IDCT exactly. + __m128i row0, row1, row2, row3, row4, row5, row6, row7; + __m128i tmp; + + // dot product constant: even elems=x, odd elems=y + #define dct_const(x,y) _mm_setr_epi16((x),(y),(x),(y),(x),(y),(x),(y)) + + // out(0) = c0[even]*x + c0[odd]*y (c0, x, y 16-bit, out 32-bit) + // out(1) = c1[even]*x + c1[odd]*y + #define dct_rot(out0,out1, x,y,c0,c1) \ + __m128i c0##lo = _mm_unpacklo_epi16((x),(y)); \ + __m128i c0##hi = _mm_unpackhi_epi16((x),(y)); \ + __m128i out0##_l = _mm_madd_epi16(c0##lo, c0); \ + __m128i out0##_h = _mm_madd_epi16(c0##hi, c0); \ + __m128i out1##_l = _mm_madd_epi16(c0##lo, c1); \ + __m128i out1##_h = _mm_madd_epi16(c0##hi, c1) + + // out = in << 12 (in 16-bit, out 32-bit) + #define dct_widen(out, in) \ + __m128i out##_l = _mm_srai_epi32(_mm_unpacklo_epi16(_mm_setzero_si128(), (in)), 4); \ + __m128i out##_h = _mm_srai_epi32(_mm_unpackhi_epi16(_mm_setzero_si128(), (in)), 4) + + // wide add + #define dct_wadd(out, a, b) \ + __m128i out##_l = _mm_add_epi32(a##_l, b##_l); \ + __m128i out##_h = _mm_add_epi32(a##_h, b##_h) + + // wide sub + #define dct_wsub(out, a, b) \ + __m128i out##_l = _mm_sub_epi32(a##_l, b##_l); \ + __m128i out##_h = _mm_sub_epi32(a##_h, b##_h) + + // butterfly a/b, add bias, then shift by "s" and pack + #define dct_bfly32o(out0, out1, a,b,bias,s) \ + { \ + __m128i abiased_l = _mm_add_epi32(a##_l, bias); \ + __m128i abiased_h = _mm_add_epi32(a##_h, bias); \ + dct_wadd(sum, abiased, b); \ + dct_wsub(dif, abiased, b); \ + out0 = _mm_packs_epi32(_mm_srai_epi32(sum_l, s), _mm_srai_epi32(sum_h, s)); \ + out1 = _mm_packs_epi32(_mm_srai_epi32(dif_l, s), _mm_srai_epi32(dif_h, s)); \ + } + + // 8-bit interleave step (for transposes) + #define dct_interleave8(a, b) \ + tmp = a; \ + a = _mm_unpacklo_epi8(a, b); \ + b = _mm_unpackhi_epi8(tmp, b) + + // 16-bit interleave step (for transposes) + #define dct_interleave16(a, b) \ + tmp = a; \ + a = _mm_unpacklo_epi16(a, b); \ + b = _mm_unpackhi_epi16(tmp, b) + + #define dct_pass(bias,shift) \ + { \ + /* even part */ \ + dct_rot(t2e,t3e, row2,row6, rot0_0,rot0_1); \ + __m128i sum04 = _mm_add_epi16(row0, row4); \ + __m128i dif04 = _mm_sub_epi16(row0, row4); \ + dct_widen(t0e, sum04); \ + dct_widen(t1e, dif04); \ + 
dct_wadd(x0, t0e, t3e); \ + dct_wsub(x3, t0e, t3e); \ + dct_wadd(x1, t1e, t2e); \ + dct_wsub(x2, t1e, t2e); \ + /* odd part */ \ + dct_rot(y0o,y2o, row7,row3, rot2_0,rot2_1); \ + dct_rot(y1o,y3o, row5,row1, rot3_0,rot3_1); \ + __m128i sum17 = _mm_add_epi16(row1, row7); \ + __m128i sum35 = _mm_add_epi16(row3, row5); \ + dct_rot(y4o,y5o, sum17,sum35, rot1_0,rot1_1); \ + dct_wadd(x4, y0o, y4o); \ + dct_wadd(x5, y1o, y5o); \ + dct_wadd(x6, y2o, y5o); \ + dct_wadd(x7, y3o, y4o); \ + dct_bfly32o(row0,row7, x0,x7,bias,shift); \ + dct_bfly32o(row1,row6, x1,x6,bias,shift); \ + dct_bfly32o(row2,row5, x2,x5,bias,shift); \ + dct_bfly32o(row3,row4, x3,x4,bias,shift); \ + } + + __m128i rot0_0 = dct_const(stbi__f2f(0.5411961f), stbi__f2f(0.5411961f) + stbi__f2f(-1.847759065f)); + __m128i rot0_1 = dct_const(stbi__f2f(0.5411961f) + stbi__f2f( 0.765366865f), stbi__f2f(0.5411961f)); + __m128i rot1_0 = dct_const(stbi__f2f(1.175875602f) + stbi__f2f(-0.899976223f), stbi__f2f(1.175875602f)); + __m128i rot1_1 = dct_const(stbi__f2f(1.175875602f), stbi__f2f(1.175875602f) + stbi__f2f(-2.562915447f)); + __m128i rot2_0 = dct_const(stbi__f2f(-1.961570560f) + stbi__f2f( 0.298631336f), stbi__f2f(-1.961570560f)); + __m128i rot2_1 = dct_const(stbi__f2f(-1.961570560f), stbi__f2f(-1.961570560f) + stbi__f2f( 3.072711026f)); + __m128i rot3_0 = dct_const(stbi__f2f(-0.390180644f) + stbi__f2f( 2.053119869f), stbi__f2f(-0.390180644f)); + __m128i rot3_1 = dct_const(stbi__f2f(-0.390180644f), stbi__f2f(-0.390180644f) + stbi__f2f( 1.501321110f)); + + // rounding biases in column/row passes, see stbi__idct_block for explanation. + __m128i bias_0 = _mm_set1_epi32(512); + __m128i bias_1 = _mm_set1_epi32(65536 + (128<<17)); + + // load + row0 = _mm_load_si128((const __m128i *) (data + 0*8)); + row1 = _mm_load_si128((const __m128i *) (data + 1*8)); + row2 = _mm_load_si128((const __m128i *) (data + 2*8)); + row3 = _mm_load_si128((const __m128i *) (data + 3*8)); + row4 = _mm_load_si128((const __m128i *) (data + 4*8)); + row5 = _mm_load_si128((const __m128i *) (data + 5*8)); + row6 = _mm_load_si128((const __m128i *) (data + 6*8)); + row7 = _mm_load_si128((const __m128i *) (data + 7*8)); + + // column pass + dct_pass(bias_0, 10); + + { + // 16bit 8x8 transpose pass 1 + dct_interleave16(row0, row4); + dct_interleave16(row1, row5); + dct_interleave16(row2, row6); + dct_interleave16(row3, row7); + + // transpose pass 2 + dct_interleave16(row0, row2); + dct_interleave16(row1, row3); + dct_interleave16(row4, row6); + dct_interleave16(row5, row7); + + // transpose pass 3 + dct_interleave16(row0, row1); + dct_interleave16(row2, row3); + dct_interleave16(row4, row5); + dct_interleave16(row6, row7); + } + + // row pass + dct_pass(bias_1, 17); + + { + // pack + __m128i p0 = _mm_packus_epi16(row0, row1); // a0a1a2a3...a7b0b1b2b3...b7 + __m128i p1 = _mm_packus_epi16(row2, row3); + __m128i p2 = _mm_packus_epi16(row4, row5); + __m128i p3 = _mm_packus_epi16(row6, row7); + + // 8bit 8x8 transpose pass 1 + dct_interleave8(p0, p2); // a0e0a1e1... + dct_interleave8(p1, p3); // c0g0c1g1... + + // transpose pass 2 + dct_interleave8(p0, p1); // a0c0e0g0... + dct_interleave8(p2, p3); // b0d0f0h0... + + // transpose pass 3 + dct_interleave8(p0, p2); // a0b0c0d0... + dct_interleave8(p1, p3); // a4b4c4d4... 
+ + // store + _mm_storel_epi64((__m128i *) out, p0); out += out_stride; + _mm_storel_epi64((__m128i *) out, _mm_shuffle_epi32(p0, 0x4e)); out += out_stride; + _mm_storel_epi64((__m128i *) out, p2); out += out_stride; + _mm_storel_epi64((__m128i *) out, _mm_shuffle_epi32(p2, 0x4e)); out += out_stride; + _mm_storel_epi64((__m128i *) out, p1); out += out_stride; + _mm_storel_epi64((__m128i *) out, _mm_shuffle_epi32(p1, 0x4e)); out += out_stride; + _mm_storel_epi64((__m128i *) out, p3); out += out_stride; + _mm_storel_epi64((__m128i *) out, _mm_shuffle_epi32(p3, 0x4e)); + } + +#undef dct_const +#undef dct_rot +#undef dct_widen +#undef dct_wadd +#undef dct_wsub +#undef dct_bfly32o +#undef dct_interleave8 +#undef dct_interleave16 +#undef dct_pass +} + +#endif // STBI_SSE2 + +#ifdef STBI_NEON + +// NEON integer IDCT. should produce bit-identical +// results to the generic C version. +static void stbi__idct_simd(stbi_uc *out, int out_stride, short data[64]) +{ + int16x8_t row0, row1, row2, row3, row4, row5, row6, row7; + + int16x4_t rot0_0 = vdup_n_s16(stbi__f2f(0.5411961f)); + int16x4_t rot0_1 = vdup_n_s16(stbi__f2f(-1.847759065f)); + int16x4_t rot0_2 = vdup_n_s16(stbi__f2f( 0.765366865f)); + int16x4_t rot1_0 = vdup_n_s16(stbi__f2f( 1.175875602f)); + int16x4_t rot1_1 = vdup_n_s16(stbi__f2f(-0.899976223f)); + int16x4_t rot1_2 = vdup_n_s16(stbi__f2f(-2.562915447f)); + int16x4_t rot2_0 = vdup_n_s16(stbi__f2f(-1.961570560f)); + int16x4_t rot2_1 = vdup_n_s16(stbi__f2f(-0.390180644f)); + int16x4_t rot3_0 = vdup_n_s16(stbi__f2f( 0.298631336f)); + int16x4_t rot3_1 = vdup_n_s16(stbi__f2f( 2.053119869f)); + int16x4_t rot3_2 = vdup_n_s16(stbi__f2f( 3.072711026f)); + int16x4_t rot3_3 = vdup_n_s16(stbi__f2f( 1.501321110f)); + +#define dct_long_mul(out, inq, coeff) \ + int32x4_t out##_l = vmull_s16(vget_low_s16(inq), coeff); \ + int32x4_t out##_h = vmull_s16(vget_high_s16(inq), coeff) + +#define dct_long_mac(out, acc, inq, coeff) \ + int32x4_t out##_l = vmlal_s16(acc##_l, vget_low_s16(inq), coeff); \ + int32x4_t out##_h = vmlal_s16(acc##_h, vget_high_s16(inq), coeff) + +#define dct_widen(out, inq) \ + int32x4_t out##_l = vshll_n_s16(vget_low_s16(inq), 12); \ + int32x4_t out##_h = vshll_n_s16(vget_high_s16(inq), 12) + +// wide add +#define dct_wadd(out, a, b) \ + int32x4_t out##_l = vaddq_s32(a##_l, b##_l); \ + int32x4_t out##_h = vaddq_s32(a##_h, b##_h) + +// wide sub +#define dct_wsub(out, a, b) \ + int32x4_t out##_l = vsubq_s32(a##_l, b##_l); \ + int32x4_t out##_h = vsubq_s32(a##_h, b##_h) + +// butterfly a/b, then shift using "shiftop" by "s" and pack +#define dct_bfly32o(out0,out1, a,b,shiftop,s) \ + { \ + dct_wadd(sum, a, b); \ + dct_wsub(dif, a, b); \ + out0 = vcombine_s16(shiftop(sum_l, s), shiftop(sum_h, s)); \ + out1 = vcombine_s16(shiftop(dif_l, s), shiftop(dif_h, s)); \ + } + +#define dct_pass(shiftop, shift) \ + { \ + /* even part */ \ + int16x8_t sum26 = vaddq_s16(row2, row6); \ + dct_long_mul(p1e, sum26, rot0_0); \ + dct_long_mac(t2e, p1e, row6, rot0_1); \ + dct_long_mac(t3e, p1e, row2, rot0_2); \ + int16x8_t sum04 = vaddq_s16(row0, row4); \ + int16x8_t dif04 = vsubq_s16(row0, row4); \ + dct_widen(t0e, sum04); \ + dct_widen(t1e, dif04); \ + dct_wadd(x0, t0e, t3e); \ + dct_wsub(x3, t0e, t3e); \ + dct_wadd(x1, t1e, t2e); \ + dct_wsub(x2, t1e, t2e); \ + /* odd part */ \ + int16x8_t sum15 = vaddq_s16(row1, row5); \ + int16x8_t sum17 = vaddq_s16(row1, row7); \ + int16x8_t sum35 = vaddq_s16(row3, row5); \ + int16x8_t sum37 = vaddq_s16(row3, row7); \ + int16x8_t sumodd = vaddq_s16(sum17, sum35); \ + 
dct_long_mul(p5o, sumodd, rot1_0); \ + dct_long_mac(p1o, p5o, sum17, rot1_1); \ + dct_long_mac(p2o, p5o, sum35, rot1_2); \ + dct_long_mul(p3o, sum37, rot2_0); \ + dct_long_mul(p4o, sum15, rot2_1); \ + dct_wadd(sump13o, p1o, p3o); \ + dct_wadd(sump24o, p2o, p4o); \ + dct_wadd(sump23o, p2o, p3o); \ + dct_wadd(sump14o, p1o, p4o); \ + dct_long_mac(x4, sump13o, row7, rot3_0); \ + dct_long_mac(x5, sump24o, row5, rot3_1); \ + dct_long_mac(x6, sump23o, row3, rot3_2); \ + dct_long_mac(x7, sump14o, row1, rot3_3); \ + dct_bfly32o(row0,row7, x0,x7,shiftop,shift); \ + dct_bfly32o(row1,row6, x1,x6,shiftop,shift); \ + dct_bfly32o(row2,row5, x2,x5,shiftop,shift); \ + dct_bfly32o(row3,row4, x3,x4,shiftop,shift); \ + } + + // load + row0 = vld1q_s16(data + 0*8); + row1 = vld1q_s16(data + 1*8); + row2 = vld1q_s16(data + 2*8); + row3 = vld1q_s16(data + 3*8); + row4 = vld1q_s16(data + 4*8); + row5 = vld1q_s16(data + 5*8); + row6 = vld1q_s16(data + 6*8); + row7 = vld1q_s16(data + 7*8); + + // add DC bias + row0 = vaddq_s16(row0, vsetq_lane_s16(1024, vdupq_n_s16(0), 0)); + + // column pass + dct_pass(vrshrn_n_s32, 10); + + // 16bit 8x8 transpose + { +// these three map to a single VTRN.16, VTRN.32, and VSWP, respectively. +// whether compilers actually get this is another story, sadly. +#define dct_trn16(x, y) { int16x8x2_t t = vtrnq_s16(x, y); x = t.val[0]; y = t.val[1]; } +#define dct_trn32(x, y) { int32x4x2_t t = vtrnq_s32(vreinterpretq_s32_s16(x), vreinterpretq_s32_s16(y)); x = vreinterpretq_s16_s32(t.val[0]); y = vreinterpretq_s16_s32(t.val[1]); } +#define dct_trn64(x, y) { int16x8_t x0 = x; int16x8_t y0 = y; x = vcombine_s16(vget_low_s16(x0), vget_low_s16(y0)); y = vcombine_s16(vget_high_s16(x0), vget_high_s16(y0)); } + + // pass 1 + dct_trn16(row0, row1); // a0b0a2b2a4b4a6b6 + dct_trn16(row2, row3); + dct_trn16(row4, row5); + dct_trn16(row6, row7); + + // pass 2 + dct_trn32(row0, row2); // a0b0c0d0a4b4c4d4 + dct_trn32(row1, row3); + dct_trn32(row4, row6); + dct_trn32(row5, row7); + + // pass 3 + dct_trn64(row0, row4); // a0b0c0d0e0f0g0h0 + dct_trn64(row1, row5); + dct_trn64(row2, row6); + dct_trn64(row3, row7); + +#undef dct_trn16 +#undef dct_trn32 +#undef dct_trn64 + } + + // row pass + // vrshrn_n_s32 only supports shifts up to 16, we need + // 17. so do a non-rounding shift of 16 first then follow + // up with a rounding shift by 1. + dct_pass(vshrn_n_s32, 16); + + { + // pack and round + uint8x8_t p0 = vqrshrun_n_s16(row0, 1); + uint8x8_t p1 = vqrshrun_n_s16(row1, 1); + uint8x8_t p2 = vqrshrun_n_s16(row2, 1); + uint8x8_t p3 = vqrshrun_n_s16(row3, 1); + uint8x8_t p4 = vqrshrun_n_s16(row4, 1); + uint8x8_t p5 = vqrshrun_n_s16(row5, 1); + uint8x8_t p6 = vqrshrun_n_s16(row6, 1); + uint8x8_t p7 = vqrshrun_n_s16(row7, 1); + + // again, these can translate into one instruction, but often don't. +#define dct_trn8_8(x, y) { uint8x8x2_t t = vtrn_u8(x, y); x = t.val[0]; y = t.val[1]; } +#define dct_trn8_16(x, y) { uint16x4x2_t t = vtrn_u16(vreinterpret_u16_u8(x), vreinterpret_u16_u8(y)); x = vreinterpret_u8_u16(t.val[0]); y = vreinterpret_u8_u16(t.val[1]); } +#define dct_trn8_32(x, y) { uint32x2x2_t t = vtrn_u32(vreinterpret_u32_u8(x), vreinterpret_u32_u8(y)); x = vreinterpret_u8_u32(t.val[0]); y = vreinterpret_u8_u32(t.val[1]); } + + // sadly can't use interleaved stores here since we only write + // 8 bytes to each scan line! 
+ + // 8x8 8-bit transpose pass 1 + dct_trn8_8(p0, p1); + dct_trn8_8(p2, p3); + dct_trn8_8(p4, p5); + dct_trn8_8(p6, p7); + + // pass 2 + dct_trn8_16(p0, p2); + dct_trn8_16(p1, p3); + dct_trn8_16(p4, p6); + dct_trn8_16(p5, p7); + + // pass 3 + dct_trn8_32(p0, p4); + dct_trn8_32(p1, p5); + dct_trn8_32(p2, p6); + dct_trn8_32(p3, p7); + + // store + vst1_u8(out, p0); out += out_stride; + vst1_u8(out, p1); out += out_stride; + vst1_u8(out, p2); out += out_stride; + vst1_u8(out, p3); out += out_stride; + vst1_u8(out, p4); out += out_stride; + vst1_u8(out, p5); out += out_stride; + vst1_u8(out, p6); out += out_stride; + vst1_u8(out, p7); + +#undef dct_trn8_8 +#undef dct_trn8_16 +#undef dct_trn8_32 + } + +#undef dct_long_mul +#undef dct_long_mac +#undef dct_widen +#undef dct_wadd +#undef dct_wsub +#undef dct_bfly32o +#undef dct_pass +} + +#endif // STBI_NEON + +#define STBI__MARKER_none 0xff +// if there's a pending marker from the entropy stream, return that +// otherwise, fetch from the stream and get a marker. if there's no +// marker, return 0xff, which is never a valid marker value +static stbi_uc stbi__get_marker(stbi__jpeg *j) +{ + stbi_uc x; + if (j->marker != STBI__MARKER_none) { x = j->marker; j->marker = STBI__MARKER_none; return x; } + x = stbi__get8(j->s); + if (x != 0xff) return STBI__MARKER_none; + while (x == 0xff) + x = stbi__get8(j->s); // consume repeated 0xff fill bytes + return x; +} + +// in each scan, we'll have scan_n components, and the order +// of the components is specified by order[] +#define STBI__RESTART(x) ((x) >= 0xd0 && (x) <= 0xd7) + +// after a restart interval, stbi__jpeg_reset the entropy decoder and +// the dc prediction +static void stbi__jpeg_reset(stbi__jpeg *j) +{ + j->code_bits = 0; + j->code_buffer = 0; + j->nomore = 0; + j->img_comp[0].dc_pred = j->img_comp[1].dc_pred = j->img_comp[2].dc_pred = j->img_comp[3].dc_pred = 0; + j->marker = STBI__MARKER_none; + j->todo = j->restart_interval ? j->restart_interval : 0x7fffffff; + j->eob_run = 0; + // no more than 1<<31 MCUs if no restart_interal? that's plenty safe, + // since we don't even allow 1<<30 pixels +} + +static int stbi__parse_entropy_coded_data(stbi__jpeg *z) +{ + stbi__jpeg_reset(z); + if (!z->progressive) { + if (z->scan_n == 1) { + int i,j; + STBI_SIMD_ALIGN(short, data[64]); + int n = z->order[0]; + // non-interleaved data, we just need to process one block at a time, + // in trivial scanline order + // number of blocks to do just depends on how many actual "pixels" this + // component has, independent of interleaved MCU blocking and such + int w = (z->img_comp[n].x+7) >> 3; + int h = (z->img_comp[n].y+7) >> 3; + for (j=0; j < h; ++j) { + for (i=0; i < w; ++i) { + int ha = z->img_comp[n].ha; + if (!stbi__jpeg_decode_block(z, data, z->huff_dc+z->img_comp[n].hd, z->huff_ac+ha, z->fast_ac[ha], n, z->dequant[z->img_comp[n].tq])) return 0; + z->idct_block_kernel(z->img_comp[n].data+z->img_comp[n].w2*j*8+i*8, z->img_comp[n].w2, data); + // every data block is an MCU, so countdown the restart interval + if (--z->todo <= 0) { + if (z->code_bits < 24) stbi__grow_buffer_unsafe(z); + // if it's NOT a restart, then just bail, so we get corrupt data + // rather than no data + if (!STBI__RESTART(z->marker)) return 1; + stbi__jpeg_reset(z); + } + } + } + return 1; + } else { // interleaved + int i,j,k,x,y; + STBI_SIMD_ALIGN(short, data[64]); + for (j=0; j < z->img_mcu_y; ++j) { + for (i=0; i < z->img_mcu_x; ++i) { + // scan an interleaved mcu... 
process scan_n components in order + for (k=0; k < z->scan_n; ++k) { + int n = z->order[k]; + // scan out an mcu's worth of this component; that's just determined + // by the basic H and V specified for the component + for (y=0; y < z->img_comp[n].v; ++y) { + for (x=0; x < z->img_comp[n].h; ++x) { + int x2 = (i*z->img_comp[n].h + x)*8; + int y2 = (j*z->img_comp[n].v + y)*8; + int ha = z->img_comp[n].ha; + if (!stbi__jpeg_decode_block(z, data, z->huff_dc+z->img_comp[n].hd, z->huff_ac+ha, z->fast_ac[ha], n, z->dequant[z->img_comp[n].tq])) return 0; + z->idct_block_kernel(z->img_comp[n].data+z->img_comp[n].w2*y2+x2, z->img_comp[n].w2, data); + } + } + } + // after all interleaved components, that's an interleaved MCU, + // so now count down the restart interval + if (--z->todo <= 0) { + if (z->code_bits < 24) stbi__grow_buffer_unsafe(z); + if (!STBI__RESTART(z->marker)) return 1; + stbi__jpeg_reset(z); + } + } + } + return 1; + } + } else { + if (z->scan_n == 1) { + int i,j; + int n = z->order[0]; + // non-interleaved data, we just need to process one block at a time, + // in trivial scanline order + // number of blocks to do just depends on how many actual "pixels" this + // component has, independent of interleaved MCU blocking and such + int w = (z->img_comp[n].x+7) >> 3; + int h = (z->img_comp[n].y+7) >> 3; + for (j=0; j < h; ++j) { + for (i=0; i < w; ++i) { + short *data = z->img_comp[n].coeff + 64 * (i + j * z->img_comp[n].coeff_w); + if (z->spec_start == 0) { + if (!stbi__jpeg_decode_block_prog_dc(z, data, &z->huff_dc[z->img_comp[n].hd], n)) + return 0; + } else { + int ha = z->img_comp[n].ha; + if (!stbi__jpeg_decode_block_prog_ac(z, data, &z->huff_ac[ha], z->fast_ac[ha])) + return 0; + } + // every data block is an MCU, so countdown the restart interval + if (--z->todo <= 0) { + if (z->code_bits < 24) stbi__grow_buffer_unsafe(z); + if (!STBI__RESTART(z->marker)) return 1; + stbi__jpeg_reset(z); + } + } + } + return 1; + } else { // interleaved + int i,j,k,x,y; + for (j=0; j < z->img_mcu_y; ++j) { + for (i=0; i < z->img_mcu_x; ++i) { + // scan an interleaved mcu... 
process scan_n components in order + for (k=0; k < z->scan_n; ++k) { + int n = z->order[k]; + // scan out an mcu's worth of this component; that's just determined + // by the basic H and V specified for the component + for (y=0; y < z->img_comp[n].v; ++y) { + for (x=0; x < z->img_comp[n].h; ++x) { + int x2 = (i*z->img_comp[n].h + x); + int y2 = (j*z->img_comp[n].v + y); + short *data = z->img_comp[n].coeff + 64 * (x2 + y2 * z->img_comp[n].coeff_w); + if (!stbi__jpeg_decode_block_prog_dc(z, data, &z->huff_dc[z->img_comp[n].hd], n)) + return 0; + } + } + } + // after all interleaved components, that's an interleaved MCU, + // so now count down the restart interval + if (--z->todo <= 0) { + if (z->code_bits < 24) stbi__grow_buffer_unsafe(z); + if (!STBI__RESTART(z->marker)) return 1; + stbi__jpeg_reset(z); + } + } + } + return 1; + } + } +} + +static void stbi__jpeg_dequantize(short *data, stbi__uint16 *dequant) +{ + int i; + for (i=0; i < 64; ++i) + data[i] *= dequant[i]; +} + +static void stbi__jpeg_finish(stbi__jpeg *z) +{ + if (z->progressive) { + // dequantize and idct the data + int i,j,n; + for (n=0; n < z->s->img_n; ++n) { + int w = (z->img_comp[n].x+7) >> 3; + int h = (z->img_comp[n].y+7) >> 3; + for (j=0; j < h; ++j) { + for (i=0; i < w; ++i) { + short *data = z->img_comp[n].coeff + 64 * (i + j * z->img_comp[n].coeff_w); + stbi__jpeg_dequantize(data, z->dequant[z->img_comp[n].tq]); + z->idct_block_kernel(z->img_comp[n].data+z->img_comp[n].w2*j*8+i*8, z->img_comp[n].w2, data); + } + } + } + } +} + +static int stbi__process_marker(stbi__jpeg *z, int m) +{ + int L; + switch (m) { + case STBI__MARKER_none: // no marker found + return stbi__err("expected marker","Corrupt JPEG"); + + case 0xDD: // DRI - specify restart interval + if (stbi__get16be(z->s) != 4) return stbi__err("bad DRI len","Corrupt JPEG"); + z->restart_interval = stbi__get16be(z->s); + return 1; + + case 0xDB: // DQT - define quantization table + L = stbi__get16be(z->s)-2; + while (L > 0) { + int q = stbi__get8(z->s); + int p = q >> 4, sixteen = (p != 0); + int t = q & 15,i; + if (p != 0 && p != 1) return stbi__err("bad DQT type","Corrupt JPEG"); + if (t > 3) return stbi__err("bad DQT table","Corrupt JPEG"); + + for (i=0; i < 64; ++i) + z->dequant[t][stbi__jpeg_dezigzag[i]] = (stbi__uint16)(sixteen ? stbi__get16be(z->s) : stbi__get8(z->s)); + L -= (sixteen ? 
129 : 65); + } + return L==0; + + case 0xC4: // DHT - define huffman table + L = stbi__get16be(z->s)-2; + while (L > 0) { + stbi_uc *v; + int sizes[16],i,n=0; + int q = stbi__get8(z->s); + int tc = q >> 4; + int th = q & 15; + if (tc > 1 || th > 3) return stbi__err("bad DHT header","Corrupt JPEG"); + for (i=0; i < 16; ++i) { + sizes[i] = stbi__get8(z->s); + n += sizes[i]; + } + L -= 17; + if (tc == 0) { + if (!stbi__build_huffman(z->huff_dc+th, sizes)) return 0; + v = z->huff_dc[th].values; + } else { + if (!stbi__build_huffman(z->huff_ac+th, sizes)) return 0; + v = z->huff_ac[th].values; + } + for (i=0; i < n; ++i) + v[i] = stbi__get8(z->s); + if (tc != 0) + stbi__build_fast_ac(z->fast_ac[th], z->huff_ac + th); + L -= n; + } + return L==0; + } + + // check for comment block or APP blocks + if ((m >= 0xE0 && m <= 0xEF) || m == 0xFE) { + L = stbi__get16be(z->s); + if (L < 2) { + if (m == 0xFE) + return stbi__err("bad COM len","Corrupt JPEG"); + else + return stbi__err("bad APP len","Corrupt JPEG"); + } + L -= 2; + + if (m == 0xE0 && L >= 5) { // JFIF APP0 segment + static const unsigned char tag[5] = {'J','F','I','F','\0'}; + int ok = 1; + int i; + for (i=0; i < 5; ++i) + if (stbi__get8(z->s) != tag[i]) + ok = 0; + L -= 5; + if (ok) + z->jfif = 1; + } else if (m == 0xEE && L >= 12) { // Adobe APP14 segment + static const unsigned char tag[6] = {'A','d','o','b','e','\0'}; + int ok = 1; + int i; + for (i=0; i < 6; ++i) + if (stbi__get8(z->s) != tag[i]) + ok = 0; + L -= 6; + if (ok) { + stbi__get8(z->s); // version + stbi__get16be(z->s); // flags0 + stbi__get16be(z->s); // flags1 + z->app14_color_transform = stbi__get8(z->s); // color transform + L -= 6; + } + } + + stbi__skip(z->s, L); + return 1; + } + + return stbi__err("unknown marker","Corrupt JPEG"); +} + +// after we see SOS +static int stbi__process_scan_header(stbi__jpeg *z) +{ + int i; + int Ls = stbi__get16be(z->s); + z->scan_n = stbi__get8(z->s); + if (z->scan_n < 1 || z->scan_n > 4 || z->scan_n > (int) z->s->img_n) return stbi__err("bad SOS component count","Corrupt JPEG"); + if (Ls != 6+2*z->scan_n) return stbi__err("bad SOS len","Corrupt JPEG"); + for (i=0; i < z->scan_n; ++i) { + int id = stbi__get8(z->s), which; + int q = stbi__get8(z->s); + for (which = 0; which < z->s->img_n; ++which) + if (z->img_comp[which].id == id) + break; + if (which == z->s->img_n) return 0; // no match + z->img_comp[which].hd = q >> 4; if (z->img_comp[which].hd > 3) return stbi__err("bad DC huff","Corrupt JPEG"); + z->img_comp[which].ha = q & 15; if (z->img_comp[which].ha > 3) return stbi__err("bad AC huff","Corrupt JPEG"); + z->order[i] = which; + } + + { + int aa; + z->spec_start = stbi__get8(z->s); + z->spec_end = stbi__get8(z->s); // should be 63, but might be 0 + aa = stbi__get8(z->s); + z->succ_high = (aa >> 4); + z->succ_low = (aa & 15); + if (z->progressive) { + if (z->spec_start > 63 || z->spec_end > 63 || z->spec_start > z->spec_end || z->succ_high > 13 || z->succ_low > 13) + return stbi__err("bad SOS", "Corrupt JPEG"); + } else { + if (z->spec_start != 0) return stbi__err("bad SOS","Corrupt JPEG"); + if (z->succ_high != 0 || z->succ_low != 0) return stbi__err("bad SOS","Corrupt JPEG"); + z->spec_end = 63; + } + } + + return 1; +} + +static int stbi__free_jpeg_components(stbi__jpeg *z, int ncomp, int why) +{ + int i; + for (i=0; i < ncomp; ++i) { + if (z->img_comp[i].raw_data) { + STBI_FREE(z->img_comp[i].raw_data); + z->img_comp[i].raw_data = NULL; + z->img_comp[i].data = NULL; + } + if (z->img_comp[i].raw_coeff) { + 
STBI_FREE(z->img_comp[i].raw_coeff); + z->img_comp[i].raw_coeff = 0; + z->img_comp[i].coeff = 0; + } + if (z->img_comp[i].linebuf) { + STBI_FREE(z->img_comp[i].linebuf); + z->img_comp[i].linebuf = NULL; + } + } + return why; +} + +static int stbi__process_frame_header(stbi__jpeg *z, int scan) +{ + stbi__context *s = z->s; + int Lf,p,i,q, h_max=1,v_max=1,c; + Lf = stbi__get16be(s); if (Lf < 11) return stbi__err("bad SOF len","Corrupt JPEG"); // JPEG + p = stbi__get8(s); if (p != 8) return stbi__err("only 8-bit","JPEG format not supported: 8-bit only"); // JPEG baseline + s->img_y = stbi__get16be(s); if (s->img_y == 0) return stbi__err("no header height", "JPEG format not supported: delayed height"); // Legal, but we don't handle it--but neither does IJG + s->img_x = stbi__get16be(s); if (s->img_x == 0) return stbi__err("0 width","Corrupt JPEG"); // JPEG requires + if (s->img_y > STBI_MAX_DIMENSIONS) return stbi__err("too large","Very large image (corrupt?)"); + if (s->img_x > STBI_MAX_DIMENSIONS) return stbi__err("too large","Very large image (corrupt?)"); + c = stbi__get8(s); + if (c != 3 && c != 1 && c != 4) return stbi__err("bad component count","Corrupt JPEG"); + s->img_n = c; + for (i=0; i < c; ++i) { + z->img_comp[i].data = NULL; + z->img_comp[i].linebuf = NULL; + } + + if (Lf != 8+3*s->img_n) return stbi__err("bad SOF len","Corrupt JPEG"); + + z->rgb = 0; + for (i=0; i < s->img_n; ++i) { + static const unsigned char rgb[3] = { 'R', 'G', 'B' }; + z->img_comp[i].id = stbi__get8(s); + if (s->img_n == 3 && z->img_comp[i].id == rgb[i]) + ++z->rgb; + q = stbi__get8(s); + z->img_comp[i].h = (q >> 4); if (!z->img_comp[i].h || z->img_comp[i].h > 4) return stbi__err("bad H","Corrupt JPEG"); + z->img_comp[i].v = q & 15; if (!z->img_comp[i].v || z->img_comp[i].v > 4) return stbi__err("bad V","Corrupt JPEG"); + z->img_comp[i].tq = stbi__get8(s); if (z->img_comp[i].tq > 3) return stbi__err("bad TQ","Corrupt JPEG"); + } + + if (scan != STBI__SCAN_load) return 1; + + if (!stbi__mad3sizes_valid(s->img_x, s->img_y, s->img_n, 0)) return stbi__err("too large", "Image too large to decode"); + + for (i=0; i < s->img_n; ++i) { + if (z->img_comp[i].h > h_max) h_max = z->img_comp[i].h; + if (z->img_comp[i].v > v_max) v_max = z->img_comp[i].v; + } + + // compute interleaved mcu info + z->img_h_max = h_max; + z->img_v_max = v_max; + z->img_mcu_w = h_max * 8; + z->img_mcu_h = v_max * 8; + // these sizes can't be more than 17 bits + z->img_mcu_x = (s->img_x + z->img_mcu_w-1) / z->img_mcu_w; + z->img_mcu_y = (s->img_y + z->img_mcu_h-1) / z->img_mcu_h; + + for (i=0; i < s->img_n; ++i) { + // number of effective pixels (e.g. for non-interleaved MCU) + z->img_comp[i].x = (s->img_x * z->img_comp[i].h + h_max-1) / h_max; + z->img_comp[i].y = (s->img_y * z->img_comp[i].v + v_max-1) / v_max; + // to simplify generation, we'll allocate enough memory to decode + // the bogus oversized data from using interleaved MCUs and their + // big blocks (e.g. 
a 16x16 iMCU on an image of width 33); we won't + // discard the extra data until colorspace conversion + // + // img_mcu_x, img_mcu_y: <=17 bits; comp[i].h and .v are <=4 (checked earlier) + // so these muls can't overflow with 32-bit ints (which we require) + z->img_comp[i].w2 = z->img_mcu_x * z->img_comp[i].h * 8; + z->img_comp[i].h2 = z->img_mcu_y * z->img_comp[i].v * 8; + z->img_comp[i].coeff = 0; + z->img_comp[i].raw_coeff = 0; + z->img_comp[i].linebuf = NULL; + z->img_comp[i].raw_data = stbi__malloc_mad2(z->img_comp[i].w2, z->img_comp[i].h2, 15); + if (z->img_comp[i].raw_data == NULL) + return stbi__free_jpeg_components(z, i+1, stbi__err("outofmem", "Out of memory")); + // align blocks for idct using mmx/sse + z->img_comp[i].data = (stbi_uc*) (((size_t) z->img_comp[i].raw_data + 15) & ~15); + if (z->progressive) { + // w2, h2 are multiples of 8 (see above) + z->img_comp[i].coeff_w = z->img_comp[i].w2 / 8; + z->img_comp[i].coeff_h = z->img_comp[i].h2 / 8; + z->img_comp[i].raw_coeff = stbi__malloc_mad3(z->img_comp[i].w2, z->img_comp[i].h2, sizeof(short), 15); + if (z->img_comp[i].raw_coeff == NULL) + return stbi__free_jpeg_components(z, i+1, stbi__err("outofmem", "Out of memory")); + z->img_comp[i].coeff = (short*) (((size_t) z->img_comp[i].raw_coeff + 15) & ~15); + } + } + + return 1; +} + +// use comparisons since in some cases we handle more than one case (e.g. SOF) +#define stbi__DNL(x) ((x) == 0xdc) +#define stbi__SOI(x) ((x) == 0xd8) +#define stbi__EOI(x) ((x) == 0xd9) +#define stbi__SOF(x) ((x) == 0xc0 || (x) == 0xc1 || (x) == 0xc2) +#define stbi__SOS(x) ((x) == 0xda) + +#define stbi__SOF_progressive(x) ((x) == 0xc2) + +static int stbi__decode_jpeg_header(stbi__jpeg *z, int scan) +{ + int m; + z->jfif = 0; + z->app14_color_transform = -1; // valid values are 0,1,2 + z->marker = STBI__MARKER_none; // initialize cached marker to empty + m = stbi__get_marker(z); + if (!stbi__SOI(m)) return stbi__err("no SOI","Corrupt JPEG"); + if (scan == STBI__SCAN_type) return 1; + m = stbi__get_marker(z); + while (!stbi__SOF(m)) { + if (!stbi__process_marker(z,m)) return 0; + m = stbi__get_marker(z); + while (m == STBI__MARKER_none) { + // some files have extra padding after their blocks, so ok, we'll scan + if (stbi__at_eof(z->s)) return stbi__err("no SOF", "Corrupt JPEG"); + m = stbi__get_marker(z); + } + } + z->progressive = stbi__SOF_progressive(m); + if (!stbi__process_frame_header(z, scan)) return 0; + return 1; +} + +// decode image to YCbCr format +static int stbi__decode_jpeg_image(stbi__jpeg *j) +{ + int m; + for (m = 0; m < 4; m++) { + j->img_comp[m].raw_data = NULL; + j->img_comp[m].raw_coeff = NULL; + } + j->restart_interval = 0; + if (!stbi__decode_jpeg_header(j, STBI__SCAN_load)) return 0; + m = stbi__get_marker(j); + while (!stbi__EOI(m)) { + if (stbi__SOS(m)) { + if (!stbi__process_scan_header(j)) return 0; + if (!stbi__parse_entropy_coded_data(j)) return 0; + if (j->marker == STBI__MARKER_none ) { + // handle 0s at the end of image data from IP Kamera 9060 + while (!stbi__at_eof(j->s)) { + int x = stbi__get8(j->s); + if (x == 255) { + j->marker = stbi__get8(j->s); + break; + } + } + // if we reach eof without hitting a marker, stbi__get_marker() below will fail and we'll eventually return 0 + } + } else if (stbi__DNL(m)) { + int Ld = stbi__get16be(j->s); + stbi__uint32 NL = stbi__get16be(j->s); + if (Ld != 4) return stbi__err("bad DNL len", "Corrupt JPEG"); + if (NL != j->s->img_y) return stbi__err("bad DNL height", "Corrupt JPEG"); + } else { + if (!stbi__process_marker(j, 
m)) return 0; + } + m = stbi__get_marker(j); + } + if (j->progressive) + stbi__jpeg_finish(j); + return 1; +} + +// static jfif-centered resampling (across block boundaries) + +typedef stbi_uc *(*resample_row_func)(stbi_uc *out, stbi_uc *in0, stbi_uc *in1, + int w, int hs); + +#define stbi__div4(x) ((stbi_uc) ((x) >> 2)) + +static stbi_uc *resample_row_1(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs) +{ + STBI_NOTUSED(out); + STBI_NOTUSED(in_far); + STBI_NOTUSED(w); + STBI_NOTUSED(hs); + return in_near; +} + +static stbi_uc* stbi__resample_row_v_2(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs) +{ + // need to generate two samples vertically for every one in input + int i; + STBI_NOTUSED(hs); + for (i=0; i < w; ++i) + out[i] = stbi__div4(3*in_near[i] + in_far[i] + 2); + return out; +} + +static stbi_uc* stbi__resample_row_h_2(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs) +{ + // need to generate two samples horizontally for every one in input + int i; + stbi_uc *input = in_near; + + if (w == 1) { + // if only one sample, can't do any interpolation + out[0] = out[1] = input[0]; + return out; + } + + out[0] = input[0]; + out[1] = stbi__div4(input[0]*3 + input[1] + 2); + for (i=1; i < w-1; ++i) { + int n = 3*input[i]+2; + out[i*2+0] = stbi__div4(n+input[i-1]); + out[i*2+1] = stbi__div4(n+input[i+1]); + } + out[i*2+0] = stbi__div4(input[w-2]*3 + input[w-1] + 2); + out[i*2+1] = input[w-1]; + + STBI_NOTUSED(in_far); + STBI_NOTUSED(hs); + + return out; +} + +#define stbi__div16(x) ((stbi_uc) ((x) >> 4)) + +static stbi_uc *stbi__resample_row_hv_2(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs) +{ + // need to generate 2x2 samples for every one in input + int i,t0,t1; + if (w == 1) { + out[0] = out[1] = stbi__div4(3*in_near[0] + in_far[0] + 2); + return out; + } + + t1 = 3*in_near[0] + in_far[0]; + out[0] = stbi__div4(t1+2); + for (i=1; i < w; ++i) { + t0 = t1; + t1 = 3*in_near[i]+in_far[i]; + out[i*2-1] = stbi__div16(3*t0 + t1 + 8); + out[i*2 ] = stbi__div16(3*t1 + t0 + 8); + } + out[w*2-1] = stbi__div4(t1+2); + + STBI_NOTUSED(hs); + + return out; +} + +#if defined(STBI_SSE2) || defined(STBI_NEON) +static stbi_uc *stbi__resample_row_hv_2_simd(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs) +{ + // need to generate 2x2 samples for every one in input + int i=0,t0,t1; + + if (w == 1) { + out[0] = out[1] = stbi__div4(3*in_near[0] + in_far[0] + 2); + return out; + } + + t1 = 3*in_near[0] + in_far[0]; + // process groups of 8 pixels for as long as we can. + // note we can't handle the last pixel in a row in this loop + // because we need to handle the filter boundary conditions. + for (; i < ((w-1) & ~7); i += 8) { +#if defined(STBI_SSE2) + // load and perform the vertical filtering pass + // this uses 3*x + y = 4*x + (y - x) + __m128i zero = _mm_setzero_si128(); + __m128i farb = _mm_loadl_epi64((__m128i *) (in_far + i)); + __m128i nearb = _mm_loadl_epi64((__m128i *) (in_near + i)); + __m128i farw = _mm_unpacklo_epi8(farb, zero); + __m128i nearw = _mm_unpacklo_epi8(nearb, zero); + __m128i diff = _mm_sub_epi16(farw, nearw); + __m128i nears = _mm_slli_epi16(nearw, 2); + __m128i curr = _mm_add_epi16(nears, diff); // current row + + // horizontal filter works the same based on shifted vers of current + // row. "prev" is current row shifted right by 1 pixel; we need to + // insert the previous pixel value (from t1). + // "next" is current row shifted left by 1 pixel, with first pixel + // of next block of 8 pixels added in. 
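+ // note: _mm_slli_si128 / _mm_srli_si128 shift by *bytes*, so a shift of 2
+ // moves the 16-bit samples over by one pixel; the vacated lane is then
+ // patched with the correct neighbor value via _mm_insert_epi16.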
+ __m128i prv0 = _mm_slli_si128(curr, 2); + __m128i nxt0 = _mm_srli_si128(curr, 2); + __m128i prev = _mm_insert_epi16(prv0, t1, 0); + __m128i next = _mm_insert_epi16(nxt0, 3*in_near[i+8] + in_far[i+8], 7); + + // horizontal filter, polyphase implementation since it's convenient: + // even pixels = 3*cur + prev = cur*4 + (prev - cur) + // odd pixels = 3*cur + next = cur*4 + (next - cur) + // note the shared term. + __m128i bias = _mm_set1_epi16(8); + __m128i curs = _mm_slli_epi16(curr, 2); + __m128i prvd = _mm_sub_epi16(prev, curr); + __m128i nxtd = _mm_sub_epi16(next, curr); + __m128i curb = _mm_add_epi16(curs, bias); + __m128i even = _mm_add_epi16(prvd, curb); + __m128i odd = _mm_add_epi16(nxtd, curb); + + // interleave even and odd pixels, then undo scaling. + __m128i int0 = _mm_unpacklo_epi16(even, odd); + __m128i int1 = _mm_unpackhi_epi16(even, odd); + __m128i de0 = _mm_srli_epi16(int0, 4); + __m128i de1 = _mm_srli_epi16(int1, 4); + + // pack and write output + __m128i outv = _mm_packus_epi16(de0, de1); + _mm_storeu_si128((__m128i *) (out + i*2), outv); +#elif defined(STBI_NEON) + // load and perform the vertical filtering pass + // this uses 3*x + y = 4*x + (y - x) + uint8x8_t farb = vld1_u8(in_far + i); + uint8x8_t nearb = vld1_u8(in_near + i); + int16x8_t diff = vreinterpretq_s16_u16(vsubl_u8(farb, nearb)); + int16x8_t nears = vreinterpretq_s16_u16(vshll_n_u8(nearb, 2)); + int16x8_t curr = vaddq_s16(nears, diff); // current row + + // horizontal filter works the same based on shifted vers of current + // row. "prev" is current row shifted right by 1 pixel; we need to + // insert the previous pixel value (from t1). + // "next" is current row shifted left by 1 pixel, with first pixel + // of next block of 8 pixels added in. + int16x8_t prv0 = vextq_s16(curr, curr, 7); + int16x8_t nxt0 = vextq_s16(curr, curr, 1); + int16x8_t prev = vsetq_lane_s16(t1, prv0, 0); + int16x8_t next = vsetq_lane_s16(3*in_near[i+8] + in_far[i+8], nxt0, 7); + + // horizontal filter, polyphase implementation since it's convenient: + // even pixels = 3*cur + prev = cur*4 + (prev - cur) + // odd pixels = 3*cur + next = cur*4 + (next - cur) + // note the shared term. 
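+ // arithmetic matches the sse2 path (even = 3*cur + prev, odd = 3*cur + next,
+ // computed as cur*4 + (neighbor - cur)); no explicit +8 bias is needed here
+ // because vqrshrun_n_s16 below rounds (and saturates to u8) while narrowing.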
+ int16x8_t curs = vshlq_n_s16(curr, 2); + int16x8_t prvd = vsubq_s16(prev, curr); + int16x8_t nxtd = vsubq_s16(next, curr); + int16x8_t even = vaddq_s16(curs, prvd); + int16x8_t odd = vaddq_s16(curs, nxtd); + + // undo scaling and round, then store with even/odd phases interleaved + uint8x8x2_t o; + o.val[0] = vqrshrun_n_s16(even, 4); + o.val[1] = vqrshrun_n_s16(odd, 4); + vst2_u8(out + i*2, o); +#endif + + // "previous" value for next iter + t1 = 3*in_near[i+7] + in_far[i+7]; + } + + t0 = t1; + t1 = 3*in_near[i] + in_far[i]; + out[i*2] = stbi__div16(3*t1 + t0 + 8); + + for (++i; i < w; ++i) { + t0 = t1; + t1 = 3*in_near[i]+in_far[i]; + out[i*2-1] = stbi__div16(3*t0 + t1 + 8); + out[i*2 ] = stbi__div16(3*t1 + t0 + 8); + } + out[w*2-1] = stbi__div4(t1+2); + + STBI_NOTUSED(hs); + + return out; +} +#endif + +static stbi_uc *stbi__resample_row_generic(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs) +{ + // resample with nearest-neighbor + int i,j; + STBI_NOTUSED(in_far); + for (i=0; i < w; ++i) + for (j=0; j < hs; ++j) + out[i*hs+j] = in_near[i]; + return out; +} + +// this is a reduced-precision calculation of YCbCr-to-RGB introduced +// to make sure the code produces the same results in both SIMD and scalar +#define stbi__float2fixed(x) (((int) ((x) * 4096.0f + 0.5f)) << 8) +static void stbi__YCbCr_to_RGB_row(stbi_uc *out, const stbi_uc *y, const stbi_uc *pcb, const stbi_uc *pcr, int count, int step) +{ + int i; + for (i=0; i < count; ++i) { + int y_fixed = (y[i] << 20) + (1<<19); // rounding + int r,g,b; + int cr = pcr[i] - 128; + int cb = pcb[i] - 128; + r = y_fixed + cr* stbi__float2fixed(1.40200f); + g = y_fixed + (cr*-stbi__float2fixed(0.71414f)) + ((cb*-stbi__float2fixed(0.34414f)) & 0xffff0000); + b = y_fixed + cb* stbi__float2fixed(1.77200f); + r >>= 20; + g >>= 20; + b >>= 20; + if ((unsigned) r > 255) { if (r < 0) r = 0; else r = 255; } + if ((unsigned) g > 255) { if (g < 0) g = 0; else g = 255; } + if ((unsigned) b > 255) { if (b < 0) b = 0; else b = 255; } + out[0] = (stbi_uc)r; + out[1] = (stbi_uc)g; + out[2] = (stbi_uc)b; + out[3] = 255; + out += step; + } +} + +#if defined(STBI_SSE2) || defined(STBI_NEON) +static void stbi__YCbCr_to_RGB_simd(stbi_uc *out, stbi_uc const *y, stbi_uc const *pcb, stbi_uc const *pcr, int count, int step) +{ + int i = 0; + +#ifdef STBI_SSE2 + // step == 3 is pretty ugly on the final interleave, and i'm not convinced + // it's useful in practice (you wouldn't use it for textures, for example). + // so just accelerate step == 4 case. + if (step == 4) { + // this is a fairly straightforward implementation and not super-optimized. 
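+ // the constants below are the jfif coefficients scaled by 4096, kept in 16-bit
+ // lanes; cr/cb are centered by flipping the sign bit and placed in the high
+ // byte of each lane, so _mm_mulhi_epi16 (high 16 bits of the 32-bit product)
+ // leaves coeff*chroma in roughly 12.4 fixed point. y is expanded to y*16+8
+ // (the +8 pre-rounds the final shift), so the arithmetic >>4 plus
+ // _mm_packus_epi16 reproduce the scalar formula with saturation to 0..255.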
+ __m128i signflip = _mm_set1_epi8(-0x80); + __m128i cr_const0 = _mm_set1_epi16( (short) ( 1.40200f*4096.0f+0.5f)); + __m128i cr_const1 = _mm_set1_epi16( - (short) ( 0.71414f*4096.0f+0.5f)); + __m128i cb_const0 = _mm_set1_epi16( - (short) ( 0.34414f*4096.0f+0.5f)); + __m128i cb_const1 = _mm_set1_epi16( (short) ( 1.77200f*4096.0f+0.5f)); + __m128i y_bias = _mm_set1_epi8((char) (unsigned char) 128); + __m128i xw = _mm_set1_epi16(255); // alpha channel + + for (; i+7 < count; i += 8) { + // load + __m128i y_bytes = _mm_loadl_epi64((__m128i *) (y+i)); + __m128i cr_bytes = _mm_loadl_epi64((__m128i *) (pcr+i)); + __m128i cb_bytes = _mm_loadl_epi64((__m128i *) (pcb+i)); + __m128i cr_biased = _mm_xor_si128(cr_bytes, signflip); // -128 + __m128i cb_biased = _mm_xor_si128(cb_bytes, signflip); // -128 + + // unpack to short (and left-shift cr, cb by 8) + __m128i yw = _mm_unpacklo_epi8(y_bias, y_bytes); + __m128i crw = _mm_unpacklo_epi8(_mm_setzero_si128(), cr_biased); + __m128i cbw = _mm_unpacklo_epi8(_mm_setzero_si128(), cb_biased); + + // color transform + __m128i yws = _mm_srli_epi16(yw, 4); + __m128i cr0 = _mm_mulhi_epi16(cr_const0, crw); + __m128i cb0 = _mm_mulhi_epi16(cb_const0, cbw); + __m128i cb1 = _mm_mulhi_epi16(cbw, cb_const1); + __m128i cr1 = _mm_mulhi_epi16(crw, cr_const1); + __m128i rws = _mm_add_epi16(cr0, yws); + __m128i gwt = _mm_add_epi16(cb0, yws); + __m128i bws = _mm_add_epi16(yws, cb1); + __m128i gws = _mm_add_epi16(gwt, cr1); + + // descale + __m128i rw = _mm_srai_epi16(rws, 4); + __m128i bw = _mm_srai_epi16(bws, 4); + __m128i gw = _mm_srai_epi16(gws, 4); + + // back to byte, set up for transpose + __m128i brb = _mm_packus_epi16(rw, bw); + __m128i gxb = _mm_packus_epi16(gw, xw); + + // transpose to interleave channels + __m128i t0 = _mm_unpacklo_epi8(brb, gxb); + __m128i t1 = _mm_unpackhi_epi8(brb, gxb); + __m128i o0 = _mm_unpacklo_epi16(t0, t1); + __m128i o1 = _mm_unpackhi_epi16(t0, t1); + + // store + _mm_storeu_si128((__m128i *) (out + 0), o0); + _mm_storeu_si128((__m128i *) (out + 16), o1); + out += 32; + } + } +#endif + +#ifdef STBI_NEON + // in this version, step=3 support would be easy to add. but is there demand? + if (step == 4) { + // this is a fairly straightforward implementation and not super-optimized. 
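+ // same scheme as the sse2 version, with one twist: cr/cb are widened by a
+ // shift of only 7 (not 8) because vqdmulhq_s16 is a *doubling* high multiply,
+ // i.e. (2*a*b)>>16, so the extra factor of two cancels; vqrshrun_n_s16 then
+ // rounds, shifts right by 4 and saturates to u8 in a single step.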
+ uint8x8_t signflip = vdup_n_u8(0x80); + int16x8_t cr_const0 = vdupq_n_s16( (short) ( 1.40200f*4096.0f+0.5f)); + int16x8_t cr_const1 = vdupq_n_s16( - (short) ( 0.71414f*4096.0f+0.5f)); + int16x8_t cb_const0 = vdupq_n_s16( - (short) ( 0.34414f*4096.0f+0.5f)); + int16x8_t cb_const1 = vdupq_n_s16( (short) ( 1.77200f*4096.0f+0.5f)); + + for (; i+7 < count; i += 8) { + // load + uint8x8_t y_bytes = vld1_u8(y + i); + uint8x8_t cr_bytes = vld1_u8(pcr + i); + uint8x8_t cb_bytes = vld1_u8(pcb + i); + int8x8_t cr_biased = vreinterpret_s8_u8(vsub_u8(cr_bytes, signflip)); + int8x8_t cb_biased = vreinterpret_s8_u8(vsub_u8(cb_bytes, signflip)); + + // expand to s16 + int16x8_t yws = vreinterpretq_s16_u16(vshll_n_u8(y_bytes, 4)); + int16x8_t crw = vshll_n_s8(cr_biased, 7); + int16x8_t cbw = vshll_n_s8(cb_biased, 7); + + // color transform + int16x8_t cr0 = vqdmulhq_s16(crw, cr_const0); + int16x8_t cb0 = vqdmulhq_s16(cbw, cb_const0); + int16x8_t cr1 = vqdmulhq_s16(crw, cr_const1); + int16x8_t cb1 = vqdmulhq_s16(cbw, cb_const1); + int16x8_t rws = vaddq_s16(yws, cr0); + int16x8_t gws = vaddq_s16(vaddq_s16(yws, cb0), cr1); + int16x8_t bws = vaddq_s16(yws, cb1); + + // undo scaling, round, convert to byte + uint8x8x4_t o; + o.val[0] = vqrshrun_n_s16(rws, 4); + o.val[1] = vqrshrun_n_s16(gws, 4); + o.val[2] = vqrshrun_n_s16(bws, 4); + o.val[3] = vdup_n_u8(255); + + // store, interleaving r/g/b/a + vst4_u8(out, o); + out += 8*4; + } + } +#endif + + for (; i < count; ++i) { + int y_fixed = (y[i] << 20) + (1<<19); // rounding + int r,g,b; + int cr = pcr[i] - 128; + int cb = pcb[i] - 128; + r = y_fixed + cr* stbi__float2fixed(1.40200f); + g = y_fixed + cr*-stbi__float2fixed(0.71414f) + ((cb*-stbi__float2fixed(0.34414f)) & 0xffff0000); + b = y_fixed + cb* stbi__float2fixed(1.77200f); + r >>= 20; + g >>= 20; + b >>= 20; + if ((unsigned) r > 255) { if (r < 0) r = 0; else r = 255; } + if ((unsigned) g > 255) { if (g < 0) g = 0; else g = 255; } + if ((unsigned) b > 255) { if (b < 0) b = 0; else b = 255; } + out[0] = (stbi_uc)r; + out[1] = (stbi_uc)g; + out[2] = (stbi_uc)b; + out[3] = 255; + out += step; + } +} +#endif + +// set up the kernels +static void stbi__setup_jpeg(stbi__jpeg *j) +{ + j->idct_block_kernel = stbi__idct_block; + j->YCbCr_to_RGB_kernel = stbi__YCbCr_to_RGB_row; + j->resample_row_hv_2_kernel = stbi__resample_row_hv_2; + +#ifdef STBI_SSE2 + if (stbi__sse2_available()) { + j->idct_block_kernel = stbi__idct_simd; + j->YCbCr_to_RGB_kernel = stbi__YCbCr_to_RGB_simd; + j->resample_row_hv_2_kernel = stbi__resample_row_hv_2_simd; + } +#endif + +#ifdef STBI_NEON + j->idct_block_kernel = stbi__idct_simd; + j->YCbCr_to_RGB_kernel = stbi__YCbCr_to_RGB_simd; + j->resample_row_hv_2_kernel = stbi__resample_row_hv_2_simd; +#endif +} + +// clean up the temporary component buffers +static void stbi__cleanup_jpeg(stbi__jpeg *j) +{ + stbi__free_jpeg_components(j, j->s->img_n, 0); +} + +typedef struct +{ + resample_row_func resample; + stbi_uc *line0,*line1; + int hs,vs; // expansion factor in each axis + int w_lores; // horizontal pixels pre-expansion + int ystep; // how far through vertical expansion we are + int ypos; // which pre-expansion row we're on +} stbi__resample; + +// fast 0..255 * 0..255 => 0..255 rounded multiplication +static stbi_uc stbi__blinn_8x8(stbi_uc x, stbi_uc y) +{ + unsigned int t = x*y + 128; + return (stbi_uc) ((t + (t >>8)) >> 8); +} + +static stbi_uc *load_jpeg_image(stbi__jpeg *z, int *out_x, int *out_y, int *comp, int req_comp) +{ + int n, decode_n, is_rgb; + z->s->img_n = 0; // make 
stbi__cleanup_jpeg safe + + // validate req_comp + if (req_comp < 0 || req_comp > 4) return stbi__errpuc("bad req_comp", "Internal error"); + + // load a jpeg image from whichever source, but leave in YCbCr format + if (!stbi__decode_jpeg_image(z)) { stbi__cleanup_jpeg(z); return NULL; } + + // determine actual number of components to generate + n = req_comp ? req_comp : z->s->img_n >= 3 ? 3 : 1; + + is_rgb = z->s->img_n == 3 && (z->rgb == 3 || (z->app14_color_transform == 0 && !z->jfif)); + + if (z->s->img_n == 3 && n < 3 && !is_rgb) + decode_n = 1; + else + decode_n = z->s->img_n; + + // resample and color-convert + { + int k; + unsigned int i,j; + stbi_uc *output; + stbi_uc *coutput[4] = { NULL, NULL, NULL, NULL }; + + stbi__resample res_comp[4]; + + for (k=0; k < decode_n; ++k) { + stbi__resample *r = &res_comp[k]; + + // allocate line buffer big enough for upsampling off the edges + // with upsample factor of 4 + z->img_comp[k].linebuf = (stbi_uc *) stbi__malloc(z->s->img_x + 3); + if (!z->img_comp[k].linebuf) { stbi__cleanup_jpeg(z); return stbi__errpuc("outofmem", "Out of memory"); } + + r->hs = z->img_h_max / z->img_comp[k].h; + r->vs = z->img_v_max / z->img_comp[k].v; + r->ystep = r->vs >> 1; + r->w_lores = (z->s->img_x + r->hs-1) / r->hs; + r->ypos = 0; + r->line0 = r->line1 = z->img_comp[k].data; + + if (r->hs == 1 && r->vs == 1) r->resample = resample_row_1; + else if (r->hs == 1 && r->vs == 2) r->resample = stbi__resample_row_v_2; + else if (r->hs == 2 && r->vs == 1) r->resample = stbi__resample_row_h_2; + else if (r->hs == 2 && r->vs == 2) r->resample = z->resample_row_hv_2_kernel; + else r->resample = stbi__resample_row_generic; + } + + // can't error after this so, this is safe + output = (stbi_uc *) stbi__malloc_mad3(n, z->s->img_x, z->s->img_y, 1); + if (!output) { stbi__cleanup_jpeg(z); return stbi__errpuc("outofmem", "Out of memory"); } + + // now go ahead and resample + for (j=0; j < z->s->img_y; ++j) { + stbi_uc *out = output + n * z->s->img_x * j; + for (k=0; k < decode_n; ++k) { + stbi__resample *r = &res_comp[k]; + int y_bot = r->ystep >= (r->vs >> 1); + coutput[k] = r->resample(z->img_comp[k].linebuf, + y_bot ? r->line1 : r->line0, + y_bot ? r->line0 : r->line1, + r->w_lores, r->hs); + if (++r->ystep >= r->vs) { + r->ystep = 0; + r->line0 = r->line1; + if (++r->ypos < z->img_comp[k].y) + r->line1 += z->img_comp[k].w2; + } + } + if (n >= 3) { + stbi_uc *y = coutput[0]; + if (z->s->img_n == 3) { + if (is_rgb) { + for (i=0; i < z->s->img_x; ++i) { + out[0] = y[i]; + out[1] = coutput[1][i]; + out[2] = coutput[2][i]; + out[3] = 255; + out += n; + } + } else { + z->YCbCr_to_RGB_kernel(out, y, coutput[1], coutput[2], z->s->img_x, n); + } + } else if (z->s->img_n == 4) { + if (z->app14_color_transform == 0) { // CMYK + for (i=0; i < z->s->img_x; ++i) { + stbi_uc m = coutput[3][i]; + out[0] = stbi__blinn_8x8(coutput[0][i], m); + out[1] = stbi__blinn_8x8(coutput[1][i], m); + out[2] = stbi__blinn_8x8(coutput[2][i], m); + out[3] = 255; + out += n; + } + } else if (z->app14_color_transform == 2) { // YCCK + z->YCbCr_to_RGB_kernel(out, y, coutput[1], coutput[2], z->s->img_x, n); + for (i=0; i < z->s->img_x; ++i) { + stbi_uc m = coutput[3][i]; + out[0] = stbi__blinn_8x8(255 - out[0], m); + out[1] = stbi__blinn_8x8(255 - out[1], m); + out[2] = stbi__blinn_8x8(255 - out[2], m); + out += n; + } + } else { // YCbCr + alpha? 
Ignore the fourth channel for now + z->YCbCr_to_RGB_kernel(out, y, coutput[1], coutput[2], z->s->img_x, n); + } + } else + for (i=0; i < z->s->img_x; ++i) { + out[0] = out[1] = out[2] = y[i]; + out[3] = 255; // not used if n==3 + out += n; + } + } else { + if (is_rgb) { + if (n == 1) + for (i=0; i < z->s->img_x; ++i) + *out++ = stbi__compute_y(coutput[0][i], coutput[1][i], coutput[2][i]); + else { + for (i=0; i < z->s->img_x; ++i, out += 2) { + out[0] = stbi__compute_y(coutput[0][i], coutput[1][i], coutput[2][i]); + out[1] = 255; + } + } + } else if (z->s->img_n == 4 && z->app14_color_transform == 0) { + for (i=0; i < z->s->img_x; ++i) { + stbi_uc m = coutput[3][i]; + stbi_uc r = stbi__blinn_8x8(coutput[0][i], m); + stbi_uc g = stbi__blinn_8x8(coutput[1][i], m); + stbi_uc b = stbi__blinn_8x8(coutput[2][i], m); + out[0] = stbi__compute_y(r, g, b); + out[1] = 255; + out += n; + } + } else if (z->s->img_n == 4 && z->app14_color_transform == 2) { + for (i=0; i < z->s->img_x; ++i) { + out[0] = stbi__blinn_8x8(255 - coutput[0][i], coutput[3][i]); + out[1] = 255; + out += n; + } + } else { + stbi_uc *y = coutput[0]; + if (n == 1) + for (i=0; i < z->s->img_x; ++i) out[i] = y[i]; + else + for (i=0; i < z->s->img_x; ++i) { *out++ = y[i]; *out++ = 255; } + } + } + } + stbi__cleanup_jpeg(z); + *out_x = z->s->img_x; + *out_y = z->s->img_y; + if (comp) *comp = z->s->img_n >= 3 ? 3 : 1; // report original components, not output + return output; + } +} + +static void *stbi__jpeg_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri) +{ + unsigned char* result; + stbi__jpeg* j = (stbi__jpeg*) stbi__malloc(sizeof(stbi__jpeg)); + STBI_NOTUSED(ri); + j->s = s; + stbi__setup_jpeg(j); + result = load_jpeg_image(j, x,y,comp,req_comp); + STBI_FREE(j); + return result; +} + +static int stbi__jpeg_test(stbi__context *s) +{ + int r; + stbi__jpeg* j = (stbi__jpeg*)stbi__malloc(sizeof(stbi__jpeg)); + j->s = s; + stbi__setup_jpeg(j); + r = stbi__decode_jpeg_header(j, STBI__SCAN_type); + stbi__rewind(s); + STBI_FREE(j); + return r; +} + +static int stbi__jpeg_info_raw(stbi__jpeg *j, int *x, int *y, int *comp) +{ + if (!stbi__decode_jpeg_header(j, STBI__SCAN_header)) { + stbi__rewind( j->s ); + return 0; + } + if (x) *x = j->s->img_x; + if (y) *y = j->s->img_y; + if (comp) *comp = j->s->img_n >= 3 ? 
3 : 1; + return 1; +} + +static int stbi__jpeg_info(stbi__context *s, int *x, int *y, int *comp) +{ + int result; + stbi__jpeg* j = (stbi__jpeg*) (stbi__malloc(sizeof(stbi__jpeg))); + j->s = s; + result = stbi__jpeg_info_raw(j, x, y, comp); + STBI_FREE(j); + return result; +} +#endif + +// public domain zlib decode v0.2 Sean Barrett 2006-11-18 +// simple implementation +// - all input must be provided in an upfront buffer +// - all output is written to a single output buffer (can malloc/realloc) +// performance +// - fast huffman + +#ifndef STBI_NO_ZLIB + +// fast-way is faster to check than jpeg huffman, but slow way is slower +#define STBI__ZFAST_BITS 9 // accelerate all cases in default tables +#define STBI__ZFAST_MASK ((1 << STBI__ZFAST_BITS) - 1) + +// zlib-style huffman encoding +// (jpegs packs from left, zlib from right, so can't share code) +typedef struct +{ + stbi__uint16 fast[1 << STBI__ZFAST_BITS]; + stbi__uint16 firstcode[16]; + int maxcode[17]; + stbi__uint16 firstsymbol[16]; + stbi_uc size[288]; + stbi__uint16 value[288]; +} stbi__zhuffman; + +stbi_inline static int stbi__bitreverse16(int n) +{ + n = ((n & 0xAAAA) >> 1) | ((n & 0x5555) << 1); + n = ((n & 0xCCCC) >> 2) | ((n & 0x3333) << 2); + n = ((n & 0xF0F0) >> 4) | ((n & 0x0F0F) << 4); + n = ((n & 0xFF00) >> 8) | ((n & 0x00FF) << 8); + return n; +} + +stbi_inline static int stbi__bit_reverse(int v, int bits) +{ + STBI_ASSERT(bits <= 16); + // to bit reverse n bits, reverse 16 and shift + // e.g. 11 bits, bit reverse and shift away 5 + return stbi__bitreverse16(v) >> (16-bits); +} + +static int stbi__zbuild_huffman(stbi__zhuffman *z, const stbi_uc *sizelist, int num) +{ + int i,k=0; + int code, next_code[16], sizes[17]; + + // DEFLATE spec for generating codes + memset(sizes, 0, sizeof(sizes)); + memset(z->fast, 0, sizeof(z->fast)); + for (i=0; i < num; ++i) + ++sizes[sizelist[i]]; + sizes[0] = 0; + for (i=1; i < 16; ++i) + if (sizes[i] > (1 << i)) + return stbi__err("bad sizes", "Corrupt PNG"); + code = 0; + for (i=1; i < 16; ++i) { + next_code[i] = code; + z->firstcode[i] = (stbi__uint16) code; + z->firstsymbol[i] = (stbi__uint16) k; + code = (code + sizes[i]); + if (sizes[i]) + if (code-1 >= (1 << i)) return stbi__err("bad codelengths","Corrupt PNG"); + z->maxcode[i] = code << (16-i); // preshift for inner loop + code <<= 1; + k += sizes[i]; + } + z->maxcode[16] = 0x10000; // sentinel + for (i=0; i < num; ++i) { + int s = sizelist[i]; + if (s) { + int c = next_code[s] - z->firstcode[s] + z->firstsymbol[s]; + stbi__uint16 fastv = (stbi__uint16) ((s << 9) | i); + z->size [c] = (stbi_uc ) s; + z->value[c] = (stbi__uint16) i; + if (s <= STBI__ZFAST_BITS) { + int j = stbi__bit_reverse(next_code[s],s); + while (j < (1 << STBI__ZFAST_BITS)) { + z->fast[j] = fastv; + j += (1 << s); + } + } + ++next_code[s]; + } + } + return 1; +} + +// zlib-from-memory implementation for PNG reading +// because PNG allows splitting the zlib stream arbitrarily, +// and it's annoying structurally to have PNG call ZLIB call PNG, +// we require PNG read all the IDATs and combine them into a single +// memory buffer + +typedef struct +{ + stbi_uc *zbuffer, *zbuffer_end; + int num_bits; + stbi__uint32 code_buffer; + + char *zout; + char *zout_start; + char *zout_end; + int z_expandable; + + stbi__zhuffman z_length, z_distance; +} stbi__zbuf; + +stbi_inline static int stbi__zeof(stbi__zbuf *z) +{ + return (z->zbuffer >= z->zbuffer_end); +} + +stbi_inline static stbi_uc stbi__zget8(stbi__zbuf *z) +{ + return stbi__zeof(z) ? 
0 : *z->zbuffer++; +} + +static void stbi__fill_bits(stbi__zbuf *z) +{ + do { + if (z->code_buffer >= (1U << z->num_bits)) { + z->zbuffer = z->zbuffer_end; /* treat this as EOF so we fail. */ + return; + } + z->code_buffer |= (unsigned int) stbi__zget8(z) << z->num_bits; + z->num_bits += 8; + } while (z->num_bits <= 24); +} + +stbi_inline static unsigned int stbi__zreceive(stbi__zbuf *z, int n) +{ + unsigned int k; + if (z->num_bits < n) stbi__fill_bits(z); + k = z->code_buffer & ((1 << n) - 1); + z->code_buffer >>= n; + z->num_bits -= n; + return k; +} + +static int stbi__zhuffman_decode_slowpath(stbi__zbuf *a, stbi__zhuffman *z) +{ + int b,s,k; + // not resolved by fast table, so compute it the slow way + // use jpeg approach, which requires MSbits at top + k = stbi__bit_reverse(a->code_buffer, 16); + for (s=STBI__ZFAST_BITS+1; ; ++s) + if (k < z->maxcode[s]) + break; + if (s >= 16) return -1; // invalid code! + // code size is s, so: + b = (k >> (16-s)) - z->firstcode[s] + z->firstsymbol[s]; + if (b >= sizeof (z->size)) return -1; // some data was corrupt somewhere! + if (z->size[b] != s) return -1; // was originally an assert, but report failure instead. + a->code_buffer >>= s; + a->num_bits -= s; + return z->value[b]; +} + +stbi_inline static int stbi__zhuffman_decode(stbi__zbuf *a, stbi__zhuffman *z) +{ + int b,s; + if (a->num_bits < 16) { + if (stbi__zeof(a)) { + return -1; /* report error for unexpected end of data. */ + } + stbi__fill_bits(a); + } + b = z->fast[a->code_buffer & STBI__ZFAST_MASK]; + if (b) { + s = b >> 9; + a->code_buffer >>= s; + a->num_bits -= s; + return b & 511; + } + return stbi__zhuffman_decode_slowpath(a, z); +} + +static int stbi__zexpand(stbi__zbuf *z, char *zout, int n) // need to make room for n bytes +{ + char *q; + unsigned int cur, limit, old_limit; + z->zout = zout; + if (!z->z_expandable) return stbi__err("output buffer limit","Corrupt PNG"); + cur = (unsigned int) (z->zout - z->zout_start); + limit = old_limit = (unsigned) (z->zout_end - z->zout_start); + if (UINT_MAX - cur < (unsigned) n) return stbi__err("outofmem", "Out of memory"); + while (cur + n > limit) { + if(limit > UINT_MAX / 2) return stbi__err("outofmem", "Out of memory"); + limit *= 2; + } + q = (char *) STBI_REALLOC_SIZED(z->zout_start, old_limit, limit); + STBI_NOTUSED(old_limit); + if (q == NULL) return stbi__err("outofmem", "Out of memory"); + z->zout_start = q; + z->zout = q + cur; + z->zout_end = q + limit; + return 1; +} + +static const int stbi__zlength_base[31] = { + 3,4,5,6,7,8,9,10,11,13, + 15,17,19,23,27,31,35,43,51,59, + 67,83,99,115,131,163,195,227,258,0,0 }; + +static const int stbi__zlength_extra[31]= +{ 0,0,0,0,0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4,5,5,5,5,0,0,0 }; + +static const int stbi__zdist_base[32] = { 1,2,3,4,5,7,9,13,17,25,33,49,65,97,129,193, +257,385,513,769,1025,1537,2049,3073,4097,6145,8193,12289,16385,24577,0,0}; + +static const int stbi__zdist_extra[32] = +{ 0,0,0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9,10,10,11,11,12,12,13,13}; + +static int stbi__parse_huffman_block(stbi__zbuf *a) +{ + char *zout = a->zout; + for(;;) { + int z = stbi__zhuffman_decode(a, &a->z_length); + if (z < 256) { + if (z < 0) return stbi__err("bad huffman code","Corrupt PNG"); // error in huffman codes + if (zout >= a->zout_end) { + if (!stbi__zexpand(a, zout, 1)) return 0; + zout = a->zout; + } + *zout++ = (char) z; + } else { + stbi_uc *p; + int len,dist; + if (z == 256) { + a->zout = zout; + return 1; + } + z -= 257; + len = stbi__zlength_base[z]; + if (stbi__zlength_extra[z]) 
len += stbi__zreceive(a, stbi__zlength_extra[z]); + z = stbi__zhuffman_decode(a, &a->z_distance); + if (z < 0) return stbi__err("bad huffman code","Corrupt PNG"); + dist = stbi__zdist_base[z]; + if (stbi__zdist_extra[z]) dist += stbi__zreceive(a, stbi__zdist_extra[z]); + if (zout - a->zout_start < dist) return stbi__err("bad dist","Corrupt PNG"); + if (zout + len > a->zout_end) { + if (!stbi__zexpand(a, zout, len)) return 0; + zout = a->zout; + } + p = (stbi_uc *) (zout - dist); + if (dist == 1) { // run of one byte; common in images. + stbi_uc v = *p; + if (len) { do *zout++ = v; while (--len); } + } else { + if (len) { do *zout++ = *p++; while (--len); } + } + } + } +} + +static int stbi__compute_huffman_codes(stbi__zbuf *a) +{ + static const stbi_uc length_dezigzag[19] = { 16,17,18,0,8,7,9,6,10,5,11,4,12,3,13,2,14,1,15 }; + stbi__zhuffman z_codelength; + stbi_uc lencodes[286+32+137];//padding for maximum single op + stbi_uc codelength_sizes[19]; + int i,n; + + int hlit = stbi__zreceive(a,5) + 257; + int hdist = stbi__zreceive(a,5) + 1; + int hclen = stbi__zreceive(a,4) + 4; + int ntot = hlit + hdist; + + memset(codelength_sizes, 0, sizeof(codelength_sizes)); + for (i=0; i < hclen; ++i) { + int s = stbi__zreceive(a,3); + codelength_sizes[length_dezigzag[i]] = (stbi_uc) s; + } + if (!stbi__zbuild_huffman(&z_codelength, codelength_sizes, 19)) return 0; + + n = 0; + while (n < ntot) { + int c = stbi__zhuffman_decode(a, &z_codelength); + if (c < 0 || c >= 19) return stbi__err("bad codelengths", "Corrupt PNG"); + if (c < 16) + lencodes[n++] = (stbi_uc) c; + else { + stbi_uc fill = 0; + if (c == 16) { + c = stbi__zreceive(a,2)+3; + if (n == 0) return stbi__err("bad codelengths", "Corrupt PNG"); + fill = lencodes[n-1]; + } else if (c == 17) { + c = stbi__zreceive(a,3)+3; + } else if (c == 18) { + c = stbi__zreceive(a,7)+11; + } else { + return stbi__err("bad codelengths", "Corrupt PNG"); + } + if (ntot - n < c) return stbi__err("bad codelengths", "Corrupt PNG"); + memset(lencodes+n, fill, c); + n += c; + } + } + if (n != ntot) return stbi__err("bad codelengths","Corrupt PNG"); + if (!stbi__zbuild_huffman(&a->z_length, lencodes, hlit)) return 0; + if (!stbi__zbuild_huffman(&a->z_distance, lencodes+hlit, hdist)) return 0; + return 1; +} + +static int stbi__parse_uncompressed_block(stbi__zbuf *a) +{ + stbi_uc header[4]; + int len,nlen,k; + if (a->num_bits & 7) + stbi__zreceive(a, a->num_bits & 7); // discard + // drain the bit-packed data into header + k = 0; + while (a->num_bits > 0) { + header[k++] = (stbi_uc) (a->code_buffer & 255); // suppress MSVC run-time check + a->code_buffer >>= 8; + a->num_bits -= 8; + } + if (a->num_bits < 0) return stbi__err("zlib corrupt","Corrupt PNG"); + // now fill header the normal way + while (k < 4) + header[k++] = stbi__zget8(a); + len = header[1] * 256 + header[0]; + nlen = header[3] * 256 + header[2]; + if (nlen != (len ^ 0xffff)) return stbi__err("zlib corrupt","Corrupt PNG"); + if (a->zbuffer + len > a->zbuffer_end) return stbi__err("read past buffer","Corrupt PNG"); + if (a->zout + len > a->zout_end) + if (!stbi__zexpand(a, a->zout, len)) return 0; + memcpy(a->zout, a->zbuffer, len); + a->zbuffer += len; + a->zout += len; + return 1; +} + +static int stbi__parse_zlib_header(stbi__zbuf *a) +{ + int cmf = stbi__zget8(a); + int cm = cmf & 15; + /* int cinfo = cmf >> 4; */ + int flg = stbi__zget8(a); + if (stbi__zeof(a)) return stbi__err("bad zlib header","Corrupt PNG"); // zlib spec + if ((cmf*256+flg) % 31 != 0) return stbi__err("bad zlib header","Corrupt 
PNG"); // zlib spec + if (flg & 32) return stbi__err("no preset dict","Corrupt PNG"); // preset dictionary not allowed in png + if (cm != 8) return stbi__err("bad compression","Corrupt PNG"); // DEFLATE required for png + // window = 1 << (8 + cinfo)... but who cares, we fully buffer output + return 1; +} + +static const stbi_uc stbi__zdefault_length[288] = +{ + 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, + 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, + 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, + 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, + 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, + 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, + 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, + 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, + 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7, 7,7,7,7,7,7,7,7,8,8,8,8,8,8,8,8 +}; +static const stbi_uc stbi__zdefault_distance[32] = +{ + 5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5 +}; +/* +Init algorithm: +{ + int i; // use <= to match clearly with spec + for (i=0; i <= 143; ++i) stbi__zdefault_length[i] = 8; + for ( ; i <= 255; ++i) stbi__zdefault_length[i] = 9; + for ( ; i <= 279; ++i) stbi__zdefault_length[i] = 7; + for ( ; i <= 287; ++i) stbi__zdefault_length[i] = 8; + + for (i=0; i <= 31; ++i) stbi__zdefault_distance[i] = 5; +} +*/ + +static int stbi__parse_zlib(stbi__zbuf *a, int parse_header) +{ + int final, type; + if (parse_header) + if (!stbi__parse_zlib_header(a)) return 0; + a->num_bits = 0; + a->code_buffer = 0; + do { + final = stbi__zreceive(a,1); + type = stbi__zreceive(a,2); + if (type == 0) { + if (!stbi__parse_uncompressed_block(a)) return 0; + } else if (type == 3) { + return 0; + } else { + if (type == 1) { + // use fixed code lengths + if (!stbi__zbuild_huffman(&a->z_length , stbi__zdefault_length , 288)) return 0; + if (!stbi__zbuild_huffman(&a->z_distance, stbi__zdefault_distance, 32)) return 0; + } else { + if (!stbi__compute_huffman_codes(a)) return 0; + } + if (!stbi__parse_huffman_block(a)) return 0; + } + } while (!final); + return 1; +} + +static int stbi__do_zlib(stbi__zbuf *a, char *obuf, int olen, int exp, int parse_header) +{ + a->zout_start = obuf; + a->zout = obuf; + a->zout_end = obuf + olen; + a->z_expandable = exp; + + return stbi__parse_zlib(a, parse_header); +} + +STBIDEF char *stbi_zlib_decode_malloc_guesssize(const char *buffer, int len, int initial_size, int *outlen) +{ + stbi__zbuf a; + char *p = (char *) stbi__malloc(initial_size); + if (p == NULL) return NULL; + a.zbuffer = (stbi_uc *) buffer; + a.zbuffer_end = (stbi_uc *) buffer + len; + if (stbi__do_zlib(&a, p, initial_size, 1, 1)) { + if (outlen) *outlen = (int) (a.zout - a.zout_start); + return a.zout_start; + } else { + STBI_FREE(a.zout_start); + return NULL; + } +} + +STBIDEF char *stbi_zlib_decode_malloc(char const *buffer, int len, int *outlen) +{ + return stbi_zlib_decode_malloc_guesssize(buffer, len, 16384, outlen); +} + +STBIDEF char *stbi_zlib_decode_malloc_guesssize_headerflag(const char *buffer, int len, int initial_size, int *outlen, int parse_header) +{ + stbi__zbuf a; + char *p = (char *) stbi__malloc(initial_size); + if (p == NULL) return NULL; + a.zbuffer = (stbi_uc *) buffer; + a.zbuffer_end = (stbi_uc *) buffer + len; + if (stbi__do_zlib(&a, p, initial_size, 1, parse_header)) { + if (outlen) *outlen = (int) (a.zout - a.zout_start); + return a.zout_start; + } else { + 
STBI_FREE(a.zout_start); + return NULL; + } +} + +STBIDEF int stbi_zlib_decode_buffer(char *obuffer, int olen, char const *ibuffer, int ilen) +{ + stbi__zbuf a; + a.zbuffer = (stbi_uc *) ibuffer; + a.zbuffer_end = (stbi_uc *) ibuffer + ilen; + if (stbi__do_zlib(&a, obuffer, olen, 0, 1)) + return (int) (a.zout - a.zout_start); + else + return -1; +} + +STBIDEF char *stbi_zlib_decode_noheader_malloc(char const *buffer, int len, int *outlen) +{ + stbi__zbuf a; + char *p = (char *) stbi__malloc(16384); + if (p == NULL) return NULL; + a.zbuffer = (stbi_uc *) buffer; + a.zbuffer_end = (stbi_uc *) buffer+len; + if (stbi__do_zlib(&a, p, 16384, 1, 0)) { + if (outlen) *outlen = (int) (a.zout - a.zout_start); + return a.zout_start; + } else { + STBI_FREE(a.zout_start); + return NULL; + } +} + +STBIDEF int stbi_zlib_decode_noheader_buffer(char *obuffer, int olen, const char *ibuffer, int ilen) +{ + stbi__zbuf a; + a.zbuffer = (stbi_uc *) ibuffer; + a.zbuffer_end = (stbi_uc *) ibuffer + ilen; + if (stbi__do_zlib(&a, obuffer, olen, 0, 0)) + return (int) (a.zout - a.zout_start); + else + return -1; +} +#endif + +// public domain "baseline" PNG decoder v0.10 Sean Barrett 2006-11-18 +// simple implementation +// - only 8-bit samples +// - no CRC checking +// - allocates lots of intermediate memory +// - avoids problem of streaming data between subsystems +// - avoids explicit window management +// performance +// - uses stb_zlib, a PD zlib implementation with fast huffman decoding + +#ifndef STBI_NO_PNG +typedef struct +{ + stbi__uint32 length; + stbi__uint32 type; +} stbi__pngchunk; + +static stbi__pngchunk stbi__get_chunk_header(stbi__context *s) +{ + stbi__pngchunk c; + c.length = stbi__get32be(s); + c.type = stbi__get32be(s); + return c; +} + +static int stbi__check_png_header(stbi__context *s) +{ + static const stbi_uc png_sig[8] = { 137,80,78,71,13,10,26,10 }; + int i; + for (i=0; i < 8; ++i) + if (stbi__get8(s) != png_sig[i]) return stbi__err("bad png sig","Not a PNG"); + return 1; +} + +typedef struct +{ + stbi__context *s; + stbi_uc *idata, *expanded, *out; + int depth; +} stbi__png; + + +enum { + STBI__F_none=0, + STBI__F_sub=1, + STBI__F_up=2, + STBI__F_avg=3, + STBI__F_paeth=4, + // synthetic filters used for first scanline to avoid needing a dummy row of 0s + STBI__F_avg_first, + STBI__F_paeth_first +}; + +static stbi_uc first_row_filter[5] = +{ + STBI__F_none, + STBI__F_sub, + STBI__F_none, + STBI__F_avg_first, + STBI__F_paeth_first +}; + +static int stbi__paeth(int a, int b, int c) +{ + int p = a + b - c; + int pa = abs(p-a); + int pb = abs(p-b); + int pc = abs(p-c); + if (pa <= pb && pa <= pc) return a; + if (pb <= pc) return b; + return c; +} + +static const stbi_uc stbi__depth_scale_table[9] = { 0, 0xff, 0x55, 0, 0x11, 0,0,0, 0x01 }; + +// create the png data from post-deflated data +static int stbi__create_png_image_raw(stbi__png *a, stbi_uc *raw, stbi__uint32 raw_len, int out_n, stbi__uint32 x, stbi__uint32 y, int depth, int color) +{ + int bytes = (depth == 16? 
2 : 1); + stbi__context *s = a->s; + stbi__uint32 i,j,stride = x*out_n*bytes; + stbi__uint32 img_len, img_width_bytes; + int k; + int img_n = s->img_n; // copy it into a local for later + + int output_bytes = out_n*bytes; + int filter_bytes = img_n*bytes; + int width = x; + + STBI_ASSERT(out_n == s->img_n || out_n == s->img_n+1); + a->out = (stbi_uc *) stbi__malloc_mad3(x, y, output_bytes, 0); // extra bytes to write off the end into + if (!a->out) return stbi__err("outofmem", "Out of memory"); + + if (!stbi__mad3sizes_valid(img_n, x, depth, 7)) return stbi__err("too large", "Corrupt PNG"); + img_width_bytes = (((img_n * x * depth) + 7) >> 3); + img_len = (img_width_bytes + 1) * y; + + // we used to check for exact match between raw_len and img_len on non-interlaced PNGs, + // but issue #276 reported a PNG in the wild that had extra data at the end (all zeros), + // so just check for raw_len < img_len always. + if (raw_len < img_len) return stbi__err("not enough pixels","Corrupt PNG"); + + for (j=0; j < y; ++j) { + stbi_uc *cur = a->out + stride*j; + stbi_uc *prior; + int filter = *raw++; + + if (filter > 4) + return stbi__err("invalid filter","Corrupt PNG"); + + if (depth < 8) { + if (img_width_bytes > x) return stbi__err("invalid width","Corrupt PNG"); + cur += x*out_n - img_width_bytes; // store output to the rightmost img_len bytes, so we can decode in place + filter_bytes = 1; + width = img_width_bytes; + } + prior = cur - stride; // bugfix: need to compute this after 'cur +=' computation above + + // if first row, use special filter that doesn't sample previous row + if (j == 0) filter = first_row_filter[filter]; + + // handle first byte explicitly + for (k=0; k < filter_bytes; ++k) { + switch (filter) { + case STBI__F_none : cur[k] = raw[k]; break; + case STBI__F_sub : cur[k] = raw[k]; break; + case STBI__F_up : cur[k] = STBI__BYTECAST(raw[k] + prior[k]); break; + case STBI__F_avg : cur[k] = STBI__BYTECAST(raw[k] + (prior[k]>>1)); break; + case STBI__F_paeth : cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(0,prior[k],0)); break; + case STBI__F_avg_first : cur[k] = raw[k]; break; + case STBI__F_paeth_first: cur[k] = raw[k]; break; + } + } + + if (depth == 8) { + if (img_n != out_n) + cur[img_n] = 255; // first pixel + raw += img_n; + cur += out_n; + prior += out_n; + } else if (depth == 16) { + if (img_n != out_n) { + cur[filter_bytes] = 255; // first pixel top byte + cur[filter_bytes+1] = 255; // first pixel bottom byte + } + raw += filter_bytes; + cur += output_bytes; + prior += output_bytes; + } else { + raw += 1; + cur += 1; + prior += 1; + } + + // this is a little gross, so that we don't switch per-pixel or per-component + if (depth < 8 || img_n == out_n) { + int nk = (width - 1)*filter_bytes; + #define STBI__CASE(f) \ + case f: \ + for (k=0; k < nk; ++k) + switch (filter) { + // "none" filter turns into a memcpy here; make that explicit. 
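+ // (each STBI__CASE below expands to "case f: for (k=0; k < nk; ++k)", so the
+ //  braced statement runs once per remaining byte of the scanline; the none
+ //  filter skips that per-byte loop entirely.)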
+ case STBI__F_none: memcpy(cur, raw, nk); break; + STBI__CASE(STBI__F_sub) { cur[k] = STBI__BYTECAST(raw[k] + cur[k-filter_bytes]); } break; + STBI__CASE(STBI__F_up) { cur[k] = STBI__BYTECAST(raw[k] + prior[k]); } break; + STBI__CASE(STBI__F_avg) { cur[k] = STBI__BYTECAST(raw[k] + ((prior[k] + cur[k-filter_bytes])>>1)); } break; + STBI__CASE(STBI__F_paeth) { cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(cur[k-filter_bytes],prior[k],prior[k-filter_bytes])); } break; + STBI__CASE(STBI__F_avg_first) { cur[k] = STBI__BYTECAST(raw[k] + (cur[k-filter_bytes] >> 1)); } break; + STBI__CASE(STBI__F_paeth_first) { cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(cur[k-filter_bytes],0,0)); } break; + } + #undef STBI__CASE + raw += nk; + } else { + STBI_ASSERT(img_n+1 == out_n); + #define STBI__CASE(f) \ + case f: \ + for (i=x-1; i >= 1; --i, cur[filter_bytes]=255,raw+=filter_bytes,cur+=output_bytes,prior+=output_bytes) \ + for (k=0; k < filter_bytes; ++k) + switch (filter) { + STBI__CASE(STBI__F_none) { cur[k] = raw[k]; } break; + STBI__CASE(STBI__F_sub) { cur[k] = STBI__BYTECAST(raw[k] + cur[k- output_bytes]); } break; + STBI__CASE(STBI__F_up) { cur[k] = STBI__BYTECAST(raw[k] + prior[k]); } break; + STBI__CASE(STBI__F_avg) { cur[k] = STBI__BYTECAST(raw[k] + ((prior[k] + cur[k- output_bytes])>>1)); } break; + STBI__CASE(STBI__F_paeth) { cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(cur[k- output_bytes],prior[k],prior[k- output_bytes])); } break; + STBI__CASE(STBI__F_avg_first) { cur[k] = STBI__BYTECAST(raw[k] + (cur[k- output_bytes] >> 1)); } break; + STBI__CASE(STBI__F_paeth_first) { cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(cur[k- output_bytes],0,0)); } break; + } + #undef STBI__CASE + + // the loop above sets the high byte of the pixels' alpha, but for + // 16 bit png files we also need the low byte set. we'll do that here. + if (depth == 16) { + cur = a->out + stride*j; // start at the beginning of the row again + for (i=0; i < x; ++i,cur+=output_bytes) { + cur[filter_bytes+1] = 255; + } + } + } + } + + // we make a separate pass to expand bits to pixels; for performance, + // this could run two scanlines behind the above code, so it won't + // intefere with filtering but will still be in the cache. + if (depth < 8) { + for (j=0; j < y; ++j) { + stbi_uc *cur = a->out + stride*j; + stbi_uc *in = a->out + stride*j + x*out_n - img_width_bytes; + // unpack 1/2/4-bit into a 8-bit buffer. allows us to keep the common 8-bit path optimal at minimal cost for 1/2/4-bit + // png guarante byte alignment, if width is not multiple of 8/4/2 we'll decode dummy trailing data that will be skipped in the later loop + stbi_uc scale = (color == 0) ? stbi__depth_scale_table[depth] : 1; // scale grayscale values to 0..255 range + + // note that the final byte might overshoot and write more data than desired. + // we can allocate enough data that this never writes out of memory, but it + // could also overwrite the next scanline. can it overwrite non-empty data + // on the next scanline? yes, consider 1-pixel-wide scanlines with 1-bit-per-pixel. 
+ // so we need to explicitly clamp the final ones + + if (depth == 4) { + for (k=x*img_n; k >= 2; k-=2, ++in) { + *cur++ = scale * ((*in >> 4) ); + *cur++ = scale * ((*in ) & 0x0f); + } + if (k > 0) *cur++ = scale * ((*in >> 4) ); + } else if (depth == 2) { + for (k=x*img_n; k >= 4; k-=4, ++in) { + *cur++ = scale * ((*in >> 6) ); + *cur++ = scale * ((*in >> 4) & 0x03); + *cur++ = scale * ((*in >> 2) & 0x03); + *cur++ = scale * ((*in ) & 0x03); + } + if (k > 0) *cur++ = scale * ((*in >> 6) ); + if (k > 1) *cur++ = scale * ((*in >> 4) & 0x03); + if (k > 2) *cur++ = scale * ((*in >> 2) & 0x03); + } else if (depth == 1) { + for (k=x*img_n; k >= 8; k-=8, ++in) { + *cur++ = scale * ((*in >> 7) ); + *cur++ = scale * ((*in >> 6) & 0x01); + *cur++ = scale * ((*in >> 5) & 0x01); + *cur++ = scale * ((*in >> 4) & 0x01); + *cur++ = scale * ((*in >> 3) & 0x01); + *cur++ = scale * ((*in >> 2) & 0x01); + *cur++ = scale * ((*in >> 1) & 0x01); + *cur++ = scale * ((*in ) & 0x01); + } + if (k > 0) *cur++ = scale * ((*in >> 7) ); + if (k > 1) *cur++ = scale * ((*in >> 6) & 0x01); + if (k > 2) *cur++ = scale * ((*in >> 5) & 0x01); + if (k > 3) *cur++ = scale * ((*in >> 4) & 0x01); + if (k > 4) *cur++ = scale * ((*in >> 3) & 0x01); + if (k > 5) *cur++ = scale * ((*in >> 2) & 0x01); + if (k > 6) *cur++ = scale * ((*in >> 1) & 0x01); + } + if (img_n != out_n) { + int q; + // insert alpha = 255 + cur = a->out + stride*j; + if (img_n == 1) { + for (q=x-1; q >= 0; --q) { + cur[q*2+1] = 255; + cur[q*2+0] = cur[q]; + } + } else { + STBI_ASSERT(img_n == 3); + for (q=x-1; q >= 0; --q) { + cur[q*4+3] = 255; + cur[q*4+2] = cur[q*3+2]; + cur[q*4+1] = cur[q*3+1]; + cur[q*4+0] = cur[q*3+0]; + } + } + } + } + } else if (depth == 16) { + // force the image data from big-endian to platform-native. + // this is done in a separate pass due to the decoding relying + // on the data being untouched, but could probably be done + // per-line during decode if care is taken. + stbi_uc *cur = a->out; + stbi__uint16 *cur16 = (stbi__uint16*)cur; + + for(i=0; i < x*y*out_n; ++i,cur16++,cur+=2) { + *cur16 = (cur[0] << 8) | cur[1]; + } + } + + return 1; +} + +static int stbi__create_png_image(stbi__png *a, stbi_uc *image_data, stbi__uint32 image_data_len, int out_n, int depth, int color, int interlaced) +{ + int bytes = (depth == 16 ? 
2 : 1); + int out_bytes = out_n * bytes; + stbi_uc *final; + int p; + if (!interlaced) + return stbi__create_png_image_raw(a, image_data, image_data_len, out_n, a->s->img_x, a->s->img_y, depth, color); + + // de-interlacing + final = (stbi_uc *) stbi__malloc_mad3(a->s->img_x, a->s->img_y, out_bytes, 0); + for (p=0; p < 7; ++p) { + int xorig[] = { 0,4,0,2,0,1,0 }; + int yorig[] = { 0,0,4,0,2,0,1 }; + int xspc[] = { 8,8,4,4,2,2,1 }; + int yspc[] = { 8,8,8,4,4,2,2 }; + int i,j,x,y; + // pass1_x[4] = 0, pass1_x[5] = 1, pass1_x[12] = 1 + x = (a->s->img_x - xorig[p] + xspc[p]-1) / xspc[p]; + y = (a->s->img_y - yorig[p] + yspc[p]-1) / yspc[p]; + if (x && y) { + stbi__uint32 img_len = ((((a->s->img_n * x * depth) + 7) >> 3) + 1) * y; + if (!stbi__create_png_image_raw(a, image_data, image_data_len, out_n, x, y, depth, color)) { + STBI_FREE(final); + return 0; + } + for (j=0; j < y; ++j) { + for (i=0; i < x; ++i) { + int out_y = j*yspc[p]+yorig[p]; + int out_x = i*xspc[p]+xorig[p]; + memcpy(final + out_y*a->s->img_x*out_bytes + out_x*out_bytes, + a->out + (j*x+i)*out_bytes, out_bytes); + } + } + STBI_FREE(a->out); + image_data += img_len; + image_data_len -= img_len; + } + } + a->out = final; + + return 1; +} + +static int stbi__compute_transparency(stbi__png *z, stbi_uc tc[3], int out_n) +{ + stbi__context *s = z->s; + stbi__uint32 i, pixel_count = s->img_x * s->img_y; + stbi_uc *p = z->out; + + // compute color-based transparency, assuming we've + // already got 255 as the alpha value in the output + STBI_ASSERT(out_n == 2 || out_n == 4); + + if (out_n == 2) { + for (i=0; i < pixel_count; ++i) { + p[1] = (p[0] == tc[0] ? 0 : 255); + p += 2; + } + } else { + for (i=0; i < pixel_count; ++i) { + if (p[0] == tc[0] && p[1] == tc[1] && p[2] == tc[2]) + p[3] = 0; + p += 4; + } + } + return 1; +} + +static int stbi__compute_transparency16(stbi__png *z, stbi__uint16 tc[3], int out_n) +{ + stbi__context *s = z->s; + stbi__uint32 i, pixel_count = s->img_x * s->img_y; + stbi__uint16 *p = (stbi__uint16*) z->out; + + // compute color-based transparency, assuming we've + // already got 65535 as the alpha value in the output + STBI_ASSERT(out_n == 2 || out_n == 4); + + if (out_n == 2) { + for (i = 0; i < pixel_count; ++i) { + p[1] = (p[0] == tc[0] ? 
0 : 65535); + p += 2; + } + } else { + for (i = 0; i < pixel_count; ++i) { + if (p[0] == tc[0] && p[1] == tc[1] && p[2] == tc[2]) + p[3] = 0; + p += 4; + } + } + return 1; +} + +static int stbi__expand_png_palette(stbi__png *a, stbi_uc *palette, int len, int pal_img_n) +{ + stbi__uint32 i, pixel_count = a->s->img_x * a->s->img_y; + stbi_uc *p, *temp_out, *orig = a->out; + + p = (stbi_uc *) stbi__malloc_mad2(pixel_count, pal_img_n, 0); + if (p == NULL) return stbi__err("outofmem", "Out of memory"); + + // between here and free(out) below, exitting would leak + temp_out = p; + + if (pal_img_n == 3) { + for (i=0; i < pixel_count; ++i) { + int n = orig[i]*4; + p[0] = palette[n ]; + p[1] = palette[n+1]; + p[2] = palette[n+2]; + p += 3; + } + } else { + for (i=0; i < pixel_count; ++i) { + int n = orig[i]*4; + p[0] = palette[n ]; + p[1] = palette[n+1]; + p[2] = palette[n+2]; + p[3] = palette[n+3]; + p += 4; + } + } + STBI_FREE(a->out); + a->out = temp_out; + + STBI_NOTUSED(len); + + return 1; +} + +static int stbi__unpremultiply_on_load = 0; +static int stbi__de_iphone_flag = 0; + +STBIDEF void stbi_set_unpremultiply_on_load(int flag_true_if_should_unpremultiply) +{ + stbi__unpremultiply_on_load = flag_true_if_should_unpremultiply; +} + +STBIDEF void stbi_convert_iphone_png_to_rgb(int flag_true_if_should_convert) +{ + stbi__de_iphone_flag = flag_true_if_should_convert; +} + +static void stbi__de_iphone(stbi__png *z) +{ + stbi__context *s = z->s; + stbi__uint32 i, pixel_count = s->img_x * s->img_y; + stbi_uc *p = z->out; + + if (s->img_out_n == 3) { // convert bgr to rgb + for (i=0; i < pixel_count; ++i) { + stbi_uc t = p[0]; + p[0] = p[2]; + p[2] = t; + p += 3; + } + } else { + STBI_ASSERT(s->img_out_n == 4); + if (stbi__unpremultiply_on_load) { + // convert bgr to rgb and unpremultiply + for (i=0; i < pixel_count; ++i) { + stbi_uc a = p[3]; + stbi_uc t = p[0]; + if (a) { + stbi_uc half = a / 2; + p[0] = (p[2] * 255 + half) / a; + p[1] = (p[1] * 255 + half) / a; + p[2] = ( t * 255 + half) / a; + } else { + p[0] = p[2]; + p[2] = t; + } + p += 4; + } + } else { + // convert bgr to rgb + for (i=0; i < pixel_count; ++i) { + stbi_uc t = p[0]; + p[0] = p[2]; + p[2] = t; + p += 4; + } + } + } +} + +#define STBI__PNG_TYPE(a,b,c,d) (((unsigned) (a) << 24) + ((unsigned) (b) << 16) + ((unsigned) (c) << 8) + (unsigned) (d)) + +static int stbi__parse_png_file(stbi__png *z, int scan, int req_comp) +{ + stbi_uc palette[1024], pal_img_n=0; + stbi_uc has_trans=0, tc[3]={0}; + stbi__uint16 tc16[3]; + stbi__uint32 ioff=0, idata_limit=0, i, pal_len=0; + int first=1,k,interlace=0, color=0, is_iphone=0; + stbi__context *s = z->s; + + z->expanded = NULL; + z->idata = NULL; + z->out = NULL; + + if (!stbi__check_png_header(s)) return 0; + + if (scan == STBI__SCAN_type) return 1; + + for (;;) { + stbi__pngchunk c = stbi__get_chunk_header(s); + switch (c.type) { + case STBI__PNG_TYPE('C','g','B','I'): + is_iphone = 1; + stbi__skip(s, c.length); + break; + case STBI__PNG_TYPE('I','H','D','R'): { + int comp,filter; + if (!first) return stbi__err("multiple IHDR","Corrupt PNG"); + first = 0; + if (c.length != 13) return stbi__err("bad IHDR len","Corrupt PNG"); + s->img_x = stbi__get32be(s); + s->img_y = stbi__get32be(s); + if (s->img_y > STBI_MAX_DIMENSIONS) return stbi__err("too large","Very large image (corrupt?)"); + if (s->img_x > STBI_MAX_DIMENSIONS) return stbi__err("too large","Very large image (corrupt?)"); + z->depth = stbi__get8(s); if (z->depth != 1 && z->depth != 2 && z->depth != 4 && z->depth != 8 && z->depth != 
16) return stbi__err("1/2/4/8/16-bit only","PNG not supported: 1/2/4/8/16-bit only"); + color = stbi__get8(s); if (color > 6) return stbi__err("bad ctype","Corrupt PNG"); + if (color == 3 && z->depth == 16) return stbi__err("bad ctype","Corrupt PNG"); + if (color == 3) pal_img_n = 3; else if (color & 1) return stbi__err("bad ctype","Corrupt PNG"); + comp = stbi__get8(s); if (comp) return stbi__err("bad comp method","Corrupt PNG"); + filter= stbi__get8(s); if (filter) return stbi__err("bad filter method","Corrupt PNG"); + interlace = stbi__get8(s); if (interlace>1) return stbi__err("bad interlace method","Corrupt PNG"); + if (!s->img_x || !s->img_y) return stbi__err("0-pixel image","Corrupt PNG"); + if (!pal_img_n) { + s->img_n = (color & 2 ? 3 : 1) + (color & 4 ? 1 : 0); + if ((1 << 30) / s->img_x / s->img_n < s->img_y) return stbi__err("too large", "Image too large to decode"); + if (scan == STBI__SCAN_header) return 1; + } else { + // if paletted, then pal_n is our final components, and + // img_n is # components to decompress/filter. + s->img_n = 1; + if ((1 << 30) / s->img_x / 4 < s->img_y) return stbi__err("too large","Corrupt PNG"); + // if SCAN_header, have to scan to see if we have a tRNS + } + break; + } + + case STBI__PNG_TYPE('P','L','T','E'): { + if (first) return stbi__err("first not IHDR", "Corrupt PNG"); + if (c.length > 256*3) return stbi__err("invalid PLTE","Corrupt PNG"); + pal_len = c.length / 3; + if (pal_len * 3 != c.length) return stbi__err("invalid PLTE","Corrupt PNG"); + for (i=0; i < pal_len; ++i) { + palette[i*4+0] = stbi__get8(s); + palette[i*4+1] = stbi__get8(s); + palette[i*4+2] = stbi__get8(s); + palette[i*4+3] = 255; + } + break; + } + + case STBI__PNG_TYPE('t','R','N','S'): { + if (first) return stbi__err("first not IHDR", "Corrupt PNG"); + if (z->idata) return stbi__err("tRNS after IDAT","Corrupt PNG"); + if (pal_img_n) { + if (scan == STBI__SCAN_header) { s->img_n = 4; return 1; } + if (pal_len == 0) return stbi__err("tRNS before PLTE","Corrupt PNG"); + if (c.length > pal_len) return stbi__err("bad tRNS len","Corrupt PNG"); + pal_img_n = 4; + for (i=0; i < c.length; ++i) + palette[i*4+3] = stbi__get8(s); + } else { + if (!(s->img_n & 1)) return stbi__err("tRNS with alpha","Corrupt PNG"); + if (c.length != (stbi__uint32) s->img_n*2) return stbi__err("bad tRNS len","Corrupt PNG"); + has_trans = 1; + if (z->depth == 16) { + for (k = 0; k < s->img_n; ++k) tc16[k] = (stbi__uint16)stbi__get16be(s); // copy the values as-is + } else { + for (k = 0; k < s->img_n; ++k) tc[k] = (stbi_uc)(stbi__get16be(s) & 255) * stbi__depth_scale_table[z->depth]; // non 8-bit images will be larger + } + } + break; + } + + case STBI__PNG_TYPE('I','D','A','T'): { + if (first) return stbi__err("first not IHDR", "Corrupt PNG"); + if (pal_img_n && !pal_len) return stbi__err("no PLTE","Corrupt PNG"); + if (scan == STBI__SCAN_header) { s->img_n = pal_img_n; return 1; } + if ((int)(ioff + c.length) < (int)ioff) return 0; + if (ioff + c.length > idata_limit) { + stbi__uint32 idata_limit_old = idata_limit; + stbi_uc *p; + if (idata_limit == 0) idata_limit = c.length > 4096 ? 
c.length : 4096; + while (ioff + c.length > idata_limit) + idata_limit *= 2; + STBI_NOTUSED(idata_limit_old); + p = (stbi_uc *) STBI_REALLOC_SIZED(z->idata, idata_limit_old, idata_limit); if (p == NULL) return stbi__err("outofmem", "Out of memory"); + z->idata = p; + } + if (!stbi__getn(s, z->idata+ioff,c.length)) return stbi__err("outofdata","Corrupt PNG"); + ioff += c.length; + break; + } + + case STBI__PNG_TYPE('I','E','N','D'): { + stbi__uint32 raw_len, bpl; + if (first) return stbi__err("first not IHDR", "Corrupt PNG"); + if (scan != STBI__SCAN_load) return 1; + if (z->idata == NULL) return stbi__err("no IDAT","Corrupt PNG"); + // initial guess for decoded data size to avoid unnecessary reallocs + bpl = (s->img_x * z->depth + 7) / 8; // bytes per line, per component + raw_len = bpl * s->img_y * s->img_n /* pixels */ + s->img_y /* filter mode per row */; + z->expanded = (stbi_uc *) stbi_zlib_decode_malloc_guesssize_headerflag((char *) z->idata, ioff, raw_len, (int *) &raw_len, !is_iphone); + if (z->expanded == NULL) return 0; // zlib should set error + STBI_FREE(z->idata); z->idata = NULL; + if ((req_comp == s->img_n+1 && req_comp != 3 && !pal_img_n) || has_trans) + s->img_out_n = s->img_n+1; + else + s->img_out_n = s->img_n; + if (!stbi__create_png_image(z, z->expanded, raw_len, s->img_out_n, z->depth, color, interlace)) return 0; + if (has_trans) { + if (z->depth == 16) { + if (!stbi__compute_transparency16(z, tc16, s->img_out_n)) return 0; + } else { + if (!stbi__compute_transparency(z, tc, s->img_out_n)) return 0; + } + } + if (is_iphone && stbi__de_iphone_flag && s->img_out_n > 2) + stbi__de_iphone(z); + if (pal_img_n) { + // pal_img_n == 3 or 4 + s->img_n = pal_img_n; // record the actual colors we had + s->img_out_n = pal_img_n; + if (req_comp >= 3) s->img_out_n = req_comp; + if (!stbi__expand_png_palette(z, palette, pal_len, s->img_out_n)) + return 0; + } else if (has_trans) { + // non-paletted image with tRNS -> source image has (constant) alpha + ++s->img_n; + } + STBI_FREE(z->expanded); z->expanded = NULL; + // end of PNG chunk, read and skip CRC + stbi__get32be(s); + return 1; + } + + default: + // if critical, fail + if (first) return stbi__err("first not IHDR", "Corrupt PNG"); + if ((c.type & (1 << 29)) == 0) { + #ifndef STBI_NO_FAILURE_STRINGS + // not threadsafe + static char invalid_chunk[] = "XXXX PNG chunk not known"; + invalid_chunk[0] = STBI__BYTECAST(c.type >> 24); + invalid_chunk[1] = STBI__BYTECAST(c.type >> 16); + invalid_chunk[2] = STBI__BYTECAST(c.type >> 8); + invalid_chunk[3] = STBI__BYTECAST(c.type >> 0); + #endif + return stbi__err(invalid_chunk, "PNG not supported: unknown PNG chunk type"); + } + stbi__skip(s, c.length); + break; + } + // end of PNG chunk, read and skip CRC + stbi__get32be(s); + } +} + +static void *stbi__do_png(stbi__png *p, int *x, int *y, int *n, int req_comp, stbi__result_info *ri) +{ + void *result=NULL; + if (req_comp < 0 || req_comp > 4) return stbi__errpuc("bad req_comp", "Internal error"); + if (stbi__parse_png_file(p, STBI__SCAN_load, req_comp)) { + if (p->depth <= 8) + ri->bits_per_channel = 8; + else if (p->depth == 16) + ri->bits_per_channel = 16; + else + return stbi__errpuc("bad bits_per_channel", "PNG not supported: unsupported color depth"); + result = p->out; + p->out = NULL; + if (req_comp && req_comp != p->s->img_out_n) { + if (ri->bits_per_channel == 8) + result = stbi__convert_format((unsigned char *) result, p->s->img_out_n, req_comp, p->s->img_x, p->s->img_y); + else + result = stbi__convert_format16((stbi__uint16 
*) result, p->s->img_out_n, req_comp, p->s->img_x, p->s->img_y); + p->s->img_out_n = req_comp; + if (result == NULL) return result; + } + *x = p->s->img_x; + *y = p->s->img_y; + if (n) *n = p->s->img_n; + } + STBI_FREE(p->out); p->out = NULL; + STBI_FREE(p->expanded); p->expanded = NULL; + STBI_FREE(p->idata); p->idata = NULL; + + return result; +} + +static void *stbi__png_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri) +{ + stbi__png p; + p.s = s; + return stbi__do_png(&p, x,y,comp,req_comp, ri); +} + +static int stbi__png_test(stbi__context *s) +{ + int r; + r = stbi__check_png_header(s); + stbi__rewind(s); + return r; +} + +static int stbi__png_info_raw(stbi__png *p, int *x, int *y, int *comp) +{ + if (!stbi__parse_png_file(p, STBI__SCAN_header, 0)) { + stbi__rewind( p->s ); + return 0; + } + if (x) *x = p->s->img_x; + if (y) *y = p->s->img_y; + if (comp) *comp = p->s->img_n; + return 1; +} + +static int stbi__png_info(stbi__context *s, int *x, int *y, int *comp) +{ + stbi__png p; + p.s = s; + return stbi__png_info_raw(&p, x, y, comp); +} + +static int stbi__png_is16(stbi__context *s) +{ + stbi__png p; + p.s = s; + if (!stbi__png_info_raw(&p, NULL, NULL, NULL)) + return 0; + if (p.depth != 16) { + stbi__rewind(p.s); + return 0; + } + return 1; +} +#endif + +// Microsoft/Windows BMP image + +#ifndef STBI_NO_BMP +static int stbi__bmp_test_raw(stbi__context *s) +{ + int r; + int sz; + if (stbi__get8(s) != 'B') return 0; + if (stbi__get8(s) != 'M') return 0; + stbi__get32le(s); // discard filesize + stbi__get16le(s); // discard reserved + stbi__get16le(s); // discard reserved + stbi__get32le(s); // discard data offset + sz = stbi__get32le(s); + r = (sz == 12 || sz == 40 || sz == 56 || sz == 108 || sz == 124); + return r; +} + +static int stbi__bmp_test(stbi__context *s) +{ + int r = stbi__bmp_test_raw(s); + stbi__rewind(s); + return r; +} + + +// returns 0..31 for the highest set bit +static int stbi__high_bit(unsigned int z) +{ + int n=0; + if (z == 0) return -1; + if (z >= 0x10000) { n += 16; z >>= 16; } + if (z >= 0x00100) { n += 8; z >>= 8; } + if (z >= 0x00010) { n += 4; z >>= 4; } + if (z >= 0x00004) { n += 2; z >>= 2; } + if (z >= 0x00002) { n += 1;/* >>= 1;*/ } + return n; +} + +static int stbi__bitcount(unsigned int a) +{ + a = (a & 0x55555555) + ((a >> 1) & 0x55555555); // max 2 + a = (a & 0x33333333) + ((a >> 2) & 0x33333333); // max 4 + a = (a + (a >> 4)) & 0x0f0f0f0f; // max 8 per 4, now 8 bits + a = (a + (a >> 8)); // max 16 per 8 bits + a = (a + (a >> 16)); // max 32 per 8 bits + return a & 0xff; +} + +// extract an arbitrarily-aligned N-bit value (N=bits) +// from v, and then make it 8-bits long and fractionally +// extend it to full full range. 
+static int stbi__shiftsigned(unsigned int v, int shift, int bits) +{ + static unsigned int mul_table[9] = { + 0, + 0xff/*0b11111111*/, 0x55/*0b01010101*/, 0x49/*0b01001001*/, 0x11/*0b00010001*/, + 0x21/*0b00100001*/, 0x41/*0b01000001*/, 0x81/*0b10000001*/, 0x01/*0b00000001*/, + }; + static unsigned int shift_table[9] = { + 0, 0,0,1,0,2,4,6,0, + }; + if (shift < 0) + v <<= -shift; + else + v >>= shift; + STBI_ASSERT(v < 256); + v >>= (8-bits); + STBI_ASSERT(bits >= 0 && bits <= 8); + return (int) ((unsigned) v * mul_table[bits]) >> shift_table[bits]; +} + +typedef struct +{ + int bpp, offset, hsz; + unsigned int mr,mg,mb,ma, all_a; + int extra_read; +} stbi__bmp_data; + +static void *stbi__bmp_parse_header(stbi__context *s, stbi__bmp_data *info) +{ + int hsz; + if (stbi__get8(s) != 'B' || stbi__get8(s) != 'M') return stbi__errpuc("not BMP", "Corrupt BMP"); + stbi__get32le(s); // discard filesize + stbi__get16le(s); // discard reserved + stbi__get16le(s); // discard reserved + info->offset = stbi__get32le(s); + info->hsz = hsz = stbi__get32le(s); + info->mr = info->mg = info->mb = info->ma = 0; + info->extra_read = 14; + + if (info->offset < 0) return stbi__errpuc("bad BMP", "bad BMP"); + + if (hsz != 12 && hsz != 40 && hsz != 56 && hsz != 108 && hsz != 124) return stbi__errpuc("unknown BMP", "BMP type not supported: unknown"); + if (hsz == 12) { + s->img_x = stbi__get16le(s); + s->img_y = stbi__get16le(s); + } else { + s->img_x = stbi__get32le(s); + s->img_y = stbi__get32le(s); + } + if (stbi__get16le(s) != 1) return stbi__errpuc("bad BMP", "bad BMP"); + info->bpp = stbi__get16le(s); + if (hsz != 12) { + int compress = stbi__get32le(s); + if (compress == 1 || compress == 2) return stbi__errpuc("BMP RLE", "BMP type not supported: RLE"); + stbi__get32le(s); // discard sizeof + stbi__get32le(s); // discard hres + stbi__get32le(s); // discard vres + stbi__get32le(s); // discard colorsused + stbi__get32le(s); // discard max important + if (hsz == 40 || hsz == 56) { + if (hsz == 56) { + stbi__get32le(s); + stbi__get32le(s); + stbi__get32le(s); + stbi__get32le(s); + } + if (info->bpp == 16 || info->bpp == 32) { + if (compress == 0) { + if (info->bpp == 32) { + info->mr = 0xffu << 16; + info->mg = 0xffu << 8; + info->mb = 0xffu << 0; + info->ma = 0xffu << 24; + info->all_a = 0; // if all_a is 0 at end, then we loaded alpha channel but it was all 0 + } else { + info->mr = 31u << 10; + info->mg = 31u << 5; + info->mb = 31u << 0; + } + } else if (compress == 3) { + info->mr = stbi__get32le(s); + info->mg = stbi__get32le(s); + info->mb = stbi__get32le(s); + info->extra_read += 12; + // not documented, but generated by photoshop and handled by mspaint + if (info->mr == info->mg && info->mg == info->mb) { + // ?!?!? 
+ return stbi__errpuc("bad BMP", "bad BMP"); + } + } else + return stbi__errpuc("bad BMP", "bad BMP"); + } + } else { + int i; + if (hsz != 108 && hsz != 124) + return stbi__errpuc("bad BMP", "bad BMP"); + info->mr = stbi__get32le(s); + info->mg = stbi__get32le(s); + info->mb = stbi__get32le(s); + info->ma = stbi__get32le(s); + stbi__get32le(s); // discard color space + for (i=0; i < 12; ++i) + stbi__get32le(s); // discard color space parameters + if (hsz == 124) { + stbi__get32le(s); // discard rendering intent + stbi__get32le(s); // discard offset of profile data + stbi__get32le(s); // discard size of profile data + stbi__get32le(s); // discard reserved + } + } + } + return (void *) 1; +} + + +static void *stbi__bmp_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri) +{ + stbi_uc *out; + unsigned int mr=0,mg=0,mb=0,ma=0, all_a; + stbi_uc pal[256][4]; + int psize=0,i,j,width; + int flip_vertically, pad, target; + stbi__bmp_data info; + STBI_NOTUSED(ri); + + info.all_a = 255; + if (stbi__bmp_parse_header(s, &info) == NULL) + return NULL; // error code already set + + flip_vertically = ((int) s->img_y) > 0; + s->img_y = abs((int) s->img_y); + + if (s->img_y > STBI_MAX_DIMENSIONS) return stbi__errpuc("too large","Very large image (corrupt?)"); + if (s->img_x > STBI_MAX_DIMENSIONS) return stbi__errpuc("too large","Very large image (corrupt?)"); + + mr = info.mr; + mg = info.mg; + mb = info.mb; + ma = info.ma; + all_a = info.all_a; + + if (info.hsz == 12) { + if (info.bpp < 24) + psize = (info.offset - info.extra_read - 24) / 3; + } else { + if (info.bpp < 16) + psize = (info.offset - info.extra_read - info.hsz) >> 2; + } + if (psize == 0) { + STBI_ASSERT(info.offset == s->callback_already_read + (int) (s->img_buffer - s->img_buffer_original)); + if (info.offset != s->callback_already_read + (int) (s->img_buffer - s->img_buffer_original)) { + return stbi__errpuc("bad offset", "Corrupt BMP"); + } + } + + if (info.bpp == 24 && ma == 0xff000000) + s->img_n = 3; + else + s->img_n = ma ? 4 : 3; + if (req_comp && req_comp >= 3) // we can directly decode 3 or 4 + target = req_comp; + else + target = s->img_n; // if they want monochrome, we'll post-convert + + // sanity-check size + if (!stbi__mad3sizes_valid(target, s->img_x, s->img_y, 0)) + return stbi__errpuc("too large", "Corrupt BMP"); + + out = (stbi_uc *) stbi__malloc_mad3(target, s->img_x, s->img_y, 0); + if (!out) return stbi__errpuc("outofmem", "Out of memory"); + if (info.bpp < 16) { + int z=0; + if (psize == 0 || psize > 256) { STBI_FREE(out); return stbi__errpuc("invalid", "Corrupt BMP"); } + for (i=0; i < psize; ++i) { + pal[i][2] = stbi__get8(s); + pal[i][1] = stbi__get8(s); + pal[i][0] = stbi__get8(s); + if (info.hsz != 12) stbi__get8(s); + pal[i][3] = 255; + } + stbi__skip(s, info.offset - info.extra_read - info.hsz - psize * (info.hsz == 12 ? 
3 : 4)); + if (info.bpp == 1) width = (s->img_x + 7) >> 3; + else if (info.bpp == 4) width = (s->img_x + 1) >> 1; + else if (info.bpp == 8) width = s->img_x; + else { STBI_FREE(out); return stbi__errpuc("bad bpp", "Corrupt BMP"); } + pad = (-width)&3; + if (info.bpp == 1) { + for (j=0; j < (int) s->img_y; ++j) { + int bit_offset = 7, v = stbi__get8(s); + for (i=0; i < (int) s->img_x; ++i) { + int color = (v>>bit_offset)&0x1; + out[z++] = pal[color][0]; + out[z++] = pal[color][1]; + out[z++] = pal[color][2]; + if (target == 4) out[z++] = 255; + if (i+1 == (int) s->img_x) break; + if((--bit_offset) < 0) { + bit_offset = 7; + v = stbi__get8(s); + } + } + stbi__skip(s, pad); + } + } else { + for (j=0; j < (int) s->img_y; ++j) { + for (i=0; i < (int) s->img_x; i += 2) { + int v=stbi__get8(s),v2=0; + if (info.bpp == 4) { + v2 = v & 15; + v >>= 4; + } + out[z++] = pal[v][0]; + out[z++] = pal[v][1]; + out[z++] = pal[v][2]; + if (target == 4) out[z++] = 255; + if (i+1 == (int) s->img_x) break; + v = (info.bpp == 8) ? stbi__get8(s) : v2; + out[z++] = pal[v][0]; + out[z++] = pal[v][1]; + out[z++] = pal[v][2]; + if (target == 4) out[z++] = 255; + } + stbi__skip(s, pad); + } + } + } else { + int rshift=0,gshift=0,bshift=0,ashift=0,rcount=0,gcount=0,bcount=0,acount=0; + int z = 0; + int easy=0; + stbi__skip(s, info.offset - info.extra_read - info.hsz); + if (info.bpp == 24) width = 3 * s->img_x; + else if (info.bpp == 16) width = 2*s->img_x; + else /* bpp = 32 and pad = 0 */ width=0; + pad = (-width) & 3; + if (info.bpp == 24) { + easy = 1; + } else if (info.bpp == 32) { + if (mb == 0xff && mg == 0xff00 && mr == 0x00ff0000 && ma == 0xff000000) + easy = 2; + } + if (!easy) { + if (!mr || !mg || !mb) { STBI_FREE(out); return stbi__errpuc("bad masks", "Corrupt BMP"); } + // right shift amt to put high bit in position #7 + rshift = stbi__high_bit(mr)-7; rcount = stbi__bitcount(mr); + gshift = stbi__high_bit(mg)-7; gcount = stbi__bitcount(mg); + bshift = stbi__high_bit(mb)-7; bcount = stbi__bitcount(mb); + ashift = stbi__high_bit(ma)-7; acount = stbi__bitcount(ma); + if (rcount > 8 || gcount > 8 || bcount > 8 || acount > 8) { STBI_FREE(out); return stbi__errpuc("bad masks", "Corrupt BMP"); } + } + for (j=0; j < (int) s->img_y; ++j) { + if (easy) { + for (i=0; i < (int) s->img_x; ++i) { + unsigned char a; + out[z+2] = stbi__get8(s); + out[z+1] = stbi__get8(s); + out[z+0] = stbi__get8(s); + z += 3; + a = (easy == 2 ? stbi__get8(s) : 255); + all_a |= a; + if (target == 4) out[z++] = a; + } + } else { + int bpp = info.bpp; + for (i=0; i < (int) s->img_x; ++i) { + stbi__uint32 v = (bpp == 16 ? (stbi__uint32) stbi__get16le(s) : stbi__get32le(s)); + unsigned int a; + out[z++] = STBI__BYTECAST(stbi__shiftsigned(v & mr, rshift, rcount)); + out[z++] = STBI__BYTECAST(stbi__shiftsigned(v & mg, gshift, gcount)); + out[z++] = STBI__BYTECAST(stbi__shiftsigned(v & mb, bshift, bcount)); + a = (ma ? 
stbi__shiftsigned(v & ma, ashift, acount) : 255); + all_a |= a; + if (target == 4) out[z++] = STBI__BYTECAST(a); + } + } + stbi__skip(s, pad); + } + } + + // if alpha channel is all 0s, replace with all 255s + if (target == 4 && all_a == 0) + for (i=4*s->img_x*s->img_y-1; i >= 0; i -= 4) + out[i] = 255; + + if (flip_vertically) { + stbi_uc t; + for (j=0; j < (int) s->img_y>>1; ++j) { + stbi_uc *p1 = out + j *s->img_x*target; + stbi_uc *p2 = out + (s->img_y-1-j)*s->img_x*target; + for (i=0; i < (int) s->img_x*target; ++i) { + t = p1[i]; p1[i] = p2[i]; p2[i] = t; + } + } + } + + if (req_comp && req_comp != target) { + out = stbi__convert_format(out, target, req_comp, s->img_x, s->img_y); + if (out == NULL) return out; // stbi__convert_format frees input on failure + } + + *x = s->img_x; + *y = s->img_y; + if (comp) *comp = s->img_n; + return out; +} +#endif + +// Targa Truevision - TGA +// by Jonathan Dummer +#ifndef STBI_NO_TGA +// returns STBI_rgb or whatever, 0 on error +static int stbi__tga_get_comp(int bits_per_pixel, int is_grey, int* is_rgb16) +{ + // only RGB or RGBA (incl. 16bit) or grey allowed + if (is_rgb16) *is_rgb16 = 0; + switch(bits_per_pixel) { + case 8: return STBI_grey; + case 16: if(is_grey) return STBI_grey_alpha; + // fallthrough + case 15: if(is_rgb16) *is_rgb16 = 1; + return STBI_rgb; + case 24: // fallthrough + case 32: return bits_per_pixel/8; + default: return 0; + } +} + +static int stbi__tga_info(stbi__context *s, int *x, int *y, int *comp) +{ + int tga_w, tga_h, tga_comp, tga_image_type, tga_bits_per_pixel, tga_colormap_bpp; + int sz, tga_colormap_type; + stbi__get8(s); // discard Offset + tga_colormap_type = stbi__get8(s); // colormap type + if( tga_colormap_type > 1 ) { + stbi__rewind(s); + return 0; // only RGB or indexed allowed + } + tga_image_type = stbi__get8(s); // image type + if ( tga_colormap_type == 1 ) { // colormapped (paletted) image + if (tga_image_type != 1 && tga_image_type != 9) { + stbi__rewind(s); + return 0; + } + stbi__skip(s,4); // skip index of first colormap entry and number of entries + sz = stbi__get8(s); // check bits per palette color entry + if ( (sz != 8) && (sz != 15) && (sz != 16) && (sz != 24) && (sz != 32) ) { + stbi__rewind(s); + return 0; + } + stbi__skip(s,4); // skip image x and y origin + tga_colormap_bpp = sz; + } else { // "normal" image w/o colormap - only RGB or grey allowed, +/- RLE + if ( (tga_image_type != 2) && (tga_image_type != 3) && (tga_image_type != 10) && (tga_image_type != 11) ) { + stbi__rewind(s); + return 0; // only RGB or grey allowed, +/- RLE + } + stbi__skip(s,9); // skip colormap specification and image x/y origin + tga_colormap_bpp = 0; + } + tga_w = stbi__get16le(s); + if( tga_w < 1 ) { + stbi__rewind(s); + return 0; // test width + } + tga_h = stbi__get16le(s); + if( tga_h < 1 ) { + stbi__rewind(s); + return 0; // test height + } + tga_bits_per_pixel = stbi__get8(s); // bits per pixel + stbi__get8(s); // ignore alpha bits + if (tga_colormap_bpp != 0) { + if((tga_bits_per_pixel != 8) && (tga_bits_per_pixel != 16)) { + // when using a colormap, tga_bits_per_pixel is the size of the indexes + // I don't think anything but 8 or 16bit indexes makes sense + stbi__rewind(s); + return 0; + } + tga_comp = stbi__tga_get_comp(tga_colormap_bpp, 0, NULL); + } else { + tga_comp = stbi__tga_get_comp(tga_bits_per_pixel, (tga_image_type == 3) || (tga_image_type == 11), NULL); + } + if(!tga_comp) { + stbi__rewind(s); + return 0; + } + if (x) *x = tga_w; + if (y) *y = tga_h; + if (comp) *comp = tga_comp; + return 1; 
// seems to have passed everything +} + +static int stbi__tga_test(stbi__context *s) +{ + int res = 0; + int sz, tga_color_type; + stbi__get8(s); // discard Offset + tga_color_type = stbi__get8(s); // color type + if ( tga_color_type > 1 ) goto errorEnd; // only RGB or indexed allowed + sz = stbi__get8(s); // image type + if ( tga_color_type == 1 ) { // colormapped (paletted) image + if (sz != 1 && sz != 9) goto errorEnd; // colortype 1 demands image type 1 or 9 + stbi__skip(s,4); // skip index of first colormap entry and number of entries + sz = stbi__get8(s); // check bits per palette color entry + if ( (sz != 8) && (sz != 15) && (sz != 16) && (sz != 24) && (sz != 32) ) goto errorEnd; + stbi__skip(s,4); // skip image x and y origin + } else { // "normal" image w/o colormap + if ( (sz != 2) && (sz != 3) && (sz != 10) && (sz != 11) ) goto errorEnd; // only RGB or grey allowed, +/- RLE + stbi__skip(s,9); // skip colormap specification and image x/y origin + } + if ( stbi__get16le(s) < 1 ) goto errorEnd; // test width + if ( stbi__get16le(s) < 1 ) goto errorEnd; // test height + sz = stbi__get8(s); // bits per pixel + if ( (tga_color_type == 1) && (sz != 8) && (sz != 16) ) goto errorEnd; // for colormapped images, bpp is size of an index + if ( (sz != 8) && (sz != 15) && (sz != 16) && (sz != 24) && (sz != 32) ) goto errorEnd; + + res = 1; // if we got this far, everything's good and we can return 1 instead of 0 + +errorEnd: + stbi__rewind(s); + return res; +} + +// read 16bit value and convert to 24bit RGB +static void stbi__tga_read_rgb16(stbi__context *s, stbi_uc* out) +{ + stbi__uint16 px = (stbi__uint16)stbi__get16le(s); + stbi__uint16 fiveBitMask = 31; + // we have 3 channels with 5bits each + int r = (px >> 10) & fiveBitMask; + int g = (px >> 5) & fiveBitMask; + int b = px & fiveBitMask; + // Note that this saves the data in RGB(A) order, so it doesn't need to be swapped later + out[0] = (stbi_uc)((r * 255)/31); + out[1] = (stbi_uc)((g * 255)/31); + out[2] = (stbi_uc)((b * 255)/31); + + // some people claim that the most significant bit might be used for alpha + // (possibly if an alpha-bit is set in the "image descriptor byte") + // but that only made 16bit test images completely translucent.. + // so let's treat all 15 and 16bit TGAs as RGB with no alpha. +} + +static void *stbi__tga_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri) +{ + // read in the TGA header stuff + int tga_offset = stbi__get8(s); + int tga_indexed = stbi__get8(s); + int tga_image_type = stbi__get8(s); + int tga_is_RLE = 0; + int tga_palette_start = stbi__get16le(s); + int tga_palette_len = stbi__get16le(s); + int tga_palette_bits = stbi__get8(s); + int tga_x_origin = stbi__get16le(s); + int tga_y_origin = stbi__get16le(s); + int tga_width = stbi__get16le(s); + int tga_height = stbi__get16le(s); + int tga_bits_per_pixel = stbi__get8(s); + int tga_comp, tga_rgb16=0; + int tga_inverted = stbi__get8(s); + // int tga_alpha_bits = tga_inverted & 15; // the 4 lowest bits - unused (useless?) 
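   /*
    * [Editor's note: annotation only, not part of the upstream stb_image.h.]
    * The reads above cover the fixed 18-byte TGA header, in order: ID-field
    * length ("tga_offset", skipped later), colour-map type, image type
    * (1/2/3 = uncompressed, 9/10/11 = RLE, hence the "-= 8" adjustment
    * further down), colour-map first index / length / entry size, x and y
    * origin, width, height, bits per pixel, and the image-descriptor byte
    * ("tga_inverted" here: bit 5 selects top-to-bottom row order, and its
    * low 4 bits are the attribute/alpha bit count mentioned in the
    * commented-out line above).
    */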
+ // image data + unsigned char *tga_data; + unsigned char *tga_palette = NULL; + int i, j; + unsigned char raw_data[4] = {0}; + int RLE_count = 0; + int RLE_repeating = 0; + int read_next_pixel = 1; + STBI_NOTUSED(ri); + STBI_NOTUSED(tga_x_origin); // @TODO + STBI_NOTUSED(tga_y_origin); // @TODO + + if (tga_height > STBI_MAX_DIMENSIONS) return stbi__errpuc("too large","Very large image (corrupt?)"); + if (tga_width > STBI_MAX_DIMENSIONS) return stbi__errpuc("too large","Very large image (corrupt?)"); + + // do a tiny bit of precessing + if ( tga_image_type >= 8 ) + { + tga_image_type -= 8; + tga_is_RLE = 1; + } + tga_inverted = 1 - ((tga_inverted >> 5) & 1); + + // If I'm paletted, then I'll use the number of bits from the palette + if ( tga_indexed ) tga_comp = stbi__tga_get_comp(tga_palette_bits, 0, &tga_rgb16); + else tga_comp = stbi__tga_get_comp(tga_bits_per_pixel, (tga_image_type == 3), &tga_rgb16); + + if(!tga_comp) // shouldn't really happen, stbi__tga_test() should have ensured basic consistency + return stbi__errpuc("bad format", "Can't find out TGA pixelformat"); + + // tga info + *x = tga_width; + *y = tga_height; + if (comp) *comp = tga_comp; + + if (!stbi__mad3sizes_valid(tga_width, tga_height, tga_comp, 0)) + return stbi__errpuc("too large", "Corrupt TGA"); + + tga_data = (unsigned char*)stbi__malloc_mad3(tga_width, tga_height, tga_comp, 0); + if (!tga_data) return stbi__errpuc("outofmem", "Out of memory"); + + // skip to the data's starting position (offset usually = 0) + stbi__skip(s, tga_offset ); + + if ( !tga_indexed && !tga_is_RLE && !tga_rgb16 ) { + for (i=0; i < tga_height; ++i) { + int row = tga_inverted ? tga_height -i - 1 : i; + stbi_uc *tga_row = tga_data + row*tga_width*tga_comp; + stbi__getn(s, tga_row, tga_width * tga_comp); + } + } else { + // do I need to load a palette? + if ( tga_indexed) + { + if (tga_palette_len == 0) { /* you have to have at least one entry! */ + STBI_FREE(tga_data); + return stbi__errpuc("bad palette", "Corrupt TGA"); + } + + // any data to skip? (offset usually = 0) + stbi__skip(s, tga_palette_start ); + // load the palette + tga_palette = (unsigned char*)stbi__malloc_mad2(tga_palette_len, tga_comp, 0); + if (!tga_palette) { + STBI_FREE(tga_data); + return stbi__errpuc("outofmem", "Out of memory"); + } + if (tga_rgb16) { + stbi_uc *pal_entry = tga_palette; + STBI_ASSERT(tga_comp == STBI_rgb); + for (i=0; i < tga_palette_len; ++i) { + stbi__tga_read_rgb16(s, pal_entry); + pal_entry += tga_comp; + } + } else if (!stbi__getn(s, tga_palette, tga_palette_len * tga_comp)) { + STBI_FREE(tga_data); + STBI_FREE(tga_palette); + return stbi__errpuc("bad palette", "Corrupt TGA"); + } + } + // load the data + for (i=0; i < tga_width * tga_height; ++i) + { + // if I'm in RLE mode, do I need to get a RLE stbi__pngchunk? + if ( tga_is_RLE ) + { + if ( RLE_count == 0 ) + { + // yep, get the next byte as a RLE command + int RLE_cmd = stbi__get8(s); + RLE_count = 1 + (RLE_cmd & 127); + RLE_repeating = RLE_cmd >> 7; + read_next_pixel = 1; + } else if ( !RLE_repeating ) + { + read_next_pixel = 1; + } + } else + { + read_next_pixel = 1; + } + // OK, if I need to read a pixel, do it now + if ( read_next_pixel ) + { + // load however much data we did have + if ( tga_indexed ) + { + // read in index, then perform the lookup + int pal_idx = (tga_bits_per_pixel == 8) ? 
stbi__get8(s) : stbi__get16le(s); + if ( pal_idx >= tga_palette_len ) { + // invalid index + pal_idx = 0; + } + pal_idx *= tga_comp; + for (j = 0; j < tga_comp; ++j) { + raw_data[j] = tga_palette[pal_idx+j]; + } + } else if(tga_rgb16) { + STBI_ASSERT(tga_comp == STBI_rgb); + stbi__tga_read_rgb16(s, raw_data); + } else { + // read in the data raw + for (j = 0; j < tga_comp; ++j) { + raw_data[j] = stbi__get8(s); + } + } + // clear the reading flag for the next pixel + read_next_pixel = 0; + } // end of reading a pixel + + // copy data + for (j = 0; j < tga_comp; ++j) + tga_data[i*tga_comp+j] = raw_data[j]; + + // in case we're in RLE mode, keep counting down + --RLE_count; + } + // do I need to invert the image? + if ( tga_inverted ) + { + for (j = 0; j*2 < tga_height; ++j) + { + int index1 = j * tga_width * tga_comp; + int index2 = (tga_height - 1 - j) * tga_width * tga_comp; + for (i = tga_width * tga_comp; i > 0; --i) + { + unsigned char temp = tga_data[index1]; + tga_data[index1] = tga_data[index2]; + tga_data[index2] = temp; + ++index1; + ++index2; + } + } + } + // clear my palette, if I had one + if ( tga_palette != NULL ) + { + STBI_FREE( tga_palette ); + } + } + + // swap RGB - if the source data was RGB16, it already is in the right order + if (tga_comp >= 3 && !tga_rgb16) + { + unsigned char* tga_pixel = tga_data; + for (i=0; i < tga_width * tga_height; ++i) + { + unsigned char temp = tga_pixel[0]; + tga_pixel[0] = tga_pixel[2]; + tga_pixel[2] = temp; + tga_pixel += tga_comp; + } + } + + // convert to target component count + if (req_comp && req_comp != tga_comp) + tga_data = stbi__convert_format(tga_data, tga_comp, req_comp, tga_width, tga_height); + + // the things I do to get rid of an error message, and yet keep + // Microsoft's C compilers happy... [8^( + tga_palette_start = tga_palette_len = tga_palette_bits = + tga_x_origin = tga_y_origin = 0; + STBI_NOTUSED(tga_palette_start); + // OK, done + return tga_data; +} +#endif + +// ************************************************************************************************* +// Photoshop PSD loader -- PD by Thatcher Ulrich, integration by Nicolas Schulz, tweaked by STB + +#ifndef STBI_NO_PSD +static int stbi__psd_test(stbi__context *s) +{ + int r = (stbi__get32be(s) == 0x38425053); + stbi__rewind(s); + return r; +} + +static int stbi__psd_decode_rle(stbi__context *s, stbi_uc *p, int pixelCount) +{ + int count, nleft, len; + + count = 0; + while ((nleft = pixelCount - count) > 0) { + len = stbi__get8(s); + if (len == 128) { + // No-op. + } else if (len < 128) { + // Copy next len+1 bytes literally. + len++; + if (len > nleft) return 0; // corrupt data + count += len; + while (len) { + *p = stbi__get8(s); + p += 4; + len--; + } + } else if (len > 128) { + stbi_uc val; + // Next -len+1 bytes in the dest are replicated from next source byte. + // (Interpret len as a negative 8-bit int.) + len = 257 - len; + if (len > nleft) return 0; // corrupt data + val = stbi__get8(s); + count += len; + while (len) { + *p = val; + p += 4; + len--; + } + } + } + + return 1; +} + +static void *stbi__psd_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri, int bpc) +{ + int pixelCount; + int channelCount, compression; + int channel, i; + int bitdepth; + int w,h; + stbi_uc *out; + STBI_NOTUSED(ri); + + // Check identifier + if (stbi__get32be(s) != 0x38425053) // "8BPS" + return stbi__errpuc("not PSD", "Corrupt PSD image"); + + // Check file type version. 
+ if (stbi__get16be(s) != 1) + return stbi__errpuc("wrong version", "Unsupported version of PSD image"); + + // Skip 6 reserved bytes. + stbi__skip(s, 6 ); + + // Read the number of channels (R, G, B, A, etc). + channelCount = stbi__get16be(s); + if (channelCount < 0 || channelCount > 16) + return stbi__errpuc("wrong channel count", "Unsupported number of channels in PSD image"); + + // Read the rows and columns of the image. + h = stbi__get32be(s); + w = stbi__get32be(s); + + if (h > STBI_MAX_DIMENSIONS) return stbi__errpuc("too large","Very large image (corrupt?)"); + if (w > STBI_MAX_DIMENSIONS) return stbi__errpuc("too large","Very large image (corrupt?)"); + + // Make sure the depth is 8 bits. + bitdepth = stbi__get16be(s); + if (bitdepth != 8 && bitdepth != 16) + return stbi__errpuc("unsupported bit depth", "PSD bit depth is not 8 or 16 bit"); + + // Make sure the color mode is RGB. + // Valid options are: + // 0: Bitmap + // 1: Grayscale + // 2: Indexed color + // 3: RGB color + // 4: CMYK color + // 7: Multichannel + // 8: Duotone + // 9: Lab color + if (stbi__get16be(s) != 3) + return stbi__errpuc("wrong color format", "PSD is not in RGB color format"); + + // Skip the Mode Data. (It's the palette for indexed color; other info for other modes.) + stbi__skip(s,stbi__get32be(s) ); + + // Skip the image resources. (resolution, pen tool paths, etc) + stbi__skip(s, stbi__get32be(s) ); + + // Skip the reserved data. + stbi__skip(s, stbi__get32be(s) ); + + // Find out if the data is compressed. + // Known values: + // 0: no compression + // 1: RLE compressed + compression = stbi__get16be(s); + if (compression > 1) + return stbi__errpuc("bad compression", "PSD has an unknown compression format"); + + // Check size + if (!stbi__mad3sizes_valid(4, w, h, 0)) + return stbi__errpuc("too large", "Corrupt PSD"); + + // Create the destination image. + + if (!compression && bitdepth == 16 && bpc == 16) { + out = (stbi_uc *) stbi__malloc_mad3(8, w, h, 0); + ri->bits_per_channel = 16; + } else + out = (stbi_uc *) stbi__malloc(4 * w*h); + + if (!out) return stbi__errpuc("outofmem", "Out of memory"); + pixelCount = w*h; + + // Initialize the data to zero. + //memset( out, 0, pixelCount * 4 ); + + // Finally, the image data. + if (compression) { + // RLE as used by .PSD and .TIFF + // Loop until you get the number of unpacked bytes you are expecting: + // Read the next source byte into n. + // If n is between 0 and 127 inclusive, copy the next n+1 bytes literally. + // Else if n is between -127 and -1 inclusive, copy the next byte -n+1 times. + // Else if n is 128, noop. + // Endloop + + // The RLE-compressed data is preceded by a 2-byte data count for each row in the data, + // which we're going to just skip. + stbi__skip(s, h * channelCount * 2 ); + + // Read the RLE data by channel. + for (channel = 0; channel < 4; channel++) { + stbi_uc *p; + + p = out+channel; + if (channel >= channelCount) { + // Fill this channel with default data. + for (i = 0; i < pixelCount; i++, p += 4) + *p = (channel == 3 ? 255 : 0); + } else { + // Read the RLE data. + if (!stbi__psd_decode_rle(s, p, pixelCount)) { + STBI_FREE(out); + return stbi__errpuc("corrupt", "bad RLE data"); + } + } + } + + } else { + // We're at the raw image data. It's each channel in order (Red, Green, Blue, Alpha, ...) + // where each channel consists of an 8-bit (or 16-bit) value for each pixel in the image. + + // Read the data by channel. 
+ for (channel = 0; channel < 4; channel++) { + if (channel >= channelCount) { + // Fill this channel with default data. + if (bitdepth == 16 && bpc == 16) { + stbi__uint16 *q = ((stbi__uint16 *) out) + channel; + stbi__uint16 val = channel == 3 ? 65535 : 0; + for (i = 0; i < pixelCount; i++, q += 4) + *q = val; + } else { + stbi_uc *p = out+channel; + stbi_uc val = channel == 3 ? 255 : 0; + for (i = 0; i < pixelCount; i++, p += 4) + *p = val; + } + } else { + if (ri->bits_per_channel == 16) { // output bpc + stbi__uint16 *q = ((stbi__uint16 *) out) + channel; + for (i = 0; i < pixelCount; i++, q += 4) + *q = (stbi__uint16) stbi__get16be(s); + } else { + stbi_uc *p = out+channel; + if (bitdepth == 16) { // input bpc + for (i = 0; i < pixelCount; i++, p += 4) + *p = (stbi_uc) (stbi__get16be(s) >> 8); + } else { + for (i = 0; i < pixelCount; i++, p += 4) + *p = stbi__get8(s); + } + } + } + } + } + + // remove weird white matte from PSD + if (channelCount >= 4) { + if (ri->bits_per_channel == 16) { + for (i=0; i < w*h; ++i) { + stbi__uint16 *pixel = (stbi__uint16 *) out + 4*i; + if (pixel[3] != 0 && pixel[3] != 65535) { + float a = pixel[3] / 65535.0f; + float ra = 1.0f / a; + float inv_a = 65535.0f * (1 - ra); + pixel[0] = (stbi__uint16) (pixel[0]*ra + inv_a); + pixel[1] = (stbi__uint16) (pixel[1]*ra + inv_a); + pixel[2] = (stbi__uint16) (pixel[2]*ra + inv_a); + } + } + } else { + for (i=0; i < w*h; ++i) { + unsigned char *pixel = out + 4*i; + if (pixel[3] != 0 && pixel[3] != 255) { + float a = pixel[3] / 255.0f; + float ra = 1.0f / a; + float inv_a = 255.0f * (1 - ra); + pixel[0] = (unsigned char) (pixel[0]*ra + inv_a); + pixel[1] = (unsigned char) (pixel[1]*ra + inv_a); + pixel[2] = (unsigned char) (pixel[2]*ra + inv_a); + } + } + } + } + + // convert to desired output format + if (req_comp && req_comp != 4) { + if (ri->bits_per_channel == 16) + out = (stbi_uc *) stbi__convert_format16((stbi__uint16 *) out, 4, req_comp, w, h); + else + out = stbi__convert_format(out, 4, req_comp, w, h); + if (out == NULL) return out; // stbi__convert_format frees input on failure + } + + if (comp) *comp = 4; + *y = h; + *x = w; + + return out; +} +#endif + +// ************************************************************************************************* +// Softimage PIC loader +// by Tom Seddon +// +// See http://softimage.wiki.softimage.com/index.php/INFO:_PIC_file_format +// See http://ozviz.wasp.uwa.edu.au/~pbourke/dataformats/softimagepic/ + +#ifndef STBI_NO_PIC +static int stbi__pic_is4(stbi__context *s,const char *str) +{ + int i; + for (i=0; i<4; ++i) + if (stbi__get8(s) != (stbi_uc)str[i]) + return 0; + + return 1; +} + +static int stbi__pic_test_core(stbi__context *s) +{ + int i; + + if (!stbi__pic_is4(s,"\x53\x80\xF6\x34")) + return 0; + + for(i=0;i<84;++i) + stbi__get8(s); + + if (!stbi__pic_is4(s,"PICT")) + return 0; + + return 1; +} + +typedef struct +{ + stbi_uc size,type,channel; +} stbi__pic_packet; + +static stbi_uc *stbi__readval(stbi__context *s, int channel, stbi_uc *dest) +{ + int mask=0x80, i; + + for (i=0; i<4; ++i, mask>>=1) { + if (channel & mask) { + if (stbi__at_eof(s)) return stbi__errpuc("bad file","PIC file too short"); + dest[i]=stbi__get8(s); + } + } + + return dest; +} + +static void stbi__copyval(int channel,stbi_uc *dest,const stbi_uc *src) +{ + int mask=0x80,i; + + for (i=0;i<4; ++i, mask>>=1) + if (channel&mask) + dest[i]=src[i]; +} + +static stbi_uc *stbi__pic_load_core(stbi__context *s,int width,int height,int *comp, stbi_uc *result) +{ + int 
act_comp=0,num_packets=0,y,chained; + stbi__pic_packet packets[10]; + + // this will (should...) cater for even some bizarre stuff like having data + // for the same channel in multiple packets. + do { + stbi__pic_packet *packet; + + if (num_packets==sizeof(packets)/sizeof(packets[0])) + return stbi__errpuc("bad format","too many packets"); + + packet = &packets[num_packets++]; + + chained = stbi__get8(s); + packet->size = stbi__get8(s); + packet->type = stbi__get8(s); + packet->channel = stbi__get8(s); + + act_comp |= packet->channel; + + if (stbi__at_eof(s)) return stbi__errpuc("bad file","file too short (reading packets)"); + if (packet->size != 8) return stbi__errpuc("bad format","packet isn't 8bpp"); + } while (chained); + + *comp = (act_comp & 0x10 ? 4 : 3); // has alpha channel? + + for(y=0; y<height; ++y) { + int packet_idx; + + for(packet_idx=0; packet_idx < num_packets; ++packet_idx) { + stbi__pic_packet *packet = &packets[packet_idx]; + stbi_uc *dest = result+y*width*4; + + switch (packet->type) { + default: + return stbi__errpuc("bad format","packet has bad compression type"); + + case 0: {//uncompressed + int x; + + for(x=0;x<width;++x, dest+=4) + if (!stbi__readval(s,packet->channel,dest)) + return 0; + break; + } + + case 1://Pure RLE + { + int left=width, i; + + while (left>0) { + stbi_uc count,value[4]; + + count=stbi__get8(s); + if (stbi__at_eof(s)) return stbi__errpuc("bad file","file too short (pure read count)"); + + if (count > left) + count = (stbi_uc) left; + + if (!stbi__readval(s,packet->channel,value)) return 0; + + for(i=0; i<count; ++i, dest+=4) + stbi__copyval(packet->channel,dest,value); + left -= count; + } + } + break; + + case 2: {//Mixed RLE + int left=width; + while (left>0) { + int count = stbi__get8(s), i; + if (stbi__at_eof(s)) return stbi__errpuc("bad file","file too short (mixed read count)"); + + if (count >= 128) { // Repeated + stbi_uc value[4]; + + if (count==128) + count = stbi__get16be(s); + else + count -= 127; + if (count > left) + return stbi__errpuc("bad file","scanline overrun"); + + if (!stbi__readval(s,packet->channel,value)) + return 0; + + for(i=0;i<count;++i, dest+=4) + stbi__copyval(packet->channel,dest,value); + } else { // Raw + ++count; + if (count>left) return stbi__errpuc("bad file","scanline overrun"); + + for(i=0;i<count;++i, dest+=4) + if (!stbi__readval(s,packet->channel,dest)) + return 0; + } + left-=count; + } + break; + } + } + } + } + + return result; +} + +static void *stbi__pic_load(stbi__context *s,int *px,int *py,int *comp,int req_comp, stbi__result_info *ri) +{ + stbi_uc *result; + int i, x,y, internal_comp; + STBI_NOTUSED(ri); + + if (!comp) comp = &internal_comp; + + for (i=0; i<92; ++i) + stbi__get8(s); + + x = stbi__get16be(s); + y = stbi__get16be(s); + + if (y > STBI_MAX_DIMENSIONS) return stbi__errpuc("too large","Very large image (corrupt?)"); + if (x > STBI_MAX_DIMENSIONS) return stbi__errpuc("too large","Very large image (corrupt?)"); + + if (stbi__at_eof(s)) return stbi__errpuc("bad file","file too short (pic header)"); + if (!stbi__mad3sizes_valid(x, y, 4, 0)) return stbi__errpuc("too large", "PIC image too large to decode"); + + stbi__get32be(s); //skip `ratio' + stbi__get16be(s); //skip `fields' + stbi__get16be(s); //skip `pad' + + // intermediate buffer is RGBA + result = (stbi_uc *) stbi__malloc_mad3(x, y, 4, 0); + memset(result, 0xff, x*y*4); + + if (!stbi__pic_load_core(s,x,y,comp, result)) { + STBI_FREE(result); + result=0; + } + *px = x; + *py = y; + if (req_comp == 0) req_comp = *comp; + result=stbi__convert_format(result,4,req_comp,x,y); + + return result; +} + +static int stbi__pic_test(stbi__context *s) +{ + int r = stbi__pic_test_core(s); + stbi__rewind(s); + return r; +} +#endif + +// ************************************************************************************************* +// GIF loader -- public domain by Jean-Marc Lienher -- simplified/shrunk by stb + +#ifndef STBI_NO_GIF
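/*
 * [Editor's note: illustrative sketch only, not part of the upstream
 * stb_image.h and excluded from compilation.] The "Mixed RLE" case in
 * stbi__pic_load_core() above decodes each run from a single count byte:
 * a value >= 128 marks a repeated run (128 itself is followed by a 16-bit
 * big-endian count, otherwise the run length is count-127), while a value
 * < 128 marks a raw run of count+1 literal pixels. The helper below shows
 * only that count decoding on an in-memory buffer; the function and
 * parameter names are invented for the example.
 */
#if 0
#include <stddef.h>

/* Decode one Mixed-RLE run header from src. Returns the number of header
   bytes consumed (1 or 3), stores the pixel count in *count and sets
   *repeated to 1 for a repeated run, 0 for a raw run. The caller is
   assumed to have at least 3 readable bytes at src. */
static size_t pic_mixed_rle_run(const unsigned char *src, int *count, int *repeated)
{
   int c = src[0];
   if (c >= 128) {                      /* repeated run */
      *repeated = 1;
      if (c == 128) {                   /* long form: explicit 16-bit big-endian count */
         *count = (src[1] << 8) | src[2];
         return 3;
      }
      *count = c - 127;                 /* short form: 2..128 repeats */
      return 1;
   }
   *repeated = 0;                       /* raw run: c+1 literal pixels follow */
   *count = c + 1;
   return 1;
}
#endif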
+typedef struct +{ + stbi__int16 prefix; + stbi_uc first; + stbi_uc suffix; +} stbi__gif_lzw; + +typedef struct +{ + int w,h; + stbi_uc *out; // output buffer (always 4 components) + stbi_uc *background; // The current "background" as far as a gif is concerned + stbi_uc *history; + int flags, bgindex, ratio, transparent, eflags; + stbi_uc pal[256][4]; + stbi_uc lpal[256][4]; + stbi__gif_lzw codes[8192]; + stbi_uc *color_table; + int parse, step; + int lflags; + int start_x, start_y; + int max_x, max_y; + int cur_x, cur_y; + int line_size; + int delay; +} stbi__gif; + +static int stbi__gif_test_raw(stbi__context *s) +{ + int sz; + if (stbi__get8(s) != 'G' || stbi__get8(s) != 'I' || stbi__get8(s) != 'F' || stbi__get8(s) != '8') return 0; + sz = stbi__get8(s); + if (sz != '9' && sz != '7') return 0; + if (stbi__get8(s) != 'a') return 0; + return 1; +} + +static int stbi__gif_test(stbi__context *s) +{ + int r = stbi__gif_test_raw(s); + stbi__rewind(s); + return r; +} + +static void stbi__gif_parse_colortable(stbi__context *s, stbi_uc pal[256][4], int num_entries, int transp) +{ + int i; + for (i=0; i < num_entries; ++i) { + pal[i][2] = stbi__get8(s); + pal[i][1] = stbi__get8(s); + pal[i][0] = stbi__get8(s); + pal[i][3] = transp == i ? 0 : 255; + } +} + +static int stbi__gif_header(stbi__context *s, stbi__gif *g, int *comp, int is_info) +{ + stbi_uc version; + if (stbi__get8(s) != 'G' || stbi__get8(s) != 'I' || stbi__get8(s) != 'F' || stbi__get8(s) != '8') + return stbi__err("not GIF", "Corrupt GIF"); + + version = stbi__get8(s); + if (version != '7' && version != '9') return stbi__err("not GIF", "Corrupt GIF"); + if (stbi__get8(s) != 'a') return stbi__err("not GIF", "Corrupt GIF"); + + stbi__g_failure_reason = ""; + g->w = stbi__get16le(s); + g->h = stbi__get16le(s); + g->flags = stbi__get8(s); + g->bgindex = stbi__get8(s); + g->ratio = stbi__get8(s); + g->transparent = -1; + + if (g->w > STBI_MAX_DIMENSIONS) return stbi__err("too large","Very large image (corrupt?)"); + if (g->h > STBI_MAX_DIMENSIONS) return stbi__err("too large","Very large image (corrupt?)"); + + if (comp != 0) *comp = 4; // can't actually tell whether it's 3 or 4 until we parse the comments + + if (is_info) return 1; + + if (g->flags & 0x80) + stbi__gif_parse_colortable(s,g->pal, 2 << (g->flags & 7), -1); + + return 1; +} + +static int stbi__gif_info_raw(stbi__context *s, int *x, int *y, int *comp) +{ + stbi__gif* g = (stbi__gif*) stbi__malloc(sizeof(stbi__gif)); + if (!stbi__gif_header(s, g, comp, 1)) { + STBI_FREE(g); + stbi__rewind( s ); + return 0; + } + if (x) *x = g->w; + if (y) *y = g->h; + STBI_FREE(g); + return 1; +} + +static void stbi__out_gif_code(stbi__gif *g, stbi__uint16 code) +{ + stbi_uc *p, *c; + int idx; + + // recurse to decode the prefixes, since the linked-list is backwards, + // and working backwards through an interleaved image would be nasty + if (g->codes[code].prefix >= 0) + stbi__out_gif_code(g, g->codes[code].prefix); + + if (g->cur_y >= g->max_y) return; + + idx = g->cur_x + g->cur_y; + p = &g->out[idx]; + g->history[idx / 4] = 1; + + c = &g->color_table[g->codes[code].suffix * 4]; + if (c[3] > 128) { // don't render transparent pixels; + p[0] = c[2]; + p[1] = c[1]; + p[2] = c[0]; + p[3] = c[3]; + } + g->cur_x += 4; + + if (g->cur_x >= g->max_x) { + g->cur_x = g->start_x; + g->cur_y += g->step; + + while (g->cur_y >= g->max_y && g->parse > 0) { + g->step = (1 << g->parse) * g->line_size; + g->cur_y = g->start_y + (g->step >> 1); + --g->parse; + } + } +} + +static stbi_uc 
*stbi__process_gif_raster(stbi__context *s, stbi__gif *g) +{ + stbi_uc lzw_cs; + stbi__int32 len, init_code; + stbi__uint32 first; + stbi__int32 codesize, codemask, avail, oldcode, bits, valid_bits, clear; + stbi__gif_lzw *p; + + lzw_cs = stbi__get8(s); + if (lzw_cs > 12) return NULL; + clear = 1 << lzw_cs; + first = 1; + codesize = lzw_cs + 1; + codemask = (1 << codesize) - 1; + bits = 0; + valid_bits = 0; + for (init_code = 0; init_code < clear; init_code++) { + g->codes[init_code].prefix = -1; + g->codes[init_code].first = (stbi_uc) init_code; + g->codes[init_code].suffix = (stbi_uc) init_code; + } + + // support no starting clear code + avail = clear+2; + oldcode = -1; + + len = 0; + for(;;) { + if (valid_bits < codesize) { + if (len == 0) { + len = stbi__get8(s); // start new block + if (len == 0) + return g->out; + } + --len; + bits |= (stbi__int32) stbi__get8(s) << valid_bits; + valid_bits += 8; + } else { + stbi__int32 code = bits & codemask; + bits >>= codesize; + valid_bits -= codesize; + // @OPTIMIZE: is there some way we can accelerate the non-clear path? + if (code == clear) { // clear code + codesize = lzw_cs + 1; + codemask = (1 << codesize) - 1; + avail = clear + 2; + oldcode = -1; + first = 0; + } else if (code == clear + 1) { // end of stream code + stbi__skip(s, len); + while ((len = stbi__get8(s)) > 0) + stbi__skip(s,len); + return g->out; + } else if (code <= avail) { + if (first) { + return stbi__errpuc("no clear code", "Corrupt GIF"); + } + + if (oldcode >= 0) { + p = &g->codes[avail++]; + if (avail > 8192) { + return stbi__errpuc("too many codes", "Corrupt GIF"); + } + + p->prefix = (stbi__int16) oldcode; + p->first = g->codes[oldcode].first; + p->suffix = (code == avail) ? p->first : g->codes[code].first; + } else if (code == avail) + return stbi__errpuc("illegal code in raster", "Corrupt GIF"); + + stbi__out_gif_code(g, (stbi__uint16) code); + + if ((avail & codemask) == 0 && avail <= 0x0FFF) { + codesize++; + codemask = (1 << codesize) - 1; + } + + oldcode = code; + } else { + return stbi__errpuc("illegal code in raster", "Corrupt GIF"); + } + } + } +} + +// this function is designed to support animated gifs, although stb_image doesn't support it +// two back is the image from two frames ago, used for a very specific disposal format +static stbi_uc *stbi__gif_load_next(stbi__context *s, stbi__gif *g, int *comp, int req_comp, stbi_uc *two_back) +{ + int dispose; + int first_frame; + int pi; + int pcount; + STBI_NOTUSED(req_comp); + + // on first frame, any non-written pixels get the background colour (non-transparent) + first_frame = 0; + if (g->out == 0) { + if (!stbi__gif_header(s, g, comp,0)) return 0; // stbi__g_failure_reason set by stbi__gif_header + if (!stbi__mad3sizes_valid(4, g->w, g->h, 0)) + return stbi__errpuc("too large", "GIF image is too large"); + pcount = g->w * g->h; + g->out = (stbi_uc *) stbi__malloc(4 * pcount); + g->background = (stbi_uc *) stbi__malloc(4 * pcount); + g->history = (stbi_uc *) stbi__malloc(pcount); + if (!g->out || !g->background || !g->history) + return stbi__errpuc("outofmem", "Out of memory"); + + // image is treated as "transparent" at the start - ie, nothing overwrites the current background; + // background colour is only used for pixels that are not rendered first frame, after that "background" + // color refers to the color that was there the previous frame. 
+ memset(g->out, 0x00, 4 * pcount); + memset(g->background, 0x00, 4 * pcount); // state of the background (starts transparent) + memset(g->history, 0x00, pcount); // pixels that were affected previous frame + first_frame = 1; + } else { + // second frame - how do we dispose of the previous one? + dispose = (g->eflags & 0x1C) >> 2; + pcount = g->w * g->h; + + if ((dispose == 3) && (two_back == 0)) { + dispose = 2; // if I don't have an image to revert back to, default to the old background + } + + if (dispose == 3) { // use previous graphic + for (pi = 0; pi < pcount; ++pi) { + if (g->history[pi]) { + memcpy( &g->out[pi * 4], &two_back[pi * 4], 4 ); + } + } + } else if (dispose == 2) { + // restore what was changed last frame to background before that frame; + for (pi = 0; pi < pcount; ++pi) { + if (g->history[pi]) { + memcpy( &g->out[pi * 4], &g->background[pi * 4], 4 ); + } + } + } else { + // This is a non-disposal case eithe way, so just + // leave the pixels as is, and they will become the new background + // 1: do not dispose + // 0: not specified. + } + + // background is what out is after the undoing of the previou frame; + memcpy( g->background, g->out, 4 * g->w * g->h ); + } + + // clear my history; + memset( g->history, 0x00, g->w * g->h ); // pixels that were affected previous frame + + for (;;) { + int tag = stbi__get8(s); + switch (tag) { + case 0x2C: /* Image Descriptor */ + { + stbi__int32 x, y, w, h; + stbi_uc *o; + + x = stbi__get16le(s); + y = stbi__get16le(s); + w = stbi__get16le(s); + h = stbi__get16le(s); + if (((x + w) > (g->w)) || ((y + h) > (g->h))) + return stbi__errpuc("bad Image Descriptor", "Corrupt GIF"); + + g->line_size = g->w * 4; + g->start_x = x * 4; + g->start_y = y * g->line_size; + g->max_x = g->start_x + w * 4; + g->max_y = g->start_y + h * g->line_size; + g->cur_x = g->start_x; + g->cur_y = g->start_y; + + // if the width of the specified rectangle is 0, that means + // we may not see *any* pixels or the image is malformed; + // to make sure this is caught, move the current y down to + // max_y (which is what out_gif_code checks). + if (w == 0) + g->cur_y = g->max_y; + + g->lflags = stbi__get8(s); + + if (g->lflags & 0x40) { + g->step = 8 * g->line_size; // first interlaced spacing + g->parse = 3; + } else { + g->step = g->line_size; + g->parse = 0; + } + + if (g->lflags & 0x80) { + stbi__gif_parse_colortable(s,g->lpal, 2 << (g->lflags & 7), g->eflags & 0x01 ? g->transparent : -1); + g->color_table = (stbi_uc *) g->lpal; + } else if (g->flags & 0x80) { + g->color_table = (stbi_uc *) g->pal; + } else + return stbi__errpuc("missing color table", "Corrupt GIF"); + + o = stbi__process_gif_raster(s, g); + if (!o) return NULL; + + // if this was the first frame, + pcount = g->w * g->h; + if (first_frame && (g->bgindex > 0)) { + // if first frame, any pixel not drawn to gets the background color + for (pi = 0; pi < pcount; ++pi) { + if (g->history[pi] == 0) { + g->pal[g->bgindex][3] = 255; // just in case it was made transparent, undo that; It will be reset next frame if need be; + memcpy( &g->out[pi * 4], &g->pal[g->bgindex], 4 ); + } + } + } + + return o; + } + + case 0x21: // Comment Extension. + { + int len; + int ext = stbi__get8(s); + if (ext == 0xF9) { // Graphic Control Extension. + len = stbi__get8(s); + if (len == 4) { + g->eflags = stbi__get8(s); + g->delay = 10 * stbi__get16le(s); // delay - 1/100th of a second, saving as 1/1000ths. 
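            /*
             * [Editor's note: annotation only, not part of the upstream file.]
             * The Graphic Control Extension body parsed here is 4 bytes (per
             * the GIF89a spec): byte 0 is the packed flag byte stored in
             * g->eflags (bits 2-4 = disposal method, as extracted by
             * (g->eflags & 0x1C) >> 2 above; bit 0 = transparent-colour
             * flag), bytes 1-2 are the little-endian frame delay in units of
             * 1/100 s, and byte 3 is the transparent colour index read just
             * below.
             */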
+ + // unset old transparent + if (g->transparent >= 0) { + g->pal[g->transparent][3] = 255; + } + if (g->eflags & 0x01) { + g->transparent = stbi__get8(s); + if (g->transparent >= 0) { + g->pal[g->transparent][3] = 0; + } + } else { + // don't need transparent + stbi__skip(s, 1); + g->transparent = -1; + } + } else { + stbi__skip(s, len); + break; + } + } + while ((len = stbi__get8(s)) != 0) { + stbi__skip(s, len); + } + break; + } + + case 0x3B: // gif stream termination code + return (stbi_uc *) s; // using '1' causes warning on some compilers + + default: + return stbi__errpuc("unknown code", "Corrupt GIF"); + } + } +} + +static void *stbi__load_gif_main(stbi__context *s, int **delays, int *x, int *y, int *z, int *comp, int req_comp) +{ + if (stbi__gif_test(s)) { + int layers = 0; + stbi_uc *u = 0; + stbi_uc *out = 0; + stbi_uc *two_back = 0; + stbi__gif g; + int stride; + int out_size = 0; + int delays_size = 0; + memset(&g, 0, sizeof(g)); + if (delays) { + *delays = 0; + } + + do { + u = stbi__gif_load_next(s, &g, comp, req_comp, two_back); + if (u == (stbi_uc *) s) u = 0; // end of animated gif marker + + if (u) { + *x = g.w; + *y = g.h; + ++layers; + stride = g.w * g.h * 4; + + if (out) { + void *tmp = (stbi_uc*) STBI_REALLOC_SIZED( out, out_size, layers * stride ); + if (NULL == tmp) { + STBI_FREE(g.out); + STBI_FREE(g.history); + STBI_FREE(g.background); + return stbi__errpuc("outofmem", "Out of memory"); + } + else { + out = (stbi_uc*) tmp; + out_size = layers * stride; + } + + if (delays) { + *delays = (int*) STBI_REALLOC_SIZED( *delays, delays_size, sizeof(int) * layers ); + delays_size = layers * sizeof(int); + } + } else { + out = (stbi_uc*)stbi__malloc( layers * stride ); + out_size = layers * stride; + if (delays) { + *delays = (int*) stbi__malloc( layers * sizeof(int) ); + delays_size = layers * sizeof(int); + } + } + memcpy( out + ((layers - 1) * stride), u, stride ); + if (layers >= 2) { + two_back = out - 2 * stride; + } + + if (delays) { + (*delays)[layers - 1U] = g.delay; + } + } + } while (u != 0); + + // free temp buffer; + STBI_FREE(g.out); + STBI_FREE(g.history); + STBI_FREE(g.background); + + // do the final conversion after loading everything; + if (req_comp && req_comp != 4) + out = stbi__convert_format(out, 4, req_comp, layers * g.w, g.h); + + *z = layers; + return out; + } else { + return stbi__errpuc("not GIF", "Image was not as a gif type."); + } +} + +static void *stbi__gif_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri) +{ + stbi_uc *u = 0; + stbi__gif g; + memset(&g, 0, sizeof(g)); + STBI_NOTUSED(ri); + + u = stbi__gif_load_next(s, &g, comp, req_comp, 0); + if (u == (stbi_uc *) s) u = 0; // end of animated gif marker + if (u) { + *x = g.w; + *y = g.h; + + // moved conversion to after successful load so that the same + // can be done for multiple frames. + if (req_comp && req_comp != 4) + u = stbi__convert_format(u, 4, req_comp, g.w, g.h); + } else if (g.out) { + // if there was an error and we allocated an image buffer, free it! 
+ STBI_FREE(g.out); + } + + // free buffers needed for multiple frame loading; + STBI_FREE(g.history); + STBI_FREE(g.background); + + return u; +} + +static int stbi__gif_info(stbi__context *s, int *x, int *y, int *comp) +{ + return stbi__gif_info_raw(s,x,y,comp); +} +#endif + +// ************************************************************************************************* +// Radiance RGBE HDR loader +// originally by Nicolas Schulz +#ifndef STBI_NO_HDR +static int stbi__hdr_test_core(stbi__context *s, const char *signature) +{ + int i; + for (i=0; signature[i]; ++i) + if (stbi__get8(s) != signature[i]) + return 0; + stbi__rewind(s); + return 1; +} + +static int stbi__hdr_test(stbi__context* s) +{ + int r = stbi__hdr_test_core(s, "#?RADIANCE\n"); + stbi__rewind(s); + if(!r) { + r = stbi__hdr_test_core(s, "#?RGBE\n"); + stbi__rewind(s); + } + return r; +} + +#define STBI__HDR_BUFLEN 1024 +static char *stbi__hdr_gettoken(stbi__context *z, char *buffer) +{ + int len=0; + char c = '\0'; + + c = (char) stbi__get8(z); + + while (!stbi__at_eof(z) && c != '\n') { + buffer[len++] = c; + if (len == STBI__HDR_BUFLEN-1) { + // flush to end of line + while (!stbi__at_eof(z) && stbi__get8(z) != '\n') + ; + break; + } + c = (char) stbi__get8(z); + } + + buffer[len] = 0; + return buffer; +} + +static void stbi__hdr_convert(float *output, stbi_uc *input, int req_comp) +{ + if ( input[3] != 0 ) { + float f1; + // Exponent + f1 = (float) ldexp(1.0f, input[3] - (int)(128 + 8)); + if (req_comp <= 2) + output[0] = (input[0] + input[1] + input[2]) * f1 / 3; + else { + output[0] = input[0] * f1; + output[1] = input[1] * f1; + output[2] = input[2] * f1; + } + if (req_comp == 2) output[1] = 1; + if (req_comp == 4) output[3] = 1; + } else { + switch (req_comp) { + case 4: output[3] = 1; /* fallthrough */ + case 3: output[0] = output[1] = output[2] = 0; + break; + case 2: output[1] = 1; /* fallthrough */ + case 1: output[0] = 0; + break; + } + } +} + +static float *stbi__hdr_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri) +{ + char buffer[STBI__HDR_BUFLEN]; + char *token; + int valid = 0; + int width, height; + stbi_uc *scanline; + float *hdr_data; + int len; + unsigned char count, value; + int i, j, k, c1,c2, z; + const char *headerToken; + STBI_NOTUSED(ri); + + // Check identifier + headerToken = stbi__hdr_gettoken(s,buffer); + if (strcmp(headerToken, "#?RADIANCE") != 0 && strcmp(headerToken, "#?RGBE") != 0) + return stbi__errpf("not HDR", "Corrupt HDR image"); + + // Parse header + for(;;) { + token = stbi__hdr_gettoken(s,buffer); + if (token[0] == 0) break; + if (strcmp(token, "FORMAT=32-bit_rle_rgbe") == 0) valid = 1; + } + + if (!valid) return stbi__errpf("unsupported format", "Unsupported HDR format"); + + // Parse width and height + // can't use sscanf() if we're not using stdio! 
+ token = stbi__hdr_gettoken(s,buffer); + if (strncmp(token, "-Y ", 3)) return stbi__errpf("unsupported data layout", "Unsupported HDR format"); + token += 3; + height = (int) strtol(token, &token, 10); + while (*token == ' ') ++token; + if (strncmp(token, "+X ", 3)) return stbi__errpf("unsupported data layout", "Unsupported HDR format"); + token += 3; + width = (int) strtol(token, NULL, 10); + + if (height > STBI_MAX_DIMENSIONS) return stbi__errpf("too large","Very large image (corrupt?)"); + if (width > STBI_MAX_DIMENSIONS) return stbi__errpf("too large","Very large image (corrupt?)"); + + *x = width; + *y = height; + + if (comp) *comp = 3; + if (req_comp == 0) req_comp = 3; + + if (!stbi__mad4sizes_valid(width, height, req_comp, sizeof(float), 0)) + return stbi__errpf("too large", "HDR image is too large"); + + // Read data + hdr_data = (float *) stbi__malloc_mad4(width, height, req_comp, sizeof(float), 0); + if (!hdr_data) + return stbi__errpf("outofmem", "Out of memory"); + + // Load image data + // image data is stored as some number of sca + if ( width < 8 || width >= 32768) { + // Read flat data + for (j=0; j < height; ++j) { + for (i=0; i < width; ++i) { + stbi_uc rgbe[4]; + main_decode_loop: + stbi__getn(s, rgbe, 4); + stbi__hdr_convert(hdr_data + j * width * req_comp + i * req_comp, rgbe, req_comp); + } + } + } else { + // Read RLE-encoded data + scanline = NULL; + + for (j = 0; j < height; ++j) { + c1 = stbi__get8(s); + c2 = stbi__get8(s); + len = stbi__get8(s); + if (c1 != 2 || c2 != 2 || (len & 0x80)) { + // not run-length encoded, so we have to actually use THIS data as a decoded + // pixel (note this can't be a valid pixel--one of RGB must be >= 128) + stbi_uc rgbe[4]; + rgbe[0] = (stbi_uc) c1; + rgbe[1] = (stbi_uc) c2; + rgbe[2] = (stbi_uc) len; + rgbe[3] = (stbi_uc) stbi__get8(s); + stbi__hdr_convert(hdr_data, rgbe, req_comp); + i = 1; + j = 0; + STBI_FREE(scanline); + goto main_decode_loop; // yes, this makes no sense + } + len <<= 8; + len |= stbi__get8(s); + if (len != width) { STBI_FREE(hdr_data); STBI_FREE(scanline); return stbi__errpf("invalid decoded scanline length", "corrupt HDR"); } + if (scanline == NULL) { + scanline = (stbi_uc *) stbi__malloc_mad2(width, 4, 0); + if (!scanline) { + STBI_FREE(hdr_data); + return stbi__errpf("outofmem", "Out of memory"); + } + } + + for (k = 0; k < 4; ++k) { + int nleft; + i = 0; + while ((nleft = width - i) > 0) { + count = stbi__get8(s); + if (count > 128) { + // Run + value = stbi__get8(s); + count -= 128; + if (count > nleft) { STBI_FREE(hdr_data); STBI_FREE(scanline); return stbi__errpf("corrupt", "bad RLE data in HDR"); } + for (z = 0; z < count; ++z) + scanline[i++ * 4 + k] = value; + } else { + // Dump + if (count > nleft) { STBI_FREE(hdr_data); STBI_FREE(scanline); return stbi__errpf("corrupt", "bad RLE data in HDR"); } + for (z = 0; z < count; ++z) + scanline[i++ * 4 + k] = stbi__get8(s); + } + } + } + for (i=0; i < width; ++i) + stbi__hdr_convert(hdr_data+(j*width + i)*req_comp, scanline + i*4, req_comp); + } + if (scanline) + STBI_FREE(scanline); + } + + return hdr_data; +} + +static int stbi__hdr_info(stbi__context *s, int *x, int *y, int *comp) +{ + char buffer[STBI__HDR_BUFLEN]; + char *token; + int valid = 0; + int dummy; + + if (!x) x = &dummy; + if (!y) y = &dummy; + if (!comp) comp = &dummy; + + if (stbi__hdr_test(s) == 0) { + stbi__rewind( s ); + return 0; + } + + for(;;) { + token = stbi__hdr_gettoken(s,buffer); + if (token[0] == 0) break; + if (strcmp(token, "FORMAT=32-bit_rle_rgbe") == 0) valid = 1; + } + 
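/* Editor's note: stbi__hdr_info() only validates the signature, the FORMAT
   line and the resolution line; no pixel data is decoded.  Callers normally
   go through the public API instead.  Sketch, assuming the stbi_info(),
   stbi_loadf() and stbi_image_free() declarations from the top of this
   header; the file name is illustrative:

       int w, h, n;
       if (stbi_info("probe.hdr", &w, &h, &n)) {
           float *rgb = stbi_loadf("probe.hdr", &w, &h, &n, 3);
           if (rgb) {
               consume_hdr(rgb, w, h);
               stbi_image_free(rgb);
           }
       }

   consume_hdr() is a placeholder for the caller's own code; stbi_loadf()
   returns w*h*3 linear floats, red component first. */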
+ if (!valid) { + stbi__rewind( s ); + return 0; + } + token = stbi__hdr_gettoken(s,buffer); + if (strncmp(token, "-Y ", 3)) { + stbi__rewind( s ); + return 0; + } + token += 3; + *y = (int) strtol(token, &token, 10); + while (*token == ' ') ++token; + if (strncmp(token, "+X ", 3)) { + stbi__rewind( s ); + return 0; + } + token += 3; + *x = (int) strtol(token, NULL, 10); + *comp = 3; + return 1; +} +#endif // STBI_NO_HDR + +#ifndef STBI_NO_BMP +static int stbi__bmp_info(stbi__context *s, int *x, int *y, int *comp) +{ + void *p; + stbi__bmp_data info; + + info.all_a = 255; + p = stbi__bmp_parse_header(s, &info); + stbi__rewind( s ); + if (p == NULL) + return 0; + if (x) *x = s->img_x; + if (y) *y = s->img_y; + if (comp) { + if (info.bpp == 24 && info.ma == 0xff000000) + *comp = 3; + else + *comp = info.ma ? 4 : 3; + } + return 1; +} +#endif + +#ifndef STBI_NO_PSD +static int stbi__psd_info(stbi__context *s, int *x, int *y, int *comp) +{ + int channelCount, dummy, depth; + if (!x) x = &dummy; + if (!y) y = &dummy; + if (!comp) comp = &dummy; + if (stbi__get32be(s) != 0x38425053) { + stbi__rewind( s ); + return 0; + } + if (stbi__get16be(s) != 1) { + stbi__rewind( s ); + return 0; + } + stbi__skip(s, 6); + channelCount = stbi__get16be(s); + if (channelCount < 0 || channelCount > 16) { + stbi__rewind( s ); + return 0; + } + *y = stbi__get32be(s); + *x = stbi__get32be(s); + depth = stbi__get16be(s); + if (depth != 8 && depth != 16) { + stbi__rewind( s ); + return 0; + } + if (stbi__get16be(s) != 3) { + stbi__rewind( s ); + return 0; + } + *comp = 4; + return 1; +} + +static int stbi__psd_is16(stbi__context *s) +{ + int channelCount, depth; + if (stbi__get32be(s) != 0x38425053) { + stbi__rewind( s ); + return 0; + } + if (stbi__get16be(s) != 1) { + stbi__rewind( s ); + return 0; + } + stbi__skip(s, 6); + channelCount = stbi__get16be(s); + if (channelCount < 0 || channelCount > 16) { + stbi__rewind( s ); + return 0; + } + (void) stbi__get32be(s); + (void) stbi__get32be(s); + depth = stbi__get16be(s); + if (depth != 16) { + stbi__rewind( s ); + return 0; + } + return 1; +} +#endif + +#ifndef STBI_NO_PIC +static int stbi__pic_info(stbi__context *s, int *x, int *y, int *comp) +{ + int act_comp=0,num_packets=0,chained,dummy; + stbi__pic_packet packets[10]; + + if (!x) x = &dummy; + if (!y) y = &dummy; + if (!comp) comp = &dummy; + + if (!stbi__pic_is4(s,"\x53\x80\xF6\x34")) { + stbi__rewind(s); + return 0; + } + + stbi__skip(s, 88); + + *x = stbi__get16be(s); + *y = stbi__get16be(s); + if (stbi__at_eof(s)) { + stbi__rewind( s); + return 0; + } + if ( (*x) != 0 && (1 << 28) / (*x) < (*y)) { + stbi__rewind( s ); + return 0; + } + + stbi__skip(s, 8); + + do { + stbi__pic_packet *packet; + + if (num_packets==sizeof(packets)/sizeof(packets[0])) + return 0; + + packet = &packets[num_packets++]; + chained = stbi__get8(s); + packet->size = stbi__get8(s); + packet->type = stbi__get8(s); + packet->channel = stbi__get8(s); + act_comp |= packet->channel; + + if (stbi__at_eof(s)) { + stbi__rewind( s ); + return 0; + } + if (packet->size != 8) { + stbi__rewind( s ); + return 0; + } + } while (chained); + + *comp = (act_comp & 0x10 ? 
4 : 3); + + return 1; +} +#endif + +// ************************************************************************************************* +// Portable Gray Map and Portable Pixel Map loader +// by Ken Miller +// +// PGM: http://netpbm.sourceforge.net/doc/pgm.html +// PPM: http://netpbm.sourceforge.net/doc/ppm.html +// +// Known limitations: +// Does not support comments in the header section +// Does not support ASCII image data (formats P2 and P3) +// Does not support 16-bit-per-channel + +#ifndef STBI_NO_PNM + +static int stbi__pnm_test(stbi__context *s) +{ + char p, t; + p = (char) stbi__get8(s); + t = (char) stbi__get8(s); + if (p != 'P' || (t != '5' && t != '6')) { + stbi__rewind( s ); + return 0; + } + return 1; +} + +static void *stbi__pnm_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri) +{ + stbi_uc *out; + STBI_NOTUSED(ri); + + if (!stbi__pnm_info(s, (int *)&s->img_x, (int *)&s->img_y, (int *)&s->img_n)) + return 0; + + if (s->img_y > STBI_MAX_DIMENSIONS) return stbi__errpuc("too large","Very large image (corrupt?)"); + if (s->img_x > STBI_MAX_DIMENSIONS) return stbi__errpuc("too large","Very large image (corrupt?)"); + + *x = s->img_x; + *y = s->img_y; + if (comp) *comp = s->img_n; + + if (!stbi__mad3sizes_valid(s->img_n, s->img_x, s->img_y, 0)) + return stbi__errpuc("too large", "PNM too large"); + + out = (stbi_uc *) stbi__malloc_mad3(s->img_n, s->img_x, s->img_y, 0); + if (!out) return stbi__errpuc("outofmem", "Out of memory"); + stbi__getn(s, out, s->img_n * s->img_x * s->img_y); + + if (req_comp && req_comp != s->img_n) { + out = stbi__convert_format(out, s->img_n, req_comp, s->img_x, s->img_y); + if (out == NULL) return out; // stbi__convert_format frees input on failure + } + return out; +} + +static int stbi__pnm_isspace(char c) +{ + return c == ' ' || c == '\t' || c == '\n' || c == '\v' || c == '\f' || c == '\r'; +} + +static void stbi__pnm_skip_whitespace(stbi__context *s, char *c) +{ + for (;;) { + while (!stbi__at_eof(s) && stbi__pnm_isspace(*c)) + *c = (char) stbi__get8(s); + + if (stbi__at_eof(s) || *c != '#') + break; + + while (!stbi__at_eof(s) && *c != '\n' && *c != '\r' ) + *c = (char) stbi__get8(s); + } +} + +static int stbi__pnm_isdigit(char c) +{ + return c >= '0' && c <= '9'; +} + +static int stbi__pnm_getinteger(stbi__context *s, char *c) +{ + int value = 0; + + while (!stbi__at_eof(s) && stbi__pnm_isdigit(*c)) { + value = value*10 + (*c - '0'); + *c = (char) stbi__get8(s); + } + + return value; +} + +static int stbi__pnm_info(stbi__context *s, int *x, int *y, int *comp) +{ + int maxv, dummy; + char c, p, t; + + if (!x) x = &dummy; + if (!y) y = &dummy; + if (!comp) comp = &dummy; + + stbi__rewind(s); + + // Get identifier + p = (char) stbi__get8(s); + t = (char) stbi__get8(s); + if (p != 'P' || (t != '5' && t != '6')) { + stbi__rewind(s); + return 0; + } + + *comp = (t == '6') ? 
3 : 1; // '5' is 1-component .pgm; '6' is 3-component .ppm + + c = (char) stbi__get8(s); + stbi__pnm_skip_whitespace(s, &c); + + *x = stbi__pnm_getinteger(s, &c); // read width + stbi__pnm_skip_whitespace(s, &c); + + *y = stbi__pnm_getinteger(s, &c); // read height + stbi__pnm_skip_whitespace(s, &c); + + maxv = stbi__pnm_getinteger(s, &c); // read max value + + if (maxv > 255) + return stbi__err("max value > 255", "PPM image not 8-bit"); + else + return 1; +} +#endif + +static int stbi__info_main(stbi__context *s, int *x, int *y, int *comp) +{ + #ifndef STBI_NO_JPEG + if (stbi__jpeg_info(s, x, y, comp)) return 1; + #endif + + #ifndef STBI_NO_PNG + if (stbi__png_info(s, x, y, comp)) return 1; + #endif + + #ifndef STBI_NO_GIF + if (stbi__gif_info(s, x, y, comp)) return 1; + #endif + + #ifndef STBI_NO_BMP + if (stbi__bmp_info(s, x, y, comp)) return 1; + #endif + + #ifndef STBI_NO_PSD + if (stbi__psd_info(s, x, y, comp)) return 1; + #endif + + #ifndef STBI_NO_PIC + if (stbi__pic_info(s, x, y, comp)) return 1; + #endif + + #ifndef STBI_NO_PNM + if (stbi__pnm_info(s, x, y, comp)) return 1; + #endif + + #ifndef STBI_NO_HDR + if (stbi__hdr_info(s, x, y, comp)) return 1; + #endif + + // test tga last because it's a crappy test! + #ifndef STBI_NO_TGA + if (stbi__tga_info(s, x, y, comp)) + return 1; + #endif + return stbi__err("unknown image type", "Image not of any known type, or corrupt"); +} + +static int stbi__is_16_main(stbi__context *s) +{ + #ifndef STBI_NO_PNG + if (stbi__png_is16(s)) return 1; + #endif + + #ifndef STBI_NO_PSD + if (stbi__psd_is16(s)) return 1; + #endif + + return 0; +} + +#ifndef STBI_NO_STDIO +STBIDEF int stbi_info(char const *filename, int *x, int *y, int *comp) +{ + FILE *f = stbi__fopen(filename, "rb"); + int result; + if (!f) return stbi__err("can't fopen", "Unable to open file"); + result = stbi_info_from_file(f, x, y, comp); + fclose(f); + return result; +} + +STBIDEF int stbi_info_from_file(FILE *f, int *x, int *y, int *comp) +{ + int r; + stbi__context s; + long pos = ftell(f); + stbi__start_file(&s, f); + r = stbi__info_main(&s,x,y,comp); + fseek(f,pos,SEEK_SET); + return r; +} + +STBIDEF int stbi_is_16_bit(char const *filename) +{ + FILE *f = stbi__fopen(filename, "rb"); + int result; + if (!f) return stbi__err("can't fopen", "Unable to open file"); + result = stbi_is_16_bit_from_file(f); + fclose(f); + return result; +} + +STBIDEF int stbi_is_16_bit_from_file(FILE *f) +{ + int r; + stbi__context s; + long pos = ftell(f); + stbi__start_file(&s, f); + r = stbi__is_16_main(&s); + fseek(f,pos,SEEK_SET); + return r; +} +#endif // !STBI_NO_STDIO + +STBIDEF int stbi_info_from_memory(stbi_uc const *buffer, int len, int *x, int *y, int *comp) +{ + stbi__context s; + stbi__start_mem(&s,buffer,len); + return stbi__info_main(&s,x,y,comp); +} + +STBIDEF int stbi_info_from_callbacks(stbi_io_callbacks const *c, void *user, int *x, int *y, int *comp) +{ + stbi__context s; + stbi__start_callbacks(&s, (stbi_io_callbacks *) c, user); + return stbi__info_main(&s,x,y,comp); +} + +STBIDEF int stbi_is_16_bit_from_memory(stbi_uc const *buffer, int len) +{ + stbi__context s; + stbi__start_mem(&s,buffer,len); + return stbi__is_16_main(&s); +} + +STBIDEF int stbi_is_16_bit_from_callbacks(stbi_io_callbacks const *c, void *user) +{ + stbi__context s; + stbi__start_callbacks(&s, (stbi_io_callbacks *) c, user); + return stbi__is_16_main(&s); +} + +#endif // STB_IMAGE_IMPLEMENTATION + +/* + revision history: + 2.20 (2019-02-07) support utf8 filenames in Windows; fix warnings and platform ifdefs 
+ 2.19 (2018-02-11) fix warning + 2.18 (2018-01-30) fix warnings + 2.17 (2018-01-29) change sbti__shiftsigned to avoid clang -O2 bug + 1-bit BMP + *_is_16_bit api + avoid warnings + 2.16 (2017-07-23) all functions have 16-bit variants; + STBI_NO_STDIO works again; + compilation fixes; + fix rounding in unpremultiply; + optimize vertical flip; + disable raw_len validation; + documentation fixes + 2.15 (2017-03-18) fix png-1,2,4 bug; now all Imagenet JPGs decode; + warning fixes; disable run-time SSE detection on gcc; + uniform handling of optional "return" values; + thread-safe initialization of zlib tables + 2.14 (2017-03-03) remove deprecated STBI_JPEG_OLD; fixes for Imagenet JPGs + 2.13 (2016-11-29) add 16-bit API, only supported for PNG right now + 2.12 (2016-04-02) fix typo in 2.11 PSD fix that caused crashes + 2.11 (2016-04-02) allocate large structures on the stack + remove white matting for transparent PSD + fix reported channel count for PNG & BMP + re-enable SSE2 in non-gcc 64-bit + support RGB-formatted JPEG + read 16-bit PNGs (only as 8-bit) + 2.10 (2016-01-22) avoid warning introduced in 2.09 by STBI_REALLOC_SIZED + 2.09 (2016-01-16) allow comments in PNM files + 16-bit-per-pixel TGA (not bit-per-component) + info() for TGA could break due to .hdr handling + info() for BMP to shares code instead of sloppy parse + can use STBI_REALLOC_SIZED if allocator doesn't support realloc + code cleanup + 2.08 (2015-09-13) fix to 2.07 cleanup, reading RGB PSD as RGBA + 2.07 (2015-09-13) fix compiler warnings + partial animated GIF support + limited 16-bpc PSD support + #ifdef unused functions + bug with < 92 byte PIC,PNM,HDR,TGA + 2.06 (2015-04-19) fix bug where PSD returns wrong '*comp' value + 2.05 (2015-04-19) fix bug in progressive JPEG handling, fix warning + 2.04 (2015-04-15) try to re-enable SIMD on MinGW 64-bit + 2.03 (2015-04-12) extra corruption checking (mmozeiko) + stbi_set_flip_vertically_on_load (nguillemot) + fix NEON support; fix mingw support + 2.02 (2015-01-19) fix incorrect assert, fix warning + 2.01 (2015-01-17) fix various warnings; suppress SIMD on gcc 32-bit without -msse2 + 2.00b (2014-12-25) fix STBI_MALLOC in progressive JPEG + 2.00 (2014-12-25) optimize JPG, including x86 SSE2 & NEON SIMD (ryg) + progressive JPEG (stb) + PGM/PPM support (Ken Miller) + STBI_MALLOC,STBI_REALLOC,STBI_FREE + GIF bugfix -- seemingly never worked + STBI_NO_*, STBI_ONLY_* + 1.48 (2014-12-14) fix incorrectly-named assert() + 1.47 (2014-12-14) 1/2/4-bit PNG support, both direct and paletted (Omar Cornut & stb) + optimize PNG (ryg) + fix bug in interlaced PNG with user-specified channel count (stb) + 1.46 (2014-08-26) + fix broken tRNS chunk (colorkey-style transparency) in non-paletted PNG + 1.45 (2014-08-16) + fix MSVC-ARM internal compiler error by wrapping malloc + 1.44 (2014-08-07) + various warning fixes from Ronny Chevalier + 1.43 (2014-07-15) + fix MSVC-only compiler problem in code changed in 1.42 + 1.42 (2014-07-09) + don't define _CRT_SECURE_NO_WARNINGS (affects user code) + fixes to stbi__cleanup_jpeg path + added STBI_ASSERT to avoid requiring assert.h + 1.41 (2014-06-25) + fix search&replace from 1.36 that messed up comments/error messages + 1.40 (2014-06-22) + fix gcc struct-initialization warning + 1.39 (2014-06-15) + fix to TGA optimization when req_comp != number of components in TGA; + fix to GIF loading because BMP wasn't rewinding (whoops, no GIFs in my test suite) + add support for BMP version 5 (more ignored fields) + 1.38 (2014-06-06) + suppress MSVC warnings on 
integer casts truncating values + fix accidental rename of 'skip' field of I/O + 1.37 (2014-06-04) + remove duplicate typedef + 1.36 (2014-06-03) + convert to header file single-file library + if de-iphone isn't set, load iphone images color-swapped instead of returning NULL + 1.35 (2014-05-27) + various warnings + fix broken STBI_SIMD path + fix bug where stbi_load_from_file no longer left file pointer in correct place + fix broken non-easy path for 32-bit BMP (possibly never used) + TGA optimization by Arseny Kapoulkine + 1.34 (unknown) + use STBI_NOTUSED in stbi__resample_row_generic(), fix one more leak in tga failure case + 1.33 (2011-07-14) + make stbi_is_hdr work in STBI_NO_HDR (as specified), minor compiler-friendly improvements + 1.32 (2011-07-13) + support for "info" function for all supported filetypes (SpartanJ) + 1.31 (2011-06-20) + a few more leak fixes, bug in PNG handling (SpartanJ) + 1.30 (2011-06-11) + added ability to load files via callbacks to accomidate custom input streams (Ben Wenger) + removed deprecated format-specific test/load functions + removed support for installable file formats (stbi_loader) -- would have been broken for IO callbacks anyway + error cases in bmp and tga give messages and don't leak (Raymond Barbiero, grisha) + fix inefficiency in decoding 32-bit BMP (David Woo) + 1.29 (2010-08-16) + various warning fixes from Aurelien Pocheville + 1.28 (2010-08-01) + fix bug in GIF palette transparency (SpartanJ) + 1.27 (2010-08-01) + cast-to-stbi_uc to fix warnings + 1.26 (2010-07-24) + fix bug in file buffering for PNG reported by SpartanJ + 1.25 (2010-07-17) + refix trans_data warning (Won Chun) + 1.24 (2010-07-12) + perf improvements reading from files on platforms with lock-heavy fgetc() + minor perf improvements for jpeg + deprecated type-specific functions so we'll get feedback if they're needed + attempt to fix trans_data warning (Won Chun) + 1.23 fixed bug in iPhone support + 1.22 (2010-07-10) + removed image *writing* support + stbi_info support from Jetro Lauha + GIF support from Jean-Marc Lienher + iPhone PNG-extensions from James Brown + warning-fixes from Nicolas Schulz and Janez Zemva (i.stbi__err. Janez (U+017D)emva) + 1.21 fix use of 'stbi_uc' in header (reported by jon blow) + 1.20 added support for Softimage PIC, by Tom Seddon + 1.19 bug in interlaced PNG corruption check (found by ryg) + 1.18 (2008-08-02) + fix a threading bug (local mutable static) + 1.17 support interlaced PNG + 1.16 major bugfix - stbi__convert_format converted one too many pixels + 1.15 initialize some fields for thread safety + 1.14 fix threadsafe conversion bug + header-file-only version (#define STBI_HEADER_FILE_ONLY before including) + 1.13 threadsafe + 1.12 const qualifiers in the API + 1.11 Support installable IDCT, colorspace conversion routines + 1.10 Fixes for 64-bit (don't use "unsigned long") + optimized upsampling by Fabian "ryg" Giesen + 1.09 Fix format-conversion for PSD code (bad global variables!) + 1.08 Thatcher Ulrich's PSD code integrated by Nicolas Schulz + 1.07 attempt to fix C++ warning/errors again + 1.06 attempt to fix C++ warning/errors again + 1.05 fix TGA loading to return correct *comp and use good luminance calc + 1.04 default float alpha is 1, not 255; use 'void *' for stbi_image_free + 1.03 bugfixes to STBI_NO_STDIO, STBI_NO_HDR + 1.02 support for (subset of) HDR files, float interface for preferred access to them + 1.01 fix bug: possible bug in handling right-side up bmps... 
not sure + fix bug: the stbi__bmp_load() and stbi__tga_load() functions didn't work at all + 1.00 interface to zlib that skips zlib header + 0.99 correct handling of alpha in palette + 0.98 TGA loader by lonesock; dynamically add loaders (untested) + 0.97 jpeg errors on too large a file; also catch another malloc failure + 0.96 fix detection of invalid v value - particleman@mollyrocket forum + 0.95 during header scan, seek to markers in case of padding + 0.94 STBI_NO_STDIO to disable stdio usage; rename all #defines the same + 0.93 handle jpegtran output; verbose errors + 0.92 read 4,8,16,24,32-bit BMP files of several formats + 0.91 output 24-bit Windows 3.0 BMP files + 0.90 fix a few more warnings; bump version number to approach 1.0 + 0.61 bugfixes due to Marc LeBlanc, Christopher Lloyd + 0.60 fix compiling as c++ + 0.59 fix warnings: merge Dave Moore's -Wall fixes + 0.58 fix bug: zlib uncompressed mode len/nlen was wrong endian + 0.57 fix bug: jpg last huffman symbol before marker was >9 bits but less than 16 available + 0.56 fix bug: zlib uncompressed mode len vs. nlen + 0.55 fix bug: restart_interval not initialized to 0 + 0.54 allow NULL for 'int *comp' + 0.53 fix bug in png 3->4; speedup png decoding + 0.52 png handles req_comp=3,4 directly; minor cleanup; jpeg comments + 0.51 obey req_comp requests, 1-component jpegs return as 1-component, + on 'test' only check type, not whether we support this variant + 0.50 (2006-11-19) + first released version +*/ + + +/* +------------------------------------------------------------------------------ +This software is available under 2 licenses -- choose whichever you prefer. +------------------------------------------------------------------------------ +ALTERNATIVE A - MIT License +Copyright (c) 2017 Sean Barrett +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +------------------------------------------------------------------------------ +ALTERNATIVE B - Public Domain (www.unlicense.org) +This is free and unencumbered software released into the public domain. +Anyone is free to copy, modify, publish, use, compile, sell, or distribute this +software, either in source code form or as a compiled binary, for any purpose, +commercial or non-commercial, and by any means. +In jurisdictions that recognize copyright laws, the author or authors of this +software dedicate any and all copyright interest in the software to the public +domain. We make this dedication for the benefit of the public at large and to +the detriment of our heirs and successors. 
We intend this dedication to be an +overt act of relinquishment in perpetuity of all present and future rights to +this software under copyright law. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +------------------------------------------------------------------------------ +*/ diff --git a/v_windows/v/old/thirdparty/stb_image/stbi.c b/v_windows/v/old/thirdparty/stb_image/stbi.c new file mode 100644 index 0000000..0375a5a --- /dev/null +++ b/v_windows/v/old/thirdparty/stb_image/stbi.c @@ -0,0 +1,3 @@ + +#define STB_IMAGE_IMPLEMENTATION +#include "stb_image.h" diff --git a/v_windows/v/old/thirdparty/stdatomic/nix/atomic.h b/v_windows/v/old/thirdparty/stdatomic/nix/atomic.h new file mode 100644 index 0000000..3bcb867 --- /dev/null +++ b/v_windows/v/old/thirdparty/stdatomic/nix/atomic.h @@ -0,0 +1,435 @@ +/* + Compability header for stdatomic.h that works for all compilers supported + by V. For TCC libatomic from the operating system is used + +*/ +#ifndef __ATOMIC_H +#define __ATOMIC_H + +#ifndef __cplusplus +// If C just use stdatomic.h +#ifndef __TINYC__ +#include +#endif +#else +// CPP wrapper for atomic operations that are compatible with C +#include "atomic_cpp.h" +#endif + +#ifdef __TINYC__ + +typedef volatile long long atomic_llong; +typedef volatile unsigned long long atomic_ullong; +typedef volatile uintptr_t atomic_uintptr_t; + +// use functions for 64, 32 and 8 bit from libatomic directly +// since tcc is not capible to use "generic" C functions +// there is no header file for libatomic so we provide function declarations here + +extern unsigned long long __atomic_load_8(unsigned long long* x, int mo); +extern void __atomic_store_8(unsigned long long* x, unsigned long long y, int mo); +extern _Bool __atomic_compare_exchange_8(unsigned long long* x, unsigned long long* expected, unsigned long long y, int mo, int mo2); +extern _Bool __atomic_compare_exchange_8(unsigned long long* x, unsigned long long* expected, unsigned long long y, int mo, int mo2); +extern unsigned long long __atomic_exchange_8(unsigned long long* x, unsigned long long y, int mo); +extern unsigned long long __atomic_fetch_add_8(unsigned long long* x, unsigned long long y, int mo); +extern unsigned long long __atomic_fetch_sub_8(unsigned long long* x, unsigned long long y, int mo); +extern unsigned long long __atomic_fetch_and_8(unsigned long long* x, unsigned long long y, int mo); +extern unsigned long long __atomic_fetch_or_8(unsigned long long* x, unsigned long long y, int mo); +extern unsigned long long __atomic_fetch_xor_8(unsigned long long* x, unsigned long long y, int mo); + +extern unsigned int __atomic_load_4(unsigned int* x, int mo); +extern void __atomic_store_4(unsigned int* x, unsigned int y, int mo); +extern _Bool __atomic_compare_exchange_4(unsigned int* x, unsigned int* expected, unsigned int y, int mo, int mo2); +extern _Bool __atomic_compare_exchange_4(unsigned int* x, unsigned int* expected, unsigned int y, int mo, int mo2); +extern unsigned int __atomic_exchange_4(unsigned int* x, unsigned int y, int mo); +extern unsigned int __atomic_fetch_add_4(unsigned int* x, unsigned int y, int mo); +extern 
unsigned int __atomic_fetch_sub_4(unsigned int* x, unsigned int y, int mo); +extern unsigned int __atomic_fetch_and_4(unsigned int* x, unsigned int y, int mo); +extern unsigned int __atomic_fetch_or_4(unsigned int* x, unsigned int y, int mo); +extern unsigned int __atomic_fetch_xor_4(unsigned int* x, unsigned int y, int mo); + +extern unsigned short __atomic_load_2(unsigned short* x, int mo); +extern void __atomic_store_2(unsigned short* x, unsigned short y, int mo); +extern _Bool __atomic_compare_exchange_2(unsigned short* x, unsigned short* expected, unsigned short y, int mo, int mo2); +extern _Bool __atomic_compare_exchange_2(unsigned short* x, unsigned short* expected, unsigned short y, int mo, int mo2); +extern unsigned short __atomic_exchange_2(unsigned short* x, unsigned short y, int mo); +extern unsigned short __atomic_fetch_add_2(unsigned short* x, unsigned short y, int mo); +extern unsigned short __atomic_fetch_sub_2(unsigned short* x, unsigned short y, int mo); +extern unsigned short __atomic_fetch_and_2(unsigned short* x, unsigned short y, int mo); +extern unsigned short __atomic_fetch_or_2(unsigned short* x, unsigned short y, int mo); +extern unsigned short __atomic_fetch_xor_2(unsigned short* x, unsigned short y, int mo); + +extern unsigned char __atomic_load_1(unsigned char* x, int mo); +extern void __atomic_store_1(unsigned char* x, unsigned char y, int mo); +extern _Bool __atomic_compare_exchange_1(unsigned char* x, unsigned char* expected, unsigned char y, int mo, int mo2); +extern _Bool __atomic_compare_exchange_1(unsigned char* x, unsigned char* expected, unsigned char y, int mo, int mo2); +extern unsigned char __atomic_exchange_1(unsigned char* x, unsigned char y, int mo); +extern unsigned char __atomic_fetch_add_1(unsigned char* x, unsigned char y, int mo); +extern unsigned char __atomic_fetch_sub_1(unsigned char* x, unsigned char y, int mo); +extern unsigned char __atomic_fetch_and_1(unsigned char* x, unsigned char y, int mo); +extern unsigned char __atomic_fetch_or_1(unsigned char* x, unsigned char y, int mo); +extern unsigned char __atomic_fetch_xor_1(unsigned char* x, unsigned char y, int mo); + +// The default functions should work with pointers so we have to decide based on pointer size +#if UINTPTR_MAX == 0xFFFFFFFF + +#define atomic_load_explicit __atomic_load_4 +#define atomic_store_explicit __atomic_store_4 +#define atomic_compare_exchange_weak_explicit __atomic_compare_exchange_4 +#define atomic_compare_exchange_strong_explicit __atomic_compare_exchange_4 +#define atomic_exchange_explicit __atomic_exchange_4 +#define atomic_fetch_add_explicit __atomic_fetch_add_4 +#define atomic_fetch_sub_explicit __atomic_sub_fetch_4 + +#else + +#define atomic_load_explicit __atomic_load_8 +#define atomic_store_explicit __atomic_store_8 +#define atomic_compare_exchange_weak_explicit __atomic_compare_exchange_8 +#define atomic_compare_exchange_strong_explicit __atomic_compare_exchange_8 +#define atomic_exchange_explicit __atomic_exchange_8 +#define atomic_fetch_add_explicit __atomic_fetch_add_8 +#define atomic_fetch_sub_explicit __atomic_sub_fetch_8 + +#endif + +// memory order policies - we use "sequentially consistent" by default + +#define memory_order_relaxed 0 +#define memory_order_consume 1 +#define memory_order_acquire 2 +#define memory_order_release 3 +#define memory_order_acq_rel 4 +#define memory_order_seq_cst 5 + +static inline void** atomic_load(void** x) { + return (void**)atomic_load_explicit((unsigned long long*)x, memory_order_seq_cst); +} +static inline 
void atomic_store(void** x, void* y) { + atomic_store_explicit((unsigned long long*)x, (uintptr_t)y, memory_order_seq_cst); +} +static inline int atomic_compare_exchange_weak(void** x, void** expected, void* y) { + return (int)atomic_compare_exchange_weak_explicit((unsigned long long*)x, (unsigned long long*)expected, (uintptr_t)y, memory_order_seq_cst, memory_order_seq_cst); +} +static inline int atomic_compare_exchange_strong(void** x, void** expected, void* y) { + return (int)atomic_compare_exchange_strong_explicit((unsigned long long*)x, (unsigned long long*)expected, (uintptr_t)y, memory_order_seq_cst, memory_order_seq_cst); +} +static inline uintptr_t atomic_exchange(void** x, void* y) { + return atomic_exchange_explicit((unsigned long long*)x, (uintptr_t)y, memory_order_seq_cst); +} +static inline uintptr_t atomic_fetch_add(uintptr_t* x, uintptr_t y) { + return atomic_fetch_add_explicit(x, y, memory_order_seq_cst); +} +static inline uintptr_t atomic_fetch_sub(uintptr_t* x, uintptr_t y) { + return atomic_fetch_sub_explicit(x, y, memory_order_seq_cst); +} +static inline uintptr_t atomic_fetch_and(uintptr_t* x, uintptr_t y) { + return atomic_fetch_and_explicit(x, y, memory_order_seq_cst); +} +static inline uintptr_t atomic_fetch_or(uintptr_t* x, uintptr_t y) { + return atomic_fetch_or_explicit(x, y, memory_order_seq_cst); +} +static inline uintptr_t atomic_fetch_xor(uintptr_t* x, uintptr_t y) { + return atomic_fetch_xor_explicit(x, y, memory_order_seq_cst); +} + +#define atomic_load_ptr atomic_load +#define atomic_store_ptr atomic_store +#define atomic_compare_exchange_weak_ptr atomic_compare_exchange_weak +#define atomic_compare_exchange_strong_ptr atomic_compare_exchange_strong +#define atomic_exchange_ptr atomic_exchange +#define atomic_fetch_add_ptr atomic_fetch_add +#define atomic_fetch_sub_ptr atomic_fetch_sub +#define atomic_fetch_and_ptr atomic_fetch_and +#define atomic_fetch_or_ptr atomic_fetch_or +#define atomic_fetch_xor_ptr atomic_fetch_xor + +// specialized versions for 64 bit + +static inline unsigned long long atomic_load_u64(unsigned long long* x) { + return __atomic_load_8(x, memory_order_seq_cst); +} +static inline void atomic_store_u64(unsigned long long* x, unsigned long long y) { + __atomic_store_8(x, y, memory_order_seq_cst); +} +static inline int atomic_compare_exchange_weak_u64(unsigned long long* x, unsigned long long* expected, unsigned long long y) { + return (int)__atomic_compare_exchange_8(x, expected, y, memory_order_seq_cst, memory_order_seq_cst); +} +static inline int atomic_compare_exchange_strong_u64(unsigned long long* x, unsigned long long* expected, unsigned long long y) { + return (int)__atomic_compare_exchange_8(x, expected, y, memory_order_seq_cst, memory_order_seq_cst); +} +static inline unsigned long long atomic_exchange_u64(unsigned long long* x, unsigned long long y) { + return __atomic_exchange_8(x, y, memory_order_seq_cst); +} +static inline unsigned long long atomic_fetch_add_u64(unsigned long long* x, unsigned long long y) { + return __atomic_fetch_add_8(x, y, memory_order_seq_cst); +} +static inline unsigned long long atomic_fetch_sub_u64(unsigned long long* x, unsigned long long y) { + return __atomic_fetch_sub_8(x, y, memory_order_seq_cst); +} +static inline unsigned long long atomic_fetch_and_u64(unsigned long long* x, unsigned long long y) { + return __atomic_fetch_and_8(x, y, memory_order_seq_cst); +} +static inline unsigned long long atomic_fetch_or_u64(unsigned long long* x, unsigned long long y) { + return __atomic_fetch_or_8(x, 
y, memory_order_seq_cst); +} +static inline unsigned long long atomic_fetch_xor_u64(unsigned long long* x, unsigned long long y) { + return __atomic_fetch_xor_8(x, y, memory_order_seq_cst); +} + +static inline unsigned atomic_load_u32(unsigned* x) { + return __atomic_load_4(x, memory_order_seq_cst); +} +static inline void atomic_store_u32(unsigned* x, unsigned y) { + __atomic_store_4(x, y, memory_order_seq_cst); +} +static inline int atomic_compare_exchange_weak_u32(unsigned* x, unsigned* expected, unsigned y) { + return (int)__atomic_compare_exchange_4(x, expected, y, memory_order_seq_cst, memory_order_seq_cst); +} +static inline int atomic_compare_exchange_strong_u32(unsigned* x, unsigned* expected, unsigned y) { + return (int)__atomic_compare_exchange_4(x, expected, y, memory_order_seq_cst, memory_order_seq_cst); +} +static inline unsigned atomic_exchange_u32(unsigned* x, unsigned y) { + return __atomic_exchange_4(x, y, memory_order_seq_cst); +} +static inline unsigned atomic_fetch_add_u32(unsigned* x, unsigned y) { + return __atomic_fetch_add_4(x, y, memory_order_seq_cst); +} +static inline unsigned atomic_fetch_sub_u32(unsigned* x, unsigned y) { + return __atomic_fetch_sub_4(x, y, memory_order_seq_cst); +} +static inline unsigned atomic_fetch_and_u32(unsigned* x, unsigned y) { + return __atomic_fetch_and_4(x, y, memory_order_seq_cst); +} +static inline unsigned atomic_fetch_or_u32(unsigned* x, unsigned y) { + return __atomic_fetch_or_4(x, y, memory_order_seq_cst); +} +static inline unsigned atomic_fetch_xor_u32(unsigned* x, unsigned y) { + return __atomic_fetch_xor_4(x, y, memory_order_seq_cst); +} + +static inline unsigned short atomic_load_u16(unsigned short* x) { + return __atomic_load_2(x, memory_order_seq_cst); +} +static inline void atomic_store_u16(void* x, unsigned short y) { + __atomic_store_2(x, y, memory_order_seq_cst); +} +static inline int atomic_compare_exchange_weak_u16(void* x, unsigned short* expected, unsigned short y) { + return (int)__atomic_compare_exchange_2(x, expected, y, memory_order_seq_cst, memory_order_seq_cst); +} +static inline int atomic_compare_exchange_strong_u16(unsigned short* x, unsigned short* expected, unsigned short y) { + return (int)__atomic_compare_exchange_2(x, expected, y, memory_order_seq_cst, memory_order_seq_cst); +} +static inline unsigned short atomic_exchange_u16(unsigned short* x, unsigned short y) { + return __atomic_exchange_2(x, y, memory_order_seq_cst); +} +static inline unsigned short atomic_fetch_add_u16(unsigned short* x, unsigned short y) { + return __atomic_fetch_add_2(x, y, memory_order_seq_cst); +} +static inline unsigned short atomic_fetch_sub_u16(unsigned short* x, unsigned short y) { + return __atomic_fetch_sub_2(x, y, memory_order_seq_cst); +} +static inline unsigned short atomic_fetch_and_u16(unsigned short* x, unsigned short y) { + return __atomic_fetch_and_2(x, y, memory_order_seq_cst); +} +static inline unsigned short atomic_fetch_or_u16(unsigned short* x, unsigned short y) { + return __atomic_fetch_or_2(x, y, memory_order_seq_cst); +} +static inline unsigned short atomic_fetch_xor_u16(unsigned short* x, unsigned short y) { + return __atomic_fetch_xor_2(x, y, memory_order_seq_cst); +} + +static inline unsigned char atomic_load_byte(unsigned char* x) { + return __atomic_load_1(x, memory_order_seq_cst); +} +static inline void atomic_store_byte(unsigned char* x, unsigned char y) { + __atomic_store_1(x, y, memory_order_seq_cst); +} +static inline int atomic_compare_exchange_weak_byte(unsigned char* x, unsigned char* 
expected, unsigned char y) { + return __atomic_compare_exchange_1(x, expected, y, memory_order_seq_cst, memory_order_seq_cst); +} +static inline int atomic_compare_exchange_strong_byte(unsigned char* x, unsigned char* expected, unsigned char y) { + return __atomic_compare_exchange_1(x, expected, y, memory_order_seq_cst, memory_order_seq_cst); +} +static inline unsigned char atomic_exchange_byte(unsigned char* x, unsigned char y) { + return __atomic_exchange_1(x, y, memory_order_seq_cst); +} +static inline unsigned char atomic_fetch_add_byte(unsigned char* x, unsigned char y) { + return __atomic_fetch_add_1(x, y, memory_order_seq_cst); +} +static inline unsigned char atomic_fetch_sub_byte(unsigned char* x, unsigned char y) { + return __atomic_fetch_sub_1(x, y, memory_order_seq_cst); +} +static inline unsigned char atomic_fetch_and_byte(unsigned char* x, unsigned char y) { + return __atomic_fetch_and_1(x, y, memory_order_seq_cst); +} +static inline unsigned char atomic_fetch_or_byte(unsigned char* x, unsigned char y) { + return __atomic_fetch_or_1(x, y, memory_order_seq_cst); +} +static inline unsigned char atomic_fetch_xor_byte(unsigned char* x, unsigned char y) { + return __atomic_fetch_xor_1(x, y, memory_order_seq_cst); +} + +#else + +// Since V might be confused with "generic" C functions either we provide special versions +// for gcc/clang, too +static inline unsigned long long atomic_load_u64(unsigned long long* x) { + return atomic_load_explicit((_Atomic unsigned long long*)x, memory_order_seq_cst); +} +static inline void atomic_store_u64(unsigned long long* x, unsigned long long y) { + atomic_store_explicit((_Atomic unsigned long long*)x, y, memory_order_seq_cst); +} +static inline int atomic_compare_exchange_weak_u64(unsigned long long* x, unsigned long long* expected, unsigned long long y) { + return (int)atomic_compare_exchange_weak_explicit((_Atomic unsigned long long*)x, expected, y, memory_order_seq_cst, memory_order_seq_cst); +} +static inline int atomic_compare_exchange_strong_u64(unsigned long long* x, unsigned long long* expected, unsigned long long y) { + return (int)atomic_compare_exchange_strong_explicit((_Atomic unsigned long long*)x, expected, y, memory_order_seq_cst, memory_order_seq_cst); +} +static inline unsigned long long atomic_exchange_u64(unsigned long long* x, unsigned long long y) { + return atomic_exchange_explicit((_Atomic unsigned long long*)x, y, memory_order_seq_cst); +} +static inline unsigned long long atomic_fetch_add_u64(unsigned long long* x, unsigned long long y) { + return atomic_fetch_add_explicit((_Atomic unsigned long long*)x, y, memory_order_seq_cst); +} +static inline unsigned long long atomic_fetch_sub_u64(unsigned long long* x, unsigned long long y) { + return atomic_fetch_sub_explicit((_Atomic unsigned long long*)x, y, memory_order_seq_cst); +} +static inline unsigned long long atomic_fetch_and_u64(unsigned long long* x, unsigned long long y) { + return atomic_fetch_and_explicit((_Atomic unsigned long long*)x, y, memory_order_seq_cst); +} +static inline unsigned long long atomic_fetch_or_u64(unsigned long long* x, unsigned long long y) { + return atomic_fetch_or_explicit((_Atomic unsigned long long*)x, y, memory_order_seq_cst); +} +static inline unsigned long long atomic_fetch_xor_u64(unsigned long long* x, unsigned long long y) { + return atomic_fetch_xor_explicit((_Atomic unsigned long long*)x, y, memory_order_seq_cst); +} + + +static inline void* atomic_load_ptr(void** x) { + return (void*)atomic_load_explicit((_Atomic uintptr_t*)x, 
memory_order_seq_cst); +} +static inline void atomic_store_ptr(void** x, void* y) { + atomic_store_explicit((_Atomic uintptr_t*)x, (uintptr_t)y, memory_order_seq_cst); +} +static inline int atomic_compare_exchange_weak_ptr(void** x, void** expected, void* y) { + return (int)atomic_compare_exchange_weak_explicit((_Atomic uintptr_t*)x, (unsigned long *)expected, (uintptr_t)y, memory_order_seq_cst, memory_order_seq_cst); +} +static inline int atomic_compare_exchange_strong_ptr(void** x, void** expected, void* y) { + return (int)atomic_compare_exchange_strong_explicit((_Atomic uintptr_t*)x, (unsigned long *)expected, (uintptr_t)y, memory_order_seq_cst, memory_order_seq_cst); +} +static inline void* atomic_exchange_ptr(void** x, void* y) { + return (void*)atomic_exchange_explicit((_Atomic uintptr_t*)x, (uintptr_t)y, memory_order_seq_cst); +} +static inline void* atomic_fetch_add_ptr(void** x, void* y) { + return (void*)atomic_fetch_add_explicit((_Atomic uintptr_t*)x, (uintptr_t)y, memory_order_seq_cst); +} +static inline void* atomic_fetch_sub_ptr(void** x, void* y) { + return (void*)atomic_fetch_sub_explicit((_Atomic uintptr_t*)x, (uintptr_t)y, memory_order_seq_cst); +} +static inline void* atomic_fetch_and_ptr(void** x, void* y) { + return (void*)atomic_fetch_and_explicit((_Atomic uintptr_t*)x, (uintptr_t)y, memory_order_seq_cst); +} +static inline void* atomic_fetch_or_ptr(void** x, void* y) { + return (void*)atomic_fetch_or_explicit((_Atomic uintptr_t*)x, (uintptr_t)y, memory_order_seq_cst); +} +static inline void* atomic_fetch_xor_ptr(void** x, void* y) { + return (void*)atomic_fetch_xor_explicit((_Atomic uintptr_t*)x, (uintptr_t)y, memory_order_seq_cst); +} + + +static inline unsigned atomic_load_u32(unsigned* x) { + return atomic_load_explicit((_Atomic unsigned*)x, memory_order_seq_cst); +} +static inline void atomic_store_u32(unsigned* x, unsigned y) { + atomic_store_explicit((_Atomic unsigned*)x, y, memory_order_seq_cst); +} +static inline int atomic_compare_exchange_weak_u32(unsigned* x, unsigned* expected, unsigned y) { + return (int)atomic_compare_exchange_weak_explicit((_Atomic unsigned*)x, expected, y, memory_order_seq_cst, memory_order_seq_cst); +} +static inline int atomic_compare_exchange_strong_u32(unsigned* x, unsigned* expected, unsigned y) { + return (int)atomic_compare_exchange_strong_explicit((_Atomic unsigned*)x, expected, y, memory_order_seq_cst, memory_order_seq_cst); +} +static inline unsigned atomic_exchange_u32(unsigned* x, unsigned y) { + return atomic_exchange_explicit((_Atomic unsigned*)x, y, memory_order_seq_cst); +} +static inline unsigned atomic_fetch_add_u32(unsigned* x, unsigned y) { + return atomic_fetch_add_explicit((_Atomic unsigned*)x, y, memory_order_seq_cst); +} +static inline unsigned atomic_fetch_sub_u32(unsigned* x, unsigned y) { + return atomic_fetch_sub_explicit((_Atomic unsigned*)x, y, memory_order_seq_cst); +} +static inline unsigned atomic_fetch_and_u32(unsigned* x, unsigned y) { + return atomic_fetch_and_explicit((_Atomic unsigned*)x, y, memory_order_seq_cst); +} +static inline unsigned atomic_fetch_or_u32(unsigned* x, unsigned y) { + return atomic_fetch_or_explicit((_Atomic unsigned*)x, y, memory_order_seq_cst); +} +static inline unsigned atomic_fetch_xor_u32(unsigned* x, unsigned y) { + return atomic_fetch_xor_explicit((_Atomic unsigned*)x, y, memory_order_seq_cst); +} + +static inline unsigned short atomic_load_u16(unsigned short* x) { + return atomic_load_explicit((_Atomic unsigned short*)x, memory_order_seq_cst); +} +static inline void 
atomic_store_u16(void* x, unsigned short y) { + atomic_store_explicit((_Atomic unsigned short*)x, y, memory_order_seq_cst); +} +static inline int atomic_compare_exchange_weak_u16(void* x, unsigned short* expected, unsigned short y) { + return (int)atomic_compare_exchange_weak_explicit((_Atomic unsigned short*)x, expected, y, memory_order_seq_cst, memory_order_seq_cst); +} +static inline int atomic_compare_exchange_strong_u16(unsigned short* x, unsigned short* expected, unsigned short y) { + return (int)atomic_compare_exchange_strong_explicit((_Atomic unsigned short*)x, expected, y, memory_order_seq_cst, memory_order_seq_cst); +} +static inline unsigned short atomic_exchange_u16(unsigned short* x, unsigned short y) { + return atomic_exchange_explicit((_Atomic unsigned short*)x, y, memory_order_seq_cst); +} +static inline unsigned short atomic_fetch_add_u16(unsigned short* x, unsigned short y) { + return atomic_fetch_add_explicit((_Atomic unsigned short*)x, y, memory_order_seq_cst); +} +static inline unsigned short atomic_fetch_sub_u16(unsigned short* x, unsigned short y) { + return atomic_fetch_sub_explicit((_Atomic unsigned short*)x, y, memory_order_seq_cst); +} +static inline unsigned short atomic_fetch_and_u16(unsigned short* x, unsigned short y) { + return atomic_fetch_and_explicit((_Atomic unsigned short*)x, y, memory_order_seq_cst); +} +static inline unsigned short atomic_fetch_or_u16(unsigned short* x, unsigned short y) { + return atomic_fetch_or_explicit((_Atomic unsigned short*)x, y, memory_order_seq_cst); +} +static inline unsigned short atomic_fetch_xor_u16(unsigned short* x, unsigned short y) { + return atomic_fetch_xor_explicit((_Atomic unsigned short*)x, y, memory_order_seq_cst); +} + +static inline unsigned char atomic_load_byte(unsigned char* x) { + return atomic_load_explicit((_Atomic unsigned char*)x, memory_order_seq_cst); +} +static inline void atomic_store_byte(unsigned char* x, unsigned char y) { + atomic_store_explicit((_Atomic unsigned char*)x, y, memory_order_seq_cst); +} +static inline int atomic_compare_exchange_weak_byte(unsigned char* x, unsigned char* expected, unsigned char y) { + return (int)atomic_compare_exchange_weak_explicit((_Atomic unsigned char*)x, expected, y, memory_order_seq_cst, memory_order_seq_cst); +} +static inline int atomic_compare_exchange_strong_byte(unsigned char* x, unsigned char* expected, unsigned char y) { + return (int)atomic_compare_exchange_strong_explicit((_Atomic unsigned char*)x, expected, y, memory_order_seq_cst, memory_order_seq_cst); +} +static inline unsigned char atomic_exchange_byte(unsigned char* x, unsigned char y) { + return atomic_exchange_explicit((_Atomic unsigned char*)x, y, memory_order_seq_cst); +} +static inline unsigned char atomic_fetch_add_byte(unsigned char* x, unsigned char y) { + return atomic_fetch_add_explicit((_Atomic unsigned char*)x, y, memory_order_seq_cst); +} +static inline unsigned char atomic_fetch_sub_byte(unsigned char* x, unsigned char y) { + return atomic_fetch_sub_explicit((_Atomic unsigned char*)x, y, memory_order_seq_cst); +} +static inline unsigned char atomic_fetch_and_byte(unsigned char* x, unsigned char y) { + return atomic_fetch_and_explicit((_Atomic unsigned char*)x, y, memory_order_seq_cst); +} +static inline unsigned char atomic_fetch_or_byte(unsigned char* x, unsigned char y) { + return atomic_fetch_or_explicit((_Atomic unsigned char*)x, y, memory_order_seq_cst); +} +static inline unsigned char atomic_fetch_xor_byte(unsigned char* x, unsigned char y) { + return 
atomic_fetch_xor_explicit((_Atomic unsigned char*)x, y, memory_order_seq_cst); +} + +#endif +#endif diff --git a/v_windows/v/old/thirdparty/stdatomic/nix/atomic_cpp.h b/v_windows/v/old/thirdparty/stdatomic/nix/atomic_cpp.h new file mode 100644 index 0000000..252f5c6 --- /dev/null +++ b/v_windows/v/old/thirdparty/stdatomic/nix/atomic_cpp.h @@ -0,0 +1,541 @@ +// Source: https://android.googlesource.com/platform/bionic/+/lollipop-release/libc/include/stdatomic.h +/*- + * Copyright (c) 2011 Ed Schouten + * David Chisnall + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * $FreeBSD$ + */ +#ifndef _STDATOMIC_H_ +#define _STDATOMIC_H_ +#include +#if defined(__cplusplus) && defined(_USING_LIBCXX) +#ifdef __clang__ +#if __has_feature(cxx_atomic) +#define _STDATOMIC_HAVE_ATOMIC +#endif +#else /* gcc */ +#if __GNUC_PREREQ(4, 7) +#define _STDATOMIC_HAVE_ATOMIC +#endif +#endif +#endif +#ifdef _STDATOMIC_HAVE_ATOMIC +/* We have a usable C++ ; use it instead. */ +#include +#undef _Atomic +/* Also defined by for gcc. But not used in macros. */ +/* Also a clang intrinsic. */ +/* Should not be used by client code before this file is */ +/* included. The definitions in themselves see */ +/* the old definition, as they should. */ +/* Client code sees the following definition. 
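   Editor's note: with a usable C++ <atomic>, _Atomic(T) is remapped onto
   std::atomic<T> and the C-style atomic_* names are pulled in from the
   std:: using-declarations that follow, so C code written against this
   header also compiles as C++.  Illustrative sketch (editor's addition,
   not part of the original header):

       _Atomic(int) counter;
       atomic_init(&counter, 0);
       atomic_fetch_add_explicit(&counter, 1, memory_order_seq_cst);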
*/ +#define _Atomic(t) std::atomic +using std::atomic_bool; +using std::atomic_char; +using std::atomic_char16_t; +using std::atomic_char32_t; +using std::atomic_compare_exchange_strong; +using std::atomic_compare_exchange_strong_explicit; +using std::atomic_compare_exchange_weak; +using std::atomic_compare_exchange_weak_explicit; +using std::atomic_exchange; +using std::atomic_exchange_explicit; +using std::atomic_fetch_add; +using std::atomic_fetch_add_explicit; +using std::atomic_fetch_and; +using std::atomic_fetch_and_explicit; +using std::atomic_fetch_or; +using std::atomic_fetch_or_explicit; +using std::atomic_fetch_sub; +using std::atomic_fetch_sub_explicit; +using std::atomic_fetch_xor; +using std::atomic_fetch_xor_explicit; +using std::atomic_init; +using std::atomic_int; +using std::atomic_int_fast16_t; +using std::atomic_int_fast32_t; +using std::atomic_int_fast64_t; +using std::atomic_int_fast8_t; +using std::atomic_int_least16_t; +using std::atomic_int_least32_t; +using std::atomic_int_least64_t; +using std::atomic_int_least8_t; +using std::atomic_intmax_t; +using std::atomic_intptr_t; +using std::atomic_is_lock_free; +using std::atomic_llong; +using std::atomic_load; +using std::atomic_load_explicit; +using std::atomic_long; +using std::atomic_ptrdiff_t; +using std::atomic_schar; +using std::atomic_short; +using std::atomic_signal_fence; +using std::atomic_size_t; +using std::atomic_store; +using std::atomic_store_explicit; +using std::atomic_thread_fence; +using std::atomic_uchar; +using std::atomic_uint; +using std::atomic_uint_fast16_t; +using std::atomic_uint_fast32_t; +using std::atomic_uint_fast64_t; +using std::atomic_uint_fast8_t; +using std::atomic_uint_least16_t; +using std::atomic_uint_least32_t; +using std::atomic_uint_least64_t; +using std::atomic_uint_least8_t; +using std::atomic_uintmax_t; +using std::atomic_uintptr_t; +using std::atomic_ullong; +using std::atomic_ulong; +using std::atomic_ushort; +using std::atomic_wchar_t; +using std::memory_order; +using std::memory_order_acq_rel; +using std::memory_order_consume; +using std::memory_order_relaxed; +using std::memory_order_release; +using std::memory_order_seq_cst; +#else /* unavailable, possibly because this is C, not C++ */ +#include +#include +/* + * C: Do it ourselves. + * Note that the runtime representation defined here should be compatible + * with the C++ one, i.e. an _Atomic(T) needs to contain the same + * bits as a T. + */ +#include /* For ptrdiff_t. */ +#include /* TODO: Should pollute namespace less. */ +#if __STDC_VERSION__ >= 201112L +#include /* For char16_t and char32_t. */ +#endif +#ifdef __clang__ +#if __has_extension(c_atomic) || __has_extension(cxx_atomic) +#define __CLANG_ATOMICS +#else +#error "stdatomic.h does not support your compiler" +#endif +#if __has_builtin(__sync_swap) +#define __HAS_BUILTIN_SYNC_SWAP +#endif +#else +#if __GNUC_PREREQ(4, 7) +#define __GNUC_ATOMICS +#else +#define __SYNC_ATOMICS +#ifdef __cplusplus +#define __ATOMICS_AVOID_DOT_INIT +#endif +#endif +#endif +/* + * 7.17.1 Atomic lock-free macros. 
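   Editor's note: each ATOMIC_*_LOCK_FREE macro defined below expands to
   0 (never lock-free), 1 (sometimes lock-free) or 2 (always lock-free),
   following the C11 convention; the __SYNC_ATOMICS fallback simply assumes
   2 for modern platforms and 1 ("maybe") for long long.  Illustrative use,
   with hypothetical helper names (editor's sketch):

       #if ATOMIC_LLONG_LOCK_FREE == 2
           use_plain_atomic_counter();
       #else
           use_mutex_protected_counter();
       #endif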
+ */ +#ifdef __GCC_ATOMIC_BOOL_LOCK_FREE +#define ATOMIC_BOOL_LOCK_FREE __GCC_ATOMIC_BOOL_LOCK_FREE +#elif defined(__SYNC_ATOMICS) +#define ATOMIC_BOOL_LOCK_FREE 2 /* For all modern platforms */ +#endif +#ifdef __GCC_ATOMIC_CHAR_LOCK_FREE +#define ATOMIC_CHAR_LOCK_FREE __GCC_ATOMIC_CHAR_LOCK_FREE +#elif defined(__SYNC_ATOMICS) +#define ATOMIC_CHAR_LOCK_FREE 2 +#endif +#ifdef __GCC_ATOMIC_CHAR16_T_LOCK_FREE +#define ATOMIC_CHAR16_T_LOCK_FREE __GCC_ATOMIC_CHAR16_T_LOCK_FREE +#elif defined(__SYNC_ATOMICS) +#define ATOMIC_CHAR16_T_LOCK_FREE 2 +#endif +#ifdef __GCC_ATOMIC_CHAR32_T_LOCK_FREE +#define ATOMIC_CHAR32_T_LOCK_FREE __GCC_ATOMIC_CHAR32_T_LOCK_FREE +#elif defined(__SYNC_ATOMICS) +#define ATOMIC_CHAR32_T_LOCK_FREE 2 +#endif +#ifdef __GCC_ATOMIC_WCHAR_T_LOCK_FREE +#define ATOMIC_WCHAR_T_LOCK_FREE __GCC_ATOMIC_WCHAR_T_LOCK_FREE +#elif defined(__SYNC_ATOMICS) +#define ATOMIC_WCHAR_T_LOCK_FREE 2 +#endif +#ifdef __GCC_ATOMIC_SHORT_LOCK_FREE +#define ATOMIC_SHORT_LOCK_FREE __GCC_ATOMIC_SHORT_LOCK_FREE +#elif defined(__SYNC_ATOMICS) +#define ATOMIC_SHORT_LOCK_FREE 2 +#endif +#ifdef __GCC_ATOMIC_INT_LOCK_FREE +#define ATOMIC_INT_LOCK_FREE __GCC_ATOMIC_INT_LOCK_FREE +#elif defined(__SYNC_ATOMICS) +#define ATOMIC_INT_LOCK_FREE 2 +#endif +#ifdef __GCC_ATOMIC_LONG_LOCK_FREE +#define ATOMIC_LONG_LOCK_FREE __GCC_ATOMIC_LONG_LOCK_FREE +#elif defined(__SYNC_ATOMICS) +#define ATOMIC_LONG_LOCK_FREE 2 +#endif +#ifdef __GCC_ATOMIC_LLONG_LOCK_FREE +#define ATOMIC_LLONG_LOCK_FREE __GCC_ATOMIC_LLONG_LOCK_FREE +#elif defined(__SYNC_ATOMICS) +#define ATOMIC_LLONG_LOCK_FREE 1 /* maybe */ +#endif +#ifdef __GCC_ATOMIC_POINTER_LOCK_FREE +#define ATOMIC_POINTER_LOCK_FREE __GCC_ATOMIC_POINTER_LOCK_FREE +#elif defined(__SYNC_ATOMICS) +#define ATOMIC_POINTER_LOCK_FREE 2 +#endif +/* + * 7.17.2 Initialization. + */ +#if defined(__CLANG_ATOMICS) +#define ATOMIC_VAR_INIT(value) (value) +#define atomic_init(obj, value) __c11_atomic_init(obj, value) +#else +#ifdef __ATOMICS_AVOID_DOT_INIT +#define ATOMIC_VAR_INIT(value) \ + { \ + value \ + } +#else +#define ATOMIC_VAR_INIT(value) \ + { \ + .__val = (value) \ + } +#endif +#define atomic_init(obj, value) ((void)((obj)->__val = (value))) +#endif +/* + * Clang and recent GCC both provide predefined macros for the memory + * orderings. If we are using a compiler that doesn't define them, use the + * clang values - these will be ignored in the fallback path. + */ +#ifndef __ATOMIC_RELAXED +#define __ATOMIC_RELAXED 0 +#endif +#ifndef __ATOMIC_CONSUME +#define __ATOMIC_CONSUME 1 +#endif +#ifndef __ATOMIC_ACQUIRE +#define __ATOMIC_ACQUIRE 2 +#endif +#ifndef __ATOMIC_RELEASE +#define __ATOMIC_RELEASE 3 +#endif +#ifndef __ATOMIC_ACQ_REL +#define __ATOMIC_ACQ_REL 4 +#endif +#ifndef __ATOMIC_SEQ_CST +#define __ATOMIC_SEQ_CST 5 +#endif +/* + * 7.17.3 Order and consistency. + * + * The memory_order_* constants that denote the barrier behaviour of the + * atomic operations. + * The enum values must be identical to those used by the + * C++ header. + */ +typedef enum +{ + memory_order_relaxed = __ATOMIC_RELAXED, + memory_order_consume = __ATOMIC_CONSUME, + memory_order_acquire = __ATOMIC_ACQUIRE, + memory_order_release = __ATOMIC_RELEASE, + memory_order_acq_rel = __ATOMIC_ACQ_REL, + memory_order_seq_cst = __ATOMIC_SEQ_CST +} memory_order; +/* + * 7.17.4 Fences. 
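   Editor's note: atomic_thread_fence() below emits a real memory barrier
   (__c11_atomic_thread_fence, __atomic_thread_fence or __sync_synchronize,
   depending on the compiler), while atomic_signal_fence() only constrains
   the compiler, which suffices when synchronizing with a signal handler on
   the same thread.  Typical release-publish pattern, using hypothetical
   variables shared_value and ready_flag (editor's sketch):

       shared_value = 42;
       atomic_thread_fence(memory_order_release);
       atomic_store_explicit(&ready_flag, 1, memory_order_relaxed);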
+ */ +static __inline void +atomic_thread_fence(memory_order __order __attribute__((unused))) +{ +#ifdef __CLANG_ATOMICS + __c11_atomic_thread_fence(__order); +#elif defined(__GNUC_ATOMICS) + __atomic_thread_fence(__order); +#else + __sync_synchronize(); +#endif +} +static __inline void +atomic_signal_fence(memory_order __order __attribute__((unused))) +{ +#ifdef __CLANG_ATOMICS + __c11_atomic_signal_fence(__order); +#elif defined(__GNUC_ATOMICS) + __atomic_signal_fence(__order); +#else + __asm volatile("" :: + : "memory"); +#endif +} +/* + * 7.17.5 Lock-free property. + */ +#if defined(_KERNEL) +/* Atomics in kernelspace are always lock-free. */ +#define atomic_is_lock_free(obj) \ + ((void)(obj), (_Bool)1) +#elif defined(__CLANG_ATOMICS) +#define atomic_is_lock_free(obj) \ + __c11_atomic_is_lock_free(sizeof(*(obj))) +#elif defined(__GNUC_ATOMICS) +#define atomic_is_lock_free(obj) \ + __atomic_is_lock_free(sizeof((obj)->__val), &(obj)->__val) +#else +#define atomic_is_lock_free(obj) \ + ((void)(obj), sizeof((obj)->__val) <= sizeof(void *)) +#endif +/* + * 7.17.6 Atomic integer types. + */ +#ifndef __CLANG_ATOMICS +/* + * No native support for _Atomic(). Place object in structure to prevent + * most forms of direct non-atomic access. + */ +#define _Atomic(T) \ + struct \ + { \ + T volatile __val; \ + } +#endif +typedef _Atomic(bool) atomic_bool; +typedef _Atomic(char) atomic_char; +typedef _Atomic(signed char) atomic_schar; +typedef _Atomic(unsigned char) atomic_uchar; +typedef _Atomic(short) atomic_short; +typedef _Atomic(unsigned short) atomic_ushort; +typedef _Atomic(int) atomic_int; +typedef _Atomic(unsigned int) atomic_uint; +typedef _Atomic(long) atomic_long; +typedef _Atomic(unsigned long) atomic_ulong; +typedef _Atomic(long long) atomic_llong; +typedef _Atomic(unsigned long long) atomic_ullong; +#if __STDC_VERSION__ >= 201112L || __cplusplus >= 201103L +typedef _Atomic(char16_t) atomic_char16_t; +typedef _Atomic(char32_t) atomic_char32_t; +#endif +typedef _Atomic(wchar_t) atomic_wchar_t; +typedef _Atomic(int_least8_t) atomic_int_least8_t; +typedef _Atomic(uint_least8_t) atomic_uint_least8_t; +typedef _Atomic(int_least16_t) atomic_int_least16_t; +typedef _Atomic(uint_least16_t) atomic_uint_least16_t; +typedef _Atomic(int_least32_t) atomic_int_least32_t; +typedef _Atomic(uint_least32_t) atomic_uint_least32_t; +typedef _Atomic(int_least64_t) atomic_int_least64_t; +typedef _Atomic(uint_least64_t) atomic_uint_least64_t; +typedef _Atomic(int_fast8_t) atomic_int_fast8_t; +typedef _Atomic(uint_fast8_t) atomic_uint_fast8_t; +typedef _Atomic(int_fast16_t) atomic_int_fast16_t; +typedef _Atomic(uint_fast16_t) atomic_uint_fast16_t; +typedef _Atomic(int_fast32_t) atomic_int_fast32_t; +typedef _Atomic(uint_fast32_t) atomic_uint_fast32_t; +typedef _Atomic(int_fast64_t) atomic_int_fast64_t; +typedef _Atomic(uint_fast64_t) atomic_uint_fast64_t; +typedef _Atomic(intptr_t) atomic_intptr_t; +typedef _Atomic(uintptr_t) atomic_uintptr_t; +typedef _Atomic(size_t) atomic_size_t; +typedef _Atomic(ptrdiff_t) atomic_ptrdiff_t; +typedef _Atomic(intmax_t) atomic_intmax_t; +typedef _Atomic(uintmax_t) atomic_uintmax_t; +/* + * 7.17.7 Operations on atomic types. + */ +/* + * Compiler-specific operations. 
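+ * Clang maps the *_explicit operations onto its __c11_atomic_* builtins,
+ * GCC >= 4.7 onto the __atomic_* builtins, and older compilers onto the
+ * legacy __sync_* primitives in the final #else branch.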
+ */ +#if defined(__CLANG_ATOMICS) +#define atomic_compare_exchange_strong_explicit(object, expected, \ + desired, success, failure) \ + __c11_atomic_compare_exchange_strong(object, expected, desired, \ + success, failure) +#define atomic_compare_exchange_weak_explicit(object, expected, \ + desired, success, failure) \ + __c11_atomic_compare_exchange_weak(object, expected, desired, \ + success, failure) +#define atomic_exchange_explicit(object, desired, order) \ + __c11_atomic_exchange(object, desired, order) +#define atomic_fetch_add_explicit(object, operand, order) \ + __c11_atomic_fetch_add(object, operand, order) +#define atomic_fetch_and_explicit(object, operand, order) \ + __c11_atomic_fetch_and(object, operand, order) +#define atomic_fetch_or_explicit(object, operand, order) \ + __c11_atomic_fetch_or(object, operand, order) +#define atomic_fetch_sub_explicit(object, operand, order) \ + __c11_atomic_fetch_sub(object, operand, order) +#define atomic_fetch_xor_explicit(object, operand, order) \ + __c11_atomic_fetch_xor(object, operand, order) +#define atomic_load_explicit(object, order) \ + __c11_atomic_load(object, order) +#define atomic_store_explicit(object, desired, order) \ + __c11_atomic_store(object, desired, order) +#elif defined(__GNUC_ATOMICS) +#define atomic_compare_exchange_strong_explicit(object, expected, \ + desired, success, failure) \ + __atomic_compare_exchange_n(&(object)->__val, expected, \ + desired, 0, success, failure) +#define atomic_compare_exchange_weak_explicit(object, expected, \ + desired, success, failure) \ + __atomic_compare_exchange_n(&(object)->__val, expected, \ + desired, 1, success, failure) +#define atomic_exchange_explicit(object, desired, order) \ + __atomic_exchange_n(&(object)->__val, desired, order) +#define atomic_fetch_add_explicit(object, operand, order) \ + __atomic_fetch_add(&(object)->__val, operand, order) +#define atomic_fetch_and_explicit(object, operand, order) \ + __atomic_fetch_and(&(object)->__val, operand, order) +#define atomic_fetch_or_explicit(object, operand, order) \ + __atomic_fetch_or(&(object)->__val, operand, order) +#define atomic_fetch_sub_explicit(object, operand, order) \ + __atomic_fetch_sub(&(object)->__val, operand, order) +#define atomic_fetch_xor_explicit(object, operand, order) \ + __atomic_fetch_xor(&(object)->__val, operand, order) +#define atomic_load_explicit(object, order) \ + __atomic_load_n(&(object)->__val, order) +#define atomic_store_explicit(object, desired, order) \ + __atomic_store_n(&(object)->__val, desired, order) +#else +#define __atomic_apply_stride(object, operand) \ + (((__typeof__((object)->__val))0) + (operand)) +#define atomic_compare_exchange_strong_explicit(object, expected, \ + desired, success, failure) __extension__({ \ + __typeof__(expected) __ep = (expected); \ + __typeof__(*__ep) __e = *__ep; \ + (void)(success); \ + (void)(failure); \ + (bool)((*__ep = __sync_val_compare_and_swap(&(object)->__val, \ + __e, desired)) == __e); \ +}) +#define atomic_compare_exchange_weak_explicit(object, expected, \ + desired, success, failure) \ + atomic_compare_exchange_strong_explicit(object, expected, \ + desired, success, failure) +#ifdef __HAS_BUILTIN_SYNC_SWAP +/* Clang provides a full-barrier atomic exchange - use it if available. 
*/ +#define atomic_exchange_explicit(object, desired, order) \ + ((void)(order), __sync_swap(&(object)->__val, desired)) +#else +/* + * __sync_lock_test_and_set() is only an acquire barrier in theory (although in + * practice it is usually a full barrier) so we need an explicit barrier before + * it. + */ +#define atomic_exchange_explicit(object, desired, order) \ + __extension__({ \ + __typeof__(object) __o = (object); \ + __typeof__(desired) __d = (desired); \ + (void)(order); \ + __sync_synchronize(); \ + __sync_lock_test_and_set(&(__o)->__val, __d); \ + }) +#endif +#define atomic_fetch_add_explicit(object, operand, order) \ + ((void)(order), __sync_fetch_and_add(&(object)->__val, \ + __atomic_apply_stride(object, operand))) +#define atomic_fetch_and_explicit(object, operand, order) \ + ((void)(order), __sync_fetch_and_and(&(object)->__val, operand)) +#define atomic_fetch_or_explicit(object, operand, order) \ + ((void)(order), __sync_fetch_and_or(&(object)->__val, operand)) +#define atomic_fetch_sub_explicit(object, operand, order) \ + ((void)(order), __sync_fetch_and_sub(&(object)->__val, \ + __atomic_apply_stride(object, operand))) +#define atomic_fetch_xor_explicit(object, operand, order) \ + ((void)(order), __sync_fetch_and_xor(&(object)->__val, operand)) +#define atomic_load_explicit(object, order) \ + ((void)(order), __sync_fetch_and_add(&(object)->__val, 0)) +#define atomic_store_explicit(object, desired, order) \ + ((void)atomic_exchange_explicit(object, desired, order)) +#endif +/* + * Convenience functions. + * + * Don't provide these in kernel space. In kernel space, we should be + * disciplined enough to always provide explicit barriers. + */ +#ifndef _KERNEL +#define atomic_compare_exchange_strong(object, expected, desired) \ + atomic_compare_exchange_strong_explicit(object, expected, \ + desired, memory_order_seq_cst, memory_order_seq_cst) +#define atomic_compare_exchange_weak(object, expected, desired) \ + atomic_compare_exchange_weak_explicit(object, expected, \ + desired, memory_order_seq_cst, memory_order_seq_cst) +#define atomic_exchange(object, desired) \ + atomic_exchange_explicit(object, desired, memory_order_seq_cst) +#define atomic_fetch_add(object, operand) \ + atomic_fetch_add_explicit(object, operand, memory_order_seq_cst) +#define atomic_fetch_and(object, operand) \ + atomic_fetch_and_explicit(object, operand, memory_order_seq_cst) +#define atomic_fetch_or(object, operand) \ + atomic_fetch_or_explicit(object, operand, memory_order_seq_cst) +#define atomic_fetch_sub(object, operand) \ + atomic_fetch_sub_explicit(object, operand, memory_order_seq_cst) +#define atomic_fetch_xor(object, operand) \ + atomic_fetch_xor_explicit(object, operand, memory_order_seq_cst) +#define atomic_load(object) \ + atomic_load_explicit(object, memory_order_seq_cst) +#define atomic_store(object, desired) \ + atomic_store_explicit(object, desired, memory_order_seq_cst) +#endif /* !_KERNEL */ +/* + * 7.17.8 Atomic flag type and operations. + * + * XXX: Assume atomic_bool can be used as an atomic_flag. Is there some + * kind of compiler built-in type we could use? 
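+ * For now it is a struct wrapping an atomic_bool: test-and-set is an
+ * atomic_exchange_explicit() of 1 and clear is an atomic_store_explicit()
+ * of 0.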
+ */ +typedef struct +{ + atomic_bool __flag; +} atomic_flag; +#define ATOMIC_FLAG_INIT \ + { \ + ATOMIC_VAR_INIT(false) \ + } +static __inline bool +atomic_flag_test_and_set_explicit(volatile atomic_flag *__object, + memory_order __order) +{ + return (atomic_exchange_explicit(&__object->__flag, 1, __order)); +} +static __inline void +atomic_flag_clear_explicit(volatile atomic_flag *__object, memory_order __order) +{ + atomic_store_explicit(&__object->__flag, 0, __order); +} +#ifndef _KERNEL +static __inline bool +atomic_flag_test_and_set(volatile atomic_flag *__object) +{ + return (atomic_flag_test_and_set_explicit(__object, + memory_order_seq_cst)); +} +static __inline void +atomic_flag_clear(volatile atomic_flag *__object) +{ + atomic_flag_clear_explicit(__object, memory_order_seq_cst); +} +#endif /* !_KERNEL */ +#endif /* unavailable */ +#endif /* !_STDATOMIC_H_ */ diff --git a/v_windows/v/old/thirdparty/stdatomic/win/atomic.h b/v_windows/v/old/thirdparty/stdatomic/win/atomic.h new file mode 100644 index 0000000..f6d7cf5 --- /dev/null +++ b/v_windows/v/old/thirdparty/stdatomic/win/atomic.h @@ -0,0 +1,366 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef COMPAT_ATOMICS_WIN32_STDATOMIC_H +#define COMPAT_ATOMICS_WIN32_STDATOMIC_H + +#define WIN32_LEAN_AND_MEAN +#include +#include +#include + +#ifdef __TINYC__ +#endif + +#define ATOMIC_FLAG_INIT 0 + +#define ATOMIC_VAR_INIT(value) (value) + +#define atomic_init(obj, value) \ + do \ + { \ + *(obj) = (value); \ + } while (0) + +#define kill_dependency(y) ((void)0) + +#define atomic_thread_fence(order) \ + MemoryBarrier(); + +#define atomic_signal_fence(order) \ + ((void)0) + +#define atomic_is_lock_free(obj) 0 + +typedef intptr_t atomic_flag; +typedef intptr_t atomic_bool; +typedef intptr_t atomic_char; +typedef intptr_t atomic_schar; +typedef intptr_t atomic_uchar; +typedef intptr_t atomic_short; +typedef intptr_t atomic_ushort; +typedef intptr_t atomic_int; +typedef intptr_t atomic_uint; +typedef intptr_t atomic_long; +typedef intptr_t atomic_ulong; +typedef intptr_t atomic_llong; +typedef intptr_t atomic_ullong; +typedef intptr_t atomic_wchar_t; +typedef intptr_t atomic_int_least8_t; +typedef intptr_t atomic_uint_least8_t; +typedef intptr_t atomic_int_least16_t; +typedef intptr_t atomic_uint_least16_t; +typedef intptr_t atomic_int_least32_t; +typedef intptr_t atomic_uint_least32_t; +typedef intptr_t atomic_int_least64_t; +typedef intptr_t atomic_uint_least64_t; +typedef intptr_t atomic_int_fast8_t; +typedef intptr_t atomic_uint_fast8_t; +typedef intptr_t atomic_int_fast16_t; +typedef intptr_t atomic_uint_fast16_t; +typedef intptr_t atomic_int_fast32_t; +typedef intptr_t atomic_uint_fast32_t; +typedef intptr_t atomic_int_fast64_t; +typedef intptr_t atomic_uint_fast64_t; +typedef intptr_t atomic_intptr_t; 
+typedef intptr_t atomic_uintptr_t; +typedef intptr_t atomic_size_t; +typedef intptr_t atomic_ptrdiff_t; +typedef intptr_t atomic_intmax_t; +typedef intptr_t atomic_uintmax_t; + +#ifdef __TINYC__ +/* + For TCC it is missing the x64 version of _InterlockedExchangeAdd64 so we + fake it (works the same) with InterlockedCompareExchange64 until it + succeeds +*/ +__CRT_INLINE LONGLONG _InterlockedExchangeAdd64(LONGLONG volatile *Addend, LONGLONG Value) +{ + LONGLONG Old; + do + { + Old = *Addend; + } while (InterlockedCompareExchange64(Addend, Old + Value, Old) != Old); + return Old; +} + +__CRT_INLINE LONG _InterlockedExchangeAdd(LONG volatile *Addend, LONG Value) +{ + LONG Old; + do + { + Old = *Addend; + } while (InterlockedCompareExchange(Addend, Old + Value, Old) != Old); + return Old; +} + +__CRT_INLINE SHORT _InterlockedExchangeAdd16(SHORT volatile *Addend, SHORT Value) +{ + SHORT Old; + do + { + Old = *Addend; + } while (InterlockedCompareExchange16(Addend, Old + Value, Old) != Old); + return Old; +} + +#define InterlockedIncrement64 _InterlockedExchangeAdd64 + +#endif + +#define atomic_store(object, desired) \ + do \ + { \ + *(object) = (desired); \ + MemoryBarrier(); \ + } while (0) + +#define atomic_store_explicit(object, desired, order) \ + atomic_store(object, desired) + +#define atomic_load(object) \ + (MemoryBarrier(), *(object)) + +#define atomic_load_explicit(object, order) \ + atomic_load(object) + +#define atomic_exchange(object, desired) \ + InterlockedExchangePointer(object, desired) + +#define atomic_exchange_explicit(object, desired, order) \ + atomic_exchange(object, desired) + +static inline int atomic_compare_exchange_strong(intptr_t *object, intptr_t *expected, + intptr_t desired) +{ + intptr_t old = *expected; + *expected = (intptr_t)InterlockedCompareExchangePointer( + (PVOID *)object, (PVOID)desired, (PVOID)old); + return *expected == old; +} + +#define atomic_compare_exchange_strong_explicit(object, expected, desired, success, failure) \ + atomic_compare_exchange_strong(object, expected, desired) + +#define atomic_compare_exchange_weak(object, expected, desired) \ + atomic_compare_exchange_strong(object, expected, desired) + +#define atomic_compare_exchange_weak_explicit(object, expected, desired, success, failure) \ + atomic_compare_exchange_weak(object, expected, desired) + +#ifdef _WIN64 + +#define atomic_fetch_add(object, operand) \ + InterlockedExchangeAdd64(object, operand) + +#define atomic_fetch_sub(object, operand) \ + InterlockedExchangeAdd64(object, -(operand)) + +#define atomic_fetch_or(object, operand) \ + InterlockedOr64(object, operand) + +#define atomic_fetch_xor(object, operand) \ + InterlockedXor64(object, operand) + +#define atomic_fetch_and(object, operand) \ + InterlockedAnd64(object, operand) +#else +#define atomic_fetch_add(object, operand) \ + InterlockedExchangeAdd(object, operand) + +#define atomic_fetch_sub(object, operand) \ + InterlockedExchangeAdd(object, -(operand)) + +#define atomic_fetch_or(object, operand) \ + InterlockedOr(object, operand) + +#define atomic_fetch_xor(object, operand) \ + InterlockedXor(object, operand) + +#define atomic_fetch_and(object, operand) \ + InterlockedAnd(object, operand) +#endif /* _WIN64 */ + +/* specialized versions with explicit object size */ + +#define atomic_load_ptr atomic_load +#define atomic_store_ptr atomic_store +#define atomic_compare_exchange_weak_ptr atomic_compare_exchange_weak +#define atomic_compare_exchange_strong_ptr atomic_compare_exchange_strong +#define atomic_exchange_ptr 
atomic_exchange +#define atomic_fetch_add_ptr atomic_fetch_add +#define atomic_fetch_sub_ptr atomic_fetch_sub +#define atomic_fetch_and_ptr atomic_fetch_and +#define atomic_fetch_or_ptr atomic_fetch_or +#define atomic_fetch_xor_ptr atomic_fetch_xor + +static inline void atomic_store_u64(unsigned long long* object, unsigned long long desired) { + do { + *(object) = (desired); + MemoryBarrier(); + } while (0); +} + +static inline unsigned long long atomic_load_u64(unsigned long long* object) { + return (MemoryBarrier(), *(object)); +} + +#define atomic_exchange_u64(object, desired) \ + InterlockedExchange64(object, desired) + +static inline int atomic_compare_exchange_strong_u64(unsigned long long* object, unsigned long long* expected, + unsigned long long desired) +{ + unsigned long long old = *expected; + *expected = InterlockedCompareExchange64(object, desired, old); + return *expected == old; +} + +#define atomic_compare_exchange_weak_u64(object, expected, desired) \ + atomic_compare_exchange_strong_u64(object, expected, desired) + +#define atomic_fetch_add_u64(object, operand) \ + InterlockedExchangeAdd64(object, operand) + +#define atomic_fetch_sub_u64(object, operand) \ + InterlockedExchangeAdd64(object, -(operand)) + +#define atomic_fetch_or_u64(object, operand) \ + InterlockedOr64(object, operand) + +#define atomic_fetch_xor_u64(object, operand) \ + InterlockedXor64(object, operand) + +#define atomic_fetch_and_u64(object, operand) \ + InterlockedAnd64(object, operand) + + + +static inline void atomic_store_u32(unsigned* object, unsigned desired) { + do { + *(object) = (desired); + MemoryBarrier(); + } while (0); +} + +static inline unsigned atomic_load_u32(unsigned* object) { + return (MemoryBarrier(), *(object)); +} + +#define atomic_exchange_u32(object, desired) \ + InterlockedExchange(object, desired) + +static inline int atomic_compare_exchange_strong_u32(unsigned* object, unsigned* expected, + unsigned desired) +{ + unsigned old = *expected; + *expected = InterlockedCompareExchange(object, desired, old); + return *expected == old; +} + +#define atomic_compare_exchange_weak_u32(object, expected, desired) \ + atomic_compare_exchange_strong_u32(object, expected, desired) + +#define atomic_fetch_add_u32(object, operand) \ + InterlockedExchangeAdd(object, operand) + +#define atomic_fetch_sub_u32(object, operand) \ + InterlockedExchangeAdd(object, -(operand)) + +#define atomic_fetch_or_u32(object, operand) \ + InterlockedOr(object, operand) + +#define atomic_fetch_xor_u32(object, operand) \ + InterlockedXor(object, operand) + +#define atomic_fetch_and_u32(object, operand) \ + InterlockedAnd(object, operand) + + + +static inline void atomic_store_u16(unsigned short* object, unsigned short desired) { + do { + *(object) = (desired); + MemoryBarrier(); + } while (0); +} + +static inline unsigned short atomic_load_u16(unsigned short* object) { + return (MemoryBarrier(), *(object)); +} + +#define atomic_exchange_u16(object, desired) \ + InterlockedExchange16(object, desired) + +static inline int atomic_compare_exchange_strong_u16(unsigned short* object, unsigned short* expected, + unsigned short desired) +{ + unsigned short old = *expected; + *expected = InterlockedCompareExchange16(object, desired, old); + return *expected == old; +} + +#define atomic_compare_exchange_weak_u16(object, expected, desired) \ + atomic_compare_exchange_strong_u16(object, expected, desired) + +#define atomic_fetch_add_u16(object, operand) \ + InterlockedExchangeAdd16(object, operand) + +#define 
atomic_fetch_sub_u16(object, operand) \ + InterlockedExchangeAdd16(object, -(operand)) + +#define atomic_fetch_or_u16(object, operand) \ + InterlockedOr16(object, operand) + +#define atomic_fetch_xor_u16(object, operand) \ + InterlockedXor16(object, operand) + +#define atomic_fetch_and_u16(object, operand) \ + InterlockedAnd16(object, operand) + + + +#define atomic_fetch_add_explicit(object, operand, order) \ + atomic_fetch_add(object, operand) + +#define atomic_fetch_sub_explicit(object, operand, order) \ + atomic_fetch_sub(object, operand) + +#define atomic_fetch_or_explicit(object, operand, order) \ + atomic_fetch_or(object, operand) + +#define atomic_fetch_xor_explicit(object, operand, order) \ + atomic_fetch_xor(object, operand) + +#define atomic_fetch_and_explicit(object, operand, order) \ + atomic_fetch_and(object, operand) + +#define atomic_flag_test_and_set(object) \ + atomic_exchange(object, 1) + +#define atomic_flag_test_and_set_explicit(object, order) \ + atomic_flag_test_and_set(object) + +#define atomic_flag_clear(object) \ + atomic_store(object, 0) + +#define atomic_flag_clear_explicit(object, order) \ + atomic_flag_clear(object) + +#endif /* COMPAT_ATOMICS_WIN32_STDATOMIC_H */ diff --git a/v_windows/v/old/thirdparty/vschannel/vschannel.c b/v_windows/v/old/thirdparty/vschannel/vschannel.c new file mode 100644 index 0000000..0d55dde --- /dev/null +++ b/v_windows/v/old/thirdparty/vschannel/vschannel.c @@ -0,0 +1,1095 @@ +#include +#include + +// Proxy +WCHAR * psz_proxy_server = L"proxy"; +INT i_proxy_port = 80; + +// Options +INT port_number = 443; +BOOL use_proxy = FALSE; +DWORD protocol = 0; +ALG_ID aid_key_exch = 0; + +// TODO: joe-c +// socket / tls ctx +struct TlsContext { + // SSPI + PSecurityFunctionTable sspi; + // Cred store + HCERTSTORE cert_store; + SCHANNEL_CRED schannel_cred; + // Socket + SOCKET socket; + CredHandle h_client_creds; + CtxtHandle h_context; + PCCERT_CONTEXT p_pemote_cert_context; + BOOL creds_initialized; + BOOL context_initialized; +}; + +TlsContext new_tls_context() { + return (struct TlsContext) { + .cert_store = NULL, + .socket = INVALID_SOCKET, + .creds_initialized = FALSE, + .context_initialized = FALSE, + .p_pemote_cert_context = NULL + }; +}; + +void vschannel_cleanup(TlsContext *tls_ctx) { + // Free the server certificate context. + if(tls_ctx->p_pemote_cert_context) { + CertFreeCertificateContext(tls_ctx->p_pemote_cert_context); + tls_ctx->p_pemote_cert_context = NULL; + } + + // Free SSPI context handle. + if(tls_ctx->context_initialized) { + tls_ctx->sspi->DeleteSecurityContext(&tls_ctx->h_context); + tls_ctx->context_initialized = FALSE; + } + + // Free SSPI credentials handle. + if(tls_ctx->creds_initialized) { + tls_ctx->sspi->FreeCredentialsHandle(&tls_ctx->h_client_creds); + tls_ctx->creds_initialized = FALSE; + } + + // Close socket. + if(tls_ctx->socket != INVALID_SOCKET) { + closesocket(tls_ctx->socket); + tls_ctx->socket = INVALID_SOCKET; + } + + // Close "MY" certificate store. + if(tls_ctx->cert_store) { + CertCloseStore(tls_ctx->cert_store, 0); + tls_ctx->cert_store = NULL; + } +} + +void vschannel_init(TlsContext *tls_ctx) { + tls_ctx->sspi = InitSecurityInterface(); + + if(tls_ctx->sspi == NULL) { + wprintf(L"Error 0x%x reading security interface.\n", + GetLastError()); + vschannel_cleanup(tls_ctx); + } + + // Create credentials. 
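+ // create_credentials() opens the "MY" system certificate store and
+ // acquires an outbound Schannel credential handle; on failure the
+ // context is torn straight back down.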
+ if(create_credentials(tls_ctx)) { + wprintf(L"Error creating credentials\n"); + vschannel_cleanup(tls_ctx); + } + tls_ctx->creds_initialized = TRUE; +} + +INT request(TlsContext *tls_ctx, INT iport, LPWSTR host, CHAR *req, CHAR **out) +{ + SecBuffer ExtraData; + SECURITY_STATUS Status; + + INT i; + INT iOption; + PCHAR pszOption; + + INT resp_length = 0; + + protocol = SP_PROT_TLS1_2_CLIENT; + + port_number = iport; + + // Connect to server. + if(connect_to_server(tls_ctx, host, port_number)) { + wprintf(L"Error connecting to server\n"); + vschannel_cleanup(tls_ctx); + return resp_length; + } + + // Perform handshake + if(perform_client_handshake(tls_ctx, host, &ExtraData)) { + wprintf(L"Error performing handshake\n"); + vschannel_cleanup(tls_ctx); + return resp_length; + } + tls_ctx->context_initialized = TRUE; + + // Authenticate server's credentials. + + // Get server's certificate. + Status = tls_ctx->sspi->QueryContextAttributes(&tls_ctx->h_context, + SECPKG_ATTR_REMOTE_CERT_CONTEXT, + (PVOID)&tls_ctx->p_pemote_cert_context); + if(Status != SEC_E_OK) { + wprintf(L"Error 0x%x querying remote certificate\n", Status); + vschannel_cleanup(tls_ctx); + return resp_length; + } + + // Attempt to validate server certificate. + Status = verify_server_certificate(tls_ctx->p_pemote_cert_context, host,0); + if(Status) { + // The server certificate did not validate correctly. At this + // point, we cannot tell if we are connecting to the correct + // server, or if we are connecting to a "man in the middle" + // attack server. + + // It is therefore best if we abort the connection. + + wprintf(L"Error 0x%x authenticating server credentials!\n", Status); + vschannel_cleanup(tls_ctx); + return resp_length; + } + + // Free the server certificate context. + CertFreeCertificateContext(tls_ctx->p_pemote_cert_context); + tls_ctx->p_pemote_cert_context = NULL; + + // Request from server + if(https_make_request(tls_ctx, req, out, &resp_length)) { + vschannel_cleanup(tls_ctx); + return resp_length; + } + + // Send a close_notify alert to the server and + // close down the connection. + if(disconnect_from_server(tls_ctx)) { + wprintf(L"Error disconnecting from server\n"); + vschannel_cleanup(tls_ctx); + return resp_length; + } + tls_ctx->context_initialized = FALSE; + tls_ctx->socket = INVALID_SOCKET; + + return resp_length; +} + + +static SECURITY_STATUS create_credentials(TlsContext *tls_ctx) { + TimeStamp tsExpiry; + SECURITY_STATUS Status; + + DWORD cSupportedAlgs = 0; + ALG_ID rgbSupportedAlgs[16]; + + PCCERT_CONTEXT pCertContext = NULL; + + // Open the "MY" certificate store, which is where Internet Explorer + // stores its client certificates. + if(tls_ctx->cert_store == NULL) { + tls_ctx->cert_store = CertOpenSystemStore(0, L"MY"); + + if(!tls_ctx->cert_store) { + wprintf(L"Error 0x%x returned by CertOpenSystemStore\n", + GetLastError()); + return SEC_E_NO_CREDENTIALS; + } + } + + // Build Schannel credential structure. Currently, this sample only + // specifies the protocol to be used (and optionally the certificate, + // of course). Real applications may wish to specify other parameters + // as well. 
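+ // The credential is zeroed, tagged SCHANNEL_CRED_VERSION, limited to the
+ // requested protocol(s), and flagged SCH_CRED_NO_DEFAULT_CREDS so that
+ // Schannel never attaches a client certificate on its own.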
+ + ZeroMemory(&tls_ctx->schannel_cred, sizeof(tls_ctx->schannel_cred)); + + tls_ctx->schannel_cred.dwVersion = SCHANNEL_CRED_VERSION; + if(pCertContext) + { + tls_ctx->schannel_cred.cCreds = 1; + tls_ctx->schannel_cred.paCred = &pCertContext; + } + + tls_ctx->schannel_cred.grbitEnabledProtocols = protocol; + + if(aid_key_exch) + { + rgbSupportedAlgs[cSupportedAlgs++] = aid_key_exch; + } + + if(cSupportedAlgs) + { + tls_ctx->schannel_cred.cSupportedAlgs = cSupportedAlgs; + tls_ctx->schannel_cred.palgSupportedAlgs = rgbSupportedAlgs; + } + + tls_ctx->schannel_cred.dwFlags |= SCH_CRED_NO_DEFAULT_CREDS; + + // The SCH_CRED_MANUAL_CRED_VALIDATION flag is specified because + // this sample verifies the server certificate manually. + // Applications that expect to run on WinNT, Win9x, or WinME + // should specify this flag and also manually verify the server + // certificate. Applications running on newer versions of Windows can + // leave off this flag, in which case the InitializeSecurityContext + // function will validate the server certificate automatically. + // tls_ctx->schannel_cred.dwFlags |= SCH_CRED_MANUAL_CRED_VALIDATION; + + // Create an SSPI credential. + + Status = tls_ctx->sspi->AcquireCredentialsHandle( + NULL, // Name of principal + UNISP_NAME_W, // Name of package + SECPKG_CRED_OUTBOUND, // Flags indicating use + NULL, // Pointer to logon ID + &tls_ctx->schannel_cred, // Package specific data + NULL, // Pointer to GetKey() func + NULL, // Value to pass to GetKey() + &tls_ctx->h_client_creds, // (out) Cred Handle + &tsExpiry); // (out) Lifetime (optional) + if(Status != SEC_E_OK) { + wprintf(L"Error 0x%x returned by AcquireCredentialsHandle\n", Status); + goto cleanup; + } + +cleanup: + + // Free the certificate context. Schannel has already made its own copy. + + if(pCertContext) { + CertFreeCertificateContext(pCertContext); + } + + + return Status; +} + + +static INT connect_to_server(TlsContext *tls_ctx, LPWSTR host, INT port_number) { + SOCKET Socket; + + SOCKADDR_STORAGE local_address = { 0 }; + SOCKADDR_STORAGE remote_address = { 0 }; + + DWORD local_address_length = sizeof(local_address); + DWORD remote_address_length = sizeof(remote_address); + + struct timeval tv; + tv.tv_sec = 60; + tv.tv_usec = 0; + + Socket = socket(PF_INET, SOCK_STREAM, 0); + if(Socket == INVALID_SOCKET) { + wprintf(L"Error %d creating socket\n", WSAGetLastError()); + return WSAGetLastError(); + } + + LPWSTR connect_name = use_proxy ? 
psz_proxy_server : host; + + WCHAR service_name[10]; + int res = wsprintf(service_name, L"%d", port_number); + + if(WSAConnectByNameW(Socket,connect_name, service_name, &local_address_length, + &local_address, &remote_address_length, &remote_address, &tv, NULL) == SOCKET_ERROR) { + wprintf(L"Error %d connecting to \"%s\" (%s)\n", + WSAGetLastError(), + connect_name, + service_name); + closesocket(Socket); + return WSAGetLastError(); + } + + if(use_proxy) { + BYTE pbMessage[200]; + DWORD cbMessage; + + // Build message for proxy server + strcpy(pbMessage, "CONNECT "); + strcat(pbMessage, host); + strcat(pbMessage, ":"); + _itoa(port_number, pbMessage + strlen(pbMessage), 10); + strcat(pbMessage, " HTTP/1.0\r\nUser-Agent: webclient\r\n\r\n"); + cbMessage = (DWORD)strlen(pbMessage); + + // Send message to proxy server + if(send(Socket, pbMessage, cbMessage, 0) == SOCKET_ERROR) { + wprintf(L"Error %d sending message to proxy!\n", WSAGetLastError()); + return WSAGetLastError(); + } + + // Receive message from proxy server + cbMessage = recv(Socket, pbMessage, 200, 0); + if(cbMessage == SOCKET_ERROR) { + wprintf(L"Error %d receiving message from proxy\n", WSAGetLastError()); + return WSAGetLastError(); + } + + // this sample is limited but in normal use it + // should continue to receive until CR LF CR LF is received + } + + tls_ctx->socket = Socket; + + return SEC_E_OK; +} + + +static LONG disconnect_from_server(TlsContext *tls_ctx) { + DWORD dwType; + PBYTE pbMessage; + DWORD cbMessage; + DWORD cbData; + + SecBufferDesc OutBuffer; + SecBuffer OutBuffers[1]; + DWORD dwSSPIFlags; + DWORD dwSSPIOutFlags; + TimeStamp tsExpiry; + DWORD Status; + + // Notify schannel that we are about to close the connection. + + dwType = SCHANNEL_SHUTDOWN; + + OutBuffers[0].pvBuffer = &dwType; + OutBuffers[0].BufferType = SECBUFFER_TOKEN; + OutBuffers[0].cbBuffer = sizeof(dwType); + + OutBuffer.cBuffers = 1; + OutBuffer.pBuffers = OutBuffers; + OutBuffer.ulVersion = SECBUFFER_VERSION; + + Status = tls_ctx->sspi->ApplyControlToken(&tls_ctx->h_context, &OutBuffer); + + if(FAILED(Status)) { + wprintf(L"Error 0x%x returned by ApplyControlToken\n", Status); + goto cleanup; + } + + // Build an SSL close notify message. + + dwSSPIFlags = ISC_REQ_SEQUENCE_DETECT | + ISC_REQ_REPLAY_DETECT | + ISC_REQ_CONFIDENTIALITY | + ISC_RET_EXTENDED_ERROR | + ISC_REQ_ALLOCATE_MEMORY | + ISC_REQ_STREAM; + + OutBuffers[0].pvBuffer = NULL; + OutBuffers[0].BufferType = SECBUFFER_TOKEN; + OutBuffers[0].cbBuffer = 0; + + OutBuffer.cBuffers = 1; + OutBuffer.pBuffers = OutBuffers; + OutBuffer.ulVersion = SECBUFFER_VERSION; + + Status = tls_ctx->sspi->InitializeSecurityContext( + &tls_ctx->h_client_creds, &tls_ctx->h_context, NULL, dwSSPIFlags, 0, SECURITY_NATIVE_DREP, + NULL, 0, &tls_ctx->h_context, &OutBuffer, &dwSSPIOutFlags, &tsExpiry); + + if(FAILED(Status)) { + wprintf(L"Error 0x%x returned by InitializeSecurityContext\n", Status); + goto cleanup; + } + + pbMessage = OutBuffers[0].pvBuffer; + cbMessage = OutBuffers[0].cbBuffer; + + // Send the close notify message to the server. + + if(pbMessage != NULL && cbMessage != 0) { + cbData = send(tls_ctx->socket, pbMessage, cbMessage, 0); + if(cbData == SOCKET_ERROR || cbData == 0) { + Status = WSAGetLastError(); + wprintf(L"Error %d sending close notify\n", Status); + goto cleanup; + } + + // Free output buffer. + tls_ctx->sspi->FreeContextBuffer(pbMessage); + } + + +cleanup: + + // Free the security context. + tls_ctx->sspi->DeleteSecurityContext(&tls_ctx->h_context); + + // Close the socket. 
+ closesocket(tls_ctx->socket); + + return Status; +} + + +static SECURITY_STATUS perform_client_handshake(TlsContext *tls_ctx, WCHAR *host, SecBuffer *pExtraData) { + SecBufferDesc OutBuffer; + SecBuffer OutBuffers[1]; + DWORD dwSSPIFlags; + DWORD dwSSPIOutFlags; + TimeStamp tsExpiry; + SECURITY_STATUS scRet; + DWORD cbData; + + dwSSPIFlags = ISC_REQ_SEQUENCE_DETECT | + ISC_REQ_REPLAY_DETECT | + ISC_REQ_CONFIDENTIALITY | + ISC_RET_EXTENDED_ERROR | + ISC_REQ_ALLOCATE_MEMORY | + ISC_REQ_STREAM; + + // + // Initiate a ClientHello message and generate a token. + // + + OutBuffers[0].pvBuffer = NULL; + OutBuffers[0].BufferType = SECBUFFER_TOKEN; + OutBuffers[0].cbBuffer = 0; + + OutBuffer.cBuffers = 1; + OutBuffer.pBuffers = OutBuffers; + OutBuffer.ulVersion = SECBUFFER_VERSION; + + scRet = tls_ctx->sspi->InitializeSecurityContext( + &tls_ctx->h_client_creds, + NULL, + host, + dwSSPIFlags, + 0, + SECURITY_NATIVE_DREP, + NULL, + 0, + &tls_ctx->h_context, + &OutBuffer, + &dwSSPIOutFlags, + &tsExpiry); + + if(scRet != SEC_I_CONTINUE_NEEDED) + { + wprintf(L"Error %d returned by InitializeSecurityContext (1)\n", scRet); + return scRet; + } + + // Send response to server if there is one. + if(OutBuffers[0].cbBuffer != 0 && OutBuffers[0].pvBuffer != NULL) + { + cbData = send(tls_ctx->socket, OutBuffers[0].pvBuffer, OutBuffers[0].cbBuffer, 0); + if(cbData == SOCKET_ERROR || cbData == 0) { + wprintf(L"Error %d sending data to server (1)\n", WSAGetLastError()); + tls_ctx->sspi->FreeContextBuffer(OutBuffers[0].pvBuffer); + tls_ctx->sspi->DeleteSecurityContext(&tls_ctx->h_context); + return SEC_E_INTERNAL_ERROR; + } + + // Free output buffer. + tls_ctx->sspi->FreeContextBuffer(OutBuffers[0].pvBuffer); + OutBuffers[0].pvBuffer = NULL; + } + + return client_handshake_loop(tls_ctx, TRUE, pExtraData); +} + + +static SECURITY_STATUS client_handshake_loop(TlsContext *tls_ctx, BOOL fDoInitialRead, SecBuffer *pExtraData) { + SecBufferDesc InBuffer; + SecBuffer InBuffers[2]; + SecBufferDesc OutBuffer; + SecBuffer OutBuffers[1]; + DWORD dwSSPIFlags; + DWORD dwSSPIOutFlags; + TimeStamp tsExpiry; + SECURITY_STATUS scRet; + DWORD cbData; + + PUCHAR IoBuffer; + DWORD cbIoBuffer; + BOOL fDoRead; + + + dwSSPIFlags = ISC_REQ_SEQUENCE_DETECT | + ISC_REQ_REPLAY_DETECT | + ISC_REQ_CONFIDENTIALITY | + ISC_RET_EXTENDED_ERROR | + ISC_REQ_ALLOCATE_MEMORY | + ISC_REQ_STREAM; + + // + // Allocate data buffer. + // + + IoBuffer = LocalAlloc(LPTR, IO_BUFFER_SIZE); + if(IoBuffer == NULL) + { + wprintf(L"Out of memory (1)\n"); + return SEC_E_INTERNAL_ERROR; + } + cbIoBuffer = 0; + + fDoRead = fDoInitialRead; + + + // + // Loop until the handshake is finished or an error occurs. + // + + scRet = SEC_I_CONTINUE_NEEDED; + + while(scRet == SEC_I_CONTINUE_NEEDED || + scRet == SEC_E_INCOMPLETE_MESSAGE || + scRet == SEC_I_INCOMPLETE_CREDENTIALS) { + + // Read data from server. + if(0 == cbIoBuffer || scRet == SEC_E_INCOMPLETE_MESSAGE) { + if(fDoRead) { + cbData = recv(tls_ctx->socket, + IoBuffer + cbIoBuffer, + IO_BUFFER_SIZE - cbIoBuffer, + 0); + if(cbData == SOCKET_ERROR) { + wprintf(L"Error %d reading data from server\n", WSAGetLastError()); + scRet = SEC_E_INTERNAL_ERROR; + break; + } + else if(cbData == 0) { + wprintf(L"Server unexpectedly disconnected\n"); + scRet = SEC_E_INTERNAL_ERROR; + break; + } + + cbIoBuffer += cbData; + } + else { + fDoRead = TRUE; + } + } + + // Set up the input buffers. Buffer 0 is used to pass in data + // received from the server. Schannel will consume some or all + // of this. 
Leftover data (if any) will be placed in buffer 1 and + // given a buffer type of SECBUFFER_EXTRA. + + InBuffers[0].pvBuffer = IoBuffer; + InBuffers[0].cbBuffer = cbIoBuffer; + InBuffers[0].BufferType = SECBUFFER_TOKEN; + + InBuffers[1].pvBuffer = NULL; + InBuffers[1].cbBuffer = 0; + InBuffers[1].BufferType = SECBUFFER_EMPTY; + + InBuffer.cBuffers = 2; + InBuffer.pBuffers = InBuffers; + InBuffer.ulVersion = SECBUFFER_VERSION; + + // Set up the output buffers. These are initialized to NULL + // so as to make it less likely we'll attempt to free random + // garbage later. + + OutBuffers[0].pvBuffer = NULL; + OutBuffers[0].BufferType= SECBUFFER_TOKEN; + OutBuffers[0].cbBuffer = 0; + + OutBuffer.cBuffers = 1; + OutBuffer.pBuffers = OutBuffers; + OutBuffer.ulVersion = SECBUFFER_VERSION; + + // Call InitializeSecurityContext. + + scRet = tls_ctx->sspi->InitializeSecurityContext( + &tls_ctx->h_client_creds, &tls_ctx->h_context, NULL, dwSSPIFlags, 0, SECURITY_NATIVE_DREP, + &InBuffer, 0, NULL, &OutBuffer, &dwSSPIOutFlags, &tsExpiry); + + // If InitializeSecurityContext was successful (or if the error was + // one of the special extended ones), send the contends of the output + // buffer to the server. + + if(scRet == SEC_E_OK || + scRet == SEC_I_CONTINUE_NEEDED || + FAILED(scRet) && (dwSSPIOutFlags & ISC_RET_EXTENDED_ERROR)) { + if(OutBuffers[0].cbBuffer != 0 && OutBuffers[0].pvBuffer != NULL) { + cbData = send(tls_ctx->socket, + OutBuffers[0].pvBuffer, + OutBuffers[0].cbBuffer, + 0); + if(cbData == SOCKET_ERROR || cbData == 0) { + wprintf(L"Error %d sending data to server (2)\n", + WSAGetLastError()); + tls_ctx->sspi->FreeContextBuffer(OutBuffers[0].pvBuffer); + tls_ctx->sspi->DeleteSecurityContext(&tls_ctx->h_context); + return SEC_E_INTERNAL_ERROR; + } + + // Free output buffer. + tls_ctx->sspi->FreeContextBuffer(OutBuffers[0].pvBuffer); + OutBuffers[0].pvBuffer = NULL; + } + } + + // If InitializeSecurityContext returned SEC_E_INCOMPLETE_MESSAGE, + // then we need to read more data from the server and try again. + if(scRet == SEC_E_INCOMPLETE_MESSAGE) { + continue; + } + + // If InitializeSecurityContext returned SEC_E_OK, then the + // handshake completed successfully. + + if(scRet == SEC_E_OK) { + // If the "extra" buffer contains data, this is encrypted application + // protocol layer stuff. It needs to be saved. The application layer + // will later decrypt it with DecryptMessage. + + if(InBuffers[1].BufferType == SECBUFFER_EXTRA) + { + pExtraData->pvBuffer = LocalAlloc(LPTR, InBuffers[1].cbBuffer); + if(pExtraData->pvBuffer == NULL) { + wprintf(L"Out of memory (2)\n"); + return SEC_E_INTERNAL_ERROR; + } + + MoveMemory(pExtraData->pvBuffer, + IoBuffer + (cbIoBuffer - InBuffers[1].cbBuffer), + InBuffers[1].cbBuffer); + + pExtraData->cbBuffer = InBuffers[1].cbBuffer; + pExtraData->BufferType = SECBUFFER_TOKEN; + + // wprintf(L"%d bytes of app data was bundled with handshake data\n", pExtraData->cbBuffer); + } + else { + pExtraData->pvBuffer = NULL; + pExtraData->cbBuffer = 0; + pExtraData->BufferType = SECBUFFER_EMPTY; + } + + // Bail out to quit + break; + } + + // Check for fatal error. + if(FAILED(scRet)) { + wprintf(L"Error 0x%x returned by InitializeSecurityContext (2)\n", scRet); + break; + } + + // If InitializeSecurityContext returned SEC_I_INCOMPLETE_CREDENTIALS, + // then the server just requested client authentication. + if(scRet == SEC_I_INCOMPLETE_CREDENTIALS) { + // Busted. 
The server has requested client authentication and + // the credential we supplied didn't contain a client certificate. + + // This function will read the list of trusted certificate + // authorities ("issuers") that was received from the server + // and attempt to find a suitable client certificate that + // was issued by one of these. If this function is successful, + // then we will connect using the new certificate. Otherwise, + // we will attempt to connect anonymously (using our current + // credentials). + + get_new_client_credentials(tls_ctx); + + // Go around again. + fDoRead = FALSE; + scRet = SEC_I_CONTINUE_NEEDED; + continue; + } + + // Copy any leftover data from the "extra" buffer, and go around + // again. + + if ( InBuffers[1].BufferType == SECBUFFER_EXTRA ) { + MoveMemory(IoBuffer, + IoBuffer + (cbIoBuffer - InBuffers[1].cbBuffer), + InBuffers[1].cbBuffer); + + cbIoBuffer = InBuffers[1].cbBuffer; + } + else { + cbIoBuffer = 0; + } + } + + // Delete the security context in the case of a fatal error. + if(FAILED(scRet)) { + tls_ctx->sspi->DeleteSecurityContext(&tls_ctx->h_context); + } + + LocalFree(IoBuffer); + + return scRet; +} + + +static SECURITY_STATUS https_make_request(TlsContext *tls_ctx, CHAR *req, CHAR **out, int *length) { + SecPkgContext_StreamSizes Sizes; + SECURITY_STATUS scRet; + SecBufferDesc Message; + SecBuffer Buffers[4]; + SecBuffer *pDataBuffer; + SecBuffer *pExtraBuffer; + SecBuffer ExtraBuffer; + + PBYTE pbIoBuffer; + DWORD cbIoBuffer; + DWORD cbIoBufferLength; + PBYTE pbMessage; + DWORD cbMessage; + + DWORD cbData; + INT i; + + + // Read stream encryption properties. + scRet = tls_ctx->sspi->QueryContextAttributes(&tls_ctx->h_context, SECPKG_ATTR_STREAM_SIZES, &Sizes); + if(scRet != SEC_E_OK) { + wprintf(L"Error 0x%x reading SECPKG_ATTR_STREAM_SIZES\n", scRet); + return scRet; + } + + // Allocate a working buffer. The plaintext sent to EncryptMessage + // should never be more than 'Sizes.cbMaximumMessage', so a buffer + // size of this plus the header and trailer sizes should be safe enough. + cbIoBufferLength = Sizes.cbHeader + Sizes.cbMaximumMessage + Sizes.cbTrailer; + + pbIoBuffer = LocalAlloc(LPTR, cbIoBufferLength); + if(pbIoBuffer == NULL) { + wprintf(L"Out of memory (2)\n"); + return SEC_E_INTERNAL_ERROR; + } + + // Build an HTTP request to send to the server. + + // Build the HTTP request offset into the data buffer by "header size" + // bytes. This enables Schannel to perform the encryption in place, + // which is a significant performance win. + pbMessage = pbIoBuffer + Sizes.cbHeader; + + // Build HTTP request. Note that I'm assuming that this is less than + // the maximum message size. If it weren't, it would have to be broken up. + sprintf(pbMessage, "%s", req); + + cbMessage = (DWORD)strlen(pbMessage); + + + // Encrypt the HTTP request. 
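+ // EncryptMessage() takes four SecBuffers: the stream header, the
+ // plaintext (encrypted in place), the stream trailer and a trailing
+ // SECBUFFER_EMPTY slot; the first three are sized from the
+ // SECPKG_ATTR_STREAM_SIZES query above.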
+ Buffers[0].pvBuffer = pbIoBuffer; + Buffers[0].cbBuffer = Sizes.cbHeader; + Buffers[0].BufferType = SECBUFFER_STREAM_HEADER; + + Buffers[1].pvBuffer = pbMessage; + Buffers[1].cbBuffer = cbMessage; + Buffers[1].BufferType = SECBUFFER_DATA; + + Buffers[2].pvBuffer = pbMessage + cbMessage; + Buffers[2].cbBuffer = Sizes.cbTrailer; + Buffers[2].BufferType = SECBUFFER_STREAM_TRAILER; + + Buffers[3].BufferType = SECBUFFER_EMPTY; + + Message.ulVersion = SECBUFFER_VERSION; + Message.cBuffers = 4; + Message.pBuffers = Buffers; + + scRet = tls_ctx->sspi->EncryptMessage(&tls_ctx->h_context, 0, &Message, 0); + + if(FAILED(scRet)) { + wprintf(L"Error 0x%x returned by EncryptMessage\n", scRet); + return scRet; + } + + // Send the encrypted data to the server. + cbData = send(tls_ctx->socket, pbIoBuffer, Buffers[0].cbBuffer + Buffers[1].cbBuffer + Buffers[2].cbBuffer, 0); + if(cbData == SOCKET_ERROR || cbData == 0) { + wprintf(L"Error %d sending data to server (3)\n", WSAGetLastError()); + tls_ctx->sspi->DeleteSecurityContext(&tls_ctx->h_context); + return SEC_E_INTERNAL_ERROR; + } + + // Read data from server until done. + INT buff_size = vsc_init_resp_buff_size; + cbIoBuffer = 0; + while(TRUE){ + // Read some data. + if(0 == cbIoBuffer || scRet == SEC_E_INCOMPLETE_MESSAGE) { + cbData = recv(tls_ctx->socket, pbIoBuffer + cbIoBuffer, cbIoBufferLength - cbIoBuffer, 0); + if(cbData == SOCKET_ERROR) { + wprintf(L"Error %d reading data from server\n", WSAGetLastError()); + scRet = SEC_E_INTERNAL_ERROR; + break; + } + else if(cbData == 0) { + // Server disconnected. + if(cbIoBuffer) { + wprintf(L"Server unexpectedly disconnected\n"); + scRet = SEC_E_INTERNAL_ERROR; + return scRet; + } + else { + break; + } + } + else { + cbIoBuffer += cbData; + } + } + + // Attempt to decrypt the received data. + Buffers[0].pvBuffer = pbIoBuffer; + Buffers[0].cbBuffer = cbIoBuffer; + Buffers[0].BufferType = SECBUFFER_DATA; + + Buffers[1].BufferType = SECBUFFER_EMPTY; + Buffers[2].BufferType = SECBUFFER_EMPTY; + Buffers[3].BufferType = SECBUFFER_EMPTY; + + Message.ulVersion = SECBUFFER_VERSION; + Message.cBuffers = 4; + Message.pBuffers = Buffers; + + scRet = tls_ctx->sspi->DecryptMessage(&tls_ctx->h_context, &Message, 0, NULL); + + if(scRet == SEC_E_INCOMPLETE_MESSAGE) { + // The input buffer contains only a fragment of an + // encrypted record. Loop around and read some more + // data. + continue; + } + + // Server signalled end of session + if(scRet == SEC_I_CONTEXT_EXPIRED) { + break; + } + + if( scRet != SEC_E_OK && + scRet != SEC_I_RENEGOTIATE && + scRet != SEC_I_CONTEXT_EXPIRED) + { + wprintf(L"Error 0x%x returned by DecryptMessage\n", scRet); + return scRet; + } + + // Locate data and (optional) extra buffers. 
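+ // DecryptMessage() rewrites the buffer descriptors in place: one of
+ // Buffers[1..3] comes back as SECBUFFER_DATA (the decrypted payload) and
+ // any unprocessed bytes as SECBUFFER_EXTRA for the next pass.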
+ pDataBuffer = NULL; + pExtraBuffer = NULL; + for(i = 1; i < 4; i++) { + if(pDataBuffer == NULL && Buffers[i].BufferType == SECBUFFER_DATA) + { + pDataBuffer = &Buffers[i]; + // wprintf(L"Buffers[%d].BufferType = SECBUFFER_DATA\n",i); + } + if(pExtraBuffer == NULL && Buffers[i].BufferType == SECBUFFER_EXTRA) + { + pExtraBuffer = &Buffers[i]; + } + } + + // increase buffer size if we need + int required_length = *length+(int)pDataBuffer->cbBuffer; + if( required_length > buff_size ) { + CHAR *a = realloc(*out, required_length); + if( a == NULL ) { + scRet = SEC_E_INTERNAL_ERROR; + return scRet; + } + *out = a; + buff_size = required_length; + } + // Copy the decrypted data to our output buffer + memcpy(*out+*length, pDataBuffer->pvBuffer, (int)pDataBuffer->cbBuffer); + *length += (int)pDataBuffer->cbBuffer; + + // Move any "extra" data to the input buffer. + if(pExtraBuffer) { + MoveMemory(pbIoBuffer, pExtraBuffer->pvBuffer, pExtraBuffer->cbBuffer); + cbIoBuffer = pExtraBuffer->cbBuffer; + } + else { + cbIoBuffer = 0; + } + + if(scRet == SEC_I_RENEGOTIATE) + { + // The server wants to perform another handshake sequence. + scRet = client_handshake_loop(tls_ctx, FALSE, &ExtraBuffer); + if(scRet != SEC_E_OK) { + return scRet; + } + + // Move any "extra" data to the input buffer. + if(ExtraBuffer.pvBuffer) + { + MoveMemory(pbIoBuffer, ExtraBuffer.pvBuffer, ExtraBuffer.cbBuffer); + cbIoBuffer = ExtraBuffer.cbBuffer; + } + } + } + + return SEC_E_OK; +} + + +static DWORD verify_server_certificate( PCCERT_CONTEXT pServerCert, LPWSTR host, DWORD dwCertFlags) { + HTTPSPolicyCallbackData polHttps; + CERT_CHAIN_POLICY_PARA PolicyPara; + CERT_CHAIN_POLICY_STATUS PolicyStatus; + CERT_CHAIN_PARA ChainPara; + PCCERT_CHAIN_CONTEXT pChainContext = NULL; + + CHAR *rgszUsages[] = { szOID_PKIX_KP_SERVER_AUTH, + szOID_SERVER_GATED_CRYPTO, + szOID_SGC_NETSCAPE }; + DWORD cUsages = sizeof(rgszUsages) / sizeof(CHAR*); + + PWSTR pwszServerName = NULL; + DWORD cchServerName; + DWORD Status; + + if(pServerCert == NULL) + { + Status = SEC_E_WRONG_PRINCIPAL; + goto cleanup; + } + + if(host == NULL || wcslen(host) == 0) { + Status = SEC_E_WRONG_PRINCIPAL; + goto cleanup; + } + + // Build certificate chain. + + ZeroMemory(&ChainPara, sizeof(ChainPara)); + ChainPara.cbSize = sizeof(ChainPara); + ChainPara.RequestedUsage.dwType = USAGE_MATCH_TYPE_OR; + ChainPara.RequestedUsage.Usage.cUsageIdentifier = cUsages; + ChainPara.RequestedUsage.Usage.rgpszUsageIdentifier = rgszUsages; + + if(!CertGetCertificateChain(NULL, pServerCert, NULL, pServerCert->hCertStore, &ChainPara, 0, NULL, &pChainContext)) { + Status = GetLastError(); + wprintf(L"Error 0x%x returned by CertGetCertificateChain!\n", Status); + goto cleanup; + } + + // Validate certificate chain. 
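+ // CertVerifyCertificateChainPolicy() with CERT_CHAIN_POLICY_SSL checks
+ // the chain against the expected host name plus whatever extra checks are
+ // passed in dwCertFlags; a non-zero PolicyStatus.dwError is fatal.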
+ ZeroMemory(&polHttps, sizeof(HTTPSPolicyCallbackData)); + polHttps.cbStruct = sizeof(HTTPSPolicyCallbackData); + polHttps.dwAuthType = AUTHTYPE_SERVER; + polHttps.fdwChecks = dwCertFlags; + polHttps.pwszServerName = host; + + memset(&PolicyPara, 0, sizeof(PolicyPara)); + PolicyPara.cbSize = sizeof(PolicyPara); + PolicyPara.pvExtraPolicyPara = &polHttps; + + memset(&PolicyStatus, 0, sizeof(PolicyStatus)); + PolicyStatus.cbSize = sizeof(PolicyStatus); + + if(!CertVerifyCertificateChainPolicy(CERT_CHAIN_POLICY_SSL, pChainContext, &PolicyPara, &PolicyStatus)){ + Status = GetLastError(); + wprintf(L"Error 0x%x returned by CertVerifyCertificateChainPolicy!\n", Status); + goto cleanup; + } + + if(PolicyStatus.dwError) { + Status = PolicyStatus.dwError; + goto cleanup; + } + + + Status = SEC_E_OK; + +cleanup: + + if(pChainContext) + { + CertFreeCertificateChain(pChainContext); + } + + if(pwszServerName) + { + LocalFree(pwszServerName); + } + + return Status; +} + + +static void get_new_client_credentials(TlsContext *tls_ctx) { + CredHandle hCreds; + SecPkgContext_IssuerListInfoEx IssuerListInfo; + PCCERT_CHAIN_CONTEXT pChainContext; + CERT_CHAIN_FIND_BY_ISSUER_PARA FindByIssuerPara; + PCCERT_CONTEXT pCertContext; + TimeStamp tsExpiry; + SECURITY_STATUS Status; + + // Read list of trusted issuers from schannel. + Status = tls_ctx->sspi->QueryContextAttributes(&tls_ctx->h_context, SECPKG_ATTR_ISSUER_LIST_EX, (PVOID)&IssuerListInfo); + if(Status != SEC_E_OK) { + wprintf(L"Error 0x%x querying issuer list info\n", Status); + return; + } + + // Enumerate the client certificates. + + ZeroMemory(&FindByIssuerPara, sizeof(FindByIssuerPara)); + + FindByIssuerPara.cbSize = sizeof(FindByIssuerPara); + FindByIssuerPara.pszUsageIdentifier = szOID_PKIX_KP_CLIENT_AUTH; + FindByIssuerPara.dwKeySpec = 0; + FindByIssuerPara.cIssuer = IssuerListInfo.cIssuers; + FindByIssuerPara.rgIssuer = IssuerListInfo.aIssuers; + + pChainContext = NULL; + + while(TRUE) { + // Find a certificate chain. + pChainContext = CertFindChainInStore(tls_ctx->cert_store, + X509_ASN_ENCODING, + 0, + CERT_CHAIN_FIND_BY_ISSUER, + &FindByIssuerPara, + pChainContext); + if(pChainContext == NULL) { + wprintf(L"Error 0x%x finding cert chain\n", GetLastError()); + break; + } + + // Get pointer to leaf certificate context. + pCertContext = pChainContext->rgpChain[0]->rgpElement[0]->pCertContext; + + // Create schannel credential. + tls_ctx->schannel_cred.dwVersion = SCHANNEL_CRED_VERSION; + tls_ctx->schannel_cred.cCreds = 1; + tls_ctx->schannel_cred.paCred = &pCertContext; + + Status = tls_ctx->sspi->AcquireCredentialsHandle( + NULL, // Name of principal + UNISP_NAME_W, // Name of package + SECPKG_CRED_OUTBOUND, // Flags indicating use + NULL, // Pointer to logon ID + &tls_ctx->schannel_cred, // Package specific data + NULL, // Pointer to GetKey() func + NULL, // Value to pass to GetKey() + &hCreds, // (out) Cred Handle + &tsExpiry); // (out) Lifetime (optional) + if(Status != SEC_E_OK) { + wprintf(L"Error 0x%x returned by AcquireCredentialsHandle\n", Status); + continue; + } + + // Destroy the old credentials. + tls_ctx->sspi->FreeCredentialsHandle(&tls_ctx->h_client_creds); + + tls_ctx->h_client_creds = hCreds; + + // + // As you can see, this sample code maintains a single credential + // handle, replacing it as necessary. This is a little unusual. + // + // Many applications maintain a global credential handle that's + // anonymous (that is, it doesn't contain a client certificate), + // which is used to connect to all servers. 
If a particular server + // should require client authentication, then a new credential + // is created for use when connecting to that server. The global + // anonymous credential is retained for future connections to + // other servers. + // + // Maintaining a single anonymous credential that's used whenever + // possible is most efficient, since creating new credentials all + // the time is rather expensive. + // + + break; + } +} diff --git a/v_windows/v/old/thirdparty/vschannel/vschannel.h b/v_windows/v/old/thirdparty/vschannel/vschannel.h new file mode 100644 index 0000000..0338645 --- /dev/null +++ b/v_windows/v/old/thirdparty/vschannel/vschannel.h @@ -0,0 +1,47 @@ +#include +#include +#include +#include +#include +#include +#include +#include + +#define SECURITY_WIN32 +#include +#include + +#define vsc_init_resp_buff_size 44000 + +#define IO_BUFFER_SIZE 0x10000 + +#define TLS_MAX_BUFSIZ 32768 + +// Define here to be sure +#define SP_PROT_TLS1_2_CLIENT 0x00000800 + +typedef struct TlsContext TlsContext; + +TlsContext new_tls_context(); + +static void vschannel_init(TlsContext *tls_ctx); + +static void vschannel_cleanup(TlsContext *tls_ctx); + +static INT request(TlsContext *tls_ctx, INT iport, LPWSTR host, CHAR *req, CHAR **out); + +static SECURITY_STATUS https_make_request(TlsContext *tls_ctx, CHAR *req, CHAR **out, int *length); + +static INT connect_to_server(TlsContext *tls_ctx, LPWSTR host, INT port_number); + +static LONG disconnect_from_server(TlsContext *tls_ctx); + +static SECURITY_STATUS perform_client_handshake(TlsContext *tls_ctx, LPWSTR host, SecBuffer *pExtraData); + +static SECURITY_STATUS client_handshake_loop(TlsContext *tls_ctx, BOOL fDoInitialRead, SecBuffer *pExtraData); + +static DWORD verify_server_certificate(PCCERT_CONTEXT pServerCert, LPWSTR host, DWORD dwCertFlags); + +static SECURITY_STATUS create_credentials(TlsContext *tls_ctx); + +static void get_new_client_credentials(TlsContext *tls_ctx); diff --git a/v_windows/v/old/thirdparty/zip/miniz.h b/v_windows/v/old/thirdparty/zip/miniz.h new file mode 100644 index 0000000..33e7fa1 --- /dev/null +++ b/v_windows/v/old/thirdparty/zip/miniz.h @@ -0,0 +1,6859 @@ +/* + miniz.c v1.15 - public domain deflate/inflate, zlib-subset, ZIP + reading/writing/appending, PNG writing See "unlicense" statement at the end + of this file. Rich Geldreich , last updated Oct. 13, + 2013 Implements RFC 1950: http://www.ietf.org/rfc/rfc1950.txt and RFC 1951: + http://www.ietf.org/rfc/rfc1951.txt + + Most API's defined in miniz.c are optional. For example, to disable the + archive related functions just define MINIZ_NO_ARCHIVE_APIS, or to get rid of + all stdio usage define MINIZ_NO_STDIO (see the list below for more macros). + + * Change History + 10/13/13 v1.15 r4 - Interim bugfix release while I work on the next major + release with Zip64 support (almost there!): + - Critical fix for the MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY bug + (thanks kahmyong.moon@hp.com) which could cause locate files to not find + files. This bug would only have occured in earlier versions if you explicitly + used this flag, OR if you used mz_zip_extract_archive_file_to_heap() or + mz_zip_add_mem_to_archive_file_in_place() (which used this flag). If you + can't switch to v1.15 but want to fix this bug, just remove the uses of this + flag from both helper funcs (and of course don't use the flag). 
+ - Bugfix in mz_zip_reader_extract_to_mem_no_alloc() from kymoon when + pUser_read_buf is not NULL and compressed size is > uncompressed size + - Fixing mz_zip_reader_extract_*() funcs so they don't try to extract + compressed data from directory entries, to account for weird zipfiles which + contain zero-size compressed data on dir entries. Hopefully this fix won't + cause any issues on weird zip archives, because it assumes the low 16-bits of + zip external attributes are DOS attributes (which I believe they always are + in practice). + - Fixing mz_zip_reader_is_file_a_directory() so it doesn't check the + internal attributes, just the filename and external attributes + - mz_zip_reader_init_file() - missing MZ_FCLOSE() call if the seek failed + - Added cmake support for Linux builds which builds all the examples, + tested with clang v3.3 and gcc v4.6. + - Clang fix for tdefl_write_image_to_png_file_in_memory() from toffaletti + - Merged MZ_FORCEINLINE fix from hdeanclark + - Fix include before config #ifdef, thanks emil.brink + - Added tdefl_write_image_to_png_file_in_memory_ex(): supports Y flipping + (super useful for OpenGL apps), and explicit control over the compression + level (so you can set it to 1 for real-time compression). + - Merged in some compiler fixes from paulharris's github repro. + - Retested this build under Windows (VS 2010, including static analysis), + tcc 0.9.26, gcc v4.6 and clang v3.3. + - Added example6.c, which dumps an image of the mandelbrot set to a PNG + file. + - Modified example2 to help test the + MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY flag more. + - In r3: Bugfix to mz_zip_writer_add_file() found during merge: Fix + possible src file fclose() leak if alignment bytes+local header file write + faiiled + - In r4: Minor bugfix to mz_zip_writer_add_from_zip_reader(): Was pushing the + wrong central dir header offset, appears harmless in this release, but it + became a problem in the zip64 branch 5/20/12 v1.14 - MinGW32/64 GCC 4.6.1 + compiler fixes: added MZ_FORCEINLINE, #include (thanks fermtect). + 5/19/12 v1.13 - From jason@cornsyrup.org and kelwert@mtu.edu - Fix + mz_crc32() so it doesn't compute the wrong CRC-32's when mz_ulong is 64-bit. + - Temporarily/locally slammed in "typedef unsigned long mz_ulong" and + re-ran a randomized regression test on ~500k files. + - Eliminated a bunch of warnings when compiling with GCC 32-bit/64. + - Ran all examples, miniz.c, and tinfl.c through MSVC 2008's /analyze + (static analysis) option and fixed all warnings (except for the silly "Use of + the comma-operator in a tested expression.." analysis warning, which I + purposely use to work around a MSVC compiler warning). + - Created 32-bit and 64-bit Codeblocks projects/workspace. Built and + tested Linux executables. The codeblocks workspace is compatible with + Linux+Win32/x64. + - Added miniz_tester solution/project, which is a useful little app + derived from LZHAM's tester app that I use as part of the regression test. + - Ran miniz.c and tinfl.c through another series of regression testing on + ~500,000 files and archives. + - Modified example5.c so it purposely disables a bunch of high-level + functionality (MINIZ_NO_STDIO, etc.). (Thanks to corysama for the + MINIZ_NO_STDIO bug report.) + - Fix ftell() usage in examples so they exit with an error on files which + are too large (a limitation of the examples, not miniz itself). 4/12/12 v1.12 + - More comments, added low-level example5.c, fixed a couple minor + level_and_flags issues in the archive API's. 
level_and_flags can now be set + to MZ_DEFAULT_COMPRESSION. Thanks to Bruce Dawson + for the feedback/bug report. 5/28/11 v1.11 - Added statement from + unlicense.org 5/27/11 v1.10 - Substantial compressor optimizations: + - Level 1 is now ~4x faster than before. The L1 compressor's throughput + now varies between 70-110MB/sec. on a + - Core i7 (actual throughput varies depending on the type of data, and x64 + vs. x86). + - Improved baseline L2-L9 compression perf. Also, greatly improved + compression perf. issues on some file types. + - Refactored the compression code for better readability and + maintainability. + - Added level 10 compression level (L10 has slightly better ratio than + level 9, but could have a potentially large drop in throughput on some + files). 5/15/11 v1.09 - Initial stable release. + + * Low-level Deflate/Inflate implementation notes: + + Compression: Use the "tdefl" API's. The compressor supports raw, static, + and dynamic blocks, lazy or greedy parsing, match length filtering, RLE-only, + and Huffman-only streams. It performs and compresses approximately as well as + zlib. + + Decompression: Use the "tinfl" API's. The entire decompressor is + implemented as a single function coroutine: see tinfl_decompress(). It + supports decompression into a 32KB (or larger power of 2) wrapping buffer, or + into a memory block large enough to hold the entire file. + + The low-level tdefl/tinfl API's do not make any use of dynamic memory + allocation. + + * zlib-style API notes: + + miniz.c implements a fairly large subset of zlib. There's enough + functionality present for it to be a drop-in zlib replacement in many apps: + The z_stream struct, optional memory allocation callbacks + deflateInit/deflateInit2/deflate/deflateReset/deflateEnd/deflateBound + inflateInit/inflateInit2/inflate/inflateEnd + compress, compress2, compressBound, uncompress + CRC-32, Adler-32 - Using modern, minimal code size, CPU cache friendly + routines. Supports raw deflate streams or standard zlib streams with adler-32 + checking. + + Limitations: + The callback API's are not implemented yet. No support for gzip headers or + zlib static dictionaries. I've tried to closely emulate zlib's various + flavors of stream flushing and return status codes, but there are no + guarantees that miniz.c pulls this off perfectly. + + * PNG writing: See the tdefl_write_image_to_png_file_in_memory() function, + originally written by Alex Evans. Supports 1-4 bytes/pixel images. + + * ZIP archive API notes: + + The ZIP archive API's where designed with simplicity and efficiency in + mind, with just enough abstraction to get the job done with minimal fuss. + There are simple API's to retrieve file information, read files from existing + archives, create new archives, append new files to existing archives, or + clone archive data from one archive to another. It supports archives located + in memory or the heap, on disk (using stdio.h), or you can specify custom + file read/write callbacks. + + - Archive reading: Just call this function to read a single file from a + disk archive: + + void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename, const + char *pArchive_name, size_t *pSize, mz_uint zip_flags); + + For more complex cases, use the "mz_zip_reader" functions. Upon opening an + archive, the entire central directory is located and read as-is into memory, + and subsequent file access only occurs when reading individual files. 
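+
+  A minimal sketch of the above (the archive and file names are placeholders,
+  and error handling is omitted):
+
+    size_t uncomp_size = 0;
+    void *p = mz_zip_extract_archive_file_to_heap("archive.zip", "dir/file.txt",
+      &uncomp_size, 0);
+    if (p)
+    {
+      // ... use the uncomp_size bytes at p ...
+      mz_free(p);
+    }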
+ + - Archives file scanning: The simple way is to use this function to scan a + loaded archive for a specific file: + + int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName, + const char *pComment, mz_uint flags); + + The locate operation can optionally check file comments too, which (as one + example) can be used to identify multiple versions of the same file in an + archive. This function uses a simple linear search through the central + directory, so it's not very fast. + + Alternately, you can iterate through all the files in an archive (using + mz_zip_reader_get_num_files()) and retrieve detailed info on each file by + calling mz_zip_reader_file_stat(). + + - Archive creation: Use the "mz_zip_writer" functions. The ZIP writer + immediately writes compressed file data to disk and builds an exact image of + the central directory in memory. The central directory image is written all + at once at the end of the archive file when the archive is finalized. + + The archive writer can optionally align each file's local header and file + data to any power of 2 alignment, which can be useful when the archive will + be read from optical media. Also, the writer supports placing arbitrary data + blobs at the very beginning of ZIP archives. Archives written using either + feature are still readable by any ZIP tool. + + - Archive appending: The simple way to add a single file to an archive is + to call this function: + + mz_bool mz_zip_add_mem_to_archive_file_in_place(const char *pZip_filename, + const char *pArchive_name, const void *pBuf, size_t buf_size, const void + *pComment, mz_uint16 comment_size, mz_uint level_and_flags); + + The archive will be created if it doesn't already exist, otherwise it'll be + appended to. Note the appending is done in-place and is not an atomic + operation, so if something goes wrong during the operation it's possible the + archive could be left without a central directory (although the local file + headers and file data will be fine, so the archive will be recoverable). + + For more complex archive modification scenarios: + 1. The safest way is to use a mz_zip_reader to read the existing archive, + cloning only those bits you want to preserve into a new archive using using + the mz_zip_writer_add_from_zip_reader() function (which compiles the + compressed file data as-is). When you're done, delete the old archive and + rename the newly written archive, and you're done. This is safe but requires + a bunch of temporary disk space or heap memory. + + 2. Or, you can convert an mz_zip_reader in-place to an mz_zip_writer using + mz_zip_writer_init_from_reader(), append new files as needed, then finalize + the archive which will write an updated central directory to the original + archive. (This is basically what mz_zip_add_mem_to_archive_file_in_place() + does.) There's a possibility that the archive's central directory could be + lost with this method if anything goes wrong, though. + + - ZIP archive support limitations: + No zip64 or spanning support. Extraction functions can only handle + unencrypted, stored or deflated files. Requires streams capable of seeking. + + * This is a header file library, like stb_image.c. To get only a header file, + either cut and paste the below header, or create miniz.h, #define + MINIZ_HEADER_FILE_ONLY, and then include miniz.c from it. + + * Important: For best perf. 
be sure to customize the below macros for your + target platform: #define MINIZ_USE_UNALIGNED_LOADS_AND_STORES 1 #define + MINIZ_LITTLE_ENDIAN 1 #define MINIZ_HAS_64BIT_REGISTERS 1 + + * On platforms using glibc, Be sure to "#define _LARGEFILE64_SOURCE 1" before + including miniz.c to ensure miniz uses the 64-bit variants: fopen64(), + stat64(), etc. Otherwise you won't be able to process large files (i.e. + 32-bit stat() fails for me on files > 0x7FFFFFFF bytes). +*/ + +#ifndef MINIZ_HEADER_INCLUDED +#define MINIZ_HEADER_INCLUDED + +#include +#include + +// Defines to completely disable specific portions of miniz.c: +// If all macros here are defined the only functionality remaining will be +// CRC-32, adler-32, tinfl, and tdefl. + +// Define MINIZ_NO_STDIO to disable all usage and any functions which rely on +// stdio for file I/O. +//#define MINIZ_NO_STDIO + +// If MINIZ_NO_TIME is specified then the ZIP archive functions will not be able +// to get the current time, or get/set file times, and the C run-time funcs that +// get/set times won't be called. The current downside is the times written to +// your archives will be from 1979. +//#define MINIZ_NO_TIME + +// Define MINIZ_NO_ARCHIVE_APIS to disable all ZIP archive API's. +//#define MINIZ_NO_ARCHIVE_APIS + +// Define MINIZ_NO_ARCHIVE_APIS to disable all writing related ZIP archive +// API's. +//#define MINIZ_NO_ARCHIVE_WRITING_APIS + +// Define MINIZ_NO_ZLIB_APIS to remove all ZLIB-style compression/decompression +// API's. +//#define MINIZ_NO_ZLIB_APIS + +// Define MINIZ_NO_ZLIB_COMPATIBLE_NAME to disable zlib names, to prevent +// conflicts against stock zlib. +//#define MINIZ_NO_ZLIB_COMPATIBLE_NAMES + +// Define MINIZ_NO_MALLOC to disable all calls to malloc, free, and realloc. +// Note if MINIZ_NO_MALLOC is defined then the user must always provide custom +// user alloc/free/realloc callbacks to the zlib and archive API's, and a few +// stand-alone helper API's which don't provide custom user functions (such as +// tdefl_compress_mem_to_heap() and tinfl_decompress_mem_to_heap()) won't work. +//#define MINIZ_NO_MALLOC + +#if defined(__TINYC__) && (defined(__linux) || defined(__linux__)) +// TODO: Work around "error: include file 'sys\utime.h' when compiling with tcc +// on Linux +#define MINIZ_NO_TIME +#endif + +#if !defined(MINIZ_NO_TIME) && !defined(MINIZ_NO_ARCHIVE_APIS) +#include +#endif + +#if defined(_M_IX86) || defined(_M_X64) || defined(__i386__) || \ + defined(__i386) || defined(__i486__) || defined(__i486) || \ + defined(i386) || defined(__ia64__) || defined(__x86_64__) +// MINIZ_X86_OR_X64_CPU is only used to help set the below macros. +#define MINIZ_X86_OR_X64_CPU 1 +#endif + +#if (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) || MINIZ_X86_OR_X64_CPU +// Set MINIZ_LITTLE_ENDIAN to 1 if the processor is little endian. +#define MINIZ_LITTLE_ENDIAN 1 +#endif + +/* Set MINIZ_USE_UNALIGNED_LOADS_AND_STORES only if not set */ +#if !defined(MINIZ_USE_UNALIGNED_LOADS_AND_STORES) +#if MINIZ_X86_OR_X64_CPU +/* Set MINIZ_USE_UNALIGNED_LOADS_AND_STORES to 1 on CPU's that permit efficient + * integer loads and stores from unaligned addresses. 
*/ +#define MINIZ_USE_UNALIGNED_LOADS_AND_STORES 1 +#define MINIZ_UNALIGNED_USE_MEMCPY +#else +#define MINIZ_USE_UNALIGNED_LOADS_AND_STORES 0 +#endif +#endif + +#if defined(_M_X64) || defined(_WIN64) || defined(__MINGW64__) || \ + defined(_LP64) || defined(__LP64__) || defined(__ia64__) || \ + defined(__x86_64__) +// Set MINIZ_HAS_64BIT_REGISTERS to 1 if operations on 64-bit integers are +// reasonably fast (and don't involve compiler generated calls to helper +// functions). +#define MINIZ_HAS_64BIT_REGISTERS 1 +#endif + +#ifdef __APPLE__ +#define ftello64 ftello +#define fseeko64 fseeko +#define fopen64 fopen +#define freopen64 freopen + +// Darwin OSX +#define MZ_PLATFORM 19 +#endif + +#ifndef MZ_PLATFORM +#if defined(_WIN64) || defined(_WIN32) || defined(__WIN32__) +#define MZ_PLATFORM 0 +#else +// UNIX +#define MZ_PLATFORM 3 +#endif +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +// ------------------- zlib-style API Definitions. + +// For more compatibility with zlib, miniz.c uses unsigned long for some +// parameters/struct members. Beware: mz_ulong can be either 32 or 64-bits! +typedef unsigned long mz_ulong; + +// mz_free() internally uses the MZ_FREE() macro (which by default calls free() +// unless you've modified the MZ_MALLOC macro) to release a block allocated from +// the heap. +void mz_free(void *p); + +#define MZ_ADLER32_INIT (1) +// mz_adler32() returns the initial adler-32 value to use when called with +// ptr==NULL. +mz_ulong mz_adler32(mz_ulong adler, const unsigned char *ptr, size_t buf_len); + +#define MZ_CRC32_INIT (0) +// mz_crc32() returns the initial CRC-32 value to use when called with +// ptr==NULL. +mz_ulong mz_crc32(mz_ulong crc, const unsigned char *ptr, size_t buf_len); + +// Compression strategies. +enum { + MZ_DEFAULT_STRATEGY = 0, + MZ_FILTERED = 1, + MZ_HUFFMAN_ONLY = 2, + MZ_RLE = 3, + MZ_FIXED = 4 +}; + +/* miniz error codes. Be sure to update mz_zip_get_error_string() if you add or + * modify this enum. */ +typedef enum { + MZ_ZIP_NO_ERROR = 0, + MZ_ZIP_UNDEFINED_ERROR, + MZ_ZIP_TOO_MANY_FILES, + MZ_ZIP_FILE_TOO_LARGE, + MZ_ZIP_UNSUPPORTED_METHOD, + MZ_ZIP_UNSUPPORTED_ENCRYPTION, + MZ_ZIP_UNSUPPORTED_FEATURE, + MZ_ZIP_FAILED_FINDING_CENTRAL_DIR, + MZ_ZIP_NOT_AN_ARCHIVE, + MZ_ZIP_INVALID_HEADER_OR_CORRUPTED, + MZ_ZIP_UNSUPPORTED_MULTIDISK, + MZ_ZIP_DECOMPRESSION_FAILED, + MZ_ZIP_COMPRESSION_FAILED, + MZ_ZIP_UNEXPECTED_DECOMPRESSED_SIZE, + MZ_ZIP_CRC_CHECK_FAILED, + MZ_ZIP_UNSUPPORTED_CDIR_SIZE, + MZ_ZIP_ALLOC_FAILED, + MZ_ZIP_FILE_OPEN_FAILED, + MZ_ZIP_FILE_CREATE_FAILED, + MZ_ZIP_FILE_WRITE_FAILED, + MZ_ZIP_FILE_READ_FAILED, + MZ_ZIP_FILE_CLOSE_FAILED, + MZ_ZIP_FILE_SEEK_FAILED, + MZ_ZIP_FILE_STAT_FAILED, + MZ_ZIP_INVALID_PARAMETER, + MZ_ZIP_INVALID_FILENAME, + MZ_ZIP_BUF_TOO_SMALL, + MZ_ZIP_INTERNAL_ERROR, + MZ_ZIP_FILE_NOT_FOUND, + MZ_ZIP_ARCHIVE_TOO_LARGE, + MZ_ZIP_VALIDATION_FAILED, + MZ_ZIP_WRITE_CALLBACK_FAILED, + MZ_ZIP_TOTAL_ERRORS +} mz_zip_error; + +// Method +#define MZ_DEFLATED 8 + +#ifndef MINIZ_NO_ZLIB_APIS + +// Heap allocation callbacks. +// Note that mz_alloc_func parameter types purpsosely differ from zlib's: +// items/size is size_t, not unsigned long. 
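+//
+// As a rough illustration, caller-supplied callbacks matching the typedefs
+// below might look like this (the names are placeholders; malloc/free come
+// from <stdlib.h>):
+//
+//   static void *my_alloc(void *opaque, size_t items, size_t size) {
+//     (void)opaque; return malloc(items * size);
+//   }
+//   static void my_free(void *opaque, void *address) {
+//     (void)opaque; free(address);
+//   }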
+typedef void *(*mz_alloc_func)(void *opaque, size_t items, size_t size); +typedef void (*mz_free_func)(void *opaque, void *address); +typedef void *(*mz_realloc_func)(void *opaque, void *address, size_t items, + size_t size); + +#define MZ_VERSION "9.1.15" +#define MZ_VERNUM 0x91F0 +#define MZ_VER_MAJOR 9 +#define MZ_VER_MINOR 1 +#define MZ_VER_REVISION 15 +#define MZ_VER_SUBREVISION 0 + +// Flush values. For typical usage you only need MZ_NO_FLUSH and MZ_FINISH. The +// other values are for advanced use (refer to the zlib docs). +enum { + MZ_NO_FLUSH = 0, + MZ_PARTIAL_FLUSH = 1, + MZ_SYNC_FLUSH = 2, + MZ_FULL_FLUSH = 3, + MZ_FINISH = 4, + MZ_BLOCK = 5 +}; + +// Return status codes. MZ_PARAM_ERROR is non-standard. +enum { + MZ_OK = 0, + MZ_STREAM_END = 1, + MZ_NEED_DICT = 2, + MZ_ERRNO = -1, + MZ_STREAM_ERROR = -2, + MZ_DATA_ERROR = -3, + MZ_MEM_ERROR = -4, + MZ_BUF_ERROR = -5, + MZ_VERSION_ERROR = -6, + MZ_PARAM_ERROR = -10000 +}; + +// Compression levels: 0-9 are the standard zlib-style levels, 10 is best +// possible compression (not zlib compatible, and may be very slow), +// MZ_DEFAULT_COMPRESSION=MZ_DEFAULT_LEVEL. +enum { + MZ_NO_COMPRESSION = 0, + MZ_BEST_SPEED = 1, + MZ_BEST_COMPRESSION = 9, + MZ_UBER_COMPRESSION = 10, + MZ_DEFAULT_LEVEL = 6, + MZ_DEFAULT_COMPRESSION = -1 +}; + +// Window bits +#define MZ_DEFAULT_WINDOW_BITS 15 + +struct mz_internal_state; + +// Compression/decompression stream struct. +typedef struct mz_stream_s { + const unsigned char *next_in; // pointer to next byte to read + unsigned int avail_in; // number of bytes available at next_in + mz_ulong total_in; // total number of bytes consumed so far + + unsigned char *next_out; // pointer to next byte to write + unsigned int avail_out; // number of bytes that can be written to next_out + mz_ulong total_out; // total number of bytes produced so far + + char *msg; // error msg (unused) + struct mz_internal_state *state; // internal state, allocated by zalloc/zfree + + mz_alloc_func + zalloc; // optional heap allocation function (defaults to malloc) + mz_free_func zfree; // optional heap free function (defaults to free) + void *opaque; // heap alloc function user pointer + + int data_type; // data_type (unused) + mz_ulong adler; // adler32 of the source or uncompressed data + mz_ulong reserved; // not used +} mz_stream; + +typedef mz_stream *mz_streamp; + +// Returns the version string of miniz.c. +const char *mz_version(void); + +// mz_deflateInit() initializes a compressor with default options: +// Parameters: +// pStream must point to an initialized mz_stream struct. +// level must be between [MZ_NO_COMPRESSION, MZ_BEST_COMPRESSION]. +// level 1 enables a specially optimized compression function that's been +// optimized purely for performance, not ratio. (This special func. is +// currently only enabled when MINIZ_USE_UNALIGNED_LOADS_AND_STORES and +// MINIZ_LITTLE_ENDIAN are defined.) +// Return values: +// MZ_OK on success. +// MZ_STREAM_ERROR if the stream is bogus. +// MZ_PARAM_ERROR if the input parameters are bogus. +// MZ_MEM_ERROR on out of memory. 
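+//
+// A minimal setup sketch (assumes <string.h> for memset; the actual
+// compression loop and error handling are omitted):
+//
+//   mz_stream strm;
+//   memset(&strm, 0, sizeof(strm));  // zalloc/zfree left NULL -> malloc/free
+//   if (mz_deflateInit(&strm, MZ_DEFAULT_LEVEL) == MZ_OK) {
+//     // ... feed next_in/avail_in and drain next_out/avail_out via mz_deflate() ...
+//     mz_deflateEnd(&strm);
+//   }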
+int mz_deflateInit(mz_streamp pStream, int level); + +// mz_deflateInit2() is like mz_deflate(), except with more control: +// Additional parameters: +// method must be MZ_DEFLATED +// window_bits must be MZ_DEFAULT_WINDOW_BITS (to wrap the deflate stream with +// zlib header/adler-32 footer) or -MZ_DEFAULT_WINDOW_BITS (raw deflate/no +// header or footer) mem_level must be between [1, 9] (it's checked but +// ignored by miniz.c) +int mz_deflateInit2(mz_streamp pStream, int level, int method, int window_bits, + int mem_level, int strategy); + +// Quickly resets a compressor without having to reallocate anything. Same as +// calling mz_deflateEnd() followed by mz_deflateInit()/mz_deflateInit2(). +int mz_deflateReset(mz_streamp pStream); + +// mz_deflate() compresses the input to output, consuming as much of the input +// and producing as much output as possible. Parameters: +// pStream is the stream to read from and write to. You must initialize/update +// the next_in, avail_in, next_out, and avail_out members. flush may be +// MZ_NO_FLUSH, MZ_PARTIAL_FLUSH/MZ_SYNC_FLUSH, MZ_FULL_FLUSH, or MZ_FINISH. +// Return values: +// MZ_OK on success (when flushing, or if more input is needed but not +// available, and/or there's more output to be written but the output buffer +// is full). MZ_STREAM_END if all input has been consumed and all output bytes +// have been written. Don't call mz_deflate() on the stream anymore. +// MZ_STREAM_ERROR if the stream is bogus. +// MZ_PARAM_ERROR if one of the parameters is invalid. +// MZ_BUF_ERROR if no forward progress is possible because the input and/or +// output buffers are empty. (Fill up the input buffer or free up some output +// space and try again.) +int mz_deflate(mz_streamp pStream, int flush); + +// mz_deflateEnd() deinitializes a compressor: +// Return values: +// MZ_OK on success. +// MZ_STREAM_ERROR if the stream is bogus. +int mz_deflateEnd(mz_streamp pStream); + +// mz_deflateBound() returns a (very) conservative upper bound on the amount of +// data that could be generated by deflate(), assuming flush is set to only +// MZ_NO_FLUSH or MZ_FINISH. +mz_ulong mz_deflateBound(mz_streamp pStream, mz_ulong source_len); + +// Single-call compression functions mz_compress() and mz_compress2(): +// Returns MZ_OK on success, or one of the error codes from mz_deflate() on +// failure. +int mz_compress(unsigned char *pDest, mz_ulong *pDest_len, + const unsigned char *pSource, mz_ulong source_len); +int mz_compress2(unsigned char *pDest, mz_ulong *pDest_len, + const unsigned char *pSource, mz_ulong source_len, int level); + +// mz_compressBound() returns a (very) conservative upper bound on the amount of +// data that could be generated by calling mz_compress(). +mz_ulong mz_compressBound(mz_ulong source_len); + +// Initializes a decompressor. +int mz_inflateInit(mz_streamp pStream); + +// mz_inflateInit2() is like mz_inflateInit() with an additional option that +// controls the window size and whether or not the stream has been wrapped with +// a zlib header/footer: window_bits must be MZ_DEFAULT_WINDOW_BITS (to parse +// zlib header/footer) or -MZ_DEFAULT_WINDOW_BITS (raw deflate). +int mz_inflateInit2(mz_streamp pStream, int window_bits); + +// Decompresses the input stream to the output, consuming only as much of the +// input as needed, and writing as much to the output as possible. Parameters: +// pStream is the stream to read from and write to. You must initialize/update +// the next_in, avail_in, next_out, and avail_out members. 
flush may be +// MZ_NO_FLUSH, MZ_SYNC_FLUSH, or MZ_FINISH. On the first call, if flush is +// MZ_FINISH it's assumed the input and output buffers are both sized large +// enough to decompress the entire stream in a single call (this is slightly +// faster). MZ_FINISH implies that there are no more source bytes available +// beside what's already in the input buffer, and that the output buffer is +// large enough to hold the rest of the decompressed data. +// Return values: +// MZ_OK on success. Either more input is needed but not available, and/or +// there's more output to be written but the output buffer is full. +// MZ_STREAM_END if all needed input has been consumed and all output bytes +// have been written. For zlib streams, the adler-32 of the decompressed data +// has also been verified. MZ_STREAM_ERROR if the stream is bogus. +// MZ_DATA_ERROR if the deflate stream is invalid. +// MZ_PARAM_ERROR if one of the parameters is invalid. +// MZ_BUF_ERROR if no forward progress is possible because the input buffer is +// empty but the inflater needs more input to continue, or if the output +// buffer is not large enough. Call mz_inflate() again with more input data, +// or with more room in the output buffer (except when using single call +// decompression, described above). +int mz_inflate(mz_streamp pStream, int flush); + +// Deinitializes a decompressor. +int mz_inflateEnd(mz_streamp pStream); + +// Single-call decompression. +// Returns MZ_OK on success, or one of the error codes from mz_inflate() on +// failure. +int mz_uncompress(unsigned char *pDest, mz_ulong *pDest_len, + const unsigned char *pSource, mz_ulong source_len); + +// Returns a string description of the specified error code, or NULL if the +// error code is invalid. +const char *mz_error(int err); + +// Redefine zlib-compatible names to miniz equivalents, so miniz.c can be used +// as a drop-in replacement for the subset of zlib that miniz.c supports. Define +// MINIZ_NO_ZLIB_COMPATIBLE_NAMES to disable zlib-compatibility if you use zlib +// in the same project. 
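+//
+// For example (a minimal sketch; src_buf, src_len and a large-enough cmp_buf
+// are assumed to be provided by the caller), ordinary zlib-style code such as
+//
+//   uLong cmp_len = compressBound(src_len);
+//   if (compress(cmp_buf, &cmp_len, src_buf, src_len) != Z_OK) { /* handle the error */ }
+//
+// maps directly onto mz_compressBound()/mz_compress()/MZ_OK through the
+// aliases below.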
+#ifndef MINIZ_NO_ZLIB_COMPATIBLE_NAMES +typedef unsigned char Byte; +typedef unsigned int uInt; +typedef mz_ulong uLong; +typedef Byte Bytef; +typedef uInt uIntf; +typedef char charf; +typedef int intf; +typedef void *voidpf; +typedef uLong uLongf; +typedef void *voidp; +typedef void *const voidpc; +#define Z_NULL 0 +#define Z_NO_FLUSH MZ_NO_FLUSH +#define Z_PARTIAL_FLUSH MZ_PARTIAL_FLUSH +#define Z_SYNC_FLUSH MZ_SYNC_FLUSH +#define Z_FULL_FLUSH MZ_FULL_FLUSH +#define Z_FINISH MZ_FINISH +#define Z_BLOCK MZ_BLOCK +#define Z_OK MZ_OK +#define Z_STREAM_END MZ_STREAM_END +#define Z_NEED_DICT MZ_NEED_DICT +#define Z_ERRNO MZ_ERRNO +#define Z_STREAM_ERROR MZ_STREAM_ERROR +#define Z_DATA_ERROR MZ_DATA_ERROR +#define Z_MEM_ERROR MZ_MEM_ERROR +#define Z_BUF_ERROR MZ_BUF_ERROR +#define Z_VERSION_ERROR MZ_VERSION_ERROR +#define Z_PARAM_ERROR MZ_PARAM_ERROR +#define Z_NO_COMPRESSION MZ_NO_COMPRESSION +#define Z_BEST_SPEED MZ_BEST_SPEED +#define Z_BEST_COMPRESSION MZ_BEST_COMPRESSION +#define Z_DEFAULT_COMPRESSION MZ_DEFAULT_COMPRESSION +#define Z_DEFAULT_STRATEGY MZ_DEFAULT_STRATEGY +#define Z_FILTERED MZ_FILTERED +#define Z_HUFFMAN_ONLY MZ_HUFFMAN_ONLY +#define Z_RLE MZ_RLE +#define Z_FIXED MZ_FIXED +#define Z_DEFLATED MZ_DEFLATED +#define Z_DEFAULT_WINDOW_BITS MZ_DEFAULT_WINDOW_BITS +#define alloc_func mz_alloc_func +#define free_func mz_free_func +#define internal_state mz_internal_state +#define z_stream mz_stream +#define deflateInit mz_deflateInit +#define deflateInit2 mz_deflateInit2 +#define deflateReset mz_deflateReset +#define deflate mz_deflate +#define deflateEnd mz_deflateEnd +#define deflateBound mz_deflateBound +#define compress mz_compress +#define compress2 mz_compress2 +#define compressBound mz_compressBound +#define inflateInit mz_inflateInit +#define inflateInit2 mz_inflateInit2 +#define inflate mz_inflate +#define inflateEnd mz_inflateEnd +#define uncompress mz_uncompress +#define crc32 mz_crc32 +#define adler32 mz_adler32 +#define MAX_WBITS 15 +#define MAX_MEM_LEVEL 9 +#define zError mz_error +#define ZLIB_VERSION MZ_VERSION +#define ZLIB_VERNUM MZ_VERNUM +#define ZLIB_VER_MAJOR MZ_VER_MAJOR +#define ZLIB_VER_MINOR MZ_VER_MINOR +#define ZLIB_VER_REVISION MZ_VER_REVISION +#define ZLIB_VER_SUBREVISION MZ_VER_SUBREVISION +#define zlibVersion mz_version +#define zlib_version mz_version() +#endif // #ifndef MINIZ_NO_ZLIB_COMPATIBLE_NAMES + +#endif // MINIZ_NO_ZLIB_APIS + +// ------------------- Types and macros + +typedef unsigned char mz_uint8; +typedef signed short mz_int16; +typedef unsigned short mz_uint16; +typedef unsigned int mz_uint32; +typedef unsigned int mz_uint; +typedef long long mz_int64; +typedef unsigned long long mz_uint64; +typedef int mz_bool; + +#define MZ_FALSE (0) +#define MZ_TRUE (1) + +// An attempt to work around MSVC's spammy "warning C4127: conditional +// expression is constant" message. 
+#ifdef _MSC_VER +#define MZ_MACRO_END while (0, 0) +#else +#define MZ_MACRO_END while (0) +#endif + +// ------------------- ZIP archive reading/writing + +#ifndef MINIZ_NO_ARCHIVE_APIS + +enum { + MZ_ZIP_MAX_IO_BUF_SIZE = 64 * 1024, + MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE = 260, + MZ_ZIP_MAX_ARCHIVE_FILE_COMMENT_SIZE = 256 +}; + +typedef struct { + mz_uint32 m_file_index; + mz_uint32 m_central_dir_ofs; + mz_uint16 m_version_made_by; + mz_uint16 m_version_needed; + mz_uint16 m_bit_flag; + mz_uint16 m_method; +#ifndef MINIZ_NO_TIME + time_t m_time; +#endif + mz_uint32 m_crc32; + mz_uint64 m_comp_size; + mz_uint64 m_uncomp_size; + mz_uint16 m_internal_attr; + mz_uint32 m_external_attr; + mz_uint64 m_local_header_ofs; + mz_uint32 m_comment_size; + char m_filename[MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE]; + char m_comment[MZ_ZIP_MAX_ARCHIVE_FILE_COMMENT_SIZE]; +} mz_zip_archive_file_stat; + +typedef size_t (*mz_file_read_func)(void *pOpaque, mz_uint64 file_ofs, + void *pBuf, size_t n); +typedef size_t (*mz_file_write_func)(void *pOpaque, mz_uint64 file_ofs, + const void *pBuf, size_t n); +typedef mz_bool (*mz_file_needs_keepalive)(void *pOpaque); + +struct mz_zip_internal_state_tag; +typedef struct mz_zip_internal_state_tag mz_zip_internal_state; + +typedef enum { + MZ_ZIP_MODE_INVALID = 0, + MZ_ZIP_MODE_READING = 1, + MZ_ZIP_MODE_WRITING = 2, + MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED = 3 +} mz_zip_mode; + +typedef enum { + MZ_ZIP_TYPE_INVALID = 0, + MZ_ZIP_TYPE_USER, + MZ_ZIP_TYPE_MEMORY, + MZ_ZIP_TYPE_HEAP, + MZ_ZIP_TYPE_FILE, + MZ_ZIP_TYPE_CFILE, + MZ_ZIP_TOTAL_TYPES +} mz_zip_type; + +typedef struct { + mz_uint64 m_archive_size; + mz_uint64 m_central_directory_file_ofs; + + /* We only support up to UINT32_MAX files in zip64 mode. */ + mz_uint32 m_total_files; + mz_zip_mode m_zip_mode; + mz_zip_type m_zip_type; + mz_zip_error m_last_error; + + mz_uint64 m_file_offset_alignment; + + mz_alloc_func m_pAlloc; + mz_free_func m_pFree; + mz_realloc_func m_pRealloc; + void *m_pAlloc_opaque; + + mz_file_read_func m_pRead; + mz_file_write_func m_pWrite; + mz_file_needs_keepalive m_pNeeds_keepalive; + void *m_pIO_opaque; + + mz_zip_internal_state *m_pState; + +} mz_zip_archive; + +typedef enum { + MZ_ZIP_FLAG_CASE_SENSITIVE = 0x0100, + MZ_ZIP_FLAG_IGNORE_PATH = 0x0200, + MZ_ZIP_FLAG_COMPRESSED_DATA = 0x0400, + MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY = 0x0800 +} mz_zip_flags; + +// ZIP archive reading + +// Inits a ZIP archive reader. +// These functions read and validate the archive's central directory. +mz_bool mz_zip_reader_init(mz_zip_archive *pZip, mz_uint64 size, + mz_uint32 flags); +mz_bool mz_zip_reader_init_mem(mz_zip_archive *pZip, const void *pMem, + size_t size, mz_uint32 flags); + +#ifndef MINIZ_NO_STDIO +mz_bool mz_zip_reader_init_file(mz_zip_archive *pZip, const char *pFilename, + mz_uint32 flags); +#endif + +// Returns the total number of files in the archive. +mz_uint mz_zip_reader_get_num_files(mz_zip_archive *pZip); + +// Returns detailed information about an archive file entry. +mz_bool mz_zip_reader_file_stat(mz_zip_archive *pZip, mz_uint file_index, + mz_zip_archive_file_stat *pStat); + +// Determines if an archive file entry is a directory entry. +mz_bool mz_zip_reader_is_file_a_directory(mz_zip_archive *pZip, + mz_uint file_index); +mz_bool mz_zip_reader_is_file_encrypted(mz_zip_archive *pZip, + mz_uint file_index); + +// Retrieves the filename of an archive file entry. 
+// Returns the number of bytes written to pFilename, or if filename_buf_size is +// 0 this function returns the number of bytes needed to fully store the +// filename. +mz_uint mz_zip_reader_get_filename(mz_zip_archive *pZip, mz_uint file_index, + char *pFilename, mz_uint filename_buf_size); + +// Attempts to locates a file in the archive's central directory. +// Valid flags: MZ_ZIP_FLAG_CASE_SENSITIVE, MZ_ZIP_FLAG_IGNORE_PATH +// Returns -1 if the file cannot be found. +int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName, + const char *pComment, mz_uint flags); + +// Extracts a archive file to a memory buffer using no memory allocation. +mz_bool mz_zip_reader_extract_to_mem_no_alloc(mz_zip_archive *pZip, + mz_uint file_index, void *pBuf, + size_t buf_size, mz_uint flags, + void *pUser_read_buf, + size_t user_read_buf_size); +mz_bool mz_zip_reader_extract_file_to_mem_no_alloc( + mz_zip_archive *pZip, const char *pFilename, void *pBuf, size_t buf_size, + mz_uint flags, void *pUser_read_buf, size_t user_read_buf_size); + +// Extracts a archive file to a memory buffer. +mz_bool mz_zip_reader_extract_to_mem(mz_zip_archive *pZip, mz_uint file_index, + void *pBuf, size_t buf_size, + mz_uint flags); +mz_bool mz_zip_reader_extract_file_to_mem(mz_zip_archive *pZip, + const char *pFilename, void *pBuf, + size_t buf_size, mz_uint flags); + +// Extracts a archive file to a dynamically allocated heap buffer. +void *mz_zip_reader_extract_to_heap(mz_zip_archive *pZip, mz_uint file_index, + size_t *pSize, mz_uint flags); +void *mz_zip_reader_extract_file_to_heap(mz_zip_archive *pZip, + const char *pFilename, size_t *pSize, + mz_uint flags); + +// Extracts a archive file using a callback function to output the file's data. +mz_bool mz_zip_reader_extract_to_callback(mz_zip_archive *pZip, + mz_uint file_index, + mz_file_write_func pCallback, + void *pOpaque, mz_uint flags); +mz_bool mz_zip_reader_extract_file_to_callback(mz_zip_archive *pZip, + const char *pFilename, + mz_file_write_func pCallback, + void *pOpaque, mz_uint flags); + +#ifndef MINIZ_NO_STDIO +// Extracts a archive file to a disk file and sets its last accessed and +// modified times. This function only extracts files, not archive directory +// records. +mz_bool mz_zip_reader_extract_to_file(mz_zip_archive *pZip, mz_uint file_index, + const char *pDst_filename, mz_uint flags); +mz_bool mz_zip_reader_extract_file_to_file(mz_zip_archive *pZip, + const char *pArchive_filename, + const char *pDst_filename, + mz_uint flags); +#endif + +// Ends archive reading, freeing all allocations, and closing the input archive +// file if mz_zip_reader_init_file() was used. +mz_bool mz_zip_reader_end(mz_zip_archive *pZip); + +// ZIP archive writing + +#ifndef MINIZ_NO_ARCHIVE_WRITING_APIS + +// Inits a ZIP archive writer. +mz_bool mz_zip_writer_init(mz_zip_archive *pZip, mz_uint64 existing_size); +mz_bool mz_zip_writer_init_heap(mz_zip_archive *pZip, + size_t size_to_reserve_at_beginning, + size_t initial_allocation_size); + +#ifndef MINIZ_NO_STDIO +mz_bool mz_zip_writer_init_file(mz_zip_archive *pZip, const char *pFilename, + mz_uint64 size_to_reserve_at_beginning); +#endif + +// Converts a ZIP archive reader object into a writer object, to allow efficient +// in-place file appends to occur on an existing archive. For archives opened +// using mz_zip_reader_init_file, pFilename must be the archive's filename so it +// can be reopened for writing. If the file can't be reopened, +// mz_zip_reader_end() will be called. 
For archives opened using +// mz_zip_reader_init_mem, the memory block must be growable using the realloc +// callback (which defaults to realloc unless you've overridden it). Finally, +// for archives opened using mz_zip_reader_init, the mz_zip_archive's user +// provided m_pWrite function cannot be NULL. Note: In-place archive +// modification is not recommended unless you know what you're doing, because if +// execution stops or something goes wrong before the archive is finalized the +// file's central directory will be hosed. +mz_bool mz_zip_writer_init_from_reader(mz_zip_archive *pZip, + const char *pFilename); + +// Adds the contents of a memory buffer to an archive. These functions record +// the current local time into the archive. To add a directory entry, call this +// method with an archive name ending in a forwardslash with empty buffer. +// level_and_flags - compression level (0-10, see MZ_BEST_SPEED, +// MZ_BEST_COMPRESSION, etc.) logically OR'd with zero or more mz_zip_flags, or +// just set to MZ_DEFAULT_COMPRESSION. +mz_bool mz_zip_writer_add_mem(mz_zip_archive *pZip, const char *pArchive_name, + const void *pBuf, size_t buf_size, + mz_uint level_and_flags); +mz_bool mz_zip_writer_add_mem_ex(mz_zip_archive *pZip, + const char *pArchive_name, const void *pBuf, + size_t buf_size, const void *pComment, + mz_uint16 comment_size, + mz_uint level_and_flags, mz_uint64 uncomp_size, + mz_uint32 uncomp_crc32); + +#ifndef MINIZ_NO_STDIO +// Adds the contents of a disk file to an archive. This function also records +// the disk file's modified time into the archive. level_and_flags - compression +// level (0-10, see MZ_BEST_SPEED, MZ_BEST_COMPRESSION, etc.) logically OR'd +// with zero or more mz_zip_flags, or just set to MZ_DEFAULT_COMPRESSION. +mz_bool mz_zip_writer_add_file(mz_zip_archive *pZip, const char *pArchive_name, + const char *pSrc_filename, const void *pComment, + mz_uint16 comment_size, mz_uint level_and_flags, + mz_uint32 ext_attributes); +#endif + +// Adds a file to an archive by fully cloning the data from another archive. +// This function fully clones the source file's compressed data (no +// recompression), along with its full filename, extra data, and comment fields. +mz_bool mz_zip_writer_add_from_zip_reader(mz_zip_archive *pZip, + mz_zip_archive *pSource_zip, + mz_uint file_index); + +// Finalizes the archive by writing the central directory records followed by +// the end of central directory record. After an archive is finalized, the only +// valid call on the mz_zip_archive struct is mz_zip_writer_end(). An archive +// must be manually finalized by calling this function for it to be valid. +mz_bool mz_zip_writer_finalize_archive(mz_zip_archive *pZip); +mz_bool mz_zip_writer_finalize_heap_archive(mz_zip_archive *pZip, void **pBuf, + size_t *pSize); + +// Ends archive writing, freeing all allocations, and closing the output file if +// mz_zip_writer_init_file() was used. Note for the archive to be valid, it must +// have been finalized before ending. +mz_bool mz_zip_writer_end(mz_zip_archive *pZip); + +// Misc. high-level helper functions: + +// mz_zip_add_mem_to_archive_file_in_place() efficiently (but not atomically) +// appends a memory blob to a ZIP archive. level_and_flags - compression level +// (0-10, see MZ_BEST_SPEED, MZ_BEST_COMPRESSION, etc.) logically OR'd with zero +// or more mz_zip_flags, or just set to MZ_DEFAULT_COMPRESSION. 
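+//
+// A minimal sketch (the archive and file names are placeholders; strlen()
+// comes from <string.h>):
+//
+//   const char *msg = "hello";
+//   mz_bool ok = mz_zip_add_mem_to_archive_file_in_place(
+//       "out.zip", "greeting.txt", msg, strlen(msg), NULL, 0,
+//       MZ_DEFAULT_COMPRESSION);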
+mz_bool mz_zip_add_mem_to_archive_file_in_place( + const char *pZip_filename, const char *pArchive_name, const void *pBuf, + size_t buf_size, const void *pComment, mz_uint16 comment_size, + mz_uint level_and_flags); + +// Reads a single file from an archive into a heap block. +// Returns NULL on failure. +void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename, + const char *pArchive_name, + size_t *pSize, mz_uint zip_flags); + +#endif // #ifndef MINIZ_NO_ARCHIVE_WRITING_APIS + +#endif // #ifndef MINIZ_NO_ARCHIVE_APIS + +// ------------------- Low-level Decompression API Definitions + +// Decompression flags used by tinfl_decompress(). +// TINFL_FLAG_PARSE_ZLIB_HEADER: If set, the input has a valid zlib header and +// ends with an adler32 checksum (it's a valid zlib stream). Otherwise, the +// input is a raw deflate stream. TINFL_FLAG_HAS_MORE_INPUT: If set, there are +// more input bytes available beyond the end of the supplied input buffer. If +// clear, the input buffer contains all remaining input. +// TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF: If set, the output buffer is large +// enough to hold the entire decompressed stream. If clear, the output buffer is +// at least the size of the dictionary (typically 32KB). +// TINFL_FLAG_COMPUTE_ADLER32: Force adler-32 checksum computation of the +// decompressed bytes. +enum { + TINFL_FLAG_PARSE_ZLIB_HEADER = 1, + TINFL_FLAG_HAS_MORE_INPUT = 2, + TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF = 4, + TINFL_FLAG_COMPUTE_ADLER32 = 8 +}; + +// High level decompression functions: +// tinfl_decompress_mem_to_heap() decompresses a block in memory to a heap block +// allocated via malloc(). On entry: +// pSrc_buf, src_buf_len: Pointer and size of the Deflate or zlib source data +// to decompress. +// On return: +// Function returns a pointer to the decompressed data, or NULL on failure. +// *pOut_len will be set to the decompressed data's size, which could be larger +// than src_buf_len on uncompressible data. The caller must call mz_free() on +// the returned block when it's no longer needed. +void *tinfl_decompress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len, + size_t *pOut_len, int flags); + +// tinfl_decompress_mem_to_mem() decompresses a block in memory to another block +// in memory. Returns TINFL_DECOMPRESS_MEM_TO_MEM_FAILED on failure, or the +// number of bytes written on success. +#define TINFL_DECOMPRESS_MEM_TO_MEM_FAILED ((size_t)(-1)) +size_t tinfl_decompress_mem_to_mem(void *pOut_buf, size_t out_buf_len, + const void *pSrc_buf, size_t src_buf_len, + int flags); + +// tinfl_decompress_mem_to_callback() decompresses a block in memory to an +// internal 32KB buffer, and a user provided callback function will be called to +// flush the buffer. Returns 1 on success or 0 on failure. +typedef int (*tinfl_put_buf_func_ptr)(const void *pBuf, int len, void *pUser); +int tinfl_decompress_mem_to_callback(const void *pIn_buf, size_t *pIn_buf_size, + tinfl_put_buf_func_ptr pPut_buf_func, + void *pPut_buf_user, int flags); + +struct tinfl_decompressor_tag; +typedef struct tinfl_decompressor_tag tinfl_decompressor; + +// Max size of LZ dictionary. +#define TINFL_LZ_DICT_SIZE 32768 + +// Return status. +typedef enum { + TINFL_STATUS_BAD_PARAM = -3, + TINFL_STATUS_ADLER32_MISMATCH = -2, + TINFL_STATUS_FAILED = -1, + TINFL_STATUS_DONE = 0, + TINFL_STATUS_NEEDS_MORE_INPUT = 1, + TINFL_STATUS_HAS_MORE_OUTPUT = 2 +} tinfl_status; + +// Initializes the decompressor to its initial state. 
+#define tinfl_init(r) \ + do { \ + (r)->m_state = 0; \ + } \ + MZ_MACRO_END +#define tinfl_get_adler32(r) (r)->m_check_adler32 + +// Main low-level decompressor coroutine function. This is the only function +// actually needed for decompression. All the other functions are just +// high-level helpers for improved usability. This is a universal API, i.e. it +// can be used as a building block to build any desired higher level +// decompression API. In the limit case, it can be called once per every byte +// input or output. +tinfl_status tinfl_decompress(tinfl_decompressor *r, + const mz_uint8 *pIn_buf_next, + size_t *pIn_buf_size, mz_uint8 *pOut_buf_start, + mz_uint8 *pOut_buf_next, size_t *pOut_buf_size, + const mz_uint32 decomp_flags); + +// Internal/private bits follow. +enum { + TINFL_MAX_HUFF_TABLES = 3, + TINFL_MAX_HUFF_SYMBOLS_0 = 288, + TINFL_MAX_HUFF_SYMBOLS_1 = 32, + TINFL_MAX_HUFF_SYMBOLS_2 = 19, + TINFL_FAST_LOOKUP_BITS = 10, + TINFL_FAST_LOOKUP_SIZE = 1 << TINFL_FAST_LOOKUP_BITS +}; + +typedef struct { + mz_uint8 m_code_size[TINFL_MAX_HUFF_SYMBOLS_0]; + mz_int16 m_look_up[TINFL_FAST_LOOKUP_SIZE], + m_tree[TINFL_MAX_HUFF_SYMBOLS_0 * 2]; +} tinfl_huff_table; + +#if MINIZ_HAS_64BIT_REGISTERS +#define TINFL_USE_64BIT_BITBUF 1 +#endif + +#if TINFL_USE_64BIT_BITBUF +typedef mz_uint64 tinfl_bit_buf_t; +#define TINFL_BITBUF_SIZE (64) +#else +typedef mz_uint32 tinfl_bit_buf_t; +#define TINFL_BITBUF_SIZE (32) +#endif + +struct tinfl_decompressor_tag { + mz_uint32 m_state, m_num_bits, m_zhdr0, m_zhdr1, m_z_adler32, m_final, m_type, + m_check_adler32, m_dist, m_counter, m_num_extra, + m_table_sizes[TINFL_MAX_HUFF_TABLES]; + tinfl_bit_buf_t m_bit_buf; + size_t m_dist_from_out_buf_start; + tinfl_huff_table m_tables[TINFL_MAX_HUFF_TABLES]; + mz_uint8 m_raw_header[4], + m_len_codes[TINFL_MAX_HUFF_SYMBOLS_0 + TINFL_MAX_HUFF_SYMBOLS_1 + 137]; +}; + +// ------------------- Low-level Compression API Definitions + +// Set TDEFL_LESS_MEMORY to 1 to use less memory (compression will be slightly +// slower, and raw/dynamic blocks will be output more frequently). +#define TDEFL_LESS_MEMORY 0 + +// tdefl_init() compression flags logically OR'd together (low 12 bits contain +// the max. number of probes per dictionary search): TDEFL_DEFAULT_MAX_PROBES: +// The compressor defaults to 128 dictionary probes per dictionary search. +// 0=Huffman only, 1=Huffman+LZ (fastest/crap compression), 4095=Huffman+LZ +// (slowest/best compression). +enum { + TDEFL_HUFFMAN_ONLY = 0, + TDEFL_DEFAULT_MAX_PROBES = 128, + TDEFL_MAX_PROBES_MASK = 0xFFF +}; + +// TDEFL_WRITE_ZLIB_HEADER: If set, the compressor outputs a zlib header before +// the deflate data, and the Adler-32 of the source data at the end. Otherwise, +// you'll get raw deflate data. TDEFL_COMPUTE_ADLER32: Always compute the +// adler-32 of the input data (even when not writing zlib headers). +// TDEFL_GREEDY_PARSING_FLAG: Set to use faster greedy parsing, instead of more +// efficient lazy parsing. TDEFL_NONDETERMINISTIC_PARSING_FLAG: Enable to +// decrease the compressor's initialization time to the minimum, but the output +// may vary from run to run given the same input (depending on the contents of +// memory). TDEFL_RLE_MATCHES: Only look for RLE matches (matches with a +// distance of 1) TDEFL_FILTER_MATCHES: Discards matches <= 5 chars if enabled. +// TDEFL_FORCE_ALL_STATIC_BLOCKS: Disable usage of optimized Huffman tables. +// TDEFL_FORCE_ALL_RAW_BLOCKS: Only use raw (uncompressed) deflate blocks. 
+// The low 12 bits are reserved to control the max # of hash probes per +// dictionary lookup (see TDEFL_MAX_PROBES_MASK). +enum { + TDEFL_WRITE_ZLIB_HEADER = 0x01000, + TDEFL_COMPUTE_ADLER32 = 0x02000, + TDEFL_GREEDY_PARSING_FLAG = 0x04000, + TDEFL_NONDETERMINISTIC_PARSING_FLAG = 0x08000, + TDEFL_RLE_MATCHES = 0x10000, + TDEFL_FILTER_MATCHES = 0x20000, + TDEFL_FORCE_ALL_STATIC_BLOCKS = 0x40000, + TDEFL_FORCE_ALL_RAW_BLOCKS = 0x80000 +}; + +// High level compression functions: +// tdefl_compress_mem_to_heap() compresses a block in memory to a heap block +// allocated via malloc(). On entry: +// pSrc_buf, src_buf_len: Pointer and size of source block to compress. +// flags: The max match finder probes (default is 128) logically OR'd against +// the above flags. Higher probes are slower but improve compression. +// On return: +// Function returns a pointer to the compressed data, or NULL on failure. +// *pOut_len will be set to the compressed data's size, which could be larger +// than src_buf_len on uncompressible data. The caller must free() the returned +// block when it's no longer needed. +void *tdefl_compress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len, + size_t *pOut_len, int flags); + +// tdefl_compress_mem_to_mem() compresses a block in memory to another block in +// memory. Returns 0 on failure. +size_t tdefl_compress_mem_to_mem(void *pOut_buf, size_t out_buf_len, + const void *pSrc_buf, size_t src_buf_len, + int flags); + +// Compresses an image to a compressed PNG file in memory. +// On entry: +// pImage, w, h, and num_chans describe the image to compress. num_chans may be +// 1, 2, 3, or 4. The image pitch in bytes per scanline will be w*num_chans. +// The leftmost pixel on the top scanline is stored first in memory. level may +// range from [0,10], use MZ_NO_COMPRESSION, MZ_BEST_SPEED, +// MZ_BEST_COMPRESSION, etc. or a decent default is MZ_DEFAULT_LEVEL If flip is +// true, the image will be flipped on the Y axis (useful for OpenGL apps). +// On return: +// Function returns a pointer to the compressed data, or NULL on failure. +// *pLen_out will be set to the size of the PNG image file. +// The caller must mz_free() the returned heap block (which will typically be +// larger than *pLen_out) when it's no longer needed. +void *tdefl_write_image_to_png_file_in_memory_ex(const void *pImage, int w, + int h, int num_chans, + size_t *pLen_out, + mz_uint level, mz_bool flip); +void *tdefl_write_image_to_png_file_in_memory(const void *pImage, int w, int h, + int num_chans, size_t *pLen_out); + +// Output stream interface. The compressor uses this interface to write +// compressed data. It'll typically be called TDEFL_OUT_BUF_SIZE at a time. +typedef mz_bool (*tdefl_put_buf_func_ptr)(const void *pBuf, int len, + void *pUser); + +// tdefl_compress_mem_to_output() compresses a block to an output stream. The +// above helpers use this function internally. +mz_bool tdefl_compress_mem_to_output(const void *pBuf, size_t buf_len, + tdefl_put_buf_func_ptr pPut_buf_func, + void *pPut_buf_user, int flags); + +enum { + TDEFL_MAX_HUFF_TABLES = 3, + TDEFL_MAX_HUFF_SYMBOLS_0 = 288, + TDEFL_MAX_HUFF_SYMBOLS_1 = 32, + TDEFL_MAX_HUFF_SYMBOLS_2 = 19, + TDEFL_LZ_DICT_SIZE = 32768, + TDEFL_LZ_DICT_SIZE_MASK = TDEFL_LZ_DICT_SIZE - 1, + TDEFL_MIN_MATCH_LEN = 3, + TDEFL_MAX_MATCH_LEN = 258 +}; + +// TDEFL_OUT_BUF_SIZE MUST be large enough to hold a single entire compressed +// output block (using static/fixed Huffman codes). 
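+
+// Putting the two heap helpers together, a round-trip sketch (src/src_len are
+// assumed to be supplied by the caller; error handling is omitted):
+//
+//   size_t cmp_len = 0, decomp_len = 0;
+//   void *cmp = tdefl_compress_mem_to_heap(src, src_len, &cmp_len,
+//       TDEFL_WRITE_ZLIB_HEADER | TDEFL_DEFAULT_MAX_PROBES);
+//   void *decomp = cmp ? tinfl_decompress_mem_to_heap(cmp, cmp_len, &decomp_len,
+//       TINFL_FLAG_PARSE_ZLIB_HEADER) : NULL;
+//   // ... use the decomp_len bytes at decomp ...
+//   mz_free(decomp);
+//   free(cmp);  // per the note above, tdefl heap blocks are released with free()
+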
+#if TDEFL_LESS_MEMORY +enum { + TDEFL_LZ_CODE_BUF_SIZE = 24 * 1024, + TDEFL_OUT_BUF_SIZE = (TDEFL_LZ_CODE_BUF_SIZE * 13) / 10, + TDEFL_MAX_HUFF_SYMBOLS = 288, + TDEFL_LZ_HASH_BITS = 12, + TDEFL_LEVEL1_HASH_SIZE_MASK = 4095, + TDEFL_LZ_HASH_SHIFT = (TDEFL_LZ_HASH_BITS + 2) / 3, + TDEFL_LZ_HASH_SIZE = 1 << TDEFL_LZ_HASH_BITS +}; +#else +enum { + TDEFL_LZ_CODE_BUF_SIZE = 64 * 1024, + TDEFL_OUT_BUF_SIZE = (TDEFL_LZ_CODE_BUF_SIZE * 13) / 10, + TDEFL_MAX_HUFF_SYMBOLS = 288, + TDEFL_LZ_HASH_BITS = 15, + TDEFL_LEVEL1_HASH_SIZE_MASK = 4095, + TDEFL_LZ_HASH_SHIFT = (TDEFL_LZ_HASH_BITS + 2) / 3, + TDEFL_LZ_HASH_SIZE = 1 << TDEFL_LZ_HASH_BITS +}; +#endif + +// The low-level tdefl functions below may be used directly if the above helper +// functions aren't flexible enough. The low-level functions don't make any heap +// allocations, unlike the above helper functions. +typedef enum { + TDEFL_STATUS_BAD_PARAM = -2, + TDEFL_STATUS_PUT_BUF_FAILED = -1, + TDEFL_STATUS_OKAY = 0, + TDEFL_STATUS_DONE = 1, +} tdefl_status; + +// Must map to MZ_NO_FLUSH, MZ_SYNC_FLUSH, etc. enums +typedef enum { + TDEFL_NO_FLUSH = 0, + TDEFL_SYNC_FLUSH = 2, + TDEFL_FULL_FLUSH = 3, + TDEFL_FINISH = 4 +} tdefl_flush; + +// tdefl's compression state structure. +typedef struct { + tdefl_put_buf_func_ptr m_pPut_buf_func; + void *m_pPut_buf_user; + mz_uint m_flags, m_max_probes[2]; + int m_greedy_parsing; + mz_uint m_adler32, m_lookahead_pos, m_lookahead_size, m_dict_size; + mz_uint8 *m_pLZ_code_buf, *m_pLZ_flags, *m_pOutput_buf, *m_pOutput_buf_end; + mz_uint m_num_flags_left, m_total_lz_bytes, m_lz_code_buf_dict_pos, m_bits_in, + m_bit_buffer; + mz_uint m_saved_match_dist, m_saved_match_len, m_saved_lit, + m_output_flush_ofs, m_output_flush_remaining, m_finished, m_block_index, + m_wants_to_finish; + tdefl_status m_prev_return_status; + const void *m_pIn_buf; + void *m_pOut_buf; + size_t *m_pIn_buf_size, *m_pOut_buf_size; + tdefl_flush m_flush; + const mz_uint8 *m_pSrc; + size_t m_src_buf_left, m_out_buf_ofs; + mz_uint8 m_dict[TDEFL_LZ_DICT_SIZE + TDEFL_MAX_MATCH_LEN - 1]; + mz_uint16 m_huff_count[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS]; + mz_uint16 m_huff_codes[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS]; + mz_uint8 m_huff_code_sizes[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS]; + mz_uint8 m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE]; + mz_uint16 m_next[TDEFL_LZ_DICT_SIZE]; + mz_uint16 m_hash[TDEFL_LZ_HASH_SIZE]; + mz_uint8 m_output_buf[TDEFL_OUT_BUF_SIZE]; +} tdefl_compressor; + +// Initializes the compressor. +// There is no corresponding deinit() function because the tdefl API's do not +// dynamically allocate memory. pBut_buf_func: If NULL, output data will be +// supplied to the specified callback. In this case, the user should call the +// tdefl_compress_buffer() API for compression. If pBut_buf_func is NULL the +// user should always call the tdefl_compress() API. flags: See the above enums +// (TDEFL_HUFFMAN_ONLY, TDEFL_WRITE_ZLIB_HEADER, etc.) +tdefl_status tdefl_init(tdefl_compressor *d, + tdefl_put_buf_func_ptr pPut_buf_func, + void *pPut_buf_user, int flags); + +// Compresses a block of data, consuming as much of the specified input buffer +// as possible, and writing as much compressed data to the specified output +// buffer as possible. +tdefl_status tdefl_compress(tdefl_compressor *d, const void *pIn_buf, + size_t *pIn_buf_size, void *pOut_buf, + size_t *pOut_buf_size, tdefl_flush flush); + +// tdefl_compress_buffer() is only usable when the tdefl_init() is called with a +// non-NULL tdefl_put_buf_func_ptr. 
tdefl_compress_buffer() always consumes the +// entire input buffer. +tdefl_status tdefl_compress_buffer(tdefl_compressor *d, const void *pIn_buf, + size_t in_buf_size, tdefl_flush flush); + +tdefl_status tdefl_get_prev_return_status(tdefl_compressor *d); +mz_uint32 tdefl_get_adler32(tdefl_compressor *d); + +// Can't use tdefl_create_comp_flags_from_zip_params if MINIZ_NO_ZLIB_APIS isn't +// defined, because it uses some of its macros. +#ifndef MINIZ_NO_ZLIB_APIS +// Create tdefl_compress() flags given zlib-style compression parameters. +// level may range from [0,10] (where 10 is absolute max compression, but may be +// much slower on some files) window_bits may be -15 (raw deflate) or 15 (zlib) +// strategy may be either MZ_DEFAULT_STRATEGY, MZ_FILTERED, MZ_HUFFMAN_ONLY, +// MZ_RLE, or MZ_FIXED +mz_uint tdefl_create_comp_flags_from_zip_params(int level, int window_bits, + int strategy); +#endif // #ifndef MINIZ_NO_ZLIB_APIS + +#define MZ_UINT16_MAX (0xFFFFU) +#define MZ_UINT32_MAX (0xFFFFFFFFU) + +#ifdef __cplusplus +} +#endif + +#endif // MINIZ_HEADER_INCLUDED + +// ------------------- End of Header: Implementation follows. (If you only want +// the header, define MINIZ_HEADER_FILE_ONLY.) + +#ifndef MINIZ_HEADER_FILE_ONLY + +typedef unsigned char mz_validate_uint16[sizeof(mz_uint16) == 2 ? 1 : -1]; +typedef unsigned char mz_validate_uint32[sizeof(mz_uint32) == 4 ? 1 : -1]; +typedef unsigned char mz_validate_uint64[sizeof(mz_uint64) == 8 ? 1 : -1]; + +#include +#include + +#define MZ_ASSERT(x) assert(x) + +#ifdef MINIZ_NO_MALLOC +#define MZ_MALLOC(x) NULL +#define MZ_FREE(x) (void)x, ((void)0) +#define MZ_REALLOC(p, x) NULL +#else +#define MZ_MALLOC(x) malloc(x) +#define MZ_FREE(x) free(x) +#define MZ_REALLOC(p, x) realloc(p, x) +#endif + +#define MZ_MAX(a, b) (((a) > (b)) ? (a) : (b)) +#define MZ_MIN(a, b) (((a) < (b)) ? 
(a) : (b)) +#define MZ_CLEAR_OBJ(obj) memset(&(obj), 0, sizeof(obj)) + +#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN +#define MZ_READ_LE16(p) *((const mz_uint16 *)(p)) +#define MZ_READ_LE32(p) *((const mz_uint32 *)(p)) +#else +#define MZ_READ_LE16(p) \ + ((mz_uint32)(((const mz_uint8 *)(p))[0]) | \ + ((mz_uint32)(((const mz_uint8 *)(p))[1]) << 8U)) +#define MZ_READ_LE32(p) \ + ((mz_uint32)(((const mz_uint8 *)(p))[0]) | \ + ((mz_uint32)(((const mz_uint8 *)(p))[1]) << 8U) | \ + ((mz_uint32)(((const mz_uint8 *)(p))[2]) << 16U) | \ + ((mz_uint32)(((const mz_uint8 *)(p))[3]) << 24U)) +#endif + +#define MZ_READ_LE64(p) \ + (((mz_uint64)MZ_READ_LE32(p)) | \ + (((mz_uint64)MZ_READ_LE32((const mz_uint8 *)(p) + sizeof(mz_uint32))) \ + << 32U)) + +#ifdef _MSC_VER +#define MZ_FORCEINLINE __forceinline +#elif defined(__GNUC__) +#define MZ_FORCEINLINE inline __attribute__((__always_inline__)) +#else +#define MZ_FORCEINLINE inline +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +// ------------------- zlib-style API's + +mz_ulong mz_adler32(mz_ulong adler, const unsigned char *ptr, size_t buf_len) { + mz_uint32 i, s1 = (mz_uint32)(adler & 0xffff), s2 = (mz_uint32)(adler >> 16); + size_t block_len = buf_len % 5552; + if (!ptr) + return MZ_ADLER32_INIT; + while (buf_len) { + for (i = 0; i + 7 < block_len; i += 8, ptr += 8) { + s1 += ptr[0], s2 += s1; + s1 += ptr[1], s2 += s1; + s1 += ptr[2], s2 += s1; + s1 += ptr[3], s2 += s1; + s1 += ptr[4], s2 += s1; + s1 += ptr[5], s2 += s1; + s1 += ptr[6], s2 += s1; + s1 += ptr[7], s2 += s1; + } + for (; i < block_len; ++i) + s1 += *ptr++, s2 += s1; + s1 %= 65521U, s2 %= 65521U; + buf_len -= block_len; + block_len = 5552; + } + return (s2 << 16) + s1; +} + +// Karl Malbrain's compact CRC-32. See "A compact CCITT crc16 and crc32 C +// implementation that balances processor cache usage against speed": +// http://www.geocities.com/malbrain/ +mz_ulong mz_crc32(mz_ulong crc, const mz_uint8 *ptr, size_t buf_len) { + static const mz_uint32 s_crc32[16] = { + 0, 0x1db71064, 0x3b6e20c8, 0x26d930ac, 0x76dc4190, 0x6b6b51f4, + 0x4db26158, 0x5005713c, 0xedb88320, 0xf00f9344, 0xd6d6a3e8, 0xcb61b38c, + 0x9b64c2b0, 0x86d3d2d4, 0xa00ae278, 0xbdbdf21c}; + mz_uint32 crcu32 = (mz_uint32)crc; + if (!ptr) + return MZ_CRC32_INIT; + crcu32 = ~crcu32; + while (buf_len--) { + mz_uint8 b = *ptr++; + crcu32 = (crcu32 >> 4) ^ s_crc32[(crcu32 & 0xF) ^ (b & 0xF)]; + crcu32 = (crcu32 >> 4) ^ s_crc32[(crcu32 & 0xF) ^ (b >> 4)]; + } + return ~crcu32; +} + +void mz_free(void *p) { MZ_FREE(p); } + +#ifndef MINIZ_NO_ZLIB_APIS + +static void *def_alloc_func(void *opaque, size_t items, size_t size) { + (void)opaque, (void)items, (void)size; + return MZ_MALLOC(items * size); +} +static void def_free_func(void *opaque, void *address) { + (void)opaque, (void)address; + MZ_FREE(address); +} +static void *def_realloc_func(void *opaque, void *address, size_t items, + size_t size) { + (void)opaque, (void)address, (void)items, (void)size; + return MZ_REALLOC(address, items * size); +} + +const char *mz_version(void) { return MZ_VERSION; } + +int mz_deflateInit(mz_streamp pStream, int level) { + return mz_deflateInit2(pStream, level, MZ_DEFLATED, MZ_DEFAULT_WINDOW_BITS, 9, + MZ_DEFAULT_STRATEGY); +} + +int mz_deflateInit2(mz_streamp pStream, int level, int method, int window_bits, + int mem_level, int strategy) { + tdefl_compressor *pComp; + mz_uint comp_flags = + TDEFL_COMPUTE_ADLER32 | + tdefl_create_comp_flags_from_zip_params(level, window_bits, strategy); + + if (!pStream) + return 
MZ_STREAM_ERROR; + if ((method != MZ_DEFLATED) || ((mem_level < 1) || (mem_level > 9)) || + ((window_bits != MZ_DEFAULT_WINDOW_BITS) && + (-window_bits != MZ_DEFAULT_WINDOW_BITS))) + return MZ_PARAM_ERROR; + + pStream->data_type = 0; + pStream->adler = MZ_ADLER32_INIT; + pStream->msg = NULL; + pStream->reserved = 0; + pStream->total_in = 0; + pStream->total_out = 0; + if (!pStream->zalloc) + pStream->zalloc = def_alloc_func; + if (!pStream->zfree) + pStream->zfree = def_free_func; + + pComp = (tdefl_compressor *)pStream->zalloc(pStream->opaque, 1, + sizeof(tdefl_compressor)); + if (!pComp) + return MZ_MEM_ERROR; + + pStream->state = (struct mz_internal_state *)pComp; + + if (tdefl_init(pComp, NULL, NULL, comp_flags) != TDEFL_STATUS_OKAY) { + mz_deflateEnd(pStream); + return MZ_PARAM_ERROR; + } + + return MZ_OK; +} + +int mz_deflateReset(mz_streamp pStream) { + if ((!pStream) || (!pStream->state) || (!pStream->zalloc) || + (!pStream->zfree)) + return MZ_STREAM_ERROR; + pStream->total_in = pStream->total_out = 0; + tdefl_init((tdefl_compressor *)pStream->state, NULL, NULL, + ((tdefl_compressor *)pStream->state)->m_flags); + return MZ_OK; +} + +int mz_deflate(mz_streamp pStream, int flush) { + size_t in_bytes, out_bytes; + mz_ulong orig_total_in, orig_total_out; + int mz_status = MZ_OK; + + if ((!pStream) || (!pStream->state) || (flush < 0) || (flush > MZ_FINISH) || + (!pStream->next_out)) + return MZ_STREAM_ERROR; + if (!pStream->avail_out) + return MZ_BUF_ERROR; + + if (flush == MZ_PARTIAL_FLUSH) + flush = MZ_SYNC_FLUSH; + + if (((tdefl_compressor *)pStream->state)->m_prev_return_status == + TDEFL_STATUS_DONE) + return (flush == MZ_FINISH) ? MZ_STREAM_END : MZ_BUF_ERROR; + + orig_total_in = pStream->total_in; + orig_total_out = pStream->total_out; + for (;;) { + tdefl_status defl_status; + in_bytes = pStream->avail_in; + out_bytes = pStream->avail_out; + + defl_status = tdefl_compress((tdefl_compressor *)pStream->state, + pStream->next_in, &in_bytes, pStream->next_out, + &out_bytes, (tdefl_flush)flush); + pStream->next_in += (mz_uint)in_bytes; + pStream->avail_in -= (mz_uint)in_bytes; + pStream->total_in += (mz_uint)in_bytes; + pStream->adler = tdefl_get_adler32((tdefl_compressor *)pStream->state); + + pStream->next_out += (mz_uint)out_bytes; + pStream->avail_out -= (mz_uint)out_bytes; + pStream->total_out += (mz_uint)out_bytes; + + if (defl_status < 0) { + mz_status = MZ_STREAM_ERROR; + break; + } else if (defl_status == TDEFL_STATUS_DONE) { + mz_status = MZ_STREAM_END; + break; + } else if (!pStream->avail_out) + break; + else if ((!pStream->avail_in) && (flush != MZ_FINISH)) { + if ((flush) || (pStream->total_in != orig_total_in) || + (pStream->total_out != orig_total_out)) + break; + return MZ_BUF_ERROR; // Can't make forward progress without some input. + } + } + return mz_status; +} + +int mz_deflateEnd(mz_streamp pStream) { + if (!pStream) + return MZ_STREAM_ERROR; + if (pStream->state) { + pStream->zfree(pStream->opaque, pStream->state); + pStream->state = NULL; + } + return MZ_OK; +} + +mz_ulong mz_deflateBound(mz_streamp pStream, mz_ulong source_len) { + (void)pStream; + // This is really over conservative. (And lame, but it's actually pretty + // tricky to compute a true upper bound given the way tdefl's blocking works.) 
+ return MZ_MAX(128 + (source_len * 110) / 100, + 128 + source_len + ((source_len / (31 * 1024)) + 1) * 5); +} + +int mz_compress2(unsigned char *pDest, mz_ulong *pDest_len, + const unsigned char *pSource, mz_ulong source_len, int level) { + int status; + mz_stream stream; + memset(&stream, 0, sizeof(stream)); + + // In case mz_ulong is 64-bits (argh I hate longs). + if ((source_len | *pDest_len) > 0xFFFFFFFFU) + return MZ_PARAM_ERROR; + + stream.next_in = pSource; + stream.avail_in = (mz_uint32)source_len; + stream.next_out = pDest; + stream.avail_out = (mz_uint32)*pDest_len; + + status = mz_deflateInit(&stream, level); + if (status != MZ_OK) + return status; + + status = mz_deflate(&stream, MZ_FINISH); + if (status != MZ_STREAM_END) { + mz_deflateEnd(&stream); + return (status == MZ_OK) ? MZ_BUF_ERROR : status; + } + + *pDest_len = stream.total_out; + return mz_deflateEnd(&stream); +} + +int mz_compress(unsigned char *pDest, mz_ulong *pDest_len, + const unsigned char *pSource, mz_ulong source_len) { + return mz_compress2(pDest, pDest_len, pSource, source_len, + MZ_DEFAULT_COMPRESSION); +} + +mz_ulong mz_compressBound(mz_ulong source_len) { + return mz_deflateBound(NULL, source_len); +} + +typedef struct { + tinfl_decompressor m_decomp; + mz_uint m_dict_ofs, m_dict_avail, m_first_call, m_has_flushed; + int m_window_bits; + mz_uint8 m_dict[TINFL_LZ_DICT_SIZE]; + tinfl_status m_last_status; +} inflate_state; + +int mz_inflateInit2(mz_streamp pStream, int window_bits) { + inflate_state *pDecomp; + if (!pStream) + return MZ_STREAM_ERROR; + if ((window_bits != MZ_DEFAULT_WINDOW_BITS) && + (-window_bits != MZ_DEFAULT_WINDOW_BITS)) + return MZ_PARAM_ERROR; + + pStream->data_type = 0; + pStream->adler = 0; + pStream->msg = NULL; + pStream->total_in = 0; + pStream->total_out = 0; + pStream->reserved = 0; + if (!pStream->zalloc) + pStream->zalloc = def_alloc_func; + if (!pStream->zfree) + pStream->zfree = def_free_func; + + pDecomp = (inflate_state *)pStream->zalloc(pStream->opaque, 1, + sizeof(inflate_state)); + if (!pDecomp) + return MZ_MEM_ERROR; + + pStream->state = (struct mz_internal_state *)pDecomp; + + tinfl_init(&pDecomp->m_decomp); + pDecomp->m_dict_ofs = 0; + pDecomp->m_dict_avail = 0; + pDecomp->m_last_status = TINFL_STATUS_NEEDS_MORE_INPUT; + pDecomp->m_first_call = 1; + pDecomp->m_has_flushed = 0; + pDecomp->m_window_bits = window_bits; + + return MZ_OK; +} + +int mz_inflateInit(mz_streamp pStream) { + return mz_inflateInit2(pStream, MZ_DEFAULT_WINDOW_BITS); +} + +int mz_inflate(mz_streamp pStream, int flush) { + inflate_state *pState; + mz_uint n, first_call, decomp_flags = TINFL_FLAG_COMPUTE_ADLER32; + size_t in_bytes, out_bytes, orig_avail_in; + tinfl_status status; + + if ((!pStream) || (!pStream->state)) + return MZ_STREAM_ERROR; + if (flush == MZ_PARTIAL_FLUSH) + flush = MZ_SYNC_FLUSH; + if ((flush) && (flush != MZ_SYNC_FLUSH) && (flush != MZ_FINISH)) + return MZ_STREAM_ERROR; + + pState = (inflate_state *)pStream->state; + if (pState->m_window_bits > 0) + decomp_flags |= TINFL_FLAG_PARSE_ZLIB_HEADER; + orig_avail_in = pStream->avail_in; + + first_call = pState->m_first_call; + pState->m_first_call = 0; + if (pState->m_last_status < 0) + return MZ_DATA_ERROR; + + if (pState->m_has_flushed && (flush != MZ_FINISH)) + return MZ_STREAM_ERROR; + pState->m_has_flushed |= (flush == MZ_FINISH); + + if ((flush == MZ_FINISH) && (first_call)) { + // MZ_FINISH on the first call implies that the input and output buffers are + // large enough to hold the entire compressed/decompressed file. 
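+    // Editor's note (not from miniz): this branch is the single-call fast path
+    // that mz_uncompress() below relies on - the whole stream is inflated
+    // straight into the caller's output buffer, bypassing the LZ dictionary.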
+ decomp_flags |= TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF; + in_bytes = pStream->avail_in; + out_bytes = pStream->avail_out; + status = tinfl_decompress(&pState->m_decomp, pStream->next_in, &in_bytes, + pStream->next_out, pStream->next_out, &out_bytes, + decomp_flags); + pState->m_last_status = status; + pStream->next_in += (mz_uint)in_bytes; + pStream->avail_in -= (mz_uint)in_bytes; + pStream->total_in += (mz_uint)in_bytes; + pStream->adler = tinfl_get_adler32(&pState->m_decomp); + pStream->next_out += (mz_uint)out_bytes; + pStream->avail_out -= (mz_uint)out_bytes; + pStream->total_out += (mz_uint)out_bytes; + + if (status < 0) + return MZ_DATA_ERROR; + else if (status != TINFL_STATUS_DONE) { + pState->m_last_status = TINFL_STATUS_FAILED; + return MZ_BUF_ERROR; + } + return MZ_STREAM_END; + } + // flush != MZ_FINISH then we must assume there's more input. + if (flush != MZ_FINISH) + decomp_flags |= TINFL_FLAG_HAS_MORE_INPUT; + + if (pState->m_dict_avail) { + n = MZ_MIN(pState->m_dict_avail, pStream->avail_out); + memcpy(pStream->next_out, pState->m_dict + pState->m_dict_ofs, n); + pStream->next_out += n; + pStream->avail_out -= n; + pStream->total_out += n; + pState->m_dict_avail -= n; + pState->m_dict_ofs = (pState->m_dict_ofs + n) & (TINFL_LZ_DICT_SIZE - 1); + return ((pState->m_last_status == TINFL_STATUS_DONE) && + (!pState->m_dict_avail)) + ? MZ_STREAM_END + : MZ_OK; + } + + for (;;) { + in_bytes = pStream->avail_in; + out_bytes = TINFL_LZ_DICT_SIZE - pState->m_dict_ofs; + + status = tinfl_decompress( + &pState->m_decomp, pStream->next_in, &in_bytes, pState->m_dict, + pState->m_dict + pState->m_dict_ofs, &out_bytes, decomp_flags); + pState->m_last_status = status; + + pStream->next_in += (mz_uint)in_bytes; + pStream->avail_in -= (mz_uint)in_bytes; + pStream->total_in += (mz_uint)in_bytes; + pStream->adler = tinfl_get_adler32(&pState->m_decomp); + + pState->m_dict_avail = (mz_uint)out_bytes; + + n = MZ_MIN(pState->m_dict_avail, pStream->avail_out); + memcpy(pStream->next_out, pState->m_dict + pState->m_dict_ofs, n); + pStream->next_out += n; + pStream->avail_out -= n; + pStream->total_out += n; + pState->m_dict_avail -= n; + pState->m_dict_ofs = (pState->m_dict_ofs + n) & (TINFL_LZ_DICT_SIZE - 1); + + if (status < 0) + return MZ_DATA_ERROR; // Stream is corrupted (there could be some + // uncompressed data left in the output dictionary - + // oh well). + else if ((status == TINFL_STATUS_NEEDS_MORE_INPUT) && (!orig_avail_in)) + return MZ_BUF_ERROR; // Signal caller that we can't make forward progress + // without supplying more input or by setting flush + // to MZ_FINISH. + else if (flush == MZ_FINISH) { + // The output buffer MUST be large to hold the remaining uncompressed data + // when flush==MZ_FINISH. + if (status == TINFL_STATUS_DONE) + return pState->m_dict_avail ? MZ_BUF_ERROR : MZ_STREAM_END; + // status here must be TINFL_STATUS_HAS_MORE_OUTPUT, which means there's + // at least 1 more byte on the way. If there's no more room left in the + // output buffer then something is wrong. + else if (!pStream->avail_out) + return MZ_BUF_ERROR; + } else if ((status == TINFL_STATUS_DONE) || (!pStream->avail_in) || + (!pStream->avail_out) || (pState->m_dict_avail)) + break; + } + + return ((status == TINFL_STATUS_DONE) && (!pState->m_dict_avail)) + ? 
MZ_STREAM_END + : MZ_OK; +} + +int mz_inflateEnd(mz_streamp pStream) { + if (!pStream) + return MZ_STREAM_ERROR; + if (pStream->state) { + pStream->zfree(pStream->opaque, pStream->state); + pStream->state = NULL; + } + return MZ_OK; +} + +int mz_uncompress(unsigned char *pDest, mz_ulong *pDest_len, + const unsigned char *pSource, mz_ulong source_len) { + mz_stream stream; + int status; + memset(&stream, 0, sizeof(stream)); + + // In case mz_ulong is 64-bits (argh I hate longs). + if ((source_len | *pDest_len) > 0xFFFFFFFFU) + return MZ_PARAM_ERROR; + + stream.next_in = pSource; + stream.avail_in = (mz_uint32)source_len; + stream.next_out = pDest; + stream.avail_out = (mz_uint32)*pDest_len; + + status = mz_inflateInit(&stream); + if (status != MZ_OK) + return status; + + status = mz_inflate(&stream, MZ_FINISH); + if (status != MZ_STREAM_END) { + mz_inflateEnd(&stream); + return ((status == MZ_BUF_ERROR) && (!stream.avail_in)) ? MZ_DATA_ERROR + : status; + } + *pDest_len = stream.total_out; + + return mz_inflateEnd(&stream); +} + +const char *mz_error(int err) { + static struct { + int m_err; + const char *m_pDesc; + } s_error_descs[] = {{MZ_OK, ""}, + {MZ_STREAM_END, "stream end"}, + {MZ_NEED_DICT, "need dictionary"}, + {MZ_ERRNO, "file error"}, + {MZ_STREAM_ERROR, "stream error"}, + {MZ_DATA_ERROR, "data error"}, + {MZ_MEM_ERROR, "out of memory"}, + {MZ_BUF_ERROR, "buf error"}, + {MZ_VERSION_ERROR, "version error"}, + {MZ_PARAM_ERROR, "parameter error"}}; + mz_uint i; + for (i = 0; i < sizeof(s_error_descs) / sizeof(s_error_descs[0]); ++i) + if (s_error_descs[i].m_err == err) + return s_error_descs[i].m_pDesc; + return NULL; +} + +#endif // MINIZ_NO_ZLIB_APIS + +// ------------------- Low-level Decompression (completely independent from all +// compression API's) + +#define TINFL_MEMCPY(d, s, l) memcpy(d, s, l) +#define TINFL_MEMSET(p, c, l) memset(p, c, l) + +#define TINFL_CR_BEGIN \ + switch (r->m_state) { \ + case 0: +#define TINFL_CR_RETURN(state_index, result) \ + do { \ + status = result; \ + r->m_state = state_index; \ + goto common_exit; \ + case state_index:; \ + } \ + MZ_MACRO_END +#define TINFL_CR_RETURN_FOREVER(state_index, result) \ + do { \ + for (;;) { \ + TINFL_CR_RETURN(state_index, result); \ + } \ + } \ + MZ_MACRO_END +#define TINFL_CR_FINISH } + +// TODO: If the caller has indicated that there's no more input, and we attempt +// to read beyond the input buf, then something is wrong with the input because +// the inflator never reads ahead more than it needs to. Currently +// TINFL_GET_BYTE() pads the end of the stream with 0's in this scenario. 
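+// Editor's note (illustrative, not part of miniz): the TINFL_CR_* macros above
+// implement a resumable "coroutine via switch" - every TINFL_CR_RETURN stores a
+// state index and returns, and the next call to tinfl_decompress() jumps back
+// to the matching case label. TINFL_GET_BYTE below uses exactly that to suspend
+// when the input buffer runs dry. A hedged stand-alone sketch of the pattern
+// (hypothetical names, stripped of the miniz details):
+#if 0
+static int example_resumable(int *state, int byte_in) {
+  switch (*state) {
+  case 0:
+    /* ... work until more input is needed ... */
+    *state = 1; /* suspend: remember where to resume */
+    return -1;  /* ask the caller for another byte */
+  case 1:
+    /* re-entered here on the next call, with byte_in supplied */
+    *state = 0;
+    return byte_in;
+  }
+  return 0;
+}
+#endif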
+#define TINFL_GET_BYTE(state_index, c) \ + do { \ + if (pIn_buf_cur >= pIn_buf_end) { \ + for (;;) { \ + if (decomp_flags & TINFL_FLAG_HAS_MORE_INPUT) { \ + TINFL_CR_RETURN(state_index, TINFL_STATUS_NEEDS_MORE_INPUT); \ + if (pIn_buf_cur < pIn_buf_end) { \ + c = *pIn_buf_cur++; \ + break; \ + } \ + } else { \ + c = 0; \ + break; \ + } \ + } \ + } else \ + c = *pIn_buf_cur++; \ + } \ + MZ_MACRO_END + +#define TINFL_NEED_BITS(state_index, n) \ + do { \ + mz_uint c; \ + TINFL_GET_BYTE(state_index, c); \ + bit_buf |= (((tinfl_bit_buf_t)c) << num_bits); \ + num_bits += 8; \ + } while (num_bits < (mz_uint)(n)) +#define TINFL_SKIP_BITS(state_index, n) \ + do { \ + if (num_bits < (mz_uint)(n)) { \ + TINFL_NEED_BITS(state_index, n); \ + } \ + bit_buf >>= (n); \ + num_bits -= (n); \ + } \ + MZ_MACRO_END +#define TINFL_GET_BITS(state_index, b, n) \ + do { \ + if (num_bits < (mz_uint)(n)) { \ + TINFL_NEED_BITS(state_index, n); \ + } \ + b = bit_buf & ((1 << (n)) - 1); \ + bit_buf >>= (n); \ + num_bits -= (n); \ + } \ + MZ_MACRO_END + +// TINFL_HUFF_BITBUF_FILL() is only used rarely, when the number of bytes +// remaining in the input buffer falls below 2. It reads just enough bytes from +// the input stream that are needed to decode the next Huffman code (and +// absolutely no more). It works by trying to fully decode a Huffman code by +// using whatever bits are currently present in the bit buffer. If this fails, +// it reads another byte, and tries again until it succeeds or until the bit +// buffer contains >=15 bits (deflate's max. Huffman code size). +#define TINFL_HUFF_BITBUF_FILL(state_index, pHuff) \ + do { \ + temp = (pHuff)->m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]; \ + if (temp >= 0) { \ + code_len = temp >> 9; \ + if ((code_len) && (num_bits >= code_len)) \ + break; \ + } else if (num_bits > TINFL_FAST_LOOKUP_BITS) { \ + code_len = TINFL_FAST_LOOKUP_BITS; \ + do { \ + temp = (pHuff)->m_tree[~temp + ((bit_buf >> code_len++) & 1)]; \ + } while ((temp < 0) && (num_bits >= (code_len + 1))); \ + if (temp >= 0) \ + break; \ + } \ + TINFL_GET_BYTE(state_index, c); \ + bit_buf |= (((tinfl_bit_buf_t)c) << num_bits); \ + num_bits += 8; \ + } while (num_bits < 15); + +// TINFL_HUFF_DECODE() decodes the next Huffman coded symbol. It's more complex +// than you would initially expect because the zlib API expects the decompressor +// to never read beyond the final byte of the deflate stream. (In other words, +// when this macro wants to read another byte from the input, it REALLY needs +// another byte in order to fully decode the next Huffman code.) Handling this +// properly is particularly important on raw deflate (non-zlib) streams, which +// aren't followed by a byte aligned adler-32. The slow path is only executed at +// the very end of the input buffer. 
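+// Editor's note (illustrative, not part of miniz): the macros above maintain an
+// LSB-first bit buffer - whole bytes are OR'd in above the bits already held
+// (<< num_bits) and codes are peeled off the bottom with a mask. Ignoring the
+// end-of-input handling that TINFL_GET_BYTE adds, TINFL_GET_BITS is roughly
+// equivalent to this hedged sketch (hypothetical helper, plain C):
+#if 0
+static mz_uint example_get_bits(tinfl_bit_buf_t *bit_buf, mz_uint *num_bits,
+                                const mz_uint8 **pp, mz_uint n) {
+  mz_uint v;
+  while (*num_bits < n) { /* refill one byte at a time */
+    *bit_buf |= ((tinfl_bit_buf_t)*(*pp)++) << *num_bits;
+    *num_bits += 8;
+  }
+  v = (mz_uint)(*bit_buf & ((1U << n) - 1)); /* low n bits = requested bits */
+  *bit_buf >>= n;
+  *num_bits -= n;
+  return v;
+}
+#endif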
+#define TINFL_HUFF_DECODE(state_index, sym, pHuff) \ + do { \ + int temp; \ + mz_uint code_len, c; \ + if (num_bits < 15) { \ + if ((pIn_buf_end - pIn_buf_cur) < 2) { \ + TINFL_HUFF_BITBUF_FILL(state_index, pHuff); \ + } else { \ + bit_buf |= (((tinfl_bit_buf_t)pIn_buf_cur[0]) << num_bits) | \ + (((tinfl_bit_buf_t)pIn_buf_cur[1]) << (num_bits + 8)); \ + pIn_buf_cur += 2; \ + num_bits += 16; \ + } \ + } \ + if ((temp = (pHuff)->m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >= \ + 0) \ + code_len = temp >> 9, temp &= 511; \ + else { \ + code_len = TINFL_FAST_LOOKUP_BITS; \ + do { \ + temp = (pHuff)->m_tree[~temp + ((bit_buf >> code_len++) & 1)]; \ + } while (temp < 0); \ + } \ + sym = temp; \ + bit_buf >>= code_len; \ + num_bits -= code_len; \ + } \ + MZ_MACRO_END + +tinfl_status tinfl_decompress(tinfl_decompressor *r, + const mz_uint8 *pIn_buf_next, + size_t *pIn_buf_size, mz_uint8 *pOut_buf_start, + mz_uint8 *pOut_buf_next, size_t *pOut_buf_size, + const mz_uint32 decomp_flags) { + static const int s_length_base[31] = { + 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31, + 35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0}; + static const int s_length_extra[31] = {0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, + 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, + 4, 4, 5, 5, 5, 5, 0, 0, 0}; + static const int s_dist_base[32] = { + 1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, + 49, 65, 97, 129, 193, 257, 385, 513, 769, 1025, 1537, + 2049, 3073, 4097, 6145, 8193, 12289, 16385, 24577, 0, 0}; + static const int s_dist_extra[32] = {0, 0, 0, 0, 1, 1, 2, 2, 3, 3, + 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, + 9, 9, 10, 10, 11, 11, 12, 12, 13, 13}; + static const mz_uint8 s_length_dezigzag[19] = { + 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15}; + static const int s_min_table_sizes[3] = {257, 1, 4}; + + tinfl_status status = TINFL_STATUS_FAILED; + mz_uint32 num_bits, dist, counter, num_extra; + tinfl_bit_buf_t bit_buf; + const mz_uint8 *pIn_buf_cur = pIn_buf_next, *const pIn_buf_end = + pIn_buf_next + *pIn_buf_size; + mz_uint8 *pOut_buf_cur = pOut_buf_next, *const pOut_buf_end = + pOut_buf_next + *pOut_buf_size; + size_t out_buf_size_mask = + (decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF) + ? (size_t)-1 + : ((pOut_buf_next - pOut_buf_start) + *pOut_buf_size) - 1, + dist_from_out_buf_start; + + // Ensure the output buffer's size is a power of 2, unless the output buffer + // is large enough to hold the entire output file (in which case it doesn't + // matter). 
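+  // Editor's note (not from miniz): for any x > 0, (x & (x - 1)) == 0 exactly
+  // when x is a power of two; since out_buf_size_mask is size - 1, a non-zero
+  // ((out_buf_size_mask + 1) & out_buf_size_mask) below rejects wrapping
+  // output buffers whose size is not a power of 2.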
+ if (((out_buf_size_mask + 1) & out_buf_size_mask) || + (pOut_buf_next < pOut_buf_start)) { + *pIn_buf_size = *pOut_buf_size = 0; + return TINFL_STATUS_BAD_PARAM; + } + + num_bits = r->m_num_bits; + bit_buf = r->m_bit_buf; + dist = r->m_dist; + counter = r->m_counter; + num_extra = r->m_num_extra; + dist_from_out_buf_start = r->m_dist_from_out_buf_start; + TINFL_CR_BEGIN + + bit_buf = num_bits = dist = counter = num_extra = r->m_zhdr0 = r->m_zhdr1 = 0; + r->m_z_adler32 = r->m_check_adler32 = 1; + if (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) { + TINFL_GET_BYTE(1, r->m_zhdr0); + TINFL_GET_BYTE(2, r->m_zhdr1); + counter = (((r->m_zhdr0 * 256 + r->m_zhdr1) % 31 != 0) || + (r->m_zhdr1 & 32) || ((r->m_zhdr0 & 15) != 8)); + if (!(decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF)) + counter |= (((1U << (8U + (r->m_zhdr0 >> 4))) > 32768U) || + ((out_buf_size_mask + 1) < + (size_t)(1U << (8U + (r->m_zhdr0 >> 4))))); + if (counter) { + TINFL_CR_RETURN_FOREVER(36, TINFL_STATUS_FAILED); + } + } + + do { + TINFL_GET_BITS(3, r->m_final, 3); + r->m_type = r->m_final >> 1; + if (r->m_type == 0) { + TINFL_SKIP_BITS(5, num_bits & 7); + for (counter = 0; counter < 4; ++counter) { + if (num_bits) + TINFL_GET_BITS(6, r->m_raw_header[counter], 8); + else + TINFL_GET_BYTE(7, r->m_raw_header[counter]); + } + if ((counter = (r->m_raw_header[0] | (r->m_raw_header[1] << 8))) != + (mz_uint)(0xFFFF ^ + (r->m_raw_header[2] | (r->m_raw_header[3] << 8)))) { + TINFL_CR_RETURN_FOREVER(39, TINFL_STATUS_FAILED); + } + while ((counter) && (num_bits)) { + TINFL_GET_BITS(51, dist, 8); + while (pOut_buf_cur >= pOut_buf_end) { + TINFL_CR_RETURN(52, TINFL_STATUS_HAS_MORE_OUTPUT); + } + *pOut_buf_cur++ = (mz_uint8)dist; + counter--; + } + while (counter) { + size_t n; + while (pOut_buf_cur >= pOut_buf_end) { + TINFL_CR_RETURN(9, TINFL_STATUS_HAS_MORE_OUTPUT); + } + while (pIn_buf_cur >= pIn_buf_end) { + if (decomp_flags & TINFL_FLAG_HAS_MORE_INPUT) { + TINFL_CR_RETURN(38, TINFL_STATUS_NEEDS_MORE_INPUT); + } else { + TINFL_CR_RETURN_FOREVER(40, TINFL_STATUS_FAILED); + } + } + n = MZ_MIN(MZ_MIN((size_t)(pOut_buf_end - pOut_buf_cur), + (size_t)(pIn_buf_end - pIn_buf_cur)), + counter); + TINFL_MEMCPY(pOut_buf_cur, pIn_buf_cur, n); + pIn_buf_cur += n; + pOut_buf_cur += n; + counter -= (mz_uint)n; + } + } else if (r->m_type == 3) { + TINFL_CR_RETURN_FOREVER(10, TINFL_STATUS_FAILED); + } else { + if (r->m_type == 1) { + mz_uint8 *p = r->m_tables[0].m_code_size; + mz_uint i; + r->m_table_sizes[0] = 288; + r->m_table_sizes[1] = 32; + TINFL_MEMSET(r->m_tables[1].m_code_size, 5, 32); + for (i = 0; i <= 143; ++i) + *p++ = 8; + for (; i <= 255; ++i) + *p++ = 9; + for (; i <= 279; ++i) + *p++ = 7; + for (; i <= 287; ++i) + *p++ = 8; + } else { + for (counter = 0; counter < 3; counter++) { + TINFL_GET_BITS(11, r->m_table_sizes[counter], "\05\05\04"[counter]); + r->m_table_sizes[counter] += s_min_table_sizes[counter]; + } + MZ_CLEAR_OBJ(r->m_tables[2].m_code_size); + for (counter = 0; counter < r->m_table_sizes[2]; counter++) { + mz_uint s; + TINFL_GET_BITS(14, s, 3); + r->m_tables[2].m_code_size[s_length_dezigzag[counter]] = (mz_uint8)s; + } + r->m_table_sizes[2] = 19; + } + for (; (int)r->m_type >= 0; r->m_type--) { + int tree_next, tree_cur; + tinfl_huff_table *pTable; + mz_uint i, j, used_syms, total, sym_index, next_code[17], + total_syms[16]; + pTable = &r->m_tables[r->m_type]; + MZ_CLEAR_OBJ(total_syms); + MZ_CLEAR_OBJ(pTable->m_look_up); + MZ_CLEAR_OBJ(pTable->m_tree); + for (i = 0; i < r->m_table_sizes[r->m_type]; ++i) + 
total_syms[pTable->m_code_size[i]]++; + used_syms = 0, total = 0; + next_code[0] = next_code[1] = 0; + for (i = 1; i <= 15; ++i) { + used_syms += total_syms[i]; + next_code[i + 1] = (total = ((total + total_syms[i]) << 1)); + } + if ((65536 != total) && (used_syms > 1)) { + TINFL_CR_RETURN_FOREVER(35, TINFL_STATUS_FAILED); + } + for (tree_next = -1, sym_index = 0; + sym_index < r->m_table_sizes[r->m_type]; ++sym_index) { + mz_uint rev_code = 0, l, cur_code, + code_size = pTable->m_code_size[sym_index]; + if (!code_size) + continue; + cur_code = next_code[code_size]++; + for (l = code_size; l > 0; l--, cur_code >>= 1) + rev_code = (rev_code << 1) | (cur_code & 1); + if (code_size <= TINFL_FAST_LOOKUP_BITS) { + mz_int16 k = (mz_int16)((code_size << 9) | sym_index); + while (rev_code < TINFL_FAST_LOOKUP_SIZE) { + pTable->m_look_up[rev_code] = k; + rev_code += (1 << code_size); + } + continue; + } + if (0 == + (tree_cur = pTable->m_look_up[rev_code & + (TINFL_FAST_LOOKUP_SIZE - 1)])) { + pTable->m_look_up[rev_code & (TINFL_FAST_LOOKUP_SIZE - 1)] = + (mz_int16)tree_next; + tree_cur = tree_next; + tree_next -= 2; + } + rev_code >>= (TINFL_FAST_LOOKUP_BITS - 1); + for (j = code_size; j > (TINFL_FAST_LOOKUP_BITS + 1); j--) { + tree_cur -= ((rev_code >>= 1) & 1); + if (!pTable->m_tree[-tree_cur - 1]) { + pTable->m_tree[-tree_cur - 1] = (mz_int16)tree_next; + tree_cur = tree_next; + tree_next -= 2; + } else + tree_cur = pTable->m_tree[-tree_cur - 1]; + } + rev_code >>= 1; + tree_cur -= (rev_code & 1); + pTable->m_tree[-tree_cur - 1] = (mz_int16)sym_index; + } + if (r->m_type == 2) { + for (counter = 0; + counter < (r->m_table_sizes[0] + r->m_table_sizes[1]);) { + mz_uint s; + TINFL_HUFF_DECODE(16, dist, &r->m_tables[2]); + if (dist < 16) { + r->m_len_codes[counter++] = (mz_uint8)dist; + continue; + } + if ((dist == 16) && (!counter)) { + TINFL_CR_RETURN_FOREVER(17, TINFL_STATUS_FAILED); + } + num_extra = "\02\03\07"[dist - 16]; + TINFL_GET_BITS(18, s, num_extra); + s += "\03\03\013"[dist - 16]; + TINFL_MEMSET(r->m_len_codes + counter, + (dist == 16) ? 
r->m_len_codes[counter - 1] : 0, s); + counter += s; + } + if ((r->m_table_sizes[0] + r->m_table_sizes[1]) != counter) { + TINFL_CR_RETURN_FOREVER(21, TINFL_STATUS_FAILED); + } + TINFL_MEMCPY(r->m_tables[0].m_code_size, r->m_len_codes, + r->m_table_sizes[0]); + TINFL_MEMCPY(r->m_tables[1].m_code_size, + r->m_len_codes + r->m_table_sizes[0], + r->m_table_sizes[1]); + } + } + for (;;) { + mz_uint8 *pSrc; + for (;;) { + if (((pIn_buf_end - pIn_buf_cur) < 4) || + ((pOut_buf_end - pOut_buf_cur) < 2)) { + TINFL_HUFF_DECODE(23, counter, &r->m_tables[0]); + if (counter >= 256) + break; + while (pOut_buf_cur >= pOut_buf_end) { + TINFL_CR_RETURN(24, TINFL_STATUS_HAS_MORE_OUTPUT); + } + *pOut_buf_cur++ = (mz_uint8)counter; + } else { + int sym2; + mz_uint code_len; +#if TINFL_USE_64BIT_BITBUF + if (num_bits < 30) { + bit_buf |= + (((tinfl_bit_buf_t)MZ_READ_LE32(pIn_buf_cur)) << num_bits); + pIn_buf_cur += 4; + num_bits += 32; + } +#else + if (num_bits < 15) { + bit_buf |= + (((tinfl_bit_buf_t)MZ_READ_LE16(pIn_buf_cur)) << num_bits); + pIn_buf_cur += 2; + num_bits += 16; + } +#endif + if ((sym2 = + r->m_tables[0] + .m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >= + 0) + code_len = sym2 >> 9; + else { + code_len = TINFL_FAST_LOOKUP_BITS; + do { + sym2 = r->m_tables[0] + .m_tree[~sym2 + ((bit_buf >> code_len++) & 1)]; + } while (sym2 < 0); + } + counter = sym2; + bit_buf >>= code_len; + num_bits -= code_len; + if (counter & 256) + break; + +#if !TINFL_USE_64BIT_BITBUF + if (num_bits < 15) { + bit_buf |= + (((tinfl_bit_buf_t)MZ_READ_LE16(pIn_buf_cur)) << num_bits); + pIn_buf_cur += 2; + num_bits += 16; + } +#endif + if ((sym2 = + r->m_tables[0] + .m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >= + 0) + code_len = sym2 >> 9; + else { + code_len = TINFL_FAST_LOOKUP_BITS; + do { + sym2 = r->m_tables[0] + .m_tree[~sym2 + ((bit_buf >> code_len++) & 1)]; + } while (sym2 < 0); + } + bit_buf >>= code_len; + num_bits -= code_len; + + pOut_buf_cur[0] = (mz_uint8)counter; + if (sym2 & 256) { + pOut_buf_cur++; + counter = sym2; + break; + } + pOut_buf_cur[1] = (mz_uint8)sym2; + pOut_buf_cur += 2; + } + } + if ((counter &= 511) == 256) + break; + + num_extra = s_length_extra[counter - 257]; + counter = s_length_base[counter - 257]; + if (num_extra) { + mz_uint extra_bits; + TINFL_GET_BITS(25, extra_bits, num_extra); + counter += extra_bits; + } + + TINFL_HUFF_DECODE(26, dist, &r->m_tables[1]); + num_extra = s_dist_extra[dist]; + dist = s_dist_base[dist]; + if (num_extra) { + mz_uint extra_bits; + TINFL_GET_BITS(27, extra_bits, num_extra); + dist += extra_bits; + } + + dist_from_out_buf_start = pOut_buf_cur - pOut_buf_start; + if ((dist > dist_from_out_buf_start) && + (decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF)) { + TINFL_CR_RETURN_FOREVER(37, TINFL_STATUS_FAILED); + } + + pSrc = pOut_buf_start + + ((dist_from_out_buf_start - dist) & out_buf_size_mask); + + if ((MZ_MAX(pOut_buf_cur, pSrc) + counter) > pOut_buf_end) { + while (counter--) { + while (pOut_buf_cur >= pOut_buf_end) { + TINFL_CR_RETURN(53, TINFL_STATUS_HAS_MORE_OUTPUT); + } + *pOut_buf_cur++ = + pOut_buf_start[(dist_from_out_buf_start++ - dist) & + out_buf_size_mask]; + } + continue; + } +#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES + else if ((counter >= 9) && (counter <= dist)) { + const mz_uint8 *pSrc_end = pSrc + (counter & ~7); + do { + ((mz_uint32 *)pOut_buf_cur)[0] = ((const mz_uint32 *)pSrc)[0]; + ((mz_uint32 *)pOut_buf_cur)[1] = ((const mz_uint32 *)pSrc)[1]; + pOut_buf_cur += 8; + } while ((pSrc += 8) < pSrc_end); + if 
((counter &= 7) < 3) { + if (counter) { + pOut_buf_cur[0] = pSrc[0]; + if (counter > 1) + pOut_buf_cur[1] = pSrc[1]; + pOut_buf_cur += counter; + } + continue; + } + } +#endif + do { + pOut_buf_cur[0] = pSrc[0]; + pOut_buf_cur[1] = pSrc[1]; + pOut_buf_cur[2] = pSrc[2]; + pOut_buf_cur += 3; + pSrc += 3; + } while ((int)(counter -= 3) > 2); + if ((int)counter > 0) { + pOut_buf_cur[0] = pSrc[0]; + if ((int)counter > 1) + pOut_buf_cur[1] = pSrc[1]; + pOut_buf_cur += counter; + } + } + } + } while (!(r->m_final & 1)); + if (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) { + TINFL_SKIP_BITS(32, num_bits & 7); + for (counter = 0; counter < 4; ++counter) { + mz_uint s; + if (num_bits) + TINFL_GET_BITS(41, s, 8); + else + TINFL_GET_BYTE(42, s); + r->m_z_adler32 = (r->m_z_adler32 << 8) | s; + } + } + TINFL_CR_RETURN_FOREVER(34, TINFL_STATUS_DONE); + TINFL_CR_FINISH + +common_exit: + r->m_num_bits = num_bits; + r->m_bit_buf = bit_buf; + r->m_dist = dist; + r->m_counter = counter; + r->m_num_extra = num_extra; + r->m_dist_from_out_buf_start = dist_from_out_buf_start; + *pIn_buf_size = pIn_buf_cur - pIn_buf_next; + *pOut_buf_size = pOut_buf_cur - pOut_buf_next; + if ((decomp_flags & + (TINFL_FLAG_PARSE_ZLIB_HEADER | TINFL_FLAG_COMPUTE_ADLER32)) && + (status >= 0)) { + const mz_uint8 *ptr = pOut_buf_next; + size_t buf_len = *pOut_buf_size; + mz_uint32 i, s1 = r->m_check_adler32 & 0xffff, + s2 = r->m_check_adler32 >> 16; + size_t block_len = buf_len % 5552; + while (buf_len) { + for (i = 0; i + 7 < block_len; i += 8, ptr += 8) { + s1 += ptr[0], s2 += s1; + s1 += ptr[1], s2 += s1; + s1 += ptr[2], s2 += s1; + s1 += ptr[3], s2 += s1; + s1 += ptr[4], s2 += s1; + s1 += ptr[5], s2 += s1; + s1 += ptr[6], s2 += s1; + s1 += ptr[7], s2 += s1; + } + for (; i < block_len; ++i) + s1 += *ptr++, s2 += s1; + s1 %= 65521U, s2 %= 65521U; + buf_len -= block_len; + block_len = 5552; + } + r->m_check_adler32 = (s2 << 16) + s1; + if ((status == TINFL_STATUS_DONE) && + (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) && + (r->m_check_adler32 != r->m_z_adler32)) + status = TINFL_STATUS_ADLER32_MISMATCH; + } + return status; +} + +// Higher level helper functions. +void *tinfl_decompress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len, + size_t *pOut_len, int flags) { + tinfl_decompressor decomp; + void *pBuf = NULL, *pNew_buf; + size_t src_buf_ofs = 0, out_buf_capacity = 0; + *pOut_len = 0; + tinfl_init(&decomp); + for (;;) { + size_t src_buf_size = src_buf_len - src_buf_ofs, + dst_buf_size = out_buf_capacity - *pOut_len, new_out_buf_capacity; + tinfl_status status = tinfl_decompress( + &decomp, (const mz_uint8 *)pSrc_buf + src_buf_ofs, &src_buf_size, + (mz_uint8 *)pBuf, pBuf ? 
(mz_uint8 *)pBuf + *pOut_len : NULL, + &dst_buf_size, + (flags & ~TINFL_FLAG_HAS_MORE_INPUT) | + TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF); + if ((status < 0) || (status == TINFL_STATUS_NEEDS_MORE_INPUT)) { + MZ_FREE(pBuf); + *pOut_len = 0; + return NULL; + } + src_buf_ofs += src_buf_size; + *pOut_len += dst_buf_size; + if (status == TINFL_STATUS_DONE) + break; + new_out_buf_capacity = out_buf_capacity * 2; + if (new_out_buf_capacity < 128) + new_out_buf_capacity = 128; + pNew_buf = MZ_REALLOC(pBuf, new_out_buf_capacity); + if (!pNew_buf) { + MZ_FREE(pBuf); + *pOut_len = 0; + return NULL; + } + pBuf = pNew_buf; + out_buf_capacity = new_out_buf_capacity; + } + return pBuf; +} + +size_t tinfl_decompress_mem_to_mem(void *pOut_buf, size_t out_buf_len, + const void *pSrc_buf, size_t src_buf_len, + int flags) { + tinfl_decompressor decomp; + tinfl_status status; + tinfl_init(&decomp); + status = + tinfl_decompress(&decomp, (const mz_uint8 *)pSrc_buf, &src_buf_len, + (mz_uint8 *)pOut_buf, (mz_uint8 *)pOut_buf, &out_buf_len, + (flags & ~TINFL_FLAG_HAS_MORE_INPUT) | + TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF); + return (status != TINFL_STATUS_DONE) ? TINFL_DECOMPRESS_MEM_TO_MEM_FAILED + : out_buf_len; +} + +int tinfl_decompress_mem_to_callback(const void *pIn_buf, size_t *pIn_buf_size, + tinfl_put_buf_func_ptr pPut_buf_func, + void *pPut_buf_user, int flags) { + int result = 0; + tinfl_decompressor decomp; + mz_uint8 *pDict = (mz_uint8 *)MZ_MALLOC(TINFL_LZ_DICT_SIZE); + size_t in_buf_ofs = 0, dict_ofs = 0; + if (!pDict) + return TINFL_STATUS_FAILED; + tinfl_init(&decomp); + for (;;) { + size_t in_buf_size = *pIn_buf_size - in_buf_ofs, + dst_buf_size = TINFL_LZ_DICT_SIZE - dict_ofs; + tinfl_status status = + tinfl_decompress(&decomp, (const mz_uint8 *)pIn_buf + in_buf_ofs, + &in_buf_size, pDict, pDict + dict_ofs, &dst_buf_size, + (flags & ~(TINFL_FLAG_HAS_MORE_INPUT | + TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF))); + in_buf_ofs += in_buf_size; + if ((dst_buf_size) && + (!(*pPut_buf_func)(pDict + dict_ofs, (int)dst_buf_size, pPut_buf_user))) + break; + if (status != TINFL_STATUS_HAS_MORE_OUTPUT) { + result = (status == TINFL_STATUS_DONE); + break; + } + dict_ofs = (dict_ofs + dst_buf_size) & (TINFL_LZ_DICT_SIZE - 1); + } + MZ_FREE(pDict); + *pIn_buf_size = in_buf_ofs; + return result; +} + +// ------------------- Low-level Compression (independent from all decompression +// API's) + +// Purposely making these tables static for faster init and thread safety. 
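+// Editor's note (not from miniz): the tables below drive the symbol/extra-bit
+// lookups in tdefl_compress_lz_codes(). s_tdefl_len_sym[] and
+// s_tdefl_len_extra[] are indexed by (match length - TDEFL_MIN_MATCH_LEN),
+// i.e. 0..255 for 3..258-byte matches, and yield the deflate length symbol
+// (257..285) and its extra-bit count - e.g. a 3-byte match maps to symbol 257
+// with 0 extra bits. The small/large distance tables do the same for the
+// stored match distance, split at 512 so either table is reached with a
+// single small index.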
+static const mz_uint16 s_tdefl_len_sym[256] = { + 257, 258, 259, 260, 261, 262, 263, 264, 265, 265, 266, 266, 267, 267, 268, + 268, 269, 269, 269, 269, 270, 270, 270, 270, 271, 271, 271, 271, 272, 272, + 272, 272, 273, 273, 273, 273, 273, 273, 273, 273, 274, 274, 274, 274, 274, + 274, 274, 274, 275, 275, 275, 275, 275, 275, 275, 275, 276, 276, 276, 276, + 276, 276, 276, 276, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, + 277, 277, 277, 277, 277, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, + 278, 278, 278, 278, 278, 278, 279, 279, 279, 279, 279, 279, 279, 279, 279, + 279, 279, 279, 279, 279, 279, 279, 280, 280, 280, 280, 280, 280, 280, 280, + 280, 280, 280, 280, 280, 280, 280, 280, 281, 281, 281, 281, 281, 281, 281, + 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, + 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 282, 282, 282, 282, 282, + 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, + 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 283, 283, 283, + 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, + 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 284, + 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, + 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, + 285}; + +static const mz_uint8 s_tdefl_len_extra[256] = { + 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 0}; + +static const mz_uint8 s_tdefl_small_dist_sym[512] = { + 0, 1, 2, 3, 4, 4, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, + 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10, + 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11, 11, + 11, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, + 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, + 12, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, + 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 14, 14, 14, 14, 14, + 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, + 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, + 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, + 14, 14, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, + 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, + 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, + 15, 15, 15, 15, 15, 15, 15, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, + 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, + 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, + 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 
16, 16, + 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, + 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, + 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, + 16, 16, 16, 16, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, + 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, + 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, + 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, + 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, + 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, + 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17}; + +static const mz_uint8 s_tdefl_small_dist_extra[512] = { + 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, + 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, + 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, + 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, + 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, + 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, + 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, + 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, + 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, + 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, + 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, + 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, + 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, + 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, + 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, + 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7}; + +static const mz_uint8 s_tdefl_large_dist_sym[128] = { + 0, 0, 18, 19, 20, 20, 21, 21, 22, 22, 22, 22, 23, 23, 23, 23, 24, 24, 24, + 24, 24, 24, 24, 24, 25, 25, 25, 25, 25, 25, 25, 25, 26, 26, 26, 26, 26, 26, + 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 27, 27, 27, 27, 27, 27, 27, 27, 27, + 27, 27, 27, 27, 27, 27, 27, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, + 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, + 28, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, + 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29}; + +static const mz_uint8 s_tdefl_large_dist_extra[128] = { + 0, 0, 8, 8, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 11, 11, 11, + 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12, + 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, + 12, 12, 12, 12, 12, 12, 12, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, + 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, + 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, + 13, 13, 13, 13, 13, 13, 13, 
13, 13, 13, 13, 13, 13, 13}; + +// Radix sorts tdefl_sym_freq[] array by 16-bit key m_key. Returns ptr to sorted +// values. +typedef struct { + mz_uint16 m_key, m_sym_index; +} tdefl_sym_freq; +static tdefl_sym_freq *tdefl_radix_sort_syms(mz_uint num_syms, + tdefl_sym_freq *pSyms0, + tdefl_sym_freq *pSyms1) { + mz_uint32 total_passes = 2, pass_shift, pass, i, hist[256 * 2]; + tdefl_sym_freq *pCur_syms = pSyms0, *pNew_syms = pSyms1; + MZ_CLEAR_OBJ(hist); + for (i = 0; i < num_syms; i++) { + mz_uint freq = pSyms0[i].m_key; + hist[freq & 0xFF]++; + hist[256 + ((freq >> 8) & 0xFF)]++; + } + while ((total_passes > 1) && (num_syms == hist[(total_passes - 1) * 256])) + total_passes--; + for (pass_shift = 0, pass = 0; pass < total_passes; pass++, pass_shift += 8) { + const mz_uint32 *pHist = &hist[pass << 8]; + mz_uint offsets[256], cur_ofs = 0; + for (i = 0; i < 256; i++) { + offsets[i] = cur_ofs; + cur_ofs += pHist[i]; + } + for (i = 0; i < num_syms; i++) + pNew_syms[offsets[(pCur_syms[i].m_key >> pass_shift) & 0xFF]++] = + pCur_syms[i]; + { + tdefl_sym_freq *t = pCur_syms; + pCur_syms = pNew_syms; + pNew_syms = t; + } + } + return pCur_syms; +} + +// tdefl_calculate_minimum_redundancy() originally written by: Alistair Moffat, +// alistair@cs.mu.oz.au, Jyrki Katajainen, jyrki@diku.dk, November 1996. +static void tdefl_calculate_minimum_redundancy(tdefl_sym_freq *A, int n) { + int root, leaf, next, avbl, used, dpth; + if (n == 0) + return; + else if (n == 1) { + A[0].m_key = 1; + return; + } + A[0].m_key += A[1].m_key; + root = 0; + leaf = 2; + for (next = 1; next < n - 1; next++) { + if (leaf >= n || A[root].m_key < A[leaf].m_key) { + A[next].m_key = A[root].m_key; + A[root++].m_key = (mz_uint16)next; + } else + A[next].m_key = A[leaf++].m_key; + if (leaf >= n || (root < next && A[root].m_key < A[leaf].m_key)) { + A[next].m_key = (mz_uint16)(A[next].m_key + A[root].m_key); + A[root++].m_key = (mz_uint16)next; + } else + A[next].m_key = (mz_uint16)(A[next].m_key + A[leaf++].m_key); + } + A[n - 2].m_key = 0; + for (next = n - 3; next >= 0; next--) + A[next].m_key = A[A[next].m_key].m_key + 1; + avbl = 1; + used = dpth = 0; + root = n - 2; + next = n - 1; + while (avbl > 0) { + while (root >= 0 && (int)A[root].m_key == dpth) { + used++; + root--; + } + while (avbl > used) { + A[next--].m_key = (mz_uint16)(dpth); + avbl--; + } + avbl = 2 * used; + dpth++; + used = 0; + } +} + +// Limits canonical Huffman code table's max code size. 
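+// Editor's note (illustrative, not part of miniz): once code lengths are known,
+// tdefl_optimize_huffman_table() below assigns canonical codes - codes of each
+// length are consecutive, and each length starts at (previous start + count of
+// previous length) << 1. A hedged stand-alone sketch (hypothetical helper,
+// bit-reversal for the LSB-first output stream omitted):
+#if 0
+static void example_canonical_codes(const mz_uint8 *len, int n,
+                                    mz_uint16 *code) {
+  mz_uint count[17] = {0}, next[17];
+  mz_uint c = 0;
+  int i;
+  for (i = 0; i < n; i++) count[len[i]]++; /* histogram of code lengths */
+  count[0] = 0;                            /* unused symbols get no code */
+  for (i = 1; i <= 16; i++) next[i] = c = (c + count[i - 1]) << 1;
+  for (i = 0; i < n; i++)
+    if (len[i]) code[i] = (mz_uint16)next[len[i]]++;
+}
+#endif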
+enum { TDEFL_MAX_SUPPORTED_HUFF_CODESIZE = 32 }; +static void tdefl_huffman_enforce_max_code_size(int *pNum_codes, + int code_list_len, + int max_code_size) { + int i; + mz_uint32 total = 0; + if (code_list_len <= 1) + return; + for (i = max_code_size + 1; i <= TDEFL_MAX_SUPPORTED_HUFF_CODESIZE; i++) + pNum_codes[max_code_size] += pNum_codes[i]; + for (i = max_code_size; i > 0; i--) + total += (((mz_uint32)pNum_codes[i]) << (max_code_size - i)); + while (total != (1UL << max_code_size)) { + pNum_codes[max_code_size]--; + for (i = max_code_size - 1; i > 0; i--) + if (pNum_codes[i]) { + pNum_codes[i]--; + pNum_codes[i + 1] += 2; + break; + } + total--; + } +} + +static void tdefl_optimize_huffman_table(tdefl_compressor *d, int table_num, + int table_len, int code_size_limit, + int static_table) { + int i, j, l, num_codes[1 + TDEFL_MAX_SUPPORTED_HUFF_CODESIZE]; + mz_uint next_code[TDEFL_MAX_SUPPORTED_HUFF_CODESIZE + 1]; + MZ_CLEAR_OBJ(num_codes); + if (static_table) { + for (i = 0; i < table_len; i++) + num_codes[d->m_huff_code_sizes[table_num][i]]++; + } else { + tdefl_sym_freq syms0[TDEFL_MAX_HUFF_SYMBOLS], syms1[TDEFL_MAX_HUFF_SYMBOLS], + *pSyms; + int num_used_syms = 0; + const mz_uint16 *pSym_count = &d->m_huff_count[table_num][0]; + for (i = 0; i < table_len; i++) + if (pSym_count[i]) { + syms0[num_used_syms].m_key = (mz_uint16)pSym_count[i]; + syms0[num_used_syms++].m_sym_index = (mz_uint16)i; + } + + pSyms = tdefl_radix_sort_syms(num_used_syms, syms0, syms1); + tdefl_calculate_minimum_redundancy(pSyms, num_used_syms); + + for (i = 0; i < num_used_syms; i++) + num_codes[pSyms[i].m_key]++; + + tdefl_huffman_enforce_max_code_size(num_codes, num_used_syms, + code_size_limit); + + MZ_CLEAR_OBJ(d->m_huff_code_sizes[table_num]); + MZ_CLEAR_OBJ(d->m_huff_codes[table_num]); + for (i = 1, j = num_used_syms; i <= code_size_limit; i++) + for (l = num_codes[i]; l > 0; l--) + d->m_huff_code_sizes[table_num][pSyms[--j].m_sym_index] = (mz_uint8)(i); + } + + next_code[1] = 0; + for (j = 0, i = 2; i <= code_size_limit; i++) + next_code[i] = j = ((j + num_codes[i - 1]) << 1); + + for (i = 0; i < table_len; i++) { + mz_uint rev_code = 0, code, code_size; + if ((code_size = d->m_huff_code_sizes[table_num][i]) == 0) + continue; + code = next_code[code_size]++; + for (l = code_size; l > 0; l--, code >>= 1) + rev_code = (rev_code << 1) | (code & 1); + d->m_huff_codes[table_num][i] = (mz_uint16)rev_code; + } +} + +#define TDEFL_PUT_BITS(b, l) \ + do { \ + mz_uint bits = b; \ + mz_uint len = l; \ + MZ_ASSERT(bits <= ((1U << len) - 1U)); \ + d->m_bit_buffer |= (bits << d->m_bits_in); \ + d->m_bits_in += len; \ + while (d->m_bits_in >= 8) { \ + if (d->m_pOutput_buf < d->m_pOutput_buf_end) \ + *d->m_pOutput_buf++ = (mz_uint8)(d->m_bit_buffer); \ + d->m_bit_buffer >>= 8; \ + d->m_bits_in -= 8; \ + } \ + } \ + MZ_MACRO_END + +#define TDEFL_RLE_PREV_CODE_SIZE() \ + { \ + if (rle_repeat_count) { \ + if (rle_repeat_count < 3) { \ + d->m_huff_count[2][prev_code_size] = (mz_uint16)( \ + d->m_huff_count[2][prev_code_size] + rle_repeat_count); \ + while (rle_repeat_count--) \ + packed_code_sizes[num_packed_code_sizes++] = prev_code_size; \ + } else { \ + d->m_huff_count[2][16] = (mz_uint16)(d->m_huff_count[2][16] + 1); \ + packed_code_sizes[num_packed_code_sizes++] = 16; \ + packed_code_sizes[num_packed_code_sizes++] = \ + (mz_uint8)(rle_repeat_count - 3); \ + } \ + rle_repeat_count = 0; \ + } \ + } + +#define TDEFL_RLE_ZERO_CODE_SIZE() \ + { \ + if (rle_z_count) { \ + if (rle_z_count < 3) { \ + d->m_huff_count[2][0] = \ 
+ (mz_uint16)(d->m_huff_count[2][0] + rle_z_count); \ + while (rle_z_count--) \ + packed_code_sizes[num_packed_code_sizes++] = 0; \ + } else if (rle_z_count <= 10) { \ + d->m_huff_count[2][17] = (mz_uint16)(d->m_huff_count[2][17] + 1); \ + packed_code_sizes[num_packed_code_sizes++] = 17; \ + packed_code_sizes[num_packed_code_sizes++] = \ + (mz_uint8)(rle_z_count - 3); \ + } else { \ + d->m_huff_count[2][18] = (mz_uint16)(d->m_huff_count[2][18] + 1); \ + packed_code_sizes[num_packed_code_sizes++] = 18; \ + packed_code_sizes[num_packed_code_sizes++] = \ + (mz_uint8)(rle_z_count - 11); \ + } \ + rle_z_count = 0; \ + } \ + } + +static mz_uint8 s_tdefl_packed_code_size_syms_swizzle[] = { + 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15}; + +static void tdefl_start_dynamic_block(tdefl_compressor *d) { + int num_lit_codes, num_dist_codes, num_bit_lengths; + mz_uint i, total_code_sizes_to_pack, num_packed_code_sizes, rle_z_count, + rle_repeat_count, packed_code_sizes_index; + mz_uint8 + code_sizes_to_pack[TDEFL_MAX_HUFF_SYMBOLS_0 + TDEFL_MAX_HUFF_SYMBOLS_1], + packed_code_sizes[TDEFL_MAX_HUFF_SYMBOLS_0 + TDEFL_MAX_HUFF_SYMBOLS_1], + prev_code_size = 0xFF; + + d->m_huff_count[0][256] = 1; + + tdefl_optimize_huffman_table(d, 0, TDEFL_MAX_HUFF_SYMBOLS_0, 15, MZ_FALSE); + tdefl_optimize_huffman_table(d, 1, TDEFL_MAX_HUFF_SYMBOLS_1, 15, MZ_FALSE); + + for (num_lit_codes = 286; num_lit_codes > 257; num_lit_codes--) + if (d->m_huff_code_sizes[0][num_lit_codes - 1]) + break; + for (num_dist_codes = 30; num_dist_codes > 1; num_dist_codes--) + if (d->m_huff_code_sizes[1][num_dist_codes - 1]) + break; + + memcpy(code_sizes_to_pack, &d->m_huff_code_sizes[0][0], + sizeof(mz_uint8) * num_lit_codes); + memcpy(code_sizes_to_pack + num_lit_codes, &d->m_huff_code_sizes[1][0], + sizeof(mz_uint8) * num_dist_codes); + total_code_sizes_to_pack = num_lit_codes + num_dist_codes; + num_packed_code_sizes = 0; + rle_z_count = 0; + rle_repeat_count = 0; + + memset(&d->m_huff_count[2][0], 0, + sizeof(d->m_huff_count[2][0]) * TDEFL_MAX_HUFF_SYMBOLS_2); + for (i = 0; i < total_code_sizes_to_pack; i++) { + mz_uint8 code_size = code_sizes_to_pack[i]; + if (!code_size) { + TDEFL_RLE_PREV_CODE_SIZE(); + if (++rle_z_count == 138) { + TDEFL_RLE_ZERO_CODE_SIZE(); + } + } else { + TDEFL_RLE_ZERO_CODE_SIZE(); + if (code_size != prev_code_size) { + TDEFL_RLE_PREV_CODE_SIZE(); + d->m_huff_count[2][code_size] = + (mz_uint16)(d->m_huff_count[2][code_size] + 1); + packed_code_sizes[num_packed_code_sizes++] = code_size; + } else if (++rle_repeat_count == 6) { + TDEFL_RLE_PREV_CODE_SIZE(); + } + } + prev_code_size = code_size; + } + if (rle_repeat_count) { + TDEFL_RLE_PREV_CODE_SIZE(); + } else { + TDEFL_RLE_ZERO_CODE_SIZE(); + } + + tdefl_optimize_huffman_table(d, 2, TDEFL_MAX_HUFF_SYMBOLS_2, 7, MZ_FALSE); + + TDEFL_PUT_BITS(2, 2); + + TDEFL_PUT_BITS(num_lit_codes - 257, 5); + TDEFL_PUT_BITS(num_dist_codes - 1, 5); + + for (num_bit_lengths = 18; num_bit_lengths >= 0; num_bit_lengths--) + if (d->m_huff_code_sizes + [2][s_tdefl_packed_code_size_syms_swizzle[num_bit_lengths]]) + break; + num_bit_lengths = MZ_MAX(4, (num_bit_lengths + 1)); + TDEFL_PUT_BITS(num_bit_lengths - 4, 4); + for (i = 0; (int)i < num_bit_lengths; i++) + TDEFL_PUT_BITS( + d->m_huff_code_sizes[2][s_tdefl_packed_code_size_syms_swizzle[i]], 3); + + for (packed_code_sizes_index = 0; + packed_code_sizes_index < num_packed_code_sizes;) { + mz_uint code = packed_code_sizes[packed_code_sizes_index++]; + MZ_ASSERT(code < TDEFL_MAX_HUFF_SYMBOLS_2); + 
TDEFL_PUT_BITS(d->m_huff_codes[2][code], d->m_huff_code_sizes[2][code]); + if (code >= 16) + TDEFL_PUT_BITS(packed_code_sizes[packed_code_sizes_index++], + "\02\03\07"[code - 16]); + } +} + +static void tdefl_start_static_block(tdefl_compressor *d) { + mz_uint i; + mz_uint8 *p = &d->m_huff_code_sizes[0][0]; + + for (i = 0; i <= 143; ++i) + *p++ = 8; + for (; i <= 255; ++i) + *p++ = 9; + for (; i <= 279; ++i) + *p++ = 7; + for (; i <= 287; ++i) + *p++ = 8; + + memset(d->m_huff_code_sizes[1], 5, 32); + + tdefl_optimize_huffman_table(d, 0, 288, 15, MZ_TRUE); + tdefl_optimize_huffman_table(d, 1, 32, 15, MZ_TRUE); + + TDEFL_PUT_BITS(1, 2); +} + +static const mz_uint mz_bitmasks[17] = { + 0x0000, 0x0001, 0x0003, 0x0007, 0x000F, 0x001F, 0x003F, 0x007F, 0x00FF, + 0x01FF, 0x03FF, 0x07FF, 0x0FFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF}; + +#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN && \ + MINIZ_HAS_64BIT_REGISTERS +static mz_bool tdefl_compress_lz_codes(tdefl_compressor *d) { + mz_uint flags; + mz_uint8 *pLZ_codes; + mz_uint8 *pOutput_buf = d->m_pOutput_buf; + mz_uint8 *pLZ_code_buf_end = d->m_pLZ_code_buf; + mz_uint64 bit_buffer = d->m_bit_buffer; + mz_uint bits_in = d->m_bits_in; + +#define TDEFL_PUT_BITS_FAST(b, l) \ + { \ + bit_buffer |= (((mz_uint64)(b)) << bits_in); \ + bits_in += (l); \ + } + + flags = 1; + for (pLZ_codes = d->m_lz_code_buf; pLZ_codes < pLZ_code_buf_end; + flags >>= 1) { + if (flags == 1) + flags = *pLZ_codes++ | 0x100; + + if (flags & 1) { + mz_uint s0, s1, n0, n1, sym, num_extra_bits; + mz_uint match_len = pLZ_codes[0], + match_dist = *(const mz_uint16 *)(pLZ_codes + 1); + pLZ_codes += 3; + + MZ_ASSERT(d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]); + TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][s_tdefl_len_sym[match_len]], + d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]); + TDEFL_PUT_BITS_FAST(match_len & mz_bitmasks[s_tdefl_len_extra[match_len]], + s_tdefl_len_extra[match_len]); + + // This sequence coaxes MSVC into using cmov's vs. jmp's. + s0 = s_tdefl_small_dist_sym[match_dist & 511]; + n0 = s_tdefl_small_dist_extra[match_dist & 511]; + s1 = s_tdefl_large_dist_sym[match_dist >> 8]; + n1 = s_tdefl_large_dist_extra[match_dist >> 8]; + sym = (match_dist < 512) ? s0 : s1; + num_extra_bits = (match_dist < 512) ? 
n0 : n1; + + MZ_ASSERT(d->m_huff_code_sizes[1][sym]); + TDEFL_PUT_BITS_FAST(d->m_huff_codes[1][sym], + d->m_huff_code_sizes[1][sym]); + TDEFL_PUT_BITS_FAST(match_dist & mz_bitmasks[num_extra_bits], + num_extra_bits); + } else { + mz_uint lit = *pLZ_codes++; + MZ_ASSERT(d->m_huff_code_sizes[0][lit]); + TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit], + d->m_huff_code_sizes[0][lit]); + + if (((flags & 2) == 0) && (pLZ_codes < pLZ_code_buf_end)) { + flags >>= 1; + lit = *pLZ_codes++; + MZ_ASSERT(d->m_huff_code_sizes[0][lit]); + TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit], + d->m_huff_code_sizes[0][lit]); + + if (((flags & 2) == 0) && (pLZ_codes < pLZ_code_buf_end)) { + flags >>= 1; + lit = *pLZ_codes++; + MZ_ASSERT(d->m_huff_code_sizes[0][lit]); + TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit], + d->m_huff_code_sizes[0][lit]); + } + } + } + + if (pOutput_buf >= d->m_pOutput_buf_end) + return MZ_FALSE; + + *(mz_uint64 *)pOutput_buf = bit_buffer; + pOutput_buf += (bits_in >> 3); + bit_buffer >>= (bits_in & ~7); + bits_in &= 7; + } + +#undef TDEFL_PUT_BITS_FAST + + d->m_pOutput_buf = pOutput_buf; + d->m_bits_in = 0; + d->m_bit_buffer = 0; + + while (bits_in) { + mz_uint32 n = MZ_MIN(bits_in, 16); + TDEFL_PUT_BITS((mz_uint)bit_buffer & mz_bitmasks[n], n); + bit_buffer >>= n; + bits_in -= n; + } + + TDEFL_PUT_BITS(d->m_huff_codes[0][256], d->m_huff_code_sizes[0][256]); + + return (d->m_pOutput_buf < d->m_pOutput_buf_end); +} +#else +static mz_bool tdefl_compress_lz_codes(tdefl_compressor *d) { + mz_uint flags; + mz_uint8 *pLZ_codes; + + flags = 1; + for (pLZ_codes = d->m_lz_code_buf; pLZ_codes < d->m_pLZ_code_buf; + flags >>= 1) { + if (flags == 1) + flags = *pLZ_codes++ | 0x100; + if (flags & 1) { + mz_uint sym, num_extra_bits; + mz_uint match_len = pLZ_codes[0], + match_dist = (pLZ_codes[1] | (pLZ_codes[2] << 8)); + pLZ_codes += 3; + + MZ_ASSERT(d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]); + TDEFL_PUT_BITS(d->m_huff_codes[0][s_tdefl_len_sym[match_len]], + d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]); + TDEFL_PUT_BITS(match_len & mz_bitmasks[s_tdefl_len_extra[match_len]], + s_tdefl_len_extra[match_len]); + + if (match_dist < 512) { + sym = s_tdefl_small_dist_sym[match_dist]; + num_extra_bits = s_tdefl_small_dist_extra[match_dist]; + } else { + sym = s_tdefl_large_dist_sym[match_dist >> 8]; + num_extra_bits = s_tdefl_large_dist_extra[match_dist >> 8]; + } + TDEFL_PUT_BITS(d->m_huff_codes[1][sym], d->m_huff_code_sizes[1][sym]); + TDEFL_PUT_BITS(match_dist & mz_bitmasks[num_extra_bits], num_extra_bits); + } else { + mz_uint lit = *pLZ_codes++; + MZ_ASSERT(d->m_huff_code_sizes[0][lit]); + TDEFL_PUT_BITS(d->m_huff_codes[0][lit], d->m_huff_code_sizes[0][lit]); + } + } + + TDEFL_PUT_BITS(d->m_huff_codes[0][256], d->m_huff_code_sizes[0][256]); + + return (d->m_pOutput_buf < d->m_pOutput_buf_end); +} +#endif // MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN && + // MINIZ_HAS_64BIT_REGISTERS + +static mz_bool tdefl_compress_block(tdefl_compressor *d, mz_bool static_block) { + if (static_block) + tdefl_start_static_block(d); + else + tdefl_start_dynamic_block(d); + return tdefl_compress_lz_codes(d); +} + +static int tdefl_flush_block(tdefl_compressor *d, int flush) { + mz_uint saved_bit_buf, saved_bits_in; + mz_uint8 *pSaved_output_buf; + mz_bool comp_block_succeeded = MZ_FALSE; + int n, use_raw_block = + ((d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS) != 0) && + (d->m_lookahead_pos - d->m_lz_code_buf_dict_pos) <= d->m_dict_size; + mz_uint8 *pOutput_buf_start = + ((d->m_pPut_buf_func == 
NULL) && + ((*d->m_pOut_buf_size - d->m_out_buf_ofs) >= TDEFL_OUT_BUF_SIZE)) + ? ((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs) + : d->m_output_buf; + + d->m_pOutput_buf = pOutput_buf_start; + d->m_pOutput_buf_end = d->m_pOutput_buf + TDEFL_OUT_BUF_SIZE - 16; + + MZ_ASSERT(!d->m_output_flush_remaining); + d->m_output_flush_ofs = 0; + d->m_output_flush_remaining = 0; + + *d->m_pLZ_flags = (mz_uint8)(*d->m_pLZ_flags >> d->m_num_flags_left); + d->m_pLZ_code_buf -= (d->m_num_flags_left == 8); + + if ((d->m_flags & TDEFL_WRITE_ZLIB_HEADER) && (!d->m_block_index)) { + TDEFL_PUT_BITS(0x78, 8); + TDEFL_PUT_BITS(0x01, 8); + } + + TDEFL_PUT_BITS(flush == TDEFL_FINISH, 1); + + pSaved_output_buf = d->m_pOutput_buf; + saved_bit_buf = d->m_bit_buffer; + saved_bits_in = d->m_bits_in; + + if (!use_raw_block) + comp_block_succeeded = + tdefl_compress_block(d, (d->m_flags & TDEFL_FORCE_ALL_STATIC_BLOCKS) || + (d->m_total_lz_bytes < 48)); + + // If the block gets expanded, forget the current contents of the output + // buffer and send a raw block instead. + if (((use_raw_block) || + ((d->m_total_lz_bytes) && ((d->m_pOutput_buf - pSaved_output_buf + 1U) >= + d->m_total_lz_bytes))) && + ((d->m_lookahead_pos - d->m_lz_code_buf_dict_pos) <= d->m_dict_size)) { + mz_uint i; + d->m_pOutput_buf = pSaved_output_buf; + d->m_bit_buffer = saved_bit_buf, d->m_bits_in = saved_bits_in; + TDEFL_PUT_BITS(0, 2); + if (d->m_bits_in) { + TDEFL_PUT_BITS(0, 8 - d->m_bits_in); + } + for (i = 2; i; --i, d->m_total_lz_bytes ^= 0xFFFF) { + TDEFL_PUT_BITS(d->m_total_lz_bytes & 0xFFFF, 16); + } + for (i = 0; i < d->m_total_lz_bytes; ++i) { + TDEFL_PUT_BITS( + d->m_dict[(d->m_lz_code_buf_dict_pos + i) & TDEFL_LZ_DICT_SIZE_MASK], + 8); + } + } + // Check for the extremely unlikely (if not impossible) case of the compressed + // block not fitting into the output buffer when using dynamic codes. 
+ else if (!comp_block_succeeded) { + d->m_pOutput_buf = pSaved_output_buf; + d->m_bit_buffer = saved_bit_buf, d->m_bits_in = saved_bits_in; + tdefl_compress_block(d, MZ_TRUE); + } + + if (flush) { + if (flush == TDEFL_FINISH) { + if (d->m_bits_in) { + TDEFL_PUT_BITS(0, 8 - d->m_bits_in); + } + if (d->m_flags & TDEFL_WRITE_ZLIB_HEADER) { + mz_uint i, a = d->m_adler32; + for (i = 0; i < 4; i++) { + TDEFL_PUT_BITS((a >> 24) & 0xFF, 8); + a <<= 8; + } + } + } else { + mz_uint i, z = 0; + TDEFL_PUT_BITS(0, 3); + if (d->m_bits_in) { + TDEFL_PUT_BITS(0, 8 - d->m_bits_in); + } + for (i = 2; i; --i, z ^= 0xFFFF) { + TDEFL_PUT_BITS(z & 0xFFFF, 16); + } + } + } + + MZ_ASSERT(d->m_pOutput_buf < d->m_pOutput_buf_end); + + memset(&d->m_huff_count[0][0], 0, + sizeof(d->m_huff_count[0][0]) * TDEFL_MAX_HUFF_SYMBOLS_0); + memset(&d->m_huff_count[1][0], 0, + sizeof(d->m_huff_count[1][0]) * TDEFL_MAX_HUFF_SYMBOLS_1); + + d->m_pLZ_code_buf = d->m_lz_code_buf + 1; + d->m_pLZ_flags = d->m_lz_code_buf; + d->m_num_flags_left = 8; + d->m_lz_code_buf_dict_pos += d->m_total_lz_bytes; + d->m_total_lz_bytes = 0; + d->m_block_index++; + + if ((n = (int)(d->m_pOutput_buf - pOutput_buf_start)) != 0) { + if (d->m_pPut_buf_func) { + *d->m_pIn_buf_size = d->m_pSrc - (const mz_uint8 *)d->m_pIn_buf; + if (!(*d->m_pPut_buf_func)(d->m_output_buf, n, d->m_pPut_buf_user)) + return (d->m_prev_return_status = TDEFL_STATUS_PUT_BUF_FAILED); + } else if (pOutput_buf_start == d->m_output_buf) { + int bytes_to_copy = (int)MZ_MIN( + (size_t)n, (size_t)(*d->m_pOut_buf_size - d->m_out_buf_ofs)); + memcpy((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs, d->m_output_buf, + bytes_to_copy); + d->m_out_buf_ofs += bytes_to_copy; + if ((n -= bytes_to_copy) != 0) { + d->m_output_flush_ofs = bytes_to_copy; + d->m_output_flush_remaining = n; + } + } else { + d->m_out_buf_ofs += n; + } + } + + return d->m_output_flush_remaining; +} + +#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES +#define TDEFL_READ_UNALIGNED_WORD(p) ((p)[0] | (p)[1] << 8) +static MZ_FORCEINLINE void +tdefl_find_match(tdefl_compressor *d, mz_uint lookahead_pos, mz_uint max_dist, + mz_uint max_match_len, mz_uint *pMatch_dist, + mz_uint *pMatch_len) { + mz_uint dist, pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK, + match_len = *pMatch_len, probe_pos = pos, next_probe_pos, + probe_len; + mz_uint num_probes_left = d->m_max_probes[match_len >= 32]; + const mz_uint16 *s = (const mz_uint16 *)(d->m_dict + pos), *p, *q; + mz_uint16 c01 = TDEFL_READ_UNALIGNED_WORD(&d->m_dict[pos + match_len - 1]), + s01 = *s; + MZ_ASSERT(max_match_len <= TDEFL_MAX_MATCH_LEN); + if (max_match_len <= match_len) + return; + for (;;) { + for (;;) { + if (--num_probes_left == 0) + return; +#define TDEFL_PROBE \ + next_probe_pos = d->m_next[probe_pos]; \ + if ((!next_probe_pos) || \ + ((dist = (mz_uint16)(lookahead_pos - next_probe_pos)) > max_dist)) \ + return; \ + probe_pos = next_probe_pos & TDEFL_LZ_DICT_SIZE_MASK; \ + if (TDEFL_READ_UNALIGNED_WORD(&d->m_dict[probe_pos + match_len - 1]) == c01) \ + break; + TDEFL_PROBE; + TDEFL_PROBE; + TDEFL_PROBE; + } + if (!dist) + break; + q = (const mz_uint16 *)(d->m_dict + probe_pos); + if (*q != s01) + continue; + p = s; + probe_len = 32; + do { + } while ((*(++p) == *(++q)) && (*(++p) == *(++q)) && (*(++p) == *(++q)) && + (*(++p) == *(++q)) && (--probe_len > 0)); + if (!probe_len) { + *pMatch_dist = dist; + *pMatch_len = MZ_MIN(max_match_len, TDEFL_MAX_MATCH_LEN); + break; + } else if ((probe_len = ((mz_uint)(p - s) * 2) + + (mz_uint)(*(const mz_uint8 *)p == + *(const mz_uint8 
*)q)) > match_len) { + *pMatch_dist = dist; + if ((*pMatch_len = match_len = MZ_MIN(max_match_len, probe_len)) == + max_match_len) + break; + c01 = TDEFL_READ_UNALIGNED_WORD(&d->m_dict[pos + match_len - 1]); + } + } +} +#else +static MZ_FORCEINLINE void +tdefl_find_match(tdefl_compressor *d, mz_uint lookahead_pos, mz_uint max_dist, + mz_uint max_match_len, mz_uint *pMatch_dist, + mz_uint *pMatch_len) { + mz_uint dist, pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK, + match_len = *pMatch_len, probe_pos = pos, next_probe_pos, + probe_len; + mz_uint num_probes_left = d->m_max_probes[match_len >= 32]; + const mz_uint8 *s = d->m_dict + pos, *p, *q; + mz_uint8 c0 = d->m_dict[pos + match_len], c1 = d->m_dict[pos + match_len - 1]; + MZ_ASSERT(max_match_len <= TDEFL_MAX_MATCH_LEN); + if (max_match_len <= match_len) + return; + for (;;) { + for (;;) { + if (--num_probes_left == 0) + return; +#define TDEFL_PROBE \ + next_probe_pos = d->m_next[probe_pos]; \ + if ((!next_probe_pos) || \ + ((dist = (mz_uint16)(lookahead_pos - next_probe_pos)) > max_dist)) \ + return; \ + probe_pos = next_probe_pos & TDEFL_LZ_DICT_SIZE_MASK; \ + if ((d->m_dict[probe_pos + match_len] == c0) && \ + (d->m_dict[probe_pos + match_len - 1] == c1)) \ + break; + TDEFL_PROBE; + TDEFL_PROBE; + TDEFL_PROBE; + } + if (!dist) + break; + p = s; + q = d->m_dict + probe_pos; + for (probe_len = 0; probe_len < max_match_len; probe_len++) + if (*p++ != *q++) + break; + if (probe_len > match_len) { + *pMatch_dist = dist; + if ((*pMatch_len = match_len = probe_len) == max_match_len) + return; + c0 = d->m_dict[pos + match_len]; + c1 = d->m_dict[pos + match_len - 1]; + } + } +} +#endif // #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES + +#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN +static mz_bool tdefl_compress_fast(tdefl_compressor *d) { + // Faster, minimally featured LZRW1-style match+parse loop with better + // register utilization. Intended for applications where raw throughput is + // valued more highly than ratio. 
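  // Note (editorial, grounded in tdefl_compress() further down): this fast
  // path is only entered when the compressor was created with a single
  // dictionary probe ((flags & TDEFL_MAX_PROBES_MASK) == 1), greedy parsing
  // enabled, and none of the filter/RLE/raw-block flags set, and only in
  // little-endian builds with unaligned loads enabled per the surrounding
  // #if; otherwise tdefl_compress_normal() is used instead.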
+ mz_uint lookahead_pos = d->m_lookahead_pos, + lookahead_size = d->m_lookahead_size, dict_size = d->m_dict_size, + total_lz_bytes = d->m_total_lz_bytes, + num_flags_left = d->m_num_flags_left; + mz_uint8 *pLZ_code_buf = d->m_pLZ_code_buf, *pLZ_flags = d->m_pLZ_flags; + mz_uint cur_pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK; + + while ((d->m_src_buf_left) || ((d->m_flush) && (lookahead_size))) { + const mz_uint TDEFL_COMP_FAST_LOOKAHEAD_SIZE = 4096; + mz_uint dst_pos = + (lookahead_pos + lookahead_size) & TDEFL_LZ_DICT_SIZE_MASK; + mz_uint num_bytes_to_process = (mz_uint)MZ_MIN( + d->m_src_buf_left, TDEFL_COMP_FAST_LOOKAHEAD_SIZE - lookahead_size); + d->m_src_buf_left -= num_bytes_to_process; + lookahead_size += num_bytes_to_process; + + while (num_bytes_to_process) { + mz_uint32 n = MZ_MIN(TDEFL_LZ_DICT_SIZE - dst_pos, num_bytes_to_process); + memcpy(d->m_dict + dst_pos, d->m_pSrc, n); + if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1)) + memcpy(d->m_dict + TDEFL_LZ_DICT_SIZE + dst_pos, d->m_pSrc, + MZ_MIN(n, (TDEFL_MAX_MATCH_LEN - 1) - dst_pos)); + d->m_pSrc += n; + dst_pos = (dst_pos + n) & TDEFL_LZ_DICT_SIZE_MASK; + num_bytes_to_process -= n; + } + + dict_size = MZ_MIN(TDEFL_LZ_DICT_SIZE - lookahead_size, dict_size); + if ((!d->m_flush) && (lookahead_size < TDEFL_COMP_FAST_LOOKAHEAD_SIZE)) + break; + + while (lookahead_size >= 4) { + mz_uint cur_match_dist, cur_match_len = 1; + mz_uint8 *pCur_dict = d->m_dict + cur_pos; + mz_uint first_trigram = (*(const mz_uint32 *)pCur_dict) & 0xFFFFFF; + mz_uint hash = + (first_trigram ^ (first_trigram >> (24 - (TDEFL_LZ_HASH_BITS - 8)))) & + TDEFL_LEVEL1_HASH_SIZE_MASK; + mz_uint probe_pos = d->m_hash[hash]; + d->m_hash[hash] = (mz_uint16)lookahead_pos; + + if (((cur_match_dist = (mz_uint16)(lookahead_pos - probe_pos)) <= + dict_size) && + ((mz_uint32)( + *(d->m_dict + (probe_pos & TDEFL_LZ_DICT_SIZE_MASK)) | + (*(d->m_dict + ((probe_pos & TDEFL_LZ_DICT_SIZE_MASK) + 1)) + << 8) | + (*(d->m_dict + ((probe_pos & TDEFL_LZ_DICT_SIZE_MASK) + 2)) + << 16)) == first_trigram)) { + const mz_uint16 *p = (const mz_uint16 *)pCur_dict; + const mz_uint16 *q = + (const mz_uint16 *)(d->m_dict + + (probe_pos & TDEFL_LZ_DICT_SIZE_MASK)); + mz_uint32 probe_len = 32; + do { + } while ((*(++p) == *(++q)) && (*(++p) == *(++q)) && + (*(++p) == *(++q)) && (*(++p) == *(++q)) && (--probe_len > 0)); + cur_match_len = ((mz_uint)(p - (const mz_uint16 *)pCur_dict) * 2) + + (mz_uint)(*(const mz_uint8 *)p == *(const mz_uint8 *)q); + if (!probe_len) + cur_match_len = cur_match_dist ? TDEFL_MAX_MATCH_LEN : 0; + + if ((cur_match_len < TDEFL_MIN_MATCH_LEN) || + ((cur_match_len == TDEFL_MIN_MATCH_LEN) && + (cur_match_dist >= 8U * 1024U))) { + cur_match_len = 1; + *pLZ_code_buf++ = (mz_uint8)first_trigram; + *pLZ_flags = (mz_uint8)(*pLZ_flags >> 1); + d->m_huff_count[0][(mz_uint8)first_trigram]++; + } else { + mz_uint32 s0, s1; + cur_match_len = MZ_MIN(cur_match_len, lookahead_size); + + MZ_ASSERT((cur_match_len >= TDEFL_MIN_MATCH_LEN) && + (cur_match_dist >= 1) && + (cur_match_dist <= TDEFL_LZ_DICT_SIZE)); + + cur_match_dist--; + + pLZ_code_buf[0] = (mz_uint8)(cur_match_len - TDEFL_MIN_MATCH_LEN); + *(mz_uint16 *)(&pLZ_code_buf[1]) = (mz_uint16)cur_match_dist; + pLZ_code_buf += 3; + *pLZ_flags = (mz_uint8)((*pLZ_flags >> 1) | 0x80); + + s0 = s_tdefl_small_dist_sym[cur_match_dist & 511]; + s1 = s_tdefl_large_dist_sym[cur_match_dist >> 8]; + d->m_huff_count[1][(cur_match_dist < 512) ? 
s0 : s1]++; + + d->m_huff_count[0][s_tdefl_len_sym[cur_match_len - + TDEFL_MIN_MATCH_LEN]]++; + } + } else { + *pLZ_code_buf++ = (mz_uint8)first_trigram; + *pLZ_flags = (mz_uint8)(*pLZ_flags >> 1); + d->m_huff_count[0][(mz_uint8)first_trigram]++; + } + + if (--num_flags_left == 0) { + num_flags_left = 8; + pLZ_flags = pLZ_code_buf++; + } + + total_lz_bytes += cur_match_len; + lookahead_pos += cur_match_len; + dict_size = MZ_MIN(dict_size + cur_match_len, TDEFL_LZ_DICT_SIZE); + cur_pos = (cur_pos + cur_match_len) & TDEFL_LZ_DICT_SIZE_MASK; + MZ_ASSERT(lookahead_size >= cur_match_len); + lookahead_size -= cur_match_len; + + if (pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) { + int n; + d->m_lookahead_pos = lookahead_pos; + d->m_lookahead_size = lookahead_size; + d->m_dict_size = dict_size; + d->m_total_lz_bytes = total_lz_bytes; + d->m_pLZ_code_buf = pLZ_code_buf; + d->m_pLZ_flags = pLZ_flags; + d->m_num_flags_left = num_flags_left; + if ((n = tdefl_flush_block(d, 0)) != 0) + return (n < 0) ? MZ_FALSE : MZ_TRUE; + total_lz_bytes = d->m_total_lz_bytes; + pLZ_code_buf = d->m_pLZ_code_buf; + pLZ_flags = d->m_pLZ_flags; + num_flags_left = d->m_num_flags_left; + } + } + + while (lookahead_size) { + mz_uint8 lit = d->m_dict[cur_pos]; + + total_lz_bytes++; + *pLZ_code_buf++ = lit; + *pLZ_flags = (mz_uint8)(*pLZ_flags >> 1); + if (--num_flags_left == 0) { + num_flags_left = 8; + pLZ_flags = pLZ_code_buf++; + } + + d->m_huff_count[0][lit]++; + + lookahead_pos++; + dict_size = MZ_MIN(dict_size + 1, TDEFL_LZ_DICT_SIZE); + cur_pos = (cur_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK; + lookahead_size--; + + if (pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) { + int n; + d->m_lookahead_pos = lookahead_pos; + d->m_lookahead_size = lookahead_size; + d->m_dict_size = dict_size; + d->m_total_lz_bytes = total_lz_bytes; + d->m_pLZ_code_buf = pLZ_code_buf; + d->m_pLZ_flags = pLZ_flags; + d->m_num_flags_left = num_flags_left; + if ((n = tdefl_flush_block(d, 0)) != 0) + return (n < 0) ? 
MZ_FALSE : MZ_TRUE; + total_lz_bytes = d->m_total_lz_bytes; + pLZ_code_buf = d->m_pLZ_code_buf; + pLZ_flags = d->m_pLZ_flags; + num_flags_left = d->m_num_flags_left; + } + } + } + + d->m_lookahead_pos = lookahead_pos; + d->m_lookahead_size = lookahead_size; + d->m_dict_size = dict_size; + d->m_total_lz_bytes = total_lz_bytes; + d->m_pLZ_code_buf = pLZ_code_buf; + d->m_pLZ_flags = pLZ_flags; + d->m_num_flags_left = num_flags_left; + return MZ_TRUE; +} +#endif // MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN + +static MZ_FORCEINLINE void tdefl_record_literal(tdefl_compressor *d, + mz_uint8 lit) { + d->m_total_lz_bytes++; + *d->m_pLZ_code_buf++ = lit; + *d->m_pLZ_flags = (mz_uint8)(*d->m_pLZ_flags >> 1); + if (--d->m_num_flags_left == 0) { + d->m_num_flags_left = 8; + d->m_pLZ_flags = d->m_pLZ_code_buf++; + } + d->m_huff_count[0][lit]++; +} + +static MZ_FORCEINLINE void +tdefl_record_match(tdefl_compressor *d, mz_uint match_len, mz_uint match_dist) { + mz_uint32 s0, s1; + + MZ_ASSERT((match_len >= TDEFL_MIN_MATCH_LEN) && (match_dist >= 1) && + (match_dist <= TDEFL_LZ_DICT_SIZE)); + + d->m_total_lz_bytes += match_len; + + d->m_pLZ_code_buf[0] = (mz_uint8)(match_len - TDEFL_MIN_MATCH_LEN); + + match_dist -= 1; + d->m_pLZ_code_buf[1] = (mz_uint8)(match_dist & 0xFF); + d->m_pLZ_code_buf[2] = (mz_uint8)(match_dist >> 8); + d->m_pLZ_code_buf += 3; + + *d->m_pLZ_flags = (mz_uint8)((*d->m_pLZ_flags >> 1) | 0x80); + if (--d->m_num_flags_left == 0) { + d->m_num_flags_left = 8; + d->m_pLZ_flags = d->m_pLZ_code_buf++; + } + + s0 = s_tdefl_small_dist_sym[match_dist & 511]; + s1 = s_tdefl_large_dist_sym[(match_dist >> 8) & 127]; + d->m_huff_count[1][(match_dist < 512) ? s0 : s1]++; + + if (match_len >= TDEFL_MIN_MATCH_LEN) + d->m_huff_count[0][s_tdefl_len_sym[match_len - TDEFL_MIN_MATCH_LEN]]++; +} + +static mz_bool tdefl_compress_normal(tdefl_compressor *d) { + const mz_uint8 *pSrc = d->m_pSrc; + size_t src_buf_left = d->m_src_buf_left; + tdefl_flush flush = d->m_flush; + + while ((src_buf_left) || ((flush) && (d->m_lookahead_size))) { + mz_uint len_to_move, cur_match_dist, cur_match_len, cur_pos; + // Update dictionary and hash chains. Keeps the lookahead size equal to + // TDEFL_MAX_MATCH_LEN. 
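    // The dictionary is indexed by a hash of the next three input bytes
    // (built with TDEFL_LZ_HASH_SHIFT); m_hash[] holds the most recent
    // position for each bucket and m_next[] chains older positions that
    // share the bucket, which is the list tdefl_find_match() walks above.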
+ if ((d->m_lookahead_size + d->m_dict_size) >= (TDEFL_MIN_MATCH_LEN - 1)) { + mz_uint dst_pos = (d->m_lookahead_pos + d->m_lookahead_size) & + TDEFL_LZ_DICT_SIZE_MASK, + ins_pos = d->m_lookahead_pos + d->m_lookahead_size - 2; + mz_uint hash = (d->m_dict[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] + << TDEFL_LZ_HASH_SHIFT) ^ + d->m_dict[(ins_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK]; + mz_uint num_bytes_to_process = (mz_uint)MZ_MIN( + src_buf_left, TDEFL_MAX_MATCH_LEN - d->m_lookahead_size); + const mz_uint8 *pSrc_end = pSrc + num_bytes_to_process; + src_buf_left -= num_bytes_to_process; + d->m_lookahead_size += num_bytes_to_process; + while (pSrc != pSrc_end) { + mz_uint8 c = *pSrc++; + d->m_dict[dst_pos] = c; + if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1)) + d->m_dict[TDEFL_LZ_DICT_SIZE + dst_pos] = c; + hash = ((hash << TDEFL_LZ_HASH_SHIFT) ^ c) & (TDEFL_LZ_HASH_SIZE - 1); + d->m_next[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] = d->m_hash[hash]; + d->m_hash[hash] = (mz_uint16)(ins_pos); + dst_pos = (dst_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK; + ins_pos++; + } + } else { + while ((src_buf_left) && (d->m_lookahead_size < TDEFL_MAX_MATCH_LEN)) { + mz_uint8 c = *pSrc++; + mz_uint dst_pos = (d->m_lookahead_pos + d->m_lookahead_size) & + TDEFL_LZ_DICT_SIZE_MASK; + src_buf_left--; + d->m_dict[dst_pos] = c; + if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1)) + d->m_dict[TDEFL_LZ_DICT_SIZE + dst_pos] = c; + if ((++d->m_lookahead_size + d->m_dict_size) >= TDEFL_MIN_MATCH_LEN) { + mz_uint ins_pos = d->m_lookahead_pos + (d->m_lookahead_size - 1) - 2; + mz_uint hash = ((d->m_dict[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] + << (TDEFL_LZ_HASH_SHIFT * 2)) ^ + (d->m_dict[(ins_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK] + << TDEFL_LZ_HASH_SHIFT) ^ + c) & + (TDEFL_LZ_HASH_SIZE - 1); + d->m_next[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] = d->m_hash[hash]; + d->m_hash[hash] = (mz_uint16)(ins_pos); + } + } + } + d->m_dict_size = + MZ_MIN(TDEFL_LZ_DICT_SIZE - d->m_lookahead_size, d->m_dict_size); + if ((!flush) && (d->m_lookahead_size < TDEFL_MAX_MATCH_LEN)) + break; + + // Simple lazy/greedy parsing state machine. + len_to_move = 1; + cur_match_dist = 0; + cur_match_len = + d->m_saved_match_len ? 
d->m_saved_match_len : (TDEFL_MIN_MATCH_LEN - 1); + cur_pos = d->m_lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK; + if (d->m_flags & (TDEFL_RLE_MATCHES | TDEFL_FORCE_ALL_RAW_BLOCKS)) { + if ((d->m_dict_size) && (!(d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS))) { + mz_uint8 c = d->m_dict[(cur_pos - 1) & TDEFL_LZ_DICT_SIZE_MASK]; + cur_match_len = 0; + while (cur_match_len < d->m_lookahead_size) { + if (d->m_dict[cur_pos + cur_match_len] != c) + break; + cur_match_len++; + } + if (cur_match_len < TDEFL_MIN_MATCH_LEN) + cur_match_len = 0; + else + cur_match_dist = 1; + } + } else { + tdefl_find_match(d, d->m_lookahead_pos, d->m_dict_size, + d->m_lookahead_size, &cur_match_dist, &cur_match_len); + } + if (((cur_match_len == TDEFL_MIN_MATCH_LEN) && + (cur_match_dist >= 8U * 1024U)) || + (cur_pos == cur_match_dist) || + ((d->m_flags & TDEFL_FILTER_MATCHES) && (cur_match_len <= 5))) { + cur_match_dist = cur_match_len = 0; + } + if (d->m_saved_match_len) { + if (cur_match_len > d->m_saved_match_len) { + tdefl_record_literal(d, (mz_uint8)d->m_saved_lit); + if (cur_match_len >= 128) { + tdefl_record_match(d, cur_match_len, cur_match_dist); + d->m_saved_match_len = 0; + len_to_move = cur_match_len; + } else { + d->m_saved_lit = d->m_dict[cur_pos]; + d->m_saved_match_dist = cur_match_dist; + d->m_saved_match_len = cur_match_len; + } + } else { + tdefl_record_match(d, d->m_saved_match_len, d->m_saved_match_dist); + len_to_move = d->m_saved_match_len - 1; + d->m_saved_match_len = 0; + } + } else if (!cur_match_dist) + tdefl_record_literal(d, + d->m_dict[MZ_MIN(cur_pos, sizeof(d->m_dict) - 1)]); + else if ((d->m_greedy_parsing) || (d->m_flags & TDEFL_RLE_MATCHES) || + (cur_match_len >= 128)) { + tdefl_record_match(d, cur_match_len, cur_match_dist); + len_to_move = cur_match_len; + } else { + d->m_saved_lit = d->m_dict[MZ_MIN(cur_pos, sizeof(d->m_dict) - 1)]; + d->m_saved_match_dist = cur_match_dist; + d->m_saved_match_len = cur_match_len; + } + // Move the lookahead forward by len_to_move bytes. + d->m_lookahead_pos += len_to_move; + MZ_ASSERT(d->m_lookahead_size >= len_to_move); + d->m_lookahead_size -= len_to_move; + d->m_dict_size = MZ_MIN(d->m_dict_size + len_to_move, TDEFL_LZ_DICT_SIZE); + // Check if it's time to flush the current LZ codes to the internal output + // buffer. + if ((d->m_pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) || + ((d->m_total_lz_bytes > 31 * 1024) && + (((((mz_uint)(d->m_pLZ_code_buf - d->m_lz_code_buf) * 115) >> 7) >= + d->m_total_lz_bytes) || + (d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS)))) { + int n; + d->m_pSrc = pSrc; + d->m_src_buf_left = src_buf_left; + if ((n = tdefl_flush_block(d, 0)) != 0) + return (n < 0) ? MZ_FALSE : MZ_TRUE; + } + } + + d->m_pSrc = pSrc; + d->m_src_buf_left = src_buf_left; + return MZ_TRUE; +} + +static tdefl_status tdefl_flush_output_buffer(tdefl_compressor *d) { + if (d->m_pIn_buf_size) { + *d->m_pIn_buf_size = d->m_pSrc - (const mz_uint8 *)d->m_pIn_buf; + } + + if (d->m_pOut_buf_size) { + size_t n = MZ_MIN(*d->m_pOut_buf_size - d->m_out_buf_ofs, + d->m_output_flush_remaining); + memcpy((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs, + d->m_output_buf + d->m_output_flush_ofs, n); + d->m_output_flush_ofs += (mz_uint)n; + d->m_output_flush_remaining -= (mz_uint)n; + d->m_out_buf_ofs += n; + + *d->m_pOut_buf_size = d->m_out_buf_ofs; + } + + return (d->m_finished && !d->m_output_flush_remaining) ? 
TDEFL_STATUS_DONE + : TDEFL_STATUS_OKAY; +} + +tdefl_status tdefl_compress(tdefl_compressor *d, const void *pIn_buf, + size_t *pIn_buf_size, void *pOut_buf, + size_t *pOut_buf_size, tdefl_flush flush) { + if (!d) { + if (pIn_buf_size) + *pIn_buf_size = 0; + if (pOut_buf_size) + *pOut_buf_size = 0; + return TDEFL_STATUS_BAD_PARAM; + } + + d->m_pIn_buf = pIn_buf; + d->m_pIn_buf_size = pIn_buf_size; + d->m_pOut_buf = pOut_buf; + d->m_pOut_buf_size = pOut_buf_size; + d->m_pSrc = (const mz_uint8 *)(pIn_buf); + d->m_src_buf_left = pIn_buf_size ? *pIn_buf_size : 0; + d->m_out_buf_ofs = 0; + d->m_flush = flush; + + if (((d->m_pPut_buf_func != NULL) == + ((pOut_buf != NULL) || (pOut_buf_size != NULL))) || + (d->m_prev_return_status != TDEFL_STATUS_OKAY) || + (d->m_wants_to_finish && (flush != TDEFL_FINISH)) || + (pIn_buf_size && *pIn_buf_size && !pIn_buf) || + (pOut_buf_size && *pOut_buf_size && !pOut_buf)) { + if (pIn_buf_size) + *pIn_buf_size = 0; + if (pOut_buf_size) + *pOut_buf_size = 0; + return (d->m_prev_return_status = TDEFL_STATUS_BAD_PARAM); + } + d->m_wants_to_finish |= (flush == TDEFL_FINISH); + + if ((d->m_output_flush_remaining) || (d->m_finished)) + return (d->m_prev_return_status = tdefl_flush_output_buffer(d)); + +#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN + if (((d->m_flags & TDEFL_MAX_PROBES_MASK) == 1) && + ((d->m_flags & TDEFL_GREEDY_PARSING_FLAG) != 0) && + ((d->m_flags & (TDEFL_FILTER_MATCHES | TDEFL_FORCE_ALL_RAW_BLOCKS | + TDEFL_RLE_MATCHES)) == 0)) { + if (!tdefl_compress_fast(d)) + return d->m_prev_return_status; + } else +#endif // #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN + { + if (!tdefl_compress_normal(d)) + return d->m_prev_return_status; + } + + if ((d->m_flags & (TDEFL_WRITE_ZLIB_HEADER | TDEFL_COMPUTE_ADLER32)) && + (pIn_buf)) + d->m_adler32 = + (mz_uint32)mz_adler32(d->m_adler32, (const mz_uint8 *)pIn_buf, + d->m_pSrc - (const mz_uint8 *)pIn_buf); + + if ((flush) && (!d->m_lookahead_size) && (!d->m_src_buf_left) && + (!d->m_output_flush_remaining)) { + if (tdefl_flush_block(d, flush) < 0) + return d->m_prev_return_status; + d->m_finished = (flush == TDEFL_FINISH); + if (flush == TDEFL_FULL_FLUSH) { + MZ_CLEAR_OBJ(d->m_hash); + MZ_CLEAR_OBJ(d->m_next); + d->m_dict_size = 0; + } + } + + return (d->m_prev_return_status = tdefl_flush_output_buffer(d)); +} + +tdefl_status tdefl_compress_buffer(tdefl_compressor *d, const void *pIn_buf, + size_t in_buf_size, tdefl_flush flush) { + MZ_ASSERT(d->m_pPut_buf_func); + return tdefl_compress(d, pIn_buf, &in_buf_size, NULL, NULL, flush); +} + +tdefl_status tdefl_init(tdefl_compressor *d, + tdefl_put_buf_func_ptr pPut_buf_func, + void *pPut_buf_user, int flags) { + d->m_pPut_buf_func = pPut_buf_func; + d->m_pPut_buf_user = pPut_buf_user; + d->m_flags = (mz_uint)(flags); + d->m_max_probes[0] = 1 + ((flags & 0xFFF) + 2) / 3; + d->m_greedy_parsing = (flags & TDEFL_GREEDY_PARSING_FLAG) != 0; + d->m_max_probes[1] = 1 + (((flags & 0xFFF) >> 2) + 2) / 3; + if (!(flags & TDEFL_NONDETERMINISTIC_PARSING_FLAG)) + MZ_CLEAR_OBJ(d->m_hash); + d->m_lookahead_pos = d->m_lookahead_size = d->m_dict_size = + d->m_total_lz_bytes = d->m_lz_code_buf_dict_pos = d->m_bits_in = 0; + d->m_output_flush_ofs = d->m_output_flush_remaining = d->m_finished = + d->m_block_index = d->m_bit_buffer = d->m_wants_to_finish = 0; + d->m_pLZ_code_buf = d->m_lz_code_buf + 1; + d->m_pLZ_flags = d->m_lz_code_buf; + d->m_num_flags_left = 8; + d->m_pOutput_buf = d->m_output_buf; + d->m_pOutput_buf_end = d->m_output_buf; + 
d->m_prev_return_status = TDEFL_STATUS_OKAY; + d->m_saved_match_dist = d->m_saved_match_len = d->m_saved_lit = 0; + d->m_adler32 = 1; + d->m_pIn_buf = NULL; + d->m_pOut_buf = NULL; + d->m_pIn_buf_size = NULL; + d->m_pOut_buf_size = NULL; + d->m_flush = TDEFL_NO_FLUSH; + d->m_pSrc = NULL; + d->m_src_buf_left = 0; + d->m_out_buf_ofs = 0; + memset(&d->m_huff_count[0][0], 0, + sizeof(d->m_huff_count[0][0]) * TDEFL_MAX_HUFF_SYMBOLS_0); + memset(&d->m_huff_count[1][0], 0, + sizeof(d->m_huff_count[1][0]) * TDEFL_MAX_HUFF_SYMBOLS_1); + return TDEFL_STATUS_OKAY; +} + +tdefl_status tdefl_get_prev_return_status(tdefl_compressor *d) { + return d->m_prev_return_status; +} + +mz_uint32 tdefl_get_adler32(tdefl_compressor *d) { return d->m_adler32; } + +mz_bool tdefl_compress_mem_to_output(const void *pBuf, size_t buf_len, + tdefl_put_buf_func_ptr pPut_buf_func, + void *pPut_buf_user, int flags) { + tdefl_compressor *pComp; + mz_bool succeeded; + if (((buf_len) && (!pBuf)) || (!pPut_buf_func)) + return MZ_FALSE; + pComp = (tdefl_compressor *)MZ_MALLOC(sizeof(tdefl_compressor)); + if (!pComp) + return MZ_FALSE; + succeeded = (tdefl_init(pComp, pPut_buf_func, pPut_buf_user, flags) == + TDEFL_STATUS_OKAY); + succeeded = + succeeded && (tdefl_compress_buffer(pComp, pBuf, buf_len, TDEFL_FINISH) == + TDEFL_STATUS_DONE); + MZ_FREE(pComp); + return succeeded; +} + +typedef struct { + size_t m_size, m_capacity; + mz_uint8 *m_pBuf; + mz_bool m_expandable; +} tdefl_output_buffer; + +static mz_bool tdefl_output_buffer_putter(const void *pBuf, int len, + void *pUser) { + tdefl_output_buffer *p = (tdefl_output_buffer *)pUser; + size_t new_size = p->m_size + len; + if (new_size > p->m_capacity) { + size_t new_capacity = p->m_capacity; + mz_uint8 *pNew_buf; + if (!p->m_expandable) + return MZ_FALSE; + do { + new_capacity = MZ_MAX(128U, new_capacity << 1U); + } while (new_size > new_capacity); + pNew_buf = (mz_uint8 *)MZ_REALLOC(p->m_pBuf, new_capacity); + if (!pNew_buf) + return MZ_FALSE; + p->m_pBuf = pNew_buf; + p->m_capacity = new_capacity; + } + memcpy((mz_uint8 *)p->m_pBuf + p->m_size, pBuf, len); + p->m_size = new_size; + return MZ_TRUE; +} + +void *tdefl_compress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len, + size_t *pOut_len, int flags) { + tdefl_output_buffer out_buf; + MZ_CLEAR_OBJ(out_buf); + if (!pOut_len) + return MZ_FALSE; + else + *pOut_len = 0; + out_buf.m_expandable = MZ_TRUE; + if (!tdefl_compress_mem_to_output( + pSrc_buf, src_buf_len, tdefl_output_buffer_putter, &out_buf, flags)) + return NULL; + *pOut_len = out_buf.m_size; + return out_buf.m_pBuf; +} + +size_t tdefl_compress_mem_to_mem(void *pOut_buf, size_t out_buf_len, + const void *pSrc_buf, size_t src_buf_len, + int flags) { + tdefl_output_buffer out_buf; + MZ_CLEAR_OBJ(out_buf); + if (!pOut_buf) + return 0; + out_buf.m_pBuf = (mz_uint8 *)pOut_buf; + out_buf.m_capacity = out_buf_len; + if (!tdefl_compress_mem_to_output( + pSrc_buf, src_buf_len, tdefl_output_buffer_putter, &out_buf, flags)) + return 0; + return out_buf.m_size; +} + +#ifndef MINIZ_NO_ZLIB_APIS +static const mz_uint s_tdefl_num_probes[11] = {0, 1, 6, 32, 16, 32, + 128, 256, 512, 768, 1500}; + +// level may actually range from [0,10] (10 is a "hidden" max level, where we +// want a bit more compression and it's fine if throughput to fall off a cliff +// on some files). +mz_uint tdefl_create_comp_flags_from_zip_params(int level, int window_bits, + int strategy) { + mz_uint comp_flags = + s_tdefl_num_probes[(level >= 0) ? 
MZ_MIN(10, level) : MZ_DEFAULT_LEVEL] | + ((level <= 3) ? TDEFL_GREEDY_PARSING_FLAG : 0); + if (window_bits > 0) + comp_flags |= TDEFL_WRITE_ZLIB_HEADER; + + if (!level) + comp_flags |= TDEFL_FORCE_ALL_RAW_BLOCKS; + else if (strategy == MZ_FILTERED) + comp_flags |= TDEFL_FILTER_MATCHES; + else if (strategy == MZ_HUFFMAN_ONLY) + comp_flags &= ~TDEFL_MAX_PROBES_MASK; + else if (strategy == MZ_FIXED) + comp_flags |= TDEFL_FORCE_ALL_STATIC_BLOCKS; + else if (strategy == MZ_RLE) + comp_flags |= TDEFL_RLE_MATCHES; + + return comp_flags; +} +#endif // MINIZ_NO_ZLIB_APIS + +#ifdef _MSC_VER +#pragma warning(push) +#pragma warning(disable : 4204) // nonstandard extension used : non-constant + // aggregate initializer (also supported by GNU + // C and C99, so no big deal) +#endif + +// Simple PNG writer function by Alex Evans, 2011. Released into the public +// domain: https://gist.github.com/908299, more context at +// http://altdevblogaday.org/2011/04/06/a-smaller-jpg-encoder/. +// This is actually a modification of Alex's original code so PNG files +// generated by this function pass pngcheck. +void *tdefl_write_image_to_png_file_in_memory_ex(const void *pImage, int w, + int h, int num_chans, + size_t *pLen_out, + mz_uint level, mz_bool flip) { + // Using a local copy of this array here in case MINIZ_NO_ZLIB_APIS was + // defined. + static const mz_uint s_tdefl_png_num_probes[11] = { + 0, 1, 6, 32, 16, 32, 128, 256, 512, 768, 1500}; + tdefl_compressor *pComp = + (tdefl_compressor *)MZ_MALLOC(sizeof(tdefl_compressor)); + tdefl_output_buffer out_buf; + int i, bpl = w * num_chans, y, z; + mz_uint32 c; + *pLen_out = 0; + if (!pComp) + return NULL; + MZ_CLEAR_OBJ(out_buf); + out_buf.m_expandable = MZ_TRUE; + out_buf.m_capacity = 57 + MZ_MAX(64, (1 + bpl) * h); + if (NULL == (out_buf.m_pBuf = (mz_uint8 *)MZ_MALLOC(out_buf.m_capacity))) { + MZ_FREE(pComp); + return NULL; + } + // write dummy header + for (z = 41; z; --z) + tdefl_output_buffer_putter(&z, 1, &out_buf); + // compress image data + tdefl_init(pComp, tdefl_output_buffer_putter, &out_buf, + s_tdefl_png_num_probes[MZ_MIN(10, level)] | + TDEFL_WRITE_ZLIB_HEADER); + for (y = 0; y < h; ++y) { + tdefl_compress_buffer(pComp, &z, 1, TDEFL_NO_FLUSH); + tdefl_compress_buffer(pComp, + (mz_uint8 *)pImage + (flip ? 
(h - 1 - y) : y) * bpl, + bpl, TDEFL_NO_FLUSH); + } + if (tdefl_compress_buffer(pComp, NULL, 0, TDEFL_FINISH) != + TDEFL_STATUS_DONE) { + MZ_FREE(pComp); + MZ_FREE(out_buf.m_pBuf); + return NULL; + } + // write real header + *pLen_out = out_buf.m_size - 41; + { + static const mz_uint8 chans[] = {0x00, 0x00, 0x04, 0x02, 0x06}; + mz_uint8 pnghdr[41] = {0x89, + 0x50, + 0x4e, + 0x47, + 0x0d, + 0x0a, + 0x1a, + 0x0a, + 0x00, + 0x00, + 0x00, + 0x0d, + 0x49, + 0x48, + 0x44, + 0x52, + 0, + 0, + (mz_uint8)(w >> 8), + (mz_uint8)w, + 0, + 0, + (mz_uint8)(h >> 8), + (mz_uint8)h, + 8, + chans[num_chans], + 0, + 0, + 0, + 0, + 0, + 0, + 0, + (mz_uint8)(*pLen_out >> 24), + (mz_uint8)(*pLen_out >> 16), + (mz_uint8)(*pLen_out >> 8), + (mz_uint8)*pLen_out, + 0x49, + 0x44, + 0x41, + 0x54}; + c = (mz_uint32)mz_crc32(MZ_CRC32_INIT, pnghdr + 12, 17); + for (i = 0; i < 4; ++i, c <<= 8) + ((mz_uint8 *)(pnghdr + 29))[i] = (mz_uint8)(c >> 24); + memcpy(out_buf.m_pBuf, pnghdr, 41); + } + // write footer (IDAT CRC-32, followed by IEND chunk) + if (!tdefl_output_buffer_putter( + "\0\0\0\0\0\0\0\0\x49\x45\x4e\x44\xae\x42\x60\x82", 16, &out_buf)) { + *pLen_out = 0; + MZ_FREE(pComp); + MZ_FREE(out_buf.m_pBuf); + return NULL; + } + c = (mz_uint32)mz_crc32(MZ_CRC32_INIT, out_buf.m_pBuf + 41 - 4, + *pLen_out + 4); + for (i = 0; i < 4; ++i, c <<= 8) + (out_buf.m_pBuf + out_buf.m_size - 16)[i] = (mz_uint8)(c >> 24); + // compute final size of file, grab compressed data buffer and return + *pLen_out += 57; + MZ_FREE(pComp); + return out_buf.m_pBuf; +} +void *tdefl_write_image_to_png_file_in_memory(const void *pImage, int w, int h, + int num_chans, size_t *pLen_out) { + // Level 6 corresponds to TDEFL_DEFAULT_MAX_PROBES or MZ_DEFAULT_LEVEL (but we + // can't depend on MZ_DEFAULT_LEVEL being available in case the zlib API's + // where #defined out) + return tdefl_write_image_to_png_file_in_memory_ex(pImage, w, h, num_chans, + pLen_out, 6, MZ_FALSE); +} + +#ifdef _MSC_VER +#pragma warning(pop) +#endif + +// ------------------- .ZIP archive reading + +#ifndef MINIZ_NO_ARCHIVE_APIS + +#ifdef MINIZ_NO_STDIO +#define MZ_FILE void * +#else +#include +#include + +#if defined(_MSC_VER) +static FILE *mz_fopen(const char *pFilename, const char *pMode) { + FILE *pFile = NULL; + fopen_s(&pFile, pFilename, pMode); + return pFile; +} +static FILE *mz_freopen(const char *pPath, const char *pMode, FILE *pStream) { + FILE *pFile = NULL; + if (freopen_s(&pFile, pPath, pMode, pStream)) + return NULL; + return pFile; +} +#ifndef MINIZ_NO_TIME +#include +#endif +#define MZ_FILE FILE +#define MZ_FOPEN mz_fopen +#define MZ_FCLOSE fclose +#define MZ_FREAD fread +#define MZ_FWRITE fwrite +#define MZ_FTELL64 _ftelli64 +#define MZ_FSEEK64 _fseeki64 +#define MZ_FILE_STAT_STRUCT _stat +#define MZ_FILE_STAT _stat +#define MZ_FFLUSH fflush +#define MZ_FREOPEN mz_freopen +#define MZ_DELETE_FILE remove +#elif defined(__MINGW32__) +#ifndef MINIZ_NO_TIME +#include +#endif +#define MZ_FILE FILE +#define MZ_FOPEN(f, m) fopen(f, m) +#define MZ_FCLOSE fclose +#define MZ_FREAD fread +#define MZ_FWRITE fwrite +#define MZ_FTELL64 ftell +#define MZ_FSEEK64 fseek +#define MZ_FILE_STAT_STRUCT _stat +#define MZ_FILE_STAT _stat +#define MZ_FFLUSH fflush +#define MZ_FREOPEN(f, m, s) freopen(f, m, s) +#define MZ_DELETE_FILE remove +#elif defined(__TINYC__) +#ifndef MINIZ_NO_TIME +#include +#endif +#define MZ_FILE FILE +#define MZ_FOPEN(f, m) fopen(f, m) +#define MZ_FCLOSE fclose +#define MZ_FREAD fread +#define MZ_FWRITE fwrite +#define MZ_FTELL64 ftell +#define MZ_FSEEK64 
fseek +#define MZ_FILE_STAT_STRUCT stat +#define MZ_FILE_STAT stat +#define MZ_FFLUSH fflush +#define MZ_FREOPEN(f, m, s) freopen(f, m, s) +#define MZ_DELETE_FILE remove +#elif defined(__GNUC__) && _LARGEFILE64_SOURCE +#ifndef MINIZ_NO_TIME +#include +#endif +#define MZ_FILE FILE +#define MZ_FOPEN(f, m) fopen64(f, m) +#define MZ_FCLOSE fclose +#define MZ_FREAD fread +#define MZ_FWRITE fwrite +#define MZ_FTELL64 ftello64 +#define MZ_FSEEK64 fseeko64 +#define MZ_FILE_STAT_STRUCT stat64 +#define MZ_FILE_STAT stat64 +#define MZ_FFLUSH fflush +#define MZ_FREOPEN(p, m, s) freopen64(p, m, s) +#define MZ_DELETE_FILE remove +#else +#ifndef MINIZ_NO_TIME +#include +#endif +#define MZ_FILE FILE +#define MZ_FOPEN(f, m) fopen(f, m) +#define MZ_FCLOSE fclose +#define MZ_FREAD fread +#define MZ_FWRITE fwrite +#if _FILE_OFFSET_BITS == 64 || _POSIX_C_SOURCE >= 200112L +#define MZ_FTELL64 ftello +#define MZ_FSEEK64 fseeko +#else +#define MZ_FTELL64 ftell +#define MZ_FSEEK64 fseek +#endif +#define MZ_FILE_STAT_STRUCT stat +#define MZ_FILE_STAT stat +#define MZ_FFLUSH fflush +#define MZ_FREOPEN(f, m, s) freopen(f, m, s) +#define MZ_DELETE_FILE remove +#endif // #ifdef _MSC_VER +#endif // #ifdef MINIZ_NO_STDIO + +#define MZ_TOLOWER(c) ((((c) >= 'A') && ((c) <= 'Z')) ? ((c) - 'A' + 'a') : (c)) + +// Various ZIP archive enums. To completely avoid cross platform compiler +// alignment and platform endian issues, miniz.c doesn't use structs for any of +// this stuff. +enum { + // ZIP archive identifiers and record sizes + MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG = 0x06054b50, + MZ_ZIP_CENTRAL_DIR_HEADER_SIG = 0x02014b50, + MZ_ZIP_LOCAL_DIR_HEADER_SIG = 0x04034b50, + MZ_ZIP_LOCAL_DIR_HEADER_SIZE = 30, + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE = 46, + MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE = 22, + + /* ZIP64 archive identifier and record sizes */ + MZ_ZIP64_END_OF_CENTRAL_DIR_HEADER_SIG = 0x06064b50, + MZ_ZIP64_END_OF_CENTRAL_DIR_LOCATOR_SIG = 0x07064b50, + MZ_ZIP64_END_OF_CENTRAL_DIR_HEADER_SIZE = 56, + MZ_ZIP64_END_OF_CENTRAL_DIR_LOCATOR_SIZE = 20, + MZ_ZIP64_EXTENDED_INFORMATION_FIELD_HEADER_ID = 0x0001, + MZ_ZIP_DATA_DESCRIPTOR_ID = 0x08074b50, + MZ_ZIP_DATA_DESCRIPTER_SIZE64 = 24, + MZ_ZIP_DATA_DESCRIPTER_SIZE32 = 16, + + // Central directory header record offsets + MZ_ZIP_CDH_SIG_OFS = 0, + MZ_ZIP_CDH_VERSION_MADE_BY_OFS = 4, + MZ_ZIP_CDH_VERSION_NEEDED_OFS = 6, + MZ_ZIP_CDH_BIT_FLAG_OFS = 8, + MZ_ZIP_CDH_METHOD_OFS = 10, + MZ_ZIP_CDH_FILE_TIME_OFS = 12, + MZ_ZIP_CDH_FILE_DATE_OFS = 14, + MZ_ZIP_CDH_CRC32_OFS = 16, + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS = 20, + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS = 24, + MZ_ZIP_CDH_FILENAME_LEN_OFS = 28, + MZ_ZIP_CDH_EXTRA_LEN_OFS = 30, + MZ_ZIP_CDH_COMMENT_LEN_OFS = 32, + MZ_ZIP_CDH_DISK_START_OFS = 34, + MZ_ZIP_CDH_INTERNAL_ATTR_OFS = 36, + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS = 38, + MZ_ZIP_CDH_LOCAL_HEADER_OFS = 42, + // Local directory header offsets + MZ_ZIP_LDH_SIG_OFS = 0, + MZ_ZIP_LDH_VERSION_NEEDED_OFS = 4, + MZ_ZIP_LDH_BIT_FLAG_OFS = 6, + MZ_ZIP_LDH_METHOD_OFS = 8, + MZ_ZIP_LDH_FILE_TIME_OFS = 10, + MZ_ZIP_LDH_FILE_DATE_OFS = 12, + MZ_ZIP_LDH_CRC32_OFS = 14, + MZ_ZIP_LDH_COMPRESSED_SIZE_OFS = 18, + MZ_ZIP_LDH_DECOMPRESSED_SIZE_OFS = 22, + MZ_ZIP_LDH_FILENAME_LEN_OFS = 26, + MZ_ZIP_LDH_EXTRA_LEN_OFS = 28, + // End of central directory offsets + MZ_ZIP_ECDH_SIG_OFS = 0, + MZ_ZIP_ECDH_NUM_THIS_DISK_OFS = 4, + MZ_ZIP_ECDH_NUM_DISK_CDIR_OFS = 6, + MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS = 8, + MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS = 10, + MZ_ZIP_ECDH_CDIR_SIZE_OFS = 12, + MZ_ZIP_ECDH_CDIR_OFS_OFS = 16, 
+ MZ_ZIP_ECDH_COMMENT_SIZE_OFS = 20, + + /* ZIP64 End of central directory locator offsets */ + MZ_ZIP64_ECDL_SIG_OFS = 0, /* 4 bytes */ + MZ_ZIP64_ECDL_NUM_DISK_CDIR_OFS = 4, /* 4 bytes */ + MZ_ZIP64_ECDL_REL_OFS_TO_ZIP64_ECDR_OFS = 8, /* 8 bytes */ + MZ_ZIP64_ECDL_TOTAL_NUMBER_OF_DISKS_OFS = 16, /* 4 bytes */ + + /* ZIP64 End of central directory header offsets */ + MZ_ZIP64_ECDH_SIG_OFS = 0, /* 4 bytes */ + MZ_ZIP64_ECDH_SIZE_OF_RECORD_OFS = 4, /* 8 bytes */ + MZ_ZIP64_ECDH_VERSION_MADE_BY_OFS = 12, /* 2 bytes */ + MZ_ZIP64_ECDH_VERSION_NEEDED_OFS = 14, /* 2 bytes */ + MZ_ZIP64_ECDH_NUM_THIS_DISK_OFS = 16, /* 4 bytes */ + MZ_ZIP64_ECDH_NUM_DISK_CDIR_OFS = 20, /* 4 bytes */ + MZ_ZIP64_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS = 24, /* 8 bytes */ + MZ_ZIP64_ECDH_CDIR_TOTAL_ENTRIES_OFS = 32, /* 8 bytes */ + MZ_ZIP64_ECDH_CDIR_SIZE_OFS = 40, /* 8 bytes */ + MZ_ZIP64_ECDH_CDIR_OFS_OFS = 48, /* 8 bytes */ + MZ_ZIP_VERSION_MADE_BY_DOS_FILESYSTEM_ID = 0, + MZ_ZIP_DOS_DIR_ATTRIBUTE_BITFLAG = 0x10, + MZ_ZIP_GENERAL_PURPOSE_BIT_FLAG_IS_ENCRYPTED = 1, + MZ_ZIP_GENERAL_PURPOSE_BIT_FLAG_COMPRESSED_PATCH_FLAG = 32, + MZ_ZIP_GENERAL_PURPOSE_BIT_FLAG_USES_STRONG_ENCRYPTION = 64, + MZ_ZIP_GENERAL_PURPOSE_BIT_FLAG_LOCAL_DIR_IS_MASKED = 8192, + MZ_ZIP_GENERAL_PURPOSE_BIT_FLAG_UTF8 = 1 << 11 +}; + +typedef struct { + void *m_p; + size_t m_size, m_capacity; + mz_uint m_element_size; +} mz_zip_array; + +struct mz_zip_internal_state_tag { + mz_zip_array m_central_dir; + mz_zip_array m_central_dir_offsets; + mz_zip_array m_sorted_central_dir_offsets; + + /* The flags passed in when the archive is initially opened. */ + uint32_t m_init_flags; + + /* MZ_TRUE if the archive has a zip64 end of central directory headers, etc. + */ + mz_bool m_zip64; + + /* MZ_TRUE if we found zip64 extended info in the central directory (m_zip64 + * will also be slammed to true too, even if we didn't find a zip64 end of + * central dir header, etc.) */ + mz_bool m_zip64_has_extended_info_fields; + + /* These fields are used by the file, FILE, memory, and memory/heap read/write + * helpers. 
*/ + MZ_FILE *m_pFile; + mz_uint64 m_file_archive_start_ofs; + + void *m_pMem; + size_t m_mem_size; + size_t m_mem_capacity; +}; + +#define MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(array_ptr, element_size) \ + (array_ptr)->m_element_size = element_size +#define MZ_ZIP_ARRAY_ELEMENT(array_ptr, element_type, index) \ + ((element_type *)((array_ptr)->m_p))[index] + +static MZ_FORCEINLINE void mz_zip_array_clear(mz_zip_archive *pZip, + mz_zip_array *pArray) { + pZip->m_pFree(pZip->m_pAlloc_opaque, pArray->m_p); + memset(pArray, 0, sizeof(mz_zip_array)); +} + +static mz_bool mz_zip_array_ensure_capacity(mz_zip_archive *pZip, + mz_zip_array *pArray, + size_t min_new_capacity, + mz_uint growing) { + void *pNew_p; + size_t new_capacity = min_new_capacity; + MZ_ASSERT(pArray->m_element_size); + if (pArray->m_capacity >= min_new_capacity) + return MZ_TRUE; + if (growing) { + new_capacity = MZ_MAX(1, pArray->m_capacity); + while (new_capacity < min_new_capacity) + new_capacity *= 2; + } + if (NULL == (pNew_p = pZip->m_pRealloc(pZip->m_pAlloc_opaque, pArray->m_p, + pArray->m_element_size, new_capacity))) + return MZ_FALSE; + pArray->m_p = pNew_p; + pArray->m_capacity = new_capacity; + return MZ_TRUE; +} + +static MZ_FORCEINLINE mz_bool mz_zip_array_reserve(mz_zip_archive *pZip, + mz_zip_array *pArray, + size_t new_capacity, + mz_uint growing) { + if (new_capacity > pArray->m_capacity) { + if (!mz_zip_array_ensure_capacity(pZip, pArray, new_capacity, growing)) + return MZ_FALSE; + } + return MZ_TRUE; +} + +static MZ_FORCEINLINE mz_bool mz_zip_array_resize(mz_zip_archive *pZip, + mz_zip_array *pArray, + size_t new_size, + mz_uint growing) { + if (new_size > pArray->m_capacity) { + if (!mz_zip_array_ensure_capacity(pZip, pArray, new_size, growing)) + return MZ_FALSE; + } + pArray->m_size = new_size; + return MZ_TRUE; +} + +static MZ_FORCEINLINE mz_bool mz_zip_array_ensure_room(mz_zip_archive *pZip, + mz_zip_array *pArray, + size_t n) { + return mz_zip_array_reserve(pZip, pArray, pArray->m_size + n, MZ_TRUE); +} + +static MZ_FORCEINLINE mz_bool mz_zip_array_push_back(mz_zip_archive *pZip, + mz_zip_array *pArray, + const void *pElements, + size_t n) { + if (0 == n) + return MZ_TRUE; + if (!pElements) + return MZ_FALSE; + + size_t orig_size = pArray->m_size; + if (!mz_zip_array_resize(pZip, pArray, orig_size + n, MZ_TRUE)) + return MZ_FALSE; + memcpy((mz_uint8 *)pArray->m_p + orig_size * pArray->m_element_size, + pElements, n * pArray->m_element_size); + return MZ_TRUE; +} + +#ifndef MINIZ_NO_TIME +static time_t mz_zip_dos_to_time_t(int dos_time, int dos_date) { + struct tm tm; + memset(&tm, 0, sizeof(tm)); + tm.tm_isdst = -1; + tm.tm_year = ((dos_date >> 9) & 127) + 1980 - 1900; + tm.tm_mon = ((dos_date >> 5) & 15) - 1; + tm.tm_mday = dos_date & 31; + tm.tm_hour = (dos_time >> 11) & 31; + tm.tm_min = (dos_time >> 5) & 63; + tm.tm_sec = (dos_time << 1) & 62; + return mktime(&tm); +} + +#ifndef MINIZ_NO_ARCHIVE_WRITING_APIS +static void mz_zip_time_t_to_dos_time(time_t time, mz_uint16 *pDOS_time, + mz_uint16 *pDOS_date) { +#ifdef _MSC_VER + struct tm tm_struct; + struct tm *tm = &tm_struct; + errno_t err = localtime_s(tm, &time); + if (err) { + *pDOS_date = 0; + *pDOS_time = 0; + return; + } +#else + struct tm *tm = localtime(&time); +#endif /* #ifdef _MSC_VER */ + + *pDOS_time = (mz_uint16)(((tm->tm_hour) << 11) + ((tm->tm_min) << 5) + + ((tm->tm_sec) >> 1)); + *pDOS_date = (mz_uint16)(((tm->tm_year + 1900 - 1980) << 9) + + ((tm->tm_mon + 1) << 5) + tm->tm_mday); +} +#endif /* MINIZ_NO_ARCHIVE_WRITING_APIS */ + 
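The two helpers above convert between time_t and the packed MS-DOS date/time fields stored in ZIP headers: the date word holds the day in bits 0-4, the month in bits 5-8 and years-since-1980 in bits 9-15, while the time word holds two-second units in bits 0-4, minutes in bits 5-10 and hours in bits 11-15. A minimal standalone sketch of that bit layout (not part of miniz; the packed values are illustrative and only the shifts/masks mirror mz_zip_dos_to_time_t):

/* Standalone sketch: decode a DOS date/time pair with the same shifts
 * and masks mz_zip_dos_to_time_t uses above. Illustrative values only. */
#include <stdio.h>

int main(void) {
    /* 2022-12-03 17:00:20 packed as DOS date/time:
     * date = (years_since_1980 << 9) | (month << 5) | day
     * time = (hour << 11) | (minute << 5) | (second / 2) */
    unsigned dos_date = (42u << 9) | (12u << 5) | 3u;  /* 0x5583 */
    unsigned dos_time = (17u << 11) | (0u << 5) | 10u; /* 0x880A */

    printf("%04u-%02u-%02u %02u:%02u:%02u\n",
           ((dos_date >> 9) & 127u) + 1980u, /* year                 */
           (dos_date >> 5) & 15u,            /* month                */
           dos_date & 31u,                   /* day                  */
           (dos_time >> 11) & 31u,           /* hour                 */
           (dos_time >> 5) & 63u,            /* minute               */
           (dos_time & 31u) * 2u);           /* second (2s units)    */
    return 0;
}

Compiled on its own, this prints 2022-12-03 17:00:20, the same fields mz_zip_dos_to_time_t hands to mktime(); the two-second granularity of the time word is why odd seconds cannot round-trip through a ZIP header.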
+#ifndef MINIZ_NO_STDIO +#ifndef MINIZ_NO_ARCHIVE_WRITING_APIS +static mz_bool mz_zip_get_file_modified_time(const char *pFilename, + time_t *pTime) { + struct MZ_FILE_STAT_STRUCT file_stat; + + /* On Linux with x86 glibc, this call will fail on large files (I think >= + * 0x80000000 bytes) unless you compiled with _LARGEFILE64_SOURCE. Argh. */ + if (MZ_FILE_STAT(pFilename, &file_stat) != 0) + return MZ_FALSE; + + *pTime = file_stat.st_mtime; + + return MZ_TRUE; +} +#endif /* #ifndef MINIZ_NO_ARCHIVE_WRITING_APIS*/ + +static mz_bool mz_zip_set_file_times(const char *pFilename, time_t access_time, + time_t modified_time) { + struct utimbuf t; + + memset(&t, 0, sizeof(t)); + t.actime = access_time; + t.modtime = modified_time; + + return !utime(pFilename, &t); +} +#endif /* #ifndef MINIZ_NO_STDIO */ +#endif /* #ifndef MINIZ_NO_TIME */ + +static MZ_FORCEINLINE mz_bool mz_zip_set_error(mz_zip_archive *pZip, + mz_zip_error err_num) { + if (pZip) + pZip->m_last_error = err_num; + return MZ_FALSE; +} + +static mz_bool mz_zip_reader_init_internal(mz_zip_archive *pZip, + mz_uint32 flags) { + (void)flags; + if ((!pZip) || (pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_INVALID)) + return MZ_FALSE; + + if (!pZip->m_pAlloc) + pZip->m_pAlloc = def_alloc_func; + if (!pZip->m_pFree) + pZip->m_pFree = def_free_func; + if (!pZip->m_pRealloc) + pZip->m_pRealloc = def_realloc_func; + + pZip->m_zip_mode = MZ_ZIP_MODE_READING; + pZip->m_archive_size = 0; + pZip->m_central_directory_file_ofs = 0; + pZip->m_total_files = 0; + + if (NULL == (pZip->m_pState = (mz_zip_internal_state *)pZip->m_pAlloc( + pZip->m_pAlloc_opaque, 1, sizeof(mz_zip_internal_state)))) + return MZ_FALSE; + memset(pZip->m_pState, 0, sizeof(mz_zip_internal_state)); + MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir, + sizeof(mz_uint8)); + MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir_offsets, + sizeof(mz_uint32)); + MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_sorted_central_dir_offsets, + sizeof(mz_uint32)); + return MZ_TRUE; +} + +static MZ_FORCEINLINE mz_bool +mz_zip_reader_filename_less(const mz_zip_array *pCentral_dir_array, + const mz_zip_array *pCentral_dir_offsets, + mz_uint l_index, mz_uint r_index) { + const mz_uint8 *pL = &MZ_ZIP_ARRAY_ELEMENT( + pCentral_dir_array, mz_uint8, + MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets, mz_uint32, + l_index)), + *pE; + const mz_uint8 *pR = &MZ_ZIP_ARRAY_ELEMENT( + pCentral_dir_array, mz_uint8, + MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets, mz_uint32, r_index)); + mz_uint l_len = MZ_READ_LE16(pL + MZ_ZIP_CDH_FILENAME_LEN_OFS), + r_len = MZ_READ_LE16(pR + MZ_ZIP_CDH_FILENAME_LEN_OFS); + mz_uint8 l = 0, r = 0; + pL += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE; + pR += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE; + pE = pL + MZ_MIN(l_len, r_len); + while (pL < pE) { + if ((l = MZ_TOLOWER(*pL)) != (r = MZ_TOLOWER(*pR))) + break; + pL++; + pR++; + } + return (pL == pE) ? (l_len < r_len) : (l < r); +} + +#define MZ_SWAP_UINT32(a, b) \ + do { \ + mz_uint32 t = a; \ + a = b; \ + b = t; \ + } \ + MZ_MACRO_END + +// Heap sort of lowercased filenames, used to help accelerate plain central +// directory searches by mz_zip_reader_locate_file(). (Could also use qsort(), +// but it could allocate memory.) 
+static void +mz_zip_reader_sort_central_dir_offsets_by_filename(mz_zip_archive *pZip) { + mz_zip_internal_state *pState = pZip->m_pState; + const mz_zip_array *pCentral_dir_offsets = &pState->m_central_dir_offsets; + const mz_zip_array *pCentral_dir = &pState->m_central_dir; + mz_uint32 *pIndices = &MZ_ZIP_ARRAY_ELEMENT( + &pState->m_sorted_central_dir_offsets, mz_uint32, 0); + const int size = pZip->m_total_files; + int start = (size - 2) >> 1, end; + while (start >= 0) { + int child, root = start; + for (;;) { + if ((child = (root << 1) + 1) >= size) + break; + child += + (((child + 1) < size) && + (mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets, + pIndices[child], pIndices[child + 1]))); + if (!mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets, + pIndices[root], pIndices[child])) + break; + MZ_SWAP_UINT32(pIndices[root], pIndices[child]); + root = child; + } + start--; + } + + end = size - 1; + while (end > 0) { + int child, root = 0; + MZ_SWAP_UINT32(pIndices[end], pIndices[0]); + for (;;) { + if ((child = (root << 1) + 1) >= end) + break; + child += + (((child + 1) < end) && + mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets, + pIndices[child], pIndices[child + 1])); + if (!mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets, + pIndices[root], pIndices[child])) + break; + MZ_SWAP_UINT32(pIndices[root], pIndices[child]); + root = child; + } + end--; + } +} + +static mz_bool mz_zip_reader_locate_header_sig(mz_zip_archive *pZip, + mz_uint32 record_sig, + mz_uint32 record_size, + mz_int64 *pOfs) { + mz_int64 cur_file_ofs; + mz_uint32 buf_u32[4096 / sizeof(mz_uint32)]; + mz_uint8 *pBuf = (mz_uint8 *)buf_u32; + + /* Basic sanity checks - reject files which are too small */ + if (pZip->m_archive_size < record_size) + return MZ_FALSE; + + /* Find the record by scanning the file from the end towards the beginning. 
*/ + cur_file_ofs = + MZ_MAX((mz_int64)pZip->m_archive_size - (mz_int64)sizeof(buf_u32), 0); + for (;;) { + int i, + n = (int)MZ_MIN(sizeof(buf_u32), pZip->m_archive_size - cur_file_ofs); + + if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf, n) != (mz_uint)n) + return MZ_FALSE; + + for (i = n - 4; i >= 0; --i) { + mz_uint s = MZ_READ_LE32(pBuf + i); + if (s == record_sig) { + if ((pZip->m_archive_size - (cur_file_ofs + i)) >= record_size) + break; + } + } + + if (i >= 0) { + cur_file_ofs += i; + break; + } + + /* Give up if we've searched the entire file, or we've gone back "too far" + * (~64kb) */ + if ((!cur_file_ofs) || ((pZip->m_archive_size - cur_file_ofs) >= + (MZ_UINT16_MAX + record_size))) + return MZ_FALSE; + + cur_file_ofs = MZ_MAX(cur_file_ofs - (sizeof(buf_u32) - 3), 0); + } + + *pOfs = cur_file_ofs; + return MZ_TRUE; +} + +static mz_bool mz_zip_reader_read_central_dir(mz_zip_archive *pZip, + mz_uint flags) { + mz_uint cdir_size = 0, cdir_entries_on_this_disk = 0, num_this_disk = 0, + cdir_disk_index = 0; + mz_uint64 cdir_ofs = 0; + mz_int64 cur_file_ofs = 0; + const mz_uint8 *p; + + mz_uint32 buf_u32[4096 / sizeof(mz_uint32)]; + mz_uint8 *pBuf = (mz_uint8 *)buf_u32; + mz_bool sort_central_dir = + ((flags & MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY) == 0); + mz_uint32 zip64_end_of_central_dir_locator_u32 + [(MZ_ZIP64_END_OF_CENTRAL_DIR_LOCATOR_SIZE + sizeof(mz_uint32) - 1) / + sizeof(mz_uint32)]; + mz_uint8 *pZip64_locator = (mz_uint8 *)zip64_end_of_central_dir_locator_u32; + + mz_uint32 zip64_end_of_central_dir_header_u32 + [(MZ_ZIP64_END_OF_CENTRAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) / + sizeof(mz_uint32)]; + mz_uint8 *pZip64_end_of_central_dir = + (mz_uint8 *)zip64_end_of_central_dir_header_u32; + + mz_uint64 zip64_end_of_central_dir_ofs = 0; + + /* Basic sanity checks - reject files which are too small, and check the first + * 4 bytes of the file to make sure a local header is there. */ + if (pZip->m_archive_size < MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE) + return mz_zip_set_error(pZip, MZ_ZIP_NOT_AN_ARCHIVE); + + if (!mz_zip_reader_locate_header_sig( + pZip, MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG, + MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE, &cur_file_ofs)) + return mz_zip_set_error(pZip, MZ_ZIP_FAILED_FINDING_CENTRAL_DIR); + + /* Read and verify the end of central directory record. 
*/ + if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf, + MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE) != + MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE) + return mz_zip_set_error(pZip, MZ_ZIP_FILE_READ_FAILED); + + if (MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_SIG_OFS) != + MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG) + return mz_zip_set_error(pZip, MZ_ZIP_NOT_AN_ARCHIVE); + + if (cur_file_ofs >= (MZ_ZIP64_END_OF_CENTRAL_DIR_LOCATOR_SIZE + + MZ_ZIP64_END_OF_CENTRAL_DIR_HEADER_SIZE)) { + if (pZip->m_pRead(pZip->m_pIO_opaque, + cur_file_ofs - MZ_ZIP64_END_OF_CENTRAL_DIR_LOCATOR_SIZE, + pZip64_locator, + MZ_ZIP64_END_OF_CENTRAL_DIR_LOCATOR_SIZE) == + MZ_ZIP64_END_OF_CENTRAL_DIR_LOCATOR_SIZE) { + if (MZ_READ_LE32(pZip64_locator + MZ_ZIP64_ECDL_SIG_OFS) == + MZ_ZIP64_END_OF_CENTRAL_DIR_LOCATOR_SIG) { + zip64_end_of_central_dir_ofs = MZ_READ_LE64( + pZip64_locator + MZ_ZIP64_ECDL_REL_OFS_TO_ZIP64_ECDR_OFS); + if (zip64_end_of_central_dir_ofs > + (pZip->m_archive_size - MZ_ZIP64_END_OF_CENTRAL_DIR_HEADER_SIZE)) + return mz_zip_set_error(pZip, MZ_ZIP_NOT_AN_ARCHIVE); + + if (pZip->m_pRead(pZip->m_pIO_opaque, zip64_end_of_central_dir_ofs, + pZip64_end_of_central_dir, + MZ_ZIP64_END_OF_CENTRAL_DIR_HEADER_SIZE) == + MZ_ZIP64_END_OF_CENTRAL_DIR_HEADER_SIZE) { + if (MZ_READ_LE32(pZip64_end_of_central_dir + MZ_ZIP64_ECDH_SIG_OFS) == + MZ_ZIP64_END_OF_CENTRAL_DIR_HEADER_SIG) { + pZip->m_pState->m_zip64 = MZ_TRUE; + } + } + } + } + } + + pZip->m_total_files = MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS); + cdir_entries_on_this_disk = + MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS); + num_this_disk = MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_NUM_THIS_DISK_OFS); + cdir_disk_index = MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_NUM_DISK_CDIR_OFS); + cdir_size = MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_CDIR_SIZE_OFS); + cdir_ofs = MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_CDIR_OFS_OFS); + + if (pZip->m_pState->m_zip64) { + mz_uint32 zip64_total_num_of_disks = + MZ_READ_LE32(pZip64_locator + MZ_ZIP64_ECDL_TOTAL_NUMBER_OF_DISKS_OFS); + mz_uint64 zip64_cdir_total_entries = MZ_READ_LE64( + pZip64_end_of_central_dir + MZ_ZIP64_ECDH_CDIR_TOTAL_ENTRIES_OFS); + mz_uint64 zip64_cdir_total_entries_on_this_disk = MZ_READ_LE64( + pZip64_end_of_central_dir + MZ_ZIP64_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS); + mz_uint64 zip64_size_of_end_of_central_dir_record = MZ_READ_LE64( + pZip64_end_of_central_dir + MZ_ZIP64_ECDH_SIZE_OF_RECORD_OFS); + mz_uint64 zip64_size_of_central_directory = + MZ_READ_LE64(pZip64_end_of_central_dir + MZ_ZIP64_ECDH_CDIR_SIZE_OFS); + + if (zip64_size_of_end_of_central_dir_record < + (MZ_ZIP64_END_OF_CENTRAL_DIR_HEADER_SIZE - 12)) + return mz_zip_set_error(pZip, MZ_ZIP_INVALID_HEADER_OR_CORRUPTED); + + if (zip64_total_num_of_disks != 1U) + return mz_zip_set_error(pZip, MZ_ZIP_UNSUPPORTED_MULTIDISK); + + /* Check for miniz's practical limits */ + if (zip64_cdir_total_entries > MZ_UINT32_MAX) + return mz_zip_set_error(pZip, MZ_ZIP_TOO_MANY_FILES); + + pZip->m_total_files = (mz_uint32)zip64_cdir_total_entries; + + if (zip64_cdir_total_entries_on_this_disk > MZ_UINT32_MAX) + return mz_zip_set_error(pZip, MZ_ZIP_TOO_MANY_FILES); + + cdir_entries_on_this_disk = + (mz_uint32)zip64_cdir_total_entries_on_this_disk; + + /* Check for miniz's current practical limits (sorry, this should be enough + * for millions of files) */ + if (zip64_size_of_central_directory > MZ_UINT32_MAX) + return mz_zip_set_error(pZip, MZ_ZIP_UNSUPPORTED_CDIR_SIZE); + + cdir_size = (mz_uint32)zip64_size_of_central_directory; + + num_this_disk = 
MZ_READ_LE32(pZip64_end_of_central_dir + + MZ_ZIP64_ECDH_NUM_THIS_DISK_OFS); + + cdir_disk_index = MZ_READ_LE32(pZip64_end_of_central_dir + + MZ_ZIP64_ECDH_NUM_DISK_CDIR_OFS); + + cdir_ofs = + MZ_READ_LE64(pZip64_end_of_central_dir + MZ_ZIP64_ECDH_CDIR_OFS_OFS); + } + + if (pZip->m_total_files != cdir_entries_on_this_disk) + return mz_zip_set_error(pZip, MZ_ZIP_UNSUPPORTED_MULTIDISK); + + if (((num_this_disk | cdir_disk_index) != 0) && + ((num_this_disk != 1) || (cdir_disk_index != 1))) + return mz_zip_set_error(pZip, MZ_ZIP_UNSUPPORTED_MULTIDISK); + + if (cdir_size < pZip->m_total_files * MZ_ZIP_CENTRAL_DIR_HEADER_SIZE) + return mz_zip_set_error(pZip, MZ_ZIP_INVALID_HEADER_OR_CORRUPTED); + + if ((cdir_ofs + (mz_uint64)cdir_size) > pZip->m_archive_size) + return mz_zip_set_error(pZip, MZ_ZIP_INVALID_HEADER_OR_CORRUPTED); + + pZip->m_central_directory_file_ofs = cdir_ofs; + + if (pZip->m_total_files) { + mz_uint i, n; + /* Read the entire central directory into a heap block, and allocate another + * heap block to hold the unsorted central dir file record offsets, and + * possibly another to hold the sorted indices. */ + if ((!mz_zip_array_resize(pZip, &pZip->m_pState->m_central_dir, cdir_size, + MZ_FALSE)) || + (!mz_zip_array_resize(pZip, &pZip->m_pState->m_central_dir_offsets, + pZip->m_total_files, MZ_FALSE))) + return mz_zip_set_error(pZip, MZ_ZIP_ALLOC_FAILED); + + if (sort_central_dir) { + if (!mz_zip_array_resize(pZip, + &pZip->m_pState->m_sorted_central_dir_offsets, + pZip->m_total_files, MZ_FALSE)) + return mz_zip_set_error(pZip, MZ_ZIP_ALLOC_FAILED); + } + + if (pZip->m_pRead(pZip->m_pIO_opaque, cdir_ofs, + pZip->m_pState->m_central_dir.m_p, + cdir_size) != cdir_size) + return mz_zip_set_error(pZip, MZ_ZIP_FILE_READ_FAILED); + + /* Now create an index into the central directory file records, do some + * basic sanity checking on each record */ + p = (const mz_uint8 *)pZip->m_pState->m_central_dir.m_p; + for (n = cdir_size, i = 0; i < pZip->m_total_files; ++i) { + mz_uint total_header_size, disk_index, bit_flags, filename_size, + ext_data_size; + mz_uint64 comp_size, decomp_size, local_header_ofs; + + if ((n < MZ_ZIP_CENTRAL_DIR_HEADER_SIZE) || + (MZ_READ_LE32(p) != MZ_ZIP_CENTRAL_DIR_HEADER_SIG)) + return mz_zip_set_error(pZip, MZ_ZIP_INVALID_HEADER_OR_CORRUPTED); + + MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets, mz_uint32, + i) = + (mz_uint32)(p - (const mz_uint8 *)pZip->m_pState->m_central_dir.m_p); + + if (sort_central_dir) + MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_sorted_central_dir_offsets, + mz_uint32, i) = i; + + comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS); + decomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS); + local_header_ofs = MZ_READ_LE32(p + MZ_ZIP_CDH_LOCAL_HEADER_OFS); + filename_size = MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS); + ext_data_size = MZ_READ_LE16(p + MZ_ZIP_CDH_EXTRA_LEN_OFS); + + if ((!pZip->m_pState->m_zip64_has_extended_info_fields) && + (ext_data_size) && + (MZ_MAX(MZ_MAX(comp_size, decomp_size), local_header_ofs) == + MZ_UINT32_MAX)) { + /* Attempt to find zip64 extended information field in the entry's extra + * data */ + mz_uint32 extra_size_remaining = ext_data_size; + + if (extra_size_remaining) { + const mz_uint8 *pExtra_data; + void *buf = NULL; + + if (MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + filename_size + ext_data_size > + n) { + buf = MZ_MALLOC(ext_data_size); + if (buf == NULL) + return mz_zip_set_error(pZip, MZ_ZIP_ALLOC_FAILED); + + if (pZip->m_pRead(pZip->m_pIO_opaque, + cdir_ofs + 
MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + + filename_size, + buf, ext_data_size) != ext_data_size) { + MZ_FREE(buf); + return mz_zip_set_error(pZip, MZ_ZIP_FILE_READ_FAILED); + } + + pExtra_data = (mz_uint8 *)buf; + } else { + pExtra_data = p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + filename_size; + } + + do { + mz_uint32 field_id; + mz_uint32 field_data_size; + + if (extra_size_remaining < (sizeof(mz_uint16) * 2)) { + MZ_FREE(buf); + return mz_zip_set_error(pZip, MZ_ZIP_INVALID_HEADER_OR_CORRUPTED); + } + + field_id = MZ_READ_LE16(pExtra_data); + field_data_size = MZ_READ_LE16(pExtra_data + sizeof(mz_uint16)); + + if ((field_data_size + sizeof(mz_uint16) * 2) > + extra_size_remaining) { + MZ_FREE(buf); + return mz_zip_set_error(pZip, MZ_ZIP_INVALID_HEADER_OR_CORRUPTED); + } + + if (field_id == MZ_ZIP64_EXTENDED_INFORMATION_FIELD_HEADER_ID) { + /* Ok, the archive didn't have any zip64 headers but it uses a + * zip64 extended information field so mark it as zip64 anyway + * (this can occur with infozip's zip util when it reads + * compresses files from stdin). */ + pZip->m_pState->m_zip64 = MZ_TRUE; + pZip->m_pState->m_zip64_has_extended_info_fields = MZ_TRUE; + break; + } + + pExtra_data += sizeof(mz_uint16) * 2 + field_data_size; + extra_size_remaining = + extra_size_remaining - sizeof(mz_uint16) * 2 - field_data_size; + } while (extra_size_remaining); + + MZ_FREE(buf); + } + } + + /* I've seen archives that aren't marked as zip64 that uses zip64 ext + * data, argh */ + if ((comp_size != MZ_UINT32_MAX) && (decomp_size != MZ_UINT32_MAX)) { + if (((!MZ_READ_LE32(p + MZ_ZIP_CDH_METHOD_OFS)) && + (decomp_size != comp_size)) || + (decomp_size && !comp_size)) + return mz_zip_set_error(pZip, MZ_ZIP_INVALID_HEADER_OR_CORRUPTED); + } + + disk_index = MZ_READ_LE16(p + MZ_ZIP_CDH_DISK_START_OFS); + if ((disk_index == MZ_UINT16_MAX) || + ((disk_index != num_this_disk) && (disk_index != 1))) + return mz_zip_set_error(pZip, MZ_ZIP_UNSUPPORTED_MULTIDISK); + + if (comp_size != MZ_UINT32_MAX) { + if (((mz_uint64)MZ_READ_LE32(p + MZ_ZIP_CDH_LOCAL_HEADER_OFS) + + MZ_ZIP_LOCAL_DIR_HEADER_SIZE + comp_size) > pZip->m_archive_size) + return mz_zip_set_error(pZip, MZ_ZIP_INVALID_HEADER_OR_CORRUPTED); + } + + bit_flags = MZ_READ_LE16(p + MZ_ZIP_CDH_BIT_FLAG_OFS); + if (bit_flags & MZ_ZIP_GENERAL_PURPOSE_BIT_FLAG_LOCAL_DIR_IS_MASKED) + return mz_zip_set_error(pZip, MZ_ZIP_UNSUPPORTED_ENCRYPTION); + + if ((total_header_size = MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + + MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS) + + MZ_READ_LE16(p + MZ_ZIP_CDH_EXTRA_LEN_OFS) + + MZ_READ_LE16(p + MZ_ZIP_CDH_COMMENT_LEN_OFS)) > + n) + return mz_zip_set_error(pZip, MZ_ZIP_INVALID_HEADER_OR_CORRUPTED); + + n -= total_header_size; + p += total_header_size; + } + } + + if (sort_central_dir) + mz_zip_reader_sort_central_dir_offsets_by_filename(pZip); + + return MZ_TRUE; +} + +mz_bool mz_zip_reader_init(mz_zip_archive *pZip, mz_uint64 size, + mz_uint32 flags) { + if ((!pZip) || (!pZip->m_pRead)) + return MZ_FALSE; + if (!mz_zip_reader_init_internal(pZip, flags)) + return MZ_FALSE; + pZip->m_archive_size = size; + if (!mz_zip_reader_read_central_dir(pZip, flags)) { + mz_zip_reader_end(pZip); + return MZ_FALSE; + } + return MZ_TRUE; +} + +static size_t mz_zip_mem_read_func(void *pOpaque, mz_uint64 file_ofs, + void *pBuf, size_t n) { + mz_zip_archive *pZip = (mz_zip_archive *)pOpaque; + size_t s = (file_ofs >= pZip->m_archive_size) + ? 
0 + : (size_t)MZ_MIN(pZip->m_archive_size - file_ofs, n); + memcpy(pBuf, (const mz_uint8 *)pZip->m_pState->m_pMem + file_ofs, s); + return s; +} + +mz_bool mz_zip_reader_init_mem(mz_zip_archive *pZip, const void *pMem, + size_t size, mz_uint32 flags) { + if (!mz_zip_reader_init_internal(pZip, flags)) + return MZ_FALSE; + pZip->m_archive_size = size; + pZip->m_pRead = mz_zip_mem_read_func; + pZip->m_pIO_opaque = pZip; +#ifdef __cplusplus + pZip->m_pState->m_pMem = const_cast(pMem); +#else + pZip->m_pState->m_pMem = (void *)pMem; +#endif + pZip->m_pState->m_mem_size = size; + if (!mz_zip_reader_read_central_dir(pZip, flags)) { + mz_zip_reader_end(pZip); + return MZ_FALSE; + } + return MZ_TRUE; +} + +#ifndef MINIZ_NO_STDIO +static size_t mz_zip_file_read_func(void *pOpaque, mz_uint64 file_ofs, + void *pBuf, size_t n) { + mz_zip_archive *pZip = (mz_zip_archive *)pOpaque; + mz_int64 cur_ofs = MZ_FTELL64(pZip->m_pState->m_pFile); + if (((mz_int64)file_ofs < 0) || + (((cur_ofs != (mz_int64)file_ofs)) && + (MZ_FSEEK64(pZip->m_pState->m_pFile, (mz_int64)file_ofs, SEEK_SET)))) + return 0; + return MZ_FREAD(pBuf, 1, n, pZip->m_pState->m_pFile); +} + +mz_bool mz_zip_reader_init_file(mz_zip_archive *pZip, const char *pFilename, + mz_uint32 flags) { + mz_uint64 file_size; + MZ_FILE *pFile = MZ_FOPEN(pFilename, "rb"); + if (!pFile) + return MZ_FALSE; + if (MZ_FSEEK64(pFile, 0, SEEK_END)) { + MZ_FCLOSE(pFile); + return MZ_FALSE; + } + file_size = MZ_FTELL64(pFile); + if (!mz_zip_reader_init_internal(pZip, flags)) { + MZ_FCLOSE(pFile); + return MZ_FALSE; + } + pZip->m_pRead = mz_zip_file_read_func; + pZip->m_pIO_opaque = pZip; + pZip->m_pState->m_pFile = pFile; + pZip->m_archive_size = file_size; + if (!mz_zip_reader_read_central_dir(pZip, flags)) { + mz_zip_reader_end(pZip); + return MZ_FALSE; + } + return MZ_TRUE; +} +#endif // #ifndef MINIZ_NO_STDIO + +mz_uint mz_zip_reader_get_num_files(mz_zip_archive *pZip) { + return pZip ? pZip->m_total_files : 0; +} + +static MZ_FORCEINLINE const mz_uint8 * +mz_zip_reader_get_cdh(mz_zip_archive *pZip, mz_uint file_index) { + if ((!pZip) || (!pZip->m_pState) || (file_index >= pZip->m_total_files) || + (pZip->m_zip_mode != MZ_ZIP_MODE_READING)) + return NULL; + return &MZ_ZIP_ARRAY_ELEMENT( + &pZip->m_pState->m_central_dir, mz_uint8, + MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets, mz_uint32, + file_index)); +} + +mz_bool mz_zip_reader_is_file_encrypted(mz_zip_archive *pZip, + mz_uint file_index) { + mz_uint m_bit_flag; + const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index); + if (!p) + return MZ_FALSE; + m_bit_flag = MZ_READ_LE16(p + MZ_ZIP_CDH_BIT_FLAG_OFS); + return (m_bit_flag & 1); +} + +mz_bool mz_zip_reader_is_file_a_directory(mz_zip_archive *pZip, + mz_uint file_index) { + mz_uint filename_len, external_attr; + const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index); + if (!p) + return MZ_FALSE; + + // First see if the filename ends with a '/' character. + filename_len = MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS); + if (filename_len) { + if (*(p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + filename_len - 1) == '/') + return MZ_TRUE; + } + + // Bugfix: This code was also checking if the internal attribute was non-zero, + // which wasn't correct. Most/all zip writers (hopefully) set DOS + // file/directory attributes in the low 16-bits, so check for the DOS + // directory flag and ignore the source OS ID in the created by field. + // FIXME: Remove this check? Is it necessary - we already check the filename. 
+ external_attr = MZ_READ_LE32(p + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS); + if ((external_attr & 0x10) != 0) + return MZ_TRUE; + + return MZ_FALSE; +} + +mz_bool mz_zip_reader_file_stat(mz_zip_archive *pZip, mz_uint file_index, + mz_zip_archive_file_stat *pStat) { + mz_uint n; + const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index); + if ((!p) || (!pStat)) + return MZ_FALSE; + + // Unpack the central directory record. + pStat->m_file_index = file_index; + pStat->m_central_dir_ofs = MZ_ZIP_ARRAY_ELEMENT( + &pZip->m_pState->m_central_dir_offsets, mz_uint32, file_index); + pStat->m_version_made_by = MZ_READ_LE16(p + MZ_ZIP_CDH_VERSION_MADE_BY_OFS); + pStat->m_version_needed = MZ_READ_LE16(p + MZ_ZIP_CDH_VERSION_NEEDED_OFS); + pStat->m_bit_flag = MZ_READ_LE16(p + MZ_ZIP_CDH_BIT_FLAG_OFS); + pStat->m_method = MZ_READ_LE16(p + MZ_ZIP_CDH_METHOD_OFS); +#ifndef MINIZ_NO_TIME + pStat->m_time = + mz_zip_dos_to_time_t(MZ_READ_LE16(p + MZ_ZIP_CDH_FILE_TIME_OFS), + MZ_READ_LE16(p + MZ_ZIP_CDH_FILE_DATE_OFS)); +#endif + pStat->m_crc32 = MZ_READ_LE32(p + MZ_ZIP_CDH_CRC32_OFS); + pStat->m_comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS); + pStat->m_uncomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS); + pStat->m_internal_attr = MZ_READ_LE16(p + MZ_ZIP_CDH_INTERNAL_ATTR_OFS); + pStat->m_external_attr = MZ_READ_LE32(p + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS); + pStat->m_local_header_ofs = MZ_READ_LE32(p + MZ_ZIP_CDH_LOCAL_HEADER_OFS); + + // Copy as much of the filename and comment as possible. + n = MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS); + n = MZ_MIN(n, MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE - 1); + memcpy(pStat->m_filename, p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, n); + pStat->m_filename[n] = '\0'; + + n = MZ_READ_LE16(p + MZ_ZIP_CDH_COMMENT_LEN_OFS); + n = MZ_MIN(n, MZ_ZIP_MAX_ARCHIVE_FILE_COMMENT_SIZE - 1); + pStat->m_comment_size = n; + memcpy(pStat->m_comment, + p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + + MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS) + + MZ_READ_LE16(p + MZ_ZIP_CDH_EXTRA_LEN_OFS), + n); + pStat->m_comment[n] = '\0'; + + return MZ_TRUE; +} + +mz_uint mz_zip_reader_get_filename(mz_zip_archive *pZip, mz_uint file_index, + char *pFilename, mz_uint filename_buf_size) { + mz_uint n; + const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index); + if (!p) { + if (filename_buf_size) + pFilename[0] = '\0'; + return 0; + } + n = MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS); + if (filename_buf_size) { + n = MZ_MIN(n, filename_buf_size - 1); + memcpy(pFilename, p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, n); + pFilename[n] = '\0'; + } + return n + 1; +} + +static MZ_FORCEINLINE mz_bool mz_zip_reader_string_equal(const char *pA, + const char *pB, + mz_uint len, + mz_uint flags) { + mz_uint i; + if (flags & MZ_ZIP_FLAG_CASE_SENSITIVE) + return 0 == memcmp(pA, pB, len); + for (i = 0; i < len; ++i) + if (MZ_TOLOWER(pA[i]) != MZ_TOLOWER(pB[i])) + return MZ_FALSE; + return MZ_TRUE; +} + +static MZ_FORCEINLINE int +mz_zip_reader_filename_compare(const mz_zip_array *pCentral_dir_array, + const mz_zip_array *pCentral_dir_offsets, + mz_uint l_index, const char *pR, mz_uint r_len) { + const mz_uint8 *pL = &MZ_ZIP_ARRAY_ELEMENT( + pCentral_dir_array, mz_uint8, + MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets, mz_uint32, + l_index)), + *pE; + mz_uint l_len = MZ_READ_LE16(pL + MZ_ZIP_CDH_FILENAME_LEN_OFS); + mz_uint8 l = 0, r = 0; + pL += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE; + pE = pL + MZ_MIN(l_len, r_len); + while (pL < pE) { + if ((l = MZ_TOLOWER(*pL)) != (r = MZ_TOLOWER(*pR))) + break; + pL++; + pR++; + } + return (pL 
== pE) ? (int)(l_len - r_len) : (l - r); +} + +static int mz_zip_reader_locate_file_binary_search(mz_zip_archive *pZip, + const char *pFilename) { + mz_zip_internal_state *pState = pZip->m_pState; + const mz_zip_array *pCentral_dir_offsets = &pState->m_central_dir_offsets; + const mz_zip_array *pCentral_dir = &pState->m_central_dir; + mz_uint32 *pIndices = &MZ_ZIP_ARRAY_ELEMENT( + &pState->m_sorted_central_dir_offsets, mz_uint32, 0); + const int size = pZip->m_total_files; + const mz_uint filename_len = (mz_uint)strlen(pFilename); + int l = 0, h = size - 1; + while (l <= h) { + int m = (l + h) >> 1, file_index = pIndices[m], + comp = + mz_zip_reader_filename_compare(pCentral_dir, pCentral_dir_offsets, + file_index, pFilename, filename_len); + if (!comp) + return file_index; + else if (comp < 0) + l = m + 1; + else + h = m - 1; + } + return -1; +} + +int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName, + const char *pComment, mz_uint flags) { + mz_uint file_index; + size_t name_len, comment_len; + if ((!pZip) || (!pZip->m_pState) || (!pName) || + (pZip->m_zip_mode != MZ_ZIP_MODE_READING)) + return -1; + if (((flags & (MZ_ZIP_FLAG_IGNORE_PATH | MZ_ZIP_FLAG_CASE_SENSITIVE)) == 0) && + (!pComment) && (pZip->m_pState->m_sorted_central_dir_offsets.m_size)) + return mz_zip_reader_locate_file_binary_search(pZip, pName); + name_len = strlen(pName); + if (name_len > 0xFFFF) + return -1; + comment_len = pComment ? strlen(pComment) : 0; + if (comment_len > 0xFFFF) + return -1; + for (file_index = 0; file_index < pZip->m_total_files; file_index++) { + const mz_uint8 *pHeader = &MZ_ZIP_ARRAY_ELEMENT( + &pZip->m_pState->m_central_dir, mz_uint8, + MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets, mz_uint32, + file_index)); + mz_uint filename_len = MZ_READ_LE16(pHeader + MZ_ZIP_CDH_FILENAME_LEN_OFS); + const char *pFilename = + (const char *)pHeader + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE; + if (filename_len < name_len) + continue; + if (comment_len) { + mz_uint file_extra_len = MZ_READ_LE16(pHeader + MZ_ZIP_CDH_EXTRA_LEN_OFS), + file_comment_len = + MZ_READ_LE16(pHeader + MZ_ZIP_CDH_COMMENT_LEN_OFS); + const char *pFile_comment = pFilename + filename_len + file_extra_len; + if ((file_comment_len != comment_len) || + (!mz_zip_reader_string_equal(pComment, pFile_comment, + file_comment_len, flags))) + continue; + } + if ((flags & MZ_ZIP_FLAG_IGNORE_PATH) && (filename_len)) { + int ofs = filename_len - 1; + do { + if ((pFilename[ofs] == '/') || (pFilename[ofs] == '\\') || + (pFilename[ofs] == ':')) + break; + } while (--ofs >= 0); + ofs++; + pFilename += ofs; + filename_len -= ofs; + } + if ((filename_len == name_len) && + (mz_zip_reader_string_equal(pName, pFilename, filename_len, flags))) + return file_index; + } + return -1; +} + +mz_bool mz_zip_reader_extract_to_mem_no_alloc(mz_zip_archive *pZip, + mz_uint file_index, void *pBuf, + size_t buf_size, mz_uint flags, + void *pUser_read_buf, + size_t user_read_buf_size) { + int status = TINFL_STATUS_DONE; + mz_uint64 needed_size, cur_file_ofs, comp_remaining, + out_buf_ofs = 0, read_buf_size, read_buf_ofs = 0, read_buf_avail; + mz_zip_archive_file_stat file_stat; + void *pRead_buf; + mz_uint32 + local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) / + sizeof(mz_uint32)]; + mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32; + tinfl_decompressor inflator; + + if ((buf_size) && (!pBuf)) + return MZ_FALSE; + + if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat)) + return MZ_FALSE; + + // Empty file, or a 
directory (but not always a directory - I've seen odd zips + // with directories that have compressed data which inflates to 0 bytes) + if (!file_stat.m_comp_size) + return MZ_TRUE; + + // Entry is a subdirectory (I've seen old zips with dir entries which have + // compressed deflate data which inflates to 0 bytes, but these entries claim + // to uncompress to 512 bytes in the headers). I'm torn how to handle this + // case - should it fail instead? + if (mz_zip_reader_is_file_a_directory(pZip, file_index)) + return MZ_TRUE; + + // Encryption and patch files are not supported. + if (file_stat.m_bit_flag & (1 | 32)) + return MZ_FALSE; + + // This function only supports stored and deflate. + if ((!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (file_stat.m_method != 0) && + (file_stat.m_method != MZ_DEFLATED)) + return MZ_FALSE; + + // Ensure supplied output buffer is large enough. + needed_size = (flags & MZ_ZIP_FLAG_COMPRESSED_DATA) ? file_stat.m_comp_size + : file_stat.m_uncomp_size; + if (buf_size < needed_size) + return MZ_FALSE; + + // Read and parse the local directory entry. + cur_file_ofs = file_stat.m_local_header_ofs; + if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pLocal_header, + MZ_ZIP_LOCAL_DIR_HEADER_SIZE) != + MZ_ZIP_LOCAL_DIR_HEADER_SIZE) + return MZ_FALSE; + if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG) + return MZ_FALSE; + + cur_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE + + MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) + + MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS); + if ((cur_file_ofs + file_stat.m_comp_size) > pZip->m_archive_size) + return MZ_FALSE; + + if ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) || (!file_stat.m_method)) { + // The file is stored or the caller has requested the compressed data. + if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf, + (size_t)needed_size) != needed_size) + return MZ_FALSE; + return ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) != 0) || + (mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf, + (size_t)file_stat.m_uncomp_size) == file_stat.m_crc32); + } + + // Decompress the file either directly from memory or from a file input + // buffer. + tinfl_init(&inflator); + + if (pZip->m_pState->m_pMem) { + // Read directly from the archive in memory. + pRead_buf = (mz_uint8 *)pZip->m_pState->m_pMem + cur_file_ofs; + read_buf_size = read_buf_avail = file_stat.m_comp_size; + comp_remaining = 0; + } else if (pUser_read_buf) { + // Use a user provided read buffer. + if (!user_read_buf_size) + return MZ_FALSE; + pRead_buf = (mz_uint8 *)pUser_read_buf; + read_buf_size = user_read_buf_size; + read_buf_avail = 0; + comp_remaining = file_stat.m_comp_size; + } else { + // Temporarily allocate a read buffer. 
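/* Illustrative caller-side sketch (an editor's assumption, not miniz code):
   the temporary buffer allocated just below can be avoided entirely by
   handing this function a caller-owned scratch buffer through
   pUser_read_buf/user_read_buf_size. The buffer sizes, "zip" and
   "example.txt" are hypothetical; "zip" is an already-initialized reader.

   char out[64 * 1024];
   char scratch[16 * 1024];
   int idx = mz_zip_reader_locate_file(&zip, "example.txt", NULL, 0);
   if (idx >= 0 &&
       mz_zip_reader_extract_to_mem_no_alloc(&zip, (mz_uint)idx, out,
                                             sizeof(out), 0, scratch,
                                             sizeof(scratch))) {
     // out now holds the decompressed bytes (provided they fit).
   }
*/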
+ read_buf_size = MZ_MIN(file_stat.m_comp_size, MZ_ZIP_MAX_IO_BUF_SIZE); +#ifdef _MSC_VER + if (((0, sizeof(size_t) == sizeof(mz_uint32))) && + (read_buf_size > 0x7FFFFFFF)) +#else + if (((sizeof(size_t) == sizeof(mz_uint32))) && (read_buf_size > 0x7FFFFFFF)) +#endif + return MZ_FALSE; + if (NULL == (pRead_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, + (size_t)read_buf_size))) + return MZ_FALSE; + read_buf_avail = 0; + comp_remaining = file_stat.m_comp_size; + } + + do { + size_t in_buf_size, + out_buf_size = (size_t)(file_stat.m_uncomp_size - out_buf_ofs); + if ((!read_buf_avail) && (!pZip->m_pState->m_pMem)) { + read_buf_avail = MZ_MIN(read_buf_size, comp_remaining); + if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf, + (size_t)read_buf_avail) != read_buf_avail) { + status = TINFL_STATUS_FAILED; + break; + } + cur_file_ofs += read_buf_avail; + comp_remaining -= read_buf_avail; + read_buf_ofs = 0; + } + in_buf_size = (size_t)read_buf_avail; + status = tinfl_decompress( + &inflator, (mz_uint8 *)pRead_buf + read_buf_ofs, &in_buf_size, + (mz_uint8 *)pBuf, (mz_uint8 *)pBuf + out_buf_ofs, &out_buf_size, + TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF | + (comp_remaining ? TINFL_FLAG_HAS_MORE_INPUT : 0)); + read_buf_avail -= in_buf_size; + read_buf_ofs += in_buf_size; + out_buf_ofs += out_buf_size; + } while (status == TINFL_STATUS_NEEDS_MORE_INPUT); + + if (status == TINFL_STATUS_DONE) { + // Make sure the entire file was decompressed, and check its CRC. + if ((out_buf_ofs != file_stat.m_uncomp_size) || + (mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf, + (size_t)file_stat.m_uncomp_size) != file_stat.m_crc32)) + status = TINFL_STATUS_FAILED; + } + + if ((!pZip->m_pState->m_pMem) && (!pUser_read_buf)) + pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf); + + return status == TINFL_STATUS_DONE; +} + +mz_bool mz_zip_reader_extract_file_to_mem_no_alloc( + mz_zip_archive *pZip, const char *pFilename, void *pBuf, size_t buf_size, + mz_uint flags, void *pUser_read_buf, size_t user_read_buf_size) { + int file_index = mz_zip_reader_locate_file(pZip, pFilename, NULL, flags); + if (file_index < 0) + return MZ_FALSE; + return mz_zip_reader_extract_to_mem_no_alloc(pZip, file_index, pBuf, buf_size, + flags, pUser_read_buf, + user_read_buf_size); +} + +mz_bool mz_zip_reader_extract_to_mem(mz_zip_archive *pZip, mz_uint file_index, + void *pBuf, size_t buf_size, + mz_uint flags) { + return mz_zip_reader_extract_to_mem_no_alloc(pZip, file_index, pBuf, buf_size, + flags, NULL, 0); +} + +mz_bool mz_zip_reader_extract_file_to_mem(mz_zip_archive *pZip, + const char *pFilename, void *pBuf, + size_t buf_size, mz_uint flags) { + return mz_zip_reader_extract_file_to_mem_no_alloc(pZip, pFilename, pBuf, + buf_size, flags, NULL, 0); +} + +void *mz_zip_reader_extract_to_heap(mz_zip_archive *pZip, mz_uint file_index, + size_t *pSize, mz_uint flags) { + mz_uint64 comp_size, uncomp_size, alloc_size; + const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index); + void *pBuf; + + if (pSize) + *pSize = 0; + if (!p) + return NULL; + + comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS); + uncomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS); + + alloc_size = (flags & MZ_ZIP_FLAG_COMPRESSED_DATA) ? 
comp_size : uncomp_size; +#ifdef _MSC_VER + if (((0, sizeof(size_t) == sizeof(mz_uint32))) && (alloc_size > 0x7FFFFFFF)) +#else + if (((sizeof(size_t) == sizeof(mz_uint32))) && (alloc_size > 0x7FFFFFFF)) +#endif + return NULL; + if (NULL == + (pBuf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, (size_t)alloc_size))) + return NULL; + + if (!mz_zip_reader_extract_to_mem(pZip, file_index, pBuf, (size_t)alloc_size, + flags)) { + pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf); + return NULL; + } + + if (pSize) + *pSize = (size_t)alloc_size; + return pBuf; +} + +void *mz_zip_reader_extract_file_to_heap(mz_zip_archive *pZip, + const char *pFilename, size_t *pSize, + mz_uint flags) { + int file_index = mz_zip_reader_locate_file(pZip, pFilename, NULL, flags); + if (file_index < 0) { + if (pSize) + *pSize = 0; + return MZ_FALSE; + } + return mz_zip_reader_extract_to_heap(pZip, file_index, pSize, flags); +} + +mz_bool mz_zip_reader_extract_to_callback(mz_zip_archive *pZip, + mz_uint file_index, + mz_file_write_func pCallback, + void *pOpaque, mz_uint flags) { + int status = TINFL_STATUS_DONE; + mz_uint file_crc32 = MZ_CRC32_INIT; + mz_uint64 read_buf_size, read_buf_ofs = 0, read_buf_avail, comp_remaining, + out_buf_ofs = 0, cur_file_ofs; + mz_zip_archive_file_stat file_stat; + void *pRead_buf = NULL; + void *pWrite_buf = NULL; + mz_uint32 + local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) / + sizeof(mz_uint32)]; + mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32; + + if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat)) + return MZ_FALSE; + + // Empty file, or a directory (but not always a directory - I've seen odd zips + // with directories that have compressed data which inflates to 0 bytes) + if (!file_stat.m_comp_size) + return MZ_TRUE; + + // Entry is a subdirectory (I've seen old zips with dir entries which have + // compressed deflate data which inflates to 0 bytes, but these entries claim + // to uncompress to 512 bytes in the headers). I'm torn how to handle this + // case - should it fail instead? + if (mz_zip_reader_is_file_a_directory(pZip, file_index)) + return MZ_TRUE; + + // Encryption and patch files are not supported. + if (file_stat.m_bit_flag & (1 | 32)) + return MZ_FALSE; + + // This function only supports stored and deflate. + if ((!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (file_stat.m_method != 0) && + (file_stat.m_method != MZ_DEFLATED)) + return MZ_FALSE; + + // Read and parse the local directory entry. + cur_file_ofs = file_stat.m_local_header_ofs; + if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pLocal_header, + MZ_ZIP_LOCAL_DIR_HEADER_SIZE) != + MZ_ZIP_LOCAL_DIR_HEADER_SIZE) + return MZ_FALSE; + if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG) + return MZ_FALSE; + + cur_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE + + MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) + + MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS); + if ((cur_file_ofs + file_stat.m_comp_size) > pZip->m_archive_size) + return MZ_FALSE; + + // Decompress the file either directly from memory or from a file input + // buffer. 
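/* Illustrative streaming sketch (an editor's assumption, not miniz code):
   the callback path implemented here lets callers consume the decompressed
   bytes incrementally instead of buffering the whole file. The callback and
   variable names below are hypothetical.

   static size_t count_bytes(void *pOpaque, mz_uint64 ofs, const void *pBuf,
                             size_t n) {
     (void)ofs; (void)pBuf;
     *(mz_uint64 *)pOpaque += n;  // just tally the decompressed size
     return n;                    // returning anything != n aborts extraction
   }

   mz_uint64 total = 0;
   mz_zip_reader_extract_to_callback(&zip, file_index, count_bytes, &total, 0);
*/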
+ if (pZip->m_pState->m_pMem) { + pRead_buf = (mz_uint8 *)pZip->m_pState->m_pMem + cur_file_ofs; + read_buf_size = read_buf_avail = file_stat.m_comp_size; + comp_remaining = 0; + } else { + read_buf_size = MZ_MIN(file_stat.m_comp_size, MZ_ZIP_MAX_IO_BUF_SIZE); + if (NULL == (pRead_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, + (size_t)read_buf_size))) + return MZ_FALSE; + read_buf_avail = 0; + comp_remaining = file_stat.m_comp_size; + } + + if ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) || (!file_stat.m_method)) { + // The file is stored or the caller has requested the compressed data. + if (pZip->m_pState->m_pMem) { +#ifdef _MSC_VER + if (((0, sizeof(size_t) == sizeof(mz_uint32))) && + (file_stat.m_comp_size > 0xFFFFFFFF)) +#else + if (((sizeof(size_t) == sizeof(mz_uint32))) && + (file_stat.m_comp_size > 0xFFFFFFFF)) +#endif + return MZ_FALSE; + if (pCallback(pOpaque, out_buf_ofs, pRead_buf, + (size_t)file_stat.m_comp_size) != file_stat.m_comp_size) + status = TINFL_STATUS_FAILED; + else if (!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) + file_crc32 = + (mz_uint32)mz_crc32(file_crc32, (const mz_uint8 *)pRead_buf, + (size_t)file_stat.m_comp_size); + // cur_file_ofs += file_stat.m_comp_size; + out_buf_ofs += file_stat.m_comp_size; + // comp_remaining = 0; + } else { + while (comp_remaining) { + read_buf_avail = MZ_MIN(read_buf_size, comp_remaining); + if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf, + (size_t)read_buf_avail) != read_buf_avail) { + status = TINFL_STATUS_FAILED; + break; + } + + if (!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) + file_crc32 = (mz_uint32)mz_crc32( + file_crc32, (const mz_uint8 *)pRead_buf, (size_t)read_buf_avail); + + if (pCallback(pOpaque, out_buf_ofs, pRead_buf, + (size_t)read_buf_avail) != read_buf_avail) { + status = TINFL_STATUS_FAILED; + break; + } + cur_file_ofs += read_buf_avail; + out_buf_ofs += read_buf_avail; + comp_remaining -= read_buf_avail; + } + } + } else { + tinfl_decompressor inflator; + tinfl_init(&inflator); + + if (NULL == (pWrite_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, + TINFL_LZ_DICT_SIZE))) + status = TINFL_STATUS_FAILED; + else { + do { + mz_uint8 *pWrite_buf_cur = + (mz_uint8 *)pWrite_buf + (out_buf_ofs & (TINFL_LZ_DICT_SIZE - 1)); + size_t in_buf_size, + out_buf_size = + TINFL_LZ_DICT_SIZE - (out_buf_ofs & (TINFL_LZ_DICT_SIZE - 1)); + if ((!read_buf_avail) && (!pZip->m_pState->m_pMem)) { + read_buf_avail = MZ_MIN(read_buf_size, comp_remaining); + if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf, + (size_t)read_buf_avail) != read_buf_avail) { + status = TINFL_STATUS_FAILED; + break; + } + cur_file_ofs += read_buf_avail; + comp_remaining -= read_buf_avail; + read_buf_ofs = 0; + } + + in_buf_size = (size_t)read_buf_avail; + status = tinfl_decompress( + &inflator, (const mz_uint8 *)pRead_buf + read_buf_ofs, &in_buf_size, + (mz_uint8 *)pWrite_buf, pWrite_buf_cur, &out_buf_size, + comp_remaining ? 
TINFL_FLAG_HAS_MORE_INPUT : 0); + read_buf_avail -= in_buf_size; + read_buf_ofs += in_buf_size; + + if (out_buf_size) { + if (pCallback(pOpaque, out_buf_ofs, pWrite_buf_cur, out_buf_size) != + out_buf_size) { + status = TINFL_STATUS_FAILED; + break; + } + file_crc32 = + (mz_uint32)mz_crc32(file_crc32, pWrite_buf_cur, out_buf_size); + if ((out_buf_ofs += out_buf_size) > file_stat.m_uncomp_size) { + status = TINFL_STATUS_FAILED; + break; + } + } + } while ((status == TINFL_STATUS_NEEDS_MORE_INPUT) || + (status == TINFL_STATUS_HAS_MORE_OUTPUT)); + } + } + + if ((status == TINFL_STATUS_DONE) && + (!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA))) { + // Make sure the entire file was decompressed, and check its CRC. + if ((out_buf_ofs != file_stat.m_uncomp_size) || + (file_crc32 != file_stat.m_crc32)) + status = TINFL_STATUS_FAILED; + } + + if (!pZip->m_pState->m_pMem) + pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf); + if (pWrite_buf) + pZip->m_pFree(pZip->m_pAlloc_opaque, pWrite_buf); + + return status == TINFL_STATUS_DONE; +} + +mz_bool mz_zip_reader_extract_file_to_callback(mz_zip_archive *pZip, + const char *pFilename, + mz_file_write_func pCallback, + void *pOpaque, mz_uint flags) { + int file_index = mz_zip_reader_locate_file(pZip, pFilename, NULL, flags); + if (file_index < 0) + return MZ_FALSE; + return mz_zip_reader_extract_to_callback(pZip, file_index, pCallback, pOpaque, + flags); +} + +#ifndef MINIZ_NO_STDIO +static size_t mz_zip_file_write_callback(void *pOpaque, mz_uint64 ofs, + const void *pBuf, size_t n) { + (void)ofs; + return MZ_FWRITE(pBuf, 1, n, (MZ_FILE *)pOpaque); +} + +mz_bool mz_zip_reader_extract_to_file(mz_zip_archive *pZip, mz_uint file_index, + const char *pDst_filename, + mz_uint flags) { + mz_bool status; + mz_zip_archive_file_stat file_stat; + MZ_FILE *pFile; + if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat)) + return MZ_FALSE; + + pFile = MZ_FOPEN(pDst_filename, "wb"); + if (!pFile) + return MZ_FALSE; + status = mz_zip_reader_extract_to_callback( + pZip, file_index, mz_zip_file_write_callback, pFile, flags); + if (MZ_FCLOSE(pFile) == EOF) + return MZ_FALSE; +#ifndef MINIZ_NO_TIME + if (status) { + mz_zip_set_file_times(pDst_filename, file_stat.m_time, file_stat.m_time); + } +#endif + + return status; +} +#endif // #ifndef MINIZ_NO_STDIO + +mz_bool mz_zip_reader_end(mz_zip_archive *pZip) { + if ((!pZip) || (!pZip->m_pState) || (!pZip->m_pAlloc) || (!pZip->m_pFree) || + (pZip->m_zip_mode != MZ_ZIP_MODE_READING)) + return MZ_FALSE; + + mz_zip_internal_state *pState = pZip->m_pState; + pZip->m_pState = NULL; + mz_zip_array_clear(pZip, &pState->m_central_dir); + mz_zip_array_clear(pZip, &pState->m_central_dir_offsets); + mz_zip_array_clear(pZip, &pState->m_sorted_central_dir_offsets); + +#ifndef MINIZ_NO_STDIO + if (pState->m_pFile) { + MZ_FCLOSE(pState->m_pFile); + pState->m_pFile = NULL; + } +#endif // #ifndef MINIZ_NO_STDIO + + pZip->m_pFree(pZip->m_pAlloc_opaque, pState); + + pZip->m_zip_mode = MZ_ZIP_MODE_INVALID; + + return MZ_TRUE; +} + +#ifndef MINIZ_NO_STDIO +mz_bool mz_zip_reader_extract_file_to_file(mz_zip_archive *pZip, + const char *pArchive_filename, + const char *pDst_filename, + mz_uint flags) { + int file_index = + mz_zip_reader_locate_file(pZip, pArchive_filename, NULL, flags); + if (file_index < 0) + return MZ_FALSE; + return mz_zip_reader_extract_to_file(pZip, file_index, pDst_filename, flags); +} +#endif + +// ------------------- .ZIP archive writing + +#ifndef MINIZ_NO_ARCHIVE_WRITING_APIS + +static void mz_write_le16(mz_uint8 *p, mz_uint16 
v) { + p[0] = (mz_uint8)v; + p[1] = (mz_uint8)(v >> 8); +} +static void mz_write_le32(mz_uint8 *p, mz_uint32 v) { + p[0] = (mz_uint8)v; + p[1] = (mz_uint8)(v >> 8); + p[2] = (mz_uint8)(v >> 16); + p[3] = (mz_uint8)(v >> 24); +} +#define MZ_WRITE_LE16(p, v) mz_write_le16((mz_uint8 *)(p), (mz_uint16)(v)) +#define MZ_WRITE_LE32(p, v) mz_write_le32((mz_uint8 *)(p), (mz_uint32)(v)) + +mz_bool mz_zip_writer_init(mz_zip_archive *pZip, mz_uint64 existing_size) { + if ((!pZip) || (pZip->m_pState) || (!pZip->m_pWrite) || + (pZip->m_zip_mode != MZ_ZIP_MODE_INVALID)) + return MZ_FALSE; + + if (pZip->m_file_offset_alignment) { + // Ensure user specified file offset alignment is a power of 2. + if (pZip->m_file_offset_alignment & (pZip->m_file_offset_alignment - 1)) + return MZ_FALSE; + } + + if (!pZip->m_pAlloc) + pZip->m_pAlloc = def_alloc_func; + if (!pZip->m_pFree) + pZip->m_pFree = def_free_func; + if (!pZip->m_pRealloc) + pZip->m_pRealloc = def_realloc_func; + + pZip->m_zip_mode = MZ_ZIP_MODE_WRITING; + pZip->m_archive_size = existing_size; + pZip->m_central_directory_file_ofs = 0; + pZip->m_total_files = 0; + + if (NULL == (pZip->m_pState = (mz_zip_internal_state *)pZip->m_pAlloc( + pZip->m_pAlloc_opaque, 1, sizeof(mz_zip_internal_state)))) + return MZ_FALSE; + memset(pZip->m_pState, 0, sizeof(mz_zip_internal_state)); + MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir, + sizeof(mz_uint8)); + MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir_offsets, + sizeof(mz_uint32)); + MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_sorted_central_dir_offsets, + sizeof(mz_uint32)); + return MZ_TRUE; +} + +static size_t mz_zip_heap_write_func(void *pOpaque, mz_uint64 file_ofs, + const void *pBuf, size_t n) { + mz_zip_archive *pZip = (mz_zip_archive *)pOpaque; + mz_zip_internal_state *pState = pZip->m_pState; + mz_uint64 new_size = MZ_MAX(file_ofs + n, pState->m_mem_size); + + if ((!n) || + ((sizeof(size_t) == sizeof(mz_uint32)) && (new_size > 0x7FFFFFFF))) + return 0; + + if (new_size > pState->m_mem_capacity) { + void *pNew_block; + size_t new_capacity = MZ_MAX(64, pState->m_mem_capacity); + while (new_capacity < new_size) + new_capacity *= 2; + if (NULL == (pNew_block = pZip->m_pRealloc( + pZip->m_pAlloc_opaque, pState->m_pMem, 1, new_capacity))) + return 0; + pState->m_pMem = pNew_block; + pState->m_mem_capacity = new_capacity; + } + memcpy((mz_uint8 *)pState->m_pMem + file_ofs, pBuf, n); + pState->m_mem_size = (size_t)new_size; + return n; +} + +mz_bool mz_zip_writer_init_heap(mz_zip_archive *pZip, + size_t size_to_reserve_at_beginning, + size_t initial_allocation_size) { + pZip->m_pWrite = mz_zip_heap_write_func; + pZip->m_pIO_opaque = pZip; + if (!mz_zip_writer_init(pZip, size_to_reserve_at_beginning)) + return MZ_FALSE; + if (0 != (initial_allocation_size = MZ_MAX(initial_allocation_size, + size_to_reserve_at_beginning))) { + if (NULL == (pZip->m_pState->m_pMem = pZip->m_pAlloc( + pZip->m_pAlloc_opaque, 1, initial_allocation_size))) { + mz_zip_writer_end(pZip); + return MZ_FALSE; + } + pZip->m_pState->m_mem_capacity = initial_allocation_size; + } + return MZ_TRUE; +} + +#ifndef MINIZ_NO_STDIO +static size_t mz_zip_file_write_func(void *pOpaque, mz_uint64 file_ofs, + const void *pBuf, size_t n) { + mz_zip_archive *pZip = (mz_zip_archive *)pOpaque; + mz_int64 cur_ofs = MZ_FTELL64(pZip->m_pState->m_pFile); + if (((mz_int64)file_ofs < 0) || + (((cur_ofs != (mz_int64)file_ofs)) && + (MZ_FSEEK64(pZip->m_pState->m_pFile, (mz_int64)file_ofs, SEEK_SET)))) + return 0; + return 
MZ_FWRITE(pBuf, 1, n, pZip->m_pState->m_pFile); +} + +mz_bool mz_zip_writer_init_file(mz_zip_archive *pZip, const char *pFilename, + mz_uint64 size_to_reserve_at_beginning) { + MZ_FILE *pFile; + pZip->m_pWrite = mz_zip_file_write_func; + pZip->m_pIO_opaque = pZip; + if (!mz_zip_writer_init(pZip, size_to_reserve_at_beginning)) + return MZ_FALSE; + if (NULL == (pFile = MZ_FOPEN(pFilename, "wb"))) { + mz_zip_writer_end(pZip); + return MZ_FALSE; + } + pZip->m_pState->m_pFile = pFile; + if (size_to_reserve_at_beginning) { + mz_uint64 cur_ofs = 0; + char buf[4096]; + MZ_CLEAR_OBJ(buf); + do { + size_t n = (size_t)MZ_MIN(sizeof(buf), size_to_reserve_at_beginning); + if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_ofs, buf, n) != n) { + mz_zip_writer_end(pZip); + return MZ_FALSE; + } + cur_ofs += n; + size_to_reserve_at_beginning -= n; + } while (size_to_reserve_at_beginning); + } + return MZ_TRUE; +} +#endif // #ifndef MINIZ_NO_STDIO + +mz_bool mz_zip_writer_init_from_reader(mz_zip_archive *pZip, + const char *pFilename) { + mz_zip_internal_state *pState; + if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_READING)) + return MZ_FALSE; + // No sense in trying to write to an archive that's already at the support max + // size + if ((pZip->m_total_files == 0xFFFF) || + ((pZip->m_archive_size + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + + MZ_ZIP_LOCAL_DIR_HEADER_SIZE) > 0xFFFFFFFF)) + return MZ_FALSE; + + pState = pZip->m_pState; + + if (pState->m_pFile) { +#ifdef MINIZ_NO_STDIO + pFilename; + return MZ_FALSE; +#else + // Archive is being read from stdio - try to reopen as writable. + if (pZip->m_pIO_opaque != pZip) + return MZ_FALSE; + if (!pFilename) + return MZ_FALSE; + pZip->m_pWrite = mz_zip_file_write_func; + if (NULL == + (pState->m_pFile = MZ_FREOPEN(pFilename, "r+b", pState->m_pFile))) { + // The mz_zip_archive is now in a bogus state because pState->m_pFile is + // NULL, so just close it. + mz_zip_reader_end(pZip); + return MZ_FALSE; + } +#endif // #ifdef MINIZ_NO_STDIO + } else if (pState->m_pMem) { + // Archive lives in a memory block. Assume it's from the heap that we can + // resize using the realloc callback. + if (pZip->m_pIO_opaque != pZip) + return MZ_FALSE; + pState->m_mem_capacity = pState->m_mem_size; + pZip->m_pWrite = mz_zip_heap_write_func; + } + // Archive is being read via a user provided read function - make sure the + // user has specified a write function too. + else if (!pZip->m_pWrite) + return MZ_FALSE; + + // Start writing new files at the archive's current central directory + // location. 
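/* Illustrative append flow (an editor's sketch, not miniz code): converting
   a reader into a writer, as done here, is how new entries get appended to
   an existing archive. "existing.zip" and the added entry are hypothetical.

   mz_zip_archive zip;
   memset(&zip, 0, sizeof(zip));
   if (mz_zip_reader_init_file(&zip, "existing.zip", 0)) {
     if (mz_zip_writer_init_from_reader(&zip, "existing.zip")) {
       mz_zip_writer_add_mem(&zip, "notes/new.txt", "hello", 5,
                             MZ_DEFAULT_LEVEL);
       mz_zip_writer_finalize_archive(&zip); // rewrites the central directory
       mz_zip_writer_end(&zip);
     } else {
       mz_zip_reader_end(&zip);
     }
   }

   The single-call helper mz_zip_add_mem_to_archive_file_in_place() further
   down wraps exactly this sequence. */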
+ pZip->m_archive_size = pZip->m_central_directory_file_ofs; + pZip->m_zip_mode = MZ_ZIP_MODE_WRITING; + pZip->m_central_directory_file_ofs = 0; + + return MZ_TRUE; +} + +mz_bool mz_zip_writer_add_mem(mz_zip_archive *pZip, const char *pArchive_name, + const void *pBuf, size_t buf_size, + mz_uint level_and_flags) { + return mz_zip_writer_add_mem_ex(pZip, pArchive_name, pBuf, buf_size, NULL, 0, + level_and_flags, 0, 0); +} + +typedef struct { + mz_zip_archive *m_pZip; + mz_uint64 m_cur_archive_file_ofs; + mz_uint64 m_comp_size; +} mz_zip_writer_add_state; + +static mz_bool mz_zip_writer_add_put_buf_callback(const void *pBuf, int len, + void *pUser) { + mz_zip_writer_add_state *pState = (mz_zip_writer_add_state *)pUser; + if ((int)pState->m_pZip->m_pWrite(pState->m_pZip->m_pIO_opaque, + pState->m_cur_archive_file_ofs, pBuf, + len) != len) + return MZ_FALSE; + pState->m_cur_archive_file_ofs += len; + pState->m_comp_size += len; + return MZ_TRUE; +} + +static mz_bool mz_zip_writer_create_local_dir_header( + mz_zip_archive *pZip, mz_uint8 *pDst, mz_uint16 filename_size, + mz_uint16 extra_size, mz_uint64 uncomp_size, mz_uint64 comp_size, + mz_uint32 uncomp_crc32, mz_uint16 method, mz_uint16 bit_flags, + mz_uint16 dos_time, mz_uint16 dos_date) { + (void)pZip; + memset(pDst, 0, MZ_ZIP_LOCAL_DIR_HEADER_SIZE); + MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_SIG_OFS, MZ_ZIP_LOCAL_DIR_HEADER_SIG); + MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_VERSION_NEEDED_OFS, method ? 20 : 0); + MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_BIT_FLAG_OFS, bit_flags); + MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_METHOD_OFS, method); + MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILE_TIME_OFS, dos_time); + MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILE_DATE_OFS, dos_date); + MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_CRC32_OFS, uncomp_crc32); + MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_COMPRESSED_SIZE_OFS, comp_size); + MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_DECOMPRESSED_SIZE_OFS, uncomp_size); + MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILENAME_LEN_OFS, filename_size); + MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_EXTRA_LEN_OFS, extra_size); + return MZ_TRUE; +} + +static mz_bool mz_zip_writer_create_central_dir_header( + mz_zip_archive *pZip, mz_uint8 *pDst, mz_uint16 filename_size, + mz_uint16 extra_size, mz_uint16 comment_size, mz_uint64 uncomp_size, + mz_uint64 comp_size, mz_uint32 uncomp_crc32, mz_uint16 method, + mz_uint16 bit_flags, mz_uint16 dos_time, mz_uint16 dos_date, + mz_uint64 local_header_ofs, mz_uint32 ext_attributes) { + (void)pZip; + mz_uint16 version_made_by = 10 * MZ_VER_MAJOR + MZ_VER_MINOR; + version_made_by |= (MZ_PLATFORM << 8); + + memset(pDst, 0, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE); + MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_SIG_OFS, MZ_ZIP_CENTRAL_DIR_HEADER_SIG); + MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_VERSION_MADE_BY_OFS, version_made_by); + MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_VERSION_NEEDED_OFS, method ? 
20 : 0); + MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_BIT_FLAG_OFS, bit_flags); + MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_METHOD_OFS, method); + MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILE_TIME_OFS, dos_time); + MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILE_DATE_OFS, dos_date); + MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_CRC32_OFS, uncomp_crc32); + MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS, comp_size); + MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS, uncomp_size); + MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILENAME_LEN_OFS, filename_size); + MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_EXTRA_LEN_OFS, extra_size); + MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_COMMENT_LEN_OFS, comment_size); + MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS, ext_attributes); + MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_LOCAL_HEADER_OFS, local_header_ofs); + return MZ_TRUE; +} + +static mz_bool mz_zip_writer_add_to_central_dir( + mz_zip_archive *pZip, const char *pFilename, mz_uint16 filename_size, + const void *pExtra, mz_uint16 extra_size, const void *pComment, + mz_uint16 comment_size, mz_uint64 uncomp_size, mz_uint64 comp_size, + mz_uint32 uncomp_crc32, mz_uint16 method, mz_uint16 bit_flags, + mz_uint16 dos_time, mz_uint16 dos_date, mz_uint64 local_header_ofs, + mz_uint32 ext_attributes) { + mz_zip_internal_state *pState = pZip->m_pState; + mz_uint32 central_dir_ofs = (mz_uint32)pState->m_central_dir.m_size; + size_t orig_central_dir_size = pState->m_central_dir.m_size; + mz_uint8 central_dir_header[MZ_ZIP_CENTRAL_DIR_HEADER_SIZE]; + + // No zip64 support yet + if ((local_header_ofs > 0xFFFFFFFF) || + (((mz_uint64)pState->m_central_dir.m_size + + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + filename_size + extra_size + + comment_size) > 0xFFFFFFFF)) + return MZ_FALSE; + + if (!mz_zip_writer_create_central_dir_header( + pZip, central_dir_header, filename_size, extra_size, comment_size, + uncomp_size, comp_size, uncomp_crc32, method, bit_flags, dos_time, + dos_date, local_header_ofs, ext_attributes)) + return MZ_FALSE; + + if ((!mz_zip_array_push_back(pZip, &pState->m_central_dir, central_dir_header, + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE)) || + (!mz_zip_array_push_back(pZip, &pState->m_central_dir, pFilename, + filename_size)) || + (!mz_zip_array_push_back(pZip, &pState->m_central_dir, pExtra, + extra_size)) || + (!mz_zip_array_push_back(pZip, &pState->m_central_dir, pComment, + comment_size)) || + (!mz_zip_array_push_back(pZip, &pState->m_central_dir_offsets, + ¢ral_dir_ofs, 1))) { + // Try to push the central directory array back into its original state. + mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size, + MZ_FALSE); + return MZ_FALSE; + } + + return MZ_TRUE; +} + +static mz_bool mz_zip_writer_validate_archive_name(const char *pArchive_name) { + // Basic ZIP archive filename validity checks: Valid filenames cannot start + // with a forward slash, cannot contain a drive letter, and cannot use + // DOS-style backward slashes. 
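/* For example (editor's illustration): "dir/sub/file.txt" passes the checks
   below, while "/abs/file.txt", "C:\file.txt" and "dir\file.txt" are all
   rejected. */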
+ if (*pArchive_name == '/') + return MZ_FALSE; + while (*pArchive_name) { + if ((*pArchive_name == '\\') || (*pArchive_name == ':')) + return MZ_FALSE; + pArchive_name++; + } + return MZ_TRUE; +} + +static mz_uint +mz_zip_writer_compute_padding_needed_for_file_alignment(mz_zip_archive *pZip) { + mz_uint32 n; + if (!pZip->m_file_offset_alignment) + return 0; + n = (mz_uint32)(pZip->m_archive_size & (pZip->m_file_offset_alignment - 1)); + return (pZip->m_file_offset_alignment - n) & + (pZip->m_file_offset_alignment - 1); +} + +static mz_bool mz_zip_writer_write_zeros(mz_zip_archive *pZip, + mz_uint64 cur_file_ofs, mz_uint32 n) { + char buf[4096]; + memset(buf, 0, MZ_MIN(sizeof(buf), n)); + while (n) { + mz_uint32 s = MZ_MIN(sizeof(buf), n); + if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_file_ofs, buf, s) != s) + return MZ_FALSE; + cur_file_ofs += s; + n -= s; + } + return MZ_TRUE; +} + +mz_bool mz_zip_writer_add_mem_ex(mz_zip_archive *pZip, + const char *pArchive_name, const void *pBuf, + size_t buf_size, const void *pComment, + mz_uint16 comment_size, + mz_uint level_and_flags, mz_uint64 uncomp_size, + mz_uint32 uncomp_crc32) { + mz_uint32 ext_attributes = 0; + mz_uint16 method = 0, dos_time = 0, dos_date = 0; + mz_uint level, num_alignment_padding_bytes; + mz_uint64 local_dir_header_ofs, cur_archive_file_ofs, comp_size = 0; + size_t archive_name_size; + mz_uint8 local_dir_header[MZ_ZIP_LOCAL_DIR_HEADER_SIZE]; + tdefl_compressor *pComp = NULL; + mz_bool store_data_uncompressed; + mz_zip_internal_state *pState; + + if ((int)level_and_flags < 0) + level_and_flags = MZ_DEFAULT_LEVEL; + level = level_and_flags & 0xF; + store_data_uncompressed = + ((!level) || (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA)); + + if ((!pZip) || (!pZip->m_pState) || + (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) || ((buf_size) && (!pBuf)) || + (!pArchive_name) || ((comment_size) && (!pComment)) || + (pZip->m_total_files == 0xFFFF) || (level > MZ_UBER_COMPRESSION)) + return MZ_FALSE; + + local_dir_header_ofs = cur_archive_file_ofs = pZip->m_archive_size; + pState = pZip->m_pState; + + if ((!(level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (uncomp_size)) + return MZ_FALSE; + // No zip64 support yet + if ((buf_size > 0xFFFFFFFF) || (uncomp_size > 0xFFFFFFFF)) + return MZ_FALSE; + if (!mz_zip_writer_validate_archive_name(pArchive_name)) + return MZ_FALSE; + +#if !defined(MINIZ_NO_TIME) && !defined(MINIZ_NO_ARCHIVE_WRITING_APIS) + { + time_t cur_time; + time(&cur_time); + mz_zip_time_t_to_dos_time(cur_time, &dos_time, &dos_date); + } +#endif // #ifndef MINIZ_NO_TIME + + archive_name_size = strlen(pArchive_name); + if (archive_name_size > 0xFFFF) + return MZ_FALSE; + + num_alignment_padding_bytes = + mz_zip_writer_compute_padding_needed_for_file_alignment(pZip); + + // no zip64 support yet + if ((pZip->m_total_files == 0xFFFF) || + ((pZip->m_archive_size + num_alignment_padding_bytes + + MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + + comment_size + archive_name_size) > 0xFFFFFFFF)) + return MZ_FALSE; + + if ((archive_name_size) && (pArchive_name[archive_name_size - 1] == '/')) { + // Set DOS Subdirectory attribute bit. + ext_attributes |= 0x10; + // Subdirectories cannot contain data. + if ((buf_size) || (uncomp_size)) + return MZ_FALSE; + } + + // Try to do any allocations before writing to the archive, so if an + // allocation fails the file remains unmodified. (A good idea if we're doing + // an in-place modification.) 
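/* Illustrative in-memory writer sketch (an editor's assumption, not miniz
   code): mz_zip_writer_add_mem_ex() is typically reached through the
   heap-backed writer set up earlier. Buffer and entry names are hypothetical.

   mz_zip_archive zip;
   void *pZipBuf = NULL;
   size_t zip_size = 0;
   memset(&zip, 0, sizeof(zip));
   if (mz_zip_writer_init_heap(&zip, 0, 64 * 1024)) {
     mz_zip_writer_add_mem(&zip, "readme.txt", "hello zip", 9,
                           MZ_DEFAULT_LEVEL);
     mz_zip_writer_finalize_heap_archive(&zip, &pZipBuf, &zip_size);
   }
   mz_zip_writer_end(&zip);
   // pZipBuf/zip_size now hold a complete .zip image; release it with the
   // allocator matching m_pAlloc (plain free() with the default allocators).
*/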
+ if ((!mz_zip_array_ensure_room(pZip, &pState->m_central_dir, + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + + archive_name_size + comment_size)) || + (!mz_zip_array_ensure_room(pZip, &pState->m_central_dir_offsets, 1))) + return MZ_FALSE; + + if ((!store_data_uncompressed) && (buf_size)) { + if (NULL == (pComp = (tdefl_compressor *)pZip->m_pAlloc( + pZip->m_pAlloc_opaque, 1, sizeof(tdefl_compressor)))) + return MZ_FALSE; + } + + if (!mz_zip_writer_write_zeros(pZip, cur_archive_file_ofs, + num_alignment_padding_bytes + + sizeof(local_dir_header))) { + pZip->m_pFree(pZip->m_pAlloc_opaque, pComp); + return MZ_FALSE; + } + local_dir_header_ofs += num_alignment_padding_bytes; + if (pZip->m_file_offset_alignment) { + MZ_ASSERT((local_dir_header_ofs & (pZip->m_file_offset_alignment - 1)) == + 0); + } + cur_archive_file_ofs += + num_alignment_padding_bytes + sizeof(local_dir_header); + + MZ_CLEAR_OBJ(local_dir_header); + if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pArchive_name, + archive_name_size) != archive_name_size) { + pZip->m_pFree(pZip->m_pAlloc_opaque, pComp); + return MZ_FALSE; + } + cur_archive_file_ofs += archive_name_size; + + if (!(level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) { + uncomp_crc32 = + (mz_uint32)mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf, buf_size); + uncomp_size = buf_size; + if (uncomp_size <= 3) { + level = 0; + store_data_uncompressed = MZ_TRUE; + } + } + + if (store_data_uncompressed) { + if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pBuf, + buf_size) != buf_size) { + pZip->m_pFree(pZip->m_pAlloc_opaque, pComp); + return MZ_FALSE; + } + + cur_archive_file_ofs += buf_size; + comp_size = buf_size; + + if (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA) + method = MZ_DEFLATED; + } else if (buf_size) { + mz_zip_writer_add_state state; + + state.m_pZip = pZip; + state.m_cur_archive_file_ofs = cur_archive_file_ofs; + state.m_comp_size = 0; + + if ((tdefl_init(pComp, mz_zip_writer_add_put_buf_callback, &state, + tdefl_create_comp_flags_from_zip_params( + level, -15, MZ_DEFAULT_STRATEGY)) != + TDEFL_STATUS_OKAY) || + (tdefl_compress_buffer(pComp, pBuf, buf_size, TDEFL_FINISH) != + TDEFL_STATUS_DONE)) { + pZip->m_pFree(pZip->m_pAlloc_opaque, pComp); + return MZ_FALSE; + } + + comp_size = state.m_comp_size; + cur_archive_file_ofs = state.m_cur_archive_file_ofs; + + method = MZ_DEFLATED; + } + + pZip->m_pFree(pZip->m_pAlloc_opaque, pComp); + pComp = NULL; + + // no zip64 support yet + if ((comp_size > 0xFFFFFFFF) || (cur_archive_file_ofs > 0xFFFFFFFF)) + return MZ_FALSE; + + if (!mz_zip_writer_create_local_dir_header( + pZip, local_dir_header, (mz_uint16)archive_name_size, 0, uncomp_size, + comp_size, uncomp_crc32, method, 0, dos_time, dos_date)) + return MZ_FALSE; + + if (pZip->m_pWrite(pZip->m_pIO_opaque, local_dir_header_ofs, local_dir_header, + sizeof(local_dir_header)) != sizeof(local_dir_header)) + return MZ_FALSE; + + if (!mz_zip_writer_add_to_central_dir( + pZip, pArchive_name, (mz_uint16)archive_name_size, NULL, 0, pComment, + comment_size, uncomp_size, comp_size, uncomp_crc32, method, 0, + dos_time, dos_date, local_dir_header_ofs, ext_attributes)) + return MZ_FALSE; + + pZip->m_total_files++; + pZip->m_archive_size = cur_archive_file_ofs; + + return MZ_TRUE; +} + +#ifndef MINIZ_NO_STDIO +mz_bool mz_zip_writer_add_file(mz_zip_archive *pZip, const char *pArchive_name, + const char *pSrc_filename, const void *pComment, + mz_uint16 comment_size, mz_uint level_and_flags, + mz_uint32 ext_attributes) { + mz_uint uncomp_crc32 = MZ_CRC32_INIT, 
level, num_alignment_padding_bytes; + mz_uint16 method = 0, dos_time = 0, dos_date = 0; + time_t file_modified_time; + mz_uint64 local_dir_header_ofs, cur_archive_file_ofs, uncomp_size = 0, + comp_size = 0; + size_t archive_name_size; + mz_uint8 local_dir_header[MZ_ZIP_LOCAL_DIR_HEADER_SIZE]; + MZ_FILE *pSrc_file = NULL; + + if ((int)level_and_flags < 0) + level_and_flags = MZ_DEFAULT_LEVEL; + level = level_and_flags & 0xF; + + if ((!pZip) || (!pZip->m_pState) || + (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) || (!pArchive_name) || + ((comment_size) && (!pComment)) || (level > MZ_UBER_COMPRESSION)) + return MZ_FALSE; + + local_dir_header_ofs = cur_archive_file_ofs = pZip->m_archive_size; + + if (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA) + return MZ_FALSE; + if (!mz_zip_writer_validate_archive_name(pArchive_name)) + return MZ_FALSE; + + archive_name_size = strlen(pArchive_name); + if (archive_name_size > 0xFFFF) + return MZ_FALSE; + + num_alignment_padding_bytes = + mz_zip_writer_compute_padding_needed_for_file_alignment(pZip); + + // no zip64 support yet + if ((pZip->m_total_files == 0xFFFF) || + ((pZip->m_archive_size + num_alignment_padding_bytes + + MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + + comment_size + archive_name_size) > 0xFFFFFFFF)) + return MZ_FALSE; + + memset(&file_modified_time, 0, sizeof(file_modified_time)); +#if !defined(MINIZ_NO_TIME) && !defined(MINIZ_NO_ARCHIVE_WRITING_APIS) +#ifndef MINIZ_NO_STDIO + if (!mz_zip_get_file_modified_time(pSrc_filename, &file_modified_time)) + return MZ_FALSE; +#endif + mz_zip_time_t_to_dos_time(file_modified_time, &dos_time, &dos_date); +#endif + + pSrc_file = MZ_FOPEN(pSrc_filename, "rb"); + if (!pSrc_file) + return MZ_FALSE; + MZ_FSEEK64(pSrc_file, 0, SEEK_END); + uncomp_size = MZ_FTELL64(pSrc_file); + MZ_FSEEK64(pSrc_file, 0, SEEK_SET); + + if (uncomp_size > 0xFFFFFFFF) { + // No zip64 support yet + MZ_FCLOSE(pSrc_file); + return MZ_FALSE; + } + if (uncomp_size <= 3) + level = 0; + + if (!mz_zip_writer_write_zeros(pZip, cur_archive_file_ofs, + num_alignment_padding_bytes + + sizeof(local_dir_header))) { + MZ_FCLOSE(pSrc_file); + return MZ_FALSE; + } + local_dir_header_ofs += num_alignment_padding_bytes; + if (pZip->m_file_offset_alignment) { + MZ_ASSERT((local_dir_header_ofs & (pZip->m_file_offset_alignment - 1)) == + 0); + } + cur_archive_file_ofs += + num_alignment_padding_bytes + sizeof(local_dir_header); + + MZ_CLEAR_OBJ(local_dir_header); + if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pArchive_name, + archive_name_size) != archive_name_size) { + MZ_FCLOSE(pSrc_file); + return MZ_FALSE; + } + cur_archive_file_ofs += archive_name_size; + + if (uncomp_size) { + mz_uint64 uncomp_remaining = uncomp_size; + void *pRead_buf = + pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, MZ_ZIP_MAX_IO_BUF_SIZE); + if (!pRead_buf) { + MZ_FCLOSE(pSrc_file); + return MZ_FALSE; + } + + if (!level) { + while (uncomp_remaining) { + mz_uint n = (mz_uint)MZ_MIN(MZ_ZIP_MAX_IO_BUF_SIZE, uncomp_remaining); + if ((MZ_FREAD(pRead_buf, 1, n, pSrc_file) != n) || + (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pRead_buf, + n) != n)) { + pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf); + MZ_FCLOSE(pSrc_file); + return MZ_FALSE; + } + uncomp_crc32 = + (mz_uint32)mz_crc32(uncomp_crc32, (const mz_uint8 *)pRead_buf, n); + uncomp_remaining -= n; + cur_archive_file_ofs += n; + } + comp_size = uncomp_size; + } else { + mz_bool result = MZ_FALSE; + mz_zip_writer_add_state state; + tdefl_compressor *pComp = (tdefl_compressor 
*)pZip->m_pAlloc( + pZip->m_pAlloc_opaque, 1, sizeof(tdefl_compressor)); + if (!pComp) { + pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf); + MZ_FCLOSE(pSrc_file); + return MZ_FALSE; + } + + state.m_pZip = pZip; + state.m_cur_archive_file_ofs = cur_archive_file_ofs; + state.m_comp_size = 0; + + if (tdefl_init(pComp, mz_zip_writer_add_put_buf_callback, &state, + tdefl_create_comp_flags_from_zip_params( + level, -15, MZ_DEFAULT_STRATEGY)) != + TDEFL_STATUS_OKAY) { + pZip->m_pFree(pZip->m_pAlloc_opaque, pComp); + pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf); + MZ_FCLOSE(pSrc_file); + return MZ_FALSE; + } + + for (;;) { + size_t in_buf_size = + (mz_uint32)MZ_MIN(uncomp_remaining, MZ_ZIP_MAX_IO_BUF_SIZE); + tdefl_status status; + + if (MZ_FREAD(pRead_buf, 1, in_buf_size, pSrc_file) != in_buf_size) + break; + + uncomp_crc32 = (mz_uint32)mz_crc32( + uncomp_crc32, (const mz_uint8 *)pRead_buf, in_buf_size); + uncomp_remaining -= in_buf_size; + + status = tdefl_compress_buffer(pComp, pRead_buf, in_buf_size, + uncomp_remaining ? TDEFL_NO_FLUSH + : TDEFL_FINISH); + if (status == TDEFL_STATUS_DONE) { + result = MZ_TRUE; + break; + } else if (status != TDEFL_STATUS_OKAY) + break; + } + + pZip->m_pFree(pZip->m_pAlloc_opaque, pComp); + + if (!result) { + pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf); + MZ_FCLOSE(pSrc_file); + return MZ_FALSE; + } + + comp_size = state.m_comp_size; + cur_archive_file_ofs = state.m_cur_archive_file_ofs; + + method = MZ_DEFLATED; + } + + pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf); + } + + MZ_FCLOSE(pSrc_file); + pSrc_file = NULL; + + // no zip64 support yet + if ((comp_size > 0xFFFFFFFF) || (cur_archive_file_ofs > 0xFFFFFFFF)) + return MZ_FALSE; + + if (!mz_zip_writer_create_local_dir_header( + pZip, local_dir_header, (mz_uint16)archive_name_size, 0, uncomp_size, + comp_size, uncomp_crc32, method, 0, dos_time, dos_date)) + return MZ_FALSE; + + if (pZip->m_pWrite(pZip->m_pIO_opaque, local_dir_header_ofs, local_dir_header, + sizeof(local_dir_header)) != sizeof(local_dir_header)) + return MZ_FALSE; + + if (!mz_zip_writer_add_to_central_dir( + pZip, pArchive_name, (mz_uint16)archive_name_size, NULL, 0, pComment, + comment_size, uncomp_size, comp_size, uncomp_crc32, method, 0, + dos_time, dos_date, local_dir_header_ofs, ext_attributes)) + return MZ_FALSE; + + pZip->m_total_files++; + pZip->m_archive_size = cur_archive_file_ofs; + + return MZ_TRUE; +} +#endif // #ifndef MINIZ_NO_STDIO + +mz_bool mz_zip_writer_add_from_zip_reader(mz_zip_archive *pZip, + mz_zip_archive *pSource_zip, + mz_uint file_index) { + mz_uint n, bit_flags, num_alignment_padding_bytes; + mz_uint64 comp_bytes_remaining, local_dir_header_ofs; + mz_uint64 cur_src_file_ofs, cur_dst_file_ofs; + mz_uint32 + local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) / + sizeof(mz_uint32)]; + mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32; + mz_uint8 central_header[MZ_ZIP_CENTRAL_DIR_HEADER_SIZE]; + size_t orig_central_dir_size; + mz_zip_internal_state *pState; + void *pBuf; + const mz_uint8 *pSrc_central_header; + + if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING)) + return MZ_FALSE; + if (NULL == + (pSrc_central_header = mz_zip_reader_get_cdh(pSource_zip, file_index))) + return MZ_FALSE; + pState = pZip->m_pState; + + num_alignment_padding_bytes = + mz_zip_writer_compute_padding_needed_for_file_alignment(pZip); + + // no zip64 support yet + if ((pZip->m_total_files == 0xFFFF) || + ((pZip->m_archive_size + num_alignment_padding_bytes + + 
MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE) > + 0xFFFFFFFF)) + return MZ_FALSE; + + cur_src_file_ofs = + MZ_READ_LE32(pSrc_central_header + MZ_ZIP_CDH_LOCAL_HEADER_OFS); + cur_dst_file_ofs = pZip->m_archive_size; + + if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs, + pLocal_header, MZ_ZIP_LOCAL_DIR_HEADER_SIZE) != + MZ_ZIP_LOCAL_DIR_HEADER_SIZE) + return MZ_FALSE; + if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG) + return MZ_FALSE; + cur_src_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE; + + if (!mz_zip_writer_write_zeros(pZip, cur_dst_file_ofs, + num_alignment_padding_bytes)) + return MZ_FALSE; + cur_dst_file_ofs += num_alignment_padding_bytes; + local_dir_header_ofs = cur_dst_file_ofs; + if (pZip->m_file_offset_alignment) { + MZ_ASSERT((local_dir_header_ofs & (pZip->m_file_offset_alignment - 1)) == + 0); + } + + if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, pLocal_header, + MZ_ZIP_LOCAL_DIR_HEADER_SIZE) != + MZ_ZIP_LOCAL_DIR_HEADER_SIZE) + return MZ_FALSE; + cur_dst_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE; + + n = MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) + + MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS); + comp_bytes_remaining = + n + MZ_READ_LE32(pSrc_central_header + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS); + + if (NULL == + (pBuf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, + (size_t)MZ_MAX(sizeof(mz_uint32) * 4, + MZ_MIN(MZ_ZIP_MAX_IO_BUF_SIZE, + comp_bytes_remaining))))) + return MZ_FALSE; + + while (comp_bytes_remaining) { + n = (mz_uint)MZ_MIN(MZ_ZIP_MAX_IO_BUF_SIZE, comp_bytes_remaining); + if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs, pBuf, + n) != n) { + pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf); + return MZ_FALSE; + } + cur_src_file_ofs += n; + + if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, pBuf, n) != n) { + pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf); + return MZ_FALSE; + } + cur_dst_file_ofs += n; + + comp_bytes_remaining -= n; + } + + bit_flags = MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_BIT_FLAG_OFS); + if (bit_flags & 8) { + // Copy data descriptor + if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs, pBuf, + sizeof(mz_uint32) * 4) != sizeof(mz_uint32) * 4) { + pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf); + return MZ_FALSE; + } + + n = sizeof(mz_uint32) * ((MZ_READ_LE32(pBuf) == 0x08074b50) ? 
4 : 3); + if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, pBuf, n) != n) { + pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf); + return MZ_FALSE; + } + + // cur_src_file_ofs += n; + cur_dst_file_ofs += n; + } + pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf); + + // no zip64 support yet + if (cur_dst_file_ofs > 0xFFFFFFFF) + return MZ_FALSE; + + orig_central_dir_size = pState->m_central_dir.m_size; + + memcpy(central_header, pSrc_central_header, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE); + MZ_WRITE_LE32(central_header + MZ_ZIP_CDH_LOCAL_HEADER_OFS, + local_dir_header_ofs); + if (!mz_zip_array_push_back(pZip, &pState->m_central_dir, central_header, + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE)) + return MZ_FALSE; + + n = MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_FILENAME_LEN_OFS) + + MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_EXTRA_LEN_OFS) + + MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_COMMENT_LEN_OFS); + if (!mz_zip_array_push_back( + pZip, &pState->m_central_dir, + pSrc_central_header + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, n)) { + mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size, + MZ_FALSE); + return MZ_FALSE; + } + + if (pState->m_central_dir.m_size > 0xFFFFFFFF) + return MZ_FALSE; + n = (mz_uint32)orig_central_dir_size; + if (!mz_zip_array_push_back(pZip, &pState->m_central_dir_offsets, &n, 1)) { + mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size, + MZ_FALSE); + return MZ_FALSE; + } + + pZip->m_total_files++; + pZip->m_archive_size = cur_dst_file_ofs; + + return MZ_TRUE; +} + +mz_bool mz_zip_writer_finalize_archive(mz_zip_archive *pZip) { + mz_zip_internal_state *pState; + mz_uint64 central_dir_ofs, central_dir_size; + mz_uint8 hdr[MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE]; + + if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING)) + return MZ_FALSE; + + pState = pZip->m_pState; + + // no zip64 support yet + if ((pZip->m_total_files > 0xFFFF) || + ((pZip->m_archive_size + pState->m_central_dir.m_size + + MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE) > 0xFFFFFFFF)) + return MZ_FALSE; + + central_dir_ofs = 0; + central_dir_size = 0; + if (pZip->m_total_files) { + // Write central directory + central_dir_ofs = pZip->m_archive_size; + central_dir_size = pState->m_central_dir.m_size; + pZip->m_central_directory_file_ofs = central_dir_ofs; + if (pZip->m_pWrite(pZip->m_pIO_opaque, central_dir_ofs, + pState->m_central_dir.m_p, + (size_t)central_dir_size) != central_dir_size) + return MZ_FALSE; + pZip->m_archive_size += central_dir_size; + } + + // Write end of central directory record + MZ_CLEAR_OBJ(hdr); + MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_SIG_OFS, + MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG); + MZ_WRITE_LE16(hdr + MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS, + pZip->m_total_files); + MZ_WRITE_LE16(hdr + MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS, pZip->m_total_files); + MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_CDIR_SIZE_OFS, central_dir_size); + MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_CDIR_OFS_OFS, central_dir_ofs); + + if (pZip->m_pWrite(pZip->m_pIO_opaque, pZip->m_archive_size, hdr, + sizeof(hdr)) != sizeof(hdr)) + return MZ_FALSE; +#ifndef MINIZ_NO_STDIO + if ((pState->m_pFile) && (MZ_FFLUSH(pState->m_pFile) == EOF)) + return MZ_FALSE; +#endif // #ifndef MINIZ_NO_STDIO + + pZip->m_archive_size += sizeof(hdr); + + pZip->m_zip_mode = MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED; + return MZ_TRUE; +} + +mz_bool mz_zip_writer_finalize_heap_archive(mz_zip_archive *pZip, void **pBuf, + size_t *pSize) { + if ((!pZip) || (!pZip->m_pState) || (!pBuf) || (!pSize)) + return MZ_FALSE; + if 
(pZip->m_pWrite != mz_zip_heap_write_func) + return MZ_FALSE; + if (!mz_zip_writer_finalize_archive(pZip)) + return MZ_FALSE; + + *pBuf = pZip->m_pState->m_pMem; + *pSize = pZip->m_pState->m_mem_size; + pZip->m_pState->m_pMem = NULL; + pZip->m_pState->m_mem_size = pZip->m_pState->m_mem_capacity = 0; + return MZ_TRUE; +} + +mz_bool mz_zip_writer_end(mz_zip_archive *pZip) { + mz_zip_internal_state *pState; + mz_bool status = MZ_TRUE; + if ((!pZip) || (!pZip->m_pState) || (!pZip->m_pAlloc) || (!pZip->m_pFree) || + ((pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) && + (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED))) + return MZ_FALSE; + + pState = pZip->m_pState; + pZip->m_pState = NULL; + mz_zip_array_clear(pZip, &pState->m_central_dir); + mz_zip_array_clear(pZip, &pState->m_central_dir_offsets); + mz_zip_array_clear(pZip, &pState->m_sorted_central_dir_offsets); + +#ifndef MINIZ_NO_STDIO + if (pState->m_pFile) { + MZ_FCLOSE(pState->m_pFile); + pState->m_pFile = NULL; + } +#endif // #ifndef MINIZ_NO_STDIO + + if ((pZip->m_pWrite == mz_zip_heap_write_func) && (pState->m_pMem)) { + pZip->m_pFree(pZip->m_pAlloc_opaque, pState->m_pMem); + pState->m_pMem = NULL; + } + + pZip->m_pFree(pZip->m_pAlloc_opaque, pState); + pZip->m_zip_mode = MZ_ZIP_MODE_INVALID; + return status; +} + +#ifndef MINIZ_NO_STDIO +mz_bool mz_zip_add_mem_to_archive_file_in_place( + const char *pZip_filename, const char *pArchive_name, const void *pBuf, + size_t buf_size, const void *pComment, mz_uint16 comment_size, + mz_uint level_and_flags) { + mz_bool status, created_new_archive = MZ_FALSE; + mz_zip_archive zip_archive; + struct MZ_FILE_STAT_STRUCT file_stat; + MZ_CLEAR_OBJ(zip_archive); + if ((int)level_and_flags < 0) + level_and_flags = MZ_DEFAULT_LEVEL; + if ((!pZip_filename) || (!pArchive_name) || ((buf_size) && (!pBuf)) || + ((comment_size) && (!pComment)) || + ((level_and_flags & 0xF) > MZ_UBER_COMPRESSION)) + return MZ_FALSE; + if (!mz_zip_writer_validate_archive_name(pArchive_name)) + return MZ_FALSE; + if (MZ_FILE_STAT(pZip_filename, &file_stat) != 0) { + // Create a new archive. + if (!mz_zip_writer_init_file(&zip_archive, pZip_filename, 0)) + return MZ_FALSE; + created_new_archive = MZ_TRUE; + } else { + // Append to an existing archive. + if (!mz_zip_reader_init_file(&zip_archive, pZip_filename, + level_and_flags | + MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY)) + return MZ_FALSE; + if (!mz_zip_writer_init_from_reader(&zip_archive, pZip_filename)) { + mz_zip_reader_end(&zip_archive); + return MZ_FALSE; + } + } + status = + mz_zip_writer_add_mem_ex(&zip_archive, pArchive_name, pBuf, buf_size, + pComment, comment_size, level_and_flags, 0, 0); + // Always finalize, even if adding failed for some reason, so we have a valid + // central directory. (This may not always succeed, but we can try.) + if (!mz_zip_writer_finalize_archive(&zip_archive)) + status = MZ_FALSE; + if (!mz_zip_writer_end(&zip_archive)) + status = MZ_FALSE; + if ((!status) && (created_new_archive)) { + // It's a new archive and something went wrong, so just delete it. 
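/* Illustrative one-shot usage (an editor's sketch, not miniz code): this
   helper creates the archive on first use and appends on later calls; the
   failed new-archive case is cleaned up just below. File and entry names
   are hypothetical; assumes <string.h> for strlen().

   const char *msg = "first line\n";
   if (!mz_zip_add_mem_to_archive_file_in_place(
           "logs.zip", "log/2022-12-03.txt", msg, strlen(msg), NULL, 0,
           MZ_DEFAULT_LEVEL)) {
     // creating or appending to logs.zip failed
   }
*/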
+ int ignoredStatus = MZ_DELETE_FILE(pZip_filename); + (void)ignoredStatus; + } + return status; +} + +void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename, + const char *pArchive_name, + size_t *pSize, mz_uint flags) { + int file_index; + mz_zip_archive zip_archive; + void *p = NULL; + + if (pSize) + *pSize = 0; + + if ((!pZip_filename) || (!pArchive_name)) + return NULL; + + MZ_CLEAR_OBJ(zip_archive); + if (!mz_zip_reader_init_file(&zip_archive, pZip_filename, + flags | + MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY)) + return NULL; + + if ((file_index = mz_zip_reader_locate_file(&zip_archive, pArchive_name, NULL, + flags)) >= 0) + p = mz_zip_reader_extract_to_heap(&zip_archive, file_index, pSize, flags); + + mz_zip_reader_end(&zip_archive); + return p; +} + +#endif // #ifndef MINIZ_NO_STDIO + +#endif // #ifndef MINIZ_NO_ARCHIVE_WRITING_APIS + +#endif // #ifndef MINIZ_NO_ARCHIVE_APIS + +#ifdef __cplusplus +} +#endif + +#endif // MINIZ_HEADER_FILE_ONLY + +/* + This is free and unencumbered software released into the public domain. + + Anyone is free to copy, modify, publish, use, compile, sell, or + distribute this software, either in source code form or as a compiled + binary, for any purpose, commercial or non-commercial, and by any + means. + + In jurisdictions that recognize copyright laws, the author or authors + of this software dedicate any and all copyright interest in the + software to the public domain. We make this dedication for the benefit + of the public at large and to the detriment of our heirs and + successors. We intend this dedication to be an overt act of + relinquishment in perpetuity of all present and future rights to this + software under copyright law. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR + OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. + + For more information, please refer to +*/ diff --git a/v_windows/v/old/thirdparty/zip/zip.c b/v_windows/v/old/thirdparty/zip/zip.c new file mode 100644 index 0000000..503df25 --- /dev/null +++ b/v_windows/v/old/thirdparty/zip/zip.c @@ -0,0 +1,1034 @@ +/* + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ +#define __STDC_WANT_LIB_EXT1__ 1 + +#include +#include +#include + +#if defined(_WIN32) || defined(__WIN32__) || defined(_MSC_VER) || \ + defined(__MINGW32__) +/* Win32, DOS, MSVC, MSVS */ +#include + +#define MKDIR(DIRNAME) _mkdir(DIRNAME) +#define STRCLONE(STR) ((STR) ? _strdup(STR) : NULL) +#define HAS_DEVICE(P) \ + ((((P)[0] >= 'A' && (P)[0] <= 'Z') || ((P)[0] >= 'a' && (P)[0] <= 'z')) && \ + (P)[1] == ':') +#define FILESYSTEM_PREFIX_LEN(P) (HAS_DEVICE(P) ? 
2 : 0) + +#else + +#include // needed for symlink() on BSD +int symlink(const char *target, const char *linkpath); // needed on Linux + +#define MKDIR(DIRNAME) mkdir(DIRNAME, 0755) +#define STRCLONE(STR) ((STR) ? strdup(STR) : NULL) + +#endif + +#include "miniz.h" +#include "zip.h" + +#ifndef HAS_DEVICE +#define HAS_DEVICE(P) 0 +#endif + +#ifndef FILESYSTEM_PREFIX_LEN +#define FILESYSTEM_PREFIX_LEN(P) 0 +#endif + +#ifndef ISSLASH +#define ISSLASH(C) ((C) == '/' || (C) == '\\') +#endif + +#define CLEANUP(ptr) \ + do { \ + if (ptr) { \ + free((void *)ptr); \ + ptr = NULL; \ + } \ + } while (0) + +static const char *base_name(const char *name) { + char const *p; + char const *base = name += FILESYSTEM_PREFIX_LEN(name); + int all_slashes = 1; + + for (p = name; *p; p++) { + if (ISSLASH(*p)) + base = p + 1; + else + all_slashes = 0; + } + + /* If NAME is all slashes, arrange to return `/'. */ + if (*base == '\0' && ISSLASH(*name) && all_slashes) + --base; + + return base; +} + +static int mkpath(char *path) { + char *p; + char npath[MAX_PATH + 1]; + int len = 0; + int has_device = HAS_DEVICE(path); + + memset(npath, 0, MAX_PATH + 1); + if (has_device) { + // only on windows + npath[0] = path[0]; + npath[1] = path[1]; + len = 2; + } + for (p = path + len; *p && len < MAX_PATH; p++) { + if (ISSLASH(*p) && ((!has_device && len > 0) || (has_device && len > 2))) { +#if defined(_WIN32) || defined(__WIN32__) || defined(_MSC_VER) || \ + defined(__MINGW32__) +#else + if ('\\' == *p) { + *p = '/'; + } +#endif + + if (MKDIR(npath) == -1) { + if (errno != EEXIST) { + return -1; + } + } + } + npath[len++] = *p; + } + + return 0; +} + +static char *strrpl(const char *str, size_t n, char oldchar, char newchar) { + char c; + size_t i; + char *rpl = (char *)calloc((1 + n), sizeof(char)); + char *begin = rpl; + if (!rpl) { + return NULL; + } + + for (i = 0; (i < n) && (c = *str++); ++i) { + if (c == oldchar) { + c = newchar; + } + *rpl++ = c; + } + + return begin; +} + +struct zip_entry_t { + int index; + char *name; + mz_uint64 uncomp_size; + mz_uint64 comp_size; + mz_uint32 uncomp_crc32; + mz_uint64 offset; + mz_uint8 header[MZ_ZIP_LOCAL_DIR_HEADER_SIZE]; + mz_uint64 header_offset; + mz_uint16 method; + mz_zip_writer_add_state state; + tdefl_compressor comp; + mz_uint32 external_attr; + time_t m_time; +}; + +struct zip_t { + mz_zip_archive archive; + mz_uint level; + struct zip_entry_t entry; +}; + +struct zip_t *zip_open(const char *zipname, int level, char mode) { + struct zip_t *zip = NULL; + + if (!zipname || strlen(zipname) < 1) { + // zip_t archive name is empty or NULL + goto cleanup; + } + + if (level < 0) + level = MZ_DEFAULT_LEVEL; + if ((level & 0xF) > MZ_UBER_COMPRESSION) { + // Wrong compression level + goto cleanup; + } + + zip = (struct zip_t *)calloc((size_t)1, sizeof(struct zip_t)); + if (!zip) + goto cleanup; + + zip->level = (mz_uint)level; + switch (mode) { + case 'w': + // Create a new archive. 
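+    // 'w': mz_zip_writer_init_file() below creates (or truncates) the file on
+    // disk; the trailing 0 means no space is reserved at the start of the
+    // archive before entries are written.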
+ if (!mz_zip_writer_init_file(&(zip->archive), zipname, 0)) { + // Cannot initialize zip_archive writer + goto cleanup; + } + break; + + case 'r': + case 'a': + if (!mz_zip_reader_init_file( + &(zip->archive), zipname, + zip->level | MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY)) { + // An archive file does not exist or cannot initialize + // zip_archive reader + goto cleanup; + } + if (mode == 'a' && + !mz_zip_writer_init_from_reader(&(zip->archive), zipname)) { + mz_zip_reader_end(&(zip->archive)); + goto cleanup; + } + break; + + default: + goto cleanup; + } + + return zip; + +cleanup: + CLEANUP(zip); + return NULL; +} + +void zip_close(struct zip_t *zip) { + if (zip) { + // Always finalize, even if adding failed for some reason, so we have a + // valid central directory. + mz_zip_writer_finalize_archive(&(zip->archive)); + + mz_zip_writer_end(&(zip->archive)); + mz_zip_reader_end(&(zip->archive)); + + CLEANUP(zip); + } +} + +int zip_is64(struct zip_t *zip) { + if (!zip) { + // zip_t handler is not initialized + return -1; + } + + if (!zip->archive.m_pState) { + // zip state is not initialized + return -1; + } + + return (int)zip->archive.m_pState->m_zip64; +} + +int zip_entry_open(struct zip_t *zip, const char *entryname) { + size_t entrylen = 0; + mz_zip_archive *pzip = NULL; + mz_uint num_alignment_padding_bytes, level; + mz_zip_archive_file_stat stats; + + if (!zip || !entryname) { + return -1; + } + + entrylen = strlen(entryname); + if (entrylen < 1) { + return -1; + } + + /* + .ZIP File Format Specification Version: 6.3.3 + + 4.4.17.1 The name of the file, with optional relative path. + The path stored MUST not contain a drive or + device letter, or a leading slash. All slashes + MUST be forward slashes '/' as opposed to + backwards slashes '\' for compatibility with Amiga + and UNIX file systems etc. If input came from standard + input, there is no file name field. 
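+
+    (The requirement above is why the entry name is passed through strrpl()
+    immediately after this comment, converting any back-slashes to forward
+    slashes before the name is stored.)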
+ */ + zip->entry.name = strrpl(entryname, entrylen, '\\', '/'); + if (!zip->entry.name) { + // Cannot parse zip entry name + return -1; + } + + pzip = &(zip->archive); + if (pzip->m_zip_mode == MZ_ZIP_MODE_READING) { + zip->entry.index = + mz_zip_reader_locate_file(pzip, zip->entry.name, NULL, 0); + if (zip->entry.index < 0) { + goto cleanup; + } + + if (!mz_zip_reader_file_stat(pzip, (mz_uint)zip->entry.index, &stats)) { + goto cleanup; + } + + zip->entry.comp_size = stats.m_comp_size; + zip->entry.uncomp_size = stats.m_uncomp_size; + zip->entry.uncomp_crc32 = stats.m_crc32; + zip->entry.offset = stats.m_central_dir_ofs; + zip->entry.header_offset = stats.m_local_header_ofs; + zip->entry.method = stats.m_method; + zip->entry.external_attr = stats.m_external_attr; +#ifndef MINIZ_NO_TIME + zip->entry.m_time = stats.m_time; +#endif + + return 0; + } + + zip->entry.index = (int)zip->archive.m_total_files; + zip->entry.comp_size = 0; + zip->entry.uncomp_size = 0; + zip->entry.uncomp_crc32 = MZ_CRC32_INIT; + zip->entry.offset = zip->archive.m_archive_size; + zip->entry.header_offset = zip->archive.m_archive_size; + memset(zip->entry.header, 0, MZ_ZIP_LOCAL_DIR_HEADER_SIZE * sizeof(mz_uint8)); + zip->entry.method = 0; + + // UNIX or APPLE +#if MZ_PLATFORM == 3 || MZ_PLATFORM == 19 + // regular file with rw-r--r-- persmissions + zip->entry.external_attr = (mz_uint32)(0100644) << 16; +#else + zip->entry.external_attr = 0; +#endif + + num_alignment_padding_bytes = + mz_zip_writer_compute_padding_needed_for_file_alignment(pzip); + + if (!pzip->m_pState || (pzip->m_zip_mode != MZ_ZIP_MODE_WRITING)) { + // Wrong zip mode + goto cleanup; + } + if (zip->level & MZ_ZIP_FLAG_COMPRESSED_DATA) { + // Wrong zip compression level + goto cleanup; + } + // no zip64 support yet + if ((pzip->m_total_files == 0xFFFF) || + ((pzip->m_archive_size + num_alignment_padding_bytes + + MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + + entrylen) > 0xFFFFFFFF)) { + // No zip64 support yet + goto cleanup; + } + if (!mz_zip_writer_write_zeros(pzip, zip->entry.offset, + num_alignment_padding_bytes + + sizeof(zip->entry.header))) { + // Cannot memset zip entry header + goto cleanup; + } + + zip->entry.header_offset += num_alignment_padding_bytes; + if (pzip->m_file_offset_alignment) { + MZ_ASSERT( + (zip->entry.header_offset & (pzip->m_file_offset_alignment - 1)) == 0); + } + zip->entry.offset += num_alignment_padding_bytes + sizeof(zip->entry.header); + + if (pzip->m_pWrite(pzip->m_pIO_opaque, zip->entry.offset, zip->entry.name, + entrylen) != entrylen) { + // Cannot write data to zip entry + goto cleanup; + } + + zip->entry.offset += entrylen; + level = zip->level & 0xF; + if (level) { + zip->entry.state.m_pZip = pzip; + zip->entry.state.m_cur_archive_file_ofs = zip->entry.offset; + zip->entry.state.m_comp_size = 0; + + if (tdefl_init(&(zip->entry.comp), mz_zip_writer_add_put_buf_callback, + &(zip->entry.state), + (int)tdefl_create_comp_flags_from_zip_params( + (int)level, -15, MZ_DEFAULT_STRATEGY)) != + TDEFL_STATUS_OKAY) { + // Cannot initialize the zip compressor + goto cleanup; + } + } + + zip->entry.m_time = time(NULL); + + return 0; + +cleanup: + CLEANUP(zip->entry.name); + return -1; +} + +int zip_entry_openbyindex(struct zip_t *zip, int index) { + mz_zip_archive *pZip = NULL; + mz_zip_archive_file_stat stats; + mz_uint namelen; + const mz_uint8 *pHeader; + const char *pFilename; + + if (!zip) { + // zip_t handler is not initialized + return -1; + } + + pZip = &(zip->archive); + if (pZip->m_zip_mode != 
MZ_ZIP_MODE_READING) { + // open by index requires readonly mode + return -1; + } + + if (index < 0 || (mz_uint)index >= pZip->m_total_files) { + // index out of range + return -1; + } + + if (!(pHeader = &MZ_ZIP_ARRAY_ELEMENT( + &pZip->m_pState->m_central_dir, mz_uint8, + MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets, + mz_uint32, index)))) { + // cannot find header in central directory + return -1; + } + + namelen = MZ_READ_LE16(pHeader + MZ_ZIP_CDH_FILENAME_LEN_OFS); + pFilename = (const char *)pHeader + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE; + + /* + .ZIP File Format Specification Version: 6.3.3 + + 4.4.17.1 The name of the file, with optional relative path. + The path stored MUST not contain a drive or + device letter, or a leading slash. All slashes + MUST be forward slashes '/' as opposed to + backwards slashes '\' for compatibility with Amiga + and UNIX file systems etc. If input came from standard + input, there is no file name field. + */ + zip->entry.name = strrpl(pFilename, namelen, '\\', '/'); + if (!zip->entry.name) { + // local entry name is NULL + return -1; + } + + if (!mz_zip_reader_file_stat(pZip, (mz_uint)index, &stats)) { + return -1; + } + + zip->entry.index = index; + zip->entry.comp_size = stats.m_comp_size; + zip->entry.uncomp_size = stats.m_uncomp_size; + zip->entry.uncomp_crc32 = stats.m_crc32; + zip->entry.offset = stats.m_central_dir_ofs; + zip->entry.header_offset = stats.m_local_header_ofs; + zip->entry.method = stats.m_method; + zip->entry.external_attr = stats.m_external_attr; +#ifndef MINIZ_NO_TIME + zip->entry.m_time = stats.m_time; +#endif + + return 0; +} + +int zip_entry_close(struct zip_t *zip) { + mz_zip_archive *pzip = NULL; + mz_uint level; + tdefl_status done; + mz_uint16 entrylen; + mz_uint16 dos_time, dos_date; + int status = -1; + + if (!zip) { + // zip_t handler is not initialized + goto cleanup; + } + + pzip = &(zip->archive); + if (pzip->m_zip_mode == MZ_ZIP_MODE_READING) { + status = 0; + goto cleanup; + } + + level = zip->level & 0xF; + if (level) { + done = tdefl_compress_buffer(&(zip->entry.comp), "", 0, TDEFL_FINISH); + if (done != TDEFL_STATUS_DONE && done != TDEFL_STATUS_OKAY) { + // Cannot flush compressed buffer + goto cleanup; + } + zip->entry.comp_size = zip->entry.state.m_comp_size; + zip->entry.offset = zip->entry.state.m_cur_archive_file_ofs; + zip->entry.method = MZ_DEFLATED; + } + + entrylen = (mz_uint16)strlen(zip->entry.name); + // no zip64 support yet + if ((zip->entry.comp_size > 0xFFFFFFFF) || (zip->entry.offset > 0xFFFFFFFF)) { + // No zip64 support, yet + goto cleanup; + } + +#if !defined(MINIZ_NO_TIME) && !defined(MINIZ_NO_ARCHIVE_WRITING_APIS) + mz_zip_time_t_to_dos_time(zip->entry.m_time, &dos_time, &dos_date); +#endif + if (!mz_zip_writer_create_local_dir_header( + pzip, zip->entry.header, entrylen, 0, zip->entry.uncomp_size, + zip->entry.comp_size, zip->entry.uncomp_crc32, zip->entry.method, 0, + dos_time, dos_date)) { + // Cannot create zip entry header + goto cleanup; + } + + if (pzip->m_pWrite(pzip->m_pIO_opaque, zip->entry.header_offset, + zip->entry.header, + sizeof(zip->entry.header)) != sizeof(zip->entry.header)) { + // Cannot write zip entry header + goto cleanup; + } + + if (!mz_zip_writer_add_to_central_dir( + pzip, zip->entry.name, entrylen, NULL, 0, "", 0, + zip->entry.uncomp_size, zip->entry.comp_size, zip->entry.uncomp_crc32, + zip->entry.method, 0, dos_time, dos_date, zip->entry.header_offset, + zip->entry.external_attr)) { + // Cannot write to zip central dir + goto cleanup; + } + + 
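+  // At this point the local file header has been rewritten in place and the
+  // central directory record appended, so bump the writer's counters to keep
+  // the in-memory archive state consistent with what is on disk.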
pzip->m_total_files++; + pzip->m_archive_size = zip->entry.offset; + status = 0; + +cleanup: + if (zip) { + zip->entry.m_time = 0; + CLEANUP(zip->entry.name); + } + return status; +} + +const char *zip_entry_name(struct zip_t *zip) { + if (!zip) { + // zip_t handler is not initialized + return NULL; + } + + return zip->entry.name; +} + +int zip_entry_index(struct zip_t *zip) { + if (!zip) { + // zip_t handler is not initialized + return -1; + } + + return zip->entry.index; +} + +int zip_entry_isdir(struct zip_t *zip) { + if (!zip) { + // zip_t handler is not initialized + return -1; + } + + if (zip->entry.index < 0) { + // zip entry is not opened + return -1; + } + + return (int)mz_zip_reader_is_file_a_directory(&zip->archive, + (mz_uint)zip->entry.index); +} + +unsigned long long zip_entry_size(struct zip_t *zip) { + return zip ? zip->entry.uncomp_size : 0; +} + +unsigned int zip_entry_crc32(struct zip_t *zip) { + return zip ? zip->entry.uncomp_crc32 : 0; +} + +int zip_entry_write(struct zip_t *zip, const void *buf, size_t bufsize) { + mz_uint level; + mz_zip_archive *pzip = NULL; + tdefl_status status; + + if (!zip) { + // zip_t handler is not initialized + return -1; + } + + pzip = &(zip->archive); + if (buf && bufsize > 0) { + zip->entry.uncomp_size += bufsize; + zip->entry.uncomp_crc32 = (mz_uint32)mz_crc32( + zip->entry.uncomp_crc32, (const mz_uint8 *)buf, bufsize); + + level = zip->level & 0xF; + if (!level) { + if ((pzip->m_pWrite(pzip->m_pIO_opaque, zip->entry.offset, buf, + bufsize) != bufsize)) { + // Cannot write buffer + return -1; + } + zip->entry.offset += bufsize; + zip->entry.comp_size += bufsize; + } else { + status = tdefl_compress_buffer(&(zip->entry.comp), buf, bufsize, + TDEFL_NO_FLUSH); + if (status != TDEFL_STATUS_DONE && status != TDEFL_STATUS_OKAY) { + // Cannot compress buffer + return -1; + } + } + } + + return 0; +} + +int zip_entry_fwrite(struct zip_t *zip, const char *filename) { + int status = 0; + size_t n = 0; + FILE *stream = NULL; + mz_uint8 buf[MZ_ZIP_MAX_IO_BUF_SIZE]; + struct MZ_FILE_STAT_STRUCT file_stat; + + if (!zip) { + // zip_t handler is not initialized + return -1; + } + + memset(buf, 0, MZ_ZIP_MAX_IO_BUF_SIZE); + memset((void *)&file_stat, 0, sizeof(struct MZ_FILE_STAT_STRUCT)); + if (MZ_FILE_STAT(filename, &file_stat) != 0) { + // problem getting information - check errno + return -1; + } + + if ((file_stat.st_mode & 0200) == 0) { + // MS-DOS read-only attribute + zip->entry.external_attr |= 0x01; + } + zip->entry.external_attr |= (mz_uint32)((file_stat.st_mode & 0xFFFF) << 16); + zip->entry.m_time = file_stat.st_mtime; + +#if defined(_MSC_VER) + if (fopen_s(&stream, filename, "rb")) +#else + if (!(stream = fopen(filename, "rb"))) +#endif + { + // Cannot open filename + return -1; + } + + while ((n = fread(buf, sizeof(mz_uint8), MZ_ZIP_MAX_IO_BUF_SIZE, stream)) > + 0) { + if (zip_entry_write(zip, buf, n) < 0) { + status = -1; + break; + } + } + fclose(stream); + + return status; +} + +ssize_t zip_entry_read(struct zip_t *zip, void **buf, size_t *bufsize) { + mz_zip_archive *pzip = NULL; + mz_uint idx; + size_t size = 0; + + if (!zip) { + // zip_t handler is not initialized + return -1; + } + + pzip = &(zip->archive); + if (pzip->m_zip_mode != MZ_ZIP_MODE_READING || zip->entry.index < 0) { + // the entry is not found or we do not have read access + return -1; + } + + idx = (mz_uint)zip->entry.index; + if (mz_zip_reader_is_file_a_directory(pzip, idx)) { + // the entry is a directory + return -1; + } + + *buf = 
mz_zip_reader_extract_to_heap(pzip, idx, &size, 0); + if (*buf && bufsize) { + *bufsize = size; + } + return size; +} + +ssize_t zip_entry_noallocread(struct zip_t *zip, void *buf, size_t bufsize) { + mz_zip_archive *pzip = NULL; + + if (!zip) { + // zip_t handler is not initialized + return -1; + } + + pzip = &(zip->archive); + if (pzip->m_zip_mode != MZ_ZIP_MODE_READING || zip->entry.index < 0) { + // the entry is not found or we do not have read access + return -1; + } + + if (!mz_zip_reader_extract_to_mem_no_alloc(pzip, (mz_uint)zip->entry.index, + buf, bufsize, 0, NULL, 0)) { + return -1; + } + + return (ssize_t)zip->entry.uncomp_size; +} + +int zip_entry_fread(struct zip_t *zip, const char *filename) { + mz_zip_archive *pzip = NULL; + mz_uint idx; + mz_uint32 xattr = 0; + mz_zip_archive_file_stat info; + + if (!zip) { + // zip_t handler is not initialized + return -1; + } + + memset((void *)&info, 0, sizeof(mz_zip_archive_file_stat)); + pzip = &(zip->archive); + if (pzip->m_zip_mode != MZ_ZIP_MODE_READING || zip->entry.index < 0) { + // the entry is not found or we do not have read access + return -1; + } + + idx = (mz_uint)zip->entry.index; + if (mz_zip_reader_is_file_a_directory(pzip, idx)) { + // the entry is a directory + return -1; + } + + if (!mz_zip_reader_extract_to_file(pzip, idx, filename, 0)) { + return -1; + } + +#if defined(_MSC_VER) +#else + if (!mz_zip_reader_file_stat(pzip, idx, &info)) { + // Cannot get information about zip archive; + return -1; + } + + xattr = (info.m_external_attr >> 16) & 0xFFFF; + if (xattr > 0) { + if (chmod(filename, (mode_t)xattr) < 0) { + return -1; + } + } +#endif + + return 0; +} + +int zip_entry_extract(struct zip_t *zip, + size_t (*on_extract)(void *arg, unsigned long long offset, + const void *buf, size_t bufsize), + void *arg) { + mz_zip_archive *pzip = NULL; + mz_uint idx; + + if (!zip) { + // zip_t handler is not initialized + return -1; + } + + pzip = &(zip->archive); + if (pzip->m_zip_mode != MZ_ZIP_MODE_READING || zip->entry.index < 0) { + // the entry is not found or we do not have read access + return -1; + } + + idx = (mz_uint)zip->entry.index; + return (mz_zip_reader_extract_to_callback(pzip, idx, on_extract, arg, 0)) + ? 0 + : -1; +} + +int zip_total_entries(struct zip_t *zip) { + if (!zip) { + // zip_t handler is not initialized + return -1; + } + + return (int)zip->archive.m_total_files; +} + +int zip_create(const char *zipname, const char *filenames[], size_t len) { + int status = 0; + size_t i; + mz_zip_archive zip_archive; + struct MZ_FILE_STAT_STRUCT file_stat; + mz_uint32 ext_attributes = 0; + + if (!zipname || strlen(zipname) < 1) { + // zip_t archive name is empty or NULL + return -1; + } + + // Create a new archive. 
+ if (!memset(&(zip_archive), 0, sizeof(zip_archive))) { + // Cannot memset zip archive + return -1; + } + + if (!mz_zip_writer_init_file(&zip_archive, zipname, 0)) { + // Cannot initialize zip_archive writer + return -1; + } + + memset((void *)&file_stat, 0, sizeof(struct MZ_FILE_STAT_STRUCT)); + + for (i = 0; i < len; ++i) { + const char *name = filenames[i]; + if (!name) { + status = -1; + break; + } + + if (MZ_FILE_STAT(name, &file_stat) != 0) { + // problem getting information - check errno + status = -1; + break; + } + + if ((file_stat.st_mode & 0200) == 0) { + // MS-DOS read-only attribute + ext_attributes |= 0x01; + } + ext_attributes |= (mz_uint32)((file_stat.st_mode & 0xFFFF) << 16); + + if (!mz_zip_writer_add_file(&zip_archive, base_name(name), name, "", 0, + ZIP_DEFAULT_COMPRESSION_LEVEL, + ext_attributes)) { + // Cannot add file to zip_archive + status = -1; + break; + } + } + + mz_zip_writer_finalize_archive(&zip_archive); + mz_zip_writer_end(&zip_archive); + return status; +} + +static char *normalize(char *name, char *const nname, size_t len) { + size_t offn = 0; + size_t offnn = 0, ncpy = 0; + + if (name == NULL || nname == NULL || len <= 0) { + return NULL; + } + // skip trailing '/' + while (ISSLASH(*name)) + name++; + + for (; offn < len; offn++) { + if (ISSLASH(name[offn])) { + if (ncpy > 0 && strncmp(&nname[offnn], ".", 1) && + strncmp(&nname[offnn], "..", 2)) { + offnn += ncpy; + nname[offnn++] = name[offn]; // append '/' + } + ncpy = 0; + } else { + nname[offnn + ncpy] = name[offn]; + ncpy++; + } + } + + // at the end, extra check what we've already copied + if (ncpy == 0 || !strncmp(&nname[offnn], ".", 1) || + !strncmp(&nname[offnn], "..", 2)) { + nname[offnn] = 0; + } + return nname; +} + +static int extract(mz_zip_archive *zip_archive, const char *dir, + int (*on_extract)(const char *filename, void *arg), + void *arg) { + int status = -1; + mz_uint i, n; + char path[MAX_PATH + 1]; + char symlink_to[MAX_PATH + 1]; + mz_zip_archive_file_stat info; + size_t dirlen = 0; + mz_uint32 xattr = 0; + + memset(path, 0, sizeof(path)); + memset(symlink_to, 0, sizeof(symlink_to)); + + dirlen = strlen(dir); + if (dirlen + 1 > MAX_PATH) { + return -1; + } + + memset((void *)&info, 0, sizeof(mz_zip_archive_file_stat)); + +#if defined(_MSC_VER) + strcpy_s(path, MAX_PATH, dir); +#else + strcpy(path, dir); +#endif + + if (!ISSLASH(path[dirlen - 1])) { +#if defined(_WIN32) || defined(__WIN32__) + path[dirlen] = '\\'; +#else + path[dirlen] = '/'; +#endif + ++dirlen; + } + + // Get and print information about each file in the archive. 
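+  // For each entry: stat it, normalize its stored name, create any missing
+  // parent directories, then either recreate a symlink (for entries made on
+  // Unix/macOS with the symlink attribute set) or extract the file data and,
+  // on non-Windows builds, restore its permission bits.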
+ n = mz_zip_reader_get_num_files(zip_archive); + for (i = 0; i < n; ++i) { + if (!mz_zip_reader_file_stat(zip_archive, i, &info)) { + // Cannot get information about zip archive; + goto out; + } + if (!normalize(info.m_filename, info.m_filename, strlen(info.m_filename))) { + // Cannot normalize file name; + goto out; + } +#if defined(_MSC_VER) + strncpy_s(&path[dirlen], MAX_PATH - dirlen, info.m_filename, + MAX_PATH - dirlen); +#else + strncpy(&path[dirlen], info.m_filename, MAX_PATH - dirlen); +#endif + if (mkpath(path) < 0) { + // Cannot make a path + goto out; + } + + if ((((info.m_version_made_by >> 8) == 3) || + ((info.m_version_made_by >> 8) == + 19)) // if zip is produced on Unix or macOS (3 and 19 from + // section 4.4.2.2 of zip standard) + && info.m_external_attr & + (0x20 << 24)) { // and has sym link attribute (0x80 is file, 0x40 + // is directory) +#if defined(_WIN32) || defined(__WIN32__) || defined(_MSC_VER) || \ + defined(__MINGW32__) +#else + if (info.m_uncomp_size > MAX_PATH || + !mz_zip_reader_extract_to_mem_no_alloc(zip_archive, i, symlink_to, + MAX_PATH, 0, NULL, 0)) { + goto out; + } + symlink_to[info.m_uncomp_size] = '\0'; + if (symlink(symlink_to, path) != 0) { + goto out; + } +#endif + } else { + if (!mz_zip_reader_is_file_a_directory(zip_archive, i)) { + if (!mz_zip_reader_extract_to_file(zip_archive, i, path, 0)) { + // Cannot extract zip archive to file + goto out; + } + } + +#if defined(_MSC_VER) +#else + xattr = (info.m_external_attr >> 16) & 0xFFFF; + if (xattr > 0) { + if (chmod(path, (mode_t)xattr) < 0) { + goto out; + } + } +#endif + } + + if (on_extract) { + if (on_extract(path, arg) < 0) { + goto out; + } + } + } + status = 0; + +out: + // Close the archive, freeing any resources it was using + if (!mz_zip_reader_end(zip_archive)) { + // Cannot end zip reader + status = -1; + } + return status; +} + +int zip_extract(const char *zipname, const char *dir, + int (*on_extract)(const char *filename, void *arg), void *arg) { + mz_zip_archive zip_archive; + if (!zipname || !dir) { + // Cannot parse zip archive name + return -1; + } + if (!memset(&zip_archive, 0, sizeof(mz_zip_archive))) { + // Cannot memset zip archive + return MZ_FALSE; + } + // Now try to open the archive. 
+ if (!mz_zip_reader_init_file(&zip_archive, zipname, 0)) { + // Cannot initialize zip_archive reader + return MZ_FALSE; + } + + int status = extract(&zip_archive, dir, on_extract, arg); + + return status; +} + +int zip_extract_without_callback(const char *zipname, const char *dir) { + return zip_extract(zipname, dir, NULL, NULL); +} // for simple V bind ¯\_(ツ)_/¯ + +int zip_extract_stream(const char *stream, size_t size, const char *dir, + int (*on_extract)(const char *filename, void *arg), + void *arg) { + mz_zip_archive zip_archive; + if (!stream || !dir) { + // Cannot parse zip archive stream + return -1; + } + if (!memset(&zip_archive, 0, sizeof(mz_zip_archive))) { + // Cannot memset zip archive + return MZ_FALSE; + } + if (!mz_zip_reader_init_mem(&zip_archive, stream, size, 0)) { + // Cannot initialize zip_archive reader + return MZ_FALSE; + } + + int status = extract(&zip_archive, dir, on_extract, arg); + + return status; +} diff --git a/v_windows/v/old/thirdparty/zip/zip.h b/v_windows/v/old/thirdparty/zip/zip.h new file mode 100644 index 0000000..cd4198b --- /dev/null +++ b/v_windows/v/old/thirdparty/zip/zip.h @@ -0,0 +1,345 @@ +/* + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#pragma once +#ifndef ZIP_H +#define ZIP_H + +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +#if !defined(_SSIZE_T_DEFINED) && !defined(_SSIZE_T_DEFINED_) && \ + !defined(__DEFINED_ssize_t) && !defined(__ssize_t_defined) && \ + !defined(_SSIZE_T) && !defined(_SSIZE_T_) && !defined(_SSIZE_T_DECLARED) + +// 64-bit Windows is the only mainstream platform +// where sizeof(long) != sizeof(void*) +#ifdef _WIN64 +typedef long long ssize_t; /* byte count or error */ +#else +typedef long ssize_t; /* byte count or error */ +#endif + +#define _SSIZE_T_DEFINED +#define _SSIZE_T_DEFINED_ +#define __DEFINED_ssize_t +#define __ssize_t_defined +#define _SSIZE_T +#define _SSIZE_T_ +#define _SSIZE_T_DECLARED + +#endif + +#ifndef MAX_PATH +#define MAX_PATH 32767 /* # chars in a path name including NULL */ +#endif + +/** + * @mainpage + * + * Documenation for @ref zip. + */ + +/** + * @addtogroup zip + * @{ + */ + +/** + * Default zip compression level. + */ + +#define ZIP_DEFAULT_COMPRESSION_LEVEL 6 + +/** + * @struct zip_t + * + * This data structure is used throughout the library to represent zip archive - + * forward declaration. + */ +struct zip_t; + +/** + * Opens zip archive with compression level using the given mode. + * + * @param zipname zip archive file name. + * @param level compression level (0-9 are the standard zlib-style levels). + * @param mode file access mode. + * - 'r': opens a file for reading/extracting (the file must exists). + * - 'w': creates an empty file for writing. + * - 'a': appends to an existing archive. + * + * @return the zip archive handler or NULL on error + */ +extern struct zip_t *zip_open(const char *zipname, int level, char mode); + +/** + * Closes the zip archive, releases resources - always finalize. + * + * @param zip zip archive handler. 
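+ *
+ * @note Example usage (illustrative sketch; the archive name is a
+ * placeholder). A handle returned by zip_open() is released with
+ * zip_close(), which also finalizes the central directory:
+ * @code
+ *   struct zip_t *zip = zip_open("example.zip", ZIP_DEFAULT_COMPRESSION_LEVEL, 'w');
+ *   if (zip) {
+ *     // add entries with zip_entry_open()/zip_entry_write()/zip_entry_close()
+ *     zip_close(zip); // finalize and free
+ *   }
+ * @endcode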
+ */ +extern void zip_close(struct zip_t *zip); + +/** + * Determines if the archive has a zip64 end of central directory headers. + * + * @param zip zip archive handler. + * + * @return the return code - 1 (true), 0 (false), negative number (< 0) on + * error. + */ +extern int zip_is64(struct zip_t *zip); + +/** + * Opens an entry by name in the zip archive. + * + * For zip archive opened in 'w' or 'a' mode the function will append + * a new entry. In readonly mode the function tries to locate the entry + * in global dictionary. + * + * @param zip zip archive handler. + * @param entryname an entry name in local dictionary. + * + * @return the return code - 0 on success, negative number (< 0) on error. + */ +extern int zip_entry_open(struct zip_t *zip, const char *entryname); + +/** + * Opens a new entry by index in the zip archive. + * + * This function is only valid if zip archive was opened in 'r' (readonly) mode. + * + * @param zip zip archive handler. + * @param index index in local dictionary. + * + * @return the return code - 0 on success, negative number (< 0) on error. + */ +extern int zip_entry_openbyindex(struct zip_t *zip, int index); + +/** + * Closes a zip entry, flushes buffer and releases resources. + * + * @param zip zip archive handler. + * + * @return the return code - 0 on success, negative number (< 0) on error. + */ +extern int zip_entry_close(struct zip_t *zip); + +/** + * Returns a local name of the current zip entry. + * + * The main difference between user's entry name and local entry name + * is optional relative path. + * Following .ZIP File Format Specification - the path stored MUST not contain + * a drive or device letter, or a leading slash. + * All slashes MUST be forward slashes '/' as opposed to backwards slashes '\' + * for compatibility with Amiga and UNIX file systems etc. + * + * @param zip: zip archive handler. + * + * @return the pointer to the current zip entry name, or NULL on error. + */ +extern const char *zip_entry_name(struct zip_t *zip); + +/** + * Returns an index of the current zip entry. + * + * @param zip zip archive handler. + * + * @return the index on success, negative number (< 0) on error. + */ +extern int zip_entry_index(struct zip_t *zip); + +/** + * Determines if the current zip entry is a directory entry. + * + * @param zip zip archive handler. + * + * @return the return code - 1 (true), 0 (false), negative number (< 0) on + * error. + */ +extern int zip_entry_isdir(struct zip_t *zip); + +/** + * Returns an uncompressed size of the current zip entry. + * + * @param zip zip archive handler. + * + * @return the uncompressed size in bytes. + */ +extern unsigned long long zip_entry_size(struct zip_t *zip); + +/** + * Returns CRC-32 checksum of the current zip entry. + * + * @param zip zip archive handler. + * + * @return the CRC-32 checksum. + */ +extern unsigned int zip_entry_crc32(struct zip_t *zip); + +/** + * Compresses an input buffer for the current zip entry. + * + * @param zip zip archive handler. + * @param buf input buffer. + * @param bufsize input buffer size (in bytes). + * + * @return the return code - 0 on success, negative number (< 0) on error. + */ +extern int zip_entry_write(struct zip_t *zip, const void *buf, size_t bufsize); + +/** + * Compresses a file for the current zip entry. + * + * @param zip zip archive handler. + * @param filename input file. + * + * @return the return code - 0 on success, negative number (< 0) on error. 
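+ *
+ * @note Example usage (illustrative sketch; both file names are
+ * placeholders). The usual pattern is to open an entry, feed it a file from
+ * disk, then close the entry:
+ * @code
+ *   if (zip_entry_open(zip, "docs/readme.txt") == 0) {
+ *     if (zip_entry_fwrite(zip, "readme.txt") < 0) {
+ *       // handle read/compression error
+ *     }
+ *     zip_entry_close(zip);
+ *   }
+ * @endcode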
+ */ +extern int zip_entry_fwrite(struct zip_t *zip, const char *filename); + +/** + * Extracts the current zip entry into output buffer. + * + * The function allocates sufficient memory for a output buffer. + * + * @param zip zip archive handler. + * @param buf output buffer. + * @param bufsize output buffer size (in bytes). + * + * @note remember to release memory allocated for a output buffer. + * for large entries, please take a look at zip_entry_extract function. + * + * @return the return code - the number of bytes actually read on success. + * Otherwise a -1 on error. + */ +extern ssize_t zip_entry_read(struct zip_t *zip, void **buf, size_t *bufsize); + +/** + * Extracts the current zip entry into a memory buffer using no memory + * allocation. + * + * @param zip zip archive handler. + * @param buf preallocated output buffer. + * @param bufsize output buffer size (in bytes). + * + * @note ensure supplied output buffer is large enough. + * zip_entry_size function (returns uncompressed size for the current + * entry) can be handy to estimate how big buffer is needed. for large + * entries, please take a look at zip_entry_extract function. + * + * @return the return code - the number of bytes actually read on success. + * Otherwise a -1 on error (e.g. bufsize is not large enough). + */ +extern ssize_t zip_entry_noallocread(struct zip_t *zip, void *buf, + size_t bufsize); + +/** + * Extracts the current zip entry into output file. + * + * @param zip zip archive handler. + * @param filename output file. + * + * @return the return code - 0 on success, negative number (< 0) on error. + */ +extern int zip_entry_fread(struct zip_t *zip, const char *filename); + +/** + * Extracts the current zip entry using a callback function (on_extract). + * + * @param zip zip archive handler. + * @param on_extract callback function. + * @param arg opaque pointer (optional argument, which you can pass to the + * on_extract callback) + * + * @return the return code - 0 on success, negative number (< 0) on error. + */ +extern int +zip_entry_extract(struct zip_t *zip, + size_t (*on_extract)(void *arg, unsigned long long offset, + const void *data, size_t size), + void *arg); + +/** + * Returns the number of all entries (files and directories) in the zip archive. + * + * @param zip zip archive handler. + * + * @return the return code - the number of entries on success, negative number + * (< 0) on error. + */ +extern int zip_total_entries(struct zip_t *zip); + +/** + * Creates a new archive and puts files into a single zip archive. + * + * @param zipname zip archive file. + * @param filenames input files. + * @param len: number of input files. + * + * @return the return code - 0 on success, negative number (< 0) on error. + */ +extern int zip_create(const char *zipname, const char *filenames[], size_t len); + +/** + * Extracts a zip archive file into directory. + * + * If on_extract_entry is not NULL, the callback will be called after + * successfully extracted each zip entry. + * Returning a negative value from the callback will cause abort and return an + * error. The last argument (void *arg) is optional, which you can use to pass + * data to the on_extract_entry callback. + * + * @param zipname zip archive file. + * @param dir output directory. + * @param on_extract_entry on extract callback. + * @param arg opaque pointer. + * + * @return the return code - 0 on success, negative number (< 0) on error. 
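+ *
+ * @note Example usage (illustrative sketch; the archive name and output
+ * directory are placeholders). Passing NULL for the callback and its
+ * argument extracts every entry without a per-entry hook:
+ * @code
+ *   if (zip_extract("example.zip", "out", NULL, NULL) < 0) {
+ *     // handle error
+ *   }
+ * @endcode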
+ */ +extern int zip_extract(const char *zipname, const char *dir, + int (*on_extract_entry)(const char *filename, void *arg), + void *arg); +// temporary working unzip solution +extern int zip_extract_without_callback(const char *zipname, const char *dir); + +/** + * Extracts a zip archive stream into directory. + * + * If on_extract is not NULL, the callback will be called after + * successfully extracted each zip entry. + * Returning a negative value from the callback will cause abort and return an + * error. The last argument (void *arg) is optional, which you can use to pass + * data to the on_extract callback. + * + * @param stream zip archive stream. + * @param size stream size. + * @param dir output directory. + * @param on_extract on extract callback. + * @param arg opaque pointer. + * + * @return the return code - 0 on success, negative number (< 0) on error. + */ +extern int zip_extract_stream(const char *stream, size_t size, const char *dir, + int (*on_extract)(const char *filename, + void *arg), + void *arg); +/** @} */ + +#ifdef __cplusplus +} +#endif + +#endif -- cgit v1.2.3