//------------------------------------------------------------------------------
// GB_AxB_saxpy3_template.h: C=A*B, C<M>=A*B, or C<!M>=A*B via saxpy3 method
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// Definitions for GB_AxB_saxpy3_template.c.  These do not depend on the
// sparsity of A and B.

#ifndef GB_AXB_SAXPY3_TEMPLATE_H
#define GB_AXB_SAXPY3_TEMPLATE_H

//------------------------------------------------------------------------------
// GB_GET_M_j: prepare to iterate over M(:,j)
//------------------------------------------------------------------------------

// prepare to iterate over the vector M(:,j), for the (kk)th vector of B
// FUTURE::: lookup all M(:,j) for all vectors in B, in a single pass,
// and save the mapping (like C_to_M mapping in GB_ewise_slice)
#define GB_GET_M_j                                                  \
    int64_t mpleft = 0 ;                                            \
    int64_t mpright = mnvec-1 ;                                     \
    int64_t pM_start, pM_end ;                                      \
    GB_lookup (M_is_hyper, Mh, Mp, mvlen, &mpleft, mpright,         \
        GBH (Bh, kk), &pM_start, &pM_end) ;                         \
    int64_t mjnz = pM_end - pM_start ;

//------------------------------------------------------------------------------
// GB_GET_M_j_RANGE
//------------------------------------------------------------------------------

#define GB_GET_M_j_RANGE(gamma)                                     \
    int64_t mjnz_much = mjnz * gamma

//------------------------------------------------------------------------------
// GB_SCATTER_M_j_TYPE: scatter M(:,j) of the given type into the Gustavson
// workspace
//------------------------------------------------------------------------------

// This macro ends with a break statement, since it is used only inside the
// switch statement in GB_SCATTER_M_j below.

#define GB_SCATTER_M_j_TYPE(mask_t,pMstart,pMend,mark)              \
{                                                                   \
    const mask_t *restrict Mxx = (mask_t *) Mx ;                    \
    if (M_is_bitmap)                                                \
    {                                                               \
        /* M is bitmap */                                           \
        for (int64_t pM = pMstart ; pM < pMend ; pM++)              \
        {                                                           \
            /* if (M (i,j) == 1) mark Hf [i] */                     \
            if (Mb [pM] && Mxx [pM]) Hf [GBI (Mi, pM, mvlen)] = mark ; \
        }                                                           \
    }                                                               \
    else                                                            \
    {                                                               \
        /* M is hyper, sparse, or full */                           \
        for (int64_t pM = pMstart ; pM < pMend ; pM++)              \
        {                                                           \
            /* if (M (i,j) == 1) mark Hf [i] */                     \
            if (Mxx [pM]) Hf [GBI (Mi, pM, mvlen)] = mark ;         \
        }                                                           \
    }                                                               \
}                                                                   \
break ;

//------------------------------------------------------------------------------
// GB_SCATTER_M_j: scatter M(:,j) into the Gustavson workspace
//------------------------------------------------------------------------------

#define GB_SCATTER_M_j(pMstart,pMend,mark)                          \
    if (Mx == NULL)                                                 \
    {                                                               \
        /* M is structural, not valued */                           \
        if (M_is_bitmap)                                            \
        {                                                           \
            /* M is bitmap */                                       \
            for (int64_t pM = pMstart ; pM < pMend ; pM++)          \
            {                                                       \
                /* if (M (i,j) is present) mark Hf [i] */           \
                if (Mb [pM]) Hf [GBI (Mi, pM, mvlen)] = mark ;      \
            }                                                       \
        }                                                           \
        else                                                        \
        {                                                           \
            /* M is hyper, sparse, or full */                       \
            for (int64_t pM = pMstart ; pM < pMend ; pM++)          \
            {                                                       \
                /* mark Hf [i] */                                   \
                Hf [GBI (Mi, pM, mvlen)] = mark ;                   \
            }                                                       \
        }                                                           \
    }                                                               \
    else                                                            \
    {                                                               \
        /* mask is valued, not structural */                        \
        switch (msize)                                              \
        {                                                           \
            default:                                                \
            case 1: GB_SCATTER_M_j_TYPE (uint8_t , pMstart, pMend, mark) ; \
            case 2: GB_SCATTER_M_j_TYPE (uint16_t, pMstart, pMend, mark) ; \
            case 4: GB_SCATTER_M_j_TYPE (uint32_t, pMstart, pMend, mark) ; \
            case 8: GB_SCATTER_M_j_TYPE (uint64_t, pMstart, pMend, mark) ; \
            case 16:                                                \
            {                                                       \
                const uint64_t *restrict Mxx = (uint64_t *) Mx ;    \
                for (int64_t pM = pMstart ; pM < pMend ; pM++)      \
                {                                                   \
                    /* if (M (i,j) == 1) mark Hf [i] */             \
                    if (!GBB (Mb, pM)) continue ;                   \
                    if (Mxx [2*pM] || Mxx [2*pM+1])                 \
                    {                                               \
                        /* Hf [i] = M(i,j) */                       \
                        Hf [GBI (Mi, pM, mvlen)] = mark ;           \
                    }                                               \
                }                                                   \
            }                                                       \
        }                                                           \
    }
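
//------------------------------------------------------------------------------
// example: using the scattered mask
//------------------------------------------------------------------------------

// The following sketch shows how a Gustavson task can consult the scattered
// mask when computing C<M>=A*B.  It is illustrative only (the actual loops
// appear in GB_AxB_saxpy3_template.c): after GB_SCATTER_M_j, the test
// M(i,j) == 1 becomes a single O(1) lookup in the Hf workspace.
//
//      GB_GET_M_j ;                            // get M(:,j)
//      GB_SCATTER_M_j (pM_start, pM_end, mark) ;
//      for ( /* ... each A(i,k) with B(k,j) present ... */ )
//      {
//          if (Hf [i] == mark)
//          {
//              /* M(i,j) == 1, so C(i,j) may be computed */
//          }
//      }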

//------------------------------------------------------------------------------
// GB_HASH_M_j: scatter M(:,j) for a coarse hash task
//------------------------------------------------------------------------------

// hash M(:,j) into Hf and Hi for a coarse hash task, C<M>=A*B or C<!M>=A*B
#define GB_HASH_M_j                                                 \
    for (int64_t pM = pM_start ; pM < pM_end ; pM++)                \
    {                                                               \
        GB_GET_M_ij (pM) ;          /* get M(i,j) */                \
        if (!mij) continue ;        /* skip if M(i,j)=0 */          \
        int64_t i = GBI (Mi, pM, mvlen) ;                           \
        for (GB_HASH (i))           /* find i in hash */            \
        {                                                           \
            if (Hf [hash] < mark)                                   \
            {                                                       \
                Hf [hash] = mark ;  /* insert M(i,j)=1 */           \
                Hi [hash] = i ;                                     \
                break ;                                             \
            }                                                       \
        }                                                           \
    }

//------------------------------------------------------------------------------
// GB_GET_T_FOR_SECONDJ: define t for SECONDJ and SECONDJ1 semirings
//------------------------------------------------------------------------------

#if GB_IS_SECONDJ_MULTIPLIER
    #define GB_GET_T_FOR_SECONDJ                                    \
        GB_CIJ_DECLARE (t) ;                                        \
        GB_MULT (t, ignore, ignore, ignore, ignore, j) ;
#else
    #define GB_GET_T_FOR_SECONDJ
#endif

//------------------------------------------------------------------------------
// GB_GET_B_j_FOR_ALL_FORMATS: prepare to iterate over B(:,j)
//------------------------------------------------------------------------------

// prepare to iterate over the vector B(:,j), the (kk)th vector in B, where
// j == GBH (Bh, kk).  This macro works regardless of the sparsity of A and B.
#define GB_GET_B_j_FOR_ALL_FORMATS(A_is_hyper,B_is_sparse,B_is_hyper)         \
    int64_t pleft = 0 ;                                                       \
    int64_t pright = anvec-1 ;                                                \
    int64_t j = (B_is_hyper) ? Bh [kk] : kk ;                                 \
    GB_GET_T_FOR_SECONDJ ;      /* t = j for SECONDJ, or j+1 for SECONDJ1 */  \
    int64_t pB = (B_is_sparse || B_is_hyper) ? Bp [kk] : (kk * bvlen) ;       \
    int64_t pB_end = (B_is_sparse || B_is_hyper) ? Bp [kk+1] : (pB+bvlen) ;   \
    int64_t bjnz = pB_end - pB ;    /* nnz (B (:,j)) */                       \
    /* FUTURE::: can skip if mjnz == 0 for C<M>=A*B tasks */                  \
    if (A_is_hyper && (B_is_sparse || B_is_hyper) && bjnz > 2 && !B_jumbled)  \
    {                                                                         \
        /* trim Ah [0..pright] to remove any entries past the last B(:,j), */ \
        /* to speed up GB_lookup in GB_GET_A_k_FOR_ALL_FORMATS.  This      */ \
        /* requires that B is not jumbled. */                                 \
        GB_bracket_right (GBI (Bi, pB_end-1, bvlen), Ah, 0, &pright) ;        \
    }

//------------------------------------------------------------------------------
// GB_GET_B_kj: get the numeric value of B(k,j)
//------------------------------------------------------------------------------

#if GB_IS_FIRSTJ_MULTIPLIER

// FIRSTJ or FIRSTJ1 multiplier:
// t = aik * bkj = k or k+1
#define GB_GET_B_kj                                                 \
    GB_CIJ_DECLARE (t) ;                                            \
    GB_MULT (t, ignore, ignore, ignore, k, ignore)

#else

#define GB_GET_B_kj                                                 \
    GB_GETB (bkj, Bx, pB)           /* bkj = Bx [pB] */

#endif
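
//------------------------------------------------------------------------------
// example: how GB_GET_B_kj expands
//------------------------------------------------------------------------------

// For a conventional semiring such as PLUS_TIMES_FP64, GB_GET_B_kj expands
// to (roughly) a single load:
//
//      double bkj = Bx [pB] ;
//
// Positional multipliers avoid the load entirely, since their result depends
// only on the indices: SECONDJ computes t = j once per vector B(:,j) (in
// GB_GET_T_FOR_SECONDJ above), and FIRSTJ computes t = k once per entry
// B(k,j) (in GB_GET_B_kj above), so GB_MULT_A_ik_B_kj (below) then has no
// numeric work left to do.  The exact expansion of GB_GETB depends on the
// semiring; this sketch assumes the FP64 case.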

//------------------------------------------------------------------------------
// GB_GET_A_k_FOR_ALL_FORMATS: prepare to iterate over the vector A(:,k)
//------------------------------------------------------------------------------

#define GB_GET_A_k_FOR_ALL_FORMATS(A_is_hyper)                      \
    if (B_jumbled) pleft = 0 ;  /* reuse pleft only if B is not jumbled */ \
    int64_t pA_start, pA_end ;                                      \
    GB_lookup (A_is_hyper, Ah, Ap, avlen, &pleft, pright, k,        \
        &pA_start, &pA_end) ;                                       \
    int64_t aknz = pA_end - pA_start

//------------------------------------------------------------------------------
// GB_GET_M_ij: get the numeric value of M(i,j)
//------------------------------------------------------------------------------

#define GB_GET_M_ij(pM)                                             \
    /* get M(i,j), at Mi [pM] and Mx [pM] */                        \
    bool mij = GBB (Mb, pM) && GB_mcast (Mx, pM, msize)

//------------------------------------------------------------------------------
// GB_MULT_A_ik_B_kj: declare t and compute t = A(i,k) * B(k,j)
//------------------------------------------------------------------------------

#if GB_IS_PAIR_MULTIPLIER

// PAIR multiplier: t is always 1; there is no numeric work to do to compute
// t.  However, the LXOR_PAIR and PLUS_PAIR semirings still need the value
// t = 1 for their monoid operator.
#define t (GB_CTYPE_CAST (1, 0))
#define GB_MULT_A_ik_B_kj

#elif ( GB_IS_FIRSTJ_MULTIPLIER || GB_IS_SECONDJ_MULTIPLIER )

// nothing to do; t = aik*bkj is already defined in an outer loop
#define GB_MULT_A_ik_B_kj

#else

// typical semiring
#define GB_MULT_A_ik_B_kj                                           \
    GB_GETA (aik, Ax, pA) ;             /* aik = Ax [pA] ; */       \
    GB_CIJ_DECLARE (t) ;                /* ctype t ; */             \
    GB_MULT (t, aik, bkj, i, k, j)      /* t = aik * bkj ; */

#endif
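
//------------------------------------------------------------------------------
// example: the saxpy3 inner loops
//------------------------------------------------------------------------------

// The macros above combine into the core saxpy-style computation of
// C(:,j) += A(:,k)*B(k,j), for each entry B(k,j).  This is an illustrative
// sketch only; the actual loops, with all of their mask, task, and sparsity
// variants, appear in GB_AxB_saxpy3_template.c:
//
//      GB_GET_B_j_FOR_ALL_FORMATS (...) ;      // get B(:,j)
//      for ( ; pB < pB_end ; pB++)             // for each B(k,j)
//      {
//          int64_t k = GBI (Bi, pB, bvlen) ;
//          GB_GET_B_kj ;                       // bkj = B(k,j)
//          GB_GET_A_k_FOR_ALL_FORMATS (...) ;  // get A(:,k)
//          for (int64_t pA = pA_start ; pA < pA_end ; pA++)
//          {
//              int64_t i = GBI (Ai, pA, avlen) ;
//              GB_MULT_A_ik_B_kj ;             // t = A(i,k)*B(k,j)
//              /* ... accumulate t into the workspace at index i ... */
//          }
//      }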

//------------------------------------------------------------------------------
// GB_GATHER_ALL_C_j: gather the values and pattern of C(:,j)
//------------------------------------------------------------------------------

// gather the pattern and values of C(:,j) for a coarse Gustavson task;
// the pattern is not flagged as jumbled.

#if GB_IS_ANY_PAIR_SEMIRING

// ANY_PAIR: result is purely symbolic; no numeric work to do
#define GB_GATHER_ALL_C_j(mark)                                     \
    for (int64_t i = 0 ; i < cvlen ; i++)                           \
    {                                                               \
        if (Hf [i] == mark)                                         \
        {                                                           \
            Ci [pC++] = i ;                                         \
        }                                                           \
    }

#else

// typical semiring
#define GB_GATHER_ALL_C_j(mark)                                     \
    for (int64_t i = 0 ; i < cvlen ; i++)                           \
    {                                                               \
        if (Hf [i] == mark)                                         \
        {                                                           \
            GB_CIJ_GATHER (pC, i) ;     /* Cx [pC] = Hx [i] */      \
            Ci [pC++] = i ;                                         \
        }                                                           \
    }

#endif

//------------------------------------------------------------------------------
// GB_SORT_C_j_PATTERN: sort C(:,j) for a coarse task, or flag as jumbled
//------------------------------------------------------------------------------

// Only coarse tasks do the optional sort.  Fine hash tasks always leave C
// jumbled.

#define GB_SORT_C_j_PATTERN                                         \
    if (do_sort)                                                    \
    {                                                               \
        /* sort the pattern of C(:,j) (non-default) */              \
        GB_qsort_1a (Ci + Cp [kk], cjnz) ;                          \
    }                                                               \
    else                                                            \
    {                                                               \
        /* lazy sort: C(:,j) is now jumbled (default) */            \
        task_C_jumbled = true ;                                     \
    }

//------------------------------------------------------------------------------
// GB_SORT_AND_GATHER_C_j: sort and gather C(:,j) for a coarse Gustavson task
//------------------------------------------------------------------------------

// gather the values of C(:,j) for a coarse Gustavson task
#if GB_IS_ANY_PAIR_SEMIRING

// ANY_PAIR: result is purely symbolic
#define GB_SORT_AND_GATHER_C_j                                      \
    GB_SORT_C_j_PATTERN ;

#else

// typical semiring
#define GB_SORT_AND_GATHER_C_j                                      \
    GB_SORT_C_j_PATTERN ;                                           \
    /* gather the values into C(:,j) */                             \
    for (int64_t pC = Cp [kk] ; pC < Cp [kk+1] ; pC++)              \
    {                                                               \
        int64_t i = Ci [pC] ;                                       \
        GB_CIJ_GATHER (pC, i) ;         /* Cx [pC] = Hx [i] */      \
    }

#endif

//------------------------------------------------------------------------------
// GB_SORT_AND_GATHER_HASHED_C_j: sort and gather C(:,j) for a coarse hash task
//------------------------------------------------------------------------------

#if GB_IS_ANY_PAIR_SEMIRING

// ANY_PAIR: result is purely symbolic
#define GB_SORT_AND_GATHER_HASHED_C_j(hash_mark)                    \
    GB_SORT_C_j_PATTERN ;

#else

// gather the values of C(:,j) for a coarse hash task
#define GB_SORT_AND_GATHER_HASHED_C_j(hash_mark)                    \
    GB_SORT_C_j_PATTERN ;                                           \
    for (int64_t pC = Cp [kk] ; pC < Cp [kk+1] ; pC++)              \
    {                                                               \
        int64_t i = Ci [pC] ;                                       \
        for (GB_HASH (i))               /* find i in hash table */  \
        {                                                           \
            if (Hf [hash] == (hash_mark) && (Hi [hash] == i))       \
            {                                                       \
                /* i found in the hash table */                     \
                /* Cx [pC] = Hx [hash] ; */                         \
                GB_CIJ_GATHER (pC, hash) ;                          \
                break ;                                             \
            }                                                       \
        }                                                           \
    }

#endif
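
//------------------------------------------------------------------------------
// example: life cycle of a coarse hash task
//------------------------------------------------------------------------------

// A rough sketch of how the gather macros above fit into a coarse hash task
// (the details differ in GB_AxB_saxpy3_template.c; GB_HX_WRITE and
// GB_HX_UPDATE are defined by the semiring).  During the numeric phase, each
// index i is looked up with the probe sequence of GB_HASH (i):
//
//      for (GB_HASH (i))
//      {
//          if (Hf [hash] == mark && Hi [hash] == i)
//          {
//              GB_HX_UPDATE (hash, t) ;    // i already in C(:,j): update
//              break ;
//          }
//          if (Hf [hash] < mark)
//          {
//              Hf [hash] = mark ;          // empty slot: insert i
//              Hi [hash] = i ;
//              GB_HX_WRITE (hash, t) ;
//              Ci [pC++] = i ;
//              break ;
//          }
//      }
//
// Afterwards, GB_SORT_AND_GATHER_HASHED_C_j revisits each index i in
// Ci [Cp [kk] ... Cp [kk+1]-1] with the same probe sequence, copying
// Hx [hash] into Cx [pC].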

//------------------------------------------------------------------------------
// GB_ATOMIC_UPDATE_HX: Hx [i] += t
//------------------------------------------------------------------------------

#if GB_IS_ANY_MONOID

    //--------------------------------------------------------------------------
    // the update Hx [i] += t can be skipped entirely for the ANY monoid
    //--------------------------------------------------------------------------

    #define GB_ATOMIC_UPDATE_HX(i,t)

#elif GB_HAS_ATOMIC

    //--------------------------------------------------------------------------
    // Hx [i] += t via atomic update
    //--------------------------------------------------------------------------

    // for built-in MIN/MAX monoids only, on built-in types
    #define GB_MINMAX(i,t,done)                                     \
    {                                                               \
        GB_CTYPE xold, xnew, *px = Hx + (i) ;                       \
        do                                                          \
        {                                                           \
            /* xold = Hx [i] via atomic read */                     \
            GB_ATOMIC_READ                                          \
            xold = (*px) ;                                          \
            /* done if xold <= t for MIN, or xold >= t for MAX, */  \
            /* but not done if xold is NaN */                       \
            if (done) break ;                                       \
            xnew = t ;  /* t should be assigned; it is not NaN */   \
        }                                                           \
        while (!GB_ATOMIC_COMPARE_EXCHANGE (px, xold, xnew)) ;      \
    }

    #if GB_IS_IMIN_MONOID

        // built-in MIN monoids for signed and unsigned integers
        #define GB_ATOMIC_UPDATE_HX(i,t)                            \
            GB_MINMAX (i, t, xold <= t)

    #elif GB_IS_IMAX_MONOID

        // built-in MAX monoids for signed and unsigned integers
        #define GB_ATOMIC_UPDATE_HX(i,t)                            \
            GB_MINMAX (i, t, xold >= t)

    #elif GB_IS_FMIN_MONOID

        // built-in MIN monoids for float and double, with omitnan behavior.
        // The update is skipped entirely if t is NaN.  Otherwise, if t is
        // not NaN, xold is checked.  If xold is NaN, islessequal (xold, t)
        // is always false, so the non-NaN t must always be assigned to
        // Hx [i].  If both terms are not NaN, then islessequal (xold,t) is
        // just the comparison xold <= t.  If that is true, there is no work
        // to do and the loop breaks.  Otherwise, t is smaller than xold and
        // so it must be assigned to Hx [i].
        #define GB_ATOMIC_UPDATE_HX(i,t)                            \
        {                                                           \
            if (!isnan (t))                                         \
            {                                                       \
                GB_MINMAX (i, t, islessequal (xold, t)) ;           \
            }                                                       \
        }

    #elif GB_IS_FMAX_MONOID

        // built-in MAX monoids for float and double, with omitnan behavior.
        #define GB_ATOMIC_UPDATE_HX(i,t)                            \
        {                                                           \
            if (!isnan (t))                                         \
            {                                                       \
                GB_MINMAX (i, t, isgreaterequal (xold, t)) ;        \
            }                                                       \
        }

    #elif GB_IS_PLUS_FC32_MONOID

        // built-in PLUS_FC32 monoid can be done as two independent atomics
        #define GB_ATOMIC_UPDATE_HX(i,t)                            \
            GB_ATOMIC_UPDATE                                        \
            Hx_real [2*(i)] += crealf (t) ;                         \
            GB_ATOMIC_UPDATE                                        \
            Hx_imag [2*(i)] += cimagf (t) ;

    #elif GB_IS_PLUS_FC64_MONOID

        // built-in PLUS_FC64 monoid can be done as two independent atomics
        #define GB_ATOMIC_UPDATE_HX(i,t)                            \
            GB_ATOMIC_UPDATE                                        \
            Hx_real [2*(i)] += creal (t) ;                          \
            GB_ATOMIC_UPDATE                                        \
            Hx_imag [2*(i)] += cimag (t) ;

    #elif GB_HAS_OMP_ATOMIC

        // built-in PLUS and TIMES monoids for integer and real types, and
        // the boolean LOR, LAND, and LXOR monoids, can be implemented with
        // an OpenMP atomic pragma.
        #define GB_ATOMIC_UPDATE_HX(i,t)                            \
            GB_ATOMIC_UPDATE                                        \
            GB_HX_UPDATE (i, t)

    #else

        // all other atomic monoids (EQ, XNOR) on boolean, signed and
        // unsigned integers, float, and double (not used for single and
        // double complex).
        #define GB_ATOMIC_UPDATE_HX(i,t)                            \
        {                                                           \
            GB_CTYPE xold, xnew, *px = Hx + (i) ;                   \
            do                                                      \
            {                                                       \
                /* xold = Hx [i] via atomic read */                 \
                GB_ATOMIC_READ                                      \
                xold = (*px) ;                                      \
                /* xnew = xold + t */                               \
                xnew = GB_ADD_FUNCTION (xold, t) ;                  \
            }                                                       \
            while (!GB_ATOMIC_COMPARE_EXCHANGE (px, xold, xnew)) ;  \
        }

    #endif

#else

    //--------------------------------------------------------------------------
    // Hx [i] += t can only be done inside the critical section
    //--------------------------------------------------------------------------

    // all user-defined monoids go here, and all complex monoids (except PLUS)
    #define GB_ATOMIC_UPDATE_HX(i,t)                                \
        GB_OMP_FLUSH                                                \
        GB_HX_UPDATE (i, t) ;                                       \
        GB_OMP_FLUSH

#endif
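
//------------------------------------------------------------------------------
// example: the compare-exchange idiom
//------------------------------------------------------------------------------

// The lock-free updates above all follow the same retry pattern.  A sketch,
// assuming a PLUS monoid on double (the real macros GB_ATOMIC_READ and
// GB_ATOMIC_COMPARE_EXCHANGE hide the platform details; compare_exchange
// below is a stand-in for the actual primitive):
//
//      double xold, xnew ;
//      do
//      {
//          xold = Hx [i] ;         // atomic read of the current value
//          xnew = xold + t ;       // compute the updated value
//      }
//      while (!compare_exchange (&Hx [i], xold, xnew)) ;
//
// If another thread updates Hx [i] between the read and the
// compare-exchange, the exchange fails and the update is recomputed from
// the freshly read value, so no update is ever lost.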

#define GB_IS_MINMAX_MONOID                                         \
    (GB_IS_IMIN_MONOID || GB_IS_IMAX_MONOID ||                      \
     GB_IS_FMIN_MONOID || GB_IS_FMAX_MONOID)

//------------------------------------------------------------------------------
// GB_ATOMIC_WRITE_HX: Hx [i] = t
//------------------------------------------------------------------------------

#if GB_IS_ANY_PAIR_SEMIRING

    //--------------------------------------------------------------------------
    // ANY_PAIR: result is purely symbolic; no numeric work to do
    //--------------------------------------------------------------------------

    #define GB_ATOMIC_WRITE_HX(i,t)

    //--------------------------------------------------------------------------
    // ANY_PAIR, for the bitmap case only: Hx [i] = 1
    //--------------------------------------------------------------------------

    #if GB_IS_ANY_FC32_MONOID || GB_IS_ANY_FC64_MONOID
        #define GB_ATOMIC_SET_HX_ONE(i)                             \
            GB_ATOMIC_WRITE                                         \
            Hx_real [2*(i)] = 1 ;                                   \
            GB_ATOMIC_WRITE                                         \
            Hx_imag [2*(i)] = 0 ;
    #else
        #define GB_ATOMIC_SET_HX_ONE(i)                             \
            GB_ATOMIC_WRITE                                         \
            Hx [i] = 1 ;
    #endif

#elif GB_HAS_ATOMIC

    //--------------------------------------------------------------------------
    // Hx [i] = t via atomic write
    //--------------------------------------------------------------------------

    #if GB_IS_PLUS_FC32_MONOID

        // built-in PLUS_FC32 monoid
        #define GB_ATOMIC_WRITE_HX(i,t)                             \
            GB_ATOMIC_WRITE                                         \
            Hx_real [2*(i)] = crealf (t) ;                          \
            GB_ATOMIC_WRITE                                         \
            Hx_imag [2*(i)] = cimagf (t) ;

    #elif GB_IS_PLUS_FC64_MONOID

        // built-in PLUS_FC64 monoid
        #define GB_ATOMIC_WRITE_HX(i,t)                             \
            GB_ATOMIC_WRITE                                         \
            Hx_real [2*(i)] = creal (t) ;                           \
            GB_ATOMIC_WRITE                                         \
            Hx_imag [2*(i)] = cimag (t) ;

    #else

        // all other atomic monoids
        #define GB_ATOMIC_WRITE_HX(i,t)                             \
            GB_ATOMIC_WRITE                                         \
            GB_HX_WRITE (i, t)

    #endif

#else

    //--------------------------------------------------------------------------
    // Hx [i] = t via critical section
    //--------------------------------------------------------------------------

    #define GB_ATOMIC_WRITE_HX(i,t)                                 \
        GB_OMP_FLUSH                                                \
        GB_HX_WRITE (i, t) ;                                        \
        GB_OMP_FLUSH

#endif

//------------------------------------------------------------------------------
// hash iteration
//------------------------------------------------------------------------------

// to iterate over the hash table, looking for index i:
//
//      for (GB_HASH (i))
//      {
//          ...
//      }
//
// which expands into the following, where f(i) is the GB_HASHF(i) hash
// function:
//
//      for (int64_t hash = f(i) ; ; hash = (hash+1)&(hash_size-1))
//      {
//          ...
//      }
//
// The for loop itself has no termination test, so the loop body must break
// out of the loop once i, or an empty slot, is found.

#define GB_HASH(i)                                                  \
    int64_t hash = GB_HASHF (i) ; ; GB_REHASH (hash,i)

//------------------------------------------------------------------------------
// define macros for any sparsity of A and B
//------------------------------------------------------------------------------

#undef GB_META16
#include "GB_meta16_definitions.h"

#endif