// The template and inlines for the -*- C++ -*- internal _Array helper class.

// Copyright (C) 1997, 1998, 1999, 2000, 2003, 2004, 2005, 2006
// Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 2, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License along
// with this library; see the file COPYING.  If not, write to the Free
// Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
// USA.

// As a special exception, you may use this file as part of a free software
// library without restriction.  Specifically, if other files instantiate
// templates or use macros or inline functions from this file, or you compile
// this file and link it with other files to produce an executable, this
// file does not by itself cause the resulting executable to be covered by
// the GNU General Public License.  This exception does not however
// invalidate any other reasons why the executable file might be covered by
// the GNU General Public License.

/** @file valarray_array.h
 *  This is an internal header file, included by other library headers.
 *  You should not attempt to use it directly.
 */

// Written by Gabriel Dos Reis <Gabriel.Dos-Reis@DPTMaths.ENS-Cachan.Fr>

#ifndef _VALARRAY_ARRAY_H
#define _VALARRAY_ARRAY_H 1

#pragma GCC system_header

#include <bits/c++config.h>
#include <bits/cpp_type_traits.h>
#include <cstdlib>
#include <cstring>
#include <new>

_GLIBCXX_BEGIN_NAMESPACE(std)

  //
  // Helper functions on raw pointers
  //

  // We get memory the old-fashioned way: operator new.
  inline void*
  __valarray_get_memory(size_t __n)
  { return operator new(__n); }

  template<typename _Tp>
    inline _Tp*__restrict__
    __valarray_get_storage(size_t __n)
    {
      return static_cast<_Tp*__restrict__>
        (std::__valarray_get_memory(__n * sizeof(_Tp)));
    }

  // Return memory to the system.
  inline void
  __valarray_release_memory(void* __p)
  { operator delete(__p); }

  // Turn raw memory into an array of _Tp filled with _Tp().
  // This is required in 'valarray<T> v(n);'.
  template<typename _Tp, bool>
    struct _Array_default_ctor
    {
      // Please note that this isn't exception safe.  But
      // valarrays aren't required to be exception safe.
      inline static void
      _S_do_it(_Tp* __restrict__ __b, _Tp* __restrict__ __e)
      {
        while (__b != __e)
          new(__b++) _Tp();
      }
    };

  template<typename _Tp>
    struct _Array_default_ctor<_Tp, true>
    {
      // For fundamental types, it suffices to say 'memset()'.
      inline static void
      _S_do_it(_Tp* __restrict__ __b, _Tp* __restrict__ __e)
      { std::memset(__b, 0, (__e - __b) * sizeof(_Tp)); }
    };

  template<typename _Tp>
    inline void
    __valarray_default_construct(_Tp* __restrict__ __b, _Tp* __restrict__ __e)
    {
      _Array_default_ctor<_Tp, __is_pod<_Tp>::__value>::_S_do_it(__b, __e);
    }
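  // A note on the dispatch idiom above, which recurs throughout this file:
  // the unnamed bool template parameter is always instantiated with
  // __is_pod<_Tp>::__value, so the primary template provides the general
  // placement-new path and the <_Tp, true> specialization the raw-memory
  // fast path.  Illustrative only:
  //
  //   double* __p = std::__valarray_get_storage<double>(__n);
  //   std::__valarray_default_construct(__p, __p + __n);  // memset(0) path
  //
  // while a _Tp with a non-trivial default constructor takes the
  // placement-new loop instead.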
  // Turn raw memory into an array of _Tp filled with __t.
  // This is required in 'valarray<T> v(n, t)'.  Also
  // used in valarray<>::resize().
  template<typename _Tp, bool>
    struct _Array_init_ctor
    {
      // Please note that this isn't exception safe.  But
      // valarrays aren't required to be exception safe.
      inline static void
      _S_do_it(_Tp* __restrict__ __b, _Tp* __restrict__ __e, const _Tp __t)
      {
        while (__b != __e)
          new(__b++) _Tp(__t);
      }
    };

  template<typename _Tp>
    struct _Array_init_ctor<_Tp, true>
    {
      inline static void
      _S_do_it(_Tp* __restrict__ __b, _Tp* __restrict__ __e, const _Tp __t)
      {
        while (__b != __e)
          *__b++ = __t;
      }
    };

  template<typename _Tp>
    inline void
    __valarray_fill_construct(_Tp* __restrict__ __b, _Tp* __restrict__ __e,
                              const _Tp __t)
    {
      _Array_init_ctor<_Tp, __is_pod<_Tp>::__value>::_S_do_it(__b, __e, __t);
    }

  //
  // Copy-construct raw array [__o, *) from plain array [__b, __e).
  // We can't just say 'memcpy()'.
  //
  template<typename _Tp, bool>
    struct _Array_copy_ctor
    {
      // Please note that this isn't exception safe.  But
      // valarrays aren't required to be exception safe.
      inline static void
      _S_do_it(const _Tp* __restrict__ __b, const _Tp* __restrict__ __e,
               _Tp* __restrict__ __o)
      {
        while (__b != __e)
          new(__o++) _Tp(*__b++);
      }
    };

  template<typename _Tp>
    struct _Array_copy_ctor<_Tp, true>
    {
      inline static void
      _S_do_it(const _Tp* __restrict__ __b, const _Tp* __restrict__ __e,
               _Tp* __restrict__ __o)
      { std::memcpy(__o, __b, (__e - __b) * sizeof(_Tp)); }
    };

  template<typename _Tp>
    inline void
    __valarray_copy_construct(const _Tp* __restrict__ __b,
                              const _Tp* __restrict__ __e,
                              _Tp* __restrict__ __o)
    {
      _Array_copy_ctor<_Tp, __is_pod<_Tp>::__value>::_S_do_it(__b, __e, __o);
    }

  // Copy-construct raw array [__o, *) from strided array __a[<__n : __s>].
  template<typename _Tp>
    inline void
    __valarray_copy_construct(const _Tp* __restrict__ __a, size_t __n,
                              size_t __s, _Tp* __restrict__ __o)
    {
      if (__is_pod<_Tp>::__value)
        while (__n--)
          {
            *__o++ = *__a;
            __a += __s;
          }
      else
        while (__n--)
          {
            new(__o++) _Tp(*__a);
            __a += __s;
          }
    }

  // Copy-construct raw array [__o, *) from indexed array __a[__i[<__n>]].
  template<typename _Tp>
    inline void
    __valarray_copy_construct(const _Tp* __restrict__ __a,
                              const size_t* __restrict__ __i,
                              _Tp* __restrict__ __o, size_t __n)
    {
      if (__is_pod<_Tp>::__value)
        while (__n--)
          *__o++ = __a[*__i++];
      else
        while (__n--)
          new (__o++) _Tp(__a[*__i++]);
    }
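  // Together with __valarray_destroy_elements and __valarray_release_memory
  // below, the helpers above cover the whole lifetime of an element buffer.
  // A rough sketch of what 'valarray<T> v(n, t)' does with them
  // (illustrative, not the actual valarray code):
  //
  //   _Tp* __p = std::__valarray_get_storage<_Tp>(__n);
  //   std::__valarray_fill_construct(__p, __p + __n, __t);
  //   // ... element access through __p ...
  //   std::__valarray_destroy_elements(__p, __p + __n);
  //   std::__valarray_release_memory(__p);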
  // Do the necessary cleanup when we're done with arrays.
  template<typename _Tp>
    inline void
    __valarray_destroy_elements(_Tp* __restrict__ __b, _Tp* __restrict__ __e)
    {
      if (!__is_pod<_Tp>::__value)
        while (__b != __e)
          {
            __b->~_Tp();
            ++__b;
          }
    }

  // Fill a plain array __a[<__n>] with __t.
  template<typename _Tp>
    inline void
    __valarray_fill(_Tp* __restrict__ __a, size_t __n, const _Tp& __t)
    {
      while (__n--)
        *__a++ = __t;
    }

  // Fill a strided array __a[<__n : __s>] with __t.
  template<typename _Tp>
    inline void
    __valarray_fill(_Tp* __restrict__ __a, size_t __n,
                    size_t __s, const _Tp& __t)
    {
      for (size_t __i = 0; __i < __n; ++__i, __a += __s)
        *__a = __t;
    }

  // Fill an indirect array __a[__i[<__n>]] with __t.
  template<typename _Tp>
    inline void
    __valarray_fill(_Tp* __restrict__ __a, const size_t* __restrict__ __i,
                    size_t __n, const _Tp& __t)
    {
      for (size_t __j = 0; __j < __n; ++__j, ++__i)
        __a[*__i] = __t;
    }

  // Copy a plain array __a[<__n>] into a plain array __b[<__n>].
  // For non-fundamental types, it is wrong to say 'memcpy()'.
  template<typename _Tp, bool>
    struct _Array_copier
    {
      inline static void
      _S_do_it(const _Tp* __restrict__ __a, size_t __n, _Tp* __restrict__ __b)
      {
        while (__n--)
          *__b++ = *__a++;
      }
    };

  template<typename _Tp>
    struct _Array_copier<_Tp, true>
    {
      inline static void
      _S_do_it(const _Tp* __restrict__ __a, size_t __n, _Tp* __restrict__ __b)
      { std::memcpy(__b, __a, __n * sizeof(_Tp)); }
    };

  // Copy a plain array __a[<__n>] into a plain array __b[<__n>].
  template<typename _Tp>
    inline void
    __valarray_copy(const _Tp* __restrict__ __a, size_t __n,
                    _Tp* __restrict__ __b)
    {
      _Array_copier<_Tp, __is_pod<_Tp>::__value>::_S_do_it(__a, __n, __b);
    }

  // Copy a strided array __a[<__n : __s>] into a plain array __b[<__n>].
  template<typename _Tp>
    inline void
    __valarray_copy(const _Tp* __restrict__ __a, size_t __n, size_t __s,
                    _Tp* __restrict__ __b)
    {
      for (size_t __i = 0; __i < __n; ++__i, ++__b, __a += __s)
        *__b = *__a;
    }

  // Copy a plain array __a[<__n>] into a strided array __b[<__n : __s>].
  template<typename _Tp>
    inline void
    __valarray_copy(const _Tp* __restrict__ __a, _Tp* __restrict__ __b,
                    size_t __n, size_t __s)
    {
      for (size_t __i = 0; __i < __n; ++__i, ++__a, __b += __s)
        *__b = *__a;
    }
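  // The copy overloads above are distinguished purely by their argument
  // patterns: (src, n, dst) is plain-to-plain, (src, n, s, dst) is
  // strided-to-plain, and (src, dst, n, s) is plain-to-strided.  As an
  // illustration (hypothetical names, not library code), materializing
  // v[slice(0, __n, __s)] into a buffer and writing it back would be:
  //
  //   std::__valarray_copy(__v_data, __n, __s, __buf);  // gather
  //   std::__valarray_copy(__buf, __v_data, __n, __s);  // scatter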
  // Copy a strided array __src[<__n : __s1>] into another
  // strided array __dst[< : __s2>].  Their sizes must match.
  template<typename _Tp>
    inline void
    __valarray_copy(const _Tp* __restrict__ __src, size_t __n, size_t __s1,
                    _Tp* __restrict__ __dst, size_t __s2)
    {
      for (size_t __i = 0; __i < __n; ++__i)
        __dst[__i * __s2] = __src[__i * __s1];
    }

  // Copy an indexed array __a[__i[<__n>]] into a plain array __b[<__n>].
  template<typename _Tp>
    inline void
    __valarray_copy(const _Tp* __restrict__ __a,
                    const size_t* __restrict__ __i,
                    _Tp* __restrict__ __b, size_t __n)
    {
      for (size_t __j = 0; __j < __n; ++__j, ++__b, ++__i)
        *__b = __a[*__i];
    }

  // Copy a plain array __a[<__n>] into an indexed array __b[__i[<__n>]].
  template<typename _Tp>
    inline void
    __valarray_copy(const _Tp* __restrict__ __a, size_t __n,
                    _Tp* __restrict__ __b, const size_t* __restrict__ __i)
    {
      for (size_t __j = 0; __j < __n; ++__j, ++__a, ++__i)
        __b[*__i] = *__a;
    }

  // Copy the first __n elements of an indexed array __src[<__i>] into
  // another indexed array __dst[<__j>].
  template<typename _Tp>
    inline void
    __valarray_copy(const _Tp* __restrict__ __src, size_t __n,
                    const size_t* __restrict__ __i,
                    _Tp* __restrict__ __dst, const size_t* __restrict__ __j)
    {
      for (size_t __k = 0; __k < __n; ++__k)
        __dst[*__j++] = __src[*__i++];
    }

  //
  // Compute the sum of elements in range [__f, __l).
  // This is a naive algorithm.  It suffers from cancellation.
  // In the future, try to specialize for _Tp = float, double, long double
  // using a more accurate algorithm.
  //
  template<typename _Tp>
    inline _Tp
    __valarray_sum(const _Tp* __restrict__ __f, const _Tp* __restrict__ __l)
    {
      _Tp __r = _Tp();
      while (__f != __l)
        __r += *__f++;
      return __r;
    }

  // Compute the product of all elements in range [__f, __l).
  template<typename _Tp>
    inline _Tp
    __valarray_product(const _Tp* __restrict__ __f,
                       const _Tp* __restrict__ __l)
    {
      _Tp __r = _Tp(1);
      while (__f != __l)
        __r = __r * *__f++;
      return __r;
    }

  // Compute the min/max of an array-expression.
  template<typename _Ta>
    inline typename _Ta::value_type
    __valarray_min(const _Ta& __a)
    {
      size_t __s = __a.size();
      typedef typename _Ta::value_type _Value_type;
      _Value_type __r = __s == 0 ? _Value_type() : __a[0];
      for (size_t __i = 1; __i < __s; ++__i)
        {
          _Value_type __t = __a[__i];
          if (__t < __r)
            __r = __t;
        }
      return __r;
    }

  template<typename _Ta>
    inline typename _Ta::value_type
    __valarray_max(const _Ta& __a)
    {
      size_t __s = __a.size();
      typedef typename _Ta::value_type _Value_type;
      _Value_type __r = __s == 0 ? _Value_type() : __a[0];
      for (size_t __i = 1; __i < __s; ++__i)
        {
          _Value_type __t = __a[__i];
          if (__t > __r)
            __r = __t;
        }
      return __r;
    }
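  // The comment on __valarray_sum above asks for a more accurate
  // floating-point specialization.  Compensated (Kahan) summation would be
  // one candidate; an illustrative sketch, not part of the library:
  //
  //   _Tp __r = _Tp(), __c = _Tp();
  //   while (__f != __l)
  //     {
  //       _Tp __y = *__f++ - __c;   // correct the next addend
  //       _Tp __t = __r + __y;      // low-order digits of __y are lost here
  //       __c = (__t - __r) - __y;  // recover the lost part
  //       __r = __t;
  //     }
  //   return __r;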
  //
  // Helper class _Array, first layer of valarray abstraction.
  // All operations on valarray should be forwarded to this class
  // whenever possible. -- gdr
  //

  template<typename _Tp>
    struct _Array
    {
      explicit _Array(size_t);
      explicit _Array(_Tp* const __restrict__);
      explicit _Array(const valarray<_Tp>&);
      _Array(const _Tp* __restrict__, size_t);

      _Tp* begin() const;

      _Tp* const __restrict__ _M_data;
    };

  // Copy-construct plain array __b[<__n>] from indexed array __a[__i[<__n>]].
  template<typename _Tp>
    inline void
    __valarray_copy_construct(_Array<_Tp> __a, _Array<size_t> __i,
                              _Array<_Tp> __b, size_t __n)
    { std::__valarray_copy_construct(__a._M_data, __i._M_data,
                                     __b._M_data, __n); }

  // Copy-construct plain array __b[<__n>] from strided array __a[<__n : __s>].
  template<typename _Tp>
    inline void
    __valarray_copy_construct(_Array<_Tp> __a, size_t __n, size_t __s,
                              _Array<_Tp> __b)
    { std::__valarray_copy_construct(__a._M_data, __n, __s, __b._M_data); }

  template<typename _Tp>
    inline void
    __valarray_fill(_Array<_Tp> __a, size_t __n, const _Tp& __t)
    { std::__valarray_fill(__a._M_data, __n, __t); }

  template<typename _Tp>
    inline void
    __valarray_fill(_Array<_Tp> __a, size_t __n, size_t __s, const _Tp& __t)
    { std::__valarray_fill(__a._M_data, __n, __s, __t); }

  template<typename _Tp>
    inline void
    __valarray_fill(_Array<_Tp> __a, _Array<size_t> __i,
                    size_t __n, const _Tp& __t)
    { std::__valarray_fill(__a._M_data, __i._M_data, __n, __t); }

  // Copy a plain array __a[<__n>] into a plain array __b[<__n>].
  template<typename _Tp>
    inline void
    __valarray_copy(_Array<_Tp> __a, size_t __n, _Array<_Tp> __b)
    { std::__valarray_copy(__a._M_data, __n, __b._M_data); }

  // Copy a strided array __a[<__n : __s>] into a plain array __b[<__n>].
  template<typename _Tp>
    inline void
    __valarray_copy(_Array<_Tp> __a, size_t __n, size_t __s, _Array<_Tp> __b)
    { std::__valarray_copy(__a._M_data, __n, __s, __b._M_data); }

  // Copy a plain array __a[<__n>] into a strided array __b[<__n : __s>].
  template<typename _Tp>
    inline void
    __valarray_copy(_Array<_Tp> __a, _Array<_Tp> __b, size_t __n, size_t __s)
    { std::__valarray_copy(__a._M_data, __b._M_data, __n, __s); }

  // Copy a strided array __src[<__n : __s1>] into another
  // strided array __dst[< : __s2>].  Their sizes must match.
  template<typename _Tp>
    inline void
    __valarray_copy(_Array<_Tp> __a, size_t __n, size_t __s1,
                    _Array<_Tp> __b, size_t __s2)
    { std::__valarray_copy(__a._M_data, __n, __s1, __b._M_data, __s2); }

  // Copy an indexed array __a[__i[<__n>]] into a plain array __b[<__n>].
  template<typename _Tp>
    inline void
    __valarray_copy(_Array<_Tp> __a, _Array<size_t> __i,
                    _Array<_Tp> __b, size_t __n)
    { std::__valarray_copy(__a._M_data, __i._M_data, __b._M_data, __n); }

  // Copy a plain array __a[<__n>] into an indexed array __b[__i[<__n>]].
  template<typename _Tp>
    inline void
    __valarray_copy(_Array<_Tp> __a, size_t __n, _Array<_Tp> __b,
                    _Array<size_t> __i)
    { std::__valarray_copy(__a._M_data, __n, __b._M_data, __i._M_data); }

  // Copy the first __n elements of an indexed array __src[<__i>] into
  // another indexed array __dst[<__j>].
  template<typename _Tp>
    inline void
    __valarray_copy(_Array<_Tp> __src, size_t __n, _Array<size_t> __i,
                    _Array<_Tp> __dst, _Array<size_t> __j)
    {
      std::__valarray_copy(__src._M_data, __n, __i._M_data,
                           __dst._M_data, __j._M_data);
    }
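  // _Array is deliberately passed by value in the forwarders above: it
  // holds a single (restricted) pointer, so copying it costs no more than
  // copying a raw pointer, and each overload merely unwraps _M_data and
  // forwards to the corresponding raw-pointer kernel.  Note that _Array
  // does not manage its storage: the allocating constructors defined below
  // obtain memory via __valarray_get_storage, but releasing it (with
  // __valarray_release_memory) is left to the owning valarray.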
  template<typename _Tp>
    inline
    _Array<_Tp>::_Array(size_t __n)
    : _M_data(__valarray_get_storage<_Tp>(__n))
    { std::__valarray_default_construct(_M_data, _M_data + __n); }

  template<typename _Tp>
    inline
    _Array<_Tp>::_Array(_Tp* const __restrict__ __p)
    : _M_data(__p) {}

  template<typename _Tp>
    inline
    _Array<_Tp>::_Array(const valarray<_Tp>& __v)
    : _M_data(__v._M_data) {}

  template<typename _Tp>
    inline
    _Array<_Tp>::_Array(const _Tp* __restrict__ __b, size_t __s)
    : _M_data(__valarray_get_storage<_Tp>(__s))
    { std::__valarray_copy_construct(__b, __s, _M_data); }

  template<typename _Tp>
    inline _Tp*
    _Array<_Tp>::begin() const
    { return _M_data; }

#define _DEFINE_ARRAY_FUNCTION(_Op, _Name) \
  template<typename _Tp> \
    inline void \
    _Array_augmented_##_Name(_Array<_Tp> __a, size_t __n, const _Tp& __t) \
    { \
      for (_Tp* __p = __a._M_data; __p < __a._M_data + __n; ++__p) \
        *__p _Op##= __t; \
    } \
\
  template<typename _Tp> \
    inline void \
    _Array_augmented_##_Name(_Array<_Tp> __a, size_t __n, _Array<_Tp> __b) \
    { \
      _Tp* __p = __a._M_data; \
      for (_Tp* __q = __b._M_data; __q < __b._M_data + __n; ++__p, ++__q) \
        *__p _Op##= *__q; \
    } \
\
  template<typename _Tp, class _Dom> \
    void \
    _Array_augmented_##_Name(_Array<_Tp> __a, \
                             const _Expr<_Dom, _Tp>& __e, size_t __n) \
    { \
      _Tp* __p(__a._M_data); \
      for (size_t __i = 0; __i < __n; ++__i, ++__p) \
        *__p _Op##= __e[__i]; \
    } \
\
  template<typename _Tp> \
    inline void \
    _Array_augmented_##_Name(_Array<_Tp> __a, size_t __n, size_t __s, \
                             _Array<_Tp> __b) \
    { \
      _Tp* __q(__b._M_data); \
      for (_Tp* __p = __a._M_data; __p < __a._M_data + __s * __n; \
           __p += __s, ++__q) \
        *__p _Op##= *__q; \
    } \
\
  template<typename _Tp> \
    inline void \
    _Array_augmented_##_Name(_Array<_Tp> __a, _Array<_Tp> __b, \
                             size_t __n, size_t __s) \
    { \
      _Tp* __q(__b._M_data); \
      for (_Tp* __p = __a._M_data; __p < __a._M_data + __n; \
           ++__p, __q += __s) \
        *__p _Op##= *__q; \
    } \
\
  template<typename _Tp, class _Dom> \
    void \
    _Array_augmented_##_Name(_Array<_Tp> __a, size_t __s, \
                             const _Expr<_Dom, _Tp>& __e, size_t __n) \
    { \
      _Tp* __p(__a._M_data); \
      for (size_t __i = 0; __i < __n; ++__i, __p += __s) \
        *__p _Op##= __e[__i]; \
    } \
\
  template<typename _Tp> \
    inline void \
    _Array_augmented_##_Name(_Array<_Tp> __a, _Array<size_t> __i, \
                             _Array<_Tp> __b, size_t __n) \
    { \
      _Tp* __q(__b._M_data); \
      for (size_t* __j = __i._M_data; __j < __i._M_data + __n; \
           ++__j, ++__q) \
        __a._M_data[*__j] _Op##= *__q; \
    } \
\
  template<typename _Tp> \
    inline void \
    _Array_augmented_##_Name(_Array<_Tp> __a, size_t __n, \
                             _Array<_Tp> __b, _Array<size_t> __i) \
    { \
      _Tp* __p(__a._M_data); \
      for (size_t* __j = __i._M_data; __j < __i._M_data + __n; \
           ++__j, ++__p) \
        *__p _Op##= __b._M_data[*__j]; \
    } \
\
  template<typename _Tp, class _Dom> \
    void \
    _Array_augmented_##_Name(_Array<_Tp> __a, _Array<size_t> __i, \
                             const _Expr<_Dom, _Tp>& __e, size_t __n) \
    { \
      size_t* __j(__i._M_data); \
      for (size_t __k = 0; __k < __n; ++__k, ++__j) \
        __a._M_data[*__j] _Op##= __e[__k]; \
    } \
\
  template<typename _Tp> \
    void \
    _Array_augmented_##_Name(_Array<_Tp> __a, _Array<bool> __m, \
                             _Array<_Tp> __b, size_t __n) \
    { \
      bool* __ok(__m._M_data); \
      _Tp* __p(__a._M_data); \
      for (_Tp* __q = __b._M_data; __q < __b._M_data + __n; \
           ++__q, ++__ok, ++__p) \
        { \
          while (!*__ok) \
            { \
              ++__ok; \
              ++__p; \
            } \
          *__p _Op##= *__q; \
        } \
    } \
\
  template<typename _Tp> \
    void \
    _Array_augmented_##_Name(_Array<_Tp> __a, size_t __n, \
                             _Array<_Tp> __b, _Array<bool> __m) \
    { \
      bool* __ok(__m._M_data); \
      _Tp* __q(__b._M_data); \
      for (_Tp* __p = __a._M_data; __p < __a._M_data + __n; \
           ++__p, ++__ok, ++__q) \
        { \
          while (!*__ok) \
            { \
              ++__ok; \
              ++__q; \
            } \
          *__p _Op##= *__q; \
        } \
    } \
\
  template<typename _Tp, class _Dom> \
    void \
    _Array_augmented_##_Name(_Array<_Tp> __a, _Array<bool> __m, \
                             const _Expr<_Dom, _Tp>& __e, size_t __n) \
    { \
      bool* __ok(__m._M_data); \
      _Tp* __p(__a._M_data); \
      for (size_t __i = 0; __i < __n; ++__i, ++__ok, ++__p) \
        { \
          while (!*__ok) \
            { \
              ++__ok; \
              ++__p; \
            } \
          *__p _Op##= __e[__i]; \
        } \
    }
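  // Each invocation below stamps out the complete overload set for one
  // operator: _DEFINE_ARRAY_FUNCTION(+, __plus), for example, generates
  // the _Array_augmented___plus family implementing the '+=' forms of
  // valarray assignment for scalar, plain-array, expression, strided,
  // indexed and masked right-hand sides.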
  _DEFINE_ARRAY_FUNCTION(+, __plus)
  _DEFINE_ARRAY_FUNCTION(-, __minus)
  _DEFINE_ARRAY_FUNCTION(*, __multiplies)
  _DEFINE_ARRAY_FUNCTION(/, __divides)
  _DEFINE_ARRAY_FUNCTION(%, __modulus)
  _DEFINE_ARRAY_FUNCTION(^, __bitwise_xor)
  _DEFINE_ARRAY_FUNCTION(|, __bitwise_or)
  _DEFINE_ARRAY_FUNCTION(&, __bitwise_and)
  _DEFINE_ARRAY_FUNCTION(<<, __shift_left)
  _DEFINE_ARRAY_FUNCTION(>>, __shift_right)

#undef _DEFINE_ARRAY_FUNCTION

_GLIBCXX_END_NAMESPACE

#ifndef _GLIBCXX_EXPORT_TEMPLATE
# include <bits/valarray_array.tcc>
#endif

#endif /* _VALARRAY_ARRAY_H */