1 //! Minimal API of pointer vectors. 2 3 macro_rules! impl_minimal_p { 4 ([$elem_ty:ty; $elem_count:expr]: $id:ident, $mask_ty:ident, 5 $usize_ty:ident, $isize_ty:ident | $ref:ident | $test_tt:tt 6 | $($elem_name:ident),+ | ($true:expr, $false:expr) | 7 $(#[$doc:meta])*) => { 8 9 $(#[$doc])* 10 pub type $id<T> = Simd<[$elem_ty; $elem_count]>; 11 12 impl<T> sealed::Simd for $id<T> { 13 type Element = $elem_ty; 14 const LANES: usize = $elem_count; 15 type LanesType = [u32; $elem_count]; 16 } 17 18 impl<T> $id<T> { 19 /// Creates a new instance with each vector elements initialized 20 /// with the provided values. 21 #[inline] 22 #[allow(clippy::too_many_arguments)] 23 pub const fn new($($elem_name: $elem_ty),*) -> Self { 24 Simd(codegen::$id($($elem_name),*)) 25 } 26 27 /// Returns the number of vector lanes. 28 #[inline] 29 pub const fn lanes() -> usize { 30 $elem_count 31 } 32 33 /// Constructs a new instance with each element initialized to 34 /// `value`. 35 #[inline] 36 pub const fn splat(value: $elem_ty) -> Self { 37 Simd(codegen::$id($({ 38 #[allow(non_camel_case_types, dead_code)] 39 struct $elem_name; 40 value 41 }),*)) 42 } 43 44 /// Constructs a new instance with each element initialized to 45 /// `null`. 46 #[inline] 47 pub const fn null() -> Self { 48 Self::splat(crate::ptr::null_mut() as $elem_ty) 49 } 50 51 /// Returns a mask that selects those lanes that contain `null` 52 /// pointers. 53 #[inline] 54 pub fn is_null(self) -> $mask_ty { 55 self.eq(Self::null()) 56 } 57 58 /// Extracts the value at `index`. 59 /// 60 /// # Panics 61 /// 62 /// If `index >= Self::lanes()`. 63 #[inline] 64 pub fn extract(self, index: usize) -> $elem_ty { 65 assert!(index < $elem_count); 66 unsafe { self.extract_unchecked(index) } 67 } 68 69 /// Extracts the value at `index`. 70 /// 71 /// # Precondition 72 /// 73 /// If `index >= Self::lanes()` the behavior is undefined. 
74 #[inline] 75 pub unsafe fn extract_unchecked(self, index: usize) -> $elem_ty { 76 use crate::llvm::simd_extract; 77 simd_extract(self.0, index as u32) 78 } 79 80 /// Returns a new vector where the value at `index` is replaced by 81 /// `new_value`. 82 /// 83 /// # Panics 84 /// 85 /// If `index >= Self::lanes()`. 86 #[inline] 87 #[must_use = "replace does not modify the original value - \ 88 it returns a new vector with the value at `index` \ 89 replaced by `new_value`d" 90 ] 91 #[allow(clippy::not_unsafe_ptr_arg_deref)] 92 pub fn replace(self, index: usize, new_value: $elem_ty) -> Self { 93 assert!(index < $elem_count); 94 unsafe { self.replace_unchecked(index, new_value) } 95 } 96 97 /// Returns a new vector where the value at `index` is replaced by `new_value`. 98 /// 99 /// # Precondition 100 /// 101 /// If `index >= Self::lanes()` the behavior is undefined. 102 #[inline] 103 #[must_use = "replace_unchecked does not modify the original value - \ 104 it returns a new vector with the value at `index` \ 105 replaced by `new_value`d" 106 ] 107 pub unsafe fn replace_unchecked( 108 self, 109 index: usize, 110 new_value: $elem_ty, 111 ) -> Self { 112 use crate::llvm::simd_insert; 113 Simd(simd_insert(self.0, index as u32, new_value)) 114 } 115 } 116 117 118 test_if!{ 119 $test_tt: 120 paste::item! 
{ 121 pub mod [<$id _minimal>] { 122 use super::*; 123 #[cfg_attr(not(target_arch = "wasm32"), test)] 124 #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)] 125 fn minimal() { 126 // lanes: 127 assert_eq!($elem_count, $id::<i32>::lanes()); 128 129 // splat and extract / extract_unchecked: 130 let VAL7: <$id<i32> as sealed::Simd>::Element 131 = $ref!(7); 132 let VAL42: <$id<i32> as sealed::Simd>::Element 133 = $ref!(42); 134 let VEC: $id<i32> = $id::splat(VAL7); 135 for i in 0..$id::<i32>::lanes() { 136 assert_eq!(VAL7, VEC.extract(i)); 137 assert_eq!( 138 VAL7, unsafe { VEC.extract_unchecked(i) } 139 ); 140 } 141 142 // replace / replace_unchecked 143 let new_vec = VEC.replace(0, VAL42); 144 for i in 0..$id::<i32>::lanes() { 145 if i == 0 { 146 assert_eq!(VAL42, new_vec.extract(i)); 147 } else { 148 assert_eq!(VAL7, new_vec.extract(i)); 149 } 150 } 151 let new_vec = unsafe { 152 VEC.replace_unchecked(0, VAL42) 153 }; 154 for i in 0..$id::<i32>::lanes() { 155 if i == 0 { 156 assert_eq!(VAL42, new_vec.extract(i)); 157 } else { 158 assert_eq!(VAL7, new_vec.extract(i)); 159 } 160 } 161 162 let mut n = $id::<i32>::null(); 163 assert_eq!( 164 n, 165 $id::<i32>::splat(unsafe { crate::mem::zeroed() }) 166 ); 167 assert!(n.is_null().all()); 168 n = n.replace( 169 0, unsafe { crate::mem::transmute(1_isize) } 170 ); 171 assert!(!n.is_null().all()); 172 if $id::<i32>::lanes() > 1 { 173 assert!(n.is_null().any()); 174 } else { 175 assert!(!n.is_null().any()); 176 } 177 } 178 179 // FIXME: wasm-bindgen-test does not support #[should_panic] 180 // #[cfg_attr(not(target_arch = "wasm32"), test)] 181 // #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)] 182 #[cfg(not(target_arch = "wasm32"))] 183 #[test] 184 #[should_panic] 185 fn extract_panic_oob() { 186 let VAL: <$id<i32> as sealed::Simd>::Element 187 = $ref!(7); 188 let VEC: $id<i32> = $id::splat(VAL); 189 let _ = VEC.extract($id::<i32>::lanes()); 190 } 191 192 // FIXME: wasm-bindgen-test does not support #[should_panic] 
193 // #[cfg_attr(not(target_arch = "wasm32"), test)] 194 // #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)] 195 #[cfg(not(target_arch = "wasm32"))] 196 #[test] 197 #[should_panic] 198 fn replace_panic_oob() { 199 let VAL: <$id<i32> as sealed::Simd>::Element 200 = $ref!(7); 201 let VAL42: <$id<i32> as sealed::Simd>::Element 202 = $ref!(42); 203 let VEC: $id<i32> = $id::splat(VAL); 204 let _ = VEC.replace($id::<i32>::lanes(), VAL42); 205 } 206 } 207 } 208 } 209 210 impl<T> crate::fmt::Debug for $id<T> { 211 #[allow(clippy::missing_inline_in_public_items)] 212 fn fmt(&self, f: &mut crate::fmt::Formatter<'_>) 213 -> crate::fmt::Result { 214 write!( 215 f, 216 "{}<{}>(", 217 stringify!($id), 218 unsafe { crate::intrinsics::type_name::<T>() } 219 )?; 220 for i in 0..$elem_count { 221 if i > 0 { 222 write!(f, ", ")?; 223 } 224 self.extract(i).fmt(f)?; 225 } 226 write!(f, ")") 227 } 228 } 229 230 test_if!{ 231 $test_tt: 232 paste::item! { 233 pub mod [<$id _fmt_debug>] { 234 use super::*; 235 #[cfg_attr(not(target_arch = "wasm32"), test)] 236 #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)] 237 fn debug() { 238 use arrayvec::{ArrayString,ArrayVec}; 239 type TinyString = ArrayString<[u8; 512]>; 240 241 use crate::fmt::Write; 242 let v = $id::<i32>::default(); 243 let mut s = TinyString::new(); 244 write!(&mut s, "{:?}", v).unwrap(); 245 246 let mut beg = TinyString::new(); 247 write!(&mut beg, "{}<i32>(", stringify!($id)).unwrap(); 248 assert!( 249 s.starts_with(beg.as_str()), 250 "s = {} (should start with = {})", s, beg 251 ); 252 assert!(s.ends_with(")")); 253 let s: ArrayVec<[TinyString; 64]> 254 = s.replace(beg.as_str(), "") 255 .replace(")", "").split(",") 256 .map(|v| TinyString::from(v.trim()).unwrap()) 257 .collect(); 258 assert_eq!(s.len(), $id::<i32>::lanes()); 259 for (index, ss) in s.into_iter().enumerate() { 260 let mut e = TinyString::new(); 261 write!(&mut e, "{:?}", v.extract(index)).unwrap(); 262 assert_eq!(ss, e); 263 } 264 } 265 } 266 } 267 
} 268 269 impl<T> Default for $id<T> { 270 #[inline] 271 fn default() -> Self { 272 // FIXME: ptrs do not implement default 273 Self::null() 274 } 275 } 276 277 test_if!{ 278 $test_tt: 279 paste::item! { 280 pub mod [<$id _default>] { 281 use super::*; 282 #[cfg_attr(not(target_arch = "wasm32"), test)] 283 #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)] 284 fn default() { 285 let a = $id::<i32>::default(); 286 for i in 0..$id::<i32>::lanes() { 287 assert_eq!( 288 a.extract(i), unsafe { crate::mem::zeroed() } 289 ); 290 } 291 } 292 } 293 } 294 } 295 296 impl<T> $id<T> { 297 /// Lane-wise equality comparison. 298 #[inline] 299 pub fn eq(self, other: Self) -> $mask_ty { 300 unsafe { 301 use crate::llvm::simd_eq; 302 let a: $usize_ty = crate::mem::transmute(self); 303 let b: $usize_ty = crate::mem::transmute(other); 304 Simd(simd_eq(a.0, b.0)) 305 } 306 } 307 308 /// Lane-wise inequality comparison. 309 #[inline] 310 pub fn ne(self, other: Self) -> $mask_ty { 311 unsafe { 312 use crate::llvm::simd_ne; 313 let a: $usize_ty = crate::mem::transmute(self); 314 let b: $usize_ty = crate::mem::transmute(other); 315 Simd(simd_ne(a.0, b.0)) 316 } 317 } 318 319 /// Lane-wise less-than comparison. 320 #[inline] 321 pub fn lt(self, other: Self) -> $mask_ty { 322 unsafe { 323 use crate::llvm::simd_lt; 324 let a: $usize_ty = crate::mem::transmute(self); 325 let b: $usize_ty = crate::mem::transmute(other); 326 Simd(simd_lt(a.0, b.0)) 327 } 328 } 329 330 /// Lane-wise less-than-or-equals comparison. 331 #[inline] 332 pub fn le(self, other: Self) -> $mask_ty { 333 unsafe { 334 use crate::llvm::simd_le; 335 let a: $usize_ty = crate::mem::transmute(self); 336 let b: $usize_ty = crate::mem::transmute(other); 337 Simd(simd_le(a.0, b.0)) 338 } 339 } 340 341 /// Lane-wise greater-than comparison. 
342 #[inline] 343 pub fn gt(self, other: Self) -> $mask_ty { 344 unsafe { 345 use crate::llvm::simd_gt; 346 let a: $usize_ty = crate::mem::transmute(self); 347 let b: $usize_ty = crate::mem::transmute(other); 348 Simd(simd_gt(a.0, b.0)) 349 } 350 } 351 352 /// Lane-wise greater-than-or-equals comparison. 353 #[inline] 354 pub fn ge(self, other: Self) -> $mask_ty { 355 unsafe { 356 use crate::llvm::simd_ge; 357 let a: $usize_ty = crate::mem::transmute(self); 358 let b: $usize_ty = crate::mem::transmute(other); 359 Simd(simd_ge(a.0, b.0)) 360 } 361 } 362 } 363 364 test_if!{ 365 $test_tt: 366 paste::item! { 367 pub mod [<$id _cmp_vertical>] { 368 use super::*; 369 #[cfg_attr(not(target_arch = "wasm32"), test)] 370 #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)] 371 fn cmp() { 372 let a = $id::<i32>::null(); 373 let b = $id::<i32>::splat(unsafe { 374 crate::mem::transmute(1_isize) 375 }); 376 377 let r = a.lt(b); 378 let e = $mask_ty::splat(true); 379 assert!(r == e); 380 let r = a.le(b); 381 assert!(r == e); 382 383 let e = $mask_ty::splat(false); 384 let r = a.gt(b); 385 assert!(r == e); 386 let r = a.ge(b); 387 assert!(r == e); 388 let r = a.eq(b); 389 assert!(r == e); 390 391 let mut a = a; 392 let mut b = b; 393 let mut e = e; 394 for i in 0..$id::<i32>::lanes() { 395 if i % 2 == 0 { 396 a = a.replace( 397 i, 398 unsafe { crate::mem::transmute(0_isize) } 399 ); 400 b = b.replace( 401 i, 402 unsafe { crate::mem::transmute(1_isize) } 403 ); 404 e = e.replace(i, true); 405 } else { 406 a = a.replace( 407 i, 408 unsafe { crate::mem::transmute(1_isize) } 409 ); 410 b = b.replace( 411 i, 412 unsafe { crate::mem::transmute(0_isize) } 413 ); 414 e = e.replace(i, false); 415 } 416 } 417 let r = a.lt(b); 418 assert!(r == e); 419 } 420 } 421 } 422 } 423 424 #[allow(clippy::partialeq_ne_impl)] 425 impl<T> crate::cmp::PartialEq<$id<T>> for $id<T> { 426 #[inline] 427 fn eq(&self, other: &Self) -> bool { 428 $id::<T>::eq(*self, *other).all() 429 } 430 #[inline] 431 fn 
ne(&self, other: &Self) -> bool { 432 $id::<T>::ne(*self, *other).any() 433 } 434 } 435 436 // FIXME: https://github.com/rust-lang-nursery/rust-clippy/issues/2892 437 #[allow(clippy::partialeq_ne_impl)] 438 impl<T> crate::cmp::PartialEq<LexicographicallyOrdered<$id<T>>> 439 for LexicographicallyOrdered<$id<T>> 440 { 441 #[inline] 442 fn eq(&self, other: &Self) -> bool { 443 self.0 == other.0 444 } 445 #[inline] 446 fn ne(&self, other: &Self) -> bool { 447 self.0 != other.0 448 } 449 } 450 451 test_if!{ 452 $test_tt: 453 paste::item! { 454 pub mod [<$id _cmp_PartialEq>] { 455 use super::*; 456 #[cfg_attr(not(target_arch = "wasm32"), test)] 457 #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)] 458 fn partial_eq() { 459 let a = $id::<i32>::null(); 460 let b = $id::<i32>::splat(unsafe { 461 crate::mem::transmute(1_isize) 462 }); 463 464 assert!(a != b); 465 assert!(!(a == b)); 466 assert!(a == a); 467 assert!(!(a != a)); 468 469 if $id::<i32>::lanes() > 1 { 470 let a = $id::<i32>::null().replace(0, unsafe { 471 crate::mem::transmute(1_isize) 472 }); 473 let b = $id::<i32>::splat(unsafe { 474 crate::mem::transmute(1_isize) 475 }); 476 477 assert!(a != b); 478 assert!(!(a == b)); 479 assert!(a == a); 480 assert!(!(a != a)); 481 } 482 } 483 } 484 } 485 } 486 487 impl<T> crate::cmp::Eq for $id<T> {} 488 impl<T> crate::cmp::Eq for LexicographicallyOrdered<$id<T>> {} 489 490 test_if!{ 491 $test_tt: 492 paste::item! { 493 pub mod [<$id _cmp_eq>] { 494 use super::*; 495 #[cfg_attr(not(target_arch = "wasm32"), test)] 496 #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)] 497 fn eq() { 498 fn foo<E: crate::cmp::Eq>(_: E) {} 499 let a = $id::<i32>::null(); 500 foo(a); 501 } 502 } 503 } 504 } 505 506 impl<T> From<[$elem_ty; $elem_count]> for $id<T> { 507 #[inline] 508 fn from(array: [$elem_ty; $elem_count]) -> Self { 509 unsafe { 510 // FIXME: unnecessary zeroing; better than UB. 
511 let mut u: Self = crate::mem::zeroed(); 512 crate::ptr::copy_nonoverlapping( 513 &array as *const [$elem_ty; $elem_count] as *const u8, 514 &mut u as *mut Self as *mut u8, 515 crate::mem::size_of::<Self>() 516 ); 517 u 518 } 519 } 520 } 521 impl<T> Into<[$elem_ty; $elem_count]> for $id<T> { 522 #[inline] 523 fn into(self) -> [$elem_ty; $elem_count] { 524 unsafe { 525 // FIXME: unnecessary zeroing; better than UB. 526 let mut u: [$elem_ty; $elem_count] = crate::mem::zeroed(); 527 crate::ptr::copy_nonoverlapping( 528 &self as *const $id<T> as *const u8, 529 &mut u as *mut [$elem_ty; $elem_count] as *mut u8, 530 crate::mem::size_of::<Self>() 531 ); 532 u 533 } 534 } 535 } 536 537 test_if!{ 538 $test_tt: 539 paste::item! { 540 pub mod [<$id _from>] { 541 use super::*; 542 #[cfg_attr(not(target_arch = "wasm32"), test)] 543 #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)] 544 fn array() { 545 let values = [1_i32; $elem_count]; 546 547 let mut vec: $id<i32> = Default::default(); 548 let mut array = [ 549 $id::<i32>::null().extract(0); $elem_count 550 ]; 551 552 for i in 0..$elem_count { 553 let ptr = unsafe { 554 crate::mem::transmute( 555 &values[i] as *const i32 556 ) 557 }; 558 vec = vec.replace(i, ptr); 559 array[i] = ptr; 560 } 561 562 // FIXME: there is no impl of From<$id<T>> for [$elem_ty; N] 563 // let a0 = From::from(vec); 564 // assert_eq!(a0, array); 565 #[allow(unused_assignments)] 566 let mut a1 = array; 567 a1 = vec.into(); 568 assert_eq!(a1, array); 569 570 let v0: $id<i32> = From::from(array); 571 assert_eq!(v0, vec); 572 let v1: $id<i32> = array.into(); 573 assert_eq!(v1, vec); 574 } 575 } 576 } 577 } 578 579 impl<T> $id<T> { 580 /// Instantiates a new vector with the values of the `slice`. 581 /// 582 /// # Panics 583 /// 584 /// If `slice.len() < Self::lanes()` or `&slice[0]` is not aligned 585 /// to an `align_of::<Self>()` boundary. 
586 #[inline] 587 pub fn from_slice_aligned(slice: &[$elem_ty]) -> Self { 588 unsafe { 589 assert!(slice.len() >= $elem_count); 590 let target_ptr = slice.get_unchecked(0) as *const $elem_ty; 591 assert!( 592 target_ptr.align_offset(crate::mem::align_of::<Self>()) 593 == 0 594 ); 595 Self::from_slice_aligned_unchecked(slice) 596 } 597 } 598 599 /// Instantiates a new vector with the values of the `slice`. 600 /// 601 /// # Panics 602 /// 603 /// If `slice.len() < Self::lanes()`. 604 #[inline] 605 pub fn from_slice_unaligned(slice: &[$elem_ty]) -> Self { 606 unsafe { 607 assert!(slice.len() >= $elem_count); 608 Self::from_slice_unaligned_unchecked(slice) 609 } 610 } 611 612 /// Instantiates a new vector with the values of the `slice`. 613 /// 614 /// # Precondition 615 /// 616 /// If `slice.len() < Self::lanes()` or `&slice[0]` is not aligned 617 /// to an `align_of::<Self>()` boundary, the behavior is undefined. 618 #[inline] 619 pub unsafe fn from_slice_aligned_unchecked(slice: &[$elem_ty]) 620 -> Self { 621 #[allow(clippy::cast_ptr_alignment)] 622 *(slice.get_unchecked(0) as *const $elem_ty as *const Self) 623 } 624 625 /// Instantiates a new vector with the values of the `slice`. 626 /// 627 /// # Precondition 628 /// 629 /// If `slice.len() < Self::lanes()` the behavior is undefined. 630 #[inline] 631 pub unsafe fn from_slice_unaligned_unchecked( 632 slice: &[$elem_ty], 633 ) -> Self { 634 use crate::mem::size_of; 635 let target_ptr = 636 slice.get_unchecked(0) as *const $elem_ty as *const u8; 637 let mut x = Self::splat(crate::ptr::null_mut() as $elem_ty); 638 let self_ptr = &mut x as *mut Self as *mut u8; 639 crate::ptr::copy_nonoverlapping( 640 target_ptr, 641 self_ptr, 642 size_of::<Self>(), 643 ); 644 x 645 } 646 } 647 648 test_if!{ 649 $test_tt: 650 paste::item! 
{ 651 pub mod [<$id _slice_from_slice>] { 652 use super::*; 653 use crate::iter::Iterator; 654 655 #[cfg_attr(not(target_arch = "wasm32"), test)] 656 #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)] 657 fn from_slice_unaligned() { 658 let (null, non_null) = ptr_vals!($id<i32>); 659 660 let mut unaligned = [ 661 non_null; $id::<i32>::lanes() + 1 662 ]; 663 unaligned[0] = null; 664 let vec = $id::<i32>::from_slice_unaligned( 665 &unaligned[1..] 666 ); 667 for (index, &b) in unaligned.iter().enumerate() { 668 if index == 0 { 669 assert_eq!(b, null); 670 } else { 671 assert_eq!(b, non_null); 672 assert_eq!(b, vec.extract(index - 1)); 673 } 674 } 675 } 676 677 // FIXME: wasm-bindgen-test does not support #[should_panic] 678 // #[cfg_attr(not(target_arch = "wasm32"), test)] 679 // #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)] 680 #[cfg(not(target_arch = "wasm32"))] 681 #[test] 682 #[should_panic] 683 fn from_slice_unaligned_fail() { 684 let (_null, non_null) = ptr_vals!($id<i32>); 685 let unaligned = [non_null; $id::<i32>::lanes() + 1]; 686 // the slice is not large enough => panic 687 let _vec = $id::<i32>::from_slice_unaligned( 688 &unaligned[2..] 689 ); 690 } 691 692 union A { 693 data: [<$id<i32> as sealed::Simd>::Element; 694 2 * $id::<i32>::lanes()], 695 _vec: $id<i32>, 696 } 697 698 #[cfg_attr(not(target_arch = "wasm32"), test)] 699 #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)] 700 fn from_slice_aligned() { 701 let (null, non_null) = ptr_vals!($id<i32>); 702 let mut aligned = A { 703 data: [null; 2 * $id::<i32>::lanes()], 704 }; 705 for i in 706 $id::<i32>::lanes()..(2 * $id::<i32>::lanes()) { 707 unsafe { 708 aligned.data[i] = non_null; 709 } 710 } 711 712 let vec = unsafe { 713 $id::<i32>::from_slice_aligned( 714 &aligned.data[$id::<i32>::lanes()..] 
715 ) 716 }; 717 for (index, &b) in unsafe { 718 aligned.data.iter().enumerate() 719 } { 720 if index < $id::<i32>::lanes() { 721 assert_eq!(b, null); 722 } else { 723 assert_eq!(b, non_null); 724 assert_eq!( 725 b, vec.extract(index - $id::<i32>::lanes()) 726 ); 727 } 728 } 729 } 730 731 // FIXME: wasm-bindgen-test does not support #[should_panic] 732 // #[cfg_attr(not(target_arch = "wasm32"), test)] 733 // #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)] 734 #[cfg(not(target_arch = "wasm32"))] 735 #[test] 736 #[should_panic] 737 fn from_slice_aligned_fail_lanes() { 738 let (_null, non_null) = ptr_vals!($id<i32>); 739 let aligned = A { 740 data: [non_null; 2 * $id::<i32>::lanes()], 741 }; 742 // the slice is not large enough => panic 743 let _vec = unsafe { 744 $id::<i32>::from_slice_aligned( 745 &aligned.data[2 * $id::<i32>::lanes()..] 746 ) 747 }; 748 } 749 750 // FIXME: wasm-bindgen-test does not support #[should_panic] 751 // #[cfg_attr(not(target_arch = "wasm32"), test)] 752 // #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)] 753 #[cfg(not(target_arch = "wasm32"))] 754 #[test] 755 #[should_panic] 756 fn from_slice_aligned_fail_align() { 757 unsafe { 758 let (null, _non_null) = ptr_vals!($id<i32>); 759 let aligned = A { 760 data: [null; 2 * $id::<i32>::lanes()], 761 }; 762 763 // get a pointer to the front of data 764 let ptr = aligned.data.as_ptr(); 765 // offset pointer by one element 766 let ptr = ptr.wrapping_add(1); 767 768 if ptr.align_offset( 769 crate::mem::align_of::<$id<i32>>() 770 ) == 0 { 771 // the pointer is properly aligned, so 772 // from_slice_aligned won't fail here (e.g. this 773 // can happen for i128x1). 
So we panic to make 774 // the "should_fail" test pass: 775 panic!("ok"); 776 } 777 778 // create a slice - this is safe, because the 779 // elements of the slice exist, are properly 780 // initialized, and properly aligned: 781 let s = slice::from_raw_parts( 782 ptr, $id::<i32>::lanes() 783 ); 784 // this should always panic because the slice 785 // alignment does not match the alignment 786 // requirements for the vector type: 787 let _vec = $id::<i32>::from_slice_aligned(s); 788 } 789 } 790 } 791 } 792 } 793 794 impl<T> $id<T> { 795 /// Writes the values of the vector to the `slice`. 796 /// 797 /// # Panics 798 /// 799 /// If `slice.len() < Self::lanes()` or `&slice[0]` is not 800 /// aligned to an `align_of::<Self>()` boundary. 801 #[inline] 802 pub fn write_to_slice_aligned(self, slice: &mut [$elem_ty]) { 803 unsafe { 804 assert!(slice.len() >= $elem_count); 805 let target_ptr = 806 slice.get_unchecked_mut(0) as *mut $elem_ty; 807 assert!( 808 target_ptr.align_offset(crate::mem::align_of::<Self>()) 809 == 0 810 ); 811 self.write_to_slice_aligned_unchecked(slice); 812 } 813 } 814 815 /// Writes the values of the vector to the `slice`. 816 /// 817 /// # Panics 818 /// 819 /// If `slice.len() < Self::lanes()`. 820 #[inline] 821 pub fn write_to_slice_unaligned(self, slice: &mut [$elem_ty]) { 822 unsafe { 823 assert!(slice.len() >= $elem_count); 824 self.write_to_slice_unaligned_unchecked(slice); 825 } 826 } 827 828 /// Writes the values of the vector to the `slice`. 829 /// 830 /// # Precondition 831 /// 832 /// If `slice.len() < Self::lanes()` or `&slice[0]` is not 833 /// aligned to an `align_of::<Self>()` boundary, the behavior is 834 /// undefined. 835 #[inline] 836 pub unsafe fn write_to_slice_aligned_unchecked( 837 self, slice: &mut [$elem_ty], 838 ) { 839 #[allow(clippy::cast_ptr_alignment)] 840 *(slice.get_unchecked_mut(0) as *mut $elem_ty as *mut Self) = 841 self; 842 } 843 844 /// Writes the values of the vector to the `slice`. 
845 /// 846 /// # Precondition 847 /// 848 /// If `slice.len() < Self::lanes()` the behavior is undefined. 849 #[inline] 850 pub unsafe fn write_to_slice_unaligned_unchecked( 851 self, slice: &mut [$elem_ty], 852 ) { 853 let target_ptr = 854 slice.get_unchecked_mut(0) as *mut $elem_ty as *mut u8; 855 let self_ptr = &self as *const Self as *const u8; 856 crate::ptr::copy_nonoverlapping( 857 self_ptr, 858 target_ptr, 859 crate::mem::size_of::<Self>(), 860 ); 861 } 862 } 863 864 test_if!{ 865 $test_tt: 866 paste::item! { 867 pub mod [<$id _slice_write_to_slice>] { 868 use super::*; 869 use crate::iter::Iterator; 870 871 #[cfg_attr(not(target_arch = "wasm32"), test)] 872 #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)] 873 fn write_to_slice_unaligned() { 874 let (null, non_null) = ptr_vals!($id<i32>); 875 let mut unaligned = [null; $id::<i32>::lanes() + 1]; 876 let vec = $id::<i32>::splat(non_null); 877 vec.write_to_slice_unaligned(&mut unaligned[1..]); 878 for (index, &b) in unaligned.iter().enumerate() { 879 if index == 0 { 880 assert_eq!(b, null); 881 } else { 882 assert_eq!(b, non_null); 883 assert_eq!(b, vec.extract(index - 1)); 884 } 885 } 886 } 887 888 // FIXME: wasm-bindgen-test does not support #[should_panic] 889 // #[cfg_attr(not(target_arch = "wasm32"), test)] 890 // #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)] 891 #[cfg(not(target_arch = "wasm32"))] 892 #[test] 893 #[should_panic] 894 fn write_to_slice_unaligned_fail() { 895 let (null, non_null) = ptr_vals!($id<i32>); 896 let mut unaligned = [null; $id::<i32>::lanes() + 1]; 897 let vec = $id::<i32>::splat(non_null); 898 // the slice is not large enough => panic 899 vec.write_to_slice_unaligned(&mut unaligned[2..]); 900 } 901 902 union A { 903 data: [<$id<i32> as sealed::Simd>::Element; 904 2 * $id::<i32>::lanes()], 905 _vec: $id<i32>, 906 } 907 908 #[cfg_attr(not(target_arch = "wasm32"), test)] 909 #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)] 910 fn write_to_slice_aligned() { 911 
let (null, non_null) = ptr_vals!($id<i32>); 912 let mut aligned = A { 913 data: [null; 2 * $id::<i32>::lanes()], 914 }; 915 let vec = $id::<i32>::splat(non_null); 916 unsafe { 917 vec.write_to_slice_aligned( 918 &mut aligned.data[$id::<i32>::lanes()..] 919 ) 920 }; 921 for (index, &b) in 922 unsafe { aligned.data.iter().enumerate() } { 923 if index < $id::<i32>::lanes() { 924 assert_eq!(b, null); 925 } else { 926 assert_eq!(b, non_null); 927 assert_eq!( 928 b, vec.extract(index - $id::<i32>::lanes()) 929 ); 930 } 931 } 932 } 933 934 // FIXME: wasm-bindgen-test does not support #[should_panic] 935 // #[cfg_attr(not(target_arch = "wasm32"), test)] 936 // #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)] 937 #[cfg(not(target_arch = "wasm32"))] 938 #[test] 939 #[should_panic] 940 fn write_to_slice_aligned_fail_lanes() { 941 let (null, non_null) = ptr_vals!($id<i32>); 942 let mut aligned = A { 943 data: [null; 2 * $id::<i32>::lanes()], 944 }; 945 let vec = $id::<i32>::splat(non_null); 946 // the slice is not large enough => panic 947 unsafe { 948 vec.write_to_slice_aligned( 949 &mut aligned.data[2 * $id::<i32>::lanes()..] 950 ) 951 }; 952 } 953 954 // FIXME: wasm-bindgen-test does not support #[should_panic] 955 // #[cfg_attr(not(target_arch = "wasm32"), test)] 956 // #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)] 957 #[cfg(not(target_arch = "wasm32"))] 958 #[test] 959 #[should_panic] 960 fn write_to_slice_aligned_fail_align() { 961 let (null, non_null) = ptr_vals!($id<i32>); 962 unsafe { 963 let mut aligned = A { 964 data: [null; 2 * $id::<i32>::lanes()], 965 }; 966 967 // get a pointer to the front of data 968 let ptr = aligned.data.as_mut_ptr(); 969 // offset pointer by one element 970 let ptr = ptr.wrapping_add(1); 971 972 if ptr.align_offset( 973 crate::mem::align_of::<$id<i32>>() 974 ) == 0 { 975 // the pointer is properly aligned, so 976 // write_to_slice_aligned won't fail here (e.g. 977 // this can happen for i128x1). 
So we panic to 978 // make the "should_fail" test pass: 979 panic!("ok"); 980 } 981 982 // create a slice - this is safe, because the 983 // elements of the slice exist, are properly 984 // initialized, and properly aligned: 985 let s = slice::from_raw_parts_mut( 986 ptr, $id::<i32>::lanes() 987 ); 988 // this should always panic because the slice 989 // alignment does not match the alignment 990 // requirements for the vector type: 991 let vec = $id::<i32>::splat(non_null); 992 vec.write_to_slice_aligned(s); 993 } 994 } 995 } 996 } 997 } 998 999 impl<T> crate::hash::Hash for $id<T> { 1000 #[inline] 1001 fn hash<H: crate::hash::Hasher>(&self, state: &mut H) { 1002 let s: $usize_ty = unsafe { crate::mem::transmute(*self) }; 1003 s.hash(state) 1004 } 1005 } 1006 1007 test_if! { 1008 $test_tt: 1009 paste::item! { 1010 pub mod [<$id _hash>] { 1011 use super::*; 1012 #[cfg_attr(not(target_arch = "wasm32"), test)] 1013 #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)] 1014 fn hash() { 1015 use crate::hash::{Hash, Hasher}; 1016 #[allow(deprecated)] 1017 use crate::hash::{SipHasher13}; 1018 1019 let values = [1_i32; $elem_count]; 1020 1021 let mut vec: $id<i32> = Default::default(); 1022 let mut array = [ 1023 $id::<i32>::null().extract(0); 1024 $elem_count 1025 ]; 1026 1027 for i in 0..$elem_count { 1028 let ptr = unsafe { 1029 crate::mem::transmute( 1030 &values[i] as *const i32 1031 ) 1032 }; 1033 vec = vec.replace(i, ptr); 1034 array[i] = ptr; 1035 } 1036 1037 #[allow(deprecated)] 1038 let mut a_hash = SipHasher13::new(); 1039 let mut v_hash = a_hash.clone(); 1040 array.hash(&mut a_hash); 1041 vec.hash(&mut v_hash); 1042 assert_eq!(a_hash.finish(), v_hash.finish()); 1043 } 1044 } 1045 } 1046 } 1047 1048 impl<T> $id<T> { 1049 /// Calculates the offset from a pointer. 1050 /// 1051 /// `count` is in units of `T`; e.g. a count of `3` represents a 1052 /// pointer offset of `3 * size_of::<T>()` bytes. 
            ///
            /// # Safety
            ///
            /// If any of the following conditions are violated, the result is
            /// Undefined Behavior:
            ///
            /// * Both the starting and resulting pointer must be either in
            /// bounds or one byte past the end of an allocated object.
            ///
            /// * The computed offset, in bytes, cannot overflow an `isize`.
            ///
            /// * The offset being in bounds cannot rely on "wrapping around"
            /// the address space. That is, the infinite-precision sum, in bytes
            /// must fit in a `usize`.
            ///
            /// The compiler and standard library generally tries to ensure
            /// allocations never reach a size where an offset is a concern. For
            /// instance, `Vec` and `Box` ensure they never allocate more than
            /// `isize::MAX` bytes, so `vec.as_ptr().offset(vec.len() as isize)`
            /// is always safe.
            ///
            /// Most platforms fundamentally can't even construct such an
            /// allocation. For instance, no known 64-bit platform can ever
            /// serve a request for 2<sup>63</sup> bytes due to page-table
            /// limitations or splitting the address space. However, some
            /// 32-bit and 16-bit platforms may successfully serve a request
            /// for more than `isize::MAX` bytes with things like Physical
            /// Address Extension. As such, memory acquired directly from
            /// allocators or memory mapped files may be too large to handle
            /// with this function.
            ///
            /// Consider using `wrapping_offset` instead if these constraints
            /// are difficult to satisfy. The only advantage of this method is
            /// that it enables more aggressive compiler optimizations.
            #[inline]
            pub unsafe fn offset(self, count: $isize_ty) -> Self {
                // FIXME: should use LLVM's `add nsw nuw`
                self.wrapping_offset(count)
            }

            /// Calculates the offset from a pointer using wrapping arithmetic.
            ///
            /// `count` is in units of `T`; e.g. a count of `3` represents a
            /// pointer offset of `3 * size_of::<T>()` bytes.
            ///
            /// # Safety
            ///
            /// The resulting pointer does not need to be in bounds, but it is
            /// potentially hazardous to dereference (which requires unsafe).
            ///
            /// Always use `.offset(count)` instead when possible, because
            /// offset allows the compiler to optimize better.
            #[inline]
            pub fn wrapping_offset(self, count: $isize_ty) -> Self {
                unsafe {
                    let x: $isize_ty = crate::mem::transmute(self);
                    // note: {+,*} currently performs a `wrapping_{add, mul}`
                    crate::mem::transmute(
                        x + (count * crate::mem::size_of::<T>() as isize)
                    )
                }
            }

            /// Calculates the distance between two pointers.
            ///
            /// The returned value is in units of `T`: the distance in bytes is
            /// divided by `mem::size_of::<T>()`.
            ///
            /// This function is the inverse of offset.
            ///
            /// # Safety
            ///
            /// If any of the following conditions are violated, the result is
            /// Undefined Behavior:
            ///
            /// * Both the starting and other pointer must be either in bounds
            /// or one byte past the end of the same allocated object.
            ///
            /// * The distance between the pointers, in bytes, cannot overflow
            /// an `isize`.
            ///
            /// * The distance between the pointers, in bytes, must be an exact
            /// multiple of the size of `T`.
            ///
            /// * The distance being in bounds cannot rely on "wrapping around"
            /// the address space.
            ///
            /// The compiler and standard library generally try to ensure
            /// allocations never reach a size where an offset is a concern. For
            /// instance, `Vec` and `Box` ensure they never allocate more than
            /// `isize::MAX` bytes, so `ptr_into_vec.offset_from(vec.as_ptr())`
            /// is always safe.
            ///
            /// Most platforms fundamentally can't even construct such an
            /// allocation. For instance, no known 64-bit platform can ever
            /// serve a request for 2<sup>63</sup> bytes due to page-table
            /// limitations or splitting the address space. However, some
            /// 32-bit and 16-bit platforms may successfully serve a request
            /// for more than `isize::MAX` bytes with things like Physical
            /// Address Extension. As such, memory acquired directly from
            /// allocators or memory mapped files may be too large to handle
            /// with this function.
            ///
            /// Consider using wrapping_offset_from instead if these constraints
            /// are difficult to satisfy. The only advantage of this method is
            /// that it enables more aggressive compiler optimizations.
            #[inline]
            pub unsafe fn offset_from(self, origin: Self) -> $isize_ty {
                // FIXME: should use LLVM's `sub nsw nuw`.
                self.wrapping_offset_from(origin)
            }

            /// Calculates the distance between two pointers.
            ///
            /// The returned value is in units of `T`: the distance in bytes is
            /// divided by `mem::size_of::<T>()`.
            ///
            /// If the address difference between the two pointers is not a
            /// multiple of `mem::size_of::<T>()` then the result of the
            /// division is rounded towards zero.
            ///
            /// Though this method is safe for any two pointers, note that its
            /// result will be mostly useless if the two pointers aren't into
            /// the same allocated object, for example if they point to two
            /// different local variables.
            #[inline]
            pub fn wrapping_offset_from(self, origin: Self) -> $isize_ty {
                let x: $isize_ty = unsafe { crate::mem::transmute(self) };
                let y: $isize_ty = unsafe { crate::mem::transmute(origin) };
                // note: {-,/} currently perform wrapping_{sub, div}
                // NOTE(review): this evaluates to `origin - self`, whereas
                // std's `ptr::offset_from` computes `self - origin` — confirm
                // the intended sign convention before relying on it.
                (y - x) / (crate::mem::size_of::<T>() as isize)
            }

            /// Calculates the offset from a pointer (convenience for
            /// `.offset(count as isize)`).
            ///
            /// `count` is in units of `T`; e.g. a count of 3 represents a
            /// pointer offset of `3 * size_of::<T>()` bytes.
            ///
            /// # Safety
            ///
            /// If any of the following conditions are violated, the result is
            /// Undefined Behavior:
            ///
            /// * Both the starting and resulting pointer must be either in
            /// bounds or one byte past the end of an allocated object.
            ///
            /// * The computed offset, in bytes, cannot overflow an `isize`.
            ///
            /// * The offset being in bounds cannot rely on "wrapping around"
            /// the address space. That is, the infinite-precision sum must fit
            /// in a `usize`.
            ///
            /// The compiler and standard library generally tries to ensure
            /// allocations never reach a size where an offset is a concern. For
            /// instance, `Vec` and `Box` ensure they never allocate more than
            /// `isize::MAX` bytes, so `vec.as_ptr().add(vec.len())` is always
            /// safe.
            ///
            /// Most platforms fundamentally can't even construct such an
            /// allocation. For instance, no known 64-bit platform can ever
            /// serve a request for 2<sup>63</sup> bytes due to page-table
            /// limitations or splitting the address space. However, some
            /// 32-bit and 16-bit platforms may successfully serve a request
            /// for more than `isize::MAX` bytes with things like Physical
            /// Address Extension. As such, memory acquired directly from
            /// allocators or memory mapped files may be too large to handle
            /// with this function.
            ///
            /// Consider using `wrapping_offset` instead if these constraints
            /// are difficult to satisfy. The only advantage of this method is
            /// that it enables more aggressive compiler optimizations.
            #[inline]
            #[allow(clippy::should_implement_trait)]
            pub unsafe fn add(self, count: $usize_ty) -> Self {
                self.offset(count.cast())
            }

            /// Calculates the offset from a pointer (convenience for
            /// `.offset((count as isize).wrapping_neg())`).
            ///
            /// `count` is in units of T; e.g. a `count` of 3 represents a
            /// pointer offset of `3 * size_of::<T>()` bytes.
            ///
            /// # Safety
            ///
            /// If any of the following conditions are violated, the result is
            /// Undefined Behavior:
            ///
            /// * Both the starting and resulting pointer must be either in
            /// bounds or one byte past the end of an allocated object.
            ///
            /// * The computed offset cannot exceed `isize::MAX` **bytes**.
            ///
            /// * The offset being in bounds cannot rely on "wrapping around"
            /// the address space. That is, the infinite-precision sum must fit
            /// in a usize.
            ///
            /// The compiler and standard library generally tries to ensure
            /// allocations never reach a size where an offset is a concern. For
            /// instance, `Vec` and `Box` ensure they never allocate more than
            /// `isize::MAX` bytes, so
            /// `vec.as_ptr().add(vec.len()).sub(vec.len())` is always safe.
            ///
            /// Most platforms fundamentally can't even construct such an
            /// allocation. For instance, no known 64-bit platform can ever
            /// serve a request for 2<sup>63</sup> bytes due to page-table
            /// limitations or splitting the address space. However, some 32-bit
            /// and 16-bit platforms may successfully serve a request for more
            /// than `isize::MAX` bytes with things like Physical Address
            /// Extension. As such, memory acquired directly from allocators or
            /// memory mapped files *may* be too large to handle with this
            /// function.
            ///
            /// Consider using `wrapping_offset` instead if these constraints
            /// are difficult to satisfy. The only advantage of this method is
            /// that it enables more aggressive compiler optimizations.
1267 #[inline] 1268 #[allow(clippy::should_implement_trait)] 1269 pub unsafe fn sub(self, count: $usize_ty) -> Self { 1270 let x: $isize_ty = count.cast(); 1271 // note: - is currently wrapping_neg 1272 self.offset(-x) 1273 } 1274 1275 /// Calculates the offset from a pointer using wrapping arithmetic. 1276 /// (convenience for `.wrapping_offset(count as isize)`) 1277 /// 1278 /// `count` is in units of T; e.g. a `count` of 3 represents a 1279 /// pointer offset of `3 * size_of::<T>()` bytes. 1280 /// 1281 /// # Safety 1282 /// 1283 /// The resulting pointer does not need to be in bounds, but it is 1284 /// potentially hazardous to dereference (which requires `unsafe`). 1285 /// 1286 /// Always use `.add(count)` instead when possible, because `add` 1287 /// allows the compiler to optimize better. 1288 #[inline] 1289 pub fn wrapping_add(self, count: $usize_ty) -> Self { 1290 self.wrapping_offset(count.cast()) 1291 } 1292 1293 /// Calculates the offset from a pointer using wrapping arithmetic. 1294 /// (convenience for `.wrapping_offset((count as 1295 /// isize).wrapping_sub())`) 1296 /// 1297 /// `count` is in units of T; e.g. a `count` of 3 represents a 1298 /// pointer offset of `3 * size_of::<T>()` bytes. 1299 /// 1300 /// # Safety 1301 /// 1302 /// The resulting pointer does not need to be in bounds, but it is 1303 /// potentially hazardous to dereference (which requires `unsafe`). 1304 /// 1305 /// Always use `.sub(count)` instead when possible, because `sub` 1306 /// allows the compiler to optimize better. 1307 #[inline] 1308 pub fn wrapping_sub(self, count: $usize_ty) -> Self { 1309 let x: $isize_ty = count.cast(); 1310 self.wrapping_offset(-1 * x) 1311 } 1312 } 1313 1314 impl<T> $id<T> { 1315 /// Shuffle vector elements according to `indices`. 
            #[inline]
            pub fn shuffle1_dyn<I>(self, indices: I) -> Self
            where
                Self: codegen::shuffle1_dyn::Shuffle1Dyn<Indices = I>,
            {
                // Delegates to the code-generation backend; each lane `i` of
                // the result is `self.extract(indices.extract(i))`.
                codegen::shuffle1_dyn::Shuffle1Dyn::shuffle1_dyn(self, indices)
            }
        }

        test_if! {
            $test_tt:
            paste::item! {
                // One test module per vector type, e.g. `cptrx4_shuffle1_dyn`.
                pub mod [<$id _shuffle1_dyn>] {
                    use super::*;
                    #[cfg_attr(not(target_arch = "wasm32"), test)]
                    #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
                    fn shuffle1_dyn() {
                        // `null` / `non_null` are two distinguishable pointer
                        // values supplied by the test helper macro.
                        let (null, non_null) = ptr_vals!($id<i32>);

                        // alternating = [non_null, null, non_null, null, ...]
                        let mut alternating = $id::<i32>::splat(null);
                        for i in 0..$id::<i32>::lanes() {
                            if i % 2 == 0 {
                                alternating = alternating.replace(i, non_null);
                            }
                        }

                        type Indices = <$id<i32>
                            as codegen::shuffle1_dyn::Shuffle1Dyn>::Indices;
                        // even = [0, 0, 2, 2, 4, 4, ..]
                        // (every lane selects an even source index, i.e. a
                        // `non_null` lane of `alternating`)
                        let even = {
                            let mut v = Indices::splat(0);
                            for i in 0..$id::<i32>::lanes() {
                                if i % 2 == 0 {
                                    v = v.replace(i, (i as u8).into());
                                } else {
                                    v = v.replace(i, (i as u8 - 1).into());
                                }
                            }
                            v
                        };
                        // odd = [1, 1, 3, 3, 5, 5, ...]
                        // (every lane selects an odd source index, i.e. a
                        // `null` lane of `alternating`)
                        let odd = {
                            let mut v = Indices::splat(0);
                            for i in 0..$id::<i32>::lanes() {
                                if i % 2 != 0 {
                                    v = v.replace(i, (i as u8).into());
                                } else {
                                    v = v.replace(i, (i as u8 + 1).into());
                                }
                            }
                            v
                        };

                        // Gathering only even lanes yields all-non_null.
                        assert_eq!(
                            alternating.shuffle1_dyn(even),
                            $id::<i32>::splat(non_null)
                        );
                        // Odd lanes only exist when there is more than one
                        // lane; then gathering them yields all-null.
                        if $id::<i32>::lanes() > 1 {
                            assert_eq!(
                                alternating.shuffle1_dyn(odd),
                                $id::<i32>::splat(null)
                            );
                        }
                    }
                }
            }
        }
    };
}