1 //! Minimal API of pointer vectors. 2 3 macro_rules! impl_minimal_p { 4 ([$elem_ty:ty; $elem_count:expr]: $id:ident, $mask_ty:ident, 5 $usize_ty:ident, $isize_ty:ident | $ref:ident | $test_tt:tt 6 | $($elem_name:ident),+ | ($true:expr, $false:expr) | 7 $(#[$doc:meta])*) => { 8 9 $(#[$doc])* 10 pub type $id<T> = Simd<[$elem_ty; $elem_count]>; 11 12 impl<T> sealed::Simd for $id<T> { 13 type Element = $elem_ty; 14 const LANES: usize = $elem_count; 15 type LanesType = [u32; $elem_count]; 16 } 17 18 impl<T> $id<T> { 19 /// Creates a new instance with each vector elements initialized 20 /// with the provided values. 21 #[inline] 22 #[allow(clippy::too_many_arguments)] 23 pub const fn new($($elem_name: $elem_ty),*) -> Self { 24 Simd(codegen::$id($($elem_name),*)) 25 } 26 27 /// Returns the number of vector lanes. 28 #[inline] 29 pub const fn lanes() -> usize { 30 $elem_count 31 } 32 33 /// Constructs a new instance with each element initialized to 34 /// `value`. 35 #[inline] 36 pub const fn splat(value: $elem_ty) -> Self { 37 Simd(codegen::$id($({ 38 #[allow(non_camel_case_types, dead_code)] 39 struct $elem_name; 40 value 41 }),*)) 42 } 43 44 /// Constructs a new instance with each element initialized to 45 /// `null`. 46 #[inline] 47 pub const fn null() -> Self { 48 Self::splat(crate::ptr::null_mut() as $elem_ty) 49 } 50 51 /// Returns a mask that selects those lanes that contain `null` 52 /// pointers. 53 #[inline] 54 pub fn is_null(self) -> $mask_ty { 55 self.eq(Self::null()) 56 } 57 58 /// Extracts the value at `index`. 59 /// 60 /// # Panics 61 /// 62 /// If `index >= Self::lanes()`. 63 #[inline] 64 pub fn extract(self, index: usize) -> $elem_ty { 65 assert!(index < $elem_count); 66 unsafe { self.extract_unchecked(index) } 67 } 68 69 /// Extracts the value at `index`. 70 /// 71 /// # Safety 72 /// 73 /// If `index >= Self::lanes()` the behavior is undefined. 
74 #[inline] 75 pub unsafe fn extract_unchecked(self, index: usize) -> $elem_ty { 76 use crate::llvm::simd_extract; 77 simd_extract(self.0, index as u32) 78 } 79 80 /// Returns a new vector where the value at `index` is replaced by 81 /// `new_value`. 82 /// 83 /// # Panics 84 /// 85 /// If `index >= Self::lanes()`. 86 #[inline] 87 #[must_use = "replace does not modify the original value - \ 88 it returns a new vector with the value at `index` \ 89 replaced by `new_value`" 90 ] 91 #[allow(clippy::not_unsafe_ptr_arg_deref)] 92 pub fn replace(self, index: usize, new_value: $elem_ty) -> Self { 93 assert!(index < $elem_count); 94 unsafe { self.replace_unchecked(index, new_value) } 95 } 96 97 /// Returns a new vector where the value at `index` is replaced by `new_value`. 98 /// 99 /// # Safety 100 /// 101 /// If `index >= Self::lanes()` the behavior is undefined. 102 #[inline] 103 #[must_use = "replace_unchecked does not modify the original value - \ 104 it returns a new vector with the value at `index` \ 105 replaced by `new_value`" 106 ] 107 pub unsafe fn replace_unchecked( 108 self, 109 index: usize, 110 new_value: $elem_ty, 111 ) -> Self { 112 use crate::llvm::simd_insert; 113 Simd(simd_insert(self.0, index as u32, new_value)) 114 } 115 } 116 117 118 test_if!{ 119 $test_tt: 120 paste::item!
{ 121 pub mod [<$id _minimal>] { 122 use super::*; 123 #[cfg_attr(not(target_arch = "wasm32"), test)] 124 #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)] 125 fn minimal() { 126 // lanes: 127 assert_eq!($elem_count, $id::<i32>::lanes()); 128 129 // splat and extract / extract_unchecked: 130 let VAL7: <$id<i32> as sealed::Simd>::Element 131 = $ref!(7); 132 let VAL42: <$id<i32> as sealed::Simd>::Element 133 = $ref!(42); 134 let VEC: $id<i32> = $id::splat(VAL7); 135 for i in 0..$id::<i32>::lanes() { 136 assert_eq!(VAL7, VEC.extract(i)); 137 assert_eq!( 138 VAL7, unsafe { VEC.extract_unchecked(i) } 139 ); 140 } 141 142 // replace / replace_unchecked 143 let new_vec = VEC.replace(0, VAL42); 144 for i in 0..$id::<i32>::lanes() { 145 if i == 0 { 146 assert_eq!(VAL42, new_vec.extract(i)); 147 } else { 148 assert_eq!(VAL7, new_vec.extract(i)); 149 } 150 } 151 let new_vec = unsafe { 152 VEC.replace_unchecked(0, VAL42) 153 }; 154 for i in 0..$id::<i32>::lanes() { 155 if i == 0 { 156 assert_eq!(VAL42, new_vec.extract(i)); 157 } else { 158 assert_eq!(VAL7, new_vec.extract(i)); 159 } 160 } 161 162 let mut n = $id::<i32>::null(); 163 assert_eq!( 164 n, 165 $id::<i32>::splat(unsafe { crate::mem::zeroed() }) 166 ); 167 assert!(n.is_null().all()); 168 n = n.replace( 169 0, unsafe { crate::mem::transmute(1_isize) } 170 ); 171 assert!(!n.is_null().all()); 172 if $id::<i32>::lanes() > 1 { 173 assert!(n.is_null().any()); 174 } else { 175 assert!(!n.is_null().any()); 176 } 177 } 178 179 // FIXME: wasm-bindgen-test does not support #[should_panic] 180 // #[cfg_attr(not(target_arch = "wasm32"), test)] 181 // #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)] 182 #[cfg(not(target_arch = "wasm32"))] 183 #[test] 184 #[should_panic] 185 fn extract_panic_oob() { 186 let VAL: <$id<i32> as sealed::Simd>::Element 187 = $ref!(7); 188 let VEC: $id<i32> = $id::splat(VAL); 189 let _ = VEC.extract($id::<i32>::lanes()); 190 } 191 192 // FIXME: wasm-bindgen-test does not support #[should_panic] 
193 // #[cfg_attr(not(target_arch = "wasm32"), test)] 194 // #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)] 195 #[cfg(not(target_arch = "wasm32"))] 196 #[test] 197 #[should_panic] 198 fn replace_panic_oob() { 199 let VAL: <$id<i32> as sealed::Simd>::Element 200 = $ref!(7); 201 let VAL42: <$id<i32> as sealed::Simd>::Element 202 = $ref!(42); 203 let VEC: $id<i32> = $id::splat(VAL); 204 let _ = VEC.replace($id::<i32>::lanes(), VAL42); 205 } 206 } 207 } 208 } 209 210 impl<T> crate::fmt::Debug for $id<T> { 211 #[allow(clippy::missing_inline_in_public_items)] 212 fn fmt(&self, f: &mut crate::fmt::Formatter<'_>) 213 -> crate::fmt::Result { 214 write!( 215 f, 216 "{}<{}>(", 217 stringify!($id), 218 crate::intrinsics::type_name::<T>() 219 )?; 220 for i in 0..$elem_count { 221 if i > 0 { 222 write!(f, ", ")?; 223 } 224 self.extract(i).fmt(f)?; 225 } 226 write!(f, ")") 227 } 228 } 229 230 test_if!{ 231 $test_tt: 232 paste::item! { 233 pub mod [<$id _fmt_debug>] { 234 use super::*; 235 #[cfg_attr(not(target_arch = "wasm32"), test)] 236 #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)] 237 fn debug() { 238 use arrayvec::{ArrayString,ArrayVec}; 239 type TinyString = ArrayString<[u8; 512]>; 240 241 use crate::fmt::Write; 242 let v = $id::<i32>::default(); 243 let mut s = TinyString::new(); 244 write!(&mut s, "{:?}", v).unwrap(); 245 246 let mut beg = TinyString::new(); 247 write!(&mut beg, "{}<i32>(", stringify!($id)).unwrap(); 248 assert!( 249 s.starts_with(beg.as_str()), 250 "s = {} (should start with = {})", s, beg 251 ); 252 assert!(s.ends_with(")")); 253 let s: ArrayVec<[TinyString; 64]> 254 = s.replace(beg.as_str(), "") 255 .replace(")", "").split(",") 256 .map(|v| TinyString::from(v.trim()).unwrap()) 257 .collect(); 258 assert_eq!(s.len(), $id::<i32>::lanes()); 259 for (index, ss) in s.into_iter().enumerate() { 260 let mut e = TinyString::new(); 261 write!(&mut e, "{:?}", v.extract(index)).unwrap(); 262 assert_eq!(ss, e); 263 } 264 } 265 } 266 } 267 } 268 269 
impl<T> Default for $id<T> { 270 #[inline] 271 fn default() -> Self { 272 // FIXME: ptrs do not implement default 273 Self::null() 274 } 275 } 276 277 test_if!{ 278 $test_tt: 279 paste::item! { 280 pub mod [<$id _default>] { 281 use super::*; 282 #[cfg_attr(not(target_arch = "wasm32"), test)] 283 #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)] 284 fn default() { 285 let a = $id::<i32>::default(); 286 for i in 0..$id::<i32>::lanes() { 287 assert_eq!( 288 a.extract(i), unsafe { crate::mem::zeroed() } 289 ); 290 } 291 } 292 } 293 } 294 } 295 296 impl<T> $id<T> { 297 /// Lane-wise equality comparison. 298 #[inline] 299 pub fn eq(self, other: Self) -> $mask_ty { 300 unsafe { 301 use crate::llvm::simd_eq; 302 let a: $usize_ty = crate::mem::transmute(self); 303 let b: $usize_ty = crate::mem::transmute(other); 304 Simd(simd_eq(a.0, b.0)) 305 } 306 } 307 308 /// Lane-wise inequality comparison. 309 #[inline] 310 pub fn ne(self, other: Self) -> $mask_ty { 311 unsafe { 312 use crate::llvm::simd_ne; 313 let a: $usize_ty = crate::mem::transmute(self); 314 let b: $usize_ty = crate::mem::transmute(other); 315 Simd(simd_ne(a.0, b.0)) 316 } 317 } 318 319 /// Lane-wise less-than comparison. 320 #[inline] 321 pub fn lt(self, other: Self) -> $mask_ty { 322 unsafe { 323 use crate::llvm::simd_lt; 324 let a: $usize_ty = crate::mem::transmute(self); 325 let b: $usize_ty = crate::mem::transmute(other); 326 Simd(simd_lt(a.0, b.0)) 327 } 328 } 329 330 /// Lane-wise less-than-or-equals comparison. 331 #[inline] 332 pub fn le(self, other: Self) -> $mask_ty { 333 unsafe { 334 use crate::llvm::simd_le; 335 let a: $usize_ty = crate::mem::transmute(self); 336 let b: $usize_ty = crate::mem::transmute(other); 337 Simd(simd_le(a.0, b.0)) 338 } 339 } 340 341 /// Lane-wise greater-than comparison. 
342 #[inline] 343 pub fn gt(self, other: Self) -> $mask_ty { 344 unsafe { 345 use crate::llvm::simd_gt; 346 let a: $usize_ty = crate::mem::transmute(self); 347 let b: $usize_ty = crate::mem::transmute(other); 348 Simd(simd_gt(a.0, b.0)) 349 } 350 } 351 352 /// Lane-wise greater-than-or-equals comparison. 353 #[inline] 354 pub fn ge(self, other: Self) -> $mask_ty { 355 unsafe { 356 use crate::llvm::simd_ge; 357 let a: $usize_ty = crate::mem::transmute(self); 358 let b: $usize_ty = crate::mem::transmute(other); 359 Simd(simd_ge(a.0, b.0)) 360 } 361 } 362 } 363 364 test_if!{ 365 $test_tt: 366 paste::item! { 367 pub mod [<$id _cmp_vertical>] { 368 use super::*; 369 #[cfg_attr(not(target_arch = "wasm32"), test)] 370 #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)] 371 fn cmp() { 372 let a = $id::<i32>::null(); 373 let b = $id::<i32>::splat(unsafe { 374 crate::mem::transmute(1_isize) 375 }); 376 377 let r = a.lt(b); 378 let e = $mask_ty::splat(true); 379 assert!(r == e); 380 let r = a.le(b); 381 assert!(r == e); 382 383 let e = $mask_ty::splat(false); 384 let r = a.gt(b); 385 assert!(r == e); 386 let r = a.ge(b); 387 assert!(r == e); 388 let r = a.eq(b); 389 assert!(r == e); 390 391 let mut a = a; 392 let mut b = b; 393 let mut e = e; 394 for i in 0..$id::<i32>::lanes() { 395 if i % 2 == 0 { 396 a = a.replace( 397 i, 398 unsafe { crate::mem::transmute(0_isize) } 399 ); 400 b = b.replace( 401 i, 402 unsafe { crate::mem::transmute(1_isize) } 403 ); 404 e = e.replace(i, true); 405 } else { 406 a = a.replace( 407 i, 408 unsafe { crate::mem::transmute(1_isize) } 409 ); 410 b = b.replace( 411 i, 412 unsafe { crate::mem::transmute(0_isize) } 413 ); 414 e = e.replace(i, false); 415 } 416 } 417 let r = a.lt(b); 418 assert!(r == e); 419 } 420 } 421 } 422 } 423 424 #[allow(clippy::partialeq_ne_impl)] 425 impl<T> crate::cmp::PartialEq<$id<T>> for $id<T> { 426 #[inline] 427 fn eq(&self, other: &Self) -> bool { 428 $id::<T>::eq(*self, *other).all() 429 } 430 #[inline] 431 fn 
ne(&self, other: &Self) -> bool { 432 $id::<T>::ne(*self, *other).any() 433 } 434 } 435 436 // FIXME: https://github.com/rust-lang-nursery/rust-clippy/issues/2892 437 #[allow(clippy::partialeq_ne_impl)] 438 impl<T> crate::cmp::PartialEq<LexicographicallyOrdered<$id<T>>> 439 for LexicographicallyOrdered<$id<T>> 440 { 441 #[inline] 442 fn eq(&self, other: &Self) -> bool { 443 self.0 == other.0 444 } 445 #[inline] 446 fn ne(&self, other: &Self) -> bool { 447 self.0 != other.0 448 } 449 } 450 451 test_if!{ 452 $test_tt: 453 paste::item! { 454 pub mod [<$id _cmp_PartialEq>] { 455 use super::*; 456 #[cfg_attr(not(target_arch = "wasm32"), test)] 457 #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)] 458 fn partial_eq() { 459 let a = $id::<i32>::null(); 460 let b = $id::<i32>::splat(unsafe { 461 crate::mem::transmute(1_isize) 462 }); 463 464 assert!(a != b); 465 assert!(!(a == b)); 466 assert!(a == a); 467 assert!(!(a != a)); 468 469 if $id::<i32>::lanes() > 1 { 470 let a = $id::<i32>::null().replace(0, unsafe { 471 crate::mem::transmute(1_isize) 472 }); 473 let b = $id::<i32>::splat(unsafe { 474 crate::mem::transmute(1_isize) 475 }); 476 477 assert!(a != b); 478 assert!(!(a == b)); 479 assert!(a == a); 480 assert!(!(a != a)); 481 } 482 } 483 } 484 } 485 } 486 487 impl<T> crate::cmp::Eq for $id<T> {} 488 impl<T> crate::cmp::Eq for LexicographicallyOrdered<$id<T>> {} 489 490 test_if!{ 491 $test_tt: 492 paste::item! { 493 pub mod [<$id _cmp_eq>] { 494 use super::*; 495 #[cfg_attr(not(target_arch = "wasm32"), test)] 496 #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)] 497 fn eq() { 498 fn foo<E: crate::cmp::Eq>(_: E) {} 499 let a = $id::<i32>::null(); 500 foo(a); 501 } 502 } 503 } 504 } 505 506 impl<T> From<[$elem_ty; $elem_count]> for $id<T> { 507 #[inline] 508 fn from(array: [$elem_ty; $elem_count]) -> Self { 509 unsafe { 510 // FIXME: unnecessary zeroing; better than UB. 
511 let mut u: Self = crate::mem::zeroed(); 512 crate::ptr::copy_nonoverlapping( 513 &array as *const [$elem_ty; $elem_count] as *const u8, 514 &mut u as *mut Self as *mut u8, 515 crate::mem::size_of::<Self>() 516 ); 517 u 518 } 519 } 520 } 521 impl<T> Into<[$elem_ty; $elem_count]> for $id<T> { 522 #[inline] 523 fn into(self) -> [$elem_ty; $elem_count] { 524 unsafe { 525 // FIXME: unnecessary zeroing; better than UB. 526 let mut u: [$elem_ty; $elem_count] = crate::mem::zeroed(); 527 crate::ptr::copy_nonoverlapping( 528 &self as *const $id<T> as *const u8, 529 &mut u as *mut [$elem_ty; $elem_count] as *mut u8, 530 crate::mem::size_of::<Self>() 531 ); 532 u 533 } 534 } 535 } 536 537 test_if!{ 538 $test_tt: 539 paste::item! { 540 pub mod [<$id _from>] { 541 use super::*; 542 #[cfg_attr(not(target_arch = "wasm32"), test)] 543 #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)] 544 fn array() { 545 let values = [1_i32; $elem_count]; 546 547 let mut vec: $id<i32> = Default::default(); 548 let mut array = [ 549 $id::<i32>::null().extract(0); $elem_count 550 ]; 551 552 for i in 0..$elem_count { 553 let ptr = &values[i] as *const i32 as *mut i32; 554 vec = vec.replace(i, ptr); 555 array[i] = ptr; 556 } 557 558 // FIXME: there is no impl of From<$id<T>> for [$elem_ty; N] 559 // let a0 = From::from(vec); 560 // assert_eq!(a0, array); 561 #[allow(unused_assignments)] 562 let mut a1 = array; 563 a1 = vec.into(); 564 assert_eq!(a1, array); 565 566 let v0: $id<i32> = From::from(array); 567 assert_eq!(v0, vec); 568 let v1: $id<i32> = array.into(); 569 assert_eq!(v1, vec); 570 } 571 } 572 } 573 } 574 575 impl<T> $id<T> { 576 /// Instantiates a new vector with the values of the `slice`. 577 /// 578 /// # Panics 579 /// 580 /// If `slice.len() < Self::lanes()` or `&slice[0]` is not aligned 581 /// to an `align_of::<Self>()` boundary. 
582 #[inline] 583 pub fn from_slice_aligned(slice: &[$elem_ty]) -> Self { 584 unsafe { 585 assert!(slice.len() >= $elem_count); 586 let target_ptr = slice.get_unchecked(0) as *const $elem_ty; 587 assert!( 588 target_ptr.align_offset(crate::mem::align_of::<Self>()) 589 == 0 590 ); 591 Self::from_slice_aligned_unchecked(slice) 592 } 593 } 594 595 /// Instantiates a new vector with the values of the `slice`. 596 /// 597 /// # Panics 598 /// 599 /// If `slice.len() < Self::lanes()`. 600 #[inline] 601 pub fn from_slice_unaligned(slice: &[$elem_ty]) -> Self { 602 unsafe { 603 assert!(slice.len() >= $elem_count); 604 Self::from_slice_unaligned_unchecked(slice) 605 } 606 } 607 608 /// Instantiates a new vector with the values of the `slice`. 609 /// 610 /// # Safety 611 /// 612 /// If `slice.len() < Self::lanes()` or `&slice[0]` is not aligned 613 /// to an `align_of::<Self>()` boundary, the behavior is undefined. 614 #[inline] 615 pub unsafe fn from_slice_aligned_unchecked(slice: &[$elem_ty]) 616 -> Self { 617 #[allow(clippy::cast_ptr_alignment)] 618 *(slice.get_unchecked(0) as *const $elem_ty as *const Self) 619 } 620 621 /// Instantiates a new vector with the values of the `slice`. 622 /// 623 /// # Safety 624 /// 625 /// If `slice.len() < Self::lanes()` the behavior is undefined. 626 #[inline] 627 pub unsafe fn from_slice_unaligned_unchecked( 628 slice: &[$elem_ty], 629 ) -> Self { 630 use crate::mem::size_of; 631 let target_ptr = 632 slice.get_unchecked(0) as *const $elem_ty as *const u8; 633 let mut x = Self::splat(crate::ptr::null_mut() as $elem_ty); 634 let self_ptr = &mut x as *mut Self as *mut u8; 635 crate::ptr::copy_nonoverlapping( 636 target_ptr, 637 self_ptr, 638 size_of::<Self>(), 639 ); 640 x 641 } 642 } 643 644 test_if!{ 645 $test_tt: 646 paste::item! 
{ 647 pub mod [<$id _slice_from_slice>] { 648 use super::*; 649 use crate::iter::Iterator; 650 651 #[cfg_attr(not(target_arch = "wasm32"), test)] 652 #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)] 653 fn from_slice_unaligned() { 654 let (null, non_null) = ptr_vals!($id<i32>); 655 656 let mut unaligned = [ 657 non_null; $id::<i32>::lanes() + 1 658 ]; 659 unaligned[0] = null; 660 let vec = $id::<i32>::from_slice_unaligned( 661 &unaligned[1..] 662 ); 663 for (index, &b) in unaligned.iter().enumerate() { 664 if index == 0 { 665 assert_eq!(b, null); 666 } else { 667 assert_eq!(b, non_null); 668 assert_eq!(b, vec.extract(index - 1)); 669 } 670 } 671 } 672 673 // FIXME: wasm-bindgen-test does not support #[should_panic] 674 // #[cfg_attr(not(target_arch = "wasm32"), test)] 675 // #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)] 676 #[cfg(not(target_arch = "wasm32"))] 677 #[test] 678 #[should_panic] 679 fn from_slice_unaligned_fail() { 680 let (_null, non_null) = ptr_vals!($id<i32>); 681 let unaligned = [non_null; $id::<i32>::lanes() + 1]; 682 // the slice is not large enough => panic 683 let _vec = $id::<i32>::from_slice_unaligned( 684 &unaligned[2..] 685 ); 686 } 687 688 union A { 689 data: [<$id<i32> as sealed::Simd>::Element; 690 2 * $id::<i32>::lanes()], 691 _vec: $id<i32>, 692 } 693 694 #[cfg_attr(not(target_arch = "wasm32"), test)] 695 #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)] 696 fn from_slice_aligned() { 697 let (null, non_null) = ptr_vals!($id<i32>); 698 let mut aligned = A { 699 data: [null; 2 * $id::<i32>::lanes()], 700 }; 701 for i in 702 $id::<i32>::lanes()..(2 * $id::<i32>::lanes()) { 703 unsafe { 704 aligned.data[i] = non_null; 705 } 706 } 707 708 let vec = unsafe { 709 $id::<i32>::from_slice_aligned( 710 &aligned.data[$id::<i32>::lanes()..] 
711 ) 712 }; 713 for (index, &b) in unsafe { 714 aligned.data.iter().enumerate() 715 } { 716 if index < $id::<i32>::lanes() { 717 assert_eq!(b, null); 718 } else { 719 assert_eq!(b, non_null); 720 assert_eq!( 721 b, vec.extract(index - $id::<i32>::lanes()) 722 ); 723 } 724 } 725 } 726 727 // FIXME: wasm-bindgen-test does not support #[should_panic] 728 // #[cfg_attr(not(target_arch = "wasm32"), test)] 729 // #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)] 730 #[cfg(not(target_arch = "wasm32"))] 731 #[test] 732 #[should_panic] 733 fn from_slice_aligned_fail_lanes() { 734 let (_null, non_null) = ptr_vals!($id<i32>); 735 let aligned = A { 736 data: [non_null; 2 * $id::<i32>::lanes()], 737 }; 738 // the slice is not large enough => panic 739 let _vec = unsafe { 740 $id::<i32>::from_slice_aligned( 741 &aligned.data[2 * $id::<i32>::lanes()..] 742 ) 743 }; 744 } 745 746 // FIXME: wasm-bindgen-test does not support #[should_panic] 747 // #[cfg_attr(not(target_arch = "wasm32"), test)] 748 // #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)] 749 #[cfg(not(target_arch = "wasm32"))] 750 #[test] 751 #[should_panic] 752 fn from_slice_aligned_fail_align() { 753 unsafe { 754 let (null, _non_null) = ptr_vals!($id<i32>); 755 let aligned = A { 756 data: [null; 2 * $id::<i32>::lanes()], 757 }; 758 759 // get a pointer to the front of data 760 let ptr = aligned.data.as_ptr(); 761 // offset pointer by one element 762 let ptr = ptr.wrapping_add(1); 763 764 if ptr.align_offset( 765 crate::mem::align_of::<$id<i32>>() 766 ) == 0 { 767 // the pointer is properly aligned, so 768 // from_slice_aligned won't fail here (e.g. this 769 // can happen for i128x1). 
So we panic to make 770 // the "should_fail" test pass: 771 panic!("ok"); 772 } 773 774 // create a slice - this is safe, because the 775 // elements of the slice exist, are properly 776 // initialized, and properly aligned: 777 let s = slice::from_raw_parts( 778 ptr, $id::<i32>::lanes() 779 ); 780 // this should always panic because the slice 781 // alignment does not match the alignment 782 // requirements for the vector type: 783 let _vec = $id::<i32>::from_slice_aligned(s); 784 } 785 } 786 } 787 } 788 } 789 790 impl<T> $id<T> { 791 /// Writes the values of the vector to the `slice`. 792 /// 793 /// # Panics 794 /// 795 /// If `slice.len() < Self::lanes()` or `&slice[0]` is not 796 /// aligned to an `align_of::<Self>()` boundary. 797 #[inline] 798 pub fn write_to_slice_aligned(self, slice: &mut [$elem_ty]) { 799 unsafe { 800 assert!(slice.len() >= $elem_count); 801 let target_ptr = 802 slice.get_unchecked_mut(0) as *mut $elem_ty; 803 assert!( 804 target_ptr.align_offset(crate::mem::align_of::<Self>()) 805 == 0 806 ); 807 self.write_to_slice_aligned_unchecked(slice); 808 } 809 } 810 811 /// Writes the values of the vector to the `slice`. 812 /// 813 /// # Panics 814 /// 815 /// If `slice.len() < Self::lanes()`. 816 #[inline] 817 pub fn write_to_slice_unaligned(self, slice: &mut [$elem_ty]) { 818 unsafe { 819 assert!(slice.len() >= $elem_count); 820 self.write_to_slice_unaligned_unchecked(slice); 821 } 822 } 823 824 /// Writes the values of the vector to the `slice`. 825 /// 826 /// # Safety 827 /// 828 /// If `slice.len() < Self::lanes()` or `&slice[0]` is not 829 /// aligned to an `align_of::<Self>()` boundary, the behavior is 830 /// undefined. 831 #[inline] 832 pub unsafe fn write_to_slice_aligned_unchecked( 833 self, slice: &mut [$elem_ty], 834 ) { 835 #[allow(clippy::cast_ptr_alignment)] 836 *(slice.get_unchecked_mut(0) as *mut $elem_ty as *mut Self) = 837 self; 838 } 839 840 /// Writes the values of the vector to the `slice`. 
841 /// 842 /// # Safety 843 /// 844 /// If `slice.len() < Self::lanes()` the behavior is undefined. 845 #[inline] 846 pub unsafe fn write_to_slice_unaligned_unchecked( 847 self, slice: &mut [$elem_ty], 848 ) { 849 let target_ptr = 850 slice.get_unchecked_mut(0) as *mut $elem_ty as *mut u8; 851 let self_ptr = &self as *const Self as *const u8; 852 crate::ptr::copy_nonoverlapping( 853 self_ptr, 854 target_ptr, 855 crate::mem::size_of::<Self>(), 856 ); 857 } 858 } 859 860 test_if!{ 861 $test_tt: 862 paste::item! { 863 pub mod [<$id _slice_write_to_slice>] { 864 use super::*; 865 use crate::iter::Iterator; 866 867 #[cfg_attr(not(target_arch = "wasm32"), test)] 868 #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)] 869 fn write_to_slice_unaligned() { 870 let (null, non_null) = ptr_vals!($id<i32>); 871 let mut unaligned = [null; $id::<i32>::lanes() + 1]; 872 let vec = $id::<i32>::splat(non_null); 873 vec.write_to_slice_unaligned(&mut unaligned[1..]); 874 for (index, &b) in unaligned.iter().enumerate() { 875 if index == 0 { 876 assert_eq!(b, null); 877 } else { 878 assert_eq!(b, non_null); 879 assert_eq!(b, vec.extract(index - 1)); 880 } 881 } 882 } 883 884 // FIXME: wasm-bindgen-test does not support #[should_panic] 885 // #[cfg_attr(not(target_arch = "wasm32"), test)] 886 // #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)] 887 #[cfg(not(target_arch = "wasm32"))] 888 #[test] 889 #[should_panic] 890 fn write_to_slice_unaligned_fail() { 891 let (null, non_null) = ptr_vals!($id<i32>); 892 let mut unaligned = [null; $id::<i32>::lanes() + 1]; 893 let vec = $id::<i32>::splat(non_null); 894 // the slice is not large enough => panic 895 vec.write_to_slice_unaligned(&mut unaligned[2..]); 896 } 897 898 union A { 899 data: [<$id<i32> as sealed::Simd>::Element; 900 2 * $id::<i32>::lanes()], 901 _vec: $id<i32>, 902 } 903 904 #[cfg_attr(not(target_arch = "wasm32"), test)] 905 #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)] 906 fn write_to_slice_aligned() { 907 let 
(null, non_null) = ptr_vals!($id<i32>); 908 let mut aligned = A { 909 data: [null; 2 * $id::<i32>::lanes()], 910 }; 911 let vec = $id::<i32>::splat(non_null); 912 unsafe { 913 vec.write_to_slice_aligned( 914 &mut aligned.data[$id::<i32>::lanes()..] 915 ) 916 }; 917 for (index, &b) in 918 unsafe { aligned.data.iter().enumerate() } { 919 if index < $id::<i32>::lanes() { 920 assert_eq!(b, null); 921 } else { 922 assert_eq!(b, non_null); 923 assert_eq!( 924 b, vec.extract(index - $id::<i32>::lanes()) 925 ); 926 } 927 } 928 } 929 930 // FIXME: wasm-bindgen-test does not support #[should_panic] 931 // #[cfg_attr(not(target_arch = "wasm32"), test)] 932 // #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)] 933 #[cfg(not(target_arch = "wasm32"))] 934 #[test] 935 #[should_panic] 936 fn write_to_slice_aligned_fail_lanes() { 937 let (null, non_null) = ptr_vals!($id<i32>); 938 let mut aligned = A { 939 data: [null; 2 * $id::<i32>::lanes()], 940 }; 941 let vec = $id::<i32>::splat(non_null); 942 // the slice is not large enough => panic 943 unsafe { 944 vec.write_to_slice_aligned( 945 &mut aligned.data[2 * $id::<i32>::lanes()..] 946 ) 947 }; 948 } 949 950 // FIXME: wasm-bindgen-test does not support #[should_panic] 951 // #[cfg_attr(not(target_arch = "wasm32"), test)] 952 // #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)] 953 #[cfg(not(target_arch = "wasm32"))] 954 #[test] 955 #[should_panic] 956 fn write_to_slice_aligned_fail_align() { 957 let (null, non_null) = ptr_vals!($id<i32>); 958 unsafe { 959 let mut aligned = A { 960 data: [null; 2 * $id::<i32>::lanes()], 961 }; 962 963 // get a pointer to the front of data 964 let ptr = aligned.data.as_mut_ptr(); 965 // offset pointer by one element 966 let ptr = ptr.wrapping_add(1); 967 968 if ptr.align_offset( 969 crate::mem::align_of::<$id<i32>>() 970 ) == 0 { 971 // the pointer is properly aligned, so 972 // write_to_slice_aligned won't fail here (e.g. 973 // this can happen for i128x1). 
So we panic to 974 // make the "should_fail" test pass: 975 panic!("ok"); 976 } 977 978 // create a slice - this is safe, because the 979 // elements of the slice exist, are properly 980 // initialized, and properly aligned: 981 let s = slice::from_raw_parts_mut( 982 ptr, $id::<i32>::lanes() 983 ); 984 // this should always panic because the slice 985 // alignment does not match the alignment 986 // requirements for the vector type: 987 let vec = $id::<i32>::splat(non_null); 988 vec.write_to_slice_aligned(s); 989 } 990 } 991 } 992 } 993 } 994 995 impl<T> crate::hash::Hash for $id<T> { 996 #[inline] 997 fn hash<H: crate::hash::Hasher>(&self, state: &mut H) { 998 let s: $usize_ty = unsafe { crate::mem::transmute(*self) }; 999 s.hash(state) 1000 } 1001 } 1002 1003 test_if! { 1004 $test_tt: 1005 paste::item! { 1006 pub mod [<$id _hash>] { 1007 use super::*; 1008 #[cfg_attr(not(target_arch = "wasm32"), test)] 1009 #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)] 1010 fn hash() { 1011 use crate::hash::{Hash, Hasher}; 1012 #[allow(deprecated)] 1013 use crate::hash::{SipHasher13}; 1014 1015 let values = [1_i32; $elem_count]; 1016 1017 let mut vec: $id<i32> = Default::default(); 1018 let mut array = [ 1019 $id::<i32>::null().extract(0); 1020 $elem_count 1021 ]; 1022 1023 for i in 0..$elem_count { 1024 let ptr = &values[i] as *const i32 as *mut i32; 1025 vec = vec.replace(i, ptr); 1026 array[i] = ptr; 1027 } 1028 1029 #[allow(deprecated)] 1030 let mut a_hash = SipHasher13::new(); 1031 let mut v_hash = a_hash.clone(); 1032 array.hash(&mut a_hash); 1033 vec.hash(&mut v_hash); 1034 assert_eq!(a_hash.finish(), v_hash.finish()); 1035 } 1036 } 1037 } 1038 } 1039 1040 impl<T> $id<T> { 1041 /// Calculates the offset from a pointer. 1042 /// 1043 /// `count` is in units of `T`; e.g. a count of `3` represents a 1044 /// pointer offset of `3 * size_of::<T>()` bytes. 
1045 /// 1046 /// # Safety 1047 /// 1048 /// If any of the following conditions are violated, the result is 1049 /// Undefined Behavior: 1050 /// 1051 /// * Both the starting and resulting pointer must be either in 1052 /// bounds or one byte past the end of an allocated object. 1053 /// 1054 /// * The computed offset, in bytes, cannot overflow an `isize`. 1055 /// 1056 /// * The offset being in bounds cannot rely on "wrapping around" 1057 /// the address space. That is, the infinite-precision sum, in bytes 1058 /// must fit in a `usize`. 1059 /// 1060 /// The compiler and standard library generally tries to ensure 1061 /// allocations never reach a size where an offset is a concern. For 1062 /// instance, `Vec` and `Box` ensure they never allocate more than 1063 /// `isize::MAX` bytes, so `vec.as_ptr().offset(vec.len() as isize)` 1064 /// is always safe. 1065 /// 1066 /// Most platforms fundamentally can't even construct such an 1067 /// allocation. For instance, no known 64-bit platform can ever 1068 /// serve a request for 2<sup>63</sup> bytes due to page-table limitations or 1069 /// splitting the address space. However, some 32-bit and 16-bit 1070 /// platforms may successfully serve a request for more than 1071 /// `isize::MAX` bytes with things like Physical Address Extension. 1072 /// As such, memory acquired directly from allocators or memory 1073 /// mapped files may be too large to handle with this function. 1074 /// 1075 /// Consider using `wrapping_offset` instead if these constraints 1076 /// are difficult to satisfy. The only advantage of this method is 1077 /// that it enables more aggressive compiler optimizations. 1078 #[inline] 1079 pub unsafe fn offset(self, count: $isize_ty) -> Self { 1080 // FIXME: should use LLVM's `add nsw nuw` 1081 self.wrapping_offset(count) 1082 } 1083 1084 /// Calculates the offset from a pointer using wrapping arithmetic. 1085 /// 1086 /// `count` is in units of `T`; e.g.
a count of `3` represents a 1087 /// pointer offset of `3 * size_of::<T>()` bytes. 1088 /// 1089 /// # Safety 1090 /// 1091 /// The resulting pointer does not need to be in bounds, but it is 1092 /// potentially hazardous to dereference (which requires unsafe). 1093 /// 1094 /// Always use `.offset(count)` instead when possible, because 1095 /// offset allows the compiler to optimize better. 1096 #[inline] 1097 pub fn wrapping_offset(self, count: $isize_ty) -> Self { 1098 unsafe { 1099 let x: $isize_ty = crate::mem::transmute(self); 1100 // note: {+,*} currently performs a `wrapping_{add, mul}` 1101 crate::mem::transmute( 1102 x + (count * crate::mem::size_of::<T>() as isize) 1103 ) 1104 } 1105 } 1106 1107 /// Calculates the distance between two pointers. 1108 /// 1109 /// The returned value is in units of `T`: the distance in bytes is 1110 /// divided by `mem::size_of::<T>()`. 1111 /// 1112 /// This function is the inverse of offset. 1113 /// 1114 /// # Safety 1115 /// 1116 /// If any of the following conditions are violated, the result is 1117 /// Undefined Behavior: 1118 /// 1119 /// * Both the starting and other pointer must be either in bounds 1120 /// or one byte past the end of the same allocated object. 1121 /// 1122 /// * The distance between the pointers, in bytes, cannot overflow 1123 /// an `isize`. 1124 /// 1125 /// * The distance between the pointers, in bytes, must be an exact 1126 /// multiple of the size of `T`. 1127 /// 1128 /// * The distance being in bounds cannot rely on "wrapping around" 1129 /// the address space. 1130 /// 1131 /// The compiler and standard library generally try to ensure 1132 /// allocations never reach a size where an offset is a concern. For 1133 /// instance, `Vec` and `Box` ensure they never allocate more than 1134 /// `isize::MAX` bytes, so `ptr_into_vec.offset_from(vec.as_ptr())` 1135 /// is always safe. 1136 /// 1137 /// Most platforms fundamentally can't even construct such an 1138 /// allocation. 
For instance, no known 64-bit platform can ever 1139 /// serve a request for 2<sup>63</sup> bytes due to page-table limitations or 1140 /// splitting the address space. However, some 32-bit and 16-bit 1141 /// platforms may successfully serve a request for more than 1142 /// `isize::MAX` bytes with things like Physical Address Extension. 1143 /// As such, memory acquired directly from allocators or memory 1144 /// mapped files may be too large to handle with this function. 1145 /// 1146 /// Consider using `wrapping_offset_from` instead if these constraints 1147 /// are difficult to satisfy. The only advantage of this method is 1148 /// that it enables more aggressive compiler optimizations. 1149 #[inline] 1150 pub unsafe fn offset_from(self, origin: Self) -> $isize_ty { 1151 // FIXME: should use LLVM's `sub nsw nuw`. 1152 self.wrapping_offset_from(origin) 1153 } 1154 1155 /// Calculates the distance between two pointers. 1156 /// 1157 /// The returned value is in units of `T`: the distance in bytes is 1158 /// divided by `mem::size_of::<T>()`. 1159 /// 1160 /// If the address difference between the two pointers is not a 1161 /// multiple of `mem::size_of::<T>()` then the result of the 1162 /// division is rounded towards zero. 1163 /// 1164 /// Though this method is safe for any two pointers, note that its 1165 /// result will be mostly useless if the two pointers aren't into 1166 /// the same allocated object, for example if they point to two 1167 /// different local variables. 1168 #[inline] 1169 pub fn wrapping_offset_from(self, origin: Self) -> $isize_ty { 1170 let x: $isize_ty = unsafe { crate::mem::transmute(self) }; 1171 let y: $isize_ty = unsafe { crate::mem::transmute(origin) }; 1172 // note: {-,/} currently perform wrapping_{sub, div} // NOTE(review): this evaluates `origin - self`, but std's `offset_from`/`wrapping_offset_from` return `self - origin` and the doc above calls this "the inverse of offset" — looks sign-inverted; confirm intended semantics before changing. 1173 (y - x) / (crate::mem::size_of::<T>() as isize) 1174 } 1175 1176 /// Calculates the offset from a pointer (convenience for 1177 /// `.offset(count as isize)`). 1178 /// 1179 /// `count` is in units of `T`; e.g.
a count of 3 represents a 1180 /// pointer offset of `3 * size_of::<T>()` bytes. 1181 /// 1182 /// # Safety 1183 /// 1184 /// If any of the following conditions are violated, the result is 1185 /// Undefined Behavior: 1186 /// 1187 /// * Both the starting and resulting pointer must be either in 1188 /// bounds or one byte past the end of an allocated object. 1189 /// 1190 /// * The computed offset, in bytes, cannot overflow an `isize`. 1191 /// 1192 /// * The offset being in bounds cannot rely on "wrapping around" 1193 /// the address space. That is, the infinite-precision sum must fit 1194 /// in a `usize`. 1195 /// 1196 /// The compiler and standard library generally tries to ensure 1197 /// allocations never reach a size where an offset is a concern. For 1198 /// instance, `Vec` and `Box` ensure they never allocate more than 1199 /// `isize::MAX` bytes, so `vec.as_ptr().add(vec.len())` is always 1200 /// safe. 1201 /// 1202 /// Most platforms fundamentally can't even construct such an 1203 /// allocation. For instance, no known 64-bit platform can ever 1204 /// serve a request for 263 bytes due to page-table limitations or 1205 /// splitting the address space. However, some 32-bit and 16-bit 1206 /// platforms may successfully serve a request for more than 1207 /// `isize::MAX` bytes with things like Physical Address Extension. 1208 /// As such, memory acquired directly from allocators or memory 1209 /// mapped files may be too large to handle with this function. 1210 /// 1211 /// Consider using `wrapping_offset` instead if these constraints 1212 /// are difficult to satisfy. The only advantage of this method is 1213 /// that it enables more aggressive compiler optimizations. 1214 #[inline] 1215 #[allow(clippy::should_implement_trait)] 1216 pub unsafe fn add(self, count: $usize_ty) -> Self { 1217 self.offset(count.cast()) 1218 } 1219 1220 /// Calculates the offset from a pointer (convenience for 1221 /// `.offset((count as isize).wrapping_neg())`). 
1222 /// 1223 /// `count` is in units of T; e.g. a `count` of 3 represents a 1224 /// pointer offset of `3 * size_of::<T>()` bytes. 1225 /// 1226 /// # Safety 1227 /// 1228 /// If any of the following conditions are violated, the result is 1229 /// Undefined Behavior: 1230 /// 1231 /// * Both the starting and resulting pointer must be either in 1232 /// bounds or one byte past the end of an allocated object. 1233 /// 1234 /// * The computed offset cannot exceed `isize::MAX` **bytes**. 1235 /// 1236 /// * The offset being in bounds cannot rely on "wrapping around" 1237 /// the address space. That is, the infinite-precision sum must fit 1238 /// in a usize. 1239 /// 1240 /// The compiler and standard library generally tries to ensure 1241 /// allocations never reach a size where an offset is a concern. For 1242 /// instance, `Vec` and `Box` ensure they never allocate more than 1243 /// `isize::MAX` bytes, so 1244 /// `vec.as_ptr().add(vec.len()).sub(vec.len())` is always safe. 1245 /// 1246 /// Most platforms fundamentally can't even construct such an 1247 /// allocation. For instance, no known 64-bit platform can ever 1248 /// serve a request for 2<sup>63</sup> bytes due to page-table 1249 /// limitations or splitting the address space. However, some 32-bit 1250 /// and 16-bit platforms may successfully serve a request for more 1251 /// than `isize::MAX` bytes with things like Physical Address 1252 /// Extension. As such, memory acquired directly from allocators or 1253 /// memory mapped files *may* be too large to handle with this 1254 /// function. 1255 /// 1256 /// Consider using `wrapping_offset` instead if these constraints 1257 /// are difficult to satisfy. The only advantage of this method is 1258 /// that it enables more aggressive compiler optimizations. 
1259 #[inline] 1260 #[allow(clippy::should_implement_trait)] 1261 pub unsafe fn sub(self, count: $usize_ty) -> Self { 1262 let x: $isize_ty = count.cast(); 1263 // note: - is currently wrapping_neg 1264 self.offset(-x) 1265 } 1266 1267 /// Calculates the offset from a pointer using wrapping arithmetic. 1268 /// (convenience for `.wrapping_offset(count as isize)`) 1269 /// 1270 /// `count` is in units of T; e.g. a `count` of 3 represents a 1271 /// pointer offset of `3 * size_of::<T>()` bytes. 1272 /// 1273 /// # Safety 1274 /// 1275 /// The resulting pointer does not need to be in bounds, but it is 1276 /// potentially hazardous to dereference (which requires `unsafe`). 1277 /// 1278 /// Always use `.add(count)` instead when possible, because `add` 1279 /// allows the compiler to optimize better. 1280 #[inline] 1281 pub fn wrapping_add(self, count: $usize_ty) -> Self { 1282 self.wrapping_offset(count.cast()) 1283 } 1284 1285 /// Calculates the offset from a pointer using wrapping arithmetic. 1286 /// (convenience for `.wrapping_offset((count as 1287 /// isize).wrapping_sub())`) 1288 /// 1289 /// `count` is in units of T; e.g. a `count` of 3 represents a 1290 /// pointer offset of `3 * size_of::<T>()` bytes. 1291 /// 1292 /// # Safety 1293 /// 1294 /// The resulting pointer does not need to be in bounds, but it is 1295 /// potentially hazardous to dereference (which requires `unsafe`). 1296 /// 1297 /// Always use `.sub(count)` instead when possible, because `sub` 1298 /// allows the compiler to optimize better. 1299 #[inline] 1300 pub fn wrapping_sub(self, count: $usize_ty) -> Self { 1301 let x: $isize_ty = count.cast(); 1302 self.wrapping_offset(-1 * x) 1303 } 1304 } 1305 1306 impl<T> $id<T> { 1307 /// Shuffle vector elements according to `indices`. 
            #[inline]
            pub fn shuffle1_dyn<I>(self, indices: I) -> Self
            where
                Self: codegen::shuffle1_dyn::Shuffle1Dyn<Indices = I>,
            {
                // Delegates to the per-target `Shuffle1Dyn` codegen impl;
                // the accepted `Indices` type is whatever that impl declares.
                codegen::shuffle1_dyn::Shuffle1Dyn::shuffle1_dyn(self, indices)
            }
        }

        test_if! {
            $test_tt:
            paste::item! {
                pub mod [<$id _shuffle1_dyn>] {
                    use super::*;
                    #[cfg_attr(not(target_arch = "wasm32"), test)]
                    #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
                    fn shuffle1_dyn() {
                        // `ptr_vals!` yields one null and one non-null pointer
                        // value for the element type (macro defined elsewhere).
                        let (null, non_null) = ptr_vals!($id<i32>);

                        // alternating = [non_null, null, non_null, null, ...]
                        let mut alternating = $id::<i32>::splat(null);
                        for i in 0..$id::<i32>::lanes() {
                            if i % 2 == 0 {
                                alternating = alternating.replace(i, non_null);
                            }
                        }

                        type Indices = <$id<i32>
                            as codegen::shuffle1_dyn::Shuffle1Dyn>::Indices;
                        // even = [0, 0, 2, 2, 4, 4, ..]
                        // (every lane selects the nearest even source lane)
                        let even = {
                            let mut v = Indices::splat(0);
                            for i in 0..$id::<i32>::lanes() {
                                if i % 2 == 0 {
                                    v = v.replace(i, (i as u8).into());
                                } else {
                                    v = v.replace(i, (i as u8 - 1).into());
                                }
                            }
                            v
                        };
                        // odd = [1, 1, 3, 3, 5, 5, ...]
                        // (every lane selects the nearest odd source lane)
                        let odd = {
                            let mut v = Indices::splat(0);
                            for i in 0..$id::<i32>::lanes() {
                                if i % 2 != 0 {
                                    v = v.replace(i, (i as u8).into());
                                } else {
                                    v = v.replace(i, (i as u8 + 1).into());
                                }
                            }
                            v
                        };

                        // Selecting only even lanes of `alternating` must
                        // produce all non-null lanes...
                        assert_eq!(
                            alternating.shuffle1_dyn(even),
                            $id::<i32>::splat(non_null)
                        );
                        // ...and only odd lanes must produce all null lanes
                        // (odd lanes only exist when there is more than one).
                        if $id::<i32>::lanes() > 1 {
                            assert_eq!(
                                alternating.shuffle1_dyn(odd),
                                $id::<i32>::splat(null)
                            );
                        }
                    }
                }
            }
        }
    };
}