//! Compiler intrinsics.
//!
//! The corresponding definitions are in `compiler/rustc_codegen_llvm/src/intrinsic.rs`.
//! The corresponding const implementations are in `compiler/rustc_mir/src/interpret/intrinsics.rs`.
//!
//! # Const intrinsics
//!
//! Note: any changes to the constness of intrinsics should be discussed with the language team.
//! This includes changes in the stability of the constness.
//!
//! In order to make an intrinsic usable at compile-time, one needs to copy the implementation
//! from <https://github.com/rust-lang/miri/blob/master/src/shims/intrinsics.rs> to
//! `compiler/rustc_mir/src/interpret/intrinsics.rs` and add a
//! `#[rustc_const_unstable(feature = "foo", issue = "01234")]` to the intrinsic.
//!
//! If an intrinsic is supposed to be used from a `const fn` with a `rustc_const_stable` attribute,
//! the intrinsic's attribute must be `rustc_const_stable`, too. Such a change should not be done
//! without T-lang consultation, because it bakes a feature into the language that cannot be
//! replicated in user code without compiler support.
//!
//! # Volatiles
//!
//! The volatile intrinsics provide operations intended to act on I/O
//! memory, which are guaranteed to not be reordered by the compiler
//! across other volatile intrinsics. See the LLVM documentation on
//! [[volatile]].
//!
//! [volatile]: https://llvm.org/docs/LangRef.html#volatile-memory-accesses
//!
//! # Atomics
//!
//! The atomic intrinsics provide common atomic operations on machine
//! words, with multiple possible memory orderings. They obey the same
//! semantics as C++11. See the LLVM documentation on [[atomics]].
//!
//! [atomics]: https://llvm.org/docs/Atomics.html
//!
//! A quick refresher on memory ordering:
//!
//! * Acquire - a barrier for acquiring a lock. Subsequent reads and writes
//!   take place after the barrier.
//! * Release - a barrier for releasing a lock. Preceding reads and writes
//!   take place before the barrier.
//! * Sequentially consistent - sequentially consistent operations are
//!   guaranteed to happen in order. This is the standard mode for working
//!   with atomic types and is equivalent to Java's `volatile`.

#![unstable(
    feature = "core_intrinsics",
    reason = "intrinsics are unlikely to ever be stabilized, instead \
              they should be used through stabilized interfaces \
              in the rest of the standard library",
    issue = "none"
)]
#![allow(missing_docs)]

use crate::marker::DiscriminantKind;
use crate::mem;

// These imports are used for simplifying intra-doc links
#[allow(unused_imports)]
#[cfg(all(target_has_atomic = "8", target_has_atomic = "32", target_has_atomic = "ptr"))]
use crate::sync::atomic::{self, AtomicBool, AtomicI32, AtomicIsize, AtomicU32, Ordering};

65 #[stable(feature = "drop_in_place", since = "1.8.0")]
66 #[rustc_deprecated(
67 reason = "no longer an intrinsic - use `ptr::drop_in_place` directly",
68 since = "1.52.0"
69 )]
70 #[inline]
drop_in_place<T: ?Sized>(to_drop: *mut T)71 pub unsafe fn drop_in_place<T: ?Sized>(to_drop: *mut T) {
72 // SAFETY: see `ptr::drop_in_place`
73 unsafe { crate::ptr::drop_in_place(to_drop) }
74 }
75
76 extern "rust-intrinsic" {
77 // N.B., these intrinsics take raw pointers because they mutate aliased
78 // memory, which is not valid for either `&` or `&mut`.
79
80 /// Stores a value if the current value is the same as the `old` value.
81 ///
82 /// The stabilized version of this intrinsic is available on the
83 /// [`atomic`] types via the `compare_exchange` method by passing
84 /// [`Ordering::SeqCst`] as both the `success` and `failure` parameters.
85 /// For example, [`AtomicBool::compare_exchange`].
atomic_cxchg<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool)86 pub fn atomic_cxchg<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
87 /// Stores a value if the current value is the same as the `old` value.
88 ///
89 /// The stabilized version of this intrinsic is available on the
90 /// [`atomic`] types via the `compare_exchange` method by passing
91 /// [`Ordering::Acquire`] as both the `success` and `failure` parameters.
92 /// For example, [`AtomicBool::compare_exchange`].
atomic_cxchg_acq<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool)93 pub fn atomic_cxchg_acq<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
94 /// Stores a value if the current value is the same as the `old` value.
95 ///
96 /// The stabilized version of this intrinsic is available on the
97 /// [`atomic`] types via the `compare_exchange` method by passing
98 /// [`Ordering::Release`] as the `success` and [`Ordering::Relaxed`] as the
99 /// `failure` parameters. For example, [`AtomicBool::compare_exchange`].
atomic_cxchg_rel<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool)100 pub fn atomic_cxchg_rel<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
101 /// Stores a value if the current value is the same as the `old` value.
102 ///
103 /// The stabilized version of this intrinsic is available on the
104 /// [`atomic`] types via the `compare_exchange` method by passing
105 /// [`Ordering::AcqRel`] as the `success` and [`Ordering::Acquire`] as the
106 /// `failure` parameters. For example, [`AtomicBool::compare_exchange`].
atomic_cxchg_acqrel<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool)107 pub fn atomic_cxchg_acqrel<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
108 /// Stores a value if the current value is the same as the `old` value.
109 ///
110 /// The stabilized version of this intrinsic is available on the
111 /// [`atomic`] types via the `compare_exchange` method by passing
112 /// [`Ordering::Relaxed`] as both the `success` and `failure` parameters.
113 /// For example, [`AtomicBool::compare_exchange`].
atomic_cxchg_relaxed<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool)114 pub fn atomic_cxchg_relaxed<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
115 /// Stores a value if the current value is the same as the `old` value.
116 ///
117 /// The stabilized version of this intrinsic is available on the
118 /// [`atomic`] types via the `compare_exchange` method by passing
119 /// [`Ordering::SeqCst`] as the `success` and [`Ordering::Relaxed`] as the
120 /// `failure` parameters. For example, [`AtomicBool::compare_exchange`].
atomic_cxchg_failrelaxed<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool)121 pub fn atomic_cxchg_failrelaxed<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
122 /// Stores a value if the current value is the same as the `old` value.
123 ///
124 /// The stabilized version of this intrinsic is available on the
125 /// [`atomic`] types via the `compare_exchange` method by passing
126 /// [`Ordering::SeqCst`] as the `success` and [`Ordering::Acquire`] as the
127 /// `failure` parameters. For example, [`AtomicBool::compare_exchange`].
atomic_cxchg_failacq<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool)128 pub fn atomic_cxchg_failacq<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
129 /// Stores a value if the current value is the same as the `old` value.
130 ///
131 /// The stabilized version of this intrinsic is available on the
132 /// [`atomic`] types via the `compare_exchange` method by passing
133 /// [`Ordering::Acquire`] as the `success` and [`Ordering::Relaxed`] as the
134 /// `failure` parameters. For example, [`AtomicBool::compare_exchange`].
atomic_cxchg_acq_failrelaxed<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool)135 pub fn atomic_cxchg_acq_failrelaxed<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
136 /// Stores a value if the current value is the same as the `old` value.
137 ///
138 /// The stabilized version of this intrinsic is available on the
139 /// [`atomic`] types via the `compare_exchange` method by passing
140 /// [`Ordering::AcqRel`] as the `success` and [`Ordering::Relaxed`] as the
141 /// `failure` parameters. For example, [`AtomicBool::compare_exchange`].
atomic_cxchg_acqrel_failrelaxed<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool)142 pub fn atomic_cxchg_acqrel_failrelaxed<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
143
144 /// Stores a value if the current value is the same as the `old` value.
145 ///
146 /// The stabilized version of this intrinsic is available on the
147 /// [`atomic`] types via the `compare_exchange_weak` method by passing
148 /// [`Ordering::SeqCst`] as both the `success` and `failure` parameters.
149 /// For example, [`AtomicBool::compare_exchange_weak`].
atomic_cxchgweak<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool)150 pub fn atomic_cxchgweak<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
151 /// Stores a value if the current value is the same as the `old` value.
152 ///
153 /// The stabilized version of this intrinsic is available on the
154 /// [`atomic`] types via the `compare_exchange_weak` method by passing
155 /// [`Ordering::Acquire`] as both the `success` and `failure` parameters.
156 /// For example, [`AtomicBool::compare_exchange_weak`].
atomic_cxchgweak_acq<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool)157 pub fn atomic_cxchgweak_acq<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
158 /// Stores a value if the current value is the same as the `old` value.
159 ///
160 /// The stabilized version of this intrinsic is available on the
161 /// [`atomic`] types via the `compare_exchange_weak` method by passing
162 /// [`Ordering::Release`] as the `success` and [`Ordering::Relaxed`] as the
163 /// `failure` parameters. For example, [`AtomicBool::compare_exchange_weak`].
atomic_cxchgweak_rel<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool)164 pub fn atomic_cxchgweak_rel<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
165 /// Stores a value if the current value is the same as the `old` value.
166 ///
167 /// The stabilized version of this intrinsic is available on the
168 /// [`atomic`] types via the `compare_exchange_weak` method by passing
169 /// [`Ordering::AcqRel`] as the `success` and [`Ordering::Acquire`] as the
170 /// `failure` parameters. For example, [`AtomicBool::compare_exchange_weak`].
atomic_cxchgweak_acqrel<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool)171 pub fn atomic_cxchgweak_acqrel<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
172 /// Stores a value if the current value is the same as the `old` value.
173 ///
174 /// The stabilized version of this intrinsic is available on the
175 /// [`atomic`] types via the `compare_exchange_weak` method by passing
176 /// [`Ordering::Relaxed`] as both the `success` and `failure` parameters.
177 /// For example, [`AtomicBool::compare_exchange_weak`].
atomic_cxchgweak_relaxed<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool)178 pub fn atomic_cxchgweak_relaxed<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
179 /// Stores a value if the current value is the same as the `old` value.
180 ///
181 /// The stabilized version of this intrinsic is available on the
182 /// [`atomic`] types via the `compare_exchange_weak` method by passing
183 /// [`Ordering::SeqCst`] as the `success` and [`Ordering::Relaxed`] as the
184 /// `failure` parameters. For example, [`AtomicBool::compare_exchange_weak`].
atomic_cxchgweak_failrelaxed<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool)185 pub fn atomic_cxchgweak_failrelaxed<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
186 /// Stores a value if the current value is the same as the `old` value.
187 ///
188 /// The stabilized version of this intrinsic is available on the
189 /// [`atomic`] types via the `compare_exchange_weak` method by passing
190 /// [`Ordering::SeqCst`] as the `success` and [`Ordering::Acquire`] as the
191 /// `failure` parameters. For example, [`AtomicBool::compare_exchange_weak`].
atomic_cxchgweak_failacq<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool)192 pub fn atomic_cxchgweak_failacq<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
193 /// Stores a value if the current value is the same as the `old` value.
194 ///
195 /// The stabilized version of this intrinsic is available on the
196 /// [`atomic`] types via the `compare_exchange_weak` method by passing
197 /// [`Ordering::Acquire`] as the `success` and [`Ordering::Relaxed`] as the
198 /// `failure` parameters. For example, [`AtomicBool::compare_exchange_weak`].
atomic_cxchgweak_acq_failrelaxed<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool)199 pub fn atomic_cxchgweak_acq_failrelaxed<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
200 /// Stores a value if the current value is the same as the `old` value.
201 ///
202 /// The stabilized version of this intrinsic is available on the
203 /// [`atomic`] types via the `compare_exchange_weak` method by passing
204 /// [`Ordering::AcqRel`] as the `success` and [`Ordering::Relaxed`] as the
205 /// `failure` parameters. For example, [`AtomicBool::compare_exchange_weak`].
atomic_cxchgweak_acqrel_failrelaxed<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool)206 pub fn atomic_cxchgweak_acqrel_failrelaxed<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
207
208 /// Loads the current value of the pointer.
209 ///
210 /// The stabilized version of this intrinsic is available on the
211 /// [`atomic`] types via the `load` method by passing
212 /// [`Ordering::SeqCst`] as the `order`. For example, [`AtomicBool::load`].
atomic_load<T: Copy>(src: *const T) -> T213 pub fn atomic_load<T: Copy>(src: *const T) -> T;
214 /// Loads the current value of the pointer.
215 ///
216 /// The stabilized version of this intrinsic is available on the
217 /// [`atomic`] types via the `load` method by passing
218 /// [`Ordering::Acquire`] as the `order`. For example, [`AtomicBool::load`].
atomic_load_acq<T: Copy>(src: *const T) -> T219 pub fn atomic_load_acq<T: Copy>(src: *const T) -> T;
220 /// Loads the current value of the pointer.
221 ///
222 /// The stabilized version of this intrinsic is available on the
223 /// [`atomic`] types via the `load` method by passing
224 /// [`Ordering::Relaxed`] as the `order`. For example, [`AtomicBool::load`].
atomic_load_relaxed<T: Copy>(src: *const T) -> T225 pub fn atomic_load_relaxed<T: Copy>(src: *const T) -> T;
atomic_load_unordered<T: Copy>(src: *const T) -> T226 pub fn atomic_load_unordered<T: Copy>(src: *const T) -> T;
227
228 /// Stores the value at the specified memory location.
229 ///
230 /// The stabilized version of this intrinsic is available on the
231 /// [`atomic`] types via the `store` method by passing
232 /// [`Ordering::SeqCst`] as the `order`. For example, [`AtomicBool::store`].
atomic_store<T: Copy>(dst: *mut T, val: T)233 pub fn atomic_store<T: Copy>(dst: *mut T, val: T);
234 /// Stores the value at the specified memory location.
235 ///
236 /// The stabilized version of this intrinsic is available on the
237 /// [`atomic`] types via the `store` method by passing
238 /// [`Ordering::Release`] as the `order`. For example, [`AtomicBool::store`].
atomic_store_rel<T: Copy>(dst: *mut T, val: T)239 pub fn atomic_store_rel<T: Copy>(dst: *mut T, val: T);
240 /// Stores the value at the specified memory location.
241 ///
242 /// The stabilized version of this intrinsic is available on the
243 /// [`atomic`] types via the `store` method by passing
244 /// [`Ordering::Relaxed`] as the `order`. For example, [`AtomicBool::store`].
atomic_store_relaxed<T: Copy>(dst: *mut T, val: T)245 pub fn atomic_store_relaxed<T: Copy>(dst: *mut T, val: T);
atomic_store_unordered<T: Copy>(dst: *mut T, val: T)246 pub fn atomic_store_unordered<T: Copy>(dst: *mut T, val: T);
247
248 /// Stores the value at the specified memory location, returning the old value.
249 ///
250 /// The stabilized version of this intrinsic is available on the
251 /// [`atomic`] types via the `swap` method by passing
252 /// [`Ordering::SeqCst`] as the `order`. For example, [`AtomicBool::swap`].
atomic_xchg<T: Copy>(dst: *mut T, src: T) -> T253 pub fn atomic_xchg<T: Copy>(dst: *mut T, src: T) -> T;
254 /// Stores the value at the specified memory location, returning the old value.
255 ///
256 /// The stabilized version of this intrinsic is available on the
257 /// [`atomic`] types via the `swap` method by passing
258 /// [`Ordering::Acquire`] as the `order`. For example, [`AtomicBool::swap`].
atomic_xchg_acq<T: Copy>(dst: *mut T, src: T) -> T259 pub fn atomic_xchg_acq<T: Copy>(dst: *mut T, src: T) -> T;
260 /// Stores the value at the specified memory location, returning the old value.
261 ///
262 /// The stabilized version of this intrinsic is available on the
263 /// [`atomic`] types via the `swap` method by passing
264 /// [`Ordering::Release`] as the `order`. For example, [`AtomicBool::swap`].
atomic_xchg_rel<T: Copy>(dst: *mut T, src: T) -> T265 pub fn atomic_xchg_rel<T: Copy>(dst: *mut T, src: T) -> T;
266 /// Stores the value at the specified memory location, returning the old value.
267 ///
268 /// The stabilized version of this intrinsic is available on the
269 /// [`atomic`] types via the `swap` method by passing
270 /// [`Ordering::AcqRel`] as the `order`. For example, [`AtomicBool::swap`].
atomic_xchg_acqrel<T: Copy>(dst: *mut T, src: T) -> T271 pub fn atomic_xchg_acqrel<T: Copy>(dst: *mut T, src: T) -> T;
272 /// Stores the value at the specified memory location, returning the old value.
273 ///
274 /// The stabilized version of this intrinsic is available on the
275 /// [`atomic`] types via the `swap` method by passing
276 /// [`Ordering::Relaxed`] as the `order`. For example, [`AtomicBool::swap`].
atomic_xchg_relaxed<T: Copy>(dst: *mut T, src: T) -> T277 pub fn atomic_xchg_relaxed<T: Copy>(dst: *mut T, src: T) -> T;
278
279 /// Adds to the current value, returning the previous value.
280 ///
281 /// The stabilized version of this intrinsic is available on the
282 /// [`atomic`] types via the `fetch_add` method by passing
283 /// [`Ordering::SeqCst`] as the `order`. For example, [`AtomicIsize::fetch_add`].
atomic_xadd<T: Copy>(dst: *mut T, src: T) -> T284 pub fn atomic_xadd<T: Copy>(dst: *mut T, src: T) -> T;
285 /// Adds to the current value, returning the previous value.
286 ///
287 /// The stabilized version of this intrinsic is available on the
288 /// [`atomic`] types via the `fetch_add` method by passing
289 /// [`Ordering::Acquire`] as the `order`. For example, [`AtomicIsize::fetch_add`].
atomic_xadd_acq<T: Copy>(dst: *mut T, src: T) -> T290 pub fn atomic_xadd_acq<T: Copy>(dst: *mut T, src: T) -> T;
291 /// Adds to the current value, returning the previous value.
292 ///
293 /// The stabilized version of this intrinsic is available on the
294 /// [`atomic`] types via the `fetch_add` method by passing
295 /// [`Ordering::Release`] as the `order`. For example, [`AtomicIsize::fetch_add`].
atomic_xadd_rel<T: Copy>(dst: *mut T, src: T) -> T296 pub fn atomic_xadd_rel<T: Copy>(dst: *mut T, src: T) -> T;
297 /// Adds to the current value, returning the previous value.
298 ///
299 /// The stabilized version of this intrinsic is available on the
300 /// [`atomic`] types via the `fetch_add` method by passing
301 /// [`Ordering::AcqRel`] as the `order`. For example, [`AtomicIsize::fetch_add`].
atomic_xadd_acqrel<T: Copy>(dst: *mut T, src: T) -> T302 pub fn atomic_xadd_acqrel<T: Copy>(dst: *mut T, src: T) -> T;
303 /// Adds to the current value, returning the previous value.
304 ///
305 /// The stabilized version of this intrinsic is available on the
306 /// [`atomic`] types via the `fetch_add` method by passing
307 /// [`Ordering::Relaxed`] as the `order`. For example, [`AtomicIsize::fetch_add`].
atomic_xadd_relaxed<T: Copy>(dst: *mut T, src: T) -> T308 pub fn atomic_xadd_relaxed<T: Copy>(dst: *mut T, src: T) -> T;
309
310 /// Subtract from the current value, returning the previous value.
311 ///
312 /// The stabilized version of this intrinsic is available on the
313 /// [`atomic`] types via the `fetch_sub` method by passing
314 /// [`Ordering::SeqCst`] as the `order`. For example, [`AtomicIsize::fetch_sub`].
atomic_xsub<T: Copy>(dst: *mut T, src: T) -> T315 pub fn atomic_xsub<T: Copy>(dst: *mut T, src: T) -> T;
316 /// Subtract from the current value, returning the previous value.
317 ///
318 /// The stabilized version of this intrinsic is available on the
319 /// [`atomic`] types via the `fetch_sub` method by passing
320 /// [`Ordering::Acquire`] as the `order`. For example, [`AtomicIsize::fetch_sub`].
atomic_xsub_acq<T: Copy>(dst: *mut T, src: T) -> T321 pub fn atomic_xsub_acq<T: Copy>(dst: *mut T, src: T) -> T;
322 /// Subtract from the current value, returning the previous value.
323 ///
324 /// The stabilized version of this intrinsic is available on the
325 /// [`atomic`] types via the `fetch_sub` method by passing
326 /// [`Ordering::Release`] as the `order`. For example, [`AtomicIsize::fetch_sub`].
atomic_xsub_rel<T: Copy>(dst: *mut T, src: T) -> T327 pub fn atomic_xsub_rel<T: Copy>(dst: *mut T, src: T) -> T;
328 /// Subtract from the current value, returning the previous value.
329 ///
330 /// The stabilized version of this intrinsic is available on the
331 /// [`atomic`] types via the `fetch_sub` method by passing
332 /// [`Ordering::AcqRel`] as the `order`. For example, [`AtomicIsize::fetch_sub`].
atomic_xsub_acqrel<T: Copy>(dst: *mut T, src: T) -> T333 pub fn atomic_xsub_acqrel<T: Copy>(dst: *mut T, src: T) -> T;
334 /// Subtract from the current value, returning the previous value.
335 ///
336 /// The stabilized version of this intrinsic is available on the
337 /// [`atomic`] types via the `fetch_sub` method by passing
338 /// [`Ordering::Relaxed`] as the `order`. For example, [`AtomicIsize::fetch_sub`].
atomic_xsub_relaxed<T: Copy>(dst: *mut T, src: T) -> T339 pub fn atomic_xsub_relaxed<T: Copy>(dst: *mut T, src: T) -> T;
340
341 /// Bitwise and with the current value, returning the previous value.
342 ///
343 /// The stabilized version of this intrinsic is available on the
344 /// [`atomic`] types via the `fetch_and` method by passing
345 /// [`Ordering::SeqCst`] as the `order`. For example, [`AtomicBool::fetch_and`].
atomic_and<T: Copy>(dst: *mut T, src: T) -> T346 pub fn atomic_and<T: Copy>(dst: *mut T, src: T) -> T;
347 /// Bitwise and with the current value, returning the previous value.
348 ///
349 /// The stabilized version of this intrinsic is available on the
350 /// [`atomic`] types via the `fetch_and` method by passing
351 /// [`Ordering::Acquire`] as the `order`. For example, [`AtomicBool::fetch_and`].
atomic_and_acq<T: Copy>(dst: *mut T, src: T) -> T352 pub fn atomic_and_acq<T: Copy>(dst: *mut T, src: T) -> T;
353 /// Bitwise and with the current value, returning the previous value.
354 ///
355 /// The stabilized version of this intrinsic is available on the
356 /// [`atomic`] types via the `fetch_and` method by passing
357 /// [`Ordering::Release`] as the `order`. For example, [`AtomicBool::fetch_and`].
atomic_and_rel<T: Copy>(dst: *mut T, src: T) -> T358 pub fn atomic_and_rel<T: Copy>(dst: *mut T, src: T) -> T;
359 /// Bitwise and with the current value, returning the previous value.
360 ///
361 /// The stabilized version of this intrinsic is available on the
362 /// [`atomic`] types via the `fetch_and` method by passing
363 /// [`Ordering::AcqRel`] as the `order`. For example, [`AtomicBool::fetch_and`].
atomic_and_acqrel<T: Copy>(dst: *mut T, src: T) -> T364 pub fn atomic_and_acqrel<T: Copy>(dst: *mut T, src: T) -> T;
365 /// Bitwise and with the current value, returning the previous value.
366 ///
367 /// The stabilized version of this intrinsic is available on the
368 /// [`atomic`] types via the `fetch_and` method by passing
369 /// [`Ordering::Relaxed`] as the `order`. For example, [`AtomicBool::fetch_and`].
atomic_and_relaxed<T: Copy>(dst: *mut T, src: T) -> T370 pub fn atomic_and_relaxed<T: Copy>(dst: *mut T, src: T) -> T;
371
372 /// Bitwise nand with the current value, returning the previous value.
373 ///
374 /// The stabilized version of this intrinsic is available on the
375 /// [`AtomicBool`] type via the `fetch_nand` method by passing
376 /// [`Ordering::SeqCst`] as the `order`. For example, [`AtomicBool::fetch_nand`].
atomic_nand<T: Copy>(dst: *mut T, src: T) -> T377 pub fn atomic_nand<T: Copy>(dst: *mut T, src: T) -> T;
378 /// Bitwise nand with the current value, returning the previous value.
379 ///
380 /// The stabilized version of this intrinsic is available on the
381 /// [`AtomicBool`] type via the `fetch_nand` method by passing
382 /// [`Ordering::Acquire`] as the `order`. For example, [`AtomicBool::fetch_nand`].
atomic_nand_acq<T: Copy>(dst: *mut T, src: T) -> T383 pub fn atomic_nand_acq<T: Copy>(dst: *mut T, src: T) -> T;
384 /// Bitwise nand with the current value, returning the previous value.
385 ///
386 /// The stabilized version of this intrinsic is available on the
387 /// [`AtomicBool`] type via the `fetch_nand` method by passing
388 /// [`Ordering::Release`] as the `order`. For example, [`AtomicBool::fetch_nand`].
atomic_nand_rel<T: Copy>(dst: *mut T, src: T) -> T389 pub fn atomic_nand_rel<T: Copy>(dst: *mut T, src: T) -> T;
390 /// Bitwise nand with the current value, returning the previous value.
391 ///
392 /// The stabilized version of this intrinsic is available on the
393 /// [`AtomicBool`] type via the `fetch_nand` method by passing
394 /// [`Ordering::AcqRel`] as the `order`. For example, [`AtomicBool::fetch_nand`].
atomic_nand_acqrel<T: Copy>(dst: *mut T, src: T) -> T395 pub fn atomic_nand_acqrel<T: Copy>(dst: *mut T, src: T) -> T;
396 /// Bitwise nand with the current value, returning the previous value.
397 ///
398 /// The stabilized version of this intrinsic is available on the
399 /// [`AtomicBool`] type via the `fetch_nand` method by passing
400 /// [`Ordering::Relaxed`] as the `order`. For example, [`AtomicBool::fetch_nand`].
atomic_nand_relaxed<T: Copy>(dst: *mut T, src: T) -> T401 pub fn atomic_nand_relaxed<T: Copy>(dst: *mut T, src: T) -> T;
402
403 /// Bitwise or with the current value, returning the previous value.
404 ///
405 /// The stabilized version of this intrinsic is available on the
406 /// [`atomic`] types via the `fetch_or` method by passing
407 /// [`Ordering::SeqCst`] as the `order`. For example, [`AtomicBool::fetch_or`].
atomic_or<T: Copy>(dst: *mut T, src: T) -> T408 pub fn atomic_or<T: Copy>(dst: *mut T, src: T) -> T;
409 /// Bitwise or with the current value, returning the previous value.
410 ///
411 /// The stabilized version of this intrinsic is available on the
412 /// [`atomic`] types via the `fetch_or` method by passing
413 /// [`Ordering::Acquire`] as the `order`. For example, [`AtomicBool::fetch_or`].
atomic_or_acq<T: Copy>(dst: *mut T, src: T) -> T414 pub fn atomic_or_acq<T: Copy>(dst: *mut T, src: T) -> T;
415 /// Bitwise or with the current value, returning the previous value.
416 ///
417 /// The stabilized version of this intrinsic is available on the
418 /// [`atomic`] types via the `fetch_or` method by passing
419 /// [`Ordering::Release`] as the `order`. For example, [`AtomicBool::fetch_or`].
atomic_or_rel<T: Copy>(dst: *mut T, src: T) -> T420 pub fn atomic_or_rel<T: Copy>(dst: *mut T, src: T) -> T;
421 /// Bitwise or with the current value, returning the previous value.
422 ///
423 /// The stabilized version of this intrinsic is available on the
424 /// [`atomic`] types via the `fetch_or` method by passing
425 /// [`Ordering::AcqRel`] as the `order`. For example, [`AtomicBool::fetch_or`].
atomic_or_acqrel<T: Copy>(dst: *mut T, src: T) -> T426 pub fn atomic_or_acqrel<T: Copy>(dst: *mut T, src: T) -> T;
427 /// Bitwise or with the current value, returning the previous value.
428 ///
429 /// The stabilized version of this intrinsic is available on the
430 /// [`atomic`] types via the `fetch_or` method by passing
431 /// [`Ordering::Relaxed`] as the `order`. For example, [`AtomicBool::fetch_or`].
atomic_or_relaxed<T: Copy>(dst: *mut T, src: T) -> T432 pub fn atomic_or_relaxed<T: Copy>(dst: *mut T, src: T) -> T;
433
434 /// Bitwise xor with the current value, returning the previous value.
435 ///
436 /// The stabilized version of this intrinsic is available on the
437 /// [`atomic`] types via the `fetch_xor` method by passing
438 /// [`Ordering::SeqCst`] as the `order`. For example, [`AtomicBool::fetch_xor`].
atomic_xor<T: Copy>(dst: *mut T, src: T) -> T439 pub fn atomic_xor<T: Copy>(dst: *mut T, src: T) -> T;
440 /// Bitwise xor with the current value, returning the previous value.
441 ///
442 /// The stabilized version of this intrinsic is available on the
443 /// [`atomic`] types via the `fetch_xor` method by passing
444 /// [`Ordering::Acquire`] as the `order`. For example, [`AtomicBool::fetch_xor`].
atomic_xor_acq<T: Copy>(dst: *mut T, src: T) -> T445 pub fn atomic_xor_acq<T: Copy>(dst: *mut T, src: T) -> T;
446 /// Bitwise xor with the current value, returning the previous value.
447 ///
448 /// The stabilized version of this intrinsic is available on the
449 /// [`atomic`] types via the `fetch_xor` method by passing
450 /// [`Ordering::Release`] as the `order`. For example, [`AtomicBool::fetch_xor`].
atomic_xor_rel<T: Copy>(dst: *mut T, src: T) -> T451 pub fn atomic_xor_rel<T: Copy>(dst: *mut T, src: T) -> T;
452 /// Bitwise xor with the current value, returning the previous value.
453 ///
454 /// The stabilized version of this intrinsic is available on the
455 /// [`atomic`] types via the `fetch_xor` method by passing
456 /// [`Ordering::AcqRel`] as the `order`. For example, [`AtomicBool::fetch_xor`].
atomic_xor_acqrel<T: Copy>(dst: *mut T, src: T) -> T457 pub fn atomic_xor_acqrel<T: Copy>(dst: *mut T, src: T) -> T;
458 /// Bitwise xor with the current value, returning the previous value.
459 ///
460 /// The stabilized version of this intrinsic is available on the
461 /// [`atomic`] types via the `fetch_xor` method by passing
462 /// [`Ordering::Relaxed`] as the `order`. For example, [`AtomicBool::fetch_xor`].
atomic_xor_relaxed<T: Copy>(dst: *mut T, src: T) -> T463 pub fn atomic_xor_relaxed<T: Copy>(dst: *mut T, src: T) -> T;
464
/// Maximum with the current value using a signed comparison.
///
/// The stabilized version of this intrinsic is available on the
/// [`atomic`] signed integer types via the `fetch_max` method by passing
/// [`Ordering::SeqCst`] as the `order`. For example, [`AtomicI32::fetch_max`].
pub fn atomic_max<T: Copy>(dst: *mut T, src: T) -> T;
/// Maximum with the current value using a signed comparison.
///
/// The stabilized version of this intrinsic is available on the
/// [`atomic`] signed integer types via the `fetch_max` method by passing
/// [`Ordering::Acquire`] as the `order`. For example, [`AtomicI32::fetch_max`].
pub fn atomic_max_acq<T: Copy>(dst: *mut T, src: T) -> T;
/// Maximum with the current value using a signed comparison.
///
/// The stabilized version of this intrinsic is available on the
/// [`atomic`] signed integer types via the `fetch_max` method by passing
/// [`Ordering::Release`] as the `order`. For example, [`AtomicI32::fetch_max`].
pub fn atomic_max_rel<T: Copy>(dst: *mut T, src: T) -> T;
/// Maximum with the current value using a signed comparison.
///
/// The stabilized version of this intrinsic is available on the
/// [`atomic`] signed integer types via the `fetch_max` method by passing
/// [`Ordering::AcqRel`] as the `order`. For example, [`AtomicI32::fetch_max`].
pub fn atomic_max_acqrel<T: Copy>(dst: *mut T, src: T) -> T;
/// Maximum with the current value using a signed comparison.
///
/// The stabilized version of this intrinsic is available on the
/// [`atomic`] signed integer types via the `fetch_max` method by passing
/// [`Ordering::Relaxed`] as the `order`. For example, [`AtomicI32::fetch_max`].
pub fn atomic_max_relaxed<T: Copy>(dst: *mut T, src: T) -> T;

/// Minimum with the current value using a signed comparison.
///
/// The stabilized version of this intrinsic is available on the
/// [`atomic`] signed integer types via the `fetch_min` method by passing
/// [`Ordering::SeqCst`] as the `order`. For example, [`AtomicI32::fetch_min`].
pub fn atomic_min<T: Copy>(dst: *mut T, src: T) -> T;
/// Minimum with the current value using a signed comparison.
///
/// The stabilized version of this intrinsic is available on the
/// [`atomic`] signed integer types via the `fetch_min` method by passing
/// [`Ordering::Acquire`] as the `order`. For example, [`AtomicI32::fetch_min`].
pub fn atomic_min_acq<T: Copy>(dst: *mut T, src: T) -> T;
/// Minimum with the current value using a signed comparison.
///
/// The stabilized version of this intrinsic is available on the
/// [`atomic`] signed integer types via the `fetch_min` method by passing
/// [`Ordering::Release`] as the `order`. For example, [`AtomicI32::fetch_min`].
pub fn atomic_min_rel<T: Copy>(dst: *mut T, src: T) -> T;
/// Minimum with the current value using a signed comparison.
///
/// The stabilized version of this intrinsic is available on the
/// [`atomic`] signed integer types via the `fetch_min` method by passing
/// [`Ordering::AcqRel`] as the `order`. For example, [`AtomicI32::fetch_min`].
pub fn atomic_min_acqrel<T: Copy>(dst: *mut T, src: T) -> T;
/// Minimum with the current value using a signed comparison.
///
/// The stabilized version of this intrinsic is available on the
/// [`atomic`] signed integer types via the `fetch_min` method by passing
/// [`Ordering::Relaxed`] as the `order`. For example, [`AtomicI32::fetch_min`].
pub fn atomic_min_relaxed<T: Copy>(dst: *mut T, src: T) -> T;

/// Minimum with the current value using an unsigned comparison.
///
/// The stabilized version of this intrinsic is available on the
/// [`atomic`] unsigned integer types via the `fetch_min` method by passing
/// [`Ordering::SeqCst`] as the `order`. For example, [`AtomicU32::fetch_min`].
pub fn atomic_umin<T: Copy>(dst: *mut T, src: T) -> T;
/// Minimum with the current value using an unsigned comparison.
///
/// The stabilized version of this intrinsic is available on the
/// [`atomic`] unsigned integer types via the `fetch_min` method by passing
/// [`Ordering::Acquire`] as the `order`. For example, [`AtomicU32::fetch_min`].
pub fn atomic_umin_acq<T: Copy>(dst: *mut T, src: T) -> T;
/// Minimum with the current value using an unsigned comparison.
///
/// The stabilized version of this intrinsic is available on the
/// [`atomic`] unsigned integer types via the `fetch_min` method by passing
/// [`Ordering::Release`] as the `order`. For example, [`AtomicU32::fetch_min`].
pub fn atomic_umin_rel<T: Copy>(dst: *mut T, src: T) -> T;
/// Minimum with the current value using an unsigned comparison.
///
/// The stabilized version of this intrinsic is available on the
/// [`atomic`] unsigned integer types via the `fetch_min` method by passing
/// [`Ordering::AcqRel`] as the `order`. For example, [`AtomicU32::fetch_min`].
pub fn atomic_umin_acqrel<T: Copy>(dst: *mut T, src: T) -> T;
/// Minimum with the current value using an unsigned comparison.
///
/// The stabilized version of this intrinsic is available on the
/// [`atomic`] unsigned integer types via the `fetch_min` method by passing
/// [`Ordering::Relaxed`] as the `order`. For example, [`AtomicU32::fetch_min`].
pub fn atomic_umin_relaxed<T: Copy>(dst: *mut T, src: T) -> T;

/// Maximum with the current value using an unsigned comparison.
///
/// The stabilized version of this intrinsic is available on the
/// [`atomic`] unsigned integer types via the `fetch_max` method by passing
/// [`Ordering::SeqCst`] as the `order`. For example, [`AtomicU32::fetch_max`].
pub fn atomic_umax<T: Copy>(dst: *mut T, src: T) -> T;
/// Maximum with the current value using an unsigned comparison.
///
/// The stabilized version of this intrinsic is available on the
/// [`atomic`] unsigned integer types via the `fetch_max` method by passing
/// [`Ordering::Acquire`] as the `order`. For example, [`AtomicU32::fetch_max`].
pub fn atomic_umax_acq<T: Copy>(dst: *mut T, src: T) -> T;
/// Maximum with the current value using an unsigned comparison.
///
/// The stabilized version of this intrinsic is available on the
/// [`atomic`] unsigned integer types via the `fetch_max` method by passing
/// [`Ordering::Release`] as the `order`. For example, [`AtomicU32::fetch_max`].
pub fn atomic_umax_rel<T: Copy>(dst: *mut T, src: T) -> T;
/// Maximum with the current value using an unsigned comparison.
///
/// The stabilized version of this intrinsic is available on the
/// [`atomic`] unsigned integer types via the `fetch_max` method by passing
/// [`Ordering::AcqRel`] as the `order`. For example, [`AtomicU32::fetch_max`].
pub fn atomic_umax_acqrel<T: Copy>(dst: *mut T, src: T) -> T;
/// Maximum with the current value using an unsigned comparison.
///
/// The stabilized version of this intrinsic is available on the
/// [`atomic`] unsigned integer types via the `fetch_max` method by passing
/// [`Ordering::Relaxed`] as the `order`. For example, [`AtomicU32::fetch_max`].
pub fn atomic_umax_relaxed<T: Copy>(dst: *mut T, src: T) -> T;

/// The `prefetch` intrinsic is a hint to the code generator to insert a prefetch instruction
/// if supported; otherwise, it is a no-op.
/// Prefetches have no effect on the behavior of the program but can change its performance
/// characteristics.
///
/// The `locality` argument must be a constant integer and is a temporal locality specifier
/// ranging from (0) - no locality, to (3) - extremely local keep in cache.
///
/// This intrinsic does not have a stable counterpart.
pub fn prefetch_read_data<T>(data: *const T, locality: i32);
/// The `prefetch` intrinsic is a hint to the code generator to insert a prefetch instruction
/// if supported; otherwise, it is a no-op.
/// Prefetches have no effect on the behavior of the program but can change its performance
/// characteristics.
///
/// The `locality` argument must be a constant integer and is a temporal locality specifier
/// ranging from (0) - no locality, to (3) - extremely local keep in cache.
///
/// The pointer is `*const T` even for the write variants: a prefetch never reads or
/// writes the pointed-to memory as far as program semantics are concerned.
///
/// This intrinsic does not have a stable counterpart.
pub fn prefetch_write_data<T>(data: *const T, locality: i32);
/// The `prefetch` intrinsic is a hint to the code generator to insert a prefetch instruction
/// if supported; otherwise, it is a no-op.
/// Prefetches have no effect on the behavior of the program but can change its performance
/// characteristics.
///
/// The `locality` argument must be a constant integer and is a temporal locality specifier
/// ranging from (0) - no locality, to (3) - extremely local keep in cache.
///
/// This intrinsic does not have a stable counterpart.
pub fn prefetch_read_instruction<T>(data: *const T, locality: i32);
/// The `prefetch` intrinsic is a hint to the code generator to insert a prefetch instruction
/// if supported; otherwise, it is a no-op.
/// Prefetches have no effect on the behavior of the program but can change its performance
/// characteristics.
///
/// The `locality` argument must be a constant integer and is a temporal locality specifier
/// ranging from (0) - no locality, to (3) - extremely local keep in cache.
///
/// This intrinsic does not have a stable counterpart.
pub fn prefetch_write_instruction<T>(data: *const T, locality: i32);
629 }
630
631 extern "rust-intrinsic" {
/// An atomic fence.
///
/// The stabilized version of this intrinsic is available in
/// [`atomic::fence`] by passing [`Ordering::SeqCst`]
/// as the `order`.
pub fn atomic_fence();
/// An atomic fence.
///
/// The stabilized version of this intrinsic is available in
/// [`atomic::fence`] by passing [`Ordering::Acquire`]
/// as the `order`.
pub fn atomic_fence_acq();
/// An atomic fence.
///
/// The stabilized version of this intrinsic is available in
/// [`atomic::fence`] by passing [`Ordering::Release`]
/// as the `order`.
pub fn atomic_fence_rel();
/// An atomic fence.
///
/// The stabilized version of this intrinsic is available in
/// [`atomic::fence`] by passing [`Ordering::AcqRel`]
/// as the `order`.
pub fn atomic_fence_acqrel();

/// A compiler-only memory barrier.
///
/// Memory accesses will never be reordered across this barrier by the
/// compiler, but no instructions will be emitted for it. This is
/// appropriate for operations on the same thread that may be preempted,
/// such as when interacting with signal handlers.
///
/// The stabilized version of this intrinsic is available in
/// [`atomic::compiler_fence`] by passing [`Ordering::SeqCst`]
/// as the `order`.
pub fn atomic_singlethreadfence();
/// A compiler-only memory barrier.
///
/// Memory accesses will never be reordered across this barrier by the
/// compiler, but no instructions will be emitted for it. This is
/// appropriate for operations on the same thread that may be preempted,
/// such as when interacting with signal handlers.
///
/// The stabilized version of this intrinsic is available in
/// [`atomic::compiler_fence`] by passing [`Ordering::Acquire`]
/// as the `order`.
pub fn atomic_singlethreadfence_acq();
/// A compiler-only memory barrier.
///
/// Memory accesses will never be reordered across this barrier by the
/// compiler, but no instructions will be emitted for it. This is
/// appropriate for operations on the same thread that may be preempted,
/// such as when interacting with signal handlers.
///
/// The stabilized version of this intrinsic is available in
/// [`atomic::compiler_fence`] by passing [`Ordering::Release`]
/// as the `order`.
pub fn atomic_singlethreadfence_rel();
/// A compiler-only memory barrier.
///
/// Memory accesses will never be reordered across this barrier by the
/// compiler, but no instructions will be emitted for it. This is
/// appropriate for operations on the same thread that may be preempted,
/// such as when interacting with signal handlers.
///
/// The stabilized version of this intrinsic is available in
/// [`atomic::compiler_fence`] by passing [`Ordering::AcqRel`]
/// as the `order`.
pub fn atomic_singlethreadfence_acqrel();

/// Magic intrinsic that derives its meaning from attributes
/// attached to the function.
///
/// For example, dataflow uses this to inject static assertions so
/// that `rustc_peek(potentially_uninitialized)` would actually
/// double-check that dataflow did indeed compute that it is
/// uninitialized at that point in the control flow.
///
/// This intrinsic should not be used outside of the compiler.
pub fn rustc_peek<T>(_: T) -> T;

/// Aborts the execution of the process.
///
/// Note that, unlike most intrinsics, this is safe to call;
/// it does not require an `unsafe` block.
/// Therefore, implementations must not require the user to uphold
/// any safety invariants.
///
/// [`std::process::abort`](../../std/process/fn.abort.html) is to be preferred if possible,
/// as its behavior is more user-friendly and more stable.
///
/// The current implementation of `intrinsics::abort` is to invoke an invalid instruction,
/// on most platforms.
/// On Unix, the
/// process will probably terminate with a signal like `SIGABRT`, `SIGILL`, `SIGTRAP`, `SIGSEGV` or
/// `SIGBUS`. The precise behaviour is not guaranteed and not stable.
pub fn abort() -> !;

/// Informs the optimizer that this point in the code is not reachable,
/// enabling further optimizations.
///
/// N.B., this is very different from the `unreachable!()` macro: Unlike the
/// macro, which panics when it is executed, it is *undefined behavior* to
/// reach code marked with this function.
///
/// The stabilized version of this intrinsic is [`core::hint::unreachable_unchecked`].
#[rustc_const_stable(feature = "const_unreachable_unchecked", since = "1.57.0")]
pub fn unreachable() -> !;

/// Informs the optimizer that a condition is always true.
/// If the condition is false, the behavior is undefined.
///
/// No code is generated for this intrinsic, but the optimizer will try
/// to preserve it (and its condition) between passes, which may interfere
/// with optimization of surrounding code and reduce performance. It should
/// not be used if the invariant can be discovered by the optimizer on its
/// own, or if it does not enable any significant optimizations.
///
/// This intrinsic does not have a stable counterpart.
#[rustc_const_unstable(feature = "const_assume", issue = "76972")]
pub fn assume(b: bool);

/// Hints to the compiler that branch condition is likely to be true.
/// Returns the value passed to it.
///
/// Any use other than with `if` statements will probably not have an effect.
///
/// Note that, unlike most intrinsics, this is safe to call;
/// it does not require an `unsafe` block.
/// Therefore, implementations must not require the user to uphold
/// any safety invariants.
///
/// This intrinsic does not have a stable counterpart.
#[rustc_const_unstable(feature = "const_likely", issue = "none")]
pub fn likely(b: bool) -> bool;

/// Hints to the compiler that branch condition is likely to be false.
/// Returns the value passed to it.
///
/// Any use other than with `if` statements will probably not have an effect.
///
/// Note that, unlike most intrinsics, this is safe to call;
/// it does not require an `unsafe` block.
/// Therefore, implementations must not require the user to uphold
/// any safety invariants.
///
/// This intrinsic does not have a stable counterpart.
#[rustc_const_unstable(feature = "const_likely", issue = "none")]
pub fn unlikely(b: bool) -> bool;

/// Executes a breakpoint trap, for inspection by a debugger.
///
/// This intrinsic does not have a stable counterpart.
pub fn breakpoint();

/// The size of a type in bytes.
///
/// Note that, unlike most intrinsics, this is safe to call;
/// it does not require an `unsafe` block.
/// Therefore, implementations must not require the user to uphold
/// any safety invariants.
///
/// More specifically, this is the offset in bytes between successive
/// items of the same type, including alignment padding.
///
/// The stabilized version of this intrinsic is [`core::mem::size_of`].
#[rustc_const_stable(feature = "const_size_of", since = "1.40.0")]
pub fn size_of<T>() -> usize;

/// The minimum alignment of a type.
///
/// Note that, unlike most intrinsics, this is safe to call;
/// it does not require an `unsafe` block.
/// Therefore, implementations must not require the user to uphold
/// any safety invariants.
///
/// The stabilized version of this intrinsic is [`core::mem::align_of`].
#[rustc_const_stable(feature = "const_min_align_of", since = "1.40.0")]
pub fn min_align_of<T>() -> usize;
/// The preferred alignment of a type.
///
/// This intrinsic does not have a stable counterpart.
#[rustc_const_unstable(feature = "const_pref_align_of", issue = "none")]
pub fn pref_align_of<T>() -> usize;

/// The size of the referenced value in bytes.
///
/// The stabilized version of this intrinsic is [`core::mem::size_of_val`].
#[rustc_const_unstable(feature = "const_size_of_val", issue = "46571")]
pub fn size_of_val<T: ?Sized>(_: *const T) -> usize;
/// The required alignment of the referenced value.
///
/// The stabilized version of this intrinsic is [`core::mem::align_of_val`].
#[rustc_const_unstable(feature = "const_align_of_val", issue = "46571")]
pub fn min_align_of_val<T: ?Sized>(_: *const T) -> usize;

/// Gets a static string slice containing the name of a type.
///
/// Note that, unlike most intrinsics, this is safe to call;
/// it does not require an `unsafe` block.
/// Therefore, implementations must not require the user to uphold
/// any safety invariants.
///
/// The stabilized version of this intrinsic is [`core::any::type_name`].
#[rustc_const_unstable(feature = "const_type_name", issue = "63084")]
pub fn type_name<T: ?Sized>() -> &'static str;

/// Gets an identifier which is globally unique to the specified type. This
/// function will return the same value for a type regardless of whichever
/// crate it is invoked in.
///
/// Note that, unlike most intrinsics, this is safe to call;
/// it does not require an `unsafe` block.
/// Therefore, implementations must not require the user to uphold
/// any safety invariants.
///
/// The stabilized version of this intrinsic is [`core::any::TypeId::of`].
#[rustc_const_unstable(feature = "const_type_id", issue = "77125")]
pub fn type_id<T: ?Sized + 'static>() -> u64;

/// A guard for unsafe functions that cannot ever be executed if `T` is uninhabited:
/// This will statically either panic, or do nothing.
///
/// This intrinsic does not have a stable counterpart.
#[rustc_const_unstable(feature = "const_assert_type", issue = "none")]
pub fn assert_inhabited<T>();

/// A guard for unsafe functions that cannot ever be executed if `T` does not permit
/// zero-initialization: This will statically either panic, or do nothing.
///
/// This intrinsic does not have a stable counterpart.
pub fn assert_zero_valid<T>();

/// A guard for unsafe functions that cannot ever be executed if `T` has invalid
/// bit patterns: This will statically either panic, or do nothing.
///
/// This intrinsic does not have a stable counterpart.
pub fn assert_uninit_valid<T>();

/// Gets a reference to a static `Location` indicating where it was called.
///
/// Note that, unlike most intrinsics, this is safe to call;
/// it does not require an `unsafe` block.
/// Therefore, implementations must not require the user to uphold
/// any safety invariants.
///
/// Consider using [`core::panic::Location::caller`] instead.
#[rustc_const_unstable(feature = "const_caller_location", issue = "76156")]
pub fn caller_location() -> &'static crate::panic::Location<'static>;

/// Moves a value out of scope without running drop glue.
///
/// This exists solely for [`mem::forget_unsized`]; normal `forget` uses
/// `ManuallyDrop` instead.
///
/// Note that, unlike most intrinsics, this is safe to call;
/// it does not require an `unsafe` block.
/// Therefore, implementations must not require the user to uphold
/// any safety invariants.
#[rustc_const_unstable(feature = "const_intrinsic_forget", issue = "none")]
pub fn forget<T: ?Sized>(_: T);
893
/// Reinterprets the bits of a value of one type as another type.
///
/// Both types must have the same size. Neither the original, nor the result,
/// may be an [invalid value](../../nomicon/what-unsafe-does.html).
///
/// `transmute` is semantically equivalent to a bitwise move of one type
/// into another. It copies the bits from the source value into the
/// destination value, then forgets the original. It's equivalent to C's
/// `memcpy` under the hood, just like `transmute_copy`.
///
/// Because `transmute` is a by-value operation, alignment of the *transmuted values
/// themselves* is not a concern. As with any other function, the compiler already ensures
/// both `T` and `U` are properly aligned. However, when transmuting values that *point
/// elsewhere* (such as pointers, references, boxes…), the caller has to ensure proper
/// alignment of the pointed-to values.
///
/// `transmute` is **incredibly** unsafe. There are a vast number of ways to
/// cause [undefined behavior][ub] with this function. `transmute` should be
/// the absolute last resort.
///
/// Transmuting pointers to integers in a `const` context is [undefined behavior][ub].
/// Any attempt to use the resulting value for integer operations will abort const-evaluation.
///
/// The [nomicon](../../nomicon/transmutes.html) has additional
/// documentation.
///
/// [ub]: ../../reference/behavior-considered-undefined.html
///
/// # Examples
///
/// There are a few things that `transmute` is really useful for.
///
/// Turning a pointer into a function pointer. This is *not* portable to
/// machines where function pointers and data pointers have different sizes.
///
/// ```
/// fn foo() -> i32 {
///     0
/// }
/// let pointer = foo as *const ();
/// let function = unsafe {
///     std::mem::transmute::<*const (), fn() -> i32>(pointer)
/// };
/// assert_eq!(function(), 0);
/// ```
///
/// Extending a lifetime, or shortening an invariant lifetime. This is
/// advanced, very unsafe Rust!
///
/// ```
/// struct R<'a>(&'a i32);
/// unsafe fn extend_lifetime<'b>(r: R<'b>) -> R<'static> {
///     std::mem::transmute::<R<'b>, R<'static>>(r)
/// }
///
/// unsafe fn shorten_invariant_lifetime<'b, 'c>(r: &'b mut R<'static>)
///                                              -> &'b mut R<'c> {
///     std::mem::transmute::<&'b mut R<'static>, &'b mut R<'c>>(r)
/// }
/// ```
///
/// # Alternatives
///
/// Don't despair: many uses of `transmute` can be achieved through other means.
/// Below are common applications of `transmute` which can be replaced with safer
/// constructs.
///
/// Turning raw bytes (`&[u8]`) to `u32`, `f64`, etc.:
///
/// ```
/// let raw_bytes = [0x78, 0x56, 0x34, 0x12];
///
/// let num = unsafe {
///     std::mem::transmute::<[u8; 4], u32>(raw_bytes)
/// };
///
/// // use `u32::from_ne_bytes` instead
/// let num = u32::from_ne_bytes(raw_bytes);
/// // or use `u32::from_le_bytes` or `u32::from_be_bytes` to specify the endianness
/// let num = u32::from_le_bytes(raw_bytes);
/// assert_eq!(num, 0x12345678);
/// let num = u32::from_be_bytes(raw_bytes);
/// assert_eq!(num, 0x78563412);
/// ```
///
/// Turning a pointer into a `usize`:
///
/// ```
/// let ptr = &0;
/// let ptr_num_transmute = unsafe {
///     std::mem::transmute::<&i32, usize>(ptr)
/// };
///
/// // Use an `as` cast instead
/// let ptr_num_cast = ptr as *const i32 as usize;
/// ```
///
/// Turning a `*mut T` into an `&mut T`:
///
/// ```
/// let ptr: *mut i32 = &mut 0;
/// let ref_transmuted = unsafe {
///     std::mem::transmute::<*mut i32, &mut i32>(ptr)
/// };
///
/// // Use a reborrow instead
/// let ref_casted = unsafe { &mut *ptr };
/// ```
///
/// Turning an `&mut T` into an `&mut U`:
///
/// ```
/// let ptr = &mut 0;
/// let val_transmuted = unsafe {
///     std::mem::transmute::<&mut i32, &mut u32>(ptr)
/// };
///
/// // Now, put together `as` and reborrowing - note the chaining of `as`
/// // `as` is not transitive
/// let val_casts = unsafe { &mut *(ptr as *mut i32 as *mut u32) };
/// ```
///
/// Turning an `&str` into a `&[u8]`:
///
/// ```
/// // this is not a good way to do this.
/// let slice = unsafe { std::mem::transmute::<&str, &[u8]>("Rust") };
/// assert_eq!(slice, &[82, 117, 115, 116]);
///
/// // You could use `str::as_bytes`
/// let slice = "Rust".as_bytes();
/// assert_eq!(slice, &[82, 117, 115, 116]);
///
/// // Or, just use a byte string, if you have control over the string
/// // literal
/// assert_eq!(b"Rust", &[82, 117, 115, 116]);
/// ```
///
/// Turning a `Vec<&T>` into a `Vec<Option<&T>>`.
///
/// To transmute the inner type of the contents of a container, you must make sure to not
/// violate any of the container's invariants. For `Vec`, this means that both the size
/// *and alignment* of the inner types have to match. Other containers might rely on the
/// size of the type, alignment, or even the `TypeId`, in which case transmuting wouldn't
/// be possible at all without violating the container invariants.
///
/// ```
/// let store = [0, 1, 2, 3];
/// let v_orig = store.iter().collect::<Vec<&i32>>();
///
/// // clone the vector as we will reuse them later
/// let v_clone = v_orig.clone();
///
/// // Using transmute: this relies on the unspecified data layout of `Vec`, which is a
/// // bad idea and could cause Undefined Behavior.
/// // However, it is no-copy.
/// let v_transmuted = unsafe {
///     std::mem::transmute::<Vec<&i32>, Vec<Option<&i32>>>(v_clone)
/// };
///
/// let v_clone = v_orig.clone();
///
/// // This is the suggested, safe way.
/// // It does copy the entire vector, though, into a new array.
/// let v_collected = v_clone.into_iter()
///                          .map(Some)
///                          .collect::<Vec<Option<&i32>>>();
///
/// let v_clone = v_orig.clone();
///
/// // This is the proper no-copy, unsafe way of "transmuting" a `Vec`, without relying on the
/// // data layout. Instead of literally calling `transmute`, we perform a pointer cast, but
/// // in terms of converting the original inner type (`&i32`) to the new one (`Option<&i32>`),
/// // this has all the same caveats. Besides the information provided above, also consult the
/// // [`from_raw_parts`] documentation.
/// let v_from_raw = unsafe {
// FIXME Update this when vec_into_raw_parts is stabilized
///     // Ensure the original vector is not dropped.
///     let mut v_clone = std::mem::ManuallyDrop::new(v_clone);
///     Vec::from_raw_parts(v_clone.as_mut_ptr() as *mut Option<&i32>,
///                         v_clone.len(),
///                         v_clone.capacity())
/// };
/// ```
///
/// [`from_raw_parts`]: ../../std/vec/struct.Vec.html#method.from_raw_parts
///
/// Implementing `split_at_mut`:
///
/// ```
/// use std::{slice, mem};
///
/// // There are multiple ways to do this, and there are multiple problems
/// // with the following (transmute) way.
/// fn split_at_mut_transmute<T>(slice: &mut [T], mid: usize)
///                              -> (&mut [T], &mut [T]) {
///     let len = slice.len();
///     assert!(mid <= len);
///     unsafe {
///         let slice2 = mem::transmute::<&mut [T], &mut [T]>(slice);
///         // first: transmute is not type safe; all it checks is that T and
///         // U are of the same size. Second, right here, you have two
///         // mutable references pointing to the same memory.
///         (&mut slice[0..mid], &mut slice2[mid..len])
///     }
/// }
///
/// // This gets rid of the type safety problems; `&mut *` will *only* give
/// // you an `&mut T` from an `&mut T` or `*mut T`.
/// fn split_at_mut_casts<T>(slice: &mut [T], mid: usize)
///                          -> (&mut [T], &mut [T]) {
///     let len = slice.len();
///     assert!(mid <= len);
///     unsafe {
///         let slice2 = &mut *(slice as *mut [T]);
///         // however, you still have two mutable references pointing to
///         // the same memory.
///         (&mut slice[0..mid], &mut slice2[mid..len])
///     }
/// }
///
/// // This is how the standard library does it. This is the best method, if
/// // you need to do something like this
/// fn split_at_stdlib<T>(slice: &mut [T], mid: usize)
///                       -> (&mut [T], &mut [T]) {
///     let len = slice.len();
///     assert!(mid <= len);
///     unsafe {
///         let ptr = slice.as_mut_ptr();
///         // This now has three mutable references pointing at the same
///         // memory. `slice`, the rvalue ret.0, and the rvalue ret.1.
///         // `slice` is never used after `let ptr = ...`, and so one can
///         // treat it as "dead", and therefore, you only have two real
///         // mutable slices.
///         (slice::from_raw_parts_mut(ptr, mid),
///          slice::from_raw_parts_mut(ptr.add(mid), len - mid))
///     }
/// }
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_stable(feature = "const_transmute", since = "1.46.0")]
#[rustc_diagnostic_item = "transmute"]
pub fn transmute<T, U>(e: T) -> U;
1137
/// Returns `true` if the actual type given as `T` requires drop
/// glue; returns `false` if the actual type provided for `T`
/// implements `Copy`.
///
/// If the actual type neither requires drop glue nor implements
/// `Copy`, then the return value of this function is unspecified.
///
/// Note that, unlike most intrinsics, this is safe to call;
/// it does not require an `unsafe` block.
/// Therefore, implementations must not require the user to uphold
/// any safety invariants.
///
/// The stabilized version of this intrinsic is [`mem::needs_drop`](crate::mem::needs_drop).
#[rustc_const_stable(feature = "const_needs_drop", since = "1.40.0")]
pub fn needs_drop<T>() -> bool;

/// Calculates the offset from a pointer.
///
/// The `offset` is counted in units of `T`; e.g., an offset of `3` advances
/// the pointer by `3 * size_of::<T>()` bytes.
///
/// This is implemented as an intrinsic to avoid converting to and from an
/// integer, since the conversion would throw away aliasing information.
///
/// # Safety
///
/// Both the starting and resulting pointer must be either in bounds or one
/// byte past the end of an allocated object. If either pointer is out of
/// bounds or arithmetic overflow occurs then any further use of the
/// returned value will result in undefined behavior.
///
/// The stabilized version of this intrinsic is [`pointer::offset`].
#[must_use = "returns a new pointer rather than modifying its argument"]
#[rustc_const_unstable(feature = "const_ptr_offset", issue = "71499")]
pub fn offset<T>(dst: *const T, offset: isize) -> *const T;

/// Calculates the offset from a pointer, potentially wrapping.
///
/// The `offset` is counted in units of `T`, as for [`offset`].
///
/// This is implemented as an intrinsic to avoid converting to and from an
/// integer, since the conversion inhibits certain optimizations.
///
/// # Safety
///
/// Unlike the `offset` intrinsic, this intrinsic does not restrict the
/// resulting pointer to point into or one byte past the end of an allocated
/// object, and it wraps with two's complement arithmetic. The resulting
/// value is not necessarily valid to be used to actually access memory.
///
/// The stabilized version of this intrinsic is [`pointer::wrapping_offset`].
#[must_use = "returns a new pointer rather than modifying its argument"]
#[rustc_const_unstable(feature = "const_ptr_offset", issue = "71499")]
pub fn arith_offset<T>(dst: *const T, offset: isize) -> *const T;
1187
/// Equivalent to the appropriate `llvm.memcpy.p0i8.p0i8.*` intrinsic, with
/// a size of `count * size_of::<T>()` and an alignment of
/// `min_align_of::<T>()`
///
/// The volatile parameter is set to `true`, so it will not be optimized out
/// unless size is equal to zero.
///
/// This intrinsic does not have a stable counterpart.
pub fn volatile_copy_nonoverlapping_memory<T>(dst: *mut T, src: *const T, count: usize);
/// Equivalent to the appropriate `llvm.memmove.p0i8.p0i8.*` intrinsic, with
/// a size of `count * size_of::<T>()` and an alignment of
/// `min_align_of::<T>()`
///
/// The volatile parameter is set to `true`, so it will not be optimized out
/// unless size is equal to zero.
///
/// This intrinsic does not have a stable counterpart.
pub fn volatile_copy_memory<T>(dst: *mut T, src: *const T, count: usize);
/// Equivalent to the appropriate `llvm.memset.p0i8.*` intrinsic, with a
/// size of `count * size_of::<T>()` and an alignment of
/// `min_align_of::<T>()`.
///
/// The volatile parameter is set to `true`, so it will not be optimized out
/// unless size is equal to zero.
///
/// This intrinsic does not have a stable counterpart.
pub fn volatile_set_memory<T>(dst: *mut T, val: u8, count: usize);
1215
/// Performs a volatile load from the `src` pointer.
///
/// The stabilized version of this intrinsic is [`core::ptr::read_volatile`].
pub fn volatile_load<T>(src: *const T) -> T;
/// Performs a volatile store to the `dst` pointer.
///
/// The stabilized version of this intrinsic is [`core::ptr::write_volatile`].
pub fn volatile_store<T>(dst: *mut T, val: T);

/// Performs a volatile load from the `src` pointer.
/// The pointer is not required to be aligned.
///
/// This intrinsic does not have a stable counterpart.
pub fn unaligned_volatile_load<T>(src: *const T) -> T;
/// Performs a volatile store to the `dst` pointer.
/// The pointer is not required to be aligned.
///
/// This intrinsic does not have a stable counterpart.
pub fn unaligned_volatile_store<T>(dst: *mut T, val: T);
1235
/// Returns the square root of an `f32`.
///
/// The stabilized version of this intrinsic is
/// [`f32::sqrt`](../../std/primitive.f32.html#method.sqrt)
pub fn sqrtf32(x: f32) -> f32;
/// Returns the square root of an `f64`.
///
/// The stabilized version of this intrinsic is
/// [`f64::sqrt`](../../std/primitive.f64.html#method.sqrt)
pub fn sqrtf64(x: f64) -> f64;

/// Raises an `f32` to an integer power; returns `a` raised to the power `x`.
///
/// The stabilized version of this intrinsic is
/// [`f32::powi`](../../std/primitive.f32.html#method.powi)
pub fn powif32(a: f32, x: i32) -> f32;
/// Raises an `f64` to an integer power; returns `a` raised to the power `x`.
///
/// The stabilized version of this intrinsic is
/// [`f64::powi`](../../std/primitive.f64.html#method.powi)
pub fn powif64(a: f64, x: i32) -> f64;

/// Returns the sine of an `f32` (`x` in radians).
///
/// The stabilized version of this intrinsic is
/// [`f32::sin`](../../std/primitive.f32.html#method.sin)
pub fn sinf32(x: f32) -> f32;
/// Returns the sine of an `f64` (`x` in radians).
///
/// The stabilized version of this intrinsic is
/// [`f64::sin`](../../std/primitive.f64.html#method.sin)
pub fn sinf64(x: f64) -> f64;

/// Returns the cosine of an `f32` (`x` in radians).
///
/// The stabilized version of this intrinsic is
/// [`f32::cos`](../../std/primitive.f32.html#method.cos)
pub fn cosf32(x: f32) -> f32;
/// Returns the cosine of an `f64` (`x` in radians).
///
/// The stabilized version of this intrinsic is
/// [`f64::cos`](../../std/primitive.f64.html#method.cos)
pub fn cosf64(x: f64) -> f64;

/// Raises an `f32` to an `f32` power; returns `a` raised to the power `x`.
///
/// The stabilized version of this intrinsic is
/// [`f32::powf`](../../std/primitive.f32.html#method.powf)
pub fn powf32(a: f32, x: f32) -> f32;
/// Raises an `f64` to an `f64` power; returns `a` raised to the power `x`.
///
/// The stabilized version of this intrinsic is
/// [`f64::powf`](../../std/primitive.f64.html#method.powf)
pub fn powf64(a: f64, x: f64) -> f64;
1290
/// Returns the exponential of an `f32`, i.e. `e` raised to the power `x`.
///
/// The stabilized version of this intrinsic is
/// [`f32::exp`](../../std/primitive.f32.html#method.exp)
pub fn expf32(x: f32) -> f32;
/// Returns the exponential of an `f64`, i.e. `e` raised to the power `x`.
///
/// The stabilized version of this intrinsic is
/// [`f64::exp`](../../std/primitive.f64.html#method.exp)
pub fn expf64(x: f64) -> f64;

/// Returns 2 raised to the power of an `f32`.
///
/// The stabilized version of this intrinsic is
/// [`f32::exp2`](../../std/primitive.f32.html#method.exp2)
pub fn exp2f32(x: f32) -> f32;
/// Returns 2 raised to the power of an `f64`.
///
/// The stabilized version of this intrinsic is
/// [`f64::exp2`](../../std/primitive.f64.html#method.exp2)
pub fn exp2f64(x: f64) -> f64;

/// Returns the natural (base `e`) logarithm of an `f32`.
///
/// The stabilized version of this intrinsic is
/// [`f32::ln`](../../std/primitive.f32.html#method.ln)
pub fn logf32(x: f32) -> f32;
/// Returns the natural (base `e`) logarithm of an `f64`.
///
/// The stabilized version of this intrinsic is
/// [`f64::ln`](../../std/primitive.f64.html#method.ln)
pub fn logf64(x: f64) -> f64;

/// Returns the base 10 logarithm of an `f32`.
///
/// The stabilized version of this intrinsic is
/// [`f32::log10`](../../std/primitive.f32.html#method.log10)
pub fn log10f32(x: f32) -> f32;
/// Returns the base 10 logarithm of an `f64`.
///
/// The stabilized version of this intrinsic is
/// [`f64::log10`](../../std/primitive.f64.html#method.log10)
pub fn log10f64(x: f64) -> f64;

/// Returns the base 2 logarithm of an `f32`.
///
/// The stabilized version of this intrinsic is
/// [`f32::log2`](../../std/primitive.f32.html#method.log2)
pub fn log2f32(x: f32) -> f32;
/// Returns the base 2 logarithm of an `f64`.
///
/// The stabilized version of this intrinsic is
/// [`f64::log2`](../../std/primitive.f64.html#method.log2)
pub fn log2f64(x: f64) -> f64;
1345
/// Returns `a * b + c` for `f32` values, computed as a fused
/// multiply-add (a single rounding step; see [`f32::mul_add`]).
///
/// The stabilized version of this intrinsic is
/// [`f32::mul_add`](../../std/primitive.f32.html#method.mul_add)
pub fn fmaf32(a: f32, b: f32, c: f32) -> f32;
/// Returns `a * b + c` for `f64` values, computed as a fused
/// multiply-add (a single rounding step; see [`f64::mul_add`]).
///
/// The stabilized version of this intrinsic is
/// [`f64::mul_add`](../../std/primitive.f64.html#method.mul_add)
pub fn fmaf64(a: f64, b: f64, c: f64) -> f64;

/// Returns the absolute value of an `f32`.
///
/// The stabilized version of this intrinsic is
/// [`f32::abs`](../../std/primitive.f32.html#method.abs)
pub fn fabsf32(x: f32) -> f32;
/// Returns the absolute value of an `f64`.
///
/// The stabilized version of this intrinsic is
/// [`f64::abs`](../../std/primitive.f64.html#method.abs)
pub fn fabsf64(x: f64) -> f64;
1367
/// Returns the minimum of two `f32` values.
/// If one of the arguments is NaN, the other argument is returned
/// (IEEE 754-2008 `minNum` semantics; see [`f32::min`]).
///
/// Note that, unlike most intrinsics, this is safe to call;
/// it does not require an `unsafe` block.
/// Therefore, implementations must not require the user to uphold
/// any safety invariants.
///
/// The stabilized version of this intrinsic is
/// [`f32::min`]
pub fn minnumf32(x: f32, y: f32) -> f32;
/// Returns the minimum of two `f64` values.
/// If one of the arguments is NaN, the other argument is returned
/// (IEEE 754-2008 `minNum` semantics; see [`f64::min`]).
///
/// Note that, unlike most intrinsics, this is safe to call;
/// it does not require an `unsafe` block.
/// Therefore, implementations must not require the user to uphold
/// any safety invariants.
///
/// The stabilized version of this intrinsic is
/// [`f64::min`]
pub fn minnumf64(x: f64, y: f64) -> f64;
/// Returns the maximum of two `f32` values.
/// If one of the arguments is NaN, the other argument is returned
/// (IEEE 754-2008 `maxNum` semantics; see [`f32::max`]).
///
/// Note that, unlike most intrinsics, this is safe to call;
/// it does not require an `unsafe` block.
/// Therefore, implementations must not require the user to uphold
/// any safety invariants.
///
/// The stabilized version of this intrinsic is
/// [`f32::max`]
pub fn maxnumf32(x: f32, y: f32) -> f32;
/// Returns the maximum of two `f64` values.
/// If one of the arguments is NaN, the other argument is returned
/// (IEEE 754-2008 `maxNum` semantics; see [`f64::max`]).
///
/// Note that, unlike most intrinsics, this is safe to call;
/// it does not require an `unsafe` block.
/// Therefore, implementations must not require the user to uphold
/// any safety invariants.
///
/// The stabilized version of this intrinsic is
/// [`f64::max`]
pub fn maxnumf64(x: f64, y: f64) -> f64;
1408
/// Copies the sign from `y` to `x` for `f32` values; returns a value with
/// the magnitude of `x` and the sign of `y`.
///
/// The stabilized version of this intrinsic is
/// [`f32::copysign`](../../std/primitive.f32.html#method.copysign)
pub fn copysignf32(x: f32, y: f32) -> f32;
/// Copies the sign from `y` to `x` for `f64` values; returns a value with
/// the magnitude of `x` and the sign of `y`.
///
/// The stabilized version of this intrinsic is
/// [`f64::copysign`](../../std/primitive.f64.html#method.copysign)
pub fn copysignf64(x: f64, y: f64) -> f64;
1419
/// Returns the largest integer less than or equal to an `f32`.
///
/// The stabilized version of this intrinsic is
/// [`f32::floor`](../../std/primitive.f32.html#method.floor)
pub fn floorf32(x: f32) -> f32;
/// Returns the largest integer less than or equal to an `f64`.
///
/// The stabilized version of this intrinsic is
/// [`f64::floor`](../../std/primitive.f64.html#method.floor)
pub fn floorf64(x: f64) -> f64;

/// Returns the smallest integer greater than or equal to an `f32`.
///
/// The stabilized version of this intrinsic is
/// [`f32::ceil`](../../std/primitive.f32.html#method.ceil)
pub fn ceilf32(x: f32) -> f32;
/// Returns the smallest integer greater than or equal to an `f64`.
///
/// The stabilized version of this intrinsic is
/// [`f64::ceil`](../../std/primitive.f64.html#method.ceil)
pub fn ceilf64(x: f64) -> f64;

/// Returns the integer part of an `f32`, i.e. rounds toward zero.
///
/// The stabilized version of this intrinsic is
/// [`f32::trunc`](../../std/primitive.f32.html#method.trunc)
pub fn truncf32(x: f32) -> f32;
/// Returns the integer part of an `f64`, i.e. rounds toward zero.
///
/// The stabilized version of this intrinsic is
/// [`f64::trunc`](../../std/primitive.f64.html#method.trunc)
pub fn truncf64(x: f64) -> f64;
1452
/// Returns the nearest integer to an `f32`. May raise an inexact floating-point exception
/// if the argument is not an integer.
/// Per LLVM's `llvm.rint`, rounding follows the current floating-point
/// environment (ties to even in the default environment).
pub fn rintf32(x: f32) -> f32;
/// Returns the nearest integer to an `f64`. May raise an inexact floating-point exception
/// if the argument is not an integer.
/// Per LLVM's `llvm.rint`, rounding follows the current floating-point
/// environment (ties to even in the default environment).
pub fn rintf64(x: f64) -> f64;

/// Returns the nearest integer to an `f32`.
/// Like `rintf32` (LLVM's `llvm.nearbyint`), but does not raise an
/// inexact floating-point exception.
///
/// This intrinsic does not have a stable counterpart.
pub fn nearbyintf32(x: f32) -> f32;
/// Returns the nearest integer to an `f64`.
/// Like `rintf64` (LLVM's `llvm.nearbyint`), but does not raise an
/// inexact floating-point exception.
///
/// This intrinsic does not have a stable counterpart.
pub fn nearbyintf64(x: f64) -> f64;

/// Returns the nearest integer to an `f32`. Rounds half-way cases away from zero.
///
/// The stabilized version of this intrinsic is
/// [`f32::round`](../../std/primitive.f32.html#method.round)
pub fn roundf32(x: f32) -> f32;
/// Returns the nearest integer to an `f64`. Rounds half-way cases away from zero.
///
/// The stabilized version of this intrinsic is
/// [`f64::round`](../../std/primitive.f64.html#method.round)
pub fn roundf64(x: f64) -> f64;
1479
/// Float addition that allows optimizations based on algebraic rules.
/// May assume inputs are finite.
/// NOTE(review): "algebraic rules" presumably corresponds to LLVM's
/// fast-math flags — confirm against the codegen in
/// `compiler/rustc_codegen_llvm/src/intrinsic.rs`.
///
/// This intrinsic does not have a stable counterpart.
pub fn fadd_fast<T: Copy>(a: T, b: T) -> T;

/// Float subtraction that allows optimizations based on algebraic rules.
/// May assume inputs are finite.
///
/// This intrinsic does not have a stable counterpart.
pub fn fsub_fast<T: Copy>(a: T, b: T) -> T;

/// Float multiplication that allows optimizations based on algebraic rules.
/// May assume inputs are finite.
///
/// This intrinsic does not have a stable counterpart.
pub fn fmul_fast<T: Copy>(a: T, b: T) -> T;

/// Float division that allows optimizations based on algebraic rules.
/// May assume inputs are finite.
///
/// This intrinsic does not have a stable counterpart.
pub fn fdiv_fast<T: Copy>(a: T, b: T) -> T;

/// Float remainder that allows optimizations based on algebraic rules.
/// May assume inputs are finite.
///
/// This intrinsic does not have a stable counterpart.
pub fn frem_fast<T: Copy>(a: T, b: T) -> T;
1509
/// Convert with LLVM’s fptoui/fptosi, which may return undef for values out of range
/// (<https://github.com/rust-lang/rust/issues/10184>)
///
/// # Safety
///
/// The value must not be NaN or infinite, and must be representable in the
/// target integer type (see the requirements of [`f32::to_int_unchecked`]).
///
/// Stabilized as [`f32::to_int_unchecked`] and [`f64::to_int_unchecked`].
pub fn float_to_int_unchecked<Float: Copy, Int: Copy>(value: Float) -> Int;
1515
/// Returns the number of bits set in an integer type `T`.
/// An `x` with value `0` returns `0`.
///
/// Note that, unlike most intrinsics, this is safe to call;
/// it does not require an `unsafe` block.
/// Therefore, implementations must not require the user to uphold
/// any safety invariants.
///
/// The stabilized versions of this intrinsic are available on the integer
/// primitives via the `count_ones` method. For example,
/// [`u32::count_ones`]
#[rustc_const_stable(feature = "const_ctpop", since = "1.40.0")]
pub fn ctpop<T: Copy>(x: T) -> T;
1528
/// Returns the number of leading unset bits (zeroes) in an integer type `T`.
///
/// Note that, unlike most intrinsics, this is safe to call;
/// it does not require an `unsafe` block.
/// Therefore, implementations must not require the user to uphold
/// any safety invariants.
///
/// The stabilized versions of this intrinsic are available on the integer
/// primitives via the `leading_zeros` method. For example,
/// [`u32::leading_zeros`]
///
/// # Examples
///
/// ```
/// #![feature(core_intrinsics)]
///
/// use std::intrinsics::ctlz;
///
/// let x = 0b0001_1100_u8;
/// let num_leading = ctlz(x);
/// assert_eq!(num_leading, 3);
/// ```
///
/// An `x` with value `0` will return the bit width of `T`.
///
/// ```
/// #![feature(core_intrinsics)]
///
/// use std::intrinsics::ctlz;
///
/// let x = 0u16;
/// let num_leading = ctlz(x);
/// assert_eq!(num_leading, 16);
/// ```
#[rustc_const_stable(feature = "const_ctlz", since = "1.40.0")]
pub fn ctlz<T: Copy>(x: T) -> T;

/// Like `ctlz`, but extra-unsafe as it returns `undef` when
/// given an `x` with value `0`.
/// The caller must ensure `x != 0`.
///
/// This intrinsic does not have a stable counterpart.
///
/// # Examples
///
/// ```
/// #![feature(core_intrinsics)]
///
/// use std::intrinsics::ctlz_nonzero;
///
/// let x = 0b0001_1100_u8;
/// let num_leading = unsafe { ctlz_nonzero(x) };
/// assert_eq!(num_leading, 3);
/// ```
#[rustc_const_stable(feature = "constctlz", since = "1.50.0")]
pub fn ctlz_nonzero<T: Copy>(x: T) -> T;
1584
/// Returns the number of trailing unset bits (zeroes) in an integer type `T`.
///
/// Note that, unlike most intrinsics, this is safe to call;
/// it does not require an `unsafe` block.
/// Therefore, implementations must not require the user to uphold
/// any safety invariants.
///
/// The stabilized versions of this intrinsic are available on the integer
/// primitives via the `trailing_zeros` method. For example,
/// [`u32::trailing_zeros`]
///
/// # Examples
///
/// ```
/// #![feature(core_intrinsics)]
///
/// use std::intrinsics::cttz;
///
/// let x = 0b0011_1000_u8;
/// let num_trailing = cttz(x);
/// assert_eq!(num_trailing, 3);
/// ```
///
/// An `x` with value `0` will return the bit width of `T`:
///
/// ```
/// #![feature(core_intrinsics)]
///
/// use std::intrinsics::cttz;
///
/// let x = 0u16;
/// let num_trailing = cttz(x);
/// assert_eq!(num_trailing, 16);
/// ```
#[rustc_const_stable(feature = "const_cttz", since = "1.40.0")]
pub fn cttz<T: Copy>(x: T) -> T;

/// Like `cttz`, but extra-unsafe as it returns `undef` when
/// given an `x` with value `0`.
/// The caller must ensure `x != 0`.
///
/// This intrinsic does not have a stable counterpart.
///
/// # Examples
///
/// ```
/// #![feature(core_intrinsics)]
///
/// use std::intrinsics::cttz_nonzero;
///
/// let x = 0b0011_1000_u8;
/// let num_trailing = unsafe { cttz_nonzero(x) };
/// assert_eq!(num_trailing, 3);
/// ```
#[rustc_const_stable(feature = "const_cttz", since = "1.53.0")]
pub fn cttz_nonzero<T: Copy>(x: T) -> T;
1640
/// Reverses the order of the bytes in an integer type `T`.
///
/// Note that, unlike most intrinsics, this is safe to call;
/// it does not require an `unsafe` block.
/// Therefore, implementations must not require the user to uphold
/// any safety invariants.
///
/// The stabilized versions of this intrinsic are available on the integer
/// primitives via the `swap_bytes` method. For example,
/// [`u32::swap_bytes`]
#[rustc_const_stable(feature = "const_bswap", since = "1.40.0")]
pub fn bswap<T: Copy>(x: T) -> T;

/// Reverses the order of the bits in an integer type `T`.
///
/// Note that, unlike most intrinsics, this is safe to call;
/// it does not require an `unsafe` block.
/// Therefore, implementations must not require the user to uphold
/// any safety invariants.
///
/// The stabilized versions of this intrinsic are available on the integer
/// primitives via the `reverse_bits` method. For example,
/// [`u32::reverse_bits`]
#[rustc_const_stable(feature = "const_bitreverse", since = "1.40.0")]
pub fn bitreverse<T: Copy>(x: T) -> T;
1666
/// Performs checked integer addition.
/// Returns the wrapped sum together with a boolean indicating whether
/// an arithmetic overflow occurred.
///
/// Note that, unlike most intrinsics, this is safe to call;
/// it does not require an `unsafe` block.
/// Therefore, implementations must not require the user to uphold
/// any safety invariants.
///
/// The stabilized versions of this intrinsic are available on the integer
/// primitives via the `overflowing_add` method. For example,
/// [`u32::overflowing_add`]
#[rustc_const_stable(feature = "const_int_overflow", since = "1.40.0")]
pub fn add_with_overflow<T: Copy>(x: T, y: T) -> (T, bool);

/// Performs checked integer subtraction.
/// Returns the wrapped difference together with a boolean indicating
/// whether an arithmetic overflow occurred.
///
/// Note that, unlike most intrinsics, this is safe to call;
/// it does not require an `unsafe` block.
/// Therefore, implementations must not require the user to uphold
/// any safety invariants.
///
/// The stabilized versions of this intrinsic are available on the integer
/// primitives via the `overflowing_sub` method. For example,
/// [`u32::overflowing_sub`]
#[rustc_const_stable(feature = "const_int_overflow", since = "1.40.0")]
pub fn sub_with_overflow<T: Copy>(x: T, y: T) -> (T, bool);

/// Performs checked integer multiplication.
/// Returns the wrapped product together with a boolean indicating
/// whether an arithmetic overflow occurred.
///
/// Note that, unlike most intrinsics, this is safe to call;
/// it does not require an `unsafe` block.
/// Therefore, implementations must not require the user to uphold
/// any safety invariants.
///
/// The stabilized versions of this intrinsic are available on the integer
/// primitives via the `overflowing_mul` method. For example,
/// [`u32::overflowing_mul`]
#[rustc_const_stable(feature = "const_int_overflow", since = "1.40.0")]
pub fn mul_with_overflow<T: Copy>(x: T, y: T) -> (T, bool);
1705
/// Performs an exact division, resulting in undefined behavior where
/// `x % y != 0` or `y == 0` or `x == T::MIN && y == -1`.
///
/// This intrinsic does not have a stable counterpart.
pub fn exact_div<T: Copy>(x: T, y: T) -> T;

/// Performs an unchecked division, resulting in undefined behavior
/// where `y == 0` or `x == T::MIN && y == -1`.
///
/// Safe wrappers for this intrinsic are available on the integer
/// primitives via the `checked_div` method. For example,
/// [`u32::checked_div`]
#[rustc_const_stable(feature = "const_int_unchecked_arith", since = "1.52.0")]
pub fn unchecked_div<T: Copy>(x: T, y: T) -> T;
/// Returns the remainder of an unchecked division, resulting in
/// undefined behavior when `y == 0` or `x == T::MIN && y == -1`.
///
/// Safe wrappers for this intrinsic are available on the integer
/// primitives via the `checked_rem` method. For example,
/// [`u32::checked_rem`]
#[rustc_const_stable(feature = "const_int_unchecked_arith", since = "1.52.0")]
pub fn unchecked_rem<T: Copy>(x: T, y: T) -> T;
1728
/// Performs an unchecked left shift, resulting in undefined behavior when
/// `y < 0` or `y >= N`, where N is the width of `T` in bits.
///
/// Safe wrappers for this intrinsic are available on the integer
/// primitives via the `checked_shl` method. For example,
/// [`u32::checked_shl`]
#[rustc_const_stable(feature = "const_int_unchecked", since = "1.40.0")]
pub fn unchecked_shl<T: Copy>(x: T, y: T) -> T;
/// Performs an unchecked right shift, resulting in undefined behavior when
/// `y < 0` or `y >= N`, where N is the width of `T` in bits.
///
/// Safe wrappers for this intrinsic are available on the integer
/// primitives via the `checked_shr` method. For example,
/// [`u32::checked_shr`]
#[rustc_const_stable(feature = "const_int_unchecked", since = "1.40.0")]
pub fn unchecked_shr<T: Copy>(x: T, y: T) -> T;
1745
/// Returns the result of an unchecked addition, resulting in
/// undefined behavior when `x + y > T::MAX` or `x + y < T::MIN`,
/// i.e. when the mathematical sum is not representable in `T`.
///
/// This intrinsic does not have a stable counterpart.
#[rustc_const_unstable(feature = "const_int_unchecked_arith", issue = "none")]
pub fn unchecked_add<T: Copy>(x: T, y: T) -> T;

/// Returns the result of an unchecked subtraction, resulting in
/// undefined behavior when `x - y > T::MAX` or `x - y < T::MIN`,
/// i.e. when the mathematical difference is not representable in `T`.
///
/// This intrinsic does not have a stable counterpart.
#[rustc_const_unstable(feature = "const_int_unchecked_arith", issue = "none")]
pub fn unchecked_sub<T: Copy>(x: T, y: T) -> T;

/// Returns the result of an unchecked multiplication, resulting in
/// undefined behavior when `x * y > T::MAX` or `x * y < T::MIN`,
/// i.e. when the mathematical product is not representable in `T`.
///
/// This intrinsic does not have a stable counterpart.
#[rustc_const_unstable(feature = "const_int_unchecked_arith", issue = "none")]
pub fn unchecked_mul<T: Copy>(x: T, y: T) -> T;
1766
/// Performs rotate left: returns `x` rotated left by `y` bits, with
/// bits shifted out of the high end wrapping around to the low end.
///
/// Note that, unlike most intrinsics, this is safe to call;
/// it does not require an `unsafe` block.
/// Therefore, implementations must not require the user to uphold
/// any safety invariants.
///
/// The stabilized versions of this intrinsic are available on the integer
/// primitives via the `rotate_left` method. For example,
/// [`u32::rotate_left`]
#[rustc_const_stable(feature = "const_int_rotate", since = "1.40.0")]
pub fn rotate_left<T: Copy>(x: T, y: T) -> T;

/// Performs rotate right: returns `x` rotated right by `y` bits, with
/// bits shifted out of the low end wrapping around to the high end.
///
/// Note that, unlike most intrinsics, this is safe to call;
/// it does not require an `unsafe` block.
/// Therefore, implementations must not require the user to uphold
/// any safety invariants.
///
/// The stabilized versions of this intrinsic are available on the integer
/// primitives via the `rotate_right` method. For example,
/// [`u32::rotate_right`]
#[rustc_const_stable(feature = "const_int_rotate", since = "1.40.0")]
pub fn rotate_right<T: Copy>(x: T, y: T) -> T;
1792
/// Returns (a + b) mod 2<sup>N</sup>, where N is the width of T in bits.
/// Overflow wraps around silently; this never panics.
///
/// Note that, unlike most intrinsics, this is safe to call;
/// it does not require an `unsafe` block.
/// Therefore, implementations must not require the user to uphold
/// any safety invariants.
///
/// The stabilized versions of this intrinsic are available on the integer
/// primitives via the `wrapping_add` method. For example,
/// [`u32::wrapping_add`]
#[rustc_const_stable(feature = "const_int_wrapping", since = "1.40.0")]
pub fn wrapping_add<T: Copy>(a: T, b: T) -> T;
/// Returns (a - b) mod 2<sup>N</sup>, where N is the width of T in bits.
/// Overflow wraps around silently; this never panics.
///
/// Note that, unlike most intrinsics, this is safe to call;
/// it does not require an `unsafe` block.
/// Therefore, implementations must not require the user to uphold
/// any safety invariants.
///
/// The stabilized versions of this intrinsic are available on the integer
/// primitives via the `wrapping_sub` method. For example,
/// [`u32::wrapping_sub`]
#[rustc_const_stable(feature = "const_int_wrapping", since = "1.40.0")]
pub fn wrapping_sub<T: Copy>(a: T, b: T) -> T;
/// Returns (a * b) mod 2<sup>N</sup>, where N is the width of T in bits.
/// Overflow wraps around silently; this never panics.
///
/// Note that, unlike most intrinsics, this is safe to call;
/// it does not require an `unsafe` block.
/// Therefore, implementations must not require the user to uphold
/// any safety invariants.
///
/// The stabilized versions of this intrinsic are available on the integer
/// primitives via the `wrapping_mul` method. For example,
/// [`u32::wrapping_mul`]
#[rustc_const_stable(feature = "const_int_wrapping", since = "1.40.0")]
pub fn wrapping_mul<T: Copy>(a: T, b: T) -> T;
1829
/// Computes `a + b`, saturating at numeric bounds: on overflow the
/// result clamps to `T::MAX` (or `T::MIN`) instead of wrapping.
///
/// Note that, unlike most intrinsics, this is safe to call;
/// it does not require an `unsafe` block.
/// Therefore, implementations must not require the user to uphold
/// any safety invariants.
///
/// The stabilized versions of this intrinsic are available on the integer
/// primitives via the `saturating_add` method. For example,
/// [`u32::saturating_add`]
#[rustc_const_stable(feature = "const_int_saturating", since = "1.40.0")]
pub fn saturating_add<T: Copy>(a: T, b: T) -> T;
/// Computes `a - b`, saturating at numeric bounds: on overflow the
/// result clamps to `T::MIN` (or `T::MAX`) instead of wrapping.
///
/// Note that, unlike most intrinsics, this is safe to call;
/// it does not require an `unsafe` block.
/// Therefore, implementations must not require the user to uphold
/// any safety invariants.
///
/// The stabilized versions of this intrinsic are available on the integer
/// primitives via the `saturating_sub` method. For example,
/// [`u32::saturating_sub`]
#[rustc_const_stable(feature = "const_int_saturating", since = "1.40.0")]
pub fn saturating_sub<T: Copy>(a: T, b: T) -> T;
1854
    /// Returns the value of the discriminant for the variant in 'v';
    /// if `T` has no discriminant, returns `0`.
    ///
    /// Note that, unlike most intrinsics, this is safe to call;
    /// it does not require an `unsafe` block.
    /// Therefore, implementations must not require the user to uphold
    /// any safety invariants.
    ///
    /// The stabilized version of this intrinsic is [`core::mem::discriminant`].
    ///
    /// NOTE(review): `DiscriminantKind` is a compiler-implemented trait; the
    /// associated `Discriminant` type is selected by the compiler per `T`.
    #[rustc_const_unstable(feature = "const_discriminant", issue = "69821")]
    pub fn discriminant_value<T>(v: &T) -> <T as DiscriminantKind>::Discriminant;
1866
    /// Returns the number of variants of the type `T` cast to a `usize`;
    /// if `T` has no variants, returns `0`. Uninhabited variants will be counted.
    /// (The count is a compile-time property of `T` alone — note that the
    /// function takes no value arguments.)
    ///
    /// Note that, unlike most intrinsics, this is safe to call;
    /// it does not require an `unsafe` block.
    /// Therefore, implementations must not require the user to uphold
    /// any safety invariants.
    ///
    /// The to-be-stabilized version of this intrinsic is [`mem::variant_count`].
    #[rustc_const_unstable(feature = "variant_count", issue = "73662")]
    pub fn variant_count<T>() -> usize;
1878
    /// Rust's "try catch" construct which invokes the function pointer `try_fn`
    /// with the data pointer `data`.
    ///
    /// The third argument is a function called if a panic occurs. This function
    /// takes the data pointer and a pointer to the target-specific exception
    /// object that was caught. For more information see the compiler's
    /// source as well as std's catch implementation.
    ///
    /// NOTE(review): the `i32` return value appears to signal whether a panic
    /// was caught (non-zero) or not (zero) — confirm against std's
    /// `panicking` module before relying on this.
    pub fn r#try(try_fn: fn(*mut u8), data: *mut u8, catch_fn: fn(*mut u8, *mut u8)) -> i32;
1887
    /// Emits a `!nontemporal` store according to LLVM (see their docs).
    /// (Per LLVM's `!nontemporal` metadata, this hints that the stored data
    /// will not be re-read soon, so caches may be bypassed.)
    /// Probably will never become stable.
    pub fn nontemporal_store<T>(ptr: *mut T, val: T);
1891
    /// See documentation of `<*const T>::offset_from` for details.
    /// (Per those docs, the result is measured in units of `T`, not bytes.)
    #[rustc_const_unstable(feature = "const_ptr_offset_from", issue = "41079")]
    pub fn ptr_offset_from<T>(ptr: *const T, base: *const T) -> isize;
1895
    /// See documentation of `<*const T>::guaranteed_eq` for details.
    /// (Per those docs, a `false` result does not mean the pointers are
    /// unequal — only that equality could not be guaranteed at compile time.)
    ///
    /// Note that, unlike most intrinsics, this is safe to call;
    /// it does not require an `unsafe` block.
    /// Therefore, implementations must not require the user to uphold
    /// any safety invariants.
    #[rustc_const_unstable(feature = "const_raw_ptr_comparison", issue = "53020")]
    pub fn ptr_guaranteed_eq<T>(ptr: *const T, other: *const T) -> bool;
1904
    /// See documentation of `<*const T>::guaranteed_ne` for details.
    /// (Per those docs, a `false` result does not mean the pointers are
    /// equal — only that inequality could not be guaranteed at compile time.)
    ///
    /// Note that, unlike most intrinsics, this is safe to call;
    /// it does not require an `unsafe` block.
    /// Therefore, implementations must not require the user to uphold
    /// any safety invariants.
    #[rustc_const_unstable(feature = "const_raw_ptr_comparison", issue = "53020")]
    pub fn ptr_guaranteed_ne<T>(ptr: *const T, other: *const T) -> bool;
1913
    /// Allocate at compile time. Should not be called at runtime.
    ///
    /// NOTE(review): presumably returns a pointer into the const-evaluation
    /// heap (`const_heap` feature); runtime behavior is not defined here —
    /// confirm against the interpreter sources.
    #[rustc_const_unstable(feature = "const_heap", issue = "79597")]
    pub fn const_allocate(size: usize, align: usize) -> *mut u8;
1917
    /// Determines whether the raw bytes of the two values are equal.
    ///
    /// This is particularly handy for arrays, since it allows things like just
    /// comparing `i96`s instead of forcing `alloca`s for `[6 x i16]`.
    ///
    /// Above some backend-decided threshold this will emit calls to `memcmp`,
    /// like slice equality does, instead of causing massive code size.
    ///
    /// # Safety
    ///
    /// It's UB to call this if any of the *bytes* in `*a` or `*b` are uninitialized.
    /// Note that this is a stricter criterion than just the *values* being
    /// fully-initialized: if `T` has padding, it's UB to call this intrinsic.
    ///
    /// (The implementation is allowed to branch on the results of comparisons,
    /// which is UB if any of their inputs are `undef`.)
    #[rustc_const_unstable(feature = "const_intrinsic_raw_eq", issue = "none")]
    pub fn raw_eq<T>(a: &T, b: &T) -> bool;
1936
    /// See documentation of [`std::hint::black_box`] for details.
    /// (Per those docs: an identity function that is maximally opaque to the
    /// optimizer, useful for benchmarking.)
    ///
    /// [`std::hint::black_box`]: crate::hint::black_box
    pub fn black_box<T>(dummy: T) -> T;
1941 }
1942
1943 // Some functions are defined here because they accidentally got made
1944 // available in this module on stable. See <https://github.com/rust-lang/rust/issues/15702>.
1945 // (`transmute` also falls into this category, but it cannot be wrapped due to the
1946 // check that `T` and `U` have the same size.)
1947
/// Returns `true` iff `ptr` is non-null and its address is a multiple of
/// `align_of::<T>()`.
pub(crate) fn is_aligned_and_not_null<T>(ptr: *const T) -> bool {
    // Null pointers are rejected up front; an aligned address alone is not
    // enough.
    if ptr.is_null() {
        return false;
    }
    (ptr as usize) % mem::align_of::<T>() == 0
}
1953
/// Returns `true` iff the two regions of `count * size_of::<T>()` bytes
/// starting at `src` and `dst` are disjoint.
#[cfg(debug_assertions)]
pub(crate) fn is_nonoverlapping<T>(src: *const T, dst: *const T, count: usize) -> bool {
    // Total byte length of each region. Unwrapping on overflow is acceptable
    // for a debug-only sanity check.
    let size = mem::size_of::<T>().checked_mul(count).unwrap();
    let (a, b) = (src as usize, dst as usize);
    // Absolute distance between the two start addresses.
    let distance = if a < b { b - a } else { a - b };
    // The regions are disjoint exactly when the starts are at least `size`
    // bytes apart.
    distance >= size
}
1966
1967 /// Copies `count * size_of::<T>()` bytes from `src` to `dst`. The source
1968 /// and destination must *not* overlap.
1969 ///
1970 /// For regions of memory which might overlap, use [`copy`] instead.
1971 ///
1972 /// `copy_nonoverlapping` is semantically equivalent to C's [`memcpy`], but
1973 /// with the argument order swapped.
1974 ///
1975 /// [`memcpy`]: https://en.cppreference.com/w/c/string/byte/memcpy
1976 ///
1977 /// # Safety
1978 ///
1979 /// Behavior is undefined if any of the following conditions are violated:
1980 ///
1981 /// * `src` must be [valid] for reads of `count * size_of::<T>()` bytes.
1982 ///
1983 /// * `dst` must be [valid] for writes of `count * size_of::<T>()` bytes.
1984 ///
1985 /// * Both `src` and `dst` must be properly aligned.
1986 ///
1987 /// * The region of memory beginning at `src` with a size of `count *
1988 /// size_of::<T>()` bytes must *not* overlap with the region of memory
1989 /// beginning at `dst` with the same size.
1990 ///
1991 /// Like [`read`], `copy_nonoverlapping` creates a bitwise copy of `T`, regardless of
1992 /// whether `T` is [`Copy`]. If `T` is not [`Copy`], using *both* the values
1993 /// in the region beginning at `*src` and the region beginning at `*dst` can
1994 /// [violate memory safety][read-ownership].
1995 ///
1996 /// Note that even if the effectively copied size (`count * size_of::<T>()`) is
1997 /// `0`, the pointers must be non-null and properly aligned.
1998 ///
1999 /// [`read`]: crate::ptr::read
2000 /// [read-ownership]: crate::ptr::read#ownership-of-the-returned-value
2001 /// [valid]: crate::ptr#safety
2002 ///
2003 /// # Examples
2004 ///
2005 /// Manually implement [`Vec::append`]:
2006 ///
2007 /// ```
2008 /// use std::ptr;
2009 ///
2010 /// /// Moves all the elements of `src` into `dst`, leaving `src` empty.
2011 /// fn append<T>(dst: &mut Vec<T>, src: &mut Vec<T>) {
2012 /// let src_len = src.len();
2013 /// let dst_len = dst.len();
2014 ///
2015 /// // Ensure that `dst` has enough capacity to hold all of `src`.
2016 /// dst.reserve(src_len);
2017 ///
2018 /// unsafe {
2019 /// // The call to offset is always safe because `Vec` will never
2020 /// // allocate more than `isize::MAX` bytes.
2021 /// let dst_ptr = dst.as_mut_ptr().offset(dst_len as isize);
2022 /// let src_ptr = src.as_ptr();
2023 ///
2024 /// // Truncate `src` without dropping its contents. We do this first,
2025 /// // to avoid problems in case something further down panics.
2026 /// src.set_len(0);
2027 ///
2028 /// // The two regions cannot overlap because mutable references do
2029 /// // not alias, and two different vectors cannot own the same
2030 /// // memory.
2031 /// ptr::copy_nonoverlapping(src_ptr, dst_ptr, src_len);
2032 ///
2033 /// // Notify `dst` that it now holds the contents of `src`.
2034 /// dst.set_len(dst_len + src_len);
2035 /// }
2036 /// }
2037 ///
2038 /// let mut a = vec!['r'];
2039 /// let mut b = vec!['u', 's', 't'];
2040 ///
2041 /// append(&mut a, &mut b);
2042 ///
2043 /// assert_eq!(a, &['r', 'u', 's', 't']);
2044 /// assert!(b.is_empty());
2045 /// ```
2046 ///
2047 /// [`Vec::append`]: ../../std/vec/struct.Vec.html#method.append
#[doc(alias = "memcpy")]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_unstable(feature = "const_intrinsic_copy", issue = "80697")]
#[inline]
pub const unsafe fn copy_nonoverlapping<T>(src: *const T, dst: *mut T, count: usize) {
    // The actual intrinsic, redeclared locally so this public wrapper can
    // share its name; the call at the bottom resolves to this declaration.
    extern "rust-intrinsic" {
        #[rustc_const_unstable(feature = "const_intrinsic_copy", issue = "80697")]
        pub fn copy_nonoverlapping<T>(src: *const T, dst: *mut T, count: usize);
    }

    // Debug-build sanity check: abort on null/misaligned pointers or
    // overlapping ranges before invoking the intrinsic.
    #[cfg(debug_assertions)]
    fn runtime_check<T>(src: *const T, dst: *mut T, count: usize) {
        if !is_aligned_and_not_null(src)
            || !is_aligned_and_not_null(dst)
            || !is_nonoverlapping(src, dst, count)
        {
            // Not panicking to keep codegen impact smaller.
            abort();
        }
    }
    // Compile-time twin of `runtime_check`: deliberately a no-op (see the
    // SAFETY note below for why skipping the check is acceptable).
    #[cfg(debug_assertions)]
    const fn compiletime_check<T>(_src: *const T, _dst: *mut T, _count: usize) {}
    #[cfg(debug_assertions)]
    // SAFETY: runtime debug-assertions are a best-effort basis; it's fine to
    // not do them during compile time
    unsafe {
        const_eval_select((src, dst, count), compiletime_check, runtime_check);
    }

    // SAFETY: the safety contract for `copy_nonoverlapping` must be
    // upheld by the caller.
    unsafe { copy_nonoverlapping(src, dst, count) }
}
2081
2082 /// Copies `count * size_of::<T>()` bytes from `src` to `dst`. The source
2083 /// and destination may overlap.
2084 ///
2085 /// If the source and destination will *never* overlap,
2086 /// [`copy_nonoverlapping`] can be used instead.
2087 ///
2088 /// `copy` is semantically equivalent to C's [`memmove`], but with the argument
2089 /// order swapped. Copying takes place as if the bytes were copied from `src`
2090 /// to a temporary array and then copied from the array to `dst`.
2091 ///
2092 /// [`memmove`]: https://en.cppreference.com/w/c/string/byte/memmove
2093 ///
2094 /// # Safety
2095 ///
2096 /// Behavior is undefined if any of the following conditions are violated:
2097 ///
2098 /// * `src` must be [valid] for reads of `count * size_of::<T>()` bytes.
2099 ///
2100 /// * `dst` must be [valid] for writes of `count * size_of::<T>()` bytes.
2101 ///
2102 /// * Both `src` and `dst` must be properly aligned.
2103 ///
2104 /// Like [`read`], `copy` creates a bitwise copy of `T`, regardless of
2105 /// whether `T` is [`Copy`]. If `T` is not [`Copy`], using both the values
2106 /// in the region beginning at `*src` and the region beginning at `*dst` can
2107 /// [violate memory safety][read-ownership].
2108 ///
2109 /// Note that even if the effectively copied size (`count * size_of::<T>()`) is
2110 /// `0`, the pointers must be non-null and properly aligned.
2111 ///
2112 /// [`read`]: crate::ptr::read
2113 /// [read-ownership]: crate::ptr::read#ownership-of-the-returned-value
2114 /// [valid]: crate::ptr#safety
2115 ///
2116 /// # Examples
2117 ///
2118 /// Efficiently create a Rust vector from an unsafe buffer:
2119 ///
2120 /// ```
2121 /// use std::ptr;
2122 ///
2123 /// /// # Safety
2124 /// ///
2125 /// /// * `ptr` must be correctly aligned for its type and non-zero.
2126 /// /// * `ptr` must be valid for reads of `elts` contiguous elements of type `T`.
2127 /// /// * Those elements must not be used after calling this function unless `T: Copy`.
2128 /// # #[allow(dead_code)]
2129 /// unsafe fn from_buf_raw<T>(ptr: *const T, elts: usize) -> Vec<T> {
2130 /// let mut dst = Vec::with_capacity(elts);
2131 ///
2132 /// // SAFETY: Our precondition ensures the source is aligned and valid,
2133 /// // and `Vec::with_capacity` ensures that we have usable space to write them.
2134 /// ptr::copy(ptr, dst.as_mut_ptr(), elts);
2135 ///
2136 /// // SAFETY: We created it with this much capacity earlier,
2137 /// // and the previous `copy` has initialized these elements.
2138 /// dst.set_len(elts);
2139 /// dst
2140 /// }
2141 /// ```
#[doc(alias = "memmove")]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_unstable(feature = "const_intrinsic_copy", issue = "80697")]
#[inline]
pub const unsafe fn copy<T>(src: *const T, dst: *mut T, count: usize) {
    // The actual intrinsic, redeclared locally so this public wrapper can
    // share its name; the call at the bottom resolves to this declaration.
    extern "rust-intrinsic" {
        #[rustc_const_unstable(feature = "const_intrinsic_copy", issue = "80697")]
        fn copy<T>(src: *const T, dst: *mut T, count: usize);
    }

    // Debug-build sanity check. Unlike `copy_nonoverlapping`, overlap is
    // allowed here, so only alignment/null are verified.
    #[cfg(debug_assertions)]
    fn runtime_check<T>(src: *const T, dst: *mut T) {
        if !is_aligned_and_not_null(src) || !is_aligned_and_not_null(dst) {
            // Not panicking to keep codegen impact smaller.
            abort();
        }
    }
    // Compile-time twin of `runtime_check`: deliberately a no-op (see the
    // SAFETY note below for why skipping the check is acceptable).
    #[cfg(debug_assertions)]
    const fn compiletime_check<T>(_src: *const T, _dst: *mut T) {}
    #[cfg(debug_assertions)]
    // SAFETY: runtime debug-assertions are a best-effort basis; it's fine to
    // not do them during compile time
    unsafe {
        const_eval_select((src, dst), compiletime_check, runtime_check);
    }

    // SAFETY: the safety contract for `copy` must be upheld by the caller.
    unsafe { copy(src, dst, count) }
}
2171
2172 /// Sets `count * size_of::<T>()` bytes of memory starting at `dst` to
2173 /// `val`.
2174 ///
2175 /// `write_bytes` is similar to C's [`memset`], but sets `count *
2176 /// size_of::<T>()` bytes to `val`.
2177 ///
2178 /// [`memset`]: https://en.cppreference.com/w/c/string/byte/memset
2179 ///
2180 /// # Safety
2181 ///
2182 /// Behavior is undefined if any of the following conditions are violated:
2183 ///
2184 /// * `dst` must be [valid] for writes of `count * size_of::<T>()` bytes.
2185 ///
2186 /// * `dst` must be properly aligned.
2187 ///
2188 /// Additionally, the caller must ensure that writing `count *
2189 /// size_of::<T>()` bytes to the given region of memory results in a valid
2190 /// value of `T`. Using a region of memory typed as a `T` that contains an
2191 /// invalid value of `T` is undefined behavior.
2192 ///
2193 /// Note that even if the effectively copied size (`count * size_of::<T>()`) is
2194 /// `0`, the pointer must be non-null and properly aligned.
2195 ///
2196 /// [valid]: crate::ptr#safety
2197 ///
2198 /// # Examples
2199 ///
2200 /// Basic usage:
2201 ///
2202 /// ```
2203 /// use std::ptr;
2204 ///
2205 /// let mut vec = vec![0u32; 4];
2206 /// unsafe {
2207 /// let vec_ptr = vec.as_mut_ptr();
2208 /// ptr::write_bytes(vec_ptr, 0xfe, 2);
2209 /// }
2210 /// assert_eq!(vec, [0xfefefefe, 0xfefefefe, 0, 0]);
2211 /// ```
2212 ///
2213 /// Creating an invalid value:
2214 ///
2215 /// ```
2216 /// use std::ptr;
2217 ///
2218 /// let mut v = Box::new(0i32);
2219 ///
2220 /// unsafe {
2221 /// // Leaks the previously held value by overwriting the `Box<T>` with
2222 /// // a null pointer.
2223 /// ptr::write_bytes(&mut v as *mut Box<i32>, 0, 1);
2224 /// }
2225 ///
2226 /// // At this point, using or dropping `v` results in undefined behavior.
2227 /// // drop(v); // ERROR
2228 ///
2229 /// // Even leaking `v` "uses" it, and hence is undefined behavior.
2230 /// // mem::forget(v); // ERROR
2231 ///
2232 /// // In fact, `v` is invalid according to basic type layout invariants, so *any*
2233 /// // operation touching it is undefined behavior.
2234 /// // let v2 = v; // ERROR
2235 ///
2236 /// unsafe {
2237 /// // Let us instead put in a valid value
2238 /// ptr::write(&mut v as *mut Box<i32>, Box::new(42i32));
2239 /// }
2240 ///
2241 /// // Now the box is fine
2242 /// assert_eq!(*v, 42);
2243 /// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub unsafe fn write_bytes<T>(dst: *mut T, val: u8, count: usize) {
    // The actual intrinsic, redeclared locally so this public wrapper can
    // share its name; the call at the bottom resolves to this declaration.
    extern "rust-intrinsic" {
        fn write_bytes<T>(dst: *mut T, val: u8, count: usize);
    }

    // Best-effort debug check only; the full safety contract (validity for
    // `count * size_of::<T>()` bytes) cannot be verified here.
    debug_assert!(is_aligned_and_not_null(dst), "attempt to write to unaligned or null pointer");

    // SAFETY: the safety contract for `write_bytes` must be upheld by the caller.
    unsafe { write_bytes(dst, val, count) }
}
2256
2257 /// Selects which function to call depending on the context.
2258 ///
2259 /// If this function is evaluated at compile-time, then a call to this
2260 /// intrinsic will be replaced with a call to `called_in_const`. It gets
2261 /// replaced with a call to `called_at_rt` otherwise.
2262 ///
2263 /// # Type Requirements
2264 ///
2265 /// The two functions must be both function items. They cannot be function
2266 /// pointers or closures.
2267 ///
2268 /// `arg` will be the arguments that will be passed to either one of the
2269 /// two functions, therefore, both functions must accept the same type of
2270 /// arguments. Both functions must return RET.
2271 ///
2272 /// # Safety
2273 ///
2274 /// This intrinsic allows breaking [referential transparency] in `const fn`
2275 /// and is therefore `unsafe`.
2276 ///
2277 /// Code that uses this intrinsic must be extremely careful to ensure that
2278 /// `const fn`s remain referentially-transparent independently of when they
2279 /// are evaluated.
2280 ///
2281 /// The Rust compiler assumes that it is sound to replace a call to a `const
2282 /// fn` with the result produced by evaluating it at compile-time. If
2283 /// evaluating the function at run-time were to produce a different result,
2284 /// or have any other observable side-effects, the behavior is undefined.
2285 ///
2286 /// [referential transparency]: https://en.wikipedia.org/wiki/Referential_transparency
#[unstable(
    feature = "const_eval_select",
    issue = "none",
    reason = "const_eval_select will never be stable"
)]
#[rustc_const_unstable(feature = "const_eval_select", issue = "none")]
#[lang = "const_eval_select"]
#[rustc_do_not_const_check]
pub const unsafe fn const_eval_select<ARG, F, G, RET>(
    arg: ARG,
    _called_in_const: F,
    called_at_rt: G,
) -> RET
where
    F: ~const FnOnce<ARG, Output = RET>,
    G: FnOnce<ARG, Output = RET> + ~const Drop,
{
    // This body is what executes for a *runtime* call. NOTE(review): the
    // `const_eval_select` lang item presumably makes the const evaluator
    // dispatch to `called_in_const` instead — confirm in the interpreter
    // sources (hence `rustc_do_not_const_check` on this fn).
    called_at_rt.call_once(arg)
}
2306
/// Compile-time counterpart of [`const_eval_select`]: always invokes
/// `called_in_const`.
///
/// NOTE(review): presumably the const evaluator redirects
/// `const_eval_select` calls to this function via the
/// `const_eval_select_ct` lang item — confirm in the interpreter sources.
#[unstable(
    feature = "const_eval_select",
    issue = "none",
    reason = "const_eval_select will never be stable"
)]
#[rustc_const_unstable(feature = "const_eval_select", issue = "none")]
#[lang = "const_eval_select_ct"]
pub const unsafe fn const_eval_select_ct<ARG, F, G, RET>(
    arg: ARG,
    called_in_const: F,
    _called_at_rt: G,
) -> RET
where
    F: ~const FnOnce<ARG, Output = RET>,
    G: FnOnce<ARG, Output = RET> + ~const Drop,
{
    called_in_const.call_once(arg)
}
2325