compiler_builtins/math/libm_math/support/feature_detect.rs

//! Helpers for runtime CPU feature detection, shared across architectures.

#[cfg(all(target_has_atomic = "ptr", not(target_has_atomic = "32")))]
compile_error!("currently all targets that support `AtomicPtr` also support `AtomicU32`");

use core::sync::atomic::{AtomicU32, Ordering};

/// Given a list of identifiers, assign each one a unique sequential single-bit
/// mask starting at bit zero, plus test-only `ALL` and `NAMES` tables listing
/// every mask and its name.
#[allow(unused_macros)]
macro_rules! unique_masks {
    ($ty:ty, $($name:ident,)+) => {
        #[cfg(test)]
        pub const ALL: &[$ty] = &[$($name),+];
        #[cfg(test)]
        pub const NAMES: &[&str] = &[$(stringify!($name)),+];

        unique_masks!(@one; $ty; 0; $($name,)+);
    };
    // Base case: the identifier list is exhausted.
    (@one; $_ty:ty; $_idx:expr;) => {};
    // Recursive case: assign the next identifier the current bit, then shift.
    (@one; $ty:ty; $shift:expr; $name:ident, $($tail:tt)*) => {
        pub const $name: $ty = 1 << $shift;
        // The top bit is reserved to record initialization state (see
        // `get_or_init_flags_cache`), so it must never be used as a mask.
        const _: () = assert!($name != (1 << (<$ty>::BITS - 1)));
        unique_masks!(@one; $ty; $shift + 1; $($tail)*);
    };
}
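
// For example (hypothetical mask names), `unique_masks! { u32, HAS_A, HAS_B, }`
// expands to `pub const HAS_A: u32 = 1 << 0;` and `pub const HAS_B: u32 = 1 << 1;`,
// plus the test-only `ALL` and `NAMES` tables.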

/// Run `init` to select a function implementation on the first call, cache the
/// chosen pointer in a static `AtomicPtr`, and dispatch every call (including
/// the first) through `call`.
#[allow(unused_macros)]
macro_rules! select_once {
    (
        sig: fn($($arg:ident: $ArgTy:ty),*) -> $RetTy:ty,
        init: $init:expr,
        call: $call:expr,
    ) => {{
        use core::mem;
        use core::sync::atomic::{AtomicPtr, Ordering};

        type Func = unsafe fn($($arg: $ArgTy),*) -> $RetTy;

        // Initially points at `initializer`; after the first call it holds the
        // selected implementation.
        static FUNC: AtomicPtr<()> = AtomicPtr::new((initializer as Func) as *mut ());

        fn initializer($($arg: $ArgTy),*) -> $RetTy {
            // Select an implementation, cache it for future calls, then
            // forward the current call to it.
            let fn_ptr: Func = $init();
            FUNC.store(fn_ptr as *mut (), Ordering::Relaxed);

            $call(fn_ptr)
        }

        let raw: *mut () = FUNC.load(Ordering::Relaxed);

        // SAFETY: `FUNC` only ever holds pointers to functions with the
        // signature `Func`.
        let fn_ptr: Func = unsafe { mem::transmute::<*mut (), Func>(raw) };

        $call(fn_ptr)
    }}
}
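
// A minimal usage sketch (hypothetical helper names, not defined here): a math
// routine resolves its implementation once, and every later call jumps
// straight to the cached pointer.
//
//     fn fma(x: f64, y: f64, z: f64) -> f64 {
//         select_once! {
//             sig: fn(x: f64, y: f64, z: f64) -> f64,
//             init: || if cpu_has_fma() { fma_hw } else { fma_soft },
//             call: |f: Func| unsafe { f(x, y, z) },
//         }
//     }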

#[allow(unused_imports)]
pub(crate) use {select_once, unique_masks};

use crate::support::cold_path;

/// A set of runtime feature flags packed into a `u32`, one feature per bit
/// (see `unique_masks!`).
#[derive(Clone, Copy, Debug, PartialEq)]
pub struct Flags(u32);

#[allow(dead_code)]
impl Flags {
    /// No flags set.
    pub const fn empty() -> Self {
        Self(0)
    }

    /// Construct from a raw bit pattern.
    pub const fn from_bits(val: u32) -> Self {
        Self(val)
    }

    /// The raw bit pattern.
    pub fn bits(&self) -> u32 {
        self.0
    }

    /// Set all bits in `mask`.
    pub fn insert(&mut self, mask: u32) {
        self.0 |= mask;
    }

    /// Check whether all bits in `mask` are set.
    pub fn contains(&self, mask: u32) -> bool {
        self.0 & mask == mask
    }

    /// Check whether bit `bit` (zero-indexed) is set.
    pub fn test_nth(&self, bit: u32) -> bool {
        debug_assert!(bit < u32::BITS, "bit index out-of-bounds");
        self.0 & (1 << bit) != 0
    }
}

/// Load flags from `cache`, running `init` to compute and store them if the
/// cache has not been populated yet.
#[allow(dead_code)]
pub fn get_or_init_flags_cache(cache: &AtomicU32, init: impl FnOnce() -> Flags) -> Flags {
    // The top bit records whether the cache has been initialized, which is why
    // it must never be used as a feature mask.
    const INITIALIZED: u32 = 1 << 31;

    let mut flags = Flags::from_bits(cache.load(Ordering::Relaxed));

    if !flags.contains(INITIALIZED) {
        // Without the initialized bit set, this is the first use of the cache
        // (barring a benign race); compute and store the flags.
        cold_path();

        flags = init();
        debug_assert!(
            !flags.contains(INITIALIZED),
            "initialized bit shouldn't be set"
        );
        flags.insert(INITIALIZED);
        cache.store(flags.bits(), Ordering::Relaxed);
    }

    flags
}
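
// A minimal usage sketch (hypothetical names, not defined here): an
// architecture module defines its masks with `unique_masks!`, keeps one
// process-wide cache, and runs the detection routine only while the cache is
// still empty.
//
//     unique_masks! { u32, HAS_FMA, HAS_AVX, }
//
//     static CACHE: AtomicU32 = AtomicU32::new(0);
//
//     fn have_fma() -> bool {
//         get_or_init_flags_cache(&CACHE, detect_features).contains(HAS_FMA)
//     }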

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn unique_masks() {
        unique_masks! {
            u32,
            V0,
            V1,
            V2,
        }
        assert_eq!(V0, 1u32 << 0);
        assert_eq!(V1, 1u32 << 1);
        assert_eq!(V2, 1u32 << 2);
        assert_eq!(ALL, [V0, V1, V2]);
        assert_eq!(NAMES, ["V0", "V1", "V2"]);
    }

    #[test]
    fn flag_cache_is_used() {
        static CACHE: AtomicU32 = AtomicU32::new(0);

        let mut f1 = Flags::from_bits(0x1);
        let f2 = Flags::from_bits(0x2);

        let r1 = get_or_init_flags_cache(&CACHE, || f1);
        let r2 = get_or_init_flags_cache(&CACHE, || f2);

        // The first init stores `f1`'s bits plus the initialized (top) bit.
        f1.insert(1 << 31);
        assert_eq!(r1, f1);

        // The second init closure never runs; the cached value is returned.
        assert_eq!(r2, f1);
    }

    #[test]
    fn select_cache_is_used() {
        // Counts how many times the init closure runs.
        static CALLED: AtomicU32 = AtomicU32::new(0);

        fn inner() {
            fn nop() {}

            select_once! {
                sig: fn() -> (),
                init: || {
                    CALLED.fetch_add(1, Ordering::Relaxed);
                    nop
                },
                call: |fn_ptr: Func| unsafe { fn_ptr() },
            }
        }

        // `init` must run on the first call and be skipped on the second.
        inner();
        assert_eq!(CALLED.load(Ordering::Relaxed), 1);
        inner();
        assert_eq!(CALLED.load(Ordering::Relaxed), 1);
    }
}