/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2019, 2020 Jeffrey Roberson <jeff@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _SYS_SMR_TYPES_H_
#define _SYS_SMR_TYPES_H_

#include <sys/_smr.h>

/*
 * SMR Accessors are meant to provide safe access to SMR protected
 * pointers and prevent misuse and accidental access.
 *
 * Accessors are grouped by type:
 * entered      - Use while in a read section (between smr_enter/smr_exit())
 * serialized   - Use while holding a lock that serializes writers.  Updates
 *                are synchronized with readers via included barriers.
 * unserialized - Use after the memory is out of scope and not visible to
 *                readers.
 *
 * All accesses include a parameter for an assert to verify the required
 * synchronization.  For example, a writer might use:
 *
 * smr_serialized_store(pointer, value, mtx_owned(&writelock));
 *
 * These are only enabled in INVARIANTS kernels.
 */

/* Type restricting pointer access to force smr accessors. */
#define	SMR_POINTER(type)						\
struct {								\
	type	__ptr;		/* Do not access directly */		\
}

/*
 * Read from an SMR protected pointer while in a read section.
 *
 * The acquire load pairs with the release store/fence performed by the
 * serialized writer accessors below.
 */
#define	smr_entered_load(p, smr) ({					\
	SMR_ASSERT(SMR_ENTERED((smr)), "smr_entered_load");		\
	(__typeof((p)->__ptr))atomic_load_acq_ptr((uintptr_t *)&(p)->__ptr); \
})

/*
 * Read from an SMR protected pointer while serialized by an
 * external mechanism. 'ex' should contain an assert that the
 * external mechanism is held.  i.e. mtx_owned()
 */
#define	smr_serialized_load(p, ex) ({					\
	SMR_ASSERT(ex, "smr_serialized_load");				\
	(__typeof((p)->__ptr))atomic_load_ptr(&(p)->__ptr);		\
})

/*
 * Store 'v' to an SMR protected pointer while serialized by an
 * external mechanism. 'ex' should contain an assert that the
 * external mechanism is held.  i.e. mtx_owned()
 *
 * Writers that are serialized with mutual exclusion or on a single
 * thread should use smr_serialized_store() rather than swap.
 */
#define	smr_serialized_store(p, v, ex) do {				\
	SMR_ASSERT(ex, "smr_serialized_store");				\
	__typeof((p)->__ptr) _v = (v);					\
	atomic_store_rel_ptr((uintptr_t *)&(p)->__ptr, (uintptr_t)_v);	\
} while (0)

/*
 * swap 'v' with an SMR protected pointer and return the old value
 * while serialized by an external mechanism. 'ex' should contain
 * an assert that the external mechanism is provided.  i.e. mtx_owned()
 *
 * Swap permits multiple writers to update a pointer concurrently.
 */
#define	smr_serialized_swap(p, v, ex) ({				\
	SMR_ASSERT(ex, "smr_serialized_swap");				\
	__typeof((p)->__ptr) _v = (v);					\
	/* Release barrier guarantees contents are visible to reader */	\
	atomic_thread_fence_rel();					\
	(__typeof((p)->__ptr))atomic_swap_ptr(				\
	    (uintptr_t *)&(p)->__ptr, (uintptr_t)_v);			\
})

/*
 * Read from an SMR protected pointer when no serialization is required
 * such as in the destructor callback or when the caller guarantees other
 * synchronization.
 */
#define	smr_unserialized_load(p, ex) ({					\
	SMR_ASSERT(ex, "smr_unserialized_load");			\
	(__typeof((p)->__ptr))atomic_load_ptr(&(p)->__ptr);		\
})

/*
 * Store to an SMR protected pointer when no serialization is required
 * such as in the destructor callback or when the caller guarantees other
 * synchronization.
 */
#define	smr_unserialized_store(p, v, ex) do {				\
	SMR_ASSERT(ex, "smr_unserialized_store");			\
	__typeof((p)->__ptr) _v = (v);					\
	atomic_store_ptr((uintptr_t *)&(p)->__ptr, (uintptr_t)_v);	\
} while (0)

#ifndef _KERNEL

/*
 * Load an SMR protected pointer when accessing kernel data structures through
 * libkvm.
 */
#define	smr_kvm_load(p) ((p)->__ptr)

#endif /* !_KERNEL */
#endif /* !_SYS_SMR_TYPES_H_ */