/*	$OpenBSD: percpu.h,v 1.9 2023/09/16 09:33:27 mpi Exp $ */

/*
 * Copyright (c) 2016 David Gwynne <dlg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef _SYS_PERCPU_H_
#define _SYS_PERCPU_H_

#ifndef CACHELINESIZE
#define CACHELINESIZE	64
#endif

#ifndef __upunused /* this should go in param.h */
#ifdef MULTIPROCESSOR
#define __upunused
#else
#define __upunused __attribute__((__unused__))
#endif
#endif

struct cpumem {
	void		*mem;
};

struct cpumem_iter {
	unsigned int	cpu;
} __upunused;

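/*
 * State carried across a per-CPU counter update: `c' points at the
 * local CPU's counter block and, on MULTIPROCESSOR kernels, `g' holds
 * the generation number taken by counters_enter().
 */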
struct counters_ref {
	uint64_t	 g;
	uint64_t	*c;
};

#ifdef _KERNEL

#include <sys/atomic.h>

struct pool;

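/*
 * cpumem_get() and cpumem_put() hand out and return per-CPU memory
 * backed by items from the given pool.  cpumem_malloc() allocates a
 * copy of the given size for each CPU with malloc(9), and
 * cpumem_malloc_ncpus() takes memory set up before the secondary CPUs
 * attached (e.g. via CPUMEM_BOOT_MEMORY below) and extends it so every
 * CPU gets its own copy.  cpumem_free() releases a cpumem_malloc()
 * style allocation.  See percpu(9).
 */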
struct cpumem	*cpumem_get(struct pool *);
void		 cpumem_put(struct pool *, struct cpumem *);

struct cpumem	*cpumem_malloc(size_t, int);
struct cpumem	*cpumem_malloc_ncpus(struct cpumem *, size_t, int);
void		 cpumem_free(struct cpumem *, int, size_t);

void		*cpumem_first(struct cpumem_iter *, struct cpumem *);
void		*cpumem_next(struct cpumem_iter *, struct cpumem *);

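/*
 * cpumem_enter() returns the calling CPU's copy of the per-CPU memory
 * (the single shared copy on uniprocessor kernels) and cpumem_leave()
 * ends the access; the returned pointer is only meaningful while the
 * caller stays on the same CPU.
 */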
static inline void *
cpumem_enter(struct cpumem *cm)
{
#ifdef MULTIPROCESSOR
	return (cm[cpu_number()].mem);
#else
	return (cm);
#endif
}

static inline void
cpumem_leave(struct cpumem *cm, void *mem)
{
	/* KDASSERT? */
}

#ifdef MULTIPROCESSOR

#define CPUMEM_BOOT_MEMORY(_name, _sz)					\
static struct {								\
	unsigned char	mem[_sz];					\
	struct cpumem	cpumem;						\
} __aligned(CACHELINESIZE) _name##_boot_cpumem = {			\
	.cpumem = { _name##_boot_cpumem.mem }				\
}

#define CPUMEM_BOOT_INITIALIZER(_name)					\
	{ &_name##_boot_cpumem.cpumem }

#else /* MULTIPROCESSOR */

#define CPUMEM_BOOT_MEMORY(_name, _sz)					\
static struct {								\
	unsigned char	mem[_sz];					\
} __aligned(sizeof(uint64_t)) _name##_boot_cpumem

#define CPUMEM_BOOT_INITIALIZER(_name)					\
	{ (struct cpumem *)&_name##_boot_cpumem.mem }

#endif /* MULTIPROCESSOR */

#define CPUMEM_FOREACH(_var, _iter, _cpumem)				\
	for ((_var) = cpumem_first((_iter), (_cpumem));			\
	    (_var) != NULL;						\
	    (_var) = cpumem_next((_iter), (_cpumem)))
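
/*
 * A minimal iteration sketch (illustrative, not from this file): walk
 * each CPU's copy of a hypothetical per-CPU structure and total one of
 * its fields.  "struct foo_pcpu", "foo_cm", and "fp_count" are made-up
 * names.
 *
 *	struct cpumem_iter i;
 *	struct foo_pcpu *fp;
 *	uint64_t total = 0;
 *
 *	CPUMEM_FOREACH(fp, &i, foo_cm)
 *		total += fp->fp_count;
 */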

/*
 * per cpu counters
 */
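
/*
 * counters_alloc() returns per-CPU storage for the given number of
 * 64bit counters; counters_alloc_ncpus() extends a boot-time
 * allocation (COUNTERS_BOOT_MEMORY) to all attached CPUs, and
 * counters_free() releases the storage.  counters_read() sums each
 * CPU's counters into the caller-provided array and counters_zero()
 * clears them.  On MULTIPROCESSOR kernels every CPU's block starts
 * with a generation word, followed by the counters themselves.
 */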

struct cpumem	*counters_alloc(unsigned int);
struct cpumem	*counters_alloc_ncpus(struct cpumem *, unsigned int);
void		 counters_free(struct cpumem *, unsigned int);
void		 counters_read(struct cpumem *, uint64_t *, unsigned int,
		     uint64_t *);
void		 counters_zero(struct cpumem *, unsigned int);

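/*
 * Updates use a seqlock-style generation number on MULTIPROCESSOR
 * kernels: counters_enter() bumps the local CPU's generation word to
 * an odd value before handing out a pointer to the counters, and
 * counters_leave() bumps it back to an even value once the update is
 * done.  A reader that observes an odd or changing generation knows it
 * caught the counters mid-update and retries.  On uniprocessor kernels
 * no generation word is needed and the counters are returned directly.
 */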
static inline uint64_t *
counters_enter(struct counters_ref *ref, struct cpumem *cm)
{
	ref->c = cpumem_enter(cm);
#ifdef MULTIPROCESSOR
	ref->g = ++(*ref->c); /* make the generation number odd */
	membar_producer();
	return (ref->c + 1);
#else
	return (ref->c);
#endif
}

static inline void
counters_leave(struct counters_ref *ref, struct cpumem *cm)
{
#ifdef MULTIPROCESSOR
	membar_producer();
	(*ref->c) = ++ref->g; /* make the generation number even again */
#endif
	cpumem_leave(cm, ref->c);
}

static inline void
counters_inc(struct cpumem *cm, unsigned int c)
{
	struct counters_ref ref;
	uint64_t *counters;

	counters = counters_enter(&ref, cm);
	counters[c]++;
	counters_leave(&ref, cm);
}

static inline void
counters_dec(struct cpumem *cm, unsigned int c)
{
	struct counters_ref ref;
	uint64_t *counters;

	counters = counters_enter(&ref, cm);
	counters[c]--;
	counters_leave(&ref, cm);
}

static inline void
counters_add(struct cpumem *cm, unsigned int c, uint64_t v)
{
	struct counters_ref ref;
	uint64_t *counters;

	counters = counters_enter(&ref, cm);
	counters[c] += v;
	counters_leave(&ref, cm);
}

static inline void
counters_pkt(struct cpumem *cm, unsigned int c, unsigned int b, uint64_t v)
{
	struct counters_ref ref;
	uint64_t *counters;

	counters = counters_enter(&ref, cm);
	counters[c]++;
	counters[b] += v;
	counters_leave(&ref, cm);
}
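
/*
 * A minimal update sketch (illustrative names only): account an input
 * error with counters_inc(), and a received packet plus its length
 * with counters_pkt(), against hypothetical "foo_counters" storage
 * indexed by made-up counter numbers.
 *
 *	counters_inc(foo_counters, foo_ierrors);
 *	counters_pkt(foo_counters, foo_ipackets, foo_ibytes, len);
 */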

#ifdef MULTIPROCESSOR
#define COUNTERS_BOOT_MEMORY(_name, _n)					\
	CPUMEM_BOOT_MEMORY(_name, ((_n) + 1) * sizeof(uint64_t))
#else
#define COUNTERS_BOOT_MEMORY(_name, _n)					\
	CPUMEM_BOOT_MEMORY(_name, (_n) * sizeof(uint64_t))
#endif

#define COUNTERS_BOOT_INITIALIZER(_name)	CPUMEM_BOOT_INITIALIZER(_name)
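
/*
 * A boot-time setup sketch (illustrative names, following the pattern
 * described in percpu(9)): reserve static counter memory that works
 * before the regular allocators and secondary CPUs are up, then spread
 * it across all CPUs once they have attached.
 *
 *	COUNTERS_BOOT_MEMORY(foo_boot, foo_ncounters);
 *	struct cpumem *foo_counters = COUNTERS_BOOT_INITIALIZER(foo_boot);
 *
 *	void
 *	foo_init(void)
 *	{
 *		foo_counters = counters_alloc_ncpus(foo_counters,
 *		    foo_ncounters);
 *	}
 */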

#endif /* _KERNEL */
#endif /* _SYS_PERCPU_H_ */