/*	$NetBSD: cpu_data.h,v 1.54 2023/07/13 12:06:20 riastradh Exp $	*/

/*-
 * Copyright (c) 2004, 2006, 2007, 2008, 2019, 2020 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * based on arch/i386/include/cpu.h:
 *	NetBSD: cpu.h,v 1.115 2004/05/16 12:32:53 yamt Exp
 */

#ifndef _SYS_CPU_DATA_H_
#define	_SYS_CPU_DATA_H_

struct callout;
struct lwp;

#include <sys/sched.h>	/* for schedstate_percpu */
#include <sys/condvar.h>
#include <sys/pcu.h>
#include <sys/percpu_types.h>
#include <sys/queue.h>
#include <sys/kcpuset.h>
#include <sys/ipi.h>
#include <sys/intr.h>

/* Per-CPU counters.  New elements must be added in blocks of 8. */
enum cpu_count {
	CPU_COUNT_NSWTCH,		/* 0 */
	CPU_COUNT_NSYSCALL,
	CPU_COUNT_NTRAP,
	CPU_COUNT_NINTR,
	CPU_COUNT_NSOFT,
	CPU_COUNT_FORKS,
	CPU_COUNT_FORKS_PPWAIT,
	CPU_COUNT_FORKS_SHAREVM,
	CPU_COUNT_COLORHIT,		/* 8 */
	CPU_COUNT_COLORMISS,
	CPU_COUNT__UNUSED3,
	CPU_COUNT__UNUSED4,
	CPU_COUNT_CPUHIT,
	CPU_COUNT_CPUMISS,
	CPU_COUNT_FREEPAGES,
	CPU_COUNT__UNUSED5,
	CPU_COUNT_PAGEINS,		/* 16 */
	CPU_COUNT_FLTUP,
	CPU_COUNT_FLTNOUP,
	CPU_COUNT_FLTPGWAIT,
	CPU_COUNT_FLTRELCK,
	CPU_COUNT_FLTRELCKOK,
	CPU_COUNT__UNUSED1,
	CPU_COUNT__UNUSED2,
	CPU_COUNT_NFAULT,		/* 24 */
	CPU_COUNT_FLT_ACOW,
	CPU_COUNT_FLT_ANON,
	CPU_COUNT_FLT_OBJ,
	CPU_COUNT_FLT_PRCOPY,
	CPU_COUNT_FLT_PRZERO,
	CPU_COUNT_FLTAMCOPY,
	CPU_COUNT_FLTANGET,
	CPU_COUNT_FLTANRETRY,		/* 32 */
	CPU_COUNT_FLTGET,
	CPU_COUNT_FLTLGET,
	CPU_COUNT_FLTNAMAP,
	CPU_COUNT_FLTNOMAP,
	CPU_COUNT_FLTNOANON,
	CPU_COUNT_FLTNORAM,
	CPU_COUNT_FLTPGRELE,
	CPU_COUNT_ANONUNKNOWN,		/* 40 */
	CPU_COUNT_ANONCLEAN,
	CPU_COUNT_ANONDIRTY,
	CPU_COUNT_FILEUNKNOWN,
	CPU_COUNT_FILECLEAN,
	CPU_COUNT_FILEDIRTY,
	CPU_COUNT_EXECPAGES,
	CPU_COUNT_SYNC,
	CPU_COUNT_MAX			/* 48 */
};
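
/*
 * Note (an inference, not stated elsewhere in this file): the
 * blocks-of-8 rule presumably keeps the cpu_counts[] array (below) a
 * whole number of 64-byte cache lines, since 8 * sizeof(int64_t) == 64.
 * CPU_COUNT_MAX is currently 48 entries, i.e. 6 such blocks.
 */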

/*
 * MI per-CPU data.
 *
 * This structure is intended to be included in the MD cpu_info structure:
 *	struct cpu_info {
 *		struct cpu_data ci_data;
 *	}
 *
 * Note that cpu_data is not expected to contain much data, as cpu_info
 * is size-limited on most ports.
 */

struct lockdebug;

enum cpu_rel {
	/*
	 * This is a circular list of peer CPUs in the same core (SMT /
	 * Hyperthreading).  It always includes the CPU it is referenced
	 * from as the last entry.
	 */
	CPUREL_CORE,

	/*
	 * This is a circular list of peer CPUs in the same physical
	 * package.  It always includes the CPU it is referenced from as
	 * the last entry.
	 */
	CPUREL_PACKAGE,

	/*
	 * This is a circular list of the first CPUs in each physical
	 * package.  It may or may not include the CPU it is referenced
	 * from.
	 */
	CPUREL_PACKAGE1ST,

	/* Terminator. */
	CPUREL_COUNT
};
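
/*
 * Sketch (an illustration, not taken verbatim from MI/MD code) of how
 * the circular sibling lists can be walked, given a struct cpu_info *ci.
 * Because the list is circular and ends at the referencing CPU, the walk
 * terminates when it arrives back at ci:
 *
 *	struct cpu_info *ci2;
 *
 *	for (ci2 = ci->ci_sibling[CPUREL_CORE]; ci2 != ci;
 *	     ci2 = ci2->ci_sibling[CPUREL_CORE]) {
 *		... ci2 is a peer of ci in the same core ...
 *	}
 */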

struct cpu_data {
	/*
	 * The first section is likely to be touched by other CPUs -
	 * it is cache hot.
	 */
	u_int		cpu_index;		/* CPU index */
	lwp_t		*cpu_biglock_wanted;	/* LWP spinning on biglock */
	kcondvar_t	cpu_xcall;		/* cross-call support */
	int		cpu_xcall_pending;	/* cross-call support */
	u_int		cpu_psz_read_depth;	/* pserialize(9) read depth */
	uint32_t	cpu_ipipend[IPI_BITWORDS];	/* pending IPIs */
	struct schedstate_percpu cpu_schedstate; /* scheduler state */

	/* Basic topology information.  May be fake. */
	u_int		cpu_package_id;
	u_int		cpu_core_id;
	u_int		cpu_smt_id;
	u_int		cpu_numa_id;
	bool		cpu_is_slow;
	u_int		cpu_nsibling[CPUREL_COUNT];
	struct cpu_info	*cpu_sibling[CPUREL_COUNT];
	struct cpu_info *cpu_package1st;	/* 1st CPU in our package */

	/*
	 * This section is mostly CPU-private.
	 */
	lwp_t		*cpu_idlelwp __aligned(64);/* idle lwp */
	void		*cpu_lockstat;		/* lockstat private tables */
	u_int		cpu_biglock_count;	/* # recursive holds */
	u_int		cpu_spin_locks;		/* # of spinlockmgr locks */
	u_int		cpu_simple_locks;	/* # of simple locks held */
	u_int		cpu_spin_locks2;	/* # of spin locks held XXX */
	u_int		cpu_lkdebug_recurse;	/* LOCKDEBUG recursion */
	u_int		cpu_softints;		/* pending (slow) softints */
	struct uvm_cpu	*cpu_uvm;		/* uvm per-cpu data */
	u_int		cpu_faultrng;		/* counter for fault rng */
	void		*cpu_callout;		/* per-CPU callout state */
	void		*cpu_softcpu;		/* soft interrupt table */
	TAILQ_HEAD(,buf) cpu_biodone;		/* finished block xfers */
	percpu_cpu_t	cpu_percpu;		/* per-cpu data */
	struct selcluster *cpu_selcluster;	/* per-CPU select() info */
	void		*cpu_nch;		/* per-cpu vfs_cache data */
	_TAILQ_HEAD(,struct lockdebug,volatile) cpu_ld_locks;/* !: lockdebug */
	__cpu_simple_lock_t cpu_ld_lock;	/* lockdebug */
	uint64_t	cpu_cc_freq;		/* cycle counter frequency */
	int64_t		cpu_cc_skew;		/* counter skew vs cpu0 */
	char		cpu_name[8];		/* eg, "cpu4" */
	kcpuset_t	*cpu_kcpuset;		/* kcpuset_t of this cpu only */
	struct lwp * volatile cpu_pcu_curlwp[PCU_UNIT_COUNT];
	int64_t		cpu_counts[CPU_COUNT_MAX];/* per-CPU counts */

	unsigned	cpu_heartbeat_count;		/* # of heartbeats */
	unsigned	cpu_heartbeat_uptime_cache;	/* last time_uptime */
	unsigned	cpu_heartbeat_uptime_stamp;	/* heartbeats since
							 * uptime changed */
};

#define	ci_schedstate		ci_data.cpu_schedstate
#define	ci_index		ci_data.cpu_index
#define	ci_biglock_count	ci_data.cpu_biglock_count
#define	ci_biglock_wanted	ci_data.cpu_biglock_wanted
#define	ci_cpuname		ci_data.cpu_name
#define	ci_spin_locks		ci_data.cpu_spin_locks
#define	ci_simple_locks		ci_data.cpu_simple_locks
#define	ci_lockstat		ci_data.cpu_lockstat
#define	ci_spin_locks2		ci_data.cpu_spin_locks2
#define	ci_lkdebug_recurse	ci_data.cpu_lkdebug_recurse
#define	ci_pcu_curlwp		ci_data.cpu_pcu_curlwp
#define	ci_kcpuset		ci_data.cpu_kcpuset
#define	ci_ipipend		ci_data.cpu_ipipend
#define	ci_psz_read_depth	ci_data.cpu_psz_read_depth

#define	ci_package_id		ci_data.cpu_package_id
#define	ci_core_id		ci_data.cpu_core_id
#define	ci_smt_id		ci_data.cpu_smt_id
#define	ci_numa_id		ci_data.cpu_numa_id
#define	ci_is_slow		ci_data.cpu_is_slow
#define	ci_nsibling		ci_data.cpu_nsibling
#define	ci_sibling		ci_data.cpu_sibling
#define	ci_package1st		ci_data.cpu_package1st
#define	ci_faultrng		ci_data.cpu_faultrng
#define	ci_counts		ci_data.cpu_counts

#define	ci_heartbeat_count		ci_data.cpu_heartbeat_count
#define	ci_heartbeat_uptime_cache	ci_data.cpu_heartbeat_uptime_cache
#define	ci_heartbeat_uptime_stamp	ci_data.cpu_heartbeat_uptime_stamp

#define	cpu_nsyscall		cpu_counts[CPU_COUNT_NSYSCALL]
#define	cpu_ntrap		cpu_counts[CPU_COUNT_NTRAP]
#define	cpu_nswtch		cpu_counts[CPU_COUNT_NSWTCH]
#define	cpu_nintr		cpu_counts[CPU_COUNT_NINTR]
#define	cpu_nsoft		cpu_counts[CPU_COUNT_NSOFT]
#define	cpu_nfault		cpu_counts[CPU_COUNT_NFAULT]
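
/*
 * With the ci_* aliases above, MD code that embeds struct cpu_data as
 * the ci_data member of its struct cpu_info can access the MI fields
 * directly, e.g. (sketch):
 *
 *	struct cpu_info *ci = curcpu();
 *	printf("%s: index %u\n", ci->ci_cpuname, ci->ci_index);
 */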

void	mi_cpu_init(void);
int	mi_cpu_attach(struct cpu_info *);

/*
 * Adjust a count with preemption already disabled.  If the counter being
 * adjusted can be updated from interrupt context, SPL must be raised.
 */
#define	CPU_COUNT(idx, d)					\
do {								\
	extern bool kpreempt_disabled(void);			\
	KASSERT(kpreempt_disabled());				\
	KASSERT((unsigned)idx < CPU_COUNT_MAX);			\
	curcpu()->ci_counts[(idx)] += (d);			\
} while (/* CONSTCOND */ 0)
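
/*
 * Usage sketch (hypothetical call site): bump the context-switch count.
 * kpreempt_disable()/kpreempt_enable() satisfy the precondition above;
 * if the counter can also be updated from interrupt context, raise SPL
 * instead.
 *
 *	kpreempt_disable();
 *	CPU_COUNT(CPU_COUNT_NSWTCH, 1);
 *	kpreempt_enable();
 */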

/*
 * Fetch a potentially stale count - cheap, use as often as you like.
 */
static inline int64_t
cpu_count_get(enum cpu_count idx)
{
	extern int64_t cpu_counts[];
	return cpu_counts[idx];
}

void	cpu_count(enum cpu_count, int64_t);
void	cpu_count_sync(bool);
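
/*
 * Reader's sketch (an assumption about the API, not stated in this
 * header): cpu_count_sync() presumably folds each CPU's cpu_counts[]
 * into the global array that cpu_count_get() reads, so a caller wanting
 * a fresh rather than potentially stale value might do:
 *
 *	cpu_count_sync(true);
 *	int64_t nswtch = cpu_count_get(CPU_COUNT_NSWTCH);
 */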
263 
264 #endif /* _SYS_CPU_DATA_H_ */
265