/* xref: /openbsd/sys/sys/clockintr.h (revision c737cf90) */
/* $OpenBSD: clockintr.h,v 1.29 2024/02/25 19:15:50 cheloha Exp $ */
/*
 * Copyright (c) 2020-2024 Scott Cheloha <cheloha@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
17 
18 #ifndef _SYS_CLOCKINTR_H_
19 #define _SYS_CLOCKINTR_H_
20 
21 #include <sys/stdint.h>
22 
/*
 * Clock interrupt dispatch statistics.  One instance is embedded in
 * each per-CPU struct clockqueue (cq_stat).  "(ns)" fields accumulate
 * nanoseconds; the others count dispatch events.
 * NOTE(review): presumably exported to userspace via
 * sysctl_clockintr() below -- confirm in kern_clockintr.c.
 */
struct clockintr_stat {
	uint64_t cs_dispatched;		/* total time in dispatch (ns) */
	uint64_t cs_early;		/* number of early dispatch calls */
	uint64_t cs_earliness;		/* total earliness (ns) */
	uint64_t cs_lateness;		/* total lateness (ns) */
	uint64_t cs_prompt;		/* number of prompt dispatch calls */
	uint64_t cs_run;		/* number of events dispatched */
	uint64_t cs_spurious;		/* number of spurious dispatch calls */
};
32 
33 #ifdef _KERNEL
34 
35 #include <sys/mutex.h>
36 #include <sys/queue.h>
37 
38 struct clockqueue;
39 struct clockrequest;
40 struct cpu_info;
41 
/*
 * Platform API
 */

/*
 * Glue binding a machine-dependent event timer to the MI clockintr
 * layer.  Installed per-CPU via clockintr_cpu_init() and recorded in
 * struct clockqueue (cq_intrclock).
 *
 * NOTE(review): callback semantics are defined by the MD
 * implementations, not visible here.  Presumably ic_cookie is passed
 * as the first argument to both callbacks, ic_rearm arms the timer to
 * fire after the given interval (nanoseconds, matching the units used
 * throughout this header), and ic_trigger forces an immediate
 * interrupt -- confirm against a platform driver before relying on
 * this.
 */
struct intrclock {
	void *ic_cookie;
	void (*ic_rearm)(void *, uint64_t);
	void (*ic_trigger)(void *);
};
51 
/*
 * Schedulable clock interrupt callback.
 *
 * Struct member protections:
 *
 *	I	Immutable after initialization.
 *	m	Parent queue mutex (cl_queue->cq_mtx).
 */
struct clockintr {
	uint64_t cl_expiration;				/* [m] dispatch time */
	TAILQ_ENTRY(clockintr) cl_alink;		/* [m] cq_all glue */
	TAILQ_ENTRY(clockintr) cl_plink;		/* [m] cq_pend glue */
	void *cl_arg;					/* [I] argument */
	/*
	 * NOTE(review): the two void * parameters of cl_func are
	 * presumably a machine frame pointer and cl_arg -- confirm
	 * against the dispatch loop in kern_clockintr.c.
	 */
	void (*cl_func)(struct clockrequest *, void*, void*); /* [I] callback */
	struct clockqueue *cl_queue;			/* [I] parent queue */
	uint32_t cl_flags;				/* [m] CLST_* flags */
};

/* cl_flags */
#define CLST_PENDING		0x00000001	/* scheduled to run */
71 
/*
 * Interface for callback rescheduling requests.
 *
 * One instance is embedded in each struct clockqueue (cq_request);
 * it is the struct clockrequest * handed to clockintr callbacks
 * (see cl_func above).
 *
 * Struct member protections:
 *
 *	I	Immutable after initialization.
 *	o	Owned by a single CPU.
 */
struct clockrequest {
	uint64_t cr_expiration;			/* [o] copy of dispatch time */
	struct clockqueue *cr_queue;		/* [I] enclosing queue */
	uint32_t cr_flags;			/* [o] CR_* flags */
};

/* cr_flags */
#define CR_RESCHEDULE		0x00000001	/* reschedule upon return */
87 
/*
 * Per-CPU clock interrupt state.
 *
 * Struct member protections:
 *
 *	a	Modified atomically.
 *	I	Immutable after initialization.
 *	m	Per-queue mutex (cq_mtx).
 *	o	Owned by a single CPU.
 */
struct clockqueue {
	struct clockrequest cq_request;	/* [o] callback request object */
	struct mutex cq_mtx;		/* [a] per-queue mutex */
	uint64_t cq_uptime;		/* [o] cached uptime */
	TAILQ_HEAD(, clockintr) cq_all;	/* [m] established clockintr list */
	TAILQ_HEAD(, clockintr) cq_pend;/* [m] pending clockintr list */
	struct clockintr *cq_running;	/* [m] running clockintr */
	struct clockintr cq_hardclock;	/* [o] hardclock handle */
	struct intrclock cq_intrclock;	/* [I] local interrupt clock */
	struct clockintr_stat cq_stat;	/* [o] dispatch statistics */
	volatile uint32_t cq_gen;	/* [o] cq_stat update generation */
	volatile uint32_t cq_dispatch;	/* [o] dispatch is running */
	uint32_t cq_flags;		/* [m] CQ_* flags; see below */
};

/* cq_flags; CQ_STATE_MASK is the union of the four flags above it. */
#define CQ_INIT			0x00000001	/* clockintr_cpu_init() done */
#define CQ_INTRCLOCK		0x00000002	/* intrclock installed */
#define CQ_IGNORE_REQUEST	0x00000004	/* ignore callback requests */
#define CQ_NEED_WAKEUP		0x00000008	/* caller at barrier */
#define CQ_STATE_MASK		0x0000000f
118 
/*
 * Platform-facing entry points, called from machine-dependent
 * clock/interrupt code.  NOTE(review): exact contracts (when each
 * must be called, meaning of clockintr_dispatch's argument and
 * return value) are defined in kern_clockintr.c, not visible here.
 */
void clockintr_cpu_init(const struct intrclock *);
int clockintr_dispatch(void *);
void clockintr_trigger(void);
122 
/*
 * Kernel API
 */

/*
 * Flag bits taken by clockintr_unbind()'s uint32_t argument
 * (NOTE(review): presumed from CL_FLAG_MASK pairing -- confirm).
 */
#define CL_BARRIER	0x00000001	/* block if callback is running */
#define CL_FLAG_MASK	0x00000001

/* Operations on a clockintr handle from outside its callback. */
uint64_t clockintr_advance(struct clockintr *, uint64_t);
void clockintr_bind(struct clockintr *, struct cpu_info *,
    void (*)(struct clockrequest *, void *, void *), void *);
void clockintr_cancel(struct clockintr *);
void clockintr_schedule(struct clockintr *, uint64_t);
void clockintr_stagger(struct clockintr *, uint64_t, uint32_t, uint32_t);
void clockintr_unbind(struct clockintr *, uint32_t);
/* Rescheduling from inside a callback, via its clockrequest. */
uint64_t clockrequest_advance(struct clockrequest *, uint64_t);
uint64_t clockrequest_advance_random(struct clockrequest *, uint64_t, uint32_t);
void clockqueue_init(struct clockqueue *);
int sysctl_clockintr(int *, u_int, void *, size_t *, void *, size_t);
141 
142 #endif /* _KERNEL */
143 
144 #endif /* !_SYS_CLOCKINTR_H_ */
145