/*	$NetBSD: rf_revent.c,v 1.29 2021/07/23 00:54:45 oster Exp $	*/
/*
 * Copyright (c) 1995 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Author:
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */
/*
 * rf_revent.c -- reconstruction event handling code
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: rf_revent.c,v 1.29 2021/07/23 00:54:45 oster Exp $");

#include <sys/errno.h>
#include <sys/proc.h>
#include <sys/kernel.h>

#include "rf_raid.h"
#include "rf_revent.h"
#include "rf_etimer.h"
#include "rf_general.h"
#include "rf_desc.h"
#include "rf_shutdown.h"

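/*
 * RF_MIN_FREE_REVENT and RF_MAX_FREE_REVENT bound the per-array pool of
 * event descriptors set up in rf_ConfigureReconEvent() below;
 * RF_EVENTQ_WAIT is the timeout handed to rf_timedwait_cond2() in
 * rf_GetNextReconEvent().
 */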
#define RF_MAX_FREE_REVENT 128
#define RF_MIN_FREE_REVENT  32
#define RF_EVENTQ_WAIT 5000

static void rf_ShutdownReconEvent(void *);

static RF_ReconEvent_t *
GetReconEventDesc(RF_Raid_t *raidPtr, RF_RowCol_t col, void *arg, RF_Revent_t type);

static void rf_ShutdownReconEvent(void *arg)
{
	RF_Raid_t *raidPtr;

	raidPtr = (RF_Raid_t *) arg;

	pool_destroy(&raidPtr->pools.revent);
}

int
rf_ConfigureReconEvent(RF_ShutdownList_t **listp, RF_Raid_t *raidPtr,
		       RF_Config_t *cfgPtr)
{

	rf_pool_init(raidPtr, raidPtr->poolNames.revent, &raidPtr->pools.revent, sizeof(RF_ReconEvent_t),
		     "revent", RF_MIN_FREE_REVENT, RF_MAX_FREE_REVENT);
	rf_ShutdownCreate(listp, rf_ShutdownReconEvent, raidPtr);

	return (0);
}

/* returns the next reconstruction event, blocking the calling thread
 * until one becomes available; the queue is always non-empty when this
 * returns, so the return value is never NULL */

RF_ReconEvent_t *
rf_GetNextReconEvent(RF_RaidReconDesc_t *reconDesc)
{
	RF_Raid_t *raidPtr = reconDesc->raidPtr;
	RF_ReconCtrl_t *rctrl = raidPtr->reconControl;
	RF_ReconEvent_t *event;
	int stall_count;

	rf_lock_mutex2(rctrl->eq_mutex);
	/* q null and count==0 must be equivalent conditions */
	RF_ASSERT((rctrl->eventQueue == NULL) == (rctrl->eq_count == 0));

	/* rf_sleep timeout value: secs = timo_val / hz.  The "ticks"
	   accumulated in reconExecTicks below come from the event timer
	   and are microseconds, not softclock ticks */

#define MAX_RECON_EXEC_USECS (100 * 1000)  /* 100 ms */
#define RECON_DELAY_MS 25
#define RECON_TIMO     ((RECON_DELAY_MS * hz) / 1000)
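
	/*
	 * Worked example: with hz = 100, RECON_TIMO is
	 * (25 * 100) / 1000 = 2 softclock ticks, so the requested 25 ms
	 * delay rounds down to roughly 20 ms; with hz = 1000 it is
	 * exactly 25 ticks.
	 */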

	/* we are not pre-emptible in the kernel, but we don't want to run
	 * forever.  If we run without blocking for more than
	 * MAX_RECON_EXEC_USECS microseconds of measured execution time,
	 * delay for RECON_DELAY_MS before continuing.  this may murder us
	 * with context switches, so we may need to increase both
	 * MAX_RECON_EXEC_USECS and RECON_DELAY_MS. */
	if (reconDesc->reconExecTimerRunning) {
		int     status;

		RF_ETIMER_STOP(reconDesc->recon_exec_timer);
		RF_ETIMER_EVAL(reconDesc->recon_exec_timer);
		reconDesc->reconExecTicks +=
			RF_ETIMER_VAL_US(reconDesc->recon_exec_timer);
		if (reconDesc->reconExecTicks > reconDesc->maxReconExecTicks)
			reconDesc->maxReconExecTicks =
				reconDesc->reconExecTicks;
		if (reconDesc->reconExecTicks >= MAX_RECON_EXEC_USECS) {
			/* we've been running too long.  delay for
			 * RECON_DELAY_MS */
#if RF_RECON_STATS > 0
			reconDesc->numReconExecDelays++;
#endif				/* RF_RECON_STATS > 0 */

			status = rf_sleep("rfrecond", RECON_TIMO,
					  rctrl->eq_mutex);
			RF_ASSERT(status == EWOULDBLOCK);
			reconDesc->reconExecTicks = 0;
		}
	}

	stall_count = 0;
	while (!rctrl->eventQueue) {
#if RF_RECON_STATS > 0
		reconDesc->numReconEventWaits++;
#endif				/* RF_RECON_STATS > 0 */

		rf_timedwait_cond2(rctrl->eq_cv, rctrl->eq_mutex,
				   RF_EVENTQ_WAIT);

		stall_count++;

		if ((stall_count > 10) &&
		    rctrl->headSepCBList) {
			/* There is work to do on the callback list, and
			   we've waited long enough... */
			rf_WakeupHeadSepCBWaiters(raidPtr);
			stall_count = 0;
		}
		reconDesc->reconExecTicks = 0;	/* we've just waited */
	}

	reconDesc->reconExecTimerRunning = 1;
	if (RF_ETIMER_VAL_US(reconDesc->recon_exec_timer) != 0) {
		/* it moved!!  reset the timer. */
		RF_ETIMER_START(reconDesc->recon_exec_timer);
	}
	event = rctrl->eventQueue;
	rctrl->eventQueue = event->next;
	event->next = NULL;
	rctrl->eq_count--;

	/* q null and count==0 must be equivalent conditions */
	RF_ASSERT((rctrl->eventQueue == NULL) == (rctrl->eq_count == 0));
	rf_unlock_mutex2(rctrl->eq_mutex);
	return (event);
}
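
/*
 * Example (an illustrative sketch, not part of the driver): the
 * reconstruction thread is the consumer of this queue, and would drive
 * it with a loop along the lines below.  handle_event() stands in for
 * the real event dispatcher and is hypothetical.
 */
#if 0
static void
recon_event_loop(RF_RaidReconDesc_t *reconDesc)
{
	RF_ReconEvent_t *event;
	int done = 0;

	while (!done) {
		/* blocks until rf_CauseReconEvent() posts an event */
		event = rf_GetNextReconEvent(reconDesc);
		/* dispatch on event->type (hypothetical helper) */
		done = handle_event(reconDesc, event);
		/* return the descriptor to the pool */
		rf_FreeReconEventDesc(reconDesc->raidPtr, event);
	}
}
#endif
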
/* enqueues a reconstruction event on the indicated queue */
void
rf_CauseReconEvent(RF_Raid_t *raidPtr, RF_RowCol_t col, void *arg,
		   RF_Revent_t type)
{
	RF_ReconCtrl_t *rctrl = raidPtr->reconControl;
	RF_ReconEvent_t *event = GetReconEventDesc(raidPtr, col, arg, type);

	if (type == RF_REVENT_BUFCLEAR) {
		RF_ASSERT(col != rctrl->fcol);
	}
	RF_ASSERT(col >= 0 && col <= raidPtr->numCol);
	rf_lock_mutex2(rctrl->eq_mutex);
	/* q null and count==0 must be equivalent conditions */
	RF_ASSERT((rctrl->eventQueue == NULL) == (rctrl->eq_count == 0));
	event->next = rctrl->eventQueue;
	rctrl->eventQueue = event;
	rctrl->eq_count++;
	rf_broadcast_cond2(rctrl->eq_cv);
	rf_unlock_mutex2(rctrl->eq_mutex);
}
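
/*
 * Note that rf_CauseReconEvent() pushes onto the head of the list and
 * rf_GetNextReconEvent() also removes from the head, so despite the
 * "queue" naming the event list is serviced in LIFO order.
 *
 * Example (an illustrative sketch, not part of the driver): a disk-I/O
 * completion callback would typically wake the reconstruction thread
 * like this, where rbuf is the RF_ReconBuffer_t for the finished read:
 *
 *	rf_CauseReconEvent(raidPtr, rbuf->col, rbuf, RF_REVENT_READDONE);
 */
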
/* allocates and initializes a recon event descriptor */
static RF_ReconEvent_t *
GetReconEventDesc(RF_Raid_t *raidPtr, RF_RowCol_t col, void *arg, RF_Revent_t type)
{
	RF_ReconEvent_t *t;

	t = pool_get(&raidPtr->pools.revent, PR_WAITOK);
	t->col = col;
	t->arg = arg;
	t->type = type;
	t->next = NULL;
	return (t);
}

/*
  rf_DrainReconEventQueue() -- used in the event of a reconstruction
  problem, this function simply drains all pending events from the
  reconstruction event queue.
 */

void
rf_DrainReconEventQueue(RF_RaidReconDesc_t *reconDesc)
{
	RF_ReconCtrl_t *rctrl = reconDesc->raidPtr->reconControl;
	RF_ReconEvent_t *event;

	rf_lock_mutex2(rctrl->eq_mutex);
	while (rctrl->eventQueue != NULL) {

		event = rctrl->eventQueue;
		rctrl->eventQueue = event->next;
		event->next = NULL;
		rctrl->eq_count--;
		/* dump it */
		rf_FreeReconEventDesc(reconDesc->raidPtr, event);
	}
	rf_unlock_mutex2(rctrl->eq_mutex);
}
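
/*
 * Note that only the event descriptors themselves are returned to the
 * pool here; the opaque 'arg' each event carries is not freed, and
 * remains the responsibility of whoever posted the event.
 */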

void
rf_FreeReconEventDesc(RF_Raid_t *raidPtr, RF_ReconEvent_t *event)
{
	pool_put(&raidPtr->pools.revent, event);
}