/*	$NetBSD: rf_revent.c,v 1.10 2001/11/13 07:11:16 lukem Exp $	*/
/*
 * Copyright (c) 1995 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Author:
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */
/*
 * revent.c -- reconstruction event handling code
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: rf_revent.c,v 1.10 2001/11/13 07:11:16 lukem Exp $");

#include <sys/errno.h>

#include "rf_raid.h"
#include "rf_revent.h"
#include "rf_etimer.h"
#include "rf_general.h"
#include "rf_freelist.h"
#include "rf_desc.h"
#include "rf_shutdown.h"

static RF_FreeList_t *rf_revent_freelist;
#define RF_MAX_FREE_REVENT 128
#define RF_REVENT_INC        8
#define RF_REVENT_INITIAL    8
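/*
 * Free list tuning: at most RF_MAX_FREE_REVENT descriptors are kept on
 * the free list, the list grows RF_REVENT_INC entries at a time, and it
 * is primed with RF_REVENT_INITIAL entries in rf_ConfigureReconEvent().
 */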



#include <sys/proc.h>
#include <sys/kernel.h>

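/*
 * DO_WAIT must be called with eq_mutex held: ltsleep() releases the
 * interlock while the thread sleeps on the event queue and reacquires
 * it before returning.  DO_SIGNAL wakes every thread sleeping on the
 * queue's wait channel.
 */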
#define DO_WAIT(_rc)  \
	ltsleep(&(_rc)->eventQueue, PRIBIO,  "raidframe eventq", \
		0, &((_rc)->eq_mutex))

#define DO_SIGNAL(_rc)     wakeup(&(_rc)->eventQueue)


static void rf_ShutdownReconEvent(void *);

static RF_ReconEvent_t *
GetReconEventDesc(RF_RowCol_t row, RF_RowCol_t col,
    void *arg, RF_Revent_t type);

static void rf_ShutdownReconEvent(ignored)
	void   *ignored;
{
	RF_FREELIST_DESTROY(rf_revent_freelist, next, (RF_ReconEvent_t *));
}

int
rf_ConfigureReconEvent(listp)
	RF_ShutdownList_t **listp;
{
	int     rc;

	RF_FREELIST_CREATE(rf_revent_freelist, RF_MAX_FREE_REVENT,
	    RF_REVENT_INC, sizeof(RF_ReconEvent_t));
	if (rf_revent_freelist == NULL)
		return (ENOMEM);
	rc = rf_ShutdownCreate(listp, rf_ShutdownReconEvent, NULL);
	if (rc) {
		RF_ERRORMSG3("Unable to add to shutdown list file %s line %d rc=%d\n", __FILE__,
		    __LINE__, rc);
		rf_ShutdownReconEvent(NULL);
		return (rc);
	}
	RF_FREELIST_PRIME(rf_revent_freelist, RF_REVENT_INITIAL, next,
	    (RF_ReconEvent_t *));
	return (0);
}
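
/*
 * Minimal usage sketch (illustrative only; the real call site is in the
 * RAIDframe configuration path, and the variable names here are made up):
 * the caller hands in its shutdown list so that rf_ShutdownReconEvent()
 * runs automatically when the array is unconfigured.
 */
#if 0
	RF_ShutdownList_t *shutdown_list = NULL;	/* hypothetical list head */
	int rc;

	rc = rf_ConfigureReconEvent(&shutdown_list);
	if (rc != 0)
		return (rc);	/* ENOMEM or shutdown-list registration failure */
#endif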

/* returns the next reconstruction event, blocking the calling thread
 * until one becomes available.  eq_mutex is released while the thread
 * sleeps and is not held when this routine returns. */

RF_ReconEvent_t *
rf_GetNextReconEvent(reconDesc, row, continueFunc, continueArg)
	RF_RaidReconDesc_t *reconDesc;
	RF_RowCol_t row;
	void    (*continueFunc) (void *);
	void   *continueArg;
{
	RF_Raid_t *raidPtr = reconDesc->raidPtr;
	RF_ReconCtrl_t *rctrl = raidPtr->reconControl[row];
	RF_ReconEvent_t *event;

	RF_ASSERT(row >= 0 && row < raidPtr->numRow);
	RF_LOCK_MUTEX(rctrl->eq_mutex);
	/* q null and count==0 must be equivalent conditions */
	RF_ASSERT((rctrl->eventQueue == NULL) == (rctrl->eq_count == 0));

	rctrl->continueFunc = continueFunc;
	rctrl->continueArg = continueArg;


	/* ltsleep timeout value: secs = timo_val/hz.  The 'ticks' counted
	   in reconExecTicks below are event-timer microseconds, not
	   softclock ticks */

#define MAX_RECON_EXEC_USECS (100 * 1000)  /* 100 ms */
#define RECON_DELAY_MS 25
#define RECON_TIMO     ((RECON_DELAY_MS * hz) / 1000)
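	/* For example, with the traditional hz = 100, RECON_TIMO works out
	 * to (25 * 100) / 1000 = 2 softclock ticks, i.e. roughly a 20 ms
	 * sleep, taken once 100000 us (MAX_RECON_EXEC_USECS) of measured
	 * execution time have accumulated without blocking. */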

	/* we are not pre-emptible in the kernel, but we don't want to run
	 * forever.  If we run w/o blocking for more than MAX_RECON_EXEC_USECS
	 * microseconds of measured execution time, delay for RECON_DELAY_MS
	 * before continuing.  This may murder us with context switches, so
	 * we may need to increase both MAX_RECON_EXEC_USECS and
	 * RECON_DELAY_MS. */
	if (reconDesc->reconExecTimerRunning) {
		int     status;

		RF_ETIMER_STOP(reconDesc->recon_exec_timer);
		RF_ETIMER_EVAL(reconDesc->recon_exec_timer);
		reconDesc->reconExecTicks +=
			RF_ETIMER_VAL_US(reconDesc->recon_exec_timer);
		if (reconDesc->reconExecTicks > reconDesc->maxReconExecTicks)
			reconDesc->maxReconExecTicks =
				reconDesc->reconExecTicks;
		if (reconDesc->reconExecTicks >= MAX_RECON_EXEC_USECS) {
			/* we've been running too long.  delay for
			 * RECON_DELAY_MS */
#if RF_RECON_STATS > 0
			reconDesc->numReconExecDelays++;
#endif				/* RF_RECON_STATS > 0 */

			status = ltsleep(&reconDesc->reconExecTicks, PRIBIO,
					 "recon delay", RECON_TIMO,
					 &rctrl->eq_mutex);
			RF_ASSERT(status == EWOULDBLOCK);
			reconDesc->reconExecTicks = 0;
		}
	}
	while (!rctrl->eventQueue) {
#if RF_RECON_STATS > 0
		reconDesc->numReconEventWaits++;
#endif				/* RF_RECON_STATS > 0 */
		DO_WAIT(rctrl);
		reconDesc->reconExecTicks = 0;	/* we've just waited */
	}

	reconDesc->reconExecTimerRunning = 1;
	if (RF_ETIMER_VAL_US(reconDesc->recon_exec_timer) != 0) {
		/* it moved!!  reset the timer. */
		RF_ETIMER_START(reconDesc->recon_exec_timer);
	}
	event = rctrl->eventQueue;
	rctrl->eventQueue = event->next;
	event->next = NULL;
	rctrl->eq_count--;

	/* q null and count==0 must be equivalent conditions */
	RF_ASSERT((rctrl->eventQueue == NULL) == (rctrl->eq_count == 0));
	RF_UNLOCK_MUTEX(rctrl->eq_mutex);
	return (event);
}
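
/*
 * Consumer-side sketch (illustrative only): the reconstruction thread
 * loops roughly like this.  The dispatch routine named below is
 * hypothetical; whatever handles the event must eventually hand each
 * descriptor back via rf_FreeReconEventDesc().
 */
#if 0
	for (;;) {
		RF_ReconEvent_t *ev;

		ev = rf_GetNextReconEvent(reconDesc, row, continueFunc,
		    continueArg);
		HandleReconEvent(raidPtr, row, ev);	/* hypothetical dispatcher */
	}
#endif
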
/* enqueues a reconstruction event on the indicated queue */
void
rf_CauseReconEvent(raidPtr, row, col, arg, type)
	RF_Raid_t *raidPtr;
	RF_RowCol_t row;
	RF_RowCol_t col;
	void   *arg;
	RF_Revent_t type;
{
	RF_ReconCtrl_t *rctrl = raidPtr->reconControl[row];
	RF_ReconEvent_t *event = GetReconEventDesc(row, col, arg, type);

	if (type == RF_REVENT_BUFCLEAR) {
		RF_ASSERT(col != rctrl->fcol);
	}
	RF_ASSERT(row >= 0 && row < raidPtr->numRow && col >= 0 && col <= raidPtr->numCol);
	RF_LOCK_MUTEX(rctrl->eq_mutex);
	/* q null and count==0 must be equivalent conditions */
	RF_ASSERT((rctrl->eventQueue == NULL) == (rctrl->eq_count == 0));
	event->next = rctrl->eventQueue;
	rctrl->eventQueue = event;
	rctrl->eq_count++;
	RF_UNLOCK_MUTEX(rctrl->eq_mutex);

	DO_SIGNAL(rctrl);
}
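
/*
 * Producer-side sketch (illustrative only): a routine that has released a
 * reconstruction buffer for one of the surviving disks could post the
 * corresponding event like this (the RF_REVENT_BUFCLEAR assertion above
 * requires col != fcol).  The arg payload is event-type specific; NULL is
 * used here purely for illustration.
 */
#if 0
	rf_CauseReconEvent(raidPtr, row, col, NULL, RF_REVENT_BUFCLEAR);
#endif
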
/* allocates and initializes a recon event descriptor (the row argument
 * is currently unused) */
static RF_ReconEvent_t *
GetReconEventDesc(row, col, arg, type)
	RF_RowCol_t row;
	RF_RowCol_t col;
	void   *arg;
	RF_Revent_t type;
{
	RF_ReconEvent_t *t;

	RF_FREELIST_GET(rf_revent_freelist, t, next, (RF_ReconEvent_t *));
	if (t == NULL)
		return (NULL);
	t->col = col;
	t->arg = arg;
	t->type = type;
	return (t);
}

void
rf_FreeReconEventDesc(event)
	RF_ReconEvent_t *event;
{
	RF_FREELIST_FREE(rf_revent_freelist, event, next);
}