/*	$NetBSD: sc_wrap.c,v 1.20 2002/10/02 04:27:52 thorpej Exp $	*/

/*
 * This driver is slow!  Need to rewrite.
 */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/malloc.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsiconf.h>
#include <dev/scsipi/scsi_message.h>

#include <newsmips/dev/scsireg.h>
#include <newsmips/dev/dmac_0448.h>
#include <newsmips/dev/screg_1185.h>

#include <machine/adrsmap.h>
#include <machine/autoconf.h>
#include <machine/machConst.h>

#include <mips/cache.h>

static int cxd1185_match __P((struct device *, struct cfdata *, void *));
static void cxd1185_attach __P((struct device *, struct device *, void *));

CFATTACH_DECL(sc, sizeof(struct sc_softc),
    cxd1185_match, cxd1185_attach, NULL, NULL);

void cxd1185_init __P((struct sc_softc *));
static void free_scb __P((struct sc_softc *, struct sc_scb *));
static struct sc_scb *get_scb __P((struct sc_softc *, int));
static void sc_scsipi_request __P((struct scsipi_channel *,
					scsipi_adapter_req_t, void *));
static int sc_poll __P((struct sc_softc *, int, int));
static void sc_sched __P((struct sc_softc *));
void sc_done __P((struct sc_scb *));
int sc_intr __P((void *));
static void cxd1185_timeout __P((void *));

extern void sc_send __P((struct sc_scb *, int, int));
extern int scintr __P((void));
extern void scsi_hardreset __P((void));
extern int sc_busy __P((struct sc_softc *, int));
extern paddr_t kvtophys __P((vaddr_t));

static int sc_disconnect = IDT_DISCON;

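/*
 * Match: the autoconfiguration code passes a confargs; accept only
 * the node named "sc".
 */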
int
cxd1185_match(parent, cf, aux)
	struct device *parent;
	struct cfdata *cf;
	void *aux;
{
	struct confargs *ca = aux;

	if (strcmp(ca->ca_name, "sc"))
		return 0;

	return 1;
}

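/*
 * Attach: look up the configured interrupt level, fill in the scsipi
 * adapter and channel structures, put all scbs on the free list,
 * reset the controller, hook up the interrupt handler at IPL_BIO and
 * attach the SCSI bus.
 */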
void
cxd1185_attach(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
	struct sc_softc *sc = (void *)self;
	struct sc_scb *scb;
	int i, intlevel;

	intlevel = sc->sc_dev.dv_cfdata->cf_level;
	if (intlevel == -1) {
#if 0
		printf(": interrupt level not configured\n");
		return;
#else
		printf(": interrupt level not configured; using");
		intlevel = 0;
#endif
	}
	printf(" level %d\n", intlevel);

	if (sc_idenr & 0x08)
		sc->scsi_1185AQ = 1;
	else
		sc->scsi_1185AQ = 0;

	sc->sc_adapter.adapt_dev = &sc->sc_dev;
	sc->sc_adapter.adapt_nchannels = 1;
	sc->sc_adapter.adapt_openings = 7;
	sc->sc_adapter.adapt_max_periph = 1;
	sc->sc_adapter.adapt_ioctl = NULL;
	sc->sc_adapter.adapt_minphys = minphys;
	sc->sc_adapter.adapt_request = sc_scsipi_request;

	memset(&sc->sc_channel, 0, sizeof(sc->sc_channel));
	sc->sc_channel.chan_adapter = &sc->sc_adapter;
	sc->sc_channel.chan_bustype = &scsi_bustype;
	sc->sc_channel.chan_channel = 0;
	sc->sc_channel.chan_ntargets = 8;
	sc->sc_channel.chan_nluns = 8;
	sc->sc_channel.chan_id = 7;

	TAILQ_INIT(&sc->ready_list);
	TAILQ_INIT(&sc->free_list);

	scb = sc->sc_scb;
	for (i = 0; i < 24; i++) {	/* XXX 24 */
		TAILQ_INSERT_TAIL(&sc->free_list, scb, chain);
		scb++;
	}

	cxd1185_init(sc);
	DELAY(100000);

	hb_intr_establish(intlevel, IPL_BIO, sc_intr, sc);

	config_found(&sc->sc_dev, &sc->sc_channel, scsiprint);
}

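/*
 * Mark all eight targets idle and hard-reset the SCSI controller.
 */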
void
cxd1185_init(sc)
	struct sc_softc *sc;
{
	int i;

	for (i = 0; i < 8; i++)
		sc->inuse[i] = 0;

	scsi_hardreset();
}

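/*
 * Return an scb to the free list; if the list was empty, wake up
 * anybody sleeping in get_scb().
 */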
void
free_scb(sc, scb)
	struct sc_softc *sc;
	struct sc_scb *scb;
{
	int s;

	s = splbio();

	TAILQ_INSERT_HEAD(&sc->free_list, scb, chain);

	/*
	 * If the list was empty before this insertion, wake anybody
	 * waiting for an scb to come free.
	 */
	if (scb->chain.tqe_next == 0)
		wakeup(&sc->free_list);

	splx(s);
}

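/*
 * Take an scb off the free list.  Unless XS_CTL_NOSLEEP is set,
 * sleep until one becomes available; in the no-sleep case NULL may
 * be returned.
 */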
struct sc_scb *
get_scb(sc, flags)
	struct sc_softc *sc;
	int flags;
{
	int s;
	struct sc_scb *scb;

	s = splbio();

	while ((scb = sc->free_list.tqh_first) == NULL &&
		(flags & XS_CTL_NOSLEEP) == 0)
		tsleep(&sc->free_list, PRIBIO, "sc_scb", 0);
	if (scb) {
		TAILQ_REMOVE(&sc->free_list, scb, chain);
	}

	splx(s);
	return scb;
}

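/*
 * scsipi adapter request entry point.  For ADAPTER_REQ_RUN_XFER,
 * set up an scb for the transfer, queue it on the ready list and
 * kick the scheduler; polled transfers are spun to completion here.
 * The other request types are not supported.
 */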
void
sc_scsipi_request(chan, req, arg)
	struct scsipi_channel *chan;
	scsipi_adapter_req_t req;
	void *arg;
{
	struct scsipi_xfer *xs;
	struct scsipi_periph *periph;
	struct sc_softc *sc = (void *)chan->chan_adapter->adapt_dev;
	struct sc_scb *scb;
	int flags, s;
	int target;

	switch (req) {
	case ADAPTER_REQ_RUN_XFER:
		xs = arg;
		periph = xs->xs_periph;

		flags = xs->xs_control;
		if ((scb = get_scb(sc, flags)) == NULL)
			panic("sc_scsipi_request: no scb");

		scb->xs = xs;
		scb->flags = 0;
		scb->sc_ctag = 0;
		scb->sc_coffset = 0;
		scb->istatus = 0;
		scb->tstatus = 0;
		scb->message = 0;
		bzero(scb->msgbuf, sizeof(scb->msgbuf));

		s = splbio();

		TAILQ_INSERT_TAIL(&sc->ready_list, scb, chain);
		sc_sched(sc);
		splx(s);

		if (flags & XS_CTL_POLL) {
			target = periph->periph_target;
			if (sc_poll(sc, target, xs->timeout)) {
				printf("sc: timeout (retry)\n");
				if (sc_poll(sc, target, xs->timeout)) {
					printf("sc: timeout\n");
				}
			}
			/* called during autoconfig only... */
			mips_dcache_wbinv_all();	/* Flush DCache */
		}
		return;
	case ADAPTER_REQ_GROW_RESOURCES:
		/* XXX Not supported. */
		return;
	case ADAPTER_REQ_SET_XFER_MODE:
		/* XXX Not supported. */
		return;
	}
}

/*
 * Used when interrupt driven I/O isn't allowed, e.g. during boot.
 */
int
sc_poll(sc, chan, count)
	struct sc_softc *sc;
	int chan, count;
{
	volatile u_char *int_stat = (void *)INTST1;
	volatile u_char *int_clear = (void *)INTCLR1;

	while (sc_busy(sc, chan)) {
		if (*int_stat & INTST1_DMA) {
		    *int_clear = INTST1_DMA;
		    if (dmac_gstat & CH_INT(CH_SCSI)) {
			if (dmac_gstat & CH_MRQ(CH_SCSI)) {
			    DELAY(50);
			    if (dmac_gstat & CH_MRQ(CH_SCSI))
				printf("dma_poll\n");
			}
			DELAY(10);
			scintr();
		    }
		}
		DELAY(1000);
		count--;
		if (count <= 0)
			return 1;
	}
	return 0;
}

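/*
 * Walk the ready list and start a transfer on any target that is not
 * already busy: build the va->pa page map for DMA, arm a 10 second
 * timeout and hand the scb to sc_send().
 */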
void
sc_sched(sc)
	struct sc_softc *sc;
{
	struct scsipi_xfer *xs;
	struct scsipi_periph *periph;
	int ie = 0;
	int flags;
	int chan, lun;
	struct sc_scb *scb, *nextscb;

	scb = sc->ready_list.tqh_first;
start:
	if (scb == NULL)
		return;

	xs = scb->xs;
	periph = xs->xs_periph;
	chan = periph->periph_target;
	flags = xs->xs_control;

	if (cold)
		flags |= XS_CTL_POLL;

	if (sc->inuse[chan]) {
		scb = scb->chain.tqe_next;
		goto start;
	}
	sc->inuse[chan] = 1;

	if (flags & XS_CTL_RESET)
		printf("SCSI RESET\n");

	lun = periph->periph_lun;

	scb->identify = MSG_IDENT | sc_disconnect | (lun & IDT_DRMASK);
	scb->sc_ctrnscnt = xs->datalen;

	/* make va->pa mapping table for dma */
	if (xs->datalen > 0) {
		int pages, offset;
		int i, pn;
		vaddr_t va;

		/* bzero(&sc->sc_map[chan], sizeof(struct sc_map)); */

		va = (vaddr_t)xs->data;

		offset = va & PGOFSET;
		pages = (offset + xs->datalen + NBPG - 1) >> PGSHIFT;
		if (pages >= NSCMAP)
			panic("sc_map: Too many pages");

		for (i = 0; i < pages; i++) {
			pn = kvtophys(va) >> PGSHIFT;
			sc->sc_map[chan].mp_addr[i] = pn;
			va += NBPG;
		}

		sc->sc_map[chan].mp_offset = offset;
		sc->sc_map[chan].mp_pages = pages;
		scb->sc_map = &sc->sc_map[chan];
	}

	if ((flags & XS_CTL_POLL) == 0)
		ie = SCSI_INTEN;

	if (xs->data)
		scb->sc_cpoint = (void *)xs->data;
	else
		scb->sc_cpoint = scb->msgbuf;
	scb->scb_softc = sc;

	callout_reset(&scb->xs->xs_callout, hz * 10, cxd1185_timeout, scb);
	sc_send(scb, chan, ie);
	callout_stop(&scb->xs->xs_callout);

	nextscb = scb->chain.tqe_next;

	TAILQ_REMOVE(&sc->ready_list, scb, chain);

	scb = nextscb;

	goto start;
}

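/*
 * Transfer completion: translate the controller's interrupt/target
 * status into scsipi error codes, hand the xfer back to the midlayer,
 * release the scb and the target, then run the scheduler again.
 */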
void
sc_done(scb)
	struct sc_scb *scb;
{
	struct scsipi_xfer *xs = scb->xs;
	struct scsipi_periph *periph = xs->xs_periph;
	struct sc_softc *sc = (void *)periph->periph_channel->chan_adapter->adapt_dev;

	xs->resid = 0;
	xs->status = 0;

	if (scb->istatus != INST_EP) {
		if (scb->istatus == (INST_EP|INST_TO))
			xs->error = XS_SELTIMEOUT;
		else {
			printf("SC(i): [istatus=0x%x, tstatus=0x%x]\n",
				scb->istatus, scb->tstatus);
			xs->error = XS_DRIVER_STUFFUP;
		}
	}

	switch (scb->tstatus) {

	case TGST_GOOD:
		break;

	case TGST_CC:
		xs->status = SCSI_CHECK;
		if (xs->error == 0)
			xs->error = XS_BUSY;
		break;

	default:
		printf("SC(t): [istatus=0x%x, tstatus=0x%x]\n",
			scb->istatus, scb->tstatus);
		break;
	}

	scsipi_done(xs);
	free_scb(sc, scb);
	sc->inuse[periph->periph_target] = 0;
	sc_sched(sc);
}

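/*
 * Interrupt handler: ignore interrupts that are not for the SCSI DMA
 * channel, wait out any pending memory request, then let scintr()
 * do the real work.
 */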
int
sc_intr(v)
	void *v;
{
	/* struct sc_softc *sc = v; */
	volatile u_char *gsp = (u_char *)DMAC_GSTAT;
	u_int gstat = *gsp;
	int mrqb, i;

	if ((gstat & CH_INT(CH_SCSI)) == 0)
		return 0;

	/*
	 * When the DMA interrupt occurs, some data may remain
	 * untransferred; wait for the transfer to complete.
	 */
	mrqb = (gstat & CH_INT(CH_SCSI)) << 1;
	if (gstat & mrqb) {
		/*
		 * XXX SHOULD USE DELAY()
		 */
		for (i = 0; i < 50; i++)
			;
		if (*gsp & mrqb)
			printf("sc_intr: MRQ\n");
	}
	scintr();

	return 1;
}


#if 0
/*
 * SCOP_RSENSE request
 */
void
scop_rsense(intr, sc_param, lun, ie, count, param)
	register int intr;
	register struct scsi *sc_param;
	register int lun;
	register int ie;
	register int count;
	register caddr_t param;
{
	bzero(sc_param, sizeof(struct scsi));
	sc_param->identify = MSG_IDENT | sc_disconnect | (lun & IDT_DRMASK);
	sc_param->sc_lun = lun;

	sc_param->sc_cpoint = (u_char *)param;
	sc_param->sc_ctrnscnt = count;

	/* sc_cdb */
	sc_param->sc_opcode = SCOP_RSENSE;
	sc_param->sc_count = count;

	sc_go(intr, sc_param, ie, sc_param);
}
#endif

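/*
 * Callout handler fired when a command does not complete within its
 * timeout; it currently only reports the stuck target and does not
 * abort the transfer (see the XXX below).
 */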
void
cxd1185_timeout(arg)
	void *arg;
{
	struct sc_scb *scb = arg;
	struct scsipi_xfer *xs = scb->xs;
	struct scsipi_periph *periph = xs->xs_periph;
	int chan;

	chan = periph->periph_target;

	printf("sc: timeout ch=%d\n", chan);

	/* XXX abort transfer and ... */
}
477