/*	$NetBSD: sc_wrap.c,v 1.18 2001/11/14 18:15:30 thorpej Exp $	*/

/*
 * This driver is slow!  Need to rewrite.
 */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/malloc.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsiconf.h>
#include <dev/scsipi/scsi_message.h>

#include <newsmips/dev/scsireg.h>
#include <newsmips/dev/dmac_0448.h>
#include <newsmips/dev/screg_1185.h>

#include <machine/adrsmap.h>
#include <machine/autoconf.h>
#include <machine/machConst.h>

#include <mips/cache.h>

static int cxd1185_match __P((struct device *, struct cfdata *, void *));
static void cxd1185_attach __P((struct device *, struct device *, void *));

struct cfattach sc_ca = {
	sizeof(struct sc_softc), cxd1185_match, cxd1185_attach
};

void cxd1185_init __P((struct sc_softc *));
static void free_scb __P((struct sc_softc *, struct sc_scb *));
static struct sc_scb *get_scb __P((struct sc_softc *, int));
static void sc_scsipi_request __P((struct scsipi_channel *,
					scsipi_adapter_req_t, void *));
static int sc_poll __P((struct sc_softc *, int, int));
static void sc_sched __P((struct sc_softc *));
void sc_done __P((struct sc_scb *));
int sc_intr __P((void *));
static void cxd1185_timeout __P((void *));

extern void sc_send __P((struct sc_scb *, int, int));
extern int scintr __P((void));
extern void scsi_hardreset __P((void));
extern int sc_busy __P((struct sc_softc *, int));
extern paddr_t kvtophys __P((vaddr_t));

static int sc_disconnect = IDT_DISCON;

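/*
 * Autoconfiguration match routine: accept only nodes named "sc".
 */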
int
cxd1185_match(parent, cf, aux)
	struct device *parent;
	struct cfdata *cf;
	void *aux;
{
	struct confargs *ca = aux;

	if (strcmp(ca->ca_name, "sc"))
		return 0;

	return 1;
}

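/*
 * Attach the controller: set up the scsipi adapter and channel, put all
 * scbs on the free list, reset the hardware, establish the interrupt
 * handler and attach the SCSI bus.
 */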
void
cxd1185_attach(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
	struct sc_softc *sc = (void *)self;
	struct sc_scb *scb;
	int i, intlevel;

	intlevel = sc->sc_dev.dv_cfdata->cf_level;
	if (intlevel == -1) {
#if 0
		printf(": interrupt level not configured\n");
		return;
#else
		printf(": interrupt level not configured; using");
		intlevel = 0;
#endif
	}
	printf(" level %d\n", intlevel);

	if (sc_idenr & 0x08)
		sc->scsi_1185AQ = 1;
	else
		sc->scsi_1185AQ = 0;

	sc->sc_adapter.adapt_dev = &sc->sc_dev;
	sc->sc_adapter.adapt_nchannels = 1;
	sc->sc_adapter.adapt_openings = 7;
	sc->sc_adapter.adapt_max_periph = 1;
	sc->sc_adapter.adapt_ioctl = NULL;
	sc->sc_adapter.adapt_minphys = minphys;
	sc->sc_adapter.adapt_request = sc_scsipi_request;

	memset(&sc->sc_channel, 0, sizeof(sc->sc_channel));
	sc->sc_channel.chan_adapter = &sc->sc_adapter;
	sc->sc_channel.chan_bustype = &scsi_bustype;
	sc->sc_channel.chan_channel = 0;
	sc->sc_channel.chan_ntargets = 8;
	sc->sc_channel.chan_nluns = 8;
	sc->sc_channel.chan_id = 7;

	TAILQ_INIT(&sc->ready_list);
	TAILQ_INIT(&sc->free_list);

	scb = sc->sc_scb;
	for (i = 0; i < 24; i++) {	/* XXX 24 */
		TAILQ_INSERT_TAIL(&sc->free_list, scb, chain);
		scb++;
	}

	cxd1185_init(sc);
	DELAY(100000);

	hb_intr_establish(intlevel, IPL_BIO, sc_intr, sc);

	config_found(&sc->sc_dev, &sc->sc_channel, scsiprint);
}

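/*
 * Initialize controller state: mark all targets idle and reset the bus.
 */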
void
cxd1185_init(sc)
	struct sc_softc *sc;
{
	int i;

	for (i = 0; i < 8; i++)
		sc->inuse[i] = 0;

	scsi_hardreset();
}

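/*
 * Return an scb to the free list, waking any thread waiting for one.
 */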
void
free_scb(sc, scb)
	struct sc_softc *sc;
	struct sc_scb *scb;
{
	int s;

	s = splbio();

	TAILQ_INSERT_HEAD(&sc->free_list, scb, chain);

	/*
	 * If there were none, wake anybody waiting for one to come free,
	 * starting with queued entries.
	 */
	if (scb->chain.tqe_next == 0)
		wakeup(&sc->free_list);

	splx(s);
}

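/*
 * Allocate an scb from the free list.  Unless XS_CTL_NOSLEEP is set,
 * sleep until one becomes available.
 */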
struct sc_scb *
get_scb(sc, flags)
	struct sc_softc *sc;
	int flags;
{
	int s;
	struct sc_scb *scb;

	s = splbio();

	while ((scb = sc->free_list.tqh_first) == NULL &&
		(flags & XS_CTL_NOSLEEP) == 0)
		tsleep(&sc->free_list, PRIBIO, "sc_scb", 0);
	if (scb) {
		TAILQ_REMOVE(&sc->free_list, scb, chain);
	}

	splx(s);
	return scb;
}

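/*
 * scsipi entry point: queue a transfer on the ready list and kick the
 * scheduler; polled transfers are completed synchronously here.
 */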
void
sc_scsipi_request(chan, req, arg)
	struct scsipi_channel *chan;
	scsipi_adapter_req_t req;
	void *arg;
{
	struct scsipi_xfer *xs;
	struct scsipi_periph *periph;
	struct sc_softc *sc = (void *)chan->chan_adapter->adapt_dev;
	struct sc_scb *scb;
	int flags, s;
	int target;

	switch (req) {
	case ADAPTER_REQ_RUN_XFER:
		xs = arg;
		periph = xs->xs_periph;

		flags = xs->xs_control;
		if ((scb = get_scb(sc, flags)) == NULL)
			panic("sc_scsipi_request: no scb");

		scb->xs = xs;
		scb->flags = 0;
		scb->sc_ctag = 0;
		scb->sc_coffset = 0;
		scb->istatus = 0;
		scb->tstatus = 0;
		scb->message = 0;
		bzero(scb->msgbuf, sizeof(scb->msgbuf));

		s = splbio();

		TAILQ_INSERT_TAIL(&sc->ready_list, scb, chain);
		sc_sched(sc);
		splx(s);

		if (flags & XS_CTL_POLL) {
			target = periph->periph_target;
			if (sc_poll(sc, target, xs->timeout)) {
				printf("sc: timeout (retry)\n");
				if (sc_poll(sc, target, xs->timeout)) {
					printf("sc: timeout\n");
				}
			}
			/* called during autoconfig only... */
			mips_dcache_wbinv_all();	/* Flush DCache */
		}
		return;
	case ADAPTER_REQ_GROW_RESOURCES:
		/* XXX Not supported. */
		return;
	case ADAPTER_REQ_SET_XFER_MODE:
		/* XXX Not supported. */
		return;
	}
}

/*
 * Used when interrupt driven I/O isn't allowed, e.g. during boot.
 */
int
sc_poll(sc, chan, count)
	struct sc_softc *sc;
	int chan, count;
{
	volatile u_char *int_stat = (void *)INTST1;
	volatile u_char *int_clear = (void *)INTCLR1;

	while (sc_busy(sc, chan)) {
		if (*int_stat & INTST1_DMA) {
			*int_clear = INTST1_DMA;
			if (dmac_gstat & CH_INT(CH_SCSI)) {
				if (dmac_gstat & CH_MRQ(CH_SCSI)) {
					DELAY(50);
					if (dmac_gstat & CH_MRQ(CH_SCSI))
						printf("dma_poll\n");
				}
				DELAY(10);
				scintr();
			}
		}
		DELAY(1000);
		count--;
		if (count <= 0)
			return 1;
	}
	return 0;
}

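/*
 * Start as many queued transfers as possible: walk the ready list,
 * skip targets that are already busy, build the DMA page map for each
 * transfer and hand it to sc_send().
 */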
void
sc_sched(sc)
	struct sc_softc *sc;
{
	struct scsipi_xfer *xs;
	struct scsipi_periph *periph;
	int ie = 0;
	int flags;
	int chan, lun;
	struct sc_scb *scb, *nextscb;

	scb = sc->ready_list.tqh_first;
start:
	if (scb == NULL)
		return;

	xs = scb->xs;
	periph = xs->xs_periph;
	chan = periph->periph_target;
	flags = xs->xs_control;

	if (cold)
		flags |= XS_CTL_POLL;

	if (sc->inuse[chan]) {
		scb = scb->chain.tqe_next;
		goto start;
	}
	sc->inuse[chan] = 1;

	if (flags & XS_CTL_RESET)
		printf("SCSI RESET\n");

	lun = periph->periph_lun;

	scb->identify = MSG_IDENT | sc_disconnect | (lun & IDT_DRMASK);
	scb->sc_ctrnscnt = xs->datalen;

	/* make va->pa mapping table for dma */
	if (xs->datalen > 0) {
		int pages, offset;
		int i, pn;
		vaddr_t va;

		/* bzero(&sc->sc_map[chan], sizeof(struct sc_map)); */

		va = (vaddr_t)xs->data;

		offset = va & PGOFSET;
		pages = (offset + xs->datalen + NBPG - 1) >> PGSHIFT;
		if (pages >= NSCMAP)
			panic("sc_map: Too many pages");

		for (i = 0; i < pages; i++) {
			pn = kvtophys(va) >> PGSHIFT;
			sc->sc_map[chan].mp_addr[i] = pn;
			va += NBPG;
		}

		sc->sc_map[chan].mp_offset = offset;
		sc->sc_map[chan].mp_pages = pages;
		scb->sc_map = &sc->sc_map[chan];
	}

	if ((flags & XS_CTL_POLL) == 0)
		ie = SCSI_INTEN;

	if (xs->data)
		scb->sc_cpoint = (void *)xs->data;
	else
		scb->sc_cpoint = scb->msgbuf;
	scb->scb_softc = sc;

	callout_reset(&scb->xs->xs_callout, hz * 10, cxd1185_timeout, scb);
	sc_send(scb, chan, ie);
	callout_stop(&scb->xs->xs_callout);

	nextscb = scb->chain.tqe_next;

	TAILQ_REMOVE(&sc->ready_list, scb, chain);

	scb = nextscb;

	goto start;
}

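/*
 * Transfer completion: translate initiator/target status into scsipi
 * error codes, notify the midlayer, release the scb and restart the
 * scheduler.
 */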
void
sc_done(scb)
	struct sc_scb *scb;
{
	struct scsipi_xfer *xs = scb->xs;
	struct scsipi_periph *periph = xs->xs_periph;
	struct sc_softc *sc = (void *)periph->periph_channel->chan_adapter->adapt_dev;

	xs->resid = 0;
	xs->status = 0;

	if (scb->istatus != INST_EP) {
		if (scb->istatus == (INST_EP|INST_TO))
			xs->error = XS_SELTIMEOUT;
		else {
			printf("SC(i): [istatus=0x%x, tstatus=0x%x]\n",
				scb->istatus, scb->tstatus);
			xs->error = XS_DRIVER_STUFFUP;
		}
	}

	switch (scb->tstatus) {

	case TGST_GOOD:
		break;

	case TGST_CC:
		xs->status = SCSI_CHECK;
		if (xs->error == 0)
			xs->error = XS_BUSY;
		break;

	default:
		printf("SC(t): [istatus=0x%x, tstatus=0x%x]\n",
			scb->istatus, scb->tstatus);
		break;
	}

	scsipi_done(xs);
	free_scb(sc, scb);
	sc->inuse[periph->periph_target] = 0;
	sc_sched(sc);
}

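/*
 * Hardware interrupt handler: ignore interrupts that are not for the
 * SCSI DMA channel, wait briefly for a pending memory request to drain,
 * then let scintr() do the real work.
 */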
int
sc_intr(v)
	void *v;
{
	/* struct sc_softc *sc = v; */
	volatile u_char *gsp = (u_char *)DMAC_GSTAT;
	u_int gstat = *gsp;
	int mrqb, i;

	if ((gstat & CH_INT(CH_SCSI)) == 0)
		return 0;

	/*
	 * When the DMA interrupt occurs, some data may remain
	 * untransferred; wait for the transfer to complete.
	 */
	mrqb = (gstat & CH_INT(CH_SCSI)) << 1;
	if (gstat & mrqb) {
		/*
		 * XXX SHOULD USE DELAY()
		 */
		for (i = 0; i < 50; i++)
			;
		if (*gsp & mrqb)
			printf("sc_intr: MRQ\n");
	}
	scintr();

	return 1;
}


#if 0
/*
 * SCOP_RSENSE request
 */
void
scop_rsense(intr, sc_param, lun, ie, count, param)
	register int intr;
	register struct scsi *sc_param;
	register int lun;
	register int ie;
	register int count;
	register caddr_t param;
{
	bzero(sc_param, sizeof(struct scsi));
	sc_param->identify = MSG_IDENT | sc_disconnect | (lun & IDT_DRMASK);
	sc_param->sc_lun = lun;

	sc_param->sc_cpoint = (u_char *)param;
	sc_param->sc_ctrnscnt = count;

	/* sc_cdb */
	sc_param->sc_opcode = SCOP_RSENSE;
	sc_param->sc_count = count;

	sc_go(intr, sc_param, ie, sc_param);
}
#endif

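/*
 * Watchdog callout: report a transfer that has not completed within its
 * timeout.  Recovery (aborting the transfer) is not implemented yet.
 */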
void
cxd1185_timeout(arg)
	void *arg;
{
	struct sc_scb *scb = arg;
	struct scsipi_xfer *xs = scb->xs;
	struct scsipi_periph *periph = xs->xs_periph;
	int chan;

	chan = periph->periph_target;

	printf("sc: timeout ch=%d\n", chan);

	/* XXX abort transfer and ... */
}