xref: /netbsd/sys/arch/newsmips/dev/sc_wrap.c (revision 6550d01e)
1 /*	$NetBSD: sc_wrap.c,v 1.31 2008/04/09 15:40:30 tsutsui Exp $	*/
2 
3 /*
4  * This driver is slow!  Need to rewrite.
5  */
6 
7 #include <sys/cdefs.h>
8 __KERNEL_RCSID(0, "$NetBSD: sc_wrap.c,v 1.31 2008/04/09 15:40:30 tsutsui Exp $");
9 
10 #include <sys/types.h>
11 #include <sys/param.h>
12 #include <sys/systm.h>
13 #include <sys/kernel.h>
14 #include <sys/device.h>
15 #include <sys/proc.h>
16 #include <sys/buf.h>
17 #include <sys/malloc.h>
18 
19 #include <uvm/uvm_extern.h>
20 
21 #include <dev/scsipi/scsi_all.h>
22 #include <dev/scsipi/scsipi_all.h>
23 #include <dev/scsipi/scsiconf.h>
24 #include <dev/scsipi/scsi_message.h>
25 
26 #include <newsmips/dev/hbvar.h>
27 #include <newsmips/dev/scsireg.h>
28 #include <newsmips/dev/dmac_0448.h>
29 #include <newsmips/dev/screg_1185.h>
30 
31 #include <machine/adrsmap.h>
32 #include <machine/autoconf.h>
33 #include <machine/machConst.h>
34 
35 #include <mips/cache.h>
36 
/* autoconf(9) match/attach glue */
static int cxd1185_match(device_t, cfdata_t, void *);
static void cxd1185_attach(device_t, device_t, void *);

CFATTACH_DECL_NEW(sc, sizeof(struct sc_softc),
    cxd1185_match, cxd1185_attach, NULL, NULL);

/* forward declarations for this file */
void cxd1185_init(struct sc_softc *);
static void free_scb(struct sc_softc *, struct sc_scb *);
static struct sc_scb *get_scb(struct sc_softc *, int);
static void sc_scsipi_request(struct scsipi_channel *,
    scsipi_adapter_req_t, void *);
static int sc_poll(struct sc_softc *, int, int);
static void sc_sched(struct sc_softc *);
void sc_done(struct sc_scb *);
int sc_intr(void *);
static void cxd1185_timeout(void *);

/* low-level SCSI/DMA routines implemented outside this file */
extern void sc_send(struct sc_scb *, int, int);
extern int scintr(void);
extern void scsi_hardreset(void);
extern int sc_busy(struct sc_softc *, int);
extern paddr_t kvtophys(vaddr_t);

/* IDENTIFY-message flag: permit targets to disconnect during a command */
static int sc_disconnect = IDT_DISCON;
62 int
63 cxd1185_match(device_t parent, cfdata_t cf, void *aux)
64 {
65 	struct hb_attach_args *ha = aux;
66 
67 	if (strcmp(ha->ha_name, "sc"))
68 		return 0;
69 
70 	return 1;
71 }
72 
/*
 * Attach the CXD1185 SCSI controller: register the adapter and channel
 * with the scsipi midlayer, seed the free scb list, reset the chip, and
 * hook up the DMA interrupt before probing the bus.
 */
void
cxd1185_attach(device_t parent, device_t self, void *aux)
{
	struct sc_softc *sc = device_private(self);
	struct hb_attach_args *ha = aux;
	struct sc_scb *scb;
	int i, intlevel;

	sc->sc_dev = self;

	intlevel = ha->ha_level;
	if (intlevel == -1) {
#if 0
		aprint_error(": interrupt level not configured\n");
		return;
#else
		/* Fall back to level 0 rather than failing the attach. */
		aprint_normal(": interrupt level not configured; using");
		intlevel = 0;
#endif
	}
	aprint_normal(" level %d\n", intlevel);

	/*
	 * Bit 3 of the chip ID register distinguishes the 1185AQ
	 * variant — presumably needed by the lower-level code; the
	 * flag is only set here.  TODO(review): confirm against the
	 * screg_1185 layer.
	 */
	if (sc_idenr & 0x08)
		sc->scsi_1185AQ = 1;
	else
		sc->scsi_1185AQ = 0;

	/* Describe this adapter to the scsipi midlayer. */
	sc->sc_adapter.adapt_dev = self;
	sc->sc_adapter.adapt_nchannels = 1;
	sc->sc_adapter.adapt_openings = 7;
	sc->sc_adapter.adapt_max_periph = 1;
	sc->sc_adapter.adapt_ioctl = NULL;
	sc->sc_adapter.adapt_minphys = minphys;
	sc->sc_adapter.adapt_request = sc_scsipi_request;

	/* One channel, 8 targets x 8 LUNs; the adapter itself is ID 7. */
	memset(&sc->sc_channel, 0, sizeof(sc->sc_channel));
	sc->sc_channel.chan_adapter = &sc->sc_adapter;
	sc->sc_channel.chan_bustype = &scsi_bustype;
	sc->sc_channel.chan_channel = 0;
	sc->sc_channel.chan_ntargets = 8;
	sc->sc_channel.chan_nluns = 8;
	sc->sc_channel.chan_id = 7;

	TAILQ_INIT(&sc->ready_list);
	TAILQ_INIT(&sc->free_list);

	/* Put every statically allocated scb on the free list. */
	scb = sc->sc_scb;
	for (i = 0; i < 24; i++) {	/* XXX 24 */
		TAILQ_INSERT_TAIL(&sc->free_list, scb, chain);
		scb++;
	}

	cxd1185_init(sc);
	DELAY(100000);	/* let the bus settle after the hard reset */

	hb_intr_establish(intlevel, INTEN1_DMA, IPL_BIO, sc_intr, sc);

	/* Probe and attach devices found on the SCSI bus. */
	config_found(self, &sc->sc_channel, scsiprint);
}
132 
133 void
134 cxd1185_init(struct sc_softc *sc)
135 {
136 	int i;
137 
138 	for (i = 0; i < 8; i++)
139 		sc->inuse[i] = 0;
140 
141 	scsi_hardreset();
142 }
143 
/*
 * Return an scb to the free list and wake up anybody sleeping in
 * get_scb() waiting for one.
 */
void
free_scb(struct sc_softc *sc, struct sc_scb *scb)
{
	int s;

	s = splbio();	/* the scb lists are also touched at interrupt time */

	TAILQ_INSERT_HEAD(&sc->free_list, scb, chain);

	/*
	 * If there were none, wake anybody waiting for one to come free,
	 * starting with queued entries.
	 */
	/* after INSERT_HEAD, a NULL tqe_next means the list was empty before */
	if (scb->chain.tqe_next == 0)
		wakeup(&sc->free_list);

	splx(s);
}
162 
163 struct sc_scb *
164 get_scb(struct sc_softc *sc, int flags)
165 {
166 	int s;
167 	struct sc_scb *scb;
168 
169 	s = splbio();
170 
171 	while ((scb = sc->free_list.tqh_first) == NULL &&
172 		(flags & XS_CTL_NOSLEEP) == 0)
173 		tsleep(&sc->free_list, PRIBIO, "sc_scb", 0);
174 	if (scb) {
175 		TAILQ_REMOVE(&sc->free_list, scb, chain);
176 	}
177 
178 	splx(s);
179 	return scb;
180 }
181 
/*
 * scsipi adapter request entry point.  For ADAPTER_REQ_RUN_XFER an scb
 * is allocated, initialized from the xfer, queued on the ready list and
 * the scheduler kicked; polled transfers are then waited for inline.
 * Resource growth and xfer-mode setting are not supported.
 */
void
sc_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
    void *arg)
{
	struct scsipi_xfer *xs;
	struct scsipi_periph *periph;
	struct sc_softc *sc = device_private(chan->chan_adapter->adapt_dev);
	struct sc_scb *scb;
	int flags, s;
	int target;

	switch (req) {
	case ADAPTER_REQ_RUN_XFER:
		xs = arg;
		periph = xs->xs_periph;

		flags = xs->xs_control;
		/*
		 * adapt_openings (7) should keep us within the 24 scbs,
		 * so a NULL return here indicates a driver bug.
		 */
		if ((scb = get_scb(sc, flags)) == NULL)
			panic("%s: no scb", __func__);

		scb->xs = xs;
		scb->flags = 0;
		scb->sc_ctag = 0;
		scb->sc_coffset = 0;
		scb->istatus = 0;
		scb->tstatus = 0;
		scb->message = 0;
		memset(scb->msgbuf, 0, sizeof(scb->msgbuf));

		s = splbio();

		TAILQ_INSERT_TAIL(&sc->ready_list, scb, chain);
		sc_sched(sc);
		splx(s);

		if (flags & XS_CTL_POLL) {
			/* busy-wait for completion; retry once on timeout */
			target = periph->periph_target;
			if (sc_poll(sc, target, xs->timeout)) {
				printf("sc: timeout (retry)\n");
				if (sc_poll(sc, target, xs->timeout)) {
					printf("sc: timeout\n");
				}
			}
			/* called during autoconfig only... */
			mips_dcache_wbinv_all();	/* Flush DCache */
		}
		return;
	case ADAPTER_REQ_GROW_RESOURCES:
		/* XXX Not supported. */
		return;
	case ADAPTER_REQ_SET_XFER_MODE:
		/* XXX Not supported. */
		return;
	}
}
237 
238 /*
239  * Used when interrupt driven I/O isn't allowed, e.g. during boot.
240  */
241 int
242 sc_poll(struct sc_softc *sc, int chan, int count)
243 {
244 	volatile uint8_t *int_stat = (void *)INTST1;
245 	volatile uint8_t *int_clear = (void *)INTCLR1;
246 
247 	while (sc_busy(sc, chan)) {
248 		if (*int_stat & INTST1_DMA) {
249 		    *int_clear = INTST1_DMA;
250 		    if (dmac_gstat & CH_INT(CH_SCSI)) {
251 			if (dmac_gstat & CH_MRQ(CH_SCSI)) {
252 			    DELAY(50);
253 			    if (dmac_gstat & CH_MRQ(CH_SCSI))
254 				printf("dma_poll\n");
255 			}
256 			DELAY(10);
257 			scintr();
258 		    }
259 		}
260 		DELAY(1000);
261 		count--;
262 		if (count <= 0)
263 			return 1;
264 	}
265 	return 0;
266 }
267 
/*
 * Scheduler: walk the ready list and start every queued transfer whose
 * target is not already busy.  Must be called at splbio() (all callers
 * either hold it or run from interrupt context).
 *
 * NOTE(review): sc_send() appears to be synchronous here — the callout
 * armed just before it is stopped immediately after, and the scb is
 * removed from the ready list on return.  Confirm against the
 * screg_1185 implementation.
 */
void
sc_sched(struct sc_softc *sc)
{
	struct scsipi_xfer *xs;
	struct scsipi_periph *periph;
	int ie = 0;
	int flags;
	int chan, lun;
	struct sc_scb *scb, *nextscb;

	scb = sc->ready_list.tqh_first;
start:
	if (scb == NULL)
		return;

	xs = scb->xs;
	periph = xs->xs_periph;
	chan = periph->periph_target;
	flags = xs->xs_control;

	/* Target busy: skip this scb, try the next one on the list. */
	if (sc->inuse[chan]) {
		scb = scb->chain.tqe_next;
		goto start;
	}
	sc->inuse[chan] = 1;

	if (flags & XS_CTL_RESET)
		printf("SCSI RESET\n");

	lun = periph->periph_lun;

	/* IDENTIFY message: LUN plus the global disconnect policy. */
	scb->identify = MSG_IDENT | sc_disconnect | (lun & IDT_DRMASK);
	scb->sc_ctrnscnt = xs->datalen;

	/* make va->pa mapping table for DMA */
	if (xs->datalen > 0) {
		uint32_t pn, pages, offset;
		int i;
		vaddr_t va;

#if 0
		memset(&sc->sc_map[chan], 0, sizeof(struct sc_map));
#endif

		va = (vaddr_t)xs->data;

		/* page-frame list covering [va, va + datalen) */
		offset = va & PGOFSET;
		pages = (offset + xs->datalen + PAGE_SIZE -1 ) >> PGSHIFT;
		if (pages >= NSCMAP)
			panic("sc_map: Too many pages");

		for (i = 0; i < pages; i++) {
			pn = kvtophys(va) >> PGSHIFT;
			sc->sc_map[chan].mp_addr[i] = pn;
			va += PAGE_SIZE;
		}

		sc->sc_map[chan].mp_offset = offset;
		sc->sc_map[chan].mp_pages = pages;
		scb->sc_map = &sc->sc_map[chan];
	}

	/* enable the SCSI interrupt unless this is a polled transfer */
	if ((flags & XS_CTL_POLL) == 0)
		ie = SCSI_INTEN;

	/* with no data phase, point the chip at the message buffer */
	if (xs->data)
		scb->sc_cpoint = (void *)xs->data;
	else
		scb->sc_cpoint = scb->msgbuf;
	scb->scb_softc = sc;

	/* 10 s guard timer around the actual send */
	callout_reset(&scb->xs->xs_callout, hz * 10, cxd1185_timeout, scb);
	sc_send(scb, chan, ie);
	callout_stop(&scb->xs->xs_callout);

	/* remove the started scb and continue scanning the ready list */
	nextscb = scb->chain.tqe_next;

	TAILQ_REMOVE(&sc->ready_list, scb, chain);

	scb = nextscb;

	goto start;
}
351 
352 void
353 sc_done(struct sc_scb *scb)
354 {
355 	struct scsipi_xfer *xs = scb->xs;
356 	struct scsipi_periph *periph = xs->xs_periph;
357 	struct sc_softc *sc;
358 
359 	sc = device_private(periph->periph_channel->chan_adapter->adapt_dev);
360 	xs->resid = 0;
361 	xs->status = 0;
362 
363 	if (scb->istatus != INST_EP) {
364 		if (scb->istatus == (INST_EP|INST_TO))
365 			xs->error = XS_SELTIMEOUT;
366 		else {
367 			printf("SC(i): [istatus=0x%x, tstatus=0x%x]\n",
368 				scb->istatus, scb->tstatus);
369 			xs->error = XS_DRIVER_STUFFUP;
370 		}
371 	}
372 
373 	switch (scb->tstatus) {
374 
375 	case TGST_GOOD:
376 		break;
377 
378 	case TGST_CC:
379 		xs->status = SCSI_CHECK;
380 		if (xs->error == 0)
381 			xs->error = XS_BUSY;
382 		break;
383 
384 	default:
385 		printf("SC(t): [istatus=0x%x, tstatus=0x%x]\n",
386 			scb->istatus, scb->tstatus);
387 		break;
388 	}
389 
390 	scsipi_done(xs);
391 	free_scb(sc, scb);
392 	sc->inuse[periph->periph_target] = 0;
393 	sc_sched(sc);
394 }
395 
396 int
397 sc_intr(void *v)
398 {
399 	/* struct sc_softc *sc = v; */
400 	volatile uint8_t *gsp = (uint8_t *)DMAC_GSTAT;
401 	u_int gstat = *gsp;
402 	int mrqb, i;
403 
404 	if ((gstat & CH_INT(CH_SCSI)) == 0)
405 		return 0;
406 
407 	/*
408 	 * when DMA interrupt occurs there remain some untransferred data.
409 	 * wait data transfer completion.
410 	 */
411 	mrqb = (gstat & CH_INT(CH_SCSI)) << 1;
412 	if (gstat & mrqb) {
413 		/*
414 		 * XXX SHOULD USE DELAY()
415 		 */
416 		for (i = 0; i < 50; i++)
417 			;
418 		if (*gsp & mrqb)
419 			printf("%s: MRQ\n", __func__);
420 	}
421 	scintr();
422 
423 	return 1;
424 }
425 
426 
#if 0
/*
 * SCOP_RSENSE request
 */
/*
 * Disabled reference code: builds a REQUEST SENSE command by hand in a
 * struct scsi and hands it to sc_go().  Kept (under #if 0) only as
 * documentation of the low-level command format.
 */
void
scop_rsense(int intr, struct scsi *sc_param, int lun, int ie, int count,
    void *param)
{

	memset(sc_param, 0, sizeof(struct scsi));
	sc_param->identify = MSG_IDENT | sc_disconnect | (lun & IDT_DRMASK);
	sc_param->sc_lun = lun;

	sc_param->sc_cpoint = (uint8_t *)param;
	sc_param->sc_ctrnscnt = count;

	/* sc_cdb */
	sc_param->sc_opcode = SCOP_RSENSE;
	sc_param->sc_count = count;

	sc_go(intr, sc_param, ie, sc_param);
}
#endif
450 
451 void
452 cxd1185_timeout(void *arg)
453 {
454 	struct sc_scb *scb = arg;
455 	struct scsipi_xfer *xs = scb->xs;
456 	struct scsipi_periph *periph = xs->xs_periph;
457 	int chan;
458 
459 	chan = periph->periph_target;
460 
461 	printf("sc: timeout ch=%d\n", chan);
462 
463 	/* XXX abort transfer and ... */
464 }
465