xref: /netbsd/sys/arch/macppc/dev/wdc_obio.c (revision bf9ec67e)
1 /*	$NetBSD: wdc_obio.c,v 1.19 2002/01/09 05:23:07 dbj Exp $	*/
2 
3 /*-
4  * Copyright (c) 1998 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Charles M. Hannum and by Onno van der Linden.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. All advertising materials mentioning features or use of this software
19  *    must display the following acknowledgement:
20  *        This product includes software developed by the NetBSD
21  *        Foundation, Inc. and its contributors.
22  * 4. Neither the name of The NetBSD Foundation nor the names of its
23  *    contributors may be used to endorse or promote products derived
24  *    from this software without specific prior written permission.
25  *
26  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36  * POSSIBILITY OF SUCH DAMAGE.
37  */
38 
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/device.h>
42 #include <sys/malloc.h>
43 
44 #include <uvm/uvm_extern.h>
45 
46 #include <machine/bus.h>
47 #include <machine/autoconf.h>
48 
49 #include <dev/ata/atareg.h>
50 #include <dev/ata/atavar.h>
51 #include <dev/ic/wdcvar.h>
52 
53 #include <dev/ofw/openfirm.h>
54 
55 #include <macppc/dev/dbdma.h>
56 
57 #define WDC_REG_NPORTS		8
58 #define WDC_AUXREG_OFFSET	0x16
59 #define WDC_DEFAULT_PIO_IRQ	13	/* XXX */
60 #define WDC_DEFAULT_DMA_IRQ	2	/* XXX */
61 
62 #define WDC_OPTIONS_DMA 0x01
63 
64 /*
65  * XXX This code currently doesn't even try to allow 32-bit data port use.
66  */
67 
/*
 * Per-instance state for an OBIO-attached WDC (IDE) controller.
 * sc_wdcdev must be first: the MI wdc code casts between the two.
 */
struct wdc_obio_softc {
	struct wdc_softc sc_wdcdev;	/* generic MI WDC softc (must be first) */
	struct channel_softc *wdc_chanptr;	/* points at wdc_channel; sc_wdcdev.channels wants an array */
	struct channel_softc wdc_channel;	/* the single ATA channel on this controller */
	dbdma_regmap_t *sc_dmareg;	/* mapped DBDMA engine registers (DMA only) */
	dbdma_command_t	*sc_dmacmd;	/* DBDMA command list built by wdc_obio_dma_init */
	u_int sc_dmaconf[2];	/* per target value of CONFIG_REG */
	void *sc_ih;		/* interrupt handler cookie from intr_establish */
};
77 
/* autoconf glue */
int wdc_obio_probe __P((struct device *, struct cfdata *, void *));
void wdc_obio_attach __P((struct device *, struct device *, void *));
int wdc_obio_detach __P((struct device *, int));
/* MI wdc DMA hooks (dma_arg, channel, drive [, buf, len, read-flag]) */
int wdc_obio_dma_init __P((void *, int, int, void *, size_t, int));
void wdc_obio_dma_start __P((void *, int, int));
int wdc_obio_dma_finish __P((void *, int, int, int));

/* per-drive timing select hook and mode-setting callbacks */
static void wdc_obio_select __P((struct channel_softc *, int));
static void adjust_timing __P((struct channel_softc *));
static void ata4_adjust_timing __P((struct channel_softc *));

/* attachment descriptor: probe/attach/detach/activate entry points */
struct cfattach wdc_obio_ca = {
	sizeof(struct wdc_obio_softc), wdc_obio_probe, wdc_obio_attach,
	wdc_obio_detach, wdcactivate
};
93 
94 
95 int
96 wdc_obio_probe(parent, match, aux)
97 	struct device *parent;
98 	struct cfdata *match;
99 	void *aux;
100 {
101 	struct confargs *ca = aux;
102 	char compat[32];
103 
104 	/* XXX should not use name */
105 	if (strcmp(ca->ca_name, "ATA") == 0 ||
106 	    strcmp(ca->ca_name, "ata") == 0 ||
107 	    strcmp(ca->ca_name, "ata0") == 0 ||
108 	    strcmp(ca->ca_name, "ide") == 0)
109 		return 1;
110 
111 	memset(compat, 0, sizeof(compat));
112 	OF_getprop(ca->ca_node, "compatible", compat, sizeof(compat));
113 	if (strcmp(compat, "heathrow-ata") == 0 ||
114 	    strcmp(compat, "keylargo-ata") == 0)
115 		return 1;
116 
117 	return 0;
118 }
119 
/*
 * Attach an OBIO WDC controller: map its registers, hook up the
 * interrupt, optionally wire up DBDMA, and call the MI wdcattach().
 * Printf ordering matters: autoconf has already printed the device
 * name, so the " irq %d"/" DMA" fragments continue that line.
 */
void
wdc_obio_attach(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
	struct wdc_obio_softc *sc = (void *)self;
	struct confargs *ca = aux;
	struct channel_softc *chp = &sc->wdc_channel;
	int intr;
	int use_dma = 0;
	char path[80];

	/* DMA is opt-in via cf_flags; also needs the DBDMA register bank. */
	if (sc->sc_wdcdev.sc_dev.dv_cfdata->cf_flags & WDC_OPTIONS_DMA) {
		if (ca->ca_nreg >= 16 || ca->ca_nintr == -1)
			use_dma = 1;	/* XXX Don't work yet. */
	}

	/* Pick the interrupt: from the node, or a hardwired fallback. */
	if (ca->ca_nintr >= 4 && ca->ca_nreg >= 8) {
		intr = ca->ca_intr[0];
		printf(" irq %d", intr);
	} else if (ca->ca_nintr == -1) {
		intr = WDC_DEFAULT_PIO_IRQ;
		printf(" irq property not found; using %d", intr);
	} else {
		printf(": couldn't get irq property\n");
		return;
	}

	if (use_dma)
		printf(": DMA transfer");

	printf("\n");

	/* Registers are 16-byte-strided on this bus: stride 4. */
	chp->cmd_iot = chp->ctl_iot =
		macppc_make_bus_space_tag(ca->ca_baseaddr + ca->ca_reg[0], 4);

	/* Control (aux status) register lives inside the command bank. */
	if (bus_space_map(chp->cmd_iot, 0, WDC_REG_NPORTS, 0, &chp->cmd_ioh) ||
	    bus_space_subregion(chp->cmd_iot, chp->cmd_ioh,
			WDC_AUXREG_OFFSET, 1, &chp->ctl_ioh)) {
		printf("%s: couldn't map registers\n",
			sc->sc_wdcdev.sc_dev.dv_xname);
		return;
	}
#if 0
	chp->data32iot = chp->cmd_iot;
	chp->data32ioh = chp->cmd_ioh;
#endif

	sc->sc_ih = intr_establish(intr, IST_LEVEL, IPL_BIO, wdcintr, chp);

	if (use_dma) {
		/* 20 DBDMA commands: enough for a MAXPHYS transfer + STOP. */
		sc->sc_dmacmd = dbdma_alloc(sizeof(dbdma_command_t) * 20);
		/* ca_reg[2]/[3] is the DBDMA engine register bank. */
		sc->sc_dmareg = mapiodev(ca->ca_baseaddr + ca->ca_reg[2],
					 ca->ca_reg[3]);
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA;
		sc->sc_wdcdev.DMA_cap = 2;
		/* "ata-4" (KeyLargo) controllers add UDMA and a faster clock. */
		if (strcmp(ca->ca_name, "ata-4") == 0) {
			sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
			sc->sc_wdcdev.UDMA_cap = 4;
			sc->sc_wdcdev.set_modes = ata4_adjust_timing;
		} else {
			sc->sc_wdcdev.set_modes = adjust_timing;
		}
#ifdef notyet
		/* Minimum cycle time is 150ns (DMA MODE 1) on ohare. */
		if (ohare) {
			sc->sc_wdcdev.PIO_cap = 3;
			sc->sc_wdcdev.DMA_cap = 1;
		}
#endif
	} else {
		/* all non-dma controllers can use adjust_timing */
		sc->sc_wdcdev.set_modes = adjust_timing;
	}

	sc->sc_wdcdev.PIO_cap = 4;
	sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_MODE;
	sc->wdc_chanptr = chp;
	sc->sc_wdcdev.channels = &sc->wdc_chanptr;
	sc->sc_wdcdev.nchannels = 1;
	sc->sc_wdcdev.dma_arg = sc;
	sc->sc_wdcdev.dma_init = wdc_obio_dma_init;
	sc->sc_wdcdev.dma_start = wdc_obio_dma_start;
	sc->sc_wdcdev.dma_finish = wdc_obio_dma_finish;
	chp->channel = 0;
	chp->wdc = &sc->sc_wdcdev;
	chp->ch_queue = malloc(sizeof(struct channel_queue),
		M_DEVBUF, M_NOWAIT);
	if (chp->ch_queue == NULL) {
		printf("%s: can't allocate memory for command queue",
		sc->sc_wdcdev.sc_dev.dv_xname);
		return;
	}

#define OHARE_FEATURE_REG	0xf3000038

	/* XXX Enable wdc1 by feature reg. */
	memset(path, 0, sizeof(path));
	OF_package_to_path(ca->ca_node, path, sizeof(path));
	if (strcmp(path, "/bandit@F2000000/ohare@10/ata@21000") == 0) {
		u_int x;

		x = in32rb(OHARE_FEATURE_REG);
		x |= 8;	/* bit 3 enables the second IDE channel on ohare */
		out32rb(OHARE_FEATURE_REG, x);
	}

	wdcattach(chp);
	/* Program initial timings now that drive modes are negotiated. */
	sc->sc_wdcdev.set_modes(chp);

}
231 
/* Multiword DMA transfer timings */
struct ide_timings {
	int cycle;	/* minimum cycle time [ns] */
	int active;	/* minimum command active time [ns] */
};
/* ATA PIO mode timings (ns), indexed by PIO mode number. */
static struct ide_timings pio_timing[5] = {
	{ 600, 180 },    /* Mode 0 */
	{ 390, 150 },    /*      1 */
	{ 240, 105 },    /*      2 */
	{ 180,  90 },    /*      3 */
	{ 120,  75 }     /*      4 */
};
/* Multiword DMA mode timings (ns), indexed by DMA mode number. */
static struct ide_timings dma_timing[3] = {
	{ 480, 240 },	/* Mode 0 */
	{ 165,  90 },	/* Mode 1 */
	{ 120,  75 }	/* Mode 2 */
};

/* Ultra-DMA mode timings (ns), indexed by UDMA mode number. */
static struct ide_timings udma_timing[5] = {
	{120, 180},	/* Mode 0 */
	{ 90, 150},	/* Mode 1 */
	{ 60, 120},	/* Mode 2 */
	{ 45, 90},	/* Mode 3 */
	{ 30, 90}	/* Mode 4 */
};

/* heathrow/ohare timing clock: 30ns per tick (round up). */
#define TIME_TO_TICK(time) howmany((time), 30)
/* Fixed offsets/minimums (in ticks) for recovery and active times. */
#define PIO_REC_OFFSET 4
#define PIO_REC_MIN 1
#define PIO_ACT_MIN 1
#define DMA_REC_OFFSET 1
#define DMA_REC_MIN 1
#define DMA_ACT_MIN 1

#define ATA4_TIME_TO_TICK(time)  howmany((time), 15) /* 15 ns clock */

#define CONFIG_REG (0x200 >> 4)		/* IDE access timing register */
269 
270 void
271 wdc_obio_select(chp, drive)
272 	struct channel_softc *chp;
273 	int drive;
274 {
275 	struct wdc_obio_softc *sc = (struct wdc_obio_softc *)chp->wdc;
276 	bus_space_write_4(chp->cmd_iot, chp->cmd_ioh,
277 			CONFIG_REG, sc->sc_dmaconf[drive]);
278 }
279 
/*
 * Compute and program CONFIG_REG timing values for both drives on a
 * pre-ata-4 (heathrow/ohare) controller, from the PIO/DMA modes the
 * MI code negotiated.  Times are converted to 30ns controller ticks;
 * PIO timings occupy bits 0-10 of CONFIG_REG, DMA timings bits 11-31.
 * If both drives end up with the same (or only one has a) config,
 * it is written once here; otherwise the select hook is installed
 * so it is rewritten on every drive switch.
 */
void
adjust_timing(chp)
	struct channel_softc *chp;
{
	struct wdc_obio_softc *sc = (struct wdc_obio_softc *)chp->wdc;
	int drive;
	int min_cycle, min_active;
	int cycle_tick, act_tick, inact_tick, half_tick;

	for (drive = 0; drive < 2; drive++) {
		u_int conf = 0;
		struct ata_drive_datas *drvp;

		drvp = &chp->ch_drive[drive];
		/* set up pio mode timings */
		if (drvp->drive_flags & DRIVE) {
			int piomode = drvp->PIO_mode;
			min_cycle = pio_timing[piomode].cycle;
			min_active = pio_timing[piomode].active;

			cycle_tick = TIME_TO_TICK(min_cycle);
			act_tick = TIME_TO_TICK(min_active);
			if (act_tick < PIO_ACT_MIN)
				act_tick = PIO_ACT_MIN;
			/* recovery time = what's left of the cycle */
			inact_tick = cycle_tick - act_tick - PIO_REC_OFFSET;
			if (inact_tick < PIO_REC_MIN)
				inact_tick = PIO_REC_MIN;
			/* mask: 0x000007ff */
			conf |= (inact_tick << 5) | act_tick;
		}
		/* Set up dma mode timings */
		if (drvp->drive_flags & DRIVE_DMA) {
			int dmamode = drvp->DMA_mode;
			min_cycle = dma_timing[dmamode].cycle;
			min_active = dma_timing[dmamode].active;
			cycle_tick = TIME_TO_TICK(min_cycle);
			act_tick = TIME_TO_TICK(min_active);
			inact_tick = cycle_tick - act_tick - DMA_REC_OFFSET;
			if (inact_tick < DMA_REC_MIN)
				inact_tick = DMA_REC_MIN;
			half_tick = 0;	/* XXX */
			/* mask: 0xfffff800 */
			conf |=
					(half_tick << 21) |
					(inact_tick << 16) | (act_tick << 11);
		}
		if (conf) {
			printf("conf[%d] = 0x%x, cyc = %d (%d ns), act = %d (%d ns), inact = %d\n",
					drive, conf, cycle_tick, min_cycle, act_tick, min_active, inact_tick);
		}
		sc->sc_dmaconf[drive] = conf;
	}
	/* Default to a single write; install the select hook only if needed. */
	sc->sc_wdcdev.cap &= ~WDC_CAPABILITY_SELECT;
	sc->sc_wdcdev.select = 0;
	if (sc->sc_dmaconf[0]) {
		wdc_obio_select(chp,0);
		if (sc->sc_dmaconf[1] && (sc->sc_dmaconf[0] != sc->sc_dmaconf[1])) {
			sc->sc_wdcdev.select = wdc_obio_select;
			sc->sc_wdcdev.cap |= WDC_CAPABILITY_SELECT;
		}
	} else if (sc->sc_dmaconf[1]) {
		wdc_obio_select(chp,1);
	}
	wdc_print_modes(chp);
}
345 
/*
 * Same as adjust_timing(), but for "ata-4" (KeyLargo) controllers:
 * a 15ns timing clock, different CONFIG_REG bit layout (PIO bits 0-9,
 * DMA bits 10-20, UDMA bits 20-28), and UDMA support.
 */
void
ata4_adjust_timing(chp)
	struct channel_softc *chp;
{
	struct wdc_obio_softc *sc = (struct wdc_obio_softc *)chp->wdc;
	int drive;
	int min_cycle, min_active;
	int cycle_tick, act_tick, inact_tick;

	for (drive = 0; drive < 2; drive++) {
		u_int conf = 0;
		struct ata_drive_datas *drvp;

		drvp = &chp->ch_drive[drive];
		/* set up pio mode timings */

		if (drvp->drive_flags & DRIVE) {
			int piomode = drvp->PIO_mode;
			min_cycle = pio_timing[piomode].cycle;
			min_active = pio_timing[piomode].active;

			cycle_tick = ATA4_TIME_TO_TICK(min_cycle);
			act_tick = ATA4_TIME_TO_TICK(min_active);
			inact_tick = cycle_tick - act_tick;
			/* mask: 0x000003ff */
			conf |= (inact_tick << 5) | act_tick;
		}
		/* set up dma mode timings */
		if (drvp->drive_flags & DRIVE_DMA) {
			int dmamode = drvp->DMA_mode;
			min_cycle = dma_timing[dmamode].cycle;
			min_active = dma_timing[dmamode].active;
			cycle_tick = ATA4_TIME_TO_TICK(min_cycle);
			act_tick = ATA4_TIME_TO_TICK(min_active);
			inact_tick = cycle_tick - act_tick;
			/* mask: 0x001ffc00 */
			conf |= (act_tick << 10) | (inact_tick << 15);
		}
		/* set up udma mode timings */
		if (drvp->drive_flags & DRIVE_UDMA) {
			int udmamode = drvp->UDMA_mode;
			min_cycle = udma_timing[udmamode].cycle;
			min_active = udma_timing[udmamode].active;
			act_tick = ATA4_TIME_TO_TICK(min_active);
			cycle_tick = ATA4_TIME_TO_TICK(min_cycle);
			/* mask: 0x1ff00000; 0x100000 enables UDMA */
			conf |= (cycle_tick << 21) | (act_tick << 25) | 0x100000;
		}
		if (conf) {
			printf("ata4 conf[%d] = 0x%x, cyc = %d (%d ns), act = %d (%d ns), inact = %d\n",
					drive, conf, cycle_tick, min_cycle, act_tick, min_active, inact_tick);
		}
		sc->sc_dmaconf[drive] = conf;
	}
	/* Default to a single write; install the select hook only if needed. */
	sc->sc_wdcdev.cap &= ~WDC_CAPABILITY_SELECT;
	sc->sc_wdcdev.select = 0;
	if (sc->sc_dmaconf[0]) {
		wdc_obio_select(chp,0);
		if (sc->sc_dmaconf[1] && (sc->sc_dmaconf[0] != sc->sc_dmaconf[1])) {
			sc->sc_wdcdev.select = wdc_obio_select;
			sc->sc_wdcdev.cap |= WDC_CAPABILITY_SELECT;
		}
	} else if (sc->sc_dmaconf[1]) {
		wdc_obio_select(chp,1);
	}
	wdc_print_modes(chp);
}
413 
414 int
415 wdc_obio_detach(self, flags)
416 	struct device *self;
417 	int flags;
418 {
419 	struct wdc_obio_softc *sc = (void *)self;
420 	int error;
421 
422 	if ((error = wdcdetach(self, flags)) != 0)
423 		return error;
424 
425 	intr_disestablish(sc->sc_ih);
426 
427 	free(sc->wdc_channel.ch_queue, M_DEVBUF);
428 
429 	/* Unmap our i/o space. */
430 	bus_space_unmap(chp->cmd_iot, chp->cmd_ioh, WDC_REG_NPORTS);
431 
432 	/* Unmap DMA registers. */
433 	/* XXX unmapiodev(sc->sc_dmareg); */
434 	/* XXX free(sc->sc_dmacmd); */
435 
436 	return 0;
437 }
438 
439 int
440 wdc_obio_dma_init(v, channel, drive, databuf, datalen, read)
441 	void *v;
442 	void *databuf;
443 	size_t datalen;
444 	int read;
445 {
446 	struct wdc_obio_softc *sc = v;
447 	vaddr_t va = (vaddr_t)databuf;
448 	dbdma_command_t *cmdp;
449 	u_int cmd, offset;
450 
451 	cmdp = sc->sc_dmacmd;
452 	cmd = read ? DBDMA_CMD_IN_MORE : DBDMA_CMD_OUT_MORE;
453 
454 	offset = va & PGOFSET;
455 
456 	/* if va is not page-aligned, setup the first page */
457 	if (offset != 0) {
458 		int rest = NBPG - offset;	/* the rest of the page */
459 
460 		if (datalen > rest) {		/* if continues to next page */
461 			DBDMA_BUILD(cmdp, cmd, 0, rest, vtophys(va),
462 				DBDMA_INT_NEVER, DBDMA_WAIT_NEVER,
463 				DBDMA_BRANCH_NEVER);
464 			datalen -= rest;
465 			va += rest;
466 			cmdp++;
467 		}
468 	}
469 
470 	/* now va is page-aligned */
471 	while (datalen > NBPG) {
472 		DBDMA_BUILD(cmdp, cmd, 0, NBPG, vtophys(va),
473 			DBDMA_INT_NEVER, DBDMA_WAIT_NEVER, DBDMA_BRANCH_NEVER);
474 		datalen -= NBPG;
475 		va += NBPG;
476 		cmdp++;
477 	}
478 
479 	/* the last page (datalen <= NBPG here) */
480 	cmd = read ? DBDMA_CMD_IN_LAST : DBDMA_CMD_OUT_LAST;
481 	DBDMA_BUILD(cmdp, cmd, 0, datalen, vtophys(va),
482 		DBDMA_INT_NEVER, DBDMA_WAIT_NEVER, DBDMA_BRANCH_NEVER);
483 	cmdp++;
484 
485 	DBDMA_BUILD(cmdp, DBDMA_CMD_STOP, 0, 0, 0,
486 		DBDMA_INT_NEVER, DBDMA_WAIT_NEVER, DBDMA_BRANCH_NEVER);
487 
488 	return 0;
489 }
490 
/*
 * Kick off the DBDMA transfer previously set up by wdc_obio_dma_init().
 * channel/drive are unused: single channel, one DBDMA engine.
 */
void
wdc_obio_dma_start(v, channel, drive)
	void *v;
	int channel, drive;
{
	struct wdc_obio_softc *sc = v;

	dbdma_start(sc->sc_dmareg, sc->sc_dmacmd);
}
500 
/*
 * Stop the DBDMA engine after a transfer completes.
 * channel/drive/read are unused; always returns 0 (no error reporting
 * from the DBDMA engine is done here).
 */
int
wdc_obio_dma_finish(v, channel, drive, read)
	void *v;
	int channel, drive;
	int read;
{
	struct wdc_obio_softc *sc = v;

	dbdma_stop(sc->sc_dmareg);
	return 0;
}
512