/*-
 * Copyright (c) 1998 - 2008 Søren Schmidt <sos@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/* local prototypes */
static int ata_promise_chipinit(device_t dev);
static int ata_promise_allocate(device_t dev);
static int ata_promise_status(device_t dev);
static int ata_promise_dmastart(device_t dev);
static int ata_promise_dmastop(device_t dev);
static void ata_promise_dmareset(device_t dev);
static void ata_promise_dmainit(device_t dev);
static void ata_promise_setmode(device_t dev, int mode);
static int ata_promise_tx2_allocate(device_t dev);
static int ata_promise_tx2_status(device_t dev);
static int ata_promise_mio_allocate(device_t dev);
static void ata_promise_mio_intr(void *data);
static int ata_promise_mio_status(device_t dev);
static int ata_promise_mio_command(struct ata_request *request);
static void ata_promise_mio_reset(device_t dev);
static u_int32_t ata_promise_mio_softreset(device_t dev, int port);
static void ata_promise_mio_dmainit(device_t dev);
static void ata_promise_mio_setprd(void *xsc, bus_dma_segment_t *segs, int nsegs, int error);
static void ata_promise_mio_setmode(device_t dev, int mode);
static void ata_promise_sx4_intr(void *data);
static int ata_promise_sx4_command(struct ata_request *request);
static int ata_promise_apkt(u_int8_t *bytep, struct ata_request *request);
static void ata_promise_queue_hpkt(struct ata_pci_controller *ctlr, u_int32_t hpkt);
static void ata_promise_next_hpkt(struct ata_pci_controller *ctlr);

/* misc defines */
#define PR_OLD		0
#define PR_NEW		1
#define PR_TX		2
#define PR_MIO		3
#define PR_TX4		0x01
#define PR_SX4X		0x02
#define PR_SX6K		0x04
#define PR_PATA		0x08
#define PR_CMBO		0x10
#define PR_CMBO2	0x20
#define PR_SATA		0x40
#define PR_SATA2	0x80

#define ATA_PDC_APKT_OFFSET     0x00000010
#define ATA_PDC_HPKT_OFFSET     0x00000040
#define ATA_PDC_ASG_OFFSET      0x00000080
#define ATA_PDC_LSG_OFFSET      0x000000c0
#define ATA_PDC_HSG_OFFSET      0x00000100
#define ATA_PDC_CHN_OFFSET      0x00000400
#define ATA_PDC_BUF_BASE        0x00400000
#define ATA_PDC_BUF_OFFSET      0x00100000
#define ATA_PDC_MAX_HPKT        8
#define ATA_PDC_WRITE_REG       0x00
#define ATA_PDC_WRITE_CTL       0x0e
#define ATA_PDC_WRITE_END       0x08
#define ATA_PDC_WAIT_NBUSY      0x10
#define ATA_PDC_WAIT_READY      0x18
#define ATA_PDC_1B              0x20
#define ATA_PDC_2B              0x40

struct host_packet {
    u_int32_t                   addr;
    TAILQ_ENTRY(host_packet)    chain;
};

struct ata_promise_sx4 {
    struct lock                 mtx;
    TAILQ_HEAD(, host_packet)   queue;
    int                         busy;
};

/*
 * Promise chipset support functions
 */
int
ata_promise_ident(device_t dev)
{
    struct ata_pci_controller *ctlr = device_get_softc(dev);
    const struct ata_chip_id *idx;
    static const struct ata_chip_id ids[] =
    {{ ATA_PDC20246,  0, PR_OLD, 0x00,     ATA_UDMA2, "PDC20246" },
     { ATA_PDC20262,  0, PR_NEW, 0x00,     ATA_UDMA4, "PDC20262" },
     { ATA_PDC20263,  0, PR_NEW, 0x00,     ATA_UDMA4, "PDC20263" },
     { ATA_PDC20265,  0, PR_NEW, 0x00,     ATA_UDMA5, "PDC20265" },
     { ATA_PDC20267,  0, PR_NEW, 0x00,     ATA_UDMA5, "PDC20267" },
     { ATA_PDC20268,  0, PR_TX,  PR_TX4,   ATA_UDMA5, "PDC20268" },
     { ATA_PDC20269,  0, PR_TX,  0x00,     ATA_UDMA6, "PDC20269" },
     { ATA_PDC20270,  0, PR_TX,  PR_TX4,   ATA_UDMA5, "PDC20270" },
     { ATA_PDC20271,  0, PR_TX,  0x00,     ATA_UDMA6, "PDC20271" },
     { ATA_PDC20275,  0, PR_TX,  0x00,     ATA_UDMA6, "PDC20275" },
     { ATA_PDC20276,  0, PR_TX,  PR_SX6K,  ATA_UDMA6, "PDC20276" },
     { ATA_PDC20277,  0, PR_TX,  0x00,     ATA_UDMA6, "PDC20277" },
     { ATA_PDC20318,  0, PR_MIO, PR_SATA,  ATA_SA150, "PDC20318" },
     { ATA_PDC20319,  0, PR_MIO, PR_SATA,  ATA_SA150, "PDC20319" },
     { ATA_PDC20371,  0, PR_MIO, PR_CMBO,  ATA_SA150, "PDC20371" },
     { ATA_PDC20375,  0, PR_MIO, PR_CMBO,  ATA_SA150, "PDC20375" },
     { ATA_PDC20376,  0, PR_MIO, PR_CMBO,  ATA_SA150, "PDC20376" },
     { ATA_PDC20377,  0, PR_MIO, PR_CMBO,  ATA_SA150, "PDC20377" },
     { ATA_PDC20378,  0, PR_MIO, PR_CMBO,  ATA_SA150, "PDC20378" },
     { ATA_PDC20379,  0, PR_MIO, PR_CMBO,  ATA_SA150, "PDC20379" },
     { ATA_PDC20571,  0, PR_MIO, PR_CMBO2, ATA_SA150, "PDC20571" },
     { ATA_PDC20575,  0, PR_MIO, PR_CMBO2, ATA_SA150, "PDC20575" },
     { ATA_PDC20579,  0, PR_MIO, PR_CMBO2, ATA_SA150, "PDC20579" },
     { ATA_PDC20771,  0, PR_MIO, PR_CMBO2, ATA_SA300, "PDC20771" },
     { ATA_PDC40775,  0, PR_MIO, PR_CMBO2, ATA_SA300, "PDC40775" },
     { ATA_PDC20617,  0, PR_MIO, PR_PATA,  ATA_UDMA6, "PDC20617" },
     { ATA_PDC20618,  0, PR_MIO, PR_PATA,  ATA_UDMA6, "PDC20618" },
     { ATA_PDC20619,  0, PR_MIO, PR_PATA,  ATA_UDMA6, "PDC20619" },
     { ATA_PDC20620,  0, PR_MIO, PR_PATA,  ATA_UDMA6, "PDC20620" },
     { ATA_PDC20621,  0, PR_MIO, PR_SX4X,  ATA_UDMA5, "PDC20621" },
     { ATA_PDC20622,  0, PR_MIO, PR_SX4X,  ATA_SA150, "PDC20622" },
     { ATA_PDC40518,  0, PR_MIO, PR_SATA2, ATA_SA150, "PDC40518" },
     { ATA_PDC40519,  0, PR_MIO, PR_SATA2, ATA_SA150, "PDC40519" },
     { ATA_PDC40718,  0, PR_MIO, PR_SATA2, ATA_SA300, "PDC40718" },
     { ATA_PDC40719,  0, PR_MIO, PR_SATA2, ATA_SA300, "PDC40719" },
     { ATA_PDC40779,  0, PR_MIO, PR_SATA2, ATA_SA300, "PDC40779" },
     { 0, 0, 0, 0, 0, 0}};
    char buffer[64];
    uintptr_t devid = 0;

    if (pci_get_vendor(dev) != ATA_PROMISE_ID)
	return ENXIO;

    if (!(idx = ata_match_chip(dev, ids)))
	return ENXIO;

    /* if we are on a SuperTrak SX6000 don't attach */
    if ((idx->cfg2 & PR_SX6K) && pci_get_class(GRANDPARENT(dev))==PCIC_BRIDGE &&
	!BUS_READ_IVAR(device_get_parent(GRANDPARENT(dev)),
		       GRANDPARENT(dev), PCI_IVAR_DEVID, &devid) &&
	devid == ATA_I960RM)
	return ENXIO;

    strcpy(buffer, "Promise ");
    strcat(buffer, idx->text);

    /* if we are on a FastTrak TX4, adjust the interrupt resource */
    if ((idx->cfg2 & PR_TX4) && pci_get_class(GRANDPARENT(dev))==PCIC_BRIDGE &&
	!BUS_READ_IVAR(device_get_parent(GRANDPARENT(dev)),
		       GRANDPARENT(dev), PCI_IVAR_DEVID, &devid) &&
	((devid == ATA_DEC_21150) || (devid == ATA_DEC_21150_1))) {
	static long start = 0, end = 0;

	if (pci_get_slot(dev) == 1) {
	    bus_get_resource(dev, SYS_RES_IRQ, 0, &start, &end);
	    strcat(buffer, " (channel 0+1)");
	}
	else if (pci_get_slot(dev) == 2 && start && end) {
	    bus_set_resource(dev, SYS_RES_IRQ, 0, start, end,
	        machintr_legacy_intr_cpuid(start));
	    strcat(buffer, " (channel 2+3)");
	}
	else {
	    start = end = 0;
	}
    }
    ksprintf(buffer + strlen(buffer), " %s controller",
	     ata_mode2str(idx->max_dma));
    device_set_desc_copy(dev, buffer);
    ctlr->chip = idx;
    ctlr->chipinit = ata_promise_chipinit;
    return 0;
}

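/*
 * Chipset specific initialization.  Hooks up the per generation
 * allocate/setmode/dmainit/reset methods; MIO based chips additionally
 * get their memory BARs mapped and an interrupt intercept installed.
 */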
static int
ata_promise_chipinit(device_t dev)
{
    struct ata_pci_controller *ctlr = device_get_softc(dev);
    int fake_reg, stat_reg;

    if (ata_setup_interrupt(dev, ata_generic_intr))
	return ENXIO;

    switch (ctlr->chip->cfg1) {
    case PR_NEW:
	/* setup clocks */
	ATA_OUTB(ctlr->r_res1, 0x11, ATA_INB(ctlr->r_res1, 0x11) | 0x0a);

	ctlr->dmainit = ata_promise_dmainit;
	/* FALLTHROUGH */

    case PR_OLD:
	/* enable burst mode */
	ATA_OUTB(ctlr->r_res1, 0x1f, ATA_INB(ctlr->r_res1, 0x1f) | 0x01);
	ctlr->allocate = ata_promise_allocate;
	ctlr->setmode = ata_promise_setmode;
	return 0;

    case PR_TX:
	ctlr->allocate = ata_promise_tx2_allocate;
	ctlr->setmode = ata_promise_setmode;
	return 0;

    case PR_MIO:
	ctlr->r_type1 = SYS_RES_MEMORY;
	ctlr->r_rid1 = PCIR_BAR(4);
	if (!(ctlr->r_res1 = bus_alloc_resource_any(dev, ctlr->r_type1,
						    &ctlr->r_rid1, RF_ACTIVE)))
	    goto failnfree;

	ctlr->r_type2 = SYS_RES_MEMORY;
	ctlr->r_rid2 = PCIR_BAR(3);
	if (!(ctlr->r_res2 = bus_alloc_resource_any(dev, ctlr->r_type2,
						    &ctlr->r_rid2, RF_ACTIVE)))
	    goto failnfree;

	if (ctlr->chip->cfg2 == PR_SX4X) {
	    struct ata_promise_sx4 *hpkt;
	    u_int32_t dimm = ATA_INL(ctlr->r_res2, 0x000c0080);

	    if (bus_teardown_intr(dev, ctlr->r_irq, ctlr->handle) ||
		bus_setup_intr(dev, ctlr->r_irq, ATA_INTR_FLAGS,
			       ata_promise_sx4_intr, ctlr, &ctlr->handle, NULL)) {
		device_printf(dev, "unable to setup interrupt\n");
		goto failnfree;
	    }

	    /* print info about cache memory */
	    device_printf(dev, "DIMM size %dMB @ 0x%08x%s\n",
			  (((dimm >> 16) & 0xff)-((dimm >> 24) & 0xff)+1) << 4,
			  ((dimm >> 24) & 0xff),
			  ATA_INL(ctlr->r_res2, 0x000c0088) & (1<<16) ?
			  " ECC enabled" : "" );

	    /* adjust cache memory parameters */
	    ATA_OUTL(ctlr->r_res2, 0x000c000c,
		     (ATA_INL(ctlr->r_res2, 0x000c000c) & 0xffff0000));

	    /* setup host packet controls */
	    hpkt = kmalloc(sizeof(struct ata_promise_sx4),
			  M_TEMP, M_INTWAIT | M_ZERO);
	    lockinit(&hpkt->mtx, "chipinit", 0, 0);
	    TAILQ_INIT(&hpkt->queue);
	    hpkt->busy = 0;
	    device_set_ivars(dev, hpkt);
	    ctlr->allocate = ata_promise_mio_allocate;
	    ctlr->reset = ata_promise_mio_reset;
	    ctlr->dmainit = ata_promise_mio_dmainit;
	    ctlr->setmode = ata_promise_setmode;
	    ctlr->channels = 4;
	    return 0;
	}

	/* mio type controllers need an interrupt intercept */
	if (bus_teardown_intr(dev, ctlr->r_irq, ctlr->handle) ||
	    bus_setup_intr(dev, ctlr->r_irq, ATA_INTR_FLAGS,
			   ata_promise_mio_intr, ctlr, &ctlr->handle, NULL)) {
	    device_printf(dev, "unable to setup interrupt\n");
	    goto failnfree;
	}

	switch (ctlr->chip->cfg2) {
	case PR_PATA:
	    ctlr->channels = ((ATA_INL(ctlr->r_res2, 0x48) & 0x01) > 0) +
			     ((ATA_INL(ctlr->r_res2, 0x48) & 0x02) > 0) + 2;
	    goto sata150;
	case PR_CMBO:
	    ctlr->channels = 3;
	    goto sata150;
	case PR_SATA:
	    ctlr->channels = 4;
sata150:
	    fake_reg = 0x60;
	    stat_reg = 0x6c;
	    break;

	case PR_CMBO2:
	    ctlr->channels = 3;
	    goto sataii;
	case PR_SATA2:
	default:
	    ctlr->channels = 4;
sataii:
	    fake_reg = 0x54;
	    stat_reg = 0x60;
	    break;
	}

	/* prime fake interrupt register */
	ATA_OUTL(ctlr->r_res2, fake_reg, 0xffffffff);

	/* clear SATA status and unmask interrupts */
	ATA_OUTL(ctlr->r_res2, stat_reg, 0x000000ff);

	/* enable "long burst length" on gen2 chips */
	if ((ctlr->chip->cfg2 == PR_SATA2) || (ctlr->chip->cfg2 == PR_CMBO2))
	    ATA_OUTL(ctlr->r_res2, 0x44, ATA_INL(ctlr->r_res2, 0x44) | 0x2000);

	ctlr->allocate = ata_promise_mio_allocate;
	ctlr->reset = ata_promise_mio_reset;
	ctlr->dmainit = ata_promise_mio_dmainit;
	ctlr->setmode = ata_promise_mio_setmode;

	return 0;
    }

failnfree:
    if (ctlr->r_res2)
	bus_release_resource(dev, ctlr->r_type2, ctlr->r_rid2, ctlr->r_res2);
    if (ctlr->r_res1)
	bus_release_resource(dev, ctlr->r_type1, ctlr->r_rid1, ctlr->r_res1);
    return ENXIO;
}

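/*
 * Channel allocation for PR_OLD/PR_NEW chips; generic PCI setup plus
 * our own interrupt status check.
 */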
static int
ata_promise_allocate(device_t dev)
{
    struct ata_channel *ch = device_get_softc(dev);

    if (ata_pci_allocate(dev))
	return ENXIO;

    ch->hw.status = ata_promise_status;
    return 0;
}

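/*
 * Check the controller interrupt status bits for this channel before
 * falling through to the generic PCI status handler.
 */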
static int
ata_promise_status(device_t dev)
{
    struct ata_pci_controller *ctlr = device_get_softc(device_get_parent(dev));
    struct ata_channel *ch = device_get_softc(dev);

    if (ATA_INL(ctlr->r_res1, 0x1c) & (ch->unit ? 0x00004000 : 0x00000400)) {
	return ata_pci_status(dev);
    }
    return 0;
}

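/*
 * Start a busmaster DMA transfer; 48 bit transfers additionally need the
 * per channel transfer count/direction register set up first.
 */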
static int
ata_promise_dmastart(device_t dev)
{
    struct ata_pci_controller *ctlr = device_get_softc(GRANDPARENT(dev));
    struct ata_channel *ch = device_get_softc(device_get_parent(dev));
    struct ata_device *atadev = device_get_softc(dev);

    if (atadev->flags & ATA_D_48BIT_ACTIVE) {
	ATA_OUTB(ctlr->r_res1, 0x11,
		 ATA_INB(ctlr->r_res1, 0x11) | (ch->unit ? 0x08 : 0x02));
	ATA_OUTL(ctlr->r_res1, ch->unit ? 0x24 : 0x20,
		 ((ch->dma->flags & ATA_DMA_READ) ? 0x05000000 : 0x06000000) |
		 (ch->dma->cur_iosize >> 1));
    }
    ATA_IDX_OUTB(ch, ATA_BMSTAT_PORT, (ATA_IDX_INB(ch, ATA_BMSTAT_PORT) |
		 (ATA_BMSTAT_INTERRUPT | ATA_BMSTAT_ERROR)));
    ATA_IDX_OUTL(ch, ATA_BMDTP_PORT, ch->dma->sg_bus);
    ATA_IDX_OUTB(ch, ATA_BMCMD_PORT,
		 ((ch->dma->flags & ATA_DMA_READ) ? ATA_BMCMD_WRITE_READ : 0) |
		 ATA_BMCMD_START_STOP);
    ch->flags |= ATA_DMA_ACTIVE;
    return 0;
}

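/*
 * Stop a busmaster DMA transfer and clear the 48 bit setup again;
 * returns the busmaster status register contents.
 */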
static int
ata_promise_dmastop(device_t dev)
{
    struct ata_pci_controller *ctlr = device_get_softc(GRANDPARENT(dev));
    struct ata_channel *ch = device_get_softc(device_get_parent(dev));
    struct ata_device *atadev = device_get_softc(dev);
    int error;

    if (atadev->flags & ATA_D_48BIT_ACTIVE) {
	ATA_OUTB(ctlr->r_res1, 0x11,
		 ATA_INB(ctlr->r_res1, 0x11) & ~(ch->unit ? 0x08 : 0x02));
	ATA_OUTL(ctlr->r_res1, ch->unit ? 0x24 : 0x20, 0);
    }
    error = ATA_IDX_INB(ch, ATA_BMSTAT_PORT);
    ATA_IDX_OUTB(ch, ATA_BMCMD_PORT,
		 ATA_IDX_INB(ch, ATA_BMCMD_PORT) & ~ATA_BMCMD_START_STOP);
    ATA_IDX_OUTB(ch, ATA_BMSTAT_PORT, ATA_BMSTAT_INTERRUPT | ATA_BMSTAT_ERROR);
    ch->flags &= ~ATA_DMA_ACTIVE;
    return error;
}

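/* reset the busmaster engine after an error or timeout */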
static void
ata_promise_dmareset(device_t dev)
{
    struct ata_channel *ch = device_get_softc(dev);

    ATA_IDX_OUTB(ch, ATA_BMCMD_PORT,
		 ATA_IDX_INB(ch, ATA_BMCMD_PORT) & ~ATA_BMCMD_START_STOP);
    ATA_IDX_OUTB(ch, ATA_BMSTAT_PORT, ATA_BMSTAT_INTERRUPT | ATA_BMSTAT_ERROR);
    ch->flags &= ~ATA_DMA_ACTIVE;
}

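/* hook our DMA start/stop/reset methods into the generic DMA support */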
static void
ata_promise_dmainit(device_t dev)
{
    struct ata_channel *ch = device_get_softc(dev);

    ata_dmainit(dev);
    if (ch->dma) {
	ch->dma->start = ata_promise_dmastart;
	ch->dma->stop = ata_promise_dmastop;
	ch->dma->reset = ata_promise_dmareset;
    }
}

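/*
 * Set the transfer mode; limit modes above UDMA2 when a 40 wire cable is
 * detected and program the timing registers on PR_OLD/PR_NEW chips.
 */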
static void
ata_promise_setmode(device_t dev, int mode)
{
    device_t gparent = GRANDPARENT(dev);
    struct ata_pci_controller *ctlr = device_get_softc(gparent);
    struct ata_channel *ch = device_get_softc(device_get_parent(dev));
    struct ata_device *atadev = device_get_softc(dev);
    int devno = (ch->unit << 1) + atadev->unit;
    int error;
    static const uint32_t timings[][2] = {
    /*    PR_OLD      PR_NEW               mode */
	{ 0x004ff329, 0x004fff2f },     /* PIO 0 */
	{ 0x004fec25, 0x004ff82a },     /* PIO 1 */
	{ 0x004fe823, 0x004ff026 },     /* PIO 2 */
	{ 0x004fe622, 0x004fec24 },     /* PIO 3 */
	{ 0x004fe421, 0x004fe822 },     /* PIO 4 */
	{ 0x004567f3, 0x004acef6 },     /* MWDMA 0 */
	{ 0x004467f3, 0x0048cef6 },     /* MWDMA 1 */
	{ 0x004367f3, 0x0046cef6 },     /* MWDMA 2 */
	{ 0x004367f3, 0x0046cef6 },     /* UDMA 0 */
	{ 0x004247f3, 0x00448ef6 },     /* UDMA 1 */
	{ 0x004127f3, 0x00436ef6 },     /* UDMA 2 */
	{ 0,          0x00424ef6 },     /* UDMA 3 */
	{ 0,          0x004127f3 },     /* UDMA 4 */
	{ 0,          0x004127f3 }      /* UDMA 5 */
    };

    mode = ata_limit_mode(dev, mode, ctlr->chip->max_dma);

    switch (ctlr->chip->cfg1) {
    case PR_OLD:
    case PR_NEW:
	if (mode > ATA_UDMA2 && (pci_read_config(gparent, 0x50, 2) &
				 (ch->unit ? 1 << 11 : 1 << 10))) {
	    ata_print_cable(dev, "controller");
	    mode = ATA_UDMA2;
	}
	if (ata_atapi(dev) && mode > ATA_PIO_MAX)
	    mode = ata_limit_mode(dev, mode, ATA_PIO_MAX);
	break;

    case PR_TX:
	ATA_IDX_OUTB(ch, ATA_BMDEVSPEC_0, 0x0b);
	if (mode > ATA_UDMA2 &&
	    ATA_IDX_INB(ch, ATA_BMDEVSPEC_1) & 0x04) {
	    ata_print_cable(dev, "controller");
	    mode = ATA_UDMA2;
	}
	break;

    case PR_MIO:
	if (mode > ATA_UDMA2 &&
	    (ATA_INL(ctlr->r_res2,
		     (ctlr->chip->cfg2 & PR_SX4X ? 0x000c0260 : 0x0260) +
		     (ch->unit << 7)) & 0x01000000)) {
	    ata_print_cable(dev, "controller");
	    mode = ATA_UDMA2;
	}
	break;
    }

    error = ata_controlcmd(dev, ATA_SETFEATURES, ATA_SF_SETXFER, 0, mode);

    if (bootverbose)
	device_printf(dev, "%ssetting %s on %s chip\n",
		     (error) ? "FAILURE " : "",
		     ata_mode2str(mode), ctlr->chip->text);
    if (!error) {
	if (ctlr->chip->cfg1 < PR_TX)
	    pci_write_config(gparent, 0x60 + (devno << 2),
			     timings[ata_mode2idx(mode)][ctlr->chip->cfg1], 4);
	atadev->mode = mode;
    }
    return;
}

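/* channel allocation for TX2/TX4 chips; only the status method differs */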
static int
ata_promise_tx2_allocate(device_t dev)
{
    struct ata_channel *ch = device_get_softc(dev);

    if (ata_pci_allocate(dev))
	return ENXIO;

    ch->hw.status = ata_promise_tx2_status;
    return 0;
}

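/* TX2/TX4 interrupt status check via the device specific busmaster registers */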
static int
ata_promise_tx2_status(device_t dev)
{
    struct ata_channel *ch = device_get_softc(dev);

    ATA_IDX_OUTB(ch, ATA_BMDEVSPEC_0, 0x0b);
    if (ATA_IDX_INB(ch, ATA_BMDEVSPEC_1) & 0x20) {
	return ata_pci_status(dev);
    }
    return 0;
}

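/*
 * Set up the memory mapped register layout for MIO style chips and
 * select the appropriate command/status/softreset methods.
 */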
static int
ata_promise_mio_allocate(device_t dev)
{
    struct ata_pci_controller *ctlr = device_get_softc(device_get_parent(dev));
    struct ata_channel *ch = device_get_softc(dev);
    int offset = (ctlr->chip->cfg2 & PR_SX4X) ? 0x000c0000 : 0;
    int i;

    for (i = ATA_DATA; i <= ATA_COMMAND; i++) {
	ch->r_io[i].res = ctlr->r_res2;
	ch->r_io[i].offset = offset + 0x0200 + (i << 2) + (ch->unit << 7);
    }
    ch->r_io[ATA_CONTROL].res = ctlr->r_res2;
    ch->r_io[ATA_CONTROL].offset = offset + 0x0238 + (ch->unit << 7);
    ch->r_io[ATA_IDX_ADDR].res = ctlr->r_res2;
    ata_default_registers(dev);
    if ((ctlr->chip->cfg2 & (PR_SATA | PR_SATA2)) ||
	((ctlr->chip->cfg2 & (PR_CMBO | PR_CMBO2)) && ch->unit < 2)) {
	ch->r_io[ATA_SSTATUS].res = ctlr->r_res2;
	ch->r_io[ATA_SSTATUS].offset = 0x400 + (ch->unit << 8);
	ch->r_io[ATA_SERROR].res = ctlr->r_res2;
	ch->r_io[ATA_SERROR].offset = 0x404 + (ch->unit << 8);
	ch->r_io[ATA_SCONTROL].res = ctlr->r_res2;
	ch->r_io[ATA_SCONTROL].offset = 0x408 + (ch->unit << 8);
	ch->flags |= ATA_NO_SLAVE;
    }
    ch->flags |= ATA_USE_16BIT;

    ata_generic_hw(dev);
    if (ctlr->chip->cfg2 & PR_SX4X) {
	ch->hw.command = ata_promise_sx4_command;
    }
    else {
	ch->hw.command = ata_promise_mio_command;
	ch->hw.status = ata_promise_mio_status;
	ch->hw.softreset = ata_promise_mio_softreset;
    }
    return 0;
}

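/*
 * Interrupt intercept for MIO style chips; latches the interrupt vector
 * into a scratch register and calls the per channel handlers.
 */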
static void
ata_promise_mio_intr(void *data)
{
    struct ata_pci_controller *ctlr = data;
    struct ata_channel *ch;
    u_int32_t vector;
    int unit, fake_reg;

    switch (ctlr->chip->cfg2) {
    case PR_PATA:
    case PR_CMBO:
    case PR_SATA:
	fake_reg = 0x60;
	break;
    case PR_CMBO2:
    case PR_SATA2:
    default:
	fake_reg = 0x54;
	break;
    }

    /*
     * Since reading the interrupt status register on early "mio" chips
     * clears the status bits, we cannot read it again for each channel
     * later on in the generic interrupt routine.
     * Store the bits in an unused register in the chip so we can safely
     * read them from there, to get around this "feature".
     */
    vector = ATA_INL(ctlr->r_res2, 0x040);
    ATA_OUTL(ctlr->r_res2, 0x040, vector);
    ATA_OUTL(ctlr->r_res2, fake_reg, vector);

    for (unit = 0; unit < ctlr->channels; unit++) {
	if ((ch = ctlr->interrupt[unit].argument))
	    ctlr->interrupt[unit].function(ch);
    }

    ATA_OUTL(ctlr->r_res2, fake_reg, 0xffffffff);
}

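/*
 * Per channel interrupt status for MIO style chips; also picks up SATA
 * connect/disconnect events and schedules the corresponding tasks.
 */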
static int
ata_promise_mio_status(device_t dev)
{
    struct ata_pci_controller *ctlr = device_get_softc(device_get_parent(dev));
    struct ata_channel *ch = device_get_softc(dev);
    struct ata_connect_task *tp;
    u_int32_t fake_reg, stat_reg, vector, status;

    switch (ctlr->chip->cfg2) {
    case PR_PATA:
    case PR_CMBO:
    case PR_SATA:
	fake_reg = 0x60;
	stat_reg = 0x6c;
	break;
    case PR_CMBO2:
    case PR_SATA2:
    default:
	fake_reg = 0x54;
	stat_reg = 0x60;
	break;
    }

    /* read and acknowledge interrupt */
    vector = ATA_INL(ctlr->r_res2, fake_reg);

    /* read and clear interface status */
    status = ATA_INL(ctlr->r_res2, stat_reg);
    ATA_OUTL(ctlr->r_res2, stat_reg, status & (0x00000011 << ch->unit));

    /* check for and handle disconnect events */
    if ((status & (0x00000001 << ch->unit)) &&
	(tp = (struct ata_connect_task *)
	      kmalloc(sizeof(struct ata_connect_task),
		     M_ATA, M_INTWAIT | M_ZERO))) {

	if (bootverbose)
	    device_printf(ch->dev, "DISCONNECT requested\n");
	tp->action = ATA_C_DETACH;
	tp->dev = ch->dev;
	TASK_INIT(&tp->task, 0, ata_sata_phy_event, tp);
	taskqueue_enqueue(taskqueue_thread[mycpuid], &tp->task);
    }

    /* check for and handle connect events */
    if ((status & (0x00000010 << ch->unit)) &&
	(tp = (struct ata_connect_task *)
	      kmalloc(sizeof(struct ata_connect_task),
		     M_ATA, M_INTWAIT | M_ZERO))) {

	if (bootverbose)
	    device_printf(ch->dev, "CONNECT requested\n");
	tp->action = ATA_C_ATTACH;
	tp->dev = ch->dev;
	TASK_INIT(&tp->task, 0, ata_sata_phy_event, tp);
	taskqueue_enqueue(taskqueue_thread[mycpuid], &tp->task);
    }

    /* do we have any device action? */
    return (vector & (1 << (ch->unit + 1)));
}

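/*
 * Issue a command on MIO style chips; DMA read/write commands are wrapped
 * in an ATA packet (APKT) and handed to the on-chip engine.
 */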
static int
ata_promise_mio_command(struct ata_request *request)
{
    struct ata_pci_controller *ctlr=device_get_softc(GRANDPARENT(request->dev));
    struct ata_channel *ch = device_get_softc(request->parent);
    struct ata_device *atadev = device_get_softc(request->dev);
    u_int32_t *wordp = (u_int32_t *)ch->dma->work;

    ATA_OUTL(ctlr->r_res2, (ch->unit + 1) << 2, 0x00000001);

    if ((ctlr->chip->cfg2 == PR_SATA2) ||
        ((ctlr->chip->cfg2 == PR_CMBO2) && (ch->unit < 2))) {
	/* set portmultiplier port */
	ATA_OUTB(ctlr->r_res2, 0x4e8 + (ch->unit << 8), atadev->unit & 0x0f);
    }

    /* XXX SOS add ATAPI commands support later */
    switch (request->u.ata.command) {
    default:
	return ata_generic_command(request);

    case ATA_READ_DMA:
    case ATA_READ_DMA48:
	wordp[0] = htole32(0x04 | ((ch->unit + 1) << 16) | (0x00 << 24));
	break;

    case ATA_WRITE_DMA:
    case ATA_WRITE_DMA48:
	wordp[0] = htole32(0x00 | ((ch->unit + 1) << 16) | (0x00 << 24));
	break;
    }
    wordp[1] = htole32(ch->dma->sg_bus);
    wordp[2] = 0;
    ata_promise_apkt((u_int8_t*)wordp, request);

    ATA_OUTL(ctlr->r_res2, 0x0240 + (ch->unit << 7), ch->dma->work_bus);
    return 0;
}

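/*
 * Channel reset for MIO style chips; handled differently for the SX4,
 * the first generation and the second generation SATA parts.
 */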
static void
ata_promise_mio_reset(device_t dev)
{
    struct ata_pci_controller *ctlr = device_get_softc(device_get_parent(dev));
    struct ata_channel *ch = device_get_softc(dev);
    struct ata_promise_sx4 *hpktp;

    switch (ctlr->chip->cfg2) {
    case PR_SX4X:

	/* softreset channel ATA module */
	hpktp = device_get_ivars(ctlr->dev);
	ATA_OUTL(ctlr->r_res2, 0xc0260 + (ch->unit << 7), ch->unit + 1);
	ata_udelay(1000);
	ATA_OUTL(ctlr->r_res2, 0xc0260 + (ch->unit << 7),
		 (ATA_INL(ctlr->r_res2, 0xc0260 + (ch->unit << 7)) &
		  ~0x00003f9f) | (ch->unit + 1));

	/* softreset HOST module */ /* XXX SOS what about other outstandings */
	lockmgr(&hpktp->mtx, LK_EXCLUSIVE);
	ATA_OUTL(ctlr->r_res2, 0xc012c,
		 (ATA_INL(ctlr->r_res2, 0xc012c) & ~0x00000f9f) | (1 << 11));
	DELAY(10);
	ATA_OUTL(ctlr->r_res2, 0xc012c,
		 (ATA_INL(ctlr->r_res2, 0xc012c) & ~0x00000f9f));
	hpktp->busy = 0;
	lockmgr(&hpktp->mtx, LK_RELEASE);
	ata_generic_reset(dev);
	break;

    case PR_PATA:
    case PR_CMBO:
    case PR_SATA:
	if ((ctlr->chip->cfg2 == PR_SATA) ||
	    ((ctlr->chip->cfg2 == PR_CMBO) && (ch->unit < 2))) {

	    /* mask plug/unplug intr */
	    ATA_OUTL(ctlr->r_res2, 0x06c, (0x00110000 << ch->unit));
	}

	/* softreset channels ATA module */
	ATA_OUTL(ctlr->r_res2, 0x0260 + (ch->unit << 7), (1 << 11));
	ata_udelay(10000);
	ATA_OUTL(ctlr->r_res2, 0x0260 + (ch->unit << 7),
		 (ATA_INL(ctlr->r_res2, 0x0260 + (ch->unit << 7)) &
		  ~0x00003f9f) | (ch->unit + 1));

	if ((ctlr->chip->cfg2 == PR_SATA) ||
	    ((ctlr->chip->cfg2 == PR_CMBO) && (ch->unit < 2))) {

	    if (ata_sata_phy_reset(dev))
		ata_generic_reset(dev);

	    /* reset and enable plug/unplug intr */
	    ATA_OUTL(ctlr->r_res2, 0x06c, (0x00000011 << ch->unit));
	}
	else
	    ata_generic_reset(dev);
	break;

    case PR_CMBO2:
    case PR_SATA2:
	if ((ctlr->chip->cfg2 == PR_SATA2) ||
	    ((ctlr->chip->cfg2 == PR_CMBO2) && (ch->unit < 2))) {
	    /* set portmultiplier port */
	    //ATA_OUTL(ctlr->r_res2, 0x4e8 + (ch->unit << 8), 0x0f);

	    /* mask plug/unplug intr */
	    ATA_OUTL(ctlr->r_res2, 0x060, (0x00110000 << ch->unit));
	}

	/* softreset channels ATA module */
	ATA_OUTL(ctlr->r_res2, 0x0260 + (ch->unit << 7), (1 << 11));
	ata_udelay(10000);
	ATA_OUTL(ctlr->r_res2, 0x0260 + (ch->unit << 7),
		 (ATA_INL(ctlr->r_res2, 0x0260 + (ch->unit << 7)) &
		  ~0x00003f9f) | (ch->unit + 1));

	if ((ctlr->chip->cfg2 == PR_SATA2) ||
	    ((ctlr->chip->cfg2 == PR_CMBO2) && (ch->unit < 2))) {

	    /* set PHY mode to "improved" */
	    ATA_OUTL(ctlr->r_res2, 0x414 + (ch->unit << 8),
		     (ATA_INL(ctlr->r_res2, 0x414 + (ch->unit << 8)) &
		     ~0x00000003) | 0x00000001);

	    if (ata_sata_phy_reset(dev)) {
		u_int32_t signature = ch->hw.softreset(dev, ATA_PM);

		if (bootverbose)
		    device_printf(dev, "SIGNATURE: %08x\n", signature);

		/* figure out what's there */
		switch (signature >> 16) {
		case 0x0000:
		    ch->devices = ATA_ATA_MASTER;
		    break;
		case 0x9669:
		    ch->devices = ATA_PORTMULTIPLIER;
		    device_printf(ch->dev,
				  "Portmultipliers not supported yet\n");
		    ch->devices = 0;
		    break;
		case 0xeb14:
		    ch->devices = ATA_ATAPI_MASTER;
		    break;
		default:
		    ch->devices = 0;
		}
		if (bootverbose)
		    device_printf(dev, "ata_promise_mio_reset devices=%08x\n",
				  ch->devices);
	    }

	    /* reset and enable plug/unplug intr */
	    ATA_OUTL(ctlr->r_res2, 0x060, (0x00000011 << ch->unit));

	    /* set portmultiplier port */
	    ATA_OUTL(ctlr->r_res2, 0x4e8 + (ch->unit << 8), 0x00);
	}
	else
	    ata_generic_reset(dev);
	break;
    }
}

/* must be called with ATA channel locked and state_mtx held */
static u_int32_t
ata_promise_mio_softreset(device_t dev, int port)
{
    struct ata_pci_controller *ctlr = device_get_softc(device_get_parent(dev));
    struct ata_channel *ch = device_get_softc(dev);
    int timeout;

    /* set portmultiplier port */
    ATA_OUTB(ctlr->r_res2, 0x4e8 + (ch->unit << 8), port & 0x0f);

    /* softreset device on this channel */
    ATA_IDX_OUTB(ch, ATA_DRIVE, ATA_D_IBM | ATA_D_LBA | ATA_DEV(ATA_MASTER));
    DELAY(10);
    ATA_IDX_OUTB(ch, ATA_CONTROL, ATA_A_IDS | ATA_A_RESET);
    ata_udelay(10000);
    ATA_IDX_OUTB(ch, ATA_CONTROL, ATA_A_IDS);
    ata_udelay(150000);
    ATA_IDX_INB(ch, ATA_ERROR);

    /* wait for BUSY to go inactive */
    for (timeout = 0; timeout < 100; timeout++) {
	uint8_t /* err, */ stat;

	/* err = */ ATA_IDX_INB(ch, ATA_ERROR);
	stat = ATA_IDX_INB(ch, ATA_STATUS);

	if (!(stat & ATA_S_BUSY)) {
	    return ATA_IDX_INB(ch, ATA_COUNT) |
		   (ATA_IDX_INB(ch, ATA_SECTOR) << 8) |
		   (ATA_IDX_INB(ch, ATA_CYL_LSB) << 16) |
		   (ATA_IDX_INB(ch, ATA_CYL_MSB) << 24);
	}

	/* wait for master and/or slave */
	if (!(stat & ATA_S_BUSY) || (stat == 0xff && timeout > 10))
	    break;
	ata_udelay(100000);
    }
    return -1;
}

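/* DMA init for MIO style chips; only the S/G list fill routine is overridden */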
static void
ata_promise_mio_dmainit(device_t dev)
{
    struct ata_channel *ch = device_get_softc(dev);

    ata_dmainit(dev);
    /* note start and stop are not used here */
    if (ch->dma)
	ch->dma->setprd = ata_promise_mio_setprd;
}

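/*
 * Fill in the scatter/gather list; the last entry is split in two if it
 * is larger than MAXLASTSGSIZE.
 */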
#define MAXLASTSGSIZE (32 * sizeof(u_int32_t))
static void
ata_promise_mio_setprd(void *xsc, bus_dma_segment_t *segs, int nsegs, int error)
{
    struct ata_dmasetprd_args *args = xsc;
    struct ata_dma_prdentry *prd = args->dmatab;
    int i;

    if ((args->error = error))
	return;

    for (i = 0; i < nsegs; i++) {
	prd[i].addr = htole32(segs[i].ds_addr);
	prd[i].count = htole32(segs[i].ds_len);
    }
    if (segs[i - 1].ds_len > MAXLASTSGSIZE) {
	//printf("split last SG element of %u\n", segs[i - 1].ds_len);
	prd[i - 1].count = htole32(segs[i - 1].ds_len - MAXLASTSGSIZE);
	prd[i].count = htole32(MAXLASTSGSIZE);
	prd[i].addr = htole32(segs[i - 1].ds_addr +
			      (segs[i - 1].ds_len - MAXLASTSGSIZE));
	nsegs++;
	i++;
    }
    prd[i - 1].count |= htole32(ATA_DMA_EOT);
    KASSERT(nsegs <= ATA_DMA_ENTRIES, ("too many DMA segment entries\n"));
    args->nsegs = nsegs;
}

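/* use SATA mode negotiation on SATA ports, legacy timing setup on PATA ports */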
static void
ata_promise_mio_setmode(device_t dev, int mode)
{
    device_t gparent = GRANDPARENT(dev);
    struct ata_pci_controller *ctlr = device_get_softc(gparent);
    struct ata_channel *ch = device_get_softc(device_get_parent(dev));

    if ((ctlr->chip->cfg2 == PR_SATA) ||
	((ctlr->chip->cfg2 == PR_CMBO) && (ch->unit < 2)) ||
	(ctlr->chip->cfg2 == PR_SATA2) ||
	((ctlr->chip->cfg2 == PR_CMBO2) && (ch->unit < 2)))
	ata_sata_setmode(dev, mode);
    else
	ata_promise_setmode(dev, mode);
}

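/*
 * SX4 interrupt handler; decodes the interrupt vector and drives both the
 * ATA channels and the host packet state machine.
 */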
static void
ata_promise_sx4_intr(void *data)
{
    struct ata_pci_controller *ctlr = data;
    struct ata_channel *ch;
    u_int32_t vector = ATA_INL(ctlr->r_res2, 0x000c0480);
    int unit;

    for (unit = 0; unit < ctlr->channels; unit++) {
	if (vector & (1 << (unit + 1)))
	    if ((ch = ctlr->interrupt[unit].argument))
		ctlr->interrupt[unit].function(ch);
	if (vector & (1 << (unit + 5)))
	    if ((ch = ctlr->interrupt[unit].argument))
		ata_promise_queue_hpkt(ctlr,
				       htole32((ch->unit * ATA_PDC_CHN_OFFSET) +
					       ATA_PDC_HPKT_OFFSET));
	if (vector & (1 << (unit + 9))) {
	    ata_promise_next_hpkt(ctlr);
	    if ((ch = ctlr->interrupt[unit].argument))
		ctlr->interrupt[unit].function(ch);
	}
	if (vector & (1 << (unit + 13))) {
	    ata_promise_next_hpkt(ctlr);
	    if ((ch = ctlr->interrupt[unit].argument))
		ATA_OUTL(ctlr->r_res2, 0x000c0240 + (ch->unit << 7),
			 htole32((ch->unit * ATA_PDC_CHN_OFFSET) +
			 ATA_PDC_APKT_OFFSET));
	}
    }
}

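/*
 * Issue a command on SX4 chips; DMA transfers are set up as host (HPKT)
 * and ATA (APKT) packets that move data through the on-board DIMM buffer.
 */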
static int
ata_promise_sx4_command(struct ata_request *request)
{
    device_t gparent = GRANDPARENT(request->dev);
    struct ata_pci_controller *ctlr = device_get_softc(gparent);
    struct ata_channel *ch = device_get_softc(request->parent);
    struct ata_dma_prdentry *prd = ch->dma->sg;
    caddr_t window = rman_get_virtual(ctlr->r_res1);
    u_int32_t *wordp;
    int i, idx, length = 0;

    /* XXX SOS add ATAPI commands support later */
    switch (request->u.ata.command) {

    default:
	return -1;

    case ATA_ATA_IDENTIFY:
    case ATA_READ:
    case ATA_READ48:
    case ATA_READ_MUL:
    case ATA_READ_MUL48:
    case ATA_WRITE:
    case ATA_WRITE48:
    case ATA_WRITE_MUL:
    case ATA_WRITE_MUL48:
	ATA_OUTL(ctlr->r_res2, 0x000c0400 + ((ch->unit + 1) << 2), 0x00000001);
	return ata_generic_command(request);

    case ATA_SETFEATURES:
    case ATA_FLUSHCACHE:
    case ATA_FLUSHCACHE48:
    case ATA_SLEEP:
    case ATA_SET_MULTI:
	wordp = (u_int32_t *)
	    (window + (ch->unit * ATA_PDC_CHN_OFFSET) + ATA_PDC_APKT_OFFSET);
	wordp[0] = htole32(0x08 | ((ch->unit + 1)<<16) | (0x00 << 24));
	wordp[1] = 0;
	wordp[2] = 0;
	ata_promise_apkt((u_int8_t *)wordp, request);
	ATA_OUTL(ctlr->r_res2, 0x000c0484, 0x00000001);
	ATA_OUTL(ctlr->r_res2, 0x000c0400 + ((ch->unit + 1) << 2), 0x00000001);
	ATA_OUTL(ctlr->r_res2, 0x000c0240 + (ch->unit << 7),
		 htole32((ch->unit * ATA_PDC_CHN_OFFSET)+ATA_PDC_APKT_OFFSET));
	return 0;

    case ATA_READ_DMA:
    case ATA_READ_DMA48:
    case ATA_WRITE_DMA:
    case ATA_WRITE_DMA48:
	wordp = (u_int32_t *)
	    (window + (ch->unit * ATA_PDC_CHN_OFFSET) + ATA_PDC_HSG_OFFSET);
	i = idx = 0;
	do {
	    wordp[idx++] = prd[i].addr;
	    wordp[idx++] = prd[i].count;
	    length += (prd[i].count & ~ATA_DMA_EOT);
	} while (!(prd[i++].count & ATA_DMA_EOT));

	wordp = (u_int32_t *)
	    (window + (ch->unit * ATA_PDC_CHN_OFFSET) + ATA_PDC_LSG_OFFSET);
	wordp[0] = htole32((ch->unit * ATA_PDC_BUF_OFFSET) + ATA_PDC_BUF_BASE);
	wordp[1] = htole32(request->bytecount | ATA_DMA_EOT);

	wordp = (u_int32_t *)
	    (window + (ch->unit * ATA_PDC_CHN_OFFSET) + ATA_PDC_ASG_OFFSET);
	wordp[0] = htole32((ch->unit * ATA_PDC_BUF_OFFSET) + ATA_PDC_BUF_BASE);
	wordp[1] = htole32(request->bytecount | ATA_DMA_EOT);

	wordp = (u_int32_t *)
	    (window + (ch->unit * ATA_PDC_CHN_OFFSET) + ATA_PDC_HPKT_OFFSET);
	if (request->flags & ATA_R_READ)
	    wordp[0] = htole32(0x14 | ((ch->unit+9)<<16) | ((ch->unit+5)<<24));
	if (request->flags & ATA_R_WRITE)
	    wordp[0] = htole32(0x00 | ((ch->unit+13)<<16) | (0x00<<24));
	wordp[1] = htole32((ch->unit * ATA_PDC_CHN_OFFSET)+ATA_PDC_HSG_OFFSET);
	wordp[2] = htole32((ch->unit * ATA_PDC_CHN_OFFSET)+ATA_PDC_LSG_OFFSET);
	wordp[3] = 0;

	wordp = (u_int32_t *)
	    (window + (ch->unit * ATA_PDC_CHN_OFFSET) + ATA_PDC_APKT_OFFSET);
	if (request->flags & ATA_R_READ)
	    wordp[0] = htole32(0x04 | ((ch->unit+5)<<16) | (0x00<<24));
	if (request->flags & ATA_R_WRITE)
	    wordp[0] = htole32(0x10 | ((ch->unit+1)<<16) | ((ch->unit+13)<<24));
	wordp[1] = htole32((ch->unit * ATA_PDC_CHN_OFFSET)+ATA_PDC_ASG_OFFSET);
	wordp[2] = 0;
	ata_promise_apkt((u_int8_t *)wordp, request);
	ATA_OUTL(ctlr->r_res2, 0x000c0484, 0x00000001);

	if (request->flags & ATA_R_READ) {
	    ATA_OUTL(ctlr->r_res2, 0x000c0400 + ((ch->unit+5)<<2), 0x00000001);
	    ATA_OUTL(ctlr->r_res2, 0x000c0400 + ((ch->unit+9)<<2), 0x00000001);
	    ATA_OUTL(ctlr->r_res2, 0x000c0240 + (ch->unit << 7),
		htole32((ch->unit * ATA_PDC_CHN_OFFSET) + ATA_PDC_APKT_OFFSET));
	}
	if (request->flags & ATA_R_WRITE) {
	    ATA_OUTL(ctlr->r_res2, 0x000c0400 + ((ch->unit+1)<<2), 0x00000001);
	    ATA_OUTL(ctlr->r_res2, 0x000c0400 + ((ch->unit+13)<<2), 0x00000001);
	    ata_promise_queue_hpkt(ctlr,
		htole32((ch->unit * ATA_PDC_CHN_OFFSET) + ATA_PDC_HPKT_OFFSET));
	}
	return 0;
    }
}

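/*
 * Build an ATA packet (APKT) that loads the taskfile registers and issues
 * the command; returns the number of bytes used in the packet buffer.
 */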
static int
ata_promise_apkt(u_int8_t *bytep, struct ata_request *request)
{
    struct ata_device *atadev = device_get_softc(request->dev);
    int i = 12;

    bytep[i++] = ATA_PDC_1B | ATA_PDC_WRITE_REG | ATA_PDC_WAIT_NBUSY|ATA_DRIVE;
    bytep[i++] = ATA_D_IBM | ATA_D_LBA | ATA_DEV(atadev->unit);
    bytep[i++] = ATA_PDC_1B | ATA_PDC_WRITE_CTL;
    bytep[i++] = ATA_A_4BIT;

    if (atadev->flags & ATA_D_48BIT_ACTIVE) {
	bytep[i++] = ATA_PDC_2B | ATA_PDC_WRITE_REG | ATA_FEATURE;
	bytep[i++] = request->u.ata.feature >> 8;
	bytep[i++] = request->u.ata.feature;
	bytep[i++] = ATA_PDC_2B | ATA_PDC_WRITE_REG | ATA_COUNT;
	bytep[i++] = request->u.ata.count >> 8;
	bytep[i++] = request->u.ata.count;
	bytep[i++] = ATA_PDC_2B | ATA_PDC_WRITE_REG | ATA_SECTOR;
	bytep[i++] = request->u.ata.lba >> 24;
	bytep[i++] = request->u.ata.lba;
	bytep[i++] = ATA_PDC_2B | ATA_PDC_WRITE_REG | ATA_CYL_LSB;
	bytep[i++] = request->u.ata.lba >> 32;
	bytep[i++] = request->u.ata.lba >> 8;
	bytep[i++] = ATA_PDC_2B | ATA_PDC_WRITE_REG | ATA_CYL_MSB;
	bytep[i++] = request->u.ata.lba >> 40;
	bytep[i++] = request->u.ata.lba >> 16;
	bytep[i++] = ATA_PDC_1B | ATA_PDC_WRITE_REG | ATA_DRIVE;
	bytep[i++] = ATA_D_LBA | ATA_DEV(atadev->unit);
    }
    else {
	bytep[i++] = ATA_PDC_1B | ATA_PDC_WRITE_REG | ATA_FEATURE;
	bytep[i++] = request->u.ata.feature;
	bytep[i++] = ATA_PDC_1B | ATA_PDC_WRITE_REG | ATA_COUNT;
	bytep[i++] = request->u.ata.count;
	bytep[i++] = ATA_PDC_1B | ATA_PDC_WRITE_REG | ATA_SECTOR;
	bytep[i++] = request->u.ata.lba;
	bytep[i++] = ATA_PDC_1B | ATA_PDC_WRITE_REG | ATA_CYL_LSB;
	bytep[i++] = request->u.ata.lba >> 8;
	bytep[i++] = ATA_PDC_1B | ATA_PDC_WRITE_REG | ATA_CYL_MSB;
	bytep[i++] = request->u.ata.lba >> 16;
	bytep[i++] = ATA_PDC_1B | ATA_PDC_WRITE_REG | ATA_DRIVE;
	bytep[i++] = (atadev->flags & ATA_D_USE_CHS ? 0 : ATA_D_LBA) |
		     ATA_D_IBM | ATA_DEV(atadev->unit) |
		     ((request->u.ata.lba >> 24)&0xf);
    }
    bytep[i++] = ATA_PDC_1B | ATA_PDC_WRITE_END | ATA_COMMAND;
    bytep[i++] = request->u.ata.command;
    return i;
}

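/* hand a host packet to the controller, or queue it if one is already active */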
static void
ata_promise_queue_hpkt(struct ata_pci_controller *ctlr, u_int32_t hpkt)
{
    struct ata_promise_sx4 *hpktp = device_get_ivars(ctlr->dev);

    lockmgr(&hpktp->mtx, LK_EXCLUSIVE);
    if (hpktp->busy) {
	struct host_packet *hp =
	    kmalloc(sizeof(struct host_packet), M_TEMP, M_INTWAIT | M_ZERO);
	hp->addr = hpkt;
	TAILQ_INSERT_TAIL(&hpktp->queue, hp, chain);
    }
    else {
	hpktp->busy = 1;
	ATA_OUTL(ctlr->r_res2, 0x000c0100, hpkt);
    }
    lockmgr(&hpktp->mtx, LK_RELEASE);
}

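/* start the next queued host packet, if any, otherwise mark the engine idle */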
static void
ata_promise_next_hpkt(struct ata_pci_controller *ctlr)
{
    struct ata_promise_sx4 *hpktp = device_get_ivars(ctlr->dev);
    struct host_packet *hp;

    lockmgr(&hpktp->mtx, LK_EXCLUSIVE);
    if ((hp = TAILQ_FIRST(&hpktp->queue))) {
	TAILQ_REMOVE(&hpktp->queue, hp, chain);
	ATA_OUTL(ctlr->r_res2, 0x000c0100, hp->addr);
	kfree(hp, M_TEMP);
    }
    else
	hpktp->busy = 0;
    lockmgr(&hpktp->mtx, LK_RELEASE);
}