/* $OpenBSD: aic7xxx_openbsd.c,v 1.72 2022/04/16 19:19:58 naddy Exp $ */
/* $NetBSD: aic7xxx_osm.c,v 1.14 2003/11/02 11:07:44 wiz Exp $ */

/*
 * Bus independent OpenBSD shim for the aic7xxx based adaptec SCSI controllers
 *
 * Copyright (c) 1994-2001 Justin T. Gibbs.
 * Copyright (c) 2001-2002 Steve Murphree, Jr.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU Public License ("GPL").
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * //depot/aic7xxx/freebsd/dev/aic7xxx/aic7xxx_osm.c#12 $
 *
 * $FreeBSD: src/sys/dev/aic7xxx/aic7xxx_osm.c,v 1.31 2002/11/30 19:08:58 scottl Exp $
 */
/*
 * Ported from FreeBSD by Pascal Renauld, Network Storage Solutions, Inc. - April 2003
 */

#include <dev/ic/aic7xxx_openbsd.h>
#include <dev/ic/aic7xxx_inline.h>

#ifndef AHC_TMODE_ENABLE
#define AHC_TMODE_ENABLE 0
#endif


void	ahc_action(struct scsi_xfer *);
void	ahc_execute_scb(void *, bus_dma_segment_t *, int);
int	ahc_poll(struct ahc_softc *, int);
void	ahc_setup_data(struct ahc_softc *, struct scsi_xfer *, struct scb *);

void	ahc_adapter_req_set_xfer_mode(struct ahc_softc *, struct scb *);


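/*
 * Glue visible to autoconf(9) and the SCSI midlayer.
 */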
struct cfdriver ahc_cd = {
	NULL, "ahc", DV_DULL
};

static const struct scsi_adapter ahc_switch = {
	ahc_action, NULL, NULL, NULL, NULL
};

/*
 * Attach all the sub-devices we can find
 */
int
ahc_attach(struct ahc_softc *ahc)
{
	struct scsibus_attach_args saa;
	int s;

	s = splbio();

#ifndef DEBUG
	if (bootverbose) {
		char ahc_info[256];
		ahc_controller_info(ahc, ahc_info, sizeof ahc_info);
		printf("%s: %s\n", ahc->sc_dev.dv_xname, ahc_info);
	}
#endif

	ahc_intr_enable(ahc, TRUE);

	if (ahc->flags & AHC_RESET_BUS_A)
		ahc_reset_channel(ahc, 'A', TRUE);
	if ((ahc->features & AHC_TWIN) && ahc->flags & AHC_RESET_BUS_B)
		ahc_reset_channel(ahc, 'B', TRUE);

	saa.saa_adapter_buswidth = (ahc->features & AHC_WIDE) ? 16 : 8;
	saa.saa_adapter_softc = ahc;
	saa.saa_adapter = &ahc_switch;
	saa.saa_luns = 8;
	saa.saa_openings = 16;
	saa.saa_pool = &ahc->sc_iopool;
	saa.saa_quirks = saa.saa_flags = 0;
	saa.saa_wwpn = saa.saa_wwnn = 0;
	if ((ahc->flags & AHC_PRIMARY_CHANNEL) == 0) {
		saa.saa_adapter_target = ahc->our_id;
		ahc->sc_child = (struct scsibus_softc *)config_found(
		    (void *)&ahc->sc_dev, &saa, scsiprint);
		if (ahc->features & AHC_TWIN) {
			saa.saa_adapter_target = ahc->our_id_b;
			ahc->sc_child_b = (struct scsibus_softc *)config_found(
			    (void *)&ahc->sc_dev, &saa, scsiprint);
		}
	} else {
		if (ahc->features & AHC_TWIN) {
			saa.saa_adapter_target = ahc->our_id_b;
			ahc->sc_child = (struct scsibus_softc *)config_found(
			    (void *)&ahc->sc_dev, &saa, scsiprint);
		}
		saa.saa_adapter_target = ahc->our_id;
		ahc->sc_child_b = (struct scsibus_softc *)config_found(
		    (void *)&ahc->sc_dev, &saa, scsiprint);
	}

	splx(s);
	return (1);
}

/*
 * Catch an interrupt from the adapter
 */
int
ahc_platform_intr(void *arg)
{
	struct ahc_softc *ahc = (struct ahc_softc *)arg;

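	/*
	 * Make sure the hardware SCB array is coherent before the core
	 * inspects any completed commands.
	 */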
	bus_dmamap_sync(ahc->parent_dmat, ahc->scb_data->hscb_dmamap,
	    0, ahc->scb_data->hscb_dmamap->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	return ahc_intr(ahc);
}

/*
 * We have an scb which has been processed by the
 * adaptor, now we look to see how the operation
 * went.
 */
void
ahc_done(struct ahc_softc *ahc, struct scb *scb)
{
	struct scsi_xfer *xs = scb->xs;

	bus_dmamap_sync(ahc->parent_dmat, ahc->scb_data->hscb_dmamap,
	    0, ahc->scb_data->hscb_dmamap->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	LIST_REMOVE(scb, pending_links);
	if ((scb->flags & SCB_UNTAGGEDQ) != 0) {
		struct scb_tailq *untagged_q;
		int target_offset;

		target_offset = SCB_GET_TARGET_OFFSET(ahc, scb);
		untagged_q = &ahc->untagged_queues[target_offset];
		TAILQ_REMOVE(untagged_q, scb, links.tqe);
		scb->flags &= ~SCB_UNTAGGEDQ;
		ahc_run_untagged_queue(ahc, untagged_q);
	}

	timeout_del(&xs->stimeout);

	if (xs->datalen) {
		int op;

		if ((xs->flags & SCSI_DATA_IN) != 0)
			op = BUS_DMASYNC_POSTREAD;
		else
			op = BUS_DMASYNC_POSTWRITE;
		bus_dmamap_sync(ahc->parent_dmat, scb->dmamap, 0,
		    scb->dmamap->dm_mapsize, op);
		bus_dmamap_unload(ahc->parent_dmat, scb->dmamap);
	}

	/* Translate the CAM status code to a SCSI error code. */
	switch (xs->error) {
	case CAM_SCSI_STATUS_ERROR:
	case CAM_REQ_INPROG:
	case CAM_REQ_CMP:
		switch (xs->status) {
		case SCSI_TASKSET_FULL:
		case SCSI_BUSY:
			xs->error = XS_BUSY;
			break;
		case SCSI_CHECK:
		case SCSI_TERMINATED:
			if ((scb->flags & SCB_SENSE) == 0) {
				/* CHECK on CHECK? */
				xs->error = XS_DRIVER_STUFFUP;
			} else
				xs->error = XS_NOERROR;
			break;
		default:
			xs->error = XS_NOERROR;
			break;
		}
		break;
	case CAM_REQUEUE_REQ:
	case CAM_BUSY:
		xs->error = XS_BUSY;
		break;
	case CAM_CMD_TIMEOUT:
		xs->error = XS_TIMEOUT;
		break;
	case CAM_BDR_SENT:
	case CAM_SCSI_BUS_RESET:
		xs->error = XS_RESET;
		break;
	case CAM_SEL_TIMEOUT:
		xs->error = XS_SELTIMEOUT;
		break;
	default:
		xs->error = XS_DRIVER_STUFFUP;
		break;
	}

	if (xs->error != XS_NOERROR) {
		/* Don't clobber any existing error state */
	} else if ((scb->flags & SCB_SENSE) != 0) {
		/*
		 * We performed autosense retrieval.
		 *
		 * Zero any sense not transferred by the
		 * device. The SCSI spec mandates that any
		 * untransferred data should be assumed to be
		 * zero. Complete the 'bounce' of sense information
		 * through buffers accessible via bus-space by
		 * copying it into the client's csio.
		 */
		memset(&xs->sense, 0, sizeof(struct scsi_sense_data));
		memcpy(&xs->sense, ahc_get_sense_buf(ahc, scb),
		    aic_le32toh(scb->sg_list->len) & AHC_SG_LEN_MASK);
		xs->error = XS_SENSE;
	}

	scsi_done(xs);
}

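/*
 * Midlayer entry point: set up the hardware SCB for this transfer and
 * pass it to ahc_setup_data()/ahc_execute_scb().
 */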
void
ahc_action(struct scsi_xfer *xs)
{
	struct ahc_softc *ahc;
	struct scb *scb;
	struct hardware_scb *hscb;
	u_int target_id;
	u_int our_id;

	ahc = xs->sc_link->bus->sb_adapter_softc;

#ifdef AHC_DEBUG
	printf("%s: ahc_action\n", ahc_name(ahc));
#endif

	target_id = xs->sc_link->target;
	our_id = SCSI_SCSI_ID(ahc, xs->sc_link);

	/*
	 * get the scb to use.
	 */
	scb = xs->io;

	/* Clean up for the next user */
	scb->flags = SCB_FLAG_NONE;

	hscb = scb->hscb;
	hscb->control = 0;
	ahc->scb_data->scbindex[hscb->tag] = NULL;

#ifdef AHC_DEBUG
	printf("%s: start scb(%p)\n", ahc_name(ahc), scb);
#endif
	scb->xs = xs;
	timeout_set(&xs->stimeout, ahc_timeout, scb);

	/*
	 * Put all the arguments for the xfer in the scb
	 */
	hscb->control = 0;
	hscb->scsiid = BUILD_SCSIID(ahc, xs->sc_link, target_id, our_id);
	hscb->lun = xs->sc_link->lun;
	if (xs->flags & SCSI_RESET) {
		hscb->cdb_len = 0;
		scb->flags |= SCB_DEVICE_RESET;
		hscb->control |= MK_MESSAGE;
		ahc_execute_scb(scb, NULL, 0);
		return;
	}

	ahc_setup_data(ahc, xs, scb);
}

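/*
 * Finish setting up an SCB once its data buffer (if any) is mapped:
 * build the SG list, apply the current transfer settings and queue the
 * SCB to the controller, polling for completion if SCSI_POLL is set.
 */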
void
ahc_execute_scb(void *arg, bus_dma_segment_t *dm_segs, int nsegments)
{
	struct scb *scb;
	struct scsi_xfer *xs;
	struct ahc_softc *ahc;
	struct ahc_initiator_tinfo *tinfo;
	struct ahc_tmode_tstate *tstate;

	u_int mask;
	int s;

	scb = (struct scb *)arg;
	xs = scb->xs;
	xs->error = CAM_REQ_INPROG;
	xs->status = 0;
	ahc = xs->sc_link->bus->sb_adapter_softc;

	if (nsegments != 0) {
		struct ahc_dma_seg *sg;
		bus_dma_segment_t *end_seg;
		int op;

		end_seg = dm_segs + nsegments;

		/* Copy the segments into our SG list */
		sg = scb->sg_list;
		while (dm_segs < end_seg) {
			uint32_t len;

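			/*
			 * Controllers capable of 39-bit addressing keep
			 * the upper address bits in the high byte of the
			 * SG length word.
			 */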
			sg->addr = aic_htole32(dm_segs->ds_addr);
			len = dm_segs->ds_len
			    | ((dm_segs->ds_addr >> 8) & 0x7F000000);
			sg->len = aic_htole32(len);
			sg++;
			dm_segs++;
		}

		/*
		 * Note where to find the SG entries in bus space.
		 * We also set the full residual flag which the
		 * sequencer will clear as soon as a data transfer
		 * occurs.
		 */
		scb->hscb->sgptr = aic_htole32(scb->sg_list_phys|SG_FULL_RESID);

		if ((xs->flags & SCSI_DATA_IN) != 0)
			op = BUS_DMASYNC_PREREAD;
		else
			op = BUS_DMASYNC_PREWRITE;

		bus_dmamap_sync(ahc->parent_dmat, scb->dmamap, 0,
		    scb->dmamap->dm_mapsize, op);

		sg--;
		sg->len |= aic_htole32(AHC_DMA_LAST_SEG);

		bus_dmamap_sync(ahc->parent_dmat, scb->sg_map->sg_dmamap,
		    0, scb->sg_map->sg_dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/* Copy the first SG into the "current" data pointer area */
		scb->hscb->dataptr = scb->sg_list->addr;
		scb->hscb->datacnt = scb->sg_list->len;
	} else {
		scb->hscb->sgptr = aic_htole32(SG_LIST_NULL);
		scb->hscb->dataptr = 0;
		scb->hscb->datacnt = 0;
	}

	scb->sg_count = nsegments;

	s = splbio();

	tinfo = ahc_fetch_transinfo(ahc, SCSIID_CHANNEL(ahc, scb->hscb->scsiid),
	    SCSIID_OUR_ID(scb->hscb->scsiid),
	    SCSIID_TARGET(ahc, scb->hscb->scsiid),
	    &tstate);

	mask = SCB_GET_TARGET_MASK(ahc, scb);
	scb->hscb->scsirate = tinfo->scsirate;
	scb->hscb->scsioffset = tinfo->curr.offset;

	if ((tstate->ultraenb & mask) != 0)
		scb->hscb->control |= ULTRAENB;

	if ((tstate->discenable & mask) != 0)
		scb->hscb->control |= DISCENB;

	if ((tstate->auto_negotiate & mask) != 0) {
		scb->flags |= SCB_AUTO_NEGOTIATE;
		scb->hscb->control |= MK_MESSAGE;
	}

	if ((tstate->tagenable & mask) != 0)
		scb->hscb->control |= TAG_ENB;

	bus_dmamap_sync(ahc->parent_dmat, ahc->scb_data->hscb_dmamap,
	    0, ahc->scb_data->hscb_dmamap->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	LIST_INSERT_HEAD(&ahc->pending_scbs, scb, pending_links);

	if (!(xs->flags & SCSI_POLL))
		timeout_add_msec(&xs->stimeout, xs->timeout);

	/*
	 * We only allow one untagged transaction
	 * per target in the initiator role unless
	 * we are storing a full busy target *lun*
	 * table in SCB space.
	 *
	 * This really should not be of any
	 * concern, as we take care to avoid this
	 * in ahc_done(). XXX smurph
	 */
	if ((scb->hscb->control & (TARGET_SCB|TAG_ENB)) == 0
	    && (ahc->flags & AHC_SCB_BTT) == 0) {
		struct scb_tailq *untagged_q;
		int target_offset;

		target_offset = SCB_GET_TARGET_OFFSET(ahc, scb);
		untagged_q = &(ahc->untagged_queues[target_offset]);
		TAILQ_INSERT_TAIL(untagged_q, scb, links.tqe);
		scb->flags |= SCB_UNTAGGEDQ;
		if (TAILQ_FIRST(untagged_q) != scb) {
			if (xs->flags & SCSI_POLL)
				goto poll;
			else {
				splx(s);
				return;
			}
		}
	}
	scb->flags |= SCB_ACTIVE;

	if ((scb->flags & SCB_TARGET_IMMEDIATE) != 0) {
		/* Define a mapping from our tag to the SCB. */
		ahc->scb_data->scbindex[scb->hscb->tag] = scb;
		ahc_pause(ahc);
		if ((ahc->flags & AHC_PAGESCBS) == 0)
			ahc_outb(ahc, SCBPTR, scb->hscb->tag);
		ahc_outb(ahc, TARG_IMMEDIATE_SCB, scb->hscb->tag);
		ahc_unpause(ahc);
	} else {
		ahc_queue_scb(ahc, scb);
	}

	if (!(xs->flags & SCSI_POLL)) {
		if (ahc->inited_target[xs->sc_link->target] == 0) {
			struct ahc_devinfo devinfo;

			ahc_adapter_req_set_xfer_mode(ahc, scb);
			ahc_scb_devinfo(ahc, &devinfo, scb);
			ahc_update_neg_request(ahc, &devinfo, tstate, tinfo,
			    AHC_NEG_IF_NON_ASYNC);

			ahc->inited_target[xs->sc_link->target] = 1;
		}
		splx(s);
		return;
	}

	/*
	 * If we can't use interrupts, poll for completion
	 */
poll:
#ifdef AHC_DEBUG
	printf("%s: cmd_poll\n", ahc_name(ahc));
#endif

	do {
		if (ahc_poll(ahc, xs->timeout)) {
			if (!(xs->flags & SCSI_SILENT))
				printf("cmd fail\n");
			ahc_timeout(scb);
			break;
		}
	} while (!(xs->flags & ITSDONE));

	splx(s);
}

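/*
 * Busy-wait up to 'wait' milliseconds for the controller to post an
 * interrupt condition, then service it.
 */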
int
ahc_poll(struct ahc_softc *ahc, int wait)
{
	while (--wait) {
		DELAY(1000);
		if (ahc_inb(ahc, INTSTAT) & INT_PEND)
			break;
	}

	if (wait == 0) {
		printf("%s: board is not responding\n", ahc_name(ahc));
		return (EIO);
	}

	ahc_intr((void *)ahc);
	return (0);
}

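/*
 * Copy the CDB into the hardware SCB and map any data buffer for DMA
 * before handing the SCB to ahc_execute_scb().
 */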
void
ahc_setup_data(struct ahc_softc *ahc, struct scsi_xfer *xs,
    struct scb *scb)
{
	struct hardware_scb *hscb;

	hscb = scb->hscb;
	xs->resid = xs->status = 0;
	xs->error = CAM_REQ_INPROG;

	hscb->cdb_len = xs->cmdlen;
	if (hscb->cdb_len > sizeof(hscb->cdb32)) {
		xs->error = XS_DRIVER_STUFFUP;
		scsi_done(xs);
		return;
	}

	if (hscb->cdb_len > 12) {
		memcpy(hscb->cdb32, &xs->cmd, hscb->cdb_len);
		scb->flags |= SCB_CDB32_PTR;
	} else {
		memcpy(hscb->shared_data.cdb, &xs->cmd, hscb->cdb_len);
	}

	/* Only use S/G if there is a transfer */
	if (xs->datalen) {
		int error;

		error = bus_dmamap_load(ahc->parent_dmat,
		    scb->dmamap, xs->data,
		    xs->datalen, NULL,
		    (xs->flags & SCSI_NOSLEEP) ?
		    BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
		if (error) {
#ifdef AHC_DEBUG
			printf("%s: in ahc_setup_data(): bus_dmamap_load() "
			    "= %d\n",
			    ahc_name(ahc), error);
#endif
			xs->error = XS_DRIVER_STUFFUP;
			scsi_done(xs);
			return;
		}
		ahc_execute_scb(scb, scb->dmamap->dm_segs,
		    scb->dmamap->dm_nsegs);
	} else {
		ahc_execute_scb(scb, NULL, 0);
	}
}

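/*
 * Handle a command that has timed out: if its SCB is still active,
 * reset the channel it was issued on, which aborts and completes the
 * outstanding commands.
 */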
void
ahc_timeout(void *arg)
{
	struct scb *scb, *list_scb;
	struct ahc_softc *ahc;
	int s;
	int found;
	char channel;

	scb = arg;
	ahc = scb->xs->sc_link->bus->sb_adapter_softc;

	s = splbio();

#ifdef AHC_DEBUG
	printf("%s: SCB %d timed out\n", ahc_name(ahc), scb->hscb->tag);
	ahc_dump_card_state(ahc);
#endif

	ahc_pause(ahc);

	if (scb->flags & SCB_ACTIVE) {
		channel = SCB_GET_CHANNEL(ahc, scb);
		ahc_set_transaction_status(scb, CAM_CMD_TIMEOUT);
		/*
		 * Go through all of our pending SCBs and remove
		 * any scheduled timeouts for them. They're about to be
		 * aborted so no need for them to timeout.
		 */
		LIST_FOREACH(list_scb, &ahc->pending_scbs, pending_links) {
			if (list_scb->xs)
				timeout_del(&list_scb->xs->stimeout);
		}
		found = ahc_reset_channel(ahc, channel, /*Initiate Reset*/TRUE);
#ifdef AHC_DEBUG
		printf("%s: Issued Channel %c Bus Reset %d SCBs aborted\n",
		    ahc_name(ahc), channel, found);
#endif
	}

	ahc_unpause(ahc);
	splx(s);
}


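/*
 * Core callback to enable or disable tagged queueing for a target.
 */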
void
ahc_platform_set_tags(struct ahc_softc *ahc,
    struct ahc_devinfo *devinfo, int alg)
{
	struct ahc_tmode_tstate *tstate;

	ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid,
	    devinfo->target, &tstate);

	/* XXXX Need to check quirks before doing this! XXXX */

	switch (alg) {
	case AHC_QUEUE_BASIC:
	case AHC_QUEUE_TAGGED:
		tstate->tagenable |= devinfo->target_mask;
		break;
	case AHC_QUEUE_NONE:
		tstate->tagenable &= ~devinfo->target_mask;
		break;
	}
}

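/*
 * Used by the core to order controllers as they register; OpenBSD
 * imposes no particular order, so all controllers compare equal.
 */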
int
ahc_softc_comp(struct ahc_softc *lahc, struct ahc_softc *rahc)
{
	return (0);
}

void
ahc_send_async(struct ahc_softc *ahc, char channel, u_int target, u_int lun,
    ac_code code, void *opt_arg)
{
	/* Nothing to do here for OpenBSD */
}

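/*
 * Set the negotiation goals (width, period, offset, tags, PPR options)
 * for this SCB's target, honoring any quirks noted by the midlayer.
 */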
void
ahc_adapter_req_set_xfer_mode(struct ahc_softc *ahc, struct scb *scb)
{
	struct ahc_initiator_tinfo *tinfo;
	struct ahc_tmode_tstate *tstate;
	struct ahc_syncrate *syncrate;
	struct ahc_devinfo devinfo;
	u_int16_t quirks;
	u_int width, ppr_options, period, offset;
	int s;

	s = splbio();

	ahc_scb_devinfo(ahc, &devinfo, scb);
	quirks = scb->xs->sc_link->quirks;
	tinfo = ahc_fetch_transinfo(ahc, devinfo.channel,
	    devinfo.our_scsiid, devinfo.target, &tstate);

	tstate->discenable |= (ahc->user_discenable & devinfo.target_mask);

	if (quirks & SDEV_NOTAGS)
		tstate->tagenable &= ~devinfo.target_mask;
	else if (ahc->user_tagenable & devinfo.target_mask)
		tstate->tagenable |= devinfo.target_mask;

	if (quirks & SDEV_NOWIDE)
		width = MSG_EXT_WDTR_BUS_8_BIT;
	else
		width = MSG_EXT_WDTR_BUS_16_BIT;

	ahc_validate_width(ahc, NULL, &width, ROLE_UNKNOWN);
	if (width > tinfo->user.width)
		width = tinfo->user.width;
	ahc_set_width(ahc, &devinfo, width, AHC_TRANS_GOAL, FALSE);

	if (quirks & SDEV_NOSYNC) {
		period = 0;
		offset = 0;
	} else {
		period = tinfo->user.period;
		offset = tinfo->user.offset;
	}

	/* XXX Look at saved INQUIRY flags for PPR capabilities XXX */
	ppr_options = tinfo->user.ppr_options;
	/* XXX Other reasons to avoid ppr? XXX */
	if (width < MSG_EXT_WDTR_BUS_16_BIT)
		ppr_options = 0;

	if ((tstate->discenable & devinfo.target_mask) == 0 ||
	    (tstate->tagenable & devinfo.target_mask) == 0)
		ppr_options &= ~MSG_EXT_PPR_PROT_IUS;

	syncrate = ahc_find_syncrate(ahc, &period, &ppr_options,
	    AHC_SYNCRATE_MAX);
	ahc_validate_offset(ahc, NULL, syncrate, &offset, width,
	    ROLE_UNKNOWN);

	if (offset == 0) {
		period = 0;
		ppr_options = 0;
	}

	if (ppr_options != 0 && tinfo->user.transport_version >= 3) {
		tinfo->goal.transport_version = tinfo->user.transport_version;
		tinfo->curr.transport_version = tinfo->user.transport_version;
	}

	ahc_set_syncrate(ahc, &devinfo, syncrate, period, offset, ppr_options,
	    AHC_TRANS_GOAL, FALSE);

	splx(s);
}

/*
 * Get a free scb. If there are none, see if we can allocate a new SCB.
 */
void *
ahc_scb_alloc(void *xahc)
{
	struct ahc_softc *ahc = xahc;
	struct scb *scb;

	mtx_enter(&ahc->sc_scb_mtx);
	scb = SLIST_FIRST(&ahc->scb_data->free_scbs);

	if (scb != NULL)
		SLIST_REMOVE_HEAD(&ahc->scb_data->free_scbs, links.sle);

	mtx_leave(&ahc->sc_scb_mtx);

	return (scb);
}

/*
 * Return an SCB resource to the free list.
 */
void
ahc_scb_free(void *xahc, void *io)
{
	struct ahc_softc *ahc = xahc;
	struct scb *scb = io;
	struct hardware_scb *hscb;

	hscb = scb->hscb;
	/* Clean up for the next user */
	ahc->scb_data->scbindex[hscb->tag] = NULL;
	scb->flags = SCB_FLAG_NONE;
	hscb->control = 0;

	mtx_enter(&ahc->sc_scb_mtx);
	SLIST_INSERT_HEAD(&ahc->scb_data->free_scbs, scb, links.sle);
	mtx_leave(&ahc->sc_scb_mtx);

	/* Notify the OSM that a resource is now available. */
	ahc_platform_scb_free(ahc, scb);
}