1 /*
2 * Copyright (c) 2010, LSI Corp.
3 * All rights reserved.
4 * Author : Manjunath Ranganathaiah
5 * Support: freebsdraid@lsi.com
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
16 * distribution.
17 * 3. Neither the name of the <ORGANIZATION> nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
25 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
29 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
31 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 * POSSIBILITY OF SUCH DAMAGE.
33 *
34 * $FreeBSD: src/sys/dev/tws/tws_cam.c,v 1.3 2007/05/09 04:16:32 mrangana Exp $
35 */
36
37 #include <dev/raid/tws/tws.h>
38 #include <dev/raid/tws/tws_services.h>
39 #include <dev/raid/tws/tws_hdm.h>
40 #include <dev/raid/tws/tws_user.h>
41 #include <bus/cam/cam.h>
42 #include <bus/cam/cam_ccb.h>
43 #include <bus/cam/cam_sim.h>
44 #include <bus/cam/cam_xpt_sim.h>
45 #include <bus/cam/cam_debug.h>
46 #include <bus/cam/cam_periph.h>
47
48 #include <bus/cam/scsi/scsi_all.h>
49 #include <bus/cam/scsi/scsi_message.h>
50
/*
 * Number of CCBs the SIM may have outstanding; TWS_RESERVED_REQS slots
 * are held back for internal commands and ioctls.  Clamped again in
 * tws_cam_attach() against the controller queue depth.
 */
static int tws_cam_depth=(TWS_MAX_REQS - TWS_RESERVED_REQS);
/*
 * AEN severity-code -> printable name.  NOTE(review): the severity
 * field is masked to 3 bits (0-7) where it is used, but this table has
 * only 5 entries — lookups must clamp the index.
 */
static char tws_sev_str[5][8]={"","ERROR","WARNING","INFO","DEBUG"};
53
/* CAM entry points and local completion handlers. */
static void tws_action(struct cam_sim *sim, union ccb *ccb);
static void tws_poll(struct cam_sim *sim);
static void tws_bus_scan_cb(struct cam_periph *periph, union ccb *ccb);
static void tws_scsi_complete(struct tws_request *req);



/* Interface exported to the rest of the driver (attach/detach/reset paths). */
void tws_unmap_request(struct tws_softc *sc, struct tws_request *req);
int32_t tws_map_request(struct tws_softc *sc, struct tws_request *req);
int tws_bus_scan(struct tws_softc *sc);
int tws_cam_attach(struct tws_softc *sc);
void tws_cam_detach(struct tws_softc *sc);
void tws_reset(void *arg);

/* Reset, error-path and DMA plumbing local to this file. */
static void tws_reset_cb(void *arg);
static void tws_reinit(void *arg);
static int32_t tws_execute_scsi(struct tws_softc *sc, union ccb *ccb);
static void tws_freeze_simq(struct tws_softc *sc);
static void tws_release_simq(struct tws_softc *sc);
static void tws_dmamap_data_load_cbfn(void *arg, bus_dma_segment_t *segs,
                            int nseg, int error);
static void tws_fill_sg_list(struct tws_softc *sc, void *sgl_src,
                            void *sgl_dest, u_int16_t num_sgl_entries);
static void tws_err_complete(struct tws_softc *sc, u_int64_t mfa);
static void tws_scsi_err_complete(struct tws_request *req,
                                       struct tws_command_header *hdr);
static void tws_passthru_err_complete(struct tws_request *req,
                                       struct tws_command_header *hdr);


/* Interrupt handling and command/AEN/parameter helpers. */
static void tws_timeout(void *arg);
static void tws_intr_attn_aen(struct tws_softc *sc);
static void tws_intr_attn_error(struct tws_softc *sc);
static void tws_intr_resp(struct tws_softc *sc);
void tws_intr(void *arg);
void tws_cmd_complete(struct tws_request *req);
void tws_aen_complete(struct tws_request *req);
int tws_send_scsi_cmd(struct tws_softc *sc, int cmd);
void tws_getset_param_complete(struct tws_request *req);
int tws_set_param(struct tws_softc *sc, u_int32_t table_id, u_int32_t param_id,
              u_int32_t param_size, void *data);
int tws_get_param(struct tws_softc *sc, u_int32_t table_id, u_int32_t param_id,
              u_int32_t param_size, void *data);


/*
 * Services provided by other tws modules (request pool, register access,
 * AEN machinery) — presumably tws.c / tws_hdm.c; confirm against those files.
 */
extern struct tws_request *tws_get_request(struct tws_softc *sc,
                                            u_int16_t type);
extern void *tws_release_request(struct tws_request *req);
extern int tws_submit_command(struct tws_softc *sc, struct tws_request *req);
extern boolean tws_get_response(struct tws_softc *sc,
                                           u_int16_t *req_id, u_int64_t *mfa);
extern void tws_q_insert_tail(struct tws_softc *sc, struct tws_request *req,
                                u_int8_t q_type );
extern struct tws_request * tws_q_remove_request(struct tws_softc *sc,
                                   struct tws_request *req, u_int8_t q_type );
extern void tws_send_event(struct tws_softc *sc, u_int8_t event);

extern struct tws_sense *
tws_find_sense_from_mfa(struct tws_softc *sc, u_int64_t mfa);

extern void tws_fetch_aen(void *arg);
extern void tws_disable_db_intr(struct tws_softc *sc);
extern void tws_enable_db_intr(struct tws_softc *sc);
extern void tws_passthru_complete(struct tws_request *req);
extern void tws_aen_synctime_with_host(struct tws_softc *sc);
extern void tws_circular_aenq_insert(struct tws_softc *sc,
                    struct tws_circular_q *cq, struct tws_event_packet *aen);
extern int tws_use_32bit_sgls;
extern boolean tws_ctlr_reset(struct tws_softc *sc);
extern struct tws_request * tws_q_remove_tail(struct tws_softc *sc,
                                                           u_int8_t q_type );
extern void tws_turn_off_interrupts(struct tws_softc *sc);
extern void tws_turn_on_interrupts(struct tws_softc *sc);
extern int tws_init_connect(struct tws_softc *sc, u_int16_t mc);
extern void tws_init_obfl_q(struct tws_softc *sc);
extern uint8_t tws_get_state(struct tws_softc *sc);
extern void tws_assert_soft_reset(struct tws_softc *sc);
extern boolean tws_ctlr_ready(struct tws_softc *sc);
extern u_int16_t tws_poll4_response(struct tws_softc *sc, u_int64_t *mfa);
133
134
135
136 int
tws_cam_attach(struct tws_softc * sc)137 tws_cam_attach(struct tws_softc *sc)
138 {
139 struct cam_devq *devq;
140 int error;
141
142 TWS_TRACE_DEBUG(sc, "entry", 0, sc);
143 /* Create a device queue for sim */
144
145 /*
146 * if the user sets cam depth to less than 1
147 * cam may get confused
148 */
149 if ( tws_cam_depth < 1 )
150 tws_cam_depth = 1;
151 if ( tws_cam_depth > (tws_queue_depth - TWS_RESERVED_REQS) )
152 tws_cam_depth = tws_queue_depth - TWS_RESERVED_REQS;
153
154 TWS_TRACE_DEBUG(sc, "depths,ctlr,cam", tws_queue_depth, tws_cam_depth);
155
156 if ((devq = cam_simq_alloc(tws_cam_depth)) == NULL) {
157 tws_log(sc, CAM_SIMQ_ALLOC);
158 return(ENOMEM);
159 }
160
161 /*
162 * Create a SIM entry. Though we can support tws_cam_depth
163 * simultaneous requests, we claim to be able to handle only
164 * (tws_cam_depth), so that we always have reserved requests
165 * packet available to service ioctls and internal commands.
166 */
167 sc->sim = cam_sim_alloc(tws_action, tws_poll, "tws", sc,
168 device_get_unit(sc->tws_dev),
169 &sc->sim_lock,
170 tws_cam_depth, 1, devq);
171 /* 1, 1, devq); */
172 cam_simq_release(devq);
173 if (sc->sim == NULL) {
174 tws_log(sc, CAM_SIM_ALLOC);
175 }
176 /* Register the bus. */
177 lockmgr(&sc->sim_lock, LK_EXCLUSIVE);
178 if (xpt_bus_register(sc->sim, 0) != CAM_SUCCESS) {
179 cam_sim_free(sc->sim);
180 sc->sim = NULL; /* so cam_detach will not try to free it */
181 lockmgr(&sc->sim_lock, LK_RELEASE);
182 tws_log(sc, TWS_XPT_BUS_REGISTER);
183 return(ENXIO);
184 }
185 if (xpt_create_path(&sc->path, NULL, cam_sim_path(sc->sim),
186 CAM_TARGET_WILDCARD,
187 CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
188 xpt_bus_deregister(cam_sim_path(sc->sim));
189 cam_sim_free(sc->sim);
190 tws_log(sc, TWS_XPT_CREATE_PATH);
191 lockmgr(&sc->sim_lock, LK_RELEASE);
192 return(ENXIO);
193 }
194 if ((error = tws_bus_scan(sc))) {
195 tws_log(sc, TWS_BUS_SCAN_REQ);
196 lockmgr(&sc->sim_lock, LK_RELEASE);
197 return(error);
198 }
199 lockmgr(&sc->sim_lock, LK_RELEASE);
200
201 return(0);
202 }
203
204 void
tws_cam_detach(struct tws_softc * sc)205 tws_cam_detach(struct tws_softc *sc)
206 {
207 TWS_TRACE_DEBUG(sc, "entry", 0, 0);
208 lockmgr(&sc->sim_lock, LK_EXCLUSIVE);
209 if (sc->path)
210 xpt_free_path(sc->path);
211 if (sc->sim) {
212 xpt_bus_deregister(cam_sim_path(sc->sim));
213 cam_sim_free(sc->sim);
214 }
215 lockmgr(&sc->sim_lock, LK_RELEASE);
216 }
217
218 int
tws_bus_scan(struct tws_softc * sc)219 tws_bus_scan(struct tws_softc *sc)
220 {
221 struct cam_path *path;
222 union ccb *ccb;
223
224 TWS_TRACE_DEBUG(sc, "entry", sc, 0);
225 KASSERT(sc->sim, ("sim not allocated"));
226 KKASSERT(lockstatus(&sc->sim_lock, curthread) != 0);
227
228 ccb = sc->scan_ccb;
229
230 if (xpt_create_path(&path, xpt_periph, cam_sim_path(sc->sim),
231 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
232 /* lockmgr(&sc->sim_lock, LK_RELEASE); */
233 return(EIO);
234 }
235 xpt_setup_ccb(&ccb->ccb_h, path, 5);
236 ccb->ccb_h.func_code = XPT_SCAN_BUS;
237 ccb->ccb_h.cbfcnp = tws_bus_scan_cb;
238 ccb->crcn.flags = CAM_FLAG_NONE;
239 xpt_action(ccb);
240
241 return(0);
242 }
243
244 static void
tws_bus_scan_cb(struct cam_periph * periph,union ccb * ccb)245 tws_bus_scan_cb(struct cam_periph *periph, union ccb *ccb)
246 {
247 struct tws_softc *sc = periph->softc;
248
249 /* calling trace results in non-sleepable lock head panic
250 using printf to debug */
251
252 if (ccb->ccb_h.status != CAM_REQ_CMP) {
253 kprintf("cam_scan failure\n");
254
255 lockmgr(&sc->gen_lock, LK_EXCLUSIVE);
256 tws_send_event(sc, TWS_SCAN_FAILURE);
257 lockmgr(&sc->gen_lock, LK_RELEASE);
258 }
259
260 xpt_free_path(ccb->ccb_h.path);
261 }
262
263 static void
tws_action(struct cam_sim * sim,union ccb * ccb)264 tws_action(struct cam_sim *sim, union ccb *ccb)
265 {
266 struct tws_softc *sc = (struct tws_softc *)cam_sim_softc(sim);
267
268 switch( ccb->ccb_h.func_code ) {
269 case XPT_SCSI_IO:
270 {
271 if ( tws_execute_scsi(sc, ccb) )
272 TWS_TRACE_DEBUG(sc, "execute scsi failed", 0, 0);
273 break;
274 }
275 case XPT_ABORT:
276 {
277 TWS_TRACE_DEBUG(sc, "abort i/o", 0, 0);
278 ccb->ccb_h.status = CAM_UA_ABORT;
279 xpt_done(ccb);
280 break;
281 }
282 case XPT_RESET_BUS:
283 {
284 TWS_TRACE_DEBUG(sc, "reset bus", sim, ccb);
285 break;
286 }
287 case XPT_SET_TRAN_SETTINGS:
288 {
289 TWS_TRACE_DEBUG(sc, "set tran settings", sim, ccb);
290 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
291 xpt_done(ccb);
292
293 break;
294 }
295 case XPT_GET_TRAN_SETTINGS:
296 {
297 TWS_TRACE_DEBUG(sc, "get tran settings", sim, ccb);
298
299 ccb->cts.protocol = PROTO_SCSI;
300 ccb->cts.protocol_version = SCSI_REV_2;
301 ccb->cts.transport = XPORT_SPI;
302 ccb->cts.transport_version = 2;
303
304 ccb->cts.xport_specific.spi.valid = CTS_SPI_VALID_DISC;
305 ccb->cts.xport_specific.spi.flags = CTS_SPI_FLAGS_DISC_ENB;
306 ccb->cts.proto_specific.scsi.valid = CTS_SCSI_VALID_TQ;
307 ccb->cts.proto_specific.scsi.flags = CTS_SCSI_FLAGS_TAG_ENB;
308 ccb->ccb_h.status = CAM_REQ_CMP;
309 xpt_done(ccb);
310
311 break;
312 }
313 case XPT_CALC_GEOMETRY:
314 {
315 TWS_TRACE_DEBUG(sc, "calc geometry(ccb,block-size)", ccb,
316 ccb->ccg.block_size);
317 cam_calc_geometry(&ccb->ccg, 1/* extended */);
318 xpt_done(ccb);
319
320 break;
321 }
322 case XPT_PATH_INQ:
323 {
324 TWS_TRACE_DEBUG(sc, "path inquiry", sim, ccb);
325 ccb->cpi.version_num = 1;
326 ccb->cpi.hba_inquiry = 0;
327 ccb->cpi.target_sprt = 0;
328 ccb->cpi.hba_misc = 0;
329 ccb->cpi.hba_eng_cnt = 0;
330 ccb->cpi.max_target = TWS_MAX_NUM_UNITS;
331 ccb->cpi.max_lun = TWS_MAX_NUM_LUNS - 1;
332 ccb->cpi.unit_number = cam_sim_unit(sim);
333 ccb->cpi.bus_id = cam_sim_bus(sim);
334 ccb->cpi.initiator_id = TWS_SCSI_INITIATOR_ID;
335 ccb->cpi.base_transfer_speed = 300000;
336 strncpy(ccb->cpi.sim_vid, "FreeBSD", SIM_IDLEN);
337 strncpy(ccb->cpi.hba_vid, "3ware", HBA_IDLEN);
338 strncpy(ccb->cpi.dev_name, cam_sim_name(sim), DEV_IDLEN);
339 ccb->cpi.transport = XPORT_SPI;
340 ccb->cpi.transport_version = 2;
341 ccb->cpi.protocol = PROTO_SCSI;
342 ccb->cpi.protocol_version = SCSI_REV_2;
343 ccb->ccb_h.status = CAM_REQ_CMP;
344 xpt_done(ccb);
345
346 break;
347 }
348 default:
349 TWS_TRACE_DEBUG(sc, "default", sim, ccb);
350 ccb->ccb_h.status = CAM_REQ_INVALID;
351 xpt_done(ccb);
352 break;
353 }
354 }
355
356 static void
tws_scsi_complete(struct tws_request * req)357 tws_scsi_complete(struct tws_request *req)
358 {
359 struct tws_softc *sc = req->sc;
360
361 lockmgr(&sc->q_lock, LK_EXCLUSIVE);
362 tws_q_remove_request(sc, req, TWS_BUSY_Q);
363 lockmgr(&sc->q_lock, LK_RELEASE);
364
365 callout_stop(req->ccb_ptr->ccb_h.timeout_ch);
366 tws_unmap_request(req->sc, req);
367
368
369 lockmgr(&sc->sim_lock, LK_EXCLUSIVE);
370 req->ccb_ptr->ccb_h.status = CAM_REQ_CMP;
371 xpt_done(req->ccb_ptr);
372 lockmgr(&sc->sim_lock, LK_RELEASE);
373
374 lockmgr(&sc->q_lock, LK_EXCLUSIVE);
375 tws_q_insert_tail(sc, req, TWS_FREE_Q);
376 lockmgr(&sc->q_lock, LK_RELEASE);
377
378 }
379
380 void
tws_getset_param_complete(struct tws_request * req)381 tws_getset_param_complete(struct tws_request *req)
382 {
383 struct tws_softc *sc = req->sc;
384
385 TWS_TRACE_DEBUG(sc, "getset complete", req, req->request_id);
386
387 callout_stop(&req->thandle);
388 tws_unmap_request(sc, req);
389
390 kfree(req->data, M_TWS);
391
392 lockmgr(&sc->gen_lock, LK_EXCLUSIVE);
393 req->state = TWS_REQ_STATE_FREE;
394 lockmgr(&sc->gen_lock, LK_RELEASE);
395
396 }
397
398 void
tws_aen_complete(struct tws_request * req)399 tws_aen_complete(struct tws_request *req)
400 {
401 struct tws_softc *sc = req->sc;
402 struct tws_command_header *sense;
403 struct tws_event_packet event;
404 u_int16_t aen_code=0;
405
406 TWS_TRACE_DEBUG(sc, "aen complete", 0, req->request_id);
407
408 callout_stop(&req->thandle);
409 tws_unmap_request(sc, req);
410
411 sense = (struct tws_command_header *)req->data;
412
413 TWS_TRACE_DEBUG(sc,"sense code, key",sense->sense_data[0],
414 sense->sense_data[2]);
415 TWS_TRACE_DEBUG(sc,"sense rid, seve",sense->header_desc.request_id,
416 sense->status_block.res__severity);
417 TWS_TRACE_DEBUG(sc,"sense srcnum, error",sense->status_block.srcnum,
418 sense->status_block.error);
419 TWS_TRACE_DEBUG(sc,"sense shdr, ssense",sense->header_desc.size_header,
420 sense->header_desc.size_sense);
421
422 aen_code = sense->status_block.error;
423
424 switch ( aen_code ) {
425 case TWS_AEN_SYNC_TIME_WITH_HOST :
426 tws_aen_synctime_with_host(sc);
427 break;
428 case TWS_AEN_QUEUE_EMPTY :
429 break;
430 default :
431 bzero(&event, sizeof(struct tws_event_packet));
432 event.sequence_id = sc->seq_id;
433 event.time_stamp_sec = (u_int32_t)TWS_LOCAL_TIME;
434 event.aen_code = sense->status_block.error;
435 event.severity = sense->status_block.res__severity & 0x7;
436 event.event_src = TWS_SRC_CTRL_EVENT;
437 strcpy(event.severity_str, tws_sev_str[event.severity]);
438 event.retrieved = TWS_AEN_NOT_RETRIEVED;
439
440 bcopy(sense->err_specific_desc, event.parameter_data,
441 TWS_ERROR_SPECIFIC_DESC_LEN);
442 event.parameter_data[TWS_ERROR_SPECIFIC_DESC_LEN - 1] = '\0';
443 event.parameter_len = (u_int8_t)strlen(event.parameter_data)+1;
444
445 if ( event.parameter_len < TWS_ERROR_SPECIFIC_DESC_LEN ) {
446 event.parameter_len += ((u_int8_t)strlen(event.parameter_data +
447 event.parameter_len) + 1);
448 }
449
450 device_printf(sc->tws_dev, "%s: (0x%02X: 0x%04X): %s: %s\n",
451 event.severity_str,
452 event.event_src,
453 event.aen_code,
454 event.parameter_data +
455 (strlen(event.parameter_data) + 1),
456 event.parameter_data);
457
458 lockmgr(&sc->gen_lock, LK_EXCLUSIVE);
459 tws_circular_aenq_insert(sc, &sc->aen_q, &event);
460 sc->seq_id++;
461 lockmgr(&sc->gen_lock, LK_RELEASE);
462 break;
463
464 }
465
466 kfree(req->data, M_TWS);
467
468 lockmgr(&sc->gen_lock, LK_EXCLUSIVE);
469 req->state = TWS_REQ_STATE_FREE;
470 lockmgr(&sc->gen_lock, LK_RELEASE);
471
472 if ( aen_code != TWS_AEN_QUEUE_EMPTY ) {
473 /* timeout(tws_fetch_aen, sc, 1);*/
474 sc->stats.num_aens++;
475 tws_fetch_aen(sc);
476 }
477
478 }
479
480 void
tws_cmd_complete(struct tws_request * req)481 tws_cmd_complete(struct tws_request *req)
482 {
483 struct tws_softc *sc = req->sc;
484
485 callout_stop(req->ccb_ptr->ccb_h.timeout_ch);
486 tws_unmap_request(sc, req);
487
488 }
489
/*
 * Handle a command the firmware completed through the error (sense)
 * path.  `mfa` is the bus address of the sense buffer the firmware
 * filled in; look it up, run the per-request-type error completion,
 * then hand the buffer back to the controller via the outbound queue
 * registers.
 */
static void
tws_err_complete(struct tws_softc *sc, u_int64_t mfa)
{

    struct tws_command_header *hdr;
    struct tws_sense *sen;
    struct tws_request *req;
    u_int16_t req_id;
    u_int32_t reg, status;

    if ( !mfa ) {
        TWS_TRACE_DEBUG(sc, "null mfa", 0, mfa);
        return;
    } else {
        /* lookup the sense */
        sen = tws_find_sense_from_mfa(sc, mfa);
        if ( sen == NULL ) {
            TWS_TRACE_DEBUG(sc, "found null req", 0, mfa);
            return;
        }
        hdr = sen->hdr;
        TWS_TRACE_DEBUG(sc, "sen, hdr", sen, hdr);
        /* The firmware echoes the request id in the sense header. */
        req_id = hdr->header_desc.request_id;
        req = &sc->reqs[req_id];
        TWS_TRACE_DEBUG(sc, "req, id", req, req_id);
        if ( req->error_code != TWS_REQ_SUBMIT_SUCCESS )
            TWS_TRACE_DEBUG(sc, "submit failure?", 0, req->error_code);
    }

    /* Dispatch on the type of the request that failed. */
    switch (req->type) {
        case TWS_PASSTHRU_REQ :
            tws_passthru_err_complete(req, hdr);
            break;
        case TWS_GETSET_PARAM_REQ :
            tws_getset_param_complete(req);
            break;
        case TWS_SCSI_IO_REQ :
            tws_scsi_err_complete(req, hdr);
            break;

    }

    /*
     * Return the sense buffer to the firmware by writing its 64-bit
     * bus address to the host outbound queue registers, high word
     * first.
     */
    lockmgr(&sc->io_lock, LK_EXCLUSIVE);
    hdr->header_desc.size_header = 128;
    reg = (u_int32_t)( mfa>>32);
    tws_write_reg(sc, TWS_I2O0_HOBQPH, reg, 4);
    reg = (u_int32_t)(mfa);
    tws_write_reg(sc, TWS_I2O0_HOBQPL, reg, 4);

    /*
     * TWS_BIT13 in the status register flags an outbound-queue
     * overrun; remember it so the OBFL queue can be re-initialized,
     * and mark this sense buffer as not posted.
     */
    status = tws_read_reg(sc, TWS_I2O0_STATUS, 4);
    if ( status & TWS_BIT13 ) {
        TWS_TRACE_DEBUG(sc, "OBFL Overrun", status, TWS_I2O0_STATUS);
        sc->obfl_q_overrun = true;
        sen->posted = false;
    }
    lockmgr(&sc->io_lock, LK_RELEASE);

}
548
549 static void
tws_scsi_err_complete(struct tws_request * req,struct tws_command_header * hdr)550 tws_scsi_err_complete(struct tws_request *req, struct tws_command_header *hdr)
551 {
552 u_int8_t *sense_data;
553 struct tws_softc *sc = req->sc;
554 union ccb *ccb = req->ccb_ptr;
555
556 TWS_TRACE_DEBUG(sc, "sbe, cmd_status", hdr->status_block.error,
557 req->cmd_pkt->cmd.pkt_a.status);
558 if ( hdr->status_block.error == TWS_ERROR_LOGICAL_UNIT_NOT_SUPPORTED ||
559 hdr->status_block.error == TWS_ERROR_UNIT_OFFLINE ) {
560
561 if ( ccb->ccb_h.target_lun ) {
562 TWS_TRACE_DEBUG(sc, "invalid lun error",0,0);
563 ccb->ccb_h.status |= CAM_LUN_INVALID;
564 } else {
565 TWS_TRACE_DEBUG(sc, "invalid target error",0,0);
566 ccb->ccb_h.status |= CAM_TID_INVALID;
567 }
568
569 } else {
570 TWS_TRACE_DEBUG(sc, "scsi status error",0,0);
571 ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
572 if (((ccb->csio.cdb_io.cdb_bytes[0] == 0x1A) &&
573 (hdr->status_block.error == TWS_ERROR_NOT_SUPPORTED))) {
574 ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR | CAM_AUTOSNS_VALID;
575 TWS_TRACE_DEBUG(sc, "page mode not supported",0,0);
576 }
577 }
578
579 /* if there were no error simply mark complete error */
580 if (ccb->ccb_h.status == 0)
581 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
582
583 sense_data = (u_int8_t *)&ccb->csio.sense_data;
584 if (sense_data) {
585 memcpy(sense_data, hdr->sense_data, TWS_SENSE_DATA_LENGTH );
586 ccb->csio.sense_len = TWS_SENSE_DATA_LENGTH;
587 ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
588 }
589 ccb->csio.scsi_status = req->cmd_pkt->cmd.pkt_a.status;
590
591 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
592 lockmgr(&sc->sim_lock, LK_EXCLUSIVE);
593 xpt_done(ccb);
594 lockmgr(&sc->sim_lock, LK_RELEASE);
595
596 callout_stop(req->ccb_ptr->ccb_h.timeout_ch);
597 tws_unmap_request(req->sc, req);
598 lockmgr(&sc->q_lock, LK_EXCLUSIVE);
599 tws_q_remove_request(sc, req, TWS_BUSY_Q);
600 tws_q_insert_tail(sc, req, TWS_FREE_Q);
601 lockmgr(&sc->q_lock, LK_RELEASE);
602
603 }
604
605 static void
tws_passthru_err_complete(struct tws_request * req,struct tws_command_header * hdr)606 tws_passthru_err_complete(struct tws_request *req,
607 struct tws_command_header *hdr)
608 {
609
610 TWS_TRACE_DEBUG(req->sc, "entry", hdr, req->request_id);
611 req->error_code = hdr->status_block.error;
612 memcpy(&(req->cmd_pkt->hdr), hdr, sizeof(struct tws_command_header));
613 tws_passthru_complete(req);
614 }
615
616 static void
tws_drain_busy_queue(struct tws_softc * sc)617 tws_drain_busy_queue(struct tws_softc *sc)
618 {
619
620 struct tws_request *req;
621 TWS_TRACE_DEBUG(sc, "entry", 0, 0);
622
623 lockmgr(&sc->q_lock, LK_EXCLUSIVE);
624 req = tws_q_remove_tail(sc, TWS_BUSY_Q);
625 lockmgr(&sc->q_lock, LK_RELEASE);
626 while ( req ) {
627 callout_stop(req->ccb_ptr->ccb_h.timeout_ch);
628 tws_unmap_request(req->sc, req);
629
630 TWS_TRACE_DEBUG(sc, "drained", 0, req->request_id);
631
632 lockmgr(&sc->sim_lock, LK_EXCLUSIVE);
633 req->ccb_ptr->ccb_h.status = CAM_REQUEUE_REQ;
634 xpt_done(req->ccb_ptr);
635 lockmgr(&sc->sim_lock, LK_RELEASE);
636
637 lockmgr(&sc->q_lock, LK_EXCLUSIVE);
638 tws_q_insert_tail(sc, req, TWS_FREE_Q);
639 req = tws_q_remove_tail(sc, TWS_BUSY_Q);
640 lockmgr(&sc->q_lock, LK_RELEASE);
641 }
642
643 }
644
/*
 * Force-complete the reserved (non-CAM) request slots so a controller
 * reset can proceed.  Slot assignments are implicit here:
 * reqs[1] = AEN fetch, reqs[2] = passthru, reqs[3] = get/set param
 * (per the trace messages below) — NOTE(review): confirm against the
 * request-pool initialization in tws.c.
 */
static void
tws_drain_reserved_reqs(struct tws_softc *sc)
{

    struct tws_request *r;

    /* AEN fetch request: cancel, unmap, and free its data buffer. */
    r = &sc->reqs[1];
    if ( r->state != TWS_REQ_STATE_FREE ) {
        TWS_TRACE_DEBUG(sc, "drained aen req", 0, 0);
        callout_stop(&r->thandle);
        tws_unmap_request(sc, r);
        kfree(r->data, M_TWS);
        lockmgr(&sc->gen_lock, LK_EXCLUSIVE);
        r->state = TWS_REQ_STATE_FREE;
        lockmgr(&sc->gen_lock, LK_RELEASE);
    }
    /* Passthru request: complete back to the ioctl path as a requeue. */
    r = &sc->reqs[2];
    if ( r->state != TWS_REQ_STATE_FREE ) {
        TWS_TRACE_DEBUG(sc, "drained passthru req", 0, 0);
        r->error_code = TWS_REQ_REQUEUE;
        tws_passthru_complete(r);
    }
    /* Get/set-parameter request: run its normal completion handler. */
    r = &sc->reqs[3];
    if ( r->state != TWS_REQ_STATE_FREE ) {
        TWS_TRACE_DEBUG(sc, "drained set param req", 0, 0);
        tws_getset_param_complete(r);
    }

}
674
/*
 * Drain any responses the controller has already posted by running
 * the normal response-interrupt handler inline.
 */
static void
tws_drain_response_queue(struct tws_softc *sc)
{
    tws_intr_resp(sc);
}
680
681
/*
 * Translate an XPT_SCSI_IO CCB into a firmware EXECUTE_SCSI command
 * packet and start the DMA mapping; the load callback fills in the
 * SG list and submits the command.  Returns 0 when the CCB was
 * finished inline (validation failure or requeue), otherwise the
 * result of tws_map_request().  Called with the sim lock held.
 */
static int32_t
tws_execute_scsi(struct tws_softc *sc, union ccb *ccb)
{
    struct tws_command_packet *cmd_pkt;
    struct tws_request *req;
    struct ccb_hdr *ccb_h = &(ccb->ccb_h);
    struct ccb_scsiio *csio = &(ccb->csio);
    int error;
    u_int16_t lun;

    KKASSERT(lockstatus(&sc->sim_lock, curthread) != 0);
    /* Reject out-of-range target ids and luns up front. */
    if (ccb_h->target_id >= TWS_MAX_NUM_UNITS) {
        TWS_TRACE_DEBUG(sc, "traget id too big", ccb_h->target_id, ccb_h->target_lun);
        ccb_h->status |= CAM_TID_INVALID;
        xpt_done(ccb);
        return(0);
    }
    if (ccb_h->target_lun >= TWS_MAX_NUM_LUNS) {
        TWS_TRACE_DEBUG(sc, "target lun 2 big", ccb_h->target_id, ccb_h->target_lun);
        ccb_h->status |= CAM_LUN_INVALID;
        xpt_done(ccb);
        return(0);
    }

    /* Physical CDB addresses are not supported. */
    if(ccb_h->flags & CAM_CDB_PHYS) {
        TWS_TRACE_DEBUG(sc, "cdb phy", ccb_h->target_id, ccb_h->target_lun);
        ccb_h->status = CAM_REQ_CMP_ERR;
        xpt_done(ccb);
        return(0);
    }

    /*
     * We are going to work on this request. Mark it as enqueued (though
     * we don't actually queue it...)
     */
    ccb_h->status |= CAM_SIM_QUEUED;

    req = tws_get_request(sc, TWS_SCSI_IO_REQ);
    if ( !req ) {
        /* No request slots free: ask CAM to resubmit the CCB later. */
        TWS_TRACE_DEBUG(sc, "no reqs", ccb_h->target_id, ccb_h->target_lun);
        /* tws_freeze_simq(sc); */
        ccb_h->status |= CAM_REQUEUE_REQ;
        xpt_done(ccb);
        return(0);
    }

    /* Record the data direction for the DMA sync/unmap paths. */
    if((ccb_h->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
        if(ccb_h->flags & CAM_DIR_IN)
            req->flags = TWS_DIR_IN;
        else
            req->flags = TWS_DIR_OUT;
    } else {
        req->flags = TWS_DIR_NONE; /* no data */
    }

    req->type = TWS_SCSI_IO_REQ;
    req->cb = tws_scsi_complete;

    cmd_pkt = req->cmd_pkt;
    /* cmd_pkt->hdr.header_desc.size_header = 128; */
    cmd_pkt->cmd.pkt_a.res__opcode = TWS_FW_CMD_EXECUTE_SCSI;
    cmd_pkt->cmd.pkt_a.unit = ccb_h->target_id;
    cmd_pkt->cmd.pkt_a.status = 0;
    cmd_pkt->cmd.pkt_a.sgl_offset = 16;

    /*
     * The 8-bit lun is split across two packet fields: the low nibble
     * goes into bits 15..12 of lun_l4__req_id and the high nibble
     * into bits 15..12 of lun_h4__sgl_entries.
     */
    /* lower nibble */
    lun = ccb_h->target_lun & 0XF;
    lun = lun << 12;
    cmd_pkt->cmd.pkt_a.lun_l4__req_id = lun | req->request_id;
    /* upper nibble */
    lun = ccb_h->target_lun & 0XF0;
    lun = lun << 8;
    cmd_pkt->cmd.pkt_a.lun_h4__sgl_entries = lun;

#ifdef TWS_DEBUG
    if ( csio->cdb_len > 16 )
         TWS_TRACE(sc, "cdb len too big", ccb_h->target_id, csio->cdb_len);
#endif

    /* Copy the CDB from wherever CAM put it. */
    if(ccb_h->flags & CAM_CDB_POINTER)
        bcopy(csio->cdb_io.cdb_ptr, cmd_pkt->cmd.pkt_a.cdb, csio->cdb_len);
    else
        bcopy(csio->cdb_io.cdb_bytes, cmd_pkt->cmd.pkt_a.cdb, csio->cdb_len);

    if (!(ccb_h->flags & CAM_DATA_PHYS)) {
        /* Virtual data addresses. Need to convert them... */
        if (!(ccb_h->flags & CAM_SCATTER_VALID)) {
            if (csio->dxfer_len > TWS_MAX_IO_SIZE) {
                TWS_TRACE(sc, "I/O is big", csio->dxfer_len, 0);
                tws_release_request(req);
                ccb_h->status = CAM_REQ_TOO_BIG;
                xpt_done(ccb);
                return(0);
            }

            req->length = csio->dxfer_len;
            if (req->length) {
                req->data = csio->data_ptr;
                /* there is 1 sgl_entrie */
                /* cmd_pkt->cmd.pkt_a.lun_h4__sgl_entries |= 1; */
            }
        } else {
            /* Caller-supplied scatter/gather lists are not supported. */
            TWS_TRACE_DEBUG(sc, "got sglist", ccb_h->target_id, ccb_h->target_lun);
            tws_release_request(req);
            ccb_h->status = CAM_REQ_CMP_ERR;
            xpt_done(ccb);
            return(0);
        }
    } else {
        /* Data addresses are physical. */
        TWS_TRACE_DEBUG(sc, "Phy data addr", ccb_h->target_id, ccb_h->target_lun);
        tws_release_request(req);
        ccb_h->status = CAM_REQ_CMP_ERR;
        ccb_h->status |= CAM_RELEASE_SIMQ;
        ccb_h->status &= ~CAM_SIM_QUEUED;
        xpt_done(ccb);
        return(0);
    }
    /* save ccb ptr */
    req->ccb_ptr = ccb;
    /*
     * tws_map_load_data_callback will fill in the SGL,
     * and submit the I/O.
     */
    sc->stats.scsi_ios++;
    /* Arm the per-CCB timeout before handing off to the DMA engine. */
    callout_reset(ccb_h->timeout_ch, (ccb_h->timeout * hz)/1000,
                      tws_timeout, req);
    error = tws_map_request(sc, req);
    return(error);
}
812
813
814 int
tws_send_scsi_cmd(struct tws_softc * sc,int cmd)815 tws_send_scsi_cmd(struct tws_softc *sc, int cmd)
816 {
817
818 struct tws_request *req;
819 struct tws_command_packet *cmd_pkt;
820 int error;
821
822 TWS_TRACE_DEBUG(sc, "entry",sc, cmd);
823 req = tws_get_request(sc, TWS_AEN_FETCH_REQ);
824
825 if ( req == NULL )
826 return(ENOMEM);
827
828 req->type = TWS_AEN_FETCH_REQ;
829 req->cb = tws_aen_complete;
830
831 cmd_pkt = req->cmd_pkt;
832 cmd_pkt->cmd.pkt_a.res__opcode = TWS_FW_CMD_EXECUTE_SCSI;
833 cmd_pkt->cmd.pkt_a.status = 0;
834 cmd_pkt->cmd.pkt_a.unit = 0;
835 cmd_pkt->cmd.pkt_a.sgl_offset = 16;
836 cmd_pkt->cmd.pkt_a.lun_l4__req_id = req->request_id;
837
838 cmd_pkt->cmd.pkt_a.cdb[0] = (u_int8_t)cmd;
839 cmd_pkt->cmd.pkt_a.cdb[4] = 128;
840
841 req->length = TWS_SECTOR_SIZE;
842 req->data = kmalloc(TWS_SECTOR_SIZE, M_TWS, M_NOWAIT | M_ZERO);
843 if ( req->data == NULL )
844 return(ENOMEM);
845 req->flags = TWS_DIR_IN;
846
847 callout_reset(&req->thandle, (TWS_IO_TIMEOUT * hz), tws_timeout, req);
848 error = tws_map_request(sc, req);
849 return(error);
850
851 }
852
853 int
tws_set_param(struct tws_softc * sc,u_int32_t table_id,u_int32_t param_id,u_int32_t param_size,void * data)854 tws_set_param(struct tws_softc *sc, u_int32_t table_id, u_int32_t param_id,
855 u_int32_t param_size, void *data)
856 {
857 struct tws_request *req;
858 struct tws_command_packet *cmd_pkt;
859 union tws_command_giga *cmd;
860 struct tws_getset_param *param;
861 int error;
862
863 req = tws_get_request(sc, TWS_GETSET_PARAM_REQ);
864 if ( req == NULL ) {
865 TWS_TRACE_DEBUG(sc, "null req", 0, 0);
866 return(ENOMEM);
867 }
868
869 req->length = TWS_SECTOR_SIZE;
870 req->data = kmalloc(TWS_SECTOR_SIZE, M_TWS, M_NOWAIT | M_ZERO);
871 if ( req->data == NULL )
872 return(ENOMEM);
873 param = (struct tws_getset_param *)req->data;
874
875 req->cb = tws_getset_param_complete;
876 req->flags = TWS_DIR_OUT;
877 cmd_pkt = req->cmd_pkt;
878
879 cmd = &cmd_pkt->cmd.pkt_g;
880 cmd->param.sgl_off__opcode =
881 BUILD_SGL_OFF__OPCODE(2, TWS_FW_CMD_SET_PARAM);
882 cmd->param.request_id = (u_int8_t)req->request_id;
883 cmd->param.host_id__unit = 0;
884 cmd->param.param_count = 1;
885 cmd->param.size = 2; /* map routine will add sgls */
886
887 /* Specify which parameter we want to set. */
888 param->table_id = (table_id | TWS_9K_PARAM_DESCRIPTOR);
889 param->parameter_id = (u_int8_t)(param_id);
890 param->parameter_size_bytes = (u_int16_t)param_size;
891 memcpy(param->data, data, param_size);
892
893 callout_reset(&req->thandle, (TWS_IO_TIMEOUT * hz), tws_timeout, req);
894 error = tws_map_request(sc, req);
895 return(error);
896
897 }
898
899 int
tws_get_param(struct tws_softc * sc,u_int32_t table_id,u_int32_t param_id,u_int32_t param_size,void * data)900 tws_get_param(struct tws_softc *sc, u_int32_t table_id, u_int32_t param_id,
901 u_int32_t param_size, void *data)
902 {
903 struct tws_request *req;
904 struct tws_command_packet *cmd_pkt;
905 union tws_command_giga *cmd;
906 struct tws_getset_param *param;
907 u_int16_t reqid;
908 u_int64_t mfa;
909 int error = SUCCESS;
910
911
912 req = tws_get_request(sc, TWS_GETSET_PARAM_REQ);
913 if ( req == NULL ) {
914 TWS_TRACE_DEBUG(sc, "null req", 0, 0);
915 return(FAILURE);
916 }
917
918 req->length = TWS_SECTOR_SIZE;
919 req->data = kmalloc(TWS_SECTOR_SIZE, M_TWS, M_NOWAIT | M_ZERO);
920 if ( req->data == NULL )
921 return(FAILURE);
922 param = (struct tws_getset_param *)req->data;
923
924 req->cb = NULL;
925 req->flags = TWS_DIR_IN;
926 cmd_pkt = req->cmd_pkt;
927
928 cmd = &cmd_pkt->cmd.pkt_g;
929 cmd->param.sgl_off__opcode =
930 BUILD_SGL_OFF__OPCODE(2, TWS_FW_CMD_GET_PARAM);
931 cmd->param.request_id = (u_int8_t)req->request_id;
932 cmd->param.host_id__unit = 0;
933 cmd->param.param_count = 1;
934 cmd->param.size = 2; /* map routine will add sgls */
935
936 /* Specify which parameter we want to set. */
937 param->table_id = (table_id | TWS_9K_PARAM_DESCRIPTOR);
938 param->parameter_id = (u_int8_t)(param_id);
939 param->parameter_size_bytes = (u_int16_t)param_size;
940
941 tws_map_request(sc, req);
942 reqid = tws_poll4_response(sc, &mfa);
943 tws_unmap_request(sc, req);
944
945 if ( reqid == TWS_GETSET_PARAM_REQ ) {
946 memcpy(data, param->data, param_size);
947 } else {
948 error = FAILURE;
949
950 }
951
952 kfree(req->data, M_TWS);
953 lockmgr(&sc->gen_lock, LK_EXCLUSIVE);
954 req->state = TWS_REQ_STATE_FREE;
955 lockmgr(&sc->gen_lock, LK_RELEASE);
956 return(error);
957
958 }
959
960 void
tws_unmap_request(struct tws_softc * sc,struct tws_request * req)961 tws_unmap_request(struct tws_softc *sc, struct tws_request *req)
962 {
963
964 if (req->data != NULL) {
965 if ( req->flags & TWS_DIR_IN )
966 bus_dmamap_sync(sc->data_tag, req->dma_map,
967 BUS_DMASYNC_POSTREAD);
968 if ( req->flags & TWS_DIR_OUT )
969 bus_dmamap_sync(sc->data_tag, req->dma_map,
970 BUS_DMASYNC_POSTWRITE);
971 lockmgr(&sc->io_lock, LK_EXCLUSIVE);
972 bus_dmamap_unload(sc->data_tag, req->dma_map);
973 lockmgr(&sc->io_lock, LK_RELEASE);
974 }
975 }
976
977 int32_t
tws_map_request(struct tws_softc * sc,struct tws_request * req)978 tws_map_request(struct tws_softc *sc, struct tws_request *req)
979 {
980 int32_t error = 0;
981
982
983 /* If the command involves data, map that too. */
984 if (req->data != NULL) {
985 /*
986 * Map the data buffer into bus space and build the SG list.
987 */
988 lockmgr(&sc->io_lock, LK_EXCLUSIVE);
989 error = bus_dmamap_load(sc->data_tag, req->dma_map,
990 req->data, req->length,
991 tws_dmamap_data_load_cbfn, req,
992 BUS_DMA_WAITOK);
993 lockmgr(&sc->io_lock, LK_RELEASE);
994
995 if (error == EINPROGRESS) {
996 TWS_TRACE(sc, "in progress", 0, error);
997 /* tws_freeze_simq(sc); */
998 error = TWS_REQ_ERR_INPROGRESS;
999 }
1000 } else { /* no data involved */
1001 error = tws_submit_command(sc, req);
1002 }
1003 req->error_code = error;
1004 return(error);
1005 }
1006
1007
1008 static void
tws_dmamap_data_load_cbfn(void * arg,bus_dma_segment_t * segs,int nseg,int error)1009 tws_dmamap_data_load_cbfn(void *arg, bus_dma_segment_t *segs,
1010 int nseg, int error)
1011 {
1012
1013 struct tws_request *req = (struct tws_request *)arg;
1014 struct tws_softc *sc = req->sc;
1015 u_int16_t sgls = nseg;
1016 void *sgl_ptr;
1017 struct tws_cmd_generic *gcmd;
1018
1019 if ( error == EFBIG )
1020 TWS_TRACE(sc, "not enough data segs", 0, nseg);
1021
1022
1023 if ( req->flags & TWS_DIR_IN )
1024 bus_dmamap_sync(req->sc->data_tag, req->dma_map,
1025 BUS_DMASYNC_PREREAD);
1026 if ( req->flags & TWS_DIR_OUT )
1027 bus_dmamap_sync(req->sc->data_tag, req->dma_map,
1028 BUS_DMASYNC_PREWRITE);
1029 if ( segs ) {
1030 if ( (req->type == TWS_PASSTHRU_REQ &&
1031 GET_OPCODE(req->cmd_pkt->cmd.pkt_a.res__opcode) !=
1032 TWS_FW_CMD_EXECUTE_SCSI) ||
1033 req->type == TWS_GETSET_PARAM_REQ) {
1034 gcmd = &req->cmd_pkt->cmd.pkt_g.generic;
1035 sgl_ptr = (u_int32_t *)(gcmd) + gcmd->size;
1036 gcmd->size += sgls *
1037 ((req->sc->is64bit && !tws_use_32bit_sgls) ? 4 :2 );
1038 tws_fill_sg_list(req->sc, segs, sgl_ptr, sgls);
1039
1040 } else {
1041 tws_fill_sg_list(req->sc, segs,
1042 (void *)req->cmd_pkt->cmd.pkt_a.sg_list, sgls);
1043 req->cmd_pkt->cmd.pkt_a.lun_h4__sgl_entries |= sgls ;
1044 }
1045 }
1046
1047
1048 req->error_code = tws_submit_command(req->sc, req);
1049
1050 }
1051
1052
/*
 * Copy a bus_dma segment list into the command packet's SG list,
 * converting to the descriptor width the controller expects
 * (64-bit descriptors, or 32-bit when tws_use_32bit_sgls is set).
 *
 * NOTE(review): in the is64bit branches sgl_src is really an array of
 * bus_dma_segment_t, read through a tws_sg_desc64 view and advanced by
 * sizeof(bus_dma_segment_t) each step — this relies on the two layouts
 * sharing the address/length field positions; confirm against tws_hdm.h.
 */
static void
tws_fill_sg_list(struct tws_softc *sc, void *sgl_src, void *sgl_dest,
                 u_int16_t num_sgl_entries)
{
    int i;

    if ( sc->is64bit ) {
        struct tws_sg_desc64 *sgl_s = (struct tws_sg_desc64 *)sgl_src;

        if ( !tws_use_32bit_sgls ) {
            /* 64-bit host, 64-bit SG descriptors. */
            struct tws_sg_desc64 *sgl_d = (struct tws_sg_desc64 *)sgl_dest;
            if ( num_sgl_entries > TWS_MAX_64BIT_SG_ELEMENTS )
                TWS_TRACE(sc, "64bit sg overflow", num_sgl_entries, 0);
            for (i = 0; i < num_sgl_entries; i++) {
                sgl_d[i].address = sgl_s->address;
                sgl_d[i].length = sgl_s->length;
                sgl_d[i].flag = 0;
                sgl_d[i].reserved = 0;
                /* Step by the true element size of the source array. */
                sgl_s = (struct tws_sg_desc64 *) (((u_int8_t *)sgl_s) +
                                           sizeof(bus_dma_segment_t));
            }
        } else {
            /* 64-bit host forced to 32-bit SG descriptors.
             * NOTE(review): address is narrowed on assignment —
             * presumably the DMA tag restricts segments to 32-bit
             * addresses in this mode; confirm tag creation. */
            struct tws_sg_desc32 *sgl_d = (struct tws_sg_desc32 *)sgl_dest;
            if ( num_sgl_entries > TWS_MAX_32BIT_SG_ELEMENTS )
                TWS_TRACE(sc, "32bit sg overflow", num_sgl_entries, 0);
            for (i = 0; i < num_sgl_entries; i++) {
                sgl_d[i].address = sgl_s->address;
                sgl_d[i].length = sgl_s->length;
                sgl_d[i].flag = 0;
                sgl_s = (struct tws_sg_desc64 *) (((u_int8_t *)sgl_s) +
                                           sizeof(bus_dma_segment_t));
            }
        }
    } else {
        /* 32-bit host: source and destination layouts match, so a
         * straight indexed copy suffices. */
        struct tws_sg_desc32 *sgl_s = (struct tws_sg_desc32 *)sgl_src;
        struct tws_sg_desc32 *sgl_d = (struct tws_sg_desc32 *)sgl_dest;

        if ( num_sgl_entries > TWS_MAX_32BIT_SG_ELEMENTS )
            TWS_TRACE(sc, "32bit sg overflow", num_sgl_entries, 0);

        for (i = 0; i < num_sgl_entries; i++) {
            sgl_d[i].address = sgl_s[i].address;
            sgl_d[i].length = sgl_s[i].length;
            sgl_d[i].flag = 0;
        }
    }
}
1101
1102
1103 void
tws_intr(void * arg)1104 tws_intr(void *arg)
1105 {
1106 struct tws_softc *sc = (struct tws_softc *)arg;
1107 u_int32_t histat=0, db=0;
1108
1109 KASSERT(sc, ("null softc"));
1110
1111 sc->stats.num_intrs++;
1112 histat = tws_read_reg(sc, TWS_I2O0_HISTAT, 4);
1113 if ( histat & TWS_BIT2 ) {
1114 TWS_TRACE_DEBUG(sc, "door bell :)", histat, TWS_I2O0_HISTAT);
1115 db = tws_read_reg(sc, TWS_I2O0_IOBDB, 4);
1116 if ( db & TWS_BIT21 ) {
1117 tws_intr_attn_error(sc);
1118 return;
1119 }
1120 if ( db & TWS_BIT18 ) {
1121 tws_intr_attn_aen(sc);
1122 }
1123 }
1124
1125 if ( histat & TWS_BIT3 ) {
1126 tws_intr_resp(sc);
1127 }
1128 }
1129
/*
 * Doorbell attention: one or more AENs are pending.  Fetch them and
 * acknowledge the attention bit.
 */
static void
tws_intr_attn_aen(struct tws_softc *sc)
{
    u_int32_t db=0;

    /* mask off db intrs until all the aens are fetched */
    /* tws_disable_db_intr(sc); */
    tws_fetch_aen(sc);
    /* Clear the AEN attention bit in the outbound doorbell. */
    tws_write_reg(sc, TWS_I2O0_HOBDBC, TWS_BIT18, 4);
    /* Read-back of IOBDB; result unused — presumably posts/flushes the
     * doorbell clear to the controller.  TODO confirm against HW spec. */
    db = tws_read_reg(sc, TWS_I2O0_IOBDB, 4);

}
1142
/*
 * Doorbell attention: micro-controller error.  Acknowledge all
 * outbound doorbell bits, log, and reset the controller.
 */
static void
tws_intr_attn_error(struct tws_softc *sc)
{
    u_int32_t db=0;

    TWS_TRACE(sc, "attn error", 0, 0);
    /* Clear every outbound doorbell bit. */
    tws_write_reg(sc, TWS_I2O0_HOBDBC, ~0, 4);
    /* Read-back; result unused — presumably posts the clear.  TODO confirm. */
    db = tws_read_reg(sc, TWS_I2O0_IOBDB, 4);
    device_printf(sc->tws_dev, "Micro controller error.\n");
    tws_reset(sc);
}
1154
1155 static void
tws_intr_resp(struct tws_softc * sc)1156 tws_intr_resp(struct tws_softc *sc)
1157 {
1158 u_int16_t req_id;
1159 u_int64_t mfa;
1160
1161 while ( tws_get_response(sc, &req_id, &mfa) ) {
1162 sc->stats.reqs_out++;
1163 if ( req_id == TWS_INVALID_REQID ) {
1164 TWS_TRACE_DEBUG(sc, "invalid req_id", mfa, req_id);
1165 sc->stats.reqs_errored++;
1166 tws_err_complete(sc, mfa);
1167 continue;
1168 }
1169
1170 sc->reqs[req_id].cb(&sc->reqs[req_id]);
1171 }
1172
1173 }
1174
1175
/* CAM polling entry point: service the controller as if an interrupt fired. */
static void
tws_poll(struct cam_sim *sim)
{
    struct tws_softc *sc;

    sc = (struct tws_softc *)cam_sim_softc(sim);
    TWS_TRACE_DEBUG(sc, "entry", 0, 0);
    tws_intr(sc);
}
1183
1184 static void
tws_timeout(void * arg)1185 tws_timeout(void *arg)
1186 {
1187 struct tws_request *req = (struct tws_request *)arg;
1188 struct tws_softc *sc = req->sc;
1189
1190
1191 if ( tws_get_state(sc) != TWS_RESET ) {
1192 device_printf(sc->tws_dev, "Request timed out.\n");
1193 tws_reset(sc);
1194 }
1195 }
1196
/*
 * Initiate a controller reset: publish TWS_RESET_START, turn off
 * interrupts, freeze the CAM queue, assert soft reset, then arm
 * tws_reset_cb to poll for completion.  Idempotent while a reset is
 * already in flight.
 */
void
tws_reset(void *arg)
{

    struct tws_softc *sc = (struct tws_softc *)arg;

    /* A reset is already in progress; don't start another. */
    if ( tws_get_state(sc) == TWS_RESET ) {
        return;
    }
    device_printf(sc->tws_dev, "Resetting controller\n");
    lockmgr(&sc->gen_lock, LK_EXCLUSIVE);
    tws_send_event(sc, TWS_RESET_START);
    lockmgr(&sc->gen_lock, LK_RELEASE);

    tws_turn_off_interrupts(sc);
    /* sim_lock must be held across the freeze (asserted in tws_freeze_simq). */
    lockmgr(&sc->sim_lock, LK_EXCLUSIVE);
    tws_freeze_simq(sc);
    lockmgr(&sc->sim_lock, LK_RELEASE);

    tws_assert_soft_reset(sc);
    /* Poll for reset completion every hz/10 ticks. */
    callout_reset(&sc->reset_cb_handle, hz/10, tws_reset_cb, sc);
}
1219
/*
 * Reset-polling callout.  Re-arms itself every hz/10 ticks until the
 * controller signals reset completion via TWS_BIT13 in scratchpad 3,
 * then drains stale work and schedules tws_reinit.
 */
static void
tws_reset_cb(void *arg)
{

    struct tws_softc *sc = (struct tws_softc *)arg;
    u_int32_t reg;

    /* Reset no longer in progress; nothing to do. */
    if ( tws_get_state(sc) != TWS_RESET ) {
        return;
    }
    reg = tws_read_reg(sc, TWS_I2O0_SCRPD3, 4);
    if (!( reg & TWS_BIT13 )) {
        /* Not done yet — keep polling. */
        callout_reset(&sc->reset_cb_handle, hz/10, tws_reset_cb, sc);
        return;
    }
    /* Reset has taken: discard stale completions and queued requests,
     * then reinitialize after a 5-second settle delay. */
    tws_drain_response_queue(sc);
    tws_drain_busy_queue(sc);
    tws_drain_reserved_reqs(sc);
    callout_reset(&sc->reinit_handle, 5*hz, tws_reinit, sc);
}
1240
/*
 * Post-reset reinitialization callout.  Waits (in 5-second steps) for
 * the controller to become ready, re-asserting soft reset up to two
 * more times if TWS_RESET_TIMEOUT elapses.  Once ready: re-establish
 * the firmware connection, rebuild the outbound-FIFO queue, release
 * the CAM queue, re-enable interrupts, publish TWS_RESET_COMPLETE and
 * wake any sleeper on sc->chan.
 */
static void
tws_reinit(void *arg)
{

    struct tws_softc *sc = (struct tws_softc *)arg;
    /* NOTE(review): function-static retry state is shared across all
     * adapter instances — confirm single-controller assumption. */
    static int timeout_val=0, try=2 ;

    if ( !tws_ctlr_ready(sc) ) {
        timeout_val += 5;
        if ( timeout_val >= TWS_RESET_TIMEOUT ) {
            timeout_val = 0;
            if ( try )
                tws_assert_soft_reset(sc);  /* give it another kick */
            try--;
        }
        /* Check readiness again in 5 seconds. */
        callout_reset(&sc->reinit_handle, 5*hz, tws_reinit, sc);
        return;
    }

    /* Controller ready: reset retry state and bring services back up. */
    timeout_val=0;
    try = 2;
    sc->obfl_q_overrun = false;
    if ( tws_init_connect(sc, tws_queue_depth) ) {
        TWS_TRACE_DEBUG(sc, "initConnect failed", 0, sc->is64bit);
    }
    tws_init_obfl_q(sc);

    lockmgr(&sc->sim_lock, LK_EXCLUSIVE);
    tws_release_simq(sc);
    lockmgr(&sc->sim_lock, LK_RELEASE);
    tws_turn_on_interrupts(sc);

    lockmgr(&sc->gen_lock, LK_EXCLUSIVE);
    tws_send_event(sc, TWS_RESET_COMPLETE);
    lockmgr(&sc->gen_lock, LK_RELEASE);
    /* Wake anyone sleeping on sc->chan for reset completion. */
    if ( sc->chan ) {
        sc->chan = 0;
        wakeup((void *)&sc->chan);
    }

}
1282
1283
1284 static void
tws_freeze_simq(struct tws_softc * sc)1285 tws_freeze_simq(struct tws_softc *sc)
1286 {
1287
1288 TWS_TRACE_DEBUG(sc, "freezeing", 0, 0);
1289 KKASSERT(lockstatus(&sc->sim_lock, curthread) != 0);
1290 xpt_freeze_simq(sc->sim, 1);
1291
1292 }
1293 static void
tws_release_simq(struct tws_softc * sc)1294 tws_release_simq(struct tws_softc *sc)
1295 {
1296
1297 TWS_TRACE_DEBUG(sc, "unfreezeing", 0, 0);
1298 KKASSERT(lockstatus(&sc->sim_lock, curthread) != 0);
1299 xpt_release_simq(sc->sim, 1);
1300
1301 }
1302
1303
/* Expose hw.tws.cam_depth as a boot-time tunable backing tws_cam_depth. */
TUNABLE_INT("hw.tws.cam_depth", &tws_cam_depth);
1305