ibtl_handlers.c (03494a98) ibtl_handlers.c (76c04273)
1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE

--- 134 unchanged lines hidden (view full) ---

143static ibtl_cq_t *ibtl_cq_list_start, *ibtl_cq_list_end;
144
145static int ibtl_cq_threads = 0; /* total # of cq threads */
146static int ibtl_cqs_using_threads = 0; /* total # of cqs using threads */
147static int ibtl_cq_thread_exit = 0; /* set if/when thread(s) should exit */
148
149/* value used to tell IBTL threads to exit */
150#define IBTL_THREAD_EXIT 0x1b7fdead /* IBTF DEAD */
1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE

--- 134 unchanged lines hidden (view full) ---

143static ibtl_cq_t *ibtl_cq_list_start, *ibtl_cq_list_end;
144
145static int ibtl_cq_threads = 0; /* total # of cq threads */
146static int ibtl_cqs_using_threads = 0; /* total # of cqs using threads */
147static int ibtl_cq_thread_exit = 0; /* set if/when thread(s) should exit */
148
149/* value used to tell IBTL threads to exit */
150#define IBTL_THREAD_EXIT 0x1b7fdead /* IBTF DEAD */
151/* Cisco Topspin Vendor ID for Rereg hack */
152#define IBT_VENDOR_CISCO 0x05ad
151
152int ibtl_eec_not_supported = 1;
153
154char *ibtl_last_client_name; /* may help debugging */
153
154int ibtl_eec_not_supported = 1;
155
156char *ibtl_last_client_name; /* may help debugging */
157typedef ibt_status_t (*ibtl_node_info_cb_t)(ib_guid_t, uint8_t, ib_lid_t,
158 ibt_node_info_t *);
155
159
160ibtl_node_info_cb_t ibtl_node_info_cb;
161
156_NOTE(LOCK_ORDER(ibtl_clnt_list_mutex ibtl_async_mutex))
157
162_NOTE(LOCK_ORDER(ibtl_clnt_list_mutex ibtl_async_mutex))
163
/*
 * ibtl_cm_set_node_info_cb()
 *
 * Register (or replace) the global node-info lookup callback,
 * ibtl_node_info_cb, under ibtl_clnt_list_mutex.  The callback is later
 * used (via ibtl_cm_get_node_info) to query the node info of a new
 * master SM after an SM LID change.  Passing NULL clears the callback.
 */
void
ibtl_cm_set_node_info_cb(ibt_status_t (*node_info_cb)(ib_guid_t, uint8_t,
    ib_lid_t, ibt_node_info_t *))
{
	mutex_enter(&ibtl_clnt_list_mutex);
	ibtl_node_info_cb = node_info_cb;
	mutex_exit(&ibtl_clnt_list_mutex);
}
172
158/*
159 * ibc_async_handler()
160 *
161 * Asynchronous Event/Error Handler.
162 *
163 * This is the function called HCA drivers to post various async
164 * event and errors mention in the IB architecture spec. See
165 * ibtl_types.h for additional details of this.

--- 9 unchanged lines hidden (view full) ---

175 ibc_async_event_t *event_p)
176{
177 ibtl_qp_t *ibtl_qp;
178 ibtl_cq_t *ibtl_cq;
179 ibtl_srq_t *ibtl_srq;
180 ibtl_eec_t *ibtl_eec;
181 uint8_t port_minus1;
182
173/*
174 * ibc_async_handler()
175 *
176 * Asynchronous Event/Error Handler.
177 *
178 * This is the function called HCA drivers to post various async
179 * event and errors mention in the IB architecture spec. See
180 * ibtl_types.h for additional details of this.

--- 9 unchanged lines hidden (view full) ---

190 ibc_async_event_t *event_p)
191{
192 ibtl_qp_t *ibtl_qp;
193 ibtl_cq_t *ibtl_cq;
194 ibtl_srq_t *ibtl_srq;
195 ibtl_eec_t *ibtl_eec;
196 uint8_t port_minus1;
197
198 ibtl_async_port_event_t *portp;
199
183 IBTF_DPRINTF_L2(ibtf_handlers, "ibc_async_handler(%p, 0x%x, %p)",
184 hca_devp, code, event_p);
185
186 mutex_enter(&ibtl_async_mutex);
187
188 switch (code) {
189 case IBT_EVENT_PATH_MIGRATED_QP:
190 case IBT_EVENT_SQD:

--- 113 unchanged lines hidden (view full) ---

304 break;
305
306 case IBT_ERROR_LOCAL_CATASTROPHIC:
307 hca_devp->hd_async_codes |= code;
308 hca_devp->hd_fma_ena = event_p->ev_fma_ena;
309 /* FALLTHROUGH */
310
311 case IBT_EVENT_PORT_UP:
200 IBTF_DPRINTF_L2(ibtf_handlers, "ibc_async_handler(%p, 0x%x, %p)",
201 hca_devp, code, event_p);
202
203 mutex_enter(&ibtl_async_mutex);
204
205 switch (code) {
206 case IBT_EVENT_PATH_MIGRATED_QP:
207 case IBT_EVENT_SQD:

--- 113 unchanged lines hidden (view full) ---

321 break;
322
323 case IBT_ERROR_LOCAL_CATASTROPHIC:
324 hca_devp->hd_async_codes |= code;
325 hca_devp->hd_fma_ena = event_p->ev_fma_ena;
326 /* FALLTHROUGH */
327
328 case IBT_EVENT_PORT_UP:
329 case IBT_PORT_CHANGE_EVENT:
330 case IBT_CLNT_REREG_EVENT:
312 case IBT_ERROR_PORT_DOWN:
331 case IBT_ERROR_PORT_DOWN:
313 if ((code == IBT_EVENT_PORT_UP) ||
314 (code == IBT_ERROR_PORT_DOWN)) {
332 if ((code & IBT_PORT_EVENTS) != 0) {
315 if ((port_minus1 = event_p->ev_port - 1) >=
316 hca_devp->hd_hca_attr->hca_nports) {
317 IBTF_DPRINTF_L2(ibtf_handlers,
318 "ibc_async_handler: bad port #: %d",
319 event_p->ev_port);
320 break;
321 }
333 if ((port_minus1 = event_p->ev_port - 1) >=
334 hca_devp->hd_hca_attr->hca_nports) {
335 IBTF_DPRINTF_L2(ibtf_handlers,
336 "ibc_async_handler: bad port #: %d",
337 event_p->ev_port);
338 break;
339 }
322 hca_devp->hd_async_port[port_minus1] =
323 ((code == IBT_EVENT_PORT_UP) ? IBTL_HCA_PORT_UP :
324 IBTL_HCA_PORT_DOWN) | IBTL_HCA_PORT_CHANGED;
340 portp = &hca_devp->hd_async_port[port_minus1];
341 if (code == IBT_EVENT_PORT_UP) {
342 /*
343 * The port is just coming UP we can't have any
344 * valid older events.
345 */
346 portp->status = IBTL_HCA_PORT_UP;
347 } else if (code == IBT_ERROR_PORT_DOWN) {
348 /*
349 * The port is going DOWN older events don't
350 * count.
351 */
352 portp->status = IBTL_HCA_PORT_DOWN;
353 } else if (code == IBT_PORT_CHANGE_EVENT) {
354 /*
355 * For port UP and DOWN events only the latest
356 * event counts. If we get a UP after DOWN it
357 * is sufficient to send just UP and vice versa.
358 * In the case of port CHANGE event it is valid
359 * only when the port is UP already but if we
360 * receive it after UP but before UP is
361 * delivered we still need to deliver CHANGE
362 * after we deliver UP event.
363 *
364 * We will not get a CHANGE event when the port
365 * is down or DOWN event is pending.
366 */
367 portp->flags |= event_p->ev_port_flags;
368 portp->status |= IBTL_HCA_PORT_CHG;
369 } else if (code == IBT_CLNT_REREG_EVENT) {
370 /*
371 * SM has requested a re-register of
372 * subscription to SM events notification.
373 */
374 portp->status |= IBTL_HCA_PORT_ASYNC_CLNT_REREG;
375 }
376
325 hca_devp->hd_async_codes |= code;
326 }
327
328 if ((hca_devp->hd_async_flags & IBTL_ASYNC_PENDING) == 0) {
329 hca_devp->hd_async_flags |= IBTL_ASYNC_PENDING;
330 hca_devp->hd_async_link = NULL;
331 if (ibtl_async_hca_list_end == NULL)
332 ibtl_async_hca_list_start = hca_devp;

--- 91 unchanged lines hidden (view full) ---

424
425 mutex_enter(&ibtl_clnt_list_mutex);
426 if (--hca_devp->hd_async_task_cnt == 0)
427 cv_signal(&hca_devp->hd_async_task_cv);
428 mutex_exit(&ibtl_clnt_list_mutex);
429}
430
431static void
377 hca_devp->hd_async_codes |= code;
378 }
379
380 if ((hca_devp->hd_async_flags & IBTL_ASYNC_PENDING) == 0) {
381 hca_devp->hd_async_flags |= IBTL_ASYNC_PENDING;
382 hca_devp->hd_async_link = NULL;
383 if (ibtl_async_hca_list_end == NULL)
384 ibtl_async_hca_list_start = hca_devp;

--- 91 unchanged lines hidden (view full) ---

476
477 mutex_enter(&ibtl_clnt_list_mutex);
478 if (--hca_devp->hd_async_task_cnt == 0)
479 cv_signal(&hca_devp->hd_async_task_cv);
480 mutex_exit(&ibtl_clnt_list_mutex);
481}
482
/*
 * ibt_cisco_embedded_sm_rereg_fix()
 *
 * Taskq handler for the Cisco Topspin "rereg hack" (see IBT_VENDOR_CISCO).
 * After an SM LID change has been delivered, query the node info of the
 * new master SM via the registered node-info callback (stashed in
 * mgr_async_handler by ibtl_cm_get_node_info).  If the new SM is embedded
 * in a Cisco (Topspin) switch, queue an IBT_CLNT_REREG_EVENT on the port
 * so clients re-register their SM event subscriptions.
 *
 * Frees the ibtl_mgr_s argument, and decrements the HCA's async task
 * count (signalling any waiter) before returning.
 */
static void
ibt_cisco_embedded_sm_rereg_fix(void *arg)
{
	struct ibtl_mgr_s *mgrp = arg;
	ibtl_hca_devinfo_t *hca_devp;
	ibt_node_info_t node_info;
	ibt_status_t ibt_status;
	ibtl_async_port_event_t *portp;
	ib_lid_t sm_lid;
	ib_guid_t hca_guid;
	ibt_async_event_t *event_p;
	ibt_hca_portinfo_t *pinfop;
	uint8_t port;

	hca_devp = mgrp->mgr_hca_devp;

	/* Snapshot event port, SM LID and HCA GUID under the list lock. */
	mutex_enter(&ibtl_clnt_list_mutex);
	event_p = &hca_devp->hd_async_event;
	port = event_p->ev_port;
	portp = &hca_devp->hd_async_port[port - 1];
	pinfop = &hca_devp->hd_portinfop[port - 1];
	sm_lid = pinfop->p_sm_lid;
	hca_guid = hca_devp->hd_hca_attr->hca_node_guid;
	mutex_exit(&ibtl_clnt_list_mutex);

	/*
	 * Invoke the node-info callback with no IBTL locks held
	 * (NOTE(review): presumably it may block on an SA query — confirm).
	 */
	ibt_status = ((ibtl_node_info_cb_t)mgrp->mgr_async_handler)(hca_guid,
	    port, sm_lid, &node_info);
	if (ibt_status == IBT_SUCCESS) {
		if ((node_info.n_vendor_id == IBT_VENDOR_CISCO) &&
		    (node_info.n_node_type == IBT_NODE_TYPE_SWITCH)) {
			/* New master SM is Cisco-embedded: force a rereg. */
			mutex_enter(&ibtl_async_mutex);
			portp->status |= IBTL_HCA_PORT_ASYNC_CLNT_REREG;
			hca_devp->hd_async_codes |= IBT_CLNT_REREG_EVENT;
			mutex_exit(&ibtl_async_mutex);
		}
	}
	kmem_free(mgrp, sizeof (*mgrp));

	mutex_enter(&ibtl_clnt_list_mutex);
	if (--hca_devp->hd_async_task_cnt == 0)
		cv_signal(&hca_devp->hd_async_task_cv);
	mutex_exit(&ibtl_clnt_list_mutex);
}
526
/*
 * ibtl_cm_get_node_info()
 *
 * Dispatch ibt_cisco_embedded_sm_rereg_fix() on the async taskq to check
 * whether the new master SM is a Cisco-embedded SM.  The node-info
 * callback is smuggled through mgr_async_handler (cast back in the taskq
 * handler).  A no-op if async_handler is NULL.
 *
 * Bumps hd_async_task_cnt without taking a lock; the _NOTE annotation
 * indicates no competing threads here (NOTE(review): caller appears to
 * hold ibtl_clnt_list_mutex at this point — confirm).
 */
static void
ibtl_cm_get_node_info(ibtl_hca_devinfo_t *hca_devp,
    ibt_async_handler_t async_handler)
{
	struct ibtl_mgr_s *mgrp;

	if (async_handler == NULL)
		return;

	_NOTE(NO_COMPETING_THREADS_NOW)
	mgrp = kmem_alloc(sizeof (*mgrp), KM_SLEEP);
	mgrp->mgr_hca_devp = hca_devp;
	mgrp->mgr_async_handler = async_handler;
	mgrp->mgr_clnt_private = NULL;
	/* The taskq handler decrements this and signals hd_async_task_cv. */
	hca_devp->hd_async_task_cnt++;

	/* TQ_SLEEP dispatch cannot fail, hence the (void) cast. */
	(void) taskq_dispatch(ibtl_async_taskq,
	    ibt_cisco_embedded_sm_rereg_fix, mgrp, TQ_SLEEP);
#ifndef lint
	_NOTE(COMPETING_THREADS_NOW)
#endif
}
549
550static void
432ibtl_tell_mgr(ibtl_hca_devinfo_t *hca_devp, ibt_async_handler_t async_handler,
433 void *clnt_private)
434{
435 struct ibtl_mgr_s *mgrp;
436
437 if (async_handler == NULL)
438 return;
439

--- 58 unchanged lines hidden (view full) ---

498 * ibtl_hca_devinfo_t for all client taskq threads to reference.
499 *
500 * This is called from an async or taskq thread with ibtl_async_mutex held.
501 */
502static void
503ibtl_do_hca_asyncs(ibtl_hca_devinfo_t *hca_devp)
504{
505 ibtl_hca_t *ibt_hca;
551ibtl_tell_mgr(ibtl_hca_devinfo_t *hca_devp, ibt_async_handler_t async_handler,
552 void *clnt_private)
553{
554 struct ibtl_mgr_s *mgrp;
555
556 if (async_handler == NULL)
557 return;
558

--- 58 unchanged lines hidden (view full) ---

617 * ibtl_hca_devinfo_t for all client taskq threads to reference.
618 *
619 * This is called from an async or taskq thread with ibtl_async_mutex held.
620 */
621static void
622ibtl_do_hca_asyncs(ibtl_hca_devinfo_t *hca_devp)
623{
624 ibtl_hca_t *ibt_hca;
625 ibt_async_event_t *eventp;
506 ibt_async_code_t code;
507 ibtl_async_port_status_t temp;
508 uint8_t nports;
509 uint8_t port_minus1;
626 ibt_async_code_t code;
627 ibtl_async_port_status_t temp;
628 uint8_t nports;
629 uint8_t port_minus1;
510 ibtl_async_port_status_t *portp;
630 ibtl_async_port_event_t *portp;
511
512 mutex_exit(&ibtl_async_mutex);
513
514 mutex_enter(&ibtl_clnt_list_mutex);
515 while (hca_devp->hd_async_busy)
516 cv_wait(&hca_devp->hd_async_busy_cv, &ibtl_clnt_list_mutex);
517 hca_devp->hd_async_busy = 1;
518 mutex_enter(&ibtl_async_mutex);
519
520 bzero(&hca_devp->hd_async_event, sizeof (hca_devp->hd_async_event));
521 for (;;) {
522
523 hca_devp->hd_async_event.ev_fma_ena = 0;
524
525 code = hca_devp->hd_async_codes;
526 if (code & IBT_ERROR_LOCAL_CATASTROPHIC) {
527 code = IBT_ERROR_LOCAL_CATASTROPHIC;
528 hca_devp->hd_async_event.ev_fma_ena =
529 hca_devp->hd_fma_ena;
631
632 mutex_exit(&ibtl_async_mutex);
633
634 mutex_enter(&ibtl_clnt_list_mutex);
635 while (hca_devp->hd_async_busy)
636 cv_wait(&hca_devp->hd_async_busy_cv, &ibtl_clnt_list_mutex);
637 hca_devp->hd_async_busy = 1;
638 mutex_enter(&ibtl_async_mutex);
639
640 bzero(&hca_devp->hd_async_event, sizeof (hca_devp->hd_async_event));
641 for (;;) {
642
643 hca_devp->hd_async_event.ev_fma_ena = 0;
644
645 code = hca_devp->hd_async_codes;
646 if (code & IBT_ERROR_LOCAL_CATASTROPHIC) {
647 code = IBT_ERROR_LOCAL_CATASTROPHIC;
648 hca_devp->hd_async_event.ev_fma_ena =
649 hca_devp->hd_fma_ena;
530 } else if (code & IBT_ERROR_PORT_DOWN)
650 } else if (code & IBT_ERROR_PORT_DOWN) {
531 code = IBT_ERROR_PORT_DOWN;
651 code = IBT_ERROR_PORT_DOWN;
532 else if (code & IBT_EVENT_PORT_UP)
652 temp = IBTL_HCA_PORT_DOWN;
653 } else if (code & IBT_EVENT_PORT_UP) {
533 code = IBT_EVENT_PORT_UP;
654 code = IBT_EVENT_PORT_UP;
534 else {
655 temp = IBTL_HCA_PORT_UP;
656 } else if (code & IBT_PORT_CHANGE_EVENT) {
657 code = IBT_PORT_CHANGE_EVENT;
658 temp = IBTL_HCA_PORT_CHG;
659 } else if (code & IBT_CLNT_REREG_EVENT) {
660 code = IBT_CLNT_REREG_EVENT;
661 temp = IBTL_HCA_PORT_ASYNC_CLNT_REREG;
662 } else {
535 hca_devp->hd_async_codes = 0;
536 code = 0;
537 }
538
539 if (code == 0) {
540 hca_devp->hd_async_flags &= ~IBTL_ASYNC_PENDING;
541 break;
542 }
543 hca_devp->hd_async_codes &= ~code;
544
663 hca_devp->hd_async_codes = 0;
664 code = 0;
665 }
666
667 if (code == 0) {
668 hca_devp->hd_async_flags &= ~IBTL_ASYNC_PENDING;
669 break;
670 }
671 hca_devp->hd_async_codes &= ~code;
672
545 if ((code == IBT_EVENT_PORT_UP) ||
546 (code == IBT_ERROR_PORT_DOWN)) {
547 /* PORT_UP or PORT_DOWN */
673 /* PORT_UP, PORT_CHANGE, PORT_DOWN or ASYNC_REREG */
674 if ((code & IBT_PORT_EVENTS) != 0) {
548 portp = hca_devp->hd_async_port;
549 nports = hca_devp->hd_hca_attr->hca_nports;
550 for (port_minus1 = 0; port_minus1 < nports;
551 port_minus1++) {
675 portp = hca_devp->hd_async_port;
676 nports = hca_devp->hd_hca_attr->hca_nports;
677 for (port_minus1 = 0; port_minus1 < nports;
678 port_minus1++) {
552 temp = ((code == IBT_EVENT_PORT_UP) ?
553 IBTL_HCA_PORT_UP : IBTL_HCA_PORT_DOWN) |
554 IBTL_HCA_PORT_CHANGED;
555 if (portp[port_minus1] == temp)
679 /*
680 * Matching event in this port, let's go handle
681 * it.
682 */
683 if ((portp[port_minus1].status & temp) != 0)
556 break;
557 }
558 if (port_minus1 >= nports) {
559 /* we checked again, but found nothing */
560 continue;
561 }
562 IBTF_DPRINTF_L4(ibtf_handlers, "ibtl_do_hca_asyncs: "
563 "async: port# %x code %x", port_minus1 + 1, code);
564 /* mark it to check for other ports after we're done */
565 hca_devp->hd_async_codes |= code;
566
684 break;
685 }
686 if (port_minus1 >= nports) {
687 /* we checked again, but found nothing */
688 continue;
689 }
690 IBTF_DPRINTF_L4(ibtf_handlers, "ibtl_do_hca_asyncs: "
691 "async: port# %x code %x", port_minus1 + 1, code);
692 /* mark it to check for other ports after we're done */
693 hca_devp->hd_async_codes |= code;
694
695 /*
696 * Copy the event information into hca_devp and clear
697 * event information from the per port data.
698 */
567 hca_devp->hd_async_event.ev_port = port_minus1 + 1;
699 hca_devp->hd_async_event.ev_port = port_minus1 + 1;
568 hca_devp->hd_async_port[port_minus1] &=
569 ~IBTL_HCA_PORT_CHANGED;
700 if (temp == IBTL_HCA_PORT_CHG) {
701 hca_devp->hd_async_event.ev_port_flags =
702 hca_devp->hd_async_port[port_minus1].flags;
703 hca_devp->hd_async_port[port_minus1].flags = 0;
704 }
705 hca_devp->hd_async_port[port_minus1].status &= ~temp;
570
571 mutex_exit(&ibtl_async_mutex);
572 ibtl_reinit_hca_portinfo(hca_devp, port_minus1 + 1);
573 mutex_enter(&ibtl_async_mutex);
706
707 mutex_exit(&ibtl_async_mutex);
708 ibtl_reinit_hca_portinfo(hca_devp, port_minus1 + 1);
709 mutex_enter(&ibtl_async_mutex);
710 eventp = &hca_devp->hd_async_event;
711 eventp->ev_hca_guid =
712 hca_devp->hd_hca_attr->hca_node_guid;
574 }
575
576 hca_devp->hd_async_code = code;
577 hca_devp->hd_async_event.ev_hca_guid =
578 hca_devp->hd_hca_attr->hca_node_guid;
579 mutex_exit(&ibtl_async_mutex);
580
581 /*

--- 5 unchanged lines hidden (view full) ---

587 if (ibtl_ibma_async_handler)
588 ibtl_tell_mgr(hca_devp, ibtl_ibma_async_handler,
589 ibtl_ibma_clnt_private);
590 /* wait for all tasks to complete */
591 while (hca_devp->hd_async_task_cnt != 0)
592 cv_wait(&hca_devp->hd_async_task_cv,
593 &ibtl_clnt_list_mutex);
594
713 }
714
715 hca_devp->hd_async_code = code;
716 hca_devp->hd_async_event.ev_hca_guid =
717 hca_devp->hd_hca_attr->hca_node_guid;
718 mutex_exit(&ibtl_async_mutex);
719
720 /*

--- 5 unchanged lines hidden (view full) ---

726 if (ibtl_ibma_async_handler)
727 ibtl_tell_mgr(hca_devp, ibtl_ibma_async_handler,
728 ibtl_ibma_clnt_private);
729 /* wait for all tasks to complete */
730 while (hca_devp->hd_async_task_cnt != 0)
731 cv_wait(&hca_devp->hd_async_task_cv,
732 &ibtl_clnt_list_mutex);
733
734 /*
735 * Hack Alert:
736 * The ibmf handler would have updated the Master SM LID if it
737 * was SM LID change event. Now lets check if the new Master SM
738 * is a Embedded Cisco Topspin SM.
739 */
740 if ((code == IBT_PORT_CHANGE_EVENT) &&
741 eventp->ev_port_flags & IBT_PORT_CHANGE_SM_LID)
742 ibtl_cm_get_node_info(hca_devp,
743 (ibt_async_handler_t)ibtl_node_info_cb);
744 /* wait for node info task to complete */
745 while (hca_devp->hd_async_task_cnt != 0)
746 cv_wait(&hca_devp->hd_async_task_cv,
747 &ibtl_clnt_list_mutex);
748
595 if (ibtl_dm_async_handler)
596 ibtl_tell_mgr(hca_devp, ibtl_dm_async_handler,
597 ibtl_dm_clnt_private);
598 if (ibtl_cm_async_handler)
599 ibtl_tell_mgr(hca_devp, ibtl_cm_async_handler,
600 ibtl_cm_clnt_private);
601 /* wait for all tasks to complete */
602 while (hca_devp->hd_async_task_cnt != 0)

--- 1313 unchanged lines hidden (view full) ---

1916 }
1917 mutex_destroy(&ibtl_cq_mutex);
1918 cv_destroy(&ibtl_cq_cv);
1919
1920 mutex_destroy(&ibtl_async_mutex);
1921 cv_destroy(&ibtl_async_cv);
1922 cv_destroy(&ibtl_clnt_cv);
1923}
749 if (ibtl_dm_async_handler)
750 ibtl_tell_mgr(hca_devp, ibtl_dm_async_handler,
751 ibtl_dm_clnt_private);
752 if (ibtl_cm_async_handler)
753 ibtl_tell_mgr(hca_devp, ibtl_cm_async_handler,
754 ibtl_cm_clnt_private);
755 /* wait for all tasks to complete */
756 while (hca_devp->hd_async_task_cnt != 0)

--- 1313 unchanged lines hidden (view full) ---

2070 }
2071 mutex_destroy(&ibtl_cq_mutex);
2072 cv_destroy(&ibtl_cq_cv);
2073
2074 mutex_destroy(&ibtl_async_mutex);
2075 cv_destroy(&ibtl_async_cv);
2076 cv_destroy(&ibtl_clnt_cv);
2077}
2078
2079/* ARGSUSED */
2080ibt_status_t ibtl_dummy_node_info_cb(ib_guid_t hca_guid, uint8_t port,
2081 ib_lid_t lid, ibt_node_info_t *node_info)
2082{
2083 return (IBT_SUCCESS);
2084}