Lines matching refs: dev, from mthca_eq.c (drivers/infiniband/hw/mthca, the Linux mthca InfiniBand driver's event-queue code). Each hit gives the source line number, the matched line, and the enclosing function; the trailing "argument" or "local" notes how dev is bound in that function.

166 static inline u64 async_mask(struct mthca_dev *dev)  in async_mask()  argument
168 return dev->mthca_flags & MTHCA_FLAG_SRQ ? in async_mask()
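The ternary's arms don't mention dev, so the matcher elides them. A plausible reconstruction of the full helper, assuming the MTHCA_ASYNC_EVENT_MASK and MTHCA_SRQ_EVENT_MASK definitions earlier in this file:

    static inline u64 async_mask(struct mthca_dev *dev)
    {
            /* Subscribe to SRQ limit events only when the HCA supports SRQs. */
            return dev->mthca_flags & MTHCA_FLAG_SRQ ?
                    MTHCA_ASYNC_EVENT_MASK | MTHCA_SRQ_EVENT_MASK :
                    MTHCA_ASYNC_EVENT_MASK;
    }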
173 static inline void tavor_set_eq_ci(struct mthca_dev *dev, struct mthca_eq *eq, u32 ci) in tavor_set_eq_ci() argument
185 dev->kar + MTHCA_EQ_DOORBELL, in tavor_set_eq_ci()
186 MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock)); in tavor_set_eq_ci()
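The two matched lines are the tail of a single mthca_write64() doorbell ring; its first line carries no dev and is elided. A sketch of the whole statement, assuming the MTHCA_EQ_DB_SET_CI command word and a power-of-two eq->nent:

    wmb();  /* let set_eqe_hw() ownership updates land before the CI moves */
    mthca_write64(MTHCA_EQ_DB_SET_CI | eq->eqn, ci & (eq->nent - 1),
                  dev->kar + MTHCA_EQ_DOORBELL,
                  MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));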
189 static inline void arbel_set_eq_ci(struct mthca_dev *dev, struct mthca_eq *eq, u32 ci) in arbel_set_eq_ci() argument
194 dev->eq_regs.arbel.eq_set_ci_base + eq->eqn * 8); in arbel_set_eq_ci()
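On Arbel (mem-free) hardware each EQ gets its own consumer-index register, hence the eqn * 8 offset. A sketch of the surrounding statement, assuming the raw big-endian store the driver uses to skip writel()'s byte swap:

    wmb();          /* ownership bits first, then the consumer index */
    __raw_writel((__force u32) cpu_to_be32(ci),
                 dev->eq_regs.arbel.eq_set_ci_base + eq->eqn * 8);
    mb();           /* still want ordering, just not the swab */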
199 static inline void set_eq_ci(struct mthca_dev *dev, struct mthca_eq *eq, u32 ci) in set_eq_ci() argument
201 if (mthca_is_memfree(dev)) in set_eq_ci()
202 arbel_set_eq_ci(dev, eq, ci); in set_eq_ci()
204 tavor_set_eq_ci(dev, eq, ci); in set_eq_ci()
207 static inline void tavor_eq_req_not(struct mthca_dev *dev, int eqn) in tavor_eq_req_not() argument
210 dev->kar + MTHCA_EQ_DOORBELL, in tavor_eq_req_not()
211 MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock)); in tavor_eq_req_not()
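As in tavor_set_eq_ci(), the matched lines are the tail of one doorbell write; the elided head plausibly carries the request-notification command:

    mthca_write64(MTHCA_EQ_DB_REQ_NOT | eqn, 0,
                  dev->kar + MTHCA_EQ_DOORBELL,
                  MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));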
214 static inline void arbel_eq_req_not(struct mthca_dev *dev, u32 eqn_mask) in arbel_eq_req_not() argument
216 writel(eqn_mask, dev->eq_regs.arbel.eq_arm); in arbel_eq_req_not()
219 static inline void disarm_cq(struct mthca_dev *dev, int eqn, int cqn) in disarm_cq() argument
221 if (!mthca_is_memfree(dev)) { in disarm_cq()
223 dev->kar + MTHCA_EQ_DOORBELL, in disarm_cq()
224 MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock)); in disarm_cq()
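Only Tavor needs an explicit disarm doorbell, which is why the body is guarded by !mthca_is_memfree(dev); Arbel has no equivalent. A sketch of the full statement, assuming the MTHCA_EQ_DB_DISARM_CQ command word:

    if (!mthca_is_memfree(dev)) {
            mthca_write64(MTHCA_EQ_DB_DISARM_CQ | eqn, cqn,
                          dev->kar + MTHCA_EQ_DOORBELL,
                          MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
    }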
246 static void port_change(struct mthca_dev *dev, int port, int active) in port_change() argument
250 mthca_dbg(dev, "Port change to %s for port %d\n", in port_change()
253 record.device = &dev->ib_dev; in port_change()
260 static int mthca_eq_int(struct mthca_dev *dev, struct mthca_eq *eq) in mthca_eq_int() argument
277 disarm_cq(dev, eq->eqn, disarm_cqn); in mthca_eq_int()
278 mthca_cq_completion(dev, disarm_cqn); in mthca_eq_int()
282 mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff, in mthca_eq_int()
287 mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff, in mthca_eq_int()
292 mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff, in mthca_eq_int()
297 mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff, in mthca_eq_int()
302 mthca_srq_event(dev, be32_to_cpu(eqe->event.srq.srqn) & 0xffffff, in mthca_eq_int()
307 mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff, in mthca_eq_int()
312 mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff, in mthca_eq_int()
317 mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff, in mthca_eq_int()
322 mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff, in mthca_eq_int()
327 mthca_cmd_event(dev, in mthca_eq_int()
334 port_change(dev, in mthca_eq_int()
340 mthca_warn(dev, "CQ %s on CQN %06x\n", in mthca_eq_int()
344 mthca_cq_event(dev, be32_to_cpu(eqe->event.cq_err.cqn), in mthca_eq_int()
349 mthca_warn(dev, "EQ overrun on EQN %d\n", eq->eqn); in mthca_eq_int()
357 mthca_warn(dev, "Unhandled event %02x(%02x) on EQ %d\n", in mthca_eq_int()
379 set_eq_ci(dev, eq, eq->cons_index); in mthca_eq_int()
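The event handlers matched above are the case bodies of one dispatch loop. A skeleton of mthca_eq_int()'s control flow, assuming the next_eqe_sw()/set_eqe_hw() ownership helpers and MTHCA_NUM_SPARE_EQE from this file:

    int eqes_found = 0, set_ci = 0;
    struct mthca_eqe *eqe;

    while ((eqe = next_eqe_sw(eq))) {
            rmb();          /* read the EQE only after checking its owner bit */

            switch (eqe->type) {
            /* ... the mthca_cq_completion()/mthca_qp_event()/... calls above ... */
            }

            set_eqe_hw(eqe);        /* hand the entry back to the HCA */
            ++eq->cons_index;
            eqes_found = 1;

            if (unlikely(++set_ci >= MTHCA_NUM_SPARE_EQE)) {
                    set_eq_ci(dev, eq, eq->cons_index);     /* periodic CI update */
                    set_ci = 0;
            }
    }
    return eqes_found;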
393 struct mthca_dev *dev = dev_ptr; in mthca_tavor_interrupt() local
397 if (dev->eq_table.clr_mask) in mthca_tavor_interrupt()
398 writel(dev->eq_table.clr_mask, dev->eq_table.clr_int); in mthca_tavor_interrupt()
400 ecr = readl(dev->eq_regs.tavor.ecr_base + 4); in mthca_tavor_interrupt()
404 writel(ecr, dev->eq_regs.tavor.ecr_base + in mthca_tavor_interrupt()
408 if (ecr & dev->eq_table.eq[i].eqn_mask) { in mthca_tavor_interrupt()
409 if (mthca_eq_int(dev, &dev->eq_table.eq[i])) in mthca_tavor_interrupt()
410 tavor_set_eq_ci(dev, &dev->eq_table.eq[i], in mthca_tavor_interrupt()
411 dev->eq_table.eq[i].cons_index); in mthca_tavor_interrupt()
412 tavor_eq_req_not(dev, dev->eq_table.eq[i].eqn); in mthca_tavor_interrupt()
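The empty-ECR early return and the second half of the ack write don't mention dev and are elided. A sketch of that stretch, assuming the MTHCA_ECR_BASE/MTHCA_ECR_CLR_BASE register constants:

    ecr = readl(dev->eq_regs.tavor.ecr_base + 4);
    if (!ecr)
            return IRQ_NONE;        /* shared line; this interrupt wasn't ours */

    writel(ecr, dev->eq_regs.tavor.ecr_base +
           MTHCA_ECR_CLR_BASE - MTHCA_ECR_BASE + 4);    /* ack exactly what we saw */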
421 struct mthca_dev *dev = eq->dev; in mthca_tavor_msi_x_interrupt() local
423 mthca_eq_int(dev, eq); in mthca_tavor_msi_x_interrupt()
424 tavor_set_eq_ci(dev, eq, eq->cons_index); in mthca_tavor_msi_x_interrupt()
425 tavor_eq_req_not(dev, eq->eqn); in mthca_tavor_msi_x_interrupt()
433 struct mthca_dev *dev = dev_ptr; in mthca_arbel_interrupt() local
437 if (dev->eq_table.clr_mask) in mthca_arbel_interrupt()
438 writel(dev->eq_table.clr_mask, dev->eq_table.clr_int); in mthca_arbel_interrupt()
441 if (mthca_eq_int(dev, &dev->eq_table.eq[i])) { in mthca_arbel_interrupt()
443 arbel_set_eq_ci(dev, &dev->eq_table.eq[i], in mthca_arbel_interrupt()
444 dev->eq_table.eq[i].cons_index); in mthca_arbel_interrupt()
447 arbel_eq_req_not(dev, dev->eq_table.arm_mask); in mthca_arbel_interrupt()
455 struct mthca_dev *dev = eq->dev; in mthca_arbel_msi_x_interrupt() local
457 mthca_eq_int(dev, eq); in mthca_arbel_msi_x_interrupt()
458 arbel_set_eq_ci(dev, eq, eq->cons_index); in mthca_arbel_msi_x_interrupt()
459 arbel_eq_req_not(dev, eq->eqn_mask); in mthca_arbel_msi_x_interrupt()
465 static int mthca_create_eq(struct mthca_dev *dev, in mthca_create_eq() argument
478 eq->dev = dev; in mthca_create_eq()
494 mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); in mthca_create_eq()
500 eq->page_list[i].buf = dma_alloc_coherent(&dev->pdev->dev, in mthca_create_eq()
514 eq->eqn = mthca_alloc(&dev->eq_table.alloc); in mthca_create_eq()
518 err = mthca_mr_alloc_phys(dev, dev->driver_pd.pd_num, in mthca_create_eq()
532 if (mthca_is_memfree(dev)) in mthca_create_eq()
536 if (mthca_is_memfree(dev)) { in mthca_create_eq()
537 eq_context->arbel_pd = cpu_to_be32(dev->driver_pd.pd_num); in mthca_create_eq()
539 eq_context->logsize_usrpage |= cpu_to_be32(dev->driver_uar.index); in mthca_create_eq()
540 eq_context->tavor_pd = cpu_to_be32(dev->driver_pd.pd_num); in mthca_create_eq()
545 err = mthca_SW2HW_EQ(dev, mailbox, eq->eqn); in mthca_create_eq()
547 mthca_warn(dev, "SW2HW_EQ returned %d\n", err); in mthca_create_eq()
552 mthca_free_mailbox(dev, mailbox); in mthca_create_eq()
557 dev->eq_table.arm_mask |= eq->eqn_mask; in mthca_create_eq()
559 mthca_dbg(dev, "Allocated EQ %d with %d entries\n", in mthca_create_eq()
565 mthca_free_mr(dev, &eq->mr); in mthca_create_eq()
568 mthca_free(&dev->eq_table.alloc, eq->eqn); in mthca_create_eq()
573 dma_free_coherent(&dev->pdev->dev, PAGE_SIZE, in mthca_create_eq()
578 mthca_free_mailbox(dev, mailbox); in mthca_create_eq()
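Most of the EQ buffer setup carries no dev and is elided: rounding nent up to a power of two, allocating page_list and dma_list, and setting every EQE's owner bit to hardware. A sketch of the per-page allocation loop, assuming those elided locals (npages, dma_list, t) and the err_out_free_pages unwind label:

    for (i = 0; i < npages; ++i) {
            eq->page_list[i].buf = dma_alloc_coherent(&dev->pdev->dev,
                                                      PAGE_SIZE, &t,
                                                      GFP_KERNEL);
            if (!eq->page_list[i].buf)
                    goto err_out_free_pages;

            dma_list[i] = t;        /* bus address for the EQ's page table */
            dma_unmap_addr_set(&eq->page_list[i], mapping, t);

            clear_page(eq->page_list[i].buf);
    }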
588 static void mthca_free_eq(struct mthca_dev *dev, in mthca_free_eq() argument
597 mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); in mthca_free_eq()
601 err = mthca_HW2SW_EQ(dev, mailbox, eq->eqn); in mthca_free_eq()
603 mthca_warn(dev, "HW2SW_EQ returned %d\n", err); in mthca_free_eq()
605 dev->eq_table.arm_mask &= ~eq->eqn_mask; in mthca_free_eq()
608 mthca_dbg(dev, "Dumping EQ context %02x:\n", eq->eqn); in mthca_free_eq()
618 mthca_free_mr(dev, &eq->mr); in mthca_free_eq()
620 pci_free_consistent(dev->pdev, PAGE_SIZE, in mthca_free_eq()
625 mthca_free_mailbox(dev, mailbox); in mthca_free_eq()
628 static void mthca_free_irqs(struct mthca_dev *dev) in mthca_free_irqs() argument
632 if (dev->eq_table.have_irq) in mthca_free_irqs()
633 free_irq(dev->pdev->irq, dev); in mthca_free_irqs()
635 if (dev->eq_table.eq[i].have_irq) { in mthca_free_irqs()
636 free_irq(dev->eq_table.eq[i].msi_x_vector, in mthca_free_irqs()
637 dev->eq_table.eq + i); in mthca_free_irqs()
638 dev->eq_table.eq[i].have_irq = 0; in mthca_free_irqs()
642 static int mthca_map_reg(struct mthca_dev *dev, in mthca_map_reg() argument
646 phys_addr_t base = pci_resource_start(dev->pdev, 0); in mthca_map_reg()
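Only the BAR lookup matches; the rest of the helper is plausibly a plain ioremap() with an -ENOMEM fallback:

    *map = ioremap(base + offset, size);
    if (!*map)
            return -ENOMEM;

    return 0;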
655 static int mthca_map_eq_regs(struct mthca_dev *dev) in mthca_map_eq_regs() argument
657 if (mthca_is_memfree(dev)) { in mthca_map_eq_regs()
665 if (mthca_map_reg(dev, (pci_resource_len(dev->pdev, 0) - 1) & in mthca_map_eq_regs()
666 dev->fw.arbel.clr_int_base, MTHCA_CLR_INT_SIZE, in mthca_map_eq_regs()
667 &dev->clr_base)) { in mthca_map_eq_regs()
668 mthca_err(dev, "Couldn't map interrupt clear register, " in mthca_map_eq_regs()
677 if (mthca_map_reg(dev, ((pci_resource_len(dev->pdev, 0) - 1) & in mthca_map_eq_regs()
678 dev->fw.arbel.eq_arm_base) + 4, 4, in mthca_map_eq_regs()
679 &dev->eq_regs.arbel.eq_arm)) { in mthca_map_eq_regs()
680 mthca_err(dev, "Couldn't map EQ arm register, aborting.\n"); in mthca_map_eq_regs()
681 iounmap(dev->clr_base); in mthca_map_eq_regs()
685 if (mthca_map_reg(dev, (pci_resource_len(dev->pdev, 0) - 1) & in mthca_map_eq_regs()
686 dev->fw.arbel.eq_set_ci_base, in mthca_map_eq_regs()
688 &dev->eq_regs.arbel.eq_set_ci_base)) { in mthca_map_eq_regs()
689 mthca_err(dev, "Couldn't map EQ CI register, aborting.\n"); in mthca_map_eq_regs()
690 iounmap(dev->eq_regs.arbel.eq_arm); in mthca_map_eq_regs()
691 iounmap(dev->clr_base); in mthca_map_eq_regs()
695 if (mthca_map_reg(dev, MTHCA_CLR_INT_BASE, MTHCA_CLR_INT_SIZE, in mthca_map_eq_regs()
696 &dev->clr_base)) { in mthca_map_eq_regs()
697 mthca_err(dev, "Couldn't map interrupt clear register, " in mthca_map_eq_regs()
702 if (mthca_map_reg(dev, MTHCA_ECR_BASE, in mthca_map_eq_regs()
704 &dev->eq_regs.tavor.ecr_base)) { in mthca_map_eq_regs()
705 mthca_err(dev, "Couldn't map ecr register, " in mthca_map_eq_regs()
707 iounmap(dev->clr_base); in mthca_map_eq_regs()
716 static void mthca_unmap_eq_regs(struct mthca_dev *dev) in mthca_unmap_eq_regs() argument
718 if (mthca_is_memfree(dev)) { in mthca_unmap_eq_regs()
719 iounmap(dev->eq_regs.arbel.eq_set_ci_base); in mthca_unmap_eq_regs()
720 iounmap(dev->eq_regs.arbel.eq_arm); in mthca_unmap_eq_regs()
721 iounmap(dev->clr_base); in mthca_unmap_eq_regs()
723 iounmap(dev->eq_regs.tavor.ecr_base); in mthca_unmap_eq_regs()
724 iounmap(dev->clr_base); in mthca_unmap_eq_regs()
728 int mthca_map_eq_icm(struct mthca_dev *dev, u64 icm_virt) in mthca_map_eq_icm() argument
738 dev->eq_table.icm_virt = icm_virt; in mthca_map_eq_icm()
739 dev->eq_table.icm_page = alloc_page(GFP_HIGHUSER); in mthca_map_eq_icm()
740 if (!dev->eq_table.icm_page) in mthca_map_eq_icm()
742 dev->eq_table.icm_dma = pci_map_page(dev->pdev, dev->eq_table.icm_page, 0, in mthca_map_eq_icm()
744 if (pci_dma_mapping_error(dev->pdev, dev->eq_table.icm_dma)) { in mthca_map_eq_icm()
745 __free_page(dev->eq_table.icm_page); in mthca_map_eq_icm()
749 ret = mthca_MAP_ICM_page(dev, dev->eq_table.icm_dma, icm_virt); in mthca_map_eq_icm()
751 pci_unmap_page(dev->pdev, dev->eq_table.icm_dma, PAGE_SIZE, in mthca_map_eq_icm()
753 __free_page(dev->eq_table.icm_page); in mthca_map_eq_icm()
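The two matched cleanup calls are the failure path of mthca_MAP_ICM_page(); on success the function just returns 0. The elided allocation checks complete the picture, sketched with the same calls:

    dev->eq_table.icm_page = alloc_page(GFP_HIGHUSER);
    if (!dev->eq_table.icm_page)
            return -ENOMEM;

    dev->eq_table.icm_dma = pci_map_page(dev->pdev, dev->eq_table.icm_page, 0,
                                         PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
    if (pci_dma_mapping_error(dev->pdev, dev->eq_table.icm_dma)) {
            __free_page(dev->eq_table.icm_page);
            return -ENOMEM;
    }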
759 void mthca_unmap_eq_icm(struct mthca_dev *dev) in mthca_unmap_eq_icm() argument
761 mthca_UNMAP_ICM(dev, dev->eq_table.icm_virt, 1); in mthca_unmap_eq_icm()
762 pci_unmap_page(dev->pdev, dev->eq_table.icm_dma, PAGE_SIZE, in mthca_unmap_eq_icm()
764 __free_page(dev->eq_table.icm_page); in mthca_unmap_eq_icm()
767 int mthca_init_eq_table(struct mthca_dev *dev) in mthca_init_eq_table() argument
773 err = mthca_alloc_init(&dev->eq_table.alloc, in mthca_init_eq_table()
774 dev->limits.num_eqs, in mthca_init_eq_table()
775 dev->limits.num_eqs - 1, in mthca_init_eq_table()
776 dev->limits.reserved_eqs); in mthca_init_eq_table()
780 err = mthca_map_eq_regs(dev); in mthca_init_eq_table()
784 if (dev->mthca_flags & MTHCA_FLAG_MSI_X) { in mthca_init_eq_table()
785 dev->eq_table.clr_mask = 0; in mthca_init_eq_table()
787 dev->eq_table.clr_mask = in mthca_init_eq_table()
788 swab32(1 << (dev->eq_table.inta_pin & 31)); in mthca_init_eq_table()
789 dev->eq_table.clr_int = dev->clr_base + in mthca_init_eq_table()
790 (dev->eq_table.inta_pin < 32 ? 4 : 0); in mthca_init_eq_table()
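The interrupt clear register is a big-endian 64-bit register split into two 32-bit words, so the INTA mask is byte-swapped and pins below 32 land in the low word at clr_base + 4. A worked example with a hypothetical inta_pin of 11:

    /* hypothetical pin value, for illustration only */
    dev->eq_table.clr_mask = swab32(1 << (11 & 31));        /* 0x00080000 */
    dev->eq_table.clr_int  = dev->clr_base + 4;             /* pin 11 < 32: low word */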
793 dev->eq_table.arm_mask = 0; in mthca_init_eq_table()
795 intr = dev->eq_table.inta_pin; in mthca_init_eq_table()
797 err = mthca_create_eq(dev, dev->limits.num_cqs + MTHCA_NUM_SPARE_EQE, in mthca_init_eq_table()
798 (dev->mthca_flags & MTHCA_FLAG_MSI_X) ? 128 : intr, in mthca_init_eq_table()
799 &dev->eq_table.eq[MTHCA_EQ_COMP]); in mthca_init_eq_table()
803 err = mthca_create_eq(dev, MTHCA_NUM_ASYNC_EQE + MTHCA_NUM_SPARE_EQE, in mthca_init_eq_table()
804 (dev->mthca_flags & MTHCA_FLAG_MSI_X) ? 129 : intr, in mthca_init_eq_table()
805 &dev->eq_table.eq[MTHCA_EQ_ASYNC]); in mthca_init_eq_table()
809 err = mthca_create_eq(dev, MTHCA_NUM_CMD_EQE + MTHCA_NUM_SPARE_EQE, in mthca_init_eq_table()
810 (dev->mthca_flags & MTHCA_FLAG_MSI_X) ? 130 : intr, in mthca_init_eq_table()
811 &dev->eq_table.eq[MTHCA_EQ_CMD]); in mthca_init_eq_table()
815 if (dev->mthca_flags & MTHCA_FLAG_MSI_X) { in mthca_init_eq_table()
823 snprintf(dev->eq_table.eq[i].irq_name, in mthca_init_eq_table()
826 pci_name(dev->pdev)); in mthca_init_eq_table()
827 err = request_irq(dev->eq_table.eq[i].msi_x_vector, in mthca_init_eq_table()
828 mthca_is_memfree(dev) ? in mthca_init_eq_table()
831 0, dev->eq_table.eq[i].irq_name, in mthca_init_eq_table()
832 dev->eq_table.eq + i); in mthca_init_eq_table()
835 dev->eq_table.eq[i].have_irq = 1; in mthca_init_eq_table()
838 snprintf(dev->eq_table.eq[0].irq_name, IB_DEVICE_NAME_MAX, in mthca_init_eq_table()
839 DRV_NAME "@pci:%s", pci_name(dev->pdev)); in mthca_init_eq_table()
840 err = request_irq(dev->pdev->irq, in mthca_init_eq_table()
841 mthca_is_memfree(dev) ? in mthca_init_eq_table()
844 IRQF_SHARED, dev->eq_table.eq[0].irq_name, dev); in mthca_init_eq_table()
847 dev->eq_table.have_irq = 1; in mthca_init_eq_table()
850 err = mthca_MAP_EQ(dev, async_mask(dev), in mthca_init_eq_table()
851 0, dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn); in mthca_init_eq_table()
853 mthca_warn(dev, "MAP_EQ for async EQ %d failed (%d)\n", in mthca_init_eq_table()
854 dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn, err); in mthca_init_eq_table()
856 err = mthca_MAP_EQ(dev, MTHCA_CMD_EVENT_MASK, in mthca_init_eq_table()
857 0, dev->eq_table.eq[MTHCA_EQ_CMD].eqn); in mthca_init_eq_table()
859 mthca_warn(dev, "MAP_EQ for cmd EQ %d failed (%d)\n", in mthca_init_eq_table()
860 dev->eq_table.eq[MTHCA_EQ_CMD].eqn, err); in mthca_init_eq_table()
863 if (mthca_is_memfree(dev)) in mthca_init_eq_table()
864 arbel_eq_req_not(dev, dev->eq_table.eq[i].eqn_mask); in mthca_init_eq_table()
866 tavor_eq_req_not(dev, dev->eq_table.eq[i].eqn); in mthca_init_eq_table()
871 mthca_free_irqs(dev); in mthca_init_eq_table()
872 mthca_free_eq(dev, &dev->eq_table.eq[MTHCA_EQ_CMD]); in mthca_init_eq_table()
875 mthca_free_eq(dev, &dev->eq_table.eq[MTHCA_EQ_ASYNC]); in mthca_init_eq_table()
878 mthca_free_eq(dev, &dev->eq_table.eq[MTHCA_EQ_COMP]); in mthca_init_eq_table()
881 mthca_unmap_eq_regs(dev); in mthca_init_eq_table()
884 mthca_alloc_cleanup(&dev->eq_table.alloc); in mthca_init_eq_table()
888 void mthca_cleanup_eq_table(struct mthca_dev *dev) in mthca_cleanup_eq_table() argument
892 mthca_free_irqs(dev); in mthca_cleanup_eq_table()
894 mthca_MAP_EQ(dev, async_mask(dev), in mthca_cleanup_eq_table()
895 1, dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn); in mthca_cleanup_eq_table()
896 mthca_MAP_EQ(dev, MTHCA_CMD_EVENT_MASK, in mthca_cleanup_eq_table()
897 1, dev->eq_table.eq[MTHCA_EQ_CMD].eqn); in mthca_cleanup_eq_table()
900 mthca_free_eq(dev, &dev->eq_table.eq[i]); in mthca_cleanup_eq_table()
902 mthca_unmap_eq_regs(dev); in mthca_cleanup_eq_table()
904 mthca_alloc_cleanup(&dev->eq_table.alloc); in mthca_cleanup_eq_table()