xref: /freebsd/sys/dev/mthca/mthca_eq.c (revision 33ec1ccb)
133ec1ccbSHans Petter Selasky /*
233ec1ccbSHans Petter Selasky  * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
333ec1ccbSHans Petter Selasky  * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
433ec1ccbSHans Petter Selasky  *
533ec1ccbSHans Petter Selasky  * This software is available to you under a choice of one of two
633ec1ccbSHans Petter Selasky  * licenses.  You may choose to be licensed under the terms of the GNU
733ec1ccbSHans Petter Selasky  * General Public License (GPL) Version 2, available from the file
833ec1ccbSHans Petter Selasky  * COPYING in the main directory of this source tree, or the
933ec1ccbSHans Petter Selasky  * OpenIB.org BSD license below:
1033ec1ccbSHans Petter Selasky  *
1133ec1ccbSHans Petter Selasky  *     Redistribution and use in source and binary forms, with or
1233ec1ccbSHans Petter Selasky  *     without modification, are permitted provided that the following
1333ec1ccbSHans Petter Selasky  *     conditions are met:
1433ec1ccbSHans Petter Selasky  *
1533ec1ccbSHans Petter Selasky  *      - Redistributions of source code must retain the above
1633ec1ccbSHans Petter Selasky  *        copyright notice, this list of conditions and the following
1733ec1ccbSHans Petter Selasky  *        disclaimer.
1833ec1ccbSHans Petter Selasky  *
1933ec1ccbSHans Petter Selasky  *      - Redistributions in binary form must reproduce the above
2033ec1ccbSHans Petter Selasky  *        copyright notice, this list of conditions and the following
2133ec1ccbSHans Petter Selasky  *        disclaimer in the documentation and/or other materials
2233ec1ccbSHans Petter Selasky  *        provided with the distribution.
2333ec1ccbSHans Petter Selasky  *
2433ec1ccbSHans Petter Selasky  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
2533ec1ccbSHans Petter Selasky  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
2633ec1ccbSHans Petter Selasky  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
2733ec1ccbSHans Petter Selasky  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
2833ec1ccbSHans Petter Selasky  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
2933ec1ccbSHans Petter Selasky  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
3033ec1ccbSHans Petter Selasky  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
3133ec1ccbSHans Petter Selasky  * SOFTWARE.
3233ec1ccbSHans Petter Selasky  */
3333ec1ccbSHans Petter Selasky 
3433ec1ccbSHans Petter Selasky #include <linux/errno.h>
3533ec1ccbSHans Petter Selasky #include <linux/interrupt.h>
3633ec1ccbSHans Petter Selasky #include <linux/pci.h>
3733ec1ccbSHans Petter Selasky #include <linux/slab.h>
3833ec1ccbSHans Petter Selasky 
3933ec1ccbSHans Petter Selasky #include "mthca_dev.h"
4033ec1ccbSHans Petter Selasky #include "mthca_cmd.h"
4133ec1ccbSHans Petter Selasky #include "mthca_config_reg.h"
4233ec1ccbSHans Petter Selasky 
enum {
	MTHCA_NUM_ASYNC_EQE = 0x80,	/* entries in the async-event EQ */
	MTHCA_NUM_CMD_EQE   = 0x80,	/* entries in the command-completion EQ */
	MTHCA_NUM_SPARE_EQE = 0x80,	/* extra entries so the CI can lag (see mthca_eq_int()) */
	MTHCA_EQ_ENTRY_SIZE = 0x20	/* hardware EQE size in bytes */
};
4933ec1ccbSHans Petter Selasky 
5033ec1ccbSHans Petter Selasky /*
5133ec1ccbSHans Petter Selasky  * Must be packed because start is 64 bits but only aligned to 32 bits.
5233ec1ccbSHans Petter Selasky  */
/*
 * Hardware EQ context, as passed to SW2HW_EQ.
 * Must be packed because start is 64 bits but only aligned to 32 bits.
 */
struct mthca_eq_context {
	__be32 flags;			/* status/owner/state bits, see MTHCA_EQ_* below */
	__be64 start;			/* start address of the EQ buffer */
	__be32 logsize_usrpage;		/* log2(nent) in bits 31:24, UAR index below */
	__be32 tavor_pd;	/* reserved for Arbel */
	u8     reserved1[3];
	u8     intr;			/* interrupt (MSI-X) vector */
	__be32 arbel_pd;	/* lost_count for Tavor */
	__be32 lkey;			/* lkey of the MR covering the EQ buffer */
	u32    reserved2[2];
	__be32 consumer_index;
	__be32 producer_index;
	u32    reserved3[4];
} __attribute__((packed));
6733ec1ccbSHans Petter Selasky 
/* Fields of mthca_eq_context.flags: status (31:28), owner (24),
 * translation/overrun-ignore flags, and EQ state (10:8). */
#define MTHCA_EQ_STATUS_OK          ( 0 << 28)
#define MTHCA_EQ_STATUS_OVERFLOW    ( 9 << 28)
#define MTHCA_EQ_STATUS_WRITE_FAIL  (10 << 28)
#define MTHCA_EQ_OWNER_SW           ( 0 << 24)
#define MTHCA_EQ_OWNER_HW           ( 1 << 24)
#define MTHCA_EQ_FLAG_TR            ( 1 << 18)
#define MTHCA_EQ_FLAG_OI            ( 1 << 17)
#define MTHCA_EQ_STATE_ARMED        ( 1 <<  8)
#define MTHCA_EQ_STATE_FIRED        ( 2 <<  8)
#define MTHCA_EQ_STATE_ALWAYS_ARMED ( 3 <<  8)
#define MTHCA_EQ_STATE_ARBEL        ( 8 <<  8)
7933ec1ccbSHans Petter Selasky 
/* Hardware event-type codes reported in mthca_eqe.type. */
enum {
	MTHCA_EVENT_TYPE_COMP       	    = 0x00,
	MTHCA_EVENT_TYPE_PATH_MIG   	    = 0x01,
	MTHCA_EVENT_TYPE_COMM_EST   	    = 0x02,
	MTHCA_EVENT_TYPE_SQ_DRAINED 	    = 0x03,
	MTHCA_EVENT_TYPE_SRQ_QP_LAST_WQE    = 0x13,
	MTHCA_EVENT_TYPE_SRQ_LIMIT	    = 0x14,
	MTHCA_EVENT_TYPE_CQ_ERROR   	    = 0x04,
	MTHCA_EVENT_TYPE_WQ_CATAS_ERROR     = 0x05,
	MTHCA_EVENT_TYPE_EEC_CATAS_ERROR    = 0x06,
	MTHCA_EVENT_TYPE_PATH_MIG_FAILED    = 0x07,
	MTHCA_EVENT_TYPE_WQ_INVAL_REQ_ERROR = 0x10,
	MTHCA_EVENT_TYPE_WQ_ACCESS_ERROR    = 0x11,
	MTHCA_EVENT_TYPE_SRQ_CATAS_ERROR    = 0x12,
	MTHCA_EVENT_TYPE_LOCAL_CATAS_ERROR  = 0x08,
	MTHCA_EVENT_TYPE_PORT_CHANGE        = 0x09,
	MTHCA_EVENT_TYPE_EQ_OVERFLOW        = 0x0f,
	MTHCA_EVENT_TYPE_ECC_DETECT         = 0x0e,
	MTHCA_EVENT_TYPE_CMD                = 0x0a
};
10033ec1ccbSHans Petter Selasky 
/* Bitmask of events routed to the async EQ. */
#define MTHCA_ASYNC_EVENT_MASK ((1ULL << MTHCA_EVENT_TYPE_PATH_MIG)           | \
				(1ULL << MTHCA_EVENT_TYPE_COMM_EST)           | \
				(1ULL << MTHCA_EVENT_TYPE_SQ_DRAINED)         | \
				(1ULL << MTHCA_EVENT_TYPE_CQ_ERROR)           | \
				(1ULL << MTHCA_EVENT_TYPE_WQ_CATAS_ERROR)     | \
				(1ULL << MTHCA_EVENT_TYPE_EEC_CATAS_ERROR)    | \
				(1ULL << MTHCA_EVENT_TYPE_PATH_MIG_FAILED)    | \
				(1ULL << MTHCA_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \
				(1ULL << MTHCA_EVENT_TYPE_WQ_ACCESS_ERROR)    | \
				(1ULL << MTHCA_EVENT_TYPE_LOCAL_CATAS_ERROR)  | \
				(1ULL << MTHCA_EVENT_TYPE_PORT_CHANGE)        | \
				(1ULL << MTHCA_EVENT_TYPE_ECC_DETECT))
/* SRQ events, added to the async mask only when SRQs are supported. */
#define MTHCA_SRQ_EVENT_MASK   ((1ULL << MTHCA_EVENT_TYPE_SRQ_CATAS_ERROR)    | \
				(1ULL << MTHCA_EVENT_TYPE_SRQ_QP_LAST_WQE)    | \
				(1ULL << MTHCA_EVENT_TYPE_SRQ_LIMIT))
#define MTHCA_CMD_EVENT_MASK    (1ULL << MTHCA_EVENT_TYPE_CMD)

/* EQ doorbell commands (Tavor), written with the EQN in the low bits. */
#define MTHCA_EQ_DB_INC_CI     (1 << 24)
#define MTHCA_EQ_DB_REQ_NOT    (2 << 24)
#define MTHCA_EQ_DB_DISARM_CQ  (3 << 24)
#define MTHCA_EQ_DB_SET_CI     (4 << 24)
#define MTHCA_EQ_DB_ALWAYS_ARM (5 << 24)
12333ec1ccbSHans Petter Selasky 
/* Hardware EQ entry layout; the union is keyed off of the type field. */
struct mthca_eqe {
	u8 reserved1;
	u8 type;	/* MTHCA_EVENT_TYPE_* */
	u8 reserved2;
	u8 subtype;
	union {
		u32 raw[6];
		struct {
			__be32 cqn;
		} __attribute__((packed)) comp;
		struct {
			u16    reserved1;
			__be16 token;
			u32    reserved2;
			u8     reserved3[3];
			u8     status;
			__be64 out_param;
		} __attribute__((packed)) cmd;
		struct {
			__be32 qpn;
		} __attribute__((packed)) qp;
		struct {
			__be32 srqn;
		} __attribute__((packed)) srq;
		struct {
			__be32 cqn;
			u32    reserved1;
			u8     reserved2[3];
			u8     syndrome;
		} __attribute__((packed)) cq_err;
		struct {
			u32    reserved1[2];
			__be32 port;
		} __attribute__((packed)) port_change;
	} event;
	u8 reserved3[3];
	u8 owner;	/* MTHCA_EQ_ENTRY_OWNER_{SW,HW} in bit 7 */
} __attribute__((packed));
16233ec1ccbSHans Petter Selasky 
/* Ownership bit in mthca_eqe.owner: SW may consume only SW-owned entries. */
#define  MTHCA_EQ_ENTRY_OWNER_SW      (0 << 7)
#define  MTHCA_EQ_ENTRY_OWNER_HW      (1 << 7)
16533ec1ccbSHans Petter Selasky 
async_mask(struct mthca_dev * dev)16633ec1ccbSHans Petter Selasky static inline u64 async_mask(struct mthca_dev *dev)
16733ec1ccbSHans Petter Selasky {
16833ec1ccbSHans Petter Selasky 	return dev->mthca_flags & MTHCA_FLAG_SRQ ?
16933ec1ccbSHans Petter Selasky 		MTHCA_ASYNC_EVENT_MASK | MTHCA_SRQ_EVENT_MASK :
17033ec1ccbSHans Petter Selasky 		MTHCA_ASYNC_EVENT_MASK;
17133ec1ccbSHans Petter Selasky }
17233ec1ccbSHans Petter Selasky 
/*
 * Write the consumer index for @eq through the Tavor EQ doorbell,
 * masked to the queue size.
 */
static inline void tavor_set_eq_ci(struct mthca_dev *dev, struct mthca_eq *eq, u32 ci)
{
	/*
	 * This barrier makes sure that all updates to ownership bits
	 * done by set_eqe_hw() hit memory before the consumer index
	 * is updated.  set_eq_ci() allows the HCA to possibly write
	 * more EQ entries, and we want to avoid the exceedingly
	 * unlikely possibility of the HCA writing an entry and then
	 * having set_eqe_hw() overwrite the owner field.
	 */
	wmb();
	mthca_write64(MTHCA_EQ_DB_SET_CI | eq->eqn, ci & (eq->nent - 1),
		      dev->kar + MTHCA_EQ_DOORBELL,
		      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
}
18833ec1ccbSHans Petter Selasky 
/*
 * Write the consumer index for @eq to the Arbel (mem-free) per-EQ
 * set-CI register; each EQ has an 8-byte slot in the register block.
 */
static inline void arbel_set_eq_ci(struct mthca_dev *dev, struct mthca_eq *eq, u32 ci)
{
	/* See comment in tavor_set_eq_ci() above. */
	wmb();
	__raw_writel((__force u32) cpu_to_be32(ci),
		     dev->eq_regs.arbel.eq_set_ci_base + eq->eqn * 8);
	/* We still want ordering, just not swabbing, so add a barrier */
	mb();
}
19833ec1ccbSHans Petter Selasky 
/*
 * Update the EQ consumer index using whichever mechanism the HCA
 * family (Tavor doorbell vs. Arbel register) requires.
 */
static inline void set_eq_ci(struct mthca_dev *dev, struct mthca_eq *eq, u32 ci)
{
	if (!mthca_is_memfree(dev))
		tavor_set_eq_ci(dev, eq, ci);
	else
		arbel_set_eq_ci(dev, eq, ci);
}
20633ec1ccbSHans Petter Selasky 
/* Re-arm EQ @eqn on Tavor: request an interrupt on the next event. */
static inline void tavor_eq_req_not(struct mthca_dev *dev, int eqn)
{
	mthca_write64(MTHCA_EQ_DB_REQ_NOT | eqn, 0,
		      dev->kar + MTHCA_EQ_DOORBELL,
		      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
}
21333ec1ccbSHans Petter Selasky 
/* Re-arm the EQs selected by @eqn_mask via the Arbel EQ arm register. */
static inline void arbel_eq_req_not(struct mthca_dev *dev, u32 eqn_mask)
{
	writel(eqn_mask, dev->eq_regs.arbel.eq_arm);
}
21833ec1ccbSHans Petter Selasky 
disarm_cq(struct mthca_dev * dev,int eqn,int cqn)21933ec1ccbSHans Petter Selasky static inline void disarm_cq(struct mthca_dev *dev, int eqn, int cqn)
22033ec1ccbSHans Petter Selasky {
22133ec1ccbSHans Petter Selasky 	if (!mthca_is_memfree(dev)) {
22233ec1ccbSHans Petter Selasky 		mthca_write64(MTHCA_EQ_DB_DISARM_CQ | eqn, cqn,
22333ec1ccbSHans Petter Selasky 			      dev->kar + MTHCA_EQ_DOORBELL,
22433ec1ccbSHans Petter Selasky 			      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
22533ec1ccbSHans Petter Selasky 	}
22633ec1ccbSHans Petter Selasky }
22733ec1ccbSHans Petter Selasky 
/*
 * Return a pointer to EQ entry @entry, wrapping modulo the queue size;
 * the EQ buffer is a list of pages, so index page and offset separately.
 */
static inline struct mthca_eqe *get_eqe(struct mthca_eq *eq, u32 entry)
{
	unsigned long byte_off;

	byte_off = (entry & (eq->nent - 1)) * MTHCA_EQ_ENTRY_SIZE;

	return eq->page_list[byte_off / PAGE_SIZE].buf + byte_off % PAGE_SIZE;
}
23333ec1ccbSHans Petter Selasky 
next_eqe_sw(struct mthca_eq * eq)23433ec1ccbSHans Petter Selasky static inline struct mthca_eqe *next_eqe_sw(struct mthca_eq *eq)
23533ec1ccbSHans Petter Selasky {
23633ec1ccbSHans Petter Selasky 	struct mthca_eqe *eqe;
23733ec1ccbSHans Petter Selasky 	eqe = get_eqe(eq, eq->cons_index);
23833ec1ccbSHans Petter Selasky 	return (MTHCA_EQ_ENTRY_OWNER_HW & eqe->owner) ? NULL : eqe;
23933ec1ccbSHans Petter Selasky }
24033ec1ccbSHans Petter Selasky 
/* Hand the EQ entry back to the hardware by setting the owner bit. */
static inline void set_eqe_hw(struct mthca_eqe *eqe)
{
	eqe->owner =  MTHCA_EQ_ENTRY_OWNER_HW;
}
24533ec1ccbSHans Petter Selasky 
/*
 * Report a port state change (from a PORT_CHANGE EQE) to the IB core
 * as IB_EVENT_PORT_ACTIVE or IB_EVENT_PORT_ERR.
 */
static void port_change(struct mthca_dev *dev, int port, int active)
{
	struct ib_event record;

	mthca_dbg(dev, "Port change to %s for port %d\n",
		  active ? "active" : "down", port);

	record.device = &dev->ib_dev;
	record.event  = active ? IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
	record.element.port_num = port;

	ib_dispatch_event(&record);
}
25933ec1ccbSHans Petter Selasky 
/*
 * Drain all software-owned entries from @eq, dispatching each event to
 * the appropriate handler, then hand each entry back to the hardware.
 * Returns nonzero if at least one EQE was consumed.  The final
 * consumer-index doorbell is left to the caller (see comment at the
 * bottom); only every MTHCA_NUM_SPARE_EQE events do we ring it here to
 * keep the HCA from seeing an overflow.
 */
static int mthca_eq_int(struct mthca_dev *dev, struct mthca_eq *eq)
{
	struct mthca_eqe *eqe;
	int disarm_cqn;
	int eqes_found = 0;
	int set_ci = 0;		/* events processed since last CI doorbell */

	while ((eqe = next_eqe_sw(eq))) {
		/*
		 * Make sure we read EQ entry contents after we've
		 * checked the ownership bit.
		 */
		rmb();

		switch (eqe->type) {
		case MTHCA_EVENT_TYPE_COMP:
			/* CQN occupies the low 24 bits of the EQE field. */
			disarm_cqn = be32_to_cpu(eqe->event.comp.cqn) & 0xffffff;
			disarm_cq(dev, eq->eqn, disarm_cqn);
			mthca_cq_completion(dev, disarm_cqn);
			break;

		case MTHCA_EVENT_TYPE_PATH_MIG:
			mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
				       IB_EVENT_PATH_MIG);
			break;

		case MTHCA_EVENT_TYPE_COMM_EST:
			mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
				       IB_EVENT_COMM_EST);
			break;

		case MTHCA_EVENT_TYPE_SQ_DRAINED:
			mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
				       IB_EVENT_SQ_DRAINED);
			break;

		case MTHCA_EVENT_TYPE_SRQ_QP_LAST_WQE:
			mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
				       IB_EVENT_QP_LAST_WQE_REACHED);
			break;

		case MTHCA_EVENT_TYPE_SRQ_LIMIT:
			mthca_srq_event(dev, be32_to_cpu(eqe->event.srq.srqn) & 0xffffff,
					IB_EVENT_SRQ_LIMIT_REACHED);
			break;

		case MTHCA_EVENT_TYPE_WQ_CATAS_ERROR:
			mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
				       IB_EVENT_QP_FATAL);
			break;

		case MTHCA_EVENT_TYPE_PATH_MIG_FAILED:
			mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
				       IB_EVENT_PATH_MIG_ERR);
			break;

		case MTHCA_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
			mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
				       IB_EVENT_QP_REQ_ERR);
			break;

		case MTHCA_EVENT_TYPE_WQ_ACCESS_ERROR:
			mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
				       IB_EVENT_QP_ACCESS_ERR);
			break;

		case MTHCA_EVENT_TYPE_CMD:
			/* Wake the firmware-command waiter matching this token. */
			mthca_cmd_event(dev,
					be16_to_cpu(eqe->event.cmd.token),
					eqe->event.cmd.status,
					be64_to_cpu(eqe->event.cmd.out_param));
			break;

		case MTHCA_EVENT_TYPE_PORT_CHANGE:
			/* Port number is in bits 29:28; subtype 4 means active. */
			port_change(dev,
				    (be32_to_cpu(eqe->event.port_change.port) >> 28) & 3,
				    eqe->subtype == 0x4);
			break;

		case MTHCA_EVENT_TYPE_CQ_ERROR:
			mthca_warn(dev, "CQ %s on CQN %06x\n",
				   eqe->event.cq_err.syndrome == 1 ?
				   "overrun" : "access violation",
				   be32_to_cpu(eqe->event.cq_err.cqn) & 0xffffff);
			/* NOTE(review): cqn passed here unmasked, unlike the
			 * 0xffffff masking above — presumably mthca_cq_event()
			 * masks internally; confirm before changing. */
			mthca_cq_event(dev, be32_to_cpu(eqe->event.cq_err.cqn),
				       IB_EVENT_CQ_ERR);
			break;

		case MTHCA_EVENT_TYPE_EQ_OVERFLOW:
			mthca_warn(dev, "EQ overrun on EQN %d\n", eq->eqn);
			break;

		case MTHCA_EVENT_TYPE_EEC_CATAS_ERROR:
		case MTHCA_EVENT_TYPE_SRQ_CATAS_ERROR:
		case MTHCA_EVENT_TYPE_LOCAL_CATAS_ERROR:
		case MTHCA_EVENT_TYPE_ECC_DETECT:
		default:
			mthca_warn(dev, "Unhandled event %02x(%02x) on EQ %d\n",
				   eqe->type, eqe->subtype, eq->eqn);
			break;
		}

		set_eqe_hw(eqe);
		++eq->cons_index;
		eqes_found = 1;
		++set_ci;

		/*
		 * The HCA will think the queue has overflowed if we
		 * don't tell it we've been processing events.  We
		 * create our EQs with MTHCA_NUM_SPARE_EQE extra
		 * entries, so we must update our consumer index at
		 * least that often.
		 */
		if (unlikely(set_ci >= MTHCA_NUM_SPARE_EQE)) {
			/*
			 * Conditional on hca_type is OK here because
			 * this is a rare case, not the fast path.
			 */
			set_eq_ci(dev, eq, eq->cons_index);
			set_ci = 0;
		}
	}

	/*
	 * Rely on caller to set consumer index so that we don't have
	 * to test hca_type in our interrupt handling fast path.
	 */
	return eqes_found;
}
39033ec1ccbSHans Petter Selasky 
/*
 * Shared (non-MSI-X) interrupt handler for Tavor: read the event cause
 * register, ack it, then service and re-arm every EQ whose bit is set.
 * Returns IRQ_NONE if no cause bit was ours.
 */
static irqreturn_t mthca_tavor_interrupt(int irq, void *dev_ptr)
{
	struct mthca_dev *dev = dev_ptr;
	u32 ecr;
	int i;

	/* De-assert the INTx line before reading the cause register. */
	if (dev->eq_table.clr_mask)
		writel(dev->eq_table.clr_mask, dev->eq_table.clr_int);

	ecr = readl(dev->eq_regs.tavor.ecr_base + 4);
	if (!ecr)
		return IRQ_NONE;

	/* Write-1-to-clear the cause bits we are about to service. */
	writel(ecr, dev->eq_regs.tavor.ecr_base +
	       MTHCA_ECR_CLR_BASE - MTHCA_ECR_BASE + 4);

	for (i = 0; i < MTHCA_NUM_EQ; ++i)
		if (ecr & dev->eq_table.eq[i].eqn_mask) {
			if (mthca_eq_int(dev, &dev->eq_table.eq[i]))
				tavor_set_eq_ci(dev, &dev->eq_table.eq[i],
						dev->eq_table.eq[i].cons_index);
			tavor_eq_req_not(dev, dev->eq_table.eq[i].eqn);
		}

	return IRQ_HANDLED;
}
41733ec1ccbSHans Petter Selasky 
/*
 * Per-EQ MSI-X interrupt handler for Tavor: service the EQ, publish
 * the consumer index, and re-arm.  No cause register to consult since
 * the vector maps 1:1 to the EQ.
 */
static irqreturn_t mthca_tavor_msi_x_interrupt(int irq, void *eq_ptr)
{
	struct mthca_eq  *eq  = eq_ptr;
	struct mthca_dev *dev = eq->dev;

	mthca_eq_int(dev, eq);
	tavor_set_eq_ci(dev, eq, eq->cons_index);
	tavor_eq_req_not(dev, eq->eqn);

	/* MSI-X vectors always belong to us */
	return IRQ_HANDLED;
}
43033ec1ccbSHans Petter Selasky 
/*
 * Shared (non-MSI-X) interrupt handler for Arbel (mem-free): poll every
 * EQ, publish consumer indices for those that had work, then re-arm all
 * EQs in one doorbell.  Returns IRQ_HANDLED only if any EQ had events.
 */
static irqreturn_t mthca_arbel_interrupt(int irq, void *dev_ptr)
{
	struct mthca_dev *dev = dev_ptr;
	int work = 0;
	int i;

	/* De-assert the INTx line before polling the EQs. */
	if (dev->eq_table.clr_mask)
		writel(dev->eq_table.clr_mask, dev->eq_table.clr_int);

	for (i = 0; i < MTHCA_NUM_EQ; ++i)
		if (mthca_eq_int(dev, &dev->eq_table.eq[i])) {
			work = 1;
			arbel_set_eq_ci(dev, &dev->eq_table.eq[i],
					dev->eq_table.eq[i].cons_index);
		}

	arbel_eq_req_not(dev, dev->eq_table.arm_mask);

	return IRQ_RETVAL(work);
}
45133ec1ccbSHans Petter Selasky 
/*
 * Per-EQ MSI-X interrupt handler for Arbel (mem-free): service the EQ,
 * publish the consumer index, and re-arm just this EQ.
 */
static irqreturn_t mthca_arbel_msi_x_interrupt(int irq, void *eq_ptr)
{
	struct mthca_eq  *eq  = eq_ptr;
	struct mthca_dev *dev = eq->dev;

	mthca_eq_int(dev, eq);
	arbel_set_eq_ci(dev, eq, eq->cons_index);
	arbel_eq_req_not(dev, eq->eqn_mask);

	/* MSI-X vectors always belong to us */
	return IRQ_HANDLED;
}
46433ec1ccbSHans Petter Selasky 
mthca_create_eq(struct mthca_dev * dev,int nent,u8 intr,struct mthca_eq * eq)46533ec1ccbSHans Petter Selasky static int mthca_create_eq(struct mthca_dev *dev,
46633ec1ccbSHans Petter Selasky 			   int nent,
46733ec1ccbSHans Petter Selasky 			   u8 intr,
46833ec1ccbSHans Petter Selasky 			   struct mthca_eq *eq)
46933ec1ccbSHans Petter Selasky {
47033ec1ccbSHans Petter Selasky 	int npages;
47133ec1ccbSHans Petter Selasky 	u64 *dma_list = NULL;
47233ec1ccbSHans Petter Selasky 	dma_addr_t t;
47333ec1ccbSHans Petter Selasky 	struct mthca_mailbox *mailbox;
47433ec1ccbSHans Petter Selasky 	struct mthca_eq_context *eq_context;
47533ec1ccbSHans Petter Selasky 	int err = -ENOMEM;
47633ec1ccbSHans Petter Selasky 	int i;
47733ec1ccbSHans Petter Selasky 
47833ec1ccbSHans Petter Selasky 	eq->dev  = dev;
47933ec1ccbSHans Petter Selasky 	eq->nent = roundup_pow_of_two(max(nent, 2));
48033ec1ccbSHans Petter Selasky 	npages = ALIGN(eq->nent * MTHCA_EQ_ENTRY_SIZE, PAGE_SIZE) / PAGE_SIZE;
48133ec1ccbSHans Petter Selasky 
48233ec1ccbSHans Petter Selasky 	eq->page_list = kmalloc(npages * sizeof *eq->page_list,
48333ec1ccbSHans Petter Selasky 				GFP_KERNEL);
48433ec1ccbSHans Petter Selasky 	if (!eq->page_list)
48533ec1ccbSHans Petter Selasky 		goto err_out;
48633ec1ccbSHans Petter Selasky 
48733ec1ccbSHans Petter Selasky 	for (i = 0; i < npages; ++i)
48833ec1ccbSHans Petter Selasky 		eq->page_list[i].buf = NULL;
48933ec1ccbSHans Petter Selasky 
49033ec1ccbSHans Petter Selasky 	dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
49133ec1ccbSHans Petter Selasky 	if (!dma_list)
49233ec1ccbSHans Petter Selasky 		goto err_out_free;
49333ec1ccbSHans Petter Selasky 
49433ec1ccbSHans Petter Selasky 	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
49533ec1ccbSHans Petter Selasky 	if (IS_ERR(mailbox))
49633ec1ccbSHans Petter Selasky 		goto err_out_free;
49733ec1ccbSHans Petter Selasky 	eq_context = mailbox->buf;
49833ec1ccbSHans Petter Selasky 
49933ec1ccbSHans Petter Selasky 	for (i = 0; i < npages; ++i) {
50033ec1ccbSHans Petter Selasky 		eq->page_list[i].buf = dma_alloc_coherent(&dev->pdev->dev,
50133ec1ccbSHans Petter Selasky 							  PAGE_SIZE, &t, GFP_KERNEL);
50233ec1ccbSHans Petter Selasky 		if (!eq->page_list[i].buf)
50333ec1ccbSHans Petter Selasky 			goto err_out_free_pages;
50433ec1ccbSHans Petter Selasky 
50533ec1ccbSHans Petter Selasky 		dma_list[i] = t;
50633ec1ccbSHans Petter Selasky 		dma_unmap_addr_set(&eq->page_list[i], mapping, t);
50733ec1ccbSHans Petter Selasky 
50833ec1ccbSHans Petter Selasky 		clear_page(eq->page_list[i].buf);
50933ec1ccbSHans Petter Selasky 	}
51033ec1ccbSHans Petter Selasky 
51133ec1ccbSHans Petter Selasky 	for (i = 0; i < eq->nent; ++i)
51233ec1ccbSHans Petter Selasky 		set_eqe_hw(get_eqe(eq, i));
51333ec1ccbSHans Petter Selasky 
51433ec1ccbSHans Petter Selasky 	eq->eqn = mthca_alloc(&dev->eq_table.alloc);
51533ec1ccbSHans Petter Selasky 	if (eq->eqn == -1)
51633ec1ccbSHans Petter Selasky 		goto err_out_free_pages;
51733ec1ccbSHans Petter Selasky 
51833ec1ccbSHans Petter Selasky 	err = mthca_mr_alloc_phys(dev, dev->driver_pd.pd_num,
51933ec1ccbSHans Petter Selasky 				  dma_list, PAGE_SHIFT, npages,
52033ec1ccbSHans Petter Selasky 				  0, npages * PAGE_SIZE,
52133ec1ccbSHans Petter Selasky 				  MTHCA_MPT_FLAG_LOCAL_WRITE |
52233ec1ccbSHans Petter Selasky 				  MTHCA_MPT_FLAG_LOCAL_READ,
52333ec1ccbSHans Petter Selasky 				  &eq->mr);
52433ec1ccbSHans Petter Selasky 	if (err)
52533ec1ccbSHans Petter Selasky 		goto err_out_free_eq;
52633ec1ccbSHans Petter Selasky 
52733ec1ccbSHans Petter Selasky 	memset(eq_context, 0, sizeof *eq_context);
52833ec1ccbSHans Petter Selasky 	eq_context->flags           = cpu_to_be32(MTHCA_EQ_STATUS_OK   |
52933ec1ccbSHans Petter Selasky 						  MTHCA_EQ_OWNER_HW    |
53033ec1ccbSHans Petter Selasky 						  MTHCA_EQ_STATE_ARMED |
53133ec1ccbSHans Petter Selasky 						  MTHCA_EQ_FLAG_TR);
53233ec1ccbSHans Petter Selasky 	if (mthca_is_memfree(dev))
53333ec1ccbSHans Petter Selasky 		eq_context->flags  |= cpu_to_be32(MTHCA_EQ_STATE_ARBEL);
53433ec1ccbSHans Petter Selasky 
53533ec1ccbSHans Petter Selasky 	eq_context->logsize_usrpage = cpu_to_be32((ffs(eq->nent) - 1) << 24);
53633ec1ccbSHans Petter Selasky 	if (mthca_is_memfree(dev)) {
53733ec1ccbSHans Petter Selasky 		eq_context->arbel_pd = cpu_to_be32(dev->driver_pd.pd_num);
53833ec1ccbSHans Petter Selasky 	} else {
53933ec1ccbSHans Petter Selasky 		eq_context->logsize_usrpage |= cpu_to_be32(dev->driver_uar.index);
54033ec1ccbSHans Petter Selasky 		eq_context->tavor_pd         = cpu_to_be32(dev->driver_pd.pd_num);
54133ec1ccbSHans Petter Selasky 	}
54233ec1ccbSHans Petter Selasky 	eq_context->intr            = intr;
54333ec1ccbSHans Petter Selasky 	eq_context->lkey            = cpu_to_be32(eq->mr.ibmr.lkey);
54433ec1ccbSHans Petter Selasky 
54533ec1ccbSHans Petter Selasky 	err = mthca_SW2HW_EQ(dev, mailbox, eq->eqn);
54633ec1ccbSHans Petter Selasky 	if (err) {
54733ec1ccbSHans Petter Selasky 		mthca_warn(dev, "SW2HW_EQ returned %d\n", err);
54833ec1ccbSHans Petter Selasky 		goto err_out_free_mr;
54933ec1ccbSHans Petter Selasky 	}
55033ec1ccbSHans Petter Selasky 
55133ec1ccbSHans Petter Selasky 	kfree(dma_list);
55233ec1ccbSHans Petter Selasky 	mthca_free_mailbox(dev, mailbox);
55333ec1ccbSHans Petter Selasky 
55433ec1ccbSHans Petter Selasky 	eq->eqn_mask   = swab32(1 << eq->eqn);
55533ec1ccbSHans Petter Selasky 	eq->cons_index = 0;
55633ec1ccbSHans Petter Selasky 
55733ec1ccbSHans Petter Selasky 	dev->eq_table.arm_mask |= eq->eqn_mask;
55833ec1ccbSHans Petter Selasky 
55933ec1ccbSHans Petter Selasky 	mthca_dbg(dev, "Allocated EQ %d with %d entries\n",
56033ec1ccbSHans Petter Selasky 		  eq->eqn, eq->nent);
56133ec1ccbSHans Petter Selasky 
56233ec1ccbSHans Petter Selasky 	return err;
56333ec1ccbSHans Petter Selasky 
56433ec1ccbSHans Petter Selasky  err_out_free_mr:
56533ec1ccbSHans Petter Selasky 	mthca_free_mr(dev, &eq->mr);
56633ec1ccbSHans Petter Selasky 
56733ec1ccbSHans Petter Selasky  err_out_free_eq:
56833ec1ccbSHans Petter Selasky 	mthca_free(&dev->eq_table.alloc, eq->eqn);
56933ec1ccbSHans Petter Selasky 
57033ec1ccbSHans Petter Selasky  err_out_free_pages:
57133ec1ccbSHans Petter Selasky 	for (i = 0; i < npages; ++i)
57233ec1ccbSHans Petter Selasky 		if (eq->page_list[i].buf)
57333ec1ccbSHans Petter Selasky 			dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
57433ec1ccbSHans Petter Selasky 					  eq->page_list[i].buf,
57533ec1ccbSHans Petter Selasky 					  dma_unmap_addr(&eq->page_list[i],
57633ec1ccbSHans Petter Selasky 							 mapping));
57733ec1ccbSHans Petter Selasky 
57833ec1ccbSHans Petter Selasky 	mthca_free_mailbox(dev, mailbox);
57933ec1ccbSHans Petter Selasky 
58033ec1ccbSHans Petter Selasky  err_out_free:
58133ec1ccbSHans Petter Selasky 	kfree(eq->page_list);
58233ec1ccbSHans Petter Selasky 	kfree(dma_list);
58333ec1ccbSHans Petter Selasky 
58433ec1ccbSHans Petter Selasky  err_out:
58533ec1ccbSHans Petter Selasky 	return err;
58633ec1ccbSHans Petter Selasky }
58733ec1ccbSHans Petter Selasky 
mthca_free_eq(struct mthca_dev * dev,struct mthca_eq * eq)58833ec1ccbSHans Petter Selasky static void mthca_free_eq(struct mthca_dev *dev,
58933ec1ccbSHans Petter Selasky 			  struct mthca_eq *eq)
59033ec1ccbSHans Petter Selasky {
59133ec1ccbSHans Petter Selasky 	struct mthca_mailbox *mailbox;
59233ec1ccbSHans Petter Selasky 	int err;
59333ec1ccbSHans Petter Selasky 	int npages = (eq->nent * MTHCA_EQ_ENTRY_SIZE + PAGE_SIZE - 1) /
59433ec1ccbSHans Petter Selasky 		PAGE_SIZE;
59533ec1ccbSHans Petter Selasky 	int i;
59633ec1ccbSHans Petter Selasky 
59733ec1ccbSHans Petter Selasky 	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
59833ec1ccbSHans Petter Selasky 	if (IS_ERR(mailbox))
59933ec1ccbSHans Petter Selasky 		return;
60033ec1ccbSHans Petter Selasky 
60133ec1ccbSHans Petter Selasky 	err = mthca_HW2SW_EQ(dev, mailbox, eq->eqn);
60233ec1ccbSHans Petter Selasky 	if (err)
60333ec1ccbSHans Petter Selasky 		mthca_warn(dev, "HW2SW_EQ returned %d\n", err);
60433ec1ccbSHans Petter Selasky 
60533ec1ccbSHans Petter Selasky 	dev->eq_table.arm_mask &= ~eq->eqn_mask;
60633ec1ccbSHans Petter Selasky 
60733ec1ccbSHans Petter Selasky 	if (0) {
60833ec1ccbSHans Petter Selasky 		mthca_dbg(dev, "Dumping EQ context %02x:\n", eq->eqn);
60933ec1ccbSHans Petter Selasky 		for (i = 0; i < sizeof (struct mthca_eq_context) / 4; ++i) {
61033ec1ccbSHans Petter Selasky 			if (i % 4 == 0)
61133ec1ccbSHans Petter Selasky 				printk("[%02x] ", i * 4);
61233ec1ccbSHans Petter Selasky 			printk(" %08x", be32_to_cpup(mailbox->buf + i * 4));
61333ec1ccbSHans Petter Selasky 			if ((i + 1) % 4 == 0)
61433ec1ccbSHans Petter Selasky 				printk("\n");
61533ec1ccbSHans Petter Selasky 		}
61633ec1ccbSHans Petter Selasky 	}
61733ec1ccbSHans Petter Selasky 
61833ec1ccbSHans Petter Selasky 	mthca_free_mr(dev, &eq->mr);
61933ec1ccbSHans Petter Selasky 	for (i = 0; i < npages; ++i)
62033ec1ccbSHans Petter Selasky 		pci_free_consistent(dev->pdev, PAGE_SIZE,
62133ec1ccbSHans Petter Selasky 				    eq->page_list[i].buf,
62233ec1ccbSHans Petter Selasky 				    dma_unmap_addr(&eq->page_list[i], mapping));
62333ec1ccbSHans Petter Selasky 
62433ec1ccbSHans Petter Selasky 	kfree(eq->page_list);
62533ec1ccbSHans Petter Selasky 	mthca_free_mailbox(dev, mailbox);
62633ec1ccbSHans Petter Selasky }
62733ec1ccbSHans Petter Selasky 
mthca_free_irqs(struct mthca_dev * dev)62833ec1ccbSHans Petter Selasky static void mthca_free_irqs(struct mthca_dev *dev)
62933ec1ccbSHans Petter Selasky {
63033ec1ccbSHans Petter Selasky 	int i;
63133ec1ccbSHans Petter Selasky 
63233ec1ccbSHans Petter Selasky 	if (dev->eq_table.have_irq)
63333ec1ccbSHans Petter Selasky 		free_irq(dev->pdev->irq, dev);
63433ec1ccbSHans Petter Selasky 	for (i = 0; i < MTHCA_NUM_EQ; ++i)
63533ec1ccbSHans Petter Selasky 		if (dev->eq_table.eq[i].have_irq) {
63633ec1ccbSHans Petter Selasky 			free_irq(dev->eq_table.eq[i].msi_x_vector,
63733ec1ccbSHans Petter Selasky 				 dev->eq_table.eq + i);
63833ec1ccbSHans Petter Selasky 			dev->eq_table.eq[i].have_irq = 0;
63933ec1ccbSHans Petter Selasky 		}
64033ec1ccbSHans Petter Selasky }
64133ec1ccbSHans Petter Selasky 
/*
 * ioremap a register window at @offset within BAR 0 of the device.
 * On success stores the mapping in *@map and returns 0; returns
 * -ENOMEM if the mapping fails.
 */
static int mthca_map_reg(struct mthca_dev *dev,
			 unsigned long offset, unsigned long size,
			 void __iomem **map)
{
	phys_addr_t bar0 = pci_resource_start(dev->pdev, 0);
	void __iomem *va = ioremap(bar0 + offset, size);

	if (!va)
		return -ENOMEM;

	*map = va;
	return 0;
}
65433ec1ccbSHans Petter Selasky 
mthca_map_eq_regs(struct mthca_dev * dev)65533ec1ccbSHans Petter Selasky static int mthca_map_eq_regs(struct mthca_dev *dev)
65633ec1ccbSHans Petter Selasky {
65733ec1ccbSHans Petter Selasky 	if (mthca_is_memfree(dev)) {
65833ec1ccbSHans Petter Selasky 		/*
65933ec1ccbSHans Petter Selasky 		 * We assume that the EQ arm and EQ set CI registers
66033ec1ccbSHans Petter Selasky 		 * fall within the first BAR.  We can't trust the
66133ec1ccbSHans Petter Selasky 		 * values firmware gives us, since those addresses are
66233ec1ccbSHans Petter Selasky 		 * valid on the HCA's side of the PCI bus but not
66333ec1ccbSHans Petter Selasky 		 * necessarily the host side.
66433ec1ccbSHans Petter Selasky 		 */
66533ec1ccbSHans Petter Selasky 		if (mthca_map_reg(dev, (pci_resource_len(dev->pdev, 0) - 1) &
66633ec1ccbSHans Petter Selasky 				  dev->fw.arbel.clr_int_base, MTHCA_CLR_INT_SIZE,
66733ec1ccbSHans Petter Selasky 				  &dev->clr_base)) {
66833ec1ccbSHans Petter Selasky 			mthca_err(dev, "Couldn't map interrupt clear register, "
66933ec1ccbSHans Petter Selasky 				  "aborting.\n");
67033ec1ccbSHans Petter Selasky 			return -ENOMEM;
67133ec1ccbSHans Petter Selasky 		}
67233ec1ccbSHans Petter Selasky 
67333ec1ccbSHans Petter Selasky 		/*
67433ec1ccbSHans Petter Selasky 		 * Add 4 because we limit ourselves to EQs 0 ... 31,
67533ec1ccbSHans Petter Selasky 		 * so we only need the low word of the register.
67633ec1ccbSHans Petter Selasky 		 */
67733ec1ccbSHans Petter Selasky 		if (mthca_map_reg(dev, ((pci_resource_len(dev->pdev, 0) - 1) &
67833ec1ccbSHans Petter Selasky 					dev->fw.arbel.eq_arm_base) + 4, 4,
67933ec1ccbSHans Petter Selasky 				  &dev->eq_regs.arbel.eq_arm)) {
68033ec1ccbSHans Petter Selasky 			mthca_err(dev, "Couldn't map EQ arm register, aborting.\n");
68133ec1ccbSHans Petter Selasky 			iounmap(dev->clr_base);
68233ec1ccbSHans Petter Selasky 			return -ENOMEM;
68333ec1ccbSHans Petter Selasky 		}
68433ec1ccbSHans Petter Selasky 
68533ec1ccbSHans Petter Selasky 		if (mthca_map_reg(dev, (pci_resource_len(dev->pdev, 0) - 1) &
68633ec1ccbSHans Petter Selasky 				  dev->fw.arbel.eq_set_ci_base,
68733ec1ccbSHans Petter Selasky 				  MTHCA_EQ_SET_CI_SIZE,
68833ec1ccbSHans Petter Selasky 				  &dev->eq_regs.arbel.eq_set_ci_base)) {
68933ec1ccbSHans Petter Selasky 			mthca_err(dev, "Couldn't map EQ CI register, aborting.\n");
69033ec1ccbSHans Petter Selasky 			iounmap(dev->eq_regs.arbel.eq_arm);
69133ec1ccbSHans Petter Selasky 			iounmap(dev->clr_base);
69233ec1ccbSHans Petter Selasky 			return -ENOMEM;
69333ec1ccbSHans Petter Selasky 		}
69433ec1ccbSHans Petter Selasky 	} else {
69533ec1ccbSHans Petter Selasky 		if (mthca_map_reg(dev, MTHCA_CLR_INT_BASE, MTHCA_CLR_INT_SIZE,
69633ec1ccbSHans Petter Selasky 				  &dev->clr_base)) {
69733ec1ccbSHans Petter Selasky 			mthca_err(dev, "Couldn't map interrupt clear register, "
69833ec1ccbSHans Petter Selasky 				  "aborting.\n");
69933ec1ccbSHans Petter Selasky 			return -ENOMEM;
70033ec1ccbSHans Petter Selasky 		}
70133ec1ccbSHans Petter Selasky 
70233ec1ccbSHans Petter Selasky 		if (mthca_map_reg(dev, MTHCA_ECR_BASE,
70333ec1ccbSHans Petter Selasky 				  MTHCA_ECR_SIZE + MTHCA_ECR_CLR_SIZE,
70433ec1ccbSHans Petter Selasky 				  &dev->eq_regs.tavor.ecr_base)) {
70533ec1ccbSHans Petter Selasky 			mthca_err(dev, "Couldn't map ecr register, "
70633ec1ccbSHans Petter Selasky 				  "aborting.\n");
70733ec1ccbSHans Petter Selasky 			iounmap(dev->clr_base);
70833ec1ccbSHans Petter Selasky 			return -ENOMEM;
70933ec1ccbSHans Petter Selasky 		}
71033ec1ccbSHans Petter Selasky 	}
71133ec1ccbSHans Petter Selasky 
71233ec1ccbSHans Petter Selasky 	return 0;
71333ec1ccbSHans Petter Selasky 
71433ec1ccbSHans Petter Selasky }
71533ec1ccbSHans Petter Selasky 
mthca_unmap_eq_regs(struct mthca_dev * dev)71633ec1ccbSHans Petter Selasky static void mthca_unmap_eq_regs(struct mthca_dev *dev)
71733ec1ccbSHans Petter Selasky {
71833ec1ccbSHans Petter Selasky 	if (mthca_is_memfree(dev)) {
71933ec1ccbSHans Petter Selasky 		iounmap(dev->eq_regs.arbel.eq_set_ci_base);
72033ec1ccbSHans Petter Selasky 		iounmap(dev->eq_regs.arbel.eq_arm);
72133ec1ccbSHans Petter Selasky 		iounmap(dev->clr_base);
72233ec1ccbSHans Petter Selasky 	} else {
72333ec1ccbSHans Petter Selasky 		iounmap(dev->eq_regs.tavor.ecr_base);
72433ec1ccbSHans Petter Selasky 		iounmap(dev->clr_base);
72533ec1ccbSHans Petter Selasky 	}
72633ec1ccbSHans Petter Selasky }
72733ec1ccbSHans Petter Selasky 
/*
 * Back the EQ context table's ICM region with a single host page and
 * tell the HCA about it.  Returns 0 on success or a negative errno,
 * releasing the page and its DMA mapping on failure.
 */
int mthca_map_eq_icm(struct mthca_dev *dev, u64 icm_virt)
{
	dma_addr_t dma;
	int ret;

	/*
	 * We assume that mapping one page is enough for the whole EQ
	 * context table.  This is fine with all current HCAs, because
	 * we only use 32 EQs and each EQ uses 32 bytes of context
	 * memory, or 1 KB total.
	 */
	dev->eq_table.icm_virt = icm_virt;
	dev->eq_table.icm_page = alloc_page(GFP_HIGHUSER);
	if (!dev->eq_table.icm_page)
		return -ENOMEM;

	dma = pci_map_page(dev->pdev, dev->eq_table.icm_page, 0,
			   PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(dev->pdev, dma)) {
		__free_page(dev->eq_table.icm_page);
		return -ENOMEM;
	}
	dev->eq_table.icm_dma = dma;

	ret = mthca_MAP_ICM_page(dev, dma, icm_virt);
	if (ret) {
		pci_unmap_page(dev->pdev, dma, PAGE_SIZE,
			       PCI_DMA_BIDIRECTIONAL);
		__free_page(dev->eq_table.icm_page);
	}

	return ret;
}
75833ec1ccbSHans Petter Selasky 
mthca_unmap_eq_icm(struct mthca_dev * dev)75933ec1ccbSHans Petter Selasky void mthca_unmap_eq_icm(struct mthca_dev *dev)
76033ec1ccbSHans Petter Selasky {
76133ec1ccbSHans Petter Selasky 	mthca_UNMAP_ICM(dev, dev->eq_table.icm_virt, 1);
76233ec1ccbSHans Petter Selasky 	pci_unmap_page(dev->pdev, dev->eq_table.icm_dma, PAGE_SIZE,
76333ec1ccbSHans Petter Selasky 		       PCI_DMA_BIDIRECTIONAL);
76433ec1ccbSHans Petter Selasky 	__free_page(dev->eq_table.icm_page);
76533ec1ccbSHans Petter Selasky }
76633ec1ccbSHans Petter Selasky 
/*
 * Initialize the EQ table: set up the EQ number allocator, map EQ
 * registers, create the completion/async/command EQs, hook up
 * interrupts (per-EQ MSI-X vectors or one shared INTx line), map the
 * async and command event classes to their EQs, and arm all EQs.
 * Returns 0 on success or a negative errno, unwinding in reverse
 * order on failure.
 */
int mthca_init_eq_table(struct mthca_dev *dev)
{
	int err;
	u8 intr;
	int i;

	/* EQ number allocator over the device's range, minus EQs
	 * reserved by firmware. */
	err = mthca_alloc_init(&dev->eq_table.alloc,
			       dev->limits.num_eqs,
			       dev->limits.num_eqs - 1,
			       dev->limits.reserved_eqs);
	if (err)
		return err;

	err = mthca_map_eq_regs(dev);
	if (err)
		goto err_out_free;

	if (dev->mthca_flags & MTHCA_FLAG_MSI_X) {
		/* MSI-X: no interrupt-clear register handling needed. */
		dev->eq_table.clr_mask = 0;
	} else {
		/* INTx: pick the bit and the half of the 64-bit clear
		 * register corresponding to the INTA pin. */
		dev->eq_table.clr_mask =
			swab32(1 << (dev->eq_table.inta_pin & 31));
		dev->eq_table.clr_int  = dev->clr_base +
			(dev->eq_table.inta_pin < 32 ? 4 : 0);
	}

	dev->eq_table.arm_mask = 0;

	intr = dev->eq_table.inta_pin;

	/* Completion EQ: sized for all CQs plus spare entries; MSI-X
	 * vectors use intr numbers 128..130 below. */
	err = mthca_create_eq(dev, dev->limits.num_cqs + MTHCA_NUM_SPARE_EQE,
			      (dev->mthca_flags & MTHCA_FLAG_MSI_X) ? 128 : intr,
			      &dev->eq_table.eq[MTHCA_EQ_COMP]);
	if (err)
		goto err_out_unmap;

	err = mthca_create_eq(dev, MTHCA_NUM_ASYNC_EQE + MTHCA_NUM_SPARE_EQE,
			      (dev->mthca_flags & MTHCA_FLAG_MSI_X) ? 129 : intr,
			      &dev->eq_table.eq[MTHCA_EQ_ASYNC]);
	if (err)
		goto err_out_comp;

	err = mthca_create_eq(dev, MTHCA_NUM_CMD_EQE + MTHCA_NUM_SPARE_EQE,
			      (dev->mthca_flags & MTHCA_FLAG_MSI_X) ? 130 : intr,
			      &dev->eq_table.eq[MTHCA_EQ_CMD]);
	if (err)
		goto err_out_async;

	if (dev->mthca_flags & MTHCA_FLAG_MSI_X) {
		/* One dedicated vector and handler per EQ. */
		static const char *eq_name[] = {
			[MTHCA_EQ_COMP]  = DRV_NAME "-comp",
			[MTHCA_EQ_ASYNC] = DRV_NAME "-async",
			[MTHCA_EQ_CMD]   = DRV_NAME "-cmd"
		};

		for (i = 0; i < MTHCA_NUM_EQ; ++i) {
			snprintf(dev->eq_table.eq[i].irq_name,
				 IB_DEVICE_NAME_MAX,
				 "%s@pci:%s", eq_name[i],
				 pci_name(dev->pdev));
			err = request_irq(dev->eq_table.eq[i].msi_x_vector,
					  mthca_is_memfree(dev) ?
					  mthca_arbel_msi_x_interrupt :
					  mthca_tavor_msi_x_interrupt,
					  0, dev->eq_table.eq[i].irq_name,
					  dev->eq_table.eq + i);
			if (err)
				goto err_out_cmd;
			dev->eq_table.eq[i].have_irq = 1;
		}
	} else {
		/* Single shared INTx line serving all EQs. */
		snprintf(dev->eq_table.eq[0].irq_name, IB_DEVICE_NAME_MAX,
			 DRV_NAME "@pci:%s", pci_name(dev->pdev));
		err = request_irq(dev->pdev->irq,
				  mthca_is_memfree(dev) ?
				  mthca_arbel_interrupt :
				  mthca_tavor_interrupt,
				  IRQF_SHARED, dev->eq_table.eq[0].irq_name, dev);
		if (err)
			goto err_out_cmd;
		dev->eq_table.have_irq = 1;
	}

	/* Route async and command events to their EQs; failures are
	 * logged but not fatal. */
	err = mthca_MAP_EQ(dev, async_mask(dev),
			   0, dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn);
	if (err)
		mthca_warn(dev, "MAP_EQ for async EQ %d failed (%d)\n",
			   dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn, err);

	err = mthca_MAP_EQ(dev, MTHCA_CMD_EVENT_MASK,
			   0, dev->eq_table.eq[MTHCA_EQ_CMD].eqn);
	if (err)
		mthca_warn(dev, "MAP_EQ for cmd EQ %d failed (%d)\n",
			   dev->eq_table.eq[MTHCA_EQ_CMD].eqn, err);

	/* Arm every EQ so it starts reporting events. */
	for (i = 0; i < MTHCA_NUM_EQ; ++i)
		if (mthca_is_memfree(dev))
			arbel_eq_req_not(dev, dev->eq_table.eq[i].eqn_mask);
		else
			tavor_eq_req_not(dev, dev->eq_table.eq[i].eqn);

	return 0;

err_out_cmd:
	mthca_free_irqs(dev);
	mthca_free_eq(dev, &dev->eq_table.eq[MTHCA_EQ_CMD]);

err_out_async:
	mthca_free_eq(dev, &dev->eq_table.eq[MTHCA_EQ_ASYNC]);

err_out_comp:
	mthca_free_eq(dev, &dev->eq_table.eq[MTHCA_EQ_COMP]);

err_out_unmap:
	mthca_unmap_eq_regs(dev);

err_out_free:
	mthca_alloc_cleanup(&dev->eq_table.alloc);
	return err;
}
88733ec1ccbSHans Petter Selasky 
mthca_cleanup_eq_table(struct mthca_dev * dev)88833ec1ccbSHans Petter Selasky void mthca_cleanup_eq_table(struct mthca_dev *dev)
88933ec1ccbSHans Petter Selasky {
89033ec1ccbSHans Petter Selasky 	int i;
89133ec1ccbSHans Petter Selasky 
89233ec1ccbSHans Petter Selasky 	mthca_free_irqs(dev);
89333ec1ccbSHans Petter Selasky 
89433ec1ccbSHans Petter Selasky 	mthca_MAP_EQ(dev, async_mask(dev),
89533ec1ccbSHans Petter Selasky 		     1, dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn);
89633ec1ccbSHans Petter Selasky 	mthca_MAP_EQ(dev, MTHCA_CMD_EVENT_MASK,
89733ec1ccbSHans Petter Selasky 		     1, dev->eq_table.eq[MTHCA_EQ_CMD].eqn);
89833ec1ccbSHans Petter Selasky 
89933ec1ccbSHans Petter Selasky 	for (i = 0; i < MTHCA_NUM_EQ; ++i)
90033ec1ccbSHans Petter Selasky 		mthca_free_eq(dev, &dev->eq_table.eq[i]);
90133ec1ccbSHans Petter Selasky 
90233ec1ccbSHans Petter Selasky 	mthca_unmap_eq_regs(dev);
90333ec1ccbSHans Petter Selasky 
90433ec1ccbSHans Petter Selasky 	mthca_alloc_cleanup(&dev->eq_table.alloc);
90533ec1ccbSHans Petter Selasky }
906