/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/* Copyright (c) 2021, Microsoft Corporation. */

#ifndef _GDMA_H
#define _GDMA_H

#include <linux/dma-mapping.h>
#include <linux/netdevice.h>

#include "shm_channel.h"

#define GDMA_STATUS_MORE_ENTRIES	0x00000105

/* Structures labeled with "HW DATA" are exchanged with the hardware. All of
 * them are naturally aligned and hence don't need __packed.
 */

enum gdma_request_type {
	GDMA_VERIFY_VF_DRIVER_VERSION	= 1,
	GDMA_QUERY_MAX_RESOURCES	= 2,
	GDMA_LIST_DEVICES		= 3,
	GDMA_REGISTER_DEVICE		= 4,
	GDMA_DEREGISTER_DEVICE		= 5,
	GDMA_GENERATE_TEST_EQE		= 10,
	GDMA_CREATE_QUEUE		= 12,
	GDMA_DISABLE_QUEUE		= 13,
	GDMA_ALLOCATE_RESOURCE_RANGE	= 22,
	GDMA_DESTROY_RESOURCE_RANGE	= 24,
	GDMA_CREATE_DMA_REGION		= 25,
	GDMA_DMA_REGION_ADD_PAGES	= 26,
	GDMA_DESTROY_DMA_REGION		= 27,
	GDMA_CREATE_PD			= 29,
	GDMA_DESTROY_PD			= 30,
	GDMA_CREATE_MR			= 31,
	GDMA_DESTROY_MR			= 32,
	GDMA_QUERY_HWC_TIMEOUT		= 84, /* 0x54 */
};

#define GDMA_RESOURCE_DOORBELL_PAGE	27

enum gdma_queue_type {
	GDMA_INVALID_QUEUE,
	GDMA_SQ,
	GDMA_RQ,
	GDMA_CQ,
	GDMA_EQ,
};

enum gdma_work_request_flags {
	GDMA_WR_NONE			= 0,
	GDMA_WR_OOB_IN_SGL		= BIT(0),
	GDMA_WR_PAD_BY_SGE0		= BIT(1),
};

enum gdma_eqe_type {
	GDMA_EQE_COMPLETION		= 3,
	GDMA_EQE_TEST_EVENT		= 64,
	GDMA_EQE_HWC_INIT_EQ_ID_DB	= 129,
	GDMA_EQE_HWC_INIT_DATA		= 130,
	GDMA_EQE_HWC_INIT_DONE		= 131,
	GDMA_EQE_HWC_SOC_RECONFIG	= 132,
	GDMA_EQE_HWC_SOC_RECONFIG_DATA	= 133,
};

enum {
	GDMA_DEVICE_NONE	= 0,
	GDMA_DEVICE_HWC		= 1,
	GDMA_DEVICE_MANA	= 2,
	GDMA_DEVICE_MANA_IB	= 3,
};

struct gdma_resource {
	/* Protect the bitmap */
	spinlock_t lock;

	/* The bitmap size in bits. */
	u32 size;

	/* The bitmap tracks the resources. */
	unsigned long *map;
};

union gdma_doorbell_entry {
	u64	as_uint64;

	struct {
		u64 id		: 24;
		u64 reserved	: 8;
		u64 tail_ptr	: 31;
		u64 arm		: 1;
	} cq;

	struct {
		u64 id		: 24;
		u64 wqe_cnt	: 8;
		u64 tail_ptr	: 32;
	} rq;

	struct {
		u64 id		: 24;
		u64 reserved	: 8;
		u64 tail_ptr	: 32;
	} sq;

	struct {
		u64 id		: 16;
		u64 reserved	: 16;
		u64 tail_ptr	: 31;
		u64 arm		: 1;
	} eq;
}; /* HW DATA */
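
/* A doorbell ring is a single 64-bit write of one of the layouts above to
 * the queue's doorbell page. Illustrative sketch only (the authoritative
 * logic lives in the GDMA core, not in this header); 'db_addr' is a
 * hypothetical pointer into the mapped doorbell page:
 *
 *	union gdma_doorbell_entry e = {};
 *
 *	e.cq.id       = cq->id;
 *	e.cq.tail_ptr = cq->head;
 *	e.cq.arm      = 1;
 *	writeq(e.as_uint64, db_addr);
 */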

struct gdma_msg_hdr {
	u32 hdr_type;
	u32 msg_type;
	u16 msg_version;
	u16 hwc_msg_id;
	u32 msg_size;
}; /* HW DATA */

struct gdma_dev_id {
	union {
		struct {
			u16 type;
			u16 instance;
		};

		u32 as_uint32;
	};
}; /* HW DATA */

struct gdma_req_hdr {
	struct gdma_msg_hdr req;
	struct gdma_msg_hdr resp; /* The expected response */
	struct gdma_dev_id dev_id;
	u32 activity_id;
}; /* HW DATA */

struct gdma_resp_hdr {
	struct gdma_msg_hdr response;
	struct gdma_dev_id dev_id;
	u32 activity_id;
	u32 status;
	u32 reserved;
}; /* HW DATA */

struct gdma_general_req {
	struct gdma_req_hdr hdr;
}; /* HW DATA */

#define GDMA_MESSAGE_V1 1
#define GDMA_MESSAGE_V2 2
#define GDMA_MESSAGE_V3 3

struct gdma_general_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

#define GDMA_STANDARD_HEADER_TYPE 0

static inline void mana_gd_init_req_hdr(struct gdma_req_hdr *hdr, u32 code,
					u32 req_size, u32 resp_size)
{
	hdr->req.hdr_type = GDMA_STANDARD_HEADER_TYPE;
	hdr->req.msg_type = code;
	hdr->req.msg_version = GDMA_MESSAGE_V1;
	hdr->req.msg_size = req_size;

	hdr->resp.hdr_type = GDMA_STANDARD_HEADER_TYPE;
	hdr->resp.msg_type = code;
	hdr->resp.msg_version = GDMA_MESSAGE_V1;
	hdr->resp.msg_size = resp_size;
}
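
/* Typical usage sketch (illustrative; real callers add their own error
 * handling): stamp both the request and the expected-response headers, then
 * issue the message over the HWC with mana_gd_send_request(), which is
 * declared later in this file.
 *
 *	struct gdma_query_max_resources_resp resp = {};
 *	struct gdma_general_req req = {};
 *	int err;
 *
 *	mana_gd_init_req_hdr(&req.hdr, GDMA_QUERY_MAX_RESOURCES,
 *			     sizeof(req), sizeof(resp));
 *	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
 */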

/* The 16-byte struct is part of the GDMA work queue entry (WQE). */
struct gdma_sge {
	u64 address;
	u32 mem_key;
	u32 size;
}; /* HW DATA */

struct gdma_wqe_request {
	struct gdma_sge *sgl;
	u32 num_sge;

	u32 inline_oob_size;
	const void *inline_oob_data;

	u32 flags;
	u32 client_data_unit;
};

enum gdma_page_type {
	GDMA_PAGE_TYPE_4K,
};

#define GDMA_INVALID_DMA_REGION 0

struct gdma_mem_info {
	struct device *dev;

	dma_addr_t dma_handle;
	void *virt_addr;
	u64 length;

	/* Allocated by the PF driver */
	u64 dma_region_handle;
};

#define REGISTER_ATB_MST_MKEY_LOWER_SIZE 8

struct gdma_dev {
	struct gdma_context *gdma_context;

	struct gdma_dev_id dev_id;

	u32 pdid;
	u32 doorbell;
	u32 gpa_mkey;

	/* GDMA driver specific pointer */
	void *driver_data;

	struct auxiliary_device *adev;
};

#define MINIMUM_SUPPORTED_PAGE_SIZE PAGE_SIZE

#define GDMA_CQE_SIZE 64
#define GDMA_EQE_SIZE 16
#define GDMA_MAX_SQE_SIZE 512
#define GDMA_MAX_RQE_SIZE 256

#define GDMA_COMP_DATA_SIZE 0x3C

#define GDMA_EVENT_DATA_SIZE 0xC

/* The WQE size must be a multiple of the Basic Unit, which is 32 bytes. */
#define GDMA_WQE_BU_SIZE 32
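
/* For example, a WQE whose header, inline OOB and SGL add up to 48 bytes
 * consumes ALIGN(48, GDMA_WQE_BU_SIZE) = 64 bytes, i.e. 2 BUs, of queue
 * space.
 */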

#define INVALID_PDID		UINT_MAX
#define INVALID_DOORBELL	UINT_MAX
#define INVALID_MEM_KEY		UINT_MAX
#define INVALID_QUEUE_ID	UINT_MAX
#define INVALID_PCI_MSIX_INDEX  UINT_MAX

struct gdma_comp {
	u32 cqe_data[GDMA_COMP_DATA_SIZE / 4];
	u32 wq_num;
	bool is_sq;
};

struct gdma_event {
	u32 details[GDMA_EVENT_DATA_SIZE / 4];
	u8  type;
};

struct gdma_queue;

struct mana_eq {
	struct gdma_queue *eq;
};

typedef void gdma_eq_callback(void *context, struct gdma_queue *q,
			      struct gdma_event *e);

typedef void gdma_cq_callback(void *context, struct gdma_queue *q);

/* The 'head' is the producer index. For SQ/RQ, when the driver posts a WQE
 * (Note: the WQE size must be a multiple of the 32-byte Basic Unit), the
 * driver increases the 'head' in BUs rather than in bytes, and notifies
 * the HW of the updated head. For EQ/CQ, the driver uses the 'head' to track
 * the HW head, and increases the 'head' by 1 for every processed EQE/CQE.
 *
 * The 'tail' is the consumer index for SQ/RQ. After the CQE of the SQ/RQ is
 * processed, the driver increases the 'tail' to indicate that WQEs have
 * been consumed by the HW, so the driver can post new WQEs into the SQ/RQ.
 *
 * The driver doesn't use the 'tail' for EQ/CQ, because the driver ensures
 * that the EQ/CQ is big enough so they can't overflow, and the driver uses
 * the owner bits mechanism to detect if the queue has become empty.
 */
struct gdma_queue {
	struct gdma_dev *gdma_dev;

	enum gdma_queue_type type;
	u32 id;

	struct gdma_mem_info mem_info;

	void *queue_mem_ptr;
	u32 queue_size;

	bool monitor_avl_buf;

	u32 head;
	u32 tail;
	struct list_head entry;

	/* Extra fields specific to EQ/CQ. */
	union {
		struct {
			bool disable_needed;

			gdma_eq_callback *callback;
			void *context;

			unsigned int msix_index;

			u32 log2_throttle_limit;
		} eq;

		struct {
			gdma_cq_callback *callback;
			void *context;

			struct gdma_queue *parent; /* For CQ/EQ relationship */
		} cq;
	};
};
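
/* Illustrative CQ consumption sketch (the real consumers are the HWC and
 * the MANA ethernet driver; handle_comp() is a hypothetical handler):
 *
 *	struct gdma_comp comps[8];
 *	int i, n;
 *
 *	n = mana_gd_poll_cq(cq, comps, ARRAY_SIZE(comps));
 *	for (i = 0; i < n; i++)
 *		handle_comp(&comps[i]);
 *	mana_gd_ring_cq(cq, SET_ARM_BIT);
 */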

struct gdma_queue_spec {
	enum gdma_queue_type type;
	bool monitor_avl_buf;
	unsigned int queue_size;

	/* Extra fields specific to EQ/CQ. */
	union {
		struct {
			gdma_eq_callback *callback;
			void *context;

			unsigned long log2_throttle_limit;
			unsigned int msix_index;
		} eq;

		struct {
			gdma_cq_callback *callback;
			void *context;

			struct gdma_queue *parent_eq;

		} cq;
	};
};

#define MANA_IRQ_NAME_SZ 32

struct gdma_irq_context {
	void (*handler)(void *arg);
	/* Protect the eq_list */
	spinlock_t lock;
	struct list_head eq_list;
	char name[MANA_IRQ_NAME_SZ];
};

struct gdma_context {
	struct device		*dev;

	/* Per-vPort max number of queues */
	unsigned int		max_num_queues;
	unsigned int		max_num_msix;
	unsigned int		num_msix_usable;
	struct gdma_irq_context	*irq_contexts;

	/* L2 MTU */
	u16 adapter_mtu;

	/* This maps a CQ index to the queue structure. */
	unsigned int		max_num_cqs;
	struct gdma_queue	**cq_table;

	/* Protect eq_test_event and test_event_eq_id */
	struct mutex		eq_test_event_mutex;
	struct completion	eq_test_event;
	u32			test_event_eq_id;

	bool			is_pf;
	phys_addr_t		bar0_pa;
	void __iomem		*bar0_va;
	void __iomem		*shm_base;
	void __iomem		*db_page_base;
	phys_addr_t		phys_db_page_base;
	u32 db_page_size;
	int			numa_node;

	/* Shared memory channel (used to bootstrap HWC) */
	struct shm_channel	shm_channel;

	/* Hardware communication channel (HWC) */
	struct gdma_dev		hwc;

	/* Azure network adapter */
	struct gdma_dev		mana;

	/* Azure RDMA adapter */
	struct gdma_dev		mana_ib;
};

#define MAX_NUM_GDMA_DEVICES	4

static inline bool mana_gd_is_mana(struct gdma_dev *gd)
{
	return gd->dev_id.type == GDMA_DEVICE_MANA;
}

static inline bool mana_gd_is_hwc(struct gdma_dev *gd)
{
	return gd->dev_id.type == GDMA_DEVICE_HWC;
}

u8 *mana_gd_get_wqe_ptr(const struct gdma_queue *wq, u32 wqe_offset);
u32 mana_gd_wq_avail_space(struct gdma_queue *wq);

int mana_gd_test_eq(struct gdma_context *gc, struct gdma_queue *eq);

int mana_gd_create_hwc_queue(struct gdma_dev *gd,
			     const struct gdma_queue_spec *spec,
			     struct gdma_queue **queue_ptr);

int mana_gd_create_mana_eq(struct gdma_dev *gd,
			   const struct gdma_queue_spec *spec,
			   struct gdma_queue **queue_ptr);

int mana_gd_create_mana_wq_cq(struct gdma_dev *gd,
			      const struct gdma_queue_spec *spec,
			      struct gdma_queue **queue_ptr);

void mana_gd_destroy_queue(struct gdma_context *gc, struct gdma_queue *queue);

int mana_gd_poll_cq(struct gdma_queue *cq, struct gdma_comp *comp, int num_cqe);

void mana_gd_ring_cq(struct gdma_queue *cq, u8 arm_bit);

struct gdma_wqe {
	u32 reserved	:24;
	u32 last_vbytes	:8;

	union {
		u32 flags;

		struct {
			u32 num_sge		:8;
			u32 inline_oob_size_div4:3;
			u32 client_oob_in_sgl	:1;
			u32 reserved1		:4;
			u32 client_data_unit	:14;
			u32 reserved2		:2;
		};
	};
}; /* HW DATA */

#define INLINE_OOB_SMALL_SIZE 8
#define INLINE_OOB_LARGE_SIZE 24
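
/* The WQE header encodes the inline OOB size in 4-byte units, so the two
 * sizes above map to inline_oob_size_div4 values of 8 / 4 = 2 and
 * 24 / 4 = 6, both of which fit in the 3-bit field.
 */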

#define MAX_TX_WQE_SIZE 512
#define MAX_RX_WQE_SIZE 256

#define MAX_TX_WQE_SGL_ENTRIES	((GDMA_MAX_SQE_SIZE -			   \
			sizeof(struct gdma_sge) - INLINE_OOB_SMALL_SIZE) / \
			sizeof(struct gdma_sge))

#define MAX_RX_WQE_SGL_ENTRIES	((GDMA_MAX_RQE_SIZE -			   \
			sizeof(struct gdma_sge)) / sizeof(struct gdma_sge))
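
/* With the 16-byte struct gdma_sge, these evaluate to
 * (512 - 16 - 8) / 16 = 30 TX SGEs and (256 - 16) / 16 = 15 RX SGEs.
 */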

struct gdma_cqe {
	u32 cqe_data[GDMA_COMP_DATA_SIZE / 4];

	union {
		u32 as_uint32;

		struct {
			u32 wq_num	: 24;
			u32 is_sq	: 1;
			u32 reserved	: 4;
			u32 owner_bits	: 3;
		};
	} cqe_info;
}; /* HW DATA */

#define GDMA_CQE_OWNER_BITS 3

#define GDMA_CQE_OWNER_MASK ((1 << GDMA_CQE_OWNER_BITS) - 1)

#define SET_ARM_BIT 1

#define GDMA_EQE_OWNER_BITS 3

union gdma_eqe_info {
	u32 as_uint32;

	struct {
		u32 type	: 8;
		u32 reserved1	: 8;
		u32 client_id	: 2;
		u32 reserved2	: 11;
		u32 owner_bits	: 3;
	};
}; /* HW DATA */

#define GDMA_EQE_OWNER_MASK ((1 << GDMA_EQE_OWNER_BITS) - 1)
#define INITIALIZED_OWNER_BIT(log2_num_entries) (1UL << (log2_num_entries))
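
/* The owner bits let EQ/CQ consumers detect new entries without a consumer
 * index: the value the hardware writes advances by one (modulo
 * 2^GDMA_EQE_OWNER_BITS) each time the queue wraps. A sketch of the usual
 * check, assuming 'head' counts processed entries and 'num_entries' is the
 * queue depth:
 *
 *	expected  = (head / num_entries) & GDMA_EQE_OWNER_MASK;
 *	new_entry = (eqe_info.owner_bits == expected);
 */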

struct gdma_eqe {
	u32 details[GDMA_EVENT_DATA_SIZE / 4];
	u32 eqe_info;
}; /* HW DATA */

#define GDMA_REG_DB_PAGE_OFFSET	8
#define GDMA_REG_DB_PAGE_SIZE	0x10
#define GDMA_REG_SHM_OFFSET	0x18

#define GDMA_PF_REG_DB_PAGE_SIZE	0xD0
#define GDMA_PF_REG_DB_PAGE_OFF		0xC8
#define GDMA_PF_REG_SHM_OFF		0x70

#define GDMA_SRIOV_REG_CFG_BASE_OFF	0x108

#define MANA_PF_DEVICE_ID 0x00B9
#define MANA_VF_DEVICE_ID 0x00BA

struct gdma_posted_wqe_info {
	u32 wqe_size_in_bu;
};

/* GDMA_GENERATE_TEST_EQE */
struct gdma_generate_test_event_req {
	struct gdma_req_hdr hdr;
	u32 queue_index;
}; /* HW DATA */

/* GDMA_VERIFY_VF_DRIVER_VERSION */
enum {
	GDMA_PROTOCOL_V1	= 1,
	GDMA_PROTOCOL_FIRST	= GDMA_PROTOCOL_V1,
	GDMA_PROTOCOL_LAST	= GDMA_PROTOCOL_V1,
};

#define GDMA_DRV_CAP_FLAG_1_EQ_SHARING_MULTI_VPORT BIT(0)

/* Advertise to the NIC firmware: the NAPI work_done variable race is fixed,
 * so the driver is able to reliably support features like busy_poll.
 */
#define GDMA_DRV_CAP_FLAG_1_NAPI_WKDONE_FIX BIT(2)
#define GDMA_DRV_CAP_FLAG_1_HWC_TIMEOUT_RECONFIG BIT(3)

#define GDMA_DRV_CAP_FLAGS1 \
	(GDMA_DRV_CAP_FLAG_1_EQ_SHARING_MULTI_VPORT | \
	 GDMA_DRV_CAP_FLAG_1_NAPI_WKDONE_FIX | \
	 GDMA_DRV_CAP_FLAG_1_HWC_TIMEOUT_RECONFIG)

#define GDMA_DRV_CAP_FLAGS2 0

#define GDMA_DRV_CAP_FLAGS3 0

#define GDMA_DRV_CAP_FLAGS4 0

struct gdma_verify_ver_req {
	struct gdma_req_hdr hdr;

	/* Mandatory fields required for protocol establishment */
	u64 protocol_ver_min;
	u64 protocol_ver_max;

	/* Gdma Driver Capability Flags */
	u64 gd_drv_cap_flags1;
	u64 gd_drv_cap_flags2;
	u64 gd_drv_cap_flags3;
	u64 gd_drv_cap_flags4;

	/* Advisory fields */
	u64 drv_ver;
	u32 os_type; /* Linux = 0x10; Windows = 0x20; Other = 0x30 */
	u32 reserved;
	u32 os_ver_major;
	u32 os_ver_minor;
	u32 os_ver_build;
	u32 os_ver_platform;
	u64 reserved_2;
	u8 os_ver_str1[128];
	u8 os_ver_str2[128];
	u8 os_ver_str3[128];
	u8 os_ver_str4[128];
}; /* HW DATA */

struct gdma_verify_ver_resp {
	struct gdma_resp_hdr hdr;
	u64 gdma_protocol_ver;
	u64 pf_cap_flags1;
	u64 pf_cap_flags2;
	u64 pf_cap_flags3;
	u64 pf_cap_flags4;
}; /* HW DATA */

/* GDMA_QUERY_MAX_RESOURCES */
struct gdma_query_max_resources_resp {
	struct gdma_resp_hdr hdr;
	u32 status;
	u32 max_sq;
	u32 max_rq;
	u32 max_cq;
	u32 max_eq;
	u32 max_db;
	u32 max_mst;
	u32 max_cq_mod_ctx;
	u32 max_mod_cq;
	u32 max_msix;
}; /* HW DATA */

/* GDMA_LIST_DEVICES */
struct gdma_list_devices_resp {
	struct gdma_resp_hdr hdr;
	u32 num_of_devs;
	u32 reserved;
	struct gdma_dev_id devs[64];
}; /* HW DATA */

/* GDMA_REGISTER_DEVICE */
struct gdma_register_device_resp {
	struct gdma_resp_hdr hdr;
	u32 pdid;
	u32 gpa_mkey;
	u32 db_id;
}; /* HW DATA */

struct gdma_allocate_resource_range_req {
	struct gdma_req_hdr hdr;
	u32 resource_type;
	u32 num_resources;
	u32 alignment;
	u32 allocated_resources;
};

struct gdma_allocate_resource_range_resp {
	struct gdma_resp_hdr hdr;
	u32 allocated_resources;
};

struct gdma_destroy_resource_range_req {
	struct gdma_req_hdr hdr;
	u32 resource_type;
	u32 num_resources;
	u32 allocated_resources;
};

/* GDMA_CREATE_QUEUE */
struct gdma_create_queue_req {
	struct gdma_req_hdr hdr;
	u32 type;
	u32 reserved1;
	u32 pdid;
	u32 doolbell_id;
	u64 gdma_region;
	u32 reserved2;
	u32 queue_size;
	u32 log2_throttle_limit;
	u32 eq_pci_msix_index;
	u32 cq_mod_ctx_id;
	u32 cq_parent_eq_id;
	u8  rq_drop_on_overrun;
	u8  rq_err_on_wqe_overflow;
	u8  rq_chain_rec_wqes;
	u8  sq_hw_db;
	u32 reserved3;
}; /* HW DATA */

struct gdma_create_queue_resp {
	struct gdma_resp_hdr hdr;
	u32 queue_index;
}; /* HW DATA */

/* GDMA_DISABLE_QUEUE */
struct gdma_disable_queue_req {
	struct gdma_req_hdr hdr;
	u32 type;
	u32 queue_index;
	u32 alloc_res_id_on_creation;
}; /* HW DATA */

/* GDMA_QUERY_HWC_TIMEOUT */
struct gdma_query_hwc_timeout_req {
	struct gdma_req_hdr hdr;
	u32 timeout_ms;
	u32 reserved;
};

struct gdma_query_hwc_timeout_resp {
	struct gdma_resp_hdr hdr;
	u32 timeout_ms;
	u32 reserved;
};

enum atb_page_size {
	ATB_PAGE_SIZE_4K,
	ATB_PAGE_SIZE_8K,
	ATB_PAGE_SIZE_16K,
	ATB_PAGE_SIZE_32K,
	ATB_PAGE_SIZE_64K,
	ATB_PAGE_SIZE_128K,
	ATB_PAGE_SIZE_256K,
	ATB_PAGE_SIZE_512K,
	ATB_PAGE_SIZE_1M,
	ATB_PAGE_SIZE_2M,
	ATB_PAGE_SIZE_MAX,
};

enum gdma_mr_access_flags {
	GDMA_ACCESS_FLAG_LOCAL_READ = BIT_ULL(0),
	GDMA_ACCESS_FLAG_LOCAL_WRITE = BIT_ULL(1),
	GDMA_ACCESS_FLAG_REMOTE_READ = BIT_ULL(2),
	GDMA_ACCESS_FLAG_REMOTE_WRITE = BIT_ULL(3),
	GDMA_ACCESS_FLAG_REMOTE_ATOMIC = BIT_ULL(4),
};

/* GDMA_CREATE_DMA_REGION */
struct gdma_create_dma_region_req {
	struct gdma_req_hdr hdr;

	/* The total size of the DMA region */
	u64 length;

	/* The offset in the first page */
	u32 offset_in_page;

	/* enum gdma_page_type */
	u32 gdma_page_type;

	/* The total number of pages */
	u32 page_count;

	/* If page_addr_list_len is smaller than page_count,
	 * the remaining page addresses will be added via the
	 * message GDMA_DMA_REGION_ADD_PAGES.
	 */
	u32 page_addr_list_len;
	u64 page_addr_list[];
}; /* HW DATA */
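
/* page_addr_list is a flexible array, so the size of a single create
 * request is sizeof(struct gdma_create_dma_region_req) +
 * page_addr_list_len * sizeof(u64); pages that do not fit into one HWC
 * message are added afterwards with GDMA_DMA_REGION_ADD_PAGES.
 */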

struct gdma_create_dma_region_resp {
	struct gdma_resp_hdr hdr;
	u64 dma_region_handle;
}; /* HW DATA */

/* GDMA_DMA_REGION_ADD_PAGES */
struct gdma_dma_region_add_pages_req {
	struct gdma_req_hdr hdr;

	u64 dma_region_handle;

	u32 page_addr_list_len;
	u32 reserved3;

	u64 page_addr_list[];
}; /* HW DATA */

/* GDMA_DESTROY_DMA_REGION */
struct gdma_destroy_dma_region_req {
	struct gdma_req_hdr hdr;

	u64 dma_region_handle;
}; /* HW DATA */

enum gdma_pd_flags {
	GDMA_PD_FLAG_INVALID = 0,
};

struct gdma_create_pd_req {
	struct gdma_req_hdr hdr;
	enum gdma_pd_flags flags;
	u32 reserved;
};/* HW DATA */

struct gdma_create_pd_resp {
	struct gdma_resp_hdr hdr;
	u64 pd_handle;
	u32 pd_id;
	u32 reserved;
};/* HW DATA */

struct gdma_destroy_pd_req {
	struct gdma_req_hdr hdr;
	u64 pd_handle;
};/* HW DATA */

struct gdma_destory_pd_resp {
	struct gdma_resp_hdr hdr;
};/* HW DATA */

enum gdma_mr_type {
	/* Guest Virtual Address - MRs of this type allow access
	 * to memory mapped by PTEs associated with this MR using a virtual
	 * address that is set up in the MST
	 */
	GDMA_MR_TYPE_GVA = 2,
};

struct gdma_create_mr_params {
	u64 pd_handle;
	enum gdma_mr_type mr_type;
	union {
		struct {
			u64 dma_region_handle;
			u64 virtual_address;
			enum gdma_mr_access_flags access_flags;
		} gva;
	};
};

struct gdma_create_mr_request {
	struct gdma_req_hdr hdr;
	u64 pd_handle;
	enum gdma_mr_type mr_type;
	u32 reserved_1;

	union {
		struct {
			u64 dma_region_handle;
			u64 virtual_address;
			enum gdma_mr_access_flags access_flags;
		} gva;

	};
	u32 reserved_2;
};/* HW DATA */

struct gdma_create_mr_response {
	struct gdma_resp_hdr hdr;
	u64 mr_handle;
	u32 lkey;
	u32 rkey;
};/* HW DATA */

struct gdma_destroy_mr_request {
	struct gdma_req_hdr hdr;
	u64 mr_handle;
};/* HW DATA */

struct gdma_destroy_mr_response {
	struct gdma_resp_hdr hdr;
};/* HW DATA */

int mana_gd_verify_vf_version(struct pci_dev *pdev);

int mana_gd_register_device(struct gdma_dev *gd);
int mana_gd_deregister_device(struct gdma_dev *gd);

int mana_gd_post_work_request(struct gdma_queue *wq,
			      const struct gdma_wqe_request *wqe_req,
			      struct gdma_posted_wqe_info *wqe_info);

int mana_gd_post_and_ring(struct gdma_queue *queue,
			  const struct gdma_wqe_request *wqe,
			  struct gdma_posted_wqe_info *wqe_info);
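
/* Illustrative TX-post sketch (hypothetical caller state such as 'sges',
 * 'num_sges' and 'oob'; the MANA ethernet driver is the real user of this
 * path):
 *
 *	struct gdma_posted_wqe_info wqe_info = {};
 *	struct gdma_wqe_request wqe_req = {};
 *	int err;
 *
 *	wqe_req.sgl             = sges;
 *	wqe_req.num_sge         = num_sges;
 *	wqe_req.inline_oob_size = INLINE_OOB_SMALL_SIZE;
 *	wqe_req.inline_oob_data = &oob;
 *
 *	err = mana_gd_post_and_ring(sq, &wqe_req, &wqe_info);
 */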

int mana_gd_alloc_res_map(u32 res_avail, struct gdma_resource *r);
void mana_gd_free_res_map(struct gdma_resource *r);

void mana_gd_wq_ring_doorbell(struct gdma_context *gc,
			      struct gdma_queue *queue);

int mana_gd_alloc_memory(struct gdma_context *gc, unsigned int length,
			 struct gdma_mem_info *gmi);

void mana_gd_free_memory(struct gdma_mem_info *gmi);

int mana_gd_send_request(struct gdma_context *gc, u32 req_len, const void *req,
			 u32 resp_len, void *resp);

int mana_gd_destroy_dma_region(struct gdma_context *gc, u64 dma_region_handle);

#endif /* _GDMA_H */