1 /*
2  * This file is provided under a dual BSD/GPLv2 license.  When using or
3  *   redistributing this file, you may do so under either license.
4  *
5  *   GPL LICENSE SUMMARY
6  *
7  *   Copyright (C) 2015 EMC Corporation. All Rights Reserved.
8  *   Copyright (C) 2016 T-Platforms. All Rights Reserved.
9  *
10  *   This program is free software; you can redistribute it and/or modify
11  *   it under the terms of version 2 of the GNU General Public License as
12  *   published by the Free Software Foundation.
13  *
14  *   This program is distributed in the hope that it will be useful, but
15  *   WITHOUT ANY WARRANTY; without even the implied warranty of
16  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
17  *   General Public License for more details.
18  *
19  *   BSD LICENSE
20  *
21  *   Copyright (C) 2015 EMC Corporation. All Rights Reserved.
22  *   Copyright (C) 2016 T-Platforms. All Rights Reserved.
23  *
24  *   Redistribution and use in source and binary forms, with or without
25  *   modification, are permitted provided that the following conditions
26  *   are met:
27  *
28  *     * Redistributions of source code must retain the above copyright
29  *       notice, this list of conditions and the following disclaimer.
30  *     * Redistributions in binary form must reproduce the above copyright
31  *       notice, this list of conditions and the following disclaimer in
32  *       the documentation and/or other materials provided with the
33  *       distribution.
34  *     * Neither the name of Intel Corporation nor the names of its
35  *       contributors may be used to endorse or promote products derived
36  *       from this software without specific prior written permission.
37  *
38  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
39  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
40  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
41  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
42  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
43  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
44  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
45  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
46  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
47  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
48  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
49  *
50  * PCIe NTB Linux driver
51  *
52  * Contact Information:
53  * Allen Hubbe <Allen.Hubbe@emc.com>
54  */
55 
56 #ifndef _NTB_H_
57 #define _NTB_H_
58 
59 #include <linux/completion.h>
60 #include <linux/device.h>
61 #include <linux/interrupt.h>
62 
63 struct ntb_client;
64 struct ntb_dev;
65 struct ntb_msi;
66 struct pci_dev;
67 
68 /**
69  * enum ntb_topo - NTB connection topology
70  * @NTB_TOPO_NONE:	Topology is unknown or invalid.
71  * @NTB_TOPO_PRI:	On primary side of local ntb.
72  * @NTB_TOPO_SEC:	On secondary side of remote ntb.
73  * @NTB_TOPO_B2B_USD:	On primary side of local ntb upstream of remote ntb.
74  * @NTB_TOPO_B2B_DSD:	On primary side of local ntb downstream of remote ntb.
75  * @NTB_TOPO_SWITCH:	Connected via a switch which supports ntb.
76  * @NTB_TOPO_CROSSLINK: Connected via two symmetric switches
77  */
78 enum ntb_topo {
79 	NTB_TOPO_NONE = -1,
80 	NTB_TOPO_PRI,
81 	NTB_TOPO_SEC,
82 	NTB_TOPO_B2B_USD,
83 	NTB_TOPO_B2B_DSD,
84 	NTB_TOPO_SWITCH,
85 	NTB_TOPO_CROSSLINK,
86 };
87 
88 static inline int ntb_topo_is_b2b(enum ntb_topo topo)
89 {
90 	switch ((int)topo) {
91 	case NTB_TOPO_B2B_USD:
92 	case NTB_TOPO_B2B_DSD:
93 		return 1;
94 	}
95 	return 0;
96 }
97 
98 static inline char *ntb_topo_string(enum ntb_topo topo)
99 {
100 	switch (topo) {
101 	case NTB_TOPO_NONE:		return "NTB_TOPO_NONE";
102 	case NTB_TOPO_PRI:		return "NTB_TOPO_PRI";
103 	case NTB_TOPO_SEC:		return "NTB_TOPO_SEC";
104 	case NTB_TOPO_B2B_USD:		return "NTB_TOPO_B2B_USD";
105 	case NTB_TOPO_B2B_DSD:		return "NTB_TOPO_B2B_DSD";
106 	case NTB_TOPO_SWITCH:		return "NTB_TOPO_SWITCH";
107 	case NTB_TOPO_CROSSLINK:	return "NTB_TOPO_CROSSLINK";
108 	}
109 	return "NTB_TOPO_INVALID";
110 }
111 
112 /**
113  * enum ntb_speed - NTB link training speed
114  * @NTB_SPEED_AUTO:	Request the max supported speed.
115  * @NTB_SPEED_NONE:	Link is not trained to any speed.
116  * @NTB_SPEED_GEN1:	Link is trained to gen1 speed.
117  * @NTB_SPEED_GEN2:	Link is trained to gen2 speed.
118  * @NTB_SPEED_GEN3:	Link is trained to gen3 speed.
119  * @NTB_SPEED_GEN4:	Link is trained to gen4 speed.
120  */
121 enum ntb_speed {
122 	NTB_SPEED_AUTO = -1,
123 	NTB_SPEED_NONE = 0,
124 	NTB_SPEED_GEN1 = 1,
125 	NTB_SPEED_GEN2 = 2,
126 	NTB_SPEED_GEN3 = 3,
127 	NTB_SPEED_GEN4 = 4
128 };
129 
130 /**
131  * enum ntb_width - NTB link training width
132  * @NTB_WIDTH_AUTO:	Request the max supported width.
133  * @NTB_WIDTH_NONE:	Link is not trained to any width.
134  * @NTB_WIDTH_1:	Link is trained to 1 lane width.
135  * @NTB_WIDTH_2:	Link is trained to 2 lane width.
136  * @NTB_WIDTH_4:	Link is trained to 4 lane width.
137  * @NTB_WIDTH_8:	Link is trained to 8 lane width.
138  * @NTB_WIDTH_12:	Link is trained to 12 lane width.
139  * @NTB_WIDTH_16:	Link is trained to 16 lane width.
140  * @NTB_WIDTH_32:	Link is trained to 32 lane width.
141  */
142 enum ntb_width {
143 	NTB_WIDTH_AUTO = -1,
144 	NTB_WIDTH_NONE = 0,
145 	NTB_WIDTH_1 = 1,
146 	NTB_WIDTH_2 = 2,
147 	NTB_WIDTH_4 = 4,
148 	NTB_WIDTH_8 = 8,
149 	NTB_WIDTH_12 = 12,
150 	NTB_WIDTH_16 = 16,
151 	NTB_WIDTH_32 = 32,
152 };
153 
154 /**
155  * enum ntb_default_port - NTB default port number
156  * @NTB_PORT_PRI_USD:	Default port of the NTB_TOPO_PRI/NTB_TOPO_B2B_USD
157  *			topologies
158  * @NTB_PORT_SEC_DSD:	Default port of the NTB_TOPO_SEC/NTB_TOPO_B2B_DSD
159  *			topologies
160  */
161 enum ntb_default_port {
162 	NTB_PORT_PRI_USD,
163 	NTB_PORT_SEC_DSD
164 };
165 #define NTB_DEF_PEER_CNT	(1)
166 #define NTB_DEF_PEER_IDX	(0)
167 
168 /**
169  * struct ntb_client_ops - ntb client operations
170  * @probe:		Notify client of a new device.
171  * @remove:		Notify client to remove a device.
172  */
173 struct ntb_client_ops {
174 	int (*probe)(struct ntb_client *client, struct ntb_dev *ntb);
175 	void (*remove)(struct ntb_client *client, struct ntb_dev *ntb);
176 };
177 
178 static inline int ntb_client_ops_is_valid(const struct ntb_client_ops *ops)
179 {
180 	/* commented callbacks are not required: */
181 	return
182 		ops->probe			&&
183 		ops->remove			&&
184 		1;
185 }
186 
187 /**
188  * struct ntb_ctx_ops - ntb driver context operations
189  * @link_event:		See ntb_link_event().
190  * @db_event:		See ntb_db_event().
191  * @msg_event:		See ntb_msg_event().
192  */
193 struct ntb_ctx_ops {
194 	void (*link_event)(void *ctx);
195 	void (*db_event)(void *ctx, int db_vector);
196 	void (*msg_event)(void *ctx);
197 };
198 
199 static inline int ntb_ctx_ops_is_valid(const struct ntb_ctx_ops *ops)
200 {
201 	/* commented callbacks are not required: */
202 	return
203 		/* ops->link_event		&& */
204 		/* ops->db_event		&& */
205 		/* ops->msg_event		&& */
206 		1;
207 }
208 
209 /**
210  * struct ntb_dev_ops - ntb device operations
211  * @port_number:	See ntb_port_number().
212  * @peer_port_count:	See ntb_peer_port_count().
213  * @peer_port_number:	See ntb_peer_port_number().
214  * @peer_port_idx:	See ntb_peer_port_idx().
215  * @link_is_up:		See ntb_link_is_up().
216  * @link_enable:	See ntb_link_enable().
217  * @link_disable:	See ntb_link_disable().
218  * @mw_count:		See ntb_mw_count().
219  * @mw_get_align:	See ntb_mw_get_align().
220  * @mw_set_trans:	See ntb_mw_set_trans().
221  * @mw_clear_trans:	See ntb_mw_clear_trans().
222  * @peer_mw_count:	See ntb_peer_mw_count().
223  * @peer_mw_get_addr:	See ntb_peer_mw_get_addr().
224  * @peer_mw_set_trans:	See ntb_peer_mw_set_trans().
225  * @peer_mw_clear_trans:	See ntb_peer_mw_clear_trans().
226  * @db_is_unsafe:	See ntb_db_is_unsafe().
227  * @db_valid_mask:	See ntb_db_valid_mask().
228  * @db_vector_count:	See ntb_db_vector_count().
229  * @db_vector_mask:	See ntb_db_vector_mask().
230  * @db_read:		See ntb_db_read().
231  * @db_set:		See ntb_db_set().
232  * @db_clear:		See ntb_db_clear().
233  * @db_read_mask:	See ntb_db_read_mask().
234  * @db_set_mask:	See ntb_db_set_mask().
235  * @db_clear_mask:	See ntb_db_clear_mask().
236  * @peer_db_addr:	See ntb_peer_db_addr().
237  * @peer_db_read:	See ntb_peer_db_read().
238  * @peer_db_set:	See ntb_peer_db_set().
239  * @peer_db_clear:	See ntb_peer_db_clear().
240  * @peer_db_read_mask:	See ntb_peer_db_read_mask().
241  * @peer_db_set_mask:	See ntb_peer_db_set_mask().
242  * @peer_db_clear_mask:	See ntb_peer_db_clear_mask().
243  * @spad_is_unsafe:	See ntb_spad_is_unsafe().
244  * @spad_count:		See ntb_spad_count().
245  * @spad_read:		See ntb_spad_read().
246  * @spad_write:		See ntb_spad_write().
247  * @peer_spad_addr:	See ntb_peer_spad_addr().
248  * @peer_spad_read:	See ntb_peer_spad_read().
249  * @peer_spad_write:	See ntb_peer_spad_write().
250  * @msg_count:		See ntb_msg_count().
251  * @msg_inbits:		See ntb_msg_inbits().
252  * @msg_outbits:	See ntb_msg_outbits().
253  * @msg_read_sts:	See ntb_msg_read_sts().
254  * @msg_clear_sts:	See ntb_msg_clear_sts().
255  * @msg_set_mask:	See ntb_msg_set_mask().
256  * @msg_clear_mask:	See ntb_msg_clear_mask().
257  * @msg_read:		See ntb_msg_read().
258  * @peer_msg_write:	See ntb_peer_msg_write().
259  */
260 struct ntb_dev_ops {
261 	int (*port_number)(struct ntb_dev *ntb);
262 	int (*peer_port_count)(struct ntb_dev *ntb);
263 	int (*peer_port_number)(struct ntb_dev *ntb, int pidx);
264 	int (*peer_port_idx)(struct ntb_dev *ntb, int port);
265 
266 	u64 (*link_is_up)(struct ntb_dev *ntb,
267 			  enum ntb_speed *speed, enum ntb_width *width);
268 	int (*link_enable)(struct ntb_dev *ntb,
269 			   enum ntb_speed max_speed, enum ntb_width max_width);
270 	int (*link_disable)(struct ntb_dev *ntb);
271 
272 	int (*mw_count)(struct ntb_dev *ntb, int pidx);
273 	int (*mw_get_align)(struct ntb_dev *ntb, int pidx, int widx,
274 			    resource_size_t *addr_align,
275 			    resource_size_t *size_align,
276 			    resource_size_t *size_max);
277 	int (*mw_set_trans)(struct ntb_dev *ntb, int pidx, int widx,
278 			    dma_addr_t addr, resource_size_t size);
279 	int (*mw_clear_trans)(struct ntb_dev *ntb, int pidx, int widx);
280 	int (*peer_mw_count)(struct ntb_dev *ntb);
281 	int (*peer_mw_get_addr)(struct ntb_dev *ntb, int widx,
282 				phys_addr_t *base, resource_size_t *size);
283 	int (*peer_mw_set_trans)(struct ntb_dev *ntb, int pidx, int widx,
284 				 u64 addr, resource_size_t size);
285 	int (*peer_mw_clear_trans)(struct ntb_dev *ntb, int pidx, int widx);
286 
287 	int (*db_is_unsafe)(struct ntb_dev *ntb);
288 	u64 (*db_valid_mask)(struct ntb_dev *ntb);
289 	int (*db_vector_count)(struct ntb_dev *ntb);
290 	u64 (*db_vector_mask)(struct ntb_dev *ntb, int db_vector);
291 
292 	u64 (*db_read)(struct ntb_dev *ntb);
293 	int (*db_set)(struct ntb_dev *ntb, u64 db_bits);
294 	int (*db_clear)(struct ntb_dev *ntb, u64 db_bits);
295 
296 	u64 (*db_read_mask)(struct ntb_dev *ntb);
297 	int (*db_set_mask)(struct ntb_dev *ntb, u64 db_bits);
298 	int (*db_clear_mask)(struct ntb_dev *ntb, u64 db_bits);
299 
300 	int (*peer_db_addr)(struct ntb_dev *ntb,
301 			    phys_addr_t *db_addr, resource_size_t *db_size,
302 				u64 *db_data, int db_bit);
303 	u64 (*peer_db_read)(struct ntb_dev *ntb);
304 	int (*peer_db_set)(struct ntb_dev *ntb, u64 db_bits);
305 	int (*peer_db_clear)(struct ntb_dev *ntb, u64 db_bits);
306 
307 	u64 (*peer_db_read_mask)(struct ntb_dev *ntb);
308 	int (*peer_db_set_mask)(struct ntb_dev *ntb, u64 db_bits);
309 	int (*peer_db_clear_mask)(struct ntb_dev *ntb, u64 db_bits);
310 
311 	int (*spad_is_unsafe)(struct ntb_dev *ntb);
312 	int (*spad_count)(struct ntb_dev *ntb);
313 
314 	u32 (*spad_read)(struct ntb_dev *ntb, int sidx);
315 	int (*spad_write)(struct ntb_dev *ntb, int sidx, u32 val);
316 
317 	int (*peer_spad_addr)(struct ntb_dev *ntb, int pidx, int sidx,
318 			      phys_addr_t *spad_addr);
319 	u32 (*peer_spad_read)(struct ntb_dev *ntb, int pidx, int sidx);
320 	int (*peer_spad_write)(struct ntb_dev *ntb, int pidx, int sidx,
321 			       u32 val);
322 
323 	int (*msg_count)(struct ntb_dev *ntb);
324 	u64 (*msg_inbits)(struct ntb_dev *ntb);
325 	u64 (*msg_outbits)(struct ntb_dev *ntb);
326 	u64 (*msg_read_sts)(struct ntb_dev *ntb);
327 	int (*msg_clear_sts)(struct ntb_dev *ntb, u64 sts_bits);
328 	int (*msg_set_mask)(struct ntb_dev *ntb, u64 mask_bits);
329 	int (*msg_clear_mask)(struct ntb_dev *ntb, u64 mask_bits);
330 	u32 (*msg_read)(struct ntb_dev *ntb, int *pidx, int midx);
331 	int (*peer_msg_write)(struct ntb_dev *ntb, int pidx, int midx, u32 msg);
332 };
333 
334 static inline int ntb_dev_ops_is_valid(const struct ntb_dev_ops *ops)
335 {
336 	/* commented callbacks are not required: */
337 	return
338 		/* Port operations are required for multiport devices */
339 		!ops->peer_port_count == !ops->port_number	&&
340 		!ops->peer_port_number == !ops->port_number	&&
341 		!ops->peer_port_idx == !ops->port_number	&&
342 
343 		/* Link operations are required */
344 		ops->link_is_up				&&
345 		ops->link_enable			&&
346 		ops->link_disable			&&
347 
348 		/* At least one of the MW interfaces must be implemented */
349 		ops->mw_count				&&
350 		ops->mw_get_align			&&
351 		(ops->mw_set_trans			||
352 		 ops->peer_mw_set_trans)		&&
353 		/* ops->mw_clear_trans			&& */
354 		ops->peer_mw_count			&&
355 		ops->peer_mw_get_addr			&&
356 		/* ops->peer_mw_clear_trans		&& */
357 
358 		/* Doorbell operations are mostly required */
359 		/* ops->db_is_unsafe			&& */
360 		ops->db_valid_mask			&&
361 		/* both set, or both unset */
362 		(!ops->db_vector_count == !ops->db_vector_mask)	&&
363 		ops->db_read				&&
364 		/* ops->db_set				&& */
365 		ops->db_clear				&&
366 		/* ops->db_read_mask			&& */
367 		ops->db_set_mask			&&
368 		ops->db_clear_mask			&&
369 		/* ops->peer_db_addr			&& */
370 		/* ops->peer_db_read			&& */
371 		ops->peer_db_set			&&
372 		/* ops->peer_db_clear			&& */
373 		/* ops->peer_db_read_mask		&& */
374 		/* ops->peer_db_set_mask		&& */
375 		/* ops->peer_db_clear_mask		&& */
376 
377 		/* Scratchpad interface is optional */
378 		/* !ops->spad_is_unsafe == !ops->spad_count	&& */
379 		!ops->spad_read == !ops->spad_count		&&
380 		!ops->spad_write == !ops->spad_count		&&
381 		/* !ops->peer_spad_addr == !ops->spad_count	&& */
382 		/* !ops->peer_spad_read == !ops->spad_count	&& */
383 		!ops->peer_spad_write == !ops->spad_count	&&
384 
385 		/* Messaging interface is optional */
386 		!ops->msg_inbits == !ops->msg_count		&&
387 		!ops->msg_outbits == !ops->msg_count		&&
388 		!ops->msg_read_sts == !ops->msg_count		&&
389 		!ops->msg_clear_sts == !ops->msg_count		&&
390 		/* !ops->msg_set_mask == !ops->msg_count	&& */
391 		/* !ops->msg_clear_mask == !ops->msg_count	&& */
392 		!ops->msg_read == !ops->msg_count		&&
393 		!ops->peer_msg_write == !ops->msg_count		&&
394 		1;
395 }
396 
397 /**
398  * struct ntb_client - client interested in ntb devices
399  * @drv:		Linux driver object.
400  * @ops:		See &ntb_client_ops.
401  */
402 struct ntb_client {
403 	struct device_driver		drv;
404 	const struct ntb_client_ops	ops;
405 };
406 #define drv_ntb_client(__drv) container_of((__drv), struct ntb_client, drv)
407 
408 /**
409  * struct ntb_dev - ntb device
410  * @dev:		Linux device object.
411  * @pdev:		PCI device entry of the ntb.
412  * @topo:		Detected topology of the ntb.
413  * @ops:		See &ntb_dev_ops.
414  * @ctx:		See &ntb_ctx_ops.
415  * @ctx_ops:		See &ntb_ctx_ops.
416  */
417 struct ntb_dev {
418 	struct device			dev;
419 	struct pci_dev			*pdev;
420 	enum ntb_topo			topo;
421 	const struct ntb_dev_ops	*ops;
422 	void				*ctx;
423 	const struct ntb_ctx_ops	*ctx_ops;
424 
425 	/* private: */
426 
427 	/* synchronize setting, clearing, and calling ctx_ops */
428 	spinlock_t			ctx_lock;
429 	/* block unregister until device is fully released */
430 	struct completion		released;
431 
432 #ifdef CONFIG_NTB_MSI
433 	struct ntb_msi *msi;
434 #endif
435 };
436 #define dev_ntb(__dev) container_of((__dev), struct ntb_dev, dev)
437 
438 /**
439  * ntb_register_client() - register a client for interest in ntb devices
440  * @client:	Client context.
441  *
442  * The client will be added to the list of clients interested in ntb devices.
443  * The client will be notified of any ntb devices that are not already
444  * associated with a client, or if ntb devices are registered later.
445  *
446  * Return: Zero if the client is registered, otherwise an error number.
447  */
448 #define ntb_register_client(client) \
449 	__ntb_register_client((client), THIS_MODULE, KBUILD_MODNAME)
450 
451 int __ntb_register_client(struct ntb_client *client, struct module *mod,
452 			  const char *mod_name);
453 
454 /**
455  * ntb_unregister_client() - unregister a client for interest in ntb devices
456  * @client:	Client context.
457  *
458  * The client will be removed from the list of clients interested in ntb
459  * devices.  If any ntb devices are associated with the client, the client will
460  * be notified to remove those devices.
461  */
462 void ntb_unregister_client(struct ntb_client *client);
463 
464 #define module_ntb_client(__ntb_client) \
465 	module_driver(__ntb_client, ntb_register_client, \
466 			ntb_unregister_client)
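
/*
 * For example, a minimal client built on the hooks above might be registered
 * like the sketch below (the "my_ntb" names are hypothetical and only
 * illustrate the registration flow):
 *
 *	static int my_ntb_probe(struct ntb_client *client, struct ntb_dev *ntb)
 *	{
 *		dev_info(&ntb->dev, "ntb device attached\n");
 *		return 0;
 *	}
 *
 *	static void my_ntb_remove(struct ntb_client *client, struct ntb_dev *ntb)
 *	{
 *		dev_info(&ntb->dev, "ntb device detached\n");
 *	}
 *
 *	static struct ntb_client my_ntb_client = {
 *		.ops = {
 *			.probe = my_ntb_probe,
 *			.remove = my_ntb_remove,
 *		},
 *	};
 *	module_ntb_client(my_ntb_client);
 */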
467 
468 /**
469  * ntb_register_device() - register a ntb device
470  * @ntb:	NTB device context.
471  *
472  * The device will be added to the list of ntb devices.  If any clients are
473  * interested in ntb devices, each client will be notified of the ntb device,
474  * until at most one client accepts the device.
475  *
476  * Return: Zero if the device is registered, otherwise an error number.
477  */
478 int ntb_register_device(struct ntb_dev *ntb);
479 
480 /**
481  * ntb_unregister_device() - unregister a ntb device
482  * @ntb:	NTB device context.
483  *
484  * The device will be removed from the list of ntb devices.  If the ntb device
485  * is associated with a client, the client will be notified to remove the
486  * device.
487  */
488 void ntb_unregister_device(struct ntb_dev *ntb);
489 
490 /**
491  * ntb_set_ctx() - associate a driver context with an ntb device
492  * @ntb:	NTB device context.
493  * @ctx:	Driver context.
494  * @ctx_ops:	Driver context operations.
495  *
496  * Associate a driver context and operations with a ntb device.  The context is
497  * provided by the client driver, and the driver may associate a different
498  * context with each ntb device.
499  *
500  * Return: Zero if the context is associated, otherwise an error number.
501  */
502 int ntb_set_ctx(struct ntb_dev *ntb, void *ctx,
503 		const struct ntb_ctx_ops *ctx_ops);
504 
505 /**
506  * ntb_clear_ctx() - disassociate any driver context from an ntb device
507  * @ntb:	NTB device context.
508  *
509  * Clear any association that may exist between a driver context and the ntb
510  * device.
511  */
512 void ntb_clear_ctx(struct ntb_dev *ntb);
513 
514 /**
515  * ntb_link_event() - notify driver context of a change in link status
516  * @ntb:	NTB device context.
517  *
518  * Notify the driver context that the link status may have changed.  The driver
519  * should call ntb_link_is_up() to get the current status.
520  */
521 void ntb_link_event(struct ntb_dev *ntb);
522 
523 /**
524  * ntb_db_event() - notify driver context of a doorbell event
525  * @ntb:	NTB device context.
526  * @vector:	Interrupt vector number.
527  *
528  * Notify the driver context of a doorbell event.  If hardware supports
529  * multiple interrupt vectors for doorbells, the vector number indicates which
530  * vector received the interrupt.  The vector number is relative to the first
531  * vector used for doorbells, starting at zero, and must be less than
532  * ntb_db_vector_count().  The driver may call ntb_db_read() to check which
533  * doorbell bits need service, and ntb_db_vector_mask() to determine which of
534  * those bits are associated with the vector number.
535  */
536 void ntb_db_event(struct ntb_dev *ntb, int vector);
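
/*
 * For example, a client's db_event handler typically follows the recipe
 * above.  This is an illustrative sketch; "struct my_ctx" and its "ntb"
 * member are hypothetical:
 *
 *	static void my_db_event(void *ctx, int vector)
 *	{
 *		struct my_ctx *mc = ctx;
 *		u64 db_bits;
 *
 *		db_bits = ntb_db_read(mc->ntb) &
 *			  ntb_db_vector_mask(mc->ntb, vector);
 *		ntb_db_clear(mc->ntb, db_bits);	// re-arm the serviced bits
 *		// ... service whatever db_bits signalled ...
 *	}
 */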
537 
538 /**
539  * ntb_msg_event() - notify driver context of a message event
540  * @ntb:	NTB device context.
541  *
542  * Notify the driver context of a message event.  If hardware supports
543  * message registers, this event indicates that a new message has arrived in
544  * an inbound message register, or that the last sent message couldn't be delivered.
545  * The events can be masked/unmasked by the methods ntb_msg_set_mask() and
546  * ntb_msg_clear_mask().
547  */
548 void ntb_msg_event(struct ntb_dev *ntb);
549 
550 /**
551  * ntb_default_port_number() - get the default local port number
552  * @ntb:	NTB device context.
553  *
554  * If the hardware driver doesn't provide the port_number() callback, the NTB
555  * device is assumed to have just two ports, so this method returns the default
556  * local port number according to the topology.
557  *
558  * NOTE Don't call this method directly. The ntb_port_number() function should
559  * be used instead.
560  *
561  * Return: the default local port number
562  */
563 int ntb_default_port_number(struct ntb_dev *ntb);
564 
565 /**
566  * ntb_default_peer_port_count() - get the default number of peer device ports
567  * @ntb:	NTB device context.
568  *
569  * By default the hardware driver supports just one peer device.
570  *
571  * NOTE Don't call this method directly. The ntb_peer_port_count() function
572  * should be used instead.
573  *
574  * Return: the default number of peer ports
575  */
576 int ntb_default_peer_port_count(struct ntb_dev *ntb);
577 
578 /**
579  * ntb_default_peer_port_number() - get the default peer port by given index
580  * @ntb:	NTB device context.
581  * @idx:	Peer port index (should not differ from zero).
582  *
583  * By default the hardware driver supports just one peer device, so this method
584  * shall return the corresponding value from enum ntb_default_port.
585  *
586  * NOTE Don't call this method directly. The ntb_peer_port_number() function
587  * should be used instead.
588  *
589  * Return: the peer device port or negative value indicating an error
590  */
591 int ntb_default_peer_port_number(struct ntb_dev *ntb, int pidx);
592 
593 /**
594  * ntb_default_peer_port_idx() - get the default peer device port index by
595  *				 given port number
596  * @ntb:	NTB device context.
597  * @port:	Peer port number (should be one of enum ntb_default_port).
598  *
599  * By default the hardware driver supports just one peer device, so as long
600  * as the specified port argument is a peer port from enum ntb_default_port,
601  * the return value shall be zero.
602  *
603  * NOTE Don't call this method directly. The ntb_peer_port_idx() function
604  * should be used instead.
605  *
606  * Return: the peer port index or negative value indicating an error
607  */
608 int ntb_default_peer_port_idx(struct ntb_dev *ntb, int port);
609 
610 /**
611  * ntb_port_number() - get the local port number
612  * @ntb:	NTB device context.
613  *
614  * Hardware must support at least a simple two-port ntb connection.
615  *
616  * Return: the local port number
617  */
618 static inline int ntb_port_number(struct ntb_dev *ntb)
619 {
620 	if (!ntb->ops->port_number)
621 		return ntb_default_port_number(ntb);
622 
623 	return ntb->ops->port_number(ntb);
624 }
625 /**
626  * ntb_peer_port_count() - get the number of peer device ports
627  * @ntb:	NTB device context.
628  *
629  * Hardware may support access to the memory of several remote domains
630  * over multi-port NTB devices. This method returns the number of peers the
631  * local device can share memory with.
632  *
633  * Return: the number of peer ports
634  */
635 static inline int ntb_peer_port_count(struct ntb_dev *ntb)
636 {
637 	if (!ntb->ops->peer_port_count)
638 		return ntb_default_peer_port_count(ntb);
639 
640 	return ntb->ops->peer_port_count(ntb);
641 }
642 
643 /**
644  * ntb_peer_port_number() - get the peer port by given index
645  * @ntb:	NTB device context.
646  * @pidx:	Peer port index.
647  *
648  * Peer ports are enumerated contiguously by the NTB API, so this method
649  * retrieves the real port number for a given port index.
650  *
651  * Return: the peer device port or negative value indicating an error
652  */
653 static inline int ntb_peer_port_number(struct ntb_dev *ntb, int pidx)
654 {
655 	if (!ntb->ops->peer_port_number)
656 		return ntb_default_peer_port_number(ntb, pidx);
657 
658 	return ntb->ops->peer_port_number(ntb, pidx);
659 }
660 
661 /**
662  * ntb_logical_port_number() - get the logical port number of the local port
663  * @ntb:	NTB device context.
664  *
665  * The Logical Port Number is defined to be a unique number for each
666  * port starting from zero through to the number of ports minus one.
667  * This is in contrast to the Port Number where each port can be assigned
668  * any unique physical number by the hardware.
669  *
670  * The logical port number is useful for calculating the resource indexes
671  * used by peers.
672  *
673  * Return: the logical port number or negative value indicating an error
674  */
675 static inline int ntb_logical_port_number(struct ntb_dev *ntb)
676 {
677 	int lport = ntb_port_number(ntb);
678 	int pidx;
679 
680 	if (lport < 0)
681 		return lport;
682 
683 	for (pidx = 0; pidx < ntb_peer_port_count(ntb); pidx++)
684 		if (lport <= ntb_peer_port_number(ntb, pidx))
685 			return pidx;
686 
687 	return pidx;
688 }
689 
690 /**
691  * ntb_peer_logical_port_number() - get the logical peer port by given index
692  * @ntb:	NTB device context.
693  * @pidx:	Peer port index.
694  *
695  * The Logical Port Number is defined to be a unique number for each
696  * port starting from zero through to the number of ports minus one.
697  * This is in contrast to the Port Number where each port can be assigned
698  * any unique physical number by the hardware.
699  *
700  * The logical port number is useful for calculating the resource indexes
701  * used by peers.
702  *
703  * Return: the peer's logical port number or negative value indicating an error
704  */
705 static inline int ntb_peer_logical_port_number(struct ntb_dev *ntb, int pidx)
706 {
707 	if (ntb_peer_port_number(ntb, pidx) < ntb_port_number(ntb))
708 		return pidx;
709 	else
710 		return pidx + 1;
711 }
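
/*
 * A worked example of the numbering above, with hypothetical port values:
 * given three ports whose hardware Port Numbers are 2, 5 and 7, the Logical
 * Port Numbers are 0, 1 and 2 respectively.  On the device whose local port
 * is 5, ntb_logical_port_number() returns 1, while
 * ntb_peer_logical_port_number() returns 0 for the peer index of port 2 and
 * 2 for the peer index of port 7.
 */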
712 
713 /**
714  * ntb_peer_port_idx() - get the peer device port index by given port number
715  * @ntb:	NTB device context.
716  * @port:	Peer port number.
717  *
718  * Inverse operation of ntb_peer_port_number(): retrieve the peer port index
719  * for a given port number.
720  *
721  * Return: the peer port index or negative value indicating an error
722  */
723 static inline int ntb_peer_port_idx(struct ntb_dev *ntb, int port)
724 {
725 	if (!ntb->ops->peer_port_idx)
726 		return ntb_default_peer_port_idx(ntb, port);
727 
728 	return ntb->ops->peer_port_idx(ntb, port);
729 }
730 
731 /**
732  * ntb_link_is_up() - get the current ntb link state
733  * @ntb:	NTB device context.
734  * @speed:	OUT - The link speed expressed as PCIe generation number.
735  * @width:	OUT - The link width expressed as the number of PCIe lanes.
736  *
737  * Get the current state of the ntb link.  It is recommended to query the link
738  * state once after every link event.  It is safe to query the link state in
739  * the context of the link event callback.
740  *
741  * Return: bitfield of indexed ports link state: bit is set/cleared if the
742  *         link is up/down respectively.
743  */
744 static inline u64 ntb_link_is_up(struct ntb_dev *ntb,
745 				 enum ntb_speed *speed, enum ntb_width *width)
746 {
747 	return ntb->ops->link_is_up(ntb, speed, width);
748 }
749 
750 /**
751  * ntb_link_enable() - enable the local port ntb connection
752  * @ntb:	NTB device context.
753  * @max_speed:	The maximum link speed expressed as PCIe generation number.
754  * @max_width:	The maximum link width expressed as the number of PCIe lanes.
755  *
756  * Enable the NTB/PCIe link on the local or remote (for bridge-to-bridge
757  * topology) side of the bridge. If supported, the ntb device should train
758  * the link to its maximum speed and width, or the requested speed and width,
759  * whichever is smaller. Some hardware doesn't support PCIe link training, in
760  * which case the last two arguments are ignored.
761  *
762  * Return: Zero on success, otherwise an error number.
763  */
764 static inline int ntb_link_enable(struct ntb_dev *ntb,
765 				  enum ntb_speed max_speed,
766 				  enum ntb_width max_width)
767 {
768 	return ntb->ops->link_enable(ntb, max_speed, max_width);
769 }
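
/*
 * For example, the usual bring-up sequence built on the call above might look
 * like this illustrative sketch (hypothetical helper, typically run from a
 * client's probe):
 *
 *	static int my_bring_up(struct ntb_dev *ntb)
 *	{
 *		enum ntb_speed speed;
 *		enum ntb_width width;
 *		int rc;
 *
 *		rc = ntb_link_enable(ntb, NTB_SPEED_AUTO, NTB_WIDTH_AUTO);
 *		if (rc)
 *			return rc;
 *
 *		// Link training completes asynchronously; the result is
 *		// reported through the link_event callback, and the current
 *		// state can be polled with ntb_link_is_up().
 *		if (ntb_link_is_up(ntb, &speed, &width) & BIT_ULL(NTB_DEF_PEER_IDX))
 *			dev_info(&ntb->dev, "link up: gen%d x%d\n", speed, width);
 *
 *		return 0;
 *	}
 */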
770 
771 /**
772  * ntb_link_disable() - disable the local port ntb connection
773  * @ntb:	NTB device context.
774  *
775  * Disable the link on the local or remote (for b2b topology) side of the ntb.
776  * The ntb device should disable the link.  Returning from this call must
777  * indicate that a barrier has passed: no more writes may pass in either
778  * direction across the link, unless this call returns an error
779  * number.
780  *
781  * Return: Zero on success, otherwise an error number.
782  */
783 static inline int ntb_link_disable(struct ntb_dev *ntb)
784 {
785 	return ntb->ops->link_disable(ntb);
786 }
787 
788 /**
789  * ntb_mw_count() - get the number of inbound memory windows, which could
790  *                  be created for a specified peer device
791  * @ntb:	NTB device context.
792  * @pidx:	Port index of peer device.
793  *
794  * Hardware and topology may support a different number of memory windows.
795  * Moreover, different peer devices can support different numbers of memory
796  * windows. Simply put, this method returns the number of possible inbound
797  * memory windows to share with the specified peer device. Note: this may return
798  * zero if the link is not up yet.
799  *
800  * Return: the number of memory windows.
801  */
802 static inline int ntb_mw_count(struct ntb_dev *ntb, int pidx)
803 {
804 	return ntb->ops->mw_count(ntb, pidx);
805 }
806 
807 /**
808  * ntb_mw_get_align() - get the restriction parameters of inbound memory window
809  * @ntb:	NTB device context.
810  * @pidx:	Port index of peer device.
811  * @widx:	Memory window index.
812  * @addr_align:	OUT - the base alignment for translating the memory window
813  * @size_align:	OUT - the size alignment for translating the memory window
814  * @size_max:	OUT - the maximum size of the memory window
815  *
816  * Get the alignments of an inbound memory window with specified index.
817  * NULL may be given for any output parameter if the value is not needed.
818  * The alignment and size parameters may be used for allocation of proper
819  * shared memory. Note: this must only be called when the link is up.
820  *
821  * Return: Zero on success, otherwise a negative error number.
822  */
823 static inline int ntb_mw_get_align(struct ntb_dev *ntb, int pidx, int widx,
824 				   resource_size_t *addr_align,
825 				   resource_size_t *size_align,
826 				   resource_size_t *size_max)
827 {
828 	if (!(ntb_link_is_up(ntb, NULL, NULL) & BIT_ULL(pidx)))
829 		return -ENOTCONN;
830 
831 	return ntb->ops->mw_get_align(ntb, pidx, widx, addr_align, size_align,
832 				      size_max);
833 }
834 
835 /**
836  * ntb_mw_set_trans() - set the translation of an inbound memory window
837  * @ntb:	NTB device context.
838  * @pidx:	Port index of peer device.
839  * @widx:	Memory window index.
840  * @addr:	The dma address of local memory to expose to the peer.
841  * @size:	The size of the local memory to expose to the peer.
842  *
843  * Set the translation of a memory window.  The peer may access local memory
844  * through the window starting at the address, up to the size.  The address
845  * and size must be aligned in compliance with restrictions of
846  * ntb_mw_get_align(). The region size should not exceed the size_max parameter
847  * of that method.
848  *
849  * This method may not be implemented due to the hardware specific memory
850  * windows interface.
851  *
852  * Return: Zero on success, otherwise an error number.
853  */
854 static inline int ntb_mw_set_trans(struct ntb_dev *ntb, int pidx, int widx,
855 				   dma_addr_t addr, resource_size_t size)
856 {
857 	if (!ntb->ops->mw_set_trans)
858 		return 0;
859 
860 	return ntb->ops->mw_set_trans(ntb, pidx, widx, addr, size);
861 }
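
/*
 * For example, the inbound window setup flow described above is sketched
 * below.  The helper is hypothetical, error unwinding is trimmed, and it
 * simply allocates size_max bytes, assuming that size already satisfies the
 * alignment constraints (linux/dma-mapping.h is assumed for
 * dma_alloc_coherent()):
 *
 *	static int my_setup_inbound_mw(struct ntb_dev *ntb, int pidx, int widx)
 *	{
 *		resource_size_t addr_align, size_align, size_max;
 *		dma_addr_t dma_addr;
 *		void *buf;
 *		int rc;
 *
 *		rc = ntb_mw_get_align(ntb, pidx, widx, &addr_align,
 *				      &size_align, &size_max);
 *		if (rc)
 *			return rc;
 *
 *		// Coherent DMA memory the peer will be able to reach through
 *		// the window once the translation is programmed.
 *		buf = dma_alloc_coherent(&ntb->pdev->dev, size_max, &dma_addr,
 *					 GFP_KERNEL);
 *		if (!buf)
 *			return -ENOMEM;
 *
 *		return ntb_mw_set_trans(ntb, pidx, widx, dma_addr, size_max);
 *	}
 */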
862 
863 /**
864  * ntb_mw_clear_trans() - clear the translation address of an inbound memory
865  *                        window
866  * @ntb:	NTB device context.
867  * @pidx:	Port index of peer device.
868  * @widx:	Memory window index.
869  *
870  * Clear the translation of an inbound memory window.  The peer may no longer
871  * access local memory through the window.
872  *
873  * Return: Zero on success, otherwise an error number.
874  */
875 static inline int ntb_mw_clear_trans(struct ntb_dev *ntb, int pidx, int widx)
876 {
877 	if (!ntb->ops->mw_clear_trans)
878 		return ntb_mw_set_trans(ntb, pidx, widx, 0, 0);
879 
880 	return ntb->ops->mw_clear_trans(ntb, pidx, widx);
881 }
882 
883 /**
884  * ntb_peer_mw_count() - get the number of outbound memory windows, which could
885  *                       be mapped to access a shared memory
886  * @ntb:	NTB device context.
887  *
888  * Hardware and topology may support a different number of memory windows.
889  * This method returns the number of outbound memory windows supported by
890  * local device.
891  *
892  * Return: the number of memory windows.
893  */
894 static inline int ntb_peer_mw_count(struct ntb_dev *ntb)
895 {
896 	return ntb->ops->peer_mw_count(ntb);
897 }
898 
899 /**
900  * ntb_peer_mw_get_addr() - get map address of an outbound memory window
901  * @ntb:	NTB device context.
902  * @widx:	Memory window index (within ntb_peer_mw_count() return value).
903  * @base:	OUT - the base address of mapping region.
904  * @size:	OUT - the size of mapping region.
905  *
906  * Get base and size of memory region to map.  NULL may be given for any output
907  * parameter if the value is not needed.  The base and size may be used for
908  * mapping the memory window, to access the peer memory.
909  *
910  * Return: Zero on success, otherwise a negative error number.
911  */
912 static inline int ntb_peer_mw_get_addr(struct ntb_dev *ntb, int widx,
913 				      phys_addr_t *base, resource_size_t *size)
914 {
915 	return ntb->ops->peer_mw_get_addr(ntb, widx, base, size);
916 }
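
/*
 * For example, on the outbound side the base and size retrieved above are
 * typically handed to ioremap_wc() so the shared buffer can be accessed with
 * normal MMIO accessors.  Illustrative sketch with a hypothetical helper:
 *
 *	static void __iomem *my_map_peer_mw(struct ntb_dev *ntb, int widx,
 *					    resource_size_t *size)
 *	{
 *		phys_addr_t base;
 *
 *		if (ntb_peer_mw_get_addr(ntb, widx, &base, size))
 *			return NULL;
 *
 *		// A write-combined mapping is the usual choice for bulk data
 *		// movement through an NTB window.
 *		return ioremap_wc(base, *size);
 *	}
 */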
917 
918 /**
919  * ntb_peer_mw_set_trans() - set a translation address of a memory window
920  *                           retrieved from a peer device
921  * @ntb:	NTB device context.
922  * @pidx:	Port index of the peer device the translation address was received from.
923  * @widx:	Memory window index.
924  * @addr:	The dma address of the shared memory to access.
925  * @size:	The size of the shared memory to access.
926  *
927  * Set the translation of an outbound memory window.  The local device may
928  * access the shared memory allocated by the peer device that sent the address.
929  *
930  * This method may not be implemented due to the hardware specific memory
931  * windows interface, so a translation address can only be set on the side
932  * where the shared memory (inbound memory windows) is allocated.
933  *
934  * Return: Zero on success, otherwise an error number.
935  */
936 static inline int ntb_peer_mw_set_trans(struct ntb_dev *ntb, int pidx, int widx,
937 					u64 addr, resource_size_t size)
938 {
939 	if (!ntb->ops->peer_mw_set_trans)
940 		return 0;
941 
942 	return ntb->ops->peer_mw_set_trans(ntb, pidx, widx, addr, size);
943 }
944 
945 /**
946  * ntb_peer_mw_clear_trans() - clear the translation address of an outbound
947  *                             memory window
948  * @ntb:	NTB device context.
949  * @pidx:	Port index of peer device.
950  * @widx:	Memory window index.
951  *
952  * Clear the translation of an outbound memory window.  The local device may no
953  * longer access the shared memory through the window.
954  *
955  * This method may not be implemented due to the hardware specific memory
956  * windows interface.
957  *
958  * Return: Zero on success, otherwise an error number.
959  */
960 static inline int ntb_peer_mw_clear_trans(struct ntb_dev *ntb, int pidx,
961 					  int widx)
962 {
963 	if (!ntb->ops->peer_mw_clear_trans)
964 		return ntb_peer_mw_set_trans(ntb, pidx, widx, 0, 0);
965 
966 	return ntb->ops->peer_mw_clear_trans(ntb, pidx, widx);
967 }
968 
969 /**
970  * ntb_db_is_unsafe() - check if it is safe to use hardware doorbell
971  * @ntb:	NTB device context.
972  *
973  * It is possible for some ntb hardware to be affected by errata.  Hardware
974  * drivers can advise clients to avoid using doorbells.  Clients may ignore
975  * this advice, though caution is recommended.
976  *
977  * Return: Zero if it is safe to use doorbells, or One if it is not safe.
978  */
979 static inline int ntb_db_is_unsafe(struct ntb_dev *ntb)
980 {
981 	if (!ntb->ops->db_is_unsafe)
982 		return 0;
983 
984 	return ntb->ops->db_is_unsafe(ntb);
985 }
986 
987 /**
988  * ntb_db_valid_mask() - get a mask of doorbell bits supported by the ntb
989  * @ntb:	NTB device context.
990  *
991  * Hardware may support different number or arrangement of doorbell bits.
992  *
993  * Return: A mask of doorbell bits supported by the ntb.
994  */
995 static inline u64 ntb_db_valid_mask(struct ntb_dev *ntb)
996 {
997 	return ntb->ops->db_valid_mask(ntb);
998 }
999 
1000 /**
1001  * ntb_db_vector_count() - get the number of doorbell interrupt vectors
1002  * @ntb:	NTB device context.
1003  *
1004  * Hardware may support different number of interrupt vectors.
1005  *
1006  * Return: The number of doorbell interrupt vectors.
1007  */
1008 static inline int ntb_db_vector_count(struct ntb_dev *ntb)
1009 {
1010 	if (!ntb->ops->db_vector_count)
1011 		return 1;
1012 
1013 	return ntb->ops->db_vector_count(ntb);
1014 }
1015 
1016 /**
1017  * ntb_db_vector_mask() - get a mask of doorbell bits serviced by a vector
1018  * @ntb:	NTB device context.
1019  * @vector:	Doorbell vector number.
1020  *
1021  * Each interrupt vector may have a different number or arrangement of bits.
1022  *
1023  * Return: A mask of doorbell bits serviced by a vector.
1024  */
1025 static inline u64 ntb_db_vector_mask(struct ntb_dev *ntb, int vector)
1026 {
1027 	if (!ntb->ops->db_vector_mask)
1028 		return ntb_db_valid_mask(ntb);
1029 
1030 	return ntb->ops->db_vector_mask(ntb, vector);
1031 }
1032 
1033 /**
1034  * ntb_db_read() - read the local doorbell register
1035  * @ntb:	NTB device context.
1036  *
1037  * Read the local doorbell register, and return the bits that are set.
1038  *
1039  * Return: The bits currently set in the local doorbell register.
1040  */
1041 static inline u64 ntb_db_read(struct ntb_dev *ntb)
1042 {
1043 	return ntb->ops->db_read(ntb);
1044 }
1045 
1046 /**
1047  * ntb_db_set() - set bits in the local doorbell register
1048  * @ntb:	NTB device context.
1049  * @db_bits:	Doorbell bits to set.
1050  *
1051  * Set bits in the local doorbell register, which may generate a local doorbell
1052  * interrupt.  Bits that were already set must remain set.
1053  *
1054  * This is unusual, and hardware may not support it.
1055  *
1056  * Return: Zero on success, otherwise an error number.
1057  */
1058 static inline int ntb_db_set(struct ntb_dev *ntb, u64 db_bits)
1059 {
1060 	if (!ntb->ops->db_set)
1061 		return -EINVAL;
1062 
1063 	return ntb->ops->db_set(ntb, db_bits);
1064 }
1065 
1066 /**
1067  * ntb_db_clear() - clear bits in the local doorbell register
1068  * @ntb:	NTB device context.
1069  * @db_bits:	Doorbell bits to clear.
1070  *
1071  * Clear bits in the local doorbell register, arming the bits for the next
1072  * doorbell.
1073  *
1074  * Return: Zero on success, otherwise an error number.
1075  */
1076 static inline int ntb_db_clear(struct ntb_dev *ntb, u64 db_bits)
1077 {
1078 	return ntb->ops->db_clear(ntb, db_bits);
1079 }
1080 
1081 /**
1082  * ntb_db_read_mask() - read the local doorbell mask
1083  * @ntb:	NTB device context.
1084  *
1085  * Read the local doorbell mask register, and return the bits that are set.
1086  *
1087  * This is unusual, though hardware is likely to support it.
1088  *
1089  * Return: The bits currently set in the local doorbell mask register.
1090  */
1091 static inline u64 ntb_db_read_mask(struct ntb_dev *ntb)
1092 {
1093 	if (!ntb->ops->db_read_mask)
1094 		return 0;
1095 
1096 	return ntb->ops->db_read_mask(ntb);
1097 }
1098 
1099 /**
1100  * ntb_db_set_mask() - set bits in the local doorbell mask
1101  * @ntb:	NTB device context.
1102  * @db_bits:	Doorbell mask bits to set.
1103  *
1104  * Set bits in the local doorbell mask register, preventing doorbell interrupts
1105  * from being generated for those doorbell bits.  Bits that were already set
1106  * must remain set.
1107  *
1108  * Return: Zero on success, otherwise an error number.
1109  */
1110 static inline int ntb_db_set_mask(struct ntb_dev *ntb, u64 db_bits)
1111 {
1112 	return ntb->ops->db_set_mask(ntb, db_bits);
1113 }
1114 
1115 /**
1116  * ntb_db_clear_mask() - clear bits in the local doorbell mask
1117  * @ntb:	NTB device context.
1118  * @db_bits:	Doorbell bits to clear.
1119  *
1120  * Clear bits in the local doorbell mask register, allowing doorbell interrupts
1121  * to be generated for those doorbell bits.  If a doorbell bit is already
1122  * set at the time the mask is cleared, and the corresponding mask bit is
1123  * changed from set to clear, then the ntb driver must ensure that
1124  * ntb_db_event() is called.  If the hardware does not generate the interrupt
1125  * on clearing the mask bit, then the driver must call ntb_db_event() anyway.
1126  *
1127  * Return: Zero on success, otherwise an error number.
1128  */
1129 static inline int ntb_db_clear_mask(struct ntb_dev *ntb, u64 db_bits)
1130 {
1131 	return ntb->ops->db_clear_mask(ntb, db_bits);
1132 }
1133 
1134 /**
1135  * ntb_peer_db_addr() - address and size of the peer doorbell register
1136  * @ntb:	NTB device context.
1137  * @db_addr:	OUT - The address of the peer doorbell register.
1138  * @db_size:	OUT - The number of bytes to write the peer doorbell register.
1139  * @db_data:	OUT - The data to be written to the peer doorbell register
1140  * @db_bit:		Doorbell bit number
1141  *
1142  * Return the address of the peer doorbell register.  This may be used, for
1143  * example, by drivers that offload memory copy operations to a dma engine.
1144  * The drivers may wish to ring the peer doorbell at the completion of memory
1145  * copy operations.  For efficiency, and to simplify ordering of operations
1146  * between the dma memory copies and the ringing doorbell, the driver may
1147  * append one additional dma memory copy with the doorbell register as the
1148  * destination, after the memory copy operations.
1149  *
1150  * Return: Zero on success, otherwise an error number.
1151  */
1152 static inline int ntb_peer_db_addr(struct ntb_dev *ntb,
1153 				   phys_addr_t *db_addr,
1154 				   resource_size_t *db_size,
1155 				   u64 *db_data, int db_bit)
1156 {
1157 	if (!ntb->ops->peer_db_addr)
1158 		return -EINVAL;
1159 
1160 	return ntb->ops->peer_db_addr(ntb, db_addr, db_size, db_data, db_bit);
1161 }
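
/*
 * A heavily trimmed sketch of the DMA-rung doorbell described above.  It
 * assumes a dmaengine memcpy channel "chan" is already set up and that
 * "dma_src" points at a small DMA-mapped buffer holding db_data; descriptor
 * submission and issue_pending are omitted, and all names outside this
 * header are illustrative:
 *
 *	phys_addr_t db_addr;
 *	resource_size_t db_size;
 *	u64 db_data;
 *	dma_addr_t dma_db;
 *
 *	if (ntb_peer_db_addr(ntb, &db_addr, &db_size, &db_data, db_bit) == 0) {
 *		dma_db = dma_map_resource(chan->device->dev, db_addr, db_size,
 *					  DMA_TO_DEVICE, 0);
 *		// Queue this copy last, after the payload copies, so the
 *		// peer is only notified once the data has landed.
 *		dmaengine_prep_dma_memcpy(chan, dma_db, dma_src, db_size, 0);
 *	}
 */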
1162 
1163 /**
1164  * ntb_peer_db_read() - read the peer doorbell register
1165  * @ntb:	NTB device context.
1166  *
1167  * Read the peer doorbell register, and return the bits that are set.
1168  *
1169  * This is unusual, and hardware may not support it.
1170  *
1171  * Return: The bits currently set in the peer doorbell register.
1172  */
1173 static inline u64 ntb_peer_db_read(struct ntb_dev *ntb)
1174 {
1175 	if (!ntb->ops->peer_db_read)
1176 		return 0;
1177 
1178 	return ntb->ops->peer_db_read(ntb);
1179 }
1180 
1181 /**
1182  * ntb_peer_db_set() - set bits in the peer doorbell register
1183  * @ntb:	NTB device context.
1184  * @db_bits:	Doorbell bits to set.
1185  *
1186  * Set bits in the peer doorbell register, which may generate a peer doorbell
1187  * interrupt.  Bits that were already set must remain set.
1188  *
1189  * Return: Zero on success, otherwise an error number.
1190  */
1191 static inline int ntb_peer_db_set(struct ntb_dev *ntb, u64 db_bits)
1192 {
1193 	return ntb->ops->peer_db_set(ntb, db_bits);
1194 }
1195 
1196 /**
1197  * ntb_peer_db_clear() - clear bits in the peer doorbell register
1198  * @ntb:	NTB device context.
1199  * @db_bits:	Doorbell bits to clear.
1200  *
1201  * Clear bits in the peer doorbell register, arming the bits for the next
1202  * doorbell.
1203  *
1204  * This is unusual, and hardware may not support it.
1205  *
1206  * Return: Zero on success, otherwise an error number.
1207  */
1208 static inline int ntb_peer_db_clear(struct ntb_dev *ntb, u64 db_bits)
1209 {
1210 	if (!ntb->ops->peer_db_clear)
1211 		return -EINVAL;
1212 
1213 	return ntb->ops->peer_db_clear(ntb, db_bits);
1214 }
1215 
1216 /**
1217  * ntb_peer_db_read_mask() - read the peer doorbell mask
1218  * @ntb:	NTB device context.
1219  *
1220  * Read the peer doorbell mask register, and return the bits that are set.
1221  *
1222  * This is unusual, and hardware may not support it.
1223  *
1224  * Return: The bits currently set in the peer doorbell mask register.
1225  */
1226 static inline u64 ntb_peer_db_read_mask(struct ntb_dev *ntb)
1227 {
1228 	if (!ntb->ops->peer_db_read_mask)
1229 		return 0;
1230 
1231 	return ntb->ops->peer_db_read_mask(ntb);
1232 }
1233 
1234 /**
1235  * ntb_peer_db_set_mask() - set bits in the peer doorbell mask
1236  * @ntb:	NTB device context.
1237  * @db_bits:	Doorbell mask bits to set.
1238  *
1239  * Set bits in the peer doorbell mask register, preventing doorbell interrupts
1240  * from being generated for those doorbell bits.  Bits that were already set
1241  * must remain set.
1242  *
1243  * This is unusual, and hardware may not support it.
1244  *
1245  * Return: Zero on success, otherwise an error number.
1246  */
1247 static inline int ntb_peer_db_set_mask(struct ntb_dev *ntb, u64 db_bits)
1248 {
1249 	if (!ntb->ops->peer_db_set_mask)
1250 		return -EINVAL;
1251 
1252 	return ntb->ops->peer_db_set_mask(ntb, db_bits);
1253 }
1254 
1255 /**
1256  * ntb_peer_db_clear_mask() - clear bits in the peer doorbell mask
1257  * @ntb:	NTB device context.
1258  * @db_bits:	Doorbell bits to clear.
1259  *
1260  * Clear bits in the peer doorbell mask register, allowing doorbell interrupts
1261  * to be generated for those doorbell bits.  If the hardware does not
1262  * generate the interrupt on clearing the mask bit, then the driver should not
1263  * implement this function!
1264  *
1265  * This is unusual, and hardware may not support it.
1266  *
1267  * Return: Zero on success, otherwise an error number.
1268  */
1269 static inline int ntb_peer_db_clear_mask(struct ntb_dev *ntb, u64 db_bits)
1270 {
1271 	if (!ntb->ops->peer_db_clear_mask)
1272 		return -EINVAL;
1273 
1274 	return ntb->ops->peer_db_clear_mask(ntb, db_bits);
1275 }
1276 
1277 /**
1278  * ntb_spad_is_unsafe() - check if it is safe to use the hardware scratchpads
1279  * @ntb:	NTB device context.
1280  *
1281  * It is possible for some ntb hardware to be affected by errata.  Hardware
1282  * drivers can advise clients to avoid using scratchpads.  Clients may ignore
1283  * this advice, though caution is recommended.
1284  *
1285  * Return: Zero if it is safe to use scratchpads, or One if it is not safe.
1286  */
1287 static inline int ntb_spad_is_unsafe(struct ntb_dev *ntb)
1288 {
1289 	if (!ntb->ops->spad_is_unsafe)
1290 		return 0;
1291 
1292 	return ntb->ops->spad_is_unsafe(ntb);
1293 }
1294 
1295 /**
1296  * ntb_spad_count() - get the number of scratchpads
1297  * @ntb:	NTB device context.
1298  *
1299  * Hardware and topology may support a different number of scratchpads,
1300  * although it must be the same for all ports of an NTB device.
1301  *
1302  * Return: the number of scratchpads.
1303  */
1304 static inline int ntb_spad_count(struct ntb_dev *ntb)
1305 {
1306 	if (!ntb->ops->spad_count)
1307 		return 0;
1308 
1309 	return ntb->ops->spad_count(ntb);
1310 }
1311 
1312 /**
1313  * ntb_spad_read() - read the local scratchpad register
1314  * @ntb:	NTB device context.
1315  * @sidx:	Scratchpad index.
1316  *
1317  * Read the local scratchpad register, and return the value.
1318  *
1319  * Return: The value of the local scratchpad register.
1320  */
1321 static inline u32 ntb_spad_read(struct ntb_dev *ntb, int sidx)
1322 {
1323 	if (!ntb->ops->spad_read)
1324 		return ~(u32)0;
1325 
1326 	return ntb->ops->spad_read(ntb, sidx);
1327 }
1328 
1329 /**
1330  * ntb_spad_write() - write the local scratchpad register
1331  * @ntb:	NTB device context.
1332  * @sidx:	Scratchpad index.
1333  * @val:	Scratchpad value.
1334  *
1335  * Write the value to the local scratchpad register.
1336  *
1337  * Return: Zero on success, otherwise an error number.
1338  */
1339 static inline int ntb_spad_write(struct ntb_dev *ntb, int sidx, u32 val)
1340 {
1341 	if (!ntb->ops->spad_write)
1342 		return -EINVAL;
1343 
1344 	return ntb->ops->spad_write(ntb, sidx, val);
1345 }
1346 
1347 /**
1348  * ntb_peer_spad_addr() - address of the peer scratchpad register
1349  * @ntb:	NTB device context.
1350  * @pidx:	Port index of peer device.
1351  * @sidx:	Scratchpad index.
1352  * @spad_addr:	OUT - The address of the peer scratchpad register.
1353  *
1354  * Return the address of the peer scratchpad register.  This may be used, for
1355  * example, by drivers that offload memory copy operations to a dma engine.
1356  *
1357  * Return: Zero on success, otherwise an error number.
1358  */
1359 static inline int ntb_peer_spad_addr(struct ntb_dev *ntb, int pidx, int sidx,
1360 				     phys_addr_t *spad_addr)
1361 {
1362 	if (!ntb->ops->peer_spad_addr)
1363 		return -EINVAL;
1364 
1365 	return ntb->ops->peer_spad_addr(ntb, pidx, sidx, spad_addr);
1366 }
1367 
1368 /**
1369  * ntb_peer_spad_read() - read the peer scratchpad register
1370  * @ntb:	NTB device context.
1371  * @pidx:	Port index of peer device.
1372  * @sidx:	Scratchpad index.
1373  *
1374  * Read the peer scratchpad register, and return the value.
1375  *
1376  * Return: The value of the peer scratchpad register.
1377  */
1378 static inline u32 ntb_peer_spad_read(struct ntb_dev *ntb, int pidx, int sidx)
1379 {
1380 	if (!ntb->ops->peer_spad_read)
1381 		return ~(u32)0;
1382 
1383 	return ntb->ops->peer_spad_read(ntb, pidx, sidx);
1384 }
1385 
1386 /**
1387  * ntb_peer_spad_write() - write the peer scratchpad register
1388  * @ntb:	NTB device context.
1389  * @pidx:	Port index of peer device.
1390  * @sidx:	Scratchpad index.
1391  * @val:	Scratchpad value.
1392  *
1393  * Write the value to the peer scratchpad register.
1394  *
1395  * Return: Zero on success, otherwise an error number.
1396  */
1397 static inline int ntb_peer_spad_write(struct ntb_dev *ntb, int pidx, int sidx,
1398 				      u32 val)
1399 {
1400 	if (!ntb->ops->peer_spad_write)
1401 		return -EINVAL;
1402 
1403 	return ntb->ops->peer_spad_write(ntb, pidx, sidx, val);
1404 }
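
/*
 * For example, scratchpads are commonly used for a simple handshake built
 * from the calls above.  Illustrative fragment with hypothetical values:
 *
 *	// local side: publish a value for the peer to pick up
 *	ntb_peer_spad_write(ntb, NTB_DEF_PEER_IDX, 0, my_mw_count);
 *
 *	// peer side (typically from its link_event or db_event handler):
 *	// the value lands in the peer's local scratchpad 0
 *	u32 remote_mw_count = ntb_spad_read(ntb, 0);
 */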
1405 
1406 /**
1407  * ntb_msg_count() - get the number of message registers
1408  * @ntb:	NTB device context.
1409  *
1410  * Hardware may support a different number of message registers.
1411  *
1412  * Return: the number of message registers.
1413  */
1414 static inline int ntb_msg_count(struct ntb_dev *ntb)
1415 {
1416 	if (!ntb->ops->msg_count)
1417 		return 0;
1418 
1419 	return ntb->ops->msg_count(ntb);
1420 }
1421 
1422 /**
1423  * ntb_msg_inbits() - get a bitfield of inbound message registers status
1424  * @ntb:	NTB device context.
1425  *
1426  * The method returns the bits of the status and mask registers that relate
1427  * to the inbound message registers.
1428  *
1429  * Return: bitfield of inbound message registers.
1430  */
1431 static inline u64 ntb_msg_inbits(struct ntb_dev *ntb)
1432 {
1433 	if (!ntb->ops->msg_inbits)
1434 		return 0;
1435 
1436 	return ntb->ops->msg_inbits(ntb);
1437 }
1438 
1439 /**
1440  * ntb_msg_outbits() - get a bitfield of outbound message registers status
1441  * @ntb:	NTB device context.
1442  *
1443  * The method returns the bits of the status and mask registers that relate
1444  * to the outbound message registers.
1445  *
1446  * Return: bitfield of outbound message registers.
1447  */
1448 static inline u64 ntb_msg_outbits(struct ntb_dev *ntb)
1449 {
1450 	if (!ntb->ops->msg_outbits)
1451 		return 0;
1452 
1453 	return ntb->ops->msg_outbits(ntb);
1454 }
1455 
1456 /**
1457  * ntb_msg_read_sts() - read the message registers status
1458  * @ntb:	NTB device context.
1459  *
1460  * Read the status of the message registers. Bits related to the inbound and
1461  * outbound message registers can be filtered using the masks retrieved from
1462  * ntb_msg_inbits() and ntb_msg_outbits().
1463  *
1464  * Return: status bits of message registers
1465  */
1466 static inline u64 ntb_msg_read_sts(struct ntb_dev *ntb)
1467 {
1468 	if (!ntb->ops->msg_read_sts)
1469 		return 0;
1470 
1471 	return ntb->ops->msg_read_sts(ntb);
1472 }
1473 
1474 /**
1475  * ntb_msg_clear_sts() - clear status bits of message registers
1476  * @ntb:	NTB device context.
1477  * @sts_bits:	Status bits to clear.
1478  *
1479  * Clear bits in the status register.
1480  *
1481  * Return: Zero on success, otherwise a negative error number.
1482  */
1483 static inline int ntb_msg_clear_sts(struct ntb_dev *ntb, u64 sts_bits)
1484 {
1485 	if (!ntb->ops->msg_clear_sts)
1486 		return -EINVAL;
1487 
1488 	return ntb->ops->msg_clear_sts(ntb, sts_bits);
1489 }
1490 
/**
 * ntb_msg_set_mask() - set mask of message register status bits
 * @ntb:	NTB device context.
 * @mask_bits:	Mask bits.
 *
 * Mask the message register status bits so they do not raise the message
 * event.
 *
 * Return: Zero on success, otherwise a negative error number.
 */
static inline int ntb_msg_set_mask(struct ntb_dev *ntb, u64 mask_bits)
{
	if (!ntb->ops->msg_set_mask)
		return -EINVAL;

	return ntb->ops->msg_set_mask(ntb, mask_bits);
}

/**
 * ntb_msg_clear_mask() - clear message registers mask
 * @ntb:	NTB device context.
 * @mask_bits:	Mask bits to clear.
 *
 * Clear bits in the message events mask register.
 *
 * Return: Zero on success, otherwise a negative error number.
 */
static inline int ntb_msg_clear_mask(struct ntb_dev *ntb, u64 mask_bits)
{
	if (!ntb->ops->msg_clear_mask)
		return -EINVAL;

	return ntb->ops->msg_clear_mask(ntb, mask_bits);
}

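/*
 * Illustrative sketch: a driver might temporarily mask inbound message events
 * while draining the registers and unmask them afterwards. Using the whole
 * inbound bitfield here is just an example; any subset of the status bits
 * could be masked.
 *
 *	u64 inbits = ntb_msg_inbits(ntb);
 *
 *	ntb_msg_set_mask(ntb, inbits);
 *	... drain pending messages ...
 *	ntb_msg_clear_mask(ntb, inbits);
 */
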
/**
 * ntb_msg_read() - read inbound message register with specified index
 * @ntb:	NTB device context.
 * @pidx:	OUT - Port index of the peer device the message was retrieved from
 * @midx:	Message register index
 *
 * Read data from the specified message register. The source port index of the
 * message is retrieved as well.
 *
 * Return: The value of the inbound message register.
 */
static inline u32 ntb_msg_read(struct ntb_dev *ntb, int *pidx, int midx)
{
	if (!ntb->ops->msg_read)
		return ~(u32)0;

	return ntb->ops->msg_read(ntb, pidx, midx);
}

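/*
 * Illustrative sketch of an inbound message handler, e.g. run from a client's
 * msg_event callback. It reads the status, pulls the data out of each
 * signalled register and acknowledges the corresponding status bit. The
 * assumption that status bit N belongs to message register N is hardware
 * dependent and only made for the example.
 *
 *	u64 in_sts;
 *	u32 data;
 *	int midx, pidx;
 *
 *	in_sts = ntb_msg_read_sts(ntb) & ntb_msg_inbits(ntb);
 *	for (midx = 0; midx < ntb_msg_count(ntb); midx++) {
 *		if (!(in_sts & BIT_ULL(midx)))
 *			continue;
 *		data = ntb_msg_read(ntb, &pidx, midx);
 *		ntb_msg_clear_sts(ntb, BIT_ULL(midx));
 *	}
 */
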
/**
 * ntb_peer_msg_write() - write data to the specified peer message register
 * @ntb:	NTB device context.
 * @pidx:	Port index of the peer device the message is being sent to
 * @midx:	Message register index
 * @msg:	Data to send
 *
 * Send data to the specified peer device using the given message register.
 * A message event can be raised if the midx register isn't empty when this
 * method is called and the corresponding interrupt isn't masked.
 *
 * Return: Zero on success, otherwise a negative error number.
 */
static inline int ntb_peer_msg_write(struct ntb_dev *ntb, int pidx, int midx,
				     u32 msg)
{
	if (!ntb->ops->peer_msg_write)
		return -EINVAL;

	return ntb->ops->peer_msg_write(ntb, pidx, midx, msg);
}

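/*
 * Illustrative sketch: send one word to peer 0 through message register 0 and
 * report a failure. The indexes and the value are arbitrary; real code would
 * usually check that the outbound register is free first, for instance via
 * ntb_msg_read_sts() filtered by ntb_msg_outbits().
 *
 *	rc = ntb_peer_msg_write(ntb, 0, 0, 0x12345678);
 *	if (rc)
 *		dev_err(&ntb->dev, "message send failed: %d\n", rc);
 */
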
/**
 * ntb_peer_resource_idx() - get a resource index for a given peer idx
 * @ntb:	NTB device context.
 * @pidx:	Peer port index.
 *
 * When constructing a graph of peers, each remote peer must use a different
 * resource index (mw, doorbell, etc) to communicate with every other peer.
 *
 * In a two-peer system, this function should always return 0 so that
 * resource 0 points to the remote peer on both ports.
 *
 * In a five-peer system, this function will return the following matrix:
 *
 * pidx \ port    0    1    2    3    4
 * 0              0    0    1    2    3
 * 1              0    1    1    2    3
 * 2              0    1    2    2    3
 * 3              0    1    2    3    3
 *
 * For example, if this function is used to program the peers' memory
 * windows, port 0 will program MW 0 on all its peers to point to itself.
 * Port 1 will program MW 0 in port 0 to point to itself and MW 1 on all
 * other ports, and so on.
 *
 * For the legacy two-host case, ntb_port_number() and ntb_peer_port_number()
 * both return zero and therefore this function will always return zero.
 * So MW 0 on each host would be programmed to point to the other host.
 *
 * Return: the resource index to use for that peer.
 */
static inline int ntb_peer_resource_idx(struct ntb_dev *ntb, int pidx)
{
	int local_port, peer_port;

	if (pidx >= ntb_peer_port_count(ntb))
		return -EINVAL;

	local_port = ntb_logical_port_number(ntb);
	peer_port = ntb_peer_logical_port_number(ntb, pidx);

	if (peer_port < local_port)
		return local_port - 1;
	else
		return local_port;
}

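/*
 * Illustrative sketch: a multi-peer client could pick the memory window to
 * program for each peer as below (dma_addr and size are assumed to have been
 * prepared by the caller). Reading the matrix above for local logical port 2:
 * the peers at logical ports 0 and 1 (pidx 0 and 1) get index 1, and the
 * peers at ports 3 and 4 (pidx 2 and 3) get index 2, so no two ports end up
 * sharing an inbound resource on any given peer.
 *
 *	for (pidx = 0; pidx < ntb_peer_port_count(ntb); pidx++) {
 *		widx = ntb_peer_resource_idx(ntb, pidx);
 *		if (widx < 0)
 *			continue;
 *		rc = ntb_mw_set_trans(ntb, pidx, widx, dma_addr, size);
 *	}
 */
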
/**
 * ntb_peer_highest_mw_idx() - get a memory window index for a given peer idx
 *	using the highest index memory windows first
 *
 * @ntb:	NTB device context.
 * @pidx:	Peer port index.
 *
 * Like ntb_peer_resource_idx(), except it returns indexes starting from the
 * last memory window index.
 *
 * Return: the resource index to use for that peer.
 */
static inline int ntb_peer_highest_mw_idx(struct ntb_dev *ntb, int pidx)
{
	int ret;

	ret = ntb_peer_resource_idx(ntb, pidx);
	if (ret < 0)
		return ret;

	return ntb_mw_count(ntb, pidx) - ret - 1;
}

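/*
 * Worked example: with four memory windows towards a peer and
 * ntb_peer_resource_idx() returning 1 for that peer, this helper returns
 * 4 - 1 - 1 = 2, i.e. indexes are handed out from the top of the range
 * downwards. The NTB MSI helpers below use this variant, which keeps the
 * windows they claim away from clients that count up from index 0.
 */
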
struct ntb_msi_desc {
	u32 addr_offset;
	u32 data;
};

#ifdef CONFIG_NTB_MSI

int ntb_msi_init(struct ntb_dev *ntb, void (*desc_changed)(void *ctx));
int ntb_msi_setup_mws(struct ntb_dev *ntb);
void ntb_msi_clear_mws(struct ntb_dev *ntb);
int ntbm_msi_request_threaded_irq(struct ntb_dev *ntb, irq_handler_t handler,
				  irq_handler_t thread_fn,
				  const char *name, void *dev_id,
				  struct ntb_msi_desc *msi_desc);
void ntbm_msi_free_irq(struct ntb_dev *ntb, unsigned int irq, void *dev_id);
int ntb_msi_peer_trigger(struct ntb_dev *ntb, int peer,
			 struct ntb_msi_desc *desc);
int ntb_msi_peer_addr(struct ntb_dev *ntb, int peer,
		      struct ntb_msi_desc *desc,
		      phys_addr_t *msi_addr);

#else /* not CONFIG_NTB_MSI */

static inline int ntb_msi_init(struct ntb_dev *ntb,
			       void (*desc_changed)(void *ctx))
{
	return -EOPNOTSUPP;
}
static inline int ntb_msi_setup_mws(struct ntb_dev *ntb)
{
	return -EOPNOTSUPP;
}
static inline void ntb_msi_clear_mws(struct ntb_dev *ntb) {}
static inline int ntbm_msi_request_threaded_irq(struct ntb_dev *ntb,
						irq_handler_t handler,
						irq_handler_t thread_fn,
						const char *name, void *dev_id,
						struct ntb_msi_desc *msi_desc)
{
	return -EOPNOTSUPP;
}
static inline void ntbm_msi_free_irq(struct ntb_dev *ntb, unsigned int irq,
				     void *dev_id) {}
static inline int ntb_msi_peer_trigger(struct ntb_dev *ntb, int peer,
				       struct ntb_msi_desc *desc)
{
	return -EOPNOTSUPP;
}
static inline int ntb_msi_peer_addr(struct ntb_dev *ntb, int peer,
				    struct ntb_msi_desc *desc,
				    phys_addr_t *msi_addr)
{
	return -EOPNOTSUPP;
}

#endif /* CONFIG_NTB_MSI */

static inline int ntbm_msi_request_irq(struct ntb_dev *ntb,
				       irq_handler_t handler,
				       const char *name, void *dev_id,
				       struct ntb_msi_desc *msi_desc)
{
	return ntbm_msi_request_threaded_irq(ntb, handler, NULL, name,
					     dev_id, msi_desc);
}

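/*
 * Illustrative sketch of the NTB MSI helper flow (assumes CONFIG_NTB_MSI;
 * my_isr, my_desc_changed and my_ctx are hypothetical client symbols, and
 * error handling is omitted):
 *
 *	struct ntb_msi_desc desc;
 *	int ret;
 *
 *	ret = ntb_msi_init(ntb, my_desc_changed);
 *	ret = ntbm_msi_request_irq(ntb, my_isr, "my_client", my_ctx, &desc);
 *	ret = ntb_msi_setup_mws(ntb);		// once the link is up
 *	...
 *	ret = ntb_msi_peer_trigger(ntb, 0, &peer_desc);
 *
 * The descriptor filled in by ntbm_msi_request_irq() would normally be passed
 * to the peer (e.g. via scratchpads or message registers) so the peer can
 * hand it to ntb_msi_peer_trigger(); peer_desc above stands for such a
 * descriptor received from peer 0.
 */
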
#endif