/*-
 * SPDX-License-Identifier: BSD-2-Clause OR GPL-2.0
 *
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <linux/module.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>

#include "core_priv.h"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("core kernel InfiniBand API");
MODULE_LICENSE("Dual BSD/GPL");

struct ib_client_data {
	struct list_head  list;
	struct ib_client *client;
	void *            data;
	/* The device or client is going down. Do not call client or device
	 * callbacks other than remove(). */
	bool		  going_down;
};

struct workqueue_struct *ib_comp_wq;
struct workqueue_struct *ib_wq;
EXPORT_SYMBOL_GPL(ib_wq);

/* The device_list and client_list contain devices and clients after their
 * registration has completed, and the devices and clients are removed
 * during unregistration. */
static LIST_HEAD(device_list);
static LIST_HEAD(client_list);

/*
 * device_mutex and lists_rwsem protect access to both device_list and
 * client_list.  device_mutex protects writer access by device and client
 * registration / de-registration.  lists_rwsem protects reader access to
 * these lists.  Iterators of these lists must lock it for read, while updates
 * to the lists must be done with a write lock. A special case is when the
 * device_mutex is locked. In this case locking the lists for read access is
 * not necessary as the device_mutex implies it.
 *
 * lists_rwsem also protects access to the client data list.
 */
static DEFINE_MUTEX(device_mutex);
static DECLARE_RWSEM(lists_rwsem);


static int ib_device_check_mandatory(struct ib_device *device)
{
#define IB_MANDATORY_FUNC(x) { offsetof(struct ib_device, x), #x }
	static const struct {
		size_t offset;
		char  *name;
	} mandatory_table[] = {
		IB_MANDATORY_FUNC(query_device),
		IB_MANDATORY_FUNC(query_port),
		IB_MANDATORY_FUNC(query_pkey),
		IB_MANDATORY_FUNC(query_gid),
		IB_MANDATORY_FUNC(alloc_pd),
		IB_MANDATORY_FUNC(dealloc_pd),
		IB_MANDATORY_FUNC(create_ah),
		IB_MANDATORY_FUNC(destroy_ah),
		IB_MANDATORY_FUNC(create_qp),
		IB_MANDATORY_FUNC(modify_qp),
		IB_MANDATORY_FUNC(destroy_qp),
		IB_MANDATORY_FUNC(post_send),
		IB_MANDATORY_FUNC(post_recv),
		IB_MANDATORY_FUNC(create_cq),
		IB_MANDATORY_FUNC(destroy_cq),
		IB_MANDATORY_FUNC(poll_cq),
		IB_MANDATORY_FUNC(req_notify_cq),
		IB_MANDATORY_FUNC(get_dma_mr),
		IB_MANDATORY_FUNC(dereg_mr),
		IB_MANDATORY_FUNC(get_port_immutable)
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(mandatory_table); ++i) {
		if (!*(void **) ((char *) device + mandatory_table[i].offset)) {
			pr_warn("Device %s is missing mandatory function %s\n",
				device->name, mandatory_table[i].name);
			return -EINVAL;
		}
	}

	return 0;
}

static struct ib_device *__ib_device_get_by_name(const char *name)
{
	struct ib_device *device;

	list_for_each_entry(device, &device_list, core_list)
		if (!strncmp(name, device->name, IB_DEVICE_NAME_MAX))
			return device;

	return NULL;
}


static int alloc_name(char *name)
{
	unsigned long *inuse;
	char buf[IB_DEVICE_NAME_MAX];
	struct ib_device *device;
	int i;

	inuse = (unsigned long *) get_zeroed_page(GFP_KERNEL);
	if (!inuse)
		return -ENOMEM;

	list_for_each_entry(device, &device_list, core_list) {
		if (!sscanf(device->name, name, &i))
			continue;
		if (i < 0 || i >= PAGE_SIZE * 8)
			continue;
		snprintf(buf, sizeof buf, name, i);
		if (!strncmp(buf, device->name, IB_DEVICE_NAME_MAX))
			set_bit(i, inuse);
	}

	i = find_first_zero_bit(inuse, PAGE_SIZE * 8);
	free_page((unsigned long) inuse);
	snprintf(buf, sizeof buf, name, i);

	if (__ib_device_get_by_name(buf))
		return -ENFILE;

	strlcpy(name, buf, IB_DEVICE_NAME_MAX);
	return 0;
}
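
/*
 * Illustrative example (not part of the API): a driver that registers
 * with the template name "mlx4_%d" will have alloc_name() expand it to
 * the first unused instance, e.g. "mlx4_0", then "mlx4_1", and so on.
 * The bitmap above can track at most PAGE_SIZE * 8 instances per name
 * template.
 */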

static void ib_device_release(struct device *device)
{
	struct ib_device *dev = container_of(device, struct ib_device, dev);

	WARN_ON(dev->reg_state == IB_DEV_REGISTERED);
	if (dev->reg_state == IB_DEV_UNREGISTERED) {
		/*
		 * In the IB_DEV_UNINITIALIZED state, the cache and port
		 * table have not yet been created.  Free them only when
		 * the device reaches the UNREGISTERED state.
		 */
		ib_cache_release_one(dev);
		kfree(dev->port_immutable);
	}
	kfree(dev);
}

static struct class ib_class = {
	.name    = "infiniband",
	.dev_release = ib_device_release,
};

/**
 * ib_alloc_device - allocate an IB device struct
 * @size:size of structure to allocate
 *
 * Low-level drivers should use ib_alloc_device() to allocate &struct
 * ib_device.  @size is the size of the structure to be allocated,
 * including any private data used by the low-level driver.
 * ib_dealloc_device() must be used to free structures allocated with
 * ib_alloc_device().
 */
struct ib_device *ib_alloc_device(size_t size)
{
	struct ib_device *device;

	if (WARN_ON(size < sizeof(struct ib_device)))
		return NULL;

	device = kzalloc(size, GFP_KERNEL);
	if (!device)
		return NULL;

	device->dev.parent = &linux_root_device;
	device->dev.class = &ib_class;
	device_initialize(&device->dev);

	dev_set_drvdata(&device->dev, device);

	INIT_LIST_HEAD(&device->event_handler_list);
	spin_lock_init(&device->event_handler_lock);
	spin_lock_init(&device->client_data_lock);
	INIT_LIST_HEAD(&device->client_data_list);
	INIT_LIST_HEAD(&device->port_list);

	return device;
}
EXPORT_SYMBOL(ib_alloc_device);
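
/*
 * A minimal usage sketch (hypothetical driver structure, shown only for
 * illustration): drivers typically embed struct ib_device as the first
 * member of their own device structure so the private data can be
 * recovered from the ib_device pointer:
 *
 *	struct mydrv_dev {
 *		struct ib_device	ibdev;	// must be first
 *		int			private_state;
 *	};
 *
 *	struct mydrv_dev *dev = (struct mydrv_dev *)
 *	    ib_alloc_device(sizeof(struct mydrv_dev));
 *	if (dev == NULL)
 *		return (-ENOMEM);
 */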

/**
 * ib_dealloc_device - free an IB device struct
 * @device:structure to free
 *
 * Free a structure allocated with ib_alloc_device().
 */
void ib_dealloc_device(struct ib_device *device)
{
	WARN_ON(device->reg_state != IB_DEV_UNREGISTERED &&
		device->reg_state != IB_DEV_UNINITIALIZED);
	kobject_put(&device->dev.kobj);
}
EXPORT_SYMBOL(ib_dealloc_device);

static int add_client_context(struct ib_device *device, struct ib_client *client)
{
	struct ib_client_data *context;
	unsigned long flags;

	context = kmalloc(sizeof *context, GFP_KERNEL);
	if (!context) {
		pr_warn("Couldn't allocate client context for %s/%s\n",
			device->name, client->name);
		return -ENOMEM;
	}

	context->client = client;
	context->data   = NULL;
	context->going_down = false;

	down_write(&lists_rwsem);
	spin_lock_irqsave(&device->client_data_lock, flags);
	list_add(&context->list, &device->client_data_list);
	spin_unlock_irqrestore(&device->client_data_lock, flags);
	up_write(&lists_rwsem);

	return 0;
}

static int verify_immutable(const struct ib_device *dev, u8 port)
{
	return WARN_ON(!rdma_cap_ib_mad(dev, port) &&
			    rdma_max_mad_size(dev, port) != 0);
}

static int read_port_immutable(struct ib_device *device)
{
	int ret;
	u8 start_port = rdma_start_port(device);
	u8 end_port = rdma_end_port(device);
	u8 port;

	/*
	 * device->port_immutable is indexed directly by the port number to make
	 * access to this data as efficient as possible.
	 *
	 * Therefore port_immutable is declared as a 1-based array with
	 * potential empty slots at the beginning.
	 */
	device->port_immutable = kzalloc(sizeof(*device->port_immutable)
					 * (end_port + 1),
					 GFP_KERNEL);
	if (!device->port_immutable)
		return -ENOMEM;

	for (port = start_port; port <= end_port; ++port) {
		ret = device->get_port_immutable(device, port,
						 &device->port_immutable[port]);
		if (ret)
			return ret;

		if (verify_immutable(device, port))
			return -EINVAL;
	}
	return 0;
}

void ib_get_device_fw_str(struct ib_device *dev, char *str, size_t str_len)
{
	if (dev->get_dev_fw_str)
		dev->get_dev_fw_str(dev, str, str_len);
	else
		str[0] = '\0';
}
EXPORT_SYMBOL(ib_get_device_fw_str);

/**
 * ib_register_device - Register an IB device with IB core
 * @device:Device to register
 *
 * Low-level drivers use ib_register_device() to register their
 * devices with the IB core.  All registered clients will receive a
 * callback for each device that is added. @device must be allocated
 * with ib_alloc_device().
 */
int ib_register_device(struct ib_device *device,
		       int (*port_callback)(struct ib_device *,
					    u8, struct kobject *))
{
	int ret;
	struct ib_client *client;
	struct ib_udata uhw = {.outlen = 0, .inlen = 0};

	mutex_lock(&device_mutex);

	if (strchr(device->name, '%')) {
		ret = alloc_name(device->name);
		if (ret)
			goto out;
	}

	if (ib_device_check_mandatory(device)) {
		ret = -EINVAL;
		goto out;
	}

	ret = read_port_immutable(device);
	if (ret) {
		pr_warn("Couldn't create per port immutable data %s\n",
			device->name);
		goto out;
	}

	ret = ib_cache_setup_one(device);
	if (ret) {
		pr_warn("Couldn't set up InfiniBand P_Key/GID cache\n");
		goto port_cleanup;
	}

	memset(&device->attrs, 0, sizeof(device->attrs));
	ret = device->query_device(device, &device->attrs, &uhw);
	if (ret) {
		pr_warn("Couldn't query the device attributes\n");
		goto cache_cleanup;
	}

	ret = ib_device_register_sysfs(device, port_callback);
	if (ret) {
		pr_warn("Couldn't register device %s with driver model\n",
			device->name);
		goto cache_cleanup;
	}

	device->reg_state = IB_DEV_REGISTERED;

	list_for_each_entry(client, &client_list, list)
		if (client->add && !add_client_context(device, client))
			client->add(device);

	down_write(&lists_rwsem);
	list_add_tail(&device->core_list, &device_list);
	up_write(&lists_rwsem);
	mutex_unlock(&device_mutex);
	return 0;

cache_cleanup:
	ib_cache_cleanup_one(device);
	ib_cache_release_one(device);
port_cleanup:
	kfree(device->port_immutable);
out:
	mutex_unlock(&device_mutex);
	return ret;
}
EXPORT_SYMBOL(ib_register_device);
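
/*
 * A minimal registration sketch (hypothetical driver, shown only to
 * illustrate the expected call order; error handling omitted):
 *
 *	dev = ib_alloc_device(sizeof(*mydev));
 *	// ... fill in dev->name (e.g. "mydrv_%d"), the mandatory
 *	// methods checked by ib_device_check_mandatory(), and
 *	// get_port_immutable ...
 *	ret = ib_register_device(&mydev->ibdev, NULL);
 *	// ... and on teardown:
 *	ib_unregister_device(&mydev->ibdev);
 *	ib_dealloc_device(&mydev->ibdev);
 */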

/**
 * ib_unregister_device - Unregister an IB device
 * @device:Device to unregister
 *
 * Unregister an IB device.  All clients will receive a remove callback.
 */
void ib_unregister_device(struct ib_device *device)
{
	struct ib_client_data *context, *tmp;
	unsigned long flags;

	mutex_lock(&device_mutex);

	down_write(&lists_rwsem);
	list_del(&device->core_list);
	spin_lock_irqsave(&device->client_data_lock, flags);
	list_for_each_entry_safe(context, tmp, &device->client_data_list, list)
		context->going_down = true;
	spin_unlock_irqrestore(&device->client_data_lock, flags);
	downgrade_write(&lists_rwsem);

	list_for_each_entry_safe(context, tmp, &device->client_data_list,
				 list) {
		if (context->client->remove)
			context->client->remove(device, context->data);
	}
	up_read(&lists_rwsem);

	mutex_unlock(&device_mutex);

	ib_device_unregister_sysfs(device);
	ib_cache_cleanup_one(device);

	down_write(&lists_rwsem);
	spin_lock_irqsave(&device->client_data_lock, flags);
	list_for_each_entry_safe(context, tmp, &device->client_data_list, list)
		kfree(context);
	spin_unlock_irqrestore(&device->client_data_lock, flags);
	up_write(&lists_rwsem);

	device->reg_state = IB_DEV_UNREGISTERED;
}
EXPORT_SYMBOL(ib_unregister_device);

/**
 * ib_register_client - Register an IB client
 * @client:Client to register
 *
 * Upper level users of the IB drivers can use ib_register_client() to
 * register callbacks for IB device addition and removal.  When an IB
 * device is added, each registered client's add method will be called
 * (in the order the clients were registered), and when a device is
 * removed, each client's remove method will be called (in the reverse
 * order that clients were registered).  In addition, when
 * ib_register_client() is called, the client will receive an add
 * callback for all devices already registered.
 */
int ib_register_client(struct ib_client *client)
{
	struct ib_device *device;

	mutex_lock(&device_mutex);

	list_for_each_entry(device, &device_list, core_list)
		if (client->add && !add_client_context(device, client))
			client->add(device);

	down_write(&lists_rwsem);
	list_add_tail(&client->list, &client_list);
	up_write(&lists_rwsem);

	mutex_unlock(&device_mutex);

	return 0;
}
EXPORT_SYMBOL(ib_register_client);
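
/*
 * A minimal client sketch (hypothetical names, for illustration only):
 *
 *	static void myclient_add(struct ib_device *device)
 *	{
 *		struct myclient_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
 *
 *		if (ctx != NULL)
 *			ib_set_client_data(device, &myclient, ctx);
 *	}
 *
 *	static void myclient_remove(struct ib_device *device, void *data)
 *	{
 *		kfree(data);
 *	}
 *
 *	static struct ib_client myclient = {
 *		.name   = "myclient",
 *		.add    = myclient_add,
 *		.remove = myclient_remove,
 *	};
 *
 *	ret = ib_register_client(&myclient);
 */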

/**
 * ib_unregister_client - Unregister an IB client
 * @client:Client to unregister
 *
 * Upper level users use ib_unregister_client() to remove their client
 * registration.  When ib_unregister_client() is called, the client
 * will receive a remove callback for each IB device still registered.
 */
void ib_unregister_client(struct ib_client *client)
{
	struct ib_client_data *context, *tmp;
	struct ib_device *device;
	unsigned long flags;

	mutex_lock(&device_mutex);

	down_write(&lists_rwsem);
	list_del(&client->list);
	up_write(&lists_rwsem);

	list_for_each_entry(device, &device_list, core_list) {
		struct ib_client_data *found_context = NULL;

		down_write(&lists_rwsem);
		spin_lock_irqsave(&device->client_data_lock, flags);
		list_for_each_entry_safe(context, tmp, &device->client_data_list, list)
			if (context->client == client) {
				context->going_down = true;
				found_context = context;
				break;
			}
		spin_unlock_irqrestore(&device->client_data_lock, flags);
		up_write(&lists_rwsem);

		if (client->remove)
			client->remove(device, found_context ?
					       found_context->data : NULL);

		if (!found_context) {
			pr_warn("No client context found for %s/%s\n",
				device->name, client->name);
			continue;
		}

		down_write(&lists_rwsem);
		spin_lock_irqsave(&device->client_data_lock, flags);
		list_del(&found_context->list);
		kfree(found_context);
		spin_unlock_irqrestore(&device->client_data_lock, flags);
		up_write(&lists_rwsem);
	}

	mutex_unlock(&device_mutex);
}
EXPORT_SYMBOL(ib_unregister_client);

/**
 * ib_get_client_data - Get IB client context
 * @device:Device to get context for
 * @client:Client to get context for
 *
 * ib_get_client_data() returns client context set with
 * ib_set_client_data().
 */
void *ib_get_client_data(struct ib_device *device, struct ib_client *client)
{
	struct ib_client_data *context;
	void *ret = NULL;
	unsigned long flags;

	spin_lock_irqsave(&device->client_data_lock, flags);
	list_for_each_entry(context, &device->client_data_list, list)
		if (context->client == client) {
			ret = context->data;
			break;
		}
	spin_unlock_irqrestore(&device->client_data_lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_get_client_data);

/**
 * ib_set_client_data - Set IB client context
 * @device:Device to set context for
 * @client:Client to set context for
 * @data:Context to set
 *
 * ib_set_client_data() sets client context that can be retrieved with
 * ib_get_client_data().
 */
void ib_set_client_data(struct ib_device *device, struct ib_client *client,
			void *data)
{
	struct ib_client_data *context;
	unsigned long flags;

	spin_lock_irqsave(&device->client_data_lock, flags);
	list_for_each_entry(context, &device->client_data_list, list)
		if (context->client == client) {
			context->data = data;
			goto out;
		}

	pr_warn("No client context found for %s/%s\n",
		device->name, client->name);

out:
	spin_unlock_irqrestore(&device->client_data_lock, flags);
}
EXPORT_SYMBOL(ib_set_client_data);
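
/*
 * Note: ib_set_client_data() only stores the pointer; it does not take
 * ownership.  A client typically calls it from its add() callback and
 * frees the data from its remove() callback, as in the sketch above.
 */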

/**
 * ib_register_event_handler - Register an IB event handler
 * @event_handler:Handler to register
 *
 * ib_register_event_handler() registers an event handler that will be
 * called back when asynchronous IB events occur (as defined in
 * chapter 11 of the InfiniBand Architecture Specification).  This
 * callback may occur in interrupt context.
 */
int ib_register_event_handler(struct ib_event_handler *event_handler)
{
	unsigned long flags;

	spin_lock_irqsave(&event_handler->device->event_handler_lock, flags);
	list_add_tail(&event_handler->list,
		      &event_handler->device->event_handler_list);
	spin_unlock_irqrestore(&event_handler->device->event_handler_lock, flags);

	return 0;
}
EXPORT_SYMBOL(ib_register_event_handler);
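
/*
 * A minimal handler sketch (hypothetical function names; the
 * INIT_IB_EVENT_HANDLER() helper comes from rdma/ib_verbs.h):
 *
 *	static void my_event_cb(struct ib_event_handler *handler,
 *				struct ib_event *event)
 *	{
 *		// may run in interrupt context; must not sleep
 *	}
 *
 *	INIT_IB_EVENT_HANDLER(&my_handler, device, my_event_cb);
 *	ib_register_event_handler(&my_handler);
 */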

/**
 * ib_unregister_event_handler - Unregister an event handler
 * @event_handler:Handler to unregister
 *
 * Unregister an event handler registered with
 * ib_register_event_handler().
 */
int ib_unregister_event_handler(struct ib_event_handler *event_handler)
{
	unsigned long flags;

	spin_lock_irqsave(&event_handler->device->event_handler_lock, flags);
	list_del(&event_handler->list);
	spin_unlock_irqrestore(&event_handler->device->event_handler_lock, flags);

	return 0;
}
EXPORT_SYMBOL(ib_unregister_event_handler);

/**
 * ib_dispatch_event - Dispatch an asynchronous event
 * @event:Event to dispatch
 *
 * Low-level drivers must call ib_dispatch_event() to dispatch the
 * event to all registered event handlers when an asynchronous event
 * occurs.
 */
void ib_dispatch_event(struct ib_event *event)
{
	unsigned long flags;
	struct ib_event_handler *handler;

	spin_lock_irqsave(&event->device->event_handler_lock, flags);

	list_for_each_entry(handler, &event->device->event_handler_list, list)
		handler->handler(handler, event);

	spin_unlock_irqrestore(&event->device->event_handler_lock, flags);
}
EXPORT_SYMBOL(ib_dispatch_event);
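
/*
 * Note: ib_dispatch_event() invokes every handler with the device's
 * event_handler_lock held and interrupts disabled, which is why the
 * handlers registered above must not sleep.
 */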

/**
 * ib_query_port - Query IB port attributes
 * @device:Device to query
 * @port_num:Port number to query
 * @port_attr:Port attributes
 *
 * ib_query_port() returns the attributes of a port through the
 * @port_attr pointer.
 */
int ib_query_port(struct ib_device *device,
		  u8 port_num,
		  struct ib_port_attr *port_attr)
{
	union ib_gid gid;
	int err;

	if (!rdma_is_port_valid(device, port_num))
		return -EINVAL;

	memset(port_attr, 0, sizeof(*port_attr));
	err = device->query_port(device, port_num, port_attr);
	if (err || port_attr->subnet_prefix)
		return err;

	if (rdma_port_get_link_layer(device, port_num) != IB_LINK_LAYER_INFINIBAND)
		return 0;

	err = ib_query_gid(device, port_num, 0, &gid, NULL);
	if (err)
		return err;

	port_attr->subnet_prefix = be64_to_cpu(gid.global.subnet_prefix);
	return 0;
}
EXPORT_SYMBOL(ib_query_port);

/**
 * ib_query_gid - Get GID table entry
 * @device:Device to query
 * @port_num:Port number to query
 * @index:GID table index to query
 * @gid:Returned GID
 * @attr: Returned GID attributes related to this GID index (only in RoCE).
 *   NULL means ignore.
 *
 * ib_query_gid() fetches the specified GID table entry.
 */
int ib_query_gid(struct ib_device *device,
		 u8 port_num, int index, union ib_gid *gid,
		 struct ib_gid_attr *attr)
{
	if (rdma_cap_roce_gid_table(device, port_num))
		return ib_get_cached_gid(device, port_num, index, gid, attr);

	if (attr)
		return -EINVAL;

	return device->query_gid(device, port_num, index, gid);
}
EXPORT_SYMBOL(ib_query_gid);

/**
 * ib_enum_roce_netdev - enumerate all RoCE ports
 * @ib_dev : IB device we want to query
 * @filter: Should we call the callback?
 * @filter_cookie: Cookie passed to filter
 * @cb: Callback to call for each found RoCE port
 * @cookie: Cookie passed back to the callback
 *
 * Enumerates all of the physical RoCE ports of ib_dev
 * which are related to a netdevice and calls cb() on each
 * port for which the filter() function returns non-zero.
 */
void ib_enum_roce_netdev(struct ib_device *ib_dev,
			 roce_netdev_filter filter,
			 void *filter_cookie,
			 roce_netdev_callback cb,
			 void *cookie)
{
	u8 port;

	for (port = rdma_start_port(ib_dev); port <= rdma_end_port(ib_dev);
	     port++)
		if (rdma_protocol_roce(ib_dev, port)) {
			if_t idev = NULL;

			if (ib_dev->get_netdev)
				idev = ib_dev->get_netdev(ib_dev, port);

			if (idev && (if_getflags(idev) & IFF_DYING)) {
				dev_put(idev);
				idev = NULL;
			}

			if (filter(ib_dev, port, idev, filter_cookie))
				cb(ib_dev, port, idev, cookie);

			if (idev)
				dev_put(idev);
		}
}

/**
 * ib_enum_all_roce_netdevs - enumerate all RoCE devices
 * @filter: Should we call the callback?
 * @filter_cookie: Cookie passed to filter
 * @cb: Callback to call for each found RoCE port
 * @cookie: Cookie passed back to the callback
 *
 * Enumerates the physical ports of all RoCE devices which are related
 * to netdevices and calls cb() on each port for which the filter()
 * function returns non-zero.
 */
void ib_enum_all_roce_netdevs(roce_netdev_filter filter,
			      void *filter_cookie,
			      roce_netdev_callback cb,
			      void *cookie)
{
	struct ib_device *dev;

	down_read(&lists_rwsem);
	list_for_each_entry(dev, &device_list, core_list)
		ib_enum_roce_netdev(dev, filter, filter_cookie, cb, cookie);
	up_read(&lists_rwsem);
}

/**
 * ib_cache_gid_del_all_by_netdev - delete GIDs belonging to a netdevice
 *
 * @ndev: Pointer to netdevice
 */
void ib_cache_gid_del_all_by_netdev(if_t ndev)
{
	struct ib_device *ib_dev;
	u8 port;

	down_read(&lists_rwsem);
	list_for_each_entry(ib_dev, &device_list, core_list) {
		for (port = rdma_start_port(ib_dev);
		     port <= rdma_end_port(ib_dev);
		     port++) {
			if (rdma_protocol_roce(ib_dev, port) == 0)
				continue;
			(void) ib_cache_gid_del_all_netdev_gids(ib_dev, port, ndev);
		}
	}
	up_read(&lists_rwsem);
}

/**
 * ib_query_pkey - Get P_Key table entry
 * @device:Device to query
 * @port_num:Port number to query
 * @index:P_Key table index to query
 * @pkey:Returned P_Key
 *
 * ib_query_pkey() fetches the specified P_Key table entry.
 */
int ib_query_pkey(struct ib_device *device,
		  u8 port_num, u16 index, u16 *pkey)
{
	if (!rdma_is_port_valid(device, port_num))
		return -EINVAL;

	return device->query_pkey(device, port_num, index, pkey);
}
EXPORT_SYMBOL(ib_query_pkey);

/**
 * ib_modify_device - Change IB device attributes
 * @device:Device to modify
 * @device_modify_mask:Mask of attributes to change
 * @device_modify:New attribute values
 *
 * ib_modify_device() changes a device's attributes as specified by
 * the @device_modify_mask and @device_modify structure.
 */
int ib_modify_device(struct ib_device *device,
		     int device_modify_mask,
		     struct ib_device_modify *device_modify)
{
	if (!device->modify_device)
		return -ENOSYS;

	return device->modify_device(device, device_modify_mask,
				     device_modify);
}
EXPORT_SYMBOL(ib_modify_device);

/**
 * ib_modify_port - Modifies the attributes for the specified port.
 * @device: The device to modify.
 * @port_num: The number of the port to modify.
 * @port_modify_mask: Mask used to specify which attributes of the port
 *   to change.
 * @port_modify: New attribute values for the port.
 *
 * ib_modify_port() changes a port's attributes as specified by the
 * @port_modify_mask and @port_modify structure.
 */
int ib_modify_port(struct ib_device *device,
		   u8 port_num, int port_modify_mask,
		   struct ib_port_modify *port_modify)
{
	if (!device->modify_port)
		return -ENOSYS;

	if (!rdma_is_port_valid(device, port_num))
		return -EINVAL;

	return device->modify_port(device, port_num, port_modify_mask,
				   port_modify);
}
EXPORT_SYMBOL(ib_modify_port);

/**
 * ib_find_gid - Returns the port number and GID table index where
 *   a specified GID value occurs.
 * @device: The device to query.
 * @gid: The GID value to search for.
 * @gid_type: Type of GID.
 * @ndev: The ndev related to the GID to search for.
 * @port_num: The port number of the device where the GID value was found.
 * @index: The index into the GID table where the GID was found.  This
 *   parameter may be NULL.
 */
int ib_find_gid(struct ib_device *device, union ib_gid *gid,
		enum ib_gid_type gid_type, if_t ndev,
		u8 *port_num, u16 *index)
{
	union ib_gid tmp_gid;
	int ret, port, i;

	for (port = rdma_start_port(device); port <= rdma_end_port(device); ++port) {
		if (rdma_cap_roce_gid_table(device, port)) {
			if (!ib_find_cached_gid_by_port(device, gid, gid_type, port,
							ndev, index)) {
				*port_num = port;
				return 0;
			}
		}

		if (gid_type != IB_GID_TYPE_IB)
			continue;

		for (i = 0; i < device->port_immutable[port].gid_tbl_len; ++i) {
			ret = ib_query_gid(device, port, i, &tmp_gid, NULL);
			if (ret)
				return ret;
			if (!memcmp(&tmp_gid, gid, sizeof *gid)) {
				*port_num = port;
				if (index)
					*index = i;
				return 0;
			}
		}
	}

	return -ENOENT;
}
EXPORT_SYMBOL(ib_find_gid);

/**
 * ib_find_pkey - Returns the PKey table index where a specified
 *   PKey value occurs.
 * @device: The device to query.
 * @port_num: The port number of the device to search for the PKey.
 * @pkey: The PKey value to search for.
 * @index: The index into the PKey table where the PKey was found.
 */
int ib_find_pkey(struct ib_device *device,
		 u8 port_num, u16 pkey, u16 *index)
{
	int ret, i;
	u16 tmp_pkey;
	int partial_ix = -1;

	for (i = 0; i < device->port_immutable[port_num].pkey_tbl_len; ++i) {
		ret = ib_query_pkey(device, port_num, i, &tmp_pkey);
		if (ret)
			return ret;
		if ((pkey & 0x7fff) == (tmp_pkey & 0x7fff)) {
			/* If there is a full-member pkey, take it. */
			if (tmp_pkey & 0x8000) {
				*index = i;
				return 0;
			}
			if (partial_ix < 0)
				partial_ix = i;
		}
	}

	/* No full-member pkey found; take the limited-member one if it exists. */
	if (partial_ix >= 0) {
		*index = partial_ix;
		return 0;
	}
	return -ENOENT;
}
EXPORT_SYMBOL(ib_find_pkey);
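
/*
 * Background for the masks above: the top bit of a P_Key encodes
 * membership, so 0x8000 | pkey is the full-member form and the low 15
 * bits identify the partition.  For example, the default partition is
 * 0xffff as a full member and 0x7fff as a limited member; both compare
 * equal under the (pkey & 0x7fff) test.
 */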

/**
 * ib_get_net_dev_by_params() - Return the appropriate net_dev
 * for a received CM request
 * @dev:	An RDMA device on which the request has been received.
 * @port:	Port number on the RDMA device.
 * @pkey:	The Pkey the request came on.
 * @gid:	A GID that the net_dev uses to communicate.
 * @addr:	Contains the IP address that the request specified as its
 *		destination.
 */
if_t ib_get_net_dev_by_params(struct ib_device *dev,
					    u8 port,
					    u16 pkey,
					    const union ib_gid *gid,
					    const struct sockaddr *addr)
{
	if_t net_dev = NULL;
	struct ib_client_data *context;

	if (!rdma_protocol_ib(dev, port))
		return NULL;

	down_read(&lists_rwsem);

	list_for_each_entry(context, &dev->client_data_list, list) {
		struct ib_client *client = context->client;

		if (context->going_down)
			continue;

		if (client->get_net_dev_by_params) {
			net_dev = client->get_net_dev_by_params(dev, port, pkey,
								gid, addr,
								context->data);
			if (net_dev)
				break;
		}
	}

	up_read(&lists_rwsem);

	return net_dev;
}
EXPORT_SYMBOL(ib_get_net_dev_by_params);

static int __init ib_core_init(void)
{
	int ret;

	ib_wq = alloc_workqueue("infiniband", 0, 0);
	if (!ib_wq)
		return -ENOMEM;

	ib_comp_wq = alloc_workqueue("ib-comp-wq",
			WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM,
			mp_ncpus * 4 /* WQ_UNBOUND_MAX_ACTIVE */);
	if (!ib_comp_wq) {
		ret = -ENOMEM;
		goto err;
	}

	ret = class_register(&ib_class);
	if (ret) {
		pr_warn("Couldn't create InfiniBand device class\n");
		goto err_comp;
	}

	ret = addr_init();
	if (ret) {
		pr_warn("Couldn't init IB address resolution\n");
		goto err_sysfs;
	}

	ret = ib_mad_init();
	if (ret) {
		pr_warn("Couldn't init IB MAD\n");
		goto err_addr;
	}

	ret = ib_sa_init();
	if (ret) {
		pr_warn("Couldn't init SA\n");
		goto err_mad;
	}

	ib_cache_setup();

	return 0;

err_mad:
	ib_mad_cleanup();
err_addr:
	addr_cleanup();
err_sysfs:
	class_unregister(&ib_class);
err_comp:
	destroy_workqueue(ib_comp_wq);
err:
	destroy_workqueue(ib_wq);
	return ret;
}

static void __exit ib_core_cleanup(void)
{
	ib_cache_cleanup();
	ib_sa_cleanup();
	ib_mad_cleanup();
	addr_cleanup();
	class_unregister(&ib_class);
	destroy_workqueue(ib_comp_wq);
	/* Make sure that any pending umem accounting work is done. */
	destroy_workqueue(ib_wq);
}

/*
 * Typical loading and unloading order values and their use:
 *
 * SI_ORDER_FIRST (default for module_init):
 *      Core modules (PCI, infiniband)
 * SI_ORDER_SECOND (default for module_exit):
 *      Infiniband core modules (CM)
 * SI_ORDER_THIRD:
 * SI_ORDER_FOURTH:
 *      Infiniband core modules (CMA)
 * SI_ORDER_FIFTH:
 *      Infiniband user-space modules (UCM,UCMA,UMAD,UVERBS,IPOIB)
 * SI_ORDER_SIXTH:
 *      Network HW driver modules
 * SI_ORDER_SEVENTH:
 *      Infiniband HW driver modules
 */
module_init_order(ib_core_init, SI_ORDER_FIRST);
module_exit_order(ib_core_cleanup, SI_ORDER_FIRST);

MODULE_VERSION(ibcore, 1);
MODULE_DEPEND(ibcore, linuxkpi, 1, 1, 1);