/*-
 * SPDX-License-Identifier: BSD-2-Clause OR GPL-2.0
 *
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Intel Corporation. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/netdevice.h>
#include <linux/in6.h>

#include <rdma/ib_cache.h>

#include "core_priv.h"

struct ib_pkey_cache {
	int             table_len;
	u16             table[0];
};

struct ib_update_work {
	struct work_struct work;
	struct ib_device  *device;
	u8                 port_num;
};

union ib_gid zgid;
EXPORT_SYMBOL(zgid);

static const struct ib_gid_attr zattr;

enum gid_attr_find_mask {
	GID_ATTR_FIND_MASK_GID          = 1UL << 0,
	GID_ATTR_FIND_MASK_NETDEV	= 1UL << 1,
	GID_ATTR_FIND_MASK_DEFAULT	= 1UL << 2,
	GID_ATTR_FIND_MASK_GID_TYPE	= 1UL << 3,
};

enum gid_table_entry_props {
	GID_TABLE_ENTRY_INVALID		= 1UL << 0,
	GID_TABLE_ENTRY_DEFAULT		= 1UL << 1,
};

enum gid_table_write_action {
	GID_TABLE_WRITE_ACTION_ADD,
	GID_TABLE_WRITE_ACTION_DEL,
	/* MODIFY only updates the GID table. Currently only used by
	 * ib_cache_update.
	 */
	GID_TABLE_WRITE_ACTION_MODIFY
};

struct ib_gid_table_entry {
	unsigned long	    props;
	union ib_gid        gid;
	struct ib_gid_attr  attr;
	void		   *context;
};

struct ib_gid_table {
	int                  sz;
	/* In RoCE, adding a GID to the table requires:
	 * (a) Check whether this GID already exists.
	 * (b) Find a free slot.
	 * (c) Write the new GID.
	 *
	 * Deletion requires a different set of operations:
	 * (a) Find the GID.
	 * (b) Delete it.
	 *
	 * Add/delete should be carried out atomically.
	 * This is done by having all writers serialize on this
	 * mutex. We don't need this lock for IB, as the MAD
	 * layer replaces all entries. All data_vec entries
	 * are protected by this lock.
	 */
	struct mutex         lock;
	/* This lock protects the table entries from being
	 * read and written simultaneously.
	 */
	rwlock_t	     rwlock;
	struct ib_gid_table_entry *data_vec;
};
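
/*
 * Illustrative sketch (not taken from this file): the locking discipline
 * described above means a RoCE writer typically nests the locks like so:
 *
 *	mutex_lock(&table->lock);		// serialize writers
 *	write_lock_irq(&table->rwlock);		// exclude readers
 *	// ... find and modify an entry; write_gid() may temporarily
 *	// drop rwlock around the provider callback ...
 *	write_unlock_irq(&table->rwlock);
 *	mutex_unlock(&table->lock);
 */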

static void dispatch_gid_change_event(struct ib_device *ib_dev, u8 port)
{
	if (rdma_cap_roce_gid_table(ib_dev, port)) {
		struct ib_event event;

		event.device		= ib_dev;
		event.element.port_num	= port;
		event.event		= IB_EVENT_GID_CHANGE;

		ib_dispatch_event(&event);
	}
}

static const char * const gid_type_str[] = {
	[IB_GID_TYPE_IB]	= "IB/RoCE v1",
	[IB_GID_TYPE_ROCE_UDP_ENCAP]	= "RoCE v2",
};

const char *ib_cache_gid_type_str(enum ib_gid_type gid_type)
{
	if (gid_type < ARRAY_SIZE(gid_type_str) && gid_type_str[gid_type])
		return gid_type_str[gid_type];

	return "Invalid GID type";
}
EXPORT_SYMBOL(ib_cache_gid_type_str);

int ib_cache_gid_parse_type_str(const char *buf)
{
	unsigned int i;
	size_t len;
	int err = -EINVAL;

	len = strlen(buf);
	if (len == 0)
		return -EINVAL;

	if (buf[len - 1] == '\n')
		len--;

	for (i = 0; i < ARRAY_SIZE(gid_type_str); ++i)
		if (gid_type_str[i] && !strncmp(buf, gid_type_str[i], len) &&
		    len == strlen(gid_type_str[i])) {
			err = i;
			break;
		}

	return err;
}
EXPORT_SYMBOL(ib_cache_gid_parse_type_str);
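
/*
 * Illustrative example (not part of this file): the parser above accepts
 * the exact strings from gid_type_str[], with an optional trailing
 * newline as written through sysfs, and returns the matching enum value
 * or -EINVAL:
 *
 *	int t = ib_cache_gid_parse_type_str("RoCE v2\n");
 *	// t == IB_GID_TYPE_ROCE_UDP_ENCAP
 *	t = ib_cache_gid_parse_type_str("RoCE v3");
 *	// t == -EINVAL
 */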

/* This function expects that table->rwlock is write-locked in all
 * scenarios and that table->lock (the mutex) is held in sleepable
 * (RoCE) scenarios.
 */
static int write_gid(struct ib_device *ib_dev, u8 port,
		     struct ib_gid_table *table, int ix,
		     const union ib_gid *gid,
		     const struct ib_gid_attr *attr,
		     enum gid_table_write_action action,
		     bool default_gid)
	__releases(&table->rwlock) __acquires(&table->rwlock)
{
	int ret = 0;
	struct net_device *old_net_dev;
	enum ib_gid_type old_gid_type;

	/* When rdma_cap_roce_gid_table() is true, this function must be
	 * protected by a sleepable lock.
	 */

	if (rdma_cap_roce_gid_table(ib_dev, port)) {
		table->data_vec[ix].props |= GID_TABLE_ENTRY_INVALID;
		write_unlock_irq(&table->rwlock);
		/* GID_TABLE_WRITE_ACTION_MODIFY currently isn't supported by
		 * RoCE providers and thus only updates the cache.
		 */
		if (action == GID_TABLE_WRITE_ACTION_ADD)
			ret = ib_dev->add_gid(ib_dev, port, ix, gid, attr,
					      &table->data_vec[ix].context);
		else if (action == GID_TABLE_WRITE_ACTION_DEL)
			ret = ib_dev->del_gid(ib_dev, port, ix,
					      &table->data_vec[ix].context);
		write_lock_irq(&table->rwlock);
	}
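
	/*
	 * While rwlock was dropped around the provider callback above,
	 * concurrent readers saw GID_TABLE_ENTRY_INVALID on this entry and
	 * skipped it; the flag is cleared at the end of this function, once
	 * the cached copy is consistent again.
	 */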

	old_net_dev = table->data_vec[ix].attr.ndev;
	old_gid_type = table->data_vec[ix].attr.gid_type;
	if (old_net_dev && old_net_dev != attr->ndev)
		dev_put(old_net_dev);
	/* If the provider call failed, or this is a delete, clear the entry. */
	if (ret || action == GID_TABLE_WRITE_ACTION_DEL) {
		gid = &zgid;
		attr = &zattr;
		table->data_vec[ix].context = NULL;
	}

	memcpy(&table->data_vec[ix].gid, gid, sizeof(*gid));
	memcpy(&table->data_vec[ix].attr, attr, sizeof(*attr));
	if (default_gid) {
		table->data_vec[ix].props |= GID_TABLE_ENTRY_DEFAULT;
		if (action == GID_TABLE_WRITE_ACTION_DEL)
			table->data_vec[ix].attr.gid_type = old_gid_type;
	}
	if (table->data_vec[ix].attr.ndev &&
	    table->data_vec[ix].attr.ndev != old_net_dev)
		dev_hold(table->data_vec[ix].attr.ndev);

	table->data_vec[ix].props &= ~GID_TABLE_ENTRY_INVALID;

	return ret;
}

static int add_gid(struct ib_device *ib_dev, u8 port,
		   struct ib_gid_table *table, int ix,
		   const union ib_gid *gid,
		   const struct ib_gid_attr *attr,
		   bool default_gid)
{
	return write_gid(ib_dev, port, table, ix, gid, attr,
			 GID_TABLE_WRITE_ACTION_ADD, default_gid);
}

static int modify_gid(struct ib_device *ib_dev, u8 port,
		      struct ib_gid_table *table, int ix,
		      const union ib_gid *gid,
		      const struct ib_gid_attr *attr,
		      bool default_gid)
{
	return write_gid(ib_dev, port, table, ix, gid, attr,
			 GID_TABLE_WRITE_ACTION_MODIFY, default_gid);
}

static int del_gid(struct ib_device *ib_dev, u8 port,
		   struct ib_gid_table *table, int ix,
		   bool default_gid)
{
	return write_gid(ib_dev, port, table, ix, &zgid, &zattr,
			 GID_TABLE_WRITE_ACTION_DEL, default_gid);
}

/* rwlock should be read locked */
static int find_gid(struct ib_gid_table *table, const union ib_gid *gid,
		    const struct ib_gid_attr *val, bool default_gid,
		    unsigned long mask, int *pempty)
{
	int i = 0;
	int found = -1;
	int empty = pempty ? -1 : 0;

	while (i < table->sz && (found < 0 || empty < 0)) {
		struct ib_gid_table_entry *data = &table->data_vec[i];
		struct ib_gid_attr *attr = &data->attr;
		int curr_index = i;

		i++;

		if (data->props & GID_TABLE_ENTRY_INVALID)
			continue;

		if (empty < 0)
			if (!memcmp(&data->gid, &zgid, sizeof(*gid)) &&
			    !memcmp(attr, &zattr, sizeof(*attr)) &&
			    !data->props)
				empty = curr_index;

		if (found >= 0)
			continue;

		if (mask & GID_ATTR_FIND_MASK_GID_TYPE &&
		    attr->gid_type != val->gid_type)
			continue;

		if (mask & GID_ATTR_FIND_MASK_GID &&
		    memcmp(gid, &data->gid, sizeof(*gid)))
			continue;

		if (mask & GID_ATTR_FIND_MASK_NETDEV &&
		    attr->ndev != val->ndev)
			continue;

		if (mask & GID_ATTR_FIND_MASK_DEFAULT &&
		    !!(data->props & GID_TABLE_ENTRY_DEFAULT) !=
		    default_gid)
			continue;

		found = curr_index;
	}

	if (pempty)
		*pempty = empty;

	return found;
}

static void addrconf_ifid_eui48(u8 *eui, struct net_device *dev)
{
	if (dev->if_addrlen != ETH_ALEN)
		return;
	memcpy(eui, IF_LLADDR(dev), 3);
	memcpy(eui + 5, IF_LLADDR(dev) + 3, 3);

	/* NOTE: The scope ID is added by the GID to IP conversion */

	eui[3] = 0xFF;
	eui[4] = 0xFE;
	eui[0] ^= 2;
}
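
/*
 * Worked example (illustrative): for a hypothetical MAC address
 * 00:11:22:33:44:55, the transformation above yields the modified EUI-64
 * interface ID 02:11:22:ff:fe:33:44:55 -- the bytes 0xff/0xfe are
 * inserted in the middle and the universal/local bit of the first byte
 * is inverted, per RFC 4291, Appendix A.
 */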

static void make_default_gid(struct net_device *dev, union ib_gid *gid)
{
	gid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
	addrconf_ifid_eui48(&gid->raw[8], dev);
}
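
/*
 * Continuing the example above: the default GID built here is the
 * IPv6-style link-local value fe80::211:22ff:fe33:4455, i.e. the
 * fe80::/64 prefix with the modified EUI-64 in the low 8 bytes.
 */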

int ib_cache_gid_add(struct ib_device *ib_dev, u8 port,
		     union ib_gid *gid, struct ib_gid_attr *attr)
{
	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
	struct ib_gid_table *table;
	int ix;
	int ret = 0;
	int empty;

	table = ports_table[port - rdma_start_port(ib_dev)];

	if (!memcmp(gid, &zgid, sizeof(*gid)))
		return -EINVAL;

	mutex_lock(&table->lock);
	write_lock_irq(&table->rwlock);

	ix = find_gid(table, gid, attr, false, GID_ATTR_FIND_MASK_GID |
		      GID_ATTR_FIND_MASK_GID_TYPE |
		      GID_ATTR_FIND_MASK_NETDEV, &empty);
	if (ix >= 0)
		goto out_unlock;

	if (empty < 0) {
		ret = -ENOSPC;
		goto out_unlock;
	}

	ret = add_gid(ib_dev, port, table, empty, gid, attr, false);
	if (!ret)
		dispatch_gid_change_event(ib_dev, port);

out_unlock:
	write_unlock_irq(&table->rwlock);
	mutex_unlock(&table->lock);
	return ret;
}
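
/*
 * Minimal usage sketch (assumptions: "roce_dev" and "ifp" stand for a
 * RoCE-capable ib_device and its net_device; neither name exists in
 * this file):
 *
 *	union ib_gid gid = { ... };
 *	struct ib_gid_attr attr = {
 *		.ndev = ifp,
 *		.gid_type = IB_GID_TYPE_ROCE_UDP_ENCAP,
 *	};
 *	int err = ib_cache_gid_add(roce_dev, 1, &gid, &attr);
 *	// err == 0 on success (or if an identical entry already
 *	// exists); err == -ENOSPC when the port's GID table is full.
 */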

int ib_cache_gid_del(struct ib_device *ib_dev, u8 port,
		     union ib_gid *gid, struct ib_gid_attr *attr)
{
	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
	struct ib_gid_table *table;
	int ix;

	table = ports_table[port - rdma_start_port(ib_dev)];

	mutex_lock(&table->lock);
	write_lock_irq(&table->rwlock);

	ix = find_gid(table, gid, attr, false,
		      GID_ATTR_FIND_MASK_GID	  |
		      GID_ATTR_FIND_MASK_GID_TYPE |
		      GID_ATTR_FIND_MASK_NETDEV	  |
		      GID_ATTR_FIND_MASK_DEFAULT,
		      NULL);
	if (ix < 0)
		goto out_unlock;

	if (!del_gid(ib_dev, port, table, ix, false))
		dispatch_gid_change_event(ib_dev, port);

out_unlock:
	write_unlock_irq(&table->rwlock);
	mutex_unlock(&table->lock);
	return 0;
}

int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u8 port,
				     struct net_device *ndev)
{
	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
	struct ib_gid_table *table;
	int ix;
	bool deleted = false;

	table = ports_table[port - rdma_start_port(ib_dev)];

	mutex_lock(&table->lock);
	write_lock_irq(&table->rwlock);

	for (ix = 0; ix < table->sz; ix++)
		if (table->data_vec[ix].attr.ndev == ndev)
			if (!del_gid(ib_dev, port, table, ix,
				     !!(table->data_vec[ix].props &
					GID_TABLE_ENTRY_DEFAULT)))
				deleted = true;

	write_unlock_irq(&table->rwlock);
	mutex_unlock(&table->lock);

	if (deleted)
		dispatch_gid_change_event(ib_dev, port);

	return 0;
}

static int __ib_cache_gid_get(struct ib_device *ib_dev, u8 port, int index,
			      union ib_gid *gid, struct ib_gid_attr *attr)
{
	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
	struct ib_gid_table *table;

	table = ports_table[port - rdma_start_port(ib_dev)];

	if (index < 0 || index >= table->sz)
		return -EINVAL;

	if (table->data_vec[index].props & GID_TABLE_ENTRY_INVALID)
		return -EAGAIN;

	memcpy(gid, &table->data_vec[index].gid, sizeof(*gid));
	if (attr) {
		memcpy(attr, &table->data_vec[index].attr, sizeof(*attr));
		/* make sure network device is valid and attached */
		if (attr->ndev != NULL &&
		    (attr->ndev->if_flags & IFF_DYING) == 0 &&
		    attr->ndev->if_addr != NULL)
			dev_hold(attr->ndev);
		else
			attr->ndev = NULL;
	}

	return 0;
}

static int _ib_cache_gid_table_find(struct ib_device *ib_dev,
				    const union ib_gid *gid,
				    const struct ib_gid_attr *val,
				    unsigned long mask,
				    u8 *port, u16 *index)
{
	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
	struct ib_gid_table *table;
	u8 p;
	int local_index;
	unsigned long flags;

	for (p = 0; p < ib_dev->phys_port_cnt; p++) {
		table = ports_table[p];
		read_lock_irqsave(&table->rwlock, flags);
		local_index = find_gid(table, gid, val, false, mask, NULL);
		if (local_index >= 0) {
			if (index)
				*index = local_index;
			if (port)
				*port = p + rdma_start_port(ib_dev);
			read_unlock_irqrestore(&table->rwlock, flags);
			return 0;
		}
		read_unlock_irqrestore(&table->rwlock, flags);
	}

	return -ENOENT;
}

static int ib_cache_gid_find(struct ib_device *ib_dev,
			     const union ib_gid *gid,
			     enum ib_gid_type gid_type,
			     struct net_device *ndev, u8 *port,
			     u16 *index)
{
	unsigned long mask = GID_ATTR_FIND_MASK_GID |
			     GID_ATTR_FIND_MASK_GID_TYPE;
	struct ib_gid_attr gid_attr_val = {.ndev = ndev, .gid_type = gid_type};

	if (ndev)
		mask |= GID_ATTR_FIND_MASK_NETDEV;

	return _ib_cache_gid_table_find(ib_dev, gid, &gid_attr_val,
					mask, port, index);
}

int ib_find_cached_gid_by_port(struct ib_device *ib_dev,
			       const union ib_gid *gid,
			       enum ib_gid_type gid_type,
			       u8 port, struct net_device *ndev,
			       u16 *index)
{
	int local_index;
	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
	struct ib_gid_table *table;
	unsigned long mask = GID_ATTR_FIND_MASK_GID |
			     GID_ATTR_FIND_MASK_GID_TYPE;
	struct ib_gid_attr val = {.ndev = ndev, .gid_type = gid_type};
	unsigned long flags;

	if (port < rdma_start_port(ib_dev) ||
	    port > rdma_end_port(ib_dev))
		return -ENOENT;

	table = ports_table[port - rdma_start_port(ib_dev)];

	if (ndev)
		mask |= GID_ATTR_FIND_MASK_NETDEV;

	read_lock_irqsave(&table->rwlock, flags);
	local_index = find_gid(table, gid, &val, false, mask, NULL);
	if (local_index >= 0) {
		if (index)
			*index = local_index;
		read_unlock_irqrestore(&table->rwlock, flags);
		return 0;
	}

	read_unlock_irqrestore(&table->rwlock, flags);
	return -ENOENT;
}
EXPORT_SYMBOL(ib_find_cached_gid_by_port);
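
/*
 * Example (illustrative sketch; "dev" is assumed to be a valid
 * ib_device): resolving a known GID back to its table slot on port 1.
 *
 *	u16 index;
 *	int err = ib_find_cached_gid_by_port(dev, &gid, IB_GID_TYPE_IB,
 *					     1, NULL, &index);
 *	// err == 0: "index" is the slot of "gid" on port 1; passing
 *	// ndev == NULL skips the net_device comparison.
 */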

/**
 * ib_cache_gid_find_by_filter - Returns the GID table index where a
 * specified GID value occurs
 * @ib_dev: The device to query.
 * @gid: The GID value to search for.
 * @port: The port number of the device where the GID value should be
 *   searched.
 * @filter: The filter function is executed on any matching GID in the table.
 *   If the filter function returns true, the corresponding index is returned,
 *   otherwise, we continue searching the GID table. It's guaranteed that
 *   while filter is executed, ndev field is valid and the structure won't
 *   change. filter is executed in an atomic context. filter must not be NULL.
 * @context: Opaque pointer passed through to @filter.
 * @index: The index into the cached GID table where the GID was found.  This
 *   parameter may be NULL.
 *
 * ib_cache_gid_find_by_filter() searches for the specified GID value
 * of which the filter function returns true in the port's GID table.
 * This function is only supported on RoCE ports.
 */
static int ib_cache_gid_find_by_filter(struct ib_device *ib_dev,
				       const union ib_gid *gid,
				       u8 port,
				       bool (*filter)(const union ib_gid *,
						      const struct ib_gid_attr *,
						      void *),
				       void *context,
				       u16 *index)
{
	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
	struct ib_gid_table *table;
	unsigned int i;
	unsigned long flags;
	bool found = false;

	if (!ports_table)
		return -EOPNOTSUPP;

	if (port < rdma_start_port(ib_dev) ||
	    port > rdma_end_port(ib_dev) ||
	    !rdma_protocol_roce(ib_dev, port))
		return -EPROTONOSUPPORT;

	table = ports_table[port - rdma_start_port(ib_dev)];

	read_lock_irqsave(&table->rwlock, flags);
	for (i = 0; i < table->sz; i++) {
		struct ib_gid_attr attr;

		if (table->data_vec[i].props & GID_TABLE_ENTRY_INVALID)
			goto next;

		if (memcmp(gid, &table->data_vec[i].gid, sizeof(*gid)))
			goto next;

		memcpy(&attr, &table->data_vec[i].attr, sizeof(attr));

		if (filter(gid, &attr, context))
			found = true;

next:
		if (found)
			break;
	}
	read_unlock_irqrestore(&table->rwlock, flags);

	if (!found)
		return -ENOENT;

	if (index)
		*index = i;
	return 0;
}
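
/*
 * Example filter (an illustrative sketch; "match_ndev" is not defined
 * anywhere in this file): accept only entries bound to a particular
 * net_device, passed via the context pointer.
 *
 *	static bool match_ndev(const union ib_gid *gid,
 *			       const struct ib_gid_attr *attr,
 *			       void *context)
 *	{
 *		return attr->ndev == (struct net_device *)context;
 *	}
 *
 * Note the filter runs under the table's read lock, so it must not sleep.
 */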

static struct ib_gid_table *alloc_gid_table(int sz)
{
	struct ib_gid_table *table =
		kzalloc(sizeof(struct ib_gid_table), GFP_KERNEL);

	if (!table)
		return NULL;

	table->data_vec = kcalloc(sz, sizeof(*table->data_vec), GFP_KERNEL);
	if (!table->data_vec)
		goto err_free_table;

	mutex_init(&table->lock);

	table->sz = sz;
	rwlock_init(&table->rwlock);

	return table;

err_free_table:
	kfree(table);
	return NULL;
}

static void release_gid_table(struct ib_gid_table *table)
{
	if (table) {
		kfree(table->data_vec);
		kfree(table);
	}
}

static void cleanup_gid_table_port(struct ib_device *ib_dev, u8 port,
				   struct ib_gid_table *table)
{
	int i;
	bool deleted = false;

	if (!table)
		return;

	write_lock_irq(&table->rwlock);
	for (i = 0; i < table->sz; ++i) {
		if (memcmp(&table->data_vec[i].gid, &zgid,
			   sizeof(table->data_vec[i].gid)))
			if (!del_gid(ib_dev, port, table, i,
				     !!(table->data_vec[i].props &
					GID_TABLE_ENTRY_DEFAULT)))
				deleted = true;
	}
	write_unlock_irq(&table->rwlock);

	if (deleted)
		dispatch_gid_change_event(ib_dev, port);
}

void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port,
				  struct net_device *ndev,
				  unsigned long gid_type_mask,
				  enum ib_cache_gid_default_mode mode)
{
	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
	union ib_gid gid;
	struct ib_gid_attr gid_attr;
	struct ib_gid_attr zattr_type = zattr;
	struct ib_gid_table *table;
	unsigned int gid_type;

	table = ports_table[port - rdma_start_port(ib_dev)];

	make_default_gid(ndev, &gid);
	memset(&gid_attr, 0, sizeof(gid_attr));
	gid_attr.ndev = ndev;

	/* The default GID is created from the unique GUID and the local
	 * subnet prefix, as described in sections 4.1.1 and 3.5.10 of IB
	 * spec 1.3.  When IPv6 is disabled in the kernel, don't create a
	 * RoCEv2 default GID from it, since such a GID would look like an
	 * IPv6 GID based on the link-local address.
	 */
#ifndef INET6
	gid_type_mask &= ~BIT(IB_GID_TYPE_ROCE_UDP_ENCAP);
#endif

	for (gid_type = 0; gid_type < IB_GID_TYPE_SIZE; ++gid_type) {
		int ix;
		union ib_gid current_gid;
		struct ib_gid_attr current_gid_attr = {};

		if ((1UL << gid_type) & ~gid_type_mask)
			continue;

		gid_attr.gid_type = gid_type;

		mutex_lock(&table->lock);
		write_lock_irq(&table->rwlock);
		ix = find_gid(table, NULL, &gid_attr, true,
			      GID_ATTR_FIND_MASK_GID_TYPE |
			      GID_ATTR_FIND_MASK_DEFAULT,
			      NULL);

		/* Couldn't find the default GID's reserved slot */
		if (WARN_ON(ix < 0))
			goto release;

		zattr_type.gid_type = gid_type;

		if (!__ib_cache_gid_get(ib_dev, port, ix,
					&current_gid, &current_gid_attr) &&
		    mode == IB_CACHE_GID_DEFAULT_MODE_SET &&
		    !memcmp(&gid, &current_gid, sizeof(gid)) &&
		    !memcmp(&gid_attr, &current_gid_attr, sizeof(gid_attr)))
			goto release;

		if (memcmp(&current_gid, &zgid, sizeof(current_gid)) ||
		    memcmp(&current_gid_attr, &zattr_type,
			   sizeof(current_gid_attr))) {
			if (del_gid(ib_dev, port, table, ix, true)) {
				pr_warn("ib_cache_gid: can't delete index %d for default gid %pI6\n",
					ix, gid.raw);
				goto release;
			} else {
				dispatch_gid_change_event(ib_dev, port);
			}
		}

		if (mode == IB_CACHE_GID_DEFAULT_MODE_SET) {
			if (add_gid(ib_dev, port, table, ix, &gid, &gid_attr, true))
				pr_warn("ib_cache_gid: unable to add default gid %pI6\n",
					gid.raw);
			else
				dispatch_gid_change_event(ib_dev, port);
		}

release:
		if (current_gid_attr.ndev)
			dev_put(current_gid_attr.ndev);
		write_unlock_irq(&table->rwlock);
		mutex_unlock(&table->lock);
	}
}

static int gid_table_reserve_default(struct ib_device *ib_dev, u8 port,
				     struct ib_gid_table *table)
{
	unsigned int i;
	unsigned long roce_gid_type_mask;
	unsigned int num_default_gids;
	unsigned int current_gid = 0;

	roce_gid_type_mask = roce_gid_type_mask_support(ib_dev, port);
	num_default_gids = hweight_long(roce_gid_type_mask);
	for (i = 0; i < num_default_gids && i < table->sz; i++) {
		struct ib_gid_table_entry *entry =
			&table->data_vec[i];

		entry->props |= GID_TABLE_ENTRY_DEFAULT;
		current_gid = find_next_bit(&roce_gid_type_mask,
					    BITS_PER_LONG,
					    current_gid);
		entry->attr.gid_type = current_gid++;
	}

	return 0;
}
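
/*
 * Example (illustrative): if a port supports both RoCE v1 and RoCE v2,
 * roce_gid_type_mask_support() returns a mask of 0x3, so the first two
 * table slots are reserved as default entries, with gid_type
 * IB_GID_TYPE_IB and IB_GID_TYPE_ROCE_UDP_ENCAP respectively.
 */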

static int _gid_table_setup_one(struct ib_device *ib_dev)
{
	u8 port;
	struct ib_gid_table **table;
	int err = 0;

	table = kcalloc(ib_dev->phys_port_cnt, sizeof(*table), GFP_KERNEL);

	if (!table) {
		pr_warn("failed to allocate ib gid cache for %s\n",
			ib_dev->name);
		return -ENOMEM;
	}

	for (port = 0; port < ib_dev->phys_port_cnt; port++) {
		u8 rdma_port = port + rdma_start_port(ib_dev);

		table[port] =
			alloc_gid_table(
				ib_dev->port_immutable[rdma_port].gid_tbl_len);
		if (!table[port]) {
			err = -ENOMEM;
			goto rollback_table_setup;
		}

		err = gid_table_reserve_default(ib_dev,
						port + rdma_start_port(ib_dev),
						table[port]);
		if (err)
			goto rollback_table_setup;
	}

	ib_dev->cache.gid_cache = table;
	return 0;

rollback_table_setup:
	for (port = 0; port < ib_dev->phys_port_cnt; port++) {
		cleanup_gid_table_port(ib_dev, port + rdma_start_port(ib_dev),
				       table[port]);
		release_gid_table(table[port]);
	}

	kfree(table);
	return err;
}

static void gid_table_release_one(struct ib_device *ib_dev)
{
	struct ib_gid_table **table = ib_dev->cache.gid_cache;
	u8 port;

	if (!table)
		return;

	for (port = 0; port < ib_dev->phys_port_cnt; port++)
		release_gid_table(table[port]);

	kfree(table);
	ib_dev->cache.gid_cache = NULL;
}

static void gid_table_cleanup_one(struct ib_device *ib_dev)
{
	struct ib_gid_table **table = ib_dev->cache.gid_cache;
	u8 port;

	if (!table)
		return;

	for (port = 0; port < ib_dev->phys_port_cnt; port++)
		cleanup_gid_table_port(ib_dev, port + rdma_start_port(ib_dev),
				       table[port]);
}

static int gid_table_setup_one(struct ib_device *ib_dev)
{
	int err;

	err = _gid_table_setup_one(ib_dev);

	if (err)
		return err;

	err = roce_rescan_device(ib_dev);

	if (err) {
		gid_table_cleanup_one(ib_dev);
		gid_table_release_one(ib_dev);
	}

	return err;
}

int ib_get_cached_gid(struct ib_device *device,
		      u8                port_num,
		      int               index,
		      union ib_gid     *gid,
		      struct ib_gid_attr *gid_attr)
{
	int res;
	unsigned long flags;
	struct ib_gid_table **ports_table = device->cache.gid_cache;
	struct ib_gid_table *table;

	/* Validate port_num before using it to index the per-port table. */
	if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
		return -EINVAL;

	table = ports_table[port_num - rdma_start_port(device)];

	read_lock_irqsave(&table->rwlock, flags);
	res = __ib_cache_gid_get(device, port_num, index, gid, gid_attr);
	read_unlock_irqrestore(&table->rwlock, flags);

	return res;
}
EXPORT_SYMBOL(ib_get_cached_gid);

int ib_find_cached_gid(struct ib_device *device,
		       const union ib_gid *gid,
		       enum ib_gid_type gid_type,
		       struct net_device *ndev,
		       u8               *port_num,
		       u16              *index)
{
	return ib_cache_gid_find(device, gid, gid_type, ndev, port_num, index);
}
EXPORT_SYMBOL(ib_find_cached_gid);

int ib_find_gid_by_filter(struct ib_device *device,
			  const union ib_gid *gid,
			  u8 port_num,
			  bool (*filter)(const union ib_gid *gid,
					 const struct ib_gid_attr *,
					 void *),
			  void *context, u16 *index)
{
	/* Only the RoCE GID table supports a filter function */
	if (!rdma_cap_roce_gid_table(device, port_num) && filter)
		return -EPROTONOSUPPORT;

	return ib_cache_gid_find_by_filter(device, gid,
					   port_num, filter,
					   context, index);
}
EXPORT_SYMBOL(ib_find_gid_by_filter);

int ib_get_cached_pkey(struct ib_device *device,
		       u8                port_num,
		       int               index,
		       u16              *pkey)
{
	struct ib_pkey_cache *cache;
	unsigned long flags;
	int ret = 0;

	if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
		return -EINVAL;

	read_lock_irqsave(&device->cache.lock, flags);

	cache = device->cache.pkey_cache[port_num - rdma_start_port(device)];

	if (index < 0 || index >= cache->table_len)
		ret = -EINVAL;
	else
		*pkey = cache->table[index];

	read_unlock_irqrestore(&device->cache.lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_get_cached_pkey);

int ib_find_cached_pkey(struct ib_device *device,
			u8                port_num,
			u16               pkey,
			u16              *index)
{
	struct ib_pkey_cache *cache;
	unsigned long flags;
	int i;
	int ret = -ENOENT;
	int partial_ix = -1;

	if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
		return -EINVAL;

	read_lock_irqsave(&device->cache.lock, flags);

	cache = device->cache.pkey_cache[port_num - rdma_start_port(device)];

	*index = -1;

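	/*
	 * Bit 15 of a P_Key is the full-membership bit and the low 15
	 * bits are the key itself.  Prefer a full-member entry, but fall
	 * back to a partial-member match if that is all the table holds.
	 */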
	for (i = 0; i < cache->table_len; ++i)
		if ((cache->table[i] & 0x7fff) == (pkey & 0x7fff)) {
			if (cache->table[i] & 0x8000) {
				*index = i;
				ret = 0;
				break;
			} else
				partial_ix = i;
		}

	if (ret && partial_ix >= 0) {
		*index = partial_ix;
		ret = 0;
	}

	read_unlock_irqrestore(&device->cache.lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_find_cached_pkey);

int ib_find_exact_cached_pkey(struct ib_device *device,
			      u8                port_num,
			      u16               pkey,
			      u16              *index)
{
	struct ib_pkey_cache *cache;
	unsigned long flags;
	int i;
	int ret = -ENOENT;

	if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
		return -EINVAL;

	read_lock_irqsave(&device->cache.lock, flags);

	cache = device->cache.pkey_cache[port_num - rdma_start_port(device)];

	*index = -1;

	for (i = 0; i < cache->table_len; ++i)
		if (cache->table[i] == pkey) {
			*index = i;
			ret = 0;
			break;
		}

	read_unlock_irqrestore(&device->cache.lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_find_exact_cached_pkey);

int ib_get_cached_lmc(struct ib_device *device,
		      u8                port_num,
		      u8                *lmc)
{
	unsigned long flags;
	int ret = 0;

	if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
		return -EINVAL;

	read_lock_irqsave(&device->cache.lock, flags);
	*lmc = device->cache.lmc_cache[port_num - rdma_start_port(device)];
	read_unlock_irqrestore(&device->cache.lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_get_cached_lmc);

static void ib_cache_update(struct ib_device *device,
			    u8                port)
{
	struct ib_port_attr       *tprops = NULL;
	struct ib_pkey_cache      *pkey_cache = NULL, *old_pkey_cache;
	struct ib_gid_cache {
		int             table_len;
		union ib_gid    table[0];
	}			  *gid_cache = NULL;
	int                        i;
	int                        ret;
	struct ib_gid_table	  *table;
	struct ib_gid_table	 **ports_table = device->cache.gid_cache;
	bool			   use_roce_gid_table =
					rdma_cap_roce_gid_table(device, port);

	if (port < rdma_start_port(device) || port > rdma_end_port(device))
		return;

	table = ports_table[port - rdma_start_port(device)];

	tprops = kmalloc(sizeof *tprops, GFP_KERNEL);
	if (!tprops)
		return;

	ret = ib_query_port(device, port, tprops);
	if (ret) {
		pr_warn("ib_query_port failed (%d) for %s\n",
			ret, device->name);
		goto err;
	}

	pkey_cache = kmalloc(sizeof *pkey_cache + tprops->pkey_tbl_len *
			     sizeof *pkey_cache->table, GFP_KERNEL);
	if (!pkey_cache)
		goto err;

	pkey_cache->table_len = tprops->pkey_tbl_len;

	if (!use_roce_gid_table) {
		gid_cache = kmalloc(sizeof(*gid_cache) + tprops->gid_tbl_len *
			    sizeof(*gid_cache->table), GFP_KERNEL);
		if (!gid_cache)
			goto err;

		gid_cache->table_len = tprops->gid_tbl_len;
	}

	for (i = 0; i < pkey_cache->table_len; ++i) {
		ret = ib_query_pkey(device, port, i, pkey_cache->table + i);
		if (ret) {
			pr_warn("ib_query_pkey failed (%d) for %s (index %d)\n",
				ret, device->name, i);
			goto err;
		}
	}

	if (!use_roce_gid_table) {
		for (i = 0; i < gid_cache->table_len; ++i) {
			ret = ib_query_gid(device, port, i,
					   gid_cache->table + i, NULL);
			if (ret) {
				pr_warn("ib_query_gid failed (%d) for %s (index %d)\n",
					ret, device->name, i);
				goto err;
			}
		}
	}

	write_lock_irq(&device->cache.lock);

	old_pkey_cache = device->cache.pkey_cache[port - rdma_start_port(device)];

	device->cache.pkey_cache[port - rdma_start_port(device)] = pkey_cache;
	if (!use_roce_gid_table) {
		write_lock(&table->rwlock);
		for (i = 0; i < gid_cache->table_len; i++) {
			modify_gid(device, port, table, i, gid_cache->table + i,
				   &zattr, false);
		}
		write_unlock(&table->rwlock);
	}

	device->cache.lmc_cache[port - rdma_start_port(device)] = tprops->lmc;

	write_unlock_irq(&device->cache.lock);

	kfree(gid_cache);
	kfree(old_pkey_cache);
	kfree(tprops);
	return;

err:
	kfree(pkey_cache);
	kfree(gid_cache);
	kfree(tprops);
}

static void ib_cache_task(struct work_struct *_work)
{
	struct ib_update_work *work =
		container_of(_work, struct ib_update_work, work);

	ib_cache_update(work->device, work->port_num);
	kfree(work);
}

static void ib_cache_event(struct ib_event_handler *handler,
			   struct ib_event *event)
{
	struct ib_update_work *work;

	if (event->event == IB_EVENT_PORT_ERR    ||
	    event->event == IB_EVENT_PORT_ACTIVE ||
	    event->event == IB_EVENT_LID_CHANGE  ||
	    event->event == IB_EVENT_PKEY_CHANGE ||
	    event->event == IB_EVENT_SM_CHANGE   ||
	    event->event == IB_EVENT_CLIENT_REREGISTER ||
	    event->event == IB_EVENT_GID_CHANGE) {
		work = kmalloc(sizeof *work, GFP_ATOMIC);
		if (work) {
			INIT_WORK(&work->work, ib_cache_task);
			work->device   = event->device;
			work->port_num = event->element.port_num;
			queue_work(ib_wq, &work->work);
		}
	}
}

int ib_cache_setup_one(struct ib_device *device)
{
	int p;
	int err;

	rwlock_init(&device->cache.lock);

	device->cache.pkey_cache =
		kzalloc(sizeof *device->cache.pkey_cache *
			(rdma_end_port(device) - rdma_start_port(device) + 1), GFP_KERNEL);
	device->cache.lmc_cache = kmalloc(sizeof *device->cache.lmc_cache *
					  (rdma_end_port(device) -
					   rdma_start_port(device) + 1),
					  GFP_KERNEL);
	if (!device->cache.pkey_cache ||
	    !device->cache.lmc_cache) {
		pr_warn("Couldn't allocate cache for %s\n", device->name);
		return -ENOMEM;
	}

	err = gid_table_setup_one(device);
	if (err)
		/* Allocated memory will be cleaned in the release function */
		return err;

	for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p)
		ib_cache_update(device, p + rdma_start_port(device));

	INIT_IB_EVENT_HANDLER(&device->cache.event_handler,
			      device, ib_cache_event);
	err = ib_register_event_handler(&device->cache.event_handler);
	if (err)
		goto err;

	return 0;

err:
	gid_table_cleanup_one(device);
	return err;
}

void ib_cache_release_one(struct ib_device *device)
{
	int p;

	/*
	 * The release function frees all the cache elements.
	 * This function should be called as part of freeing
	 * all the device's resources when the cache could no
	 * longer be accessed.
	 */
	if (device->cache.pkey_cache)
		for (p = 0;
		     p <= rdma_end_port(device) - rdma_start_port(device); ++p)
			kfree(device->cache.pkey_cache[p]);

	gid_table_release_one(device);
	kfree(device->cache.pkey_cache);
	kfree(device->cache.lmc_cache);
}

void ib_cache_cleanup_one(struct ib_device *device)
{
	/* The cleanup function unregisters the event handler,
	 * waits for all in-progress workqueue elements and cleans
	 * up the GID cache. This function should be called after
	 * the device was removed from the devices list and all
	 * clients were removed, so the cache exists but is
	 * non-functional and shouldn't be updated anymore.
	 */
	ib_unregister_event_handler(&device->cache.event_handler);
	flush_workqueue(ib_wq);
	gid_table_cleanup_one(device);
}

void __init ib_cache_setup(void)
{
	roce_gid_mgmt_init();
}

void __exit ib_cache_cleanup(void)
{
	roce_gid_mgmt_cleanup();
}
1258