/*-
 * SPDX-License-Identifier: BSD-2-Clause OR GPL-2.0
 *
 * Copyright (c) 2014 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $FreeBSD$
 */

#ifndef IB_UMEM_ODP_H
#define IB_UMEM_ODP_H

#include <linux/rbtree.h>

#include <rdma/ib_umem.h>
#include <rdma/ib_verbs.h>

struct umem_odp_node {
	u64 __subtree_last;
	struct rb_node rb;
};

struct ib_umem_odp {
	/*
	 * An array of the pages included in the on-demand paging umem.
	 * Indices of pages that are currently not mapped into the device will
	 * contain NULL.
	 */
	struct page		**page_list;
	/*
	 * An array of the same size as page_list, with DMA addresses mapped
	 * for the pages in page_list. The lower two bits designate
	 * access permissions. See ODP_READ_ALLOWED_BIT and
	 * ODP_WRITE_ALLOWED_BIT.
	 */
	dma_addr_t		*dma_list;
	/*
	 * The umem_mutex protects the page_list and dma_list fields of an ODP
	 * umem, allowing only a single thread to map/unmap pages. The mutex
	 * also protects access to the mmu notifier counters.
	 */
	struct mutex		umem_mutex;
	void			*private; /* for the HW driver to use. */

	/* When false, use the notifier counter in the ucontext struct. */
	bool mn_counters_active;
	int notifiers_seq;
	int notifiers_count;

	/* A linked list of umems that don't have private mmu notifier
	 * counters yet. */
	struct list_head no_private_counters;
	struct ib_umem		*umem;

	/* Tree tracking */
	struct umem_odp_node	interval_tree;

	struct completion	notifier_completion;
	int			dying;
};

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING

int ib_umem_odp_get(struct ib_ucontext *context, struct ib_umem *umem);

void ib_umem_odp_release(struct ib_umem *umem);

/*
 * The lower 2 bits of the DMA address signal the R/W permissions for
 * the entry. To upgrade the permissions, provide the appropriate
 * bitmask to the map_dma_pages function.
 *
 * Be aware that upgrading a mapped address might result in a change of
 * the DMA address for the page.
 */
#define ODP_READ_ALLOWED_BIT  (1<<0ULL)
#define ODP_WRITE_ALLOWED_BIT (1<<1ULL)

#define ODP_DMA_ADDR_MASK (~(ODP_READ_ALLOWED_BIT | ODP_WRITE_ALLOWED_BIT))
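
/*
 * Illustrative sketch, not part of the original header: how a driver
 * might decode one dma_list entry into its DMA address and its write
 * permission.  The helper name is hypothetical.
 */
static inline dma_addr_t ib_umem_odp_example_decode(dma_addr_t entry,
						    int *writable)
{
	/* The two low bits carry permissions; mask them off for the address. */
	if (writable != NULL)
		*writable = (entry & ODP_WRITE_ALLOWED_BIT) != 0;
	return entry & ODP_DMA_ADDR_MASK;
}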

int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 start_offset, u64 bcnt,
			      u64 access_mask, unsigned long current_seq);

void ib_umem_odp_unmap_dma_pages(struct ib_umem *umem, u64 start_offset,
				 u64 bound);

void rbt_ib_umem_insert(struct umem_odp_node *node, struct rb_root *root);
void rbt_ib_umem_remove(struct umem_odp_node *node, struct rb_root *root);
typedef int (*umem_call_back)(struct ib_umem *item, u64 start, u64 end,
			      void *cookie);
/*
 * Call the callback on each ib_umem in the range. Returns the logical or of
 * the return values of the functions called.
 */
int rbt_ib_umem_for_each_in_range(struct rb_root *root, u64 start, u64 end,
				  umem_call_back cb, void *cookie);
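
/*
 * Illustrative sketch, not part of the original header: a trivial
 * umem_call_back that counts the umems intersecting a range.  A caller
 * could pass it to rbt_ib_umem_for_each_in_range() with an int cookie.
 * The function name is hypothetical.
 */
static inline int ib_umem_odp_example_count_cb(struct ib_umem *item, u64 start,
					       u64 end, void *cookie)
{
	(*(int *)cookie)++;	/* one more umem overlaps [start, end] */
	return 0;		/* keeps the logical-or of return values at 0 */
}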

struct umem_odp_node *rbt_ib_umem_iter_first(struct rb_root *root,
					     u64 start, u64 last);
struct umem_odp_node *rbt_ib_umem_iter_next(struct umem_odp_node *node,
					    u64 start, u64 last);

static inline int ib_umem_mmu_notifier_retry(struct ib_umem *item,
					     unsigned long mmu_seq)
{
	/*
	 * This code is strongly based on the KVM code from
	 * mmu_notifier_retry. Should be called with
	 * the relevant locks taken (item->odp_data->umem_mutex
	 * and the ucontext umem_mutex semaphore locked for read).
	 */

	/* Do not allow page faults while the new ib_umem hasn't seen a state
	 * with zero notifiers yet, and doesn't have its own valid set of
	 * private counters. */
	if (!item->odp_data->mn_counters_active)
		return 1;

	if (unlikely(item->odp_data->notifiers_count))
		return 1;
	if (item->odp_data->notifiers_seq != mmu_seq)
		return 1;
	return 0;
}
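
/*
 * Illustrative sketch, not part of the original header: the retry
 * pattern a page-fault handler might follow, mirroring how KVM uses
 * mmu_notifier_retry().  Snapshot the notifier sequence, map the
 * pages, then re-check the sequence under umem_mutex; a non-zero
 * return from ib_umem_mmu_notifier_retry() means an invalidation
 * raced with the fault and the caller should retry.  The helper name
 * and the -EAGAIN convention are hypothetical, and a real driver
 * would likely read notifiers_seq with a READ_ONCE()-style access.
 */
static inline int ib_umem_odp_example_fault(struct ib_umem *umem, u64 start,
					    u64 bcnt, u64 access_mask)
{
	unsigned long current_seq;
	int ret;

	current_seq = umem->odp_data->notifiers_seq;

	/* Map (and fault in) the requested range against the snapshot above. */
	ret = ib_umem_odp_map_dma_pages(umem, start, bcnt, access_mask,
					current_seq);
	if (ret < 0)
		return ret;

	mutex_lock(&umem->odp_data->umem_mutex);
	if (ib_umem_mmu_notifier_retry(umem, current_seq))
		ret = -EAGAIN;	/* an invalidation raced; caller retries */
	mutex_unlock(&umem->odp_data->umem_mutex);

	return ret;
}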

#else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */

static inline int ib_umem_odp_get(struct ib_ucontext *context,
				  struct ib_umem *umem)
{
	return -EINVAL;
}

static inline void ib_umem_odp_release(struct ib_umem *umem) {}

#endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */

#endif /* IB_UMEM_ODP_H */