1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause OR GPL-2.0 3 * 4 * Copyright (c) 2005 Topspin Communications. All rights reserved. 5 * Copyright (c) 2005 Cisco Systems. All rights reserved. 6 * Copyright (c) 2005 Mellanox Technologies. All rights reserved. 7 * 8 * This software is available to you under a choice of one of two 9 * licenses. You may choose to be licensed under the terms of the GNU 10 * General Public License (GPL) Version 2, available from the file 11 * COPYING in the main directory of this source tree, or the 12 * OpenIB.org BSD license below: 13 * 14 * Redistribution and use in source and binary forms, with or 15 * without modification, are permitted provided that the following 16 * conditions are met: 17 * 18 * - Redistributions of source code must retain the above 19 * copyright notice, this list of conditions and the following 20 * disclaimer. 21 * 22 * - Redistributions in binary form must reproduce the above 23 * copyright notice, this list of conditions and the following 24 * disclaimer in the documentation and/or other materials 25 * provided with the distribution. 26 * 27 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 28 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 29 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 30 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 31 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 32 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 33 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 34 * SOFTWARE. 35 * 36 * $FreeBSD$ 37 */ 38 39 #define LINUXKPI_PARAM_PREFIX ibcore_ 40 41 #include <linux/mm.h> 42 #include <linux/dma-mapping.h> 43 #include <linux/sched.h> 44 #include <linux/slab.h> 45 #include <linux/wait.h> 46 #include <rdma/ib_umem_odp.h> 47 48 #include "uverbs.h" 49 50 #include <sys/priv.h> 51 52 static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int dirty) 53 { 54 struct scatterlist *sg; 55 struct page *page; 56 int i; 57 58 if (umem->nmap > 0) 59 ib_dma_unmap_sg(dev, umem->sg_head.sgl, 60 umem->nmap, 61 DMA_BIDIRECTIONAL); 62 63 for_each_sg(umem->sg_head.sgl, sg, umem->npages, i) { 64 65 page = sg_page(sg); 66 put_page(page); 67 } 68 69 sg_free_table(&umem->sg_head); 70 return; 71 72 } 73 74 /** 75 * ib_umem_get - Pin and DMA map userspace memory. 76 * 77 * If access flags indicate ODP memory, avoid pinning. Instead, stores 78 * the mm for future page fault handling in conjunction with MMU notifiers. 79 * 80 * @context: userspace context to pin memory for 81 * @addr: userspace virtual address to start at 82 * @size: length of region to pin 83 * @access: IB_ACCESS_xxx flags for memory being pinned 84 * @dmasync: flush in-flight DMA when the memory region is written 85 */ 86 struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr, 87 size_t size, int access, int dmasync) 88 { 89 struct ib_umem *umem; 90 struct page **page_list; 91 struct vm_area_struct **vma_list; 92 unsigned long locked; 93 unsigned long cur_base; 94 unsigned long npages; 95 int ret; 96 int i; 97 struct dma_attrs dma_attrs = { 0 }; 98 struct scatterlist *sg, *sg_list_start; 99 int need_release = 0; 100 unsigned int gup_flags = FOLL_WRITE; 101 102 if (dmasync) 103 dma_attrs.flags |= DMA_ATTR_WRITE_BARRIER; 104 105 if (!size) 106 return ERR_PTR(-EINVAL); 107 108 /* 109 * If the combination of the addr and size requested for this memory 110 * region causes an integer overflow, return error. 111 */ 112 if (((addr + size) < addr) || 113 PAGE_ALIGN(addr + size) < (addr + size)) 114 return ERR_PTR(-EINVAL); 115 116 if (priv_check(curthread, PRIV_VM_MLOCK) != 0) 117 return ERR_PTR(-EPERM); 118 119 umem = kzalloc(sizeof *umem, GFP_KERNEL); 120 if (!umem) 121 return ERR_PTR(-ENOMEM); 122 123 umem->context = context; 124 umem->length = size; 125 umem->address = addr; 126 umem->page_size = PAGE_SIZE; 127 umem->pid = get_pid(task_pid(current)); 128 /* 129 * We ask for writable memory if any of the following 130 * access flags are set. "Local write" and "remote write" 131 * obviously require write access. "Remote atomic" can do 132 * things like fetch and add, which will modify memory, and 133 * "MW bind" can change permissions by binding a window. 134 */ 135 umem->writable = !!(access & 136 (IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE | 137 IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND)); 138 139 if (access & IB_ACCESS_ON_DEMAND) { 140 ret = ib_umem_odp_get(context, umem); 141 if (ret) { 142 kfree(umem); 143 return ERR_PTR(ret); 144 } 145 return umem; 146 } 147 148 umem->odp_data = NULL; 149 150 page_list = (struct page **) __get_free_page(GFP_KERNEL); 151 if (!page_list) { 152 kfree(umem); 153 return ERR_PTR(-ENOMEM); 154 } 155 156 vma_list = (struct vm_area_struct **) __get_free_page(GFP_KERNEL); 157 158 npages = ib_umem_num_pages(umem); 159 160 down_write(¤t->mm->mmap_sem); 161 162 locked = npages + current->mm->pinned_vm; 163 164 cur_base = addr & PAGE_MASK; 165 166 if (npages == 0 || npages > UINT_MAX) { 167 ret = -EINVAL; 168 goto out; 169 } 170 171 ret = sg_alloc_table(&umem->sg_head, npages, GFP_KERNEL); 172 if (ret) 173 goto out; 174 175 if (!umem->writable) 176 gup_flags |= FOLL_FORCE; 177 178 need_release = 1; 179 sg_list_start = umem->sg_head.sgl; 180 181 while (npages) { 182 ret = get_user_pages(cur_base, 183 min_t(unsigned long, npages, 184 PAGE_SIZE / sizeof (struct page *)), 185 gup_flags, page_list, vma_list); 186 187 if (ret < 0) 188 goto out; 189 190 umem->npages += ret; 191 cur_base += ret * PAGE_SIZE; 192 npages -= ret; 193 194 for_each_sg(sg_list_start, sg, ret, i) { 195 sg_set_page(sg, page_list[i], PAGE_SIZE, 0); 196 } 197 198 /* preparing for next loop */ 199 sg_list_start = sg; 200 } 201 202 umem->nmap = ib_dma_map_sg_attrs(context->device, 203 umem->sg_head.sgl, 204 umem->npages, 205 DMA_BIDIRECTIONAL, 206 &dma_attrs); 207 208 if (umem->nmap <= 0) { 209 ret = -ENOMEM; 210 goto out; 211 } 212 213 ret = 0; 214 215 out: 216 if (ret < 0) { 217 if (need_release) 218 __ib_umem_release(context->device, umem, 0); 219 put_pid(umem->pid); 220 kfree(umem); 221 } else 222 current->mm->pinned_vm = locked; 223 224 up_write(¤t->mm->mmap_sem); 225 if (vma_list) 226 free_page((unsigned long) vma_list); 227 free_page((unsigned long) page_list); 228 229 return ret < 0 ? ERR_PTR(ret) : umem; 230 } 231 EXPORT_SYMBOL(ib_umem_get); 232 233 static void ib_umem_account(struct work_struct *work) 234 { 235 struct ib_umem *umem = container_of(work, struct ib_umem, work); 236 237 down_write(&umem->mm->mmap_sem); 238 umem->mm->pinned_vm -= umem->diff; 239 up_write(&umem->mm->mmap_sem); 240 mmput(umem->mm); 241 kfree(umem); 242 } 243 244 /** 245 * ib_umem_release - release memory pinned with ib_umem_get 246 * @umem: umem struct to release 247 */ 248 void ib_umem_release(struct ib_umem *umem) 249 { 250 struct ib_ucontext *context = umem->context; 251 struct mm_struct *mm; 252 struct task_struct *task; 253 unsigned long diff; 254 255 if (umem->odp_data) { 256 ib_umem_odp_release(umem); 257 return; 258 } 259 260 __ib_umem_release(umem->context->device, umem, 1); 261 262 task = get_pid_task(umem->pid, PIDTYPE_PID); 263 put_pid(umem->pid); 264 if (!task) 265 goto out; 266 mm = get_task_mm(task); 267 put_task_struct(task); 268 if (!mm) 269 goto out; 270 271 diff = ib_umem_num_pages(umem); 272 273 /* 274 * We may be called with the mm's mmap_sem already held. This 275 * can happen when a userspace munmap() is the call that drops 276 * the last reference to our file and calls our release 277 * method. If there are memory regions to destroy, we'll end 278 * up here and not be able to take the mmap_sem. In that case 279 * we defer the vm_locked accounting to the system workqueue. 280 */ 281 if (context->closing) { 282 if (!down_write_trylock(&mm->mmap_sem)) { 283 INIT_WORK(&umem->work, ib_umem_account); 284 umem->mm = mm; 285 umem->diff = diff; 286 287 queue_work(ib_wq, &umem->work); 288 return; 289 } 290 } else 291 down_write(&mm->mmap_sem); 292 293 mm->pinned_vm -= diff; 294 up_write(&mm->mmap_sem); 295 mmput(mm); 296 out: 297 kfree(umem); 298 } 299 EXPORT_SYMBOL(ib_umem_release); 300 301 int ib_umem_page_count(struct ib_umem *umem) 302 { 303 int shift; 304 int i; 305 int n; 306 struct scatterlist *sg; 307 308 if (umem->odp_data) 309 return ib_umem_num_pages(umem); 310 311 shift = ilog2(umem->page_size); 312 313 n = 0; 314 for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i) 315 n += sg_dma_len(sg) >> shift; 316 317 return n; 318 } 319 EXPORT_SYMBOL(ib_umem_page_count); 320 321 /* 322 * Copy from the given ib_umem's pages to the given buffer. 323 * 324 * umem - the umem to copy from 325 * offset - offset to start copying from 326 * dst - destination buffer 327 * length - buffer length 328 * 329 * Returns 0 on success, or an error code. 330 */ 331 int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset, 332 size_t length) 333 { 334 size_t end = offset + length; 335 int ret; 336 337 if (offset > umem->length || length > umem->length - offset) { 338 pr_err("ib_umem_copy_from not in range. offset: %zd umem length: %zd end: %zd\n", 339 offset, umem->length, end); 340 return -EINVAL; 341 } 342 343 #ifdef __linux__ 344 ret = sg_pcopy_to_buffer(umem->sg_head.sgl, umem->nmap, dst, length, 345 offset + ib_umem_offset(umem)); 346 #else 347 ret = 0; 348 #endif 349 if (ret < 0) 350 return ret; 351 else if (ret != length) 352 return -EINVAL; 353 else 354 return 0; 355 } 356 EXPORT_SYMBOL(ib_umem_copy_from); 357