/*-
 * SPDX-License-Identifier: BSD-2-Clause OR GPL-2.0
 *
 * Copyright (c) 2014 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $FreeBSD$
 */

#include <linux/types.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_umem_odp.h>

static void ib_umem_notifier_start_account(struct ib_umem *item)
{
	mutex_lock(&item->odp_data->umem_mutex);

	/* Only update the private counters for this umem if it has them.
	 * Otherwise skip it: all page faults on this umem will simply be
	 * delayed until the notifier finishes. */
	if (item->odp_data->mn_counters_active) {
		int notifiers_count = item->odp_data->notifiers_count++;

		if (notifiers_count == 0)
			/* Initialize the completion object for waiting on
			 * notifiers. Since notifiers_count is zero, no one
			 * should be waiting right now. */
			reinit_completion(&item->odp_data->notifier_completion);
	}
	mutex_unlock(&item->odp_data->umem_mutex);
}

static void ib_umem_notifier_end_account(struct ib_umem *item)
{
	mutex_lock(&item->odp_data->umem_mutex);

	/* Only update the private counters for this umem if it has them.
	 * Otherwise skip it: all page faults on this umem will simply be
	 * delayed until the notifier finishes. */
	if (item->odp_data->mn_counters_active) {
		/*
		 * This sequence increase tells the QP page-fault handler
		 * that the page it is about to map into the device page
		 * tables could have been freed in the meantime.
		 */
		++item->odp_data->notifiers_seq;
		if (--item->odp_data->notifiers_count == 0)
			complete_all(&item->odp_data->notifier_completion);
	}
	mutex_unlock(&item->odp_data->umem_mutex);
}

/* Account for a new mmu notifier in an ib_ucontext. */
static void ib_ucontext_notifier_start_account(struct ib_ucontext *context)
{
	atomic_inc(&context->notifier_count);
}
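
/*
 * Illustrative sketch (not code from this file): the per-umem counters
 * above pair with ib_umem_mmu_notifier_retry() in a driver's page-fault
 * path, in the same way KVM pairs mmu_notifier_count with mmu_seq.
 * The driver helper below is hypothetical:
 *
 *	unsigned long seq = umem->odp_data->notifiers_seq;
 *	smp_rmb();
 *	npages = ib_umem_odp_map_dma_pages(umem, va, len, access, seq);
 *	mutex_lock(&umem->odp_data->umem_mutex);
 *	if (!ib_umem_mmu_notifier_retry(umem, seq))
 *		driver_post_pages_to_hw(mr, va, npages);  // hypothetical
 *	mutex_unlock(&umem->odp_data->umem_mutex);
 */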

/* Account for a terminating mmu notifier in an ib_ucontext.
 *
 * Must be called with the ib_ucontext->umem_rwsem semaphore unlocked,
 * since the function takes the semaphore itself. */
static void ib_ucontext_notifier_end_account(struct ib_ucontext *context)
{
	int zero_notifiers = atomic_dec_and_test(&context->notifier_count);

	if (zero_notifiers &&
	    !list_empty(&context->no_private_counters)) {
		/* No currently running mmu notifiers. Now is the chance to
		 * add private accounting to all previously added umems. */
		struct ib_umem_odp *odp_data, *next;

		/* Prevent concurrent mmu notifiers from working on the
		 * no_private_counters list. */
		down_write(&context->umem_rwsem);

		/* Read the notifier_count again, with the umem_rwsem
		 * semaphore taken for write. */
		if (!atomic_read(&context->notifier_count)) {
			list_for_each_entry_safe(odp_data, next,
						 &context->no_private_counters,
						 no_private_counters) {
				mutex_lock(&odp_data->umem_mutex);
				odp_data->mn_counters_active = true;
				list_del(&odp_data->no_private_counters);
				complete_all(&odp_data->notifier_completion);
				mutex_unlock(&odp_data->umem_mutex);
			}
		}

		up_write(&context->umem_rwsem);
	}
}

static int ib_umem_notifier_release_trampoline(struct ib_umem *item, u64 start,
					       u64 end, void *cookie)
{
	/*
	 * Increase the number of notifiers running, to
	 * prevent any further fault handling on this MR.
	 */
	ib_umem_notifier_start_account(item);
	item->odp_data->dying = 1;
	/* Make sure the fact that the umem is dying is visible before we
	 * release all pending page faults. */
	smp_wmb();
	complete_all(&item->odp_data->notifier_completion);
	item->context->invalidate_range(item, ib_umem_start(item),
					ib_umem_end(item));
	return 0;
}

static void ib_umem_notifier_release(struct mmu_notifier *mn,
				     struct mm_struct *mm)
{
	struct ib_ucontext *context = container_of(mn, struct ib_ucontext, mn);

	if (!context->invalidate_range)
		return;

	ib_ucontext_notifier_start_account(context);
	down_read(&context->umem_rwsem);
	rbt_ib_umem_for_each_in_range(&context->umem_tree, 0,
				      ULLONG_MAX,
				      ib_umem_notifier_release_trampoline,
				      NULL);
	up_read(&context->umem_rwsem);
}

static int invalidate_page_trampoline(struct ib_umem *item, u64 start,
				      u64 end, void *cookie)
{
	ib_umem_notifier_start_account(item);
	item->context->invalidate_range(item, start, start + PAGE_SIZE);
	ib_umem_notifier_end_account(item);
	return 0;
}

static void ib_umem_notifier_invalidate_page(struct mmu_notifier *mn,
					     struct mm_struct *mm,
					     unsigned long address)
{
	struct ib_ucontext *context = container_of(mn, struct ib_ucontext, mn);

	if (!context->invalidate_range)
		return;

	ib_ucontext_notifier_start_account(context);
	down_read(&context->umem_rwsem);
	rbt_ib_umem_for_each_in_range(&context->umem_tree, address,
				      address + PAGE_SIZE,
				      invalidate_page_trampoline, NULL);
	up_read(&context->umem_rwsem);
	ib_ucontext_notifier_end_account(context);
}

static int invalidate_range_start_trampoline(struct ib_umem *item, u64 start,
					     u64 end, void *cookie)
{
	ib_umem_notifier_start_account(item);
	item->context->invalidate_range(item, start, end);
	return 0;
}
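
/*
 * Note the asymmetry with invalidate_page_trampoline() above: the
 * range_start trampoline only calls ib_umem_notifier_start_account().
 * The matching ib_umem_notifier_end_account() runs from the range_end
 * trampoline below, so page faults on the umem stay blocked for the
 * entire invalidate_range_start()/invalidate_range_end() window.
 */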

static void ib_umem_notifier_invalidate_range_start(struct mmu_notifier *mn,
						    struct mm_struct *mm,
						    unsigned long start,
						    unsigned long end)
{
	struct ib_ucontext *context = container_of(mn, struct ib_ucontext, mn);

	if (!context->invalidate_range)
		return;

	ib_ucontext_notifier_start_account(context);
	down_read(&context->umem_rwsem);
	rbt_ib_umem_for_each_in_range(&context->umem_tree, start,
				      end,
				      invalidate_range_start_trampoline, NULL);
	up_read(&context->umem_rwsem);
}

static int invalidate_range_end_trampoline(struct ib_umem *item, u64 start,
					   u64 end, void *cookie)
{
	ib_umem_notifier_end_account(item);
	return 0;
}

static void ib_umem_notifier_invalidate_range_end(struct mmu_notifier *mn,
						  struct mm_struct *mm,
						  unsigned long start,
						  unsigned long end)
{
	struct ib_ucontext *context = container_of(mn, struct ib_ucontext, mn);

	if (!context->invalidate_range)
		return;

	down_read(&context->umem_rwsem);
	rbt_ib_umem_for_each_in_range(&context->umem_tree, start,
				      end,
				      invalidate_range_end_trampoline, NULL);
	up_read(&context->umem_rwsem);
	ib_ucontext_notifier_end_account(context);
}

static const struct mmu_notifier_ops ib_umem_notifiers = {
	.release		= ib_umem_notifier_release,
	.invalidate_page	= ib_umem_notifier_invalidate_page,
	.invalidate_range_start	= ib_umem_notifier_invalidate_range_start,
	.invalidate_range_end	= ib_umem_notifier_invalidate_range_end,
};
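
/*
 * Illustrative sketch (an assumption about the caller, not code from
 * this file): drivers normally do not call ib_umem_odp_get() directly.
 * A umem created with on-demand paging access is routed here from
 * ib_umem_get(), roughly:
 *
 *	umem = ib_umem_get(ucontext, addr, size, IB_ACCESS_ON_DEMAND, 0);
 *	// ib_umem_get() notices IB_ACCESS_ON_DEMAND and calls
 *	// ib_umem_odp_get(ucontext, umem) before returning.
 */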

int ib_umem_odp_get(struct ib_ucontext *context, struct ib_umem *umem)
{
	int ret_val;
	pid_t our_pid;
	struct mm_struct *mm = get_task_mm(current);

	if (!mm)
		return -EINVAL;

	/* Prevent creating ODP MRs in child processes */
	rcu_read_lock();
	our_pid = get_pid(task_pid_group_leader(current));
	rcu_read_unlock();
	put_pid(our_pid);
	if (context->tgid != our_pid) {
		ret_val = -EINVAL;
		goto out_mm;
	}

	umem->odp_data = kzalloc(sizeof(*umem->odp_data), GFP_KERNEL);
	if (!umem->odp_data) {
		ret_val = -ENOMEM;
		goto out_mm;
	}
	umem->odp_data->umem = umem;

	mutex_init(&umem->odp_data->umem_mutex);

	init_completion(&umem->odp_data->notifier_completion);

	umem->odp_data->page_list = vzalloc(ib_umem_num_pages(umem) *
					    sizeof(*umem->odp_data->page_list));
	if (!umem->odp_data->page_list) {
		ret_val = -ENOMEM;
		goto out_odp_data;
	}

	umem->odp_data->dma_list = vzalloc(ib_umem_num_pages(umem) *
					   sizeof(*umem->odp_data->dma_list));
	if (!umem->odp_data->dma_list) {
		ret_val = -ENOMEM;
		goto out_page_list;
	}

	/*
	 * When using MMU notifiers, we will get a
	 * notification before the "current" task (and MM) is
	 * destroyed. We use the umem_rwsem semaphore to synchronize.
	 */
	down_write(&context->umem_rwsem);
	context->odp_mrs_count++;
	if (likely(ib_umem_start(umem) != ib_umem_end(umem)))
		rbt_ib_umem_insert(&umem->odp_data->interval_tree,
				   &context->umem_tree);
	if (likely(!atomic_read(&context->notifier_count)) ||
	    context->odp_mrs_count == 1)
		umem->odp_data->mn_counters_active = true;
	else
		list_add(&umem->odp_data->no_private_counters,
			 &context->no_private_counters);
	downgrade_write(&context->umem_rwsem);

	if (context->odp_mrs_count == 1) {
		/*
		 * Note that at this point, no MMU notifier is running
		 * for this context!
		 */
		atomic_set(&context->notifier_count, 0);
		INIT_HLIST_NODE(&context->mn.hlist);
		context->mn.ops = &ib_umem_notifiers;
		/*
		 * Lockdep reports a false positive between mmap_sem and
		 * umem_rwsem here, because it does not understand the
		 * downgrade_write above.
		 */
		ret_val = mmu_notifier_register(&context->mn, mm);
		if (ret_val) {
			pr_err("Failed to register mmu_notifier %d\n", ret_val);
			ret_val = -EBUSY;
			goto out_mutex;
		}
	}

	up_read(&context->umem_rwsem);

	/*
	 * Note that doing an mmput can cause a notifier for the relevant mm.
	 * If the notifier is called while we hold the umem_rwsem, this will
	 * cause a deadlock. Therefore, we release the reference only after we
	 * have released the semaphore.
	 */
	mmput(mm);
	return 0;

out_mutex:
	up_read(&context->umem_rwsem);
	vfree(umem->odp_data->dma_list);
out_page_list:
	vfree(umem->odp_data->page_list);
out_odp_data:
	kfree(umem->odp_data);
out_mm:
	mmput(mm);
	return ret_val;
}

void ib_umem_odp_release(struct ib_umem *umem)
{
	struct ib_ucontext *context = umem->context;

	/*
	 * Ensure that no more pages are mapped in the umem.
	 *
	 * It is the driver's responsibility to ensure, before calling us,
	 * that the hardware will not attempt to access the MR any more.
	 */
	ib_umem_odp_unmap_dma_pages(umem, ib_umem_start(umem),
				    ib_umem_end(umem));

	down_write(&context->umem_rwsem);
	if (likely(ib_umem_start(umem) != ib_umem_end(umem)))
		rbt_ib_umem_remove(&umem->odp_data->interval_tree,
				   &context->umem_tree);
	context->odp_mrs_count--;
	if (!umem->odp_data->mn_counters_active) {
		list_del(&umem->odp_data->no_private_counters);
		complete_all(&umem->odp_data->notifier_completion);
	}

	/*
	 * Downgrade the lock to a read lock. This ensures that the notifiers
	 * (who lock the mutex for reading) will be able to finish, and we
	 * will be able to eventually obtain the mmu notifiers SRCU. Note
	 * that since we are doing it atomically, no other user could register
	 * and unregister while we do the check.
	 */
	downgrade_write(&context->umem_rwsem);
	if (!context->odp_mrs_count) {
		struct task_struct *owning_process = NULL;
		struct mm_struct *owning_mm = NULL;

		owning_process = get_pid_task(context->tgid,
					      PIDTYPE_PID);
		if (owning_process == NULL)
			/*
			 * The process is already dead; its notifiers were
			 * already removed.
			 */
			goto out;

		owning_mm = get_task_mm(owning_process);
		if (owning_mm == NULL)
			/*
			 * The process' mm is already dead; its notifiers
			 * were already removed.
			 */
			goto out_put_task;
		mmu_notifier_unregister(&context->mn, owning_mm);

		mmput(owning_mm);

out_put_task:
		put_task_struct(owning_process);
	}
out:
	up_read(&context->umem_rwsem);

	vfree(umem->odp_data->dma_list);
	vfree(umem->odp_data->page_list);
	kfree(umem->odp_data);
	kfree(umem);
}
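
/*
 * Layout note (added for clarity): each dma_list[] element packs a
 * page-aligned DMA address together with access bits in its low bits.
 * ODP_DMA_ADDR_MASK recovers the address and ODP_WRITE_ALLOWED_BIT
 * tests writability, as in the unmap path at the end of this file:
 *
 *	dma_addr_t dma = umem->odp_data->dma_list[idx];
 *	dma_addr_t dma_addr = dma & ODP_DMA_ADDR_MASK;
 *	int writable = (dma & ODP_WRITE_ALLOWED_BIT) != 0;
 */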

/*
 * Map for DMA and insert a single page into the on-demand paging page tables.
 *
 * @umem: the umem to insert the page to.
 * @page_index: index in the umem to add the page to.
 * @page: the page struct to map and add.
 * @access_mask: access permissions needed for this page.
 * @current_seq: sequence number for synchronization with invalidations.
 *               the sequence number is taken from
 *               umem->odp_data->notifiers_seq.
 *
 * The function returns -EFAULT if the DMA mapping operation fails. It returns
 * -EAGAIN if a concurrent invalidation prevents us from updating the page.
 *
 * The page is released via put_page even if the operation failed. For
 * on-demand pinning, the page is released whenever it isn't stored in the
 * umem.
 */
static int ib_umem_odp_map_dma_single_page(
		struct ib_umem *umem,
		int page_index,
		u64 base_virt_addr,
		struct page *page,
		u64 access_mask,
		unsigned long current_seq)
{
	struct ib_device *dev = umem->context->device;
	dma_addr_t dma_addr;
	int stored_page = 0;
	int remove_existing_mapping = 0;
	int ret = 0;

	/*
	 * Note: we avoid writing if seq is different from the initial seq, to
	 * handle the case of a racing notifier. This check also allows us to
	 * bail early if a notifier is running in parallel with us.
	 */
	if (ib_umem_mmu_notifier_retry(umem, current_seq)) {
		ret = -EAGAIN;
		goto out;
	}
	if (!(umem->odp_data->dma_list[page_index])) {
		dma_addr = ib_dma_map_page(dev,
					   page,
					   0, PAGE_SIZE,
					   DMA_BIDIRECTIONAL);
		if (ib_dma_mapping_error(dev, dma_addr)) {
			ret = -EFAULT;
			goto out;
		}
		umem->odp_data->dma_list[page_index] = dma_addr | access_mask;
		umem->odp_data->page_list[page_index] = page;
		stored_page = 1;
	} else if (umem->odp_data->page_list[page_index] == page) {
		umem->odp_data->dma_list[page_index] |= access_mask;
	} else {
		pr_err("error: got different pages in IB device and from get_user_pages. IB device page: %p, gup page: %p\n",
		       umem->odp_data->page_list[page_index], page);
		/* Better remove the mapping now, to prevent any further
		 * damage. */
		remove_existing_mapping = 1;
	}

out:
	/* On Demand Paging - avoid pinning the page */
	if (umem->context->invalidate_range || !stored_page)
		put_page(page);

	if (remove_existing_mapping && umem->context->invalidate_range) {
		invalidate_page_trampoline(
			umem,
			base_virt_addr + (page_index * PAGE_SIZE),
			base_virt_addr + ((page_index + 1) * PAGE_SIZE),
			NULL);
		ret = -EAGAIN;
	}

	return ret;
}
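
/*
 * Sizing note for the loop in ib_umem_odp_map_dma_pages() below:
 * local_page_list occupies exactly one page, used as an array of
 * struct page pointers, so each get_user_pages_remote() chunk is
 * capped at PAGE_SIZE / sizeof(struct page *) entries -- e.g.
 * 4096 / 8 = 512 pages (2 MB of user memory) per iteration on a
 * 64-bit system with 4 KB pages.
 */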

/**
 * ib_umem_odp_map_dma_pages - Pin and DMA map userspace memory in an ODP MR.
 *
 * Pins the range of pages passed in the argument, and maps them to
 * DMA addresses. The DMA addresses of the mapped pages are updated in
 * umem->odp_data->dma_list.
 *
 * Returns the number of pages mapped on success, and a negative error code
 * on failure.
 * An -EAGAIN error code is returned when a concurrent mmu notifier prevents
 * the function from completing its task.
 *
 * @umem: the umem to map and pin
 * @user_virt: the address from which we need to map.
 * @bcnt: the minimal number of bytes to pin and map. The mapping might be
 *        bigger due to alignment, and may also be smaller in case of an error
 *        pinning or mapping a page. The actual number of pages mapped is
 *        returned in the return value.
 * @access_mask: bit mask of the requested access permissions for the given
 *               range.
 * @current_seq: the MMU notifiers sequence value for synchronization with
 *               invalidations. The sequence number is read from
 *               umem->odp_data->notifiers_seq before calling this function.
 */
int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt,
			      u64 access_mask, unsigned long current_seq)
{
	struct task_struct *owning_process = NULL;
	struct mm_struct *owning_mm = NULL;
	struct page **local_page_list = NULL;
	u64 off;
	int j, k, ret = 0, start_idx, npages = 0;
	u64 base_virt_addr;
	unsigned int flags = 0;

	if (access_mask == 0)
		return -EINVAL;

	if (user_virt < ib_umem_start(umem) ||
	    user_virt + bcnt > ib_umem_end(umem))
		return -EFAULT;

	local_page_list = (struct page **)__get_free_page(GFP_KERNEL);
	if (!local_page_list)
		return -ENOMEM;

	off = user_virt & (~PAGE_MASK);
	user_virt = user_virt & PAGE_MASK;
	base_virt_addr = user_virt;
	bcnt += off; /* Charge for the first page offset as well. */

	owning_process = get_pid_task(umem->context->tgid, PIDTYPE_PID);
	if (owning_process == NULL) {
		ret = -EINVAL;
		goto out_no_task;
	}

	owning_mm = get_task_mm(owning_process);
	if (owning_mm == NULL) {
		ret = -EINVAL;
		goto out_put_task;
	}

	if (access_mask & ODP_WRITE_ALLOWED_BIT)
		flags |= FOLL_WRITE;

	start_idx = (user_virt - ib_umem_start(umem)) >> PAGE_SHIFT;
	k = start_idx;

	while (bcnt > 0) {
		const size_t gup_num_pages =
			min_t(size_t, ALIGN(bcnt, PAGE_SIZE) / PAGE_SIZE,
			      PAGE_SIZE / sizeof(struct page *));

		down_read(&owning_mm->mmap_sem);
		/*
		 * Note: this might result in redundant page getting. We can
		 * avoid this by checking dma_list for 0 before calling
		 * get_user_pages. However, this makes the code much more
		 * complex (and doesn't gain us much performance in most use
		 * cases).
		 */
		npages = get_user_pages_remote(owning_process, owning_mm,
					       user_virt, gup_num_pages,
					       flags, local_page_list, NULL);
		up_read(&owning_mm->mmap_sem);

		if (npages < 0)
			break;

		bcnt -= min_t(size_t, npages << PAGE_SHIFT, bcnt);
		user_virt += npages << PAGE_SHIFT;
		mutex_lock(&umem->odp_data->umem_mutex);
		for (j = 0; j < npages; ++j) {
			ret = ib_umem_odp_map_dma_single_page(
				umem, k, base_virt_addr, local_page_list[j],
				access_mask, current_seq);
			if (ret < 0)
				break;
			k++;
		}
		mutex_unlock(&umem->odp_data->umem_mutex);

		if (ret < 0) {
			/* Release left over pages when handling errors. */
			for (++j; j < npages; ++j)
				put_page(local_page_list[j]);
			break;
		}
	}

	if (ret >= 0) {
		if (npages < 0 && k == start_idx)
			ret = npages;
		else
			ret = k - start_idx;
	}

	mmput(owning_mm);
out_put_task:
	put_task_struct(owning_process);
out_no_task:
	free_page((unsigned long)local_page_list);
	return ret;
}
EXPORT_SYMBOL(ib_umem_odp_map_dma_pages);
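
/*
 * Illustrative sketch (an assumption about driver usage, not code from
 * this file): ib_umem_odp_unmap_dma_pages() below is the counterpart a
 * driver's ib_ucontext->invalidate_range callback is expected to reach
 * once the device can no longer access the range.  Both names below
 * are hypothetical:
 *
 *	static void my_invalidate_range(struct ib_umem *umem,
 *					u64 start, u64 end)
 *	{
 *		driver_zap_device_ptes(umem, start, end);  // hypothetical
 *		ib_umem_odp_unmap_dma_pages(umem, start, end);
 *	}
 */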

void ib_umem_odp_unmap_dma_pages(struct ib_umem *umem, u64 virt,
				 u64 bound)
{
	int idx;
	u64 addr;
	struct ib_device *dev = umem->context->device;

	virt = max_t(u64, virt, ib_umem_start(umem));
	bound = min_t(u64, bound, ib_umem_end(umem));
	/* Note that during the run of this function, the
	 * notifiers_count of the MR is > 0, preventing any racing
	 * faults from completing. We might be racing with other
	 * invalidations, so we must make sure we free each page only
	 * once. */
	mutex_lock(&umem->odp_data->umem_mutex);
	for (addr = virt; addr < bound; addr += (u64)umem->page_size) {
		idx = (addr - ib_umem_start(umem)) / PAGE_SIZE;
		if (umem->odp_data->page_list[idx]) {
			struct page *page = umem->odp_data->page_list[idx];
			dma_addr_t dma = umem->odp_data->dma_list[idx];
			dma_addr_t dma_addr = dma & ODP_DMA_ADDR_MASK;

			WARN_ON(!dma_addr);

			ib_dma_unmap_page(dev, dma_addr, PAGE_SIZE,
					  DMA_BIDIRECTIONAL);
			if (dma & ODP_WRITE_ALLOWED_BIT) {
				struct page *head_page = compound_head(page);
				/*
				 * set_page_dirty prefers being called with
				 * the page lock. However, MMU notifiers are
				 * called sometimes with and sometimes without
				 * the lock. We rely on the umem_mutex instead
				 * to prevent other mmu notifiers from
				 * continuing and allowing the page mapping to
				 * be removed.
				 */
				set_page_dirty(head_page);
			}
			/* on demand pinning support */
			if (!umem->context->invalidate_range)
				put_page(page);
			umem->odp_data->page_list[idx] = NULL;
			umem->odp_data->dma_list[idx] = 0;
		}
	}
	mutex_unlock(&umem->odp_data->umem_mutex);
}
EXPORT_SYMBOL(ib_umem_odp_unmap_dma_pages);