/*-
 * SPDX-License-Identifier: BSD-2-Clause OR GPL-2.0
 *
 * Copyright (c) 2014 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <sys/cdefs.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_umem_odp.h>

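/*
 * Per-umem notifier accounting: ib_umem_notifier_start_account() bumps
 * notifiers_count so that page faults on the umem are delayed while a
 * notifier runs, and ib_umem_notifier_end_account() advances notifiers_seq
 * and wakes any waiters once the last concurrent notifier has finished.
 */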
static void ib_umem_notifier_start_account(struct ib_umem *item)
{
	mutex_lock(&item->odp_data->umem_mutex);

	/* Only update private counters for this umem if it has them.
	 * Otherwise skip it. All page faults will be delayed for this umem. */
	if (item->odp_data->mn_counters_active) {
		int notifiers_count = item->odp_data->notifiers_count++;

		if (notifiers_count == 0)
			/* Initialize the completion object for waiting on
			 * notifiers. Since notifier_count is zero, no one
			 * should be waiting right now. */
			reinit_completion(&item->odp_data->notifier_completion);
	}
	mutex_unlock(&item->odp_data->umem_mutex);
}

static void ib_umem_notifier_end_account(struct ib_umem *item)
{
	mutex_lock(&item->odp_data->umem_mutex);

	/* Only update private counters for this umem if it has them.
	 * Otherwise skip it. All page faults will be delayed for this umem. */
	if (item->odp_data->mn_counters_active) {
		/*
		 * This sequence increase will notify the QP page fault that
		 * the page that is going to be mapped in the spte could have
		 * been freed.
		 */
		++item->odp_data->notifiers_seq;
		if (--item->odp_data->notifiers_count == 0)
			complete_all(&item->odp_data->notifier_completion);
	}
	mutex_unlock(&item->odp_data->umem_mutex);
}

/* Account for a new mmu notifier in an ib_ucontext. */
static void ib_ucontext_notifier_start_account(struct ib_ucontext *context)
{
	atomic_inc(&context->notifier_count);
}

/* Account for a terminating mmu notifier in an ib_ucontext.
 *
 * Must be called with the ib_ucontext->umem_rwsem semaphore unlocked, since
 * the function takes the semaphore itself. */
static void ib_ucontext_notifier_end_account(struct ib_ucontext *context)
{
	int zero_notifiers = atomic_dec_and_test(&context->notifier_count);

	if (zero_notifiers &&
	    !list_empty(&context->no_private_counters)) {
		/* No currently running mmu notifiers. Now is the chance to
		 * add private accounting to all previously added umems. */
		struct ib_umem_odp *odp_data, *next;

		/* Prevent concurrent mmu notifiers from working on the
		 * no_private_counters list. */
		down_write(&context->umem_rwsem);

		/* Read the notifier_count again, with the umem_rwsem
		 * semaphore taken for write. */
		if (!atomic_read(&context->notifier_count)) {
			list_for_each_entry_safe(odp_data, next,
						 &context->no_private_counters,
						 no_private_counters) {
				mutex_lock(&odp_data->umem_mutex);
				odp_data->mn_counters_active = true;
				list_del(&odp_data->no_private_counters);
				complete_all(&odp_data->notifier_completion);
				mutex_unlock(&odp_data->umem_mutex);
			}
		}

		up_write(&context->umem_rwsem);
	}
}

static int ib_umem_notifier_release_trampoline(struct ib_umem *item, u64 start,
					       u64 end, void *cookie)
{
	/*
	 * Increase the number of notifiers running, to
	 * prevent any further fault handling on this MR.
	 */
	ib_umem_notifier_start_account(item);
	item->odp_data->dying = 1;
	/* Make sure that the fact the umem is dying is visible before we
	 * release all pending page faults. */
	smp_wmb();
	complete_all(&item->odp_data->notifier_completion);
	item->context->invalidate_range(item, ib_umem_start(item),
					ib_umem_end(item));
	return 0;
}

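/*
 * Called when the mm backing this ucontext is torn down. Mark every ODP
 * umem in the context as dying and invalidate all of its pages so that no
 * further page faults are served for these MRs.
 */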
static void ib_umem_notifier_release(struct mmu_notifier *mn,
				     struct mm_struct *mm)
{
	struct ib_ucontext *context = container_of(mn, struct ib_ucontext, mn);

	if (!context->invalidate_range)
		return;

	ib_ucontext_notifier_start_account(context);
	down_read(&context->umem_rwsem);
	rbt_ib_umem_for_each_in_range(&context->umem_tree, 0,
				      ULLONG_MAX,
				      ib_umem_notifier_release_trampoline,
				      NULL);
	up_read(&context->umem_rwsem);
}

static int invalidate_page_trampoline(struct ib_umem *item, u64 start,
				      u64 end, void *cookie)
{
	ib_umem_notifier_start_account(item);
	item->context->invalidate_range(item, start, start + PAGE_SIZE);
	ib_umem_notifier_end_account(item);
	return 0;
}

static void ib_umem_notifier_invalidate_page(struct mmu_notifier *mn,
					     struct mm_struct *mm,
					     unsigned long address)
{
	struct ib_ucontext *context = container_of(mn, struct ib_ucontext, mn);

	if (!context->invalidate_range)
		return;

	ib_ucontext_notifier_start_account(context);
	down_read(&context->umem_rwsem);
	rbt_ib_umem_for_each_in_range(&context->umem_tree, address,
				      address + PAGE_SIZE,
				      invalidate_page_trampoline, NULL);
	up_read(&context->umem_rwsem);
	ib_ucontext_notifier_end_account(context);
}

static int invalidate_range_start_trampoline(struct ib_umem *item, u64 start,
					     u64 end, void *cookie)
{
	ib_umem_notifier_start_account(item);
	item->context->invalidate_range(item, start, end);
	return 0;
}

static void ib_umem_notifier_invalidate_range_start(struct mmu_notifier *mn,
						    struct mm_struct *mm,
						    unsigned long start,
						    unsigned long end)
{
	struct ib_ucontext *context = container_of(mn, struct ib_ucontext, mn);

	if (!context->invalidate_range)
		return;

	ib_ucontext_notifier_start_account(context);
	down_read(&context->umem_rwsem);
	rbt_ib_umem_for_each_in_range(&context->umem_tree, start,
				      end,
				      invalidate_range_start_trampoline, NULL);
	up_read(&context->umem_rwsem);
}

static int invalidate_range_end_trampoline(struct ib_umem *item, u64 start,
					   u64 end, void *cookie)
{
	ib_umem_notifier_end_account(item);
	return 0;
}

static void ib_umem_notifier_invalidate_range_end(struct mmu_notifier *mn,
						  struct mm_struct *mm,
						  unsigned long start,
						  unsigned long end)
{
	struct ib_ucontext *context = container_of(mn, struct ib_ucontext, mn);

	if (!context->invalidate_range)
		return;

	down_read(&context->umem_rwsem);
	rbt_ib_umem_for_each_in_range(&context->umem_tree, start,
				      end,
				      invalidate_range_end_trampoline, NULL);
	up_read(&context->umem_rwsem);
	ib_ucontext_notifier_end_account(context);
}

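/*
 * MMU notifier callbacks shared by all ODP umems in an ib_ucontext. Each
 * callback walks the context's interval tree and invalidates the umem
 * ranges that overlap the affected virtual address range.
 */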
static const struct mmu_notifier_ops ib_umem_notifiers = {
	.release                = ib_umem_notifier_release,
	.invalidate_page        = ib_umem_notifier_invalidate_page,
	.invalidate_range_start = ib_umem_notifier_invalidate_range_start,
	.invalidate_range_end   = ib_umem_notifier_invalidate_range_end,
};

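/*
 * Set up on-demand paging state for a umem: allocate the odp_data
 * bookkeeping (page list, DMA address list, interval-tree node), insert the
 * umem into the per-context interval tree, and register the context's MMU
 * notifier when this is the first ODP MR in the context. Must be called by
 * the process that owns the context; forked children are rejected.
 */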
int ib_umem_odp_get(struct ib_ucontext *context, struct ib_umem *umem)
{
	int ret_val;
	pid_t our_pid;
	struct mm_struct *mm = get_task_mm(current);

	if (!mm)
		return -EINVAL;

	/* Prevent creating ODP MRs in child processes */
	rcu_read_lock();
	our_pid = get_pid(task_pid_group_leader(current));
	rcu_read_unlock();
	put_pid(our_pid);
	if (context->tgid != our_pid) {
		ret_val = -EINVAL;
		goto out_mm;
	}

	umem->odp_data = kzalloc(sizeof(*umem->odp_data), GFP_KERNEL);
	if (!umem->odp_data) {
		ret_val = -ENOMEM;
		goto out_mm;
	}
	umem->odp_data->umem = umem;

	mutex_init(&umem->odp_data->umem_mutex);

	init_completion(&umem->odp_data->notifier_completion);

	umem->odp_data->page_list = vzalloc(ib_umem_num_pages(umem) *
					    sizeof(*umem->odp_data->page_list));
	if (!umem->odp_data->page_list) {
		ret_val = -ENOMEM;
		goto out_odp_data;
	}

	umem->odp_data->dma_list = vzalloc(ib_umem_num_pages(umem) *
					   sizeof(*umem->odp_data->dma_list));
	if (!umem->odp_data->dma_list) {
		ret_val = -ENOMEM;
		goto out_page_list;
	}

	/*
	 * When using MMU notifiers, we will get a
	 * notification before the "current" task (and MM) is
	 * destroyed. We use the umem_rwsem semaphore to synchronize.
	 */
	down_write(&context->umem_rwsem);
	context->odp_mrs_count++;
	if (likely(ib_umem_start(umem) != ib_umem_end(umem)))
		rbt_ib_umem_insert(&umem->odp_data->interval_tree,
				   &context->umem_tree);
	if (likely(!atomic_read(&context->notifier_count)) ||
	    context->odp_mrs_count == 1)
		umem->odp_data->mn_counters_active = true;
	else
		list_add(&umem->odp_data->no_private_counters,
			 &context->no_private_counters);
	downgrade_write(&context->umem_rwsem);

	if (context->odp_mrs_count == 1) {
		/*
		 * Note that at this point, no MMU notifier is running
		 * for this context!
		 */
		atomic_set(&context->notifier_count, 0);
		INIT_HLIST_NODE(&context->mn.hlist);
		context->mn.ops = &ib_umem_notifiers;
		/*
		 * Lockdep detects a false positive for mmap_sem vs.
		 * umem_rwsem, due to not handling downgrade_write correctly.
		 */
		ret_val = mmu_notifier_register(&context->mn, mm);
		if (ret_val) {
			pr_err("Failed to register mmu_notifier %d\n", ret_val);
			ret_val = -EBUSY;
			goto out_mutex;
		}
	}

	up_read(&context->umem_rwsem);

	/*
	 * Note that doing an mmput can trigger a notifier for the relevant
	 * mm. If the notifier is called while we hold the umem_rwsem, this
	 * will cause a deadlock. Therefore, we release the mm reference only
	 * after we have released the semaphore.
	 */
	mmput(mm);
	return 0;

out_mutex:
	up_read(&context->umem_rwsem);
	vfree(umem->odp_data->dma_list);
out_page_list:
	vfree(umem->odp_data->page_list);
out_odp_data:
	kfree(umem->odp_data);
out_mm:
	mmput(mm);
	return ret_val;
}

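/*
 * Tear down the on-demand paging state of a umem: unmap any remaining DMA
 * mappings, remove the umem from the context's interval tree, unregister
 * the MMU notifier when the last ODP MR in the context goes away, and free
 * the odp_data bookkeeping along with the umem itself.
 */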
void ib_umem_odp_release(struct ib_umem *umem)
{
	struct ib_ucontext *context = umem->context;

	/*
	 * Ensure that no more pages are mapped in the umem.
	 *
	 * It is the driver's responsibility to ensure, before calling us,
	 * that the hardware will not attempt to access the MR any more.
	 */
	ib_umem_odp_unmap_dma_pages(umem, ib_umem_start(umem),
				    ib_umem_end(umem));

	down_write(&context->umem_rwsem);
	if (likely(ib_umem_start(umem) != ib_umem_end(umem)))
		rbt_ib_umem_remove(&umem->odp_data->interval_tree,
				   &context->umem_tree);
	context->odp_mrs_count--;
	if (!umem->odp_data->mn_counters_active) {
		list_del(&umem->odp_data->no_private_counters);
		complete_all(&umem->odp_data->notifier_completion);
	}

	/*
	 * Downgrade the lock to a read lock. This ensures that the notifiers
	 * (which take the umem_rwsem for reading) will be able to finish, and
	 * we will be able to eventually obtain the mmu notifiers SRCU. Note
	 * that since we are doing it atomically, no other user could register
	 * and unregister while we do the check.
	 */
	downgrade_write(&context->umem_rwsem);
	if (!context->odp_mrs_count) {
		struct task_struct *owning_process = NULL;
		struct mm_struct *owning_mm = NULL;

		owning_process = get_pid_task(context->tgid,
					      PIDTYPE_PID);
		if (owning_process == NULL)
			/*
			 * The process is already dead, the notifiers were
			 * already removed.
			 */
			goto out;

		owning_mm = get_task_mm(owning_process);
		if (owning_mm == NULL)
			/*
			 * The process' mm is already dead, the notifiers
			 * were already removed.
			 */
			goto out_put_task;
		mmu_notifier_unregister(&context->mn, owning_mm);

		mmput(owning_mm);

out_put_task:
		put_task_struct(owning_process);
	}
out:
	up_read(&context->umem_rwsem);

	vfree(umem->odp_data->dma_list);
	vfree(umem->odp_data->page_list);
	kfree(umem->odp_data);
	kfree(umem);
}

/*
 * Map for DMA and insert a single page into the on-demand paging page tables.
 *
 * @umem: the umem to insert the page to.
 * @page_index: index in the umem to add the page to.
 * @page: the page struct to map and add.
 * @access_mask: access permissions needed for this page.
 * @current_seq: sequence number for synchronization with invalidations.
 *               the sequence number is taken from
 *               umem->odp_data->notifiers_seq.
 *
 * The function returns -EFAULT if the DMA mapping operation fails. It returns
 * -EAGAIN if a concurrent invalidation prevents us from updating the page.
 *
 * The page is released via put_page even if the operation failed. For
 * on-demand pinning, the page is released whenever it isn't stored in the
 * umem.
 */
static int ib_umem_odp_map_dma_single_page(
		struct ib_umem *umem,
		int page_index,
		u64 base_virt_addr,
		struct page *page,
		u64 access_mask,
		unsigned long current_seq)
{
	struct ib_device *dev = umem->context->device;
	dma_addr_t dma_addr;
	int stored_page = 0;
	int remove_existing_mapping = 0;
	int ret = 0;

	/*
	 * Note: we avoid writing if seq is different from the initial seq, to
	 * handle the case of a racing notifier. This check also allows us to
	 * bail early if we have a notifier running in parallel with us.
	 */
	if (ib_umem_mmu_notifier_retry(umem, current_seq)) {
		ret = -EAGAIN;
		goto out;
	}
	if (!(umem->odp_data->dma_list[page_index])) {
		dma_addr = ib_dma_map_page(dev,
					   page,
					   0, PAGE_SIZE,
					   DMA_BIDIRECTIONAL);
		if (ib_dma_mapping_error(dev, dma_addr)) {
			ret = -EFAULT;
			goto out;
		}
		umem->odp_data->dma_list[page_index] = dma_addr | access_mask;
		umem->odp_data->page_list[page_index] = page;
		stored_page = 1;
	} else if (umem->odp_data->page_list[page_index] == page) {
		umem->odp_data->dma_list[page_index] |= access_mask;
	} else {
		pr_err("error: got different pages in IB device and from get_user_pages. IB device page: %p, gup page: %p\n",
		       umem->odp_data->page_list[page_index], page);
		/* Better remove the mapping now, to prevent any further
		 * damage. */
		remove_existing_mapping = 1;
	}

out:
	/* On Demand Paging - avoid pinning the page */
	if (umem->context->invalidate_range || !stored_page)
		put_page(page);

	if (remove_existing_mapping && umem->context->invalidate_range) {
		invalidate_page_trampoline(
			umem,
			base_virt_addr + (page_index * PAGE_SIZE),
			base_virt_addr + ((page_index + 1) * PAGE_SIZE),
			NULL);
		ret = -EAGAIN;
	}

	return ret;
}

/**
 * ib_umem_odp_map_dma_pages - Pin and DMA map userspace memory in an ODP MR.
 *
 * Pins the range of pages passed in the argument, and maps them to
 * DMA addresses. The DMA addresses of the mapped pages are updated in
 * umem->odp_data->dma_list.
 *
 * Returns the number of pages mapped on success, or a negative error code
 * on failure.
 * An -EAGAIN error code is returned when a concurrent mmu notifier prevents
 * the function from completing its task.
 *
 * @umem: the umem to map and pin
 * @user_virt: the address from which we need to map.
 * @bcnt: the minimal number of bytes to pin and map. The mapping might be
 *        bigger due to alignment, and may also be smaller in case of an error
 *        pinning or mapping a page. The actual number of pages mapped is
 *        returned in the return value.
 * @access_mask: bit mask of the requested access permissions for the given
 *               range.
 * @current_seq: the MMU notifiers sequence value for synchronization with
 *               invalidations. The sequence number is read from
 *               umem->odp_data->notifiers_seq before calling this function.
 */
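/*
 * Typical caller pattern (a sketch, not code from this file): a driver page
 * fault handler samples the notifier sequence number before faulting, and
 * re-checks it under umem_mutex with ib_umem_mmu_notifier_retry() before
 * installing the translations in the device page tables, retrying on a
 * mismatch:
 *
 *	current_seq = READ_ONCE(umem->odp_data->notifiers_seq);
 *	npages = ib_umem_odp_map_dma_pages(umem, user_virt, bcnt,
 *					   access_mask, current_seq);
 *	if (npages < 0)
 *		return npages;
 *	mutex_lock(&umem->odp_data->umem_mutex);
 *	if (!ib_umem_mmu_notifier_retry(umem, current_seq)) {
 *		... update the device page tables from dma_list ...
 *	}
 *	mutex_unlock(&umem->odp_data->umem_mutex);
 */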
int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt,
			      u64 access_mask, unsigned long current_seq)
{
	struct task_struct *owning_process = NULL;
	struct mm_struct *owning_mm = NULL;
	struct page **local_page_list = NULL;
	u64 off;
	int j, k, ret = 0, start_idx, npages = 0;
	u64 base_virt_addr;
	unsigned int flags = 0;

	if (access_mask == 0)
		return -EINVAL;

	if (user_virt < ib_umem_start(umem) ||
	    user_virt + bcnt > ib_umem_end(umem))
		return -EFAULT;

	local_page_list = (struct page **)__get_free_page(GFP_KERNEL);
	if (!local_page_list)
		return -ENOMEM;

	off = user_virt & (~PAGE_MASK);
	user_virt = user_virt & PAGE_MASK;
	base_virt_addr = user_virt;
	bcnt += off; /* Charge for the first page offset as well. */

	owning_process = get_pid_task(umem->context->tgid, PIDTYPE_PID);
	if (owning_process == NULL) {
		ret = -EINVAL;
		goto out_no_task;
	}

	owning_mm = get_task_mm(owning_process);
	if (owning_mm == NULL) {
		ret = -EINVAL;
		goto out_put_task;
	}

	if (access_mask & ODP_WRITE_ALLOWED_BIT)
		flags |= FOLL_WRITE;

	start_idx = (user_virt - ib_umem_start(umem)) >> PAGE_SHIFT;
	k = start_idx;

	while (bcnt > 0) {
		const size_t gup_num_pages =
			min_t(size_t, ALIGN(bcnt, PAGE_SIZE) / PAGE_SIZE,
			      PAGE_SIZE / sizeof(struct page *));

		down_read(&owning_mm->mmap_sem);
		/*
		 * Note: this might result in redundant page getting. We can
		 * avoid this by checking that the dma_list entry is zero
		 * before calling get_user_pages. However, this makes the code
		 * much more complex (and doesn't gain us much performance in
		 * most use cases).
		 */
		npages = get_user_pages_remote(owning_process, owning_mm,
					       user_virt, gup_num_pages,
					       flags, local_page_list, NULL);
		up_read(&owning_mm->mmap_sem);

		if (npages < 0)
			break;

		bcnt -= min_t(size_t, npages << PAGE_SHIFT, bcnt);
		user_virt += npages << PAGE_SHIFT;
		mutex_lock(&umem->odp_data->umem_mutex);
		for (j = 0; j < npages; ++j) {
			ret = ib_umem_odp_map_dma_single_page(
				umem, k, base_virt_addr, local_page_list[j],
				access_mask, current_seq);
			if (ret < 0)
				break;
			k++;
		}
		mutex_unlock(&umem->odp_data->umem_mutex);

		if (ret < 0) {
			/* Release left over pages when handling errors. */
			for (++j; j < npages; ++j)
				put_page(local_page_list[j]);
			break;
		}
	}

	if (ret >= 0) {
		if (npages < 0 && k == start_idx)
			ret = npages;
		else
			ret = k - start_idx;
	}

	mmput(owning_mm);
out_put_task:
	put_task_struct(owning_process);
out_no_task:
	free_page((unsigned long)local_page_list);
	return ret;
}
EXPORT_SYMBOL(ib_umem_odp_map_dma_pages);

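/*
 * ib_umem_odp_unmap_dma_pages - DMA-unmap and release the pages of an ODP
 * umem in the range [virt, bound), clamped to the umem boundaries. Pages
 * that were mapped writable are marked dirty before being dropped. The
 * caller must guarantee that the hardware no longer accesses the range.
 */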
void ib_umem_odp_unmap_dma_pages(struct ib_umem *umem, u64 virt,
				 u64 bound)
{
	int idx;
	u64 addr;
	struct ib_device *dev = umem->context->device;

	virt = max_t(u64, virt, ib_umem_start(umem));
	bound = min_t(u64, bound, ib_umem_end(umem));
	/* Note that during the run of this function, the
	 * notifiers_count of the MR is > 0, preventing any racing
	 * faults from completing. We might be racing with other
	 * invalidations, so we must make sure we free each page only
	 * once. */
	mutex_lock(&umem->odp_data->umem_mutex);
	for (addr = virt; addr < bound; addr += (u64)umem->page_size) {
		idx = (addr - ib_umem_start(umem)) / PAGE_SIZE;
		if (umem->odp_data->page_list[idx]) {
			struct page *page = umem->odp_data->page_list[idx];
			dma_addr_t dma = umem->odp_data->dma_list[idx];
			dma_addr_t dma_addr = dma & ODP_DMA_ADDR_MASK;

			WARN_ON(!dma_addr);

			ib_dma_unmap_page(dev, dma_addr, PAGE_SIZE,
					  DMA_BIDIRECTIONAL);
			if (dma & ODP_WRITE_ALLOWED_BIT) {
				struct page *head_page = compound_head(page);
				/*
				 * set_page_dirty prefers being called with
				 * the page lock. However, MMU notifiers are
				 * called sometimes with and sometimes without
				 * the lock. We rely on the umem_mutex instead
				 * to prevent other mmu notifiers from
				 * continuing and allowing the page mapping to
				 * be removed.
				 */
				set_page_dirty(head_page);
			}
			/* on demand pinning support */
			if (!umem->context->invalidate_range)
				put_page(page);
			umem->odp_data->page_list[idx] = NULL;
			umem->odp_data->dma_list[idx] = 0;
		}
	}
	mutex_unlock(&umem->odp_data->umem_mutex);
}
EXPORT_SYMBOL(ib_umem_odp_unmap_dma_pages);