// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
/*
 * Copyright(c) 2020 Cornelis Networks, Inc.
 * Copyright(c) 2015-2018 Intel Corporation.
 */
#include <asm/page.h>
#include <linux/string.h>

#include "mmu_rb.h"
#include "user_exp_rcv.h"
#include "trace.h"

static void unlock_exp_tids(struct hfi1_ctxtdata *uctxt,
			    struct exp_tid_set *set,
			    struct hfi1_filedata *fd);
static u32 find_phys_blocks(struct tid_user_buf *tidbuf, unsigned int npages);
static int set_rcvarray_entry(struct hfi1_filedata *fd,
			      struct tid_user_buf *tbuf,
			      u32 rcventry, struct tid_group *grp,
			      u16 pageidx, unsigned int npages);
static void cacheless_tid_rb_remove(struct hfi1_filedata *fdata,
				    struct tid_rb_node *tnode);
static bool tid_rb_invalidate(struct mmu_interval_notifier *mni,
			      const struct mmu_notifier_range *range,
			      unsigned long cur_seq);
static bool tid_cover_invalidate(struct mmu_interval_notifier *mni,
			         const struct mmu_notifier_range *range,
			         unsigned long cur_seq);
static int program_rcvarray(struct hfi1_filedata *fd, struct tid_user_buf *,
			    struct tid_group *grp,
			    unsigned int start, u16 count,
			    u32 *tidlist, unsigned int *tididx,
			    unsigned int *pmapped);
static int unprogram_rcvarray(struct hfi1_filedata *fd, u32 tidinfo);
static void __clear_tid_node(struct hfi1_filedata *fd,
			     struct tid_rb_node *node);
static void clear_tid_node(struct hfi1_filedata *fd, struct tid_rb_node *node);

static const struct mmu_interval_notifier_ops tid_mn_ops = {
	.invalidate = tid_rb_invalidate,
};
static const struct mmu_interval_notifier_ops tid_cover_ops = {
	.invalidate = tid_cover_invalidate,
};

/*
 * Initialize context and file private data needed for Expected
 * receive caching. This needs to be done after the context has
 * been configured with the eager/expected RcvEntry counts.
 */
int hfi1_user_exp_rcv_init(struct hfi1_filedata *fd,
			   struct hfi1_ctxtdata *uctxt)
{
	int ret = 0;

	fd->entry_to_rb = kcalloc(uctxt->expected_count,
				  sizeof(struct rb_node *),
				  GFP_KERNEL);
	if (!fd->entry_to_rb)
		return -ENOMEM;

	if (!HFI1_CAP_UGET_MASK(uctxt->flags, TID_UNMAP)) {
		fd->invalid_tid_idx = 0;
		fd->invalid_tids = kcalloc(uctxt->expected_count,
					   sizeof(*fd->invalid_tids),
					   GFP_KERNEL);
		if (!fd->invalid_tids) {
			kfree(fd->entry_to_rb);
			fd->entry_to_rb = NULL;
			return -ENOMEM;
		}
		fd->use_mn = true;
	}

	/*
	 * PSM does not have a good way to separate, count, and
	 * effectively enforce a limit on RcvArray entries used by
	 * subctxts (when context sharing is used) when TID caching
	 * is enabled. To help with that, we calculate a per-process
	 * RcvArray entry share and enforce that.
	 * If TID caching is not in use, PSM deals with usage on its
	 * own. In that case, we allow any subctxt to take all of the
	 * entries.
	 *
	 * Make sure that we set the tid counts only after successful
	 * init.
	 */
	spin_lock(&fd->tid_lock);
	if (uctxt->subctxt_cnt && fd->use_mn) {
		u16 remainder;

		fd->tid_limit = uctxt->expected_count / uctxt->subctxt_cnt;
		remainder = uctxt->expected_count % uctxt->subctxt_cnt;
		if (remainder && fd->subctxt < remainder)
			fd->tid_limit++;
	} else {
		fd->tid_limit = uctxt->expected_count;
	}
	spin_unlock(&fd->tid_lock);

	return ret;
}

void hfi1_user_exp_rcv_free(struct hfi1_filedata *fd)
{
	struct hfi1_ctxtdata *uctxt = fd->uctxt;

	mutex_lock(&uctxt->exp_mutex);
	if (!EXP_TID_SET_EMPTY(uctxt->tid_full_list))
		unlock_exp_tids(uctxt, &uctxt->tid_full_list, fd);
	if (!EXP_TID_SET_EMPTY(uctxt->tid_used_list))
		unlock_exp_tids(uctxt, &uctxt->tid_used_list, fd);
	mutex_unlock(&uctxt->exp_mutex);

	kfree(fd->invalid_tids);
	fd->invalid_tids = NULL;

	kfree(fd->entry_to_rb);
	fd->entry_to_rb = NULL;
}

/*
 * Release pinned receive buffer pages.
 *
 * @mapped: true if the pages have been DMA mapped, false otherwise.
 * @idx: Index of the first page to unpin.
 * @npages: Number of pages to unpin.
 *
 * If the pages have been DMA mapped (indicated by the mapped parameter),
 * their info is passed via a struct tid_rb_node. If they haven't been
 * mapped, their info is passed via a struct tid_user_buf.
 */
static void unpin_rcv_pages(struct hfi1_filedata *fd,
			    struct tid_user_buf *tidbuf,
			    struct tid_rb_node *node,
			    unsigned int idx,
			    unsigned int npages,
			    bool mapped)
{
	struct page **pages;
	struct hfi1_devdata *dd = fd->uctxt->dd;
	struct mm_struct *mm;

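	/*
	 * In the mapped case the node's entire DMA mapping is torn down;
	 * the only such caller (__clear_tid_node) passes idx = 0 and
	 * npages = node->npages so the page release below matches it.
	 */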
	if (mapped) {
		dma_unmap_single(&dd->pcidev->dev, node->dma_addr,
				 node->npages * PAGE_SIZE, DMA_FROM_DEVICE);
		pages = &node->pages[idx];
		mm = mm_from_tid_node(node);
	} else {
		pages = &tidbuf->pages[idx];
		mm = current->mm;
	}
	hfi1_release_user_pages(mm, pages, npages, mapped);
	fd->tid_n_pinned -= npages;
}

/*
 * Pin receive buffer pages.
 */
static int pin_rcv_pages(struct hfi1_filedata *fd, struct tid_user_buf *tidbuf)
{
	int pinned;
	unsigned int npages = tidbuf->npages;
	unsigned long vaddr = tidbuf->vaddr;
	struct page **pages = NULL;
	struct hfi1_devdata *dd = fd->uctxt->dd;

	if (npages > fd->uctxt->expected_count) {
		dd_dev_err(dd, "Expected buffer too big\n");
		return -EINVAL;
	}

	/* Allocate the array of struct page pointers needed for pinning */
	pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	/*
	 * Pin all the pages of the user buffer. If we can't pin all the
	 * pages, accept the amount pinned so far and program only that.
	 * User space knows how to deal with partially programmed buffers.
	 */
	if (!hfi1_can_pin_pages(dd, current->mm, fd->tid_n_pinned, npages)) {
		kfree(pages);
		return -ENOMEM;
	}

	pinned = hfi1_acquire_user_pages(current->mm, vaddr, npages, true, pages);
	if (pinned <= 0) {
		kfree(pages);
		return pinned;
	}
	tidbuf->pages = pages;
	fd->tid_n_pinned += pinned;
	return pinned;
}

/*
 * RcvArray entry allocation for Expected Receives is done by the
 * following algorithm:
 *
 * The context keeps 3 lists of groups of RcvArray entries:
 *   1. List of empty groups - tid_group_list
 *      This list is created during user context creation and
 *      contains elements which describe sets (of 8) of empty
 *      RcvArray entries.
 *   2. List of partially used groups - tid_used_list
 *      This list contains sets of RcvArray entries which are
 *      not completely used up. Another mapping request could
 *      use some or all of the remaining entries.
 *   3. List of full groups - tid_full_list
 *      This is the list where sets that are completely used
 *      up go.
 *
 * An attempt to optimize the usage of RcvArray entries is
 * made by finding all sets of physically contiguous pages in a
 * user's buffer.
 * These physically contiguous sets are further split into
 * sizes supported by the receive engine of the HFI. The
 * resulting sets of pages are stored in struct tid_pageset,
 * which describes the sets as:
 *    * .count - number of pages in this set
 *    * .idx - starting index into struct page ** array
 *                    of this set
 *
 * From this point on, the algorithm deals with the page sets
 * described above. The number of pagesets is divided by the
 * RcvArray group size to produce the number of full groups
 * needed.
 *
 * Groups from the 3 lists are manipulated using the following
 * rules:
 *   1. For each set of 8 pagesets, a complete group from
 *      tid_group_list is taken, programmed, and moved to
 *      the tid_full_list list.
 *   2. For all remaining pagesets:
 *      2.1 If the tid_used_list is empty and the tid_group_list
 *          is empty, stop processing pagesets and return only
 *          what has been programmed up to this point.
 *      2.2 If the tid_used_list is empty and the tid_group_list
 *          is not empty, move a group from tid_group_list to
 *          tid_used_list.
 *      2.3 For each group in tid_used_list, program as much as
 *          can fit into the group. If the group becomes fully
 *          used, move it to tid_full_list.
 */
int hfi1_user_exp_rcv_setup(struct hfi1_filedata *fd,
			    struct hfi1_tid_info *tinfo)
{
	int ret = 0, need_group = 0, pinned;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_devdata *dd = uctxt->dd;
	unsigned int ngroups, pageidx = 0, pageset_count,
		tididx = 0, mapped, mapped_pages = 0;
	u32 *tidlist = NULL;
	struct tid_user_buf *tidbuf;
	unsigned long mmu_seq = 0;

	if (!PAGE_ALIGNED(tinfo->vaddr))
		return -EINVAL;
	if (tinfo->length == 0)
		return -EINVAL;

	tidbuf = kzalloc(sizeof(*tidbuf), GFP_KERNEL);
	if (!tidbuf)
		return -ENOMEM;

	mutex_init(&tidbuf->cover_mutex);
	tidbuf->vaddr = tinfo->vaddr;
	tidbuf->length = tinfo->length;
	tidbuf->npages = num_user_pages(tidbuf->vaddr, tidbuf->length);
	tidbuf->psets = kcalloc(uctxt->expected_count, sizeof(*tidbuf->psets),
				GFP_KERNEL);
	if (!tidbuf->psets) {
		ret = -ENOMEM;
		goto fail_release_mem;
	}

	if (fd->use_mn) {
		ret = mmu_interval_notifier_insert(
			&tidbuf->notifier, current->mm,
			tidbuf->vaddr, tidbuf->npages * PAGE_SIZE,
			&tid_cover_ops);
		if (ret)
			goto fail_release_mem;
		mmu_seq = mmu_interval_read_begin(&tidbuf->notifier);
	}
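	/*
	 * When the MMU notifier is in use, tidbuf->notifier covers the whole
	 * candidate buffer and the sequence number sampled above (mmu_seq)
	 * is re-checked after programming, so an invalidation that races
	 * with the pinning below is detected.
	 */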

	pinned = pin_rcv_pages(fd, tidbuf);
	if (pinned <= 0) {
		ret = (pinned < 0) ? pinned : -ENOSPC;
		goto fail_unpin;
	}

	/* Find sets of physically contiguous pages */
	tidbuf->n_psets = find_phys_blocks(tidbuf, pinned);

	/* Reserve the number of expected tids to be used. */
	spin_lock(&fd->tid_lock);
	if (fd->tid_used + tidbuf->n_psets > fd->tid_limit)
		pageset_count = fd->tid_limit - fd->tid_used;
	else
		pageset_count = tidbuf->n_psets;
	fd->tid_used += pageset_count;
	spin_unlock(&fd->tid_lock);

	if (!pageset_count) {
		ret = -ENOSPC;
		goto fail_unreserve;
	}

	ngroups = pageset_count / dd->rcv_entries.group_size;
	tidlist = kcalloc(pageset_count, sizeof(*tidlist), GFP_KERNEL);
	if (!tidlist) {
		ret = -ENOMEM;
		goto fail_unreserve;
	}

	tididx = 0;

	/*
	 * From this point on, we are going to be using shared (between master
	 * and subcontexts) context resources. We need to take the lock.
	 */
	mutex_lock(&uctxt->exp_mutex);
	/*
	 * The first step is to program the RcvArray entries which are complete
	 * groups.
	 */
	while (ngroups && uctxt->tid_group_list.count) {
		struct tid_group *grp =
			tid_group_pop(&uctxt->tid_group_list);

		ret = program_rcvarray(fd, tidbuf, grp,
				       pageidx, dd->rcv_entries.group_size,
				       tidlist, &tididx, &mapped);
		/*
		 * If there was a failure to program the RcvArray
		 * entries for the entire group, reset the grp fields
		 * and add the grp back to the free group list.
		 */
		if (ret <= 0) {
			tid_group_add_tail(grp, &uctxt->tid_group_list);
			hfi1_cdbg(TID,
				  "Failed to program RcvArray group %d", ret);
			goto unlock;
		}

		tid_group_add_tail(grp, &uctxt->tid_full_list);
		ngroups--;
		pageidx += ret;
		mapped_pages += mapped;
	}

	while (pageidx < pageset_count) {
		struct tid_group *grp, *ptr;
		/*
		 * If we don't have any partially used tid groups, check
		 * if we have empty groups. If so, take one from there and
		 * put in the partially used list.
		 */
		if (!uctxt->tid_used_list.count || need_group) {
			if (!uctxt->tid_group_list.count)
				goto unlock;

			grp = tid_group_pop(&uctxt->tid_group_list);
			tid_group_add_tail(grp, &uctxt->tid_used_list);
			need_group = 0;
		}
		/*
		 * There is an optimization opportunity here - instead of
		 * fitting as many page sets as we can, check for a group
		 * later on in the list that could fit all of them.
		 */
		list_for_each_entry_safe(grp, ptr, &uctxt->tid_used_list.list,
					 list) {
			unsigned use = min_t(unsigned, pageset_count - pageidx,
					     grp->size - grp->used);

			ret = program_rcvarray(fd, tidbuf, grp,
					       pageidx, use, tidlist,
					       &tididx, &mapped);
			if (ret < 0) {
				hfi1_cdbg(TID,
					  "Failed to program RcvArray entries %d",
					  ret);
				goto unlock;
			} else if (ret > 0) {
				if (grp->used == grp->size)
					tid_group_move(grp,
						       &uctxt->tid_used_list,
						       &uctxt->tid_full_list);
				pageidx += ret;
				mapped_pages += mapped;
				need_group = 0;
				/* Check if we are done so we can break out early */
				if (pageidx >= pageset_count)
					break;
			} else if (WARN_ON(ret == 0)) {
				/*
				 * If ret is 0, we did not program any entries
				 * into this group, which can only happen if
				 * we've screwed up the accounting somewhere.
				 * Warn and try to continue.
				 */
				need_group = 1;
			}
		}
	}
unlock:
	mutex_unlock(&uctxt->exp_mutex);
	hfi1_cdbg(TID, "total mapped: tidpairs:%u pages:%u (%d)", tididx,
		  mapped_pages, ret);

	/* fail if nothing was programmed, set error if none provided */
	if (tididx == 0) {
		if (ret >= 0)
			ret = -ENOSPC;
		goto fail_unreserve;
	}

	/* adjust reserved tid_used to actual count */
	spin_lock(&fd->tid_lock);
	fd->tid_used -= pageset_count - tididx;
	spin_unlock(&fd->tid_lock);

	/* unpin all pages not covered by a TID */
	unpin_rcv_pages(fd, tidbuf, NULL, mapped_pages, pinned - mapped_pages,
			false);

	if (fd->use_mn) {
		/* check for an invalidate during setup */
		bool fail = false;

		mutex_lock(&tidbuf->cover_mutex);
		fail = mmu_interval_read_retry(&tidbuf->notifier, mmu_seq);
		mutex_unlock(&tidbuf->cover_mutex);

		if (fail) {
			ret = -EBUSY;
			goto fail_unprogram;
		}
	}

	tinfo->tidcnt = tididx;
	tinfo->length = mapped_pages * PAGE_SIZE;

	if (copy_to_user(u64_to_user_ptr(tinfo->tidlist),
			 tidlist, sizeof(tidlist[0]) * tididx)) {
		ret = -EFAULT;
		goto fail_unprogram;
	}

	if (fd->use_mn)
		mmu_interval_notifier_remove(&tidbuf->notifier);
	kfree(tidbuf->pages);
	kfree(tidbuf->psets);
	kfree(tidbuf);
	kfree(tidlist);
	return 0;

fail_unprogram:
	/* unprogram, unmap, and unpin all allocated TIDs */
	tinfo->tidlist = (unsigned long)tidlist;
	hfi1_user_exp_rcv_clear(fd, tinfo);
	tinfo->tidlist = 0;
	pinned = 0;		/* nothing left to unpin */
	pageset_count = 0;	/* nothing left reserved */
fail_unreserve:
	spin_lock(&fd->tid_lock);
	fd->tid_used -= pageset_count;
	spin_unlock(&fd->tid_lock);
fail_unpin:
	if (fd->use_mn)
		mmu_interval_notifier_remove(&tidbuf->notifier);
	if (pinned > 0)
		unpin_rcv_pages(fd, tidbuf, NULL, 0, pinned, false);
fail_release_mem:
	kfree(tidbuf->pages);
	kfree(tidbuf->psets);
	kfree(tidbuf);
	kfree(tidlist);
	return ret;
}

int hfi1_user_exp_rcv_clear(struct hfi1_filedata *fd,
			    struct hfi1_tid_info *tinfo)
{
	int ret = 0;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	u32 *tidinfo;
	unsigned tididx;

	if (unlikely(tinfo->tidcnt > fd->tid_used))
		return -EINVAL;

	tidinfo = memdup_user(u64_to_user_ptr(tinfo->tidlist),
			      sizeof(tidinfo[0]) * tinfo->tidcnt);
	if (IS_ERR(tidinfo))
		return PTR_ERR(tidinfo);

	mutex_lock(&uctxt->exp_mutex);
	for (tididx = 0; tididx < tinfo->tidcnt; tididx++) {
		ret = unprogram_rcvarray(fd, tidinfo[tididx]);
		if (ret) {
			hfi1_cdbg(TID, "Failed to unprogram rcv array %d",
				  ret);
			break;
		}
	}
	spin_lock(&fd->tid_lock);
	fd->tid_used -= tididx;
	spin_unlock(&fd->tid_lock);
	tinfo->tidcnt = tididx;
	mutex_unlock(&uctxt->exp_mutex);

	kfree(tidinfo);
	return ret;
}

int hfi1_user_exp_rcv_invalid(struct hfi1_filedata *fd,
			      struct hfi1_tid_info *tinfo)
{
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	unsigned long *ev = uctxt->dd->events +
		(uctxt_offset(uctxt) + fd->subctxt);
	u32 *array;
	int ret = 0;

	/*
	 * copy_to_user() can sleep, which will leave the invalid_lock
	 * locked and cause the MMU notifier to be blocked on the lock
	 * for a long time.
	 * Copy the data to a local buffer so we can release the lock.
	 */
	array = kcalloc(uctxt->expected_count, sizeof(*array), GFP_KERNEL);
	if (!array)
		return -EFAULT;

	spin_lock(&fd->invalid_lock);
	if (fd->invalid_tid_idx) {
		memcpy(array, fd->invalid_tids, sizeof(*array) *
		       fd->invalid_tid_idx);
		memset(fd->invalid_tids, 0, sizeof(*fd->invalid_tids) *
		       fd->invalid_tid_idx);
		tinfo->tidcnt = fd->invalid_tid_idx;
		fd->invalid_tid_idx = 0;
		/*
		 * Reset the user flag while still holding the lock.
		 * Otherwise, PSM can miss events.
		 */
		clear_bit(_HFI1_EVENT_TID_MMU_NOTIFY_BIT, ev);
	} else {
		tinfo->tidcnt = 0;
	}
	spin_unlock(&fd->invalid_lock);

	if (tinfo->tidcnt) {
		if (copy_to_user((void __user *)tinfo->tidlist,
				 array, sizeof(*array) * tinfo->tidcnt))
			ret = -EFAULT;
	}
	kfree(array);

	return ret;
}

static u32 find_phys_blocks(struct tid_user_buf *tidbuf, unsigned int npages)
{
	unsigned pagecount, pageidx, setcount = 0, i;
	unsigned long pfn, this_pfn;
	struct page **pages = tidbuf->pages;
	struct tid_pageset *list = tidbuf->psets;

	if (!npages)
		return 0;

	/*
	 * Look for sets of physically contiguous pages in the user buffer.
	 * This will allow us to optimize Expected RcvArray entry usage by
	 * using the bigger supported sizes.
	 */
	pfn = page_to_pfn(pages[0]);
	for (pageidx = 0, pagecount = 1, i = 1; i <= npages; i++) {
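		/*
		 * On the final pass (i == npages) this_pfn is 0, which can
		 * never equal ++pfn, forcing the last run of contiguous
		 * pages to be emitted below.
		 */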
		this_pfn = i < npages ? page_to_pfn(pages[i]) : 0;

		/*
		 * If the pfns are not sequential, the pages are not
		 * physically contiguous.
		 */
		if (this_pfn != ++pfn) {
			/*
			 * At this point we have to loop over the set of
			 * physically contiguous pages and break them down
			 * into sizes supported by the HW.
			 * There are two main constraints:
			 *     1. The max buffer size is MAX_EXPECTED_BUFFER.
			 *        If the total set size is bigger than that,
			 *        program only a MAX_EXPECTED_BUFFER chunk.
			 *     2. The buffer size has to be a power of two. If
			 *        it is not, round down to the closest power
			 *        of 2 and program that size.
			 */
			while (pagecount) {
				int maxpages = pagecount;
				u32 bufsize = pagecount * PAGE_SIZE;

				if (bufsize > MAX_EXPECTED_BUFFER)
					maxpages =
						MAX_EXPECTED_BUFFER >>
						PAGE_SHIFT;
				else if (!is_power_of_2(bufsize))
					maxpages =
						rounddown_pow_of_two(bufsize) >>
						PAGE_SHIFT;

				list[setcount].idx = pageidx;
				list[setcount].count = maxpages;
				pagecount -= maxpages;
				pageidx += maxpages;
				setcount++;
			}
			pageidx = i;
			pagecount = 1;
			pfn = this_pfn;
		} else {
			pagecount++;
		}
	}
	return setcount;
}

/**
 * program_rcvarray() - program an RcvArray group with receive buffers
 * @fd: filedata pointer
 * @tbuf: pointer to struct tid_user_buf that has the user buffer starting
 *	  virtual address, buffer length, page pointers, pagesets (array of
 *	  struct tid_pageset holding information on physically contiguous
 *	  chunks from the user buffer), and other fields.
 * @grp: RcvArray group
 * @start: starting index into sets array
 * @count: number of struct tid_pageset's to program
 * @tidlist: the array of u32 elements where the information about the
 *           programmed RcvArray entries is to be encoded.
 * @tididx: starting offset into tidlist
 * @pmapped: (output parameter) number of pages programmed into the RcvArray
 *           entries.
 *
 * This function will program up to 'count' number of RcvArray entries from the
 * group 'grp'. To make best use of write-combining writes, the function will
 * perform writes to the unused RcvArray entries which will be ignored by the
 * HW. Each RcvArray entry will be programmed with a physically contiguous
 * buffer chunk from the user's virtual buffer.
 *
 * Return:
 * -EINVAL if the requested count is larger than the size of the group,
 * -ENOMEM or -EFAULT on error from set_rcvarray_entry(), or
 * number of RcvArray entries programmed.
 */
static int program_rcvarray(struct hfi1_filedata *fd, struct tid_user_buf *tbuf,
			    struct tid_group *grp,
			    unsigned int start, u16 count,
			    u32 *tidlist, unsigned int *tididx,
			    unsigned int *pmapped)
{
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_devdata *dd = uctxt->dd;
	u16 idx;
	u32 tidinfo = 0, rcventry, useidx = 0;
	int mapped = 0;

	/* Count should never be larger than the group size */
	if (count > grp->size)
		return -EINVAL;

	/* Find the first unused entry in the group */
	for (idx = 0; idx < grp->size; idx++) {
		if (!(grp->map & (1 << idx))) {
			useidx = idx;
			break;
		}
		rcv_array_wc_fill(dd, grp->base + idx);
	}

	idx = 0;
	while (idx < count) {
		u16 npages, pageidx, setidx = start + idx;
		int ret = 0;

		/*
		 * If this entry in the group is used, move to the next one.
		 * If we go past the end of the group, exit the loop.
		 */
		if (useidx >= grp->size) {
			break;
		} else if (grp->map & (1 << useidx)) {
			rcv_array_wc_fill(dd, grp->base + useidx);
			useidx++;
			continue;
		}

		rcventry = grp->base + useidx;
		npages = tbuf->psets[setidx].count;
		pageidx = tbuf->psets[setidx].idx;

		ret = set_rcvarray_entry(fd, tbuf,
					 rcventry, grp, pageidx,
					 npages);
		if (ret)
			return ret;
		mapped += npages;

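		/*
		 * Encode the entry offset (relative to expected_base) and the
		 * page count into the tidinfo value handed back to user space
		 * via tidlist.
		 */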
		tidinfo = rcventry2tidinfo(rcventry - uctxt->expected_base) |
			EXP_TID_SET(LEN, npages);
		tidlist[(*tididx)++] = tidinfo;
		grp->used++;
		grp->map |= 1 << useidx++;
		idx++;
	}

	/* Fill the rest of the group with "blank" writes */
	for (; useidx < grp->size; useidx++)
		rcv_array_wc_fill(dd, grp->base + useidx);
	*pmapped = mapped;
	return idx;
}

static int set_rcvarray_entry(struct hfi1_filedata *fd,
			      struct tid_user_buf *tbuf,
			      u32 rcventry, struct tid_group *grp,
			      u16 pageidx, unsigned int npages)
{
	int ret;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct tid_rb_node *node;
	struct hfi1_devdata *dd = uctxt->dd;
	dma_addr_t phys;
	struct page **pages = tbuf->pages + pageidx;

	/*
	 * Allocate the node first so we can handle a potential
	 * failure before we've programmed anything.
	 */
	node = kzalloc(struct_size(node, pages, npages), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	phys = dma_map_single(&dd->pcidev->dev, __va(page_to_phys(pages[0])),
			      npages * PAGE_SIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(&dd->pcidev->dev, phys)) {
		dd_dev_err(dd, "Failed to DMA map Exp Rcv pages 0x%llx\n",
			   phys);
		kfree(node);
		return -EFAULT;
	}

	node->fdata = fd;
	mutex_init(&node->invalidate_mutex);
	node->phys = page_to_phys(pages[0]);
	node->npages = npages;
	node->rcventry = rcventry;
	node->dma_addr = phys;
	node->grp = grp;
	node->freed = false;
	memcpy(node->pages, pages, flex_array_size(node, pages, npages));

	if (fd->use_mn) {
		ret = mmu_interval_notifier_insert(
			&node->notifier, current->mm,
			tbuf->vaddr + (pageidx * PAGE_SIZE), npages * PAGE_SIZE,
			&tid_mn_ops);
		if (ret)
			goto out_unmap;
	}
	fd->entry_to_rb[node->rcventry - uctxt->expected_base] = node;

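	/*
	 * npages is always a power of two here (find_phys_blocks() splits
	 * each contiguous run into power-of-two sized sets), so ilog2() is
	 * exact.
	 */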
	hfi1_put_tid(dd, rcventry, PT_EXPECTED, phys, ilog2(npages) + 1);
	trace_hfi1_exp_tid_reg(uctxt->ctxt, fd->subctxt, rcventry, npages,
			       node->notifier.interval_tree.start, node->phys,
			       phys);
	return 0;

out_unmap:
	hfi1_cdbg(TID, "Failed to insert RB node %u 0x%lx, 0x%lx %d",
		  node->rcventry, node->notifier.interval_tree.start,
		  node->phys, ret);
	dma_unmap_single(&dd->pcidev->dev, phys, npages * PAGE_SIZE,
			 DMA_FROM_DEVICE);
	kfree(node);
	return -EFAULT;
}

static int unprogram_rcvarray(struct hfi1_filedata *fd, u32 tidinfo)
{
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_devdata *dd = uctxt->dd;
	struct tid_rb_node *node;
	u8 tidctrl = EXP_TID_GET(tidinfo, CTRL);
	u32 tididx = EXP_TID_GET(tidinfo, IDX) << 1, rcventry;

	if (tididx >= uctxt->expected_count) {
		dd_dev_err(dd, "Invalid RcvArray entry (%u) index for ctxt %u\n",
			   tididx, uctxt->ctxt);
		return -EINVAL;
	}

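	/*
	 * tidinfo addresses RcvArray entries in pairs: IDX selects the pair
	 * and tidctrl selects the entry within it (1 = even entry, 2 = odd
	 * entry). A tidctrl of 3 would refer to both entries and is rejected.
	 */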
	if (tidctrl == 0x3)
		return -EINVAL;

	rcventry = tididx + (tidctrl - 1);

	node = fd->entry_to_rb[rcventry];
	if (!node || node->rcventry != (uctxt->expected_base + rcventry))
		return -EBADF;

	if (fd->use_mn)
		mmu_interval_notifier_remove(&node->notifier);
	cacheless_tid_rb_remove(fd, node);

	return 0;
}

static void __clear_tid_node(struct hfi1_filedata *fd, struct tid_rb_node *node)
{
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_devdata *dd = uctxt->dd;

	mutex_lock(&node->invalidate_mutex);
	if (node->freed)
		goto done;
	node->freed = true;

	trace_hfi1_exp_tid_unreg(uctxt->ctxt, fd->subctxt, node->rcventry,
				 node->npages,
				 node->notifier.interval_tree.start, node->phys,
				 node->dma_addr);

	/* Make sure device has seen the write before pages are unpinned */
	hfi1_put_tid(dd, node->rcventry, PT_INVALID_FLUSH, 0, 0);

	unpin_rcv_pages(fd, NULL, node, 0, node->npages, true);
done:
	mutex_unlock(&node->invalidate_mutex);
}

static void clear_tid_node(struct hfi1_filedata *fd, struct tid_rb_node *node)
{
	struct hfi1_ctxtdata *uctxt = fd->uctxt;

	__clear_tid_node(fd, node);

	node->grp->used--;
	node->grp->map &= ~(1 << (node->rcventry - node->grp->base));

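	/*
	 * used == size - 1 means the group just went from full to having one
	 * free entry, so move it from tid_full_list to tid_used_list. A group
	 * with no used entries goes back to tid_group_list.
	 */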
	if (node->grp->used == node->grp->size - 1)
		tid_group_move(node->grp, &uctxt->tid_full_list,
			       &uctxt->tid_used_list);
	else if (!node->grp->used)
		tid_group_move(node->grp, &uctxt->tid_used_list,
			       &uctxt->tid_group_list);
	kfree(node);
}

/*
 * As a simple helper for hfi1_user_exp_rcv_free, this function deals with
 * clearing nodes in the non-cached case.
 */
static void unlock_exp_tids(struct hfi1_ctxtdata *uctxt,
			    struct exp_tid_set *set,
			    struct hfi1_filedata *fd)
{
	struct tid_group *grp, *ptr;
	int i;

	list_for_each_entry_safe(grp, ptr, &set->list, list) {
		list_del_init(&grp->list);

		for (i = 0; i < grp->size; i++) {
			if (grp->map & (1 << i)) {
				u16 rcventry = grp->base + i;
				struct tid_rb_node *node;

				node = fd->entry_to_rb[rcventry -
							  uctxt->expected_base];
				if (!node || node->rcventry != rcventry)
					continue;

				if (fd->use_mn)
					mmu_interval_notifier_remove(
						&node->notifier);
				cacheless_tid_rb_remove(fd, node);
			}
		}
	}
}

static bool tid_rb_invalidate(struct mmu_interval_notifier *mni,
			      const struct mmu_notifier_range *range,
			      unsigned long cur_seq)
{
	struct tid_rb_node *node =
		container_of(mni, struct tid_rb_node, notifier);
	struct hfi1_filedata *fdata = node->fdata;
	struct hfi1_ctxtdata *uctxt = fdata->uctxt;

	if (node->freed)
		return true;

	/* take action only if unmapping */
	if (range->event != MMU_NOTIFY_UNMAP)
		return true;

	trace_hfi1_exp_tid_inval(uctxt->ctxt, fdata->subctxt,
				 node->notifier.interval_tree.start,
				 node->rcventry, node->npages, node->dma_addr);

	/* clear the hardware rcvarray entry */
	__clear_tid_node(fdata, node);

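	/*
	 * Record the invalidated TID so the process can retrieve it later
	 * through hfi1_user_exp_rcv_invalid().
	 */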
	spin_lock(&fdata->invalid_lock);
	if (fdata->invalid_tid_idx < uctxt->expected_count) {
		fdata->invalid_tids[fdata->invalid_tid_idx] =
			rcventry2tidinfo(node->rcventry - uctxt->expected_base);
		fdata->invalid_tids[fdata->invalid_tid_idx] |=
			EXP_TID_SET(LEN, node->npages);
		if (!fdata->invalid_tid_idx) {
			unsigned long *ev;

			/*
			 * hfi1_set_uevent_bits() sets a user event flag
			 * for all processes. Because calling into the
			 * driver to process TID cache invalidations is
			 * expensive and TID cache invalidations are
			 * handled on a per-process basis, we can
			 * optimize this to set the flag only for the
			 * process in question.
			 */
			ev = uctxt->dd->events +
				(uctxt_offset(uctxt) + fdata->subctxt);
			set_bit(_HFI1_EVENT_TID_MMU_NOTIFY_BIT, ev);
		}
		fdata->invalid_tid_idx++;
	}
	spin_unlock(&fdata->invalid_lock);
	return true;
}

static bool tid_cover_invalidate(struct mmu_interval_notifier *mni,
			         const struct mmu_notifier_range *range,
			         unsigned long cur_seq)
{
	struct tid_user_buf *tidbuf =
		container_of(mni, struct tid_user_buf, notifier);

	/* take action only if unmapping */
	if (range->event == MMU_NOTIFY_UNMAP) {
		mutex_lock(&tidbuf->cover_mutex);
		mmu_interval_set_seq(mni, cur_seq);
		mutex_unlock(&tidbuf->cover_mutex);
	}

	return true;
}

static void cacheless_tid_rb_remove(struct hfi1_filedata *fdata,
				    struct tid_rb_node *tnode)
{
	u32 base = fdata->uctxt->expected_base;

	fdata->entry_to_rb[tnode->rcventry - base] = NULL;
	clear_tid_node(fdata, tnode);
}