1 /* $NetBSD: uvm_fault.c,v 1.233 2023/07/17 12:55:37 riastradh Exp $ */
2
3 /*
4 * Copyright (c) 1997 Charles D. Cranor and Washington University.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 *
27 * from: Id: uvm_fault.c,v 1.1.2.23 1998/02/06 05:29:05 chs Exp
28 */
29
30 /*
31 * uvm_fault.c: fault handler
32 */
33
34 #include <sys/cdefs.h>
35 __KERNEL_RCSID(0, "$NetBSD: uvm_fault.c,v 1.233 2023/07/17 12:55:37 riastradh Exp $");
36
37 #include "opt_uvmhist.h"
38
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/atomic.h>
42 #include <sys/kernel.h>
43 #include <sys/mman.h>
44
45 #include <uvm/uvm.h>
46 #include <uvm/uvm_pdpolicy.h>
47 #include <uvm/uvm_rndsource.h>
48
49 /*
50 *
51 * a word on page faults:
52 *
53 * types of page faults we handle:
54 *
55 * CASE 1: upper layer faults CASE 2: lower layer faults
56 *
57 * CASE 1A CASE 1B CASE 2A CASE 2B
58 * read/write1 write>1 read/write +-cow_write/zero
59 * | | | |
60 * +--|--+ +--|--+ +-----+ + | + | +-----+
61 * amap | V | | ---------> new | | | | ^ |
62 * +-----+ +-----+ +-----+ + | + | +--|--+
63 * | | |
64 * +-----+ +-----+ +--|--+ | +--|--+
65 * uobj | d/c | | d/c | | V | +----+ |
66 * +-----+ +-----+ +-----+ +-----+
67 *
68 * d/c = don't care
69 *
70 * case [0]: layerless fault
71 * no amap or uobj is present. this is an error.
72 *
73 * case [1]: upper layer fault [anon active]
74 * 1A: [read] or [write with anon->an_ref == 1]
75 * I/O takes place in upper level anon and uobj is not touched.
76 * 1B: [write with anon->an_ref > 1]
77 * new anon is alloc'd and data is copied off ["COW"]
78 *
79 * case [2]: lower layer fault [uobj]
80 * 2A: [read on non-NULL uobj] or [write to non-copy_on_write area]
81 * I/O takes place directly in object.
82 * 2B: [write to copy_on_write] or [read on NULL uobj]
83 * data is "promoted" from uobj to a new anon.
84 * if uobj is null, then we zero fill.
85 *
86 * we follow the standard UVM locking protocol ordering:
87 *
88 * MAPS => AMAP => UOBJ => ANON => PAGE QUEUES (PQ)
89 * we hold a PG_BUSY page if we unlock for I/O
90 *
91 *
92 * the code is structured as follows:
93 *
94 * - init the "IN" params in the ufi structure
95 * ReFault: (ERESTART returned to the loop in uvm_fault_internal)
96 * - do lookups [locks maps], check protection, handle needs_copy
97 * - check for case 0 fault (error)
98 * - establish "range" of fault
99 * - if we have an amap lock it and extract the anons
100 * - if sequential advice deactivate pages behind us
101 * - at the same time check pmap for unmapped areas and anon for pages
102 * that we could map in (and map them in if found)
103 * - check object for resident pages that we could map in
104 * - if (case 2) goto Case2
105 * - >>> handle case 1
106 * - ensure source anon is resident in RAM
107 * - if case 1B alloc new anon and copy from source
108 * - map the correct page in
109 * Case2:
110 * - >>> handle case 2
111 * - ensure source page is resident (if uobj)
112 * - if case 2B alloc new anon and copy from source (could be zero
113 * fill if uobj == NULL)
114 * - map the correct page in
115 * - done!
116 *
117 * note on paging:
118 * if we have to do I/O we place a PG_BUSY page in the correct object,
119 * unlock everything, and do the I/O. when I/O is done we must reverify
120 * the state of the world before assuming that our data structures are
121 * valid. [because mappings could change while the map is unlocked]
122 *
123 * alternative 1: unbusy the page in question and restart the page fault
124 * from the top (ReFault). this is easy but does not take advantage
125 * of the information that we already have from our previous lookup,
126 * although it is possible that the "hints" in the vm_map will help here.
127 *
128 * alternative 2: the system already keeps track of a "version" number of
129 * a map. [i.e. every time you write-lock a map (e.g. to change a
130 * mapping) you bump the version number up by one...] so, we can save
131 * the version number of the map before we release the lock and start I/O.
132 * then when I/O is done we can relock and check the version numbers
133 * to see if anything changed. this might save us some work over alternative
134 * 1 because we don't have to unbusy the page and may need fewer compares(?).
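 * (see uvmfault_relock(): the map version saved in ufi->mapv at lookup
 * time is compared against map->timestamp when relocking.)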
135 *
136 * alternative 3: put in backpointers or a way to "hold" part of a map
137 * in place while I/O is in progress. this could be complex to
138 * implement (especially with structures like amap that can be referenced
139 * by multiple map entries, and figuring out what should wait could be
140 * complex as well...).
141 *
142 * we use alternative 2. given that we are multi-threaded now we may want
143 * to reconsider the choice.
144 */
145
146 /*
147 * local data structures
148 */
149
150 struct uvm_advice {
151 int advice;
152 int nback;
153 int nforw;
154 };
155
156 /*
157 * page range array:
158 * note: index in array must match "advice" value
159 * XXX: borrowed numbers from freebsd. do they work well for us?
160 */
161
162 static const struct uvm_advice uvmadvice[] = {
163 { UVM_ADV_NORMAL, 3, 4 },
164 { UVM_ADV_RANDOM, 0, 0 },
165 { UVM_ADV_SEQUENTIAL, 8, 7 },
166 };
167
168 #define UVM_MAXRANGE 16 /* must be MAX() of nback+nforw+1 */
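/* currently the widest case is UVM_ADV_SEQUENTIAL: 8 (nback) + 7 (nforw) + 1 = 16 */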
169
170 /*
171 * private prototypes
172 */
173
174 /*
175 * inline functions
176 */
177
178 /*
179 * uvmfault_anonflush: try and deactivate pages in specified anons
180 *
181 * => does not have to deactivate page if it is busy
182 */
183
184 static inline void
185 uvmfault_anonflush(struct vm_anon **anons, int n)
186 {
187 int lcv;
188 struct vm_page *pg;
189
190 for (lcv = 0; lcv < n; lcv++) {
191 if (anons[lcv] == NULL)
192 continue;
193 KASSERT(rw_lock_held(anons[lcv]->an_lock));
194 pg = anons[lcv]->an_page;
195 if (pg && (pg->flags & PG_BUSY) == 0) {
196 uvm_pagelock(pg);
197 uvm_pagedeactivate(pg);
198 uvm_pageunlock(pg);
199 }
200 }
201 }
202
203 /*
204 * normal functions
205 */
206
207 /*
208 * uvmfault_amapcopy: clear "needs_copy" in a map.
209 *
210 * => called with VM data structures unlocked (usually, see below)
211 * => we get a write lock on the maps and clear needs_copy for a VA
212 * => if we are out of RAM we sleep (waiting for more)
213 */
214
215 static void
216 uvmfault_amapcopy(struct uvm_faultinfo *ufi)
217 {
218 for (;;) {
219
220 /*
221 * no mapping? give up.
222 */
223
224 if (uvmfault_lookup(ufi, true) == false)
225 return;
226
227 /*
228 * copy if needed.
229 */
230
231 if (UVM_ET_ISNEEDSCOPY(ufi->entry))
232 amap_copy(ufi->map, ufi->entry, AMAP_COPY_NOWAIT,
233 ufi->orig_rvaddr, ufi->orig_rvaddr + 1);
234
235 /*
236 * didn't work? must be out of RAM. unlock and sleep.
237 */
238
239 if (UVM_ET_ISNEEDSCOPY(ufi->entry)) {
240 uvmfault_unlockmaps(ufi, true);
241 uvm_wait("fltamapcopy");
242 continue;
243 }
244
245 /*
246 * got it! unlock and return.
247 */
248
249 uvmfault_unlockmaps(ufi, true);
250 return;
251 }
252 /*NOTREACHED*/
253 }
254
255 /*
256 * uvmfault_anonget: get data in an anon into a non-busy, non-released
257 * page in that anon.
258 *
259 * => Map, amap and thus anon should be locked by caller.
260 * => If we fail, we unlock everything and error is returned.
261 * => If we are successful, return with everything still locked.
262 * => We do not move the page on the queues [gets moved later]. If we
263 * allocate a new page [we_own], it gets put on the queues. Either way,
264 * the result is that the page is on the queues at return time
265 * => For pages which are on loan from a uvm_object (and thus are not owned
266 * by the anon): if successful, return with the owning object locked.
267 * The caller must unlock this object when it unlocks everything else.
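 * => Possible returns: 0 on success, ERESTART if the caller should refault,
 *    ENOLCK if a write lock on the amap is needed (caller upgrades and
 *    retries), ENOMEM if no memory is reclaimable, or an I/O error from swap.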
268 */
269
270 int
271 uvmfault_anonget(struct uvm_faultinfo *ufi, struct vm_amap *amap,
272 struct vm_anon *anon)
273 {
274 struct vm_page *pg;
275 krw_t lock_type;
276 int error;
277
278 UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
279 KASSERT(rw_lock_held(anon->an_lock));
280 KASSERT(anon->an_lock == amap->am_lock);
281
282 /* Increment the counters.*/
283 cpu_count(CPU_COUNT_FLTANGET, 1);
284 if (anon->an_page) {
285 curlwp->l_ru.ru_minflt++;
286 } else {
287 curlwp->l_ru.ru_majflt++;
288 }
289 error = 0;
290
291 /*
292 * Loop until we get the anon data, or fail.
293 */
294
295 for (;;) {
296 bool we_own, locked;
297 /*
298 * Note: 'we_own' will become true if we set PG_BUSY on a page.
299 */
300 we_own = false;
301 pg = anon->an_page;
302
303 /*
304 * If there is a resident page and it is loaned, then anon
305 * may not own it. Call out to uvm_anon_lockloanpg() to
306 * identify and lock the real owner of the page.
307 */
308
309 if (pg && pg->loan_count)
310 pg = uvm_anon_lockloanpg(anon);
311
312 /*
313 * Is page resident? Make sure it is not busy/released.
314 */
315
316 lock_type = rw_lock_op(anon->an_lock);
317 if (pg) {
318
319 /*
320 * at this point, if the page has a uobject [meaning
321 * we have it on loan], then that uobject is locked
322 * by us! if the page is busy, we drop all the
323 * locks (including uobject) and try again.
324 */
325
326 if ((pg->flags & PG_BUSY) == 0) {
327 UVMHIST_LOG(maphist, "<- OK",0,0,0,0);
328 return 0;
329 }
330 cpu_count(CPU_COUNT_FLTPGWAIT, 1);
331
332 /*
333 * The last unlock must be an atomic unlock and wait
334 * on the owner of page.
335 */
336
337 if (pg->uobject) {
338 /* Owner of page is UVM object. */
339 uvmfault_unlockall(ufi, amap, NULL);
340 UVMHIST_LOG(maphist, " unlock+wait on uobj",0,
341 0,0,0);
342 uvm_pagewait(pg, pg->uobject->vmobjlock, "anonget1");
343 } else {
344 /* Owner of page is anon. */
345 uvmfault_unlockall(ufi, NULL, NULL);
346 UVMHIST_LOG(maphist, " unlock+wait on anon",0,
347 0,0,0);
348 uvm_pagewait(pg, anon->an_lock, "anonget2");
349 }
350 } else {
351 #if defined(VMSWAP)
352 /*
353 * No page, therefore allocate one. A write lock is
354 * required for this. If the caller didn't supply
355 * one, fail now and have them retry.
356 */
357
358 if (lock_type == RW_READER) {
359 return ENOLCK;
360 }
361 pg = uvm_pagealloc(NULL,
362 ufi != NULL ? ufi->orig_rvaddr : 0,
363 anon, ufi != NULL ? UVM_FLAG_COLORMATCH : 0);
364 if (pg == NULL) {
365 /* Out of memory. Wait a little. */
366 uvmfault_unlockall(ufi, amap, NULL);
367 cpu_count(CPU_COUNT_FLTNORAM, 1);
368 UVMHIST_LOG(maphist, " noram -- UVM_WAIT",0,
369 0,0,0);
370 if (!uvm_reclaimable()) {
371 return ENOMEM;
372 }
373 uvm_wait("flt_noram1");
374 } else {
375 /* PG_BUSY bit is set. */
376 we_own = true;
377 uvmfault_unlockall(ufi, amap, NULL);
378
379 /*
380 * Pass a PG_BUSY+PG_FAKE clean page into
381 * the uvm_swap_get() function with all data
382 * structures unlocked. Note that it is OK
383 * to read an_swslot here, because we hold
384 * PG_BUSY on the page.
385 */
386 cpu_count(CPU_COUNT_PAGEINS, 1);
387 error = uvm_swap_get(pg, anon->an_swslot,
388 PGO_SYNCIO);
389
390 /*
391 * We clean up after the I/O below in the
392 * 'we_own' case.
393 */
394 }
395 #else
396 panic("%s: no page", __func__);
397 #endif /* defined(VMSWAP) */
398 }
399
400 /*
401 * Re-lock the map and anon.
402 */
403
404 locked = uvmfault_relock(ufi);
405 if (locked || we_own) {
406 rw_enter(anon->an_lock, lock_type);
407 }
408
409 /*
410 * If we own the page (i.e. we set PG_BUSY), then we need
411 * to clean up after the I/O. There are three cases to
412 * consider:
413 *
414 * 1) Page was released during I/O: free anon and ReFault.
415 * 2) I/O not OK. Free the page and cause the fault to fail.
416 * 3) I/O OK! Activate the page and sync with the non-we_own
417 * case (i.e. drop anon lock if not locked).
418 */
419
420 if (we_own) {
421 KASSERT(lock_type == RW_WRITER);
422 #if defined(VMSWAP)
423 if (error) {
424
425 /*
426 * Remove the swap slot from the anon and
427 * mark the anon as having no real slot.
428 * Do not free the swap slot, thus preventing
429 * it from being used again.
430 */
431
432 if (anon->an_swslot > 0) {
433 uvm_swap_markbad(anon->an_swslot, 1);
434 }
435 anon->an_swslot = SWSLOT_BAD;
436
437 if ((pg->flags & PG_RELEASED) != 0) {
438 goto released;
439 }
440
441 /*
442 * Note: page was never !PG_BUSY, so it
443 * cannot be mapped and thus no need to
444 * pmap_page_protect() it.
445 */
446
447 uvm_pagefree(pg);
448
449 if (locked) {
450 uvmfault_unlockall(ufi, NULL, NULL);
451 }
452 rw_exit(anon->an_lock);
453 UVMHIST_LOG(maphist, "<- ERROR", 0,0,0,0);
454 return error;
455 }
456
457 if ((pg->flags & PG_RELEASED) != 0) {
458 released:
459 KASSERT(anon->an_ref == 0);
460
461 /*
462 * Released while we had unlocked amap.
463 */
464
465 if (locked) {
466 uvmfault_unlockall(ufi, NULL, NULL);
467 }
468 uvm_anon_release(anon);
469
470 if (error) {
471 UVMHIST_LOG(maphist,
472 "<- ERROR/RELEASED", 0,0,0,0);
473 return error;
474 }
475
476 UVMHIST_LOG(maphist, "<- RELEASED", 0,0,0,0);
477 return ERESTART;
478 }
479
480 /*
481 * We have successfully read the page, activate it.
482 */
483
484 uvm_pagelock(pg);
485 uvm_pageactivate(pg);
486 uvm_pagewakeup(pg);
487 uvm_pageunlock(pg);
488 pg->flags &= ~(PG_BUSY|PG_FAKE);
489 uvm_pagemarkdirty(pg, UVM_PAGE_STATUS_UNKNOWN);
490 UVM_PAGE_OWN(pg, NULL);
491 #else
492 panic("%s: we_own", __func__);
493 #endif /* defined(VMSWAP) */
494 }
495
496 /*
497 * We were not able to re-lock the map - restart the fault.
498 */
499
500 if (!locked) {
501 if (we_own) {
502 rw_exit(anon->an_lock);
503 }
504 UVMHIST_LOG(maphist, "<- REFAULT", 0,0,0,0);
505 return ERESTART;
506 }
507
508 /*
509 * Verify that no one has touched the amap and moved
510 * the anon on us.
511 */
512
513 if (ufi != NULL && amap_lookup(&ufi->entry->aref,
514 ufi->orig_rvaddr - ufi->entry->start) != anon) {
515
516 uvmfault_unlockall(ufi, amap, NULL);
517 UVMHIST_LOG(maphist, "<- REFAULT", 0,0,0,0);
518 return ERESTART;
519 }
520
521 /*
522 * Retry..
523 */
524
525 cpu_count(CPU_COUNT_FLTANRETRY, 1);
526 continue;
527 }
528 /*NOTREACHED*/
529 }
530
531 /*
532 * uvmfault_promote: promote data to a new anon. used for 1B and 2B.
533 *
534 * 1. allocate an anon and a page.
535 * 2. fill its contents.
536 * 3. put it into amap.
537 *
538 * => if we fail (result != 0) we unlock everything.
539 * => on success, return a new locked anon via 'nanon'.
540 * (*nanon)->an_page will be a resident, locked, dirty page.
541 * => it's caller's responsibility to put the promoted nanon->an_page to the
542 * page queue.
543 */
544
545 static int
546 uvmfault_promote(struct uvm_faultinfo *ufi,
547 struct vm_anon *oanon,
548 struct vm_page *uobjpage,
549 struct vm_anon **nanon, /* OUT: allocated anon */
550 struct vm_anon **spare)
551 {
552 struct vm_amap *amap = ufi->entry->aref.ar_amap;
553 struct uvm_object *uobj;
554 struct vm_anon *anon;
555 struct vm_page *pg;
556 struct vm_page *opg;
557 int error;
558 UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
559
560 if (oanon) {
561 /* anon COW */
562 opg = oanon->an_page;
563 KASSERT(opg != NULL);
564 KASSERT(opg->uobject == NULL || opg->loan_count > 0);
565 } else if (uobjpage != PGO_DONTCARE) {
566 /* object-backed COW */
567 opg = uobjpage;
568 KASSERT(rw_lock_held(opg->uobject->vmobjlock));
569 } else {
570 /* ZFOD */
571 opg = NULL;
572 }
573 if (opg != NULL) {
574 uobj = opg->uobject;
575 } else {
576 uobj = NULL;
577 }
578
579 KASSERT(amap != NULL);
580 KASSERT(uobjpage != NULL);
581 KASSERT(rw_write_held(amap->am_lock));
582 KASSERT(oanon == NULL || amap->am_lock == oanon->an_lock);
583 KASSERT(uobj == NULL || rw_lock_held(uobj->vmobjlock));
584
585 if (*spare != NULL) {
586 anon = *spare;
587 *spare = NULL;
588 } else {
589 anon = uvm_analloc();
590 }
591 if (anon) {
592
593 /*
594 * The new anon is locked.
595 *
596 * if opg == NULL, we want a zero'd, dirty page,
597 * so have uvm_pagealloc() do that for us.
598 */
599
600 KASSERT(anon->an_lock == NULL);
601 anon->an_lock = amap->am_lock;
602 pg = uvm_pagealloc(NULL, ufi->orig_rvaddr, anon,
603 UVM_FLAG_COLORMATCH | (opg == NULL ? UVM_PGA_ZERO : 0));
604 if (pg == NULL) {
605 anon->an_lock = NULL;
606 }
607 } else {
608 pg = NULL;
609 }
610
611 /*
612 * out of memory resources?
613 */
614
615 if (pg == NULL) {
616 /* save anon for the next try. */
617 if (anon != NULL) {
618 *spare = anon;
619 }
620
621 /* unlock and fail ... */
622 uvmfault_unlockall(ufi, amap, uobj);
623 if (!uvm_reclaimable()) {
624 UVMHIST_LOG(maphist, "out of VM", 0,0,0,0);
625 cpu_count(CPU_COUNT_FLTNOANON, 1);
626 error = ENOMEM;
627 goto done;
628 }
629
630 UVMHIST_LOG(maphist, "out of RAM, waiting for more", 0,0,0,0);
631 cpu_count(CPU_COUNT_FLTNORAM, 1);
632 uvm_wait("flt_noram5");
633 error = ERESTART;
634 goto done;
635 }
636
637 /* copy page [pg now dirty] */
638 if (opg) {
639 uvm_pagecopy(opg, pg);
640 }
641 KASSERT(uvm_pagegetdirty(pg) == UVM_PAGE_STATUS_DIRTY);
642
643 amap_add(&ufi->entry->aref, ufi->orig_rvaddr - ufi->entry->start, anon,
644 oanon != NULL);
645
646 /*
647 * from this point on am_lock won't be dropped until the page is
648 * entered, so it's safe to unbusy the page up front.
649 *
650 * uvm_fault_{upper,lower}_done will activate or enqueue the page.
651 */
652
653 pg = anon->an_page;
654 pg->flags &= ~(PG_BUSY|PG_FAKE);
655 UVM_PAGE_OWN(pg, NULL);
656
657 *nanon = anon;
658 error = 0;
659 done:
660 return error;
661 }
662
663 /*
664 * Update statistics after fault resolution.
665 * - maxrss
666 */
667 void
668 uvmfault_update_stats(struct uvm_faultinfo *ufi)
669 {
670 struct vm_map *map;
671 struct vmspace *vm;
672 struct proc *p;
673 vsize_t res;
674
675 map = ufi->orig_map;
676
677 p = curproc;
678 KASSERT(p != NULL);
679 vm = p->p_vmspace;
680
681 if (&vm->vm_map != map)
682 return;
683
684 res = pmap_resident_count(map->pmap);
685 if (vm->vm_rssmax < res)
686 vm->vm_rssmax = res;
687 }
688
689 /*
690 * F A U L T - m a i n e n t r y p o i n t
691 */
692
693 /*
694 * uvm_fault: page fault handler
695 *
696 * => called from MD code to resolve a page fault
697 * => VM data structures usually should be unlocked. however, it is
698 * possible to call here with the main map locked if the caller
699 * gets a write lock, sets it recursive, and then calls us (c.f.
700 * uvm_map_pageable). this should be avoided because it keeps
701 * the map locked off during I/O.
702 * => MUST NEVER BE CALLED IN INTERRUPT CONTEXT
703 */
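/*
 * Illustrative sketch (not part of this file): MD trap handlers typically
 * resolve a fault with something like
 *
 *	error = uvm_fault(map, trunc_page(va), ftype);
 *
 * where uvm_fault() is the wrapper that calls uvm_fault_internal() below
 * with fault_flag == 0; uvm_fault_wire() adds UVM_FAULT_WIRE (and
 * optionally UVM_FAULT_MAXPROT).
 */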
704
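/*
 * MASK(entry) is applied when entering lower-layer (uobj) pages: for
 * copy-on-write entries it strips VM_PROT_WRITE, so a later write still
 * faults and promotes the page to a new anon in the upper layer.
 */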
705 #define MASK(entry) (UVM_ET_ISCOPYONWRITE(entry) ? \
706 ~VM_PROT_WRITE : VM_PROT_ALL)
707
708 /* fault_flag values passed from uvm_fault_wire to uvm_fault_internal */
709 #define UVM_FAULT_WIRE (1 << 0)
710 #define UVM_FAULT_MAXPROT (1 << 1)
711
712 struct uvm_faultctx {
713
714 /*
715 * the following members are set up by uvm_fault_check() and
716 * read-only after that.
717 *
718 * note that narrow is used by uvm_fault_check() to change
719 * the behaviour after ERESTART.
720 *
721 * most of them might change after RESTART if the underlying
722 * map entry has been changed behind us. an exception is
723 * wire_paging, which never changes.
724 */
725 vm_prot_t access_type;
726 vaddr_t startva;
727 int npages;
728 int centeridx;
729 bool narrow; /* work on a single requested page only */
730 bool wire_mapping; /* request a PMAP_WIRED mapping
731 (UVM_FAULT_WIRE or VM_MAPENT_ISWIRED) */
732 bool wire_paging; /* request uvm_pagewire
733 (true for UVM_FAULT_WIRE) */
734 bool cow_now; /* VM_PROT_WRITE is actually requested
735 (ie. should break COW and page loaning) */
736
737 /*
738 * enter_prot is set up by uvm_fault_check() and clamped
739 * (ie. drop the VM_PROT_WRITE bit) in various places in case
740 * of !cow_now.
741 */
742 vm_prot_t enter_prot; /* prot at which we want to enter pages in */
743
744 /*
745 * the following member is for uvmfault_promote() and ERESTART.
746 */
747 struct vm_anon *anon_spare;
748
749 /*
750 * the following is actually a uvm_fault_lower() internal.
751 * it's here merely for debugging.
752 * (or due to the mechanical separation of the function?)
753 */
754 bool promote;
755
756 /*
757 * type of lock to acquire on objects in both layers.
758 */
759 krw_t lower_lock_type;
760 krw_t upper_lock_type;
761 };
762
763 static inline int uvm_fault_check(
764 struct uvm_faultinfo *, struct uvm_faultctx *,
765 struct vm_anon ***, bool);
766
767 static int uvm_fault_upper(
768 struct uvm_faultinfo *, struct uvm_faultctx *,
769 struct vm_anon **);
770 static inline int uvm_fault_upper_lookup(
771 struct uvm_faultinfo *, const struct uvm_faultctx *,
772 struct vm_anon **, struct vm_page **);
773 static inline void uvm_fault_upper_neighbor(
774 struct uvm_faultinfo *, const struct uvm_faultctx *,
775 vaddr_t, struct vm_page *, bool);
776 static inline int uvm_fault_upper_loan(
777 struct uvm_faultinfo *, struct uvm_faultctx *,
778 struct vm_anon *, struct uvm_object **);
779 static inline int uvm_fault_upper_promote(
780 struct uvm_faultinfo *, struct uvm_faultctx *,
781 struct uvm_object *, struct vm_anon *);
782 static inline int uvm_fault_upper_direct(
783 struct uvm_faultinfo *, struct uvm_faultctx *,
784 struct uvm_object *, struct vm_anon *);
785 static int uvm_fault_upper_enter(
786 struct uvm_faultinfo *, const struct uvm_faultctx *,
787 struct uvm_object *, struct vm_anon *,
788 struct vm_page *, struct vm_anon *);
789 static inline void uvm_fault_upper_done(
790 struct uvm_faultinfo *, const struct uvm_faultctx *,
791 struct vm_anon *, struct vm_page *);
792
793 static int uvm_fault_lower(
794 struct uvm_faultinfo *, struct uvm_faultctx *,
795 struct vm_page **);
796 static inline void uvm_fault_lower_lookup(
797 struct uvm_faultinfo *, const struct uvm_faultctx *,
798 struct vm_page **);
799 static inline void uvm_fault_lower_neighbor(
800 struct uvm_faultinfo *, const struct uvm_faultctx *,
801 vaddr_t, struct vm_page *);
802 static inline int uvm_fault_lower_io(
803 struct uvm_faultinfo *, struct uvm_faultctx *,
804 struct uvm_object **, struct vm_page **);
805 static inline int uvm_fault_lower_direct(
806 struct uvm_faultinfo *, struct uvm_faultctx *,
807 struct uvm_object *, struct vm_page *);
808 static inline int uvm_fault_lower_direct_loan(
809 struct uvm_faultinfo *, struct uvm_faultctx *,
810 struct uvm_object *, struct vm_page **,
811 struct vm_page **);
812 static inline int uvm_fault_lower_promote(
813 struct uvm_faultinfo *, struct uvm_faultctx *,
814 struct uvm_object *, struct vm_page *);
815 static int uvm_fault_lower_enter(
816 struct uvm_faultinfo *, const struct uvm_faultctx *,
817 struct uvm_object *,
818 struct vm_anon *, struct vm_page *);
819 static inline void uvm_fault_lower_done(
820 struct uvm_faultinfo *, const struct uvm_faultctx *,
821 struct uvm_object *, struct vm_page *);
822
823 int
824 uvm_fault_internal(struct vm_map *orig_map, vaddr_t vaddr,
825 vm_prot_t access_type, int fault_flag)
826 {
827 struct uvm_faultinfo ufi;
828 struct uvm_faultctx flt = {
829 .access_type = access_type,
830
831 /* don't look for neighborhood pages on "wire" fault */
832 .narrow = (fault_flag & UVM_FAULT_WIRE) != 0,
833
834 /* "wire" fault causes wiring of both mapping and paging */
835 .wire_mapping = (fault_flag & UVM_FAULT_WIRE) != 0,
836 .wire_paging = (fault_flag & UVM_FAULT_WIRE) != 0,
837
838 /*
839 * default lock type to acquire on upper & lower layer
840 * objects: reader. this can be upgraded at any point
841 * during the fault from read -> write and uvm_faultctx
842 * changed to match, but is never downgraded write -> read.
843 */
844 #ifdef __HAVE_UNLOCKED_PMAP /* XXX temporary */
845 .upper_lock_type = RW_WRITER,
846 .lower_lock_type = RW_WRITER,
847 #else
848 .upper_lock_type = RW_READER,
849 .lower_lock_type = RW_READER,
850 #endif
851 };
852 const bool maxprot = (fault_flag & UVM_FAULT_MAXPROT) != 0;
853 struct vm_anon *anons_store[UVM_MAXRANGE], **anons;
854 struct vm_page *pages_store[UVM_MAXRANGE], **pages;
855 int error;
856
857 UVMHIST_FUNC(__func__);
858 UVMHIST_CALLARGS(maphist, "(map=%#jx, vaddr=%#jx, at=%jd, ff=%jd)",
859 (uintptr_t)orig_map, vaddr, access_type, fault_flag);
860
861 /* Don't count anything until user interaction is possible */
862 kpreempt_disable();
863 if (__predict_true(start_init_exec)) {
864 struct cpu_info *ci = curcpu();
865 CPU_COUNT(CPU_COUNT_NFAULT, 1);
866 /* Don't flood RNG subsystem with samples. */
867 if (++(ci->ci_faultrng) == 503) {
868 ci->ci_faultrng = 0;
869 rnd_add_uint32(&uvm_fault_rndsource,
870 sizeof(vaddr_t) == sizeof(uint32_t) ?
871 (uint32_t)vaddr : sizeof(vaddr_t) ==
872 sizeof(uint64_t) ?
873 (uint32_t)vaddr :
874 (uint32_t)ci->ci_counts[CPU_COUNT_NFAULT]);
875 }
876 }
877 kpreempt_enable();
878
879 /*
880 * init the IN parameters in the ufi
881 */
882
883 ufi.orig_map = orig_map;
884 ufi.orig_rvaddr = trunc_page(vaddr);
885 ufi.orig_size = PAGE_SIZE; /* can't get any smaller than this */
886
887 error = ERESTART;
888 while (error == ERESTART) { /* ReFault: */
889 anons = anons_store;
890 pages = pages_store;
891
892 error = uvm_fault_check(&ufi, &flt, &anons, maxprot);
893 if (error != 0)
894 continue;
895
896 error = uvm_fault_upper_lookup(&ufi, &flt, anons, pages);
897 if (error != 0)
898 continue;
899
900 if (pages[flt.centeridx] == PGO_DONTCARE)
901 error = uvm_fault_upper(&ufi, &flt, anons);
902 else {
903 struct uvm_object * const uobj =
904 ufi.entry->object.uvm_obj;
905
906 if (uobj && uobj->pgops->pgo_fault != NULL) {
907 /*
908 * invoke "special" fault routine.
909 */
910 rw_enter(uobj->vmobjlock, RW_WRITER);
911 /* locked: maps(read), amap(if there), uobj */
912 error = uobj->pgops->pgo_fault(&ufi,
913 flt.startva, pages, flt.npages,
914 flt.centeridx, flt.access_type,
915 PGO_LOCKED|PGO_SYNCIO);
916
917 /*
918 * locked: nothing, pgo_fault has unlocked
919 * everything
920 */
921
922 /*
923 * object fault routine responsible for
924 * pmap_update().
925 */
926
927 /*
928 * Wake up the pagedaemon if the fault method
929 * failed for lack of memory but some can be
930 * reclaimed.
931 */
932 if (error == ENOMEM && uvm_reclaimable()) {
933 uvm_wait("pgo_fault");
934 error = ERESTART;
935 }
936 } else {
937 error = uvm_fault_lower(&ufi, &flt, pages);
938 }
939 }
940 }
941
942 if (flt.anon_spare != NULL) {
943 flt.anon_spare->an_ref--;
944 KASSERT(flt.anon_spare->an_ref == 0);
945 KASSERT(flt.anon_spare->an_lock == NULL);
946 uvm_anfree(flt.anon_spare);
947 }
948 return error;
949 }
950
951 /*
952 * uvm_fault_check: check prot, handle needs-copy, etc.
953 *
954 * 1. lookup entry.
955 * 2. check protection.
956 * 3. adjust fault condition (mainly for simulated fault).
957 * 4. handle needs-copy (lazy amap copy).
958 * 5. establish range of interest for neighbor fault (aka pre-fault).
959 * 6. look up anons (if amap exists).
960 * 7. flush pages (if MADV_SEQUENTIAL)
961 *
962 * => called with nothing locked.
963 * => if we fail (result != 0) we unlock everything.
964 * => initialize/adjust many members of flt.
965 */
966
967 static int
968 uvm_fault_check(
969 struct uvm_faultinfo *ufi, struct uvm_faultctx *flt,
970 struct vm_anon ***ranons, bool maxprot)
971 {
972 struct vm_amap *amap;
973 struct uvm_object *uobj;
974 vm_prot_t check_prot;
975 int nback, nforw;
976 UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
977
978 /*
979 * lookup and lock the maps
980 */
981
982 if (uvmfault_lookup(ufi, false) == false) {
983 UVMHIST_LOG(maphist, "<- no mapping @ %#jx", ufi->orig_rvaddr,
984 0,0,0);
985 return EFAULT;
986 }
987 /* locked: maps(read) */
988
989 #ifdef DIAGNOSTIC
990 if ((ufi->map->flags & VM_MAP_PAGEABLE) == 0) {
991 printf("Page fault on non-pageable map:\n");
992 printf("ufi->map = %p\n", ufi->map);
993 printf("ufi->orig_map = %p\n", ufi->orig_map);
994 printf("ufi->orig_rvaddr = %#lx\n", (u_long) ufi->orig_rvaddr);
995 panic("uvm_fault: (ufi->map->flags & VM_MAP_PAGEABLE) == 0");
996 }
997 #endif
998
999 /*
1000 * check protection
1001 */
1002
1003 check_prot = maxprot ?
1004 ufi->entry->max_protection : ufi->entry->protection;
1005 if ((check_prot & flt->access_type) != flt->access_type) {
1006 UVMHIST_LOG(maphist,
1007 "<- protection failure (prot=%#jx, access=%#jx)",
1008 ufi->entry->protection, flt->access_type, 0, 0);
1009 uvmfault_unlockmaps(ufi, false);
1010 return EFAULT;
1011 }
1012
1013 /*
1014 * "enter_prot" is the protection we want to enter the page in at.
1015 * for certain pages (e.g. copy-on-write pages) this protection can
1016 * be more strict than ufi->entry->protection. "wired" means either
1017 * the entry is wired or we are fault-wiring the pg.
1018 */
1019
1020 flt->enter_prot = ufi->entry->protection;
1021 if (VM_MAPENT_ISWIRED(ufi->entry)) {
1022 flt->wire_mapping = true;
1023 flt->wire_paging = true;
1024 flt->narrow = true;
1025 }
1026
1027 if (flt->wire_mapping) {
1028 flt->access_type = flt->enter_prot; /* full access for wired */
1029 flt->cow_now = (check_prot & VM_PROT_WRITE) != 0;
1030 } else {
1031 flt->cow_now = (flt->access_type & VM_PROT_WRITE) != 0;
1032 }
1033
1034 if (flt->wire_paging) {
1035 /* wiring pages requires a write lock. */
1036 flt->upper_lock_type = RW_WRITER;
1037 flt->lower_lock_type = RW_WRITER;
1038 }
1039
1040 flt->promote = false;
1041
1042 /*
1043 * handle "needs_copy" case. if we need to copy the amap we will
1044 * have to drop our readlock and relock it with a write lock. (we
1045 * need a write lock to change anything in a map entry [e.g.
1046 * needs_copy]).
1047 */
1048
1049 if (UVM_ET_ISNEEDSCOPY(ufi->entry)) {
1050 if (flt->cow_now || (ufi->entry->object.uvm_obj == NULL)) {
1051 KASSERT(!maxprot);
1052 /* need to clear */
1053 UVMHIST_LOG(maphist,
1054 " need to clear needs_copy and refault",0,0,0,0);
1055 uvmfault_unlockmaps(ufi, false);
1056 uvmfault_amapcopy(ufi);
1057 cpu_count(CPU_COUNT_FLTAMCOPY, 1);
1058 return ERESTART;
1059
1060 } else {
1061
1062 /*
1063 * ensure that we pmap_enter page R/O since
1064 * needs_copy is still true
1065 */
1066
1067 flt->enter_prot &= ~VM_PROT_WRITE;
1068 }
1069 }
1070
1071 /*
1072 * identify the players
1073 */
1074
1075 amap = ufi->entry->aref.ar_amap; /* upper layer */
1076 uobj = ufi->entry->object.uvm_obj; /* lower layer */
1077
1078 /*
1079 * check for a case 0 fault. if nothing backing the entry then
1080 * error now.
1081 */
1082
1083 if (amap == NULL && uobj == NULL) {
1084 uvmfault_unlockmaps(ufi, false);
1085 UVMHIST_LOG(maphist,"<- no backing store, no overlay",0,0,0,0);
1086 return EFAULT;
1087 }
1088
1089 /*
1090 * for a case 2B fault waste no time on adjacent pages because
1091 * they are likely already entered.
1092 */
1093
1094 if (uobj != NULL && amap != NULL &&
1095 (flt->access_type & VM_PROT_WRITE) != 0) {
1096 /* wide fault (!narrow) */
1097 flt->narrow = true;
1098 }
1099
1100 /*
1101 * establish range of interest based on advice from mapper
1102 * and then clip to fit map entry. note that we only want
1103 * to do this the first time through the fault. if we
1104 * ReFault we will disable this by setting "narrow" to true.
1105 */
1106
1107 if (flt->narrow == false) {
1108
1109 /* wide fault (!narrow) */
1110 KASSERT(uvmadvice[ufi->entry->advice].advice ==
1111 ufi->entry->advice);
1112 nback = MIN(uvmadvice[ufi->entry->advice].nback,
1113 (ufi->orig_rvaddr - ufi->entry->start) >> PAGE_SHIFT);
1114 flt->startva = ufi->orig_rvaddr - (nback << PAGE_SHIFT);
1115 /*
1116 * note: "-1" because we don't want to count the
1117 * faulting page as forw
1118 */
1119 nforw = MIN(uvmadvice[ufi->entry->advice].nforw,
1120 ((ufi->entry->end - ufi->orig_rvaddr) >>
1121 PAGE_SHIFT) - 1);
1122 flt->npages = nback + nforw + 1;
1123 flt->centeridx = nback;
1124
1125 flt->narrow = true; /* ensure only once per-fault */
1126
1127 } else {
1128
1129 /* narrow fault! */
1130 nback = nforw = 0;
1131 flt->startva = ufi->orig_rvaddr;
1132 flt->npages = 1;
1133 flt->centeridx = 0;
1134
1135 }
1136 /* offset from entry's start to pgs' start */
1137 const voff_t eoff = flt->startva - ufi->entry->start;
1138
1139 /* locked: maps(read) */
1140 UVMHIST_LOG(maphist, " narrow=%jd, back=%jd, forw=%jd, startva=%#jx",
1141 flt->narrow, nback, nforw, flt->startva);
1142 UVMHIST_LOG(maphist, " entry=%#jx, amap=%#jx, obj=%#jx",
1143 (uintptr_t)ufi->entry, (uintptr_t)amap, (uintptr_t)uobj, 0);
1144
1145 /*
1146 * guess at the most suitable lock types to acquire.
1147 * if we've got an amap then lock it and extract current anons.
1148 */
1149
1150 if (amap) {
1151 if ((amap_flags(amap) & AMAP_SHARED) == 0) {
1152 /*
1153 * the amap isn't shared. get a writer lock to
1154 * avoid the cost of upgrading the lock later if
1155 * needed.
1156 *
1157 * XXX nice for PostgreSQL, but consider threads.
1158 */
1159 flt->upper_lock_type = RW_WRITER;
1160 } else if ((flt->access_type & VM_PROT_WRITE) != 0) {
1161 /*
1162 * assume we're about to COW.
1163 */
1164 flt->upper_lock_type = RW_WRITER;
1165 }
1166 amap_lock(amap, flt->upper_lock_type);
1167 amap_lookups(&ufi->entry->aref, eoff, *ranons, flt->npages);
1168 } else {
1169 if ((flt->access_type & VM_PROT_WRITE) != 0) {
1170 /*
1171 * we are about to dirty the object and that
1172 * requires a write lock.
1173 */
1174 flt->lower_lock_type = RW_WRITER;
1175 }
1176 *ranons = NULL; /* to be safe */
1177 }
1178
1179 /* locked: maps(read), amap(if there) */
1180 KASSERT(amap == NULL ||
1181 rw_lock_op(amap->am_lock) == flt->upper_lock_type);
1182
1183 /*
1184 * for MADV_SEQUENTIAL mappings we want to deactivate the back pages
1185 * now and then forget about them (for the rest of the fault).
1186 */
1187
1188 if (ufi->entry->advice == MADV_SEQUENTIAL && nback != 0) {
1189
1190 UVMHIST_LOG(maphist, " MADV_SEQUENTIAL: flushing backpages",
1191 0,0,0,0);
1192 /* flush back-page anons? */
1193 if (amap)
1194 uvmfault_anonflush(*ranons, nback);
1195
1196 /*
1197 * flush object? change lock type to RW_WRITER, to avoid
1198 * excessive competition between read/write locks if many
1199 * threads doing "sequential access".
1200 */
1201 if (uobj) {
1202 voff_t uoff;
1203
1204 flt->lower_lock_type = RW_WRITER;
1205 uoff = ufi->entry->offset + eoff;
1206 rw_enter(uobj->vmobjlock, RW_WRITER);
1207 (void) (uobj->pgops->pgo_put)(uobj, uoff, uoff +
1208 (nback << PAGE_SHIFT), PGO_DEACTIVATE);
1209 }
1210
1211 /* now forget about the backpages */
1212 if (amap)
1213 *ranons += nback;
1214 flt->startva += (nback << PAGE_SHIFT);
1215 flt->npages -= nback;
1216 flt->centeridx = 0;
1217 }
1218 /*
1219 * => startva is fixed
1220 * => npages is fixed
1221 */
1222 KASSERT(flt->startva <= ufi->orig_rvaddr);
1223 KASSERT(ufi->orig_rvaddr + ufi->orig_size <=
1224 flt->startva + (flt->npages << PAGE_SHIFT));
1225 return 0;
1226 }
1227
1228 /*
1229 * uvm_fault_upper_upgrade: upgrade upper lock, reader -> writer
1230 */
1231
1232 static inline int
1233 uvm_fault_upper_upgrade(struct uvm_faultinfo *ufi, struct uvm_faultctx *flt,
1234 struct vm_amap *amap, struct uvm_object *uobj)
1235 {
1236 UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
1237
1238 KASSERT(amap != NULL);
1239 KASSERT(flt->upper_lock_type == rw_lock_op(amap->am_lock));
1240
1241 /*
1242 * fast path.
1243 */
1244
1245 if (__predict_true(flt->upper_lock_type == RW_WRITER)) {
1246 return 0;
1247 }
1248
1249 /*
1250 * otherwise try for the upgrade. if we don't get it, unlock
1251 * everything, restart the fault and next time around get a writer
1252 * lock.
1253 */
1254
1255 flt->upper_lock_type = RW_WRITER;
1256 if (__predict_false(!rw_tryupgrade(amap->am_lock))) {
1257 uvmfault_unlockall(ufi, amap, uobj);
1258 cpu_count(CPU_COUNT_FLTNOUP, 1);
1259 UVMHIST_LOG(maphist, " !upgrade upper", 0, 0,0,0);
1260 return ERESTART;
1261 }
1262 cpu_count(CPU_COUNT_FLTUP, 1);
1263 KASSERT(flt->upper_lock_type == rw_lock_op(amap->am_lock));
1264 return 0;
1265 }
1266
1267 /*
1268 * uvm_fault_upper_lookup: look up existing h/w mapping and amap.
1269 *
1270 * iterate range of interest:
1271 * 1. check if h/w mapping exists. if yes, we don't care
1272 * 2. check if anon exists. if not, page is lower.
1273 * 3. if anon exists, enter h/w mapping for neighbors.
1274 *
1275 * => called with amap locked (if exists).
1276 */
1277
1278 static int
1279 uvm_fault_upper_lookup(
1280 struct uvm_faultinfo *ufi, const struct uvm_faultctx *flt,
1281 struct vm_anon **anons, struct vm_page **pages)
1282 {
1283 struct vm_amap *amap = ufi->entry->aref.ar_amap;
1284 int lcv;
1285 vaddr_t currva;
1286 bool shadowed __unused;
1287 bool entered;
1288 UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
1289
1290 /* locked: maps(read), amap(if there) */
1291 KASSERT(amap == NULL ||
1292 rw_lock_op(amap->am_lock) == flt->upper_lock_type);
1293
1294 /*
1295 * map in the backpages and frontpages we found in the amap in hopes
1296 * of preventing future faults. we also init the pages[] array as
1297 * we go.
1298 */
1299
1300 currva = flt->startva;
1301 shadowed = false;
1302 entered = false;
1303 for (lcv = 0; lcv < flt->npages; lcv++, currva += PAGE_SIZE) {
1304 /*
1305 * unmapped or center page. check if any anon at this level.
1306 */
1307 if (amap == NULL || anons[lcv] == NULL) {
1308 pages[lcv] = NULL;
1309 continue;
1310 }
1311
1312 /*
1313 * check for present page and map if possible.
1314 */
1315
1316 pages[lcv] = PGO_DONTCARE;
1317 if (lcv == flt->centeridx) { /* save center for later! */
1318 shadowed = true;
1319 continue;
1320 }
1321
1322 struct vm_anon *anon = anons[lcv];
1323 struct vm_page *pg = anon->an_page;
1324
1325 KASSERT(anon->an_lock == amap->am_lock);
1326
1327 /*
1328 * ignore loaned and busy pages.
1329 * don't play with VAs that are already mapped.
1330 */
1331
1332 if (pg && pg->loan_count == 0 && (pg->flags & PG_BUSY) == 0 &&
1333 !pmap_extract(ufi->orig_map->pmap, currva, NULL)) {
1334 uvm_fault_upper_neighbor(ufi, flt, currva,
1335 pg, anon->an_ref > 1);
1336 entered = true;
1337 }
1338 }
1339 if (entered) {
1340 pmap_update(ufi->orig_map->pmap);
1341 }
1342
1343 /* locked: maps(read), amap(if there) */
1344 KASSERT(amap == NULL ||
1345 rw_lock_op(amap->am_lock) == flt->upper_lock_type);
1346 /* (shadowed == true) if there is an anon at the faulting address */
1347 UVMHIST_LOG(maphist, " shadowed=%jd, will_get=%jd", shadowed,
1348 (ufi->entry->object.uvm_obj && shadowed != false),0,0);
1349
1350 return 0;
1351 }
1352
1353 /*
1354 * uvm_fault_upper_neighbor: enter single upper neighbor page.
1355 *
1356 * => called with amap and anon locked.
1357 */
1358
1359 static void
1360 uvm_fault_upper_neighbor(
1361 struct uvm_faultinfo *ufi, const struct uvm_faultctx *flt,
1362 vaddr_t currva, struct vm_page *pg, bool readonly)
1363 {
1364 UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
1365
1366 /* locked: amap, anon */
1367
1368 KASSERT(pg->uobject == NULL);
1369 KASSERT(pg->uanon != NULL);
1370 KASSERT(rw_lock_op(pg->uanon->an_lock) == flt->upper_lock_type);
1371 KASSERT(uvm_pagegetdirty(pg) != UVM_PAGE_STATUS_CLEAN);
1372
1373 /*
1374 * there wasn't a direct fault on the page, so avoid the cost of
1375 * activating it.
1376 */
1377
1378 if (!uvmpdpol_pageisqueued_p(pg) && pg->wire_count == 0) {
1379 uvm_pagelock(pg);
1380 uvm_pageenqueue(pg);
1381 uvm_pageunlock(pg);
1382 }
1383
1384 UVMHIST_LOG(maphist,
1385 " MAPPING: n anon: pm=%#jx, va=%#jx, pg=%#jx",
1386 (uintptr_t)ufi->orig_map->pmap, currva, (uintptr_t)pg, 0);
1387 cpu_count(CPU_COUNT_FLTNAMAP, 1);
1388
1389 /*
1390 * Since this page isn't the page that's actually faulting,
1391 * ignore pmap_enter() failures; it's not critical that we
1392 * enter these right now.
1393 */
1394
1395 (void) pmap_enter(ufi->orig_map->pmap, currva,
1396 VM_PAGE_TO_PHYS(pg),
1397 readonly ? (flt->enter_prot & ~VM_PROT_WRITE) :
1398 flt->enter_prot,
1399 PMAP_CANFAIL | (flt->wire_mapping ? PMAP_WIRED : 0));
1400 }
1401
1402 /*
1403 * uvm_fault_upper: handle upper fault.
1404 *
1405 * 1. acquire anon lock.
1406 * 2. get anon. let uvmfault_anonget do the dirty work.
1407 * 3. handle loan.
1408 * 4. dispatch direct or promote handlers.
1409 */
1410
1411 static int
1412 uvm_fault_upper(
1413 struct uvm_faultinfo *ufi, struct uvm_faultctx *flt,
1414 struct vm_anon **anons)
1415 {
1416 struct vm_amap * const amap = ufi->entry->aref.ar_amap;
1417 struct vm_anon * const anon = anons[flt->centeridx];
1418 struct uvm_object *uobj;
1419 int error;
1420 UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
1421
1422 /* locked: maps(read), amap, anon */
1423 KASSERT(rw_lock_op(amap->am_lock) == flt->upper_lock_type);
1424 KASSERT(anon->an_lock == amap->am_lock);
1425
1426 /*
1427 * handle case 1: fault on an anon in our amap
1428 */
1429
1430 UVMHIST_LOG(maphist, " case 1 fault: anon=%#jx",
1431 (uintptr_t)anon, 0, 0, 0);
1432
1433 /*
1434 * no matter if we have case 1A or case 1B we are going to need to
1435 * have the anon's memory resident. ensure that now.
1436 */
1437
1438 /*
1439 * let uvmfault_anonget do the dirty work.
1440 * if it fails (!OK) it will unlock everything for us.
1441 * if it succeeds, locks are still valid and locked.
1442 * also, if it is OK, then the anon's page is on the queues.
1443 * if the page is on loan from a uvm_object, then anonget will
1444 * lock that object for us if it does not fail.
1445 */
1446 retry:
1447 error = uvmfault_anonget(ufi, amap, anon);
1448 switch (error) {
1449 case 0:
1450 break;
1451
1452 case ERESTART:
1453 return ERESTART;
1454
1455 case EAGAIN:
1456 kpause("fltagain1", false, hz/2, NULL);
1457 return ERESTART;
1458
1459 case ENOLCK:
1460 /* it needs a write lock: retry */
1461 error = uvm_fault_upper_upgrade(ufi, flt, amap, NULL);
1462 if (error != 0) {
1463 return error;
1464 }
1465 KASSERT(rw_write_held(amap->am_lock));
1466 goto retry;
1467
1468 default:
1469 return error;
1470 }
1471
1472 /*
1473 * uobj is non null if the page is on loan from an object (i.e. uobj)
1474 */
1475
1476 uobj = anon->an_page->uobject; /* locked by anonget if !NULL */
1477
1478 /* locked: maps(read), amap, anon, uobj(if one) */
1479 KASSERT(rw_lock_op(amap->am_lock) == flt->upper_lock_type);
1480 KASSERT(anon->an_lock == amap->am_lock);
1481 KASSERT(uobj == NULL ||
1482 rw_lock_op(uobj->vmobjlock) == flt->lower_lock_type);
1483
1484 /*
1485 * special handling for loaned pages
1486 */
1487
1488 if (anon->an_page->loan_count) {
1489 error = uvm_fault_upper_loan(ufi, flt, anon, &uobj);
1490 if (error != 0)
1491 return error;
1492 }
1493
1494 /*
1495 * if we are case 1B then we will need to allocate a new blank
1496 * anon to transfer the data into. note that we have a lock
1497 * on anon, so no one can busy or release the page until we are done.
1498 * also note that the ref count can't drop to zero here because
1499 * it is > 1 and we are only dropping one ref.
1500 *
1501 * in the (hopefully very rare) case that we are out of RAM we
1502 * will unlock, wait for more RAM, and refault.
1503 *
1504 * if we are out of anon VM we kill the process (XXX: could wait?).
1505 */
1506
1507 if (flt->cow_now && anon->an_ref > 1) {
1508 flt->promote = true;
1509 error = uvm_fault_upper_promote(ufi, flt, uobj, anon);
1510 } else {
1511 error = uvm_fault_upper_direct(ufi, flt, uobj, anon);
1512 }
1513 return error;
1514 }
1515
1516 /*
1517 * uvm_fault_upper_loan: handle loaned upper page.
1518 *
1519 * 1. if not cow'ing now, simply adjust flt->enter_prot.
1520 * 2. if cow'ing now, and if ref count is 1, break loan.
1521 */
1522
1523 static int
1524 uvm_fault_upper_loan(
1525 struct uvm_faultinfo *ufi, struct uvm_faultctx *flt,
1526 struct vm_anon *anon, struct uvm_object **ruobj)
1527 {
1528 struct vm_amap * const amap = ufi->entry->aref.ar_amap;
1529 int error = 0;
1530 UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
1531
1532 if (!flt->cow_now) {
1533
1534 /*
1535 * for read faults on loaned pages we just cap the
1536 * protection at read-only.
1537 */
1538
1539 flt->enter_prot = flt->enter_prot & ~VM_PROT_WRITE;
1540
1541 } else {
1542 /*
1543 * note that we can't allow writes into a loaned page!
1544 *
1545 * if we have a write fault on a loaned page in an
1546 * anon then we need to look at the anon's ref count.
1547 * if it is greater than one then we are going to do
1548 * a normal copy-on-write fault into a new anon (this
1549 * is not a problem). however, if the reference count
1550 * is one (a case where we would normally allow a
1551 * write directly to the page) then we need to kill
1552 * the loan before we continue.
1553 */
1554
1555 /* >1 case is already ok */
1556 if (anon->an_ref == 1) {
1557 /* breaking loan requires a write lock. */
1558 error = uvm_fault_upper_upgrade(ufi, flt, amap, NULL);
1559 if (error != 0) {
1560 return error;
1561 }
1562 KASSERT(rw_write_held(amap->am_lock));
1563
1564 error = uvm_loanbreak_anon(anon, *ruobj);
1565 if (error != 0) {
1566 uvmfault_unlockall(ufi, amap, *ruobj);
1567 uvm_wait("flt_noram2");
1568 return ERESTART;
1569 }
1570 /* if we were a loan receiver uobj is gone */
1571 if (*ruobj)
1572 *ruobj = NULL;
1573 }
1574 }
1575 return error;
1576 }
1577
1578 /*
1579 * uvm_fault_upper_promote: promote upper page.
1580 *
1581 * 1. call uvmfault_promote.
1582 * 2. enqueue page.
1583 * 3. deref.
1584 * 4. pass page to uvm_fault_upper_enter.
1585 */
1586
1587 static int
1588 uvm_fault_upper_promote(
1589 struct uvm_faultinfo *ufi, struct uvm_faultctx *flt,
1590 struct uvm_object *uobj, struct vm_anon *anon)
1591 {
1592 struct vm_amap * const amap = ufi->entry->aref.ar_amap;
1593 struct vm_anon * const oanon = anon;
1594 struct vm_page *pg;
1595 int error;
1596 UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
1597
1598 UVMHIST_LOG(maphist, " case 1B: COW fault",0,0,0,0);
1599 cpu_count(CPU_COUNT_FLT_ACOW, 1);
1600
1601 /* promoting requires a write lock. */
1602 error = uvm_fault_upper_upgrade(ufi, flt, amap, NULL);
1603 if (error != 0) {
1604 return error;
1605 }
1606 KASSERT(rw_write_held(amap->am_lock));
1607
1608 error = uvmfault_promote(ufi, oanon, PGO_DONTCARE, &anon,
1609 &flt->anon_spare);
1610 switch (error) {
1611 case 0:
1612 break;
1613 case ERESTART:
1614 return ERESTART;
1615 default:
1616 return error;
1617 }
1618 pg = anon->an_page;
1619
1620 KASSERT(anon->an_lock == oanon->an_lock);
1621 KASSERT((pg->flags & (PG_BUSY | PG_FAKE)) == 0);
1622
1623 /* deref: can not drop to zero here by defn! */
1624 KASSERT(oanon->an_ref > 1);
1625 oanon->an_ref--;
1626
1627 /*
1628 * note: oanon is still locked, as is the new anon. we
1629 * need to check for this later when we unlock oanon; if
1630 * oanon != anon, we'll have to unlock anon, too.
1631 */
1632
1633 return uvm_fault_upper_enter(ufi, flt, uobj, anon, pg, oanon);
1634 }
1635
1636 /*
1637 * uvm_fault_upper_direct: handle direct fault.
1638 */
1639
1640 static int
1641 uvm_fault_upper_direct(
1642 struct uvm_faultinfo *ufi, struct uvm_faultctx *flt,
1643 struct uvm_object *uobj, struct vm_anon *anon)
1644 {
1645 struct vm_anon * const oanon = anon;
1646 struct vm_page *pg;
1647 UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
1648
1649 cpu_count(CPU_COUNT_FLT_ANON, 1);
1650 pg = anon->an_page;
1651 if (anon->an_ref > 1) /* disallow writes to ref > 1 anons */
1652 flt->enter_prot = flt->enter_prot & ~VM_PROT_WRITE;
1653
1654 return uvm_fault_upper_enter(ufi, flt, uobj, anon, pg, oanon);
1655 }
1656
1657 /*
1658 * uvm_fault_upper_enter: enter h/w mapping of upper page.
1659 */
1660
1661 static int
1662 uvm_fault_upper_enter(
1663 struct uvm_faultinfo *ufi, const struct uvm_faultctx *flt,
1664 struct uvm_object *uobj, struct vm_anon *anon, struct vm_page *pg,
1665 struct vm_anon *oanon)
1666 {
1667 struct pmap *pmap = ufi->orig_map->pmap;
1668 vaddr_t va = ufi->orig_rvaddr;
1669 struct vm_amap * const amap = ufi->entry->aref.ar_amap;
1670 UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
1671
1672 /* locked: maps(read), amap, oanon, anon(if different from oanon) */
1673 KASSERT(rw_lock_op(amap->am_lock) == flt->upper_lock_type);
1674 KASSERT(anon->an_lock == amap->am_lock);
1675 KASSERT(oanon->an_lock == amap->am_lock);
1676 KASSERT(uobj == NULL ||
1677 rw_lock_op(uobj->vmobjlock) == flt->lower_lock_type);
1678 KASSERT(uvm_pagegetdirty(pg) != UVM_PAGE_STATUS_CLEAN);
1679
1680 /*
1681 * now map the page in.
1682 */
1683
1684 UVMHIST_LOG(maphist,
1685 " MAPPING: anon: pm=%#jx, va=%#jx, pg=%#jx, promote=%jd",
1686 (uintptr_t)pmap, va, (uintptr_t)pg, flt->promote);
1687 if (pmap_enter(pmap, va, VM_PAGE_TO_PHYS(pg),
1688 flt->enter_prot, flt->access_type | PMAP_CANFAIL |
1689 (flt->wire_mapping ? PMAP_WIRED : 0)) != 0) {
1690
1691 /*
1692 * If pmap_enter() fails, it must not leave behind an existing
1693 * pmap entry. In particular, a now-stale entry for a different
1694 * page would leave the pmap inconsistent with the vm_map.
1695 * This is not to imply that pmap_enter() should remove an
1696 * existing mapping in such a situation (since that could create
1697 * different problems, eg. if the existing mapping is wired),
1698 * but rather that the pmap should be designed such that it
1699 * never needs to fail when the new mapping is replacing an
1700 * existing mapping and the new page has no existing mappings.
1701 *
1702 * XXX This can't be asserted safely any more because many
1703 * LWPs and/or many processes could simultaneously fault on
1704 * the same VA and some might succeed.
1705 */
1706
1707 /* KASSERT(!pmap_extract(pmap, va, NULL)); */
1708
1709 /*
1710 * ensure that the page is queued in the case that
1711 * we just promoted.
1712 */
1713
1714 uvm_pagelock(pg);
1715 uvm_pageenqueue(pg);
1716 uvm_pageunlock(pg);
1717
1718 /*
1719 * No need to undo what we did; we can simply think of
1720 * this as the pmap throwing away the mapping information.
1721 *
1722 * We do, however, have to go through the ReFault path,
1723 * as the map may change while we're asleep.
1724 */
1725
1726 uvmfault_unlockall(ufi, amap, uobj);
1727 if (!uvm_reclaimable()) {
1728 UVMHIST_LOG(maphist,
1729 "<- failed. out of VM",0,0,0,0);
1730 /* XXX instrumentation */
1731 return ENOMEM;
1732 }
1733 /* XXX instrumentation */
1734 uvm_wait("flt_pmfail1");
1735 return ERESTART;
1736 }
1737
1738 uvm_fault_upper_done(ufi, flt, anon, pg);
1739
1740 /*
1741 * done case 1! finish up by unlocking everything and returning success
1742 */
1743
1744 pmap_update(pmap);
1745 uvmfault_unlockall(ufi, amap, uobj);
1746 return 0;
1747 }
1748
1749 /*
1750 * uvm_fault_upper_done: queue upper center page.
1751 */
1752
1753 static void
1754 uvm_fault_upper_done(
1755 struct uvm_faultinfo *ufi, const struct uvm_faultctx *flt,
1756 struct vm_anon *anon, struct vm_page *pg)
1757 {
1758 const bool wire_paging = flt->wire_paging;
1759
1760 UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
1761
1762 /*
1763 * ... update the page queues.
1764 */
1765
1766 if (wire_paging) {
1767 uvm_pagelock(pg);
1768 uvm_pagewire(pg);
1769 uvm_pageunlock(pg);
1770
1771 /*
1772 * since the now-wired page cannot be paged out,
1773 * release its swap resources for others to use.
1774 * and since an anon with no swap cannot be clean,
1775 * mark it dirty now.
1776 */
1777
1778 uvm_pagemarkdirty(pg, UVM_PAGE_STATUS_DIRTY);
1779 uvm_anon_dropswap(anon);
1780 } else if (uvmpdpol_pageactivate_p(pg)) {
1781 /*
1782 * avoid re-activating the page unless needed,
1783 * to avoid false sharing on multiprocessor.
1784 */
1785
1786 uvm_pagelock(pg);
1787 uvm_pageactivate(pg);
1788 uvm_pageunlock(pg);
1789 }
1790 }
1791
1792 /*
1793 * uvm_fault_lower_upgrade: upgrade lower lock, reader -> writer
1794 */
1795
1796 static inline int
1797 uvm_fault_lower_upgrade(struct uvm_faultinfo *ufi, struct uvm_faultctx *flt,
1798 struct vm_amap *amap, struct uvm_object *uobj, struct vm_page *uobjpage)
1799 {
1800
1801 UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
1802
1803 KASSERT(uobj != NULL);
1804 KASSERT(flt->lower_lock_type == rw_lock_op(uobj->vmobjlock));
1805
1806 /*
1807 * fast path.
1808 */
1809
1810 if (__predict_true(flt->lower_lock_type == RW_WRITER)) {
1811 return 0;
1812 }
1813
1814 /*
1815 * otherwise try for the upgrade. if we don't get it, unlock
1816 * everything, restart the fault and next time around get a writer
1817 * lock.
1818 */
1819
1820 flt->lower_lock_type = RW_WRITER;
1821 if (__predict_false(!rw_tryupgrade(uobj->vmobjlock))) {
1822 uvmfault_unlockall(ufi, amap, uobj);
1823 cpu_count(CPU_COUNT_FLTNOUP, 1);
1824 UVMHIST_LOG(maphist, " !upgrade lower", 0, 0,0,0);
1825 return ERESTART;
1826 }
1827 cpu_count(CPU_COUNT_FLTUP, 1);
1828 KASSERT(flt->lower_lock_type == rw_lock_op(uobj->vmobjlock));
1829 return 0;
1830 }
1831
1832 /*
1833 * uvm_fault_lower: handle lower fault.
1834 *
1835 * 1. check uobj
1836 * 1.1. if null, ZFOD.
1837 * 1.2. if not null, look up unmapped neighbor pages.
1838 * 2. for center page, check if promote.
1839 * 2.1. ZFOD always needs promotion.
1840 * 2.2. other uobjs, when entry is marked COW (usually MAP_PRIVATE vnode).
1841 * 3. if uobj is not ZFOD and page is not found, do i/o.
1842 * 4. dispatch either direct / promote fault.
1843 */
1844
1845 static int
1846 uvm_fault_lower(
1847 struct uvm_faultinfo *ufi, struct uvm_faultctx *flt,
1848 struct vm_page **pages)
1849 {
1850 struct vm_amap *amap __diagused = ufi->entry->aref.ar_amap;
1851 struct uvm_object *uobj = ufi->entry->object.uvm_obj;
1852 struct vm_page *uobjpage;
1853 int error;
1854 UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
1855
1856 /*
1857 * now, if the desired page is not shadowed by the amap and we have
1858 * a backing object that does not have a special fault routine, then
1859 * we ask (with pgo_get) the object for resident pages that we care
1860 * about and attempt to map them in. we do not let pgo_get block
1861 * (PGO_LOCKED).
1862 */
1863
1864 if (uobj == NULL) {
1865 /* zero fill; don't care about neighbor pages */
1866 uobjpage = NULL;
1867 } else {
1868 uvm_fault_lower_lookup(ufi, flt, pages);
1869 uobjpage = pages[flt->centeridx];
1870 }
1871
1872 /*
1873 * note that at this point we are done with any front or back pages.
1874 * we are now going to focus on the center page (i.e. the one we've
1875 * faulted on). if we have faulted on the upper (anon) layer
1876 * [i.e. case 1], then the anon we want is anons[centeridx] (we have
1877 * not touched it yet). if we have faulted on the bottom (uobj)
1878 * layer [i.e. case 2] and the page was both present and available,
1879 * then we've got a pointer to it as "uobjpage" and we've already
1880 * made it BUSY.
1881 */
1882
1883 /*
1884 * locked:
1885 * maps(read), amap(if there), uobj(if !null), uobjpage(if !null)
1886 */
1887 KASSERT(amap == NULL ||
1888 rw_lock_op(amap->am_lock) == flt->upper_lock_type);
1889 KASSERT(uobj == NULL ||
1890 rw_lock_op(uobj->vmobjlock) == flt->lower_lock_type);
1891
1892 /*
1893 * note that uobjpage can not be PGO_DONTCARE at this point. we now
1894 * set uobjpage to PGO_DONTCARE if we are doing a zero fill. if we
1895 * have a backing object, check and see if we are going to promote
1896 * the data up to an anon during the fault.
1897 */
1898
1899 if (uobj == NULL) {
1900 uobjpage = PGO_DONTCARE;
1901 flt->promote = true; /* always need anon here */
1902 } else {
1903 KASSERT(uobjpage != PGO_DONTCARE);
1904 flt->promote = flt->cow_now && UVM_ET_ISCOPYONWRITE(ufi->entry);
1905 }
1906 UVMHIST_LOG(maphist, " case 2 fault: promote=%jd, zfill=%jd",
1907 flt->promote, (uobj == NULL), 0,0);
1908
1909 /*
1910 * if uobjpage is not null then we do not need to do I/O to get the
1911 * uobjpage.
1912 *
1913 * if uobjpage is null, then we need to unlock and ask the pager to
1914 * get the data for us. once we have the data, we need to reverify
1915 * the state of the world. we are currently not holding any resources.
1916 */
1917
1918 if (uobjpage) {
1919 /* update rusage counters */
1920 curlwp->l_ru.ru_minflt++;
1921 } else {
1922 error = uvm_fault_lower_io(ufi, flt, &uobj, &uobjpage);
1923 if (error != 0)
1924 return error;
1925 }
1926
1927 /*
1928 * locked:
1929 * maps(read), amap(if !null), uobj(if !null), uobjpage(if uobj)
1930 */
1931 KASSERT(amap == NULL ||
1932 rw_lock_op(amap->am_lock) == flt->upper_lock_type);
1933 KASSERT(uobj == NULL ||
1934 rw_lock_op(uobj->vmobjlock) == flt->lower_lock_type);
1935
1936 /*
1937 * notes:
1938 * - at this point uobjpage can not be NULL
1939 * - at this point uobjpage can not be PG_RELEASED (since we checked
1940 * for it above)
1941 * - at this point uobjpage could be waited on (handle later)
1942 * - uobjpage can be from a different object if tmpfs (vnode vs UAO)
1943 */
1944
1945 KASSERT(uobjpage != NULL);
1946 KASSERT(uobj == NULL ||
1947 uobjpage->uobject->vmobjlock == uobj->vmobjlock);
1948 KASSERT(uobj == NULL || !UVM_OBJ_IS_CLEAN(uobjpage->uobject) ||
1949 uvm_pagegetdirty(uobjpage) == UVM_PAGE_STATUS_CLEAN);
1950
1951 if (!flt->promote) {
1952 error = uvm_fault_lower_direct(ufi, flt, uobj, uobjpage);
1953 } else {
1954 error = uvm_fault_lower_promote(ufi, flt, uobj, uobjpage);
1955 }
1956 return error;
1957 }
1958
1959 /*
1960 * uvm_fault_lower_lookup: look up on-memory uobj pages.
1961 *
1962 * 1. get on-memory pages.
1963 * 2. if failed, give up (get only center page later).
1964 * 3. if succeeded, enter h/w mapping of neighbor pages.
1965 */
1966
1967 static void
1968 uvm_fault_lower_lookup(
1969 struct uvm_faultinfo *ufi, const struct uvm_faultctx *flt,
1970 struct vm_page **pages)
1971 {
1972 struct uvm_object *uobj = ufi->entry->object.uvm_obj;
1973 int lcv, gotpages;
1974 vaddr_t currva;
1975 bool entered;
1976 UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
1977
1978 rw_enter(uobj->vmobjlock, flt->lower_lock_type);
1979
1980 /*
1981 * Locked: maps(read), amap(if there), uobj
1982 */
1983
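/*
 * note: PGO_LOCKED asks the pager only for pages that are already
 * resident (the pager must not sleep), and MASK(entry), defined near
 * the top of this file, strips VM_PROT_WRITE from the access type for
 * copy-on-write entries, so COW neighbors are never requested as
 * write faults here.
 */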
1984 cpu_count(CPU_COUNT_FLTLGET, 1);
1985 gotpages = flt->npages;
1986 (void) uobj->pgops->pgo_get(uobj,
1987 ufi->entry->offset + flt->startva - ufi->entry->start,
1988 pages, &gotpages, flt->centeridx,
1989 flt->access_type & MASK(ufi->entry), ufi->entry->advice,
1990 PGO_LOCKED);
1991
1992 KASSERT(rw_lock_op(uobj->vmobjlock) == flt->lower_lock_type);
1993
1994 /*
1995 * check for pages to map, if we got any
1996 */
1997
1998 if (gotpages == 0) {
1999 pages[flt->centeridx] = NULL;
2000 return;
2001 }
2002
2003 entered = false;
2004 currva = flt->startva;
2005 for (lcv = 0; lcv < flt->npages; lcv++, currva += PAGE_SIZE) {
2006 struct vm_page *curpg;
2007
2008 curpg = pages[lcv];
2009 if (curpg == NULL || curpg == PGO_DONTCARE) {
2010 continue;
2011 }
2012
2013 /*
2014 * in the case of tmpfs, the pages might be from a different
2015 * uvm_object. just make sure that they have the same lock.
2016 */
2017
2018 KASSERT(curpg->uobject->vmobjlock == uobj->vmobjlock);
2019 KASSERT((curpg->flags & PG_BUSY) == 0);
2020
2021 /*
2022 * leave the centre page for later. don't screw with
2023 * existing mappings (needless & expensive).
2024 */
2025
2026 if (lcv == flt->centeridx) {
2027 UVMHIST_LOG(maphist, " got uobjpage (%#jx) "
2028 "with locked get", (uintptr_t)curpg, 0, 0, 0);
2029 } else if (!pmap_extract(ufi->orig_map->pmap, currva, NULL)) {
2030 uvm_fault_lower_neighbor(ufi, flt, currva, curpg);
2031 entered = true;
2032 }
2033 }
2034 if (entered) {
2035 pmap_update(ufi->orig_map->pmap);
2036 }
2037 }
2038
2039 /*
2040 * uvm_fault_lower_neighbor: enter h/w mapping of lower neighbor page.
2041 */
2042
2043 static void
2044 uvm_fault_lower_neighbor(
2045 struct uvm_faultinfo *ufi, const struct uvm_faultctx *flt,
2046 vaddr_t currva, struct vm_page *pg)
2047 {
2048 const bool readonly = uvm_pagereadonly_p(pg) || pg->loan_count > 0;
2049 UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
2050
2051 /* locked: maps(read), amap(if there), uobj */
2052
2053 /*
2054 * calling pgo_get with PGO_LOCKED returns us pages which
2055 * are neither busy nor released, so we don't need to check
2056 * for this. we can just directly enter the pages.
2057 *
2058 * there wasn't a direct fault on the page, so avoid the cost of
2059 * activating it.
2060 */
2061
2062 if (!uvmpdpol_pageisqueued_p(pg) && pg->wire_count == 0) {
2063 uvm_pagelock(pg);
2064 uvm_pageenqueue(pg);
2065 uvm_pageunlock(pg);
2066 }
2067
2068 UVMHIST_LOG(maphist,
2069 " MAPPING: n obj: pm=%#jx, va=%#jx, pg=%#jx",
2070 (uintptr_t)ufi->orig_map->pmap, currva, (uintptr_t)pg, 0);
2071 cpu_count(CPU_COUNT_FLTNOMAP, 1);
2072
2073 /*
2074 * Since this page isn't the page that's actually faulting,
2075 * ignore pmap_enter() failures; it's not critical that we
2076 * enter these right now.
2077 * NOTE: page can't be waited on or PG_RELEASED because we've
2078 * held the lock the whole time we've had the handle.
2079 */
2080 KASSERT((pg->flags & PG_PAGEOUT) == 0);
2081 KASSERT((pg->flags & PG_RELEASED) == 0);
2082 KASSERT(!UVM_OBJ_IS_CLEAN(pg->uobject) ||
2083 uvm_pagegetdirty(pg) == UVM_PAGE_STATUS_CLEAN);
2084 KASSERT((pg->flags & PG_BUSY) == 0);
2085 KASSERT(rw_lock_op(pg->uobject->vmobjlock) == flt->lower_lock_type);
2086
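/*
 * per pmap_enter(9) the flags argument may carry the faulting access
 * type in its low bits; it is passed here only for wired mappings
 * (together with PMAP_WIRED, presumably so a wired mapping is valid
 * up front and never needs to refault).  PMAP_CANFAIL is always set
 * so the pmap may refuse without panicking.
 */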
2087 const vm_prot_t mapprot =
2088 readonly ? (flt->enter_prot & ~VM_PROT_WRITE) :
2089 flt->enter_prot & MASK(ufi->entry);
2090 const u_int mapflags =
2091 PMAP_CANFAIL | (flt->wire_mapping ? (mapprot | PMAP_WIRED) : 0);
2092 (void) pmap_enter(ufi->orig_map->pmap, currva,
2093 VM_PAGE_TO_PHYS(pg), mapprot, mapflags);
2094 }
2095
2096 /*
2097 * uvm_fault_lower_io: get lower page from backing store.
2098 *
2099 * 1. unlock everything, because i/o will block.
2100 * 2. call pgo_get.
2101 * 3. if failed, recover.
2102 * 4. if succeeded, relock everything and verify things.
2103 */
2104
2105 static int
2106 uvm_fault_lower_io(
2107 struct uvm_faultinfo *ufi, struct uvm_faultctx *flt,
2108 struct uvm_object **ruobj, struct vm_page **ruobjpage)
2109 {
2110 struct vm_amap * const amap = ufi->entry->aref.ar_amap;
2111 struct uvm_object *uobj = *ruobj;
2112 struct vm_page *pg;
2113 bool locked;
2114 int gotpages;
2115 int error;
2116 voff_t uoff;
2117 vm_prot_t access_type;
2118 int advice;
2119 UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
2120
2121 /* update rusage counters */
2122 curlwp->l_ru.ru_majflt++;
2123
2124 /* grab everything we need from the entry before we unlock */
2125 uoff = (ufi->orig_rvaddr - ufi->entry->start) + ufi->entry->offset;
2126 access_type = flt->access_type & MASK(ufi->entry);
2127 advice = ufi->entry->advice;
2128
2129 /* Locked: maps(read), amap(if there), uobj */
2130 KASSERT(rw_lock_op(uobj->vmobjlock) == flt->lower_lock_type);
2131
2132 /* Upgrade to a write lock if needed. */
2133 error = uvm_fault_lower_upgrade(ufi, flt, amap, uobj, NULL);
2134 if (error != 0) {
2135 return error;
2136 }
2137 uvmfault_unlockall(ufi, amap, NULL);
2138
2139 /* Locked: uobj(write) */
2140 KASSERT(rw_write_held(uobj->vmobjlock));
2141
2142 cpu_count(CPU_COUNT_FLTGET, 1);
2143 gotpages = 1;
2144 pg = NULL;
2145 error = uobj->pgops->pgo_get(uobj, uoff, &pg, &gotpages,
2146 0, access_type, advice, PGO_SYNCIO);
2147 /* locked: pg(if no error) */
2148
2149 /*
2150 * recover from I/O
2151 */
2152
2153 if (error) {
2154 if (error == EAGAIN) {
2155 UVMHIST_LOG(maphist,
2156 " pgo_get says TRY AGAIN!",0,0,0,0);
2157 kpause("fltagain2", false, hz/2, NULL);
2158 return ERESTART;
2159 }
2160
2161 #if 0
2162 KASSERT(error != ERESTART);
2163 #else
2164 /* XXXUEBS don't re-fault? */
2165 if (error == ERESTART)
2166 error = EIO;
2167 #endif
2168
2169 UVMHIST_LOG(maphist, "<- pgo_get failed (code %jd)",
2170 error, 0,0,0);
2171 return error;
2172 }
2173
2174 /*
2175 * re-verify the state of the world by first trying to relock
2176 * the maps. always relock the object.
2177 */
2178
2179 locked = uvmfault_relock(ufi);
2180 if (locked && amap)
2181 amap_lock(amap, flt->upper_lock_type);
2182
2183 /* might be changed */
2184 uobj = pg->uobject;
2185
2186 rw_enter(uobj->vmobjlock, flt->lower_lock_type);
2187 KASSERT((pg->flags & PG_BUSY) != 0);
2188 KASSERT(flt->lower_lock_type == RW_WRITER);
2189
2190 uvm_pagelock(pg);
2191 uvm_pageactivate(pg);
2192 uvm_pageunlock(pg);
2193
2194 /* locked(locked): maps(read), amap(if !null), uobj, pg */
2195 /* locked(!locked): uobj, pg */
2196
2197 /*
2198 * verify that the page has not been released, and re-verify that
2199 * the amap slot is still free (no one promoted an anon there while
2200 * we slept). if there is a problem, we unlock and clean up.
2201 */
2202
2203 if ((pg->flags & PG_RELEASED) != 0 ||
2204 (locked && amap && amap_lookup(&ufi->entry->aref,
2205 ufi->orig_rvaddr - ufi->entry->start))) {
2206 if (locked)
2207 uvmfault_unlockall(ufi, amap, NULL);
2208 locked = false;
2209 }
2210
2211 /*
2212 * unbusy/release the page.
2213 */
2214
2215 if ((pg->flags & PG_RELEASED) == 0) {
2216 pg->flags &= ~PG_BUSY;
2217 uvm_pagelock(pg);
2218 uvm_pagewakeup(pg);
2219 uvm_pageunlock(pg);
2220 UVM_PAGE_OWN(pg, NULL);
2221 } else {
2222 cpu_count(CPU_COUNT_FLTPGRELE, 1);
2223 uvm_pagefree(pg);
2224 }
2225
2226 /*
2227 * didn't get the lock? retry.
2228 */
2229
2230 if (locked == false) {
2231 UVMHIST_LOG(maphist,
2232 " wasn't able to relock after fault: retry",
2233 0,0,0,0);
2234 rw_exit(uobj->vmobjlock);
2235 return ERESTART;
2236 }
2237
2238 /*
2239 * we have the data in pg. we are holding object lock (so the page
2240 * can't be released on us).
2241 */
2242
2243 /* locked: maps(read), amap(if !null), uobj */
2244
2245 *ruobj = uobj;
2246 *ruobjpage = pg;
2247 return 0;
2248 }
2249
2250 /*
2251 * uvm_fault_lower_direct: fault lower center page
2252 *
2253 * 1. adjust flt->enter_prot.
2254 * 2. if page is loaned, resolve.
2255 */
2256
2257 int
2258 uvm_fault_lower_direct(
2259 struct uvm_faultinfo *ufi, struct uvm_faultctx *flt,
2260 struct uvm_object *uobj, struct vm_page *uobjpage)
2261 {
2262 struct vm_page *pg;
2263 UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
2264
2265 /*
2266 * we are not promoting. if the mapping is COW ensure that we
2267 * don't give more access than we should (e.g. when doing a read
2268 * fault on a COPYONWRITE mapping we want to map the COW page in
2269 * R/O even though the entry protection could be R/W).
2270 *
2271 * set "pg" to the page we want to map in (uobjpage, usually)
2272 */
2273
2274 cpu_count(CPU_COUNT_FLT_OBJ, 1);
2275 if (UVM_ET_ISCOPYONWRITE(ufi->entry) ||
2276 UVM_OBJ_NEEDS_WRITEFAULT(uobjpage->uobject))
2277 flt->enter_prot &= ~VM_PROT_WRITE;
2278 pg = uobjpage; /* map in the actual object */
2279
2280 KASSERT(uobjpage != PGO_DONTCARE);
2281
2282 /*
2283 * we are faulting directly on the page. be careful
2284 * about writing to loaned pages...
2285 */
2286
2287 if (uobjpage->loan_count) {
2288 uvm_fault_lower_direct_loan(ufi, flt, uobj, &pg, &uobjpage);
2289 }
2290 KASSERT(pg == uobjpage);
2291 KASSERT((pg->flags & PG_BUSY) == 0);
2292 return uvm_fault_lower_enter(ufi, flt, uobj, NULL, pg);
2293 }
2294
2295 /*
2296 * uvm_fault_lower_direct_loan: resolve loaned page.
2297 *
2298 * 1. if not cow'ing, adjust flt->enter_prot.
2299 * 2. if cow'ing, break loan.
2300 */
2301
2302 static int
2303 uvm_fault_lower_direct_loan(
2304 struct uvm_faultinfo *ufi, struct uvm_faultctx *flt,
2305 struct uvm_object *uobj, struct vm_page **rpg,
2306 struct vm_page **ruobjpage)
2307 {
2308 struct vm_amap * const amap = ufi->entry->aref.ar_amap;
2309 struct vm_page *pg;
2310 struct vm_page *uobjpage = *ruobjpage;
2311 int error;
2312 UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
2313
2314 if (!flt->cow_now) {
2315 /* read fault: cap the protection at readonly */
2316 /* cap! */
2317 flt->enter_prot = flt->enter_prot & ~VM_PROT_WRITE;
2318 } else {
2319 /*
2320 * write fault: must break the loan here. to do this
2321 * we need a write lock on the object.
2322 */
2323
2324 error = uvm_fault_lower_upgrade(ufi, flt, amap, uobj, uobjpage);
2325 if (error != 0) {
2326 return error;
2327 }
2328 KASSERT(rw_write_held(uobj->vmobjlock));
2329
2330 pg = uvm_loanbreak(uobjpage);
2331 if (pg == NULL) {
2332
2333 uvmfault_unlockall(ufi, amap, uobj);
2334 UVMHIST_LOG(maphist,
2335 " out of RAM breaking loan, waiting",
2336 0,0,0,0);
2337 cpu_count(CPU_COUNT_FLTNORAM, 1);
2338 uvm_wait("flt_noram4");
2339 return ERESTART;
2340 }
2341 *rpg = pg;
2342 *ruobjpage = pg;
2343
2344 /*
2345 * drop ownership of page while still holding object lock,
2346 * which won't be dropped until the page is entered.
2347 */
2348
2349 uvm_pagelock(pg);
2350 uvm_pagewakeup(pg);
2351 uvm_pageunlock(pg);
2352 pg->flags &= ~PG_BUSY;
2353 UVM_PAGE_OWN(pg, NULL);
2354 }
2355 return 0;
2356 }
2357
2358 /*
2359 * uvm_fault_lower_promote: promote lower page.
2360 *
2361 * 1. call uvmfault_promote.
2362 * 2. fill in data.
2363 * 3. if not ZFOD, dispose old page.
2364 */
2365
2366 int
2367 uvm_fault_lower_promote(
2368 struct uvm_faultinfo *ufi, struct uvm_faultctx *flt,
2369 struct uvm_object *uobj, struct vm_page *uobjpage)
2370 {
2371 struct vm_amap * const amap = ufi->entry->aref.ar_amap;
2372 struct vm_anon *anon;
2373 struct vm_page *pg;
2374 int error;
2375 UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
2376
2377 KASSERT(amap != NULL);
2378
2379 /* promoting requires a write lock. */
2380 error = uvm_fault_upper_upgrade(ufi, flt, amap, uobj);
2381 if (error != 0) {
2382 return error;
2383 }
2384 KASSERT(rw_write_held(amap->am_lock));
2385 KASSERT(uobj == NULL ||
2386 rw_lock_op(uobj->vmobjlock) == flt->lower_lock_type);
2387
2388 /*
2389 * If we are going to promote the data to an anon we
2390 * allocate a blank anon here and plug it into our amap.
2391 */
2392 error = uvmfault_promote(ufi, NULL, uobjpage, &anon, &flt->anon_spare);
2393 switch (error) {
2394 case 0:
2395 break;
2396 case ERESTART:
2397 return ERESTART;
2398 default:
2399 return error;
2400 }
2401
2402 pg = anon->an_page;
2403
2404 /*
2405 * Fill in the data.
2406 */
2407
2408 if (uobjpage != PGO_DONTCARE) {
2409 cpu_count(CPU_COUNT_FLT_PRCOPY, 1);
2410
2411 /*
2412 * promoting into a shared amap? remove all existing mappings of
2413 * the old page so every sharing process refaults and sees the new anon
2414 */
2415
2416 if ((amap_flags(amap) & AMAP_SHARED) != 0) {
2417 pmap_page_protect(uobjpage, VM_PROT_NONE);
2418 /*
2419 * XXX: PAGE MIGHT BE WIRED!
2420 */
2421 }
2422
2423 UVMHIST_LOG(maphist,
2424 " promote uobjpage %#jx to anon/page %#jx/%#jx",
2425 (uintptr_t)uobjpage, (uintptr_t)anon, (uintptr_t)pg, 0);
2426
2427 } else {
2428 cpu_count(CPU_COUNT_FLT_PRZERO, 1);
2429
2430 /*
2431 * Page is zero'd and marked dirty by
2432 * uvmfault_promote().
2433 */
2434
2435 UVMHIST_LOG(maphist," zero fill anon/page %#jx/%#jx",
2436 (uintptr_t)anon, (uintptr_t)pg, 0, 0);
2437 }
2438
2439 return uvm_fault_lower_enter(ufi, flt, uobj, anon, pg);
2440 }
2441
2442 /*
2443 * uvm_fault_lower_enter: enter h/w mapping of lower page or anon page promoted
2444 * from the lower page.
2445 */
2446
2447 int
2448 uvm_fault_lower_enter(
2449 struct uvm_faultinfo *ufi, const struct uvm_faultctx *flt,
2450 struct uvm_object *uobj,
2451 struct vm_anon *anon, struct vm_page *pg)
2452 {
2453 struct vm_amap * const amap = ufi->entry->aref.ar_amap;
2454 const bool readonly = uvm_pagereadonly_p(pg);
2455 int error;
2456 UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
2457
2458 /*
2459 * Locked:
2460 *
2461 * maps(read), amap(if !null), uobj(if !null),
2462 * anon(if !null), pg(if anon), unlock_uobj(if !null)
2463 *
2464 * anon must be write locked (promotion). uobj can be either.
2465 *
2466 * Note: pg is either the uobjpage or the new page in the new anon.
2467 */
2468
2469 KASSERT(amap == NULL ||
2470 rw_lock_op(amap->am_lock) == flt->upper_lock_type);
2471 KASSERT(uobj == NULL ||
2472 rw_lock_op(uobj->vmobjlock) == flt->lower_lock_type);
2473 KASSERT(anon == NULL || anon->an_lock == amap->am_lock);
2474
2475 /*
2476 * note that pg can't be PG_RELEASED or PG_BUSY since we did
2477 * not drop the object lock since the last time we checked.
2478 */
2479
2480 KASSERT((pg->flags & PG_RELEASED) == 0);
2481 KASSERT((pg->flags & PG_BUSY) == 0);
2482
2483 /*
2484 * all resources are present. we can now map it in and free our
2485 * resources.
2486 */
2487
2488 UVMHIST_LOG(maphist,
2489 " MAPPING: case2: pm=%#jx, va=%#jx, pg=%#jx, promote=%jd",
2490 (uintptr_t)ufi->orig_map->pmap, ufi->orig_rvaddr,
2491 (uintptr_t)pg, flt->promote);
2492 KASSERTMSG((flt->access_type & VM_PROT_WRITE) == 0 || !readonly,
2493 "promote=%u cow_now=%u access_type=%x enter_prot=%x cow=%u "
2494 "entry=%p map=%p orig_rvaddr=%p pg=%p",
2495 flt->promote, flt->cow_now, flt->access_type, flt->enter_prot,
2496 UVM_ET_ISCOPYONWRITE(ufi->entry), ufi->entry, ufi->orig_map,
2497 (void *)ufi->orig_rvaddr, pg);
2498 KASSERT((flt->access_type & VM_PROT_WRITE) == 0 || !readonly);
2499 if (pmap_enter(ufi->orig_map->pmap, ufi->orig_rvaddr,
2500 VM_PAGE_TO_PHYS(pg),
2501 readonly ? flt->enter_prot & ~VM_PROT_WRITE : flt->enter_prot,
2502 flt->access_type | PMAP_CANFAIL |
2503 (flt->wire_mapping ? PMAP_WIRED : 0)) != 0) {
2504
2505 /*
2506 * No need to undo what we did; we can simply think of
2507 * this as the pmap throwing away the mapping information.
2508 *
2509 * We do, however, have to go through the ReFault path,
2510 * as the map may change while we're asleep.
2511 */
2512
2513 /*
2514 * ensure that the page is queued in the case that
2515 * we just promoted the page.
2516 */
2517
2518 if (anon != NULL) {
2519 uvm_pagelock(pg);
2520 uvm_pageenqueue(pg);
2521 uvm_pagewakeup(pg);
2522 uvm_pageunlock(pg);
2523 }
2524
2525 uvmfault_unlockall(ufi, amap, uobj);
2526 if (!uvm_reclaimable()) {
2527 UVMHIST_LOG(maphist,
2528 "<- failed. out of VM",0,0,0,0);
2529 /* XXX instrumentation */
2530 error = ENOMEM;
2531 return error;
2532 }
2533 /* XXX instrumentation */
2534 uvm_wait("flt_pmfail2");
2535 return ERESTART;
2536 }
2537
2538 uvm_fault_lower_done(ufi, flt, uobj, pg);
2539 pmap_update(ufi->orig_map->pmap);
2540 uvmfault_unlockall(ufi, amap, uobj);
2541
2542 UVMHIST_LOG(maphist, "<- done (SUCCESS!)",0,0,0,0);
2543 return 0;
2544 }
2545
2546 /*
2547 * uvm_fault_lower_done: queue lower center page.
2548 */
2549
2550 void
2551 uvm_fault_lower_done(
2552 struct uvm_faultinfo *ufi, const struct uvm_faultctx *flt,
2553 struct uvm_object *uobj, struct vm_page *pg)
2554 {
2555
2556 UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
2557
2558 if (flt->wire_paging) {
2559 uvm_pagelock(pg);
2560 uvm_pagewire(pg);
2561 uvm_pageunlock(pg);
2562 if (pg->flags & PG_AOBJ) {
2563
2564 /*
2565 * since the now-wired page cannot be paged out,
2566 * release its swap resources for others to use.
2567 * since an aobj page with no swap cannot be clean,
2568 * mark it dirty now.
2569 *
2570 * use pg->uobject here. if the page is from a
2571 * tmpfs vnode, the pages are backed by its UAO and
2572 * not the vnode.
2573 */
2574
2575 KASSERT(uobj != NULL);
2576 KASSERT(uobj->vmobjlock == pg->uobject->vmobjlock);
2577 uvm_pagemarkdirty(pg, UVM_PAGE_STATUS_DIRTY);
2578 uao_dropswap(pg->uobject, pg->offset >> PAGE_SHIFT);
2579 }
2580 } else if (uvmpdpol_pageactivate_p(pg)) {
2581 /*
2582 * avoid re-activating the page unless needed,
2583 * to avoid false sharing on multiprocessor.
2584 */
2585
2586 uvm_pagelock(pg);
2587 uvm_pageactivate(pg);
2588 uvm_pageunlock(pg);
2589 }
2590 }
2591
2592
2593 /*
2594 * uvm_fault_wire: wire down a range of virtual addresses in a map.
2595 *
2596 * => map may be read-locked by caller, but MUST NOT be write-locked.
2597 * => if map is read-locked, any operations which may cause map to
2598 * be write-locked in uvm_fault() must be taken care of by
2599 * the caller. See uvm_map_pageable().
2600 */
2601
2602 int
2603 uvm_fault_wire(struct vm_map *map, vaddr_t start, vaddr_t end,
2604 vm_prot_t access_type, int maxprot)
2605 {
2606 vaddr_t va;
2607 int error;
2608
2609 /*
2610 * now fault it in a page at a time. if the fault fails then we have
2611 * to undo what we have done. note that in uvm_fault VM_PROT_NONE
2612 * is replaced with the max protection if fault_type is VM_FAULT_WIRE.
2613 */
2614
2615 /*
2616 * XXX work around overflowing a vaddr_t. this prevents us from
2617 * wiring the last page in the address space, though.
2618 */
2619 if (start > end) {
2620 return EFAULT;
2621 }
2622
2623 for (va = start; va < end; va += PAGE_SIZE) {
2624 error = uvm_fault_internal(map, va, access_type,
2625 (maxprot ? UVM_FAULT_MAXPROT : 0) | UVM_FAULT_WIRE);
2626 if (error) {
2627 if (va != start) {
2628 uvm_fault_unwire(map, start, va);
2629 }
2630 return error;
2631 }
2632 }
2633 return 0;
2634 }
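
/*
 * a sketch of typical usage (cf. uvm_map_pageable(), which wires each
 * map entry with its maximum protection; the exact call there may
 * differ).  on failure uvm_fault_wire() has already unwired whatever
 * it managed to wire, so the caller need not undo anything:
 *
 *	error = uvm_fault_wire(map, entry->start, entry->end,
 *	    entry->max_protection, 1);
 *	if (error)
 *		return error;
 */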
2635
2636 /*
2637 * uvm_fault_unwire(): unwire range of virtual space.
2638 */
2639
2640 void
2641 uvm_fault_unwire(struct vm_map *map, vaddr_t start, vaddr_t end)
2642 {
2643 vm_map_lock_read(map);
2644 uvm_fault_unwire_locked(map, start, end);
2645 vm_map_unlock_read(map);
2646 }
2647
2648 /*
2649 * uvm_fault_unwire_locked(): the guts of uvm_fault_unwire().
2650 *
2651 * => map must be at least read-locked.
2652 */
2653
2654 void
2655 uvm_fault_unwire_locked(struct vm_map *map, vaddr_t start, vaddr_t end)
2656 {
2657 struct vm_map_entry *entry, *oentry;
2658 pmap_t pmap = vm_map_pmap(map);
2659 vaddr_t va;
2660 paddr_t pa;
2661 struct vm_page *pg;
2662
2663 /*
2664 * we assume that the area we are unwiring has actually been wired
2665 * in the first place. this means that we should be able to extract
2666 * the PAs from the pmap. we also lock out the page daemon so that
2667 * we can call uvm_pageunwire.
2668 */
2669
2670 /*
2671 * find the beginning map entry for the region.
2672 */
2673
2674 KASSERT(start >= vm_map_min(map));
2675 KASSERT(end <= vm_map_max(map));
2676 if (uvm_map_lookup_entry(map, start, &entry) == false)
2677 panic("uvm_fault_unwire_locked: address not in map");
2678
2679 oentry = NULL;
2680 for (va = start; va < end; va += PAGE_SIZE) {
2681
2682 /*
2683 * find the map entry for the current address.
2684 */
2685
2686 KASSERT(va >= entry->start);
2687 while (va >= entry->end) {
2688 KASSERT(entry->next != &map->header);
2689 KASSERT(entry->next->start <= entry->end);
2690 entry = entry->next;
2691 }
2692
2693 /*
2694 * lock the entry (its amap/object locks); re-lock only when the entry changes.
2695 */
2696
2697 if (entry != oentry) {
2698 if (oentry != NULL) {
2699 uvm_map_unlock_entry(oentry);
2700 }
2701 uvm_map_lock_entry(entry, RW_WRITER);
2702 oentry = entry;
2703 }
2704
2705 /*
2706 * if the entry is no longer wired, tell the pmap.
2707 */
2708
2709 if (!pmap_extract(pmap, va, &pa))
2710 continue;
2711
2712 if (VM_MAPENT_ISWIRED(entry) == 0)
2713 pmap_unwire(pmap, va);
2714
2715 pg = PHYS_TO_VM_PAGE(pa);
2716 if (pg) {
2717 uvm_pagelock(pg);
2718 uvm_pageunwire(pg);
2719 uvm_pageunlock(pg);
2720 }
2721 }
2722
2723 if (oentry != NULL) {
2724 uvm_map_unlock_entry(entry);
2725 }
2726 }
2727