
Searched refs: base_gfn (results 1 – 25 of 28), sorted by relevance

/linux/arch/loongarch/kvm/
mmu.c
366 gfn_t base_gfn = slot->base_gfn + gfn_offset; in kvm_arch_mmu_enable_log_dirty_pt_masked() local
367 gfn_t start = base_gfn + __ffs(mask); in kvm_arch_mmu_enable_log_dirty_pt_masked()
368 gfn_t end = base_gfn + __fls(mask) + 1; in kvm_arch_mmu_enable_log_dirty_pt_masked()
373 ctx.gfn = base_gfn; in kvm_arch_mmu_enable_log_dirty_pt_masked()
392 if ((new->base_gfn + new->npages) > (kvm->arch.gpa_size >> PAGE_SHIFT)) in kvm_arch_prepare_memory_region()
397 gpa_start = new->base_gfn << PAGE_SHIFT; in kvm_arch_prepare_memory_region()
479 needs_flush = kvm_mkclean_gpa_pt(kvm, new->base_gfn, in kvm_arch_commit_memory_region()
480 new->base_gfn + new->npages); in kvm_arch_commit_memory_region()
498 kvm_flush_range(kvm, slot->base_gfn, slot->base_gfn + slot->npages, 1); in kvm_arch_flush_shadow_memslot()
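Note: these hits show the common dirty-logging pattern, in which base_gfn converts the bitmap-relative mask into absolute guest frame numbers: slot->base_gfn + gfn_offset is the gfn of the mask's bit 0, and __ffs()/__fls() bound the dirtied range. The riscv and arm64 hits below compute the same bounds in bytes via << PAGE_SHIFT; the mips variant keeps an inclusive end. A minimal standalone model of the arithmetic; the struct and bit helpers are simplified stand-ins, not the kernel's definitions:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t gfn_t;

    struct kvm_memory_slot { gfn_t base_gfn; };

    /* stand-ins for the kernel's __ffs()/__fls() on a nonzero 64-bit mask */
    static unsigned int ffs64(uint64_t x) { return (unsigned int)__builtin_ctzll(x); }
    static unsigned int fls64(uint64_t x) { return 63u - (unsigned int)__builtin_clzll(x); }

    int main(void)
    {
        struct kvm_memory_slot slot = { .base_gfn = 0x1000 };
        gfn_t gfn_offset = 64;     /* gfn of the mask's bit 0, relative to the slot */
        uint64_t mask = 0x0f0;     /* one bit per dirtied page */

        gfn_t base_gfn = slot.base_gfn + gfn_offset;
        gfn_t start = base_gfn + ffs64(mask);      /* first dirtied gfn */
        gfn_t end   = base_gfn + fls64(mask) + 1;  /* one past the last dirtied gfn */

        printf("write-protect gfns [%#llx, %#llx)\n",
               (unsigned long long)start, (unsigned long long)end);
        return 0;
    }
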
/linux/arch/x86/kvm/
mmu.h
251 static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level) in gfn_to_index() argument
255 (base_gfn >> KVM_HPAGE_GFN_SHIFT(level)); in gfn_to_index()
262 return gfn_to_index(slot->base_gfn + npages - 1, in __kvm_mmu_slot_lpages()
263 slot->base_gfn, level) + 1; in __kvm_mmu_slot_lpages()
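Note: gfn_to_index() divides the slot into level-sized blocks. From the fragment at line 255, the index is the gfn's block number minus base_gfn's block number, and __kvm_mmu_slot_lpages() counts blocks by indexing the slot's last gfn and adding one. A standalone sketch, assuming x86's KVM_HPAGE_GFN_SHIFT(level) == 9 * (level - 1):

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t gfn_t;

    /* assumed geometry: 512 entries per level, as on x86 */
    #define HPAGE_GFN_SHIFT(level) (9 * ((level) - 1))

    static gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
    {
        return (gfn >> HPAGE_GFN_SHIFT(level)) -
               (base_gfn >> HPAGE_GFN_SHIFT(level));
    }

    static uint64_t slot_lpages(gfn_t base_gfn, uint64_t npages, int level)
    {
        /* index of the slot's last gfn at this level, plus one */
        return gfn_to_index(base_gfn + npages - 1, base_gfn, level) + 1;
    }

    int main(void)
    {
        /* a misaligned 512-page slot straddles one 512-gfn block boundary,
         * so it needs two level-2 entries */
        printf("%llu\n", (unsigned long long)slot_lpages(0x10ff, 0x200, 2));
        return 0;
    }
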
/linux/arch/riscv/kvm/
mmu.c
337 phys_addr_t start = memslot->base_gfn << PAGE_SHIFT; in gstage_wp_memory_region()
338 phys_addr_t end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT; in gstage_wp_memory_region()
398 phys_addr_t base_gfn = slot->base_gfn + gfn_offset; in kvm_arch_mmu_enable_log_dirty_pt_masked() local
399 phys_addr_t start = (base_gfn + __ffs(mask)) << PAGE_SHIFT; in kvm_arch_mmu_enable_log_dirty_pt_masked()
400 phys_addr_t end = (base_gfn + __fls(mask) + 1) << PAGE_SHIFT; in kvm_arch_mmu_enable_log_dirty_pt_masked()
425 gpa_t gpa = slot->base_gfn << PAGE_SHIFT; in kvm_arch_flush_shadow_memslot()
465 if ((new->base_gfn + new->npages) >= in kvm_arch_prepare_memory_region()
472 base_gpa = new->base_gfn << PAGE_SHIFT; in kvm_arch_prepare_memory_region()
/linux/virt/kvm/
guest_memfd.c
33 gfn_t gfn = slot->base_gfn + index - slot->gmem.pgoff; in __kvm_gmem_prepare_folio()
81 index = gfn - slot->base_gfn + slot->gmem.pgoff; in kvm_gmem_prepare_folio()
117 .start = slot->base_gfn + max(pgoff, start) - pgoff, in kvm_gmem_invalidate_begin()
118 .end = slot->base_gfn + min(pgoff + slot->npages, end) - pgoff, in kvm_gmem_invalidate_begin()
559 pgoff_t index = gfn - slot->base_gfn + slot->gmem.pgoff; in __kvm_gmem_get_pfn()
647 npages = min_t(ulong, slot->npages - (start_gfn - slot->base_gfn), npages); in kvm_gmem_populate()
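Note: these guest_memfd.c hits all apply one mapping: a slot binds gfns [base_gfn, base_gfn + npages) to file pages starting at offset gmem.pgoff, so index = gfn - base_gfn + pgoff and gfn = base_gfn + index - pgoff are inverses. A standalone sketch with a simplified slot type:

    #include <assert.h>
    #include <stdint.h>

    typedef uint64_t gfn_t;
    typedef uint64_t pgoff_t;

    struct memslot { gfn_t base_gfn; uint64_t npages; pgoff_t pgoff; };

    static pgoff_t slot_gfn_to_index(const struct memslot *s, gfn_t gfn)
    {
        return gfn - s->base_gfn + s->pgoff;   /* guest frame -> file page */
    }

    static gfn_t slot_index_to_gfn(const struct memslot *s, pgoff_t index)
    {
        return s->base_gfn + index - s->pgoff; /* file page -> guest frame */
    }

    int main(void)
    {
        struct memslot s = { .base_gfn = 0x40000, .npages = 1024, .pgoff = 16 };
        assert(slot_gfn_to_index(&s, 0x40003) == 19);
        assert(slot_index_to_gfn(&s, 19) == 0x40003);
        return 0;
    }
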
kvm_main.c
1469 if (slot->base_gfn < tmp->base_gfn) in kvm_insert_gfn_node()
1471 else if (slot->base_gfn > tmp->base_gfn) in kvm_insert_gfn_node()
1493 WARN_ON_ONCE(old->base_gfn != new->base_gfn); in kvm_replace_gfn_node()
1552 if (old && old->base_gfn == new->base_gfn) { in kvm_replace_memslot()
1769 dest->base_gfn = src->base_gfn; in kvm_copy_memslot()
1990 gfn_t base_gfn; in __kvm_set_memory_region() local
2042 base_gfn = (mem->guest_phys_addr >> PAGE_SHIFT); in __kvm_set_memory_region()
2063 if (base_gfn != old->base_gfn) in __kvm_set_memory_region()
2072 kvm_check_memslot_overlap(slots, id, base_gfn, base_gfn + npages)) in __kvm_set_memory_region()
2082 new->base_gfn = base_gfn; in __kvm_set_memory_region()
[all …]
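Note: kvm_main.c keeps memslots ordered by base_gfn (lines 1469–1471) and rejects a new slot whose gfn range intersects an existing one (line 2072). The overlap test reduces to half-open interval intersection; a standalone sketch (the kernel walks its gfn tree rather than comparing pairs):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t gfn_t;

    struct memslot { gfn_t base_gfn; uint64_t npages; };

    /* do half-open ranges [a_start, a_end) and [b_start, b_end) intersect? */
    static bool ranges_overlap(gfn_t a_start, gfn_t a_end,
                               gfn_t b_start, gfn_t b_end)
    {
        return a_start < b_end && b_start < a_end;
    }

    int main(void)
    {
        struct memslot old = { .base_gfn = 0x100, .npages = 0x100 };
        gfn_t base_gfn = 0x180, npages = 0x100;   /* candidate new slot */

        printf("overlaps: %d\n",
               ranges_overlap(base_gfn, base_gfn + npages,
                              old.base_gfn, old.base_gfn + old.npages));
        return 0;
    }
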
/linux/arch/arm64/kvm/
mmu.c
345 phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT; in stage2_flush_memslot()
987 phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT; in stage2_unmap_memslot()
1179 start = memslot->base_gfn << PAGE_SHIFT; in kvm_mmu_wp_memory_region()
1180 end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT; in kvm_mmu_wp_memory_region()
1209 start = memslot->base_gfn << PAGE_SHIFT; in kvm_mmu_split_memory_region()
1210 end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT; in kvm_mmu_split_memory_region()
1232 phys_addr_t base_gfn = slot->base_gfn + gfn_offset; in kvm_arch_mmu_enable_log_dirty_pt_masked() local
1233 phys_addr_t start = (base_gfn + __ffs(mask)) << PAGE_SHIFT; in kvm_arch_mmu_enable_log_dirty_pt_masked()
1234 phys_addr_t end = (base_gfn + __fls(mask) + 1) << PAGE_SHIFT; in kvm_arch_mmu_enable_log_dirty_pt_masked()
1273 gpa_start = memslot->base_gfn << PAGE_SHIFT; in fault_supports_stage2_huge_mapping()
[all …]
/linux/arch/x86/kvm/mmu/
page_track.c
80 index = gfn_to_index(gfn, slot->base_gfn, PG_LEVEL_4K); in update_gfn_write_track()
147 index = gfn_to_index(gfn, slot->base_gfn, PG_LEVEL_4K); in kvm_gfn_is_write_tracked()
308 n->track_remove_region(slot->base_gfn, slot->npages, n); in kvm_page_track_delete_slot()
tdp_mmu.c
346 gfn_t base_gfn = sp->gfn; in handle_removed_pt() local
355 gfn_t gfn = base_gfn + i * KVM_PAGES_PER_HPAGE(level); in handle_removed_pt()
1335 spte_set |= wrprot_gfn_range(kvm, root, slot->base_gfn, in kvm_tdp_mmu_wrprot_slot()
1336 slot->base_gfn + slot->npages, min_level); in kvm_tdp_mmu_wrprot_slot()
1555 spte_set |= clear_dirty_gfn_range(kvm, root, slot->base_gfn, in kvm_tdp_mmu_clear_dirty_slot()
1556 slot->base_gfn + slot->npages); in kvm_tdp_mmu_clear_dirty_slot()
1622 gfn_t start = slot->base_gfn; in zap_collapsible_spte_range()
paging_tmpl.h
634 gfn_t base_gfn = fault->gfn; in FNAME() local
636 WARN_ON_ONCE(gw->gfn != base_gfn); in FNAME()
735 base_gfn = gfn_round_for_level(fault->gfn, it.level); in FNAME()
741 sp = kvm_mmu_get_child_sp(vcpu, it.sptep, base_gfn, in FNAME()
756 base_gfn, fault->pfn, fault); in FNAME()
mmu.c
1293 slot->base_gfn + gfn_offset, mask, true); in kvm_mmu_write_protect_pt_masked()
1316 slot->base_gfn + gfn_offset, mask, false); in kvm_mmu_clear_dirty_pt_masked()
1533 slot->base_gfn, slot->base_gfn + slot->npages - 1, in walk_slot_rmaps()
3239 gfn_t base_gfn = fault->gfn; in direct_map() local
3270 base_gfn, fault->pfn, fault); in direct_map()
6909 u64 start = memslot->base_gfn; in kvm_mmu_slot_try_split_huge_pages()
7079 gfn_t gfn = slot->base_gfn + i; in kvm_mmu_zap_memslot_pages_and_flush()
7100 .start = slot->base_gfn, in kvm_mmu_zap_memslot()
7101 .end = slot->base_gfn + slot->npages, in kvm_mmu_zap_memslot()
7681 if (gfn >= slot->base_gfn && in kvm_arch_post_set_memory_attributes()
[all …]
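Note: in tdp_mmu.c's handle_removed_pt() (lines 346 and 355), base_gfn is the first gfn covered by the page table being torn down, and entry i maps gfns starting at base_gfn + i * KVM_PAGES_PER_HPAGE(level). A standalone sketch of that stepping, assuming x86's 512-entry tables (KVM_PAGES_PER_HPAGE(level) == 512^(level-1)):

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t gfn_t;

    /* assumed: 512^(level-1) 4K pages per entry, as on x86 */
    static uint64_t pages_per_hpage(int level)
    {
        uint64_t n = 1;
        while (--level > 0)
            n *= 512;
        return n;
    }

    int main(void)
    {
        gfn_t base_gfn = 0x80000;  /* first gfn covered by the removed table */
        int level = 2;             /* each entry spans 512 gfns at this level */

        for (int i = 0; i < 4; i++)
            printf("entry %d -> first gfn %#llx\n", i,
                   (unsigned long long)(base_gfn + i * pages_per_hpage(level)));
        return 0;
    }
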
/linux/arch/powerpc/kvm/
trace_hv.h
305 __field(u64, base_gfn)
317 __entry->base_gfn = memslot ? memslot->base_gfn : -1UL;
325 __entry->base_gfn, __entry->slot_flags)
book3s_hv_uvmem.c
261 p->base_pfn = slot->base_gfn; in kvmppc_uvmem_slot_init()
279 if (p->base_pfn == slot->base_gfn) { in kvmppc_uvmem_slot_free()
394 unsigned long gfn = memslot->base_gfn; in kvmppc_memslot_page_merge()
450 memslot->base_gfn << PAGE_SHIFT, in __kvmppc_uvmem_memslot_create()
624 gfn = slot->base_gfn; in kvmppc_uvmem_drop_pages()
797 unsigned long gfn = memslot->base_gfn; in kvmppc_uv_migrate_mem_slot()
book3s_64_mmu_hv.c
592 if (gfn_base < memslot->base_gfn) in kvmppc_book3s_hv_page_fault()
706 rmap = &memslot->arch.rmap[gfn_base - memslot->base_gfn]; in kvmppc_book3s_hv_page_fault()
828 rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn]; in kvm_unmap_rmapp()
879 gfn = memslot->base_gfn; in kvmppc_core_flush_memslot_hv()
908 rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn]; in kvm_age_rmapp()
979 rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn]; in kvm_test_age_rmapp()
1112 if (gfn < memslot->base_gfn || in kvmppc_harvest_vpa_dirty()
1113 gfn >= memslot->base_gfn + memslot->npages) in kvmppc_harvest_vpa_dirty()
1118 __set_bit_le(gfn - memslot->base_gfn, map); in kvmppc_harvest_vpa_dirty()
1193 set_bit_le(gfn - memslot->base_gfn, memslot->dirty_bitmap); in kvmppc_unpin_guest_page()
[all …]
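Note: the Book3S HV hits share one indexing rule: per-slot arrays such as arch.rmap[] and the dirty bitmap are indexed by the slot-relative frame number gfn - base_gfn, after a containment check like the one at lines 1112–1113. A standalone sketch with simplified rmap and bitmap types (the kernel's rmap entries and set_bit_le() are more involved):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    typedef uint64_t gfn_t;

    struct memslot {
        gfn_t base_gfn;
        uint64_t npages;
        uint64_t *rmap;              /* one entry per page in the slot */
        unsigned char *dirty_bitmap; /* one bit per page in the slot */
    };

    int main(void)
    {
        struct memslot ms = { .base_gfn = 0x2000, .npages = 256 };
        ms.rmap = calloc(ms.npages, sizeof(*ms.rmap));
        ms.dirty_bitmap = calloc((ms.npages + 7) / 8, 1);

        gfn_t gfn = 0x2042;
        if (gfn >= ms.base_gfn && gfn < ms.base_gfn + ms.npages) {
            uint64_t idx = gfn - ms.base_gfn;   /* slot-relative index */
            ms.rmap[idx] = 1;                   /* stand-in rmap entry */
            ms.dirty_bitmap[idx / 8] |= 1u << (idx % 8);
            printf("gfn %#llx -> index %llu\n",
                   (unsigned long long)gfn, (unsigned long long)idx);
        }
        free(ms.rmap);
        free(ms.dirty_bitmap);
        return 0;
    }
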
book3s_64_mmu_radix.c
1069 rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn]; in kvm_age_radix()
1101 unsigned long gfn = memslot->base_gfn + pagenum; in kvm_radix_test_clear_dirty()
1144 rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn]; in kvm_radix_test_clear_dirty()
1192 gpa = memslot->base_gfn << PAGE_SHIFT; in kvmppc_radix_flush_memslot()
book3s_hv_rm_mmu.c
104 gfn -= memslot->base_gfn; in kvmppc_update_dirty_map()
142 rmap = real_vmalloc_addr(&memslot->arch.rmap[gfn - memslot->base_gfn]); in revmap_for_hpte()
242 slot_fn = gfn - memslot->base_gfn; in kvmppc_do_h_enter()
e500_mmu_host.c
381 slot_start = pfn - (gfn - slot->base_gfn); in kvmppc_e500_shadow_map()
book3s_hv_nested.c
1038 gfn = (gpa >> PAGE_SHIFT) - memslot->base_gfn; in kvmhv_remove_nest_rmap_range()
1673 rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn]; in __kvmhv_nested_page_fault()
/linux/arch/mips/kvm/
mmu.c
418 gfn_t base_gfn = slot->base_gfn + gfn_offset; in kvm_arch_mmu_enable_log_dirty_pt_masked() local
419 gfn_t start = base_gfn + __ffs(mask); in kvm_arch_mmu_enable_log_dirty_pt_masked()
420 gfn_t end = base_gfn + __fls(mask); in kvm_arch_mmu_enable_log_dirty_pt_masked()
mips.c
198 kvm_mips_flush_gpa_pt(kvm, slot->base_gfn, in kvm_arch_flush_shadow_memslot()
199 slot->base_gfn + slot->npages - 1); in kvm_arch_flush_shadow_memslot()
233 needs_flush = kvm_mips_mkclean_gpa_pt(kvm, new->base_gfn, in kvm_arch_commit_memory_region()
234 new->base_gfn + new->npages - 1); in kvm_arch_commit_memory_region()
/linux/include/linux/
kvm_host.h
594 gfn_t base_gfn; member
1118 if (start < slot->base_gfn) { in kvm_memslot_iter_start()
1156 if (iter->slot->base_gfn + iter->slot->npages <= start) in kvm_memslot_iter_start()
1170 return iter->slot->base_gfn < end; in kvm_memslot_iter_is_valid()
1730 if (gfn >= slot->base_gfn && gfn < slot->base_gfn + slot->npages) in try_get_memslot()
1753 if (gfn >= slot->base_gfn) { in search_memslots()
1754 if (gfn < slot->base_gfn + slot->npages) in search_memslots()
1803 unsigned long offset = gfn - slot->base_gfn; in __gfn_to_hva_memslot()
1818 return slot->base_gfn + gfn_offset; in hva_to_gfn_memslot()
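Note: kvm_host.h line 594 is the definition itself: base_gfn is the first guest frame a memslot covers, so a gfn belongs to the slot iff base_gfn <= gfn < base_gfn + npages (lines 1730 and 1753–1754), and lookups search slots sorted by base_gfn. A standalone sketch over a plain sorted array (the kernel iterates a gfn-keyed tree with the same comparisons):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t gfn_t;

    struct memslot { gfn_t base_gfn; uint64_t npages; };

    static const struct memslot *find_slot(const struct memslot *slots,
                                           size_t n, gfn_t gfn)
    {
        size_t lo = 0, hi = n;
        while (lo < hi) {
            size_t mid = lo + (hi - lo) / 2;
            if (gfn >= slots[mid].base_gfn) {
                if (gfn < slots[mid].base_gfn + slots[mid].npages)
                    return &slots[mid];   /* gfn inside this slot */
                lo = mid + 1;             /* gfn above this slot */
            } else {
                hi = mid;                 /* gfn below this slot */
            }
        }
        return NULL;
    }

    int main(void)
    {
        const struct memslot slots[] = {
            { 0x0, 0x100 }, { 0x1000, 0x400 }, { 0x8000, 0x200 },
        };
        const struct memslot *s = find_slot(slots, 3, 0x1234);
        printf("hit slot with base_gfn %#llx\n",
               s ? (unsigned long long)s->base_gfn : 0ULL);
        return 0;
    }
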
/linux/arch/s390/kvm/
pv.c
261 while (slot && slot->base_gfn < pages_2g) { in kvm_s390_destroy_lower_2g()
262 len = min_t(u64, slot->npages, pages_2g - slot->base_gfn) * PAGE_SIZE; in kvm_s390_destroy_lower_2g()
265 slot = gfn_to_memslot(kvm, slot->base_gfn + slot->npages); in kvm_s390_destroy_lower_2g()
kvm-s390.c
688 cur_gfn = memslot->base_gfn; in kvm_arch_sync_dirty_log()
689 last_gfn = memslot->base_gfn + memslot->npages; in kvm_arch_sync_dirty_log()
2234 unsigned long ofs = cur_gfn - ms->base_gfn; in kvm_s390_next_dirty_cmma()
2237 if (ms->base_gfn + ms->npages <= cur_gfn) { in kvm_s390_next_dirty_cmma()
2247 if (cur_gfn < ms->base_gfn) in kvm_s390_next_dirty_cmma()
2255 return ms->base_gfn + ofs; in kvm_s390_next_dirty_cmma()
2300 if (cur_gfn - ms->base_gfn >= ms->npages) { in kvm_s390_get_cmma()
5779 if ((new->base_gfn * PAGE_SIZE) + size > kvm->arch.mem_limit) in kvm_arch_prepare_memory_region()
5811 rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE, in kvm_arch_commit_memory_region()
5815 rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE, in kvm_arch_commit_memory_region()
[all …]
kvm-s390.h
265 return ms->base_gfn + ms->npages; in kvm_s390_get_gfn_end()
priv.c
1242 if (ms && !test_and_set_bit(gfn - ms->base_gfn, kvm_second_dirty_bitmap(ms))) in __do_essa()
/linux/arch/powerpc/include/asm/
kvm_book3s_64.h
495 return !(memslot->base_gfn & mask) && !(memslot->npages & mask); in slot_is_aligned()
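Note: slot_is_aligned() accepts a slot for a given large-page size only when both its start (base_gfn) and its length (npages) are multiples of that size; from the fragment, mask is the large page's page count minus one. A standalone sketch under that assumption:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t gfn_t;

    struct memslot { gfn_t base_gfn; uint64_t npages; };

    /* pages: 4K pages per large page; must be a power of two */
    static bool slot_is_aligned(const struct memslot *ms, uint64_t pages)
    {
        uint64_t mask = pages - 1;
        return !(ms->base_gfn & mask) && !(ms->npages & mask);
    }

    int main(void)
    {
        struct memslot ms = { .base_gfn = 0x4000, .npages = 0x2000 };
        /* 4096 x 4K pages = a 16 MiB backing page */
        printf("16M-capable: %d\n", slot_is_aligned(&ms, 4096));
        return 0;
    }
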
