xref: /freebsd/lib/libkvm/kvm_minidump_aarch64.c (revision 1d386b48)
/*-
 * Copyright (c) 2006 Peter Wemm
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * From: FreeBSD: src/lib/libkvm/kvm_minidump_amd64.c r261799
 */

#include <sys/cdefs.h>
/*
 * ARM64 (AArch64) machine dependent routines for kvm and minidumps.
 */

#include <sys/param.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <vm/vm.h>
#include <kvm.h>

#include "../../sys/arm64/include/minidump.h"

#include <limits.h>

#include "kvm_private.h"
#include "kvm_aarch64.h"

#define	aarch64_round_page(x, size)	roundup2((kvaddr_t)(x), size)
#define	aarch64_trunc_page(x, size)	rounddown2((kvaddr_t)(x), size)

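/*
 * Per-core translation state: a byte-swapped copy of the minidump header
 * plus the page size and L3 table shift derived from its flags.
 */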
struct vmstate {
	struct minidumphdr hdr;
	size_t page_size;
	u_int l3_shift;
};

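/*
 * Fetch the page table entry at the given index from the dump's copy of
 * the kernel L3 tables and convert it from little-endian.
 */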
static aarch64_pte_t
_aarch64_pte_get(kvm_t *kd, u_long pteindex)
{
	aarch64_pte_t *pte = _kvm_pmap_get(kd, pteindex, sizeof(*pte));

	return le64toh(*pte);
}

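/*
 * Accept the core if it is an ELF64 EM_AARCH64 image that carries a
 * minidump header.
 */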
static int
_aarch64_minidump_probe(kvm_t *kd)
{

	return (_kvm_probe_elf_kernel(kd, ELFCLASS64, EM_AARCH64) &&
	    _kvm_is_minidump(kd));
}

static void
_aarch64_minidump_freevtop(kvm_t *kd)
{
	struct vmstate *vm = kd->vmst;

	free(vm);
	kd->vmst = NULL;
}

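/*
 * Read and byte-swap the minidump header, select the page size described
 * by its flags, and initialize the lookup tables for the bitmap, the saved
 * page-table pages, and the sparse page data that follow it.
 */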
static int
_aarch64_minidump_initvtop(kvm_t *kd)
{
	struct vmstate *vmst;
	off_t off, dump_avail_off, sparse_off;

	vmst = _kvm_malloc(kd, sizeof(*vmst));
	if (vmst == NULL) {
		_kvm_err(kd, kd->program, "cannot allocate vm");
		return (-1);
	}
	kd->vmst = vmst;
	if (pread(kd->pmfd, &vmst->hdr, sizeof(vmst->hdr), 0) !=
	    sizeof(vmst->hdr)) {
		_kvm_err(kd, kd->program, "cannot read dump header");
		return (-1);
	}
	if (strncmp(MINIDUMP_MAGIC, vmst->hdr.magic,
	    sizeof(vmst->hdr.magic)) != 0) {
		_kvm_err(kd, kd->program, "not a minidump for this platform");
		return (-1);
	}

	vmst->hdr.version = le32toh(vmst->hdr.version);
	if (vmst->hdr.version > MINIDUMP_VERSION || vmst->hdr.version < 1) {
		_kvm_err(kd, kd->program, "wrong minidump version. "
		    "Expected %d got %d", MINIDUMP_VERSION, vmst->hdr.version);
		return (-1);
	}
	vmst->hdr.msgbufsize = le32toh(vmst->hdr.msgbufsize);
	vmst->hdr.bitmapsize = le32toh(vmst->hdr.bitmapsize);
	vmst->hdr.pmapsize = le32toh(vmst->hdr.pmapsize);
	vmst->hdr.kernbase = le64toh(vmst->hdr.kernbase);
	vmst->hdr.dmapphys = le64toh(vmst->hdr.dmapphys);
	vmst->hdr.dmapbase = le64toh(vmst->hdr.dmapbase);
	vmst->hdr.dmapend = le64toh(vmst->hdr.dmapend);
	/* dumpavailsize added in version 2 */
	if (vmst->hdr.version >= 2) {
		vmst->hdr.dumpavailsize = le32toh(vmst->hdr.dumpavailsize);
	} else {
		vmst->hdr.dumpavailsize = 0;
	}
	/* flags added in version 3 */
	if (vmst->hdr.version >= 3) {
		vmst->hdr.flags = le32toh(vmst->hdr.flags);
	} else {
		vmst->hdr.flags = MINIDUMP_FLAG_PS_4K;
	}

	switch (vmst->hdr.flags & MINIDUMP_FLAG_PS_MASK) {
	case MINIDUMP_FLAG_PS_4K:
		vmst->page_size = AARCH64_PAGE_SIZE_4K;
		vmst->l3_shift = AARCH64_L3_SHIFT_4K;
		break;
	case MINIDUMP_FLAG_PS_16K:
		vmst->page_size = AARCH64_PAGE_SIZE_16K;
		vmst->l3_shift = AARCH64_L3_SHIFT_16K;
		break;
	default:
		_kvm_err(kd, kd->program, "unknown page size flag %x",
		    vmst->hdr.flags & MINIDUMP_FLAG_PS_MASK);
		return (-1);
	}

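	/*
	 * Regions follow the header in this order, each rounded up to the
	 * dump page size:
	 *   msgbuf, dump_avail, bitmap, page-table pages, sparse page data
	 */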
	/* Skip header and msgbuf */
	dump_avail_off = vmst->page_size +
	    aarch64_round_page(vmst->hdr.msgbufsize, vmst->page_size);

	/* Skip dump_avail */
	off = dump_avail_off +
	    aarch64_round_page(vmst->hdr.dumpavailsize, vmst->page_size);

	/* build physical address lookup table for sparse pages */
	sparse_off = off +
	    aarch64_round_page(vmst->hdr.bitmapsize, vmst->page_size) +
	    aarch64_round_page(vmst->hdr.pmapsize, vmst->page_size);
	if (_kvm_pt_init(kd, vmst->hdr.dumpavailsize, dump_avail_off,
	    vmst->hdr.bitmapsize, off, sparse_off, vmst->page_size) == -1) {
		return (-1);
	}
	off += aarch64_round_page(vmst->hdr.bitmapsize, vmst->page_size);

	if (_kvm_pmap_init(kd, vmst->hdr.pmapsize, off) == -1) {
		return (-1);
	}
	off += aarch64_round_page(vmst->hdr.pmapsize, vmst->page_size);

	return (0);
}

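/*
 * Translate a kernel virtual address into an offset in the core file and
 * return the number of bytes valid at that offset, or 0 on failure.
 * Direct-map addresses are rebased onto dmapphys; addresses at or above
 * kernbase are resolved through the saved L3 page-table entries.
 */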
static int
_aarch64_minidump_vatop(kvm_t *kd, kvaddr_t va, off_t *pa)
{
	struct vmstate *vm;
	aarch64_physaddr_t offset;
	aarch64_pte_t l3;
	kvaddr_t l3_index;
	aarch64_physaddr_t a;
	off_t ofs;

	vm = kd->vmst;
	offset = va & (kd->vmst->page_size - 1);

	if (va >= vm->hdr.dmapbase && va < vm->hdr.dmapend) {
		a = aarch64_trunc_page(va - vm->hdr.dmapbase + vm->hdr.dmapphys,
		    kd->vmst->page_size);
		ofs = _kvm_pt_find(kd, a, kd->vmst->page_size);
		if (ofs == -1) {
			_kvm_err(kd, kd->program, "_aarch64_minidump_vatop: "
			    "direct map address 0x%jx not in minidump",
			    (uintmax_t)va);
			goto invalid;
		}
		*pa = ofs + offset;
		return (kd->vmst->page_size - offset);
	} else if (va >= vm->hdr.kernbase) {
		l3_index = (va - vm->hdr.kernbase) >> kd->vmst->l3_shift;
		if (l3_index >= vm->hdr.pmapsize / sizeof(l3))
			goto invalid;
		l3 = _aarch64_pte_get(kd, l3_index);
		if ((l3 & AARCH64_ATTR_DESCR_MASK) != AARCH64_L3_PAGE) {
			_kvm_err(kd, kd->program,
			    "_aarch64_minidump_vatop: pde not valid");
			goto invalid;
		}
		a = l3 & ~AARCH64_ATTR_MASK;
		ofs = _kvm_pt_find(kd, a, kd->vmst->page_size);
		if (ofs == -1) {
			_kvm_err(kd, kd->program, "_aarch64_minidump_vatop: "
			    "physical address 0x%jx not in minidump",
			    (uintmax_t)a);
			goto invalid;
		}
		*pa = ofs + offset;
		return (kd->vmst->page_size - offset);
	} else {
		_kvm_err(kd, kd->program,
	    "_aarch64_minidump_vatop: virtual address 0x%jx not minidumped",
		    (uintmax_t)va);
		goto invalid;
	}

invalid:
	_kvm_err(kd, 0, "invalid address (0x%jx)", (uintmax_t)va);
	return (0);
}

static int
_aarch64_minidump_kvatop(kvm_t *kd, kvaddr_t va, off_t *pa)
{

	if (ISALIVE(kd)) {
		_kvm_err(kd, 0,
		    "_aarch64_minidump_kvatop called in live kernel!");
		return (0);
	}
	return (_aarch64_minidump_vatop(kd, va, pa));
}

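/*
 * A dump is handled natively only when libkvm itself was built for
 * aarch64.
 */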
static int
_aarch64_native(kvm_t *kd __unused)
{

#ifdef __aarch64__
	return (1);
#else
	return (0);
#endif
}

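/*
 * Derive the page protection from the access-permission and execute-never
 * bits of a leaf page table entry.
 */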
static vm_prot_t
_aarch64_entry_to_prot(aarch64_pte_t pte)
{
	vm_prot_t prot = VM_PROT_READ;

	/* Source: arm64/arm64/pmap.c:pmap_protect() */
	if ((pte & AARCH64_ATTR_AP(AARCH64_ATTR_AP_RO)) == 0)
		prot |= VM_PROT_WRITE;
	if ((pte & AARCH64_ATTR_XN) == 0)
		prot |= VM_PROT_EXECUTE;
	return prot;
}

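/*
 * Invoke the callback for every page captured in the dump: first the
 * kernel-mapped pages found through the saved L3 entries, then each
 * physical page in the bitmap by its direct-map address.
 */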
static int
_aarch64_minidump_walk_pages(kvm_t *kd, kvm_walk_pages_cb_t *cb, void *arg)
{
	struct vmstate *vm = kd->vmst;
	u_long nptes = vm->hdr.pmapsize / sizeof(aarch64_pte_t);
	u_long bmindex, dva, pa, pteindex, va;
	struct kvm_bitmap bm;
	vm_prot_t prot;
	int ret = 0;

	if (!_kvm_bitmap_init(&bm, vm->hdr.bitmapsize, &bmindex))
		return (0);

	for (pteindex = 0; pteindex < nptes; pteindex++) {
		aarch64_pte_t pte = _aarch64_pte_get(kd, pteindex);

		if ((pte & AARCH64_ATTR_DESCR_MASK) != AARCH64_L3_PAGE)
			continue;

		va = vm->hdr.kernbase + (pteindex << kd->vmst->l3_shift);
		pa = pte & ~AARCH64_ATTR_MASK;
		dva = vm->hdr.dmapbase + pa;
		if (!_kvm_visit_cb(kd, cb, arg, pa, va, dva,
		    _aarch64_entry_to_prot(pte), kd->vmst->page_size, 0)) {
			goto out;
		}
	}

	while (_kvm_bitmap_next(&bm, &bmindex)) {
		pa = _kvm_bit_id_pa(kd, bmindex, kd->vmst->page_size);
		if (pa == _KVM_PA_INVALID)
			break;
		dva = vm->hdr.dmapbase + pa;
		if (vm->hdr.dmapend < (dva + kd->vmst->page_size))
			break;
		va = 0;
		prot = VM_PROT_READ | VM_PROT_WRITE;
		if (!_kvm_visit_cb(kd, cb, arg, pa, va, dva,
		    prot, kd->vmst->page_size, 0)) {
			goto out;
		}
	}
	ret = 1;

out:
	_kvm_bitmap_deinit(&bm);
	return (ret);
}

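/*
 * Ops table registered through KVM_ARCH() below; kvm_open2(3) probes each
 * registered table until one accepts the core, so this file handles any
 * arm64 minidump even when libkvm is built for another platform.
 *
 * Illustrative consumer sketch (assumes a kernel path and matching vmcore;
 * not part of this file):
 *
 *	char errbuf[_POSIX2_LINE_MAX];
 *	kvm_t *kd = kvm_open2(kernel, vmcore, O_RDONLY, errbuf, NULL);
 *	if (kd != NULL) {
 *		ssize_t n = kvm_read2(kd, va, buf, len);  (uses ka_kvatop)
 *		kvm_close(kd);
 *	}
 */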
static struct kvm_arch kvm_aarch64_minidump = {
	.ka_probe = _aarch64_minidump_probe,
	.ka_initvtop = _aarch64_minidump_initvtop,
	.ka_freevtop = _aarch64_minidump_freevtop,
	.ka_kvatop = _aarch64_minidump_kvatop,
	.ka_native = _aarch64_native,
	.ka_walk_pages = _aarch64_minidump_walk_pages,
};

KVM_ARCH(kvm_aarch64_minidump);