xref: /illumos-gate/usr/src/uts/common/vm/hat.h (revision bb25c06c)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 /*	Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T	*/
27 /*	  All Rights Reserved  	*/
28 
29 /*
30  * University Copyright- Copyright (c) 1982, 1986, 1988
31  * The Regents of the University of California
32  * All Rights Reserved
33  *
34  * University Acknowledgment- Portions of this document are derived from
35  * software developed by the University of California, Berkeley, and its
36  * contributors.
37  */
38 
39 #ifndef	_VM_HAT_H
40 #define	_VM_HAT_H
41 
42 #pragma ident	"%Z%%M%	%I%	%E% SMI"
43 
44 #include <sys/types.h>
45 #include <sys/t_lock.h>
46 #include <vm/faultcode.h>
47 #include <sys/kstat.h>
48 #include <sys/siginfo.h>
49 
50 #ifdef	__cplusplus
51 extern "C" {
52 #endif
53 
54 /*
55  * VM - Hardware Address Translation management.
56  *
57  * This file describes the machine independent interfaces to
58  * the hardware address translation management routines.  Other
59  * machine specific interfaces and structures are defined
60  * in <vm/hat_xxx.h>.  The hat layer manages the address
61  * translation hardware as a cache driven by calls from the
62  * higher levels of the VM system.
63  */
64 
65 struct hat;
66 struct kpme;
67 struct memseg;
68 
69 #include <vm/page.h>
70 
71 /*
72  * a callback used with hat_unload_callback()
73  * start and end mark are set to a range of unloaded addresses
74  * and the function is invoked with a pointer to this data structure
75  */
typedef struct hat_callback {
	caddr_t	hcb_start_addr;		/* set to start of unloaded range */
	caddr_t hcb_end_addr;		/* set to end of unloaded range */
	void	(*hcb_function)(struct hat_callback *);	/* invoked on unload */
	void	*hcb_data;		/* opaque data for hcb_function */
} hat_callback_t;
82 
83 #ifdef	_KERNEL
84 
85 /*
86  * One time hat initialization
87  */
88 void	hat_init(void);
89 
90 /*
91  * Notify hat of a system dump
92  */
93 void	hat_dump(void);
94 
95 /*
96  * Operations on an address space:
97  *
 * struct hat *hat_alloc(as)
 *	allocates a hat structure for as.
100  *
101  * void hat_free_start(hat)
102  *	informs hat layer process has finished executing but as has not
103  *	been cleaned up yet.
104  *
105  * void hat_free_end(hat)
106  *	informs hat layer as is being destroyed.  hat layer cannot use as
107  *	pointer after this call.
108  *
109  * void hat_swapin(hat)
110  *	allocate any hat resources required for process being swapped in.
111  *
112  * void hat_swapout(hat)
113  *	deallocate hat resources for process being swapped out.
114  *
115  * size_t hat_get_mapped_size(hat)
116  *	returns number of bytes that have valid mappings in hat.
117  *
118  * void hat_stats_enable(hat)
119  * void hat_stats_disable(hat)
120  *	enables/disables collection of stats for hat.
121  *
122  * int hat_dup(parenthat, childhat, addr, len, flags)
123  *	Duplicate address translations of the parent to the child.  Supports
124  *	the entire address range or a range depending on flag,
125  *	zero returned on success, non-zero on error
126  *
127  * void hat_thread_exit(thread)
128  *	Notifies the HAT that a thread is exiting, called after it has been
129  *	reassigned to the kernel AS.
130  */
131 
132 struct hat *hat_alloc(struct as *);
133 void	hat_free_start(struct hat *);
134 void	hat_free_end(struct hat *);
135 int	hat_dup(struct hat *, struct hat *, caddr_t, size_t, uint_t);
136 void	hat_swapin(struct hat *);
137 void	hat_swapout(struct hat *);
138 size_t	hat_get_mapped_size(struct hat *);
139 int	hat_stats_enable(struct hat *);
140 void	hat_stats_disable(struct hat *);
141 void	hat_thread_exit(kthread_t *);
142 
143 /*
144  * Operations on a named address within a segment:
145  *
146  * void hat_memload(hat, addr, pp, attr, flags)
147  *	load/lock the given page struct
148  *
149  * void hat_memload_array(hat, addr, len, ppa, attr, flags)
150  *	load/lock the given array of page structs
151  *
152  * void hat_devload(hat, addr, len, pf, attr, flags)
153  *	load/lock the given page frame number
154  *
155  * void hat_unlock(hat, addr, len)
156  *	unlock a given range of addresses
157  *
158  * void hat_unload(hat, addr, len, flags)
159  * void hat_unload_callback(hat, addr, len, flags, callback)
160  *	unload a given range of addresses (has optional callback)
161  *
162  * void hat_sync(hat, addr, len, flags)
163  *	synchronize mapping with software data structures
164  *
165  * void	hat_map(hat, addr, len, flags)
166  *
167  * void hat_setattr(hat, addr, len, attr)
168  * void hat_clrattr(hat, addr, len, attr)
169  * void hat_chgattr(hat, addr, len, attr)
170  *	modify attributes for a range of addresses. skips any invalid mappings
171  *
172  * uint_t hat_getattr(hat, addr, *attr)
173  *	returns attr for <hat,addr> in *attr.  returns 0 if there was a
174  *	mapping and *attr is valid, nonzero if there was no mapping and
175  *	*attr is not valid.
176  *
177  * size_t hat_getpagesize(hat, addr)
178  *	returns pagesize in bytes for <hat, addr>. returns -1 if there is
179  *	no mapping. This is an advisory call.
180  *
181  * pfn_t hat_getpfnum(hat, addr)
182  *	returns pfn for <hat, addr> or PFN_INVALID if mapping is invalid.
183  *
184  * pfn_t hat_getkpfnum(addr)
185  *	returns pfn for non-memory mapped addr in kernel address space
186  *	or PFN_INVALID if mapping is invalid or is kernel memory.
187  *
188  * int hat_probe(hat, addr)
189  *	return 0 if no valid mapping is present.  Faster version
190  *	of hat_getattr in certain architectures.
191  *
192  * int hat_share(dhat, daddr, shat, saddr, len, szc)
193  *
194  * void hat_unshare(hat, addr, len, szc)
195  *
196  * void hat_chgprot(hat, addr, len, vprot)
197  *	This is a deprecated call.  New segment drivers should store
198  *	all attributes and use hat_*attr calls.
199  *	Change the protections in the virtual address range
200  *	given to the specified virtual protection.  If vprot is ~PROT_WRITE,
201  *	then remove write permission, leaving the other permissions
202  *	unchanged.  If vprot is ~PROT_USER, remove user permissions.
203  */
204 
205 void	hat_memload(struct hat *, caddr_t, struct page *, uint_t, uint_t);
206 void	hat_memload_array(struct hat *, caddr_t, size_t, struct page **,
207 		uint_t, uint_t);
208 
209 void	hat_devload(struct hat *, caddr_t, size_t, pfn_t, uint_t, int);
210 void	hat_unlock(struct hat *, caddr_t, size_t);
211 void	hat_unload(struct hat *, caddr_t, size_t, uint_t);
212 void	hat_unload_callback(struct hat *, caddr_t, size_t, uint_t,
213 		hat_callback_t *);
214 void	hat_sync(struct hat *, caddr_t, size_t, uint_t);
215 void	hat_map(struct hat *, caddr_t, size_t, uint_t);
216 void	hat_setattr(struct hat *, caddr_t, size_t, uint_t);
217 void	hat_clrattr(struct hat *, caddr_t, size_t, uint_t);
218 void	hat_chgattr(struct hat *, caddr_t, size_t, uint_t);
219 uint_t	hat_getattr(struct hat *, caddr_t, uint_t *);
220 ssize_t	hat_getpagesize(struct hat *, caddr_t);
221 pfn_t	hat_getpfnum(struct hat *, caddr_t);
222 int	hat_probe(struct hat *, caddr_t);
223 int	hat_share(struct hat *, caddr_t, struct hat *, caddr_t, size_t, uint_t);
224 void	hat_unshare(struct hat *, caddr_t, size_t, uint_t);
225 void	hat_chgprot(struct hat *, caddr_t, size_t, uint_t);
226 void	hat_reserve(struct as *, caddr_t, size_t);
227 pfn_t	va_to_pfn(void *);
228 uint64_t va_to_pa(void *);
229 
230 /*
231  * hat_getkpfnum() is never supported on amd64 and will be
232  * removed from other platforms in future release
233  */
234 #if !defined(__amd64)
235 pfn_t	hat_getkpfnum(caddr_t);
236 #endif
237 
238 
239 /*
240  * Kernel Physical Mapping (segkpm) hat interface routines.
241  */
242 caddr_t	hat_kpm_mapin(struct page *, struct kpme *);
243 void	hat_kpm_mapout(struct page *, struct kpme *, caddr_t);
244 caddr_t	hat_kpm_page2va(struct page *, int);
245 struct page *hat_kpm_vaddr2page(caddr_t);
246 int	hat_kpm_fault(struct hat *, caddr_t);
247 void	hat_kpm_mseghash_clear(int);
248 void	hat_kpm_mseghash_update(pgcnt_t, struct memseg *);
249 void	hat_kpm_addmem_mseg_update(struct memseg *, pgcnt_t, offset_t);
250 void	hat_kpm_addmem_mseg_insert(struct memseg *);
251 void	hat_kpm_addmem_memsegs_update(struct memseg *);
252 caddr_t hat_kpm_mseg_reuse(struct memseg *);
253 void	hat_kpm_delmem_mseg_update(struct memseg *, struct memseg **);
254 void	hat_kpm_split_mseg_update(struct memseg *, struct memseg **,
255 			struct memseg *, struct memseg *, struct memseg *);
256 void	hat_kpm_walk(void (*)(void *, void *, size_t), void *);
257 
258 /*
259  * Operations on all translations for a given page(s)
260  *
261  * void hat_page_setattr(pp, flag)
262  * void hat_page_clrattr(pp, flag)
263  *	used to set/clr red/mod bits.
264  *
 * uint hat_page_getattr(pp, flag)
 *	If flag is specified, returns 0 if attribute is disabled
 *	and non zero if enabled.  If flag specifies multiple attributes
 *	then returns 0 if ALL attributes are disabled.  This is an advisory
 *	call.
270  *
271  * int hat_pageunload(pp, forceflag)
272  *	unload all translations attached to pp.
273  *
274  * uint_t hat_pagesync(pp, flags)
275  *	get hw stats from hardware into page struct and reset hw stats
276  *	returns attributes of page
277  *
278  * ulong_t hat_page_getshare(pp)
279  *	returns approx number of mappings to this pp.  A return of 0 implies
280  *	there are no mappings to the page.
281  *
282  * faultcode_t hat_softlock(hat, addr, lenp, ppp, flags);
283  *	called to softlock pages for zero copy tcp
284  *
285  * void hat_page_demote(pp);
286  *	unload all large mappings to pp and decrease p_szc of all
287  *	constituent pages according to the remaining mappings.
288  */
289 
290 void	hat_page_setattr(struct page *, uint_t);
291 void	hat_page_clrattr(struct page *, uint_t);
292 uint_t	hat_page_getattr(struct page *, uint_t);
293 int	hat_pageunload(struct page *, uint_t);
294 uint_t	hat_pagesync(struct page *, uint_t);
295 ulong_t	hat_page_getshare(struct page *);
296 faultcode_t hat_softlock(struct hat *, caddr_t, size_t *,
297 			struct page **, uint_t);
298 void	hat_page_demote(struct page *);
299 
/*
 * Routine to expose supported HAT features to PIM.
 */
/* Feature identifiers; query availability with hat_supported(). */
enum hat_features {
	HAT_SHARED_PT,		/* Shared page tables */
	HAT_DYNAMIC_ISM_UNMAP,	/* hat_pageunload() handles ISM pages */
	HAT_VMODSORT		/* support for VMODSORT flag of vnode */
};
308 
309 int hat_supported(enum hat_features, void *);
310 
311 /*
312  * Services provided to the hat:
313  *
314  * void as_signal_proc(as, siginfo)
315  *	deliver signal to all processes that have this as.
316  *
317  * int hat_setstat(as, addr, len, rmbits)
318  *	informs hatstat layer that ref/mod bits need to be updated for
319  *	address range. Returns 0 on success, 1 for failure.
320  */
321 void	as_signal_proc(struct as *, k_siginfo_t *siginfo);
322 void	hat_setstat(struct as *, caddr_t, size_t, uint_t);
323 
324 /*
325  * Flags to pass to hat routines.
326  *
327  * Certain flags only apply to some interfaces:
328  *
329  * 	HAT_LOAD	Default flags to load a translation to the page.
330  * 	HAT_LOAD_LOCK	Lock down mapping resources; hat_map(), hat_memload(),
331  *			and hat_devload().
332  *	HAT_LOAD_ADV	Advisory load - Load translation if and only if
333  *			sufficient MMU resources exist (i.e., do not steal).
334  *	HAT_LOAD_SHARE	A flag to hat_memload() to indicate h/w page tables
335  *			that map some user pages (not kas) is shared by more
336  *			than one process (eg. ISM).
 *	HAT_LOAD_CONTIG	Pages are contiguous
338  *	HAT_LOAD_NOCONSIST Do not add mapping to mapping list.
339  *	HAT_LOAD_REMAP	Reload a valid pte with a different page frame.
340  *	HAT_RELOAD_SHARE Reload a shared page table entry. Some platforms
341  *			 may require different actions than on the first
342  *			 load of a shared mapping.
343  *	HAT_NO_KALLOC	Do not kmem_alloc while creating the mapping; at this
344  *			point, it's setting up mapping to allocate internal
345  *			hat layer data structures.  This flag forces hat layer
346  *			to tap its reserves in order to prevent infinite
347  *			recursion.
348  *	HAT_LOAD_TEXT	A flag to hat_memload() to indicate loading text pages.
349  */
350 
351 /*
352  * Flags for hat_memload/hat_devload
353  */
354 #define	HAT_FLAGS_RESV		0xFF000000	/* resv for hat impl */
355 #define	HAT_LOAD		0x00
356 #define	HAT_LOAD_LOCK		0x01
357 #define	HAT_LOAD_ADV		0x04
358 #define	HAT_LOAD_CONTIG		0x10
359 #define	HAT_LOAD_NOCONSIST	0x20
360 #define	HAT_LOAD_SHARE		0x40
361 #define	HAT_LOAD_REMAP		0x80
362 #define	HAT_RELOAD_SHARE	0x100
363 #define	HAT_NO_KALLOC		0x200
364 #define	HAT_LOAD_TEXT		0x400
365 
366 /*
367  * Flags for initializing disable_*large_pages.
368  *
369  *	HAT_AUTO_TEXT	Get MMU specific disable_auto_text_large_pages
370  *	HAT_AUTO_DATA	Get MMU specific disable_auto_data_large_pages
371  */
372 #define	HAT_AUTO_TEXT		0x800
373 #define	HAT_AUTO_DATA		0x1000
374 
375 /*
376  * Attributes for hat_memload/hat_devload/hat_*attr
377  * are a superset of prot flags defined in mman.h.
378  */
379 #define	HAT_PLAT_ATTR_MASK	0xF00000
380 #define	HAT_PROT_MASK		0x0F
381 
382 #define	HAT_NOFAULT		0x10
383 #define	HAT_NOSYNC		0x20
384 
385 /*
386  * Advisory ordering attributes. Apply only to device mappings.
387  *
388  * HAT_STRICTORDER: the CPU must issue the references in order, as the
389  *	programmer specified.  This is the default.
390  * HAT_UNORDERED_OK: the CPU may reorder the references (this is all kinds
391  *	of reordering; store or load with store or load).
392  * HAT_MERGING_OK: merging and batching: the CPU may merge individual stores
393  *	to consecutive locations (for example, turn two consecutive byte
394  *	stores into one halfword store), and it may batch individual loads
395  *	(for example, turn two consecutive byte loads into one halfword load).
396  *	This also implies re-ordering.
397  * HAT_LOADCACHING_OK: the CPU may cache the data it fetches and reuse it
398  *	until another store occurs.  The default is to fetch new data
399  *	on every load.  This also implies merging.
400  * HAT_STORECACHING_OK: the CPU may keep the data in the cache and push it to
401  *	the device (perhaps with other data) at a later time.  The default is
402  *	to push the data right away.  This also implies load caching.
403  */
404 #define	HAT_STRICTORDER		0x0000
405 #define	HAT_UNORDERED_OK	0x0100
406 #define	HAT_MERGING_OK		0x0200
407 #define	HAT_LOADCACHING_OK	0x0300
408 #define	HAT_STORECACHING_OK	0x0400
409 #define	HAT_ORDER_MASK		0x0700
410 
411 /* endian attributes */
412 #define	HAT_NEVERSWAP		0x0000
413 #define	HAT_STRUCTURE_BE	0x1000
414 #define	HAT_STRUCTURE_LE	0x2000
415 #define	HAT_ENDIAN_MASK		0x3000
416 
417 /* flags for hat_softlock */
418 #define	HAT_COW			0x0001
419 
420 /*
421  * Flags for hat_unload
422  */
423 #define	HAT_UNLOAD		0x00
424 #define	HAT_UNLOAD_NOSYNC	0x02
425 #define	HAT_UNLOAD_UNLOCK	0x04
426 #define	HAT_UNLOAD_OTHER	0x08
427 #define	HAT_UNLOAD_UNMAP	0x10
428 
429 /*
430  * Flags for hat_pagesync, hat_getstat, hat_sync
431  */
432 #define	HAT_SYNC_DONTZERO	0x00
433 #define	HAT_SYNC_ZERORM		0x01
434 /* Additional flags for hat_pagesync */
435 #define	HAT_SYNC_STOPON_REF	0x02
436 #define	HAT_SYNC_STOPON_MOD	0x04
437 #define	HAT_SYNC_STOPON_RM	(HAT_SYNC_STOPON_REF | HAT_SYNC_STOPON_MOD)
438 #define	HAT_SYNC_STOPON_SHARED	0x08
439 
440 /*
441  * Flags for hat_dup
442  *
443  * HAT_DUP_ALL dup entire address space
444  * HAT_DUP_COW dup plus hat_clrattr(..PROT_WRITE) on newas
445  */
446 #define	HAT_DUP_ALL		1
447 #define	HAT_DUP_COW		2
448 
449 
450 /*
451  * Flags for hat_map
452  */
453 #define	HAT_MAP			0x00
454 
455 /*
456  * Flag for hat_pageunload
457  */
458 #define	HAT_ADV_PGUNLOAD	0x00
459 #define	HAT_FORCE_PGUNLOAD	0x01
460 
461 /*
462  * Attributes for hat_page_*attr, hat_setstats and
463  * returned by hat_pagesync.
464  */
465 #define	P_MOD	0x1		/* the modified bit */
466 #define	P_REF	0x2		/* the referenced bit */
467 #define	P_RO	0x4		/* Read only page */
468 
469 #define	hat_ismod(pp)		(hat_page_getattr(pp, P_MOD))
470 #define	hat_isref(pp)		(hat_page_getattr(pp, P_REF))
471 #define	hat_isro(pp)		(hat_page_getattr(pp, P_RO))
472 
473 #define	hat_setmod(pp)		(hat_page_setattr(pp, P_MOD))
474 #define	hat_setref(pp)		(hat_page_setattr(pp, P_REF))
475 #define	hat_setrefmod(pp)	(hat_page_setattr(pp, P_REF|P_MOD))
476 
477 #define	hat_clrmod(pp)		(hat_page_clrattr(pp, P_MOD))
478 #define	hat_clrref(pp)		(hat_page_clrattr(pp, P_REF))
479 #define	hat_clrrefmod(pp)	(hat_page_clrattr(pp, P_REF|P_MOD))
480 
481 #define	hat_page_is_mapped(pp)	(hat_page_getshare(pp))
482 
483 /*
484  * hat_setup is being used in sparc/os/sundep.c
485  */
486 void	hat_setup(struct hat *, int);
487 
488 /*
489  * Flags for hat_setup
490  */
491 #define	HAT_DONTALLOC		0
492 #define	HAT_ALLOC		1
493 #define	HAT_INIT		2
494 
495 /*
496  * Other routines, for statistics
497  */
498 int	hat_startstat(struct as *);
499 void	hat_getstat(struct as *, caddr_t, size_t, uint_t, char *, int);
500 void	hat_freestat(struct as *, int);
501 void	hat_resvstat(size_t, struct as *, caddr_t);
502 
503 /*
504  * Transitionary routine while we still allow hat_getkpfnum(caddr_t)
505  * to return a pfn for kernel memory, but want to warn the user that
506  * it isn't supported.
507  */
508 void	hat_getkpfnum_badcall(void *caller);
509 
510 /*
511  * Relocation callback routines. Currently only sfmmu HAT supports
512  * these.
513  */
514 extern int	hat_add_callback(id_t, caddr_t, uint_t, uint_t, void *,
515 	pfn_t *, void **);
516 extern id_t	hat_register_callback(int,
517 	int (*prehandler)(caddr_t, uint_t, uint_t, void *),
518 	int (*posthandler)(caddr_t, uint_t, uint_t, void *, pfn_t),
519 	int (*errhandler)(caddr_t, uint_t, uint_t, void *), int);
520 extern void	hat_delete_callback(caddr_t, uint_t, void *, uint_t, void *);
521 
522 /*
523  * hat_add_callback()/hat_delete_callback() flags.
524  */
525 #define	HAC_NOSLEEP	0x0
526 #define	HAC_SLEEP	0x1
527 #define	HAC_PAGELOCK	0x2
528 
/*
 * Suspend/unsuspend handler callback arguments.
 * NOTE(review): HAT_SUSPEND/HAT_UNSUSPEND share a value, as do
 * HAT_PRESUSPEND/HAT_POSTUNSUSPEND — presumably disambiguated by
 * which handler (pre/post) receives them; confirm before changing.
 */
532 #define	HAT_SUSPEND		0x0010
533 #define	HAT_UNSUSPEND		0x0010
534 #define	HAT_PRESUSPEND		0x0020
535 #define	HAT_POSTUNSUSPEND	0x0020
536 
537 /*
538  * Error handler callback arguments. See the block comments
539  * before the implementation of hat_add_callback() for an
540  * explanation of what these mean.
541  */
542 #define	HAT_CB_ERR_LEAKED	0x1
543 
544 #endif /* _KERNEL */
545 
546 /*
547  * The size of the bit array for ref and mod bit storage must be a power of 2.
548  * 2 bits are collected for each page.  Below the power used is 4,
549  * which is 16 8-bit characters = 128 bits, ref and mod bit information
550  * for 64 pages.
551  */
552 #define	HRM_SHIFT		4
553 #define	HRM_BYTES		(1 << HRM_SHIFT)
554 #define	HRM_PAGES		((HRM_BYTES * NBBY) / 2)
555 #define	HRM_PGPERBYTE		(NBBY/2)
556 #define	HRM_PGBYTEMASK		(HRM_PGPERBYTE-1)
557 
558 #define	HRM_PGOFFMASK		((HRM_PGPERBYTE-1) << MMU_PAGESHIFT)
559 #define	HRM_BASEOFFSET		(((MMU_PAGESIZE * HRM_PAGES) - 1))
560 #define	HRM_BASEMASK		(~(HRM_BASEOFFSET))
561 
562 #define	HRM_BASESHIFT		(MMU_PAGESHIFT + (HRM_SHIFT + 2))
563 #define	HRM_PAGEMASK		(MMU_PAGEMASK ^ HRM_BASEMASK)
564 
565 #define	HRM_HASHSIZE		0x200
566 #define	HRM_HASHMASK		(HRM_HASHSIZE - 1)
567 
568 #define	HRM_BLIST_INCR		0x200
569 
570 /*
571  * The structure for maintaining referenced and modified information
572  */
/*
 * One block of ref/mod statistics; hrm_bits holds 2 bits per page
 * (see HRM_* constants above) for the HRM_PAGES pages starting at
 * hrm_base.  Blocks are linked both per-as and into a hash table.
 */
struct hrmstat {
	struct as	*hrm_as;	/* stat block belongs to this as */
	uintptr_t	hrm_base;	/* base of block */
	ushort_t	hrm_id;		/* opaque identifier, one of a_vbits */
	struct hrmstat	*hrm_anext;	/* as statistics block list */
	struct hrmstat	*hrm_hnext;	/* list for hashed blocks */
	uchar_t		hrm_bits[HRM_BYTES]; /* the ref and mod bits */
};
581 
582 /*
583  * For global monitoring of the reference and modified bits
584  * of all address spaces we reserve one id bit.
585  */
586 #define	HRM_SWSMONID	1
587 
588 
589 #ifdef _KERNEL
590 
591 /*
592  * Hat locking functions
593  * XXX - these two functions are currently being used by hatstats
594  * 	they can be removed by using a per-as mutex for hatstats.
595  */
596 void	hat_enter(struct hat *);
597 void	hat_exit(struct hat *);
598 
599 #endif /* _KERNEL */
600 
601 #ifdef	__cplusplus
602 }
603 #endif
604 
605 #endif	/* _VM_HAT_H */
606