// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Address range data structure.
//
// This file contains an implementation of a data structure which
// manages ordered address ranges.

package runtime

import (
	"runtime/internal/sys"
	"unsafe"
)

// addrRange represents a region of address space.
type addrRange struct {
	// base and limit together represent the region of address space
	// [base, limit). That is, base is inclusive, limit is exclusive.
	base, limit uintptr
}

// size returns the size of the range in bytes.
func (a addrRange) size() uintptr {
	if a.limit <= a.base {
		return 0
	}
	return a.limit - a.base
}

// contains returns whether or not the range contains a given address.
func (a addrRange) contains(addr uintptr) bool {
	return addr >= a.base && addr < a.limit
}

// subtract removes any overlap with b from a, then returns the
// resulting range. subtract assumes that a and b either don't
// overlap at all, overlap only on one side, or are equal.
// If b is strictly contained in a, thus forcing a split, it throws.
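//
// A minimal worked example (addresses are made up for illustration):
//
//	a := addrRange{base: 0x1000, limit: 0x3000} // [0x1000, 0x3000)
//	b := addrRange{base: 0x2000, limit: 0x3000} // overlaps a on the high side
//	a.subtract(b)                               // yields [0x1000, 0x2000)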
func (a addrRange) subtract(b addrRange) addrRange {
	if a.base >= b.base && a.limit <= b.limit {
		return addrRange{}
	} else if a.base < b.base && a.limit > b.limit {
		throw("bad prune")
	} else if a.limit > b.limit && a.base < b.limit {
		a.base = b.limit
	} else if a.base < b.base && a.limit > b.base {
		a.limit = b.base
	}
	return a
}

// addrRanges is a data structure holding a collection of ranges of
// address space.
//
// The ranges are coalesced eagerly to reduce the
// number of ranges it holds.
//
// The slice backing store for ranges is persistentalloc'd
// and thus there is no way to free it.
//
// addrRanges is not thread-safe.
type addrRanges struct {
	// ranges is a slice of ranges sorted by base.
	ranges []addrRange

	// sysStat is the stat to track allocations by this type.
	sysStat *uint64
}

func (a *addrRanges) init(sysStat *uint64) {
	ranges := (*notInHeapSlice)(unsafe.Pointer(&a.ranges))
	ranges.len = 0
	ranges.cap = 16
	ranges.array = (*notInHeap)(persistentalloc(unsafe.Sizeof(addrRange{})*uintptr(ranges.cap), sys.PtrSize, sysStat))
	a.sysStat = sysStat
}

// findSucc returns the first index in a such that base is
// less than the base of the addrRange at that index.
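//
// For example (bases are illustrative), if a.ranges has bases
// [0x1000, 0x4000, 0x7000], then:
//
//	a.findSucc(0x2000) // == 1, the index of the range based at 0x4000
//	a.findSucc(0x8000) // == 3 == len(a.ranges), since no base is greater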
func (a *addrRanges) findSucc(base uintptr) int {
	// TODO(mknyszek): Consider a binary search for large arrays.
	// While iterating over these ranges is potentially expensive,
	// the expected number of ranges is small, ideally just 1,
	// since Go heaps are usually mostly contiguous.
	for i := range a.ranges {
		if base < a.ranges[i].base {
			return i
		}
	}
	return len(a.ranges)
}

// contains returns true if a covers the address addr.
func (a *addrRanges) contains(addr uintptr) bool {
	// findSucc returns the index of the first range whose base is
	// strictly greater than addr, so the only range that could
	// contain addr is the one just before that index.
	i := a.findSucc(addr)
	if i == 0 {
		return false
	}
	return a.ranges[i-1].contains(addr)
}

// add inserts a new address range into a.
//
// r must not overlap with any address range in a.
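//
// A sketch of the eager coalescing (addresses are illustrative): if a
// currently holds [0x1000, 0x2000) and [0x3000, 0x4000), then
//
//	a.add(addrRange{base: 0x2000, limit: 0x3000})
//
// borders both neighbors and merges all three into the single range
// [0x1000, 0x4000).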
func (a *addrRanges) add(r addrRange) {
	// The copies in this function are potentially expensive, but this data
	// structure is meant to represent the Go heap. At worst, copying this
	// would take ~160µs assuming a conservative copying rate of 25 GiB/s (the
	// copy will almost never trigger a page fault) for a 1 TiB heap with 4 MiB
	// arenas which is completely discontiguous. ~160µs is still a lot, but in
	// practice most platforms have 64 MiB arenas (which cuts this by a factor
	// of 16) and Go heaps are usually mostly contiguous, so the chance that
	// an addrRanges even grows to that size is extremely low.
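	//
	// Spelling out that estimate: 1 TiB / 4 MiB = 262144 arenas, and at
	// 16 bytes per addrRange (two uintptrs on 64-bit) that is 4 MiB of
	// data to copy; 4 MiB / 25 GiB/s ≈ 160µs.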

	// Because we assume r is not currently represented in a,
	// findSucc gives us our insertion index.
	i := a.findSucc(r.base)
	coalescesDown := i > 0 && a.ranges[i-1].limit == r.base
	coalescesUp := i < len(a.ranges) && r.limit == a.ranges[i].base
	if coalescesUp && coalescesDown {
		// We have neighbors and they both border us.
		// Merge a.ranges[i-1], r, and a.ranges[i] together into a.ranges[i-1].
		a.ranges[i-1].limit = a.ranges[i].limit

		// Delete a.ranges[i].
		copy(a.ranges[i:], a.ranges[i+1:])
		a.ranges = a.ranges[:len(a.ranges)-1]
	} else if coalescesDown {
		// We have a neighbor at a lower address only and it borders us.
		// Merge the new space into a.ranges[i-1].
		a.ranges[i-1].limit = r.limit
	} else if coalescesUp {
		// We have a neighbor at a higher address only and it borders us.
		// Merge the new space into a.ranges[i].
		a.ranges[i].base = r.base
	} else {
		// We may or may not have neighbors which don't border us.
		// Add the new range.
		if len(a.ranges)+1 > cap(a.ranges) {
			// Grow the array. Note that this leaks the old array, but since
			// we're doubling we have at most 2x waste. For a 1 TiB heap and
			// 4 MiB arenas which are all discontiguous (both very conservative
			// assumptions), this would waste at most 4 MiB of memory.
			oldRanges := a.ranges
			ranges := (*notInHeapSlice)(unsafe.Pointer(&a.ranges))
			ranges.len = len(oldRanges) + 1
			ranges.cap = cap(oldRanges) * 2
			ranges.array = (*notInHeap)(persistentalloc(unsafe.Sizeof(addrRange{})*uintptr(ranges.cap), sys.PtrSize, a.sysStat))

			// Copy in the old array, but make space for the new range.
			copy(a.ranges[:i], oldRanges[:i])
			copy(a.ranges[i+1:], oldRanges[i:])
		} else {
			a.ranges = a.ranges[:len(a.ranges)+1]
			copy(a.ranges[i+1:], a.ranges[i:])
		}
		a.ranges[i] = r
	}
}