// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime_test

import (
	"fmt"
	"math/rand"
	. "runtime"
	"testing"
)

// makePallocData produces an initialized PallocData by setting
// the ranges described in alloc and scavenged.
func makePallocData(alloc, scavenged []BitRange) *PallocData {
	b := new(PallocData)
	for _, v := range alloc {
		if v.N == 0 {
			// Skip N==0. It's harmless and allocRange doesn't
			// handle this case.
			continue
		}
		b.AllocRange(v.I, v.N)
	}
	for _, v := range scavenged {
		if v.N == 0 {
			// See the previous loop.
			continue
		}
		b.ScavengedSetRange(v.I, v.N)
	}
	return b
}

func TestFillAligned(t *testing.T) {
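	// fillAlignedSlow is a reference implementation: for each m-aligned
	// group of m bits in x, if any bit in the group is set, set all m bits
	// of that group in the result.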
	fillAlignedSlow := func(x uint64, m uint) uint64 {
		if m == 1 {
			return x
		}
		out := uint64(0)
		for i := uint(0); i < 64; i += m {
			for j := uint(0); j < m; j++ {
				if x&(uint64(1)<<(i+j)) != 0 {
					out |= ((uint64(1) << m) - 1) << i
					break
				}
			}
		}
		return out
	}
	check := func(x uint64, m uint) {
		want := fillAlignedSlow(x, m)
		if got := FillAligned(x, m); got != want {
			t.Logf("got:  %064b", got)
			t.Logf("want: %064b", want)
			t.Errorf("bad fillAligned(%016x, %d)", x, m)
		}
	}
	for m := uint(1); m <= 64; m *= 2 {
		tests := []uint64{
			0x0000000000000000,
			0x00000000ffffffff,
			0xffffffff00000000,
			0x8000000000000001,
			0xf00000000000000f,
			0xf00000010050000f,
			0xffffffffffffffff,
			0x0000000000000001,
			0x0000000000000002,
			0x0000000000000008,
			uint64(1) << (m - 1),
			uint64(1) << m,
			// Try a few fixed arbitrary examples.
			0xb02b9effcf137016,
			0x3975a076a9fbff18,
			0x0f8c88ec3b81506e,
			0x60f14d80ef2fa0e6,
		}
		for _, test := range tests {
			check(test, m)
		}
		for i := 0; i < 1000; i++ {
			// Try a pseudo-random number.
			check(rand.Uint64(), m)

			if m > 1 {
				// For m != 1, construct a slightly more interesting
				// random test: generate a bitmap in which each m-aligned
				// group of m bits is either left zero or set randomly.
				val := uint64(0)
				for n := uint(0); n < 64; n += m {
					// For each group of m bits, flip a coin:
					// * Leave them as zero.
					// * Set them randomly.
					if rand.Uint64()%2 == 0 {
						val |= (rand.Uint64() & ((1 << m) - 1)) << n
					}
				}
				check(val, m)
			}
		}
	}
}

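// TestPallocDataFindScavengeCandidate checks that FindScavengeCandidate
// returns the expected range of free, unscavenged pages for a variety of
// hand-constructed chunk bitmaps, minimums, and maximums.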
func TestPallocDataFindScavengeCandidate(t *testing.T) {
	type test struct {
		alloc, scavenged []BitRange // bit ranges to mark allocated and scavenged
		min, max         uintptr    // min and max arguments to FindScavengeCandidate
		want             BitRange   // expected candidate range
	}
	tests := map[string]test{
		"MixedMin1": {
			alloc:     []BitRange{{0, 40}, {42, PallocChunkPages - 42}},
			scavenged: []BitRange{{0, 41}, {42, PallocChunkPages - 42}},
			min:       1,
			max:       PallocChunkPages,
			want:      BitRange{41, 1},
		},
		"MultiMin1": {
			alloc:     []BitRange{{0, 63}, {65, 20}, {87, PallocChunkPages - 87}},
			scavenged: []BitRange{{86, 1}},
			min:       1,
			max:       PallocChunkPages,
			want:      BitRange{85, 1},
		},
	}
	// Try out different page minimums.
	for m := uintptr(1); m <= 64; m *= 2 {
		suffix := fmt.Sprintf("Min%d", m)
		tests["AllFree"+suffix] = test{
			min:  m,
			max:  PallocChunkPages,
			want: BitRange{0, PallocChunkPages},
		}
		tests["AllScavenged"+suffix] = test{
			scavenged: []BitRange{{0, PallocChunkPages}},
			min:       m,
			max:       PallocChunkPages,
			want:      BitRange{0, 0},
		}
		tests["NoneFree"+suffix] = test{
			alloc:     []BitRange{{0, PallocChunkPages}},
			scavenged: []BitRange{{PallocChunkPages / 2, PallocChunkPages / 2}},
			min:       m,
			max:       PallocChunkPages,
			want:      BitRange{0, 0},
		}
		tests["StartFree"+suffix] = test{
			alloc: []BitRange{{uint(m), PallocChunkPages - uint(m)}},
			min:   m,
			max:   PallocChunkPages,
			want:  BitRange{0, uint(m)},
		}
		tests["EndFree"+suffix] = test{
			alloc: []BitRange{{0, PallocChunkPages - uint(m)}},
			min:   m,
			max:   PallocChunkPages,
			want:  BitRange{PallocChunkPages - uint(m), uint(m)},
		}
		tests["Straddle64"+suffix] = test{
			alloc: []BitRange{{0, 64 - uint(m)}, {64 + uint(m), PallocChunkPages - (64 + uint(m))}},
			min:   m,
			max:   2 * m,
			want:  BitRange{64 - uint(m), 2 * uint(m)},
		}
		tests["BottomEdge64WithFull"+suffix] = test{
			alloc:     []BitRange{{64, 64}, {128 + 3*uint(m), PallocChunkPages - (128 + 3*uint(m))}},
			scavenged: []BitRange{{1, 10}},
			min:       m,
			max:       3 * m,
			want:      BitRange{128, 3 * uint(m)},
		}
		tests["BottomEdge64WithPocket"+suffix] = test{
			alloc:     []BitRange{{64, 62}, {127, 1}, {128 + 3*uint(m), PallocChunkPages - (128 + 3*uint(m))}},
			scavenged: []BitRange{{1, 10}},
			min:       m,
			max:       3 * m,
			want:      BitRange{128, 3 * uint(m)},
		}
		tests["Max0"+suffix] = test{
			scavenged: []BitRange{{0, PallocChunkPages - uint(m)}},
			min:       m,
			max:       0,
			want:      BitRange{PallocChunkPages - uint(m), uint(m)},
		}
		if m <= 8 {
			tests["OneFree"+suffix] = test{
				alloc: []BitRange{{0, 40}, {40 + uint(m), PallocChunkPages - (40 + uint(m))}},
				min:   m,
				max:   PallocChunkPages,
				want:  BitRange{40, uint(m)},
			}
			tests["OneScavenged"+suffix] = test{
				alloc:     []BitRange{{0, 40}, {40 + uint(m), PallocChunkPages - (40 + uint(m))}},
				scavenged: []BitRange{{40, 1}},
				min:       m,
				max:       PallocChunkPages,
				want:      BitRange{0, 0},
			}
		}
		if m > 1 {
			tests["MaxUnaligned"+suffix] = test{
				scavenged: []BitRange{{0, PallocChunkPages - uint(m*2-1)}},
				min:       m,
				max:       m - 2,
				want:      BitRange{PallocChunkPages - uint(m), uint(m)},
			}
			tests["SkipSmall"+suffix] = test{
				alloc: []BitRange{{0, 64 - uint(m)}, {64, 5}, {70, 11}, {82, PallocChunkPages - 82}},
				min:   m,
				max:   m,
				want:  BitRange{64 - uint(m), uint(m)},
			}
			tests["SkipMisaligned"+suffix] = test{
				alloc: []BitRange{{0, 64 - uint(m)}, {64, 63}, {127 + uint(m), PallocChunkPages - (127 + uint(m))}},
				min:   m,
				max:   m,
				want:  BitRange{64 - uint(m), uint(m)},
			}
			tests["MaxLessThan"+suffix] = test{
				scavenged: []BitRange{{0, PallocChunkPages - uint(m)}},
				min:       m,
				max:       1,
				want:      BitRange{PallocChunkPages - uint(m), uint(m)},
			}
		}
	}
	if PhysHugePageSize > uintptr(PageSize) {
		// Check hugepage preserving behavior.
		bits := uint(PhysHugePageSize / uintptr(PageSize))
		tests["PreserveHugePageBottom"] = test{
			alloc: []BitRange{{bits + 2, PallocChunkPages - (bits + 2)}},
			min:   1,
			max:   3, // Make it so that max would have us try to break the huge page.
			want:  BitRange{0, bits + 2},
		}
		if 3*bits < PallocChunkPages {
			// We need at least 3 huge pages in a chunk for this test to make sense.
			tests["PreserveHugePageMiddle"] = test{
				alloc: []BitRange{{0, bits - 10}, {2*bits + 10, PallocChunkPages - (2*bits + 10)}},
				min:   1,
				max:   12, // Make it so that max would have us try to break the huge page.
				want:  BitRange{bits, bits + 10},
			}
		}
		tests["PreserveHugePageTop"] = test{
			alloc: []BitRange{{0, PallocChunkPages - bits}},
			min:   1,
			max:   1, // Even one page would break a huge page in this case.
			want:  BitRange{PallocChunkPages - bits, bits},
		}
	}
	for name, v := range tests {
		v := v
		t.Run(name, func(t *testing.T) {
			b := makePallocData(v.alloc, v.scavenged)
			start, size := b.FindScavengeCandidate(PallocChunkPages-1, v.min, v.max)
			got := BitRange{start, size}
			// Two empty ranges are considered equal regardless of their
			// start, since both mean "no candidate found".
			if !(got.N == 0 && v.want.N == 0) && got != v.want {
				t.Fatalf("candidate mismatch: got %v, want %v", got, v.want)
			}
		})
	}
}

// Tests end-to-end scavenging on a pageAlloc.
func TestPageAllocScavenge(t *testing.T) {
	if GOOS == "openbsd" && testing.Short() {
		t.Skip("skipping because virtual memory is limited; see #36210")
	}
	type test struct {
		request, expect uintptr // bytes requested from Scavenge and bytes expected back
	}
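	// The scavenger works in units of physical pages, so the smallest amount
	// it can return in a single step is minPages runtime pages (at least one).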
	minPages := PhysPageSize / PageSize
	if minPages < 1 {
		minPages = 1
	}
	type setup struct {
		beforeAlloc map[ChunkIdx][]BitRange // allocated ranges per chunk before scavenging
		beforeScav  map[ChunkIdx][]BitRange // scavenged ranges per chunk before scavenging
		expect      []test                  // sequence of scavenge requests and expected results
		afterScav   map[ChunkIdx][]BitRange // expected scavenged ranges per chunk afterwards
	}
	tests := map[string]setup{
		"AllFreeUnscavExhaust": {
			beforeAlloc: map[ChunkIdx][]BitRange{
				BaseChunkIdx:     {},
				BaseChunkIdx + 1: {},
				BaseChunkIdx + 2: {},
			},
			beforeScav: map[ChunkIdx][]BitRange{
				BaseChunkIdx:     {},
				BaseChunkIdx + 1: {},
				BaseChunkIdx + 2: {},
			},
			expect: []test{
				{^uintptr(0), 3 * PallocChunkPages * PageSize},
			},
			afterScav: map[ChunkIdx][]BitRange{
				BaseChunkIdx:     {{0, PallocChunkPages}},
				BaseChunkIdx + 1: {{0, PallocChunkPages}},
				BaseChunkIdx + 2: {{0, PallocChunkPages}},
			},
		},
		"NoneFreeUnscavExhaust": {
			beforeAlloc: map[ChunkIdx][]BitRange{
				BaseChunkIdx:     {{0, PallocChunkPages}},
				BaseChunkIdx + 1: {},
				BaseChunkIdx + 2: {{0, PallocChunkPages}},
			},
			beforeScav: map[ChunkIdx][]BitRange{
				BaseChunkIdx:     {},
				BaseChunkIdx + 1: {{0, PallocChunkPages}},
				BaseChunkIdx + 2: {},
			},
			expect: []test{
				{^uintptr(0), 0},
			},
			afterScav: map[ChunkIdx][]BitRange{
				BaseChunkIdx:     {},
				BaseChunkIdx + 1: {{0, PallocChunkPages}},
				BaseChunkIdx + 2: {},
			},
		},
		"ScavHighestPageFirst": {
			beforeAlloc: map[ChunkIdx][]BitRange{
				BaseChunkIdx: {},
			},
			beforeScav: map[ChunkIdx][]BitRange{
				BaseChunkIdx: {{uint(minPages), PallocChunkPages - uint(2*minPages)}},
			},
			expect: []test{
				{1, minPages * PageSize},
			},
			afterScav: map[ChunkIdx][]BitRange{
				BaseChunkIdx: {{uint(minPages), PallocChunkPages - uint(minPages)}},
			},
		},
		"ScavMultiple": {
			beforeAlloc: map[ChunkIdx][]BitRange{
				BaseChunkIdx: {},
			},
			beforeScav: map[ChunkIdx][]BitRange{
				BaseChunkIdx: {{uint(minPages), PallocChunkPages - uint(2*minPages)}},
			},
			expect: []test{
				{minPages * PageSize, minPages * PageSize},
				{minPages * PageSize, minPages * PageSize},
			},
			afterScav: map[ChunkIdx][]BitRange{
				BaseChunkIdx: {{0, PallocChunkPages}},
			},
		},
		"ScavMultiple2": {
			beforeAlloc: map[ChunkIdx][]BitRange{
				BaseChunkIdx:     {},
				BaseChunkIdx + 1: {},
			},
			beforeScav: map[ChunkIdx][]BitRange{
				BaseChunkIdx:     {{uint(minPages), PallocChunkPages - uint(2*minPages)}},
				BaseChunkIdx + 1: {{0, PallocChunkPages - uint(2*minPages)}},
			},
			expect: []test{
				{2 * minPages * PageSize, 2 * minPages * PageSize},
				{minPages * PageSize, minPages * PageSize},
				{minPages * PageSize, minPages * PageSize},
			},
			afterScav: map[ChunkIdx][]BitRange{
				BaseChunkIdx:     {{0, PallocChunkPages}},
				BaseChunkIdx + 1: {{0, PallocChunkPages}},
			},
		},
		"ScavDiscontiguous": {
			beforeAlloc: map[ChunkIdx][]BitRange{
				BaseChunkIdx:       {},
				BaseChunkIdx + 0xe: {},
			},
			beforeScav: map[ChunkIdx][]BitRange{
				BaseChunkIdx:       {{uint(minPages), PallocChunkPages - uint(2*minPages)}},
				BaseChunkIdx + 0xe: {{uint(2 * minPages), PallocChunkPages - uint(2*minPages)}},
			},
			expect: []test{
				{2 * minPages * PageSize, 2 * minPages * PageSize},
				{^uintptr(0), 2 * minPages * PageSize},
				{^uintptr(0), 0},
			},
			afterScav: map[ChunkIdx][]BitRange{
				BaseChunkIdx:       {{0, PallocChunkPages}},
				BaseChunkIdx + 0xe: {{0, PallocChunkPages}},
			},
		},
	}
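	// Spreading the chunks 0x1000 chunk indices apart needs a sparse address
	// space, so this case is only set up on 64-bit platforms.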
	if PageAlloc64Bit != 0 {
		tests["ScavAllVeryDiscontiguous"] = setup{
			beforeAlloc: map[ChunkIdx][]BitRange{
				BaseChunkIdx:          {},
				BaseChunkIdx + 0x1000: {},
			},
			beforeScav: map[ChunkIdx][]BitRange{
				BaseChunkIdx:          {},
				BaseChunkIdx + 0x1000: {},
			},
			expect: []test{
				{^uintptr(0), 2 * PallocChunkPages * PageSize},
				{^uintptr(0), 0},
			},
			afterScav: map[ChunkIdx][]BitRange{
				BaseChunkIdx:          {{0, PallocChunkPages}},
				BaseChunkIdx + 0x1000: {{0, PallocChunkPages}},
			},
		}
	}
	for name, v := range tests {
		v := v
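		// runTest performs each scavenge request in v.expect, checking the
		// number of bytes returned, then compares the final allocator state
		// against one constructed directly from afterScav. It runs once for
		// each value of locked, which is passed through to Scavenge.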
		runTest := func(t *testing.T, locked bool) {
			b := NewPageAlloc(v.beforeAlloc, v.beforeScav)
			defer FreePageAlloc(b)

			for iter, h := range v.expect {
				if got := b.Scavenge(h.request, locked); got != h.expect {
					t.Fatalf("bad scavenge #%d: want %d, got %d", iter+1, h.expect, got)
				}
			}
			want := NewPageAlloc(v.beforeAlloc, v.afterScav)
			defer FreePageAlloc(want)

			checkPageAlloc(t, want, b)
		}
		t.Run(name, func(t *testing.T) {
			runTest(t, false)
		})
		t.Run(name+"Locked", func(t *testing.T) {
			runTest(t, true)
		})
	}
}