/*
Copyright 2012 Google Inc.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

     http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Tests for groupcache.

package groupcache

import (
	"context"
	"errors"
	"fmt"
	"hash/crc32"
	"math/rand"
	"reflect"
	"sync"
	"testing"
	"time"
	"unsafe"

	"github.com/golang/protobuf/proto"

	pb "github.com/golang/groupcache/groupcachepb"
	testpb "github.com/golang/groupcache/testpb"
)

var (
	once                    sync.Once
	stringGroup, protoGroup Getter

	stringc = make(chan string)

	dummyCtx = context.TODO()

	// cacheFills is the number of times the stringGroup or
	// protoGroup Getter has been called. Deltas are measured
	// with the countFills helper below.
	cacheFills AtomicInt
)

const (
	stringGroupName = "string-group"
	protoGroupName  = "proto-group"
	testMessageType = "google3/net/groupcache/go/test_proto.TestMessage"
	fromChan        = "from-chan"
	cacheSize       = 1 << 20
)

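// testSetup builds the two groups shared by the tests below. It must run
// exactly once (via once.Do) before stringGroup or protoGroup is used.
// Both Getters block reading from stringc when asked for the special
// fromChan key, which lets the duplicate-suppression tests hold a load
// in flight.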
func testSetup() {
	stringGroup = NewGroup(stringGroupName, cacheSize, GetterFunc(func(_ context.Context, key string, dest Sink) error {
		if key == fromChan {
			key = <-stringc
		}
		cacheFills.Add(1)
		return dest.SetString("ECHO:" + key)
	}))

	protoGroup = NewGroup(protoGroupName, cacheSize, GetterFunc(func(_ context.Context, key string, dest Sink) error {
		if key == fromChan {
			key = <-stringc
		}
		cacheFills.Add(1)
		return dest.SetProto(&testpb.TestMessage{
			Name: proto.String("ECHO:" + key),
			City: proto.String("SOME-CITY"),
		})
	}))
}

// TestGetDupSuppressString tests that a Getter's Get method is called only
// once when two callers are outstanding. This is the string variant.
func TestGetDupSuppressString(t *testing.T) {
	once.Do(testSetup)
	// Start two getters. The first should block (waiting to read
	// from stringc) and the second should latch on to the first
	// one.
	resc := make(chan string, 2)
	for i := 0; i < 2; i++ {
		go func() {
			var s string
			if err := stringGroup.Get(dummyCtx, fromChan, StringSink(&s)); err != nil {
				resc <- "ERROR:" + err.Error()
				return
			}
			resc <- s
		}()
	}

	// Wait a bit so both goroutines get merged together via
	// singleflight.
	// TODO(bradfitz): decide whether there are any non-offensive
	// debug/test hooks that could be added to singleflight to
	// make a sleep here unnecessary.
	time.Sleep(250 * time.Millisecond)

	// Unblock the first getter, which should unblock the second
	// as well.
	stringc <- "foo"

	for i := 0; i < 2; i++ {
		select {
		case v := <-resc:
			if v != "ECHO:foo" {
				t.Errorf("got %q; want %q", v, "ECHO:foo")
			}
		case <-time.After(5 * time.Second):
			t.Errorf("timeout waiting on getter #%d of 2", i+1)
		}
	}
}

// TestGetDupSuppressProto tests that a Getter's Get method is called only
// once when two callers are outstanding. This is the proto variant.
func TestGetDupSuppressProto(t *testing.T) {
	once.Do(testSetup)
	// Start two getters. The first should block (waiting to read
	// from stringc) and the second should latch on to the first
	// one.
	resc := make(chan *testpb.TestMessage, 2)
	for i := 0; i < 2; i++ {
		go func() {
			tm := new(testpb.TestMessage)
			if err := protoGroup.Get(dummyCtx, fromChan, ProtoSink(tm)); err != nil {
				tm.Name = proto.String("ERROR:" + err.Error())
			}
			resc <- tm
		}()
	}

	// Wait a bit so both goroutines get merged together via
	// singleflight.
	// TODO(bradfitz): decide whether there are any non-offensive
	// debug/test hooks that could be added to singleflight to
	// make a sleep here unnecessary.
	time.Sleep(250 * time.Millisecond)

	// Unblock the first getter, which should unblock the second
	// as well.
	stringc <- "Fluffy"
	want := &testpb.TestMessage{
		Name: proto.String("ECHO:Fluffy"),
		City: proto.String("SOME-CITY"),
	}
	for i := 0; i < 2; i++ {
		select {
		case v := <-resc:
			if !reflect.DeepEqual(v, want) {
				t.Errorf(" Got: %v\nWant: %v", proto.CompactTextString(v), proto.CompactTextString(want))
			}
		case <-time.After(5 * time.Second):
			t.Errorf("timeout waiting on getter #%d of 2", i+1)
		}
	}
}

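// countFills runs f and reports how many Getter invocations (cache fills)
// it triggered, by diffing the global cacheFills counter.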
func countFills(f func()) int64 {
	fills0 := cacheFills.Get()
	f()
	return cacheFills.Get() - fills0
}

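// TestCaching tests that ten gets of the same key cause exactly one cache
// fill; the other nine must be served from the cache.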
func TestCaching(t *testing.T) {
	once.Do(testSetup)
	fills := countFills(func() {
		for i := 0; i < 10; i++ {
			var s string
			if err := stringGroup.Get(dummyCtx, "TestCaching-key", StringSink(&s)); err != nil {
				t.Fatal(err)
			}
		}
	})
	if fills != 1 {
		t.Errorf("expected 1 cache fill; got %d", fills)
	}
}

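// TestCacheEviction tests that flooding the cache with more than cacheSize
// bytes of other keys evicts an earlier entry, so fetching it again costs
// one fresh cache fill.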
func TestCacheEviction(t *testing.T) {
	once.Do(testSetup)
	testKey := "TestCacheEviction-key"
	getTestKey := func() {
		var res string
		for i := 0; i < 10; i++ {
			if err := stringGroup.Get(dummyCtx, testKey, StringSink(&res)); err != nil {
				t.Fatal(err)
			}
		}
	}
	fills := countFills(getTestKey)
	if fills != 1 {
		t.Fatalf("expected 1 cache fill; got %d", fills)
	}

	g := stringGroup.(*Group)
	evict0 := g.mainCache.nevict

	// Trash the cache with other keys. The byte accounting is
	// approximate, so flood slightly past cacheSize to be sure.
	var bytesFlooded int64
	for bytesFlooded < cacheSize+1024 {
		var res string
		key := fmt.Sprintf("dummy-key-%d", bytesFlooded)
		stringGroup.Get(dummyCtx, key, StringSink(&res))
		bytesFlooded += int64(len(key) + len(res))
	}
	evicts := g.mainCache.nevict - evict0
	if evicts <= 0 {
		t.Errorf("evicts = %v; want more than 0", evicts)
	}

	// Test that the key is gone.
	fills = countFills(getTestKey)
	if fills != 1 {
		t.Fatalf("expected 1 cache fill after cache trashing; got %d", fills)
	}
}

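// fakePeer is an in-process ProtoGetter that records how many times it was
// asked for a value and can be told to fail, simulating a bad peer.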
type fakePeer struct {
	hits int
	fail bool
}

func (p *fakePeer) Get(_ context.Context, in *pb.GetRequest, out *pb.GetResponse) error {
	p.hits++
	if p.fail {
		return errors.New("simulated error from peer")
	}
	out.Value = []byte("got:" + in.GetKey())
	return nil
}

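// fakePeers is a fixed peer list whose PickPeer maps each key to a slot via
// CRC-32, so the distribution of hits across peers is deterministic. A nil
// slot means the key is owned locally.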
type fakePeers []ProtoGetter

func (p fakePeers) PickPeer(key string) (peer ProtoGetter, ok bool) {
	if len(p) == 0 {
		return
	}
	n := crc32.Checksum([]byte(key), crc32.IEEETable) % uint32(len(p))
	return p[n], p[n] != nil
}

// TestPeers tests that peers (virtual, in-process) are hit, and how often.
func TestPeers(t *testing.T) {
	once.Do(testSetup)
	rand.Seed(123)
	peer0 := &fakePeer{}
	peer1 := &fakePeer{}
	peer2 := &fakePeer{}
	peerList := fakePeers([]ProtoGetter{peer0, peer1, peer2, nil})
	const cacheSize = 0 // disabled
	localHits := 0
	getter := func(_ context.Context, key string, dest Sink) error {
		localHits++
		return dest.SetString("got:" + key)
	}
	testGroup := newGroup("TestPeers-group", cacheSize, GetterFunc(getter), peerList)
	run := func(name string, n int, wantSummary string) {
		// Reset counters.
		localHits = 0
		for _, p := range []*fakePeer{peer0, peer1, peer2} {
			p.hits = 0
		}

		for i := 0; i < n; i++ {
			key := fmt.Sprintf("key-%d", i)
			want := "got:" + key
			var got string
			err := testGroup.Get(dummyCtx, key, StringSink(&got))
			if err != nil {
				t.Errorf("%s: error on key %q: %v", name, key, err)
				continue
			}
			if got != want {
				t.Errorf("%s: for key %q, got %q; want %q", name, key, got, want)
			}
		}
		summary := func() string {
			return fmt.Sprintf("localHits = %d, peers = %d %d %d", localHits, peer0.hits, peer1.hits, peer2.hits)
		}
		if got := summary(); got != wantSummary {
			t.Errorf("%s: got %q; want %q", name, got, wantSummary)
		}
	}
	resetCacheSize := func(maxBytes int64) {
		g := testGroup
		g.cacheBytes = maxBytes
		g.mainCache = cache{}
		g.hotCache = cache{}
	}

	// Base case; peers all up, with no problems.
	resetCacheSize(1 << 20)
	run("base", 200, "localHits = 49, peers = 51 49 51")

	// Verify the cache was hit: all the localHits are gone, as are
	// some of the peer hits (those for keys randomly chosen to be
	// populated in the hot cache).
	run("cached_base", 200, "localHits = 0, peers = 49 47 48")
	resetCacheSize(0)

	// With one of the peers being down.
	// TODO(bradfitz): on a peer number being unavailable, the
	// consistent hashing should maybe keep trying others to
	// spread the load out. Currently it fails back to local
	// execution if the first consistent-hash slot is unavailable.
	peerList[0] = nil
	run("one_peer_down", 200, "localHits = 100, peers = 0 49 51")

	// With peer0 failing: its requests error out and fall back to
	// local execution.
	peerList[0] = peer0
	peer0.fail = true
	run("peer0_failing", 200, "localHits = 100, peers = 51 49 51")
}

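// TestTruncatingByteSliceTarget tests that TruncatingByteSliceSink fills the
// provided slice up to its length and silently truncates longer values.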
func TestTruncatingByteSliceTarget(t *testing.T) {
	once.Do(testSetup) // stringGroup is nil if this test runs in isolation
	var buf [100]byte
	s := buf[:]
	if err := stringGroup.Get(dummyCtx, "short", TruncatingByteSliceSink(&s)); err != nil {
		t.Fatal(err)
	}
	if want := "ECHO:short"; string(s) != want {
		t.Errorf("short key got %q; want %q", s, want)
	}

	s = buf[:6]
	if err := stringGroup.Get(dummyCtx, "truncated", TruncatingByteSliceSink(&s)); err != nil {
		t.Fatal(err)
	}
	if want := "ECHO:t"; string(s) != want {
		t.Errorf("truncated key got %q; want %q", s, want)
	}
}

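// TestAllocatingByteSliceTarget tests that AllocatingByteSliceSink copies the
// bytes it is given: the destination slice and the sink's internal view must
// not alias the input or each other.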
func TestAllocatingByteSliceTarget(t *testing.T) {
	var dst []byte
	sink := AllocatingByteSliceSink(&dst)

	inBytes := []byte("some bytes")
	if err := sink.SetBytes(inBytes); err != nil {
		t.Fatalf("SetBytes failed: %v", err)
	}
	if want := "some bytes"; string(dst) != want {
		t.Errorf("SetBytes resulted in %q; want %q", dst, want)
	}
	v, err := sink.view()
	if err != nil {
		t.Fatalf("view after SetBytes failed: %v", err)
	}
	if &inBytes[0] == &dst[0] {
		t.Error("inBytes and dst share memory")
	}
	if &inBytes[0] == &v.b[0] {
		t.Error("inBytes and view share memory")
	}
	if &dst[0] == &v.b[0] {
		t.Error("dst and view share memory")
	}
}

// orderedFlightGroup allows the caller to force the schedule of when
// orig.Do will be called.  This is useful to serialize calls such
// that singleflight cannot dedup them.
type orderedFlightGroup struct {
	mu     sync.Mutex
	stage1 chan bool
	stage2 chan bool
	orig   flightGroup
}

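// Do blocks on both stage channels before forwarding to the wrapped
// flightGroup; the test feeds stage1 and stage2 to decide exactly when each
// concurrent caller proceeds, while mu serializes the underlying Do calls.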
func (g *orderedFlightGroup) Do(key string, fn func() (interface{}, error)) (interface{}, error) {
	<-g.stage1
	<-g.stage2
	g.mu.Lock()
	defer g.mu.Unlock()
	return g.orig.Do(key, fn)
}

// TestNoDedup tests invariants on the cache size when singleflight is
// unable to dedup calls.
func TestNoDedup(t *testing.T) {
	const testkey = "testkey"
	const testval = "testval"
	g := newGroup("testgroup", 1024, GetterFunc(func(_ context.Context, key string, dest Sink) error {
		return dest.SetString(testval)
	}), nil)

	orderedGroup := &orderedFlightGroup{
		stage1: make(chan bool),
		stage2: make(chan bool),
		orig:   g.loadGroup,
	}
	// Replace loadGroup with our wrapper so we can control when
	// loadGroup.Do is entered for each concurrent request.
	g.loadGroup = orderedGroup

	// Issue two identical requests concurrently.  Since the cache is
	// empty, both will miss.  Both will enter load(), but we will only
	// allow one at a time to enter singleflight.Do, so the callback
	// function will be called twice.
	resc := make(chan string, 2)
	for i := 0; i < 2; i++ {
		go func() {
			var s string
			if err := g.Get(dummyCtx, testkey, StringSink(&s)); err != nil {
				resc <- "ERROR:" + err.Error()
				return
			}
			resc <- s
		}()
	}

	// Ensure both goroutines have entered the Do routine.  This implies
	// both concurrent requests have checked the cache, found it empty,
	// and called load().
	orderedGroup.stage1 <- true
	orderedGroup.stage1 <- true
	orderedGroup.stage2 <- true
	orderedGroup.stage2 <- true

	for i := 0; i < 2; i++ {
		if s := <-resc; s != testval {
			t.Errorf("result is %s want %s", s, testval)
		}
	}

	const wantItems = 1
	if g.mainCache.items() != wantItems {
		t.Errorf("mainCache has %d items, want %d", g.mainCache.items(), wantItems)
	}

	// If the singleflight callback doesn't double-check the cache again
	// upon entry, we would increment nbytes twice but the entry would
	// only be in the cache once.
	const wantBytes = int64(len(testkey) + len(testval))
	if g.mainCache.nbytes != wantBytes {
		t.Errorf("cache has %d bytes, want %d", g.mainCache.nbytes, wantBytes)
	}
}

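// TestGroupStatsAlignment tests that Group.Stats is 8-byte aligned:
// sync/atomic's 64-bit operations require 64-bit alignment on 32-bit
// platforms such as 386 and ARM, so the field's offset must be a
// multiple of 8.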
func TestGroupStatsAlignment(t *testing.T) {
	var g Group
	off := unsafe.Offsetof(g.Stats)
	if off%8 != 0 {
		t.Fatal("Stats structure is not 8-byte aligned.")
	}
}

// TODO(bradfitz): port the Google-internal full integration test into here,
// using HTTP requests instead of our RPC system.