// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package mvcc

import (
	"math/rand"
	"os"
	"testing"

	"github.com/coreos/etcd/lease"
	"github.com/coreos/etcd/mvcc/backend"
)

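// BenchmarkWatchableStorePut benchmarks the Put operation
// on a watchable store.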
func BenchmarkWatchableStorePut(b *testing.B) {
	be, tmpPath := backend.NewDefaultTmpBackend()
	s := New(be, &lease.FakeLessor{}, nil)
	defer cleanup(s, be, tmpPath)

	// arbitrary number of bytes
	bytesN := 64
	keys := createBytesSlice(bytesN, b.N)
	vals := createBytesSlice(bytesN, b.N)

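	// exclude the key/value generation above from the benchmark timing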
	b.ResetTimer()
	b.ReportAllocs()
	for i := 0; i < b.N; i++ {
		s.Put(keys[i], vals[i], lease.NoLease)
	}
}

// BenchmarkWatchableStoreTxnPut benchmarks the Put operation
// with transaction begin and end, where each transaction involves
// synchronization operations such as mutex locking.
func BenchmarkWatchableStoreTxnPut(b *testing.B) {
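	// pass a fake consistent index so that each transaction commit
	// also exercises the store's consistent-index bookkeeping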
	var i fakeConsistentIndex
	be, tmpPath := backend.NewDefaultTmpBackend()
	s := New(be, &lease.FakeLessor{}, &i)
	defer cleanup(s, be, tmpPath)

	// arbitrary number of bytes
	bytesN := 64
	keys := createBytesSlice(bytesN, b.N)
	vals := createBytesSlice(bytesN, b.N)

	b.ResetTimer()
	b.ReportAllocs()
	for i := 0; i < b.N; i++ {
		id := s.TxnBegin()
		if _, err := s.TxnPut(id, keys[i], vals[i], lease.NoLease); err != nil {
			plog.Fatalf("txn put error: %v", err)
		}
		s.TxnEnd(id)
	}
}

// BenchmarkWatchableStoreWatchSyncPut benchmarks the case of
// many synced watchers receiving a Put notification.
func BenchmarkWatchableStoreWatchSyncPut(b *testing.B) {
	be, tmpPath := backend.NewDefaultTmpBackend()
	s := newWatchableStore(be, &lease.FakeLessor{}, nil)
	defer cleanup(s, be, tmpPath)

	k := []byte("testkey")
	v := []byte("testval")

	w := s.NewWatchStream()
	defer w.Close()
	watchIDs := make([]WatchID, b.N)
	for i := range watchIDs {
		// 0 for startRev to keep watchers in synced
		watchIDs[i] = w.Watch(k, nil, 0)
	}

	b.ResetTimer()
	b.ReportAllocs()

	// trigger watchers
	s.Put(k, v, lease.NoLease)
	for range watchIDs {
		<-w.Chan()
	}
	select {
	case wc := <-w.Chan():
		b.Fatalf("unexpected data %v", wc)
	default:
	}
}

// BenchmarkWatchableStoreUnsyncedCancel benchmarks the Cancel function
// for unsynced watchers in a watchableStore. It creates k*N watchers
// to populate unsynced with a reasonably large number of watchers, and
// measures the time it takes to cancel N watchers out of the k*N. The
// performance is expected to differ depending on the unsynced member
// implementation.
// TODO: k is an arbitrary constant. We need to figure out what factor
// to use to simulate real-world use cases.
func BenchmarkWatchableStoreUnsyncedCancel(b *testing.B) {
	be, tmpPath := backend.NewDefaultTmpBackend()
	s := NewStore(be, &lease.FakeLessor{}, nil)

	// manually create watchableStore instead of calling newWatchableStore
	// because newWatchableStore spawns a syncWatchersLoop goroutine that
	// periodically syncs the watchers in the unsynced map. We want to keep
	// watchers in unsynced for this benchmark.
	ws := &watchableStore{
		store:    s,
		unsynced: newWatcherGroup(),

		// to make the benchmark not crash from assigning to a nil map;
		// 'synced' doesn't get populated in this benchmark.
		synced: newWatcherGroup(),
	}

	defer func() {
		ws.store.Close()
		os.Remove(tmpPath)
	}()

	// Put a key so that we can spawn watchers on that key
	// (testKey in this test). This increases the rev to 1,
	// and later we can set the watcher's startRev to 1,
	// forcing watchers to be in unsynced.
	testKey := []byte("foo")
	testValue := []byte("bar")
	s.Put(testKey, testValue, lease.NoLease)

	w := ws.NewWatchStream()

	const k int = 2
	benchSampleN := b.N
	watcherN := k * benchSampleN

	watchIDs := make([]WatchID, watcherN)
	for i := 0; i < watcherN; i++ {
		// non-0 value to keep watchers in unsynced
		watchIDs[i] = w.Watch(testKey, nil, 1)
	}

	// randomly cancel N watchers so the benchmark is not biased towards
	// ordered data structures such as a slice.
	ix := rand.Perm(watcherN)

	b.ResetTimer()
	b.ReportAllocs()

	// cancel N watchers
	for _, idx := range ix[:benchSampleN] {
		if err := w.Cancel(watchIDs[idx]); err != nil {
			b.Error(err)
		}
	}
}

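// BenchmarkWatchableStoreSyncedCancel benchmarks the Cancel function
// for synced watchers in a watchableStore.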
func BenchmarkWatchableStoreSyncedCancel(b *testing.B) {
	be, tmpPath := backend.NewDefaultTmpBackend()
	s := newWatchableStore(be, &lease.FakeLessor{}, nil)

	defer func() {
		s.store.Close()
		os.Remove(tmpPath)
	}()

	// Put a key so that we can spawn watchers on that key
	testKey := []byte("foo")
	testValue := []byte("bar")
	s.Put(testKey, testValue, lease.NoLease)

	w := s.NewWatchStream()

	// put 1 million watchers on the same key
	const watcherN = 1000000

	watchIDs := make([]WatchID, watcherN)
	for i := 0; i < watcherN; i++ {
		// 0 for startRev to keep watchers in synced
		watchIDs[i] = w.Watch(testKey, nil, 0)
	}

	// randomly cancel watchers so the benchmark is not biased towards
	// ordered data structures such as a slice.
	ix := rand.Perm(watcherN)

	b.ResetTimer()
	b.ReportAllocs()

	for _, idx := range ix {
		if err := w.Cancel(watchIDs[idx]); err != nil {
			b.Error(err)
		}
	}
}