// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Large data benchmark.
// The JSON data is a summary of agl's changes in the
// go, webkit, and chromium open source projects.
// We benchmark converting between the JSON form
// and in-memory data structures.

package json

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"internal/testenv"
	"io/ioutil"
	"os"
	"reflect"
	"runtime"
	"strings"
	"sync"
	"testing"
)

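// codeResponse and codeNode mirror the shape of the change summary stored
// in testdata/code.json.gz.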
type codeResponse struct {
	Tree     *codeNode `json:"tree"`
	Username string    `json:"username"`
}

type codeNode struct {
	Name     string      `json:"name"`
	Kids     []*codeNode `json:"kids"`
	CLWeight float64     `json:"cl_weight"`
	Touches  int         `json:"touches"`
	MinT     int64       `json:"min_t"`
	MaxT     int64       `json:"max_t"`
	MeanT    int64       `json:"mean_t"`
}

var codeJSON []byte
var codeStruct codeResponse

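// codeInit reads and gunzips testdata/code.json.gz into codeJSON,
// unmarshals it into codeStruct, and verifies that re-marshaling the
// struct reproduces the original bytes.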
func codeInit() {
	f, err := os.Open("testdata/code.json.gz")
	if err != nil {
		panic(err)
	}
	defer f.Close()
	gz, err := gzip.NewReader(f)
	if err != nil {
		panic(err)
	}
	data, err := ioutil.ReadAll(gz)
	if err != nil {
		panic(err)
	}

	codeJSON = data

	if err := Unmarshal(codeJSON, &codeStruct); err != nil {
		panic("unmarshal code.json: " + err.Error())
	}

	if data, err = Marshal(&codeStruct); err != nil {
		panic("marshal code.json: " + err.Error())
	}

	if !bytes.Equal(data, codeJSON) {
		println("different lengths", len(data), len(codeJSON))
		for i := 0; i < len(data) && i < len(codeJSON); i++ {
			if data[i] != codeJSON[i] {
				println("re-marshal: changed at byte", i)
				println("orig: ", string(codeJSON[i-10:i+10]))
				println("new: ", string(data[i-10:i+10]))
				break
			}
		}
		panic("re-marshal code.json: different result")
	}
}

func BenchmarkCodeEncoder(b *testing.B) {
	b.ReportAllocs()
	if codeJSON == nil {
		b.StopTimer()
		codeInit()
		b.StartTimer()
	}
	b.RunParallel(func(pb *testing.PB) {
		enc := NewEncoder(ioutil.Discard)
		for pb.Next() {
			if err := enc.Encode(&codeStruct); err != nil {
				b.Fatal("Encode:", err)
			}
		}
	})
	b.SetBytes(int64(len(codeJSON)))
}

func BenchmarkCodeMarshal(b *testing.B) {
	b.ReportAllocs()
	if codeJSON == nil {
		b.StopTimer()
		codeInit()
		b.StartTimer()
	}
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			if _, err := Marshal(&codeStruct); err != nil {
				b.Fatal("Marshal:", err)
			}
		}
	})
	b.SetBytes(int64(len(codeJSON)))
}

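// benchMarshalBytes returns a benchmark function that marshals a struct
// whose single []byte field holds n bytes.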
func benchMarshalBytes(n int) func(*testing.B) {
	sample := []byte("hello world")
	// Use a struct pointer to avoid an allocation when passing it as an
	// interface parameter to Marshal.
	v := &struct {
		Bytes []byte
	}{
		bytes.Repeat(sample, (n/len(sample))+1)[:n],
	}
	return func(b *testing.B) {
		for i := 0; i < b.N; i++ {
			if _, err := Marshal(v); err != nil {
				b.Fatal("Marshal:", err)
			}
		}
	}
}

func BenchmarkMarshalBytes(b *testing.B) {
	b.ReportAllocs()
	// 32 fits within encodeState.scratch.
	b.Run("32", benchMarshalBytes(32))
	// 256 doesn't fit in encodeState.scratch, but is small enough to
	// allocate and avoid the slower base64.NewEncoder.
	b.Run("256", benchMarshalBytes(256))
	// 4096 is large enough that we want to avoid allocating for it.
	b.Run("4096", benchMarshalBytes(4096))
}

func BenchmarkCodeDecoder(b *testing.B) {
	b.ReportAllocs()
	if codeJSON == nil {
		b.StopTimer()
		codeInit()
		b.StartTimer()
	}
	b.RunParallel(func(pb *testing.PB) {
		var buf bytes.Buffer
		dec := NewDecoder(&buf)
		var r codeResponse
		for pb.Next() {
			buf.Write(codeJSON)
			// Pad the buffer with newlines to hide EOF from the decoder.
			buf.WriteByte('\n')
			buf.WriteByte('\n')
			buf.WriteByte('\n')
			if err := dec.Decode(&r); err != nil {
				b.Fatal("Decode:", err)
			}
		}
	})
	b.SetBytes(int64(len(codeJSON)))
}

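// BenchmarkUnicodeDecoder measures decoding a short string containing a
// surrogate-pair escape.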
func BenchmarkUnicodeDecoder(b *testing.B) {
	b.ReportAllocs()
	j := []byte(`"\uD83D\uDE01"`)
	b.SetBytes(int64(len(j)))
	r := bytes.NewReader(j)
	dec := NewDecoder(r)
	var out string
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		if err := dec.Decode(&out); err != nil {
			b.Fatal("Decode:", err)
		}
		r.Seek(0, 0)
	}
}

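// BenchmarkDecoderStream measures decoding a long stream of small values
// through a single Decoder, topping up the input buffer as it drains.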
func BenchmarkDecoderStream(b *testing.B) {
	b.ReportAllocs()
	b.StopTimer()
	var buf bytes.Buffer
	dec := NewDecoder(&buf)
	buf.WriteString(`"` + strings.Repeat("x", 1000000) + `"` + "\n\n\n")
	var x interface{}
	if err := dec.Decode(&x); err != nil {
		b.Fatal("Decode:", err)
	}
	ones := strings.Repeat(" 1\n", 300000) + "\n\n\n"
	b.StartTimer()
	for i := 0; i < b.N; i++ {
		if i%300000 == 0 {
			buf.WriteString(ones)
		}
		x = nil
		if err := dec.Decode(&x); err != nil || x != 1.0 {
			b.Fatalf("Decode: %v after %d", err, i)
		}
	}
}

func BenchmarkCodeUnmarshal(b *testing.B) {
	b.ReportAllocs()
	if codeJSON == nil {
		b.StopTimer()
		codeInit()
		b.StartTimer()
	}
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			var r codeResponse
			if err := Unmarshal(codeJSON, &r); err != nil {
				b.Fatal("Unmarshal:", err)
			}
		}
	})
	b.SetBytes(int64(len(codeJSON)))
}

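// BenchmarkCodeUnmarshalReuse is like BenchmarkCodeUnmarshal, but reuses
// the destination value across iterations instead of allocating a new one.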
func BenchmarkCodeUnmarshalReuse(b *testing.B) {
	b.ReportAllocs()
	if codeJSON == nil {
		b.StopTimer()
		codeInit()
		b.StartTimer()
	}
	b.RunParallel(func(pb *testing.PB) {
		var r codeResponse
		for pb.Next() {
			if err := Unmarshal(codeJSON, &r); err != nil {
				b.Fatal("Unmarshal:", err)
			}
		}
	})
	b.SetBytes(int64(len(codeJSON)))
}

func BenchmarkUnmarshalString(b *testing.B) {
	b.ReportAllocs()
	data := []byte(`"hello, world"`)
	b.RunParallel(func(pb *testing.PB) {
		var s string
		for pb.Next() {
			if err := Unmarshal(data, &s); err != nil {
				b.Fatal("Unmarshal:", err)
			}
		}
	})
}

func BenchmarkUnmarshalFloat64(b *testing.B) {
	b.ReportAllocs()
	data := []byte(`3.14`)
	b.RunParallel(func(pb *testing.PB) {
		var f float64
		for pb.Next() {
			if err := Unmarshal(data, &f); err != nil {
				b.Fatal("Unmarshal:", err)
			}
		}
	})
}

func BenchmarkUnmarshalInt64(b *testing.B) {
	b.ReportAllocs()
	data := []byte(`3`)
	b.RunParallel(func(pb *testing.PB) {
		var x int64
		for pb.Next() {
			if err := Unmarshal(data, &x); err != nil {
				b.Fatal("Unmarshal:", err)
			}
		}
	})
}

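// BenchmarkIssue10335 unmarshals an object containing an empty nested
// object into a struct with no fields; see golang.org/issue/10335.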
func BenchmarkIssue10335(b *testing.B) {
	b.ReportAllocs()
	j := []byte(`{"a":{ }}`)
	b.RunParallel(func(pb *testing.PB) {
		var s struct{}
		for pb.Next() {
			if err := Unmarshal(j, &s); err != nil {
				b.Fatal(err)
			}
		}
	})
}

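// BenchmarkUnmapped measures unmarshaling an object whose keys have no
// corresponding fields in the destination struct.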
func BenchmarkUnmapped(b *testing.B) {
	b.ReportAllocs()
	j := []byte(`{"s": "hello", "y": 2, "o": {"x": 0}, "a": [1, 99, {"x": 1}]}`)
	b.RunParallel(func(pb *testing.PB) {
		var s struct{}
		for pb.Next() {
			if err := Unmarshal(j, &s); err != nil {
				b.Fatal(err)
			}
		}
	})
}

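// BenchmarkTypeFieldsCache measures cachedTypeFields across many distinct
// struct types, covering both cache misses and cache hits.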
func BenchmarkTypeFieldsCache(b *testing.B) {
	b.ReportAllocs()
	var maxTypes int = 1e6
	if testenv.Builder() != "" {
		maxTypes = 1e3 // restrict cache sizes on builders
	}

	// Dynamically generate many new types.
	types := make([]reflect.Type, maxTypes)
	fs := []reflect.StructField{{
		Type:  reflect.TypeOf(""),
		Index: []int{0},
	}}
	for i := range types {
		fs[0].Name = fmt.Sprintf("TypeFieldsCache%d", i)
		types[i] = reflect.StructOf(fs)
	}

	// clearCache clears the field cache. Other JSON operations must not be
	// running concurrently.
	clearCache := func() {
		fieldCache = sync.Map{}
	}

	// MissTypes tests the performance of repeated cache misses.
	// This measures the time to rebuild a cache of size nt.
	for nt := 1; nt <= maxTypes; nt *= 10 {
		ts := types[:nt]
		b.Run(fmt.Sprintf("MissTypes%d", nt), func(b *testing.B) {
			nc := runtime.GOMAXPROCS(0)
			for i := 0; i < b.N; i++ {
				clearCache()
				var wg sync.WaitGroup
				for j := 0; j < nc; j++ {
					wg.Add(1)
					go func(j int) {
						for _, t := range ts[(j*len(ts))/nc : ((j+1)*len(ts))/nc] {
							cachedTypeFields(t)
						}
						wg.Done()
					}(j)
				}
				wg.Wait()
			}
		})
	}

	// HitTypes tests the performance of repeated cache hits.
	// This measures the average time of each cache lookup.
	for nt := 1; nt <= maxTypes; nt *= 10 {
		// Pre-warm a cache of size nt.
		clearCache()
		for _, t := range types[:nt] {
			cachedTypeFields(t)
		}
		b.Run(fmt.Sprintf("HitTypes%d", nt), func(b *testing.B) {
			b.RunParallel(func(pb *testing.PB) {
				for pb.Next() {
					cachedTypeFields(types[0])
				}
			})
		})
	}
}