1package cuckoo
2
3import (
4	"bytes"
5	"encoding/gob"
6)
7
// Defaults applied by configure when the corresponding option is not supplied.
const (
	// DefaultLoadFactor is the fill ratio of the newest filter above which
	// Insert appends a new, larger filter to the chain.
	DefaultLoadFactor = 0.9
	// DefaultCapacity is the capacity of the initial filter.
	DefaultCapacity   = 10000
)
12
// ScalableCuckooFilter chains multiple cuckoo filters: inserts go into the
// newest filter, and a new, larger one is appended when it becomes too full.
type ScalableCuckooFilter struct {
	filters    []*Filter // oldest first; the last element receives new inserts
	loadFactor float32   // fill threshold of the newest filter that triggers scaling
	//when scale(last filter size * loadFactor >= capacity) get new filter capacity
	scaleFactor func(capacity uint) uint
}
19
20type option func(*ScalableCuckooFilter)
21
// Store is the gob-serializable representation of a ScalableCuckooFilter,
// produced by Encode and consumed by DecodeScalableFilter.
type Store struct {
	Bytes      [][]byte // each inner filter's Encode output, oldest first
	LoadFactor float32
}
26
27/*
28 by default option the grow capacity is:
29 capacity , total
30 4096  4096
31 8192  12288
3216384  28672
3332768  61440
3465536  126,976
35*/
36func NewScalableCuckooFilter(opts ...option) *ScalableCuckooFilter {
37	sfilter := new(ScalableCuckooFilter)
38	for _, opt := range opts {
39		opt(sfilter)
40	}
41	configure(sfilter)
42	return sfilter
43}
44
45func (sf *ScalableCuckooFilter) Lookup(data []byte) bool {
46	for _, filter := range sf.filters {
47		if filter.Lookup(data) {
48			return true
49		}
50	}
51	return false
52}
53
54func (sf *ScalableCuckooFilter) Reset() {
55	for _, filter := range sf.filters {
56		filter.Reset()
57	}
58}
59
60func (sf *ScalableCuckooFilter) Insert(data []byte) bool {
61	needScale := false
62	lastFilter := sf.filters[len(sf.filters)-1]
63	if (float32(lastFilter.count) / float32(len(lastFilter.buckets))) > sf.loadFactor {
64		needScale = true
65	} else {
66		b := lastFilter.Insert(data)
67		needScale = !b
68	}
69	if !needScale {
70		return true
71	}
72	newFilter := NewFilter(sf.scaleFactor(uint(len(lastFilter.buckets))))
73	sf.filters = append(sf.filters, newFilter)
74	return newFilter.Insert(data)
75}
76
77func (sf *ScalableCuckooFilter) InsertUnique(data []byte) bool {
78	if sf.Lookup(data) {
79		return false
80	}
81	return sf.Insert(data)
82}
83
84func (sf *ScalableCuckooFilter) Delete(data []byte) bool {
85	for _, filter := range sf.filters {
86		if filter.Delete(data) {
87			return true
88		}
89	}
90	return false
91}
92
93func (sf *ScalableCuckooFilter) Count() uint {
94	var sum uint
95	for _, filter := range sf.filters {
96		sum += filter.count
97	}
98	return sum
99
100}
101
102func (sf *ScalableCuckooFilter) Encode() []byte {
103	slice := make([][]byte, len(sf.filters))
104	for i, filter := range sf.filters {
105		encode := filter.Encode()
106		slice[i] = encode
107	}
108	store := &Store{
109		Bytes:      slice,
110		LoadFactor: sf.loadFactor,
111	}
112	buf := bytes.NewBuffer(nil)
113	enc := gob.NewEncoder(buf)
114	err := enc.Encode(store)
115	if err != nil {
116		return nil
117	}
118	return buf.Bytes()
119}
120
121func (sf *ScalableCuckooFilter) DecodeWithParam(fBytes []byte, opts ...option) (*ScalableCuckooFilter, error) {
122	instance, err := DecodeScalableFilter(fBytes)
123	if err != nil {
124		return nil, err
125	}
126	for _, opt := range opts {
127		opt(instance)
128	}
129	return instance, nil
130}
131
132func DecodeScalableFilter(fBytes []byte) (*ScalableCuckooFilter, error) {
133	buf := bytes.NewBuffer(fBytes)
134	dec := gob.NewDecoder(buf)
135	store := &Store{}
136	err := dec.Decode(store)
137	if err != nil {
138		return nil, err
139	}
140	filterSize := len(store.Bytes)
141	instance := NewScalableCuckooFilter(func(filter *ScalableCuckooFilter) {
142		filter.filters = make([]*Filter, filterSize)
143	}, func(filter *ScalableCuckooFilter) {
144		filter.loadFactor = store.LoadFactor
145	})
146	for i, oneBytes := range store.Bytes {
147		filter, err := Decode(oneBytes)
148		if err != nil {
149			return nil, err
150		}
151		instance.filters[i] = filter
152	}
153	return instance, nil
154
155}
156
157func configure(sfilter *ScalableCuckooFilter) {
158	if sfilter.loadFactor == 0 {
159		sfilter.loadFactor = DefaultLoadFactor
160	}
161	if sfilter.scaleFactor == nil {
162		sfilter.scaleFactor = func(currentSize uint) uint {
163			return currentSize * bucketSize * 2
164		}
165	}
166	if sfilter.filters == nil {
167		initFilter := NewFilter(DefaultCapacity)
168		sfilter.filters = []*Filter{initFilter}
169	}
170}
171