// Copyright (c) 2017 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package indexers

import (
	"errors"

	"github.com/btcsuite/btcd/blockchain"
	"github.com/btcsuite/btcd/chaincfg"
	"github.com/btcsuite/btcd/chaincfg/chainhash"
	"github.com/btcsuite/btcd/database"
	"github.com/btcsuite/btcd/wire"
	"github.com/btcsuite/btcutil"
	"github.com/btcsuite/btcutil/gcs"
	"github.com/btcsuite/btcutil/gcs/builder"
)

const (
	// cfIndexName is the human-readable name for the index.
	cfIndexName = "committed filter index"
)

// Committed filters currently come in one flavor: basic. For each block, the
// index stores the filter itself, the filter's header, and the filter's hash,
// each keyed by the block's hash and kept in its own bucket.
var (
	// cfIndexParentBucketKey is the name of the parent bucket used to
	// house the index. The rest of the buckets live below this bucket.
	cfIndexParentBucketKey = []byte("cfindexparentbucket")

	// cfIndexKeys is an array of db bucket names used to house indexes of
	// block hashes to cfilters.
	cfIndexKeys = [][]byte{
		[]byte("cf0byhashidx"),
	}

	// cfHeaderKeys is an array of db bucket names used to house indexes of
	// block hashes to cf headers.
	cfHeaderKeys = [][]byte{
		[]byte("cf0headerbyhashidx"),
	}

	// cfHashKeys is an array of db bucket names used to house indexes of
	// block hashes to cf hashes.
	cfHashKeys = [][]byte{
		[]byte("cf0hashbyhashidx"),
	}

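	// maxFilterType is the highest supported filter type value, derived
	// from the number of defined header buckets.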
	maxFilterType = uint8(len(cfHeaderKeys) - 1)

	// zeroHash is the chainhash.Hash value of all zero bytes, defined here
	// for convenience.
	zeroHash chainhash.Hash
)

// dbFetchFilterIdxEntry retrieves a data blob from the filter index database.
// An entry's absence is not considered an error.
func dbFetchFilterIdxEntry(dbTx database.Tx, key []byte, h *chainhash.Hash) ([]byte, error) {
	idx := dbTx.Metadata().Bucket(cfIndexParentBucketKey).Bucket(key)
	return idx.Get(h[:]), nil
}

// dbStoreFilterIdxEntry stores a data blob in the filter index database.
func dbStoreFilterIdxEntry(dbTx database.Tx, key []byte, h *chainhash.Hash, f []byte) error {
	idx := dbTx.Metadata().Bucket(cfIndexParentBucketKey).Bucket(key)
	return idx.Put(h[:], f)
}

// dbDeleteFilterIdxEntry deletes a data blob from the filter index database.
func dbDeleteFilterIdxEntry(dbTx database.Tx, key []byte, h *chainhash.Hash) error {
	idx := dbTx.Metadata().Bucket(cfIndexParentBucketKey).Bucket(key)
	return idx.Delete(h[:])
}

// CfIndex implements a committed filter (cf) by hash index.
type CfIndex struct {
	db          database.DB
	chainParams *chaincfg.Params
}

// Ensure the CfIndex type implements the Indexer interface.
var _ Indexer = (*CfIndex)(nil)

// Ensure the CfIndex type implements the NeedsInputser interface.
var _ NeedsInputser = (*CfIndex)(nil)

// NeedsInputs signals that the index requires the referenced inputs in order
// to properly create the index.
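//
// The basic filter commits to the previous output scripts spent by a block's
// inputs in addition to the block's own output scripts, so the spent outputs
// must be made available when a block is connected.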
//
// This implements the NeedsInputser interface.
func (idx *CfIndex) NeedsInputs() bool {
	return true
}

// Init initializes the hash-based cf index. This is part of the Indexer
// interface.
func (idx *CfIndex) Init() error {
	return nil // Nothing to do.
}

// Key returns the database key to use for the index as a byte slice. This is
// part of the Indexer interface.
func (idx *CfIndex) Key() []byte {
	return cfIndexParentBucketKey
}

// Name returns the human-readable name of the index. This is part of the
// Indexer interface.
func (idx *CfIndex) Name() string {
	return cfIndexName
}

// Create is invoked when the indexer manager determines the index needs to
// be created for the first time. It creates the buckets for the hash-based
// cf filter, filter header, and filter hash indexes (regular filters only
// currently).
func (idx *CfIndex) Create(dbTx database.Tx) error {
	meta := dbTx.Metadata()

	cfIndexParentBucket, err := meta.CreateBucket(cfIndexParentBucketKey)
	if err != nil {
		return err
	}

	for _, bucketName := range cfIndexKeys {
		_, err = cfIndexParentBucket.CreateBucket(bucketName)
		if err != nil {
			return err
		}
	}

	for _, bucketName := range cfHeaderKeys {
		_, err = cfIndexParentBucket.CreateBucket(bucketName)
		if err != nil {
			return err
		}
	}

	for _, bucketName := range cfHashKeys {
		_, err = cfIndexParentBucket.CreateBucket(bucketName)
		if err != nil {
			return err
		}
	}

	return nil
}

// storeFilter stores a given filter, and performs the steps needed to
// generate the filter's header.
func storeFilter(dbTx database.Tx, block *btcutil.Block, f *gcs.Filter,
	filterType wire.FilterType) error {
	if uint8(filterType) > maxFilterType {
		return errors.New("unsupported filter type")
	}

	// Figure out which buckets to use.
	fkey := cfIndexKeys[filterType]
	hkey := cfHeaderKeys[filterType]
	hashkey := cfHashKeys[filterType]

	// Start by storing the filter.
	h := block.Hash()
	filterBytes, err := f.NBytes()
	if err != nil {
		return err
	}
	err = dbStoreFilterIdxEntry(dbTx, fkey, h, filterBytes)
	if err != nil {
		return err
	}

	// Next store the filter hash.
	filterHash, err := builder.GetFilterHash(f)
	if err != nil {
		return err
	}
	err = dbStoreFilterIdxEntry(dbTx, hashkey, h, filterHash[:])
	if err != nil {
		return err
	}

	// Then fetch the previous block's filter header.
	var prevHeader *chainhash.Hash
	ph := &block.MsgBlock().Header.PrevBlock
	if ph.IsEqual(&zeroHash) {
		prevHeader = &zeroHash
	} else {
		pfh, err := dbFetchFilterIdxEntry(dbTx, hkey, ph)
		if err != nil {
			return err
		}

		// Reconstruct the previous block's filter header from the
		// fetched bytes.
		prevHeader, err = chainhash.NewHash(pfh)
		if err != nil {
			return err
		}
	}

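	// Construct the new block's filter header. Per BIP 157 this is the
	// double-SHA256 of the filter's hash concatenated with the previous
	// block's filter header.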
	fh, err := builder.MakeHeaderForFilter(f, *prevHeader)
	if err != nil {
		return err
	}
	return dbStoreFilterIdxEntry(dbTx, hkey, h, fh[:])
}

// ConnectBlock is invoked by the index manager when a new block has been
// connected to the main chain. This indexer adds a hash-to-cf mapping for
// every passed block. This is part of the Indexer interface.
func (idx *CfIndex) ConnectBlock(dbTx database.Tx, block *btcutil.Block,
	stxos []blockchain.SpentTxOut) error {

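	// Gather the scripts of the previous outputs spent by the block's
	// transactions since the basic filter commits to them in addition to
	// the block's own output scripts.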
	prevScripts := make([][]byte, len(stxos))
	for i, stxo := range stxos {
		prevScripts[i] = stxo.PkScript
	}

	f, err := builder.BuildBasicFilter(block.MsgBlock(), prevScripts)
	if err != nil {
		return err
	}

	return storeFilter(dbTx, block, f, wire.GCSFilterRegular)
}

// DisconnectBlock is invoked by the index manager when a block has been
// disconnected from the main chain. This indexer removes the hash-to-cf
// mapping for every passed block. This is part of the Indexer interface.
func (idx *CfIndex) DisconnectBlock(dbTx database.Tx, block *btcutil.Block,
	_ []blockchain.SpentTxOut) error {

	for _, key := range cfIndexKeys {
		err := dbDeleteFilterIdxEntry(dbTx, key, block.Hash())
		if err != nil {
			return err
		}
	}

	for _, key := range cfHeaderKeys {
		err := dbDeleteFilterIdxEntry(dbTx, key, block.Hash())
		if err != nil {
			return err
		}
	}

	for _, key := range cfHashKeys {
		err := dbDeleteFilterIdxEntry(dbTx, key, block.Hash())
		if err != nil {
			return err
		}
	}

	return nil
}

// entryByBlockHash fetches a filter index entry of a particular type
// (e.g. filter, filter header, etc.) for a filter type and block hash.
func (idx *CfIndex) entryByBlockHash(filterTypeKeys [][]byte,
	filterType wire.FilterType, h *chainhash.Hash) ([]byte, error) {

	if uint8(filterType) > maxFilterType {
		return nil, errors.New("unsupported filter type")
	}
	key := filterTypeKeys[filterType]

	var entry []byte
	err := idx.db.View(func(dbTx database.Tx) error {
		var err error
		entry, err = dbFetchFilterIdxEntry(dbTx, key, h)
		return err
	})
	return entry, err
}

// entriesByBlockHashes batch fetches filter index entries of a particular
// type (e.g. filter, filter header, etc.) for a filter type and a slice of
// block hashes.
func (idx *CfIndex) entriesByBlockHashes(filterTypeKeys [][]byte,
	filterType wire.FilterType, blockHashes []*chainhash.Hash) ([][]byte, error) {

	if uint8(filterType) > maxFilterType {
		return nil, errors.New("unsupported filter type")
	}
	key := filterTypeKeys[filterType]

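	// Missing entries come back as nil rather than as errors, matching
	// the behavior of dbFetchFilterIdxEntry.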
	entries := make([][]byte, 0, len(blockHashes))
	err := idx.db.View(func(dbTx database.Tx) error {
		for _, blockHash := range blockHashes {
			entry, err := dbFetchFilterIdxEntry(dbTx, key, blockHash)
			if err != nil {
				return err
			}
			entries = append(entries, entry)
		}
		return nil
	})
	return entries, err
}

// FilterByBlockHash returns the serialized contents of a block's basic
// committed filter.
func (idx *CfIndex) FilterByBlockHash(h *chainhash.Hash,
	filterType wire.FilterType) ([]byte, error) {
	return idx.entryByBlockHash(cfIndexKeys, filterType, h)
}

// FiltersByBlockHashes returns the serialized contents of the basic
// committed filters for a set of blocks by hash.
func (idx *CfIndex) FiltersByBlockHashes(blockHashes []*chainhash.Hash,
	filterType wire.FilterType) ([][]byte, error) {
	return idx.entriesByBlockHashes(cfIndexKeys, filterType, blockHashes)
}

// FilterHeaderByBlockHash returns the serialized contents of a block's basic
// committed filter header.
func (idx *CfIndex) FilterHeaderByBlockHash(h *chainhash.Hash,
	filterType wire.FilterType) ([]byte, error) {
	return idx.entryByBlockHash(cfHeaderKeys, filterType, h)
}

// FilterHeadersByBlockHashes returns the serialized contents of the basic
// committed filter headers for a set of blocks by hash.
func (idx *CfIndex) FilterHeadersByBlockHashes(blockHashes []*chainhash.Hash,
	filterType wire.FilterType) ([][]byte, error) {
	return idx.entriesByBlockHashes(cfHeaderKeys, filterType, blockHashes)
}

// FilterHashByBlockHash returns the serialized contents of a block's basic
// committed filter hash.
func (idx *CfIndex) FilterHashByBlockHash(h *chainhash.Hash,
	filterType wire.FilterType) ([]byte, error) {
	return idx.entryByBlockHash(cfHashKeys, filterType, h)
}

// FilterHashesByBlockHashes returns the serialized contents of the basic
// committed filter hashes for a set of blocks by hash.
func (idx *CfIndex) FilterHashesByBlockHashes(blockHashes []*chainhash.Hash,
	filterType wire.FilterType) ([][]byte, error) {
	return idx.entriesByBlockHashes(cfHashKeys, filterType, blockHashes)
}

// NewCfIndex returns a new instance of an indexer that is used to create a
// mapping of the hashes of all blocks in the blockchain to their respective
// committed filters.
//
// It implements the Indexer interface which plugs into the IndexManager that
// in turn is used by the blockchain package. This allows the index to be
// seamlessly maintained along with the chain.
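//
// A rough usage sketch (the surrounding blockchain.Config setup is
// illustrative and elided):
//
//	cfIndex := indexers.NewCfIndex(db, chainParams)
//	indexManager := indexers.NewManager(db, []indexers.Indexer{cfIndex})
//	chain, err := blockchain.New(&blockchain.Config{
//		DB:           db,
//		ChainParams:  chainParams,
//		TimeSource:   blockchain.NewMedianTime(),
//		IndexManager: indexManager,
//	})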
func NewCfIndex(db database.DB, chainParams *chaincfg.Params) *CfIndex {
	return &CfIndex{db: db, chainParams: chainParams}
}

// DropCfIndex drops the CF index from the provided database if it exists.
func DropCfIndex(db database.DB, interrupt <-chan struct{}) error {
	return dropIndex(db, cfIndexParentBucketKey, cfIndexName, interrupt)
}