1 /*
2    Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved.
3 
4    This program is free software; you can redistribute it and/or modify
5    it under the terms of the GNU General Public License, version 2.0,
6    as published by the Free Software Foundation.
7 
8    This program is also distributed with certain software (including
9    but not limited to OpenSSL) that is licensed under separate terms,
10    as designated in a particular file or component or in included license
11    documentation.  The authors of MySQL hereby grant you an additional
12    permission to link the program and your derivative works with the
13    separately licensed software that they have included with MySQL.
14 
15    This program is distributed in the hope that it will be useful,
16    but WITHOUT ANY WARRANTY; without even the implied warranty of
17    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
18    GNU General Public License, version 2.0, for more details.
19 
20    You should have received a copy of the GNU General Public License
21    along with this program; if not, write to the Free Software
22    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301  USA
23 */
24 
25 #ifndef DBTUX_H
26 #define DBTUX_H
27 
28 #include <ndb_limits.h>
29 #include <SimulatedBlock.hpp>
30 #include <AttributeDescriptor.hpp>
31 #include <AttributeHeader.hpp>
32 #include <ArrayPool.hpp>
33 #include <DataBuffer.hpp>
34 #include <IntrusiveList.hpp>
35 #include <md5_hash.hpp>
36 
37 // big brother
38 #include <dbtup/Dbtup.hpp>
39 
40 // packed index keys and bounds
41 #include <NdbPack.hpp>
42 
43 // signal classes
44 #include <signaldata/DictTabInfo.hpp>
45 #include <signaldata/TuxContinueB.hpp>
46 #include <signaldata/TupFrag.hpp>
47 #include <signaldata/AlterIndxImpl.hpp>
48 #include <signaldata/DropTab.hpp>
49 #include <signaldata/TuxMaint.hpp>
50 #include <signaldata/AccScan.hpp>
51 #include <signaldata/TuxBound.hpp>
52 #include <signaldata/NextScan.hpp>
53 #include <signaldata/AccLock.hpp>
54 #include <signaldata/DumpStateOrd.hpp>
55 #include <signaldata/IndexStatSignal.hpp>
56 
57 // debug
58 #ifdef VM_TRACE
59 #include <NdbOut.hpp>
60 #include <OutputStream.hpp>
61 #endif
62 
63 
64 #define JAM_FILE_ID 374
65 
66 
67 #undef max
68 #undef min
69 
70 class Configuration;
71 struct mt_BuildIndxCtx;
72 
73 class Dbtux : public SimulatedBlock {
74   friend class DbtuxProxy;
75   friend struct mt_BuildIndxCtx;
76   friend Uint32 Dbtux_mt_buildIndexFragment_wrapper_C(void*);
77 public:
78   Dbtux(Block_context& ctx, Uint32 instanceNumber = 0);
79   virtual ~Dbtux();
80 
81   // pointer to TUP instance in this thread
82   Dbtup* c_tup;
83   void execTUX_BOUND_INFO(Signal* signal);
84   void execREAD_PSEUDO_REQ(Signal* signal);
85 
86 private:
87   // sizes are in words (Uint32)
88   STATIC_CONST( MaxIndexFragments = MAX_FRAG_PER_LQH );
89   STATIC_CONST( MaxIndexAttributes = MAX_ATTRIBUTES_IN_INDEX );
90   STATIC_CONST( MaxAttrDataSize = 2 * MAX_ATTRIBUTES_IN_INDEX + MAX_KEY_SIZE_IN_WORDS );
91   STATIC_CONST( MaxXfrmDataSize = MaxAttrDataSize * MAX_XFRM_MULTIPLY);
92 public:
93   STATIC_CONST( DescPageSize = 512 );
94 private:
95   STATIC_CONST( MaxTreeNodeSize = MAX_TTREE_NODE_SIZE );
96   STATIC_CONST( MaxPrefSize = MAX_TTREE_PREF_SIZE );
97   STATIC_CONST( ScanBoundSegmentSize = 7 );
98   STATIC_CONST( MaxAccLockOps = MAX_PARALLEL_OP_PER_SCAN );
99   STATIC_CONST( MaxTreeDepth = 32 );    // strict
100 #ifdef VM_TRACE
101   // for TuxCtx::c_debugBuffer
102   STATIC_CONST( DebugBufferBytes = (MaxAttrDataSize << 2) );
103 #endif
104   BLOCK_DEFINES(Dbtux);
105 
106   // forward declarations
107   struct TuxCtx;
108 
109   // AttributeHeader size is assumed to be 1 word
110   STATIC_CONST( AttributeHeaderSize = 1 );
111 
112   /*
113    * Logical tuple address, "local key".  Identifies table tuples.
114    */
115   typedef Uint32 TupAddr;
116   STATIC_CONST( NullTupAddr = (Uint32)-1 );
117 
118   /*
119    * Physical tuple address in TUP.  Provides fast access to table tuple
120    * or index node.  Valid within the db node and across timeslices.
121    * Not valid between db nodes or across restarts.
122    *
123    * To avoid wasting a Uint16, the page id is split in two.
124    */
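  /*
   * Worked example (illustrative only, derived from the constructor and
   * getters below): TupLoc(0x00012345, 64) stores m_pageId1 = 0x0001,
   * m_pageId2 = 0x2345, m_pageOffset = 64, and getPageId() reassembles
   * (m_pageId1 << 16) | m_pageId2 = 0x00012345.
   */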
125   struct TupLoc {
126   private:
127     Uint16 m_pageId1;           // page i-value (big-endian)
128     Uint16 m_pageId2;
129     Uint16 m_pageOffset;        // page offset in words
130   public:
131     TupLoc();
132     TupLoc(Uint32 pageId, Uint16 pageOffset);
133     Uint32 getPageId() const;
134     void setPageId(Uint32 pageId);
135     Uint32 getPageOffset() const;
136     void setPageOffset(Uint32 pageOffset);
137     bool operator==(const TupLoc& loc) const;
138     bool operator!=(const TupLoc& loc) const;
139   };
140 
141   /*
142    * There is no const member NullTupLoc since the compiler may not be
143    * able to optimize it to TupLoc() constants.  Instead null values are
144    * constructed on the stack with TupLoc().
145    */
146 #define NullTupLoc TupLoc()
147 
148   // tree definitions
149 
150   /*
151    * Tree entry.  Points to a tuple in primary table via physical
152    * address of "original" tuple and tuple version.
153    *
154    * ZTUP_VERSION_BITS must be 15 (or less).
155    */
156   struct TreeEnt;
157   friend struct TreeEnt;
158   struct TreeEnt {
159     TupLoc m_tupLoc;            // address of original tuple
160     unsigned m_tupVersion : 15; // version
161     TreeEnt();
162     // methods
163     bool eqtuple(const TreeEnt ent) const;
164     bool eq(const TreeEnt ent) const;
165     int cmp(const TreeEnt ent) const;
166   };
167   STATIC_CONST( TreeEntSize = sizeof(TreeEnt) >> 2 );
168   static const TreeEnt NullTreeEnt;
169 
170   /*
171    * Tree node has 3 parts:
172    *
173    * 1) struct TreeNode - the header (6 words)
174    * 2) some key values for min entry - the min prefix
175    * 3) list of TreeEnt (each 2 words)
176    *
177    * There are 3 links to other nodes: left child, right child, parent.
178    * Occupancy (number of entries) is at least 1 except temporarily when
179    * a node is about to be removed.
180    */
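  /*
   * Word layout of a node in TUP storage (illustrative sketch, matching
   * TreeHead::getPref and TreeHead::getEntList below):
   *
   *   [0, NodeHeadSize)                        TreeNode header
   *   [NodeHeadSize, NodeHeadSize + prefSize)  min prefix key data
   *   remaining words                          TreeEnt list, TreeEntSize words each
   */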
181   struct TreeNode;
182   friend struct TreeNode;
183   struct TreeNode {
184     TupLoc m_link[3];           // link to 0-left child 1-right child 2-parent
185     unsigned m_side : 2;        // we are 0-left child 1-right child 2-root
186     unsigned m_balance : 2;     // balance -1, 0, +1 plus 1 for Solaris CC
187     unsigned pad1 : 4;
188     Uint8 m_occup;              // current number of entries
189     Uint32 m_nodeScan;          // list of scans at this node
190     TreeNode();
191   };
192   STATIC_CONST( NodeHeadSize = sizeof(TreeNode) >> 2 );
193 
194   /*
195    * Tree header.  There is one in each fragment.  Contains tree
196    * parameters and address of root node.
197    */
198   struct TreeHead;
199   friend struct TreeHead;
200   struct TreeHead {
201     Uint8 m_nodeSize;           // words in tree node
202     Uint8 m_prefSize;           // words in min prefix
203     Uint8 m_minOccup;           // min entries in internal node
204     Uint8 m_maxOccup;           // max entries in node
205     TupLoc m_root;              // root node
206     TreeHead();
207     // methods
208     Uint32* getPref(TreeNode* node) const;
209     TreeEnt* getEntList(TreeNode* node) const;
210   };
211 
212   /*
213    * Tree position.  Specifies node, position within node (from 0 to
214    * m_occup), and whether the position is at an existing entry or
215    * before one (if any).  Position m_occup points past the node and is
216    * also represented by position 0 of next node.  Includes direction
217    * used by scan.
218    */
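  /*
   * Example (illustrative only): in a node with m_occup = 4, positions
   * 0-3 denote existing entries, while position 4 points past the node
   * and is equivalent to position 0 of the next node in scan order.
   */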
219   struct TreePos;
220   friend struct TreePos;
221   struct TreePos {
222     TupLoc m_loc;               // physical node address
223     Uint16 m_pos;               // position 0 to m_occup
224     Uint8 m_dir;                // see scanNext
225     TreePos();
226   };
227 
228   // packed metadata
229 
230   /*
231    * Descriptor page.  The "hot" metadata for an index is stored as
232    * contiguous array of words on some page.  It has 3 parts:
233    * 1) DescHead
234    * 2) array of NdbPack::Type used by NdbPack::Spec of index key
235    * 3) array of attr headers for reading index key values from TUP
236    */
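  /*
   * Word layout of one index descriptor within a page (illustrative,
   * matching getDescSize/getKeyTypes/getKeyAttrs below):
   *
   *   DescHeadSize words                          DescHead
   *   m_numAttrs * KeyTypeSize words              NdbPack::Type array
   *   m_numAttrs * AttributeHeaderSize words      attr headers for TUP reads
   */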
237   struct DescPage;
238   friend struct DescPage;
239   struct DescPage {
240     Uint32 m_nextPage;
241     Uint32 m_numFree;           // number of free words
242     union {
243     Uint32 m_data[DescPageSize];
244     Uint32 nextPool;
245     };
246     DescPage();
247   };
248   typedef Ptr<DescPage> DescPagePtr;
249   ArrayPool<DescPage> c_descPagePool;
250   Uint32 c_descPageList;
251 
252   struct DescHead {
253     Uint32 m_indexId;
254     Uint16 m_numAttrs;
255     Uint16 m_magic;
256     enum { Magic = 0xDE5C };
257   };
258   STATIC_CONST( DescHeadSize = sizeof(DescHead) >> 2 );
259 
260   typedef NdbPack::Type KeyType;
261   typedef NdbPack::Spec KeySpec;
262   STATIC_CONST( KeyTypeSize = sizeof(KeyType) >> 2 );
263 
264   typedef NdbPack::DataC KeyDataC;
265   typedef NdbPack::Data KeyData;
266   typedef NdbPack::BoundC KeyBoundC;
267   typedef NdbPack::Bound KeyBound;
268 
269   // range scan
270 
271   /*
272    * ScanBound instances are members of ScanOp.  Bound data is stored in
273    * a separate segmented buffer pool.
274    */
275   struct ScanBound {
276     DataBuffer<ScanBoundSegmentSize>::Head m_head;
277     Uint16 m_cnt;       // number of attributes
278     Int16 m_side;
279     ScanBound();
280   };
281   DataBuffer<ScanBoundSegmentSize>::DataBufferPool c_scanBoundPool;
282 
283   // ScanLock
284   struct ScanLock {
285     ScanLock() {}
286     Uint32 m_accLockOp;
287     union {
288     Uint32 nextPool;
289     Uint32 nextList;
290     };
291     Uint32 prevList;
292   };
293   typedef Ptr<ScanLock> ScanLockPtr;
294   ArrayPool<ScanLock> c_scanLockPool;
295 
296   /*
297    * Scan operation.
298    *
299    * Tuples are locked one at a time.  The current lock op is set to
300    * RNIL as soon as the lock is obtained and passed to LQH.  We must
301    * however remember all locks which LQH has not returned for unlocking
302    * since they must be aborted by us when the scan is closed.
303    *
304    * Scan state describes the entry we are interested in.  There is
305    * a separate lock wait flag.  It may be for current entry or it may
306    * be for an entry we were moved away from.  In any case nothing
307    * happens with current entry before lock wait flag is cleared.
308    *
309    * An unfinished scan is always linked to some tree node, and has
310    * current position and direction (see comments at scanNext).  There
311    * is also a copy of latest entry found.
312    *
313    * Error handling:  An error code (independent of scan state) is set
314    * and returned to LQH.  No more result rows are returned but normal
315    * protocol is still followed until scan close.
316    */
317   struct ScanOp;
318   friend struct ScanOp;
319   struct ScanOp {
320     enum {
321       Undef = 0,
322       First = 1,                // before first entry
323       Current = 2,              // at some entry
324       Found = 3,                // return current as next scan result
325       Blocked = 4,              // found and waiting for ACC lock
326       Locked = 5,               // found and locked or no lock needed
327       Next = 6,                 // looking for next entry
328       Last = 7,                 // after last entry
329       Aborting = 8
330     };
331     Uint8 m_state;
332     Uint8 m_lockwait;
333     Uint16 m_errorCode;
334     Uint32 m_userPtr;           // scanptr.i in LQH
335     Uint32 m_userRef;
336     Uint32 m_tableId;
337     Uint32 m_indexId;
338     Uint32 m_fragId;
339     Uint32 m_fragPtrI;
340     Uint32 m_transId1;
341     Uint32 m_transId2;
342     Uint32 m_savePointId;
343     // lock waited for or obtained and not yet passed to LQH
344     Uint32 m_accLockOp;
345     // locks obtained and passed to LQH but not yet returned by LQH
346     DLFifoList<ScanLock>::Head m_accLockOps;
347     Uint8 m_readCommitted;      // no locking
348     Uint8 m_lockMode;
349     Uint8 m_descending;
350     ScanBound m_scanBound[2];
351     TreePos m_scanPos;          // position
352     TreeEnt m_scanEnt;          // latest entry found
353     Uint32 m_nodeScan;          // next scan at node (single-linked)
354     Uint32 m_statOpPtrI;        // RNIL unless this is a statistics scan
355     union {
356     Uint32 nextPool;
357     Uint32 nextList;
358     };
359     Uint32 prevList;
360     ScanOp();
361   };
362   typedef Ptr<ScanOp> ScanOpPtr;
363   ArrayPool<ScanOp> c_scanOpPool;
364 
365   // indexes and fragments
366 
367   /*
368    * Ordered index.  Top level data structure.  The primary table (table
369    * being indexed) lives in TUP.
370    */
371   struct Index;
372   friend struct Index;
373   struct Index {
374     enum State {
375       NotDefined = 0,
376       Defining = 1,
377       Building = 3,             // triggers activated, building
378       Online = 2,               // triggers activated and build done
379       Dropping = 9
380     };
381     State m_state;
382     DictTabInfo::TableType m_tableType;
383     Uint32 m_tableId;
384     Uint16 unused;
385     Uint16 m_numFrags;
386     Uint32 m_fragId[MaxIndexFragments];
387     Uint32 m_fragPtrI[MaxIndexFragments];
388     Uint32 m_descPage;          // descriptor page
389     Uint16 m_descOff;           // offset within the page
390     Uint16 m_numAttrs;
391     Uint16 m_prefAttrs;         // attributes in min prefix
392     Uint16 m_prefBytes;         // max bytes in min prefix
393     KeySpec m_keySpec;
394     Uint32 m_statFragPtrI;      // fragment to monitor if not RNIL
395     Uint32 m_statLoadTime;      // load time of index stats
396     union {
397     bool m_storeNullKey;
398     Uint32 nextPool;
399     };
400     Index();
401   };
402   typedef Ptr<Index> IndexPtr;
403   ArrayPool<Index> c_indexPool;
404   RSS_AP_SNAPSHOT(c_indexPool);
405 
406   /*
407    * Fragment of an index, as known to DIH/TC.  Represents the two
408    * duplicate fragments known to LQH/ACC/TUP.  Includes tree header.
409    * There are no maintenance operation records yet.
410    */
411   struct Frag;
412   friend struct Frag;
413   struct Frag {
414     Uint32 m_tableId;           // copy from index level
415     Uint32 m_indexId;
416     Uint16 unused;
417     Uint16 m_fragId;
418     TreeHead m_tree;
419     TupLoc m_freeLoc;           // one free node for next op
420     DLList<ScanOp> m_scanList;  // current scans on this fragment
421     Uint32 m_tupIndexFragPtrI;
422     Uint32 m_tupTableFragPtrI;
423     Uint32 m_accTableFragPtrI;
424     Uint64 m_entryCount;        // current entries
425     Uint64 m_entryBytes;        // sum of index key sizes
426     Uint64 m_entryOps;          // ops since last index stats update
427     union {
428     Uint32 nextPool;
429     };
430     Frag(ArrayPool<ScanOp>& scanOpPool);
431   };
432   typedef Ptr<Frag> FragPtr;
433   ArrayPool<Frag> c_fragPool;
434   RSS_AP_SNAPSHOT(c_fragPool);
435 
436   /*
437    * Fragment metadata operation.
438    */
439   struct FragOp {
440     Uint32 m_userPtr;
441     Uint32 m_userRef;
442     Uint32 m_indexId;
443     Uint32 m_fragId;
444     Uint32 m_fragPtrI;
445     Uint32 m_fragNo;            // fragment number starting at zero
446     Uint32 m_numAttrsRecvd;
447     union {
448     Uint32 nextPool;
449     };
450     FragOp();
451   };
452   typedef Ptr<FragOp> FragOpPtr;
453   ArrayPool<FragOp> c_fragOpPool;
454   RSS_AP_SNAPSHOT(c_fragOpPool);
455 
456   // node handles
457 
458   /*
459    * A node handle is a reference to a tree node in TUP.  It is used to
460    * operate on the node.  Node handles are allocated on the stack.
461    */
462   struct NodeHandle;
463   friend struct NodeHandle;
464   struct NodeHandle {
465     Frag& m_frag;               // fragment using the node
466     TupLoc m_loc;               // physical node address
467     TreeNode* m_node;           // pointer to node storage
468     NodeHandle(Frag& frag);
469     NodeHandle(const NodeHandle& node);
470     NodeHandle& operator=(const NodeHandle& node);
471     // check if unassigned
472     bool isNull();
473     // getters
474     TupLoc getLink(unsigned i);
475     unsigned getChilds();       // cannot spell
476     unsigned getSide();
477     unsigned getOccup();
478     int getBalance();
479     Uint32 getNodeScan();
480     // setters
481     void setLink(unsigned i, TupLoc loc);
482     void setSide(unsigned i);
483     void setOccup(unsigned n);
484     void setBalance(int b);
485     void setNodeScan(Uint32 scanPtrI);
486     // access other parts of the node
487     Uint32* getPref();
488     TreeEnt getEnt(unsigned pos);
489     // for ndbrequire and ndbassert
490     void progError(int line, int cause, const char* file);
491   };
492 
493   // stats scan
494   struct StatOp;
495   friend struct StatOp;
496   struct StatOp {
497     // the scan
498     Uint32 m_scanOpPtrI;
499     // parameters
500     Uint32 m_saveSize;
501     Uint32 m_saveScale;
502     Uint32 m_batchSize;
503     Uint32 m_estBytes;
504    // counters
505    Uint32 m_rowCount;
506    Uint32 m_batchCurr;
507    bool m_haveSample;
508    Uint32 m_sampleCount;
509    Uint32 m_keyBytes;
510    bool m_keyChange;
511    bool m_usePrev;
512    // metadata
513    enum { MaxKeyCount = MAX_INDEX_STAT_KEY_COUNT };
514    enum { MaxKeySize = MAX_INDEX_STAT_KEY_SIZE };
515    enum { MaxValueCount = MAX_INDEX_STAT_VALUE_COUNT };
516    enum { MaxValueSize = MAX_INDEX_STAT_VALUE_SIZE };
517    Uint32 m_keyCount;
518    Uint32 m_valueCount;
519    // pack
520    const KeySpec& m_keySpec;
521    NdbPack::Spec m_valueSpec;
522    NdbPack::Type m_valueSpecBuf[MaxValueCount];
523    // data previous current result
524    KeyData m_keyData1;
525    KeyData m_keyData2;
526    KeyData m_keyData;
527    NdbPack::Data m_valueData;
528    // buffers with one word for length bytes
529    Uint32 m_keyDataBuf1[1 + MaxKeySize];
530    Uint32 m_keyDataBuf2[1 + MaxKeySize];
531    Uint32 m_keyDataBuf[1 + MaxKeySize];
532    Uint32 m_valueDataBuf[1 + MaxValueCount];
533    // value collection
534    struct Value {
535      Uint32 m_rir;
536      Uint32 m_unq[MaxKeyCount];
537      Value();
538    };
539    Value m_value1;
540    Value m_value2;
541    union {
542    Uint32 nextPool;
543    };
544    StatOp(const Index&);
545   };
546   typedef Ptr<StatOp> StatOpPtr;
547   ArrayPool<StatOp> c_statOpPool;
548   RSS_AP_SNAPSHOT(c_statOpPool);
549 
550   // stats monitor (shared by req data and continueB loop)
551   struct StatMon;
552   friend struct StatMon;
553   struct StatMon {
554     IndexStatImplReq m_req;
555     Uint32 m_requestType;
556     // continueB loop
557     Uint32 m_loopIndexId;
558     Uint32 m_loopDelay;
559     StatMon();
560   };
561   StatMon c_statMon;
562 
563   // methods
564 
565   /*
566    * DbtuxGen.cpp
567    */
568   void execCONTINUEB(Signal* signal);
569   void execSTTOR(Signal* signal);
570   void execREAD_CONFIG_REQ(Signal* signal);
571   void execNODE_STATE_REP(Signal* signal);
572 
573   // utils
574   void readKeyAttrs(TuxCtx&, const Frag& frag, TreeEnt ent, KeyData& keyData, Uint32 count);
575   void readTablePk(const Frag& frag, TreeEnt ent, Uint32* pkData, unsigned& pkSize);
576   void unpackBound(TuxCtx&, const ScanBound& bound, KeyBoundC& searchBound);
577   void findFrag(EmulatedJamBuffer* jamBuf, const Index& index,
578                 Uint32 fragId, FragPtr& fragPtr);
579 
580   /*
581    * DbtuxMeta.cpp
582    */
583   void execCREATE_TAB_REQ(Signal*);
584   void execTUXFRAGREQ(Signal* signal);
585   void execTUX_ADD_ATTRREQ(Signal* signal);
586   void execALTER_INDX_IMPL_REQ(Signal* signal);
587   void execDROP_TAB_REQ(Signal* signal);
588   void execDROP_FRAG_REQ(Signal* signal);
589   bool allocDescEnt(IndexPtr indexPtr);
590   void freeDescEnt(IndexPtr indexPtr);
591   void abortAddFragOp(Signal* signal);
592   void dropIndex(Signal* signal, IndexPtr indexPtr, Uint32 senderRef, Uint32 senderData);
593 
594   /*
595    * DbtuxMaint.cpp
596    */
597   void execTUX_MAINT_REQ(Signal* signal);
598 
599   /*
600    * DbtuxNode.cpp
601    */
602   int allocNode(TuxCtx&, NodeHandle& node);
603   void freeNode(NodeHandle& node);
604   void selectNode(NodeHandle& node, TupLoc loc);
605   void insertNode(NodeHandle& node);
606   void deleteNode(NodeHandle& node);
607   void freePreallocatedNode(Frag& frag);
608   void setNodePref(struct TuxCtx &, NodeHandle& node);
609   // node operations
610   void nodePushUp(TuxCtx&, NodeHandle& node, unsigned pos, const TreeEnt& ent, Uint32 scanList);
611   void nodePushUpScans(NodeHandle& node, unsigned pos);
612   void nodePopDown(TuxCtx&, NodeHandle& node, unsigned pos, TreeEnt& ent, Uint32* scanList);
613   void nodePopDownScans(NodeHandle& node, unsigned pos);
614   void nodePushDown(TuxCtx&, NodeHandle& node, unsigned pos, TreeEnt& ent, Uint32& scanList);
615   void nodePushDownScans(NodeHandle& node, unsigned pos);
616   void nodePopUp(TuxCtx&, NodeHandle& node, unsigned pos, TreeEnt& ent, Uint32 scanList);
617   void nodePopUpScans(NodeHandle& node, unsigned pos);
618   void nodeSlide(TuxCtx&, NodeHandle& dstNode, NodeHandle& srcNode, unsigned cnt, unsigned i);
619   // scans linked to node
620   void addScanList(NodeHandle& node, unsigned pos, Uint32 scanList);
621   void removeScanList(NodeHandle& node, unsigned pos, Uint32& scanList);
622   void moveScanList(NodeHandle& node, unsigned pos);
623   void linkScan(NodeHandle& node, ScanOpPtr scanPtr);
624   void unlinkScan(NodeHandle& node, ScanOpPtr scanPtr);
625   bool islinkScan(NodeHandle& node, ScanOpPtr scanPtr);
626 
627   /*
628    * DbtuxTree.cpp
629    */
630   // add entry
631   void treeAdd(TuxCtx&, Frag& frag, TreePos treePos, TreeEnt ent);
632   void treeAddFull(TuxCtx&, Frag& frag, NodeHandle lubNode, unsigned pos, TreeEnt ent);
633   void treeAddNode(TuxCtx&, Frag& frag, NodeHandle lubNode, unsigned pos, TreeEnt ent, NodeHandle parentNode, unsigned i);
634   void treeAddRebalance(TuxCtx&, Frag& frag, NodeHandle node, unsigned i);
635   // remove entry
636   void treeRemove(Frag& frag, TreePos treePos);
637   void treeRemoveInner(Frag& frag, NodeHandle lubNode, unsigned pos);
638   void treeRemoveSemi(Frag& frag, NodeHandle node, unsigned i);
639   void treeRemoveLeaf(Frag& frag, NodeHandle node);
640   void treeRemoveNode(Frag& frag, NodeHandle node);
641   void treeRemoveRebalance(Frag& frag, NodeHandle node, unsigned i);
642   // rotate
643   void treeRotateSingle(TuxCtx&, Frag& frag, NodeHandle& node, unsigned i);
644   void treeRotateDouble(TuxCtx&, Frag& frag, NodeHandle& node, unsigned i);
645 
646   /*
647    * DbtuxScan.cpp
648    */
649   void execACC_SCANREQ(Signal* signal);
650   void execNEXT_SCANREQ(Signal* signal);
651   void execACC_CHECK_SCAN(Signal* signal);
652   void execACCKEYCONF(Signal* signal);
653   void execACCKEYREF(Signal* signal);
654   void execACC_ABORTCONF(Signal* signal);
655   void scanFirst(ScanOpPtr scanPtr);
656   void scanFind(ScanOpPtr scanPtr);
657   void scanNext(ScanOpPtr scanPtr, bool fromMaintReq);
658   bool scanCheck(ScanOpPtr scanPtr, TreeEnt ent);
659   bool scanVisible(ScanOpPtr scanPtr, TreeEnt ent);
660   void scanClose(Signal* signal, ScanOpPtr scanPtr);
661   void abortAccLockOps(Signal* signal, ScanOpPtr scanPtr);
662   void addAccLockOp(ScanOpPtr scanPtr, Uint32 accLockOp);
663   void removeAccLockOp(ScanOpPtr scanPtr, Uint32 accLockOp);
664   void releaseScanOp(ScanOpPtr& scanPtr);
665 
666   /*
667    * DbtuxSearch.cpp
668    */
669   void findNodeToUpdate(TuxCtx&, Frag& frag, const KeyDataC& searchKey, TreeEnt searchEnt, NodeHandle& currNode);
670   bool findPosToAdd(TuxCtx&, Frag& frag, const KeyDataC& searchKey, TreeEnt searchEnt, NodeHandle& currNode, TreePos& treePos);
671   bool findPosToRemove(TuxCtx&, Frag& frag, const KeyDataC& searchKey, TreeEnt searchEnt, NodeHandle& currNode, TreePos& treePos);
672   bool searchToAdd(TuxCtx&, Frag& frag, const KeyDataC& searchKey, TreeEnt searchEnt, TreePos& treePos);
673   bool searchToRemove(TuxCtx&, Frag& frag, const KeyDataC& searchKey, TreeEnt searchEnt, TreePos& treePos);
674   void findNodeToScan(Frag& frag, unsigned dir, const KeyBoundC& searchBound, NodeHandle& currNode);
675   void findPosToScan(Frag& frag, unsigned idir, const KeyBoundC& searchBound, NodeHandle& currNode, Uint16* pos);
676   void searchToScan(Frag& frag, unsigned idir, const KeyBoundC& searchBound, TreePos& treePos);
677 
678   /*
679    * DbtuxCmp.cpp
680    */
681   int cmpSearchKey(TuxCtx&, const KeyDataC& searchKey, const KeyDataC& entryKey, Uint32 cnt);
682   int cmpSearchBound(TuxCtx&, const KeyBoundC& searchBound, const KeyDataC& entryKey, Uint32 cnt);
683 
684   /*
685    * DbtuxStat.cpp
686    */
687   // one-round-trip tree-dive records in range
688   void statRecordsInRange(ScanOpPtr scanPtr, Uint32* out);
689   Uint32 getEntriesBeforeOrAfter(Frag& frag, TreePos pos, unsigned idir);
690   unsigned getPathToNode(NodeHandle node, Uint16* path);
691   // stats scan
692   int statScanInit(StatOpPtr, const Uint32* data, Uint32 len, Uint32* usedLen);
693   int statScanAddRow(StatOpPtr, TreeEnt ent);
694   void statScanReadKey(StatOpPtr, Uint32* out);
695   void statScanReadValue(StatOpPtr, Uint32* out);
696   void execINDEX_STAT_REP(Signal*); // from TRIX
697   // stats monitor request
698   void execINDEX_STAT_IMPL_REQ(Signal*);
699   void statMonStart(Signal*, StatMon&);
700   void statMonStop(Signal*, StatMon&);
701   void statMonConf(Signal*, StatMon&);
702   // stats monitor continueB loop
703   void statMonSendContinueB(Signal*);
704   void statMonExecContinueB(Signal*);
705   void statMonCheck(Signal*, StatMon&);
706   void statMonRep(Signal*, StatMon&);
707 
708   /*
709    * DbtuxDebug.cpp
710    */
711   void execDUMP_STATE_ORD(Signal* signal);
712 #ifdef VM_TRACE
713   struct PrintPar {
714     char m_path[100];           // LR prefix
715     unsigned m_side;            // expected side
716     TupLoc m_parent;            // expected parent address
717     int m_depth;                // returned depth
718     unsigned m_occup;           // returned occupancy
719     TreeEnt m_minmax[2];        // returned subtree min and max
720     bool m_ok;                  // returned status
721     PrintPar();
722   };
723   void printTree(Signal* signal, Frag& frag, NdbOut& out);
724   void printNode(struct TuxCtx&, Frag&, NdbOut& out, TupLoc loc, PrintPar& par);
725   friend class NdbOut& operator<<(NdbOut&, const TupLoc&);
726   friend class NdbOut& operator<<(NdbOut&, const TreeEnt&);
727   friend class NdbOut& operator<<(NdbOut&, const TreeNode&);
728   friend class NdbOut& operator<<(NdbOut&, const TreeHead&);
729   friend class NdbOut& operator<<(NdbOut&, const TreePos&);
730   friend class NdbOut& operator<<(NdbOut&, const KeyType&);
731   friend class NdbOut& operator<<(NdbOut&, const ScanOp&);
732   friend class NdbOut& operator<<(NdbOut&, const Index&);
733   friend class NdbOut& operator<<(NdbOut&, const Frag&);
734   friend class NdbOut& operator<<(NdbOut&, const FragOp&);
735   friend class NdbOut& operator<<(NdbOut&, const NodeHandle&);
736   friend class NdbOut& operator<<(NdbOut&, const StatOp&);
737   friend class NdbOut& operator<<(NdbOut&, const StatMon&);
738   FILE* debugFile;
739   NdbOut debugOut;
740   unsigned debugFlags;
741   enum {
742     DebugMeta = 1,              // log create and drop index
743     DebugMaint = 2,             // log maintenance ops
744     DebugTree = 4,              // log and check tree after each op
745     DebugScan = 8,              // log scans
746     DebugLock = 16,             // log ACC locks
747     DebugStat = 32              // log stats collection
748   };
749   STATIC_CONST( DataFillByte = 0xa2 );
750   STATIC_CONST( NodeFillByte = 0xa4 );
751 #endif
752 
753   void execDBINFO_SCANREQ(Signal* signal);
754 
755   // start up info
756   Uint32 c_internalStartPhase;
757   Uint32 c_typeOfStart;
758 
759   /*
760    * Global data set at operation start.  Unpacked from index metadata.
761    * Not passed as parameter to methods.  Invalid across timeslices.
762    *
763    * TODO inline all into index metadata
764    */
765   struct TuxCtx
766   {
767     EmulatedJamBuffer * jamBuffer;
768 
769     // buffer for scan bound and search key data
770     Uint32* c_searchKey;
771 
772     // buffer for current entry key data
773     Uint32* c_entryKey;
774 
775     // buffer for xfrm-ed PK and for temporary use
776     Uint32* c_dataBuffer;
777 
778 #ifdef VM_TRACE
779     char* c_debugBuffer;
780 #endif
781   };
782 
783   struct TuxCtx c_ctx; // Global Tux context, for everything except MT-index build
784 
785   // index stats
786   bool c_indexStatAutoUpdate;
787   Uint32 c_indexStatSaveSize;
788   Uint32 c_indexStatSaveScale;
789   Uint32 c_indexStatTriggerPct;
790   Uint32 c_indexStatTriggerScale;
791   Uint32 c_indexStatUpdateDelay;
792 
793   // inlined utils
794   Uint32 getDescSize(const Index& index);
795   DescHead& getDescHead(const Index& index);
796   KeyType* getKeyTypes(DescHead& descHead);
797   const KeyType* getKeyTypes(const DescHead& descHead);
798   AttributeHeader* getKeyAttrs(DescHead& descHead);
799   const AttributeHeader* getKeyAttrs(const DescHead& descHead);
800   //
801   void getTupAddr(const Frag& frag, TreeEnt ent, Uint32& lkey1, Uint32& lkey2);
802   static unsigned min(unsigned x, unsigned y);
803   static unsigned max(unsigned x, unsigned y);
804 
805 public:
806   static Uint32 mt_buildIndexFragment_wrapper(void*);
807 private:
808   Uint32 mt_buildIndexFragment(struct mt_BuildIndxCtx*);
809 
810   Signal* c_signal_bug32040;
811 };
812 
813 // Dbtux::TupLoc
814 
815 inline
816 Dbtux::TupLoc::TupLoc() :
817   m_pageId1(RNIL >> 16),
818   m_pageId2(RNIL & 0xFFFF),
819   m_pageOffset(0)
820 {
821 }
822 
823 inline
824 Dbtux::TupLoc::TupLoc(Uint32 pageId, Uint16 pageOffset) :
825   m_pageId1(pageId >> 16),
826   m_pageId2(pageId & 0xFFFF),
827   m_pageOffset(pageOffset)
828 {
829 }
830 
831 inline Uint32
832 Dbtux::TupLoc::getPageId() const
833 {
834   return (m_pageId1 << 16) | m_pageId2;
835 }
836 
837 inline void
838 Dbtux::TupLoc::setPageId(Uint32 pageId)
839 {
840   m_pageId1 = (pageId >> 16);
841   m_pageId2 = (pageId & 0xFFFF);
842 }
843 
844 inline Uint32
845 Dbtux::TupLoc::getPageOffset() const
846 {
847   return (Uint32)m_pageOffset;
848 }
849 
850 inline void
851 Dbtux::TupLoc::setPageOffset(Uint32 pageOffset)
852 {
853   m_pageOffset = (Uint16)pageOffset;
854 }
855 
856 inline bool
857 Dbtux::TupLoc::operator==(const TupLoc& loc) const
858 {
859   return
860     m_pageId1 == loc.m_pageId1 &&
861     m_pageId2 == loc.m_pageId2 &&
862     m_pageOffset == loc.m_pageOffset;
863 }
864 
865 inline bool
866 Dbtux::TupLoc::operator!=(const TupLoc& loc) const
867 {
868   return ! (*this == loc);
869 }
870 
871 // Dbtux::TreeEnt
872 
873 inline
874 Dbtux::TreeEnt::TreeEnt() :
875   m_tupLoc(),
876   m_tupVersion(0)
877 {
878 }
879 
880 inline bool
881 Dbtux::TreeEnt::eqtuple(const TreeEnt ent) const
882 {
883   return
884     m_tupLoc == ent.m_tupLoc;
885 }
886 
887 inline bool
888 Dbtux::TreeEnt::eq(const TreeEnt ent) const
889 {
890   return
891     m_tupLoc == ent.m_tupLoc &&
892     m_tupVersion == ent.m_tupVersion;
893 }
894 
895 inline int
896 Dbtux::TreeEnt::cmp(const TreeEnt ent) const
897 {
898   if (m_tupLoc.getPageId() < ent.m_tupLoc.getPageId())
899     return -1;
900   if (m_tupLoc.getPageId() > ent.m_tupLoc.getPageId())
901     return +1;
902   if (m_tupLoc.getPageOffset() < ent.m_tupLoc.getPageOffset())
903     return -1;
904   if (m_tupLoc.getPageOffset() > ent.m_tupLoc.getPageOffset())
905     return +1;
906   /*
907    * Guess if one tuple version has wrapped around.  This is a well-
908    * defined ordering on existing versions since versions are assigned
909    * consecutively and different versions exist only on an uncommitted
910    * tuple.  Assumes at most 2**14 uncommitted ops on the same tuple.
911    */
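  // Example (illustrative, 15-bit versions): comparing 0x7FFE with 0x0001
  // gives a difference above the wrap limit (0x4000), so 0x0001 is treated
  // as the newer, wrapped-around version and 0x7FFE orders before it.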
912   const unsigned version_wrap_limit = (1 << (ZTUP_VERSION_BITS - 1));
913   if (m_tupVersion < ent.m_tupVersion) {
914     if (unsigned(ent.m_tupVersion - m_tupVersion) < version_wrap_limit)
915       return -1;
916     else
917       return +1;
918   }
919   if (m_tupVersion > ent.m_tupVersion) {
920     if (unsigned(m_tupVersion - ent.m_tupVersion) < version_wrap_limit)
921       return +1;
922     else
923       return -1;
924   }
925   return 0;
926 }
927 
928 // Dbtux::TreeNode
929 
930 inline
931 Dbtux::TreeNode::TreeNode() :
932   m_side(2),
933   m_balance(0 + 1),
934   pad1(0),
935   m_occup(0),
936   m_nodeScan(RNIL)
937 {
938   m_link[0] = NullTupLoc;
939   m_link[1] = NullTupLoc;
940   m_link[2] = NullTupLoc;
941 }
942 
943 // Dbtux::TreeHead
944 
945 inline
946 Dbtux::TreeHead::TreeHead() :
947   m_nodeSize(0),
948   m_prefSize(0),
949   m_minOccup(0),
950   m_maxOccup(0),
951   m_root()
952 {
953 }
954 
955 inline Uint32*
956 Dbtux::TreeHead::getPref(TreeNode* node) const
957 {
958   Uint32* ptr = (Uint32*)node + NodeHeadSize;
959   return ptr;
960 }
961 
962 inline Dbtux::TreeEnt*
963 Dbtux::TreeHead::getEntList(TreeNode* node) const
964 {
965   Uint32* ptr = (Uint32*)node + NodeHeadSize + m_prefSize;
966   return (TreeEnt*)ptr;
967 }
968 
969 // Dbtux::TreePos
970 
971 inline
972 Dbtux::TreePos::TreePos() :
973   m_loc(),
974   m_pos(ZNIL),
975   m_dir(255)
976 {
977 }
978 
979 // Dbtux::DescPage
980 
981 inline
982 Dbtux::DescPage::DescPage() :
983   m_nextPage(RNIL),
984   m_numFree(ZNIL)
985 {
986   for (unsigned i = 0; i < DescPageSize; i++) {
987 #ifdef VM_TRACE
988     m_data[i] = 0x13571357;
989 #else
990     m_data[i] = 0;
991 #endif
992   }
993 }
994 
995 // Dbtux::ScanBound
996 
997 inline
998 Dbtux::ScanBound::ScanBound() :
999   m_head(),
1000   m_cnt(0),
1001   m_side(0)
1002 {
1003 }
1004 
1005 // Dbtux::ScanOp
1006 
1007 inline
1008 Dbtux::ScanOp::ScanOp() :
1009   m_state(Undef),
1010   m_lockwait(false),
1011   m_errorCode(0),
1012   m_userPtr(RNIL),
1013   m_userRef(RNIL),
1014   m_tableId(RNIL),
1015   m_indexId(RNIL),
1016   m_fragPtrI(RNIL),
1017   m_transId1(0),
1018   m_transId2(0),
1019   m_savePointId(0),
1020   m_accLockOp(RNIL),
1021   m_accLockOps(),
1022   m_readCommitted(0),
1023   m_lockMode(0),
1024   m_descending(0),
1025   m_scanBound(),
1026   m_scanPos(),
1027   m_scanEnt(),
1028   m_nodeScan(RNIL),
1029   m_statOpPtrI(RNIL)
1030 {
1031 }
1032 
1033 // Dbtux::Index
1034 
1035 inline
1036 Dbtux::Index::Index() :
1037   m_state(NotDefined),
1038   m_tableType(DictTabInfo::UndefTableType),
1039   m_tableId(RNIL),
1040   m_numFrags(0),
1041   m_descPage(RNIL),
1042   m_descOff(0),
1043   m_numAttrs(0),
1044   m_prefAttrs(0),
1045   m_prefBytes(0),
1046   m_keySpec(),
1047   m_statFragPtrI(RNIL),
1048   m_statLoadTime(0),
1049   m_storeNullKey(false)
1050 {
1051   for (unsigned i = 0; i < MaxIndexFragments; i++) {
1052     m_fragId[i] = ZNIL;
1053     m_fragPtrI[i] = RNIL;
1054   };
1055 }
1056 
1057 // Dbtux::Frag
1058 
1059 inline
1060 Dbtux::Frag::Frag(ArrayPool<ScanOp>& scanOpPool) :
1061   m_tableId(RNIL),
1062   m_indexId(RNIL),
1063   m_fragId(ZNIL),
1064   m_tree(),
1065   m_freeLoc(),
1066   m_scanList(scanOpPool),
1067   m_tupIndexFragPtrI(RNIL),
1068   m_tupTableFragPtrI(RNIL),
1069   m_accTableFragPtrI(RNIL),
1070   m_entryCount(0),
1071   m_entryBytes(0),
1072   m_entryOps(0)
1073 {
1074 }
1075 
1076 // Dbtux::FragOp
1077 
1078 inline
1079 Dbtux::FragOp::FragOp() :
1080   m_userPtr(RNIL),
1081   m_userRef(RNIL),
1082   m_indexId(RNIL),
1083   m_fragId(ZNIL),
1084   m_fragPtrI(RNIL),
1085   m_fragNo(ZNIL),
1086   m_numAttrsRecvd(ZNIL)
1087 {
1088 }
1089 
1090 // Dbtux::NodeHandle
1091 
1092 inline
1093 Dbtux::NodeHandle::NodeHandle(Frag& frag) :
1094   m_frag(frag),
1095   m_loc(),
1096   m_node(0)
1097 {
1098 }
1099 
1100 inline
1101 Dbtux::NodeHandle::NodeHandle(const NodeHandle& node) :
1102   m_frag(node.m_frag),
1103   m_loc(node.m_loc),
1104   m_node(node.m_node)
1105 {
1106 }
1107 
1108 inline Dbtux::NodeHandle&
1109 Dbtux::NodeHandle::operator=(const NodeHandle& node)
1110 {
1111   ndbassert(&m_frag == &node.m_frag);
1112   m_loc = node.m_loc;
1113   m_node = node.m_node;
1114   return *this;
1115 }
1116 
1117 inline bool
1118 Dbtux::NodeHandle::isNull()
1119 {
1120   return m_node == 0;
1121 }
1122 
1123 inline Dbtux::TupLoc
1124 Dbtux::NodeHandle::getLink(unsigned i)
1125 {
1126   ndbrequire(i <= 2);
1127   return m_node->m_link[i];
1128 }
1129 
1130 inline unsigned
1131 Dbtux::NodeHandle::getChilds()
1132 {
1133   return (m_node->m_link[0] != NullTupLoc) + (m_node->m_link[1] != NullTupLoc);
1134 }
1135 
1136 inline unsigned
1137 Dbtux::NodeHandle::getSide()
1138 {
1139   return m_node->m_side;
1140 }
1141 
1142 inline unsigned
1143 Dbtux::NodeHandle::getOccup()
1144 {
1145   return m_node->m_occup;
1146 }
1147 
1148 inline int
1149 Dbtux::NodeHandle::getBalance()
1150 {
1151   return (int)m_node->m_balance - 1;
1152 }
1153 
1154 inline Uint32
1155 Dbtux::NodeHandle::getNodeScan()
1156 {
1157   return m_node->m_nodeScan;
1158 }
1159 
1160 inline void
1161 Dbtux::NodeHandle::setLink(unsigned i, TupLoc loc)
1162 {
1163   if (likely(i <= 2))
1164   {
1165     m_node->m_link[i] = loc;
1166   }
1167   else
1168   {
1169     ndbrequire(false);
1170   }
1171 }
1172 
1173 inline void
1174 Dbtux::NodeHandle::setSide(unsigned i)
1175 {
1176   if (likely(i <= 2))
1177   {
1178     m_node->m_side = i;
1179   }
1180   else
1181   {
1182     ndbrequire(false);
1183   }
1184 }
1185 
1186 inline void
1187 Dbtux::NodeHandle::setOccup(unsigned n)
1188 {
1189   TreeHead& tree = m_frag.m_tree;
1190   ndbrequire(n <= tree.m_maxOccup);
1191   m_node->m_occup = n;
1192 }
1193 
1194 inline void
1195 Dbtux::NodeHandle::setBalance(int b)
1196 {
1197   ndbrequire(abs(b) <= 1);
1198   m_node->m_balance = (unsigned)(b + 1);
1199 }
1200 
1201 inline void
1202 Dbtux::NodeHandle::setNodeScan(Uint32 scanPtrI)
1203 {
1204   m_node->m_nodeScan = scanPtrI;
1205 }
1206 
1207 inline Uint32*
1208 Dbtux::NodeHandle::getPref()
1209 {
1210   TreeHead& tree = m_frag.m_tree;
1211   return tree.getPref(m_node);
1212 }
1213 
1214 inline Dbtux::TreeEnt
1215 Dbtux::NodeHandle::getEnt(unsigned pos)
1216 {
1217   TreeHead& tree = m_frag.m_tree;
1218   TreeEnt* entList = tree.getEntList(m_node);
1219   const unsigned occup = m_node->m_occup;
1220   ndbrequire(pos < occup);
1221   return entList[pos];
1222 }
1223 
1224 // stats
1225 
1226 inline
1227 Dbtux::StatOp::Value::Value()
1228 {
1229   m_rir = 0;
1230   Uint32 i;
1231   for (i = 0; i < MaxKeyCount; i++)
1232     m_unq[i] = 0;
1233 }
1234 
1235 inline
1236 Dbtux::StatOp::StatOp(const Index& index) :
1237   m_scanOpPtrI(RNIL),
1238   m_saveSize(0),
1239   m_saveScale(0),
1240   m_batchSize(0),
1241   m_estBytes(0),
1242   m_rowCount(0),
1243   m_batchCurr(0),
1244   m_haveSample(false),
1245   m_sampleCount(0),
1246   m_keyBytes(0),
1247   m_keyChange(false),
1248   m_usePrev(false),
1249   m_keyCount(0),
1250   m_valueCount(0),
1251   m_keySpec(index.m_keySpec),
1252   m_keyData1(m_keySpec, false, 2),
1253   m_keyData2(m_keySpec, false, 2),
1254   m_keyData(m_keySpec, false, 2),
1255   m_valueData(m_valueSpec, false, 2),
1256   m_value1(),
1257   m_value2()
1258 {
1259   m_valueSpec.set_buf(m_valueSpecBuf, MaxValueCount);
1260   m_keyData1.set_buf(m_keyDataBuf1, sizeof(m_keyDataBuf1));
1261   m_keyData2.set_buf(m_keyDataBuf2, sizeof(m_keyDataBuf2));
1262   m_keyData.set_buf(m_keyDataBuf, sizeof(m_keyDataBuf));
1263   m_valueData.set_buf(m_valueDataBuf, sizeof(m_valueDataBuf));
1264 }
1265 
1266 // Dbtux::StatMon
1267 
1268 inline
1269 Dbtux::StatMon::StatMon() :
1270   m_requestType(0),
1271   m_loopIndexId(0),
1272   m_loopDelay(1000)
1273 {
1274   memset(&m_req, 0, sizeof(m_req));
1275 }
1276 
1277 // parameters for methods
1278 
1279 #ifdef VM_TRACE
1280 inline
1281 Dbtux::PrintPar::PrintPar() :
1282   // caller fills in
1283   m_path(),
1284   m_side(255),
1285   m_parent(),
1286   // default return values
1287   m_depth(0),
1288   m_occup(0),
1289   m_ok(true)
1290 {
1291 }
1292 #endif
1293 
1294 // utils
1295 
1296 inline Uint32
1297 Dbtux::getDescSize(const Index& index)
1298 {
1299   return
1300     DescHeadSize +
1301     index.m_numAttrs * KeyTypeSize +
1302     index.m_numAttrs * AttributeHeaderSize;
1303 }
1304 
1305 inline Dbtux::DescHead&
1306 Dbtux::getDescHead(const Index& index)
1307 {
1308   DescPagePtr pagePtr;
1309   pagePtr.i = index.m_descPage;
1310   c_descPagePool.getPtr(pagePtr);
1311   ndbrequire(index.m_descOff < DescPageSize);
1312   Uint32* ptr = &pagePtr.p->m_data[index.m_descOff];
1313   DescHead* descHead = reinterpret_cast<DescHead*>(ptr);
1314   ndbrequire(descHead->m_magic == DescHead::Magic);
1315   return *descHead;
1316 }
1317 
1318 inline Dbtux::KeyType*
1319 Dbtux::getKeyTypes(DescHead& descHead)
1320 {
1321   Uint32* ptr = reinterpret_cast<Uint32*>(&descHead);
1322   ptr += DescHeadSize;
1323   return reinterpret_cast<KeyType*>(ptr);
1324 }
1325 
1326 inline const Dbtux::KeyType*
1327 Dbtux::getKeyTypes(const DescHead& descHead)
1328 {
1329   const Uint32* ptr = reinterpret_cast<const Uint32*>(&descHead);
1330   ptr += DescHeadSize;
1331   return reinterpret_cast<const KeyType*>(ptr);
1332 }
1333 
1334 inline AttributeHeader*
1335 Dbtux::getKeyAttrs(DescHead& descHead)
1336 {
1337   Uint32* ptr = reinterpret_cast<Uint32*>(&descHead);
1338   ptr += DescHeadSize;
1339   ptr += descHead.m_numAttrs * KeyTypeSize;
1340   return reinterpret_cast<AttributeHeader*>(ptr);
1341 }
1342 
1343 inline const AttributeHeader*
1344 Dbtux::getKeyAttrs(const DescHead& descHead)
1345 {
1346   const Uint32* ptr = reinterpret_cast<const Uint32*>(&descHead);
1347   ptr += DescHeadSize;
1348   ptr += descHead.m_numAttrs * KeyTypeSize;
1349   return reinterpret_cast<const AttributeHeader*>(ptr);
1350 }
1351 
1352 inline
1353 void
1354 Dbtux::getTupAddr(const Frag& frag, TreeEnt ent, Uint32& lkey1, Uint32& lkey2)
1355 {
1356   const Uint32 tableFragPtrI = frag.m_tupTableFragPtrI;
1357   const TupLoc tupLoc = ent.m_tupLoc;
1358   c_tup->tuxGetTupAddr(tableFragPtrI, tupLoc.getPageId(),tupLoc.getPageOffset(),
1359                        lkey1, lkey2);
1360   jamEntry();
1361 }
1362 
1363 inline unsigned
1364 Dbtux::min(unsigned x, unsigned y)
1365 {
1366   return x < y ? x : y;
1367 }
1368 
1369 inline unsigned
1370 Dbtux::max(unsigned x, unsigned y)
1371 {
1372   return x > y ? x : y;
1373 }
1374 
1375 // DbtuxCmp.cpp
1376 
1377 inline int
1378 Dbtux::cmpSearchKey(TuxCtx& ctx, const KeyDataC& searchKey, const KeyDataC& entryKey, Uint32 cnt)
1379 {
1380   // compare cnt attributes from each
1381   Uint32 num_eq;
1382   int ret = searchKey.cmp(entryKey, cnt, num_eq);
1383 #ifdef VM_TRACE
1384   if (debugFlags & DebugMaint) {
1385     debugOut << "cmpSearchKey: ret:" << ret;
1386     debugOut << " search:" << searchKey.print(ctx.c_debugBuffer, DebugBufferBytes);
1387     debugOut << " entry:" << entryKey.print(ctx.c_debugBuffer, DebugBufferBytes);
1388     debugOut << endl;
1389   }
1390 #endif
1391   return ret;
1392 }
1393 
1394 inline int
1395 Dbtux::cmpSearchBound(TuxCtx& ctx, const KeyBoundC& searchBound, const KeyDataC& entryKey, Uint32 cnt)
1396 {
1397   // compare cnt attributes from each
1398   Uint32 num_eq;
1399   int ret = searchBound.cmp(entryKey, cnt, num_eq);
1400 #ifdef VM_TRACE
1401   if (debugFlags & DebugScan) {
1402     debugOut << "cmpSearchBound: res:" << ret;
1403     debugOut << " search:" << searchBound.print(ctx.c_debugBuffer, DebugBufferBytes);
1404     debugOut << " entry:" << entryKey.print(ctx.c_debugBuffer, DebugBufferBytes);
1405     debugOut << endl;
1406   }
1407 #endif
1408   return ret;
1409 }
1410 
1411 
1412 #undef JAM_FILE_ID
1413 
1414 #endif
1415