1 /*
2    Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
3 
4    This program is free software; you can redistribute it and/or modify
5    it under the terms of the GNU General Public License, version 2.0,
6    as published by the Free Software Foundation.
7 
8    This program is also distributed with certain software (including
9    but not limited to OpenSSL) that is licensed under separate terms,
10    as designated in a particular file or component or in included license
11    documentation.  The authors of MySQL hereby grant you an additional
12    permission to link the program and your derivative works with the
13    separately licensed software that they have included with MySQL.
14 
15    This program is distributed in the hope that it will be useful,
16    but WITHOUT ANY WARRANTY; without even the implied warranty of
17    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
18    GNU General Public License, version 2.0, for more details.
19 
20    You should have received a copy of the GNU General Public License
21    along with this program; if not, write to the Free Software
22    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301  USA
23 */
24 
25 #ifndef DBTUX_H
26 #define DBTUX_H
27 
28 #include <ndb_limits.h>
29 #include <SimulatedBlock.hpp>
30 #include <AttributeDescriptor.hpp>
31 #include <AttributeHeader.hpp>
32 #include <ArrayPool.hpp>
33 #include <DataBuffer.hpp>
34 #include <DLFifoList.hpp>
35 #include <md5_hash.hpp>
36 
37 // big brother
38 #include <dbtup/Dbtup.hpp>
39 
40 // packed index keys and bounds
41 #include <NdbPack.hpp>
42 
43 // signal classes
44 #include <signaldata/DictTabInfo.hpp>
45 #include <signaldata/TuxContinueB.hpp>
46 #include <signaldata/TupFrag.hpp>
47 #include <signaldata/AlterIndxImpl.hpp>
48 #include <signaldata/DropTab.hpp>
49 #include <signaldata/TuxMaint.hpp>
50 #include <signaldata/AccScan.hpp>
51 #include <signaldata/TuxBound.hpp>
52 #include <signaldata/NextScan.hpp>
53 #include <signaldata/AccLock.hpp>
54 #include <signaldata/DumpStateOrd.hpp>
55 #include <signaldata/IndexStatSignal.hpp>
56 
57 // debug
58 #ifdef VM_TRACE
59 #include <NdbOut.hpp>
60 #include <OutputStream.hpp>
61 #endif
62 
63 // jams
64 #undef jam
65 #undef jamEntry
66 #ifdef DBTUX_GEN_CPP
67 #define jam()           jamLine(10000 + __LINE__)
68 #define jamEntry()      jamEntryLine(10000 + __LINE__)
69 #endif
70 #ifdef DBTUX_META_CPP
71 #define jam()           jamLine(20000 + __LINE__)
72 #define jamEntry()      jamEntryLine(20000 + __LINE__)
73 #endif
74 #ifdef DBTUX_MAINT_CPP
75 #define jam()           jamLine(30000 + __LINE__)
76 #define jamEntry()      jamEntryLine(30000 + __LINE__)
77 #endif
78 #ifdef DBTUX_NODE_CPP
79 #define jam()           jamLine(40000 + __LINE__)
80 #define jamEntry()      jamEntryLine(40000 + __LINE__)
81 #endif
82 #ifdef DBTUX_TREE_CPP
83 #define jam()           jamLine(50000 + __LINE__)
84 #define jamEntry()      jamEntryLine(50000 + __LINE__)
85 #endif
86 #ifdef DBTUX_SCAN_CPP
87 #define jam()           jamLine(60000 + __LINE__)
88 #define jamEntry()      jamEntryLine(60000 + __LINE__)
89 #endif
90 #ifdef DBTUX_SEARCH_CPP
91 #define jam()           jamLine(70000 + __LINE__)
92 #define jamEntry()      jamEntryLine(70000 + __LINE__)
93 #endif
94 #ifdef DBTUX_CMP_CPP
95 #define jam()           jamLine(80000 + __LINE__)
96 #define jamEntry()      jamEntryLine(80000 + __LINE__)
97 #endif
98 #ifdef DBTUX_STAT_CPP
99 #define jam()           jamLine(90000 + __LINE__)
100 #define jamEntry()      jamEntryLine(90000 + __LINE__)
101 #endif
102 #ifdef DBTUX_DEBUG_CPP
103 #define jam()           jamLine(100000 + __LINE__)
104 #define jamEntry()      jamEntryLine(100000 + __LINE__)
105 #endif
106 #ifndef jam
107 #define jam()           jamLine(__LINE__)
108 #define jamEntry()      jamEntryLine(__LINE__)
109 #endif
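
// For example, with DBTUX_SCAN_CPP defined a jam() on line 123 of
// DbtuxScan.cpp records jamLine(60123), so a jam trace identifies both
// the source file (by its 10000-block offset) and the line within it.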
110 
111 #undef max
112 #undef min
113 
114 class Configuration;
115 struct mt_BuildIndxCtx;
116 
117 class Dbtux : public SimulatedBlock {
118   friend class DbtuxProxy;
119   friend struct mt_BuildIndxCtx;
120   friend Uint32 Dbtux_mt_buildIndexFragment_wrapper_C(void*);
121 public:
122   Dbtux(Block_context& ctx, Uint32 instanceNumber = 0);
123   virtual ~Dbtux();
124 
125   // pointer to TUP instance in this thread
126   Dbtup* c_tup;
127 
128 private:
129   // sizes are in words (Uint32)
130   STATIC_CONST( MaxIndexFragments = MAX_FRAG_PER_NODE );
131   STATIC_CONST( MaxIndexAttributes = MAX_ATTRIBUTES_IN_INDEX );
132   STATIC_CONST( MaxAttrDataSize = 2 * MAX_ATTRIBUTES_IN_INDEX + MAX_KEY_SIZE_IN_WORDS );
133   STATIC_CONST( MaxXfrmDataSize = MaxAttrDataSize * MAX_XFRM_MULTIPLY);
134 public:
135   STATIC_CONST( DescPageSize = 512 );
136 private:
137   STATIC_CONST( MaxTreeNodeSize = MAX_TTREE_NODE_SIZE );
138   STATIC_CONST( MaxPrefSize = MAX_TTREE_PREF_SIZE );
139   STATIC_CONST( ScanBoundSegmentSize = 7 );
140   STATIC_CONST( MaxAccLockOps = MAX_PARALLEL_OP_PER_SCAN );
141   STATIC_CONST( MaxTreeDepth = 32 );    // strict
142 #ifdef VM_TRACE
143   // for TuxCtx::c_debugBuffer
144   STATIC_CONST( DebugBufferBytes = (MaxAttrDataSize << 2) );
145 #endif
146   BLOCK_DEFINES(Dbtux);
147 
148   // forward declarations
149   struct TuxCtx;
150 
151   // AttributeHeader size is assumed to be 1 word
152   STATIC_CONST( AttributeHeaderSize = 1 );
153 
154   /*
155    * Logical tuple address, "local key".  Identifies table tuples.
156    */
157   typedef Uint32 TupAddr;
158   STATIC_CONST( NullTupAddr = (Uint32)-1 );
159 
160   /*
161    * Physical tuple address in TUP.  Provides fast access to table tuple
162    * or index node.  Valid within the db node and across timeslices.
163    * Not valid between db nodes or across restarts.
164    *
165    * To avoid wasting an Uint16 the pageid is split in two.
166    */
167   struct TupLoc {
168   private:
169     Uint16 m_pageId1;           // page i-value (big-endian)
170     Uint16 m_pageId2;
171     Uint16 m_pageOffset;        // page offset in words
172   public:
173     TupLoc();
174     TupLoc(Uint32 pageId, Uint16 pageOffset);
175     Uint32 getPageId() const;
176     void setPageId(Uint32 pageId);
177     Uint32 getPageOffset() const;
178     void setPageOffset(Uint32 pageOffset);
179     bool operator==(const TupLoc& loc) const;
180     bool operator!=(const TupLoc& loc) const;
181   };
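
  /*
   * Illustrative sketch of the split: the 32-bit page i-value is stored
   * as two Uint16 halves and reassembled on access, e.g.
   *
   *   TupLoc loc(0x00012345, 10);
   *   // m_pageId1 = 0x0001, m_pageId2 = 0x2345, m_pageOffset = 10
   *   Uint32 pageId = loc.getPageId();  // (0x0001 << 16) | 0x2345
   */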
182 
183   /*
184    * There is no const member NullTupLoc since the compiler may not be
185    * able to optimize it to TupLoc() constants.  Instead null values are
186    * constructed on the stack with TupLoc().
187    */
188 #define NullTupLoc TupLoc()
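
  // For example, a missing child or parent link compares equal to
  // NullTupLoc (see NodeHandle::getChilds below).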
189 
190   // tree definitions
191 
192   /*
193    * Tree entry.  Points to a tuple in primary table via physical
194    * address of "original" tuple and tuple version.
195    *
196    * ZTUP_VERSION_BITS must be 15 (or less).
197    */
198   struct TreeEnt;
199   friend struct TreeEnt;
200   struct TreeEnt {
201     TupLoc m_tupLoc;            // address of original tuple
202     unsigned m_tupVersion : 15; // version
203     TreeEnt();
204     // methods
205     bool eqtuple(const TreeEnt ent) const;
206     bool eq(const TreeEnt ent) const;
207     int cmp(const TreeEnt ent) const;
208   };
209   STATIC_CONST( TreeEntSize = sizeof(TreeEnt) >> 2 );
210   static const TreeEnt NullTreeEnt;
211 
212   /*
213    * Tree node has 3 parts:
214    *
215    * 1) struct TreeNode - the header (6 words)
216    * 2) some key values for min entry - the min prefix
217    * 3) list of TreeEnt (each 2 words)
218    *
219    * There are 3 links to other nodes: left child, right child, parent.
220    * Occupancy (number of entries) is at least 1 except temporarily when
221    * a node is about to be removed.
222    */
223   struct TreeNode;
224   friend struct TreeNode;
225   struct TreeNode {
226     TupLoc m_link[3];           // link to 0-left child 1-right child 2-parent
227     unsigned m_side : 2;        // we are 0-left child 1-right child 2-root
228     unsigned m_balance : 2;     // balance -1, 0, +1 stored as 0, 1, 2 (for Solaris CC)
229     unsigned pad1 : 4;
230     Uint8 m_occup;              // current number of entries
231     Uint32 m_nodeScan;          // list of scans at this node
232     TreeNode();
233   };
234   STATIC_CONST( NodeHeadSize = sizeof(TreeNode) >> 2 );
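
  /*
   * Illustrative word layout of one tree node (a sketch based on the
   * description above, see TreeHead::getPref and getEntList below):
   *
   *   word 0 .. NodeHeadSize-1                TreeNode header
   *   word NodeHeadSize .. +m_prefSize-1      min prefix key data
   *   remaining words                         TreeEnt list, at most
   *                                           m_maxOccup * TreeEntSize words
   */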
235 
236   /*
237    * Tree header.  There is one in each fragment.  Contains tree
238    * parameters and address of root node.
239    */
240   struct TreeHead;
241   friend struct TreeHead;
242   struct TreeHead {
243     Uint8 m_nodeSize;           // words in tree node
244     Uint8 m_prefSize;           // words in min prefix
245     Uint8 m_minOccup;           // min entries in internal node
246     Uint8 m_maxOccup;           // max entries in node
247     TupLoc m_root;              // root node
248     TreeHead();
249     // methods
250     Uint32* getPref(TreeNode* node) const;
251     TreeEnt* getEntList(TreeNode* node) const;
252   };
253 
254   /*
255    * Tree position.  Specifies node, position within node (from 0 to
256    * m_occup), and whether the position is at an existing entry or
257    * before one (if any).  Position m_occup points past the node and is
258    * also represented by position 0 of next node.  Includes direction
259    * used by scan.
260    */
261   struct TreePos;
262   friend struct TreePos;
263   struct TreePos {
264     TupLoc m_loc;               // physical node address
265     Uint16 m_pos;               // position 0 to m_occup
266     Uint8 m_dir;                // see scanNext
267     TreePos();
268   };
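
  /*
   * For example, in a node with m_occup == 4 the entry positions are
   * 0..3; m_pos == 4 means "past this node" and denotes the same place
   * as position 0 of the next node in scan order (see scanNext).
   */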
269 
270   // packed metadata
271 
272   /*
273    * Descriptor page.  The "hot" metadata for an index is stored as
274    * contiguous array of words on some page.  It has 3 parts:
275    * 1) DescHead
276    * 2) array of NdbPack::Type used by NdbPack::Spec of index key
277    * 3) array of attr headers for reading index key values from TUP
278    */
279   struct DescPage;
280   friend struct DescPage;
281   struct DescPage {
282     Uint32 m_nextPage;
283     Uint32 m_numFree;           // number of free words
284     union {
285     Uint32 m_data[DescPageSize];
286     Uint32 nextPool;
287     };
288     DescPage();
289   };
290   typedef Ptr<DescPage> DescPagePtr;
291   ArrayPool<DescPage> c_descPagePool;
292   Uint32 c_descPageList;
293 
294   struct DescHead {
295     Uint32 m_indexId;
296     Uint16 m_numAttrs;
297     Uint16 m_magic;
298     enum { Magic = 0xDE5C };
299   };
300   STATIC_CONST( DescHeadSize = sizeof(DescHead) >> 2 );
301 
302   typedef NdbPack::Type KeyType;
303   typedef NdbPack::Spec KeySpec;
304   STATIC_CONST( KeyTypeSize = sizeof(KeyType) >> 2 );
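
  /*
   * Illustrative word layout of the index metadata on a DescPage
   * (a sketch matching getDescSize, getKeyTypes and getKeyAttrs below):
   *
   *   DescHeadSize words                        DescHead
   *   m_numAttrs * KeyTypeSize words            NdbPack::Type array
   *   m_numAttrs * AttributeHeaderSize words    attr headers for TUP reads
   */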
305 
306   typedef NdbPack::DataC KeyDataC;
307   typedef NdbPack::Data KeyData;
308   typedef NdbPack::BoundC KeyBoundC;
309   typedef NdbPack::Bound KeyBound;
310 
311   // range scan
312 
313   /*
314    * ScanBound instances are members of ScanOp.  Bound data is stored in
315    * a separate segmented buffer pool.
316    */
317   struct ScanBound {
318     DataBuffer<ScanBoundSegmentSize>::Head m_head;
319     Uint16 m_cnt;       // number of attributes
320     Int16 m_side;
321     ScanBound();
322   };
323   DataBuffer<ScanBoundSegmentSize>::DataBufferPool c_scanBoundPool;
324 
325   // ScanLock
326   struct ScanLock {
327     ScanLock() {}
328     Uint32 m_accLockOp;
329     union {
330     Uint32 nextPool;
331     Uint32 nextList;
332     };
333     Uint32 prevList;
334   };
335   typedef Ptr<ScanLock> ScanLockPtr;
336   ArrayPool<ScanLock> c_scanLockPool;
337 
338   /*
339    * Scan operation.
340    *
341    * Tuples are locked one at a time.  The current lock op is set to
342    * RNIL as soon as the lock is obtained and passed to LQH.  We must
343    * however remember all locks which LQH has not returned for unlocking
344    * since they must be aborted by us when the scan is closed.
345    *
346    * Scan state describes the entry we are interested in.  There is
347    * a separate lock wait flag.  It may be for current entry or it may
348    * be for an entry we were moved away from.  In any case nothing
349    * happens with current entry before lock wait flag is cleared.
350    *
351    * An unfinished scan is always linked to some tree node, and has
352    * current position and direction (see comments at scanNext).  There
353    * is also a copy of latest entry found.
354    *
355    * Error handling:  An error code (independent of scan state) is set
356    * and returned to LQH.  No more result rows are returned but normal
357    * protocol is still followed until scan close.
358    */
359   struct ScanOp;
360   friend struct ScanOp;
361   struct ScanOp {
362     enum {
363       Undef = 0,
364       First = 1,                // before first entry
365       Current = 2,              // at some entry
366       Found = 3,                // return current as next scan result
367       Blocked = 4,              // found and waiting for ACC lock
368       Locked = 5,               // found and locked or no lock needed
369       Next = 6,                 // looking for next entry
370       Last = 7,                 // after last entry
371       Aborting = 8
372     };
373     Uint8 m_state;
374     Uint8 m_lockwait;
375     Uint16 m_errorCode;
376     Uint32 m_userPtr;           // scanptr.i in LQH
377     Uint32 m_userRef;
378     Uint32 m_tableId;
379     Uint32 m_indexId;
380     Uint32 m_fragId;
381     Uint32 m_fragPtrI;
382     Uint32 m_transId1;
383     Uint32 m_transId2;
384     Uint32 m_savePointId;
385     // lock waited for or obtained and not yet passed to LQH
386     Uint32 m_accLockOp;
387     // locks obtained and passed to LQH but not yet returned by LQH
388     DLFifoList<ScanLock>::Head m_accLockOps;
389     Uint8 m_readCommitted;      // no locking
390     Uint8 m_lockMode;
391     Uint8 m_descending;
392     ScanBound m_scanBound[2];
393     TreePos m_scanPos;          // position
394     TreeEnt m_scanEnt;          // latest entry found
395     Uint32 m_nodeScan;          // next scan at node (single-linked)
396     Uint32 m_statOpPtrI;        // RNIL unless this is a statistics scan
397     union {
398     Uint32 nextPool;
399     Uint32 nextList;
400     };
401     Uint32 prevList;
402     ScanOp();
403   };
404   typedef Ptr<ScanOp> ScanOpPtr;
405   ArrayPool<ScanOp> c_scanOpPool;
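
  /*
   * Rough state flow of a locking scan (an illustrative sketch only,
   * the real transitions are in DbtuxScan.cpp):
   *
   *   First -> Next -> Found -> Blocked (lock wait) -> Locked
   *         -> Current -> Next -> ... -> Last
   *
   * m_accLockOp holds the lock currently waited for; once granted and
   * passed to LQH it is moved to m_accLockOps and reset to RNIL.
   */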
406 
407   // indexes and fragments
408 
409   /*
410    * Ordered index.  Top level data structure.  The primary table (table
411    * being indexed) lives in TUP.
412    */
413   struct Index;
414   friend struct Index;
415   struct Index {
416     enum State {
417       NotDefined = 0,
418       Defining = 1,
419       Building = 3,             // triggers activated, building
420       Online = 2,               // triggers activated and build done
421       Dropping = 9
422     };
423     State m_state;
424     DictTabInfo::TableType m_tableType;
425     Uint32 m_tableId;
426     Uint16 unused;
427     Uint16 m_numFrags;
428     Uint32 m_fragId[MaxIndexFragments];
429     Uint32 m_fragPtrI[MaxIndexFragments];
430     Uint32 m_descPage;          // descriptor page
431     Uint16 m_descOff;           // offset within the page
432     Uint16 m_numAttrs;
433     Uint16 m_prefAttrs;         // attributes in min prefix
434     Uint16 m_prefBytes;         // max bytes in min prefix
435     KeySpec m_keySpec;
436     Uint32 m_statFragPtrI;      // fragment to monitor if not RNIL
437     Uint32 m_statLoadTime;      // load time of index stats
438     union {
439     bool m_storeNullKey;
440     Uint32 nextPool;
441     };
442     Index();
443   };
444   typedef Ptr<Index> IndexPtr;
445   ArrayPool<Index> c_indexPool;
446   RSS_AP_SNAPSHOT(c_indexPool);
447 
448   /*
449    * Fragment of an index, as known to DIH/TC.  Represents the two
450    * duplicate fragments known to LQH/ACC/TUP.  Includes tree header.
451    * There are no maintenance operation records yet.
452    */
453   struct Frag;
454   friend struct Frag;
455   struct Frag {
456     Uint32 m_tableId;           // copy from index level
457     Uint32 m_indexId;
458     Uint16 unused;
459     Uint16 m_fragId;
460     TreeHead m_tree;
461     TupLoc m_freeLoc;           // one free node for next op
462     DLList<ScanOp> m_scanList;  // current scans on this fragment
463     Uint32 m_tupIndexFragPtrI;
464     Uint32 m_tupTableFragPtrI;
465     Uint32 m_accTableFragPtrI;
466     Uint64 m_entryCount;        // current entries
467     Uint64 m_entryBytes;        // sum of index key sizes
468     Uint64 m_entryOps;          // ops since last index stats update
469     union {
470     Uint32 nextPool;
471     };
472     Frag(ArrayPool<ScanOp>& scanOpPool);
473   };
474   typedef Ptr<Frag> FragPtr;
475   ArrayPool<Frag> c_fragPool;
476   RSS_AP_SNAPSHOT(c_fragPool);
477 
478   /*
479    * Fragment metadata operation.
480    */
481   struct FragOp {
482     Uint32 m_userPtr;
483     Uint32 m_userRef;
484     Uint32 m_indexId;
485     Uint32 m_fragId;
486     Uint32 m_fragPtrI;
487     Uint32 m_fragNo;            // fragment number starting at zero
488     Uint32 m_numAttrsRecvd;
489     union {
490     Uint32 nextPool;
491     };
492     FragOp();
493   };
494   typedef Ptr<FragOp> FragOpPtr;
495   ArrayPool<FragOp> c_fragOpPool;
496   RSS_AP_SNAPSHOT(c_fragOpPool);
497 
498   // node handles
499 
500   /*
501    * A node handle is a reference to a tree node in TUP.  It is used to
502    * operate on the node.  Node handles are allocated on the stack.
503    */
504   struct NodeHandle;
505   friend struct NodeHandle;
506   struct NodeHandle {
507     Frag& m_frag;               // fragment using the node
508     TupLoc m_loc;               // physical node address
509     TreeNode* m_node;           // pointer to node storage
510     NodeHandle(Frag& frag);
511     NodeHandle(const NodeHandle& node);
512     NodeHandle& operator=(const NodeHandle& node);
513     // check if unassigned
514     bool isNull();
515     // getters
516     TupLoc getLink(unsigned i);
517     unsigned getChilds();       // cannot spell
518     unsigned getSide();
519     unsigned getOccup();
520     int getBalance();
521     Uint32 getNodeScan();
522     // setters
523     void setLink(unsigned i, TupLoc loc);
524     void setSide(unsigned i);
525     void setOccup(unsigned n);
526     void setBalance(int b);
527     void setNodeScan(Uint32 scanPtrI);
528     // access other parts of the node
529     Uint32* getPref();
530     TreeEnt getEnt(unsigned pos);
531     // for ndbrequire and ndbassert
532     void progError(int line, int cause, const char* file);
533   };
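
  /*
   * Typical usage within a Dbtux method (sketch):
   *
   *   NodeHandle node(frag);
   *   selectNode(node, loc);            // attach handle to node storage
   *   unsigned occup = node.getOccup(); // then read or update header fields
   */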
534 
535   // stats scan
536   struct StatOp;
537   friend struct StatOp;
538   struct StatOp {
539     // the scan
540     Uint32 m_scanOpPtrI;
541     // parameters
542     Uint32 m_saveSize;
543     Uint32 m_saveScale;
544     Uint32 m_batchSize;
545     Uint32 m_estBytes;
546    // counters
547    Uint32 m_rowCount;
548    Uint32 m_batchCurr;
549    bool m_haveSample;
550    Uint32 m_sampleCount;
551    Uint32 m_keyBytes;
552    bool m_keyChange;
553    bool m_usePrev;
554    // metadata
555    enum { MaxKeyCount = MAX_INDEX_STAT_KEY_COUNT };
556    enum { MaxKeySize = MAX_INDEX_STAT_KEY_SIZE };
557    enum { MaxValueCount = MAX_INDEX_STAT_VALUE_COUNT };
558    enum { MaxValueSize = MAX_INDEX_STAT_VALUE_SIZE };
559    Uint32 m_keyCount;
560    Uint32 m_valueCount;
561    // pack
562    const KeySpec& m_keySpec;
563    NdbPack::Spec m_valueSpec;
564    NdbPack::Type m_valueSpecBuf[MaxValueCount];
565    // data previous current result
566    KeyData m_keyData1;
567    KeyData m_keyData2;
568    KeyData m_keyData;
569    NdbPack::Data m_valueData;
570    // buffers with one word for length bytes
571    Uint32 m_keyDataBuf1[1 + MaxKeySize];
572    Uint32 m_keyDataBuf2[1 + MaxKeySize];
573    Uint32 m_keyDataBuf[1 + MaxKeySize];
574    Uint32 m_valueDataBuf[1 + MaxValueCount];
575    // value collection
576    struct Value {
577      Uint32 m_rir;
578      Uint32 m_unq[MaxKeyCount];
579      Value();
580    };
581    Value m_value1;
582    Value m_value2;
583    union {
584    Uint32 nextPool;
585    };
586    StatOp(const Index&);
587   };
588   typedef Ptr<StatOp> StatOpPtr;
589   ArrayPool<StatOp> c_statOpPool;
590   RSS_AP_SNAPSHOT(c_statOpPool);
591 
592   // stats monitor (shared by req data and continueB loop)
593   struct StatMon;
594   friend struct StatMon;
595   struct StatMon {
596     IndexStatImplReq m_req;
597     Uint32 m_requestType;
598     // continueB loop
599     Uint32 m_loopIndexId;
600     Uint32 m_loopDelay;
601     StatMon();
602   };
603   StatMon c_statMon;
604 
605   // methods
606 
607   /*
608    * DbtuxGen.cpp
609    */
610   void execCONTINUEB(Signal* signal);
611   void execSTTOR(Signal* signal);
612   void execREAD_CONFIG_REQ(Signal* signal);
613   void execNODE_STATE_REP(Signal* signal);
614 
615   // utils
616   void readKeyAttrs(TuxCtx&, const Frag& frag, TreeEnt ent, KeyData& keyData, Uint32 count);
617   void readTablePk(const Frag& frag, TreeEnt ent, Uint32* pkData, unsigned& pkSize);
618   void unpackBound(TuxCtx&, const ScanBound& bound, KeyBoundC& searchBound);
619   void findFrag(const Index& index, Uint32 fragId, FragPtr& fragPtr);
620 
621   /*
622    * DbtuxMeta.cpp
623    */
624   void execCREATE_TAB_REQ(Signal*);
625   void execTUXFRAGREQ(Signal* signal);
626   void execTUX_ADD_ATTRREQ(Signal* signal);
627   void execALTER_INDX_IMPL_REQ(Signal* signal);
628   void execDROP_TAB_REQ(Signal* signal);
629   void execDROP_FRAG_REQ(Signal* signal);
630   bool allocDescEnt(IndexPtr indexPtr);
631   void freeDescEnt(IndexPtr indexPtr);
632   void abortAddFragOp(Signal* signal);
633   void dropIndex(Signal* signal, IndexPtr indexPtr, Uint32 senderRef, Uint32 senderData);
634 
635   /*
636    * DbtuxMaint.cpp
637    */
638   void execTUX_MAINT_REQ(Signal* signal);
639 
640   /*
641    * DbtuxNode.cpp
642    */
643   int allocNode(TuxCtx&, NodeHandle& node);
644   void freeNode(NodeHandle& node);
645   void selectNode(NodeHandle& node, TupLoc loc);
646   void insertNode(NodeHandle& node);
647   void deleteNode(NodeHandle& node);
648   void freePreallocatedNode(Frag& frag);
649   void setNodePref(struct TuxCtx &, NodeHandle& node);
650   // node operations
651   void nodePushUp(TuxCtx&, NodeHandle& node, unsigned pos, const TreeEnt& ent, Uint32 scanList);
652   void nodePushUpScans(NodeHandle& node, unsigned pos);
653   void nodePopDown(TuxCtx&, NodeHandle& node, unsigned pos, TreeEnt& ent, Uint32* scanList);
654   void nodePopDownScans(NodeHandle& node, unsigned pos);
655   void nodePushDown(TuxCtx&, NodeHandle& node, unsigned pos, TreeEnt& ent, Uint32& scanList);
656   void nodePushDownScans(NodeHandle& node, unsigned pos);
657   void nodePopUp(TuxCtx&, NodeHandle& node, unsigned pos, TreeEnt& ent, Uint32 scanList);
658   void nodePopUpScans(NodeHandle& node, unsigned pos);
659   void nodeSlide(TuxCtx&, NodeHandle& dstNode, NodeHandle& srcNode, unsigned cnt, unsigned i);
660   // scans linked to node
661   void addScanList(NodeHandle& node, unsigned pos, Uint32 scanList);
662   void removeScanList(NodeHandle& node, unsigned pos, Uint32& scanList);
663   void moveScanList(NodeHandle& node, unsigned pos);
664   void linkScan(NodeHandle& node, ScanOpPtr scanPtr);
665   void unlinkScan(NodeHandle& node, ScanOpPtr scanPtr);
666   bool islinkScan(NodeHandle& node, ScanOpPtr scanPtr);
667 
668   /*
669    * DbtuxTree.cpp
670    */
671   // add entry
672   void treeAdd(TuxCtx&, Frag& frag, TreePos treePos, TreeEnt ent);
673   void treeAddFull(TuxCtx&, Frag& frag, NodeHandle lubNode, unsigned pos, TreeEnt ent);
674   void treeAddNode(TuxCtx&, Frag& frag, NodeHandle lubNode, unsigned pos, TreeEnt ent, NodeHandle parentNode, unsigned i);
675   void treeAddRebalance(TuxCtx&, Frag& frag, NodeHandle node, unsigned i);
676   // remove entry
677   void treeRemove(Frag& frag, TreePos treePos);
678   void treeRemoveInner(Frag& frag, NodeHandle lubNode, unsigned pos);
679   void treeRemoveSemi(Frag& frag, NodeHandle node, unsigned i);
680   void treeRemoveLeaf(Frag& frag, NodeHandle node);
681   void treeRemoveNode(Frag& frag, NodeHandle node);
682   void treeRemoveRebalance(Frag& frag, NodeHandle node, unsigned i);
683   // rotate
684   void treeRotateSingle(TuxCtx&, Frag& frag, NodeHandle& node, unsigned i);
685   void treeRotateDouble(TuxCtx&, Frag& frag, NodeHandle& node, unsigned i);
686 
687   /*
688    * DbtuxScan.cpp
689    */
690   void execACC_SCANREQ(Signal* signal);
691   void execTUX_BOUND_INFO(Signal* signal);
692   void execNEXT_SCANREQ(Signal* signal);
693   void execACC_CHECK_SCAN(Signal* signal);
694   void execACCKEYCONF(Signal* signal);
695   void execACCKEYREF(Signal* signal);
696   void execACC_ABORTCONF(Signal* signal);
697   void scanFirst(ScanOpPtr scanPtr);
698   void scanFind(ScanOpPtr scanPtr);
699   void scanNext(ScanOpPtr scanPtr, bool fromMaintReq);
700   bool scanCheck(ScanOpPtr scanPtr, TreeEnt ent);
701   bool scanVisible(ScanOpPtr scanPtr, TreeEnt ent);
702   void scanClose(Signal* signal, ScanOpPtr scanPtr);
703   void abortAccLockOps(Signal* signal, ScanOpPtr scanPtr);
704   void addAccLockOp(ScanOpPtr scanPtr, Uint32 accLockOp);
705   void removeAccLockOp(ScanOpPtr scanPtr, Uint32 accLockOp);
706   void releaseScanOp(ScanOpPtr& scanPtr);
707 
708   /*
709    * DbtuxSearch.cpp
710    */
711   void findNodeToUpdate(TuxCtx&, Frag& frag, const KeyDataC& searchKey, TreeEnt searchEnt, NodeHandle& currNode);
712   bool findPosToAdd(TuxCtx&, Frag& frag, const KeyDataC& searchKey, TreeEnt searchEnt, NodeHandle& currNode, TreePos& treePos);
713   bool findPosToRemove(TuxCtx&, Frag& frag, const KeyDataC& searchKey, TreeEnt searchEnt, NodeHandle& currNode, TreePos& treePos);
714   bool searchToAdd(TuxCtx&, Frag& frag, const KeyDataC& searchKey, TreeEnt searchEnt, TreePos& treePos);
715   bool searchToRemove(TuxCtx&, Frag& frag, const KeyDataC& searchKey, TreeEnt searchEnt, TreePos& treePos);
716   void findNodeToScan(Frag& frag, unsigned dir, const KeyBoundC& searchBound, NodeHandle& currNode);
717   void findPosToScan(Frag& frag, unsigned idir, const KeyBoundC& searchBound, NodeHandle& currNode, Uint16* pos);
718   void searchToScan(Frag& frag, unsigned idir, const KeyBoundC& searchBound, TreePos& treePos);
719 
720   /*
721    * DbtuxCmp.cpp
722    */
723   int cmpSearchKey(TuxCtx&, const KeyDataC& searchKey, const KeyDataC& entryKey, Uint32 cnt);
724   int cmpSearchBound(TuxCtx&, const KeyBoundC& searchBound, const KeyDataC& entryKey, Uint32 cnt);
725 
726   /*
727    * DbtuxStat.cpp
728    */
729   void execREAD_PSEUDO_REQ(Signal* signal);
730   // one-round-trip tree-dive records in range
731   void statRecordsInRange(ScanOpPtr scanPtr, Uint32* out);
732   Uint32 getEntriesBeforeOrAfter(Frag& frag, TreePos pos, unsigned idir);
733   unsigned getPathToNode(NodeHandle node, Uint16* path);
734   // stats scan
735   int statScanInit(StatOpPtr, const Uint32* data, Uint32 len, Uint32* usedLen);
736   int statScanAddRow(StatOpPtr, TreeEnt ent);
737   void statScanReadKey(StatOpPtr, Uint32* out);
738   void statScanReadValue(StatOpPtr, Uint32* out);
739   void execINDEX_STAT_REP(Signal*); // from TRIX
740   // stats monitor request
741   void execINDEX_STAT_IMPL_REQ(Signal*);
742   void statMonStart(Signal*, StatMon&);
743   void statMonStop(Signal*, StatMon&);
744   void statMonConf(Signal*, StatMon&);
745   // stats monitor continueB loop
746   void statMonSendContinueB(Signal*);
747   void statMonExecContinueB(Signal*);
748   void statMonCheck(Signal*, StatMon&);
749   void statMonRep(Signal*, StatMon&);
750 
751   /*
752    * DbtuxDebug.cpp
753    */
754   void execDUMP_STATE_ORD(Signal* signal);
755 #ifdef VM_TRACE
756   struct PrintPar {
757     char m_path[100];           // LR prefix
758     unsigned m_side;            // expected side
759     TupLoc m_parent;            // expected parent address
760     int m_depth;                // returned depth
761     unsigned m_occup;           // returned occupancy
762     TreeEnt m_minmax[2];        // returned subtree min and max
763     bool m_ok;                  // returned status
764     PrintPar();
765   };
766   void printTree(Signal* signal, Frag& frag, NdbOut& out);
767   void printNode(struct TuxCtx&, Frag&, NdbOut& out, TupLoc loc, PrintPar& par);
768   friend class NdbOut& operator<<(NdbOut&, const TupLoc&);
769   friend class NdbOut& operator<<(NdbOut&, const TreeEnt&);
770   friend class NdbOut& operator<<(NdbOut&, const TreeNode&);
771   friend class NdbOut& operator<<(NdbOut&, const TreeHead&);
772   friend class NdbOut& operator<<(NdbOut&, const TreePos&);
773   friend class NdbOut& operator<<(NdbOut&, const KeyType&);
774   friend class NdbOut& operator<<(NdbOut&, const ScanOp&);
775   friend class NdbOut& operator<<(NdbOut&, const Index&);
776   friend class NdbOut& operator<<(NdbOut&, const Frag&);
777   friend class NdbOut& operator<<(NdbOut&, const FragOp&);
778   friend class NdbOut& operator<<(NdbOut&, const NodeHandle&);
779   friend class NdbOut& operator<<(NdbOut&, const StatOp&);
780   friend class NdbOut& operator<<(NdbOut&, const StatMon&);
781   FILE* debugFile;
782   NdbOut debugOut;
783   unsigned debugFlags;
784   enum {
785     DebugMeta = 1,              // log create and drop index
786     DebugMaint = 2,             // log maintenance ops
787     DebugTree = 4,              // log and check tree after each op
788     DebugScan = 8,              // log scans
789     DebugLock = 16,             // log ACC locks
790     DebugStat = 32              // log stats collection
791   };
792   STATIC_CONST( DataFillByte = 0xa2 );
793   STATIC_CONST( NodeFillByte = 0xa4 );
794 #endif
795 
796   void execDBINFO_SCANREQ(Signal* signal);
797 
798   // start up info
799   Uint32 c_internalStartPhase;
800   Uint32 c_typeOfStart;
801 
802   /*
803    * Global data set at operation start.  Unpacked from index metadata.
804    * Not passed as parameter to methods.  Invalid across timeslices.
805    *
806    * TODO inline all into index metadata
807    */
808   struct TuxCtx
809   {
810     EmulatedJamBuffer * jamBuffer;
811 
812     // buffer for scan bound and search key data
813     Uint32* c_searchKey;
814 
815     // buffer for current entry key data
816     Uint32* c_entryKey;
817 
818     // buffer for xfrm-ed PK and for temporary use
819     Uint32* c_dataBuffer;
820 
821 #ifdef VM_TRACE
822     char* c_debugBuffer;
823 #endif
824   };
825 
826   struct TuxCtx c_ctx; // Global Tux context, for everything except MT-index build
827 
828   // index stats
829   bool c_indexStatAutoUpdate;
830   Uint32 c_indexStatSaveSize;
831   Uint32 c_indexStatSaveScale;
832   Uint32 c_indexStatTriggerPct;
833   Uint32 c_indexStatTriggerScale;
834   Uint32 c_indexStatUpdateDelay;
835 
836   // inlined utils
837   Uint32 getDescSize(const Index& index);
838   DescHead& getDescHead(const Index& index);
839   KeyType* getKeyTypes(DescHead& descHead);
840   const KeyType* getKeyTypes(const DescHead& descHead);
841   AttributeHeader* getKeyAttrs(DescHead& descHead);
842   const AttributeHeader* getKeyAttrs(const DescHead& descHead);
843   //
844   void getTupAddr(const Frag& frag, TreeEnt ent, Uint32& lkey1, Uint32& lkey2);
845   static unsigned min(unsigned x, unsigned y);
846   static unsigned max(unsigned x, unsigned y);
847 
848 public:
849   static Uint32 mt_buildIndexFragment_wrapper(void*);
850 private:
851   Uint32 mt_buildIndexFragment(struct mt_BuildIndxCtx*);
852 };
853 
854 // Dbtux::TupLoc
855 
856 inline
857 Dbtux::TupLoc::TupLoc() :
858   m_pageId1(RNIL >> 16),
859   m_pageId2(RNIL & 0xFFFF),
860   m_pageOffset(0)
861 {
862 }
863 
864 inline
865 Dbtux::TupLoc::TupLoc(Uint32 pageId, Uint16 pageOffset) :
866   m_pageId1(pageId >> 16),
867   m_pageId2(pageId & 0xFFFF),
868   m_pageOffset(pageOffset)
869 {
870 }
871 
872 inline Uint32
873 Dbtux::TupLoc::getPageId() const
874 {
875   return (m_pageId1 << 16) | m_pageId2;
876 }
877 
878 inline void
879 Dbtux::TupLoc::setPageId(Uint32 pageId)
880 {
881   m_pageId1 = (pageId >> 16);
882   m_pageId2 = (pageId & 0xFFFF);
883 }
884 
885 inline Uint32
886 Dbtux::TupLoc::getPageOffset() const
887 {
888   return (Uint32)m_pageOffset;
889 }
890 
891 inline void
892 Dbtux::TupLoc::setPageOffset(Uint32 pageOffset)
893 {
894   m_pageOffset = (Uint16)pageOffset;
895 }
896 
897 inline bool
898 Dbtux::TupLoc::operator==(const TupLoc& loc) const
899 {
900   return
901     m_pageId1 == loc.m_pageId1 &&
902     m_pageId2 == loc.m_pageId2 &&
903     m_pageOffset == loc.m_pageOffset;
904 }
905 
906 inline bool
907 Dbtux::TupLoc::operator!=(const TupLoc& loc) const
908 {
909   return ! (*this == loc);
910 }
911 
912 // Dbtux::TreeEnt
913 
914 inline
915 Dbtux::TreeEnt::TreeEnt() :
916   m_tupLoc(),
917   m_tupVersion(0)
918 {
919 }
920 
921 inline bool
922 Dbtux::TreeEnt::eqtuple(const TreeEnt ent) const
923 {
924   return
925     m_tupLoc == ent.m_tupLoc;
926 }
927 
928 inline bool
929 Dbtux::TreeEnt::eq(const TreeEnt ent) const
930 {
931   return
932     m_tupLoc == ent.m_tupLoc &&
933     m_tupVersion == ent.m_tupVersion;
934 }
935 
936 inline int
937 Dbtux::TreeEnt::cmp(const TreeEnt ent) const
938 {
939   if (m_tupLoc.getPageId() < ent.m_tupLoc.getPageId())
940     return -1;
941   if (m_tupLoc.getPageId() > ent.m_tupLoc.getPageId())
942     return +1;
943   if (m_tupLoc.getPageOffset() < ent.m_tupLoc.getPageOffset())
944     return -1;
945   if (m_tupLoc.getPageOffset() > ent.m_tupLoc.getPageOffset())
946     return +1;
947   /*
948    * Guess if one tuple version has wrapped around.  This gives a well-
949    * defined ordering on existing versions since versions are assigned
950    * consecutively and different versions exist only on an uncommitted
951    * tuple.  Assumes at most 2**14 uncommitted ops on the same tuple.
952    */
953   const unsigned version_wrap_limit = (1 << (ZTUP_VERSION_BITS - 1));
954   if (m_tupVersion < ent.m_tupVersion) {
955     if (unsigned(ent.m_tupVersion - m_tupVersion) < version_wrap_limit)
956       return -1;
957     else
958       return +1;
959   }
960   if (m_tupVersion > ent.m_tupVersion) {
961     if (unsigned(m_tupVersion - ent.m_tupVersion) < version_wrap_limit)
962       return +1;
963     else
964       return -1;
965   }
966   return 0;
967 }
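
// Worked example of the wrap-around guess above: with ZTUP_VERSION_BITS = 15
// the limit is 1 << 14 = 16384.  Comparing this->m_tupVersion = 5 against
// ent.m_tupVersion = 32760, the difference 32755 is >= 16384, so version 5
// is assumed to have wrapped and cmp() returns +1 (5 orders after 32760).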
968 
969 // Dbtux::TreeNode
970 
971 inline
972 Dbtux::TreeNode::TreeNode() :
973   m_side(2),
974   m_balance(0 + 1),
975   pad1(0),
976   m_occup(0),
977   m_nodeScan(RNIL)
978 {
979   m_link[0] = NullTupLoc;
980   m_link[1] = NullTupLoc;
981   m_link[2] = NullTupLoc;
982 }
983 
984 // Dbtux::TreeHead
985 
986 inline
987 Dbtux::TreeHead::TreeHead() :
988   m_nodeSize(0),
989   m_prefSize(0),
990   m_minOccup(0),
991   m_maxOccup(0),
992   m_root()
993 {
994 }
995 
996 inline Uint32*
997 Dbtux::TreeHead::getPref(TreeNode* node) const
998 {
999   Uint32* ptr = (Uint32*)node + NodeHeadSize;
1000   return ptr;
1001 }
1002 
1003 inline Dbtux::TreeEnt*
1004 Dbtux::TreeHead::getEntList(TreeNode* node) const
1005 {
1006   Uint32* ptr = (Uint32*)node + NodeHeadSize + m_prefSize;
1007   return (TreeEnt*)ptr;
1008 }
1009 
1010 // Dbtux::TreePos
1011 
1012 inline
1013 Dbtux::TreePos::TreePos() :
1014   m_loc(),
1015   m_pos(ZNIL),
1016   m_dir(255)
1017 {
1018 }
1019 
1020 // Dbtux::DescPage
1021 
1022 inline
1023 Dbtux::DescPage::DescPage() :
1024   m_nextPage(RNIL),
1025   m_numFree(ZNIL)
1026 {
1027   for (unsigned i = 0; i < DescPageSize; i++) {
1028 #ifdef VM_TRACE
1029     m_data[i] = 0x13571357;
1030 #else
1031     m_data[i] = 0;
1032 #endif
1033   }
1034 }
1035 
1036 // Dbtux::ScanBound
1037 
1038 inline
1039 Dbtux::ScanBound::ScanBound() :
1040   m_head(),
1041   m_cnt(0),
1042   m_side(0)
1043 {
1044 }
1045 
1046 // Dbtux::ScanOp
1047 
1048 inline
1049 Dbtux::ScanOp::ScanOp() :
1050   m_state(Undef),
1051   m_lockwait(false),
1052   m_errorCode(0),
1053   m_userPtr(RNIL),
1054   m_userRef(RNIL),
1055   m_tableId(RNIL),
1056   m_indexId(RNIL),
1057   m_fragPtrI(RNIL),
1058   m_transId1(0),
1059   m_transId2(0),
1060   m_savePointId(0),
1061   m_accLockOp(RNIL),
1062   m_accLockOps(),
1063   m_readCommitted(0),
1064   m_lockMode(0),
1065   m_descending(0),
1066   m_scanBound(),
1067   m_scanPos(),
1068   m_scanEnt(),
1069   m_nodeScan(RNIL),
1070   m_statOpPtrI(RNIL)
1071 {
1072 }
1073 
1074 // Dbtux::Index
1075 
1076 inline
1077 Dbtux::Index::Index() :
1078   m_state(NotDefined),
1079   m_tableType(DictTabInfo::UndefTableType),
1080   m_tableId(RNIL),
1081   m_numFrags(0),
1082   m_descPage(RNIL),
1083   m_descOff(0),
1084   m_numAttrs(0),
1085   m_prefAttrs(0),
1086   m_prefBytes(0),
1087   m_keySpec(),
1088   m_statFragPtrI(RNIL),
1089   m_statLoadTime(0),
1090   m_storeNullKey(false)
1091 {
1092   for (unsigned i = 0; i < MaxIndexFragments; i++) {
1093     m_fragId[i] = ZNIL;
1094     m_fragPtrI[i] = RNIL;
1095   };
1096 }
1097 
1098 // Dbtux::Frag
1099 
1100 inline
1101 Dbtux::Frag::Frag(ArrayPool<ScanOp>& scanOpPool) :
1102   m_tableId(RNIL),
1103   m_indexId(RNIL),
1104   m_fragId(ZNIL),
1105   m_tree(),
1106   m_freeLoc(),
1107   m_scanList(scanOpPool),
1108   m_tupIndexFragPtrI(RNIL),
1109   m_tupTableFragPtrI(RNIL),
1110   m_accTableFragPtrI(RNIL),
1111   m_entryCount(0),
1112   m_entryBytes(0),
1113   m_entryOps(0)
1114 {
1115 }
1116 
1117 // Dbtux::FragOp
1118 
1119 inline
1120 Dbtux::FragOp::FragOp() :
1121   m_userPtr(RNIL),
1122   m_userRef(RNIL),
1123   m_indexId(RNIL),
1124   m_fragId(ZNIL),
1125   m_fragPtrI(RNIL),
1126   m_fragNo(ZNIL),
1127   m_numAttrsRecvd(ZNIL)
1128 {
1129 }
1130 
1131 // Dbtux::NodeHandle
1132 
1133 inline
1134 Dbtux::NodeHandle::NodeHandle(Frag& frag) :
1135   m_frag(frag),
1136   m_loc(),
1137   m_node(0)
1138 {
1139 }
1140 
1141 inline
1142 Dbtux::NodeHandle::NodeHandle(const NodeHandle& node) :
1143   m_frag(node.m_frag),
1144   m_loc(node.m_loc),
1145   m_node(node.m_node)
1146 {
1147 }
1148 
1149 inline Dbtux::NodeHandle&
1150 Dbtux::NodeHandle::operator=(const NodeHandle& node)
1151 {
1152   ndbassert(&m_frag == &node.m_frag);
1153   m_loc = node.m_loc;
1154   m_node = node.m_node;
1155   return *this;
1156 }
1157 
1158 inline bool
1159 Dbtux::NodeHandle::isNull()
1160 {
1161   return m_node == 0;
1162 }
1163 
1164 inline Dbtux::TupLoc
1165 Dbtux::NodeHandle::getLink(unsigned i)
1166 {
1167   ndbrequire(i <= 2);
1168   return m_node->m_link[i];
1169 }
1170 
1171 inline unsigned
1172 Dbtux::NodeHandle::getChilds()
1173 {
1174   return (m_node->m_link[0] != NullTupLoc) + (m_node->m_link[1] != NullTupLoc);
1175 }
1176 
1177 inline unsigned
1178 Dbtux::NodeHandle::getSide()
1179 {
1180   return m_node->m_side;
1181 }
1182 
1183 inline unsigned
1184 Dbtux::NodeHandle::getOccup()
1185 {
1186   return m_node->m_occup;
1187 }
1188 
1189 inline int
1190 Dbtux::NodeHandle::getBalance()
1191 {
1192   return (int)m_node->m_balance - 1;
1193 }
1194 
1195 inline Uint32
1196 Dbtux::NodeHandle::getNodeScan()
1197 {
1198   return m_node->m_nodeScan;
1199 }
1200 
1201 inline void
1202 Dbtux::NodeHandle::setLink(unsigned i, TupLoc loc)
1203 {
1204   if (likely(i <= 2))
1205   {
1206     m_node->m_link[i] = loc;
1207   }
1208   else
1209   {
1210     ndbrequire(false);
1211   }
1212 }
1213 
1214 inline void
1215 Dbtux::NodeHandle::setSide(unsigned i)
1216 {
1217   if (likely(i <= 2))
1218   {
1219     m_node->m_side = i;
1220   }
1221   else
1222   {
1223     ndbrequire(false);
1224   }
1225 }
1226 
1227 inline void
1228 Dbtux::NodeHandle::setOccup(unsigned n)
1229 {
1230   TreeHead& tree = m_frag.m_tree;
1231   ndbrequire(n <= tree.m_maxOccup);
1232   m_node->m_occup = n;
1233 }
1234 
1235 inline void
1236 Dbtux::NodeHandle::setBalance(int b)
1237 {
1238   ndbrequire(abs(b) <= 1);
1239   m_node->m_balance = (unsigned)(b + 1);
1240 }
1241 
1242 inline void
1243 Dbtux::NodeHandle::setNodeScan(Uint32 scanPtrI)
1244 {
1245   m_node->m_nodeScan = scanPtrI;
1246 }
1247 
1248 inline Uint32*
1249 Dbtux::NodeHandle::getPref()
1250 {
1251   TreeHead& tree = m_frag.m_tree;
1252   return tree.getPref(m_node);
1253 }
1254 
1255 inline Dbtux::TreeEnt
1256 Dbtux::NodeHandle::getEnt(unsigned pos)
1257 {
1258   TreeHead& tree = m_frag.m_tree;
1259   TreeEnt* entList = tree.getEntList(m_node);
1260   const unsigned occup = m_node->m_occup;
1261   ndbrequire(pos < occup);
1262   return entList[pos];
1263 }
1264 
1265 // stats
1266 
1267 inline
1268 Dbtux::StatOp::Value::Value()
1269 {
1270   m_rir = 0;
1271   Uint32 i;
1272   for (i = 0; i < MaxKeyCount; i++)
1273     m_unq[i] = 0;
1274 }
1275 
1276 inline
1277 Dbtux::StatOp::StatOp(const Index& index) :
1278   m_scanOpPtrI(RNIL),
1279   m_saveSize(0),
1280   m_saveScale(0),
1281   m_batchSize(0),
1282   m_estBytes(0),
1283   m_rowCount(0),
1284   m_batchCurr(0),
1285   m_haveSample(false),
1286   m_sampleCount(0),
1287   m_keyBytes(0),
1288   m_keyChange(false),
1289   m_usePrev(false),
1290   m_keyCount(0),
1291   m_valueCount(0),
1292   m_keySpec(index.m_keySpec),
1293   m_keyData1(m_keySpec, false, 2),
1294   m_keyData2(m_keySpec, false, 2),
1295   m_keyData(m_keySpec, false, 2),
1296   m_valueData(m_valueSpec, false, 2),
1297   m_value1(),
1298   m_value2()
1299 {
1300   m_valueSpec.set_buf(m_valueSpecBuf, MaxValueCount);
1301   m_keyData1.set_buf(m_keyDataBuf1, sizeof(m_keyDataBuf1));
1302   m_keyData2.set_buf(m_keyDataBuf2, sizeof(m_keyDataBuf2));
1303   m_keyData.set_buf(m_keyDataBuf, sizeof(m_keyDataBuf));
1304   m_valueData.set_buf(m_valueDataBuf, sizeof(m_valueDataBuf));
1305 }
1306 
1307 // Dbtux::StatMon
1308 
1309 inline
1310 Dbtux::StatMon::StatMon() :
1311   m_requestType(0),
1312   m_loopIndexId(0),
1313   m_loopDelay(1000)
1314 {
1315   memset(&m_req, 0, sizeof(m_req));
1316 }
1317 
1318 // parameters for methods
1319 
1320 #ifdef VM_TRACE
1321 inline
1322 Dbtux::PrintPar::PrintPar() :
1323   // caller fills in
1324   m_path(),
1325   m_side(255),
1326   m_parent(),
1327   // default return values
1328   m_depth(0),
1329   m_occup(0),
1330   m_ok(true)
1331 {
1332 }
1333 #endif
1334 
1335 // utils
1336 
1337 inline Uint32
1338 Dbtux::getDescSize(const Index& index)
1339 {
1340   return
1341     DescHeadSize +
1342     index.m_numAttrs * KeyTypeSize +
1343     index.m_numAttrs * AttributeHeaderSize;
1344 }
1345 
1346 inline Dbtux::DescHead&
1347 Dbtux::getDescHead(const Index& index)
1348 {
1349   DescPagePtr pagePtr;
1350   pagePtr.i = index.m_descPage;
1351   c_descPagePool.getPtr(pagePtr);
1352   ndbrequire(index.m_descOff < DescPageSize);
1353   Uint32* ptr = &pagePtr.p->m_data[index.m_descOff];
1354   DescHead* descHead = reinterpret_cast<DescHead*>(ptr);
1355   ndbrequire(descHead->m_magic == DescHead::Magic);
1356   return *descHead;
1357 }
1358 
1359 inline Dbtux::KeyType*
1360 Dbtux::getKeyTypes(DescHead& descHead)
1361 {
1362   Uint32* ptr = reinterpret_cast<Uint32*>(&descHead);
1363   ptr += DescHeadSize;
1364   return reinterpret_cast<KeyType*>(ptr);
1365 }
1366 
1367 inline const Dbtux::KeyType*
1368 Dbtux::getKeyTypes(const DescHead& descHead)
1369 {
1370   const Uint32* ptr = reinterpret_cast<const Uint32*>(&descHead);
1371   ptr += DescHeadSize;
1372   return reinterpret_cast<const KeyType*>(ptr);
1373 }
1374 
1375 inline AttributeHeader*
1376 Dbtux::getKeyAttrs(DescHead& descHead)
1377 {
1378   Uint32* ptr = reinterpret_cast<Uint32*>(&descHead);
1379   ptr += DescHeadSize;
1380   ptr += descHead.m_numAttrs * KeyTypeSize;
1381   return reinterpret_cast<AttributeHeader*>(ptr);
1382 }
1383 
1384 inline const AttributeHeader*
1385 Dbtux::getKeyAttrs(const DescHead& descHead)
1386 {
1387   const Uint32* ptr = reinterpret_cast<const Uint32*>(&descHead);
1388   ptr += DescHeadSize;
1389   ptr += descHead.m_numAttrs * KeyTypeSize;
1390   return reinterpret_cast<const AttributeHeader*>(ptr);
1391 }
1392 
1393 inline
1394 void
1395 Dbtux::getTupAddr(const Frag& frag, TreeEnt ent, Uint32& lkey1, Uint32& lkey2)
1396 {
1397   const Uint32 tableFragPtrI = frag.m_tupTableFragPtrI;
1398   const TupLoc tupLoc = ent.m_tupLoc;
1399   c_tup->tuxGetTupAddr(tableFragPtrI, tupLoc.getPageId(),tupLoc.getPageOffset(),
1400                        lkey1, lkey2);
1401   jamEntry();
1402 }
1403 
1404 inline unsigned
1405 Dbtux::min(unsigned x, unsigned y)
1406 {
1407   return x < y ? x : y;
1408 }
1409 
1410 inline unsigned
1411 Dbtux::max(unsigned x, unsigned y)
1412 {
1413   return x > y ? x : y;
1414 }
1415 
1416 // DbtuxCmp.cpp
1417 
1418 inline int
1419 Dbtux::cmpSearchKey(TuxCtx& ctx, const KeyDataC& searchKey, const KeyDataC& entryKey, Uint32 cnt)
1420 {
1421   // compare cnt attributes from each
1422   Uint32 num_eq;
1423   int ret = searchKey.cmp(entryKey, cnt, num_eq);
1424 #ifdef VM_TRACE
1425   if (debugFlags & DebugMaint) {
1426     debugOut << "cmpSearchKey: ret:" << ret;
1427     debugOut << " search:" << searchKey.print(ctx.c_debugBuffer, DebugBufferBytes);
1428     debugOut << " entry:" << entryKey.print(ctx.c_debugBuffer, DebugBufferBytes);
1429     debugOut << endl;
1430   }
1431 #endif
1432   return ret;
1433 }
1434 
1435 inline int
1436 Dbtux::cmpSearchBound(TuxCtx& ctx, const KeyBoundC& searchBound, const KeyDataC& entryKey, Uint32 cnt)
1437 {
1438   // compare cnt attributes from each
1439   Uint32 num_eq;
1440   int ret = searchBound.cmp(entryKey, cnt, num_eq);
1441 #ifdef VM_TRACE
1442   if (debugFlags & DebugScan) {
1443     debugOut << "cmpSearchBound: res:" << ret;
1444     debugOut << " search:" << searchBound.print(ctx.c_debugBuffer, DebugBufferBytes);
1445     debugOut << " entry:" << entryKey.print(ctx.c_debugBuffer, DebugBufferBytes);
1446     debugOut << endl;
1447   }
1448 #endif
1449   return ret;
1450 }
1451 
1452 #endif
1453