/* Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License, version 2.0,
   as published by the Free Software Foundation.

   This program is also distributed with certain software (including
   but not limited to OpenSSL) that is licensed under separate terms,
   as designated in a particular file or component or in included license
   documentation.  The authors of MySQL hereby grant you an additional
   permission to link the program and your derivative works with the
   separately licensed software that they have included with MySQL.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License, version 2.0, for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */

#ifndef NDB_LOCAL_PROXY_HPP
#define NDB_LOCAL_PROXY_HPP

#include <pc.hpp>
#include <SimulatedBlock.hpp>
#include <Bitmask.hpp>
#include <DLFifoList.hpp>
#include <signaldata/ReadConfig.hpp>
#include <signaldata/NdbSttor.hpp>
#include <signaldata/ReadNodesConf.hpp>
#include <signaldata/NodeFailRep.hpp>
#include <signaldata/NodeStateSignalData.hpp>
#include <signaldata/NFCompleteRep.hpp>
#include <signaldata/CreateTrigImpl.hpp>
#include <signaldata/DropTrigImpl.hpp>
#include <signaldata/DbinfoScan.hpp>
#include <signaldata/Sync.hpp>

/*
 * Proxy blocks for MT LQH.
 *
 * The LQH proxy is the LQH block seen by other nodes and blocks,
 * unless it is by-passed for efficiency.  Real LQH instances (workers)
 * run behind it.  The instance number is 1 + worker index.
 *
 * There are also proxies and workers for ACC, TUP, TUX, BACKUP,
 * RESTORE, and PGMAN.  Proxy classes are subclasses of LocalProxy.
 * Workers with the same instance number (one from each class) run in
 * the same thread.
 *
 * After the LQH workers there is an optional extra worker.  It runs
 * in the thread of the main block (i.e. the proxy).  Its instance
 * number is fixed as 1 + MaxLqhWorkers (currently 5), i.e. it skips
 * over any unused LQH instance numbers.
 */
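
/*
 * Illustrative sketch (not part of this header): a concrete proxy
 * subclass mainly supplies newWorker() to construct its worker block.
 * The class and block names below are hypothetical.
 *
 *   class MyBlockProxy : public LocalProxy {
 *   public:
 *     MyBlockProxy(Block_context& ctx)
 *       : LocalProxy(MYBLOCK, ctx) {}           // hypothetical block number
 *   protected:
 *     virtual SimulatedBlock* newWorker(Uint32 instanceNo) {
 *       return new MyBlock(m_ctx, instanceNo);  // worker gets its instance no
 *     }
 *   };
 */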

class LocalProxy : public SimulatedBlock {
public:
  LocalProxy(BlockNumber blockNumber, Block_context& ctx);
  virtual ~LocalProxy();
  BLOCK_DEFINES(LocalProxy);

protected:
  enum { MaxLqhWorkers = MAX_NDBMT_LQH_WORKERS };
  enum { MaxExtraWorkers = 1 };
  enum { MaxWorkers = MaxLqhWorkers + MaxExtraWorkers };
  typedef Bitmask<(MaxWorkers+31)/32> WorkerMask;
  Uint32 c_lqhWorkers;
  Uint32 c_extraWorkers;
  Uint32 c_workers;
  // no gaps - extra worker has index c_lqhWorkers (not MaxLqhWorkers)
  SimulatedBlock* c_worker[MaxWorkers];

  virtual SimulatedBlock* newWorker(Uint32 instanceNo) = 0;
  virtual void loadWorkers();

  // get worker block by index (not by instance)

  SimulatedBlock* workerBlock(Uint32 i) {
    ndbrequire(i < c_workers);
    ndbrequire(c_worker[i] != 0);
    return c_worker[i];
  }

  SimulatedBlock* extraWorkerBlock() {
    return workerBlock(c_lqhWorkers);
  }

  // get worker block reference by index (not by instance)

  BlockReference workerRef(Uint32 i) {
    return numberToRef(number(), workerInstance(i), getOwnNodeId());
  }

  BlockReference extraWorkerRef() {
    ndbrequire(c_workers == c_lqhWorkers + 1);
    Uint32 i = c_lqhWorkers;
    return workerRef(i);
  }

  // convert between worker index and worker instance

  Uint32 workerInstance(Uint32 i) const {
    ndbrequire(i < c_workers);
    Uint32 ino;
    if (i < c_lqhWorkers)
      ino = 1 + i;
    else
      ino = 1 + MaxLqhWorkers;
    return ino;
  }

  Uint32 workerIndex(Uint32 ino) const {
    ndbrequire(ino != 0);
    Uint32 i;
    if (ino != 1 + MaxLqhWorkers)
      i = ino - 1;
    else
      i = c_lqhWorkers;
    ndbrequire(i < c_workers);
    return i;
  }
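
  /*
   * Example mapping (illustrative, assuming 4 LQH workers plus the extra
   * worker): worker indexes 0-3 map to instances 1-4, and the extra
   * worker at index 4 (== c_lqhWorkers) maps to instance 1 + MaxLqhWorkers.
   */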

  // support routines and classes ("Ss" = signal state)

  typedef void (LocalProxy::*SsFUNCREQ)(Signal*, Uint32 ssId, SectionHandle*);
  typedef void (LocalProxy::*SsFUNCREP)(Signal*, Uint32 ssId);

  struct SsCommon {
    Uint32 m_ssId;      // unique id in SsPool (below)
    SsFUNCREQ m_sendREQ;   // from proxy to worker
    SsFUNCREP m_sendCONF;  // from proxy to caller
    Uint32 m_worker;    // current worker
    Uint32 m_error;
    Uint32 m_sec_cnt;
    Uint32 m_sec_ptr[3];
    static const char* name() { return "UNDEF"; }
    SsCommon() {
      m_ssId = 0;
      m_sendREQ = 0;
      m_sendCONF = 0;
      m_worker = 0;
      m_error = 0;
      m_sec_cnt = 0;
    }
  };

  // run workers sequentially
  struct SsSequential : SsCommon {
    SsSequential() {}
  };
  void sendREQ(Signal*, SsSequential& ss);
  void recvCONF(Signal*, SsSequential& ss);
  void recvREF(Signal*, SsSequential& ss, Uint32 error);
  // for use in sendREQ
  void skipReq(SsSequential& ss);
  void skipConf(SsSequential& ss);
  // for use in sendCONF
  bool firstReply(const SsSequential& ss);
  bool lastReply(const SsSequential& ss);

  void saveSections(SsCommon& ss, SectionHandle&);
  void restoreHandle(SectionHandle&, SsCommon&);

  // run workers in parallel
  struct SsParallel : SsCommon {
    WorkerMask m_workerMask;
    bool m_extraLast;   // run extra after LQH workers
    Uint32 m_extraSent;
    SsParallel() {
      m_extraLast = false;
      m_extraSent = 0;
    }
  };
  void sendREQ(Signal*, SsParallel& ss);
  void recvCONF(Signal*, SsParallel& ss);
  void recvREF(Signal*, SsParallel& ss, Uint32 error);
  // for use in sendREQ
  void skipReq(SsParallel& ss);
  void skipConf(SsParallel& ss);
  // for use in sendCONF
  bool firstReply(const SsParallel& ss);
  bool lastReply(const SsParallel& ss);
  bool lastExtra(Signal* signal, SsParallel& ss);
  // set all or given bits in worker mask
  void setMask(SsParallel& ss);
  void setMask(SsParallel& ss, const WorkerMask& mask);
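
  /*
   * Typical round trip through an Ss instance (illustrative sketch;
   * GSN and struct names below are hypothetical):
   *
   *   void execFOO_REQ(Signal* signal) {
   *     Ss_FOO_REQ& ss = ssSeize<Ss_FOO_REQ>(ssId);
   *     ss.m_req = *(const FooReq*)signal->getDataPtr();
   *     sendREQ(signal, ss);    // fan out (parallel) or start (sequential)
   *   }
   *   void execFOO_CONF(Signal* signal) {
   *     Ss_FOO_REQ& ss = ssFind<Ss_FOO_REQ>(ssId);
   *     recvCONF(signal, ss);   // m_sendCONF replies to the caller,
   *   }                         // typically once lastReply(ss) holds
   */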

  /*
   * Ss instances are seized from a pool.  Each pool is simply an array
   * of Ss instances.  Usually poolSize is 1.  Some signals need a few
   * more but the heavy stuff (query/DML) by-passes the proxy.
   *
   * Each Ss instance has a unique Uint32 ssId.  If there are multiple
   * instances then ssId must be computable from signal data.  One option
   * is often to use a generated ssId and set it as senderData.
   */
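
  /*
   * For example (sketch, hypothetical request struct): a generated ssId
   * can be carried in senderData and recovered from the reply:
   *
   *   Ss_FOO_REQ& ss = ssSeize<Ss_FOO_REQ>();  // ssId = SsIdBase | c_ssIdSeq
   *   req->senderData = ss.m_ssId;
   *   ...
   *   Uint32 ssId = conf->senderData;
   *   Ss_FOO_REQ& ss = ssFind<Ss_FOO_REQ>(ssId);
   */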

  template <class Ss>
  struct SsPool {
    Ss m_pool[Ss::poolSize];
    Uint32 m_usage;
    SsPool() {
      m_usage = 0;
    }
  };

  Uint32 c_ssIdSeq;

  // convenient for adding non-zero high bit
  enum { SsIdBase = (1u << 31) };

  template <class Ss>
  Ss* ssSearch(Uint32 ssId)
  {
    SsPool<Ss>& sp = Ss::pool(this);
    Ss* ssptr = 0;
    for (Uint32 i = 0; i < Ss::poolSize; i++) {
      if (sp.m_pool[i].m_ssId == ssId) {
        ssptr = &sp.m_pool[i];
        break;
      }
    }
    return ssptr;
  }

  template <class Ss>
  Ss& ssSeize() {
    const Uint32 base = SsIdBase;
    const Uint32 mask = ~base;
    const Uint32 ssId = base | c_ssIdSeq;
    c_ssIdSeq = (c_ssIdSeq + 1) & mask;
    return ssSeize<Ss>(ssId);
  }

  template <class Ss>
  Ss& ssSeize(Uint32 ssId) {
    SsPool<Ss>& sp = Ss::pool(this);
    ndbrequire(sp.m_usage < Ss::poolSize);
    ndbrequire(ssId != 0);
    Ss* ssptr;
    // check for duplicate
    ssptr = ssSearch<Ss>(ssId);
    ndbrequire(ssptr == 0);
    // search for free
    ssptr = ssSearch<Ss>(0);
    ndbrequire(ssptr != 0);
    // set methods, clear bitmasks, etc
    new (ssptr) Ss;
    ssptr->m_ssId = ssId;
    sp.m_usage++;
    D("ssSeize" << V(sp.m_usage) << hex << V(ssId) << " " << Ss::name());
    return *ssptr;
  }

  template <class Ss>
  Ss& ssFind(Uint32 ssId) {
    ndbrequire(ssId != 0);
    Ss* ssptr = ssSearch<Ss>(ssId);
    ndbrequire(ssptr != 0);
    return *ssptr;
  }

  /*
   * In some cases it may not be known whether this is the first request.
   * This situation should be avoided by adding signal data or
   * by keeping state in the proxy instance.
   */
  template <class Ss>
  Ss& ssFindSeize(Uint32 ssId, bool* found) {
    ndbrequire(ssId != 0);
    Ss* ssptr = ssSearch<Ss>(ssId);
    if (ssptr != 0) {
      if (found)
        *found = true;
      return *ssptr;
    }
    if (found)
      *found = false;
    return ssSeize<Ss>(ssId);
  }

  template <class Ss>
  void ssRelease(Uint32 ssId) {
    SsPool<Ss>& sp = Ss::pool(this);
    ndbrequire(sp.m_usage != 0);
    ndbrequire(ssId != 0);
    D("ssRelease" << V(sp.m_usage) << hex << V(ssId) << " " << Ss::name());
    Ss* ssptr = ssSearch<Ss>(ssId);
    ndbrequire(ssptr != 0);
    ssptr->m_ssId = 0;
    ndbrequire(sp.m_usage > 0);
    sp.m_usage--;
  }

  template <class Ss>
  void ssRelease(Ss& ss) {
    ssRelease<Ss>(ss.m_ssId);
  }

  /*
   * In some cases handle pool full via delayed signal.
   * wl4391_todo maybe use CONTINUEB and guard against infinite loop.
   */
  template <class Ss>
  bool ssQueue(Signal* signal) {
    SsPool<Ss>& sp = Ss::pool(this);
    if (sp.m_usage < Ss::poolSize)
      return false;

    SectionHandle handle(this, signal);
    GlobalSignalNumber gsn = signal->header.theVerId_signalNumber & 0xFFFF;
    sendSignalWithDelay(reference(), gsn,
                        signal, 10, signal->length(), &handle);
    return true;
  }
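
  /*
   * Plausible usage sketch (illustrative only, hypothetical names): an exec
   * routine can retry later when its pool is full, e.g.
   *
   *   void execFOO_REQ(Signal* signal) {
   *     if (ssQueue<Ss_FOO_REQ>(signal))
   *       return;                // re-delivered to self after a short delay
   *     Ss_FOO_REQ& ss = ssSeize<Ss_FOO_REQ>(ssId);
   *     ...
   *   }
   */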

  // system info

  Uint32 c_typeOfStart;
  Uint32 c_masterNodeId;

  // GSN_READ_CONFIG_REQ
  struct Ss_READ_CONFIG_REQ : SsSequential {
    ReadConfigReq m_req;
    Ss_READ_CONFIG_REQ() {
      m_sendREQ = &LocalProxy::sendREAD_CONFIG_REQ;
      m_sendCONF = &LocalProxy::sendREAD_CONFIG_CONF;
    }
    enum { poolSize = 1 };
    static SsPool<Ss_READ_CONFIG_REQ>& pool(LocalProxy* proxy) {
      return proxy->c_ss_READ_CONFIG_REQ;
    }
  };
  SsPool<Ss_READ_CONFIG_REQ> c_ss_READ_CONFIG_REQ;
  void execREAD_CONFIG_REQ(Signal*);
  virtual void callREAD_CONFIG_REQ(Signal*);
  void backREAD_CONFIG_REQ(Signal*);
  void sendREAD_CONFIG_REQ(Signal*, Uint32 ssId, SectionHandle*);
  void execREAD_CONFIG_CONF(Signal*);
  void sendREAD_CONFIG_CONF(Signal*, Uint32 ssId);
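
  /*
   * Handler naming (as wired via m_sendREQ / m_sendCONF above): exec*
   * receives the signal at the proxy; the virtual call* hook lets a
   * subclass add block-specific handling, typically resuming the default
   * path via back*; send*REQ forwards the request to a worker; send*CONF
   * replies to the original caller, usually once lastReply() holds.
   */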

  // GSN_STTOR
  struct Ss_STTOR : SsParallel {
    Uint32 m_reqlength;
    Uint32 m_reqdata[25];
    Uint32 m_conflength;
    Uint32 m_confdata[25];
    Ss_STTOR() {
      m_sendREQ = &LocalProxy::sendSTTOR;
      m_sendCONF = &LocalProxy::sendSTTORRY;
    }
    enum { poolSize = 1 };
    static SsPool<Ss_STTOR>& pool(LocalProxy* proxy) {
      return proxy->c_ss_STTOR;
    }
  };
  SsPool<Ss_STTOR> c_ss_STTOR;
  void execSTTOR(Signal*);
  virtual void callSTTOR(Signal*);
  void backSTTOR(Signal*);
  void sendSTTOR(Signal*, Uint32 ssId, SectionHandle*);
  void execSTTORRY(Signal*);
  void sendSTTORRY(Signal*, Uint32 ssId);

  // GSN_NDB_STTOR
  struct Ss_NDB_STTOR : SsParallel {
    NdbSttor m_req;
    enum { m_reqlength = sizeof(NdbSttor) >> 2 };
    Ss_NDB_STTOR() {
      m_sendREQ = &LocalProxy::sendNDB_STTOR;
      m_sendCONF = &LocalProxy::sendNDB_STTORRY;
    }
    enum { poolSize = 1 };
    static SsPool<Ss_NDB_STTOR>& pool(LocalProxy* proxy) {
      return proxy->c_ss_NDB_STTOR;
    }
  };
  SsPool<Ss_NDB_STTOR> c_ss_NDB_STTOR;
  void execNDB_STTOR(Signal*);
  virtual void callNDB_STTOR(Signal*);
  void backNDB_STTOR(Signal*);
  void sendNDB_STTOR(Signal*, Uint32 ssId, SectionHandle*);
  void execNDB_STTORRY(Signal*);
  void sendNDB_STTORRY(Signal*, Uint32 ssId);

  // GSN_READ_NODESREQ
  struct Ss_READ_NODES_REQ {
    GlobalSignalNumber m_gsn; // STTOR or NDB_STTOR
    Ss_READ_NODES_REQ() {
      m_gsn = 0;
    }
  };
  Ss_READ_NODES_REQ c_ss_READ_NODESREQ;
  void sendREAD_NODESREQ(Signal*);
  void execREAD_NODESCONF(Signal*);
  void execREAD_NODESREF(Signal*);

  // GSN_NODE_FAILREP
  struct Ss_NODE_FAILREP : SsParallel {
    NodeFailRep m_req;
    // REQ carries an NdbNodeBitmask but CONF reports one nodeId at a time
    NdbNodeBitmask m_waitFor[MaxWorkers];
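    // Presumably each worker keeps its own copy of the failed-node mask so
    // the proxy can track which nodes that worker has still to confirm;
    // see sendNF_COMPLETEREP below.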
    Ss_NODE_FAILREP() {
      m_sendREQ = &LocalProxy::sendNODE_FAILREP;
      m_sendCONF = &LocalProxy::sendNF_COMPLETEREP;
    }
    // some blocks do not reply
    static bool noReply(BlockNumber blockNo) {
      return
        blockNo == BACKUP;
    }
    enum { poolSize = 1 };
    static SsPool<Ss_NODE_FAILREP>& pool(LocalProxy* proxy) {
      return proxy->c_ss_NODE_FAILREP;
    }
  };
  SsPool<Ss_NODE_FAILREP> c_ss_NODE_FAILREP;
  void execNODE_FAILREP(Signal*);
  void sendNODE_FAILREP(Signal*, Uint32 ssId, SectionHandle*);
  void execNF_COMPLETEREP(Signal*);
  void sendNF_COMPLETEREP(Signal*, Uint32 ssId);

  // GSN_INCL_NODEREQ
  struct Ss_INCL_NODEREQ : SsParallel {
    // future-proof by allocating max length
    struct Req {
      Uint32 senderRef;
      Uint32 inclNodeId;
      Uint32 word[23];
    };
    struct Conf {
      Uint32 inclNodeId;
      Uint32 senderRef;
    };
    Uint32 m_reqlength;
    Req m_req;
    Ss_INCL_NODEREQ() {
      m_sendREQ = &LocalProxy::sendINCL_NODEREQ;
      m_sendCONF = &LocalProxy::sendINCL_NODECONF;
    }
    enum { poolSize = 1 };
    static SsPool<Ss_INCL_NODEREQ>& pool(LocalProxy* proxy) {
      return proxy->c_ss_INCL_NODEREQ;
    }
  };
  SsPool<Ss_INCL_NODEREQ> c_ss_INCL_NODEREQ;
  void execINCL_NODEREQ(Signal*);
  void sendINCL_NODEREQ(Signal*, Uint32 ssId, SectionHandle*);
  void execINCL_NODECONF(Signal*);
  void sendINCL_NODECONF(Signal*, Uint32 ssId);

  // GSN_NODE_STATE_REP
  struct Ss_NODE_STATE_REP : SsParallel {
    Ss_NODE_STATE_REP() {
      m_sendREQ = &LocalProxy::sendNODE_STATE_REP;
      m_sendCONF = 0;
    }
    enum { poolSize = 1 };
    static SsPool<Ss_NODE_STATE_REP>& pool(LocalProxy* proxy) {
      return proxy->c_ss_NODE_STATE_REP;
    }
  };
  SsPool<Ss_NODE_STATE_REP> c_ss_NODE_STATE_REP;
  void execNODE_STATE_REP(Signal*);
  void sendNODE_STATE_REP(Signal*, Uint32 ssId, SectionHandle*);

  // GSN_CHANGE_NODE_STATE_REQ
  struct Ss_CHANGE_NODE_STATE_REQ : SsParallel {
    ChangeNodeStateReq m_req;
    Ss_CHANGE_NODE_STATE_REQ() {
      m_sendREQ = &LocalProxy::sendCHANGE_NODE_STATE_REQ;
      m_sendCONF = &LocalProxy::sendCHANGE_NODE_STATE_CONF;
    }
    enum { poolSize = 1 };
    static SsPool<Ss_CHANGE_NODE_STATE_REQ>& pool(LocalProxy* proxy) {
      return proxy->c_ss_CHANGE_NODE_STATE_REQ;
    }
  };
  SsPool<Ss_CHANGE_NODE_STATE_REQ> c_ss_CHANGE_NODE_STATE_REQ;
  void execCHANGE_NODE_STATE_REQ(Signal*);
  void sendCHANGE_NODE_STATE_REQ(Signal*, Uint32 ssId, SectionHandle*);
  void execCHANGE_NODE_STATE_CONF(Signal*);
  void sendCHANGE_NODE_STATE_CONF(Signal*, Uint32 ssId);

  // GSN_DUMP_STATE_ORD
  struct Ss_DUMP_STATE_ORD : SsParallel {
    Uint32 m_reqlength;
    Uint32 m_reqdata[25];
    Ss_DUMP_STATE_ORD() {
      m_sendREQ = &LocalProxy::sendDUMP_STATE_ORD;
      m_sendCONF = 0;
    }
    enum { poolSize = 1 };
    static SsPool<Ss_DUMP_STATE_ORD>& pool(LocalProxy* proxy) {
      return proxy->c_ss_DUMP_STATE_ORD;
    }
  };
  SsPool<Ss_DUMP_STATE_ORD> c_ss_DUMP_STATE_ORD;
  void execDUMP_STATE_ORD(Signal*);
  void sendDUMP_STATE_ORD(Signal*, Uint32 ssId, SectionHandle*);

  // GSN_NDB_TAMPER
  struct Ss_NDB_TAMPER : SsParallel {
    Uint32 m_errorInsert;
    Ss_NDB_TAMPER() {
      m_sendREQ = &LocalProxy::sendNDB_TAMPER;
      m_sendCONF = 0;
    }
    enum { poolSize = 1 };
    static SsPool<Ss_NDB_TAMPER>& pool(LocalProxy* proxy) {
      return proxy->c_ss_NDB_TAMPER;
    }
  };
  SsPool<Ss_NDB_TAMPER> c_ss_NDB_TAMPER;
  void execNDB_TAMPER(Signal*);
  void sendNDB_TAMPER(Signal*, Uint32 ssId, SectionHandle*);

  // GSN_TIME_SIGNAL
  struct Ss_TIME_SIGNAL : SsParallel {
    Ss_TIME_SIGNAL() {
      m_sendREQ = &LocalProxy::sendTIME_SIGNAL;
      m_sendCONF = 0;
    }
    enum { poolSize = 1 };
    static SsPool<Ss_TIME_SIGNAL>& pool(LocalProxy* proxy) {
      return proxy->c_ss_TIME_SIGNAL;
    }
  };
  SsPool<Ss_TIME_SIGNAL> c_ss_TIME_SIGNAL;
  void execTIME_SIGNAL(Signal*);
  void sendTIME_SIGNAL(Signal*, Uint32 ssId, SectionHandle*);

  // GSN_CREATE_TRIG_IMPL_REQ
  struct Ss_CREATE_TRIG_IMPL_REQ : SsParallel {
    CreateTrigImplReq m_req;
    Ss_CREATE_TRIG_IMPL_REQ() {
      m_sendREQ = &LocalProxy::sendCREATE_TRIG_IMPL_REQ;
      m_sendCONF = &LocalProxy::sendCREATE_TRIG_IMPL_CONF;
    }
    enum { poolSize = 3 };
    static SsPool<Ss_CREATE_TRIG_IMPL_REQ>& pool(LocalProxy* proxy) {
      return proxy->c_ss_CREATE_TRIG_IMPL_REQ;
    }
  };
  SsPool<Ss_CREATE_TRIG_IMPL_REQ> c_ss_CREATE_TRIG_IMPL_REQ;
  void execCREATE_TRIG_IMPL_REQ(Signal*);
  void sendCREATE_TRIG_IMPL_REQ(Signal*, Uint32 ssId, SectionHandle*);
  void execCREATE_TRIG_IMPL_CONF(Signal*);
  void execCREATE_TRIG_IMPL_REF(Signal*);
  void sendCREATE_TRIG_IMPL_CONF(Signal*, Uint32 ssId);

  // GSN_DROP_TRIG_IMPL_REQ
  struct Ss_DROP_TRIG_IMPL_REQ : SsParallel {
    DropTrigImplReq m_req;
    Ss_DROP_TRIG_IMPL_REQ() {
      m_sendREQ = &LocalProxy::sendDROP_TRIG_IMPL_REQ;
      m_sendCONF = &LocalProxy::sendDROP_TRIG_IMPL_CONF;
    }
    enum { poolSize = 21 };
    static SsPool<Ss_DROP_TRIG_IMPL_REQ>& pool(LocalProxy* proxy) {
      return proxy->c_ss_DROP_TRIG_IMPL_REQ;
    }
  };
  SsPool<Ss_DROP_TRIG_IMPL_REQ> c_ss_DROP_TRIG_IMPL_REQ;
  void execDROP_TRIG_IMPL_REQ(Signal*);
  void sendDROP_TRIG_IMPL_REQ(Signal*, Uint32 ssId, SectionHandle*);
  void execDROP_TRIG_IMPL_CONF(Signal*);
  void execDROP_TRIG_IMPL_REF(Signal*);
  void sendDROP_TRIG_IMPL_CONF(Signal*, Uint32 ssId);

  // GSN_DBINFO_SCANREQ
  bool find_next(Ndbinfo::ScanCursor* cursor) const;
  void execDBINFO_SCANREQ(Signal*);
  void execDBINFO_SCANCONF(Signal*);

  // GSN_SYNC_REQ
  void execSYNC_REQ(Signal*);
  void execSYNC_REF(Signal*);
  void execSYNC_CONF(Signal*);
  void sendSYNC_REQ(Signal*, Uint32 ssId, SectionHandle*);
  void sendSYNC_CONF(Signal*, Uint32 ssId);
  struct Ss_SYNC_REQ : SsParallel {
    SyncReq m_req;
    Ss_SYNC_REQ() {
      m_sendREQ = &LocalProxy::sendSYNC_REQ;
      m_sendCONF = &LocalProxy::sendSYNC_CONF;
    }
    enum { poolSize = 4 };
    static SsPool<Ss_SYNC_REQ>& pool(LocalProxy* proxy) {
      return proxy->c_ss_SYNC_REQ;
    }
  };
  SsPool<Ss_SYNC_REQ> c_ss_SYNC_REQ;

  void execSYNC_PATH_REQ(Signal*);
};

#endif