/* Copyright (c) 2008, 2019, Oracle and/or its affiliates. All rights reserved.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License, version 2.0,
   as published by the Free Software Foundation.

   This program is also distributed with certain software (including
   but not limited to OpenSSL) that is licensed under separate terms,
   as designated in a particular file or component or in included license
   documentation.  The authors of MySQL hereby grant you an additional
   permission to link the program and your derivative works with the
   separately licensed software that they have included with MySQL.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License, version 2.0, for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301  USA */
22 
23 #ifndef NDB_LOCAL_PROXY_HPP
24 #define NDB_LOCAL_PROXY_HPP
25 
26 #include <pc.hpp>
27 #include <SimulatedBlock.hpp>
28 #include <Bitmask.hpp>
29 #include <IntrusiveList.hpp>
30 #include <signaldata/ReadConfig.hpp>
31 #include <signaldata/NdbSttor.hpp>
32 #include <signaldata/ReadNodesConf.hpp>
33 #include <signaldata/NodeFailRep.hpp>
34 #include <signaldata/NodeStateSignalData.hpp>
35 #include <signaldata/NFCompleteRep.hpp>
36 #include <signaldata/CreateTrigImpl.hpp>
37 #include <signaldata/DropTrigImpl.hpp>
38 #include <signaldata/DbinfoScan.hpp>
39 #include <signaldata/Sync.hpp>
40 
41 #define JAM_FILE_ID 438
42 
43 
/*
 * Proxy blocks for MT LQH.
 *
 * The LQH proxy is the LQH block seen by other nodes and blocks,
 * unless by-passed for efficiency.  Real LQH instances (workers)
 * run behind it.  The instance number is 1 + worker index.
 *
 * There are also proxies and workers for ACC, TUP, TUX, BACKUP,
 * RESTORE, and PGMAN.  Proxy classes are subclasses of LocalProxy.
 * Workers with the same instance number (one from each class) run in
 * the same thread.
 *
 * After the LQH workers there is an optional extra worker.  It runs
 * in the thread of the main block (i.e. the proxy).  Its instance
 * number is fixed as 1 + MaxLqhWorkers (currently 5) i.e. it skips
 * over any unused LQH instance numbers.
 */
61 
62 class LocalProxy : public SimulatedBlock {
63 public:
64   LocalProxy(BlockNumber blockNumber, Block_context& ctx);
65   virtual ~LocalProxy();
66   BLOCK_DEFINES(LocalProxy);
67 
68 protected:
69   enum { MaxWorkers = SimulatedBlock::MaxInstances };
70   typedef Bitmask<(MaxWorkers+31)/32> WorkerMask;
71   Uint32 c_workers;
72   // no gaps - extra worker has index c_lqhWorkers (not MaxLqhWorkers)
73   SimulatedBlock* c_worker[MaxWorkers];
74   Uint32 c_anyWorkerCounter;
75 
76   virtual SimulatedBlock* newWorker(Uint32 instanceNo) = 0;
77   virtual void loadWorkers();
78 
79   // get worker block by index (not by instance)
80 
workerBlock(Uint32 i)81   SimulatedBlock* workerBlock(Uint32 i) {
82     ndbrequire(i < c_workers);
83     ndbrequire(c_worker[i] != 0);
84     return c_worker[i];
85   }
86 
87   // get worker block reference by index (not by instance)
88 
workerRef(Uint32 i)89   BlockReference workerRef(Uint32 i) {
90     return numberToRef(number(), workerInstance(i), getOwnNodeId());
91   }
92 
93   // convert between worker index and worker instance
94 
workerInstance(Uint32 i) const95   Uint32 workerInstance(Uint32 i) const {
96     ndbrequire(i < c_workers);
97     return i + 1;
98   }
99 
workerIndex(Uint32 ino) const100   Uint32 workerIndex(Uint32 ino) const {
101     ndbrequire(ino != 0);
102     return ino - 1;
103   }
104 
105   // Get a worker index - will balance round robin across
106   // workers over time.
getAnyWorkerIndex()107   Uint32 getAnyWorkerIndex()
108   {
109     return (c_anyWorkerCounter++) % c_workers;
110   }
111 
112   // Statelessly forward a signal (including any sections)
113   // to the worker with the supplied index.
114   void forwardToWorkerIndex(Signal* signal, Uint32 index);
115 
116   // Statelessly forward the signal (including any sections)
117   // to one of the workers, load balancing.
118   // Requires no arrival order constraints between signals.
119   void forwardToAnyWorker(Signal* signal);
120 
121   // support routines and classes ("Ss" = signal state)
122 
123   typedef void (LocalProxy::*SsFUNCREQ)(Signal*, Uint32 ssId, SectionHandle*);
124   typedef void (LocalProxy::*SsFUNCREP)(Signal*, Uint32 ssId);
125 
126   struct SsCommon {
127     Uint32 m_ssId;      // unique id in SsPool (below)
128     SsFUNCREQ m_sendREQ;   // from proxy to worker
129     SsFUNCREP m_sendCONF;  // from proxy to caller
130     Uint32 m_worker;    // current worker
131     Uint32 m_error;
132     Uint32 m_sec_cnt;
133     Uint32 m_sec_ptr[3];
nameLocalProxy::SsCommon134     static const char* name() { return "UNDEF"; }
SsCommonLocalProxy::SsCommon135     SsCommon() {
136       m_ssId = 0;
137       m_sendREQ = 0;
138       m_sendCONF = 0;
139       m_worker = 0;
140       m_error = 0;
141       m_sec_cnt = 0;
142     }
143   };
144 
145   // run workers sequentially
146   struct SsSequential : SsCommon {
SsSequentialLocalProxy::SsSequential147     SsSequential() {}
148   };
149   void sendREQ(Signal*, SsSequential& ss);
150   void recvCONF(Signal*, SsSequential& ss);
151   void recvREF(Signal*, SsSequential& ss, Uint32 error);
152   // for use in sendREQ
153   void skipReq(SsSequential& ss);
154   void skipConf(SsSequential& ss);
155   // for use in sendCONF
156   bool firstReply(const SsSequential& ss);
157   bool lastReply(const SsSequential& ss);
158 
159   void saveSections(SsCommon&ss, SectionHandle&);
160   void restoreHandle(SectionHandle&, SsCommon&);
161 
162   // run workers in parallel
163   struct SsParallel : SsCommon {
164     WorkerMask m_workerMask;
SsParallelLocalProxy::SsParallel165     SsParallel() {
166     }
167   };
168   void sendREQ(Signal*, SsParallel& ss, bool skipLast = false);
169   void recvCONF(Signal*, SsParallel& ss);
170   void recvREF(Signal*, SsParallel& ss, Uint32 error);
171   // for use in sendREQ
172   void skipReq(SsParallel& ss);
173   void skipConf(SsParallel& ss);
174   // for use in sendCONF
175   bool firstReply(const SsParallel& ss);
176   bool lastReply(const SsParallel& ss);
177   bool lastExtra(Signal* signal, SsParallel& ss);
178   // set all or given bits in worker mask
179   void setMask(SsParallel& ss);
180   void setMask(SsParallel& ss, const WorkerMask& mask);
181 
182   /*
183    * Ss instances are seized from a pool.  Each pool is simply an array
184    * of Ss instances.  Usually poolSize is 1.  Some signals need a few
185    * more but the heavy stuff (query/DML) by-passes the proxy.
186    *
187    * Each Ss instance has a unique Uint32 ssId.  If there are multiple
188    * instances then ssId must be computable from signal data.  One option
189    * often is to use a generated ssId and set it as senderData,
190    */
191 
192   template <class Ss>
193   struct SsPool {
194     Ss m_pool[Ss::poolSize];
195     Uint32 m_usage;
SsPoolLocalProxy::SsPool196     SsPool() {
197       m_usage = 0;
198     }
199   };
200 
201   Uint32 c_ssIdSeq;
202 
203   // convenient for adding non-zero high bit
204   enum { SsIdBase = (1u << 31) };
205 
206   template <class Ss>
ssSearch(Uint32 ssId)207   Ss* ssSearch(Uint32 ssId)
208   {
209     SsPool<Ss>& sp = Ss::pool(this);
210     Ss* ssptr = 0;
211     for (Uint32 i = 0; i < Ss::poolSize; i++) {
212       if (sp.m_pool[i].m_ssId == ssId) {
213         ssptr = &sp.m_pool[i];
214         break;
215       }
216     }
217     return ssptr;
218   }
219 
220   template <class Ss>
ssSeize()221   Ss& ssSeize() {
222     SsPool<Ss>& sp = Ss::pool(this);
223     Ss* ssptr = ssSearch<Ss>(0);
224     ndbrequire(ssptr != 0);
225     // Use position in array as ssId
226     UintPtr pos = ssptr - sp.m_pool;
227     Uint32 ssId = Uint32(pos) + 1;
228     new (ssptr) Ss;
229     ssptr->m_ssId = ssId;
230     sp.m_usage++;
231     //D("ssSeize()" << V(sp.m_usage) << hex << V(ssId) << " " << Ss::name());
232     return *ssptr;
233   }
234 
235   template <class Ss>
ssSeize(Uint32 ssId)236   Ss& ssSeize(Uint32 ssId) {
237     SsPool<Ss>& sp = Ss::pool(this);
238     ndbrequire(sp.m_usage < Ss::poolSize);
239     ndbrequire(ssId != 0);
240     Ss* ssptr;
241     // check for duplicate
242     ssptr = ssSearch<Ss>(ssId);
243     ndbrequire(ssptr == 0);
244     // search for free
245     ssptr = ssSearch<Ss>(0);
246     ndbrequire(ssptr != 0);
247     // set methods, clear bitmasks, etc
248     new (ssptr) Ss;
249     ssptr->m_ssId = ssId;
250     sp.m_usage++;
251     //D("ssSeize" << V(sp.m_usage) << hex << V(ssId) << " " << Ss::name());
252     return *ssptr;
253   }
254 
255   template <class Ss>
ssFind(Uint32 ssId)256   Ss& ssFind(Uint32 ssId) {
257     ndbrequire(ssId != 0);
258     Ss* ssptr = ssSearch<Ss>(ssId);
259     ndbrequire(ssptr != 0);
260     return *ssptr;
261   }
262 
263   /*
264    * In some cases it may not be known if this is first request.
265    * This situation should be avoided by adding signal data or
266    * by keeping state in the proxy instance.
267    */
268   template <class Ss>
ssFindSeize(Uint32 ssId,bool * found)269   Ss& ssFindSeize(Uint32 ssId, bool* found) {
270     ndbrequire(ssId != 0);
271     Ss* ssptr = ssSearch<Ss>(ssId);
272     if (ssptr != 0) {
273       if (found)
274         *found = true;
275       return *ssptr;
276     }
277     if (found)
278       *found = false;
279     return ssSeize<Ss>(ssId);
280   }
281 
282   template <class Ss>
ssRelease(Uint32 ssId)283   void ssRelease(Uint32 ssId) {
284     SsPool<Ss>& sp = Ss::pool(this);
285     ndbrequire(sp.m_usage != 0);
286     ndbrequire(ssId != 0);
287     //D("ssRelease" << V(sp.m_usage) << hex << V(ssId) << " " << Ss::name());
288     Ss* ssptr = ssSearch<Ss>(ssId);
289     ndbrequire(ssptr != 0);
290     ssptr->m_ssId = 0;
291     ndbrequire(sp.m_usage > 0);
292     sp.m_usage--;
293   }
294 
295   template <class Ss>
ssRelease(Ss & ss)296   void ssRelease(Ss& ss) {
297     ssRelease<Ss>(ss.m_ssId);
298   }
299 
300   /*
301    * In some cases handle pool full via delayed signal.
302    * wl4391_todo maybe use CONTINUEB and guard against infinite loop.
303    */
304   template <class Ss>
ssQueue(Signal * signal)305   bool ssQueue(Signal* signal) {
306     SsPool<Ss>& sp = Ss::pool(this);
307     if (sp.m_usage < Ss::poolSize)
308       return false;
309 
310     SectionHandle handle(this, signal);
311     GlobalSignalNumber gsn = signal->header.theVerId_signalNumber & 0xFFFF;
312     sendSignalWithDelay(reference(), gsn,
313                         signal, 10, signal->length(), &handle);
314     return true;
315   }
316 
317   // system info
318 
319   Uint32 c_typeOfStart;
320   Uint32 c_masterNodeId;
321 
322   // GSN_READ_CONFIG_REQ
323   struct Ss_READ_CONFIG_REQ : SsSequential {
324     ReadConfigReq m_req;
Ss_READ_CONFIG_REQLocalProxy::Ss_READ_CONFIG_REQ325     Ss_READ_CONFIG_REQ() {
326       m_sendREQ = &LocalProxy::sendREAD_CONFIG_REQ;
327       m_sendCONF = &LocalProxy::sendREAD_CONFIG_CONF;
328     }
329     enum { poolSize = 1 };
poolLocalProxy::Ss_READ_CONFIG_REQ330     static SsPool<Ss_READ_CONFIG_REQ>& pool(LocalProxy* proxy) {
331       return proxy->c_ss_READ_CONFIG_REQ;
332     }
333   };
334   SsPool<Ss_READ_CONFIG_REQ> c_ss_READ_CONFIG_REQ;
335   void execREAD_CONFIG_REQ(Signal*);
336   virtual void callREAD_CONFIG_REQ(Signal*);
337   void backREAD_CONFIG_REQ(Signal*);
338   void sendREAD_CONFIG_REQ(Signal*, Uint32 ssId, SectionHandle*);
339   void execREAD_CONFIG_CONF(Signal*);
340   void sendREAD_CONFIG_CONF(Signal*, Uint32 ssId);
341 
342   // GSN_STTOR
343   struct Ss_STTOR : SsParallel {
344     Uint32 m_reqlength;
345     Uint32 m_reqdata[25];
346     Uint32 m_conflength;
347     Uint32 m_confdata[25];
Ss_STTORLocalProxy::Ss_STTOR348     Ss_STTOR() {
349       m_sendREQ = &LocalProxy::sendSTTOR;
350       m_sendCONF = &LocalProxy::sendSTTORRY;
351     }
352     enum { poolSize = 1 };
poolLocalProxy::Ss_STTOR353     static SsPool<Ss_STTOR>& pool(LocalProxy* proxy) {
354       return proxy->c_ss_STTOR;
355     }
356   };
357   SsPool<Ss_STTOR> c_ss_STTOR;
358   void execSTTOR(Signal*);
359   virtual void callSTTOR(Signal*);
360   void backSTTOR(Signal*);
361   void sendSTTOR(Signal*, Uint32 ssId, SectionHandle*);
362   void execSTTORRY(Signal*);
363   void sendSTTORRY(Signal*, Uint32 ssId);
364 
365   // GSN_NDB_STTOR
366   struct Ss_NDB_STTOR : SsParallel {
367     NdbSttor m_req;
368     enum { m_reqlength = sizeof(NdbSttor) >> 2 };
Ss_NDB_STTORLocalProxy::Ss_NDB_STTOR369     Ss_NDB_STTOR() {
370       m_sendREQ = &LocalProxy::sendNDB_STTOR;
371       m_sendCONF = &LocalProxy::sendNDB_STTORRY;
372     }
373     enum { poolSize = 1 };
poolLocalProxy::Ss_NDB_STTOR374     static SsPool<Ss_NDB_STTOR>& pool(LocalProxy* proxy) {
375       return proxy->c_ss_NDB_STTOR;
376     }
377   };
378   SsPool<Ss_NDB_STTOR> c_ss_NDB_STTOR;
379   void execNDB_STTOR(Signal*);
380   virtual void callNDB_STTOR(Signal*);
381   void backNDB_STTOR(Signal*);
382   void sendNDB_STTOR(Signal*, Uint32 ssId, SectionHandle*);
383   void execNDB_STTORRY(Signal*);
384   void sendNDB_STTORRY(Signal*, Uint32 ssId);
385 
386   // GSN_READ_NODESREQ
387   struct Ss_READ_NODES_REQ {
388     GlobalSignalNumber m_gsn; // STTOR or NDB_STTOR
Ss_READ_NODES_REQLocalProxy::Ss_READ_NODES_REQ389     Ss_READ_NODES_REQ() {
390       m_gsn = 0;
391     }
392   };
393   Ss_READ_NODES_REQ c_ss_READ_NODESREQ;
394   void sendREAD_NODESREQ(Signal*);
395   void execREAD_NODESCONF(Signal*);
396   void execREAD_NODESREF(Signal*);
397 
398   // GSN_NODE_FAILREP
399   struct Ss_NODE_FAILREP : SsParallel {
400     NodeFailRep m_req;
401     // REQ sends NdbNodeBitmask but CONF sends nodeId at a time
402     NdbNodeBitmask m_waitFor[MaxWorkers];
Ss_NODE_FAILREPLocalProxy::Ss_NODE_FAILREP403     Ss_NODE_FAILREP() {
404       m_sendREQ = &LocalProxy::sendNODE_FAILREP;
405       m_sendCONF = &LocalProxy::sendNF_COMPLETEREP;
406     }
407     // some blocks do not reply
noReplyLocalProxy::Ss_NODE_FAILREP408     static bool noReply(BlockNumber blockNo) {
409       return
410         blockNo == BACKUP;
411     }
412     enum { poolSize = 1 };
poolLocalProxy::Ss_NODE_FAILREP413     static SsPool<Ss_NODE_FAILREP>& pool(LocalProxy* proxy) {
414       return proxy->c_ss_NODE_FAILREP;
415     }
416   };
417   SsPool<Ss_NODE_FAILREP> c_ss_NODE_FAILREP;
418   void execNODE_FAILREP(Signal*);
419   void sendNODE_FAILREP(Signal*, Uint32 ssId, SectionHandle*);
420   void execNF_COMPLETEREP(Signal*);
421   void sendNF_COMPLETEREP(Signal*, Uint32 ssId);
422 
423   // GSN_INCL_NODEREQ
424   struct Ss_INCL_NODEREQ : SsParallel {
425     // future-proof by allocating max length
426     struct Req {
427       Uint32 senderRef;
428       Uint32 inclNodeId;
429       Uint32 word[23];
430     };
431     struct Conf {
432       Uint32 inclNodeId;
433       Uint32 senderRef;
434     };
435     Uint32 m_reqlength;
436     Req m_req;
Ss_INCL_NODEREQLocalProxy::Ss_INCL_NODEREQ437     Ss_INCL_NODEREQ() {
438       m_sendREQ = &LocalProxy::sendINCL_NODEREQ;
439       m_sendCONF = &LocalProxy::sendINCL_NODECONF;
440     }
441     enum { poolSize = 1 };
poolLocalProxy::Ss_INCL_NODEREQ442     static SsPool<Ss_INCL_NODEREQ>& pool(LocalProxy* proxy) {
443       return proxy->c_ss_INCL_NODEREQ;
444     }
445   };
446   SsPool<Ss_INCL_NODEREQ> c_ss_INCL_NODEREQ;
447   void execINCL_NODEREQ(Signal*);
448   void sendINCL_NODEREQ(Signal*, Uint32 ssId, SectionHandle*);
449   void execINCL_NODECONF(Signal*);
450   void sendINCL_NODECONF(Signal*, Uint32 ssId);
451 
452   // GSN_NODE_STATE_REP
453   struct Ss_NODE_STATE_REP : SsParallel {
Ss_NODE_STATE_REPLocalProxy::Ss_NODE_STATE_REP454     Ss_NODE_STATE_REP() {
455       m_sendREQ = &LocalProxy::sendNODE_STATE_REP;
456       m_sendCONF = 0;
457     }
458     enum { poolSize = 1 };
poolLocalProxy::Ss_NODE_STATE_REP459     static SsPool<Ss_NODE_STATE_REP>& pool(LocalProxy* proxy) {
460       return proxy->c_ss_NODE_STATE_REP;
461     }
462   };
463   SsPool<Ss_NODE_STATE_REP> c_ss_NODE_STATE_REP;
464   void execNODE_STATE_REP(Signal*);
465   void sendNODE_STATE_REP(Signal*, Uint32 ssId, SectionHandle*);
466 
467   // GSN_CHANGE_NODE_STATE_REQ
468   struct Ss_CHANGE_NODE_STATE_REQ : SsParallel {
469     ChangeNodeStateReq m_req;
Ss_CHANGE_NODE_STATE_REQLocalProxy::Ss_CHANGE_NODE_STATE_REQ470     Ss_CHANGE_NODE_STATE_REQ() {
471       m_sendREQ = &LocalProxy::sendCHANGE_NODE_STATE_REQ;
472       m_sendCONF = &LocalProxy::sendCHANGE_NODE_STATE_CONF;
473     }
474     enum { poolSize = 1 };
poolLocalProxy::Ss_CHANGE_NODE_STATE_REQ475     static SsPool<Ss_CHANGE_NODE_STATE_REQ>& pool(LocalProxy* proxy) {
476       return proxy->c_ss_CHANGE_NODE_STATE_REQ;
477     }
478   };
479   SsPool<Ss_CHANGE_NODE_STATE_REQ> c_ss_CHANGE_NODE_STATE_REQ;
480   void execCHANGE_NODE_STATE_REQ(Signal*);
481   void sendCHANGE_NODE_STATE_REQ(Signal*, Uint32 ssId, SectionHandle*);
482   void execCHANGE_NODE_STATE_CONF(Signal*);
483   void sendCHANGE_NODE_STATE_CONF(Signal*, Uint32 ssId);
484 
485   // GSN_DUMP_STATE_ORD
486   struct Ss_DUMP_STATE_ORD : SsParallel {
487     Uint32 m_reqlength;
488     Uint32 m_reqdata[25];
Ss_DUMP_STATE_ORDLocalProxy::Ss_DUMP_STATE_ORD489     Ss_DUMP_STATE_ORD() {
490       m_sendREQ = &LocalProxy::sendDUMP_STATE_ORD;
491       m_sendCONF = 0;
492     }
493     enum { poolSize = 1 };
poolLocalProxy::Ss_DUMP_STATE_ORD494     static SsPool<Ss_DUMP_STATE_ORD>& pool(LocalProxy* proxy) {
495       return proxy->c_ss_DUMP_STATE_ORD;
496     }
497   };
498   SsPool<Ss_DUMP_STATE_ORD> c_ss_DUMP_STATE_ORD;
499   void execDUMP_STATE_ORD(Signal*);
500   void sendDUMP_STATE_ORD(Signal*, Uint32 ssId, SectionHandle*);
501 
502   // GSN_NDB_TAMPER
503   struct Ss_NDB_TAMPER : SsParallel
504   {
505     Uint32 m_errorInsert;
506     Uint32 m_errorInsertExtra;
507     bool m_haveErrorInsertExtra;
Ss_NDB_TAMPERLocalProxy::Ss_NDB_TAMPER508     Ss_NDB_TAMPER()
509     {
510       m_sendREQ = &LocalProxy::sendNDB_TAMPER;
511       m_sendCONF = 0;
512     }
513     enum { poolSize = 1 };
poolLocalProxy::Ss_NDB_TAMPER514     static SsPool<Ss_NDB_TAMPER>& pool(LocalProxy* proxy) {
515       return proxy->c_ss_NDB_TAMPER;
516     }
517   };
518   SsPool<Ss_NDB_TAMPER> c_ss_NDB_TAMPER;
519   void execNDB_TAMPER(Signal*);
520   void sendNDB_TAMPER(Signal*, Uint32 ssId, SectionHandle*);
521 
522   // GSN_TIME_SIGNAL
523   struct Ss_TIME_SIGNAL : SsParallel {
Ss_TIME_SIGNALLocalProxy::Ss_TIME_SIGNAL524     Ss_TIME_SIGNAL() {
525       m_sendREQ = &LocalProxy::sendTIME_SIGNAL;
526       m_sendCONF = 0;
527     }
528     enum { poolSize = 1 };
poolLocalProxy::Ss_TIME_SIGNAL529     static SsPool<Ss_TIME_SIGNAL>& pool(LocalProxy* proxy) {
530       return proxy->c_ss_TIME_SIGNAL;
531     }
532   };
533   SsPool<Ss_TIME_SIGNAL> c_ss_TIME_SIGNAL;
534   void execTIME_SIGNAL(Signal*);
535   void sendTIME_SIGNAL(Signal*, Uint32 ssId, SectionHandle*);
536 
537   // GSN_CREATE_TRIG_IMPL_REQ
538   struct Ss_CREATE_TRIG_IMPL_REQ : SsParallel {
539     CreateTrigImplReq m_req;
Ss_CREATE_TRIG_IMPL_REQLocalProxy::Ss_CREATE_TRIG_IMPL_REQ540     Ss_CREATE_TRIG_IMPL_REQ() {
541       m_sendREQ = &LocalProxy::sendCREATE_TRIG_IMPL_REQ;
542       m_sendCONF = &LocalProxy::sendCREATE_TRIG_IMPL_CONF;
543     }
544     enum { poolSize = 3 };
poolLocalProxy::Ss_CREATE_TRIG_IMPL_REQ545     static SsPool<Ss_CREATE_TRIG_IMPL_REQ>& pool(LocalProxy* proxy) {
546       return proxy->c_ss_CREATE_TRIG_IMPL_REQ;
547     }
548   };
549   SsPool<Ss_CREATE_TRIG_IMPL_REQ> c_ss_CREATE_TRIG_IMPL_REQ;
550   void execCREATE_TRIG_IMPL_REQ(Signal*);
551   void sendCREATE_TRIG_IMPL_REQ(Signal*, Uint32 ssId, SectionHandle*);
552   void execCREATE_TRIG_IMPL_CONF(Signal*);
553   void execCREATE_TRIG_IMPL_REF(Signal*);
554   void sendCREATE_TRIG_IMPL_CONF(Signal*, Uint32 ssId);
555 
556   // GSN_DROP_TRIG_IMPL_REQ
557   struct Ss_DROP_TRIG_IMPL_REQ : SsParallel {
558     DropTrigImplReq m_req;
Ss_DROP_TRIG_IMPL_REQLocalProxy::Ss_DROP_TRIG_IMPL_REQ559     Ss_DROP_TRIG_IMPL_REQ() {
560       m_sendREQ = &LocalProxy::sendDROP_TRIG_IMPL_REQ;
561       m_sendCONF = &LocalProxy::sendDROP_TRIG_IMPL_CONF;
562     }
563     enum { poolSize = NDB_MAX_PROXY_DROP_TRIG_IMPL_REQ };
poolLocalProxy::Ss_DROP_TRIG_IMPL_REQ564     static SsPool<Ss_DROP_TRIG_IMPL_REQ>& pool(LocalProxy* proxy) {
565       return proxy->c_ss_DROP_TRIG_IMPL_REQ;
566     }
567   };
568   SsPool<Ss_DROP_TRIG_IMPL_REQ> c_ss_DROP_TRIG_IMPL_REQ;
569   void execDROP_TRIG_IMPL_REQ(Signal*);
570   void sendDROP_TRIG_IMPL_REQ(Signal*, Uint32 ssId, SectionHandle*);
571   void execDROP_TRIG_IMPL_CONF(Signal*);
572   void execDROP_TRIG_IMPL_REF(Signal*);
573   void sendDROP_TRIG_IMPL_CONF(Signal*, Uint32 ssId);
574 
575   // GSN_DBINFO_SCANREQ
576   bool find_next(Ndbinfo::ScanCursor* cursor) const;
577   void execDBINFO_SCANREQ(Signal*);
578   void execDBINFO_SCANCONF(Signal*);
579 
580   // GSN_SYNC_REQ
581   void execSYNC_REQ(Signal*);
582   void execSYNC_REF(Signal*);
583   void execSYNC_CONF(Signal*);
584   void sendSYNC_REQ(Signal*, Uint32 ssId, SectionHandle*);
585   void sendSYNC_CONF(Signal*, Uint32 ssId);
586   struct Ss_SYNC_REQ : SsParallel {
587     SyncReq m_req;
Ss_SYNC_REQLocalProxy::Ss_SYNC_REQ588     Ss_SYNC_REQ() {
589       m_sendREQ = &LocalProxy::sendSYNC_REQ;
590       m_sendCONF = &LocalProxy::sendSYNC_CONF;
591     }
592     enum { poolSize = 4 };
poolLocalProxy::Ss_SYNC_REQ593     static SsPool<Ss_SYNC_REQ>& pool(LocalProxy* proxy) {
594       return proxy->c_ss_SYNC_REQ;
595     }
596   };
597   SsPool<Ss_SYNC_REQ> c_ss_SYNC_REQ;
598 
599   void execSYNC_PATH_REQ(Signal*);
600 
601   // GSN_API_FAILREQ
602   struct Ss_API_FAILREQ : SsParallel {
603     Uint32 m_ref; //
Ss_API_FAILREQLocalProxy::Ss_API_FAILREQ604     Ss_API_FAILREQ() {
605       m_sendREQ = (SsFUNCREQ)&LocalProxy::sendAPI_FAILREQ;
606       m_sendCONF = (SsFUNCREP)&LocalProxy::sendAPI_FAILCONF;
607     }
608     enum { poolSize = MAX_NODES };
poolLocalProxy::Ss_API_FAILREQ609     static SsPool<Ss_API_FAILREQ>& pool(LocalProxy* proxy) {
610       return proxy->c_ss_API_FAILREQ;
611     }
612   };
613   SsPool<Ss_API_FAILREQ> c_ss_API_FAILREQ;
614   void execAPI_FAILREQ(Signal*);
615   void sendAPI_FAILREQ(Signal*, Uint32 ssId, SectionHandle*);
616   void execAPI_FAILCONF(Signal*);
617   void sendAPI_FAILCONF(Signal*, Uint32 ssId);
618 };
619 
620 
621 #undef JAM_FILE_ID
622 
623 #endif
624