1 /* Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
2
3 This program is free software; you can redistribute it and/or modify
4 it under the terms of the GNU General Public License, version 2.0,
5 as published by the Free Software Foundation.
6
7 This program is also distributed with certain software (including
8 but not limited to OpenSSL) that is licensed under separate terms,
9 as designated in a particular file or component or in included license
10 documentation. The authors of MySQL hereby grant you an additional
11 permission to link the program and your derivative works with the
12 separately licensed software that they have included with MySQL.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License, version 2.0, for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program; if not, write to the Free Software
21 Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */
22
23 // can be removed if DBTUP continueB codes are moved to signaldata
24 #define DBTUP_C
25
26 #include "DbtupProxy.hpp"
27 #include "Dbtup.hpp"
28 #include <pgman.hpp>
29 #include <signaldata/LgmanContinueB.hpp>
30
31 #include <EventLogger.hpp>
32 extern EventLogger * g_eventLogger;
33
DbtupProxy(Block_context & ctx)34 DbtupProxy::DbtupProxy(Block_context& ctx) :
35 LocalProxy(DBTUP, ctx),
36 c_pgman(0),
37 c_tableRecSize(0),
38 c_tableRec(0)
39 {
40 // GSN_CREATE_TAB_REQ
41 addRecSignal(GSN_CREATE_TAB_REQ, &DbtupProxy::execCREATE_TAB_REQ);
42 addRecSignal(GSN_DROP_TAB_REQ, &DbtupProxy::execDROP_TAB_REQ);
43
44 // GSN_BUILD_INDX_IMPL_REQ
45 addRecSignal(GSN_BUILD_INDX_IMPL_REQ, &DbtupProxy::execBUILD_INDX_IMPL_REQ);
46 addRecSignal(GSN_BUILD_INDX_IMPL_CONF, &DbtupProxy::execBUILD_INDX_IMPL_CONF);
47 addRecSignal(GSN_BUILD_INDX_IMPL_REF, &DbtupProxy::execBUILD_INDX_IMPL_REF);
48 }
49
~DbtupProxy()50 DbtupProxy::~DbtupProxy()
51 {
52 }
53
54 SimulatedBlock*
newWorker(Uint32 instanceNo)55 DbtupProxy::newWorker(Uint32 instanceNo)
56 {
57 return new Dbtup(m_ctx, instanceNo);
58 }
59
60 // GSN_READ_CONFIG_REQ
61 void
callREAD_CONFIG_REQ(Signal * signal)62 DbtupProxy::callREAD_CONFIG_REQ(Signal* signal)
63 {
64 const ReadConfigReq* req = (const ReadConfigReq*)signal->getDataPtr();
65 ndbrequire(req->noOfParameters == 0);
66
67 const ndb_mgm_configuration_iterator * p =
68 m_ctx.m_config.getOwnConfigIterator();
69 ndbrequire(p != 0);
70
71 ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_TUP_TABLE, &c_tableRecSize));
72 c_tableRec = (Uint8*)allocRecord("TableRec", sizeof(Uint8), c_tableRecSize);
73 D("proxy:" << V(c_tableRecSize));
74 Uint32 i;
75 for (i = 0; i < c_tableRecSize; i++)
76 c_tableRec[i] = 0;
77 backREAD_CONFIG_REQ(signal);
78 }
79
80 // GSN_STTOR
81
82 void
callSTTOR(Signal * signal)83 DbtupProxy::callSTTOR(Signal* signal)
84 {
85 Uint32 startPhase = signal->theData[1];
86 switch (startPhase) {
87 case 1:
88 c_pgman = (Pgman*)globalData.getBlock(PGMAN);
89 ndbrequire(c_pgman != 0);
90 break;
91 }
92 backSTTOR(signal);
93 }
94
95 // GSN_CREATE_TAB_REQ
96
97 void
execCREATE_TAB_REQ(Signal * signal)98 DbtupProxy::execCREATE_TAB_REQ(Signal* signal)
99 {
100 const CreateTabReq* req = (const CreateTabReq*)signal->getDataPtr();
101 const Uint32 tableId = req->tableId;
102 ndbrequire(tableId < c_tableRecSize);
103 ndbrequire(c_tableRec[tableId] == 0);
104 c_tableRec[tableId] = 1;
105 D("proxy: created table" << V(tableId));
106 }
107
108 void
execDROP_TAB_REQ(Signal * signal)109 DbtupProxy::execDROP_TAB_REQ(Signal* signal)
110 {
111 const DropTabReq* req = (const DropTabReq*)signal->getDataPtr();
112 const Uint32 tableId = req->tableId;
113 ndbrequire(tableId < c_tableRecSize);
114 c_tableRec[tableId] = 0;
115 D("proxy: dropped table" << V(tableId));
116 }
117
118 // GSN_BUILD_INDX_IMPL_REQ
119
120 void
execBUILD_INDX_IMPL_REQ(Signal * signal)121 DbtupProxy::execBUILD_INDX_IMPL_REQ(Signal* signal)
122 {
123 const BuildIndxImplReq* req = (const BuildIndxImplReq*)signal->getDataPtr();
124 Ss_BUILD_INDX_IMPL_REQ& ss = ssSeize<Ss_BUILD_INDX_IMPL_REQ>();
125 ss.m_req = *req;
126 ndbrequire(signal->getLength() == BuildIndxImplReq::SignalLength);
127 sendREQ(signal, ss);
128 }
129
130 void
sendBUILD_INDX_IMPL_REQ(Signal * signal,Uint32 ssId,SectionHandle * handle)131 DbtupProxy::sendBUILD_INDX_IMPL_REQ(Signal* signal, Uint32 ssId,
132 SectionHandle* handle)
133 {
134 Ss_BUILD_INDX_IMPL_REQ& ss = ssFind<Ss_BUILD_INDX_IMPL_REQ>(ssId);
135
136 BuildIndxImplReq* req = (BuildIndxImplReq*)signal->getDataPtrSend();
137 *req = ss.m_req;
138 req->senderRef = reference();
139 req->senderData = ssId;
140 sendSignalNoRelease(workerRef(ss.m_worker), GSN_BUILD_INDX_IMPL_REQ,
141 signal, BuildIndxImplReq::SignalLength, JBB, handle);
142 }
143
144 void
execBUILD_INDX_IMPL_CONF(Signal * signal)145 DbtupProxy::execBUILD_INDX_IMPL_CONF(Signal* signal)
146 {
147 const BuildIndxImplConf* conf = (const BuildIndxImplConf*)signal->getDataPtr();
148 Uint32 ssId = conf->senderData;
149 Ss_BUILD_INDX_IMPL_REQ& ss = ssFind<Ss_BUILD_INDX_IMPL_REQ>(ssId);
150 recvCONF(signal, ss);
151 }
152
153 void
execBUILD_INDX_IMPL_REF(Signal * signal)154 DbtupProxy::execBUILD_INDX_IMPL_REF(Signal* signal)
155 {
156 const BuildIndxImplRef* ref = (const BuildIndxImplRef*)signal->getDataPtr();
157 Uint32 ssId = ref->senderData;
158 Ss_BUILD_INDX_IMPL_REQ& ss = ssFind<Ss_BUILD_INDX_IMPL_REQ>(ssId);
159 recvREF(signal, ss, ref->errorCode);
160 }
161
162 void
sendBUILD_INDX_IMPL_CONF(Signal * signal,Uint32 ssId)163 DbtupProxy::sendBUILD_INDX_IMPL_CONF(Signal* signal, Uint32 ssId)
164 {
165 Ss_BUILD_INDX_IMPL_REQ& ss = ssFind<Ss_BUILD_INDX_IMPL_REQ>(ssId);
166 BlockReference dictRef = ss.m_req.senderRef;
167
168 if (!lastReply(ss))
169 return;
170
171 if (ss.m_error == 0) {
172 jam();
173 BuildIndxImplConf* conf = (BuildIndxImplConf*)signal->getDataPtrSend();
174 conf->senderRef = reference();
175 conf->senderData = ss.m_req.senderData;
176 sendSignal(dictRef, GSN_BUILD_INDX_IMPL_CONF,
177 signal, BuildIndxImplConf::SignalLength, JBB);
178 } else {
179 BuildIndxImplRef* ref = (BuildIndxImplRef*)signal->getDataPtrSend();
180 ref->senderRef = reference();
181 ref->senderData = ss.m_req.senderData;
182 ref->errorCode = ss.m_error;
183 sendSignal(dictRef, GSN_BUILD_INDX_IMPL_REF,
184 signal, BuildIndxImplRef::SignalLength, JBB);
185 }
186
187 ssRelease<Ss_BUILD_INDX_IMPL_REQ>(ssId);
188 }
189
190 // client methods
191
192 // LGMAN
193
Proxy_undo()194 DbtupProxy::Proxy_undo::Proxy_undo()
195 {
196 m_type = 0;
197 m_len = 0;
198 m_ptr = 0;
199 m_lsn = (Uint64)0;
200 m_key.setNull();
201 m_page_id = ~(Uint32)0;
202 m_table_id = ~(Uint32)0;
203 m_fragment_id = ~(Uint32)0;
204 m_instance_no = ~(Uint32)0;
205 m_actions = 0;
206 m_in_use = false;
207 }
208
/*
 * Entry point from LGMAN for one undo log record during disk-data
 * restart.  Decodes the record, updates the proxy's table map for
 * create/drop records, and decides which worker(s) must execute it.
 * Tuple-level records first read the affected page (via PGMAN) to
 * discover the owning table/fragment; all others are finished directly.
 */
void
DbtupProxy::disk_restart_undo(Signal* signal, Uint64 lsn,
                              Uint32 type, const Uint32 * ptr, Uint32 len)
{
  Proxy_undo& undo = c_proxy_undo;
  // Only one undo record may be in flight at a time.
  ndbrequire(!undo.m_in_use);
  new (&undo) Proxy_undo;  // placement new re-runs the ctor to reset all fields
  undo.m_in_use = true;

  D("proxy: disk_restart_undo" << V(type) << hex << V(ptr) << dec << V(len) << V(lsn));
  undo.m_type = type;
  undo.m_len = len;
  undo.m_ptr = ptr;
  undo.m_lsn = lsn;

  /*
   * The timeslice via PGMAN/5 gives LGMAN a chance to overwrite the
   * data pointed to by ptr. So save it now and do not use ptr.
   */
  ndbrequire(undo.m_len <= Proxy_undo::MaxData);
  memcpy(undo.m_data, undo.m_ptr, undo.m_len << 2);  // m_len is in 32-bit words

  switch (undo.m_type) {
  case File_formats::Undofile::UNDO_LCP_FIRST:
  case File_formats::Undofile::UNDO_LCP:
  {
    // LCP marker: table and fragment are packed into one word.
    undo.m_table_id = ptr[1] >> 16;
    undo.m_fragment_id = ptr[1] & 0xFFFF;
    undo.m_actions |= Proxy_undo::SendToAll;
    undo.m_actions |= Proxy_undo::SendUndoNext;
  }
  break;
  case File_formats::Undofile::UNDO_TUP_ALLOC:
  {
    // Tuple alloc: extract the disk page key; the page itself tells us
    // which table/fragment (and thus which worker instance) owns it.
    const Dbtup::Disk_undo::Alloc* rec =
      (const Dbtup::Disk_undo::Alloc*)ptr;
    undo.m_key.m_file_no = rec->m_file_no_page_idx >> 16;
    undo.m_key.m_page_no = rec->m_page_no;
    undo.m_key.m_page_idx = rec->m_file_no_page_idx & 0xFFFF;
    undo.m_actions |= Proxy_undo::ReadTupPage;
    undo.m_actions |= Proxy_undo::GetInstance;
  }
  break;
  case File_formats::Undofile::UNDO_TUP_UPDATE:
  {
    // Tuple update: same page-key extraction as alloc.
    const Dbtup::Disk_undo::Update* rec =
      (const Dbtup::Disk_undo::Update*)ptr;
    undo.m_key.m_file_no = rec->m_file_no_page_idx >> 16;
    undo.m_key.m_page_no = rec->m_page_no;
    undo.m_key.m_page_idx = rec->m_file_no_page_idx & 0xFFFF;
    undo.m_actions |= Proxy_undo::ReadTupPage;
    undo.m_actions |= Proxy_undo::GetInstance;
  }
  break;
  case File_formats::Undofile::UNDO_TUP_FREE:
  {
    // Tuple free: same page-key extraction as alloc.
    const Dbtup::Disk_undo::Free* rec =
      (const Dbtup::Disk_undo::Free*)ptr;
    undo.m_key.m_file_no = rec->m_file_no_page_idx >> 16;
    undo.m_key.m_page_no = rec->m_page_no;
    undo.m_key.m_page_idx = rec->m_file_no_page_idx & 0xFFFF;
    undo.m_actions |= Proxy_undo::ReadTupPage;
    undo.m_actions |= Proxy_undo::GetInstance;
  }
  break;

  case File_formats::Undofile::UNDO_TUP_CREATE:
  {
    jam();
    // Undoing a table create: forget the table locally, then let every
    // worker see the record.
    Dbtup::Disk_undo::Create* rec= (Dbtup::Disk_undo::Create*)ptr;
    Uint32 tableId = rec->m_table;
    if (tableId < c_tableRecSize)
    {
      jam();
      c_tableRec[tableId] = 0;
    }

    undo.m_actions |= Proxy_undo::SendToAll;
    undo.m_actions |= Proxy_undo::SendUndoNext;
    break;
  }
  case File_formats::Undofile::UNDO_TUP_DROP:
  {
    jam();
    // Undoing a table drop: the table map is cleared here as well —
    // the record is still broadcast to all workers.
    Dbtup::Disk_undo::Drop* rec= (Dbtup::Disk_undo::Drop*)ptr;
    Uint32 tableId = rec->m_table;
    if (tableId < c_tableRecSize)
    {
      jam();
      c_tableRec[tableId] = 0;
    }

    undo.m_actions |= Proxy_undo::SendToAll;
    undo.m_actions |= Proxy_undo::SendUndoNext;
    break;
  }
#if NOT_YET_UNDO_ALLOC_EXTENT
  case File_formats::Undofile::UNDO_TUP_ALLOC_EXTENT:
    ndbrequire(false);
    break;
#endif
#if NOT_YET_UNDO_FREE_EXTENT
  case File_formats::Undofile::UNDO_TUP_FREE_EXTENT:
    ndbrequire(false);
    break;
#endif
  case File_formats::Undofile::UNDO_END:
  {
    // End of undo log: broadcast, but do not request the next record.
    undo.m_actions |= Proxy_undo::SendToAll;
  }
  break;
  default:
    ndbrequire(false);
    break;
  }

  if (undo.m_actions & Proxy_undo::ReadTupPage) {
    jam();
    /*
     * Page request goes to the extra PGMAN worker (our thread).
     * TUP worker reads same page again via another PGMAN worker.
     * MT-LGMAN is planned, do not optimize (pass page) now
     */
    Page_cache_client pgman(this, c_pgman);
    Page_cache_client::Request req;

    req.m_page = undo.m_key;
    req.m_callback.m_callbackData = 0;
    req.m_callback.m_callbackFunction =
      safe_cast(&DbtupProxy::disk_restart_undo_callback);

    int ret = pgman.get_page(signal, req, 0);
    ndbrequire(ret >= 0);
    if (ret > 0) {
      jam();
      // Page already in cache: run the callback synchronously.
      execute(signal, req.m_callback, (Uint32)ret);
    }
    return;  // otherwise the callback fires when the page arrives
  }

  disk_restart_undo_finish(signal);
}
351
/*
 * PGMAN callback invoked once the disk page referenced by a tuple-level
 * undo record is available.  Reads the page header to find the owning
 * table/fragment and decides (via LSN and table-existence checks)
 * whether the record must actually be executed.
 */
void
DbtupProxy::disk_restart_undo_callback(Signal* signal, Uint32, Uint32 page_id)
{
  Proxy_undo& undo = c_proxy_undo;
  undo.m_page_id = page_id;

  Ptr<GlobalPage> gptr;
  m_global_page_pool.getPtr(gptr, undo.m_page_id);

  ndbrequire(undo.m_actions & Proxy_undo::ReadTupPage);
  {
    jam();
    const Tup_page* page = (const Tup_page*)gptr.p;
    const File_formats::Page_header& header = page->m_page_header;
    const Uint32 page_type = header.m_page_type;

    if (page_type == 0) { // wl4391_todo ?
      jam();
      // Never-written page: it must carry no LSN, and there is nothing
      // to undo on it.
      ndbrequire(header.m_page_lsn_hi == 0 && header.m_page_lsn_lo == 0);
      undo.m_actions |= Proxy_undo::NoExecute;
      undo.m_actions |= Proxy_undo::SendUndoNext;
      D("proxy: callback" << V(page_type));
    } else {
      ndbrequire(page_type == File_formats::PT_Tup_fixsize_page ||
                 page_type == File_formats::PT_Tup_varsize_page);

      Uint64 page_lsn = (Uint64(header.m_page_lsn_hi) << 32) + header.m_page_lsn_lo;
      if (! (undo.m_lsn <= page_lsn))
      {
        jam();
        // Record is newer than the page on disk: the logged change never
        // made it to the page, so it must not be executed.
        undo.m_actions |= Proxy_undo::NoExecute;
        undo.m_actions |= Proxy_undo::SendUndoNext;
      }

      undo.m_table_id = page->m_table_id;
      undo.m_fragment_id = page->m_fragment_id;
      D("proxy: callback" << V(undo.m_table_id) << V(undo.m_fragment_id));
      const Uint32 tableId = undo.m_table_id;
      if (tableId >= c_tableRecSize || c_tableRec[tableId] == 0) {
        // Table no longer exists: skip execution for this record.
        D("proxy: table dropped" << V(tableId));
        undo.m_actions |= Proxy_undo::NoExecute;
        undo.m_actions |= Proxy_undo::SendUndoNext;
      }
    }
  }

  disk_restart_undo_finish(signal);
}
400
401 void
disk_restart_undo_finish(Signal * signal)402 DbtupProxy::disk_restart_undo_finish(Signal* signal)
403 {
404 Proxy_undo& undo = c_proxy_undo;
405
406 if (undo.m_actions & Proxy_undo::SendUndoNext) {
407 jam();
408 signal->theData[0] = LgmanContinueB::EXECUTE_UNDO_RECORD;
409 sendSignal(LGMAN_REF, GSN_CONTINUEB, signal, 1, JBB);
410 }
411
412 if (undo.m_actions & Proxy_undo::NoExecute) {
413 jam();
414 goto finish;
415 }
416
417 if (undo.m_actions & Proxy_undo::GetInstance) {
418 jam();
419 Uint32 instanceKey = getInstanceKey(undo.m_table_id, undo.m_fragment_id);
420 Uint32 instanceNo = getInstanceFromKey(instanceKey);
421 undo.m_instance_no = instanceNo;
422 }
423
424 if (!(undo.m_actions & Proxy_undo::SendToAll)) {
425 jam();
426 ndbrequire(undo.m_instance_no != 0);
427 Uint32 i = undo.m_instance_no - 1;
428 disk_restart_undo_send(signal, i);
429 } else {
430 jam();
431 Uint32 i;
432 for (i = 0; i < c_workers; i++) {
433 disk_restart_undo_send(signal, i);
434 }
435 }
436
437 finish:
438 ndbrequire(undo.m_in_use);
439 undo.m_in_use = false;
440 }
441
442 void
disk_restart_undo_send(Signal * signal,Uint32 i)443 DbtupProxy::disk_restart_undo_send(Signal* signal, Uint32 i)
444 {
445 /*
446 * Send undo entry via long signal because:
447 * 1) a method call would execute in another non-mutexed Pgman
448 * 2) MT-LGMAN is planned, do not optimize (pass pointer) now
449 */
450 Proxy_undo& undo = c_proxy_undo;
451
452 LinearSectionPtr ptr[3];
453 ptr[0].p = undo.m_data;
454 ptr[0].sz = undo.m_len;
455
456 signal->theData[0] = ZDISK_RESTART_UNDO;
457 signal->theData[1] = undo.m_type;
458 signal->theData[2] = undo.m_len;
459 signal->theData[3] = (Uint32)(undo.m_lsn >> 32);
460 signal->theData[4] = (Uint32)(undo.m_lsn & 0xFFFFFFFF);
461 sendSignal(workerRef(i), GSN_CONTINUEB, signal, 5, JBB, ptr, 1);
462 }
463
464 // TSMAN
465
466 int
disk_restart_alloc_extent(Uint32 tableId,Uint32 fragId,const Local_key * key,Uint32 pages)467 DbtupProxy::disk_restart_alloc_extent(Uint32 tableId, Uint32 fragId,
468 const Local_key* key, Uint32 pages)
469 {
470 if (tableId >= c_tableRecSize || c_tableRec[tableId] == 0) {
471 jam();
472 D("proxy: table dropped" << V(tableId));
473 return -1;
474 }
475
476 // local call so mapping instance key to number is ok
477 Uint32 instanceKey = getInstanceKey(tableId, fragId);
478 Uint32 instanceNo = getInstanceFromKey(instanceKey);
479
480 Uint32 i = workerIndex(instanceNo);
481 Dbtup* dbtup = (Dbtup*)workerBlock(i);
482 return dbtup->disk_restart_alloc_extent(tableId, fragId, key, pages);
483 }
484
485 void
disk_restart_page_bits(Uint32 tableId,Uint32 fragId,const Local_key * key,Uint32 bits)486 DbtupProxy::disk_restart_page_bits(Uint32 tableId, Uint32 fragId,
487 const Local_key* key, Uint32 bits)
488 {
489 ndbrequire(tableId < c_tableRecSize && c_tableRec[tableId] == 1);
490
491 // local call so mapping instance key to number is ok
492 Uint32 instanceKey = getInstanceKey(tableId, fragId);
493 Uint32 instanceNo = getInstanceFromKey(instanceKey);
494
495 Uint32 i = workerIndex(instanceNo);
496 Dbtup* dbtup = (Dbtup*)workerBlock(i);
497 dbtup->disk_restart_page_bits(tableId, fragId, key, bits);
498 }
499
// Expands to the standard per-block boilerplate for DbtupProxy
// (presumably the execute/trace glue from SimulatedBlock.hpp — confirm).
BLOCK_FUNCTIONS(DbtupProxy)