/*
   Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License, version 2.0,
   as published by the Free Software Foundation.

   This program is also distributed with certain software (including
   but not limited to OpenSSL) that is licensed under separate terms,
   as designated in a particular file or component or in included license
   documentation. The authors of MySQL hereby grant you an additional
   permission to link the program and your derivative works with the
   separately licensed software that they have included with MySQL.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
   GNU General Public License, version 2.0, for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/

#include <ndb_global.h>

#include "ClusterConfiguration.hpp"
#include <ErrorHandlingMacros.hpp>

#include <pc.hpp>
#include <BlockNumbers.h>
#include <signaldata/AccSizeAltReq.hpp>
#include <signaldata/DictSizeAltReq.hpp>
#include <signaldata/DihSizeAltReq.hpp>
#include <signaldata/LqhSizeAltReq.hpp>
#include <signaldata/TcSizeAltReq.hpp>
#include <signaldata/TupSizeAltReq.hpp>
#include <signaldata/TuxSizeAltReq.hpp>

#define JAM_FILE_ID 256

ClusterConfiguration::ClusterConfiguration()
{
  for (unsigned i = 0; i < MAX_SIZEALT_BLOCKS; i++)      // initialize
    for (unsigned j = 0; j < MAX_SIZEALT_RECORD; j++) {
      the_clusterData.SizeAltData.varSize[i][j].valid = false;
      the_clusterData.SizeAltData.varSize[i][j].nrr = 0;
    }

  for (unsigned i1 = 0; i1 < 5; i1++)                    // initialize
    for (unsigned j1 = 0; j1 < CmvmiCfgConf::NO_OF_WORDS; j1++)
      the_clusterData.ispValues[i1][j1] = 0;

  the_clusterData.SizeAltData.noOfNodes = 0;
  the_clusterData.SizeAltData.noOfNDBNodes = 0;
  the_clusterData.SizeAltData.noOfAPINodes = 0;
  the_clusterData.SizeAltData.noOfMGMNodes = 0;
}

ClusterConfiguration::~ClusterConfiguration(){
}

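/**
 * Record one size-alteration value for a block variable and mark the
 * entry as valid.
 */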
void
setValue(VarSize* dst, const int index, UintR variableValue){
  assert(dst != NULL);
  assert(index >= 0 && index < MAX_SIZEALT_RECORD);

  dst[index].nrr = variableValue;
  dst[index].valid = true;
}

void
ClusterConfiguration::calcSizeAlteration()
{
  SizeAlt *size = &the_clusterData.SizeAltData;

  size->noOfTables++;                    // Compensate for the system table
  size->noOfTables += size->noOfIndexes; // Indexes are tables too
  size->noOfAttributes += 2;             // ---"---

  size->noOfTables *= 2;                 // Dict needs 2 ids for each table

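  // Local scan records are sized from the number of DB nodes (capped at
  // 15) times the configured number of scan records, plus one spare.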
  Uint32 noOfDBNodes = size->noOfNDBNodes;
  if (noOfDBNodes > 15) {
    noOfDBNodes = 15;
  }//if
  Uint32 noOfLocalScanRecords = (noOfDBNodes * size->noOfScanRecords) + 1;
  Uint32 noOfTCScanRecords = size->noOfScanRecords;
  {
    /**
     * Acc Size Alt values
     */
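    // DBACC: the hash index (access) block; sized for index directories,
    // fragments, operation records and scans.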
    size->blockNo[ACC] = DBACC;

    VarSize * const acc = &(size->varSize[ACC][0]);

    // Can keep 65536 pages (= 0.5 GByte)
    setValue(acc, AccSizeAltReq::IND_DIR_RANGE,
             4 * NO_OF_FRAG_PER_NODE * size->noOfTables * size->noOfReplicas);

    setValue(acc, AccSizeAltReq::IND_DIR_ARRAY,
             (size->noOfIndexPages >> 8) +
             4 * NO_OF_FRAG_PER_NODE * size->noOfTables * size->noOfReplicas);

    setValue(acc, AccSizeAltReq::IND_FRAGMENT,
             2 * NO_OF_FRAG_PER_NODE * size->noOfTables * size->noOfReplicas);

    /*-----------------------------------------------------------------------*/
    // The extra operation records added are used by the scan and node
    // recovery processes. The node recovery process gets dedicated
    // operation records so that it never fails to allocate one; the
    // remainder are available to the scan processes.
    /*-----------------------------------------------------------------------*/
    setValue(acc, AccSizeAltReq::IND_OP_RECS,
             size->noOfReplicas*((16 * size->noOfOperations) / 10 + 50) +
             (noOfLocalScanRecords * MAX_PARALLEL_SCANS_PER_FRAG) +
             NODE_RECOVERY_SCAN_OP_RECORDS);

    setValue(acc, AccSizeAltReq::IND_OVERFLOW_RECS,
             size->noOfIndexPages +
             2 * NO_OF_FRAG_PER_NODE * size->noOfTables * size->noOfReplicas);

    setValue(acc, AccSizeAltReq::IND_PAGE8,
             size->noOfIndexPages + 32);

    setValue(acc, AccSizeAltReq::IND_ROOT_FRAG,
             NO_OF_FRAG_PER_NODE * size->noOfTables * size->noOfReplicas);

    setValue(acc, AccSizeAltReq::IND_TABLE,
             size->noOfTables);

    setValue(acc, AccSizeAltReq::IND_SCAN,
             noOfLocalScanRecords);
  }

  {
    /**
     * Dict Size Alt values
     */
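    // DBDICT: the data dictionary block, storing table, index and
    // attribute metadata.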
    size->blockNo[DICT] = DBDICT;

    VarSize * const dict = &(size->varSize[DICT][0]);

    setValue(dict, DictSizeAltReq::IND_ATTRIBUTE,
             size->noOfAttributes);

    setValue(dict, DictSizeAltReq::IND_CONNECT,
             size->noOfOperations + 32);

    setValue(dict, DictSizeAltReq::IND_FRAG_CONNECT,
             NO_OF_FRAG_PER_NODE * size->noOfNDBNodes * size->noOfReplicas);

    setValue(dict, DictSizeAltReq::IND_TABLE,
             size->noOfTables);

    setValue(dict, DictSizeAltReq::IND_TC_CONNECT,
             2 * size->noOfOperations);
  }

  {
    /**
     * Dih Size Alt values
     */
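    // DBDIH: the distribution handler, tracking fragment and replica
    // placement across the NDB nodes.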
    size->blockNo[DIH] = DBDIH;

    VarSize * const dih = &(size->varSize[DIH][0]);

    setValue(dih, DihSizeAltReq::IND_API_CONNECT,
             2 * size->noOfTransactions);

    setValue(dih, DihSizeAltReq::IND_CONNECT,
             size->noOfOperations + 46);

    setValue(dih, DihSizeAltReq::IND_FRAG_CONNECT,
             NO_OF_FRAG_PER_NODE * size->noOfTables * size->noOfNDBNodes);

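    // temp works out to max(noOfReplicas - 1, 1) and scales the
    // IND_MORE_NODES allocation below.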
    int temp;
    temp = size->noOfReplicas - 2;
    if (temp < 0)
      temp = 1;
    else
      temp++;
    setValue(dih, DihSizeAltReq::IND_MORE_NODES,
             temp * NO_OF_FRAG_PER_NODE *
             size->noOfTables * size->noOfNDBNodes);

    setValue(dih, DihSizeAltReq::IND_REPLICAS,
             NO_OF_FRAG_PER_NODE * size->noOfTables *
             size->noOfNDBNodes * size->noOfReplicas);

    setValue(dih, DihSizeAltReq::IND_TABLE,
             size->noOfTables);
  }

  {
    /**
     * Lqh Size Alt values
     */
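    // DBLQH: the local query handler, which owns the fragment operation
    // records and the redo log files.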
    size->blockNo[LQH] = DBLQH;

    VarSize * const lqh = &(size->varSize[LQH][0]);

    setValue(lqh, LqhSizeAltReq::IND_FRAG,
             NO_OF_FRAG_PER_NODE * size->noOfTables * size->noOfReplicas);

    setValue(lqh, LqhSizeAltReq::IND_CONNECT,
             size->noOfReplicas*((11 * size->noOfOperations) / 10 + 50));

    setValue(lqh, LqhSizeAltReq::IND_TABLE,
             size->noOfTables);

    setValue(lqh, LqhSizeAltReq::IND_TC_CONNECT,
             size->noOfReplicas*((16 * size->noOfOperations) / 10 + 50));

    setValue(lqh, LqhSizeAltReq::IND_REPLICAS,
             size->noOfReplicas);

    setValue(lqh, LqhSizeAltReq::IND_LOG_FILES,
             (4 * the_clusterData.ispValues[1][4]));

    setValue(lqh, LqhSizeAltReq::IND_SCAN,
             noOfLocalScanRecords);
  }

  {
    /**
     * Tc Size Alt values
     */
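    // DBTC: the transaction coordinator, sized per concurrent
    // transaction and scan.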
    size->blockNo[TC] = DBTC;

    VarSize * const tc = &(size->varSize[TC][0]);

    setValue(tc, TcSizeAltReq::IND_API_CONNECT,
             3 * size->noOfTransactions);

    setValue(tc, TcSizeAltReq::IND_TC_CONNECT,
             size->noOfOperations + 16 + size->noOfTransactions);

    setValue(tc, TcSizeAltReq::IND_TABLE,
             size->noOfTables);

    setValue(tc, TcSizeAltReq::IND_LOCAL_SCAN,
             noOfLocalScanRecords);

    setValue(tc, TcSizeAltReq::IND_TC_SCAN,
             noOfTCScanRecords);
  }

  {
    /**
     * Tup Size Alt values
     */
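    // DBTUP: the tuple manager, which stores the actual row data and
    // its page structures.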
    size->blockNo[TUP] = DBTUP;

    VarSize * const tup = &(size->varSize[TUP][0]);

    setValue(tup, TupSizeAltReq::IND_DISK_PAGE_ARRAY,
             2 * NO_OF_FRAG_PER_NODE * size->noOfTables * size->noOfReplicas);

    setValue(tup, TupSizeAltReq::IND_DISK_PAGE_REPRESENT,
             size->noOfDiskClusters);

    setValue(tup, TupSizeAltReq::IND_FRAG,
             2 * NO_OF_FRAG_PER_NODE * size->noOfTables * size->noOfReplicas);

    setValue(tup, TupSizeAltReq::IND_PAGE_CLUSTER,
             size->noOfFreeClusters);

    setValue(tup, TupSizeAltReq::IND_LOGIC_PAGE,
             size->noOfDiskBufferPages + size->noOfDiskClusters);

    setValue(tup, TupSizeAltReq::IND_OP_RECS,
             size->noOfReplicas*((16 * size->noOfOperations) / 10 + 50));

    setValue(tup, TupSizeAltReq::IND_PAGE,
             size->noOfDataPages);

    setValue(tup, TupSizeAltReq::IND_PAGE_RANGE,
             4 * NO_OF_FRAG_PER_NODE * size->noOfTables * size->noOfReplicas);

    setValue(tup, TupSizeAltReq::IND_TABLE,
             size->noOfTables);

    setValue(tup, TupSizeAltReq::IND_TABLE_DESC,
             4 * NO_OF_FRAG_PER_NODE * size->noOfAttributes * size->noOfReplicas +
             12 * NO_OF_FRAG_PER_NODE * size->noOfTables * size->noOfReplicas);

    setValue(tup, TupSizeAltReq::IND_DELETED_BLOCKS,
             size->noOfFreeClusters);

    setValue(tup, TupSizeAltReq::IND_STORED_PROC,
             noOfLocalScanRecords);
  }

  {
    /**
     * Tux Size Alt values
     */
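    // DBTUX: the ordered index block; entries scale with the number of
    // indexes and fragments.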
    size->blockNo[TUX] = DBTUX;

    VarSize * const tux = &(size->varSize[TUX][0]);

    setValue(tux, TuxSizeAltReq::IND_INDEX,
             size->noOfTables);

    setValue(tux, TuxSizeAltReq::IND_FRAGMENT,
             2 * NO_OF_FRAG_PER_NODE * size->noOfTables * size->noOfReplicas);

    setValue(tux, TuxSizeAltReq::IND_ATTRIBUTE,
             size->noOfIndexes * 4);

    setValue(tux, TuxSizeAltReq::IND_SCAN,
             noOfLocalScanRecords);
  }
}

const ClusterConfiguration::ClusterData&
ClusterConfiguration::clusterData() const
{
  return the_clusterData;
}

void ClusterConfiguration::init(const Properties & p, const Properties & db){
  const char * msg = "Invalid configuration fetched";

  ClusterData & cd = the_clusterData;

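  // Map each configuration parameter name to the member it populates;
  // every listed parameter must be present in the fetched configuration.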
  struct AttribStorage { const char * attrib; Uint32 * storage; };
  AttribStorage tmp[] = {
    {"MaxNoOfConcurrentScans", &cd.SizeAltData.noOfScanRecords },
    {"MaxNoOfTables", &cd.SizeAltData.noOfTables },
    {"MaxNoOfIndexes", &cd.SizeAltData.noOfIndexes },
    {"NoOfReplicas", &cd.SizeAltData.noOfReplicas },
    {"MaxNoOfAttributes", &cd.SizeAltData.noOfAttributes },
    {"MaxNoOfConcurrentOperations", &cd.SizeAltData.noOfOperations },
    {"MaxNoOfConcurrentTransactions", &cd.SizeAltData.noOfTransactions },
    {"NoOfIndexPages", &cd.SizeAltData.noOfIndexPages },
    {"NoOfDataPages", &cd.SizeAltData.noOfDataPages },
    {"NoOfDiskBufferPages", &cd.SizeAltData.noOfDiskBufferPages },
    {"NoOfDiskClusters", &cd.SizeAltData.noOfDiskClusters },
    {"NoOfFreeDiskClusters", &cd.SizeAltData.noOfFreeClusters },
    {"TimeToWaitAlive", &cd.ispValues[0][0] },
    {"HeartbeatIntervalDbDb", &cd.ispValues[0][2] },
    {"HeartbeatIntervalDbApi", &cd.ispValues[0][3] },
    {"ArbitrationTimeout", &cd.ispValues[0][5] },
    {"TimeBetweenLocalCheckpoints", &cd.ispValues[1][2] },
    {"NoOfFragmentLogFiles", &cd.ispValues[1][4] },
    {"MaxNoOfConcurrentScans", &cd.SizeAltData.noOfScanRecords },
    {"NoOfConcurrentCheckpointsDuringRestart", &cd.ispValues[1][5] },
    {"TransactionDeadlockDetectionTimeout", &cd.ispValues[1][6] },
    {"NoOfConcurrentProcessesHandleTakeover", &cd.ispValues[1][7] },
    {"TimeBetweenGlobalCheckpoints", &cd.ispValues[2][3] },
    {"NoOfConcurrentCheckpointsAfterRestart", &cd.ispValues[2][4] },
    {"TransactionInactiveTimeout", &cd.ispValues[2][7] },
    {"NoOfDiskPagesToDiskDuringRestartTUP", &cd.ispValues[3][8] },
    {"NoOfDiskPagesToDiskAfterRestartTUP", &cd.ispValues[3][9] },
    {"NoOfDiskPagesToDiskDuringRestartACC", &cd.ispValues[3][10] },
    {"NoOfDiskPagesToDiskAfterRestartACC", &cd.ispValues[3][11] },
    {"NoOfDiskClustersPerDiskFile", &cd.ispValues[4][8] },
    {"NoOfDiskFiles", &cd.ispValues[4][9] },
    {"NoOfReplicas", &cd.ispValues[2][2] }
  };

  const int sz = sizeof(tmp)/sizeof(AttribStorage);
  for(int i = 0; i<sz; i++){
    if(!db.get(tmp[i].attrib, tmp[i].storage)){
      char buf[255];
      BaseString::snprintf(buf, sizeof(buf), "%s not found", tmp[i].attrib);
      ERROR_SET(fatal, NDBD_EXIT_INVALID_CONFIG, msg, buf);
    }
  }

  if(!p.get("NoOfNodes", &cd.SizeAltData.noOfNodes)){
    ERROR_SET(fatal, NDBD_EXIT_INVALID_CONFIG, msg, "NoOfNodes missing");
  }

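  // Walk every Node_* section, validate its node id and classify the
  // node by type.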
  Properties::Iterator it(&p);
  const char * name = 0;
  Uint32 nodeNo = 0;
  for(name = it.first(); name != NULL; name = it.next()){
    if(strncmp(name, "Node_", strlen("Node_")) == 0){

      Uint32 nodeId;
      const char * nodeType;
      const Properties * node;

      if(!p.get(name, &node)){
        ERROR_SET(fatal, NDBD_EXIT_INVALID_CONFIG, msg, "Node data missing");
      }

      if(!node->get("Id", &nodeId)){
        ERROR_SET(fatal, NDBD_EXIT_INVALID_CONFIG, msg, "Node data (Id) missing");
      }

      if(!node->get("Type", &nodeType)){
        ERROR_SET(fatal, NDBD_EXIT_INVALID_CONFIG, msg, "Node data (Type) missing");
      }

      if(nodeId > MAX_NODES){
        char buf[255];
        BaseString::snprintf(buf, sizeof(buf),
                             "Maximum node id allowed is: %d", MAX_NODES);
        ERROR_SET(fatal, NDBD_EXIT_INVALID_CONFIG, msg, buf);
      }

      if(nodeId == 0){
        char buf[255];
        BaseString::snprintf(buf, sizeof(buf),
                             "Minimum node id allowed in the cluster is: 1");
        ERROR_SET(fatal, NDBD_EXIT_INVALID_CONFIG, msg, buf);
      }

      for(unsigned j = 0; j<nodeNo; j++){
        if(cd.nodeData[j].nodeId == nodeId){
          char buf[255];
          BaseString::snprintf(buf, sizeof(buf),
                               "Two nodes can not have the same node id");
          ERROR_SET(fatal, NDBD_EXIT_INVALID_CONFIG, msg, buf);
        }
      }

      {
        for(unsigned j = 0; j<LogLevel::LOGLEVEL_CATEGORIES; j++){
          Uint32 logLevel;
          if(db.get(LogLevel::LOGLEVEL_CATEGORY_NAME[j].name, &logLevel)){
            cd.SizeAltData.logLevel.setLogLevel((LogLevel::EventCategory)j,
                                                logLevel);
          }
        }
      }

      cd.nodeData[nodeNo].nodeId = nodeId;
      const char* tmpApiMgmProperties = 0;
      if(strcmp("DB", nodeType) == 0){
        cd.nodeData[nodeNo].nodeType = NodeInfo::DB;
        cd.SizeAltData.noOfNDBNodes++; // No of NDB processes

        if(nodeId > MAX_NDB_NODES){
          char buf[255];
          BaseString::snprintf(buf, sizeof(buf),
                               "Maximum node id for an ndb node is: %d",
                               MAX_NDB_NODES);
          ERROR_SET(fatal, NDBD_EXIT_INVALID_CONFIG, msg, buf);
        }
        if(cd.SizeAltData.noOfNDBNodes > MAX_NDB_NODES){
          char buf[255];
          BaseString::snprintf(buf, sizeof(buf),
                               "A maximum of %d ndb nodes are allowed in the cluster",
                               MAX_NDB_NODES);
          ERROR_SET(fatal, NDBD_EXIT_INVALID_CONFIG, msg, buf);
        }
      } else if(strcmp("API", nodeType) == 0){
        cd.nodeData[nodeNo].nodeType = NodeInfo::API;
        cd.SizeAltData.noOfAPINodes++; // No of API processes
        tmpApiMgmProperties = "API";
      } else if(strcmp("REP", nodeType) == 0){
        cd.nodeData[nodeNo].nodeType = NodeInfo::REP;
        //cd.SizeAltData.noOfAPINodes++; // No of API processes
        tmpApiMgmProperties = "REP";
      } else if(strcmp("MGM", nodeType) == 0){
        cd.nodeData[nodeNo].nodeType = NodeInfo::MGM;
        cd.SizeAltData.noOfMGMNodes++; // No of MGM processes
        tmpApiMgmProperties = "MGM";
      } else {
        ERROR_SET(fatal, NDBD_EXIT_INVALID_CONFIG,
                  "Invalid configuration: Unknown node type",
                  nodeType);
      }

      if (tmpApiMgmProperties) {
        /*
          const Properties* q = 0;

          if (!p.get(tmpApiMgmProperties, nodeId, &q)) {
            ERROR_SET(fatal, NDBD_EXIT_INVALID_CONFIG, msg, tmpApiMgmProperties);
          } else {
        */
        Uint32 rank = 0;
        if (node->get("ArbitrationRank", &rank) && rank > 0) {
          cd.nodeData[nodeNo].arbitRank = rank;
          // }
        }
      } else {
        cd.nodeData[nodeNo].arbitRank = 0;
      }

      nodeNo++;
    }
  }
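  // All node sections parsed; derive the per-block size alterations.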
  cd.SizeAltData.exist = true;
  calcSizeAlteration();
}