1 /*
2 Copyright (C) 2003-2006 MySQL AB
3 All rights reserved. Use is subject to license terms.
4
5 This program is free software; you can redistribute it and/or modify
6 it under the terms of the GNU General Public License, version 2.0,
7 as published by the Free Software Foundation.
8
9 This program is also distributed with certain software (including
10 but not limited to OpenSSL) that is licensed under separate terms,
11 as designated in a particular file or component or in included license
12 documentation. The authors of MySQL hereby grant you an additional
13 permission to link the program and your derivative works with the
14 separately licensed software that they have included with MySQL.
15
16 This program is distributed in the hope that it will be useful,
17 but WITHOUT ANY WARRANTY; without even the implied warranty of
18 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 GNU General Public License, version 2.0, for more details.
20
21 You should have received a copy of the GNU General Public License
22 along with this program; if not, write to the Free Software
23 Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
24 */
25
26 #include <ndb_global.h>
27
28 #include "ClusterConfiguration.hpp"
29 #include <ErrorHandlingMacros.hpp>
30
31 #include <pc.hpp>
32 #include <BlockNumbers.h>
33 #include <signaldata/AccSizeAltReq.hpp>
34 #include <signaldata/DictSizeAltReq.hpp>
35 #include <signaldata/DihSizeAltReq.hpp>
36 #include <signaldata/LqhSizeAltReq.hpp>
37 #include <signaldata/TcSizeAltReq.hpp>
38 #include <signaldata/TupSizeAltReq.hpp>
39 #include <signaldata/TuxSizeAltReq.hpp>
40
ClusterConfiguration()41 ClusterConfiguration::ClusterConfiguration()
42 {
43 for (unsigned i= 0; i< MAX_SIZEALT_BLOCKS; i++) // initialize
44 for (unsigned j= 0; j< MAX_SIZEALT_RECORD; j++) {
45 the_clusterData.SizeAltData.varSize[i][j].valid = false;
46 the_clusterData.SizeAltData.varSize[i][j].nrr = 0;
47 }
48
49 for (unsigned i1 = 0; i1< 5; i1++) // initialize
50 for (unsigned j1= 0; j1< CmvmiCfgConf::NO_OF_WORDS; j1++)
51 the_clusterData.ispValues[i1][j1] = 0;
52
53 the_clusterData.SizeAltData.noOfNodes = 0;
54 the_clusterData.SizeAltData.noOfNDBNodes = 0;
55 the_clusterData.SizeAltData.noOfAPINodes = 0;
56 the_clusterData.SizeAltData.noOfMGMNodes = 0;
57 }
58
~ClusterConfiguration()59 ClusterConfiguration::~ClusterConfiguration(){
60 }
61
62 void
setValue(VarSize * dst,const int index,UintR variableValue)63 setValue(VarSize* dst, const int index, UintR variableValue){
64 assert(dst != NULL);
65 assert(index >= 0 && index < MAX_SIZEALT_RECORD);
66
67 dst[index].nrr = variableValue;
68 dst[index].valid = true;
69 }
70
/**
 * Derive the per-block (ACC, DICT, DIH, LQH, TC, TUP, TUX) record pool
 * sizes from the user-supplied limits collected by init(), and record
 * them in the_clusterData.SizeAltData via setValue().
 *
 * The multipliers and additive slack used below are tuning constants of
 * the original sizing model; they are preserved verbatim. NOTE(review):
 * the rationale for most individual constants is not visible here —
 * treat any change to them as a behavior change.
 */
void
ClusterConfiguration::calcSizeAlteration()
{
  SizeAlt *size = &the_clusterData.SizeAltData;

  size->noOfTables++;                     // Remove impact of system table
  size->noOfTables += size->noOfIndexes;  // Indexes are tables too
  size->noOfAttributes += 2;              // ---"----

  size->noOfTables *= 2;  // Remove impact of Dict need 2 ids for each table

  // Cap the DB-node factor at 15 — presumably an internal node-group
  // limit; TODO confirm against DIH documentation.
  Uint32 noOfDBNodes = size->noOfNDBNodes;
  if (noOfDBNodes > 15) {
    noOfDBNodes = 15;
  }//if
  // Local scans scale with the (capped) DB-node count; +1 spare record.
  Uint32 noOfLocalScanRecords = (noOfDBNodes * size->noOfScanRecords) + 1;
  Uint32 noOfTCScanRecords = size->noOfScanRecords;
  {
    /**
     * Acc Size Alt values
     */
    size->blockNo[ACC] = DBACC;

    VarSize * const acc = &(size->varSize[ACC][0]);

    // Can keep 65536 pages (= 0.5 GByte)
    setValue(acc, AccSizeAltReq::IND_DIR_RANGE,
             4 * NO_OF_FRAG_PER_NODE * size->noOfTables* size->noOfReplicas);

    setValue(acc, AccSizeAltReq::IND_DIR_ARRAY,
             (size->noOfIndexPages >> 8) +
             4 * NO_OF_FRAG_PER_NODE * size->noOfTables* size->noOfReplicas);

    setValue(acc, AccSizeAltReq::IND_FRAGMENT,
             2 * NO_OF_FRAG_PER_NODE * size->noOfTables* size->noOfReplicas);

    /*-----------------------------------------------------------------------*/
    // The extra operation records added are used by the scan and node
    // recovery process.
    // Node recovery process will have its operations dedicated to ensure
    // that they never have a problem with allocation of the operation record.
    // The remainder are allowed for use by the scan processes.
    /*-----------------------------------------------------------------------*/
    setValue(acc, AccSizeAltReq::IND_OP_RECS,
             size->noOfReplicas*((16 * size->noOfOperations) / 10 + 50) +
             (noOfLocalScanRecords * MAX_PARALLEL_SCANS_PER_FRAG) +
             NODE_RECOVERY_SCAN_OP_RECORDS);

    setValue(acc, AccSizeAltReq::IND_OVERFLOW_RECS,
             size->noOfIndexPages +
             2 * NO_OF_FRAG_PER_NODE * size->noOfTables* size->noOfReplicas);

    setValue(acc, AccSizeAltReq::IND_PAGE8,
             size->noOfIndexPages + 32);

    setValue(acc, AccSizeAltReq::IND_ROOT_FRAG,
             NO_OF_FRAG_PER_NODE * size->noOfTables* size->noOfReplicas);

    setValue(acc, AccSizeAltReq::IND_TABLE,
             size->noOfTables);

    setValue(acc, AccSizeAltReq::IND_SCAN,
             noOfLocalScanRecords);
  }

  {
    /**
     * Dict Size Alt values
     */
    size->blockNo[DICT] = DBDICT;

    VarSize * const dict = &(size->varSize[DICT][0]);

    setValue(dict, DictSizeAltReq::IND_ATTRIBUTE,
             size->noOfAttributes);

    setValue(dict, DictSizeAltReq::IND_CONNECT,
             size->noOfOperations + 32);

    setValue(dict, DictSizeAltReq::IND_FRAG_CONNECT,
             NO_OF_FRAG_PER_NODE * size->noOfNDBNodes * size->noOfReplicas);

    setValue(dict, DictSizeAltReq::IND_TABLE,
             size->noOfTables);

    setValue(dict, DictSizeAltReq::IND_TC_CONNECT,
             2* size->noOfOperations);
  }

  {
    /**
     * Dih Size Alt values
     */
    size->blockNo[DIH] = DBDIH;

    VarSize * const dih = &(size->varSize[DIH][0]);

    setValue(dih, DihSizeAltReq::IND_API_CONNECT,
             2 * size->noOfTransactions);

    setValue(dih, DihSizeAltReq::IND_CONNECT,
             size->noOfOperations + 46);

    setValue(dih, DihSizeAltReq::IND_FRAG_CONNECT,
             NO_OF_FRAG_PER_NODE * size->noOfTables * size->noOfNDBNodes);

    // "More nodes" records scale with replicas beyond the first two:
    // 1 replica -> factor 1, 2 replicas -> factor 1, r > 2 -> factor r - 1.
    int temp;
    temp = size->noOfReplicas - 2;
    if (temp < 0)
      temp = 1;
    else
      temp++;
    setValue(dih, DihSizeAltReq::IND_MORE_NODES,
             temp * NO_OF_FRAG_PER_NODE *
             size->noOfTables * size->noOfNDBNodes);

    setValue(dih, DihSizeAltReq::IND_REPLICAS,
             NO_OF_FRAG_PER_NODE * size->noOfTables *
             size->noOfNDBNodes * size->noOfReplicas);

    setValue(dih, DihSizeAltReq::IND_TABLE,
             size->noOfTables);
  }

  {
    /**
     * Lqh Size Alt values
     */
    size->blockNo[LQH] = DBLQH;

    VarSize * const lqh = &(size->varSize[LQH][0]);

    setValue(lqh, LqhSizeAltReq::IND_FRAG,
             NO_OF_FRAG_PER_NODE * size->noOfTables * size->noOfReplicas);

    setValue(lqh, LqhSizeAltReq::IND_CONNECT,
             size->noOfReplicas*((11 * size->noOfOperations) / 10 + 50));

    setValue(lqh, LqhSizeAltReq::IND_TABLE,
             size->noOfTables);

    setValue(lqh, LqhSizeAltReq::IND_TC_CONNECT,
             size->noOfReplicas*((16 * size->noOfOperations) / 10 + 50));

    setValue(lqh, LqhSizeAltReq::IND_REPLICAS,
             size->noOfReplicas);

    // ispValues[1][4] holds NoOfFragmentLogFiles (see init()); each log
    // file configured expands to 4 LQH log-file records.
    setValue(lqh, LqhSizeAltReq::IND_LOG_FILES,
             (4 * the_clusterData.ispValues[1][4]));

    setValue(lqh, LqhSizeAltReq::IND_SCAN,
             noOfLocalScanRecords);

  }

  {
    /**
     * Tc Size Alt values
     */
    size->blockNo[TC] = DBTC;

    VarSize * const tc = &(size->varSize[TC][0]);

    setValue(tc, TcSizeAltReq::IND_API_CONNECT,
             3 * size->noOfTransactions);

    setValue(tc, TcSizeAltReq::IND_TC_CONNECT,
             size->noOfOperations + 16 + size->noOfTransactions);

    setValue(tc, TcSizeAltReq::IND_TABLE,
             size->noOfTables);

    setValue(tc, TcSizeAltReq::IND_LOCAL_SCAN,
             noOfLocalScanRecords);

    setValue(tc, TcSizeAltReq::IND_TC_SCAN,
             noOfTCScanRecords);
  }

  {
    /**
     * Tup Size Alt values
     */
    size->blockNo[TUP] = DBTUP;

    VarSize * const tup = &(size->varSize[TUP][0]);

    setValue(tup, TupSizeAltReq::IND_DISK_PAGE_ARRAY,
             2 * NO_OF_FRAG_PER_NODE * size->noOfTables* size->noOfReplicas);

    setValue(tup, TupSizeAltReq::IND_DISK_PAGE_REPRESENT,
             size->noOfDiskClusters);

    setValue(tup, TupSizeAltReq::IND_FRAG,
             2 * NO_OF_FRAG_PER_NODE * size->noOfTables* size->noOfReplicas);

    setValue(tup, TupSizeAltReq::IND_PAGE_CLUSTER,
             size->noOfFreeClusters);

    setValue(tup, TupSizeAltReq::IND_LOGIC_PAGE,
             size->noOfDiskBufferPages + size->noOfDiskClusters);

    setValue(tup, TupSizeAltReq::IND_OP_RECS,
             size->noOfReplicas*((16 * size->noOfOperations) / 10 + 50));

    setValue(tup, TupSizeAltReq::IND_PAGE,
             size->noOfDataPages);

    setValue(tup, TupSizeAltReq::IND_PAGE_RANGE,
             4 * NO_OF_FRAG_PER_NODE * size->noOfTables* size->noOfReplicas);

    setValue(tup, TupSizeAltReq::IND_TABLE,
             size->noOfTables);

    setValue(tup, TupSizeAltReq::IND_TABLE_DESC,
             4 * NO_OF_FRAG_PER_NODE * size->noOfAttributes* size->noOfReplicas +
             12 * NO_OF_FRAG_PER_NODE * size->noOfTables* size->noOfReplicas );

    setValue(tup, TupSizeAltReq::IND_DELETED_BLOCKS,
             size->noOfFreeClusters);

    setValue(tup, TupSizeAltReq::IND_STORED_PROC,
             noOfLocalScanRecords);
  }

  {
    /**
     * Tux Size Alt values
     */
    size->blockNo[TUX] = DBTUX;

    VarSize * const tux = &(size->varSize[TUX][0]);

    setValue(tux, TuxSizeAltReq::IND_INDEX,
             size->noOfTables);

    setValue(tux, TuxSizeAltReq::IND_FRAGMENT,
             2 * NO_OF_FRAG_PER_NODE * size->noOfTables * size->noOfReplicas);

    setValue(tux, TuxSizeAltReq::IND_ATTRIBUTE,
             size->noOfIndexes * 4);

    setValue(tux, TuxSizeAltReq::IND_SCAN,
             noOfLocalScanRecords);
  }
}
317
// Read-only accessor for the parsed cluster configuration; the returned
// reference stays valid for the lifetime of this ClusterConfiguration.
const ClusterConfiguration::ClusterData&
ClusterConfiguration::clusterData() const
{
  return the_clusterData;
}
323
init(const Properties & p,const Properties & db)324 void ClusterConfiguration::init(const Properties & p, const Properties & db){
325 const char * msg = "Invalid configuration fetched";
326
327 ClusterData & cd = the_clusterData;
328
329 struct AttribStorage { const char * attrib; Uint32 * storage; };
330 AttribStorage tmp[] = {
331 {"MaxNoOfConcurrentScans", &cd.SizeAltData.noOfScanRecords },
332 {"MaxNoOfTables", &cd.SizeAltData.noOfTables },
333 {"MaxNoOfIndexes", &cd.SizeAltData.noOfIndexes },
334 {"NoOfReplicas", &cd.SizeAltData.noOfReplicas },
335 {"MaxNoOfAttributes", &cd.SizeAltData.noOfAttributes },
336 {"MaxNoOfConcurrentOperations", &cd.SizeAltData.noOfOperations },
337 {"MaxNoOfConcurrentTransactions", &cd.SizeAltData.noOfTransactions },
338 {"NoOfIndexPages", &cd.SizeAltData.noOfIndexPages },
339 {"NoOfDataPages", &cd.SizeAltData.noOfDataPages },
340 {"NoOfDiskBufferPages", &cd.SizeAltData.noOfDiskBufferPages },
341 {"NoOfDiskClusters", &cd.SizeAltData.noOfDiskClusters },
342 {"NoOfFreeDiskClusters", &cd.SizeAltData.noOfFreeClusters },
343 {"TimeToWaitAlive", &cd.ispValues[0][0] },
344 {"HeartbeatIntervalDbDb", &cd.ispValues[0][2] },
345 {"HeartbeatIntervalDbApi", &cd.ispValues[0][3] },
346 {"ArbitrationTimeout", &cd.ispValues[0][5] },
347 {"TimeBetweenLocalCheckpoints", &cd.ispValues[1][2] },
348 {"NoOfFragmentLogFiles", &cd.ispValues[1][4] },
349 {"MaxNoOfConcurrentScans", &cd.SizeAltData.noOfScanRecords },
350 {"NoOfConcurrentCheckpointsDuringRestart", &cd.ispValues[1][5] },
351 {"TransactionDeadlockDetectionTimeout", &cd.ispValues[1][6] },
352 {"NoOfConcurrentProcessesHandleTakeover", &cd.ispValues[1][7] },
353 {"TimeBetweenGlobalCheckpoints", &cd.ispValues[2][3] },
354 {"NoOfConcurrentCheckpointsAfterRestart", &cd.ispValues[2][4] },
355 {"TransactionInactiveTimeout", &cd.ispValues[2][7] },
356 {"NoOfDiskPagesToDiskDuringRestartTUP", &cd.ispValues[3][8] },
357 {"NoOfDiskPagesToDiskAfterRestartTUP", &cd.ispValues[3][9] },
358 {"NoOfDiskPagesToDiskDuringRestartACC", &cd.ispValues[3][10] },
359 {"NoOfDiskPagesToDiskAfterRestartACC", &cd.ispValues[3][11] },
360 {"NoOfDiskClustersPerDiskFile", &cd.ispValues[4][8] },
361 {"NoOfDiskFiles", &cd.ispValues[4][9] },
362 {"NoOfReplicas", &cd.ispValues[2][2] }
363 };
364
365
366 const int sz = sizeof(tmp)/sizeof(AttribStorage);
367 for(int i = 0; i<sz; i++){
368 if(!db.get(tmp[i].attrib, tmp[i].storage)){
369 char buf[255];
370 BaseString::snprintf(buf, sizeof(buf), "%s not found", tmp[i].attrib);
371 ERROR_SET(fatal, NDBD_EXIT_INVALID_CONFIG, msg, buf);
372 }
373 }
374
375 if(!p.get("NoOfNodes", &cd.SizeAltData.noOfNodes)){
376 ERROR_SET(fatal, NDBD_EXIT_INVALID_CONFIG, msg, "NoOfNodes missing");
377 }
378
379 Properties::Iterator it(&p);
380 const char * name = 0;
381 Uint32 nodeNo = 0;
382 for(name = it.first(); name != NULL; name = it.next()){
383 if(strncmp(name, "Node_", strlen("Node_")) == 0){
384
385 Uint32 nodeId;
386 const char * nodeType;
387 const Properties * node;
388
389 if(!p.get(name, &node)){
390 ERROR_SET(fatal, NDBD_EXIT_INVALID_CONFIG, msg, "Node data missing");
391 }
392
393 if(!node->get("Id", &nodeId)){
394 ERROR_SET(fatal, NDBD_EXIT_INVALID_CONFIG, msg, "Node data (Id) missing");
395 }
396
397 if(!node->get("Type", &nodeType)){
398 ERROR_SET(fatal, NDBD_EXIT_INVALID_CONFIG, msg, "Node data (Type) missing");
399 }
400
401 if(nodeId > MAX_NODES){
402 char buf[255];
403 snprintf(buf, sizeof(buf),
404 "Maximum DB node id allowed is: %d", MAX_NDB_NODES);
405 ERROR_SET(fatal, NDBD_EXIT_INVALID_CONFIG, msg, buf);
406 }
407
408 if(nodeId == 0){
409 char buf[255];
410 snprintf(buf, sizeof(buf),
411 "Minimum node id allowed in the cluster is: 1");
412 ERROR_SET(fatal, NDBD_EXIT_INVALID_CONFIG, msg, buf);
413 }
414
415 for(unsigned j = 0; j<nodeNo; j++){
416 if(cd.nodeData[j].nodeId == nodeId){
417 char buf[255];
418 BaseString::snprintf(buf, sizeof(buf), "Two node can not have the same node id");
419 ERROR_SET(fatal, NDBD_EXIT_INVALID_CONFIG, msg, buf);
420 }
421 }
422
423 {
424 for(unsigned j = 0; j<LogLevel::LOGLEVEL_CATEGORIES; j++){
425 Uint32 logLevel;
426 if(db.get(LogLevel::LOGLEVEL_CATEGORY_NAME[j].name, &logLevel)){
427 cd.SizeAltData.logLevel.setLogLevel((LogLevel::EventCategory)j,
428 logLevel);
429 }
430 }
431 }
432
433 cd.nodeData[nodeNo].nodeId = nodeId;
434 const char* tmpApiMgmProperties = 0;
435 if(strcmp("DB", nodeType) == 0){
436 cd.nodeData[nodeNo].nodeType = NodeInfo::DB;
437 cd.SizeAltData.noOfNDBNodes++; // No of NDB processes
438
439 if(nodeId > MAX_NDB_NODES){
440 char buf[255];
441 BaseString::snprintf(buf, sizeof(buf), "Maximum node id for a ndb node is: %d", MAX_NDB_NODES);
442 ERROR_SET(fatal, NDBD_EXIT_INVALID_CONFIG, msg, buf);
443 }
444 if(cd.SizeAltData.noOfNDBNodes > MAX_NDB_NODES){
445 char buf[255];
446 BaseString::snprintf(buf, sizeof(buf),
447 "Maximum %d ndb nodes is allowed in the cluster",
448 MAX_NDB_NODES);
449 ERROR_SET(fatal, NDBD_EXIT_INVALID_CONFIG, msg, buf);
450 }
451 } else if(strcmp("API", nodeType) == 0){
452 cd.nodeData[nodeNo].nodeType = NodeInfo::API;
453 cd.SizeAltData.noOfAPINodes++; // No of API processes
454 tmpApiMgmProperties = "API";
455 } else if(strcmp("REP", nodeType) == 0){
456 cd.nodeData[nodeNo].nodeType = NodeInfo::REP;
457 //cd.SizeAltData.noOfAPINodes++; // No of API processes
458 tmpApiMgmProperties = "REP";
459 } else if(strcmp("MGM", nodeType) == 0){
460 cd.nodeData[nodeNo].nodeType = NodeInfo::MGM;
461 cd.SizeAltData.noOfMGMNodes++; // No of MGM processes
462 tmpApiMgmProperties = "MGM";
463 } else {
464 ERROR_SET(fatal, NDBD_EXIT_INVALID_CONFIG,
465 "Invalid configuration: Unknown node type",
466 nodeType);
467 }
468
469 if (tmpApiMgmProperties) {
470 /*
471 const Properties* q = 0;
472
473 if (!p.get(tmpApiMgmProperties, nodeId, &q)) {
474 ERROR_SET(fatal, NDBD_EXIT_INVALID_CONFIG, msg, tmpApiMgmProperties);
475 } else {
476 */
477 Uint32 rank = 0;
478 if (node->get("ArbitrationRank", &rank) && rank > 0) {
479 cd.nodeData[nodeNo].arbitRank = rank;
480 // }
481 }
482 } else {
483 cd.nodeData[nodeNo].arbitRank = 0;
484 }
485
486 nodeNo++;
487 }
488 }
489 cd.SizeAltData.exist = true;
490 calcSizeAlteration();
491 }
492
493
494