/*
   Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License, version 2.0,
   as published by the Free Software Foundation.

   This program is also distributed with certain software (including
   but not limited to OpenSSL) that is licensed under separate terms,
   as designated in a particular file or component or in included license
   documentation. The authors of MySQL hereby grant you an additional
   permission to link the program and your derivative works with the
   separately licensed software that they have included with MySQL.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
   GNU General Public License, version 2.0, for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/

#ifndef NDB_LIMITS_H
#define NDB_LIMITS_H

#include "ndb_version.h" // Limits might depend on NDB version

#define RNIL 0xffffff00

/**
 * Note that the actual value is MAX_NODES - 1,
 * since NodeId = 0 cannot be used
 */
#define MAX_NDB_NODES 145
#define MAX_NDB_NODES_v1 49
#define MAX_NDB_NODE_GROUPS 72
#define MAX_NODES 256
#define NDB_UNDEF_NODEGROUP 0xFFFF
#define MAX_BACKUPS 0xFFFFFFFF

/**************************************************************************
 * IT SHOULD BE (MAX_NDB_NODES - 1).
 * WHEN MAX_NDB_NODES IS CHANGED, IT SHOULD ALSO BE CHANGED.
 **************************************************************************/
#define MAX_DATA_NODE_ID 144
/**************************************************************************
 * IT SHOULD BE (MAX_NODES - 1).
 * WHEN MAX_NODES IS CHANGED, IT SHOULD ALSO BE CHANGED.
 **************************************************************************/
#define MAX_NODES_ID 255

/**
 * MAX_API_NODES = MAX_NODES - number of NDB nodes in use
 */

/**
 * The maximum number of replicas in the system
 */
#define MAX_REPLICAS 4

/**
 * The maximum number of transporters allowed.
 * A maximum is needed to be able to allocate the array of transporters.
 * We need one transporter per node, plus extra transporters for the
 * connections between data nodes in the same node group.
 */
#define MAX_NODE_GROUP_TRANSPORTERS 32
#define MAX_NTRANSPORTERS (MAX_NODES + \
                           ((MAX_REPLICAS - 1) * MAX_NODE_GROUP_TRANSPORTERS))
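
/*
 * Worked example with the values above: MAX_NTRANSPORTERS
 * = 256 + (4 - 1) * 32 = 352.
 */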

/**
 * The maximum number of local checkpoints stored at a time
 */
#define MAX_LCP_STORED 3

/**
 * Max LCP used (the reason for keeping MAX_LCP_STORED is that we
 * need to restore from LCPs with lcp no == 2)
 */
#define MAX_LCP_USED 2

/**
 * The maximum number of log execution rounds at system restart
 */
#define MAX_LOG_EXEC 4

/**
 * The maximum number of tuples per page
 **/
#define MAX_TUPLES_PER_PAGE 8191
#define MAX_TUPLES_BITS 13 /* 13 bits = 8191 tuples per page */
#define NDB_MAX_TABLES 20320 /* SchemaFile.hpp */
#define MAX_TAB_NAME_SIZE 128
#define MAX_ATTR_NAME_SIZE NAME_LEN /* From mysql_com.h */
#define MAX_ATTR_DEFAULT_VALUE_SIZE ((MAX_TUPLE_SIZE_IN_WORDS + 1) * 4) // Add 1 word for the AttributeHeader
#define MAX_ATTRIBUTES_IN_TABLE 512
#define MAX_ATTRIBUTES_IN_INDEX 32
#define MAX_TUPLE_SIZE_IN_WORDS 7500
#define MAX_KEY_SIZE_IN_WORDS 1023
#define MAX_NULL_BITS 4096
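
/*
 * Worked sizes from the values above: a max-size tuple is
 * 7500 words = 30000 bytes, a max-size key is 1023 words = 4092 bytes,
 * and MAX_ATTR_DEFAULT_VALUE_SIZE is (7500 + 1) * 4 = 30004 bytes.
 */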

/*
 * The Suma block sorts data changes of tables into buckets.
 * Sumas in a node group share a number of buckets, which is the
 * factorial of the number of replicas, to ensure balance in any
 * node failure situation.
 */
#define MAX_SUMA_BUCKETS_PER_NG 24 /* factorial of MAX_REPLICAS */

/*
 * At any time, one Suma is responsible for streaming bucket data
 * to its subscribers; each bucket uses its own stream, aka
 * subscription data stream.
 *
 * Note that each subscriber receives filtered data from the
 * stream depending on which objects it subscribes to.
 *
 * A stream sending data from a bucket has a 16-bit identifier
 * with two parts. The lower 8 bits determine a non-zero stream
 * group. The upper 8 bits determine an identifier within that group.
 *
 * Stream group identifiers range from 1 to MAX_SUB_DATA_STREAM_GROUPS.
 * Stream identifiers within a group range from 0 to
 * MAX_SUB_DATA_STREAMS_PER_GROUP - 1.
 * Stream identifier zero is reserved to not identify any stream.
 */
#define MAX_SUB_DATA_STREAMS (MAX_SUB_DATA_STREAMS_PER_GROUP * MAX_SUB_DATA_STREAM_GROUPS)
#define MAX_SUB_DATA_STREAM_GROUPS (MAX_NDB_NODES - 1)
#define MAX_SUB_DATA_STREAMS_PER_GROUP (MAX_SUMA_BUCKETS_PER_NG / MAX_REPLICAS)
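
/*
 * Worked values: MAX_SUB_DATA_STREAM_GROUPS = 145 - 1 = 144,
 * MAX_SUB_DATA_STREAMS_PER_GROUP = 24 / 4 = 6, so
 * MAX_SUB_DATA_STREAMS = 6 * 144 = 864.
 *
 * Illustrative sketch only (these helpers are not part of the original
 * header): decoding a 16-bit sub data stream identifier into its two
 * parts as described above.
 */
static inline unsigned ndb_sub_data_stream_group(unsigned streamId)
{
  return streamId & 0xFF;        /* 1 .. MAX_SUB_DATA_STREAM_GROUPS */
}
static inline unsigned ndb_sub_data_stream_in_group(unsigned streamId)
{
  return (streamId >> 8) & 0xFF; /* 0 .. MAX_SUB_DATA_STREAMS_PER_GROUP - 1 */
}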

/*
 * Fragmentation data are Uint16 values: the first two are #replicas
 * and #fragments, then for each fragment, first the log-part-id and
 * then the node id of each replica.
 * See the creation in Dbdih::execCREATE_FRAGMENTATION_REQ()
 * and the reading in Dbdih::execDIADDTABREQ().
 */
#define MAX_FRAGMENT_DATA_ENTRIES (2 + (1 + MAX_REPLICAS) * MAX_NDB_PARTITIONS)
#define MAX_FRAGMENT_DATA_BYTES (2 * MAX_FRAGMENT_DATA_ENTRIES)
#define MAX_FRAGMENT_DATA_WORDS ((MAX_FRAGMENT_DATA_BYTES + 3) / 4)
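
/*
 * Worked example, assuming MAX_NDB_PARTITIONS = 8160 (7.6.8 and later):
 * MAX_FRAGMENT_DATA_ENTRIES = 2 + (1 + 4) * 8160 = 40802,
 * MAX_FRAGMENT_DATA_BYTES = 81604 and MAX_FRAGMENT_DATA_WORDS = 20401.
 */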

#if NDB_VERSION_D < NDB_MAKE_VERSION(7,2,0)
#define MAX_NDB_PARTITIONS 240
#elif NDB_VERSION_D < NDB_MAKE_VERSION(7,6,8)
#define MAX_NDB_PARTITIONS 2048
#else
#define MAX_NDB_PARTITIONS 8160
#endif

#define NDB_PARTITION_BITS 16
#define NDB_PARTITION_MASK ((Uint32)((1 << NDB_PARTITION_BITS) - 1))
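
/*
 * With NDB_PARTITION_BITS = 16 the mask is 0xFFFF, so the largest
 * encodable partition id is 65535; the highest actual partition id,
 * MAX_NDB_PARTITIONS - 1 = 8159, fits comfortably (also checked by a
 * static assert at the end of this file).
 */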

#define MAX_RANGE_DATA (131072 + MAX_NDB_PARTITIONS) // 0.5 MByte of list data

#define MAX_WORDS_META_FILE 24576

#define MIN_ATTRBUF ((MAX_ATTRIBUTES_IN_TABLE / 24) + 1)
/*
 * Max number of records to fetch per SCAN_NEXTREQ in a scan in LQH. The
 * API can order a multiple of this number of records at a time since
 * fragments can be scanned in parallel.
 */
#define MAX_PARALLEL_OP_PER_SCAN 992
/*
 * The default batch size. Configurable parameter.
 */
#define DEF_BATCH_SIZE 256
/*
 * When calculating the number of records sent from LQH in each batch,
 * one uses SCAN_BATCH_SIZE divided by the expected size of signals
 * per row. This gives the batch size used for the scan. The NDB API
 * will receive one batch from each node at a time, so some care must
 * also be taken that the NDB API is not overloaded with signals.
 * This parameter is configurable; this is the default value.
 */
#define SCAN_BATCH_SIZE 16384
/*
 * To protect the NDB API from overload we also define a maximum total
 * batch size from all nodes. This parameter could also be made
 * dependent on sendBufferSize.
 * It is configurable; this is the default value.
 */
#define MAX_SCAN_BATCH_SIZE 262144
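
/*
 * Worked example, assuming a hypothetical row cost of about 200 bytes
 * of signal data: one fragment batch carries roughly 16384 / 200 = 81
 * rows, well below the MAX_PARALLEL_OP_PER_SCAN cap of 992, and the
 * total cap allows roughly 262144 / 16384 = 16 full-size fragment
 * batches in flight to one API node at a time.
 */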
/*
 * Maximum number of parallel scan queries on one hash index fragment
 */
#define MAX_PARALLEL_SCANS_PER_FRAG 12

/**
 * Computed defines
 */
#define MAXNROFATTRIBUTESINWORDS (MAX_ATTRIBUTES_IN_TABLE / 32)
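
/*
 * With MAX_ATTRIBUTES_IN_TABLE = 512 this is 512 / 32 = 16 words,
 * enough for one bit per attribute. Illustrative use of such a bitmask
 * (sketch only; the variable names are hypothetical):
 *
 *   Uint32 mask[MAXNROFATTRIBUTESINWORDS];
 *   mask[attrId >> 5] |= (1u << (attrId & 31)); // mark attrId as set
 */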

/*
 * Ordered index constants. Make configurable per index later.
 */
#define MAX_TTREE_NODE_SIZE 64 /* total words in node */
#define MAX_TTREE_PREF_SIZE 4 /* words in min prefix */
#define MAX_TTREE_NODE_SLACK 2 /* diff between max and min occupancy */

/*
 * Blobs.
 */
#define NDB_BLOB_V1 1
#define NDB_BLOB_V2 2
#define NDB_BLOB_V1_HEAD_SIZE 2 /* sizeof(Uint64) >> 2 */
#define NDB_BLOB_V2_HEAD_SIZE 4 /* 2 + 2 + 4 + 8 bytes, see NdbBlob.hpp */
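
/*
 * Both head sizes are in 32-bit words: the v1 head is a single Uint64
 * length field (2 words = 8 bytes), while the v2 head fields add up to
 * 2 + 2 + 4 + 8 = 16 bytes (4 words).
 */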

/*
 * Character sets.
 */
#define MAX_XFRM_MULTIPLY 8 /* max expansion when normalizing */

/**
 * Disk data
 */
#define MAX_FILES_PER_FILEGROUP 1024

/**
 * Page size in global page pool
 */
#define GLOBAL_PAGE_SIZE 32768
#define GLOBAL_PAGE_SIZE_WORDS 8192

/*
 * Schema transactions
 */
#define MAX_SCHEMA_OPERATIONS 256

/*
 * Long signals
 */
#define NDB_SECTION_SEGMENT_SZ 60
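
/*
 * Worked example: a long signal section carrying a max-size tuple of
 * MAX_TUPLE_SIZE_IN_WORDS = 7500 words needs 7500 / 60 = 125 section
 * segments.
 */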

/*
 * Restore buffer in 32K global pages: 4 * 32 = 128 pages = 4 MByte
 */
#define LCP_RESTORE_BUFFER (4 * 32)


/**
 * The hashmap size should support at least one partition per LDM.
 * It should also be a multiple of all possible data node counts, so
 * that all partitions relate to as equal a number of hashmap buckets
 * as possible; otherwise some partitions will be bigger than others.
 *
 * The historical size of hashmaps supported by old versions of NDB
 * is 240. This guarantees at most 1/6 of unusable data memory for
 * some nodes, since one can have at most 48 data nodes, so each node
 * will relate to at least 5 hashmap buckets. Also, 240 is a multiple
 * of 2, 3, 4, 5, 6, 8, 10, 12, 15, 16, 20, 24, 30, 32, 40, and 48,
 * so having any of these node counts guarantees almost no unusable
 * data memory.
 *
 * The current value 3840 is 16 times 240, and so gives at least the
 * same guarantees as the old value above, also if up to 16 LDM
 * threads per node are used.
 */

#define NDB_MAX_HASHMAP_BUCKETS (3840 * 2 * 3)
#define NDB_DEFAULT_HASHMAP_MAX_FRAGMENTS 1536

#if NDB_VERSION_D < NDB_MAKE_VERSION(7,2,0)
#define NDB_DEFAULT_HASHMAP_BUCKETS 240
#else
#define NDB_DEFAULT_HASHMAP_BUCKETS 3840
#endif
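
/*
 * Worked example: with 3840 buckets, a hypothetical cluster of 10 data
 * nodes with 16 LDM threads each (160 partitions) maps every partition
 * to exactly 3840 / 160 = 24 buckets, so no partition is bigger than
 * another.
 */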

/**
 * Bits/mask used for coding/decoding blockno/blockinstance
 */
#define NDBMT_BLOCK_BITS 9
#define NDBMT_BLOCK_MASK ((1 << NDBMT_BLOCK_BITS) - 1)
#define NDBMT_BLOCK_INSTANCE_BITS 7
#define NDBMT_MAX_BLOCK_INSTANCES (1 << NDBMT_BLOCK_INSTANCE_BITS)
/* Proxy block 0 is not a worker */
#define NDBMT_MAX_WORKER_INSTANCES (NDBMT_MAX_BLOCK_INSTANCES - 1)
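
/*
 * Illustrative sketch only (these helpers are not the original API):
 * packing a block number and a block instance into one word using the
 * constants above, with the instance kept in the bits above
 * NDBMT_BLOCK_BITS.
 */
static inline unsigned ndb_pack_block_no_instance(unsigned blockNo,
                                                  unsigned instance)
{
  return (blockNo & NDBMT_BLOCK_MASK) | (instance << NDBMT_BLOCK_BITS);
}
static inline unsigned ndb_unpack_block_no(unsigned packed)
{
  return packed & NDBMT_BLOCK_MASK;
}
static inline unsigned ndb_unpack_block_instance(unsigned packed)
{
  return (packed >> NDBMT_BLOCK_BITS) & (NDBMT_MAX_BLOCK_INSTANCES - 1);
}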

#define NDB_DEFAULT_LOG_PARTS 4

#define NDBMT_MAIN_THREADS 2 /* Without receiver threads */

#if NDB_VERSION_D < NDB_MAKE_VERSION(7,2,0)
#define NDB_MAX_LOG_PARTS 4
#define MAX_NDBMT_TC_THREADS 2
#define MAX_NDBMT_RECEIVE_THREADS 1
#define MAX_NDBMT_SEND_THREADS 0
#else
#define NDB_MAX_LOG_PARTS 32
#define MAX_NDBMT_TC_THREADS 32
#define MAX_NDBMT_RECEIVE_THREADS 16
#define MAX_NDBMT_SEND_THREADS 16
#endif

#define MAX_NDBMT_LQH_WORKERS NDB_MAX_LOG_PARTS
#define MAX_NDBMT_LQH_THREADS NDB_MAX_LOG_PARTS

#define NDB_FILE_BUFFER_SIZE (256*1024)

/*
 * NDB_FS_RW_PAGES must be big enough for the biggest request,
 * probably PACK_TABLE_PAGES (see Dbdih.hpp)
 */
#if NDB_VERSION_D < NDB_MAKE_VERSION(7,2,0)
#define NDB_FS_RW_PAGES 32
#else
#define NDB_FS_RW_PAGES (268 * 4)
#endif

/**
 * MAX_ATTRIBUTES_IN_TABLE old handling
 */
#define MAXNROFATTRIBUTESINWORDS_OLD (128 / 32)

/**
 * Number of bits available for the attribute mask in NDB$EVENTS_0
 */
#define MAX_ATTRIBUTES_IN_TABLE_NDB_EVENTS_0 4096

/**
 * Max treenodes per SPJ request
 *
 * Currently limited by the nodemask being shipped back inside a 32-bit
 * word disguised as totalLen in ScanTabConf
 */
#define NDB_SPJ_MAX_TREE_NODES 32

/*
 * Stored ordered index stats use 2 Longvarbinary pseudo-columns: the
 * packed index keys and the packed values. Key size is limited by the
 * SAMPLES table, which has 3 other PK attributes. Also, the length
 * bytes are counted as 1 word. Values currently contain RIR (one word)
 * and RPK (one word for each key level). The SAMPLES table STAT_VALUE
 * column is longer to allow future changes.
 *
 * Stats tables are "lifted" to the mysql level, so for max key size use
 * MAX_KEY_LENGTH/4 instead of the bigger MAX_KEY_SIZE_IN_WORDS. The
 * definition is not available by default; use 3072 directly for now.
 */
#define MAX_INDEX_STAT_KEY_COUNT MAX_ATTRIBUTES_IN_INDEX
#define MAX_INDEX_STAT_KEY_SIZE ((3072/4) - 3 - 1)
#define MAX_INDEX_STAT_VALUE_COUNT (1 + MAX_INDEX_STAT_KEY_COUNT)
#define MAX_INDEX_STAT_VALUE_SIZE MAX_INDEX_STAT_VALUE_COUNT
#define MAX_INDEX_STAT_VALUE_CSIZE 512 /* Longvarbinary(2048) */
#define MAX_INDEX_STAT_VALUE_FORMAT 1
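
/*
 * Worked values: MAX_INDEX_STAT_KEY_SIZE = (3072 / 4) - 3 - 1 = 764
 * words, and with MAX_ATTRIBUTES_IN_INDEX = 32 key levels,
 * MAX_INDEX_STAT_VALUE_COUNT = 1 + 32 = 33 words (RIR plus one RPK per
 * key level).
 */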

/**
 * When calculating the batch size for unique key builds, reorg builds,
 * and foreign key builds, we treat this as the maximum normal row
 * size; if rows are bigger than this, we decrease the parallelism to
 * adjust for it.
 * See Suma.cpp
 */
#define MAX_NORMAL_ROW_SIZE 2048

/**
 * Maximum size that an EVENT_REP signal can carry in its
 * long signal section.
 */
#define MAX_EVENT_REP_SIZE_WORDS 1024

#define MAX_UNDO_DATA (20 + MAX_TUPLE_SIZE_IN_WORDS)
// Max. number of pending undo records allowed per LDM
#define MAX_PENDING_UNDO_RECORDS 100

// Maximum number of DROP_TRIG_REQs handled in parallel by the LocalProxy
#define NDB_MAX_PROXY_DROP_TRIG_IMPL_REQ 21
/* Maximum number of DROP_TRIGGER_REQs SUMA can send in parallel after
 * the execution of a SUB_STOP_REQ.
 *
 * We do not anticipate multiple parallel sub stop reqs from multiple APIs,
 * so it should be fair to restrict the number of API nodes sending
 * sub stop requests in parallel to 2. Any further sub stop requests from
 * other API nodes will be delayed. We delay the sub stop request execution
 * based on the outstanding trigger drop requests. Each sub stop request
 * can send a maximum of 3 drop trigger requests, so a maximum of 6 may
 * execute in parallel across all API nodes. */
#define NDB_MAX_SUMA_DROP_TRIG_REQ_SUBSTOP (2 * 3)
/* Max DROP_TRIG_REQs allowed from the api_fail_subscriber_list.
 * This is greater than the maximum allowed from SUB_STOP_REQ handling,
 * so as to give priority to API failure handling over normal start and
 * stop subscriptions if they are both competing. */
#define NDB_MAX_SUMA_DROP_TRIG_REQ_APIFAIL (3 * 3)

#ifdef NDB_STATIC_ASSERT

static inline void ndb_limits_constraints()
{
  NDB_STATIC_ASSERT(NDB_DEFAULT_HASHMAP_BUCKETS <= NDB_MAX_HASHMAP_BUCKETS);

  NDB_STATIC_ASSERT(MAX_NDB_PARTITIONS <= NDB_MAX_HASHMAP_BUCKETS);

  NDB_STATIC_ASSERT(MAX_NDB_PARTITIONS - 1 <= NDB_PARTITION_MASK);

  // MAX_NDB_NODES should be 48, but code assumes it is 49
  STATIC_CONST(MAX_NDB_DATA_NODES = MAX_DATA_NODE_ID);
  NDB_STATIC_ASSERT(MAX_NDB_NODES == MAX_NDB_DATA_NODES + 1);

  // Default partitioning is 1 partition per LDM
  NDB_STATIC_ASSERT(MAX_NDB_DATA_NODES * MAX_NDBMT_LQH_WORKERS <= MAX_NDB_PARTITIONS);

  // The default hashmap should at least support the maximum default partitioning
  NDB_STATIC_ASSERT(MAX_NDB_DATA_NODES * MAX_NDBMT_LQH_WORKERS <= NDB_MAX_HASHMAP_BUCKETS);
}

#endif

#endif