/*
   Copyright (c) 2003, 2021, Oracle and/or its affiliates.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License, version 2.0,
   as published by the Free Software Foundation.

   This program is also distributed with certain software (including
   but not limited to OpenSSL) that is licensed under separate terms,
   as designated in a particular file or component or in included license
   documentation.  The authors of MySQL hereby grant you an additional
   permission to link the program and your derivative works with the
   separately licensed software that they have included with MySQL.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License, version 2.0, for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301  USA
*/

#ifndef NDB_LIMITS_H
#define NDB_LIMITS_H

#include "ndb_version.h"  // Limits might depend on NDB version

#define RNIL    0xffffff00

/**
 * Note that the number of usable node ids is MAX_NODES - 1,
 * since NodeId = 0 cannot be used.
 */
#define MAX_NDB_NODES 49
#define MAX_NODES     256
#define NDB_UNDEF_NODEGROUP 0xFFFF
#define MAX_BACKUPS   0xFFFFFFFF

/**************************************************************************
 * THIS SHOULD BE (MAX_NDB_NODES - 1).
 * WHEN MAX_NDB_NODES IS CHANGED, THIS SHOULD BE CHANGED AS WELL.
 **************************************************************************/
#define MAX_DATA_NODE_ID 48
/**************************************************************************
 * THIS SHOULD BE (MAX_NODES - 1).
 * WHEN MAX_NODES IS CHANGED, THIS SHOULD BE CHANGED AS WELL.
 **************************************************************************/
#define MAX_NODES_ID 255
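
/*
 * Illustrative sketch (not part of the original header): valid data node
 * ids run from 1 to MAX_DATA_NODE_ID, and valid node ids of any type from
 * 1 to MAX_NODES_ID, since node id 0 cannot be used.  The helper names
 * below are hypothetical and only demonstrate the intended ranges.
 */
static inline int ndb_limits_is_valid_data_node_id(unsigned nodeId)
{
  return nodeId >= 1 && nodeId <= MAX_DATA_NODE_ID;
}
static inline int ndb_limits_is_valid_node_id(unsigned nodeId)
{
  return nodeId >= 1 && nodeId <= MAX_NODES_ID;
}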

/**
 * MAX_API_NODES = MAX_NODES - number of NDB nodes in use
 */

/**
 * The maximum number of replicas in the system
 */
#define MAX_REPLICAS 4

/**
 * The maximum number of local checkpoints stored at a time
 */
#define MAX_LCP_STORED 3

/**
 * Max LCP used (the reason for keeping MAX_LCP_STORED is that we
 *   need to restore from LCPs with lcp no == 2)
 */
#define MAX_LCP_USED 2

/**
 * The maximum number of log execution rounds at system restart
 */
#define MAX_LOG_EXEC 4

/**
 * The maximum number of tuples per page
 **/
#define MAX_TUPLES_PER_PAGE 8191
#define MAX_TUPLES_BITS 13 		/* 13 bits = 8191 tuples per page */
#define NDB_MAX_TABLES 20320                /* SchemaFile.hpp */
#define MAX_TAB_NAME_SIZE 128
#define MAX_ATTR_NAME_SIZE NAME_LEN       /* From mysql_com.h */
#define MAX_ATTR_DEFAULT_VALUE_SIZE ((MAX_TUPLE_SIZE_IN_WORDS + 1) * 4)  // Add 1 word for AttributeHeader
#define MAX_ATTRIBUTES_IN_TABLE 512
#define MAX_ATTRIBUTES_IN_INDEX 32
#define MAX_TUPLE_SIZE_IN_WORDS 3500

/**
 * The maximum size in words of a SUB_TABLE_DATA signal
 * sent from SUMA to the API.
 */
#define MAX_SUMA_MESSAGE_IN_WORDS 8028

/**
 * When sending a SUB_TABLE_DATA,
 *  this checks the maximum size that it can become.
 */
#define CHECK_SUMA_MESSAGE_SIZE(NO_KEYS,KEY_SIZE_IN_WORDS,NO_COLUMNS,TUPLE_SIZE_IN_WORDS) \
  ((NO_KEYS + KEY_SIZE_IN_WORDS + 2 * (NO_COLUMNS + TUPLE_SIZE_IN_WORDS)) <= MAX_SUMA_MESSAGE_IN_WORDS)
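
/*
 * Illustrative sketch (not part of the original header): how the macro
 * above could be used to verify that an event for a table fits in one
 * SUB_TABLE_DATA message.  The table figures below are hypothetical.
 */
static inline int suma_message_size_example()
{
  const unsigned noOfKeys = 2;            /* number of primary key columns */
  const unsigned keySizeInWords = 8;      /* total packed key size */
  const unsigned noOfColumns = 32;        /* columns sent in the event */
  const unsigned tupleSizeInWords = 500;  /* packed row size */
  /* Evaluates to non-zero when the event fits in one message */
  return CHECK_SUMA_MESSAGE_SIZE(noOfKeys, keySizeInWords,
                                 noOfColumns, tupleSizeInWords);
}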

#define MAX_KEY_SIZE_IN_WORDS 1023
#define MAX_FRM_DATA_SIZE 6000
#define MAX_NULL_BITS 4096

/*
 * The Suma block sorts data changes of tables into buckets.
 * The Sumas in a node group share a number of buckets, equal to the
 * factorial of the number of replicas, to ensure balance in any
 * node failure situation.
 */
#define MAX_SUMA_BUCKETS_PER_NG     24 /* factorial of MAX_REPLICAS */

/*
 * At any time, one Suma is responsible for streaming bucket data
 * to its subscribers; each bucket uses its own stream, also known as
 * a subscription data stream.
 *
 * Note that each subscriber receives filtered data from the
 * stream depending on which objects it subscribes to.
 *
 * A stream sending data from a bucket has a 16-bit identifier
 * with two parts.  The lower 8 bits hold a non-zero stream
 * group.  The upper 8 bits hold an identifier within that group.
 *
 * Stream group identifiers range from 1 to MAX_SUB_DATA_STREAM_GROUPS.
 * Stream identifiers within a group range from 0 to MAX_SUB_DATA_STREAMS_PER_GROUP - 1.
 * Stream identifier zero is reserved and does not identify any stream.
 */
#define MAX_SUB_DATA_STREAMS (MAX_SUB_DATA_STREAMS_PER_GROUP * MAX_SUB_DATA_STREAM_GROUPS)
#define MAX_SUB_DATA_STREAM_GROUPS      (MAX_NDB_NODES-1)
#define MAX_SUB_DATA_STREAMS_PER_GROUP  (MAX_SUMA_BUCKETS_PER_NG / MAX_REPLICAS)
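
/*
 * Illustrative sketch (not part of the original header): packing and
 * unpacking of the 16-bit sub data stream identifier described above,
 * lower 8 bits = non-zero stream group, upper 8 bits = identifier within
 * that group.  Function names are hypothetical; Uint16 is NDB's integer
 * typedef, assumed to be in scope where this header is included (as the
 * NDB_PARTITION_MASK macro below already assumes for Uint32).
 */
static inline Uint16 sub_data_stream_pack(Uint16 group, Uint16 idInGroup)
{
  /* group is 1..MAX_SUB_DATA_STREAM_GROUPS,
     idInGroup is 0..MAX_SUB_DATA_STREAMS_PER_GROUP - 1 */
  return (Uint16)((idInGroup << 8) | group);
}
static inline Uint16 sub_data_stream_group(Uint16 stream)
{
  return (Uint16)(stream & 0xFF);
}
static inline Uint16 sub_data_stream_id_in_group(Uint16 stream)
{
  return (Uint16)(stream >> 8);
}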

/*
 * Fragmentation data is an array of Uint16: the first two entries are
 * #replicas and #fragments, then for each fragment the log-part-id
 * followed by the node id of each replica.
 * See creation in Dbdih::execCREATE_FRAGMENTATION_REQ()
 * and reading in Dbdih::execDIADDTABREQ().
 */
#define MAX_FRAGMENT_DATA_ENTRIES (2 + (1 + MAX_REPLICAS) * MAX_NDB_PARTITIONS)
#define MAX_FRAGMENT_DATA_BYTES (2 * MAX_FRAGMENT_DATA_ENTRIES)
#define MAX_FRAGMENT_DATA_WORDS ((MAX_FRAGMENT_DATA_BYTES + 3) / 4)
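
/*
 * Illustrative sketch (not part of the original header): the number of
 * Uint16 entries needed for a concrete fragmentation, following the
 * layout described above (2 header entries, then per fragment one
 * log-part-id plus one node id per replica).
 */
static inline unsigned fragmentation_data_entries(unsigned noOfReplicas,
                                                  unsigned noOfFragments)
{
  /* e.g. 2 replicas and 8 fragments need 2 + 8 * 3 = 26 entries,
     well below MAX_FRAGMENT_DATA_ENTRIES */
  return 2 + noOfFragments * (1 + noOfReplicas);
}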

#if NDB_VERSION_D < NDB_MAKE_VERSION(7,2,0)
#define MAX_NDB_PARTITIONS 240
#else
#define MAX_NDB_PARTITIONS 2048
#endif

#define NDB_PARTITION_BITS 16
#define NDB_PARTITION_MASK ((Uint32)((1 << NDB_PARTITION_BITS) - 1))
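
/*
 * Illustrative sketch (not part of the original header): any valid
 * partition id fits within NDB_PARTITION_MASK, as the static assert at
 * the end of this file also checks (MAX_NDB_PARTITIONS - 1 <= mask).
 * The helper name is hypothetical.
 */
static inline Uint32 ndb_partition_id_masked(Uint32 partitionId)
{
  return partitionId & NDB_PARTITION_MASK;
}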

#define MAX_RANGE_DATA (131072+MAX_NDB_PARTITIONS) //0.5 MByte of list data

#define MAX_WORDS_META_FILE 24576

#define MIN_ATTRBUF ((MAX_ATTRIBUTES_IN_TABLE/24) + 1)
/*
 * Max number of records to fetch per SCAN_NEXTREQ in a scan in LQH. The
 * API can order a multiple of this number of records at a time since
 * fragments can be scanned in parallel.
 */
#define MAX_PARALLEL_OP_PER_SCAN 992
/*
* The default batch size. Configurable parameter.
*/
#if NDB_VERSION_D < NDB_MAKE_VERSION(7,2,0)
#define DEF_BATCH_SIZE 64
#else
#define DEF_BATCH_SIZE 256
#endif
/*
* When calculating the number of records sent from LQH in each batch,
* SCAN_BATCH_SIZE is divided by the expected size of signals per row.
* This gives the batch size used for the scan. The NDB API receives one
* batch from each node at a time, so some care is needed to ensure the
* NDB API is not overloaded with signals.
* This parameter is configurable; this is the default value.
*/
#define SCAN_BATCH_SIZE 16384
/*
* To protect the NDB API from overload we also define a maximum total
* batch size from all nodes. It could also be made dependent on
* sendBufferSize.
* This parameter is configurable; this is the default value.
*/
#define MAX_SCAN_BATCH_SIZE 262144
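
/*
 * Illustrative sketch (not part of the original header): the kind of
 * arithmetic described above.  For an expected row size of, say, 200
 * bytes of signal data, one batch would carry roughly
 * SCAN_BATCH_SIZE / 200 = 81 rows, and never more than
 * MAX_PARALLEL_OP_PER_SCAN rows.  This is a simplified model, not the
 * actual calculation used by the NDB API.
 */
static inline unsigned scan_rows_per_batch(unsigned expectedBytesPerRow)
{
  /* expectedBytesPerRow must be > 0 */
  unsigned rows = SCAN_BATCH_SIZE / expectedBytesPerRow;
  if (rows > MAX_PARALLEL_OP_PER_SCAN)
    rows = MAX_PARALLEL_OP_PER_SCAN;
  if (rows == 0)
    rows = 1;
  return rows;
}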
/*
 * Maximum number of Parallel Scan queries on one hash index fragment
 */
#define MAX_PARALLEL_SCANS_PER_FRAG 12

/**
 * Computed defines
 */
#define MAXNROFATTRIBUTESINWORDS (MAX_ATTRIBUTES_IN_TABLE / 32)
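
/*
 * Illustrative sketch (not part of the original header): an attribute
 * bitmask sized by MAXNROFATTRIBUTESINWORDS, one bit per attribute id
 * (0..MAX_ATTRIBUTES_IN_TABLE - 1).  The helper name is hypothetical
 * and assumes NDB's Uint32 typedef is in scope.
 */
static inline void attribute_mask_set(Uint32 mask[MAXNROFATTRIBUTESINWORDS],
                                      Uint32 attrId)
{
  /* word index = attrId / 32, bit index = attrId % 32 */
  mask[attrId >> 5] |= (Uint32)1 << (attrId & 31);
}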

/*
 * Ordered index constants.  Make configurable per index later.
 */
#define MAX_TTREE_NODE_SIZE 64	    /* total words in node */
#define MAX_TTREE_PREF_SIZE 4	    /* words in min prefix */
#define MAX_TTREE_NODE_SLACK 2	    /* diff between max and min occupancy */

/*
 * Blobs.
 */
#define NDB_BLOB_V1 1
#define NDB_BLOB_V2 2
#define NDB_BLOB_V1_HEAD_SIZE 2     /* sizeof(Uint64) >> 2 */
#define NDB_BLOB_V2_HEAD_SIZE 4     /* 2 + 2 + 4 + 8 bytes, see NdbBlob.hpp */
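
/*
 * Illustrative sketch (not part of the original header): the blob head
 * sizes above are in 32-bit words.  Per the comments, the V1 head is a
 * single Uint64 length (8 bytes = 2 words), and the V2 head is 16 bytes
 * = 4 words, laid out as 2 + 2 + 4 + 8 bytes (see NdbBlob.hpp).
 */
static inline int ndb_blob_head_sizes_consistent()
{
  return (NDB_BLOB_V1_HEAD_SIZE * 4 == 8) &&
         (NDB_BLOB_V2_HEAD_SIZE * 4 == 2 + 2 + 4 + 8);
}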

/*
 * Character sets.
 */
#define MAX_XFRM_MULTIPLY 8         /* max expansion when normalizing */

/**
 * Disk data
 */
#define MAX_FILES_PER_FILEGROUP 1024

/**
 * Page size in global page pool
 */
#define GLOBAL_PAGE_SIZE 32768
#define GLOBAL_PAGE_SIZE_WORDS 8192

/*
 * Schema transactions
 */
#define MAX_SCHEMA_OPERATIONS 256

/*
 * Long signals
 */
#define NDB_SECTION_SEGMENT_SZ 60

/*
 * Restore buffer in global pages:
 *   4*32 = 128 pages of 32 KiB = 4 MByte
 */
#define LCP_RESTORE_BUFFER (4*32)


/**
 * The hashmap size should support at least one
 * partition per LDM.  It should also be a multiple
 * of as many possible data node counts as possible,
 * so that all partitions map to as nearly the same
 * number of hashmap buckets as possible; otherwise
 * some partitions will be bigger than others.
 *
 * The historical size of hashmaps supported by old
 * versions of NDB is 240.  This guarantees at most
 * 1/6 of unusable data memory for some nodes, since
 * one can have at most 48 data nodes, so each node
 * will relate to at least 5 hashmap buckets.  Also,
 * 240 is a multiple of 2, 3, 4, 5, 6, 8, 10, 12,
 * 15, 16, 20, 24, 30, 32, 40, and 48, so having any
 * of these numbers of nodes guarantees nearly no
 * unusable data memory.
 *
 * The current value 3840 is 16 times 240, and so gives
 * at least the same guarantees as the old value above,
 * also if up to 16 LDM threads per node are used.
 */

#define NDB_MAX_HASHMAP_BUCKETS (3840 * 2 * 3)

#if NDB_VERSION_D < NDB_MAKE_VERSION(7,2,0)
#define NDB_DEFAULT_HASHMAP_BUCKETS 240
#else
#define NDB_DEFAULT_HASHMAP_BUCKETS 3840
#endif
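
/*
 * Illustrative sketch (not part of the original header): with
 * NDB_DEFAULT_HASHMAP_BUCKETS = 3840 and for example 6 partitions,
 * each partition maps to exactly 3840 / 6 = 640 buckets; for a
 * partition count that does not divide 3840 evenly, partitions differ
 * by at most one bucket.  The helper name is hypothetical.
 */
static inline unsigned hashmap_min_buckets_per_partition(unsigned partitions)
{
  /* partitions must be > 0 */
  return NDB_DEFAULT_HASHMAP_BUCKETS / partitions;
}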

/**
 * Bits/mask used for coding/decoding blockno/blockinstance
 */
#define NDBMT_BLOCK_BITS 9
#define NDBMT_BLOCK_MASK ((1 << NDBMT_BLOCK_BITS) - 1)
#define NDBMT_BLOCK_INSTANCE_BITS 7
#define NDBMT_MAX_BLOCK_INSTANCES (1 << NDBMT_BLOCK_INSTANCE_BITS)
/* Proxy block 0 is not a worker */
#define NDBMT_MAX_WORKER_INSTANCES (NDBMT_MAX_BLOCK_INSTANCES - 1)
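
/*
 * Illustrative sketch (not part of the original header): a packing that
 * is consistent with the bit definitions above, with the block number in
 * the low NDBMT_BLOCK_BITS bits and the instance in the bits above them.
 * The actual coding/decoding helpers used by the kernel live elsewhere in
 * the source tree; these names are hypothetical and assume NDB's Uint32
 * typedef is in scope.
 */
static inline Uint32 ndbmt_pack_block(Uint32 mainBlockNo, Uint32 instanceNo)
{
  return (instanceNo << NDBMT_BLOCK_BITS) | (mainBlockNo & NDBMT_BLOCK_MASK);
}
static inline Uint32 ndbmt_block_main(Uint32 packed)
{
  return packed & NDBMT_BLOCK_MASK;
}
static inline Uint32 ndbmt_block_instance(Uint32 packed)
{
  return packed >> NDBMT_BLOCK_BITS;
}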

#define NDB_DEFAULT_LOG_PARTS 4

#if NDB_VERSION_D < NDB_MAKE_VERSION(7,2,0)
#define NDB_MAX_LOG_PARTS          4
#define MAX_NDBMT_TC_THREADS       2
#define MAX_NDBMT_RECEIVE_THREADS  1
#define MAX_NDBMT_SEND_THREADS     0
#else
#define NDB_MAX_LOG_PARTS         32
#define MAX_NDBMT_TC_THREADS      32
#define MAX_NDBMT_RECEIVE_THREADS 16
#define MAX_NDBMT_SEND_THREADS    16
#endif

#define MAX_NDBMT_LQH_WORKERS NDB_MAX_LOG_PARTS
#define MAX_NDBMT_LQH_THREADS NDB_MAX_LOG_PARTS

#define NDB_FILE_BUFFER_SIZE (256*1024)

/*
 * NDB_FS_RW_PAGES must be big enough for the biggest request,
 * probably PACK_TABLE_PAGES (see Dbdih.hpp)
 */
#if NDB_VERSION_D < NDB_MAKE_VERSION(7,2,0)
#define NDB_FS_RW_PAGES 32
#else
#define NDB_FS_RW_PAGES 268
#endif

/**
 * MAX_ATTRIBUTES_IN_TABLE old handling
 */
#define MAXNROFATTRIBUTESINWORDS_OLD (128 / 32)

/**
 * Number of bits available for the attribute mask in NDB$EVENTS_0
 */
#define MAX_ATTRIBUTES_IN_TABLE_NDB_EVENTS_0 4096

/**
 * Max tree nodes per SPJ request
 *
 * Currently limited by the nodemask being shipped back inside a 32-bit
 *   word disguised as totalLen in ScanTabConf
 */
#define NDB_SPJ_MAX_TREE_NODES 32

/*
 * Stored ordered index stats use 2 Longvarbinary pseudo-columns: the
 * packed index keys and the packed values.  Key size is limited by the
 * SAMPLES table, which has 3 other PK attributes.  The length bytes also
 * count as 1 word.  Values currently contain RIR (one word) and RPK
 * (one word for each key level).  The SAMPLES table STAT_VALUE column
 * is longer to allow future changes.
 *
 * Stats tables are "lifted" to mysql level, so for max key size use
 * MAX_KEY_LENGTH/4 instead of the bigger MAX_KEY_SIZE_IN_WORDS.  That
 * definition is not available here by default, so use 3072 directly for now.
 */
#define MAX_INDEX_STAT_KEY_COUNT    MAX_ATTRIBUTES_IN_INDEX
#define MAX_INDEX_STAT_KEY_SIZE     ((3072/4) - 3 - 1)
#define MAX_INDEX_STAT_VALUE_COUNT  (1 + MAX_INDEX_STAT_KEY_COUNT)
#define MAX_INDEX_STAT_VALUE_SIZE   MAX_INDEX_STAT_VALUE_COUNT
#define MAX_INDEX_STAT_VALUE_CSIZE  512 /* Longvarbinary(2048) */
#define MAX_INDEX_STAT_VALUE_FORMAT 1
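
/*
 * Illustrative sketch (not part of the original header): the key size
 * limit above expands to (3072 / 4) - 3 - 1 = 764 words, i.e. the
 * mysql-level MAX_KEY_LENGTH in words minus the 3 other PK attributes
 * of the SAMPLES table and 1 word of length bytes.
 */
static inline int ndb_index_stat_key_size_as_expected()
{
  return MAX_INDEX_STAT_KEY_SIZE == 764;
}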

#ifdef NDB_STATIC_ASSERT

static inline void ndb_limits_constraints()
{
  NDB_STATIC_ASSERT(NDB_DEFAULT_HASHMAP_BUCKETS <= NDB_MAX_HASHMAP_BUCKETS);

  NDB_STATIC_ASSERT(MAX_NDB_PARTITIONS <= NDB_MAX_HASHMAP_BUCKETS);

  NDB_STATIC_ASSERT(MAX_NDB_PARTITIONS - 1 <= NDB_PARTITION_MASK);

  // MAX_NDB_NODES should be 48, but code assumes it is 49
  STATIC_CONST(MAX_NDB_DATA_NODES = MAX_DATA_NODE_ID);
  NDB_STATIC_ASSERT(MAX_NDB_NODES == MAX_NDB_DATA_NODES + 1);

  // Default partitioning is 1 partition per LDM
  NDB_STATIC_ASSERT(MAX_NDB_DATA_NODES * MAX_NDBMT_LQH_WORKERS <= MAX_NDB_PARTITIONS);

  // The default hashmap should at least support the maximum default partitioning
  NDB_STATIC_ASSERT(MAX_NDB_DATA_NODES * MAX_NDBMT_LQH_WORKERS <= NDB_DEFAULT_HASHMAP_BUCKETS);
}

#endif

#endif