/* Copyright (C) 2006 MySQL AB & MySQL Finland AB & TCX DataKonsult AB
   Copyright (c) 2020, MariaDB Corporation.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; version 2 of the License.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA */

/*
  Read and write key blocks

  The basic structure of a key block is as follows:

  LSN           7 (LSN_STORE_SIZE);     Log number for last change;
                                        Only for transactional pages
  PACK_TRANSID  6 (TRANSID_SIZE);       Relative transid to pack page transid's
                                        Only for transactional pages
  KEYNR         1 (KEYPAGE_KEYID_SIZE)  Which index this page belongs to
  FLAG          1 (KEYPAGE_FLAG_SIZE)   Flags for page
  PAGE_SIZE     2 (KEYPAGE_USED_SIZE)   How much of the page is used.
                                        high-byte-first

  The flag is a combination of the following values:

  KEYPAGE_FLAG_ISNOD            Page is a node
  KEYPAGE_FLAG_HAS_TRANSID      There may be a transid on the page.

  After this we store key data, either packed or not packed, directly
  after each other.  If the page is a node, there is a pointer to the
  next key page at page start and after each key.

  At the end of the page the last KEYPAGE_CHECKSUM_SIZE bytes are reserved
  for a page checksum.
*/
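
/*
  A worked example, derived from the sizes above (not normative): for a
  transactional table the key page header is
  LSN (7) + PACK_TRANSID (6) + KEYNR (1) + FLAG (1) + PAGE_SIZE (2) = 17
  bytes, while a non-transactional table omits the LSN and PACK_TRANSID
  fields.  The stored used-size includes this header; for instance
  _ma_dispose() below stores keypage_header + 8 as the used size of a freed
  page (the header plus an 8 byte link to the next freed page).
*/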

#include "maria_def.h"
#include "trnman.h"
#include "ma_key_recover.h"

/**
  Fill MARIA_PAGE structure for usage with _ma_write_keypage
*/

void _ma_page_setup(MARIA_PAGE *page, MARIA_HA *info,
                    const MARIA_KEYDEF *keyinfo, my_off_t pos,
                    uchar *buff)
{
  MARIA_SHARE *share= info->s;

  page->info= info;
  page->keyinfo= keyinfo;
  page->buff= buff;
  page->pos= pos;
  page->size= _ma_get_page_used(share, buff);
  page->org_size= page->size;
  page->flag= _ma_get_keypage_flag(share, buff);
  page->node= ((page->flag & KEYPAGE_FLAG_ISNOD) ?
               share->base.key_reflength : 0);
}
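
/*
  A minimal usage sketch (illustrative only, not taken from a real caller):
  'info', 'keyinfo', 'pos' and 'buff' are assumed to be supplied by the
  caller, and DFLT_INIT_HITS is assumed to be the usual page cache priority.
  With PAGECACHE_LOCK_WRITE the page is locked and pinned by the write and
  registered in info->pinned_pages for later unpinning.

    MARIA_PAGE page;
    _ma_page_setup(&page, info, keyinfo, pos, buff);
    if (_ma_write_keypage(&page, PAGECACHE_LOCK_WRITE, DFLT_INIT_HITS))
      return 1;
*/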

#ifdef IDENTICAL_PAGES_AFTER_RECOVERY
void page_cleanup(MARIA_SHARE *share, MARIA_PAGE *page)
{
  uint length= page->size;
  DBUG_ASSERT(length <= share->max_index_block_size);
  bzero(page->buff + length, share->block_size - length);
}
#endif


/**
  Fetch a key-page in memory

  @fn _ma_fetch_keypage()
  @param page            Fill this struct with information about read page
  @param info            Maria handler
  @param keyinfo         Key definition for used key
  @param pos             Position for page (in bytes)
  @param lock            Lock type for page
  @param level           Importance of page; Priority for page cache
  @param buff            Buffer to use for page
  @param return_buffer   Set to 1 if we want to force usage of buff

  @return
  @retval 0  ok
  @retval 1  error
*/


my_bool _ma_fetch_keypage(MARIA_PAGE *page, MARIA_HA *info,
                          const MARIA_KEYDEF *keyinfo,
                          my_off_t pos, enum pagecache_page_lock lock,
                          int level, uchar *buff,
                          my_bool return_buffer __attribute__ ((unused)))
{
  uchar *tmp;
  MARIA_PINNED_PAGE page_link;
  MARIA_SHARE *share= info->s;
  uint block_size= share->block_size;
  DBUG_ENTER("_ma_fetch_keypage");
  DBUG_PRINT("enter", ("page: %lu", (ulong) (pos / block_size)));

  tmp= pagecache_read(share->pagecache, &share->kfile,
                      (pgcache_page_no_t) (pos / block_size), level, buff,
                      share->page_type, lock, &page_link.link);

  if (lock != PAGECACHE_LOCK_LEFT_UNLOCKED)
  {
    DBUG_ASSERT(lock == PAGECACHE_LOCK_WRITE || lock == PAGECACHE_LOCK_READ);
    page_link.unlock= (lock == PAGECACHE_LOCK_WRITE ?
                       PAGECACHE_LOCK_WRITE_UNLOCK :
                       PAGECACHE_LOCK_READ_UNLOCK);
    page_link.changed= 0;
    push_dynamic(&info->pinned_pages, (void*) &page_link);
    page->link_offset= info->pinned_pages.elements-1;
  }

  if (tmp == info->buff)
    info->keyread_buff_used= 1;
  else if (!tmp)
  {
    DBUG_PRINT("error", ("Got errno: %d from pagecache_read", my_errno));
    info->last_keypage= HA_OFFSET_ERROR;
    _ma_set_fatal_error(share, HA_ERR_CRASHED);
    DBUG_RETURN(1);
  }
  info->last_keypage= pos;

  /*
    Setup the page structure to make pages easy to use.
    This is the same setup as in _ma_page_setup(), but inlined here as this
    is used so often.
  */
  page->info= info;
  page->keyinfo= keyinfo;
  page->buff= tmp;
  page->pos= pos;
  page->size= _ma_get_page_used(share, tmp);
  page->org_size= page->size;                   /* For debugging */
  page->flag= _ma_get_keypage_flag(share, tmp);
  page->node= ((page->flag & KEYPAGE_FLAG_ISNOD) ?
               share->base.key_reflength : 0);

#ifdef EXTRA_DEBUG
  {
    uint page_size= page->size;
    if (page_size < 4 || page_size > share->max_index_block_size ||
        _ma_get_keynr(share, tmp) != keyinfo->key_nr)
    {
      DBUG_PRINT("error", ("page %lu had wrong page length: %u  page_header: %u  keynr: %u",
                           (ulong) (pos / block_size), page_size,
                           share->keypage_header,
                           _ma_get_keynr(share, tmp)));
      DBUG_DUMP("page", tmp, page_size);
      info->last_keypage= HA_OFFSET_ERROR;
      _ma_set_fatal_error(share, HA_ERR_CRASHED);
      DBUG_RETURN(1);
    }
  }
#endif
  DBUG_RETURN(0);
} /* _ma_fetch_keypage */
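
/*
  A hedged usage sketch (not copied from a real caller): fetch the page at
  'root' for read-only use.  'root' and 'buff' are assumed to be supplied
  by the caller and DFLT_INIT_HITS is assumed to be the usual cache
  priority; with PAGECACHE_LOCK_LEFT_UNLOCKED no pin bookkeeping is done.

    MARIA_PAGE page;
    if (_ma_fetch_keypage(&page, info, keyinfo, root,
                          PAGECACHE_LOCK_LEFT_UNLOCKED, DFLT_INIT_HITS,
                          buff, 0))
      return 1;

  On success page.buff, page.size, page.flag and page.node describe the
  fetched page.
*/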


/* Write a key-page on disk */

my_bool _ma_write_keypage(MARIA_PAGE *page, enum pagecache_page_lock lock,
                          int level)
{
  MARIA_SHARE *share= page->info->s;
  uint block_size= share->block_size;
  uchar *buff= page->buff;
  my_bool res;
  MARIA_PINNED_PAGE page_link;
  DBUG_ENTER("_ma_write_keypage");

  /*
    The following ensures that for transactional tables we have logged
    all changes that change the page size (as the logging code sets
    page->org_size).
  */
  DBUG_ASSERT(!share->now_transactional || page->size == page->org_size);

#ifdef EXTRA_DEBUG                              /* Safety check */
  {
    uint page_length, nod_flag;
    page_length= _ma_get_page_used(share, buff);
    nod_flag=    _ma_test_if_nod(share, buff);

    DBUG_ASSERT(page->size == page_length);
    DBUG_ASSERT(page->size <= share->max_index_block_size);
    DBUG_ASSERT(page->flag == _ma_get_keypage_flag(share, buff));

    if (page->pos < share->base.keystart ||
        page->pos + block_size > share->state.state.key_file_length ||
        (page->pos & (maria_block_size - 1)))
    {
      DBUG_PRINT("error", ("Trying to write inside key status region: "
                           "key_start: %lu  length: %lu  page_pos: %lu",
                           (long) share->base.keystart,
                           (long) share->state.state.key_file_length,
                           (long) page->pos));
      my_errno= EINVAL;
      DBUG_ASSERT(0);
      DBUG_RETURN(1);
    }
    DBUG_PRINT("page", ("write page at: %lu", (ulong) (page->pos / block_size)));
    DBUG_DUMP("buff", buff, page_length);
    DBUG_ASSERT(page_length >= share->keypage_header + nod_flag +
                page->keyinfo->minlength || maria_in_recovery);
  }
#endif

  /* Verify that keynr is correct */
  DBUG_ASSERT(_ma_get_keynr(share, buff) == page->keyinfo->key_nr);

#if defined(EXTRA_DEBUG) && defined(HAVE_valgrind) && defined(WHEN_DEBUGGING)
  MEM_CHECK_DEFINED(buff, block_size);
#endif

  page_cleanup(share, page);
  {
    PAGECACHE_BLOCK_LINK **link;
    enum pagecache_page_pin pin;
    if (lock == PAGECACHE_LOCK_LEFT_WRITELOCKED)
    {
      pin= PAGECACHE_PIN_LEFT_PINNED;
      link= &page_link.link;
    }
    else if (lock == PAGECACHE_LOCK_WRITE_UNLOCK)
    {
      pin= PAGECACHE_UNPIN;
      /*
        We unlock this page, so set link to 0 to prevent it from being
        used even accidentally.
      */
      link= NULL;
    }
    else
    {
      pin= PAGECACHE_PIN;
      link= &page_link.link;
    }
    res= pagecache_write(share->pagecache,
                         &share->kfile,
                         (pgcache_page_no_t) (page->pos / block_size),
                         level, buff, share->page_type,
                         lock, pin, PAGECACHE_WRITE_DELAY, link,
                         LSN_IMPOSSIBLE);
  }

  if (lock == PAGECACHE_LOCK_WRITE)
  {
    /* It was not locked before, we have to unlock it when we unpin pages */
    page_link.unlock= PAGECACHE_LOCK_WRITE_UNLOCK;
    page_link.changed= 1;
    push_dynamic(&page->info->pinned_pages, (void*) &page_link);
  }
  DBUG_RETURN(res);
}


/**
  @brief Put page in free list

  @fn _ma_dispose()
  @param info            Maria handler
  @param pos             Address of page
  @param page_not_read   1 if page has not yet been read

  @note
    The page at 'pos' must have been read with a write lock.
    This function does logging (unlike _ma_new()).

  @return
  @retval 0  ok
  @retval 1  error

*/

int _ma_dispose(register MARIA_HA *info, my_off_t pos, my_bool page_not_read)
{
  my_off_t old_link;
  uchar buff[MAX_KEYPAGE_HEADER_SIZE + 8 + 2];
  ulonglong page_no;
  MARIA_SHARE *share= info->s;
  MARIA_PINNED_PAGE page_link;
  uint block_size= share->block_size;
  int result= 0;
  enum pagecache_page_lock lock_method;
  enum pagecache_page_pin pin_method;
  DBUG_ENTER("_ma_dispose");
  DBUG_PRINT("enter", ("page: %lu", (ulong) (pos / block_size)));
  DBUG_ASSERT(pos % block_size == 0);

  (void) _ma_lock_key_del(info, 0);

  old_link= share->key_del_current;
  share->key_del_current= pos;
  page_no= pos / block_size;
  bzero(buff, share->keypage_header);
  _ma_store_keynr(share, buff, (uchar) MARIA_DELETE_KEY_NR);
  _ma_store_page_used(share, buff, share->keypage_header + 8);
  mi_sizestore(buff + share->keypage_header, old_link);
  share->state.changed|= STATE_NOT_SORTED_PAGES;

  if (share->now_transactional)
  {
    LSN lsn;
    uchar log_data[FILEID_STORE_SIZE + PAGE_STORE_SIZE * 2];
    LEX_CUSTRING log_array[TRANSLOG_INTERNAL_PARTS + 1];
    my_off_t page;

    /* Store address of deleted page */
    page_store(log_data + FILEID_STORE_SIZE, page_no);

    /* Store link to next unused page (the link that is written to page) */
    page= (old_link == HA_OFFSET_ERROR ? IMPOSSIBLE_PAGE_NO :
           old_link / block_size);
    page_store(log_data + FILEID_STORE_SIZE + PAGE_STORE_SIZE, page);

    log_array[TRANSLOG_INTERNAL_PARTS + 0].str=    log_data;
    log_array[TRANSLOG_INTERNAL_PARTS + 0].length= sizeof(log_data);

    if (translog_write_record(&lsn, LOGREC_REDO_INDEX_FREE_PAGE,
                              info->trn, info,
                              (translog_size_t) sizeof(log_data),
                              TRANSLOG_INTERNAL_PARTS + 1, log_array,
                              log_data, NULL))
      result= 1;
  }

  if (page_not_read)
  {
    lock_method= PAGECACHE_LOCK_WRITE;
    pin_method=  PAGECACHE_PIN;
  }
  else
  {
    lock_method= PAGECACHE_LOCK_LEFT_WRITELOCKED;
    pin_method=  PAGECACHE_PIN_LEFT_PINNED;
  }

  if (pagecache_write_part(share->pagecache,
                           &share->kfile, (pgcache_page_no_t) page_no,
                           PAGECACHE_PRIORITY_LOW, buff,
                           share->page_type,
                           lock_method, pin_method,
                           PAGECACHE_WRITE_DELAY, &page_link.link,
                           LSN_IMPOSSIBLE,
                           0, share->keypage_header + 8))
    result= 1;

#ifdef IDENTICAL_PAGES_AFTER_RECOVERY
  {
    uchar *page_buff= pagecache_block_link_to_buffer(page_link.link);
    bzero(page_buff + share->keypage_header + 8,
          block_size - share->keypage_header - 8 - KEYPAGE_CHECKSUM_SIZE);
  }
#endif

  if (page_not_read)
  {
    /* It was not locked before, we have to unlock it when we unpin pages */
    page_link.unlock= PAGECACHE_LOCK_WRITE_UNLOCK;
    page_link.changed= 1;
    push_dynamic(&info->pinned_pages, (void*) &page_link);
  }

  DBUG_RETURN(result);
} /* _ma_dispose */
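
/*
  Illustration, derived from the code above (not a separate format
  specification): after _ma_dispose() the freed page starts with

    0 .. keypage_header-1     zeroed header with key number
                              MARIA_DELETE_KEY_NR
    keypage_header .. +7      byte position of the previously freed page
                              (HA_OFFSET_ERROR if there was none)

  and share->key_del_current points at it, so the freed pages form a single
  linked list that _ma_new() below pops pages from.
*/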


/**
  @brief Get address for free page to use

  @fn _ma_new()
  @param info        Maria handler
  @param level       Type of key block (caching priority for pagecache)
  @param page_link   Pointer to page in page cache if read. One can
                     check if this is used by checking if
                     page_link->changed != 0

  @note Logging of this is left to the caller (so that the "new"ing and the
  first changes done to this new page can be logged as one single entry -
  one single _ma_log_new() call).

  @return
    HA_OFFSET_ERROR     File is full or page read error
    #                   Page address to use
*/

my_off_t _ma_new(register MARIA_HA *info, int level,
                 MARIA_PINNED_PAGE **page_link)

{
  my_off_t pos;
  MARIA_SHARE *share= info->s;
  uint block_size= share->block_size;
  DBUG_ENTER("_ma_new");

  if (_ma_lock_key_del(info, 1))
  {
    mysql_mutex_lock(&share->intern_lock);
    pos= share->state.state.key_file_length;
    if (pos >= share->base.max_key_file_length - block_size)
    {
      my_errno= HA_ERR_INDEX_FILE_FULL;
      mysql_mutex_unlock(&share->intern_lock);
      DBUG_RETURN(HA_OFFSET_ERROR);
    }
    share->state.state.key_file_length+= block_size;
    /* The following is for non-transactional tables */
    info->state->key_file_length= share->state.state.key_file_length;
    mysql_mutex_unlock(&share->intern_lock);
    (*page_link)->changed= 0;
    (*page_link)->write_lock= PAGECACHE_LOCK_WRITE;
  }
  else
  {
    uchar *buff;
    pos= share->key_del_current;                /* Protected */
    DBUG_ASSERT(share->pagecache->block_size == block_size);
    if (!(buff= pagecache_read(share->pagecache,
                               &share->kfile,
                               (pgcache_page_no_t) (pos / block_size), level,
                               0, share->page_type,
                               PAGECACHE_LOCK_WRITE, &(*page_link)->link)))
      pos= HA_OFFSET_ERROR;
    else
    {
      /*
        Next deleted page's number is in the header of the present page
        (single linked list):
      */
#ifdef DBUG_ASSERT_EXISTS
      my_off_t key_del_current;
#endif
      share->key_del_current= mi_sizekorr(buff + share->keypage_header);
#ifdef DBUG_ASSERT_EXISTS
      key_del_current= share->key_del_current;
      DBUG_ASSERT((key_del_current != 0) &&
                  ((key_del_current == HA_OFFSET_ERROR) ||
                   (key_del_current <=
                    (share->state.state.key_file_length - block_size))));
#endif
    }

    (*page_link)->unlock= PAGECACHE_LOCK_WRITE_UNLOCK;
    (*page_link)->write_lock= PAGECACHE_LOCK_WRITE;
    /*
      We have to mark it changed as _ma_flush_pending_blocks() uses
      'changed' to know if we used the page cache or not
    */
    (*page_link)->changed= 1;
    push_dynamic(&info->pinned_pages, (void*) *page_link);
    *page_link= dynamic_element(&info->pinned_pages,
                                info->pinned_pages.elements-1,
                                MARIA_PINNED_PAGE *);
  }
  share->state.changed|= STATE_NOT_SORTED_PAGES;
  DBUG_PRINT("exit", ("Pos: %ld", (long) pos));
  DBUG_RETURN(pos);
} /* _ma_new */
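
/*
  A hedged sketch of the intended calling pattern (simplified; a real
  caller also logs the new page, see the @note above).  DFLT_INIT_HITS is
  assumed to be the usual cache priority.

    MARIA_PINNED_PAGE tmp_page_link, *page_link= &tmp_page_link;
    my_off_t new_pos;

    if ((new_pos= _ma_new(info, DFLT_INIT_HITS, &page_link)) ==
        HA_OFFSET_ERROR)
      return 1;

  The caller then fills the page buffer and writes it out;
  page_link->changed tells whether the page was taken from the free list
  (and is pinned in the page cache) or appended at end of file.
*/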


/**
  Log compaction of an index page
*/

static my_bool _ma_log_compact_keypage(MARIA_PAGE *ma_page,
                                       TrID min_read_from)
{
  LSN lsn;
  uchar log_data[FILEID_STORE_SIZE + PAGE_STORE_SIZE + 1 + 7 + TRANSID_SIZE];
  uchar *log_pos;
  LEX_CUSTRING log_array[TRANSLOG_INTERNAL_PARTS + 1];
  MARIA_HA *info= ma_page->info;
  MARIA_SHARE *share= info->s;
  uint translog_parts, extra_length;
  my_off_t page= ma_page->pos;
  DBUG_ENTER("_ma_log_compact_keypage");
  DBUG_PRINT("enter", ("page: %lu", (ulong) (page / share->block_size)));

  /* Store address of the page that is being compacted */
  page/= share->block_size;
  page_store(log_data + FILEID_STORE_SIZE, page);

  log_pos= log_data + FILEID_STORE_SIZE + PAGE_STORE_SIZE;

  log_pos[0]= KEY_OP_COMPACT_PAGE;
  transid_store(log_pos + 1, min_read_from);
  log_pos+= 1 + TRANSID_SIZE;

  log_array[TRANSLOG_INTERNAL_PARTS + 0].str=    log_data;
  log_array[TRANSLOG_INTERNAL_PARTS + 0].length= (uint) (log_pos - log_data);
  translog_parts= 1;
  extra_length= 0;

  _ma_log_key_changes(ma_page,
                      log_array + TRANSLOG_INTERNAL_PARTS + translog_parts,
                      log_pos, &extra_length, &translog_parts);
  /* Remember the new page length for future log entries for the same page */
  ma_page->org_size= ma_page->size;

  if (translog_write_record(&lsn, LOGREC_REDO_INDEX,
                            info->trn, info,
                            (translog_size_t)
                            (log_array[TRANSLOG_INTERNAL_PARTS + 0].length +
                             extra_length),
                            TRANSLOG_INTERNAL_PARTS + translog_parts,
                            log_array, log_data, NULL))
    DBUG_RETURN(1);
  DBUG_RETURN(0);
}


/**
  Remove all transaction id's less than the given one from a key page

  @fn _ma_compact_keypage()
  @param ma_page         Page (in memory) to compact
  @param min_read_from   Remove all trids from the page less than this

  @retval 0  Ok
  @retval 1  Error; my_errno contains the error
*/

my_bool _ma_compact_keypage(MARIA_PAGE *ma_page, TrID min_read_from)
{
  MARIA_HA *info= ma_page->info;
  MARIA_SHARE *share= info->s;
  MARIA_KEY key;
  uchar *page, *endpos, *start_of_empty_space;
  uint page_flag, nod_flag, saved_space;
  my_bool page_has_transid;
  DBUG_ENTER("_ma_compact_keypage");

  page_flag= ma_page->flag;
  if (!(page_flag & KEYPAGE_FLAG_HAS_TRANSID))
    DBUG_RETURN(0);                             /* No transaction id on page */

  nod_flag= ma_page->node;
  page=     ma_page->buff;
  endpos=   page + ma_page->size;
  key.data= info->lastkey_buff;
  key.keyinfo= (MARIA_KEYDEF*) ma_page->keyinfo;

  page_has_transid= 0;
  page+= share->keypage_header + nod_flag;
  key.data[0]= 0;                               /* safety */
  start_of_empty_space= 0;
  saved_space= 0;
  do
  {
    if (!(page= (*ma_page->keyinfo->skip_key)(&key, 0, 0, page)))
    {
      DBUG_PRINT("error", ("Couldn't find last key:  page_pos: %p",
                           page));
      _ma_set_fatal_error(share, HA_ERR_CRASHED);
      DBUG_RETURN(1);
    }
    if (key_has_transid(page-1))
    {
      uint transid_length;
      transid_length= transid_packed_length(page);

      if (min_read_from == ~(TrID) 0 ||
          min_read_from < transid_get_packed(share, page))
      {
        page[-1]&= 254;                         /* Remove transid marker */
        transid_length= transid_packed_length(page);
        if (start_of_empty_space)
        {
          /* Move block before the transid up in page */
          uint copy_length= (uint) (page - start_of_empty_space) - saved_space;
          memmove(start_of_empty_space, start_of_empty_space + saved_space,
                  copy_length);
          start_of_empty_space+= copy_length;
        }
        else
          start_of_empty_space= page;
        saved_space+= transid_length;
      }
      else
        page_has_transid= 1;                    /* At least one id left */
      page+= transid_length;
    }
    page+= nod_flag;
  } while (page < endpos);

  DBUG_ASSERT(page == endpos);

  if (start_of_empty_space)
  {
    /*
      Move the last block down.
      start_of_empty_space is always set if any transid was removed.
    */
    uint copy_length= (uint) (endpos - start_of_empty_space) - saved_space;

    if (copy_length)
      memmove(start_of_empty_space, start_of_empty_space + saved_space,
              copy_length);
    ma_page->size= (uint) (start_of_empty_space + copy_length - ma_page->buff);
    page_store_size(share, ma_page);
  }

  if (!page_has_transid)
  {
    ma_page->flag&= ~KEYPAGE_FLAG_HAS_TRANSID;
    _ma_store_keypage_flag(share, ma_page->buff, ma_page->flag);
    /* Clear packed transid (in case of zerofill) */
    bzero(ma_page->buff + LSN_STORE_SIZE, TRANSID_SIZE);
  }

  if (share->now_transactional)
  {
    if (_ma_log_compact_keypage(ma_page, min_read_from))
      DBUG_RETURN(1);
  }
  DBUG_RETURN(0);
}
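
/*
  A hedged usage sketch (illustrative only): 'min_read_from' is assumed to
  come from the transaction manager as the oldest transaction id that any
  active transaction may still need to see, and 'page' is assumed to have
  been fetched with a write lock (e.g. with _ma_fetch_keypage()).

    if (_ma_compact_keypage(&page, min_read_from))
      return 1;
*/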