/*****************************************************************************

Copyright (c) 2016, 2019, Oracle and/or its affiliates. All Rights Reserved.

This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License, version 2.0, as published by the
Free Software Foundation.

This program is also distributed with certain software (including but not
limited to OpenSSL) that is licensed under separate terms, as designated in a
particular file or component or in included license documentation. The authors
of MySQL hereby grant you an additional permission to link the program and
your derivative works with the separately licensed software that they have
included with MySQL.

This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License, version 2.0,
for more details.

You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin St, Fifth Floor, Boston, MA 02110-1301  USA

*****************************************************************************/

#include "zlob0first.h"
#include "trx0trx.h"
#include "zlob0index.h"
#include "zlob0read.h"

namespace lob {

/** Given the page size, what is the number of index entries the first page
can contain. */
ulint z_first_page_t::get_n_index_entries() const {
  ut_ad(m_index != nullptr);

  const page_size_t page_size(dict_table_page_size(m_index->table));

  ulint len = page_size.physical();
  switch (len) {
    case KB16:
      /* For a page size of 16KB, there are 100 index entries in
      the first page of the zlob. */
      return (100);
    case 8192:
      /* 8KB. */
      return (80);
    case 4096:
      /* 4KB. */
      return (40);
    case 2048:
      return (20);
    case 1024:
      return (5);
    default:
      ut_error;
  }
}

/** Given the page size, what is the number of frag entries the first page
can contain. */
ulint z_first_page_t::get_n_frag_entries() const {
  ut_ad(m_index != nullptr);

  DBUG_EXECUTE_IF("innodb_zlob_first_use_only_1_frag_entries", return (1););

  const page_size_t page_size(dict_table_page_size(m_index->table));
  ulint len = page_size.physical();
  switch (len) {
    case KB16:
      /* For a page size of 16KB, there are 200 frag entries in
      the first page of the zlob. */
      return (200);
    case 8192:
      return (100);
    case 4096:
      return (40);
    case 2048:
      return (20);
    case 1024:
      return (5);
    default:
      ut_error;
  }
}

buf_block_t *z_first_page_t::alloc(bool bulk) {
  ut_ad(m_block == nullptr);

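  /* A hint of FIL_NULL means no preferred page number; the LOB page
  allocator is free to pick any available page. */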
  page_no_t hint = FIL_NULL;
  m_block = alloc_lob_page(m_index, m_mtr, hint, bulk);

  if (m_block == nullptr) {
    return (nullptr);
  }

  init();

  ut_ad(m_block->get_page_type() == FIL_PAGE_TYPE_ZLOB_FIRST);
  return (m_block);
}

/** Print the index entries. */
std::ostream &z_first_page_t::print_index_entries(std::ostream &out) const {
  flst_base_node_t *flst = index_list();
  fil_addr_t node_loc = flst_get_first(flst, m_mtr);

  space_id_t space = dict_index_get_space(m_index);
  const page_size_t page_size = dict_table_page_size(m_index->table);

  out << "Index Entries: " << flst_bnode_t(flst, m_mtr) << std::endl;

  while (!fil_addr_is_null(node_loc)) {
    flst_node_t *node =
        fut_get_ptr(space, page_size, node_loc, RW_X_LATCH, m_mtr);
    z_index_entry_t entry(node, m_mtr, m_index);
    out << entry << std::endl;

    flst_base_node_t *vers = entry.get_versions_list();
    fil_addr_t ver_loc = flst_get_first(vers, m_mtr);

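    /* Print the older versions of this entry, prefixing each one with '+'
    characters that indicate its depth in the version list. */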
    uint32_t depth = 0;
    while (!fil_addr_is_null(ver_loc)) {
      depth++;

      for (uint32_t i = 0; i < depth; ++i) {
        out << "+";
      }
      flst_node_t *ver_node = addr2ptr_x(ver_loc);
      z_index_entry_t vers_entry(ver_node, m_mtr, m_index);
      out << vers_entry << std::endl;
      ver_loc = vers_entry.get_next();
    }

    node_loc = entry.get_next();
  }

  return (out);
}

/** Print the frag entries. */
std::ostream &z_first_page_t::print_frag_entries(std::ostream &out) const {
  flst_base_node_t *flst = frag_list();
  fil_addr_t node_loc = flst_get_first(flst, m_mtr);
  space_id_t space = dict_index_get_space(m_index);
  const page_size_t page_size = dict_table_page_size(m_index->table);

  out << "Frag Entries: " << flst_bnode_t(flst, m_mtr) << std::endl;

  while (!fil_addr_is_null(node_loc)) {
    flst_node_t *node =
        fut_get_ptr(space, page_size, node_loc, RW_X_LATCH, m_mtr);
    z_frag_entry_t entry(node, m_mtr);
    out << entry << std::endl;
    node_loc = entry.get_next();
  }

  return (out);
}

/** Allocate one index entry.  If there is no free index entry, allocate
an index page (a page full of z_index_entry_t objects) and service the
request.
@return the allocated index entry. */
z_index_entry_t z_first_page_t::alloc_index_entry(bool bulk) {
  flst_base_node_t *free_lst = free_list();
  fil_addr_t first_loc = flst_get_first(free_lst, m_mtr);

  if (fil_addr_is_null(first_loc)) {
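    /* The free list is empty.  Allocate a new index page, whose entries are
    linked into this free list, and then retry. */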
    z_index_page_t page(m_mtr, m_index);
    page.alloc(*this, bulk);
    first_loc = flst_get_first(free_lst, m_mtr);
  }

  if (fil_addr_is_null(first_loc)) {
    return (z_index_entry_t());
  }

  flst_node_t *first_ptr = addr2ptr_x(first_loc);
  z_index_entry_t entry(first_ptr, m_mtr);
  entry.remove(free_lst);

  return (entry);
}

/** Allocate one frag page entry.  If there is no free frag entry, allocate
a frag node page (a page full of z_frag_entry_t objects) and service the
request.
@return the allocated frag entry. */
z_frag_entry_t z_first_page_t::alloc_frag_entry(bool bulk) {
  flst_base_node_t *free_lst = free_frag_list();
  flst_base_node_t *used_lst = frag_list();

  fil_addr_t first_loc = flst_get_first(free_lst, m_mtr);

  if (fil_addr_is_null(first_loc)) {
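    /* No free frag entry is available.  Allocate a new frag node page, whose
    entries are added to the free list, and then retry. */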
    z_frag_node_page_t page(m_mtr, m_index);
    page.alloc(*this, bulk);
    first_loc = flst_get_first(free_lst, m_mtr);
  }

  if (fil_addr_is_null(first_loc)) {
    return (z_frag_entry_t());
  }

  flst_node_t *first_ptr = addr2ptr_x(first_loc);
  z_frag_entry_t entry(first_ptr, m_mtr);
  entry.remove(free_lst);
  entry.push_front(used_lst);
  return (entry);
}

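/** Allocate one fragment of the given length.  Scan the existing fragment
pages for one with enough free space; if none is found, allocate a new
fragment page.
@param[in]	bulk		true if this is a bulk operation.
@param[in]	len		length of the fragment to be allocated.
@param[out]	frag_page	the fragment page in which space was allocated.
@param[out]	entry		the fragment entry describing the fragment page.
@return the allocated fragment id, or FRAG_ID_NULL on failure. */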
frag_id_t z_first_page_t::alloc_fragment(bool bulk, ulint len,
                                         z_frag_page_t &frag_page,
                                         z_frag_entry_t &entry) {
  ut_ad(m_mtr != nullptr);

  frag_id_t frag_id = FRAG_ID_NULL;

  frag_page.set_mtr(m_mtr);
  frag_page.set_index(m_index);
  frag_page.set_block_null();

  const page_no_t first_page_no = get_page_no();

  /* Include the fragment node header in the requested size, so that there is
  extra room for the page directory entry and metadata even when the fragment
  is an exact fit. */
  const ulint look_size = len + frag_node_t::header_size();

  ut_ad(look_size <= z_frag_page_t::max_payload(m_index));

  flst_base_node_t *frag_lst = frag_list();

  /* Iterate through the list of frag entries in the page. */
  fil_addr_t loc = flst_get_first(frag_lst, m_mtr);

  while (!fil_addr_is_null(loc)) {
    flst_node_t *node = addr2ptr_x(loc);
    entry.reset(node);

    ulint big_free = entry.get_big_free_len();

    if (big_free >= look_size) {
      /* Double check if the information in the index
      entry matches with the fragment page. If not, update
      the index entry. */
      frag_page.load_x(entry.get_page_no());

      const ulint big_free_len_1 = frag_page.get_big_free_len();
      const ulint big_free_len_2 = entry.get_big_free_len();

      if (big_free_len_1 == big_free_len_2) {
        frag_id = frag_page.alloc_fragment(len, entry);
        if (frag_id != FRAG_ID_NULL) {
          break;
        }
      } else {
        entry.update(frag_page);

        /* Check again */
        big_free = entry.get_big_free_len();

        if (big_free >= look_size) {
          frag_id = frag_page.alloc_fragment(len, entry);
          if (frag_id != FRAG_ID_NULL) {
            break;
          }
        }
      }
    }

    loc = flst_get_next_addr(node, m_mtr);
    entry.reset(nullptr);
  }

  if (frag_id != FRAG_ID_NULL) {
    return (frag_id);
  }

  if (fil_addr_is_null(loc)) {
    /* Need to allocate a new fragment page. */
    buf_block_t *tmp_block = frag_page.alloc(*this, first_page_no + 1, bulk);

    if (tmp_block == nullptr) {
      return (FRAG_ID_NULL);
    }

    entry = alloc_frag_entry(bulk);

    if (entry.is_null()) {
      return (FRAG_ID_NULL);
    }

    entry.set_page_no(frag_page.get_page_no());
    frag_page.set_frag_entry(entry.get_self_addr());

    /* Update the index entry with new space information. */
    entry.update(frag_page);
  }

#ifdef UNIV_DEBUG
  /* Adding more checks to ensure that an alloc fragment doesn't fail
  for the selected fragment page. */
  fil_addr_t addr1 = frag_page.get_frag_entry();
  fil_addr_t addr2 = entry.get_self_addr();
  ut_ad(addr1.is_equal(addr2));

  const ulint big_free_len_1 = frag_page.get_big_free_len();
  const ulint big_free_len_2 = entry.get_big_free_len();
  ut_ad(big_free_len_1 == big_free_len_2);

  ut_ad(big_free_len_1 >= look_size);
  ut_ad(big_free_len_1 > len);
#endif /* UNIV_DEBUG */

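  /* At this point the fragment page was freshly allocated above and is
  guaranteed to have enough free space, so the allocation below must
  succeed. */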
  frag_id = frag_page.alloc_fragment(len, entry);

  ut_ad(frag_id != FRAG_ID_NULL);

  return (frag_id);
}

/** Print the page. */
std::ostream &z_first_page_t::print(std::ostream &out) const {
  print_index_entries(out);
  print_frag_entries(out);
  return (out);
}

/** Free all the z_frag_node_page_t pages. All the z_frag_node_page_t pages
are singly linked to each other.  The head of the list is maintained in the
first page. */
size_t z_first_page_t::free_all_frag_node_pages() {
  size_t n_pages_freed = 0;
  mtr_t local_mtr;
  mtr_start(&local_mtr);
  local_mtr.set_log_mode(m_mtr->get_log_mode());

  load_x(&local_mtr);

  while (true) {
    page_no_t page_no = get_frag_node_page_no();
    if (page_no == FIL_NULL) {
      break;
    }

    z_frag_node_page_t frag_node_page(&local_mtr, m_index);
    frag_node_page.load_x(page_no);
    page_no_t next_page = frag_node_page.get_next_page_no();
    set_frag_node_page_no(next_page);
    frag_node_page.dealloc();
    n_pages_freed++;

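    /* Commit and restart the local mini-transaction after each freed page,
    so that page latches are not accumulated across the whole list. */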
    restart_mtr(&local_mtr);
  }
  mtr_commit(&local_mtr);

  return (n_pages_freed);
}

/** Free all the index pages. */
size_t z_first_page_t::free_all_index_pages() {
  size_t n_pages_freed = 0;
  mtr_t local_mtr;
  mtr_start(&local_mtr);
  local_mtr.set_log_mode(m_mtr->get_log_mode());

  load_x(&local_mtr);
  while (true) {
    page_no_t page_no = get_index_page_no();
    if (page_no == FIL_NULL) {
      break;
    }
    z_index_page_t index_page(m_mtr, m_index);
    index_page.load_x(page_no);
    page_no_t next_page = index_page.get_next_page_no();
    set_index_page_no(next_page);
    index_page.dealloc();
    n_pages_freed++;
    restart_mtr(&local_mtr);
  }
  mtr_commit(&local_mtr);
  return (n_pages_freed);
}

ulint z_first_page_t::size_of_index_entries() const {
  return (z_index_entry_t::SIZE * get_n_index_entries());
}

void z_first_page_t::init_index_entries() {
  flst_base_node_t *flst = free_list();
  ulint n = get_n_index_entries();
  for (ulint i = 0; i < n; ++i) {
    flst_node_t *ptr = frame() + OFFSET_INDEX_BEGIN;
    ptr += (i * z_index_entry_t::SIZE);
    z_index_entry_t entry(ptr, m_mtr);
    entry.init();
    entry.push_back(flst);
  }
}

void z_first_page_t::load_entry_s(fil_addr_t &addr, z_index_entry_t &entry) {
  entry.load_s(addr);
}

void z_first_page_t::load_entry_x(fil_addr_t &addr, z_index_entry_t &entry) {
  entry.load_x(addr);
}

/** Deallocate the first page of a compressed LOB. */
void z_first_page_t::dealloc() {
  ut_ad(m_mtr != nullptr);
  btr_page_free_low(m_index, m_block, ULINT_UNDEFINED, m_mtr);
  m_block = nullptr;
}

buf_block_t *z_first_page_t::load_x(const page_id_t &page_id,
                                    const page_size_t &page_size) {
  m_block = buf_page_get(page_id, page_size, RW_X_LATCH, m_mtr);

#ifdef UNIV_DEBUG
  /* Dump the page into the log file, if the page type does not match
  one of the valid first page types. */
  page_type_t page_type = get_page_type();

  switch (page_type) {
    case FIL_PAGE_TYPE_ZBLOB:
    case FIL_PAGE_TYPE_ZLOB_FIRST:
      /* Valid first page type for compressed LOB. */
      break;
    default:
      ut_print_buf(std::cout, m_block->frame, page_size.physical());
      ut_error;
  }
#endif /* UNIV_DEBUG */

  return (m_block);
}

/** Increment the LOB version by 1. */
uint32_t z_first_page_t::incr_lob_version() {
  ut_ad(m_mtr != nullptr);

  const uint32_t cur = get_lob_version();
  const uint32_t val = cur + 1;
  mlog_write_ulint(frame() + OFFSET_LOB_VERSION, val, MLOG_4BYTES, m_mtr);

  return (val);
}

/** When the bit is set, the LOB is not partially updatable anymore.
Enable the bit.
@param[in]	trx	the current transaction.*/
void z_first_page_t::mark_cannot_be_partially_updated(trx_t *trx) {
  const trx_id_t trxid = (trx == nullptr) ? 0 : trx->id;
  const undo_no_t undo_no = (trx == nullptr) ? 0 : (trx->undo_no - 1);

#ifdef LOB_DEBUG
  std::cout << "thread=" << std::this_thread::get_id()
            << ", ZLOB first page=" << get_page_id()
            << ", mark_cannot_be_partially_updated()" << std::endl;
#endif /* LOB_DEBUG */

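  /* Set bit 0 of the flag byte; once set, the LOB can no longer be partially
  updated. */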
  uint8_t flags = get_flags();
  flags |= 0x01;
  mlog_write_ulint(frame() + OFFSET_FLAGS, flags, MLOG_1BYTE, m_mtr);

  set_last_trx_id(trxid);
  set_last_trx_undo_no(undo_no);
}

size_t z_first_page_t::free_all_data_pages() {
  size_t n_pages_freed = 0;
  mtr_t local_mtr;
  mtr_start(&local_mtr);
  local_mtr.set_log_mode(m_mtr->get_log_mode());
  load_x(&local_mtr);

  flst_base_node_t *flst = index_list();
  fil_addr_t node_loc = flst_get_first(flst, &local_mtr);

  z_index_entry_t cur_entry(&local_mtr, m_index);

  while (!fil_addr_is_null(node_loc)) {
    flst_node_t *node = addr2ptr_x(node_loc, &local_mtr);
    cur_entry.reset(node);
    n_pages_freed += cur_entry.free_data_pages(&local_mtr);

    flst_base_node_t *vers = cur_entry.get_versions_list();
    fil_addr_t ver_loc = flst_get_first(vers, &local_mtr);

    while (!fil_addr_is_null(ver_loc)) {
      flst_node_t *ver_node = addr2ptr_x(ver_loc, &local_mtr);
      z_index_entry_t vers_entry(ver_node, &local_mtr, m_index);
      n_pages_freed += vers_entry.free_data_pages(&local_mtr);
      ver_loc = vers_entry.get_next();

      restart_mtr(&local_mtr);
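      /* The mini-transaction was restarted above, so the node pointer is no
      longer valid; look it up again from its file address. */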
      node = addr2ptr_x(node_loc, &local_mtr);
      cur_entry.reset(node);
    }

    node_loc = cur_entry.get_next();
    cur_entry.reset(nullptr);
    restart_mtr(&local_mtr);
  }
  mtr_commit(&local_mtr);

  return (n_pages_freed);
}

#ifdef UNIV_DEBUG
bool z_first_page_t::validate_low() {
  mtr_t local_mtr;
  mtr_start(&local_mtr);
  local_mtr.set_log_mode(m_mtr->get_log_mode());
  load_x(&local_mtr);

  ut_ad(get_page_type() == FIL_PAGE_TYPE_ZLOB_FIRST);

  flst_base_node_t *flst = index_list();
  fil_addr_t node_loc = flst_get_first(flst, &local_mtr);

  z_index_entry_t cur_entry(&local_mtr, m_index);

  while (!fil_addr_is_null(node_loc)) {
    flst_node_t *node = addr2ptr_x(node_loc, &local_mtr);
    cur_entry.reset(node);

    ut_ad(z_validate_strm(m_index, cur_entry, &local_mtr));

    flst_base_node_t *vers = cur_entry.get_versions_list();
    fil_addr_t ver_loc = flst_get_first(vers, &local_mtr);

    while (!fil_addr_is_null(ver_loc)) {
      flst_node_t *ver_node = addr2ptr_x(ver_loc, &local_mtr);
      z_index_entry_t vers_entry(ver_node, &local_mtr, m_index);
      ut_ad(z_validate_strm(m_index, vers_entry, &local_mtr));
      ver_loc = vers_entry.get_next();
      restart_mtr(&local_mtr);
      node = addr2ptr_x(node_loc, &local_mtr);
      cur_entry.reset(node);
    }

    node_loc = cur_entry.get_next();
    cur_entry.reset(nullptr);

    restart_mtr(&local_mtr);
  }

  mtr_commit(&local_mtr);
  return (true);
}
#endif /* UNIV_DEBUG */

void z_first_page_t::import(trx_id_t trx_id) {
  set_trx_id_no_redo(trx_id);
  set_last_trx_id_no_redo(trx_id);

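  /* Stamp each index entry in the first page with the given transaction id.
  The *_no_redo setters are used, so no redo log records are generated. */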
  ulint n = get_n_index_entries();
  for (ulint i = 0; i < n; ++i) {
    flst_node_t *ptr = frame() + OFFSET_INDEX_BEGIN;
    ptr += (i * z_index_entry_t::SIZE);
    z_index_entry_t entry(ptr);
    entry.set_trx_id_no_redo(trx_id);
    entry.set_trx_id_modifier_no_redo(trx_id);
  }
}

size_t z_first_page_t::free_all_frag_pages_old() {
  size_t n_pages_freed = 0;
  mtr_t local_mtr;
  mtr_start(&local_mtr);
  local_mtr.set_log_mode(m_mtr->get_log_mode());
  load_x(&local_mtr);

  /* There is no list of fragment pages maintained.  We have to identify the
  list of fragment pages from the following two lists. */
  flst_base_node_t *frag_lst = frag_list();
  flst_base_node_t *free_frag_lst = free_frag_list();

  std::vector<flst_base_node_t *> two_list = {frag_lst, free_frag_lst};

  for (auto cur_lst : two_list) {
    while (flst_get_len(cur_lst) > 0) {
      fil_addr_t loc = flst_get_first(cur_lst, &local_mtr);
      flst_node_t *node = addr2ptr_x(loc, &local_mtr);
      z_frag_entry_t entry(node, &local_mtr);
      page_no_t frag_page_no = entry.get_page_no();
      loc = entry.get_next();
      entry.remove(cur_lst);

      if (frag_page_no == FIL_NULL) {
        continue;
      }

      /* Multiple entries can point to the same fragment page.  So scan
      through the list and remove all entries pointing to the same fragment
      page. */
      while (!fil_addr_is_null(loc)) {
        node = addr2ptr_x(loc, &local_mtr);
        z_frag_entry_t entry2(node, &local_mtr);

        loc = entry2.get_next();
        if (frag_page_no == entry2.get_page_no()) {
          entry2.set_page_no(FIL_NULL);
          entry2.remove(cur_lst);
        }
      }

      /* Free the fragment page. */
      entry.free_frag_page(&local_mtr, m_index);
      n_pages_freed++;
      restart_mtr(&local_mtr);
    }
  }

  mtr_commit(&local_mtr);
  return (n_pages_freed);
}

size_t z_first_page_t::free_all_frag_pages() {
  size_t n_pages_freed = 0;
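  /* A stored fragment page number of 0 indicates an old style LOB in which
  the list of fragment pages was not maintained in the first page (see
  verify_frag_page_no()).  In that case the fragment pages have to be derived
  from the fragment entry lists. */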
  if (get_frag_page_no() == 0) {
    n_pages_freed = free_all_frag_pages_old();
  } else {
    n_pages_freed = free_all_frag_pages_new();
  }
  return (n_pages_freed);
}

size_t z_first_page_t::free_all_frag_pages_new() {
  size_t n_pages_freed = 0;
  mtr_t local_mtr;
  mtr_start(&local_mtr);
  local_mtr.set_log_mode(m_mtr->get_log_mode());
  load_x(&local_mtr);

  while (true) {
    page_no_t page_no = get_frag_page_no(&local_mtr);
    if (page_no == FIL_NULL) {
      break;
    }
    z_frag_page_t frag_page(&local_mtr, m_index);
    frag_page.load_x(page_no);
    page_no_t next_page = frag_page.get_next_page_no();
    set_frag_page_no(&local_mtr, next_page);
    frag_page.dealloc();
    n_pages_freed++;
    restart_mtr(&local_mtr);
  }
  mtr_commit(&local_mtr);
  return (n_pages_freed);
}

size_t z_first_page_t::destroy() {
  size_t n_pages_freed = 0;
  n_pages_freed += free_all_data_pages();
  n_pages_freed += free_all_frag_pages();
  n_pages_freed += free_all_frag_node_pages();
  n_pages_freed += free_all_index_pages();
  dealloc();
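  /* Count the first page itself, which was freed by dealloc() above. */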
  n_pages_freed++;
  return (n_pages_freed);
}

#ifdef UNIV_DEBUG
bool z_first_page_t::verify_frag_page_no() {
  mtr_t local_mtr;
  mtr_start(&local_mtr);
  page_no_t page_no = get_frag_page_no();

  /* If the page_no is 0, then FIL_PAGE_PREV is not used to store the list of
  fragment pages.  So modifying it is not allowed and hence verification is
  not needed. */
  ut_ad(page_no != 0);

  if (page_no == FIL_NULL) {
    return (true);
  }

  z_frag_page_t frag_page(&local_mtr, m_index);
  frag_page.load_x(page_no);
  page_type_t ptype = frag_page.get_page_type();
  mtr_commit(&local_mtr);

  ut_ad(ptype == FIL_PAGE_TYPE_ZLOB_FRAG);
  return (ptype == FIL_PAGE_TYPE_ZLOB_FRAG);
}
#endif /* UNIV_DEBUG */

} /* namespace lob */