Drizzled Public API Documentation

buf0lru.cc
1 /*****************************************************************************
2 
3 Copyright (C) 1995, 2010, Innobase Oy. All Rights Reserved.
4 
5 This program is free software; you can redistribute it and/or modify it under
6 the terms of the GNU General Public License as published by the Free Software
7 Foundation; version 2 of the License.
8 
9 This program is distributed in the hope that it will be useful, but WITHOUT
10 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
11 FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
12 
13 You should have received a copy of the GNU General Public License along with
14 this program; if not, write to the Free Software Foundation, Inc., 51 Franklin
15 St, Fifth Floor, Boston, MA 02110-1301 USA
16 
17 *****************************************************************************/
18 
19 /**************************************************/
26 #include <config.h>
27 #include "buf0lru.h"
28 
29 #ifdef UNIV_NONINL
30 #include "buf0lru.ic"
31 #endif
32 
33 #include "ut0byte.h"
34 #include "ut0lst.h"
35 #include "ut0rnd.h"
36 #include "sync0sync.h"
37 #include "sync0rw.h"
38 #include "hash0hash.h"
39 #include "os0sync.h"
40 #include "fil0fil.h"
41 #include "btr0btr.h"
42 #include "buf0buddy.h"
43 #include "buf0buf.h"
44 #include "buf0flu.h"
45 #include "buf0rea.h"
46 #include "btr0sea.h"
47 #include "ibuf0ibuf.h"
48 #include "os0file.h"
49 #include "page0zip.h"
50 #include "log0recv.h"
51 #include "srv0srv.h"
52 
60 #define BUF_LRU_OLD_TOLERANCE 20
61 
65 #define BUF_LRU_NON_OLD_MIN_LEN 5
66 #if BUF_LRU_NON_OLD_MIN_LEN >= BUF_LRU_OLD_MIN_LEN
67 # error "BUF_LRU_NON_OLD_MIN_LEN >= BUF_LRU_OLD_MIN_LEN"
68 #endif
69 
73 #define BUF_LRU_DROP_SEARCH_HASH_SIZE 1024
74 
77 static ibool buf_lru_switched_on_innodb_mon = FALSE;
78 
79 /******************************************************************/
88 /* @{ */
89 
93 #define BUF_LRU_STAT_N_INTERVAL 50
94 
97 #define BUF_LRU_IO_TO_UNZIP_FACTOR 50
98 
101 static buf_LRU_stat_t buf_LRU_stat_arr[BUF_LRU_STAT_N_INTERVAL];
102 
104 static ulint buf_LRU_stat_arr_ind;
105 
109 
113 
114 /* @} */
115 
119 UNIV_INTERN uint buf_LRU_old_threshold_ms;
120 /* @} */
121 
122 /******************************************************************/
132 static
133 enum buf_page_state
134 buf_LRU_block_remove_hashed_page(
135 /*=============================*/
136  buf_page_t* bpage,
139  ibool zip);
141 /******************************************************************/
143 static
144 void
145 buf_LRU_block_free_hashed_page(
146 /*===========================*/
147  buf_block_t* block);
150 /******************************************************************/
154 UNIV_INLINE
155 ibool
156 buf_LRU_evict_from_unzip_LRU(
157 /*=========================*/
158  buf_pool_t* buf_pool)
159 {
160  ulint io_avg;
161  ulint unzip_avg;
162 
163  ut_ad(buf_pool_mutex_own(buf_pool));
164 
165  /* If the unzip_LRU list is empty, we can only use the LRU. */
166  if (UT_LIST_GET_LEN(buf_pool->unzip_LRU) == 0) {
167  return(FALSE);
168  }
169 
170  /* If unzip_LRU is at most 10% of the size of the LRU list,
171  then use the LRU. This slack allows us to keep hot
172  decompressed pages in the buffer pool. */
173  if (UT_LIST_GET_LEN(buf_pool->unzip_LRU)
174  <= UT_LIST_GET_LEN(buf_pool->LRU) / 10) {
175  return(FALSE);
176  }
177 
178  /* If eviction hasn't started yet, we assume by default
179  that a workload is disk bound. */
180  if (buf_pool->freed_page_clock == 0) {
181  return(TRUE);
182  }
183 
184  /* Calculate the average over past intervals, and add the values
185  of the current interval. */
186  io_avg = buf_LRU_stat_sum.io / BUF_LRU_STAT_N_INTERVAL
188  unzip_avg = buf_LRU_stat_sum.unzip / BUF_LRU_STAT_N_INTERVAL
190 
191  /* Decide based on our formula. If the load is I/O bound
192  (unzip_avg is smaller than the weighted io_avg), evict an
193  uncompressed frame from unzip_LRU. Otherwise we assume that
194  the load is CPU bound and evict from the regular LRU. */
195  return(unzip_avg <= io_avg * BUF_LRU_IO_TO_UNZIP_FACTOR);
196 }
197 
198 /******************************************************************/
201 static
202 void
203 buf_LRU_drop_page_hash_batch(
204 /*=========================*/
205  ulint space_id,
206  ulint zip_size,
208  const ulint* arr,
209  ulint count)
210 {
211  ulint i;
212 
213  ut_ad(arr != NULL);
214  ut_ad(count <= BUF_LRU_DROP_SEARCH_HASH_SIZE);
215 
216  for (i = 0; i < count; ++i) {
217  btr_search_drop_page_hash_when_freed(space_id, zip_size,
218  arr[i]);
219  }
220 }
221 
222 /******************************************************************/
227 static
228 void
229 buf_LRU_drop_page_hash_for_tablespace(
230 /*==================================*/
231  buf_pool_t* buf_pool,
232  ulint id)
233 {
234  buf_page_t* bpage;
235  ulint* page_arr;
236  ulint num_entries;
237  ulint zip_size;
238 
239  zip_size = fil_space_get_zip_size(id);
240 
241  if (UNIV_UNLIKELY(zip_size == ULINT_UNDEFINED)) {
242  /* Somehow, the tablespace does not exist. Nothing to drop. */
243  ut_ad(0);
244  return;
245  }
246 
247  page_arr = static_cast<unsigned long *>(ut_malloc(
248  sizeof(ulint) * BUF_LRU_DROP_SEARCH_HASH_SIZE));
249 
250  buf_pool_mutex_enter(buf_pool);
251  num_entries = 0;
252 
253 scan_again:
254  bpage = UT_LIST_GET_LAST(buf_pool->LRU);
255 
256  while (bpage != NULL) {
257  buf_page_t* prev_bpage;
258  ibool is_fixed;
259 
260  prev_bpage = UT_LIST_GET_PREV(LRU, bpage);
261 
262  ut_a(buf_page_in_file(bpage));
263 
265  || bpage->space != id
266  || bpage->io_fix != BUF_IO_NONE) {
267  /* Compressed pages are never hashed.
268  Skip blocks of other tablespaces.
269  Skip I/O-fixed blocks (to be dealt with later). */
270 next_page:
271  bpage = prev_bpage;
272  continue;
273  }
274 
275  mutex_enter(&((buf_block_t*) bpage)->mutex);
276  is_fixed = bpage->buf_fix_count > 0
277  || !((buf_block_t*) bpage)->is_hashed;
278  mutex_exit(&((buf_block_t*) bpage)->mutex);
279 
280  if (is_fixed) {
281  goto next_page;
282  }
283 
284  /* Store the page number so that we can drop the hash
285  index in a batch later. */
286  page_arr[num_entries] = bpage->offset;
287  ut_a(num_entries < BUF_LRU_DROP_SEARCH_HASH_SIZE);
288  ++num_entries;
289 
290  if (num_entries < BUF_LRU_DROP_SEARCH_HASH_SIZE) {
291  goto next_page;
292  }
293 
294  /* Array full. We release the buf_pool->mutex to obey
295  the latching order. */
296  buf_pool_mutex_exit(buf_pool);
297 
298  buf_LRU_drop_page_hash_batch(
299  id, zip_size, page_arr, num_entries);
300 
301  num_entries = 0;
302 
303  buf_pool_mutex_enter(buf_pool);
304 
305  /* Note that we released the buf_pool mutex above
306  after reading the prev_bpage during processing of a
307  page_hash_batch (i.e.: when the array was full).
308  Because prev_bpage could belong to a compressed-only
309  block, it may have been relocated, and thus the
310  pointer cannot be trusted. Because bpage is of type
311  buf_block_t, it is safe to dereference.
312 
313  bpage can change in the LRU list. This is OK because
314  this function is a 'best effort' to drop as many
315  search hash entries as possible and it does not
316  guarantee that ALL such entries will be dropped. */
317 
318  /* If, however, bpage has been removed from LRU list
319  to the free list then we should restart the scan.
320  bpage->state is protected by buf_pool mutex. */
321  if (bpage
323  goto scan_again;
324  }
325  }
326 
327  buf_pool_mutex_exit(buf_pool);
328 
329  /* Drop any remaining batch of search hashed pages. */
330  buf_LRU_drop_page_hash_batch(id, zip_size, page_arr, num_entries);
331  ut_free(page_arr);
332 }
333 
334 /******************************************************************/
338 static
339 void
340 buf_LRU_invalidate_tablespace_buf_pool_instance(
341 /*============================================*/
342  buf_pool_t* buf_pool,
343  ulint id)
344 {
345  buf_page_t* bpage;
346  ibool all_freed;
347 
348 scan_again:
349  buf_pool_mutex_enter(buf_pool);
350 
351  all_freed = TRUE;
352 
353  bpage = UT_LIST_GET_LAST(buf_pool->LRU);
354 
355  while (bpage != NULL) {
356  buf_page_t* prev_bpage;
357  ibool prev_bpage_buf_fix = FALSE;
358 
359  ut_a(buf_page_in_file(bpage));
360 
361  prev_bpage = UT_LIST_GET_PREV(LRU, bpage);
362 
363  /* bpage->space and bpage->io_fix are protected by
364  buf_pool->mutex and block_mutex. It is safe to check
365  them while holding buf_pool->mutex only. */
366 
367  if (buf_page_get_space(bpage) != id) {
368  /* Skip this block, as it does not belong to
369  the space that is being invalidated. */
370  } else if (buf_page_get_io_fix(bpage) != BUF_IO_NONE) {
371  /* We cannot remove this page during this scan
372  yet; maybe the system is currently reading it
373  in, or flushing the modifications to the file */
374 
375  all_freed = FALSE;
376  } else {
377  mutex_t* block_mutex = buf_page_get_mutex(bpage);
378  mutex_enter(block_mutex);
379 
380  if (bpage->buf_fix_count > 0) {
381 
382  /* We cannot remove this page during
383  this scan yet; maybe the system is
384  currently reading it in, or flushing
385  the modifications to the file */
386 
387  all_freed = FALSE;
388 
389  goto next_page;
390  }
391 
392 #ifdef UNIV_DEBUG
393  if (buf_debug_prints) {
394  fprintf(stderr,
395  "Dropping space %lu page %lu\n",
396  (ulong) buf_page_get_space(bpage),
397  (ulong) buf_page_get_page_no(bpage));
398  }
399 #endif
400  if (buf_page_get_state(bpage) != BUF_BLOCK_FILE_PAGE) {
401  /* This is a compressed-only block
402  descriptor. Ensure that prev_bpage
403  cannot be relocated when bpage is freed. */
404  if (UNIV_LIKELY(prev_bpage != NULL)) {
405  switch (buf_page_get_state(
406  prev_bpage)) {
407  case BUF_BLOCK_FILE_PAGE:
408  /* Descriptors of uncompressed
409  blocks will not be relocated,
410  because we are holding the
411  buf_pool->mutex. */
412  break;
413  case BUF_BLOCK_ZIP_PAGE:
414  case BUF_BLOCK_ZIP_DIRTY:
415  /* Descriptors of compressed-
416  only blocks can be relocated,
417  unless they are buffer-fixed.
418  Because both bpage and
419  prev_bpage are protected by
420  buf_pool_zip_mutex, it is
421  not necessary to acquire
422  further mutexes. */
423  ut_ad(&buf_pool->zip_mutex
424  == block_mutex);
425  ut_ad(mutex_own(block_mutex));
426  prev_bpage_buf_fix = TRUE;
427  prev_bpage->buf_fix_count++;
428  break;
429  default:
430  ut_error;
431  }
432  }
433  } else if (((buf_block_t*) bpage)->is_hashed) {
434  ulint page_no;
435  ulint zip_size;
436 
437  buf_pool_mutex_exit(buf_pool);
438 
439  zip_size = buf_page_get_zip_size(bpage);
440  page_no = buf_page_get_page_no(bpage);
441 
442  mutex_exit(block_mutex);
443 
444  /* Note that the following call will acquire
445  an S-latch on the page */
446 
448  id, zip_size, page_no);
449  goto scan_again;
450  }
451 
452  if (bpage->oldest_modification != 0) {
453 
454  buf_flush_remove(bpage);
455  }
456 
457  /* Remove from the LRU list. */
458 
459  if (buf_LRU_block_remove_hashed_page(bpage, TRUE)
460  != BUF_BLOCK_ZIP_FREE) {
461  buf_LRU_block_free_hashed_page((buf_block_t*)
462  bpage);
463  } else {
464  /* The block_mutex should have been
465  released by buf_LRU_block_remove_hashed_page()
466  when it returns BUF_BLOCK_ZIP_FREE. */
467  ut_ad(block_mutex == &buf_pool->zip_mutex);
468  ut_ad(!mutex_own(block_mutex));
469 
470  if (prev_bpage_buf_fix) {
471  /* We temporarily buffer-fixed
472  prev_bpage, so that
473  buf_buddy_free() could not
474  relocate it, in case it was a
475  compressed-only block
476  descriptor. */
477 
478  mutex_enter(block_mutex);
479  ut_ad(prev_bpage->buf_fix_count > 0);
480  prev_bpage->buf_fix_count--;
481  mutex_exit(block_mutex);
482  }
483 
484  goto next_page_no_mutex;
485  }
486 next_page:
487  mutex_exit(block_mutex);
488  }
489 
490 next_page_no_mutex:
491  bpage = prev_bpage;
492  }
493 
494  buf_pool_mutex_exit(buf_pool);
495 
496  if (!all_freed) {
497  os_thread_sleep(20000);
498 
499  goto scan_again;
500  }
501 }
502 
503 /******************************************************************/
506 UNIV_INTERN
507 void
509 /*==========================*/
510  ulint id)
511 {
512  ulint i;
513 
514  /* Before we attempt to drop pages one by one we first
515  attempt to drop page hash index entries in batches to make
516  it more efficient. The batching attempt is a best effort
517  attempt and does not guarantee that all pages hash entries
518  will be dropped. We get rid of remaining page hash entries
519  one by one below. */
520  for (i = 0; i < srv_buf_pool_instances; i++) {
521  buf_pool_t* buf_pool;
522 
523  buf_pool = buf_pool_from_array(i);
524  buf_LRU_drop_page_hash_for_tablespace(buf_pool, id);
525  buf_LRU_invalidate_tablespace_buf_pool_instance(buf_pool, id);
526  }
527 }
528 
529 /********************************************************************/
531 UNIV_INTERN
532 void
534 /*=====================*/
535  buf_page_t* bpage)
536 {
537  buf_page_t* b;
538  buf_pool_t* buf_pool = buf_pool_from_bpage(bpage);
539 
540  ut_ad(buf_pool_mutex_own(buf_pool));
542 
543  /* Find the first successor of bpage in the LRU list
544  that is in the zip_clean list. */
545  b = bpage;
546  do {
547  b = UT_LIST_GET_NEXT(LRU, b);
548  } while (b && buf_page_get_state(b) != BUF_BLOCK_ZIP_PAGE);
549 
550  /* Insert bpage before b, i.e., after the predecessor of b. */
551  if (b) {
552  b = UT_LIST_GET_PREV(list, b);
553  }
554 
555  if (b) {
556  UT_LIST_INSERT_AFTER(list, buf_pool->zip_clean, b, bpage);
557  } else {
558  UT_LIST_ADD_FIRST(list, buf_pool->zip_clean, bpage);
559  }
560 }
561 
562 /******************************************************************/
566 UNIV_INLINE
567 ibool
568 buf_LRU_free_from_unzip_LRU_list(
569 /*=============================*/
570  buf_pool_t* buf_pool,
571  ulint n_iterations)
578 {
579  buf_block_t* block;
580  ulint distance;
581 
582  ut_ad(buf_pool_mutex_own(buf_pool));
583 
584  /* Theoratically it should be much easier to find a victim
585  from unzip_LRU as we can choose even a dirty block (as we'll
586  be evicting only the uncompressed frame). In a very unlikely
587  eventuality that we are unable to find a victim from
588  unzip_LRU, we fall back to the regular LRU list. We do this
589  if we have done five iterations so far. */
590 
591  if (UNIV_UNLIKELY(n_iterations >= 5)
592  || !buf_LRU_evict_from_unzip_LRU(buf_pool)) {
593 
594  return(FALSE);
595  }
596 
597  distance = 100 + (n_iterations
598  * UT_LIST_GET_LEN(buf_pool->unzip_LRU)) / 5;
599 
600  for (block = UT_LIST_GET_LAST(buf_pool->unzip_LRU);
601  UNIV_LIKELY(block != NULL) && UNIV_LIKELY(distance > 0);
602  block = UT_LIST_GET_PREV(unzip_LRU, block), distance--) {
603 
604  enum buf_lru_free_block_status freed;
605 
607  ut_ad(block->in_unzip_LRU_list);
608  ut_ad(block->page.in_LRU_list);
609 
610  mutex_enter(&block->mutex);
611  freed = buf_LRU_free_block(&block->page, FALSE);
612  mutex_exit(&block->mutex);
613 
614  switch (freed) {
615  case BUF_LRU_FREED:
616  return(TRUE);
617 
619  /* If we failed to relocate, try
620  regular LRU eviction. */
621  return(FALSE);
622 
623  case BUF_LRU_NOT_FREED:
624  /* The block was buffer-fixed or I/O-fixed.
625  Keep looking. */
626  continue;
627  }
628 
629  /* inappropriate return value from
630  buf_LRU_free_block() */
631  ut_error;
632  }
633 
634  return(FALSE);
635 }
636 
637 /******************************************************************/
640 UNIV_INLINE
641 ibool
642 buf_LRU_free_from_common_LRU_list(
643 /*==============================*/
644  buf_pool_t* buf_pool,
645  ulint n_iterations)
652 {
653  buf_page_t* bpage;
654  ulint distance;
655 
656  ut_ad(buf_pool_mutex_own(buf_pool));
657 
658  distance = 100 + (n_iterations * buf_pool->curr_size) / 10;
659 
660  for (bpage = UT_LIST_GET_LAST(buf_pool->LRU);
661  UNIV_LIKELY(bpage != NULL) && UNIV_LIKELY(distance > 0);
662  bpage = UT_LIST_GET_PREV(LRU, bpage), distance--) {
663 
664  enum buf_lru_free_block_status freed;
665  unsigned accessed;
666  mutex_t* block_mutex
667  = buf_page_get_mutex(bpage);
668 
669  ut_ad(buf_page_in_file(bpage));
670  ut_ad(bpage->in_LRU_list);
671 
672  mutex_enter(block_mutex);
673  accessed = buf_page_is_accessed(bpage);
674  freed = buf_LRU_free_block(bpage, TRUE);
675  mutex_exit(block_mutex);
676 
677  switch (freed) {
678  case BUF_LRU_FREED:
679  /* Keep track of pages that are evicted without
680  ever being accessed. This gives us a measure of
681  the effectiveness of readahead */
682  if (!accessed) {
683  ++buf_pool->stat.n_ra_pages_evicted;
684  }
685  return(TRUE);
686 
687  case BUF_LRU_NOT_FREED:
688  /* The block was dirty, buffer-fixed, or I/O-fixed.
689  Keep looking. */
690  continue;
691 
693  /* This should never occur, because we
694  want to discard the compressed page too. */
695  break;
696  }
697 
698  /* inappropriate return value from
699  buf_LRU_free_block() */
700  ut_error;
701  }
702 
703  return(FALSE);
704 }
705 
706 /******************************************************************/
709 UNIV_INTERN
710 ibool
712 /*==========================*/
713  buf_pool_t* buf_pool,
715  ulint n_iterations)
724 {
725  ibool freed = FALSE;
726 
727  buf_pool_mutex_enter(buf_pool);
728 
729  freed = buf_LRU_free_from_unzip_LRU_list(buf_pool, n_iterations);
730 
731  if (!freed) {
732  freed = buf_LRU_free_from_common_LRU_list(
733  buf_pool, n_iterations);
734  }
735 
736  if (!freed) {
737  buf_pool->LRU_flush_ended = 0;
738  } else if (buf_pool->LRU_flush_ended > 0) {
739  buf_pool->LRU_flush_ended--;
740  }
741 
742  buf_pool_mutex_exit(buf_pool);
743 
744  return(freed);
745 }
746 
747 /******************************************************************/
755 UNIV_INTERN
756 void
758 /*============================*/
759  buf_pool_t* buf_pool)
760 {
761 
762  if (buf_pool == NULL) {
763  ulint i;
764 
765  for (i = 0; i < srv_buf_pool_instances; i++) {
766  buf_pool = buf_pool_from_array(i);
768  }
769  } else {
770  buf_pool_mutex_enter(buf_pool);
771 
772  while (buf_pool->LRU_flush_ended > 0) {
773 
774  buf_pool_mutex_exit(buf_pool);
775 
776  buf_LRU_search_and_free_block(buf_pool, 1);
777 
778  buf_pool_mutex_enter(buf_pool);
779  }
780 
781  buf_pool_mutex_exit(buf_pool);
782  }
783 }
784 
785 /******************************************************************/
790 UNIV_INTERN
791 ibool
793 /*==============================*/
794 {
795  ulint i;
796  ibool ret = FALSE;
797 
798  for (i = 0; i < srv_buf_pool_instances && !ret; i++) {
799  buf_pool_t* buf_pool;
800 
801  buf_pool = buf_pool_from_array(i);
802 
803  buf_pool_mutex_enter(buf_pool);
804 
805  if (!recv_recovery_on
806  && UT_LIST_GET_LEN(buf_pool->free)
807  + UT_LIST_GET_LEN(buf_pool->LRU)
808  < buf_pool->curr_size / 4) {
809 
810  ret = TRUE;
811  }
812 
813  buf_pool_mutex_exit(buf_pool);
814  }
815 
816  return(ret);
817 }
818 
819 /******************************************************************/
823 UNIV_INTERN
826 /*==================*/
827  buf_pool_t* buf_pool)
828 {
829  buf_block_t* block;
830 
831  ut_ad(buf_pool_mutex_own(buf_pool));
832 
833  block = (buf_block_t*) UT_LIST_GET_FIRST(buf_pool->free);
834 
835  if (block) {
836 
837  ut_ad(block->page.in_free_list);
838  ut_d(block->page.in_free_list = FALSE);
839  ut_ad(!block->page.in_flush_list);
840  ut_ad(!block->page.in_LRU_list);
841  ut_a(!buf_page_in_file(&block->page));
842  UT_LIST_REMOVE(list, buf_pool->free, (&block->page));
843 
844  mutex_enter(&block->mutex);
845 
847  UNIV_MEM_ALLOC(block->frame, UNIV_PAGE_SIZE);
848 
849  ut_ad(buf_pool_from_block(block) == buf_pool);
850 
851  mutex_exit(&block->mutex);
852  }
853 
854  return(block);
855 }
856 
857 /******************************************************************/
862 UNIV_INTERN
865 /*===================*/
866  buf_pool_t* buf_pool)
867 {
868  buf_block_t* block = NULL;
869  ibool freed;
870  ulint n_iterations = 1;
871  ibool mon_value_was = FALSE;
872  ibool started_monitor = FALSE;
873 loop:
874  buf_pool_mutex_enter(buf_pool);
875 
876  if (!recv_recovery_on && UT_LIST_GET_LEN(buf_pool->free)
877  + UT_LIST_GET_LEN(buf_pool->LRU) < buf_pool->curr_size / 20) {
878  ut_print_timestamp(stderr);
879 
880  fprintf(stderr,
881  " InnoDB: ERROR: over 95 percent of the buffer pool"
882  " is occupied by\n"
883  "InnoDB: lock heaps or the adaptive hash index!"
884  " Check that your\n"
885  "InnoDB: transactions do not set too many row locks.\n"
886  "InnoDB: Your buffer pool size is %lu MB."
887  " Maybe you should make\n"
888  "InnoDB: the buffer pool bigger?\n"
889  "InnoDB: We intentionally generate a seg fault"
890  " to print a stack trace\n"
891  "InnoDB: on Linux!\n",
892  (ulong) (buf_pool->curr_size
893  / (1024 * 1024 / UNIV_PAGE_SIZE)));
894 
895  ut_error;
896 
897  } else if (!recv_recovery_on
898  && (UT_LIST_GET_LEN(buf_pool->free)
899  + UT_LIST_GET_LEN(buf_pool->LRU))
900  < buf_pool->curr_size / 3) {
901 
902  if (!buf_lru_switched_on_innodb_mon) {
903 
904  /* Over 67 % of the buffer pool is occupied by lock
905  heaps or the adaptive hash index. This may be a memory
906  leak! */
907 
908  ut_print_timestamp(stderr);
909  fprintf(stderr,
910  " InnoDB: WARNING: over 67 percent of"
911  " the buffer pool is occupied by\n"
912  "InnoDB: lock heaps or the adaptive"
913  " hash index! Check that your\n"
914  "InnoDB: transactions do not set too many"
915  " row locks.\n"
916  "InnoDB: Your buffer pool size is %lu MB."
917  " Maybe you should make\n"
918  "InnoDB: the buffer pool bigger?\n"
919  "InnoDB: Starting the InnoDB Monitor to print"
920  " diagnostics, including\n"
921  "InnoDB: lock heap and hash index sizes.\n",
922  (ulong) (buf_pool->curr_size
923  / (1024 * 1024 / UNIV_PAGE_SIZE)));
924 
925  buf_lru_switched_on_innodb_mon = TRUE;
926  srv_print_innodb_monitor = TRUE;
927  os_event_set(srv_lock_timeout_thread_event);
928  }
929  } else if (buf_lru_switched_on_innodb_mon) {
930 
931  /* Switch off the InnoDB Monitor; this is a simple way
932  to stop the monitor if the situation becomes less urgent,
933  but may also surprise users if the user also switched on the
934  monitor! */
935 
936  buf_lru_switched_on_innodb_mon = FALSE;
937  srv_print_innodb_monitor = FALSE;
938  }
939 
940  /* If there is a block in the free list, take it */
941  block = buf_LRU_get_free_only(buf_pool);
942  buf_pool_mutex_exit(buf_pool);
943 
944  if (block) {
945  ut_ad(buf_pool_from_block(block) == buf_pool);
946  memset(&block->page.zip, 0, sizeof block->page.zip);
947 
948  if (started_monitor) {
949  srv_print_innodb_monitor = mon_value_was;
950  }
951 
952  return(block);
953  }
954 
955  /* If no block was in the free list, search from the end of the LRU
956  list and try to free a block there */
957 
958  freed = buf_LRU_search_and_free_block(buf_pool, n_iterations);
959 
960  if (freed > 0) {
961  goto loop;
962  }
963 
964  if (n_iterations > 30) {
965  ut_print_timestamp(stderr);
966  fprintf(stderr,
967  " InnoDB: Warning: difficult to find free blocks in\n"
968  "InnoDB: the buffer pool (%lu search iterations)!"
969  " Consider\n"
970  "InnoDB: increasing the buffer pool size.\n"
971  "InnoDB: It is also possible that"
972  " in your Unix version\n"
973  "InnoDB: fsync is very slow, or"
974  " completely frozen inside\n"
975  "InnoDB: the OS kernel. Then upgrading to"
976  " a newer version\n"
977  "InnoDB: of your operating system may help."
978  " Look at the\n"
979  "InnoDB: number of fsyncs in diagnostic info below.\n"
980  "InnoDB: Pending flushes (fsync) log: %lu;"
981  " buffer pool: %lu\n"
982  "InnoDB: %lu OS file reads, %lu OS file writes,"
983  " %lu OS fsyncs\n"
984  "InnoDB: Starting InnoDB Monitor to print further\n"
985  "InnoDB: diagnostics to the standard output.\n",
986  (ulong) n_iterations,
989  (ulong) os_n_file_reads, (ulong) os_n_file_writes,
990  (ulong) os_n_fsyncs);
991 
992  mon_value_was = srv_print_innodb_monitor;
993  started_monitor = TRUE;
994  srv_print_innodb_monitor = TRUE;
995  os_event_set(srv_lock_timeout_thread_event);
996  }
997 
998  /* No free block was found: try to flush the LRU list */
999 
1000  buf_flush_free_margin(buf_pool);
1001  ++srv_buf_pool_wait_free;
1002 
1004 
1005  buf_pool_mutex_enter(buf_pool);
1006 
1007  if (buf_pool->LRU_flush_ended > 0) {
1008  /* We have written pages in an LRU flush. To make the insert
1009  buffer more efficient, we try to move these pages to the free
1010  list. */
1011 
1012  buf_pool_mutex_exit(buf_pool);
1013 
1015  } else {
1016  buf_pool_mutex_exit(buf_pool);
1017  }
1018 
1019  if (n_iterations > 10) {
1020 
1021  os_thread_sleep(500000);
1022  }
1023 
1024  n_iterations++;
1025 
1026  goto loop;
1027 }
1028 
1029 /*******************************************************************/
1032 UNIV_INLINE
1033 void
1034 buf_LRU_old_adjust_len(
1035 /*===================*/
1036  buf_pool_t* buf_pool)
1037 {
1038  ulint old_len;
1039  ulint new_len;
1040 
1041  ut_a(buf_pool->LRU_old);
1042  ut_ad(buf_pool_mutex_own(buf_pool));
1045 #if BUF_LRU_OLD_RATIO_MIN * BUF_LRU_OLD_MIN_LEN <= BUF_LRU_OLD_RATIO_DIV * (BUF_LRU_OLD_TOLERANCE + 5)
1046 # error "BUF_LRU_OLD_RATIO_MIN * BUF_LRU_OLD_MIN_LEN <= BUF_LRU_OLD_RATIO_DIV * (BUF_LRU_OLD_TOLERANCE + 5)"
1047 #endif
1048 #ifdef UNIV_LRU_DEBUG
1049  /* buf_pool->LRU_old must be the first item in the LRU list
1050  whose "old" flag is set. */
1051  ut_a(buf_pool->LRU_old->old);
1052  ut_a(!UT_LIST_GET_PREV(LRU, buf_pool->LRU_old)
1053  || !UT_LIST_GET_PREV(LRU, buf_pool->LRU_old)->old);
1054  ut_a(!UT_LIST_GET_NEXT(LRU, buf_pool->LRU_old)
1055  || UT_LIST_GET_NEXT(LRU, buf_pool->LRU_old)->old);
1056 #endif /* UNIV_LRU_DEBUG */
1057 
1058  old_len = buf_pool->LRU_old_len;
1059  new_len = ut_min(UT_LIST_GET_LEN(buf_pool->LRU)
1060  * buf_pool->LRU_old_ratio / BUF_LRU_OLD_RATIO_DIV,
1061  UT_LIST_GET_LEN(buf_pool->LRU)
1062  - (BUF_LRU_OLD_TOLERANCE
1063  + BUF_LRU_NON_OLD_MIN_LEN));
1064 
1065  for (;;) {
1066  buf_page_t* LRU_old = buf_pool->LRU_old;
1067 
1068  ut_a(LRU_old);
1069  ut_ad(LRU_old->in_LRU_list);
1070 #ifdef UNIV_LRU_DEBUG
1071  ut_a(LRU_old->old);
1072 #endif /* UNIV_LRU_DEBUG */
1073 
1074  /* Update the LRU_old pointer if necessary */
1075 
1076  if (old_len + BUF_LRU_OLD_TOLERANCE < new_len) {
1077 
1078  buf_pool->LRU_old = LRU_old = UT_LIST_GET_PREV(
1079  LRU, LRU_old);
1080 #ifdef UNIV_LRU_DEBUG
1081  ut_a(!LRU_old->old);
1082 #endif /* UNIV_LRU_DEBUG */
1083  old_len = ++buf_pool->LRU_old_len;
1084  buf_page_set_old(LRU_old, TRUE);
1085 
1086  } else if (old_len > new_len + BUF_LRU_OLD_TOLERANCE) {
1087 
1088  buf_pool->LRU_old = UT_LIST_GET_NEXT(LRU, LRU_old);
1089  old_len = --buf_pool->LRU_old_len;
1090  buf_page_set_old(LRU_old, FALSE);
1091  } else {
1092  return;
1093  }
1094  }
1095 }
1096 
1097 /*******************************************************************/
1100 static
1101 void
1102 buf_LRU_old_init(
1103 /*=============*/
1104  buf_pool_t* buf_pool)
1105 {
1106  buf_page_t* bpage;
1107 
1108  ut_ad(buf_pool_mutex_own(buf_pool));
1109  ut_a(UT_LIST_GET_LEN(buf_pool->LRU) == BUF_LRU_OLD_MIN_LEN);
1110 
1111  /* We first initialize all blocks in the LRU list as old and then use
1112  the adjust function to move the LRU_old pointer to the right
1113  position */
1114 
1115  for (bpage = UT_LIST_GET_LAST(buf_pool->LRU); bpage != NULL;
1116  bpage = UT_LIST_GET_PREV(LRU, bpage)) {
1117  ut_ad(bpage->in_LRU_list);
1118  ut_ad(buf_page_in_file(bpage));
1119  /* This loop temporarily violates the
1120  assertions of buf_page_set_old(). */
1121  bpage->old = TRUE;
1122  }
1123 
1124  buf_pool->LRU_old = UT_LIST_GET_FIRST(buf_pool->LRU);
1125  buf_pool->LRU_old_len = UT_LIST_GET_LEN(buf_pool->LRU);
1126 
1127  buf_LRU_old_adjust_len(buf_pool);
1128 }
1129 
1130 /******************************************************************/
1132 static
1133 void
1134 buf_unzip_LRU_remove_block_if_needed(
1135 /*=================================*/
1136  buf_page_t* bpage)
1137 {
1138  buf_pool_t* buf_pool = buf_pool_from_bpage(bpage);
1139 
1140  ut_ad(buf_pool);
1141  ut_ad(bpage);
1142  ut_ad(buf_page_in_file(bpage));
1143  ut_ad(buf_pool_mutex_own(buf_pool));
1144 
1145  if (buf_page_belongs_to_unzip_LRU(bpage)) {
1146  buf_block_t* block = (buf_block_t*) bpage;
1147 
1148  ut_ad(block->in_unzip_LRU_list);
1149  ut_d(block->in_unzip_LRU_list = FALSE);
1150 
1151  UT_LIST_REMOVE(unzip_LRU, buf_pool->unzip_LRU, block);
1152  }
1153 }
1154 
1155 /******************************************************************/
1157 UNIV_INLINE
1158 void
1159 buf_LRU_remove_block(
1160 /*=================*/
1161  buf_page_t* bpage)
1162 {
1163  buf_pool_t* buf_pool = buf_pool_from_bpage(bpage);
1164 
1165  ut_ad(buf_pool);
1166  ut_ad(bpage);
1167  ut_ad(buf_pool_mutex_own(buf_pool));
1168 
1169  ut_a(buf_page_in_file(bpage));
1170 
1171  ut_ad(bpage->in_LRU_list);
1172 
1173  /* If the LRU_old pointer is defined and points to just this block,
1174  move it backward one step */
1175 
1176  if (UNIV_UNLIKELY(bpage == buf_pool->LRU_old)) {
1177 
1178  /* Below: the previous block is guaranteed to exist,
1179  because the LRU_old pointer is only allowed to differ
1180  by BUF_LRU_OLD_TOLERANCE from strict
1181  buf_pool->LRU_old_ratio/BUF_LRU_OLD_RATIO_DIV of the LRU
1182  list length. */
1183  buf_page_t* prev_bpage = UT_LIST_GET_PREV(LRU, bpage);
1184 
1185  ut_a(prev_bpage);
1186 #ifdef UNIV_LRU_DEBUG
1187  ut_a(!prev_bpage->old);
1188 #endif /* UNIV_LRU_DEBUG */
1189  buf_pool->LRU_old = prev_bpage;
1190  buf_page_set_old(prev_bpage, TRUE);
1191 
1192  buf_pool->LRU_old_len++;
1193  }
1194 
1195  /* Remove the block from the LRU list */
1196  UT_LIST_REMOVE(LRU, buf_pool->LRU, bpage);
1197  ut_d(bpage->in_LRU_list = FALSE);
1198 
1199  buf_unzip_LRU_remove_block_if_needed(bpage);
1200 
1201  /* If the LRU list is so short that LRU_old is not defined,
1202  clear the "old" flags and return */
1203  if (UT_LIST_GET_LEN(buf_pool->LRU) < BUF_LRU_OLD_MIN_LEN) {
1204 
1205  for (bpage = UT_LIST_GET_FIRST(buf_pool->LRU); bpage != NULL;
1206  bpage = UT_LIST_GET_NEXT(LRU, bpage)) {
1207  /* This loop temporarily violates the
1208  assertions of buf_page_set_old(). */
1209  bpage->old = FALSE;
1210  }
1211 
1212  buf_pool->LRU_old = NULL;
1213  buf_pool->LRU_old_len = 0;
1214 
1215  return;
1216  }
1217 
1218  ut_ad(buf_pool->LRU_old);
1219 
1220  /* Update the LRU_old_len field if necessary */
1221  if (buf_page_is_old(bpage)) {
1222 
1223  buf_pool->LRU_old_len--;
1224  }
1225 
1226  /* Adjust the length of the old block list if necessary */
1227  buf_LRU_old_adjust_len(buf_pool);
1228 }
1229 
1230 /******************************************************************/
1232 UNIV_INTERN
1233 void
1235 /*====================*/
1236  buf_block_t* block,
1237  ibool old)
1239 {
1240  buf_pool_t* buf_pool = buf_pool_from_block(block);
1241 
1242  ut_ad(buf_pool);
1243  ut_ad(block);
1244  ut_ad(buf_pool_mutex_own(buf_pool));
1245 
1247 
1248  ut_ad(!block->in_unzip_LRU_list);
1249  ut_d(block->in_unzip_LRU_list = TRUE);
1250 
1251  if (old) {
1252  UT_LIST_ADD_LAST(unzip_LRU, buf_pool->unzip_LRU, block);
1253  } else {
1254  UT_LIST_ADD_FIRST(unzip_LRU, buf_pool->unzip_LRU, block);
1255  }
1256 }
1257 
1258 /******************************************************************/
1260 UNIV_INLINE
1261 void
1262 buf_LRU_add_block_to_end_low(
1263 /*=========================*/
1264  buf_page_t* bpage)
1265 {
1266  buf_pool_t* buf_pool = buf_pool_from_bpage(bpage);
1267 
1268  ut_ad(buf_pool);
1269  ut_ad(bpage);
1270  ut_ad(buf_pool_mutex_own(buf_pool));
1271 
1272  ut_a(buf_page_in_file(bpage));
1273 
1274  ut_ad(!bpage->in_LRU_list);
1275  UT_LIST_ADD_LAST(LRU, buf_pool->LRU, bpage);
1276  ut_d(bpage->in_LRU_list = TRUE);
1277 
1278  if (UT_LIST_GET_LEN(buf_pool->LRU) > BUF_LRU_OLD_MIN_LEN) {
1279 
1280  ut_ad(buf_pool->LRU_old);
1281 
1282  /* Adjust the length of the old block list if necessary */
1283 
1284  buf_page_set_old(bpage, TRUE);
1285  buf_pool->LRU_old_len++;
1286  buf_LRU_old_adjust_len(buf_pool);
1287 
1288  } else if (UT_LIST_GET_LEN(buf_pool->LRU) == BUF_LRU_OLD_MIN_LEN) {
1289 
1290  /* The LRU list is now long enough for LRU_old to become
1291  defined: init it */
1292 
1293  buf_LRU_old_init(buf_pool);
1294  } else {
1295  buf_page_set_old(bpage, buf_pool->LRU_old != NULL);
1296  }
1297 
1298  /* If this is a zipped block with decompressed frame as well
1299  then put it on the unzip_LRU list */
1300  if (buf_page_belongs_to_unzip_LRU(bpage)) {
1301  buf_unzip_LRU_add_block((buf_block_t*) bpage, TRUE);
1302  }
1303 }
1304 
1305 /******************************************************************/
1307 UNIV_INLINE
1308 void
1309 buf_LRU_add_block_low(
1310 /*==================*/
1311  buf_page_t* bpage,
1312  ibool old)
1316 {
1317  buf_pool_t* buf_pool = buf_pool_from_bpage(bpage);
1318 
1319  ut_ad(buf_pool);
1320  ut_ad(bpage);
1321  ut_ad(buf_pool_mutex_own(buf_pool));
1322 
1323  ut_a(buf_page_in_file(bpage));
1324  ut_ad(!bpage->in_LRU_list);
1325 
1326  if (!old || (UT_LIST_GET_LEN(buf_pool->LRU) < BUF_LRU_OLD_MIN_LEN)) {
1327 
1328  UT_LIST_ADD_FIRST(LRU, buf_pool->LRU, bpage);
1329 
1330  bpage->freed_page_clock = buf_pool->freed_page_clock;
1331  } else {
1332 #ifdef UNIV_LRU_DEBUG
1333  /* buf_pool->LRU_old must be the first item in the LRU list
1334  whose "old" flag is set. */
1335  ut_a(buf_pool->LRU_old->old);
1336  ut_a(!UT_LIST_GET_PREV(LRU, buf_pool->LRU_old)
1337  || !UT_LIST_GET_PREV(LRU, buf_pool->LRU_old)->old);
1338  ut_a(!UT_LIST_GET_NEXT(LRU, buf_pool->LRU_old)
1339  || UT_LIST_GET_NEXT(LRU, buf_pool->LRU_old)->old);
1340 #endif /* UNIV_LRU_DEBUG */
1341  UT_LIST_INSERT_AFTER(LRU, buf_pool->LRU, buf_pool->LRU_old,
1342  bpage);
1343  buf_pool->LRU_old_len++;
1344  }
1345 
1346  ut_d(bpage->in_LRU_list = TRUE);
1347 
1348  if (UT_LIST_GET_LEN(buf_pool->LRU) > BUF_LRU_OLD_MIN_LEN) {
1349 
1350  ut_ad(buf_pool->LRU_old);
1351 
1352  /* Adjust the length of the old block list if necessary */
1353 
1354  buf_page_set_old(bpage, old);
1355  buf_LRU_old_adjust_len(buf_pool);
1356 
1357  } else if (UT_LIST_GET_LEN(buf_pool->LRU) == BUF_LRU_OLD_MIN_LEN) {
1358 
1359  /* The LRU list is now long enough for LRU_old to become
1360  defined: init it */
1361 
1362  buf_LRU_old_init(buf_pool);
1363  } else {
1364  buf_page_set_old(bpage, buf_pool->LRU_old != NULL);
1365  }
1366 
1367  /* If this is a zipped block with decompressed frame as well
1368  then put it on the unzip_LRU list */
1369  if (buf_page_belongs_to_unzip_LRU(bpage)) {
1370  buf_unzip_LRU_add_block((buf_block_t*) bpage, old);
1371  }
1372 }
1373 
1374 /******************************************************************/
1376 UNIV_INTERN
1377 void
1379 /*==============*/
1380  buf_page_t* bpage,
1381  ibool old)
1386 {
1387  buf_LRU_add_block_low(bpage, old);
1388 }
1389 
1390 /******************************************************************/
1392 UNIV_INTERN
1393 void
1395 /*=====================*/
1396  buf_page_t* bpage)
1397 {
1398  buf_pool_t* buf_pool = buf_pool_from_bpage(bpage);
1399 
1400  ut_ad(buf_pool_mutex_own(buf_pool));
1401 
1402  if (bpage->old) {
1403  buf_pool->stat.n_pages_made_young++;
1404  }
1405 
1406  buf_LRU_remove_block(bpage);
1407  buf_LRU_add_block_low(bpage, FALSE);
1408 }
1409 
1410 /******************************************************************/
1412 UNIV_INTERN
1413 void
1415 /*===================*/
1416  buf_page_t* bpage)
1417 {
1418  buf_LRU_remove_block(bpage);
1419  buf_LRU_add_block_to_end_low(bpage);
1420 }
1421 
1422 /******************************************************************/
/* NOTE(review): judging by its BUF_LRU_FREED / BUF_LRU_NOT_FREED /
BUF_LRU_CANNOT_RELOCATE return values this appears to be
buf_LRU_free_block(): try to free a block, optionally preserving the
compressed-only copy ("b") of the page.  The documentation extraction
dropped the return-type/name lines (orig. 1436-1437) and several
statement lines below; every such gap is marked with a NOTE.  The
surviving code is left byte-identical — do not assume it compiles. */
1435 UNIV_INTERN
1438 /*===============*/
1439  buf_page_t* bpage,
1440  ibool zip)
1442 {
1443  buf_page_t* b = NULL;
1444  buf_pool_t* buf_pool = buf_pool_from_bpage(bpage);
1445  mutex_t* block_mutex = buf_page_get_mutex(bpage);
1446 
1447  ut_ad(buf_pool_mutex_own(buf_pool));
1448  ut_ad(mutex_own(block_mutex));
1449  ut_ad(buf_page_in_file(bpage));
1450  ut_ad(bpage->in_LRU_list);
1451  ut_ad(!bpage->in_flush_list == !bpage->oldest_modification);
1452 #if UNIV_WORD_SIZE == 4
1453  /* On 32-bit systems, there is no padding in buf_page_t. On
1454  other systems, Valgrind could complain about uninitialized pad
1455  bytes. */
1456  UNIV_MEM_ASSERT_RW(bpage, sizeof *bpage);
1457 #endif
1458 
1459  if (!buf_page_can_relocate(bpage)) {
1460 
1461  /* Do not free buffer-fixed or I/O-fixed blocks. */
1462  return(BUF_LRU_NOT_FREED);
1463  }
1464 
1465 #ifdef UNIV_IBUF_COUNT_DEBUG
1466  ut_a(ibuf_count_get(bpage->space, bpage->offset) == 0);
1467 #endif /* UNIV_IBUF_COUNT_DEBUG */
1468 
1469  if (zip || !bpage->zip.data) {
1470  /* This would completely free the block. */
1471  /* Do not completely free dirty blocks. */
1472 
1473  if (bpage->oldest_modification) {
1474  return(BUF_LRU_NOT_FREED);
1475  }
1476  } else if (bpage->oldest_modification) {
1477  /* Do not completely free dirty blocks. */
1478 
1479  if (buf_page_get_state(bpage) != BUF_BLOCK_FILE_PAGE) {
1480  ut_ad(buf_page_get_state(bpage)
1481  == BUF_BLOCK_ZIP_DIRTY);
1482  return(BUF_LRU_NOT_FREED);
1483  }
1484 
1485  goto alloc;
1486  } else if (buf_page_get_state(bpage) == BUF_BLOCK_FILE_PAGE) {
1487  /* Allocate the control block for the compressed page.
1488  If it cannot be allocated (without freeing a block
1489  from the LRU list), refuse to free bpage. */
1490 alloc:
1491  buf_pool_mutex_exit_forbid(buf_pool);
1492  b = static_cast<buf_page_t *>(buf_buddy_alloc(buf_pool, sizeof *b, NULL));
1493  buf_pool_mutex_exit_allow(buf_pool);
1494 
1495  if (UNIV_UNLIKELY(!b)) {
1496  return(BUF_LRU_CANNOT_RELOCATE);
1497  }
1498 
1499  memcpy(b, bpage, sizeof *b);
1500  }
1501 
1502 #ifdef UNIV_DEBUG
1503  if (buf_debug_prints) {
1504  fprintf(stderr, "Putting space %lu page %lu to free list\n",
1505  (ulong) buf_page_get_space(bpage),
1506  (ulong) buf_page_get_page_no(bpage));
1507  }
1508 #endif /* UNIV_DEBUG */
1509 
1510  if (buf_LRU_block_remove_hashed_page(bpage, zip)
1511  != BUF_BLOCK_ZIP_FREE) {
1512  ut_a(bpage->buf_fix_count == 0);
1513 
1514  if (b) {
1515  buf_page_t* hash_b;
1516  buf_page_t* prev_b = UT_LIST_GET_PREV(LRU, b);
1517 
1518  const ulint fold = buf_page_address_fold(
1519  bpage->space, bpage->offset);
1520 
1521  hash_b = buf_page_hash_get_low(
1522  buf_pool, bpage->space, bpage->offset, fold);
1523 
1524  ut_a(!hash_b);
1525 
1526  b->state = b->oldest_modification
	/* NOTE(review): the extraction dropped orig. lines 1527-1528
	here — presumably the ternary tail choosing BUF_BLOCK_ZIP_DIRTY
	vs BUF_BLOCK_ZIP_PAGE; confirm against the upstream source. */
1529  UNIV_MEM_DESC(b->zip.data,
1530  page_zip_get_size(&b->zip), b);
1531 
1532  /* The fields in_page_hash and in_LRU_list of
1533  the to-be-freed block descriptor should have
1534  been cleared in
1535  buf_LRU_block_remove_hashed_page(), which
1536  invokes buf_LRU_remove_block(). */
1537  ut_ad(!bpage->in_page_hash);
1538  ut_ad(!bpage->in_LRU_list);
1539  /* bpage->state was BUF_BLOCK_FILE_PAGE because
1540  b != NULL. The type cast below is thus valid. */
1541  ut_ad(!((buf_block_t*) bpage)->in_unzip_LRU_list);
1542 
1543  /* The fields of bpage were copied to b before
1544  buf_LRU_block_remove_hashed_page() was invoked. */
1545  ut_ad(!b->in_zip_hash);
1546  ut_ad(b->in_page_hash);
1547  ut_ad(b->in_LRU_list);
1548 
1549  HASH_INSERT(buf_page_t, hash,
1550  buf_pool->page_hash, fold, b);
1551 
1552  /* Insert b where bpage was in the LRU list. */
1553  if (UNIV_LIKELY(prev_b != NULL)) {
1554  ulint lru_len;
1555 
1556  ut_ad(prev_b->in_LRU_list);
1557  ut_ad(buf_page_in_file(prev_b));
1558 #if UNIV_WORD_SIZE == 4
1559  /* On 32-bit systems, there is no
1560  padding in buf_page_t. On other
1561  systems, Valgrind could complain about
1562  uninitialized pad bytes. */
1563  UNIV_MEM_ASSERT_RW(prev_b, sizeof *prev_b);
1564 #endif
1565  UT_LIST_INSERT_AFTER(LRU, buf_pool->LRU,
1566  prev_b, b);
1567 
1568  if (buf_page_is_old(b)) {
1569  buf_pool->LRU_old_len++;
1570  if (UNIV_UNLIKELY
1571  (buf_pool->LRU_old
1572  == UT_LIST_GET_NEXT(LRU, b))) {
1573 
1574  buf_pool->LRU_old = b;
1575  }
1576  }
1577 
1578  lru_len = UT_LIST_GET_LEN(buf_pool->LRU);
1579 
1580  if (lru_len > BUF_LRU_OLD_MIN_LEN) {
1581  ut_ad(buf_pool->LRU_old);
1582  /* Adjust the length of the
1583  old block list if necessary */
1584  buf_LRU_old_adjust_len(buf_pool);
1585  } else if (lru_len == BUF_LRU_OLD_MIN_LEN) {
1586  /* The LRU list is now long
1587  enough for LRU_old to become
1588  defined: init it */
1589  buf_LRU_old_init(buf_pool);
1590  }
1591 #ifdef UNIV_LRU_DEBUG
1592  /* Check that the "old" flag is consistent
1593  in the block and its neighbours. */
	/* NOTE(review): orig. line 1594 (the debug assertion itself)
	was dropped by the extraction. */
1595 #endif /* UNIV_LRU_DEBUG */
1596  } else {
1597  ut_d(b->in_LRU_list = FALSE);
1598  buf_LRU_add_block_low(b, buf_page_is_old(b));
1599  }
1600 
1601  if (b->state == BUF_BLOCK_ZIP_PAGE) {
	/* NOTE(review): orig. line 1602 dropped — the clean-page
	branch body (likely adding b to buf_pool->zip_clean). */
1603  } else {
1604  /* Relocate on buf_pool->flush_list. */
	/* NOTE(review): orig. line 1605 dropped — the flush-list
	relocation call. */
1606  }
1607 
1608  bpage->zip.data = NULL;
1609  page_zip_set_size(&bpage->zip, 0);
1610 
1611  /* Prevent buf_page_get_gen() from
1612  decompressing the block while we release
1613  buf_pool->mutex and block_mutex. */
1614  b->buf_fix_count++;
1615  b->io_fix = BUF_IO_READ;
1616  }
1617 
1618  buf_pool_mutex_exit(buf_pool);
1619  mutex_exit(block_mutex);
1620 
1621  /* Remove possible adaptive hash index on the page.
1622  The page was declared uninitialized by
1623  buf_LRU_block_remove_hashed_page(). We need to flag
1624  the contents of the page valid (which it still is) in
1625  order to avoid bogus Valgrind warnings.*/
1626 
1627  UNIV_MEM_VALID(((buf_block_t*) bpage)->frame,
1628  UNIV_PAGE_SIZE);
	/* NOTE(review): orig. line 1629 dropped — per the comment above,
	presumably the adaptive-hash-index drop call. */
1630  UNIV_MEM_INVALID(((buf_block_t*) bpage)->frame,
1631  UNIV_PAGE_SIZE);
1632 
1633  if (b) {
1634  /* Compute and stamp the compressed page
1635  checksum while not holding any mutex. The
1636  block is already half-freed
1637  (BUF_BLOCK_REMOVE_HASH) and removed from
1638  buf_pool->page_hash, thus inaccessible by any
1639  other thread. */
1640 
	/* NOTE(review): orig. lines 1641-1642, 1644 and 1647 dropped —
	the checksum-stamping statement is truncated below. */
1643  UNIV_LIKELY(srv_use_checksums)
1645  b->zip.data,
1646  page_zip_get_size(&b->zip))
1648  }
1649 
1650  buf_pool_mutex_enter(buf_pool);
1651  mutex_enter(block_mutex);
1652 
1653  if (b) {
1654  mutex_enter(&buf_pool->zip_mutex);
1655  b->buf_fix_count--;
	/* NOTE(review): orig. line 1656 dropped — presumably clearing
	the BUF_IO_READ fix set above. */
1657  mutex_exit(&buf_pool->zip_mutex);
1658  }
1659 
1660  buf_LRU_block_free_hashed_page((buf_block_t*) bpage);
1661  } else {
1662  /* The block_mutex should have been released by
1663  buf_LRU_block_remove_hashed_page() when it returns
1664  BUF_BLOCK_ZIP_FREE. */
1665  ut_ad(block_mutex == &buf_pool->zip_mutex);
1666  mutex_enter(block_mutex);
1667  }
1668 
1669  return(BUF_LRU_FREED);
1670 }
1671 
1672 /******************************************************************/
/* NOTE(review): this appears to be buf_LRU_block_free_non_file_page():
puts a block, which no longer contains a file page, back on
buf_pool->free.  The extraction dropped the function-name line and
a few statement lines (marked below); code left byte-identical. */
1674 UNIV_INTERN
1675 void
1677 /*=============================*/
1678  buf_block_t* block)
1679 {
1680  void* data;
1681  buf_pool_t* buf_pool = buf_pool_from_block(block);
1682 
1683  ut_ad(block);
1684  ut_ad(buf_pool_mutex_own(buf_pool));
1685  ut_ad(mutex_own(&block->mutex));
1686 
1687  switch (buf_block_get_state(block)) {
1688  case BUF_BLOCK_MEMORY:
	/* NOTE(review): orig. line 1689 dropped — presumably another
	accepted case label. */
1690  break;
1691  default:
1692  ut_error;
1693  }
1694 
1695 #if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
1696  ut_a(block->n_pointers == 0);
1697 #endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
1698  ut_ad(!block->page.in_free_list);
1699  ut_ad(!block->page.in_flush_list);
1700  ut_ad(!block->page.in_LRU_list);
1701 
	/* NOTE(review): orig. line 1702 dropped — presumably
	buf_block_set_state(block, BUF_BLOCK_NOT_USED); confirm. */
1703 
1704  UNIV_MEM_ALLOC(block->frame, UNIV_PAGE_SIZE);
1705 #ifdef UNIV_DEBUG
1706  /* Wipe contents of page to reveal possible stale pointers to it */
1707  memset(block->frame, '\0', UNIV_PAGE_SIZE);
1708 #else
1709  /* Wipe page_no and space_id */
1710  memset(block->frame + FIL_PAGE_OFFSET, 0xfe, 4);
1711  memset(block->frame + FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID, 0xfe, 4);
1712 #endif
1713  data = block->page.zip.data;
1714 
1715  if (data) {
1716  block->page.zip.data = NULL;
1717  mutex_exit(&block->mutex);
1718  buf_pool_mutex_exit_forbid(buf_pool);
1719 
	/* NOTE(review): orig. line 1720 dropped — the opening of the
	buf_buddy_free(...) call whose arguments follow. */
1721  buf_pool, data, page_zip_get_size(&block->page.zip));
1722 
1723  buf_pool_mutex_exit_allow(buf_pool);
1724  mutex_enter(&block->mutex);
1725  page_zip_set_size(&block->page.zip, 0);
1726  }
1727 
1728  UT_LIST_ADD_FIRST(list, buf_pool->free, (&block->page));
1729  ut_d(block->page.in_free_list = TRUE);
1730 
1731  UNIV_MEM_ASSERT_AND_FREE(block->frame, UNIV_PAGE_SIZE);
1732 }
1733 
1734 /******************************************************************/
/* Removes the page from buf_pool->page_hash and from the LRU list
(via buf_LRU_remove_block), and returns the new block state.
NOTE(review): the extraction dropped the parameter-doc lines and
several statement lines (each gap marked with a NOTE below); the
surviving code is left byte-identical — do not assume it compiles. */
1744 static
1745 enum buf_page_state
1746 buf_LRU_block_remove_hashed_page(
1747 /*=============================*/
1748  buf_page_t* bpage,
1751  ibool zip)
1753 {
1754  ulint fold;
1755  const buf_page_t* hashed_bpage;
1756  buf_pool_t* buf_pool = buf_pool_from_bpage(bpage);
1757 
1758  ut_ad(bpage);
1759  ut_ad(buf_pool_mutex_own(buf_pool));
1760  ut_ad(mutex_own(buf_page_get_mutex(bpage)));
1761 
	/* NOTE(review): orig. line 1762 dropped — presumably an
	assertion on the page's I/O-fix state. */
1763  ut_a(bpage->buf_fix_count == 0);
1764 
1765 #if UNIV_WORD_SIZE == 4
1766  /* On 32-bit systems, there is no padding in
1767  buf_page_t. On other systems, Valgrind could complain
1768  about uninitialized pad bytes. */
1769  UNIV_MEM_ASSERT_RW(bpage, sizeof *bpage);
1770 #endif
1771 
1772  buf_LRU_remove_block(bpage);
1773 
1774  buf_pool->freed_page_clock += 1;
1775 
1776  switch (buf_page_get_state(bpage)) {
1777  case BUF_BLOCK_FILE_PAGE:
1778  UNIV_MEM_ASSERT_W(bpage, sizeof(buf_block_t));
1779  UNIV_MEM_ASSERT_W(((buf_block_t*) bpage)->frame,
1780  UNIV_PAGE_SIZE);
	/* NOTE(review): orig. line 1781 dropped. */
1782  if (bpage->zip.data) {
1783  const page_t* page = ((buf_block_t*) bpage)->frame;
1784  const ulint zip_size
1785  = page_zip_get_size(&bpage->zip);
1786 
1787  ut_a(!zip || bpage->oldest_modification == 0);
1788 
1789  switch (UNIV_EXPECT(fil_page_get_type(page),
1790  FIL_PAGE_INDEX)) {
	/* NOTE(review): orig. line 1791 dropped — presumably a
	case label (e.g. FIL_PAGE_TYPE_ALLOCATED). */
1792  case FIL_PAGE_INODE:
1793  case FIL_PAGE_IBUF_BITMAP:
1794  case FIL_PAGE_TYPE_FSP_HDR:
1795  case FIL_PAGE_TYPE_XDES:
1796  /* These are essentially uncompressed pages. */
1797  if (!zip) {
1798  /* InnoDB writes the data to the
1799  uncompressed page frame. Copy it
1800  to the compressed page, which will
1801  be preserved. */
1802  memcpy(bpage->zip.data, page,
1803  zip_size);
1804  }
1805  break;
1806  case FIL_PAGE_TYPE_ZBLOB:
1807  case FIL_PAGE_TYPE_ZBLOB2:
1808  break;
1809  case FIL_PAGE_INDEX:
1810 #ifdef UNIV_ZIP_DEBUG
1811  ut_a(page_zip_validate(&bpage->zip, page));
1812 #endif /* UNIV_ZIP_DEBUG */
1813  break;
1814  default:
1815  ut_print_timestamp(stderr);
1816  fputs(" InnoDB: ERROR: The compressed page"
1817  " to be evicted seems corrupt:", stderr);
1818  ut_print_buf(stderr, page, zip_size);
1819  fputs("\nInnoDB: Possibly older version"
1820  " of the page:", stderr);
1821  ut_print_buf(stderr, bpage->zip.data,
1822  zip_size);
1823  putc('\n', stderr);
1824  ut_error;
1825  }
1826 
1827  break;
1828  }
1829  /* fall through */
1830  case BUF_BLOCK_ZIP_PAGE:
1831  ut_a(bpage->oldest_modification == 0);
1832  UNIV_MEM_ASSERT_W(bpage->zip.data,
1833  page_zip_get_size(&bpage->zip));
1834  break;
1835  case BUF_BLOCK_ZIP_FREE:
1836  case BUF_BLOCK_ZIP_DIRTY:
1837  case BUF_BLOCK_NOT_USED:
	/* NOTE(review): orig. line 1838 dropped — presumably another
	invalid-state case label. */
1839  case BUF_BLOCK_MEMORY:
1840  case BUF_BLOCK_REMOVE_HASH:
1841  ut_error;
1842  break;
1843  }
1844 
1845  fold = buf_page_address_fold(bpage->space, bpage->offset);
1846  hashed_bpage = buf_page_hash_get_low(
1847  buf_pool, bpage->space, bpage->offset, fold);
1848 
1849  if (UNIV_UNLIKELY(bpage != hashed_bpage)) {
1850  fprintf(stderr,
1851  "InnoDB: Error: page %lu %lu not found"
1852  " in the hash table\n",
1853  (ulong) bpage->space,
1854  (ulong) bpage->offset);
1855  if (hashed_bpage) {
1856  fprintf(stderr,
1857  "InnoDB: In hash table we find block"
1858  " %p of %lu %lu which is not %p\n",
1859  (const void*) hashed_bpage,
1860  (ulong) hashed_bpage->space,
1861  (ulong) hashed_bpage->offset,
1862  (const void*) bpage);
1863  }
1864 
1865 #if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
1866  mutex_exit(buf_page_get_mutex(bpage));
1867  buf_pool_mutex_exit(buf_pool);
1868  buf_print();
1869  buf_LRU_print();
1870  buf_validate();
1871  buf_LRU_validate();
1872 #endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
1873  ut_error;
1874  }
1875 
1876  ut_ad(!bpage->in_zip_hash);
1877  ut_ad(bpage->in_page_hash);
1878  ut_d(bpage->in_page_hash = FALSE);
1879  HASH_DELETE(buf_page_t, hash, buf_pool->page_hash, fold, bpage);
1880  switch (buf_page_get_state(bpage)) {
1881  case BUF_BLOCK_ZIP_PAGE:
1882  ut_ad(!bpage->in_free_list);
1883  ut_ad(!bpage->in_flush_list);
1884  ut_ad(!bpage->in_LRU_list);
1885  ut_a(bpage->zip.data);
1886  ut_a(buf_page_get_zip_size(bpage));
1887 
1888  UT_LIST_REMOVE(list, buf_pool->zip_clean, bpage);
1889 
1890  mutex_exit(&buf_pool->zip_mutex);
1891  buf_pool_mutex_exit_forbid(buf_pool);
1892 
	/* NOTE(review): orig. line 1893 dropped — the opening of a
	buf_buddy_free(...) call whose arguments follow. */
1894  buf_pool, bpage->zip.data,
1895  page_zip_get_size(&bpage->zip));
1896 
1897  bpage->state = BUF_BLOCK_ZIP_FREE;
1898  buf_buddy_free(buf_pool, bpage, sizeof(*bpage));
1899  buf_pool_mutex_exit_allow(buf_pool);
1900 
1901  UNIV_MEM_UNDESC(bpage);
1902  return(BUF_BLOCK_ZIP_FREE);
1903 
1904  case BUF_BLOCK_FILE_PAGE:
1905  memset(((buf_block_t*) bpage)->frame
1906  + FIL_PAGE_OFFSET, 0xff, 4);
1907  memset(((buf_block_t*) bpage)->frame
	/* NOTE(review): orig. line 1908 dropped — the second memset's
	remaining arguments (offset/value/length). */
1909  UNIV_MEM_INVALID(((buf_block_t*) bpage)->frame,
1910  UNIV_PAGE_SIZE);
	/* NOTE(review): orig. line 1911 dropped — presumably setting
	the page state to BUF_BLOCK_REMOVE_HASH; confirm. */
1912 
1913  if (zip && bpage->zip.data) {
1914  /* Free the compressed page. */
1915  void* data = bpage->zip.data;
1916  bpage->zip.data = NULL;
1917 
1918  ut_ad(!bpage->in_free_list);
1919  ut_ad(!bpage->in_flush_list);
1920  ut_ad(!bpage->in_LRU_list);
1921  mutex_exit(&((buf_block_t*) bpage)->mutex);
1922  buf_pool_mutex_exit_forbid(buf_pool);
1923 
	/* NOTE(review): orig. line 1924 dropped — the opening of a
	buf_buddy_free(...) call whose arguments follow. */
1925  buf_pool, data,
1926  page_zip_get_size(&bpage->zip));
1927 
1928  buf_pool_mutex_exit_allow(buf_pool);
1929  mutex_enter(&((buf_block_t*) bpage)->mutex);
1930  page_zip_set_size(&bpage->zip, 0);
1931  }
1932 
1933  return(BUF_BLOCK_REMOVE_HASH);
1934 
1935  case BUF_BLOCK_ZIP_FREE:
1936  case BUF_BLOCK_ZIP_DIRTY:
1937  case BUF_BLOCK_NOT_USED:
	/* NOTE(review): orig. line 1938 dropped — presumably another
	invalid-state case label. */
1939  case BUF_BLOCK_MEMORY:
1940  case BUF_BLOCK_REMOVE_HASH:
1941  break;
1942  }
1943 
1944  ut_error;
1945  return(BUF_BLOCK_ZIP_FREE);
1946 }
1947 
1948 /******************************************************************/
1950 static
1951 void
1952 buf_LRU_block_free_hashed_page(
1953 /*===========================*/
1954  buf_block_t* block)
1956 {
1957 #ifdef UNIV_DEBUG
1958  buf_pool_t* buf_pool = buf_pool_from_block(block);
1959  ut_ad(buf_pool_mutex_own(buf_pool));
1960 #endif
1961  ut_ad(mutex_own(&block->mutex));
1962 
1964 
1966 }
1967 
1968 /**********************************************************************/
1971 static
1972 uint
1973 buf_LRU_old_ratio_update_instance(
1974 /*==============================*/
1975  buf_pool_t* buf_pool,
1976  uint old_pct,
1978  ibool adjust)
1981 {
1982  uint ratio;
1983 
1984  ratio = old_pct * BUF_LRU_OLD_RATIO_DIV / 100;
1985  if (ratio < BUF_LRU_OLD_RATIO_MIN) {
1986  ratio = BUF_LRU_OLD_RATIO_MIN;
1987  } else if (ratio > BUF_LRU_OLD_RATIO_MAX) {
1988  ratio = BUF_LRU_OLD_RATIO_MAX;
1989  }
1990 
1991  if (adjust) {
1992  buf_pool_mutex_enter(buf_pool);
1993 
1994  if (ratio != buf_pool->LRU_old_ratio) {
1995  buf_pool->LRU_old_ratio = ratio;
1996 
1997  if (UT_LIST_GET_LEN(buf_pool->LRU)
1998  >= BUF_LRU_OLD_MIN_LEN) {
1999 
2000  buf_LRU_old_adjust_len(buf_pool);
2001  }
2002  }
2003 
2004  buf_pool_mutex_exit(buf_pool);
2005  } else {
2006  buf_pool->LRU_old_ratio = ratio;
2007  }
2008  /* the reverse of
2009  ratio = old_pct * BUF_LRU_OLD_RATIO_DIV / 100 */
2010  return((uint) (ratio * 100 / (double) BUF_LRU_OLD_RATIO_DIV + 0.5));
2011 }
2012 
2013 /**********************************************************************/
2016 UNIV_INTERN
2017 ulint
2019 /*=====================*/
2020  uint old_pct,
2022  ibool adjust)
2025 {
2026  ulint i;
2027  ulint new_ratio = 0;
2028 
2029  for (i = 0; i < srv_buf_pool_instances; i++) {
2030  buf_pool_t* buf_pool;
2031 
2032  buf_pool = buf_pool_from_array(i);
2033 
2034  new_ratio = buf_LRU_old_ratio_update_instance(
2035  buf_pool, old_pct, adjust);
2036  }
2037 
2038  return(new_ratio);
2039 }
2040 
2041 /********************************************************************/
2044 UNIV_INTERN
2045 void
2047 /*=====================*/
2048 {
2049  ulint i;
2050  buf_LRU_stat_t* item;
2051  buf_pool_t* buf_pool;
2052  ibool evict_started = FALSE;
2053  buf_LRU_stat_t cur_stat;
2054 
2055  /* If we haven't started eviction yet then don't update stats. */
2056  for (i = 0; i < srv_buf_pool_instances; i++) {
2057 
2058  buf_pool = buf_pool_from_array(i);
2059 
2060  if (buf_pool->freed_page_clock != 0) {
2061  evict_started = TRUE;
2062  break;
2063  }
2064  }
2065 
2066  if (!evict_started) {
2067  goto func_exit;
2068  }
2069 
2070  /* Update the index. */
2071  item = &buf_LRU_stat_arr[buf_LRU_stat_arr_ind];
2072  buf_LRU_stat_arr_ind++;
2073  buf_LRU_stat_arr_ind %= BUF_LRU_STAT_N_INTERVAL;
2074 
2075  /* Add the current value and subtract the obsolete entry.
2076  Since buf_LRU_stat_cur is not protected by any mutex,
2077  it can be changing between adding to buf_LRU_stat_sum
2078  and copying to item. Assign it to local variables to make
2079  sure the same value assign to the buf_LRU_stat_sum
2080  and item */
2081  cur_stat = buf_LRU_stat_cur;
2082 
2083  buf_LRU_stat_sum.io += cur_stat.io - item->io;
2084  buf_LRU_stat_sum.unzip += cur_stat.unzip - item->unzip;
2085 
2086  /* Put current entry in the array. */
2087  memcpy(item, &cur_stat, sizeof *item);
2088 
2089 func_exit:
2090  /* Clear the current entry. */
2091  memset(&buf_LRU_stat_cur, 0, sizeof buf_LRU_stat_cur);
2092 }
2093 
2094 /********************************************************************/
2096 #define LRU_DUMP_FILE "ib_lru_dump"
2097 
2098 UNIV_INTERN
2099 bool
2101 /*===================*/
2102 {
2103  os_file_t dump_file = -1;
2104  ibool success;
2105  byte* buffer_base = NULL;
2106  byte* buffer = NULL;
2107  buf_page_t* bpage;
2108  ulint buffers;
2109  ulint offset;
2110  bool ret = false;
2111  ulint i;
2112 
2113  for (i = 0; i < srv_n_data_files; i++) {
2114  if (strstr(srv_data_file_names[i], LRU_DUMP_FILE) != NULL) {
2115  fprintf(stderr,
2116  " InnoDB: The name '%s' seems to be used for"
2117  " innodb_data_file_path. For safety, dumping of the LRU list"
2118  " is not being done.\n", LRU_DUMP_FILE);
2119  goto end;
2120  }
2121  }
2122 
2123  buffer_base = static_cast<byte *>(ut_malloc(2 * UNIV_PAGE_SIZE));
2124  buffer = static_cast<byte *>(ut_align(buffer_base, UNIV_PAGE_SIZE));
2125  if (buffer == NULL) {
2126  fprintf(stderr,
2127  " InnoDB: cannot allocate buffer.\n");
2128  goto end;
2129  }
2130 
2131  dump_file = os_file_create(innodb_file_temp_key, LRU_DUMP_FILE, OS_FILE_OVERWRITE,
2132  OS_FILE_NORMAL, OS_DATA_FILE, &success);
2133  if (success == FALSE) {
2134  os_file_get_last_error(TRUE);
2135  fprintf(stderr,
2136  " InnoDB: cannot open %s\n", LRU_DUMP_FILE);
2137  goto end;
2138  }
2139 
2140  buffers = offset = 0;
2141 
2142  for (i = 0; i < srv_buf_pool_instances; i++) {
2143  buf_pool_t* buf_pool;
2144 
2145  buf_pool = buf_pool_from_array(i);
2146 
2147  buf_pool_mutex_enter(buf_pool);
2148  bpage = UT_LIST_GET_LAST(buf_pool->LRU);
2149 
2150  while (bpage != NULL) {
2151  if (offset == 0) {
2152  memset(buffer, 0, UNIV_PAGE_SIZE);
2153  }
2154 
2155  mach_write_to_4(buffer + offset * 4, bpage->space);
2156  offset++;
2157  mach_write_to_4(buffer + offset * 4, bpage->offset);
2158  offset++;
2159 
2160  if (offset == UNIV_PAGE_SIZE/4) {
2161  success = os_file_write(LRU_DUMP_FILE, dump_file, buffer,
2162  (buffers << UNIV_PAGE_SIZE_SHIFT) & 0xFFFFFFFFUL,
2163  (buffers >> (32 - UNIV_PAGE_SIZE_SHIFT)),
2164  UNIV_PAGE_SIZE);
2165  if (success == FALSE) {
2166  buf_pool_mutex_exit(buf_pool);
2167  fprintf(stderr,
2168  " InnoDB: cannot write page %lu of %s\n",
2169  buffers, LRU_DUMP_FILE);
2170  goto end;
2171  }
2172  buffers++;
2173  offset = 0;
2174  }
2175 
2176  bpage = UT_LIST_GET_PREV(LRU, bpage);
2177  }
2178  buf_pool_mutex_exit(buf_pool);
2179  }
2180 
2181  if (offset == 0) {
2182  memset(buffer, 0, UNIV_PAGE_SIZE);
2183  }
2184 
2185  mach_write_to_4(buffer + offset * 4, 0xFFFFFFFFUL);
2186  offset++;
2187  mach_write_to_4(buffer + offset * 4, 0xFFFFFFFFUL);
2188  offset++;
2189 
2190  success = os_file_write(LRU_DUMP_FILE, dump_file, buffer,
2191  (buffers << UNIV_PAGE_SIZE_SHIFT) & 0xFFFFFFFFUL,
2192  (buffers >> (32 - UNIV_PAGE_SIZE_SHIFT)),
2193  UNIV_PAGE_SIZE);
2194  if (success == FALSE) {
2195  goto end;
2196  }
2197 
2198  ret = true;
2199 end:
2200  if (dump_file != -1)
2201  os_file_close(dump_file);
2202  if (buffer_base)
2203  ut_free(buffer_base);
2204 
2205  return(ret);
2206 }
2207 
/* One entry of the ib_lru_dump file: a (space id, page number) pair,
as written by buf_LRU_file_dump() and sorted before re-reading. */
2208 typedef struct {
2209  ib_uint32_t space_id;
2210  ib_uint32_t page_no;
2211 } dump_record_t;
2212 
2213 static int dump_record_cmp(const void *a, const void *b)
2214 {
2215  const dump_record_t *rec1 = (dump_record_t *) a;
2216  const dump_record_t *rec2 = (dump_record_t *) b;
2217 
2218  if (rec1->space_id < rec2->space_id)
2219  return -1;
2220  if (rec1->space_id > rec2->space_id)
2221  return 1;
2222  if (rec1->page_no < rec2->page_no)
2223  return -1;
2224  return rec1->page_no > rec2->page_no;
2225 }
2226 
2227 /********************************************************************/
/* Reads the ib_lru_dump file written by buf_LRU_file_dump(), sorts
the (space id, page number) records, and issues reads to warm the
buffer pool.
NOTE(review): per the generated index this is buf_LRU_file_restore()
(buf0lru.cc:2231) — the extraction dropped the name line and several
statement lines, marked below; the code is left byte-identical. */
2229 UNIV_INTERN
2230 bool
2232 /*======================*/
2233 {
2234  os_file_t dump_file = -1;
2235  ibool success;
2236  byte* buffer_base = NULL;
2237  byte* buffer = NULL;
2238  ulint buffers;
2239  ulint offset;
2240  ulint reads = 0;
2241  ulint req = 0;
2242  bool terminated = false;
2243  bool ret = false;
2244  dump_record_t* records = NULL;
2245  ulint size;
2246  ulint size_high;
2247  ulint length;
2248 
2249  dump_file = os_file_create_simple_no_error_handling(innodb_file_temp_key,
2250  LRU_DUMP_FILE, OS_FILE_OPEN, OS_FILE_READ_ONLY, &success);
2251  if (success == FALSE || !os_file_get_size(dump_file, &size, &size_high)) {
2252  os_file_get_last_error(TRUE);
2253  fprintf(stderr,
2254  " InnoDB: cannot open %s\n", LRU_DUMP_FILE);
2255  goto end;
2256  }
2257  if (size == 0 || size_high > 0 || size % 8) {
2258  fprintf(stderr, " InnoDB: broken LRU dump file\n");
2259  goto end;
2260  }
2261  buffer_base = static_cast<byte *>(ut_malloc(2 * UNIV_PAGE_SIZE));
2262  buffer = static_cast<byte *>(ut_align(buffer_base, UNIV_PAGE_SIZE));
2263  records = static_cast<dump_record_t *>(ut_malloc(size));
2264  if (buffer == NULL || records == NULL) {
2265  fprintf(stderr,
2266  " InnoDB: cannot allocate buffer.\n");
2267  goto end;
2268  }
2269 
2270  buffers = 0;
2271  length = 0;
2272  while (!terminated) {
2273  success = os_file_read(dump_file, buffer,
2274  (buffers << UNIV_PAGE_SIZE_SHIFT) & 0xFFFFFFFFUL,
2275  (buffers >> (32 - UNIV_PAGE_SIZE_SHIFT)),
2276  UNIV_PAGE_SIZE);
2277  if (success == FALSE) {
2278  fprintf(stderr,
2279  " InnoDB: either could not read page %lu of %s,"
2280  " or terminated unexpectedly.\n",
2281  buffers, LRU_DUMP_FILE);
2282  goto end;
2283  }
2284 
2285  for (offset = 0; offset < UNIV_PAGE_SIZE/4; offset += 2) {
2286  ulint space_id;
2287  ulint page_no;
2288 
2289  space_id = mach_read_from_4(buffer + offset * 4);
2290  page_no = mach_read_from_4(buffer + (offset + 1) * 4);
2291  if (space_id == 0xFFFFFFFFUL
2292  || page_no == 0xFFFFFFFFUL) {
2293  terminated = true;
2294  break;
2295  }
2296 
2297  records[length].space_id = space_id;
2298  records[length].page_no = page_no;
2299  length++;
2300  if (length * 8 >= size) {
2301  fprintf(stderr,
2302  " InnoDB: could not find the "
2303  "end-of-file marker after reading "
2304  "the expected %lu bytes from the "
2305  "LRU dump file.\n"
2306  " InnoDB: this could be caused by a "
2307  "broken or incomplete file.\n"
2308  " InnoDB: trying to process what has "
2309  "been read so far.\n",
2310  size);
2311  terminated = true;
2312  break;
2313  }
2314  }
2315  buffers++;
2316  }
2317 
2318  qsort(records, length, sizeof(dump_record_t), dump_record_cmp);
2319 
2320  for (offset = 0; offset < length; offset++) {
2321  ulint space_id;
2322  ulint page_no;
2323  ulint zip_size;
2324  ulint err;
2325  int64_t tablespace_version;
2326 
2327  space_id = records[offset].space_id;
2328  page_no = records[offset].page_no;
2329 
2330  if (offset % 16 == 15) {
	/* NOTE(review): orig. lines 2331-2332 dropped — presumably
	the periodic wake-up of the simulated AIO handler threads. */
2333  }
2334 
2335  zip_size = fil_space_get_zip_size(space_id);
2336  if (UNIV_UNLIKELY(zip_size == ULINT_UNDEFINED)) {
2337  continue;
2338  }
2339 
2340  if (fil_is_exist(space_id, page_no)) {
2341 
2342  tablespace_version = fil_space_get_version(space_id);
2343 
2344  req++;
2345  reads += buf_read_page_low(&err, FALSE, BUF_READ_ANY_PAGE
	/* NOTE(review): orig. lines 2346 and 2349 dropped — the rest
	of the read-mode argument and the call's closing. */
2347  space_id, zip_size, TRUE,
2348  tablespace_version, page_no);
2350  }
2351  }
2352 
	/* NOTE(review): orig. lines 2353-2354 dropped — presumably the
	final AIO wake-up before reporting. */
2355 
2356  ut_print_timestamp(stderr);
2357  fprintf(stderr,
2358  " InnoDB: reading pages based on the dumped LRU list was done."
2359  " (requested: %lu, read: %lu)\n", req, reads);
2360  ret = true;
2361 end:
2362  if (dump_file != -1)
2363  os_file_close(dump_file);
2364  if (buffer_base)
2365  ut_free(buffer_base);
2366  if (records)
2367  ut_free(records);
2368 
2369  return(ret);
2370 }
2371 
2372 #if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
2373 /**********************************************************************/
/* Validates the LRU, free and unzip_LRU lists of one buffer pool
instance: old-sublist length within tolerance, list-membership flags
consistent, and the "old" flags contiguous from LRU_old to the tail.
NOTE(review): the extraction dropped a few lines (marked below); the
surviving code is left byte-identical. */
2375 static
2376 void
2377 buf_LRU_validate_instance(
2378 /*======================*/
2379  buf_pool_t* buf_pool)
2380 {
2381  buf_page_t* bpage;
2382  buf_block_t* block;
2383  ulint old_len;
2384  ulint new_len;
2385 
2386  ut_ad(buf_pool);
2387  buf_pool_mutex_enter(buf_pool);
2388 
2389  if (UT_LIST_GET_LEN(buf_pool->LRU) >= BUF_LRU_OLD_MIN_LEN) {
2390 
2391  ut_a(buf_pool->LRU_old);
2392  old_len = buf_pool->LRU_old_len;
2393  new_len = ut_min(UT_LIST_GET_LEN(buf_pool->LRU)
2394  * buf_pool->LRU_old_ratio
	/* NOTE(review): orig. line 2395 dropped — presumably the
	division by BUF_LRU_OLD_RATIO_DIV and the argument comma. */
2396  UT_LIST_GET_LEN(buf_pool->LRU)
2397  - (BUF_LRU_OLD_TOLERANCE
2398  + BUF_LRU_NON_OLD_MIN_LEN));
2399  ut_a(old_len >= new_len - BUF_LRU_OLD_TOLERANCE);
2400  ut_a(old_len <= new_len + BUF_LRU_OLD_TOLERANCE);
2401  }
2402 
2403  UT_LIST_VALIDATE(LRU, buf_page_t, buf_pool->LRU,
2404  ut_ad(ut_list_node_313->in_LRU_list));
2405 
2406  bpage = UT_LIST_GET_FIRST(buf_pool->LRU);
2407 
2408  old_len = 0;
2409 
2410  while (bpage != NULL) {
2411 
2412  switch (buf_page_get_state(bpage)) {
2413  case BUF_BLOCK_ZIP_FREE:
2414  case BUF_BLOCK_NOT_USED:
	/* NOTE(review): orig. line 2415 dropped — presumably another
	invalid-state case label. */
2416  case BUF_BLOCK_MEMORY:
2417  case BUF_BLOCK_REMOVE_HASH:
2418  ut_error;
2419  break;
2420  case BUF_BLOCK_FILE_PAGE:
2421  ut_ad(((buf_block_t*) bpage)->in_unzip_LRU_list
2422  == buf_page_belongs_to_unzip_LRU(bpage));
2423  case BUF_BLOCK_ZIP_PAGE:
2424  case BUF_BLOCK_ZIP_DIRTY:
2425  break;
2426  }
2427 
2428  if (buf_page_is_old(bpage)) {
2429  const buf_page_t* prev
2430  = UT_LIST_GET_PREV(LRU, bpage);
2431  const buf_page_t* next
2432  = UT_LIST_GET_NEXT(LRU, bpage);
2433 
2434  if (!old_len++) {
2435  ut_a(buf_pool->LRU_old == bpage);
2436  } else {
2437  ut_a(!prev || buf_page_is_old(prev));
2438  }
2439 
2440  ut_a(!next || buf_page_is_old(next));
2441  }
2442 
2443  bpage = UT_LIST_GET_NEXT(LRU, bpage);
2444  }
2445 
2446  ut_a(buf_pool->LRU_old_len == old_len);
2447 
2448  UT_LIST_VALIDATE(list, buf_page_t, buf_pool->free,
2449  ut_ad(ut_list_node_313->in_free_list));
2450 
2451  for (bpage = UT_LIST_GET_FIRST(buf_pool->free);
2452  bpage != NULL;
2453  bpage = UT_LIST_GET_NEXT(list, bpage)) {
2454 
	/* NOTE(review): orig. line 2455 dropped — presumably an
	assertion that each free-list page is BUF_BLOCK_NOT_USED. */
2456  }
2457 
2458  UT_LIST_VALIDATE(unzip_LRU, buf_block_t, buf_pool->unzip_LRU,
2459  ut_ad(ut_list_node_313->in_unzip_LRU_list
2460  && ut_list_node_313->page.in_LRU_list));
2461 
2462  for (block = UT_LIST_GET_FIRST(buf_pool->unzip_LRU);
2463  block;
2464  block = UT_LIST_GET_NEXT(unzip_LRU, block)) {
2465 
2466  ut_ad(block->in_unzip_LRU_list);
2467  ut_ad(block->page.in_LRU_list);
	/* NOTE(review): orig. line 2468 dropped — presumably an
	assertion that the block belongs to the unzip_LRU. */
2469  }
2470 
2471  buf_pool_mutex_exit(buf_pool);
2472 }
2473 
2474 /**********************************************************************/
2477 UNIV_INTERN
2478 ibool
2479 buf_LRU_validate(void)
2480 /*==================*/
2481 {
2482  ulint i;
2483 
2484  for (i = 0; i < srv_buf_pool_instances; i++) {
2485  buf_pool_t* buf_pool;
2486 
2487  buf_pool = buf_pool_from_array(i);
2488  buf_LRU_validate_instance(buf_pool);
2489  }
2490 
2491  return(TRUE);
2492 }
2493 #endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
2494 
2495 #if defined UNIV_DEBUG_PRINT || defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
2496 /**********************************************************************/
2498 UNIV_INTERN
2499 void
2500 buf_LRU_print_instance(
2501 /*===================*/
2502  buf_pool_t* buf_pool)
2503 {
2504  const buf_page_t* bpage;
2505 
2506  ut_ad(buf_pool);
2507  buf_pool_mutex_enter(buf_pool);
2508 
2509  bpage = UT_LIST_GET_FIRST(buf_pool->LRU);
2510 
2511  while (bpage != NULL) {
2512 
2513  mutex_enter(buf_page_get_mutex(bpage));
2514  fprintf(stderr, "BLOCK space %lu page %lu ",
2515  (ulong) buf_page_get_space(bpage),
2516  (ulong) buf_page_get_page_no(bpage));
2517 
2518  if (buf_page_is_old(bpage)) {
2519  fputs("old ", stderr);
2520  }
2521 
2522  if (bpage->buf_fix_count) {
2523  fprintf(stderr, "buffix count %lu ",
2524  (ulong) bpage->buf_fix_count);
2525  }
2526 
2527  if (buf_page_get_io_fix(bpage)) {
2528  fprintf(stderr, "io_fix %lu ",
2529  (ulong) buf_page_get_io_fix(bpage));
2530  }
2531 
2532  if (bpage->oldest_modification) {
2533  fputs("modif. ", stderr);
2534  }
2535 
2536  switch (buf_page_get_state(bpage)) {
2537  const byte* frame;
2538  case BUF_BLOCK_FILE_PAGE:
2539  frame = buf_block_get_frame((buf_block_t*) bpage);
2540  fprintf(stderr, "\ntype %lu"
2541  " index id %llu\n",
2542  (ulong) fil_page_get_type(frame),
2543  (ullint) btr_page_get_index_id(frame));
2544  break;
2545  case BUF_BLOCK_ZIP_PAGE:
2546  frame = bpage->zip.data;
2547  fprintf(stderr, "\ntype %lu size %lu"
2548  " index id %llu\n",
2549  (ulong) fil_page_get_type(frame),
2550  (ulong) buf_page_get_zip_size(bpage),
2551  (ullint) btr_page_get_index_id(frame));
2552  break;
2553 
2554  default:
2555  fprintf(stderr, "\n!state %lu!\n",
2556  (ulong) buf_page_get_state(bpage));
2557  break;
2558  }
2559 
2560  mutex_exit(buf_page_get_mutex(bpage));
2561  bpage = UT_LIST_GET_NEXT(LRU, bpage);
2562  }
2563 
2564  buf_pool_mutex_exit(buf_pool);
2565 }
2566 
2567 /**********************************************************************/
2569 UNIV_INTERN
2570 void
2571 buf_LRU_print(void)
2572 /*===============*/
2573 {
2574  ulint i;
2575  buf_pool_t* buf_pool;
2576 
2577  for (i = 0; i < srv_buf_pool_instances; i++) {
2578  buf_pool = buf_pool_from_array(i);
2579  buf_LRU_print_instance(buf_pool);
2580  }
2581 }
2582 #endif /* UNIV_DEBUG_PRINT || UNIV_DEBUG || UNIV_BUF_DEBUG */
UNIV_INLINE void buf_buddy_free(buf_pool_t *buf_pool, void *buf, ulint size) __attribute__((nonnull))
#define UT_LIST_GET_LEN(BASE)
Definition: ut0lst.h:217
int os_file_t
Definition: os0file.h:87
UNIV_INTERN bool buf_LRU_file_restore(void)
Definition: buf0lru.cc:2231
UNIV_INTERN void btr_search_drop_page_hash_when_freed(ulint space, ulint zip_size, ulint page_no)
Definition: btr0sea.cc:1188
#define buf_pool_mutex_enter(b)
Definition: buf0buf.h:1765
unsigned offset
Definition: buf0buf.h:1281
UNIV_INTERN void buf_LRU_make_block_young(buf_page_t *bpage)
Definition: buf0lru.cc:1394
#define UT_LIST_GET_NEXT(NAME, N)
Definition: ut0lst.h:201
ulint LRU_old_ratio
Definition: buf0buf.h:1621
UNIV_INTERN void buf_flush_remove(buf_page_t *bpage)
Definition: buf0flu.cc:522
UNIV_INLINE ibool buf_page_belongs_to_unzip_LRU(const buf_page_t *bpage) __attribute__((pure))
#define FIL_PAGE_INDEX
Definition: fil0fil.h:173
buf_lru_free_block_status
Definition: buf0lru.h:36
ulint curr_size
Definition: buf0buf.h:1632
UNIV_INLINE void mach_write_to_4(byte *b, ulint n)
UNIV_INTERN ulint buf_read_page_low(ulint *err, ibool sync, ulint mode, ulint space, ulint zip_size, ibool unzip, ib_int64_t tablespace_version, ulint offset)
Definition: buf0rea.cc:61
unsigned state
Definition: buf0buf.h:1284
#define FIL_PAGE_INODE
Definition: fil0fil.h:175
#define UT_LIST_VALIDATE(NAME, TYPE, BASE, ASSERTION)
Definition: ut0lst.h:244
UNIV_INLINE void buf_block_set_state(buf_block_t *block, enum buf_page_state state)
UNIV_INLINE ulint buf_page_get_page_no(const buf_page_t *bpage) __attribute__((pure))
#define FIL_PAGE_TYPE_ALLOCATED
Definition: fil0fil.h:178
buf_page_t * LRU_old
Definition: buf0buf.h:1717
UNIV_INTERN ibool os_file_get_size(os_file_t file, ulint *size, ulint *size_high)
Definition: os0file.cc:1895
#define buf_pool_mutex_exit_forbid(b)
Definition: buf0buf.h:1803
#define FIL_PAGE_SPACE_OR_CHKSUM
Definition: fil0fil.h:75
unsigned space
Definition: buf0buf.h:1279
mutex_t zip_mutex
Definition: buf0buf.h:1613
uint buf_LRU_old_threshold_ms
Definition: buf0lru.cc:119
#define FIL_PAGE_TYPE_ZBLOB2
Definition: fil0fil.h:186
UNIV_INTERN void ut_print_buf(FILE *file, const void *buf, ulint len)
Definition: ut0ut.cc:444
UNIV_INTERN ib_int64_t fil_space_get_version(ulint id)
Definition: fil0fil.cc:488
UNIV_INTERN void buf_LRU_block_free_non_file_page(buf_block_t *block)
Definition: buf0lru.cc:1676
UNIV_INTERN void * ut_malloc(ulint n)
Definition: ut0mem.cc:235
UNIV_INLINE void buf_block_modify_clock_inc(buf_block_t *block)
hash_table_t * page_hash
Definition: buf0buf.h:1633
UNIV_INLINE ibool buf_page_in_file(const buf_page_t *bpage) __attribute__((pure))
#define BUF_LRU_OLD_RATIO_MIN
Definition: buf0lru.h:266
#define BUF_LRU_OLD_RATIO_MAX
Definition: buf0lru.h:260
ulint LRU_flush_ended
Definition: buf0buf.h:1703
UNIV_INTERN ulint fil_space_get_zip_size(ulint id)
Definition: fil0fil.cc:1535
UNIV_INLINE buf_page_t * buf_page_hash_get_low(buf_pool_t *buf_pool, ulint space, ulint offset, ulint fold)
#define FIL_PAGE_TYPE_XDES
Definition: fil0fil.h:183
#define ut_d(EXPR)
Definition: ut0dbg.h:129
#define buf_LRU_stat_inc_io()
Definition: buf0lru.h:304
UNIV_INLINE void buf_page_set_old(buf_page_t *bpage, ibool old)
UNIV_INTERN void buf_flush_free_margin(buf_pool_t *buf_pool)
Definition: buf0flu.cc:2048
#define BUF_LRU_OLD_RATIO_DIV
Definition: buf0lru.h:256
buf_page_t page
Definition: buf0buf.h:1433
UNIV_INLINE unsigned buf_page_is_accessed(const buf_page_t *bpage) __attribute__((nonnull
buf_pool_stat_t stat
Definition: buf0buf.h:1651
UNIV_INTERN ibool buf_LRU_search_and_free_block(buf_pool_t *buf_pool, ulint n_iterations)
Definition: buf0lru.cc:711
#define HASH_INSERT(TYPE, NAME, TABLE, FOLD, DATA)
Definition: hash0hash.h:101
#define BUF_READ_ANY_PAGE
Definition: buf0rea.h:166
#define BUF_NO_CHECKSUM_MAGIC
Definition: buf0buf.h:101
UNIV_INLINE ulint buf_page_address_fold(ulint space, ulint offset) __attribute__((const ))
buf_LRU_stat_t buf_LRU_stat_sum
Definition: buf0lru.cc:112
The buffer pool structure.
Definition: buf0buf.h:1607
UNIV_INLINE ulint ut_min(ulint n1, ulint n2)
UNIV_INTERN enum buf_lru_free_block_status buf_LRU_free_block(buf_page_t *bpage, ibool zip) __attribute__((nonnull))
Definition: buf0lru.cc:1437
#define UT_LIST_REMOVE(NAME, BASE, N)
Definition: ut0lst.h:178
UNIV_INLINE index_id_t btr_page_get_index_id(const page_t *page)
UNIV_INLINE void buf_page_set_state(buf_page_t *bpage, enum buf_page_state state)
UNIV_INLINE ulint buf_page_get_zip_size(const buf_page_t *bpage) __attribute__((pure))
unsigned buf_fix_count
Definition: buf0buf.h:1299
UNIV_INLINE ibool buf_page_can_relocate(const buf_page_t *bpage) __attribute__((pure))
buf_page_state
States of a control block.
Definition: buf0buf.h:107
UNIV_INLINE enum buf_page_state buf_block_get_state(const buf_block_t *block) __attribute__((pure))
UNIV_INLINE buf_pool_t * buf_pool_from_array(ulint index)
page_zip_des_t zip
Definition: buf0buf.h:1308
UNIV_INLINE void * ut_align(const void *ptr, ulint align_no)
UNIV_INTERN void buf_LRU_make_block_old(buf_page_t *bpage)
Definition: buf0lru.cc:1414
UNIV_INTERN void os_event_set(os_event_t event)
Definition: os0sync.cc:434
#define FIL_PAGE_TYPE_ZBLOB
Definition: fil0fil.h:185
UNIV_INTERN void os_thread_sleep(ulint tm)
Definition: os0thread.cc:265
UNIV_INTERN void buf_flush_free_margins(void)
Definition: buf0flu.cc:2074
mutex_t mutex
Definition: buf0buf.h:1452
UNIV_INLINE enum buf_page_state buf_page_get_state(const buf_page_t *bpage)
UNIV_INTERN void buf_unzip_LRU_add_block(buf_block_t *block, ibool old)
Definition: buf0lru.cc:1234
UNIV_INLINE enum buf_io_fix buf_page_get_io_fix(const buf_page_t *bpage) __attribute__((pure))
#define ut_a(EXPR)
Definition: ut0dbg.h:105
UNIV_INLINE void page_zip_set_size(page_zip_des_t *page_zip, ulint size)
unsigned old
Definition: buf0buf.h:1404
#define UT_LIST_GET_PREV(NAME, N)
Definition: ut0lst.h:209
#define UT_LIST_INSERT_AFTER(NAME, BASE, NODE1, NODE2)
Definition: ut0lst.h:142
buf_LRU_stat_t buf_LRU_stat_cur
Definition: buf0lru.cc:108
UNIV_INTERN ulint fil_page_get_type(const byte *page)
Definition: fil0fil.cc:4915
UNIV_INTERN ulint os_file_get_last_error(ibool report_all_errors)
Definition: os0file.cc:385
UNIV_INLINE ibool buf_page_is_old(const buf_page_t *bpage) __attribute__((pure))
#define UT_LIST_ADD_LAST(NAME, BASE, N)
Definition: ut0lst.h:119
ulint freed_page_clock
Definition: buf0buf.h:1694
#define UT_LIST_GET_FIRST(BASE)
Definition: ut0lst.h:224
ulint n_ra_pages_evicted
Definition: buf0buf.h:1581
UNIV_INLINE void * buf_buddy_alloc(buf_pool_t *buf_pool, ulint size, ibool *lru) __attribute__((malloc))
UNIV_INLINE ulint buf_page_get_space(const buf_page_t *bpage) __attribute__((pure))
#define OS_DATA_FILE
Definition: os0file.h:125
#define ut_ad(EXPR)
Definition: ut0dbg.h:127
UNIV_INTERN void btr_search_drop_page_hash_index(buf_block_t *block)
Definition: btr0sea.cc:1020
UNIV_INLINE buf_pool_t * buf_pool_from_bpage(const buf_page_t *bpage)
ulint fil_n_pending_tablespace_flushes
Definition: fil0fil.cc:120
UNIV_INTERN void buf_LRU_try_free_flushed_blocks(buf_pool_t *buf_pool)
Definition: buf0lru.cc:757
UNIV_INTERN void ut_free(void *ptr)
Definition: ut0mem.cc:294
#define ut_error
Definition: ut0dbg.h:115
UNIV_INLINE void buf_page_set_io_fix(buf_page_t *bpage, enum buf_io_fix io_fix)
UNIV_INTERN byte UNIV_INTERN ulint page_zip_calc_checksum(const void *data, ulint size) __attribute__((nonnull))
Definition: page0zip.cc:4650
unsigned io_fix
Definition: buf0buf.h:1296
ulint LRU_old_len
Definition: buf0buf.h:1724
#define FIL_PAGE_OFFSET
Definition: fil0fil.h:82
#define buf_pool_mutex_exit_allow(b)
Definition: buf0buf.h:1805
#define UT_LIST_ADD_FIRST(NAME, BASE, N)
Definition: ut0lst.h:97
UNIV_INLINE ulint mach_read_from_4(const byte *b) __attribute__((nonnull
UNIV_INTERN void buf_LRU_add_block(buf_page_t *bpage, ibool old)
Definition: buf0lru.cc:1378
UNIV_INTERN bool buf_LRU_file_dump(void)
Definition: buf0lru.cc:2100
page_zip_t * data
Definition: page0types.h:68
UNIV_INTERN void ut_print_timestamp(FILE *file)
Definition: ut0ut.cc:247
UNIV_INTERN void os_aio_simulated_wake_handler_threads(void)
Definition: os0file.cc:3908
#define OS_FILE_OPEN
Definition: os0file.h:107
ib_uint64_t oldest_modification
Definition: buf0buf.h:1378
UNIV_INTERN buf_block_t * buf_LRU_get_free_only(buf_pool_t *buf_pool)
Definition: buf0lru.cc:825
ibool recv_recovery_on
Definition: log0recv.cc:77
byte page_t
Definition: page0types.h:37
UNIV_INLINE buf_pool_t * buf_pool_from_block(const buf_block_t *block)
UNIV_INLINE mutex_t * buf_page_get_mutex(const buf_page_t *bpage) __attribute__((pure))
UNIV_INTERN void buf_LRU_invalidate_tablespace(ulint id)
Definition: buf0lru.cc:508
UNIV_INTERN ibool buf_LRU_buf_pool_running_out(void)
Definition: buf0lru.cc:792
#define HASH_DELETE(TYPE, NAME, TABLE, FOLD, DATA)
Definition: hash0hash.h:137
#define buf_pool_mutex_own(b)
Definition: buf0buf.h:1763
UNIV_INTERN ulint buf_LRU_old_ratio_update(uint old_pct, ibool adjust)
Definition: buf0lru.cc:2018
#define FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID
Definition: fil0fil.h:156
UNIV_INTERN bool fil_is_exist(ulint space_id, ulint block_offset)
Definition: fil0fil.cc:4502
#define OS_AIO_SIMULATED_WAKE_LATER
Definition: os0file.h:180
Statistics for selecting the LRU list for eviction.
Definition: buf0lru.h:285
#define FIL_PAGE_IBUF_BITMAP
Definition: fil0fil.h:179
ulint fil_n_pending_log_flushes
Definition: fil0fil.cc:118
ulint n_pages_made_young
Definition: buf0buf.h:1584
ulint srv_buf_pool_instances
Definition: srv0srv.cc:255
#define FIL_PAGE_TYPE_FSP_HDR
Definition: fil0fil.h:182
#define buf_pool_mutex_exit(b)
Definition: buf0buf.h:1807
#define UT_LIST_GET_LAST(BASE)
Definition: ut0lst.h:235
UNIV_INTERN void buf_LRU_insert_zip_clean(buf_page_t *bpage)
Definition: buf0lru.cc:533
#define BUF_LRU_OLD_MIN_LEN
Definition: buf0lru.h:75
unsigned freed_page_clock
Definition: buf0buf.h:1406
UNIV_INLINE ulint page_zip_get_size(const page_zip_des_t *page_zip) __attribute__((nonnull
UNIV_INTERN void buf_LRU_stat_update(void)
Definition: buf0lru.cc:2046
UNIV_INTERN void buf_flush_relocate_on_flush_list(buf_page_t *bpage, buf_page_t *dpage)
Definition: buf0flu.cc:585
UNIV_INTERN buf_block_t * buf_LRU_get_free_block(buf_pool_t *buf_pool) __attribute__((nonnull