#if !defined(BUILD_DRIZZLE)
# include <mysql/plugin.h>
#include <drizzled/session.h>
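
/* Rows for the INFORMATION_SCHEMA table caches (innodb_trx, innodb_locks,
innodb_lock_waits) are allocated in chunks: the first chunk holds
TABLE_CACHE_INITIAL_ROWSNUM rows and up to MEM_CHUNKS_IN_TABLE_CACHE chunks
may be allocated per table cache, each later chunk requesting more rows
than the previous one. */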
#define TABLE_CACHE_INITIAL_ROWSNUM 1024
#define MEM_CHUNKS_IN_TABLE_CACHE 39
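
/* The TEST_* macros below are testing aids that deliberately weaken the
duplicate-detection machinery (hash folding, row equality checks, hash
insertion). In the upstream source they are normally left disabled; do not
enable them in a production build. */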
#define TEST_LOCK_FOLD_ALWAYS_DIFFERENT
#define TEST_NO_LOCKS_ROW_IS_EVER_EQUAL_TO_LOCK_T
#define TEST_ADD_EACH_LOCKS_ROW_MANY_TIMES
#define TEST_DO_NOT_CHECK_FOR_DUPLICATE_ROWS
#define TEST_DO_NOT_INSERT_INTO_THE_HASH_TABLE
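
/* Memory budget checks for the cache: MAX_ALLOWED_FOR_STORAGE() is how many
more bytes may go into the string storage and MAX_ALLOWED_FOR_ALLOC() how
many more may be allocated for rows, both derived from the overall
TRX_I_S_MEM_LIMIT budget (defined in trx0i_s.h) minus what has already been
used. */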
#define MAX_ALLOWED_FOR_STORAGE(cache) \
        (TRX_I_S_MEM_LIMIT \
         - (cache)->mem_allocd)

#define MAX_ALLOWED_FOR_ALLOC(cache) \
        (TRX_I_S_MEM_LIMIT \
         - (cache)->mem_allocd \
         - ha_storage_get_size((cache)->storage))
#define LOCKS_HASH_CELLS_NUM 10000
#define CACHE_STORAGE_INITIAL_SIZE 1024
#define CACHE_STORAGE_HASH_CELLS 2048
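
/* LOCKS_HASH_CELLS_NUM sizes the hash table used to detect duplicate rows
in innodb_locks; CACHE_STORAGE_INITIAL_SIZE and CACHE_STORAGE_HASH_CELLS
size the ha_storage object that interns strings (queries, lock data, table
and index names) referenced from cached rows. */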
#ifdef UNIV_PFS_RWLOCK
UNIV_INTERN mysql_pfs_key_t trx_i_s_cache_lock_key;
#ifdef UNIV_PFS_MUTEX
UNIV_INTERN mysql_pfs_key_t cache_last_read_mutex_key;
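
/* wait_lock_get_heap_no(): for a record lock this returns the heap number
of the record being waited for; for a table lock it returns
ULINT_UNDEFINED, which explains the differing assertions below. */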
wait_lock_get_heap_no(
ut_a(ret != ULINT_UNDEFINED);
ret = ULINT_UNDEFINED;
for (i = 0; i < MEM_CHUNKS_IN_TABLE_CACHE; i++) {
for (i = 0; i < MEM_CHUNKS_IN_TABLE_CACHE; i++) {
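
/* table_cache_create_empty_row(): returns a pointer to an unused row in the
given table cache, allocating a new memory chunk when all previously
allocated rows are in use. */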
table_cache_create_empty_row(
for (i = 0; i < MEM_CHUNKS_IN_TABLE_CACHE; i++) {
ut_a(i < MEM_CHUNKS_IN_TABLE_CACHE);
req_rows = TABLE_CACHE_INITIAL_ROWSNUM;
req_bytes = req_rows * table_cache->row_size;
if (req_bytes > MAX_ALLOWED_FOR_ALLOC(cache)) {
chunk = &table_cache->chunks[i];
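
/* mem_alloc2() may return more memory than requested; got_bytes holds the
size actually allocated, so got_rows can exceed req_rows. */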
chunk->base = mem_alloc2(req_bytes, &got_bytes);
got_rows = got_bytes / table_cache->row_size;
printf("allocating chunk %d req bytes=%lu, got bytes=%lu, "
       "req rows=%lu, got rows=%lu\n",
       i, req_bytes, got_bytes,
       req_rows, got_rows);
if (i < MEM_CHUNKS_IN_TABLE_CACHE - 1) {
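
/* Locate the row: walk the chunks to find the one containing the next free
row, then compute its address from the offset within that chunk and the
fixed per-row size of this table cache. */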
for (i = 0; i < MEM_CHUNKS_IN_TABLE_CACHE; i++) {
ut_a(i < MEM_CHUNKS_IN_TABLE_CACHE);
chunk_start = (char*) table_cache->chunks[i].base;
row = chunk_start + offset * table_cache->row_size;
i_s_locks_row_validate(
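
/* fill_trx_row() runs with kernel_mutex held. If the transaction is waiting
for a lock, requested_lock_row must already point at the matching
innodb_locks row; otherwise it must be NULL, as the paired assertions below
check. */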
ut_ad(mutex_own(&kernel_mutex));
ut_ad(requested_lock_row == NULL
      || i_s_locks_row_validate(requested_lock_row));
ut_a(requested_lock_row != NULL);
ut_a(requested_lock_row == NULL);
stmt= trx->mysql_thd->getQueryStringCopy(stmt_len);
memcpy(query, stmt, stmt_len);
query[stmt_len] = '\0';
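
/* Intern the truncated, NUL-terminated copy of the statement in the cache's
string storage, subject to the overall memory limit. */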
row->trx_query = static_cast<const char *>(ha_storage_put_memlim(
        cache->storage, query, stmt_len + 1,
        MAX_ALLOWED_FOR_STORAGE(cache)));
if (s != NULL && s[0] != '\0') {
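
/* Map the transaction's isolation level to the string reported in the
trx_isolation_level column. */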
switch (trx->isolation_level) {
case TRX_ISO_READ_UNCOMMITTED:
case TRX_ISO_READ_COMMITTED:
case TRX_ISO_REPEATABLE_READ:
case TRX_ISO_SERIALIZABLE:
if (s != NULL && s[0] != '\0') {
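
/* put_nth_field() formats the n-th column of a record into buf, prefixing
every column but the first with ", ", and returns the number of bytes
written including the terminating NUL. */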
const ulint* offsets)
memcpy(buf, ", ", 3);
data = rec_get_nth_field(rec, offsets, n, &data_len);
dict_field = dict_index_get_nth_field(index, n);
        dict_field, buf, buf_size);
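
/* fill_lock_data() builds the printable lock_data value for a record lock:
the infimum and supremum heap numbers are stored as fixed pseudo-record
strings, ordinary records are formatted field by field with
put_nth_field(). */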
const char** lock_data,
page = (const page_t*) buf_block_get_frame(block);
        cache->storage, "infimum pseudo-record",
        MAX_ALLOWED_FOR_STORAGE(cache));
        cache->storage, "supremum pseudo-record",
        MAX_ALLOWED_FOR_STORAGE(cache));
ulint offsets_onstack[REC_OFFS_NORMAL_SIZE];
rec_offs_init(offsets_onstack);
offsets = offsets_onstack;
offsets = rec_get_offsets(rec, index, offsets, n_fields,
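
/* rec_get_offsets() reuses the on-stack offsets array when it is large
enough and otherwise allocates from a heap, which is why the heap is freed
conditionally further down. */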
for (i = 0; i < n_fields; i++) {
buf_used += put_nth_field(
        buf + buf_used, sizeof(buf) - buf_used,
        i, index, rec, offsets) - 1;
*lock_data = (const char*) ha_storage_put_memlim(
        cache->storage, buf, buf_used + 1,
        MAX_ALLOWED_FOR_STORAGE(cache));
if (UNIV_UNLIKELY(heap != NULL)) {
ut_a(offsets != offsets_onstack);
if (*lock_data == NULL) {
        MAX_ALLOWED_FOR_STORAGE(cache));
        MAX_ALLOWED_FOR_STORAGE(cache));
if (!fill_lock_data(&row->lock_data, lock, heap_no, cache)) {
ut_ad(i_s_locks_row_validate(row));
ut_ad(i_s_locks_row_validate(requested_lock_row));
ut_ad(i_s_locks_row_validate(blocking_lock_row));
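
/* fold_lock() and locks_row_eq_lock() hash and compare cached rows against
lock_t objects: a record lock is identified by (space, page, heap_no) and a
table lock by its table id, so heap_no == ULINT_UNDEFINED distinguishes the
two cases. */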
#ifdef TEST_LOCK_FOLD_ALWAYS_DIFFERENT
static ulint fold = 0;
ut_a(heap_no != ULINT_UNDEFINED);
ut_a(heap_no == ULINT_UNDEFINED);
ut_ad(i_s_locks_row_validate(row));
#ifdef TEST_NO_LOCKS_ROW_IS_EVER_EQUAL_TO_LOCK_T
ut_a(heap_no != ULINT_UNDEFINED);
ut_a(heap_no == ULINT_UNDEFINED);
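
/* search_innodb_locks() walks the locks_hash chain selected by fold_lock();
the expressions below are the fold, the per-node assertion and the equality
test passed to the HASH_SEARCH() macro. */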
        fold_lock(lock, heap_no),
        ut_ad(i_s_locks_row_validate(hash_chain->value)),
        locks_row_eq_lock(hash_chain->value, lock, heap_no));
if (hash_chain == NULL) {
return(hash_chain->value);
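
/* add_lock_to_cache(): unless disabled by the TEST_* toggles, first search
for an existing row describing the same lock; only when none is found is a
fresh innodb_locks row created, filled and inserted into the hash table. */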
#ifdef TEST_ADD_EACH_LOCKS_ROW_MANY_TIMES
for (i = 0; i < 10000; i++) {
#ifndef TEST_DO_NOT_CHECK_FOR_DUPLICATE_ROWS
dst_row = search_innodb_locks(cache, lock, heap_no);
if (dst_row != NULL) {
ut_ad(i_s_locks_row_validate(dst_row));
table_cache_create_empty_row(&cache->innodb_locks, cache);
if (dst_row == NULL) {
if (!fill_locks_row(dst_row, lock, heap_no, cache)) {
#ifndef TEST_DO_NOT_INSERT_INTO_THE_HASH_TABLE
        fold_lock(lock, heap_no),
#ifdef TEST_ADD_EACH_LOCKS_ROW_MANY_TIMES
ut_ad(i_s_locks_row_validate(dst_row));
add_lock_wait_to_cache(
if (dst_row == NULL) {
fill_lock_waits_row(dst_row, requested_lock_row, blocking_lock_row);
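
/* add_trx_relevant_locks_to_cache(): when the transaction is waiting
(que_state == TRX_QUE_LOCK_WAIT) this adds the waited-for lock, every lock
that blocks it, and one innodb_lock_waits row per blocking lock; for a
non-waiting transaction *requested_lock_row is simply set to NULL. */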
add_trx_relevant_locks_to_cache(
ut_ad(mutex_own(&kernel_mutex));
if (trx->que_state == TRX_QUE_LOCK_WAIT) {
ulint wait_lock_heap_no;
        = wait_lock_get_heap_no(trx->wait_lock);
        = add_lock_to_cache(cache, trx->wait_lock,
if (*requested_lock_row == NULL) {
while (curr_lock != NULL) {
        = add_lock_to_cache(
if (blocking_lock_row == NULL) {
if (!add_lock_wait_to_cache(
        cache, *requested_lock_row,
        blocking_lock_row)) {
*requested_lock_row = NULL;
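
/* The cache is refreshed only if at least CACHE_MIN_IDLE_TIME_US
microseconds (0.1 s) have passed since it was last read, which is what
can_cache_be_updated() checks. */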
#define CACHE_MIN_IDLE_TIME_US 100000
can_cache_be_updated(
#ifdef UNIV_SYNC_DEBUG
if (now - cache->last_read > CACHE_MIN_IDLE_TIME_US) {
trx_i_s_cache_clear(
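
/* fetch_data_into_cache() rebuilds the whole cache under kernel_mutex: it
clears the previous contents and then, for every transaction in the system,
adds the relevant lock rows followed by the innodb_trx row. */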
fetch_data_into_cache(
ut_ad(mutex_own(&kernel_mutex));
trx_i_s_cache_clear(cache);
if (!add_trx_relevant_locks_to_cache(cache, trx,
        &requested_lock_row)) {
table_cache_create_empty_row(&cache->innodb_trx,
if (trx_row == NULL) {
if (!fill_trx_row(trx_row, trx, requested_lock_row, cache)) {
if (!can_cache_be_updated(cache)) {
mutex_enter(&kernel_mutex);
fetch_data_into_cache(cache);
mutex_exit(&kernel_mutex);
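
/* trx_i_s_cache_init() sets up the cache rw-lock, the last-read mutex, the
three table caches, the locks hash table and the shared string storage;
trx_i_s_cache_free() releases them and wipes the structure. */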
        SYNC_TRX_I_S_RWLOCK);
mutex_create(cache_last_read_mutex_key,
cache->locks_hash = hash_create(LOCKS_HASH_CELLS_NUM);
        CACHE_STORAGE_HASH_CELLS);
memset(cache, 0, sizeof *cache);
#ifdef UNIV_SYNC_DEBUG
ut_a(rw_lock_own(&cache->rw_lock, RW_LOCK_SHARED));
rw_lock_s_unlock(&cache->rw_lock);
rw_lock_x_lock(&cache->rw_lock);
#ifdef UNIV_SYNC_DEBUG
rw_lock_x_unlock(&cache->rw_lock);
#ifdef UNIV_SYNC_DEBUG
        || rw_lock_own(&cache->rw_lock, RW_LOCK_EX));
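
/* cache_select_table() maps an I_S table id to the corresponding table
cache and, under UNIV_SYNC_DEBUG, asserts that the caller holds the cache
rw-lock; the accessors below then read rows_used or locate the n-th row by
walking the chunks. */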
return(table_cache);
table_cache = cache_select_table(cache, table);
table_cache = cache_select_table(cache, table);
ut_a(n < table_cache->rows_used);
for (i = 0; i < MEM_CHUNKS_IN_TABLE_CACHE; i++) {
ut_a((ulint) res_len < lock_id_size);