40 #if !defined(BUILD_DRIZZLE)
41 # include <mysql/plugin.h>
65 #include <drizzled/session.h>
/* Number of rows requested for a table cache's first memory chunk
(see req_rows = TABLE_CACHE_INITIAL_ROWSNUM in the allocation code
below); later chunks presumably grow from this -- confirm the growth
policy against the full allocation code. */
#define TABLE_CACHE_INITIAL_ROWSNUM 1024

/* Fixed number of chunk slots per table cache; every loop over a
cache's chunks below iterates exactly this many entries. */
#define MEM_CHUNKS_IN_TABLE_CACHE 39
/* Compile-time debug switches consumed by matching #ifdef/#ifndef
blocks further below.  Leaving them defined changes runtime behavior
(e.g. the hash fold becomes non-deterministic, duplicate-row checks
and hash-table insertion are skipped), so they must stay commented
out in normal builds; uncomment individually for local debugging
only. */
/* #define TEST_LOCK_FOLD_ALWAYS_DIFFERENT */
/* #define TEST_NO_LOCKS_ROW_IS_EVER_EQUAL_TO_LOCK_T */
/* #define TEST_ADD_EACH_LOCKS_ROW_MANY_TIMES */
/* #define TEST_DO_NOT_CHECK_FOR_DUPLICATE_ROWS */
/* #define TEST_DO_NOT_INSERT_INTO_THE_HASH_TABLE */
124 #define MAX_ALLOWED_FOR_STORAGE(cache) \
126 - (cache)->mem_allocd)
131 #define MAX_ALLOWED_FOR_ALLOC(cache) \
133 - (cache)->mem_allocd \
134 - ha_storage_get_size((cache)->storage))
/* Number of cells in the hash table used to look up rows of the
innodb_locks cache by (lock, heap_no); created with hash_create()
below. */
#define LOCKS_HASH_CELLS_NUM 10000

/* Initial size, in bytes, of the cache's auxiliary storage, which
keeps copies of variable-length data (query text, lock data strings)
via ha_storage_put_memlim(). */
#define CACHE_STORAGE_INITIAL_SIZE 1024

/* Number of hash cells used inside that auxiliary storage. */
#define CACHE_STORAGE_HASH_CELLS 2048
200 #ifdef UNIV_PFS_RWLOCK
201 UNIV_INTERN mysql_pfs_key_t trx_i_s_cache_lock_key;
204 #ifdef UNIV_PFS_MUTEX
205 UNIV_INTERN mysql_pfs_key_t cache_last_read_mutex_key;
/* NOTE(review): only scattered lines of this helper are visible in
   this excerpt.  From what is shown it produces a heap number,
   asserting a defined value in one branch and using ULINT_UNDEFINED
   as the "no heap number" result in another -- presumably record
   locks vs. table locks; confirm against the full source. */
214 wait_lock_get_heap_no(
223 ut_a(ret != ULINT_UNDEFINED);
226 ret = ULINT_UNDEFINED;
251 for (i = 0; i < MEM_CHUNKS_IN_TABLE_CACHE; i++) {
269 for (i = 0; i < MEM_CHUNKS_IN_TABLE_CACHE; i++) {
288 table_cache_create_empty_row(
314 for (i = 0; i < MEM_CHUNKS_IN_TABLE_CACHE; i++) {
324 ut_a(i < MEM_CHUNKS_IN_TABLE_CACHE);
331 req_rows = TABLE_CACHE_INITIAL_ROWSNUM;
346 req_bytes = req_rows * table_cache->
row_size;
348 if (req_bytes > MAX_ALLOWED_FOR_ALLOC(cache)) {
353 chunk = &table_cache->
chunks[i];
355 chunk->
base = mem_alloc2(req_bytes, &got_bytes);
357 got_rows = got_bytes / table_cache->
row_size;
/* NOTE(review): debug trace of chunk allocation sizes.  In upstream
   InnoDB this printf is commented out; the surrounding comment
   markers appear to have been lost in extraction, which would leave
   it printing on every chunk allocation -- verify and re-disable. */
362 printf(
"allocating chunk %d req bytes=%lu, got bytes=%lu, "
364 "req rows=%lu, got rows=%lu\n",
365 i, req_bytes, got_bytes,
375 if (i < MEM_CHUNKS_IN_TABLE_CACHE - 1) {
394 for (i = 0; i < MEM_CHUNKS_IN_TABLE_CACHE; i++) {
409 ut_a(i < MEM_CHUNKS_IN_TABLE_CACHE);
411 chunk_start = (
char*) table_cache->
chunks[i].
base;
415 row = chunk_start + offset * table_cache->
row_size;
429 i_s_locks_row_validate(
484 ut_ad(mutex_own(&kernel_mutex));
490 ut_ad(requested_lock_row == NULL
491 || i_s_locks_row_validate(requested_lock_row));
494 ut_a(requested_lock_row != NULL);
497 ut_a(requested_lock_row == NULL);
513 stmt= trx->
mysql_thd->getQueryStringCopy(stmt_len);
/* Copy the statement text into the local buffer and explicitly
   NUL-terminate it; the resulting stmt_len + 1 bytes are then handed
   to ha_storage_put_memlim() just below. */
522 memcpy(query, stmt, stmt_len);
523 query[stmt_len] =
'\0';
525 row->
trx_query =
static_cast<const char *
>(ha_storage_put_memlim(
526 cache->
storage, query, stmt_len + 1,
527 MAX_ALLOWED_FOR_STORAGE(cache)));
541 if (s != NULL && s[0] !=
'\0') {
569 switch (trx->isolation_level) {
570 case TRX_ISO_READ_UNCOMMITTED:
573 case TRX_ISO_READ_COMMITTED:
576 case TRX_ISO_REPEATABLE_READ:
579 case TRX_ISO_SERIALIZABLE:
593 if (s != NULL && s[0] !=
'\0') {
628 const ulint* offsets)
654 memcpy(buf,
", ", 3);
663 data = rec_get_nth_field(rec, offsets, n, &data_len);
665 dict_field = dict_index_get_nth_field(index, n);
668 dict_field, buf, buf_size);
681 const char** lock_data,
710 page = (
const page_t*) buf_block_get_frame(block);
717 cache->
storage,
"infimum pseudo-record",
718 MAX_ALLOWED_FOR_STORAGE(cache));
722 cache->
storage,
"supremum pseudo-record",
723 MAX_ALLOWED_FOR_STORAGE(cache));
729 ulint offsets_onstack[REC_OFFS_NORMAL_SIZE];
735 rec_offs_init(offsets_onstack);
736 offsets = offsets_onstack;
745 offsets = rec_get_offsets(rec, index, offsets, n_fields,
751 for (i = 0; i < n_fields; i++) {
753 buf_used += put_nth_field(
754 buf + buf_used,
sizeof(buf) - buf_used,
755 i, index, rec, offsets) - 1;
758 *lock_data = (
const char*) ha_storage_put_memlim(
759 cache->
storage, buf, buf_used + 1,
760 MAX_ALLOWED_FOR_STORAGE(cache));
762 if (UNIV_UNLIKELY(heap != NULL)) {
767 ut_a(offsets != offsets_onstack);
774 if (*lock_data == NULL) {
804 MAX_ALLOWED_FOR_STORAGE(cache));
816 MAX_ALLOWED_FOR_STORAGE(cache));
828 if (!fill_lock_data(&row->
lock_data, lock, heap_no, cache)) {
852 ut_ad(i_s_locks_row_validate(row));
873 ut_ad(i_s_locks_row_validate(requested_lock_row));
874 ut_ad(i_s_locks_row_validate(blocking_lock_row));
897 #ifdef TEST_LOCK_FOLD_ALWAYS_DIFFERENT
898 static ulint fold = 0;
906 ut_a(heap_no != ULINT_UNDEFINED);
921 ut_a(heap_no == ULINT_UNDEFINED);
947 ut_ad(i_s_locks_row_validate(row));
948 #ifdef TEST_NO_LOCKS_ROW_IS_EVER_EQUAL_TO_LOCK_T
953 ut_a(heap_no != ULINT_UNDEFINED);
964 ut_a(heap_no == ULINT_UNDEFINED);
999 fold_lock(lock, heap_no),
1005 ut_ad(i_s_locks_row_validate(hash_chain->
value)),
1007 locks_row_eq_lock(hash_chain->
value, lock, heap_no));
1009 if (hash_chain == NULL) {
1015 return(hash_chain->
value);
1036 #ifdef TEST_ADD_EACH_LOCKS_ROW_MANY_TIMES
1038 for (i = 0; i < 10000; i++) {
1040 #ifndef TEST_DO_NOT_CHECK_FOR_DUPLICATE_ROWS
1042 dst_row = search_innodb_locks(cache, lock, heap_no);
1043 if (dst_row != NULL) {
1045 ut_ad(i_s_locks_row_validate(dst_row));
1051 table_cache_create_empty_row(&cache->
innodb_locks, cache);
1054 if (dst_row == NULL) {
1059 if (!fill_locks_row(dst_row, lock, heap_no, cache)) {
1066 #ifndef TEST_DO_NOT_INSERT_INTO_THE_HASH_TABLE
1075 fold_lock(lock, heap_no),
1079 #ifdef TEST_ADD_EACH_LOCKS_ROW_MANY_TIMES
1083 ut_ad(i_s_locks_row_validate(dst_row));
1093 add_lock_wait_to_cache(
1110 if (dst_row == NULL) {
1115 fill_lock_waits_row(dst_row, requested_lock_row, blocking_lock_row);
1130 add_trx_relevant_locks_to_cache(
1138 ut_ad(mutex_own(&kernel_mutex));
1142 if (trx->
que_state == TRX_QUE_LOCK_WAIT) {
1145 ulint wait_lock_heap_no;
1152 = wait_lock_get_heap_no(trx->
wait_lock);
1156 = add_lock_to_cache(cache, trx->
wait_lock,
1160 if (*requested_lock_row == NULL) {
1172 while (curr_lock != NULL) {
1180 = add_lock_to_cache(
1188 if (blocking_lock_row == NULL) {
1195 if (!add_lock_wait_to_cache(
1196 cache, *requested_lock_row,
1197 blocking_lock_row)) {
1208 *requested_lock_row = NULL;
/* Minimum idle period, in microseconds, since the cache was last
read before it is allowed to be refreshed again (compared against
now - cache->last_read below). */
#define CACHE_MIN_IDLE_TIME_US 100000
1225 can_cache_be_updated(
1238 #ifdef UNIV_SYNC_DEBUG
1243 if (now - cache->
last_read > CACHE_MIN_IDLE_TIME_US) {
1256 trx_i_s_cache_clear(
1274 fetch_data_into_cache(
1282 ut_ad(mutex_own(&kernel_mutex));
1284 trx_i_s_cache_clear(cache);
1295 if (!add_trx_relevant_locks_to_cache(cache, trx,
1296 &requested_lock_row)) {
1303 table_cache_create_empty_row(&cache->
innodb_trx,
1307 if (trx_row == NULL) {
1313 if (!fill_trx_row(trx_row, trx, requested_lock_row, cache)) {
1335 if (!can_cache_be_updated(cache)) {
1341 mutex_enter(&kernel_mutex);
1343 fetch_data_into_cache(cache);
1345 mutex_exit(&kernel_mutex);
1382 SYNC_TRX_I_S_RWLOCK);
1386 mutex_create(cache_last_read_mutex_key,
1394 cache->
locks_hash = hash_create(LOCKS_HASH_CELLS_NUM);
1397 CACHE_STORAGE_HASH_CELLS);
1417 memset(cache, 0,
sizeof *cache);
1441 #ifdef UNIV_SYNC_DEBUG
1442 ut_a(rw_lock_own(&cache->
rw_lock, RW_LOCK_SHARED));
1451 rw_lock_s_unlock(&cache->
rw_lock);
1462 rw_lock_x_lock(&cache->
rw_lock);
1473 #ifdef UNIV_SYNC_DEBUG
1477 rw_lock_x_unlock(&cache->
rw_lock);
1492 #ifdef UNIV_SYNC_DEBUG
1494 || rw_lock_own(&cache->
rw_lock, RW_LOCK_EX));
1511 return(table_cache);
1527 table_cache = cache_select_table(cache, table);
1548 table_cache = cache_select_table(cache, table);
1550 ut_a(n < table_cache->rows_used);
1554 for (i = 0; i < MEM_CHUNKS_IN_TABLE_CACHE; i++) {
1607 ut_a((ulint) res_len < lock_id_size);