#include <boost/dynamic_bitset.hpp>

#include <drizzled/check_stack_overrun.h>
#include <drizzled/error.h>
#include <drizzled/field/num.h>
#include <drizzled/internal/iocache.h>
#include <drizzled/internal/my_sys.h>
#include <drizzled/item/cmpfunc.h>
#include <drizzled/optimizer/cost_vector.h>
#include <drizzled/optimizer/quick_group_min_max_select.h>
#include <drizzled/optimizer/quick_index_merge_select.h>
#include <drizzled/optimizer/quick_range.h>
#include <drizzled/optimizer/quick_range_select.h>
#include <drizzled/optimizer/quick_ror_intersect_select.h>
#include <drizzled/optimizer/quick_ror_union_select.h>
#include <drizzled/optimizer/range.h>
#include <drizzled/optimizer/range_param.h>
#include <drizzled/optimizer/sel_arg.h>
#include <drizzled/optimizer/sel_imerge.h>
#include <drizzled/optimizer/sel_tree.h>
#include <drizzled/optimizer/sum.h>
#include <drizzled/optimizer/table_read_plan.h>
#include <drizzled/plugin/storage_engine.h>
#include <drizzled/records.h>
#include <drizzled/sql_base.h>
#include <drizzled/sql_select.h>

#include <drizzled/session.h>
#include <drizzled/key.h>
#include <drizzled/unique.h>

#include <drizzled/sql_lex.h>
#include <drizzled/system_variables.h>
static const int HA_END_SPACE_KEY= 0;
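/* Convert a row-count estimate held in a double back to ha_rows. */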
static inline ha_rows double2rows(double x)
{
  return static_cast<ha_rows>(x);
}

static unsigned char is_null_string[2]= {1, 0};
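/*
  Cost of retrieving the rows found by a range scan from the base table.
  With a clustered primary key the rows come from the key itself, so the
  engine's read_time() estimate is used directly.  Otherwise the number of
  distinct blocks touched is estimated with the standard
  n_blocks * (1 - (1 - 1/n_blocks)^nrows) formula and a seek is charged
  per busy block.
*/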
  if (table->cursor->primary_key_is_clustered())
  {
    cost->setIOCount(table->cursor->read_time(table->getShare()->getPrimaryKey(),
                                              static_cast<uint32_t>(nrows),
                                              nrows));
  }
  else
  {
    double n_blocks=
      ceil(static_cast<double>(table->cursor->stats.data_file_length) / IO_SIZE);
    double busy_blocks=
      n_blocks * (1.0 - pow(1.0 - 1.0/n_blocks, static_cast<double>(nrows)));
    if (busy_blocks < 1.0)
      busy_blocks= 1.0;

    cost->setIOCount(busy_blocks);

    cost->setAvgIOCost((DISK_SEEK_BASE_COST +
                        DISK_SEEK_PROP_COST*n_blocks/busy_blocks));
  }
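/* Forward declarations of the range-analysis helpers defined below. */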
static optimizer::SEL_TREE *get_mm_parts(optimizer::RangeParameter *param,
                                         COND *cond_func,
                                         Field *field,
                                         Item_func::Functype type,
                                         Item *value,
                                         Item_result cmp_type);

static optimizer::SEL_ARG *get_mm_leaf(optimizer::RangeParameter *param,
                                       COND *cond_func,
                                       Field *field,
                                       KEY_PART *key_part,
                                       Item_func::Functype type,
                                       Item *value);

static optimizer::SEL_TREE *get_mm_tree(optimizer::RangeParameter *param, COND *cond);

static bool is_key_scan_ror(optimizer::Parameter *param, uint32_t keynr, uint8_t nparts);

static ha_rows check_quick_select(Session *session,
                                  optimizer::Parameter *param,
                                  uint32_t idx,
                                  bool index_only,
                                  optimizer::SEL_ARG *tree,
                                  bool update_tbl_stats,
                                  uint32_t *mrr_flags,
                                  uint32_t *bufsize,
                                  optimizer::CostVector *cost);

static optimizer::RangeReadPlan *get_key_scans_params(Session *session,
                                                      optimizer::Parameter *param,
                                                      optimizer::SEL_TREE *tree,
                                                      bool index_read_must_be_used,
                                                      bool update_tbl_stats,
                                                      double read_time);

optimizer::RorIntersectReadPlan *get_best_ror_intersect(const optimizer::Parameter *param,
                                                        optimizer::SEL_TREE *tree,
                                                        double read_time,
                                                        bool *are_all_covering);

optimizer::RorIntersectReadPlan *get_best_covering_ror_intersect(optimizer::Parameter *param,
                                                                 optimizer::SEL_TREE *tree,
                                                                 double read_time);

optimizer::TableReadPlan *get_best_disjunct_quick(Session *session,
                                                  optimizer::Parameter *param,
                                                  optimizer::SEL_IMERGE *imerge,
                                                  double read_time);

optimizer::GroupMinMaxReadPlan *get_best_group_min_max(optimizer::Parameter *param, optimizer::SEL_TREE *tree);

static optimizer::SEL_TREE *tree_and(optimizer::RangeParameter *param,
                                     optimizer::SEL_TREE *tree1,
                                     optimizer::SEL_TREE *tree2);

static optimizer::SEL_ARG *sel_add(optimizer::SEL_ARG *key1, optimizer::SEL_ARG *key2);

static optimizer::SEL_ARG *key_and(optimizer::RangeParameter *param,
                                   optimizer::SEL_ARG *key1,
                                   optimizer::SEL_ARG *key2,
                                   uint32_t clone_flag);

static bool get_range(optimizer::SEL_ARG **e1, optimizer::SEL_ARG **e2, optimizer::SEL_ARG *root1);

optimizer::SEL_ARG optimizer::null_element(optimizer::SEL_ARG::IMPOSSIBLE);

static bool null_part_in_key(KEY_PART *key_part,
                             const unsigned char *key,
                             uint32_t length);

bool sel_trees_can_be_ored(optimizer::SEL_TREE *tree1,
                           optimizer::SEL_TREE *tree2,
                           optimizer::RangeParameter *param);
inline void imerge_list_and_list(List<optimizer::SEL_IMERGE> *im1, List<optimizer::SEL_IMERGE> *im2)
{
  im1->concat(im2);
}
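/*
  Build a SqlSelect from a WHERE condition.  Returns NULL when there is no
  condition and allow_null_cond is false.  If the table already has a
  sorted result in head->sort.io_cache, ownership of that cache is moved
  into the new select.
*/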
optimizer::SqlSelect *optimizer::make_select(Table *head,
                                             table_map const_tables,
                                             table_map read_tables,
                                             COND *conds,
                                             bool allow_null_cond,
                                             int *error)
{
  *error= 0;

  if (! conds && ! allow_null_cond)
    return NULL;

  optimizer::SqlSelect* select= new optimizer::SqlSelect;
  select->read_tables=read_tables;
  select->const_tables=const_tables;
  select->head=head;
  select->cond=conds;

  if (head->sort.io_cache)
  {
    memcpy(select->file, head->sort.io_cache, sizeof(internal::io_cache_st));
    select->records=(ha_rows) (select->file->end_of_file/
                               head->cursor->ref_length);
    delete head->sort.io_cache;
    head->sort.io_cache= 0;
  }
  return select;
}
optimizer::SqlSelect::SqlSelect()
  :
    quick(NULL),
    cond(NULL),
    file(static_cast<internal::io_cache_st *>(memory::sql_calloc(sizeof(internal::io_cache_st)))),
    free_cond(false)
{
  quick_keys.reset();
  needed_reg.reset();
}

void optimizer::SqlSelect::cleanup()
{
  file->close_cached_file();
}

optimizer::SqlSelect::~SqlSelect()
{
  cleanup();
}

bool optimizer::SqlSelect::check_quick(Session *session,
                                       bool force_quick_range,
                                       ha_rows limit)
{
  key_map tmp;
  tmp.set();
  return (test_quick_select(session, tmp, 0, limit,
                            force_quick_range, false) < 0);
}

bool optimizer::SqlSelect::skip_record()
{
  return (cond ? cond->val_int() == 0 : 0);
}

optimizer::QuickSelectInterface::QuickSelectInterface()
  :
    max_used_key_length(0),
    used_key_parts(0)
{}
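/*
  Look for an index that returns rows in the order given by 'order' and
  reads at most 'limit' rows, as used for ORDER BY ... LIMIT queries.
  Returns the key number, or MAX_KEY if no suitable index exists or a
  full table scan would be cheaper.
*/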
uint32_t optimizer::get_index_for_order(Table *table, Order *order, ha_rows limit)
{
  uint32_t idx;
  uint32_t match_key= MAX_KEY, match_key_len= MAX_KEY_LENGTH + 1;
  Order *ord;

  for (ord= order; ord; ord= ord->next)
    if (! ord->asc)
      return MAX_KEY;

  for (idx= 0; idx < table->getShare()->sizeKeys(); idx++)
  {
    if (!(table->keys_in_use_for_query.test(idx)))
      continue;
    KeyPartInfo *keyinfo= table->key_info[idx].key_part;
    uint32_t n_parts= table->key_info[idx].key_parts;
    uint32_t partno= 0;

    if (! (table->index_flags(idx) & HA_READ_ORDER))
      continue;
    for (ord= order; ord && partno < n_parts; ord= ord->next, partno++)
    {
      Item *item= ord->item[0];
      if (! (item->type() == Item::FIELD_ITEM &&
             ((Item_field*)item)->field->eq(keyinfo[partno].field)))
        break;
    }

    if (! ord && table->key_info[idx].key_length < match_key_len)
    {
      match_key= idx;
      match_key_len= table->key_info[idx].key_length;
    }
  }

  if (match_key != MAX_KEY)
  {
    double full_scan_time= table->cursor->scan_time();
    double index_scan_time= table->cursor->read_time(match_key, 1, limit);
    if (index_scan_time > full_scan_time)
      match_key= MAX_KEY;
  }
  return match_key;
}
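/*
  Fill param->needed_fields with the bitmap of fields used by the query.
  Fields that belong to a clustered primary key are cleared from the set,
  since any secondary index scan can reconstruct them.
*/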
static int fill_used_fields_bitmap(optimizer::Parameter *param)
{
  Table *table= param->table;
  uint32_t pk;

  param->tmp_covered_fields.clear();
  param->needed_fields.resize(table->getShare()->sizeFields());
  param->needed_fields.reset();

  param->needed_fields|= *table->read_set;
  param->needed_fields|= *table->write_set;

  pk= param->table->getShare()->getPrimaryKey();
  if (pk != MAX_KEY && param->table->cursor->primary_key_is_clustered())
  {
    KeyPartInfo *key_part= param->table->key_info[pk].key_part;
    KeyPartInfo *key_part_end= key_part +
                               param->table->key_info[pk].key_parts;
    for (; key_part != key_part_end; ++key_part)
      param->needed_fields.reset(key_part->fieldnr-1);
  }
  return 0;
}
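/*
  Test if a key can be used in the given select, and construct the
  cheapest "quick" access method for it.

  The WHERE condition is analyzed into a SEL_TREE, then the candidate
  range, index_merge, ROR-intersection/union and group-min-max plans are
  costed against a full table scan and the winner is stored in
  this->quick.

  Returns -1 if the condition is impossible, 0 if no quick select is
  cheaper than a table scan, and 1 if a quick select was created.
*/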
int optimizer::SqlSelect::test_quick_select(Session *session,
                                            key_map keys_to_use,
                                            table_map prev_tables,
                                            ha_rows limit,
                                            bool force_quick_range,
                                            bool ordered_output)
{
  quick= NULL;
  needed_reg.reset();
  quick_keys.reset();
  if (keys_to_use.none())
    return 0;
  records= head->cursor->stats.records;
  if (! records)
    records++;
  double scan_time= (double) records / TIME_FOR_COMPARE + 1;
  read_time= (double) head->cursor->scan_time() + scan_time + 1.1;
  if (head->force_index)
    scan_time= read_time= DBL_MAX;
  else if (limit < records)
    read_time= (double) records + scan_time + 1;
  else if (read_time <= 2.0 && !force_quick_range)
    return 0;				/* No need for quick select */

  keys_to_use&= head->keys_in_use_for_query;
  if (keys_to_use.any())
  {
    optimizer::SEL_TREE *tree= NULL;
    KEY_PART *key_parts;
    KeyInfo *key_info;
    optimizer::Parameter param;
    uint32_t idx;

    param.session= session;
    param.prev_tables= prev_tables | const_tables;
    param.read_tables= read_tables;
    param.current_table= head->map;
    param.table= head;
    param.keys= 0;
    param.mem_root= &alloc;
    param.old_root= session->mem_root;
    param.needed_reg= &needed_reg;
    param.imerge_cost_buff_size= 0;
    param.using_real_indexes= true;
    param.remove_jump_scans= true;
    param.force_default_mrr= ordered_output;

    session->no_errors=1;
    alloc.init(session->variables.range_alloc_block_size);
    param.key_parts= new (alloc) KEY_PART[head->getShare()->key_parts];
    if (fill_used_fields_bitmap(&param))
    {
      session->no_errors=0;
      alloc.free_root(MYF(0));
      return 0;
    }
    key_parts= param.key_parts;
    session->mem_root= &alloc;

    key_info= head->key_info;
    for (idx=0 ; idx < head->getShare()->sizeKeys() ; idx++, key_info++)
    {
      KeyPartInfo *key_part_info;
      if (! keys_to_use.test(idx))
        continue;

      param.key[param.keys]=key_parts;
      key_part_info= key_info->key_part;
      for (uint32_t part=0;
           part < key_info->key_parts;
           part++, key_parts++, key_part_info++)
      {
        key_parts->key= param.keys;
        key_parts->part= part;
        key_parts->length= key_part_info->length;
        key_parts->store_length= key_part_info->store_length;
        key_parts->field= key_part_info->field;
        key_parts->null_bit= key_part_info->null_bit;
        key_parts->flag= (uint8_t) key_part_info->key_part_flag;
      }
      param.real_keynr[param.keys++]=idx;
    }
    param.key_parts_end=key_parts;
    param.alloced_sel_args= 0;

    /* Calculate cost of full index read for the shortest covering index */
    if (!head->covering_keys.none())
    {
      int key_for_use= head->find_shortest_key(&head->covering_keys);
      double key_read_time=
        param.table->cursor->index_only_read_time(key_for_use, records) +
        (double) records / TIME_FOR_COMPARE;
      if (key_read_time < read_time)
        read_time= key_read_time;
    }

    optimizer::TableReadPlan *best_trp= NULL;
    optimizer::GroupMinMaxReadPlan *group_trp= NULL;
    double best_read_time= read_time;

    if (cond)
    {
      if ((tree= get_mm_tree(&param,cond)))
      {
        if (tree->type == optimizer::SEL_TREE::IMPOSSIBLE)
        {
          records=0L;
          read_time= (double) HA_POS_ERROR;
          goto free_mem;
        }
        if (tree->type != optimizer::SEL_TREE::KEY && tree->type != optimizer::SEL_TREE::KEY_SMALLER)
          tree= NULL;
      }
    }

    group_trp= get_best_group_min_max(&param, tree);
    if (group_trp)
    {
      param.table->quick_condition_rows= min(group_trp->records,
                                             head->cursor->stats.records);
      if (group_trp->read_cost < best_read_time)
      {
        best_trp= group_trp;
        best_read_time= best_trp->read_cost;
      }
    }

    if (tree)
    {
      if (tree->merges.is_empty())
      {
        optimizer::RangeReadPlan *range_trp= NULL;
        optimizer::RorIntersectReadPlan *rori_trp= NULL;
        bool can_build_covering= false;

        if ((range_trp= get_key_scans_params(session, &param, tree, false, true,
                                             best_read_time)))
        {
          best_trp= range_trp;
          best_read_time= best_trp->read_cost;
        }

        if ((session->lex().sql_command != SQLCOM_DELETE))
        {
          if ((rori_trp= get_best_ror_intersect(&param, tree, best_read_time,
                                                &can_build_covering)))
          {
            best_trp= rori_trp;
            best_read_time= best_trp->read_cost;

            if (!rori_trp->is_covering && can_build_covering &&
                (rori_trp= get_best_covering_ror_intersect(&param, tree,
                                                           best_read_time)))
              best_trp= rori_trp;
          }
        }
      }
      else
      {
        optimizer::SEL_IMERGE *imerge= NULL;
        optimizer::TableReadPlan *best_conj_trp= NULL;
        optimizer::TableReadPlan *new_conj_trp= NULL;
        List<optimizer::SEL_IMERGE>::iterator it(tree->merges.begin());
        while ((imerge= it++))
        {
          new_conj_trp= get_best_disjunct_quick(session, &param, imerge, best_read_time);
          if (new_conj_trp)
            set_if_smaller(param.table->quick_condition_rows,
                           new_conj_trp->records);
          if (!best_conj_trp || (new_conj_trp && new_conj_trp->read_cost <
                                 best_conj_trp->read_cost))
            best_conj_trp= new_conj_trp;
        }
        if (best_conj_trp)
          best_trp= best_conj_trp;
      }
    }
    session->mem_root= param.old_root;

    if (best_trp)
    {
      records= best_trp->records;
      if (! (quick= best_trp->make_quick(&param, true)) || quick->init())
      {
        delete quick;
        quick= NULL;
      }
    }

  free_mem:
    alloc.free_root(MYF(0));
    session->mem_root= param.old_root;
    session->no_errors=0;
  }

  return(records ? test(quick) : -1);
}
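/*
  Choose the best plan for an index_merge: cost each range scan in the
  disjunction, then compare a rowid-sort-unique based index_merge against
  a ROR-union of the same scans.  Returns the cheapest plan found whose
  cost is below read_time, or NULL.
*/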
optimizer::TableReadPlan *get_best_disjunct_quick(Session *session,
                                                  optimizer::Parameter *param,
                                                  optimizer::SEL_IMERGE *imerge,
                                                  double read_time)
{
  optimizer::SEL_TREE **ptree= NULL;
  optimizer::IndexMergeReadPlan *imerge_trp= NULL;
  uint32_t n_child_scans= imerge->trees_next - imerge->trees;
  optimizer::RangeReadPlan **range_scans= NULL;
  optimizer::RangeReadPlan **cur_child= NULL;
  optimizer::RangeReadPlan **cpk_scan= NULL;
  bool imerge_too_expensive= false;
  double imerge_cost= 0.0;
  ha_rows cpk_scan_records= 0;
  ha_rows non_cpk_scan_records= 0;
  bool pk_is_clustered= param->table->cursor->primary_key_is_clustered();
  bool all_scans_ror_able= true;
  bool all_scans_rors= true;
  uint32_t unique_calc_buff_size;
  optimizer::TableReadPlan **roru_read_plans= NULL;
  optimizer::TableReadPlan **cur_roru_plan= NULL;
  double roru_index_costs;
  ha_rows roru_total_records;
  double roru_intersect_part= 1.0;

  range_scans= new (*param->mem_root) optimizer::RangeReadPlan*[n_child_scans];

  for (ptree= imerge->trees, cur_child= range_scans; ptree != imerge->trees_next; ptree++, cur_child++)
  {
    if (!(*cur_child= get_key_scans_params(session, param, *ptree, true, false, read_time)))
    {
      imerge_too_expensive= true;
    }
    if (imerge_too_expensive)
      continue;

    imerge_cost += (*cur_child)->read_cost;
    all_scans_ror_able &= ((*ptree)->n_ror_scans > 0);
    all_scans_rors &= (*cur_child)->is_ror;
    if (pk_is_clustered &&
        param->real_keynr[(*cur_child)->key_idx] ==
        param->table->getShare()->getPrimaryKey())
    {
      cpk_scan= cur_child;
      cpk_scan_records= (*cur_child)->records;
    }
    else
      non_cpk_scan_records += (*cur_child)->records;
  }

  if (imerge_too_expensive || (imerge_cost > read_time) ||
      ((non_cpk_scan_records+cpk_scan_records >= param->table->cursor->stats.records) && read_time != DBL_MAX))
  {
    return NULL;
  }
  if (all_scans_rors)
  {
    roru_read_plans= (optimizer::TableReadPlan **) range_scans;
    goto skip_to_ror_scan;
  }

  {
    optimizer::CostVector sweep_cost;
    Join *join= param->session->lex().select_lex.join;
    bool is_interrupted= test(join && join->tables == 1);
    get_sweep_read_cost(param->table, non_cpk_scan_records, is_interrupted,
                        &sweep_cost);
    imerge_cost += sweep_cost.total_cost();
  }
  if (imerge_cost > read_time)
    goto build_ror_index_merge;

  unique_calc_buff_size=
    Unique::get_cost_calc_buff_size((ulong)non_cpk_scan_records,
                                    param->table->cursor->ref_length,
                                    param->session->variables.sortbuff_size);
  if (param->imerge_cost_buff_size < unique_calc_buff_size)
  {
    param->imerge_cost_buff= (uint*)param->mem_root->alloc(unique_calc_buff_size);
    param->imerge_cost_buff_size= unique_calc_buff_size;
  }

  imerge_cost +=
    Unique::get_use_cost(param->imerge_cost_buff, (uint32_t)non_cpk_scan_records,
                         param->table->cursor->ref_length,
                         param->session->variables.sortbuff_size);
  if (imerge_cost < read_time)
  {
    imerge_trp= new (*param->mem_root) optimizer::IndexMergeReadPlan;
    imerge_trp->read_cost= imerge_cost;
    imerge_trp->records= non_cpk_scan_records + cpk_scan_records;
    imerge_trp->records= min(imerge_trp->records, param->table->cursor->stats.records);
    imerge_trp->range_scans= range_scans;
    imerge_trp->range_scans_end= range_scans + n_child_scans;
    read_time= imerge_cost;
  }

build_ror_index_merge:
  if (!all_scans_ror_able || param->session->lex().sql_command == SQLCOM_DELETE)
    return imerge_trp;

  /* Ok, it is possible to build a ROR-union, try it. */
  bool dummy;
  roru_read_plans= new (*param->mem_root) optimizer::TableReadPlan*[n_child_scans];
skip_to_ror_scan:
  roru_index_costs= 0.0;
  roru_total_records= 0;
  cur_roru_plan= roru_read_plans;

  for (ptree= imerge->trees, cur_child= range_scans;
       ptree != imerge->trees_next;
       ptree++, cur_child++, cur_roru_plan++)
  {
    double cost;
    if ((*cur_child)->is_ror)
    {
      cost= param->table->cursor->
              read_time(param->real_keynr[(*cur_child)->key_idx], 1,
                        (*cur_child)->records) +
            static_cast<double>((*cur_child)->records) / TIME_FOR_COMPARE;
    }
    else
      cost= read_time;

    optimizer::TableReadPlan *prev_plan= *cur_child;
    if (!(*cur_roru_plan= get_best_ror_intersect(param, *ptree, cost,
                                                 &dummy)))
    {
      if (prev_plan->is_ror)
        *cur_roru_plan= prev_plan;
      else
        return imerge_trp;
      roru_index_costs += (*cur_roru_plan)->read_cost;
    }
    else
      roru_index_costs +=
        ((optimizer::RorIntersectReadPlan*)(*cur_roru_plan))->index_scan_costs;
    roru_total_records += (*cur_roru_plan)->records;
    roru_intersect_part *= (*cur_roru_plan)->records /
                           param->table->cursor->stats.records;
  }

  roru_total_records -= (ha_rows)(roru_intersect_part*
                                  param->table->cursor->stats.records);

  double roru_total_cost;
  {
    optimizer::CostVector sweep_cost;
    Join *join= param->session->lex().select_lex.join;
    bool is_interrupted= test(join && join->tables == 1);
    get_sweep_read_cost(param->table, roru_total_records, is_interrupted,
                        &sweep_cost);
    roru_total_cost= roru_index_costs +
                     static_cast<double>(roru_total_records)*log((double)n_child_scans) /
                     (TIME_FOR_COMPARE_ROWID * M_LN2) +
                     sweep_cost.total_cost();
  }

  optimizer::RorUnionReadPlan *roru= NULL;
  if (roru_total_cost < read_time)
  {
    if ((roru= new (*param->mem_root) optimizer::RorUnionReadPlan))
    {
      roru->first_ror= roru_read_plans;
      roru->last_ror= roru_read_plans + n_child_scans;
      roru->read_cost= roru_total_cost;
      roru->records= roru_total_records;
      return roru;
    }
  }
  return imerge_trp;
}
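/*
  Create a RorScanInfo for the SEL_ARG tree on index 'idx': record the
  expected row count, the set of needed fields the index covers, and the
  cost of an index-only scan.
*/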
static
optimizer::RorScanInfo *make_ror_scan(const optimizer::Parameter *param, int idx, optimizer::SEL_ARG *sel_arg)
{
  uint32_t keynr;
  optimizer::RorScanInfo *ror_scan= new (*param->mem_root) optimizer::RorScanInfo;

  ror_scan->idx= idx;
  ror_scan->keynr= keynr= param->real_keynr[idx];
  ror_scan->key_rec_length= (param->table->key_info[keynr].key_length +
                             param->table->cursor->ref_length);
  ror_scan->sel_arg= sel_arg;
  ror_scan->records= param->table->quick_rows[keynr];

  ror_scan->covered_fields_size= param->table->getShare()->sizeFields();
  boost::dynamic_bitset<> tmp_bitset(param->table->getShare()->sizeFields());
  tmp_bitset.reset();

  KeyPartInfo *key_part= param->table->key_info[keynr].key_part;
  KeyPartInfo *key_part_end= key_part +
                             param->table->key_info[keynr].key_parts;
  for (; key_part != key_part_end; ++key_part)
  {
    if (param->needed_fields.test(key_part->fieldnr-1))
      tmp_bitset.set(key_part->fieldnr-1);
  }
  double rows= param->table->quick_rows[ror_scan->keynr];
  ror_scan->index_read_cost=
    param->table->cursor->index_only_read_time(ror_scan->keynr, rows);
  ror_scan->covered_fields= tmp_bitset.to_ulong();
  return ror_scan;
}
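/*
  Comparator for RorScanInfo: order scans by E(#records matched) *
  key_record_length, i.e. by the expected volume of data read.
*/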
static int cmp_ror_scan_info(optimizer::RorScanInfo** a, optimizer::RorScanInfo** b)
{
  double val1= static_cast<double>((*a)->records) * (*a)->key_rec_length;
  double val2= static_cast<double>((*b)->records) * (*b)->key_rec_length;
  return (val1 < val2) ? -1 : (val1 == val2) ? 0 : 1;
}
static int cmp_ror_scan_info_covering(optimizer::RorScanInfo** a, optimizer::RorScanInfo** b)
{
  if ((*a)->used_fields_covered > (*b)->used_fields_covered)
    return -1;
  if ((*a)->used_fields_covered < (*b)->used_fields_covered)
    return 1;
  if ((*a)->key_components < (*b)->key_components)
    return -1;
  if ((*a)->key_components > (*b)->key_components)
    return 1;
  if ((*a)->first_uncovered_field < (*b)->first_uncovered_field)
    return -1;
  if ((*a)->first_uncovered_field > (*b)->first_uncovered_field)
    return 1;
  return 0;
}
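/*
  State of a ROR-intersection being built: which fields are covered so
  far, the running selectivity estimate (out_rows), and the accumulated
  index-only and total costs.
*/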
typedef struct st_ror_intersect_info
{
  st_ror_intersect_info(const optimizer::Parameter *in_param)
    :
      param(in_param),
      covered_fields(in_param->table->getShare()->sizeFields()),
      out_rows(in_param->table->cursor->stats.records),
      is_covering(false),
      index_records(0),
      index_scan_costs(0.0),
      total_cost(0.0)
  {
    covered_fields.reset();
  }

  const optimizer::Parameter *param;
  boost::dynamic_bitset<> covered_fields;
  double out_rows;
  bool is_covering;
  ha_rows index_records;
  double index_scan_costs;
  double total_cost;
} ROR_INTERSECT_INFO;
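/* Copy the state of one ROR-intersection into another. */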
static void ror_intersect_cpy(ROR_INTERSECT_INFO *dst, const ROR_INTERSECT_INFO *src)
{
  dst->param= src->param;
  dst->covered_fields= src->covered_fields;
  dst->out_rows= src->out_rows;
  dst->is_covering= src->is_covering;
  dst->index_records= src->index_records;
  dst->index_scan_costs= src->index_scan_costs;
  dst->total_cost= src->total_cost;
}
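/*
  Estimate how much adding 'scan' to the intersection reduces the row
  count.  Walks the scan's SEL_ARG chain; whenever a key part transitions
  between "already covered by the current intersection" and "not yet
  covered", records_in_range() is probed for the accumulated key prefix
  and the ratio contributes to the selectivity multiplier.
*/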
static double ror_scan_selectivity(const ROR_INTERSECT_INFO *info,
                                   const optimizer::RorScanInfo *scan)
{
  double selectivity_mult= 1.0;
  KeyPartInfo *key_part= info->param->table->key_info[scan->keynr].key_part;
  unsigned char key_val[MAX_KEY_LENGTH+MAX_FIELD_WIDTH];
  unsigned char *key_ptr= key_val;
  optimizer::SEL_ARG *sel_arg= NULL;
  optimizer::SEL_ARG *tuple_arg= NULL;
  key_part_map keypart_map= 0;

  bool prev_covered= test(info->covered_fields.test(key_part->fieldnr-1));
  key_range min_range;
  key_range max_range;
  min_range.key= key_val;
  min_range.flag= HA_READ_KEY_EXACT;
  max_range.key= key_val;
  max_range.flag= HA_READ_AFTER_KEY;
  ha_rows prev_records= info->param->table->cursor->stats.records;

  for (sel_arg= scan->sel_arg; sel_arg;
       sel_arg= sel_arg->next_key_part)
  {
    bool cur_covered=
      test(info->covered_fields.test(key_part[sel_arg->part].fieldnr-1));
    if (cur_covered != prev_covered)
    {
      /* create (part1val, ..., part{n-1}val) tuple. */
      if (! tuple_arg)
      {
        tuple_arg= scan->sel_arg;
        tuple_arg->store_min(key_part->store_length, &key_ptr, 0);
        keypart_map= 1;
      }
      while (tuple_arg->next_key_part != sel_arg)
      {
        tuple_arg= tuple_arg->next_key_part;
        tuple_arg->store_min(key_part[tuple_arg->part].store_length,
                             &key_ptr, 0);
        keypart_map= (keypart_map << 1) | 1;
      }
      min_range.length= max_range.length= (size_t) (key_ptr - key_val);
      min_range.keypart_map= max_range.keypart_map= keypart_map;
      ha_rows records= (info->param->table->cursor->
                        records_in_range(scan->keynr, &min_range, &max_range));
      if (cur_covered)
      {
        /* uncovered -> covered */
        selectivity_mult *= static_cast<double>(records) / prev_records;
        prev_records= HA_POS_ERROR;
      }
      else
      {
        /* covered -> uncovered */
        prev_records= records;
      }
    }
    prev_covered= cur_covered;
  }
  if (! prev_covered)
  {
    selectivity_mult *=
      static_cast<double>(info->param->table->quick_rows[scan->keynr]) / prev_records;
  }
  return selectivity_mult;
}
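/*
  Add a ROR scan to the intersection under construction and update its
  cost and covered-fields state.  Returns false if the scan does not
  improve selectivity and was not added.  is_cpk_scan signals a scan on a
  clustered primary key that is used only as a row filter.
*/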
static bool ror_intersect_add(ROR_INTERSECT_INFO *info,
                              optimizer::RorScanInfo* ror_scan,
                              bool is_cpk_scan)
{
  double selectivity_mult= 1.0;

  selectivity_mult = ror_scan_selectivity(info, ror_scan);
  if (selectivity_mult == 1.0)
  {
    /* Don't add this scan if it doesn't improve selectivity. */
    return false;
  }

  info->out_rows *= selectivity_mult;

  if (is_cpk_scan)
  {
    /*
      The CPK scan is only used to filter out rows; assume one rowid
      compare per record of every scan.
    */
    info->index_scan_costs += static_cast<double>(info->index_records) /
                              TIME_FOR_COMPARE_ROWID;
  }
  else
  {
    info->index_records += info->param->table->quick_rows[ror_scan->keynr];
    info->index_scan_costs += ror_scan->index_read_cost;
    boost::dynamic_bitset<> tmp_bitset= ror_scan->bitsToBitset();
    info->covered_fields|= tmp_bitset;
    if (! info->is_covering && info->param->needed_fields.is_subset_of(info->covered_fields))
    {
      info->is_covering= true;
    }
  }

  info->total_cost= info->index_scan_costs;
  if (! info->is_covering)
  {
    optimizer::CostVector sweep_cost;
    Join *join= info->param->session->lex().select_lex.join;
    bool is_interrupted= test(join && join->tables == 1);
    get_sweep_read_cost(info->param->table, double2rows(info->out_rows),
                        is_interrupted, &sweep_cost);
    info->total_cost += sweep_cost.total_cost();
  }
  return true;
}
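/*
  Build a covering ROR-intersection: greedily add the scan that covers
  the most not-yet-covered needed fields (cmp_ror_scan_info_covering
  order) until all needed fields are covered or the cost exceeds
  read_time.  Returns the plan, or NULL if no covering intersection is
  cheap enough.
*/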
optimizer::RorIntersectReadPlan *get_best_covering_ror_intersect(optimizer::Parameter *param,
                                                                 optimizer::SEL_TREE *tree,
                                                                 double read_time)
{
  optimizer::RorScanInfo **ror_scan_mark;
  optimizer::RorScanInfo **ror_scans_end= tree->ror_scans_end;

  for (optimizer::RorScanInfo **scan= tree->ror_scans; scan != ror_scans_end; ++scan)
    (*scan)->key_components=
      param->table->key_info[(*scan)->keynr].key_parts;

  ror_scan_mark= tree->ror_scans;

  boost::dynamic_bitset<> *covered_fields= &param->tmp_covered_fields;
  if (covered_fields->empty())
  {
    covered_fields->resize(param->table->getShare()->sizeFields());
  }
  covered_fields->reset();

  double total_cost= 0.0f;
  ha_rows records= 0;
  bool all_covered;

  do
  {
    for (optimizer::RorScanInfo **scan= ror_scan_mark; scan != ror_scans_end; ++scan)
    {
      (*scan)->subtractBitset(*covered_fields);
      (*scan)->used_fields_covered= (*scan)->getBitCount();
      (*scan)->first_uncovered_field= (*scan)->findFirstNotSet();
    }

    internal::my_qsort(ror_scan_mark, ror_scans_end-ror_scan_mark,
                       sizeof(optimizer::RorScanInfo*),
                       (qsort_cmp)cmp_ror_scan_info_covering);

    total_cost += (*ror_scan_mark)->index_read_cost;
    records += (*ror_scan_mark)->records;
    if (total_cost > read_time)
      return NULL;
    boost::dynamic_bitset<> tmp_bitset= (*ror_scan_mark)->bitsToBitset();
    *covered_fields|= tmp_bitset;
    all_covered= param->needed_fields.is_subset_of(*covered_fields);
  } while ((++ror_scan_mark < ror_scans_end) && ! all_covered);

  if (!all_covered || (ror_scan_mark - tree->ror_scans) == 1)
    return NULL;

  /* Add the cost of the priority-queue merge of the chosen scans. */
  total_cost += static_cast<double>(records) *
                log((double)(ror_scan_mark - tree->ror_scans)) /
                (TIME_FOR_COMPARE_ROWID * M_LN2);

  if (total_cost > read_time)
    return NULL;

  optimizer::RorIntersectReadPlan *trp= new (*param->mem_root) optimizer::RorIntersectReadPlan;

  uint32_t best_num= (ror_scan_mark - tree->ror_scans);
  trp->first_scan= new (*param->mem_root) optimizer::RorScanInfo*[best_num];
  memcpy(trp->first_scan, tree->ror_scans, best_num*sizeof(optimizer::RorScanInfo*));
  trp->last_scan= trp->first_scan + best_num;
  trp->is_covering= true;
  trp->read_cost= total_cost;
  trp->records= records;
  trp->cpk_scan= NULL;
  set_if_smaller(param->table->quick_condition_rows, records);

  return trp;
}
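/*
  Find the best ROR-intersection for the tree: sort the ROR-able scans by
  cost, greedily extend the intersection while the total cost decreases,
  then optionally add a clustered-PK scan as a final row filter.  Sets
  *are_all_covering when the chosen scans cover all needed fields.
*/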
optimizer::RorIntersectReadPlan *get_best_ror_intersect(const optimizer::Parameter *param,
                                                        optimizer::SEL_TREE *tree,
                                                        double read_time,
                                                        bool *are_all_covering)
{
  uint32_t idx;
  double min_cost= DBL_MAX;

  if ((tree->n_ror_scans < 2) || ! param->table->cursor->stats.records)
    return NULL;

  /*
    Step 1: Collect ROR-able SEL_ARGs and create a RorScanInfo for each of
    them.  Also find and save the clustered PK scan if there is one.
  */
  optimizer::RorScanInfo **cur_ror_scan= NULL;
  optimizer::RorScanInfo *cpk_scan= NULL;
  uint32_t cpk_no= 0;
  bool cpk_scan_used= false;

  tree->ror_scans= new (*param->mem_root) optimizer::RorScanInfo*[param->keys];
  cpk_no= ((param->table->cursor->primary_key_is_clustered()) ? param->table->getShare()->getPrimaryKey() : MAX_KEY);

  for (idx= 0, cur_ror_scan= tree->ror_scans; idx < param->keys; idx++)
  {
    optimizer::RorScanInfo *scan;
    if (! tree->ror_scans_map.test(idx))
      continue;
    if (! (scan= make_ror_scan(param, idx, tree->keys[idx])))
      return NULL;
    if (param->real_keynr[idx] == cpk_no)
    {
      cpk_scan= scan;
      tree->n_ror_scans--;
    }
    else
      *(cur_ror_scan++)= scan;
  }

  tree->ror_scans_end= cur_ror_scan;
  internal::my_qsort(tree->ror_scans, tree->n_ror_scans,
                     sizeof(optimizer::RorScanInfo*),
                     (qsort_cmp)cmp_ror_scan_info);

  optimizer::RorScanInfo **intersect_scans= NULL;
  optimizer::RorScanInfo **intersect_scans_end= NULL;
  intersect_scans= new (*param->mem_root) optimizer::RorScanInfo*[tree->n_ror_scans];
  intersect_scans_end= intersect_scans;

  /* Create and incrementally update the ROR intersection. */
  ROR_INTERSECT_INFO intersect(param);
  ROR_INTERSECT_INFO intersect_best(param);

  /* [intersect_scans, intersect_scans_best) will hold the best intersection */
  optimizer::RorScanInfo **intersect_scans_best= NULL;
  cur_ror_scan= tree->ror_scans;
  intersect_scans_best= intersect_scans;
  while (cur_ror_scan != tree->ror_scans_end && ! intersect.is_covering)
  {
    /* S= S + first(R);  R= R - first(R); */
    if (! ror_intersect_add(&intersect, *cur_ror_scan, false))
    {
      cur_ror_scan++;
      continue;
    }

    *(intersect_scans_end++)= *(cur_ror_scan++);

    if (intersect.total_cost < min_cost)
    {
      /* Local minimum found, save it */
      ror_intersect_cpy(&intersect_best, &intersect);
      intersect_scans_best= intersect_scans_end;
      min_cost = intersect.total_cost;
    }
  }

  if (intersect_scans_best == intersect_scans)
  {
    return NULL;
  }

  *are_all_covering= intersect.is_covering;
  uint32_t best_num= intersect_scans_best - intersect_scans;
  ror_intersect_cpy(&intersect, &intersect_best);

  /*
    Ok, found the best ROR-intersection of non-CPK key scans.  Check if
    adding the clustered PK scan as a filter improves the cost further.
  */
  if (cpk_scan && ! intersect.is_covering)
  {
    if (ror_intersect_add(&intersect, cpk_scan, true) &&
        (intersect.total_cost < min_cost))
    {
      cpk_scan_used= true;
      intersect_best= intersect;
    }
  }

  /* Ok, return the ROR-intersect plan if we have found one. */
  optimizer::RorIntersectReadPlan *trp= NULL;
  if (min_cost < read_time && (cpk_scan_used || best_num > 1))
  {
    trp= new (*param->mem_root) optimizer::RorIntersectReadPlan;
    trp->first_scan= new (*param->mem_root) optimizer::RorScanInfo*[best_num];
    memcpy(trp->first_scan, intersect_scans, best_num*sizeof(optimizer::RorScanInfo*));
    trp->last_scan= trp->first_scan + best_num;
    trp->is_covering= intersect_best.is_covering;
    trp->read_cost= intersect_best.total_cost;
    /* Prevent divisions by zero */
    ha_rows best_rows= double2rows(intersect_best.out_rows);
    if (! best_rows)
      best_rows= 1;
    set_if_smaller(param->table->quick_condition_rows, best_rows);
    trp->records= best_rows;
    trp->index_scan_costs= intersect_best.index_scan_costs;
    trp->cpk_scan= cpk_scan_used ? cpk_scan : NULL;
  }
  return trp;
}
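/*
  Cost each possible range scan in the tree with check_quick_select() and
  return a RangeReadPlan for the cheapest one that beats read_time.  Also
  marks which scans are ROR-able in tree->ror_scans_map as a side effect.
*/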
static optimizer::RangeReadPlan *get_key_scans_params(Session *session,
                                                      optimizer::Parameter *param,
                                                      optimizer::SEL_TREE *tree,
                                                      bool index_read_must_be_used,
                                                      bool update_tbl_stats,
                                                      double read_time)
{
  uint32_t idx;
  optimizer::SEL_ARG **key= NULL;
  optimizer::SEL_ARG **end= NULL;
  optimizer::SEL_ARG **key_to_read= NULL;
  ha_rows best_records= 0;
  uint32_t best_mrr_flags= 0;
  uint32_t best_buf_size= 0;
  optimizer::RangeReadPlan *read_plan= NULL;

  tree->ror_scans_map.reset();
  tree->n_ror_scans= 0;
  for (idx= 0,key=tree->keys, end=key+param->keys; key != end; key++,idx++)
  {
    if (*key)
    {
      ha_rows found_records;
      optimizer::CostVector cost;
      double found_read_time= 0.0;
      uint32_t mrr_flags, buf_size;
      uint32_t keynr= param->real_keynr[idx];
      if ((*key)->type == optimizer::SEL_ARG::MAYBE_KEY ||
          (*key)->maybe_flag)
        param->needed_reg->set(keynr);

      bool read_index_only= index_read_must_be_used ||
                            param->table->covering_keys.test(keynr);

      found_records= check_quick_select(session, param, idx, read_index_only, *key,
                                        update_tbl_stats, &mrr_flags,
                                        &buf_size, &cost);
      found_read_time= cost.total_cost();
      if ((found_records != HA_POS_ERROR) && param->is_ror_scan)
      {
        tree->n_ror_scans++;
        tree->ror_scans_map.set(idx);
      }
      if (read_time > found_read_time && found_records != HA_POS_ERROR)
      {
        read_time= found_read_time;
        best_records= found_records;
        key_to_read= key;
        best_mrr_flags= mrr_flags;
        best_buf_size= buf_size;
      }
    }
  }

  if (key_to_read)
  {
    idx= key_to_read - tree->keys;
    read_plan= new (*param->mem_root) optimizer::RangeReadPlan(*key_to_read, idx, best_mrr_flags);
    read_plan->records= best_records;
    read_plan->is_ror= tree->ror_scans_map.test(idx);
    read_plan->read_cost= read_time;
    read_plan->mrr_buf_size= best_buf_size;
  }
  return read_plan;
}
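/*
  make_quick() implementations: turn the winning TableReadPlan into the
  corresponding QuickSelect object tree.
*/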
optimizer::QuickSelectInterface *optimizer::IndexMergeReadPlan::make_quick(optimizer::Parameter *param, bool, memory::Root *)
{
  optimizer::QuickIndexMergeSelect *quick_imerge=
    new optimizer::QuickIndexMergeSelect(param->session, param->table);
  quick_imerge->records= records;
  quick_imerge->read_time= read_cost;
  for (optimizer::RangeReadPlan **range_scan= range_scans; range_scan != range_scans_end; range_scan++)
  {
    optimizer::QuickRangeSelect *quick=
      (optimizer::QuickRangeSelect*)((*range_scan)->make_quick(param, false, &quick_imerge->alloc));
    if (! quick)
    {
      delete quick_imerge;
      return NULL;
    }
    quick_imerge->push_quick_back(quick);
  }
  return quick_imerge;
}
optimizer::QuickSelectInterface *optimizer::RorIntersectReadPlan::make_quick(optimizer::Parameter *param,
                                                                             bool retrieve_full_rows,
                                                                             memory::Root *parent_alloc)
{
  optimizer::QuickRorIntersectSelect *quick_intersect= NULL;
  optimizer::QuickRangeSelect *quick= NULL;
  memory::Root *alloc= NULL;

  if ((quick_intersect=
         new optimizer::QuickRorIntersectSelect(param->session,
                                                param->table,
                                                (retrieve_full_rows ? (! is_covering) : false),
                                                parent_alloc)))
  {
    alloc= parent_alloc ? parent_alloc : &quick_intersect->alloc;
    for (; first_scan != last_scan; ++first_scan)
    {
      if (! (quick= optimizer::get_quick_select(param,
                                                (*first_scan)->idx,
                                                (*first_scan)->sel_arg,
                                                HA_MRR_USE_DEFAULT_IMPL | HA_MRR_SORTED,
                                                0,
                                                alloc)))
      {
        delete quick_intersect;
        return NULL;
      }
      quick_intersect->push_quick_back(quick);
    }
    if (cpk_scan)
    {
      if (! (quick= optimizer::get_quick_select(param,
                                                cpk_scan->idx,
                                                cpk_scan->sel_arg,
                                                HA_MRR_USE_DEFAULT_IMPL | HA_MRR_SORTED,
                                                0,
                                                alloc)))
      {
        delete quick_intersect;
        return NULL;
      }
      quick->resetCursor();
      quick_intersect->cpk_quick= quick;
    }
    quick_intersect->records= records;
    quick_intersect->read_time= read_cost;
  }
  return quick_intersect;
}
optimizer::QuickSelectInterface *optimizer::RorUnionReadPlan::make_quick(optimizer::Parameter *param, bool, memory::Root *)
{
  optimizer::QuickRorUnionSelect *quick_roru=
    new optimizer::QuickRorUnionSelect(param->session, param->table);
  for (optimizer::TableReadPlan **scan= first_ror; scan != last_ror; scan++)
  {
    optimizer::QuickSelectInterface *quick= (*scan)->make_quick(param, false, &quick_roru->alloc);
    if (! quick)
      return NULL;
    quick_roru->push_quick_back(quick);
  }
  quick_roru->records= records;
  quick_roru->read_time= read_cost;
  return quick_roru;
}
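/*
  Build a SEL_TREE for "field <> value" (or the endpoints of NOT BETWEEN
  / NOT IN) as the union of the two intervals field < lt_value and
  field > gt_value.
*/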
static optimizer::SEL_TREE *get_ne_mm_tree(optimizer::RangeParameter *param,
                                           Item_func *cond_func,
                                           Field *field,
                                           Item *lt_value, Item *gt_value,
                                           Item_result cmp_type)
{
  optimizer::SEL_TREE *tree= NULL;
  tree= get_mm_parts(param, cond_func, field, Item_func::LT_FUNC,
                     lt_value, cmp_type);
  if (tree)
  {
    tree= tree_or(param,
                  tree,
                  get_mm_parts(param, cond_func, field,
                               Item_func::GT_FUNC,
                               gt_value, cmp_type));
  }
  return tree;
}
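/*
  Build a SEL_TREE for a simple comparison, BETWEEN, <>, or [NOT] IN
  predicate on 'field'.  'inv' means the predicate is negated; NOT IN
  with up to NOT_IN_IGNORE_THRESHOLD constants is expanded into a union
  of the gaps between consecutive sorted values.
*/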
static optimizer::SEL_TREE *get_func_mm_tree(optimizer::RangeParameter *param,
                                             Item_func *cond_func,
                                             Field *field,
                                             Item *value,
                                             Item_result cmp_type,
                                             bool inv)
{
  optimizer::SEL_TREE *tree= NULL;

  switch (cond_func->functype())
  {
  case Item_func::NE_FUNC:
    tree= get_ne_mm_tree(param, cond_func, field, value, value, cmp_type);
    break;

  case Item_func::BETWEEN:
  {
    if (! value)
    {
      if (inv)
      {
        tree= get_ne_mm_tree(param,
                             cond_func,
                             field,
                             cond_func->arguments()[1],
                             cond_func->arguments()[2],
                             cmp_type);
      }
      else
      {
        tree= get_mm_parts(param,
                           cond_func,
                           field,
                           Item_func::GE_FUNC,
                           cond_func->arguments()[1],
                           cmp_type);
        if (tree)
        {
          tree= tree_and(param,
                         tree,
                         get_mm_parts(param, cond_func, field,
                                      Item_func::LE_FUNC,
                                      cond_func->arguments()[2],
                                      cmp_type));
        }
      }
    }
    else
      tree= get_mm_parts(param,
                         cond_func,
                         field,
                         (inv ?
                          (value == (Item*)1 ? Item_func::GT_FUNC :
                                               Item_func::LT_FUNC) :
                          (value == (Item*)1 ? Item_func::LE_FUNC :
                                               Item_func::GE_FUNC)),
                         cond_func->arguments()[0],
                         cmp_type);
    break;
  }
  case Item_func::IN_FUNC:
  {
    Item_func_in *func= (Item_func_in*) cond_func;

    /*
      The array for IN() is only built when all values have the same
      result type, so bail out early otherwise.
    */
    if (! func->arg_types_compatible)
      break;

    if (inv)
    {
      if (func->array && func->array->result_type() != ROW_RESULT)
      {
        /*
          We get here for conditions of the form "t.key NOT IN (c1, ...)"
          and expand them into a union of the intervals between the
          sorted, deduplicated constants.
        */
        const unsigned int NOT_IN_IGNORE_THRESHOLD= 1000;
        memory::Root *tmp_root= param->mem_root;
        param->session->mem_root= param->old_root;
        /*
          Create one constant Item of the array's element type;
          get_mm_parts() only accepts constant values wrapped in Items,
          and the Item must live on the per-statement mem_root.
        */
        Item *value_item= func->array->create_item();
        param->session->mem_root= tmp_root;

        if (func->array->count > NOT_IN_IGNORE_THRESHOLD || ! value_item)
          break;

        /* Get a SEL_TREE for the "(-inf|NULL) < X < c_0" interval. */
        uint32_t i= 0;
        do
        {
          func->array->value_to_item(i, value_item);
          tree= get_mm_parts(param,
                             cond_func,
                             field, Item_func::LT_FUNC,
                             value_item, cmp_type);
          if (! tree)
            break;
          i++;
        } while (i < func->array->count && tree->type == optimizer::SEL_TREE::IMPOSSIBLE);

        if (!tree || tree->type == optimizer::SEL_TREE::IMPOSSIBLE)
        {
          /* We get here in cases like "t.unsigned NOT IN (-1,-2,-3)" */
          tree= NULL;
          break;
        }
        optimizer::SEL_TREE *tree2= NULL;
        for (; i < func->array->count; i++)
        {
          if (func->array->compare_elems(i, i-1))
          {
            /* Get a SEL_TREE for the "-inf < X < c_i" interval */
            func->array->value_to_item(i, value_item);
            tree2= get_mm_parts(param, cond_func, field, Item_func::LT_FUNC,
                                value_item, cmp_type);
            if (! tree2)
            {
              tree= NULL;
              break;
            }

            /* Change all intervals to be "c_{i-1} < X < c_i" */
            for (uint32_t idx= 0; idx < param->keys; idx++)
            {
              optimizer::SEL_ARG *new_interval, *last_val;
              if (((new_interval= tree2->keys[idx])) &&
                  (tree->keys[idx]) &&
                  ((last_val= tree->keys[idx]->last())))
              {
                new_interval->min_value= last_val->max_value;
                new_interval->min_flag= NEAR_MIN;
              }
            }
            tree= tree_or(param, tree, tree2);
          }
        }

        if (tree && tree->type != optimizer::SEL_TREE::IMPOSSIBLE)
        {
          /*
            Get the SEL_TREE for the last "c_last < X < +inf" interval
            (value_item already contains c_last).
          */
          tree2= get_mm_parts(param, cond_func, field, Item_func::GT_FUNC,
                              value_item, cmp_type);
          tree= tree_or(param, tree, tree2);
        }
      }
      else
      {
        tree= get_ne_mm_tree(param, cond_func, field,
                             func->arguments()[1], func->arguments()[1],
                             cmp_type);
        if (tree)
        {
          Item **arg, **end;
          for (arg= func->arguments()+2, end= arg+func->argument_count()-2;
               arg < end; arg++)
          {
            tree= tree_and(param, tree, get_ne_mm_tree(param, cond_func, field,
                                                       *arg, *arg, cmp_type));
          }
        }
      }
    }
    else
    {
      tree= get_mm_parts(param, cond_func, field, Item_func::EQ_FUNC,
                         func->arguments()[1], cmp_type);
      if (tree)
      {
        Item **arg, **end;
        for (arg= func->arguments()+2, end= arg+func->argument_count()-2;
             arg < end; arg++)
        {
          tree= tree_or(param, tree, get_mm_parts(param, cond_func, field,
                                                  Item_func::EQ_FUNC, *arg, cmp_type));
        }
      }
    }
    break;
  }
  default:
  {
    /*
      Here the following predicates are processed:
      <, <=, =, >=, >, LIKE, IS NULL, IS NOT NULL.  If the predicate has
      the form "const op field", use the reversed comparison.
    */
    Item_func::Functype func_type=
      (value != cond_func->arguments()[0]) ? cond_func->functype() :
        ((Item_bool_func2*) cond_func)->rev_functype();
    tree= get_mm_parts(param, cond_func, field, func_type, value, cmp_type);
  }
  }
  return tree;
}
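/*
  Build a SEL_TREE for the predicate on the given field, then AND it with
  the trees built for every other field known to be equal to it (via
  Item_equal), since the same condition restricts each of them.
*/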
static optimizer::SEL_TREE *get_full_func_mm_tree(optimizer::RangeParameter *param,
                                                  Item_func *cond_func,
                                                  Item_field *field_item, Item *value,
                                                  bool inv)
{
  optimizer::SEL_TREE *tree= 0;
  optimizer::SEL_TREE *ftree= 0;
  table_map ref_tables= 0;
  table_map param_comp= ~(param->prev_tables | param->read_tables |
                          param->current_table);

  for (uint32_t i= 0; i < cond_func->arg_count; i++)
  {
    Item *arg= cond_func->arguments()[i]->real_item();
    if (arg != field_item)
      ref_tables|= arg->used_tables();
  }

  Field *field= field_item->field;
  field->setWriteSet();

  Item_result cmp_type= field->cmp_type();
  if (!((ref_tables | field->getTable()->map) & param_comp))
    ftree= get_func_mm_tree(param, cond_func, field, value, cmp_type, inv);
  Item_equal *item_equal= field_item->item_equal;
  if (item_equal)
  {
    Item_equal_iterator it(item_equal->begin());
    Item_field *item;
    while ((item= it++))
    {
      Field *f= item->field;
      f->setWriteSet();

      if (field->eq(f))
        continue;
      if (!((ref_tables | f->getTable()->map) & param_comp))
      {
        tree= get_func_mm_tree(param, cond_func, f, value, cmp_type, inv);
        ftree= !ftree ? tree : tree_and(param, ftree, tree);
      }
    }
  }
  return ftree;
}
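/*
  Recursively build a SEL_TREE for an arbitrary condition: AND/OR items
  are combined with tree_and()/tree_or(); constant conditions fold to
  ALWAYS or IMPOSSIBLE; single predicates are dispatched through
  get_full_func_mm_tree().
*/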
static optimizer::SEL_TREE *get_mm_tree(optimizer::RangeParameter *param, COND *cond)
{
  optimizer::SEL_TREE *tree= 0;
  optimizer::SEL_TREE *ftree= 0;
  Item_field *field_item= 0;
  bool inv= false;
  Item *value= 0;

  if (cond->type() == Item::COND_ITEM)
  {
    List<Item>::iterator li(((Item_cond*) cond)->argument_list()->begin());

    if (((Item_cond*) cond)->functype() == Item_func::COND_AND_FUNC)
    {
      tree= NULL;
      while (Item* item= li++)
      {
        optimizer::SEL_TREE *new_tree= get_mm_tree(param,item);
        if (param->session->is_fatal_error ||
            param->alloced_sel_args > optimizer::SEL_ARG::MAX_SEL_ARGS)
          return NULL;
        tree= tree_and(param,tree,new_tree);
        if (tree && tree->type == optimizer::SEL_TREE::IMPOSSIBLE)
          break;
      }
    }
    else
    {
      /* COND OR */
      tree= get_mm_tree(param,li++);
      if (tree)
      {
        while (Item* item= li++)
        {
          optimizer::SEL_TREE *new_tree= get_mm_tree(param,item);
          if (! new_tree)
            return NULL;
          tree= tree_or(param,tree,new_tree);
          if (!tree || tree->type == optimizer::SEL_TREE::ALWAYS)
            break;
        }
      }
    }
    return tree;
  }
  /* Here when simple cond */
  if (cond->const_item() && !cond->is_expensive())
  {
    /*
      Evaluating cond->val_int() can allocate on session->mem_root (e.g.
      for a subselect item), and that memory must have statement
      lifetime, so temporarily restore the original mem_root.
    */
    memory::Root *tmp_root= param->mem_root;
    param->session->mem_root= param->old_root;
    tree= cond->val_int() ?
          new(tmp_root) optimizer::SEL_TREE(optimizer::SEL_TREE::ALWAYS) :
          new(tmp_root) optimizer::SEL_TREE(optimizer::SEL_TREE::IMPOSSIBLE);
    param->session->mem_root= tmp_root;
    return tree;
  }

  table_map ref_tables= 0;
  table_map param_comp= ~(param->prev_tables | param->read_tables |
                          param->current_table);
  if (cond->type() != Item::FUNC_ITEM)
  {
    /* Should be a field */
    ref_tables= cond->used_tables();
    if ((ref_tables & param->current_table) ||
        (ref_tables & ~(param->prev_tables | param->read_tables)))
      return NULL;
    return(new optimizer::SEL_TREE(optimizer::SEL_TREE::MAYBE));
  }

  Item_func *cond_func= (Item_func*) cond;
  if (cond_func->functype() == Item_func::BETWEEN ||
      cond_func->functype() == Item_func::IN_FUNC)
    inv= ((Item_func_opt_neg *) cond_func)->negated;
  else if (cond_func->select_optimize() == Item_func::OPTIMIZE_NONE)
    return NULL;

  param->cond= cond;

  switch (cond_func->functype()) {
  case Item_func::BETWEEN:
    if (cond_func->arguments()[0]->real_item()->type() == Item::FIELD_ITEM)
    {
      field_item= (Item_field*) (cond_func->arguments()[0]->real_item());
      ftree= get_full_func_mm_tree(param, cond_func, field_item, NULL, inv);
    }

    /*
      Concerning the code below see the NOTES section in
      the comments for the function get_full_func_mm_tree()
    */
    for (uint32_t i= 1 ; i < cond_func->arg_count ; i++)
    {
      if (cond_func->arguments()[i]->real_item()->type() == Item::FIELD_ITEM)
      {
        field_item= (Item_field*) (cond_func->arguments()[i]->real_item());
        optimizer::SEL_TREE *tmp= get_full_func_mm_tree(param, cond_func,
                                                        field_item, (Item*)(intptr_t)i, inv);
        if (inv)
          tree= !tree ? tmp : tree_or(param, tree, tmp);
        else
          tree= tree_and(param, tree, tmp);
      }
      else if (inv)
      {
        tree= 0;
        break;
      }
    }

    ftree= tree_and(param, ftree, tree);
    break;
  case Item_func::IN_FUNC:
  {
    Item_func_in *func= (Item_func_in*) cond_func;
    if (func->key_item()->real_item()->type() != Item::FIELD_ITEM)
      return NULL;
    field_item= (Item_field*) (func->key_item()->real_item());
    ftree= get_full_func_mm_tree(param, cond_func, field_item, NULL, inv);
  }
  break;
  case Item_func::MULT_EQUAL_FUNC:
  {
    Item_equal *item_equal= (Item_equal *) cond;
    if (!(value= item_equal->get_const()))
      return NULL;
    Item_equal_iterator it(item_equal->begin());
    ref_tables= value->used_tables();
    while ((field_item= it++))
    {
      Field *field= field_item->field;
      field->setWriteSet();

      Item_result cmp_type= field->cmp_type();
      if (!((ref_tables | field->getTable()->map) & param_comp))
      {
        tree= get_mm_parts(param, cond, field, Item_func::EQ_FUNC,
                           value, cmp_type);
        ftree= !ftree ? tree : tree_and(param, ftree, tree);
      }
    }
    return ftree;
  }
  default:
    if (cond_func->arguments()[0]->real_item()->type() == Item::FIELD_ITEM)
    {
      field_item= (Item_field*) (cond_func->arguments()[0]->real_item());
      value= cond_func->arg_count > 1 ? cond_func->arguments()[1] : 0;
    }
    else if (cond_func->have_rev_func() &&
             cond_func->arguments()[1]->real_item()->type() ==
             Item::FIELD_ITEM)
    {
      field_item= (Item_field*) (cond_func->arguments()[1]->real_item());
      value= cond_func->arguments()[0];
    }
    else
      return NULL;
    ftree= get_full_func_mm_tree(param, cond_func, field_item, value, inv);
  }

  return ftree;
}
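/*
  Build a SEL_TREE with one SEL_ARG interval per key that has 'field' as
  a key part.  Non-constant values yield MAYBE_KEY entries instead of
  real intervals.
*/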
static optimizer::SEL_TREE *
get_mm_parts(optimizer::RangeParameter *param,
             COND *cond_func,
             Field *field,
             Item_func::Functype type,
             Item *value, Item_result)
{
  if (field->getTable() != param->table)
    return NULL;

  KEY_PART *key_part = param->key_parts;
  KEY_PART *end = param->key_parts_end;
  optimizer::SEL_TREE *tree=0;
  if (value &&
      value->used_tables() & ~(param->prev_tables | param->read_tables))
    return NULL;
  for (; key_part != end; key_part++)
  {
    if (field->eq(key_part->field))
    {
      optimizer::SEL_ARG *sel_arg=0;
      if (! tree)
        tree= new optimizer::SEL_TREE;
      if (!value || !(value->used_tables() & ~param->read_tables))
      {
        sel_arg= get_mm_leaf(param,cond_func, key_part->field,key_part,type,value);
        if (! sel_arg)
          continue;
        if (sel_arg->type == optimizer::SEL_ARG::IMPOSSIBLE)
        {
          tree->type=optimizer::SEL_TREE::IMPOSSIBLE;
          return tree;
        }
      }
      else
      {
        /* But we may be able to use 'WHERE key_part=const' in any case */
        sel_arg= new optimizer::SEL_ARG(optimizer::SEL_ARG::MAYBE_KEY);
      }
      sel_arg->part= (unsigned char) key_part->part;
      tree->keys[key_part->key]=sel_add(tree->keys[key_part->key],sel_arg);
      tree->keys_map.set(key_part->key);
    }
  }
  return tree;
}
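/*
  Build a single SEL_ARG interval for "field <op> value" on one key part.
  Handles IS [NOT] NULL, collation and type compatibility checks, LIKE
  prefix ranges, timestamp range clamping, and unsigned/signed constant
  adjustments before storing the key image.
*/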
static optimizer::SEL_ARG *
get_mm_leaf(optimizer::RangeParameter *param,
            COND *conf_func,
            Field *field,
            KEY_PART *key_part,
            Item_func::Functype type,
            Item *value)
{
  uint32_t maybe_null= (uint32_t) field->real_maybe_null();
  bool optimize_range;
  optimizer::SEL_ARG *tree= NULL;
  memory::Root *alloc= param->mem_root;
  unsigned char *str;
  int err= 0;

  /*
    We need to restore the runtime mem_root of the thread in this
    function because it evaluates the value of its argument, while
    the argument can be any, e.g. a subselect.
  */
  param->session->mem_root= param->old_root;
  if (! value)					/* IS NULL or IS NOT NULL */
  {
    if (field->getTable()->maybe_null)		/* Can't use a key on this */
      goto end;
    if (! maybe_null)				/* NOT NULL column */
    {
      if (type == Item_func::ISNULL_FUNC)
        tree= &optimizer::null_element;
      goto end;
    }
    tree= new (*alloc) optimizer::SEL_ARG(field,is_null_string,is_null_string);
    if (type == Item_func::ISNOTNULL_FUNC)
    {
      tree->min_flag=NEAR_MIN;			/* IS NOT NULL ->  X > NULL */
      tree->max_flag=NO_MAX_RANGE;
    }
    goto end;
  }

  /*
    We can't use an index when comparing strings of different collations,
    unless the comparison collation uses binary sorting.
  */
  if (field->result_type() == STRING_RESULT &&
      value->result_type() == STRING_RESULT &&
      ((Field_str*)field)->charset() != conf_func->compare_collation() &&
      !(conf_func->compare_collation()->state & MY_CS_BINSORT))
    goto end;

  if (param->using_real_indexes)
    optimize_range= field->optimize_range(param->real_keynr[key_part->key], key_part->part);
  else
    optimize_range= true;

  if (type == Item_func::LIKE_FUNC)
  {
    bool like_error;
    char buff1[MAX_FIELD_WIDTH];
    unsigned char *min_str,*max_str;
    String tmp(buff1,sizeof(buff1),value->collation.collation),*res;
    size_t length, offset, min_length, max_length;
    uint32_t field_length= field->pack_length()+maybe_null;

    if (! optimize_range)
      goto end;
    if (! (res= value->val_str(&tmp)))
    {
      tree= &optimizer::null_element;
      goto end;
    }

    if (field->cmp_type() != STRING_RESULT)
      goto end;					/* Can only optimize strings */

    offset= maybe_null;
    length= key_part->store_length;

    if (length != key_part->length + maybe_null)
    {
      /* key packed with length prefix */
      offset+= HA_KEY_BLOB_LENGTH;
      field_length= length - HA_KEY_BLOB_LENGTH;
    }
    else
    {
      if (unlikely(length < field_length))
      {
        /* This can only happen for a key that overlaps many fields. */
        length= field_length;
      }
      else
        field_length= length;
    }
    length+= offset;
    min_str= alloc->alloc(length*2);
    max_str= min_str+length;
    if (maybe_null)
      max_str[0]= min_str[0]= 0;

    field_length-= maybe_null;
    int escape_code= make_escape_code(field->charset(), ((Item_func_like*)(param->cond))->escape);
    like_error= my_like_range(field->charset(),
                              res->ptr(), res->length(),
                              escape_code,
                              internal::wild_one, internal::wild_many,
                              field_length,
                              (char*) min_str+offset, (char*) max_str+offset,
                              &min_length, &max_length);
    if (like_error)				/* Can't optimize with LIKE */
      goto end;

    if (offset != maybe_null)			/* BLOB or VARCHAR */
    {
      int2store(min_str+maybe_null,min_length);
      int2store(max_str+maybe_null,max_length);
    }
    tree= new (alloc) optimizer::SEL_ARG(field, min_str, max_str);
    goto end;
  }

  if (! optimize_range &&
      type != Item_func::EQ_FUNC &&
      type != Item_func::EQUAL_FUNC)
    goto end;					/* Can't optimize this */

  /*
    We can't always use indexes when comparing a string index to a
    number; cmp_type() is checked to allow comparing dates to numbers.
  */
  if (field->result_type() == STRING_RESULT &&
      value->result_type() != STRING_RESULT &&
      field->cmp_type() != value->result_type())
    goto end;

  /*
    TIMESTAMP columns clamp out-of-range datetime constants to the
    [min,max] Timestamp values before saving them in the field.
  */
  if (field->is_timestamp())
  {
    if (value->real_item()->type() == Item::FIELD_ITEM
        && value->result_type() == STRING_RESULT)
    {
      char buff[DateTime::MAX_STRING_LENGTH];
      String tmp(buff,sizeof(buff), &my_charset_bin);
      String *res= value->val_str(&tmp);

      if (! res)
        goto end;

      DateTime value_datetime;

      if (! value_datetime.from_string(res->c_ptr(), (size_t) res->length()))
        goto end;

      Timestamp max_timestamp;
      Timestamp min_timestamp;

      (void) max_timestamp.from_time_t((time_t) INT32_MAX);
      (void) min_timestamp.from_time_t((time_t) 0);

      if (value_datetime < min_timestamp)
      {
        /* Datetime is before the supported range; use the minimum Timestamp. */
        char new_value_buff[DateTime::MAX_STRING_LENGTH];
        int new_value_length;
        String new_value_string(new_value_buff,sizeof(new_value_buff), &my_charset_bin);

        new_value_length= min_timestamp.to_string(new_value_string.c_ptr(),
                                                  DateTime::MAX_STRING_LENGTH);
        assert((new_value_length+1) < DateTime::MAX_STRING_LENGTH);
        new_value_string.length(new_value_length);
        err= value->save_str_value_in_field(field, &new_value_string);
      }
      else if (value_datetime > max_timestamp)
      {
        /* Datetime is after the supported range; use the maximum Timestamp. */
        char new_value_buff[DateTime::MAX_STRING_LENGTH];
        int new_value_length;
        String new_value_string(new_value_buff,sizeof(new_value_buff), &my_charset_bin);

        new_value_length= max_timestamp.to_string(new_value_string.c_ptr(),
                                                  DateTime::MAX_STRING_LENGTH);
        assert((new_value_length+1) < DateTime::MAX_STRING_LENGTH);
        new_value_string.length(new_value_length);
        err= value->save_str_value_in_field(field, &new_value_string);
      }
      else
        err= value->save_in_field(field, 1);
    }
    else
      err= value->save_in_field(field, 1);
  }
  else
    err= value->save_in_field(field, 1);

  if (err > 0)
  {
    if (field->cmp_type() != value->result_type())
    {
      if ((type == Item_func::EQ_FUNC || type == Item_func::EQUAL_FUNC) &&
          value->result_type() == item_cmp_type(field->result_type(),
                                                value->result_type()))
      {
        tree= new (alloc) optimizer::SEL_ARG(field, 0, 0);
        tree->type= optimizer::SEL_ARG::IMPOSSIBLE;
        goto end;
      }
      else
      {
        tree= 0;
        if (err == 3 && field->type() == DRIZZLE_TYPE_DATE &&
            (type == Item_func::GT_FUNC || type == Item_func::GE_FUNC ||
             type == Item_func::LT_FUNC || type == Item_func::LE_FUNC))
        {
          /*
            A DATETIME with a non-zero time part was saved into a DATE
            column; the comparison is adjusted together with the other
            types below.
          */
        }
        else
          goto end;
      }
    }
    else if (err == 1 && field->result_type() == INT_RESULT)
    {
      /*
        The constant was outside the column range; convert the strict
        comparison into a non-strict one against the clamped value.
      */
      if (type == Item_func::LT_FUNC && (value->val_int() > 0))
        type= Item_func::LE_FUNC;
      else if (type == Item_func::GT_FUNC &&
               !((Field_num*)field)->unsigned_flag &&
               !((Item_int*)value)->unsigned_flag &&
               (value->val_int() < 0))
        type= Item_func::GE_FUNC;
    }
    else if (err == 2 && field->result_type() == STRING_RESULT)
    {
      if (type == Item_func::EQ_FUNC || type == Item_func::EQUAL_FUNC)
      {
        tree= new (alloc) optimizer::SEL_ARG(field, 0, 0);
        tree->type= optimizer::SEL_ARG::IMPOSSIBLE;
      }
      else
        tree= NULL;
      goto end;
    }
  }
  if (err < 0)
  {
    /* This happens when we try to insert a NULL field in a not null column */
    tree= &optimizer::null_element;		/* cmp with NULL is never true */
    goto end;
  }

  /*
    Any predicate except "<=>" (null-safe equality) involving NULL as a
    constant is always false.
  */
  if (type != Item_func::EQUAL_FUNC && field->is_real_null())
  {
    tree= &optimizer::null_element;
    goto end;
  }

  str= alloc->alloc(key_part->store_length+1);
  if (maybe_null)
    *str= field->is_real_null();		/* Set to 1 if NULL */
  field->get_key_image(str+maybe_null, key_part->length);
  tree= new (alloc) optimizer::SEL_ARG(field, str, str);

  /*
    Check if we are comparing an UNSIGNED integer with a negative constant.
    In this case we know that:
      (a) (unsigned_int [< | <=] negative_constant) == false
      (b) (unsigned_int [> | >=] negative_constant) == true
  */
  if (field->result_type() == INT_RESULT &&
      value->result_type() == INT_RESULT &&
      ((Field_num*)field)->unsigned_flag && !((Item_int*)value)->unsigned_flag)
  {
    int64_t item_val= value->val_int();
    if (item_val < 0)
    {
      if (type == Item_func::LT_FUNC || type == Item_func::LE_FUNC)
      {
        tree->type= optimizer::SEL_ARG::IMPOSSIBLE;
        goto end;
      }
      if (type == Item_func::GT_FUNC || type == Item_func::GE_FUNC)
      {
        tree= 0;
        goto end;
      }
    }
  }

  switch (type) {
  case Item_func::LT_FUNC:
    if (field_is_equal_to_item(field,value))
      tree->max_flag=NEAR_MAX;
    /* fall through */
  case Item_func::LE_FUNC:
    if (! maybe_null)
      tree->min_flag=NO_MIN_RANGE;		/* From start */
    else
    {						/* > NULL */
      tree->min_value=is_null_string;
      tree->min_flag=NEAR_MIN;
    }
    break;
  case Item_func::GT_FUNC:
    /* Don't use open ranges for partial key_segments */
    if (field_is_equal_to_item(field,value) &&
        !(key_part->flag & HA_PART_KEY_SEG))
      tree->min_flag=NEAR_MIN;
    /* fall through */
  case Item_func::GE_FUNC:
    tree->max_flag=NO_MAX_RANGE;
    break;
  default:
    break;
  }

end:
  param->session->mem_root= alloc;
  return tree;
}
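/*
  Merge two SEL_ARG chains for different key parts of the same index,
  keeping the per-part lists ordered by key part number.
*/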
static optimizer::SEL_ARG *
sel_add(optimizer::SEL_ARG *key1, optimizer::SEL_ARG *key2)
{
  optimizer::SEL_ARG *root= NULL;
  optimizer::SEL_ARG **key_link= NULL;

  if (! key1)
    return key2;
  if (! key2)
    return key1;

  key_link= &root;
  while (key1 && key2)
  {
    if (key1->part < key2->part)
    {
      *key_link= key1;
      key_link= &key1->next_key_part;
      key1= key1->next_key_part;
    }
    else
    {
      *key_link= key2;
      key_link= &key2->next_key_part;
      key2= key2->next_key_part;
    }
  }
  *key_link= key1 ? key1 : key2;
  return root;
}
static const int CLONE_KEY1_MAYBE= 1;
static const int CLONE_KEY2_MAYBE= 2;

static uint32_t swap_clone_flag(uint32_t a)
{
  return ((a & 1) << 1) | ((a & 2) >> 1);
}
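/*
  AND two SEL_TREEs.  Handles the trivial ALWAYS/IMPOSSIBLE/MAYBE cases,
  then ANDs the trees key by key via key_and().  index_merge candidates
  are dropped when a plain range on some key remains possible.
*/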
static optimizer::SEL_TREE *
tree_and(optimizer::RangeParameter *param, optimizer::SEL_TREE *tree1, optimizer::SEL_TREE *tree2)
{
  if (! tree1)
    return tree2;
  if (! tree2)
    return tree1;
  if (tree1->type == optimizer::SEL_TREE::IMPOSSIBLE || tree2->type == optimizer::SEL_TREE::ALWAYS)
    return tree1;
  if (tree2->type == optimizer::SEL_TREE::IMPOSSIBLE || tree1->type == optimizer::SEL_TREE::ALWAYS)
    return tree2;
  if (tree1->type == optimizer::SEL_TREE::MAYBE)
  {
    if (tree2->type == optimizer::SEL_TREE::KEY)
      tree2->type=optimizer::SEL_TREE::KEY_SMALLER;
    return tree2;
  }
  if (tree2->type == optimizer::SEL_TREE::MAYBE)
  {
    tree1->type=optimizer::SEL_TREE::KEY_SMALLER;
    return tree1;
  }
  key_map result_keys;
  result_keys.reset();

  /* Join the trees key per key */
  optimizer::SEL_ARG **key1,**key2,**end;
  for (key1= tree1->keys,key2= tree2->keys,end=key1+param->keys ;
       key1 != end ; key1++,key2++)
  {
    uint32_t flag=0;
    if (*key1 || *key2)
    {
      if (*key1 && !(*key1)->simple_key())
        flag|=CLONE_KEY1_MAYBE;
      if (*key2 && !(*key2)->simple_key())
        flag|=CLONE_KEY2_MAYBE;
      *key1=key_and(param, *key1, *key2, flag);
      if (*key1 && (*key1)->type == optimizer::SEL_ARG::IMPOSSIBLE)
      {
        tree1->type= optimizer::SEL_TREE::IMPOSSIBLE;
        return tree1;
      }
      result_keys.set(key1 - tree1->keys);
    }
  }
  tree1->keys_map= result_keys;
  /* dispose index_merge if there is a "range" option */
  if (result_keys.any())
  {
    tree1->merges.clear();
    return tree1;
  }

  /* ok, both trees are index_merge trees */
  imerge_list_and_list(&tree1->merges, &tree2->merges);
  return tree1;
}
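/*
  AND key2 onto every interval of key1 (key1's parts all precede key2's).
  Intervals whose next_key_part becomes IMPOSSIBLE are removed from key1.
*/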
static optimizer::SEL_ARG *
and_all_keys(optimizer::RangeParameter *param,
             optimizer::SEL_ARG *key1,
             optimizer::SEL_ARG *key2,
             uint32_t clone_flag)
{
  optimizer::SEL_ARG *next= NULL;
  ulong use_count= key1->use_count;

  if (key1->size() != 1)
  {
    key2->use_count+=key1->size()-1;
    key2->increment_use_count((int) key1->size()-1);
  }
  if (key1->type == optimizer::SEL_ARG::MAYBE_KEY)
  {
    key1->right= key1->left= &optimizer::null_element;
    key1->next= key1->prev= 0;
  }
  for (next= key1->first(); next ; next=next->next)
  {
    if (next->next_key_part)
    {
      optimizer::SEL_ARG *tmp= key_and(param, next->next_key_part, key2, clone_flag);
      if (tmp && tmp->type == optimizer::SEL_ARG::IMPOSSIBLE)
      {
        key1=key1->tree_delete(next);
        continue;
      }
      next->next_key_part=tmp;
      if (use_count)
        next->increment_use_count(use_count);
      if (param->alloced_sel_args > optimizer::SEL_ARG::MAX_SEL_ARGS)
        break;
    }
    else
      next->next_key_part=key2;
  }
  if (! key1)
    return &optimizer::null_element;		/* Impossible ranges */
  key1->use_count++;
  return key1;
}
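/*
  AND two SEL_ARG trees for the same index.  When the trees start at
  different key parts, the deeper one is appended below every interval of
  the shallower one (and_all_keys); otherwise the interval lists are
  intersected pairwise, recursing into next_key_part.
*/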
static optimizer::SEL_ARG *
key_and(optimizer::RangeParameter *param,
        optimizer::SEL_ARG *key1,
        optimizer::SEL_ARG *key2,
        uint32_t clone_flag)
{
  if (! key1)
    return key2;
  if (! key2)
    return key1;
  if (key1->part != key2->part)
  {
    if (key1->part > key2->part)
    {
      std::swap(key1, key2);
      clone_flag=swap_clone_flag(clone_flag);
    }
    /* key1->part < key2->part */
    key1->use_count--;
    if (key1->use_count > 0)
      if (! (key1= key1->clone_tree(param)))
        return NULL;				/* OOM */
    return and_all_keys(param, key1, key2, clone_flag);
  }

  if (((clone_flag & CLONE_KEY2_MAYBE) &&
       ! (clone_flag & CLONE_KEY1_MAYBE) &&
       key2->type != optimizer::SEL_ARG::MAYBE_KEY) ||
      key1->type == optimizer::SEL_ARG::MAYBE_KEY)
  {						/* Put simple key in key2 */
    std::swap(key1, key2);
    clone_flag= swap_clone_flag(clone_flag);
  }

  /* If one of the keys is MAYBE_KEY then the found region may be smaller */
  if (key2->type == optimizer::SEL_ARG::MAYBE_KEY)
  {
    if (key1->use_count > 1)
    {
      key1->use_count--;
      if (! (key1=key1->clone_tree(param)))
        return NULL;				/* OOM */
      key1->use_count++;
    }
    if (key1->type == optimizer::SEL_ARG::MAYBE_KEY)
    {						/* Both are maybe key */
      key1->next_key_part= key_and(param,
                                   key1->next_key_part,
                                   key2->next_key_part,
                                   clone_flag);
      if (key1->next_key_part &&
          key1->next_key_part->type == optimizer::SEL_ARG::IMPOSSIBLE)
        return key1;
    }
    else
    {
      key1->maybe_smaller();
      if (key2->next_key_part)
      {
        key1->use_count--;			/* Incremented in and_all_keys */
        return and_all_keys(param, key1, key2, clone_flag);
      }
      key2->use_count--;			/* Key2 doesn't have a tree */
    }
    return key1;
  }

  key1->use_count--;
  key2->use_count--;
  optimizer::SEL_ARG *e1= key1->first();
  optimizer::SEL_ARG *e2= key2->first();
  optimizer::SEL_ARG *new_tree= NULL;

  while (e1 && e2)
  {
    int cmp= e1->cmp_min_to_min(e2);
    if (cmp < 0)
    {
      if (get_range(&e1, &e2, key1))
        continue;
    }
    else if (get_range(&e2, &e1, key2))
      continue;
    optimizer::SEL_ARG *next= key_and(param,
                                      e1->next_key_part,
                                      e2->next_key_part,
                                      clone_flag);
    e1->increment_use_count(1);
    e2->increment_use_count(1);
    if (! next || next->type != optimizer::SEL_ARG::IMPOSSIBLE)
    {
      optimizer::SEL_ARG *new_arg= e1->clone_and(e2);
      new_arg->next_key_part=next;
      if (! new_tree)
        new_tree= new_arg;
      else
        new_tree=new_tree->insert(new_arg);
    }
    if (e1->cmp_max_to_max(e2) < 0)
      e1=e1->next;				/* e1 can't overlap next e2 */
    else
      e2=e2->next;
  }
  key1->free_tree();
  key2->free_tree();
  if (new_tree)
    return new_tree;
  return &optimizer::null_element;		/* Impossible range */
}
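/*
  Advance *e1 to the first interval of root1 that may overlap *e2.
  Returns 1 when an iterator was exhausted or stepped past the other and
  the caller should continue its loop, 0 when the intervals may overlap.
*/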
static bool
get_range(optimizer::SEL_ARG **e1, optimizer::SEL_ARG **e2, optimizer::SEL_ARG *root1)
{
  (*e1)= root1->find_range(*e2);		/* first e1->min < e2->min */
  if ((*e1)->cmp_max_to_min(*e2) < 0)		/* e1 < e2 */
  {
    if (! ((*e1)=(*e1)->next))
      return 1;
    if ((*e1)->cmp_min_to_max(*e2) > 0)
    {
      (*e2)=(*e2)->next;
      return 1;
    }
  }
  return 0;
}
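/*
  State used to enumerate the ranges of a SEL_ARG graph as a range
  sequence for the multi-range-read interface: a stack of partially built
  min/max key tuples, one entry per key part on the current path.
*/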
  /* Pointers in min and max keys */
  unsigned char *min_key, *max_key;

  /* Flags, for min and max key */
  uint32_t min_key_flag, max_key_flag;

  /* Number of key parts */
  uint32_t min_key_parts, max_key_parts;

  /* Number of the index in the table */
  uint32_t real_keyno;
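/* Initialize the range sequence: reset the stack to an empty tuple. */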
static range_seq_t sel_arg_range_seq_init(void *init_param, uint32_t, uint32_t)
{
  SEL_ARG_RANGE_SEQ *seq= (SEL_ARG_RANGE_SEQ*)init_param;
  seq->at_start= true;
  seq->stack[0].key_tree= NULL;
  seq->stack[0].min_key= seq->param->min_key;
  seq->stack[0].min_key_flag= 0;
  seq->stack[0].min_key_parts= 0;

  seq->stack[0].max_key= seq->param->max_key;
  seq->stack[0].max_key_flag= 0;
  seq->stack[0].max_key_parts= 0;
  seq->i= 0;
  return (range_seq_t) init_param;
}
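/*
  Push the interval of 'key_tree' onto the traversal stack, appending its
  min/max values to the key tuples built so far.
*/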
static void step_down_to(SEL_ARG_RANGE_SEQ *arg, optimizer::SEL_ARG *key_tree)
{
  RANGE_SEQ_ENTRY *cur= &arg->stack[arg->i+1];
  RANGE_SEQ_ENTRY *prev= &arg->stack[arg->i];

  cur->key_tree= key_tree;
  cur->min_key= prev->min_key;
  cur->max_key= prev->max_key;
  cur->min_key_parts= prev->min_key_parts;
  cur->max_key_parts= prev->max_key_parts;

  uint16_t stor_length= arg->param->key[arg->keyno][key_tree->part].store_length;
  cur->min_key_parts += key_tree->store_min(stor_length, &cur->min_key,
                                            prev->min_key_flag);
  cur->max_key_parts += key_tree->store_max(stor_length, &cur->max_key,
                                            prev->max_key_flag);

  cur->min_key_flag= prev->min_key_flag | key_tree->min_flag;
  cur->max_key_flag= prev->max_key_flag | key_tree->max_flag;

  if (key_tree->is_null_interval())
    cur->min_key_flag |= NULL_RANGE;
  (arg->i)++;
}
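/*
  Produce the next KEY_MULTI_RANGE from the SEL_ARG graph.  The traversal
  walks the graph depth-first; along the way it detects whether the scan
  can still qualify as a ROR scan and flags UNIQUE_RANGE for single-row
  ranges on unique keys.  Returns 0 on success, 1 when no ranges remain.
*/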
static uint32_t sel_arg_range_seq_next(range_seq_t rseq, KEY_MULTI_RANGE *range)
{
  optimizer::SEL_ARG *key_tree;
  SEL_ARG_RANGE_SEQ *seq= (SEL_ARG_RANGE_SEQ*)rseq;

  if (seq->at_start)
  {
    key_tree= seq->start;
    seq->at_start= false;
    goto walk_up_n_right;
  }

  key_tree= seq->stack[seq->i].key_tree;

  /* Ok, we're at some "full tuple" position in the tree */

  /* Step down if we can */
  if (key_tree->next && key_tree->next != &optimizer::null_element)
  {
    /* Step down; update the tuple, we'll step right and stay there. */
    seq->i--;
    step_down_to(seq, key_tree->next);
    key_tree= key_tree->next;
    seq->param->is_ror_scan= false;
    goto walk_right_n_up;
  }

  /* Can't step down: walk left until we can step down */
  for (;;)
  {
    if (seq->i == 1) /* Can't step left: end of sequence */
      return 1;
    /* Step left */
    seq->i--;
    key_tree= seq->stack[seq->i].key_tree;

    /* Step down if we can */
    if (key_tree->next && key_tree->next != &optimizer::null_element)
    {
      /* Step down; update the tuple */
      seq->i--;
      step_down_to(seq, key_tree->next);
      key_tree= key_tree->next;
      break;
    }
  }

  /*
    Ok, we've stepped down from the path to the previous tuple.
    Walk right-up while we can.
  */
walk_right_n_up:
  while (key_tree->next_key_part && key_tree->next_key_part != &optimizer::null_element &&
         key_tree->next_key_part->part == key_tree->part + 1 &&
         key_tree->next_key_part->type == optimizer::SEL_ARG::KEY_RANGE)
  {
    {
      RANGE_SEQ_ENTRY *cur= &seq->stack[seq->i];
      uint32_t min_key_length= cur->min_key - seq->param->min_key;
      uint32_t max_key_length= cur->max_key - seq->param->max_key;
      uint32_t len= cur->min_key - cur[-1].min_key;
      if (! (min_key_length == max_key_length &&
             ! memcmp(cur[-1].min_key, cur[-1].max_key, len) &&
             ! key_tree->min_flag && ! key_tree->max_flag))
      {
        /* The interval on this keypart is not a single point */
        seq->param->is_ror_scan= false;
        if (! key_tree->min_flag)
          cur->min_key_parts+=
            key_tree->next_key_part->store_min_key(seq->param->key[seq->keyno],
                                                   &cur->min_key,
                                                   &cur->min_key_flag);
        if (! key_tree->max_flag)
          cur->max_key_parts+=
            key_tree->next_key_part->store_max_key(seq->param->key[seq->keyno],
                                                   &cur->max_key,
                                                   &cur->max_key_flag);
        break;
      }
    }

    /*
      The current interval is in the form "t.keypartX = const" and there is
      a next_key_part interval: step right, then walk up from there.
    */
    key_tree= key_tree->next_key_part;

walk_up_n_right:
    while (key_tree->prev && key_tree->prev != &optimizer::null_element)
    {
      /* Step up */
      key_tree= key_tree->prev;
    }
    step_down_to(seq, key_tree);
  }

  /* Ok, got a tuple: fill in the KEY_MULTI_RANGE structure */
  {
    RANGE_SEQ_ENTRY *cur= &seq->stack[seq->i];

    range->ptr= (char*)(size_t)(key_tree->part);
    range->range_flag= cur->min_key_flag | cur->max_key_flag;

    range->start_key.key= seq->param->min_key;
    range->start_key.length= cur->min_key - seq->param->min_key;
    range->start_key.keypart_map= make_prev_keypart_map(cur->min_key_parts);
    range->start_key.flag= (cur->min_key_flag & NEAR_MIN ? HA_READ_AFTER_KEY :
                                                           HA_READ_KEY_EXACT);

    range->end_key.key= seq->param->max_key;
    range->end_key.length= cur->max_key - seq->param->max_key;
    range->end_key.flag= (cur->max_key_flag & NEAR_MAX ? HA_READ_BEFORE_KEY :
                                                         HA_READ_AFTER_KEY);
    range->end_key.keypart_map= make_prev_keypart_map(cur->max_key_parts);

    if (!(cur->min_key_flag & ~NULL_RANGE) && !cur->max_key_flag &&
        (uint32_t)key_tree->part+1 == seq->param->table->key_info[seq->real_keyno].key_parts &&
        (seq->param->table->key_info[seq->real_keyno].flags & HA_NOSAME) == HA_NOSAME &&
        range->start_key.length == range->end_key.length &&
        ! memcmp(seq->param->min_key, seq->param->max_key, range->start_key.length))
      range->range_flag= UNIQUE_RANGE | (cur->min_key_flag & NULL_RANGE);

    if (seq->param->is_ror_scan)
    {
      /*
        The scan remains a candidate ROR scan only if this range is a
        single-point equality over a prefix that is_key_scan_ror() accepts.
      */
      if (!(!(cur->min_key_flag & ~NULL_RANGE) && !cur->max_key_flag &&
            (range->start_key.length == range->end_key.length) &&
            ! memcmp(range->start_key.key, range->end_key.key, range->start_key.length) &&
            is_key_scan_ror(seq->param, seq->real_keyno, key_tree->part + 1)))
        seq->param->is_ror_scan= false;
    }
  }
  seq->param->range_count++;
  seq->param->max_key_part= max(seq->param->max_key_part, (uint32_t)key_tree->part);
  return 0;
}
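
/*
  Calculate cost and number of rows to be retrieved by a range scan on the
  given index.

  SYNOPSIS
    check_quick_select()
      session           Session
      param             Parameter from test_quick_select
      idx               Position of the used key in param->key
      index_only        true - assume only index tuples will be accessed
      tree              Transformed selection condition; tree->keys[idx]
                        holds the intervals for the given index
      update_tbl_stats  true - update table->quick_* with the scan data
      mrr_flags   INOUT MRR access flags
      bufsize     OUT   Best size of the MRR buffer
      cost        OUT   Estimated cost of the scan

  DESCRIPTION
    Enumerate the intervals with the sel_arg_range_seq_* sequence functions
    and let the storage engine estimate the scan through
    multi_range_read_info_const(). Also determines whether the scan is a
    candidate ROR scan (param->is_ror_scan).

  RETURN
    Estimated number of rows, or HA_POS_ERROR if the scan is not feasible.
*/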
static ha_rows check_quick_select(Session *session,
                                  optimizer::Parameter *param,
                                  uint32_t idx,
                                  bool index_only,
                                  optimizer::SEL_ARG *tree,
                                  bool update_tbl_stats,
                                  uint32_t *mrr_flags,
                                  uint32_t *bufsize,
                                  optimizer::CostVector *cost)
{
  SEL_ARG_RANGE_SEQ seq;
  RANGE_SEQ_IF seq_if= {sel_arg_range_seq_init, sel_arg_range_seq_next};
  Cursor *cursor= param->table->cursor;
  ha_rows rows;
  uint32_t keynr= param->real_keynr[idx];

  /* Handle cases when we don't have a valid non-empty list of ranges */
  if (! tree)
    return HA_POS_ERROR;

  if (tree->type == optimizer::SEL_ARG::IMPOSSIBLE)
    return 0L;

  if (tree->type != optimizer::SEL_ARG::KEY_RANGE || tree->part != 0)
    return HA_POS_ERROR;

  seq.keyno= idx;
  seq.real_keyno= keynr;
  seq.param= param;
  seq.start= tree;

  param->range_count= 0;
  param->max_key_part= 0;

  param->is_ror_scan= true;
  if (param->table->index_flags(keynr) & HA_KEY_SCAN_NOT_ROR)
    param->is_ror_scan= false;

  *mrr_flags= param->force_default_mrr ? HA_MRR_USE_DEFAULT_IMPL : 0;
  *mrr_flags|= HA_MRR_NO_ASSOCIATION;

  bool pk_is_clustered= cursor->primary_key_is_clustered();
  if (index_only &&
      (param->table->index_flags(keynr) & HA_KEYREAD_ONLY) &&
      ! (pk_is_clustered && keynr == param->table->getShare()->getPrimaryKey()))
    *mrr_flags|= HA_MRR_INDEX_ONLY;

  if (session->lex().sql_command != SQLCOM_SELECT)
    *mrr_flags|= HA_MRR_USE_DEFAULT_IMPL;

  *bufsize= param->session->variables.read_rnd_buff_size;
  rows= cursor->multi_range_read_info_const(keynr, &seq_if, (void*)&seq, 0,
                                            bufsize, mrr_flags, cost);
  if (rows != HA_POS_ERROR)
  {
    /* Memorize the data about the last range scan on this key. */
    param->table->quick_rows[keynr]= rows;
    if (update_tbl_stats)
    {
      param->table->quick_keys.set(keynr);
      param->table->quick_key_parts[keynr]= param->max_key_part + 1;
      param->table->quick_n_ranges[keynr]= param->range_count;
      param->table->quick_condition_rows=
        min(param->table->quick_condition_rows, rows);
    }
  }

  /* Figure out if the key scan is ROR (returns rows in ROWID order) or not */
  enum ha_key_alg key_alg= param->table->key_info[seq.real_keyno].algorithm;
  if ((key_alg != HA_KEY_ALG_BTREE) && (key_alg != HA_KEY_ALG_UNDEF))
  {
    /*
      All scans are non-ROR scans for those index types.
      TODO: Don't have this logic here; make table engines return
      appropriate flags instead.
    */
    param->is_ror_scan= false;
  }
  else
  {
    /* A clustered PK scan is always a ROR scan (TODO: same as above) */
    if (param->table->getShare()->getPrimaryKey() == keynr && pk_is_clustered)
      param->is_ror_scan= true;
  }

  return rows;
}
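
/*
  Check if a key scan with equality conditions on the first 'nparts' key
  parts is a ROR scan, i.e. returns rows in ROWID order.

  DESCRIPTION
    The scan qualifies if none of the first 'nparts' key parts is a column
    prefix, and the remaining key parts (if any) coincide, field by field,
    with a prefix of the clustered primary key.
*/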
static bool is_key_scan_ror(optimizer::Parameter *param, uint32_t keynr, uint8_t nparts)
{
  KeyInfo *table_key= param->table->key_info + keynr;
  KeyPartInfo *key_part= table_key->key_part + nparts;
  KeyPartInfo *key_part_end= (table_key->key_part +
                              table_key->key_parts);
  uint32_t pk_number;

  for (KeyPartInfo *kp= table_key->key_part; kp < key_part; kp++)
  {
    uint16_t fieldnr= param->table->key_info[keynr].
                        key_part[kp - table_key->key_part].fieldnr - 1;
    if (param->table->getField(fieldnr)->key_length() != kp->length)
      return false;
  }

  if (key_part == key_part_end)
    return true;

  key_part= table_key->key_part + nparts;
  pk_number= param->table->getShare()->getPrimaryKey();
  if (! param->table->cursor->primary_key_is_clustered() || pk_number == MAX_KEY)
    return false;

  KeyPartInfo *pk_part= param->table->key_info[pk_number].key_part;
  KeyPartInfo *pk_part_end= pk_part +
                            param->table->key_info[pk_number].key_parts;
  for (; (key_part != key_part_end) && (pk_part != pk_part_end);
       ++key_part, ++pk_part)
  {
    if ((key_part->field != pk_part->field) ||
        (key_part->length != pk_part->length))
      return false;
  }
  return (key_part == key_part_end);
}
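
/*
  Create a QuickRangeSelect from a SEL_ARG tree for one index.

  SYNOPSIS
    get_quick_select()
      param         Parameter from test_quick_select
      idx           Position of the used key in param->key
      key_tree      SEL_ARG tree for the used key
      mrr_flags     MRR parameter for the quick select
      mrr_buf_size  MRR parameter for the quick select
      parent_alloc  If not NULL, use it to allocate memory for the quick
                    select data; otherwise use quick->alloc.

  RETURN
    New QuickRangeSelect, or NULL on error.
*/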
optimizer::QuickRangeSelect *
optimizer::get_quick_select(Parameter *param,
                            uint32_t idx,
                            optimizer::SEL_ARG *key_tree,
                            uint32_t mrr_flags,
                            uint32_t mrr_buf_size,
                            memory::Root *parent_alloc)
{
  optimizer::QuickRangeSelect *quick=
    new optimizer::QuickRangeSelect(param->session,
                                    param->table,
                                    param->real_keynr[idx],
                                    test(parent_alloc));

  if (get_quick_keys(param,
                     quick,
                     param->key[idx],
                     key_tree,
                     param->min_key,
                     0,
                     param->max_key,
                     0))
  {
    delete quick;
    return NULL;
  }
  quick->mrr_flags= mrr_flags;
  quick->mrr_buf_size= mrr_buf_size;
  quick->key_parts= parent_alloc
    ? (KEY_PART*)parent_alloc->memdup(param->key[idx],
        sizeof(KEY_PART) * param->table->key_info[param->real_keynr[idx]].key_parts)
    : (KEY_PART*)quick->alloc.memdup(param->key[idx],
        sizeof(KEY_PART) * param->table->key_info[param->real_keynr[idx]].key_parts);
  return quick;
}
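
/*
  Recursively walk key_tree and append a QuickRange to quick->ranges for
  every disjoint interval, extending the min_key/max_key buffers with the
  key parts accumulated so far.

  RETURN
    0  Ok
    1  Error
*/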
bool
optimizer::get_quick_keys(optimizer::Parameter *param,
                          optimizer::QuickRangeSelect *quick,
                          KEY_PART *key,
                          optimizer::SEL_ARG *key_tree,
                          unsigned char *min_key,
                          uint32_t min_key_flag,
                          unsigned char *max_key,
                          uint32_t max_key_flag)
{
  optimizer::QuickRange *range= NULL;
  uint32_t flag;
  int min_part= key_tree->part - 1; // # of keypart values in min_key buffer
  int max_part= key_tree->part - 1; // # of keypart values in max_key buffer

  if (key_tree->left != &optimizer::null_element)
  {
    if (get_quick_keys(param,
                       quick,
                       key,
                       key_tree->left,
                       min_key,
                       min_key_flag,
                       max_key,
                       max_key_flag))
    {
      return 1;
    }
  }
  unsigned char *tmp_min_key= min_key, *tmp_max_key= max_key;
  min_part+= key_tree->store_min(key[key_tree->part].store_length,
                                 &tmp_min_key, min_key_flag);
  max_part+= key_tree->store_max(key[key_tree->part].store_length,
                                 &tmp_max_key, max_key_flag);

  if (key_tree->next_key_part &&
      key_tree->next_key_part->part == key_tree->part+1 &&
      key_tree->next_key_part->type == optimizer::SEL_ARG::KEY_RANGE)
  {                                               // const key as prefix
    if ((tmp_min_key - min_key) == (tmp_max_key - max_key) &&
        memcmp(min_key, max_key, (uint32_t)(tmp_max_key - max_key)) == 0 &&
        key_tree->min_flag == 0 && key_tree->max_flag == 0)
    {
      if (get_quick_keys(param,
                         quick,
                         key,
                         key_tree->next_key_part,
                         tmp_min_key,
                         min_key_flag | key_tree->min_flag,
                         tmp_max_key,
                         max_key_flag | key_tree->max_flag))
      {
        return 1;
      }
      goto end;                                   // Ugly, but efficient
    }
    {
      uint32_t tmp_min_flag= key_tree->min_flag, tmp_max_flag= key_tree->max_flag;
      if (! tmp_min_flag)
      {
        min_part+= key_tree->next_key_part->store_min_key(key,
                                                          &tmp_min_key,
                                                          &tmp_min_flag);
      }
      if (! tmp_max_flag)
      {
        max_part+= key_tree->next_key_part->store_max_key(key,
                                                          &tmp_max_key,
                                                          &tmp_max_flag);
      }
      flag= tmp_min_flag | tmp_max_flag;
    }
  }
  else
  {
    flag= key_tree->min_flag | key_tree->max_flag;
  }

  /*
    Ensure that some part of min_key and max_key are used. If not,
    regard this as no lower/upper range.
  */
  if (tmp_min_key != param->min_key)
  {
    flag&= ~NO_MIN_RANGE;
  }
  else
  {
    flag|= NO_MIN_RANGE;
  }
  if (tmp_max_key != param->max_key)
  {
    flag&= ~NO_MAX_RANGE;
  }
  else
  {
    flag|= NO_MAX_RANGE;
  }
  if (flag == 0)
  {
    uint32_t length= (uint32_t) (tmp_min_key - param->min_key);
    if (length == (uint32_t) (tmp_max_key - param->max_key) &&
        ! memcmp(param->min_key, param->max_key, length))
    {
      KeyInfo *table_key= quick->head->key_info + quick->index;
      flag= EQ_RANGE;
      if ((table_key->flags & (HA_NOSAME)) == HA_NOSAME &&
          key->part == table_key->key_parts - 1)
      {
        if (! (table_key->flags & HA_NULL_PART_KEY) ||
            ! null_part_in_key(key,
                               param->min_key,
                               (uint32_t) (tmp_min_key - param->min_key)))
        {
          flag|= UNIQUE_RANGE;
        }
        else
        {
          flag|= NULL_RANGE;
        }
      }
    }
  }

  /* Get range for retrieving rows in QUICK_SELECT::get_next */
  range= new optimizer::QuickRange(param->min_key,
                                   (uint32_t) (tmp_min_key - param->min_key),
                                   min_part >= 0 ? make_keypart_map(min_part) : 0,
                                   param->max_key,
                                   (uint32_t) (tmp_max_key - param->max_key),
                                   max_part >= 0 ? make_keypart_map(max_part) : 0,
                                   flag);

  set_if_bigger(quick->max_used_key_length, (uint32_t)range->min_length);
  set_if_bigger(quick->max_used_key_length, (uint32_t)range->max_length);
  set_if_bigger(quick->used_key_parts, (uint32_t) key_tree->part + 1);
  quick->ranges.push_back(&range);

end:
  if (key_tree->right != &optimizer::null_element)
  {
    return get_quick_keys(param,
                          quick,
                          key,
                          key_tree->right,
                          min_key,
                          min_key_flag,
                          max_key,
                          max_key_flag);
  }
  return 0;
}
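
/*
  Return true if any part of the key is NULL.

  SYNOPSIS
    null_part_in_key()
      key_part  Array of key parts (the index description)
      key       Key values tuple
      length    Length of the key values tuple in bytes

  RETURN
    true   The tuple has at least one "keypart IS NULL"
    false  Otherwise
*/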
static bool null_part_in_key(KEY_PART *key_part, const unsigned char *key, uint32_t length)
{
  for (const unsigned char *end= key + length;
       key < end;
       key+= key_part++->store_length)
  {
    if (key_part->null_bit && *key)
      return true;
  }
  return false;
}
bool optimizer::QuickSelectInterface::is_keys_used(const boost::dynamic_bitset<>& fields)
{
  return is_key_used(head, index, fields);
}
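
/*
  (From get_quick_select_for_ref():) build the ranges of a quick select
  that retrieves the same rows as a ref(const) access on 'ref': one
  EQ_RANGE interval over ref->key_buff, plus an optional companion
  IS NULL range when the ref_or_null optimization is used.
*/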
  range->min_key= range->max_key= ref->key_buff;
  range->min_length= range->max_length= ref->key_length;
  range->min_keypart_map= range->max_keypart_map=
    make_prev_keypart_map(ref->key_parts);
  range->flag= (ref->key_length == key_info->key_length &&
                (key_info->flags & HA_END_SPACE_KEY) == 0) ? EQ_RANGE : 0;

  for (part= 0; part < ref->key_parts; part++, key_part++)
  {
    key_part->part= part;
    key_part->field= key_info->key_part[part].field;
    key_part->length= key_info->key_part[part].length;
    key_part->store_length= key_info->key_part[part].store_length;
    key_part->null_bit= key_info->key_part[part].null_bit;
    key_part->flag= (uint8_t) key_info->key_part[part].key_part_flag;
  }
  quick->ranges.push_back(&range);

  /*
    Add a NULL range if the REF_OR_NULL optimization is used. For example,
    for "WHERE a = 2 OR a IS NULL" the (a = 2) range was created above and
    ref->null_ref_key is set; a second, IS NULL range is appended here.
  */
  if (ref->null_ref_key)
  {
    optimizer::QuickRange *null_range= NULL;

    *ref->null_ref_key= 1;    // Set the null byte, then create a range
    null_range= new (alloc)
      optimizer::QuickRange(ref->key_buff, ref->key_length,
                            make_prev_keypart_map(ref->key_parts),
                            ref->key_buff, ref->key_length,
                            make_prev_keypart_map(ref->key_parts), EQ_RANGE);
    *ref->null_ref_key= 0;    // Clear the null byte again
    quick->ranges.push_back(&null_range);
  }

  quick->mrr_flags= HA_MRR_NO_ASSOCIATION |
                    (table->key_read ? HA_MRR_INDEX_ONLY : 0);
  if (session->lex().sql_command != SQLCOM_SELECT)
    quick->mrr_flags|= HA_MRR_USE_DEFAULT_IMPL;
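
/*
  Range sequence interface implementation for vectors of QuickRanges: init.

  SYNOPSIS
    quick_range_seq_init()
      init_param  Caller-opaque parameter: a QuickRangeSelect* pointer

  RETURN
    Opaque value to be passed to quick_range_seq_next().

  NOTE
    Together with quick_range_seq_next() this forms the RANGE_SEQ_IF pair
    that an MRR implementation iterates with; the calling pattern is
    roughly (names as used elsewhere in this file):

      RANGE_SEQ_IF seq_if= {quick_range_seq_init, quick_range_seq_next};
      range_seq_t it= seq_if.init(quick, 0, 0);
      KEY_MULTI_RANGE r;
      while (seq_if.next(it, &r) == 0)
      {
        // process one key range in r
      }
*/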
range_seq_t optimizer::quick_range_seq_init(void *init_param, uint32_t, uint32_t)
{
  optimizer::QuickRangeSelect *quick= (optimizer::QuickRangeSelect*)init_param;
  quick->qr_traversal_ctx.first= (optimizer::QuickRange**)quick->ranges.buffer;
  quick->qr_traversal_ctx.cur= (optimizer::QuickRange**)quick->ranges.buffer;
  quick->qr_traversal_ctx.last= quick->qr_traversal_ctx.cur +
                                quick->ranges.size();
  return &quick->qr_traversal_ctx;
}
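
/*
  Range sequence interface implementation for vectors of QuickRanges: get next.

  SYNOPSIS
    quick_range_seq_next()
      rseq        Value returned from quick_range_seq_init()
      range  OUT  Filled in with information about the next range

  RETURN
    0  Ok, range filled in
    1  No more ranges in the sequence
*/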
uint32_t optimizer::quick_range_seq_next(range_seq_t rseq, KEY_MULTI_RANGE *range)
{
  QuickRangeSequenceContext *ctx= (QuickRangeSequenceContext*) rseq;

  if (ctx->cur == ctx->last)
    return 1; /* no more ranges */

  optimizer::QuickRange *cur= *(ctx->cur);
  key_range *start_key= &range->start_key;
  key_range *end_key= &range->end_key;

  start_key->key= cur->min_key;
  start_key->length= cur->min_length;
  start_key->keypart_map= cur->min_keypart_map;
  start_key->flag= ((cur->flag & NEAR_MIN) ? HA_READ_AFTER_KEY :
                    (cur->flag & EQ_RANGE) ?
                    HA_READ_KEY_EXACT : HA_READ_KEY_OR_NEXT);
  end_key->key= cur->max_key;
  end_key->length= cur->max_length;
  end_key->keypart_map= cur->max_keypart_map;
  /*
    We use HA_READ_AFTER_KEY here because if we are reading on a key
    prefix, we want to find all keys with this prefix.
  */
  end_key->flag= (cur->flag & NEAR_MAX ? HA_READ_BEFORE_KEY :
                  HA_READ_AFTER_KEY);
  range->range_flag= cur->flag;
  ctx->cur++;
  return 0;
}
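
/*
  The following functions implement the "loose index scan" optimization
  for GROUP BY / DISTINCT queries, optionally with MIN/MAX aggregates:
  instead of scanning every row, only the first (and, for MAX, last) key
  in each group is read.
*/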
static inline uint32_t get_field_keypart(KeyInfo *index, Field *field);

static inline optimizer::SEL_ARG * get_index_range_tree(uint32_t index,
                                                        optimizer::SEL_TREE *range_tree,
                                                        optimizer::Parameter *param,
                                                        uint32_t *param_idx);

static bool get_constant_key_infix(KeyInfo *index_info,
                                   optimizer::SEL_ARG *index_range_tree,
                                   KeyPartInfo *first_non_group_part,
                                   KeyPartInfo *min_max_arg_part,
                                   KeyPartInfo *last_part,
                                   Session *session,
                                   unsigned char *key_infix,
                                   uint32_t *key_infix_len,
                                   KeyPartInfo **first_non_infix_part);

static bool check_group_min_max_predicates(COND *cond, Item_field *min_max_arg_item);

static void
cost_group_min_max(Table* table,
                   KeyInfo *index_info,
                   uint32_t used_key_parts,
                   uint32_t group_key_parts,
                   optimizer::SEL_TREE *range_tree,
                   optimizer::SEL_ARG *index_tree,
                   ha_rows quick_prefix_records,
                   bool have_min,
                   bool have_max,
                   double *read_cost,
                   ha_rows *records);
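
/*
  Test if this access method is applicable to a GROUP query with MIN/MAX
  functions, and if so, construct a GroupMinMaxReadPlan for a loose index
  scan that reads only one (or two) index tuples per group.

  DESCRIPTION
    The query must satisfy, among others, the following conditions:
    - it references a single table, and all GROUP BY / DISTINCT attributes
      form a prefix of one covering index;
    - there is at most one MIN/MAX argument column, and it is a key part
      that follows the group prefix; any key parts between the prefix and
      the MIN/MAX argument (the "infix") must be bound by equality
      predicates, extracted by get_constant_key_infix();
    - predicates on the MIN/MAX argument are range comparisons with
      constants, as verified by check_group_min_max_predicates().
    Each applicable index is costed with cost_group_min_max() and the
    cheapest one is kept.

  RETURN
    The read plan, or NULL if the loose index scan is not applicable.
*/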
static optimizer::GroupMinMaxReadPlan *
get_best_group_min_max(optimizer::Parameter *param, optimizer::SEL_TREE *tree)
{
  Session *session= param->session;
  Join *join= session->lex().current_select->join;
  Table *table= param->table;
  bool have_min= false;
  bool have_max= false;
  Item_field *min_max_arg_item= NULL;
  KeyPartInfo *min_max_arg_part= NULL;
  uint32_t group_prefix_len= 0;
  KeyInfo *index_info= NULL;
  uint32_t index= 0;
  uint32_t group_key_parts= 0;
  uint32_t used_key_parts= 0;
  unsigned char key_infix[MAX_KEY_LENGTH];
  uint32_t key_infix_len= 0;
  optimizer::GroupMinMaxReadPlan *read_plan= NULL;
  uint32_t key_part_nr;
  Order *tmp_group= NULL;
  Item *item= NULL;
  Item_field *item_field= NULL;

  /* Perform few 'cheap' tests whether this access method is applicable. */
  if (! join)
    return NULL;        /* This is not a select statement. */

  if ((join->tables != 1) ||       /* The query must reference one table. */
      ((! join->group_list) &&     /* Neither GROUP BY nor a DISTINCT query. */
       (! join->select_distinct)) ||
      (join->select_lex->olap == ROLLUP_TYPE))
    return NULL;

  if (table->getShare()->sizeKeys() == 0)  /* There are no indexes to use. */
    return NULL;

  /* Analyze the query in more detail. */
  List<Item>::iterator select_items_it(join->fields_list.begin());

  /* Check (SA1,SA4) and store the only MIN/MAX argument. */
  if (join->make_sum_func_list(join->all_fields, join->fields_list, 1))
    return NULL;

  if (join->sum_funcs[0])
  {
    Item_sum *min_max_item= NULL;
    Item_sum **func_ptr= join->sum_funcs;
    while ((min_max_item= *(func_ptr++)))
    {
      if (min_max_item->sum_func() == Item_sum::MIN_FUNC)
        have_min= true;
      else if (min_max_item->sum_func() == Item_sum::MAX_FUNC)
        have_max= true;
      else
        return NULL;

      /* The argument of MIN/MAX. */
      Item *expr= min_max_item->args[0]->real_item();
      if (expr->type() == Item::FIELD_ITEM) /* Is it an attribute? */
      {
        if (! min_max_arg_item)
          min_max_arg_item= (Item_field*) expr;
        else if (! min_max_arg_item->eq(expr, 1))
          return NULL;
      }
      else
        return NULL;
    }
  }

  /* Check (SA5). */
  if (join->select_distinct)
  {
    while ((item= select_items_it++))
    {
      if (item->type() != Item::FIELD_ITEM)
        return NULL;
    }
  }

  /* Check (GA4) - that there are no expressions among the group attributes. */
  for (tmp_group= join->group_list; tmp_group; tmp_group= tmp_group->next)
  {
    if ((*tmp_group->item)->type() != Item::FIELD_ITEM)
      return NULL;
  }

  /*
    Check that the table has at least one compound index such that the
    conditions (GA1,GA2) are all true. If there is more than one such index,
    select the cheapest one. Here we set group_prefix_len and index_info.
  */
  KeyInfo *cur_index_info= table->key_info;
  KeyInfo *cur_index_info_end= cur_index_info + table->getShare()->sizeKeys();
  KeyPartInfo *cur_part= NULL;
  KeyPartInfo *end_part= NULL; /* Last part for loops. */
  /* Last index part. */
  KeyPartInfo *last_part= NULL;
  KeyPartInfo *first_non_group_part= NULL;
  KeyPartInfo *first_non_infix_part= NULL;
  uint32_t key_infix_parts= 0;
  uint32_t cur_group_key_parts= 0;
  uint32_t cur_group_prefix_len= 0;
  /* Cost-related variables for the index scans. */
  double best_read_cost= DBL_MAX;
  ha_rows best_records= 0;
  optimizer::SEL_ARG *best_index_tree= NULL;
  ha_rows best_quick_prefix_records= 0;
  uint32_t best_param_idx= 0;
  double cur_read_cost= DBL_MAX;
  ha_rows cur_records;
  optimizer::SEL_ARG *cur_index_tree= NULL;
  ha_rows cur_quick_prefix_records= 0;
  uint32_t cur_param_idx= MAX_KEY;
  key_map used_key_parts_map;
  uint32_t cur_key_infix_len= 0;
  unsigned char cur_key_infix[MAX_KEY_LENGTH];
  uint32_t cur_used_key_parts= 0;
  uint32_t pk= param->table->getShare()->getPrimaryKey();

  for (uint32_t cur_index= 0;
       cur_index_info != cur_index_info_end;
       cur_index_info++, cur_index++)
  {
    /* Check (B1) - if current index is covering. */
    if (! table->covering_keys.test(cur_index))
      goto next_index;

    /*
      If the current storage manager appends the primary key to each index,
      then this index does not cover the table unless all key parts the
      query reads are covered without the appended primary key.
    */
    if (pk < MAX_KEY && cur_index != pk &&
        (table->cursor->getEngine()->check_flag(HTON_BIT_PRIMARY_KEY_IN_READ_INDEX)))
    {
      /* For each table field */
      for (uint32_t i= 0; i < table->getShare()->sizeFields(); i++)
      {
        Field *cur_field= table->getField(i);
        /*
          If the field is used in the current query, check that it is
          covered by some keypart of the current index.
        */
        if ((cur_field->isReadSet()) &&
            ! cur_field->part_of_key_not_clustered.test(cur_index))
          goto next_index;             // Field was not part of key
      }
    }

    /* Check (GA1) for GROUP BY queries. */
    if (join->group_list)
    {
      cur_part= cur_index_info->key_part;
      end_part= cur_part + cur_index_info->key_parts;
      /* Iterate in parallel over the GROUP list and the index parts. */
      for (tmp_group= join->group_list;
           tmp_group && (cur_part != end_part);
           tmp_group= tmp_group->next, cur_part++)
      {
        /*
          (GA1) Each GROUP attribute must reference the corresponding key
          part of the current index.
        */
        assert((*tmp_group->item)->type() == Item::FIELD_ITEM);
        Item_field *group_field= (Item_field *) (*tmp_group->item);
        if (group_field->field->eq(cur_part->field))
        {
          cur_group_prefix_len+= cur_part->store_length;
          ++cur_group_key_parts;
        }
        else
          goto next_index;
      }
    }
    /*
      Check (GA2) if this is a DISTINCT query: all SELECT attributes must
      reference key parts of the current index, in any order.
    */
    else if (join->select_distinct)
    {
      select_items_it= join->fields_list.begin();
      used_key_parts_map.reset();
      uint32_t max_key_part= 0;
      while ((item= select_items_it++))
      {
        item_field= (Item_field*) item; /* (SA5) already checked above. */
        /* Find the order of the key part in the index. */
        key_part_nr= get_field_keypart(cur_index_info, item_field->field);
        /*
          Check if this attribute was already present in the select list;
          if so, its corresponding key part was already accounted for.
        */
        if (used_key_parts_map.test(key_part_nr))
          continue;
        if (key_part_nr < 1 || key_part_nr > join->fields_list.size())
          goto next_index;
        cur_part= cur_index_info->key_part + key_part_nr - 1;
        cur_group_prefix_len+= cur_part->store_length;
        used_key_parts_map.set(key_part_nr);
        ++cur_group_key_parts;
        max_key_part= max(max_key_part, key_part_nr);
      }
      /*
        Check that the used key parts form a prefix of the index. To check
        this we compare bits in all_parts and cur_parts: all_parts has all
        bits set from 0 to (max_key_part-1), cur_parts has bits set only
        for the used keyparts.
      */
      key_map all_parts, cur_parts;
      for (uint32_t pos= 0; pos < max_key_part; pos++)
        all_parts.set(pos);
      cur_parts= used_key_parts_map >> 1;
      if (all_parts != cur_parts)
        goto next_index;
    }

    /* Check (SA2). */
    if (min_max_arg_item)
    {
      key_part_nr= get_field_keypart(cur_index_info, min_max_arg_item->field);
      if (key_part_nr <= cur_group_key_parts)
        goto next_index;
      min_max_arg_part= cur_index_info->key_part + key_part_nr - 1;
    }

    /*
      Check (NGA1, NGA2): all conditions on the key parts between the group
      prefix and the MIN/MAX argument must be equalities; collect their
      constants into cur_key_infix.
    */
    last_part= cur_index_info->key_part + cur_index_info->key_parts;
    first_non_group_part= (cur_group_key_parts < cur_index_info->key_parts) ?
                          cur_index_info->key_part + cur_group_key_parts :
                          NULL;
    first_non_infix_part= min_max_arg_part ?
                          (min_max_arg_part < last_part) ?
                          min_max_arg_part :
                          NULL :
                          NULL;
    if (first_non_group_part &&
        (! min_max_arg_part || (min_max_arg_part - first_non_group_part > 0)))
    {
      if (tree)
      {
        uint32_t dummy;
        optimizer::SEL_ARG *index_range_tree= get_index_range_tree(cur_index,
                                                                   tree,
                                                                   param,
                                                                   &dummy);
        if (! get_constant_key_infix(cur_index_info,
                                     index_range_tree,
                                     first_non_group_part,
                                     min_max_arg_part,
                                     last_part,
                                     session,
                                     cur_key_infix,
                                     &cur_key_infix_len,
                                     &first_non_infix_part))
          goto next_index;
      }
      else if (min_max_arg_part &&
               (min_max_arg_part - first_non_group_part > 0))
      {
        /*
          There is a gap but no range tree, thus no predicates at all for
          the non-group keyparts.
        */
        goto next_index;
      }
      else if (first_non_group_part && join->conds)
      {
        /*
          If there is no MIN/MAX function in the query, but some key part
          after the group prefix is referenced in the WHERE clause, this
          index cannot be used: such conditions cannot be pushed to the
          index, so the retrieved group would be incomplete.
        */
        KeyPartInfo *key_part_range[2];
        key_part_range[0]= first_non_group_part;
        key_part_range[1]= last_part;

        /* Check if any non-group key part is referenced in the WHERE clause. */
        if (join->conds->walk(&Item::find_item_in_field_list_processor,
                              0,
                              (unsigned char*) key_part_range))
          goto next_index;
      }
    }

    /*
      Test (WA1) partially - that no other keypart after the infix is
      referenced in the query.
    */
    if (first_non_infix_part)
    {
      cur_part= first_non_infix_part +
                (min_max_arg_part && (min_max_arg_part < last_part));
      for (; cur_part != last_part; cur_part++)
      {
        if (cur_part->field->isReadSet())
          goto next_index;
      }
    }

    /* If we got to this point, cur_index_info passes the test. */
    key_infix_parts= cur_key_infix_len ?
                     (first_non_infix_part - first_non_group_part) : 0;
    cur_used_key_parts= cur_group_key_parts + key_infix_parts;

    /* Compute the cost of using this index. */
    if (tree)
    {
      /* Find the SEL_ARG sub-tree that corresponds to the chosen index. */
      cur_index_tree= get_index_range_tree(cur_index,
                                           tree,
                                           param,
                                           &cur_param_idx);
      /* Check if this range tree can be used for prefix retrieval. */
      optimizer::CostVector dummy_cost;
      uint32_t mrr_flags= HA_MRR_USE_DEFAULT_IMPL;
      uint32_t mrr_bufsize= 0;
      cur_quick_prefix_records= check_quick_select(session,
                                                   param,
                                                   cur_param_idx,
                                                   false /*don't care*/,
                                                   cur_index_tree,
                                                   true,
                                                   &mrr_flags,
                                                   &mrr_bufsize,
                                                   &dummy_cost);
    }
    cost_group_min_max(table,
                       cur_index_info,
                       cur_used_key_parts,
                       cur_group_key_parts,
                       tree,
                       cur_index_tree,
                       cur_quick_prefix_records,
                       have_min,
                       have_max,
                       &cur_read_cost,
                       &cur_records);
    /*
      If cur_read_cost is lower than best_read_cost use cur_index. Do not
      compare doubles directly because they may have different
      representations (64 vs. 80 bits).
    */
    if (cur_read_cost < best_read_cost - (DBL_EPSILON * cur_read_cost))
    {
      assert(tree != 0 || cur_param_idx == MAX_KEY);
      index_info= cur_index_info;
      index= cur_index;
      best_read_cost= cur_read_cost;
      best_records= cur_records;
      best_index_tree= cur_index_tree;
      best_quick_prefix_records= cur_quick_prefix_records;
      best_param_idx= cur_param_idx;
      group_key_parts= cur_group_key_parts;
      group_prefix_len= cur_group_prefix_len;
      key_infix_len= cur_key_infix_len;

      if (key_infix_len)
      {
        memcpy(key_infix, cur_key_infix, sizeof(key_infix));
      }

      used_key_parts= cur_used_key_parts;
    }

  next_index:
    cur_group_key_parts= 0;
    cur_group_prefix_len= 0;
    cur_key_infix_len= 0;
  }
  if (! index_info) /* No usable index found. */
    return NULL;

  /* Check (SA3) for the where clause. */
  if (join->conds && min_max_arg_item &&
      ! check_group_min_max_predicates(join->conds, min_max_arg_item))
    return NULL;

  /* The query passes all tests, so construct a new TRP object. */
  read_plan=
    new (*param->mem_root) optimizer::GroupMinMaxReadPlan(have_min,
                                                          have_max,
                                                          min_max_arg_part,
                                                          group_prefix_len,
                                                          used_key_parts,
                                                          group_key_parts,
                                                          index_info,
                                                          index,
                                                          key_infix_len,
                                                          (key_infix_len > 0) ? key_infix : NULL,
                                                          tree,
                                                          best_index_tree,
                                                          best_param_idx,
                                                          best_quick_prefix_records);
  if (tree && read_plan->quick_prefix_records == 0)
    return NULL;

  read_plan->read_cost= best_read_cost;
  read_plan->records= best_records;

  return read_plan;
}
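
/*
  Check that the MIN/MAX attribute participates only in range predicates
  with constants.

  SYNOPSIS
    check_group_min_max_predicates()
      cond              The condition to check
      min_max_arg_item  The MIN/MAX argument

  RETURN
    true   if cond passes the test
    false  otherwise
*/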
static bool check_group_min_max_predicates(COND *cond, Item_field *min_max_arg_item)
{
  assert(cond && min_max_arg_item);

  cond= cond->real_item();
  Item::Type cond_type= cond->type();
  if (cond_type == Item::COND_ITEM) /* 'AND' or 'OR' */
  {
    List<Item>::iterator li(((Item_cond*) cond)->argument_list()->begin());
    Item *and_or_arg= NULL;
    while ((and_or_arg= li++))
    {
      if (! check_group_min_max_predicates(and_or_arg, min_max_arg_item))
        return false;
    }
    return true;
  }

  /*
    Disallow the optimization if there is a subselect in the WHERE clause.
    TODO: only disallow it when the subselect references the MIN/MAX
    argument field.
  */
  if (cond_type == Item::SUBSELECT_ITEM)
    return false;

  /* We presume that at this point there are no other Items than functions. */
  assert(cond_type == Item::FUNC_ITEM);

  /* Test if cond references only group-by or non-group fields. */
  Item_func *pred= (Item_func*) cond;
  Item **arguments= pred->arguments();
  Item *cur_arg= NULL;
  for (uint32_t arg_idx= 0; arg_idx < pred->argument_count(); arg_idx++)
  {
    cur_arg= arguments[arg_idx]->real_item();
    if (cur_arg->type() == Item::FIELD_ITEM)
    {
      if (min_max_arg_item->eq(cur_arg, 1))
      {
        /*
          If pred references the MIN/MAX argument, check whether pred is a
          range condition that compares the MIN/MAX argument with a constant.
        */
        Item_func::Functype pred_type= pred->functype();
        if (pred_type != Item_func::EQUAL_FUNC &&
            pred_type != Item_func::LT_FUNC &&
            pred_type != Item_func::LE_FUNC &&
            pred_type != Item_func::GT_FUNC &&
            pred_type != Item_func::GE_FUNC &&
            pred_type != Item_func::BETWEEN &&
            pred_type != Item_func::ISNULL_FUNC &&
            pred_type != Item_func::ISNOTNULL_FUNC &&
            pred_type != Item_func::EQ_FUNC &&
            pred_type != Item_func::NE_FUNC)
          return false;

        /* Check that pred compares min_max_arg_item with a constant. */
        Item *args[3];
        memset(args, 0, 3 * sizeof(Item*));
        bool inv= false;
        /* Test if this is a comparison of a field and a constant. */
        if (! optimizer::simple_pred(pred, args, inv))
          return false;

        /* Check for compatible string comparisons - similar to get_mm_leaf. */
        if (args[0] && args[1] && !args[2] && // this is a binary function
            min_max_arg_item->result_type() == STRING_RESULT &&
            /*
              Don't use an index when comparing strings of different
              collations ...
            */
            ((args[1]->result_type() == STRING_RESULT &&
              ((Field_str*) min_max_arg_item->field)->charset() !=
              pred->compare_collation())
             ||
             /*
               ... or when comparing a string with a non-string.
             */
             (args[1]->result_type() != STRING_RESULT &&
              min_max_arg_item->field->cmp_type() != args[1]->result_type())))
          return false;
      }
    }
    else if (cur_arg->type() == Item::FUNC_ITEM)
    {
      if (! check_group_min_max_predicates(cur_arg, min_max_arg_item))
        return false;
    }
    else if (cur_arg->const_item())
    {
      return true;
    }
    else
      return false;
  }

  return true;
}
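
/*
  Extract a sequence of constants from a conjunction of equality predicates.

  DESCRIPTION
    Tests conditions (NGA1, NGA2) from get_best_group_min_max(): for each
    key part between the GROUP BY prefix and the MIN/MAX argument there
    must be exactly one single-point interval (an equality or IS NULL).
    The constants are concatenated into key_infix, to be appended to each
    group prefix during the loose index scan.

  RETURN
    true   if the index passes the test
    false  otherwise
*/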
static bool
get_constant_key_infix(KeyInfo *,
                       optimizer::SEL_ARG *index_range_tree,
                       KeyPartInfo *first_non_group_part,
                       KeyPartInfo *min_max_arg_part,
                       KeyPartInfo *last_part,
                       Session *,
                       unsigned char *key_infix,
                       uint32_t *key_infix_len,
                       KeyPartInfo **first_non_infix_part)
{
  optimizer::SEL_ARG *cur_range= NULL;
  KeyPartInfo *cur_part= NULL;
  /* End part for the first loop below. */
  KeyPartInfo *end_part= min_max_arg_part ? min_max_arg_part : last_part;

  *key_infix_len= 0;
  unsigned char *key_ptr= key_infix;
  for (cur_part= first_non_group_part; cur_part != end_part; cur_part++)
  {
    /*
      Find the range tree for the current keypart. We assume that
      index_range_tree points to the leftmost keypart in the range tree.
    */
    for (cur_range= index_range_tree; cur_range;
         cur_range= cur_range->next_key_part)
    {
      if (cur_range->field->eq(cur_part->field))
        break;
    }
    if (! cur_range)
    {
      if (min_max_arg_part)
        return false; /* The current keypart has no range predicates at all. */
      else
      {
        *first_non_infix_part= cur_part;
        return true;
      }
    }

    /* Check that the current range tree is a single-point interval. */
    if (cur_range->prev || cur_range->next)
      return false; /* This is not the only range predicate for the field. */
    if ((cur_range->min_flag & NO_MIN_RANGE) ||
        (cur_range->max_flag & NO_MAX_RANGE) ||
        (cur_range->min_flag & NEAR_MIN) ||
        (cur_range->max_flag & NEAR_MAX))
      return false;

    uint32_t field_length= cur_part->store_length;
    if ((cur_range->maybe_null &&
         cur_range->min_value[0] && cur_range->max_value[0]) ||
        ! memcmp(cur_range->min_value, cur_range->max_value, field_length))
    {
      /* cur_range specifies 'IS NULL' or an equality condition. */
      memcpy(key_ptr, cur_range->min_value, field_length);
      key_ptr+= field_length;
      *key_infix_len+= field_length;
    }
    else
      return false;
  }

  if (! min_max_arg_part && (cur_part == last_part))
    *first_non_infix_part= last_part;

  return true;
}
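
/*
  Find the key part referenced by a field.

  RETURN
    Part number of the matching key part (counting from 1), or 0 if the
    field is not a key part of this index.
*/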
static inline uint32_t
get_field_keypart(KeyInfo *index, Field *field)
{
  KeyPartInfo *part= NULL;
  KeyPartInfo *end= NULL;

  for (part= index->key_part, end= part + index->key_parts; part < end; part++)
  {
    if (field->eq(part->field))
      return part - index->key_part + 1;
  }
  return 0;
}
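
/*
  Find the SEL_ARG sub-tree that corresponds to the chosen index.

  DESCRIPTION
    A SEL_TREE contains range trees only for the indexes considered during
    range analysis. Translate the table-level index number into the
    position used in Parameter::real_keynr and return the matching keys[]
    entry; the position is also reported through *param_idx.
*/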
static inline optimizer::SEL_ARG *get_index_range_tree(uint32_t index,
                                                       optimizer::SEL_TREE* range_tree,
                                                       optimizer::Parameter *param,
                                                       uint32_t *param_idx)
{
  uint32_t idx= 0; /* Index nr in param->key_parts */

  while (idx < param->keys)
  {
    if (index == param->real_keynr[idx])
      break;
    idx++;
  }
  *param_idx= idx;
  return range_tree->keys[idx];
}
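
/*
  Compute the cost of a QuickGroupMinMaxSelect for a particular index.

  DESCRIPTION
    The estimate is IO-bound; roughly (see the code for exact expressions):

      keys_per_block = block_size/2 / (key_length + ref_length) + 1
      num_blocks     = records / keys_per_block + 1
      num_groups     = records / keys_per_group + 1

    num_groups is scaled down by the selectivity of the range predicates
    on the group prefix when a quick prefix select is used. When there is
    a key infix (used_key_parts > group_key_parts), each group may need an
    extra block read with probability p_overlap, giving

      io_cost = min(num_groups * (1 + p_overlap), num_blocks)

    Otherwise io_cost is num_groups (num_groups + 1 when both MIN and MAX
    are computed) for groups spanning more than one block, else num_blocks.

  NOTE
    The returned cost is mostly a lower-bound estimate, so when applicable
    this method tends to win against plain index or range scans.
*/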
static void cost_group_min_max(Table* table,
                               KeyInfo *index_info,
                               uint32_t used_key_parts,
                               uint32_t group_key_parts,
                               optimizer::SEL_TREE *range_tree,
                               optimizer::SEL_ARG *,
                               ha_rows quick_prefix_records,
                               bool have_min,
                               bool have_max,
                               double *read_cost,
                               ha_rows *records)
{
  ha_rows table_records;
  uint32_t num_groups;
  uint32_t num_blocks;
  uint32_t keys_per_block;
  uint32_t keys_per_group;
  uint32_t keys_per_subgroup; /* Average number of keys in sub-groups
                                 formed by a key infix. */
  double p_overlap; /* Probability that a sub-group overlaps two blocks. */
  double quick_prefix_selectivity;
  double io_cost;
  double cpu_cost= 0; /* TODO: CPU cost of index_read calls? */

  table_records= table->cursor->stats.records;
  keys_per_block= (table->cursor->stats.block_size / 2 /
                   (index_info->key_length + table->cursor->ref_length)
                   + 1);
  num_blocks= (uint32_t) (table_records / keys_per_block) + 1;

  /* Compute the number of keys in a group. */
  keys_per_group= index_info->rec_per_key[group_key_parts - 1];
  if (keys_per_group == 0) /* If there is no statistics, try to guess: */
    /* each group contains 10% of all records. */
    keys_per_group= (uint32_t)(table_records / 10) + 1;
  num_groups= (uint32_t)(table_records / keys_per_group) + 1;

  /* Apply the selectivity of the quick select for group prefixes. */
  if (range_tree && (quick_prefix_records != HA_POS_ERROR))
  {
    quick_prefix_selectivity= (double) quick_prefix_records /
                              (double) table_records;
    num_groups= (uint32_t) rint(num_groups * quick_prefix_selectivity);
    set_if_bigger(num_groups, 1U);
  }

  if (used_key_parts > group_key_parts)
  {
    /*
      Compute the probability that two ends of a subgroup are inside
      different blocks.
    */
    keys_per_subgroup= index_info->rec_per_key[used_key_parts - 1];
    if (keys_per_subgroup >= keys_per_block) /* If a subgroup is bigger than */
      p_overlap= 1.0;       /* a block, it will overlap at least two blocks. */
    else
    {
      double blocks_per_group= (double) num_blocks / (double) num_groups;
      p_overlap= (blocks_per_group * (keys_per_subgroup - 1)) / keys_per_group;
      p_overlap= min(p_overlap, 1.0);
    }
    io_cost= (double) min(num_groups * (1 + p_overlap), (double)num_blocks);
  }
  else
  {
    io_cost= (keys_per_group > keys_per_block) ?
             (have_min && have_max) ? (double) (num_groups + 1) :
                                      (double) num_groups :
             (double) num_blocks;
  }

  /*
    TODO: If there is no WHERE clause and no other expressions, there
    should be no CPU cost. We leave it here to make this cost comparable
    to that of an index scan as computed in test_quick_select().
  */
  cpu_cost= (double) num_groups / TIME_FOR_COMPARE;

  *read_cost= io_cost + cpu_cost;
  *records= num_groups;
}
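
/*
  Construct a new quick select object for queries with GROUP BY and MIN/MAX.

  SYNOPSIS
    GroupMinMaxReadPlan::make_quick()
      param         Parameter from test_quick_select
      parent_alloc  Memory pool to use

  DESCRIPTION
    Make a QuickGroupMinMaxSelect according to this read plan, including a
    prefix QuickRangeSelect when range predicates restrict the group
    prefix, and one QuickRange per interval on the MIN/MAX argument.

  RETURN
    New QuickGroupMinMaxSelect, or NULL on error.
*/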
optimizer::QuickSelectInterface *
optimizer::GroupMinMaxReadPlan::make_quick(optimizer::Parameter *param, bool, memory::Root *parent_alloc)
{
  optimizer::QuickGroupMinMaxSelect *quick=
    new optimizer::QuickGroupMinMaxSelect(param->table,
                                          param->session->lex().current_select->join,
                                          have_min,
                                          have_max,
                                          min_max_arg_part,
                                          group_prefix_len,
                                          group_key_parts,
                                          used_key_parts,
                                          index_info,
                                          index,
                                          read_cost,
                                          records,
                                          key_infix_len,
                                          key_infix,
                                          parent_alloc);
  if (quick->init())
  {
    delete quick;
    return NULL;
  }

  if (range_tree)
  {
    assert(quick_prefix_records > 0);
    if (quick_prefix_records == HA_POS_ERROR)
    {
      /* Can't construct a quick select. */
      quick->quick_prefix_select= NULL;
    }
    else
    {
      /* Make a QuickRangeSelect to be used for group prefix retrieval. */
      quick->quick_prefix_select= optimizer::get_quick_select(param,
                                                              param_idx,
                                                              index_tree,
                                                              HA_MRR_USE_DEFAULT_IMPL,
                                                              0,
                                                              &quick->alloc);
    }

    /*
      Extract the SEL_ARG subtree that contains only ranges for the MIN/MAX
      argument keypart, and create a QuickRange for each of its intervals.
    */
    if (min_max_arg_part)
    {
      optimizer::SEL_ARG *min_max_range= index_tree;
      while (min_max_range) /* Find the tree for the MIN/MAX key part. */
      {
        if (min_max_range->field->eq(min_max_arg_part->field))
          break;
        min_max_range= min_max_range->next_key_part;
      }
      /* Scroll to the leftmost interval for the MIN/MAX argument. */
      while (min_max_range && min_max_range->prev)
        min_max_range= min_max_range->prev;
      /* Create an array of QuickRanges for the MIN/MAX argument. */
      while (min_max_range)
      {
        if (quick->add_range(min_max_range))
        {
          delete quick;
          return NULL;
        }
        min_max_range= min_max_range->next;
      }
    }
  }
  else
    quick->quick_prefix_select= NULL;

  quick->update_key_stat();
  quick->adjust_prefix_ranges();

  return quick;
}
optimizer::QuickSelectInterface *optimizer::RangeReadPlan::make_quick(optimizer::Parameter *param, bool, memory::Root *parent_alloc)
{
  optimizer::QuickRangeSelect *quick= optimizer::get_quick_select(param, key_idx, key, mrr_flags, mrr_buf_size, parent_alloc);
  if (quick)
  {
    quick->records= records;
    quick->read_time= read_cost;
  }
  return quick;
}
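
/*
  RorScanInfo helpers: covered_fields is kept as a 64-bit mask, so the
  operations below convert it to and from a boost::dynamic_bitset of
  covered_fields_size bits.
*/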
uint32_t optimizer::RorScanInfo::findFirstNotSet() const
{
  boost::dynamic_bitset<> map= bitsToBitset();
  for (boost::dynamic_bitset<>::size_type i= 0; i < map.size(); i++)
  {
    if (not map.test(i))
      return i;
  }
  return map.size();
}

size_t optimizer::RorScanInfo::getBitCount() const
{
  boost::dynamic_bitset<> tmp_bitset= bitsToBitset();
  return tmp_bitset.count();
}

void optimizer::RorScanInfo::subtractBitset(const boost::dynamic_bitset<>& in_bitset)
{
  boost::dynamic_bitset<> tmp_bitset= bitsToBitset();
  tmp_bitset-= in_bitset;
  covered_fields= tmp_bitset.to_ulong();
}

boost::dynamic_bitset<> optimizer::RorScanInfo::bitsToBitset() const
{
  string res;
  uint64_t conv= covered_fields;
  while (conv)
  {
    res.push_back((conv & 1) + '0');
    conv>>= 1;
  }
  if (! res.empty())
  {
    std::reverse(res.begin(), res.end());
  }
  string final(covered_fields_size - res.length(), '0');
  final.append(res);
  return boost::dynamic_bitset<>(final);
}