Mirror of https://github.com/MariaDB/server.git
Cache file->index_flags(index, 0, 1) in table->key_info[index].index_flags
The reason for this change is that we call file->index_flags(index, 0, 1) multiple times in best_access_path() when optimizing a table. In InnoDB, for example, the call is not trivial (4 if's and 2 assignments). With the cached value the check is inlined and is just a memory reference.

Other things:
- handler::is_clustering_key() and pk_is_clustering_key() are now inline.
- Added TABLE::can_use_rowid_filter() to simplify some code.
- Test if we should use a rowid_filter only if can_use_rowid_filter() is true.
- Added TABLE::is_clustering_key() to avoid an extra memory reference.
- Simplified some code using the fact that HA_KEYREAD_ONLY being set implies that HA_CLUSTERED_INDEX is not set.
- Added a DBUG_ASSERT to TABLE::best_range_rowid_filter() to ensure it is not called with a clustering key.
- Reorganized elements in struct st_key to get better memory alignment.
- Updated ha_innobase::index_flags() to not set HA_DO_RANGE_FILTER_PUSHDOWN for the clustered index.
parent 5e0832e132
commit ed0a723566
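To make the motivation concrete, here is a minimal, self-contained C++ sketch of the caching pattern this commit applies. It is an illustration only: the Engine, Table and KeyInfo names and the flag values are hypothetical stand-ins, not MariaDB's handler, TABLE or KEY types. The virtual per-index flag lookup is done once at open time; afterwards checks such as is_clustering_key() and can_use_rowid_filter() are inline bit tests on cached data.

#include <cstdint>
#include <vector>

// Hypothetical stand-ins; the real MariaDB types (handler, TABLE, KEY) differ.
enum : uint32_t
{
  CLUSTERED_INDEX=       1u << 0,
  RANGE_FILTER_PUSHDOWN= 1u << 1
};

struct KeyInfo
{
  uint32_t index_flags;   // cached copy of the engine's per-index flags
};

class Engine
{
public:
  // Potentially non-trivial virtual call (the cost we want to pay only once).
  virtual uint32_t index_flags(unsigned index) const= 0;
  virtual ~Engine()= default;
};

struct Table
{
  Engine *engine;
  std::vector<KeyInfo> key_info;

  // Done once when the table is opened.
  void cache_index_flags()
  {
    for (unsigned i= 0; i < key_info.size(); i++)
      key_info[i].index_flags= engine->index_flags(i);
  }

  // Hot-path checks become inline bit tests on the cached value.
  bool is_clustering_key(unsigned index) const
  {
    return key_info[index].index_flags & CLUSTERED_INDEX;
  }

  bool can_use_rowid_filter(unsigned index) const
  {
    return (key_info[index].index_flags &
            (CLUSTERED_INDEX | RANGE_FILTER_PUSHDOWN)) == RANGE_FILTER_PUSHDOWN;
  }
};

Folding both conditions into a single mask comparison mirrors the TABLE::can_use_rowid_filter() added in the patch, which requires HA_DO_RANGE_FILTER_PUSHDOWN to be set and HA_CLUSTERED_INDEX to be clear.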
@@ -3498,7 +3498,7 @@ int handler::ha_open(TABLE *table_arg, const char *name, int mode,
     m_psi= PSI_CALL_open_table(ha_table_share_psi(), this);
   }
 
-  if (table->s->db_options_in_use & HA_OPTION_READ_ONLY_DATA)
+  if (table_share->db_options_in_use & HA_OPTION_READ_ONLY_DATA)
     table->db_stat|=HA_READ_ONLY;
   (void) extra(HA_EXTRA_NO_READCHECK);          // Not needed in SQL
 
@@ -3512,14 +3512,19 @@ int handler::ha_open(TABLE *table_arg, const char *name, int mode,
   else
     dup_ref=ref+ALIGN_SIZE(ref_length);
   cached_table_flags= table_flags();
-  if (!table->s->optimizer_costs_inited)
+
+  /* Cache index flags */
+  for (uint index= 0 ; index < table_share->keys ; index++)
+    table->key_info[index].index_flags= index_flags(index, 0, 1);
+
+  if (!table_share->optimizer_costs_inited)
   {
-    table->s->optimizer_costs_inited=1;
+    table_share->optimizer_costs_inited=1;
     /* Copy data from global 'engine'.optimizer_costs to TABLE_SHARE */
-    table->s->update_optimizer_costs(partition_ht());
+    table_share->update_optimizer_costs(partition_ht());
     /* Update costs depend on table structure */
-    update_optimizer_costs(&table->s->optimizer_costs);
+    update_optimizer_costs(&table_share->optimizer_costs);
   }
+
   /* Copy current optimizer costs. Needed in case clone() is used */
   reset_statistics();
 }
@@ -3818,7 +3823,7 @@ int handler::read_first_row(uchar * buf, uint primary_key)
     TODO remove the test for HA_READ_ORDER
   */
   if (stats.deleted < 10 || primary_key >= MAX_KEY ||
-      !(index_flags(primary_key, 0, 0) & HA_READ_ORDER))
+      !(table->key_info[primary_key].index_flags & HA_READ_ORDER))
   {
     if (likely(!(error= ha_rnd_init(1))))
     {
@@ -4551,6 +4551,7 @@ public:
 
     For a clustered (primary) key, the following should also hold:
     index_flags() should contain HA_CLUSTERED_INDEX
+    index_flags() should not contain HA_KEYREAD_ONLY or HA_DO_RANGE_FILTER_PUSHDOWN
     table_flags() should contain HA_TABLE_SCAN_ON_INDEX
 
     For a reference key the following should also hold:
@@ -4561,20 +4562,9 @@ public:
   */
 
   /* The following code is for primary keys */
-  bool pk_is_clustering_key(uint index) const
-  {
-    /*
-      We have to check for MAX_INDEX as table->s->primary_key can be
-      MAX_KEY in the case where there is no primary key.
-    */
-    return index != MAX_KEY && is_clustering_key(index);
-  }
+  inline bool pk_is_clustering_key(uint index) const;
   /* Same as before but for other keys, in which case we can skip the check */
-  bool is_clustering_key(uint index) const
-  {
-    DBUG_ASSERT(index != MAX_KEY);
-    return (index_flags(index, 0, 1) & HA_CLUSTERED_INDEX);
-  }
+  inline bool is_clustering_key(uint index) const;
 
   virtual int cmp_ref(const uchar *ref1, const uchar *ref2)
   {
@@ -335,13 +335,12 @@ void push_index_cond(JOIN_TAB *tab, uint keyno)
     than on a non-clustered key. This restriction should be
     re-evaluated when WL#6061 is implemented.
   */
-  if ((tab->table->file->index_flags(keyno, 0, 1) &
-       HA_DO_INDEX_COND_PUSHDOWN) &&
+  if ((tab->table->key_info[keyno].index_flags & HA_DO_INDEX_COND_PUSHDOWN) &&
      optimizer_flag(tab->join->thd, OPTIMIZER_SWITCH_INDEX_COND_PUSHDOWN) &&
      tab->join->thd->lex->sql_command != SQLCOM_UPDATE_MULTI &&
      tab->join->thd->lex->sql_command != SQLCOM_DELETE_MULTI &&
      tab->type != JT_CONST && tab->type != JT_SYSTEM &&
-     !tab->table->file->is_clustering_key(keyno))           // 6
+     !tab->table->is_clustering_key(keyno))                 // 6
  {
    DBUG_EXECUTE("where",
                 print_where(tab->select_cond, "full cond", QT_ORDINARY););
@@ -7326,8 +7326,7 @@ TRP_ROR_INTERSECT *get_best_ror_intersect(const PARAM *param, SEL_TREE *tree,
     if (!tree->ror_scans_map.is_set(idx))
       continue;
     key_no= param->real_keynr[idx];
-    if (key_no != cpk_no &&
-        param->table->file->index_flags(key_no,0,0) & HA_CLUSTERED_INDEX)
+    if (key_no != cpk_no && param->table->file->is_clustering_key(key_no))
     {
       /* Ignore clustering keys */
       tree->n_ror_scans--;
@@ -11906,7 +11905,7 @@ ha_rows check_quick_select(PARAM *param, uint idx, ha_rows limit,
   param->max_key_parts=0;
 
   seq.is_ror_scan= TRUE;
-  if (file->index_flags(keynr, 0, TRUE) & HA_KEY_SCAN_NOT_ROR)
+  if (param->table->key_info[keynr].index_flags & HA_KEY_SCAN_NOT_ROR)
     seq.is_ror_scan= FALSE;
 
   *mrr_flags= param->force_default_mrr? HA_MRR_USE_DEFAULT_IMPL: 0;
@@ -11919,9 +11918,9 @@ ha_rows check_quick_select(PARAM *param, uint idx, ha_rows limit,
   // Passing wrong second argument to index_flags() makes no difference for
   // most storage engines but might be an issue for MyRocks with certain
   // datatypes.
+  // Note that HA_KEYREAD_ONLY implies that this is not a clustered index
   if (index_only &&
-      (file->index_flags(keynr, param->max_key_parts, 1) & HA_KEYREAD_ONLY) &&
-      !(file->index_flags(keynr, param->max_key_parts, 1) & HA_CLUSTERED_INDEX))
+      (file->index_flags(keynr, param->max_key_parts, 1) & HA_KEYREAD_ONLY))
    *mrr_flags |= HA_MRR_INDEX_ONLY;
 
  if (param->thd->lex->sql_command != SQLCOM_SELECT)
@@ -4879,7 +4879,7 @@ SJ_TMP_TABLE::create_sj_weedout_tmp_table(THD *thd)
     DBUG_PRINT("info",("Creating group key in temporary table"));
     share->keys=1;
     share->uniques= MY_TEST(using_unique_constraint);
-    table->key_info=keyinfo;
+    table->key_info= share->key_info= keyinfo;
     keyinfo->key_part=key_part_info;
     keyinfo->flags=HA_NOSAME;
     keyinfo->usable_key_parts= keyinfo->user_defined_key_parts= 1;
@@ -381,9 +381,7 @@ void TABLE::init_cost_info_for_usable_range_rowid_filters(THD *thd)
   */
   while ((key_no= it++) != key_map::Iterator::BITMAP_END)
   {
-    if (!(file->index_flags(key_no, 0, 1) & HA_DO_RANGE_FILTER_PUSHDOWN)) // !1
-      continue;
-    if (file->is_clustering_key(key_no))                    // !2
+    if (!can_use_rowid_filter(key_no))                      // 1 & 2
      continue;
    if (opt_range[key_no].rows >
        get_max_range_rowid_filter_elems_for_table(thd, this,
@@ -483,6 +481,9 @@ void Range_rowid_filter_cost_info::trace_info(THD *thd)
   and chooses the element for the range filter that promise the greatest
   gain with the the ref or range access of the table by access_key_no.
 
+  The function assumes that caller has checked that the key is not a clustered
+  key. See best_access_path().
+
   @retval  Pointer to the cost info for the range filter that promises
            the greatest gain, NULL if there is no such range filter
 */
@@ -500,10 +501,9 @@ TABLE::best_range_rowid_filter(uint access_key_no, double records,
     is accessed by the clustered primary key. It does not make sense
     if a full key is used. If the table is accessed by a partial
     clustered primary key it would, but the current InnoDB code does not
-    allow it. Later this limitation will be lifted
+    allow it. Later this limitation may be lifted.
   */
-  if (file->is_clustering_key(access_key_no))
-    return 0;
+  DBUG_ASSERT(!file->is_clustering_key(access_key_no));
 
   // Disallow use of range filter if the key contains partially-covered
   // columns.
@@ -7430,6 +7430,21 @@ inline void handler::set_table(TABLE* table_arg)
   costs= &table_arg->s->optimizer_costs;
 }
 
+inline bool handler::pk_is_clustering_key(uint index) const
+{
+  /*
+    We have to check for MAX_INDEX as table->s->primary_key can be
+    MAX_KEY in the case where there is no primary key.
+  */
+  return index != MAX_KEY && is_clustering_key(index);
+}
+
+inline bool handler::is_clustering_key(uint index) const
+{
+  DBUG_ASSERT(index != MAX_KEY);
+  return table->is_clustering_key(index);
+}
+
 inline int handler::ha_ft_read(uchar *buf)
 {
   int error= ft_read(buf);
@@ -674,7 +674,7 @@ mysql_ha_fix_cond_and_key(SQL_HANDLER *handler,
       if ((c_key->flags & HA_SPATIAL) ||
           c_key->algorithm == HA_KEY_ALG_FULLTEXT ||
           (ha_rkey_mode != HA_READ_KEY_EXACT &&
-           (table->file->index_flags(handler->keyno, 0, TRUE) &
+           (table->key_info[handler->keyno].index_flags &
             (HA_READ_NEXT | HA_READ_PREV | HA_READ_RANGE)) == 0))
       {
         my_error(ER_KEY_DOESNT_SUPPORT, MYF(0),
@@ -690,8 +690,7 @@ mysql_ha_fix_cond_and_key(SQL_HANDLER *handler,
     }
 
     if (key_expr->elements < keyinfo->user_defined_key_parts &&
-        (table->file->index_flags(handler->keyno, 0, TRUE) &
-         HA_ONLY_WHOLE_INDEX))
+        (table->key_info[handler->keyno].index_flags & HA_ONLY_WHOLE_INDEX))
     {
       my_error(ER_KEY_DOESNT_SUPPORT, MYF(0),
                table->file->index_type(handler->keyno), keyinfo->name.str);
@@ -8466,7 +8466,7 @@ best_access_path(JOIN *join,
             records.
           */
           if ((found_part & 1) &&
-              (!(file->index_flags(key, 0, 0) & HA_ONLY_WHOLE_INDEX) ||
+              (!(table->key_info[key].index_flags & HA_ONLY_WHOLE_INDEX) ||
                found_part == PREV_BITS(uint,keyinfo->user_defined_key_parts)))
           {
             double extra_cost= 0;
@@ -8663,8 +8663,7 @@ best_access_path(JOIN *join,
             Records can be 0 in case of empty tables.
           */
           if ((found_part & 1) && records &&
-              (table->file->index_flags(start_key->key,0,1) &
-               HA_DO_RANGE_FILTER_PUSHDOWN))
+              table->can_use_rowid_filter(start_key->key))
           {
             /*
               If we use filter F with selectivity s the the cost of fetching data
@@ -8989,27 +8988,31 @@ best_access_path(JOIN *join,
                                  range->cost.total_cost() / s->quick->read_time >= 0.9999999));
 
           range->get_costs(&tmp);
-          filter= table->best_range_rowid_filter(key_no,
-                                                 rows2double(range->rows),
-                                                 file->cost(&tmp),
-                                                 file->cost(tmp.index_cost),
-                                                 record_count,
-                                                 &records_best_filter);
-          set_if_smaller(best.records_out, records_best_filter);
-          if (filter)
+          if (table->can_use_rowid_filter(key_no))
           {
-            filter= filter->apply_filter(thd, table, &tmp,
-                                         &records_after_filter,
-                                         &startup_cost,
-                                         range->ranges,
-                                         record_count);
+            filter= table->best_range_rowid_filter(key_no,
+                                                   rows2double(range->rows),
+                                                   file->cost(&tmp),
+                                                   file->cost(tmp.index_cost),
+                                                   record_count,
+                                                   &records_best_filter);
+            set_if_smaller(best.records_out, records_best_filter);
             if (filter)
             {
-              tmp.row_cost.cpu+= records_after_filter * WHERE_COST_THD(thd);
-              cur_cost= file->cost_for_reading_multiple_times(record_count, &tmp);
-              cur_cost= COST_ADD(cur_cost, startup_cost);
-              startup_cost= 0; // Avoid adding it again later
-              table->opt_range[key_no].selectivity= filter->selectivity;
+              filter= filter->apply_filter(thd, table, &tmp,
+                                           &records_after_filter,
+                                           &startup_cost,
+                                           range->ranges,
+                                           record_count);
+              if (filter)
+              {
+                tmp.row_cost.cpu+= records_after_filter * WHERE_COST_THD(thd);
+                cur_cost= file->cost_for_reading_multiple_times(record_count,
+                                                                &tmp);
+                cur_cost= COST_ADD(cur_cost, startup_cost);
+                startup_cost= 0; // Avoid adding it again later
+                table->opt_range[key_no].selectivity= filter->selectivity;
+              }
            }
          }
          if (best.key && key_no == best.key->key &&
@@ -21087,6 +21090,8 @@ bool Create_tmp_table::finalize(THD *thd,
       m_key_part_info++;
     }
   }
+  if (share->keys)
+    keyinfo->index_flags= table->file->index_flags(0, 0, 1);
 
   if (unlikely(thd->is_fatal_error))            // If end of memory
     goto err;                                   /* purecov: inspected */
@@ -21541,6 +21546,8 @@ bool create_internal_tmp_table(TABLE *table, KEY *keyinfo,
         keydef.flag|= HA_NULL_ARE_EQUAL;
       }
     }
+    if (share->keys)
+      keyinfo->index_flags= table->file->index_flags(0, 0, 1);
   }
   bzero((char*) &create_info,sizeof(create_info));
   create_info.data_file_length= table->in_use->variables.tmp_disk_table_size;
@@ -21734,6 +21741,8 @@ bool create_internal_tmp_table(TABLE *table, KEY *keyinfo,
         keydef.flag|= HA_NULL_ARE_EQUAL;
       }
     }
+    if (share->keys)
+      keyinfo->index_flags= table->file->index_flags(0, 0, 1);
   }
   MI_CREATE_INFO create_info;
   bzero((char*) &create_info,sizeof(create_info));
@@ -25639,7 +25648,7 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit,
         ((select_limit >= table_records) &&
          ((tab->type == JT_ALL || tab->type == JT_RANGE) &&
           tab->join->table_count > tab->join->const_tables + 1) &&
-         !(table->file->index_flags(best_key, 0, 1) & HA_CLUSTERED_INDEX)))
+         !table->is_clustering_key(best_key)))
       goto use_filesort;
 
     if (table->opt_range_keys.is_set(best_key) && best_key != ref_key)
@@ -30607,9 +30616,8 @@ test_if_cheaper_ordering(const JOIN_TAB *tab, ORDER *order, TABLE *table,
       possible_key.add("can_resolve_order", true);
       possible_key.add("direction", direction);
       bool is_covering= (table->covering_keys.is_set(nr) ||
-                         (table->file->index_flags(nr, 0, 1) &
-                          HA_CLUSTERED_INDEX));
-      /*
+                         table->is_clustering_key(nr));
+      /*
         Don't use an index scan with ORDER BY without limit.
         For GROUP BY without limit always use index scan
        if there is a suitable index.
@@ -31560,7 +31568,8 @@ void JOIN::init_join_cache_and_keyread()
         /* purecov: end */
       }
 
-      if (table->file->keyread_enabled())
+      if (table->file->keyread_enabled() &&
+          !table->is_clustering_key(table->file->keyread))
       {
         /*
           Here we set the read_set bitmap for all covering keys
@@ -31595,8 +31604,7 @@ void JOIN::init_join_cache_and_keyread()
           c, which is not a problem as we read all the columns from the index
           tuple.
         */
-        if (!(table->file->index_flags(table->file->keyread, 0, 1) & HA_CLUSTERED_INDEX))
-          table->mark_index_columns(table->file->keyread, table->read_set);
+        table->mark_index_columns(table->file->keyread, table->read_set);
       }
       if (tab->cache && tab->cache->init(select_options & SELECT_DESCRIBE))
         revise_cache_usage(tab);
@@ -96,12 +96,22 @@ class engine_option_value;
 struct ha_index_option_struct;
 
 typedef struct st_key {
-  uint  key_length;             /* total length of user defined key parts */
-  ulong flags;                  /* dupp key and pack flags */
+  ulong flags;                  /* dupp key and pack flags */
+  ulong ext_key_flags;          /* Flags for extended key */
+  ulong index_flags;            /* Copy of handler->index_flags(index_number, 0, 1) */
+  uint  key_length;             /* total length of user defined key parts */
   uint  user_defined_key_parts; /* How many key_parts */
   uint  usable_key_parts;       /* Should normally be = user_defined_key_parts */
-  uint  ext_key_parts;          /* Number of key parts in extended key */
-  ulong ext_key_flags;          /* Flags for extended key */
+  uint  ext_key_parts;          /* Number of key parts in extended key */
+  uint  block_size;
+  /*
+    The flag is on if statistical data for the index prefixes
+    has to be taken from the system statistical tables.
+  */
+  bool is_statistics_from_stat_tables;
+  bool without_overlaps;
+  bool is_ignored;              // TRUE if index needs to be ignored
 
   /*
     Parts of primary key that are in the extension of this index.
@@ -123,13 +133,7 @@ typedef struct st_key {
   /* Set of keys constraint correlated with this key */
   key_map constraint_correlated;
   LEX_CSTRING name;
-  uint  block_size;
   enum  ha_key_alg algorithm;
-  /*
-    The flag is on if statistical data for the index prefixes
-    has to be taken from the system statistical tables.
-  */
-  bool is_statistics_from_stat_tables;
   /*
     Note that parser is used when the table is opened for use, and
     parser_name is used when the table is being created.
@@ -167,12 +171,6 @@ typedef struct st_key {
   ha_index_option_struct *option_struct; /* structure with parsed options */
 
   double actual_rec_per_key(uint i);
-
-  bool without_overlaps;
-  /*
-    TRUE if index needs to be ignored
-  */
-  bool is_ignored;
 } KEY;
 
@@ -7411,7 +7411,7 @@ MY_BITMAP *TABLE::prepare_for_keyread(uint index, MY_BITMAP *map)
   DBUG_ENTER("TABLE::prepare_for_keyread");
   if (!no_keyread)
     file->ha_start_keyread(index);
-  if (map != read_set || !(file->index_flags(index, 0, 1) & HA_CLUSTERED_INDEX))
+  if (map != read_set || !is_clustering_key(index))
   {
     mark_index_columns(index, map);
     column_bitmaps_set(map);
@@ -8342,6 +8342,11 @@ bool TABLE::add_tmp_key(uint key, uint key_parts,
     key_start= FALSE;
     key_part_info++;
   }
+  /*
+    We have to cache index_flags here as the table may be used by the
+    optimizer before it's opened.
+  */
+  keyinfo->index_flags= file->index_flags(key, 0, 1);
 
   /*
     For the case when there is a derived table that would give distinct rows,
sql/table.h
@@ -1733,7 +1733,7 @@ public:
   int update_virtual_field(Field *vf, bool ignore_warnings);
   inline size_t key_storage_length(uint index)
   {
-    if (file->is_clustering_key(index))
+    if (is_clustering_key(index))
      return s->stored_rec_length;
    return key_info[index].key_length + file->ref_length;
  }
@@ -1877,6 +1877,27 @@ public:
     opt_range_condition_rows= rows;
   }
 
+  /* Return true if the key is a clustered key */
+  inline bool is_clustering_key(uint index) const
+  {
+    return key_info[index].index_flags & HA_CLUSTERED_INDEX;
+  }
+
+  /*
+    Return true if we can use rowid filter with this index
+    rowid filter can be used if
+    - filter pushdown is supported by the engine for the index. If this is set then
+      file->ha_table_flags() should not contain HA_NON_COMPARABLE_ROWID!
+    - The index is not a clustered primary index
+  */
+
+  inline bool can_use_rowid_filter(uint index) const
+  {
+    return ((key_info[index].index_flags &
+             (HA_DO_RANGE_FILTER_PUSHDOWN | HA_CLUSTERED_INDEX)) ==
+            HA_DO_RANGE_FILTER_PUSHDOWN);
+  }
+
   ulonglong vers_start_id() const;
   ulonglong vers_end_id() const;
 #ifdef WITH_PARTITION_STORAGE_ENGINE
||||
@ -5039,13 +5039,11 @@ ha_innobase::index_flags(
|
||||
}
|
||||
|
||||
ulong flags= key == table_share->primary_key
|
||||
? HA_CLUSTERED_INDEX : HA_KEYREAD_ONLY;
|
||||
? HA_CLUSTERED_INDEX : HA_KEYREAD_ONLY | HA_DO_RANGE_FILTER_PUSHDOWN;
|
||||
|
||||
flags |= HA_READ_NEXT | HA_READ_PREV | HA_READ_ORDER
|
||||
| HA_READ_RANGE
|
||||
| HA_DO_INDEX_COND_PUSHDOWN
|
||||
| HA_DO_RANGE_FILTER_PUSHDOWN;
|
||||
|
||||
| HA_READ_RANGE
|
||||
| HA_DO_INDEX_COND_PUSHDOWN;
|
||||
return(flags);
|
||||
}
|
||||
|
||||
|
||||