OmniSciDB  a5dc49c757
anonymous_namespace{QueryMemoryDescriptor.cpp} Namespace Reference

Functions

bool is_int_and_no_bigger_than (const SQLTypeInfo &ti, const size_t byte_width)
 
bool is_valid_int32_range (const ExpressionRange &range)
 
std::vector< int64_t > target_expr_group_by_indices (const std::list< std::shared_ptr< Analyzer::Expr >> &groupby_exprs, const std::vector< Analyzer::Expr * > &target_exprs)
 
std::vector< int64_t > target_expr_proj_indices (const RelAlgExecutionUnit &ra_exe_unit)
 
int8_t pick_baseline_key_component_width (const ExpressionRange &range, const size_t group_col_width)
 
int8_t pick_baseline_key_width (const RelAlgExecutionUnit &ra_exe_unit, const std::vector< InputTableInfo > &query_infos, const Executor *executor)
 
bool use_streaming_top_n (const RelAlgExecutionUnit &ra_exe_unit, const bool output_columnar)
 
template<class T >
std::vector< int8_t > get_col_byte_widths (const T &col_expr_list)
 
template<SQLAgg... agg_types>
bool any_of (std::vector< Analyzer::Expr * > const &target_exprs)
 

Function Documentation

template<SQLAgg... agg_types>
bool anonymous_namespace{QueryMemoryDescriptor.cpp}::any_of ( std::vector< Analyzer::Expr * > const &  target_exprs)

Definition at line 445 of file QueryMemoryDescriptor.cpp.

Referenced by import_export::TypedImportBuffer::add_values(), ResultSet::areAnyColumnsLazyFetched(), QueryMemoryDescriptor::canUsePerDeviceCardinality(), Executor::codegenJoinLoops(), RelAlgExecutor::executeSort(), RelAlgExecutor::executeUnion(), anonymous_namespace{FromTableReordering.cpp}::force_table_reordering_st_contain_func(), anonymous_namespace{FromTableReordering.cpp}::force_table_reordering_st_intersects_func(), QueryPlanDagExtractor::handleLeftDeepJoinTree(), Analyzer::WindowFunction::hasAggregateTreeRequiredWindowFunc(), RelAlgExecutor::hasDeletedRowInQuery(), PlanState::hasExpressionNeedsLazyFetch(), HashingSchemeRecycler::hasItemInCache(), BoundingBoxIntersectJoinSupportedFunction::is_bbox_intersect_supported_func(), anonymous_namespace{DdlCommandExecutor.cpp}::is_default_server(), BoundingBoxIntersectJoinSupportedFunction::is_many_to_many_func(), BoundingBoxIntersectJoinSupportedFunction::is_point_poly_rewrite_target_func(), BoundingBoxIntersectJoinSupportedFunction::is_poly_mpoly_rewrite_target_func(), BoundingBoxIntersectJoinSupportedFunction::is_poly_point_rewrite_target_func(), BoundingBoxIntersectJoinSupportedFunction::is_range_join_rewrite_target_func(), anonymous_namespace{RelAlgExecutor.cpp}::is_window_execution_unit(), RegisteredQueryHint::isAnyQueryHintDelivered(), Analyzer::WindowFunction::isFrameNavigateWindowFunction(), Analyzer::WindowFunction::isFramingAvailableWindowFunc(), HashtableRecycler::isInvalidHashTableCacheKey(), Analyzer::WindowFunction::isMissingValueFillingFunction(), PerfectJoinHashTable::reify(), RelAlgTranslator::translateWindowFunction(), and anonymous_namespace{ExpressionRewrite.cpp}::update_input_to_nest_lv().

445  {
446  return boost::algorithm::any_of(target_exprs, [=](Analyzer::Expr const* expr) {
447  auto const* const agg = dynamic_cast<Analyzer::AggExpr const*>(expr);
448  return agg && (... || (agg_types == agg->get_aggtype()));
449  });
450 }

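The SQLAgg... parameter pack lets one instantiation test for several aggregate kinds at once: the C++17 fold expression (... || (agg_types == agg->get_aggtype())) expands into a chain of equality tests, one per template argument. A minimal, self-contained sketch of the same technique, with a hypothetical AggKind enum standing in for SQLAgg and stripped-down Expr/AggExpr classes standing in for the Analyzer types:

#include <algorithm>
#include <iostream>
#include <vector>

enum class AggKind { kCOUNT, kAVG, kSUM, kMIN };

struct Expr {
  virtual ~Expr() = default;
};

struct AggExpr : Expr {
  AggKind kind;
  explicit AggExpr(AggKind k) : kind(k) {}
};

// The fold (... || (agg_kinds == agg->kind)) expands to
// kind1 == agg->kind || kind2 == agg->kind || ...
template <AggKind... agg_kinds>
bool any_of(std::vector<Expr*> const& exprs) {
  return std::any_of(exprs.begin(), exprs.end(), [](Expr const* e) {
    auto const* agg = dynamic_cast<AggExpr const*>(e);
    return agg && (... || (agg_kinds == agg->kind));
  });
}

int main() {
  AggExpr count{AggKind::kCOUNT};
  std::vector<Expr*> exprs{&count};
  std::cout << any_of<AggKind::kAVG, AggKind::kCOUNT>(exprs) << '\n';  // 1
  std::cout << any_of<AggKind::kSUM>(exprs) << '\n';                   // 0
}

With an empty pack the fold evaluates to false, so any_of<>(exprs) never matches.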

template<class T >
std::vector<int8_t> anonymous_namespace{QueryMemoryDescriptor.cpp}::get_col_byte_widths ( const T &  col_expr_list)
inline

Definition at line 185 of file QueryMemoryDescriptor.cpp.

References CHECK, CHECK_EQ, g_bigint_count, get_bit_width(), get_compact_type(), get_target_info(), anonymous_namespace{TargetExprBuilder.cpp}::is_varlen_projection(), kAVG, kENCODING_NONE, and heavydb.dtypes::T.

Referenced by QueryMemoryDescriptor::init(), and QueryMemoryDescriptor::pick_target_compact_width().

185  {
186  std::vector<int8_t> col_widths;
187  size_t col_expr_idx = 0;
188  for (const auto& col_expr : col_expr_list) {
189  if (!col_expr) {
190  // row index
191  col_widths.push_back(sizeof(int64_t));
192  } else {
193  bool is_varlen_projection{false};
194  if constexpr (std::is_same<T, std::list<std::shared_ptr<Analyzer::Expr>>>::value) {
195  is_varlen_projection =
196  !(std::dynamic_pointer_cast<const Analyzer::GeoExpr>(col_expr) == nullptr);
197  } else {
198  is_varlen_projection =
199  !(dynamic_cast<const Analyzer::GeoExpr*>(col_expr) == nullptr);
200  }
201 
202  if (is_varlen_projection) {
203  col_widths.push_back(sizeof(int64_t));
204  ++col_expr_idx;
205  continue;
206  }
207  const auto agg_info = get_target_info(col_expr, g_bigint_count);
208  const auto chosen_type = get_compact_type(agg_info);
209  if ((chosen_type.is_string() && chosen_type.get_compression() == kENCODING_NONE) ||
210  chosen_type.is_array()) {
211  col_widths.push_back(sizeof(int64_t));
212  col_widths.push_back(sizeof(int64_t));
213  ++col_expr_idx;
214  continue;
215  }
216  if (chosen_type.is_geometry()) {
217  for (auto i = 0; i < chosen_type.get_physical_coord_cols(); ++i) {
218  col_widths.push_back(sizeof(int64_t));
219  col_widths.push_back(sizeof(int64_t));
220  }
221  ++col_expr_idx;
222  continue;
223  }
224  const auto col_expr_bitwidth = get_bit_width(chosen_type);
225  CHECK_EQ(size_t(0), col_expr_bitwidth % 8);
226  col_widths.push_back(static_cast<int8_t>(col_expr_bitwidth >> 3));
227  // for average, we'll need to keep the count as well
228  if (agg_info.agg_kind == kAVG) {
229  CHECK(agg_info.is_agg);
230  col_widths.push_back(sizeof(int64_t));
231  }
232  }
233  ++col_expr_idx;
234  }
235  return col_widths;
236 }

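Each target expression contributes one or more entries to the returned slot-width vector: a null expression (the row index) and varlen geometry projections take a single 8-byte slot, none-encoded strings and arrays take two 8-byte slots (pointer and length), geometry takes a pointer/length pair per physical coordinate column, and AVG appends an extra 8-byte slot for the count it must carry alongside the sum. A simplified, self-contained analogue of that branching, where TargetDesc and its kinds are hypothetical stand-ins for TargetInfo and SQLTypeInfo:

#include <cstdint>
#include <iostream>
#include <vector>

struct TargetDesc {
  enum Kind { Int32, Int64, NoneEncodedString, Array, AvgAgg } kind;
};

std::vector<int8_t> col_byte_widths(const std::vector<TargetDesc>& targets) {
  std::vector<int8_t> widths;
  for (const auto& t : targets) {
    switch (t.kind) {
      case TargetDesc::NoneEncodedString:
      case TargetDesc::Array:
        widths.push_back(sizeof(int64_t));  // pointer slot
        widths.push_back(sizeof(int64_t));  // length slot
        break;
      case TargetDesc::AvgAgg:
        widths.push_back(sizeof(int64_t));  // running sum
        widths.push_back(sizeof(int64_t));  // extra count slot for AVG
        break;
      case TargetDesc::Int32:
        widths.push_back(sizeof(int32_t));
        break;
      case TargetDesc::Int64:
        widths.push_back(sizeof(int64_t));
        break;
    }
  }
  return widths;
}

int main() {
  // SELECT int_col, AVG(x), none_encoded_str FROM t  ->  4 8 8 8 8
  const auto widths = col_byte_widths({{TargetDesc::Int32},
                                       {TargetDesc::AvgAgg},
                                       {TargetDesc::NoneEncodedString}});
  for (int w : widths) {
    std::cout << w << ' ';
  }
  std::cout << '\n';
}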

bool anonymous_namespace{QueryMemoryDescriptor.cpp}::is_int_and_no_bigger_than ( const SQLTypeInfo &  ti,
const size_t  byte_width 
)

Definition at line 34 of file QueryMemoryDescriptor.cpp.

References get_bit_width(), and SQLTypeInfo::is_integer().

Referenced by QueryMemoryDescriptor::pick_target_compact_width().

34  {
35  if (!ti.is_integer()) {
36  return false;
37  }
38  return get_bit_width(ti) <= (byte_width * 8);
39 }

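The check reduces to comparing the type's logical bit width against the candidate compact width in bits; non-integer types are rejected up front. A tiny standalone sketch of the arithmetic, with the bit width passed in directly and the is_integer() guard elided:

#include <cstddef>
#include <iostream>

// type_bit_width stands in for get_bit_width(ti).
bool int_fits(std::size_t type_bit_width, std::size_t byte_width) {
  return type_bit_width <= byte_width * 8;
}

int main() {
  std::cout << int_fits(16, 4) << '\n';  // SMALLINT into a 4-byte slot: 1
  std::cout << int_fits(64, 4) << '\n';  // BIGINT into a 4-byte slot: 0
}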

bool anonymous_namespace{QueryMemoryDescriptor.cpp}::is_valid_int32_range ( const ExpressionRange &  range)

Definition at line 41 of file QueryMemoryDescriptor.cpp.

References EMPTY_KEY_32, ExpressionRange::getIntMax(), and ExpressionRange::getIntMin().

Referenced by pick_baseline_key_component_width().

41  {
42  return range.getIntMin() > INT32_MIN && range.getIntMax() < EMPTY_KEY_32 - 1;
43 }

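The guard admits a range for 32-bit group-by keys only when it cannot collide with reserved values: the minimum must stay above INT32_MIN (which doubles as the inline 32-bit null sentinel), and the maximum must stay below EMPTY_KEY_32 - 1, keeping clear of the empty-slot marker. A standalone sketch, assuming EMPTY_KEY_32 is INT32_MAX as defined elsewhere in the codebase:

#include <cstdint>
#include <iostream>
#include <limits>

// Assumption: EMPTY_KEY_32 is the 32-bit empty-slot sentinel (INT32_MAX).
constexpr int64_t EMPTY_KEY_32 = std::numeric_limits<int32_t>::max();

bool is_valid_int32_range(int64_t int_min, int64_t int_max) {
  // INT32_MIN is excluded (inline 32-bit null sentinel), and the maximum
  // stays clear of the empty-slot marker.
  return int_min > std::numeric_limits<int32_t>::min() &&
         int_max < EMPTY_KEY_32 - 1;
}

int main() {
  std::cout << is_valid_int32_range(0, 1000) << '\n';          // 1
  std::cout << is_valid_int32_range(0, EMPTY_KEY_32) << '\n';  // 0
}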

int8_t anonymous_namespace{QueryMemoryDescriptor.cpp}::pick_baseline_key_component_width ( const ExpressionRange &  range,
const size_t  group_col_width 
)

Definition at line 114 of file QueryMemoryDescriptor.cpp.

References Double, Float, ExpressionRange::getType(), ExpressionRange::hasNulls(), Integer, Invalid, is_valid_int32_range(), and UNREACHABLE.

Referenced by pick_baseline_key_width().

115  {
116  if (range.getType() == ExpressionRangeType::Invalid) {
117  return sizeof(int64_t);
118  }
119  switch (range.getType()) {
120  case ExpressionRangeType::Integer:
121  if (group_col_width == sizeof(int64_t) && range.hasNulls()) {
122  return sizeof(int64_t);
123  }
124  return is_valid_int32_range(range) ? sizeof(int32_t) : sizeof(int64_t);
125  case ExpressionRangeType::Float:
126  case ExpressionRangeType::Double:
127  return sizeof(int64_t); // No compaction for floating point yet.
128  default:
129  UNREACHABLE();
130  }
131  return sizeof(int64_t);
132 }

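Restated: an unknown (Invalid) range or a floating-point key always gets 8 bytes; an integer key compacts to 4 bytes when its range passes is_valid_int32_range() and the column is not a nullable 8-byte type (which must keep its null sentinel representable). A condensed standalone restatement, with RangeType and the boolean inputs as hypothetical simplifications of ExpressionRange:

#include <cstddef>
#include <cstdint>
#include <iostream>

enum class RangeType { Invalid, Integer, Float, Double };

int8_t key_component_width(RangeType type,
                           bool has_nulls,
                           bool fits_int32,
                           std::size_t group_col_width) {
  switch (type) {
    case RangeType::Invalid:
      return sizeof(int64_t);  // unknown range: stay conservative
    case RangeType::Integer:
      // A nullable 8-byte group column keeps 8 bytes so its null
      // sentinel remains representable; otherwise compact when safe.
      if (group_col_width == sizeof(int64_t) && has_nulls) {
        return sizeof(int64_t);
      }
      return fits_int32 ? sizeof(int32_t) : sizeof(int64_t);
    case RangeType::Float:
    case RangeType::Double:
      return sizeof(int64_t);  // no compaction for floating point yet
  }
  return sizeof(int64_t);
}

int main() {
  std::cout << int(key_component_width(RangeType::Integer, false, true, 8))
            << '\n';  // 4
}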

int8_t anonymous_namespace{QueryMemoryDescriptor.cpp}::pick_baseline_key_width ( const RelAlgExecutionUnit &  ra_exe_unit,
const std::vector< InputTableInfo > &  query_infos,
const Executor *  executor 
)

Definition at line 135 of file QueryMemoryDescriptor.cpp.

References getExpressionRange(), RelAlgExecutionUnit::groupby_exprs, and pick_baseline_key_component_width().

137  {
138  int8_t compact_width{4};
139  for (const auto& groupby_expr : ra_exe_unit.groupby_exprs) {
140  const auto expr_range = getExpressionRange(groupby_expr.get(), query_infos, executor);
141  compact_width = std::max(compact_width,
142  pick_baseline_key_component_width(
143  expr_range, groupby_expr->get_type_info().get_size()));
144  }
145  return compact_width;
146 }


std::vector<int64_t> anonymous_namespace{QueryMemoryDescriptor.cpp}::target_expr_group_by_indices ( const std::list< std::shared_ptr< Analyzer::Expr >> &  groupby_exprs,
const std::vector< Analyzer::Expr * > &  target_exprs 
)

Definition at line 45 of file QueryMemoryDescriptor.cpp.

References Analyzer::Var::get_varno(), and Analyzer::Var::kGROUPBY.

47  {
48  std::vector<int64_t> indices(target_exprs.size(), -1);
49  for (size_t target_idx = 0; target_idx < target_exprs.size(); ++target_idx) {
50  const auto target_expr = target_exprs[target_idx];
51  if (dynamic_cast<const Analyzer::AggExpr*>(target_expr)) {
52  continue;
53  }
54  const auto var_expr = dynamic_cast<const Analyzer::Var*>(target_expr);
55  if (var_expr && var_expr->get_which_row() == Analyzer::Var::kGROUPBY) {
56  indices[target_idx] = var_expr->get_varno() - 1;
57  continue;
58  }
59  }
60  return indices;
61 }

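Every non-aggregate target that is a Var into the GROUP BY list is mapped to its 0-based key slot (get_varno() is 1-based); all other targets keep -1. For SELECT x, COUNT(*) ... GROUP BY x the result is {0, -1}. A minimal stand-in, where Expr/Var/AggExpr are hypothetical reductions of the Analyzer classes and the get_which_row() == kGROUPBY check is elided:

#include <cstdint>
#include <iostream>
#include <vector>

struct Expr { virtual ~Expr() = default; };
struct AggExpr : Expr {};
struct Var : Expr {
  int varno;  // 1-based position within the GROUP BY list
  explicit Var(int v) : varno(v) {}
};

std::vector<int64_t> group_by_indices(const std::vector<Expr*>& targets) {
  std::vector<int64_t> indices(targets.size(), -1);
  for (size_t i = 0; i < targets.size(); ++i) {
    if (dynamic_cast<const AggExpr*>(targets[i])) {
      continue;  // aggregates get their own output slot
    }
    if (auto const* var = dynamic_cast<const Var*>(targets[i])) {
      indices[i] = var->varno - 1;  // convert to 0-based key index
    }
  }
  return indices;
}

int main() {
  // SELECT x, COUNT(*) FROM t GROUP BY x
  Var x{1};
  AggExpr count;
  for (auto idx : group_by_indices({&x, &count})) {
    std::cout << idx << ' ';  // prints: 0 -1
  }
  std::cout << '\n';
}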

std::vector<int64_t> anonymous_namespace{QueryMemoryDescriptor.cpp}::target_expr_proj_indices ( const RelAlgExecutionUnit &  ra_exe_unit)

Definition at line 63 of file QueryMemoryDescriptor.cpp.

References CHECK, get_column_descriptor_maybe(), RelAlgExecutionUnit::input_descs, SortInfo::order_entries, RelAlgExecutionUnit::quals, RelAlgExecutionUnit::simple_quals, RelAlgExecutionUnit::sort_info, RelAlgExecutionUnit::target_exprs, and ScalarExprVisitor< T >::visit().

63  {
64  if (ra_exe_unit.input_descs.size() > 1 ||
65  !ra_exe_unit.sort_info.order_entries.empty()) {
66  return {};
67  }
68  std::vector<int64_t> target_indices(ra_exe_unit.target_exprs.size(), -1);
69  UsedColumnsVisitor columns_visitor;
70  std::unordered_set<shared::ColumnKey> used_columns;
71  for (const auto& simple_qual : ra_exe_unit.simple_quals) {
72  const auto crt_used_columns = columns_visitor.visit(simple_qual.get());
73  used_columns.insert(crt_used_columns.begin(), crt_used_columns.end());
74  }
75  for (const auto& qual : ra_exe_unit.quals) {
76  const auto crt_used_columns = columns_visitor.visit(qual.get());
77  used_columns.insert(crt_used_columns.begin(), crt_used_columns.end());
78  }
79  for (const auto& target : ra_exe_unit.target_exprs) {
80  const auto col_var = dynamic_cast<const Analyzer::ColumnVar*>(target);
81  if (col_var) {
82  const auto cd = get_column_descriptor_maybe(col_var->getColumnKey());
83  if (!cd || !cd->isVirtualCol) {
84  continue;
85  }
86  }
87  const auto crt_used_columns = columns_visitor.visit(target);
88  used_columns.insert(crt_used_columns.begin(), crt_used_columns.end());
89  }
90  for (size_t target_idx = 0; target_idx < ra_exe_unit.target_exprs.size();
91  ++target_idx) {
92  const auto target_expr = ra_exe_unit.target_exprs[target_idx];
93  CHECK(target_expr);
94  const auto& ti = target_expr->get_type_info();
95  // TODO: add proper lazy fetch for varlen types in result set
96  if (ti.is_varlen()) {
97  continue;
98  }
99  const auto col_var = dynamic_cast<const Analyzer::ColumnVar*>(target_expr);
100  if (!col_var) {
101  continue;
102  }
103  if (!ti.is_varlen() &&
104  used_columns.find(col_var->getColumnKey()) == used_columns.end()) {
105  // setting target index to be zero so that later it can be decoded properly (in lazy
106  // fetch, the zeroth target index indicates the corresponding rowid column for the
107  // projected entry)
108  target_indices[target_idx] = 0;
109  }
110  }
111  return target_indices;
112 }

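The net effect: for a single-table, unsorted projection, any fixed-width column target that no qualifier references is marked with index 0 so the result set can fetch it lazily (index 0 standing for the projected row's rowid); everything else keeps -1. A compact sketch of just that marking step, with ColumnKey and the flattened inputs as hypothetical simplifications:

#include <cstdint>
#include <iostream>
#include <unordered_set>
#include <vector>

using ColumnKey = int;

std::vector<int64_t> proj_indices(
    const std::vector<ColumnKey>& projected_cols,
    const std::unordered_set<ColumnKey>& cols_used_by_quals) {
  std::vector<int64_t> indices(projected_cols.size(), -1);
  for (size_t i = 0; i < projected_cols.size(); ++i) {
    if (!cols_used_by_quals.count(projected_cols[i])) {
      indices[i] = 0;  // eligible for lazy fetch
    }
  }
  return indices;
}

int main() {
  // SELECT a, b FROM t WHERE a > 10; only b can be fetched lazily.
  for (auto idx : proj_indices({/*a=*/1, /*b=*/2}, {/*a=*/1})) {
    std::cout << idx << ' ';  // prints: -1 0
  }
  std::cout << '\n';
}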

bool anonymous_namespace{QueryMemoryDescriptor.cpp}::use_streaming_top_n ( const RelAlgExecutionUnit &  ra_exe_unit,
const bool  output_columnar 
)

Definition at line 148 of file QueryMemoryDescriptor.cpp.

References SortInfo::algorithm, CHECK_GT, CHECK_LE, g_cluster, g_streaming_topn_max, SortInfo::limit, anonymous_namespace{Utm.h}::n, SortInfo::offset, SortInfo::order_entries, RelAlgExecutionUnit::sort_info, StreamingTopN, and RelAlgExecutionUnit::target_exprs.

149  {
150  if (g_cluster) {
151  return false; // TODO(miyu)
152  }
153 
154  for (const auto target_expr : ra_exe_unit.target_exprs) {
155  if (dynamic_cast<const Analyzer::AggExpr*>(target_expr)) {
156  return false;
157  }
158  if (dynamic_cast<const Analyzer::WindowFunction*>(target_expr)) {
159  return false;
160  }
161  }
162 
163  // TODO: Allow streaming top n for columnar output
164  auto limit_value = ra_exe_unit.sort_info.limit.value_or(0);
165  if (!output_columnar && ra_exe_unit.sort_info.order_entries.size() == 1 &&
166  limit_value > 0 &&
167  ra_exe_unit.sort_info.algorithm == SortAlgorithm::StreamingTopN) {
168  const auto only_order_entry = ra_exe_unit.sort_info.order_entries.front();
169  CHECK_GT(only_order_entry.tle_no, int(0));
170  CHECK_LE(static_cast<size_t>(only_order_entry.tle_no),
171  ra_exe_unit.target_exprs.size());
172  const auto order_entry_expr = ra_exe_unit.target_exprs[only_order_entry.tle_no - 1];
173  const auto n = ra_exe_unit.sort_info.offset + limit_value;
174  if ((order_entry_expr->get_type_info().is_number() ||
175  order_entry_expr->get_type_info().is_time()) &&
176  n <= g_streaming_topn_max) {
177  return true;
178  }
179  }
180 
181  return false;
182 }
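Streaming top-n is therefore chosen only for a non-distributed, projection-only query with row-wise output, exactly one numeric or time ORDER BY key, a real LIMIT, and offset + limit no larger than g_streaming_topn_max. A standalone restatement with the inputs flattened into plain parameters (a hypothetical simplification; the SortAlgorithm::StreamingTopN check is elided):

#include <cstddef>
#include <iostream>

bool use_streaming_top_n(bool is_distributed,
                         bool has_agg_or_window_targets,
                         bool output_columnar,
                         std::size_t num_order_entries,
                         std::size_t limit,
                         std::size_t offset,
                         bool order_key_is_number_or_time,
                         std::size_t streaming_topn_max) {
  if (is_distributed || has_agg_or_window_targets) {
    return false;
  }
  // Row-wise output, exactly one ORDER BY key, a real LIMIT, and a small
  // enough window (offset + limit) are all required.
  return !output_columnar && num_order_entries == 1 && limit > 0 &&
         order_key_is_number_or_time && offset + limit <= streaming_topn_max;
}

int main() {
  std::cout << use_streaming_top_n(false, false, false, 1, 100, 0, true, 100000)
            << '\n';  // 1: eligible
  std::cout << use_streaming_top_n(true, false, false, 1, 100, 0, true, 100000)
            << '\n';  // 0: distributed mode disables it
}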