OmniSciDB  a5dc49c757
ExecutionKernel Class Reference

#include <ExecutionKernel.h>

Collaboration diagram for ExecutionKernel (diagram not shown).

Public Member Functions

 ExecutionKernel (const RelAlgExecutionUnit &ra_exe_unit, const ExecutorDeviceType chosen_device_type, int chosen_device_id, const ExecutionOptions &eo, const ColumnFetcher &column_fetcher, const QueryCompilationDescriptor &query_comp_desc, const QueryMemoryDescriptor &query_mem_desc, const FragmentsList &frag_list, const ExecutorDispatchMode kernel_dispatch_mode, RenderInfo *render_info, const int64_t rowid_lookup_key)
 
void run (Executor *executor, const size_t thread_idx, SharedKernelContext &shared_context)
 
FragmentsList get_fragment_list () const
 
int32_t get_chosen_device_id () const
 

Public Attributes

const RelAlgExecutionUnit & ra_exe_unit_
 

Private Member Functions

void runImpl (Executor *executor, const size_t thread_idx, SharedKernelContext &shared_context)
 

Private Attributes

const ExecutorDeviceType chosen_device_type
 
int chosen_device_id
 
const ExecutionOptions & eo
 
const ColumnFetcher & column_fetcher
 
const QueryCompilationDescriptor & query_comp_desc
 
const QueryMemoryDescriptor & query_mem_desc
 
const FragmentsList frag_list
 
const ExecutorDispatchMode kernel_dispatch_mode
 
RenderInfo * render_info_
 
const int64_t rowid_lookup_key
 
ResultSetPtr device_results_
 

Friends

class KernelSubtask
 

Detailed Description

Definition at line 92 of file ExecutionKernel.h.
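ExecutionKernel packages one unit of query work: the RelAlgExecutionUnit, the chosen device type and id, the fragments to scan, and the compilation and memory descriptors. The Executor builds one kernel per fragment list and runs each on a worker thread, collecting per-device results in a SharedKernelContext (see run() and runImpl() below). The following is a minimal sketch of that dispatch shape only; the KernelSketch, FragmentsList, and SharedKernelContext types here are simplified stand-ins, not the real QueryEngine classes.

// Illustrative only: simplified stand-ins for ExecutionKernel, FragmentsList,
// and SharedKernelContext; the real classes carry many more dependencies
// (see the constructor documentation below).
#include <cstddef>
#include <mutex>
#include <thread>
#include <utility>
#include <vector>

using FragmentsList = std::vector<int>;  // stand-in: fragment ids to scan

struct SharedKernelContext {  // stand-in: gathers per-kernel results
  std::mutex mtx;
  std::vector<std::vector<int>> results;
  void addDeviceResults(std::vector<int> r) {
    std::lock_guard<std::mutex> lock(mtx);
    results.push_back(std::move(r));
  }
};

struct KernelSketch {  // stand-in for ExecutionKernel
  FragmentsList frag_list;
  int chosen_device_id;
  void run(const size_t /*thread_idx*/, SharedKernelContext& shared_context) {
    // The real runImpl() fetches chunks and executes the compiled plan here;
    // this sketch just hands the fragment ids back as a "result".
    shared_context.addDeviceResults(frag_list);
  }
};

int main() {
  const std::vector<FragmentsList> per_kernel_frags = {{0, 1}, {2}, {3, 4}};
  std::vector<KernelSketch> kernels;
  for (int id = 0; id < static_cast<int>(per_kernel_frags.size()); ++id) {
    kernels.push_back({per_kernel_frags[id], id});
  }
  SharedKernelContext shared_context;
  std::vector<std::thread> workers;
  for (size_t i = 0; i < kernels.size(); ++i) {
    workers.emplace_back([&, i] { kernels[i].run(i, shared_context); });
  }
  for (auto& w : workers) {
    w.join();
  }
  return 0;
}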

Constructor & Destructor Documentation

ExecutionKernel::ExecutionKernel ( const RelAlgExecutionUnit &  ra_exe_unit,
const ExecutorDeviceType  chosen_device_type,
int  chosen_device_id,
const ExecutionOptions &  eo,
const ColumnFetcher &  column_fetcher,
const QueryCompilationDescriptor &  query_comp_desc,
const QueryMemoryDescriptor &  query_mem_desc,
const FragmentsList &  frag_list,
const ExecutorDispatchMode  kernel_dispatch_mode,
RenderInfo *  render_info,
const int64_t  rowid_lookup_key 
)
inline

Definition at line 94 of file ExecutionKernel.h.

105  : ra_exe_unit_(ra_exe_unit)
106  , chosen_device_type(chosen_device_type)
107  , chosen_device_id(chosen_device_id)
108  , eo(eo)
109  , column_fetcher(column_fetcher)
110  , query_comp_desc(query_comp_desc)
111  , query_mem_desc(query_mem_desc)
112  , frag_list(frag_list)
113  , kernel_dispatch_mode(kernel_dispatch_mode)
114  , render_info_(render_info)
115  , rowid_lookup_key(rowid_lookup_key) {}

Member Function Documentation

int32_t ExecutionKernel::get_chosen_device_id ( ) const
inline

Definition at line 122 of file ExecutionKernel.h.

References chosen_device_id.

122 { return chosen_device_id; }
FragmentsList ExecutionKernel::get_fragment_list ( ) const
inline

Definition at line 121 of file ExecutionKernel.h.

References frag_list.

121 { return frag_list; }
void ExecutionKernel::run ( Executor *  executor,
const size_t  thread_idx,
SharedKernelContext &  shared_context 
)

Definition at line 129 of file ExecutionKernel.cpp.

References DEBUG_TIMER, QueryMemoryDescriptor::getQueryDescriptionType(), INJECT_TIMER, kernel_dispatch_mode, MultifragmentKernel, query_mem_desc, runImpl(), and OutOfHostMemory::what().

Referenced by Executor::executeUpdate(), and Executor::executeWorkUnitPerFragment().

129 void ExecutionKernel::run(Executor* executor,
130                           const size_t thread_idx,
131                           SharedKernelContext& shared_context) {
132  DEBUG_TIMER("ExecutionKernel::run");
133  INJECT_TIMER(kernel_run);
134  try {
135  runImpl(executor, thread_idx, shared_context);
136  } catch (const OutOfHostMemory& e) {
137  throw QueryExecutionError(ErrorCode::OUT_OF_CPU_MEM, e.what());
138  } catch (const std::bad_alloc& e) {
139  throw QueryExecutionError(ErrorCode::OUT_OF_CPU_MEM, e.what());
140  } catch (const OutOfRenderMemory& e) {
141  throw QueryExecutionError(ErrorCode::OUT_OF_RENDER_MEM, e.what());
142  } catch (const OutOfMemory& e) {
143  throw QueryExecutionError(
144  ErrorCode::OUT_OF_GPU_MEM,
145  e.what(),
146  QueryExecutionProperties{
147  query_mem_desc.getQueryDescriptionType(),
148  kernel_dispatch_mode == ExecutorDispatchMode::MultifragmentKernel});
149  } catch (const ColumnarConversionNotSupported& e) {
150  throw QueryExecutionError(ErrorCode::COLUMNAR_CONVERSION_NOT_SUPPORTED, e.what());
151  } catch (const TooManyLiterals& e) {
152  throw QueryExecutionError(ErrorCode::TOO_MANY_LITERALS, e.what());
153  } catch (const StringConstInResultSet& e) {
154  throw QueryExecutionError(ErrorCode::STRING_CONST_IN_RESULTSET, e.what());
155  } catch (const QueryExecutionError& e) {
156  throw e;
157  }
158 }
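run() is a thin wrapper over runImpl() that translates lower-level failures (host/GPU/render out-of-memory, columnar conversion, literal limits) into QueryExecutionError values carrying an ErrorCode, so callers only need to handle one exception type. Below is a minimal, self-contained sketch of that translation pattern; the exception and error-code types are stand-ins, not the actual QueryEngine definitions.

// Illustrative only: the real exception and ErrorCode types live in the
// QueryEngine; these minimal stand-ins only show the translation pattern
// used by run().
#include <iostream>
#include <new>
#include <stdexcept>
#include <string>

enum class ErrorCode { OUT_OF_CPU_MEM, OUT_OF_GPU_MEM, OUT_OF_RENDER_MEM };

struct OutOfHostMemory : std::runtime_error {
  using std::runtime_error::runtime_error;
};
struct OutOfRenderMemory : std::runtime_error {
  using std::runtime_error::runtime_error;
};

struct QueryExecutionError : std::runtime_error {
  ErrorCode code;
  QueryExecutionError(ErrorCode c, const std::string& what_arg)
      : std::runtime_error(what_arg), code(c) {}
};

template <typename Body>
void run_with_translation(Body&& body) {
  try {
    body();
  } catch (const OutOfHostMemory& e) {
    throw QueryExecutionError(ErrorCode::OUT_OF_CPU_MEM, e.what());
  } catch (const std::bad_alloc& e) {
    throw QueryExecutionError(ErrorCode::OUT_OF_CPU_MEM, e.what());
  } catch (const OutOfRenderMemory& e) {
    throw QueryExecutionError(ErrorCode::OUT_OF_RENDER_MEM, e.what());
  }
}

int main() {
  try {
    run_with_translation([] { throw OutOfHostMemory("fetch buffer"); });
  } catch (const QueryExecutionError& e) {
    std::cout << "translated to QueryExecutionError: " << e.what() << '\n';
  }
  return 0;
}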



void ExecutionKernel::runImpl ( Executor *  executor,
const size_t  thread_idx,
SharedKernelContext &  shared_context 
)
private

Definition at line 183 of file ExecutionKernel.cpp.

References gpu_enabled::accumulate(), SharedKernelContext::addDeviceResults(), ExecutionOptions::allow_runtime_query_interrupt, CHECK, CHECK_EQ, CHECK_GE, CHECK_GT, CHECK_LT, chosen_device_id, chosen_device_type, column_fetcher, QueryFragmentDescriptor::computeAllTablesFragments(), CPU, Data_Namespace::CPU_LEVEL, device_results_, dynamic_watchdog_init(), SharedKernelContext::dynamic_watchdog_set, ExecutionOptions::dynamic_watchdog_time_limit, eo, RelAlgExecutionUnit::estimator, ExecutionOptions::executor_type, Extern, frag_list, g_cpu_sub_task_size, anonymous_namespace{ExecutionKernel.cpp}::get_available_cpu_threads_per_task(), QueryCompilationDescriptor::getCompilationResult(), SharedKernelContext::getFragOffsets(), QueryMemoryDescriptor::getQueryDescriptionType(), getQueryEngineCudaStreamForDevice(), QueryMemoryDescriptor::getQueryExecutionContext(), SharedKernelContext::getQueryInfos(), GPU, Data_Namespace::GPU_LEVEL, RelAlgExecutionUnit::groupby_exprs, QueryCompilationDescriptor::hoistLiterals(), logger::INFO, RelAlgExecutionUnit::input_descs, heavyai::InSituFlagsOwnerInterface::isInSitu(), kernel_dispatch_mode, KernelPerFragment, LOG, Executor::max_gpu_count, MultifragmentKernel, Native, anonymous_namespace{ExecutionKernel.cpp}::need_to_hold_chunk(), ExecutionOptions::optimize_cuda_block_and_grid_sizes, CompilationResult::output_columnar, heavyai::Projection, query_comp_desc, anonymous_namespace{ExecutionKernel.cpp}::query_has_inner_join(), query_mem_desc, ra_exe_unit_, render_info_, rowid_lookup_key, run_query_external(), RelAlgExecutionUnit::scan_limit, serialize_to_sql(), QueryMemoryDescriptor::setAvailableCpuThreads(), QueryMemoryDescriptor::sortOnGpu(), RelAlgExecutionUnit::target_exprs, target_exprs_to_infos(), to_string(), RelAlgExecutionUnit::union_all, VLOG, and ExecutionOptions::with_dynamic_watchdog.

Referenced by run().

183 void ExecutionKernel::runImpl(Executor* executor,
184                               const size_t thread_idx,
185                               SharedKernelContext& shared_context) {
186  CHECK(executor);
187  const auto memory_level = chosen_device_type == ExecutorDeviceType::GPU
188                                 ? Data_Namespace::GPU_LEVEL
189                                 : Data_Namespace::CPU_LEVEL;
190  CHECK_GE(frag_list.size(), size_t(1));
191  // frag_list[0].table_id is how we tell which query we are running for UNION ALL.
192  const auto& outer_table_key = ra_exe_unit_.union_all
193  ? frag_list[0].table_key
194  : ra_exe_unit_.input_descs[0].getTableKey();
195  CHECK_EQ(frag_list[0].table_key, outer_table_key);
196  const auto& outer_tab_frag_ids = frag_list[0].fragment_ids;
197 
198  CHECK_GE(chosen_device_id, 0);
199  CHECK_LT(chosen_device_id, Executor::max_gpu_count);
200 
201  auto data_mgr = executor->getDataMgr();
202  executor->logSystemCPUMemoryStatus("Before Query Execution", thread_idx);
203  if (chosen_device_type == ExecutorDeviceType::GPU) {
204  executor->logSystemGPUMemoryStatus("Before Query Execution", thread_idx);
205  }
206 
207  // need to own them while query executes
208  auto chunk_iterators_ptr = std::make_shared<std::list<ChunkIter>>();
209  std::list<std::shared_ptr<Chunk_NS::Chunk>> chunks;
210  std::unique_ptr<std::lock_guard<std::mutex>> gpu_lock;
211  std::unique_ptr<CudaAllocator> device_allocator;
212  if (chosen_device_type == ExecutorDeviceType::GPU) {
213  gpu_lock.reset(
214  new std::lock_guard<std::mutex>(executor->gpu_exec_mutex_[chosen_device_id]));
215  device_allocator = std::make_unique<CudaAllocator>(
216  data_mgr, chosen_device_id, getQueryEngineCudaStreamForDevice(chosen_device_id));
217  }
218  std::shared_ptr<FetchResult> fetch_result(new FetchResult);
219  try {
220  std::map<shared::TableKey, const TableFragments*> all_tables_fragments;
222  all_tables_fragments, ra_exe_unit_, shared_context.getQueryInfos());
223 
224  *fetch_result = ra_exe_unit_.union_all
225  ? executor->fetchUnionChunks(column_fetcher,
226  ra_exe_unit_,
228  memory_level,
229  all_tables_fragments,
230  frag_list,
231  *chunk_iterators_ptr,
232  chunks,
233  device_allocator.get(),
234  thread_idx,
236  : executor->fetchChunks(column_fetcher,
237  ra_exe_unit_,
239  memory_level,
240  all_tables_fragments,
241  frag_list,
242  *chunk_iterators_ptr,
243  chunks,
244  device_allocator.get(),
245  thread_idx,
247  if (fetch_result->num_rows.empty()) {
248  return;
249  }
251  !shared_context.dynamic_watchdog_set.test_and_set(std::memory_order_acquire)) {
254  LOG(INFO) << "Dynamic Watchdog budget: CPU: "
256  << std::to_string(cycle_budget) << " cycles";
257  }
258  } catch (const OutOfMemory&) {
259  throw QueryExecutionError(
260  memory_level == Data_Namespace::GPU_LEVEL ? ErrorCode::OUT_OF_GPU_MEM
261  : ErrorCode::OUT_OF_CPU_MEM,
265  return;
266  }
267 
269  if (ra_exe_unit_.input_descs.size() > 1) {
270  throw std::runtime_error("Joins not supported through external execution");
271  }
272  const auto query = serialize_to_sql(&ra_exe_unit_);
273  GroupByAndAggregate group_by_and_aggregate(executor,
275  ra_exe_unit_,
276  shared_context.getQueryInfos(),
277  executor->row_set_mem_owner_,
278  std::nullopt);
279  const auto query_mem_desc =
280  group_by_and_aggregate.initQueryMemoryDescriptor(false, 0, 8, nullptr, false);
282  query,
283  *fetch_result,
284  executor->plan_state_.get(),
288  executor});
289  shared_context.addDeviceResults(std::move(device_results_), outer_tab_frag_ids);
290  return;
291  }
292  const CompilationResult& compilation_result = query_comp_desc.getCompilationResult();
293  std::unique_ptr<QueryExecutionContext> query_exe_context_owned;
294  const bool do_render = render_info_ && render_info_->isInSitu();
295 
296  int64_t total_num_input_rows{-1};
298  query_mem_desc.getQueryDescriptionType() == QueryDescriptionType::Projection) {
299  total_num_input_rows = 0;
300  std::for_each(fetch_result->num_rows.begin(),
301  fetch_result->num_rows.end(),
302  [&total_num_input_rows](const std::vector<int64_t>& frag_row_count) {
303  total_num_input_rows = std::accumulate(frag_row_count.begin(),
304  frag_row_count.end(),
305  total_num_input_rows);
306  });
307  VLOG(2) << "total_num_input_rows=" << total_num_input_rows;
308  // TODO(adb): we may want to take this early out for all queries, but we are most
309  // likely to see this query pattern on the kernel per fragment path (e.g. with HAVING
310  // 0=1)
311  if (total_num_input_rows == 0) {
312  return;
313  }
314 
316  total_num_input_rows *= ra_exe_unit_.input_descs.size();
317  }
318  }
319 
320  uint32_t start_rowid{0};
321  if (rowid_lookup_key >= 0) {
322  if (!frag_list.empty()) {
323  const auto& all_frag_row_offsets = shared_context.getFragOffsets();
324  start_rowid = rowid_lookup_key -
325  all_frag_row_offsets[frag_list.begin()->fragment_ids.front()];
326  }
327  }
328 
329  // determine the # available CPU threads for each kernel to parallelize rest of
330  // initialization steps when necessary
331  query_mem_desc.setAvailableCpuThreads(
332  get_available_cpu_threads_per_task(executor, shared_context));
333 
334 #ifdef HAVE_TBB
335  bool can_run_subkernels = shared_context.getThreadPool() != nullptr;
336 
337  // Sub-tasks are supported for groupby queries and estimators only for now.
338  bool is_groupby =
339  (ra_exe_unit_.groupby_exprs.size() > 1) ||
340  (ra_exe_unit_.groupby_exprs.size() == 1 && ra_exe_unit_.groupby_exprs.front());
341  can_run_subkernels = can_run_subkernels && (is_groupby || ra_exe_unit_.estimator);
342 
343  // In case some column is lazily fetched, we cannot mix different fragments in a single
344  // ResultSet.
345  can_run_subkernels =
346  can_run_subkernels && !executor->hasLazyFetchColumns(ra_exe_unit_.target_exprs);
347 
348  // TODO: Use another structure to hold chunks. Currently, ResultSet holds them, but with
349  // sub-tasks chunk can be referenced by many ResultSets. So, some outer structure to
350  // hold all ResultSets and all chunks is required.
351  can_run_subkernels =
352  can_run_subkernels &&
354  chunks, ra_exe_unit_, std::vector<ColumnLazyFetchInfo>(), chosen_device_type);
355 
356  // TODO: check for literals? We serialize literals before execution and hold them in
357  // result sets. Can we simply do it once and holdin an outer structure?
358  if (can_run_subkernels) {
359  size_t total_rows = fetch_result->num_rows[0][0];
360  size_t sub_size = g_cpu_sub_task_size;
361 
362  for (size_t sub_start = start_rowid; sub_start < total_rows; sub_start += sub_size) {
363  sub_size = (sub_start + sub_size > total_rows) ? total_rows - sub_start : sub_size;
364  auto subtask = std::make_shared<KernelSubtask>(*this,
365  shared_context,
366  fetch_result,
367  chunk_iterators_ptr,
368  total_num_input_rows,
369  sub_start,
370  sub_size,
371  thread_idx);
372  shared_context.getThreadPool()->run(
373  [subtask, executor] { subtask->run(executor); });
374  }
375 
376  return;
377  }
378 #endif // HAVE_TBB
379 
381  try {
382  // std::unique_ptr<QueryExecutionContext> query_exe_context_owned
383  // has std::unique_ptr<QueryMemoryInitializer> query_buffers_
384  // has std::vector<std::unique_ptr<ResultSet>> result_sets_
385  // has std::unique_ptr<ResultSetStorage> storage_
386  // which are initialized and possibly allocated here.
387  query_exe_context_owned =
388  query_mem_desc.getQueryExecutionContext(ra_exe_unit_,
389  executor,
393  outer_table_key,
394  total_num_input_rows,
395  fetch_result->col_buffers,
396  fetch_result->frag_offsets,
397  executor->getRowSetMemoryOwner(),
398  compilation_result.output_columnar,
399  query_mem_desc.sortOnGpu(),
400  thread_idx,
401  do_render ? render_info_ : nullptr);
402  } catch (const OutOfHostMemory& e) {
403  throw QueryExecutionError(ErrorCode::OUT_OF_CPU_MEM);
404  }
405  }
406  QueryExecutionContext* query_exe_context{query_exe_context_owned.get()};
407  CHECK(query_exe_context);
408  int32_t err{0};
409  bool optimize_cuda_block_and_grid_sizes =
412 
413  executor->logSystemCPUMemoryStatus("After Query Memory Initialization", thread_idx);
414 
415  if (ra_exe_unit_.groupby_exprs.empty()) {
416  err = executor->executePlanWithoutGroupBy(ra_exe_unit_,
417  compilation_result,
422  fetch_result->col_buffers,
423  query_exe_context,
424  fetch_result->num_rows,
425  fetch_result->frag_offsets,
426  data_mgr,
428  start_rowid,
429  ra_exe_unit_.input_descs.size(),
431  do_render ? render_info_ : nullptr,
432  optimize_cuda_block_and_grid_sizes);
433  } else {
434  if (ra_exe_unit_.union_all) {
435  VLOG(1) << "outer_table_key=" << outer_table_key
436  << " ra_exe_unit_.scan_limit=" << ra_exe_unit_.scan_limit;
437  }
438  err = executor->executePlanWithGroupBy(ra_exe_unit_,
439  compilation_result,
443  fetch_result->col_buffers,
444  outer_tab_frag_ids,
445  query_exe_context,
446  fetch_result->num_rows,
447  fetch_result->frag_offsets,
448  data_mgr,
450  outer_table_key,
452  start_rowid,
453  ra_exe_unit_.input_descs.size(),
455  do_render ? render_info_ : nullptr,
456  optimize_cuda_block_and_grid_sizes);
457  }
458  if (device_results_) {
459  std::list<std::shared_ptr<Chunk_NS::Chunk>> chunks_to_hold;
460  for (const auto& chunk : chunks) {
461  if (need_to_hold_chunk(chunk.get(),
462  ra_exe_unit_,
463  device_results_->getLazyFetchInfo(),
465  chunks_to_hold.push_back(chunk);
466  }
467  }
468  device_results_->holdChunks(chunks_to_hold);
469  device_results_->holdChunkIterators(chunk_iterators_ptr);
470  } else {
471  VLOG(1) << "null device_results.";
472  }
473  if (err) {
474  throw QueryExecutionError(err);
475  }
476  shared_context.addDeviceResults(std::move(device_results_), outer_tab_frag_ids);
477  executor->logSystemCPUMemoryStatus("After Query Execution", thread_idx);
478  if (chosen_device_type == ExecutorDeviceType::GPU) {
479  executor->logSystemGPUMemoryStatus("After Query Execution", thread_idx);
480  }
481 }
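Two pieces of the listing above lend themselves to small stand-alone illustrations. First, for projection queries on the kernel-per-fragment path, runImpl() totals the fetched row counts across fragments and returns early when that total is zero. The sketch below reproduces that accumulation with plain std::accumulate; the nested num_rows layout is assumed to mirror fetch_result->num_rows, and total_input_rows is a hypothetical helper name.

// Illustrative sketch: sum per-fragment row counts the way runImpl() does on
// the kernel-per-fragment projection path; num_rows is assumed to mirror the
// nested fetch_result->num_rows layout (one inner vector of counts per
// fetched fragment group).
#include <cstdint>
#include <iostream>
#include <numeric>
#include <vector>

int64_t total_input_rows(const std::vector<std::vector<int64_t>>& num_rows) {
  int64_t total = 0;
  for (const auto& frag_row_count : num_rows) {
    total = std::accumulate(frag_row_count.begin(), frag_row_count.end(), total);
  }
  return total;
}

int main() {
  const std::vector<std::vector<int64_t>> num_rows = {{100, 250}, {0, 75}};
  const auto total = total_input_rows(num_rows);
  if (total == 0) {
    std::cout << "no input rows: early out\n";  // mirrors the early return
  } else {
    std::cout << "total_num_input_rows=" << total << '\n';
  }
  return 0;
}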
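Second, when TBB is available and the query qualifies (a group-by or estimator without lazily fetched columns), runImpl() carves the fragment's rows into KernelSubtask chunks of g_cpu_sub_task_size rows starting at start_rowid. The sketch below reproduces just that range split; make_sub_tasks is a hypothetical helper, and the real code enqueues KernelSubtask objects on the shared thread pool rather than returning ranges.

// Illustrative only: shows how a fragment's row range could be carved into
// fixed-size sub-tasks, mirroring the HAVE_TBB branch above. The names
// start_rowid, total_rows, and sub_task_size follow the listing; everything
// else is a stand-in.
#include <cstddef>
#include <iostream>
#include <utility>
#include <vector>

std::vector<std::pair<size_t, size_t>> make_sub_tasks(size_t start_rowid,
                                                      size_t total_rows,
                                                      size_t sub_task_size) {
  std::vector<std::pair<size_t, size_t>> ranges;  // {sub_start, sub_size}
  for (size_t sub_start = start_rowid; sub_start < total_rows;
       sub_start += sub_task_size) {
    // Clamp the last sub-task so it does not run past the fragment.
    const size_t sub_size = (sub_start + sub_task_size > total_rows)
                                ? total_rows - sub_start
                                : sub_task_size;
    ranges.emplace_back(sub_start, sub_size);
  }
  return ranges;
}

int main() {
  for (const auto& [start, size] : make_sub_tasks(/*start_rowid=*/0,
                                                  /*total_rows=*/2500000,
                                                  /*sub_task_size=*/1000000)) {
    std::cout << "sub-task: start=" << start << " rows=" << size << '\n';
  }
  return 0;
}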



Friends And Related Function Documentation

friend class KernelSubtask
friend

Definition at line 143 of file ExecutionKernel.h.

Member Data Documentation

int ExecutionKernel::chosen_device_id
private

Definition at line 127 of file ExecutionKernel.h.

Referenced by get_chosen_device_id(), and runImpl().

const ExecutorDeviceType ExecutionKernel::chosen_device_type
private

Definition at line 126 of file ExecutionKernel.h.

Referenced by runImpl().

const ColumnFetcher& ExecutionKernel::column_fetcher
private

Definition at line 129 of file ExecutionKernel.h.

Referenced by runImpl().

ResultSetPtr ExecutionKernel::device_results_
private

Definition at line 137 of file ExecutionKernel.h.

Referenced by runImpl().

const ExecutionOptions& ExecutionKernel::eo
private

Definition at line 128 of file ExecutionKernel.h.

Referenced by runImpl().

const FragmentsList ExecutionKernel::frag_list
private

Definition at line 132 of file ExecutionKernel.h.

Referenced by get_fragment_list(), and runImpl().

const ExecutorDispatchMode ExecutionKernel::kernel_dispatch_mode
private

Definition at line 133 of file ExecutionKernel.h.

Referenced by run(), and runImpl().

const QueryCompilationDescriptor& ExecutionKernel::query_comp_desc
private

Definition at line 130 of file ExecutionKernel.h.

Referenced by runImpl().

const QueryMemoryDescriptor& ExecutionKernel::query_mem_desc
private

Definition at line 131 of file ExecutionKernel.h.

Referenced by run(), and runImpl().

const RelAlgExecutionUnit& ExecutionKernel::ra_exe_unit_

Definition at line 123 of file ExecutionKernel.h.

Referenced by runImpl().

RenderInfo* ExecutionKernel::render_info_
private

Definition at line 134 of file ExecutionKernel.h.

Referenced by runImpl().

const int64_t ExecutionKernel::rowid_lookup_key
private

Definition at line 135 of file ExecutionKernel.h.

Referenced by runImpl().


The documentation for this class was generated from the following files:

ExecutionKernel.h
ExecutionKernel.cpp