bool needs_skip_result(const ResultSetPtr& res) {
  return !res || res->definitelyHasNoRows();
}
bool query_has_inner_join(const RelAlgExecutionUnit& ra_exe_unit) {
  return (std::count_if(ra_exe_unit.join_quals.begin(),
                        ra_exe_unit.join_quals.end(),
                        [](const auto& join_condition) {
                          return join_condition.type == JoinType::INNER;
                        }) > 0);
}
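// A minimal, standalone sketch of the count_if-over-quals idiom above, assuming
// only the standard library; the Sketch* types are illustrative stand-ins for
// the engine's join structures, not its real API.
#include <algorithm>
#include <vector>

enum class SketchJoinType { INNER, LEFT };
struct SketchJoinCondition {
  SketchJoinType type;
};

inline bool sketch_has_inner_join(const std::vector<SketchJoinCondition>& quals) {
  return std::count_if(quals.begin(), quals.end(), [](const SketchJoinCondition& jc) {
           return jc.type == SketchJoinType::INNER;
         }) > 0;
}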
// Returns true iff the result set must keep the chunk pinned while it is
// iterated, e.g. arrays or none-encoded strings on CPU, or lazily fetched columns.
bool need_to_hold_chunk(const Chunk_NS::Chunk* chunk,
                        const RelAlgExecutionUnit& ra_exe_unit,
                        const std::vector<ColumnLazyFetchInfo>& lazy_fetch_info,
                        const ExecutorDeviceType device_type) {
  CHECK(chunk->getColumnDesc());
  const auto& chunk_ti = chunk->getColumnDesc()->columnType;
  if (device_type == ExecutorDeviceType::CPU &&
      (chunk_ti.is_array() ||
       (chunk_ti.is_string() && chunk_ti.get_compression() == kENCODING_NONE))) {
    for (const auto target_expr : ra_exe_unit.target_exprs) {
      // ... return true if the target expression references this chunk's column
    }
  }
  if (lazy_fetch_info.empty()) {
    return false;
  }
  CHECK_EQ(lazy_fetch_info.size(), ra_exe_unit.target_exprs.size());
  for (size_t i = 0; i < ra_exe_unit.target_exprs.size(); i++) {
    const auto& col_lazy_fetch = lazy_fetch_info[i];
    if (col_lazy_fetch.is_lazily_fetched) {
      // ... return true if the lazily fetched column comes from this chunk
    }
  }
  return false;
}
bool need_to_hold_chunk(const std::list<std::shared_ptr<Chunk_NS::Chunk>>& chunks,
                        const RelAlgExecutionUnit& ra_exe_unit,
                        const std::vector<ColumnLazyFetchInfo>& lazy_fetch_info,
                        const ExecutorDeviceType device_type) {
  for (const auto& chunk : chunks) {
    if (need_to_hold_chunk(chunk.get(), ra_exe_unit, lazy_fetch_info, device_type)) {
      return true;
    }
  }
  return false;
}
const std::vector<uint64_t>& SharedKernelContext::getFragOffsets() {
  std::lock_guard<std::mutex> lock(all_frag_row_offsets_mutex_);
  if (all_frag_row_offsets_.empty()) {
    all_frag_row_offsets_.resize(query_infos_.front().info.fragments.size() + 1);
    for (size_t i = 1; i <= query_infos_.front().info.fragments.size(); ++i) {
      all_frag_row_offsets_[i] =
          all_frag_row_offsets_[i - 1] +
          query_infos_.front().info.fragments[i - 1].getNumTuples();
    }
  }
  return all_frag_row_offsets_;
}
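// A standalone sketch of the offset table built above, standard library only:
// offsets[i] is the global row id of the first row of fragment i, i.e. the
// running sum of the per-fragment tuple counts that precede it.
#include <cstddef>
#include <cstdint>
#include <vector>

inline std::vector<uint64_t> sketch_frag_offsets(const std::vector<uint64_t>& num_tuples) {
  std::vector<uint64_t> offsets(num_tuples.size() + 1, 0);
  for (size_t i = 1; i <= num_tuples.size(); ++i) {
    offsets[i] = offsets[i - 1] + num_tuples[i - 1];
  }
  return offsets;  // e.g. counts {100, 50} -> offsets {0, 100, 150}
}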
void SharedKernelContext::addDeviceResults(ResultSetPtr&& device_results,
                                           std::vector<size_t> outer_table_fragment_ids) {
  std::lock_guard<std::mutex> lock(reduce_mutex_);
  all_fragment_results_.emplace_back(std::move(device_results),
                                     outer_table_fragment_ids);
}

std::vector<std::pair<ResultSetPtr, std::vector<size_t>>>&
SharedKernelContext::getFragmentResults() {
  return all_fragment_results_;
}
void ExecutionKernel::run(Executor* executor,
                          const size_t thread_idx,
                          SharedKernelContext& shared_context) {
  DEBUG_TIMER("ExecutionKernel::run");
  INJECT_TIMER(kernel_run);
  try {
    runImpl(executor, thread_idx, shared_context);
  } catch (const std::bad_alloc& e) {
    throw QueryExecutionError(ErrorCode::OUT_OF_CPU_MEM, e.what());
  } catch (const OutOfMemory& e) {
    throw QueryExecutionError(
        ErrorCode::OUT_OF_GPU_MEM,
        e.what(),
        QueryExecutionProperties{query_mem_desc.getQueryDescriptionType(),
                                 kernel_dispatch_mode ==
                                     ExecutorDispatchMode::MultifragmentKernel});
  }
}
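// A minimal, standalone sketch of the translation pattern above, assuming only
// the standard library; SketchQueryError and sketch_run_guarded are
// illustrative stand-ins, not engine types. Allocation failures thrown by the
// kernel body surface to the scheduler as one typed, matchable error.
#include <new>
#include <stdexcept>

struct SketchQueryError : std::runtime_error {
  enum class Code { OUT_OF_CPU_MEM, OUT_OF_GPU_MEM } code;
  SketchQueryError(Code c, const char* msg) : std::runtime_error(msg), code(c) {}
};

template <typename F>
void sketch_run_guarded(F&& body) {
  try {
    body();
  } catch (const std::bad_alloc& e) {
    throw SketchQueryError(SketchQueryError::Code::OUT_OF_CPU_MEM, e.what());
  }
}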
size_t get_available_cpu_threads_per_task(Executor* executor,
                                          SharedKernelContext& shared_context) {
  // Number of kernels the idle CPU slots must be shared between.
  auto const num_kernels = shared_context.getNumAllocatedThreads();
  CHECK_GE(num_kernels, 1u);
  size_t available_slots_per_task;
  if (executor->executor_resource_mgr_) {
    auto const resources_status = executor->executor_resource_mgr_->get_resource_info();
    auto const idle_cpu_slots =
        resources_status.total_cpu_slots - resources_status.allocated_cpu_slots;
    // Distribute idle slots evenly (ceiling division); each task keeps at least
    // the slot it is already running on.
    available_slots_per_task = 1u + (idle_cpu_slots + num_kernels - 1u) / num_kernels;
  } else {
    available_slots_per_task = std::max(static_cast<size_t>(cpu_threads()) / num_kernels,
                                        static_cast<size_t>(1));
  }
  CHECK_GE(available_slots_per_task, 1u);
  return available_slots_per_task;
}
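// A standalone sketch of the distribution arithmetic above, standard library
// only. Ceiling division hands out every idle slot while each task keeps the
// slot it already runs on: idle = 10, kernels = 4 gives 1 + ceil(10/4) = 4.
#include <cstddef>

inline size_t sketch_slots_per_task(size_t idle_cpu_slots, size_t num_kernels) {
  return 1u + (idle_cpu_slots + num_kernels - 1u) / num_kernels;
}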
void ExecutionKernel::runImpl(Executor* executor,
                              const size_t thread_idx,
                              SharedKernelContext& shared_context) {
  CHECK(executor);
  const auto memory_level = chosen_device_type == ExecutorDeviceType::GPU
                                ? Data_Namespace::GPU_LEVEL
                                : Data_Namespace::CPU_LEVEL;
  CHECK_GE(frag_list.size(), size_t(1));
  const auto& outer_tab_frag_ids = frag_list[0].fragment_ids;
  CHECK_GE(chosen_device_id, 0);
  CHECK_LT(chosen_device_id, Executor::max_gpu_count);
  auto data_mgr = executor->getDataMgr();
  executor->logSystemCPUMemoryStatus("Before Query Execution", thread_idx);
  if (chosen_device_type == ExecutorDeviceType::GPU) {
    executor->logSystemGPUMemoryStatus("Before Query Execution", thread_idx);
  }
  // The chunks must stay owned for as long as the query executes.
  auto chunk_iterators_ptr = std::make_shared<std::list<ChunkIter>>();
  std::list<std::shared_ptr<Chunk_NS::Chunk>> chunks;
  std::unique_ptr<std::lock_guard<std::mutex>> gpu_lock;
  std::unique_ptr<CudaAllocator> device_allocator;
  if (chosen_device_type == ExecutorDeviceType::GPU) {
    gpu_lock.reset(
        new std::lock_guard<std::mutex>(executor->gpu_exec_mutex_[chosen_device_id]));
    device_allocator = std::make_unique<CudaAllocator>(
        data_mgr, chosen_device_id, getQueryEngineCudaStreamForDevice(chosen_device_id));
  }
  std::shared_ptr<FetchResult> fetch_result(new FetchResult);
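  // gpu_lock above uses a deferred-RAII idiom worth noting: a
  // unique_ptr<lock_guard> acquires the per-device mutex only on the GPU
  // branch, yet still releases it when the enclosing scope unwinds. A minimal
  // sketch of the same idiom in isolation (sketch_* names are illustrative):
  auto sketch_deferred_lock = [](bool need_lock, std::mutex& device_mutex) {
    std::unique_ptr<std::lock_guard<std::mutex>> guard;
    if (need_lock) {
      guard = std::make_unique<std::lock_guard<std::mutex>>(device_mutex);
    }
    // ... do work; the mutex, if taken, is held until guard is destroyed
  };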
  try {
    std::map<shared::TableKey, const TableFragments*> all_tables_fragments;
    QueryFragmentDescriptor::computeAllTablesFragments(
        all_tables_fragments, ra_exe_unit_, shared_context.getQueryInfos());
    // Fetch the input chunks; UNION ALL queries take a dedicated fetch path.
    *fetch_result = ra_exe_unit_.union_all
                        ? executor->fetchUnionChunks(
                              column_fetcher, ra_exe_unit_, chosen_device_id,
                              memory_level, all_tables_fragments, frag_list,
                              *chunk_iterators_ptr, chunks, device_allocator.get(),
                              thread_idx, eo.allow_runtime_query_interrupt)
                        : executor->fetchChunks(
                              column_fetcher, ra_exe_unit_, chosen_device_id,
                              memory_level, all_tables_fragments, frag_list,
                              *chunk_iterators_ptr, chunks, device_allocator.get(),
                              thread_idx, eo.allow_runtime_query_interrupt);
    if (fetch_result->num_rows.empty()) {
      return;
    }
    if (eo.with_dynamic_watchdog &&
        !dynamic_watchdog_set.test_and_set(std::memory_order_acquire)) {
      // Arm the watchdog exactly once, from the first kernel to get here.
      auto cycle_budget = dynamic_watchdog_init(eo.dynamic_watchdog_time_limit);
      LOG(INFO) << "Dynamic Watchdog budget: CPU: "
                << std::to_string(eo.dynamic_watchdog_time_limit) << "ms, "
                << std::to_string(cycle_budget) << " cycles";
    }
  } catch (const OutOfMemory&) {
    throw QueryExecutionError(memory_level == Data_Namespace::GPU_LEVEL
                                  ? ErrorCode::OUT_OF_GPU_MEM
                                  : ErrorCode::OUT_OF_CPU_MEM);
  }
  if (eo.executor_type == ExecutorType::Extern) {
    if (ra_exe_unit_.input_descs.size() > 1) {
      throw std::runtime_error("Joins not supported through external execution");
    }
    const auto query = serialize_to_sql(&ra_exe_unit_);
    GroupByAndAggregate group_by_and_aggregate(
        executor, ExecutorDeviceType::CPU, ra_exe_unit_,
        shared_context.getQueryInfos(), executor->row_set_mem_owner_, std::nullopt);
    const auto query_mem_desc =
        group_by_and_aggregate.initQueryMemoryDescriptor(false, 0, 8, nullptr, false);
    device_results_ = run_query_external(
        query, *fetch_result, executor->plan_state_.get(),
        ExternalQueryOutputSpec{/* ... */});
    shared_context.addDeviceResults(std::move(device_results_), outer_tab_frag_ids);
    return;
  }
  std::unique_ptr<QueryExecutionContext> query_exe_context_owned;
  const bool do_render = render_info_ && render_info_->isInSitu();
  int64_t total_num_input_rows{-1};
  if (kernel_dispatch_mode == ExecutorDispatchMode::KernelPerFragment &&
      query_mem_desc.getQueryDescriptionType() == QueryDescriptionType::Projection) {
    total_num_input_rows = 0;
    std::for_each(fetch_result->num_rows.begin(),
                  fetch_result->num_rows.end(),
                  [&total_num_input_rows](const std::vector<int64_t>& frag_row_count) {
                    total_num_input_rows = std::accumulate(frag_row_count.begin(),
                                                           frag_row_count.end(),
                                                           total_num_input_rows);
                  });
    VLOG(2) << "total_num_input_rows=" << total_num_input_rows;
    if (total_num_input_rows == 0) {
      return;
    }
    // ... inner joins scale total_num_input_rows by the number of input tables
  }
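  // A minimal sketch of the accumulation above, in isolation: num_rows is a
  // per-fragment list of per-table counts, flattened into one total
  // (sketch_* is illustrative, standard library only).
  auto sketch_total_rows = [](const std::vector<std::vector<int64_t>>& num_rows) {
    int64_t total{0};
    for (const auto& frag_row_count : num_rows) {
      total = std::accumulate(frag_row_count.begin(), frag_row_count.end(), total);
    }
    return total;  // e.g. {{10, 5}, {20}} -> 35
  };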
  uint32_t start_rowid{0};
  if (rowid_lookup_key >= 0) {
    if (!frag_list.empty()) {
      const auto& all_frag_row_offsets = shared_context.getFragOffsets();
      start_rowid = rowid_lookup_key -
                    all_frag_row_offsets[frag_list.begin()->fragment_ids.front()];
    }
  }
  bool can_run_subkernels = shared_context.getThreadPool() != nullptr;
  // ... a chain of further checks (can_run_subkernels && ...) narrows this
  // down: the row range is split only when the query shape permits it.
  if (can_run_subkernels) {
    size_t total_rows = fetch_result->num_rows[0][0];
    size_t sub_size = g_cpu_sub_task_size;
    for (size_t sub_start = start_rowid; sub_start < total_rows; sub_start += sub_size) {
      sub_size = (sub_start + sub_size > total_rows) ? total_rows - sub_start : sub_size;
      auto subtask = std::make_shared<KernelSubtask>(*this,
                                                     shared_context,
                                                     fetch_result,
                                                     chunk_iterators_ptr,
                                                     total_num_input_rows,
                                                     sub_start,
                                                     sub_size,
                                                     thread_idx);
      shared_context.getThreadPool()->run(
          [subtask, executor] { subtask->run(executor); });
    }
    return;
  }
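  // A minimal sketch of the splitting loop above, in isolation: the half-open
  // row range [start, total) is cut into pieces of at most chunk rows, the
  // last piece clamped to the end (sketch_* is illustrative).
  auto sketch_split_rows = [](size_t start, size_t total, size_t chunk) {
    std::vector<std::pair<size_t, size_t>> pieces;  // {sub_start, sub_size}
    for (size_t sub_start = start; sub_start < total; sub_start += chunk) {
      pieces.emplace_back(sub_start,
                          sub_start + chunk > total ? total - sub_start : chunk);
    }
    return pieces;  // e.g. (0, 10, 4) -> {0,4} {4,4} {8,2}
  };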
  const auto& compilation_result = query_comp_desc.getCompilationResult();
  const shared::TableKey& outer_table_key =
      ra_exe_unit_.union_all ? frag_list[0].table_key
                             : ra_exe_unit_.input_descs[0].getTableKey();
  query_exe_context_owned = query_mem_desc.getQueryExecutionContext(
      ra_exe_unit_, executor, chosen_device_type, kernel_dispatch_mode,
      chosen_device_id, outer_table_key, total_num_input_rows,
      fetch_result->col_buffers, fetch_result->frag_offsets,
      executor->getRowSetMemoryOwner(), compilation_result.output_columnar,
      query_mem_desc.sortOnGpu(), thread_idx, do_render ? render_info_ : nullptr);
  QueryExecutionContext* query_exe_context{query_exe_context_owned.get()};
  CHECK(query_exe_context);
  int32_t err{0};
  bool optimize_cuda_block_and_grid_sizes =
      chosen_device_type == ExecutorDeviceType::GPU &&
      eo.optimize_cuda_block_and_grid_sizes;
  executor->logSystemCPUMemoryStatus("After Query Memory Initialization", thread_idx);
  if (ra_exe_unit_.groupby_exprs.empty()) {
    err = executor->executePlanWithoutGroupBy(
        ra_exe_unit_, compilation_result, query_comp_desc.hoistLiterals(),
        &device_results_, ra_exe_unit_.target_exprs, chosen_device_type,
        fetch_result->col_buffers, query_exe_context, fetch_result->num_rows,
        fetch_result->frag_offsets, data_mgr, chosen_device_id, start_rowid,
        ra_exe_unit_.input_descs.size(), eo.allow_runtime_query_interrupt,
        do_render ? render_info_ : nullptr, optimize_cuda_block_and_grid_sizes);
  } else {
    VLOG(1) << "outer_table_key=" << outer_table_key;
    err = executor->executePlanWithGroupBy(
        ra_exe_unit_, compilation_result, query_comp_desc.hoistLiterals(),
        &device_results_, chosen_device_type, fetch_result->col_buffers,
        outer_tab_frag_ids, query_exe_context, fetch_result->num_rows,
        fetch_result->frag_offsets, data_mgr, chosen_device_id, outer_table_key,
        ra_exe_unit_.scan_limit, start_rowid, ra_exe_unit_.input_descs.size(),
        eo.allow_runtime_query_interrupt, do_render ? render_info_ : nullptr,
        optimize_cuda_block_and_grid_sizes);
  }
  if (device_results_) {
    std::list<std::shared_ptr<Chunk_NS::Chunk>> chunks_to_hold;
    for (const auto& chunk : chunks) {
      if (need_to_hold_chunk(chunk.get(),
                             ra_exe_unit_,
                             device_results_->getLazyFetchInfo(),
                             chosen_device_type)) {
        chunks_to_hold.push_back(chunk);
      }
    }
    // Keep the chunks (and their iterators) alive while the result set is iterated.
    device_results_->holdChunks(chunks_to_hold);
    device_results_->holdChunkIterators(chunk_iterators_ptr);
  } else {
    VLOG(1) << "null device_results.";
  }
  executor->logSystemCPUMemoryStatus("After Query Execution", thread_idx);
  if (chosen_device_type == ExecutorDeviceType::GPU) {
    executor->logSystemGPUMemoryStatus("After Query Execution", thread_idx);
  }
  shared_context.addDeviceResults(std::move(device_results_), outer_tab_frag_ids);
}
void KernelSubtask::run(Executor* executor) {
  try {
    runImpl(executor);
  } catch (const std::bad_alloc& e) {
    throw QueryExecutionError(ErrorCode::OUT_OF_CPU_MEM, e.what());
  } catch (const OutOfMemory& e) {
    throw QueryExecutionError(
        ErrorCode::OUT_OF_GPU_MEM,
        e.what(),
        QueryExecutionProperties{kernel_.query_mem_desc.getQueryDescriptionType(),
                                 kernel_.kernel_dispatch_mode ==
                                     ExecutorDispatchMode::MultifragmentKernel});
  }
}
void KernelSubtask::runImpl(Executor* executor) {
  auto& query_exe_context_owned = shared_context_.getTlsExecutionContext().local();
  const bool do_render = kernel_.render_info_ && kernel_.render_info_->isInSitu();
  const auto& compilation_result = kernel_.query_comp_desc.getCompilationResult();
  const shared::TableKey& outer_table_key =
      kernel_.ra_exe_unit_.union_all ? kernel_.frag_list[0].table_key
                                     : kernel_.ra_exe_unit_.input_descs[0].getTableKey();

  if (!query_exe_context_owned) {
    // Build this thread's context once; the buffers only need the right shapes
    // here, their contents are supplied per call below.
    std::vector<std::vector<const int8_t*>> col_buffers(
        fetch_result_->col_buffers.size(),
        std::vector<const int8_t*>(fetch_result_->col_buffers[0].size()));
    std::vector<std::vector<uint64_t>> frag_offsets(
        fetch_result_->frag_offsets.size(),
        std::vector<uint64_t>(fetch_result_->frag_offsets[0].size()));
    query_exe_context_owned = kernel_.query_mem_desc.getQueryExecutionContext(
        kernel_.ra_exe_unit_,
        executor,
        kernel_.chosen_device_type,
        kernel_.kernel_dispatch_mode,
        kernel_.chosen_device_id,
        outer_table_key,
        total_num_input_rows_,
        col_buffers,
        frag_offsets,
        executor->getRowSetMemoryOwner(),
        compilation_result.output_columnar,
        kernel_.query_mem_desc.sortOnGpu(),
        thread_idx_,
        do_render ? kernel_.render_info_ : nullptr);
  }
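  // The local() call above hands each worker thread its own lazily built
  // context. A minimal sketch of that thread-local lazy-init pattern, in
  // isolation (SketchCtx is an illustrative stand-in):
  struct SketchCtx {};
  auto sketch_tls_context = []() -> SketchCtx& {
    thread_local std::unique_ptr<SketchCtx> ctx;
    if (!ctx) {
      ctx = std::make_unique<SketchCtx>();  // built once per thread, then reused
    }
    return *ctx;
  };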
  const auto& outer_tab_frag_ids = kernel_.frag_list[0].fragment_ids;
  QueryExecutionContext* query_exe_context{query_exe_context_owned.get()};
  CHECK(query_exe_context);
  int32_t err{0};
  bool optimize_cuda_block_and_grid_sizes =
      kernel_.chosen_device_type == ExecutorDeviceType::GPU &&
      kernel_.eo.optimize_cuda_block_and_grid_sizes;
  if (kernel_.ra_exe_unit_.groupby_exprs.empty()) {
    err = executor->executePlanWithoutGroupBy(kernel_.ra_exe_unit_,
                                              compilation_result,
                                              kernel_.query_comp_desc.hoistLiterals(),
                                              nullptr,
                                              kernel_.ra_exe_unit_.target_exprs,
                                              kernel_.chosen_device_type,
                                              fetch_result_->col_buffers,
                                              query_exe_context,
                                              fetch_result_->num_rows,
                                              fetch_result_->frag_offsets,
                                              executor->getDataMgr(),
                                              kernel_.chosen_device_id,
                                              start_rowid_,
                                              kernel_.ra_exe_unit_.input_descs.size(),
                                              kernel_.eo.allow_runtime_query_interrupt,
                                              do_render ? kernel_.render_info_ : nullptr,
                                              optimize_cuda_block_and_grid_sizes,
                                              start_rowid_ + num_rows_to_process_);
  } else {
    err = executor->executePlanWithGroupBy(kernel_.ra_exe_unit_,
                                           compilation_result,
                                           kernel_.query_comp_desc.hoistLiterals(),
                                           nullptr,
                                           kernel_.chosen_device_type,
                                           fetch_result_->col_buffers,
                                           outer_tab_frag_ids,
                                           query_exe_context,
                                           fetch_result_->num_rows,
                                           fetch_result_->frag_offsets,
                                           executor->getDataMgr(),
                                           kernel_.chosen_device_id,
                                           outer_table_key,
                                           kernel_.ra_exe_unit_.scan_limit,
                                           start_rowid_,
                                           kernel_.ra_exe_unit_.input_descs.size(),
                                           kernel_.eo.allow_runtime_query_interrupt,
                                           do_render ? kernel_.render_info_ : nullptr,
                                           optimize_cuda_block_and_grid_sizes,
                                           start_rowid_ + num_rows_to_process_);
  }
  // ... a nonzero err is translated into a QueryExecutionError
}