OmniSciDB  a5dc49c757
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Groups Pages
TargetExprBuilder.cpp
Go to the documentation of this file.
1 /*
2  * Copyright 2022 HEAVY.AI, Inc.
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  * http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
23 #include "TargetExprBuilder.h"
24 
25 #include "CodeGenerator.h"
26 #include "Execute.h"
27 #include "GroupByAndAggregate.h"
28 #include "Logger/Logger.h"
29 #include "MaxwellCodegenPatch.h"
31 
32 #define LL_CONTEXT executor->cgen_state_->context_
33 #define LL_BUILDER executor->cgen_state_->ir_builder_
34 #define LL_BOOL(v) executor->ll_bool(v)
35 #define LL_INT(v) executor->cgen_state_->llInt(v)
36 #define LL_FP(v) executor->cgen_state_->llFp(v)
37 #define ROW_FUNC executor->cgen_state_->row_func_
38 
39 namespace {
40 
41 inline bool is_varlen_projection(const Analyzer::Expr* target_expr,
42  const SQLTypeInfo& ti) {
43  return dynamic_cast<const Analyzer::GeoExpr*>(target_expr) && ti.get_type() == kPOINT;
44 }
45 
// Maps a target descriptor to the base name(s) of the runtime aggregate
// function(s) that will be emitted for it. Multi-slot targets (AVG, varlen,
// geo) return more than one name; projections return "agg_id" variants.
// NOTE(review): this listing was recovered from a doxygen dump and at least
// one original line is missing inside the switch (see note below) — verify
// against the upstream source before editing.
46 std::vector<std::string> agg_fn_base_names(const TargetInfo& target_info,
47  const bool is_varlen_projection) {
48  const auto& chosen_type = get_compact_type(target_info);
49  if (is_varlen_projection) {
50  // TODO: support other types here
51  CHECK(chosen_type.is_geometry());
52  return {"agg_id_varlen"};
53  }
// Non-aggregate targets (plain projections) and SAMPLE use the identity
// aggregate; geo expands to two slots per physical coordinate column and
// varlen types use two slots (pointer + length).
54  if (!target_info.is_agg || target_info.agg_kind == kSAMPLE) {
55  if (chosen_type.is_geometry()) {
56  return std::vector<std::string>(2 * chosen_type.get_physical_coord_cols(),
57  "agg_id");
58  }
59  if (chosen_type.is_varlen()) {
60  // not a varlen projection (not creating new varlen outputs). Just store the pointer
61  // and offset into the input buffer in the output slots.
62  return {"agg_id", "agg_id"};
63  }
64  return {"agg_id"};
65  }
66  switch (target_info.agg_kind) {
67  case kAVG:
// AVG is computed as a running sum plus a running count (two slots).
68  return {"agg_sum", "agg_count"};
69  case kCOUNT:
70  return {target_info.is_distinct ? "agg_count_distinct" : "agg_count"};
71  case kCOUNT_IF:
72  return {"agg_count_if"};
73  case kMAX:
74  return {"agg_max"};
75  case kMIN:
76  return {"agg_min"};
77  case kSUM:
78  return {"agg_sum"};
79  case kSUM_IF:
80  return {"agg_sum_if"};
// NOTE(review): the case label for the following return (presumably
// kAPPROX_COUNT_DISTINCT, original line 81) was lost in extraction; as
// written this return is unreachable. Confirm against upstream.
82  return {"agg_approximate_count_distinct"};
83  case kAPPROX_QUANTILE:
84  return {"agg_approx_quantile"};
85  case kSINGLE_VALUE:
// "checked" prefix: the runtime function reports an error code the caller
// must inspect (multiple distinct values under SINGLE_VALUE).
86  return {"checked_single_agg_id"};
87  case kSAMPLE:
88  return {"agg_id"};
89  case kMODE:
90  return {"agg_mode_func"};
91  default:
92  UNREACHABLE() << "Unrecognized agg kind: " << target_info.agg_kind;
93  }
94  return {};
95 }
96 
// NOTE(review): the signature line of this helper (original line 97, likely
// `bool is_columnar_projection(const QueryMemoryDescriptor& query_mem_desc)`)
// and the right-hand enumerator of the second comparison (original line 100)
// were lost in extraction. From the visible body it tests whether the query
// output is a columnar projection-style layout. Verify against upstream.
98  return (query_mem_desc.getQueryDescriptionType() == QueryDescriptionType::Projection ||
99  query_mem_desc.getQueryDescriptionType() ==
// (extraction gap here: the compared QueryDescriptionType enumerator is missing)
101  query_mem_desc.didOutputColumnar();
102 }
103 
104 bool is_simple_count(const TargetInfo& target_info) {
105  return target_info.is_agg && shared::is_any<kCOUNT>(target_info.agg_kind) &&
106  !target_info.is_distinct;
107 }
108 
109 bool target_has_geo(const TargetInfo& target_info) {
110  return target_info.is_agg ? target_info.agg_arg_type.is_geometry()
111  : target_info.sql_type.is_geometry();
112 }
113 
114 } // namespace
115 
// Entry point for generating IR for one target expression.
// NOTE(review): the function-signature line (original line 116, presumably
// `void TargetExprCodegen::codegen(`) and a few interior lines (orig. 119,
// 138, 189, 210) were lost in extraction — verify against upstream.
// Visible flow: compute the target's LLVM values (window-function aware),
// normalize the value count against the agg function list, take a GPU
// shared-memory atomic fast path for not-null simple counts, otherwise
// delegate to codegenAggregate().
117  GroupByAndAggregate* group_by_and_agg,
118  Executor* executor,
120  const CompilationOptions& co,
121  const GpuSharedMemoryContext& gpu_smem_context,
122  const std::tuple<llvm::Value*, llvm::Value*>& agg_out_ptr_w_idx_in,
123  const std::vector<llvm::Value*>& agg_out_vec,
124  llvm::Value* output_buffer_byte_stream,
125  llvm::Value* out_row_idx,
126  llvm::Value* varlen_output_buffer,
127  DiamondCodegen& diamond_codegen,
128  DiamondCodegen* sample_cfg) const {
129  CHECK(group_by_and_agg);
130  CHECK(executor);
131  AUTOMATIC_IR_METADATA(executor->cgen_state_.get());
132  auto agg_out_ptr_w_idx = agg_out_ptr_w_idx_in;
133  const auto arg_expr = agg_arg(target_expr);
134  const bool varlen_projection = is_varlen_projection(target_expr, target_info.sql_type);
135  const bool uses_flatbuffer = target_info.sql_type.usesFlatBuffer();
136  const auto agg_fn_names = agg_fn_base_names(target_info, varlen_projection);
137  const auto window_func = dynamic_cast<const Analyzer::WindowFunction*>(target_expr);
// Window functions produce a single value through the dedicated window
// codegen path; everything else goes through the generic agg-arg codegen.
139  auto target_lvs =
140  window_func
141  ? std::vector<llvm::Value*>{executor->codegenWindowFunction(target_idx, co)}
142  : group_by_and_agg->codegenAggArg(target_expr, co);
143  const auto window_row_ptr = window_func
144  ? group_by_and_agg->codegenWindowRowPointer(
145  window_func, query_mem_desc, co, diamond_codegen)
146  : nullptr;
147  if (window_row_ptr) {
// Aggregate window functions write through the window row pointer instead
// of the regular output row.
148  agg_out_ptr_w_idx =
149  std::make_tuple(window_row_ptr, std::get<1>(agg_out_ptr_w_idx_in));
150  if (window_function_is_aggregate(window_func->getKind())) {
151  out_row_idx = window_row_ptr;
152  }
153  }
154 
155  llvm::Value* str_target_lv{nullptr};
156  if (target_lvs.size() == 3 && !target_has_geo(target_info)) {
157  // none encoding string, pop the packed pointer + length since
158  // it's only useful for IS NULL checks and assumed to be only
159  // two components (pointer and length) for the purpose of projection
160  str_target_lv = target_lvs.front();
161  target_lvs.erase(target_lvs.begin());
162  }
163  if (target_info.sql_type.is_geometry() && !varlen_projection) {
164  // Geo cols are expanded to the physical coord cols. Each physical coord col is an
165  // array. Ensure that the target values generated match the number of agg
166  // functions before continuing
167  if (target_lvs.size() < agg_fn_names.size()) {
168  if (!uses_flatbuffer) {
169  CHECK_EQ(target_lvs.size(), agg_fn_names.size() / 2);
170  }
// Duplicate each coord value so there is one value per agg slot.
171  std::vector<llvm::Value*> new_target_lvs;
172  new_target_lvs.reserve(agg_fn_names.size());
173  for (const auto& target_lv : target_lvs) {
174  new_target_lvs.push_back(target_lv);
175  new_target_lvs.push_back(target_lv);
176  }
177  target_lvs = new_target_lvs;
178  }
179  }
180  if (target_lvs.size() < agg_fn_names.size()) {
181  if (!uses_flatbuffer) {
182  CHECK_EQ(size_t(1), target_lvs.size());
183  CHECK_EQ(size_t(2), agg_fn_names.size());
184  }
185  for (size_t i = 1; i < agg_fn_names.size(); ++i) {
186  target_lvs.push_back(target_lvs.front());
187  }
188  } else {
// NOTE(review): a condition line (original 189) is missing here — the
// following branch's guard cannot be fully recovered from this listing.
190  if (!target_info.is_agg && !varlen_projection) {
191  if (!uses_flatbuffer) {
192  CHECK_EQ(
193  static_cast<size_t>(2 * target_info.sql_type.get_physical_coord_cols()),
194  target_lvs.size());
195  }
196  CHECK_EQ(agg_fn_names.size(), target_lvs.size());
197  }
198  } else {
199  CHECK(str_target_lv || (agg_fn_names.size() == target_lvs.size()));
200  CHECK(target_lvs.size() == 1 || target_lvs.size() == 2);
201  }
202  }
203 
204  int32_t slot_index = base_slot_index;
205  CHECK_GE(slot_index, 0);
206  CHECK(is_group_by || static_cast<size_t>(slot_index) < agg_out_vec.size());
207 
// GPU fast path: a not-null simple COUNT under shared memory becomes a
// single atomic add, skipping the generic aggregate codegen entirely.
// NOTE(review): part of this condition (original line 210) is missing from
// the listing.
208  uint32_t col_off{0};
209  if (co.device_type == ExecutorDeviceType::GPU && query_mem_desc.threadsShareMemory() &&
211  (!arg_expr || arg_expr->get_type_info().get_notnull())) {
212  CHECK_EQ(size_t(1), agg_fn_names.size());
213  const auto chosen_bytes = query_mem_desc.getPaddedSlotWidthBytes(slot_index);
214  llvm::Value* agg_col_ptr{nullptr};
215  if (is_group_by) {
216  if (query_mem_desc.didOutputColumnar()) {
// Columnar layout: offset is scaled to slot width and added to the row index.
217  col_off = query_mem_desc.getColOffInBytes(slot_index);
218  CHECK_EQ(size_t(0), col_off % chosen_bytes);
219  col_off /= chosen_bytes;
220  CHECK(std::get<1>(agg_out_ptr_w_idx));
221  auto offset =
222  LL_BUILDER.CreateAdd(std::get<1>(agg_out_ptr_w_idx), LL_INT(col_off));
223  auto* bit_cast = LL_BUILDER.CreateBitCast(
224  std::get<0>(agg_out_ptr_w_idx),
225  llvm::PointerType::get(get_int_type((chosen_bytes << 3), LL_CONTEXT), 0));
226  agg_col_ptr = LL_BUILDER.CreateGEP(
227  bit_cast->getType()->getScalarType()->getPointerElementType(),
228  bit_cast,
229  offset);
230  } else {
// Row-wise layout: constant column-only offset within the row.
231  col_off = query_mem_desc.getColOnlyOffInBytes(slot_index);
232  CHECK_EQ(size_t(0), col_off % chosen_bytes);
233  col_off /= chosen_bytes;
234  auto* bit_cast = LL_BUILDER.CreateBitCast(
235  std::get<0>(agg_out_ptr_w_idx),
236  llvm::PointerType::get(get_int_type((chosen_bytes << 3), LL_CONTEXT), 0));
237  agg_col_ptr = LL_BUILDER.CreateGEP(
238  bit_cast->getType()->getScalarType()->getPointerElementType(),
239  bit_cast,
240  LL_INT(col_off));
241  }
242  }
243 
// 64-bit count accumulator (g_bigint_count) vs 32-bit; each variant emits
// either a shared-memory helper call or an atomic add with monotonic
// (relaxed) ordering.
244  if (chosen_bytes != sizeof(int32_t)) {
245  CHECK_EQ(8, chosen_bytes);
246  if (g_bigint_count) {
247  const auto acc_i64 = LL_BUILDER.CreateBitCast(
248  is_group_by ? agg_col_ptr : agg_out_vec[slot_index],
249  llvm::PointerType::get(get_int_type(64, LL_CONTEXT), 0));
250  if (gpu_smem_context.isSharedMemoryUsed()) {
251  group_by_and_agg->emitCall(
252  "agg_count_shared", std::vector<llvm::Value*>{acc_i64, LL_INT(int64_t(1))});
253  } else {
254  LL_BUILDER.CreateAtomicRMW(llvm::AtomicRMWInst::Add,
255  acc_i64,
256  LL_INT(int64_t(1)),
257 #if LLVM_VERSION_MAJOR > 12
258  LLVM_ALIGN(8),
259 #endif
260  llvm::AtomicOrdering::Monotonic);
261  }
262  } else {
263  auto acc_i32 = LL_BUILDER.CreateBitCast(
264  is_group_by ? agg_col_ptr : agg_out_vec[slot_index],
265  llvm::PointerType::get(get_int_type(32, LL_CONTEXT), 0));
266  if (gpu_smem_context.isSharedMemoryUsed()) {
// Address space 3 is the GPU shared-memory address space.
267  acc_i32 = LL_BUILDER.CreatePointerCast(
268  acc_i32, llvm::Type::getInt32PtrTy(LL_CONTEXT, 3));
269  }
270  LL_BUILDER.CreateAtomicRMW(llvm::AtomicRMWInst::Add,
271  acc_i32,
272  LL_INT(1),
273 #if LLVM_VERSION_MAJOR > 12
274  LLVM_ALIGN(4),
275 #endif
276  llvm::AtomicOrdering::Monotonic);
277  }
278  } else {
279  const auto acc_i32 = (is_group_by ? agg_col_ptr : agg_out_vec[slot_index]);
280  if (gpu_smem_context.isSharedMemoryUsed()) {
281  // Atomic operation on address space level 3 (Shared):
282  const auto shared_acc_i32 = LL_BUILDER.CreatePointerCast(
283  acc_i32, llvm::Type::getInt32PtrTy(LL_CONTEXT, 3));
284  LL_BUILDER.CreateAtomicRMW(llvm::AtomicRMWInst::Add,
285  shared_acc_i32,
286  LL_INT(1),
287 #if LLVM_VERSION_MAJOR > 12
288  LLVM_ALIGN(4),
289 #endif
290  llvm::AtomicOrdering::Monotonic);
291  } else {
292  LL_BUILDER.CreateAtomicRMW(llvm::AtomicRMWInst::Add,
293  acc_i32,
294  LL_INT(1),
295 #if LLVM_VERSION_MAJOR > 12
296  LLVM_ALIGN(4),
297 #endif
298  llvm::AtomicOrdering::Monotonic);
299  }
300  }
301  return;
302  }
303 
// General path: per-slot aggregate code generation.
304  codegenAggregate(group_by_and_agg,
305  executor,
306  query_mem_desc,
307  co,
308  target_lvs,
309  agg_out_ptr_w_idx,
310  agg_out_vec,
311  output_buffer_byte_stream,
312  out_row_idx,
313  varlen_output_buffer,
314  slot_index);
315 }
316 
// Generates the per-slot aggregate calls for one target expression.
// NOTE(review): the signature line (original 317, presumably
// `void TargetExprCodegen::codegenAggregate(`) and numerous interior lines
// (orig. 320, 336, 379–381, 396, 399, 405, 433, 544, 562, 569–571, 587,
// 604–606) were lost in extraction; several conditions below are therefore
// truncated. Verify against upstream before modifying.
// Visible flow per agg slot: COUNT DISTINCT on arrays → external call;
// varlen projection → null-checked write into the varlen buffer plus an
// agg_id of the offset; otherwise build the runtime agg function name
// (type/width suffixes, _skip_val, _shared), emit the call, then handle
// pending window-function outputs.
318  GroupByAndAggregate* group_by_and_agg,
319  Executor* executor,
321  const CompilationOptions& co,
322  const std::vector<llvm::Value*>& target_lvs,
323  const std::tuple<llvm::Value*, llvm::Value*>& agg_out_ptr_w_idx,
324  const std::vector<llvm::Value*>& agg_out_vec,
325  llvm::Value* output_buffer_byte_stream,
326  llvm::Value* out_row_idx,
327  llvm::Value* varlen_output_buffer,
328  int32_t slot_index) const {
329  AUTOMATIC_IR_METADATA(executor->cgen_state_.get());
330  size_t target_lv_idx = 0;
331  const bool lazy_fetched{executor->plan_state_->isLazyFetchColumn(target_expr)};
332 
333  CodeGenerator code_generator(executor);
334 
// NOTE(review): the argument list of this call (original line 336) is
// missing from the listing.
335  const auto agg_fn_names = agg_fn_base_names(
337  auto arg_expr = agg_arg(target_expr);
338 
339  for (const auto& agg_base_name : agg_fn_names) {
// COUNT DISTINCT over array arguments: handled by a dedicated external
// runtime function, one call per element type.
340  if (target_info.is_distinct && arg_expr->get_type_info().is_array()) {
341  CHECK_EQ(static_cast<size_t>(query_mem_desc.getLogicalSlotWidthBytes(slot_index)),
342  sizeof(int64_t));
343  // TODO(miyu): check if buffer may be columnar here
344  CHECK(!query_mem_desc.didOutputColumnar());
345  const auto& elem_ti = arg_expr->get_type_info().get_elem_type();
346  uint32_t col_off{0};
347  if (is_group_by) {
348  const auto col_off_in_bytes = query_mem_desc.getColOnlyOffInBytes(slot_index);
349  CHECK_EQ(size_t(0), col_off_in_bytes % sizeof(int64_t));
350  col_off /= sizeof(int64_t);
351  }
352  executor->cgen_state_->emitExternalCall(
353  "agg_count_distinct_array_" + numeric_type_name(elem_ti),
354  llvm::Type::getVoidTy(LL_CONTEXT),
355  {is_group_by ? LL_BUILDER.CreateGEP(std::get<0>(agg_out_ptr_w_idx)
356  ->getType()
357  ->getScalarType()
358  ->getPointerElementType(),
359  std::get<0>(agg_out_ptr_w_idx),
360  LL_INT(col_off))
361  : agg_out_vec[slot_index],
362  target_lvs[target_lv_idx],
363  code_generator.posArg(arg_expr),
364  elem_ti.is_fp()
365  ? static_cast<llvm::Value*>(executor->cgen_state_->inlineFpNull(elem_ti))
366  : static_cast<llvm::Value*>(
367  executor->cgen_state_->inlineIntNull(elem_ti))});
368  ++slot_index;
369  ++target_lv_idx;
370  continue;
371  }
372 
373  llvm::Value* agg_col_ptr{nullptr};
374  const auto chosen_bytes =
375  static_cast<size_t>(query_mem_desc.getPaddedSlotWidthBytes(slot_index));
376  const auto& chosen_type = get_compact_type(target_info);
// NOTE(review): the initializer of arg_type is truncated (original lines
// 379–381 missing).
377  const auto& arg_type =
378  ((arg_expr && arg_expr->get_type_info().get_type() != kNULLT) &&
382  const bool is_fp_arg =
383  !lazy_fetched && arg_type.get_type() != kNULLT && arg_type.is_fp();
384  if (is_group_by) {
385  agg_col_ptr = group_by_and_agg->codegenAggColumnPtr(output_buffer_byte_stream,
386  out_row_idx,
387  agg_out_ptr_w_idx,
388  query_mem_desc,
389  chosen_bytes,
390  slot_index,
391  target_idx);
392  CHECK(agg_col_ptr);
393  agg_col_ptr->setName("agg_col_ptr");
394  }
395 
// Varlen projection branch (its guard, original line 396, is missing).
397  CHECK(!query_mem_desc.didOutputColumnar());
398 
400  CHECK_LT(target_lv_idx, target_lvs.size());
401  CHECK(varlen_output_buffer);
402  auto target_lv = target_lvs[target_lv_idx];
403 
// NOTE(review): the condition preceding this suffix check (original line
// 405) is missing.
404  std::string agg_fname_suffix = "";
406  query_mem_desc.threadsShareMemory()) {
407  agg_fname_suffix += "_shared";
408  }
409 
410  // first write the varlen data into the varlen buffer and get the pointer location
411  // into the varlen buffer
412  auto& builder = executor->cgen_state_->ir_builder_;
413  auto orig_bb = builder.GetInsertBlock();
414  auto target_ptr_type = llvm::dyn_cast<llvm::PointerType>(target_lv->getType());
415  CHECK(target_ptr_type) << "Varlen projections expect a pointer input.";
416  auto is_nullptr =
417  builder.CreateICmp(llvm::CmpInst::ICMP_EQ,
418  target_lv,
419  llvm::ConstantPointerNull::get(llvm::PointerType::get(
420  target_ptr_type->getPointerElementType(), 0)));
421  llvm::BasicBlock* true_bb{nullptr};
422  {
423  DiamondCodegen nullcheck_diamond(
424  is_nullptr, executor, false, "varlen_null_check", nullptr, false);
425  // maintain a reference to the true bb, overriding the diamond codegen destructor
426  true_bb = nullcheck_diamond.cond_true_;
427  // if not null, process the pointer and insert it into the varlen buffer
428  builder.SetInsertPoint(nullcheck_diamond.cond_false_);
429  auto arr_ptr_lv = executor->cgen_state_->ir_builder_.CreateBitCast(
430  target_lv,
431  llvm::PointerType::get(get_int_type(8, executor->cgen_state_->context_), 0));
// NOTE(review): the initializer of chosen_bytes here (original line 433)
// is missing from the listing.
432  const int64_t chosen_bytes =
434  auto* arg = get_arg_by_name(ROW_FUNC, "old_total_matched");
435  const auto output_buffer_slot = LL_BUILDER.CreateZExt(
436  LL_BUILDER.CreateLoad(arg->getType()->getPointerElementType(), arg),
437  llvm::Type::getInt64Ty(LL_CONTEXT));
438  const auto varlen_buffer_row_sz = query_mem_desc.varlenOutputBufferElemSize();
439  CHECK(varlen_buffer_row_sz);
// Byte offset into the varlen buffer: row index * row size + slot offset.
440  const auto output_buffer_slot_bytes = LL_BUILDER.CreateAdd(
441  LL_BUILDER.CreateMul(output_buffer_slot,
442  executor->cgen_state_->llInt(
443  static_cast<int64_t>(*varlen_buffer_row_sz))),
444  executor->cgen_state_->llInt(static_cast<int64_t>(
445  query_mem_desc.varlenOutputRowSizeToSlot(slot_index))));
446 
447  std::vector<llvm::Value*> varlen_agg_args{
448  executor->castToIntPtrTyIn(varlen_output_buffer, 8),
449  output_buffer_slot_bytes,
450  arr_ptr_lv,
451  executor->cgen_state_->llInt(chosen_bytes)};
452  auto varlen_offset_ptr =
453  group_by_and_agg->emitCall(agg_base_name + agg_fname_suffix, varlen_agg_args);
454 
455  // then write that pointer location into the 64 bit slot in the output buffer
456  auto varlen_offset_int = LL_BUILDER.CreatePtrToInt(
457  varlen_offset_ptr, llvm::Type::getInt64Ty(LL_CONTEXT));
458  builder.CreateBr(nullcheck_diamond.cond_true_);
459 
460  // use the true block to do the output buffer insertion regardless of nullness
461  builder.SetInsertPoint(nullcheck_diamond.cond_true_);
462  auto output_phi =
463  builder.CreatePHI(llvm::Type::getInt64Ty(executor->cgen_state_->context_), 2);
464  output_phi->addIncoming(varlen_offset_int, nullcheck_diamond.cond_false_);
465  output_phi->addIncoming(executor->cgen_state_->llInt(static_cast<int64_t>(0)),
466  orig_bb);
467 
468  std::vector<llvm::Value*> agg_args{agg_col_ptr, output_phi};
469  group_by_and_agg->emitCall("agg_id" + agg_fname_suffix, agg_args);
470  }
471  CHECK(true_bb);
472  builder.SetInsertPoint(true_bb);
473 
474  ++slot_index;
475  ++target_lv_idx;
476  continue;
477  }
478 
479  const bool float_argument_input = takes_float_argument(target_info);
480  const bool is_count_in_avg = target_info.agg_kind == kAVG && target_lv_idx == 1;
481  // The count component of an average should never be compacted.
482  const auto agg_chosen_bytes =
483  float_argument_input && !is_count_in_avg ? sizeof(float) : chosen_bytes;
484  if (float_argument_input) {
485  CHECK_GE(chosen_bytes, sizeof(float));
486  }
487 
488  auto target_lv = target_lvs[target_lv_idx];
489  const auto needs_unnest_double_patch = group_by_and_agg->needsUnnestDoublePatch(
490  target_lv, agg_base_name, query_mem_desc.threadsShareMemory(), co);
491  const auto need_skip_null = !needs_unnest_double_patch && target_info.skip_null_val;
492  if (!needs_unnest_double_patch) {
493  if (need_skip_null && !is_agg_domain_range_equivalent(target_info.agg_kind)) {
494  target_lv = group_by_and_agg->convertNullIfAny(arg_type, target_info, target_lv);
495  } else if (is_fp_arg) {
496  target_lv = executor->castToFP(target_lv, arg_type, target_info.sql_type);
497  }
498  if (!dynamic_cast<const Analyzer::AggExpr*>(target_expr) || arg_expr) {
499  target_lv =
500  executor->cgen_state_->castToTypeIn(target_lv, (agg_chosen_bytes << 3));
501  }
502  }
503 
504  const bool is_simple_count_target = is_simple_count(target_info);
505  llvm::Value* str_target_lv{nullptr};
506  if (target_lvs.size() == 3 && !target_has_geo(target_info)) {
507  // none encoding string
508  str_target_lv = target_lvs.front();
509  }
// First two agg args: destination slot pointer and the value (zero literal
// for argument-less simple counts).
510  std::vector<llvm::Value*> agg_args{
511  executor->castToIntPtrTyIn((is_group_by ? agg_col_ptr : agg_out_vec[slot_index]),
512  (agg_chosen_bytes << 3)),
513  (is_simple_count_target && !arg_expr)
514  ? (agg_chosen_bytes == sizeof(int32_t) ? LL_INT(int32_t(0))
515  : LL_INT(int64_t(0)))
516  : (is_simple_count_target && arg_expr && str_target_lv ? str_target_lv
517  : target_lv)};
518  if (query_mem_desc.isLogicalSizedColumnsAllowed()) {
519  if (is_simple_count_target && arg_expr && str_target_lv) {
520  agg_args[1] =
521  agg_chosen_bytes == sizeof(int32_t) ? LL_INT(int32_t(0)) : LL_INT(int64_t(0));
522  }
523  }
// Build the runtime function name: base + width/type suffix.
524  std::string agg_fname{agg_base_name};
525  if (is_fp_arg) {
526  if (!lazy_fetched) {
527  if (agg_chosen_bytes == sizeof(float)) {
528  CHECK_EQ(arg_type.get_type(), kFLOAT);
529  agg_fname += "_float";
530  } else {
531  CHECK_EQ(agg_chosen_bytes, sizeof(double));
532  agg_fname += "_double";
533  }
534  }
535  } else if (agg_chosen_bytes == sizeof(int32_t)) {
536  agg_fname += "_int32";
537  } else if (agg_chosen_bytes == sizeof(int16_t) &&
538  query_mem_desc.didOutputColumnar()) {
539  agg_fname += "_int16";
540  } else if (agg_chosen_bytes == sizeof(int8_t) && query_mem_desc.didOutputColumnar()) {
541  agg_fname += "_int8";
542  }
543 
// NOTE(review): the guard of this branch (original line 544, likely the
// COUNT DISTINCT / APPROX_COUNT_DISTINCT check) is missing.
545  CHECK_EQ(agg_chosen_bytes, sizeof(int64_t));
546  CHECK(!chosen_type.is_fp());
547  group_by_and_agg->codegenCountDistinct(
548  target_idx, target_expr, agg_args, query_mem_desc, co.device_type);
549  } else if (target_info.agg_kind == kAPPROX_QUANTILE) {
550  CHECK_EQ(agg_chosen_bytes, sizeof(int64_t));
551  group_by_and_agg->codegenApproxQuantile(
552  target_idx, target_expr, agg_args, query_mem_desc, co.device_type);
553  } else if (target_info.agg_kind == kMODE) {
554  group_by_and_agg->codegenMode(
555  target_idx, target_expr, agg_args, query_mem_desc, co.device_type);
556  } else {
557  const auto& arg_ti = target_info.agg_arg_type;
558  if (need_skip_null && !arg_ti.is_geometry()) {
559  agg_fname += "_skip_val";
560  }
561 
// NOTE(review): part of this condition (original line 562) is missing.
563  (need_skip_null && !arg_ti.is_geometry())) {
564  llvm::Value* null_in_lv{nullptr};
565  if (arg_ti.is_fp()) {
566  null_in_lv = executor->cgen_state_->inlineFpNull(arg_ti);
567  } else {
// NOTE(review): the condition/alternative of this ternary (original
// lines 569 and 571) are missing.
568  null_in_lv = executor->cgen_state_->inlineIntNull(
570  ? arg_ti
572  }
573  CHECK(null_in_lv);
574  auto null_lv =
575  executor->cgen_state_->castToTypeIn(null_in_lv, (agg_chosen_bytes << 3));
576  agg_args.push_back(null_lv);
577  }
578  if (target_info.agg_kind == kSUM_IF) {
// SUM_IF carries its condition as a second argument; codegen it and pass
// the selector value along.
579  const auto agg_expr = dynamic_cast<const Analyzer::AggExpr*>(target_expr);
580  auto cond_expr_lv =
581  code_generator.codegen(agg_expr->get_arg1().get(), true, co).front();
582  auto cond_lv = executor->codegenConditionalAggregateCondValSelector(
583  cond_expr_lv, kSUM_IF, co);
584  agg_args.push_back(cond_lv);
585  }
586  if (!target_info.is_distinct) {
// NOTE(review): part of this condition (original line 587) is missing.
588  query_mem_desc.threadsShareMemory()) {
589  agg_fname += "_shared";
590  if (needs_unnest_double_patch) {
591  agg_fname = patch_agg_fname(agg_fname);
592  }
593  }
594  auto agg_fname_call_ret_lv = group_by_and_agg->emitCall(agg_fname, agg_args);
595 
// "checked_" runtime functions return an error code that must be checked.
596  if (agg_fname.find("checked") != std::string::npos) {
597  group_by_and_agg->checkErrorCode(agg_fname_call_ret_lv);
598  }
599  }
600  }
601  const auto window_func = dynamic_cast<const Analyzer::WindowFunction*>(target_expr);
602  // window function with framing has a different code path and codegen logic
// NOTE(review): the remainder of this guard and the window_func_context
// initializer (original lines 604 and 606) are missing.
603  if (window_func && !window_func->hasFraming() &&
605  const auto window_func_context =
607  const auto pending_outputs =
608  LL_INT(window_func_context->aggregateStatePendingOutputs());
609  executor->cgen_state_->emitExternalCall("add_window_pending_output",
610  llvm::Type::getVoidTy(LL_CONTEXT),
611  {agg_args.front(), pending_outputs});
612  const auto& window_func_ti = window_func->get_type_info();
// Select the typed variant of the pending-output flush helper.
613  std::string apply_window_pending_outputs_name = "apply_window_pending_outputs";
614  switch (window_func_ti.get_type()) {
615  case kFLOAT: {
616  apply_window_pending_outputs_name += "_float";
617  if (query_mem_desc.didOutputColumnar()) {
618  apply_window_pending_outputs_name += "_columnar";
619  }
620  break;
621  }
622  case kDOUBLE: {
623  apply_window_pending_outputs_name += "_double";
624  break;
625  }
626  default: {
627  apply_window_pending_outputs_name += "_int";
628  if (query_mem_desc.didOutputColumnar()) {
629  apply_window_pending_outputs_name +=
630  std::to_string(window_func_ti.get_size() * 8);
631  } else {
632  apply_window_pending_outputs_name += "64";
633  }
634  break;
635  }
636  }
637  const auto partition_end =
638  LL_INT(reinterpret_cast<int64_t>(window_func_context->partitionEnd()));
639  executor->cgen_state_->emitExternalCall(apply_window_pending_outputs_name,
640  llvm::Type::getVoidTy(LL_CONTEXT),
641  {pending_outputs,
642  target_lvs.front(),
643  partition_end,
644  code_generator.posArg(nullptr)});
645  }
646 
647  ++slot_index;
648  ++target_lv_idx;
649  }
650 }
651 
// Registers one target expression with the builder, deciding whether it goes
// into the regular list or the SAMPLE list, and advancing the slot/target
// counters by the number of agg slots it consumes.
// NOTE(review): the signature line (original 652, presumably
// `void TargetExprCodegenBuilder::operator()(const Analyzer::Expr* target_expr, ...)`)
// and several interior lines (orig. 673, 691–692, 695, 706–708, 723) were
// lost in extraction — verify against upstream.
653  const Executor* executor,
654  QueryMemoryDescriptor& query_mem_desc,
655  const CompilationOptions& co) {
656  AUTOMATIC_IR_METADATA(executor->cgen_state_.get());
// Zero-width slots carry no data; skip but still consume the indices.
657  if (query_mem_desc.getPaddedSlotWidthBytes(slot_index_counter) == 0) {
658  CHECK(!dynamic_cast<const Analyzer::AggExpr*>(target_expr));
659  ++slot_index_counter;
660  ++target_index_counter;
661  return;
662  }
663  if (dynamic_cast<const Analyzer::UOper*>(target_expr) &&
664  static_cast<const Analyzer::UOper*>(target_expr)->get_optype() == kUNNEST) {
665  throw std::runtime_error("UNNEST not supported in the projection list yet.");
666  }
// Narrow (sub-8-byte) slots cannot host lazy-fetch/projection outputs in
// non-columnar layouts; the throw statement itself (original line 673) is
// missing from the listing.
667  if ((executor->plan_state_->isLazyFetchColumn(target_expr) || !is_group_by) &&
668  (static_cast<size_t>(query_mem_desc.getPaddedSlotWidthBytes(slot_index_counter)) <
669  sizeof(int64_t)) &&
670  !is_columnar_projection(query_mem_desc)) {
671  // TODO(miyu): enable different byte width in the layout w/o padding
672  VLOG(2) << "Throw CompilationRetryNoCompaction exception";
674  }
675 
676  if (is_columnar_projection(query_mem_desc) &&
677  executor->plan_state_->isLazyFetchColumn(target_expr)) {
678  // For columnar outputs, we need to pad lazy fetched columns to 8 bytes to allow the
679  // lazy fetch index to be placed in the column. The QueryMemoryDescriptor is created
680  // before Lazy Fetch information is known, therefore we need to update the QMD with
681  // the new slot size width bytes for these columns.
682  VLOG(2) << "Set padded slot-width byte for the slot-"
683  << std::to_string(slot_index_counter) << " to 8";
684  query_mem_desc.setPaddedSlotWidthBytes(slot_index_counter, int8_t(8));
685  CHECK_EQ(query_mem_desc.getPaddedSlotWidthBytes(slot_index_counter), int8_t(8));
686  }
687 
688  auto target_info = get_target_info(target_expr, g_bigint_count);
689  auto arg_expr = agg_arg(target_expr);
// Adjust null-skipping behavior based on argument type / constraints.
// NOTE(review): the first branch's guard (original lines 691–692) and part
// of the second condition (original line 695) are missing.
690  if (arg_expr) {
693  target_info.skip_null_val = false;
694  } else if (query_mem_desc.getQueryDescriptionType() ==
696  !arg_expr->get_type_info().is_varlen()) {
697  // TODO: COUNT is currently not null-aware for varlen types. Need to add proper code
698  // generation for handling varlen nulls.
699  target_info.skip_null_val = true;
700  } else if (constrained_not_null(arg_expr, ra_exe_unit.quals)) {
701  target_info.skip_null_val = false;
702  }
703  }
704 
// Route SAMPLE targets to the dedicated list (guard partially missing,
// original lines 706–708).
705  if (!(query_mem_desc.getQueryDescriptionType() ==
709  sample_exprs_to_codegen.emplace_back(target_expr,
710  target_info,
711  slot_index_counter,
712  target_index_counter++,
713  is_group_by);
714  } else {
715  target_exprs_to_codegen.emplace_back(target_expr,
716  target_info,
717  slot_index_counter,
718  target_index_counter++,
719  is_group_by);
720  }
721 
// Advance by the number of slots this target occupies. The argument list
// of this call (original line 723) is missing from the listing.
722  const auto agg_fn_names = agg_fn_base_names(
724  slot_index_counter += agg_fn_names.size();
725 }
726 
727 namespace {
728 
// Computes the initial (empty-slot sentinel) value for an aggregate slot.
// NOTE(review): the signature line (original 729, presumably
// `int64_t get_initial_agg_val(const TargetInfo& target_info, ...)`) was
// lost in extraction. From the visible body: SAMPLE on dictionary-encoded
// strings uses the generic initial value; everything else initializes to 0.
730  const QueryMemoryDescriptor& query_mem_desc) {
731  const bool is_group_by{query_mem_desc.isGroupBy()};
732  if (target_info.agg_kind == kSAMPLE && target_info.sql_type.is_string() &&
733  target_info.sql_type.get_compression() != kENCODING_NONE) {
734  return get_agg_initial_val(target_info.agg_kind,
735  target_info.sql_type,
736  is_group_by,
737  query_mem_desc.getCompactByteWidth());
738  }
739  return 0;
740 }
741 
742 } // namespace
743 
// Drives codegen for all collected target expressions: first the regular
// targets, then (if any) the SAMPLE expressions via their dedicated path.
// NOTE(review): the signature line (original 744, presumably
// `void TargetExprCodegenBuilder::codegen(`) was lost in extraction.
745  GroupByAndAggregate* group_by_and_agg,
746  Executor* executor,
747  const QueryMemoryDescriptor& query_mem_desc,
748  const CompilationOptions& co,
749  const GpuSharedMemoryContext& gpu_smem_context,
750  const std::tuple<llvm::Value*, llvm::Value*>& agg_out_ptr_w_idx,
751  const std::vector<llvm::Value*>& agg_out_vec,
752  llvm::Value* output_buffer_byte_stream,
753  llvm::Value* out_row_idx,
754  llvm::Value* varlen_output_buffer,
755  DiamondCodegen& diamond_codegen) const {
756  CHECK(group_by_and_agg);
757  CHECK(executor);
758  AUTOMATIC_IR_METADATA(executor->cgen_state_.get());
759 
760  // check the target_exprs and find a set of exprs need non-lazy fetch before entering
761  // the expr compilation to avoid a crash during the codegen due to a wrong
762  // classification of expr fetch type (lazy vs. non-lazy), and also we can avoid
763  // unnecessary query recompilation due to `CompilationRetryNoLazyFetch` exception
764  executor->plan_state_->registerNonLazyFetchExpression(target_exprs_to_codegen);
765 
766  for (const auto& target_expr_codegen : target_exprs_to_codegen) {
767  target_expr_codegen.codegen(group_by_and_agg,
768  executor,
769  query_mem_desc,
770  co,
771  gpu_smem_context,
772  agg_out_ptr_w_idx,
773  agg_out_vec,
774  output_buffer_byte_stream,
775  out_row_idx,
776  varlen_output_buffer,
777  diamond_codegen);
778  }
779  if (!sample_exprs_to_codegen.empty()) {
780  codegenSampleExpressions(group_by_and_agg,
781  executor,
782  query_mem_desc,
783  co,
784  agg_out_ptr_w_idx,
785  agg_out_vec,
786  output_buffer_byte_stream,
787  out_row_idx,
788  diamond_codegen);
789  }
790 }
791 
// Dispatches SAMPLE codegen: a single non-varlen SAMPLE target uses the
// cheaper single-slot path; multiple targets (or varlen) use the multi-slot
// path with an empty-key check.
// NOTE(review): the signature line (original 792) and part of the guard
// (original line 804, visible only as the surviving condition below) were
// lost in extraction.
793  GroupByAndAggregate* group_by_and_agg,
794  Executor* executor,
795  const QueryMemoryDescriptor& query_mem_desc,
796  const CompilationOptions& co,
797  const std::tuple<llvm::Value*, llvm::Value*>& agg_out_ptr_w_idx,
798  const std::vector<llvm::Value*>& agg_out_vec,
799  llvm::Value* output_buffer_byte_stream,
800  llvm::Value* out_row_idx,
801  DiamondCodegen& diamond_codegen) const {
802  AUTOMATIC_IR_METADATA(executor->cgen_state_.get());
803  CHECK(!sample_exprs_to_codegen.empty());
805  if (sample_exprs_to_codegen.size() == 1 &&
806  !sample_exprs_to_codegen.front().target_info.sql_type.is_varlen()) {
807  codegenSingleSlotSampleExpression(group_by_and_agg,
808  executor,
809  query_mem_desc,
810  co,
811  agg_out_ptr_w_idx,
812  agg_out_vec,
813  output_buffer_byte_stream,
814  out_row_idx,
815  diamond_codegen);
816  } else {
817  codegenMultiSlotSampleExpressions(group_by_and_agg,
818  executor,
819  query_mem_desc,
820  co,
821  agg_out_ptr_w_idx,
822  agg_out_vec,
823  output_buffer_byte_stream,
824  out_row_idx,
825  diamond_codegen);
826  }
827 }
828 
// Codegen for the single SAMPLE target case: delegates straight to the
// target's regular codegen with no shared-memory context and no varlen
// buffer, since one fixed-width slot needs no atomic coordination.
// NOTE(review): the signature line (original 829) was lost in extraction.
830  GroupByAndAggregate* group_by_and_agg,
831  Executor* executor,
832  const QueryMemoryDescriptor& query_mem_desc,
833  const CompilationOptions& co,
834  const std::tuple<llvm::Value*, llvm::Value*>& agg_out_ptr_w_idx,
835  const std::vector<llvm::Value*>& agg_out_vec,
836  llvm::Value* output_buffer_byte_stream,
837  llvm::Value* out_row_idx,
838  DiamondCodegen& diamond_codegen) const {
839  AUTOMATIC_IR_METADATA(executor->cgen_state_.get());
840  CHECK_EQ(size_t(1), sample_exprs_to_codegen.size());
841  CHECK(!sample_exprs_to_codegen.front().target_info.sql_type.is_varlen());
843  // no need for the atomic if we only have one SAMPLE target
844  sample_exprs_to_codegen.front().codegen(group_by_and_agg,
845  executor,
846  query_mem_desc,
847  co,
848  {},
849  agg_out_ptr_w_idx,
850  agg_out_vec,
851  output_buffer_byte_stream,
852  out_row_idx,
853  /*varlen_output_buffer=*/nullptr,
854  diamond_codegen);
855 }
856 
// Codegen for multiple (or varlen) SAMPLE targets: performs an empty-key
// check on the first sample's slot (codegenSlotEmptyKey), then codegens all
// sample targets inside the resulting conditional (sample_cfg) so the slots
// are only written once.
// NOTE(review): the signature line (original 857) and one line of the guard
// region (original 870) were lost in extraction.
858  GroupByAndAggregate* group_by_and_agg,
859  Executor* executor,
860  const QueryMemoryDescriptor& query_mem_desc,
861  const CompilationOptions& co,
862  const std::tuple<llvm::Value*, llvm::Value*>& agg_out_ptr_w_idx,
863  const std::vector<llvm::Value*>& agg_out_vec,
864  llvm::Value* output_buffer_byte_stream,
865  llvm::Value* out_row_idx,
866  DiamondCodegen& diamond_codegen) const {
867  AUTOMATIC_IR_METADATA(executor->cgen_state_.get());
868  CHECK(sample_exprs_to_codegen.size() > 1 ||
869  sample_exprs_to_codegen.front().target_info.sql_type.is_varlen());
871  const auto& first_sample_expr = sample_exprs_to_codegen.front();
872  auto target_lvs = group_by_and_agg->codegenAggArg(first_sample_expr.target_expr, co);
873  CHECK_GE(target_lvs.size(), size_t(1));
874 
875  const auto init_val =
876  get_initial_agg_val(first_sample_expr.target_info, query_mem_desc);
877 
878  llvm::Value* agg_col_ptr{nullptr};
879  if (is_group_by) {
// Varlen samples always occupy a full 64-bit slot; otherwise honor the
// logical column size when the layout allows it.
880  const auto agg_column_size_bytes =
881  query_mem_desc.isLogicalSizedColumnsAllowed() &&
882  !first_sample_expr.target_info.sql_type.is_varlen()
883  ? first_sample_expr.target_info.sql_type.get_size()
884  : sizeof(int64_t);
885  agg_col_ptr = group_by_and_agg->codegenAggColumnPtr(output_buffer_byte_stream,
886  out_row_idx,
887  agg_out_ptr_w_idx,
888  query_mem_desc,
889  agg_column_size_bytes,
890  first_sample_expr.base_slot_index,
891  first_sample_expr.target_idx);
892  } else {
893  CHECK_LT(static_cast<size_t>(first_sample_expr.base_slot_index), agg_out_vec.size());
894  agg_col_ptr =
895  executor->castToIntPtrTyIn(agg_out_vec[first_sample_expr.base_slot_index], 64);
896  }
897 
898  auto sample_cas_lv =
899  codegenSlotEmptyKey(agg_col_ptr, target_lvs, executor, query_mem_desc, init_val);
900 
901  DiamondCodegen sample_cfg(
902  sample_cas_lv, executor, false, "sample_valcheck", &diamond_codegen, false);
903 
904  for (const auto& target_expr_codegen : sample_exprs_to_codegen) {
905  target_expr_codegen.codegen(group_by_and_agg,
906  executor,
907  query_mem_desc,
908  co,
909  {},
910  agg_out_ptr_w_idx,
911  agg_out_vec,
912  output_buffer_byte_stream,
913  out_row_idx,
914  /*varlen_output_buffer=*/nullptr,
915  diamond_codegen,
916  &sample_cfg);
917  }
918 }
919 
921  llvm::Value* agg_col_ptr,
922  std::vector<llvm::Value*>& target_lvs,
923  Executor* executor,
924  const QueryMemoryDescriptor& query_mem_desc,
925  const int64_t init_val) const {
926  AUTOMATIC_IR_METADATA(executor->cgen_state_.get());
927  const auto& first_sample_expr = sample_exprs_to_codegen.front();
928  const auto first_sample_slot_bytes =
929  first_sample_expr.target_info.sql_type.is_varlen()
930  ? sizeof(int64_t)
931  : first_sample_expr.target_info.sql_type.get_size();
932  llvm::Value* target_lv_casted{nullptr};
933  // deciding whether proper casting is required for the first sample's slot:
934  if (first_sample_expr.target_info.sql_type.is_varlen()) {
935  target_lv_casted =
936  LL_BUILDER.CreatePtrToInt(target_lvs.front(), llvm::Type::getInt64Ty(LL_CONTEXT));
937  } else if (first_sample_expr.target_info.sql_type.is_fp()) {
938  // Initialization value for SAMPLE on a float column should be 0
939  CHECK_EQ(init_val, 0);
940  if (query_mem_desc.isLogicalSizedColumnsAllowed()) {
941  target_lv_casted = executor->cgen_state_->ir_builder_.CreateFPToSI(
942  target_lvs.front(),
943  first_sample_slot_bytes == sizeof(float) ? llvm::Type::getInt32Ty(LL_CONTEXT)
944  : llvm::Type::getInt64Ty(LL_CONTEXT));
945  } else {
946  target_lv_casted = executor->cgen_state_->ir_builder_.CreateFPToSI(
947  target_lvs.front(), llvm::Type::getInt64Ty(LL_CONTEXT));
948  }
949  } else if (first_sample_slot_bytes != sizeof(int64_t) &&
950  !query_mem_desc.isLogicalSizedColumnsAllowed()) {
951  target_lv_casted =
952  executor->cgen_state_->ir_builder_.CreateCast(llvm::Instruction::CastOps::SExt,
953  target_lvs.front(),
954  llvm::Type::getInt64Ty(LL_CONTEXT));
955  } else {
956  target_lv_casted = target_lvs.front();
957  }
958 
959  std::string slot_empty_cas_func_name("slotEmptyKeyCAS");
960  llvm::Value* init_val_lv{LL_INT(init_val)};
961  if (query_mem_desc.isLogicalSizedColumnsAllowed() &&
962  !first_sample_expr.target_info.sql_type.is_varlen()) {
963  // add proper suffix to the function name:
964  switch (first_sample_slot_bytes) {
965  case 1:
966  slot_empty_cas_func_name += "_int8";
967  break;
968  case 2:
969  slot_empty_cas_func_name += "_int16";
970  break;
971  case 4:
972  slot_empty_cas_func_name += "_int32";
973  break;
974  case 8:
975  break;
976  default:
977  UNREACHABLE() << "Invalid slot size for slotEmptyKeyCAS function.";
978  break;
979  }
980  if (first_sample_slot_bytes != sizeof(int64_t)) {
981  init_val_lv = llvm::ConstantInt::get(
982  get_int_type(first_sample_slot_bytes * 8, LL_CONTEXT), init_val);
983  }
984  }
985 
986  auto sample_cas_lv = executor->cgen_state_->emitExternalCall(
987  slot_empty_cas_func_name,
988  llvm::Type::getInt1Ty(executor->cgen_state_->context_),
989  {agg_col_ptr, target_lv_casted, init_val_lv});
990  return sample_cas_lv;
991 }
992 
993 std::ostream& operator<<(std::ostream& os, const TargetExprCodegen& target_expr_codegen) {
994  os << "(target_expr: " << target_expr_codegen.target_expr->toString()
995  << ", target_info: " << target_expr_codegen.target_info.toString()
996  << ", base_slot_index: " << target_expr_codegen.base_slot_index
997  << ", target_idx:" << target_expr_codegen.target_idx
998  << ", is_group_by: " << target_expr_codegen.is_group_by << ")";
999  return os;
1000 }
size_t varlenOutputRowSizeToSlot(const size_t slot_idx) const
#define LL_BUILDER
const Analyzer::Expr * agg_arg(const Analyzer::Expr *expr)
#define CHECK_EQ(x, y)
Definition: Logger.h:301
bool target_has_geo(const TargetInfo &target_info)
bool constrained_not_null(const Analyzer::Expr *expr, const std::list< std::shared_ptr< Analyzer::Expr >> &quals)
llvm::BasicBlock * cond_false_
llvm::Value * codegenAggColumnPtr(llvm::Value *output_buffer_byte_stream, llvm::Value *out_row_idx, const std::tuple< llvm::Value *, llvm::Value * > &agg_out_ptr_w_idx, const QueryMemoryDescriptor &query_mem_desc, const size_t chosen_bytes, const size_t agg_out_off, const size_t target_idx)
: returns the pointer to where the aggregation should be stored.
NonGroupedAggregate
Definition: enums.h:58
std::vector< std::string > agg_fn_base_names(const TargetInfo &target_info, const bool is_varlen_projection)
bool isLogicalSizedColumnsAllowed() const
void codegenMode(const size_t target_idx, const Analyzer::Expr *target_expr, std::vector< llvm::Value * > &agg_args, const QueryMemoryDescriptor &query_mem_desc, const ExecutorDeviceType device_type)
SQLTypeInfo sql_type
Definition: TargetInfo.h:52
void codegen(GroupByAndAggregate *group_by_and_agg, Executor *executor, const QueryMemoryDescriptor &query_mem_desc, const CompilationOptions &co, const GpuSharedMemoryContext &gpu_smem_context, const std::tuple< llvm::Value *, llvm::Value * > &agg_out_ptr_w_idx, const std::vector< llvm::Value * > &agg_out_vec, llvm::Value *output_buffer_byte_stream, llvm::Value *out_row_idx, llvm::Value *varlen_output_buffer, DiamondCodegen &diamond_codegen, DiamondCodegen *sample_cfg=nullptr) const
std::ostream & operator<<(std::ostream &os, const SessionInfo &session_info)
Definition: SessionInfo.cpp:57
void codegenMultiSlotSampleExpressions(GroupByAndAggregate *group_by_and_agg, Executor *executor, const QueryMemoryDescriptor &query_mem_desc, const CompilationOptions &co, const std::tuple< llvm::Value *, llvm::Value * > &agg_out_ptr_w_idx, const std::vector< llvm::Value * > &agg_out_vec, llvm::Value *output_buffer_byte_stream, llvm::Value *out_row_idx, DiamondCodegen &diamond_codegen) const
llvm::Value * posArg(const Analyzer::Expr *) const
Definition: ColumnIR.cpp:590
bool is_agg_domain_range_equivalent(const SQLAgg agg_kind)
Definition: TargetInfo.h:83
#define UNREACHABLE()
Definition: Logger.h:338
#define CHECK_GE(x, y)
Definition: Logger.h:306
llvm::Value * emitCall(const std::string &fname, const std::vector< llvm::Value * > &args)
int64_t get_agg_initial_val(const SQLAgg agg, const SQLTypeInfo &ti, const bool enable_compaction, const unsigned min_byte_width_to_compact)
Projection
Definition: enums.h:58
void codegenApproxQuantile(const size_t target_idx, const Analyzer::Expr *target_expr, std::vector< llvm::Value * > &agg_args, const QueryMemoryDescriptor &query_mem_desc, const ExecutorDeviceType device_type)
void checkErrorCode(llvm::Value *retCode)
bool takes_float_argument(const TargetInfo &target_info)
Definition: TargetInfo.h:106
#define LLVM_ALIGN(alignment)
HOST DEVICE SQLTypes get_type() const
Definition: sqltypes.h:391
bool needsUnnestDoublePatch(llvm::Value const *val_ptr, const std::string &agg_base_name, const bool threads_share_memory, const CompilationOptions &co) const
bool skip_null_val
Definition: TargetInfo.h:54
llvm::BasicBlock * cond_true_
llvm::Type * get_int_type(const int width, llvm::LLVMContext &context)
static WindowFunctionContext * getActiveWindowFunctionContext(Executor *executor)
TargetInfo get_target_info(const Analyzer::Expr *target_expr, const bool bigint_count)
Definition: TargetInfo.h:92
std::string to_string(char const *&&v)
SQLTypeInfo agg_arg_type
Definition: TargetInfo.h:53
std::string patch_agg_fname(const std::string &agg_name)
Helpers for codegen of target expressions.
size_t getColOnlyOffInBytes(const size_t col_idx) const
Definition: sqldefs.h:78
const SQLTypeInfo get_compact_type(const TargetInfo &target)
TableFunction
Definition: enums.h:58
bool is_varlen_projection(const Analyzer::Expr *target_expr, const SQLTypeInfo &ti)
bool is_agg
Definition: TargetInfo.h:50
llvm::Value * get_arg_by_name(llvm::Function *func, const std::string &name)
Definition: Execute.h:168
void operator()(const Analyzer::Expr *target_expr, const Executor *executor, QueryMemoryDescriptor &query_mem_desc, const CompilationOptions &co)
std::string toString() const
Definition: TargetInfo.h:59
#define LL_INT(v)
llvm::Value * convertNullIfAny(const SQLTypeInfo &arg_type, const TargetInfo &agg_info, llvm::Value *target)
void codegenSampleExpressions(GroupByAndAggregate *group_by_and_agg, Executor *executor, const QueryMemoryDescriptor &query_mem_desc, const CompilationOptions &co, const std::tuple< llvm::Value *, llvm::Value * > &agg_out_ptr_w_idx, const std::vector< llvm::Value * > &agg_out_vec, llvm::Value *output_buffer_byte_stream, llvm::Value *out_row_idx, DiamondCodegen &diamond_codegen) const
bool g_bigint_count
Definition: sqldefs.h:80
void codegen(GroupByAndAggregate *group_by_and_agg, Executor *executor, const QueryMemoryDescriptor &query_mem_desc, const CompilationOptions &co, const GpuSharedMemoryContext &gpu_smem_context, const std::tuple< llvm::Value *, llvm::Value * > &agg_out_ptr_w_idx, const std::vector< llvm::Value * > &agg_out_vec, llvm::Value *output_buffer_byte_stream, llvm::Value *out_row_idx, llvm::Value *varlen_output_buffer, DiamondCodegen &diamond_codegen) const
#define LL_CONTEXT
bool is_distinct_target(const TargetInfo &target_info)
Definition: TargetInfo.h:102
void codegenCountDistinct(const size_t target_idx, const Analyzer::Expr *target_expr, std::vector< llvm::Value * > &agg_args, const QueryMemoryDescriptor &, const ExecutorDeviceType)
bool usesFlatBuffer() const
Definition: sqltypes.h:1083
const int8_t getPaddedSlotWidthBytes(const size_t slot_idx) const
#define AUTOMATIC_IR_METADATA(CGENSTATE)
SQLAgg agg_kind
Definition: TargetInfo.h:51
QueryDescriptionType getQueryDescriptionType() const
ExecutorDeviceType device_type
std::optional< size_t > varlenOutputBufferElemSize() const
std::vector< llvm::Value * > codegen(const Analyzer::Expr *, const bool fetch_columns, const CompilationOptions &)
Definition: IRCodegen.cpp:30
bool window_function_is_aggregate(const SqlWindowFunctionKind kind)
Definition: WindowContext.h:61
#define CHECK_LT(x, y)
Definition: Logger.h:303
llvm::Value * codegenSlotEmptyKey(llvm::Value *agg_col_ptr, std::vector< llvm::Value * > &target_lvs, Executor *executor, const QueryMemoryDescriptor &query_mem_desc, const int64_t init_val) const
virtual std::string toString() const =0
HOST DEVICE EncodingType get_compression() const
Definition: sqltypes.h:399
const Analyzer::Expr * target_expr
Definition: sqldefs.h:81
std::vector< llvm::Value * > codegenAggArg(const Analyzer::Expr *target_expr, const CompilationOptions &co)
llvm::Value * codegenWindowRowPointer(const Analyzer::WindowFunction *window_func, const QueryMemoryDescriptor &query_mem_desc, const CompilationOptions &co, DiamondCodegen &diamond_codegen)
bool window_function_requires_peer_handling(const Analyzer::WindowFunction *window_func)
TO bit_cast(FROM &&from)
Definition: misc.h:307
bool is_simple_count(const TargetInfo &target_info)
#define CHECK(condition)
Definition: Logger.h:291
bool is_geometry() const
Definition: sqltypes.h:597
static void resetWindowFunctionContext(Executor *executor)
void setPaddedSlotWidthBytes(const size_t slot_idx, const int8_t bytes)
int64_t get_initial_agg_val(const TargetInfo &target_info, const QueryMemoryDescriptor &query_mem_desc)
std::string numeric_type_name(const SQLTypeInfo &ti)
Definition: Execute.h:230
bool is_string() const
Definition: sqltypes.h:561
bool is_distinct
Definition: TargetInfo.h:55
Definition: sqldefs.h:79
const int8_t getLogicalSlotWidthBytes(const size_t slot_idx) const
int get_physical_coord_cols() const
Definition: sqltypes.h:451
Definition: sqldefs.h:77
size_t getColOffInBytes(const size_t col_idx) const
void codegenAggregate(GroupByAndAggregate *group_by_and_agg, Executor *executor, const QueryMemoryDescriptor &query_mem_desc, const CompilationOptions &co, const std::vector< llvm::Value * > &target_lvs, const std::tuple< llvm::Value *, llvm::Value * > &agg_out_ptr_w_idx, const std::vector< llvm::Value * > &agg_out_vec, llvm::Value *output_buffer_byte_stream, llvm::Value *out_row_idx, llvm::Value *varlen_output_buffer, int32_t slot_index) const
bool is_columnar_projection(const QueryMemoryDescriptor &query_mem_desc)
Definition: sqldefs.h:86
#define VLOG(n)
Definition: Logger.h:388
#define ROW_FUNC
void codegenSingleSlotSampleExpression(GroupByAndAggregate *group_by_and_agg, Executor *executor, const QueryMemoryDescriptor &query_mem_desc, const CompilationOptions &co, const std::tuple< llvm::Value *, llvm::Value * > &agg_out_ptr_w_idx, const std::vector< llvm::Value * > &agg_out_vec, llvm::Value *output_buffer_byte_stream, llvm::Value *out_row_idx, DiamondCodegen &diamond_codegen) const