OmniSciDB  a5dc49c757
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Groups Pages
TargetExprCodegenBuilder Struct Reference

#include <TargetExprBuilder.h>

+ Collaboration diagram for TargetExprCodegenBuilder:

Public Member Functions

 TargetExprCodegenBuilder (const RelAlgExecutionUnit &ra_exe_unit, const bool is_group_by)
 
void operator() (const Analyzer::Expr *target_expr, const Executor *executor, QueryMemoryDescriptor &query_mem_desc, const CompilationOptions &co)
 
void codegen (GroupByAndAggregate *group_by_and_agg, Executor *executor, const QueryMemoryDescriptor &query_mem_desc, const CompilationOptions &co, const GpuSharedMemoryContext &gpu_smem_context, const std::tuple< llvm::Value *, llvm::Value * > &agg_out_ptr_w_idx, const std::vector< llvm::Value * > &agg_out_vec, llvm::Value *output_buffer_byte_stream, llvm::Value *out_row_idx, llvm::Value *varlen_output_buffer, DiamondCodegen &diamond_codegen) const
 
void codegenSampleExpressions (GroupByAndAggregate *group_by_and_agg, Executor *executor, const QueryMemoryDescriptor &query_mem_desc, const CompilationOptions &co, const std::tuple< llvm::Value *, llvm::Value * > &agg_out_ptr_w_idx, const std::vector< llvm::Value * > &agg_out_vec, llvm::Value *output_buffer_byte_stream, llvm::Value *out_row_idx, DiamondCodegen &diamond_codegen) const
 
void codegenSingleSlotSampleExpression (GroupByAndAggregate *group_by_and_agg, Executor *executor, const QueryMemoryDescriptor &query_mem_desc, const CompilationOptions &co, const std::tuple< llvm::Value *, llvm::Value * > &agg_out_ptr_w_idx, const std::vector< llvm::Value * > &agg_out_vec, llvm::Value *output_buffer_byte_stream, llvm::Value *out_row_idx, DiamondCodegen &diamond_codegen) const
 
void codegenMultiSlotSampleExpressions (GroupByAndAggregate *group_by_and_agg, Executor *executor, const QueryMemoryDescriptor &query_mem_desc, const CompilationOptions &co, const std::tuple< llvm::Value *, llvm::Value * > &agg_out_ptr_w_idx, const std::vector< llvm::Value * > &agg_out_vec, llvm::Value *output_buffer_byte_stream, llvm::Value *out_row_idx, DiamondCodegen &diamond_codegen) const
 
llvm::Value * codegenSlotEmptyKey (llvm::Value *agg_col_ptr, std::vector< llvm::Value * > &target_lvs, Executor *executor, const QueryMemoryDescriptor &query_mem_desc, const int64_t init_val) const
 

Public Attributes

size_t target_index_counter {0}
 
size_t slot_index_counter {0}
 
const RelAlgExecutionUnit & ra_exe_unit
 
std::vector< TargetExprCodegen > target_exprs_to_codegen
 
std::vector< TargetExprCodegen > sample_exprs_to_codegen
 
bool is_group_by
 

Detailed Description

Definition at line 81 of file TargetExprBuilder.h.

Constructor & Destructor Documentation

TargetExprCodegenBuilder::TargetExprCodegenBuilder ( const RelAlgExecutionUnit &  ra_exe_unit,
const bool  is_group_by 
)
inline

Definition at line 82 of file TargetExprBuilder.h.

83  : ra_exe_unit(ra_exe_unit), is_group_by(is_group_by) {}
const RelAlgExecutionUnit & ra_exe_unit

Member Function Documentation

void TargetExprCodegenBuilder::codegen ( GroupByAndAggregate *  group_by_and_agg,
Executor *  executor,
const QueryMemoryDescriptor &  query_mem_desc,
const CompilationOptions &  co,
const GpuSharedMemoryContext &  gpu_smem_context,
const std::tuple< llvm::Value *, llvm::Value * > &  agg_out_ptr_w_idx,
const std::vector< llvm::Value * > &  agg_out_vec,
llvm::Value *  output_buffer_byte_stream,
llvm::Value *  out_row_idx,
llvm::Value *  varlen_output_buffer,
DiamondCodegen &  diamond_codegen 
) const

Definition at line 744 of file TargetExprBuilder.cpp.

References AUTOMATIC_IR_METADATA, and CHECK.

Referenced by GroupByAndAggregate::codegenAggCalls().

755  {
756  CHECK(group_by_and_agg);
757  CHECK(executor);
758  AUTOMATIC_IR_METADATA(executor->cgen_state_.get());
759 
760  // check the target_exprs and find a set of exprs need non-lazy fetch before entering
761  // the expr compilation to avoid a crash during the codegen due to a wrong
762  // classification of expr fetch type (lazy vs. non-lazy), and also we can avoid
763  // unnecessary query recompilation due to `CompilationRetryNoLazyFetch` exception
764  executor->plan_state_->registerNonLazyFetchExpression(target_exprs_to_codegen);
765 
766  for (const auto& target_expr_codegen : target_exprs_to_codegen) {
767  target_expr_codegen.codegen(group_by_and_agg,
768  executor,
769  query_mem_desc,
770  co,
771  gpu_smem_context,
772  agg_out_ptr_w_idx,
773  agg_out_vec,
774  output_buffer_byte_stream,
775  out_row_idx,
776  varlen_output_buffer,
777  diamond_codegen);
778  }
779  if (!sample_exprs_to_codegen.empty()) {
780  codegenSampleExpressions(group_by_and_agg,
781  executor,
782  query_mem_desc,
783  co,
784  agg_out_ptr_w_idx,
785  agg_out_vec,
786  output_buffer_byte_stream,
787  out_row_idx,
788  diamond_codegen);
789  }
790 }
std::vector< TargetExprCodegen > target_exprs_to_codegen
void codegenSampleExpressions(GroupByAndAggregate *group_by_and_agg, Executor *executor, const QueryMemoryDescriptor &query_mem_desc, const CompilationOptions &co, const std::tuple< llvm::Value *, llvm::Value * > &agg_out_ptr_w_idx, const std::vector< llvm::Value * > &agg_out_vec, llvm::Value *output_buffer_byte_stream, llvm::Value *out_row_idx, DiamondCodegen &diamond_codegen) const
std::vector< TargetExprCodegen > sample_exprs_to_codegen
#define AUTOMATIC_IR_METADATA(CGENSTATE)
#define CHECK(condition)
Definition: Logger.h:291

+ Here is the caller graph for this function:

void TargetExprCodegenBuilder::codegenMultiSlotSampleExpressions ( GroupByAndAggregate *  group_by_and_agg,
Executor *  executor,
const QueryMemoryDescriptor &  query_mem_desc,
const CompilationOptions &  co,
const std::tuple< llvm::Value *, llvm::Value * > &  agg_out_ptr_w_idx,
const std::vector< llvm::Value * > &  agg_out_vec,
llvm::Value *  output_buffer_byte_stream,
llvm::Value *  out_row_idx,
DiamondCodegen &  diamond_codegen 
) const

Definition at line 857 of file TargetExprBuilder.cpp.

References AUTOMATIC_IR_METADATA, CHECK, CHECK_GE, CHECK_LT, GroupByAndAggregate::codegenAggArg(), GroupByAndAggregate::codegenAggColumnPtr(), CompilationOptions::device_type, anonymous_namespace{TargetExprBuilder.cpp}::get_initial_agg_val(), GPU, TargetExprCodegen::is_group_by, and QueryMemoryDescriptor::isLogicalSizedColumnsAllowed().

866  {
867  AUTOMATIC_IR_METADATA(executor->cgen_state_.get());
868  CHECK(sample_exprs_to_codegen.size() > 1 ||
869  sample_exprs_to_codegen.front().target_info.sql_type.is_varlen());
871  const auto& first_sample_expr = sample_exprs_to_codegen.front();
872  auto target_lvs = group_by_and_agg->codegenAggArg(first_sample_expr.target_expr, co);
873  CHECK_GE(target_lvs.size(), size_t(1));
874 
875  const auto init_val =
876  get_initial_agg_val(first_sample_expr.target_info, query_mem_desc);
877 
878  llvm::Value* agg_col_ptr{nullptr};
879  if (is_group_by) {
880  const auto agg_column_size_bytes =
881  query_mem_desc.isLogicalSizedColumnsAllowed() &&
882  !first_sample_expr.target_info.sql_type.is_varlen()
883  ? first_sample_expr.target_info.sql_type.get_size()
884  : sizeof(int64_t);
885  agg_col_ptr = group_by_and_agg->codegenAggColumnPtr(output_buffer_byte_stream,
886  out_row_idx,
887  agg_out_ptr_w_idx,
888  query_mem_desc,
889  agg_column_size_bytes,
890  first_sample_expr.base_slot_index,
891  first_sample_expr.target_idx);
892  } else {
893  CHECK_LT(static_cast<size_t>(first_sample_expr.base_slot_index), agg_out_vec.size());
894  agg_col_ptr =
895  executor->castToIntPtrTyIn(agg_out_vec[first_sample_expr.base_slot_index], 64);
896  }
897 
898  auto sample_cas_lv =
899  codegenSlotEmptyKey(agg_col_ptr, target_lvs, executor, query_mem_desc, init_val);
900 
901  DiamondCodegen sample_cfg(
902  sample_cas_lv, executor, false, "sample_valcheck", &diamond_codegen, false);
903 
904  for (const auto& target_expr_codegen : sample_exprs_to_codegen) {
905  target_expr_codegen.codegen(group_by_and_agg,
906  executor,
907  query_mem_desc,
908  co,
909  {},
910  agg_out_ptr_w_idx,
911  agg_out_vec,
912  output_buffer_byte_stream,
913  out_row_idx,
914  /*varlen_output_buffer=*/nullptr,
915  diamond_codegen,
916  &sample_cfg);
917  }
918 }
llvm::Value * codegenAggColumnPtr(llvm::Value *output_buffer_byte_stream, llvm::Value *out_row_idx, const std::tuple< llvm::Value *, llvm::Value * > &agg_out_ptr_w_idx, const QueryMemoryDescriptor &query_mem_desc, const size_t chosen_bytes, const size_t agg_out_off, const size_t target_idx)
Returns the pointer to where the aggregation should be stored.
bool isLogicalSizedColumnsAllowed() const
#define CHECK_GE(x, y)
Definition: Logger.h:306
std::vector< TargetExprCodegen > sample_exprs_to_codegen
#define AUTOMATIC_IR_METADATA(CGENSTATE)
ExecutorDeviceType device_type
#define CHECK_LT(x, y)
Definition: Logger.h:303
llvm::Value * codegenSlotEmptyKey(llvm::Value *agg_col_ptr, std::vector< llvm::Value * > &target_lvs, Executor *executor, const QueryMemoryDescriptor &query_mem_desc, const int64_t init_val) const
std::vector< llvm::Value * > codegenAggArg(const Analyzer::Expr *target_expr, const CompilationOptions &co)
#define CHECK(condition)
Definition: Logger.h:291
int64_t get_initial_agg_val(const TargetInfo &target_info, const QueryMemoryDescriptor &query_mem_desc)

+ Here is the call graph for this function:

void TargetExprCodegenBuilder::codegenSampleExpressions ( GroupByAndAggregate *  group_by_and_agg,
Executor *  executor,
const QueryMemoryDescriptor &  query_mem_desc,
const CompilationOptions &  co,
const std::tuple< llvm::Value *, llvm::Value * > &  agg_out_ptr_w_idx,
const std::vector< llvm::Value * > &  agg_out_vec,
llvm::Value *  output_buffer_byte_stream,
llvm::Value *  out_row_idx,
DiamondCodegen &  diamond_codegen 
) const

Definition at line 792 of file TargetExprBuilder.cpp.

References AUTOMATIC_IR_METADATA, CHECK, CompilationOptions::device_type, and GPU.

801  {
802  AUTOMATIC_IR_METADATA(executor->cgen_state_.get());
803  CHECK(!sample_exprs_to_codegen.empty());
805  if (sample_exprs_to_codegen.size() == 1 &&
806  !sample_exprs_to_codegen.front().target_info.sql_type.is_varlen()) {
807  codegenSingleSlotSampleExpression(group_by_and_agg,
808  executor,
809  query_mem_desc,
810  co,
811  agg_out_ptr_w_idx,
812  agg_out_vec,
813  output_buffer_byte_stream,
814  out_row_idx,
815  diamond_codegen);
816  } else {
817  codegenMultiSlotSampleExpressions(group_by_and_agg,
818  executor,
819  query_mem_desc,
820  co,
821  agg_out_ptr_w_idx,
822  agg_out_vec,
823  output_buffer_byte_stream,
824  out_row_idx,
825  diamond_codegen);
826  }
827 }
void codegenMultiSlotSampleExpressions(GroupByAndAggregate *group_by_and_agg, Executor *executor, const QueryMemoryDescriptor &query_mem_desc, const CompilationOptions &co, const std::tuple< llvm::Value *, llvm::Value * > &agg_out_ptr_w_idx, const std::vector< llvm::Value * > &agg_out_vec, llvm::Value *output_buffer_byte_stream, llvm::Value *out_row_idx, DiamondCodegen &diamond_codegen) const
std::vector< TargetExprCodegen > sample_exprs_to_codegen
#define AUTOMATIC_IR_METADATA(CGENSTATE)
ExecutorDeviceType device_type
#define CHECK(condition)
Definition: Logger.h:291
void codegenSingleSlotSampleExpression(GroupByAndAggregate *group_by_and_agg, Executor *executor, const QueryMemoryDescriptor &query_mem_desc, const CompilationOptions &co, const std::tuple< llvm::Value *, llvm::Value * > &agg_out_ptr_w_idx, const std::vector< llvm::Value * > &agg_out_vec, llvm::Value *output_buffer_byte_stream, llvm::Value *out_row_idx, DiamondCodegen &diamond_codegen) const
void TargetExprCodegenBuilder::codegenSingleSlotSampleExpression ( GroupByAndAggregate *  group_by_and_agg,
Executor *  executor,
const QueryMemoryDescriptor &  query_mem_desc,
const CompilationOptions &  co,
const std::tuple< llvm::Value *, llvm::Value * > &  agg_out_ptr_w_idx,
const std::vector< llvm::Value * > &  agg_out_vec,
llvm::Value *  output_buffer_byte_stream,
llvm::Value *  out_row_idx,
DiamondCodegen &  diamond_codegen 
) const

Definition at line 829 of file TargetExprBuilder.cpp.

References AUTOMATIC_IR_METADATA, CHECK, CHECK_EQ, CompilationOptions::device_type, and GPU.

838  {
839  AUTOMATIC_IR_METADATA(executor->cgen_state_.get());
840  CHECK_EQ(size_t(1), sample_exprs_to_codegen.size());
841  CHECK(!sample_exprs_to_codegen.front().target_info.sql_type.is_varlen());
843  // no need for the atomic if we only have one SAMPLE target
844  sample_exprs_to_codegen.front().codegen(group_by_and_agg,
845  executor,
846  query_mem_desc,
847  co,
848  {},
849  agg_out_ptr_w_idx,
850  agg_out_vec,
851  output_buffer_byte_stream,
852  out_row_idx,
853  /*varlen_output_buffer=*/nullptr,
854  diamond_codegen);
855 }
#define CHECK_EQ(x, y)
Definition: Logger.h:301
std::vector< TargetExprCodegen > sample_exprs_to_codegen
#define AUTOMATIC_IR_METADATA(CGENSTATE)
ExecutorDeviceType device_type
#define CHECK(condition)
Definition: Logger.h:291
llvm::Value * TargetExprCodegenBuilder::codegenSlotEmptyKey ( llvm::Value *  agg_col_ptr,
std::vector< llvm::Value * > &  target_lvs,
Executor *  executor,
const QueryMemoryDescriptor &  query_mem_desc,
const int64_t  init_val 
) const

Definition at line 920 of file TargetExprBuilder.cpp.

References AUTOMATIC_IR_METADATA, CHECK_EQ, get_int_type(), QueryMemoryDescriptor::isLogicalSizedColumnsAllowed(), LL_BUILDER, LL_CONTEXT, LL_INT, and UNREACHABLE.

925  {
926  AUTOMATIC_IR_METADATA(executor->cgen_state_.get());
927  const auto& first_sample_expr = sample_exprs_to_codegen.front();
928  const auto first_sample_slot_bytes =
929  first_sample_expr.target_info.sql_type.is_varlen()
930  ? sizeof(int64_t)
931  : first_sample_expr.target_info.sql_type.get_size();
932  llvm::Value* target_lv_casted{nullptr};
933  // deciding whether proper casting is required for the first sample's slot:
934  if (first_sample_expr.target_info.sql_type.is_varlen()) {
935  target_lv_casted =
936  LL_BUILDER.CreatePtrToInt(target_lvs.front(), llvm::Type::getInt64Ty(LL_CONTEXT));
937  } else if (first_sample_expr.target_info.sql_type.is_fp()) {
938  // Initialization value for SAMPLE on a float column should be 0
939  CHECK_EQ(init_val, 0);
940  if (query_mem_desc.isLogicalSizedColumnsAllowed()) {
941  target_lv_casted = executor->cgen_state_->ir_builder_.CreateFPToSI(
942  target_lvs.front(),
943  first_sample_slot_bytes == sizeof(float) ? llvm::Type::getInt32Ty(LL_CONTEXT)
944  : llvm::Type::getInt64Ty(LL_CONTEXT));
945  } else {
946  target_lv_casted = executor->cgen_state_->ir_builder_.CreateFPToSI(
947  target_lvs.front(), llvm::Type::getInt64Ty(LL_CONTEXT));
948  }
949  } else if (first_sample_slot_bytes != sizeof(int64_t) &&
950  !query_mem_desc.isLogicalSizedColumnsAllowed()) {
951  target_lv_casted =
952  executor->cgen_state_->ir_builder_.CreateCast(llvm::Instruction::CastOps::SExt,
953  target_lvs.front(),
954  llvm::Type::getInt64Ty(LL_CONTEXT));
955  } else {
956  target_lv_casted = target_lvs.front();
957  }
958 
959  std::string slot_empty_cas_func_name("slotEmptyKeyCAS");
960  llvm::Value* init_val_lv{LL_INT(init_val)};
961  if (query_mem_desc.isLogicalSizedColumnsAllowed() &&
962  !first_sample_expr.target_info.sql_type.is_varlen()) {
963  // add proper suffix to the function name:
964  switch (first_sample_slot_bytes) {
965  case 1:
966  slot_empty_cas_func_name += "_int8";
967  break;
968  case 2:
969  slot_empty_cas_func_name += "_int16";
970  break;
971  case 4:
972  slot_empty_cas_func_name += "_int32";
973  break;
974  case 8:
975  break;
976  default:
977  UNREACHABLE() << "Invalid slot size for slotEmptyKeyCAS function.";
978  break;
979  }
980  if (first_sample_slot_bytes != sizeof(int64_t)) {
981  init_val_lv = llvm::ConstantInt::get(
982  get_int_type(first_sample_slot_bytes * 8, LL_CONTEXT), init_val);
983  }
984  }
985 
986  auto sample_cas_lv = executor->cgen_state_->emitExternalCall(
987  slot_empty_cas_func_name,
988  llvm::Type::getInt1Ty(executor->cgen_state_->context_),
989  {agg_col_ptr, target_lv_casted, init_val_lv});
990  return sample_cas_lv;
991 }
#define LL_BUILDER
#define CHECK_EQ(x, y)
Definition: Logger.h:301
bool isLogicalSizedColumnsAllowed() const
#define UNREACHABLE()
Definition: Logger.h:338
llvm::Type * get_int_type(const int width, llvm::LLVMContext &context)
#define LL_INT(v)
std::vector< TargetExprCodegen > sample_exprs_to_codegen
#define LL_CONTEXT
#define AUTOMATIC_IR_METADATA(CGENSTATE)

+ Here is the call graph for this function:

void TargetExprCodegenBuilder::operator() ( const Analyzer::Expr *  target_expr,
const Executor *  executor,
QueryMemoryDescriptor &  query_mem_desc,
const CompilationOptions &  co 
)

Definition at line 652 of file TargetExprBuilder.cpp.

References agg_arg(), anonymous_namespace{TargetExprBuilder.cpp}::agg_fn_base_names(), TargetInfo::agg_kind, AUTOMATIC_IR_METADATA, CHECK, CHECK_EQ, constrained_not_null(), CompilationOptions::device_type, g_bigint_count, get_target_info(), QueryMemoryDescriptor::getPaddedSlotWidthBytes(), QueryMemoryDescriptor::getQueryDescriptionType(), GPU, TargetInfo::is_agg, anonymous_namespace{TargetExprBuilder.cpp}::is_columnar_projection(), TargetExprCodegen::is_group_by, anonymous_namespace{TargetExprBuilder.cpp}::is_varlen_projection(), kAPPROX_QUANTILE, kSAMPLE, kSINGLE_VALUE, kUNNEST, heavyai::NonGroupedAggregate, QueryMemoryDescriptor::setPaddedSlotWidthBytes(), TargetInfo::skip_null_val, TargetInfo::sql_type, TargetExprCodegen::target_info, to_string(), and VLOG.

655  {
656  AUTOMATIC_IR_METADATA(executor->cgen_state_.get());
657  if (query_mem_desc.getPaddedSlotWidthBytes(slot_index_counter) == 0) {
658  CHECK(!dynamic_cast<const Analyzer::AggExpr*>(target_expr));
661  return;
662  }
663  if (dynamic_cast<const Analyzer::UOper*>(target_expr) &&
664  static_cast<const Analyzer::UOper*>(target_expr)->get_optype() == kUNNEST) {
665  throw std::runtime_error("UNNEST not supported in the projection list yet.");
666  }
667  if ((executor->plan_state_->isLazyFetchColumn(target_expr) || !is_group_by) &&
668  (static_cast<size_t>(query_mem_desc.getPaddedSlotWidthBytes(slot_index_counter)) <
669  sizeof(int64_t)) &&
670  !is_columnar_projection(query_mem_desc)) {
671  // TODO(miyu): enable different byte width in the layout w/o padding
672  VLOG(2) << "Throw CompilationRetryNoCompaction exception";
674  }
675 
676  if (is_columnar_projection(query_mem_desc) &&
677  executor->plan_state_->isLazyFetchColumn(target_expr)) {
678  // For columnar outputs, we need to pad lazy fetched columns to 8 bytes to allow the
679  // lazy fetch index to be placed in the column. The QueryMemoryDescriptor is created
680  // before Lazy Fetch information is known, therefore we need to update the QMD with
681  // the new slot size width bytes for these columns.
682  VLOG(2) << "Set padded slot-width byte for the slot-"
683  << std::to_string(slot_index_counter) << " to 8";
684  query_mem_desc.setPaddedSlotWidthBytes(slot_index_counter, int8_t(8));
685  CHECK_EQ(query_mem_desc.getPaddedSlotWidthBytes(slot_index_counter), int8_t(8));
686  }
687 
688  auto target_info = get_target_info(target_expr, g_bigint_count);
689  auto arg_expr = agg_arg(target_expr);
690  if (arg_expr) {
691  if (target_info.agg_kind == kSINGLE_VALUE || target_info.agg_kind == kSAMPLE ||
692  target_info.agg_kind == kAPPROX_QUANTILE) {
693  target_info.skip_null_val = false;
694  } else if (query_mem_desc.getQueryDescriptionType() ==
696  !arg_expr->get_type_info().is_varlen()) {
697  // TODO: COUNT is currently not null-aware for varlen types. Need to add proper code
698  // generation for handling varlen nulls.
699  target_info.skip_null_val = true;
700  } else if (constrained_not_null(arg_expr, ra_exe_unit.quals)) {
701  target_info.skip_null_val = false;
702  }
703  }
704 
705  if (!(query_mem_desc.getQueryDescriptionType() ==
707  (co.device_type == ExecutorDeviceType::GPU) && target_info.is_agg &&
708  (target_info.agg_kind == kSAMPLE)) {
709  sample_exprs_to_codegen.emplace_back(target_expr,
710  target_info,
713  is_group_by);
714  } else {
715  target_exprs_to_codegen.emplace_back(target_expr,
716  target_info,
719  is_group_by);
720  }
721 
722  const auto agg_fn_names = agg_fn_base_names(
723  target_info, is_varlen_projection(target_expr, target_info.sql_type));
724  slot_index_counter += agg_fn_names.size();
725 }
const RelAlgExecutionUnit & ra_exe_unit
const Analyzer::Expr * agg_arg(const Analyzer::Expr *expr)
#define CHECK_EQ(x, y)
Definition: Logger.h:301
bool constrained_not_null(const Analyzer::Expr *expr, const std::list< std::shared_ptr< Analyzer::Expr >> &quals)
NonGroupedAggregate
Definition: enums.h:58
std::vector< std::string > agg_fn_base_names(const TargetInfo &target_info, const bool is_varlen_projection)
std::vector< TargetExprCodegen > target_exprs_to_codegen
TargetInfo get_target_info(const Analyzer::Expr *target_expr, const bool bigint_count)
Definition: TargetInfo.h:92
std::string to_string(char const *&&v)
bool is_varlen_projection(const Analyzer::Expr *target_expr, const SQLTypeInfo &ti)
bool g_bigint_count
std::vector< TargetExprCodegen > sample_exprs_to_codegen
const int8_t getPaddedSlotWidthBytes(const size_t slot_idx) const
#define AUTOMATIC_IR_METADATA(CGENSTATE)
QueryDescriptionType getQueryDescriptionType() const
ExecutorDeviceType device_type
std::list< std::shared_ptr< Analyzer::Expr > > quals
#define CHECK(condition)
Definition: Logger.h:291
void setPaddedSlotWidthBytes(const size_t slot_idx, const int8_t bytes)
bool is_columnar_projection(const QueryMemoryDescriptor &query_mem_desc)
#define VLOG(n)
Definition: Logger.h:388

+ Here is the call graph for this function:

Member Data Documentation

bool TargetExprCodegenBuilder::is_group_by

Definition at line 149 of file TargetExprBuilder.h.

const RelAlgExecutionUnit& TargetExprCodegenBuilder::ra_exe_unit

Definition at line 144 of file TargetExprBuilder.h.

std::vector<TargetExprCodegen> TargetExprCodegenBuilder::sample_exprs_to_codegen

Definition at line 147 of file TargetExprBuilder.h.

size_t TargetExprCodegenBuilder::slot_index_counter {0}

Definition at line 142 of file TargetExprBuilder.h.

std::vector<TargetExprCodegen> TargetExprCodegenBuilder::target_exprs_to_codegen

Definition at line 146 of file TargetExprBuilder.h.

size_t TargetExprCodegenBuilder::target_index_counter {0}

Definition at line 141 of file TargetExprBuilder.h.


The documentation for this struct was generated from the following files: