OmniSciDB  a5dc49c757
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Groups Pages
RuntimeFunctions.h File Reference
#include "Shared/funcannotations.h"
#include <cassert>
#include <cstdint>
#include <ctime>
#include <limits>
#include <type_traits>
+ Include dependency graph for RuntimeFunctions.h:
+ This graph shows which files directly or indirectly include this file:

Go to the source code of this file.

Macros

#define EMPTY_KEY_64   std::numeric_limits<int64_t>::max()
 
#define EMPTY_KEY_32   std::numeric_limits<int32_t>::max()
 
#define EMPTY_KEY_16   std::numeric_limits<int16_t>::max()
 
#define EMPTY_KEY_8   std::numeric_limits<int8_t>::max()
 

Enumerations

enum  RuntimeInterruptFlags { INT_CHECK = 0, INT_ABORT = -1, INT_RESET = -2 }
 

Functions

RUNTIME_EXPORT int64_t agg_sum (int64_t *agg, const int64_t val)
 
RUNTIME_EXPORT int64_t agg_sum_if (int64_t *agg, const int64_t val, const int8_t cond)
 
RUNTIME_EXPORT void agg_max (int64_t *agg, const int64_t val)
 
RUNTIME_EXPORT void agg_min (int64_t *agg, const int64_t val)
 
RUNTIME_EXPORT void agg_sum_double (int64_t *agg, const double val)
 
RUNTIME_EXPORT void agg_sum_if_double (int64_t *agg, const double val, const int8_t cond)
 
RUNTIME_EXPORT void agg_max_double (int64_t *agg, const double val)
 
RUNTIME_EXPORT void agg_min_double (int64_t *agg, const double val)
 
RUNTIME_EXPORT int32_t agg_sum_int32_skip_val (int32_t *agg, const int32_t val, const int32_t skip_val)
 
RUNTIME_EXPORT int64_t agg_sum_skip_val (int64_t *agg, const int64_t val, const int64_t skip_val)
 
RUNTIME_EXPORT int32_t agg_sum_if_int32_skip_val (int32_t *agg, const int32_t val, const int32_t skip_val, const int8_t cond)
 
RUNTIME_EXPORT int64_t agg_sum_if_skip_val (int64_t *agg, const int64_t val, const int64_t skip_val, const int8_t cond)
 
RUNTIME_EXPORT void agg_max_skip_val (int64_t *agg, const int64_t val, const int64_t skip_val)
 
RUNTIME_EXPORT void agg_min_skip_val (int64_t *agg, const int64_t val, const int64_t skip_val)
 
RUNTIME_EXPORT void agg_sum_float_skip_val (int32_t *agg, const float val, const float skip_val)
 
RUNTIME_EXPORT void agg_sum_double_skip_val (int64_t *agg, const double val, const double skip_val)
 
RUNTIME_EXPORT void agg_sum_if_float_skip_val (int32_t *agg, const float val, const float skip_val, const int8_t cond)
 
RUNTIME_EXPORT void agg_sum_if_double_skip_val (int64_t *agg, const double val, const double skip_val, const int8_t cond)
 
RUNTIME_EXPORT void agg_max_double_skip_val (int64_t *agg, const double val, const double skip_val)
 
RUNTIME_EXPORT void agg_min_double_skip_val (int64_t *agg, const double val, const double skip_val)
 
RUNTIME_EXPORT int32_t agg_sum_int32 (int32_t *agg, const int32_t val)
 
RUNTIME_EXPORT int32_t agg_sum_if_int32 (int32_t *agg, const int32_t val, const int8_t cond)
 
RUNTIME_EXPORT void agg_max_int32 (int32_t *agg, const int32_t val)
 
RUNTIME_EXPORT void agg_max_int16 (int16_t *agg, const int16_t val)
 
RUNTIME_EXPORT void agg_max_int8 (int8_t *agg, const int8_t val)
 
RUNTIME_EXPORT void agg_min_int32 (int32_t *agg, const int32_t val)
 
RUNTIME_EXPORT void agg_min_int16 (int16_t *agg, const int16_t val)
 
RUNTIME_EXPORT void agg_min_int8 (int8_t *agg, const int8_t val)
 
RUNTIME_EXPORT void agg_sum_float (int32_t *agg, const float val)
 
RUNTIME_EXPORT void agg_sum_if_float (int32_t *agg, const float val, const int8_t cond)
 
RUNTIME_EXPORT void agg_max_float (int32_t *agg, const float val)
 
RUNTIME_EXPORT void agg_min_float (int32_t *agg, const float val)
 
RUNTIME_EXPORT void agg_max_int32_skip_val (int32_t *agg, const int32_t val, const int32_t skip_val)
 
RUNTIME_EXPORT void agg_max_int16_skip_val (int16_t *agg, const int16_t val, const int16_t skip_val)
 
RUNTIME_EXPORT void agg_max_int8_skip_val (int8_t *agg, const int8_t val, const int8_t skip_val)
 
RUNTIME_EXPORT void agg_min_int32_skip_val (int32_t *agg, const int32_t val, const int32_t skip_val)
 
RUNTIME_EXPORT void agg_min_int16_skip_val (int16_t *agg, const int16_t val, const int16_t skip_val)
 
RUNTIME_EXPORT void agg_min_int8_skip_val (int8_t *agg, const int8_t val, const int8_t skip_val)
 
RUNTIME_EXPORT void agg_max_float_skip_val (int32_t *agg, const float val, const float skip_val)
 
RUNTIME_EXPORT void agg_min_float_skip_val (int32_t *agg, const float val, const float skip_val)
 
RUNTIME_EXPORT void agg_count_distinct_bitmap (int64_t *agg, const int64_t val, const int64_t min_val, const int64_t bucket_size)
 
RUNTIME_EXPORT uint32_t key_hash (const int64_t *key, const uint32_t key_qw_count, const uint32_t key_byte_width)
 
RUNTIME_EXPORT int64_t * get_group_value (int64_t *groups_buffer, const uint32_t groups_buffer_entry_count, const int64_t *key, const uint32_t key_count, const uint32_t key_width, const uint32_t row_size_quad)
 
bool RUNTIME_EXPORT check_interrupt ()
 
bool RUNTIME_EXPORT check_interrupt_init (unsigned command)
 
RUNTIME_EXPORT int64_t * get_group_value_with_watchdog (int64_t *groups_buffer, const uint32_t groups_buffer_entry_count, const int64_t *key, const uint32_t key_count, const uint32_t key_width, const uint32_t row_size_quad)
 
RUNTIME_EXPORT int64_t * get_group_value_columnar (int64_t *groups_buffer, const uint32_t groups_buffer_entry_count, const int64_t *key, const uint32_t key_qw_count)
 
RUNTIME_EXPORT int64_t * get_group_value_columnar_with_watchdog (int64_t *groups_buffer, const uint32_t groups_buffer_entry_count, const int64_t *key, const uint32_t key_qw_count)
 
RUNTIME_EXPORT int64_t * get_group_value_fast (int64_t *groups_buffer, const int64_t key, const int64_t min_key, const int64_t bucket, const uint32_t row_size_quad)
 
RUNTIME_EXPORT int64_t * get_group_value_fast_with_original_key (int64_t *groups_buffer, const int64_t key, const int64_t orig_key, const int64_t min_key, const int64_t bucket, const uint32_t row_size_quad)
 
RUNTIME_EXPORT uint32_t get_columnar_group_bin_offset (int64_t *key_base_ptr, const int64_t key, const int64_t min_key, const int64_t bucket)
 
RUNTIME_EXPORT int64_t * get_matching_group_value_perfect_hash (int64_t *groups_buffer, const uint32_t h, const int64_t *key, const uint32_t key_qw_count, const uint32_t row_size_quad)
 
RUNTIME_EXPORT int64_t * get_matching_group_value_perfect_hash_keyless (int64_t *groups_buffer, const uint32_t hashed_index, const uint32_t row_size_quad)
 
RUNTIME_EXPORT int32_t * get_bucketized_hash_slot (int32_t *buff, const int64_t key, const int64_t min_key, const int64_t translated_null_val, const int64_t bucket_normalization=1)
 
RUNTIME_EXPORT int32_t * get_hash_slot_bitwise_eq (int32_t *buff, const int64_t key, const int64_t min_key, const int64_t translated_null_val)
 
RUNTIME_EXPORT int32_t * get_hash_slot (int32_t *buff, const int64_t key, const int64_t min_key)
 
RUNTIME_EXPORT int32_t * get_hash_slot_sharded (int32_t *buff, const int64_t key, const int64_t min_key, const uint32_t entry_count_per_shard, const uint32_t num_shards, const uint32_t device_count)
 
RUNTIME_EXPORT int32_t * get_bucketized_hash_slot_sharded (int32_t *buff, const int64_t key, const int64_t min_key, const int64_t translated_null_val, const uint32_t entry_count_per_shard, const uint32_t num_shards, const uint32_t device_count, const int64_t bucket_normalization)
 
RUNTIME_EXPORT int32_t * get_hash_slot_sharded_opt (int32_t *buff, const int64_t key, const int64_t min_key, const uint32_t entry_count_per_shard, const uint32_t shard, const uint32_t num_shards, const uint32_t device_count)
 
RUNTIME_EXPORT int32_t * get_bucketized_hash_slot_sharded_opt (int32_t *buff, const int64_t key, const int64_t min_key, const int64_t translated_null_val, const uint32_t entry_count_per_shard, const uint32_t shard, const uint32_t num_shards, const uint32_t device_count, const int64_t bucket_normalization)
 
RUNTIME_EXPORT int fill_one_to_one_hashtable (size_t idx, int32_t *entry_ptr, const int32_t invalid_slot_val)
 
RUNTIME_EXPORT int fill_hashtable_for_semi_join (size_t idx, int32_t *entry_ptr, const int32_t invalid_slot_val)
 
RUNTIME_EXPORT void linear_probabilistic_count (uint8_t *bitmap, const uint32_t bitmap_bytes, const uint8_t *key_bytes, const uint32_t key_len)
 
RUNTIME_EXPORT int64_t fixed_width_int_decode_noinline (const int8_t *byte_stream, const int32_t byte_width, const int64_t pos)
 
RUNTIME_EXPORT int64_t fixed_width_unsigned_decode_noinline (const int8_t *byte_stream, const int32_t byte_width, const int64_t pos)
 
RUNTIME_EXPORT float fixed_width_float_decode_noinline (const int8_t *byte_stream, const int64_t pos)
 
RUNTIME_EXPORT double fixed_width_double_decode_noinline (const int8_t *byte_stream, const int64_t pos)
 
RUNTIME_EXPORT int64_t fixed_width_small_date_decode_noinline (const int8_t *byte_stream, const int32_t byte_width, const int32_t null_val, const int64_t ret_null_val, const int64_t pos)
 
DEVICE NEVER_INLINE int64_t SUFFIX() fixed_width_date_encode_noinline (const int64_t cur_col_val, const int32_t null_val, const int64_t ret_null_val)
 
template<typename T = int64_t>
T get_empty_key ()
 
template<>
int32_t get_empty_key ()
 

Macro Definition Documentation

#define EMPTY_KEY_16   std::numeric_limits<int16_t>::max()

Definition at line 159 of file RuntimeFunctions.h.

#define EMPTY_KEY_32   std::numeric_limits<int32_t>::max()

Definition at line 158 of file RuntimeFunctions.h.

Referenced by get_empty_key().

#define EMPTY_KEY_64   std::numeric_limits<int64_t>::max()

Definition at line 157 of file RuntimeFunctions.h.

Referenced by get_empty_key().

#define EMPTY_KEY_8   std::numeric_limits<int8_t>::max()

Definition at line 160 of file RuntimeFunctions.h.

Enumeration Type Documentation

Enumerator
INT_CHECK 
INT_ABORT 
INT_RESET 

Definition at line 174 of file RuntimeFunctions.h.

Function Documentation

RUNTIME_EXPORT void agg_count_distinct_bitmap ( int64_t *  agg,
const int64_t  val,
const int64_t  min_val,
const int64_t  bucket_size 
)

Definition at line 366 of file RuntimeFunctions.cpp.

Referenced by agg_count_distinct_bitmap_skip_val(), WindowFunctionContext::fillPartitionEnd(), WindowFunctionContext::fillPartitionStart(), anonymous_namespace{WindowContext.cpp}::index_to_partition_end(), and InValuesBitmap::InValuesBitmap().

370  {
371  uint64_t bitmap_idx = val - min_val;
372  if (1 < bucket_size) {
373  bitmap_idx /= static_cast<uint64_t>(bucket_size);
374  }
375  reinterpret_cast<int8_t*>(*agg)[bitmap_idx >> 3] |= (1 << (bitmap_idx & 7));
376 }

+ Here is the caller graph for this function:

RUNTIME_EXPORT void agg_max ( int64_t *  agg,
const int64_t  val 
)

Definition at line 1140 of file RuntimeFunctions.cpp.

1140  {
1141  *agg = std::max(*agg, val);
1142 }
RUNTIME_EXPORT void agg_max_double ( int64_t *  agg,
const double  val 
)

Definition at line 1436 of file RuntimeFunctions.cpp.

1437  {
1438  const auto r = std::max(*reinterpret_cast<const double*>(agg), val);
1439  *agg = *(reinterpret_cast<const int64_t*>(may_alias_ptr(&r)));
1440 }
RUNTIME_EXPORT void agg_max_double_skip_val ( int64_t *  agg,
const double  val,
const double  skip_val 
)

Referenced by Executor::reduceResults().

+ Here is the caller graph for this function:

RUNTIME_EXPORT void agg_max_float ( int32_t *  agg,
const float  val 
)

Definition at line 1489 of file RuntimeFunctions.cpp.

1490  {
1491  const auto r = std::max(*reinterpret_cast<const float*>(agg), val);
1492  *agg = *(reinterpret_cast<const int32_t*>(may_alias_ptr(&r)));
1493 }
RUNTIME_EXPORT void agg_max_float_skip_val ( int32_t *  agg,
const float  val,
const float  skip_val 
)

Referenced by Executor::reduceResults().

+ Here is the caller graph for this function:

RUNTIME_EXPORT void agg_max_int16 ( int16_t *  agg,
const int16_t  val 
)
RUNTIME_EXPORT void agg_max_int16_skip_val ( int16_t *  agg,
const int16_t  val,
const int16_t  skip_val 
)
RUNTIME_EXPORT void agg_max_int32 ( int32_t *  agg,
const int32_t  val 
)
RUNTIME_EXPORT void agg_max_int32_skip_val ( int32_t *  agg,
const int32_t  val,
const int32_t  skip_val 
)
RUNTIME_EXPORT void agg_max_int8 ( int8_t *  agg,
const int8_t  val 
)
RUNTIME_EXPORT void agg_max_int8_skip_val ( int8_t *  agg,
const int8_t  val,
const int8_t  skip_val 
)
RUNTIME_EXPORT void agg_max_skip_val ( int64_t *  agg,
const int64_t  val,
const int64_t  skip_val 
)

Referenced by Executor::reduceResults().

+ Here is the caller graph for this function:

RUNTIME_EXPORT void agg_min ( int64_t *  agg,
const int64_t  val 
)

Definition at line 1144 of file RuntimeFunctions.cpp.

1144  {
1145  *agg = std::min(*agg, val);
1146 }
RUNTIME_EXPORT void agg_min_double ( int64_t *  agg,
const double  val 
)

Definition at line 1442 of file RuntimeFunctions.cpp.

1443  {
1444  const auto r = std::min(*reinterpret_cast<const double*>(agg), val);
1445  *agg = *(reinterpret_cast<const int64_t*>(may_alias_ptr(&r)));
1446 }
RUNTIME_EXPORT void agg_min_double_skip_val ( int64_t *  agg,
const double  val,
const double  skip_val 
)

Referenced by Executor::reduceResults().

+ Here is the caller graph for this function:

RUNTIME_EXPORT void agg_min_float ( int32_t *  agg,
const float  val 
)

Definition at line 1495 of file RuntimeFunctions.cpp.

1496  {
1497  const auto r = std::min(*reinterpret_cast<const float*>(agg), val);
1498  *agg = *(reinterpret_cast<const int32_t*>(may_alias_ptr(&r)));
1499 }
RUNTIME_EXPORT void agg_min_float_skip_val ( int32_t *  agg,
const float  val,
const float  skip_val 
)

Referenced by Executor::reduceResults().

+ Here is the caller graph for this function:

RUNTIME_EXPORT void agg_min_int16 ( int16_t *  agg,
const int16_t  val 
)
RUNTIME_EXPORT void agg_min_int16_skip_val ( int16_t *  agg,
const int16_t  val,
const int16_t  skip_val 
)
RUNTIME_EXPORT void agg_min_int32 ( int32_t *  agg,
const int32_t  val 
)
RUNTIME_EXPORT void agg_min_int32_skip_val ( int32_t *  agg,
const int32_t  val,
const int32_t  skip_val 
)
RUNTIME_EXPORT void agg_min_int8 ( int8_t *  agg,
const int8_t  val 
)
RUNTIME_EXPORT void agg_min_int8_skip_val ( int8_t *  agg,
const int8_t  val,
const int8_t  skip_val 
)
RUNTIME_EXPORT void agg_min_skip_val ( int64_t *  agg,
const int64_t  val,
const int64_t  skip_val 
)

Referenced by Executor::reduceResults().

+ Here is the caller graph for this function:

RUNTIME_EXPORT int64_t agg_sum ( int64_t *  agg,
const int64_t  val 
)

Definition at line 1128 of file RuntimeFunctions.cpp.

Referenced by agg_sum_if(), and agg_sum_skip_val().

1128  {
1129  const auto old = *agg;
1130  *agg += val;
1131  return old;
1132 }

+ Here is the caller graph for this function:

RUNTIME_EXPORT void agg_sum_double ( int64_t *  agg,
const double  val 
)

Definition at line 1422 of file RuntimeFunctions.cpp.

Referenced by agg_sum_if_double().

1423  {
1424  const auto r = *reinterpret_cast<const double*>(agg) + val;
1425  *agg = *reinterpret_cast<const int64_t*>(may_alias_ptr(&r));
1426 }

+ Here is the caller graph for this function:

RUNTIME_EXPORT void agg_sum_double_skip_val ( int64_t *  agg,
const double  val,
const double  skip_val 
)

Referenced by Executor::reduceResults().

+ Here is the caller graph for this function:

RUNTIME_EXPORT void agg_sum_float ( int32_t *  agg,
const float  val 
)

Definition at line 1475 of file RuntimeFunctions.cpp.

Referenced by agg_sum_if_float().

1476  {
1477  const auto r = *reinterpret_cast<const float*>(agg) + val;
1478  *agg = *reinterpret_cast<const int32_t*>(may_alias_ptr(&r));
1479 }

+ Here is the caller graph for this function:

RUNTIME_EXPORT void agg_sum_float_skip_val ( int32_t *  agg,
const float  val,
const float  skip_val 
)

Referenced by Executor::reduceResults().

+ Here is the caller graph for this function:

RUNTIME_EXPORT int64_t agg_sum_if ( int64_t *  agg,
const int64_t  val,
const int8_t  cond 
)

Definition at line 1134 of file RuntimeFunctions.cpp.

References agg_sum().

1136  {
1137  return cond ? agg_sum(agg, val) : *agg;
1138 }
RUNTIME_EXPORT ALWAYS_INLINE int64_t agg_sum(int64_t *agg, const int64_t val)

+ Here is the call graph for this function:

RUNTIME_EXPORT void agg_sum_if_double ( int64_t *  agg,
const double  val,
const int8_t  cond 
)

Definition at line 1428 of file RuntimeFunctions.cpp.

References agg_sum_double().

1430  {
1431  if (cond) {
1432  agg_sum_double(agg, val);
1433  }
1434 }
RUNTIME_EXPORT ALWAYS_INLINE void agg_sum_double(int64_t *agg, const double val)

+ Here is the call graph for this function:

RUNTIME_EXPORT void agg_sum_if_double_skip_val ( int64_t *  agg,
const double  val,
const double  skip_val,
const int8_t  cond 
)
RUNTIME_EXPORT void agg_sum_if_float ( int32_t *  agg,
const float  val,
const int8_t  cond 
)
RUNTIME_EXPORT void agg_sum_if_float_skip_val ( int32_t *  agg,
const float  val,
const float  skip_val,
const int8_t  cond 
)
RUNTIME_EXPORT int32_t agg_sum_if_int32 ( int32_t *  agg,
const int32_t  val,
const int8_t  cond 
)

Definition at line 1217 of file RuntimeFunctions.cpp.

References agg_sum_int32().

1219  {
1220  return cond ? agg_sum_int32(agg, val) : *agg;
1221 }
RUNTIME_EXPORT ALWAYS_INLINE int32_t agg_sum_int32(int32_t *agg, const int32_t val)

+ Here is the call graph for this function:

RUNTIME_EXPORT int32_t agg_sum_if_int32_skip_val ( int32_t *  agg,
const int32_t  val,
const int32_t  skip_val,
const int8_t  cond 
)

Definition at line 1327 of file RuntimeFunctions.cpp.

References agg_sum_int32_skip_val().

1330  {
1331  return cond ? agg_sum_int32_skip_val(agg, val, skip_val) : *agg;
1332 }
RUNTIME_EXPORT ALWAYS_INLINE int32_t agg_sum_int32_skip_val(int32_t *agg, const int32_t val, const int32_t skip_val)

+ Here is the call graph for this function:

RUNTIME_EXPORT int64_t agg_sum_if_skip_val ( int64_t *  agg,
const int64_t  val,
const int64_t  skip_val,
const int8_t  cond 
)

Definition at line 1319 of file RuntimeFunctions.cpp.

References agg_sum_skip_val().

1322  {
1323  return cond ? agg_sum_skip_val(agg, val, skip_val) : *agg;
1324 }
RUNTIME_EXPORT ALWAYS_INLINE int64_t agg_sum_skip_val(int64_t *agg, const int64_t val, const int64_t skip_val)

+ Here is the call graph for this function:

RUNTIME_EXPORT int32_t agg_sum_int32 ( int32_t *  agg,
const int32_t  val 
)

Definition at line 1210 of file RuntimeFunctions.cpp.

Referenced by agg_sum_if_int32(), and agg_sum_int32_skip_val().

1211  {
1212  const auto old = *agg;
1213  *agg += val;
1214  return old;
1215 }

+ Here is the caller graph for this function:

RUNTIME_EXPORT int32_t agg_sum_int32_skip_val ( int32_t *  agg,
const int32_t  val,
const int32_t  skip_val 
)

Definition at line 1306 of file RuntimeFunctions.cpp.

References agg_sum_int32().

Referenced by agg_sum_if_int32_skip_val().

1306  {
1307  const auto old = *agg;
1308  if (val != skip_val) {
1309  if (old != skip_val) {
1310  return agg_sum_int32(agg, val);
1311  } else {
1312  *agg = val;
1313  }
1314  }
1315  return old;
1316 }
RUNTIME_EXPORT ALWAYS_INLINE int32_t agg_sum_int32(int32_t *agg, const int32_t val)

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

RUNTIME_EXPORT int64_t agg_sum_skip_val ( int64_t *  agg,
const int64_t  val,
const int64_t  skip_val 
)

Definition at line 1291 of file RuntimeFunctions.cpp.

References agg_sum().

Referenced by agg_sum_if_skip_val(), and Executor::reduceResults().

1293  {
1294  const auto old = *agg;
1295  if (val != skip_val) {
1296  if (old != skip_val) {
1297  return agg_sum(agg, val);
1298  } else {
1299  *agg = val;
1300  }
1301  }
1302  return old;
1303 }
RUNTIME_EXPORT ALWAYS_INLINE int64_t agg_sum(int64_t *agg, const int64_t val)

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

bool RUNTIME_EXPORT check_interrupt ( )

Definition at line 159 of file cuda_mapd_rt.cu.

References check_interrupt_init(), INT_CHECK, and runtime_interrupt_flag.

Referenced by check_interrupt_rt(), and ColumnFetcher::linearizeFixedLenArrayColFrags().

159  {
160  return (runtime_interrupt_flag == 1) ? true : false;
161 }
__device__ int32_t runtime_interrupt_flag
Definition: cuda_mapd_rt.cu:95

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

bool RUNTIME_EXPORT check_interrupt_init ( unsigned  command)

Definition at line 2523 of file RuntimeFunctions.cpp.

References INT_ABORT, INT_CHECK, INT_RESET, and runtime_interrupt_flag.

Referenced by check_interrupt(), Executor::interrupt(), and Executor::resetInterrupt().

2523  {
2524  static std::atomic_bool runtime_interrupt_flag{false};
2525 
2526  if (command == static_cast<unsigned>(INT_CHECK)) {
2527  if (runtime_interrupt_flag.load()) {
2528  return true;
2529  }
2530  return false;
2531  }
2532  if (command == static_cast<unsigned>(INT_ABORT)) {
2533  runtime_interrupt_flag.store(true);
2534  return false;
2535  }
2536  if (command == static_cast<unsigned>(INT_RESET)) {
2537  runtime_interrupt_flag.store(false);
2538  return false;
2539  }
2540  return false;
2541 }
__device__ int32_t runtime_interrupt_flag
Definition: cuda_mapd_rt.cu:95

+ Here is the caller graph for this function:

RUNTIME_EXPORT int fill_hashtable_for_semi_join ( size_t  idx,
int32_t *  entry_ptr,
const int32_t  invalid_slot_val 
)

Definition at line 54 of file JoinHashImpl.h.

References insert_key_cas.

Referenced by fill_hash_join_buff(), fill_hash_join_buff_bitwise_eq(), fill_hash_join_buff_bucketized(), fill_hash_join_buff_sharded(), and fill_hash_join_buff_sharded_bucketized().

57  {
58  // just mark the existence of value to the corresponding hash slot
59  // regardless of hashtable collision
60  insert_key_cas(entry_ptr, invalid_slot_val, idx);
61  return 0;
62 }
#define insert_key_cas(address, compare, val)
Definition: JoinHashImpl.h:40

+ Here is the caller graph for this function:

RUNTIME_EXPORT int fill_one_to_one_hashtable ( size_t  idx,
int32_t *  entry_ptr,
const int32_t  invalid_slot_val 
)

Definition at line 44 of file JoinHashImpl.h.

References insert_key_cas.

Referenced by fill_hash_join_buff(), fill_hash_join_buff_bitwise_eq(), fill_hash_join_buff_bucketized(), fill_hash_join_buff_sharded(), and fill_hash_join_buff_sharded_bucketized().

47  {
48  if (insert_key_cas(entry_ptr, invalid_slot_val, idx) != invalid_slot_val) {
49  return -1;
50  }
51  return 0;
52 }
#define insert_key_cas(address, compare, val)
Definition: JoinHashImpl.h:40

+ Here is the caller graph for this function:

DEVICE NEVER_INLINE int64_t SUFFIX() fixed_width_date_encode_noinline ( const int64_t  cur_col_val,
const int32_t  null_val,
const int64_t  ret_null_val 
)

Definition at line 173 of file DecodersImpl.h.

References fixed_width_date_encode(), and SUFFIX.

175  {
176  return SUFFIX(fixed_width_date_encode)(cur_col_val, ret_null_val, null_val);
177 }
#define SUFFIX(name)
DEVICE ALWAYS_INLINE int64_t SUFFIX() fixed_width_date_encode(const int64_t cur_col_val, const int32_t ret_null_val, const int64_t null_val)
Definition: DecodersImpl.h:159

+ Here is the call graph for this function:

RUNTIME_EXPORT double fixed_width_double_decode_noinline ( const int8_t *  byte_stream,
const int64_t  pos 
)

Definition at line 134 of file DecodersImpl.h.

References fixed_width_double_decode(), and SUFFIX.

Referenced by compute_bucket_sizes_impl(), JoinColumnIterator::getElementSwitch(), result_set::lazy_decode(), BoundingBoxIntersectKeyHandler::operator()(), and RangeKeyHandler::operator()().

134  {
135  return SUFFIX(fixed_width_double_decode)(byte_stream, pos);
136 }
#define SUFFIX(name)
DEVICE ALWAYS_INLINE double SUFFIX() fixed_width_double_decode(const int8_t *byte_stream, const int64_t pos)
Definition: DecodersImpl.h:126

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

RUNTIME_EXPORT float fixed_width_float_decode_noinline ( const int8_t *  byte_stream,
const int64_t  pos 
)

Definition at line 121 of file DecodersImpl.h.

References fixed_width_float_decode(), and SUFFIX.

Referenced by result_set::lazy_decode().

121  {
122  return SUFFIX(fixed_width_float_decode)(byte_stream, pos);
123 }
#define SUFFIX(name)
DEVICE ALWAYS_INLINE float SUFFIX() fixed_width_float_decode(const int8_t *byte_stream, const int64_t pos)
Definition: DecodersImpl.h:113

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

RUNTIME_EXPORT int64_t fixed_width_int_decode_noinline ( const int8_t *  byte_stream,
const int32_t  byte_width,
const int64_t  pos 
)

Definition at line 91 of file DecodersImpl.h.

References fixed_width_int_decode(), and SUFFIX.

Referenced by JoinColumnIterator::getElementSwitch(), result_set::lazy_decode(), and RangeKeyHandler::operator()().

93  {
94  return SUFFIX(fixed_width_int_decode)(byte_stream, byte_width, pos);
95 }
DEVICE ALWAYS_INLINE int64_t SUFFIX() fixed_width_int_decode(const int8_t *byte_stream, const int32_t byte_width, const int64_t pos)
Definition: DecodersImpl.h:31
#define SUFFIX(name)

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

RUNTIME_EXPORT int64_t fixed_width_small_date_decode_noinline ( const int8_t *  byte_stream,
const int32_t  byte_width,
const int32_t  null_val,
const int64_t  ret_null_val,
const int64_t  pos 
)

Definition at line 149 of file DecodersImpl.h.

References fixed_width_small_date_decode(), and SUFFIX.

Referenced by JoinColumnIterator::getElementSwitch(), and result_set::lazy_decode().

153  {
154  return SUFFIX(fixed_width_small_date_decode)(
155  byte_stream, byte_width, null_val, ret_null_val, pos);
156 }
#define SUFFIX(name)
DEVICE ALWAYS_INLINE int64_t SUFFIX() fixed_width_small_date_decode(const int8_t *byte_stream, const int32_t byte_width, const int32_t null_val, const int64_t ret_null_val, const int64_t pos)
Definition: DecodersImpl.h:139

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

RUNTIME_EXPORT int64_t fixed_width_unsigned_decode_noinline ( const int8_t *  byte_stream,
const int32_t  byte_width,
const int64_t  pos 
)

Definition at line 98 of file DecodersImpl.h.

References fixed_width_unsigned_decode(), and SUFFIX.

Referenced by JoinColumnIterator::getElementSwitch(), and result_set::lazy_decode().

100  {
101  return SUFFIX(fixed_width_unsigned_decode)(byte_stream, byte_width, pos);
102 }
#define SUFFIX(name)
DEVICE ALWAYS_INLINE int64_t SUFFIX() fixed_width_unsigned_decode(const int8_t *byte_stream, const int32_t byte_width, const int64_t pos)
Definition: DecodersImpl.h:61

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

RUNTIME_EXPORT int32_t* get_bucketized_hash_slot ( int32_t *  buff,
const int64_t  key,
const int64_t  min_key,
const int64_t  translated_null_val,
const int64_t  bucket_normalization = 1 
)

Definition at line 66 of file JoinHashImpl.h.

Referenced by bucketized_hash_join_idx(), count_matches_bucketized(), fill_hash_join_buff_bucketized(), and fill_row_ids_bucketized().

71  {
72  auto hash_slot = key / bucket_normalization - min_key + (key == translated_null_val);
73  return buff + hash_slot;
74 }

+ Here is the caller graph for this function:

RUNTIME_EXPORT int32_t* get_bucketized_hash_slot_sharded ( int32_t *  buff,
const int64_t  key,
const int64_t  min_key,
const int64_t  translated_null_val,
const uint32_t  entry_count_per_shard,
const uint32_t  num_shards,
const uint32_t  device_count,
const int64_t  bucket_normalization 
)

Definition at line 90 of file JoinHashImpl.h.

References SHARD_FOR_KEY.

Referenced by fill_row_ids_sharded_bucketized().

98  {
99  const uint32_t shard = SHARD_FOR_KEY(key, num_shards);
100  const uint32_t shard_buffer_index =
101  shard / device_count; // shard sub-buffer index within `buff`
102  int32_t* shard_buffer = buff + shard_buffer_index * entry_count_per_shard;
103  auto hash_slot = ((key / bucket_normalization) - min_key) / num_shards +
104  (key == translated_null_val);
105  return shard_buffer + hash_slot;
106 }
#define SHARD_FOR_KEY(key, num_shards)
Definition: shard_key.h:20

+ Here is the caller graph for this function:

RUNTIME_EXPORT int32_t* get_bucketized_hash_slot_sharded_opt ( int32_t *  buff,
const int64_t  key,
const int64_t  min_key,
const int64_t  translated_null_val,
const uint32_t  entry_count_per_shard,
const uint32_t  shard,
const uint32_t  num_shards,
const uint32_t  device_count,
const int64_t  bucket_normalization 
)

Definition at line 122 of file JoinHashImpl.h.

Referenced by fill_hash_join_buff_sharded_bucketized().

131  {
132  const uint32_t shard_buffer_index =
133  shard / device_count; // shard sub-buffer index within `buff`
134  int32_t* shard_buffer = buff + shard_buffer_index * entry_count_per_shard;
135  int64_t hash_slot = ((key / bucket_normalization) - min_key) / num_shards +
136  (key == translated_null_val);
137  return shard_buffer + hash_slot;
138 }

+ Here is the caller graph for this function:

RUNTIME_EXPORT uint32_t get_columnar_group_bin_offset ( int64_t *  key_base_ptr,
const int64_t  key,
const int64_t  min_key,
const int64_t  bucket 
)

Definition at line 228 of file GroupByRuntime.cpp.

References EMPTY_KEY_64.

231  {
232  int64_t off = key - min_key;
233  if (bucket) {
234  off /= bucket;
235  }
236  if (key_base_ptr[off] == EMPTY_KEY_64) {
237  key_base_ptr[off] = key;
238  }
239  return off;
240 }
#define EMPTY_KEY_64
template<typename T = int64_t>
T get_empty_key ( )
inline

Definition at line 334 of file RuntimeFunctions.h.

References EMPTY_KEY_64.

334  {
335  static_assert(std::is_same<T, int64_t>::value,
336  "Unsupported template parameter other than int64_t for now");
337  return EMPTY_KEY_64;
338 }
#define EMPTY_KEY_64
template<>
int32_t get_empty_key ( )
inline

Definition at line 341 of file RuntimeFunctions.h.

References EMPTY_KEY_32.

341  {
342  return EMPTY_KEY_32;
343 }
#define EMPTY_KEY_32
RUNTIME_EXPORT int64_t* get_group_value ( int64_t *  groups_buffer,
const uint32_t  groups_buffer_entry_count,
const int64_t *  key,
const uint32_t  key_count,
const uint32_t  key_width,
const uint32_t  row_size_quad 
)

Definition at line 25 of file GroupByRuntime.cpp.

References get_matching_group_value(), and key_hash().

Referenced by ResultSetStorage::moveOneEntryToBuffer().

31  {
32  uint32_t h = key_hash(key, key_count, key_width) % groups_buffer_entry_count;
33  int64_t* matching_group = get_matching_group_value(
34  groups_buffer, h, key, key_count, key_width, row_size_quad);
35  if (matching_group) {
36  return matching_group;
37  }
38  uint32_t h_probe = (h + 1) % groups_buffer_entry_count;
39  while (h_probe != h) {
40  matching_group = get_matching_group_value(
41  groups_buffer, h_probe, key, key_count, key_width, row_size_quad);
42  if (matching_group) {
43  return matching_group;
44  }
45  h_probe = (h_probe + 1) % groups_buffer_entry_count;
46  }
47  return NULL;
48 }
__device__ int64_t * get_matching_group_value(int64_t *groups_buffer, const uint32_t h, const T *key, const uint32_t key_count, const uint32_t row_size_quad)
RUNTIME_EXPORT ALWAYS_INLINE DEVICE uint32_t key_hash(const int64_t *key, const uint32_t key_count, const uint32_t key_byte_width)

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

RUNTIME_EXPORT int64_t* get_group_value_columnar ( int64_t *  groups_buffer,
const uint32_t  groups_buffer_entry_count,
const int64_t *  key,
const uint32_t  key_qw_count 
)

Definition at line 139 of file GroupByRuntime.cpp.

References get_matching_group_value_columnar(), and key_hash().

Referenced by ResultSetStorage::moveOneEntryToBuffer().

143  {
144  uint32_t h = key_hash(key, key_qw_count, sizeof(int64_t)) % groups_buffer_entry_count;
145  int64_t* matching_group = get_matching_group_value_columnar(
146  groups_buffer, h, key, key_qw_count, groups_buffer_entry_count);
147  if (matching_group) {
148  return matching_group;
149  }
150  uint32_t h_probe = (h + 1) % groups_buffer_entry_count;
151  while (h_probe != h) {
152  matching_group = get_matching_group_value_columnar(
153  groups_buffer, h_probe, key, key_qw_count, groups_buffer_entry_count);
154  if (matching_group) {
155  return matching_group;
156  }
157  h_probe = (h_probe + 1) % groups_buffer_entry_count;
158  }
159  return NULL;
160 }
__device__ int64_t * get_matching_group_value_columnar(int64_t *groups_buffer, const uint32_t h, const int64_t *key, const uint32_t key_qw_count, const size_t entry_count)
RUNTIME_EXPORT ALWAYS_INLINE DEVICE uint32_t key_hash(const int64_t *key, const uint32_t key_count, const uint32_t key_byte_width)

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

RUNTIME_EXPORT int64_t* get_group_value_columnar_with_watchdog ( int64_t *  groups_buffer,
const uint32_t  groups_buffer_entry_count,
const int64_t *  key,
const uint32_t  key_qw_count 
)

Definition at line 163 of file GroupByRuntime.cpp.

References dynamic_watchdog(), get_matching_group_value_columnar(), and key_hash().

166  {
167  uint32_t h = key_hash(key, key_qw_count, sizeof(int64_t)) % groups_buffer_entry_count;
168  int64_t* matching_group = get_matching_group_value_columnar(
169  groups_buffer, h, key, key_qw_count, groups_buffer_entry_count);
170  if (matching_group) {
171  return matching_group;
172  }
173  uint32_t watchdog_countdown = 100;
174  uint32_t h_probe = (h + 1) % groups_buffer_entry_count;
175  while (h_probe != h) {
176  matching_group = get_matching_group_value_columnar(
177  groups_buffer, h_probe, key, key_qw_count, groups_buffer_entry_count);
178  if (matching_group) {
179  return matching_group;
180  }
181  h_probe = (h_probe + 1) % groups_buffer_entry_count;
182  if (--watchdog_countdown == 0) {
183  if (dynamic_watchdog()) {
184  return NULL;
185  }
186  watchdog_countdown = 100;
187  }
188  }
189  return NULL;
190 }
__device__ bool dynamic_watchdog()
__device__ int64_t * get_matching_group_value_columnar(int64_t *groups_buffer, const uint32_t h, const int64_t *key, const uint32_t key_qw_count, const size_t entry_count)
RUNTIME_EXPORT ALWAYS_INLINE DEVICE uint32_t key_hash(const int64_t *key, const uint32_t key_count, const uint32_t key_byte_width)

+ Here is the call graph for this function:

RUNTIME_EXPORT int64_t* get_group_value_fast ( int64_t *  groups_buffer,
const int64_t  key,
const int64_t  min_key,
const int64_t  bucket,
const uint32_t  row_size_quad 
)

Definition at line 192 of file GroupByRuntime.cpp.

References EMPTY_KEY_64.

197  {
198  int64_t key_diff = key - min_key;
199  if (bucket) {
200  key_diff /= bucket;
201  }
202  int64_t off = key_diff * row_size_quad;
203  if (groups_buffer[off] == EMPTY_KEY_64) {
204  groups_buffer[off] = key;
205  }
206  return groups_buffer + off + 1;
207 }
#define EMPTY_KEY_64
RUNTIME_EXPORT int64_t* get_group_value_fast_with_original_key ( int64_t *  groups_buffer,
const int64_t  key,
const int64_t  orig_key,
const int64_t  min_key,
const int64_t  bucket,
const uint32_t  row_size_quad 
)

Definition at line 210 of file GroupByRuntime.cpp.

References EMPTY_KEY_64.

215  {
216  int64_t key_diff = key - min_key;
217  if (bucket) {
218  key_diff /= bucket;
219  }
220  int64_t off = key_diff * row_size_quad;
221  if (groups_buffer[off] == EMPTY_KEY_64) {
222  groups_buffer[off] = orig_key;
223  }
224  return groups_buffer + off + 1;
225 }
#define EMPTY_KEY_64
RUNTIME_EXPORT int64_t* get_group_value_with_watchdog ( int64_t *  groups_buffer,
const uint32_t  groups_buffer_entry_count,
const int64_t *  key,
const uint32_t  key_count,
const uint32_t  key_width,
const uint32_t  row_size_quad 
)

Definition at line 52 of file GroupByRuntime.cpp.

References dynamic_watchdog(), get_matching_group_value(), and key_hash().

58  {
59  uint32_t h = key_hash(key, key_count, key_width) % groups_buffer_entry_count;
60  int64_t* matching_group = get_matching_group_value(
61  groups_buffer, h, key, key_count, key_width, row_size_quad);
62  if (matching_group) {
63  return matching_group;
64  }
65  uint32_t watchdog_countdown = 100;
66  uint32_t h_probe = (h + 1) % groups_buffer_entry_count;
67  while (h_probe != h) {
68  matching_group = get_matching_group_value(
69  groups_buffer, h_probe, key, key_count, key_width, row_size_quad);
70  if (matching_group) {
71  return matching_group;
72  }
73  h_probe = (h_probe + 1) % groups_buffer_entry_count;
74  if (--watchdog_countdown == 0) {
75  if (dynamic_watchdog()) {
76  return NULL;
77  }
78  watchdog_countdown = 100;
79  }
80  }
81  return NULL;
82 }
__device__ bool dynamic_watchdog()
__device__ int64_t * get_matching_group_value(int64_t *groups_buffer, const uint32_t h, const T *key, const uint32_t key_count, const uint32_t row_size_quad)
RUNTIME_EXPORT ALWAYS_INLINE DEVICE uint32_t key_hash(const int64_t *key, const uint32_t key_count, const uint32_t key_byte_width)

+ Here is the call graph for this function:

RUNTIME_EXPORT int32_t* get_hash_slot ( int32_t *  buff,
const int64_t  key,
const int64_t  min_key 
)

Definition at line 76 of file JoinHashImpl.h.

Referenced by count_matches(), fill_hash_join_buff(), fill_row_ids(), and hash_join_idx().

78  {
79  return buff + (key - min_key);
80 }

+ Here is the caller graph for this function:

RUNTIME_EXPORT int32_t* get_hash_slot_bitwise_eq ( int32_t *  buff,
const int64_t  key,
const int64_t  min_key,
const int64_t  translated_null_val 
)

Definition at line 82 of file JoinHashImpl.h.

Referenced by fill_hash_join_buff_bitwise_eq().

86  {
87  return buff + (key - min_key) + (key == translated_null_val);
88 }

+ Here is the caller graph for this function:

RUNTIME_EXPORT int32_t* get_hash_slot_sharded ( int32_t *  buff,
const int64_t  key,
const int64_t  min_key,
const uint32_t  entry_count_per_shard,
const uint32_t  num_shards,
const uint32_t  device_count 
)

Definition at line 108 of file JoinHashImpl.h.

References SHARD_FOR_KEY.

Referenced by count_matches_sharded(), fill_row_ids_sharded(), and hash_join_idx_sharded().

114  {
115  const uint32_t shard = SHARD_FOR_KEY(key, num_shards);
116  const uint32_t shard_buffer_index =
117  shard / device_count; // shard sub-buffer index within `buff`
118  int32_t* shard_buffer = buff + shard_buffer_index * entry_count_per_shard;
119  return shard_buffer + (key - min_key) / num_shards;
120 }
#define SHARD_FOR_KEY(key, num_shards)
Definition: shard_key.h:20

+ Here is the caller graph for this function:

RUNTIME_EXPORT int32_t* get_hash_slot_sharded_opt ( int32_t *  buff,
const int64_t  key,
const int64_t  min_key,
const uint32_t  entry_count_per_shard,
const uint32_t  shard,
const uint32_t  num_shards,
const uint32_t  device_count 
)

Definition at line 140 of file JoinHashImpl.h.

Referenced by fill_hash_join_buff_sharded().

147  {
148  const uint32_t shard_buffer_index =
149  shard / device_count; // shard sub-buffer index within `buff`
150  int32_t* shard_buffer = buff + shard_buffer_index * entry_count_per_shard;
151  return shard_buffer + (key - min_key) / num_shards;
152 }

+ Here is the caller graph for this function:

RUNTIME_EXPORT int64_t* get_matching_group_value_perfect_hash ( int64_t *  groups_buffer,
const uint32_t  hashed_index,
const int64_t *  key,
const uint32_t  key_count,
const uint32_t  row_size_quad 
)

Definition at line 2053 of file RuntimeFunctions.cpp.

References EMPTY_KEY_64.

2058  {
2059  uint32_t off = hashed_index * row_size_quad;
2060  if (groups_buffer[off] == EMPTY_KEY_64) {
2061  for (uint32_t i = 0; i < key_count; ++i) {
2062  groups_buffer[off + i] = key[i];
2063  }
2064  }
2065  return groups_buffer + off + key_count;
2066 }
#define EMPTY_KEY_64
RUNTIME_EXPORT int64_t* get_matching_group_value_perfect_hash_keyless ( int64_t *  groups_buffer,
const uint32_t  hashed_index,
const uint32_t  row_size_quad 
)

For a particular hashed index (only used with multi-column perfect hash group by) it returns the row-wise offset of the group in the output buffer. Since it is intended for keyless hash use, it assumes there are no group-key columns preceding the output slots in the buffer.

Definition at line 2075 of file RuntimeFunctions.cpp.

2077  {
2078  return groups_buffer + row_size_quad * hashed_index;
2079 }
RUNTIME_EXPORT uint32_t key_hash ( const int64_t *  key,
const uint32_t  key_count,
const uint32_t  key_byte_width 
)

Definition at line 21 of file GroupByRuntime.cpp.

References MurmurHash3().

Referenced by get_group_value(), get_group_value_columnar(), anonymous_namespace{ResultSetReduction.cpp}::get_group_value_columnar_reduction(), get_group_value_columnar_slot(), get_group_value_columnar_slot_with_watchdog(), get_group_value_columnar_with_watchdog(), result_set::get_group_value_reduction(), and get_group_value_with_watchdog().

21  {
22  return MurmurHash3(key, key_byte_width * key_count, 0);
23 }
RUNTIME_EXPORT NEVER_INLINE DEVICE uint32_t MurmurHash3(const void *key, int len, const uint32_t seed)
Definition: MurmurHash.cpp:33

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

RUNTIME_EXPORT void linear_probabilistic_count ( uint8_t *  bitmap,
const uint32_t  bitmap_bytes,
const uint8_t *  key_bytes,
const uint32_t  key_len 
)

Definition at line 1293 of file cuda_mapd_rt.cu.

References MurmurHash3().

1296  {
1297  const uint32_t bit_pos = MurmurHash3(key_bytes, key_len, 0) % (bitmap_bytes * 8);
1298  const uint32_t word_idx = bit_pos / 32;
1299  const uint32_t bit_idx = bit_pos % 32;
1300  atomicOr(((uint32_t*)bitmap) + word_idx, 1 << bit_idx);
1301 }
RUNTIME_EXPORT NEVER_INLINE DEVICE uint32_t MurmurHash3(const void *key, int len, const uint32_t seed)
Definition: MurmurHash.cpp:33

+ Here is the call graph for this function: