OmniSciDB  a5dc49c757
GroupByRuntime.cpp File Reference


Macros

#define DEF_TRANSLATE_NULL_KEY(key_type)
 

Functions

RUNTIME_EXPORT ALWAYS_INLINE DEVICE uint32_t key_hash (const int64_t *key, const uint32_t key_count, const uint32_t key_byte_width)
RUNTIME_EXPORT NEVER_INLINE DEVICE int64_t * get_group_value (int64_t *groups_buffer, const uint32_t groups_buffer_entry_count, const int64_t *key, const uint32_t key_count, const uint32_t key_width, const uint32_t row_size_quad)
RUNTIME_EXPORT NEVER_INLINE DEVICE bool dynamic_watchdog ()
RUNTIME_EXPORT NEVER_INLINE DEVICE int64_t * get_group_value_with_watchdog (int64_t *groups_buffer, const uint32_t groups_buffer_entry_count, const int64_t *key, const uint32_t key_count, const uint32_t key_width, const uint32_t row_size_quad)
RUNTIME_EXPORT NEVER_INLINE DEVICE int32_t get_group_value_columnar_slot (int64_t *groups_buffer, const uint32_t groups_buffer_entry_count, const int64_t *key, const uint32_t key_count, const uint32_t key_width)
RUNTIME_EXPORT NEVER_INLINE DEVICE int32_t get_group_value_columnar_slot_with_watchdog (int64_t *groups_buffer, const uint32_t groups_buffer_entry_count, const int64_t *key, const uint32_t key_count, const uint32_t key_width)
RUNTIME_EXPORT NEVER_INLINE DEVICE int64_t * get_group_value_columnar (int64_t *groups_buffer, const uint32_t groups_buffer_entry_count, const int64_t *key, const uint32_t key_qw_count)
RUNTIME_EXPORT NEVER_INLINE DEVICE int64_t * get_group_value_columnar_with_watchdog (int64_t *groups_buffer, const uint32_t groups_buffer_entry_count, const int64_t *key, const uint32_t key_qw_count)
RUNTIME_EXPORT ALWAYS_INLINE DEVICE int64_t * get_group_value_fast (int64_t *groups_buffer, const int64_t key, const int64_t min_key, const int64_t bucket, const uint32_t row_size_quad)
RUNTIME_EXPORT ALWAYS_INLINE DEVICE int64_t * get_group_value_fast_with_original_key (int64_t *groups_buffer, const int64_t key, const int64_t orig_key, const int64_t min_key, const int64_t bucket, const uint32_t row_size_quad)
RUNTIME_EXPORT ALWAYS_INLINE DEVICE uint32_t get_columnar_group_bin_offset (int64_t *key_base_ptr, const int64_t key, const int64_t min_key, const int64_t bucket)
RUNTIME_EXPORT ALWAYS_INLINE DEVICE int64_t * get_scan_output_slot (int64_t *output_buffer, const uint32_t output_buffer_entry_count, const uint32_t pos, const int64_t offset_in_fragment, const uint32_t row_size_quad)
RUNTIME_EXPORT ALWAYS_INLINE DEVICE int32_t get_columnar_scan_output_offset (int64_t *output_buffer, const uint32_t output_buffer_entry_count, const uint32_t pos, const int64_t offset_in_fragment)
RUNTIME_EXPORT ALWAYS_INLINE DEVICE int64_t bucketized_hash_join_idx (int64_t hash_buff, int64_t const key, int64_t const min_key, int64_t const max_key, const int64_t translated_null_val, int64_t bucket_normalization)
RUNTIME_EXPORT ALWAYS_INLINE DEVICE int64_t hash_join_idx (int64_t hash_buff, const int64_t key, const int64_t min_key, const int64_t max_key)
RUNTIME_EXPORT ALWAYS_INLINE DEVICE int64_t bucketized_hash_join_idx_nullable (int64_t hash_buff, const int64_t key, const int64_t min_key, const int64_t max_key, const int64_t null_val, const int64_t bucket_normalization)
RUNTIME_EXPORT ALWAYS_INLINE DEVICE int64_t hash_join_idx_nullable (int64_t hash_buff, const int64_t key, const int64_t min_key, const int64_t max_key, const int64_t null_val)
RUNTIME_EXPORT ALWAYS_INLINE DEVICE int64_t bucketized_hash_join_idx_bitwise (int64_t hash_buff, const int64_t key, const int64_t min_key, const int64_t max_key, const int64_t null_val, const int64_t translated_val, const int64_t bucket_normalization)
RUNTIME_EXPORT ALWAYS_INLINE DEVICE int64_t hash_join_idx_bitwise (int64_t hash_buff, const int64_t key, const int64_t min_key, const int64_t max_key, const int64_t null_val, const int64_t translated_val)
RUNTIME_EXPORT ALWAYS_INLINE DEVICE int64_t hash_join_idx_sharded (int64_t hash_buff, const int64_t key, const int64_t min_key, const int64_t max_key, const uint32_t entry_count_per_shard, const uint32_t num_shards, const uint32_t device_count)
RUNTIME_EXPORT ALWAYS_INLINE DEVICE int64_t hash_join_idx_sharded_nullable (int64_t hash_buff, const int64_t key, const int64_t min_key, const int64_t max_key, const uint32_t entry_count_per_shard, const uint32_t num_shards, const uint32_t device_count, const int64_t null_val)
RUNTIME_EXPORT ALWAYS_INLINE DEVICE int64_t hash_join_idx_bitwise_sharded (int64_t hash_buff, const int64_t key, const int64_t min_key, const int64_t max_key, const uint32_t entry_count_per_shard, const uint32_t num_shards, const uint32_t device_count, const int64_t null_val, const int64_t translated_val)
 

Macro Definition Documentation

#define DEF_TRANSLATE_NULL_KEY(key_type)
Value:
extern "C" RUNTIME_EXPORT NEVER_INLINE DEVICE int64_t translate_null_key_##key_type( \
    const key_type key, const key_type null_val, const int64_t translated_val) {     \
  if (key == null_val) {                                                             \
    return translated_val;                                                           \
  }                                                                                  \
  return key;                                                                        \
}

Definition at line 413 of file GroupByRuntime.cpp.
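
For illustration, DEF_TRANSLATE_NULL_KEY(int32_t) expands to the function below (a sketch of the preprocessor output, not generated code from the repository; RUNTIME_EXPORT, NEVER_INLINE and DEVICE are qualifier macros that may expand to nothing depending on the build):

extern "C" RUNTIME_EXPORT NEVER_INLINE DEVICE int64_t translate_null_key_int32_t(
    const int32_t key, const int32_t null_val, const int64_t translated_val) {
  if (key == null_val) {
    return translated_val;  // map the NULL sentinel onto its translated value
  }
  return key;  // non-NULL keys pass through, widened to int64_t
}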

Function Documentation

RUNTIME_EXPORT ALWAYS_INLINE DEVICE int64_t bucketized_hash_join_idx (int64_t hash_buff, int64_t const key, int64_t const min_key, int64_t const max_key, const int64_t translated_null_val, int64_t bucket_normalization)

Definition at line 269 of file GroupByRuntime.cpp.

References get_bucketized_hash_slot(), and SUFFIX.

Referenced by bucketized_hash_join_idx_bitwise(), and bucketized_hash_join_idx_nullable().

{
  if (hash_buff && key >= min_key && key <= max_key) {
    return *SUFFIX(get_bucketized_hash_slot)(reinterpret_cast<int32_t*>(hash_buff),
                                             key,
                                             min_key / bucket_normalization,
                                             translated_null_val,
                                             bucket_normalization);
  }
  return -1;
}
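
A hypothetical call site, to show the contract rather than any real query plan: a null table or an out-of-range key short-circuits to -1, otherwise the bucketized slot is dereferenced. All values below are illustrative; bucket_normalization = 86400 models epoch-second keys bucketed by day.

#include <cstdint>
#include <vector>

// Hypothetical probe of a bucketized one-to-one join table. The table is
// filled with -1 (every slot empty), so any in-range probe misses.
int64_t bucketized_probe_example() {
  std::vector<int32_t> table(1024, -1);
  return bucketized_hash_join_idx(reinterpret_cast<int64_t>(table.data()),
                                  /*key=*/1609459200,      // 2021-01-01, in seconds
                                  /*min_key=*/1577836800,  // 2020-01-01
                                  /*max_key=*/1640995200,  // 2022-01-01
                                  /*translated_null_val=*/-1,
                                  /*bucket_normalization=*/86400);  // -1: empty slot
}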


RUNTIME_EXPORT ALWAYS_INLINE DEVICE int64_t bucketized_hash_join_idx_bitwise (int64_t hash_buff, const int64_t key, const int64_t min_key, const int64_t max_key, const int64_t null_val, const int64_t translated_val, const int64_t bucket_normalization)

Definition at line 319 of file GroupByRuntime.cpp.

References bucketized_hash_join_idx().

{
  return key != null_val
             ? bucketized_hash_join_idx(
                   hash_buff, key, min_key, max_key, translated_val, bucket_normalization)
             : bucketized_hash_join_idx(hash_buff,
                                        translated_val,
                                        min_key,
                                        max_key,
                                        translated_val,
                                        bucket_normalization);
}


RUNTIME_EXPORT ALWAYS_INLINE DEVICE int64_t bucketized_hash_join_idx_nullable (int64_t hash_buff, const int64_t key, const int64_t min_key, const int64_t max_key, const int64_t null_val, const int64_t bucket_normalization)

Definition at line 297 of file GroupByRuntime.cpp.

References bucketized_hash_join_idx().

{
  return key != null_val
             ? bucketized_hash_join_idx(
                   hash_buff, key, min_key, max_key, null_val, bucket_normalization)
             : -1;
}


RUNTIME_EXPORT NEVER_INLINE DEVICE bool dynamic_watchdog ()

Definition at line 115 of file cuda_mapd_rt.cu.

References dw_abort, dw_cycle_budget, DW_DEADLINE, dw_sm_cycle_start, dynamic_watchdog_init(), get_smid(), logger::INFO, LOG, and read_cycle_counter().

Referenced by anonymous_namespace{ResultSetReduction.cpp}::check_watchdog(), check_watchdog_rt(), anonymous_namespace{ResultSetReduction.cpp}::check_watchdog_with_seed(), get_group_value_columnar_slot_with_watchdog(), get_group_value_columnar_with_watchdog(), and get_group_value_with_watchdog().

{
  // Check the dynamic watchdog; if it has triggered, all threads return true.
  if (dw_cycle_budget == 0LL) {
    return false;  // Uninitialized watchdog can't check time
  }
  if (dw_abort == 1) {
    return true;  // Received host request to abort
  }
  uint32_t smid = get_smid();
  if (smid >= 128) {
    return false;
  }
  __shared__ volatile int64_t dw_block_cycle_start;  // Thread block shared cycle start
  __shared__ volatile bool
      dw_should_terminate;  // all threads within a block should return together if
                            // the watchdog criteria is met

  // Thread 0 either initializes or reads the initial clock cycle; the result is stored
  // into shared memory. Since all threads within a block share the same SM, there's no
  // point in using more threads here.
  if (threadIdx.x == 0) {
    dw_block_cycle_start = 0LL;
    int64_t cycle_count = static_cast<int64_t>(clock64());
    // Make sure the block hasn't switched SMs
    if (smid == get_smid()) {
      dw_block_cycle_start = static_cast<int64_t>(
          atomicCAS(reinterpret_cast<unsigned long long*>(&dw_sm_cycle_start[smid]),
                    0ULL,
                    static_cast<unsigned long long>(cycle_count)));
    }

    int64_t cycles = cycle_count - dw_block_cycle_start;
    if ((smid == get_smid()) && (dw_block_cycle_start > 0LL) &&
        (cycles > dw_cycle_budget)) {
      // Check if we're out of time on this particular SM
      dw_should_terminate = true;
    } else {
      dw_should_terminate = false;
    }
  }
  __syncthreads();
  return dw_should_terminate;
}
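
Callers do not consult the watchdog on every iteration; the *_with_watchdog probing loops in this file re-check it once every 100 probe steps. The pattern, extracted from those functions as a sketch:

// Polling pattern used by the *_with_watchdog functions below: consult
// dynamic_watchdog() once per 100 probe steps, giving up on a true result
// (cycle budget exhausted or host-requested abort).
uint32_t watchdog_countdown = 100;
while (h_probe != h) {
  // ... probe slot h_probe ...
  h_probe = (h_probe + 1) % groups_buffer_entry_count;
  if (--watchdog_countdown == 0) {
    if (dynamic_watchdog()) {
      return NULL;  // abandon the search; the query is being aborted
    }
    watchdog_countdown = 100;  // re-arm for the next 100 steps
  }
}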


RUNTIME_EXPORT ALWAYS_INLINE DEVICE uint32_t get_columnar_group_bin_offset (int64_t *key_base_ptr, const int64_t key, const int64_t min_key, const int64_t bucket)

Definition at line 228 of file GroupByRuntime.cpp.

References EMPTY_KEY_64.

{
  int64_t off = key - min_key;
  if (bucket) {
    off /= bucket;
  }
  if (key_base_ptr[off] == EMPTY_KEY_64) {
    key_base_ptr[off] = key;
  }
  return off;
}
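
A worked instance: with min_key = 100 and bucket = 10, key 125 falls in bin (125 - 100) / 10 = 2. key_base_ptr[2] is claimed with the key if it still holds EMPTY_KEY_64, and bin index 2 is returned so the caller can address the columnar aggregate buffers at the same index. Unlike get_group_value_fast below, the return value is a bin index rather than a pointer into a row.
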
RUNTIME_EXPORT ALWAYS_INLINE DEVICE int32_t get_columnar_scan_output_offset (int64_t *output_buffer, const uint32_t output_buffer_entry_count, const uint32_t pos, const int64_t offset_in_fragment)

Definition at line 257 of file GroupByRuntime.cpp.

{
  if (pos < output_buffer_entry_count) {
    output_buffer[pos] = offset_in_fragment;
    return pos;
  }
  return -1;
}
RUNTIME_EXPORT NEVER_INLINE DEVICE int64_t* get_group_value (int64_t *groups_buffer, const uint32_t groups_buffer_entry_count, const int64_t *key, const uint32_t key_count, const uint32_t key_width, const uint32_t row_size_quad)

Definition at line 25 of file GroupByRuntime.cpp.

References get_matching_group_value(), and key_hash().

Referenced by ResultSetStorage::moveOneEntryToBuffer().

{
  uint32_t h = key_hash(key, key_count, key_width) % groups_buffer_entry_count;
  int64_t* matching_group = get_matching_group_value(
      groups_buffer, h, key, key_count, key_width, row_size_quad);
  if (matching_group) {
    return matching_group;
  }
  uint32_t h_probe = (h + 1) % groups_buffer_entry_count;
  while (h_probe != h) {
    matching_group = get_matching_group_value(
        groups_buffer, h_probe, key, key_count, key_width, row_size_quad);
    if (matching_group) {
      return matching_group;
    }
    h_probe = (h_probe + 1) % groups_buffer_entry_count;
  }
  return NULL;
}
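
The search order is plain linear probing with wrap-around: start at h = hash % entry_count, advance one slot at a time modulo the entry count, and fail with NULL only after coming back to h (table full). A toy host-side model of the same probe sequence, with a simple equality test standing in for get_matching_group_value:

#include <cstdint>
#include <vector>

// Toy model of get_group_value's probe order (illustration only). Returns the
// slot where `key` was found or newly placed, or -1 if every slot is taken.
int64_t probe_linear(std::vector<int64_t>& table, int64_t key, uint32_t h) {
  const uint32_t n = static_cast<uint32_t>(table.size());
  const int64_t kEmpty = -1;  // stand-in for an empty-slot marker
  h %= n;
  uint32_t slot = h;
  do {
    if (table[slot] == kEmpty || table[slot] == key) {
      table[slot] = key;  // claim (or re-find) the group slot
      return slot;
    }
    slot = (slot + 1) % n;  // linear probe with wrap-around
  } while (slot != h);
  return -1;  // wrapped all the way around: table is full
}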


RUNTIME_EXPORT NEVER_INLINE DEVICE int64_t* get_group_value_columnar (int64_t *groups_buffer, const uint32_t groups_buffer_entry_count, const int64_t *key, const uint32_t key_qw_count)

Definition at line 139 of file GroupByRuntime.cpp.

References get_matching_group_value_columnar(), and key_hash().

Referenced by ResultSetStorage::moveOneEntryToBuffer().

{
  uint32_t h = key_hash(key, key_qw_count, sizeof(int64_t)) % groups_buffer_entry_count;
  int64_t* matching_group = get_matching_group_value_columnar(
      groups_buffer, h, key, key_qw_count, groups_buffer_entry_count);
  if (matching_group) {
    return matching_group;
  }
  uint32_t h_probe = (h + 1) % groups_buffer_entry_count;
  while (h_probe != h) {
    matching_group = get_matching_group_value_columnar(
        groups_buffer, h_probe, key, key_qw_count, groups_buffer_entry_count);
    if (matching_group) {
      return matching_group;
    }
    h_probe = (h_probe + 1) % groups_buffer_entry_count;
  }
  return NULL;
}


RUNTIME_EXPORT NEVER_INLINE DEVICE int32_t get_group_value_columnar_slot (int64_t *groups_buffer, const uint32_t groups_buffer_entry_count, const int64_t *key, const uint32_t key_count, const uint32_t key_width)

Definition at line 85 of file GroupByRuntime.cpp.

References get_matching_group_value_columnar_slot(), and key_hash().

{
  uint32_t h = key_hash(key, key_count, key_width) % groups_buffer_entry_count;
  int32_t matching_slot = get_matching_group_value_columnar_slot(
      groups_buffer, groups_buffer_entry_count, h, key, key_count, key_width);
  if (matching_slot != -1) {
    return h;
  }
  uint32_t h_probe = (h + 1) % groups_buffer_entry_count;
  while (h_probe != h) {
    matching_slot = get_matching_group_value_columnar_slot(
        groups_buffer, groups_buffer_entry_count, h_probe, key, key_count, key_width);
    if (matching_slot != -1) {
      return h_probe;
    }
    h_probe = (h_probe + 1) % groups_buffer_entry_count;
  }
  return -1;
}


RUNTIME_EXPORT NEVER_INLINE DEVICE int32_t get_group_value_columnar_slot_with_watchdog (int64_t *groups_buffer, const uint32_t groups_buffer_entry_count, const int64_t *key, const uint32_t key_count, const uint32_t key_width)

Definition at line 109 of file GroupByRuntime.cpp.

References dynamic_watchdog(), get_matching_group_value_columnar_slot(), and key_hash().

{
  uint32_t h = key_hash(key, key_count, key_width) % groups_buffer_entry_count;
  int32_t matching_slot = get_matching_group_value_columnar_slot(
      groups_buffer, groups_buffer_entry_count, h, key, key_count, key_width);
  if (matching_slot != -1) {
    return h;
  }
  uint32_t watchdog_countdown = 100;
  uint32_t h_probe = (h + 1) % groups_buffer_entry_count;
  while (h_probe != h) {
    matching_slot = get_matching_group_value_columnar_slot(
        groups_buffer, groups_buffer_entry_count, h_probe, key, key_count, key_width);
    if (matching_slot != -1) {
      return h_probe;
    }
    h_probe = (h_probe + 1) % groups_buffer_entry_count;
    if (--watchdog_countdown == 0) {
      if (dynamic_watchdog()) {
        return -1;
      }
      watchdog_countdown = 100;
    }
  }
  return -1;
}


RUNTIME_EXPORT NEVER_INLINE DEVICE int64_t* get_group_value_columnar_with_watchdog (int64_t *groups_buffer, const uint32_t groups_buffer_entry_count, const int64_t *key, const uint32_t key_qw_count)

Definition at line 163 of file GroupByRuntime.cpp.

References dynamic_watchdog(), get_matching_group_value_columnar(), and key_hash().

{
  uint32_t h = key_hash(key, key_qw_count, sizeof(int64_t)) % groups_buffer_entry_count;
  int64_t* matching_group = get_matching_group_value_columnar(
      groups_buffer, h, key, key_qw_count, groups_buffer_entry_count);
  if (matching_group) {
    return matching_group;
  }
  uint32_t watchdog_countdown = 100;
  uint32_t h_probe = (h + 1) % groups_buffer_entry_count;
  while (h_probe != h) {
    matching_group = get_matching_group_value_columnar(
        groups_buffer, h_probe, key, key_qw_count, groups_buffer_entry_count);
    if (matching_group) {
      return matching_group;
    }
    h_probe = (h_probe + 1) % groups_buffer_entry_count;
    if (--watchdog_countdown == 0) {
      if (dynamic_watchdog()) {
        return NULL;
      }
      watchdog_countdown = 100;
    }
  }
  return NULL;
}


RUNTIME_EXPORT ALWAYS_INLINE DEVICE int64_t* get_group_value_fast (int64_t *groups_buffer, const int64_t key, const int64_t min_key, const int64_t bucket, const uint32_t row_size_quad)

Definition at line 192 of file GroupByRuntime.cpp.

References EMPTY_KEY_64.

{
  int64_t key_diff = key - min_key;
  if (bucket) {
    key_diff /= bucket;
  }
  int64_t off = key_diff * row_size_quad;
  if (groups_buffer[off] == EMPTY_KEY_64) {
    groups_buffer[off] = key;
  }
  return groups_buffer + off + 1;
}
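
No hashing or probing happens on this fast path; the slot index is computed directly from the key (a perfect-hash layout). For example, with min_key = 100, bucket = 0, and row_size_quad = 4, key 103 maps to quad offset (103 - 100) * 4 = 12: the key is written to groups_buffer[12] on first touch, and the returned pointer addresses the first aggregate slot at groups_buffer + 13.
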
RUNTIME_EXPORT ALWAYS_INLINE DEVICE int64_t* get_group_value_fast_with_original_key (int64_t *groups_buffer, const int64_t key, const int64_t orig_key, const int64_t min_key, const int64_t bucket, const uint32_t row_size_quad)

Definition at line 210 of file GroupByRuntime.cpp.

References EMPTY_KEY_64.

{
  int64_t key_diff = key - min_key;
  if (bucket) {
    key_diff /= bucket;
  }
  int64_t off = key_diff * row_size_quad;
  if (groups_buffer[off] == EMPTY_KEY_64) {
    groups_buffer[off] = orig_key;
  }
  return groups_buffer + off + 1;
}
RUNTIME_EXPORT NEVER_INLINE DEVICE int64_t* get_group_value_with_watchdog (int64_t *groups_buffer, const uint32_t groups_buffer_entry_count, const int64_t *key, const uint32_t key_count, const uint32_t key_width, const uint32_t row_size_quad)

Definition at line 52 of file GroupByRuntime.cpp.

References dynamic_watchdog(), get_matching_group_value(), and key_hash().

{
  uint32_t h = key_hash(key, key_count, key_width) % groups_buffer_entry_count;
  int64_t* matching_group = get_matching_group_value(
      groups_buffer, h, key, key_count, key_width, row_size_quad);
  if (matching_group) {
    return matching_group;
  }
  uint32_t watchdog_countdown = 100;
  uint32_t h_probe = (h + 1) % groups_buffer_entry_count;
  while (h_probe != h) {
    matching_group = get_matching_group_value(
        groups_buffer, h_probe, key, key_count, key_width, row_size_quad);
    if (matching_group) {
      return matching_group;
    }
    h_probe = (h_probe + 1) % groups_buffer_entry_count;
    if (--watchdog_countdown == 0) {
      if (dynamic_watchdog()) {
        return NULL;
      }
      watchdog_countdown = 100;
    }
  }
  return NULL;
}


RUNTIME_EXPORT ALWAYS_INLINE DEVICE int64_t* get_scan_output_slot (int64_t *output_buffer, const uint32_t output_buffer_entry_count, const uint32_t pos, const int64_t offset_in_fragment, const uint32_t row_size_quad)

Definition at line 242 of file GroupByRuntime.cpp.

Referenced by SqliteMemDatabase::runSelect().

{
  uint64_t off = static_cast<uint64_t>(pos) * static_cast<uint64_t>(row_size_quad);
  if (pos < output_buffer_entry_count) {
    output_buffer[off] = offset_in_fragment;
    return output_buffer + off + 1;
  }
  return NULL;
}
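
For example, pos = 2 with row_size_quad = 4 writes offset_in_fragment into output_buffer[8] and returns output_buffer + 9, leaving the remaining quads of the row for projected values; any pos at or beyond output_buffer_entry_count yields NULL. Note that the bounds check is on pos (the entry index), while the store itself lands at pos * row_size_quad.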


RUNTIME_EXPORT ALWAYS_INLINE DEVICE int64_t hash_join_idx (int64_t hash_buff, const int64_t key, const int64_t min_key, const int64_t max_key)

Definition at line 286 of file GroupByRuntime.cpp.

References get_hash_slot(), and SUFFIX.

Referenced by hash_join_idx_bitwise(), and hash_join_idx_nullable().

{
  if (key >= min_key && key <= max_key) {
    return *SUFFIX(get_hash_slot)(reinterpret_cast<int32_t*>(hash_buff), key, min_key);
  }
  return -1;
}
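
A minimal self-contained sketch of the contract, under two assumptions that are not shown in the body above: that get_hash_slot (JoinHashImpl.h) uses the usual perfect-hash layout of offsetting the buffer by key - min_key, and that empty slots hold -1.

#include <cstdint>

// Toy one-to-one join table over keys 10..12 (illustrative): key 10 -> row 7,
// key 11 -> empty, key 12 -> row 3.
int32_t slots[] = {7, -1, 3};

int64_t lookup_examples() {
  int64_t buff = reinterpret_cast<int64_t>(slots);
  int64_t a = hash_join_idx(buff, 12, 10, 12);  // in range: row 3 under the layout assumption
  int64_t b = hash_join_idx(buff, 11, 10, 12);  // empty slot: -1
  int64_t c = hash_join_idx(buff, 42, 10, 12);  // out of range: -1 without touching the table
  return a + b + c;
}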


RUNTIME_EXPORT ALWAYS_INLINE DEVICE int64_t hash_join_idx_bitwise (int64_t hash_buff, const int64_t key, const int64_t min_key, const int64_t max_key, const int64_t null_val, const int64_t translated_val)

Definition at line 338 of file GroupByRuntime.cpp.

References hash_join_idx().

{
  return key != null_val
             ? hash_join_idx(hash_buff, key, min_key, max_key)
             : hash_join_idx(hash_buff, translated_val, min_key, translated_val);
}


RUNTIME_EXPORT ALWAYS_INLINE DEVICE int64_t hash_join_idx_bitwise_sharded (int64_t hash_buff, const int64_t key, const int64_t min_key, const int64_t max_key, const uint32_t entry_count_per_shard, const uint32_t num_shards, const uint32_t device_count, const int64_t null_val, const int64_t translated_val)

Definition at line 388 of file GroupByRuntime.cpp.

References hash_join_idx_sharded().

{
  return key != null_val ? hash_join_idx_sharded(hash_buff,
                                                 key,
                                                 min_key,
                                                 max_key,
                                                 entry_count_per_shard,
                                                 num_shards,
                                                 device_count)
                         : hash_join_idx_sharded(hash_buff,
                                                 translated_val,
                                                 min_key,
                                                 translated_val,
                                                 entry_count_per_shard,
                                                 num_shards,
                                                 device_count);
}


RUNTIME_EXPORT ALWAYS_INLINE DEVICE int64_t hash_join_idx_nullable (int64_t hash_buff, const int64_t key, const int64_t min_key, const int64_t max_key, const int64_t null_val)

Definition at line 310 of file GroupByRuntime.cpp.

References hash_join_idx().

{
  return key != null_val ? hash_join_idx(hash_buff, key, min_key, max_key) : -1;
}


RUNTIME_EXPORT ALWAYS_INLINE DEVICE int64_t hash_join_idx_sharded (int64_t hash_buff, const int64_t key, const int64_t min_key, const int64_t max_key, const uint32_t entry_count_per_shard, const uint32_t num_shards, const uint32_t device_count)

Definition at line 350 of file GroupByRuntime.cpp.

References get_hash_slot_sharded(), and SUFFIX.

Referenced by hash_join_idx_bitwise_sharded(), and hash_join_idx_sharded_nullable().

{
  if (hash_buff && key >= min_key && key <= max_key) {
    return *SUFFIX(get_hash_slot_sharded)(reinterpret_cast<int32_t*>(hash_buff),
                                          key,
                                          min_key,
                                          entry_count_per_shard,
                                          num_shards,
                                          device_count);
  }
  return -1;
}
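
The contract matches hash_join_idx, with the slot computation delegated to get_hash_slot_sharded (JoinHashImpl.h), which selects the key's shard sub-buffer from the shard geometry (entries per shard, shard count, device count) before indexing within it. A hedged call sketch; the geometry and values are illustrative:

#include <cstdint>
#include <vector>

// Hypothetical probe against a sharded one-to-one join table (4 shards of
// 256 entries on a single device); -1-filled, so any in-range probe misses.
int64_t sharded_probe_example() {
  std::vector<int32_t> buff(4 * 256, -1);
  return hash_join_idx_sharded(reinterpret_cast<int64_t>(buff.data()),
                               /*key=*/55,
                               /*min_key=*/0,
                               /*max_key=*/1023,
                               /*entry_count_per_shard=*/256,
                               /*num_shards=*/4,
                               /*device_count=*/1);  // -1: empty slot
}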


RUNTIME_EXPORT ALWAYS_INLINE DEVICE int64_t hash_join_idx_sharded_nullable (int64_t hash_buff, const int64_t key, const int64_t min_key, const int64_t max_key, const uint32_t entry_count_per_shard, const uint32_t num_shards, const uint32_t device_count, const int64_t null_val)

Definition at line 369 of file GroupByRuntime.cpp.

References hash_join_idx_sharded().

{
  return key != null_val ? hash_join_idx_sharded(hash_buff,
                                                 key,
                                                 min_key,
                                                 max_key,
                                                 entry_count_per_shard,
                                                 num_shards,
                                                 device_count)
                         : -1;
}


RUNTIME_EXPORT ALWAYS_INLINE DEVICE uint32_t key_hash (const int64_t *key, const uint32_t key_count, const uint32_t key_byte_width)

Definition at line 21 of file GroupByRuntime.cpp.

References MurmurHash3().

Referenced by get_group_value(), get_group_value_columnar(), anonymous_namespace{ResultSetReduction.cpp}::get_group_value_columnar_reduction(), get_group_value_columnar_slot(), get_group_value_columnar_slot_with_watchdog(), get_group_value_columnar_with_watchdog(), result_set::get_group_value_reduction(), and get_group_value_with_watchdog().

{
  return MurmurHash3(key, key_byte_width * key_count, 0);
}
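
key_hash drives all the probing functions above: the raw key bytes (key_byte_width * key_count of them) are run through 32-bit MurmurHash3 with seed 0, and callers reduce the result to a bucket with a modulo. The first step of get_group_value, as a minimal sketch:

// How the probing functions derive their initial bucket from a composite key
// (sketch; entry_count is the group-by buffer's entry count, and key points
// at key_count contiguous 8-byte values).
uint32_t initial_bucket(const int64_t* key, uint32_t key_count, uint32_t entry_count) {
  return key_hash(key, key_count, sizeof(int64_t)) % entry_count;
}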
