OmniSciDB  a5dc49c757
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Groups Pages
ResultSetSortImpl.h File Reference
+ Include dependency graph for ResultSetSortImpl.h:
+ This graph shows which files directly or indirectly include this file:

Go to the source code of this file.

Classes

struct  PodOrderEntry
 
struct  GroupByBufferLayoutInfo
 

Namespaces

 Data_Namespace
 

Functions

template<class K >
std::vector< uint32_t > baseline_sort (const ExecutorDeviceType device_type, const int device_id, Data_Namespace::DataMgr *data_mgr, const int8_t *groupby_buffer, const PodOrderEntry &oe, const GroupByBufferLayoutInfo &layout, const size_t top_n, const size_t start, const size_t step)
 

Function Documentation

template<class K >
std::vector<uint32_t> baseline_sort ( const ExecutorDeviceType  device_type,
const int  device_id,
Data_Namespace::DataMgr *  data_mgr,
const int8_t *  groupby_buffer,
const PodOrderEntry &  oe,
const GroupByBufferLayoutInfo &  layout,
const size_t  top_n,
const size_t  start,
const size_t  step 
)

Definition at line 353 of file ResultSetSortImpl.cu.

References CHECK, CPU, get_compact_type(), anonymous_namespace{ResultSetSortImpl.cu}::get_device_copy_ptr(), GPU, PodOrderEntry::is_desc, kAVG, and PodOrderEntry::nulls_first.

361  {
     // Dispatcher for top-n baseline sort: materializes the order-by column and
     // routes to a type-appropriate sort path (fp-aware, integral-with-null
     // separation, or the fast direct radix sort on GPU/CPU).
     // Gather the order-by key column from the group-by buffer, taking every
     // `step`-th entry beginning at `start` (multi-device/slab striding).
362  auto oe_col_buffer = collect_order_entry_column<K>(groupby_buffer, layout, start, step);
363  const auto& entry_ti = get_compact_type(layout.oe_target_info);
     // Only numeric sort keys are supported by this implementation.
364  CHECK(entry_ti.is_number());
     // Floating-point keys go through the fp-aware path; kAVG is grouped with
     // them — presumably because an average yields a fractional result (the fp
     // representation is what gets sorted). NOTE(review): confirm AVG storage.
365  if (entry_ti.is_fp() || layout.oe_target_info.agg_kind == kAVG) {
366  return baseline_sort_fp<K>(device_type,
367  device_id,
368  data_mgr,
369  groupby_buffer,
370  oe_col_buffer,
371  oe,
372  layout,
373  top_n,
374  start,
375  step);
376  }
377  // Because of how we represent nulls for integral types, they'd be at the
378  // wrong position in these two cases. Separate them into a different buffer.
379  if ((oe.is_desc && oe.nulls_first) || (!oe.is_desc && !oe.nulls_first)) {
380  return baseline_sort_int<K>(device_type,
381  device_id,
382  data_mgr,
383  groupby_buffer,
384  oe_col_buffer,
385  oe,
386  layout,
387  top_n,
388  start,
389  step);
390  }
     // Allocator used for any device-side scratch buffers below.
391  ThrustAllocator thrust_allocator(data_mgr, device_id);
392  // Fastest path, no need to separate nulls away since they'll end up at the
393  // right place as a side effect of how we're representing nulls.
394  if (device_type == ExecutorDeviceType::GPU) {
     // Nothing to sort — return an empty permutation.
395  if (oe_col_buffer.empty()) {
396  return {};
397  }
     // Build the row-index sequence (start, start+step, ...) directly on the
     // device, copy the key column over, and radix-sort there.
398  const auto dev_idx_buff =
399  get_device_ptr<uint32_t>(oe_col_buffer.size(), thrust_allocator);
400  thrust::sequence(dev_idx_buff, dev_idx_buff + oe_col_buffer.size(), start, step);
401  const auto dev_oe_col_buffer = get_device_copy_ptr(oe_col_buffer, thrust_allocator);
402  return do_radix_sort<K>(device_type,
403  device_id,
404  thrust_allocator,
405  groupby_buffer,
406  dev_oe_col_buffer,
407  dev_oe_col_buffer + oe_col_buffer.size(),
408  dev_idx_buff,
409  oe_col_buffer.size(),
410  oe,
411  layout,
412  top_n);
413  }
     // CPU fallback: same radix sort, but over host-resident buffers.
414  CHECK(device_type == ExecutorDeviceType::CPU);
415  thrust::host_vector<uint32_t> host_idx_buff(oe_col_buffer.size());
416  thrust::sequence(host_idx_buff.begin(), host_idx_buff.end(), start, step);
417  return do_radix_sort<K>(device_type,
418  device_id,
419  thrust_allocator,
420  groupby_buffer,
421  oe_col_buffer.begin(),
422  oe_col_buffer.end(),
423  host_idx_buff.begin(),
424  host_idx_buff.size(),
425  oe,
426  layout,
427  top_n);
428 }
thrust::device_ptr< T > get_device_copy_ptr(const thrust::host_vector< T > &host_vec, ThrustAllocator &thrust_allocator)
bool nulls_first
const SQLTypeInfo get_compact_type(const TargetInfo &target)
bool is_desc
#define CHECK(condition)
Definition: Logger.h:291
Definition: sqldefs.h:77

+ Here is the call graph for this function: