OmniSciDB  a5dc49c757
RowSetMemoryOwner.h
/*
 * Copyright 2022 HEAVY.AI, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include <boost/noncopyable.hpp>
#include <algorithm>
#include <cstring>
#include <deque>
#include <list>
#include <map>
#include <memory>
#include <mutex>
#include <sstream>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>

#include "DataMgr/AbstractBuffer.h"
#include "DataMgr/Allocators/ArenaAllocator.h"
#include "DataMgr/Allocators/CpuMgrArenaAllocator.h"
#include "DataMgr/DataMgr.h"
#include "Logger/Logger.h"
#include "QueryEngine/AggMode.h"
#include "QueryEngine/CountDistinct.h"
#include "QueryEngine/StringDictionaryGenerations.h"
#include "Shared/DbObjectKeys.h"
#include "Shared/quantile.h"
#include "StringDictionary/StringDictionaryProxy.h"
#include "StringOps/StringOps.h"

extern bool g_allow_memory_status_log;
extern bool g_use_cpu_mem_pool_for_output_buffers;

namespace Catalog_Namespace {
class Catalog;
}

class ResultSet;

/**
 * Handles allocations and outputs for all stages in a query, either explicitly or via a
 * managed allocator object.
 */
class RowSetMemoryOwner final : public SimpleAllocator, boost::noncopyable {
 public:
  RowSetMemoryOwner(const size_t arena_block_size, const size_t executor_id)
      : arena_block_size_(arena_block_size), executor_id_(executor_id) {
    // initialize shared allocator (i.e., allocators_[0])
    if (g_use_cpu_mem_pool_for_output_buffers) {
      allocators_.emplace_back(std::make_unique<CpuMgrArenaAllocator>());
    } else {
      allocators_.emplace_back(std::make_unique<DramArena>(arena_block_size_));
    }
  }

  void clearNonOwnedGroupByBuffers() { non_owned_group_by_buffers_.clear(); }

  void setKernelMemoryAllocator(const size_t num_kernels) {
    CHECK_GT(num_kernels, static_cast<size_t>(0));
    CHECK_EQ(non_owned_group_by_buffers_.size(), static_cast<size_t>(0));
    // buffers for kernels use one-based indexing
    auto const required_num_kernels = num_kernels + 1;
    non_owned_group_by_buffers_.resize(required_num_kernels, nullptr);
    // Sometimes the same RSMO instance handles multiple work units or even multiple
    // query steps (i.e., the RSMO's owner, an Executor instance, is responsible for
    // processing all of them). So if the first query step prepared M allocators and a
    // later step requires N allocators where N > M, allocate only the N - M additional
    // allocators instead of recreating all N from scratch.
    if (required_num_kernels > allocators_.size()) {
      auto const required_num_allocators = required_num_kernels - allocators_.size();
      VLOG(1) << "Prepare " << required_num_allocators
              << " memory allocator(s) (Executor-" << executor_id_
              << ", # existing allocator(s): " << allocators_.size()
              << ", # requested allocator(s): " << required_num_kernels << ")";
      for (size_t i = 0; i < required_num_allocators; i++) {
        if (g_use_cpu_mem_pool_for_output_buffers) {
          allocators_.emplace_back(std::make_unique<CpuMgrArenaAllocator>());
        } else {
          // todo (yoonmin): can we determine a better default min_block_size per query?
          allocators_.emplace_back(std::make_unique<DramArena>(arena_block_size_));
        }
      }
    }
    CHECK_GE(allocators_.size(), required_num_kernels);
    count_distinct_buffer_allocators_.resize(required_num_kernels);
  }
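  // For example, with num_kernels = 4 the shared arena stays at allocators_[0] and
  // allocators_[1..4] serve the kernels (required_num_kernels == 5); if a later step
  // on the same RSMO requests 6 kernels, only the two missing arenas are created.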

  // allocate memory via shared allocator
  int8_t* allocate(const size_t num_bytes) override {
    constexpr size_t thread_idx = 0u;
    return allocate(num_bytes, thread_idx);
  }

  // allocate memory via thread's unique allocator
  int8_t* allocate(const size_t num_bytes, const size_t thread_idx) {
    CHECK_LT(thread_idx, allocators_.size());
    std::lock_guard<std::mutex> lock(state_mutex_);
    return allocateUnlocked(num_bytes, thread_idx);
  }

  void initCountDistinctBufferAllocator(size_t buffer_size, size_t thread_idx) {
    std::lock_guard<std::mutex> lock(state_mutex_);
    VLOG(2) << "Count distinct buffer allocator initialized with buffer_size: "
            << buffer_size << ", thread_idx: " << thread_idx;
    CHECK_LT(thread_idx, count_distinct_buffer_allocators_.size());
    if (count_distinct_buffer_allocators_[thread_idx]) {
      VLOG(2) << "Replacing count_distinct_buffer_allocators_[" << thread_idx << "].";
    }
    count_distinct_buffer_allocators_[thread_idx] =
        std::make_unique<CountDistinctBufferAllocator>(
            allocateUnlocked(buffer_size, thread_idx), buffer_size);
  }
127 
128  std::pair<int64_t*, bool> allocateCachedGroupByBuffer(const size_t num_bytes,
129  const size_t thread_idx) {
130  std::lock_guard<std::mutex> lock(state_mutex_);
131  CHECK_LT(thread_idx, non_owned_group_by_buffers_.size());
132  // First try cache
133  if (non_owned_group_by_buffers_[thread_idx]) { // not nullptr
134  return std::make_pair(non_owned_group_by_buffers_[thread_idx], true);
135  }
136  // Was not in cache so must allocate
137  auto allocator = allocators_[thread_idx].get();
139  VLOG(1) << "Try to allocate CPU memory: " << num_bytes << " bytes (THREAD-"
140  << thread_idx << ")";
141  }
142  int64_t* group_by_buffer = reinterpret_cast<int64_t*>(allocator->allocate(num_bytes));
143  CHECK(group_by_buffer);
144  // Put in cache
145  non_owned_group_by_buffers_[thread_idx] = group_by_buffer;
146  return std::make_pair(group_by_buffer, false);
147  }
148 
  int8_t* allocateCountDistinctBuffer(const size_t num_bytes,
                                      const size_t thread_idx = 0) {
    CHECK_LT(thread_idx, count_distinct_buffer_allocators_.size());
    CHECK(count_distinct_buffer_allocators_[thread_idx]);
    int8_t* buffer = count_distinct_buffer_allocators_[thread_idx]->allocate(num_bytes);
    std::memset(buffer, 0, num_bytes);
    addCountDistinctBuffer(buffer, num_bytes, /*physical_buffer=*/true);
    return buffer;
  }

  void addCountDistinctBuffer(int8_t* count_distinct_buffer,
                              const size_t bytes,
                              const bool physical_buffer) {
    std::lock_guard<std::mutex> lock(state_mutex_);
    count_distinct_bitmaps_.emplace_back(
        CountDistinctBitmapBuffer{count_distinct_buffer, bytes, physical_buffer});
  }

  void addCountDistinctSet(CountDistinctSet* count_distinct_set) {
    std::lock_guard<std::mutex> lock(state_mutex_);
    count_distinct_sets_.push_back(count_distinct_set);
  }

  void addVarlenBuffer(void* varlen_buffer) {
    std::lock_guard<std::mutex> lock(state_mutex_);
    if (std::find(varlen_buffers_.begin(), varlen_buffers_.end(), varlen_buffer) ==
        varlen_buffers_.end()) {
      varlen_buffers_.push_back(varlen_buffer);
    }
  }

  /**
   * Adds a GPU buffer containing a variable length input column. Variable length inputs
   * on GPU are referenced in output projected targets and should not be freed until the
   * query results have been resolved.
   */
  void addVarlenInputBuffer(Data_Namespace::AbstractBuffer* buffer) {
    std::lock_guard<std::mutex> lock(state_mutex_);
    CHECK_EQ(buffer->getType(), Data_Namespace::MemoryLevel::GPU_LEVEL);
    varlen_input_buffers_.push_back(buffer);
  }

  std::string* addString(const std::string& str) {
    std::lock_guard<std::mutex> lock(state_mutex_);
    strings_.emplace_back(str);
    return &strings_.back();
  }

  std::vector<int64_t>* addArray(const std::vector<int64_t>& arr) {
    std::lock_guard<std::mutex> lock(state_mutex_);
    arrays_.emplace_back(arr);
    return &arrays_.back();
  }

  StringDictionaryProxy* addStringDict(std::shared_ptr<StringDictionary> str_dict,
                                       const shared::StringDictKey& dict_key,
                                       const int64_t generation) {
    std::lock_guard<std::mutex> lock(state_mutex_);
    auto it = str_dict_proxy_owned_.find(dict_key);
    if (it != str_dict_proxy_owned_.end()) {
      CHECK_EQ(it->second->getDictionary(), str_dict.get());
      it->second->updateGeneration(generation);
      return it->second.get();
    }
    it = str_dict_proxy_owned_
             .emplace(
                 dict_key,
                 std::make_shared<StringDictionaryProxy>(str_dict, dict_key, generation))
             .first;
    return it->second.get();
  }

  std::string generate_translation_map_key(
      const shared::StringDictKey& source_proxy_dict_key,
      const std::vector<StringOps_Namespace::StringOpInfo>& string_op_infos) {
    std::ostringstream oss;
    oss << "{source_dict_key: " << source_proxy_dict_key
        << " StringOps: " << string_op_infos << "}";
    return oss.str();
  }

  std::string generate_translation_map_key(
      const shared::StringDictKey& source_proxy_dict_key,
      const shared::StringDictKey& dest_proxy_dict_key,
      const std::vector<StringOps_Namespace::StringOpInfo>& string_op_infos) {
    std::ostringstream oss;
    oss << "{source_dict_key: " << source_proxy_dict_key
        << ", dest_dict_key: " << dest_proxy_dict_key
        << " StringOps: " << string_op_infos << "}";
    return oss.str();
  }

  const StringDictionaryProxy::IdMap* addStringProxyIntersectionTranslationMap(
      const StringDictionaryProxy* source_proxy,
      const StringDictionaryProxy* dest_proxy,
      const std::vector<StringOps_Namespace::StringOpInfo>& string_op_infos) {
    std::lock_guard<std::mutex> lock(state_mutex_);
    const auto map_key =
        generate_translation_map_key(source_proxy->getDictionary()->getDictKey(),
                                     dest_proxy->getDictionary()->getDictKey(),
                                     string_op_infos);
    auto it = str_proxy_intersection_translation_maps_owned_.find(map_key);
    if (it == str_proxy_intersection_translation_maps_owned_.end()) {
      it = str_proxy_intersection_translation_maps_owned_
               .emplace(map_key,
                        source_proxy->buildIntersectionTranslationMapToOtherProxy(
                            dest_proxy, string_op_infos))
               .first;
    }
    return &it->second;
  }

  const StringDictionaryProxy::TranslationMap<Datum>* addStringProxyNumericTranslationMap(
      const StringDictionaryProxy* source_proxy,
      const std::vector<StringOps_Namespace::StringOpInfo>& string_op_infos) {
    const auto map_key = generate_translation_map_key(
        source_proxy->getDictionary()->getDictKey(), string_op_infos);
    auto it = str_proxy_numeric_translation_maps_owned_.lower_bound(map_key);
    if (it == str_proxy_numeric_translation_maps_owned_.end() || it->first != map_key) {
      it = str_proxy_numeric_translation_maps_owned_.emplace_hint(
          it, map_key, source_proxy->buildNumericTranslationMap(string_op_infos));
    }
    return &it->second;
  }
275 
277  const StringDictionaryProxy* source_proxy,
278  StringDictionaryProxy* dest_proxy,
279  const std::vector<StringOps_Namespace::StringOpInfo>& string_op_infos) {
280  std::lock_guard<std::mutex> lock(state_mutex_);
281  const auto map_key =
283  dest_proxy->getDictionary()->getDictKey(),
284  string_op_infos);
285  auto it = str_proxy_union_translation_maps_owned_.find(map_key);
286  if (it == str_proxy_union_translation_maps_owned_.end()) {
288  .emplace(map_key,
290  dest_proxy, string_op_infos))
291  .first;
292  }
293  return &it->second;
294  }

  const StringOps_Namespace::StringOps* getStringOps(
      const std::vector<StringOps_Namespace::StringOpInfo>& string_op_infos) {
    std::lock_guard<std::mutex> lock(state_mutex_);
    const auto map_key = generate_translation_map_key({}, {}, string_op_infos);
    auto it = string_ops_owned_.find(map_key);
    if (it == string_ops_owned_.end()) {
      it = string_ops_owned_
               .emplace(map_key,
                        std::make_shared<StringOps_Namespace::StringOps>(string_op_infos))
               .first;
    }
    return it->second.get();
  }
309 
311  std::lock_guard<std::mutex> lock(state_mutex_);
312  auto it = str_dict_proxy_owned_.find(dict_key);
313  CHECK(it != str_dict_proxy_owned_.end());
314  return it->second.get();
315  }
316 
318  const bool with_generation);
319 
321  std::shared_ptr<StringDictionaryProxy> lit_str_dict_proxy) {
322  std::lock_guard<std::mutex> lock(state_mutex_);
323  lit_str_dict_proxy_ = lit_str_dict_proxy;
324  }
325 
327  std::lock_guard<std::mutex> lock(state_mutex_);
328  return lit_str_dict_proxy_.get();
329  }
330 
332  const shared::StringDictKey& source_dict_id_in,
333  const shared::StringDictKey& dest_dict_id_in,
334  const bool with_generation,
335  const StringTranslationType translation_map_type,
336  const std::vector<StringOps_Namespace::StringOpInfo>& string_op_infos);
337 
340  const shared::StringDictKey& source_dict_id_in,
341  const bool with_generation,
342  const std::vector<StringOps_Namespace::StringOpInfo>& string_op_infos);
343 
  void addColBuffer(const void* col_buffer) {
    std::lock_guard<std::mutex> lock(state_mutex_);
    col_buffers_.push_back(const_cast<void*>(col_buffer));
  }

  ~RowSetMemoryOwner() {
    std::ostringstream oss;
    oss << "Destruct RowSetMemoryOwner attached to Executor-" << executor_id_ << "{\t";
    int allocator_id = 0;
    for (auto const& allocator : allocators_) {
      auto const used_bytes = allocator->bytesUsed();
      if (used_bytes > 0) {
        oss << "allocator-" << allocator_id << ", bytesUsed: " << used_bytes << "/"
            << allocator->totalBytes() << "\t";
      }
      ++allocator_id;
    }
    oss << "}";
    allocators_.clear();
    VLOG(1) << oss.str();
    for (auto count_distinct_set : count_distinct_sets_) {
      delete count_distinct_set;
    }
    for (auto varlen_buffer : varlen_buffers_) {
      free(varlen_buffer);
    }
    for (auto varlen_input_buffer : varlen_input_buffers_) {
      CHECK(varlen_input_buffer);
      varlen_input_buffer->unPin();
    }
    for (auto col_buffer : col_buffers_) {
      free(col_buffer);
    }
  }

  std::shared_ptr<RowSetMemoryOwner> cloneStrDictDataOnly() {
    auto rtn = std::make_shared<RowSetMemoryOwner>(arena_block_size_, executor_id_);
    rtn->str_dict_proxy_owned_ = str_dict_proxy_owned_;
    rtn->lit_str_dict_proxy_ = lit_str_dict_proxy_;
    return rtn;
  }

  void setDictionaryGenerations(StringDictionaryGenerations generations) {
    string_dictionary_generations_ = generations;
  }

  StringDictionaryGenerations& getStringDictionaryGenerations() {
    return string_dictionary_generations_;
  }

  quantile::TDigest* initTDigest(size_t thread_idx, ApproxQuantileDescriptor, double q);
  void reserveTDigestMemory(size_t thread_idx, size_t capacity);

  //
  // key/value store for table function intercommunication
  //

  void setTableFunctionMetadata(const char* key,
                                const uint8_t* raw_data,
                                const size_t num_bytes,
                                const TableFunctionMetadataType value_type) {
    MetadataValue metadata_value(num_bytes, value_type);
    std::memcpy(metadata_value.first.data(), raw_data, num_bytes);
    std::lock_guard<std::mutex> lock(table_function_metadata_store_mutex_);
    table_function_metadata_store_[key] = std::move(metadata_value);
  }

  void getTableFunctionMetadata(const char* key,
                                const uint8_t*& raw_data,
                                size_t& num_bytes,
                                TableFunctionMetadataType& value_type) const {
    std::lock_guard<std::mutex> lock(table_function_metadata_store_mutex_);
    auto const itr = table_function_metadata_store_.find(key);
    if (itr == table_function_metadata_store_.end()) {
      throw std::runtime_error("Failed to find Table Function Metadata with key '" +
                               std::string(key) + "'");
    }
    raw_data = itr->second.first.data();
    num_bytes = itr->second.first.size();
    value_type = itr->second.second;
  }
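  // Illustrative round trip through the store above (hypothetical key and value; the
  // concrete TableFunctionMetadataType enumerator depends on the caller):
  //   int64_t row_hint = 42;
  //   owner->setTableFunctionMetadata("row_hint",
  //                                   reinterpret_cast<const uint8_t*>(&row_hint),
  //                                   sizeof(row_hint), value_type);
  //   const uint8_t* data{nullptr};
  //   size_t size{0};
  //   TableFunctionMetadataType type;
  //   owner->getTableFunctionMetadata("row_hint", data, size, type);  // throws if absent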

  AggMode* allocateMode() {
    std::lock_guard<std::mutex> lock(state_mutex_);
    return &mode_maps_.emplace_back();
  }

 private:
  int8_t* allocateUnlocked(const size_t num_bytes, const size_t thread_idx) {
    if (g_allow_memory_status_log) {
      VLOG(1) << "Try to allocate CPU memory: " << num_bytes << " bytes (THREAD-"
              << thread_idx << ")";
    }
    auto allocator = allocators_[thread_idx].get();
    return reinterpret_cast<int8_t*>(allocator->allocate(num_bytes));
  }

  struct CountDistinctBitmapBuffer {
    int8_t* ptr;
    const size_t size;
    const bool physical_buffer;
  };

  std::vector<CountDistinctBitmapBuffer> count_distinct_bitmaps_;
  std::vector<CountDistinctSet*> count_distinct_sets_;
  std::vector<int64_t*> non_owned_group_by_buffers_;
  std::vector<void*> varlen_buffers_;
  std::list<std::string> strings_;
  std::list<std::vector<int64_t>> arrays_;
  std::unordered_map<shared::StringDictKey, std::shared_ptr<StringDictionaryProxy>>
      str_dict_proxy_owned_;
  std::map<std::string, StringDictionaryProxy::IdMap>
      str_proxy_intersection_translation_maps_owned_;
  std::map<std::string, StringDictionaryProxy::IdMap>
      str_proxy_union_translation_maps_owned_;
  std::map<std::string, StringDictionaryProxy::TranslationMap<Datum>>
      str_proxy_numeric_translation_maps_owned_;
  std::shared_ptr<StringDictionaryProxy> lit_str_dict_proxy_;
  StringDictionaryGenerations string_dictionary_generations_;
  std::vector<void*> col_buffers_;
  std::vector<Data_Namespace::AbstractBuffer*> varlen_input_buffers_;

  std::deque<TDigestAllocator> t_digest_allocators_;
  std::vector<std::unique_ptr<quantile::TDigest>> t_digests_;

  std::map<std::string, std::shared_ptr<StringOps_Namespace::StringOps>>
      string_ops_owned_;
  std::list<AggMode> mode_maps_;

  size_t arena_block_size_;  // for cloning
  std::vector<std::unique_ptr<Arena>> allocators_;

  std::vector<std::unique_ptr<CountDistinctBufferAllocator>>
      count_distinct_buffer_allocators_;

  size_t executor_id_;

  mutable std::mutex state_mutex_;

  using MetadataValue = std::pair<std::vector<uint8_t>, TableFunctionMetadataType>;
  std::map<std::string, MetadataValue> table_function_metadata_store_;
  mutable std::mutex table_function_metadata_store_mutex_;

  friend class ResultSet;
  friend class QueryExecutionContext;
};
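
Usage note (a minimal sketch, not part of the original header): an Executor typically constructs one RowSetMemoryOwner per query, sizes its per-kernel arenas with setKernelMemoryAllocator(), and hands out memory through allocate() and allocateCachedGroupByBuffer() while result sets keep the owner alive via shared_ptr; everything is released when the destructor runs. The include path, block size, kernel count, and the sketch function name below are illustrative assumptions, not taken from this file.

#include "QueryEngine/RowSetMemoryOwner.h"  // assumed include path

void sketch_row_set_memory_owner_usage(const size_t executor_id) {
  constexpr size_t arena_block_size = 2 * 1024 * 1024;  // example: 2 MB arena blocks
  auto row_set_mem_owner =
      std::make_shared<RowSetMemoryOwner>(arena_block_size, executor_id);

  // One shared arena (index 0) plus one arena per kernel (indices 1..num_kernels).
  const size_t num_kernels = 4;
  row_set_mem_owner->setKernelMemoryAllocator(num_kernels);

  // Thread 0 uses the shared arena; kernels allocate through their own index.
  int8_t* shared_buf = row_set_mem_owner->allocate(1024);
  int8_t* kernel_buf = row_set_mem_owner->allocate(4096, /*thread_idx=*/2);

  // Group-by output buffers are cached per kernel; the bool reports a cache hit.
  auto [group_by_buffer, reused] =
      row_set_mem_owner->allocateCachedGroupByBuffer(64 * 1024, /*thread_idx=*/1);

  // Owned literal strings outlive the query step that created them.
  std::string* owned_str = row_set_mem_owner->addString("literal");

  (void)shared_buf;
  (void)kernel_buf;
  (void)group_by_buffer;
  (void)reused;
  (void)owned_str;
  // Arenas, count-distinct buffers, and owned objects are freed once the last
  // shared_ptr reference goes away and ~RowSetMemoryOwner() runs.
}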