35 #include "torch/script.h"
36 #include "torch/torch.h"
// Fragment of tf_test_torch_generate_random_column (full signature appears in
// the generated index at the bottom of this file): fills the `output` column
// with `num_elements` doubles drawn from torch::randn (standard normal).
// NOTE: the leading "74"-style tokens on these lines are listing line numbers
// left over from the extraction this file came from; they are not code.
// NOTE(review): original lines 77 and 80+ were dropped by the extraction, so
// this fragment is incomplete as shown.
74 torch::Tensor random = torch::randn({num_elements}, at::dtype(at::kDouble));
// Add a trailing size-1 dimension -> shape {num_elements, 1}.
75 random = random.unsqueeze(1);
// NOTE(review): C-style cast; random.data_ptr<double>() (or static_cast) would
// be the idiomatic spelling. Safe only because the tensor was created kDouble.
76 double* data_ptr = (
double*)random.data_ptr();
// Element-wise copy into the output column. Assumes the tensor is laid out
// contiguously — true here for randn + unsqueeze, but worth noting.
78 for (int32_t i = 0; i < num_elements; ++i) {
79 output[i] = *data_ptr++;
// Fragment of make_features_from_columns(const ColumnList<double>&, int32_t):
// builds a {batch_size, poly_degree} CPU tensor where each row is one input
// row sampled (with replacement) from the column list.
// NOTE(review): original lines 89-91 and the function tail are missing from
// this extraction; the embedded "87"-style tokens are listing line numbers.
// NOTE(review): numCols() returns int64_t (per the index below) — narrowing
// to int32_t here; presumably fine for realistic column counts, but confirm.
87 int32_t poly_degree = cols.
numCols();
88 torch::Tensor output = torch::empty({batch_size, poly_degree}, {torch::kCPU});
92 for (
int i = 0; i < batch_size; i++) {
// Pick a random source row index. rand() is presumably seeded by the srand()
// call visible later in this file (orig line 157) — TODO confirm, and note
// the usual rand()%N modulo-bias caveat.
93 int32_t idx = rand() % cols.
size();
// Transpose on the fly: column j of the input becomes feature j of row i.
94 for (
int j = 0; j < poly_degree; j++) {
95 output[i][j] = cols[j][idx];
103 torch::Tensor
f(torch::Tensor x, torch::Tensor W_target, torch::Tensor b_target) {
104 return x.mm(W_target) + b_target.item();
// poly_desc(W, b): renders the polynomial's coefficients as a human-readable
// string, e.g. "<w0> x^N <w1> x^N-1 ... + <b>". Used below for the
// "Learned function" / "Actual function" log lines.
// NOTE(review): original lines 111, 116-118 and the return tail (presumably
// `return stream.str();`) are missing from this extraction; embedded
// "108"-style tokens are listing line numbers, not code.
108 std::string
poly_desc(torch::Tensor W, torch::Tensor b) {
109 auto size = W.size(0);
110 std::ostringstream stream;
// item<float>() below requires float tensors, so reject anything else
// up front with a descriptive error.
112 if (W.scalar_type() != c10::ScalarType::Float ||
113 b.scalar_type() != c10::ScalarType::Float) {
114 throw std::runtime_error(
115 "Attempted to print polynomial with non-float coefficients!");
// One "<coeff> x^<power>" term per weight, highest power first
// (power runs size..1 as i runs 0..size-1).
119 for (int64_t i = 0; i < size; ++i)
120 stream << W[i].item<float>() <<
" x^" << size - i <<
" ";
// Trailing constant term.
121 stream <<
"+ " << b[0].item<
float>();
// Tail fragment of get_batch(cols, W_target, b_target, batch_size): returns a
// (features, labels) pair for one training batch, where the labels are the
// ground-truth model f() evaluated on the sampled features.
// NOTE(review): the signature head and original line 130 — which must build
// `x`, presumably via make_features_from_columns(cols, batch_size) given the
// index below — are missing from this extraction; confirm before relying on
// this fragment. Embedded "127"-style tokens are listing line numbers.
127 torch::Tensor W_target,
128 torch::Tensor b_target,
129 int32_t batch_size) {
// Labels come from the target (ground-truth) linear model.
131 auto y =
f(x, W_target, b_target);
132 return std::make_pair(x, y);
// Fragment of tf_test_torch_regression: trains a single torch::nn::Linear
// layer with SGD to recover a target polynomial's coefficients, then writes
// learned and target coefficients interleaved into the output column.
// NOTE(review): this extraction is missing many original lines (154, 156,
// 160-167, 169, 171-172, 174, 176, 179-183, 186-202, 207, 210, 213, 215,
// 218, 221+) — among them the W_target/b_target construction, the training
// loop's exit condition, and the optimizer step. The embedded "153"-style
// tokens are listing line numbers, not code.
153 int32_t poly_degree = features.
numCols();
// Output holds (learned, target) pairs for each of poly_degree weights plus
// one pair for the bias: (poly_degree + 1) * 2 values total.
155 int32_t output_size = (poly_degree + 1) * 2;
// Seed the C PRNG used by make_features_from_columns' rand() calls.
157 std::srand(std::time(
nullptr));
// Optional GPU path, compiled only when CUDA-enabled torch is available.
158 #ifdef HAVE_CUDA_TORCH
159 if (torch::cuda::is_available() && use_gpu) {
// One linear layer mapping poly_degree features to a single prediction.
168 auto fc = torch::nn::Linear(W_target.size(0), 1);
// Plain SGD, learning rate 0.1.
170 torch::optim::SGD optim(fc->parameters(), .1);
173 int64_t batch_idx = 0;
// NOTE(review): as shown this condition is always true once batch_idx > 0;
// the loop's real exit (presumably a loss threshold break) sits in the
// missing lines 186-202 — confirm.
175 while (++batch_idx) {
177 torch::Tensor batch_x, batch_y;
178 std::tie(batch_x, batch_y) =
get_batch(features, W_target, b_target, batch_size);
// Smooth-L1 (Huber) training loss on this batch's predictions.
184 auto output = torch::smooth_l1_loss(fc(batch_x), batch_y);
// `loss` is declared in the missing lines; updated here for logging below.
185 loss = output.item<
float>();
// Pull learned and target weights back to CPU as flat vectors for output.
203 torch::Tensor output_coefficients = fc->weight.view({-1}).cpu();
204 torch::Tensor goal_coefficients = W_target.view({-1}).cpu();
205 int32_t out_column_idx, input_idx;
// Interleave learned/target weight pairs into the output column.
// NOTE(review): the loop's increment clause and body opener were on the
// missing line 207.
206 for (out_column_idx = 0, input_idx = 0; input_idx < output_coefficients.size(0);
208 output[out_column_idx++] = output_coefficients[input_idx].item<
float>();
209 output[out_column_idx++] = goal_coefficients[input_idx].item<
float>();
// Final pair: learned bias vs. target bias.
211 output[out_column_idx++] = fc->bias[0].item<
float>();
212 output[out_column_idx] = b_target[0].item<
float>();
// Progress / result logging.
214 std::fprintf(stdout,
"Loss: %lf after %ld batches\n", loss, batch_idx);
216 "==> Learned function:\t%s\n",
217 poly_desc(output_coefficients, fc->bias).c_str());
219 "==> Actual function:\t%s\n",
220 poly_desc(W_target.view({-1}).cpu(), b_target).c_str());
// Fragment of tf_test_torch_load_model: loads a TorchScript module from
// `model_filename`, reporting any failure through the table-function
// manager's error channel.
// NOTE(review): the try-block body (the actual torch::jit::load call,
// presumably, on the missing lines 231-232) is absent from this extraction;
// the embedded "230"-style tokens are listing line numbers.
230 torch::jit::script::Module module;
233 }
catch (
// Catch by const reference and surface the loader's message to the caller.
// NOTE(review): as shown, `"literal" + e.what()` is const char* pointer
// arithmetic and would not compile — the extraction likely dropped a
// std::string(...) wrapper around the literal; confirm against the real
// source.
const std::exception& e) {
234 return mgr.ERROR_MESSAGE(
"Error loading torchscript model: " + e.what());
241 #endif // #ifndef __CUDACC__
void set_output_row_size(int64_t num_rows)
#define EXTENSION_NOINLINE
void load(Archive &ar, ExplainedQueryHint &query_hint, const unsigned int version)
std::string getString() const
DEVICE int64_t numCols() const
void save(Archive &ar, const ExplainedQueryHint &query_hint, const unsigned int version)
torch::Tensor make_features_from_columns(const ColumnList< double > &cols, int32_t batch_size)
EXTENSION_NOINLINE int32_t tf_test_torch_load_model(TableFunctionManager &mgr, const TextEncodingNone &model_filename, Column< bool > &output)
TEMPLATE_NOINLINE int32_t tf_test_runtime_torch_template__template(TableFunctionManager &mgr, const Column< T > &input, Column< T > &output)
std::pair< torch::Tensor, torch::Tensor > get_batch(const ColumnList< double > &cols, torch::Tensor W_target, torch::Tensor b_target, int32_t batch_size)
EXTENSION_NOINLINE int32_t tf_test_torch_regression(TableFunctionManager &mgr, const ColumnList< double > &features, int32_t batch_size, bool use_gpu, bool save_model, const TextEncodingNone &model_filename, Column< double > &output)
std::string poly_desc(torch::Tensor W, torch::Tensor b)
torch::Tensor f(torch::Tensor x, torch::Tensor W_target, torch::Tensor b_target)
DEVICE int64_t size() const
EXTENSION_NOINLINE int32_t tf_test_torch_generate_random_column(TableFunctionManager &mgr, int32_t num_elements, Column< double > &output)
torch::Device _test_torch_tfs_device
EXTENSION_NOINLINE int32_t tf_test_runtime_torch(TableFunctionManager &mgr, Column< int64_t > &input, Column< int64_t > &output)
#define TEMPLATE_NOINLINE