OmniSciDB  72c90bc290
TestTorchTableFunctions.h File Reference
Include dependency graph for TestTorchTableFunctions.h: shows which files directly or indirectly include this file (graph not reproduced here).

Go to the source code of this file.

Functions

EXTENSION_NOINLINE int32_t tf_test_runtime_torch (TableFunctionManager &mgr, Column< int64_t > &input, Column< int64_t > &output)
 
template<typename T >
TEMPLATE_NOINLINE int32_t tf_test_runtime_torch_template__template (TableFunctionManager &mgr, const Column< T > &input, Column< T > &output)
 
EXTENSION_NOINLINE int32_t tf_test_torch_regression (TableFunctionManager &mgr, const ColumnList< double > &features, int32_t batch_size, bool use_gpu, bool save_model, const TextEncodingNone &model_filename, Column< double > &output)
 
EXTENSION_NOINLINE int32_t tf_test_torch_generate_random_column (TableFunctionManager &mgr, int32_t num_elements, Column< double > &output)
 
EXTENSION_NOINLINE int32_t tf_test_torch_load_model (TableFunctionManager &mgr, const TextEncodingNone &model_filename, Column< bool > &output)
 

Function Documentation

EXTENSION_NOINLINE int32_t tf_test_runtime_torch ( TableFunctionManager &  mgr,
Column< int64_t > &  input,
Column< int64_t > &  output 
)

Definition at line 43 of file TestTorchTableFunctions.cpp.

{
  // Test stub: writes no output rows.
  return 0;
}
template<typename T >
TEMPLATE_NOINLINE int32_t tf_test_runtime_torch_template__template ( TableFunctionManager &  mgr,
const Column< T > &  input,
Column< T > &  output 
)

Definition at line 51 of file TestTorchTableFunctions.cpp.

{
  // Test stub: writes no output rows.
  return 0;
}
EXTENSION_NOINLINE int32_t tf_test_torch_generate_random_column ( TableFunctionManager &  mgr,
int32_t  num_elements,
Column< double > &  output 
)

Definition at line 70 of file TestTorchTableFunctions.cpp.

References TableFunctionManager::set_output_row_size().

{
  // Allocate num_elements output rows, sample a double-precision normal
  // column with libtorch, and copy it element by element into the output.
  mgr.set_output_row_size(num_elements);
  torch::Tensor random = torch::randn({num_elements}, at::dtype(at::kDouble));
  random = random.unsqueeze(1);
  double* data_ptr = (double*)random.data_ptr();

  for (int32_t i = 0; i < num_elements; ++i) {
    output[i] = *data_ptr++;
  }

  return num_elements;
}
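The loop above copies the sampled values out of the tensor through a raw double* cast. As a standalone illustration of the same pattern outside HeavyDB, here is a minimal libtorch sketch; a std::vector stands in for Column<double>, and the typed data_ptr<double>() accessor replaces the cast (both substitutions are mine, not from the source):

#include <torch/torch.h>
#include <vector>
#include <cstdio>

int main() {
  const int32_t num_elements = 8;
  // Sample a double-precision normal column and reshape to [num_elements, 1].
  torch::Tensor random = torch::randn({num_elements}, at::dtype(at::kDouble));
  random = random.unsqueeze(1);
  const double* data_ptr = random.data_ptr<double>();

  // Copy the contiguous buffer into a plain output vector.
  std::vector<double> output(num_elements);
  for (int32_t i = 0; i < num_elements; ++i) {
    output[i] = *data_ptr++;
  }
  for (double v : output) {
    std::printf("%f\n", v);
  }
  return 0;
}

Because torch::randn fills a contiguous buffer, a single pointer walk is enough; no per-element tensor indexing is needed.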


EXTENSION_NOINLINE int32_t tf_test_torch_load_model ( TableFunctionManager &  mgr,
const TextEncodingNone &  model_filename,
Column< bool > &  output 
)

Definition at line 226 of file TestTorchTableFunctions.cpp.

References TextEncodingNone::getString(), torch::jit::load(), and TableFunctionManager::set_output_row_size().

{
  // Single output row: true if the TorchScript model could be deserialized.
  mgr.set_output_row_size(1);
  torch::jit::script::Module module;
  try {
    module = torch::jit::load(model_filename.getString());
  } catch (const std::exception& e) {
    return mgr.ERROR_MESSAGE(std::string("Error loading torchscript model: ") + e.what());
  }

  output[0] = true;
  return 1;
}
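As a self-contained reference for the load-and-report pattern above, the following libtorch program deserializes a TorchScript archive and mirrors the try/catch in the function; the file name model.pt is a placeholder and the program is a sketch, not part of the HeavyDB test suite:

#include <torch/script.h>
#include <cstdio>

int main() {
  torch::jit::script::Module module;
  try {
    // Deserialize a TorchScript archive (e.g. one produced by torch.jit.save in Python).
    module = torch::jit::load("model.pt");
  } catch (const std::exception& e) {
    std::fprintf(stderr, "Error loading torchscript model: %s\n", e.what());
    return 1;
  }
  std::puts("model loaded");
  return 0;
}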


EXTENSION_NOINLINE int32_t tf_test_torch_regression ( TableFunctionManager &  mgr,
const ColumnList< double > &  features,
int32_t  batch_size,
bool  use_gpu,
bool  save_model,
const TextEncodingNone &  model_filename,
Column< double > &  output 
)

Definition at line 146 of file TestTorchTableFunctions.cpp.

References _test_torch_tfs_device, f(), get_batch(), TextEncodingNone::getString(), ColumnList< T >::numCols(), poly_desc(), torch::save(), and TableFunctionManager::set_output_row_size().

{
  int32_t poly_degree = features.numCols();
  // we output target and trained coefficients + bias
  int32_t output_size = (poly_degree + 1) * 2;
  mgr.set_output_row_size(output_size);
  std::srand(std::time(nullptr));  // not ideal RNG, but fine for test purposes
#ifdef HAVE_CUDA_TORCH
  if (torch::cuda::is_available() && use_gpu) {
    _test_torch_tfs_device = torch::kCUDA;
  }
#endif

  auto W_target = torch::randn({poly_degree, 1}, at::device(_test_torch_tfs_device)) * 5;
  auto b_target = torch::randn({1}, at::device(_test_torch_tfs_device)) * 5;

  // Define the model and optimizer
  auto fc = torch::nn::Linear(W_target.size(0), 1);
  fc->to(_test_torch_tfs_device);
  torch::optim::SGD optim(fc->parameters(), .1);

  float loss = 0;
  int64_t batch_idx = 0;

  while (++batch_idx) {
    // Get data
    torch::Tensor batch_x, batch_y;
    std::tie(batch_x, batch_y) = get_batch(features, W_target, b_target, batch_size);

    // Reset gradients
    optim.zero_grad();

    // Forward pass
    auto output = torch::smooth_l1_loss(fc(batch_x), batch_y);
    loss = output.item<float>();

    // Backward pass
    output.backward();

    // Apply gradients
    optim.step();

    // Stop criterion
    if (loss < 1e-3f)
      break;
  }

  if (save_model) {
    torch::save(fc, model_filename.getString());
  }

  // output column with target + trained coefficients ordered by degree, then bias
  torch::Tensor output_coefficients = fc->weight.view({-1}).cpu();
  torch::Tensor goal_coefficients = W_target.view({-1}).cpu();
  int32_t out_column_idx, input_idx;
  for (out_column_idx = 0, input_idx = 0; input_idx < output_coefficients.size(0);
       ++input_idx) {
    output[out_column_idx++] = output_coefficients[input_idx].item<float>();
    output[out_column_idx++] = goal_coefficients[input_idx].item<float>();
  }
  output[out_column_idx++] = fc->bias[0].item<float>();
  output[out_column_idx] = b_target[0].item<float>();

  std::fprintf(stdout, "Loss: %lf after %ld batches\n", loss, batch_idx);
  std::fprintf(stdout,
               "==> Learned function:\t%s\n",
               poly_desc(output_coefficients, fc->bias).c_str());
  std::fprintf(stdout,
               "==> Actual function:\t%s\n",
               poly_desc(W_target.view({-1}).cpu(), b_target).c_str());

  return output_size;
}
Referenced declarations:
void TableFunctionManager::set_output_row_size(int64_t num_rows) (heavydbTypes.h:373)
std::string TextEncodingNone::getString() const (heavydbTypes.h:641)
DEVICE int64_t ColumnList< T >::numCols() const
std::pair< torch::Tensor, torch::Tensor > get_batch(const ColumnList< double > &cols, torch::Tensor W_target, torch::Tensor b_target, int32_t batch_size)
std::string poly_desc(torch::Tensor W, torch::Tensor b)
torch::Tensor f(torch::Tensor x, torch::Tensor W_target, torch::Tensor b_target)
torch::Device _test_torch_tfs_device
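poly_desc appears above only by signature. A minimal sketch of a formatter with that signature, assuming it renders the coefficients and bias as a human-readable polynomial (the actual helper in TestTorchTableFunctions.cpp may format differently):

#include <torch/torch.h>
#include <string>
#include <cstdio>

std::string poly_desc(torch::Tensor W, torch::Tensor b) {
  // Flatten the weight column and print one term per polynomial degree.
  auto coeffs = W.view({-1}).cpu();
  std::string result = "y =";
  char buf[64];
  for (int64_t i = 0; i < coeffs.size(0); ++i) {
    std::snprintf(buf, sizeof(buf), " %+.4f x^%lld", coeffs[i].item<float>(),
                  static_cast<long long>(i + 1));
    result += buf;
  }
  std::snprintf(buf, sizeof(buf), " %+.4f", b.cpu()[0].item<float>());
  result += buf;
  return result;
}

int main() {
  auto W = torch::randn({4, 1});
  auto b = torch::randn({1});
  std::printf("%s\n", poly_desc(W, b).c_str());
  return 0;
}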

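Finally, to see the same fitting loop end to end without the HeavyDB column types, here is a self-contained libtorch sketch on synthetic data. The batch construction is an assumption standing in for get_batch() and f() (build features x^1..x^degree and targets from W_target and b_target); it is not the test's actual helper code:

#include <torch/torch.h>
#include <cstdio>

int main() {
  const int64_t poly_degree = 3;
  const int64_t batch_size = 32;
  // Ground-truth coefficients and bias, as in tf_test_torch_regression above.
  auto W_target = torch::randn({poly_degree, 1}) * 5;
  auto b_target = torch::randn({1}) * 5;

  auto fc = torch::nn::Linear(poly_degree, 1);
  torch::optim::SGD optim(fc->parameters(), /*lr=*/0.1);

  float loss = 0;
  int64_t batch_idx = 0;
  while (true) {
    ++batch_idx;
    // Stand-in for get_batch(): random x, features x^1..x^degree,
    // targets from the ground-truth linear map.
    auto x = torch::randn({batch_size, 1});
    auto exps = torch::arange(1, poly_degree + 1).to(torch::kFloat);
    auto batch_x = x.pow(exps);
    auto batch_y = batch_x.mm(W_target) + b_target;

    optim.zero_grad();
    auto out = torch::smooth_l1_loss(fc(batch_x), batch_y);
    loss = out.item<float>();
    out.backward();
    optim.step();

    if (loss < 1e-3f) {
      break;
    }
  }
  std::printf("Loss: %f after %lld batches\n", loss, static_cast<long long>(batch_idx));
  return 0;
}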