Engine.cpp
1 /*
2  * Souffle - A Datalog Compiler
3  * Copyright (c) 2019, The Souffle Developers. All rights reserved.
4  * Licensed under the Universal Permissive License v 1.0 as shown at:
5  * - https://opensource.org/licenses/UPL
6  * - <souffle root>/licenses/SOUFFLE-UPL.txt
7  */
8 
9 /************************************************************************
10  *
11  * @file Engine.cpp
12  *
13  * Define the Interpreter Engine class.
14  ***********************************************************************/
15 
16 #include "interpreter/Engine.h"
17 #include "AggregateOp.h"
18 #include "FunctorOps.h"
19 #include "Global.h"
20 #include "interpreter/Context.h"
21 #include "interpreter/Index.h"
22 #include "interpreter/Node.h"
23 #include "interpreter/Relation.h"
24 #include "interpreter/ViewContext.h"
25 #include "ram/Aggregate.h"
26 #include "ram/AutoIncrement.h"
27 #include "ram/Break.h"
28 #include "ram/Call.h"
29 #include "ram/Choice.h"
30 #include "ram/Clear.h"
31 #include "ram/Conjunction.h"
32 #include "ram/Constant.h"
33 #include "ram/Constraint.h"
34 #include "ram/DebugInfo.h"
35 #include "ram/EmptinessCheck.h"
36 #include "ram/ExistenceCheck.h"
37 #include "ram/Exit.h"
38 #include "ram/Extend.h"
39 #include "ram/False.h"
40 #include "ram/Filter.h"
41 #include "ram/IO.h"
42 #include "ram/IndexAggregate.h"
43 #include "ram/IndexChoice.h"
44 #include "ram/IndexScan.h"
45 #include "ram/IntrinsicOperator.h"
46 #include "ram/LogRelationTimer.h"
47 #include "ram/LogSize.h"
48 #include "ram/LogTimer.h"
49 #include "ram/Loop.h"
50 #include "ram/Negation.h"
51 #include "ram/NestedIntrinsicOperator.h"
52 #include "ram/PackRecord.h"
53 #include "ram/Parallel.h"
54 #include "ram/ParallelAggregate.h"
55 #include "ram/ParallelChoice.h"
56 #include "ram/ParallelIndexAggregate.h"
57 #include "ram/ParallelIndexChoice.h"
58 #include "ram/ParallelIndexScan.h"
59 #include "ram/ParallelScan.h"
60 #include "ram/Program.h"
61 #include "ram/Project.h"
62 #include "ram/ProvenanceExistenceCheck.h"
63 #include "ram/Query.h"
64 #include "ram/Relation.h"
65 #include "ram/RelationSize.h"
66 #include "ram/Scan.h"
67 #include "ram/Sequence.h"
68 #include "ram/Statement.h"
69 #include "ram/SubroutineArgument.h"
70 #include "ram/SubroutineReturn.h"
71 #include "ram/Swap.h"
72 #include "ram/TranslationUnit.h"
73 #include "ram/True.h"
74 #include "ram/TupleElement.h"
75 #include "ram/TupleOperation.h"
76 #include "ram/UnpackRecord.h"
77 #include "ram/UserDefinedOperator.h"
78 #include "ram/utility/Visitor.h"
79 #include "souffle/BinaryConstraintOps.h"
80 #include "souffle/RamTypes.h"
81 #include "souffle/RecordTable.h"
82 #include "souffle/SignalHandler.h"
83 #include "souffle/SymbolTable.h"
84 #include "souffle/TypeAttribute.h"
85 #include "souffle/io/IOSystem.h"
86 #include "souffle/io/ReadStream.h"
87 #include "souffle/io/WriteStream.h"
88 #include "souffle/profile/Logger.h"
94 #include <algorithm>
95 #include <array>
96 #include <atomic>
97 #include <cassert>
98 #include <cstdint>
99 #include <cstdlib>
100 #include <cstring>
101 #include <deque>
102 #include <functional>
103 #include <iostream>
104 #include <iterator>
105 #include <map>
106 #include <memory>
107 #include <regex>
108 #include <sstream>
109 #include <string>
110 #include <utility>
111 #include <vector>
112 #include <dlfcn.h>
113 #include <ffi.h>
114 
115 namespace souffle::interpreter {
116 
117 // Handle the difference in dynamic library suffixes.
118 #ifdef __APPLE__
119 #define dynamicLibSuffix ".dylib"
120 #else
121 #define dynamicLibSuffix ".so"
122 #endif
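// For example, a library named "functors" resolves to "libfunctors.dylib" on
// macOS and "libfunctors.so" elsewhere (see loadDLL() below).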
123 
124 // Aliases for foreign function interface.
125 #if RAM_DOMAIN_SIZE == 64
126 #define FFI_RamSigned ffi_type_sint64
127 #define FFI_RamUnsigned ffi_type_uint64
128 #define FFI_RamFloat ffi_type_double
129 #else
130 #define FFI_RamSigned ffi_type_sint32
131 #define FFI_RamUnsigned ffi_type_uint32
132 #define FFI_RamFloat ffi_type_float
133 #endif
134 
135 #define FFI_Symbol ffi_type_pointer
136 
137 namespace {
138 constexpr RamDomain RAM_BIT_SHIFT_MASK = RAM_DOMAIN_SIZE - 1;
139 }
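// Shift amounts are masked with RAM_BIT_SHIFT_MASK before shifting, so e.g. on
// a 32-bit domain a shift by 33 behaves like a shift by 1 rather than invoking
// undefined behaviour.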
140 
141 Engine::Engine(ram::TranslationUnit& tUnit)
142  : profileEnabled(Global::config().has("profile")), isProvenance(Global::config().has("provenance")),
143  numOfThreads(std::stoi(Global::config().get("jobs"))), tUnit(tUnit),
144  isa(tUnit.getAnalysis<ram::analysis::IndexAnalysis>()) {
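 // Only override the OpenMP thread count when an explicit, positive job count
 // was requested; a job count of 0 leaves the OpenMP default untouched.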
145 #ifdef _OPENMP
146  if (numOfThreads > 0) {
147  omp_set_num_threads(numOfThreads);
148  }
149 #endif
150 }
151 
152 Engine::RelationHandle& Engine::getRelationHandle(const size_t idx) {
153  return *relations[idx];
154 }
155 
156 void Engine::swapRelation(const size_t ramRel1, const size_t ramRel2) {
157  RelationHandle& rel1 = getRelationHandle(ramRel1);
158  RelationHandle& rel2 = getRelationHandle(ramRel2);
159  std::swap(rel1, rel2);
160 }
161 
162 int Engine::incCounter() {
163  return counter++;
164 }
165 
166 SymbolTable& Engine::getSymbolTable() {
167  return tUnit.getSymbolTable();
168 }
169 
170 RecordTable& Engine::getRecordTable() {
171  return recordTable;
172 }
173 
174 ram::TranslationUnit& Engine::getTranslationUnit() {
175  return tUnit;
176 }
177 
178 void* Engine::getMethodHandle(const std::string& method) {
179  // load DLLs (if not done yet)
180  for (void* libHandle : loadDLL()) {
181  auto* methodHandle = dlsym(libHandle, method.c_str());
182  if (methodHandle != nullptr) {
183  return methodHandle;
184  }
185  }
186  return nullptr;
187 }
188 
189 VecOwn<Engine::RelationHandle>& Engine::getRelationMap() {
190  return relations;
191 }
192 
193 void Engine::createRelation(const ram::Relation& id, const size_t idx) {
194  if (relations.size() < idx + 1) {
195  relations.resize(idx + 1);
196  }
197 
198  Own<RelationWrapper> res;
199  const auto& orderSet = isa->getIndexes(id.getName());
200  if (id.getRepresentation() == RelationRepresentation::EQREL) {
201  res = createEqrelRelation(id, orderSet);
202  } else {
203  if (isProvenance) {
204  res = createProvenanceRelation(id, orderSet);
205  } else {
206  res = createBTreeRelation(id, orderSet);
207  }
208  }
209  relations[idx] = mk<RelationHandle>(std::move(res));
210 }
211 
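// Loads the user-provided functor libraries exactly once. A rough sketch of the
// resolution order, assuming the default configuration: with
// `--library-dir "/opt/a /opt/b"` and `--libraries "functors"`, the loader tries
// "/opt/a/libfunctors.so", "/opt/b/libfunctors.so" and finally the bare
// "libfunctors.so" (left to dlopen's usual search), keeping the first handle
// that dlopen() returns. A library name containing '/' bypasses the directory
// list.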
212 const std::vector<void*>& Engine::loadDLL() {
213  if (!dll.empty()) {
214  return dll;
215  }
216 
217  if (!Global::config().has("libraries")) {
218  Global::config().set("libraries", "functors");
219  }
220  if (!Global::config().has("library-dir")) {
221  Global::config().set("library-dir", ".");
222  }
223 
224  for (const std::string& library : splitString(Global::config().get("libraries"), ' ')) {
225  // The library may be blank
226  if (library.empty()) {
227  continue;
228  }
229  auto paths = splitString(Global::config().get("library-dir"), ' ');
230  // Set up our paths to have a library appended
231  for (std::string& path : paths) {
232  if (path.back() != '/') {
233  path += '/';
234  }
235  }
236 
237  if (library.find('/') != std::string::npos) {
238  paths.clear();
239  }
240 
241  paths.push_back("");
242 
243  void* tmp = nullptr;
244  for (const std::string& path : paths) {
245  std::string fullpath = path + "lib" + library + dynamicLibSuffix;
246  tmp = dlopen(fullpath.c_str(), RTLD_LAZY);
247  if (tmp != nullptr) {
248  dll.push_back(tmp);
249  break;
250  }
251  }
252  }
253 
254  return dll;
255 }
256 
257 size_t Engine::getIterationNumber() const {
258  return iteration;
259 }
260 void Engine::incIterationNumber() {
261  ++iteration;
262 }
263 void Engine::resetIterationNumber() {
264  iteration = 0;
265 }
266 
267 void Engine::executeMain() {
268  SignalHandler::instance()->set();
269  if (Global::config().has("verbose")) {
270  SignalHandler::instance()->enableLogging();
271  }
272 
273  generateIR();
274  assert(main != nullptr && "Executing an empty program");
275 
276  Context ctxt;
277 
278  if (!profileEnabled) {
279  Context ctxt;
280  execute(main.get(), ctxt);
281  } else {
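 // Profiling run: pre-register a frequency counter per profiled rule and a
 // read counter per user relation, record the configuration, and emit the
 // collected counts as quantity events once execution finishes.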
282  ProfileEventSingleton::instance().setOutputFile(Global::config().get("profile"));
283  // Prepare the frequency table for threaded use
284  const ram::Program& program = tUnit.getProgram();
285  ram::visitDepthFirst(program, [&](const ram::TupleOperation& node) {
286  if (!node.getProfileText().empty()) {
287  frequencies.emplace(node.getProfileText(), std::deque<std::atomic<size_t>>());
288  frequencies[node.getProfileText()].emplace_back(0);
289  }
290  });
291  // Enable profiling for execution of main
292  ProfileEventSingleton::instance().startTimer();
293  ProfileEventSingleton::instance().makeTimeEvent("@time;starttime");
294  // Store configuration
295  for (const auto& cur : Global::config().data()) {
296  ProfileEventSingleton::instance().makeConfigRecord(cur.first, cur.second);
297  }
298  // Store count of relations
299  size_t relationCount = 0;
300  for (auto rel : tUnit.getProgram().getRelations()) {
301  if (rel->getName()[0] != '@') {
302  ++relationCount;
303  reads[rel->getName()] = 0;
304  }
305  }
306  ProfileEventSingleton::instance().makeConfigRecord("relationCount", std::to_string(relationCount));
307 
308  // Store count of rules
309  size_t ruleCount = 0;
310  ram::visitDepthFirst(program, [&](const ram::Query&) { ++ruleCount; });
311  ProfileEventSingleton::instance().makeConfigRecord("ruleCount", std::to_string(ruleCount));
312 
313  Context ctxt;
314  execute(main.get(), ctxt);
315  ProfileEventSingleton::instance().stopTimer();
316  for (auto const& cur : frequencies) {
317  for (size_t i = 0; i < cur.second.size(); ++i) {
318  ProfileEventSingleton::instance().makeQuantityEvent(cur.first, cur.second[i], i);
319  }
320  }
321  for (auto const& cur : reads) {
322  ProfileEventSingleton::instance().makeQuantityEvent(
323  "@relation-reads;" + cur.first, cur.second, 0);
324  }
325  }
326  SignalHandler::instance()->reset();
327 }
328 
329 void Engine::generateIR() {
330  const ram::Program& program = tUnit.getProgram();
331  NodeGenerator generator(*this);
332  if (subroutine.empty()) {
333  for (const auto& sub : program.getSubroutines()) {
334  subroutine.push_back(generator.generateTree(*sub.second));
335  }
336  }
337  if (main == nullptr) {
338  main = generator.generateTree(program.getMain());
339  }
340 }
341 
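// Subroutines are indexed by their position in the subroutine map, so the
// lookup below must use the same iteration order as generateIR(), which
// generated one IR tree per map entry.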
342 void Engine::executeSubroutine(
343  const std::string& name, const std::vector<RamDomain>& args, std::vector<RamDomain>& ret) {
344  Context ctxt;
345  ctxt.setReturnValues(ret);
346  ctxt.setArguments(args);
347  generateIR();
348  const ram::Program& program = tUnit.getProgram();
349  auto subs = program.getSubroutines();
350  size_t i = distance(subs.begin(), subs.find(name));
351  execute(subroutine[i].get(), ctxt);
352 }
353 
354 RamDomain Engine::execute(const Node* node, Context& ctxt) {
355 #define DEBUG(Kind) std::cout << "Running Node: " << #Kind << "\n";
356 #define EVAL_CHILD(ty, idx) ramBitCast<ty>(execute(shadow.getChild(idx), ctxt))
357 #define EVAL_LEFT(ty) ramBitCast<ty>(execute(shadow.getLhs(), ctxt))
358 #define EVAL_RIGHT(ty) ramBitCast<ty>(execute(shadow.getRhs(), ctxt))
359 
360 // Overload CASE based on number of arguments.
361 // CASE(Kind) -> BASE_CASE(Kind)
362 // CASE(Kind, Structure, Arity) -> EXTEND_CASE(Kind, Structure, Arity)
363 #define GET_MACRO(_1, _2, _3, NAME, ...) NAME
364 #define CASE(...) GET_MACRO(__VA_ARGS__, EXTEND_CASE, _Dummy, BASE_CASE)(__VA_ARGS__)
365 
366 #define BASE_CASE(Kind) \
367  case (I_##Kind): { \
368  return [&]() -> RamDomain { \
369  [[maybe_unused]] const auto& shadow = *static_cast<const interpreter::Kind*>(node); \
370  [[maybe_unused]] const auto& cur = *static_cast<const ram::Kind*>(node->getShadow());
371 // EXTEND_CASE additionally defines the concrete relation type (RelType) used in the case body.
372 #define EXTEND_CASE(Kind, Structure, Arity) \
373  case (I_##Kind##_##Structure##_##Arity): { \
374  return [&]() -> RamDomain { \
375  [[maybe_unused]] const auto& shadow = *static_cast<const interpreter::Kind*>(node); \
376  [[maybe_unused]] const auto& cur = *static_cast<const ram::Kind*>(node->getShadow());\
377  using RelType = Relation<Arity, interpreter::Structure>;
378 #define ESAC(Kind) \
379  } \
380  (); \
381  }
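// Rough expansion sketch: `CASE(Kind) ... ESAC(Kind)` becomes
//   case (I_Kind): { return [&]() -> RamDomain { <shadow/cur casts> ... }(); }
// i.e. every case body runs inside an immediately-invoked lambda whose value
// becomes the result of execute().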
382 
383 #define TUPLE_COPY_FROM(dst, src) \
384  assert(dst.size() == src.size()); \
385  std::copy_n(src.begin(), dst.size(), dst.begin())
386 
387 #define CAL_SEARCH_BOUND(superInfo, low, high) \
388  /** Unbounded and Constant */ \
389  TUPLE_COPY_FROM(low, superInfo.first); \
390  TUPLE_COPY_FROM(high, superInfo.second); \
391  /* TupleElement */ \
392  for (const auto& tupleElement : superInfo.tupleFirst) { \
393  low[tupleElement[0]] = ctxt[tupleElement[1]][tupleElement[2]]; \
394  } \
395  for (const auto& tupleElement : superInfo.tupleSecond) { \
396  high[tupleElement[0]] = ctxt[tupleElement[1]][tupleElement[2]]; \
397  } \
398  /* Generic */ \
399  for (const auto& expr : superInfo.exprFirst) { \
400  low[expr.first] = execute(expr.second.get(), ctxt); \
401  } \
402  for (const auto& expr : superInfo.exprSecond) { \
403  high[expr.first] = execute(expr.second.get(), ctxt); \
404  }
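// CAL_SEARCH_BOUND fills the `low`/`high` search bounds from the three sources
// encoded in the super-instruction: compile-time constants (copied up front),
// elements of already-bound tuples, and arbitrary sub-expressions evaluated
// here. The index scans, choices and aggregates below all rely on it.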
405 
406  switch (node->getType()) {
407  CASE(Constant)
408  return cur.getConstant();
409  ESAC(Constant)
410 
411  CASE(TupleElement)
412  return ctxt[shadow.getTupleId()][shadow.getElement()];
413  ESAC(TupleElement)
414 
415  CASE(AutoIncrement)
416  return incCounter();
417  ESAC(AutoIncrement)
418 
419  CASE(IntrinsicOperator)
420 // clang-format off
421 #define BINARY_OP_TYPED(ty, op) return ramBitCast(static_cast<ty>(EVAL_CHILD(ty, 0) op EVAL_CHILD(ty, 1)))
422 
423 #define BINARY_OP_LOGICAL(opcode, op) BINARY_OP_INTEGRAL(opcode, op)
424 #define BINARY_OP_INTEGRAL(opcode, op) \
425  case FunctorOp:: opcode: BINARY_OP_TYPED(RamSigned , op); \
426  case FunctorOp::U##opcode: BINARY_OP_TYPED(RamUnsigned, op);
427 #define BINARY_OP_NUMERIC(opcode, op) \
428  BINARY_OP_INTEGRAL(opcode, op) \
429  case FunctorOp::F##opcode: BINARY_OP_TYPED(RamFloat, op);
430 
431 #define BINARY_OP_SHIFT_MASK(ty, op) \
432  return ramBitCast(EVAL_CHILD(ty, 0) op (EVAL_CHILD(ty, 1) & RAM_BIT_SHIFT_MASK))
433 #define BINARY_OP_INTEGRAL_SHIFT(opcode, op, tySigned, tyUnsigned) \
434  case FunctorOp:: opcode: BINARY_OP_SHIFT_MASK(tySigned , op); \
435  case FunctorOp::U##opcode: BINARY_OP_SHIFT_MASK(tyUnsigned , op);
436 
437 #define MINMAX_OP_SYM(op) \
438  { \
439  auto result = EVAL_CHILD(RamDomain, 0); \
440  auto* result_val = &getSymbolTable().resolve(result); \
441  for (size_t i = 1; i < args.size(); i++) { \
442  auto alt = EVAL_CHILD(RamDomain, i); \
443  if (alt == result) continue; \
444  \
445  const auto& alt_val = getSymbolTable().resolve(alt); \
446  if (*result_val op alt_val) { \
447  result_val = &alt_val; \
448  result = alt; \
449  } \
450  } \
451  return result; \
452  }
453 #define MINMAX_OP(ty, op) \
454  { \
455  auto result = EVAL_CHILD(ty, 0); \
456  for (size_t i = 1; i < args.size(); i++) { \
457  result = op(result, EVAL_CHILD(ty, i)); \
458  } \
459  return ramBitCast(result); \
460  }
461 #define MINMAX_NUMERIC(opCode, op) \
462  case FunctorOp:: opCode: MINMAX_OP(RamSigned , op) \
463  case FunctorOp::U##opCode: MINMAX_OP(RamUnsigned, op) \
464  case FunctorOp::F##opCode: MINMAX_OP(RamFloat , op)
465 
466 #define UNARY_OP(op, ty, func) \
467  case FunctorOp::op: { \
468  auto x = EVAL_CHILD(ty, 0); \
469  return ramBitCast(func(x)); \
470  }
471 #define CONV_TO_STRING(op, ty) \
472  case FunctorOp::op: return getSymbolTable().lookup(std::to_string(EVAL_CHILD(ty, 0)));
473 #define CONV_FROM_STRING(op, ty) \
474  case FunctorOp::op: return evaluator::symbol2numeric<ty>( \
475  getSymbolTable().resolve(EVAL_CHILD(RamDomain, 0)));
476  // clang-format on
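 // Sketch: BINARY_OP_NUMERIC(ADD, +) expands to three cases, FunctorOp::ADD,
 // FunctorOp::UADD and FunctorOp::FADD, each bit-casting both children to the
 // matching RAM type, applying `+`, and bit-casting the result back to a
 // RamDomain.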
477 
478  const auto& args = cur.getArguments();
479  switch (cur.getOperator()) {
480  /** Unary Functor Operators */
481  case FunctorOp::ORD: return execute(shadow.getChild(0), ctxt);
482  case FunctorOp::STRLEN:
483  return getSymbolTable().resolve(execute(shadow.getChild(0), ctxt)).size();
484  case FunctorOp::NEG: return -execute(shadow.getChild(0), ctxt);
485  case FunctorOp::FNEG: {
486  RamDomain result = execute(shadow.getChild(0), ctxt);
487  return ramBitCast(-ramBitCast<RamFloat>(result));
488  }
489  case FunctorOp::BNOT: return ~execute(shadow.getChild(0), ctxt);
490  case FunctorOp::UBNOT: {
491  RamDomain result = execute(shadow.getChild(0), ctxt);
492  return ramBitCast(~ramBitCast<RamUnsigned>(result));
493  }
494  case FunctorOp::LNOT: return !execute(shadow.getChild(0), ctxt);
495 
496  case FunctorOp::ULNOT: {
497  RamDomain result = execute(shadow.getChild(0), ctxt);
498  // Casting is a bit tricky here, since ! returns a boolean.
499  return ramBitCast(static_cast<RamUnsigned>(!ramBitCast<RamUnsigned>(result)));
500  }
501 
502  // clang-format off
503  /** Numeric coercions follow C++ semantics. */
504  UNARY_OP(F2I, RamFloat , static_cast<RamSigned>)
505  UNARY_OP(F2U, RamFloat , static_cast<RamUnsigned>)
506  UNARY_OP(I2U, RamSigned , static_cast<RamUnsigned>)
507  UNARY_OP(I2F, RamSigned , static_cast<RamFloat>)
508  UNARY_OP(U2I, RamUnsigned, static_cast<RamSigned>)
509  UNARY_OP(U2F, RamUnsigned, static_cast<RamFloat>)
510 
511  CONV_TO_STRING(F2S, RamFloat)
512  CONV_TO_STRING(I2S, RamSigned)
513  CONV_TO_STRING(U2S, RamUnsigned)
514 
515  CONV_FROM_STRING(S2F, RamFloat)
516  CONV_FROM_STRING(S2I, RamSigned)
517  CONV_FROM_STRING(S2U, RamUnsigned)
518 
519  /** Binary Functor Operators */
520  BINARY_OP_NUMERIC(ADD, +)
521  BINARY_OP_NUMERIC(SUB, -)
522  BINARY_OP_NUMERIC(MUL, *)
523  BINARY_OP_NUMERIC(DIV, /)
524  // clang-format on
525 
526  case FunctorOp::EXP: {
527  return std::pow(execute(shadow.getChild(0), ctxt), execute(shadow.getChild(1), ctxt));
528  }
529 
530  case FunctorOp::UEXP: {
531  auto first = ramBitCast<RamUnsigned>(execute(shadow.getChild(0), ctxt));
532  auto second = ramBitCast<RamUnsigned>(execute(shadow.getChild(1), ctxt));
533  // Extra casting required: pow returns a floating point.
534  return ramBitCast(static_cast<RamUnsigned>(std::pow(first, second)));
535  }
536 
537  case FunctorOp::FEXP: {
538  auto first = ramBitCast<RamFloat>(execute(shadow.getChild(0), ctxt));
539  auto second = ramBitCast<RamFloat>(execute(shadow.getChild(1), ctxt));
540  return ramBitCast(static_cast<RamFloat>(std::pow(first, second)));
541  }
542 
543  // clang-format off
544  BINARY_OP_INTEGRAL(MOD, %)
545  BINARY_OP_INTEGRAL(BAND, &)
546  BINARY_OP_INTEGRAL(BOR , |)
547  BINARY_OP_INTEGRAL(BXOR, ^)
548  // Handle left-shift as unsigned to match Java semantics of `<<`, namely:
549  // "... `n << s` is `n` left-shifted `s` bit positions; ..."
550  // Using `RamSigned` would imply UB due to signed overflow when shifting negatives.
551  BINARY_OP_INTEGRAL_SHIFT(BSHIFT_L , <<, RamUnsigned, RamUnsigned)
552  // For right-shift, we do need sign extension.
553  BINARY_OP_INTEGRAL_SHIFT(BSHIFT_R , >>, RamSigned , RamUnsigned)
554  BINARY_OP_INTEGRAL_SHIFT(BSHIFT_R_UNSIGNED, >>, RamUnsigned, RamUnsigned)
555 
556  BINARY_OP_LOGICAL(LAND, &&)
557  BINARY_OP_LOGICAL(LOR , ||)
558  BINARY_OP_LOGICAL(LXOR, + souffle::evaluator::lxor_infix() +)
559 
560  MINMAX_NUMERIC(MAX, std::max)
561  MINMAX_NUMERIC(MIN, std::min)
562 
563  case FunctorOp::SMAX: MINMAX_OP_SYM(<)
564  case FunctorOp::SMIN: MINMAX_OP_SYM(>)
565  // clang-format on
566 
567  case FunctorOp::CAT: {
568  std::stringstream ss;
569  for (size_t i = 0; i < args.size(); i++) {
570  ss << getSymbolTable().resolve(execute(shadow.getChild(i), ctxt));
571  }
572  return getSymbolTable().lookup(ss.str());
573  }
574  /** Ternary Functor Operators */
575  case FunctorOp::SUBSTR: {
576  auto symbol = execute(shadow.getChild(0), ctxt);
577  const std::string& str = getSymbolTable().resolve(symbol);
578  auto idx = execute(shadow.getChild(1), ctxt);
579  auto len = execute(shadow.getChild(2), ctxt);
580  std::string sub_str;
581  try {
582  sub_str = str.substr(idx, len);
583  } catch (...) {
584  std::cerr << "warning: wrong index position provided by substr(\"";
585  std::cerr << str << "\"," << (int32_t)idx << "," << (int32_t)len << ") functor.\n";
586  }
587  return getSymbolTable().lookup(sub_str);
588  }
589 
590  case FunctorOp::RANGE:
591  case FunctorOp::URANGE:
592  case FunctorOp::FRANGE:
593  fatal("ICE: functor `%s` must map onto `NestedIntrinsicOperator`", cur.getOperator());
594  }
595 
596  {UNREACHABLE_BAD_CASE_ANALYSIS}
597 
598 #undef BINARY_OP_LOGICAL
599 #undef BINARY_OP_INTEGRAL
600 #undef BINARY_OP_NUMERIC
601 #undef BINARY_OP_SHIFT_MASK
602 #undef BINARY_OP_INTEGRAL_SHIFT
603 #undef MINMAX_OP_SYM
604 #undef MINMAX_OP
605 #undef MINMAX_NUMERIC
606 #undef UNARY_OP
607 #undef CONV_TO_STRING
608 #undef CONV_FROM_STRING
609  ESAC(IntrinsicOperator)
610 
611  CASE(NestedIntrinsicOperator)
612  auto numArgs = cur.getArguments().size();
613  auto runNested = [&](auto&& tuple) {
614  ctxt[cur.getTupleId()] = tuple.data();
615  execute(shadow.getChild(numArgs), ctxt);
616  };
617 
618 #define RUN_RANGE(ty) \
619  numArgs == 3 \
620  ? evaluator::runRange<ty>(EVAL_CHILD(ty, 0), EVAL_CHILD(ty, 1), EVAL_CHILD(ty, 2), runNested) \
621  : evaluator::runRange<ty>(EVAL_CHILD(ty, 0), EVAL_CHILD(ty, 1), runNested), \
622  true
623 
624  switch (cur.getFunction()) {
625  case ram::NestedIntrinsicOp::RANGE: return RUN_RANGE(RamSigned);
626  case ram::NestedIntrinsicOp::URANGE: return RUN_RANGE(RamUnsigned);
627  case ram::NestedIntrinsicOp::FRANGE: return RUN_RANGE(RamFloat);
628  }
629 
630  {UNREACHABLE_BAD_CASE_ANALYSIS}
631 #undef RUN_RANGE
632  ESAC(NestedIntrinsicOperator)
633 
634  CASE(UserDefinedOperator)
635  const std::string& name = cur.getName();
636 
637  auto fn = reinterpret_cast<void (*)()>(getMethodHandle(name));
638  if (fn == nullptr) fatal("cannot find user-defined operator `%s`", name);
639  size_t arity = cur.getArguments().size();
640 
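 // Stateful user-defined functors receive two extra leading arguments, a
 // SymbolTable* and a RecordTable*, followed by the raw RamDomain arguments,
 // hence the `arity + 2` slots prepared below. Stateless functors (the else
 // branch) instead marshal each argument according to its declared type.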
641  if (cur.isStateful()) {
642  // prepare dynamic call environment
643  ffi_cif cif;
644  ffi_type* args[arity + 2];
645  void* values[arity + 2];
646  RamDomain intVal[arity];
647  ffi_arg rc;
648 
649  /* Initialize arguments for ffi-call */
650  args[0] = args[1] = &ffi_type_pointer;
651  void* symbolTable = (void*)&getSymbolTable();
652  values[0] = &symbolTable;
653  void* recordTable = (void*)&getRecordTable();
654  values[1] = &recordTable;
655  for (size_t i = 0; i < arity; i++) {
656  intVal[i] = execute(shadow.getChild(i), ctxt);
657  args[i + 2] = &FFI_RamSigned;
658  values[i + 2] = &intVal[i];
659  }
660 
661  // Set codomain.
662  auto codomain = &FFI_RamSigned;
663 
664  // Call the external function.
665  const auto prepStatus = ffi_prep_cif(&cif, FFI_DEFAULT_ABI, arity + 2, codomain, args);
666  if (prepStatus != FFI_OK) {
667  fatal("Failed to prepare CIF for user-defined operator `%s`; error code = %d", name,
668  prepStatus);
669  }
670  ffi_call(&cif, fn, &rc, values);
671  return static_cast<RamDomain>(rc);
672  } else {
673  // get name and type
674  const std::vector<TypeAttribute>& type = cur.getArgsTypes();
675 
676  // prepare dynamic call environment
677  ffi_cif cif;
678  ffi_type* args[arity];
679  void* values[arity];
680  RamDomain intVal[arity];
681  RamUnsigned uintVal[arity];
682  RamFloat floatVal[arity];
683  const char* strVal[arity];
684  ffi_arg rc;
685 
686  /* Initialize arguments for ffi-call */
687  for (size_t i = 0; i < arity; i++) {
688  RamDomain arg = execute(shadow.getChild(i), ctxt);
689  switch (type[i]) {
690  case TypeAttribute::Symbol:
691  args[i] = &FFI_Symbol;
692  strVal[i] = getSymbolTable().resolve(arg).c_str();
693  values[i] = &strVal[i];
694  break;
695  case TypeAttribute::Signed:
696  args[i] = &FFI_RamSigned;
697  intVal[i] = arg;
698  values[i] = &intVal[i];
699  break;
700  case TypeAttribute::Unsigned:
701  args[i] = &FFI_RamUnsigned;
702  uintVal[i] = ramBitCast<RamUnsigned>(arg);
703  values[i] = &uintVal[i];
704  break;
705  case TypeAttribute::Float:
706  args[i] = &FFI_RamFloat;
707  floatVal[i] = ramBitCast<RamFloat>(arg);
708  values[i] = &floatVal[i];
709  break;
710  case TypeAttribute::ADT: fatal("ADT support is not implemented");
711  case TypeAttribute::Record: fatal("Record support is not implemented");
712  }
713  }
714 
715  // Get codomain.
716  auto codomain = &FFI_RamSigned;
717  switch (cur.getReturnType()) {
718  // initialize for string value.
719  case TypeAttribute::Symbol: codomain = &FFI_Symbol; break;
720  case TypeAttribute::Signed: codomain = &FFI_RamSigned; break;
721  case TypeAttribute::Unsigned: codomain = &FFI_RamUnsigned; break;
722  case TypeAttribute::Float: codomain = &FFI_RamFloat; break;
723  case TypeAttribute::ADT: fatal("Not implemented");
724  case TypeAttribute::Record: fatal("Not implemented");
725  }
726 
727  // Call the external function.
728  const auto prepStatus = ffi_prep_cif(&cif, FFI_DEFAULT_ABI, arity, codomain, args);
729  if (prepStatus != FFI_OK) {
730  fatal("Failed to prepare CIF for user-defined operator `%s`; error code = %d", name,
731  prepStatus);
732  }
733  ffi_call(&cif, fn, &rc, values);
734 
735  switch (cur.getReturnType()) {
736  case TypeAttribute::Signed: return static_cast<RamDomain>(rc);
737  case TypeAttribute::Symbol:
738  return getSymbolTable().lookup(reinterpret_cast<const char*>(rc));
739 
740  case TypeAttribute::Unsigned: return ramBitCast(static_cast<RamUnsigned>(rc));
741  case TypeAttribute::Float: return ramBitCast(static_cast<RamFloat>(rc));
742  case TypeAttribute::ADT: fatal("Not implemented");
743  case TypeAttribute::Record: fatal("Not implemented");
744  }
745  fatal("Unsupported user defined operator");
746  }
747 
748  ESAC(UserDefinedOperator)
749 
750  CASE(PackRecord)
751  auto values = cur.getArguments();
752  size_t arity = values.size();
753  RamDomain data[arity];
754  for (size_t i = 0; i < arity; ++i) {
755  data[i] = execute(shadow.getChild(i), ctxt);
756  }
757  return getRecordTable().pack(data, arity);
758  ESAC(PackRecord)
759 
760  CASE(SubroutineArgument)
761  return ctxt.getArgument(cur.getArgument());
762  ESAC(SubroutineArgument)
763 
764  CASE(True)
765  return true;
766  ESAC(True)
767 
768  CASE(False)
769  return false;
770  ESAC(False)
771 
772  CASE(Conjunction)
773  return execute(shadow.getLhs(), ctxt) && execute(shadow.getRhs(), ctxt);
774  ESAC(Conjunction)
775 
776  CASE(Negation)
777  return !execute(shadow.getChild(), ctxt);
778  ESAC(Negation)
779 
780 #define EMPTINESS_CHECK(Structure, Arity, ...) \
781  CASE(EmptinessCheck, Structure, Arity) \
782  const auto& rel = *static_cast<RelType*>(node->getRelation()); \
783  return rel.empty(); \
784  ESAC(EmptinessCheck)
785 
786  FOR_EACH(EMPTINESS_CHECK)
787 #undef EMPTINESS_CHECK
788 
789 #define RELATION_SIZE(Structure, Arity, ...) \
790  CASE(RelationSize, Structure, Arity) \
791  const auto& rel = *static_cast<RelType*>(node->getRelation()); \
792  return rel.size(); \
793  ESAC(RelationSize)
794 
795  FOR_EACH(RELATION_SIZE)
796 #undef RELATION_SIZE
797 
798 #define EXISTENCE_CHECK(Structure, Arity, ...) \
799  CASE(ExistenceCheck, Structure, Arity) \
800  return evalExistenceCheck<RelType>(shadow, ctxt); \
801  ESAC(ExistenceCheck)
802 
803  FOR_EACH(EXISTENCE_CHECK)
804 #undef EXISTENCE_CHECK
805 
806 #define PROVENANCE_EXISTENCE_CHECK(Structure, Arity, ...) \
807  CASE(ProvenanceExistenceCheck, Structure, Arity) \
808  return evalProvenanceExistenceCheck<RelType>(shadow, ctxt); \
809  ESAC(ProvenanceExistenceCheck)
810 
811  FOR_EACH_PROVENANCE(PROVENANCE_EXISTENCE_CHECK)
812 #undef PROVENANCE_EXISTENCE_CHECK
813 
814  CASE(Constraint)
815  // clang-format off
816 #define COMPARE_NUMERIC(ty, op) return EVAL_LEFT(ty) op EVAL_RIGHT(ty)
817 #define COMPARE_STRING(op) \
818  return (getSymbolTable().resolve(EVAL_LEFT(RamDomain)) op \
819  getSymbolTable().resolve(EVAL_RIGHT(RamDomain)))
820 #define COMPARE_EQ_NE(opCode, op) \
821  case BinaryConstraintOp:: opCode: COMPARE_NUMERIC(RamDomain , op); \
822  case BinaryConstraintOp::F##opCode: COMPARE_NUMERIC(RamFloat , op);
823 #define COMPARE(opCode, op) \
824  case BinaryConstraintOp:: opCode: COMPARE_NUMERIC(RamSigned , op); \
825  case BinaryConstraintOp::U##opCode: COMPARE_NUMERIC(RamUnsigned, op); \
826  case BinaryConstraintOp::F##opCode: COMPARE_NUMERIC(RamFloat , op); \
827  case BinaryConstraintOp::S##opCode: COMPARE_STRING(op);
828  // clang-format on
829 
830  switch (cur.getOperator()) {
831  COMPARE_EQ_NE(EQ, ==)
832  COMPARE_EQ_NE(NE, !=)
833 
834  COMPARE(LT, <)
835  COMPARE(LE, <=)
836  COMPARE(GT, >)
837  COMPARE(GE, >=)
838 
839  case BinaryConstraintOp::MATCH: {
840  RamDomain left = execute(shadow.getLhs(), ctxt);
841  RamDomain right = execute(shadow.getRhs(), ctxt);
842  const std::string& pattern = getSymbolTable().resolve(left);
843  const std::string& text = getSymbolTable().resolve(right);
844  bool result = false;
845  try {
846  result = std::regex_match(text, std::regex(pattern));
847  } catch (...) {
848  std::cerr << "warning: wrong pattern provided for match(\"" << pattern << "\",\""
849  << text << "\").\n";
850  }
851  return result;
852  }
853  case BinaryConstraintOp::NOT_MATCH: {
854  RamDomain left = execute(shadow.getLhs(), ctxt);
855  RamDomain right = execute(shadow.getRhs(), ctxt);
856  const std::string& pattern = getSymbolTable().resolve(left);
857  const std::string& text = getSymbolTable().resolve(right);
858  bool result = false;
859  try {
860  result = !std::regex_match(text, std::regex(pattern));
861  } catch (...) {
862  std::cerr << "warning: wrong pattern provided for !match(\"" << pattern << "\",\""
863  << text << "\").\n";
864  }
865  return result;
866  }
867  case BinaryConstraintOp::CONTAINS: {
868  RamDomain left = execute(shadow.getLhs(), ctxt);
869  RamDomain right = execute(shadow.getRhs(), ctxt);
870  const std::string& pattern = getSymbolTable().resolve(left);
871  const std::string& text = getSymbolTable().resolve(right);
872  return text.find(pattern) != std::string::npos;
873  }
874  case BinaryConstraintOp::NOT_CONTAINS: {
875  RamDomain left = execute(shadow.getLhs(), ctxt);
876  RamDomain right = execute(shadow.getRhs(), ctxt);
877  const std::string& pattern = getSymbolTable().resolve(left);
878  const std::string& text = getSymbolTable().resolve(right);
879  return text.find(pattern) == std::string::npos;
880  }
881  }
882 
883  {UNREACHABLE_BAD_CASE_ANALYSIS}
884 
885 #undef COMPARE_NUMERIC
886 #undef COMPARE_STRING
887 #undef COMPARE
888 #undef COMPARE_EQ_NE
889  ESAC(Constraint)
890 
891  CASE(TupleOperation)
892  bool result = execute(shadow.getChild(), ctxt);
893 
894  if (profileEnabled && !cur.getProfileText().empty()) {
895  auto& currentFrequencies = frequencies[cur.getProfileText()];
896  while (currentFrequencies.size() <= getIterationNumber()) {
897 #pragma omp critical(frequencies)
898  currentFrequencies.emplace_back(0);
899  }
900  frequencies[cur.getProfileText()][getIterationNumber()]++;
901  }
902  return result;
903  ESAC(TupleOperation)
904 
905 #define SCAN(Structure, Arity, ...) \
906  CASE(Scan, Structure, Arity) \
907  const auto& rel = *static_cast<RelType*>(node->getRelation()); \
908  return evalScan(rel, cur, shadow, ctxt); \
909  ESAC(Scan)
910 
911  FOR_EACH(SCAN)
912 #undef SCAN
913 
914 #define PARALLEL_SCAN(Structure, Arity, ...) \
915  CASE(ParallelScan, Structure, Arity) \
916  const auto& rel = *static_cast<RelType*>(node->getRelation()); \
917  return evalParallelScan(rel, cur, shadow, ctxt); \
918  ESAC(ParallelScan)
919  FOR_EACH(PARALLEL_SCAN)
920 #undef PARALLEL_SCAN
921 
922 #define INDEX_SCAN(Structure, Arity, ...) \
923  CASE(IndexScan, Structure, Arity) \
924  return evalIndexScan<RelType>(cur, shadow, ctxt); \
925  ESAC(IndexScan)
926 
927  FOR_EACH(INDEX_SCAN)
928 #undef INDEX_SCAN
929 
930 #define PARALLEL_INDEX_SCAN(Structure, Arity, ...) \
931  CASE(ParallelIndexScan, Structure, Arity) \
932  const auto& rel = *static_cast<RelType*>(node->getRelation()); \
933  return evalParallelIndexScan(rel, cur, shadow, ctxt); \
934  ESAC(ParallelIndexScan)
935 
936  FOR_EACH(PARALLEL_INDEX_SCAN)
937 #undef PARALLEL_INDEX_SCAN
938 
939 #define CHOICE(Structure, Arity, ...) \
940  CASE(Choice, Structure, Arity) \
941  const auto& rel = *static_cast<RelType*>(node->getRelation()); \
942  return evalChoice(rel, cur, shadow, ctxt); \
943  ESAC(Choice)
944 
945  FOR_EACH(CHOICE)
946 #undef CHOICE
947 
948 #define PARALLEL_CHOICE(Structure, Arity, ...) \
949  CASE(ParallelChoice, Structure, Arity) \
950  const auto& rel = *static_cast<RelType*>(node->getRelation()); \
951  return evalParallelChoice(rel, cur, shadow, ctxt); \
952  ESAC(ParallelChoice)
953 
954  FOR_EACH(PARALLEL_CHOICE)
955 #undef PARALLEL_CHOICE
956 
957 #define INDEX_CHOICE(Structure, Arity, ...) \
958  CASE(IndexChoice, Structure, Arity) \
959  return evalIndexChoice<RelType>(cur, shadow, ctxt); \
960  ESAC(IndexChoice)
961 
962  FOR_EACH(INDEX_CHOICE)
963 #undef INDEX_CHOICE
964 
965 #define PARALLEL_INDEX_CHOICE(Structure, Arity, ...) \
966  CASE(ParallelIndexChoice, Structure, Arity) \
967  const auto& rel = *static_cast<RelType*>(node->getRelation()); \
968  return evalParallelIndexChoice(rel, cur, shadow, ctxt); \
969  ESAC(ParallelIndexChoice)
970 
971  FOR_EACH(PARALLEL_INDEX_CHOICE)
972 #undef PARALLEL_INDEX_CHOICE
973 
974  CASE(UnpackRecord)
975  RamDomain ref = execute(shadow.getExpr(), ctxt);
976 
977  // check for nil
978  if (ref == 0) {
979  return true;
980  }
981 
982  // update environment variable
983  size_t arity = cur.getArity();
984  const RamDomain* tuple = getRecordTable().unpack(ref, arity);
985 
986  // save reference to temporary value
987  ctxt[cur.getTupleId()] = tuple;
988 
989  // run nested part - using base class visitor
990  return execute(shadow.getNestedOperation(), ctxt);
991  ESAC(UnpackRecord)
992 
993 #define PARALLEL_AGGREGATE(Structure, Arity, ...) \
994  CASE(ParallelAggregate, Structure, Arity) \
995  const auto& rel = *static_cast<RelType*>(node->getRelation()); \
996  return evalParallelAggregate(rel, cur, shadow, ctxt); \
997  ESAC(ParallelAggregate)
998 
999  FOR_EACH(PARALLEL_AGGREGATE)
1000 #undef PARALLEL_AGGREGATE
1001 
1002 #define AGGREGATE(Structure, Arity, ...) \
1003  CASE(Aggregate, Structure, Arity) \
1004  const auto& rel = *static_cast<RelType*>(node->getRelation()); \
1005  return evalAggregate(cur, *shadow.getCondition(), shadow.getExpr(), *shadow.getNestedOperation(), \
1006  rel.scan(), ctxt); \
1007  ESAC(Aggregate)
1008 
1009  FOR_EACH(AGGREGATE)
1010 #undef AGGREGATE
1011 
1012 #define PARALLEL_INDEX_AGGREGATE(Structure, Arity, ...) \
1013  CASE(ParallelIndexAggregate, Structure, Arity) \
1014  return evalParallelIndexAggregate<RelType>(cur, shadow, ctxt); \
1015  ESAC(ParallelIndexAggregate)
1016 
1017  FOR_EACH(PARALLEL_INDEX_AGGREGATE)
1018 #undef PARALLEL_INDEX_AGGREGATE
1019 
1020 #define INDEX_AGGREGATE(Structure, Arity, ...) \
1021  CASE(IndexAggregate, Structure, Arity) \
1022  return evalIndexAggregate<RelType>(cur, shadow, ctxt); \
1023  ESAC(IndexAggregate)
1024 
1025  FOR_EACH(INDEX_AGGREGATE)
1026 #undef INDEX_AGGREGATE
1027 
1028  CASE(Break)
1029  // check condition
1030  if (execute(shadow.getCondition(), ctxt)) {
1031  return false;
1032  }
1033  return execute(shadow.getNestedOperation(), ctxt);
1034  ESAC(Break)
1035 
1036  CASE(Filter)
1037  bool result = true;
1038  // check condition
1039  if (execute(shadow.getCondition(), ctxt)) {
1040  // process nested
1041  result = execute(shadow.getNestedOperation(), ctxt);
1042  }
1043 
1044  if (profileEnabled && !cur.getProfileText().empty()) {
1045  auto& currentFrequencies = frequencies[cur.getProfileText()];
1046  while (currentFrequencies.size() <= getIterationNumber()) {
1047  currentFrequencies.emplace_back(0);
1048  }
1049  frequencies[cur.getProfileText()][getIterationNumber()]++;
1050  }
1051  return result;
1052  ESAC(Filter)
1053 
1054 #define PROJECT(Structure, Arity, ...) \
1055  CASE(Project, Structure, Arity) \
1056  auto& rel = *static_cast<RelType*>(node->getRelation()); \
1057  return evalProject(rel, shadow, ctxt); \
1058  ESAC(Project)
1059 
1060  FOR_EACH(PROJECT)
1061 #undef PROJECT
1062 
1063  CASE(SubroutineReturn)
1064  for (size_t i = 0; i < cur.getValues().size(); ++i) {
1065  if (shadow.getChild(i) == nullptr) {
1066  ctxt.addReturnValue(0);
1067  } else {
1068  ctxt.addReturnValue(execute(shadow.getChild(i), ctxt));
1069  }
1070  }
1071  return true;
1072  ESAC(SubroutineReturn)
1073 
1074  CASE(Sequence)
1075  for (const auto& child : shadow.getChildren()) {
1076  if (!execute(child.get(), ctxt)) {
1077  return false;
1078  }
1079  }
1080  return true;
1081  ESAC(Sequence)
1082 
1083  CASE(Parallel)
1084  for (const auto& child : shadow.getChildren()) {
1085  if (!execute(child.get(), ctxt)) {
1086  return false;
1087  }
1088  }
1089  return true;
1090  ESAC(Parallel)
1091 
1092  CASE(Loop)
1093  resetIterationNumber();
1094  while (execute(shadow.getChild(), ctxt)) {
1095  incIterationNumber();
1096  }
1097  resetIterationNumber();
1098  return true;
1099  ESAC(Loop)
1100 
1101  CASE(Exit)
1102  return !execute(shadow.getChild(), ctxt);
1103  ESAC(Exit)
1104 
1105  CASE(LogRelationTimer)
1106  Logger logger(cur.getMessage(), getIterationNumber(),
1107  std::bind(&RelationWrapper::size, node->getRelation()));
1108  return execute(shadow.getChild(), ctxt);
1109  ESAC(LogRelationTimer)
1110 
1111  CASE(LogTimer)
1112  Logger logger(cur.getMessage(), getIterationNumber());
1113  return execute(shadow.getChild(), ctxt);
1114  ESAC(LogTimer)
1115 
1116  CASE(DebugInfo)
1117  SignalHandler::instance()->setMsg(cur.getMessage().c_str());
1118  return execute(shadow.getChild(), ctxt);
1119  ESAC(DebugInfo)
1120 
1121 #define CLEAR(Structure, Arity, ...) \
1122  CASE(Clear, Structure, Arity) \
1123  auto& rel = *static_cast<RelType*>(node->getRelation()); \
1124  rel.__purge(); \
1125  return true; \
1126  ESAC(Clear)
1127 
1128  FOR_EACH(CLEAR)
1129 #undef CLEAR
1130 
1131  CASE(Call)
1132  execute(subroutine[shadow.getSubroutineId()].get(), ctxt);
1133  return true;
1134  ESAC(Call)
1135 
1136  CASE(LogSize)
1137  const auto& rel = *node->getRelation();
1138  ProfileEventSingleton::instance().makeQuantityEvent(
1139  cur.getMessage(), rel.size(), getIterationNumber());
1140  return true;
1141  ESAC(LogSize)
1142 
1143  CASE(IO)
1144  const auto& directive = cur.getDirectives();
1145  const std::string& op = cur.get("operation");
1146  auto& rel = *node->getRelation();
1147 
1148  if (op == "input") {
1149  try {
1150  IOSystem::getInstance()
1151  .getReader(directive, getSymbolTable(), getRecordTable())
1152  ->readAll(rel);
1153  } catch (std::exception& e) {
1154  std::cerr << "Error loading data: " << e.what() << "\n";
1155  }
1156  return true;
1157  } else if (op == "output" || op == "printsize") {
1158  try {
1159  IOSystem::getInstance()
1160  .getWriter(directive, getSymbolTable(), getRecordTable())
1161  ->writeAll(rel);
1162  } catch (std::exception& e) {
1163  std::cerr << e.what();
1164  exit(EXIT_FAILURE);
1165  }
1166  return true;
1167  } else {
1168  assert(false && "wrong i/o operation");
1169  return true;
1170  }
1171  ESAC(IO)
1172 
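 // A query first runs the view-free parts of its outer filter (cheap rejection
 // without building index views), then creates the views required by the
 // remaining filter conditions. View creation for the nested operation is
 // deferred when the query body is parallel, so each worker thread can build
 // its own views on a private copy of the context.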
1173  CASE(Query)
1174  ViewContext* viewContext = shadow.getViewContext();
1175 
1176  // Execute view-free operations in outer filter if any.
1177  auto& viewFreeOps = viewContext->getOuterFilterViewFreeOps();
1178  for (auto& op : viewFreeOps) {
1179  if (!execute(op.get(), ctxt)) {
1180  return true;
1181  }
1182  }
1183 
1184  // Create Views for outer filter operation if any.
1185  auto& viewsForOuter = viewContext->getViewInfoForFilter();
1186  for (auto& info : viewsForOuter) {
1187  ctxt.createView(*getRelationHandle(info[0]), info[1], info[2]);
1188  }
1189 
1190  // Execute outer filter operation.
1191  auto& viewOps = viewContext->getOuterFilterViewOps();
1192  for (auto& op : viewOps) {
1193  if (!execute(op.get(), ctxt)) {
1194  return true;
1195  }
1196  }
1197 
1198  if (viewContext->isParallel) {
1199  // If the query is parallel, defer view creation until the parallel instructions.
1200  } else {
1201  // Issue views for nested operation.
1202  auto& viewsForNested = viewContext->getViewInfoForNested();
1203  for (auto& info : viewsForNested) {
1204  ctxt.createView(*getRelationHandle(info[0]), info[1], info[2]);
1205  }
1206  }
1207  execute(shadow.getChild(), ctxt);
1208  return true;
1209  ESAC(Query)
1210 
1211  CASE(Extend)
1212  auto& src = *static_cast<EqrelRelation*>(getRelationHandle(shadow.getSourceId()).get());
1213  auto& trg = *static_cast<EqrelRelation*>(getRelationHandle(shadow.getTargetId()).get());
1214  src.extend(trg);
1215  trg.insert(src);
1216  return true;
1217  ESAC(Extend)
1218 
1219  CASE(Swap)
1220  swapRelation(shadow.getSourceId(), shadow.getTargetId());
1221  return true;
1222  ESAC(Swap)
1223  }
1224 
1225  UNREACHABLE_BAD_CASE_ANALYSIS
1226 
1227 #undef EVAL_CHILD
1228 #undef DEBUG
1229 }
1230 
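// Existence checks come in two flavours: a total search builds one concrete
// tuple and asks the view for containment, while a partial search (some columns
// unconstrained) builds low/high bounds and asks whether any tuple falls within
// that range.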
1231 template <typename Rel>
1232 RamDomain Engine::evalExistenceCheck(const ExistenceCheck& shadow, Context& ctxt) {
1233  constexpr size_t Arity = Rel::Arity;
1234  size_t viewPos = shadow.getViewId();
1235 
1236  if (profileEnabled && !shadow.isTemp()) {
1237  reads[shadow.getRelationName()]++;
1238  }
1239 
1240  const auto& superInfo = shadow.getSuperInst();
1241  // for total we use the exists test
1242  if (shadow.isTotalSearch()) {
1243  souffle::Tuple<RamDomain, Arity> tuple;
1244  TUPLE_COPY_FROM(tuple, superInfo.first);
1245  /* TupleElement */
1246  for (const auto& tupleElement : superInfo.tupleFirst) {
1247  tuple[tupleElement[0]] = ctxt[tupleElement[1]][tupleElement[2]];
1248  }
1249  /* Generic */
1250  for (const auto& expr : superInfo.exprFirst) {
1251  tuple[expr.first] = execute(expr.second.get(), ctxt);
1252  }
1253  return Rel::castView(ctxt.getView(viewPos))->contains(tuple);
1254  }
1255 
1256  // for partial we search for lower and upper boundaries
1257  souffle::Tuple<RamDomain, Arity> low;
1258  souffle::Tuple<RamDomain, Arity> high;
1259  TUPLE_COPY_FROM(low, superInfo.first);
1260  TUPLE_COPY_FROM(high, superInfo.second);
1261 
1262  /* TupleElement */
1263  for (const auto& tupleElement : superInfo.tupleFirst) {
1264  low[tupleElement[0]] = ctxt[tupleElement[1]][tupleElement[2]];
1265  high[tupleElement[0]] = low[tupleElement[0]];
1266  }
1267  /* Generic */
1268  for (const auto& expr : superInfo.exprFirst) {
1269  low[expr.first] = execute(expr.second.get(), ctxt);
1270  high[expr.first] = low[expr.first];
1271  }
1272 
1273  return Rel::castView(ctxt.getView(viewPos))->contains(low, high);
1274 }
1275 
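// Provenance existence checks only constrain the payload columns; the two
// trailing provenance annotation columns range over [MIN_RAM_SIGNED,
// MAX_RAM_SIGNED], and the first matching tuple's last column is then compared
// against the level given by the check's child expression.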
1276 template <typename Rel>
1277 RamDomain Engine::evalProvenanceExistenceCheck(const ProvenanceExistenceCheck& shadow, Context& ctxt) {
1278  // construct the pattern tuple
1279  constexpr size_t Arity = Rel::Arity;
1280  const auto& superInfo = shadow.getSuperInst();
1281 
1282  // for partial we search for lower and upper boundaries
1283  souffle::Tuple<RamDomain, Arity> low;
1284  souffle::Tuple<RamDomain, Arity> high;
1285  TUPLE_COPY_FROM(low, superInfo.first);
1286  TUPLE_COPY_FROM(high, superInfo.second);
1287 
1288  /* TupleElement */
1289  for (const auto& tupleElement : superInfo.tupleFirst) {
1290  low[tupleElement[0]] = ctxt[tupleElement[1]][tupleElement[2]];
1291  high[tupleElement[0]] = low[tupleElement[0]];
1292  }
1293  /* Generic */
1294  for (const auto& expr : superInfo.exprFirst) {
1295  assert(expr.second.get() != nullptr &&
1296  "ProvenanceExistenceCheck should always be specified for payload");
1297  low[expr.first] = execute(expr.second.get(), ctxt);
1298  high[expr.first] = low[expr.first];
1299  }
1300 
1301  low[Arity - 2] = MIN_RAM_SIGNED;
1302  low[Arity - 1] = MIN_RAM_SIGNED;
1303  high[Arity - 2] = MAX_RAM_SIGNED;
1304  high[Arity - 1] = MAX_RAM_SIGNED;
1305 
1306  // obtain view
1307  size_t viewPos = shadow.getViewId();
1308 
1309  // get an equalRange
1310  auto equalRange = Rel::castView(ctxt.getView(viewPos))->range(low, high);
1311 
1312  // if range is empty
1313  if (equalRange.begin() == equalRange.end()) {
1314  return false;
1315  }
1316 
1317  // check whether the height is less than the current height
1318  return (*equalRange.begin())[Arity - 1] <= execute(shadow.getChild(), ctxt);
1319 }
1320 
1321 template <typename Rel>
1322 RamDomain Engine::evalScan(const Rel& rel, const ram::Scan& cur, const Scan& shadow, Context& ctxt) {
1323  for (const auto& tuple : rel.scan()) {
1324  ctxt[cur.getTupleId()] = tuple.data();
1325  if (!execute(shadow.getNestedOperation(), ctxt)) {
1326  break;
1327  }
1328  }
1329  return true;
1330 }
1331 
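// Parallel scans partition the relation for numOfThreads workers; each worker
// copies the evaluation context and creates its own views before iterating, so
// index hints are never shared between threads.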
1332 template <typename Rel>
1333 RamDomain Engine::evalParallelScan(
1334  const Rel& rel, const ram::ParallelScan& cur, const ParallelScan& shadow, Context& ctxt) {
1335  auto viewContext = shadow.getViewContext();
1336 
1337  auto pStream = rel.partitionScan(numOfThreads);
1338 
1339  PARALLEL_START
1340  Context newCtxt(ctxt);
1341  auto viewInfo = viewContext->getViewInfoForNested();
1342  for (const auto& info : viewInfo) {
1343  newCtxt.createView(*getRelationHandle(info[0]), info[1], info[2]);
1344  }
1345  pfor(auto it = pStream.begin(); it < pStream.end(); it++) {
1346  for (const auto& tuple : *it) {
1347  newCtxt[cur.getTupleId()] = tuple.data();
1348  if (!execute(shadow.getNestedOperation(), newCtxt)) {
1349  break;
1350  }
1351  }
1352  }
1353  PARALLEL_END
1354  return true;
1355 }
1356 
1357 template <typename Rel>
1358 RamDomain Engine::evalIndexScan(const ram::IndexScan& cur, const IndexScan& shadow, Context& ctxt) {
1359  constexpr size_t Arity = Rel::Arity;
1360  // create pattern tuple for range query
1361  const auto& superInfo = shadow.getSuperInst();
1362  souffle::Tuple<RamDomain, Arity> low;
1363  souffle::Tuple<RamDomain, Arity> high;
1364  CAL_SEARCH_BOUND(superInfo, low, high);
1365 
1366  size_t viewId = shadow.getViewId();
1367  auto view = Rel::castView(ctxt.getView(viewId));
1368  // conduct range query
1369  for (const auto& tuple : view->range(low, high)) {
1370  ctxt[cur.getTupleId()] = tuple.data();
1371  if (!execute(shadow.getNestedOperation(), ctxt)) {
1372  break;
1373  }
1374  }
1375  return true;
1376 }
1377 
1378 template <typename Rel>
1379 RamDomain Engine::evalParallelIndexScan(
1380  const Rel& rel, const ram::ParallelIndexScan& cur, const ParallelIndexScan& shadow, Context& ctxt) {
1381  auto viewContext = shadow.getViewContext();
1382 
1383  // create pattern tuple for range query
1384  constexpr size_t Arity = Rel::Arity;
1385  const auto& superInfo = shadow.getSuperInst();
1386  souffle::Tuple<RamDomain, Arity> low;
1387  souffle::Tuple<RamDomain, Arity> high;
1388  CAL_SEARCH_BOUND(superInfo, low, high);
1389 
1390  size_t indexPos = shadow.getViewId();
1391  auto pStream = rel.partitionRange(indexPos, low, high, numOfThreads);
1392  PARALLEL_START
1393  Context newCtxt(ctxt);
1394  auto viewInfo = viewContext->getViewInfoForNested();
1395  for (const auto& info : viewInfo) {
1396  newCtxt.createView(*getRelationHandle(info[0]), info[1], info[2]);
1397  }
1398  pfor(auto it = pStream.begin(); it < pStream.end(); it++) {
1399  for (const auto& tuple : *it) {
1400  newCtxt[cur.getTupleId()] = tuple.data();
1401  if (!execute(shadow.getNestedOperation(), newCtxt)) {
1402  break;
1403  }
1404  }
1405  }
1406  PARALLEL_END
1407  return true;
1408 }
1409 
1410 template <typename Rel>
1411 RamDomain Engine::evalChoice(const Rel& rel, const ram::Choice& cur, const Choice& shadow, Context& ctxt) {
1412  // use simple iterator
1413  for (const auto& tuple : rel.scan()) {
1414  ctxt[cur.getTupleId()] = tuple.data();
1415  if (execute(shadow.getCondition(), ctxt)) {
1416  execute(shadow.getNestedOperation(), ctxt);
1417  break;
1418  }
1419  }
1420  return true;
1421 }
1422 
1423 template <typename Rel>
1424 RamDomain Engine::evalParallelChoice(
1425  const Rel& rel, const ram::ParallelChoice& cur, const ParallelChoice& shadow, Context& ctxt) {
1426  auto viewContext = shadow.getViewContext();
1427 
1428  auto pStream = rel.partitionScan(numOfThreads);
1429  auto viewInfo = viewContext->getViewInfoForNested();
1430  PARALLEL_START
1431  Context newCtxt(ctxt);
1432  for (const auto& info : viewInfo) {
1433  newCtxt.createView(*getRelationHandle(info[0]), info[1], info[2]);
1434  }
1435  pfor(auto it = pStream.begin(); it < pStream.end(); it++) {
1436  for (const auto& tuple : *it) {
1437  newCtxt[cur.getTupleId()] = tuple.data();
1438  if (execute(shadow.getCondition(), newCtxt)) {
1439  execute(shadow.getNestedOperation(), newCtxt);
1440  break;
1441  }
1442  }
1443  }
1444  PARALLEL_END
1445  return true;
1446 }
1447 
1448 template <typename Rel>
1449 RamDomain Engine::evalIndexChoice(const ram::IndexChoice& cur, const IndexChoice& shadow, Context& ctxt) {
1450  constexpr size_t Arity = Rel::Arity;
1451  const auto& superInfo = shadow.getSuperInst();
1452  souffle::Tuple<RamDomain, Arity> low;
1453  souffle::Tuple<RamDomain, Arity> high;
1454  CAL_SEARCH_BOUND(superInfo, low, high);
1455 
1456  size_t viewId = shadow.getViewId();
1457  auto view = Rel::castView(ctxt.getView(viewId));
1458 
1459  for (const auto& tuple : view->range(low, high)) {
1460  ctxt[cur.getTupleId()] = tuple.data();
1461  if (execute(shadow.getCondition(), ctxt)) {
1462  execute(shadow.getNestedOperation(), ctxt);
1463  break;
1464  }
1465  }
1466  return true;
1467 }
1468 
1469 template <typename Rel>
1470 RamDomain Engine::evalParallelIndexChoice(const Rel& rel, const ram::ParallelIndexChoice& cur,
1471  const ParallelIndexChoice& shadow, Context& ctxt) {
1472  auto viewContext = shadow.getViewContext();
1473 
1474  auto viewInfo = viewContext->getViewInfoForNested();
1476  // create pattern tuple for range query
1477  constexpr size_t Arity = Rel::Arity;
1478  const auto& superInfo = shadow.getSuperInst();
1479  souffle::Tuple<RamDomain, Arity> low;
1480  souffle::Tuple<RamDomain, Arity> high;
1481  CAL_SEARCH_BOUND(superInfo, low, high);
1482 
1483  size_t indexPos = shadow.getViewId();
1484  auto pStream = rel.partitionRange(indexPos, low, high, numOfThreads);
1485 
1486  PARALLEL_START
1487  Context newCtxt(ctxt);
1488  for (const auto& info : viewInfo) {
1489  newCtxt.createView(*getRelationHandle(info[0]), info[1], info[2]);
1490  }
1491  pfor(auto it = pStream.begin(); it < pStream.end(); it++) {
1492  for (const auto& tuple : *it) {
1493  newCtxt[cur.getTupleId()] = tuple.data();
1494  if (execute(shadow.getCondition(), newCtxt)) {
1495  execute(shadow.getNestedOperation(), newCtxt);
1496  break;
1497  }
1498  }
1499  }
1500  PARALLEL_END
1501 
1502  return true;
1503 }
1504 
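// evalAggregate folds the tuples delivered by `ranges` that satisfy `filter`.
// MIN/MAX start from the opposite extreme of their domain, SUM starts from 0,
// MEAN accumulates a (sum, count) pair, and COUNT simply counts matches. The
// result is written into a fresh 1-arity tuple bound to the aggregate's tuple
// id; the nested operation runs unconditionally for SUM and COUNT, and
// otherwise only if at least one tuple matched.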
1505 template <typename Aggregate, typename Iter>
1506 RamDomain Engine::evalAggregate(const Aggregate& aggregate, const Node& filter, const Node* expression,
1507  const Node& nestedOperation, const Iter& ranges, Context& ctxt) {
1508  bool shouldRunNested = false;
1509 
1510  // initialize result
1511  RamDomain res = 0;
1512 
1513  // Use for calculating mean.
1514  std::pair<RamFloat, RamFloat> accumulateMean;
1515 
1516  switch (aggregate.getFunction()) {
1517  case AggregateOp::MIN: res = ramBitCast(MAX_RAM_SIGNED); break;
1518  case AggregateOp::UMIN: res = ramBitCast(MAX_RAM_UNSIGNED); break;
1519  case AggregateOp::FMIN: res = ramBitCast(MAX_RAM_FLOAT); break;
1520 
1521  case AggregateOp::MAX: res = ramBitCast(MIN_RAM_SIGNED); break;
1522  case AggregateOp::UMAX: res = ramBitCast(MIN_RAM_UNSIGNED); break;
1523  case AggregateOp::FMAX: res = ramBitCast(MIN_RAM_FLOAT); break;
1524 
1525  case AggregateOp::SUM:
1526  res = ramBitCast(static_cast<RamSigned>(0));
1527  shouldRunNested = true;
1528  break;
1529  case AggregateOp::USUM:
1530  res = ramBitCast(static_cast<RamUnsigned>(0));
1531  shouldRunNested = true;
1532  break;
1533  case AggregateOp::FSUM:
1534  res = ramBitCast(static_cast<RamFloat>(0));
1535  shouldRunNested = true;
1536  break;
1537 
1538  case AggregateOp::MEAN:
1539  res = 0;
1540  accumulateMean = {0, 0};
1541  break;
1542 
1543  case AggregateOp::COUNT:
1544  res = 0;
1545  shouldRunNested = true;
1546  break;
1547  }
1548 
1549  for (const auto& tuple : ranges) {
1550  ctxt[aggregate.getTupleId()] = tuple.data();
1551 
1552  if (!execute(&filter, ctxt)) {
1553  continue;
1554  }
1555 
1556  shouldRunNested = true;
1557 
1558  // count is a special case.
1559  if (aggregate.getFunction() == AggregateOp::COUNT) {
1560  ++res;
1561  continue;
1562  }
1563 
1564  // eval target expression
1565  assert(expression); // only case where this is null is `COUNT`
1566  RamDomain val = execute(expression, ctxt);
1567 
1568  switch (aggregate.getFunction()) {
1569  case AggregateOp::MIN: res = std::min(res, val); break;
1570  case AggregateOp::FMIN:
1571  res = ramBitCast(std::min(ramBitCast<RamFloat>(res), ramBitCast<RamFloat>(val)));
1572  break;
1573  case AggregateOp::UMIN:
1574  res = ramBitCast(std::min(ramBitCast<RamUnsigned>(res), ramBitCast<RamUnsigned>(val)));
1575  break;
1576 
1577  case AggregateOp::MAX: res = std::max(res, val); break;
1578  case AggregateOp::FMAX:
1579  res = ramBitCast(std::max(ramBitCast<RamFloat>(res), ramBitCast<RamFloat>(val)));
1580  break;
1581  case AggregateOp::UMAX:
1582  res = ramBitCast(std::max(ramBitCast<RamUnsigned>(res), ramBitCast<RamUnsigned>(val)));
1583  break;
1584 
1585  case AggregateOp::SUM: res += val; break;
1586  case AggregateOp::FSUM:
1587  res = ramBitCast(ramBitCast<RamFloat>(res) + ramBitCast<RamFloat>(val));
1588  break;
1589  case AggregateOp::USUM:
1590  res = ramBitCast(ramBitCast<RamUnsigned>(res) + ramBitCast<RamUnsigned>(val));
1591  break;
1592 
1593  case AggregateOp::MEAN:
1594  accumulateMean.first += ramBitCast<RamFloat>(val);
1595  accumulateMean.second++;
1596  break;
1597 
1598  case AggregateOp::COUNT: fatal("This should never be executed");
1599  }
1600  }
1601 
1602  if (aggregate.getFunction() == AggregateOp::MEAN && accumulateMean.second != 0) {
1603  res = ramBitCast(accumulateMean.first / accumulateMean.second);
1604  }
1605 
1606  // write result to environment
1607  souffle::Tuple<RamDomain, 1> tuple;
1608  tuple[0] = res;
1609  ctxt[aggregate.getTupleId()] = tuple.data();
1610 
1611  if (!shouldRunNested) {
1612  return true;
1613  } else {
1614  return execute(&nestedOperation, ctxt);
1615  }
1616 }
1617 template <typename Rel>
1618 RamDomain Engine::evalParallelAggregate(
1619  const Rel& rel, const ram::ParallelAggregate& cur, const ParallelAggregate& shadow, Context& ctxt) {
1620  // TODO (rdowavic): make parallel
1621  auto viewContext = shadow.getViewContext();
1622 
1623  Context newCtxt(ctxt);
1624  auto viewInfo = viewContext->getViewInfoForNested();
1625  for (const auto& info : viewInfo) {
1626  newCtxt.createView(*getRelationHandle(info[0]), info[1], info[2]);
1627  }
1628  return evalAggregate(
1629  cur, *shadow.getCondition(), shadow.getExpr(), *shadow.getNestedOperation(), rel.scan(), newCtxt);
1630 }
1631 
1632 template <typename Rel>
1633 RamDomain Engine::evalParallelIndexAggregate(
1634  const ram::ParallelIndexAggregate& cur, const ParallelIndexAggregate& shadow, Context& ctxt) {
1635  // TODO (rdowavic): make parallel
1636  auto viewContext = shadow.getViewContext();
1637 
1638  Context newCtxt(ctxt);
1639  auto viewInfo = viewContext->getViewInfoForNested();
1640  for (const auto& info : viewInfo) {
1641  newCtxt.createView(*getRelationHandle(info[0]), info[1], info[2]);
1642  }
1643  // init temporary tuple for this level
1644  constexpr size_t Arity = Rel::Arity;
1645  const auto& superInfo = shadow.getSuperInst();
1646  // get lower and upper boundaries for iteration
1647  souffle::Tuple<RamDomain, Arity> low;
1648  souffle::Tuple<RamDomain, Arity> high;
1649  CAL_SEARCH_BOUND(superInfo, low, high);
1650 
1651  size_t viewId = shadow.getViewId();
1652  auto view = Rel::castView(newCtxt.getView(viewId));
1653 
1654  return evalAggregate(cur, *shadow.getCondition(), shadow.getExpr(), *shadow.getNestedOperation(),
1655  view->range(low, high), newCtxt);
1656 }
1657 
1658 template <typename Rel>
1659 RamDomain Engine::evalIndexAggregate(
1660  const ram::IndexAggregate& cur, const IndexAggregate& shadow, Context& ctxt) {
1661  // init temporary tuple for this level
1662  const size_t Arity = Rel::Arity;
1663  const auto& superInfo = shadow.getSuperInst();
1664  souffle::Tuple<RamDomain, Arity> low;
1665  souffle::Tuple<RamDomain, Arity> high;
1666  CAL_SEARCH_BOUND(superInfo, low, high);
1667 
1668  size_t viewId = shadow.getViewId();
1669  auto view = Rel::castView(ctxt.getView(viewId));
1670 
1671  return evalAggregate(cur, *shadow.getCondition(), shadow.getExpr(), *shadow.getNestedOperation(),
1672  view->range(low, high), ctxt);
1673 }
1674 
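// evalProject materialises one tuple from the constants, bound tuple elements
// and evaluated expressions recorded in the super-instruction, then inserts it
// into the target relation.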
1675 template <typename Rel>
1676 RamDomain Engine::evalProject(Rel& rel, const Project& shadow, Context& ctxt) {
1677  constexpr size_t Arity = Rel::Arity;
1678  const auto& superInfo = shadow.getSuperInst();
1679  souffle::Tuple<RamDomain, Arity> tuple;
1680  TUPLE_COPY_FROM(tuple, superInfo.first);
1681 
1682  /* TupleElement */
1683  for (const auto& tupleElement : superInfo.tupleFirst) {
1684  tuple[tupleElement[0]] = ctxt[tupleElement[1]][tupleElement[2]];
1685  }
1686  /* Generic */
1687  for (const auto& expr : superInfo.exprFirst) {
1688  tuple[expr.first] = execute(expr.second.get(), ctxt);
1689  }
1690 
1691  // insert in target relation
1692  rel.insert(tuple);
1693  return true;
1694 }
1695 
1696 } // namespace souffle::interpreter
INDEX_AGGREGATE
#define INDEX_AGGREGATE(Structure, Arity,...)
souffle::RamDomain
int32_t RamDomain
Definition: RamTypes.h:56
Negation.h
AggregateOp.h
Project.h
FOR_EACH
#define FOR_EACH(func,...)
Definition: Util.h:116
souffle::interpreter::createEqrelRelation
Own< RelationWrapper > createEqrelRelation(const ram::Relation &id, const ram::analysis::MinIndexSelection &orderSet)
Definition: EqrelIndex.cpp:29
souffle::ast::analysis::sub
std::shared_ptr< Constraint< Var > > sub(const Var &a, const Var &b, const std::string &symbol="⊑")
A generic factory for constraints of the form.
Definition: ConstraintSystem.h:228
PARALLEL_AGGREGATE
#define PARALLEL_AGGREGATE(Structure, Arity,...)
Clear.h
Relation.h
souffle::interpreter::LogTimer
Definition: Node.h:784
souffle::interpreter::Node
This is a shadow node for a ram::Node that is enriched for with local information so that the interpr...
Definition: Node.h:178
AGGREGATE
#define AGGREGATE(Structure, Arity,...)
souffle::BinaryConstraintOp::LE
@ LE
souffle::interpreter::TupleElement
Definition: Node.h:425
souffle::interpreter::createBTreeRelation
Own< RelationWrapper > createBTreeRelation(const ram::Relation &id, const ram::analysis::MinIndexSelection &orderSet)
Definition: BTreeIndex.cpp:35
LogTimer.h
souffle::interpreter::Node::getType
enum NodeType getType() const
get node type
Definition: Node.h:187
SymbolTable.h
low
d d low
Definition: htmlJsChartistMin.h:15
souffle::interpreter::Loop
Definition: Node.h:763
high
d high
Definition: htmlJsChartistMin.h:15
souffle::interpreter::SubroutineArgument
Definition: Node.h:481
souffle::interpreter::SubroutineReturn
Definition: Node.h:742
ParallelUtil.h
RUN_RANGE
#define RUN_RANGE(ty)
souffle::RecordTable
Definition: RecordTable.h:114
souffle::interpreter::Context::createView
void createView(const RelationWrapper &rel, size_t indexPos, size_t viewPos)
Create a view in the environment.
Definition: Context.h:110
UNARY_OP
#define UNARY_OP(op, ty, func)
e
l j a showGridBackground &&c b raw series this eventEmitter e
Definition: htmlJsChartistMin.h:15
False.h
ParallelChoice.h
souffle::interpreter::EqrelRelation::extend
void extend(const EqrelRelation &rel)
Definition: Relation.h:413
souffle::RamFloat
float RamFloat
Definition: RamTypes.h:60
souffle::BinaryConstraintOp::EQ
@ EQ
LogRelationTimer.h
souffle::interpreter::Engine::Engine
Engine(ram::TranslationUnit &tUnit)
Definition: Engine.cpp:146
souffle::BinaryConstraintOp::GT
@ GT
MiscUtil.h
SCAN
#define SCAN(Structure, Arity,...)
souffle::interpreter::Constraint
Definition: Node.h:569
souffle::interpreter::IO
Definition: Node.h:828
CLEAR
#define CLEAR(Structure, Arity,...)
souffle::BinaryConstraintOp::NE
@ NE
Index.h
souffle::interpreter::Extend
Definition: Node.h:842
Filter.h
SubroutineArgument.h
DebugInfo.h
Engine.h
EmptinessCheck.h
souffle::interpreter::EqrelRelation
Definition: Relation.h:409
IOSystem.h
Swap.h
souffle::MIN_RAM_FLOAT
constexpr RamFloat MIN_RAM_FLOAT
Definition: RamTypes.h:102
souffle::FunctorOp::U2F
@ U2F
True.h
relations
std::vector< Own< Relation > > relations
Definition: ComponentInstantiation.cpp:65
souffle::interpreter::ViewContext::getOuterFilterViewFreeOps
const VecOwn< Node > & getOuterFilterViewFreeOps()
Return views for outer-most filter operations.
Definition: ViewContext.h:62
PARALLEL_INDEX_AGGREGATE
#define PARALLEL_INDEX_AGGREGATE(Structure, Arity,...)
souffle::interpreter::createProvenanceRelation
Own< RelationWrapper > createProvenanceRelation(const ram::Relation &id, const ram::analysis::MinIndexSelection &orderSet)
Definition: ProvenanceIndex.cpp:32
souffle::interpreter::ViewContext
This class contains information for views (Hints) creation for ram::Query and ram::Parallel operation...
Definition: ViewContext.h:39
souffle::FunctorOp::BXOR
@ BXOR
iteration
Iteration & iteration
Definition: Reader.h:129
TupleOperation.h
PROJECT
#define PROJECT(Structure, Arity,...)
souffle::FunctorOp::MOD
@ MOD
souffle::interpreter::NestedIntrinsicOperator
Definition: Node.h:467
ParallelIndexScan.h
souffle::FunctorOp::BSHIFT_L
@ BSHIFT_L
souffle::interpreter::False
Definition: Node.h:495
souffle::Logger
The class utilized to times for the souffle profiling tool.
Definition: Logger.h:44
souffle::interpreter::IntrinsicOperator
Definition: Node.h:453
Program.h
ProfileEvent.h
Logger.h
IndexAggregate.h
souffle::interpreter::Context::addReturnValue
void addReturnValue(RamDomain val)
Add subroutine return value.
Definition: Context.h:88
str
const std::string & str
Definition: json11.h:662
Global.h
dynamicLibSuffix
#define dynamicLibSuffix
Definition: Engine.cpp:126
souffle::FunctorOp::LOR
@ LOR
souffle::ram::TranslationUnit
Translating a RAM program.
Definition: TranslationUnit.h:55
souffle::interpreter
Definition: BrieIndex.cpp:22
souffle::interpreter::Filter
Definition: Node.h:723
EXISTENCE_CHECK
#define EXISTENCE_CHECK(Structure, Arity,...)
souffle::MAX_RAM_UNSIGNED
constexpr RamUnsigned MAX_RAM_UNSIGNED
Definition: RamTypes.h:100
souffle::interpreter::UserDefinedOperator
Definition: Node.h:460
ProvenanceExistenceCheck.h
CONV_TO_STRING
#define CONV_TO_STRING(op, ty)
IndexScan.h
Visitor.h
FFI_RamUnsigned
#define FFI_RamUnsigned
Definition: Engine.cpp:136
ReadStream.h
IndexChoice.h
i
size_t i
Definition: json11.h:663
souffle::filter
std::vector< A > filter(std::vector< A > xs, F &&f)
Filter a vector to include certain elements.
Definition: FunctionalUtil.h:155
souffle::interpreter::RelationHandle
Own< RelationWrapper > RelationHandle
Definition: Generator.cpp:28
souffle::MAX_RAM_FLOAT
constexpr RamFloat MAX_RAM_FLOAT
Definition: RamTypes.h:103
CAL_SEARCH_BOUND
#define CAL_SEARCH_BOUND(superInfo, low, high)
souffle::ram::NestedOperation::getProfileText
const std::string & getProfileText() const
Get profile text.
Definition: NestedOperation.h:67
PARALLEL_SCAN
#define PARALLEL_SCAN(Structure, Arity,...)
souffle::tuple::data
const RamDomain * data
Allows printing using WriteStream.
Definition: SouffleInterface.h:498
Choice.h
souffle::ram::ParallelIndexAggregate
Aggregate over values of a relation using an index in parallel.
Definition: ParallelIndexAggregate.h:52
RELATION_SIZE
#define RELATION_SIZE(Structure, Arity,...)
FFI_RamFloat
#define FFI_RamFloat
Definition: Engine.cpp:137
souffle::AggregateOp::MAX
@ MAX
souffle::ram::Program::getSubroutines
const std::map< std::string, Statement * > getSubroutines() const
Get all subroutines of a RAM program.
Definition: Program.h:97
Relation.h
NestedIntrinsicOperator.h
souffle::FunctorOp::U2S
@ U2S
souffle::BinaryConstraintOp::GE
@ GE
CASE
#define CASE(...)
EMPTINESS_CHECK
#define EMPTINESS_CHECK(Structure, Arity,...)
StringUtil.h
ParallelIndexChoice.h
souffle::main
int main(int argc, char **argv)
Definition: main.cpp:191
souffle::SymbolTable
Definition: SymbolTable.h:48
ParallelIndexAggregate.h
souffle::ram::IndexAggregate
Indexed aggregation on a relation. The index allows us to iterate over a restricted range.
Definition: IndexAggregate.h:51
souffle::interpreter::Project
Definition: Node.h:733
ExistenceCheck.h
UnpackRecord.h
AutoIncrement.h
TCB_SPAN_NAMESPACE_NAME::get
constexpr auto get(span< E, S > s) -> decltype(s[N])
Definition: span.h:599
souffle::FunctorOp::LAND
@ LAND
souffle::interpreter::LogRelationTimer
Definition: Node.h:777
IO.h
souffle::Tuple
std::array< A, N > Tuple
Definition: RamTypes.h:36
souffle::FunctorOp::I2F
@ I2F
RelationSize.h
PARALLEL_END
#define PARALLEL_END
Definition: ParallelUtil.h:65
souffle::MIN_RAM_SIGNED
constexpr RamSigned MIN_RAM_SIGNED
lower and upper boundaries for the ram types
Definition: RamTypes.h:96
INDEX_CHOICE
#define INDEX_CHOICE(Structure, Arity,...)
souffle::BinaryConstraintOp::LT
@ LT
souffle::interpreter::Query
Definition: Node.h:835
souffle::FunctorOp::BOR
@ BOR
ParallelScan.h
COMPARE
#define COMPARE(opCode, op)
FFI_Symbol
#define FFI_Symbol
Definition: Engine.cpp:140
Break.h
BINARY_OP_LOGICAL
#define BINARY_OP_LOGICAL(opcode, op)
souffle::interpreter::ViewContext::getViewInfoForNested
std::vector< std::array< size_t, 3 > > & getViewInfoForNested()
Return Views information for nested operation.
Definition: ViewContext.h:77
RecordTable.h
ParallelAggregate.h
TranslationUnit.h
souffle::ram::TupleOperation
Abstract class for relation searches and lookups.
Definition: TupleOperation.h:35
souffle::FunctorOp::S2I
@ S2I
pfor
#define pfor
Definition: ParallelUtil.h:68
souffle::interpreter::ViewContext::isParallel
bool isParallel
If this context has information for parallel operation.
Definition: ViewContext.h:92
souffle::interpreter::Parallel
Definition: Node.h:756
WriteStream.h
souffle::interpreter::Engine::RelationHandle
Own< RelationWrapper > RelationHandle
Definition: Engine.h:57
CHOICE
#define CHOICE(Structure, Arity,...)
souffle::MIN_RAM_UNSIGNED
constexpr RamUnsigned MIN_RAM_UNSIGNED
Definition: RamTypes.h:99
SignalHandler.h
EvaluatorUtil.h
souffle::interpreter::PackRecord
Definition: Node.h:474
souffle::FunctorOp::F2I
@ F2I
PARALLEL_INDEX_CHOICE
#define PARALLEL_INDEX_CHOICE(Structure, Arity,...)
Context.h
FOR_EACH_PROVENANCE
#define FOR_EACH_PROVENANCE(func,...)
Definition: Util.h:34
souffle::FunctorOp::S2F
@ S2F
TupleElement.h
souffle::interpreter::True
Definition: Node.h:488
MINMAX_OP_SYM
#define MINMAX_OP_SYM(op)
souffle::interpreter::DebugInfo
Definition: Node.h:791
std
Definition: Brie.h:3053
souffle::FunctorOp::BSHIFT_R_UNSIGNED
@ BSHIFT_R_UNSIGNED
souffle::FunctorOp::F2U
@ F2U
Query.h
souffle::FunctorOp::BAND
@ BAND
souffle::FunctorOp::F2S
@ F2S
CONV_FROM_STRING
#define CONV_FROM_STRING(op, ty)
souffle::FunctorOp::SUB
@ SUB
RamTypes.h
souffle::splitString
std::vector< std::string > splitString(const std::string &str, char delimiter)
Splits a string given a delimiter.
Definition: StringUtil.h:321
Sequence.h
MINMAX_NUMERIC
#define MINMAX_NUMERIC(opCode, op)
PARALLEL_CHOICE
#define PARALLEL_CHOICE(Structure, Arity,...)
Statement.h
souffle::interpreter::Context::getArgument
RamDomain getArgument(size_t i) const
Get subroutine Arguments.
Definition: Context.h:104
souffle::fatal
void fatal(const char *format, const Args &... args)
Definition: MiscUtil.h:198
souffle::interpreter::Exit
Definition: Node.h:770
souffle::interpreter::Call
Definition: Node.h:805
souffle::FunctorOp::BSHIFT_R
@ BSHIFT_R
souffle::interpreter::ViewContext::getViewInfoForFilter
std::vector< std::array< size_t, 3 > > & getViewInfoForFilter()
Return Views information for outer filter operation.
Definition: ViewContext.h:72
TCB_SPAN_NAMESPACE_NAME::detail::data
constexpr auto data(C &c) -> decltype(c.data())
Definition: span.h:210
souffle::interpreter::SuperOperation::getSuperInst
const SuperInstruction & getSuperInst() const
Definition: Node.h:315
SubroutineReturn.h
FFI_RamSigned
#define FFI_RamSigned
Definition: Engine.cpp:135
PARALLEL_START
#define PARALLEL_START
Definition: ParallelUtil.h:64
souffle::ramBitCast
To ramBitCast(From source)
In C++20 there will be a new way to cast between types by reinterpreting bits (std::bit_cast),...
Definition: RamTypes.h:87
RAM_DOMAIN_SIZE
#define RAM_DOMAIN_SIZE
Types of elements in a tuple.
Definition: RamTypes.h:46
souffle::FunctorOp::MUL
@ MUL
souffle::ast::visitDepthFirst
void visitDepthFirst(const Node &root, Visitor< R, Ps... > &visitor, Args &... args)
A utility function visiting all nodes within the ast rooted by the given node recursively in a depth-...
Definition: Visitor.h:273
FunctorOps.h
souffle::RamSigned
RamDomain RamSigned
Definition: RamTypes.h:57
souffle::interpreter::TupleOperation
Definition: Node.h:576
Loop.h
souffle::interpreter::AutoIncrement
Definition: Node.h:446
souffle::interpreter::ViewContext::getOuterFilterViewOps
const VecOwn< Node > & getOuterFilterViewOps()
Return outer-most filter operations.
Definition: ViewContext.h:57
souffle::FunctorOp::ADD
@ ADD
Binary Functor Operators.
souffle::evaluator::lxor_infix
Definition: EvaluatorUtil.h:82
rel
void rel(size_t limit, bool showLimit=true)
Definition: Tui.h:1086
souffle::tuple
Defines a tuple for the OO interface such that relations with varying columns can be accessed.
Definition: SouffleInterface.h:443
souffle::interpreter::Node::getRelation
RelationWrapper * getRelation() const
get relation from handle
Definition: Node.h:197
souffle::FunctorOp::S2U
@ S2U
BINARY_OP_NUMERIC
#define BINARY_OP_NUMERIC(opcode, op)
LogSize.h
PARALLEL_INDEX_SCAN
#define PARALLEL_INDEX_SCAN(Structure, Arity,...)
INDEX_SCAN
#define INDEX_SCAN(Structure, Arity,...)
souffle::interpreter::Conjunction
Definition: Node.h:502
ESAC
#define ESAC(Kind)
souffle::interpreter::Constant
Definition: Node.h:418
TUPLE_COPY_FROM
#define TUPLE_COPY_FROM(dst, src)
IntrinsicOperator.h
COMPARE_EQ_NE
#define COMPARE_EQ_NE(opCode, op)
souffle::interpreter::Context
Evaluation context for Interpreter operations.
Definition: Context.h:39
ViewContext.h
souffle::FunctorOp::I2U
@ I2U
std::type
ElementType type
Definition: span.h:640
TypeAttribute.h
souffle::interpreter::Sequence
Definition: Node.h:749
Parallel.h
souffle::profile::ss
class souffle::profile::Tui ss
Definition: Tui.h:336
souffle::interpreter::Swap
Definition: Node.h:851
PROVENANCE_EXISTENCE_CHECK
#define PROVENANCE_EXISTENCE_CHECK(Structure, Arity,...)
Conjunction.h
souffle::getInstance
souffle::SouffleProgram * getInstance(const char *p)
Definition: CompiledSouffle.h:71
souffle::FunctorOp::LXOR
@ LXOR
Extend.h