#include <functional>
#ifdef __APPLE__
#define dynamicLibSuffix ".dylib";
#else
#define dynamicLibSuffix ".so";
#endif
#if RAM_DOMAIN_SIZE == 64
#define FFI_RamSigned ffi_type_sint64
#define FFI_RamUnsigned ffi_type_uint64
#define FFI_RamFloat ffi_type_double
#else
#define FFI_RamSigned ffi_type_sint32
#define FFI_RamUnsigned ffi_type_uint32
#define FFI_RamFloat ffi_type_float
#endif

#define FFI_Symbol ffi_type_pointer
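
// Engine constructor initializer list (the constructor signature itself is not
// part of this excerpt): profiling and provenance flags, the worker-thread
// count, and the index analysis are pulled from the global configuration and
// the RAM translation unit; a positive "jobs" value is forwarded to OpenMP.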
        : profileEnabled(Global::config().has("profile")), isProvenance(Global::config().has("provenance")),
          numOfThreads(std::stoi(Global::config().get("jobs"))), tUnit(tUnit),
          isa(tUnit.getAnalysis<ram::analysis::IndexAnalysis>()) {
    if (numOfThreads > 0) {
        omp_set_num_threads(numOfThreads);
void Engine::swapRelation(const size_t ramRel1, const size_t ramRel2) {
    std::swap(rel1, rel2);

int Engine::incCounter() {

    return tUnit.getSymbolTable();
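
// getMethodHandle resolves a user-defined functor by name: it walks every
// loaded dynamic library and returns the first matching symbol found via dlsym.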
void* Engine::getMethodHandle(const std::string& method) {
    for (void* libHandle : loadDLL()) {
        auto* methodHandle = dlsym(libHandle, method.c_str());
        if (methodHandle != nullptr) {

VecOwn<Engine::RelationHandle>& Engine::getRelationMap() {

void Engine::createRelation(const ram::Relation& id, const size_t idx) {
    const auto& orderSet = isa->getIndexes(id.getName());
    if (id.getRepresentation() == RelationRepresentation::EQREL) {
    relations[idx] = mk<RelationHandle>(std::move(res));
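
// loadDLL lazily opens the functor libraries named in the "libraries" option,
// probing each directory listed in "library-dir"; every candidate path is
// passed to dlopen(RTLD_LAZY) and the first handle that loads is kept.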
const std::vector<void*>& Engine::loadDLL() {
    if (!Global::config().has("libraries")) {
        Global::config().set("libraries", "functors");
    if (!Global::config().has("library-dir")) {
        Global::config().set("library-dir", ".");

    for (const std::string& library : splitString(Global::config().get("libraries"), ' ')) {
        if (library.empty()) {
        auto paths = splitString(Global::config().get("library-dir"), ' ');
        for (std::string& path : paths) {
            if (path.back() != '/') {
        if (library.find('/') != std::string::npos) {
        for (const std::string& path : paths) {
            tmp = dlopen(fullpath.c_str(), RTLD_LAZY);
            if (tmp != nullptr) {

size_t Engine::getIterationNumber() const {

void Engine::incIterationNumber() {

void Engine::resetIterationNumber() {
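
// executeMain runs the generated main program: it installs the signal handler
// and, when profiling is enabled, brackets execution with timing, configuration
// and per-relation read-count events.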
void Engine::executeMain() {
    SignalHandler::instance()->set();
    if (Global::config().has("verbose")) {
        SignalHandler::instance()->enableLogging();
    assert(main != nullptr && "Executing an empty program");

    if (!profileEnabled) {
        execute(main.get(), ctxt);
        ProfileEventSingleton::instance().setOutputFile(Global::config().get("profile"));
            frequencies.emplace(node.getProfileText(), std::deque<std::atomic<size_t>>());
            frequencies[node.getProfileText()].emplace_back(0);
        ProfileEventSingleton::instance().startTimer();
        ProfileEventSingleton::instance().makeTimeEvent("@time;starttime");
        for (const auto& cur : Global::config().data()) {
            ProfileEventSingleton::instance().makeConfigRecord(cur.first, cur.second);
        size_t relationCount = 0;
        for (auto rel : tUnit.getProgram().getRelations()) {
            if (rel->getName()[0] != '@') {
                reads[rel->getName()] = 0;
        ProfileEventSingleton::instance().makeConfigRecord("relationCount", std::to_string(relationCount));
        size_t ruleCount = 0;
        ProfileEventSingleton::instance().makeConfigRecord("ruleCount", std::to_string(ruleCount));
        execute(main.get(), ctxt);
        ProfileEventSingleton::instance().stopTimer();
        for (auto const& cur : frequencies) {
            for (size_t i = 0; i < cur.second.size(); ++i) {
                ProfileEventSingleton::instance().makeQuantityEvent(cur.first, cur.second[i], i);
        for (auto const& cur : reads) {
            ProfileEventSingleton::instance().makeQuantityEvent(
                    "@relation-reads;" + cur.first, cur.second, 0);
    SignalHandler::instance()->reset();
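
// generateIR lowers the RAM program into the interpreter's shadow tree on
// demand: one tree per subroutine plus one for the main program.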
void Engine::generateIR() {
    const ram::Program& program = tUnit.getProgram();
    NodeGenerator generator(*this);
    if (subroutine.empty()) {
        for (const auto& sub : program.getSubroutines()) {
            subroutine.push_back(generator.generateTree(*sub.second));
    if (main == nullptr) {
        main = generator.generateTree(program.getMain());

void Engine::executeSubroutine(
        const std::string& name, const std::vector<RamDomain>& args, std::vector<RamDomain>& ret) {
    ctxt.setReturnValues(ret);
    ctxt.setArguments(args);
    size_t i = distance(subs.begin(), subs.find(name));
    execute(subroutine[i].get(), ctxt);
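
// The execute() dispatch below is generated with macros: CASE selects
// BASE_CASE or EXTEND_CASE by argument count, and each case opens a lambda
// that binds the interpreter shadow node, the originating RAM node and, for
// relation operations, the concrete relation type (RelType).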
#define DEBUG(Kind) std::cout << "Running Node: " << #Kind << "\n";
#define EVAL_CHILD(ty, idx) ramBitCast<ty>(execute(shadow.getChild(idx), ctxt))
#define EVAL_LEFT(ty) ramBitCast<ty>(execute(shadow.getLhs(), ctxt))
#define EVAL_RIGHT(ty) ramBitCast<ty>(execute(shadow.getRhs(), ctxt))

#define GET_MACRO(_1, _2, _3, NAME, ...) NAME
#define CASE(...) GET_MACRO(__VA_ARGS__, EXTEND_CASE, _Dummy, BASE_CASE)(__VA_ARGS__)

#define BASE_CASE(Kind) \
    case (I_##Kind): { \
        return [&]() -> RamDomain { \
            [[maybe_unused]] const auto& shadow = *static_cast<const interpreter::Kind*>(node); \
            [[maybe_unused]] const auto& cur = *static_cast<const ram::Kind*>(node->getShadow());

#define EXTEND_CASE(Kind, Structure, Arity) \
    case (I_##Kind##_##Structure##_##Arity): { \
        return [&]() -> RamDomain { \
            [[maybe_unused]] const auto& shadow = *static_cast<const interpreter::Kind*>(node); \
            [[maybe_unused]] const auto& cur = *static_cast<const ram::Kind*>(node->getShadow()); \
            using RelType = Relation<Arity, interpreter::Structure>;
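
// For illustration only (these invocations are not part of the excerpt):
//   CASE(Constant)       -> BASE_CASE(Constant)   -> case (I_Constant): { ... }
//   CASE(Scan, Btree, 2) -> EXTEND_CASE(Scan, Btree, 2), which additionally
//   fixes RelType = Relation<2, interpreter::Btree> for the specialised body.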
#define TUPLE_COPY_FROM(dst, src) \
    assert(dst.size() == src.size()); \
    std::copy_n(src.begin(), dst.size(), dst.begin())

#define CAL_SEARCH_BOUND(superInfo, low, high) \
    TUPLE_COPY_FROM(low, superInfo.first); \
    TUPLE_COPY_FROM(high, superInfo.second); \
    for (const auto& tupleElement : superInfo.tupleFirst) { \
        low[tupleElement[0]] = ctxt[tupleElement[1]][tupleElement[2]]; \
    for (const auto& tupleElement : superInfo.tupleSecond) { \
        high[tupleElement[0]] = ctxt[tupleElement[1]][tupleElement[2]]; \
    for (const auto& expr : superInfo.exprFirst) { \
        low[expr.first] = execute(expr.second.get(), ctxt); \
    for (const auto& expr : superInfo.exprSecond) { \
        high[expr.first] = execute(expr.second.get(), ctxt); \

            return cur.getConstant();

            return ctxt[shadow.getTupleId()][shadow.getElement()];

#define BINARY_OP_TYPED(ty, op) return ramBitCast(static_cast<ty>(EVAL_CHILD(ty, 0) op EVAL_CHILD(ty, 1)))

#define BINARY_OP_LOGICAL(opcode, op) BINARY_OP_INTEGRAL(opcode, op)
#define BINARY_OP_INTEGRAL(opcode, op) \
    case FunctorOp::opcode: BINARY_OP_TYPED(RamSigned, op); \
    case FunctorOp::U##opcode: BINARY_OP_TYPED(RamUnsigned, op);
#define BINARY_OP_NUMERIC(opcode, op) \
    BINARY_OP_INTEGRAL(opcode, op) \
    case FunctorOp::F##opcode: BINARY_OP_TYPED(RamFloat, op);

#define BINARY_OP_SHIFT_MASK(ty, op) \
    return ramBitCast(EVAL_CHILD(ty, 0) op (EVAL_CHILD(ty, 1) & RAM_BIT_SHIFT_MASK))
#define BINARY_OP_INTEGRAL_SHIFT(opcode, op, tySigned, tyUnsigned) \
    case FunctorOp::opcode: BINARY_OP_SHIFT_MASK(tySigned, op); \
    case FunctorOp::U##opcode: BINARY_OP_SHIFT_MASK(tyUnsigned, op);

#define MINMAX_OP_SYM(op) \
    auto result = EVAL_CHILD(RamDomain, 0); \
    auto* result_val = &getSymbolTable().resolve(result); \
    for (size_t i = 1; i < args.size(); i++) { \
        auto alt = EVAL_CHILD(RamDomain, i); \
        if (alt == result) continue; \
        const auto& alt_val = getSymbolTable().resolve(alt); \
        if (*result_val op alt_val) { \
            result_val = &alt_val; \

#define MINMAX_OP(ty, op) \
    auto result = EVAL_CHILD(ty, 0); \
    for (size_t i = 1; i < args.size(); i++) { \
        result = op(result, EVAL_CHILD(ty, i)); \
    return ramBitCast(result); \

#define MINMAX_NUMERIC(opCode, op) \
    case FunctorOp::opCode: MINMAX_OP(RamSigned, op) \
    case FunctorOp::U##opCode: MINMAX_OP(RamUnsigned, op) \
    case FunctorOp::F##opCode: MINMAX_OP(RamFloat, op)

#define UNARY_OP(op, ty, func) \
    case FunctorOp::op: { \
        auto x = EVAL_CHILD(ty, 0); \
        return ramBitCast(func(x)); \

#define CONV_TO_STRING(op, ty) \
    case FunctorOp::op: return getSymbolTable().lookup(std::to_string(EVAL_CHILD(ty, 0)));
#define CONV_FROM_STRING(op, ty) \
    case FunctorOp::op: return evaluator::symbol2numeric<ty>( \
            getSymbolTable().resolve(EVAL_CHILD(RamDomain, 0)));
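
// Intrinsic functor evaluation: the *_OP macros above stamp out one case per
// numeric domain. For illustration (not shown in this excerpt),
// BINARY_OP_NUMERIC(ADD, +) expands to cases for FunctorOp::ADD, ::UADD and
// ::FADD, each bit-casting both children to the matching Ram type before '+'.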
            const auto& args = cur.getArguments();
            switch (cur.getOperator()) {
                case FunctorOp::ORD: return execute(shadow.getChild(0), ctxt);
                case FunctorOp::STRLEN:
                    return getSymbolTable().resolve(execute(shadow.getChild(0), ctxt)).size();
                case FunctorOp::NEG: return -execute(shadow.getChild(0), ctxt);
                case FunctorOp::FNEG: {
                    RamDomain result = execute(shadow.getChild(0), ctxt);
                    return ramBitCast(-ramBitCast<RamFloat>(result));
                case FunctorOp::BNOT: return ~execute(shadow.getChild(0), ctxt);
                case FunctorOp::UBNOT: {
                    RamDomain result = execute(shadow.getChild(0), ctxt);
                    return ramBitCast(~ramBitCast<RamUnsigned>(result));
                case FunctorOp::LNOT: return !execute(shadow.getChild(0), ctxt);
                case FunctorOp::ULNOT: {
                    RamDomain result = execute(shadow.getChild(0), ctxt);
                case FunctorOp::EXP: {
                    return std::pow(execute(shadow.getChild(0), ctxt), execute(shadow.getChild(1), ctxt));
                case FunctorOp::UEXP: {
                    auto first = ramBitCast<RamUnsigned>(execute(shadow.getChild(0), ctxt));
                    auto second = ramBitCast<RamUnsigned>(execute(shadow.getChild(1), ctxt));
                case FunctorOp::FEXP: {
                    auto first = ramBitCast<RamFloat>(execute(shadow.getChild(0), ctxt));
                    auto second = ramBitCast<RamFloat>(execute(shadow.getChild(1), ctxt));
                case FunctorOp::CAT: {
                    std::stringstream ss;
                    for (size_t i = 0; i < args.size(); i++) {
                        ss << getSymbolTable().resolve(execute(shadow.getChild(i), ctxt));
                    return getSymbolTable().lookup(ss.str());
                case FunctorOp::SUBSTR: {
                    auto symbol = execute(shadow.getChild(0), ctxt);
                    const std::string& str = getSymbolTable().resolve(symbol);
                    auto idx = execute(shadow.getChild(1), ctxt);
                    auto len = execute(shadow.getChild(2), ctxt);
                        sub_str = str.substr(idx, len);
                        std::cerr << "warning: wrong index position provided by substr(\"";
                        std::cerr << str << "\"," << (int32_t)idx << "," << (int32_t)len << ") functor.\n";
                    return getSymbolTable().lookup(sub_str);
                case FunctorOp::RANGE:
                case FunctorOp::URANGE:
                case FunctorOp::FRANGE:
                    fatal("ICE: functor `%s` must map onto `NestedIntrinsicOperator`", cur.getOperator());

#undef BINARY_OP_LOGICAL
#undef BINARY_OP_INTEGRAL
#undef BINARY_OP_NUMERIC
#undef BINARY_OP_SHIFT_MASK
#undef BINARY_OP_INTEGRAL_SHIFT
#undef MINMAX_NUMERIC
#undef CONV_TO_STRING
#undef CONV_FROM_STRING

            auto numArgs = cur.getArguments().size();
            auto runNested = [&](auto&& tuple) {
                execute(shadow.getChild(numArgs), ctxt);

#define RUN_RANGE(ty) \
    ? evaluator::runRange<ty>(EVAL_CHILD(ty, 0), EVAL_CHILD(ty, 1), EVAL_CHILD(ty, 2), runNested) \
    : evaluator::runRange<ty>(EVAL_CHILD(ty, 0), EVAL_CHILD(ty, 1), runNested), \

            switch (cur.getFunction()) {
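
// UserDefinedOperator: the functor is looked up via getMethodHandle/dlsym and
// invoked through libffi; argument and return types are mapped to ffi_type
// descriptors using the FFI_* macros defined at the top of this file.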
            const std::string& name = cur.getName();
            auto fn = reinterpret_cast<void (*)()>(getMethodHandle(name));
            if (fn == nullptr) fatal("cannot find user-defined operator `%s`", name);
            size_t arity = cur.getArguments().size();

            if (cur.isStateful()) {
                ffi_type* args[arity + 2];
                void* values[arity + 2];
                args[0] = args[1] = &ffi_type_pointer;
                void* symbolTable = (void*)&getSymbolTable();
                values[0] = &symbolTable;
                void* recordTable = (void*)&getRecordTable();
                values[1] = &recordTable;
                for (size_t i = 0; i < arity; i++) {
                    intVal[i] = execute(shadow.getChild(i), ctxt);
                    values[i + 2] = &intVal[i];

                const auto prepStatus = ffi_prep_cif(&cif, FFI_DEFAULT_ABI, arity + 2, codomain, args);
                if (prepStatus != FFI_OK) {
                    fatal("Failed to prepare CIF for user-defined operator `%s`; error code = %d", name,
                ffi_call(&cif, fn, &rc, values);

                const std::vector<TypeAttribute>& type = cur.getArgsTypes();
                ffi_type* args[arity];
                const char* strVal[arity];
                for (size_t i = 0; i < arity; i++) {
                    RamDomain arg = execute(shadow.getChild(i), ctxt);
                        case TypeAttribute::Symbol:
                            strVal[i] = getSymbolTable().resolve(arg).c_str();
                            values[i] = &strVal[i];
                        case TypeAttribute::Signed:
                            values[i] = &intVal[i];
                        case TypeAttribute::Unsigned:
                            uintVal[i] = ramBitCast<RamUnsigned>(arg);
                            values[i] = &uintVal[i];
                        case TypeAttribute::Float:
                            floatVal[i] = ramBitCast<RamFloat>(arg);
                            values[i] = &floatVal[i];
                        case TypeAttribute::ADT: fatal("ADT support is not implemented");
                        case TypeAttribute::Record: fatal("Record support is not implemented");

                switch (cur.getReturnType()) {
                    case TypeAttribute::Symbol: codomain = &FFI_Symbol; break;
                    case TypeAttribute::Signed: codomain = &FFI_RamSigned; break;
                    case TypeAttribute::Float: codomain = &FFI_RamFloat; break;
                    case TypeAttribute::ADT: fatal("Not implemented");
                    case TypeAttribute::Record: fatal("Not implemented");

                const auto prepStatus = ffi_prep_cif(&cif, FFI_DEFAULT_ABI, arity, codomain, args);
                if (prepStatus != FFI_OK) {
                    fatal("Failed to prepare CIF for user-defined operator `%s`; error code = %d", name,
                ffi_call(&cif, fn, &rc, values);

                switch (cur.getReturnType()) {
                    case TypeAttribute::Signed: return static_cast<RamDomain>(rc);
                    case TypeAttribute::Symbol:
                        return getSymbolTable().lookup(reinterpret_cast<const char*>(rc));
                    case TypeAttribute::ADT: fatal("Not implemented");
                    case TypeAttribute::Record: fatal("Not implemented");
                fatal("Unsupported user defined operator");

            auto values = cur.getArguments();
            size_t arity = values.size();
            for (size_t i = 0; i < arity; ++i) {
                data[i] = execute(shadow.getChild(i), ctxt);
            return getRecordTable().pack(data, arity);

            return execute(shadow.getLhs(), ctxt) && execute(shadow.getRhs(), ctxt);

            return !execute(shadow.getChild(), ctxt);
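
// Relation-level conditions (emptiness, size, existence checks) are stamped
// out per data structure and arity by the macros below; each case recovers the
// concrete relation type before delegating to the shared eval* helper.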
#define EMPTINESS_CHECK(Structure, Arity, ...) \
    CASE(EmptinessCheck, Structure, Arity) \
        const auto& rel = *static_cast<RelType*>(node->getRelation()); \
        return rel.empty(); \

#undef EMPTINESS_CHECK

#define RELATION_SIZE(Structure, Arity, ...) \
    CASE(RelationSize, Structure, Arity) \
        const auto& rel = *static_cast<RelType*>(node->getRelation()); \

#define EXISTENCE_CHECK(Structure, Arity, ...) \
    CASE(ExistenceCheck, Structure, Arity) \
        return evalExistenceCheck<RelType>(shadow, ctxt); \

#undef EXISTENCE_CHECK

#define PROVENANCE_EXISTENCE_CHECK(Structure, Arity, ...) \
    CASE(ProvenanceExistenceCheck, Structure, Arity) \
        return evalProvenanceExistenceCheck<RelType>(shadow, ctxt); \
    ESAC(ProvenanceExistenceCheck)

#undef PROVENANCE_EXISTENCE_CHECK

#define COMPARE_NUMERIC(ty, op) return EVAL_LEFT(ty) op EVAL_RIGHT(ty)
#define COMPARE_STRING(op) \
    return (getSymbolTable().resolve(EVAL_LEFT(RamDomain)) op \
            getSymbolTable().resolve(EVAL_RIGHT(RamDomain)))
#define COMPARE_EQ_NE(opCode, op) \
    case BinaryConstraintOp::opCode: COMPARE_NUMERIC(RamDomain, op); \
    case BinaryConstraintOp::F##opCode: COMPARE_NUMERIC(RamFloat, op);
#define COMPARE(opCode, op) \
    case BinaryConstraintOp::opCode: COMPARE_NUMERIC(RamSigned, op); \
    case BinaryConstraintOp::U##opCode: COMPARE_NUMERIC(RamUnsigned, op); \
    case BinaryConstraintOp::F##opCode: COMPARE_NUMERIC(RamFloat, op); \
    case BinaryConstraintOp::S##opCode: COMPARE_STRING(op);
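
// Constraint evaluation: COMPARE expands one case per numeric domain plus a
// string-ordering case (S-prefixed opcodes resolve both operands through the
// symbol table); MATCH/CONTAINS below operate on the resolved strings directly.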
            switch (cur.getOperator()) {
                case BinaryConstraintOp::MATCH: {
                    RamDomain left = execute(shadow.getLhs(), ctxt);
                    RamDomain right = execute(shadow.getRhs(), ctxt);
                    const std::string& pattern = getSymbolTable().resolve(left);
                    const std::string& text = getSymbolTable().resolve(right);
                        result = std::regex_match(text, std::regex(pattern));
                        std::cerr << "warning: wrong pattern provided for match(\"" << pattern << "\",\""
                case BinaryConstraintOp::NOT_MATCH: {
                    RamDomain left = execute(shadow.getLhs(), ctxt);
                    RamDomain right = execute(shadow.getRhs(), ctxt);
                    const std::string& pattern = getSymbolTable().resolve(left);
                    const std::string& text = getSymbolTable().resolve(right);
                        result = !std::regex_match(text, std::regex(pattern));
                        std::cerr << "warning: wrong pattern provided for !match(\"" << pattern << "\",\""
                case BinaryConstraintOp::CONTAINS: {
                    RamDomain left = execute(shadow.getLhs(), ctxt);
                    RamDomain right = execute(shadow.getRhs(), ctxt);
                    const std::string& pattern = getSymbolTable().resolve(left);
                    const std::string& text = getSymbolTable().resolve(right);
                    return text.find(pattern) != std::string::npos;
                case BinaryConstraintOp::NOT_CONTAINS: {
                    RamDomain left = execute(shadow.getLhs(), ctxt);
                    RamDomain right = execute(shadow.getRhs(), ctxt);
                    const std::string& pattern = getSymbolTable().resolve(left);
                    const std::string& text = getSymbolTable().resolve(right);
                    return text.find(pattern) == std::string::npos;

#undef COMPARE_NUMERIC
#undef COMPARE_STRING

            bool result = execute(shadow.getChild(), ctxt);
            if (profileEnabled && !cur.getProfileText().empty()) {
                auto& currentFrequencies = frequencies[cur.getProfileText()];
                while (currentFrequencies.size() <= getIterationNumber()) {
#pragma omp critical(frequencies)
                    currentFrequencies.emplace_back(0);
                frequencies[cur.getProfileText()][getIterationNumber()]++;
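
// Tuple-level operations: each Scan/Choice/IndexScan variant (sequential and
// parallel) gets a dedicated case that casts the relation to its concrete type
// and forwards to the corresponding eval* template further down.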
#define SCAN(Structure, Arity, ...) \
    CASE(Scan, Structure, Arity) \
        const auto& rel = *static_cast<RelType*>(node->getRelation()); \
        return evalScan(rel, cur, shadow, ctxt); \

#define PARALLEL_SCAN(Structure, Arity, ...) \
    CASE(ParallelScan, Structure, Arity) \
        const auto& rel = *static_cast<RelType*>(node->getRelation()); \
        return evalParallelScan(rel, cur, shadow, ctxt); \

#define INDEX_SCAN(Structure, Arity, ...) \
    CASE(IndexScan, Structure, Arity) \
        return evalIndexScan<RelType>(cur, shadow, ctxt); \

#define PARALLEL_INDEX_SCAN(Structure, Arity, ...) \
    CASE(ParallelIndexScan, Structure, Arity) \
        const auto& rel = *static_cast<RelType*>(node->getRelation()); \
        return evalParallelIndexScan(rel, cur, shadow, ctxt); \
    ESAC(ParallelIndexScan)

#undef PARALLEL_INDEX_SCAN

#define CHOICE(Structure, Arity, ...) \
    CASE(Choice, Structure, Arity) \
        const auto& rel = *static_cast<RelType*>(node->getRelation()); \
        return evalChoice(rel, cur, shadow, ctxt); \

#define PARALLEL_CHOICE(Structure, Arity, ...) \
    CASE(ParallelChoice, Structure, Arity) \
        const auto& rel = *static_cast<RelType*>(node->getRelation()); \
        return evalParallelChoice(rel, cur, shadow, ctxt); \

#undef PARALLEL_CHOICE

#define INDEX_CHOICE(Structure, Arity, ...) \
    CASE(IndexChoice, Structure, Arity) \
        return evalIndexChoice<RelType>(cur, shadow, ctxt); \

#define PARALLEL_INDEX_CHOICE(Structure, Arity, ...) \
    CASE(ParallelIndexChoice, Structure, Arity) \
        const auto& rel = *static_cast<RelType*>(node->getRelation()); \
        return evalParallelIndexChoice(rel, cur, shadow, ctxt); \
    ESAC(ParallelIndexChoice)

#undef PARALLEL_INDEX_CHOICE

            RamDomain ref = execute(shadow.getExpr(), ctxt);

            size_t arity = cur.getArity();
            ctxt[cur.getTupleId()] = tuple;
            return execute(shadow.getNestedOperation(), ctxt);

#define PARALLEL_AGGREGATE(Structure, Arity, ...) \
    CASE(ParallelAggregate, Structure, Arity) \
        const auto& rel = *static_cast<RelType*>(node->getRelation()); \
        return evalParallelAggregate(rel, cur, shadow, ctxt); \
    ESAC(ParallelAggregate)

#undef PARALLEL_AGGREGATE

#define AGGREGATE(Structure, Arity, ...) \
    CASE(Aggregate, Structure, Arity) \
        const auto& rel = *static_cast<RelType*>(node->getRelation()); \
        return evalAggregate(cur, *shadow.getCondition(), shadow.getExpr(), *shadow.getNestedOperation(), \
                rel.scan(), ctxt); \

#define PARALLEL_INDEX_AGGREGATE(Structure, Arity, ...) \
    CASE(ParallelIndexAggregate, Structure, Arity) \
        return evalParallelIndexAggregate<RelType>(cur, shadow, ctxt); \
    ESAC(ParallelIndexAggregate)

#undef PARALLEL_INDEX_AGGREGATE

#define INDEX_AGGREGATE(Structure, Arity, ...) \
    CASE(IndexAggregate, Structure, Arity) \
        return evalIndexAggregate<RelType>(cur, shadow, ctxt); \
    ESAC(IndexAggregate)

#undef INDEX_AGGREGATE

            if (execute(shadow.getCondition(), ctxt)) {
                return execute(shadow.getNestedOperation(), ctxt);

            if (execute(shadow.getCondition(), ctxt)) {
                result = execute(shadow.getNestedOperation(), ctxt);
            if (profileEnabled && !cur.getProfileText().empty()) {
                auto& currentFrequencies = frequencies[cur.getProfileText()];
                while (currentFrequencies.size() <= getIterationNumber()) {
                    currentFrequencies.emplace_back(0);
                frequencies[cur.getProfileText()][getIterationNumber()]++;

#define PROJECT(Structure, Arity, ...) \
    CASE(Project, Structure, Arity) \
        auto& rel = *static_cast<RelType*>(node->getRelation()); \
        return evalProject(rel, shadow, ctxt); \

            for (size_t i = 0; i < cur.getValues().size(); ++i) {
                if (shadow.getChild(i) == nullptr) {

            for (const auto& child : shadow.getChildren()) {
                if (!execute(child.get(), ctxt)) {

            for (const auto& child : shadow.getChildren()) {
                if (!execute(child.get(), ctxt)) {

            resetIterationNumber();
            while (execute(shadow.getChild(), ctxt)) {
                incIterationNumber();
            resetIterationNumber();

            return !execute(shadow.getChild(), ctxt);

            Logger logger(cur.getMessage(), getIterationNumber(),
            return execute(shadow.getChild(), ctxt);

            Logger logger(cur.getMessage(), getIterationNumber());
            return execute(shadow.getChild(), ctxt);

            SignalHandler::instance()->setMsg(cur.getMessage().c_str());
            return execute(shadow.getChild(), ctxt);

#define CLEAR(Structure, Arity, ...) \
    CASE(Clear, Structure, Arity) \
        auto& rel = *static_cast<RelType*>(node->getRelation()); \

            execute(subroutine[shadow.getSubroutineId()].get(), ctxt);

            ProfileEventSingleton::instance().makeQuantityEvent(
                    cur.getMessage(), rel.size(), getIterationNumber());

            const auto& directive = cur.getDirectives();
            const std::string& op = cur.get("operation");
            if (op == "input") {
                        .getReader(directive, getSymbolTable(), getRecordTable())
                } catch (std::exception& e) {
                    std::cerr << "Error loading data: " << e.what() << "\n";
            } else if (op == "output" || op == "printsize") {
                        .getWriter(directive, getSymbolTable(), getRecordTable())
                } catch (std::exception& e) {
                    std::cerr << e.what();
            } else {
                assert(false && "wrong i/o operation");
            ViewContext* viewContext = shadow.getViewContext();
            for (auto& op : viewFreeOps) {
                if (!execute(op.get(), ctxt)) {
            for (auto& info : viewsForOuter) {
                ctxt.createView(*getRelationHandle(info[0]), info[1], info[2]);
            for (auto& op : viewOps) {
                if (!execute(op.get(), ctxt)) {
            for (auto& info : viewsForNested) {
                ctxt.createView(*getRelationHandle(info[0]), info[1], info[2]);
            execute(shadow.getChild(), ctxt);

            auto& src = *static_cast<EqrelRelation*>(getRelationHandle(shadow.getSourceId()).get());
            auto& trg = *static_cast<EqrelRelation*>(getRelationHandle(shadow.getTargetId()).get());

            swapRelation(shadow.getSourceId(), shadow.getTargetId());
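
// The templates below implement the per-relation-type helpers referenced by
// the CASE blocks above; fixing Rel gives a statically known arity, so search
// bounds and tuples can be built without dynamic allocation.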
template <typename Rel>
RamDomain Engine::evalExistenceCheck(const ExistenceCheck& shadow, Context& ctxt) {
    constexpr size_t Arity = Rel::Arity;
    size_t viewPos = shadow.getViewId();

    if (profileEnabled && !shadow.isTemp()) {
        reads[shadow.getRelationName()]++;

    const auto& superInfo = shadow.getSuperInst();
    if (shadow.isTotalSearch()) {
        for (const auto& tupleElement : superInfo.tupleFirst) {
            tuple[tupleElement[0]] = ctxt[tupleElement[1]][tupleElement[2]];
        for (const auto& expr : superInfo.exprFirst) {
            tuple[expr.first] = execute(expr.second.get(), ctxt);
        return Rel::castView(ctxt.getView(viewPos))->contains(tuple);

    for (const auto& tupleElement : superInfo.tupleFirst) {
        low[tupleElement[0]] = ctxt[tupleElement[1]][tupleElement[2]];
        high[tupleElement[0]] = low[tupleElement[0]];
    for (const auto& expr : superInfo.exprFirst) {
        low[expr.first] = execute(expr.second.get(), ctxt);
        high[expr.first] = low[expr.first];
    return Rel::castView(ctxt.getView(viewPos))->contains(low, high);

template <typename Rel>
RamDomain Engine::evalProvenanceExistenceCheck(const ProvenanceExistenceCheck& shadow, Context& ctxt) {
    constexpr size_t Arity = Rel::Arity;
    const auto& superInfo = shadow.getSuperInst();
    for (const auto& tupleElement : superInfo.tupleFirst) {
        low[tupleElement[0]] = ctxt[tupleElement[1]][tupleElement[2]];
        high[tupleElement[0]] = low[tupleElement[0]];
    for (const auto& expr : superInfo.exprFirst) {
        assert(expr.second.get() != nullptr &&
                "ProvenanceExistenceCheck should always be specified for payload");
        low[expr.first] = execute(expr.second.get(), ctxt);
        high[expr.first] = low[expr.first];

    size_t viewPos = shadow.getViewId();
    auto equalRange = Rel::castView(ctxt.getView(viewPos))->range(low, high);
    if (equalRange.begin() == equalRange.end()) {
    return (*equalRange.begin())[Arity - 1] <= execute(shadow.getChild(), ctxt);
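
// Scan helpers: sequential variants iterate rel.scan() directly, while the
// parallel variants split the relation with partitionScan/partitionRange and
// give each worker its own Context copy so views are not shared across threads.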
template <typename Rel>
RamDomain Engine::evalScan(const Rel& rel, const ram::Scan& cur, const Scan& shadow, Context& ctxt) {
    for (const auto& tuple : rel.scan()) {
        ctxt[cur.getTupleId()] = tuple.data();
        if (!execute(shadow.getNestedOperation(), ctxt)) {

template <typename Rel>
        const Rel& rel, const ram::ParallelScan& cur, const ParallelScan& shadow, Context& ctxt) {
    auto viewContext = shadow.getViewContext();
    auto pStream = rel.partitionScan(numOfThreads);
        auto viewInfo = viewContext->getViewInfoForNested();
        for (const auto& info : viewInfo) {
            newCtxt.createView(*getRelationHandle(info[0]), info[1], info[2]);
        pfor(auto it = pStream.begin(); it < pStream.end(); it++) {
            for (const auto& tuple : *it) {
                newCtxt[cur.getTupleId()] = tuple.data();
                if (!execute(shadow.getNestedOperation(), newCtxt)) {

template <typename Rel>
RamDomain Engine::evalIndexScan(const ram::IndexScan& cur, const IndexScan& shadow, Context& ctxt) {
    constexpr size_t Arity = Rel::Arity;
    const auto& superInfo = shadow.getSuperInst();
    size_t viewId = shadow.getViewId();
    auto view = Rel::castView(ctxt.getView(viewId));
        if (!execute(shadow.getNestedOperation(), ctxt)) {

template <typename Rel>
RamDomain Engine::evalParallelIndexScan(
        const Rel& rel, const ram::ParallelIndexScan& cur, const ParallelIndexScan& shadow, Context& ctxt) {
    auto viewContext = shadow.getViewContext();
    constexpr size_t Arity = Rel::Arity;
    const auto& superInfo = shadow.getSuperInst();
    size_t indexPos = shadow.getViewId();
    auto pStream = rel.partitionRange(indexPos, low, high, numOfThreads);
        auto viewInfo = viewContext->getViewInfoForNested();
        for (const auto& info : viewInfo) {
            newCtxt.createView(*getRelationHandle(info[0]), info[1], info[2]);
        pfor(auto it = pStream.begin(); it < pStream.end(); it++) {
            for (const auto& tuple : *it) {
                newCtxt[cur.getTupleId()] = tuple.data();
                if (!execute(shadow.getNestedOperation(), newCtxt)) {

template <typename Rel>
RamDomain Engine::evalChoice(const Rel& rel, const ram::Choice& cur, const Choice& shadow, Context& ctxt) {
    for (const auto& tuple : rel.scan()) {
        ctxt[cur.getTupleId()] = tuple.data();
        if (execute(shadow.getCondition(), ctxt)) {
            execute(shadow.getNestedOperation(), ctxt);

template <typename Rel>
        const Rel& rel, const ram::ParallelChoice& cur, const ParallelChoice& shadow, Context& ctxt) {
    auto viewContext = shadow.getViewContext();
    auto pStream = rel.partitionScan(numOfThreads);
    auto viewInfo = viewContext->getViewInfoForNested();
        for (const auto& info : viewInfo) {
            newCtxt.createView(*getRelationHandle(info[0]), info[1], info[2]);
        pfor(auto it = pStream.begin(); it < pStream.end(); it++) {
            for (const auto& tuple : *it) {
                newCtxt[cur.getTupleId()] = tuple.data();
                if (execute(shadow.getCondition(), newCtxt)) {
                    execute(shadow.getNestedOperation(), newCtxt);

template <typename Rel>
RamDomain Engine::evalIndexChoice(const ram::IndexChoice& cur, const IndexChoice& shadow, Context& ctxt) {
    constexpr size_t Arity = Rel::Arity;
    const auto& superInfo = shadow.getSuperInst();
    size_t viewId = shadow.getViewId();
    auto view = Rel::castView(ctxt.getView(viewId));
        if (execute(shadow.getCondition(), ctxt)) {
            execute(shadow.getNestedOperation(), ctxt);

template <typename Rel>
RamDomain Engine::evalParallelIndexChoice(const Rel& rel, const ram::ParallelIndexChoice& cur,
        const ParallelIndexChoice& shadow, Context& ctxt) {
    auto viewContext = shadow.getViewContext();
    auto viewInfo = viewContext->getViewInfoForNested();
    constexpr size_t Arity = Rel::Arity;
    const auto& superInfo = shadow.getSuperInst();
    size_t indexPos = shadow.getViewId();
    auto pStream = rel.partitionRange(indexPos, low, high, numOfThreads);
        for (const auto& info : viewInfo) {
            newCtxt.createView(*getRelationHandle(info[0]), info[1], info[2]);
        pfor(auto it = pStream.begin(); it < pStream.end(); it++) {
            for (const auto& tuple : *it) {
                newCtxt[cur.getTupleId()] = tuple.data();
                if (execute(shadow.getCondition(), newCtxt)) {
                    execute(shadow.getNestedOperation(), newCtxt);
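
// evalAggregate is shared by all aggregate variants: it initialises the
// accumulator per AggregateOp, folds every tuple that passes the filter, and
// runs the nested operation only when the aggregate is well-defined
// (shouldRunNested stays false for, e.g., MIN/MAX over an empty range).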
template <typename Aggregate, typename Iter>
RamDomain Engine::evalAggregate(const Aggregate& aggregate, const Node& filter, const Node* expression,
        const Node& nestedOperation, const Iter& ranges, Context& ctxt) {
    bool shouldRunNested = false;

    std::pair<RamFloat, RamFloat> accumulateMean;

    switch (aggregate.getFunction()) {
        case AggregateOp::SUM:
            shouldRunNested = true;
        case AggregateOp::USUM:
            shouldRunNested = true;
        case AggregateOp::FSUM:
            shouldRunNested = true;
        case AggregateOp::MEAN:
            accumulateMean = {0, 0};
        case AggregateOp::COUNT:
            shouldRunNested = true;

    for (const auto& tuple : ranges) {
        ctxt[aggregate.getTupleId()] = tuple.data();
        if (!execute(&filter, ctxt)) {
        shouldRunNested = true;
        if (aggregate.getFunction() == AggregateOp::COUNT) {
        RamDomain val = execute(expression, ctxt);
        switch (aggregate.getFunction()) {
            case AggregateOp::MIN: res = std::min(res, val); break;
            case AggregateOp::FMIN:
                res = ramBitCast(std::min(ramBitCast<RamFloat>(res), ramBitCast<RamFloat>(val)));
            case AggregateOp::UMIN:
                res = ramBitCast(std::min(ramBitCast<RamUnsigned>(res), ramBitCast<RamUnsigned>(val)));
            case AggregateOp::MAX: res = std::max(res, val); break;
            case AggregateOp::FMAX:
                res = ramBitCast(std::max(ramBitCast<RamFloat>(res), ramBitCast<RamFloat>(val)));
            case AggregateOp::UMAX:
                res = ramBitCast(std::max(ramBitCast<RamUnsigned>(res), ramBitCast<RamUnsigned>(val)));
            case AggregateOp::SUM: res += val; break;
            case AggregateOp::FSUM:
                res = ramBitCast(ramBitCast<RamFloat>(res) + ramBitCast<RamFloat>(val));
            case AggregateOp::USUM:
                res = ramBitCast(ramBitCast<RamUnsigned>(res) + ramBitCast<RamUnsigned>(val));
            case AggregateOp::MEAN:
                accumulateMean.first += ramBitCast<RamFloat>(val);
                accumulateMean.second++;
            case AggregateOp::COUNT: fatal("This should never be executed");

    if (aggregate.getFunction() == AggregateOp::MEAN && accumulateMean.second != 0) {
        res = ramBitCast(accumulateMean.first / accumulateMean.second);

    ctxt[aggregate.getTupleId()] = tuple.data();
    if (!shouldRunNested) {
    return execute(&nestedOperation, ctxt);

template <typename Rel>
RamDomain Engine::evalParallelAggregate(
        const Rel& rel, const ram::ParallelAggregate& cur, const ParallelAggregate& shadow, Context& ctxt) {
    auto viewContext = shadow.getViewContext();
    auto viewInfo = viewContext->getViewInfoForNested();
    for (const auto& info : viewInfo) {
        newCtxt.createView(*getRelationHandle(info[0]), info[1], info[2]);
    return evalAggregate(
            cur, *shadow.getCondition(), shadow.getExpr(), *shadow.getNestedOperation(), rel.scan(), newCtxt);

template <typename Rel>
RamDomain Engine::evalParallelIndexAggregate(
    auto viewContext = shadow.getViewContext();
    auto viewInfo = viewContext->getViewInfoForNested();
    for (const auto& info : viewInfo) {
        newCtxt.createView(*getRelationHandle(info[0]), info[1], info[2]);
    constexpr size_t Arity = Rel::Arity;
    const auto& superInfo = shadow.getSuperInst();
    size_t viewId = shadow.getViewId();
    auto view = Rel::castView(newCtxt.getView(viewId));
    return evalAggregate(cur, *shadow.getCondition(), shadow.getExpr(), *shadow.getNestedOperation(),
            view->range(low, high), newCtxt);

template <typename Rel>
    const size_t Arity = Rel::Arity;
    const auto& superInfo = shadow.getSuperInst();
    size_t viewId = shadow.getViewId();
    auto view = Rel::castView(ctxt.getView(viewId));
    return evalAggregate(cur, *shadow.getCondition(), shadow.getExpr(), *shadow.getNestedOperation(),
            view->range(low, high), ctxt);
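
// The final template below (its signature is elided in this excerpt) appears
// to be the evalProject helper used by the PROJECT cases above: it fills the
// output tuple from the tuple-reference and expression parts of the
// projection's SuperInstruction before the tuple is inserted.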
template <typename Rel>
    constexpr size_t Arity = Rel::Arity;
    for (const auto& tupleElement : superInfo.tupleFirst) {
        tuple[tupleElement[0]] = ctxt[tupleElement[1]][tupleElement[2]];
    for (const auto& expr : superInfo.exprFirst) {
        tuple[expr.first] = execute(expr.second.get(), ctxt);