// OpenMP build: the parallel control-flow macros expand to OpenMP pragmas.
// (In the full header this branch is selected when _OPENMP is defined.)

// Apple platforms only provide pthread_yield_np
#define pthread_yield pthread_yield_np

// a parallel region opens a team of OpenMP threads
#define PARALLEL_START _Pragma("omp parallel") {
#define PARALLEL_END }

// parallel loops distribute their iterations dynamically
#define pfor _Pragma("omp for schedule(dynamic)") for

// sections remain flat blocks (no nested OpenMP sections)
#define SECTIONS_START {
#define SECTIONS_END }

#define SECTION_START {

// operation contexts; [[maybe_unused]] silences warnings for unused contexts
#define CREATE_OP_CONTEXT(NAME, INIT) [[maybe_unused]] auto NAME = INIT;
#define READ_OP_CONTEXT(NAME) NAME

// Sequential build: the same macros degrade to plain blocks, so generated
// code compiles unchanged without OpenMP.

#define PARALLEL_START {
#define PARALLEL_END }

#define SECTIONS_START {
#define SECTIONS_END }

#define SECTION_START {

#define CREATE_OP_CONTEXT(NAME, INIT) [[maybe_unused]] auto NAME = INIT;
#define READ_OP_CONTEXT(NAME) NAME

// upper bound on the number of worker threads
#ifdef _OPENMP
#define MAX_THREADS (omp_get_max_threads())
#else
#define MAX_THREADS (1)
#endif
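
// --- Usage sketch (not from the original header): how the macros above are
// meant to be used. With OpenMP enabled, PARALLEL_START opens a thread team
// and pfor distributes iterations dynamically; in the sequential build the
// same source degrades to a plain block and a plain for loop. The function
// scale_all and its workload are invented for the example.
#include <cstddef>
#include <vector>

void scale_all(std::vector<double>& data, double factor) {
    PARALLEL_START
    pfor(std::size_t i = 0; i < data.size(); ++i) {
        data[i] *= factor;  // iterations are independent, safe to parallelize
    }
    PARALLEL_END
}
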
// Lock: a std::mutex handing out move-only RAII leases
class Lock {
    std::mutex mux;

public:
    struct Lease {
        Lease(std::mutex& mux) : mux(&mux) {
            mux.lock();
        }
        Lease(Lease&& other) : mux(other.mux) {
            other.mux = nullptr;  // the moved-from lease releases nothing
        }
        Lease(const Lease& other) = delete;
        ~Lease() {
            if (mux != nullptr) {
                mux->unlock();
            }
        }

    protected:
        std::mutex* mux;
    };

    // ...

    bool try_lock() {
        return mux.try_lock();
    }
};
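
// --- Usage sketch (not from the original source): a Lease is scope-bound,
// so a critical section is just a block. The mutex, counter, and
// bump_counter are invented for the example.
#include <mutex>

static std::mutex counter_mutex;
static int counter = 0;

void bump_counter() {
    Lock::Lease lease(counter_mutex);  // locks on construction
    ++counter;                         // protected section
}                                      // unlocks when the lease is destroyed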
 
// busy-wait hint: PAUSE on x86-64, otherwise just a compiler barrier
#ifdef __x86_64__
#define cpu_relax() asm volatile("pause\n" : : : "memory")
#else
#define cpu_relax() asm volatile("" : : : "memory")
#endif
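
// --- Usage sketch (not from the original source): cpu_relax() inside a
// spin-wait, combined with the yield-every-1000-spins back-off used by the
// fragment below. The function wait_until_set and its flag are hypothetical
// names for this sketch.
#include <atomic>
#include <thread>

inline void wait_until_set(const std::atomic<bool>& flag) {
    for (int i = 1; !flag.load(std::memory_order_acquire); ++i) {
        cpu_relax();  // keep the pipeline calm while spinning
        if ((i % 1000) == 0) {
            std::this_thread::yield();  // hand the core back occasionally
        }
    }
}
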
// within a spin loop: every 1000th iteration, back off (body elided)
if ((i % 1000) == 0) {
    // ...
}

// SpinLock: a test-and-set lock on a single atomic word
class SpinLock {
    std::atomic<int> lck{0};

public:
    bool try_lock() {
        int should = 0;
        return lck.compare_exchange_weak(should, 1, std::memory_order_acquire);
    }

    void unlock() {
        lck.store(0, std::memory_order_release);
    }
};
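
// --- Usage sketch (not from the original source): a blocking acquire can be
// layered on try_lock() with cpu_relax() in the retry loop. The shared
// counter and add_n are invented for the example.
static SpinLock spin;
static long total = 0;

void add_n(int n) {
    for (int i = 0; i < n; ++i) {
        while (!spin.try_lock()) {
            cpu_relax();  // spin until the word is free
        }
        ++total;          // critical section
        spin.unlock();
    }
}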
 
// ReadWriteLock: bit 0 = writer active, bit 1 = writer waiting,
// bits 2 and up count the active readers
class ReadWriteLock {
    std::atomic<int> lck{0};

public:
    void start_read() {
        auto r = lck.fetch_add(4, std::memory_order_acquire);

        // back off while a writer is active or waiting
        while (r & 0x3) {
            end_read();
            cpu_relax();
            r = lck.fetch_add(4, std::memory_order_acquire);
        }
    }

    void end_read() {
        lck.fetch_sub(4, std::memory_order_release);
    }

    void start_write() {
        // announce the writer by setting the wait bit
        auto stat = lck.fetch_or(2, std::memory_order_acquire);
        while (stat & 0x2) {
            cpu_relax();
            stat = lck.fetch_or(2, std::memory_order_acquire);
        }

        // once the last reader leaves, trade the wait bit for the write bit
        int should = 2;
        while (!lck.compare_exchange_strong(
                should, 1, std::memory_order_acquire, std::memory_order_relaxed)) {
            cpu_relax();
            should = 2;  // the failed CAS overwrote it
        }
    }

    bool try_write() {
        int should = 0;
        return lck.compare_exchange_strong(should, 1, std::memory_order_acquire, std::memory_order_relaxed);
    }

    void end_write() {
        lck.fetch_sub(1, std::memory_order_release);
    }

    bool try_upgrade_to_write() {
        int should = 4;  // succeeds only for the sole reader
        return lck.compare_exchange_strong(should, 1, std::memory_order_acquire, std::memory_order_relaxed);
    }

    void downgrade_to_read() {
        // drop the write bit (-1) and register as a reader (+4)
        lck.fetch_add(3, std::memory_order_release);
    }
};
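
// --- Usage sketch (not from the original source): many readers may hold the
// lock at once; a writer first parks on the wait bit, then enters alone.
// The shared table and the two helpers are invented for the example.
#include <vector>

static ReadWriteLock rw;
static std::vector<int> table;

int read_back() {
    rw.start_read();
    int v = table.empty() ? 0 : table.back();
    rw.end_read();
    return v;
}

void append(int v) {
    rw.start_write();
    table.push_back(v);
    rw.end_write();
}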
 
// OptimisticReadWriteLock: a version-based (sequence) lock; the counter is
// odd while a write is in progress, and readers validate their lease after
// reading instead of blocking writers
class OptimisticReadWriteLock {
    std::atomic<int> version{0};

public:
    // the token an optimistic reader holds
    class Lease {
        friend class OptimisticReadWriteLock;
        int version;

    public:
        Lease(int version = 0) : version(version) {}
        Lease(const Lease& lease) = default;
        Lease& operator=(const Lease& other) = default;
        Lease& operator=(Lease&& other) = default;
    };

    Lease start_read() {
        // snapshot the version
        auto v = version.load(std::memory_order_acquire);

        // wait until no write is in progress (version is even)
        while ((v & 0x1) == 1) {
            cpu_relax();
            v = version.load(std::memory_order_acquire);
        }

        return Lease(v);
    }

    bool validate(const Lease& lease) {
        // valid iff the version has not moved since the snapshot
        std::atomic_thread_fence(std::memory_order_acquire);
        return lease.version == version.load(std::memory_order_relaxed);
    }

    bool end_read(const Lease& lease) {
        return validate(lease);
    }

    void start_write() {
        // set the write bit, making the version odd
        auto v = version.fetch_or(0x1, std::memory_order_acquire);

        // spin while another write is in progress
        while ((v & 0x1) == 1) {
            cpu_relax();
            v = version.fetch_or(0x1, std::memory_order_acquire);
        }
    }

    bool try_start_write() {
        auto v = version.fetch_or(0x1, std::memory_order_acquire);
        return !(v & 0x1);
    }

    bool try_upgrade_to_write(const Lease& lease) {
        auto v = version.fetch_or(0x1, std::memory_order_acquire);

        if (v & 0x1) return false;  // another writer was faster

        if (lease.version == v) return true;  // nothing written since the snapshot

        // a write intervened: undo the write bit and fail
        abort_write();
        return false;
    }

    void abort_write() {
        // drop the write bit without publishing a new version
        version.fetch_sub(1, std::memory_order_release);
    }

    void end_write() {
        // drop the write bit and advance to the next even version
        version.fetch_add(1, std::memory_order_release);
    }

    bool is_write_locked() const {
        return version & 0x1;
    }
};
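
// --- Usage sketch (not from the original source): the optimistic read
// protocol. A reader copies the data without blocking writers, then checks
// its lease; if a write intervened, the copy may be torn and is retried.
// The Point struct and both helpers are invented for the example.
struct Point {
    int x = 0, y = 0;
};

static OptimisticReadWriteLock opt;
static Point point;

Point read_point() {
    Point copy;
    OptimisticReadWriteLock::Lease lease;
    do {
        lease = opt.start_read();
        copy = point;                // may race with a writer
    } while (!opt.end_read(lease));  // retry if the version moved
    return copy;
}

void write_point(int x, int y) {
    opt.start_write();
    point.x = x;
    point.y = y;
    opt.end_write();
}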
 
// a shared lock to synchronize output
static Lock outputLock;
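
// --- Usage sketch (not from the original source): serializing console
// output. This assumes Lock also exposes plain lock()/unlock() methods
// (elided above); log_line is invented for the example.
#include <iostream>
#include <string>

void log_line(const std::string& msg) {
    outputLock.lock();
    std::cout << msg << '\n';
    outputLock.unlock();
}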