21 #ifndef mia_core_parallelcxx11_hh
22 #define mia_core_parallelcxx11_hh
/**
   \brief Query the maximal number of worker tasks the parallel driver may use.
   NOTE(review): presumably a static member of a task-limit policy class whose
   declaration falls outside this chunk — confirm against the full header.
*/
static int get_max_tasks();

/**
   \brief Set the maximal number of worker tasks the parallel driver may use.
   \param mt new task limit
*/
static void set_max_tasks(int mt);
// With the C++11 backend the ATOMIC macro maps directly onto std::atomic.
// NOTE(review): presumably kept as a macro (rather than an alias template) so
// that client code spelling ATOMIC<T> also works with an alternative parallel
// backend selected elsewhere — confirm against the sibling backend headers.
#define ATOMIC std::atomic
// NOTE(review): start of a class template parameterized over a mutex type
// (likely a scoped-lock helper); its body falls outside this chunk, and the
// stray leading "48" looks like a source-line number fused in by a bad
// extraction — confirm against the full header.
48 template <
typename Mutex>
// NOTE(review): fragments of a 1-D parallel-range class; the class header and
// several members fall outside this chunk, and the stray leading numbers look
// like source-line numbers fused in by a bad extraction.
// -- copy-constructor initializer fragment: copies the range start ...
90 m_begin(orig.m_begin),
// ... and transfers the work-package counter via an explicit load(), since
// std::atomic is not copy-constructible.
94 m_current_wp = orig.m_current_wp.load();
// -- get_next_workpackage() fragment: atomically claim the next package index,
// then derive its [begin, end) sub-range from the block size.
98 int wp = m_current_wp++;
99 int begin = m_begin + wp * m_block;
100 int end = begin + m_block;
// NOTE(review): clamping of 'end' to m_end presumably happens in elided
// lines — confirm against the full header.
// -- empty() fragment: a range is empty when it spans no elements.
111 return m_begin >= m_end;
// -- data member: counter of the next work package to hand out; atomic because
// it is incremented concurrently by all worker threads pulling from the range.
126 std::atomic<int> m_current_wp;
/**
   \brief Worker loop for pfor: drain the shared range.

   Repeatedly claims the next work package from \a range and applies \a f to
   it, until the range reports an empty package (end of work).
   NOTE(review): reconstructed from a garbled extraction — the loop shell was
   elided, but the pull-until-empty shape is fixed by the visible
   get_next_workpackage() call, the range's empty() predicate, and the call
   site in pfor.

   \param range shared work generator; get_next_workpackage() must be safe to
          call from several threads at once
   \param f functor invoked as f(work_package); taken by value (one copy per
          worker thread)
*/
template <typename Range, typename Func>
void pfor_callback(Range& range, Func f)
{
        while (true) {
                Range wp = range.get_next_workpackage();
                if (wp.empty())
                        break;      // range exhausted — this worker is done
                f(wp);
        }
}
/**
   \brief Run a function over all work packages of a range, in parallel.

   Spawns one worker per available hardware thread; every worker repeatedly
   claims the next work package from the shared \a range and applies \a f to
   it until the range is exhausted, then all workers are joined.

   NOTE(review): the original fragment obtains its thread count around here
   via std::thread::hardware_concurrency(); it may additionally be capped by
   set_max_tasks() elsewhere in this header — confirm against the full file.

   \param range work generator; copied into this frame, then shared by
          reference among the workers, so get_next_workpackage() must be
          thread-safe and empty() must mark the end of work
   \param f functor invoked as f(work_package); copied once per worker thread
*/
template <typename Range, typename Func>
void pfor(Range range, const Func& f)
{
        unsigned int n_threads = std::thread::hardware_concurrency();
        if (n_threads == 0)       // hardware_concurrency() may report 0
                n_threads = 1;    // fall back to a single worker

        std::vector<std::thread> threads;
        threads.reserve(n_threads);
        for (unsigned int i = 0; i < n_threads; ++i) {
                // 'range' lives on this stack frame and outlives the joins
                // below, so capturing it by reference is safe; 'f' is copied
                // per thread, matching the original by-value hand-off.
                threads.emplace_back([&range, f]() mutable {
                        for (;;) {
                                Range wp = range.get_next_workpackage();
                                if (wp.empty())
                                        break;
                                f(wp);
                        }
                });
        }
        for (auto& t : threads)
                t.join();
}
// NOTE(review): two template headers whose bodies fall outside this chunk —
// presumably a value holder used by the parallel reduction (template over the
// value type V) containing a member template over the reduce functor; the
// stray leading numbers look like fused source-line numbers from a bad
// extraction. Confirm against the full header.
161 template <
typename V>
168 template <
typename Reduce>
// NOTE(review): worker function for preduce (its signature line is elided in
// this chunk) — each thread repeatedly pulls a work package from the shared
// range and folds it into a running value with f; the reduce functor
// presumably merges that partial result into the shared accumulator in
// elided lines. Confirm against the full header.
189 template <
typename Range,
typename Value,
typename Func,
typename Reduce>
// Claim the next work package from the shared (thread-safe) range.
194 Range wp = range.get_next_workpackage();
// Fold the package into the accumulator; note the (work_package, value)
// argument order expected of the Func functor.
196 value = f(wp, value);
/**
   \brief Parallel fold-and-reduce over the work packages of a range.

   Each worker thread folds \a f over the work packages it claims from the
   shared \a range, starting from \a identity; after all workers are joined
   the per-thread partial results are combined sequentially with \a r.

   NOTE(review): the original fragment routed partials through a shared
   accumulator handed to preduce_callback by std::ref (presumably guarded by a
   mutex in elided lines); per-thread partials combined after join() give the
   same result for an associative reduction without any locking — confirm
   against the full header.

   \param range work generator; get_next_workpackage() must be thread-safe
   \param identity neutral start value of the reduction
   \param f accumulator, invoked as f(work_package, value) — argument order as
          in the original fragment
   \param r reduction functor combining two partial values
   \returns the reduced value
*/
template <typename Range, typename Value, typename Func, typename Reduce>
Value preduce(Range range, Value identity, const Func& f, Reduce r)
{
        unsigned int n_threads = std::thread::hardware_concurrency();
        if (n_threads == 0)       // hardware_concurrency() may report 0
                n_threads = 1;    // fall back to a single worker

        // One slot per worker, each seeded with the identity element.
        std::vector<Value> partials(n_threads, identity);

        std::vector<std::thread> threads;
        threads.reserve(n_threads);
        for (unsigned int i = 0; i < n_threads; ++i) {
                // Each worker writes only its own slot, so no lock is needed;
                // 'f' is copied per thread, matching the original hand-off.
                threads.emplace_back([&range, f, &partials, i]() mutable {
                        Value local = partials[i];   // starts as 'identity'
                        for (;;) {
                                Range wp = range.get_next_workpackage();
                                if (wp.empty())
                                        break;
                                local = f(wp, local);
                        }
                        partials[i] = local;
                });
        }
        for (auto& t : threads)
                t.join();

        // Combine the partial results sequentially after all joins.
        Value result = identity;
        for (auto& p : partials)
                result = r(result, p);
        return result;
}