#include <condition_variable>
#include <cstddef>
#include <functional>
#include <future>
#include <memory>
#include <mutex>
#include <queue>
#include <thread>
#include <utility>
#include <vector>

class Thread_pool {
public:
   /// launches pool_size worker threads (VERBOSE_MSG is the project's logging macro)
   explicit Thread_pool(std::size_t pool_size = std::thread::hardware_concurrency())
   {
      VERBOSE_MSG("launching " << pool_size << " threads ...");

      for (std::size_t i = 0; i < pool_size; ++i)
         threads.emplace_back([this] {
            // worker loop: wait until a task is queued or the pool is stopped
            for (;;) {
               std::function<void()> task;
               {
                  std::unique_lock<std::mutex> lock(mutex);
                  condition.wait(lock, [this]{ return stop || !tasks.empty(); });
                  if (stop && tasks.empty())
                     return;
                  task = std::move(tasks.front());
                  tasks.pop();
               }
               task();
            }
         });
   }

   Thread_pool(const Thread_pool&) = delete;
   Thread_pool(Thread_pool&&) = delete;
   Thread_pool& operator=(const Thread_pool&) = delete;
   Thread_pool& operator=(Thread_pool&&) = delete;

   /// waits for all tasks to finish and closes the threads
   ~Thread_pool()
   {
      try {
         {
            std::unique_lock<std::mutex> lock(mutex);
            stop = true;
         }
         condition.notify_all();

         for (auto& t: threads)
            t.join();
      } catch (const std::exception& e) {
         // exceptions must not escape the destructor
      }
   }

   /// runs task and returns a future holding its result
   template <typename Task>
   auto run_packaged_task(Task&& task) -> std::future<decltype(task())>
   {
      using return_t = decltype(task());

      auto ptask = std::make_shared<std::packaged_task<return_t()>>([task](){ return task(); });
      std::future<return_t> fut = ptask->get_future();

      if (threads.empty()) {
         (*ptask)();   // no worker threads: run in the calling thread
      } else {
         {
            std::unique_lock<std::mutex> lock(mutex);
            tasks.emplace([ptask](){ (*ptask)(); });
         }
         condition.notify_one();
      }

      return fut;
   }

   /// runs task without returning a result
   template <typename Task>
   void run_task(Task&& task)
   {
      if (threads.empty()) {
         task();   // no worker threads: run in the calling thread
      } else {
         {
            std::unique_lock<std::mutex> lock(mutex);
            tasks.emplace(std::forward<Task>(task));
         }
         condition.notify_one();
      }
   }

   /// returns the number of worker threads
   std::size_t size() const { return threads.size(); }

private:
   std::vector<std::thread> threads{};
   std::queue<std::function<void()>> tasks{};
   std::mutex mutex{};
   std::condition_variable condition{};
   bool stop{false};
};
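A minimal usage sketch (not part of the original listing): it assumes the class above is available through a header named thread_pool.hpp and that the project defines the VERBOSE_MSG logging macro. run_packaged_task returns a std::future for the task's result, run_task queues fire-and-forget work, and the destructor blocks until all queued tasks have completed.

// hypothetical example program; "thread_pool.hpp" is an assumed header name
#include "thread_pool.hpp"

#include <future>
#include <iostream>
#include <vector>

int main()
{
   Thread_pool pool(4);   // four worker threads

   // collect results through futures
   std::vector<std::future<int>> results;
   for (int i = 0; i < 8; ++i)
      results.push_back(pool.run_packaged_task([i] { return i * i; }));

   // fire-and-forget task without a result
   pool.run_task([] { std::cout << "side effect\n"; });

   for (auto& r : results)
      std::cout << r.get() << '\n';
}  // ~Thread_pool() waits for remaining tasks and joins the workers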