initial
This commit is contained in:
112
td/tdactor/test/actors_bugs.cpp
Normal file
112
td/tdactor/test/actors_bugs.cpp
Normal file
@@ -0,0 +1,112 @@
|
||||
//
|
||||
// Copyright Aliaksei Levin (levlam@telegram.org), Arseny Smirnov (arseny30@gmail.com) 2014-2024
|
||||
//
|
||||
// Distributed under the Boost Software License, Version 1.0. (See accompanying
|
||||
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
|
||||
//
|
||||
#include "td/actor/actor.h"
|
||||
#include "td/actor/ConcurrentScheduler.h"
|
||||
#include "td/actor/MultiTimeout.h"
|
||||
|
||||
#include "td/utils/common.h"
|
||||
#include "td/utils/logging.h"
|
||||
#include "td/utils/Random.h"
|
||||
#include "td/utils/tests.h"
|
||||
|
||||
// Regression test: mutating a MultiTimeout (cancel + re-arm) from inside its own
// timeout callback must not corrupt the timeout heap.
TEST(MultiTimeout, bug) {
  td::ConcurrentScheduler sched(0, 0);

  sched.start();
  td::unique_ptr<td::MultiTimeout> multi_timeout;
  // Plain struct passed as the opaque callback data pointer.
  struct Data {
    td::MultiTimeout *multi_timeout;
  };
  Data data;

  {
    // All MultiTimeout calls must happen under the main scheduler guard.
    auto guard = sched.get_main_guard();
    multi_timeout = td::make_unique<td::MultiTimeout>("MultiTimeout");
    data.multi_timeout = multi_timeout.get();
    multi_timeout->set_callback([](void *void_data, td::int64 key) {
      auto &data = *static_cast<Data *>(void_data);
      if (key == 1) {
        // Re-entrant mutation: cancel key 2 and schedule key 3 while key 1 fires.
        data.multi_timeout->cancel_timeout(key + 1);
        data.multi_timeout->set_timeout_in(key + 2, 1);
      } else {
        // key 3 fires second and terminates the test.
        td::Scheduler::instance()->finish();
      }
    });
    multi_timeout->set_callback_data(&data);
    multi_timeout->set_timeout_in(1, 1);
    multi_timeout->set_timeout_in(2, 2);
  }

  // Drive the scheduler until finish() is called from the callback.
  while (sched.run_main(10)) {
    // empty
  }
  sched.finish();
}
|
||||
|
||||
// Actor owning a MultiTimeout; uses a static live-instance counter so the raw
// C-style timeout callback can detect delivery after the manager was destroyed.
class TimeoutManager final : public td::Actor {
  static td::int32 count;  // number of live TimeoutManager instances

 public:
  TimeoutManager() {
    count++;

    test_timeout_.set_callback(on_test_timeout_callback);
    test_timeout_.set_callback_data(static_cast<void *>(this));
  }
  TimeoutManager(const TimeoutManager &) = delete;
  TimeoutManager &operator=(const TimeoutManager &) = delete;
  TimeoutManager(TimeoutManager &&) = delete;
  TimeoutManager &operator=(TimeoutManager &&) = delete;
  ~TimeoutManager() final {
    count--;
    LOG(INFO) << "Destroy TimeoutManager";
  }

  // Raw callback invoked by MultiTimeout; `timeout_manager_ptr` is the `this`
  // registered in the constructor. Only logs if the manager is already gone.
  static void on_test_timeout_callback(void *timeout_manager_ptr, td::int64 id) {
    CHECK(count >= 0);
    if (count == 0) {
      LOG(ERROR) << "Receive timeout after manager was closed";
      return;
    }

    auto manager = static_cast<TimeoutManager *>(timeout_manager_ptr);
    send_closure_later(manager->actor_id(manager), &TimeoutManager::test_timeout);
  }

  void test_timeout() {
    CHECK(count > 0);
    // we must yield scheduler, so run_main breaks immediately, if timeouts are handled immediately
    td::Scheduler::instance()->yield();
  }

  td::MultiTimeout test_timeout_{"TestTimeout"};
};
|
||||
|
||||
// Definition of the static live-instance counter; zero-initialized by static storage rules.
td::int32 TimeoutManager::count;
|
||||
|
||||
// Stress test: arm many random timeouts, then destroy the owning actor and keep
// running; late timeouts must be detected by TimeoutManager's counter, not crash.
TEST(MultiTimeout, Destroy) {
  td::ConcurrentScheduler sched(0, 0);

  auto timeout_manager = sched.create_actor_unsafe<TimeoutManager>(0, "TimeoutManager");
  // Raw pointer kept to poke the actor's MultiTimeout directly under the guard.
  TimeoutManager *manager = timeout_manager.get().get_actor_unsafe();
  sched.start();
  int cnt = 100;
  // Short quantum on the first and post-destruction iterations, long otherwise.
  while (sched.run_main(cnt == 100 || cnt <= 0 ? 0.001 : 10)) {
    auto guard = sched.get_main_guard();
    cnt--;
    if (cnt > 0) {
      for (int i = 0; i < 2; i++) {
        // Random keys and 2-5 ms delays; some will still be pending at destruction.
        manager->test_timeout_.set_timeout_in(td::Random::fast(0, 1000000000), td::Random::fast(2, 5) / 1000.0);
      }
    } else if (cnt == 0) {
      // Destroy the manager while timeouts may still be queued.
      timeout_manager.reset();
    } else if (cnt == -10) {
      // Give the scheduler a few more iterations before shutting down.
      td::Scheduler::instance()->finish();
    }
  }
  sched.finish();
}
|
||||
506
td/tdactor/test/actors_main.cpp
Normal file
506
td/tdactor/test/actors_main.cpp
Normal file
@@ -0,0 +1,506 @@
|
||||
//
|
||||
// Copyright Aliaksei Levin (levlam@telegram.org), Arseny Smirnov (arseny30@gmail.com) 2014-2024
|
||||
//
|
||||
// Distributed under the Boost Software License, Version 1.0. (See accompanying
|
||||
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
|
||||
//
|
||||
#include "td/actor/actor.h"
|
||||
#include "td/actor/ConcurrentScheduler.h"
|
||||
#include "td/actor/PromiseFuture.h"
|
||||
|
||||
#include "td/utils/common.h"
|
||||
#include "td/utils/logging.h"
|
||||
#include "td/utils/Random.h"
|
||||
#include "td/utils/ScopeGuard.h"
|
||||
#include "td/utils/tests.h"
|
||||
|
||||
#include <limits>
|
||||
#include <map>
|
||||
#include <memory>
|
||||
#include <utility>
|
||||
|
||||
template <class ContainerT>
|
||||
static typename ContainerT::value_type &rand_elem(ContainerT &cont) {
|
||||
CHECK(0 < cont.size() && cont.size() <= static_cast<size_t>(std::numeric_limits<int>::max()));
|
||||
return cont[td::Random::fast(0, static_cast<int>(cont.size()) - 1)];
|
||||
}
|
||||
|
||||
// Computes x^p modulo 2^32 via binary exponentiation (square-and-multiply).
// Unsigned overflow is well-defined and supplies the implicit mod 2^32.
static td::uint32 fast_pow_mod_uint32(td::uint32 x, td::uint32 p) {
  td::uint32 result = 1;
  for (td::uint32 base = x, exponent = p; exponent != 0; exponent >>= 1) {
    if ((exponent & 1) != 0) {
      result *= base;
    }
    base *= base;
  }
  return result;
}
|
||||
|
||||
// Reference implementation of x^p modulo 2^32: p successive multiplications.
// Deliberately O(p); used to burn CPU in Worker and to cross-check the fast version.
static td::uint32 slow_pow_mod_uint32(td::uint32 x, td::uint32 p) {
  td::uint32 result = 1;
  td::uint32 remaining = p;
  while (remaining != 0) {
    result *= x;
    remaining--;
  }
  return result;
}
|
||||
|
||||
struct ActorQuery {
|
||||
td::uint32 query_id{};
|
||||
td::uint32 result{};
|
||||
td::vector<int> todo;
|
||||
ActorQuery() = default;
|
||||
ActorQuery(const ActorQuery &) = delete;
|
||||
ActorQuery &operator=(const ActorQuery &) = delete;
|
||||
ActorQuery(ActorQuery &&) = default;
|
||||
ActorQuery &operator=(ActorQuery &&) = default;
|
||||
~ActorQuery() {
|
||||
LOG_CHECK(todo.empty()) << "ActorQuery lost";
|
||||
}
|
||||
int next_pow() {
|
||||
CHECK(!todo.empty());
|
||||
int res = todo.back();
|
||||
todo.pop_back();
|
||||
return res;
|
||||
}
|
||||
bool ready() {
|
||||
return todo.empty();
|
||||
}
|
||||
};
|
||||
|
||||
// Computes the expected final result of `q` without consuming it: folds every
// pending exponent over q.result using the fast modular power.
// Takes the query by const reference — it is read-only here.
static td::uint32 fast_calc(const ActorQuery &q) {
  td::uint32 result = q.result;
  for (auto x : q.todo) {
    result = fast_pow_mod_uint32(result, x);
  }
  return result;
}
|
||||
|
||||
// Leaf actor that answers pow queries synchronously through a PromiseActor.
class Worker final : public td::Actor {
 public:
  explicit Worker(int threads_n) : threads_n_(threads_n) {
  }
  // Computes x^p (mod 2^32) the slow way and fulfills the promise immediately.
  void query(td::PromiseActor<td::uint32> &&promise, td::uint32 x, td::uint32 p) {
    td::uint32 result = slow_pow_mod_uint32(x, p);
    promise.set_value(std::move(result));

    // threads_n_ kept (and referenced) only for the disabled migration experiment below.
    (void)threads_n_;
    // if (threads_n_ > 1 && td::Random::fast(0, 9) == 0) {
    //   migrate(td::Random::fast(2, threads_n));
    //}
  }

 private:
  int threads_n_;
};
|
||||
|
||||
// Solver actor: processes one exponent of a query per call, either inline or by
// delegating to a random Worker via promise/future, and randomly migrates between
// schedulers while keeping its pending futures consistent.
class QueryActor final : public td::Actor {
 public:
  class Callback {
   public:
    Callback() = default;
    Callback(const Callback &) = delete;
    Callback &operator=(const Callback &) = delete;
    Callback(Callback &&) = delete;
    Callback &operator=(Callback &&) = delete;
    virtual ~Callback() = default;
    virtual void on_result(ActorQuery &&query) = 0;
    virtual void on_closed() = 0;
  };

  explicit QueryActor(int threads_n) : threads_n_(threads_n) {
  }

  void set_callback(td::unique_ptr<Callback> callback) {
    callback_ = std::move(callback);
  }
  void set_workers(td::vector<td::ActorId<Worker>> workers) {
    workers_ = std::move(workers);
  }

  // Consumes one exponent of `query`. Small powers are usually computed inline;
  // otherwise the work is sent to a random Worker and the future is either
  // consumed immediately (if already ready) or parked in pending_.
  void query(ActorQuery &&query) {
    td::uint32 x = query.result;
    td::uint32 p = query.next_pow();
    if (td::Random::fast(0, 3) && (p <= 1000 || workers_.empty())) {
      query.result = slow_pow_mod_uint32(x, p);
      callback_->on_result(std::move(query));
    } else {
      auto future = td::Random::fast(0, 3) == 0
                        ? td::send_promise_immediately(rand_elem(workers_), &Worker::query, x, p)
                        : td::send_promise_later(rand_elem(workers_), &Worker::query, x, p);
      if (future.is_ready()) {
        query.result = future.move_as_ok();
        callback_->on_result(std::move(query));
      } else {
        // Wake us via raw_event with the query_id once the worker answers.
        future.set_event(td::EventCreator::raw(actor_id(), query.query_id));
        auto query_id = query.query_id;
        pending_.emplace(query_id, std::make_pair(std::move(future), std::move(query)));
      }
    }
    // Occasionally hop to another scheduler to exercise actor migration.
    if (threads_n_ > 1 && td::Random::fast(0, 9) == 0) {
      migrate(td::Random::fast(2, threads_n_));
    }
  }

  // Completion notification for a parked future; event.u32 is the query_id.
  // NOTE(review): `it` is not checked against pending_.end() — relies on every
  // raw event corresponding to a still-pending query; confirm against EventCreator usage.
  void raw_event(const td::Event::Raw &event) final {
    td::uint32 id = event.u32;
    auto it = pending_.find(id);
    auto future = std::move(it->second.first);
    auto query = std::move(it->second.second);
    pending_.erase(it);
    CHECK(future.is_ready());
    query.result = future.move_as_ok();
    callback_->on_result(std::move(query));
  }

  void close() {
    callback_->on_closed();
    stop();
  }

  // Migration hooks: every parked future must be re-bound to the new scheduler.
  void on_start_migrate(td::int32 sched_id) final {
    for (auto &it : pending_) {
      start_migrate(it.second.first, sched_id);
    }
  }
  void on_finish_migrate() final {
    for (auto &it : pending_) {
      finish_migrate(it.second.first);
    }
  }

 private:
  td::unique_ptr<Callback> callback_;
  std::map<td::uint32, std::pair<td::FutureActor<td::uint32>, ActorQuery>> pending_;
  td::vector<td::ActorId<Worker>> workers_;
  int threads_n_;
};
|
||||
|
||||
// Orchestrator: builds a ring of QueryActors plus a pool of Workers, pumps 10000
// queries through the ring with bounded in-flight window, verifies each result
// against fast_calc, and shuts everything down via reference counting.
class MainQueryActor final : public td::Actor {
  // Routes partial results to the next solver in the ring, finished ones back here.
  class QueryActorCallback final : public QueryActor::Callback {
   public:
    void on_result(ActorQuery &&query) final {
      if (query.ready()) {
        send_closure(parent_id_, &MainQueryActor::on_result, std::move(query));
      } else {
        send_closure(next_solver_, &QueryActor::query, std::move(query));
      }
    }
    void on_closed() final {
      send_closure(parent_id_, &MainQueryActor::on_closed);
    }
    QueryActorCallback(td::ActorId<MainQueryActor> parent_id, td::ActorId<QueryActor> next_solver)
        : parent_id_(parent_id), next_solver_(next_solver) {
    }

   private:
    td::ActorId<MainQueryActor> parent_id_;
    td::ActorId<QueryActor> next_solver_;
  };

  const int ACTORS_CNT = 10;
  const int WORKERS_CNT = 4;

 public:
  explicit MainQueryActor(int threads_n) : threads_n_(threads_n) {
  }

  // Creates the solver ring and worker pool on random schedulers, wires each
  // solver's callback to its ring successor, then kicks off work via wakeup().
  void start_up() final {
    actors_.resize(ACTORS_CNT);
    for (auto &actor : actors_) {
      auto actor_ptr = td::make_unique<QueryActor>(threads_n_);
      actor = register_actor("QueryActor", std::move(actor_ptr), threads_n_ > 1 ? td::Random::fast(2, threads_n_) : 0)
                  .release();
    }

    workers_.resize(WORKERS_CNT);
    for (auto &worker : workers_) {
      auto actor_ptr = td::make_unique<Worker>(threads_n_);
      worker = register_actor("Worker", std::move(actor_ptr), threads_n_ > 1 ? td::Random::fast(2, threads_n_) : 0)
                   .release();
    }

    for (int i = 0; i < ACTORS_CNT; i++) {
      ref_cnt_++;  // one reference per solver; released in on_closed()
      send_closure(actors_[i], &QueryActor::set_callback,
                   td::make_unique<QueryActorCallback>(actor_id(this), actors_[(i + 1) % ACTORS_CNT]));
      send_closure(actors_[i], &QueryActor::set_workers, workers_);
    }
    yield();
  }

  // A query has completed the full ring: check it against the precomputed answer.
  void on_result(ActorQuery &&query) {
    CHECK(query.ready());
    CHECK(query.result == expected_[query.query_id]);
    in_cnt_++;
    wakeup();
  }

  // Builds a fresh query (odd, strictly increasing ids) and records the expected result.
  ActorQuery create_query() {
    ActorQuery q;
    q.query_id = (query_id_ += 2);
    q.result = q.query_id;
    q.todo = {1, 1, 1, 1, 1, 1, 1, 1, 10000};
    expected_[q.query_id] = fast_calc(q);
    return q;
  }

  void on_closed() {
    ref_cnt_--;
    if (ref_cnt_ == 0) {
      td::Scheduler::instance()->finish();
    }
  }

  // Keeps up to 100 queries in flight until 10000 have been issued; once all are
  // verified, closes every solver (in_cnt_++ makes this branch fire only once).
  void wakeup() final {
    int cnt = 10000;
    while (out_cnt_ < in_cnt_ + 100 && out_cnt_ < cnt) {
      if (td::Random::fast_bool()) {
        send_closure(rand_elem(actors_), &QueryActor::query, create_query());
      } else {
        send_closure_later(rand_elem(actors_), &QueryActor::query, create_query());
      }
      out_cnt_++;
    }
    if (in_cnt_ == cnt) {
      in_cnt_++;
      ref_cnt_--;
      for (auto &actor : actors_) {
        send_closure(actor, &QueryActor::close);
      }
    }
  }

 private:
  std::map<td::uint32, td::uint32> expected_;
  td::vector<td::ActorId<QueryActor>> actors_;
  td::vector<td::ActorId<Worker>> workers_;
  int out_cnt_ = 0;   // queries issued
  int in_cnt_ = 0;    // queries verified
  int query_id_ = 1;
  int ref_cnt_ = 1;   // self + one per open solver
  int threads_n_;
};
|
||||
|
||||
// Single-worker ping-pong: sends 10000 sequential pow queries to one Worker,
// validating each answer, alternating ready-future and event-driven completion.
class SimpleActor final : public td::Actor {
 public:
  explicit SimpleActor(td::int32 threads_n) : threads_n_(threads_n) {
  }
  void start_up() final {
    auto actor_ptr = td::make_unique<Worker>(threads_n_);
    worker_ =
        register_actor("Worker", std::move(actor_ptr), threads_n_ > 1 ? td::Random::fast(2, threads_n_) : 0).release();
    yield();
  }

  // Issues the next query; finishes the scheduler after 10000 rounds.
  void wakeup() final {
    if (q_ == 10000) {
      td::Scheduler::instance()->finish();
      stop();
      return;
    }
    q_++;
    p_ = td::Random::fast_bool() ? 1 : 10000;
    auto future = td::Random::fast(0, 3) == 0 ? td::send_promise_immediately(worker_, &Worker::query, q_, p_)
                                              : td::send_promise_later(worker_, &Worker::query, q_, p_);
    if (future.is_ready()) {
      auto result = future.move_as_ok();
      CHECK(result == fast_pow_mod_uint32(q_, p_));
      yield();
    } else {
      // Park the future; raw_event fires when the worker answers.
      future.set_event(td::EventCreator::raw(actor_id(), nullptr));
      future_ = std::move(future);
    }
    // if (threads_n_ > 1 && td::Random::fast(0, 2) == 0) {
    //   migrate(td::Random::fast(1, threads_n));
    //}
  }
  void raw_event(const td::Event::Raw &event) final {
    auto result = future_.move_as_ok();
    CHECK(result == fast_pow_mod_uint32(q_, p_));
    yield();
  }

  // Migration hooks keep the single parked future valid across schedulers.
  void on_start_migrate(td::int32 sched_id) final {
    start_migrate(future_, sched_id);
  }
  void on_finish_migrate() final {
    finish_migrate(future_);
  }

 private:
  td::int32 threads_n_;
  td::ActorId<Worker> worker_;
  td::FutureActor<td::uint32> future_;
  td::uint32 q_ = 1;  // current base (also round counter)
  td::uint32 p_ = 0;  // current exponent
};
|
||||
|
||||
// Stress test root: repeatedly spawns trees of short-lived actors that die on
// random timeouts, so hangup messages frequently target already-dead actors.
class SendToDead final : public td::Actor {
 public:
  // Self-destructing node; spawns one child per level until ttl reaches 0.
  class Parent final : public td::Actor {
   public:
    explicit Parent(td::ActorShared<> parent, int ttl = 3) : parent_(std::move(parent)), ttl_(ttl) {
    }
    void start_up() final {
      // Die at a random moment within 0-2 ms.
      set_timeout_in(td::Random::fast_uint32() % 3 * 0.001);
      if (ttl_ != 0) {
        child_ = td::create_actor_on_scheduler<Parent>(
            "Child", td::Random::fast_uint32() % td::Scheduler::instance()->sched_count(), actor_shared(this),
            ttl_ - 1);
      }
    }
    void timeout_expired() final {
      stop();
    }

   private:
    td::ActorOwn<Parent> child_;
    td::ActorShared<> parent_;
    int ttl_;
  };

  // Spawns a wave of 2000 root Parents, each holding a shared reference to us.
  void start_up() final {
    for (int i = 0; i < 2000; i++) {
      td::create_actor_on_scheduler<Parent>(
          "Parent", td::Random::fast_uint32() % td::Scheduler::instance()->sched_count(), create_reference(), 4)
          .release();
    }
  }

  td::ActorShared<> create_reference() {
    ref_cnt_++;
    return actor_shared(this);
  }

  // Called once per dead root; when a whole wave is gone, start the next wave
  // (50 waves total) or finish the scheduler.
  void hangup_shared() final {
    ref_cnt_--;
    if (ref_cnt_ == 0) {
      ttl_--;
      if (ttl_ <= 0) {
        td::Scheduler::instance()->finish();
        stop();
      } else {
        start_up();
      }
    }
  }

  td::uint32 ttl_{50};      // remaining waves
  td::uint32 ref_cnt_{0};   // live root Parents in the current wave
};
|
||||
|
||||
// Currently disabled (early return) pending a fix for the storage-count check in
// the actor framework; kept so the driver code does not bit-rot.
TEST(Actors, send_to_dead) {
  //TODO: fix CHECK(storage_count_.load() == 0)
  return;
  int threads_n = 5;
  td::ConcurrentScheduler sched(threads_n, 0);

  sched.create_actor_unsafe<SendToDead>(0, "SendToDead").release();
  sched.start();
  while (sched.run_main(10)) {
    // empty
  }
  sched.finish();
}
|
||||
|
||||
// Runs SimpleActor on a worker scheduler (or main, if single-threaded) to completion.
TEST(Actors, main_simple) {
  int threads_n = 3;
  td::ConcurrentScheduler sched(threads_n, 0);

  sched.create_actor_unsafe<SimpleActor>(threads_n > 1 ? 1 : 0, "simple", threads_n).release();
  sched.start();
  while (sched.run_main(10)) {
    // empty
  }
  sched.finish();
}
|
||||
|
||||
// Full ring stress test: MainQueryActor with 9 threads drives 10000 multi-hop queries.
TEST(Actors, main) {
  int threads_n = 9;
  td::ConcurrentScheduler sched(threads_n, 0);

  sched.create_actor_unsafe<MainQueryActor>(threads_n > 1 ? 1 : 0, "MainQuery", threads_n).release();
  sched.start();
  while (sched.run_main(10)) {
    // empty
  }
  sched.finish();
}
|
||||
|
||||
// Verifies that an actor's members remain valid and usable in code that runs
// after stop() within the same method invocation.
class DoAfterStop final : public td::Actor {
 public:
  void loop() final {
    ptr = td::make_unique<int>(10);
    stop();
    // Member access after stop() must still be safe until the method returns.
    CHECK(*ptr == 10);
    td::Scheduler::instance()->finish();
  }

 private:
  td::unique_ptr<int> ptr;
};
|
||||
|
||||
// Driver for DoAfterStop on a single-threaded scheduler.
TEST(Actors, do_after_stop) {
  int threads_n = 0;
  td::ConcurrentScheduler sched(threads_n, 0);

  sched.create_actor_unsafe<DoAfterStop>(0, "DoAfterStop").release();
  sched.start();
  while (sched.run_main(10)) {
    // empty
  }
  sched.finish();
}
|
||||
|
||||
class XContext final : public td::ActorContext {
|
||||
public:
|
||||
td::int32 get_id() const final {
|
||||
return 123456789;
|
||||
}
|
||||
|
||||
void validate() {
|
||||
CHECK(x == 1234);
|
||||
}
|
||||
~XContext() final {
|
||||
x = 0;
|
||||
}
|
||||
int x = 1234;
|
||||
};
|
||||
|
||||
// Actor that installs an XContext on startup; f() receives a guard whose
// destructor runs inside this actor's context, which is what the test inspects.
class WithXContext final : public td::Actor {
 public:
  void start_up() final {
    // Previous context is intentionally discarded; XContext becomes current.
    auto old_context = set_context(std::make_shared<XContext>());
  }
  void f(td::unique_ptr<td::Guard> guard) {
  }
  void close() {
    stop();
  }
};
|
||||
|
||||
// Asserts that the currently-installed scheduler context is a live XContext.
// The static_cast is unchecked; callers guarantee the context type by construction.
static void check_context() {
  auto ptr = static_cast<XContext *>(td::Scheduler::context());
  CHECK(ptr != nullptr);
  ptr->validate();
}
|
||||
|
||||
// Checks that the actor's context is still installed while queued messages
// (and their guards) are destroyed during and after actor shutdown.
TEST(Actors, context_during_destruction) {
  int threads_n = 0;
  td::ConcurrentScheduler sched(threads_n, 0);

  {
    auto guard = sched.get_main_guard();
    auto with_context = td::create_actor<WithXContext>("WithXContext").release();
    // Runs before close; context must validate.
    send_closure(with_context, &WithXContext::f, td::create_lambda_guard([] { check_context(); }));
    send_closure_later(with_context, &WithXContext::close);
    // These are destroyed after close(); the guard lambdas must still see the context.
    send_closure(with_context, &WithXContext::f, td::create_lambda_guard([] { check_context(); }));
    send_closure(with_context, &WithXContext::f, td::create_lambda_guard([] { td::Scheduler::instance()->finish(); }));
  }
  sched.start();
  while (sched.run_main(10)) {
    // empty
  }
  sched.finish();
}
|
||||
682
td/tdactor/test/actors_simple.cpp
Normal file
682
td/tdactor/test/actors_simple.cpp
Normal file
@@ -0,0 +1,682 @@
|
||||
//
|
||||
// Copyright Aliaksei Levin (levlam@telegram.org), Arseny Smirnov (arseny30@gmail.com) 2014-2024
|
||||
//
|
||||
// Distributed under the Boost Software License, Version 1.0. (See accompanying
|
||||
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
|
||||
//
|
||||
#include "td/actor/actor.h"
|
||||
#include "td/actor/ConcurrentScheduler.h"
|
||||
#include "td/actor/MultiPromise.h"
|
||||
#include "td/actor/PromiseFuture.h"
|
||||
#include "td/actor/SleepActor.h"
|
||||
|
||||
#include "td/utils/common.h"
|
||||
#include "td/utils/logging.h"
|
||||
#include "td/utils/MpscPollableQueue.h"
|
||||
#include "td/utils/Observer.h"
|
||||
#include "td/utils/port/FileFd.h"
|
||||
#include "td/utils/port/path.h"
|
||||
#include "td/utils/port/thread.h"
|
||||
#include "td/utils/Promise.h"
|
||||
#include "td/utils/Slice.h"
|
||||
#include "td/utils/Status.h"
|
||||
#include "td/utils/StringBuilder.h"
|
||||
#include "td/utils/tests.h"
|
||||
#include "td/utils/Time.h"
|
||||
|
||||
#include <memory>
|
||||
#include <tuple>
|
||||
|
||||
// File-scope scratch StringBuilders used by the tests below to record event
// traces; the final byte of each buffer is reserved for the terminating NUL.
static const size_t BUF_SIZE = 1024 * 1024;
static char buf[BUF_SIZE];
static char buf2[BUF_SIZE];
static td::StringBuilder sb(td::MutableSlice(buf, BUF_SIZE - 1));
static td::StringBuilder sb2(td::MutableSlice(buf2, BUF_SIZE - 1));
|
||||
|
||||
// Builds the inbound event queue list for Scheduler::init; returns an empty list
// on platforms without thread/eventfd support, where queues cannot exist.
static td::vector<std::shared_ptr<td::MpscPollableQueue<td::EventFull>>> create_queues() {
#if TD_THREAD_UNSUPPORTED || TD_EVENTFD_UNSUPPORTED
  return {};
#else
  auto res = std::make_shared<td::MpscPollableQueue<td::EventFull>>();
  res->init();
  return {res};
#endif
}
|
||||
|
||||
// Verifies delivery order semantics: send_closure runs inline when the actor has
// no pending events, but falls back to queued delivery once anything is pending.
TEST(Actors, SendLater) {
  sb.clear();
  td::Scheduler scheduler;
  scheduler.init(0, create_queues(), nullptr);

  auto guard = scheduler.get_guard();
  class Worker final : public td::Actor {
   public:
    void f() {
      sb << "A";
    }
  };
  auto id = td::create_actor<Worker>("Worker");
  scheduler.run_no_guard(td::Timestamp::in(1));
  td::send_closure(id, &Worker::f);        // runs immediately -> "A"
  td::send_closure_later(id, &Worker::f);  // queued
  td::send_closure(id, &Worker::f);        // must queue behind the pending later-send
  ASSERT_STREQ("A", sb.as_cslice().c_str());
  scheduler.run_no_guard(td::Timestamp::in(1));
  ASSERT_STREQ("AAA", sb.as_cslice().c_str());
}
|
||||
|
||||
// Instrumented value type: every special member logs a marker into sb, so tests
// can assert exactly which copies/moves a message-passing path performs.
class X {
 public:
  X() {
    sb << "[cnstr_default]";
  }
  X(const X &) {
    sb << "[cnstr_copy]";
  }
  X(X &&) noexcept {
    sb << "[cnstr_move]";
  }
  X &operator=(const X &) {
    sb << "[set_copy]";
    return *this;
  }
  X &operator=(X &&) noexcept {
    sb << "[set_move]";
    return *this;
  }
  ~X() = default;
};
|
||||
|
||||
// Receiver actor with one handler per parameter-passing style under test.
class XReceiver final : public td::Actor {
 public:
  void by_const_ref(const X &) {
    sb << "[by_const_ref]";
  }
  // NOTE(review): despite the name this also takes const X& — presumably because a
  // true non-const lvalue-ref parameter cannot be bound through the closure path.
  void by_lvalue_ref(const X &) {
    sb << "[by_lvalue_ref]";
  }
  void by_value(X) {
    sb << "[by_value]";
  }
};
|
||||
|
||||
// Pins down the exact copy/move sequence for every combination of
// {temporary, lvalue} argument x {const-ref, lvalue-ref, by-value} parameter
// x {immediate, delayed} delivery, using the instrumented X type.
TEST(Actors, simple_pass_event_arguments) {
  td::Scheduler scheduler;
  scheduler.init(0, create_queues(), nullptr);

  auto guard = scheduler.get_guard();
  auto id = td::create_actor<XReceiver>("XR").release();
  scheduler.run_no_guard(td::Timestamp::in(1));

  X x;

  // check tuple
  // std::tuple<X> tx;
  // sb.clear();
  // std::tuple<X> ty(std::move(tx));
  // tx = std::move(ty);
  // ASSERT_STREQ("[cnstr_move]", sb.as_cslice().c_str());

  // Send temporary object

  // Tmp-->ConstRef: immediate delivery binds the temporary directly, no copies.
  sb.clear();
  td::send_closure(id, &XReceiver::by_const_ref, X());
  ASSERT_STREQ("[cnstr_default][by_const_ref]", sb.as_cslice().c_str());

  // Tmp-->ConstRef (Delayed): queuing requires one move into the event.
  sb.clear();
  td::send_closure_later(id, &XReceiver::by_const_ref, X());
  scheduler.run_no_guard(td::Timestamp::in(1));
  // LOG(ERROR) << sb.as_cslice();
  ASSERT_STREQ("[cnstr_default][cnstr_move][by_const_ref]", sb.as_cslice().c_str());

  // Tmp-->LvalueRef
  sb.clear();
  td::send_closure(id, &XReceiver::by_lvalue_ref, X());
  ASSERT_STREQ("[cnstr_default][by_lvalue_ref]", sb.as_cslice().c_str());

  // Tmp-->LvalueRef (Delayed)
  sb.clear();
  td::send_closure_later(id, &XReceiver::by_lvalue_ref, X());
  scheduler.run_no_guard(td::Timestamp::in(1));
  ASSERT_STREQ("[cnstr_default][cnstr_move][by_lvalue_ref]", sb.as_cslice().c_str());

  // Tmp-->Value: one move to materialize the by-value parameter.
  sb.clear();
  td::send_closure(id, &XReceiver::by_value, X());
  ASSERT_STREQ("[cnstr_default][cnstr_move][by_value]", sb.as_cslice().c_str());

  // Tmp-->Value (Delayed): move into the event, then move into the parameter.
  sb.clear();
  td::send_closure_later(id, &XReceiver::by_value, X());
  scheduler.run_no_guard(td::Timestamp::in(1));
  ASSERT_STREQ("[cnstr_default][cnstr_move][cnstr_move][by_value]", sb.as_cslice().c_str());

  // Var-->ConstRef: immediate delivery needs no copy at all.
  sb.clear();
  td::send_closure(id, &XReceiver::by_const_ref, x);
  ASSERT_STREQ("[by_const_ref]", sb.as_cslice().c_str());

  // Var-->ConstRef (Delayed): the lvalue must be copied into the queued event.
  sb.clear();
  td::send_closure_later(id, &XReceiver::by_const_ref, x);
  scheduler.run_no_guard(td::Timestamp::in(1));
  ASSERT_STREQ("[cnstr_copy][by_const_ref]", sb.as_cslice().c_str());

  // Var-->LvalueRef
  // Var-->LvalueRef (Delayed)
  // CE or strange behaviour

  // Var-->Value
  sb.clear();
  td::send_closure(id, &XReceiver::by_value, x);
  ASSERT_STREQ("[cnstr_copy][by_value]", sb.as_cslice().c_str());

  // Var-->Value (Delayed)
  sb.clear();
  td::send_closure_later(id, &XReceiver::by_value, x);
  scheduler.run_no_guard(td::Timestamp::in(1));
  ASSERT_STREQ("[cnstr_copy][cnstr_move][by_value]", sb.as_cslice().c_str());
}
|
||||
|
||||
// Prints its character once per wakeup, re-yielding each time; used to verify
// round-robin fairness of yield() across several actors.
class PrintChar final : public td::Actor {
 public:
  PrintChar(char c, int cnt) : char_(c), cnt_(cnt) {
  }
  void start_up() final {
    yield();
  }
  void wakeup() final {
    if (cnt_ == 0) {
      stop();
    } else {
      sb << char_;
      cnt_--;
      yield();
    }
  }

 private:
  char char_;
  int cnt_;
};
|
||||
|
||||
//
|
||||
// Yield must add actor to the end of queue
|
||||
//
|
||||
// Three PrintChar actors yielding in turn must interleave perfectly ("ABCABC..."),
// proving yield() appends the actor to the END of the ready queue.
TEST(Actors, simple_hand_yield) {
  td::Scheduler scheduler;
  scheduler.init(0, create_queues(), nullptr);
  sb.clear();
  int cnt = 1000;
  {
    auto guard = scheduler.get_guard();
    td::create_actor<PrintChar>("PrintA", 'A', cnt).release();
    td::create_actor<PrintChar>("PrintB", 'B', cnt).release();
    td::create_actor<PrintChar>("PrintC", 'C', cnt).release();
  }
  scheduler.run(td::Timestamp::in(1));
  td::string expected;
  for (int i = 0; i < cnt; i++) {
    expected += "ABC";
  }
  ASSERT_STREQ(expected.c_str(), sb.as_cslice().c_str());
}
|
||||
|
||||
// Message payload instrumenting cross-scheduler migration: the framework calls
// these ADL hooks when an event moves between schedulers, and each logs a marker.
class Ball {
 public:
  friend void start_migrate(Ball &ball, td::int32 sched_id) {
    sb << "start";  // runs on the source scheduler
  }
  friend void finish_migrate(Ball &ball) {
    sb2 << "finish";  // runs on the destination scheduler (hence a separate buffer)
  }
};
|
||||
|
||||
// Terminal actor of the migration test: receiving the Ball ends the run.
class Pong final : public td::Actor {
 public:
  void pong(Ball ball) {
    td::Scheduler::instance()->finish();
  }
};
|
||||
|
||||
// Sends a Ball to Pong on startup; Ping and Pong live on different schedulers,
// forcing the Ball through the migration hooks.
class Ping final : public td::Actor {
 public:
  explicit Ping(td::ActorId<Pong> pong) : pong_(pong) {
  }
  void start_up() final {
    td::send_closure(pong_, &Pong::pong, Ball());
  }

 private:
  td::ActorId<Pong> pong_;
};
|
||||
|
||||
// Places Ping and Pong on different worker schedulers and asserts that the Ball's
// migration hooks each fired exactly once (not at all on thread-less platforms).
TEST(Actors, simple_migrate) {
  sb.clear();
  sb2.clear();

  td::ConcurrentScheduler scheduler(2, 0);
  auto pong = scheduler.create_actor_unsafe<Pong>(2, "Pong").release();
  scheduler.create_actor_unsafe<Ping>(1, "Ping", pong).release();
  scheduler.start();
  while (scheduler.run_main(10)) {
  }
  scheduler.finish();
#if TD_THREAD_UNSUPPORTED || TD_EVENTFD_UNSUPPORTED
  // Without threads both actors share a scheduler and no migration happens.
  ASSERT_STREQ("", sb.as_cslice().c_str());
  ASSERT_STREQ("", sb2.as_cslice().c_str());
#else
  ASSERT_STREQ("start", sb.as_cslice().c_str());
  ASSERT_STREQ("finish", sb2.as_cslice().c_str());
#endif
}
|
||||
|
||||
// Repeatedly opens a file, registers/unregisters it with the poll subsystem, and
// closes it — stress-testing fd registration from two concurrent actors.
class OpenClose final : public td::Actor {
 public:
  explicit OpenClose(int cnt) : cnt_(cnt) {
  }
  void start_up() final {
    yield();
  }
  void wakeup() final {
    // Dummy non-null observer; never dereferenced because the pollable fd is
    // dropped immediately in the inner scope below.
    auto observer = reinterpret_cast<td::ObserverBase *>(123);
    td::CSlice file_name = "server";
    if (cnt_ > 0) {
      auto r_file_fd = td::FileFd::open(file_name, td::FileFd::Read | td::FileFd::Create);
      LOG_CHECK(r_file_fd.is_ok()) << r_file_fd.error();
      auto file_fd = r_file_fd.move_as_ok();
      // Register and immediately unregister with the poll subsystem.
      { auto pollable_fd = file_fd.get_poll_info().extract_pollable_fd(observer); }
      file_fd.close();
      cnt_--;
      yield();
    } else {
      td::Scheduler::instance()->finish();
    }
  }

 private:
  int cnt_;
};
|
||||
|
||||
// Two OpenClose actors on separate schedulers hammer open/poll/close concurrently.
TEST(Actors, open_close) {
  td::ConcurrentScheduler scheduler(2, 0);
  int cnt = 10000;  // TODO(perf) optimize
  scheduler.create_actor_unsafe<OpenClose>(1, "A", cnt).release();
  scheduler.create_actor_unsafe<OpenClose>(2, "B", cnt).release();
  scheduler.start();
  while (scheduler.run_main(10)) {
  }
  scheduler.finish();
  // Remove the scratch file created by the actors.
  td::unlink("server").ignore();
}
|
||||
|
||||
// Abstract interface actor: anything that can receive a plain `msg` notification.
class MsgActor : public td::Actor {
 public:
  virtual void msg() = 0;
};
|
||||
|
||||
// On hangup (owner destroyed), sends msg back to its owner — which is by then
// already dead; the framework must drop the message, not deliver it.
class Slave final : public td::Actor {
 public:
  td::ActorId<MsgActor> msg;
  explicit Slave(td::ActorId<MsgActor> msg) : msg(msg) {
  }
  void hangup() final {
    td::send_closure(msg, &MsgActor::msg);
  }
};
|
||||
|
||||
// Creates a Slave and immediately stops; the Slave's hangup then targets this
// dead actor. If the framework wrongly delivered that message, msg()'s CHECK on
// the sentinel would fire (the destructor poisons alive_, and loop() already
// overwrote it with `true`).
class MasterActor final : public MsgActor {
 public:
  void loop() final {
    // NOTE(review): assigns bool to the uint64 sentinel (becomes 1), so msg()
    // would fail even pre-destruction — presumably intentional so ANY delivery
    // after loop() trips the CHECK; confirm against upstream history.
    alive_ = true;
    slave = td::create_actor<Slave>("Slave", static_cast<td::ActorId<MsgActor>>(actor_id(this)));
    stop();
  }
  td::ActorOwn<Slave> slave;

  MasterActor() = default;
  MasterActor(const MasterActor &) = delete;
  MasterActor &operator=(const MasterActor &) = delete;
  MasterActor(MasterActor &&) = delete;
  MasterActor &operator=(MasterActor &&) = delete;
  ~MasterActor() final {
    alive_ = 987654321;  // poison value for use-after-destroy detection
  }
  // Must never actually run; would CHECK-fail on any sentinel mismatch.
  void msg() final {
    CHECK(alive_ == 123456789);
  }
  td::uint64 alive_ = 123456789;
};
|
||||
|
||||
// Runs MasterActor once; passes iff the Slave's post-mortem message is dropped.
TEST(Actors, call_after_destruct) {
  td::Scheduler scheduler;
  scheduler.init(0, create_queues(), nullptr);
  {
    auto guard = scheduler.get_guard();
    td::create_actor<MasterActor>("Master").release();
  }
  scheduler.run(td::Timestamp::in(1));
}
|
||||
|
||||
// Receiver that checks the link token carried by each message matches the token
// the sender claims to have used.
class LinkTokenSlave final : public td::Actor {
 public:
  explicit LinkTokenSlave(td::ActorShared<> parent) : parent_(std::move(parent)) {
  }
  void add(td::uint64 link_token) {
    CHECK(link_token == get_link_token());
  }
  void close() {
    stop();  // dropping parent_ triggers the master's hangup_shared with token 123
  }

 private:
  td::ActorShared<> parent_;
};
|
||||
|
||||
// Sends many messages to one slave, each through an ActorShared carrying a fresh
// link token, cycling through all four send mechanisms; finally verifies its own
// hangup token (123, set at child creation).
class LinkTokenMasterActor final : public td::Actor {
 public:
  explicit LinkTokenMasterActor(int cnt) : cnt_(cnt) {
  }
  void start_up() final {
    child_ = td::create_actor<LinkTokenSlave>("Slave", actor_shared(this, 123)).release();
    yield();
  }
  // Batches 100 sends per loop() so the scheduler is not starved.
  void loop() final {
    for (int i = 0; i < 100 && cnt_ > 0; cnt_--, i++) {
      auto token = static_cast<td::uint64>(cnt_) + 1;  // +1: token 0 is reserved/empty
      switch (i % 4) {
        case 0: {
          td::send_closure(td::ActorShared<LinkTokenSlave>(child_, token), &LinkTokenSlave::add, token);
          break;
        }
        case 1: {
          td::send_closure_later(td::ActorShared<LinkTokenSlave>(child_, token), &LinkTokenSlave::add, token);
          break;
        }
        case 2: {
          td::EventCreator::closure(td::ActorShared<LinkTokenSlave>(child_, token), &LinkTokenSlave::add, token)
              .try_emit();
          break;
        }
        case 3: {
          td::EventCreator::closure(td::ActorShared<LinkTokenSlave>(child_, token), &LinkTokenSlave::add, token)
              .try_emit_later();
          break;
        }
      }
    }
    if (cnt_ == 0) {
      td::send_closure(child_, &LinkTokenSlave::close);
    } else {
      yield();
    }
  }

  // Fired when the slave stops and releases its ActorShared back-reference.
  void hangup_shared() final {
    CHECK(get_link_token() == 123);
    td::Scheduler::instance()->finish();
    stop();
  }

 private:
  int cnt_;
  td::ActorId<LinkTokenSlave> child_;
};
|
||||
|
||||
// End-to-end check that link tokens survive all four message-delivery paths.
TEST(Actors, link_token) {
  td::ConcurrentScheduler scheduler(0, 0);
  auto cnt = 100000;
  scheduler.create_actor_unsafe<LinkTokenMasterActor>(0, "A", cnt).release();
  scheduler.start();
  while (scheduler.run_main(10)) {
  }
  scheduler.finish();
}
|
||||
|
||||
// Documents how PromiseCreator lambdas observe errors.
TEST(Actors, promise) {
  int value = -1;
  // A lambda that accepts the bare value type never sees the error: on
  // set_error it is invoked with a default-constructed value (0 here, as the
  // following assertion pins down).
  td::Promise<int> p1 = td::PromiseCreator::lambda([&](int x) { value = x; });
  p1.set_error(td::Status::Error("Test error"));
  ASSERT_EQ(0, value);
  // A lambda that accepts Result<> is invoked with the error itself.
  td::Promise<td::int32> p2 = td::PromiseCreator::lambda([&](td::Result<td::int32> x) { value = 1; });
  p2.set_error(td::Status::Error("Test error"));
  ASSERT_EQ(1, value);
}
|
||||
|
||||
class LaterSlave final : public td::Actor {
|
||||
public:
|
||||
explicit LaterSlave(td::ActorShared<> parent) : parent_(std::move(parent)) {
|
||||
}
|
||||
|
||||
private:
|
||||
td::ActorShared<> parent_;
|
||||
|
||||
void hangup() final {
|
||||
sb << "A";
|
||||
td::send_closure(actor_id(this), &LaterSlave::finish);
|
||||
}
|
||||
void finish() {
|
||||
sb << "B";
|
||||
stop();
|
||||
}
|
||||
};
|
||||
|
||||
// Master for the `later` test: creates three LaterSlaves, then drops all
// owning references at once in loop(), triggering hangup() in each child.
// It waits for all three hangup_shared() notifications before finishing.
class LaterMasterActor final : public td::Actor {
  int cnt_ = 3;  // children yet to report back
  td::vector<td::ActorOwn<LaterSlave>> children_;
  void start_up() final {
    for (int i = 0; i < cnt_; i++) {
      children_.push_back(td::create_actor<LaterSlave>("B", actor_shared(this)));
    }
    yield();  // defer the drop to the next loop() iteration
  }
  void loop() final {
    // Releasing ownership hangs up every child simultaneously.
    children_.clear();
  }
  void hangup_shared() final {
    // Called once per dying child (their parent_ references us).
    if (!--cnt_) {
      td::Scheduler::instance()->finish();
      stop();
    }
  }
};
|
||||
|
||||
// Checks mailbox ordering of deferred calls: all hangup()s ("A") run before
// any of the mailbox-deferred finish() calls ("B").
TEST(Actors, later) {
  sb.clear();
  td::ConcurrentScheduler scheduler(0, 0);
  scheduler.create_actor_unsafe<LaterMasterActor>(0, "A").release();
  scheduler.start();
  while (scheduler.run_main(10)) {
  }
  scheduler.finish();
  ASSERT_STREQ(sb.as_cslice().c_str(), "AAABBB");
}
|
||||
|
||||
// Second half of the MultiPromise test: a MultiPromiseActorSafe that goes out
// of scope while child promises are still pending must fire its completion
// promise with success only after every child promise is fulfilled.
class MultiPromise2 final : public td::Actor {
 public:
  void start_up() final {
    td::MultiPromiseActorSafe multi_promise{"MultiPromiseActor2"};

    // Completion callback: all sleepers done => the whole test is done.
    multi_promise.add_promise(td::PromiseCreator::lambda([](td::Result<td::Unit> result) {
      result.ensure();  // success is required on this path
      td::Scheduler::instance()->finish();
    }));

    // Spawn ten sleepers, each holding one promise of the group.
    int sleeper_count = 10;
    while (sleeper_count-- > 0) {
      td::create_actor<td::SleepActor>("Sleep", 0.1, multi_promise.get_promise()).release();
    }
  }
};
|
||||
|
||||
// First half of the MultiPromise test: a MultiPromiseActorSafe dropped
// without any of its promises ever being handed out.  The test pins down
// that its completion promise then receives an error, after which the
// success-path half (MultiPromise2) is started.
class MultiPromise1 final : public td::Actor {
 public:
  void start_up() final {
    td::MultiPromiseActorSafe multi_promise{"MultiPromiseActor1"};
    multi_promise.add_promise(td::PromiseCreator::lambda([](td::Result<td::Unit> result) {
      // Presumably an error because the multi-promise actor is destroyed
      // before completing — the CHECK pins the observed behavior.
      CHECK(result.is_error());
      td::create_actor<MultiPromise2>("B").release();
    }));
  }
};
|
||||
|
||||
// Runs MultiPromise1 (error path), which chains into MultiPromise2 (success
// path); MultiPromise2 finishes the scheduler.
TEST(Actors, MultiPromise) {
  td::ConcurrentScheduler scheduler(0, 0);
  scheduler.create_actor_unsafe<MultiPromise1>(0, "A").release();
  scheduler.start();
  while (scheduler.run_main(10)) {
  }
  scheduler.finish();
}
|
||||
|
||||
// Checks the PromiseFuture fast path: a value set on the promise side is
// immediately readable through the paired future, synchronously, without a
// scheduler round-trip.
class FastPromise final : public td::Actor {
 public:
  void start_up() final {
    td::PromiseFuture<int> pair;
    auto producer = pair.move_promise();
    auto consumer = pair.move_future();
    producer.set_value(123);
    auto received = consumer.move_as_ok();
    CHECK(received == 123);
    td::Scheduler::instance()->finish();
  }
};
|
||||
|
||||
// Boilerplate driver for the FastPromise actor above.
TEST(Actors, FastPromise) {
  td::ConcurrentScheduler scheduler(0, 0);
  scheduler.create_actor_unsafe<FastPromise>(0, "A").release();
  scheduler.start();
  while (scheduler.run_main(10)) {
  }
  scheduler.finish();
}
|
||||
|
||||
// Regression actor: stop() is invoked again from inside tear_down(), which
// itself runs as a consequence of the first stop().  The scheduler must
// tolerate this re-entrant stop without crashing.
class StopInTeardown final : public td::Actor {
  void loop() final {
    stop();
  }
  void tear_down() final {
    stop();  // intentionally re-entrant — this is the case under test
    td::Scheduler::instance()->finish();
  }
};
|
||||
|
||||
// Driver for StopInTeardown: passes if the double stop() does not crash.
TEST(Actors, stop_in_teardown) {
  td::ConcurrentScheduler scheduler(0, 0);
  scheduler.create_actor_unsafe<StopInTeardown>(0, "A").release();
  scheduler.start();
  while (scheduler.run_main(10)) {
  }
  scheduler.finish();
}
|
||||
|
||||
// Checks that closures sent to an actor are queued in its mailbox rather than
// executed inline: two g() messages are sent from a timer callback, and the
// callback then verifies that the resulting f() has not yet run.
class AlwaysWaitForMailbox final : public td::Actor {
 public:
  void start_up() final {
    // SleepActor fires this promise after 0.1s.  Both sends happen
    // synchronously inside the callback; f() must still be pending when the
    // CHECK executes.
    td::create_actor<td::SleepActor>("Sleep", 0.1,
                                     td::PromiseCreator::lambda([actor_id = actor_id(this), ptr = this](td::Unit) {
                                       td::send_closure(actor_id, &AlwaysWaitForMailbox::g);
                                       td::send_closure(actor_id, &AlwaysWaitForMailbox::g);
                                       // NOTE(review): raw `ptr` capture assumes this actor outlives
                                       // the sleep — it does, since it only stops after f() runs.
                                       CHECK(!ptr->was_f_);
                                     }))
        .release();
  }

  void f() {
    was_f_ = true;
    td::Scheduler::instance()->finish();
  }
  void g() {
    // Bounce through the mailbox one more time before setting the flag.
    td::send_closure(actor_id(this), &AlwaysWaitForMailbox::f);
  }

 private:
  bool was_f_{false};
};
|
||||
|
||||
// Driver for AlwaysWaitForMailbox; the actor finishes the scheduler from f().
TEST(Actors, always_wait_for_mailbox) {
  td::ConcurrentScheduler scheduler(0, 0);
  scheduler.create_actor_unsafe<AlwaysWaitForMailbox>(0, "A").release();
  scheduler.start();
  while (scheduler.run_main(10)) {
  }
  scheduler.finish();
}
|
||||
|
||||
#if !TD_THREAD_UNSUPPORTED && !TD_EVENTFD_UNSUPPORTED
|
||||
// Ten foreign (non-scheduler) threads each send one message to the same
// actor; the actor counts them and finishes once all arrived.  Exercises the
// cross-thread send path guarded by get_send_guard().
TEST(Actors, send_from_other_threads) {
  td::ConcurrentScheduler scheduler(1, 0);
  int thread_n = 10;
  // Counts down one dec() per sender thread.
  class Listener final : public td::Actor {
   public:
    explicit Listener(int cnt) : cnt_(cnt) {
    }
    void dec() {
      if (--cnt_ == 0) {
        td::Scheduler::instance()->finish();
      }
    }

   private:
    int cnt_;  // messages still expected
  };

  auto A = scheduler.create_actor_unsafe<Listener>(1, "A", thread_n).release();
  scheduler.start();
  td::vector<td::thread> threads(thread_n);
  for (auto &thread : threads) {
    thread = td::thread([&A, &scheduler] {
      // get_send_guard() makes sending from outside the scheduler legal.
      auto guard = scheduler.get_send_guard();
      td::send_closure(A, &Listener::dec);
    });
  }
  while (scheduler.run_main(10)) {
  }
  for (auto &thread : threads) {
    thread.join();
  }
  scheduler.finish();
}
|
||||
#endif
|
||||
|
||||
// Target actor for MultiPromiseSendClosureLaterTest: records via *step that
// its method ran, and checks it ran before anything else advanced the step.
class DelayedCall final : public td::Actor {
 public:
  void on_called(int *step) {
    CHECK(*step == 0);  // must execute before the multi-promise completes
    *step = 1;
  }
};
|
||||
|
||||
// Regression test: a send_closure_later() issued while a MultiPromiseActor
// lock is still held must be delivered BEFORE the multi-promise callback
// fires, even though the lock is released immediately afterwards.
// step_ progression: 0 (initial) -> 1 (DelayedCall ran) -> 2 (callback ran).
class MultiPromiseSendClosureLaterTest final : public td::Actor {
 public:
  void start_up() final {
    delayed_call_ = td::create_actor<DelayedCall>("DelayedCall").release();
    mpa_.add_promise(td::PromiseCreator::lambda([this](td::Unit) {
      // The delayed closure must already have run.
      CHECK(step_ == 1);
      step_++;
      td::Scheduler::instance()->finish();
    }));
    auto lock = mpa_.get_promise();  // keeps the multi-promise open
    td::send_closure_later(delayed_call_, &DelayedCall::on_called, &step_);
    lock.set_value(td::Unit());  // release: the multi-promise may now complete
  }

  void tear_down() final {
    CHECK(step_ == 2);  // both the closure and the callback must have run
  }

 private:
  int step_ = 0;
  td::MultiPromiseActor mpa_{"MultiPromiseActor"};
  td::ActorId<DelayedCall> delayed_call_;
};
|
||||
|
||||
// Driver for MultiPromiseSendClosureLaterTest; ordering CHECKs are inside
// the actor itself.
TEST(Actors, MultiPromiseSendClosureLater) {
  td::ConcurrentScheduler scheduler(0, 0);
  scheduler.create_actor_unsafe<MultiPromiseSendClosureLaterTest>(0, "MultiPromiseSendClosureLaterTest").release();
  scheduler.start();
  while (scheduler.run_main(1)) {
  }
  scheduler.finish();
}
|
||||
188
td/tdactor/test/actors_workers.cpp
Normal file
188
td/tdactor/test/actors_workers.cpp
Normal file
@@ -0,0 +1,188 @@
|
||||
//
|
||||
// Copyright Aliaksei Levin (levlam@telegram.org), Arseny Smirnov (arseny30@gmail.com) 2014-2024
|
||||
//
|
||||
// Distributed under the Boost Software License, Version 1.0. (See accompanying
|
||||
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
|
||||
//
|
||||
#include "td/actor/actor.h"
|
||||
#include "td/actor/ConcurrentScheduler.h"
|
||||
|
||||
#include "td/utils/common.h"
|
||||
#include "td/utils/SliceBuilder.h"
|
||||
#include "td/utils/tests.h"
|
||||
#include "td/utils/Time.h"
|
||||
|
||||
class PowerWorker final : public td::Actor {
|
||||
public:
|
||||
class Callback {
|
||||
public:
|
||||
Callback() = default;
|
||||
Callback(const Callback &) = delete;
|
||||
Callback &operator=(const Callback &) = delete;
|
||||
Callback(Callback &&) = delete;
|
||||
Callback &operator=(Callback &&) = delete;
|
||||
virtual ~Callback() = default;
|
||||
virtual void on_ready(int query, int res) = 0;
|
||||
virtual void on_closed() = 0;
|
||||
};
|
||||
void set_callback(td::unique_ptr<Callback> callback) {
|
||||
callback_ = std::move(callback);
|
||||
}
|
||||
void task(td::uint32 x, td::uint32 p) {
|
||||
td::uint32 res = 1;
|
||||
for (td::uint32 i = 0; i < p; i++) {
|
||||
res *= x;
|
||||
}
|
||||
callback_->on_ready(x, res);
|
||||
}
|
||||
void close() {
|
||||
callback_->on_closed();
|
||||
stop();
|
||||
}
|
||||
|
||||
private:
|
||||
td::unique_ptr<Callback> callback_;
|
||||
};
|
||||
|
||||
// Dispatcher for the workers benchmark.  Keeps one task outstanding per
// worker until queries_n tasks have been issued, then closes every worker
// and waits for all acknowledgements.
// ref_cnt_ counts outstanding references: one per still-open worker
// (released in on_closed) plus one per in-flight task (released in on_ready).
class Manager final : public td::Actor {
 public:
  Manager(int queries_n, int query_size, td::vector<td::ActorId<PowerWorker>> workers)
      : workers_(std::move(workers))
      , ref_cnt_(static_cast<int>(workers_.size()))
      , left_query_(queries_n)
      , query_size_(query_size) {
  }

  // Bridges PowerWorker results back into the Manager's mailbox.
  class Callback final : public PowerWorker::Callback {
   public:
    Callback(td::ActorId<Manager> actor_id, int worker_id) : actor_id_(actor_id), worker_id_(worker_id) {
    }
    void on_ready(int query, int result) final {
      td::send_closure(actor_id_, &Manager::on_ready, worker_id_, query, result);
    }
    void on_closed() final {
      td::send_closure_later(actor_id_, &Manager::on_closed, worker_id_);
    }

   private:
    td::ActorId<Manager> actor_id_;
    int worker_id_;  // index into Manager::workers_
  };

  void start_up() final {
    // Prime every worker with its callback and one initial task.
    int i = 0;
    for (auto &worker : workers_) {
      ref_cnt_++;  // one in-flight task per worker
      td::send_closure_later(worker, &PowerWorker::set_callback, td::make_unique<Callback>(actor_id(this), i));
      i++;
      td::send_closure_later(worker, &PowerWorker::task, 3, query_size_);
      left_query_--;
    }
  }

  void on_ready(int worker_id, int query, int res) {
    ref_cnt_--;  // the task that just completed
    if (left_query_ == 0) {
      // No more work: ask this worker to shut down.
      td::send_closure(workers_[worker_id], &PowerWorker::close);
    } else {
      ref_cnt_++;  // the replacement task
      td::send_closure(workers_[worker_id], &PowerWorker::task, 3, query_size_);
      left_query_--;
    }
  }

  void on_closed(int worker_id) {
    ref_cnt_--;  // the worker itself
    if (ref_cnt_ == 0) {
      // Last worker gone and no tasks in flight: end the benchmark.
      td::Scheduler::instance()->finish();
      stop();
    }
  }

 private:
  td::vector<td::ActorId<PowerWorker>> workers_;
  int ref_cnt_;     // open workers + in-flight tasks
  int left_query_;  // queries not yet dispatched
  int query_size_;  // exponent passed to PowerWorker::task
};
|
||||
|
||||
static void test_workers(int threads_n, int workers_n, int queries_n, int query_size) {
|
||||
td::ConcurrentScheduler sched(threads_n, 0);
|
||||
|
||||
td::vector<td::ActorId<PowerWorker>> workers;
|
||||
for (int i = 0; i < workers_n; i++) {
|
||||
int thread_id = threads_n ? i % (threads_n - 1) + 2 : 0;
|
||||
workers.push_back(sched.create_actor_unsafe<PowerWorker>(thread_id, PSLICE() << "worker" << i).release());
|
||||
}
|
||||
sched.create_actor_unsafe<Manager>(threads_n ? 1 : 0, "Manager", queries_n, query_size, std::move(workers)).release();
|
||||
|
||||
sched.start();
|
||||
while (sched.run_main(10)) {
|
||||
// empty
|
||||
}
|
||||
sched.finish();
|
||||
|
||||
// sched.test_one_thread_run();
|
||||
}
|
||||
|
||||
// Throughput matrix: few large queries vs. many tiny queries, on zero, two
// and nine extra scheduler threads (args: threads, workers, queries, size).
TEST(Actors, workers_big_query_one_thread) {
  test_workers(0, 10, 1000, 300000);
}

TEST(Actors, workers_big_query_two_threads) {
  test_workers(2, 10, 1000, 300000);
}

TEST(Actors, workers_big_query_nine_threads) {
  test_workers(9, 10, 1000, 300000);
}

TEST(Actors, workers_small_query_one_thread) {
  test_workers(0, 10, 100000, 1);
}

TEST(Actors, workers_small_query_two_threads) {
  test_workers(2, 10, 100000, 1);
}

TEST(Actors, workers_small_query_nine_threads) {
  test_workers(9, 10, 10000, 1);
}
|
||||
|
||||
class SenderActor;
|
||||
|
||||
// Message sink for the send_closure_while_finish test.  receive() is
// intentionally a no-op: the test only checks that cross-thread delivery
// during scheduler shutdown does not crash.
class ReceiverActor final : public td::Actor {
 public:
  void receive(td::ActorId<SenderActor>) {
  }
};
|
||||
|
||||
class SenderActor final : public td::Actor {
|
||||
public:
|
||||
explicit SenderActor(td::ActorId<ReceiverActor> actor_id) : actor_id_(std::move(actor_id)) {
|
||||
}
|
||||
|
||||
private:
|
||||
td::ActorId<ReceiverActor> actor_id_;
|
||||
|
||||
void loop() final {
|
||||
for (int i = 0; i < 10000; i++) {
|
||||
send_closure(actor_id_, &ReceiverActor::receive, actor_id(this));
|
||||
}
|
||||
set_timeout_in(0.001);
|
||||
}
|
||||
};
|
||||
|
||||
// Stress test: SenderActor keeps sending cross-thread messages while the
// scheduler is finishing; must shut down cleanly without crashing.
TEST(Actors, send_closure_while_finish) {
  td::ConcurrentScheduler sched(1, 0);

  auto receiver = sched.create_actor_unsafe<ReceiverActor>(0, "ReceiverActor").release();
  sched.create_actor_unsafe<SenderActor>(1, "SenderActor", receiver).release();

  sched.start();
  // Run for a fixed wall-clock budget, then tear down mid-flood.
  auto end_time = td::Time::now() + 0.2;
  while (td::Time::now() < end_time) {
    sched.run_main(0.1);
  }
  sched.finish();
}
|
||||
Reference in New Issue
Block a user