Drop all additional IO contexts

Not really needed (since the only other thing happening in the IO
context is processing stdio from the Wine process) and it was causing
some impossible to debug malloc failures in Boost.Asio.
This commit is contained in:
Robbert van der Helm
2020-10-26 17:32:37 +01:00
parent cde7b4ec67
commit ac17539ef3
2 changed files with 17 additions and 17 deletions
+14 -15
View File
@@ -206,8 +206,9 @@ class EventHandler {
* Sets up a single main socket for this type of events. The sockets won't
* be active until `connect()` gets called.
*
* @param io_context The IO context the sockets should be bound to. Relevant
* when doing asynchronous operations.
* @param io_context The IO context the sockets should be bound to.
* Additional incoming connections will be handled asynchronously within
* this IO context.
* @param endpoint The socket endpoint used for this event handler.
* @param listen If `true`, start listening on the sockets. Incoming
* connections will be accepted when `connect()` gets called. This should
@@ -355,10 +356,9 @@ class EventHandler {
// thread to handle the request. When `socket` closes and this loop
// breaks, the listener and any still active threads will be cleaned up
// before this function exits.
boost::asio::io_context secondary_context{};
// The previous acceptor has already been shut down by
// `EventHandler::connect()`
acceptor.emplace(secondary_context, endpoint);
acceptor.emplace(io_context, endpoint);
// This works the exact same way as `active_plugins` and
// `next_plugin_id` in `GroupBridge`
@@ -400,7 +400,7 @@ class EventHandler {
// When we have processed this request, we'll join the
// thread again with the thread that's handling
// `secondary_context`.
boost::asio::post(secondary_context, [&, request_id]() {
boost::asio::post(io_context, [&, request_id]() {
std::lock_guard lock(
active_secondary_requests_mutex);
@@ -412,9 +412,6 @@ class EventHandler {
std::move(secondary_socket));
});
std::jthread secondary_requests_handler(
[&]() { secondary_context.run(); });
while (true) {
try {
auto event = read_object<Event>(socket);
@@ -441,10 +438,11 @@ class EventHandler {
}
}
// When the socket gets terminated we'll stop accepting new connections
// and thus terminate `secondary_requests_handler`. Dropping
// `active_secondary_requests` will wait for all threads to join.
secondary_context.stop();
// After the main socket gets terminated (during shutdown) we'll make
// sure all outstanding jobs have been processed and then drop all work
// from the IO context
std::lock_guard lock(active_secondary_requests_mutex);
io_context.stop();
}
private:
@@ -492,9 +490,10 @@ class EventHandler {
}
/**
* The main IO context. New sockets created during `send()` will be bound to
* this context. In `receive()` we'll create a new IO context since we want
* to do all listening there on a dedicated thread.
* The main IO context for this application. New sockets created during
* `send()` will be bound to this context, and in `receive()` we'll
* asynchronously listen for additional incoming connections through this
* context.
*/
boost::asio::io_context& io_context;
+3 -2
View File
@@ -97,8 +97,9 @@ class Logger {
void log_get_parameter_response(float value);
void log_set_parameter(int index, float value);
void log_set_parameter_response();
// If is_dispatch is true, then use opcode names from the plugin's dispatch
// function. Otherwise use names for the host callback function opcodes.
// If `is_dispatch` is `true`, then use opcode names from the plugin's
// dispatch function. Otherwise use names for the host callback function
// opcodes.
void log_event(bool is_dispatch,
int opcode,
int index,