Fall back to waiting when socket is not yet ready

This can happen with plugin groups.
This commit is contained in:
Robbert van der Helm
2020-10-26 22:13:55 +01:00
parent 016ceccc18
commit 5b00ddb0c4
2 changed files with 26 additions and 6 deletions
+25 -5
View File
@@ -179,6 +179,11 @@ class DefaultDataConverter {
};
/**
* So, this is a bit of a mess. The TL;DR is that we want to use a single long
* living socket connection for `dispatch()` and another one for `audioMaster()`
* for performance reasons, but when the socket is already being written to we
* create new connections on demand.
*
* For most of our sockets we can just send out our messages on the writing
* side, and do a simple blocking loop on the reading side. The `dispatch()` and
* `audioMaster()` calls are different. Not only do they come with
@@ -299,12 +304,27 @@ class EventHandler {
write_object(socket, event);
response = read_object<EventResult>(socket);
} else {
boost::asio::local::stream_protocol::socket secondary_socket(
io_context);
secondary_socket.connect(endpoint);
try {
boost::asio::local::stream_protocol::socket
secondary_socket(io_context);
secondary_socket.connect(endpoint);
write_object(secondary_socket, event);
response = read_object<EventResult>(secondary_socket);
write_object(secondary_socket, event);
response = read_object<EventResult>(secondary_socket);
} catch (const boost::system::system_error&) {
// So, what do we do when no one is listening on the endpoint
// yet? This can happen with plugin groups when the Wine
// host process does an `audioMaster()` call before the
// plugin is listening. If that happens we'll fall back to a
// synchronous request. This is not very pretty, so if
// anyone can think of a better way to structure all of this
// while still maintaining a long living primary socket please
// let me know.
std::lock_guard lock(write_mutex);
write_object(socket, event);
response = read_object<EventResult>(socket);
}
}
}
+1 -1
View File
@@ -219,7 +219,7 @@ void GroupBridge::accept_requests() {
// is only used within this context we don't need any locks.
const size_t plugin_id = next_plugin_id.fetch_add(1);
active_plugins[plugin_id] =
std::pair(std::jthread([&, request]() {
std::pair(std::jthread([this, plugin_id]() {
handle_plugin_dispatch(plugin_id);
}),
std::move(bridge));