From 5b00ddb0c4dcfc4a0748e1a80eee1f00edaf1f30 Mon Sep 17 00:00:00 2001 From: Robbert van der Helm Date: Mon, 26 Oct 2020 22:13:55 +0100 Subject: [PATCH] Fall back to waiting when socket is not yet ready This can happen with plugin groups. --- src/common/communication.h | 30 +++++++++++++++++++++++++----- src/wine-host/bridges/group.cpp | 2 +- 2 files changed, 26 insertions(+), 6 deletions(-) diff --git a/src/common/communication.h b/src/common/communication.h index 671ebaec..5664f551 100644 --- a/src/common/communication.h +++ b/src/common/communication.h @@ -179,6 +179,11 @@ class DefaultDataConverter { }; /** + * So, this is a bit of a mess. The TL;DR is that we want to use a single long + * living socket connection for `dispatch()` and another one for `audioMaster()` + * for performance reasons, but when the socket is already being written to we + * create new connections on demand. + * * For most of our sockets we can just send out our messages on the writing * side, and do a simple blocking loop on the reading side. The `dispatch()` and * `audioMaster()` calls are different. Not only do they have they come with @@ -299,12 +304,27 @@ class EventHandler { write_object(socket, event); response = read_object(socket); } else { - boost::asio::local::stream_protocol::socket secondary_socket( - io_context); - secondary_socket.connect(endpoint); + try { + boost::asio::local::stream_protocol::socket + secondary_socket(io_context); + secondary_socket.connect(endpoint); - write_object(secondary_socket, event); - response = read_object(secondary_socket); + write_object(secondary_socket, event); + response = read_object(secondary_socket); + } catch (const boost::system::system_error&) { + // So, what do we do when no one is listening on the endpoint + // yet? This can happen with plugin groups when the Wine + // host process does an `audioMaster()` call before the + // plugin is listening. If that happens we'll fall back to a + // synchronous request. 
This is not very pretty, so if + anyone can think of a better way to structure all of this + while still maintaining a long living primary socket please + let me know. + std::lock_guard lock(write_mutex); + + write_object(socket, event); + response = read_object(socket); + } } } diff --git a/src/wine-host/bridges/group.cpp b/src/wine-host/bridges/group.cpp index e8620ed0..8a42c56d 100644 --- a/src/wine-host/bridges/group.cpp +++ b/src/wine-host/bridges/group.cpp @@ -219,7 +219,7 @@ void GroupBridge::accept_requests() { // is only used within this context we don't need any locks. const size_t plugin_id = next_plugin_id.fetch_add(1); active_plugins[plugin_id] = - std::pair(std::jthread([&, request]() { + std::pair(std::jthread([this, plugin_id]() { handle_plugin_dispatch(plugin_id); }), std::move(bridge));