Allow mutual recursion on all CLAP callbacks

This commit is contained in:
Robbert van der Helm
2022-10-30 16:47:37 +01:00
parent 9dbaec4071
commit ab3d61cc91
4 changed files with 175 additions and 135 deletions
@@ -128,6 +128,10 @@ class clap_plugin_proxy {
/** /**
* Asynchronously run a function on the host's main thread, returning the * Asynchronously run a function on the host's main thread, returning the
* result as a future. * result as a future.
*
* Instead of calling this directly, `ClapBridge::run_on_main_thread()`
* should be used instead. That also handles mutually recursive main thread
* callbacks.
*/ */
template <std::invocable F> template <std::invocable F>
std::future<std::invoke_result_t<F>> run_on_main_thread(F&& fn) { std::future<std::invoke_result_t<F>> run_on_main_thread(F&& fn) {
+89 -88
View File
@@ -60,11 +60,10 @@ ClapPluginBridge::ClapPluginBridge(const ghc::filesystem::path& plugin_path)
const auto& [plugin_proxy, _] = const auto& [plugin_proxy, _] =
get_proxy(request.owner_instance_id); get_proxy(request.owner_instance_id);
plugin_proxy run_on_main_thread(plugin_proxy, [host = plugin_proxy
.run_on_main_thread([host = plugin_proxy.host_]() { .host_]() {
host->request_restart(host); host->request_restart(host);
}) }).wait();
.wait();
return Ack{}; return Ack{};
}, },
@@ -73,11 +72,10 @@ ClapPluginBridge::ClapPluginBridge(const ghc::filesystem::path& plugin_path)
const auto& [plugin_proxy, _] = const auto& [plugin_proxy, _] =
get_proxy(request.owner_instance_id); get_proxy(request.owner_instance_id);
plugin_proxy run_on_main_thread(plugin_proxy, [host = plugin_proxy
.run_on_main_thread([host = plugin_proxy.host_]() { .host_]() {
host->request_process(host); host->request_process(host);
}) }).wait();
.wait();
return Ack{}; return Ack{};
}, },
@@ -88,15 +86,16 @@ ClapPluginBridge::ClapPluginBridge(const ghc::filesystem::path& plugin_path)
const auto& [plugin_proxy, _] = const auto& [plugin_proxy, _] =
get_proxy(request.owner_instance_id); get_proxy(request.owner_instance_id);
return plugin_proxy return run_on_main_thread(
.run_on_main_thread( plugin_proxy,
[&, host = plugin_proxy.host_, [&, host = plugin_proxy.host_,
audio_ports = plugin_proxy.host_extensions_ audio_ports =
.audio_ports]() { plugin_proxy.host_extensions_
return audio_ports .audio_ports]() {
->is_rescan_flag_supported( return audio_ports
host, request.flag); ->is_rescan_flag_supported(
}) host, request.flag);
})
.get(); .get();
}, },
[&](const clap::ext::audio_ports::host::Rescan& request) [&](const clap::ext::audio_ports::host::Rescan& request)
@@ -104,13 +103,13 @@ ClapPluginBridge::ClapPluginBridge(const ghc::filesystem::path& plugin_path)
const auto& [plugin_proxy, _] = const auto& [plugin_proxy, _] =
get_proxy(request.owner_instance_id); get_proxy(request.owner_instance_id);
plugin_proxy run_on_main_thread(
.run_on_main_thread( plugin_proxy,
[&, host = plugin_proxy.host_, [&, host = plugin_proxy.host_,
audio_ports = audio_ports =
plugin_proxy.host_extensions_.audio_ports]() { plugin_proxy.host_extensions_.audio_ports]() {
audio_ports->rescan(host, request.flags); audio_ports->rescan(host, request.flags);
}) })
.wait(); .wait();
return Ack{}; return Ack{};
@@ -120,13 +119,13 @@ ClapPluginBridge::ClapPluginBridge(const ghc::filesystem::path& plugin_path)
const auto& [plugin_proxy, _] = const auto& [plugin_proxy, _] =
get_proxy(request.owner_instance_id); get_proxy(request.owner_instance_id);
plugin_proxy run_on_main_thread(
.run_on_main_thread( plugin_proxy,
[&, host = plugin_proxy.host_, [&, host = plugin_proxy.host_,
audio_ports_config = plugin_proxy.host_extensions_ audio_ports_config = plugin_proxy.host_extensions_
.audio_ports_config]() { .audio_ports_config]() {
audio_ports_config->rescan(host); audio_ports_config->rescan(host);
}) })
.wait(); .wait();
return Ack{}; return Ack{};
@@ -185,13 +184,12 @@ ClapPluginBridge::ClapPluginBridge(const ghc::filesystem::path& plugin_path)
const auto& [plugin_proxy, _] = const auto& [plugin_proxy, _] =
get_proxy(request.owner_instance_id); get_proxy(request.owner_instance_id);
plugin_proxy run_on_main_thread(
.run_on_main_thread( plugin_proxy,
[&, host = plugin_proxy.host_, [&, host = plugin_proxy.host_,
latency = latency = plugin_proxy.host_extensions_.latency]() {
plugin_proxy.host_extensions_.latency]() { latency->changed(host);
latency->changed(host); })
})
.wait(); .wait();
return Ack{}; return Ack{};
@@ -201,13 +199,13 @@ ClapPluginBridge::ClapPluginBridge(const ghc::filesystem::path& plugin_path)
const auto& [plugin_proxy, _] = const auto& [plugin_proxy, _] =
get_proxy(request.owner_instance_id); get_proxy(request.owner_instance_id);
plugin_proxy run_on_main_thread(
.run_on_main_thread( plugin_proxy,
[&, host = plugin_proxy.host_, [&, host = plugin_proxy.host_,
note_name = note_name =
plugin_proxy.host_extensions_.note_name]() { plugin_proxy.host_extensions_.note_name]() {
note_name->changed(host); note_name->changed(host);
}) })
.wait(); .wait();
return Ack{}; return Ack{};
@@ -219,14 +217,15 @@ ClapPluginBridge::ClapPluginBridge(const ghc::filesystem::path& plugin_path)
const auto& [plugin_proxy, _] = const auto& [plugin_proxy, _] =
get_proxy(request.owner_instance_id); get_proxy(request.owner_instance_id);
return plugin_proxy return run_on_main_thread(
.run_on_main_thread( plugin_proxy,
[host = plugin_proxy.host_, [host = plugin_proxy.host_,
note_ports = plugin_proxy.host_extensions_ note_ports =
.note_ports]() { plugin_proxy.host_extensions_
return note_ports->supported_dialects( .note_ports]() {
host); return note_ports
}) ->supported_dialects(host);
})
.get(); .get();
}, },
[&](const clap::ext::note_ports::host::Rescan& request) [&](const clap::ext::note_ports::host::Rescan& request)
@@ -234,13 +233,13 @@ ClapPluginBridge::ClapPluginBridge(const ghc::filesystem::path& plugin_path)
const auto& [plugin_proxy, _] = const auto& [plugin_proxy, _] =
get_proxy(request.owner_instance_id); get_proxy(request.owner_instance_id);
plugin_proxy run_on_main_thread(
.run_on_main_thread( plugin_proxy,
[&, host = plugin_proxy.host_, [&, host = plugin_proxy.host_,
note_ports = note_ports =
plugin_proxy.host_extensions_.note_ports]() { plugin_proxy.host_extensions_.note_ports]() {
note_ports->rescan(host, request.flags); note_ports->rescan(host, request.flags);
}) })
.wait(); .wait();
return Ack{}; return Ack{};
@@ -250,12 +249,14 @@ ClapPluginBridge::ClapPluginBridge(const ghc::filesystem::path& plugin_path)
const auto& [plugin_proxy, _] = const auto& [plugin_proxy, _] =
get_proxy(request.owner_instance_id); get_proxy(request.owner_instance_id);
plugin_proxy // TODO: Handle mutual recursion here and for latency
.run_on_main_thread( // changes
[&, host = plugin_proxy.host_, run_on_main_thread(
params = plugin_proxy.host_extensions_.params]() { plugin_proxy,
params->rescan(host, request.flags); [&, host = plugin_proxy.host_,
}) params = plugin_proxy.host_extensions_.params]() {
params->rescan(host, request.flags);
})
.wait(); .wait();
return Ack{}; return Ack{};
@@ -265,13 +266,13 @@ ClapPluginBridge::ClapPluginBridge(const ghc::filesystem::path& plugin_path)
const auto& [plugin_proxy, _] = const auto& [plugin_proxy, _] =
get_proxy(request.owner_instance_id); get_proxy(request.owner_instance_id);
plugin_proxy run_on_main_thread(
.run_on_main_thread( plugin_proxy,
[&, host = plugin_proxy.host_, [&, host = plugin_proxy.host_,
params = plugin_proxy.host_extensions_.params]() { params = plugin_proxy.host_extensions_.params]() {
params->clear(host, request.param_id, params->clear(host, request.param_id,
request.flags); request.flags);
}) })
.wait(); .wait();
return Ack{}; return Ack{};
@@ -281,12 +282,12 @@ ClapPluginBridge::ClapPluginBridge(const ghc::filesystem::path& plugin_path)
const auto& [plugin_proxy, _] = const auto& [plugin_proxy, _] =
get_proxy(request.owner_instance_id); get_proxy(request.owner_instance_id);
plugin_proxy run_on_main_thread(
.run_on_main_thread( plugin_proxy,
[&, host = plugin_proxy.host_, [&, host = plugin_proxy.host_,
state = plugin_proxy.host_extensions_.state]() { state = plugin_proxy.host_extensions_.state]() {
state->mark_dirty(host); state->mark_dirty(host);
}) })
.wait(); .wait();
return Ack{}; return Ack{};
@@ -296,13 +297,13 @@ ClapPluginBridge::ClapPluginBridge(const ghc::filesystem::path& plugin_path)
const auto& [plugin_proxy, _] = const auto& [plugin_proxy, _] =
get_proxy(request.owner_instance_id); get_proxy(request.owner_instance_id);
plugin_proxy run_on_main_thread(
.run_on_main_thread( plugin_proxy,
[&, host = plugin_proxy.host_, [&, host = plugin_proxy.host_,
voice_info = voice_info =
plugin_proxy.host_extensions_.voice_info]() { plugin_proxy.host_extensions_.voice_info]() {
voice_info->changed(host); voice_info->changed(host);
}) })
.wait(); .wait();
return Ack{}; return Ack{};
+81 -46
View File
@@ -142,45 +142,81 @@ class ClapPluginBridge : PluginBridge<ClapSockets<std::jthread>> {
} }
// TODO: Do we need this for CLAP? If we do, update the docstring // TODO: Do we need this for CLAP? If we do, update the docstring
// /** /**
// * Send a message, and allow other threads to call functions on _this * Send a message meant to be executed on the main thread, and allow other
// * thread_ while we're waiting for a response. This lets us execute * threads to call functions on _this thread_ while we're waiting for a
// * functions from the host's GUI thread while it is also calling * response. This lets us execute functions from the host's main thread
// functions * while it is also calling functions from that same thread. Because of
// * from that same thread. Because of that, we also know that while this * that, we also know that while this function is being called the host
// * function is being called the host won't be able to handle any * won't be able to handle any `clap_host::request_callback()` requests. We
// `IRunLoop` * need this for a couple situations, like a plugin calling
// * events. We need this to support REAPER, because REAPER requires * `clap_host_*::rescan()` during state loading.
// function *
// * calls involving the GUI to be run from the GUI thread. Grep for * We use the same trick in `ClapBridge`.
// * `run_gui_task` for instances of this. */
// * template <typename T>
// * We use the same trick in `ClapBridge`. typename T::Response send_mutually_recursive_main_thread_message(
// */ const T& object) {
// template <typename T> return mutual_recursion_.fork(
// typename T::Response send_mutually_recursive_message(const T& object) { [&]() { return send_main_thread_message(object); });
// return mutual_recursion_.fork([&]() { return send_message(object); }
// });
// }
// /** /**
// * If `send_mutually_recursive_message()` is currently being called, then * Run a callback on the host's GUI thread.
// * run `fn` on the thread that's currently calling that function and *
// return * If `send_mutually_recursive_main_thread_message()` is currently being
// * the result of the call. If there's currently no mutually recursive * called, then run `fn` on the thread that's currently calling that
// * function call going on, this will return an `std::nullopt`, and the * function and return the result of the call.
// * caller should call `fn` itself. *
// * * Otherwise, use `clap_plugin_proxy::run_on_main_thread()` to use CLAP's
// * @return The result of calling `fn`, if `fn` was called. * `clap_plugin::request_callback()` mechanic.
// * *
// * @see ClapPlugViewProxyImpl::run_gui_task * @return The result of calling `fn`
// */ *
// template <std::invocable F> * @see clap_plugin_proxy::run_on_main_thread
// std::optional<std::invoke_result_t<F>> */
// maybe_run_on_mutual_recursion_thread( template <std::invocable F>
// F&& fn) { std::future<std::invoke_result_t<F>> run_on_main_thread(
// return mutual_recursion_.maybe_handle(std::forward<F>(fn)); clap_plugin_proxy& plugin,
// } F&& fn) {
using Result = std::invoke_result_t<F>;
// If `ClapBridge::send_mutually_recursive_main_thread_message()` is
// currently being called, then we'll call `fn` from that same thread.
// Otherwise we'll schedule the task to be run using the host's main
// thread using `clap_host::request_callback()`. This is needed because
// `request_callback()` won't do anything if that thread is currently
// blocked.
// Modifying the `mutual_recursion_` methods to handle `void` correctly
// would lead to a lot more template soup, so we'll just work around it
// here.
// TODO: At some point, improve the API so it can handle void without
// workarounds
if constexpr (std::is_void_v<Result>) {
if (const auto result =
mutual_recursion_.maybe_handle([f = std::forward<F>(fn)]() {
f();
return Ack{};
})) {
// Apparently there's no way to just create a ready future
std::promise<void> result_promise;
result_promise.set_value();
return result_promise.get_future();
}
} else {
if (const auto result =
mutual_recursion_.maybe_handle(std::forward<F>(fn))) {
std::promise<Result> result_promise;
result_promise.set_value(std::move(*result));
return result_promise.get_future();
}
}
return plugin.run_on_main_thread(std::forward<F>(fn));
}
/** /**
* The logging facility used for this instance of yabridge. Wraps around * The logging facility used for this instance of yabridge. Wraps around
@@ -230,12 +266,11 @@ class ClapPluginBridge : PluginBridge<ClapSockets<std::jthread>> {
*/ */
std::shared_mutex plugin_proxies_mutex_; std::shared_mutex plugin_proxies_mutex_;
// TODO: Do we need this in CLAP? /**
// /** * Used in `ClapBridge::send_mutually_recursive_message()` to be able to
// * Used in `ClapBridge::send_mutually_recursive_message()` to be able to * execute functions from that same calling thread while we're waiting for a
// * execute functions from that same calling thread while we're waiting * response. See the uses for `send_mutually_recursive_message()` for use
// for a * cases where this is needed.
// * response. This is used in `ClapPlugViewProxyImpl::run_loop_tasks()`. */
// */ MutualRecursionHelper<std::jthread> mutual_recursion_;
// MutualRecursionHelper<std::jthread> mutual_recursion_;
}; };
@@ -143,7 +143,7 @@ class Vst3PlugViewProxyImpl : public Vst3PlugViewProxy {
// If `Vst3Bridge::send_mutually_recursive_message()` is currently being // If `Vst3Bridge::send_mutually_recursive_message()` is currently being
// called (because the host is calling one of `IPlugView`'s methods from // called (because the host is calling one of `IPlugView`'s methods from
// its UGI thread), then we'll call `fn` from that same thread. // its GUI thread), then we'll call `fn` from that same thread.
// Otherwise we'll schedule the task to be run from an event handler // Otherwise we'll schedule the task to be run from an event handler
// registered to the host's run loop, if that exists. Finally if the // registered to the host's run loop, if that exists. Finally if the
// host does not support `IRunLoop`, then we'll just run `fn` directly. // host does not support `IRunLoop`, then we'll just run `fn` directly.