mirror of
https://github.com/mikeoliphant/neural-amp-modeler-lv2.git
synced 2026-05-07 04:00:09 +02:00
Compare commits
18 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
| 5a5865a8a4 | |||
| 5880267d49 | |||
| 6cd11a9e57 | |||
| 513a537d43 | |||
| 1193da70ca | |||
| 2f81ad2b81 | |||
| eeaeeecf24 | |||
| 0fd82dc816 | |||
| d998b95e45 | |||
| 42d9d8b4c3 | |||
| b5b934d4e7 | |||
| c3bcac7085 | |||
| 94d86f5bc6 | |||
| 4b5f7d9051 | |||
| 91259b8eb6 | |||
| 4c8c341fdd | |||
| ccfa2e3882 | |||
| 2fdabf74ce |
+1
-1
@@ -12,7 +12,7 @@ set(CMAKE_CXX_EXTENSIONS OFF)
|
||||
if (CMAKE_SYSTEM_NAME STREQUAL "Darwin")
|
||||
include_directories(SYSTEM /usr/local/include)
|
||||
elseif (CMAKE_SYSTEM_NAME STREQUAL "Linux")
|
||||
link_libraries(stdc++fs)
|
||||
link_libraries( "$<$<AND:$<CXX_COMPILER_ID:GNU>,$<VERSION_LESS:$<CXX_COMPILER_VERSION>,9.0>>:-lstdc++fs>" )
|
||||
elseif (CMAKE_SYSTEM_NAME STREQUAL "Windows")
|
||||
add_compile_definitions(NOMINMAX WIN32_LEAN_AND_MEAN)
|
||||
else()
|
||||
|
||||
@@ -1,27 +1,28 @@
|
||||
# neural-amp-modeler-lv2
|
||||
|
||||
LV2 plugin for using neural network machine learning amp models.
|
||||
LV2 plugin for neural network machine learning amp model playback using the [NeuralAudio](https://github.com/mikeoliphant/NeuralAudio) engine.
|
||||
|
||||
**There is no custom plugin user interface**. Setting the model to use requires that your LV2 host supports atom:Path parameters. Reaper does as of v6.82. Carla and Ardour do. If your favorite LV2 host does not support atom:Path, let them know you want it.
|
||||
If you are looking for a GUI version, @brummer10 [has one here](https://github.com/brummer10/neural-amp-modeler-ui) that works for Linux and Windows. You may also be interested in the version shipped with the [MOD Desktop App](https://github.com/moddevices/mod-desktop-app), or my digital pedalboard app [Stompbox](https://github.com/mikeoliphant/StompboxUI).
|
||||
If you are looking for a GUI version, @brummer10 [has one here](https://github.com/brummer10/neural-amp-modeler-ui) that works for Linux and Windows. You may also be interested in the version shipped with the [MOD Desktop App](https://github.com/moddevices/mod-desktop-app), or my digital pedalboard app [Stompbox](https://github.com/mikeoliphant/Stompbox).
|
||||
|
||||
To get the intended behavior, **you must run your audio host at the same sample rate the model was trained at** (usually 48kHz) - no resampling is done by the plugin.
|
||||
|
||||
For amp-only models (the most typical), **you will need to run an impulse response after this plugin** to model the cabinet.
|
||||
|
||||
## Models and Performance
|
||||
## Models Supported
|
||||
|
||||
The plugin supports both [Neural Amp Modeler (NAM)](https://github.com/sdatkinson/neural-amp-modeler) models and [RTNeural keras json models](https://github.com/jatinchowdhury18/RTNeural) (like those used by [Aida-X](https://github.com/AidaDSP/AIDA-X)).
|
||||
|
||||
The best source of models is [ToneHunt](https://tonehunt.org/).
|
||||
The best source of models is [Tone3000](https://www.tone3000.com/).
|
||||
|
||||
For more information on model type support, see the [NeuralAudio](https://github.com/mikeoliphant/NeuralAudio) repository, which is where the model handling code lives.
|
||||
|
||||
## Performance
|
||||
|
||||
NAM WaveNet models are generally quite expensive to run. This isn't (much of) an issue on modern PCs, but you may have trouble running on less powerful hardware.
|
||||
|
||||
A Raspberry Pi 4 running a 64bit OS can run "standard" NAM models with plenty of room to spare for a cabinet IR and some effects. It is also capable of running two "standard" NAM models, but with less headroom for other effects.
|
||||
If you are having trouble running a "standard" model, try looking for "feather", or even "nano" (the least expensive) models. You can find a list of ["feather"-tagged models on Tone3000](https://www.tone3000.com/search?sizes=feather). Note that tagging models is up to the submitter, so not all "feather" models are tagged as such - you should be able to find more if you dig around.
|
||||
|
||||
If you are having trouble running a "standard" model, try looking for "feather", or even "nano" (the least expensive) models. You can find a list of ["feather"-tagged models on ToneHunt](https://tonehunt.org/models?tags%5B0%5D=feather-mdl). Note that tagging models is up to the submitter, so not all "feather" models are tagged as such - you should be able to find more if you dig around.
|
||||
|
||||
For more information on model type support, see the [NeuralAudio](https://github.com/mikeoliphant/NeuralAudio) repository, which is where the model handling code lives.
|
||||
|
||||
## Input Calibration
|
||||
|
||||
@@ -57,4 +58,6 @@ After building, the plugin will be in **build/neural_amp_modeler.lv2**.
|
||||
|
||||
```-DUSE_NATIVE_ARCH=ON```: If you have a relatively modern x64 processor, you can pass ```-DUSE_NATIVE_ARCH=ON``` on your cmake command line to enable certain processor-specific optimizations.
|
||||
|
||||
```-DSMART_BYPASS_ENABLED=ON```: If enabled, this will bypass model processing if input has been silent (below -100 dB by default) for a sufficient number of samples (determined by the model's receptive field size).
|
||||
|
||||
Also see the [NeuralAudio CMake options](https://github.com/mikeoliphant/NeuralAudio#cmake-options) - adding these to your neural-amp-modeler-lv2 cmake will pass them to the NeuralAudio build.
|
||||
|
||||
Vendored
+1
-1
Submodule deps/NeuralAudio updated: 4c9d20ee1c...842f675179
@@ -54,6 +54,12 @@ if (DISABLE_DENORMALS)
|
||||
add_definitions(-DDISABLE_DENORMALS)
|
||||
endif (DISABLE_DENORMALS)
|
||||
|
||||
option(SMART_BYPASS_ENABLED "Enable auto-bypass on silence" OFF)
|
||||
|
||||
if (SMART_BYPASS_ENABLED)
|
||||
add_definitions(-DSMART_BYPASS_ENABLED)
|
||||
endif (SMART_BYPASS_ENABLED)
|
||||
|
||||
set_target_properties(neural_amp_modeler
|
||||
PROPERTIES
|
||||
CXX_VISIBILITY_PRESET hidden
|
||||
|
||||
@@ -7,12 +7,18 @@
|
||||
|
||||
#define SMOOTH_EPSILON .0001f
|
||||
|
||||
#ifndef BYPASS_DB_THRESHOLD
|
||||
#define BYPASS_DB_THRESHOLD -100
|
||||
#endif
|
||||
|
||||
namespace NAM {
|
||||
Plugin::Plugin()
|
||||
{
|
||||
// prevent allocations on the audio thread
|
||||
currentModelPath.reserve(MAX_FILE_NAME + 1);
|
||||
|
||||
bypassThresholdLinear = powf(10, BYPASS_DB_THRESHOLD * 0.05f);
|
||||
|
||||
// NeuralAudio::NeuralModel::SetLSTMLoadMode(
|
||||
//#ifdef LSTM_PREFER_NAM
|
||||
// NeuralAudio::PreferNAMCore
|
||||
@@ -181,6 +187,18 @@ namespace NAM {
|
||||
nam->currentModelPath = msg->path;
|
||||
assert(nam->currentModelPath.capacity() >= MAX_FILE_NAME + 1);
|
||||
|
||||
if (nam->currentModel != nullptr)
|
||||
{
|
||||
int receptiveFieldSize = nam->currentModel->GetReceptiveFieldSize();
|
||||
|
||||
if (receptiveFieldSize > -1)
|
||||
{
|
||||
// A newly loaded model is prewarmed to have a silent sample history
|
||||
nam->silentSamples = receptiveFieldSize;
|
||||
nam->smartBypassed = true;
|
||||
}
|
||||
}
|
||||
|
||||
// send reply
|
||||
nam->schedule->schedule_work(nam->schedule->handle, sizeof(reply), &reply);
|
||||
|
||||
@@ -241,6 +259,44 @@ namespace NAM {
|
||||
if (currentModel != nullptr)
|
||||
{
|
||||
modelInputAdjustmentDB = currentModel->GetRecommendedInputDBAdjustment();
|
||||
|
||||
#ifdef SMART_BYPASS_ENABLED
|
||||
int receptiveFieldSamples = currentModel->GetReceptiveFieldSize();
|
||||
|
||||
if (receptiveFieldSamples > -1)
|
||||
{
|
||||
for (unsigned int i = 0; i < n_samples; i++)
|
||||
{
|
||||
if (abs(ports.audio_in[i]) <= bypassThresholdLinear)
|
||||
{
|
||||
silentSamples++;
|
||||
}
|
||||
else
|
||||
{
|
||||
silentSamples = 0;
|
||||
}
|
||||
}
|
||||
|
||||
if (silentSamples >= (uint32_t)receptiveFieldSamples)
|
||||
{
|
||||
silentSamples = (uint32_t)receptiveFieldSamples; // Prevent silentSamples growing and eventually overflowing uint32
|
||||
|
||||
if (smartBypassed)
|
||||
{
|
||||
for (unsigned int i = 0; i < n_samples; i++)
|
||||
{
|
||||
ports.audio_out[i] = ports.audio_in[i];
|
||||
}
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
smartBypassed = true; // If we aren't already, we'll be bypassed on the next process call
|
||||
}
|
||||
else
|
||||
smartBypassed = false;
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
// convert input level from db
|
||||
|
||||
@@ -80,6 +80,7 @@ namespace NAM {
|
||||
|
||||
bool initialize(double rate, const LV2_Feature* const* features) noexcept;
|
||||
void set_max_buffer_size(int size) noexcept;
|
||||
void activate() noexcept;
|
||||
void process(uint32_t n_samples) noexcept;
|
||||
|
||||
void write_current_path();
|
||||
@@ -120,5 +121,8 @@ namespace NAM {
|
||||
float inputLevel = 0;
|
||||
float outputLevel = 0;
|
||||
int32_t maxBufferSize = 512;
|
||||
float bypassThresholdLinear = 0;
|
||||
uint32_t silentSamples = 0;
|
||||
bool smartBypassed = true;
|
||||
};
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user