49 Commits

Author SHA1 Message Date
Mike Oliphant 0649b4a822 Bump version -> 0.1.4 2024-08-17 11:27:04 -07:00
Mike Oliphant 61bbce1e2a Add "nam" namespace 2024-08-17 11:23:52 -07:00
Mike Oliphant 1dc54bc5f9 Update NeuralAmpModelerCore 2024-08-17 11:17:46 -07:00
Mike Oliphant 40bae2bd87 c++ version -> 17 2024-08-01 12:41:26 -07:00
Mike Oliphant 1ff6dab119 Update LV2 description 2024-05-03 17:50:54 -07:00
Mike Oliphant baff908f93 Update README.md 2024-04-29 12:42:37 -07:00
Mike Oliphant 72fd4a1e3d Make DC blocker coefficient depend on sample rate 2024-04-09 08:00:35 -07:00
Mike Oliphant 1626e74d0e Update README.md 2024-01-31 07:15:04 -08:00
Mike Oliphant d1794e1bbd Update README.md 2024-01-06 07:52:52 -08:00
Mike Oliphant b8c6333955 Merge branch 'main' of https://github.com/mikeoliphant/neural-amp-modeler-lv2 2024-01-06 07:49:52 -08:00
Mike Oliphant 02256b4349 Default x64 optimizations to OFF 2024-01-06 07:49:33 -08:00
Mike Oliphant f07c40c16f Update README.md 2024-01-01 15:36:33 -08:00
Mike Oliphant 5605b03acc Update README.md 2024-01-01 15:33:50 -08:00
Mike Oliphant 43fb036706 Update submodules 2023-11-12 09:20:40 +08:00
Mike Oliphant b40df0945c Merge branch 'main' of https://github.com/mikeoliphant/neural-amp-modeler-lv2 2023-10-24 13:11:17 -07:00
Mike Oliphant 24f03afd69 Add x86_64 to 64-bit check 2023-10-24 13:11:04 -07:00
Mike Oliphant db3603e134 Update build.yml 2023-10-24 10:14:46 -07:00
Mike Oliphant 97cff04114 Enable architecture-specific optimization by default. Added option to turn it off. 2023-10-24 10:09:16 -07:00
Mike Oliphant 067b3236ea remove deb artifact for now 2023-10-19 13:25:30 -07:00
Mike Oliphant 191ae3786e proper indent 2023-10-19 13:11:05 -07:00
Mike Oliphant 2b5a4c9912 Try to add .deb artifact 2023-10-19 13:10:17 -07:00
Mike Oliphant 23200eb566 Add Linux artifact 2023-10-19 12:57:19 -07:00
Mike Oliphant 3434ba0b7d <sigh> 2023-10-19 12:49:31 -07:00
Mike Oliphant 3e9ffee4e7 Update build.yml 2023-10-19 12:45:32 -07:00
Mike Oliphant 3cff8ca37a Update build.yml 2023-10-19 12:40:40 -07:00
Mike Oliphant fcec78fa0f First try at an artifact 2023-10-19 12:36:58 -07:00
Mike Oliphant 34be69df6d Remove AudioDSPTools dependency. Do simple DC blocker instead of HPF. 2023-10-17 12:25:14 -07:00
Mike Oliphant 34c7628202 Bump version to 0.1.3 2023-10-16 08:17:06 -07:00
Mike Oliphant 8384827981 Removed test value. 2023-10-15 18:12:27 -07:00
Mike Oliphant e3c5b6bd4f Update for normalization->loudness in NAM core. 2023-10-15 18:11:36 -07:00
Mike Oliphant eb46377d71 Update to latest NAM core. Handle model normalization with output gain. 2023-10-15 12:09:52 -07:00
Mike Oliphant b22f02c84e Update NAM core. Switch to new simplified NAM process method. 2023-10-06 11:51:30 -07:00
Mike Oliphant 52810a3f9c Switched to AudioDSPTools for highpass filter implementation 2023-10-02 08:12:33 -07:00
Mike Oliphant 1deb8cb5bc Bump version to 0.1.2 2023-09-13 07:52:52 -07:00
Mike Oliphant c3888eccae Update NAM core 2023-09-13 07:51:22 -07:00
Mike Oliphant 2d126db631 Update NAM Core. 2023-09-06 17:32:00 -07:00
Mike Oliphant bae8b0a627 Update README.md 2023-08-23 07:43:25 -07:00
Mike Oliphant 46a73d83fa Initialize model pre-run buffer to zero. Update to latest devel NAM Core with pre-warm instead of anti-pop. 2023-08-18 13:20:27 -07:00
Mike Oliphant 88c8441f0b Merge pull request #47 from moddevices/optimize-audio-level-loop
Optimize audio level loop code
2023-08-16 12:13:29 -07:00
falkTX 6f1a423b5a Optimize audio level loop code
Signed-off-by: falkTX <falktx@falktx.com>
2023-08-16 18:39:50 +02:00
Mike Oliphant fbf05b4472 Do 5Hz high pass filter to clean up any DC offset created by the model 2023-07-28 09:20:52 -07:00
Mike Oliphant ee0c83a10e Update NeuralAmpModelerCore 2023-07-27 15:32:38 -07:00
Mike Oliphant d4482b3b14 Catch exceptions by reference 2023-07-12 09:50:27 -07:00
Mike Oliphant c990eedf33 Bump version 2023-07-07 07:29:50 -07:00
Mike Oliphant 0255f36ae4 Don't report a worker error if we can't load a model since we're already logging an error. 2023-07-04 16:28:57 -07:00
Mike Oliphant 207fb2281e Fixed some warnings 2023-07-04 14:55:17 -07:00
Mike Oliphant dbf00f0ed3 Formatting 2023-07-04 14:53:39 -07:00
Mike Oliphant 33a0c08327 Set currentModelPath in restore() - even if it is null or empty.
Set it to empty and clear the current model if a model fails to load.
2023-07-04 13:03:46 -07:00
Mike Oliphant cde3df4e84 Update to latest NAM Core 2023-06-23 08:07:43 -07:00
12 changed files with 177 additions and 101 deletions
+19 -1
View File
@@ -24,6 +24,18 @@ jobs:
cmake --build . --config $BUILD_TYPE -j4
cpack
- name: Upload binary
uses: actions/upload-artifact@v1
with:
name: neural_amp_modeler.lv2-linux-amd64
path: ${{github.workspace}}/build/neural_amp_modeler.lv2
# - name: Upload deb
# uses: actions/upload-artifact@v1
# with:
# name: neural_amp_modeler.lv2-linux-deb-amd64
# path: ${{github.workspace}}/build/*.deb
build-windows:
name: Build Windows
runs-on: windows-latest
@@ -35,5 +47,11 @@ jobs:
- name: Build Plugin
working-directory: ${{github.workspace}}/build
run: |
cmake.exe -G "Visual Studio 17 2022" -A x64 ..
cmake.exe -G "Visual Studio 17 2022" -A x64 -T ClangCL ..
cmake --build . --config=release -j4
- name: Upload artifact
uses: actions/upload-artifact@v1
with:
name: neural_amp_modeler.lv2-win
path: ${{github.workspace}}/build/neural_amp_modeler.lv2
+3
View File
@@ -8,3 +8,6 @@
path = deps/NeuralAmpModelerCore
url = https://github.com/mikeoliphant/NeuralAmpModelerCore
branch = devel
[submodule "deps/AudioDSPTools"]
path = deps/AudioDSPTools
url = https://github.com/sdatkinson/AudioDSPTools
+4 -3
View File
@@ -1,10 +1,10 @@
cmake_minimum_required(VERSION 3.10)
project(NeuralAmpModelerLv2 VERSION 0.1.0)
project(NeuralAmpModelerLv2 VERSION 0.1.4)
set(CMAKE_MODULE_PATH "${CMAKE_SOURCE_DIR}/cmake")
set(CMAKE_CXX_STANDARD 20)
set(CMAKE_CXX_STANDARD 17)
set(CMAKE_CXX_STANDARD_REQUIRED OFF)
set(CMAKE_CXX_EXTENSIONS OFF)
@@ -22,11 +22,12 @@ set(NAM_LV2_ID http://github.com/mikeoliphant/neural-amp-modeler-lv2)
include_directories(SYSTEM deps/eigen)
include_directories(SYSTEM deps/lv2/include)
include_directories(SYSTEM deps/NeuralAmpModelerCore/NAM)
include_directories(SYSTEM deps/NeuralAmpModelerCore)
include_directories(SYSTEM deps/json)
include_directories(SYSTEM deps/denormal)
add_definitions(-DNAM_SAMPLE_FLOAT)
add_definitions(-DDSP_SAMPLE_FLOAT)
add_subdirectory(src)
+5 -1
View File
@@ -2,7 +2,8 @@
Bare-bones implementation of [Neural Amp Modeler](https://github.com/sdatkinson/neural-amp-modeler) (NAM) models in an LV2 plugin.
**There is no user interface**. Setting the model to use requires that your LV2 host supports atom:Path parameters. Reaper does not. Carla and Ardour do. If your favorite LV2 host does not support atom:Path, let them know you want it. **A Reaper feature request for this is [here](https://forum.cockos.com/showthread.php?p=2505988)**.
**There is no user interface**. Setting the model to use requires that your LV2 host supports atom:Path parameters. Reaper does as of v6.82. Carla and Ardour do. If your favorite LV2 host does not support atom:Path, let them know you want it.
If you are looking for a GUI version, @brummer10 [has one here](https://github.com/brummer10/neural-amp-modeler-ui) that works for Linux and Windows. You may also be interested in the version shipped with the [MOD Desktop App](https://github.com/moddevices/mod-desktop-app), or my digital pedalboard app [Stompbox](https://github.com/mikeoliphant/StompboxUI).
To get the intended behavior, **you must run your audio host at the same sample rate the model was trained at** (usually 48kHz) - no resampling is done by the plugin.
@@ -44,3 +45,6 @@ Note - you'll have to change the Visual Studio version if you are using a differ
After building, the plugin will be in **build/neural_amp_modeler.lv2**.
### Optimization
If you have a relatively modern x64 processor, you can pass "**&#8209;DUSE_NATIVE_ARCH=ON**" on your cmake command line to enable certain processor-specific optimizations.
Vendored
+1 -1
Submodule deps/eigen updated: 7bf2968fed...f78c37f0af
Vendored
+1 -1
Submodule deps/lv2 updated: dbdbe3e518...e9d9432874
+3 -3
View File
@@ -39,11 +39,11 @@
opts:supportedOption <http://lv2plug.in/ns/ext/buf-size#maxBlockLength>;
rdfs:comment """
An LV2 implementation of Neural Amp Modeler.
LV2 plugin for Neural Amp Modeler machine learning guitar amplifier simulation models
NAM accepts Neural Amp Modeler .nam files.
Based on the Neural Amp Modeler Core codebase: https://github.com/sdatkinson/NeuralAmpModelerCore
A large collection of .nam files is available at https://tonehunt.org/
A large collection of .nam models is available at https://tonehunt.org
""";
patch:writable <@NAM_LV2_ID@#model>;
+10 -3
View File
@@ -24,11 +24,18 @@ source_group(NAM ${CMAKE_CURRENT_SOURCE_DIR} FILES ${NAM_SOURCES})
option(DISABLE_DENORMALS "Disable floating point denormals" ON)
if(DISABLE_DENORMALS)
if (DISABLE_DENORMALS)
add_definitions(-DDISABLE_DENORMALS)
endif(DISABLE_DENORMALS)
endif (DISABLE_DENORMALS)
target_compile_features(neural_amp_modeler PUBLIC cxx_std_17)
if (CMAKE_SYSTEM_PROCESSOR MATCHES "(amd64)|(AMD64)|(x86_64)")
option(USE_NATIVE_ARCH "Enable architecture-specific optimizations" OFF)
if (USE_NATIVE_ARCH)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -march=x86-64-v3")
message("Enabling -march=x86-64-v3")
endif (USE_NATIVE_ARCH)
endif ()
set_target_properties(neural_amp_modeler
PROPERTIES
+4 -3
View File
@@ -27,7 +27,7 @@ static LV2_Handle instantiate(const LV2_Descriptor*, double rate, const char*, c
return nullptr;
}
catch(const std::exception& e)
catch(const std::exception&)
{
return nullptr;
}
@@ -36,7 +36,8 @@ static LV2_Handle instantiate(const LV2_Descriptor*, double rate, const char*, c
static void connect_port(LV2_Handle instance, uint32_t port, void* data)
{
auto nam = static_cast<NAM::Plugin*>(instance);
*(reinterpret_cast<void**>(&nam->ports)+port) = data;
*(reinterpret_cast<void**>(&nam->ports)+port) = data;
}
static void activate(LV2_Handle) {}
@@ -95,7 +96,7 @@ LV2_SYMBOL_EXPORT const LV2_Descriptor* lv2_descriptor(uint32_t index)
{
if (index == 0) {
// Turn on fast tanh approximation
activations::Activation::enable_fast_tanh();
nam::activations::Activation::enable_fast_tanh();
return &descriptor;
}
+118 -77
View File
@@ -1,10 +1,10 @@
#include <algorithm>
#include <cmath>
#include <utility>
#include <cassert>
#include "nam_plugin.h"
#include "activations.h"
#include <cassert>
#include <NAM/activations.h>
#define SMOOTH_EPSILON .0001f
@@ -20,12 +20,15 @@ namespace NAM {
delete currentModel;
}
bool Plugin::initialize(double rate, const LV2_Feature* const* features) noexcept
bool Plugin::initialize(double sampleRate, const LV2_Feature* const* features) noexcept
{
this->sampleRate = sampleRate;
// for fetching initial options, can be null
LV2_Options_Option* options = nullptr;
for (size_t i = 0; features[i]; ++i) {
for (size_t i = 0; features[i]; ++i)
{
if (std::string(features[i]->URI) == std::string(LV2_URID__map))
map = static_cast<LV2_URID_Map*>(features[i]->data);
else if (std::string(features[i]->URI) == std::string(LV2_WORKER__schedule))
@@ -85,11 +88,14 @@ namespace NAM {
auto msg = static_cast<const LV2LoadModelMsg*>(data);
auto nam = static_cast<NAM::Plugin*>(instance);
nam::DSP* model = nullptr;
LV2SwitchModelMsg response = { kWorkTypeSwitch, {}, {} };
LV2_Worker_Status result = LV2_WORKER_SUCCESS;
try
{
// load model from path
const size_t pathlen = strlen(msg->path);
::DSP* model;
if (pathlen == 0 || pathlen >= MAX_FILE_NAME)
{
@@ -101,42 +107,44 @@ namespace NAM {
{
lv2_log_trace(&nam->logger, "Staging model change: `%s`\n", msg->path);
model = get_dsp(msg->path).release();
// Enable model loudness normalization
model->SetNormalize(true);
model = nam::get_dsp(msg->path).release();
// Pre-run model to ensure all needed buffers are allocated in advance
if (const int32_t numSamples = nam->maxBufferSize)
{
float* buffer = new float[numSamples];
memset(buffer, 0, numSamples * sizeof(float));
std::unordered_map<std::string, double> params = {};
model->process(&buffer, &buffer, 1, numSamples, 1.0, 1.0, params);
model->process(buffer, buffer, numSamples);
model->finalize_(numSamples);
delete[] buffer;
}
}
LV2SwitchModelMsg response = { kWorkTypeSwitch, {}, model };
response.model = model;
memcpy(response.path, msg->path, pathlen);
respond(handle, sizeof(response), &response);
return LV2_WORKER_SUCCESS;
}
catch (std::exception& e)
catch (const std::exception&)
{
response.path[0] = '\0';
lv2_log_error(&nam->logger, "Unable to load model from: '%s'\n", msg->path);
//result = LV2_WORKER_ERR_UNKNOWN;
}
break;
respond(handle, sizeof(response), &response);
return result;
}
case kWorkTypeFree:
{
auto msg = static_cast<const LV2FreeModelMsg*>(data);
delete msg->model;
return LV2_WORKER_SUCCESS;
}
@@ -211,57 +219,87 @@ namespace NAM {
}
}
float level;
// convert input level from db
float desiredInputLevel = powf(10, *(ports.input_level) * 0.05f);
if (fabs(desiredInputLevel - inputLevel) > SMOOTH_EPSILON)
{
level = inputLevel;
for (unsigned int i = 0; i < n_samples; i++)
{
// do very basic smoothing
inputLevel = (.99f * inputLevel) + (.01f * desiredInputLevel);
level = (.99f * level) + (.01f * desiredInputLevel);
ports.audio_out[i] = ports.audio_in[i] * inputLevel;
ports.audio_out[i] = ports.audio_in[i] * level;
}
inputLevel = level;
}
else
{
inputLevel = desiredInputLevel;
level = inputLevel = desiredInputLevel;
for (unsigned int i = 0; i < n_samples; i++)
{
ports.audio_out[i] = ports.audio_in[i] * inputLevel;
ports.audio_out[i] = ports.audio_in[i] * level;
}
}
float modelLoudnessAdjustmentDB = 0;
if (currentModel != nullptr)
{
currentModel->process(&ports.audio_out, &ports.audio_out, 1, n_samples, 1.0, 1.0, mNAMParams);
currentModel->process(ports.audio_out, ports.audio_out, n_samples);
currentModel->finalize_(n_samples);
if (currentModel->HasLoudness())
{
// Normalize model to -18dB
modelLoudnessAdjustmentDB = -18 - currentModel->GetLoudness();
}
}
// convert output level from db
float desiredOutputLevel = powf(10, *(ports.output_level) * 0.05f);
// Convert output level from db
float desiredOutputLevel = powf(10, (*(ports.output_level) + modelLoudnessAdjustmentDB) * 0.05f);
if (fabs(desiredOutputLevel - outputLevel) > SMOOTH_EPSILON)
{
level = outputLevel;
for (unsigned int i = 0; i < n_samples; i++)
{
// do very basic smoothing
outputLevel = (.99f * outputLevel) + (.01f * desiredOutputLevel);
level = (.99f * level) + (.01f * desiredOutputLevel);
ports.audio_out[i] *= outputLevel;
ports.audio_out[i] = ports.audio_out[i] * outputLevel;
}
outputLevel = level;
}
else
{
outputLevel = desiredOutputLevel;
level = outputLevel = desiredOutputLevel;
for (unsigned int i = 0; i < n_samples; i++)
{
ports.audio_out[i] *= outputLevel;
ports.audio_out[i] = ports.audio_out[i] * level;
}
}
float dcBlockCoefficient = 1 - (220.0 / sampleRate);
for (unsigned int i = 0; i < n_samples; i++)
{
float dcInput = ports.audio_out[i];
// dc blocker
ports.audio_out[i] = ports.audio_out[i] - prevDCInput + dcBlockCoefficient * prevDCOutput;
prevDCInput = dcInput;
prevDCOutput = ports.audio_out[i];
}
}
uint32_t Plugin::options_get(LV2_Handle, LV2_Options_Option*)
@@ -293,7 +331,8 @@ namespace NAM {
lv2_log_trace(&nam->logger, "Saving state\n");
if (!nam->currentModel) {
if (!nam->currentModel)
{
return LV2_STATE_SUCCESS;
}
@@ -331,8 +370,6 @@ namespace NAM {
LV2_State_Status Plugin::restore(LV2_Handle instance, LV2_State_Retrieve_Function retrieve, LV2_State_Handle handle,
uint32_t flags, const LV2_Feature* const* features)
{
//if (!haveLog) return LV2_STATE_SUCCESS;
auto nam = static_cast<NAM::Plugin*>(instance);
// Get model_Path from state
@@ -343,58 +380,62 @@ namespace NAM {
lv2_log_trace(&nam->logger, "Restoring model '%s'\n", (const char*)value);
if (!value) {
lv2_log_error(&nam->logger, "Missing model_Path\n");
return LV2_STATE_ERR_NO_PROPERTY;
}
if (type != nam->uris.atom_Path) {
lv2_log_error(&nam->logger, "Non-path model_Path\n");
return LV2_STATE_ERR_BAD_TYPE;
}
LV2_State_Map_Path* map_path = (LV2_State_Map_Path*)lv2_features_data(features, LV2_STATE__mapPath);
if (map_path == nullptr)
{
lv2_log_error(&nam->logger, "LV2_STATE__mapPath unsupported by host\n");
return LV2_STATE_ERR_NO_FEATURE;
}
// Map abstract state path to absolute path
char* path = map_path->absolute_path(map_path->handle, (const char *)value);
size_t pathLen = strlen(path);
NAM::LV2LoadModelMsg msg = { NAM::kWorkTypeLoad, {} };
LV2_State_Status result = LV2_STATE_SUCCESS;
if (pathLen < MAX_FILE_NAME)
// Check if a path is set
if (!value || (type != nam->uris.atom_Path))
{
msg.path[0] = '\0';
}
else
{
LV2_State_Map_Path* map_path = (LV2_State_Map_Path*)lv2_features_data(features, LV2_STATE__mapPath);
if (map_path == nullptr)
{
lv2_log_error(&nam->logger, "LV2_STATE__mapPath unsupported by host\n");
return LV2_STATE_ERR_NO_FEATURE;
}
// Map abstract state path to absolute path
char* path = map_path->absolute_path(map_path->handle, (const char *)value);
size_t pathLen = strlen(path);
if (pathLen >= MAX_FILE_NAME)
{
lv2_log_error(&nam->logger, "Model path is too long (max %u chars)\n", MAX_FILE_NAME);
result = LV2_STATE_ERR_UNKNOWN;
}
else
{
memcpy(msg.path, path, pathLen);
}
LV2_State_Free_Path* free_path = (LV2_State_Free_Path*)lv2_features_data(features, LV2_STATE__freePath);
if (free_path != nullptr)
{
free_path->free_path(free_path->handle, path);
}
else
{
#ifndef _WIN32 // Can't free host-allocated memory on plugin side under Windows
free(path);
#endif
}
}
if (result == LV2_STATE_SUCCESS)
{
// Schedule model to be loaded by the provided worker
NAM::LV2LoadModelMsg msg = { NAM::kWorkTypeLoad, {} };
memcpy(msg.path, path, pathLen);
nam->schedule->schedule_work(nam->schedule->handle, sizeof(msg), &msg);
}
else
{
lv2_log_error(&nam->logger, "Model path is too long (max %u chars)\n", MAX_FILE_NAME);
result = LV2_STATE_ERR_UNKNOWN;
}
LV2_State_Free_Path* free_path = (LV2_State_Free_Path*)lv2_features_data(features, LV2_STATE__freePath);
if (free_path != nullptr)
{
free_path->free_path(free_path->handle, path);
}
else
{
#ifndef _WIN32 // Can't free host-allocated memory on plugin side under Windows
free(path);
#endif
nam->currentModelPath = msg.path;
}
return result;
@@ -410,7 +451,7 @@ namespace NAM {
lv2_atom_forge_key(&atom_forge, uris.patch_property);
lv2_atom_forge_urid(&atom_forge, uris.model_Path);
lv2_atom_forge_key(&atom_forge, uris.patch_value);
lv2_atom_forge_path(&atom_forge, currentModelPath.c_str(), currentModelPath.length() + 1);
lv2_atom_forge_path(&atom_forge, currentModelPath.c_str(), (uint32_t)currentModelPath.length() + 1);
lv2_atom_forge_pop(&atom_forge, &frame);
}
+8 -7
View File
@@ -21,7 +21,7 @@
#include <lv2/state/state.h>
#include <lv2/units/units.h>
#include "dsp.h"
#include <NAM/dsp.h>
#define PlUGIN_URI "http://github.com/mikeoliphant/neural-amp-modeler-lv2"
#define MODEL_URI PlUGIN_URI "#model"
@@ -43,12 +43,12 @@ namespace NAM {
struct LV2SwitchModelMsg {
LV2WorkType type;
char path[MAX_FILE_NAME];
::DSP* model;
nam::DSP* model;
};
struct LV2FreeModelMsg {
LV2WorkType type;
::DSP* model;
nam::DSP* model;
};
class Plugin {
@@ -64,14 +64,16 @@ namespace NAM {
Ports ports = {};
double sampleRate;
LV2_URID_Map* map = nullptr;
LV2_Log_Logger logger = {};
LV2_Worker_Schedule* schedule = nullptr;
::DSP* currentModel = nullptr;
nam::DSP* currentModel = nullptr;
std::string currentModelPath;
std::unordered_map<std::string, double> mNAMParams = {};
float prevDCInput = 0;
float prevDCOutput = 0;
Plugin();
~Plugin();
@@ -114,7 +116,6 @@ namespace NAM {
LV2_Atom_Forge atom_forge = {};
LV2_Atom_Forge_Frame sequence_frame;
float m_rate;
float inputLevel = 0;
float outputLevel = 0;
int32_t maxBufferSize = 0;