44 Commits

Author SHA1 Message Date
Mike Oliphant 42b2173e57 Bump version -> 0.1.6 2024-12-02 10:37:27 -08:00
Mike Oliphant 695f129e7f Update README.md 2024-12-01 09:13:01 -08:00
Mike Oliphant 952fc0edaf Update NeuralAudio 2024-12-01 09:05:17 -08:00
Mike Oliphant 76d29579b0 Update NeuralAudio 2024-12-01 07:46:02 -08:00
Mike Oliphant 00fde9f172 Update NeuralAudio 2024-11-30 12:42:18 -08:00
Mike Oliphant 14a05cbd27 Update NeuralAudio 2024-11-30 12:38:13 -08:00
Mike Oliphant 2a6aa910fc Fix NeuralAudio 2024-11-30 12:34:07 -08:00
Mike Oliphant 24fd1504ae fix NeuralAudio 2024-11-30 12:33:07 -08:00
Mike Oliphant af73122323 Update NeuralAudio 2024-11-30 12:16:55 -08:00
Mike Oliphant 2c4fdeafbd Update NeuralAudio 2024-11-30 12:09:36 -08:00
Mike Oliphant 05eb9c3669 Update release.yml 2024-11-27 12:55:56 -08:00
Mike Oliphant f15a88e392 Update release.yml 2024-11-27 12:51:06 -08:00
Mike Oliphant 1732b0e46a Update release.yml 2024-11-27 12:43:26 -08:00
Mike Oliphant d17b169e5c Update release.yml 2024-11-27 12:37:35 -08:00
Mike Oliphant bd4b2e997f Update release.yml 2024-11-27 12:33:12 -08:00
Mike Oliphant b7dd908297 Update release.yml 2024-11-27 12:28:44 -08:00
Mike Oliphant 8668125890 Update release.yml 2024-11-27 12:26:06 -08:00
Mike Oliphant 873e17f5b0 Update release.yml 2024-11-27 12:23:03 -08:00
Mike Oliphant d326635556 Update release.yml 2024-11-27 12:20:00 -08:00
Mike Oliphant fe662d4273 Update release.yml 2024-11-27 12:18:21 -08:00
Mike Oliphant 368e47e12e Update NeuralAudio (better RTNeural NAM performance) 2024-11-27 09:17:49 -08:00
Mike Oliphant 429ce7ac4b Update NeuralAudio (add NAM model input level calibration) 2024-11-25 09:42:43 -08:00
Mike Oliphant 979b8f4cf1 Default WAVENET_PREFER_NAM to ON 2024-11-22 07:58:28 -08:00
Mike Oliphant f14faca788 Update NeuralAudio (prefer NAM Core for NAM models) 2024-11-20 12:12:27 -08:00
Mike Oliphant 7fd72e3fe1 Revert last update 2024-11-19 12:24:14 -08:00
Mike Oliphant fcd3d6b93c Update NeuralAudio 2024-11-19 09:48:05 -08:00
Mike Oliphant 48b6765d72 Merge branch 'main' of https://github.com/mikeoliphant/neural-amp-modeler-lv2 2024-11-18 08:58:01 -08:00
Mike Oliphant e5984f6e0c Update NeuralAudio 2024-11-18 08:57:59 -08:00
Mike Oliphant 54516bd618 Update README.md 2024-11-17 07:21:05 -08:00
Mike Oliphant 4b10ae103a Update NeuralAudio 2024-11-16 14:06:55 -08:00
Mike Oliphant e9f6f01d8e Update NeuralAudio 2024-11-16 12:00:50 -08:00
Mike Oliphant e2caafbb2d Merge branch 'main' of https://github.com/mikeoliphant/neural-amp-modeler-lv2 2024-11-16 08:09:59 -08:00
Mike Oliphant 9d2e7ae205 Fixed propagation of native arch optimizations 2024-11-16 08:09:58 -08:00
Mike Oliphant 84a74d51ff Update README.md 2024-11-15 13:24:48 -08:00
Mike Oliphant b7595d2750 Add "aidax" file type to ttl 2024-11-15 09:56:38 -08:00
Mike Oliphant ac4f7d1a50 Update NeuralAudio 2024-11-15 07:44:56 -08:00
Mike Oliphant eeaa4649a4 Update NeuralAudio (add ".aidax" extension") 2024-11-15 07:25:59 -08:00
Mike Oliphant 3f5872cc78 CMake options for forcing use of NAM Core for LSTM or WaveNet 2024-11-14 07:47:52 -08:00
Mike Oliphant 5dd03764c5 Merge branch 'main' of https://github.com/mikeoliphant/neural-amp-modeler-lv2 2024-11-14 07:42:25 -08:00
Mike Oliphant 43870cc0da Update NeuralAudio - use RTNeural for WaveNet 2024-11-14 07:42:24 -08:00
Mike Oliphant 69bfdb4370 Update README.md 2024-11-04 08:12:04 -08:00
Mike Oliphant 84f5851b5d Update README.md 2024-11-04 08:10:54 -08:00
Mike Oliphant 75fcc78bae Update README.md 2024-11-04 08:10:07 -08:00
Mike Oliphant 5b2c9a154f Update README.md 2024-11-04 08:09:45 -08:00
9 changed files with 196 additions and 60 deletions
+94 -4
View File
@@ -27,7 +27,69 @@ jobs:
draft: true
tag_name: ${{github.ref}}
release_name: Release ${{github.ref}}
build-linux-x64:
name: Build Linux x64
needs: create_release
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3.3.0
with:
submodules: recursive
- name: Build Plugin
working-directory: ${{github.workspace}}/build
env:
CXX: clang++
run: |
cmake .. -DCMAKE_BUILD_TYPE=$BUILD_TYPE
cmake --build . --config $BUILD_TYPE -j4
- name: Add LV2 Archive
working-directory: ${{github.workspace}}/build
run: tar -czf neural_amp_modeler_lv2_linux_x64.tgz neural_amp_modeler.lv2
- name: Upload Plugin Asset
uses: actions/upload-release-asset@v1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ needs.create_release.outputs.upload_url }}
asset_path: ./build/neural_amp_modeler_lv2_linux_x64.tgz
asset_name: neural_amp_modeler_lv2_linux_x64.tgz
asset_content_type: application/tgz
build-linux-x64v3:
name: Build Linux x64v3
needs: create_release
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3.3.0
with:
submodules: recursive
- name: Build Plugin
working-directory: ${{github.workspace}}/build
env:
CXX: clang++
run: |
cmake .. -DCMAKE_BUILD_TYPE=$BUILD_TYPE -DUSE_NATIVE_ARCH=ON
cmake --build . --config $BUILD_TYPE -j4
- name: Add LV2 Archive
working-directory: ${{github.workspace}}/build
run: tar -czf neural_amp_modeler_lv2_linux_x64v3.tgz neural_amp_modeler.lv2
- name: Upload Plugin Asset
uses: actions/upload-release-asset@v1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ needs.create_release.outputs.upload_url }}
asset_path: ./build/neural_amp_modeler_lv2_linux_x64v3.tgz
asset_name: neural_amp_modeler_lv2_linux_x64v3.tgz
asset_content_type: application/tgz
build-windows:
name: Build Windows
needs: create_release
@@ -44,7 +106,7 @@ jobs:
cmake --build . --config=release -j4
- name: Add LV2 Archive
run: Compress-Archive -Path ${{github.workspace}}\build\neural_amp_modeler.lv2 -Destination neural_amp_modeler.lv2.zip
run: Compress-Archive -Path ${{github.workspace}}\build\neural_amp_modeler.lv2 -Destination neural_amp_modeler_lv2_win_x64.zip
- name: Upload Plugin Asset
uses: actions/upload-release-asset@v1
@@ -52,6 +114,34 @@ jobs:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ needs.create_release.outputs.upload_url }}
asset_path: ./neural_amp_modeler.lv2.zip
asset_name: neural_amp_modeler.lv2.zip
asset_path: ./neural_amp_modeler_lv2_win_x64.zip
asset_name: neural_amp_modeler_lv2_win_x64.zip
asset_content_type: application/zip
build-windows-x64v3:
name: Build Windows x64v3
needs: create_release
runs-on: windows-latest
steps:
- uses: actions/checkout@v3.3.0
with:
submodules: recursive
- name: Build Plugin
working-directory: ${{github.workspace}}/build
run: |
cmake.exe -G "Visual Studio 17 2022" -A x64 -DUSE_NATIVE_ARCH=ON -T ClangCL ..
cmake --build . --config=release -j4
- name: Add LV2 Archive
run: Compress-Archive -Path ${{github.workspace}}\build\neural_amp_modeler.lv2 -Destination neural_amp_modeler_lv2_win_x64v3.zip
- name: Upload Plugin Asset
uses: actions/upload-release-asset@v1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ needs.create_release.outputs.upload_url }}
asset_path: ./neural_amp_modeler_lv2_win_x64v3.zip
asset_name: neural_amp_modeler_lv2_win_x64v3.zip
asset_content_type: application/zip
+2 -2
View File
@@ -1,10 +1,10 @@
cmake_minimum_required(VERSION 3.10)
project(NeuralAmpModelerLv2 VERSION 0.1.5)
project(NeuralAmpModelerLv2 VERSION 0.1.6)
set(CMAKE_MODULE_PATH "${CMAKE_SOURCE_DIR}/cmake")
set(CMAKE_CXX_STANDARD 17)
set(CMAKE_CXX_STANDARD 20)
set(CMAKE_CXX_STANDARD_REQUIRED OFF)
set(CMAKE_CXX_EXTENSIONS OFF)
+15 -5
View File
@@ -1,8 +1,8 @@
# neural-amp-modeler-lv2
Bare-bones implementation of [Neural Amp Modeler](https://github.com/sdatkinson/neural-amp-modeler) (NAM) models in an LV2 plugin.
LV2 plugin for using neural network machine learning amp models.
**There is no user interface**. Setting the model to use requires that your LV2 host supports atom:Path parameters. Reaper does as of v6.82. Carla and Ardour do. If your favorite LV2 host does not support atom:Path, let them know you want it.
**There is no custom plugin user interface**. Setting the model to use requires that your LV2 host supports atom:Path parameters. Reaper does as of v6.82. Carla and Ardour do. If your favorite LV2 host does not support atom:Path, let them know you want it.
If you are looking for a GUI version, @brummer10 [has one here](https://github.com/brummer10/neural-amp-modeler-ui) that works for Linux and Windows. You may also be interested in the version shipped with the [MOD Desktop App](https://github.com/moddevices/mod-desktop-app), or my digital pedalboard app [Stompbox](https://github.com/mikeoliphant/StompboxUI).
To get the intended behavior, **you must run your audio host at the same sample rate the model was trained at** (usually 48kHz) - no resampling is done by the plugin.
@@ -11,13 +11,21 @@ For amp-only models (the most typical), **you will need to run an impulse repons
### Models and Performance
The plugin supports both [Neural Amp Modeler (NAM)](https://github.com/sdatkinson/neural-amp-modeler) models and [RTNeural keras json models](https://github.com/jatinchowdhury18/RTNeural) (like those used by [Aida-X](https://github.com/AidaDSP/AIDA-X)).
The best source of models is [ToneHunt](https://tonehunt.org/).
NAM models are generally quite expensive to run. This isn't (much of) an issue on modern PCs, but you may have trouble running on less powerful hardware.
NAM WaveNet models are generally quite expensive to run. This isn't (much of) an issue on modern PCs, but you may have trouble running on less powerful hardware.
A Raspberry Pi 4 running a 64bit OS can run "standard" NAM models with a bit of room to spare for a cabinet IR and some lightweight effects.
If you are having trouble running a "standard" model, try looking for "feather" (the least expensive) models. You can find a list of ["feather"-tagged models on ToneHunt](https://tonehunt.org/?tags=feather-mdl). Note that tagging models is up to the submitter, so not all "feather" models are tagged as such - you should be able to find more if you dig around.
If you are having trouble running a "standard" model, try looking for "feather", or even "nano" (the least expensive) models. You can find a list of ["feather"-tagged models on ToneHunt](https://tonehunt.org/models?tags%5B0%5D=feather-mdl). Note that tagging models is up to the submitter, so not all "feather" models are tagged as such - you should be able to find more if you dig around.
For more information on model type support, see the [NeuralAudio](https://github.com/mikeoliphant/NeuralAudio) repository, which is where the model handling code lives.
## Input Calibration
The expected input level to the plugin is 12dBu. For models that include input level information, they will be calibrated against this level. If you know the input level of your audio interface, you should adjust the input level relative to the expected 12dBu to provide the appropriate signal level to the model.
### Building
@@ -47,4 +55,6 @@ After building, the plugin will be in **build/neural_amp_modeler.lv2**.
### Optimization
If you have a relatively modern x64 processor, you can pass "**‑DUSE_NATIVE_ARCH=ON**" on your cmake command line to enable certain processor-specific optimizations.
If you have a relatively modern x64 processor, you can pass ```-DUSE_NATIVE_ARCH=ON``` on your cmake command line to enable certain processor-specific optimizations.
You can also alter the default model loading behavior with ```-DLSTM_PREFER_NAM=ON``` (use NAM Core instead of RTNeural for NAM LSTM models) and ```-DWAVENET_PREFER_NAM=ON``` (use NAM Core instead of RTNeural for NAM WaveNet models).
+1 -1
View File
@@ -16,7 +16,7 @@
<@NAM_LV2_ID@#model>
a lv2:Parameter;
mod:fileTypes "nam,nammodel,json";
mod:fileTypes "nam,nammodel,json,aidax";
rdfs:label "Neural Model";
rdfs:range atom:Path.
+45 -26
View File
@@ -1,8 +1,29 @@
add_subdirectory(../deps/NeuralAudio NeuralAudio)
if (MSVC)
add_compile_options(
"$<$<CONFIG:DEBUG>:/W4>"
"$<$<CONFIG:RELEASE>:/O2>"
)
else()
add_compile_options(
-Wall
# -Wpedantic -Wextra -Wstrict-aliasing -Wunreachable-code -Weffc++ -Wno-unused-parameter
"$<$<CONFIG:DEBUG>:-Og;-ggdb>"
"$<$<CONFIG:RELWITHDEBINFO>:-Ofast>"
"$<$<CONFIG:RELEASE>:-Ofast>"
)
endif()
include_directories(SYSTEM ../deps/NeuralAudio)
include_directories(SYSTEM ../deps/lv2/include)
include_directories(SYSTEM ../deps/denormal)
if (CMAKE_SYSTEM_PROCESSOR MATCHES "(amd64)|(AMD64)|(x86_64)")
option(USE_NATIVE_ARCH "Enable architecture-specific optimizations" OFF)
if (USE_NATIVE_ARCH)
add_compile_options(-march=x86-64-v3)
message("Enabling -march=x86-64-v3")
endif (USE_NATIVE_ARCH)
endif ()
add_subdirectory(../deps/NeuralAudio NeuralAudio)
set(SOURCES nam_lv2.cpp
nam_plugin.cpp
@@ -12,6 +33,10 @@ set(NA_SOURCES ../deps/NeuralAudio/NeuralAudio/NeuralModel.h)
add_library(neural_amp_modeler SHARED ${SOURCES} ${NA_SOURCES})
target_include_directories(neural_amp_modeler PUBLIC ../deps/NeuralAudio)
target_include_directories(neural_amp_modeler PUBLIC ../deps/lv2/include)
target_include_directories(neural_amp_modeler PUBLIC ../deps/denormal)
target_link_libraries(neural_amp_modeler PRIVATE NeuralAudio)
source_group(TREE ${CMAKE_CURRENT_SOURCE_DIR} FILES ${SOURCES})
@@ -23,14 +48,23 @@ if (DISABLE_DENORMALS)
add_definitions(-DDISABLE_DENORMALS)
endif (DISABLE_DENORMALS)
if (CMAKE_SYSTEM_PROCESSOR MATCHES "(amd64)|(AMD64)|(x86_64)")
option(USE_NATIVE_ARCH "Enable architecture-specific optimizations" OFF)
option(LSTM_PREFER_NAM "Always use NAM Core for NAM LSTM models" ON)
if (USE_NATIVE_ARCH)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -march=x86-64-v3")
message("Enabling -march=x86-64-v3")
endif (USE_NATIVE_ARCH)
endif ()
if (LSTM_PREFER_NAM)
add_definitions(-DLSTM_PREFER_NAM)
message("Using NAM Core for LSTM models")
else()
message("Using RTNeural for LSTM models")
endif (LSTM_PREFER_NAM)
option(WAVENET_PREFER_NAM "Always use NAM Core for NAM WaveNet models" OFF)
if (WAVENET_PREFER_NAM)
add_definitions(-DWAVENET_PREFER_NAM)
message("Using NAM Core for WaveNet models")
else()
message("Using RTNeural for WaveNet models")
endif (WAVENET_PREFER_NAM)
set_target_properties(neural_amp_modeler
PROPERTIES
@@ -44,18 +78,3 @@ set_target_properties(neural_amp_modeler
if (CMAKE_SYSTEM_NAME STREQUAL "Windows")
target_compile_definitions(neural_amp_modeler PRIVATE NOMINMAX WIN32_LEAN_AND_MEAN)
endif()
if (MSVC)
target_compile_options(neural_amp_modeler PRIVATE
"$<$<CONFIG:DEBUG>:/W4>"
"$<$<CONFIG:RELEASE>:/O2>"
)
else()
target_compile_options(neural_amp_modeler PRIVATE
-Wall
# -Wpedantic -Wextra -Wstrict-aliasing -Wunreachable-code -Weffc++ -Wno-unused-parameter
"$<$<CONFIG:DEBUG>:-Og;-ggdb>"
"$<$<CONFIG:RELWITHDEBINFO>:-Ofast>"
"$<$<CONFIG:RELEASE>:-Ofast>"
)
endif()
+2 -2
View File
@@ -14,8 +14,8 @@
#include "nam_plugin.h"
// LV2 Functions
static LV2_Handle instantiate(const LV2_Descriptor*, double rate, const char*, const LV2_Feature* const* features
) {
static LV2_Handle instantiate(const LV2_Descriptor*, double rate, const char*, const LV2_Feature* const* features)
{
try
{
auto nam = std::make_unique<NAM::Plugin>();
+34 -18
View File
@@ -11,7 +11,23 @@ namespace NAM {
Plugin::Plugin()
{
// prevent allocations on the audio thread
currentModelPath.reserve(MAX_FILE_NAME+1);
currentModelPath.reserve(MAX_FILE_NAME + 1);
NeuralAudio::NeuralModel::SetLSTMLoadMode(
#ifdef LSTM_PREFER_NAM
NeuralAudio::PreferNAMCore
#else
NeuralAudio::PreferRTNeural
#endif
);
NeuralAudio::NeuralModel::SetWaveNetLoadMode(
#ifdef WAVENET_PREFER_NAM
NeuralAudio::PreferNAMCore
#else
NeuralAudio::PreferRTNeural
#endif
);
}
Plugin::~Plugin()
@@ -107,31 +123,24 @@ namespace NAM {
lv2_log_trace(&nam->logger, "Staging model change: `%s`\n", msg->path);
model = NeuralAudio::NeuralModel::CreateFromFile(msg->path);
// Pre-run model to ensure all needed buffers are allocated in advance
//if (const int32_t numSamples = nam->maxBufferSize)
//{
// float* buffer = new float[numSamples];
// memset(buffer, 0, numSamples * sizeof(float));
// model->Process(buffer, buffer, numSamples);
// //model->finalize_(numSamples);
// delete[] buffer;
//}
}
response.model = model;
if (model != nullptr)
{
response.model = model;
memcpy(response.path, msg->path, pathlen);
memcpy(response.path, msg->path, pathlen);
}
}
catch (const std::exception&)
{
}
if (model == nullptr)
{
response.path[0] = '\0';
lv2_log_error(&nam->logger, "Unable to load model from: '%s'\n", msg->path);
//result = LV2_WORKER_ERR_UNKNOWN;
}
respond(handle, sizeof(response), &response);
@@ -181,6 +190,13 @@ namespace NAM {
return LV2_WORKER_SUCCESS;
}
void Plugin::set_max_buffer_size(int size) noexcept
{
maxBufferSize = size;
NeuralAudio::NeuralModel::SetDefaultMaxAudioBufferSize(size);
}
void Plugin::process(uint32_t n_samples) noexcept
{
lv2_atom_forge_set_buffer(&atom_forge, (uint8_t*)ports.notify, ports.notify->atom.size);
@@ -317,7 +333,7 @@ namespace NAM {
{
if (options[i].key == nam->uris.bufSize_maxBlockLength && options[i].type == nam->uris.atom_Int)
{
nam->maxBufferSize = *(const int32_t*)options[i].value;
nam->set_max_buffer_size(*(const int32_t*)options[i].value);
break;
}
}
+2 -1
View File
@@ -79,6 +79,7 @@ namespace NAM {
~Plugin();
bool initialize(double rate, const LV2_Feature* const* features) noexcept;
void set_max_buffer_size(int size) noexcept;
void process(uint32_t n_samples) noexcept;
void write_current_path();
@@ -118,6 +119,6 @@ namespace NAM {
float inputLevel = 0;
float outputLevel = 0;
int32_t maxBufferSize = 0;
int32_t maxBufferSize = 512;
};
}