Merge branch 'params-kr' into ParametersWithTuples

# Conflicts:
#	include/FluidSCWrapper.hpp
nix
Owen Green 7 years ago
commit 64387f868c

@ -30,6 +30,8 @@ MACRO(SUBDIRLIST result curdir)
ENDMACRO()
set(FLUID_PATH ~/fluid_decomposition CACHE PATH "The top level of the fluid_decomposition repo")
set(LOCAL_INCLUDES ${CMAKE_CURRENT_SOURCE_DIR}/include)
get_filename_component(FLUID_ABS_PATH ${FLUID_PATH} ABSOLUTE)
message(${FLUID_ABS_PATH})

@ -1,4 +1,4 @@
#pragma once
#pragma once
#include "SCBufferAdaptor.hpp"
#include <clients/common/FluidBaseClient.hpp>
@ -16,7 +16,7 @@
namespace fluid {
namespace client {
template <typename Client> class FluidSCWrapper;
template <typename Client, typename Params> class FluidSCWrapper;
namespace impl {
@ -25,120 +25,160 @@ template <size_t N, typename T> struct ArgumentGetter;
template <size_t N, typename T> struct ControlGetter;
template <typename T> using msg_iter_method = T (sc_msg_iter::*)(T);
template <size_t N, typename T, msg_iter_method<T> Method> struct GetArgument
{
T operator()(World* w, sc_msg_iter *args)
{
T r = (args->*Method)(0);
return r;
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//Iterate over kr/ir inputs via callbacks from params object
struct FloatControlsIter
{
FloatControlsIter(float** vals, size_t N):mValues(vals), mSize(N) {}
float next()
{
assert(mCount < mSize);
return *mValues[mCount++];
assert(mCount < mSize && "Boundary error fail horror");
float f = *mValues[mCount++];
return f;
}
// float operator[](size_t i)
// {
// assert(i < mSize);
// return *mValues[i];
// }
void reset(float** vals)
{
mValues = vals;
mCount = 0;
}
private:
float** mValues;
size_t mSize;
size_t mCount{0};
};
//General case
template <size_t N, typename T> struct GetControl
{
T operator()(World*, FloatControlsIter& controls) { return controls.next(); }
};
template <size_t N> struct ArgumentGetter<N, FloatT> : public GetArgument<N, float, &sc_msg_iter::getf>
{};
template <size_t N> struct ArgumentGetter<N, LongT> : public GetArgument<N, int32, &sc_msg_iter::geti>
{};
template <size_t N> struct ArgumentGetter<N, EnumT> : public GetArgument<N, int32, &sc_msg_iter::geti>
template <size_t N, typename T> struct ControlGetter : public GetControl<N, typename T::type>
{};
template <size_t N> struct ArgumentGetter<N, BufferT>
//Specializations
template <size_t N> struct ControlGetter<N, BufferT>
{
auto operator()(World* w, sc_msg_iter *args)
auto operator() (World* w, FloatControlsIter& iter)
{
typename LongT::type bufnum = iter.next();
return std::unique_ptr<BufferAdaptor>(bufnum >= 0 ? new SCBufferAdaptor(bufnum,w): nullptr);
}
};
long bufnum = args->geti(-1);
template<size_t N>
struct ControlGetter<N,FloatPairsArrayT>
{
typename FloatPairsArrayT::type operator()(World*, FloatControlsIter& iter)
{
return {{iter.next(),iter.next()},{iter.next(),iter.next()}};
}
};
return std::unique_ptr<BufferAdaptor>(new SCBufferAdaptor(bufnum,w));
template<size_t N>
struct ControlGetter<N,FFTParamsT>
{
typename FFTParamsT::type operator()(World*, FloatControlsIter& iter)
{
return {static_cast<long>(iter.next()),static_cast<long>(iter.next()),static_cast<long>(iter.next())};
}
};
template <size_t N> struct ArgumentGetter<N, FloatPairsArrayT>
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// Iterate over arguments in sc_msg_iter, via callbacks from params object
template <size_t N, typename T, msg_iter_method<T> Method> struct GetArgument
{
typename FloatPairsArrayT::type operator()(World* w, sc_msg_iter *args)
T operator()(World* w, sc_msg_iter *args)
{
return {{args->getf(),args->getf()},{args->getf(),args->getf()}};
T r = (args->*Method)(T{0});
return r;
}
};
//General cases
template <size_t N> struct ArgumentGetter<N, FloatT> : public GetArgument<N, float, &sc_msg_iter::getf>
{};
template <size_t N> struct ArgumentGetter<N, LongT> : public GetArgument<N, int32, &sc_msg_iter::geti>
{};
template <size_t N, typename T> struct ControlGetter : public GetControl<N, typename T::type>
template <size_t N> struct ArgumentGetter<N, EnumT> : public GetArgument<N, int32, &sc_msg_iter::geti>
{};
//Specializations
template <size_t N> struct ArgumentGetter<N, BufferT>
{
auto operator() (World* w, sc_msg_iter *args)
{
typename LongT::type bufnum = args->geti(-1);
return std::unique_ptr<BufferAdaptor>(bufnum >= 0 ? new SCBufferAdaptor(bufnum,w) : nullptr);
}
};
template <size_t N> struct ControlGetter<N, BufferT>
template <size_t N> struct ArgumentGetter<N, FloatPairsArrayT>
{
auto operator()(World* w, FloatControlsIter& iter)
typename FloatPairsArrayT::type operator()(World* w, sc_msg_iter *args)
{
long bufnum = iter.next();
return std::unique_ptr<BufferAdaptor>(new SCBufferAdaptor(bufnum,w));
return {{args->getf(),args->getf()},{args->getf(),args->getf()}};
}
};
template<size_t N>
struct ControlGetter<N,FloatPairsArrayT>
template <size_t N> struct ArgumentGetter<N, FFTParamsT>
{
typename FloatPairsArrayT::type operator()(World*, FloatControlsIter& iter)
typename FFTParamsT::type operator()(World* w, sc_msg_iter *args)
{
return {{iter.next(),iter.next()},{iter.next(),iter.next()}};
return {args->geti(),args->geti(),args->geti()};
}
};
//template <size_t N, typename
template <class Wrapper> class RealTime : public SCUnit
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//Real Time Processor
template <typename Client,class Wrapper, class Params> class RealTime : public SCUnit
{
using HostVector = FluidTensorView<float, 1>;
// using Client = typename Wrapper::ClientType;
public:
static void setup(InterfaceTable *ft, const char *name) { registerUnit<Wrapper>(ft, name); }
RealTime(): mControlsIterator{mInBuf + mSpecialIndex + 1,mNumInputs - mSpecialIndex - 1}
static void setup(InterfaceTable *ft, const char *name)
{
registerUnit<Wrapper>(ft, name);
ft->fDefineUnitCmd(name,"latency",doLatency);
}
static void doLatency(Unit *unit, sc_msg_iter *args)
{
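// reply to the 'latency' unit command by sending the client's reported latency back as a single float via a node reply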
float l[] {static_cast<float>(static_cast<Wrapper*>(unit)->mClient.latency())};
auto ft = Wrapper::getInterfaceTable();
ft->fSendNodeReply(&unit->mParent->mNode,-1,Wrapper::getName(), 1, l);
}
RealTime():
mControlsIterator{mInBuf + mSpecialIndex + 1,mNumInputs - mSpecialIndex - 1},
mParams{*Wrapper::getParamDescriptors()},
mClient{Wrapper::setParams(mParams,mWorld->mVerbosity > 0, mWorld, mControlsIterator)}
{}
//mControlsIterator{nullptr,0} {}
void init()
{
Wrapper *w = static_cast<Wrapper *>(this);
auto &mClient = w->client();
assert(!(mClient.audioChannelsOut() > 0 && mClient.controlChannelsOut() > 0) && "Client can't have both audio and control outputs");
mInputConnections.reserve(mClient.audioChannelsIn());
mOutputConnections.reserve(mClient.audioChannelsOut());
mAudioInputs.reserve(mClient.audioChannelsIn());
mAudioOutputs.reserve(mClient.audioChannelsOut());
mOutputs.reserve(std::max(mClient.audioChannelsOut(),mClient.controlChannelsOut()));
for (int i = 0; i < mClient.audioChannelsIn(); ++i)
{
mInputConnections.emplace_back(isAudioRateIn(i));
@ -148,7 +188,12 @@ public:
for (int i = 0; i < mClient.audioChannelsOut(); ++i)
{
mOutputConnections.emplace_back(true);
mAudioOutputs.emplace_back(nullptr, 0, 0);
mOutputs.emplace_back(nullptr, 0, 0);
}
for (int i = 0; i < mClient.controlChannelsOut(); ++i)
{
mOutputs.emplace_back(nullptr, 0, 0);
}
set_calc_function<RealTime, &RealTime::next>();
@ -157,74 +202,95 @@ public:
void next(int n)
{
Wrapper *w = static_cast<Wrapper *>(this);
auto &client = w->client();
mControlsIterator.reset(mInBuf + client.audioChannelsIn());
w->setParams( mWorld->mVerbosity > 0, mWorld,mControlsIterator); // forward on inputs N + audio inputs as params
mControlsIterator.reset(mInBuf + 1); //mClient.audioChannelsIn());
Wrapper::setParams(mParams,mWorld->mVerbosity > 0, mWorld,mControlsIterator); // forward on inputs N + audio inputs as params
const Unit *unit = this;
for (int i = 0; i < client.audioChannelsIn(); ++i)
for (int i = 0; i < mClient.audioChannelsIn(); ++i)
{
if (mInputConnections[i]) mAudioInputs[i].reset(IN(i), 0, fullBufferSize());
}
for (int i = 0; i < client.audioChannelsOut(); ++i)
for (int i = 0; i < mClient.audioChannelsOut(); ++i)
{
if (mOutputConnections[i]) mOutputs[i].reset(out(i), 0, fullBufferSize());
}
for(int i = 0; i < mClient.controlChannelsOut();++i)
{
if (mOutputConnections[i]) mAudioOutputs[i].reset(out(i), 0, fullBufferSize());
mOutputs[i].reset(out(i),0,1);
}
client.process(mAudioInputs, mAudioOutputs);
mClient.process(mAudioInputs, mOutputs);
}
private:
std::vector<bool> mInputConnections;
std::vector<bool> mOutputConnections;
std::vector<HostVector> mAudioInputs;
std::vector<HostVector> mAudioOutputs;
std::vector<HostVector> mOutputs;
FloatControlsIter mControlsIterator;
protected:
ParameterSet<Params> mParams;
Client mClient;
};
template <class Wrapper> class NonRealTime
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// Non Real Time Processor
template <typename Client, typename Wrapper, typename Params> class NonRealTime
{
public:
static void setup(InterfaceTable *ft, const char *name) { DefinePlugInCmd(name, launch, nullptr); }
NonRealTime() {}
NonRealTime(World *world,sc_msg_iter *args):
mParams{*Wrapper::getParamDescriptors()},
mClient{mParams}
{}
void init(){};
static void launch(World *world, void *inUserData, struct sc_msg_iter *args, void *replyAddr)
{
Wrapper *w = new Wrapper(); //this has to be on the heap, because it doesn't get destroyed until the async command is done
w->parseBuffers(w, world, args);
Wrapper *w = new Wrapper(world,args); //this has to be on the heap, because it doesn't get destroyed until the async command is done
int argsPosition = args->count;
auto argsRdPos = args->rdpos;
Result result = validateParameters(w, world, args);
if (!result.ok())
{
std::cout << "FluCoMa Error " << Wrapper::getName() << ": " << result.message().c_str();
delete w;
return;
}
args->count = argsPosition;
w->setParams(false, world, args);
args->rdpos = argsRdPos;
Wrapper::setParams(w->mParams,false, world, args);
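// copy out any completion message appended to the OSC packet and hand it to the async command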
size_t msgSize = args->getbsize();
char * completionMsgData = 0;
std::vector<char> completionMessage(msgSize);
// char * completionMsgData = 0;
if (msgSize)
{
completionMsgData = (char *) world->ft->fRTAlloc(world, msgSize);
args->getb(completionMsgData, msgSize);
args->getb(completionMessage.data(), msgSize);
}
world->ft->fDoAsynchronousCommand(world, replyAddr, Wrapper::getName(), w, process, exchangeBuffers, tidyUp, destroy,
msgSize, completionMsgData);
world->ft->fDoAsynchronousCommand(world, replyAddr, Wrapper::getName(), w, process, exchangeBuffers, tidyUp, destroy,msgSize, completionMessage.data());
}
static bool process(World *world, void *data) { return static_cast<Wrapper *>(data)->process(world); }
static bool exchangeBuffers(World *world, void *data) { return static_cast<Wrapper *>(data)->exchangeBuffers(world); }
static bool tidyUp(World *world, void *data) { return static_cast<Wrapper *>(data)->tidyUp(world); }
static void destroy(World *world, void *data) { delete static_cast<Wrapper *>(data); }
static void destroy(World *world, void *data)
{
// void* c = static_cast<Wrapper *>(data)->mCompletionMessage;
// if(c) world->ft->fRTFree(world,c);
delete static_cast<Wrapper *>(data);
}
protected:
ParameterSet<Params> mParams;
Client mClient;
private:
static Result validateParameters(Wrapper *w, World* world, sc_msg_iter *args)
static Result validateParameters(NonRealTime *w, World* world, sc_msg_iter *args)
{
auto &c = w->client();
auto results = c.template checkParameterValues<ArgumentGetter>(world, args);
auto results = w->mParams.template checkParameterValues<ArgumentGetter>(world, args);
for (auto &r : results)
{
std::cout << r.message() << '\n';
@ -233,107 +299,122 @@ private:
return {};
}
void parseBuffers(Wrapper *w, World *world, sc_msg_iter *args)
{
auto &c = w->client();
mBuffersIn.reserve(c.audioBuffersIn());
mInputs.reserve(c.audioBuffersIn());
mBuffersOut.reserve(c.audioBuffersOut());
mOutputs.reserve(c.audioBuffersOut());
for (int i = 0; i < c.audioBuffersIn(); i++)
{
mBuffersIn.emplace_back(args->geti(0), world);
mInputs.emplace_back();
mInputs[i].buffer = &mBuffersIn[i];
mInputs[i].startFrame = args->geti(0);
mInputs[i].nFrames = args->geti(0);
mInputs[i].startChan = args->geti(0);
mInputs[i].nChans = args->geti(0);
}
for (int i = 0; i < c.audioBuffersOut(); i++)
{
mBuffersOut.emplace_back(args->geti(0), world);
mOutputs.emplace_back();
mOutputs[i].buffer = &mBuffersOut[i];
}
}
bool process(World *world)
{
Wrapper *wrapper = static_cast<Wrapper *>(this);
Result r = wrapper->client().process(mInputs, mOutputs);
Result r = mClient.process();///mInputs, mOutputs);
if(!r.ok())
{
std::cout << "FluCoMa Error " << Wrapper::getName() << ": " << r.message().c_str();
return false;
return false;
}
return true;
}
bool exchangeBuffers(World *world)
{
for (auto &b : mBuffersOut) b.assignToRT(world);
mParams.template forEachParamType<BufferT,AssignBuffer>(world);
// for (auto &b : mBuffersOut) b.assignToRT(world);
return true;
}
bool tidyUp(World *world)
{
for (auto &b : mBuffersIn) b.cleanUp();
for (auto &b : mBuffersOut) b.cleanUp();
// for (auto &b : mBuffersIn) b.cleanUp();
// for (auto &b : mBuffersOut) b.cleanUp()
mParams.template forEachParamType<BufferT,CleanUpBuffer>();
return true;
}
std::vector<SCBufferAdaptor> mBuffersIn;
std::vector<SCBufferAdaptor> mBuffersOut;
std::vector<BufferProcessSpec> mInputs;
std::vector<BufferProcessSpec> mOutputs;
void * mReplyAddr;
const char * mName;
template<size_t N,typename T>
struct AssignBuffer
{
void operator()(typename BufferT::type& p, World* w)
{
if(auto b = static_cast<SCBufferAdaptor*>(p.get()))
b->assignToRT(w);
}
};
template<size_t N,typename T>
struct CleanUpBuffer
{
void operator()(typename BufferT::type& p)
{
if(auto b = static_cast<SCBufferAdaptor*>(p.get()))
b->cleanUp();
}
};
// std::vector<SCBufferAdaptor> mBuffersIn;
// std::vector<SCBufferAdaptor> mBuffersOut;
// std::vector<BufferProcessSpec> mInputs;
// std::vector<BufferProcessSpec> mOutputs;
char * mCompletionMessage = nullptr;
void * mReplyAddr = nullptr;
const char * mName = nullptr;
};
template <typename Wrapper> class NonRealTimeAndRealTime : public RealTime<Wrapper>, public NonRealTime<Wrapper>
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// An impossible monstrosity
template <typename Client, typename Wrapper, typename Params> class NonRealTimeAndRealTime : public RealTime<Client,Wrapper, Params>, public NonRealTime<Client,Wrapper, Params>
{
static void setup(InterfaceTable *ft, const char *name)
{
RealTime<Wrapper>::setup(ft, name);
NonRealTime<Wrapper>::setup(ft, name);
RealTime<Client,Wrapper,Params >::setup(ft, name);
NonRealTime<Client,Wrapper, Params>::setup(ft, name);
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Template Specialisations for NRT/RT
template <typename Wrapper, typename NRT, typename RT> class FluidSCWrapperImpl;
template <typename Client, typename Wrapper, typename Params, typename NRT, typename RT> class FluidSCWrapperImpl;
template <typename Wrapper> class FluidSCWrapperImpl<Wrapper, std::true_type, std::false_type> : public NonRealTime<Wrapper>
{};
template <typename Client, typename Wrapper, typename Params> class FluidSCWrapperImpl<Client, Wrapper, Params, std::true_type, std::false_type> : public NonRealTime<Client, Wrapper, Params>
{
public:
FluidSCWrapperImpl(World* w, sc_msg_iter *args): NonRealTime<Client, Wrapper, Params>(w,args){};
};
template <typename Wrapper> class FluidSCWrapperImpl<Wrapper, std::false_type, std::true_type> : public RealTime<Wrapper>
template <typename Client, typename Wrapper, typename Params> class FluidSCWrapperImpl<Client, Wrapper,Params, std::false_type, std::true_type> : public RealTime<Client, Wrapper, Params>
{};
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Make base class(es), full of CRTP mixin goodness
template <typename Client>
using FluidSCWrapperBase = FluidSCWrapperImpl<FluidSCWrapper<Client>, isNonRealTime<Client>, isRealTime<Client>>;
template <typename Client,typename Params>
using FluidSCWrapperBase = FluidSCWrapperImpl<Client, FluidSCWrapper<Client, Params>,Params, isNonRealTime<Client>, isRealTime<Client>>;
} // namespace impl
template <typename Client> class FluidSCWrapper : public impl::FluidSCWrapperBase<Client>
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
///The main wrapper
template <typename C, typename P> class FluidSCWrapper : public impl::FluidSCWrapperBase<C,P>
{
public:
using ClientType = Client;
using Client = C;
using Params = P;
FluidSCWrapper() //mParams{*getParamDescriptors()}, //impl::FluidSCWrapperBase<Client,Params>()
{ impl::FluidSCWrapperBase<Client,Params>::init(); }
FluidSCWrapper(World* w, sc_msg_iter *args): impl::FluidSCWrapperBase<Client, Params>(w,args)
{ impl::FluidSCWrapperBase<Client, Params>::init(); }
FluidSCWrapper() { impl::FluidSCWrapperBase<Client>::init(); }
static const char *getName(const char *setName = nullptr)
{
static const char *name = nullptr;
return (name = setName ? setName : name);
}
static Params *getParamDescriptors(Params *setParams = nullptr)
{
static Params* descriptors = nullptr;
return (descriptors = setParams ? setParams : descriptors);
}
static InterfaceTable *getInterfaceTable(InterfaceTable *setTable = nullptr)
{
@ -341,32 +422,40 @@ public:
return (ft = setTable ? setTable : ft);
}
static void setup(InterfaceTable *ft, const char *name)
static void setup(Params& p, InterfaceTable *ft, const char *name)
{
getName(name);
getInterfaceTable(ft);
impl::FluidSCWrapperBase<Client>::setup(ft, name);
getParamDescriptors(&p);
impl::FluidSCWrapperBase<Client, Params>::setup(ft, name);
}
auto setParams(bool verbose, World* world, impl::FloatControlsIter& inputs)
template<typename ParameterSet>
static auto& setParams(ParameterSet& p, bool verbose, World* world, impl::FloatControlsIter& inputs)
{
return mClient.template setParameterValues<impl::ControlGetter>(verbose, world, inputs);
p.template setParameterValues<impl::ControlGetter>(verbose, world, inputs);
return p;
}
auto setParams(bool verbose, World* world, sc_msg_iter *args)
template<typename ParameterSet>
static auto& setParams(ParameterSet& p, bool verbose, World* world, sc_msg_iter *args)
{
return mClient.template setParameterValues<impl::ArgumentGetter>(verbose,world, args);
p.template setParameterValues<impl::ArgumentGetter>(verbose,world, args);
return p;
}
Client &client() { return mClient; }
// impl::ParameterSet<Params> mParams;
private:
Client mClient;
// Client &client() { return mClient; }
//
//private:
// Client mClient;
};
template <typename Client> void makeSCWrapper(InterfaceTable *ft, const char *name)
template <template <typename...> class Client,typename...Rest,typename Params>
void makeSCWrapper(const char *name, Params& params, InterfaceTable *ft)
{
FluidSCWrapper<Client>::setup(ft, name);
FluidSCWrapper<Client<ParameterSet<Params>,Rest...>, Params>::setup(params, ft, name);
}
} // namespace client

@ -66,15 +66,18 @@ public:
SCBufferAdaptor& operator=(SCBufferAdaptor&&) = default;
SCBufferAdaptor(long bufnum, World *world, bool rt = false)
SCBufferAdaptor(long bufnum,World *world, bool rt = false)
: NRTBuf(world, bufnum, rt)
, mBufnum(bufnum)
, mWorld(world)
{
}
SCBufferAdaptor() = default;
~SCBufferAdaptor(){ cleanUp(); }
void assignToRT(World *rtWorld)
{
SndBuf *rtBuf = World_GetBuf(rtWorld, mBufnum);
@ -84,7 +87,11 @@ public:
void cleanUp()
{
if (mOldData) boost::alignment::aligned_free(mOldData);
if (mOldData)
{
boost::alignment::aligned_free(mOldData);
mOldData = nullptr;
}
}
// No locks in (vanilla) SC, so no-ops for these
@ -144,10 +151,8 @@ public:
thisThing->samplerate);
}
int bufnum()
{
return mBufnum;
}
int bufnum() { return mBufnum; }
void realTime(bool rt) { mRealTime = rt; }
protected:
bool equal(BufferAdaptor *rhs) const override
@ -157,10 +162,11 @@ protected:
return false;
}
float *mOldData = 0;
bool mRealTime{false};
float *mOldData{0};
long mBufnum;
World *mWorld;
size_t mRank = 1;
size_t mRank{1};
};
class RTBufferView : public client::BufferAdaptor

@ -1,14 +1,11 @@
FluidBufCompose{
*process { arg server, srcBufNumA, startAtA = 0, nFramesA = -1, startChanA = 0, nChansA = -1, srcGainA = 1, dstStartAtA = 0, dstStartChanA = 0, srcBufNumB, startAtB = 0, nFramesB = -1, startChanB = 0, nChansB = -1, srcGainB = 1, dstStartAtB = 0, dstStartChanB = 0, dstBufNum;
*process { arg server, srcBufNum, startAt = 0, nFrames = -1, startChan = 0, nChans = -1, srcGain = 1, dstBufNum, dstStartAt = 0, dstStartChan = 0;
if(srcBufNumA.isNil) {Error("Invalid Buffer").format(thisMethod.name, this.class.name).throw};
if(srcBufNumB.isNil) {Error("Invalid Buffer").format(thisMethod.name, this.class.name).throw};
if(srcBufNum.isNil) {Error("Invalid Buffer").format(thisMethod.name, this.class.name).throw};
if(srcBufNum.isNil) {Error("Invalid Buffer").format(thisMethod.name, this.class.name).throw};
if(dstBufNum.isNil) {Error("Invalid Buffer").format(thisMethod.name, this.class.name).throw};
server = server ? Server.default;
server.sendMsg(\cmd, \BufCompose, srcBufNumA, startAtA, nFramesA, startChanA, nChansA, srcGainA, dstStartAtA, dstStartChanA,
srcBufNumB, startAtB, nFramesB, startChanB, nChansB, srcGainB, dstStartAtB, dstStartChanB,
dstBufNum);
server.sendMsg(\cmd, \BufCompose, srcBufNum, startAt, nFrames, startChan, nChans, srcGain, dstBufNum,dstStartAt, dstStartChan);
}
}

@ -7,6 +7,9 @@ FluidBufHPSS{
harmBufNum = harmBufNum ? -1;
percBufNum = percBufNum ? -1;
server.sendMsg(\cmd, \BufHPSS, srcBufNum, startAt, nFrames, startChan, nChans, harmBufNum, percBufNum, resBufNum, percFiltSize, harmFiltSize, modeFlag, htf1, hta1, htf2, hta2, ptf1, pta1, ptf2, pta2, winSize, hopSize, fftSize);
//For wrapped RT clients, send maximal param values as aliases of the ones that are passed
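//concretely, the trailing fftSize, harmFiltSize and percFiltSize in the message below just repeat the values above, standing in for the wrapped plugin's maxima (cf. maxFFTSize, maxHSize, maxPSize on FluidHPSS)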
harmFiltSize.postln;
server.sendMsg(\cmd, \BufHPSS, srcBufNum, startAt, nFrames, startChan, nChans, harmBufNum, percBufNum, resBufNum, harmFiltSize,percFiltSize, modeFlag, htf1, hta1, htf2, hta2, ptf1, pta1, ptf2, pta2, winSize, hopSize, fftSize, fftSize,harmFiltSize, percFiltSize);
}
}

@ -1,5 +1,5 @@
FluidBufNMF {
*process { arg server, srcBufNum, startAt = 0, nFrames = -1, startChan = 0, nChans = -1, dstBufNum, dictBufNum, dictFlag = 0, actBufNum, actFlag = 0, rank = 1, nIter = 100, sortFlag = 0, winSize = 1024, hopSize = 256, fftSize = -1, winType = 0, randSeed = -1;
*process { arg server, srcBufNum, startAt = 0, nFrames = -1, startChan = 0, nChans = -1, dstBufNum, dictBufNum, dictFlag = 0, actBufNum, actFlag = 0, rank = 1, nIter = 100, sortFlag = 0, winSize = 1024, hopSize = 512, fftSize = -1, winType = 0, randSeed = -1;
if(srcBufNum.isNil) { Error("Invalid buffer").format(thisMethod.name, this.class.name).throw};

@ -7,6 +7,9 @@ FluidBufSines{
sineBufNum = sineBufNum ? -1;
resBufNum = resBufNum ? -1;
server.sendMsg(\cmd, \BufSines, srcBufNum, startAt, nFrames, startChan, nChans, sineBufNum, resBufNum, bandwidth, thresh, minTrackLen, magWeight, freqWeight, winSize, hopSize, fftSize);
//NB For wrapped versions of NRT classes, we set the params for maxima to
//whatever has been passed in on the language side (e.g. maxFFTSize still exists as a parameter for the server plugin, but makes less sense here: it just needs to be set to a legal value)
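//hence fftSize appears twice at the end of the message below: the second instance stands in for the plugin's maxFFTSize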
server.sendMsg(\cmd, \BufSines, srcBufNum, startAt, nFrames, startChan, nChans, sineBufNum, resBufNum, bandwidth, thresh, minTrackLen, magWeight, freqWeight, winSize, hopSize, fftSize, fftSize);
}
}

@ -1,11 +1,11 @@
FluidBufTransientSlice{
*process { arg server, srcBufNum, startAt = 0, nFrames = -1, startChan = 0, nChans = -1, transBufNum, order = 200, blockSize = 2048, padSize = 1024, skew = 0, threshFwd = 3, threshBack = 1.1, winSize = 14, debounce = 25;
*process { arg server, srcBufNum, startAt = 0, nFrames = -1, startChan = 0, nChans = -1, transBufNum, order = 200, blockSize = 2048, padSize = 1024, skew = 0, threshFwd = 3, threshBack = 1.1, winSize = 14, debounce = 25, minSlice = 1000;
if(srcBufNum.isNil) { Error("Invalid buffer").format(thisMethod.name, this.class.name).throw};
if(transBufNum.isNil) { Error("Invalid buffer").format(thisMethod.name, this.class.name).throw};
server = server ? Server.default;
server.sendMsg(\cmd, \BufTransientSlice, srcBufNum, startAt, nFrames, startChan, nChans, transBufNum, order, blockSize, padSize, skew, threshFwd, threshBack, winSize, debounce);
server.sendMsg(\cmd, \BufTransientSlice, srcBufNum, startAt, nFrames, startChan, nChans, transBufNum, order, blockSize, padSize, skew, threshFwd, threshBack, winSize, debounce, minSlice);
}
}

@ -7,6 +7,9 @@ FluidBufTransients {
transBufNum = transBufNum ? -1;
resBufNum = resBufNum ? -1;
("Source" + srcBufNum).postln;
("Trans" + transBufNum).postln;
("Res" + resBufNum).postln;
server.sendMsg(\cmd, \BufTransients, srcBufNum, startAt, nFrames, startChan, nChans, transBufNum, resBufNum, order, blockSize, padSize, skew, threshFwd, threshBack, winSize, debounce);
}
}

@ -1,6 +1,6 @@
FluidHPSS : MultiOutUGen {
*ar { arg in = 0, harmFiltSize=17, percFiltSize = 17, modeFlag=0, htf1 = 0.1, hta1 = 0, htf2 = 0.5, hta2 = 0, ptf1 = 0.1, pta1 = 0, ptf2 = 0.5, pta2 = 0, winSize= 1024, hopSize= 256, fftSize= -1;
^this.multiNew('audio', in.asAudioRateInput(this), percFiltSize, harmFiltSize, modeFlag, htf1, hta1, htf2, hta2, ptf1, pta1, ptf2, pta2, winSize, hopSize, fftSize)
*ar { arg in = 0, harmFiltSize=17, percFiltSize = 17, modeFlag=0, htf1 = 0.1, hta1 = 0, htf2 = 0.5, hta2 = 0, ptf1 = 0.1, pta1 = 0, ptf2 = 0.5, pta2 = 0, winSize= 1024, hopSize= 256, fftSize= -1, maxFFTSize = 16384, maxHSize = 101, maxPSize = 101;
^this.multiNew('audio', in.asAudioRateInput(this), percFiltSize, harmFiltSize, modeFlag, htf1, hta1, htf2, hta2, ptf1, pta1, ptf2, pta2, winSize, hopSize, fftSize, maxFFTSize, maxHSize, maxPSize)
}
init { arg ... theInputs;
inputs = theInputs;

@ -1,7 +1,7 @@
FluidNMFMatch : MultiOutUGen {
*kr { arg in = 0, dictBufNum, rank = 1, nIter = 10, winSize = 1024, hopSize = 256, fftSize = -1;
^this.multiNew('control', in, dictBufNum, rank, nIter, winSize, hopSize, fftSize);
*kr { arg in = 0, dictBufNum, maxRank = 1, nIter = 10, winSize = 1024, hopSize = 512, fftSize = -1, maxFFTSize = 16384;
^this.multiNew('control', in, dictBufNum, maxRank, nIter, winSize, hopSize, fftSize, maxFFTSize);
}
init {arg ...theInputs;

@ -1,6 +1,5 @@
FluidSTFTPass : UGen {
*ar { arg in = 0, windowSize= 1024, hopSize= 256, fftSize= -1, maxWinSize= 16384;
^this.multiNew('audio', in.asAudioRateInput(this),windowSize, hopSize, fftSize, maxWinSize)
*ar { arg in = 0, windowSize= 1024, hopSize= 256, fftSize= -1, maxFFTSize = 16384;
^this.multiNew('audio', in.asAudioRateInput(this),windowSize, hopSize, fftSize, maxFFTSize)
}
}
//

@ -1,6 +1,6 @@
FluidSines : MultiOutUGen {
*ar { arg in = 0, bandwidth = 76, thresh = 0.7, minTrackLen = 15, magWeight = 0.1, freqWeight = 1.0, winSize= 2048, hopSize= 512, fftSize= 8192;
^this.multiNew('audio', in.asAudioRateInput(this), bandwidth, thresh, minTrackLen, magWeight,freqWeight ,winSize, hopSize, fftSize)
*ar { arg in = 0, bandwidth = 76, thresh = 0.7, minTrackLen = 15, magWeight = 0.1, freqWeight = 1.0, winSize= 2048, hopSize= 512, fftSize= 8192, maxFFTSize=16384;
^this.multiNew('audio', in.asAudioRateInput(this), bandwidth, thresh, minTrackLen, magWeight,freqWeight ,winSize, hopSize, fftSize, maxFFTSize)
}
init { arg ... theInputs;
inputs = theInputs;

@ -1,5 +1,5 @@
FluidTransientSlice : UGen {
*ar { arg in = 0, order = 20, blockSize = 256, padSize = 128, skew = 0.0, threshFwd = 3.0, threshBack = 1.1, winSize=14, debounce=25;
^this.multiNew('audio', in.asAudioRateInput(this), order, blockSize, padSize, skew, threshFwd ,threshBack, winSize, debounce)
*ar { arg in = 0, order = 20, blockSize = 256, padSize = 128, skew = 0.0, threshFwd = 3.0, threshBack = 1.1, winSize=14, debounce=25, minSlice = 1000;
^this.multiNew('audio', in.asAudioRateInput(this), order, blockSize, padSize, skew, threshFwd ,threshBack, winSize, debounce, minSlice)
}
}

@ -183,4 +183,4 @@ f.play;
// compare with the original
b.play;
::
::

@ -115,55 +115,54 @@ Discussion::
EXAMPLES::
code::
//load buffers
(
b = Buffer.read(s,File.realpath(FluidBufHPSS.class.filenameSymbol).dirname.withTrailingSlash ++ "../AudioFiles/Tremblay-AaS-SynthTwoVoices-M.wav");
c = Buffer.new(s);
d = Buffer.new(s);
e = Buffer.new(s);
)
// run with basic parameters
(
Routine{
t = Main.elapsedTime;
FluidBufHPSS.process(s, b.bufnum, harmBufNum: c.bufnum, percBufNum: d.bufnum);
//load buffers
(
b = Buffer.read(s,File.realpath(FluidBufHPSS.class.filenameSymbol).dirname.withTrailingSlash ++ "../AudioFiles/Tremblay-AaS-SynthTwoVoices-M.wav");
c = Buffer.new(s);
d = Buffer.new(s);
e = Buffer.new(s);
)
// run with basic parameters
(
Routine{
t = Main.elapsedTime;
FluidBufHPSS.process(s, b.bufnum, harmBufNum: c.bufnum, percBufNum: d.bufnum);
s.sync;
(Main.elapsedTime - t).postln;
}.play
)
//query and play the harmonic
c.query;
c.play;
//querry and play the percussive
d.query;
d.play;
//nullsumming tests
{(PlayBuf.ar(1,c.bufnum))+(PlayBuf.ar(1,d.bufnum))+(-1*PlayBuf.ar(1,b.bufnum,doneAction:2))}.play
//more daring parameters, in mode 2
(
Routine{
t = Main.elapsedTime;
FluidBufHPSS.process(s, b.bufnum, harmBufNum: c.bufnum, percBufNum: d.bufnum, resBufNum:e.bufnum, harmFiltSize:31, modeFlag:2, htf1: 0.005, hta1: 7.5, htf2: 0.168, hta2: 7.5, ptf1: 0.004, pta1: 26.5, ptf2: 0.152, pta2: 26.5);
s.sync;
(Main.elapsedTime - t).postln;
}.play
)
//query and play the harmonic
c.query;
c.play;
//querry and play the percussive
d.query;
d.play;
//nullsumming tests
{(PlayBuf.ar(1,c.bufnum))+(PlayBuf.ar(1,d.bufnum))+(-1*PlayBuf.ar(1,b.bufnum,doneAction:2))}.play
//more daring parameters, in mode 2
(
Routine{
t = Main.elapsedTime;
FluidBufHPSS.process(s, b.bufnum, harmBufNum: c.bufnum, percBufNum: d.bufnum, resBufNum:e.bufnum, harmFiltSize:31, modeFlag:2, htf1: 0.005, hta1: 7.5, htf2: 0.168, hta2: 7.5, ptf1: 0.004, pta1: 26.5, ptf2: 0.152, pta2: 26.5);
s.sync;
(Main.elapsedTime - t).postln;
}.play
)
//query and play the harmonic
c.query;
c.play;
//query and play the percussive
d.query;
d.play;
//query and play the residual
e.query;
e.play;
//still nullsumming
{PlayBuf.ar(1,c.bufnum) + PlayBuf.ar(1,d.bufnum) + PlayBuf.ar(1,e.bufnum) - PlayBuf.ar(1,b.bufnum,doneAction:2)}.play;
::
(Main.elapsedTime - t).postln;
}.play
)
//query and play the harmonic
c.query;
c.play;
//query and play the percussive
d.query;
d.play;
//query and play the residual
e.query;
e.play;
//still nullsumming
{PlayBuf.ar(1,c.bufnum) + PlayBuf.ar(1,d.bufnum) + PlayBuf.ar(1,e.bufnum) - PlayBuf.ar(1,b.bufnum,doneAction:2)}.play;
::

@ -31,10 +31,7 @@ The whole process can be related to a channel vocoder where, instead of fixed ba
More information on possible musicianly uses of NMF is available in the LINK::Guides/FluCoMa:: overview file.
FluidBufNMF is part of the Fluid Decomposition Toolkit of the FluCoMa project. footnote::
This was made possible thanks to the FluCoMa project ( http://www.flucoma.org/ ) funded by the European Research Council ( https://erc.europa.eu/ ) under the European Unions Horizon 2020 research and innovation programme (grant agreement No 725899).
::
This was made possible thanks to the FluCoMa project ( http://www.flucoma.org/ ) funded by the European Research Council ( https://erc.europa.eu/ ) under the European Union's Horizon 2020 research and innovation programme (grant agreement No 725899). ::
CLASSMETHODS::
@ -113,6 +110,58 @@ RETURNS::
EXAMPLES::
STRONG::A didactic example::
CODE::
(
// create buffers
b = Buffer.alloc(s,44100);
c = Buffer.alloc(s, 44100);
d = Buffer.new(s);
e = Buffer.new(s);
f = Buffer.new(s);
g = Buffer.new(s);
)
(
// fill them with 2 clearly segregated sine waves and composite a buffer where they are consecutive
Routine {
b.sine2([500],[1], false, false);
c.sine2([5000],[1],false, false);
s.sync;
FluidBufCompose.process(s,srcBufNumA:b.bufnum, srcBufNumB:c.bufnum,dstStartAtB:44100,dstBufNum:d.bufnum);
s.sync;
d.query;
}.play;
)
// check
d.plot
d.play //////(beware !!!! loud!!!)
(
// separate them in 2 ranks
Routine {
FluidBufNMF.process(s, d.bufnum, dstBufNum:e.bufnum, dictBufNum: f.bufnum, actBufNum:g.bufnum, rank:2);
s.sync;
e.query;
f.query;
g.query;
}.play
)
// look at the resynthesised separated signal
e.plot;
// look at the dictionaries signal for 2 spikes
f.plot;
// look at the activations
g.plot;
//try running the same process on superimposed sine waves, instead of consecutive ones, in the source and see how it fails.
::
STRONG::Basic musical examples::
code::
// set some buffers and parameters
@ -186,11 +235,11 @@ c.plot;x.plot; y.plot;
)
::
STRONG::Fixed Dictionaries:: The process can be trained, and the learnt dictionaries or activations can be used as templates.
STRONG::Fixed Dictionaries:: The process can be trained, and the learnt dictionaries or activations can be used as templates.
CODE::
CODE::
//set some buffers
//set some buffers
(
b = Buffer.read(s,File.realpath(FluidBufNMF.class.filenameSymbol).dirname.withTrailingSlash ++ "../AudioFiles/Tremblay-AaS-AcousticStrums-M.wav");
c = Buffer.new(s);
@ -199,7 +248,7 @@ e = Buffer.alloc(s,1,1);
y = Buffer.alloc(s,1,1);
)
// train only 2 seconds
// train only 2 seconds
(
Routine {
FluidBufNMF.process(s,b.bufnum,0,88200,0,1, c.bufnum, x.bufnum, rank:10);
@ -210,27 +259,28 @@ Routine {
// find the rank that has the picking sound by changing which channel to listen to
(
~element = 0;
~element = 9;
{PlayBuf.ar(10,c.bufnum)[~element]}.play
)
// copy all the other ranks onto a single channel, and the picking dictionary as the sole component of the 1st channel
(
Routine{
(0..9).remove(~element).do({|chan|FluidBufCompose.process(s,srcBufNumA: x.bufnum, startChanA:chan, nChansA: 1, srcBufNumB: e.bufnum, dstBufNum: e.bufnum)});
s.sync;
e.query;
s.sync;
FluidBufCompose.process(s,srcBufNumA: x.bufnum, startChanA: ~element, nChansA: 1, srcBufNumB: e.bufnum, dstStartChanB: 1, dstBufNum: e.bufnum);
s.sync;
e.query;
z = (0..9);
FluidBufCompose.process(s,srcBufNumA: x.bufnum, startChanA: z.removeAt(~element), nChansA: 1, srcBufNumB: e.bufnum, dstBufNum: e.bufnum);
s.sync;
e.query;
s.sync;
z.do({|chan|FluidBufCompose.process(s,srcBufNumA: x.bufnum, startChanA:chan, nChansA: 1, dstStartChanA: 1, srcBufNumB: e.bufnum, dstBufNum: e.bufnum)});
s.sync;
e.query;
}.play;
)
//process the whole file, splitting it with the 2 trained dictionaries
(
Routine{
FluidBufNMF.process(s, b.bufnum, dstBufNum: c.bufnum, dictBufNum: e.bufnum, dictFlag: 2, actBufNum:y.bufnum, rank:2);
FluidBufNMF.process(s, b.bufnum, dstBufNum: c.bufnum, dictBufNum: e.bufnum, dictFlag: 2, actBufNum: y.bufnum, rank:2);
s.sync;
c.query;
}.play;
@ -242,4 +292,86 @@ c.play
// it even null-sums
{(PlayBuf.ar(2,c.bufnum,doneAction:2).sum)-(PlayBuf.ar(1,b.bufnum,doneAction:2))}.play
::
STRONG::Updating Dictionaries:: The process can update dictionaries provided as seeds.
CODE::
(
// create buffers
b = Buffer.alloc(s,44100);
c = Buffer.alloc(s, 44100);
d = Buffer.new(s);
e = Buffer.alloc(s,513,3);
f = Buffer.new(s);
g = Buffer.new(s);
)
(
// fill them with 2 clearly segregated sine waves and composite a buffer where they are consecutive
Routine {
b.sine2([500],[1], false, false);
c.sine2([5000],[1],false, false);
s.sync;
FluidBufCompose.process(s,srcBufNumA:b.bufnum, srcBufNumB:c.bufnum,dstStartAtB:44100,dstBufNum:d.bufnum);
s.sync;
d.query;
}.play;
)
// check
d.plot
d.play //////(beware !!!! loud!!!)
(
//make a seeding dictionary of 3 ranks:
var highpass, lowpass, direct;
highpass = Array.fill(513,{|i| (i < 50).asInteger});
lowpass = 1 - highpass;
direct = Array.fill(513,0.1);
e.setn(0,[highpass, lowpass, direct].flop.flat);
)
//check the dictionary: a steep lowpass, a steep highpass, and a small DC
e.plot
e.query
(
// use the seeding dictionary, without updating
Routine {
FluidBufNMF.process(s, d.bufnum, dstBufNum:f.bufnum, dictBufNum: e.bufnum, dictFlag: 2, actBufNum:g.bufnum, rank:3);
s.sync;
e.query;
f.query;
g.query;
}.play
)
// look at the resynthesised separated signal
f.plot;
// look at the dictionaries that have not changed
e.plot;
// look at the activations
g.plot;
(
// use the seeding dictionary, with updating this time
Routine {
FluidBufNMF.process(s, d.bufnum, dstBufNum:f.bufnum, dictBufNum: e.bufnum, dictFlag: 1, actBufNum:g.bufnum, rank:3);
s.sync;
e.query;
f.query;
g.query;
}.play
)
// look at the resynthesised separated signal
f.plot;
// look at the dictionaries that have now updated in place (with the 3rd channel being more focused)
e.plot;
// look at the activations (sharper 3rd rank at transitions)
g.plot;
::

@ -40,7 +40,10 @@ ARGUMENT:: kernelSize
The granularity of the window in which the algorithm looks for change, in samples. A small number will be sensitive to short term changes, and a large number should look for long term changes.
ARGUMENT:: thresh
The normalised threshold, between 0 an 1, to consider a peak as a sinusoidal component from the in the novelty curve.
The normalised threshold, between 0 and 1, on the novelty curve to consider it a segmentation point.
ARGUMENT:: filterSize
The size of a smoothing filter that is applied on the novelty curve. A larger filter size allows for cleaner cuts on very sharp changes.
ARGUMENT:: winSize
The window size. As novelty estimation relies on spectral frames, we need to decide what precision we give it spectrally and temporally, in line with Gabor Uncertainty principles. http://www.subsurfwiki.org/wiki/Gabor_uncertainty
@ -91,3 +94,39 @@ c.query;
}.play;
)
::
STRONG::Examples of the impact of the filterSize::
CODE::
// load some buffers
(
b = Buffer.read(s,File.realpath(FluidBufNoveltySlice.class.filenameSymbol).dirname.withTrailingSlash ++ "../AudioFiles/Tremblay-AaS-AcousticStrums-M.wav");
c = Buffer.new(s);
)
// process with a given filterSize
FluidBufNoveltySlice.process(s,b.bufnum, transBufNum: c.bufnum, kernelSize:31, thresh:0.35, filterSize:0)
//check the number of slices: it is the number of frames in the transBuf minus the boundary index.
c.query;
//play slice number 2
(
{
BufRd.ar(1, b.bufnum,
Line.ar(
BufRd.kr(1, c.bufnum, DC.kr(2), 0, 1),
BufRd.kr(1, c.bufnum, DC.kr(3), 0, 1),
(BufRd.kr(1, c.bufnum, DC.kr(3)) - BufRd.kr(1, c.bufnum, DC.kr(2), 0, 1) + 1) / s.sampleRate),
0,1);
}.play;
)
// change the filterSize in the code above to 4. Then to 8. Listen in between to the differences.
// What's happening? In the first instance (filterSize = 0), the novelty curve is jittery and therefore overtriggers on the arpeggiated guitar. We can also hear attacks at the end of the segment. Setting the threshold higher (as in the 'Basic Example' pane) misses some of the more subtle variations.
// With the second setting (filterSize = 4), we smooth the novelty curve a little, which allows us to catch small differences that are not just jitter. It also corrects the cut at the end by the same trick: the averaging slides the sharp peak up, so it crosses the threshold slightly earlier.
// If we smooth too much, as with the third setting (filterSize = 8), we start to lose precision. Playing with different values of threshold will then allow you to find the perfect segment for your signal.
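// A minimal sketch of automating the comparison above (assuming the buffers b and c loaded earlier):
// rerun the slicing with each filterSize and post the index buffer, whose frame count shows how many slice points were found.
(
Routine {
	[0, 4, 8].do { |fs|
		FluidBufNoveltySlice.process(s, b.bufnum, transBufNum: c.bufnum, kernelSize: 31, thresh: 0.35, filterSize: fs);
		s.sync;
		("filterSize:" + fs).postln;
		c.query;
	};
}.play;
)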
::

@ -58,7 +58,10 @@ ARGUMENT:: winSize
The averaging window of the error detection function, which needs smoothing as it is very jittery. The longer the window, the less precise the detection, but the fewer the false positives.
ARGUMENT:: debounce
The window size in sample within which positive detections will be clumped together to avoid overdetecting in time. No slice will be shorter than this duration.
The window size in samples within which positive detections will be clumped together to avoid overdetecting in time.
ARGUMENT:: minSlice
The minimum duration of a slice in samples.
RETURNS::
Nothing, as the destination buffer is declared in the function call.
@ -77,7 +80,7 @@ c = Buffer.new(s);
(
Routine{
t = Main.elapsedTime;
FluidBufTransientSlice.process(s,b.bufnum, transBufNum:c.bufnum, order:80, debounce:4410);
FluidBufTransientSlice.process(s,b.bufnum, transBufNum:c.bufnum, order:80, minSlice:4410);
s.sync;
(Main.elapsedTime - t).postln;
}.play
@ -105,7 +108,7 @@ c.query;
(
Routine{
t = Main.elapsedTime;
FluidBufTransients.process(s,b.bufnum, 44100, 44100, 0, 0, c.bufnum, d.bufnum, 100, 512,256,1,2,1,12,20);
FluidBufTransients.process(s,b.bufnum, 44100, 44100, 0, 0, c.bufnum, d.bufnum, 100, 512,256,1,2,1,12,20,441);
s.sync;
(Main.elapsedTime - t).postln;
}.play

@ -109,7 +109,4 @@ Routine{
(Main.elapsedTime - t).postln;
}.play
)
::
::

@ -28,14 +28,14 @@ RETURNS::
EXAMPLES::
Summing with the inverse (gain of -1) with a delay of the latency gives us CPU-expensive silence.
CODE::
{ var source = PinkNoise.ar(0.1); DelayN.ar(source,delaytime:1000/s.sampleRate) + FluidGain.ar(source,1000,-1); }.play
::
Varying the gain at audio rate.
CODE::
{ FluidGain.ar(PinkNoise.ar(0.1), gain:LFTri.ar(1)) }.play
::
Varying the gain at control rate, in beautiful stereo.
CODE::
{ FluidGain.ar(SinOsc.ar([222,333],mul:0.1), gain:LFTri.kr([0.5,0.7])) }.play
::
CODE::
{ var source = PinkNoise.ar(0.1); DelayN.ar(source,delaytime:1000/s.sampleRate) + FluidGain.ar(source,1000,-1); }.play
::
Varying the gain at audio rate.
CODE::
{ FluidGain.ar(PinkNoise.ar(0.1), gain:LFTri.ar(1)) }.play
::
Varying the gain at control rate, in beautiful stereo.
CODE::
{ FluidGain.ar(SinOsc.ar([222,333],mul:0.1), gain:LFTri.kr([0.5,0.7])) }.play
::

@ -89,24 +89,23 @@ Discussion::
EXAMPLES::
CODE::
//load a soundfile to play
b = Buffer.read(s,File.realpath(FluidHPSS.class.filenameSymbol).dirname.withTrailingSlash ++ "../AudioFiles/Tremblay-AaS-SynthTwoVoices-M.wav");
//load a soundfile to play
b = Buffer.read(s,File.realpath(FluidHPSS.class.filenameSymbol).dirname.withTrailingSlash ++ "../AudioFiles/Tremblay-AaS-SynthTwoVoices-M.wav");
// run with basic parameters (left is harmonic, right is percussive)
{FluidHPSS.ar(PlayBuf.ar(1,b.bufnum,loop:1))}.play
// run with basic parameters (left is harmonic, right is percussive)
{FluidHPSS.ar(PlayBuf.ar(1,b.bufnum,loop:1))}.play
// run in mode 1
{FluidHPSS.ar(PlayBuf.ar(1,b.bufnum,loop:1),17,31,1,0.05,40,0.1,-40)}.play
// run in mode 1
{FluidHPSS.ar(PlayBuf.ar(1,b.bufnum,loop:1),17,31,1,0.05,40,0.1,-40)}.play
// run in mode 2, listening to
//the harmonic stream
{FluidHPSS.ar(PlayBuf.ar(1,b.bufnum,loop:1),17,31,2,0.05,40,0.1,-40, 0.1, -10, 0.2, 10)[0].dup}.play
// the percussive stream
{FluidHPSS.ar(PlayBuf.ar(1,b.bufnum,loop:1),17,31,2,0.05,40,0.1,-40, 0.1, -10, 0.2, 10)[1].dup}.play
// the residual stream
{FluidHPSS.ar(PlayBuf.ar(1,b.bufnum,loop:1),17,31,2,0.05,40,0.1,-40, 0.1, -10, 0.2, 10)[2].dup}.play
// run in mode 2, listening to
//the harmonic stream
{FluidHPSS.ar(PlayBuf.ar(1,b.bufnum,loop:1),17,31,2,0.05,40,0.1,-40, 0.1, -10, 0.2, 10)[0].dup}.play
// the percussive stream
{FluidHPSS.ar(PlayBuf.ar(1,b.bufnum,loop:1),17,31,2,0.05,40,0.1,-40, 0.1, -10, 0.2, 10)[1].dup}.play
// the residual stream
{FluidHPSS.ar(PlayBuf.ar(1,b.bufnum,loop:1),17,31,2,0.05,40,0.1,-40, 0.1, -10, 0.2, 10)[2].dup}.play
// null test (the process adds a latency of ((harmFiltSize + (winSize / hopSize) - 1) * hopSize) samples)
{var sig = PlayBuf.ar(1,b.bufnum,loop:1); [FluidHPSS.ar(sig,17,31, winSize:1024,hopSize:512,fftSize:2048).sum - DelayN.ar(sig, 1, ((31 + 1) * 512 / s.sampleRate))]}.play
// null test (the process adds a latency of ((harmFiltSize + (winSize / hopSize) - 1) * hopSize) samples)
{var sig = PlayBuf.ar(1,b.bufnum,loop:1); [FluidHPSS.ar(sig,17,31, winSize:1024,hopSize:512,fftSize:2048).sum - DelayN.ar(sig, 1, ((31 + 1) * 512 / s.sampleRate))]}.play
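// a worked instance of the formula above with winSize = 1024 and hopSize = 512: (31 + 1024/512 - 1) * 512 = 32 * 512 = 16384 samples, i.e. 16384 / 44100 ≈ 0.37 s at 44.1 kHz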
::

@ -1,55 +1,313 @@
TITLE:: FluidNMFMatch
SUMMARY:: Real-Time Non-Negative Matrix Factorisation on Buffered Dictionaries
SUMMARY:: Real-Time Non-Negative Matrix Factorisation with Fixed Dictionaries
CATEGORIES:: Libraries>FluidDecomposition
RELATED:: Guides/FluCoMa, Guides/FluidDecomposition, Classes/FluidBufNMF
DESCRIPTION::
The FluidBufNMF object provides the activation (linked to amplitude) for each pre-defined dictionaries (similar to spectra) predefined in a buffer. These dictionaries would have usually be computed through an offline Non-Negative Matrix Factorisation (NMF) footnote:: Lee, Daniel D., and H. Sebastian Seung. 1999. Learning the Parts of Objects by Non-Negative Matrix Factorization. Nature 401 (6755): 78891. https://doi.org/10.1038/44565 :: with the link::Classes/FluidBufNMF:: UGen. NMF has been a popular technique in signal processing research for things like source separation and transcription footnote:: Smaragdis and Brown, Non-Negative Matrix Factorization for Polyphonic Music Transcription.::, although its creative potential is so far relatively unexplored.
The FluidNMFMatch object matches an incoming audio signal against a set of spectral templates using a slimmed-down version of Non-Negative Matrix Factorisation (NMF) footnote:: Lee, Daniel D., and H. Sebastian Seung. 1999. Learning the Parts of Objects by Non-Negative Matrix Factorization. Nature 401 (6755): 788–91. https://doi.org/10.1038/44565. ::
The algorithm takes a buffer in which provides a spectral definition of a number of components, determined by the rank argument and the dictionary buffer channel count. It works iteratively, by trying to find a combination of amplitudes ('activations') that yield the original magnitude spectrogram of the audio input when added together. By and large, there is no unique answer to this question (i.e. there are different ways of accounting for an evolving spectrum in terms of some set of templates and envelopes). In its basic form, NMF is a form of unsupervised learning: it starts with some random data and then converges towards something that minimizes the distance between its generated data and the original:it tends to converge very quickly at first and then level out. Fewer iterations mean less processing, but also less predictable results.
It outputs at kr the degree of detected match for each template (the activation amount, in NMF-terms). The spectral templates are presumed to have been produced by the offline NMF process (link::Classes/FluidBufNMF::), and must be the correct size with respect to the FFT settings being used (FFT size / 2 + 1 frames long). The rank of the decomposition is determined by the number of channels in the supplied buffer of templates, up to a maximum set by the STRONG::maxRank:: parameter.
NMF has been a popular technique in signal processing research for things like source separation and transcription footnote:: Smaragdis and Brown, Non-Negative Matrix Factorization for Polyphonic Music Transcription.::, although its creative potential is so far relatively unexplored. It works iteratively, by trying to find a combination of amplitudes ('activations') that yield the original magnitude spectrogram of the audio input when added together. By and large, there is no unique answer to this question (i.e. there are different ways of accounting for an evolving spectrum in terms of some set of templates and envelopes). In its basic form, NMF is a form of unsupervised learning: it starts with some random data and then converges towards something that minimizes the distance between its generated data and the original: it tends to converge very quickly at first and then level out. Fewer iterations mean less processing, but also less predictable results.
The whole process can be related to a channel vocoder where, instead of fixed bandpass filters, we get more complex filter shapes and the activations correspond to channel envelopes.
More information on possible musicianly uses of NMF is available in the LINK::Guides/FluCoMa:: overview file.
FluidBufNMF is part of the Fluid Decomposition Toolkit of the FluCoMa project. footnote::
This was made possible thanks to the FluCoMa project ( http://www.flucoma.org/ ) funded by the European Research Council ( https://erc.europa.eu/ ) under the European Unions Horizon 2020 research and innovation programme (grant agreement No 725899).
::
FluidNMFMatch is part of the Fluid Decomposition Toolkit of the FluCoMa project. footnote::This was made possible thanks to the FluCoMa project ( http://www.flucoma.org/ ) funded by the European Research Council ( https://erc.europa.eu/ ) under the European Union's Horizon 2020 research and innovation programme (grant agreement No 725899). ::
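A minimal sketch of the intended use follows (the dictionary buffer and source here are placeholders, assumed to have been prepared beforehand, e.g. with link::Classes/FluidBufNMF::; see the examples below for a complete walkthrough):
CODE::
// assumes ~dict is a Buffer whose channels each hold a spectral template of (fftSize / 2) + 1 frames,
// and b is any mono source buffer already loaded on the server
(
{
	var src = PlayBuf.ar(1, b.bufnum, loop: 1);
	// one kr activation stream per template, up to maxRank channels
	FluidNMFMatch.kr(src, ~dict.bufnum, maxRank: 2, winSize: 1024, hopSize: 512)
}.plot(1);
)
::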
CLASSMETHODS::
METHOD:: kr
The real-time processing method. It takes an audio or control input, and will yield a control stream in the form of a multichannel array of size STRONG::rank::.
The real-time processing method. It takes an audio or control input, and will yield a control stream in the form of a multichannel array of size STRONG::maxRank::. If the dictionary buffer has fewer than maxRank channels, the remaining outputs will be zeroed.
ARGUMENT:: in
The input to the factorisation process.
The signal input to the factorisation process.
ARGUMENT:: dictBufNum
The index of the buffer where the different dictionaries will be matched against. Dictionaries must be STRONG::(fft size / 2) + 1:: frames and STRONG::rank:: channels
The server index of the buffer containing the different dictionaries that the input signal will be matched against. Dictionaries must be STRONG::(fft size / 2) + 1:: frames. If the buffer has more than STRONG::maxRank:: channels, the excess will be ignored.
ARGUMENT:: rank
The number of elements the NMF algorithm will try to divide the spectrogram of the source in. This should match the number of channels of the dictBuf defined above.
ARGUMENT:: maxRank
The maximum number of elements the NMF algorithm will try to divide the spectrogram of the source in. This dictates the number of output channels for the UGen.
ARGUMENT:: nIter
The NMF process is iterative, trying to converge to the smallest error in its factorisation. The number of iterations will decide how many times it tries to adjust its estimates. Higher numbers here will be more CPU expensive, lower numbers will be more unpredictable in quality.
The NMF process is iterative, trying to converge to the smallest error in its factorisation. The number of iterations will decide how many times it tries to adjust its estimates. Higher numbers here will be more CPU intensive, lower numbers will be more unpredictable in quality.
ARGUMENT:: winSize
The window size. As NMF relies on spectral frames, we need to decide what precision we give it spectrally and temporally, in line with Gabor Uncertainty principles. http://www.subsurfwiki.org/wiki/Gabor_uncertainty
The number of samples that are analysed at a time. A lower number yields greater temporal resolution, at the expense of spectral resolution, and vice-versa.
ARGUMENT:: hopSize
The window hop size. As NMF relies on spectral frames, we need to move the window forward. It can be any size but low overlap will create audible artefacts.
The window hop size. As NMF relies on spectral frames, we need to move the window forward. It can be any size but low overlap will create audible artefacts. Default = winSize / 2
ARGUMENT:: fftSize
The inner FFT/IFFT size. It should be at least 4 samples long, at least the size of the window, and a power of 2. Making it larger allows an oversampling of the spectral precision.
The FFT/IFFT size. It should be at least 4 samples long, at least the size of the window, and a power of 2. Making it larger allows an oversampling of the spectral precision. Default = winSize
returns::
A multichannel array, giving for each dictionary the activation value.
RETURNS::
A multichannel kr output, giving for each dictionary component the activation amount.
EXAMPLES::
STRONG::A didactic example::
CODE::
(
// create buffers
b= Buffer.alloc(s,44100);
c = Buffer.alloc(s, 44100);
d = Buffer.new(s);
e= Buffer.new(s);
)
(
// fill them with 2 clearly segregated sine waves and composite a buffer where they are consecutive
Routine {
b.sine2([500],[1], false, false);
c.sine2([5000],[1],false, false);
s.sync;
FluidBufCompose.process(s,srcBufNumA:b.bufnum, srcBufNumB:c.bufnum,dstStartAtB:44100,dstBufNum:d.bufnum);
s.sync;
d.query;
}.play;
)
// check
d.plot
d.play //////(beware !!!! loud!!!)
(
// separate them in 2 ranks
Routine {
FluidBufNMF.process(s, d.bufnum, dictBufNum: e.bufnum, rank:2);
s.sync;
e.query;
}.play
)
// check for 2 spikes in the spectra
e.query
e.plot
// test the activation values with one, the other, or both of the ideal materials
{FluidNMFMatch.kr(SinOsc.ar(500),e.bufnum,2, hopSize:512)}.plot(1)
{FluidNMFMatch.kr(SinOsc.ar(5000),e.bufnum,2, hopSize:512)}.plot(1)
{FluidNMFMatch.kr(SinOsc.ar([500,5000]).sum,e.bufnum,2, hopSize:512)}.plot(1)
::
STRONG::A pick compressor::
CODE::
//set some buffers
(
b = Buffer.read(s,File.realpath(FluidNMFMatch.class.filenameSymbol).dirname.withTrailingSlash ++ "../AudioFiles/Tremblay-AaS-AcousticStrums-M.wav");
c = Buffer.new(s);
x = Buffer.new(s);
e = Buffer.alloc(s,1,1);
)
// train only 2 seconds
(
Routine {
FluidBufNMF.process(s,b.bufnum,0,88200,0,1, c.bufnum, x.bufnum, rank:10,fftSize:2048);
s.sync;
c.query;
}.play;
)
// wait for the query to print
// then find the rank that has the picking sound by changing which channel to listen to
(
~element = 8;
{PlayBuf.ar(10,c.bufnum)[~element]}.play
)
// copy all the other ranks onto a single channel, and the picking dictionary as the sole component of the 1st channel
(
Routine{
z = (0..9);
FluidBufCompose.process(s,srcBufNumA: x.bufnum, startChanA: z.removeAt(~element), nChansA: 1, srcBufNumB: e.bufnum, dstBufNum: e.bufnum);
s.sync;
e.query;
s.sync;
z.do({|chan|FluidBufCompose.process(s,srcBufNumA: x.bufnum, startChanA:chan, nChansA: 1, dstStartChanA: 1, srcBufNumB: e.bufnum, dstBufNum: e.bufnum)});
s.sync;
e.query;
}.play;
)
e.plot;
//using this trained dictionary we can see the envelope (activations) of each rank
{FluidNMFMatch.kr(PlayBuf.ar(1,b.bufnum),e.bufnum,2,fftSize:2048)}.plot(1);
// the left/top activations are the pick, coming before the sustain.
//we can then use the activation value to sidechain a compression patch that is sent in a delay
(
{
var source, todelay, delay1, delay2, delay3, feedback, mod1, mod2, mod3, mod4;
//read the source
source = PlayBuf.ar(1, b.bufnum);
// generate modulators that are coprime in frequency
mod1 = SinOsc.ar(1, 0, 0.001);
mod2 = SinOsc.ar(((617 * 181) / (461 * 991)), 0, 0.001);
mod3 = SinOsc.ar(((607 * 193) / (491 * 701)), 0, 0.001);
mod4 = SinOsc.ar(((613 * 191) / (463 * 601)), 0, 0.001);
// compress the signal to send to the delays
todelay = DelayN.ar(source,0.1, 800/44100, //delaying it to compensate for FluidNMFMatch's latency
LagUD.ar(K2A.ar(FluidNMFMatch.kr(source,e.bufnum,2,fftSize:2048)[0]), //reading the channel of the activations on the pick dictionary
80/44100, // lag uptime (compressor's attack)
1000/44100, // lag downtime (compressor's decay)
(1/(2.dbamp) // compressor's threshold inverted
)).clip(1,1000).pow((8.reciprocal)-1)); //clipping it so we only affect above threshold, then ratio(8) becomes the exponent of that base
// delay network
feedback = LocalIn.ar(3);// take the feedback in for the delays
delay1 = DelayC.ar(BPF.ar(todelay+feedback[1]+(feedback[2] * 0.3), 987, 6.7,0.8),0.123,0.122+(mod1*mod2));
delay2 = DelayC.ar(BPF.ar(todelay+feedback[0]+(feedback[2] * 0.3), 1987, 6.7,0.8),0.345,0.344+(mod3*mod4));
delay3 = DelayC.ar(BPF.ar(todelay+feedback[1], 1456, 6.7,0.8),0.567,0.566+(mod1*mod3),0.6);
LocalOut.ar([delay1,delay2, delay3]); // write the feedback for the delays
//listen to the delays only by uncommenting the following line
// [delay1+delay3,delay2+delay3]
source.dup + ([delay1+delay3,delay2+delay3]*(-3.dbamp))
}.play;
)
::
STRONG::Object finder::
CODE::
//set some buffers
(
b = Buffer.read(s,File.realpath(FluidNMFMatch.class.filenameSymbol).dirname.withTrailingSlash ++ "../AudioFiles/Tremblay-BaB-SoundscapeGolcarWithDog.wav");
c = Buffer.new(s);
x = Buffer.new(s);
e = Buffer.alloc(s,1,1);
)
// train where all objects are present
(
Routine {
FluidBufNMF.process(s,b.bufnum,130000,150000,0,1, c.bufnum, x.bufnum, rank:10);
s.sync;
c.query;
}.play;
)
// wait for the query to print
// then find a rank for each item you want to find. You could also sum them. Try to find a rank with a good object-to-rest ratio
(
~dog =0;
{PlayBuf.ar(10,c.bufnum)[~dog]}.play
)
(
~bird = 5;
{PlayBuf.ar(10,c.bufnum)[~bird]}.play
)
// copy at least one other rank to a third rank, a sort of left-over channel
(
Routine{
FluidBufCompose.process(s,srcBufNumA: x.bufnum, startChanA:~dog, nChansA: 1, srcBufNumB: e.bufnum, dstBufNum: e.bufnum);
FluidBufCompose.process(s,srcBufNumA: x.bufnum, startChanA:~bird, nChansA: 1, dstStartChanA: 1, srcBufNumB: e.bufnum, dstBufNum: e.bufnum);
s.sync;
(0..9).removeAll([~dog,~bird]).do({|chan|FluidBufCompose.process(s,srcBufNumA: x.bufnum, startChanA:chan, nChansA: 1, dstStartChanA: 2, srcBufNumB: e.bufnum, dstBufNum: e.bufnum)});
s.sync;
e.query;
}.play;
)
e.plot;
//using this trained dictionary we can then see the activation...
(
{
var source, blips;
//read the source
source = PlayBuf.ar(2, b.bufnum);
blips = FluidNMFMatch.kr(source.sum,e.bufnum,3);
}.plot(10);
)
// ...and use some threshold to 'find' objects...
(
{
var source, blips;
//read the source
source = PlayBuf.ar(2, b.bufnum);
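// Schmidt gives a gate per rank: 1 when an activation rises above its per-channel high threshold ([10,1,1000]), 0 when it falls back below 0.5; the 1000 effectively silences the left-over channel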
blips = Schmidt.kr(FluidNMFMatch.kr(source.sum,e.bufnum,3),0.5,[10,1,1000]);
}.plot(10);
)
// ...and use these to sonify them
(
{
var source, blips, dogs, birds;
//read the source
source = PlayBuf.ar(2, b.bufnum);
blips = Schmidt.kr(FluidNMFMatch.kr(source.sum,e.bufnum,3),0.5,[10,1,1000]);
dogs = SinOsc.ar(100,0,Lag.kr(blips[0],0.05,0.15));
birds = SinOsc.ar(1000,0,Lag.kr(blips[1],0.05,0.05));
[dogs, birds] + source;
}.play;
)
::
STRONG::Pretrained piano::
CODE::
//load in the sound and a pretrained dictionary
(
b = Buffer.read(s,File.realpath(FluidNMFMatch.class.filenameSymbol).dirname.withTrailingSlash ++ "../AudioFiles/Tremblay-SA-UprightPianoPedalWide.wav");
c = Buffer.read(s,File.realpath(FluidNMFMatch.class.filenameSymbol).dirname.withTrailingSlash ++ "../AudioFiles/filters/piano-dicts.wav");
)
b.play
c.query
//use the pretrained dictionary to compute the activation of each note, driving the amplitude of a resynthesis
(
{
var source, resynth;
source = PlayBuf.ar(2, b.bufnum,loop:1).sum;
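// 88 sine oscillators at the piano pitches (MIDI 21 to 108), each with its amplitude driven by the activation of the matching dictionary entry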
resynth = SinOsc.ar((21..108).midicps, 0, FluidNMFMatch.kr(source,c.bufnum,88,10,4096).madd(0.002)).sum;
[source, resynth]
}.play
)
//now sample and hold the same stream so that notes are identified, played back, and sent to the language via OSC
(
{
var source, resynth, chain, trig, acts;
source = PlayBuf.ar(2,b.bufnum,loop:1).sum;
// built-in attack detection, delayed until the stable part of the sound
chain = FFT(LocalBuf(256), source);
trig = TDelay.kr(Onsets.kr(chain, 0.5),0.1);
// samples and holds activation values that are scaled and capped, in effect thresholding them
acts = Latch.kr(FluidNMFMatch.kr(source,c.bufnum,88,10,4096).linlin(15,20,0,0.1),trig);
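// linlin maps activations between 15 and 20 to the 0 to 0.1 range and clips anything below 15 to silence, acting as a threshold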
// resynthesises as in the previous example, with the values sent back to the language
resynth = SinOsc.ar((21..108).midicps, 0, acts).sum;
SendReply.kr(trig, '/activations', acts);
[source, resynth]
// [source, T2A.ar(trig)]
// resynth
}.play
)
// define a receiver for the activations
(
OSCdef(\listener, {|msg|
var data = msg[3..];
// removes the silent ones and posts the indices as MIDI note numbers
data.collect({arg item, i; if (item > 0.01, {i + 21})}).reject({arg item; item.isNil}).postln;
}, '/activations');
)
::
STRONG::Strange Resonators::
CODE::
//indeed
//to be completed
::

@ -31,19 +31,19 @@ RETURNS::
EXAMPLES::
Summing the source with its inverse (gain of -1), delayed by the latency, gives us CPU-expensive silence.
CODE::
{ var source = PinkNoise.ar(0.1); DelayN.ar(source, delaytime:1024/s.sampleRate, mul: -1) + FluidSTFTPass.ar(source, 1024, 256, 1024); }.play
::
A larger, oversampled FFT
CODE::
{ FluidSTFTPass.ar(PinkNoise.ar(0.1), 2048, 128, 8192) }.play
::
Stereo Input Tests.
CODE::
{ FluidSTFTPass.ar([SinOsc.ar(222,mul: 0.1), PinkNoise.ar(Decay.ar(Impulse.ar(0.666,mul: 0.2), 0.5))], fftSize:1024)}.play
::
Stereo Parameter Tests.
CODE::
{ FluidSTFTPass.ar(SinOsc.ar(222,mul: 0.1), [1024,8192],256,8192)}.play
::

@ -38,7 +38,10 @@ ARGUMENT:: winSize
The averaging window of the error detection function, which is very jittery and needs smoothing. The longer the window, the less precise the detection, but the fewer the false positives.
ARGUMENT:: debounce
The window size in samples within which positive detections will be clumped together to avoid over-detecting in time. No slice will be shorter than this duration.
The window size in samples within which positive detections will be clumped together to avoid over-detecting in time.
ARGUMENT:: minSlice
The minimum duration of a slice in samples.
RETURNS::
An audio stream with impulses at detected transients. The latency between the input and the output is (blockSize + padSize - order) samples.
@ -54,18 +57,17 @@ b = Buffer.read(s,File.realpath(FluidTransientSlice.class.filenameSymbol).dirnam
{var sig = PlayBuf.ar(1,b.bufnum,loop:1); [FluidTransientSlice.ar(sig)*0.5, DelayN.ar(sig, 1, ((256 + 128 - 20)/ s.sampleRate))]}.play
// other parameters
{var sig = PlayBuf.ar(1,b.bufnum,loop:1); [FluidTransientSlice.ar(sig,order:80,debounce:2205)*0.5, DelayN.ar(sig, 1, ((256 + 128 - 80)/ s.sampleRate))]}.play
{var sig = PlayBuf.ar(1,b.bufnum,loop:1); [FluidTransientSlice.ar(sig,order:80,minSlice:2205)*0.5, DelayN.ar(sig, 1, ((256 + 128 - 80)/ s.sampleRate))]}.play
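// a longer error-averaging window (winSize; hypothetical value shown) smooths the jittery detection function: fewer false positives, at the cost of timing precision
{var sig = PlayBuf.ar(1,b.bufnum,loop:1); [FluidTransientSlice.ar(sig,order:80,winSize:25,minSlice:2205)*0.5, DelayN.ar(sig, 1, ((256 + 128 - 80)/ s.sampleRate))]}.play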
// a more musical, transient-triggered autopan
(
{
var sig, trig, syncd, pan;
sig = PlayBuf.ar(1,b.bufnum,loop:1);
trig = FluidTransientSlice.ar(sig,order:10,debounce:2205);
trig = FluidTransientSlice.ar(sig,order:10,minSlice:4410);
syncd = DelayN.ar(sig, 1, ((256 + 128 - 10)/ s.sampleRate));
pan = TRand.ar(-1,1,trig);
Pan2.ar(syncd,pan);
}.play
)
::

@ -9,7 +9,7 @@ target_link_libraries(
target_include_directories(
${PLUGIN}
PRIVATE
${CMAKE_CURRENT_LIST_DIR}/../../include
${LOCAL_INCLUDES}
SYSTEM PRIVATE
${SC_PATH}/include/plugin_interface
${SC_PATH}/include/common

@ -1,74 +1,14 @@
// FD_BufNMF, an NRT buffer NMF Processor
// A tool from the FluCoMa project, funded by the European Research Council (ERC) under the European Unions Horizon 2020 research and innovation programme (grant agreement No 725899)
#include "clients/nrt/BufferComposeNRT.hpp"
#include "fdNRTBase.hpp"
#include "data/FluidTensor.hpp"
#include "clients/common/FluidParams.hpp"
#include "SC_PlugIn.h"
#include <unordered_set>
#include <vector>
#include <clients/nrt/BufferComposeNRT.hpp>
#include <FluidSCWrapper.hpp>
static InterfaceTable *ft;
namespace fluid {
namespace wrapper{
class BufCompose: public NRTCommandBase
{
public:
using client_type = client::BufferComposeClient;
using NRTCommandBase::NRTCommandBase;
~BufCompose() {}
void runCommand(World* world, void* replyAddr, char* completionMsgData, size_t completionMsgSize)
{
cmd<BufCompose, &BufCompose::process, &BufCompose::postProcess, &BufCompose::postComplete>(world, "/BufCompose", replyAddr, completionMsgData, completionMsgSize);
}
bool process(World* world)
{
//sanity check the parameters
bool parametersOk;
client_type::ProcessModel processModel;
std::string whatHappened;//this will give us a message to pass back if param check fails
std::tie(parametersOk,whatHappened,processModel) = bufferCompose.sanityCheck();
if(!parametersOk)
{
Print("fdCompose: %s \n", whatHappened.c_str());
return false;
}
bufferCompose.process(processModel);
mModel = processModel;
return true;
}
bool postProcess(World* world)
{
static_cast<SCBufferView*>(mModel.dst)->assignToRT(world);
return true;
}
bool postComplete(World* w) {
static_cast<SCBufferView*>(mModel.dst)->cleanUp();
return true;
}
std::vector<client::Instance>& parameters()
{
return bufferCompose.getParams();
}
private:
client_type bufferCompose;
client_type::ProcessModel mModel;
};//class
} //namespace wrapper
}//namespace fluid
PluginLoad(OfflineFluidDecompositionUGens) {
ft = inTable;
registerCommand<fluid::wrapper::BufCompose,fluid:: client::BufferComposeClient>(ft, "BufCompose");
fluid::wrapper::printCmd<fluid::client::BufferComposeClient>(ft,"BufCompose","FDCompose");
using namespace fluid::client;
makeSCWrapper<BufferComposeClient, float,float>("BufCompose", BufComposeParams, ft);
// registerCommand<fluid::wrapper::BufCompose,fluid:: client::BufferComposeClient>(ft, "BufCompose");
// fluid::wrapper::printCmd<fluid::client::BufferComposeClient>(ft,"BufCompose","FDCompose");
}

@ -2,7 +2,6 @@
// A tool from the FluCoMa project, funded by the European Research Council (ERC) under the European Unions Horizon 2020 research and innovation programme (grant agreement No 725899)
#include <clients/rt/HPSSClient.hpp>
#include <clients/nrt/FluidNRTClientWrapper.hpp>
#include <FluidSCWrapper.hpp>
static InterfaceTable *ft;
@ -10,5 +9,5 @@ static InterfaceTable *ft;
PluginLoad(OfflineFluidDecompositionUGens) {
ft = inTable;
using namespace fluid::client;
makeSCWrapper<NRTStreamAdaptor<HPSSClient<double,float>>>(ft, "BufHPSS");
makeSCWrapper<NRTHPSS,double,float>("BufHPSS",NRTHPSSParams,ft);
}

@ -6,5 +6,5 @@ static InterfaceTable *ft;
PluginLoad(OfflineFluidDecompositionUGens) {
ft = inTable;
using namespace fluid::client;
makeSCWrapper<NMFClient>(ft, "BufNMF");
makeSCWrapper<NMFClient,double,float>("BufNMF",NMFParams, ft);
}

@ -10,5 +10,5 @@ static InterfaceTable* ft;
PluginLoad(OfflineFluidDecompositionUGens) {
ft = inTable;
using namespace fluid::client;
makeSCWrapper<NoveltyClient>(ft, "BufNoveltySlice");
makeSCWrapper<NoveltyClient,double,float>("BufNoveltySlice",NoveltyParams,ft);
}

@ -10,6 +10,6 @@ static InterfaceTable *ft;
PluginLoad(OfflineFluidDecompositionUGens) {
ft = inTable;
using namespace fluid::client;
makeSCWrapper<NRTStreamAdaptor<SinesClient<double,float>>>(ft, "BufSines");
makeSCWrapper<NRTSines,double,float>("BufSines",NRTSineParams,ft);
}

@ -10,6 +10,5 @@ static InterfaceTable* ft;
PluginLoad(OfflineFluidDecompositionUGens) {
ft = inTable;
using namespace fluid::client;
makeSCWrapper<NRTSliceAdaptor<TransientsSlice<double,float>>>(ft, "BufTransientSlice");
makeSCWrapper<NRTTransientSlice,double,float>("BufTransientSlice",NRTTransientSliceParams, ft);
}

@ -7,5 +7,5 @@ static InterfaceTable *ft;
PluginLoad(OfflineFluidDecompositionUGens) {
ft = inTable;
using namespace fluid::client;
makeSCWrapper<NRTStreamAdaptor<TransientClient<double,float>>>(ft,"BufTransients");
makeSCWrapper<NRTTransients,double,float>("BufTransients",NRTTransientParams,ft);
}

@ -9,5 +9,5 @@ static InterfaceTable *ft;
PluginLoad(FluidGainUgen) {
ft = inTable;
using namespace fluid::client;
makeSCWrapper<GainClient<double,float>>(ft, "FluidGain");
makeSCWrapper<GainClient,double,float>("FluidGain", GainParams,ft);
}

@ -8,7 +8,7 @@ static InterfaceTable *ft;
PluginLoad(FluidSTFTUGen) {
ft = inTable;
using namespace fluid::client;
makeSCWrapper<HPSSClient<double,float>>(ft, "FluidHPSS");
makeSCWrapper<HPSSClient,double,float>("FluidHPSS",HPSSParams,ft);
}

@ -1,134 +1,15 @@
// A tool from the FluCoMa project, funded by the European Research Council (ERC) under the European Unions Horizon 2020 research and innovation programme (grant agreement No 725899)
#include "SC_PlugIn.hpp"
#include "data/FluidTensor.hpp"
#include "fdNRTBase.hpp"
#include "clients/rt/NMFMatch.hpp"
#include <clients/rt/NMFMatch.hpp>
#include <FluidSCWrapper.hpp>
static InterfaceTable *ft;
namespace fluid {
namespace nmf{
class FDNMFMatch: public SCUnit
{
using Client = client::NMFMatch<double,float>;
using AudioSignalWrapper = Client::AudioSignal;
using SignalWrapper = Client::Signal<float>;
using SignalPointer = std::unique_ptr<SignalWrapper>;
using ClientPointer = std::unique_ptr<Client>;
template <size_t N>
using SignalArray = std::array<SignalPointer,N>;
using SignalVector = std::vector<SignalPointer>;
public:
FDNMFMatch()
{
//Order of args
//psize hszie pthresh hthresh Window size, Hop size, FFT Size
mClient = ClientPointer(new Client(65536));
setParams(true);
bool isOK;
std::string feedback;
std::tie(isOK, feedback) = mClient->sanityCheck();
if(!isOK)
{
std::cout << "FluidNMFMatch Error: " << feedback << '\n';
mCalcFunc = ClearUnitOutputs;
return;
}
mRank = client::lookupParam("rank", mClient->getParams()).getLong();
mClient->setHostBufferSize(bufferSize());
mClient->reset();
inputSignals[0] = SignalPointer(new AudioSignalWrapper());
outputSignals.resize(mRank);
for(size_t i = 0; i < mRank; ++i)
outputSignals[i].reset(new Client::ScalarSignal());
mCalcFunc = make_calc_function<FDNMFMatch,&FDNMFMatch::next>();
Unit* unit = this;
ClearUnitOutputs(unit,1);
}
~FDNMFMatch() {}
private:
void setParams(bool instantiation)
{
assert(mClient);
for(size_t i = 0; i < mClient->getParams().size(); ++i)
{
client::Instance& p = mClient->getParams()[i];
if(!instantiation && p.getDescriptor().instantiation())
continue;
switch(p.getDescriptor().getType())
{
case client::Type::kLong:
p.setLong(in0(i + 1));
p.checkRange();
break;
case client::Type::kFloat: {
p.setFloat(in0(i + 1));
p.checkRange();
}
break;
case client::Type::kBuffer: {
long bufnum = static_cast<long>(in0(i+1));
wrapper::RTBufferView* currentBuf = static_cast<wrapper::RTBufferView*>(p.getBuffer());
if(bufnum >= 0 && (currentBuf? (currentBuf->bufnum() != bufnum) : true)){
wrapper::RTBufferView* buf = new wrapper::RTBufferView(mWorld,bufnum);
p.setBuffer(buf);
}
break;
}
default:
break;
}
}
}
void next(int numsamples)
{
setParams(false);
const float* input = zin(0);
const float inscalar = in0(0);
inputSignals[0]->set(const_cast<float*>(input), inscalar);
for(size_t i = 0; i < mRank; ++i)
outputSignals[i]->set(out(i),out0(i));
mClient->doProcessNoOla(inputSignals.begin(),inputSignals.end(), outputSignals.begin(), outputSignals.end(), mWorld->mFullRate.mBufLength ,1,mRank);
for(size_t i = 0; i < mRank; ++i)
out0(i) = outputSignals[i]->next();
}
size_t mRank;
ClientPointer mClient;
SignalArray<1> inputSignals;
SignalVector outputSignals;
};
}
}
PluginLoad(FluidSTFTUGen) {
ft = inTable;
registerUnit<fluid::nmf::FDNMFMatch>(ft, "FluidNMFMatch");
using namespace fluid::client;
makeSCWrapper<NMFMatch,double, float>("FluidNMFMatch",NMFMatchParams,ft);
}

@ -9,5 +9,5 @@ static InterfaceTable *ft;
PluginLoad(FluidSTFTUGen) {
ft = inTable;
using namespace fluid::client;
makeSCWrapper<BaseSTFTClient<double,float>>(ft, "FluidSTFTPass",STFTParams);
makeSCWrapper<BaseSTFTClient,double,float>("FluidSTFTPass",STFTParams,ft);
}

@ -9,6 +9,6 @@ static InterfaceTable *ft;
PluginLoad(FluidSTFTUGen) {
ft = inTable;
using namespace fluid::client;
makeSCWrapper<SinesClient<double,float>>(ft, "FluidSines");
makeSCWrapper<SinesClient,double,float>("FluidSines",SinesParams,ft);
}

@ -9,5 +9,5 @@ static InterfaceTable *ft;
PluginLoad(FluidSTFTUGen) {
ft = inTable;
using namespace fluid::client;
makeSCWrapper<TransientsSlice<double,float>>(ft, "FluidTransientSlice");
makeSCWrapper<TransientsSlice,double,float>("FluidTransientSlice",TransientParams,ft);
}

@ -1,17 +1,13 @@
// A tool from the FluCoMa project, funded by the European Research Council (ERC) under the European Unions Horizon 2020 research and innovation programme (grant agreement No 725899)
#include <clients/rt/TransientClient.hpp>
#include <SC_PlugIn.hpp>
#include <FluidSCWrapper.hpp>
static InterfaceTable *ft;
PluginLoad(FluidSTFTUGen) {
ft = inTable;
// registerUnit<fluid::wrapper::FluidTransients>(ft, "FluidTransients");
using namespace fluid::client;
makeSCWrapper<TransientClient<double,float>>(ft, "FluidTransients",TransientParams);
makeSCWrapper<TransientClient, double, float>("FluidTransients", TransientParams, ft);
}
