Example files corrections (#145)

* WIP: RT-alloc in RT clients (#129)

* Update Realtime to pass RT allocator to clients

* added link to examples folder in the guide

* Wrapper: Allocatorize Part 1

* (Buf)MFCC.sc: Handle maxNumBands

* (Buf)MFCC.sc: Handle maxNumBands (#130)

* typo

* Remove CondVar (#132)

* removed from FluidWaveform

* typo

* Wrapper: allocatorize

* Remove redundant old help files

* Wrapper: Use `fmt` instead of `std::to_chars`

(STL function needs macOS >= 10.15)

* CMake: Set PIC globally

* ensure PIC for all libs

* Readme: Correct C++ version

Co-authored-by: Ted Moore <ted@tedmooremusic.com>

* correction in example code of the new NN interface

* fixed example: 'Neural Network Predicts FM Params from Audio Analysis'

* Feature/peaks (#143)

* working frankenstein freq only

* removed all the unused arguments

* now with mag out

* now with the buffer version

* change the interface to singular like other bufSTFT

* added logFreq and linMag

* change of interface (sortBy to order)

* last SC commit - object overview added

* corrected interface and simplified some examples

Co-authored-by: Owen Green <gungwho@gmail.com>
Co-authored-by: Ted Moore <ted@tedmooremusic.com>
Authored by tremblap, committed via GitHub (3 years ago)
parent 401cc00339
commit b8c057fc1c

@ -11,7 +11,7 @@ set(CMAKE_CXX_STANDARD 17)
set(CMAKE_CXX_STANDARD_REQUIRED ON) set(CMAKE_CXX_STANDARD_REQUIRED ON)
set(CMAKE_CXX_EXTENSIONS OFF) set(CMAKE_CXX_EXTENSIONS OFF)
set(CMAKE_MSVC_RUNTIME_LIBRARY "MultiThreaded$<$<CONFIG:Debug>:Debug>") set(CMAKE_MSVC_RUNTIME_LIBRARY "MultiThreaded$<$<CONFIG:Debug>:Debug>")
set(CMAKE_POSITION_INDEPENDENT_CODE ON)
################################################################################ ################################################################################
# Paths # Paths
set(CMAKE_INSTALL_PREFIX "${CMAKE_CURRENT_SOURCE_DIR}/install" CACHE PATH "") set(CMAKE_INSTALL_PREFIX "${CMAKE_CURRENT_SOURCE_DIR}/install" CACHE PATH "")
@ -89,7 +89,7 @@ include(flucoma-buildtools)
include(flucoma-buildtype) include(flucoma-buildtype)
# endif() # endif()
option(DOCS "Generate scdocs" OFF) option(DOCS "Generate scdocs" ON)
set(FLUID_DOCS_PATH "" CACHE PATH "Optional path to flucoma-docs (needed for docs); will download if absent") set(FLUID_DOCS_PATH "" CACHE PATH "Optional path to flucoma-docs (needed for docs); will download if absent")
if(DOCS) if(DOCS)

@ -10,7 +10,7 @@ Note that on macOS you may need to [dequarantine](https://learn.flucoma.org/inst
## Pre-requisites ## Pre-requisites
* C++14 compliant compiler (clang, GCC or MSVC) * C++17 compliant compiler (clang, GCC or MSVC)
* cmake * cmake
* make (or Ninja or XCode or VisualStudio) * make (or Ninja or XCode or VisualStudio)
* git * git

@ -76,8 +76,9 @@ public:
} }
static auto& setParams(Unit* x, ParamSetType& p, static auto& setParams(Unit* x, ParamSetType& p, FloatControlsIter& inputs,
FloatControlsIter& inputs, bool constrain = false, bool initialized = true) Allocator& alloc, bool constrain = false,
bool initialized = true)
{ {
bool verbose = x->mWorld->mVerbosity > 0; bool verbose = x->mWorld->mVerbosity > 0;
@ -85,7 +86,7 @@ public:
Reportage* reportage = initialized ? &(static_cast<FluidSCWrapper*>(x)->mReportage) : new Reportage(); Reportage* reportage = initialized ? &(static_cast<FluidSCWrapper*>(x)->mReportage) : new Reportage();
p.template setParameterValuesRT<ControlSetter>(verbose ? reportage: nullptr , x, inputs); p.template setParameterValuesRT<ControlSetter>(verbose ? reportage: nullptr , x, inputs, alloc);
if (constrain) p.constrainParameterValuesRT(verbose ? reportage : nullptr); if (constrain) p.constrainParameterValuesRT(verbose ? reportage : nullptr);
if(verbose) if(verbose)
{ {

@ -46,12 +46,12 @@ public:
static constexpr auto &getParameterDescriptors() { return DataSetWrParams; } static constexpr auto &getParameterDescriptors() { return DataSetWrParams; }
DataSetWriterClient(ParamSetViewType &p) : mParams(p) {} DataSetWriterClient(ParamSetViewType &p, FluidContext&) : mParams(p) {}
template <typename T> Result process(FluidContext &) { template <typename T> Result process(FluidContext &) {
auto dataset = get<kDataSet>().get(); auto dataset = get<kDataSet>().get();
if (auto datasetPtr = dataset.lock()) { if (auto datasetPtr = dataset.lock()) {
std::string &idPrefix = get<kIDPrefix>(); std::string idPrefix = std::string(get<kIDPrefix>());
auto &idNumberArr = get<kIDNumber>(); auto &idNumberArr = get<kIDNumber>();
if (idNumberArr.size() != 2) if (idNumberArr.size() != 2)
return {Result::Status::kError, "ID number malformed"}; return {Result::Status::kError, "ID number malformed"};

@ -1,6 +1,8 @@
#pragma once #pragma once
#include "Meta.hpp" #include "Meta.hpp"
#include <data/FluidMemory.hpp>
#include <fmt/format.h>
namespace fluid { namespace fluid {
namespace client { namespace client {
@ -40,7 +42,11 @@ struct ParamReader<impl::FloatControlsIter>
using Controls = impl::FloatControlsIter; using Controls = impl::FloatControlsIter;
static auto fromArgs(Unit* /*x*/, Controls& args, std::string, int) /// todo: fix std::string to use a specialisation with RT alloc
template <typename Alloc>
static auto
fromArgs(Unit* /*x*/, Controls& args,
std::basic_string<char, std::char_traits<char>, Alloc> const&, int)
{ {
// first is string size, then chars // first is string size, then chars
index size = static_cast<index>(args.next()); index size = static_cast<index>(args.next());
@ -57,7 +63,8 @@ struct ParamReader<impl::FloatControlsIter>
using Container = typename LongArrayT::type; using Container = typename LongArrayT::type;
using Value = typename Container::type; using Value = typename Container::type;
index size = static_cast<index>(args.next()); index size = static_cast<index>(args.next());
Container res(size); /// todo: fix allocator
Container res(size, FluidDefaultAllocator());
for (index i = 0; i < size; ++i) for (index i = 0; i < size; ++i)
res[i] = static_cast<Value>(args.next()); res[i] = static_cast<Value>(args.next());
return res; return res;
@ -225,7 +232,8 @@ struct ParamReader<sc_msg_iter>
return argTypeOK(T{},tag); return argTypeOK(T{},tag);
} }
static auto fromArgs(World*, sc_msg_iter& args, std::string, int) template<typename Alloc>
static auto fromArgs(World*, sc_msg_iter& args, std::basic_string<char,std::char_traits<char>,Alloc> const&, int)
{ {
const char* recv = args.gets(""); const char* recv = args.gets("");
@ -285,7 +293,7 @@ struct ParamReader<sc_msg_iter>
using Container = typename LongArrayT::type; using Container = typename LongArrayT::type;
using Value = typename Container::type; using Value = typename Container::type;
index size = static_cast<index>(args.geti()); index size = static_cast<index>(args.geti());
Container res(size); Container res(size, FluidDefaultAllocator());
for (index i = 0; i < size; ++i) for (index i = 0; i < size; ++i)
res[i] = static_cast<Value>(args.geti()); res[i] = static_cast<Value>(args.geti());
return res; return res;
@ -325,14 +333,14 @@ struct ClientParams{
template<typename Context, typename Client = typename Wrapper::Client, size_t Number = N> template<typename Context, typename Client = typename Wrapper::Client, size_t Number = N>
std::enable_if_t<!impl::IsNamedShared_v<Client> || Number!=0, typename T::type> std::enable_if_t<!impl::IsNamedShared_v<Client> || Number!=0, typename T::type>
operator()(Context* x, ArgType& args) operator()(Context* x, ArgType& args, Allocator& alloc)
{ {
// Just return default if there's nothing left to grab // Just return default if there's nothing left to grab
if (args.remain() == 0) if (args.remain() == 0)
{ {
std::cout << "WARNING: " << Wrapper::getName() std::cout << "WARNING: " << Wrapper::getName()
<< " received fewer parameters than expected\n"; << " received fewer parameters than expected\n";
return Wrapper::Client::getParameterDescriptors().template makeValue<N>(); return Wrapper::Client::getParameterDescriptors().template makeValue<N>(alloc);
} }
ParamLiteralConvertor<T, argSize> a; ParamLiteralConvertor<T, argSize> a;
@ -348,18 +356,25 @@ struct ClientParams{
template<typename Context, typename Client = typename Wrapper::Client, size_t Number = N> template<typename Context, typename Client = typename Wrapper::Client, size_t Number = N>
std::enable_if_t<impl::IsNamedShared_v<Client> && Number==0, typename T::type> std::enable_if_t<impl::IsNamedShared_v<Client> && Number==0, typename T::type>
operator()(Context* x, ArgType& args) operator()(Context* x, ArgType& args, Allocator& alloc)
{ {
// Just return default if there's nothing left to grab // Just return default if there's nothing left to grab
if (args.remain() == 0) if (args.remain() == 0)
{ {
std::cout << "WARNING: " << Wrapper::getName() std::cout << "WARNING: " << Wrapper::getName()
<< " received fewer parameters than expected\n"; << " received fewer parameters than expected\n";
return Wrapper::Client::getParameterDescriptors().template makeValue<N>(); return Wrapper::Client::getParameterDescriptors().template makeValue<N>(alloc);
} }
index id = ParamReader<ArgType>::fromArgs(x,args,index{},0); index id = ParamReader<ArgType>::fromArgs(x,args,index{},0);
return std::to_string(id); using StdAlloc = foonathan::memory::std_allocator<char, Allocator>;
using fmt_memory_buffer =
fmt::basic_memory_buffer<char, fmt::inline_buffer_size, StdAlloc>;
auto buf = fmt_memory_buffer(alloc);
std::string_view fmt_string("{}");
fmt::vformat_to(std::back_inserter(buf), fmt_string,
fmt::make_format_args(id));
return rt::string(buf.data(), buf.size(), alloc);
} }
}; };
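
The hunk above replaces std::to_string(id) with fmt formatting into an allocator-backed buffer, since std::to_chars would have needed macOS >= 10.15. A minimal sketch of the same pattern follows; the function name formatID and the std::allocator stand-in are illustrative only, whereas in the wrapper the allocator is a foonathan::memory::std_allocator over the SC RT pool and the return type is rt::string.

#include <fmt/format.h>
#include <iterator>
#include <memory>
#include <string>

// Sketch only: Alloc stands in for the real-time allocator used in the wrapper.
template <typename Alloc = std::allocator<char>>
std::basic_string<char, std::char_traits<char>, Alloc>
formatID(long id, Alloc alloc = Alloc{})
{
  // growth beyond the inline storage goes through the supplied allocator,
  // not the global heap
  fmt::basic_memory_buffer<char, fmt::inline_buffer_size, Alloc> buf(alloc);
  fmt::format_to(std::back_inserter(buf), "{}", id);
  return {buf.data(), buf.size(), alloc};
}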

@ -1,5 +1,7 @@
#pragma once #pragma once
#include <data/FluidMemory.hpp>
namespace fluid { namespace fluid {
namespace client { namespace client {
@ -15,12 +17,12 @@ namespace client {
return 1; return 1;
} }
static index allocSize(std::string s) static index allocSize(std::string const& s)
{ {
return asSigned(s.size()) + 1; return asSigned(s.size()) + 1;
} // put null char at end when we send } // put null char at end when we send
static index allocSize(FluidTensor<std::string, 1> s) static index allocSize(FluidTensor<std::string, 1> const& s)
{ {
index count = 0; index count = 0;
for (auto& str : s) count += (str.size() + 1); for (auto& str : s) count += (str.size() + 1);
@ -28,7 +30,7 @@ namespace client {
} }
template <typename T> template <typename T>
static index allocSize(FluidTensor<T, 1> s) static index allocSize(FluidTensor<T, 1> const& s)
{ {
return s.size(); return s.size();
} }
@ -68,12 +70,12 @@ namespace client {
f[0] = static_cast<float>(x); f[0] = static_cast<float>(x);
} }
static void convert(float* f, std::string s) static void convert(float* f, std::string const& s)
{ {
std::copy(s.begin(), s.end(), f); std::copy(s.begin(), s.end(), f);
f[s.size()] = 0; // terminate f[s.size()] = 0; // terminate
} }
static void convert(float* f, FluidTensor<std::string, 1> s) static void convert(float* f, FluidTensor<std::string, 1> const& s)
{ {
for (auto& str : s) for (auto& str : s)
{ {
@ -83,7 +85,7 @@ namespace client {
} }
} }
template <typename T> template <typename T>
static void convert(float* f, FluidTensor<T, 1> s) static void convert(float* f, FluidTensor<T, 1> const& s)
{ {
static_assert(std::is_convertible<T, float>::value, static_assert(std::is_convertible<T, float>::value,
"Can't convert this to float output"); "Can't convert this to float output");
@ -114,19 +116,24 @@ namespace client {
return 1; return 1;
} }
static index numTags(std::string) static index numTags(rt::string const&)
{
return 1;;
}
static index numTags(std::string const&)
{ {
return 1;; return 1;;
} }
template <typename T> template <typename T>
static index numTags(FluidTensor<T, 1> s) static index numTags(FluidTensor<T, 1> const& s)
{ {
return s.size(); return s.size();
} }
template <typename... Ts> template <typename... Ts>
static index numTags(std::tuple<Ts...>&& t) static index numTags(std::tuple<Ts...> const& t)
{ {
index count = 0; index count = 0;
ForEach(t,[&count](auto& x){ count += numTags(x);}); ForEach(t,[&count](auto& x){ count += numTags(x);});
@ -143,10 +150,11 @@ namespace client {
static std::enable_if_t<std::is_floating_point<std::decay_t<T>>::value> static std::enable_if_t<std::is_floating_point<std::decay_t<T>>::value>
getTag(Packet& p, T&&) { p.addtag('f'); } getTag(Packet& p, T&&) { p.addtag('f'); }
static void getTag (Packet& p, std::string) { p.addtag('s'); } static void getTag (Packet& p, std::string const&) { p.addtag('s'); }
static void getTag (Packet& p, rt::string const&) { p.addtag('s'); }
template <typename T> template <typename T>
static void getTag(Packet& p, FluidTensor<T, 1> x) static void getTag(Packet& p, FluidTensor<T, 1> const& x)
{ {
T dummy{}; T dummy{};
for (int i = 0; i < x.rows(); i++) for (int i = 0; i < x.rows(); i++)
@ -154,7 +162,7 @@ namespace client {
} }
template <typename... Ts> template <typename... Ts>
static void getTag(Packet& p, std::tuple<Ts...>&& t) static void getTag(Packet& p, std::tuple<Ts...> const& t)
{ {
ForEach(t,[&p](auto& x){getTag(p,x);}); ForEach(t,[&p](auto& x){getTag(p,x);});
} }
@ -179,19 +187,24 @@ namespace client {
p.addf(static_cast<float>(x)); p.addf(static_cast<float>(x));
} }
static void convert(Packet& p, std::string s) static void convert(Packet& p, std::string const& s)
{
p.adds(s.c_str());
}
static void convert(Packet& p, rt::string const& s)
{ {
p.adds(s.c_str()); p.adds(s.c_str());
} }
template <typename T> template <typename T>
static void convert(Packet& p, FluidTensor<T, 1> s) static void convert(Packet& p, FluidTensor<T, 1> const& s)
{ {
for(auto& x: s) convert(p,x); for(auto& x: s) convert(p,x);
} }
template <typename... Ts> template <typename... Ts>
static void convert(Packet& p, std::tuple<Ts...>&& t) static void convert(Packet& p, std::tuple<Ts...> const& t)
{ {
ForEach(t,[&p](auto& x){ convert(p,x);}); ForEach(t,[&p](auto& x){ convert(p,x);});
} }

@ -154,7 +154,7 @@ public:
static void refreshParams(Params& p, MessageResult<ParamValues>& r) static void refreshParams(Params& p, MessageResult<ParamValues>& r)
{ {
p.fromTuple(ParamValues(r)); p.fromTuple(r.value());
} }
template<typename T> template<typename T>
@ -253,7 +253,7 @@ public:
template <typename T> // call from RT template <typename T> // call from RT
static void messageOutput(const std::string& s, index id, MessageResult<T>& result, void* replyAddr) static void messageOutput(const std::string& s, index id, MessageResult<T>& result, void* replyAddr)
{ {
index numTags = ToOSCTypes<small_scpacket>::numTags(static_cast<T>(result)); index numTags = ToOSCTypes<small_scpacket>::numTags(result.value());
if(numTags > 2048) if(numTags > 2048)
{ {
std::cout << "ERROR: Message response too big to send (" << asUnsigned(numTags) * sizeof(float) << " bytes)." << std::endl; std::cout << "ERROR: Message response too big to send (" << asUnsigned(numTags) * sizeof(float) << " bytes)." << std::endl;
@ -290,9 +290,7 @@ public:
template <typename... Ts> template <typename... Ts>
static void messageOutput(const std::string& s, index id, MessageResult<std::tuple<Ts...>>& result, void* replyAddr) static void messageOutput(const std::string& s, index id, MessageResult<std::tuple<Ts...>>& result, void* replyAddr)
{ {
using T = std::tuple<Ts...>; index numTags = ToOSCTypes<small_scpacket>::numTags(result.value());
index numTags = ToOSCTypes<small_scpacket>::numTags(static_cast<T>(result));
if(numTags > 2048) if(numTags > 2048)
{ {
std::cout << "ERROR: Message response too big to send (" << asUnsigned(numTags) * sizeof(float) << " bytes)." << std::endl; std::cout << "ERROR: Message response too big to send (" << asUnsigned(numTags) * sizeof(float) << " bytes)." << std::endl;
@ -304,10 +302,10 @@ public:
packet.maketags(static_cast<int>(numTags + 2)); packet.maketags(static_cast<int>(numTags + 2));
packet.addtag(','); packet.addtag(',');
packet.addtag('i'); packet.addtag('i');
ToOSCTypes<small_scpacket>::getTag(packet,static_cast<T>(result)); ToOSCTypes<small_scpacket>::getTag(packet,result.value());
packet.addi(static_cast<int>(id)); packet.addi(static_cast<int>(id));
ToOSCTypes<small_scpacket>::convert(packet, static_cast<T>(result)); ToOSCTypes<small_scpacket>::convert(packet, result.value());
if(replyAddr) if(replyAddr)
SendReply(replyAddr,packet.data(),static_cast<int>(packet.size())); SendReply(replyAddr,packet.data(),static_cast<int>(packet.size()));
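
The messageOutput changes above switch from static_cast<T>(result) to result.value(). MessageResult itself lives in flucoma-core; the stripped-down, hypothetical shape below (MessageResultSketch is not a real class) only illustrates the difference between going through the conversion operator and asking for the payload directly.

// Hypothetical, simplified stand-in for flucoma-core's MessageResult<T>;
// the real class also carries a status and a message string.
template <typename T>
struct MessageResultSketch
{
  operator T() const { return mValue; }      // what static_cast<T>(result) used: makes a copy
  T&       value()       { return mValue; }  // what the diff switches to: hands back the payload
  const T& value() const { return mValue; }
  T mValue;
};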

@ -42,7 +42,8 @@ private:
/// Instance cache /// Instance cache
struct CacheEntry struct CacheEntry
{ {
CacheEntry(const Params& p) : mParams{p}, mClient{mParams} {} CacheEntry(const Params& p, FluidContext c)
: mParams{p}, mClient{mParams, c} {}
Params mParams; Params mParams;
Client mClient; Client mClient;
@ -151,11 +152,11 @@ public:
return lookup == mCache.end() ? WeakCacheEntryPointer() : lookup->second; return lookup == mCache.end() ? WeakCacheEntryPointer() : lookup->second;
} }
static WeakCacheEntryPointer add(World* world, index id, const Params& params) static WeakCacheEntryPointer add(World* world, index id, const Params& params, FluidContext context)
{ {
if (isNull(get(id))) if (isNull(get(id)))
{ {
auto result = mCache.emplace(id, std::make_shared<CacheEntry>(params)); auto result = mCache.emplace(id, std::make_shared<CacheEntry>(params, context));
addToRTCache{}(world, *(result.first)); addToRTCache{}(world, *(result.first));
@ -199,8 +200,10 @@ private:
struct NRTCommand struct NRTCommand
{ {
NRTCommand(World*, sc_msg_iter* args, void* replyAddr, NRTCommand(World* world, sc_msg_iter* args, void* replyAddr,
bool consumeID = true) bool consumeID = true)
: mSCAlloc{world, Wrapper::getInterfaceTable()},
mAlloc{foonathan::memory::make_allocator_reference(mSCAlloc)}
{ {
auto count = args->count; auto count = args->count;
auto pos = args->rdpos; auto pos = args->rdpos;
@ -221,9 +224,11 @@ private:
if (mReplyAddress) deleteReplyAddress(mReplyAddress); if (mReplyAddress) deleteReplyAddress(mReplyAddress);
} }
NRTCommand() {} // NRTCommand() {}
explicit NRTCommand(index id) : mID{id} {} explicit NRTCommand(World* world, index id)
: mSCAlloc{world, Wrapper::getInterfaceTable()},
mAlloc{foonathan::memory::make_allocator_reference(mSCAlloc)}, mID{id} {}
bool stage2(World*) { return true; } // nrt bool stage2(World*) { return true; } // nrt
bool stage3(World*) { return true; } // rt bool stage3(World*) { return true; } // rt
@ -248,7 +253,15 @@ private:
static_cast<int>(packet.size())); static_cast<int>(packet.size()));
} }
} }
Allocator& allocator()
{
return mAlloc;
}
// protected: // protected:
SCRawAllocator mSCAlloc;
Allocator mAlloc;
index mID; index mID;
void* mReplyAddress{nullptr}; void* mReplyAddress{nullptr};
}; };
@ -257,16 +270,18 @@ private:
{ {
CommandNew(World* world, sc_msg_iter* args, void* replyAddr) CommandNew(World* world, sc_msg_iter* args, void* replyAddr)
: NRTCommand{world, args, replyAddr, !IsNamedShared_v<Client>}, : NRTCommand{world, args, replyAddr, !IsNamedShared_v<Client>},
mParams{Client::getParameterDescriptors()} mParams{Client::getParameterDescriptors(), NRTCommand::allocator()}
{ {
mParams.template setParameterValuesRT<ParamsFromOSC>(nullptr, world, mParams.template setParameterValuesRT<ParamsFromOSC>(nullptr, world,
*args); *args, NRTCommand::allocator());
} }
CommandNew(index id, World*, FloatControlsIter& args, Unit* x) CommandNew(index id, World* world, FloatControlsIter& args, Unit* x)
: NRTCommand{id}, mParams{Client::getParameterDescriptors()} : NRTCommand{world, id}, mParams{Client::getParameterDescriptors(),
NRTCommand::allocator()}
{ {
mParams.template setParameterValuesRT<ParamsFromSynth>(nullptr, x, args); mParams.template setParameterValuesRT<ParamsFromSynth>(
nullptr, x, args, NRTCommand::allocator());
} }
static const char* name() static const char* name()
@ -281,7 +296,7 @@ private:
if (!constraintsRes.ok()) Wrapper::printResult(w, constraintsRes); if (!constraintsRes.ok()) Wrapper::printResult(w, constraintsRes);
mResult = (!isNull(add(w, NRTCommand::mID, mParams))); mResult = (!isNull(add(w, NRTCommand::mID, mParams, FluidContext())));
// Sigh. The cache entry above has both the client instance and main // Sigh. The cache entry above has both the client instance and main
// params instance. // params instance.
@ -343,21 +358,23 @@ private:
{ {
CommandProcess(World* world, sc_msg_iter* args, void* replyAddr) CommandProcess(World* world, sc_msg_iter* args, void* replyAddr)
: NRTCommand{world, args, replyAddr}, : NRTCommand{world, args, replyAddr},
mParams{Client::getParameterDescriptors()} mParams{Client::getParameterDescriptors(),NRTCommand::allocator()}
{ {
auto& ar = *args; auto& ar = *args;
if (auto ptr = get(NRTCommand::mID).lock()) if (auto ptr = get(NRTCommand::mID).lock())
{ {
ptr->mDone.store(false, std::memory_order_release); ptr->mDone.store(false, std::memory_order_release);
mParams.template setParameterValuesRT<ParamsFromOSC>(nullptr, world, mParams.template setParameterValuesRT<ParamsFromOSC>(nullptr, world,
ar); ar, NRTCommand::allocator());
mSynchronous = static_cast<bool>(ar.geti()); mSynchronous = static_cast<bool>(ar.geti());
} // if this fails, we'll hear about it in stage2 anyway } // if this fails, we'll hear about it in stage2 anyway
} }
explicit CommandProcess(index id, bool synchronous, Params* params) explicit CommandProcess(World* world, index id, bool synchronous,
: NRTCommand{id}, Params* params)
mSynchronous(synchronous), mParams{Client::getParameterDescriptors()} : NRTCommand{world, id},
mSynchronous(synchronous), mParams{Client::getParameterDescriptors(),
NRTCommand::allocator()}
{ {
if (params) if (params)
{ {
@ -473,7 +490,8 @@ private:
/// Not registered as a PlugInCmd. Triggered by worker thread callback /// Not registered as a PlugInCmd. Triggered by worker thread callback
struct CommandAsyncComplete : public NRTCommand struct CommandAsyncComplete : public NRTCommand
{ {
CommandAsyncComplete(World*, index id, void* replyAddress) CommandAsyncComplete(World* world, index id, void* replyAddress)
: NRTCommand(world, id)
{ {
NRTCommand::mID = id; NRTCommand::mID = id;
NRTCommand::mReplyAddress = replyAddress; NRTCommand::mReplyAddress = replyAddress;
@ -612,7 +630,9 @@ private:
struct CommandProcessNew : public NRTCommand struct CommandProcessNew : public NRTCommand
{ {
CommandProcessNew(World* world, sc_msg_iter* args, void* replyAddr) CommandProcessNew(World* world, sc_msg_iter* args, void* replyAddr)
: mNew{world, args, replyAddr}, mProcess{mNew.mID, false, nullptr} : NRTCommand{world, args, replyAddr, false},
mNew{world, args, replyAddr},
mProcess{world, mNew.mID, false, nullptr}
{ {
mProcess.mSynchronous = args->geti(); mProcess.mSynchronous = args->geti();
mProcess.mReplyAddress = mNew.mReplyAddress; mProcess.mReplyAddress = mNew.mReplyAddress;
@ -695,8 +715,8 @@ private:
auto& ar = *args; auto& ar = *args;
if (auto ptr = get(NRTCommand::mID).lock()) if (auto ptr = get(NRTCommand::mID).lock())
{ {
ptr->mParams.template setParameterValuesRT<ParamsFromOSC>(nullptr, ptr->mParams.template setParameterValuesRT<ParamsFromOSC>(
world, ar); nullptr, world, ar, NRTCommand::allocator());
Result result = validateParameters(ptr->mParams); Result result = validateParameters(ptr->mParams);
ptr->mClient.setParams(ptr->mParams); ptr->mClient.setParams(ptr->mParams);
} }
@ -726,7 +746,8 @@ private:
if (auto ptr = get(NRTCommand::mID).lock()) if (auto ptr = get(NRTCommand::mID).lock())
{ {
ptr->mParams.template setParameterValues<ParamsFromOSC>(true, world, mArgs); ptr->mParams.template setParameterValues<ParamsFromOSC>(
true, world, mArgs, FluidDefaultAllocator());
Result result = validateParameters(ptr->mParams); Result result = validateParameters(ptr->mParams);
ptr->mClient.setParams(ptr->mParams); ptr->mClient.setParams(ptr->mParams);
} }
@ -810,7 +831,6 @@ private:
template <typename Command> template <typename Command>
static void defineNRTCommand() static void defineNRTCommand()
{ {
auto ft = getInterfaceTable();
auto commandRunner = [](World* world, void*, struct sc_msg_iter* args, auto commandRunner = [](World* world, void*, struct sc_msg_iter* args,
void* replyAddr) { void* replyAddr) {
auto ft = getInterfaceTable(); auto ft = getInterfaceTable();
@ -918,7 +938,9 @@ private:
NRTTriggerUnit() NRTTriggerUnit()
: mControlsIterator{mInBuf + ControlOffset(), ControlSize()}, : mControlsIterator{mInBuf + ControlOffset(), ControlSize()},
mParams{Client::getParameterDescriptors()} mSCAlloc(mWorld, Wrapper::getInterfaceTable()),
mAlloc{foonathan::memory::make_allocator_reference(mSCAlloc)},
mParams{Client::getParameterDescriptors(), mAlloc}
{ {
mID = static_cast<index>(mInBuf[0][0]); mID = static_cast<index>(mInBuf[0][0]);
if (mID == -1) mID = count(); if (mID == -1) mID = count();
@ -936,7 +958,7 @@ private:
~NRTTriggerUnit() ~NRTTriggerUnit()
{ {
set_calc_function<NRTTriggerUnit, &NRTTriggerUnit::clear>(); set_calc_function<NRTTriggerUnit, &NRTTriggerUnit::clear>();
auto cmd = NonRealTime::rtalloc<CommandFree>(mWorld, mID); auto cmd = NonRealTime::rtalloc<CommandFree>(mWorld, mWorld, mID);
if (runAsyncCommand(mWorld, cmd, nullptr, 0, nullptr) != 0) if (runAsyncCommand(mWorld, cmd, nullptr, 0, nullptr) != 0)
{ {
std::cout << "ERROR: Async command failed in ~NRTTriggerUnit()" std::cout << "ERROR: Async command failed in ~NRTTriggerUnit()"
@ -963,12 +985,12 @@ private:
if (trigger) if (trigger)
{ {
mControlsIterator.reset(1 + mInBuf); // add one for ID mControlsIterator.reset(1 + mInBuf); // add one for ID
Wrapper::setParams(this, mParams, mControlsIterator, true, false); Wrapper::setParams(this, mParams, mControlsIterator, mAlloc, true, false);
bool blocking = mInBuf[mNumInputs - 1][0] > 0; bool blocking = mInBuf[mNumInputs - 1][0] > 0;
CommandProcess* cmd = CommandProcess* cmd =
rtalloc<CommandProcess>(mWorld, mID, blocking, &mParams); rtalloc<CommandProcess>(mWorld, mWorld, mID, blocking, &mParams);
if (runAsyncCommand(mWorld, cmd, nullptr, 0, nullptr) != 0) if (runAsyncCommand(mWorld, cmd, nullptr, 0, nullptr) != 0)
{ {
std::cout << "ERROR: Async command failed in NRTTriggerUnit::next()" std::cout << "ERROR: Async command failed in NRTTriggerUnit::next()"
@ -1000,6 +1022,8 @@ private:
index mID; index mID;
index mRunCount{0}; index mRunCount{0};
WeakCacheEntryPointer mInst; WeakCacheEntryPointer mInst;
SCRawAllocator mSCAlloc;
Allocator mAlloc;
Params mParams; Params mParams;
bool mInit{false}; bool mInit{false};
}; };
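
To summarise the allocator plumbing added in this file: each NRT command and the trigger unit now keep a raw SC allocator next to a reference wrapper around it, declared in that order so the initialiser list is valid, and hand the reference on to parameter setting and client construction. A condensed sketch of that ownership pattern follows; AllocatingCommand is a made-up name, while World, InterfaceTable, SCRawAllocator, Allocator and make_allocator_reference are the names used elsewhere in this diff.

#include <foonathan/memory/allocator_storage.hpp> // make_allocator_reference

// Condensed sketch of the pattern used by CommandNew, CommandProcess and
// NRTTriggerUnit above; not a drop-in class.
struct AllocatingCommand
{
  AllocatingCommand(World* world, InterfaceTable* ft)
      : mSCAlloc{world, ft}, // raw allocator initialised first, so that...
        mAlloc{foonathan::memory::make_allocator_reference(mSCAlloc)} // ...this can refer to it
  {}

  Allocator& allocator() { return mAlloc; } // what setParameterValuesRT and friends receive

  SCRawAllocator mSCAlloc; // thin wrapper over the server's fRTAlloc / fRTFree
  Allocator      mAlloc;   // reference wrapper (FluidMemory.hpp) passed on to params and clients
};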

@ -1,6 +1,8 @@
#pragma once #pragma once
#include <data/FluidMemory.hpp>
#include <SC_PlugIn.hpp> #include <SC_PlugIn.hpp>
#include <Eigen/Core>
namespace fluid { namespace fluid {
namespace client { namespace client {
@ -81,7 +83,8 @@ struct RealTimeBase
return countScan; return countScan;
} }
void init(SCUnit& unit, Client& client, FloatControlsIter& controls)
void init(SCUnit& unit, Client& client, FloatControlsIter& controls, Allocator& alloc)
{ {
assert(!(client.audioChannelsOut() > 0 && assert(!(client.audioChannelsOut() > 0 &&
client.controlChannelsOut().count > 0) && client.controlChannelsOut().count > 0) &&
@ -89,7 +92,7 @@ struct RealTimeBase
client.sampleRate(unit.fullSampleRate()); client.sampleRate(unit.fullSampleRate());
mInputConnections.reserve(asUnsigned(client.audioChannelsIn())); mInputConnections.reserve(asUnsigned(client.audioChannelsIn()));
mOutputConnections.reserve(asUnsigned(client.audioChannelsOut())); mOutputConnections.reserve(asUnsigned(client.audioChannelsOut()));
mContext = FluidContext(unit.fullBufferSize(), alloc);
Result r; Result r;
if (!(r = expectedSize(controls)).ok()) if (!(r = expectedSize(controls)).ok())
{ {
@ -195,16 +198,19 @@ struct RealTimeBase
} }
void next(SCUnit& unit, Client& client, Params& params, void next(SCUnit& unit, Client& client, Params& params,
FloatControlsIter& controls, bool updateParams = true) FloatControlsIter& controls, Allocator& alloc,
bool updateParams = true)
{ {
bool trig = bool trig =
IsModel_t<Client>::value ? !mPrevTrig && unit.in0(0) > 0 : false; IsModel_t<Client>::value ? !mPrevTrig && unit.in0(0) > 0 : false;
mPrevTrig = trig; mPrevTrig = trig;
#ifdef EIGEN_RUNTIME_NO_MALLOC
Eigen::internal::set_is_malloc_allowed(false);
#endif
if (updateParams) if (updateParams)
{ {
Wrapper::setParams(&unit, params, controls); Wrapper::setParams(&unit, params, controls, alloc);
params.constrainParameterValuesRT(nullptr); params.constrainParameterValuesRT(nullptr);
} }
@ -212,6 +218,9 @@ struct RealTimeBase
(this->*mOutMapperPre)(unit, client); (this->*mOutMapperPre)(unit, client);
client.process(mAudioInputs, mOutputs, mContext); client.process(mAudioInputs, mOutputs, mContext);
(this->*mOutMapperPost)(unit, client); (this->*mOutMapperPost)(unit, client);
#ifdef EIGEN_RUNTIME_NO_MALLOC
Eigen::internal::set_is_malloc_allowed(true); //not really
#endif
} }
private: private:
@ -221,11 +230,11 @@ private:
std::vector<HostVector> mOutputs; std::vector<HostVector> mOutputs;
FluidTensor<float, 1> mControlInputBuffer; FluidTensor<float, 1> mControlInputBuffer;
FluidTensor<float, 1> mControlOutputBuffer; FluidTensor<float, 1> mControlOutputBuffer;
FluidContext mContext;
bool mPrevTrig; bool mPrevTrig;
IOMapFn mInputMapper; IOMapFn mInputMapper;
IOMapFn mOutMapperPre; IOMapFn mOutMapperPre;
IOMapFn mOutMapperPost; IOMapFn mOutMapperPost;
FluidContext mContext;
}; };
} // namespace impl } // namespace impl
} // namespace client } // namespace client
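
The next() changes above bracket client.process() with Eigen's runtime malloc guard, which only does anything in builds where EIGEN_RUNTIME_NO_MALLOC is defined before Eigen is included. A small sketch of how the guard behaves in such a build; guardedBlock is an illustrative name, the Eigen calls are the real ones used in the diff.

// Assumes a build with EIGEN_RUNTIME_NO_MALLOC defined before <Eigen/Core>:
// Eigen then asserts if anything heap-allocates while the flag is false.
#define EIGEN_RUNTIME_NO_MALLOC
#include <Eigen/Core>

void guardedBlock()
{
  Eigen::MatrixXf a = Eigen::MatrixXf::Random(16, 16); // allocating here is fine
  Eigen::MatrixXf b = Eigen::MatrixXf::Random(16, 16);
  Eigen::MatrixXf c(16, 16);                           // output pre-allocated

  Eigen::internal::set_is_malloc_allowed(false);
  c = a + b; // coefficient-wise, written straight into c: no allocation, no assert
  Eigen::internal::set_is_malloc_allowed(true);
}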

@ -3,6 +3,7 @@
#include "ArgsFromClient.hpp" #include "ArgsFromClient.hpp"
#include "Meta.hpp" #include "Meta.hpp"
#include "RealTimeBase.hpp" #include "RealTimeBase.hpp"
#include "SCWorldAllocator.hpp"
#include <clients/common/FluidBaseClient.hpp> #include <clients/common/FluidBaseClient.hpp>
#include <SC_PlugIn.hpp> #include <SC_PlugIn.hpp>
@ -53,73 +54,38 @@ public:
} }
RealTime() RealTime()
: mControls{mInBuf + ControlOffset(this),ControlSize(this)}, :
mClient{Wrapper::setParams(this, mParams, mControls,true)} mSCAlloc{mWorld, Wrapper::getInterfaceTable()},
mAlloc{foonathan::memory::make_allocator_reference(mSCAlloc)},
mContext{fullBufferSize(), mAlloc},
mControls{mInBuf + ControlOffset(this),ControlSize(this)},
mParams{Client::getParameterDescriptors(), mAlloc},
mClient{Wrapper::setParams(this, mParams, mControls, mAlloc,true), mContext}
{ {
init(); init();
} }
void init() void init()
{ {
// auto& client = mClient; mDelegate.init(*this,mClient,mControls,mAlloc);
mDelegate.init(*this,mClient,mControls);
mCalcFunc = make_calc_function<RealTime, &RealTime::next>(); mCalcFunc = make_calc_function<RealTime, &RealTime::next>();
Wrapper::getInterfaceTable()->fClearUnitOutputs(this, 1); Wrapper::getInterfaceTable()->fClearUnitOutputs(this, 1);
// assert(
// !(client.audioChannelsOut() > 0 && client.controlChannelsOut() > 0) &&
// "Client can't have both audio and control outputs");
//
// Result r;
// if(!(r = expectedSize(mWrapper->mControlsIterator)).ok())
// {
// mCalcFunc = Wrapper::getInterfaceTable()->fClearUnitOutputs;
// std::cout
// << "ERROR: " << Wrapper::getName()
// << " wrong number of arguments."
// << r.message()
// << std::endl;
// return;
// }
//
// mWrapper->mControlsIterator.reset(mInBuf + mSpecialIndex + 1);
//
// client.sampleRate(fullSampleRate());
// mInputConnections.reserve(asUnsigned(client.audioChannelsIn()));
// mOutputConnections.reserve(asUnsigned(client.audioChannelsOut()));
// mAudioInputs.reserve(asUnsigned(client.audioChannelsIn()));
// mOutputs.reserve(asUnsigned(
// std::max(client.audioChannelsOut(), client.controlChannelsOut())));
//
// for (index i = 0; i < client.audioChannelsIn(); ++i)
// {
// mInputConnections.emplace_back(isAudioRateIn(static_cast<int>(i)));
// mAudioInputs.emplace_back(nullptr, 0, 0);
// }
//
// for (index i = 0; i < client.audioChannelsOut(); ++i)
// {
// mOutputConnections.emplace_back(true);
// mOutputs.emplace_back(nullptr, 0, 0);
// }
//
// for (index i = 0; i < client.controlChannelsOut(); ++i)
// { mOutputs.emplace_back(nullptr, 0, 0); }
//
// mCalcFunc = make_calc_function<RealTime, &RealTime::next>();
// Wrapper::getInterfaceTable()->fClearUnitOutputs(this, 1);
} }
void next(int) void next(int)
{ {
mControls.reset(mInBuf + ControlOffset(this)); mControls.reset(mInBuf + ControlOffset(this));
mDelegate.next(*this,mClient,mParams,mControls); mDelegate.next(*this,mClient,mParams,mControls, mAlloc);
} }
private: private:
SCRawAllocator mSCAlloc;
Allocator mAlloc;
FluidContext mContext;
Delegate mDelegate; Delegate mDelegate;
FloatControlsIter mControls; FloatControlsIter mControls;
Params mParams{Client::getParameterDescriptors()}; Params mParams;
Client mClient; Client mClient;
Wrapper* mWrapper{static_cast<Wrapper*>(this)}; Wrapper* mWrapper{static_cast<Wrapper*>(this)};
}; };

@ -14,6 +14,7 @@
#include <limits> #include <limits>
#include <new> #include <new>
namespace fluid { namespace fluid {
template <typename T, typename Wrapper> template <typename T, typename Wrapper>
@ -57,4 +58,34 @@ public:
if (mWorld && mInterface) mInterface->fRTFree(mWorld, p); if (mWorld && mInterface) mInterface->fRTFree(mWorld, p);
} }
}; };
//foonathan::memory RawAllocator with SC rtalloc
struct SCRawAllocator
{
using is_stateful = std::true_type;
SCRawAllocator(World* w, InterfaceTable* interface)
: mWorld{w}, mInterface{interface}
{}
void* allocate_node(std::size_t size, std::size_t)
{
if(auto res = mInterface->fRTAlloc(mWorld,size))
{
// std::cout << "Allocated " << res << " with " << size << '\n';
return res;
}
throw std::bad_alloc();
}
void deallocate_node(void* node, std::size_t /*size*/, std::size_t) noexcept
{
mInterface->fRTFree(mWorld, node);
// std::cout << "Freed " << node << " with " << size << '\n';
}
private:
World* mWorld;
InterfaceTable* mInterface;
};
} // namespace fluid } // namespace fluid
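
SCRawAllocator above models foonathan::memory's RawAllocator concept on top of the server's fRTAlloc/fRTFree. As an illustration of what that buys (this is not code from the commit), such a raw allocator can be dropped into an ordinary standard container through foonathan::memory::std_allocator; rtVectorExample is a made-up function, and world/ft would come from the running server.

#include <foonathan/memory/std_allocator.hpp>
#include <vector>

// Illustration only: SCRawAllocator is the struct defined above.
void rtVectorExample(World* world, InterfaceTable* ft)
{
  SCRawAllocator raw{world, ft};
  foonathan::memory::std_allocator<float, SCRawAllocator> alloc(raw);

  std::vector<float, decltype(alloc)> v(alloc); // every allocation goes through fRTAlloc
  v.resize(64, 0.f);                            // grows inside the server's RT pool
}                                               // frees via fRTFree on destruction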

@ -0,0 +1,51 @@
FluidBufSineFeature : FluidBufProcessor {
*kr { |source, startFrame = 0, numFrames = -1, startChan = 0, numChans = -1, frequency = -1, magnitude = -1, numPeaks = 10, detectionThreshold = -96, order = 0, freqUnit = 0, magUnit = 0, windowSize = 1024, hopSize = -1, fftSize = -1, padding = 1, trig = 1, blocking = 0|
var maxFFTSize = if (fftSize == -1) {windowSize.nextPowerOfTwo} {fftSize};
source = source.asUGenInput;
frequency = frequency !? {frequency.asUGenInput} ?? {-1};
magnitude = magnitude !? {magnitude.asUGenInput} ?? {-1};
source.isNil.if {"FluidBufSineFeature: Invalid source buffer".throw};
^FluidProxyUgen.multiNew(\FluidBufSineFeatureTrigger, -1, source, startFrame, numFrames, startChan, numChans, frequency, magnitude, padding, numPeaks, numPeaks, detectionThreshold, order, freqUnit, magUnit, windowSize, hopSize, fftSize, maxFFTSize, trig, blocking);
}
*process { |server, source, startFrame = 0, numFrames = -1, startChan = 0, numChans = -1, frequency = -1, magnitude = -1, numPeaks = 10, detectionThreshold = -96, order = 0, freqUnit = 0, magUnit = 0, windowSize = 1024, hopSize = -1, fftSize = -1, padding = 1, freeWhenDone = true, action|
var maxFFTSize = if (fftSize == -1) {windowSize.nextPowerOfTwo} {fftSize};
source = source.asUGenInput;
frequency = frequency !? {frequency.asUGenInput} ?? {-1};
magnitude = magnitude !? {magnitude.asUGenInput} ?? {-1};
source.isNil.if {"FluidBufSineFeature: Invalid source buffer".throw};
^this.new(
server, nil, [frequency, magnitude].select{|x| x!= -1}
).processList(
[source, startFrame, numFrames, startChan, numChans, frequency, magnitude, padding, numPeaks, numPeaks, detectionThreshold, order, freqUnit, magUnit, windowSize, hopSize, fftSize,maxFFTSize,0],freeWhenDone,action
);
}
*processBlocking { |server, source, startFrame = 0, numFrames = -1, startChan = 0, numChans = -1, frequency = -1, magnitude = -1, numPeaks = 10, detectionThreshold = -96, order = 0, freqUnit = 0, magUnit = 0, windowSize = 1024, hopSize = -1, fftSize = -1, padding = 1, freeWhenDone = true, action|
var maxFFTSize = if (fftSize == -1) {windowSize.nextPowerOfTwo} {fftSize};
source = source.asUGenInput;
frequency = frequency !? {frequency.asUGenInput} ?? {-1};
magnitude = magnitude !? {magnitude.asUGenInput} ?? {-1};
source.isNil.if {"FluidBufSineFeature: Invalid source buffer".throw};
^this.new(
server, nil, [frequency, magnitude].select{|x| x!= -1}
).processList(
[source, startFrame, numFrames, startChan, numChans, frequency, magnitude, padding, numPeaks, numPeaks, detectionThreshold, order, freqUnit, magUnit, windowSize, hopSize, fftSize,maxFFTSize,1],freeWhenDone,action
);
}
}
FluidBufSineFeatureTrigger : FluidProxyUgen {}

@ -0,0 +1,37 @@
FluidSineFeature : FluidRTMultiOutUGen {
*kr { arg in = 0, numPeaks = 10, detectionThreshold = -96, order = 0, freqUnit = 0, magUnit = 0, windowSize= 1024, hopSize= -1, fftSize= -1, maxFFTSize = -1, maxNumPeaks = nil;
maxNumPeaks = maxNumPeaks ? numPeaks;
^this.multiNew('control', in.asAudioRateInput(this), numPeaks, maxNumPeaks, detectionThreshold, order, freqUnit, magUnit, windowSize, hopSize, fftSize, maxFFTSize)
}
init { arg ... theInputs;
inputs = theInputs;
^this.initOutputs(inputs.at(2),rate); // this instantiates the number of outputs from the maxNumPeaks value passed in the multiNew order above
}
checkInputs {
if(inputs.at(8).rate != 'scalar') {
^(": maxNumPeaks cannot be modulated.");
};
if(inputs.at(7).rate != 'scalar') {
^(": maxFFTSize cannot be modulated.");
};
^this.checkValidInputs;
}
initOutputs{|numChans,rate|
if(numChans.isNil or: {numChans < 1})
{
Error("No input channels").throw
};
channels = Array.fill(numChans * 2, { |i|
OutputProxy('control',this,i);
});
^channels
}
numOutputs { ^(channels.size); }
}

@ -9,7 +9,7 @@
( (
// ============= 1. LOAD SOME FILES TO BE THE SOURCE MATERIAL =================== // ============= 1. LOAD SOME FILES TO BE THE SOURCE MATERIAL ===================
// put your own folder path here! it's best if they're all mono for now. // put your own folder path here! it's best if they're all mono for now.
~source_files_folder = "/Users/macprocomputer/Desktop/sccm/files_fabrizio_01/src_files/"; ~source_files_folder = FluidFilesPath();
~loader = FluidLoadFolder(~source_files_folder); // this is a nice helper class that will load a bunch of files from a folder. ~loader = FluidLoadFolder(~source_files_folder); // this is a nice helper class that will load a bunch of files from a folder.
~loader.play(s,{ // .play will cause it to *actually* do the loading ~loader.play(s,{ // .play will cause it to *actually* do the loading
@ -62,7 +62,7 @@ FluidBufOnsetSlice.process(s,~source_buf,indices:~source_indices_buf,metric:9,th
"analyzing slice: % / %".format(slice_index + 1,slices_array.size - 1).postln; "analyzing slice: % / %".format(slice_index + 1,slices_array.size - 1).postln;
// mfcc analysis, hop over that 0th coefficient because it relates to loudness and here we want to focus on timbre // mfcc analysis, hop over that 0th coefficient because it relates to loudness and here we want to focus on timbre
FluidBufMFCC.process(s,audio_buffer,start_frame,num_frames,features:features_buf,startCoeff:1,numCoeffs:~nmfccs).wait; FluidBufMFCC.process(s,audio_buffer,start_frame,num_frames,features:features_buf,startCoeff:1,numCoeffs:~nmfccs, numChans: 1).wait;
// get a statistical summary of the MFCC analysis for this slice // get a statistical summary of the MFCC analysis for this slice
FluidBufStats.process(s,features_buf,stats:stats_buf).wait; FluidBufStats.process(s,features_buf,stats:stats_buf).wait;
@ -205,7 +205,7 @@ Routine{
var end_frame = Index.kr(~source_indices_buf,index+1); // same for the end frame var end_frame = Index.kr(~source_indices_buf,index+1); // same for the end frame
var num_frames = end_frame - start_frame; var num_frames = end_frame - start_frame;
var dur_secs = min(num_frames / SampleRate.ir(~source_buf),src_dur); var dur_secs = min(num_frames / SampleRate.ir(~source_buf),src_dur);
var sig = PlayBuf.ar(1,~source_buf,BufRateScale.ir(~source_buf),0,start_frame,0,2); var sig = PlayBuf.ar(~loader.buffer.numChannels,~source_buf,BufRateScale.ir(~source_buf),0,start_frame,0,2);
var env = EnvGen.kr(Env([0,1,1,0],[0.03,dur_secs-0.06,0.03]),doneAction:2); var env = EnvGen.kr(Env([0,1,1,0],[0.03,dur_secs-0.06,0.03]),doneAction:2);
// sig = sig * env; // include this env if you like, but keep the line above because it will free the synth after the slice! // sig = sig * env; // include this env if you like, but keep the line above because it will free the synth after the slice!
sig.dup; sig.dup;
@ -219,7 +219,10 @@ Routine{
// is is very similar to step 8 above, but now instead of playing the slice of // is is very similar to step 8 above, but now instead of playing the slice of
// the drum loop, it get's the analysis of the drum loop's slice into "query_buf", // the drum loop, it get's the analysis of the drum loop's slice into "query_buf",
// then uses that info to lookup the nearest neighbour in the source dataset and // then uses that info to lookup the nearest neighbour in the source dataset and
// play that slice // play that slice. If you used, at line 12 above, the FluCoMa sound set, it sounds boringly
// similar: this is because the target drum loop is in the corpus! So it finds, for each slice
// itself... this is a good incentive to reload, with your own soundbank :)
Routine{ Routine{
var query_buf = Buffer.alloc(s,~nmfccs); // a buffer for doing the neighbor lookup with var query_buf = Buffer.alloc(s,~nmfccs); // a buffer for doing the neighbor lookup with
var scaled_buf = Buffer.alloc(s,~nmfccs); var scaled_buf = Buffer.alloc(s,~nmfccs);
@ -261,7 +264,7 @@ Routine{
// once it's loaded, scale it using the scaler // once it's loaded, scale it using the scaler
~scaler.transformPoint(query_buf,scaled_buf,{ ~scaler.transformPoint(query_buf,scaled_buf,{
// once it's neighbour data point in the kdtree of source slices // once it's neighbour data point in the kdtree of source slices
~kdtree.kNearest(scaled_buf,{ ~kdtree.kNearest(scaled_buf,action: {
arg nearest; arg nearest;
// peel off just the integer part of the slice to use in the helper function // peel off just the integer part of the slice to use in the helper function

@ -87,7 +87,7 @@ s.waitForBoot{
ds.dump({ // dump out that dataset to dictionary so that we can use it with the plotter! ds.dump({ // dump out that dataset to dictionary so that we can use it with the plotter!
arg ds_dict;// the dictionary version of this dataset arg ds_dict;// the dictionary version of this dataset
var previous = nil; // a variable for checking if the currently passed nearest neighbour is the same or different from the previous one var previous = nil; // a variable for checking if the currently passed nearest neighbour is the same or different from the previous one
FluidPlotter(bounds:Rect(0,0,800,800),dict:ds_dict,mouseMoveAction:{ {FluidPlotter(bounds:Rect(0,0,800,800),dict:ds_dict,mouseMoveAction:{
/* make a FluidPlotter. nb. the dict is the dict from a FluidDataSet.dump. the mouseMoveAction is a callback function that is called /* make a FluidPlotter. nb. the dict is the dict from a FluidDataSet.dump. the mouseMoveAction is a callback function that is called
anytime the mouseDownAction or mouseMoveAction function is called on this view. i.e., anytime you click or drag on this plotter */ anytime the mouseDownAction or mouseMoveAction function is called on this view. i.e., anytime you click or drag on this plotter */
@ -99,7 +99,7 @@ s.waitForBoot{
(4) modifier keys that are pressed while clicking or dragging (4) modifier keys that are pressed while clicking or dragging
*/ */
point_buf.setn(0,[x,y]); // write the x y position into a buffer so that we can use it to... point_buf.setn(0,[x,y]); // write the x y position into a buffer so that we can use it to...
kdtree.kNearest(point_buf,{ // look up the nearest slice to that x y position kdtree.kNearest(point_buf, action:{ // look up the nearest slice to that x y position
arg nearest; // this is reported back as a symbol, so... arg nearest; // this is reported back as a symbol, so...
nearest = nearest.asString; // we'll convert it to a string here nearest = nearest.asString; // we'll convert it to a string here
@ -124,7 +124,7 @@ s.waitForBoot{
previous = nearest; previous = nearest;
}); });
}); });
}); })}.defer;
}); });
}); });
}); });

@ -1,14 +1,12 @@
( (
// 1. Instantiate some of the things we need. // 1. Instantiate some of the things we need.
Window.closeAll; Window.closeAll;
s.options.sampleRate_(48000);
s.options.device_("Fireface UC Mac (24006457)");
s.waitForBoot{ s.waitForBoot{
Task{ Task{
var win; var win;
~nMFCCs = 13; ~nMFCCs = 13;
~trombone = Buffer.read(s,"/Users/macprocomputer/Desktop/_flucoma/code/flucoma-core-src/AudioFiles/Olencki-TenTromboneLongTones-M.wav"); ~trombone = Buffer.read(s,FluidFilesPath("Olencki-TenTromboneLongTones-M.wav"));
~oboe = Buffer.read(s,"/Users/macprocomputer/Desktop/_flucoma/code/flucoma-core-src/AudioFiles/Harker-DS-TenOboeMultiphonics-M.wav"); ~oboe = Buffer.read(s,FluidFilesPath("Harker-DS-TenOboeMultiphonics-M.wav"));
~timbre_buf = Buffer.alloc(s,~nMFCCs); ~timbre_buf = Buffer.alloc(s,~nMFCCs);
~ds = FluidDataSet(s); ~ds = FluidDataSet(s);
~labels = FluidLabelSet(s); ~labels = FluidLabelSet(s);
@ -169,63 +167,10 @@ some points that are labeled "silence".
~ds.print; ~ds.print;
~labels.print; ~labels.print;
~ds.write("/Users/macprocomputer/Desktop/_flucoma/code/Utrecht-2021/Lesson_Plans/classifier (pre-workshop)/%_ds.json".format(Date.localtime.stamp)); ~ds.write("tmp/%_ds.json".format(Date.localtime.stamp));
~labels.write("/Users/macprocomputer/Desktop/_flucoma/code/Utrecht-2021/Lesson_Plans/classifier (pre-workshop)/%_labels.json".format(Date.localtime.stamp)) ~labels.write("tmp/%_labels.json".format(Date.localtime.stamp))
/* /*
12. Now go retrain some more and do some more predictions. The silent gaps between 12. Now go retrain some more and do some more predictions. The silent gaps between
tones should now report a "2". tones should now report a "2".
*/ */
// ========================= DATA VERIFICATION ADDENDUM ============================
// This data is pretty well separated, except for that one trombone point.
~ds.read("/Users/macprocomputer/Desktop/_flucoma/code/Utrecht-2021/Lesson_Plans/classifier (pre-workshop)/211102_122330_ds.json");
~labels.read("/Users/macprocomputer/Desktop/_flucoma/code/Utrecht-2021/Lesson_Plans/classifier (pre-workshop)/211102_122331_labels.json");
/*
This data is not well separated. Once can see that in the cluster that should probably be all silences,
there is a lot of oboe and trombone points mixed in!
This will likely be confusing to a neural network!
*/
~ds.read("/Users/macprocomputer/Desktop/_flucoma/code/Utrecht-2021/Lesson_Plans/classifier (pre-workshop)/211102_122730_ds.json");
~labels.read("/Users/macprocomputer/Desktop/_flucoma/code/Utrecht-2021/Lesson_Plans/classifier (pre-workshop)/211102_122731_labels.json");
(
Task{
~stand = FluidStandardize(s);
~ds_plotter = FluidDataSet(s);
~umap = FluidUMAP(s,2,30,0.5);
~normer = FluidNormalize(s);
~kdtree = FluidKDTree(s);
~pt_buf = Buffer.alloc(s,2);
s.sync;
~stand.fitTransform(~ds,~ds_plotter,{
~umap.fitTransform(~ds_plotter,~ds_plotter,{
~normer.fitTransform(~ds_plotter,~ds_plotter,{
~kdtree.fit(~ds_plotter,{
~ds_plotter.dump({
arg ds_dict;
~labels.dump({
arg label_dict;
// label_dict.postln;
~plotter = FluidPlotter(bounds:Rect(0,0,800,800),dict:ds_dict,mouseMoveAction:{
arg view, x, y;
~pt_buf.setn(0,[x,y]);
~kdtree.kNearest(~pt_buf,{
arg nearest;
"%:\t%".format(nearest,label_dict.at("data").at(nearest.asString)[0]).postln;
});
});
~plotter.categories_(label_dict);
});
});
});
});
});
});
}.play(AppClock);
)

@ -13,7 +13,7 @@ Routine{
~windowSize = 4096; ~windowSize = 4096;
~hopSize = 512; ~hopSize = 512;
~buf = Buffer.read(s,"/Users/macprocomputer/Desktop/_flucoma/code/flucoma-core-src/AudioFiles/Tremblay-FMTri-M.wav"); ~buf = Buffer.read(s,FluidFilesPath("Tremblay-FMTriDist-M.wav"));
s.sync; s.sync;

@ -61,7 +61,7 @@ y.set(\mix,1);
// send just the 'sines' to a Reverb // send just the 'sines' to a Reverb
( (
{ {
var sig = PlayBuf.ar(1,~song,BufRateScale.ir(~buf),loop:1); var sig = PlayBuf.ar(2,~song,BufRateScale.ir(~buf),loop:1).sum * 0.5;
var sines, residual; var sines, residual;
var latency = ((15 * 512) + 1024 ) / ~song.sampleRate; var latency = ((15 * 512) + 1024 ) / ~song.sampleRate;
# sines, residual = FluidSines.ar(sig); # sines, residual = FluidSines.ar(sig);
@ -72,7 +72,7 @@ y.set(\mix,1);
// send just the 'residual' to a Reverb // send just the 'residual' to a Reverb
( (
{ {
var sig = PlayBuf.ar(1,~song,BufRateScale.ir(~buf),loop:1); var sig = PlayBuf.ar(2,~song,BufRateScale.ir(~buf),loop:1).sum * 0.5;
var sines, residual; var sines, residual;
var latency = ((15 * 512) + 1024 ) / ~song.sampleRate; var latency = ((15 * 512) + 1024 ) / ~song.sampleRate;
# sines, residual = FluidSines.ar(sig); # sines, residual = FluidSines.ar(sig);

@ -1,38 +1,53 @@
/*
this script shows how to
1. load a folder of sounds
2. find smaller time segments within the sounds according to novelty
3. analyse the sounds according to MFCC and add these analyses to a dataset
4. dimensionally reduce that dataset to 2D using umap
5. (optional) turn the plot of points in 2D into a grid
6. plot the points!
notice that each step in this process is created within a function so that
at the bottom of the patch, these functions are all chained together to
do the whole process in one go!
*/
( (
// 1. define a function to load a folder of sounds // 1. load a folder of sounds
~load_folder = { ~load_folder = {
arg folder_path, action; arg folder_path, action;
var loader = FluidLoadFolder(folder_path); var loader = FluidLoadFolder(folder_path); // pass in the folder to load
loader.play(s,{ loader.play(s,{ // play will do the actual loading
fork{ var mono_buffer = Buffer.alloc(s,loader.buffer.numFrames);
var mono_buffer = Buffer.alloc(s,loader.buffer.numFrames); // convert to mono for ease of use for this example FluidBufCompose.processBlocking(s,loader.buffer,destination:mono_buffer,numChans:1,action:{
FluidBufCompose.processBlocking(s,loader.buffer,destination:mono_buffer,numChans:1);
s.sync;
action.(mono_buffer); action.(mono_buffer);
} });
}); });
}; };
// this will load all the audio files that are included with the flucoma toolkit, but you can put your own path here:
~load_folder.(FluidFilesPath(),{ ~load_folder.(FluidFilesPath(),{
arg buffer; arg buffer;
"mono buffer: %".format(buffer).postln; "mono buffer: %".format(buffer).postln;
~buffer = buffer; ~buffer = buffer; // save the buffer to a global variable so we can use it later
}); });
) )
( (
// 2. define a function to slice the sounds, play with the threshold to get different results // 2. slice the sounds
~slice = { ~slice = {
arg buffer, action; arg buffer, action;
Routine{ var indices = Buffer(s); // a buffer for saving the discovered indices into
var indices = Buffer(s);
s.sync; // play around the the threshold anad feature (see help file) to get differet slicing results
FluidBufNoveltySlice.process(s,buffer,indices:indices,threshold:0.5,action:{ FluidBufNoveltySlice.processBlocking(s,buffer,indices:indices,algorithm:0,threshold:0.5,action:{
"% slices found".format(indices.numFrames).postln; "% slices found".format(indices.numFrames).postln;
"average duration in seconds: %".format(buffer.duration/indices.numFrames).postln; "average duration in seconds: %".format(buffer.duration/indices.numFrames).postln;
action.(buffer,indices); action.(buffer,indices);
}); });
}.play;
}; };
~slice.(~buffer,{ ~slice.(~buffer,{
@ -41,34 +56,65 @@
}); });
) )
// you may want to check the slice points here using FluidWaveform
FluidWaveform(~buffer,~indices); // it may also be way too many slices to see properly!
( (
// 3. analyze the slices // 3. analyze the slices
~analyze = { ~analyze = {
arg buffer, indices, action; arg buffer, indices, action;
var time = SystemClock.seconds; var time = SystemClock.seconds; // a timer just to keep tabs on how long this stuff is taking
Routine{ Routine{
var feature_buf = Buffer(s); var feature_buf = Buffer(s); // a buffer for storing the mfcc analyses into
var stats_buf = Buffer(s); var stats_buf = Buffer(s); // a buffer for storing the stats into
var point_buf = Buffer(s); var point_buf = Buffer(s); // a buffer we will use to add points to the dataset
var ds = FluidDataSet(s); var ds = FluidDataSet(s); // the dataset that we'll add all these mfcc analyses to
// bring the values in the slicepoints buffer from the server to the language as a float array
indices.loadToFloatArray(action:{ indices.loadToFloatArray(action:{
arg fa; arg fa; // float array
fa.doAdjacentPairs{ fa.doAdjacentPairs{
/*
take each of the adjacent pairs and pass them to this function as an array of 2 values
nb. for example [0,1,2,3,4] will execute this function 4 times, passing these 2 value arrays:
[0,1]
[1,2]
[2,3]
[3,4]
this will give us each slice point *and* the next slice point so that we
can tell the analyzers where to start analyzing and how many frames to analyze
*/
arg start, end, i; arg start, end, i;
// the next slice point minus the current one gives us how many samples to analyze
var num = end - start; var num = end - start;
/* analyze the drum buffer starting at `start_samps` and for `num_samps` samples
this returns a buffer (feautre_buf) that is 13 channels wide (for the 13 mfccs, see helpfile) and
however many frames long as there are fft frames in the slice */
FluidBufMFCC.processBlocking(s,buffer,start,num,features:feature_buf,numCoeffs:13,startCoeff:1); FluidBufMFCC.processBlocking(s,buffer,start,num,features:feature_buf,numCoeffs:13,startCoeff:1);
/* perform a statistical analysis on the mfcc analysis
this will return just 13 channels, one for each mfcc channel in the feature_buf.
each channel will have 7 frames corresponding to the 7 statistical analyses that it performs
on that channel */
FluidBufStats.processBlocking(s,feature_buf,stats:stats_buf); FluidBufStats.processBlocking(s,feature_buf,stats:stats_buf);
/* take all 13 channels from stats_buf, but just the first frame (mean) and convert it into a buffer
that is 1 channel and 13 frames. this shape will be considered "flat" and therefore able to be
added to the dataset */
FluidBufFlatten.processBlocking(s,stats_buf,numFrames:1,destination:point_buf); FluidBufFlatten.processBlocking(s,stats_buf,numFrames:1,destination:point_buf);
// add it
ds.addPoint("slice-%".format(i),point_buf); ds.addPoint("slice-%".format(i),point_buf);
"Processing Slice % / %".format(i+1,indices.numFrames-1).postln; "Processing Slice % / %".format(i+1,indices.numFrames-1).postln;
}; };
s.sync; s.sync;
feature_buf.free; stats_buf.free; point_buf.free; feature_buf.free; stats_buf.free; point_buf.free; // free buffers
ds.print; ds.print;
@ -89,10 +135,17 @@
~umap = { ~umap = {
arg buffer, indices, ds, action, numNeighbours = 15, minDist = 0.1; arg buffer, indices, ds, action, numNeighbours = 15, minDist = 0.1;
Routine{ Routine{
// get all the dimensions in the same general range so that when umap
// makes its initial tree structure, the lower order mfcc coefficients
// aren't over weighted
var standardizer = FluidStandardize(s); var standardizer = FluidStandardize(s);
// this is the dimensionality reduction algorithm, see helpfile for
// more info
var umap = FluidUMAP(s,2,numNeighbours,minDist); var umap = FluidUMAP(s,2,numNeighbours,minDist);
var redux_ds = FluidDataSet(s); var redux_ds = FluidDataSet(s); // a new dataset for putting the 2D points into
s.sync; s.sync;
@ -117,8 +170,14 @@
~grid = { ~grid = {
arg buffer, indices, redux_ds, action; arg buffer, indices, redux_ds, action;
Routine{ Routine{
// first normalize so they're all 0 to 1
var normer = FluidNormalize(s); var normer = FluidNormalize(s);
// this will shift all dots around so they're in a grid shape
var grider = FluidGrid(s); var grider = FluidGrid(s);
// a new dataset to hold the gridified dots
var newds = FluidDataSet(s); var newds = FluidDataSet(s);
s.sync; s.sync;
@ -144,11 +203,17 @@
~plot = { ~plot = {
arg buffer, indices, redux_ds, action; arg buffer, indices, redux_ds, action;
Routine{ Routine{
var kdtree = FluidKDTree(s); var kdtree = FluidKDTree(s); // tree structure of the 2D points for fast neighbour lookup
// a buffer for putting the 2D mouse point into so that it can be used to find the nearest neighbour
var buf_2d = Buffer.alloc(s,2); var buf_2d = Buffer.alloc(s,2);
// scaler just to double check and make sure that the points are 0 to 1
// if the plotter is receiving the output of umap, they probably won't be...
var scaler = FluidNormalize(s); var scaler = FluidNormalize(s);
// a new dataset told the normalized data
var newds = FluidDataSet(s); var newds = FluidDataSet(s);
var xmin = 0, xmax = 1, ymin = 0, ymax = 1;
s.sync; s.sync;
@ -160,31 +225,46 @@
arg dict; arg dict;
var previous, fp; var previous, fp;
"ds dumped".postln; "ds dumped".postln;
fp = FluidPlotter(nil,Rect(0,0,800,800),dict,xmin:xmin,xmax:xmax,ymin:ymin,ymax:ymax,mouseMoveAction:{
arg view, x, y; // pass in the dict from the dumped dataset. this is the data that we want to plot!
[x,y].postln;
buf_2d.setn(0,[x,y]); {
kdtree.kNearest(buf_2d,{ fp = FluidPlotter(nil,Rect(0,0,800,800),dict,mouseMoveAction:{
arg nearest;
if(previous != nearest,{ // when the mouse is clicked or dragged on the plotter, this function executes
var index = nearest.asString.split($-)[1].asInteger;
previous = nearest; // the view is the FluidPlotter, the x and y are the position of the mouse according
nearest.postln; // to the range of the plotter. i.e., since our plotter is showing us the range 0 to 1
index.postln; // for both x and y, the xy positions will always be between 0 and 1
{ arg view, x, y;
var startPos = Index.kr(indices,index); buf_2d.setn(0,[x,y]); // set the mouse position into a buffer
var dur_samps = Index.kr(indices,index + 1) - startPos;
var sig = PlayBuf.ar(1,buffer,BufRateScale.ir(buffer),startPos:startPos); // then send that buffer to the kdtree to find the nearest point
var dur_sec = dur_samps / BufSampleRate.ir(buffer); kdtree.kNearest(buf_2d,action:{
var env; arg nearest; // the identifier of the nearest point is returned (always as a symbol)
dur_sec = min(dur_sec,1);
env = EnvGen.kr(Env([0,1,1,0],[0.03,dur_sec-0.06,0.03]),doneAction:2); if(previous != nearest,{ // as long as this isn't the same point that was returned last time
sig.dup * env;
}.play; // split the integer off the identifier to know how to look it up for playback
var index = nearest.asString.split($-)[1].asInteger;
previous = nearest;
nearest.postln;
// index.postln;
{
var startPos = Index.kr(indices,index); // look in the indices buf to see where to start playback
var dur_samps = Index.kr(indices,index + 1) - startPos; // and how long
var sig = PlayBuf.ar(1,buffer,BufRateScale.ir(buffer),startPos:startPos);
var dur_sec = dur_samps / BufSampleRate.ir(buffer);
var env;
dur_sec = min(dur_sec,1); // just in case some of the slices are *very* long...
env = EnvGen.kr(Env([0,1,1,0],[0.03,dur_sec-0.06,0.03]),doneAction:2);
sig.dup * env;
}.play;
});
}); });
}); });
}); action.(fp,newds);
action.(fp,newds); }.defer;
}); });
}); });
}); });
@ -194,29 +274,17 @@
~plot.(~buffer,~indices,~ds); ~plot.(~buffer,~indices,~ds);
) )
// ============== do all of it ======================= // ============== do all of it in one go (without the grid for instance) =======================
( (
var path = "/Users/macprocomputer/Desktop/_flucoma/data_saves/%_2D_browsing_Pitch".format(Date.localtime.stamp); var path = FluidFilesPath();
~load_folder.("/Users/macprocomputer/Desktop/_flucoma/favs mono/",{ ~load_folder.(path,{
arg buffer0; arg buffer0;
~slice.(buffer0,{ ~slice.(buffer0,{
arg buffer1, indices1; arg buffer1, indices1;
~analyze.(buffer1, indices1,{ ~analyze.(buffer1, indices1,{
arg buffer2, indices2, ds2; arg buffer2, indices2, ds2;
/* path.mkdir;
buffer2.write(path+/+"buffer.wav","wav");
indices2.write(path+/+"indices.wav","wav","float");
ds2.write(path+/+"ds.json");*/
~umap.(buffer2,indices2,ds2,{ ~umap.(buffer2,indices2,ds2,{
arg buffer3, indices3, ds3; arg buffer3, indices3, ds3;
/* path.mkdir;
buffer3.write(path+/+"buffer.wav","wav");
indices3.write(path+/+"indices.wav","wav","float");
ds3.write(path+/+"ds.json");*/
~plot.(buffer3,indices3,ds3,{ ~plot.(buffer3,indices3,ds3,{
arg plotter; arg plotter;
"done with all".postln; "done with all".postln;
@ -227,178 +295,3 @@ var path = "/Users/macprocomputer/Desktop/_flucoma/data_saves/%_2D_browsing_Pitc
}); });
}); });
) )
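// a minimal sketch of the same chain *with* the grid step, assuming the ~load_folder,
// ~slice, ~analyze, ~umap, ~grid and ~plot functions above have already been evaluated:
(
~load_folder.(FluidFilesPath(),{
	arg buffer0;
	~slice.(buffer0,{
		arg buffer1, indices1;
		~analyze.(buffer1,indices1,{
			arg buffer2, indices2, ds2;
			~umap.(buffer2,indices2,ds2,{
				arg buffer3, indices3, ds3;
				~grid.(buffer3,indices3,ds3,{
					arg buffer4, indices4, ds4;
					~plot.(buffer4,indices4,ds4,{
						arg plotter;
						"done with all (gridded)".postln;
						~fp = plotter;
					});
				});
			});
		});
	});
});
)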
/*=============== Know Your Data =================
hmmm... there's a lot of white space in that UMAP plot. A few options:
1. Adjust the parameters of UMAP to make the plot look different.
- minDist
- numNeighbours
2. Gridify the whole thing to spread it out.
3. Remove some of the outliers to get a more full shape.
===================================================*/
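// #1 (a minimal sketch, assuming ~buffer, ~indices and ~ds still hold the MFCC analysis from above):
// fewer neighbours emphasises local structure, a larger minDist pushes the points further apart
(
~umap.(~buffer,~indices,~ds,{
	arg buffer, indices, redux_ds;
	~plot.(buffer,indices,redux_ds);
},5,0.5); // numNeighbours: 5, minDist: 0.5
)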
// #2
(
Window.closeAll;
Task{
var folder = "/Users/macprocomputer/Desktop/_flucoma/data_saves/211103_121441_2D_browsing/";
var ds = FluidDataSet(s);
var buffer = Buffer.read(s,folder+/+"buffer.wav");
var indices = Buffer.read(s,folder+/+"indices.wav");
var normalizer = FluidNormalize(s);
var ds_grid = FluidDataSet(s);
var grid = FluidGrid(s);
var kdtree = FluidKDTree(s);
var pt_buf = Buffer.alloc(s,2);
s.sync;
ds.read(folder+/+"ds.json",{
"read".postln;
normalizer.fitTransform(ds,ds_grid,{
"normalized".postln;
grid.fitTransform(ds_grid,ds_grid,{
"grid done".postln;
normalizer.fitTransform(ds_grid,ds_grid,{
"normalized".postln;
kdtree.fit(ds_grid,{
"tree fit".postln;
normalizer.fitTransform(ds,ds,{
"normalized".postln;
ds.dump({
arg ds_dict;
ds_grid.dump({
arg ds_grid_dict;
defer{
var distances = Dictionary.new;
var max_dist = 0;
var win, plotter, uv;
var previous;
ds_dict.at("data").keysValuesDo({
arg id, pt;
var other, pt0, pt1, dist, distpoint;
/*
id.postln;
pt.postln;
"".postln;
*/
other = ds_grid_dict.at("data").at(id);
pt0 = Point(pt[0],pt[1]);
pt1 = Point(other[0],other[1]);
dist = pt0.dist(pt1);
distpoint = Dictionary.new;
if(dist > max_dist,{max_dist = dist});
distpoint.put("pt0",pt0);
distpoint.put("pt1",pt1);
distpoint.put("dist",dist);
distances.put(id,distpoint);
});
win = Window("FluidGrid",Rect(0,0,800,800));
win.background_(Color.white);
uv = UserView(win,win.bounds)
.drawFunc_({
var size_pt = Point(uv.bounds.width,uv.bounds.height);
distances.keysValuesDo({
arg id, distpoint;
var alpha = distpoint.at("dist") / max_dist;
var pt0 = distpoint.at("pt0") * size_pt;
var pt1 = distpoint.at("pt1") * size_pt;
pt0.y = uv.bounds.height - pt0.y;
pt1.y = uv.bounds.height - pt1.y;
/* id.postln;
distpoint.postln;
alpha.postln;
"".postln;
*/
Pen.line(pt0,pt1);
Pen.color_(Color(1.0,0.0,0.0,0.25));
Pen.stroke;
});
});
plotter = FluidPlotter(win,win.bounds,ds_dict,{
arg view, x, y;
pt_buf.setn(0,[x,y]);
kdtree.kNearest(pt_buf,{
arg nearest;
if(previous != nearest,{
var index = nearest.asString.split($-)[1].asInteger;
previous = nearest;
nearest.postln;
index.postln;
{
var startPos = Index.kr(indices,index);
var dur_samps = Index.kr(indices,index + 1) - startPos;
var sig = PlayBuf.ar(1,buffer,BufRateScale.ir(buffer),startPos:startPos);
var dur_sec = dur_samps / BufSampleRate.ir(buffer);
var env = EnvGen.kr(Env([0,1,1,0],[0.03,dur_sec-0.06,0.03]),doneAction:2);
sig.dup * env;
}.play;
});
});
});
plotter.background_(Color(0,0,0,0));
ds_grid_dict.at("data").keysValuesDo({
arg id, pt;
plotter.addPoint_("%-grid".format(id),pt[0],pt[1],0.75,Color.blue.alpha_(0.5));
});
win.front;
};
})
});
});
});
});
});
});
});
}.play(AppClock);
)
// #3
(
Routine{
var folder = "/Users/macprocomputer/Desktop/_flucoma/data_saves/211103_152523_2D_browsing/";
var ds = FluidDataSet(s);
var buffer = Buffer.read(s,folder+/+"buffer.wav");
var indices = Buffer.read(s,folder+/+"indices.wav");
var robust_scaler = FluidRobustScale(s,10,90);
var newds = FluidDataSet(s);
var dsq = FluidDataSetQuery(s);
s.sync;
// {indices.plot}.defer;
ds.read(folder+/+"ds.json",{
robust_scaler.fitTransform(ds,newds,{
dsq.addRange(0,2,{
dsq.filter(0,">",-1,{
dsq.and(0,"<",1,{
dsq.and(1,">",-1,{
dsq.and(1,"<",1,{
dsq.transform(newds,newds,{
~plot.(buffer,indices,newds);
});
});
});
});
});
});
})
});
}.play;
)

@ -1,295 +0,0 @@
/*
this script shows how to
1. load a folder of sounds
2. find smaller time segments within the sounds according to novelty
3. analyse the sounds according to MFCC and add these analyses to a dataset
4. dimensionally reduce that dataset to 2D using umap
5. (optional) turn the plot of points in 2D into a grid
6. plot the points!
notice that each step in this process is created within a function so that
at the bottom of the patch, these functions are all chained together to
do the whole process in one go!
*/
(
// 1. load a folder of sounds
~load_folder = {
arg folder_path, action;
var loader = FluidLoadFolder(folder_path); // pass in the folder to load
loader.play(s,{ // play will do the actual loading
var mono_buffer = Buffer.alloc(s,loader.buffer.numFrames);
FluidBufCompose.processBlocking(s,loader.buffer,destination:mono_buffer,numChans:1,action:{
action.(mono_buffer);
});
});
};
// this will load all the audio files that are included with the flucoma toolkit, but you can put your own path here:
~load_folder.(FluidFilesPath(),{
arg buffer;
"mono buffer: %".format(buffer).postln;
~buffer = buffer; // save the buffer to a global variable so we can use it later
});
)
(
// 2. slice the sounds
~slice = {
arg buffer, action;
var indices = Buffer(s); // a buffer for saving the discovered indices into
// play around with the threshold and feature (see help file) to get different slicing results
FluidBufNoveltySlice.processBlocking(s,buffer,indices:indices,algorithm:0,threshold:0.5,action:{
"% slices found".format(indices.numFrames).postln;
"average duration in seconds: %".format(buffer.duration/indices.numFrames).postln;
action.(buffer,indices);
});
};
~slice.(~buffer,{
arg buffer, indices;
~indices = indices;
});
)
// you may want to check the slice points here using FluidWaveform
FluidWaveform(~buffer,~indices); // it may also be way too many slices to see properly!
(
// 3. analyze the slices
~analyze = {
arg buffer, indices, action;
var time = SystemClock.seconds; // a timer just to keep tabs on how long this stuff is taking
Routine{
var feature_buf = Buffer(s); // a buffer for storing the mfcc analyses into
var stats_buf = Buffer(s); // a buffer for storing the stats into
var point_buf = Buffer(s); // a buffer we will use to add points to the dataset
var ds = FluidDataSet(s); // the dataset that we'll add all these mfcc analyses to
// bring the values in the slicepoints buffer from the server to the language as a float array
indices.loadToFloatArray(action:{
arg fa; // float array
fa.doAdjacentPairs{
/*
take each of the adjacent pairs and pass them to this function as an array of 2 values
nb. for example [0,1,2,3,4] will execute this function 4 times, passing these 2 value arrays:
[0,1]
[1,2]
[2,3]
[3,4]
this will give us each slice point *and* the next slice point so that we
can tell the analyzers where to start analyzing and how many frames to analyze
*/
arg start, end, i;
// the next slice point minus the current one gives us the number of samples to analyze
var num = end - start;
/* analyze the buffer starting at `start` for `num` samples.
this returns a buffer (feature_buf) that is 13 channels wide (for the 13 mfccs, see helpfile) and
as many frames long as there are fft frames in the slice */
FluidBufMFCC.processBlocking(s,buffer,start,num,features:feature_buf,numCoeffs:13,startCoeff:1);
/* perform a statistical analysis on the mfcc analysis
this will return just 13 channels, one for each mfcc channel in the feature_buf.
each channel will have 7 frames corresponding to the 7 statistical analyses that it performs
on that channel */
FluidBufStats.processBlocking(s,feature_buf,stats:stats_buf);
/* take all 13 channels from stats_buf, but just the first frame (mean) and convert it into a buffer
that is 1 channel and 13 frames. this shape will be considered "flat" and therefore able to be
added to the dataset */
FluidBufFlatten.processBlocking(s,stats_buf,numFrames:1,destination:point_buf);
// add it
ds.addPoint("slice-%".format(i),point_buf);
"Processing Slice % / %".format(i+1,indices.numFrames-1).postln;
};
s.sync;
feature_buf.free; stats_buf.free; point_buf.free; // free buffers
ds.print;
"Completed in % seconds".format(SystemClock.seconds - time).postln;
action.(buffer,indices,ds);
});
}.play;
};
~analyze.(~buffer,~indices,{
arg buffer, indices, ds;
~ds = ds;
});
)
(
// 4. Reduce to 2 Dimensions
~umap = {
arg buffer, indices, ds, action, numNeighbours = 15, minDist = 0.1;
Routine{
// get all the dimensions in the same general range so that when umap
// makes its initial tree structure, the lower order mfcc coefficients
// aren't over weighted
var standardizer = FluidStandardize(s);
// this is the dimensionality reduction algorithm, see helpfile for
// more info
var umap = FluidUMAP(s,2,numNeighbours,minDist);
var redux_ds = FluidDataSet(s); // a new dataset for putting the 2D points into
s.sync;
standardizer.fitTransform(ds,redux_ds,{
"standardization done".postln;
umap.fitTransform(redux_ds,redux_ds,{
"umap done".postln;
action.(buffer,indices,redux_ds);
});
});
}.play;
};
~umap.(~buffer,~indices,~ds,{
arg buffer, indices, redux_ds;
~ds = redux_ds;
});
)
(
// 5. Gridify if Desired
~grid = {
arg buffer, indices, redux_ds, action;
Routine{
// first normalize so they're all 0 to 1
var normer = FluidNormalize(s);
// this will shift all dots around so they're in a grid shape
var grider = FluidGrid(s);
// a new dataset to hold the gridified dots
var newds = FluidDataSet(s);
s.sync;
normer.fitTransform(redux_ds,newds,{
"normalization done".postln;
grider.fitTransform(newds,newds,{
"grid done".postln;
action.(buffer,indices,newds);
});
});
}.play;
};
~grid.(~buffer,~indices,~ds,{
arg buffer, indices, grid_ds;
~ds = grid_ds;
});
)
(
// 6. Plot
~plot = {
arg buffer, indices, redux_ds, action;
Routine{
var kdtree = FluidKDTree(s); // tree structure of the 2D points for fast neighbour lookup
// a buffer for putting the 2D mouse point into so that it can be used to find the nearest neighbour
var buf_2d = Buffer.alloc(s,2);
// scaler just to double check and make sure that the points are 0 to 1
// if the plotter is receiving the output of umap, they probably won't be...
var scaler = FluidNormalize(s);
// a new dataset to hold the normalized data
var newds = FluidDataSet(s);
s.sync;
scaler.fitTransform(redux_ds,newds,{
"scaling done".postln;
kdtree.fit(newds,{
"kdtree fit".postln;
newds.dump({
arg dict;
var previous, fp;
"ds dumped".postln;
// pass in the dict from the dumped dataset. this is the data that we want to plot!
fp = FluidPlotter(nil,Rect(0,0,800,800),dict,mouseMoveAction:{
// when the mouse is clicked or dragged on the plotter, this function executes
// the view is the FluidPlotter, the x and y are the position of the mouse according
// to the range of the plotter. i.e., since our plotter is showing us the range 0 to 1
// for both x and y, the xy positions will always be between 0 and 1
arg view, x, y;
buf_2d.setn(0,[x,y]); // set the mouse position into a buffer
// then send that buffer to the kdtree to find the nearest point
kdtree.kNearest(buf_2d,{
arg nearest; // the identifier of the nearest point is returned (always as a symbol)
if(previous != nearest,{ // as long as this isn't the same point that was returned last time
// split the integer off the identifier to know how to look it up for playback
var index = nearest.asString.split($-)[1].asInteger;
previous = nearest;
nearest.postln;
// index.postln;
{
var startPos = Index.kr(indices,index); // look in the indices buf to see where to start playback
var dur_samps = Index.kr(indices,index + 1) - startPos; // and how long
var sig = PlayBuf.ar(1,buffer,BufRateScale.ir(buffer),startPos:startPos);
var dur_sec = dur_samps / BufSampleRate.ir(buffer);
var env;
dur_sec = min(dur_sec,1); // just in case some of the slices are *very* long...
env = EnvGen.kr(Env([0,1,1,0],[0.03,dur_sec-0.06,0.03]),doneAction:2);
sig.dup * env;
}.play;
});
});
});
action.(fp,newds);
});
});
});
}.play;
};
~plot.(~buffer,~indices,~ds);
)
// ============== do all of it in one go =======================
(
var path = FluidFilesPath();
~load_folder.(path,{
arg buffer0;
~slice.(buffer0,{
arg buffer1, indices1;
~analyze.(buffer1, indices1,{
arg buffer2, indices2, ds2;
~umap.(buffer2,indices2,ds2,{
arg buffer3, indices3, ds3;
~plot.(buffer3,indices3,ds3,{
arg plotter;
"done with all".postln;
~fp = plotter;
});
});
});
});
});
)

@ -1,108 +0,0 @@
(
Task{
var folder = "/Users/macprocomputer/Desktop/_flucoma/data_saves/211103_152953_2D_browsing_MFCC/";
// var folder = "/Users/macprocomputer/Desktop/_flucoma/data_saves/211103_161354_2D_browsing_SpectralShape/";
// var folder = "/Users/macprocomputer/Desktop/_flucoma/data_saves/211103_161638_2D_browsing_Pitch/";
~ds_original = FluidDataSet(s);
~buffer = Buffer.read(s,folder+/+"buffer.wav");
~indices = Buffer.read(s,folder+/+"indices.wav");
~kdtree = FluidKDTree(s,6);
~ds = FluidDataSet(s);
s.sync;
~indices.loadToFloatArray(action:{
arg fa;
~indices = fa;
});
~ds_original.read(folder+/+"ds.json",{
~ds.read(folder+/+"ds.json",{
~kdtree.fit(~ds,{
~ds.dump({
arg dict;
~ds_dict = dict;
"kdtree fit".postln;
});
});
});
});
}.play;
~play_id = {
arg id;
var index = id.asString.split($-)[1].asInteger;
var start_samps = ~indices[index];
var end_samps = ~indices[index+1];
var dur_secs = (end_samps - start_samps) / ~buffer.sampleRate;
{
var sig = PlayBuf.ar(1,~buffer,BufRateScale.ir(~buffer),startPos:start_samps);
var env = EnvGen.kr(Env([0,1,1,0],[0.03,dur_secs-0.06,0.03]),doneAction:2);
sig.dup;// * env;
}.play;
dur_secs;
};
~pt_buf = Buffer.alloc(s,~ds_dict.at("cols"));
)
(
// hear the 5 nearest points
Routine{
// var id = "slice-558";
var id = ~ds_dict.at("data").keys.choose;
~ds.getPoint(id,~pt_buf,{
~kdtree.kNearest(~pt_buf,{
arg nearest;
Routine{
id.postln;
~play_id.(id).wait;
nearest[1..].do{
arg near;
1.wait;
near.postln;
~play_id.(near).wait;
};
}.play;
})
});
}.play;
)
// Standardize
(
Routine{
var scaler = FluidStandardize(s);
s.sync;
scaler.fitTransform(~ds_original,~ds,{
~kdtree.fit(~ds,{
"standardized & kdtree fit".postln;
});
});
}.play;
)
// Normalize
(
Routine{
var scaler = FluidNormalize(s);
s.sync;
scaler.fitTransform(~ds_original,~ds,{
~kdtree.fit(~ds,{
"normalized & kdtree fit".postln;
});
});
}.play;
)
// Robust Scaler
(
Routine{
var scaler = FluidRobustScale(s);
s.sync;
scaler.fitTransform(~ds_original,~ds,{
~kdtree.fit(~ds,{
"normalized & kdtree fit".postln;
});
});
}.play;
)

@ -1,6 +1,3 @@
s.options.sampleRate_(44100);
s.options.device_("Fireface UC Mac (24006457)");
( (
// decompose! // decompose!
s.waitForBoot{ s.waitForBoot{
@ -8,7 +5,7 @@ s.waitForBoot{
var drums = Buffer.read(s,FluidFilesPath("Nicol-LoopE-M.wav")); var drums = Buffer.read(s,FluidFilesPath("Nicol-LoopE-M.wav"));
var resynth = Buffer(s); var resynth = Buffer(s);
var n_components = 2; var n_components = 2;
FluidBufNMF.process(s,drums,resynth:resynth,components:n_components).wait; FluidBufNMF.process(s,drums,resynth:resynth,components:n_components,resynthMode: 1).wait;
"original sound".postln; "original sound".postln;
{ {
@ -46,7 +43,7 @@ Routine{
~bases = Buffer(s); ~bases = Buffer(s);
~activations = Buffer(s); ~activations = Buffer(s);
~resynth = Buffer(s); ~resynth = Buffer(s);
FluidBufNMF.process(s,drums,bases:~bases,activations:~activations,resynth:~resynth,components:n_components).wait; FluidBufNMF.process(s,drums,bases:~bases,activations:~activations,resynth:~resynth,components:n_components,resynthMode: 1).wait;
{ {
~bases.plot("bases"); ~bases.plot("bases");
~activations.plot("activations"); ~activations.plot("activations");

@ -105,10 +105,10 @@ s.waitForBoot{
// wiggle as the neural network makes it's predictions! // wiggle as the neural network makes it's predictions!
SendReply.kr(trig,"/predictions",val); SendReply.kr(trig,"/predictions",val);
// the actual synthesis algorithm. made by PA Tremblay // the actual synthesis algorithm, made by P.A. Tremblay
#feed2,feed1 = LocalIn.ar(2); #feed2,feed1 = LocalIn.ar(2);
osc1 = MoogFF.ar(SinOsc.ar((((feed1 * val[0]) + val[1]) * base1).midicps,mul: (val[2] * 50).dbamp).atan,(base3 - (val[3] * (FluidLoudness.kr(feed2, 1, 0, hopSize: 64)[0].clip(-120,0) + 120))).lag(128/44100).midicps, val[4] * 3.5); osc1 = MoogFF.ar(SinOsc.ar((((feed1 * val[0]) + val[1]) * base1).midicps,mul: (val[2] * 50).dbamp).atan,(base3 - (val[3] * (FluidLoudness.kr(feed2,truePeak: 0, hopSize: 64)[0].clip(-120,0) + 120))).lag(128/44100).midicps, val[4] * 3.5);
osc2 = MoogFF.ar(SinOsc.ar((((feed2 * val[5]) + val[6]) * base2).midicps,mul: (val[7] * 50).dbamp).atan,(base3 - (val[8] * (FluidLoudness.kr(feed1, 1, 0, hopSize: 64)[0].clip(-120,0) + 120))).lag(128/44100).midicps, val[9] * 3.5); osc2 = MoogFF.ar(SinOsc.ar((((feed2 * val[5]) + val[6]) * base2).midicps,mul: (val[7] * 50).dbamp).atan,(base3 - (val[8] * (FluidLoudness.kr(feed1,truePeak: 0, hopSize: 64)[0].clip(-120,0) + 120))).lag(128/44100).midicps, val[9] * 3.5);
Out.ar(0,LeakDC.ar([osc1,osc2],mul: 0.1)); Out.ar(0,LeakDC.ar([osc1,osc2],mul: 0.1));
LocalOut.ar([osc1,osc2]); LocalOut.ar([osc1,osc2]);
}.play; }.play;

@ -1,9 +1,9 @@
// first run this code - it will sample 40 random points
// you can add more points if you want
// you must 'train' it
// then run the playing target at line 335 below and press 'predict'
( (
Window.closeAll; Window.closeAll;
s.options.inDevice_("MacBook Pro Microphone");
s.options.outDevice_("External Headphones");
// s.options.sampleRate_(48000);
s.options.sampleRate_(44100);
s.waitForBoot{ s.waitForBoot{
Task{ Task{
var win = Window(bounds:Rect(100,100,1000,800)); var win = Window(bounds:Rect(100,100,1000,800));
@ -47,37 +47,33 @@ s.waitForBoot{
}); });
}); });
}; };
var open_mlp = { var display_mlp_params = {
arg path; var params = nn.prGetParams;
// nn.prGetParams.postln; var n_layers = params[1];
nn.read(path,{ var layers_string = "";
var params = nn.prGetParams;
var n_layers = params[1];
var layers_string = "";
// params.postln;
n_layers.do({ // params.postln;
arg i;
if(i > 0,{layers_string = "% ".format(layers_string)});
layers_string = "%%".format(layers_string,params[2+i]);
});
nn.maxIter_(maxIter_nb.value); n_layers.do({
nn.learnRate_(learnRate_nb.value); arg i;
nn.momentum_(momentum_nb.value); if(i > 0,{layers_string = "% ".format(layers_string)});
nn.batchSize_(batchSize_nb.value); layers_string = "%%".format(layers_string,params[2+i]);
defer{
hidden_tf.string_(layers_string);
act_pum.value_(nn.activation);
outAct_pum.value_(nn.outputActivation);
/* maxIter_nb.value_(nn.maxIter);
learnRate_nb.value_(nn.learnRate);
momentum_nb.value_(nn.momentum);
batchSize_nb.value_(nn.batchSize);*/
};
}); });
nn.maxIter_(maxIter_nb.value);
nn.learnRate_(learnRate_nb.value);
nn.momentum_(momentum_nb.value);
nn.batchSize_(batchSize_nb.value);
defer{
hidden_tf.string_(layers_string);
act_pum.value_(nn.activation);
outAct_pum.value_(nn.outputActivation);
/* maxIter_nb.value_(nn.maxIter);
learnRate_nb.value_(nn.learnRate);
momentum_nb.value_(nn.momentum);
batchSize_nb.value_(nn.batchSize);*/
};
}; };
~in_bus = Bus.audio(s); ~in_bus = Bus.audio(s);
@ -138,7 +134,7 @@ s.waitForBoot{
StaticText(win,Rect(0,0,label_width,20)).string_("% MFCCs".format(nMFCCs)); StaticText(win,Rect(0,0,label_width,20)).string_("% MFCCs".format(nMFCCs));
win.view.decorator.nextLine; win.view.decorator.nextLine;
statsWinSl = EZSlider(win,Rect(0,0,item_width,20),"fmcc avg smooth",nil.asSpec,{arg sl; synth.set(\avg_win,sl.value)},0,true,label_width); statsWinSl = EZSlider(win,Rect(0,0,item_width,20),"mfcc avg smooth",nil.asSpec,{arg sl; synth.set(\avg_win,sl.value)},0,true,label_width);
win.view.decorator.nextLine; win.view.decorator.nextLine;
mfcc_multslider = MultiSliderView(win,Rect(0,0,item_width,200)) mfcc_multslider = MultiSliderView(win,Rect(0,0,item_width,200))
@ -162,11 +158,11 @@ s.waitForBoot{
// MLP Parameters // MLP Parameters
StaticText(win,Rect(0,0,label_width,20)).align_(\right).string_("hidden layers"); StaticText(win,Rect(0,0,label_width,20)).align_(\right).string_("hidden layers");
hidden_tf = TextField(win,Rect(0,0,item_width - label_width,20)) hidden_tf = TextField(win,Rect(0,0,item_width - label_width,20))
.string_(nn.hidden.asString.replace(", "," ")[2..(nn.hidden.asString.size-3)]) .string_(nn.hiddenLayers.asString.replace(", "," ")[2..(nn.hiddenLayers.asString.size-3)])
.action_{ .action_{
arg tf; arg tf;
var hidden_ = "[%]".format(tf.string.replace(" ",",")).interpret; var hidden_ = "[%]".format(tf.string.replace(" ",",")).interpret;
nn.hidden_(hidden_); nn.hiddenLayers_(hidden_);
// nn.prGetParams.postln; // nn.prGetParams.postln;
}; };
@ -256,20 +252,39 @@ s.waitForBoot{
win.view.decorator.nextLine; win.view.decorator.nextLine;
Button(win,Rect(0,0,100,20)) Button(win,Rect(0,0,100,20))
.states_([["Save MLP"]]) .states_([["Save"]])
.action_{ .action_{
Dialog.savePanel({ Dialog.savePanel({
arg path; arg path;
nn.write(path); nn.dump{
arg mlp_dict;
scaler_params.dump{
arg scaler_params_dict;
scaler_mfcc.dump{
arg scaler_mfcc_dict;
var dict = Dictionary.new;
dict['mlp'] = mlp_dict;
dict['scaler_params'] = scaler_params_dict;
dict['scaler_mfcc'] = scaler_mfcc_dict;
dict.writeArchive(path);
};
};
};
}); });
}; };
Button(win,Rect(0,0,100,20)) Button(win,Rect(0,0,100,20))
.states_([["Open MLP"]]) .states_([["Open"]])
.action_{ .action_{
Dialog.openPanel({ Dialog.openPanel({
arg path; arg path;
open_mlp.(path); var dict = Object.readArchive(path);
nn.load(dict['mlp'].postln,{display_mlp_params.value});
scaler_params.load(dict['scaler_params'].postln);
scaler_mfcc.load(dict['scaler_mfcc'].postln);
}); });
}; };
@ -299,15 +314,6 @@ s.waitForBoot{
statsWinSl.valueAction_(0.0); statsWinSl.valueAction_(0.0);
/* 100.do{
var cfreq = exprand(20,20000);
var mfreq = exprand(20,20000);
var index = rrand(0.0,20);
parambuf.setn(0,[cfreq,mfreq,index]);
0.2.wait;
add_point.value;
0.05.wait;
};*/
40.do{ 40.do{
var cfreq = exprand(100.0,1000.0); var cfreq = exprand(100.0,1000.0);
var mfreq = exprand(100.0,min(cfreq,500.0)); var mfreq = exprand(100.0,min(cfreq,500.0));
@ -328,7 +334,6 @@ s.waitForBoot{
( (
Routine{ Routine{
//~path = FluidFilesPath("Tremblay-AaS-VoiceQC-B2K.wav");
~path = FluidFilesPath("Tremblay-CEL-GlitchyMusicBoxMelo.wav"); ~path = FluidFilesPath("Tremblay-CEL-GlitchyMusicBoxMelo.wav");
~test_buf = Buffer.readChannel(s,~path,channels:[0]); ~test_buf = Buffer.readChannel(s,~path,channels:[0]);
s.sync; s.sync;

@ -151,3 +151,5 @@ FluidBufStats.process(s,~pitches,numChans:1,stats:~stats,weights:~thresh_buf,out
//compare with the source //compare with the source
~src.play; ~src.play;
) )
// further investigations are also in the Examples/dataset/1-learning examples/10b-weighted-pitch-comparison.scd

@ -25,7 +25,7 @@
Routine{ Routine{
for (1,x.size - 1, { for (1,x.size - 1, {
arg i; arg i;
FluidBufCompose.process(s,x[i],destination:x[0], destStartFrame:x[0].numFrames); FluidBufCompose.process(s,x[i],destination:x[0], destStartFrame:x[0].numFrames).wait;
}); });
"Done!".postln; "Done!".postln;
}.play; }.play;
@ -83,7 +83,7 @@
Routine{ Routine{
for (1,x.size - 1, { for (1,x.size - 1, {
arg i; arg i;
FluidBufCompose.process(s,x[i],destination:x[0], destStartChan:x[0].numChannels); FluidBufCompose.process(s,x[i],destination:x[0], destStartChan:x[0].numChannels).wait;
}); });
"Done!".postln; "Done!".postln;
}.play; }.play;

@ -30,7 +30,7 @@
~knnALLval.add((x["data"][i.asString])) ~knnALLval.add((x["data"][i.asString]))
}}; }};
) )
~knnALLval.flatten(1).plot(\source,discrete: true, minval:0, maxval: 1).plotMode=\bars; ~knnALLval.flatten(1).plot(\knn,discrete: true, minval:0, maxval: 1).plotMode=\bars;
//Regressing these value-pairs directly with knn, we see a full set of values being predicted: it looks like linear interpolation, but it never goes outside the boundaries. This is because we take a weighted average of the 2 nearest neighbours, which are not necessarily on either side of the queried value; they might both be on the same side, e.g. for queries 0 to 9 (10 and 20 are nearest) and 31 to 40 (20 and 30 are nearest).
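// a quick language-side illustration of the clamping described above (plain sclang arithmetic,
// not a FluCoMa call): with training inputs [10, 20, 30], an inverse-distance weighted average
// of the 2 nearest neighbours can never land outside 10-30, however far away the query is
(
var train = [10, 20, 30];
var query = 40;
var nearest = train.copy.sort({ |a, b| (a - query).abs < (b - query).abs }).keep(2); // [30, 20]
var weights = nearest.collect({ |n| 1 / ((n - query).abs).max(1e-6) });
var prediction = (nearest * weights).sum / weights.sum;
"2-NN weighted average for % -> %".format(query, prediction.round(0.01)).postln; // stays below 30
)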
@ -46,7 +46,7 @@
~mlpALLval.add((x["data"][i.asString])) ~mlpALLval.add((x["data"][i.asString]))
}}; }};
) )
~mlpALLval.flatten(1).plot(\source,discrete: true, minval:0, maxval: 1).plotMode=\bars; ~mlpALLval.flatten(1).plot(\mlp_full_range,discrete: true, minval:0, maxval: 1).plotMode=\bars;
//We see that we have a large bump and nothing else. This is because our inputs are very large (10-30) and outside the optimal range of the activation function (0-1 for sigmoid), so our network saturates and cannot recover. If we normalise our inputs and rerun the network, we get a curve that fits the 3 values. You can fit more than once to get more iterations and lower the error.
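// a minimal sketch of that normalisation step (the dataset name ~dsINnorm and the regressor
// ~mlp are placeholders here, not necessarily the names used elsewhere in this file):
(
Routine{
	var norm = FluidNormalize(s);
	~dsINnorm = FluidDataSet(s);
	s.sync;
	norm.fitTransform(~dsIN, ~dsINnorm, {
		"inputs scaled to 0-1, inside the sigmoid's comfortable range".postln;
		// ~mlp.fit(~dsINnorm, ~dsOUT, { |err| err.postln }); // then refit on the scaled inputs
	});
}.play;
)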
@ -63,7 +63,7 @@
~mlpALLval.add((x["data"][i.asString])) ~mlpALLval.add((x["data"][i.asString]))
}}; }};
) )
~mlpALLval.flatten(1).plot(\source,discrete: true, minval:0, maxval: 1).plotMode=\bars; ~mlpALLval.flatten(1).plot(\mlp_normalized,discrete: true, minval:0, maxval: 1).plotMode=\bars;
//Now we can add one value to our sparse dataset. Note that we revert back to full-range values here for the example
~dsIN.load(Dictionary.newFrom([\cols, 1, \data, Dictionary.newFrom([\point1, [10], \point2, [20], \point3, [30], \point4, [22]])])); ~dsIN.load(Dictionary.newFrom([\cols, 1, \data, Dictionary.newFrom([\point1, [10], \point2, [20], \point3, [30], \point4, [22]])]));

@ -47,9 +47,9 @@ y = Synth(\becauseIcan,[\bufnum, b.bufnum, \nmfa, c.bufnum, \nmfb, d.bufnum, \in
( (
w = OSCFunc({ arg msg; w = OSCFunc({ arg msg;
if(msg[3]== 1, { if(msg[3]== 1, {
FluidBufNMF.process(s, b, numFrames: 22500, resynth: c.bufnum, components: 3, fftSize: 1024, windowSize: 512, hopSize: 256); FluidBufNMF.process(s, b, numFrames: 22500, resynth: c.bufnum, resynthMode: 1, components: 3, fftSize: 1024, windowSize: 512, hopSize: 256);
}, { }, {
FluidBufNMF.process(s, b, 22050, 22500, resynth: d.bufnum, components: 3, fftSize: 1024, windowSize: 512, hopSize: 256); FluidBufNMF.process(s, b, 22050, 22500, resynth: d.bufnum, resynthMode: 1, components: 3, fftSize: 1024, windowSize: 512, hopSize: 256);
});}, '/processplease', s.addr); });}, '/processplease', s.addr);
) )

@ -11,7 +11,7 @@ e = Buffer.new(s);
// train where all objects are present // train where all objects are present
( (
Routine { Routine {
FluidBufNMF.process(s,b,130000,150000,0,1, c, x, components:10); FluidBufNMF.process(s, b, startFrame: 130000, numFrames: 150000, numChans: 1, resynth: c, resynthMode: 1, bases: x, components:10).wait;
c.query; c.query;
}.play; }.play;
) )
@ -19,12 +19,12 @@ Routine {
// wait for the query to print // wait for the query to print
// then find a component for each item you want to find. You could also sum them. Try to find a component with a good object-to-rest ratio // then find a component for each item you want to find. You could also sum them. Try to find a component with a good object-to-rest ratio
( (
~dog =1; ~dog =0;
{PlayBuf.ar(10,c)[~dog]}.play {PlayBuf.ar(10,c)[~dog]}.play
) )
( (
~bird = 3; ~bird = 1;
{PlayBuf.ar(10,c)[~bird]}.play {PlayBuf.ar(10,c)[~bird]}.play
) )
@ -32,8 +32,8 @@ Routine {
// copy at least one other component to a third filter, a sort of left-over channel // copy at least one other component to a third filter, a sort of left-over channel
( (
Routine{ Routine{
FluidBufCompose.process(s, x, startChan:~dog, numChans: 1, destination: e); FluidBufCompose.process(s, x, startChan:~dog, numChans: 1, destination: e).wait;
FluidBufCompose.process(s, x, startChan:~bird, numChans: 1, destStartChan: 1, destination: e, destGain:1); FluidBufCompose.process(s, x, startChan:~bird, numChans: 1, destStartChan: 1, destination: e, destGain:1).wait;
(0..9).removeAll([~dog,~bird]).do({|chan|FluidBufCompose.process(s,x, startChan:chan, numChans: 1, destStartChan: 2, destination: e, destGain:1)}); (0..9).removeAll([~dog,~bird]).do({|chan|FluidBufCompose.process(s,x, startChan:chan, numChans: 1, destStartChan: 2, destination: e, destGain:1)});
e.query; e.query;
}.play; }.play;

@ -49,6 +49,7 @@ table::
##link::Classes/FluidAmpFeature:: || link::Classes/FluidBufAmpFeature:: || Detrending Amplitude Envelope Descriptor ##link::Classes/FluidAmpFeature:: || link::Classes/FluidBufAmpFeature:: || Detrending Amplitude Envelope Descriptor
##link::Classes/FluidNoveltyFeature:: || link::Classes/FluidBufNoveltyFeature:: || Novelty descriptor based on a choice of analysis descriptors ##link::Classes/FluidNoveltyFeature:: || link::Classes/FluidBufNoveltyFeature:: || Novelty descriptor based on a choice of analysis descriptors
##link::Classes/FluidOnsetFeature:: || link::Classes/FluidBufOnsetFeature:: || Descriptor comparing spectral frames using a choice of comparisons ##link::Classes/FluidOnsetFeature:: || link::Classes/FluidBufOnsetFeature:: || Descriptor comparing spectral frames using a choice of comparisons
##link::Classes/FluidSineFeature:: || link::Classes/FluidBufSineFeature:: || Sinusoidal peak extraction
:: ::
section:: Decompose Audio section:: Decompose Audio
