Merge branch 'help-files' into dev

Ted Moore
commit 249c36b75a

@ -0,0 +1,285 @@
/*
=================================================
| |
| LOAD AND ANALYZE THE SOURCE MATERIAL |
| |
=================================================
*/
(
// ============= 1. LOAD SOME FILES TO BE THE SOURCE MATERIAL ===================
// put your own folder path here! it's best if they're all mono for now.
~source_files_folder = "/Users/macprocomputer/Desktop/audio_files/";
~loader = FluidLoadFolder(~source_files_folder); // this is a nice helper class that will load a bunch of files from a folder.
~loader.play(s,{ // .play will cause it to *actually* do the loading
// we really just want access to the buffer. there is also a .index with some info about the files
// but we'll ignore that for now
~source_buf = ~loader.buffer;
"all files loaded".postln;
// double check that they're all mono: the buffer of the loaded files will have as many channels as the file with the most channels
// so if this is 1, then we know all the files were mono.
"num channels: %".format(~source_buf.numChannels).postln
});
)
~loader.buffer
~loader.index.dopostln
// ~source_buf.plot
(
// ==================== 2. SLICE THE SOURCE MATERIAL ACCORDING TO SPECTRAL ONSETS =========================
~source_indices_buf = Buffer(s); // a buffer for writing the indices into
FluidBufOnsetSlice.process(s,~source_buf,indices:~source_indices_buf,metric:9,threshold:0.15,minSliceLength:9,action:{ // do the slicing
~source_indices_buf.loadToFloatArray(action:{
arg indices_array;
// post the results so that you can tweak the parameters and get what you want
"found % slices".format(indices_array.size-1).postln;
"average length: % seconds".format((~source_buf.duration / (indices_array.size-1)).round(0.001)).postln;
})
});
)
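// optional: visually check the slice points against the waveform (FluidWaveform is used the same way later in these files)
// FluidWaveform(~source_buf,~source_indices_buf);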
(
// =========================== 3. DEFINE A FUNCTION FOR DOING THE ANALYSIS ===================================
~analyze_to_dataset = {
arg audio_buffer, slices_buffer, action; // the audio buffer to analyze, a buffer with the slice points, and an action to execute when done
~nmfccs = 13;
Routine{
var features_buf = Buffer(s); // a buffer for writing the MFCC analyses into
var stats_buf = Buffer(s); // a buffer for writing the statistical summary of the MFCC analyses into
var flat_buf = Buffer(s); // a buffer for writing only the mean MFCC values into
var dataset = FluidDataSet(s); // the dataset that all of these analyses will be stored in
slices_buffer.loadToFloatArray(action:{ // get the indices from the server loaded down to the language
arg slices_array;
// iterate over each index in this array, paired with its next neighbor, so that we know where to start
// and stop the analysis
slices_array.doAdjacentPairs{
arg start_frame, end_frame, slice_index;
var num_frames = end_frame - start_frame;
"analyzing slice: % / %".format(slice_index + 1,slices_array.size - 1).postln;
// mfcc analysis, hop over that 0th coefficient because it relates to loudness and here we want to focus on timbre
FluidBufMFCC.process(s,audio_buffer,start_frame,num_frames,features:features_buf,startCoeff:1,numCoeffs:~nmfccs).wait;
// get a statistical summary of the MFCC analysis for this slice
FluidBufStats.process(s,features_buf,stats:stats_buf).wait;
// extract and flatten just the 0th frame (numFrames:1) of the statistical summary (because that is the mean)
FluidBufFlatten.process(s,stats_buf,numFrames:1,destination:flat_buf).wait;
// now that the means are extracted and flattened, we can add this datapoint to the dataset:
dataset.addPoint("slice-%".format(slice_index),flat_buf);
};
});
action.value(dataset); // execute the function and pass in the dataset that was created!
}.play;
};
)
(
// =================== 4. DO THE ANALYSIS =====================
~analyze_to_dataset.(~source_buf,~source_indices_buf,{ // pass in the audio buffer of the source, and the slice points
arg ds;
~source_dataset = ds; // set the ds to a global variable so we can access it later
~source_dataset.print;
});
)
/*
=================================================
| |
| LOAD AND ANALYZE THE TARGET |
| |
=================================================
*/
(
// ============= 5. LOAD THE FILE ===================
~target_path = File.realpath(FluidBufPitch.class.filenameSymbol).dirname.withTrailingSlash ++ "../AudioFiles/Nicol-LoopE-M.wav";
~target_buf = Buffer.read(s,~target_path);
)
(
// ============= 6. SLICE ===================
~target_indices_buf = Buffer(s);
FluidBufOnsetSlice.process(s,~target_buf,indices:~target_indices_buf,metric:9,threshold:0.5,action:{
~target_indices_buf.loadToFloatArray(action:{
arg indices_array;
// post the results so that you can tweak the parameters and get what you want
"found % slices".format(indices_array.size-1).postln;
"average length: % seconds".format((~target_buf.duration / (indices_array.size-1)).round(0.001)).postln;
})
});
)
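// optional: visually check the slice points against the waveform
// FluidWaveform(~target_buf,~target_indices_buf);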
(
// =========== 7. USE THE SAME ANALYSIS FUNCTION
~analyze_to_dataset.(~target_buf,~target_indices_buf,{
arg ds;
~target_dataset = ds;
~target_dataset.print;
});
)
(
// ======================= 8. TEST DRUM LOOP PLAYBACK ====================
// play back the drum slices with a .wait in between so we hear the drum loop
Routine{
~target_indices_buf.loadToFloatArray(action:{
arg target_indices_array;
// prepend 0 (the start of the file) to the indices array
target_indices_array = [0] ++ target_indices_array;
// append the total number of frames to know how long to play the last slice for
target_indices_array = target_indices_array ++ [~target_buf.numFrames];
inf.do{ // loop for infinity
arg i;
// get the index to play by modulo one less than the number of slices (we don't want to *start* playing from the
// last slice point, because that's the end of the file!)
var index = i % (target_indices_array.size - 1);
// nb. the minus one is so that the drum slice from the beginning of the file to the first index is called "-1"
// this is because that slice didn't actually get analyzed
var slice_id = index - 1;
var start_frame = target_indices_array[index];
var dur_frames = target_indices_array[index + 1] - start_frame;
var dur_secs = dur_frames / ~target_buf.sampleRate;
"playing slice: %".format(slice_id).postln;
{
var sig = PlayBuf.ar(1,~target_buf,BufRateScale.ir(~target_buf),0,start_frame,0,2);
var env = EnvGen.kr(Env([0,1,1,0],[0.03,dur_secs-0.06,0.03]),doneAction:2);
sig = sig * env; // include this env if you like, but keep the line above because it will free the synth after the slice!
sig.dup;
}.play;
dur_secs.wait;
};
});
}.play;
)
/*
=================================================
| |
| KDTREE THE DATA AND DO THE LOOKUP |
| |
=================================================
*/
( // ========== 9. FIT THE KDTREE TO THE SOURCE DATASET SO THAT WE CAN QUICKLY LOOKUP NEIGHBORS ===============
Routine{
~kdtree = FluidKDTree(s,10);
s.sync;
~kdtree.fit(~source_dataset,{
"kdtree fit".postln;
});
}.play;
)
(
// ========= 10. A LITTLE HELPER FUNCTION THAT WILL PLAY BACK A SLICE FROM THE SOURCE BY JUST PASSING THE INDEX =============
~play_source_index = {
arg index;
{
var start_frame = Index.kr(~source_indices_buf,index); // look up the start frame with the index *on the server* using Index.kr
var end_frame = Index.kr(~source_indices_buf,index+1); // same for the end frame
var num_frames = end_frame - start_frame;
var dur_secs = num_frames / BufSampleRate.ir(~source_buf); // use the buffer's own sample rate
var sig = PlayBuf.ar(1,~source_buf,BufRateScale.ir(~source_buf),0,start_frame,0,Done.freeSelf);
var env = EnvGen.kr(Env([0,1,1,0],[0.03,dur_secs-0.06,0.03]),doneAction:Done.freeSelf);
// sig = sig * env; // include this env if you like, but keep the line above because it will free the synth after the slice!
sig.dup;
}.play;
};
)
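// try it: play a single source slice by passing its index (any slice index found in step 2 will do), e.g.
// ~play_source_index.(3);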
~target_dataset.print;
(
// ======================= 11. TEST DRUM LOOP PLAYBACK ====================
// play back the drum slices with a .wait in between so we hear the drum loop
// this is very similar to step 8 above, but now instead of playing the slice of
// the drum loop, it gets the analysis of the drum loop's slice into "query_buf",
// then uses that info to lookup the nearest neighbour in the source dataset and
// play that slice
Routine{
var query_buf = Buffer.alloc(s,~nmfccs); // a buffer for doing the neighbor lookup with
~target_indices_buf.loadToFloatArray(action:{
arg target_indices_array;
// prepend 0 (the start of the file) to the indices array
target_indices_array = [0] ++ target_indices_array;
// append the total number of frames to know how long to play the last slice for
target_indices_array = target_indices_array ++ [~target_buf.numFrames];
inf.do{ // loop for infinity
arg i;
// get the index to play by modulo one less than the number of slices (we don't want to *start* playing from the
// last slice point, because that's the end of the file!)
var index = i % (target_indices_array.size - 1);
// nb. the minus one is so that the drum slice from the beginning of the file to the first index is called "-1"
// this is because that slice didn't actually get analyzed
var slice_id = index - 1;
var start_frame = target_indices_array[index];
var dur_frames = target_indices_array[index + 1] - start_frame;
// this will be used to space out the source slices according to the target timings
var dur_secs = dur_frames / ~target_buf.sampleRate;
"target slice: %".format(slice_id).postln;
// as long as this slice is not the one that starts at the beginning of the file (-1) and
// not the slice at the end of the file (because neither of these have analyses), let's
// do the lookup
if((slice_id >= 0) && (slice_id < (target_indices_array.size - 3)),{
// use the slice id to (re)create the slice identifier and load the data point into "query_buf"
~target_dataset.getPoint("slice-%".format(slice_id.asInteger),query_buf,{
// once it's loaded, use that buffer as the input to lookup the nearest
// neighbour data point in the kdtree of source slices
~kdtree.kNearest(query_buf,{
arg nearest;
var nearest_index;
nearest.postln;
// peel off just the integer part of the slice to use in the helper function
nearest_index = nearest.choose.asString.split($-)[1].asInteger;
nearest_index.postln;
~play_source_index.(nearest_index);
});
});
});
// comment out this function if you *don't* want to hear the drums alongside the neighbor slices
{
var sig = PlayBuf.ar(1,~target_buf,BufRateScale.ir(~target_buf),0,start_frame,0,2);
var env = EnvGen.kr(Env([0,1,1,0],[0.03,dur_secs-0.06,0.03]),doneAction:2);
// sig = sig * env; // include this env if you like, but keep the line above because it will free the synth after the slice!
sig.dup * -8.dbamp;
}.play;
dur_secs.wait;
};
});
}.play;
)

@ -0,0 +1,288 @@
/*
=================================================
| |
| LOAD AND ANALYZE THE SOURCE MATERIAL |
| |
=================================================
*/
(
// ============= 1. LOAD SOME FILES TO BE THE SOURCE MATERIAL ===================
// put your own folder path here! it's best if they're all mono for now.
~source_files_folder = "/Users/macprocomputer/Desktop/sccm/files_fabrizio_01/src_files/";
~loader = FluidLoadFolder(~source_files_folder); // this is a nice helper class that will load a bunch of files from a folder.
~loader.play(s,{ // .play will cause it to *actually* do the loading
// we really just want access to the buffer. there is also a .index with some info about the files
// but we'll ignore that for now
~source_buf = ~loader.buffer;
"all files loaded".postln;
// double check that they're all mono: the buffer of the loaded files will have as many channels as the file with the most channels
// so if this is 1, then we know all the files were mono.
"num channels: %".format(~source_buf.numChannels).postln
});
)
(
// ==================== 2. SLICE THE SOURCE MATERIAL ACCORDING TO SPECTRAL ONSETS =========================
~source_indices_buf = Buffer(s); // a buffer for writing the indices into
FluidBufOnsetSlice.process(s,~source_buf,indices:~source_indices_buf,metric:9,threshold:0.5,minSliceLength:9,action:{ // do the slicing
~source_indices_buf.loadToFloatArray(action:{
arg indices_array;
// post the results so that you can tweak the parameters and get what you want
"found % slices".format(indices_array.size-1).postln;
"average length: % seconds".format((~source_buf.duration / (indices_array.size-1)).round(0.001)).postln;
})
});
)
(
// =========================== 3. DEFINE A FUNCTION FOR DOING THE ANALYSIS ===================================
~analyze_to_dataset = {
arg audio_buffer, slices_buffer, action; // the audio buffer to analyze, a buffer with the slice points, and an action to execute when done
~nmfccs = 13;
Routine{
var features_buf = Buffer(s); // a buffer for writing the MFCC analyses into
var stats_buf = Buffer(s); // a buffer for writing the statistical summary of the MFCC analyses into
var flat_buf = Buffer(s); // a buffer for writing only the mean MFCC values into
var dataset = FluidDataSet(s); // the dataset that all of these analyses will be stored in
slices_buffer.loadToFloatArray(action:{ // get the indices from the server loaded down to the language
arg slices_array;
// iterate over each index in this array, paired with its next neighbor, so that we know where to start
// and stop the analysis
slices_array.doAdjacentPairs{
arg start_frame, end_frame, slice_index;
var num_frames = end_frame - start_frame;
"analyzing slice: % / %".format(slice_index + 1,slices_array.size - 1).postln;
// mfcc analysis, hop over that 0th coefficient because it relates to loudness and here we want to focus on timbre
FluidBufMFCC.process(s,audio_buffer,start_frame,num_frames,features:features_buf,startCoeff:1,numCoeffs:~nmfccs).wait;
// get a statistical summary of the MFCC analysis for this slice
FluidBufStats.process(s,features_buf,stats:stats_buf).wait;
// extract and flatten just the 0th frame (numFrames:1) of the statistical summary (because that is the mean)
FluidBufFlatten.process(s,stats_buf,numFrames:1,destination:flat_buf).wait;
// now that the means are extracted and flattened, we can add this datapoint to the dataset:
dataset.addPoint("slice-%".format(slice_index),flat_buf);
};
});
action.value(dataset); // execute the function and pass in the dataset that was created!
}.play;
};
)
(
// =================== 4. DO THE ANALYSIS =====================
~analyze_to_dataset.(~source_buf,~source_indices_buf,{ // pass in the audio buffer of the source, and the slice points
arg ds;
~source_dataset = ds; // set the ds to a global variable so we can access it later
~source_dataset.print;
});
)
/*
=================================================
| |
| LOAD AND ANALYZE THE TARGET |
| |
=================================================
*/
(
// ============= 5. LOAD THE FILE ===================
~target_path = File.realpath(FluidBufPitch.class.filenameSymbol).dirname.withTrailingSlash ++ "../AudioFiles/Nicol-LoopE-M.wav";
~target_buf = Buffer.read(s,~target_path);
)
(
// ============= 6. SLICE ===================
~target_indices_buf = Buffer(s);
FluidBufOnsetSlice.process(s,~target_buf,indices:~target_indices_buf,metric:9,threshold:0.5,action:{
~target_indices_buf.loadToFloatArray(action:{
arg indices_array;
// post the results so that you can tweak the parameters and get what you want
"found % slices".format(indices_array.size-1).postln;
"average length: % seconds".format((~target_buf.duration / (indices_array.size-1)).round(0.001)).postln;
})
});
)
(
// =========== 7. USE THE SAME ANALYSIS FUNCTION
~analyze_to_dataset.(~target_buf,~target_indices_buf,{
arg ds;
~target_dataset = ds;
~target_dataset.print;
});
)
(
// ======================= 8. TEST DRUM LOOP PLAYBACK ====================
// play back the drum slices with a .wait in between so we hear the drum loop
Routine{
~target_indices_buf.loadToFloatArray(action:{
arg target_indices_array;
// prepend 0 (the start of the file) to the indices array
target_indices_array = [0] ++ target_indices_array;
// append the total number of frames to know how long to play the last slice for
target_indices_array = target_indices_array ++ [~target_buf.numFrames];
inf.do{ // loop for infinity
arg i;
// get the index to play by modulo one less than the number of slices (we don't want to *start* playing from the
// last slice point, because that's the end of the file!)
var index = i % (target_indices_array.size - 1);
// nb. the minus one is so that the drum slice from the beginning of the file to the first index is called "-1"
// this is because that slice didn't actually get analyzed
var slice_id = index - 1;
var start_frame = target_indices_array[index];
var dur_frames = target_indices_array[index + 1] - start_frame;
var dur_secs = dur_frames / ~target_buf.sampleRate;
"playing slice: %".format(slice_id).postln;
{
var sig = PlayBuf.ar(1,~target_buf,BufRateScale.ir(~target_buf),0,start_frame,0,2);
var env = EnvGen.kr(Env([0,1,1,0],[0.03,dur_secs-0.06,0.03]),doneAction:2);
// sig = sig * env; // include this env if you like, but keep the line above because it will free the synth after the slice!
sig.dup;
}.play;
dur_secs.wait;
};
});
}.play;
)
/*
=================================================
| |
| KDTREE THE DATA AND DO THE LOOKUP |
| |
=================================================
*/
(
// ========== 9. FIT THE KDTREE TO THE SOURCE DATASET SO THAT WE CAN QUICKLY LOOKUP NEIGHBORS ===============
Routine{
~kdtree = FluidKDTree(s);
~scaled_dataset = FluidDataSet(s);
// leave only one of these scalers *not* commented-out. try all of them!
//~scaler = FluidStandardize(s);
~scaler = FluidNormalize(s);
// ~scaler = FluidRobustScale(s);
s.sync;
~scaler.fitTransform(~source_dataset,~scaled_dataset,{
~kdtree.fit(~scaled_dataset,{
"kdtree fit".postln;
});
});
}.play;
)
(
// ========= 10. A LITTLE HELPER FUNCTION THAT WILL PLAY BACK A SLICE FROM THE SOURCE BY JUST PASSING THE INDEX =============
~play_source_index = {
arg index, src_dur;
{
var start_frame = Index.kr(~source_indices_buf,index); // look up the start frame with the index *on the server* using Index.kr
var end_frame = Index.kr(~source_indices_buf,index+1); // same for the end frame
var num_frames = end_frame - start_frame;
var dur_secs = min(num_frames / BufSampleRate.ir(~source_buf),src_dur); // cap the duration at the target slice's duration
var sig = PlayBuf.ar(1,~source_buf,BufRateScale.ir(~source_buf),0,start_frame,0,2);
var env = EnvGen.kr(Env([0,1,1,0],[0.03,dur_secs-0.06,0.03]),doneAction:2);
// sig = sig * env; // include this env if you like, but keep the line above because it will free the synth after the slice!
sig.dup;
}.play;
};
)
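// try it: play source slice 3, capped at 1 second, e.g.
// ~play_source_index.(3,1);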
(
// ======================= 11. QUERY THE DRUM SOUNDS TO FIND "REPLACEMENTS" ====================
// play back the drum slices with a .wait in between so we hear the drum loop
// this is very similar to step 8 above, but now instead of playing the slice of
// the drum loop, it gets the analysis of the drum loop's slice into "query_buf",
// then uses that info to lookup the nearest neighbour in the source dataset and
// play that slice
Routine{
var query_buf = Buffer.alloc(s,~nmfccs); // a buffer for doing the neighbor lookup with
var scaled_buf = Buffer.alloc(s,~nmfccs);
~target_indices_buf.loadToFloatArray(action:{
arg target_indices_array;
// prepend 0 (the start of the file) to the indices array
target_indices_array = [0] ++ target_indices_array;
// append the total number of frames to know how long to play the last slice for
target_indices_array = target_indices_array ++ [~target_buf.numFrames];
inf.do{ // loop for infinity
arg i;
// get the index to play by modulo one less than the number of slices (we don't want to *start* playing from the
// last slice point, because that's the end of the file!)
var index = i % (target_indices_array.size - 1);
// nb. the minus one is so that the drum slice from the beginning of the file to the first index is called "-1"
// this is because that slice didn't actually get analyzed
var slice_id = index - 1;
var start_frame = target_indices_array[index];
var dur_frames = target_indices_array[index + 1] - start_frame;
// this will be used to space out the source slices according to the target timings
var dur_secs = dur_frames / ~target_buf.sampleRate;
"target slice: %".format(slice_id).postln;
// as long as this slice is not the one that starts at the beginning of the file (-1) and
// not the slice at the end of the file (because neither of these have analyses), let's
// do the lookup
if((slice_id >= 0) && (slice_id < (target_indices_array.size - 3)),{
// use the slice id to (re)create the slice identifier and load the data point into "query_buf"
~target_dataset.getPoint("slice-%".format(slice_id.asInteger),query_buf,{
// once it's loaded, scale it using the scaler
~scaler.transformPoint(query_buf,scaled_buf,{
// once it's scaled, use that buffer to look up the nearest neighbour data point in the kdtree of source slices
~kdtree.kNearest(scaled_buf,{
arg nearest;
// peel off just the integer part of the slice to use in the helper function
var nearest_index = nearest.asString.split($-)[1].asInteger;
nearest_index.postln;
~play_source_index.(nearest_index,dur_secs);
});
});
});
});
// if you want to hear the drum set along side the neighbor slices, uncomment this function
/*{
var sig = PlayBuf.ar(1,~target_buf,BufRateScale.ir(~target_buf),0,start_frame,0,2);
var env = EnvGen.kr(Env([0,1,1,0],[0.03,dur_secs-0.06,0.03]),doneAction:2);
// sig = sig * env; // include this env if you like, but keep the line above because it will free the synth after the slice!
sig.dup;
}.play;*/
dur_secs.wait;
};
});
}.play;
)

@ -0,0 +1,131 @@
(
Window.closeAll;
s.waitForBoot{
Task{
var buf = Buffer.read(s,File.realpath(FluidBufPitch.class.filenameSymbol).dirname.withTrailingSlash ++ "../AudioFiles/Nicol-LoopE-M.wav");
var slicepoints = Buffer(s); // FluidBufAmpSlice will write into this buffer the samples at which slices are detected.
var features_buf = Buffer(s); // a buffer for writing the analysis from FluidSpectralShape into
var stats_buf = Buffer(s); // a buffer for writing the statistic analyses into
var point_buf = Buffer(s,2); // a buffer that will be used to add points to the dataset - the analyses will be written into this buffer first
var ds = FluidDataSet(s); // a data set for storing the analysis of each slice (mean centroid & mean loudness)
var scaler = FluidNormalize(s); // a tool for normalizing a dataset (making it all range between zero and one)
var kdtree = FluidKDTree(s); // a kdtree for fast nearest neighbour lookup
s.sync;
FluidBufAmpSlice.process(s,buf,indices:slicepoints,fastRampUp:10,fastRampDown:2205,slowRampUp:4410,slowRampDown:4410,onThreshold:10,offThreshold:5,floor:-40,minSliceLength:4410,highPassFreq:20).wait;
// slice the drums buffer based on amplitude
// the samples at which slices are detected will be written into the "slicepoints" buffer
FluidWaveform(buf,slicepoints,Rect(0,0,1600,400));
// plot the drums buffer with the slicepoints overlayed
slicepoints.loadToFloatArray(action:{ // bring the values in the slicepoints buffer from the server to the language as a float array
arg slicepoints_fa; // fa stands for float array
slicepoints_fa.postln;
slicepoints_fa.doAdjacentPairs{
/*
take each of the adjacent pairs and pass them to this function as an array of 2 values
nb. for example [0,1,2,3,4] will execute this function 4 times, passing these 2 value arrays:
[0,1]
[1,2]
[2,3]
[3,4]
this will give us each slice point *and* the next slice point so that we
can tell the analyzers where to start analyzing and how many frames to analyze
*/
arg start_samps, end_samps, slice_i;
var num_samps = end_samps - start_samps; // the next slice point minus the current one gives us how many samples to analyze
slice_i.postln; // post which slice index we're currently analyzing
// the ".wait"s will pause the Task (that this whole thing is in) until each analysis is done;
FluidBufSpectralShape.process(s,buf,start_samps,num_samps,features:features_buf).wait;
/* analyze the drum buffer starting at `start_samps` and for `num_samps` samples
this returns a buffer (features_buf) that is 7 channels wide (for the 7 spectral analyses, see helpfile) and
as many frames long as there are FFT frames in the slice */
FluidBufStats.process(s,features_buf,numChans:1,stats:stats_buf).wait;
/* perform a statistical analysis of the spectral analysis, doing only the first channel (specified by `numChans:1`)
this will return just one channel because we asked it to analyze only 1 channel. that one channel will have 7 frames
corresponding to the 7 statistical analyses that it performs */
FluidBufCompose.process(s,stats_buf,0,1,destination:point_buf,destStartFrame:0).wait;
/* FluidBufCompose is essentially a "buf copy" operation. this will copy just the zeroth frame from `stats_buf` (the mean)
into the zeroth frame of `point_buf`, which is what we'll eventually use to add the data to the dataset */
FluidBufLoudness.process(s,buf,start_samps,num_samps,features:features_buf).wait;
// do a loudness analysis
FluidBufStats.process(s,features_buf,numChans:1,stats:stats_buf).wait;
// see above
FluidBufCompose.process(s,stats_buf,0,1,destination:point_buf,destStartFrame:1).wait;
/* see above, but this time the mean loudness is being copied into the 1st frame of `point_buf` so that it doesn't overwrite the mean centroid */
ds.addPoint("point-%".format(slice_i),point_buf);
/* now that we've added the mean centroid and mean loudness into `point_buf`, we can use that buf to add the data that is in it to the dataset.
we also need to give it an identifier. here we're calling it "point-%", where the "%" is replaced by the index of the slice */
s.sync;
};
});
scaler.fitTransform(ds,ds,{
/* scale the dataset so that each dimension is scaled to between 0 and 1. this will do this operation "in place", so that once the
scaling is done on the dataset "ds" it will overwrite that dataset with the normalized values. that is why both the "sourceDataSet" and
"destDataSet" are the same here
*/
kdtree.fit(ds,{ // fit the kdtree to the (now) normalized dataset
ds.dump({ // dump out that dataset to a dictionary so that we can use it with the plotter!
arg ds_dict;// the dictionary version of this dataset
var previous = nil; // a variable for checking if the currently passed nearest neighbour is the same or different from the previous one
FluidPlotter(bounds:Rect(0,0,800,800),dict:ds_dict,mouseMoveAction:{
/* make a FluidPlotter. nb. the dict is the dict from a FluidDataSet.dump. the mouseMoveAction is a
callback that fires any time you click or drag on this plotter */
arg view, x, y, modifiers;
/* the function is passed:
(1) itself
(2) mouse x position (scaled to what the view's scales are)
(3) mouse y position (scaled to what the view's scales are)
(4) modifier keys that are pressed while clicking or dragging
*/
point_buf.setn(0,[x,y]); // write the x y position into a buffer so that we can use it to...
kdtree.kNearest(point_buf,{ // look up the nearest slice to that x y position
arg nearest; // this is reported back as a symbol, so...
nearest = nearest.asString; // we'll convert it to a string here
if(nearest != previous,{
/* if it's not the last one that was found, we can do something with it. this
is kind of like a debounce. we just don't want to retrigger this action each time a drag
happens if it is actually the same nearest neighbor*/
var index = nearest.split($-)[1].interpret;
// split at the hyphen and interpret the integer on the end to find out what slice index it is
{
var startPos = Index.kr(slicepoints,index); // look up the start sample based on the index
var endPos = Index.kr(slicepoints,index + 1); // look up the end sample based on the index
var dur_secs = (endPos - startPos) / BufSampleRate.ir(buf); // figure out how long it is in seconds to create an envelope
var env = EnvGen.kr(Env([0,1,1,0],[0.03,dur_secs-0.06,0.03]),doneAction:2);
var sig = PlayBuf.ar(1,buf,BufRateScale.ir(buf),startPos:startPos);
sig.dup * env;
}.play; // play it!
view.highlight_(nearest); // make this point a little bit bigger in the plot
previous = nearest;
});
});
});
});
});
});
}.play(AppClock);
}
)

@ -0,0 +1,231 @@
(
// 1. Instantiate some of the things we need.
Window.closeAll;
s.options.sampleRate_(48000);
s.options.device_("Fireface UC Mac (24006457)");
s.waitForBoot{
Task{
var win;
~nMFCCs = 13;
~trombone = Buffer.read(s,"/Users/macprocomputer/Desktop/_flucoma/code/flucoma-core-src/AudioFiles/Olencki-TenTromboneLongTones-M.wav");
~oboe = Buffer.read(s,"/Users/macprocomputer/Desktop/_flucoma/code/flucoma-core-src/AudioFiles/Harker-DS-TenOboeMultiphonics-M.wav");
~timbre_buf = Buffer.alloc(s,~nMFCCs);
~ds = FluidDataSet(s);
~labels = FluidLabelSet(s);
~point_counter = 0;
s.sync;
win = Window("MFCCs",Rect(0,0,800,300));
~mfcc_multislider = MultiSliderView(win,win.bounds)
.elasticMode_(true)
.size_(~nMFCCs);
win.front;
}.play(AppClock);
};
)
/*
2. Play some trombone sounds.
*/
(
{
var sig = PlayBuf.ar(1,~trombone,BufRateScale.ir(~trombone),doneAction:2);
var mfccs = FluidMFCC.kr(sig,~nMFCCs,40,1,maxNumCoeffs:~nMFCCs);
SendReply.kr(Impulse.kr(30),"/mfccs",mfccs);
FluidKrToBuf.kr(mfccs,~timbre_buf);
sig.dup;
}.play;
OSCFunc({
arg msg;
{~mfcc_multislider.value_(msg[3..].linlin(-30,30,0,1))}.defer;
},"/mfccs");
)
/*
3. When you know the MFCC buf has trombone timbre data in it
(because you hear trombone and see it in the multislider),
execute this next block to add points to the dataset and
labels to the label set.
Avoid adding points when there is silence in between trombone
tones, because... silence isn't trombone, so we don't want
to label it that way.
Try adding points continuously during the first three or so
trombone tones. We'll save the rest to test on later.
*/
(
var id = "example-%".format(~point_counter);
~ds.addPoint(id,~timbre_buf);
~labels.addLabel(id,"trombone");
~point_counter = ~point_counter + 1;
)
/*
4. Play some oboe sounds.
*/
(
{
var sig = PlayBuf.ar(1,~oboe,BufRateScale.ir(~oboe),doneAction:2);
var mfccs = FluidMFCC.kr(sig,~nMFCCs,40,1,maxNumCoeffs:~nMFCCs);
SendReply.kr(Impulse.kr(30),"/mfccs",mfccs);
FluidKrToBuf.kr(mfccs,~timbre_buf);
sig.dup;
}.play;
OSCFunc({
arg msg;
{~mfcc_multislider.value_(msg[3..].linlin(-30,30,0,1))}.defer;
},"/mfccs");
)
/*
5. Same procedure as with the trombone sounds, but now labeled "oboe".
*/
(
var id = "example-%".format(~point_counter);
~ds.addPoint(id,~timbre_buf);
~labels.addLabel(id,"oboe");
~point_counter = ~point_counter + 1;
)
/*
6. Make an MLPClassifier (neural network) to train. For more information about the parameters
visit: https://learn.flucoma.org/reference/mlpclassifier
*/
~mlpclassifier = FluidMLPClassifier(s,[5],1,learnRate:0.05,batchSize:5,validation:0.1);
/*
7. You may want to do a ".fit" more than once. For this task a loss value less than 0.01 would
be pretty good. Loss values, however, are always relative, so it's not really possible
to make objective observations about what one should "aim" for with a loss value. The best
way to know if a neural network is successfully performing the task you would like it to
perform is to test it, ideally using examples it has never seen before.
*/
(
~mlpclassifier.fit(~ds,~labels,{
arg loss;
loss.postln;
});
)
/*
8. Make a prediction buffer to write the MLPClassifier's predictions into. The predictions that
it outputs to a buffer are integers. "0" will represent whatever the "zeroth" example
label it saw was (because we always start counting from zero in these cases). "1" will represent
the "first" example label it saw, etc.
*/
~prediction_buf = Buffer.alloc(s,1);
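// to peek at a prediction from the language side, one could read it back with Buffer.getn, e.g.:
// ~prediction_buf.getn(0,1,{ arg vals; "predicted class index: %".format(vals[0].asInteger).postln; });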
/*
9. Play some trombone sounds and make some predictions. It should show a 0.
*/
(
{
var sig = PlayBuf.ar(1,~trombone,BufRateScale.ir(~trombone),doneAction:2);
var mfccs = FluidMFCC.kr(sig,~nMFCCs,40,1,maxNumCoeffs:~nMFCCs);
FluidKrToBuf.kr(mfccs,~timbre_buf);
~mlpclassifier.kr(Impulse.kr(30),~timbre_buf,~prediction_buf);
FluidBufToKr.kr(~prediction_buf).poll;
sig.dup;
}.play;
)
/*
10. Play some oboe sounds and make some predictions. It should show a 1.
*/
(
{
var sig = PlayBuf.ar(1,~oboe,BufRateScale.ir(~oboe),doneAction:2);
var mfccs = FluidMFCC.kr(sig,~nMFCCs,40,1,maxNumCoeffs:~nMFCCs);
FluidKrToBuf.kr(mfccs,~timbre_buf);
~mlpclassifier.kr(Impulse.kr(30),~timbre_buf,~prediction_buf);
FluidBufToKr.kr(~prediction_buf).poll;
sig.dup;
}.play;
)
/*
11. During the silences it is reporting either trombone or oboe, because that's all
it knows about. Let's zero out the timbre_buf to simulate silence and then add
some points that are labeled "silence".
*/
~timbre_buf.setn(0,0.dup(~nMFCCs))
(
100.do{
var id = "example-%".format(~point_counter);
~ds.addPoint(id,~timbre_buf);
~labels.addLabel(id,"silence");
~point_counter = ~point_counter + 1;
};
)
~ds.print;
~labels.print;
~ds.write("/Users/macprocomputer/Desktop/_flucoma/code/Utrecht-2021/Lesson_Plans/classifier (pre-workshop)/%_ds.json".format(Date.localtime.stamp));
~labels.write("/Users/macprocomputer/Desktop/_flucoma/code/Utrecht-2021/Lesson_Plans/classifier (pre-workshop)/%_labels.json".format(Date.localtime.stamp))
/*
12. Now go retrain some more and do some more predictions. The silent gaps between
tones should now report a "2".
*/
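// for example, refit (the same call as step 7), then run the prediction blocks in steps 9 and 10 again:
// ~mlpclassifier.fit(~ds,~labels,{ arg loss; loss.postln; });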
// ========================= DATA VERIFICATION ADDENDUM ============================
// This data is pretty well separated, except for that one trombone point.
~ds.read("/Users/macprocomputer/Desktop/_flucoma/code/Utrecht-2021/Lesson_Plans/classifier (pre-workshop)/211102_122330_ds.json");
~labels.read("/Users/macprocomputer/Desktop/_flucoma/code/Utrecht-2021/Lesson_Plans/classifier (pre-workshop)/211102_122331_labels.json");
/*
This data is not well separated. One can see that in the cluster that should probably be all silences,
there are a lot of oboe and trombone points mixed in!
This will likely be confusing to a neural network!
*/
~ds.read("/Users/macprocomputer/Desktop/_flucoma/code/Utrecht-2021/Lesson_Plans/classifier (pre-workshop)/211102_122730_ds.json");
~labels.read("/Users/macprocomputer/Desktop/_flucoma/code/Utrecht-2021/Lesson_Plans/classifier (pre-workshop)/211102_122731_labels.json");
(
Task{
~stand = FluidStandardize(s);
~ds_plotter = FluidDataSet(s);
~umap = FluidUMAP(s,2,30,0.5);
~normer = FluidNormalize(s);
~kdtree = FluidKDTree(s);
~pt_buf = Buffer.alloc(s,2);
s.sync;
~stand.fitTransform(~ds,~ds_plotter,{
~umap.fitTransform(~ds_plotter,~ds_plotter,{
~normer.fitTransform(~ds_plotter,~ds_plotter,{
~kdtree.fit(~ds_plotter,{
~ds_plotter.dump({
arg ds_dict;
~labels.dump({
arg label_dict;
// label_dict.postln;
~plotter = FluidPlotter(bounds:Rect(0,0,800,800),dict:ds_dict,mouseMoveAction:{
arg view, x, y;
~pt_buf.setn(0,[x,y]);
~kdtree.kNearest(~pt_buf,{
arg nearest;
"%:\t%".format(nearest,label_dict.at("data").at(nearest.asString)[0]).postln;
});
});
~plotter.categories_(label_dict);
});
});
});
});
});
});
}.play(AppClock);
)

@ -0,0 +1,72 @@
(
// run the analysis
Routine{
var time = Main.elapsedTime;
var ds = FluidDataSet(s);
var labels = FluidLabelSet(s);
var scaler = FluidStandardize(s);
var buf1 = Buffer.alloc(s,1);
var dsq = FluidDataSetQuery(s);
~pitch_features_buf = Buffer.new(s);
// specify some params for the analysis (these are the defaults, but we'll specify them here so we can use them later)
~windowSize = 4096;
~hopSize = 512;
~buf = Buffer.read(s,"/Users/macprocomputer/Desktop/_flucoma/code/flucoma-core-src/AudioFiles/Tremblay-FMTri-M.wav");
s.sync;
FluidBufPitch.process(s,~buf,features:~pitch_features_buf,windowSize:~windowSize,hopSize:~hopSize).wait;
// {~pitch_features_buf.plot(separately:true)}.defer;
ds.fromBuffer(~pitch_features_buf,action:{
ds.print;
/*dsq.addRange(0,2,{
dsq.filter(1,">",0.7,{
dsq.transform(ds,ds,{
ds.print;*/
ds.dump({
arg dict;
~pitch_features_array = Array.newClear(dict.at("data").size);
dict.at("data").keysValuesDo({
arg id, pt, i;
~pitch_features_array[i] = [id,pt];
});
~pitch_features_sorted = ~pitch_features_array.sort({
arg a, b;
a[1][0] < b[1][0];
});
~center_pos = ~pitch_features_sorted.collect({arg arr; (arr[0].asInteger * ~hopSize) / ~buf.sampleRate});
~center_pos_buf = Buffer.loadCollection(s,~center_pos);
});
/*});
});
});*/
});
}.play
)
(
OSCdef(\fluidbufpitch_help,{
arg msg;
msg[3].midiname.postln;
},"/fluidbufpitch_help");
{
var trig = Impulse.kr(s.sampleRate / ~hopSize);
var index = (PulseCount.kr(trig) - 1) % BufFrames.ir(~center_pos_buf);
var centerPos = Index.kr(~center_pos_buf,index);
var pan = TRand.kr(-1.0,1.0,trig);
var sig;
var pitch, conf;
sig = TGrains.ar(2,trig,~buf,BufRateScale.ir(~buf),centerPos,~windowSize / BufSampleRate.ir(~buf),pan,0.5);
# pitch, conf = FluidPitch.kr(sig,unit:1,windowSize:4096);
pitch = FluidStats.kr(pitch,25)[0];
SendReply.kr(Impulse.kr(30) * (conf > 0.6),"/fluidbufpitch_help",pitch);
sig;
}.play;
)

@ -0,0 +1,129 @@
/* ================= FluidSines =================
FluidSines separates a sound into a sinusoidal and a residual component. It does this by trying to recreate the input sound with a sinusoidal model; anything it can't confidently model as a sinusoid is considered "residual".
Useful for separating the stable, pitched components of a sound from the rest.
*/
// sines in L, residual in R
~buf = Buffer.read(s,File.realpath(FluidSines.class.filenameSymbol).dirname.withTrailingSlash ++ "../AudioFiles/Tremblay-AaS-SynthTwoVoices-M.wav");
(
y = {
var sig = PlayBuf.ar(1,~buf,BufRateScale.ir(~buf),loop:1);
var sines, residual;
# sines, residual = FluidSines.ar(sig,detectionThreshold:-40,minTrackLen:2);
[sines,residual];
}.play;
)
// isolate just sines or residual;
~song = Buffer.readChannel(s,File.realpath(FluidSines.class.filenameSymbol).dirname.withTrailingSlash ++ "../AudioFiles/Tremblay-beatRemember.wav",channels:[0]);
(
y = {
arg mix = 0.5;
var sig = PlayBuf.ar(1,~song,BufRateScale.ir(~song),loop:1);
var sines, residual;
# sines, residual = FluidSines.ar(sig);
sig = SelectX.ar(mix,[sines,residual]);
sig.dup;
}.play;
)
// just sines
y.set(\mix,0);
// just residual
y.set(\mix,1);
// a stereo example
~song = Buffer.read(s,File.realpath(FluidSines.class.filenameSymbol).dirname.withTrailingSlash ++ "../AudioFiles/Tremblay-beatRemember.wav");
(
y = {
arg mix = 0.5;
var sig = PlayBuf.ar(2,~song,BufRateScale.ir(~song),loop:1);
var l, r, sinesL, residualL, sinesR, residualR, sines, residual;
# l, r = FluidSines.ar(sig);
# sinesL, residualL = l;
# sinesR, residualR = r;
sig = SelectX.ar(mix,[[sinesL,sinesR],[residualL,residualR]]);
sig;
}.play;
)
// just sines
y.set(\mix,0);
// just residual
y.set(\mix,1);
// send just the 'sines' to a Reverb
(
{
var sig = PlayBuf.ar(1,~song,BufRateScale.ir(~song),loop:1);
var sines, residual;
var latency = ((15 * 512) + 1024 ) / ~song.sampleRate;
# sines, residual = FluidSines.ar(sig);
DelayN.ar(sig,latency,latency) + GVerb.ar(sines);
}.play;
)
// send just the 'residual' to a Reverb
(
{
var sig = PlayBuf.ar(1,~song,BufRateScale.ir(~song),loop:1);
var sines, residual;
var latency = ((15 * 512) + 1024 ) / ~song.sampleRate;
# sines, residual = FluidSines.ar(sig);
DelayN.ar(sig,latency,latency) + GVerb.ar(residual);
}.play;
)
/* ============== FluidHPSS ===============
FluidHPSS separates a sound into "harmonic" and "percussive" components. This can be useful for material where there is a somewhat realistic basis for these two types to exist, such as in a drum hit. It can also be interesting on material where the two are merged together in more complex ways.
*/
//load a soundfile to play
~buf = Buffer.readChannel(s,File.realpath(FluidSines.class.filenameSymbol).dirname.withTrailingSlash ++ "../AudioFiles/Tremblay-beatRemember.wav",channels:[0]);
// run with basic parameters (left is harmonic, right is percussive)
{FluidHPSS.ar(PlayBuf.ar(1,~buf,loop:1))}.play
// run in mode 2, listening to:
//the harmonic stream
{FluidHPSS.ar(PlayBuf.ar(1,~buf,loop:1),maskingMode:2)[0].dup}.play
// the percussive stream
{FluidHPSS.ar(PlayBuf.ar(1,~buf,loop:1),maskingMode:2)[1].dup}.play
// the residual stream
{FluidHPSS.ar(PlayBuf.ar(1,~buf,loop:1),maskingMode:2)[2].dup}.play
// do the above again with another sound file
~buf = Buffer.read(s,File.realpath(FluidSines.class.filenameSymbol).dirname.withTrailingSlash ++ "../AudioFiles/Nicol-LoopE-M.wav");
/* =================== FluidTransients =========================
FluidTransients can separate out transient from residual material. "Transient" is quite a fuzzy term depending on who you are talking to: producers might use it for any sound that is bright, loud, or percussive, while an engineer could be referring to a short, full-spectrum change in the signal.
This algorithm is based on a "de-clicking" audio restoration approach.
*/
//load some buffer
~buf = Buffer.read(s,File.realpath(FluidTransients.class.filenameSymbol).dirname.withTrailingSlash ++ "../AudioFiles/Tremblay-AaS-SynthTwoVoices-M.wav");
// basic parameters
{FluidTransients.ar(PlayBuf.ar(1, ~buf, loop:1))}.play
// just the transients
{FluidTransients.ar(PlayBuf.ar(1, ~buf, loop:1))[0].dup}.play
// =================== Audio Transport =========================
//load 2 files
(
b = Buffer.read(s,File.realpath(FluidAudioTransport.class.filenameSymbol).dirname.withTrailingSlash ++ "../AudioFiles/Tremblay-CEL-GlitchyMusicBoxMelo.wav");
c = Buffer.read(s,File.realpath(FluidAudioTransport.class.filenameSymbol).dirname.withTrailingSlash ++ "../AudioFiles/Tremblay-CF-ChurchBells.wav");
)
//listen to them
b.play
c.play
//stereo cross!
{FluidAudioTransport.ar(PlayBuf.ar(2,b,loop: 1),PlayBuf.ar(2,c,loop: 1),MouseX.kr())}.play;

@ -0,0 +1,373 @@
s.boot;
// 1. Load a folder of sounds
(
~load_folder = {
arg folder_path, action;
var loader = FluidLoadFolder(folder_path);
loader.play(s,{
"loaded % soundfiles".format(loader.index.size).postln;
action.(loader.buffer);
});
};
// 2. Slice
~slice = {
arg buffer, action;
Routine{
var indices = Buffer(s);
s.sync;
FluidBufNoveltySlice.process(s,buffer,indices:indices,threshold:0.5,action:{
"% slices found".format(indices.numFrames - 1).postln;
"average duration in seconds: %".format(buffer.duration/(indices.numFrames - 1)).postln;
action.(buffer,indices);
});
}.play;
};
// 3. Analyze
~analyze = {
arg buffer, indices, action;
Routine{
var feature_buf = Buffer(s);
var stats_buf = Buffer(s);
var point_buf = Buffer(s);
var cond = Condition.new;
var ds = FluidDataSet(s);
s.sync;
indices.loadToFloatArray(action:{
arg fa;
fa.doAdjacentPairs{
arg start, end, i;
var num = end - start;
// === PICK YOUR ANALYSIS (JUST CHOOSE 1) ===
FluidBufMFCC.process(s,buffer,start,num,features:feature_buf,numCoeffs:13,startCoeff:1).wait;
// FluidBufChroma.process(s,~loader.buffer,start,num,features:feature_buf).wait;
// FluidBufSpectralShape.process(s,buffer,start,num,features:feature_buf).wait;
//FluidBufPitch.process(s,buffer,start,num,features:feature_buf).wait;
FluidBufStats.process(s,feature_buf,stats:stats_buf).wait;
FluidBufFlatten.process(s,stats_buf,numFrames:1,destination:point_buf).wait;
ds.addPoint("slice-%".format(i),point_buf);
"% / % done".format(i+1,indices.numFrames-1).postln;
};
ds.print;
action.(buffer,indices,ds);
});
}.play;
};
// 4. Reduce to 2 Dimensions
~umap = {
arg buffer, indices, ds, action, numNeighbours = 15, minDist = 0.1;
Routine{
var standardizer = FluidStandardize(s);
var umap = FluidUMAP(s,2,numNeighbours,minDist);
var redux_ds = FluidDataSet(s);
s.sync;
standardizer.fitTransform(ds,redux_ds,{
"standardization done".postln;
umap.fitTransform(redux_ds,redux_ds,{
"umap done".postln;
action.(buffer,indices,redux_ds);
});
});
}.play;
};
// 5. Gridify if Desired
~grid = {
arg buffer, indices, redux_ds, action;
Routine{
var normer = FluidNormalize(s);
var grider = FluidGrid(s);
var newds = FluidDataSet(s);
s.sync;
normer.fitTransform(redux_ds,newds,{
"normalization done".postln;
grider.fitTransform(newds,newds,{
"grid done".postln;
action.(buffer,indices,newds);
});
});
}.play;
};
// 6. Plot
~plot = {
arg buffer, indices, redux_ds, action;
Routine{
var kdtree = FluidKDTree(s);
var buf_2d = Buffer.alloc(s,2);
var scaler = FluidNormalize(s);
var newds = FluidDataSet(s);
var xmin = 0, xmax = 1, ymin = 0, ymax = 1;
s.sync;
scaler.fitTransform(redux_ds,newds,{
"scaling done".postln;
kdtree.fit(newds,{
"kdtree fit".postln;
newds.dump({
arg dict;
var previous, fp;
"ds dumped".postln;
fp = FluidPlotter(nil,Rect(0,0,800,800),dict,xmin:xmin,xmax:xmax,ymin:ymin,ymax:ymax,mouseMoveAction:{
arg view, x, y;
[x,y].postln;
buf_2d.setn(0,[x,y]);
kdtree.kNearest(buf_2d,{
arg nearest;
if(previous != nearest,{
var index = nearest.asString.split($-)[1].asInteger;
previous = nearest;
nearest.postln;
index.postln;
{
var startPos = Index.kr(indices,index);
var dur_samps = Index.kr(indices,index + 1) - startPos;
var sig = PlayBuf.ar(1,buffer,BufRateScale.ir(buffer),startPos:startPos);
var dur_sec = dur_samps / BufSampleRate.ir(buffer);
var env = EnvGen.kr(Env([0,1,1,0],[0.03,dur_sec-0.06,0.03]),doneAction:2);
sig.dup * env;
}.play;
});
});
});
action.(fp,newds);
});
});
});
}.play;
};
)
// step by step: each function hands its results to the next via its action argument,
// so to run them one at a time, stash the results in globals as you go
~load_folder.("/Users/macprocomputer/Desktop/_flucoma/favs mono fewer/",{arg buf; ~buffer = buf;});
~slice.(~buffer,{arg buf, indices; ~indices = indices;});
~indices.postln;
FluidWaveform(~buffer,~indices);
~analyze.(~buffer,~indices,{arg buf, indices, ds; ~ds = ds;});
~umap.(~buffer,~indices,~ds,{arg buf, indices, ds; ~ds_redux = ds;});
~grid.(~buffer,~indices,~ds_redux,{arg buf, indices, ds; ~ds_grid = ds;});
~plot.(~buffer,~indices,~ds_grid);
// ============== do all of it =======================
(
var path = "/Users/macprocomputer/Desktop/_flucoma/data_saves/%_2D_browsing_Pitch".format(Date.localtime.stamp);
~load_folder.("/Users/macprocomputer/Desktop/_flucoma/favs mono/",{
arg buffer0;
~slice.(buffer0,{
arg buffer1, indices1;
~analyze.(buffer1, indices1,{
arg buffer2, indices2, ds2;
/* path.mkdir;
buffer2.write(path+/+"buffer.wav","wav");
indices2.write(path+/+"indices.wav","wav","float");
ds2.write(path+/+"ds.json");*/
~umap.(buffer2,indices2,ds2,{
arg buffer3, indices3, ds3;
/* path.mkdir;
buffer3.write(path+/+"buffer.wav","wav");
indices3.write(path+/+"indices.wav","wav","float");
ds3.write(path+/+"ds.json");*/
~plot.(buffer3,indices3,ds3,{
arg plotter;
"done with all".postln;
~fp = plotter;
});
});
});
});
});
)
/*=============== Know Your Data =================
hmmm... there's a lot of white space in that UMAP plot. A few options:
1. Adjust the parameters of UMAP to make the plot look different.
- minDist
- numNeighbours
2. Gridify the whole thing to spread it out.
3. Remove some of the outliers to get a more full shape.
===================================================*/
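// #1: a sketch of re-running just the UMAP stage with different settings, assuming you
// stored the analysis results in the globals from the step-by-step calls above:
// ~umap.(~buffer,~indices,~ds,{arg buf, ind, d; ~plot.(buf,ind,d);},numNeighbours:5,minDist:0.9);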
// #2
(
Window.closeAll;
Task{
var folder = "/Users/macprocomputer/Desktop/_flucoma/data_saves/211103_121441_2D_browsing/";
var ds = FluidDataSet(s);
var buffer = Buffer.read(s,folder+/+"buffer.wav");
var indices = Buffer.read(s,folder+/+"indices.wav");
var normalizer = FluidNormalize(s);
var ds_grid = FluidDataSet(s);
var grid = FluidGrid(s);
var kdtree = FluidKDTree(s);
var pt_buf = Buffer.alloc(s,2);
s.sync;
ds.read(folder+/+"ds.json",{
"read".postln;
normalizer.fitTransform(ds,ds_grid,{
"normalized".postln;
grid.fitTransform(ds_grid,ds_grid,{
"grid done".postln;
normalizer.fitTransform(ds_grid,ds_grid,{
"normalized".postln;
kdtree.fit(ds_grid,{
"tree fit".postln;
normalizer.fitTransform(ds,ds,{
"normalized".postln;
ds.dump({
arg ds_dict;
ds_grid.dump({
arg ds_grid_dict;
defer{
var distances = Dictionary.new;
var max_dist = 0;
var win, plotter, uv;
var previous;
ds_dict.at("data").keysValuesDo({
arg id, pt;
var other, pt0, pt1, dist, distpoint;
/*
id.postln;
pt.postln;
"".postln;
*/
other = ds_grid_dict.at("data").at(id);
pt0 = Point(pt[0],pt[1]);
pt1 = Point(other[0],other[1]);
dist = pt0.dist(pt1);
distpoint = Dictionary.new;
if(dist > max_dist,{max_dist = dist});
distpoint.put("pt0",pt0);
distpoint.put("pt1",pt1);
distpoint.put("dist",dist);
distances.put(id,distpoint);
});
win = Window("FluidGrid",Rect(0,0,800,800));
win.background_(Color.white);
uv = UserView(win,win.bounds)
.drawFunc_({
var size_pt = Point(uv.bounds.width,uv.bounds.height);
distances.keysValuesDo({
arg id, distpoint;
var alpha = distpoint.at("dist") / max_dist;
var pt0 = distpoint.at("pt0") * size_pt;
var pt1 = distpoint.at("pt1") * size_pt;
pt0.y = uv.bounds.height - pt0.y;
pt1.y = uv.bounds.height - pt1.y;
/* id.postln;
distpoint.postln;
alpha.postln;
"".postln;
*/
Pen.line(pt0,pt1);
Pen.color_(Color(1.0,0.0,0.0,0.25));
Pen.stroke;
});
});
plotter = FluidPlotter(win,win.bounds,ds_dict,{
arg view, x, y;
pt_buf.setn(0,[x,y]);
kdtree.kNearest(pt_buf,{
arg nearest;
if(previous != nearest,{
var index = nearest.asString.split($-)[1].asInteger;
previous = nearest;
nearest.postln;
index.postln;
{
var startPos = Index.kr(indices,index);
var dur_samps = Index.kr(indices,index + 1) - startPos;
var sig = PlayBuf.ar(1,buffer,BufRateScale.ir(buffer),startPos:startPos);
var dur_sec = dur_samps / BufSampleRate.ir(buffer);
var env = EnvGen.kr(Env([0,1,1,0],[0.03,dur_sec-0.06,0.03]),doneAction:2);
sig.dup * env;
}.play;
});
});
});
plotter.background_(Color(0,0,0,0));
ds_grid_dict.at("data").keysValuesDo({
arg id, pt;
plotter.addPoint_("%-grid".format(id),pt[0],pt[1],0.75,Color.blue.alpha_(0.5));
});
win.front;
};
})
});
});
});
});
});
});
});
}.play(AppClock);
)
// #3
(
Routine{
var folder = "/Users/macprocomputer/Desktop/_flucoma/data_saves/211103_152523_2D_browsing/";
var ds = FluidDataSet(s);
var buffer = Buffer.read(s,folder+/+"buffer.wav");
var indices = Buffer.read(s,folder+/+"indices.wav");
var robust_scaler = FluidRobustScale(s,10,90);
var newds = FluidDataSet(s);
var dsq = FluidDataSetQuery(s);
s.sync;
// {indices.plot}.defer;
ds.read(folder+/+"ds.json",{
robust_scaler.fitTransform(ds,newds,{
dsq.addRange(0,2,{
dsq.filter(0,">",-1,{
dsq.and(0,"<",1,{
dsq.and(1,">",-1,{
dsq.and(1,"<",1,{
dsq.transform(newds,newds,{
~plot.(buffer,indices,newds);
});
});
});
});
});
});
})
});
}.play;
)

@ -0,0 +1,108 @@
(
Task{
var folder = "/Users/macprocomputer/Desktop/_flucoma/data_saves/211103_152953_2D_browsing_MFCC/";
// var folder = "/Users/macprocomputer/Desktop/_flucoma/data_saves/211103_161354_2D_browsing_SpectralShape/";
// var folder = "/Users/macprocomputer/Desktop/_flucoma/data_saves/211103_161638_2D_browsing_Pitch/";
~ds_original = FluidDataSet(s);
~buffer = Buffer.read(s,folder+/+"buffer.wav");
~indices = Buffer.read(s,folder+/+"indices.wav");
~kdtree = FluidKDTree(s,6);
~ds = FluidDataSet(s);
s.sync;
~indices.loadToFloatArray(action:{
arg fa;
~indices = fa;
});
~ds_original.read(folder+/+"ds.json",{
~ds.read(folder+/+"ds.json",{
~kdtree.fit(~ds,{
~ds.dump({
arg dict;
~ds_dict = dict;
// only now do we know how many columns the dataset has, so allocate the query buffer here
~pt_buf = Buffer.alloc(s,~ds_dict.at("cols"));
"kdtree fit".postln;
});
});
});
});
}.play;
~play_id = {
arg id;
var index = id.asString.split($-)[1].asInteger;
var start_samps = ~indices[index];
var end_samps = ~indices[index+1];
var dur_secs = (end_samps - start_samps) / ~buffer.sampleRate;
{
var sig = PlayBuf.ar(1,~buffer,BufRateScale.ir(~buffer),startPos:start_samps);
var env = EnvGen.kr(Env([0,1,1,0],[0.03,dur_secs-0.06,0.03]),doneAction:2);
sig.dup;// * env;
}.play;
dur_secs;
};
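// try it once the Task above has finished, e.g.:
// ~play_id.("slice-0");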
)
(
// hear the 5 nearest points
Routine{
// var id = "slice-558";
var id = ~ds_dict.at("data").keys.choose;
~ds.getPoint(id,~pt_buf,{
~kdtree.kNearest(~pt_buf,{
arg nearest;
Routine{
id.postln;
~play_id.(id).wait;
nearest[1..].do{
arg near;
1.wait;
near.postln;
~play_id.(near).wait;
};
}.play;
})
});
}.play;
)
// Standardize
(
Routine{
var scaler = FluidStandardize(s);
s.sync;
scaler.fitTransform(~ds_original,~ds,{
~kdtree.fit(~ds,{
"standardized & kdtree fit".postln;
});
});
}.play;
)
// Normalize
(
Routine{
var scaler = FluidNormalize(s);
s.sync;
scaler.fitTransform(~ds_original,~ds,{
~kdtree.fit(~ds,{
"normalized & kdtree fit".postln;
});
});
}.play;
)
// Robust Scaler
(
Routine{
var scaler = FluidRobustScale(s);
s.sync;
scaler.fitTransform(~ds_original,~ds,{
~kdtree.fit(~ds,{
"robust scaled & kdtree fit".postln;
});
});
}.play;
)

@ -0,0 +1,150 @@
s.options.sampleRate_(44100);
s.options.device_("Fireface UC Mac (24006457)");
(
// decompose!
s.waitForBoot{
Routine{
var drums = Buffer.read(s,File.realpath(FluidBufNMF.class.filenameSymbol).dirname.withTrailingSlash ++ "../AudioFiles/Nicol-LoopE-M.wav");
var resynth = Buffer(s);
var n_components = 2;
FluidBufNMF.process(s,drums,resynth:resynth,components:n_components).wait;
"original sound".postln;
{
PlayBuf.ar(1,drums,BufRateScale.ir(drums),doneAction:2).dup;
}.play;
(drums.duration + 1).wait;
n_components.do{
arg i;
"decomposed part #%".format(i+1).postln;
{
PlayBuf.ar(n_components,resynth,BufRateScale.ir(resynth),doneAction:2)[i].dup;
}.play;
(drums.duration + 1).wait;
};
"all decomposed parts spread across the stereo field".postln;
{
Splay.ar(PlayBuf.ar(n_components,resynth,BufRateScale.ir(resynth),doneAction:2));
}.play;
}.play;
}
)
// ok so what is it doing?
(
Routine{
var n_components = 2;
var drums = Buffer.read(s,File.realpath(FluidBufNMF.class.filenameSymbol).dirname.withTrailingSlash ++ "../AudioFiles/Nicol-LoopE-M.wav");
~bases = Buffer(s);
~activations = Buffer(s);
~resynth = Buffer(s);
FluidBufNMF.process(s,drums,bases:~bases,activations:~activations,resynth:~resynth,components:n_components).wait;
{
~bases.plot("bases");
~activations.plot("activations");
}.defer;
}.play;
)
// base as a filter
(
Routine{
var drums = Buffer.read(s,File.realpath(FluidBufNMF.class.filenameSymbol).dirname.withTrailingSlash ++ "../AudioFiles/Nicol-LoopE-M.wav");
var voice = Buffer.read(s,File.realpath(FluidBufNMF.class.filenameSymbol).dirname.withTrailingSlash ++ "../AudioFiles/Tremblay-AaS-VoiceQC-B2K-M.wav");
var song = Buffer.read(s,File.realpath(FluidBufNMF.class.filenameSymbol).dirname.withTrailingSlash ++ "../AudioFiles/Tremblay-beatRemember.wav");
s.sync;
"drums through the drums bases as filters".postln;
{
var src = PlayBuf.ar(1,drums,BufRateScale.ir(drums),doneAction:2);
var sig = FluidNMFFilter.ar(src,~bases,2);
sig;
}.play;
(drums.duration+1).wait;
"voice through the drum bases as filters".postln;
{
var src = PlayBuf.ar(1,voice,BufRateScale.ir(voice),doneAction:2);
var sig = FluidNMFFilter.ar(src,~bases,2);
sig;
}.play;
(voice.duration+1).wait;
"song through the drum bases as filters".postln;
{
var src = PlayBuf.ar(2,song,BufRateScale.ir(song),doneAction:2)[0];
var sig = FluidNMFFilter.ar(src,~bases,2);
sig;
}.play;
}.play;
)
// activations as an envelope
(
{
var activation = PlayBuf.ar(2,~activations,BufRateScale.ir(~activations),doneAction:2);
var sig = WhiteNoise.ar(0.dbamp) * activation;
sig;
}.play;
)
// put them together...
(
{
var activation = PlayBuf.ar(2,~activations,BufRateScale.ir(~activations),doneAction:2);
var sig = WhiteNoise.ar(0.dbamp);
sig = FluidNMFFilter.ar(sig,~bases,2) * activation;
sig;
}.play;
)
// as a matcher, train on only 4 of the 22 seconds
(
Task{
var dog = Buffer.readChannel(s,File.realpath(FluidBufNMF.class.filenameSymbol).dirname.withTrailingSlash ++ "../AudioFiles/Tremblay-BaB-SoundscapeGolcarWithDog.wav",channels:[0]);
var bases = Buffer(s);
var match = [0,0];
var win = Window("FluidNMFMatch",Rect(0,0,200,400));
var uv = UserView(win,win.bounds)
.drawFunc_{
var w = uv.bounds.width / 2;
Pen.color_(Color.green);
match.do{
arg match_val, i;
var match_norm = match_val.linlin(0,30,0,uv.bounds.height);
var top = uv.bounds.height - match_norm;
/*top.postln;*/
Pen.addRect(Rect(i * w,top,w,match_norm));
Pen.draw;
};
};
OSCdef(\nmfmatch,{
arg msg;
match = msg[3..];
{uv.refresh}.defer;
},"/nmfmatch");
win.front;
s.sync;
FluidBufNMF.process(s,dog,numFrames:dog.sampleRate * 4,bases:bases,components:2).wait;
{
var sig = PlayBuf.ar(1,dog,BufRateScale.ir(dog),doneAction:2);
SendReply.kr(Impulse.kr(30),"/nmfmatch",FluidNMFMatch.kr(sig,bases,2));
sig;
}.play;
}.play(AppClock);
)

@ -0,0 +1,80 @@
(
~counter = 0;
~predicting = false;
~prediction_buf = Buffer.alloc(s,10);
Window.closeAll;
~win = Window("MLP Regressor",Rect(0,0,1000,400));
~multisliderview = MultiSliderView(~win,Rect(0,0,400,400))
.size_(10)
.elasticMode_(true)
.action_({
arg msv;
// ~synth.set(\val,msv.value);
// msv.value.postln;
~y_buf.setn(0,msv.value);
});
Slider2D(~win,Rect(400,0,400,400))
.action_({
arg s2d;
[s2d.x,s2d.y].postln;
~x_buf.setn(0,[s2d.x,s2d.y]);
if(~predicting,{
~nn.predictPoint(~x_buf,~y_buf,{
~y_buf.getn(0,10,{
arg prediction_values;
{~multisliderview.value_(prediction_values)}.defer;
});
});
});
});
Button(~win,Rect(800,0,200,20))
.states_([["Add Point"]])
.action_({
arg but;
var id = "example-%".format(~counter);
~ds_input.addPoint(id,~x_buf);
~ds_output.addPoint(id,~y_buf);
~counter = ~counter + 1;
~ds_input.print;
~ds_output.print;
});
Button(~win,Rect(800,20,200,20))
.states_([["Train"]])
.action_({
arg but;
~nn.fit(~ds_input,~ds_output,{
arg loss;
"loss: %".format(loss).postln;
});
});
Button(~win,Rect(800,40,200,20))
.states_([["Not Predicting",Color.yellow,Color.black],["Is Predicting",Color.green,Color.black]])
.action_({
arg but;
~predicting = but.value.asBoolean;
});
~win.front;
~ds_input = FluidDataSet(s);
~ds_output = FluidDataSet(s);
~x_buf = Buffer.alloc(s,2);
~y_buf = Buffer.alloc(s,10);
~nn = FluidMLPRegressor(s,[7],FluidMLPRegressor.sigmoid,FluidMLPRegressor.sigmoid,learnRate:0.1,batchSize:1,validation:0);
~synth = {
//arg val = #[0,0,0,0,0,0,0,0,0,0];
var val = FluidBufToKr.kr(~y_buf); // read the 10 control values out of the buffer
var osc1, osc2, feed1, feed2, base1=69, base2=69, base3 = 130;
#feed2,feed1 = LocalIn.ar(2);
osc1 = MoogFF.ar(SinOsc.ar((((feed1 * val[0]) + val[1]) * base1).midicps,mul: (val[2] * 50).dbamp).atan,(base3 - (val[3] * (FluidLoudness.kr(feed2, 1, 0, hopSize: 64)[0].clip(-120,0) + 120))).lag(128/44100).midicps, val[4] * 3.5);
osc2 = MoogFF.ar(SinOsc.ar((((feed2 * val[5]) + val[6]) * base2).midicps,mul: (val[7] * 50).dbamp).atan,(base3 - (val[8] * (FluidLoudness.kr(feed1, 1, 0, hopSize: 64)[0].clip(-120,0) + 120))).lag(128/44100).midicps, val[9] * 3.5);
Out.ar(0,LeakDC.ar([osc1,osc2],mul: 0.1));
LocalOut.ar([osc1,osc2]);
}.play;
)

@ -0,0 +1,123 @@
(
s.waitForBoot{
// a counter that will increment each time we add a point to the datasets
// (so that they each can have a unique identifier)
~counter = 0;
~ds_input = FluidDataSet(s); // dataset to hold the input data points (xy position)
~ds_output = FluidDataSet(s); // data set to hold the output data points (the 10 synth parameters)
~x_buf = Buffer.alloc(s,2); // a buffer for holding the current xy position (2 dimensions)
~y_buf = Buffer.alloc(s,10); // a buffer for holding the current synth parameters (10 parameters)
// the neural network. for more info on these arguments, visit learn.flucoma.com/reference/mlpregressor
~nn = FluidMLPRegressor(s,[7],FluidMLPRegressor.sigmoid,FluidMLPRegressor.sigmoid,learnRate:0.1,batchSize:1,validation:0);
// just nice to close any open windows, in case this script gets run multiple times...
// that way the windows don't pile up
Window.closeAll;
~win = Window("MLP Regressor",Rect(0,0,1000,400));
Slider2D(~win,Rect(0,0,400,400))
.action_({
arg s2d;
// [s2d.x,s2d.y].postln;
// we're sending these values up to the synth; once there, they will get written into the buffer
// for the mlp to use as input
~synth.set(\x,s2d.x,\y,s2d.y);
});
~multisliderview = MultiSliderView(~win,Rect(400,0,400,400))
.size_(10) // we know that it will need 10 sliders
.elasticMode_(true) // this will ensure that the sliders are spread out evenly across the whole view
.action_({
arg msv;
// here we'll just set these values directly into the buffer
// on the server they get read out of the buffer and used to control the synthesizer
~y_buf.setn(0,msv.value);
});
// a button for adding points to the datasets, both datasets at the same time
// with the same identifier
Button(~win,Rect(800,0,200,20))
.states_([["Add Point"]])
.action_({
arg but;
var id = "example-%".format(~counter); // use the counter to create a unique identifier
~ds_input.addPoint(id,~x_buf); // add a point to the input dataset using whatever values are in x_buf
~ds_output.addPoint(id,~y_buf); // add a point to the output dataset using whatever values are in y_buf
~counter = ~counter + 1; // increment the counter!
// nice to just see every time what is going into the datasets
~ds_input.print;
~ds_output.print;
});
// a button to train the neural network. you can push the button multiple times and watch the loss
// decrease. each time you press it, the neural network doesn't reset; it just keeps training from where it left off
Button(~win,Rect(800,20,200,20))
.states_([["Train"]])
.action_({
arg but;
~nn.fit(~ds_input,~ds_output,{ // provide the dataset to use as input and the dataset to use as output
arg loss;
"loss: %".format(loss).postln; // post the loss so we can watch it go down after multiple trainings
});
});
// a button to control when the neural network is actually making predictions
// we want it to *not* be making predictions while we're adding points to the datasets (because we want
// the neural network to not be writing into y_buf)
Button(~win,Rect(800,40,200,20))
.states_([["Not Predicting",Color.yellow,Color.black],["Is Predicting",Color.green,Color.black]])
.action_({
arg but;
~synth.set(\predicting,but.value); // send the "boolean" (0 or 1) up to the synth
});
~win.front;
~synth = {
arg predicting = 0, x = 0, y = 0;
var osc1, osc2, feed1, feed2, base1=69, base2=69, base3 = 130, val, trig;
FluidKrToBuf.kr([x,y],~x_buf); // receive the xy positions as arguments to the synth, then write them into the buffer here
// if predicting is 1 "trig" will be impulses 30 times per second, if 0 it will be just a stream of zeros
trig = Impulse.kr(30) * predicting;
// the neural network will make a prediction each time a trigger, or impulse, is received in the first argument
// the next two arguments are (1) which buffer to use as input to the neural network, and (2) which buffer
// to write the output prediction into
~nn.kr(trig,~x_buf,~y_buf);
// read the 10 synth parameter values out of this buffer. val is a control rate stream of the 10 values
// when the neural network is making predictions (predicting == 1), it will be writing the predictions
// into that buffer, so that is what will be read out of here. when the neural network is not making predictions
// (predicting == 0) it will not be writing values into the buffer, so you can use the MultiSliderView above to
// write values into the buffer -- they'll still get read out into a control stream right here to control the synth!
val = FluidBufToKr.kr(~y_buf);
// if we are making predictions (trig is a series of impulses), send the values back to the language so that we can
// update the values in the multislider. this is basically only for aesthetic purposes. it's nice to see the multislider
// wiggle as the neural network makes its predictions!
SendReply.kr(trig,"/predictions",val);
// the actual synthesis algorithm. made by PA Tremblay
#feed2,feed1 = LocalIn.ar(2);
osc1 = MoogFF.ar(SinOsc.ar((((feed1 * val[0]) + val[1]) * base1).midicps,mul: (val[2] * 50).dbamp).atan,(base3 - (val[3] * (FluidLoudness.kr(feed2, 1, 0, hopSize: 64)[0].clip(-120,0) + 120))).lag(128/44100).midicps, val[4] * 3.5);
osc2 = MoogFF.ar(SinOsc.ar((((feed2 * val[5]) + val[6]) * base2).midicps,mul: (val[7] * 50).dbamp).atan,(base3 - (val[8] * (FluidLoudness.kr(feed1, 1, 0, hopSize: 64)[0].clip(-120,0) + 120))).lag(128/44100).midicps, val[9] * 3.5);
Out.ar(0,LeakDC.ar([osc1,osc2],mul: 0.1));
LocalOut.ar([osc1,osc2]);
}.play;
// catch the osc messages sent by the SendReply above and update the MultiSliderView
OSCdef(\predictions,{
arg msg;
// msg.postln;
{~multisliderview.value_(msg[3..])}.defer;
},"/predictions");
}
)

@ -0,0 +1,321 @@
(
Window.closeAll;
// set these to your own audio devices! (or comment them out to use the system defaults)
s.options.inDevice_("MacBook Pro Microphone");
s.options.outDevice_("External Headphones");
// s.options.sampleRate_(48000);
s.options.sampleRate_(44100);
s.waitForBoot{
Task{
var win = Window(bounds:Rect(100,100,1000,800));
var label_width = 120;
var item_width = 300;
var mfcc_multslider;
var nMFCCs = 13;
var mfccbuf = Buffer.alloc(s,nMFCCs);
var parambuf = Buffer.alloc(s,3);
var id_counter = 0;
var continuous_training = false;
var mfcc_ds = FluidDataSet(s);
var param_ds = FluidDataSet(s);
var mfcc_ds_norm = FluidDataSet(s);
var param_ds_norm = FluidDataSet(s);
var scaler_params = FluidNormalize(s);
var scaler_mfcc = FluidNormalize(s);
var nn = FluidMLPRegressor(s,[3,3],FluidMLPRegressor.sigmoid,FluidMLPRegressor.sigmoid,learnRate:0.05,batchSize:5,validation:0);
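// a hedged note (not in the original): this network maps the 13 normalized MFCCs to the
// 3 normalized synth parameters through two hidden layers of 3 nodes each. because both
// activations are sigmoid (outputting 0..1), both datasets are passed through the
// FluidNormalize objects above before fitting, and predictions are inverse-scaled on the way out.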
var synth, loss_st;
var param_sliders = Array.newClear(3);
var statsWinSl, hidden_tf, batchSize_nb, momentum_nb, learnRate_nb, maxIter_nb, outAct_pum, act_pum;
var add_point = {
var id = "point-%".format(id_counter);
mfcc_ds.addPoint(id,mfccbuf,{mfcc_ds.print});
param_ds.addPoint(id,parambuf,{param_ds.print});
id_counter = id_counter + 1;
};
var train = {
scaler_mfcc.fitTransform(mfcc_ds,mfcc_ds_norm,{
scaler_params.fitTransform(param_ds,param_ds_norm,{
// mfcc_ds.print;
// param_ds.print;
nn.fit(mfcc_ds_norm,param_ds_norm,{
arg loss;
// loss.postln;
defer{loss_st.string_("loss: %".format(loss))};
if(continuous_training,{
train.value;
});
});
});
});
};
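// a hedged note (not in the original): train re-fits both normalizers, fits the network
// once, and, when continuous_training is true, calls itself again from the fit callback --
// an asynchronous training loop that keeps running until the button below toggles it off.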
var open_mlp = {
arg path;
// nn.prGetParams.postln;
nn.read(path,{
var params = nn.prGetParams;
var n_layers = params[1];
var layers_string = "";
// params.postln;
n_layers.do({
arg i;
if(i > 0,{layers_string = "% ".format(layers_string)});
layers_string = "%%".format(layers_string,params[2+i]);
});
nn.maxIter_(maxIter_nb.value);
nn.learnRate_(learnRate_nb.value);
nn.momentum_(momentum_nb.value);
nn.batchSize_(batchSize_nb.value);
defer{
hidden_tf.string_(layers_string);
act_pum.value_(nn.activation);
outAct_pum.value_(nn.outputActivation);
/* maxIter_nb.value_(nn.maxIter);
learnRate_nb.value_(nn.learnRate);
momentum_nb.value_(nn.momentum);
batchSize_nb.value_(nn.batchSize);*/
};
});
};
s.sync;
synth = {
arg vol = -15, isPredicting = 0, avg_win = 0, smooth_params = 0;
var params = FluidStats.kr(FluidBufToKr.kr(parambuf),ControlRate.ir * smooth_params * isPredicting)[0];
var msig = SinOsc.ar(params[1],0,params[2] * params[1]);
var csig = SinOsc.ar(params[0] + msig);
var sound_in = SoundIn.ar(0);
var analysis_sig, mfccs, trig, mfccbuf_norm, parambuf_norm;
csig = BLowPass4.ar(csig,16000);
csig = BHiPass4.ar(csig,40);
analysis_sig = Select.ar(isPredicting,[csig,sound_in]);
mfccs = FluidMFCC.kr(analysis_sig,nMFCCs,startCoeff:1,maxNumCoeffs:nMFCCs);
trig = Impulse.kr(30);
mfccbuf_norm = LocalBuf(nMFCCs);
parambuf_norm = LocalBuf(3);
mfccs = FluidStats.kr(mfccs,ControlRate.ir * avg_win)[0];
FluidKrToBuf.kr(mfccs,mfccbuf);
scaler_mfcc.kr(trig * isPredicting,mfccbuf,mfccbuf_norm);
nn.kr(trig * isPredicting,mfccbuf_norm,parambuf_norm);
scaler_params.kr(trig * isPredicting,parambuf_norm,parambuf,invert:1);
SendReply.kr(trig * isPredicting,"/params",params);
SendReply.kr(trig,"/mfccs",mfccs);
csig = csig.dup;
csig * Select.kr(isPredicting,[vol.dbamp,Amplitude.kr(sound_in)]);
}.play;
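// a hedged summary (not in the original) of the loop above: when isPredicting is 0, the
// MFCCs are computed on the synth's own output, so "Add Point" pairs the current sound's
// MFCCs with the current parameters; when isPredicting is 1, the live input is analyzed
// instead, its normalized MFCCs run through the network, and the prediction is
// denormalized (invert:1) back into parambuf to drive the FM parameters.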
s.sync;
win.view.decorator_(FlowLayout(Rect(0,0,win.bounds.width,win.bounds.height)));
param_sliders[0] = EZSlider(win,Rect(0,0,item_width,20),"carrier freq",\freq.asSpec,{arg sl; parambuf.set(0,sl.value)},440,true,label_width);
win.view.decorator.nextLine;
param_sliders[1] = EZSlider(win,Rect(0,0,item_width,20),"mod freq",\freq.asSpec,{arg sl; parambuf.set(1,sl.value)},100,true,label_width);
win.view.decorator.nextLine;
param_sliders[2] = EZSlider(win,Rect(0,0,item_width,20),"index",ControlSpec(0,20),{arg sl; parambuf.set(2,sl.value)},10,true,label_width);
win.view.decorator.nextLine;
EZSlider(win,Rect(0,0,item_width,20),"params avg smooth",nil.asSpec,{arg sl; synth.set(\smooth_params,sl.value)},0,true,label_width); // set \smooth_params (not \avg_win): this slider smooths the predicted parameters
win.view.decorator.nextLine;
StaticText(win,Rect(0,0,label_width,20)).string_("% MFCCs".format(nMFCCs));
win.view.decorator.nextLine;
statsWinSl = EZSlider(win,Rect(0,0,item_width,20),"mfcc avg smooth",nil.asSpec,{arg sl; synth.set(\avg_win,sl.value)},0,true,label_width);
win.view.decorator.nextLine;
mfcc_multslider = MultiSliderView(win,Rect(0,0,item_width,200))
.size_(nMFCCs)
.elasticMode_(true);
win.view.decorator.nextLine;
Button(win,Rect(0,0,100,20))
.states_([["Add Point"]])
.action_{
add_point.value;
};
win.view.decorator.nextLine;
// spacer
StaticText(win,Rect(0,0,label_width,20));
win.view.decorator.nextLine;
// MLP Parameters
StaticText(win,Rect(0,0,label_width,20)).align_(\right).string_("hidden layers");
hidden_tf = TextField(win,Rect(0,0,item_width - label_width,20))
.string_(nn.hidden.asString.replace(", "," ")[2..(nn.hidden.asString.size-3)]) // e.g. "[ 3, 3 ]" becomes "3 3" for display
.action_{
arg tf;
var hidden_ = "[%]".format(tf.string.replace(" ",",")).interpret;
nn.hidden_(hidden_);
// nn.prGetParams.postln;
};
win.view.decorator.nextLine;
StaticText(win,Rect(0,0,label_width,20)).align_(\right).string_("activation");
act_pum = PopUpMenu(win,Rect(0,0,item_width - label_width,20))
.items_(["identity","sigmoid","relu","tanh"])
.value_(nn.activation)
.action_{
arg pum;
nn.activation_(pum.value);
// nn.prGetParams.postln;
};
win.view.decorator.nextLine;
StaticText(win,Rect(0,0,label_width,20)).align_(\right).string_("output activation");
outAct_pum = PopUpMenu(win,Rect(0,0,item_width - label_width,20))
.items_(["identity","sigmoid","relu","tanh"])
.value_(nn.outputActivation)
.action_{
arg pum;
nn.outputActivation_(pum.value);
// nn.prGetParams.postln;
};
win.view.decorator.nextLine;
maxIter_nb = EZNumber(win,Rect(0,0,item_width,20),"max iter",ControlSpec(1,10000,step:1),{
arg nb;
nn.maxIter_(nb.value.asInteger);
// nn.prGetParams.postln;
},nn.maxIter,false,label_width);
win.view.decorator.nextLine;
learnRate_nb = EZNumber(win,Rect(0,0,item_width,20),"learn rate",ControlSpec(0.001,1.0),{
arg nb;
nn.learnRate_(nb.value);
// nn.prGetParams.postln;
},nn.learnRate,false,label_width);
win.view.decorator.nextLine;
momentum_nb = EZNumber(win,Rect(0,0,item_width,20),"momentum",ControlSpec(0,1),{
arg nb;
nn.momentum_(nb.value);
// nn.prGetParams.postln;
},nn.momentum,false,label_width);
win.view.decorator.nextLine;
batchSize_nb = EZNumber(win,Rect(0,0,item_width,20),"batch size",ControlSpec(1,1000,step:1),{
arg nb;
nn.batchSize_(nb.value.asInteger);
// nn.prGetParams.postln;
},nn.batchSize,false,label_width);
win.view.decorator.nextLine;
Button(win,Rect(0,0,100,20))
.states_([["Train"]])
.action_{
train.value;
};
Button(win,Rect(0,0,200,20))
.states_([["Continuous Training Off"],["Continuous Training On"]])
.action_{
arg but;
continuous_training = but.value.asBoolean;
train.value;
};
win.view.decorator.nextLine;
loss_st = StaticText(win,Rect(0,0,item_width,20)).string_("loss:");
win.view.decorator.nextLine;
Button(win,Rect(0,0,100,20))
.states_([["Not Predicting"],["Predicting"]])
.action_{
arg but;
synth.set(\isPredicting,but.value);
};
win.view.decorator.nextLine;
Button(win,Rect(0,0,100,20))
.states_([["Save MLP"]])
.action_{
Dialog.savePanel({
arg path;
nn.write(path);
});
};
Button(win,Rect(0,0,100,20))
.states_([["Open MLP"]])
.action_{
Dialog.openPanel({
arg path;
open_mlp.(path);
});
};
win.bounds_(win.view.decorator.used);
win.front;
OSCdef(\mfccs,{
arg msg;
// msg.postln;
defer{
mfcc_multslider.value_(msg[3..].linlin(-40,40,0,1)); // MFCC values here roughly span -40..40; map them to 0..1 for display
};
},"/mfccs");
OSCdef(\params,{
arg msg;
// msg.postln;
defer{
param_sliders.do{
arg sl, i;
sl.value_(msg[3 + i]);
};
};
},"/params");
s.sync;
/* statsWinSl.valueAction_(0.1);
100.do{
var cfreq = exprand(20,20000);
var mfreq = exprand(20,20000);
var index = rrand(0.0,20);
parambuf.setn(0,[cfreq,mfreq,index]);
0.2.wait;
add_point.value;
0.05.wait;
};*/
/* 100.do{
var cfreq = exprand(60,4000);
var mfreq = exprand(60,1000).clip(0,cfreq);
var index = rrand(0.0,20);
parambuf.setn(0,[cfreq,mfreq,index]);
0.2.wait;
add_point.value;
0.05.wait;
};*/
}.play(AppClock);
};
)

@ -0,0 +1,153 @@
/* ======= 1. Hear the Sound ============
load a part of a sound that has 3 clear components:
- a clear pitch component to start
- a noisy pitchless ending
- DC offset silence on both ends
*/
(
~src = Buffer.read(s,File.realpath(FluidBufPitch.class.filenameSymbol).dirname.withTrailingSlash ++ "../AudioFiles/Tremblay-ASWINE-ScratchySynth-M.wav");//,42250,44100);
)
// listen
~src.play;
// ======= Let's try to extract that frequency from the audio file. ===========
// analyze
~pitches = Buffer(s);
~stats = Buffer(s);
FluidBufPitch.process(s,~src,features: ~pitches);
FluidBufStats.process(s,~pitches,stats:~stats);
(
// get the average frequency (frame 0 of the stats buffer is the mean)
~stats.get(0,{
arg f;
~avgfreq = f;
~avgfreq.postln;
});
)
(
// play a sine tone at the avg freq alongside the soundfile
//average freq
~avgfreq_synth = {SinOsc.ar(~avgfreq,mul: 0.05)}.play;
//compare with the source
~src.play;
)
// hmm... that seems wrong...
/*
what if we weight the average frequency by the loudness
of the analysis frame so that the silences are not considered
as strongly.
*/
// do a loudness analysis
~loud = Buffer(s);
FluidBufLoudness.process(s,~src,features:~loud);
FluidBufStats.process(s,~loud,stats:~stats);
(
// get min and max
~stats.loadToFloatArray(action:{
arg stats;
// stats frames are [mean, stddev, skewness, kurtosis, min, median, max] per channel;
// clump(2).flop[0] gathers the loudness channel, so index [4] is the min and [6] the max
~min_loudness = stats.clump(2).flop[0][4];
~max_loudness = stats.clump(2).flop[0][6];
~min_loudness.postln;
~max_loudness.postln;
});
)
// scale the loudness analysis from 0 to 1, using the min and max gotten above
~scaled = Buffer(s);
FluidBufScale.process(s,~loud,numChans: 1,destination: ~scaled,inputLow: ~min_loudness,inputHigh: ~max_loudness);
// then use this scaled analysis to weight the statistical analysis
FluidBufStats.process(s,~pitches,numChans:1,stats:~stats,weights:~scaled);
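/*
a hedged aside (not part of the original example): the weighted mean computed with the
'weights' buffer is sum(w * x) / sum(w), so frames with weight 0 (the silences) contribute
nothing. a quick sclang sanity check:

([100, 200, 300] * [0, 1.0, 1.0]).sum / [0, 1.0, 1.0].sum // -> 250.0
*/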
(
// get the average freq (now with the weighted average)
~stats.get(0,{
arg f;
~avgfreq = f;
~avgfreq.postln;
});
)
(
// play a sine tone at the avg freq alongside the soundfile
//average freq
~avgfreq_synth = {SinOsc.ar(~avgfreq,mul: 0.05)}.play;
//compare with the source
~src.play;
)
// hmm... still wrong. too low now.
/*
ok, how about if we weight not by loudness, but by the pitch confidence of the pitch analysis
*/
FluidBufPitch.process(s,~src,features: ~pitches);
~thresh_buf = Buffer(s);
// channel 1 of the pitch analysis is the confidence; keep only frames with confidence >= 0.8
FluidBufThresh.process(s, ~pitches, startChan: 1, numChans: 1, destination: ~thresh_buf, threshold: 0.8);
FluidBufStats.process(s,~pitches,numChans:1,stats:~stats,weights:~thresh_buf);
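/*
a hedged aside (not part of the original example): FluidBufThresh zeroes everything below
the threshold and passes the rest through, so the confidence channel becomes a weight
vector in which only confident frames count, e.g. [0.2, 0.95, 0.9] -> [0, 0.95, 0.9].
*/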
(
// get the average freq
~stats.get(0,{
arg f;
~avgfreq = f;
~avgfreq.postln;
});
)
(
// play a sine tone at the avg freq alongside the soundfile
//average freq
~avgfreq_synth = {SinOsc.ar(~avgfreq,mul: 0.05)}.play;
//compare with the source
~src.play;
)
// closer!
FluidBufPitch.process(s,~src,features: ~pitches);
(
~pitches.loadToFloatArray(action:{
arg pitches;
defer{pitches.histo(50,1000,20000).plot(discrete:true)};
});
)
// raise the threshold and toss out some outliers
FluidBufPitch.process(s,~src,features: ~pitches);
~pitches.plot(separately:true);
~thresh_buf = Buffer(s);
FluidBufThresh.process(s, ~pitches, startChan: 1, numChans: 1, destination: ~thresh_buf, threshold: 0.9); // stricter confidence threshold this time
FluidBufStats.process(s,~pitches,numChans:1,stats:~stats,weights:~thresh_buf,outliersCutoff:1.5);
(
// get the average freq
~stats.get(0,{
arg f;
~avgfreq = f;
~avgfreq.postln;
});
)
(
// play a sine tone at the avg freq alongside the soundfile
//average freq
~avgfreq_synth = {SinOsc.ar(~avgfreq,mul: 0.05)}.play;
//compare with the source
~src.play;
)

@ -1,90 +1,101 @@
//1- make the gui then the synth below
(
Window.closeAll;
s.waitForBoot{
Task{
var trained = 0, entering = 0;
var input_buffer = Buffer.alloc(s,2);
var output_buffer = Buffer.alloc(s,10);
var mlp = FluidMLPRegressor(s,[6],activation: 1,outputActivation: 1,maxIter: 1000,learnRate: 0.1,momentum: 0.9,batchSize: 1);
var entry_counter = 0;
var win, multislider, xyslider, synth, error_st, prediction_but, addPoints_but, train_but;
var item_width = 100;
var inData = FluidDataSet(s);
var outData = FluidDataSet(s);
win = Window("ChaosSynth", Rect(10, 10, 840, 320)).front;
multislider = MultiSliderView(win,Rect(10, 10, 400, 300))
.elasticMode_(1)
.isFilled_(1)
.action_({
arg ms;
// ms.value.postln;
synth.set(\val,ms.value);
output_buffer.setn(0,ms.value);
})
.value_(0.5.dup(10));
xyslider = Slider2D(win,Rect(420,10,300, 300))
.x_(0.5)
.y_(0.5)
.action_({
arg sl;
input_buffer.setn(0,[sl.x,sl.y]);
if(prediction_but.value.asBoolean,{
mlp.predictPoint(input_buffer,output_buffer,{
output_buffer.getn(0,10,{
arg output_values;
synth.set(\val, output_values);
{
multislider.value_(output_values)
}.defer;
});
});
});
});
addPoints_but = Button(win, Rect(730,10,item_width, 20))
.states_([["add points", Color.white, Color.grey]])
.action_({
inData.addPoint(entry_counter.asSymbol,input_buffer);
outData.addPoint(entry_counter.asSymbol,output_buffer);
entry_counter = entry_counter + 1;
inData.print;
outData.print;
});
train_but = Button(win, Rect(730,240,item_width, 20))
.states_([["train", Color.red, Color.white]])
.action_({
mlp.fit(inData,outData,{
arg loss;
{error_st.string_("loss: %".format(loss.round(0.001)))}.defer;
});
});
prediction_but = Button(win, Rect(730,40,item_width, 20))
.states_([["Not Predicting", Color.black, Color.white],["Predicting",Color.black,Color.white]]);
error_st = StaticText(win,Rect(732,260,item_width,20)).string_("Error:");
StaticText(win,Rect(732,70,item_width,20)).string_("rate:");
TextField(win,Rect(730,90,item_width,20)).string_(0.1.asString).action_{|in|mlp.learnRate = in.value.asFloat.postln;};
StaticText(win,Rect(732,110,item_width,20)).string_("momentum:");
TextField(win,Rect(730,130,item_width,20)).string_(0.9.asString).action_{|in|mlp.momentum = in.value.asFloat.postln;};
StaticText(win,Rect(732,150,item_width,20)).string_("maxIter:");
TextField(win,Rect(730,170,item_width,20)).string_(1000.asString).action_{|in| mlp.maxIter = in.value.asInteger.postln;};
s.sync;
//2- the synth
synth = {
arg val = #[0,0,0,0,0,0,0,0,0,0];
var osc1, osc2, feed1, feed2, base1=69, base2=69, base3 = 130;
#feed2,feed1 = LocalIn.ar(2);
osc1 = MoogFF.ar(SinOsc.ar((((feed1 * val[0]) + val[1]) * base1).midicps,mul: (val[2] * 50).dbamp).atan,(base3 - (val[3] * (FluidLoudness.kr(feed2, 1, 0, hopSize: 64)[0].clip(-120,0) + 120))).lag(128/44100).midicps, val[4] * 3.5);
osc2 = MoogFF.ar(SinOsc.ar((((feed2 * val[5]) + val[6]) * base2).midicps,mul: (val[7] * 50).dbamp).atan,(base3 - (val[8] * (FluidLoudness.kr(feed1, 1, 0, hopSize: 64)[0].clip(-120,0) + 120))).lag(128/44100).midicps, val[9] * 3.5);
Out.ar(0,LeakDC.ar([osc1,osc2],mul: 0.1));
LocalOut.ar([osc1,osc2]);
}.play;
}.play(AppClock);
};
)
/////////
//3 - play with the multislider
//4 - when you like a spot, move the 2D slider to a position that you want to represent that sound and click "add points"
//5 - do that for a few points
//6 - click train; keep clicking train until the loss is at or below 0.01 or so. feel free to adjust the learning rate, momentum, and maxIter.
//7 - once trained, click "Not Predicting" so it reads "Predicting": the 2D graph now controls the 10D parameter space
//8 - if you like a new sound and you want to update the mapping, move the 2D slider to where that sound should live, click "add points", then retrain when you are happy

@ -4,14 +4,16 @@ CATEGORIES:: Libraries>FluidCorpusManipulation
RELATED:: Guides/FluidCorpusManipulation, Classes/Pitch
DESCRIPTION::
FluidPitch implements three popular pitch descriptors. It outputs two values: the computed frequency and the confidence in the accuracy of that frequency. It is part of the LINK:: Guides/FluidCorpusManipulation##Fluid Corpus Manipulation Toolkit::. For more explanations, learning materials, and discussions on its musicianly uses, visit https://www.flucoma.org/reference/pitch
FluidPitch outputs a multichannel control stream of [pitch, confidence] values. The 'unit' argument changes the unit of the pitch output: 'unit' set to 0 will return a frequency in Hz; 'unit' set to 1 will return a MIDI note number (with decimals for microtonal values). If the chosen algorithm cannot determine a fundamental pitch at all, a frequency of 0 Hz is returned (or -999.0 when the unit is MIDI note).
When the hopSize is larger than the server's kr period, the returned values will be repeated until the next window is computed.
CLASSMETHODS::
METHOD:: kr
The audio rate in, control rate out version of the object.
ARGUMENT:: in
The audio to be processed.
@ -21,26 +23,26 @@ ARGUMENT:: algorithm
TABLE::
## 0 || Cepstrum: Returns a pitch estimate as the location of the second highest peak in the Cepstrum of the signal (after DC).
## 1 || Harmonic Product Spectrum: Implements the Harmonic Product Spectrum algorithm for pitch detection. See e.g. FOOTNOTE:: A. Lerch, "An Introduction to Audio Content Analysis: Applications in Signal Processing and Music Informatics." John Wiley & Sons, 2012. https://onlinelibrary.wiley.com/doi/book/10.1002/9781118393550 ::
## 2 || YinFFT: Implements the frequency domain version of the YIN algorithm, as described in FOOTNOTE::P. M. Brossier, "Automatic Annotation of Musical Audio for Interactive Applications.” QMUL, London, UK, 2007. :: See also FOOTNOTE::https://essentia.upf.edu/documentation/reference/streaming_PitchYinFFT.html::
::
ARGUMENT:: minFreq
The minimum frequency that the algorithm will search for an estimated fundamental. This sets the lowest value that will be generated. The default is 20.
ARGUMENT:: maxFreq
The maximum frequency that the algorithm will search for an estimated fundamental. This sets the highest value that will be generated. The default is 10000.
ARGUMENT:: unit
The unit of the estimated frequency. The default of 0 is in Hz. A value of 1 will convert to MIDI note values.
ARGUMENT:: windowSize
The window size. The number of samples used to calculate the FFT.
ARGUMENT:: hopSize
The window hop size. As sinusoidal estimation relies on spectral frames, we need to move the window forward. It can be any size but low overlap will create audible artefacts. The -1 default value will default to half of windowSize (overlap of 2).
ARGUMENT:: fftSize
The inner FFT/IFFT size. It must be at least 4 samples long, at least the size of the window, and a power of 2. Making it larger allows an oversampling of the spectral precision. The -1 default value will use the next power of 2 equal to or above the windowSize.
ARGUMENT:: maxFFTSize
The maximum size the FFT can be, which determines how much memory is allocated at instantiation time. This cannot be modulated.
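A minimal usage sketch (illustrative, not from the original file): poll the estimated pitch and confidence of a test tone.
code::
(
{
	var sig = SinOsc.ar(MouseX.kr(110, 880, \exponential), mul: 0.1);
	var pitch, confidence;
	# pitch, confidence = FluidPitch.kr(sig);
	pitch.poll(10, "freq");
	confidence.poll(10, "confidence");
	sig.dup;
}.play;
)
::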
