Merge remote-tracking branch 'origin/main' into dev

nix
tremblap 3 years ago
commit 339fad944d

@ -9,7 +9,7 @@
(
// ============= 1. LOAD SOME FILES TO BE THE SOURCE MATERIAL ===================
// put your own folder path here! it's best if they're all mono for now.
-~source_files_folder = "/Users/macprocomputer/Desktop/sccm/files_fabrizio_01/src_files/";
+~source_files_folder = FluidFilesPath();
~loader = FluidLoadFolder(~source_files_folder); // this is a nice helper class that will load a bunch of files from a folder.
~loader.play(s,{ // .play will cause it to *actually* do the loading
@ -62,7 +62,7 @@ FluidBufOnsetSlice.process(s,~source_buf,indices:~source_indices_buf,metric:9,th
"analyzing slice: % / %".format(slice_index + 1,slices_array.size - 1).postln;
// mfcc analysis, hop over that 0th coefficient because it relates to loudness and here we want to focus on timbre
-FluidBufMFCC.process(s,audio_buffer,start_frame,num_frames,features:features_buf,startCoeff:1,numCoeffs:~nmfccs).wait;
+FluidBufMFCC.process(s,audio_buffer,start_frame,num_frames,features:features_buf,startCoeff:1,numCoeffs:~nmfccs, numChans: 1).wait;
// get a statistical summary of the MFCC analysis for this slice
FluidBufStats.process(s,features_buf,stats:stats_buf).wait;
@ -205,7 +205,7 @@ Routine{
var end_frame = Index.kr(~source_indices_buf,index+1); // same for the end frame
var num_frames = end_frame - start_frame;
var dur_secs = min(num_frames / SampleRate.ir(~source_buf),src_dur);
-var sig = PlayBuf.ar(1,~source_buf,BufRateScale.ir(~source_buf),0,start_frame,0,2);
+var sig = PlayBuf.ar(~loader.buffer.numChannels,~source_buf,BufRateScale.ir(~source_buf),0,start_frame,0,2);
var env = EnvGen.kr(Env([0,1,1,0],[0.03,dur_secs-0.06,0.03]),doneAction:2);
// sig = sig * env; // include this env if you like, but keep the line above because it will free the synth after the slice!
sig.dup;
@ -219,7 +219,10 @@ Routine{
// is is very similar to step 8 above, but now instead of playing the slice of
// the drum loop, it get's the analysis of the drum loop's slice into "query_buf",
// then uses that info to lookup the nearest neighbour in the source dataset and
-// play that slice
+// play that slice. If you used, at line 12 above, the FluCoMa sound set, it sounds boringly
+// similar: this is because the target drum loop is in the corpus! So it finds, for each slice
+// itself... this is a good incentive to reload, with your own soundbank :)
Routine{
var query_buf = Buffer.alloc(s,~nmfccs); // a buffer for doing the neighbor lookup with
var scaled_buf = Buffer.alloc(s,~nmfccs);
@ -261,7 +264,7 @@ Routine{
// once it's loaded, scale it using the scaler
~scaler.transformPoint(query_buf,scaled_buf,{
// once it's neighbour data point in the kdtree of source slices
-~kdtree.kNearest(scaled_buf,{
+~kdtree.kNearest(scaled_buf,action: {
arg nearest;
// peel off just the integer part of the slice to use in the helper function

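For reference, the lookup-and-play chain this hunk belongs to reduces to the sketch below. The dataset name ~target_analyses and the ~play_source_slice helper are hypothetical stand-ins for names defined elsewhere in this file, and the sketch assumes ~scaler and ~kdtree have already been fitted.

(
~query_buf = Buffer.alloc(s,~nmfccs);
~scaled_buf = Buffer.alloc(s,~nmfccs);
~target_analyses.getPoint("slice-0",~query_buf,{ // hypothetical identifier of one drum slice
	~scaler.transformPoint(~query_buf,~scaled_buf,{ // scale the query the same way the corpus was scaled
		~kdtree.kNearest(~scaled_buf,action: { arg nearest; // reported back as a symbol such as 'slice-12'
			~play_source_slice.(nearest.asString.split($-)[1].asInteger); // helper assumed from earlier in the file
		});
	});
});
)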
@ -87,7 +87,7 @@ s.waitForBoot{
ds.dump({ // dump out that dataset to dictionary so that we can use it with the plotter!
arg ds_dict;// the dictionary version of this dataset
var previous = nil; // a variable for checking if the currently passed nearest neighbour is the same or different from the previous one
-FluidPlotter(bounds:Rect(0,0,800,800),dict:ds_dict,mouseMoveAction:{
+{FluidPlotter(bounds:Rect(0,0,800,800),dict:ds_dict,mouseMoveAction:{
/* make a FluidPlotter. nb. the dict is the dict from a FluidDataSet.dump. the mouseMoveAction is a callback function that is called
anytime the mouseDownAction or mouseMoveAction function is called on this view. i.e., anytime you click or drag on this plotter */
@ -99,7 +99,7 @@ s.waitForBoot{
(4) modifier keys that are pressed while clicking or dragging
*/
point_buf.setn(0,[x,y]); // write the x y position into a buffer so that we can use it to...
-kdtree.kNearest(point_buf,{ // look up the nearest slice to that x y position
+kdtree.kNearest(point_buf, action:{ // look up the nearest slice to that x y position
arg nearest; // this is reported back as a symbol, so...
nearest = nearest.asString; // we'll convert it to a string here
@ -124,7 +124,7 @@ s.waitForBoot{
previous = nearest;
});
});
-});
+})}.defer;
});
});
});

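The other change in this file wraps the FluidPlotter construction in { ... }.defer. The dump callback arrives from a server reply rather than from the AppClock, and GUI objects have to be built on the AppClock, so only that part gets deferred. The same pattern in miniature, with hypothetical names:

(
~ds.dump({ arg dict;
	// building the view directly in this callback would fail with the usual "not on AppClock" complaint;
	// defer reschedules just this function onto the AppClock
	{ ~plot = FluidPlotter(bounds:Rect(0,0,400,400),dict:dict) }.defer;
});
)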
@ -1,14 +1,12 @@
(
// 1. Instantiate some of the things we need.
Window.closeAll;
s.options.sampleRate_(48000);
s.options.device_("Fireface UC Mac (24006457)");
s.waitForBoot{
Task{
var win;
~nMFCCs = 13;
-~trombone = Buffer.read(s,"/Users/macprocomputer/Desktop/_flucoma/code/flucoma-core-src/AudioFiles/Olencki-TenTromboneLongTones-M.wav");
+~trombone = Buffer.read(s,FluidFilesPath("Olencki-TenTromboneLongTones-M.wav"));
-~oboe = Buffer.read(s,"/Users/macprocomputer/Desktop/_flucoma/code/flucoma-core-src/AudioFiles/Harker-DS-TenOboeMultiphonics-M.wav");
+~oboe = Buffer.read(s,FluidFilesPath("Harker-DS-TenOboeMultiphonics-M.wav"));
~timbre_buf = Buffer.alloc(s,~nMFCCs);
~ds = FluidDataSet(s);
~labels = FluidLabelSet(s);
@ -169,63 +167,10 @@ some points that are labeled "silence".
~ds.print;
~labels.print;
-~ds.write("/Users/macprocomputer/Desktop/_flucoma/code/Utrecht-2021/Lesson_Plans/classifier (pre-workshop)/%_ds.json".format(Date.localtime.stamp));
+~ds.write("tmp/%_ds.json".format(Date.localtime.stamp));
-~labels.write("/Users/macprocomputer/Desktop/_flucoma/code/Utrecht-2021/Lesson_Plans/classifier (pre-workshop)/%_labels.json".format(Date.localtime.stamp))
+~labels.write("tmp/%_labels.json".format(Date.localtime.stamp))
/*
12. Now go retrain some more and do some more predictions. The silent gaps between
tones should now report a "2".
*/
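Reading a saved session back in later mirrors the writes above (a sketch; the stamps below are placeholders for whatever Date.localtime.stamp produced when you saved):

~ds.read("tmp/211102_120000_ds.json"); // hypothetical filename
~labels.read("tmp/211102_120000_labels.json"); // hypothetical filename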
// ========================= DATA VERIFICATION ADDENDUM ============================
// This data is pretty well separated, except for that one trombone point.
~ds.read("/Users/macprocomputer/Desktop/_flucoma/code/Utrecht-2021/Lesson_Plans/classifier (pre-workshop)/211102_122330_ds.json");
~labels.read("/Users/macprocomputer/Desktop/_flucoma/code/Utrecht-2021/Lesson_Plans/classifier (pre-workshop)/211102_122331_labels.json");
/*
This data is not well separated. Once can see that in the cluster that should probably be all silences,
there is a lot of oboe and trombone points mixed in!
This will likely be confusing to a neural network!
*/
~ds.read("/Users/macprocomputer/Desktop/_flucoma/code/Utrecht-2021/Lesson_Plans/classifier (pre-workshop)/211102_122730_ds.json");
~labels.read("/Users/macprocomputer/Desktop/_flucoma/code/Utrecht-2021/Lesson_Plans/classifier (pre-workshop)/211102_122731_labels.json");
(
Task{
~stand = FluidStandardize(s);
~ds_plotter = FluidDataSet(s);
~umap = FluidUMAP(s,2,30,0.5);
~normer = FluidNormalize(s);
~kdtree = FluidKDTree(s);
~pt_buf = Buffer.alloc(s,2);
s.sync;
~stand.fitTransform(~ds,~ds_plotter,{
~umap.fitTransform(~ds_plotter,~ds_plotter,{
~normer.fitTransform(~ds_plotter,~ds_plotter,{
~kdtree.fit(~ds_plotter,{
~ds_plotter.dump({
arg ds_dict;
~labels.dump({
arg label_dict;
// label_dict.postln;
~plotter = FluidPlotter(bounds:Rect(0,0,800,800),dict:ds_dict,mouseMoveAction:{
arg view, x, y;
~pt_buf.setn(0,[x,y]);
~kdtree.kNearest(~pt_buf,{
arg nearest;
"%:\t%".format(nearest,label_dict.at("data").at(nearest.asString)[0]).postln;
});
});
~plotter.categories_(label_dict);
});
});
});
});
});
});
}.play(AppClock);
)

@ -13,7 +13,7 @@ Routine{
~windowSize = 4096;
~hopSize = 512;
-~buf = Buffer.read(s,"/Users/macprocomputer/Desktop/_flucoma/code/flucoma-core-src/AudioFiles/Tremblay-FMTri-M.wav");
+~buf = Buffer.read(s,FluidFilesPath("Tremblay-FMTriDist-M.wav"));
s.sync;

@ -61,7 +61,7 @@ y.set(\mix,1);
// send just the 'sines' to a Reverb
(
{
-var sig = PlayBuf.ar(1,~song,BufRateScale.ir(~buf),loop:1);
+var sig = PlayBuf.ar(2,~song,BufRateScale.ir(~buf),loop:1).sum * 0.5;
var sines, residual;
var latency = ((15 * 512) + 1024 ) / ~song.sampleRate;
# sines, residual = FluidSines.ar(sig);
@ -72,7 +72,7 @@ y.set(\mix,1);
// send just the 'residual' to a Reverb
(
{
-var sig = PlayBuf.ar(1,~song,BufRateScale.ir(~buf),loop:1);
+var sig = PlayBuf.ar(2,~song,BufRateScale.ir(~buf),loop:1).sum * 0.5;
var sines, residual;
var latency = ((15 * 512) + 1024 ) / ~song.sampleRate;
# sines, residual = FluidSines.ar(sig);

@ -1,38 +1,53 @@
/*
this script shows how to
1. load a folder of sounds
2. find smaller time segments within the sounds according to novelty
3. analyse the sounds according to MFCC and add these analyses to a dataset
4. dimensionally reduce that dataset to 2D using umap
5. (optional) turn the plot of points in 2D into a grid
6. plot the points!
notice that each step in this process is created within a function so that
at the bottom of the patch, these functions are all chained together to
do the whole process in one go!
*/
(
-// 1. define a function to load a folder of sounds
+// 1. load a folder of sounds
~load_folder = {
arg folder_path, action;
-var loader = FluidLoadFolder(folder_path);
+var loader = FluidLoadFolder(folder_path); // pass in the folder to load
-loader.play(s,{
+loader.play(s,{ // play will do the actual loading
-fork{
-var mono_buffer = Buffer.alloc(s,loader.buffer.numFrames); // convert to mono for ease of use for this example
-FluidBufCompose.processBlocking(s,loader.buffer,destination:mono_buffer,numChans:1);
-s.sync;
+var mono_buffer = Buffer.alloc(s,loader.buffer.numFrames);
+FluidBufCompose.processBlocking(s,loader.buffer,destination:mono_buffer,numChans:1,action:{
action.(mono_buffer);
-}
+});
});
};
// this will load all the audio files that are included with the flucoma toolkit, but you can put your own path here:
~load_folder.(FluidFilesPath(),{
arg buffer;
"mono buffer: %".format(buffer).postln;
-~buffer = buffer;
+~buffer = buffer; // save the buffer to a global variable so we can use it later
});
)
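A couple of optional sanity checks once the callback has fired, using nothing beyond standard Buffer methods:

~buffer.numChannels.postln; // should be 1 after the FluidBufCompose fold-down
~buffer.duration.postln; // total length of the concatenated corpus, in seconds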
(
-// 2. define a function to slice the sounds, play with the threshold to get different results
+// 2. slice the sounds
~slice = {
arg buffer, action;
-Routine{
-var indices = Buffer(s);
-s.sync;
-FluidBufNoveltySlice.process(s,buffer,indices:indices,threshold:0.5,action:{
+var indices = Buffer(s); // a buffer for saving the discovered indices into
+// play around the the threshold anad feature (see help file) to get differet slicing results
+FluidBufNoveltySlice.processBlocking(s,buffer,indices:indices,algorithm:0,threshold:0.5,action:{
"% slices found".format(indices.numFrames).postln;
"average duration in seconds: %".format(buffer.duration/indices.numFrames).postln;
action.(buffer,indices);
});
-}.play;
};
~slice.(~buffer,{
@ -41,34 +56,65 @@
});
)
// you may want to check the slice points here using FluidWaveform
FluidWaveform(~buffer,~indices); // it may also be way too many slices to see properly!
(
// 3. analyze the slices
~analyze = {
arg buffer, indices, action;
-var time = SystemClock.seconds;
+var time = SystemClock.seconds; // a timer just to keep tabs on how long this stuff is taking
Routine{
-var feature_buf = Buffer(s);
+var feature_buf = Buffer(s); // a buffer for storing the mfcc analyses into
-var stats_buf = Buffer(s);
+var stats_buf = Buffer(s); // a buffer for storing the stats into
-var point_buf = Buffer(s);
+var point_buf = Buffer(s); // a buffer we will use to add points to the dataset
-var ds = FluidDataSet(s);
+var ds = FluidDataSet(s); // the dataset that we'll add all these mfcc analyses to
// bring the values in the slicepoints buffer from the server to the language as a float array
indices.loadToFloatArray(action:{
-arg fa;
+arg fa; // float array
fa.doAdjacentPairs{
/*
take each of the adjacent pairs and pass them to this function as an array of 2 values
nb. for example [0,1,2,3,4] will execute this function 4 times, passing these 2 value arrays:
[0,1]
[1,2]
[2,3]
[3,4]
this will give us each slice point *and* the next slice point so that we
can tell the analyzers where to start analyzing and how many frames to analyze
*/
arg start, end, i;
// the next slice point minus the current one will give us the difference how many slices to analyze)
var num = end - start;
/* analyze the drum buffer starting at `start_samps` and for `num_samps` samples
this returns a buffer (feautre_buf) that is 13 channels wide (for the 13 mfccs, see helpfile) and
however many frames long as there are fft frames in the slice */
FluidBufMFCC.processBlocking(s,buffer,start,num,features:feature_buf,numCoeffs:13,startCoeff:1);
/* perform a statistical analysis on the mfcc analysis
this will return just 13 channels, one for each mfcc channel in the feature_buf.
each channel will have 7 frames corresponding to the 7 statistical analyses that it performs
on that channel */
FluidBufStats.processBlocking(s,feature_buf,stats:stats_buf);
/* take all 13 channels from stats_buf, but just the first frame (mean) and convert it into a buffer
that is 1 channel and 13 frames. this shape will be considered "flat" and therefore able to be
added to the dataset */
FluidBufFlatten.processBlocking(s,stats_buf,numFrames:1,destination:point_buf);
// add it
ds.addPoint("slice-%".format(i),point_buf);
"Processing Slice % / %".format(i+1,indices.numFrames-1).postln;
};
s.sync;
-feature_buf.free; stats_buf.free; point_buf.free;
+feature_buf.free; stats_buf.free; point_buf.free; // free buffers
ds.print;
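If the windowing logic above is hard to picture, doAdjacentPairs can be tried on its own in the language, no server needed:

[0, 100, 250, 400].doAdjacentPairs{ arg start, end, i;
	"slice %: frames % to % (% frames)".format(i, start, end, end - start).postln;
};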
@ -89,10 +135,17 @@
~umap = {
arg buffer, indices, ds, action, numNeighbours = 15, minDist = 0.1;
Routine{
// get all the dimensions in the same general range so that when umap
// makes its initial tree structure, the lower order mfcc coefficients
// aren't over weighted
var standardizer = FluidStandardize(s);
// this is the dimensionality reduction algorithm, see helpfile for
// more info
var umap = FluidUMAP(s,2,numNeighbours,minDist);
-var redux_ds = FluidDataSet(s);
+var redux_ds = FluidDataSet(s); // a new dataset for putting the 2D points into
s.sync;
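numNeighbours and minDist are the two parameters worth playing with; the same chain can be run by hand to compare settings (a sketch, assuming ~ds already holds the MFCC means from step 3):

(
Routine{
	var stand = FluidStandardize(s);
	var umap = FluidUMAP(s,2,5,0.9); // numDimensions, numNeighbours, minDist -- try extreme values to see their effect
	var ds2d = FluidDataSet(s);
	s.sync;
	stand.fitTransform(~ds,ds2d,{
		umap.fitTransform(ds2d,ds2d,{ "reduced to 2D".postln; });
	});
}.play;
)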
@ -117,8 +170,14 @@
~grid = {
arg buffer, indices, redux_ds, action;
Routine{
// first normalize so they're all 0 to 1
var normer = FluidNormalize(s);
// this will shift all dots around so they're in a grid shape
var grider = FluidGrid(s);
// a new dataset to hold the gridified dots
var newds = FluidDataSet(s);
s.sync;
@ -144,11 +203,17 @@
~plot = {
arg buffer, indices, redux_ds, action;
Routine{
-var kdtree = FluidKDTree(s);
+var kdtree = FluidKDTree(s); // tree structure of the 2D points for fast neighbour lookup
// a buffer for putting the 2D mouse point into so that it can be used to find the nearest neighbour
var buf_2d = Buffer.alloc(s,2);
// scaler just to double check and make sure that the points are 0 to 1
// if the plotter is receiving the output of umap, they probably won't be...
var scaler = FluidNormalize(s);
// a new dataset told the normalized data
var newds = FluidDataSet(s);
var xmin = 0, xmax = 1, ymin = 0, ymax = 1;
s.sync;
@ -160,31 +225,46 @@
arg dict;
var previous, fp;
"ds dumped".postln;
-fp = FluidPlotter(nil,Rect(0,0,800,800),dict,xmin:xmin,xmax:xmax,ymin:ymin,ymax:ymax,mouseMoveAction:{
-arg view, x, y;
-[x,y].postln;
-buf_2d.setn(0,[x,y]);
-kdtree.kNearest(buf_2d,{
-arg nearest;
-if(previous != nearest,{
-var index = nearest.asString.split($-)[1].asInteger;
-previous = nearest;
-nearest.postln;
-index.postln;
-{
-var startPos = Index.kr(indices,index);
-var dur_samps = Index.kr(indices,index + 1) - startPos;
-var sig = PlayBuf.ar(1,buffer,BufRateScale.ir(buffer),startPos:startPos);
-var dur_sec = dur_samps / BufSampleRate.ir(buffer);
-var env;
-dur_sec = min(dur_sec,1);
-env = EnvGen.kr(Env([0,1,1,0],[0.03,dur_sec-0.06,0.03]),doneAction:2);
-sig.dup * env;
-}.play;
-});
-});
-});
-action.(fp,newds);
+// pass in the dict from the dumped dataset. this is the data that we want to plot!
+{
+fp = FluidPlotter(nil,Rect(0,0,800,800),dict,mouseMoveAction:{
+// when the mouse is clicked or dragged on the plotter, this function executes
+// the view is the FluidPlotter, the x and y are the position of the mouse according
+// to the range of the plotter. i.e., since our plotter is showing us the range 0 to 1
+// for both x and y, the xy positions will always be between 0 and 1
+arg view, x, y;
+buf_2d.setn(0,[x,y]); // set the mouse position into a buffer
+// then send that buffer to the kdtree to find the nearest point
+kdtree.kNearest(buf_2d,action:{
+arg nearest; // the identifier of the nearest point is returned (always as a symbol)
+if(previous != nearest,{ // as long as this isn't also the last one that was returned
+// split the integer off the indentifier to know how to look it up for playback
+var index = nearest.asString.split($-)[1].asInteger;
+previous = nearest;
+nearest.postln;
+// index.postln;
+{
+var startPos = Index.kr(indices,index); // look in the indices buf to see where to start playback
+var dur_samps = Index.kr(indices,index + 1) - startPos; // and how long
+var sig = PlayBuf.ar(1,buffer,BufRateScale.ir(buffer),startPos:startPos);
+var dur_sec = dur_samps / BufSampleRate.ir(buffer);
+var env;
+dur_sec = min(dur_sec,1); // just in case some of the slices are *very* long...
+env = EnvGen.kr(Env([0,1,1,0],[0.03,dur_sec-0.06,0.03]),doneAction:2);
+sig.dup * env;
+}.play;
+});
+});
+});
+action.(fp,newds);
+}.defer;
});
});
});
@ -194,29 +274,17 @@
~plot.(~buffer,~indices,~ds);
)
-// ============== do all of it =======================
+// ============== do all of it in one go (without the grid for instance) =======================
(
-var path = "/Users/macprocomputer/Desktop/_flucoma/data_saves/%_2D_browsing_Pitch".format(Date.localtime.stamp);
+var path = FluidFilesPath();
-~load_folder.("/Users/macprocomputer/Desktop/_flucoma/favs mono/",{
+~load_folder.(path,{
arg buffer0;
~slice.(buffer0,{
arg buffer1, indices1;
~analyze.(buffer1, indices1,{
arg buffer2, indices2, ds2;
/* path.mkdir;
buffer2.write(path+/+"buffer.wav","wav");
indices2.write(path+/+"indices.wav","wav","float");
ds2.write(path+/+"ds.json");*/
~umap.(buffer2,indices2,ds2,{
arg buffer3, indices3, ds3;
/* path.mkdir;
buffer3.write(path+/+"buffer.wav","wav");
indices3.write(path+/+"indices.wav","wav","float");
ds3.write(path+/+"ds.json");*/
~plot.(buffer3,indices3,ds3,{
arg plotter;
"done with all".postln;
@ -227,178 +295,3 @@ var path = "/Users/macprocomputer/Desktop/_flucoma/data_saves/%_2D_browsing_Pitc
});
});
)
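As the new heading notes, this chained version skips the grid step; if you want it, ~grid slots between ~umap and ~plot using the same callback signatures as above (a sketch):

~umap.(buffer2,indices2,ds2,{
	arg buffer3, indices3, ds3;
	~grid.(buffer3,indices3,ds3,{
		arg buffer4, indices4, ds4;
		~plot.(buffer4,indices4,ds4);
	});
});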
/*=============== Know Your Data =================
hmmm... there's a lot of white space in that UMAP plot. A few options:
1. Adjust the parameters of UMAP to make the plot look different.
- minDist
- numNeighbours
2. Gridify the whole thing to spread it out.
3. Remove some of the outliers to get a more full shape.
===================================================*/
// #2
(
Window.closeAll;
Task{
var folder = "/Users/macprocomputer/Desktop/_flucoma/data_saves/211103_121441_2D_browsing/";
var ds = FluidDataSet(s);
var buffer = Buffer.read(s,folder+/+"buffer.wav");
var indices = Buffer.read(s,folder+/+"indices.wav");
var normalizer = FluidNormalize(s);
var ds_grid = FluidDataSet(s);
var grid = FluidGrid(s);
var kdtree = FluidKDTree(s);
var pt_buf = Buffer.alloc(s,2);
s.sync;
ds.read(folder+/+"ds.json",{
"read".postln;
normalizer.fitTransform(ds,ds_grid,{
"normalized".postln;
grid.fitTransform(ds_grid,ds_grid,{
"grid done".postln;
normalizer.fitTransform(ds_grid,ds_grid,{
"normalized".postln;
kdtree.fit(ds_grid,{
"tree fit".postln;
normalizer.fitTransform(ds,ds,{
"normalized".postln;
ds.dump({
arg ds_dict;
ds_grid.dump({
arg ds_grid_dict;
defer{
var distances = Dictionary.new;
var max_dist = 0;
var win, plotter, uv;
var previous;
ds_dict.at("data").keysValuesDo({
arg id, pt;
var other, pt0, pt1, dist, distpoint;
/*
id.postln;
pt.postln;
"".postln;
*/
other = ds_grid_dict.at("data").at(id);
pt0 = Point(pt[0],pt[1]);
pt1 = Point(other[0],other[1]);
dist = pt0.dist(pt1);
distpoint = Dictionary.new;
if(dist > max_dist,{max_dist = dist});
distpoint.put("pt0",pt0);
distpoint.put("pt1",pt1);
distpoint.put("dist",dist);
distances.put(id,distpoint);
});
win = Window("FluidGrid",Rect(0,0,800,800));
win.background_(Color.white);
uv = UserView(win,win.bounds)
.drawFunc_({
var size_pt = Point(uv.bounds.width,uv.bounds.height);
distances.keysValuesDo({
arg id, distpoint;
var alpha = distpoint.at("dist") / max_dist;
var pt0 = distpoint.at("pt0") * size_pt;
var pt1 = distpoint.at("pt1") * size_pt;
pt0.y = uv.bounds.height - pt0.y;
pt1.y = uv.bounds.height - pt1.y;
/* id.postln;
distpoint.postln;
alpha.postln;
"".postln;
*/
Pen.line(pt0,pt1);
Pen.color_(Color(1.0,0.0,0.0,0.25));
Pen.stroke;
});
});
plotter = FluidPlotter(win,win.bounds,ds_dict,{
arg view, x, y;
pt_buf.setn(0,[x,y]);
kdtree.kNearest(pt_buf,{
arg nearest;
if(previous != nearest,{
var index = nearest.asString.split($-)[1].asInteger;
previous = nearest;
nearest.postln;
index.postln;
{
var startPos = Index.kr(indices,index);
var dur_samps = Index.kr(indices,index + 1) - startPos;
var sig = PlayBuf.ar(1,buffer,BufRateScale.ir(buffer),startPos:startPos);
var dur_sec = dur_samps / BufSampleRate.ir(buffer);
var env = EnvGen.kr(Env([0,1,1,0],[0.03,dur_sec-0.06,0.03]),doneAction:2);
sig.dup * env;
}.play;
});
});
});
plotter.background_(Color(0,0,0,0));
ds_grid_dict.at("data").keysValuesDo({
arg id, pt;
plotter.addPoint_("%-grid".format(id),pt[0],pt[1],0.75,Color.blue.alpha_(0.5));
});
win.front;
};
})
});
});
});
});
});
});
});
}.play(AppClock);
)
// #3
(
Routine{
var folder = "/Users/macprocomputer/Desktop/_flucoma/data_saves/211103_152523_2D_browsing/";
var ds = FluidDataSet(s);
var buffer = Buffer.read(s,folder+/+"buffer.wav");
var indices = Buffer.read(s,folder+/+"indices.wav");
var robust_scaler = FluidRobustScale(s,10,90);
var newds = FluidDataSet(s);
var dsq = FluidDataSetQuery(s);
s.sync;
// {indices.plot}.defer;
ds.read(folder+/+"ds.json",{
robust_scaler.fitTransform(ds,newds,{
dsq.addRange(0,2,{
dsq.filter(0,">",-1,{
dsq.and(0,"<",1,{
dsq.and(1,">",-1,{
dsq.and(1,"<",1,{
dsq.transform(newds,newds,{
~plot.(buffer,indices,newds);
});
});
});
});
});
});
})
});
}.play;
)

@ -1,295 +0,0 @@
/*
this script shows how to
1. load a folder of sounds
2. find smaller time segments within the sounds according to novelty
3. analyse the sounds according to MFCC and add these analyses to a dataset
4. dimensionally reduce that dataset to 2D using umap
5. (optional) turn the plot of points in 2D into a grid
6. plot the points!
notice that each step in this process is created within a function so that
at the bottom of the patch, these functions are all chained together to
do the whole process in one go!
*/
(
// 1. load a folder of sounds
~load_folder = {
arg folder_path, action;
var loader = FluidLoadFolder(folder_path); // pass in the folder to load
loader.play(s,{ // play will do the actual loading
var mono_buffer = Buffer.alloc(s,loader.buffer.numFrames);
FluidBufCompose.processBlocking(s,loader.buffer,destination:mono_buffer,numChans:1,action:{
action.(mono_buffer);
});
});
};
// this will load all the audio files that are included with the flucoma toolkit, but you can put your own path here:
~load_folder.(FluidFilesPath(),{
arg buffer;
"mono buffer: %".format(buffer).postln;
~buffer = buffer; // save the buffer to a global variable so we can use it later
});
)
(
// 2. slice the sounds
~slice = {
arg buffer, action;
var indices = Buffer(s); // a buffer for saving the discovered indices into
// play around the the threshold anad feature (see help file) to get differet slicing results
FluidBufNoveltySlice.processBlocking(s,buffer,indices:indices,algorithm:0,threshold:0.5,action:{
"% slices found".format(indices.numFrames).postln;
"average duration in seconds: %".format(buffer.duration/indices.numFrames).postln;
action.(buffer,indices);
});
};
~slice.(~buffer,{
arg buffer, indices;
~indices = indices;
});
)
// you may want to check the slice points here using FluidWaveform
FluidWaveform(~buffer,~indices); // it may also be way too many slices to see properly!
(
// 3. analyze the slices
~analyze = {
arg buffer, indices, action;
var time = SystemClock.seconds; // a timer just to keep tabs on how long this stuff is taking
Routine{
var feature_buf = Buffer(s); // a buffer for storing the mfcc analyses into
var stats_buf = Buffer(s); // a buffer for storing the stats into
var point_buf = Buffer(s); // a buffer we will use to add points to the dataset
var ds = FluidDataSet(s); // the dataset that we'll add all these mfcc analyses to
// bring the values in the slicepoints buffer from the server to the language as a float array
indices.loadToFloatArray(action:{
arg fa; // float array
fa.doAdjacentPairs{
/*
take each of the adjacent pairs and pass them to this function as an array of 2 values
nb. for example [0,1,2,3,4] will execute this function 4 times, passing these 2 value arrays:
[0,1]
[1,2]
[2,3]
[3,4]
this will give us each slice point *and* the next slice point so that we
can tell the analyzers where to start analyzing and how many frames to analyze
*/
arg start, end, i;
// the next slice point minus the current one will give us the difference how many slices to analyze)
var num = end - start;
/* analyze the drum buffer starting at `start_samps` and for `num_samps` samples
this returns a buffer (feautre_buf) that is 13 channels wide (for the 13 mfccs, see helpfile) and
however many frames long as there are fft frames in the slice */
FluidBufMFCC.processBlocking(s,buffer,start,num,features:feature_buf,numCoeffs:13,startCoeff:1);
/* perform a statistical analysis on the mfcc analysis
this will return just 13 channels, one for each mfcc channel in the feature_buf.
each channel will have 7 frames corresponding to the 7 statistical analyses that it performs
on that channel */
FluidBufStats.processBlocking(s,feature_buf,stats:stats_buf);
/* take all 13 channels from stats_buf, but just the first frame (mean) and convert it into a buffer
that is 1 channel and 13 frames. this shape will be considered "flat" and therefore able to be
added to the dataset */
FluidBufFlatten.processBlocking(s,stats_buf,numFrames:1,destination:point_buf);
// add it
ds.addPoint("slice-%".format(i),point_buf);
"Processing Slice % / %".format(i+1,indices.numFrames-1).postln;
};
s.sync;
feature_buf.free; stats_buf.free; point_buf.free; // free buffers
ds.print;
"Completed in % seconds".format(SystemClock.seconds - time).postln;
action.(buffer,indices,ds);
});
}.play;
};
~analyze.(~buffer,~indices,{
arg buffer, indices, ds;
~ds = ds;
});
)
(
// 4. Reduce to 2 Dimensions
~umap = {
arg buffer, indices, ds, action, numNeighbours = 15, minDist = 0.1;
Routine{
// get all the dimensions in the same general range so that when umap
// makes its initial tree structure, the lower order mfcc coefficients
// aren't over weighted
var standardizer = FluidStandardize(s);
// this is the dimensionality reduction algorithm, see helpfile for
// more info
var umap = FluidUMAP(s,2,numNeighbours,minDist);
var redux_ds = FluidDataSet(s); // a new dataset for putting the 2D points into
s.sync;
standardizer.fitTransform(ds,redux_ds,{
"standardization done".postln;
umap.fitTransform(redux_ds,redux_ds,{
"umap done".postln;
action.(buffer,indices,redux_ds);
});
});
}.play;
};
~umap.(~buffer,~indices,~ds,{
arg buffer, indices, redux_ds;
~ds = redux_ds;
});
)
(
// 5. Gridify if Desired
~grid = {
arg buffer, indices, redux_ds, action;
Routine{
// first normalize so they're all 0 to 1
var normer = FluidNormalize(s);
// this will shift all dots around so they're in a grid shape
var grider = FluidGrid(s);
// a new dataset to hold the gridified dots
var newds = FluidDataSet(s);
s.sync;
normer.fitTransform(redux_ds,newds,{
"normalization done".postln;
grider.fitTransform(newds,newds,{
"grid done".postln;
action.(buffer,indices,newds);
});
});
}.play;
};
~grid.(~buffer,~indices,~ds,{
arg buffer, indices, grid_ds;
~ds = grid_ds;
});
)
(
// 6. Plot
~plot = {
arg buffer, indices, redux_ds, action;
Routine{
var kdtree = FluidKDTree(s); // tree structure of the 2D points for fast neighbour lookup
// a buffer for putting the 2D mouse point into so that it can be used to find the nearest neighbour
var buf_2d = Buffer.alloc(s,2);
// scaler just to double check and make sure that the points are 0 to 1
// if the plotter is receiving the output of umap, they probably won't be...
var scaler = FluidNormalize(s);
// a new dataset told the normalized data
var newds = FluidDataSet(s);
s.sync;
scaler.fitTransform(redux_ds,newds,{
"scaling done".postln;
kdtree.fit(newds,{
"kdtree fit".postln;
newds.dump({
arg dict;
var previous, fp;
"ds dumped".postln;
// pass in the dict from the dumped dataset. this is the data that we want to plot!
fp = FluidPlotter(nil,Rect(0,0,800,800),dict,mouseMoveAction:{
// when the mouse is clicked or dragged on the plotter, this function executes
// the view is the FluidPlotter, the x and y are the position of the mouse according
// to the range of the plotter. i.e., since our plotter is showing us the range 0 to 1
// for both x and y, the xy positions will always be between 0 and 1
arg view, x, y;
buf_2d.setn(0,[x,y]); // set the mouse position into a buffer
// then send that buffer to the kdtree to find the nearest point
kdtree.kNearest(buf_2d,{
arg nearest; // the identifier of the nearest point is returned (always as a symbol)
if(previous != nearest,{ // as long as this isn't also the last one that was returned
// split the integer off the indentifier to know how to look it up for playback
var index = nearest.asString.split($-)[1].asInteger;
previous = nearest;
nearest.postln;
// index.postln;
{
var startPos = Index.kr(indices,index); // look in the indices buf to see where to start playback
var dur_samps = Index.kr(indices,index + 1) - startPos; // and how long
var sig = PlayBuf.ar(1,buffer,BufRateScale.ir(buffer),startPos:startPos);
var dur_sec = dur_samps / BufSampleRate.ir(buffer);
var env;
dur_sec = min(dur_sec,1); // just in case some of the slices are *very* long...
env = EnvGen.kr(Env([0,1,1,0],[0.03,dur_sec-0.06,0.03]),doneAction:2);
sig.dup * env;
}.play;
});
});
});
action.(fp,newds);
});
});
});
}.play;
};
~plot.(~buffer,~indices,~ds);
)
// ============== do all of it in one go =======================
(
var path = FluidFilesPath();
~load_folder.(path,{
arg buffer0;
~slice.(buffer0,{
arg buffer1, indices1;
~analyze.(buffer1, indices1,{
arg buffer2, indices2, ds2;
~umap.(buffer2,indices2,ds2,{
arg buffer3, indices3, ds3;
~plot.(buffer3,indices3,ds3,{
arg plotter;
"done with all".postln;
~fp = plotter;
});
});
});
});
});
)

@ -1,108 +0,0 @@
(
Task{
var folder = "/Users/macprocomputer/Desktop/_flucoma/data_saves/211103_152953_2D_browsing_MFCC/";
// var folder = "/Users/macprocomputer/Desktop/_flucoma/data_saves/211103_161354_2D_browsing_SpectralShape/";
// var folder = "/Users/macprocomputer/Desktop/_flucoma/data_saves/211103_161638_2D_browsing_Pitch/";
~ds_original = FluidDataSet(s);
~buffer = Buffer.read(s,folder+/+"buffer.wav");
~indices = Buffer.read(s,folder+/+"indices.wav");
~kdtree = FluidKDTree(s,6);
~ds = FluidDataSet(s);
s.sync;
~indices.loadToFloatArray(action:{
arg fa;
~indices = fa;
});
~ds_original.read(folder+/+"ds.json",{
~ds.read(folder+/+"ds.json",{
~kdtree.fit(~ds,{
~ds.dump({
arg dict;
~ds_dict = dict;
"kdtree fit".postln;
});
});
});
});
}.play;
~play_id = {
arg id;
var index = id.asString.split($-)[1].asInteger;
var start_samps = ~indices[index];
var end_samps = ~indices[index+1];
var dur_secs = (end_samps - start_samps) / ~buffer.sampleRate;
{
var sig = PlayBuf.ar(1,~buffer,BufRateScale.ir(~buffer),startPos:start_samps);
var env = EnvGen.kr(Env([0,1,1,0],[0.03,dur_secs-0.06,0.03]),doneAction:2);
sig.dup;// * env;
}.play;
dur_secs;
};
~pt_buf = Buffer.alloc(s,~ds_dict.at("cols"));
)
(
// hear the 5 nearest points
Routine{
// var id = "slice-558";
var id = ~ds_dict.at("data").keys.choose;
~ds.getPoint(id,~pt_buf,{
~kdtree.kNearest(~pt_buf,{
arg nearest;
Routine{
id.postln;
~play_id.(id).wait;
nearest[1..].do{
arg near;
1.wait;
near.postln;
~play_id.(near).wait;
};
}.play;
})
});
}.play;
)
// Standardize
(
Routine{
var scaler = FluidStandardize(s);
s.sync;
scaler.fitTransform(~ds_original,~ds,{
~kdtree.fit(~ds,{
"standardized & kdtree fit".postln;
});
});
}.play;
)
// Normalize
(
Routine{
var scaler = FluidNormalize(s);
s.sync;
scaler.fitTransform(~ds_original,~ds,{
~kdtree.fit(~ds,{
"normalized & kdtree fit".postln;
});
});
}.play;
)
// Robust Scaler
(
Routine{
var scaler = FluidRobustScale(s);
s.sync;
scaler.fitTransform(~ds_original,~ds,{
~kdtree.fit(~ds,{
"normalized & kdtree fit".postln;
});
});
}.play;
)

@ -1,6 +1,3 @@
s.options.sampleRate_(44100);
s.options.device_("Fireface UC Mac (24006457)");
(
// decompose!
s.waitForBoot{
@ -8,7 +5,7 @@ s.waitForBoot{
var drums = Buffer.read(s,FluidFilesPath("Nicol-LoopE-M.wav"));
var resynth = Buffer(s);
var n_components = 2;
-FluidBufNMF.process(s,drums,resynth:resynth,components:n_components).wait;
+FluidBufNMF.process(s,drums,resynth:resynth,components:n_components,resynthMode: 1).wait;
"original sound".postln;
{
@ -46,7 +43,7 @@ Routine{
~bases = Buffer(s);
~activations = Buffer(s);
~resynth = Buffer(s);
-FluidBufNMF.process(s,drums,bases:~bases,activations:~activations,resynth:~resynth,components:n_components).wait;
+FluidBufNMF.process(s,drums,bases:~bases,activations:~activations,resynth:~resynth,components:n_components,resynthMode: 1).wait;
{
~bases.plot("bases");
~activations.plot("activations");

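The resynth buffer holds one channel per component (the same convention as the 10-component example further down), so the two reconstructions from the second block can be auditioned separately. A sketch; which part of the kit lands in which component varies from run to run:

{ PlayBuf.ar(2,~resynth)[0] }.play; // component 1 on its own
{ PlayBuf.ar(2,~resynth)[1] }.play; // component 2 on its own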
@ -105,10 +105,10 @@ s.waitForBoot{
// wiggle as the neural network makes it's predictions!
SendReply.kr(trig,"/predictions",val);
-// the actual synthesis algorithm. made by PA Tremblay
+// the actual synthesis algorithm, made by P.A. Tremblay
#feed2,feed1 = LocalIn.ar(2);
-osc1 = MoogFF.ar(SinOsc.ar((((feed1 * val[0]) + val[1]) * base1).midicps,mul: (val[2] * 50).dbamp).atan,(base3 - (val[3] * (FluidLoudness.kr(feed2, 1, 0, hopSize: 64)[0].clip(-120,0) + 120))).lag(128/44100).midicps, val[4] * 3.5);
+osc1 = MoogFF.ar(SinOsc.ar((((feed1 * val[0]) + val[1]) * base1).midicps,mul: (val[2] * 50).dbamp).atan,(base3 - (val[3] * (FluidLoudness.kr(feed2,truePeak: 0, hopSize: 64)[0].clip(-120,0) + 120))).lag(128/44100).midicps, val[4] * 3.5);
-osc2 = MoogFF.ar(SinOsc.ar((((feed2 * val[5]) + val[6]) * base2).midicps,mul: (val[7] * 50).dbamp).atan,(base3 - (val[8] * (FluidLoudness.kr(feed1, 1, 0, hopSize: 64)[0].clip(-120,0) + 120))).lag(128/44100).midicps, val[9] * 3.5);
+osc2 = MoogFF.ar(SinOsc.ar((((feed2 * val[5]) + val[6]) * base2).midicps,mul: (val[7] * 50).dbamp).atan,(base3 - (val[8] * (FluidLoudness.kr(feed1,truePeak: 0, hopSize: 64)[0].clip(-120,0) + 120))).lag(128/44100).midicps, val[9] * 3.5);
Out.ar(0,LeakDC.ar([osc1,osc2],mul: 0.1));
LocalOut.ar([osc1,osc2]);
}.play;

@ -1,9 +1,9 @@
// first run this code - it will sample 40 random points
// you can add more points if you want
// you must 'train' it
// then run the playing target at line 335 below and press 'predict'
(
Window.closeAll;
s.options.inDevice_("MacBook Pro Microphone");
s.options.outDevice_("External Headphones");
// s.options.sampleRate_(48000);
s.options.sampleRate_(44100);
s.waitForBoot{
Task{
var win = Window(bounds:Rect(100,100,1000,800));
@ -134,7 +134,7 @@ s.waitForBoot{
StaticText(win,Rect(0,0,label_width,20)).string_("% MFCCs".format(nMFCCs));
win.view.decorator.nextLine;
-statsWinSl = EZSlider(win,Rect(0,0,item_width,20),"fmcc avg smooth",nil.asSpec,{arg sl; synth.set(\avg_win,sl.value)},0,true,label_width);
+statsWinSl = EZSlider(win,Rect(0,0,item_width,20),"mfcc avg smooth",nil.asSpec,{arg sl; synth.set(\avg_win,sl.value)},0,true,label_width);
win.view.decorator.nextLine;
mfcc_multslider = MultiSliderView(win,Rect(0,0,item_width,200))
@ -334,7 +334,6 @@ s.waitForBoot{
(
Routine{
//~path = FluidFilesPath("Tremblay-AaS-VoiceQC-B2K.wav");
~path = FluidFilesPath("Tremblay-CEL-GlitchyMusicBoxMelo.wav");
~test_buf = Buffer.readChannel(s,~path,channels:[0]);
s.sync;

@ -151,3 +151,5 @@ FluidBufStats.process(s,~pitches,numChans:1,stats:~stats,weights:~thresh_buf,out
//compare with the source
~src.play;
)
// further investigations are also in the Examples/dataset/1-learning examples/10b-weighted-pitch-comparison.scd

@ -25,7 +25,7 @@
Routine{
for (1,x.size - 1, {
arg i;
-FluidBufCompose.process(s,x[i],destination:x[0], destStartFrame:x[0].numFrames);
+FluidBufCompose.process(s,x[i],destination:x[0], destStartFrame:x[0].numFrames).wait;
});
"Done!".postln;
}.play;
@ -83,7 +83,7 @@
Routine{
for (1,x.size - 1, {
arg i;
-FluidBufCompose.process(s,x[i],destination:x[0], destStartChan:x[0].numChannels);
+FluidBufCompose.process(s,x[i],destination:x[0], destStartChan:x[0].numChannels).wait;
});
"Done!".postln;
}.play;

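The only change in these two loops is the added .wait: FluidBufCompose.process returns immediately and the copy runs on the server, so inside a Routine, .wait suspends the Routine until the server reports that job done and each copy is finished before the next iteration reads x[0].numFrames or x[0].numChannels. The bare pattern (a sketch):

(
Routine{
	var job = FluidBufCompose.process(s,x[1],destination:x[0],destStartFrame:x[0].numFrames);
	job.wait; // without this, "copied" would print while the copy is still in flight
	"copied".postln;
}.play;
)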
@ -30,7 +30,7 @@
~knnALLval.add((x["data"][i.asString]))
}};
)
-~knnALLval.flatten(1).plot(\source,discrete: true, minval:0, maxval: 1).plotMode=\bars;
+~knnALLval.flatten(1).plot(\knn,discrete: true, minval:0, maxval: 1).plotMode=\bars;
//Regressing directly these value-pairs in knn we see a full set of values being predicted: we can see what looks like linear interpolation, but not outside the boundaries. This is because we make a weighted average of the nearest 2 neigbourgs, which are not necessarily around the predicted value, they might both be on the same side like 0 to 9 (10 and 20 are nearest) and 31 to 40 (20 and 30 are nearest). //Regressing directly these value-pairs in knn we see a full set of values being predicted: we can see what looks like linear interpolation, but not outside the boundaries. This is because we make a weighted average of the nearest 2 neigbourgs, which are not necessarily around the predicted value, they might both be on the same side like 0 to 9 (10 and 20 are nearest) and 31 to 40 (20 and 30 are nearest).
@ -46,7 +46,7 @@
~mlpALLval.add((x["data"][i.asString]))
}};
)
-~mlpALLval.flatten(1).plot(\source,discrete: true, minval:0, maxval: 1).plotMode=\bars;
+~mlpALLval.flatten(1).plot(\mlp_full_range,discrete: true, minval:0, maxval: 1).plotMode=\bars;
//We see that we have a large bump and nothing else. This is because our input are very large (10-30) and outside the optimal range of the activation function (0-1 for sigmoid) so our network saturates and cannot recover. If we normalise our inputs and we rerun the network we get a curve that fits the 3 values. You can fit more than once to get more iterations and lower the error. //We see that we have a large bump and nothing else. This is because our input are very large (10-30) and outside the optimal range of the activation function (0-1 for sigmoid) so our network saturates and cannot recover. If we normalise our inputs and we rerun the network we get a curve that fits the 3 values. You can fit more than once to get more iterations and lower the error.
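The normalisation described here would look something like the sketch below; the regressor ~mlp and the output dataset ~dsOUT are not visible in this hunk, so those names (and the fit signature) are assumptions:

(
~normIN = FluidNormalize(s);
~dsINnorm = FluidDataSet(s);
~normIN.fitTransform(~dsIN,~dsINnorm,{
	~mlp.fit(~dsINnorm,~dsOUT,{ arg loss; "loss: %".format(loss).postln; }); // refit on inputs scaled to 0-1
});
)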
@ -63,7 +63,7 @@
~mlpALLval.add((x["data"][i.asString]))
}};
)
-~mlpALLval.flatten(1).plot(\source,discrete: true, minval:0, maxval: 1).plotMode=\bars;
+~mlpALLval.flatten(1).plot(\mlp_normalized,discrete: true, minval:0, maxval: 1).plotMode=\bars;
//Now we can add one value to our sparse dataset. Note that we revert back to full range values here for the example
~dsIN.load(Dictionary.newFrom([\cols, 1, \data, Dictionary.newFrom([\point1, [10], \point2, [20], \point3, [30], \point4, [22]])]));

@ -47,9 +47,9 @@ y = Synth(\becauseIcan,[\bufnum, b.bufnum, \nmfa, c.bufnum, \nmfb, d.bufnum, \in
(
w = OSCFunc({ arg msg;
if(msg[3]== 1, {
-FluidBufNMF.process(s, b, numFrames: 22500, resynth: c.bufnum, components: 3, fftSize: 1024, windowSize: 512, hopSize: 256);
+FluidBufNMF.process(s, b, numFrames: 22500, resynth: c.bufnum, resynthMode: 1, components: 3, fftSize: 1024, windowSize: 512, hopSize: 256);
}, {
-FluidBufNMF.process(s, b, 22050, 22500, resynth: d.bufnum, components: 3, fftSize: 1024, windowSize: 512, hopSize: 256);
+FluidBufNMF.process(s, b, 22050, 22500, resynth: d.bufnum, resynthMode: 1, components: 3, fftSize: 1024, windowSize: 512, hopSize: 256);
});}, '/processplease', s.addr);
)

@ -11,7 +11,7 @@ e = Buffer.new(s);
// train where all objects are present
(
Routine {
-FluidBufNMF.process(s,b,130000,150000,0,1, c, x, components:10);
+FluidBufNMF.process(s, b, startFrame: 130000, numFrames: 150000, numChans: 1, resynth: c, resynthMode: 1, bases: x, components:10).wait;
c.query;
}.play;
)
@ -19,12 +19,12 @@ Routine {
// wait for the query to print
// then find a component for each item you want to find. You could also sum them. Try to find a component with a good object-to-rest ratio
(
-~dog =1;
+~dog =0;
{PlayBuf.ar(10,c)[~dog]}.play
)
(
-~bird = 3;
+~bird = 1;
{PlayBuf.ar(10,c)[~bird]}.play
)
@ -32,8 +32,8 @@ Routine {
// copy at least one other component to a third filter, a sort of left-over channel
(
Routine{
-FluidBufCompose.process(s, x, startChan:~dog, numChans: 1, destination: e);
+FluidBufCompose.process(s, x, startChan:~dog, numChans: 1, destination: e).wait;
-FluidBufCompose.process(s, x, startChan:~bird, numChans: 1, destStartChan: 1, destination: e, destGain:1);
+FluidBufCompose.process(s, x, startChan:~bird, numChans: 1, destStartChan: 1, destination: e, destGain:1).wait;
(0..9).removeAll([~dog,~bird]).do({|chan|FluidBufCompose.process(s,x, startChan:chan, numChans: 1, destStartChan: 2, destination: e, destGain:1)});
e.query;
}.play;
