Merge branch 'clients/inter_client_comms' of https://bitbucket.org/flucoma/flucoma-supercollider into clients/inter_client_comms

nix
Gerard, 6 years ago (commit 7c83690d35)

@@ -14,7 +14,8 @@ FluidKMeans : FluidManipulationClient {
 fit{|dataset,k, maxIter = 100, buffer, action|
 buffer = buffer ? -1;
 this.k = k;
-this.prSendMsg(\fit,[dataset.asSymbol, k,maxIter, buffer.asUGenInput],action,[numbers(FluidMessageResponse,_,k,_)]);
+// this.prSendMsg(\fit,[dataset.asSymbol, k,maxIter, buffer.asUGenInput],action,[numbers(FluidMessageResponse,_,k,_)]);
+this.prSendMsg(\fit,[dataset.asSymbol, k,maxIter],action,[numbers(FluidMessageResponse,_,k,_)]);
 }
 fitPredict{|dataset,labelset, k, maxIter = 100, action|
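Note: fit no longer forwards the optional output buffer to the server; the old send is kept commented out above. A minimal sketch of calling the new signature (the ~ds name and the k/maxIter values are assumed):

~kmeans = FluidKMeans.new(s);
~kmeans.fit(~ds, 4, 100, action: {|x| x.postln}); // k = 4, maxIter = 100; the buffer arg is now unused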

@@ -22,14 +22,14 @@ FileDialog.new(fileMode:2,okFunc:{|x| ~path = x[0];
 )
 //STEP 2: Make a FluidDataSet
-~dataset = FluidDataSet.new(s,"mfccs", 96) //12 dims * 4 stats * 2 derivatives
+~dataset = FluidDataSet.new(s,"mfccs");
 //STEP 3A: EITHER populate the dataset like so (and cry about how long the data point assembly takes)
 (
 Routine{
 var tmpMFCCs = Buffer.new(s);
 var tmpStats = Buffer.new(s);
-var tmpFlat = Buffer.alloc(s,12 * 4 * 2, 1);
+var tmpFlat = Buffer.alloc(s,12 * 4 * 2, 1);//12 dims * 4 stats * 2 derivatives
 s.sync;
 ~audioBuffers.do{|b|
 ("Analyzing" + b.path).postln;
@@ -61,7 +61,7 @@ Routine{
 var tmpStats = Buffer.new(s);
 var langStats;
 var langFlat;
-var tmpFlat = Buffer.alloc(s,12 * 4 * 2, 1);
+var tmpFlat = Buffer.alloc(s,12 * 4 * 2, 1); //12 dims * 4 stats * 2 derivatives
 s.sync;
 ~audioBuffers.do{|b|
 ("Analyzing" + b.path).postln;

@@ -1,5 +1,5 @@
 s.reboot
-~ds = FluidDataSet.new(s,\simple1data,1)
+~ds = FluidDataSet.new(s,\simple1data)
 ~point = Buffer.alloc(s,1,1)
 (
 Routine{
@@ -30,8 +30,8 @@ Routine{
 /*** KMEANS ***/
 ~kmeans = FluidKMeans.new(s)
-~nClusters = 4; //play with this
-~kmeans.fit(~ds,~nClusters,100,action:{"Done fitting".postln})
+~nClusters = 2; //play with this
+~kmeans.fit(~ds,~nClusters,100,action:{|x| "Done fitting with these number of items per cluster ".post;x.postln;})
 (
 Routine{
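Note: the fit action now takes an argument, apparently the number of items landing in each cluster. A sketch of reading it (the interpretation of the array is an assumption from the new post string):

~kmeans.fit(~ds, ~nClusters, 100, action: {|sizes|
	sizes.postln; // assumed: one count per cluster
});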
@@ -50,11 +50,9 @@ Routine{
 (
 Routine{
 var n;
-~labels.size({|x| n = x.asInt});
-n.asInt.do{|i|
+~labels.size({|x|
+x.asInteger.do{|i|
 ~labels.getLabel(i.asString,action: {|l|("Label for" + i ++ ":" + l).postln});
-}
+};});
 }.play
 )
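Note: this rewrite fixes an ordering bug, not just style. The old code read n outside the asynchronous size callback, so the loop could run before the reply arrived; the new code nests the loop inside the callback. The general pattern:

~labels.size({|n|
	n.asInteger.do{|i|
		~labels.getLabel(i.asString, action: {|l| ("Label for" + i ++ ":" + l).postln});
	};
});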

@@ -1,5 +1,5 @@
 s.reboot
-~ds = FluidDataSet.new(s,\simple1data,1)
+~ds = FluidDataSet.new(s,\simple1data)
 ~point = Buffer.alloc(s,1,1)
 (
 Routine{
@@ -34,7 +34,8 @@ Routine{
 ~kmeans = FluidKMeans.new(s)
 ~nClusters = 2; //play with this
-~kmeans.fit(~ds,~nClusters,100,action:{"Done fitting".postln})
+~kmeans.fit(~ds,~nClusters,100,action:{|x| "Done fitting with these number of items per cluster ".post;x.postln;})
 (
 Routine{
 10.do{|i|
@@ -48,14 +49,13 @@ Routine{
 ~labels = FluidLabelSet(s,\simple1label);
 ~kmeans.predict(~ds,~labels, {|x| ("Size of each cluster" + x).postln})
 (
-s
 Routine{
 var n;
-~labels.size({|x| n = x.asInt});
-n.asInt.do{|i|
+~labels.size({|x|
+x.asInteger.do{|i|
 ~labels.getLabel(i.asString,action: {|l|("Label for" + i ++ ":" + l).postln});
-}
+};});
 }.play
 )

@@ -1,6 +1,6 @@
 (
-~simpleInput = FluidDataSet(s,\simpleInput,2);
-~simpleOutput = FluidLabelSet(s,\simpleOutput,2);
+~simpleInput = FluidDataSet(s,\simpleInput);
+~simpleOutput = FluidLabelSet(s,\simpleOutput);
 b = Buffer.alloc(s,2);
 ~knn = FluidKNNClassifier(s);
 k = 3
@@ -23,7 +23,7 @@ v.mouseDownAction = {|view, x, y|myx=x;myy=y;w.refresh;
 Routine{
 b.setn(0,[myx,myy]);
 s.sync;
-~knn.predictPoint(b, k, {|x|x.postln;});
+~knn.predictPoint(b, k, action: {|x|x.postln;});
 }.play;};
 //custom redraw function
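Note: the callback moves to a keyword argument, which suggests predictPoint gained another positional parameter ahead of action. Calling style after the change (the meaning of the callback value is an assumption):

~knn.predictPoint(b, k, action: {|label| label.postln}); // label: presumably the predicted class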

@@ -39,7 +39,7 @@
 ~normed_dataset = FluidDataSet(s,\normed,~nb_of_dim);
 // normalize the full dataset
-~normalize.normalize(~dataset,~normed_dataset,{"done".postln;});
+~normalize.transform(~dataset,~normed_dataset,{"done".postln;});
 // look at a point to see that it has points in it
 ~normed_dataset.getPoint("point-0",~query_buf,{~query_buf.getn(0,~nb_of_dim,{|x|x.postln;});});
@@ -54,7 +54,7 @@
 // standardize the full dataset
 ~standardized_dataset = FluidDataSet(s,\standardized,~nb_of_dim);
-~standardize.standardize(~dataset,~standardized_dataset,{"done".postln;});
+~standardize.transform(~dataset,~standardized_dataset,{"done".postln;});
 // look at a point to see that it has points in it
 ~standardized_dataset.getPoint("point-0",~query_buf,{~query_buf.getn(0,~nb_of_dim,{|x|x.postln;});});
@@ -79,7 +79,7 @@
 // normalise that point (~query_buf) to be at the right scale
 ~normbuf = Buffer.alloc(s,~nb_of_dim);
-~normalize.normalizePoint(~query_buf,~normbuf);
+~normalize.transformPoint(~query_buf,~normbuf);
 ~normbuf.getn(0,~nb_of_dim,{arg vec;vec.postln;});
 // make a tree of the normalized database and query with the normalize buffer
@@ -91,7 +91,7 @@
 // standardize that same point (~query_buf) to be at the right scale
 ~stdbuf = Buffer.alloc(s,~nb_of_dim);
-~standardize.standardizePoint(~query_buf,~stdbuf);
+~standardize.transformPoint(~query_buf,~stdbuf);
 ~stdbuf.getn(0,~nb_of_dim,{arg vec;vec.postln;});
 // make a tree of the standardized database and query with the normalize buffer
@@ -107,8 +107,8 @@
 ~query_buf.fill(0,~nb_of_dim,50);
 // normalize and standardize the query buffer. Note that we do not need to fit since we have not added a point to our reference dataset
-~normalize.normalizePoint(~query_buf,~normbuf);
-~standardize.standardizePoint(~query_buf,~stdbuf);
+~normalize.transformPoint(~query_buf,~normbuf);
+~standardize.transformPoint(~query_buf,~stdbuf);
 //query the single nearest neighbourg via 3 different data scaling. Depending on the random source at the begining, you will get small to large differences between the 3 answers!
 ~tree.kNearest(~query_buf,1, {|x| ("Original:" + x).post;~tree.kNearestDist(~query_buf,1, {|x| (" with a distance of " + x).postln});});
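Note: the last five hunks converge FluidNormalize and FluidStandardize on one verb: normalize/standardize become transform, and normalizePoint/standardizePoint become transformPoint. A sketch of the shared call pattern after the rename (assuming both objects were fitted earlier in the file):

~normalize.transform(~dataset, ~normed_dataset, {"done".postln}); // whole dataset, in to out
~normalize.transformPoint(~query_buf, ~normbuf);                  // single point, buffer to buffer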

@@ -144,4 +144,16 @@ Routine{
 }
 }.play
 )
+//Inspect the dataset using print (abbreviated output) or dump (JSON output)
+~ds.print //to post window by default, but you can supply a custom action instead
+~ds.dump //likewise
+//for example
+~ds.dump{|j|
+~dict = j.parseJSON
+}
+//Now we have a Dictionary of our data and IDs
+~dict.postcs
 ::
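Note: the added lines document print and dump for inspecting a FluidDataSet. A hedged sketch of walking the parsed result; the "data" key is an assumption about the JSON layout:

~ds.dump({|j|
	~dict = j.parseJSON;
	~dict["data"].keysValuesDo{|id, vals| (id + "->" + vals).postln}; // assumed: id to point values
});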

@@ -105,9 +105,6 @@ fork{
 }
 )
-//Dims of kmeans should match dataset
-~kmeans.cols
 //Return labels of clustered points
 (
 ~assignments = Array.new(~testpoints.size);

@@ -15,7 +15,7 @@ The server to run this model on.
 INSTANCEMETHODS::
 METHOD:: fit
-Map a source link::Classes/FluidDataSet:: to a target; they must be the same size, but can have different dimesionality
+Map a source link::Classes/FluidDataSet:: to a target; they must be the same size, but can have different dimensionality
 ARGUMENT:: sourceDataset
 Source data
 ARGUMENT:: targetDataset
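Note: per the doc text, fit pairs points across two datasets of equal size. A minimal sketch of such a call; ~model stands in for whichever class this schelp file documents, which the hunk does not name:

~model.fit(~sourceDataset, ~targetDataset, action: {"mapping fitted".postln});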
