diff --git a/release-packaging/Classes/FluidMLP.sc b/release-packaging/Classes/FluidMLP.sc index b7fef0e..d459d1f 100644 --- a/release-packaging/Classes/FluidMLP.sc +++ b/release-packaging/Classes/FluidMLP.sc @@ -61,7 +61,7 @@ FluidMLPRegressor : FluidRealTimeModel { predictPoint { |sourceBuffer, targetBuffer, action| actions[\predictPoint] = [nil,{action.value(targetBuffer)}]; - this.predictPointMsg(sourceBuffer, targetBuffer).postln; + this.predictPointMsg(sourceBuffer, targetBuffer); this.prSendMsg(this.predictPointMsg(sourceBuffer, targetBuffer)); } diff --git a/release-packaging/Examples/dataset/1-learning examples/10b-weighted-pitch-comparison.scd b/release-packaging/Examples/dataset/1-learning examples/10b-weighted-pitch-comparison.scd index 72b8e0c..a063065 100644 --- a/release-packaging/Examples/dataset/1-learning examples/10b-weighted-pitch-comparison.scd +++ b/release-packaging/Examples/dataset/1-learning examples/10b-weighted-pitch-comparison.scd @@ -68,7 +68,6 @@ c.free //for completion, here is just with rejection of outliers - not as good, but a decent second best! FluidBufStats.process(s,~pitches, stats:~stats,outliersCutoff: 1.5) ~stats.getn(0,14,{|x|~pitchIQRStats = x;x.reshape(7,2).do{|y| "%\t\t\t%\n".postf(y[0].round(0.1),y[1].round(0.01))}}) -//now that is impressive! 
c = {SinOsc.ar(~pitchIQRStats[0],mul: 0.05)}.play b.play c.free diff --git a/release-packaging/Examples/dataset/1-learning examples/12-windowed-clustered-segmentation.scd b/release-packaging/Examples/dataset/1-learning examples/12-windowed-clustered-segmentation.scd index eb7104e..5167a5d 100644 --- a/release-packaging/Examples/dataset/1-learning examples/12-windowed-clustered-segmentation.scd +++ b/release-packaging/Examples/dataset/1-learning examples/12-windowed-clustered-segmentation.scd @@ -4,7 +4,7 @@ //slightly oversegment with novelty //segments should still make sense but might cut a few elements in 2 or 3 -~slicer = FluidSliceCorpus({ |src,start,num,dest| FluidBufNoveltySlice.kr(src,start,num,indices:dest, feature: 1, kernelSize: 29, threshold: 0.1, filterSize: 5, hopSize: 128)}); +~slicer = FluidSliceCorpus({ |src,start,num,dest| FluidBufNoveltySlice.kr(src,start,num,indices:dest, feature: 1, kernelSize: 29, threshold: 0.1, filterSize: 5, hopSize: 128, blocking: 1)}); ~slicer.play(s, ~loader.buffer,~loader.index); //test the segmentation by looping them @@ -45,7 +45,11 @@ a = Slider(w, Rect(10, 20, 330, 20)) writer = FluidDataSetWr.kr(~slicesShapes,label, -1, ~flatbuf[voice], Done.kr(flatten),blocking: 1); }); ) -~extractor.play(s,~loader.buffer, ~slicer.index); + +( +t = Main.elapsedTime; +~extractor.play(s,~loader.buffer, ~slicer.index, action:{(Main.elapsedTime - t).postln;"Analysis done".postln}); +) ~slicesMFCC.print ~slicesShapes.print @@ -102,22 +106,24 @@ a = Slider(w, Rect(10, 20, 330, 20)) //copy the items to a subdataset from hear winSize.do{|i| tempDict.put((i.asString), ~sliceDict["data"][(~orginalkeys[(i+head)]).asString]);//here one could curate which stats to take - "whichslices:%\n".postf(i+head); + // "whichslices:%\n".postf(i+head); }; ~windowDS.load(Dictionary.newFrom([\cols, ~sliceDict["cols"].asInteger, \data, tempDict]), action: { - "% - loaded\n".postf(head); + // "% - loaded\n".postf(head); //kmeans 2 and retrieve ordered array 
of class assignations ~kmeans.fitPredict(~windowDS, ~windowLS, action: {|x| nbass = x; - "% - fitted1: ".postf(head); nbass.postln; + // "% - fitted1: ".postf(head); nbass.postln; if (nbass.includes(winSize.asFloat), { ~kmeans.fitPredict(~windowDS, ~windowLS, {|x| - nbass = x; "% - fitted2: ".postf(head); nbass.postln; + nbass = x; + // "% - fitted2: ".postf(head); nbass.postln; if (nbass.includes(winSize.asFloat), { ~kmeans.fitPredict(~windowDS, ~windowLS, {|x| - nbass = x; "% - fitted3: ".postf(head); nbass.postln; + nbass = x; + // "% - fitted3: ".postf(head); nbass.postln; }); }); }); diff --git a/release-packaging/Examples/dataset/1-learning examples/13-massive-parallelisation-example.scd b/release-packaging/Examples/dataset/1-learning examples/13-massive-parallelisation-example.scd index acf0323..4db597a 100644 --- a/release-packaging/Examples/dataset/1-learning examples/13-massive-parallelisation-example.scd +++ b/release-packaging/Examples/dataset/1-learning examples/13-massive-parallelisation-example.scd @@ -211,9 +211,7 @@ fork{ ) //Above we sneakily made a dictionary of slice data for playback (bufnum,start,end). 
Let's throw it in a dataset -( ~slicedata = FluidDataSet(s); // will hold slice data (bufnum,start,end) for playback -) //dict -> dataset ( diff --git a/release-packaging/Examples/dataset/1-learning examples/3a-classifier-example.scd b/release-packaging/Examples/dataset/1-learning examples/3a-classifier-example.scd index f9af7c8..5f80842 100644 --- a/release-packaging/Examples/dataset/1-learning examples/3a-classifier-example.scd +++ b/release-packaging/Examples/dataset/1-learning examples/3a-classifier-example.scd @@ -53,7 +53,7 @@ Routine{ ~simpleInput.addPoint((i+50).asString,b,{("Added Input" + (i+50)).postln}); ~simpleOutput.addLabel((i+50).asString,"Red",{("Added Output" + (i+50)).postln}); s.sync; - } + }; \done.postln; }.play; ) diff --git a/release-packaging/Examples/dataset/1-learning examples/4-regressor-example.scd b/release-packaging/Examples/dataset/1-learning examples/4-regressor-example.scd index 9afbbb1..2d6b92d 100644 --- a/release-packaging/Examples/dataset/1-learning examples/4-regressor-example.scd +++ b/release-packaging/Examples/dataset/1-learning examples/4-regressor-example.scd @@ -19,8 +19,8 @@ c = Buffer.alloc(s,1); ( Routine{ n.do{|i| - b.set(0,~idx[i].postln); - c.set(0,~data[i].postln); + b.set(0,~idx[i]); + c.set(0,~data[i]); ~simpleInput.addPoint(i.asString,b,{("Added Input" + i).postln}); ~simpleOutput.addPoint(i.asString,c,{("Added Output" + i).postln}); ~mappingviz.set((~idx[i]/61.4).asInteger,~data[i]); @@ -57,5 +57,3 @@ Routine{ // look at the interpolated values ~mappingresult.plot - -(31416/61.4).asInteger diff --git a/release-packaging/Examples/dataset/1-learning examples/5-normalization-and-standardization-example.scd b/release-packaging/Examples/dataset/1-learning examples/5-normalization-and-standardization-example.scd index 6ec3983..1cf3de6 100644 --- a/release-packaging/Examples/dataset/1-learning examples/5-normalization-and-standardization-example.scd +++ b/release-packaging/Examples/dataset/1-learning 
examples/5-normalization-and-standardization-example.scd @@ -113,7 +113,7 @@ Routine{ ~normalize.transformPoint(~query_buf,~normbuf); ~standardize.transformPoint(~query_buf,~stdbuf); -//query the single nearest neighbourg via 3 different data scaling. Depending on the random source at the begining, you will get small to large differences between the 3 answers! +//query the single nearest neighbour via 3 different data scaling. Depending on the random source at the beginning, you should get (small or large) differences between the 3 answers! [~tree,~normtree,~stdtree].do{|t| t.numNeighbours =1 }; ~tree.kNearest(~query_buf, {|x| ("Original:" + x).post;~tree.kNearestDist(~query_buf, {|x| (" with a distance of " + x).postln});}); ~normtree.kNearest(~normbuf, {|x| ("Normalized:" + x).post;~normtree.kNearestDist(~normbuf, {|x| (" with a distance of " + x).postln});});