diff --git a/release-packaging/Classes/FluidCorpusBuilders.sc b/release-packaging/Classes/FluidCorpusBuilders.sc
index b8cea79..efb81de 100644
--- a/release-packaging/Classes/FluidCorpusBuilders.sc
+++ b/release-packaging/Classes/FluidCorpusBuilders.sc
@@ -34,6 +34,7 @@
 			entry.add(\bounds->startEnd[i]);
 			entry.add(\numchans->f.numChannels);
 			entry.add(\sr->f.sampleRate);
+			entry.add(\path->f.path);
 			index.add(label->entry);
 			counter = counter + 1;
 			if(counter == (files.size)) {action !? action.value(index)};
diff --git a/release-packaging/Examples/dataset/0-demo-dataset-maker-utilities.scd b/release-packaging/Examples/dataset/0-demo-dataset-maker-utilities.scd
index 90fc66c..9ceb017 100644
--- a/release-packaging/Examples/dataset/0-demo-dataset-maker-utilities.scd
+++ b/release-packaging/Examples/dataset/0-demo-dataset-maker-utilities.scd
@@ -1,6 +1,6 @@
 // define a few processes
 (
-~ds = FluidDataSet(s,\test); // still need a name on the server to make sure we do not forget it exists. it is now permanent aka will resist cmd+.
+~ds = FluidDataSet(s); // no name needs to be provided
 //define as many buffers as we have parallel voices/threads in the extractor processing (default is 4)
 ~mfccbuf = 4.collect{Buffer.new};
 ~statsbuf = 4.collect{Buffer.new};
@@ -8,11 +8,11 @@
 // here we instantiate a loader which creates a single large buffer with a dictionary of what was included in it
 // ~loader = FluidLoadFolder("/Volumes/machins/projets/newsfeed/sons/smallnum/");
-~loader = FluidLoadFolder("/Volumes/machins/projets/newsfeed/sons/segments/");
+~loader = FluidLoadFolder(File.realpath(FluidLoadFolder.class.filenameSymbol).dirname +/+ "../AudioFiles");
 
-// here we instantiate a further slicing step if needs be, which iterate through all the items of the FluidLoadFolder and slice the slices with the declared function. Here it is a very picky onset slicer
+// here we instantiate a further slicing step if need be, which iterates through all the items of the FluidLoadFolder and slices the slices with the declared function.
 ~slicer = FluidSliceCorpus({ |src,start,num,dest|
-	FluidBufOnsetSlice.kr(src,start,num,metric: 9, minSliceLength: 17, indices:dest, threshold:2,blocking: 1)
+	FluidBufOnsetSlice.kr(src, start, num, metric: 9, minSliceLength: 17, indices:dest, threshold:0.7, blocking: 1)
 });
 
 // here we instantiate a process of description and dataset writing, which will run each slice of the previous slice and write the entry. Note the chain of Done.kr triggers.
@@ -20,10 +20,10 @@
 	var mfcc, stats, writer, flatten,mfccBuf, statsBuf, flatBuf, label, voice;
 	label = data.key;
 	voice = data.value[\voice];
-	mfcc = FluidBufMFCC.kr(src,startFrame:start,numFrames:num,numChans:1,features:~mfccbuf[voice],trig:1,blocking: 1);
-	stats = FluidBufStats.kr(~mfccbuf[voice],stats:~statsbuf[voice],trig:Done.kr(mfcc),blocking: 1);
-	flatten = FluidBufFlatten.kr(~statsbuf[voice],~flatbuf[voice],trig:Done.kr(stats),blocking: 1);
-	writer = FluidDataSetWr.kr(~ds,label, -1, ~flatbuf[voice], Done.kr(flatten),blocking: 1)
+	mfcc = FluidBufMFCC.kr(src, startFrame:start, numFrames:num, numChans:1, features:~mfccbuf[voice], trig:1, blocking: 1);
+	stats = FluidBufStats.kr(~mfccbuf[voice], stats:~statsbuf[voice], trig:Done.kr(mfcc), blocking: 1);
+	flatten = FluidBufFlatten.kr(~statsbuf[voice], ~flatbuf[voice], trig:Done.kr(stats), blocking: 1);
+	writer = FluidDataSetWr.kr(~ds, label, -1, ~flatbuf[voice], trig: Done.kr(flatten), blocking: 1)
 });
 )
@@ -134,7 +134,7 @@
 t = Main.elapsedTime;
 //retrieve a sound to match
 ~targetsound = Buffer(s);
-~targetname = ~slicer.index.keys.asArray.last.asSymbol;
+~targetname = ~slicer.index.keys.asArray.scramble[0].asSymbol;
 #a,b = ~slicer.index[~targetname][\bounds];
 FluidBufCompose.process(s,~loader.buffer,a,(b-a),numChans: 1, destination: ~targetsound,action: {~targetsound.play;})
@@ -145,6 +145,7 @@ FluidBufCompose.process(s,~loader.buffer,a,(b-a),numChans: 1, destination: ~targ
 	mfcc = FluidBufMFCC.kr(~targetsound,features:~mfccbuf[0],trig:1);
 	stats = FluidBufStats.kr(~mfccbuf[0],stats:~statsbuf[0],trig:Done.kr(mfcc));
 	flatten = FluidBufFlatten.kr(~statsbuf[0],~flatbuf[0],trig:Done.kr(stats));
+	FreeSelfWhenDone.kr(flatten);
 }.play;
 )
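// Aside: a minimal sketch of the Done.kr chaining idiom used by the extractor
// above, outside the patch. Each FluidBuf* stage is triggered by the Done.kr
// of the previous one, so the whole analysis runs in order on the server.
// The source file here is SuperCollider's bundled test sound, purely as a
// stand-in for any mono buffer; the buffer names are illustrative.
(
~feat = Buffer.new(s);
~stats = Buffer.new(s);
~src = Buffer.read(s, Platform.resourceDir +/+ "sounds/a11wlk01.wav", action: {
	{
		var mfcc = FluidBufMFCC.kr(~src, features: ~feat, trig: 1, blocking: 1);
		var stat = FluidBufStats.kr(~feat, stats: ~stats, trig: Done.kr(mfcc), blocking: 1);
		FreeSelfWhenDone.kr(stat); // free the analysis synth once the last stage is done
	}.play;
});
)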
diff --git a/release-packaging/Examples/dataset/1-learning examples/12-windowed-clustered-segmentation.scd b/release-packaging/Examples/dataset/1-learning examples/12-windowed-clustered-segmentation.scd
index 4b88813..49205ab 100644
--- a/release-packaging/Examples/dataset/1-learning examples/12-windowed-clustered-segmentation.scd
+++ b/release-packaging/Examples/dataset/1-learning examples/12-windowed-clustered-segmentation.scd
@@ -1,122 +1,223 @@
-// load a source
-b = Buffer.read(s,"/Volumes/machins/projets/newsfeed/sons/textes/Audio/synth/fromtexttospeech-AmE-George.wav")
-b.play
+// load a source folder
+~loader = FluidLoadFolder("/Volumes/machins/projets/newsfeed/sons/textes/Audio/synth");
+~loader.play;
 
 //slightly oversegment with novelty
 //segments should still make sense but might cut a few elements in 2 or 3
-~originalslices = Buffer(s);
-FluidBufNoveltySlice.process(s, b, indices: ~originalslices, feature: 1, kernelSize: 29, threshold: 0.05, filterSize: 5, hopSize: 128, action: {~originalslices.numFrames.postln;})
+~slicer = FluidSliceCorpus({ |src,start,num,dest| FluidBufNoveltySlice.kr(src,start,num,indices:dest, feature: 1, kernelSize: 29, threshold: 0.05, filterSize: 5, hopSize: 128)});
+~slicer.play(s, ~loader.buffer,~loader.index);
 
 //test the segmentation by looping them
 (
-{
-	BufRd.ar(1, b,
-		Phasor.ar(0,1,
-			BufRd.kr(1, ~originalslices,
-				MouseX.kr(0, BufFrames.kr(~originalslices) - 1), 0, 1),
-			BufRd.kr(1, ~originalslices,
-				MouseX.kr(1, BufFrames.kr(~originalslices)), 0, 1),
-			BufRd.kr(1,~originalslices,
-				MouseX.kr(0, BufFrames.kr(~originalslices) - 1), 0, 1)), 0, 1);
+~originalindices = Array.newFrom(~slicer.index.keys).sort{|a,b| ~slicer.index[a][\bounds][0]< ~slicer.index[b][\bounds][0]}.collect{|x|~slicer.index[x][\bounds]};
+d = {arg start=0, end = 44100;
+	BufRd.ar(1, ~loader.buffer, Phasor.ar(0,1,start,end,start),0,1);
 }.play;
-)
-//analyse each segment with MFCCs in a dataset
-~originalslices.getn(0,~originalslices.numFrames, {|x|~originalslicesarray = x; if ((x.last != b.numFrames), {~originalslicesarray = ~originalslicesarray ++ (b.numFrames)}); });//retrieve the indices and add the file boundary at the end if not there already
+w = Window.new.front;
+b = ControlSpec(0, ~originalindices.size - 1, \linear, 1); // min, max, mapping, step
+c = StaticText(w, Rect(340, 20, 50, 20)).align_(\center);
+a = Slider(w, Rect(10, 20, 330, 20))
+.action_({var val = b.map(a.value).asInteger;
+	c.string_(val.asString);
+	d.set(\start,~originalindices[val][0], \end, ~originalindices[val][1]);
+});
+)
 
-//iterates through the
-//a few buffers and our dataset - with back and forth from the language
+//analyse each segment with 20 MFCCs in a dataset and spectral shapes in another one
 (
-~mfccs = Buffer(s);
-~stats = Buffer(s);
-~flat = Buffer(s);
-~slices = FluidDataSet(s,\slices);
-
-Routine{
-	s.sync;
-	(~originalslicesarray.size - 1).do{|i|
-		FluidBufMFCC.process(s, b, startFrame: ~originalslicesarray[i], numFrames: (~originalslicesarray[i+1] - ~originalslicesarray[i]), numChans: 1,features: ~mfccs, numCoeffs: 20, action: {
-			FluidBufStats.process(s, ~mfccs, startChan: 1, stats: ~stats, action: {
-				FluidBufFlatten.process(s, ~stats, ~flat, action: {
-					~slices.addPoint(i.asSymbol, ~flat);
-				});
-			});
-		});
-	};
-}.play;
+~featuresbuf = 4.collect{Buffer.new};
+~statsbuf = 4.collect{Buffer.new};
+~flatbuf = 4.collect{Buffer.new};
+~slicesMFCC = FluidDataSet(s,\slicesM);
+~slicesShapes = FluidDataSet(s,\slicesS);
+~extractor = FluidProcessSlices({|src,start,num,data|
+	var features, stats, writer, flatten, mfccBuf, statsBuf, flatBuf, label, voice;
+	label = data.key;
+	voice = data.value[\voice];
+	features = FluidBufMFCC.kr(src,startFrame:start,numFrames:num,numChans:1, numCoeffs: 20, features:~featuresbuf[voice],trig:1,blocking: 1);
+	stats = FluidBufStats.kr(~featuresbuf[voice],stats:~statsbuf[voice],trig:Done.kr(features),blocking: 1);
+	flatten = FluidBufFlatten.kr(~statsbuf[voice],~flatbuf[voice],trig:Done.kr(stats),blocking: 1);
+	writer = FluidDataSetWr.kr(~slicesMFCC,label, -1, ~flatbuf[voice], Done.kr(flatten),blocking: 1);
+	features = FluidBufSpectralShape.kr(src,startFrame:start,numFrames:num,numChans:1, features:~featuresbuf[voice],trig:Done.kr(writer),blocking: 1);
+	stats = FluidBufStats.kr(~featuresbuf[voice],stats:~statsbuf[voice],trig:Done.kr(features),blocking: 1);
+	flatten = FluidBufFlatten.kr(~statsbuf[voice],~flatbuf[voice],trig:Done.kr(stats),blocking: 1);
+	writer = FluidDataSetWr.kr(~slicesShapes,label, -1, ~flatbuf[voice], Done.kr(flatten),blocking: 1);
+});
 )
+~extractor.play(s,~loader.buffer, ~slicer.index);
 
-~slices.print
+~slicesMFCC.print
+~slicesShapes.print
 
 //run a window over consecutive segments, forcing them in 2 classes, and merging the consecutive segments of similar class
 //we overlap the analysis with the last (original) slice to check for continuity
 (
 ~winSize = 4;//the number of consecutive items to split in 2 classes;
+~curated = FluidDataSet(s,\curatedDS);
 ~query = FluidDataSetQuery(s);
-~kmeans = FluidKMeans(s,2,100);
+~stan = FluidStandardize(s);
+~kmeans = FluidKMeans(s,2,1000);
 ~windowDS = FluidDataSet(s,\windowDS);
 ~windowLS = FluidLabelSet(s,\windowLS);
 )
 
-(
-Routine{
-	~indices = [0];
-	~head = 0;
-
-	~sliceDict = Dictionary.new(4);
-	~tempDict = Dictionary.new(4);
-
-	~slices.dump{|x|~sliceDict = x;};
-	s.sync;
-
-	while ( {~head <= (~originalslicesarray.size - ~winSize)},
-		{
-			var step = ~winSize - 1;
-			var nbass = [];
-			//run a process on ~winSize items from ~head (with an overlap of 1)
-			//copy the items to a subdataset
-			~winSize.do{|i|
-				~tempDict.put((i.asString), ~sliceDict["data"][(i+~head).asString]);//here one could curate which stats to take
-				"whichslices:%\n".postf(i+~head);
-			};
-			~windowDS.load(Dictionary.newFrom([\cols, 133, \data, ~tempDict]));
-			s.sync;
-			"% - loaded\n".postf(~head);
+//curate stats (MFCCs)
+~query.clear;
+~query.addRange((0*20)+1,10);
+~query.transform(~slicesMFCC,~curated);
 
-			//kmeans 2 and retrieve ordered array of class assignations
-			~kmeans.fitPredict(~windowDS, ~windowLS, {|x| nbass = x;});
-			s.sync;
-			"% - fitted1: ".postf(~head); nbass.postln;
+//OR
+//curate stats (moments)
+~query.clear;
+~query.addRange(0,3);
+~query.transform(~slicesShapes,~curated);
 
-			// if (nbass.includes(0.0), { Routine{~kmeans.fitPredict(~windowDS, ~windowLS, {|x| nbass = x; "% - fitted2: ".postf(~head); nbass.postln; s.sync;});}.play; });
+//OR
+//curate both
+~query.clear;
+~query.addColumn(0);//add col 0 (mean of mfcc0 as 'loudness')
+~query.transform(~slicesMFCC,~curated);//mfcc0 as loudness
+~query.clear;
+~query.addRange(0,3);//add some spectral moments
+~query.transformJoin(~slicesShapes, ~curated, ~curated);//join in centroids
 
-			~windowLS.dump{|x|~assignments = x.at("data").asSortedArray.flop[1].flatten;};
-			s.sync;
-			"% - assigned ".postf(~head);
+~stan.fitTransform(~curated, ~curated);
 
-			~assignments.postln;
+~curated.print
+~curated.dump{|x|~sliceDict = x;};
 
-			step.do{|i|
-				if (~assignments[i+1] != ~assignments[i], {~indices= ~indices ++ (~originalslicesarray[~head+i+1])});
-			};
+~originalslicesarray = (~originalindices.flatten ++ ~loader.buffer.numFrames).asSet.asArray.sort
+~originalkeys = Array.newFrom(~slicer.index.keys).sort{|a,b| ~slicer.index[a][\bounds][0]< ~slicer.index[b][\bounds][0]}
 
-			~head = ~head + step;
-			"-----------------".postln;
-		});
+//the windowed function
+(
+~windowedFunct = {arg head, winSize, overlap;
+	var nbass = [], assignments = [], tempDict = ();
+	//check the size of everything to not overrun
+	winSize = (~originalslicesarray.size - head).min(winSize);
+	//copy the items to a subdataset from here
+	winSize.do{|i|
+		tempDict.put((i.asString), ~sliceDict["data"][(~originalkeys[(i+head)]).asString]);//here one could curate which stats to take
+		"whichslices:%\n".postf(i+head);
+	};
+	~windowDS.load(Dictionary.newFrom([\cols, ~sliceDict["cols"].asInteger, \data, tempDict]), action: {
+		"% - loaded\n".postf(head);
+
+		//kmeans 2 and retrieve ordered array of class assignations
+		~kmeans.fitPredict(~windowDS, ~windowLS, action: {|x|
+			nbass = x;
+			"% - fitted1: ".postf(head); nbass.postln;
+
+			if (nbass.includes(winSize.asFloat), {
+				~kmeans.fitPredict(~windowDS, ~windowLS, {|x|
+					nbass = x; "% - fitted2: ".postf(head); nbass.postln;
+					if (nbass.includes(winSize.asFloat), {
+						~kmeans.fitPredict(~windowDS, ~windowLS, {|x|
+							nbass = x; "% - fitted3: ".postf(head); nbass.postln;
+						});
+					});
+				});
+			});
+
+			~windowLS.dump{|x|
+				var assignments = x.at("data").asSortedArray.flop[1].flatten;
+				"% - assigned ".postf(head);
+
+				assignments.postln;
 
-			//leftovers
-			if ( (~originalslicesarray.size - ~head) > 1, {
-				//run a process on (a.size - ~head) items from ~head
-				(~originalslicesarray.size - ~head - 1).do{|i|
-					if (~assignments[i+1] != ~assignments[i], {~indices= ~indices ++ (~originalslicesarray[~head+i+1])});
-					// (~head+i).postln;
-				};
+				(winSize-1).do{|i|
+					if (assignments[i+1] != assignments[i], {
+						~newindices = ~newindices ++ (~originalslicesarray[head+i+1]).asInteger;
+						~newkeys = ~newkeys ++ (~originalkeys[head+i+1]);
+					});
+				};
+				//if we still have some frames to do, do them
+				if (((winSize + head) < ~originalslicesarray.size), {
+					"-----------------".postln;
+					~windowedFunct.value(head + winSize - overlap, winSize, overlap);
+				}, {~newindices = (~newindices ++ ~loader.buffer.numFrames); "done".postln;});//if we're done, close the books
+			};
+		});
 	});
-	~indices.postln;
-}.play;
+}
 )
 
-{var i = 8;BufRd.ar(1,b,Line.ar(~originalslicesarray[i],~originalslicesarray[i+1],(~originalslicesarray[i+1] - ~originalslicesarray[i])/b.sampleRate, doneAction: 2))}.play;
-{var i = 4;BufRd.ar(1,b,Line.ar(~indices[i],~indices[i+1],(~indices[i+1] - ~indices[i])/b.sampleRate, doneAction: 2))}.play;
+//the job
+~newindices = [~originalslicesarray[0]]; ~newkeys = [~originalkeys[0]];
+~windowedFunct.value(0, 4, 1);
+
+//try again with more clusters
+~newindices = [~originalslicesarray[0]]; ~newkeys = [~originalkeys[0]];
+~kmeans.numClusters = 3;
+~windowedFunct.value(0,6,2);
+
+~newindices.postln;
+~newkeys.postln;
+
+~newindices.size;
+~newkeys.size;
+
+~newindices.last;
+~newkeys.last;
+
+~slicer.index[~originalkeys[0]]
+
+//export to reaper
+(
+//first create a new file that ends with rpp - it will overwrite if the file exists
+f = File.new("/tmp/clusteredslices-" ++ Date.getDate.stamp ++".rpp","w+");
+
+if (f.isOpen , {
+	var path, prevpath ="", sr, count, dur;
+	//write the header
+	f.write(" 0, {
+	f.write("\n>\n");
+	});
+	count = count + dur;
+	};
+	//write the track footer
+	f.write(">\n");
+
+	// a second track with the new ~indices
+	prevpath = "";
+	//write the track header
+	f.write(" 0, {
+	f.write("\n>\n");
+	});
+	count = count + dur;
+	};
+	//write the track footer
+	f.write(">\n");
+
+	//write the footer
+	f.write(">\n");
+	f.close;
+});
+)
\ No newline at end of file
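// Aside: stripped of the server round-trips, the merging rule inside
// ~windowedFunct above is just "drop a boundary when consecutive window labels
// agree". A language-side toy with made-up bounds and labels, not part of the
// patch:
(
var bounds = [0, 1000, 2500, 4000, 6000, 9000]; // 5 segments
var labels = [0, 0, 1, 1, 0];                   // one cluster label per segment
var kept = [bounds[0]];
(labels.size - 1).do{|i|
	if(labels[i+1] != labels[i]) { kept = kept ++ [bounds[i+1]] };
};
kept = kept ++ [bounds.last];
kept.postln; // -> [0, 2500, 6000, 9000]: segments 1-2 and 3-4 were merged
)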
diff --git a/release-packaging/Examples/dataset/1-learning examples/1a-starting-1D-example.scd b/release-packaging/Examples/dataset/1-learning examples/1a-starting-1D-example.scd
index d10bbfd..2d2e03c 100644
--- a/release-packaging/Examples/dataset/1-learning examples/1a-starting-1D-example.scd
+++ b/release-packaging/Examples/dataset/1-learning examples/1a-starting-1D-example.scd
@@ -1,5 +1,5 @@
 s.reboot
-~ds = FluidDataSet.new(s,\simple1data)
+~ds = FluidDataSet.new(s)
 ~point = Buffer.alloc(s,1,1)
 (
 Routine{
@@ -21,6 +21,7 @@
 Routine{
 	10.do{|i|
 		~point.set(0,i);
+		s.sync;
 		~tree.kNearest(~point, {|x| "Neighbours for a value of % are ".postf(i); x.postln});
 		s.sync;
 	}
@@ -29,7 +30,7 @@
 
 /*** KMEANS ***/
-~kmeans = FluidKMeans.new(s,maxIter:100)
+~kmeans = FluidKMeans.new(s,maxIter:100);
 ~kmeans.numClusters = 2; //play with this
 ~kmeans.fit(~ds,action:{|x| "Done fitting with these number of items per cluster ".post;x.postln;})
@@ -37,6 +38,7 @@
 Routine{
 	10.do{|i|
 		~point.set(0,i);
+		s.sync;
 		~kmeans.predictPoint(~point,{|x| ("Predicted Cluster for a value of " + i ++ ":" + x).postln});
 		s.sync;
 	}
@@ -49,7 +51,7 @@
 (
 ~labels.size{|x|
-	Routine{x.asInteger.do{|i| //size does not return a value, but we can retrieve it via a function
+	Routine{x.asInteger.do{|i|
 		~labels.getLabel(i,action: {|l|
 			("Label for entry " + i ++ ":" + l).postln;
 		});
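// Aside: the two s.sync lines added above follow the same pattern: setting a
// Buffer is asynchronous, so we wait for the server's acknowledgement before
// the query reads the point. A minimal sketch of set-then-sync, with
// illustrative names, not part of the patch:
(
Routine{
	var b = Buffer.alloc(s, 1, 1);
	s.sync;           // the buffer now exists on the server
	b.set(0, 3.14);
	s.sync;           // the write has landed
	b.get(0, {|v| "server sees %".format(v).postln });
}.play;
)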
diff --git a/release-packaging/Examples/dataset/2-various other examples/scaling-dimension-as-weighting/2-3Dscaling.scd b/release-packaging/Examples/dataset/2-various other examples/scaling-dimension-as-weighting/2-3Dscaling.scd
new file mode 100644
index 0000000..2d47818
--- /dev/null
+++ b/release-packaging/Examples/dataset/2-various other examples/scaling-dimension-as-weighting/2-3Dscaling.scd
@@ -0,0 +1,161 @@
+// Make:
+// - A kmeans
+// - a datasetquery
+// - a normalizer
+// - a standardizer
+// - 3 DataSets of example points (R-G-B descriptions)
+// - 3 DataSets for the scaled versions
+// - 1 summative dataset and a LabelSet for predicted labels
+
+(
+~classifier = FluidKMeans(s,5, 1000);
+~query = FluidDataSetQuery(s);
+~stan = FluidStandardize(s);
+~norm = FluidNormalize(s);
+~sourceR = FluidDataSet(s,\a3DsourceR);
+~sourceG = FluidDataSet(s,\a3DsourceG);
+~sourceB = FluidDataSet(s,\a3DsourceB);
+~scaledR = FluidDataSet(s,\a3DscaledR);
+~scaledG = FluidDataSet(s,\a3DscaledG);
+~scaledB = FluidDataSet(s,\a3DscaledB);
+~composited = FluidDataSet(s,\a3Dcomposited);
+~labels = FluidLabelSet(s,\a3Dlabels);
+)
+
+//Make some random, but clustered test points, each descriptor category in a separate dataset
+(
+~sourceR.load(Dictionary.newFrom([\cols, 1, \data, (Dictionary.newFrom(40.collect{|x| [x, 1.0.sum3rand]}.flatten))]));
+~sourceG.load(Dictionary.newFrom([\cols, 1, \data, (Dictionary.newFrom(40.collect{|x| [x, 1.0.rand2]}.flatten))]));
+~sourceB.load(Dictionary.newFrom([\cols, 1, \data, (Dictionary.newFrom(40.collect{|x| [x, (0.5.sum3rand).squared + [0.75,-0.1].choose]}.flatten))]));
+)
+
+//here we manipulate the scaling of the data and observe its effect on the clustering
+
+//assemble the unscaled dataset
+(
+~query.addColumn(0, {
+	~query.transformJoin(~sourceB, ~sourceG, ~composited, {
+		~query.transformJoin(~sourceR, ~composited, ~composited);
+	});
+});
+)
+
+~composited.print
+
+//Fit the classifier to the example DataSet and labels, and then run prediction on the test data into our mapping label set
+~classifier.fitPredict(~composited,~labels,{~labels.dump{|x|~labeldict = x;};~composited.dump{|x|~compodict=x;};});
+
+//Visualise:
+(
+w = Window("sourceClasses", Rect(128, 64, 820, 120));
+w.drawFunc = {
+	Pen.use{
+		~compodict["data"].keysValuesDo{|key, colour|
+			Pen.fillColor = Color.fromArray((colour * 0.5 + 0.5 ).clip(0,1) ++ 1);
+			Pen.fillRect( Rect( (key.asFloat * 20 + 10), (~labeldict["data"].at(key).asInteger[0] * 20 + 10),15,15));
+		};
+	};
+};
+w.refresh;
+w.front;
+)
+
+// standardize our colours and rerun
+(
+~stan.fitTransform(~sourceR, ~scaledR, {
+	~stan.fitTransform(~sourceG, ~scaledG, {
+		~stan.fitTransform(~sourceB, ~scaledB, {
+			//assemble
+			~query.addColumn(0, {
+				~query.transformJoin(~scaledB, ~scaledG, ~composited, {
+					~query.transformJoin(~scaledR, ~composited, ~composited, {
+						//fit
+						~classifier.fitPredict(~composited,~labels,{~labels.dump{|x|~labeldict2 = x;};~composited.dump{|x|~compodict2=x;};});
+					});
+				});
+			});
+		});
+	});
+});
+)
+
+//Visualise:
+(
+w = Window("stanClasses", Rect(128, 64, 820, 120));
+w.drawFunc = {
+	Pen.use{
+		~compodict2["data"].keysValuesDo{|key, colour|
+			Pen.fillColor = Color.fromArray((colour * 0.25 + 0.5 ).clip(0,1) ++ 1);
+			Pen.fillRect( Rect( (key.asFloat * 20 + 10), (~labeldict2["data"].at(key).asInteger[0] * 20 + 10),15,15));
+		};
+	};
+};
+w.refresh;
+w.front;
+)
+
+//now let's normalise instead
+(
+~norm.fitTransform(~sourceR, ~scaledR, {
+	~norm.fitTransform(~sourceG, ~scaledG, {
+		~norm.fitTransform(~sourceB, ~scaledB, {
+			//assemble
+			~query.addColumn(0, {
+				~query.transformJoin(~scaledB, ~scaledG, ~composited, {
+					~query.transformJoin(~scaledR, ~composited, ~composited, {
+						//fit
+						~classifier.fitPredict(~composited,~labels,{~labels.dump{|x|~labeldict2 = x;};~composited.dump{|x|~compodict2=x;};});
+					});
+				});
+			});
+		});
+	});
+});
+)
+
+//Visualise:
+(
+w = Window("normClasses", Rect(128, 64, 820, 120));
+w.drawFunc = {
+	Pen.use{
+		~compodict2["data"].keysValuesDo{|key, colour|
+			Pen.fillColor = Color.fromArray((colour * 0.25 + 0.5 ).clip(0,1) ++ 1);
+			Pen.fillRect( Rect( (key.asFloat * 20 + 10), (~labeldict2["data"].at(key).asInteger[0] * 20 + 10),15,15));
+		};
+	};
+};
+w.refresh;
+w.front;
+)
+
+// let's mess with the scaling of one dimension: let's multiply the range of Red by 10
+~norm.min = -10;
+~norm.max = 10;
+(
+~norm.fitTransform(~sourceR, ~scaledR, {
+	//assemble
+	~query.addColumn(0, {
+		~query.transformJoin(~scaledB, ~scaledG, ~composited, {
+			~query.transformJoin(~scaledR, ~composited, ~composited, {
+				//fit
+				~classifier.fitPredict(~composited,~labels,{~labels.dump{|x|~labeldict2 = x;};~composited.dump{|x|~compodict2=x;};});
+			});
+		});
+	});
+});
+)
+
+//Visualise:
+(
+w = Window("norm10rClasses", Rect(128, 64, 820, 120));
+w.drawFunc = {
+	Pen.use{
+		~compodict2["data"].keysValuesDo{|key, colour|
+			Pen.fillColor = Color.fromArray((colour * 0.25 + 0.5 ).clip(0,1) ++ 1);
+			Pen.fillRect( Rect( (key.asFloat * 20 + 10), (~labeldict2["data"].at(key).asInteger[0] * 20 + 10),15,15));
+		};
+	};
+};
+w.refresh;
+w.front;
)
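// Aside: a rough intuition for why widening one dimension's range acts as a
// weight. With all dimensions spanning roughly [-1,1], each contributes
// comparably to the euclidean distances kmeans uses; rescaling one dimension
// by 10 makes it dominate. The numbers below are made up:
(
var p = [0.2, 0.5, -0.3], q = [0.8, 0.4, 0.1];
var plain = (p - q).squared.sum.sqrt;                 // ~0.73, all dims comparable
var redX10 = ([10, 1, 1] * (p - q)).squared.sum.sqrt; // ~6.01, ruled by dim 0
[plain, redX10].postln;
)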