reflective merge branch 'nested-iterator-with-condition' into refactor/NRTSynths

commit 50b59a39cf ("nix") by Pierre Alexandre Tremblay, 5 years ago
@@ -34,6 +34,7 @@ FluidLoadFolder
entry.add(\bounds->startEnd[i]);
entry.add(\numchans->f.numChannels);
entry.add(\sr->f.sampleRate);
+entry.add(\path->f.path);
index.add(label->entry);
counter = counter + 1;
if(counter == (files.size)) {action !? action.value(index)};
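
// sketch (not part of the commit): with \path now stored per entry, everything needed to locate
// a slice in its source file can be read back from the index once the loader has run:
~loader.index.keysValuesDo{|label, entry|
"% -> bounds: % sr: % path: %\n".postf(label, entry[\bounds], entry[\sr], entry[\path]);
};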

@@ -1,6 +1,6 @@
// define a few processes
(
-~ds = FluidDataSet(s,\test); // still need a name on the server to make sure we do not forget it exists. it is now permanent aka will resist cmd+.
+~ds = FluidDataSet(s); //no name needs to be provided
//define as many buffers as we have parallel voices/threads in the extractor processing (default is 4)
~mfccbuf = 4.collect{Buffer.new};
~statsbuf = 4.collect{Buffer.new};
@@ -8,11 +8,11 @@
// here we instantiate a loader which creates a single large buffer with a dictionary of what was included in it
// ~loader = FluidLoadFolder("/Volumes/machins/projets/newsfeed/sons/smallnum/");
-~loader = FluidLoadFolder("/Volumes/machins/projets/newsfeed/sons/segments/");
+~loader = FluidLoadFolder(File.realpath(FluidLoadFolder.class.filenameSymbol).dirname +/+ "../AudioFiles");
-// here we instantiate a further slicing step if needs be, which iterate through all the items of the FluidLoadFolder and slice the slices with the declared function. Here it is a very picky onset slicer
+// here we instantiate a further slicing step if need be, which iterates through all the items of the FluidLoadFolder and slices the slices with the declared function.
~slicer = FluidSliceCorpus({ |src,start,num,dest|
-FluidBufOnsetSlice.kr(src,start,num,metric: 9, minSliceLength: 17, indices:dest, threshold:2,blocking: 1)
+FluidBufOnsetSlice.kr(src, start, num, metric: 9, minSliceLength: 17, indices:dest, threshold:0.7, blocking: 1)
});
// here we instantiate a process of description and dataset writing, which will run on each slice from the previous step and write the entry. Note the chain of Done.kr triggers.
@@ -20,10 +20,10 @@
var mfcc, stats, writer, flatten, mfccBuf, statsBuf, flatBuf, label, voice;
label = data.key;
voice = data.value[\voice];
-mfcc = FluidBufMFCC.kr(src,startFrame:start,numFrames:num,numChans:1,features:~mfccbuf[voice],trig:1,blocking: 1);
+mfcc = FluidBufMFCC.kr(src, startFrame:start, numFrames:num, numChans:1, features:~mfccbuf[voice], trig:1, blocking: 1);
-stats = FluidBufStats.kr(~mfccbuf[voice],stats:~statsbuf[voice],trig:Done.kr(mfcc),blocking: 1);
+stats = FluidBufStats.kr(~mfccbuf[voice], stats:~statsbuf[voice], trig:Done.kr(mfcc), blocking: 1);
-flatten = FluidBufFlatten.kr(~statsbuf[voice],~flatbuf[voice],trig:Done.kr(stats),blocking: 1);
+flatten = FluidBufFlatten.kr(~statsbuf[voice], ~flatbuf[voice], trig:Done.kr(stats), blocking: 1);
-writer = FluidDataSetWr.kr(~ds,label, -1, ~flatbuf[voice], Done.kr(flatten),blocking: 1)
+writer = FluidDataSetWr.kr(~ds, label, -1, ~flatbuf[voice], trig: Done.kr(flatten), blocking: 1)
});
)
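
// sketch (not part of the commit): once defined, these objects are driven in this order,
// assuming the describing function above is wrapped in a FluidProcessSlices named ~extractor,
// as in the later examples of this commit:
~loader.play;                                      // 1. concatenate the folder into one buffer and build the index
~slicer.play(s, ~loader.buffer, ~loader.index);    // 2. re-slice every loaded item with the onset slicer
~extractor.play(s, ~loader.buffer, ~slicer.index); // 3. describe each slice and write a point into ~ds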
@@ -134,7 +134,7 @@ t = Main.elapsedTime;
//retrieve a sound to match
~targetsound = Buffer(s);
-~targetname = ~slicer.index.keys.asArray.last.asSymbol;
+~targetname = ~slicer.index.keys.asArray.scramble[0].asSymbol;
#a,b = ~slicer.index[~targetname][\bounds];
FluidBufCompose.process(s,~loader.buffer,a,(b-a),numChans: 1, destination: ~targetsound,action: {~targetsound.play;})
@@ -145,6 +145,7 @@ FluidBufCompose.process(s,~loader.buffer,a,(b-a),numChans: 1, destination: ~targetsound,action: {~targetsound.play;})
mfcc = FluidBufMFCC.kr(~targetsound,features:~mfccbuf[0],trig:1);
stats = FluidBufStats.kr(~mfccbuf[0],stats:~statsbuf[0],trig:Done.kr(mfcc));
flatten = FluidBufFlatten.kr(~statsbuf[0],~flatbuf[0],trig:Done.kr(stats));
+FreeSelfWhenDone.kr(flatten);
}.play;
)
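
// note (not part of the commit): the added FreeSelfWhenDone frees this analysis synth as soon as
// the last stage of the Done.kr chain (flatten) reports completion, so repeated queries do not
// accumulate idle synths on the server.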

@@ -1,122 +1,223 @@
-// load a source
-b = Buffer.read(s,"/Volumes/machins/projets/newsfeed/sons/textes/Audio/synth/fromtexttospeech-AmE-George.wav")
-b.play
+// load a source folder
+~loader = FluidLoadFolder("/Volumes/machins/projets/newsfeed/sons/textes/Audio/synth");
+~loader.play;
//slightly oversegment with novelty
//segments should still make sense but might cut a few elements in 2 or 3
-~originalslices = Buffer(s);
-FluidBufNoveltySlice.process(s, b, indices: ~originalslices, feature: 1, kernelSize: 29, threshold: 0.05, filterSize: 5, hopSize: 128, action: {~originalslices.numFrames.postln;})
+~slicer = FluidSliceCorpus({ |src,start,num,dest| FluidBufNoveltySlice.kr(src,start,num,indices:dest, feature: 1, kernelSize: 29, threshold: 0.05, filterSize: 5, hopSize: 128)});
+~slicer.play(s, ~loader.buffer,~loader.index);
//test the segmentation by looping them
(
-{
-BufRd.ar(1, b,
-Phasor.ar(0,1,
-BufRd.kr(1, ~originalslices,
-MouseX.kr(0, BufFrames.kr(~originalslices) - 1), 0, 1),
-BufRd.kr(1, ~originalslices,
-MouseX.kr(1, BufFrames.kr(~originalslices)), 0, 1),
-BufRd.kr(1,~originalslices,
-MouseX.kr(0, BufFrames.kr(~originalslices) - 1), 0, 1)), 0, 1);
-}.play;
-)
-//analyse each segment with MFCCs in a dataset
-~originalslices.getn(0,~originalslices.numFrames, {|x|~originalslicesarray = x; if ((x.last != b.numFrames), {~originalslicesarray = ~originalslicesarray ++ (b.numFrames)}); });//retrieve the indices and add the file boundary at the end if not there already
+~originalindices = Array.newFrom(~slicer.index.keys).sort{|a,b| ~slicer.index[a][\bounds][0] < ~slicer.index[b][\bounds][0]}.collect{|x| ~slicer.index[x][\bounds]};
+d = {arg start=0, end = 44100;
+BufRd.ar(1, ~loader.buffer, Phasor.ar(0,1,start,end,start),0,1);
+}.play;
+w = Window.new.front;
+b = ControlSpec(0, ~originalindices.size - 1, \linear, 1); // min, max, mapping, step
+c = StaticText(w, Rect(340, 20, 50, 20)).align_(\center);
+a = Slider(w, Rect(10, 20, 330, 20))
+.action_({var val = b.map(a.value).asInteger;
+c.string_(val.asString);
+d.set(\start,~originalindices[val][0], \end, ~originalindices[val][1]);
+});
+)
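
// note (not part of the commit): the slider only drives d; any segment can be auditioned
// directly by index (3 is an arbitrary valid entry):
d.set(\start, ~originalindices[3][0], \end, ~originalindices[3][1]);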
-//iterates through the
+//analyse each segment with 20 MFCCs in a dataset and spectralshapes in another one
+//a few buffers and our dataset - with back and forth from the language
(
-~mfccs = Buffer(s);
-~stats = Buffer(s);
-~flat = Buffer(s);
-~slices = FluidDataSet(s,\slices);
-Routine{
-s.sync;
-(~originalslicesarray.size - 1).do{|i|
-FluidBufMFCC.process(s, b, startFrame: ~originalslicesarray[i], numFrames: (~originalslicesarray[i+1] - ~originalslicesarray[i]), numChans: 1,features: ~mfccs, numCoeffs: 20, action: {
-FluidBufStats.process(s, ~mfccs, startChan: 1, stats: ~stats, action: {
-FluidBufFlatten.process(s, ~stats, ~flat, action: {
-~slices.addPoint(i.asSymbol, ~flat);
-});
-});
-});
-};
-}.play;
+~featuresbuf = 4.collect{Buffer.new};
+~statsbuf = 4.collect{Buffer.new};
+~flatbuf = 4.collect{Buffer.new};
+~slicesMFCC = FluidDataSet(s,\slicesM);
+~slicesShapes = FluidDataSet(s,\slicesS);
+~extractor = FluidProcessSlices({|src,start,num,data|
+var features, stats, writer, flatten, mfccBuf, statsBuf, flatBuf, label, voice;
+label = data.key;
+voice = data.value[\voice];
+features = FluidBufMFCC.kr(src,startFrame:start,numFrames:num,numChans:1, numCoeffs: 20, features:~featuresbuf[voice],trig:1,blocking: 1);
+stats = FluidBufStats.kr(~featuresbuf[voice],stats:~statsbuf[voice],trig:Done.kr(features),blocking: 1);
+flatten = FluidBufFlatten.kr(~statsbuf[voice],~flatbuf[voice],trig:Done.kr(stats),blocking: 1);
+writer = FluidDataSetWr.kr(~slicesMFCC,label, -1, ~flatbuf[voice], Done.kr(flatten),blocking: 1);
+features = FluidBufSpectralShape.kr(src,startFrame:start,numFrames:num,numChans:1, features:~featuresbuf[voice],trig:Done.kr(writer),blocking: 1);
+stats = FluidBufStats.kr(~featuresbuf[voice],stats:~statsbuf[voice],trig:Done.kr(features),blocking: 1);
+flatten = FluidBufFlatten.kr(~statsbuf[voice],~flatbuf[voice],trig:Done.kr(stats),blocking: 1);
+writer = FluidDataSetWr.kr(~slicesShapes,label, -1, ~flatbuf[voice], Done.kr(flatten),blocking: 1);
+});
)
+~extractor.play(s,~loader.buffer, ~slicer.index);
-~slices.print
+~slicesMFCC.print
+~slicesShapes.print
//run a window over consecutive segments, forcing them in 2 classes, and merging the consecutive segments of similar class
//we overlap the analysis with the last (original) slice to check for continuity
(
~winSize = 4; //the number of consecutive items to split in 2 classes
+~curated = FluidDataSet(s,\curatedDS);
~query = FluidDataSetQuery(s);
-~kmeans = FluidKMeans(s,2,100);
+~stan = FluidStandardize(s);
+~kmeans = FluidKMeans(s,2,1000);
~windowDS = FluidDataSet(s,\windowDS);
~windowLS = FluidLabelSet(s,\windowLS);
)
+//curate stats (MFCCs)
+~query.clear
+~query.addRange((0*20)+1,10);
+~query.transform(~slicesMFCC,~curated);
+//OR
+//curate stats (moments)
+~query.clear
+~query.addRange(0,3);
+~query.transform(~slicesShapes,~curated);
+//OR
+//curate both
+~query.clear
+~query.addColumn(0);//add col 0 (mean of mfcc0 as 'loudness')
+~query.transform(~slicesMFCC,~curated);//mfcc0 as loudness
+~query.clear;
+~query.addRange(0,3);//add some spectral moments
+~query.transformJoin(~slicesShapes, ~curated, ~curated);//join in centroids
+~stan.fitTransform(~curated, ~curated);
+~curated.print
+~curated.dump{|x|~sliceDict = x;};
+~originalslicesarray = (~originalindices.flatten ++ ~loader.buffer.numFrames).asSet.asArray.sort
+~orginalkeys = Array.newFrom(~slicer.index.keys).sort{|a,b| ~slicer.index[a][\bounds][0] < ~slicer.index[b][\bounds][0]}
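
// sketch (not part of the commit): the windowed function below keeps a slice boundary only where
// the kmeans class changes between neighbouring segments; on toy values (all numbers hypothetical):
(
var assignments = [0, 0, 1, 1, 0];          // one class per consecutive segment
var bounds = [0, 100, 200, 300, 400, 500];  // segment boundaries in frames
var kept = [bounds[0]];
(assignments.size - 1).do{|i|
if (assignments[i+1] != assignments[i], { kept = kept.add(bounds[i+1]) });
};
kept = kept.add(bounds.last);
kept.postln; // -> [0, 200, 400, 500]: same-class neighbours merged
)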
+//the windowed function
(
-Routine{
-~indices = [0];
-~head = 0;
-~sliceDict = Dictionary.new(4);
-~tempDict = Dictionary.new(4);
-~slices.dump{|x|~sliceDict = x;};
-s.sync;
-while ( {~head <= (~originalslicesarray.size - ~winSize)},
-{
-var step = ~winSize - 1;
-var nbass = [];
-//run a process on ~winSize items from ~head (with an overlap of 1)
-//copy the items to a subdataset
-~winSize.do{|i|
-~tempDict.put((i.asString), ~sliceDict["data"][(i+~head).asString]);//here one could curate which stats to take
-"whichslices:%\n".postf(i+~head);
-};
-~windowDS.load(Dictionary.newFrom([\cols, 133, \data, ~tempDict]));
-s.sync;
-"% - loaded\n".postf(~head);
-//kmeans 2 and retrieve ordered array of class assignations
-~kmeans.fitPredict(~windowDS, ~windowLS, {|x| nbass = x;});
-s.sync;
-"% - fitted1: ".postf(~head); nbass.postln;
-// if (nbass.includes(0.0), { Routine{~kmeans.fitPredict(~windowDS, ~windowLS, {|x| nbass = x; "% - fitted2: ".postf(~head); nbass.postln; s.sync;});}.play; });
-~windowLS.dump{|x|~assignments = x.at("data").asSortedArray.flop[1].flatten;};
-s.sync;
-"% - assigned ".postf(~head);
-~assignments.postln;
-step.do{|i|
-if (~assignments[i+1] != ~assignments[i], {~indices= ~indices ++ (~originalslicesarray[~head+i+1])});
-};
-~head = ~head + step;
-"-----------------".postln;
-});
-//leftovers
-if ( (~originalslicesarray.size - ~head) > 1, {
-//run a process on (a.size - ~head) items from ~head
-(~originalslicesarray.size - ~head - 1).do{|i|
-if (~assignments[i+1] != ~assignments[i], {~indices= ~indices ++ (~originalslicesarray[~head+i+1])});
-// (~head+i).postln;
-};
-});
-~indices.postln;
-}.play;
+~windowedFunct = {arg head, winSize, overlap;
+var nbass = [], assignments = [], tempDict = ();
+//check the size of everything to not overrun
+winSize = (~originalslicesarray.size - head).min(winSize);
+//copy the items to a subdataset from here
+winSize.do{|i|
+tempDict.put((i.asString), ~sliceDict["data"][(~orginalkeys[(i+head)]).asString]);//here one could curate which stats to take
+"whichslices:%\n".postf(i+head);
+};
+~windowDS.load(Dictionary.newFrom([\cols, ~sliceDict["cols"].asInteger, \data, tempDict]), action: {
+"% - loaded\n".postf(head);
+//kmeans 2 and retrieve ordered array of class assignations
+~kmeans.fitPredict(~windowDS, ~windowLS, action: {|x|
+nbass = x;
+"% - fitted1: ".postf(head); nbass.postln;
+if (nbass.includes(winSize.asFloat), {
+~kmeans.fitPredict(~windowDS, ~windowLS, {|x|
+nbass = x; "% - fitted2: ".postf(head); nbass.postln;
+if (nbass.includes(winSize.asFloat), {
+~kmeans.fitPredict(~windowDS, ~windowLS, {|x|
+nbass = x; "% - fitted3: ".postf(head); nbass.postln;
+});
+});
+});
+});
+~windowLS.dump{|x|
+var assignments = x.at("data").asSortedArray.flop[1].flatten;
+"% - assigned ".postf(head);
+assignments.postln;
+(winSize-1).do{|i|
+if (assignments[i+1] != assignments[i], {
+~newindices= ~newindices ++ (~originalslicesarray[head+i+1]).asInteger;
+~newkeys = ~newkeys ++ (~orginalkeys[head+i+1]);
+});
+};
+//if we still have some frames to do, do them
+if (((winSize + head) < ~originalslicesarray.size), {
+"-----------------".postln;
+~windowedFunct.value(head + winSize - overlap, winSize, overlap);
+}, {~newindices = (~newindices ++ ~loader.buffer.numFrames); "done".postln;});//if we're done close the books
+};
+});
+});
+}
)
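
// note (not part of the commit): this rewrite is what the merged branch name
// 'nested-iterator-with-condition' describes. The old version iterated in a Routine, pausing on
// s.sync between server requests; the new ~windowedFunct nests the iteration in the completion
// callbacks (the ~windowDS.load and ~kmeans.fitPredict actions) and recurses from the last one,
// with a retry condition that refits kmeans when one cluster comes back empty
// (nbass.includes(winSize.asFloat) means all items landed in a single class).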
-{var i = 8;BufRd.ar(1,b,Line.ar(~originalslicesarray[i],~originalslicesarray[i+1],(~originalslicesarray[i+1] - ~originalslicesarray[i])/b.sampleRate, doneAction: 2))}.play;
-{var i = 4;BufRd.ar(1,b,Line.ar(~indices[i],~indices[i+1],(~indices[i+1] - ~indices[i])/b.sampleRate, doneAction: 2))}.play;
+//the job
+~newindices = [~originalslicesarray[0]]; ~newkeys = [~orginalkeys[0]];
+~windowedFunct.value(0, 4, 1);
+//try again with more clusters
+~newindices = [~originalslicesarray[0]]; ~newkeys = [~orginalkeys[0]];
+~kmeans.numClusters = 3;
+~windowedFunct.value(0,6,2);
+~newindices.postln;
+~newkeys.postln;
+~newindices.size;
+~newkeys.size;
+~newindices.last;
+~newkeys.last;
+~slicer.index[~orginalkeys[0]]
//export to reaper
+(
+//first create a new file that ends with rpp - it will overwrite if the file exists
+f = File.new("/tmp/clusteredslices-" ++ Date.getDate.stamp ++".rpp","w+");
+if (f.isOpen , {
+var path, prevpath ="", sr, count, dur;
+//write the header
+f.write("<REAPER_PROJECT 0.1 \"5.99/OSX64\" 1603037150\n\n");
+//a first track with the originalslicearray
+//write the track header
+f.write("<TRACK\nNAME \"novelty output\"\n");
+// iterate through the items in the track
+~orginalkeys.do{|v, i|
+path = ~slicer.index[v][\path];
+if (path != prevpath, {
+sr = ~slicer.index[v][\sr];
+prevpath = path;
+count = 0;
+});
+dur = ~originalslicesarray[i+1] - ~originalslicesarray[i];
+if ( dur > 0, {
+f.write("<ITEM\nPOSITION " ++ (~originalslicesarray[i] / sr) ++ "\nLENGTH " ++ (dur / sr) ++ "\nNAME \"" ++ v ++ "\"\nSOFFS " ++ (count / sr) ++ "\n<SOURCE WAVE\nFILE \"" ++ path ++ "\"\n>\n>\n");
+});
+count = count + dur;
+};
+//write the track footer
+f.write(">\n");
+// a second track with the new indices (~newindices)
+prevpath = "";
+//write the track header
+f.write("<TRACK\nNAME \"clustered output\"\n");
+// iterate through the items in the track
+~newkeys.do{|v, i|
+path = ~slicer.index[v][\path];
+if (path != prevpath, {
+sr = ~slicer.index[v][\sr];
+prevpath = path;
+count = 0;
+});
+dur = ~newindices[i+1] - ~newindices[i];
+if (dur > 0, {
+f.write("<ITEM\nPOSITION " ++ (~newindices[i] / sr) ++ "\nLENGTH " ++ (dur / sr) ++ "\nNAME \"" ++ v ++ "\"\nSOFFS " ++ (count / sr) ++ "\n<SOURCE WAVE\nFILE \"" ++ path ++ "\"\n>\n>\n");
+});
+count = count + dur;
+};
+//write the track footer
+f.write(">\n");
+//write the footer
+f.write(">\n");
+f.close;
+});
+)
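
// note (not part of the commit): each f.write above emits one Reaper item chunk of this shape
// (all values hypothetical; times are in seconds, i.e. frames divided by the per-file sample rate):
// <ITEM
// POSITION 1.234
// LENGTH 0.567
// NAME "someSliceLabel"
// SOFFS 1.234
// <SOURCE WAVE
// FILE "/path/to/source.wav"
// >
// >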

@@ -1,5 +1,5 @@
s.reboot
-~ds = FluidDataSet.new(s,\simple1data)
+~ds = FluidDataSet.new(s)
~point = Buffer.alloc(s,1,1)
(
Routine{
@@ -21,6 +21,7 @@ Routine{
Routine{
10.do{|i|
~point.set(0,i);
+s.sync;
~tree.kNearest(~point, {|x| "Neighbours for a value of % are ".postf(i); x.postln});
s.sync;
}
@@ -29,7 +30,7 @@ Routine{
/*** KMEANS ***/
-~kmeans = FluidKMeans.new(s,maxIter:100)
+~kmeans = FluidKMeans.new(s,maxIter:100);
~kmeans.numClusters = 2; //play with this
~kmeans.fit(~ds,action:{|x| "Done fitting with these number of items per cluster ".post;x.postln;})
@@ -37,6 +38,7 @@ Routine{
Routine{
10.do{|i|
~point.set(0,i);
+s.sync;
~kmeans.predictPoint(~point,{|x| ("Predicted Cluster for a value of " + i ++ ":" + x).postln});
s.sync;
}
@@ -49,7 +51,7 @@ Routine{
(
~labels.size{|x|
-Routine{x.asInteger.do{|i| //size does not return a value, but we can retrieve it via a function
+Routine{x.asInteger.do{|i|
~labels.getLabel(i,action: {|l|
("Label for entry " + i ++ ":" + l).postln;
});
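
// note (not part of the commit): the two added s.sync lines matter because ~point.set is
// asynchronous; without syncing, the query can read the buffer before the new value has arrived.
// The safe pattern, as used above:
// ~point.set(0, i);                         // send the value to the server
// s.sync;                                   // wait for the server to acknowledge it
// ~tree.kNearest(~point, {|x| x.postln});   // only now query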

@@ -0,0 +1,161 @@
// Make:
// - A kmeans
// - a datasetquery
// - a normalizer
// - a standardizer
// - 3 DataSets of example points (R, G, B descriptions)
// - 3 DataSets for the scaled versions
// - 1 summative dataset and a LabelSet for predicted labels
(
~classifier = FluidKMeans(s,5, 1000);
~query = FluidDataSetQuery(s);
~stan = FluidStandardize(s);
~norm = FluidNormalize(s);
~sourceR = FluidDataSet(s,\a3DsourceR);
~sourceG = FluidDataSet(s,\a3DsourceG);
~sourceB = FluidDataSet(s,\a3DsourceB);
~scaledR = FluidDataSet(s,\a3DscaledR);
~scaledG = FluidDataSet(s,\a3DscaledG);
~scaledB = FluidDataSet(s,\a3DscaledB);
~composited = FluidDataSet(s,\a3Dcomposited);
~labels = FluidLabelSet(s,\a3Dlabels);
)
//Make some random, but clustered test points, each descriptor category in a separate dataset
(
~sourceR.load(Dictionary.newFrom([\cols, 1, \data, (Dictionary.newFrom(40.collect{|x| [x, 1.0.sum3rand]}.flatten))]));
~sourceG.load(Dictionary.newFrom([\cols, 1, \data, (Dictionary.newFrom(40.collect{|x| [x, 1.0.rand2]}.flatten))]));
~sourceB.load(Dictionary.newFrom([\cols, 1, \data, (Dictionary.newFrom(40.collect{|x| [x, (0.5.sum3rand).squared + [0.75,-0.1].choose]}.flatten))]));
)
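// sketch (not part of the commit): the three generators above have quite different spreads and
// centres, which is what standardization/normalization will later even out; a quick check
// (numbers are random and will vary):
(
[\R -> {1.0.sum3rand}, \G -> {1.0.rand2}, \B -> {(0.5.sum3rand).squared + [0.75,-0.1].choose}].do{|assoc|
var vals = 1000.collect{assoc.value.value};
"% min % max % mean %\n".postf(assoc.key, vals.minItem.round(0.01), vals.maxItem.round(0.01), vals.mean.round(0.01));
};
)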
//here we manipulate the data
//assemble the composite dataset from the three raw (unscaled) sources
(
~query.addColumn(0, {
~query.transformJoin(~sourceB, ~sourceG, ~composited, {
~query.transformJoin(~sourceR, ~composited, ~composited);
});
});
)
~composited.print
//Fit the classifier to the composited DataSet and write each point's predicted cluster into the label set
~classifier.fitPredict(~composited,~labels,{~labels.dump{|x|~labeldict = x;};~composited.dump{|x|~compodict=x;};});
//Visualise:
(
w = Window("sourceClasses", Rect(128, 64, 820, 120));
w.drawFunc = {
Pen.use{
~compodict["data"].keysValuesDo{|key, colour|
Pen.fillColor = Color.fromArray((colour * 0.5 + 0.5 ).clip(0,1) ++ 1);
Pen.fillRect( Rect( (key.asFloat * 20 + 10), (~labeldict["data"].at(key).asInteger[0] * 20 + 10),15,15));
};
};
};
w.refresh;
w.front;
)
// standardize our colours and rerun
(
~stan.fitTransform(~sourceR, ~scaledR, {
~stan.fitTransform(~sourceG, ~scaledG, {
~stan.fitTransform(~sourceB, ~scaledB, {
//assemble
~query.addColumn(0, {
~query.transformJoin(~scaledB, ~scaledG, ~composited, {
~query.transformJoin(~scaledR, ~composited, ~composited, {
//fit
~classifier.fitPredict(~composited,~labels,{~labels.dump{|x|~labeldict2 = x;};~composited.dump{|x|~compodict2=x;};});
});
});
});
});
});
});
)
//Visualise:
(
w = Window("stanClasses", Rect(128, 64, 820, 120));
w.drawFunc = {
Pen.use{
~compodict2["data"].keysValuesDo{|key, colour|
Pen.fillColor = Color.fromArray((colour * 0.25 + 0.5 ).clip(0,1) ++ 1);
Pen.fillRect( Rect( (key.asFloat * 20 + 10), (~labeldict2["data"].at(key).asInteger[0] * 20 + 10),15,15));
};
};
};
w.refresh;
w.front;
)
//now let's normalise instead
(
~norm.fitTransform(~sourceR, ~scaledR, {
~norm.fitTransform(~sourceG, ~scaledG, {
~norm.fitTransform(~sourceB, ~scaledB, {
//assemble
~query.addColumn(0, {
~query.transformJoin(~scaledB, ~scaledG, ~composited, {
~query.transformJoin(~scaledR, ~composited, ~composited, {
//fit
~classifier.fitPredict(~composited,~labels,{~labels.dump{|x|~labeldict2 = x;};~composited.dump{|x|~compodict2=x;};});
});
});
});
});
});
});
)
//Visualise:
(
w = Window("normClasses", Rect(128, 64, 820, 120));
w.drawFunc = {
Pen.use{
~compodict2["data"].keysValuesDo{|key, colour|
Pen.fillColor = Color.fromArray((colour * 0.25 + 0.5 ).clip(0,1) ++ 1);
Pen.fillRect( Rect( (key.asFloat * 20 + 10), (~labeldict2["data"].at(key).asInteger[0] * 20 + 10),15,15));
};
};
};
w.refresh;
w.front;
)
// let's mess with the scaling of one dimension: widen the normalisation range of red to [-10,10] (only ~sourceR is re-normalized below)
~norm.min = -10;
~norm.max = 10;
(
~norm.fitTransform(~sourceR, ~scaledR, {
//assemble
~query.addColumn(0, {
~query.transformJoin(~scaledB, ~scaledG, ~composited, {
~query.transformJoin(~scaledR, ~composited, ~composited, {
//fit
~classifier.fitPredict(~composited,~labels,{~labels.dump{|x|~labeldict2 = x;};~composited.dump{|x|~compodict2=x;};});
});
});
});
});
)
//Visualise:
(
w = Window("norm10rClasses", Rect(128, 64, 820, 120));
w.drawFunc = {
Pen.use{
~compodict2["data"].keysValuesDo{|key, colour|
Pen.fillColor = Color.fromArray((colour * 0.25 + 0.5 ).clip(0,1) ++ 1);
Pen.fillRect( Rect( (key.asFloat * 20 + 10), (~labeldict2["data"].at(key).asInteger[0] * 20 + 10),15,15));
};
};
};
w.refresh;
w.front;
)