// Make:
// - a FluidKMeans classifier
// - a FluidDataSetQuery
// - a FluidNormalize
// - a FluidStandardize
// - 3 DataSets of example points, one per R/G/B descriptor
// - 3 DataSets for the scaled versions
// - 1 composite DataSet and a LabelSet for the predicted labels

(
~classifier = FluidKMeans(s, 5, 1000); // 5 clusters, up to 1000 iterations
~query = FluidDataSetQuery(s);
~stan = FluidStandardize(s);
~norm = FluidNormalize(s);
~sourceR = FluidDataSet(s);
~sourceG = FluidDataSet(s);
~sourceB = FluidDataSet(s);
~scaledR = FluidDataSet(s);
~scaledG = FluidDataSet(s);
~scaledB = FluidDataSet(s);
~composited = FluidDataSet(s);
~labels = FluidLabelSet(s);
)

// Make some random but clustered test points, each descriptor category in its own 1-column DataSet.
// R: sum3rand gives a bell-ish spread around 0; G: rand2 is uniform in -1..1;
// B: a small squared spread pushed towards one of two clumps (around 0.75 and -0.1).
(
~sourceR.load(Dictionary.newFrom([\cols, 1, \data, Dictionary.newFrom(40.collect{ |x| [x, 1.0.sum3rand] }.flatten)]));
~sourceG.load(Dictionary.newFrom([\cols, 1, \data, Dictionary.newFrom(40.collect{ |x| [x, 1.0.rand2] }.flatten)]));
~sourceB.load(Dictionary.newFrom([\cols, 1, \data, Dictionary.newFrom(40.collect{ |x| [x, (0.5.sum3rand).squared + [0.75, -0.1].choose] }.flatten)]));
)

// Here we start manipulating the data.

// First, assemble the three 1-column source DataSets into one 3-column composite:
// addColumn(0) selects the single column of each source, and transformJoin(a, b, dest)
// runs the query on a and joins the result onto b's items (matched by identifier),
// writing into dest.
(
~query.addColumn(0, {
	~query.transformJoin(~sourceB, ~sourceG, ~composited, {
		~query.transformJoin(~sourceR, ~composited, ~composited);
	});
});
)

// Check the composite.
~composited.print;

// Fit the k-means classifier to the composite DataSet and write each point's
// predicted cluster into the LabelSet, then dump both client-side for plotting.
~classifier.fitPredict(~composited, ~labels, {
	~labels.dump{ |x| ~labeldict = x };
	~composited.dump{ |x| ~compodict = x };
});
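
// As a quick sanity check (a sketch; assumes ~labeldict has been filled by the dump
// callback above), tally how many points landed in each cluster:
(
var counts = Dictionary.new;
~labeldict["data"].keysValuesDo{ |key, label|
	counts[label[0]] = (counts[label[0]] ? 0) + 1;
};
counts.postln;
)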

// Visualise: one square per point; x = the point's index, y = its predicted cluster,
// fill colour = the point's three descriptor values rescaled into 0..1.
(
w = Window("sourceClasses", Rect(128, 64, 820, 120));
w.drawFunc = {
	Pen.use{
		~compodict["data"].keysValuesDo{ |key, colour|
			Pen.fillColor = Color.fromArray((colour * 0.5 + 0.5).clip(0, 1) ++ 1);
			Pen.fillRect(Rect(key.asFloat * 20 + 10, ~labeldict["data"].at(key).asInteger[0] * 20 + 10, 15, 15));
		};
	};
};
w.refresh;
w.front;
)

// Standardize our colours and rerun: FluidStandardize rescales each column to
// zero mean and unit variance before clustering.
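
// For intuition (a plain-sclang sketch, not the FluidStandardize implementation):
// standardizing subtracts a column's mean and divides by its standard deviation.
(
var vals = 40.collect{ 1.0.sum3rand };
var mean = vals.mean;
var std = (vals - mean).squared.mean.sqrt;
var standardized = (vals - mean) / std;
// mean ends up ~0 and variance ~1:
[standardized.mean, (standardized - standardized.mean).squared.mean].postln;
)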

(
~stan.fitTransform(~sourceR, ~scaledR, {
	~stan.fitTransform(~sourceG, ~scaledG, {
		~stan.fitTransform(~sourceB, ~scaledB, {
			// assemble
			~query.addColumn(0, {
				~query.transformJoin(~scaledB, ~scaledG, ~composited, {
					~query.transformJoin(~scaledR, ~composited, ~composited, {
						// fit
						~classifier.fitPredict(~composited, ~labels, {
							~labels.dump{ |x| ~labeldict2 = x };
							~composited.dump{ |x| ~compodict2 = x };
						});
					});
				});
			});
		});
	});
});
)

// Visualise the standardized run. (The colour rescale factor drops from 0.5 to 0.25
// because standardized values spread wider than the raw -1..1 data.)
(
w = Window("stanClasses", Rect(128, 204, 820, 120));
w.drawFunc = {
	Pen.use{
		~compodict2["data"].keysValuesDo{ |key, colour|
			Pen.fillColor = Color.fromArray((colour * 0.25 + 0.5).clip(0, 1) ++ 1);
			Pen.fillRect(Rect(key.asFloat * 20 + 10, ~labeldict2["data"].at(key).asInteger[0] * 20 + 10, 15, 15));
		};
	};
};
w.refresh;
w.front;
)
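
// Optional (a sketch): count how many points got a different cluster id between the
// raw and standardized runs. K-means cluster numbering is arbitrary between runs, so
// this only shows that the partition moved, not which run is "better".
(
var changed = 0;
~labeldict["data"].keysValuesDo{ |key, label|
	if(label != ~labeldict2["data"].at(key)) { changed = changed + 1 };
};
("points whose cluster id changed:" + changed).postln;
)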

// Now let's normalise instead: FluidNormalize rescales each column linearly onto
// 0..1 (by default) using that column's observed minimum and maximum.
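
// For intuition (a plain-sclang sketch, not the FluidNormalize implementation):
// normalizing maps a column linearly from its observed min..max onto the target range.
(
var vals = 40.collect{ 1.0.rand2 };
var normalized = vals.normalize(0, 1); // linear map from [minItem, maxItem] to [0, 1]
[normalized.minItem, normalized.maxItem].postln; // -> [0, 1]
)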

(
~norm.fitTransform(~sourceR, ~scaledR, {
	~norm.fitTransform(~sourceG, ~scaledG, {
		~norm.fitTransform(~sourceB, ~scaledB, {
			// assemble
			~query.addColumn(0, {
				~query.transformJoin(~scaledB, ~scaledG, ~composited, {
					~query.transformJoin(~scaledR, ~composited, ~composited, {
						// fit
						~classifier.fitPredict(~composited, ~labels, {
							~labels.dump{ |x| ~labeldict2 = x };
							~composited.dump{ |x| ~compodict2 = x };
						});
					});
				});
			});
		});
	});
});
)

// Visualise the normalized run.
(
w = Window("normClasses", Rect(128, 344, 820, 120));
w.drawFunc = {
	Pen.use{
		~compodict2["data"].keysValuesDo{ |key, colour|
			Pen.fillColor = Color.fromArray((colour * 0.25 + 0.5).clip(0, 1) ++ 1);
			Pen.fillRect(Rect(key.asFloat * 20 + 10, ~labeldict2["data"].at(key).asInteger[0] * 20 + 10, 15, 15));
		};
	};
};
w.refresh;
w.front;
)

// Let's mess with the scaling of one dimension: widen the normalizer's output range
// so Red alone gets mapped onto -10..10 while Green and Blue stay in 0..1.
~norm.min = -10;
~norm.max = 10;
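
// Why this wrecks the clustering (a sketch with made-up numbers): k-means works on
// Euclidean distance, so a dimension spanning -10..10 swamps dimensions spanning 0..1.
(
var a = [0.1, 0.2, -8.0];	// hypothetical scaled points, Red now in -10..10
var b = [0.9, 0.8, -7.5];	// far from a in the small dimensions, close in Red
var c = [0.1, 0.2,  8.0];	// identical to a in the small dimensions, far in Red
("a-b distance:" + (a - b).squared.sum.sqrt).postln;	// ~1.1
("a-c distance:" + (a - c).squared.sum.sqrt).postln;	// 16: Red dominates
)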

(
~norm.fitTransform(~sourceR, ~scaledR, {
	// assemble (Green and Blue keep their previous 0..1 scaling)
	~query.addColumn(0, {
		~query.transformJoin(~scaledB, ~scaledG, ~composited, {
			~query.transformJoin(~scaledR, ~composited, ~composited, {
				// fit
				~classifier.fitPredict(~composited, ~labels, {
					~labels.dump{ |x| ~labeldict2 = x };
					~composited.dump{ |x| ~compodict2 = x };
				});
			});
		});
	});
});
)

// Visualise the run with Red stretched to -10..10: the clusters are now decided
// almost entirely by the Red dimension.
(
w = Window("norm10rClasses", Rect(128, 484, 820, 120));
w.drawFunc = {
	Pen.use{
		~compodict2["data"].keysValuesDo{ |key, colour|
			Pen.fillColor = Color.fromArray((colour * 0.25 + 0.5).clip(0, 1) ++ 1);
			Pen.fillRect(Rect(key.asFloat * 20 + 10, ~labeldict2["data"].at(key).asInteger[0] * 20 + 10, 15, 15));
		};
	};
};
w.refresh;
w.front;
)