// Train the network to map source to target. fit() runs up to 1000 (maxIter) epochs and passes the resulting loss to its action; if this is -1, training has failed. Run this until the printed error is satisfactory to you
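// This section assumes a regressor and the datasets already exist. If you are running it on its own, here is a minimal sketch: the names match the usage below, but the hidden layer shape is illustrative and the constructor signatures follow recent FluCoMa releases (older versions differ, e.g. FluidDataSet also took a name argument)
(
~regressor = FluidMLPRegressor(s, [6]); // one hidden layer of 6 units (illustrative)
~source = FluidDataSet(s);  // training inputs
~target = FluidDataSet(s);  // training outputs
~test = FluidDataSet(s);    // points to predict from
~output = FluidDataSet(s);  // destination for the predictions below
~dest = FluidDataSet(s);    // destination for the batch predict below
)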
~regressor.fit(~source, ~target, {|x|x.postln;});
//you can change parameters of the regressor with setters
~regressor.learnRate = 0.01;
~regressor.momentum = 0;
~regressor.validation = 0.2;
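// Other training parameters can be set the same way; these names come from the FluidMLPRegressor parameter list, so check the docs for your FluCoMa version:
~regressor.maxIter = 1000; // epochs per fit() call
~regressor.batchSize = 50; // size of the training batches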
// Now fit the regressor to the source and target, predict against the test set, and grab the output data whilst we're at it, so we can inspect
(
~regressor.fit(~source, ~target, action: {|loss|
	if(loss != -1) {
		("MLP trained with loss" + loss).postln;
	} {
		"Training failed. Try again (perhaps with a lower learning rate)".postln;
	};
	~outputdata = Array.new(128); // collect the predicted values language-side
	~regressor.predict(~test, ~output, action: {
		~output.dump{|x|
			128.do{|i|
				~outputdata.add(x["data"][i.asString][0]);
			};
		};
	});
});
)
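// predict() and dump() are asynchronous, so give them a moment to complete; ~outputdata then holds the first dimension of each of the 128 predicted points, and plotting it is a quick sanity check:
~outputdata.plot;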
//Batch predict takes a FluidDataSet source, a FluidDataSet to write the network output to, and a layer to read from
~regressor.predict(~source,~dest,2);
~dest.dump
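// As with ~output above, dump can take an action that receives the contents as a Dictionary, if you want the predictions language-side:
~dest.dump{|dict| dict["data"].postln};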
//Single point predict uses Buffers rather than FluidDataSets:
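// ~datapoint and ~destpoint are Buffers. If they don't exist yet, a sketch, with sizes assumed from the context (2 input values, 128 output values):
(
~datapoint = Buffer.alloc(s, 2);
~destpoint = Buffer.alloc(s, 128);
)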
{
	~datapoint.setn(0, [1, 1]); // write the two input values into the source buffer
	~regressor.predictPoint(~datapoint, ~destpoint, 2);
	s.sync;
	~destpoint.loadToFloatArray(0, action: {|a|
		a.postln; // post the predicted output values
	});
}.fork;
//We should see a single cycle of a chirp. If not, try training the network again (perhaps with a lower learning rate)
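// To see the waveform itself rather than a list of numbers, plot the retrieved array (plot opens a window, so defer it to the AppClock):
~destpoint.loadToFloatArray(action: {|a| {a.plot}.defer });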