now with finalActivation

nix
Pierre Alexandre Tremblay 6 years ago
parent dc73287280
commit 21daae30ed

@@ -5,7 +5,7 @@ FluidMLPRegressor : FluidRTDataClient {
const <relu = 2;
const <tanh = 3;
- *new {|server, hidden = #[3,3] , activation = 0, outputLayer = 0, maxIter = 1000, learnRate = 0.0001, momentum = 0.9, batchSize = 50, validation = 0.2|
+ *new {|server, hidden = #[3,3] , activation = 0, finalActivation = 0, outputLayer = 0, maxIter = 1000, learnRate = 0.0001, momentum = 0.9, batchSize = 50, validation = 0.2|
var hiddenCtrlLabels;
hidden = [hidden.size]++hidden;
@@ -15,6 +15,7 @@ FluidMLPRegressor : FluidRTDataClient {
[hiddenCtrlLabels,hidden].lace ++
[
\activation,activation,
+ \finalActivation, finalActivation,
\outputLayer, outputLayer,
\maxIter, maxIter,
\learnRate,learnRate,
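
For reference, a call against the new signature might look like the following. This is an illustrative sketch rather than part of the commit, assuming a booted server in s and the positional argument order shown above:

// Illustrative sketch only (not part of the commit): constructing a regressor
// with the new finalActivation argument, assuming a booted server in s.
(
~mlp = FluidMLPRegressor(
	s,
	[6, 6],                      // hidden: two hidden layers of 6 units each
	FluidMLPRegressor.relu,      // activation for the hidden layers
	FluidMLPRegressor.sigmoid,   // finalActivation, the argument added by this commit
	0,                           // outputLayer
	1000,                        // maxIter
	0.0001,                      // learnRate
	0.9,                         // momentum
	50,                          // batchSize
	0.2                          // validation
);
)

Because the arguments are positional, existing call sites (as in the example and help files further down) need finalActivation inserted between activation and outputLayer.
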
@@ -85,9 +86,9 @@ FluidMLPClassifier : FluidRTDataClient {
);
}
- predict{ |sourceDataSet, targetDataSet, action|
+ predict{ |sourceDataSet, targetLabelSet, action|
this.prSendMsg(\predict,
- [sourceDataSet.asSymbol, targetDataSet.asSymbol],
+ [sourceDataSet.asSymbol, targetLabelSet.asSymbol],
action);
}
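
The rename only changes the argument name, but it makes the intent clearer: the classifier writes its predictions into a label set rather than a data set. A usage sketch with assumed names (~classifier, ~testFeatures, ~predicted), not taken from the commit, and assuming FluidLabelSet follows the same (server, name) constructor pattern as FluidDataSet:

// Usage sketch with assumed/hypothetical names; assumes ~classifier has
// already been fitted on labelled training data.
~predicted = FluidLabelSet(s, \predictedLabels);
~classifier.predict(~testFeatures, ~predicted, { |x| x.postln });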

@@ -4,7 +4,7 @@ var trained = 0, entering = 0;
var va = Array.fill(10,{0.5});
var input = Buffer.alloc(s,2);
var output = Buffer.alloc(s,10);
- var mlp = FluidMLPRegressor(s,[6],1,0,1000,0.1,0,1,0);
+ var mlp = FluidMLPRegressor(s,[6],1,1,0,1000,0.1,0,1,0);
var entry = 0;
~inData = FluidDataSet(s,\indata);

@@ -26,7 +26,7 @@ Routine{
~mlpHelpTarget.print
// make an MLPregressor
- ~mlp = FluidMLPRegressor(s, [3], FluidMLPRegressor.sigmoid, 0, 1000,0.1,0.1,1,0);//1000 epoch at a time
+ ~mlp = FluidMLPRegressor(s, [3], FluidMLPRegressor.sigmoid, FluidMLPRegressor.sigmoid, 0, 1000,0.1,0.1,1,0);//1000 epoch at a time
//train on it and observe the error
~mlp.fit(~mlpHelpSource,~mlpHelpTarget,{|x|x.postln;});
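
Since the comment above notes that each fit call runs 1000 epochs at a time, training can be driven in chunks and the reported error watched between passes. A hedged sketch reusing the same ~mlp, ~mlpHelpSource and ~mlpHelpTarget names; the loop structure and stopping point are assumptions, not part of the commit:

// Hedged sketch: run several 1000-epoch fit passes and post the error after each.
(
fork{
	5.do{ |i|
		var cond = Condition.new;
		~mlp.fit(~mlpHelpSource, ~mlpHelpTarget, { |err|
			("pass " ++ i ++ " error: " ++ err).postln;  // action receives the current error
			cond.unhang;
		});
		cond.hang;  // wait for this pass to finish before starting the next
	};
}
)
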

@@ -20,6 +20,9 @@ An link::Classes/Array:: that gives the sizes of any hidden layers in the networ
ARGUMENT:: activation
The activation function to use for the hidden layer units. Beware of the permitted ranges of each: relu (0->inf), sigmoid (0->1), tanh (-1,1).
+ ARGUMENT:: finalActivation
+ The activation function to use for the final layer units. Beware of the permitted ranges of each: relu (0->inf), sigmoid (0->1), tanh (-1,1).
ARGUMENT:: outputLayer
The layer whose output to return, counting backwards from the output: the default of 0 is the output layer, 1 is the last hidden layer, and so on.
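
Because the final layer now has its own activation, its range should match the range of the regression targets (for example sigmoid for targets in 0..1, tanh for -1..1). A minimal sketch, under the assumption that ~src and ~tgt are hypothetical, already-populated FluidDataSets with targets in 0..1:

// Illustrative sketch, not part of the commit: pick a finalActivation whose
// range matches the regression targets.
(
~mlp = FluidMLPRegressor(
	s, [4, 4],
	FluidMLPRegressor.relu,      // hidden layers: relu (0->inf)
	FluidMLPRegressor.sigmoid,   // final layer: sigmoid (0->1) to match targets
	0, 1000, 0.1, 0.9, 5, 0
);
~mlp.fit(~src, ~tgt, { |err| err.postln });  // observe the training error
)
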
@@ -100,7 +103,7 @@ code::
~test = FluidDataSet(s,\mlp_regressor_dest);
~output = FluidDataSet(s,\mlp_regress_out);
~tmpbuf = Buffer.alloc(s,1);
- ~regressor = FluidMLPRegressor(s,[2], FluidMLPRegressor.tanh, 0, 1000,0.1,0.1,1,0);
+ ~regressor = FluidMLPRegressor(s,[2], FluidMLPRegressor.tanh, FluidMLPRegressor.tanh, 0, 1000,0.1,0.1,1,0);
)
//Make source, target and test data
