From efab1d08b2730c30e5d762fc4ee38125c96ab313 Mon Sep 17 00:00:00 2001
From: Pierre Alexandre Tremblay
Date: Fri, 17 Jul 2020 21:00:45 +0100
Subject: [PATCH] MLPregressor: corrected the helpfiles and toy example for the
 new parameter in the list

---
 .../Examples/dataset/MLP-synth-control.scd      |  4 ++--
 .../Examples/dataset/MLP-toy-example.scd        |  2 +-
 .../HelpSource/Classes/FluidMLPRegressor.schelp | 11 ++++-------
 3 files changed, 7 insertions(+), 10 deletions(-)

diff --git a/release-packaging/Examples/dataset/MLP-synth-control.scd b/release-packaging/Examples/dataset/MLP-synth-control.scd
index 7d832a5..7d24b00 100644
--- a/release-packaging/Examples/dataset/MLP-synth-control.scd
+++ b/release-packaging/Examples/dataset/MLP-synth-control.scd
@@ -4,7 +4,7 @@ var trained = 0, entering = 0;
 var va = Array.fill(10,{0.5});
 var input = Buffer.alloc(s,2);
 var output = Buffer.alloc(s,10);
-var mlp = FluidMLPRegressor(s,[6],1,1000,0.1,0,1,0);
+var mlp = FluidMLPRegressor(s,[6],1,0,1000,0.1,0,1,0);
 var entry = 0;
 
 ~inData = FluidDataSet(s,\indata);
@@ -31,7 +31,7 @@ f.action = {arg x,y; //if trained, predict the point f.x f.y
 	}, { //if not entering a point
 		if (trained == 1, { //if trained
 			input.setn(0, [f.x, f.y]);
-			mlp.predictPoint(input,output,0,{
+			mlp.predictPoint(input,output,{
 				output.getn(0,10,{ |x|va = x; b.set(\val, va); {a.value = va;}.defer;});
 			});
 		});
diff --git a/release-packaging/Examples/dataset/MLP-toy-example.scd b/release-packaging/Examples/dataset/MLP-toy-example.scd
index 79fabf2..6d5dd37 100644
--- a/release-packaging/Examples/dataset/MLP-toy-example.scd
+++ b/release-packaging/Examples/dataset/MLP-toy-example.scd
@@ -26,7 +26,7 @@ Routine{
 ~mlpHelpTarget.print
 
 // make an MLPregressor
-~mlp = FluidMLPRegressor(s,[3],FluidMLPRegressor.sigmoid,1000,0.1,0.1,1,0);//1000 epoch at a time
+~mlp = FluidMLPRegressor(s, [3], FluidMLPRegressor.sigmoid, 0, 1000,0.1,0.1,1,0);//1000 epoch at a time
 
 //train on it and observe the error
 ~mlp.fit(~mlpHelpSource,~mlpHelpTarget,{|x|x.postln;});
diff --git a/release-packaging/HelpSource/Classes/FluidMLPRegressor.schelp b/release-packaging/HelpSource/Classes/FluidMLPRegressor.schelp
index ed92fa0..cb7da81 100644
--- a/release-packaging/HelpSource/Classes/FluidMLPRegressor.schelp
+++ b/release-packaging/HelpSource/Classes/FluidMLPRegressor.schelp
@@ -20,6 +20,9 @@ An link::Classes/Array:: that gives the sizes of any hidden layers in the networ
 ARGUMENT:: activation
 The activation function to use for the hidden layer units.
 
+ARGUMENT:: outputLayer
+The layer whose output to return, counted backwards from the end of the network: the default of 0 is the output layer, 1 is the last hidden layer, and so on.
+
 ARGUMENT:: maxIter
 The maximum number of iterations to use in training.
 
@@ -65,9 +68,6 @@ Input data
 ARGUMENT:: targetDataSet
 Output data
 
-ARGUMENT:: layer
-Layer whose output to return.
-
 ARGUMENT:: action
 Function to run when complete
 
@@ -80,9 +80,6 @@ Input point
 ARGUMENT:: targetBuffer
 Output point
 
-ARGUMENT:: layer
-Layer whose output to return.
-
 ARGUMENT:: action
 A function to run when complete
 
@@ -103,7 +100,7 @@ code::
 ~test = FluidDataSet(s,\mlp_regressor_dest);
 ~output = FluidDataSet(s,\mlp_regress_out);
 ~tmpbuf = Buffer.alloc(s,1);
-~regressor = FluidMLPRegressor(s,[2],FluidMLPRegressor.tanh,1000,0.1,0.1,1,0);
+~regressor = FluidMLPRegressor(s,[2], FluidMLPRegressor.tanh, 0, 1000,0.1,0.1,1,0);
 )
 
 //Make source, target and test data
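
For readers skimming the patch, here is a minimal sketch of how a call site changes with the new outputLayer constructor argument. It is hypothetical and not part of the patch: the constructor argument names after maxIter (learning rate, momentum, batch size, validation) are assumptions read off the positional values in the examples above, the server is assumed booted with FluCoMa installed, and the fit step is omitted.

(
// Old signature: FluidMLPRegressor(server, hiddenLayers, activation, maxIter, ...)
// ~mlp = FluidMLPRegressor(s, [6], FluidMLPRegressor.sigmoid, 1000, 0.1, 0, 1, 0);

// New signature: outputLayer sits between activation and maxIter;
// 0 (the default) taps the output layer, 1 the last hidden layer, and so on.
~mlp = FluidMLPRegressor(s, [6], FluidMLPRegressor.sigmoid, 0, 1000, 0.1, 0, 1, 0);

// predictPoint no longer takes a per-call layer argument: the tapped layer is
// now fixed by the outputLayer parameter above. Training with fit is omitted,
// so this only illustrates the call shape, not a meaningful prediction.
~in  = Buffer.alloc(s, 2);
~out = Buffer.alloc(s, 10);
~mlp.predictPoint(~in, ~out, { ~out.getn(0, 10, { |v| v.postln }) });
)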