From fd0bd47cf33976ba30af94e827d50ff06705b383 Mon Sep 17 00:00:00 2001 From: Ted Moore Date: Mon, 31 Jan 2022 17:58:22 -0500 Subject: [PATCH] begin cleaning up of the examples folder --- .../Examples/GUI_examples/HPSS.scd | 100 ----- .../Examples/GUI_examples/NMF4.scd | 111 ------ .../GUI_examples/NoveltySegmentation.scd | 150 -------- .../Examples/GUI_examples/SineExtraction.scd | 107 ------ .../GUI_examples/TransientExtraction.scd | 103 ----- .../GUI_examples/TransientSegmentation.scd | 148 -------- .../fileiterator-2passes.scd | 45 --- .../buffer_compositing/fileiterator.scd | 40 -- .../0-demo-dataset-maker-utilities.scd | 169 --------- .../10a-weighted-MFCCs-comparison.scd | 235 ------------ .../11-compositing-datasets.scd | 355 ------------------ .../12-windowed-clustered-segmentation.scd | 230 ------------ .../13-massive-parallelisation-example.scd | 324 ---------------- .../1a-starting-1D-example.scd | 73 ---- .../2a-starting-1D-example2.scd | 67 ---- .../3a-classifier-example.scd | 64 ---- .../4-regressor-example.scd | 74 ---- ...malization-and-standardization-example.scd | 120 ------ .../8b-mlp-synth-control.scd | 101 ----- .../2-3Dscaling.scd | 161 -------- .../Examples/nmf/JiT-NMF-classifier.scd | 202 ---------- 21 files changed, 2979 deletions(-) delete mode 100755 release-packaging/Examples/GUI_examples/HPSS.scd delete mode 100755 release-packaging/Examples/GUI_examples/NMF4.scd delete mode 100755 release-packaging/Examples/GUI_examples/NoveltySegmentation.scd delete mode 100755 release-packaging/Examples/GUI_examples/SineExtraction.scd delete mode 100755 release-packaging/Examples/GUI_examples/TransientExtraction.scd delete mode 100755 release-packaging/Examples/GUI_examples/TransientSegmentation.scd delete mode 100644 release-packaging/Examples/buffer_compositing/fileiterator-2passes.scd delete mode 100644 release-packaging/Examples/buffer_compositing/fileiterator.scd delete mode 100644 release-packaging/Examples/dataset/0-demo-dataset-maker-utilities.scd delete mode 100644 release-packaging/Examples/dataset/1-learning examples/10a-weighted-MFCCs-comparison.scd delete mode 100644 release-packaging/Examples/dataset/1-learning examples/11-compositing-datasets.scd delete mode 100644 release-packaging/Examples/dataset/1-learning examples/12-windowed-clustered-segmentation.scd delete mode 100644 release-packaging/Examples/dataset/1-learning examples/13-massive-parallelisation-example.scd delete mode 100644 release-packaging/Examples/dataset/1-learning examples/1a-starting-1D-example.scd delete mode 100644 release-packaging/Examples/dataset/1-learning examples/2a-starting-1D-example2.scd delete mode 100644 release-packaging/Examples/dataset/1-learning examples/3a-classifier-example.scd delete mode 100644 release-packaging/Examples/dataset/1-learning examples/4-regressor-example.scd delete mode 100644 release-packaging/Examples/dataset/1-learning examples/5-normalization-and-standardization-example.scd delete mode 100644 release-packaging/Examples/dataset/1-learning examples/8b-mlp-synth-control.scd delete mode 100644 release-packaging/Examples/dataset/2-various other examples/scaling-dimension-as-weighting/2-3Dscaling.scd delete mode 100644 release-packaging/Examples/nmf/JiT-NMF-classifier.scd diff --git a/release-packaging/Examples/GUI_examples/HPSS.scd b/release-packaging/Examples/GUI_examples/HPSS.scd deleted file mode 100755 index f426b04..0000000 --- a/release-packaging/Examples/GUI_examples/HPSS.scd +++ /dev/null @@ -1,100 +0,0 @@ -( -var win, soundFileView, 
freqSscope,loadButton, loopButton; -var harmSlider, percSlider, mixSlider; -var soundFile, buffer; -var synthDef, synth; -var makeSynthDef; - -Font.default = Font("Monaco", 16); -buffer = Buffer.new; -win = Window.new("HPSS", Rect(200,200,800,450)).background_(Color.gray); - -soundFileView = SoundFileView.new(win) - .gridOn_(false) - .waveColors_([Color.white]); - -loadButton = Button(win, Rect(0, 0, 100, 100)) - .minHeight_(150) - .states_([["Load", Color.grey, Color.grey(0.8)]]); - -loopButton = Button(win, Rect(0, 0, 100, 100)) - .minHeight_(150) - .states_( - [["Play", Color.grey, Color.grey(0.8)], - ["Stop", Color.grey, Color.grey(0.2)]] - ); - -harmSlider = Slider(win, Rect(0, 0, 100, 10)).value_(0.5); -percSlider = Slider(win, Rect(0, 0, 100, 10)).value_(0.5); -mixSlider = Slider(win, Rect(0, 0, 100, 10)).value_(0.5); -freqSscope = FreqScopeView(win, server:Server.default); -freqSscope.active_(true); - -loadButton.action_{ - FileDialog({ |path| - soundFile = SoundFile.new; - soundFile.openRead(path[0]); - buffer = Buffer.read(Server.default, path[0]); - soundFileView.soundfile = soundFile; - soundFileView.read(0, soundFile.numFrames); - }); -}; - -loopButton.action_{|but| - if(but.value == 1, { - synth = Synth(\hpssExtractionDemo, [\buffer, buffer.bufnum]); - mixSlider.action.value(mixSlider); - },{ - synth.free; - }); -}; - - -mixSlider.action_{|slider| - synth.set(\bal, ControlSpec(0, 1).map(slider.value)); -}; - - - -makeSynthDef = { - -synthDef = SynthDef(\hpssExtractionDemo, - {|buffer, bal = 0.5| - var player, fhpss, mix; - var harmSize = (2 * ControlSpec(1, 100, step:1).map(harmSlider.value)) - 1; - var percSize = (2 * ControlSpec(1,100, step:1).map(percSlider.value)) - 1; - player = PlayBuf.ar(1, buffer, loop:1); - fhpss = FluidHPSS.ar(in: player, harmFilterSize: harmSize, percFilterSize: percSize, maskingMode: 1, harmThreshFreq1: 0.1, harmThreshAmp1: 0, harmThreshFreq2: 0.5, harmThreshAmp2: 0, percThreshFreq1: 0.1, percThreshAmp1: 0, percThreshFreq2: 0.5, percThreshAmp2: 0, windowSize: 1024, hopSize: 256, fftSize: -1); - - mix =(bal * fhpss[0]) + ((1 - bal) * fhpss[1]); - Out.ar(0,Pan2.ar(mix)); - } -).add; - -}; - -win.layout_( - VLayout( - [ - HLayout( - [loadButton, stretch:1], - [soundFileView, stretch:5] - ), stretch:2 - ], - [ - HLayout( - [loopButton, stretch:1], - [VLayout( - HLayout(StaticText(win).string_("H Size ").minWidth_(100), harmSlider), - HLayout(StaticText(win).string_("P Size").minWidth_(100), percSlider), - HLayout(StaticText(win).string_("Mix").minWidth_(100), mixSlider) - ), stretch:5] - ), stretch:2 - ], - [freqSscope, stretch:2] - ) -); - -makeSynthDef.value; -win.front; -) \ No newline at end of file diff --git a/release-packaging/Examples/GUI_examples/NMF4.scd b/release-packaging/Examples/GUI_examples/NMF4.scd deleted file mode 100755 index cdd7833..0000000 --- a/release-packaging/Examples/GUI_examples/NMF4.scd +++ /dev/null @@ -1,111 +0,0 @@ -( -var server; -var win, soundFileView, loadButton, loopButton; -var sliders; -var soundFile, audioBuffer, destBuffer; -var synthDef, synth; -var sl1, sl2, sl3, sl4; - -server = Server.default; -Font.default = Font("Monaco", 16); - -audioBuffer = Buffer.new; -destBuffer = Buffer.new; - - -synthDef = SynthDef(\nmfDemo,{|bufnum, a1, a2, a3, a4| - var p = PlayBuf.ar(4, bufnum, loop:1); - var mix = (a1*p[0]) + (a2 * p[1]) + (a3*p[2]) + (a4*p[3]); - Out.ar(0, Pan2.ar(mix)); -}).add; - - - -win = Window.new("NMF4", - Rect(200,200,800,450)).background_(Color.gray); - -soundFileView = SoundFileView.new(win) - 
.gridOn_(false) - .waveColors_([Color.white]); - -loadButton = Button(win, Rect(0, 0, 100, 100)) - .minHeight_(150) - .states_([["Load", Color.grey, Color.grey(0.8)], - ["Wait", Color.grey, Color.grey(0.2)]] - ); - -loopButton = Button(win, Rect(0, 0, 100, 100)) - .minHeight_(150) - .states_( - [["Play", Color.grey, Color.grey(0.8)], - ["Stop", Color.grey, Color.grey(0.2)]] - ); - -sliders = Array.fill(4, {|i| - var s = Slider(win, Rect(0, 0, 100, 10)).value_(0.5); - s.action_{ - var sym = ("a"++(i+1)).asSymbol; - synth.set(sym, ControlSpec(0, 1).map(s.value)); - } -}); - - -loadButton.action_{ - FileDialog({ |path| - soundFile = SoundFile.new; - soundFile.openRead(path[0]); - soundFileView.soundfile = soundFile; - soundFileView.read(0, soundFile.numFrames); - Routine{ - audioBuffer = Buffer.read(server, path[0]); - server.sync; - FluidBufNMF.process(server, - audioBuffer.bufnum,resynth:destBuffer.bufnum, components:4 - ); - server.sync; - destBuffer.query; - server.sync; - {loadButton.value_(0)}.defer; - }.play; - }); -}; - -loopButton.action_{|but| - var a1 = ControlSpec(0, 1).map(sliders[0].value); - var a2 = ControlSpec(0, 1).map(sliders[1].value); - var a3 = ControlSpec(0, 1).map(sliders[2].value); - var a4 = ControlSpec(0, 1).map(sliders[3].value); - - if(but.value == 1, { - synth = Synth(\nmfDemo, - [\bufnum, destBuffer.bufnum, \a1, a1, \a2, a2, \a3, a3, \a4, a4]); - },{ - synth.free; - }); -}; - - -win.layout_( - VLayout( - [ - HLayout( - [loadButton, stretch:1], - [soundFileView, stretch:5] - ), stretch:2 - ], - [ - HLayout( - [loopButton, stretch:1], - [VLayout( - HLayout(StaticText(win).string_("source 1 ").minWidth_(100), sliders[0]), - HLayout(StaticText(win).string_("source 2 ").minWidth_(100), sliders[1]), - HLayout(StaticText(win).string_("source 3 ").minWidth_(100), sliders[2]), - HLayout(StaticText(win).string_("source 4 ").minWidth_(100), sliders[3]) - ), stretch:5] - ), stretch:2 - ] - ) -); - -win.front; -) diff --git a/release-packaging/Examples/GUI_examples/NoveltySegmentation.scd b/release-packaging/Examples/GUI_examples/NoveltySegmentation.scd deleted file mode 100755 index f8469e8..0000000 --- a/release-packaging/Examples/GUI_examples/NoveltySegmentation.scd +++ /dev/null @@ -1,150 +0,0 @@ -( -var server; -var win, soundFileView,loadButton, processButton; -var ksSlider, thSlider; -var soundFile, audioBuffer, slicesBuffer, slicesArray; -var addSelections, playFunc, stopFunc; -var synthDef, synth; -var synths; - -var playing, currentSelection, colors, prevColor; -var qwerty = "1234567890qwertyuiopasdfghjklzxcvbnm"; -playing = Array.fill(qwerty.size, {false}); -server = Server.default; -Font.default = Font("Monaco", 16); - -audioBuffer = Buffer.new; -slicesBuffer = Buffer.new; - -colors = Array.fill(qwerty.size, {Color.rand}); -synths = Array.fill(qwerty.size, {nil}); - -synthDef = SynthDef(\noveltySegDemo,{|buf, start, end| - Out.ar(0, BufRd.ar(1, buf, Phasor.ar(1, 1, start, end))); -}).add; - -playFunc = {|index| - var dur; - currentSelection = index; - if(playing[index].not){ - synths[index] = Synth(\noveltySegDemo, - [\buf, audioBuffer.bufnum, - \start, slicesArray[index], - \end, slicesArray[index+1] - ]); - playing[index] = true; - }; - soundFileView.setSelectionColor(currentSelection, Color.white); -}; - -stopFunc = {|index| synths[index].free; playing[index] = false; - soundFileView.setSelectionColor( - index, colors[index] - ); -}; - - -win = Window.new("NoveltySegmentation", - Rect(200,200,800,450)).background_(Color.gray); - -win.view.keyDownAction_{|view, 
char, modifiers, unicode, keycode, key| - var num = qwerty.indexOf(char); - if (num.notNil&& slicesArray.notNil){ - playFunc.value(num); - } -}; - -win.view.keyUpAction_{|view, char| - var num = qwerty.indexOf(char); - if(num.notNil){ - stopFunc.value(num); - } -}; - -soundFileView = SoundFileView.new(win) - .gridOn_(false) - .waveColors_([Color.white]); - -loadButton = Button(win, Rect(0, 0, 100, 100)) - .minHeight_(150) - .states_([["Load", Color.grey, Color.grey(0.8)]]); - -processButton = Button(win, Rect(0, 0, 100, 100)) - .minHeight_(150) - .states_( - [["Process", Color.grey, Color.grey(0.8)], - ["Wait", Color.grey, Color.grey(0.2)]] - ); - -ksSlider = Slider(win, Rect(0, 0, 100, 10)).value_(0.5); -thSlider = Slider(win, Rect(0, 0, 100, 10)).value_(0.5); - - -loadButton.action_{ - FileDialog({ |path| - soundFile = SoundFile.new; - soundFile.openRead(path[0]); - audioBuffer = Buffer.read(server, path[0]); - soundFileView.soundfile = soundFile; - soundFileView.read(0, soundFile.numFrames); - }); -}; - -processButton.action_{|but| - var ks = 2*(ControlSpec(2, 100, step:1).map(ksSlider.value)) - 1; - var th = ControlSpec(0, 1).map(thSlider.value); - if(but.value == 1, { - Routine{ - FluidBufNoveltySlice.process( - server, - source:audioBuffer.bufnum, - indices:slicesBuffer.bufnum, - kernelSize:ks, - threshold: th - ); - server.sync; - slicesBuffer.loadToFloatArray(action:{|arr| - slicesArray = arr; - { processButton.value_(0); - addSelections.value(slicesArray) - }.defer; - - }); - }.play; - }); -}; - - - -addSelections = {|array| - var nSegments = min(array.size, soundFileView.selections.size) - 1; - soundFileView.selections.do({|sel, i| soundFileView.selectNone(i)}); - nSegments.do({|i| - soundFileView.setSelectionStart(i, array[i]); - soundFileView.setSelectionSize(i, array[i+1] - array[i]); - soundFileView.setSelectionColor(i, colors[i]); - }); -}; - -win.layout_( - VLayout( - [ - HLayout( - [loadButton, stretch:1], - [soundFileView, stretch:5] - ), stretch:2 - ], - [ - HLayout( - [processButton, stretch:1], - [VLayout( - HLayout(StaticText(win).string_("Kernel ").minWidth_(100), ksSlider), - HLayout(StaticText(win).string_(" Threshold").minWidth_(100), thSlider) - ), stretch:5] - ), stretch:2 - ] - ) -); - -win.front; -) diff --git a/release-packaging/Examples/GUI_examples/SineExtraction.scd b/release-packaging/Examples/GUI_examples/SineExtraction.scd deleted file mode 100755 index 9903797..0000000 --- a/release-packaging/Examples/GUI_examples/SineExtraction.scd +++ /dev/null @@ -1,107 +0,0 @@ -( -var win, soundFileView, freqSscope,loadButton, loopButton; -var thresholdSlider, lenSlider, mixSlider; -var soundFile, buffer; -var synthDef, synth; - -Font.default = Font("Monaco", 16); -buffer = Buffer.new; -win = Window.new("SineExtraction", - Rect(200,200,800,450)).background_(Color.gray); - -soundFileView = SoundFileView.new(win) - .gridOn_(false) - .waveColors_([Color.white]); - -loadButton = Button(win, Rect(0, 0, 100, 100)) - .minHeight_(150) - .states_([["Load", Color.grey, Color.grey(0.8)]]); - -loopButton = Button(win, Rect(0, 0, 100, 100)) - .minHeight_(150) - .states_( - [["Play", Color.grey, Color.grey(0.8)], - ["Stop", Color.grey, Color.grey(0.2)]] - ); - -thresholdSlider = Slider(win, Rect(0, 0, 100, 10)).value_(0.5); -lenSlider = Slider(win, Rect(0, 0, 100, 10)).value_(0.5); -mixSlider = Slider(win, Rect(0, 0, 100, 10)).value_(0.5); -freqSscope = FreqScopeView(win, server:Server.default); -freqSscope.active_(true); - -loadButton.action_{ - FileDialog({ |path| - soundFile 
= SoundFile.new; - soundFile.openRead(path[0]); - buffer = Buffer.read(Server.default, path[0]); - soundFileView.soundfile = soundFile; - soundFileView.read(0, soundFile.numFrames); - }); -}; - -loopButton.action_{|but| - if(but.value == 1, { - synth = Synth(\sineExtractionDemo, [\buffer, buffer.bufnum]); - mixSlider.action.value(mixSlider); - thresholdSlider.action.value(thresholdSlider); - lenSlider.action.value(lenSlider); - },{ - synth.free; - }); -}; - - -mixSlider.action_{|slider| - synth.set(\bal, ControlSpec(0, 1).map(slider.value)); -}; - - -thresholdSlider.action_{|slider| - synth.set(\threshold, ControlSpec(-144, 0).map(slider.value)); -}; - - -lenSlider.action_{|slider| - synth.set(\minLength, ControlSpec(0, 30).map(slider.value)); -}; - - -synthDef = SynthDef(\sineExtractionDemo, - {|buffer, threshold = 0.9, minLength = 15, bal = 0.5| - var player, fse, mix; - player = PlayBuf.ar(1, buffer, loop:1); - fse = FluidSines.ar(in: player, bandwidth: 76, - detectionThreshold: threshold, minTrackLen: minLength, - windowSize: 2048, - hopSize: 512, fftSize: 8192 - ); - mix =(bal * fse[0]) + ((1 - bal) * fse[1]); - Out.ar(0,Pan2.ar(mix)); - } -).add; - -win.layout_( - VLayout( - [ - HLayout( - [loadButton, stretch:1], - [soundFileView, stretch:5] - ), stretch:2 - ], - [ - HLayout( - [loopButton, stretch:1], - [VLayout( - HLayout(StaticText(win).string_("Threshold ").minWidth_(100), thresholdSlider), - HLayout(StaticText(win).string_("Min Length").minWidth_(100), lenSlider), - HLayout(StaticText(win).string_("Mix").minWidth_(100), mixSlider) - ), stretch:5] - ), stretch:2 - ], - [freqSscope, stretch:2] - ) -); - -win.front; -) diff --git a/release-packaging/Examples/GUI_examples/TransientExtraction.scd b/release-packaging/Examples/GUI_examples/TransientExtraction.scd deleted file mode 100755 index 35b882d..0000000 --- a/release-packaging/Examples/GUI_examples/TransientExtraction.scd +++ /dev/null @@ -1,103 +0,0 @@ -( -var win, soundFileView, freqSscope,loadButton, loopButton; -var fwSlider, bwSlider, mixSlider; -var soundFile, buffer; -var synthDef, synth; - -Font.default = Font("Monaco", 16); -buffer = Buffer.new; -win = Window.new("TransientExtraction", - Rect(200,200,800,450)).background_(Color.gray); - -soundFileView = SoundFileView.new(win) - .gridOn_(false) - .waveColors_([Color.white]); - -loadButton = Button(win, Rect(0, 0, 100, 100)) - .minHeight_(150) - .states_([["Load", Color.grey, Color.grey(0.8)]]); - -loopButton = Button(win, Rect(0, 0, 100, 100)) - .minHeight_(150) - .states_( - [["Play", Color.grey, Color.grey(0.8)], - ["Stop", Color.grey, Color.grey(0.2)]] - ); - -fwSlider = Slider(win, Rect(0, 0, 100, 10)).value_(0.5); -bwSlider = Slider(win, Rect(0, 0, 100, 10)).value_(0.5); -mixSlider = Slider(win, Rect(0, 0, 100, 10)).value_(0.5); -freqSscope = FreqScopeView(win, server:Server.default); -freqSscope.active_(true); - -loadButton.action_{ - FileDialog({ |path| - soundFile = SoundFile.new; - soundFile.openRead(path[0]); - buffer = Buffer.read(Server.default, path[0]); - soundFileView.soundfile = soundFile; - soundFileView.read(0, soundFile.numFrames); - }); -}; - -loopButton.action_{|but| - if(but.value == 1, { - synth = Synth(\transientExtractionDemo, [\buffer, buffer.bufnum]); - mixSlider.action.value(mixSlider); - fwSlider.action.value(fwSlider); - bwSlider.action.value(bwSlider); - },{ - synth.free; - }); -}; - - -mixSlider.action_{|slider| - synth.set(\bal, ControlSpec(0, 1).map(slider.value)); -}; - - -fwSlider.action_{|slider| - synth.set(\fw, ControlSpec(0.0001, 
3, \exp).map(slider.value)); -}; - - -bwSlider.action_{|slider| - synth.set(\bw, ControlSpec(0.0001, 3, \exp).map(slider.value)); -}; - - -synthDef = SynthDef(\transientExtractionDemo, - {|buffer, fw = 3, bw = 1, bal = 0.5| - var player, fte, mix; - player = PlayBuf.ar(1, buffer, loop:1); - fte = FluidTransients.ar(in: player, threshFwd:fw, threshBack:bw, clumpLength:256); - mix =(bal * fte[0]) + ((1 - bal) * fte[1]); - Out.ar(0,Pan2.ar(mix)); - } -).add; - -win.layout_( - VLayout( - [ - HLayout( - [loadButton, stretch:1], - [soundFileView, stretch:5] - ), stretch:2 - ], - [ - HLayout( - [loopButton, stretch:1], - [VLayout( - HLayout(StaticText(win).string_("Forward Th ").minWidth_(100), fwSlider), - HLayout(StaticText(win).string_("Backward Th").minWidth_(100), bwSlider), - HLayout(StaticText(win).string_("Mix").minWidth_(100), mixSlider) - ), stretch:5] - ), stretch:2 - ], - [freqSscope, stretch:2] - ) -); - -win.front; -) diff --git a/release-packaging/Examples/GUI_examples/TransientSegmentation.scd b/release-packaging/Examples/GUI_examples/TransientSegmentation.scd deleted file mode 100755 index b7b69f5..0000000 --- a/release-packaging/Examples/GUI_examples/TransientSegmentation.scd +++ /dev/null @@ -1,148 +0,0 @@ -( -var server; -var win, soundFileView,loadButton, processButton; -var fwSlider, bwSlider, debounceSlider; -var soundFile, audioBuffer, slicesBuffer, slicesArray; -var addSelections, playFunc, stopFunc; -var synthDef, synth; - -var playing, currentSelection, colors, prevColor; -var qwerty = "1234567890qwertyuiopasdfghjklzxcvbnm"; - -playing = false; -server = Server.default; -Font.default = Font("Monaco", 16); - -audioBuffer = Buffer.new; -slicesBuffer = Buffer.new; - -colors = Array.fill(64, {Color.rand}); - -synthDef = SynthDef(\transientSegDemo,{|buf, start, end| - Out.ar(0, BufRd.ar(1, buf, Phasor.ar(1, 1, start, end))); -}).add; - -playFunc = {|index| - var dur; - currentSelection = index; - if(playing.not){ - synth = Synth(\transientSegDemo, - [\buf, audioBuffer.bufnum, - \start, slicesArray[index], - \end, slicesArray[index+1] - ]); - playing = true; - }; - soundFileView.setSelectionColor(currentSelection, Color.white); -}; - -stopFunc = {synth.free; playing = false; - soundFileView.setSelectionColor(currentSelection, colors[currentSelection]); - -}; - - -win = Window.new("TransientSegmentation", - Rect(200,200,800,450)).background_(Color.gray); - -win.view.keyDownAction_{|view, char, modifiers, unicode, keycode, key| - var num = qwerty.indexOf(char); - if(num.notNil && slicesArray.notNil){ - playFunc.value(num); - } -}; - -win.view.keyUpAction_{stopFunc.value;}; - - - -soundFileView = SoundFileView.new(win) - .gridOn_(false) - .waveColors_([Color.white]); - -loadButton = Button(win, Rect(0, 0, 100, 100)) - .minHeight_(150) - .states_([["Load", Color.grey, Color.grey(0.8)]]); - -processButton = Button(win, Rect(0, 0, 100, 100)) - .minHeight_(150) - .states_( - [["Process", Color.grey, Color.grey(0.8)], - ["Wait", Color.grey, Color.grey(0.2)]] - ); - -fwSlider = Slider(win, Rect(0, 0, 100, 10)).value_(0.5); -bwSlider = Slider(win, Rect(0, 0, 100, 10)).value_(0.5); -debounceSlider = Slider(win, Rect(0, 0, 100, 10)).value_(0.5); - -loadButton.action_{ - FileDialog({ |path| - soundFile = SoundFile.new; - soundFile.openRead(path[0]); - audioBuffer = Buffer.read(server, path[0]); - soundFileView.soundfile = soundFile; - soundFileView.read(0, soundFile.numFrames); - }); -}; - -processButton.action_{|but| - var fw = ControlSpec(0.0001, 3, \exp).map(fwSlider.value); - var bw = 
ControlSpec(0.0001, 3, \exp).map(bwSlider.value); - var db = ControlSpec(1, 4410).map(debounceSlider.value); - if(but.value == 1, { - Routine{ - FluidBufTransientSlice.process( - server, - source:audioBuffer.bufnum, - indices:slicesBuffer.bufnum, - threshFwd: fw, - threshBack: bw, - clumpLength:db - ); - server.sync; - slicesBuffer.loadToFloatArray(action:{|arr| - slicesArray = arr; - { processButton.value_(0); - addSelections.value(slicesArray) - }.defer; - - }); - }.play; - }); -}; - - - -addSelections = {|array| - var nSegments = min(array.size, soundFileView.selections.size) - 1; - soundFileView.selections.do({|sel, i| soundFileView.selectNone(i)}); - nSegments.do({|i| - soundFileView.setSelectionStart(i, array[i]); - soundFileView.setSelectionSize(i, array[i+1] - array[i]); - soundFileView.setSelectionColor(i, colors[i]); - }); -}; - -win.layout_( - VLayout( - [ - HLayout( - [loadButton, stretch:1], - [soundFileView, stretch:5] - ), stretch:2 - ], - [ - HLayout( - [processButton, stretch:1], - [VLayout( - HLayout(StaticText(win).string_("Forward Th ").minWidth_(100), fwSlider), - HLayout(StaticText(win).string_("Backward Th").minWidth_(100), bwSlider), - HLayout(StaticText(win).string_("Debounce").minWidth_(100), debounceSlider) - ), stretch:5] - ), stretch:2 - ] - ) -); - -win.front; -) diff --git a/release-packaging/Examples/buffer_compositing/fileiterator-2passes.scd b/release-packaging/Examples/buffer_compositing/fileiterator-2passes.scd deleted file mode 100644 index 502518c..0000000 --- a/release-packaging/Examples/buffer_compositing/fileiterator-2passes.scd +++ /dev/null @@ -1,45 +0,0 @@ -//this patch requests a folder and will iterate through all accepted audiofiles and concatenate them in the destination buffer. It will also yield an array with the numFrame where files start in the new buffer. - -( -var fileNames; -c = []; - -FileDialog.new({|selection| - var total, totaldur = 0, maxchans = 0; - t = Main.elapsedTime; - fileNames = PathName.new(selection[0]) - .entries - .select({|f| - [\wav, \WAV, \mp3,\aif].includes(f.extension.asSymbol);}); - total = fileNames.size(); - fileNames.do({arg fp; - SoundFile.use(fp.asAbsolutePath , { - arg file; - var dur = file.numFrames; - c = c.add(totaldur); - totaldur = totaldur + dur; - maxchans = maxchans.max(file.numChannels); - }); - }); - Routine{ - b = Buffer.alloc(s,totaldur,maxchans); - s.sync; - fileNames.do{|f, i| - f.postln; - ("Loading"+(i+1)+"of"+total).postln; - Buffer.read(s, f.asAbsolutePath,action:{arg tempbuf; FluidBufCompose.process(s,tempbuf,destination:b,destStartFrame:c[i],action:{tempbuf.free});}); - }; - s.sync; - ("loading buffers done in" + (Main.elapsedTime - t).round(0.1) + "seconds.").postln; - }.play; -}, fileMode:2); -) - -b.plot -c.postln -b.play - - -{PlayBuf.ar(1,b.bufnum,startPos:c[15])}.play - -Buffer.freeAll \ No newline at end of file diff --git a/release-packaging/Examples/buffer_compositing/fileiterator.scd b/release-packaging/Examples/buffer_compositing/fileiterator.scd deleted file mode 100644 index 7e0f32a..0000000 --- a/release-packaging/Examples/buffer_compositing/fileiterator.scd +++ /dev/null @@ -1,40 +0,0 @@ -//destination buffer -( -b = Buffer.new(); -c = Array.new(); -) - -//this patch requests a folder and will iterate through all accepted audiofiles and concatenate them in the destination buffer. It will also yield an array with the numFrame where files start in the new buffer. 
- -( -var tempbuf,dest=0, fileNames; - -FileDialog.new({|selection| - var total; - t = Main.elapsedTime; - fileNames = PathName.new(selection[0]) - .entries - .select({|f| - [\wav, \WAV, \mp3,\aif].includes(f.extension.asSymbol);}); - total = fileNames.size(); - Routine{ - fileNames.do{|f, i| - f.postln; - ("Loading"+(i+1)+"of"+total).postln; - tempbuf = Buffer.read(s,f.asAbsolutePath); - s.sync; - c = c.add(dest); - FluidBufCompose.process(s,tempbuf,destStartFrame:dest,destination:b); - s.sync; - dest = b.numFrames; - }; - ("loading buffers done in" + (Main.elapsedTime - t).round(0.1) + "seconds.").postln; - }.play; -}, fileMode:2); -) - -b.plot -c.postln -b.play - -{PlayBuf.ar(1,b.bufnum,startPos:c[15])}.play diff --git a/release-packaging/Examples/dataset/0-demo-dataset-maker-utilities.scd b/release-packaging/Examples/dataset/0-demo-dataset-maker-utilities.scd deleted file mode 100644 index e636297..0000000 --- a/release-packaging/Examples/dataset/0-demo-dataset-maker-utilities.scd +++ /dev/null @@ -1,169 +0,0 @@ -// define a few processes -( -~ds = FluidDataSet(s);//no name needs to be provided -//define as many buffers as we have parallel voices/threads in the extractor processing (default is 4) -~mfccbuf = 4.collect{Buffer.new}; -~statsbuf = 4.collect{Buffer.new}; -~flatbuf = 4.collect{Buffer.new}; - -// here we instantiate a loader which creates a single large buffer with a dictionary of what was included in it -// ~loader = FluidLoadFolder("/Volumes/machins/projets/newsfeed/sons/smallnum/"); -~loader = FluidLoadFolder(File.realpath(FluidLoadFolder.class.filenameSymbol).dirname +/+ "../AudioFiles"); - -// here we instantiate a further slicing step if needs be, which iterate through all the items of the FluidLoadFolder and slice the slices with the declared function. -~slicer = FluidSliceCorpus({ |src,start,num,dest| - FluidBufOnsetSlice.kr(src, start, num, metric: 9, minSliceLength: 17, indices:dest, threshold:0.7, blocking: 1) -}); - -// here we instantiate a process of description and dataset writing, which will run each slice of the previous slice and write the entry. Note the chain of Done.kr triggers. -~extractor = FluidProcessSlices({|src,start,num,data| - var mfcc, stats, writer, flatten,mfccBuf, statsBuf, flatBuf, identifier, voice; - identifier = data.key; - voice = data.value[\voice]; - mfcc = FluidBufMFCC.kr(src, startFrame:start, numFrames:num, numChans:1, features:~mfccbuf[voice], trig:1, blocking: 1); - stats = FluidBufStats.kr(~mfccbuf[voice], stats:~statsbuf[voice], trig:Done.kr(mfcc), blocking: 1); - flatten = FluidBufFlatten.kr(~statsbuf[voice], destination:~flatbuf[voice], trig:Done.kr(stats), blocking: 1); - writer = FluidDataSetWr.kr(~ds, identifier, nil, ~flatbuf[voice], trig: Done.kr(flatten), blocking: 1) -}); -) - - -////////////////////////////////////////////////////////////////////////// -//loading process - -// just run the loader -( -t = Main.elapsedTime; -~loader.play(s,action:{(Main.elapsedTime - t).postln;"Loaded".postln;}); -) - -//load and play to test if it is that quick - it is! 
-( -t = Main.elapsedTime; -~loader.play(s,action:{(Main.elapsedTime - t).postln;"Loaded".postln;{var start, stop; PlayBuf.ar(~loader.index[~loader.index.keys.asArray.last.asSymbol][\numchans],~loader.buffer,startPos: ~loader.index[~loader.index.keys.asArray.last.asSymbol][\bounds][0])}.play;}); -) - -//ref to the buffer -~loader.buffer -//size of item -~loader.index.keys.size -//a way to get all keys info sorted by time -~stuff = Array.newFrom(~loader.index.keys).sort.collect{|x|~loader.index[x][\bounds]}.sort{|a,b| a[0]u).postln; - } -} -) - -// or write to file a human readable, sorted version of the database after sorting it by index. -( -a = File(Platform.defaultTempDir ++ "sc-loading.json","w"); -~stuffsorted = Array.newFrom(~loader.index.keys).sort{|a,b| ~loader.index[a][\bounds][0]< ~loader.index[b][\bounds][0]}.do{|k| - v = ~loader.index[k]; - a.write(k.asString ++ "\n"); - v.pairsDo{|l,u,j| - a.write("\t\t\t" ++ (l->u).asString ++ "\n"); - } -}; -a.close; -) - -////////////////////////////////////////////////////////////////////////// -// slicing process - -// just run the slicer -( -t = Main.elapsedTime; -~slicer.play(s,~loader.buffer,~loader.index,action:{(Main.elapsedTime - t).postln;"Slicing done".postln}); -) - -//slice count -~slicer.index.keys.size - -// iterate -( -~slicer.index.pairsDo{ |k,v,i| - k.postln; - v.pairsDo{|l,u,j| - "\t\t\t".post; - (l->u).postln; - } -} -) - -///// write to file in human readable format, in order. -( -a = File(Platform.defaultTempDir ++ "sc-spliting.json","w"); -~stuffsorted = Array.newFrom(~slicer.index.keys).sort{|a,b| ~slicer.index[a][\bounds][0]< ~slicer.index[b][\bounds][0]}.do{|k| - v = ~slicer.index[k]; - a.write(k.asString ++ "\n"); - v.pairsDo{|l,u,j| - a.write("\t\t\t" ++ (l->u).asString ++ "\n"); - } -}; -a.close; -) - -////////////////////////////////////////////////////////////////////////// -// description process - -// just run the descriptor extractor -( -t = Main.elapsedTime; -~extractor.play(s,~loader.buffer,~slicer.index,action:{(Main.elapsedTime - t).postln;"Features done".postln}); -) - -// write the dataset to file with the native JSON -~ds.write(Platform.defaultTempDir ++ "sc-dataset.json") - -// open the file in your default json editor -(Platform.defaultTempDir ++ "sc-dataset.json").openOS - -////////////////////////////////////////////////////////////////////////// -// manipulating and querying the data - -//building a tree -~tree = FluidKDTree(s); -~tree.fit(~ds,{"Fitted".postln;}); - -//retrieve a sound to match -~targetsound = Buffer(s); -~targetname = ~slicer.index.keys.asArray.scramble[0].asSymbol; -#a,b = ~slicer.index[~targetname][\bounds]; -FluidBufCompose.process(s,~loader.buffer,a,(b-a),numChans: 1, destination: ~targetsound,action: {~targetsound.play;}) - -//describe the sound to match -( -{ - var mfcc, stats, flatten; - mfcc = FluidBufMFCC.kr(~targetsound,features:~mfccbuf[0],trig:1); - stats = FluidBufStats.kr(~mfccbuf[0],stats:~statsbuf[0],trig:Done.kr(mfcc)); - flatten = FluidBufFlatten.kr(~statsbuf[0],destination:~flatbuf[0],trig:Done.kr(stats)); - FreeSelfWhenDone.kr(flatten); -}.play; -) - -//find its nearest neighbours -~friends = Array; -~tree.numNeighbours = 5; -~tree.kNearest(~flatbuf[0],{|x| ~friends = x.postln;}) - -// play them in a row -( -Routine{ -5.do{|i| - var dur; - v = ~slicer.index[~friends[i].asSymbol]; - dur = (v[\bounds][1] - v[\bounds][0]) / s.sampleRate; - {BufRd.ar(v[\numchans],~loader.buffer,Line.ar(v[\bounds][0],v[\bounds][1],dur, doneAction: 2))}.play; - ~friends[i].postln; - 
dur.wait; - }; -}.play; -) diff --git a/release-packaging/Examples/dataset/1-learning examples/10a-weighted-MFCCs-comparison.scd b/release-packaging/Examples/dataset/1-learning examples/10a-weighted-MFCCs-comparison.scd deleted file mode 100644 index c14a7b6..0000000 --- a/release-packaging/Examples/dataset/1-learning examples/10a-weighted-MFCCs-comparison.scd +++ /dev/null @@ -1,235 +0,0 @@ -// define a few processes -( -~ds = FluidDataSet(s); -~dsW = FluidDataSet(s); -~dsL = FluidDataSet(s); -//define as many buffers as we have parallel voices/threads in the extractor processing (default is 4) -~loudbuf = 4.collect{Buffer.new}; -~weightbuf = 4.collect{Buffer.new}; -~mfccbuf = 4.collect{Buffer.new}; -~statsbuf = 4.collect{Buffer.new}; -~flatbuf = 4.collect{Buffer.new}; - -// here we instantiate a loader as per example 0 -~loader = FluidLoadFolder(File.realpath(FluidBufMFCC.class.filenameSymbol).dirname.withTrailingSlash ++ "../AudioFiles/"); - -// here we instantiate a further slicing step as per example 0 -~slicer = FluidSliceCorpus({ |src,start,num,dest| - FluidBufOnsetSlice.kr(src,start,num,metric: 9, minSliceLength: 17, indices:dest, threshold:0.2,blocking: 1) -}); - -// here we instantiate a process of description and dataset writing, as per example 0 -~extractor = FluidProcessSlices({|src,start,num,data| - var identifier, voice, mfcc, stats, flatten; - identifier = data.key; - voice = data.value[\voice]; - mfcc = FluidBufMFCC.kr(src, startFrame:start, numFrames:num, numChans:1, features:~mfccbuf[voice], padding: 2, trig:1, blocking: 1); - stats = FluidBufStats.kr(~mfccbuf[voice], stats:~statsbuf[voice], numDerivs: 1, trig:Done.kr(mfcc), blocking: 1); - flatten = FluidBufFlatten.kr(~statsbuf[voice], destination:~flatbuf[voice], trig:Done.kr(stats), blocking: 1); - FluidDataSetWr.kr(~ds, identifier, nil, ~flatbuf[voice], Done.kr(flatten), blocking: 1); -}); - -// here we make another processor, this time with doing an amplitude weighing -~extractorW = FluidProcessSlices({|src,start,num,data| - var identifier, voice, loud, weights, mfcc, stats, flatten; - identifier = data.key; - voice = data.value[\voice]; - mfcc = FluidBufMFCC.kr(src, startFrame:start, numFrames:num, numChans:1, features:~mfccbuf[voice], padding: 2, trig:1, blocking: 1); - loud = FluidBufLoudness.kr(src, startFrame:start, numFrames:num, numChans:1, features:~loudbuf[voice], padding: 2, trig:Done.kr(mfcc), blocking: 1); - weights = FluidBufScale.kr(~loudbuf[voice], numChans: 1, destination: ~weightbuf[voice], inputLow: -70, inputHigh: 0, trig: Done.kr(loud), blocking: 1); - stats = FluidBufStats.kr(~mfccbuf[voice], stats:~statsbuf[voice], numDerivs: 1, weights: ~weightbuf[voice], trig:Done.kr(weights), blocking: 1); - flatten = FluidBufFlatten.kr(~statsbuf[voice], destination:~flatbuf[voice], trig:Done.kr(stats), blocking: 1); - FluidDataSetWr.kr(~dsW, identifier, nil, ~flatbuf[voice], Done.kr(flatten), blocking: 1); -}); - -// and here we make a little processor for loudness if we want to poke at it -~extractorL = FluidProcessSlices({|src,start,num,data| - var identifier, voice, loud, stats, flatten; - identifier = data.key; - voice = data.value[\voice]; - loud = FluidBufLoudness.kr(src, startFrame:start, numFrames:num, numChans:1, features:~mfccbuf[voice], trig:1, padding: 2, blocking: 1); - stats = FluidBufStats.kr(~mfccbuf[voice], stats:~statsbuf[voice], numDerivs: 1, trig:Done.kr(loud), blocking: 1); - flatten = FluidBufFlatten.kr(~statsbuf[voice], destination:~flatbuf[voice], trig:Done.kr(stats), blocking: 1); - 
FluidDataSetWr.kr(~dsL, identifier, nil, ~flatbuf[voice], Done.kr(flatten), blocking: 1); -}); -) - -////////////////////////////////////////////////////////////////////////// -//loading process - -//load and play to test if it is that quick - it is! -( -t = Main.elapsedTime; -~loader.play(s,action:{(Main.elapsedTime - t).postln;"Loaded".postln;{var start, stop; PlayBuf.ar(~loader.index[~loader.index.keys.asArray.last.asSymbol][\numchans],~loader.buffer,startPos: ~loader.index[~loader.index.keys.asArray.last.asSymbol][\bounds][0])}.play;}); -) - -////////////////////////////////////////////////////////////////////////// -// slicing process - -// run the slicer -( -t = Main.elapsedTime; -~slicer.play(s,~loader.buffer,~loader.index,action:{(Main.elapsedTime - t).postln;"Slicing done".postln}); -) - -//slice count -~slicer.index.keys.size - -////////////////////////////////////////////////////////////////////////// -// description process - -// run both descriptor extractor - here they are separate to the batch process duration -( -t = Main.elapsedTime; -~extractor.play(s,~loader.buffer,~slicer.index,action:{(Main.elapsedTime - t).postln;"Features done".postln}); -) - -( -t = Main.elapsedTime; -~extractorW.play(s,~loader.buffer,~slicer.index,action:{(Main.elapsedTime - t).postln;"Features done".postln}); -) - -////////////////////////////////////////////////////////////////////////// -// manipulating and querying the data - -// extracting whatever stats we want. In this case, mean/std/lowest/highest, and the same on the first derivative - excluding MFCC0 as it is mostly volume, keeping MFCC1-12 - -( -~curated = FluidDataSet(s); -~curatedW = FluidDataSet(s); -~curator = FluidDataSetQuery.new(s); -) - -( -~curator.addRange(1,12,{ - ~curator.addRange(14,12,{ - ~curator.addRange(53,12,{ - ~curator.addRange(79,12,{ - ~curator.addRange(92,12,{ - ~curator.addRange(105,12,{ - ~curator.addRange(144,12,{ - ~curator.addRange(170,12); - }); - }); - }); - }); - }); - }); -}); -) -~curator.transform(~ds,~curated) -~curator.transform(~dsW,~curatedW) - -//check the dimension count -~ds.print -~dsW.print -~curated.print -~curatedW.print - -//building a tree for each dataset -~tree = FluidKDTree(s,5); -~tree.fit(~ds,{"Fitted".postln;}); -~treeW = FluidKDTree(s,5); -~treeW.fit(~dsW,{"Fitted".postln;}); -~treeC = FluidKDTree(s,5); -~treeC.fit(~curated,{"Fitted".postln;}); -~treeCW = FluidKDTree(s,5); -~treeCW.fit(~curatedW,{"Fitted".postln;}); - -//select a sound to match -// EITHER retrieve a random slice -~targetsound = Buffer(s); -~targetname = ~slicer.index.keys.asArray.scramble.last.asSymbol; -#a,b = ~slicer.index[~targetname][\bounds]; -FluidBufCompose.process(s,~loader.buffer,a,(b-a),numChans: 1, destination: ~targetsound,action: {~targetsound.play;}) - -// OR just load a file in that buffer -~targetsound = Buffer.read(s,Platform.resourceDir +/+ "sounds/a11wlk01.wav"); - -//describe the sound to match -( -{ - var loud, weights, mfcc, stats, flatten, stats2, written; - mfcc = FluidBufMFCC.kr(~targetsound,features:~mfccbuf[0],padding: 2, trig:1); - stats = FluidBufStats.kr(~mfccbuf[0],stats:~statsbuf[0], numDerivs: 1,trig:Done.kr(mfcc)); - flatten = FluidBufFlatten.kr(~statsbuf[0],destination:~flatbuf[0],trig:Done.kr(stats)); - loud = FluidBufLoudness.kr(~targetsound,features:~loudbuf[0],padding: 2,trig:Done.kr(flatten),blocking: 1); - weights = FluidBufScale.kr(~loudbuf[0],numChans: 1,destination: ~weightbuf[0],inputLow: -70,inputHigh: 0,trig: Done.kr(loud),blocking: 1); - stats2 = 
FluidBufStats.kr(~mfccbuf[0],stats:~statsbuf[0], numDerivs: 1, weights: ~weightbuf[0], trig:Done.kr(weights),blocking: 1); - written = FluidBufFlatten.kr(~statsbuf[0],destination:~flatbuf[1],trig:Done.kr(stats2)); - FreeSelf.kr(Done.kr(written)); -}.play; -) - -//go language side to extract the right dimensions -~flatbuf[0].getn(0,182,{|x|~curatedBuf = Buffer.loadCollection(s, x[[0,1,4,6,7,8,11,13].collect{|x|var y=x*13+1;(y..(y+11))}.flat].postln)}) -~flatbuf[1].getn(0,182,{|x|~curatedWBuf = Buffer.loadCollection(s, x[[0,1,4,6,7,8,11,13].collect{|x|var y=x*13+1;(y..(y+11))}.flat].postln)}) - -//find its nearest neighbours -~tree.kNearest(~flatbuf[0],{|x| ~friends = x.postln;}) -~treeW.kNearest(~flatbuf[1],{|x| ~friendsW = x.postln;}) -~treeC.kNearest(~curatedBuf,{|x| ~friendsC = x.postln;}) -~treeCW.kNearest(~curatedWBuf,{|x| ~friendsCW = x.postln;}) - - -// play them in a row -( -Routine{ -5.do{|i| - var dur; - v = ~slicer.index[~friends[i].asSymbol]; - dur = (v[\bounds][1] - v[\bounds][0]) / s.sampleRate; - {BufRd.ar(v[\numchans],~loader.buffer,Line.ar(v[\bounds][0],v[\bounds][1],dur, doneAction: 2))}.play; - ~friends[i].postln; - dur.wait; - }; -}.play; -) - -( -Routine{ -5.do{|i| - var dur; - v = ~slicer.index[~friendsW[i].asSymbol]; - dur = (v[\bounds][1] - v[\bounds][0]) / s.sampleRate; - {BufRd.ar(v[\numchans],~loader.buffer,Line.ar(v[\bounds][0],v[\bounds][1],dur, doneAction: 2))}.play; - ~friendsW[i].postln; - dur.wait; - }; -}.play; -) - -( -Routine{ -5.do{|i| - var dur; - v = ~slicer.index[~friendsC[i].asSymbol]; - dur = (v[\bounds][1] - v[\bounds][0]) / s.sampleRate; - {BufRd.ar(v[\numchans],~loader.buffer,Line.ar(v[\bounds][0],v[\bounds][1],dur, doneAction: 2))}.play; - ~friendsC[i].postln; - dur.wait; - }; -}.play; -) - -( -Routine{ -5.do{|i| - var dur; - v = ~slicer.index[~friendsCW[i].asSymbol]; - dur = (v[\bounds][1] - v[\bounds][0]) / s.sampleRate; - {BufRd.ar(v[\numchans],~loader.buffer,Line.ar(v[\bounds][0],v[\bounds][1],dur, doneAction: 2))}.play; - ~friendsCW[i].postln; - dur.wait; - }; -}.play; -) - -//explore dynamic range (changing the weigting's value of 0 in lines 39 and 157 will change the various weights given to quieter parts of the signal -( -t = Main.elapsedTime; -~extractorL.play(s,~loader.buffer,~slicer.index,action:{(Main.elapsedTime - t).postln;"Features done".postln}); -) -~norm = FluidNormalize.new(s) -~norm.fit(~dsL) -~norm.dump({|x|x["data_min"][[8,12]].postln;x["data_max"][[8,12]].postln;})//here we extract the stats from the dataset by retrieving the stored maxima of the fitting process in FluidNormalize diff --git a/release-packaging/Examples/dataset/1-learning examples/11-compositing-datasets.scd b/release-packaging/Examples/dataset/1-learning examples/11-compositing-datasets.scd deleted file mode 100644 index cdc613c..0000000 --- a/release-packaging/Examples/dataset/1-learning examples/11-compositing-datasets.scd +++ /dev/null @@ -1,355 +0,0 @@ -// here we will define a process that creates and populates a series of parallel dataset, one of each 'feature-space' that we can then eventually manipulate more easily than individual dimensions. 
- -// define a few datasets -( -~pitchDS = FluidDataSet(s); -~loudDS = FluidDataSet(s); -~mfccDS = FluidDataSet(s); -~durDS = FluidDataSet(s); - -//define as many buffers as we have parallel voices/threads in the extractor processing (default is 4) -~pitchbuf = 4.collect{Buffer.new}; -~statsPitchbuf = 4.collect{Buffer.new}; -~weightPitchbuf = 4.collect{Buffer.new}; -~flatPitchbuf = 4.collect{Buffer.new}; -~loudbuf = 4.collect{Buffer.new}; -~statsLoudbuf = 4.collect{Buffer.new}; -~flatLoudbuf = 4.collect{Buffer.new}; -~weightMFCCbuf = 4.collect{Buffer.new}; -~mfccbuf = 4.collect{Buffer.new}; -~statsMFCCbuf = 4.collect{Buffer.new}; -~flatMFCCbuf = 4.collect{Buffer.new}; - -// here we instantiate a loader as per example 0 -~loader = FluidLoadFolder(File.realpath(FluidBufPitch.class.filenameSymbol).dirname.withTrailingSlash ++ "../AudioFiles/"); - -// here we instantiate a further slicing step as per example 0 -~slicer = FluidSliceCorpus({ |src,start,num,dest| - FluidBufOnsetSlice.kr(src ,start, num, indices:dest, metric: 9, threshold:0.2, minSliceLength: 17, blocking: 1) -}); - -// here we make the full processor building our 3 source datasets -~extractor = FluidProcessSlices({|src,start,num,data| - var identifier, voice, pitch, pitchweights, pitchstats, pitchflat, loud, statsLoud, flattenLoud, mfcc, mfccweights, mfccstats, mfccflat, writePitch, writeLoud; - identifier = data.key; - voice = data.value[\voice]; - // the pitch computation is independant so it starts right away - pitch = FluidBufPitch.kr(src, startFrame:start, numFrames:num, numChans:1, features:~pitchbuf[voice], unit: 1, trig:1, blocking: 1); - pitchweights = FluidBufThresh.kr(~pitchbuf[voice], numChans: 1, startChan: 1, destination: ~weightPitchbuf[voice], threshold: 0.7, trig:Done.kr(pitch), blocking: 1);//pull down low conf - pitchstats = FluidBufStats.kr(~pitchbuf[voice], stats:~statsPitchbuf[voice], numDerivs: 1, weights: ~weightPitchbuf[voice], outliersCutoff: 1.5, trig:Done.kr(pitchweights), blocking: 1); - pitchflat = FluidBufFlatten.kr(~statsPitchbuf[voice],destination:~flatPitchbuf[voice],trig:Done.kr(pitchstats),blocking: 1); - writePitch = FluidDataSetWr.kr(~pitchDS,identifier, nil, ~flatPitchbuf[voice], Done.kr(pitchflat),blocking: 1); - // the mfcc need loudness to weigh, so let's start with that - loud = FluidBufLoudness.kr(src,startFrame:start, numFrames:num, numChans:1, features:~loudbuf[voice], trig:Done.kr(writePitch), blocking: 1);//here trig was 1 - //we can now flatten and write Loudness in its own trigger tree - statsLoud = FluidBufStats.kr(~loudbuf[voice], stats:~statsLoudbuf[voice], numDerivs: 1, trig:Done.kr(loud), blocking: 1); - flattenLoud = FluidBufFlatten.kr(~statsLoudbuf[voice],destination:~flatLoudbuf[voice],trig:Done.kr(statsLoud),blocking: 1); - writeLoud = FluidDataSetWr.kr(~loudDS,identifier, nil, ~flatLoudbuf[voice], Done.kr(flattenLoud),blocking: 1); - //we can resume from the loud computation trigger - mfcc = FluidBufMFCC.kr(src,startFrame:start,numFrames:num,numChans:1,features:~mfccbuf[voice],trig:Done.kr(writeLoud),blocking: 1);//here trig was loud - mfccweights = FluidBufScale.kr(~loudbuf[voice],numChans: 1,destination: ~weightMFCCbuf[voice],inputLow: -70,inputHigh: 0, trig: Done.kr(mfcc), blocking: 1); - mfccstats = FluidBufStats.kr(~mfccbuf[voice], stats:~statsMFCCbuf[voice], startChan: 1, numDerivs: 1, weights: ~weightMFCCbuf[voice], trig:Done.kr(mfccweights), blocking: 1);//remove mfcc0 and weigh by loudness instead - mfccflat = 
FluidBufFlatten.kr(~statsMFCCbuf[voice],destination:~flatMFCCbuf[voice],trig:Done.kr(mfccstats),blocking: 1); - FluidDataSetWr.kr(~mfccDS,identifier, nil, ~flatMFCCbuf[voice], Done.kr(mfccflat),blocking: 1); -}); - -) -////////////////////////////////////////////////////////////////////////// -//loading process - -//load and play to test if it is that quick - it is! -( -t = Main.elapsedTime; -~loader.play(s,action:{(Main.elapsedTime - t).postln;"Loaded".postln;{var start, stop; PlayBuf.ar(~loader.index[~loader.index.keys.asArray.last.asSymbol][\numchans],~loader.buffer,startPos: ~loader.index[~loader.index.keys.asArray.last.asSymbol][\bounds][0])}.play;}); -) - -////////////////////////////////////////////////////////////////////////// -// slicing process - -// run the slicer -( -t = Main.elapsedTime; -~slicer.play(s,~loader.buffer,~loader.index,action:{(Main.elapsedTime - t).postln;"Slicing done".postln}); -) -//slice count -~slicer.index.keys.size - -////////////////////////////////////////////////////////////////////////// -// description process - -// run the descriptor extractor (errors will be given, this is normal: the pitch conditions are quite exacting and therefore many slices are not valid) -( -t = Main.elapsedTime; -~extractor.play(s,~loader.buffer,~slicer.index,action:{(Main.elapsedTime - t).postln;"Features done".postln}); -) - -// make a dataset of durations for querying that too (it could have been made in the process loop, but hey, we have dictionaries we can manipulate too!) -( -~dict = Dictionary.new; -~temp = ~slicer.index.collect{ |k| [k[\bounds][1] - k[\bounds][0]]}; -~dict.add(\data -> ~temp); -~dict.add(\cols -> 1); -~durDS.load(~dict) -) - -////////////////////////////////////////////////////////////////////////// -// manipulating and querying the data - -~pitchDS.print; -~loudDS.print; -~mfccDS.print; -~durDS.print; - -/////////////////////////////////////////////////////// -//reduce the MFCC timbral space stats (many potential ways to explore here... - 2 are provided to compare, with and without the derivatives before running a dimension reduction) -~tempDS = FluidDataSet(s); - -~query = FluidDataSetQuery(s); -~query.addRange(0,24);//add only means and stddev of the 12 coeffs... -~query.addRange((7*12),24);// and the same stats of the first derivative (moving 7 stats x 12 mfccs to the right) -~query.transform(~mfccDS, ~tempDS); - -//check that you end up with the expected 48 dimensions -~tempDS.print; - -// standardizing before the PCA, as argued here: -// https://scikit-learn.org/stable/auto_examples/preprocessing/plot_scaling_importance.html -~stan = FluidStandardize(s); -~stanDS = FluidDataSet(s); -~stan.fitTransform(~tempDS,~stanDS) - -//shrinking A: using 2 stats on the values, and 2 stats on the redivative (12 x 2 x 2 = 48 dim) -~pca = FluidPCA(s,4);//shrink to 4 dimensions -~timbreDSd = FluidDataSet(s); -~pca.fitTransform(~stanDS,~timbreDSd,{|x|x.postln;})//accuracy - -//shrinking B: using only the 2 stats on the values -~query.clear; -~query.addRange(0,24);//add only means and stddev of the 12 coeffs... 
-~query.transform(~stanDS, ~tempDS);//retrieve the values from the already standardized dataset - -//check you have the expected 24 dimensions -~tempDS.print; - -//keep its own PCA so we can keep the various states for later transforms -~pca2 = FluidPCA(s,4);//shrink to 4 dimensions -~timbreDS = FluidDataSet(s); -~pca2.fitTransform(~tempDS,~timbreDS,{|x|x.postln;})//accuracy - -// comparing NN for fun -~targetDSd = Buffer(s) -~targetDS = Buffer(s) -~tree = FluidKDTree(s,5) - -// you can run this a few times to have fun -( -~target = ~slicer.index.keys.asArray.scramble.[0].asSymbol; -~timbreDSd.getPoint(~target, ~targetDSd); -~timbreDS.getPoint(~target, ~targetDS); -) - -~tree.fit(~timbreDSd,{~tree.kNearest(~targetDSd,{|x|~nearestDSd = x.postln;})}) -~tree.fit(~timbreDS,{~tree.kNearest(~targetDS,{|x|~nearestDS = x.postln;})}) - -// play them in a row -( -Routine{ -5.do{|i| - var dur; - v = ~slicer.index[~nearestDSd[i].asSymbol]; - dur = (v[\bounds][1] - v[\bounds][0]) / s.sampleRate; - {BufRd.ar(v[\numchans],~loader.buffer,Line.ar(v[\bounds][0],v[\bounds][1],dur, doneAction: 2))}.play; - ~nearestDSd[i].postln; - dur.wait; - }; -}.play; -) - -( -Routine{ -5.do{|i| - var dur; - v = ~slicer.index[~nearestDS[i].asSymbol]; - dur = (v[\bounds][1] - v[\bounds][0]) / s.sampleRate; - {BufRd.ar(v[\numchans],~loader.buffer,Line.ar(v[\bounds][0],v[\bounds][1],dur, doneAction: 2))}.play; - ~nearestDS[i].postln; - dur.wait; - }; -}.play; -) - -/////////////////////////////////////////////////////// -// compositing queries - defining a target and analysing it - -~globalDS = FluidDataSet(s); - -// define a source -~targetsound = Buffer.read(s,File.realpath(FluidBufPitch.class.filenameSymbol).dirname.withTrailingSlash ++ "../AudioFiles/Tremblay-ASWINE-ScratchySynth-M.wav",42250,44100); -~targetsound.play - -// analyse it as above, using voice 0 in the arrays of buffer to store the info -( -{ - var identifier, voice, pitch, pitchweights, pitchstats, pitchflat, loud, statsLoud, flattenLoud, mfcc, mfccweights, mfccstats, mfccflat, writePitch, writeLoud; - pitch = FluidBufPitch.kr(~targetsound, numChans:1, features:~pitchbuf[0], unit: 1, trig:1, blocking: 1); - pitchweights = FluidBufThresh.kr(~pitchbuf[0], numChans: 1, startChan: 1, destination: ~weightPitchbuf[0], threshold: 0.7, trig:Done.kr(pitch), blocking: 1); - pitchstats = FluidBufStats.kr(~pitchbuf[0], stats:~statsPitchbuf[0], numDerivs: 1, weights: ~weightPitchbuf[0], outliersCutoff: 1.5, trig:Done.kr(pitchweights), blocking: 1); - pitchflat = FluidBufFlatten.kr(~statsPitchbuf[0],destination:~flatPitchbuf[0],trig:Done.kr(pitchstats),blocking: 1); - loud = FluidBufLoudness.kr(~targetsound, numChans:1, features:~loudbuf[0], trig:Done.kr(pitchflat), blocking: 1); - statsLoud = FluidBufStats.kr(~loudbuf[0], stats:~statsLoudbuf[0], numDerivs: 1, trig:Done.kr(loud), blocking: 1); - flattenLoud = FluidBufFlatten.kr(~statsLoudbuf[0],destination:~flatLoudbuf[0],trig:Done.kr(statsLoud),blocking: 1); - mfcc = FluidBufMFCC.kr(~targetsound,numChans:1,features:~mfccbuf[0],trig:Done.kr(flattenLoud),blocking: 1); - mfccweights = FluidBufScale.kr(~loudbuf[0],numChans: 1,destination: ~weightMFCCbuf[0],inputLow: -70,inputHigh: 0, trig: Done.kr(mfcc), blocking: 1); - mfccstats = FluidBufStats.kr(~mfccbuf[0], stats:~statsMFCCbuf[0], startChan: 1, numDerivs: 1, weights: ~weightMFCCbuf[0], trig:Done.kr(mfccweights), blocking: 1); - mfccflat = FluidBufFlatten.kr(~statsMFCCbuf[0],destination:~flatMFCCbuf[0],trig:Done.kr(mfccstats),blocking: 1); - 
FreeSelf.kr(Done.kr(mfccflat)); -}.play; -) - -// a first query - length and pitch -~query.clear -~query.filter(0,"<",44100+22050)//column0 a little smaller than our source -~query.and(0,">", 44100-22050)//also as far as a little larger than the source -~query.transformJoin(~durDS, ~pitchDS, ~tempDS); //this passes to ~tempDS only the points that have the same label than those in ~durDS that satisfy the condition. No column were added so nothing from ~durDS is copied - -// print to see how many slices (rows) we have -~tempDS.print - -// further conditions to assemble the query -~query.clear -~query.filter(11,">",0.7)//column11 (median of pitch confidence) larger than 0.7 -~query.addRange(0,4) //copy only mean and stddev of pitch and confidence -~query.transform(~tempDS, ~globalDS); // pass it to the final search - -// print to see that we have less items, with only their pitch -~globalDS.print - -// compare knearest on both globalDS and tempDS -// assemble search buffer -~targetPitch = Buffer(s) -FluidBufCompose.process(s, ~flatPitchbuf[0],numFrames: 4,destination: ~targetPitch) - -// feed the trees -~tree.fit(~pitchDS,{~tree.kNearest(~flatPitchbuf[0],{|x|~nearestA = x.postln;})}) //all the points with all the stats -~tree.fit(~globalDS,{~tree.kNearest(~targetPitch,{|x|~nearestB = x.postln;})}) //just the points with the right lenght conditions, with the curated stats - -// play them in a row -( -Routine{ -5.do{|i| - var dur; - v = ~slicer.index[~nearestA[i].asSymbol]; - dur = (v[\bounds][1] - v[\bounds][0]) / s.sampleRate; - {BufRd.ar(v[\numchans],~loader.buffer,Line.ar(v[\bounds][0],v[\bounds][1],dur, doneAction: 2))}.play; - ~nearestA[i].postln; - dur.wait; - }; -}.play; -) - -// with our duration limits, strange results appear eventually -( -Routine{ -5.do{|i| - var dur; - v = ~slicer.index[~nearestB[i].asSymbol]; - dur = (v[\bounds][1] - v[\bounds][0]) / s.sampleRate; - {BufRd.ar(v[\numchans],~loader.buffer,Line.ar(v[\bounds][0],v[\bounds][1],dur, doneAction: 2))}.play; - ~nearestB[i].postln; - dur.wait; - }; -}.play; -) - -/////////////////////////////////////////////////////// -// compositing queries to weigh - defining a target and analysing it - -// make sure to define and describe the source above (lines 178 to 201) - -// let's make normalised versions of the 3 datasets, keeping the normalisers separate to query later -~loudDSn = FluidDataSet(s); -~pitchDSn = FluidDataSet(s); -~timbreDSn = FluidDataSet(s); - -~normL = FluidNormalize(s) -~normP = FluidNormalize(s) -~normT = FluidNormalize(s) - -~normL.fitTransform(~loudDS, ~loudDSn); -~normP.fitTransform(~pitchDS, ~pitchDSn); -~normT.fitTransform(~timbreDSd, ~timbreDSn); - -// let's assemble these datasets -~query.clear -~query.addRange(0,4) -~query.transformJoin(~pitchDSn,~timbreDSn, ~tempDS) //appends 4 dims of pitch to 4 dims of timbre -~query.transformJoin(~loudDSn, ~tempDS, ~globalDS) // appends 4 dims of loud to the 8 dims above - -~globalDS.print//12 dim: 4 timbre, 4 pitch, 4 loud, all normalised between 0 and 1 -~globalDS.write(Platform.defaultTempDir ++ "test12dims.json") // write to file to look at the values -// open the file in your default json editor -(Platform.defaultTempDir ++ "test12dims.json").openOS - -// let's assemble the query -// first let's normalise our target descriptors -( -~targetPitch = Buffer(s); -~targetLoud = Buffer(s); -~targetMFCC = Buffer(s); -~targetMFCCs = Buffer(s); -~targetMFCCsp = Buffer(s); -~targetTimbre = Buffer(s); -~targetAll= Buffer(s); -) - -~normL.transformPoint(~flatLoudbuf[0], 
~targetLoud) //normalise the loudness (all dims) -~normP.transformPoint(~flatPitchbuf[0], ~targetPitch) //normalise the pitch (all dims) -FluidBufCompose.process(s,~flatMFCCbuf[0],numFrames: 24,destination: ~targetMFCC) // copy the process of dimension reduction above -FluidBufCompose.process(s,~flatMFCCbuf[0],startFrame: (7*12), numFrames: 24, destination: ~targetMFCC,destStartFrame: 24) //keeping 48 dims -~stan.transformPoint(~targetMFCC,~targetMFCCs) //standardize with the same coeffs -~pca.transformPoint(~targetMFCCs, ~targetMFCCsp) //then down to 4 -~normT.transformPoint(~targetMFCCsp, ~targetTimbre) //then normalised -FluidBufCompose.process(s, ~targetTimbre,destination: ~targetAll) // assembling the single query -FluidBufCompose.process(s, ~targetPitch, numFrames: 4, destination: ~targetAll, destStartFrame: 4) // copying the 4 stats of pitch we care about -FluidBufCompose.process(s, ~targetLoud, numFrames: 4, destination: ~targetAll, destStartFrame: 8) // same for loudness -//check the sanity -~targetAll.query - -// now let's see which is nearest that point -~tree.fit(~globalDS,{~tree.kNearest(~targetAll,{|x|~nearest = x.postln;})}) //just the points with the right lenght conditions, with the curated stats - -// play them in a row -( -Routine{ -5.do{|i| - var dur; - v = ~slicer.index[~nearest[i].asSymbol]; - dur = (v[\bounds][1] - v[\bounds][0]) / s.sampleRate; - {BufRd.ar(v[\numchans],~loader.buffer,Line.ar(v[\bounds][0],v[\bounds][1],dur, doneAction: 2))}.play; - ~nearest[i].postln; - dur.wait; - }; -}.play; -) - -// to change the relative weight of each dataset, let's change the normalisation range. Larger ranges will mean larger distance, and therefore less importance for that parameter. -// for instance to downplay pitch, let's make it larger by a factor of 10 around the center of 0.5 -~normP.max = 5.5 -~normP.min = -4.5 -~normP.fitTransform(~pitchDS, ~pitchDSn); -// here we can re-run just the part that composites the pitch -~normP.transformPoint(~flatPitchbuf[0], ~targetPitch) //normalise the pitch (all dims) -FluidBufCompose.process(s, ~targetPitch, numFrames: 4, destination: ~targetAll, destStartFrame: 4) // copying the 4 stats of pitch we care about - -//see that the middle 4 values are much larger in range -~targetAll.getn(0,12,{|x|x.postln;}) - -// let's re-assemble these datasets -~query.transformJoin(~pitchDSn,~timbreDSn, ~tempDS) //appends 4 dims of pitch to 4 dims of timbre -~query.transformJoin(~loudDSn, ~tempDS, ~globalDS) // appends 4 dims of loud to the 8 dims above - -// now let's see which is nearest that point -~tree.fit(~globalDS,{~tree.kNearest(~targetAll,{|x|~nearest = x.postln;})}) //just the points with the right lenght conditions, with the curated stats - -/////////////////////////////////////////////// -// todo: segment then query musaik diff --git a/release-packaging/Examples/dataset/1-learning examples/12-windowed-clustered-segmentation.scd b/release-packaging/Examples/dataset/1-learning examples/12-windowed-clustered-segmentation.scd deleted file mode 100644 index e1dd3b0..0000000 --- a/release-packaging/Examples/dataset/1-learning examples/12-windowed-clustered-segmentation.scd +++ /dev/null @@ -1,230 +0,0 @@ -// load a source folder -~loader = FluidLoadFolder(File.realpath(FluidBufMFCC.class.filenameSymbol).dirname.withTrailingSlash ++ "../AudioFiles/"); -~loader.play; - -//slightly oversegment with novelty -//segments should still make sense but might cut a few elements in 2 or 3 -~slicer = FluidSliceCorpus({ |src,start,num,dest| 
FluidBufNoveltySlice.kr(src,start,num,indices:dest, feature: 1, kernelSize: 29, threshold: 0.1, filterSize: 5, hopSize: 128, blocking: 1)}); -~slicer.play(s, ~loader.buffer,~loader.index); - -//test the segmentation by looping them -( -~originalindices = Array.newFrom(~slicer.index.keys).sort{|a,b| ~slicer.index[a][\bounds][0]< ~slicer.index[b][\bounds][0]}.collect{|x|~slicer.index[x][\bounds]}; -d = {arg start=0, end = 44100; - BufRd.ar(1, ~loader.buffer, Phasor.ar(0,1,start,end,start),0,1); -}.play; - -w = Window.new(bounds:Rect(100,100,400,60)).front; -b = ControlSpec(0, ~originalindices.size - 1, \linear, 1); // min, max, mapping, step -c = StaticText(w, Rect(340, 20, 50, 20)).align_(\center); -a = Slider(w, Rect(10, 20, 330, 20)) -.action_({var val = b.map(a.value).asInteger; - c.string_(val.asString); - d.set(\start,~originalindices[val][0], \end, ~originalindices[val][1]); -}); -) - -//analyse each segment with 20 MFCCs in a dataset and spectralshapes in another one -( -~featuresbuf = 4.collect{Buffer.new}; -~statsbuf = 4.collect{Buffer.new}; -~flatbuf = 4.collect{Buffer.new}; -~slicesMFCC = FluidDataSet(s); -~slicesShapes = FluidDataSet(s); -~extractor = FluidProcessSlices({|src,start,num,data| - var features, stats, writer, flatten,mfccBuf, statsBuf, flatBuf, identifier, voice; - identifier = data.key; - voice = data.value[\voice]; - features = FluidBufMFCC.kr(src,startFrame:start,numFrames:num,numChans:1, numCoeffs: 20, features:~featuresbuf[voice],trig:1,blocking: 1); - stats = FluidBufStats.kr(~featuresbuf[voice],stats:~statsbuf[voice],trig:Done.kr(features),blocking: 1); - flatten = FluidBufFlatten.kr(~statsbuf[voice],destination:~flatbuf[voice],trig:Done.kr(stats),blocking: 1); - writer = FluidDataSetWr.kr(~slicesMFCC,identifier, nil, ~flatbuf[voice], Done.kr(flatten),blocking: 1); - features = FluidBufSpectralShape.kr(src,startFrame:start,numFrames:num,numChans:1, features:~featuresbuf[voice],trig:Done.kr(writer),blocking: 1); - stats = FluidBufStats.kr(~featuresbuf[voice],stats:~statsbuf[voice],trig:Done.kr(features),blocking: 1); - flatten = FluidBufFlatten.kr(~statsbuf[voice],destination:~flatbuf[voice],trig:Done.kr(stats),blocking: 1); - writer = FluidDataSetWr.kr(~slicesShapes,identifier, nil, ~flatbuf[voice], Done.kr(flatten),blocking: 1); -}); -) - -( -t = Main.elapsedTime; -~extractor.play(s,~loader.buffer, ~slicer.index, action:{(Main.elapsedTime - t).postln;"Analysis done".postln}); -) - -~originalindices.size -~slicesMFCC.print -~slicesShapes.print - -//run a window over consecutive segments, forcing them in 2 classes, and merging the consecutive segments of similar class -//we overlap the analysis with the last (original) slice to check for continuity -( -~winSize = 4;//the number of consecutive items to split in 2 classes; -~curated = FluidDataSet(s); -~query = FluidDataSetQuery(s); -~stan = FluidStandardize(s); -~kmeans = FluidKMeans(s,2,1000); -~windowDS = FluidDataSet(s); -~windowLS = FluidLabelSet(s); -) - -//curate stats (MFCCs) -~query.clear -~query.addRange((0*20)+1,10); -~query.transform(~slicesMFCC,~curated); - -//OR -//curate stats (moments) -~query.clear -~query.addRange(0,3); -~query.transform(~slicesShapes,~curated); - -//OR -//curate both -~query.clear -~query.addColumn(0);//add col 0 (mean of mfcc0 as 'loudness') -~query.transform(~slicesMFCC,~curated);//mfcc0 as loudness -~query.clear; -~query.addRange(0,3);//add some spectral moments -~query.transformJoin(~slicesShapes, ~curated, ~curated);//join in centroids - -//optionally standardize in place 
-~stan.fitTransform(~curated, ~curated); - -~curated.print - -//retrieve the dataset as dictionary -~curated.dump{|x|~sliceDict = x;}; - -~originalslicesarray = ~originalindices.flop[0] ++ ~loader.buffer.numFrames -~orginalkeys = Array.newFrom(~slicer.index.keys).sort{|a,b| ~slicer.index[a][\bounds][0]< ~slicer.index[b][\bounds][0]} - -//the windowed function, recursive to deal with sync dependencies -( -~windowedFunct = {arg head, winSize, overlap; - var nbass = [], assignments = [], tempDict = (); - //check the size of everything to not overrun - winSize = (~originalslicesarray.size - head).min(winSize); - //copy the items to a subdataset from hear - winSize.do{|i| - tempDict.put((i.asString), ~sliceDict["data"][(~orginalkeys[(i+head)]).asString]);//here one could curate which stats to take - // "whichslices:%\n".postf(i+head); - }; - ~windowDS.load(Dictionary.newFrom([\cols, ~sliceDict["cols"].asInteger, \data, tempDict]), action: { - // "% - loaded\n".postf(head); - - //kmeans 2 and retrieve ordered array of class assignations - ~kmeans.fitPredict(~windowDS, ~windowLS, action: {|x| - nbass = x; - // "% - fitted1: ".postf(head); nbass.postln; - - if (nbass.includes(winSize.asFloat), { - ~kmeans.fitPredict(~windowDS, ~windowLS, {|x| - nbass = x; - // "% - fitted2: ".postf(head); nbass.postln; - if (nbass.includes(winSize.asFloat), { - ~kmeans.fitPredict(~windowDS, ~windowLS, {|x| - nbass = x; - // "% - fitted3: ".postf(head); nbass.postln; - }); - }); - }); - }); - - ~windowLS.dump{|x| - var assignments = x.at("data").asSortedArray.flop[1].flatten; - "% - assigned ".postf(head); - - assignments.postln; - - (winSize-1).do{|i| - if (assignments[i+1] != assignments[i], { - ~newindices= ~newindices ++ (~originalslicesarray[head+i+1]).asInteger; - ~newkeys = ~newkeys ++ (~orginalkeys[head+i+1]); - }); - - }; - //if we still have some frames to do, do them - if (((winSize + head) < ~originalslicesarray.size), { - "-----------------".postln; - ~windowedFunct.value(head + winSize - overlap, winSize, overlap); - }, {~newindices = (~newindices ++ ~loader.buffer.numFrames); "done".postln;});//if we're done close the books - }; - }); - }); -} -) - -//the job - -//test 1 - start at the begining, consider 4 items at a time, make 2 clusters, overlap 1 -~newindices = [~originalslicesarray[0]]; ~newkeys = [~orginalkeys[0]]; -~windowedFunct.value(0, 4, 1); - -//OPTIONAL: try again with more clusters (3) and a wider window (6) and more overlap (2) -~newindices = [~originalslicesarray[0]]; ~newkeys = [~orginalkeys[0]]; -~kmeans.numClusters = 3; -~windowedFunct.value(0,6,2); - -//compare sizes -~orginalkeys.size -~newkeys.size; - -//export to reaper -( -//first create a new file that ends with rpp - it will overwrite if the file exists -f = File.new(Platform.defaultTempDir ++ "clusteredslices-" ++ Date.getDate.stamp ++".rpp","w+"); - -if (f.isOpen , { - var path, prevpath ="", sr, count, dur, realDur; - //write the header - f.write(" 0, { - f.write("\n>\n"); - }); - count = count + dur; - }; - //write the track footer - f.write(">\n"); - - // a second track with the new ~indices - prevpath = ""; - //write the track header - f.write(" 0, { - path = ~slicer.index[v][\path]; - if (path != prevpath, { - sr = ~slicer.index[v][\sr]; - prevpath = path; - count = 0; - }); - f.write("\n>\n"); - count = count + dur; - }); - }; - //write the track footer - f.write(">\n"); - - //write the footer - f.write(">\n"); - f.close; -}); -) - -(then open the time-stamped reaper file clusterdslice in the folder tmp) 
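// Optional extra (not part of the original file): before (or instead of) opening the Reaper session, the merged slices collected in ~newindices can be auditioned with the same slider player used for the original slices above. This is a minimal sketch assuming ~newindices holds ascending sample positions as built by the windowed function.
(
~newbounds = ~newindices.slide(2,1).clump(2); // adjacent pairs of merged slice points as [start, end]
d = {arg start=0, end = 44100; BufRd.ar(1, ~loader.buffer, Phasor.ar(0,1,start,end,start),0,1)}.play;
w = Window.new(bounds:Rect(100,100,400,60)).front;
b = ControlSpec(0, ~newbounds.size - 1, \linear, 1);
c = StaticText(w, Rect(340, 20, 50, 20)).align_(\center);
a = Slider(w, Rect(10, 20, 330, 20)).action_({var val = b.map(a.value).asInteger;
	c.string_(val.asString);
	d.set(\start, ~newbounds[val][0], \end, ~newbounds[val][1]);
});
)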
-Platform.defaultTempDir.openOS diff --git a/release-packaging/Examples/dataset/1-learning examples/13-massive-parallelisation-example.scd b/release-packaging/Examples/dataset/1-learning examples/13-massive-parallelisation-example.scd deleted file mode 100644 index 81b960d..0000000 --- a/release-packaging/Examples/dataset/1-learning examples/13-massive-parallelisation-example.scd +++ /dev/null @@ -1,324 +0,0 @@ -// Lookup in a KDTree using melbands -// Demonstration of a massive parallel approach to batch process swiftly in SC - -s.options.numBuffers = 16384 //The method below for doing the analysus quickly needs lots of buffers -s.reboot - -//Step 0: Make a corpus - -//We'll jam together some random flucoma sounds for illustrative purposes -//Get some files -( -~audioexamples_path = File.realpath(FluidBufMelBands.class.filenameSymbol).dirname.withTrailingSlash +/+ "../AudioFiles/*.wav"; -~allTheSounds = SoundFile.collect(~audioexamples_path); -~testSounds = ~allTheSounds; -~testSounds.do{|f| f.path.postln}; // print out the files that are loaded -) - -//Load the files into individual buffers: -( -~audio_buffers = ~testSounds.collect{|f| - Buffer.readChannel( - server: s, - path:f.path, - channels:[0], - action:{("Loaded" + f.path).postln;} - ) -}; -) - -//Do a segmentation of each buffer, in parallel -( -fork{ - ~index_buffers = ~audio_buffers.collect{Buffer.new}; - s.sync; - ~count = ~audio_buffers.size; - ~audio_buffers.do{|src,i| - FluidBufOnsetSlice.process( - server:s, - source:src, - indices:~index_buffers[i], - metric: 9, - threshold:0.2, - minSliceLength: 17, - action:{ - (~testSounds[i].path ++ ":" + ~index_buffers[i].numFrames + "slices").postln; - ~count = ~count - 1; - if(~count == 0){"Done slicing".postln}; - } - ); - } -} -) - -// we now have an array of index buffers, one per source buffer, each containing the segmentation points as a frame positions -// this allows us to make an array of sizes -~index_buffers.collect{|b| b.numFrames}.sum - -//For each of these segments, let's make a datapoint using the mean melbands. -// There's a number of ways of skinning this cat w/r/t telling the server what to do, but here we want to minimize traffic between language and server, and also produce undertsandable code - -//First, we'll grab the onset points as language-side arrays, then scroll through each slice getting the mean melbands -( -// - a dataset to keep the mean melbands in -~mels = FluidDataSet(s); -// - a dictionary to keep the slice points in for later playback -~slices = Dictionary(); -//The code below (as well as needing lots of buffers), creates lots of threads and we need a big ass scheduling queue -~clock = TempoClock(queueSize:8192); -) - - -// Do the Mel analysis in a cunning parallel fashion -( -{ - var counter, remaining; - var condition = Condition.new; // used to create a test condition to pause the routine ... - var index_arrays = Dictionary(); - - "Process started. 
Please wait.".postln; - - ~total_slice_count = ~index_buffers.collect{|b| b.numFrames}.sum + ~index_buffers.size; //we get an extra slice in buffer - ~featurebuffers = ~total_slice_count.collect{Buffer.new}; // create a buffer per slice - - //Make our dictionary FluidDataSet-shaped - ~slices.put("cols",3);//[bufnum,start,end] for each slice - ~slices.put("data",Dictionary()); - - //Collect each set of onsets into a language side array and store them in a dict - ~index_buffers.do{|b,i| // iterate over the input buffer array - { - b.loadToFloatArray( // load to language side array - action:{|indices| - //Glue the first and last samples of the buffer on to the index list, and place in dictionary with the - //Buffer object as a key - - index_arrays.put(~audio_buffers[i], Array.newFrom([0] ++ indices ++ (~audio_buffers[i].numFrames - 1))); - - if(i==(~index_buffers.size-1)) {condition.unhang}; - } - ) - }.fork(stackSize:~total_slice_count); - }; - condition.hang; //Pause until all the callbacks above have completed - "Arrays loaded. Starting on the analysis, please wait.".postln; - - //For each of these lists of points, we want to scroll over the indices in pairs and get some mel bands - counter = 0; - remaining = ~total_slice_count; - - s.sync; - - // now iterate over Dict and calc melbands - - index_arrays.keysValuesDo{|buffer, indices| - indices.doAdjacentPairs{|start,end,num| - var analysis = Routine({|counter| - FluidBufMelBands.processBlocking( - server:s, - source:buffer, - startFrame:start, - numFrames:(end-1) - start, - features:~featurebuffers[counter], - action:{ - remaining = remaining - 1; - if(remaining == 0) { ~numMelBands = ~featurebuffers[0].numChannels;condition.unhang }; - } - ); - }); - - ~slices["data"].put(counter,[buffer.bufnum,start,end]); - - //I'm spawning new threads to wait for the analysis callback from the server. The final callback will un-hang this thread - analysis.value(counter); //Done differently to other blocks because I need to pass in the value of counter - counter = counter + 1; - } - }; - condition.hang; - "Analysis of % slices done.\n".postf(~total_slice_count); -}.fork(clock:~clock); -) - - -// Run stats on each mel buffer - -// create a stats buffer for each of the slices -~statsbuffers = ~total_slice_count.collect{Buffer.new}; // create n Slices buffers - to be filled with (40 mel bands * 7 stats) - -// run stats on all the buffers -( -{ - var remaining = ~total_slice_count; - ~featurebuffers.do{|buffer,i| - FluidBufStats.processBlocking( - server:s, - source:buffer, - stats:~statsbuffers[i], - action:{ - remaining = remaining - 1; - if(remaining == 0) { "done".postln}; - } - ); - }; -}.fork(clock:~clock); -) - -~featurebuffers.size - -//Flatten each stats buffer into a data point -~flatbuffers = ~total_slice_count.collect{Buffer.new};// create an array of flatten stats - -( -{ - var remaining = ~total_slice_count; - ~statsbuffers.do{|buffer,i| - FluidBufFlatten.processBlocking( - server:s, - source:buffer, - destination:~flatbuffers[i], - action:{ - remaining = remaining - 1; - if(remaining == 0) { "Got flat points".postln; }; - } - ); - }; -}.fork(clock:~clock); -) - - -//Ram each flat point into a data set. 
At this point we have more data than we need, but we'll prune in moment -( -"Filling dataset".postln; -~mels.clear; - -// ~flatbuffers = flatbuffers; -~flatbuffers.do{|buf,i| - ~mels.addPoint(i,buf); -}; - -~mels.print; -) - - -// Prune & standardise - -// Tidy up the temp arrays of buffers we do not need anymore - -( -"Cleaning".postln; -(~featurebuffers ++ ~statsbuffers ++ ~flatbuffers).do{|buf| buf.free}; -) - -//Above we sneakily made a dictionary of slice data for playback (bufnum,start,end). Let's throw it in a dataset -~slicedata = FluidDataSet(s); // will hold slice data (bufnum,start,end) for playback - -//dict -> dataset -( -~slicedata.load(~slices); -~slicedata.print; -) - -// Step 1. Let's prune and standardize before fitting to a tree -( -~meanmels = FluidDataSet(s);//will hold pruned mel data -~stdmels = FluidDataSet(s);//will standardised, pruned mel data -~standardizer = FluidStandardize(s); -~pruner = FluidDataSetQuery(s); -~tree = FluidKDTree(s,numNeighbours:10,lookupDataSet:~slicedata);//we have to supply the lookup data set when we make the tree (boo!) -) - -//Prune, standardize and fit KDTree -( -{ - ~meanmels.clear; - ~stdmels.clear; - ~pruner.addRange(0,~numMelBands).transform(~mels,~meanmels); //prune with a 'query' -- so this is dropping all but ~meanmels - ~standardizer.fitTransform(~meanmels,~stdmels); - ~tree.fit(~stdmels,{"KDTree ready".postln}); -}.fork(clock:~clock); -) - -~meanmels.print - -//Step 2: Set the FluidStandardizer and FluidKDTree up for listening -//set the buffers and busses needed -( -~stdInputPoint = Buffer.alloc(s,40); -~stdOutputPoint = Buffer.alloc(s,40); -~treeOutputPoint = Buffer.alloc(s,3 * 10);//numNeighbours x triples of bufnum,start,end -) - - -// let's play a random sound (to make sure we understand our data structure! -( -{ - var randPoint, buf, start, stop, dur; - - randPoint = ~slices["data"].keys.asArray.scramble[0]; // this good way of getting - but recast as strong - - buf= ~slices["data"][randPoint][0]; - start = ~slices["data"][randPoint][1]; - stop = ~slices["data"][randPoint][2]; - - dur = stop - start; - - BufRd.ar(1,buf, Line.ar(start,stop,dur/s.sampleRate, doneAction: 2), 0, 2); -}.play -) - - -// Query KD tree - -// a target sound from outside our dataset -~inBuf = Buffer.readChannel(s, Platform.resourceDir +/+ "sounds/a11wlk01.wav", numFrames:15000, channels:[0]); -~inBuf.play - -//OR one from within (but just the begining so beware of the difference!) -~inBuf = Buffer.alloc(s,15000); -~randomSlice = ~slices["data"].keys.asArray.scramble[0]; -~audio_buffers[~slices["data"][~randomSlice][0]].copyData(~inBuf,srcStartAt: ~slices["data"][~randomSlice][1], numSamples: 15000.min(~slices["data"][~randomSlice][2] - (~slices["data"][~randomSlice][1]))); -~inBuf.play - -// now try getting a point, playing it, grabbing nearest neighbour and playing it ... 
- -( -~inBufMels = Buffer(s); -~inBufStats = Buffer(s); -~inBufFlat = Buffer(s); -~inBufComp = Buffer(s); -~inBufStand = Buffer(s); -) - -// FluidBuf Compose is buf version of dataSetQuery - -( -FluidBufMelBands.process(s, ~inBuf, features: ~inBufMels, action: { - FluidBufStats.process(s, ~inBufMels, stats:~inBufStats, action: { - FluidBufFlatten.process(s, ~inBufStats, destination:~inBufFlat, action: { - FluidBufCompose.process(s, ~inBufFlat, numFrames: ~numMelBands, destination: ~inBufComp, action: { - ~standardizer.transformPoint(~inBufComp, ~inBufStand, { - ~tree.kNearest(~inBufStand,{ |a|a.postln;~nearest = a;}) - }) - }) - }) - }) -}) -) - -// playback nearest in order -( -fork{ - ~nearest.do{|i| - var buf, start, stop, dur; - - buf= ~slices["data"][i.asInteger][0]; - start = ~slices["data"][i.asInteger][1]; - stop = ~slices["data"][i.asInteger][2]; - dur = (stop - start)/ s.sampleRate; - {BufRd.ar(1,buf, Line.ar(start,stop,dur, doneAction: 2), 0, 2);}.play; - - i.postln; - dur.wait; - }; -} -) diff --git a/release-packaging/Examples/dataset/1-learning examples/1a-starting-1D-example.scd b/release-packaging/Examples/dataset/1-learning examples/1a-starting-1D-example.scd deleted file mode 100644 index 02fb085..0000000 --- a/release-packaging/Examples/dataset/1-learning examples/1a-starting-1D-example.scd +++ /dev/null @@ -1,73 +0,0 @@ -s.reboot -~ds = FluidDataSet.new(s) -~point = Buffer.alloc(s,1,1) -( -Routine{ - 10.do{|i| - ~point.set(0,i); - ~ds.addPoint(i.asString,~point,{("addPoint"+i).postln}); //because buffer.set do an immediate update in the RT thread we can take for granted it'll be updated when we call addPoint - s.sync; //but we need to sync to make sure everything is done on the DataSet before the next iteration - } -}.play -) -~ds.print; - -/*** KDTREE ***/ -~tree = FluidKDTree.new(s) -~tree.fit(~ds,action:{"Done indexing".postln}) - -~tree.numNeighbours = 5; //play with this -( -Routine{ - 10.do{|i| - ~point.set(0,i); - ~tree.kNearest(~point, {|x| "Neighbours for a value of % are ".postf(i); x.postln}); - s.sync; - } -}.play -) - -/*** KMEANS ***/ - -~kmeans = FluidKMeans.new(s,maxIter:100); -~kmeans.numClusters = 2; //play with this -~kmeans.fit(~ds,action:{|x| "Done fitting with these number of items per cluster ".post;x.postln;}) - -( -Routine{ - 10.do{|i| - ~point.set(0,i); - ~kmeans.predictPoint(~point,{|x| ("Predicted Cluster for a value of " + i ++ ":" + x).postln}); - s.sync; - } -}.play -) - -~labels = FluidLabelSet(s); - -~kmeans.predict(~ds,~labels, {|x| ("Size of each cluster" + x).postln}) - -( -~labels.size{|x| - Routine{x.asInteger.do{|i| - ~labels.getLabel(i,action: {|l| - ("Label for entry " + i ++ ":" + l).postln; - }); - s.sync; - } - }.play; -}; -) - -// or simply print it -~labels.print - -// or dump and format -( -~labels.dump{|x| - var keys = x["data"].keys.asArray.sort; - keys.do{|key| - "Label for entry % is %\n".postf(key, x["data"][key][0]); - } -} -) diff --git a/release-packaging/Examples/dataset/1-learning examples/2a-starting-1D-example2.scd b/release-packaging/Examples/dataset/1-learning examples/2a-starting-1D-example2.scd deleted file mode 100644 index faa0ada..0000000 --- a/release-packaging/Examples/dataset/1-learning examples/2a-starting-1D-example2.scd +++ /dev/null @@ -1,67 +0,0 @@ -s.reboot -~ds = FluidDataSet.new(s) -~point = Buffer.alloc(s,1,1) -( -Routine{ - 10.do{|i| - var d; - if(i<=4,{d=i},{d=i+5}); - ~point.set(0,d); - ~ds.addPoint(i.asString,~point,{("addPoint"+i).postln}); - s.sync; - } -}.play -) -~ds.print; - -/*** 
KDTREE ***/ -~tree = FluidKDTree.new(s) -~tree.fit(~ds,action:{"Done indexing".postln}) - -~tree.numNeighbours = 5; //play with this -( -Routine{ - 15.do{|i| - ~point.set(0,i); - ~tree.kNearest(~point, {|x| "Neighbours for a value of % are ".postf(i); x.post;" with respective distances of ".post;}); - ~tree.kNearestDist(~point, {|x| x.postln}); - s.sync; - } -}.play -) - - -/*** KMEANS ***/ - -~kmeans = FluidKMeans.new(s,maxIter:100) -~kmeans.numClusters = 2; //play with this -~kmeans.fit(~ds, action:{|x| "Done fitting with these number of items per cluster ".post;x.postln;}) - -( -Routine{ - 15.do{|i| - ~point.set(0,i); - ~kmeans.predictPoint(~point,{|x| ("Predicted Cluster for a value of " + i ++ ":" + x).postln}); - s.sync; - } -}.play -) - -~labels = FluidLabelSet(s); - -~kmeans.predict(~ds,~labels, {|x| ("Size of each cluster" + x).postln}) - -( -~labels.size{|x| - Routine{x.asInteger.do{|i| //size does not return a value, but we can retrieve it via a function - ~labels.getLabel(i,action: {|l| - ("Label for entry " + i ++ ":" + l).postln; - }); - s.sync; - } - }.play; -}; -) - -// or simply print it -~labels.print \ No newline at end of file diff --git a/release-packaging/Examples/dataset/1-learning examples/3a-classifier-example.scd b/release-packaging/Examples/dataset/1-learning examples/3a-classifier-example.scd deleted file mode 100644 index 5f80842..0000000 --- a/release-packaging/Examples/dataset/1-learning examples/3a-classifier-example.scd +++ /dev/null @@ -1,64 +0,0 @@ -( -~simpleInput = FluidDataSet(s); -~simpleOutput = FluidLabelSet(s); -b = Buffer.alloc(s,2); -~knn = FluidKNNClassifier(s); -~knn.numNeighbours = 3 -) - -( -var w,v,myx,myy; - -//initialise the mouse position holder -myx=0; -myy=0; - -//make a window and a full size view -w = Window.new("Viewer", Rect(100,Window.screenBounds.height - 400, 310, 310)).front; -v = View.new(w,Rect(0,0, 310, 310)); - -//creates a function that reacts to mousedown -v.mouseDownAction = {|view, x, y|myx=x;myy=y;w.refresh; - // myx.postln;myy.postln; - Routine{ - b.setn(0,[myx,myy]); - ~knn.predictPoint(b, action: {|x|x.postln;}); - s.sync; -}.play;}; - -//custom redraw function -w.drawFunc = { - 100.do { |i| - if (i < 50, {Pen.color = Color.white;} ,{Pen.color = Color.red;}); - Pen.addRect(Rect(i.div(10)*30+10,i.mod(10)*30+10,20,20)); - Pen.perform(\fill); - }; - Pen.color = Color.black; - Pen.addOval(Rect(myx-5, myy-5,10,10)); - Pen.perform(\stroke); -}; -) - -( -//populates a dataset with the same squares as the gui (their centres) (old method, iterating over buffers. A dictionary approach would be more efficient, see the example in this folder) -Routine{ - 50.do{|i| - var x = i.div(10)*30+20; - var y = i.mod(10)*30+20; - b.setn(0,[x,y]); - ~simpleInput.addPoint(i.asString,b,{("Added Input" + i).postln}); - ~simpleOutput.addLabel(i.asString,"White",{("Added Output" + i).postln}); - s.sync; - b.setn(0,[x+150,y]); - ~simpleInput.addPoint((i+50).asString,b,{("Added Input" + (i+50)).postln}); - ~simpleOutput.addLabel((i+50).asString,"Red",{("Added Output" + (i+50)).postln}); - s.sync; - }; - \done.postln; - }.play; -) - -// fit the dataset -~knn.fit(~simpleInput,~simpleOutput, action:{"fitting done".postln}) - -// now click on the grid and read the estimated class according to the nearest K neighbours. 
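// Optional extra (not part of the original example): classify a whole batch of test points at once rather than clicking them one by one. This assumes FluidKNNClassifier also offers a DataSet-to-LabelSet predict, analogous to the predictPoint used above - check the FluidKNNClassifier helpfile if in doubt.
(
~testInput = FluidDataSet(s);
~testOutput = FluidLabelSet(s);
// ten hypothetical test points: one per column of squares, at mid height
~testInput.load(Dictionary.newFrom([\cols, 2, \data,
	Dictionary.newFrom(10.collect{|i| [("test-" ++ i), [(i * 30 + 20).asFloat, 155.0]]}.flatten)]));
~knn.predict(~testInput, ~testOutput, action: {"batch prediction done".postln});
)
~testOutput.print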
diff --git a/release-packaging/Examples/dataset/1-learning examples/4-regressor-example.scd b/release-packaging/Examples/dataset/1-learning examples/4-regressor-example.scd deleted file mode 100644 index e7f2b38..0000000 --- a/release-packaging/Examples/dataset/1-learning examples/4-regressor-example.scd +++ /dev/null @@ -1,74 +0,0 @@ -s.reboot - -~urn = { |n=31416, min=0,max=31415| (min..max).scramble.keep(n) }; - -// creates 200 indices, then values of the output of a function with the predictable shape of a sine wave -n = 200 -~idx = ~urn.value(n) -~data = n.collect{|i|sin(~idx[i]/5000)} - -// creates the dataset with these associated indices and values -( -~simpleInput = FluidDataSet(s); -~simpleOutput = FluidDataSet(s); -b = Buffer.alloc(s,1); -c = Buffer.alloc(s,1); -~mappingviz = Buffer.alloc(s,512); -) - -( -Routine{ - n.do{|i| - b.set(0,~idx[i]); - c.set(0,~data[i]); - ~simpleInput.addPoint(i.asString,b,{("Added Input" + i).postln}); - ~simpleOutput.addPoint(i.asString,c,{("Added Output" + i).postln}); - ~mappingviz.set((~idx[i]/61.4).asInteger,~data[i]); - s.sync; - } -}.play -) - -~simpleInput.print -~simpleOutput.print - -//look at the seed material -~mappingviz.plot(minval:-1,maxval:1) - -//create a buffer to query -~mappingresult = Buffer.alloc(s,512); - -//make the process then fit the data -~knn = FluidKNNRegressor(s,3,1) -~knn.fit(~simpleInput, ~simpleOutput, action:{"fitting done".postln}) - -// query 512 points along the line (slow because of all that sync'ing) -( -~knn.numNeighbours = 1; // change to see how many points the system uses to regress -Routine{ - 512.do{|i| - b.set(0,i*61); - ~knn.predictPoint(b,action:{|d|~mappingresult.set(i,d);}); - s.sync; - i.postln; - } -}.play -) - -// look at the interpolated values -~mappingresult.plot - -// change the number of neighbours to regress on -~knn.numNeighbours_(5) -~knn.fit(~simpleInput, ~simpleOutput, action:{"fitting done".postln}) - -// instead of doing the mapping per point, let's do a dataset of 512 points -~target = FluidDataSet(s) -~target.load(Dictionary.newFrom([\cols, 1, \data, Dictionary.newFrom(512.collect{|i|[i.asString, [i.asFloat * 61]]}.flatten)])) -~regressed = FluidDataSet(s) -~knn.predict(~target, ~regressed, action:{"prediction done".postln}) - -//dump the regressed values -~outputArray = Array.newClear(512); -~regressed.dump{|x| x["data"].keysValuesDo{|key,val|~outputArray[key.asInteger] = val[0]}} -~outputArray.plot \ No newline at end of file diff --git a/release-packaging/Examples/dataset/1-learning examples/5-normalization-and-standardization-example.scd b/release-packaging/Examples/dataset/1-learning examples/5-normalization-and-standardization-example.scd deleted file mode 100644 index 1cf3de6..0000000 --- a/release-packaging/Examples/dataset/1-learning examples/5-normalization-and-standardization-example.scd +++ /dev/null @@ -1,120 +0,0 @@ -( -// set some variables -~nb_of_dim = 10; -~dataset = FluidDataSet(s); -) - -( -// fill up the dataset with 20 entries of 10 column/dimension/descriptor values each.
The naming of the item's label is arbitrary as usual -Routine{ - var buf = Buffer.alloc(s,~nb_of_dim); - 20.do({ arg i; - buf.loadCollection(Array.fill(~nb_of_dim,{rrand(0.0,100.0)})); - ~dataset.addPoint("point-"++i.asInteger.asString, buf); - s.sync; - }); - buf.free; - \done.postln; -}.play -) - -~dataset.print; - -// make a buf for getting points back -~query_buf = Buffer.alloc(s,~nb_of_dim); - -// look at a point to see that it has values in it -~dataset.getPoint("point-0",~query_buf,{~query_buf.getn(0,~nb_of_dim,{|x|x.postln;});}); - -// look at another point to make sure it's different... -~dataset.getPoint("point-7",~query_buf,{~query_buf.getn(0,~nb_of_dim,{|x|x.postln;});}); - -/////////////////////////////////////////////////////// -// exploring full dataset normalization and standardization - -// make a FluidNormalize -~normalize = FluidNormalize(s,0,1); - -// fits the dataset to find the coefficients -~normalize.fit(~dataset,{"done".postln;}); - -// making an empty 'normed_dataset' which is required for the normalize function -~normed_dataset = FluidDataSet(s); - -// normalize the full dataset -~normalize.transform(~dataset,~normed_dataset,{"done".postln;}); - -// look at a point to see that it has values in it -~normed_dataset.getPoint("point-0",~query_buf,{~query_buf.getn(0,~nb_of_dim,{|x|x.postln;});}); -// 10 numbers between 0.0 and 1.0 where each column/dimension/descriptor is certain to have at least one item on which it is 0 and one on which it is 1 -// query a few more for fun - -// try FluidStandardize -~standardize = FluidStandardize(s); - -// fits the dataset to find the coefficients -~standardize.fit(~dataset,{"done".postln;}); - -// standardize the full dataset -~standardized_dataset = FluidDataSet(s); -~standardize.transform(~dataset,~standardized_dataset,{"done".postln;}); - -// look at a point to see that it has values in it -~standardized_dataset.getPoint("point-0",~query_buf,{~query_buf.getn(0,~nb_of_dim,{|x|x.postln;});}); -// 10 numbers that are standardized, which means that, for each column/dimension/descriptor, the average of all the points will be 0. and the standard deviation 1. - -//////////////////////////////////////////////////// -// exploring point querying concepts via norm and std - -// Once a dataset is normalized / standardized, query points have to be scaled accordingly to be used in distance measurement. In our instance, values were originally between 0 and 100, and now they will be between 0 and 1 (norm), or their average will be 0. (std). If we have data that we want to match from input in a similar range, which is usually the case, we will need to normalize the searching point in each dimension using the same coefficients. - -// first, make sure you have run all the code above, since we will query these datasets - -// get a known point as a query point -~dataset.getPoint("point-7",~query_buf); - -// find the 2 points with the shortest distances in the dataset -~tree = FluidKDTree.new(s,numNeighbours:2); -~tree.fit(~dataset) -~tree.kNearest(~query_buf, {|x| ("Labels:" + x).postln}); -~tree.kNearestDist(~query_buf, {|x| ("Distances:" + x).postln}); -// its nearest neighbour is itself: it should be itself and the distance should be 0. The second point depends on your input dataset.
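// An aside (not in the original example): a quick language-side sketch of why the rescaling below matters for distances. The values here are hypothetical, not taken from the dataset above: when one column spans 0-100 and another spans 0-1, the wide column dominates the raw Euclidean distance; once both are brought to a comparable range, both columns contribute.
(
var p1 = [90.0, 0.1], p2 = [10.0, 0.9];
var rawDist = ((p1 - p2).squared.sum).sqrt; // dominated by the 0-100 column
var p1Scaled = [90.0/100, 0.1], p2Scaled = [10.0/100, 0.9];
var scaledDist = ((p1Scaled - p2Scaled).squared.sum).sqrt; // both columns now count
"raw distance: % / scaled distance: %".format(rawDist.round(0.01), scaledDist.round(0.01)).postln;
)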
- -// normalise that point (~query_buf) to be at the right scale -~normbuf = Buffer.alloc(s,~nb_of_dim); -~normalize.transformPoint(~query_buf,~normbuf); -~normbuf.getn(0,~nb_of_dim,{arg vec;vec.postln;}); - -// make a tree of the normalized database and query with the normalized buffer -~normtree = FluidKDTree.new(s,numNeighbours:2); -~normtree.fit(~normed_dataset) -~normtree.kNearest(~normbuf, {|x| ("Labels:" + x).postln}); -~normtree.kNearestDist(~normbuf, {|x| ("Distances:" + x).postln}); -// its nearest neighbour is still itself as it should be, but the 2nd neighbour might have changed. The distance is now different too - -// standardize that same point (~query_buf) to be at the right scale -~stdbuf = Buffer.alloc(s,~nb_of_dim); -~standardize.transformPoint(~query_buf,~stdbuf); -~stdbuf.getn(0,~nb_of_dim,{arg vec;vec.postln;}); - -// make a tree of the standardized database and query with the standardized buffer -~stdtree = FluidKDTree.new(s, numNeighbours: 2); -~stdtree.fit(~standardized_dataset) -~stdtree.kNearest(~stdbuf, {|x| ("Labels:" + x).postln}); -~stdtree.kNearestDist(~stdbuf, {|x| ("Distances:" + x).postln}); -// its nearest neighbour is still itself as it should be, but the 2nd neighbour might have changed yet again. The distance is also different too - -// where it starts to be interesting is when we query points that are not in our original dataset - -// fill with known values (50.0 for each of the 10 columns/dimensions/descriptors, aka the theoretical middle point of the multidimensional space). This could be anything, but it is fun to aim for the middle. -~query_buf.fill(0,~nb_of_dim,50); - -// normalize and standardize the query buffer. Note that we do not need to fit since we have not added a point to our reference dataset -~normalize.transformPoint(~query_buf,~normbuf); -~standardize.transformPoint(~query_buf,~stdbuf); - -//query the single nearest neighbour via the 3 different data scalings. Depending on the random source at the beginning, you should get (small or large) differences between the 3 answers!
-[~tree,~normtree,~stdtree].do{|t| t.numNeighbours =1 }; -~tree.kNearest(~query_buf, {|x| ("Original:" + x).post;~tree.kNearestDist(~query_buf, {|x| (" with a distance of " + x).postln});}); -~normtree.kNearest(~normbuf, {|x| ("Normalized:" + x).post;~normtree.kNearestDist(~normbuf, {|x| (" with a distance of " + x).postln});}); -~stdtree.kNearest(~stdbuf, {|x| ("Standardized:" + x).post; ~stdtree.kNearestDist(~stdbuf, {|x| (" with a distance of " + x).postln});}); diff --git a/release-packaging/Examples/dataset/1-learning examples/8b-mlp-synth-control.scd b/release-packaging/Examples/dataset/1-learning examples/8b-mlp-synth-control.scd deleted file mode 100644 index c756125..0000000 --- a/release-packaging/Examples/dataset/1-learning examples/8b-mlp-synth-control.scd +++ /dev/null @@ -1,101 +0,0 @@ -//1- make the gui then the synth below -( -Window.closeAll; -s.waitForBoot{ - Task{ - var trained = 0, entering = 0; - var input_buffer = Buffer.alloc(s,2); - var output_buffer = Buffer.alloc(s,10); - var mlp = FluidMLPRegressor(s,[6],activation: 1,outputActivation: 1,maxIter: 1000,learnRate: 0.1,momentum: 0.9,batchSize: 1); - var entry_counter = 0; - var win, multislider, xyslider, synth, error_st, prediction_but, addPoints_but, train_but; - var item_width = 100; - var inData = FluidDataSet(s); - var outData = FluidDataSet(s); - - win = Window("ChaosSynth", Rect(10, 10, 840, 320)).front; - - multislider = MultiSliderView(win,Rect(10, 10, 400, 300)) - .elasticMode_(1) - .isFilled_(1) - .action_({ - arg ms; - // ms.value.postln; - synth.set(\val,ms.value); - output_buffer.setn(0,ms.value); - }) - .value_(0.5.dup(10)); - - xyslider = Slider2D(win,Rect(420,10,300, 300)) - .x_(0.5) - .y_(0.5) - .action_({ - arg sl; - - input_buffer.setn(0,[sl.x,sl.y]); - - if(prediction_but.value.asBoolean,{ - mlp.predictPoint(input_buffer,output_buffer,{ - output_buffer.getn(0,10,{ - arg output_values; - synth.set(\val, output_values); - { - multislider.value_(output_values) - }.defer; - }); - }); - }); - }); - - addPoints_but = Button(win, Rect(730,10,item_width, 20)) - .states_([["add points", Color.white, Color.grey]]) - .action_({ - inData.addPoint(entry_counter.asSymbol,input_buffer); - outData.addPoint(entry_counter.asSymbol,output_buffer); - entry_counter = entry_counter + 1; - inData.print; - outData.print; - }); - - train_but = Button(win, Rect(730,240,item_width, 20)) - .states_([["train", Color.red, Color.white]]) - .action_({ - mlp.fit(inData,outData,{ - arg loss; - {error_st.string_("loss: %".format(loss.round(0.001)))}.defer; - }); - }); - - prediction_but = Button(win, Rect(730,40,item_width, 20)) - .states_([["Not Predicting", Color.black, Color.white],["Predicting",Color.black,Color.white]]); - - error_st = StaticText(win,Rect(732,260,item_width,20)).string_("Error:"); - StaticText(win,Rect(732,70,item_width,20)).string_("rate:"); - TextField(win,Rect(730,90,item_width,20)).string_(0.1.asString).action_{|in|mlp.learnRate = in.value.asFloat.postln;}; - StaticText(win,Rect(732,110,item_width,20)).string_("momentum:"); - TextField(win,Rect(730,130,item_width,20)).string_(0.9.asString).action_{|in|mlp.momentum = in.value.asFloat.postln;}; - StaticText(win,Rect(732,150,item_width,20)).string_("maxIter:"); - TextField(win,Rect(730,170,item_width,20)).string_(1000.asString).action_{|in| mlp.maxIter = in.value.asInteger.postln;}; - - s.sync; - - //2- the synth - synth = { - arg val = #[0,0,0,0,0,0,0,0,0,0]; - var osc1, osc2, feed1, feed2, base1=69, base2=69, base3 = 130; - #feed2,feed1 = LocalIn.ar(2); - 
osc1 = MoogFF.ar(SinOsc.ar((((feed1 * val[0]) + val[1]) * base1).midicps,mul: (val[2] * 50).dbamp).atan,(base3 - (val[3] * (FluidLoudness.kr(feed2, 1, 0, hopSize: 64)[0].clip(-120,0) + 120))).lag(128/44100).midicps, val[4] * 3.5); - osc2 = MoogFF.ar(SinOsc.ar((((feed2 * val[5]) + val[6]) * base2).midicps,mul: (val[7] * 50).dbamp).atan,(base3 - (val[8] * (FluidLoudness.kr(feed1, 1, 0, hopSize: 64)[0].clip(-120,0) + 120))).lag(128/44100).midicps, val[9] * 3.5); - Out.ar(0,LeakDC.ar([osc1,osc2],mul: 0.1)); - LocalOut.ar([osc1,osc2]); - }.play; - }.play(AppClock); -}; -) - -///////// -//3 - play with the multislider -//4 - when you like a spot, move the 2d slider to a position that you want to represent that sound and click "add point" -//5 - do that for a few points -//6 - click train, keep clicking train until the loss is at or below 0.01 or so. feel free to adjust the learning rate, momentum, and max iter. -//7 - the 2D graph controls the 10D diff --git a/release-packaging/Examples/dataset/2-various other examples/scaling-dimension-as-weighting/2-3Dscaling.scd b/release-packaging/Examples/dataset/2-various other examples/scaling-dimension-as-weighting/2-3Dscaling.scd deleted file mode 100644 index 71b7e7f..0000000 --- a/release-packaging/Examples/dataset/2-various other examples/scaling-dimension-as-weighting/2-3Dscaling.scd +++ /dev/null @@ -1,161 +0,0 @@ -// Make: -// - A kmeans -// - a datasetquery -// - a normalizer -// - a standardizer -// - 3 DataSets of example points R-G-B descriptions -// - 3 DataSets for the scaled versions -// - 1 summative dataset and a LabelSet for predicted labels - -( -~classifier = FluidKMeans(s,5, 1000); -~query = FluidDataSetQuery(s); -~stan = FluidStandardize(s); -~norm = FluidNormalize(s); -~sourceR = FluidDataSet(s); -~sourceG = FluidDataSet(s); -~sourceB = FluidDataSet(s); -~scaledR = FluidDataSet(s); -~scaledG = FluidDataSet(s); -~scaledB = FluidDataSet(s); -~composited = FluidDataSet(s); -~labels = FluidLabelSet(s); -) - -//Make some random, but clustered test points, each descriptor category in a separate dataset -( -~sourceR.load(Dictionary.newFrom([\cols, 1, \data, (Dictionary.newFrom(40.collect{|x| [x, 1.0.sum3rand]}.flatten))])); -~sourceG.load(Dictionary.newFrom([\cols, 1, \data, (Dictionary.newFrom(40.collect{|x| [x, 1.0.rand2]}.flatten))])); -~sourceB.load(Dictionary.newFrom([\cols, 1, \data, (Dictionary.newFrom(40.collect{|x| [x, (0.5.sum3rand).squared + [0.75,-0.1].choose]}.flatten))])); -) - -//here we manipulate - -//assemble the scaled dataset -( -~query.addColumn(0, { - ~query.transformJoin(~sourceB, ~sourceG, ~composited, { - ~query.transformJoin(~sourceR, ~composited, ~composited); - }); -}); -) - -~composited.print - -//Fit the classifier to the example DataSet and labels, and then run prediction on the test data into our mapping label set -~classifier.fitPredict(~composited,~labels,{~labels.dump{|x|~labeldict = x;};~composited.dump{|x|~compodict=x;};}); - -//Visualise: -( -w = Window("sourceClasses", Rect(128, 64, 820, 120)); -w.drawFunc = { - Pen.use{ - ~compodict["data"].keysValuesDo{|key, colour| - Pen.fillColor = Color.fromArray((colour * 0.5 + 0.5 ).clip(0,1) ++ 1); - Pen.fillRect( Rect( (key.asFloat * 20 + 10), (~labeldict["data"].at(key).asInteger[0] * 20 + 10),15,15)); - }; - }; -}; -w.refresh; -w.front; -) - -// standardize our colours and rerun -( -~stan.fitTransform(~sourceR, ~scaledR, { - ~stan.fitTransform(~sourceG, ~scaledG, { - ~stan.fitTransform(~sourceB, ~scaledB, { - //assemble - ~query.addColumn(0, { - 
~query.transformJoin(~scaledB, ~scaledG, ~composited, { - ~query.transformJoin(~scaledR, ~composited, ~composited, { - //fit - ~classifier.fitPredict(~composited,~labels,{~labels.dump{|x|~labeldict2 = x;};~composited.dump{|x|~compodict2=x;};}); - }); - }); - }); - }); - }); -}); -) - -//Visualise: -( -w = Window("stanClasses", Rect(128, 204, 820, 120)); -w.drawFunc = { - Pen.use{ - ~compodict2["data"].keysValuesDo{|key, colour| - Pen.fillColor = Color.fromArray((colour * 0.25 + 0.5 ).clip(0,1) ++ 1); - Pen.fillRect( Rect( (key.asFloat * 20 + 10), (~labeldict2["data"].at(key).asInteger[0] * 20 + 10),15,15)); - }; - }; -}; -w.refresh; -w.front; -) - -//now let's normalise instead -( -~norm.fitTransform(~sourceR, ~scaledR, { - ~norm.fitTransform(~sourceG, ~scaledG, { - ~norm.fitTransform(~sourceB, ~scaledB, { - //assemble - ~query.addColumn(0, { - ~query.transformJoin(~scaledB, ~scaledG, ~composited, { - ~query.transformJoin(~scaledR, ~composited, ~composited, { - //fit - ~classifier.fitPredict(~composited,~labels,{~labels.dump{|x|~labeldict2 = x;};~composited.dump{|x|~compodict2=x;};}); - }); - }); - }); - }); - }); -}); -) - -//Visualise: -( -w = Window("normClasses", Rect(128, 344, 820, 120)); -w.drawFunc = { - Pen.use{ - ~compodict2["data"].keysValuesDo{|key, colour| - Pen.fillColor = Color.fromArray((colour * 0.25 + 0.5 ).clip(0,1) ++ 1); - Pen.fillRect( Rect( (key.asFloat * 20 + 10), (~labeldict2["data"].at(key).asInteger[0] * 20 + 10),15,15)); - }; - }; -}; -w.refresh; -w.front; -) - -// let's mess up with the scaling of one dimension: let's multiply the range of Red by 10 -~norm.min = -10; -~norm.max = 10; -( -~norm.fitTransform(~sourceR, ~scaledR, { - //assemble - ~query.addColumn(0, { - ~query.transformJoin(~scaledB, ~scaledG, ~composited, { - ~query.transformJoin(~scaledR, ~composited, ~composited, { - //fit - ~classifier.fitPredict(~composited,~labels,{~labels.dump{|x|~labeldict2 = x;};~composited.dump{|x|~compodict2=x;};}); - }); - }); - }); -}); -) - -//Visualise: -( -w = Window("norm10rClasses", Rect(128, 484, 820, 120)); -w.drawFunc = { - Pen.use{ - ~compodict2["data"].keysValuesDo{|key, colour| - Pen.fillColor = Color.fromArray((colour * 0.25 + 0.5 ).clip(0,1) ++ 1); - Pen.fillRect( Rect( (key.asFloat * 20 + 10), (~labeldict2["data"].at(key).asInteger[0] * 20 + 10),15,15)); - }; - }; -}; -w.refresh; -w.front; -) diff --git a/release-packaging/Examples/nmf/JiT-NMF-classifier.scd b/release-packaging/Examples/nmf/JiT-NMF-classifier.scd deleted file mode 100644 index 1b15b60..0000000 --- a/release-packaging/Examples/nmf/JiT-NMF-classifier.scd +++ /dev/null @@ -1,202 +0,0 @@ -// using nmf in 'real-time' as a classifier -// how it works: a circular buffer is recording and attacks trigger the process -// if in learning mode, it does a one component nmf which makes an approximation of the base. 3 of those will be copied in 3 different positions of our final 3-component base -// in in guessing mode, it does a thres component nmf from the trained bases and yields the 3 activation peaks, on which it thresholds resynth - -//how to use: -// 1. start the server -// 2. select between parenthesis below and execute. You should get a window with 3 pads (bd sn hh) and various menus -// 3. train the 3 classes: -// 3.1 select the learn option -// 3.2 select which class you want to train -// 3.3 play the sound you want to associate with that class a few times (the left audio channel is the source) -// 3.4 click the transfer button -// 3.5 repeat (3.2-3.4) for the other 2 classes. 
-// 3.x you can observe the 3 bases here: -~classify_bases.plot(numChannels:3) - -// 4. classify -// 4.1 select the classify option -// 4.2 press a pad and look at the activation -// 4.3 tweak the thresholds and enjoy the resynthesis. (the right audio channel is the detected class where classA is a bd sound) -// 4.x you can observe the 3 activations here: -~activations.plot(numChannels:3) - -/// code to execute first -( -var circle_buf = Buffer.alloc(s,s.sampleRate * 2); // b -var input_bus = Bus.audio(s,1); // g -var classifying = 0; // c -var cur_training_class = 0; // d -var train_base = Buffer.alloc(s, 65); // e -var activation_vals = [0.0,0.0,0.0]; // j -var thresholds = [0.5,0.5,0.5]; // k -var activations_disps; -var analysis_synth; -var osc_func; -var update_rout; - -~classify_bases = Buffer.alloc(s, 65, 3); // f -~activations = Buffer.new(s); - -// the circular buffer with triggered actions sending the location of the head at the attack -Routine { - SynthDef(\JITcircular,{arg bufnum = 0, input = 0, env = 0; - var head, head2, duration, audioin, halfdur, trig; - duration = BufFrames.kr(bufnum) / 2; - halfdur = duration / 2; - head = Phasor.ar(0,1,0,duration); - head2 = (head + halfdur) % duration; - - // circular buffer writer - audioin = In.ar(input,1); - BufWr.ar(audioin,bufnum,head,0); - BufWr.ar(audioin,bufnum,head+duration,0); - trig = FluidAmpSlice.ar(audioin, 10, 1666, 2205, 2205, 12, 9, -47,4410, 85); - - // cue the calculations via the language - SendReply.ar(trig, '/attack',head); - - Out.ar(0,audioin); - }).add; - - // drum sounds taken from original code by snappizz - // https://sccode.org/1-523 - // produced further and humanised by PA - SynthDef(\fluidbd, { - |out = 0| - var body, bodyFreq, bodyAmp; - var pop, popFreq, popAmp; - var click, clickAmp; - var snd; - - // body starts midrange, quickly drops down to low freqs, and trails off - bodyFreq = EnvGen.ar(Env([Rand(200,300), 120, Rand(45,49)], [0.035, Rand(0.07,0.1)], curve: \exp)); - bodyAmp = EnvGen.ar(Env([0,Rand(0.8,1.3),1,0],[0.005,Rand(0.08,0.085),Rand(0.25,0.35)]), doneAction: 2); - body = SinOsc.ar(bodyFreq) * bodyAmp; - // pop sweeps over the midrange - popFreq = XLine.kr(Rand(700,800), Rand(250,270), Rand(0.018,0.02)); - popAmp = EnvGen.ar(Env([0,Rand(0.8,1.3),1,0],[0.001,Rand(0.018,0.02),Rand(0.0008,0.0013)])); - pop = SinOsc.ar(popFreq) * popAmp; - // click is spectrally rich, covering the high-freq range - // you can use Formant, FM, noise, whatever - clickAmp = EnvGen.ar(Env.perc(0.001,Rand(0.008,0.012),Rand(0.07,0.12),-5)); - click = RLPF.ar(VarSaw.ar(Rand(900,920),0,0.1), 4760, 0.50150150150) * clickAmp; - - snd = body + pop + click; - snd = snd.tanh; - - Out.ar(out, snd); - }).add; - - SynthDef(\fluidsn, { - |out = 0| - var pop, popAmp, popFreq; - var noise, noiseAmp; - var click; - var snd; - - // pop makes a click coming from very high frequencies - // slowing down a little and stopping in mid-to-low - popFreq = EnvGen.ar(Env([Rand(3210,3310), 410, Rand(150,170)], [0.005, Rand(0.008,0.012)], curve: \exp)); - popAmp = EnvGen.ar(Env.perc(0.001, Rand(0.1,0.12), Rand(0.7,0.9),-5)); - pop = SinOsc.ar(popFreq) * popAmp; - // bandpass-filtered white noise - noiseAmp = EnvGen.ar(Env.perc(0.001, Rand(0.13,0.15), Rand(1.2,1.5),-5), doneAction: 2); - noise = BPF.ar(WhiteNoise.ar, 810, 1.6) * noiseAmp; - - click = Impulse.ar(0); - snd = (pop + click + noise) * 1.4; - - Out.ar(out, snd); - }).add; - - SynthDef(\fluidhh, { - |out = 0| - var click, clickAmp; - var noise, noiseAmp, noiseFreq; - - // noise -> 
resonance -> expodec envelope - noiseAmp = EnvGen.ar(Env.perc(0.001, Rand(0.28,0.3), Rand(0.4,0.6), [-20,-15]), doneAction: 2); - noiseFreq = Rand(3900,4100); - noise = Mix(BPF.ar(ClipNoise.ar, [noiseFreq, noiseFreq+141], [0.12, 0.31], [2.0, 1.2])) * noiseAmp; - - Out.ar(out, noise); - }).add; - - // makes sure all the synthdefs are on the server - s.sync; - - // instantiate the JIT-circular-buffer - analysis_synth = Synth(\JITcircular,[\bufnum, circle_buf, \input, input_bus]); - train_base.fill(0,65,0.1); - - // instantiate the listener to cue the processing from the language side - osc_func = OSCFunc({ arg msg; - var head_pos = msg[3]; - // when an attack happens - if (classifying == 0, { - // if in training mode, makes a single component nmf - FluidBufNMF.process(s, circle_buf, head_pos, 128, bases:train_base, basesMode: 1, windowSize: 128); - }, { - // if in classifying mode, makes a 3 component nmf from the pretrained bases and compares the activations with the set thresholds - FluidBufNMF.process(s, circle_buf, head_pos, 128, components:3, bases:~classify_bases, basesMode: 2, activations:~activations, windowSize: 128, action:{ - // we are retrieving and comparing against the 2nd activation, because FFT processes are zero-padded on each sides, therefore the complete 128 samples are in the middle of the analysis. - ~activations.getn(3,3,{|x| - activation_vals = x; - if (activation_vals[0] >= thresholds[0], {Synth(\fluidbd,[\out,1])}); - if (activation_vals[1] >= thresholds[1], {Synth(\fluidsn,[\out,1])}); - if (activation_vals[2] >= thresholds[2], {Synth(\fluidhh,[\out,1])}); - defer{ - activations_disps[0].string_("A:" ++ activation_vals[0].round(0.01)); - activations_disps[1].string_("B:" ++ activation_vals[1].round(0.01)); - activations_disps[2].string_("C:" ++ activation_vals[2].round(0.01)); - }; - }); - }; - ); - }); - }, '/attack', s.addr); - - // make sure all the synths are instantiated - s.sync; - - // GUI for control - { - var win = Window("Control", Rect(100,100,610,100)).front; - - Button(win, Rect(10,10,80, 80)).states_([["bd",Color.black,Color.white]]).mouseDownAction_({Synth(\fluidbd, [\out, input_bus], analysis_synth, \addBefore)}); - Button(win, Rect(100,10,80, 80)).states_([["sn",Color.black,Color.white]]).mouseDownAction_({Synth(\fluidsn, [\out, input_bus], analysis_synth, \addBefore)}); - Button(win, Rect(190,10,80, 80)).states_([["hh",Color.black,Color.white]]).mouseDownAction_({Synth(\fluidhh, [\out, input_bus], analysis_synth,\addBefore)}); - StaticText(win, Rect(280,7,85,25)).string_("Select").align_(\center); - PopUpMenu(win, Rect(280,32,85,25)).items_(["learn","classify"]).action_({|value| - classifying = value.value; - if(classifying == 0, { - train_base.fill(0,65,0.1) - }); - }); - PopUpMenu(win, Rect(280,65,85,25)).items_(["classA","classB","classC"]).action_({|value| - cur_training_class = value.value; - train_base.fill(0,65,0.1); - }); - Button(win, Rect(375,65,85,25)).states_([["transfer",Color.black,Color.white]]).mouseDownAction_({ - if(classifying == 0, { - // if training - FluidBufCompose.process(s, train_base, numChans:1, destination:~classify_bases, destStartChan:cur_training_class); - }); - }); - StaticText(win, Rect(470,7,75,25)).string_("Acts"); - activations_disps = Array.fill(3, {arg i; - StaticText(win, Rect(470,((i+1) * 20 )+ 7,80,25)); - }); - StaticText(win, Rect(540,7,55,25)).string_("Thresh").align_(\center); - 3.do {arg i; - TextField(win, Rect(540,((i+1) * 20 )+ 7,55,25)).string_("0.5").action_({|x| thresholds[i] = x.value.asFloat;}); 
- }; - - win.onClose_({circle_buf.free;input_bus.free;osc_func.clear;analysis_synth.free;}); - }.defer; -}.play; -) - -// thanks to Ted Moore for the SC code cleaning and improvements!
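// Optional extra (not part of the original example): persist the trained bases between sessions. A minimal sketch using plain Buffer read/write; the file path below is hypothetical.
~basesPath = Platform.defaultTempDir ++ "jit-nmf-classifier-bases.wav";
~classify_bases.write(~basesPath, "wav", "float"); // save once the 3 classes are trained
// ...in a later session, after running the setup block above:
~classify_bases.read(~basesPath); // reload the saved bases into the 3-channel buffer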