diff --git a/release-packaging/HelpSource/Classes/FluidBufCompose.schelp b/release-packaging/HelpSource/Classes/FluidBufCompose.schelp
index 7f188f8..ff76958 100644
--- a/release-packaging/HelpSource/Classes/FluidBufCompose.schelp
+++ b/release-packaging/HelpSource/Classes/FluidBufCompose.schelp
@@ -94,74 +94,4 @@ FluidBufCompose.process(s, source: b, numFrames: 44100, numChans: 1, destStartCh
 FluidBufCompose.process(s, source: c, numFrames:44100, numChans:1, destination: d, destGain: 1.0);
 d.query;
 d.play;
-::
-
-STRONG::A more complex example: using composition as an Mid-Side filtering process::
-
-CODE::
-// load a stereo buffer and initialise the many destinations
-(
-b = Buffer.read(s,File.realpath(FluidBufCompose.class.filenameSymbol).dirname.withTrailingSlash ++ "../AudioFiles/Tremblay-SA-UprightPianoPedalWide.wav");
-c = Buffer.new(s);
-d = Buffer.new(s);
-e = Buffer.new(s);
-f = Buffer.new(s);
-)
-
-// encode the mid (in c) and the side (in d)
-(
-FluidBufCompose.process(s,b, numChans: 1, gain: -3.0.dbamp, destination: c);
-FluidBufCompose.process(s,b, numChans: 1, gain: -3.0.dbamp, destination: d);
-FluidBufCompose.process(s,b, numChans: 1, gain: -3.0.dbamp, startChan: 1, destination: c, destGain: 1.0);
-FluidBufCompose.process(s,b, numChans: 1, gain: -3.0.dbamp * -1.0, startChan: 1, destination: d, destGain: 1.0);
-)
-
-// (optional) compare auraly the stereo with the MS
-b.play;
-{PlayBuf.ar(1,[c,d])}.play;
-
-// The geeky bit: copy the side (buffer d) on itself with specific amplitudes and delays, in effect applying a FIR filter through expensive convolution
-
-// Important: do either of the 3 options below
-
-// option 1: apply a high pass on the side, with a cutoff of nyquist / 4
-e.free; e = Buffer.new(s);
-(
-[1.0, -1.0].do({ arg x,y;
-    FluidBufCompose.process(s, d, gain: x, destStartFrame: y, destination: e, destGain: 1.0);
-});
-)
-
-// option 2: apply a high pass on the side, with a cutoff of nyquist / 10
-e.free; e = Buffer.new(s);
-(
-[0.8, -0.32, -0.24, -0.16, -0.08].do({ arg x,y;
-    FluidBufCompose.process(s, d, gain: x, destStartFrame: y, destination: e, destGain: 1.0);
-});
-)
-
-// option 3: apply a high pass on the side, with a cutoff of nyquist / 100
-e.free; e = Buffer.new(s);
-(
-[0.982494, -0.066859, -0.064358, -0.061897, -0.059477, -0.057098, -0.054761, -0.052466, -0.050215, -0.048007, -0.045843, -0.043724, -0.041649, -0.03962, -0.037636, -0.035697, -0.033805, -0.031959, -0.030159, -0.028406, -0.026699, -0.025038, -0.023425, -0.021857, -0.020337].do({ arg x,y;
-    FluidBufCompose.process(s, d, gain: x, destStartFrame: y, destination: e, destGain: 1.0);
-});
-)
-
-// play the high-passed side buffer
-e.play;
-// if you want to try the other filters, do not forget to clear the destination buffer since it will add programmatically onto itself and would not create the expected frequency response
-
-// decode the MS back to stereo
-(
-FluidBufCompose.process(s,c, numChans: 2, gain: -3.0.dbamp, destination: f);
-FluidBufCompose.process(s,e, gain: -3.0.dbamp, destination: f, destGain: 1.0);
-FluidBufCompose.process(s,e, gain: -3.0.dbamp * -1.0, destination: f, destStartChan: 1, destGain: 1.0);
-)
-
-// play the MS processed version
-f.play;
-
-// compare with the original
-b.play;
-::
+::
\ No newline at end of file
diff --git a/release-packaging/HelpSource/Classes/FluidNMFMatch.schelp b/release-packaging/HelpSource/Classes/FluidNMFMatch.schelp
index 3aeecc2..1dc37f3 100644
--- a/release-packaging/HelpSource/Classes/FluidNMFMatch.schelp
+++ b/release-packaging/HelpSource/Classes/FluidNMFMatch.schelp
@@ -182,134 +182,8 @@ z.do({|chan| FluidBufCompose.process(s, ~bases, startChan:chan, numChans: 1, des
 )
 ::
-STRONG::Object finder::
- CODE::
-//set some buffers
-(
-b = Buffer.read(s,File.realpath(FluidNMFMatch.class.filenameSymbol).dirname.withTrailingSlash ++ "../AudioFiles/Tremblay-BaB-SoundscapeGolcarWithDog.wav");
-c = Buffer.new(s);
-x = Buffer.new(s);
-e = Buffer.new(s);
-)
-
-// train where all objects are present
-(
-Routine {
-    FluidBufNMF.process(s,b,130000,150000,0,1, c, x, components:10);
-    c.query;
-}.play;
-)
-
-// wait for the query to print
-// then find a component for each item you want to find. You could also sum them. Try to find a component with a good object-to-rest ratio
-(
-    ~dog =4;
-    {PlayBuf.ar(10,c)[~dog]}.play
-)
-
-(
-    ~bird = 3;
-    {PlayBuf.ar(10,c)[~bird]}.play
-)
-
-
-// copy at least one other component to a third filter, a sort of left-over channel
-(
-Routine{
-    FluidBufCompose.process(s, x, startChan:~dog, numChans: 1, destination: e);
-    FluidBufCompose.process(s, x, startChan:~bird, numChans: 1, destStartChan: 1, destination: e, destGain:1);
-    (0..9).removeAll([~dog,~bird]).do({|chan|FluidBufCompose.process(s,x, startChan:chan, numChans: 1, destStartChan: 2, destination: e, destGain:1)});
-    e.query;
-}.play;
-)
-e.plot;
-
-//using this trained basis we can then see the activation... (wait for 5 seconds before it prints!)
-(
-{
-    var source, blips;
-    //read the source
-    source = PlayBuf.ar(2, b);
-    blips = FluidNMFMatch.kr(source.sum,e,3);
-}.plot(5);
-)
-// ...and use some threshold to 'find' objects...
-(
-{
-    var source, blips;
-    //read the source
-    source = PlayBuf.ar(2, b);
-    blips = Schmidt.kr(FluidNMFMatch.kr(source.sum,e,3),0.5,[10,1,1000]);
-}.plot(5);
-)
-
-// ...and use these to sonify them
-(
-{
-    var source, blips, dogs, birds;
-    //read the source
-    source = PlayBuf.ar(2, b);
-    blips = Schmidt.kr(FluidNMFMatch.kr(source.sum,e,3),0.5,[10,1,1000]);
-    dogs = SinOsc.ar(100,0,Lag.kr(blips[0],0.05,0.15));
-    birds = SinOsc.ar(1000,0,Lag.kr(blips[1],0.05,0.05));
-    [dogs, birds] + source;
-}.play;
-)
-::
-
-STRONG::Pretrained piano::
- CODE::
-//load in the sound in and a pretrained basis
-(
-    b = Buffer.read(s,File.realpath(FluidNMFMatch.class.filenameSymbol).dirname.withTrailingSlash ++ "../AudioFiles/Tremblay-SA-UprightPianoPedalWide.wav");
-    c = Buffer.read(s,File.realpath(FluidNMFMatch.class.filenameSymbol).dirname.withTrailingSlash ++ "../AudioFiles/filters/piano-dicts.wav");
-)
-b.play
-c.query
-
-//use the pretrained bases to compute activations of each notes to drive the amplitude of a resynth
-(
-{
-    var source, resynth;
-    source = PlayBuf.ar(2, b,loop:1).sum;
-    resynth = SinOsc.ar((21..108).midicps, 0, FluidNMFMatch.kr(source,c,88,10,4096).madd(0.002)).sum;
-    [source, resynth]
-}.play
-)
-
-
-//now sample and hold the same stream to get notes identified, played and sent back via osc
-(
-{
-    var source, resynth, chain, trig, acts;
-    source = PlayBuf.ar(2,b,loop:1).sum;
-
-    // built in attack detection, delayed until the stable part of the sound
-    chain = FFT(LocalBuf(256), source);
-    trig = TDelay.kr(Onsets.kr(chain, 0.5),0.1);
-
-    // samples and holds activation values that are scaled and capped, in effect thresholding them
-    acts = Latch.kr(FluidNMFMatch.kr(source,c,88,10,4096).linlin(15,20,0,0.1),trig);
-
-    // resynths as in the previous example, with the values sent back to the language
-    resynth = SinOsc.ar((21..108).midicps, 0, acts).sum;
-    SendReply.kr(trig, '/activations', acts);
-    [source, resynth]
-    // [source, T2A.ar(trig)]
-    // resynth
-}.play
-)
-// define a receiver for the activations
-(
-    OSCdef(\listener, {|msg|
-        var data = msg[3..];
-        // removes the silent and spits out the indicies as midinote number
-        data.collect({arg item, i; if (item > 0.01, {i + 21})}).reject({arg item; item.isNil}).postln;
-    }, '/activations');
-)
-
-::
 STRONG::Strange Resonators::
 CODE::
 //load the source and declare buffers/arrays
diff --git a/release-packaging/ignore/Examples/buffer_compositing/bufcompose-MS-FIR.sc b/release-packaging/ignore/Examples/buffer_compositing/bufcompose-MS-FIR.sc
new file mode 100644
index 0000000..b13b37a
--- /dev/null
+++ b/release-packaging/ignore/Examples/buffer_compositing/bufcompose-MS-FIR.sc
@@ -0,0 +1,67 @@
+// A complex example of using composition as a Mid-Side FIR filtering process
+
+// load a stereo buffer and initialise the many destinations
+(
+b = Buffer.read(s,File.realpath(FluidBufCompose.class.filenameSymbol).dirname.withTrailingSlash ++ "../AudioFiles/Tremblay-SA-UprightPianoPedalWide.wav");
+c = Buffer.new(s);
+d = Buffer.new(s);
+e = Buffer.new(s);
+f = Buffer.new(s);
+)
+
+// encode the mid (in c) and the side (in d)
+(
+FluidBufCompose.process(s,b, numChans: 1, gain: -3.0.dbamp, destination: c);
+FluidBufCompose.process(s,b, numChans: 1, gain: -3.0.dbamp, destination: d);
+FluidBufCompose.process(s,b, numChans: 1, gain: -3.0.dbamp, startChan: 1, destination: c, destGain: 1.0);
+FluidBufCompose.process(s,b, numChans: 1, gain: -3.0.dbamp * -1.0, startChan: 1, destination: d, destGain: 1.0);
+)
+
+// (optional) compare aurally the stereo original with the MS pair
+b.play;
+{PlayBuf.ar(1,[c,d])}.play;
+
+// The geeky bit: copy the side (buffer d) onto itself with specific amplitudes and delays, in effect applying a FIR filter through expensive convolution
+
+// Important: run only one of the 3 options below
+
+// option 1: apply a high pass on the side, with a cutoff of nyquist / 4
+e.free; e = Buffer.new(s);
+(
+[1.0, -1.0].do({ arg x,y;
+    FluidBufCompose.process(s, d, gain: x, destStartFrame: y, destination: e, destGain: 1.0);
+});
+)
+
+// option 2: apply a high pass on the side, with a cutoff of nyquist / 10
+e.free; e = Buffer.new(s);
+(
+[0.8, -0.32, -0.24, -0.16, -0.08].do({ arg x,y;
+    FluidBufCompose.process(s, d, gain: x, destStartFrame: y, destination: e, destGain: 1.0);
+});
+)
+
+// option 3: apply a high pass on the side, with a cutoff of nyquist / 100
+e.free; e = Buffer.new(s);
+(
+[0.982494, -0.066859, -0.064358, -0.061897, -0.059477, -0.057098, -0.054761, -0.052466, -0.050215, -0.048007, -0.045843, -0.043724, -0.041649, -0.03962, -0.037636, -0.035697, -0.033805, -0.031959, -0.030159, -0.028406, -0.026699, -0.025038, -0.023425, -0.021857, -0.020337].do({ arg x,y;
+    FluidBufCompose.process(s, d, gain: x, destStartFrame: y, destination: e, destGain: 1.0);
+});
+)
+
+// play the high-passed side buffer
+e.play;
+// if you want to try another of the filters, do not forget to clear the destination buffer first (e.free; e = Buffer.new(s);): the process sums into it, so you would not get the expected frequency response otherwise
+
+// decode the MS back to stereo
+(
+FluidBufCompose.process(s,c, numChans: 2, gain: -3.0.dbamp, destination: f);
+FluidBufCompose.process(s,e, gain: -3.0.dbamp, destination: f, destGain: 1.0);
+FluidBufCompose.process(s,e, gain: -3.0.dbamp * -1.0, destination: f, destStartChan: 1, destGain: 1.0);
+)
+
+// play the MS processed version
+f.play;
+
+// compare with the original
+b.play;
\ No newline at end of file
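A side note, outside the patch itself: the FIR coefficient lists above arrive as magic numbers. The sketch below shows one way such high-pass coefficients could be derived in sclang, by spectral inversion of a Hann-windowed sinc low-pass; the cutoff fraction and tap count are illustrative assumptions, and this is not necessarily how the shipped values were generated.

(
// one possible high-pass FIR design: windowed-sinc low-pass, then spectral inversion
~firHighPass = { arg cutoff = 0.01, taps = 25;   // cutoff as a fraction of the sample rate
    var m = taps - 1;
    var lp = Array.fill(taps, { arg n;
        var x = n - (m / 2);
        var sinc = if(x == 0, { 2 * cutoff }, { sin(2pi * cutoff * x) / (pi * x) });
        sinc * (0.5 - (0.5 * cos(2pi * n / m)));  // Hann window
    });
    lp = lp / lp.sum;                             // unity gain at DC for the low-pass
    // delta minus low-pass gives the complementary high-pass
    Array.fill(taps, { arg n; if(n == (m / 2), { 1 - lp[n] }, { lp[n].neg }) });
};
~firHighPass.value(0.01, 25).postln;              // 25 taps, cutoff at 1% of the sample rate
)

The resulting list can be fed to the same .do loop as the three options above, one coefficient per destStartFrame.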
diff --git a/release-packaging/ignore/Examples/buffer_compositing/bufcomposemacros.scd b/release-packaging/ignore/Examples/buffer_compositing/bufcomposemacros.scd
new file mode 100644
index 0000000..061349f
--- /dev/null
+++ b/release-packaging/ignore/Examples/buffer_compositing/bufcomposemacros.scd
@@ -0,0 +1,100 @@
+// (re)set the source buffers
+(
+~low = Buffer.sendCollection(s, (Signal.sineFill(4410, Array.fill(3,0) ++ 1)));
+~mid = Buffer.sendCollection(s, (Signal.sineFill(4410, Array.fill(12,0) ++ 1)));
+~high = Buffer.sendCollection(s, (Signal.sineFill(4410, Array.fill(48,0) ++ 1)));
+~piano = Buffer.read(s,File.realpath(FluidBufCompose.class.filenameSymbol).dirname.withTrailingSlash ++ "../AudioFiles/Tremblay-SA-UprightPianoPedalWide.wav",0,8820);
+)
+
+// draw the buffers to see what happened
+(
+~low.plot;
+~mid.plot;
+~high.plot;
+~piano.plot;
+)
+
+// define the concatenation macro
+(
+~concat = {
+    arg x;
+    if(x.class != Array,
+        {
+            "Error - Needs an array as argument".postln;
+        }, {
+            Routine{
+                for (1,x.size - 1, {
+                    arg i;
+                    FluidBufCompose.process(s,x[i],destination:x[0], destStartFrame:x[0].numFrames);
+                });
+                "Done!".postln;
+            }.play;
+        }
+    );
+}
+)
+// test various combinations of concatenation
+~concat.value([~low,~mid])
+~concat.value([~mid,~low,~high])
+~concat.value([~mid,~piano,~low])
+~concat.value([~mid,~piano])
+
+// check the buffers for the results
+
+////////////////////////////////
+
+// define the merging macro
+(
+~merge = {
+    arg x;
+    if(x.class != Array,
+        {
+            "Error - Needs an array as argument".postln;
+        }, {
+            Routine{
+                for (1,x.size - 1, {
+                    arg i;
+                    FluidBufCompose.process(s,x[i],destination:x[0],destGain:1);
+                });
+                "Done!".postln;
+            }.play;
+        }
+    );
+}
+)
+// test various combinations of merging
+~merge.value([~low,~mid])
+~merge.value([~mid,~low,~high])
+~merge.value([~mid,~piano,~low])
+~merge.value([~mid,~piano])
+
+// check the buffers for the results
+
+////////////////////////////////
+
+// define the stacking macro
+(
+~stack = {
+    arg x;
+    if(x.class != Array,
+        {
+            "Error - Needs an array as argument".postln;
+        }, {
+            Routine{
+                for (1,x.size - 1, {
+                    arg i;
+                    FluidBufCompose.process(s,x[i],destination:x[0], destStartChan:x[0].numChannels);
+                });
+                "Done!".postln;
+            }.play;
+        }
+    );
+}
+)
+// test various combinations of stacking
+~stack.value([~low,~mid])
+~stack.value([~mid,~low,~high])
+~stack.value([~mid,~piano,~low])
+~stack.value([~mid,~piano])
+
+// check the buffers for the results
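A side note, outside the patch itself: when "checking the buffers for the results", it can help to ask the server for the destination's updated geometry rather than trusting the cached client-side values. Assuming freshly (re)set buffers, ~concat.value([~low,~mid]) should leave ~low with 8820 frames (4410 + 4410) and 1 channel, while ~stack.value([~low,~mid]) should leave it with 4410 frames and 2 channels. A minimal check:

(
// query the server and post the up-to-date frame and channel counts
~low.updateInfo({ arg buf;
    ("~low now has " ++ buf.numFrames ++ " frames and " ++ buf.numChannels ++ " channel(s)").postln;
});
)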
diff --git a/release-packaging/ignore/Examples/nmf/JiT-NMF-classifier.scd b/release-packaging/ignore/Examples/nmf/JiT-NMF-classifier.scd
new file mode 100644
index 0000000..dd450b7
--- /dev/null
+++ b/release-packaging/ignore/Examples/nmf/JiT-NMF-classifier.scd
@@ -0,0 +1,190 @@
+// using nmf in 'real-time' as a classifier
+// how it works: a circular buffer is recording constantly and attacks trigger the process
+// if in learning mode, it does a one-component nmf which makes an approximation of the basis. 3 of those will be copied into 3 different positions of our final 3-component basis
+// if in guessing mode, it does a three-component nmf from the trained bases and yields the 3 activation peaks, on which it thresholds the resynthesis
+
+//how to use:
+// 1. start the server
+// 2. select the block between parentheses below and execute it. You should get a window with 3 pads (bd sn hh) and various menus
+// 3. train the 3 classes:
+// 3.1 select the learn option
+// 3.2 select which class you want to train
+// 3.3 play the sound you want to associate with that class a few times (the left audio channel is the source)
+// 3.4 click the transfer button
+// 3.5 repeat (3.2-3.4) for the other 2 classes.
+// 3.x you can observe the 3 bases here:
+f.plot(numChannels:3)
+
+// 4. classify
+// 4.1 select the classify option
+// 4.2 press a pad and look at the activation
+// 4.3 tweak the thresholds and enjoy the resynthesis. (the right audio channel is the detected class where classA is a bd sound)
+// 4.x you can observe the 3 activations here:
+h.plot(numChannels:3)
+
+
+
+/// code to execute first
+(
+b = Buffer.alloc(s,s.sampleRate * 2);
+g = Bus.audio(s,1);
+c = 0;
+d = 0;
+e = Buffer.alloc(s, 65);
+f = Buffer.alloc(s, 65, 3);
+h = Buffer.alloc(s, 65, 3);
+j = [0.0,0.0,0.0];
+k = [0.5,0.5,0.5];
+
+// the circular buffer with triggered actions sending the location of the head at the attack
+Routine {
+    SynthDef(\JITcircular,{arg bufnum = 0, input = 0, env = 0;
+        var head, head2, duration, audioin, halfdur, trig;
+        duration = BufFrames.kr(bufnum) / 2;
+        halfdur = duration / 2;
+        head = Phasor.ar(0,1,0,duration);
+        head2 = (head + halfdur) % duration;
+
+        // circular buffer writer
+        audioin = In.ar(input,1);
+        BufWr.ar(audioin,bufnum,head,0);
+        BufWr.ar(audioin,bufnum,head+duration,0);
+        trig = FluidAmpSlice.ar(audioin,2205,2205,-47,-47,4410,4410,relRampUp: 10, relRampDown:1666, relThreshOn:12, relThreshOff: 9, highPassFreq: 85);
+
+        // cue the calculations via the language
+        SendReply.ar(trig, '/attack',head);
+
+        Out.ar(0,audioin);
+    }).add;
+
+    // drum sounds taken from original code by snappizz
+    // https://sccode.org/1-523
+    // produced further and humanised by PA
+    SynthDef(\fluidbd, {
+        |out = 0|
+        var body, bodyFreq, bodyAmp;
+        var pop, popFreq, popAmp;
+        var click, clickAmp;
+        var snd;
+
+        // body starts midrange, quickly drops down to low freqs, and trails off
+        bodyFreq = EnvGen.ar(Env([Rand(200,300), 120, Rand(45,49)], [0.035, Rand(0.07,0.1)], curve: \exp));
+        bodyAmp = EnvGen.ar(Env([0,Rand(0.8,1.3),1,0],[0.005,Rand(0.08,0.085),Rand(0.25,0.35)]), doneAction: 2);
+        body = SinOsc.ar(bodyFreq) * bodyAmp;
+        // pop sweeps over the midrange
+        popFreq = XLine.kr(Rand(700,800), Rand(250,270), Rand(0.018,0.02));
+        popAmp = EnvGen.ar(Env([0,Rand(0.8,1.3),1,0],[0.001,Rand(0.018,0.02),Rand(0.0008,0.0013)]));
+        pop = SinOsc.ar(popFreq) * popAmp;
+        // click is spectrally rich, covering the high-freq range
+        // you can use Formant, FM, noise, whatever
+        clickAmp = EnvGen.ar(Env.perc(0.001,Rand(0.008,0.012),Rand(0.07,0.12),-5));
+        click = RLPF.ar(VarSaw.ar(Rand(900,920),0,0.1), 4760, 0.50150150150) * clickAmp;
+
+        snd = body + pop + click;
+        snd = snd.tanh;
+
+        Out.ar(out, snd);
+    }).add;
+
+    SynthDef(\fluidsn, {
+        |out = 0|
+        var pop, popAmp, popFreq;
+        var noise, noiseAmp;
+        var click;
+        var snd;
+
+        // pop makes a click coming from very high frequencies
+        // slowing down a little and stopping in mid-to-low
+        popFreq = EnvGen.ar(Env([Rand(3210,3310), 410, Rand(150,170)], [0.005, Rand(0.008,0.012)], curve: \exp));
+        popAmp = EnvGen.ar(Env.perc(0.001, Rand(0.1,0.12), Rand(0.7,0.9),-5));
+        pop = SinOsc.ar(popFreq) * popAmp;
+        // bandpass-filtered white noise
+        noiseAmp = EnvGen.ar(Env.perc(0.001, Rand(0.13,0.15), Rand(1.2,1.5),-5), doneAction: 2);
+        noise = BPF.ar(WhiteNoise.ar, 810, 1.6) * noiseAmp;
+
+        click = Impulse.ar(0);
+        snd = (pop + click + noise) * 1.4;
+
+        Out.ar(out, snd);
+    }).add;
+
+    SynthDef(\fluidhh, {
+        |out = 0|
+        var click, clickAmp;
+        var noise, noiseAmp, noiseFreq;
+
+        // noise -> resonance -> expodec envelope
+        noiseAmp = EnvGen.ar(Env.perc(0.001, Rand(0.28,0.3), Rand(0.4,0.6), [-20,-15]), doneAction: 2);
+        noiseFreq = Rand(3900,4100);
+        noise = Mix(BPF.ar(ClipNoise.ar, [noiseFreq, noiseFreq+141], [0.12, 0.31], [2.0, 1.2])) * noiseAmp;
+
+        Out.ar(out, noise);
+    }).add;
+
+    // makes sure all the synthdefs are on the server
+    s.sync;
+
+    // instantiate the JIT-circular-buffer
+    x = Synth(\JITcircular,[\bufnum, b.bufnum, \input, g.index]);
+    e.fill(0,65,0.1);
+
+    // instantiate the listener to cue the processing from the language side
+    r = OSCFunc({ arg msg;
+        if (c == 0, {
+            // if in training mode, makes a single component nmf
+            FluidBufNMF.process(s, b, msg[3], 128, bases:e, basesMode: 1, windowSize: 128);
+        }, {
+            // if in classifying mode, makes a 3 component nmf from the pretrained bases and compares the activations with the set thresholds
+            FluidBufNMF.process(s, b, msg[3], 128, components:3, bases:f, basesMode: 2, activations:h, windowSize: 128, action:{
+                h.getn(3,3,{|x|
+                    j = x;
+                    if (j[0] >= k[0], {Synth(\fluidbd,[\out,1])});
+                    if (j[1] >= k[1], {Synth(\fluidsn,[\out,1])});
+                    if (j[2] >= k[2], {Synth(\fluidhh,[\out,1])});
+                });
+            };
+            );
+        });
+    }, '/attack', s.addr);
+
+    // make sure all the synths are instantiated
+    s.sync;
+
+    // GUI for control
+    {
+        w = Window("Control", Rect(100,100,590,100)).front;
+
+        Button(w, Rect(10,10,80, 80)).states_([["bd",Color.black,Color.white]]).mouseDownAction_({Synth(\fluidbd, [\out, g.index], x, \addBefore)});
+        Button(w, Rect(100,10,80, 80)).states_([["sn",Color.black,Color.white]]).mouseDownAction_({Synth(\fluidsn, [\out, g.index], x, \addBefore)});
+        Button(w, Rect(190,10,80, 80)).states_([["hh",Color.black,Color.white]]).mouseDownAction_({Synth(\fluidhh, [\out, g.index], x,\addBefore)});
+        StaticText(w, Rect(280,7,75,25)).string_("Select").align_(\center);
+        PopUpMenu(w, Rect(280,32,75,25)).items_(["learn","classify"]).action_({|value| c = value.value; if (c == 0, {e.fill(0,65,0.1)});});
+        PopUpMenu(w, Rect(280,65,75,25)).items_(["classA","classB","classC"]).action_({|value| d = value.value; e.fill(0,65,0.1);});
+        Button(w, Rect(365,65,65,25)).states_([["transfer",Color.black,Color.white]]).mouseDownAction_({if (c == 0, {FluidBufCompose.process(s, e, numChans:1, destination:f, destStartChan:d);});});
+        StaticText(w, Rect(440,7,75,25)).string_("Activations");
+        l = Array.fill(3, {arg i;
+            StaticText(w, Rect(440,((i+1) * 20 )+ 7,75,25));
+        });
+        StaticText(w, Rect(520,7,55,25)).string_("Thresh").align_(\center);
+        3.do {arg i;
+            TextField(w, Rect(520,((i+1) * 20 )+ 7,55,25)).string_("0.5").action_({|x| k[i] = x.value.asFloat;});
+        };
+
+        w.onClose_({b.free;g.free;r.clear;x.free; y.free;q.stop;});
+    }.defer;
+
+    s.sync;
+
+    // updates the activations
+    q = Routine {
+        {
+            {
+                l[0].string_("A: " ++ j[0].round(0.001));
+                l[1].string_("B: " ++ j[1].round(0.001));
+                l[2].string_("C: " ++ j[2].round(0.001));
+            }.defer;
+            0.1.wait;
+        }.loop;
+    }.play;
+}.play;
+)
\ No newline at end of file
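A side note, outside the patch itself: once the three classes are trained, the basis buffer f can be kept for later sessions with standard Buffer methods, which is exactly how a pretrained dictionary such as the piano one further down ends up shipped as a file. The path below is illustrative.

// write the trained 3-class basis to disk...
f.write("~/fluid-drum-bases.wav".standardizePath, "wav", "float");

// ...and reload it in a later session
f = Buffer.read(s, "~/fluid-drum-bases.wav".standardizePath);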
diff --git a/release-packaging/ignore/Examples/nmf/nmfmatch-object-finding.scd b/release-packaging/ignore/Examples/nmf/nmfmatch-object-finding.scd
new file mode 100644
index 0000000..7ab2c3d
--- /dev/null
+++ b/release-packaging/ignore/Examples/nmf/nmfmatch-object-finding.scd
@@ -0,0 +1,74 @@
+// using NMF to split a small portion of the file into components, then associating components to targets, then thresholding on these targets' activations to find objects.
+
+//set some buffers
+(
+b = Buffer.read(s,File.realpath(FluidNMFMatch.class.filenameSymbol).dirname.withTrailingSlash ++ "../AudioFiles/Tremblay-BaB-SoundscapeGolcarWithDog.wav");
+c = Buffer.new(s);
+x = Buffer.new(s);
+e = Buffer.new(s);
+)
+
+// train where all objects are present
+(
+Routine {
+    FluidBufNMF.process(s,b,130000,150000,0,1, c, x, components:10);
+    c.query;
+}.play;
+)
+
+// wait for the query to print
+// then find a component for each item you want to find. You could also sum them. Try to find a component with a good object-to-rest ratio
+(
+    ~dog = 1;
+    {PlayBuf.ar(10,c)[~dog]}.play
+)
+
+(
+    ~bird = 3;
+    {PlayBuf.ar(10,c)[~bird]}.play
+)
+
+
+// copy at least one other component to a third filter, a sort of left-over channel
+(
+Routine{
+    FluidBufCompose.process(s, x, startChan:~dog, numChans: 1, destination: e);
+    FluidBufCompose.process(s, x, startChan:~bird, numChans: 1, destStartChan: 1, destination: e, destGain:1);
+    (0..9).removeAll([~dog,~bird]).do({|chan|FluidBufCompose.process(s,x, startChan:chan, numChans: 1, destStartChan: 2, destination: e, destGain:1)});
+    e.query;
+}.play;
+)
+e.plot;
+
+//using this trained basis we can then see the activation... (wait for 5 seconds before it prints!)
+(
+{
+    var source, blips;
+    //read the source
+    source = PlayBuf.ar(2, b);
+    blips = FluidNMFMatch.kr(source.sum,e,3);
+}.plot(5);
+)
+
+// ...and use some threshold to 'find' objects...
+(
+{
+    var source, blips;
+    //read the source
+    source = PlayBuf.ar(2, b);
+    blips = Schmidt.kr(FluidNMFMatch.kr(source.sum,e,3),0.5,[10,1,1000]);
+}.plot(5);
+)
+
+// ...and use these to sonify them
+(
+{
+    var source, blips, dogs, birds;
+    //read the source
+    source = PlayBuf.ar(2, b);
+    blips = Schmidt.kr(FluidNMFMatch.kr(source.sum,e,3),0.5,[10,1,1000]);
+    dogs = SinOsc.ar(100,0,Lag.kr(blips[0],0.05,0.15));
+    birds = SinOsc.ar(1000,0,Lag.kr(blips[1],0.05,0.05));
+    [dogs, birds] + source;
+}.play;
+)
\ No newline at end of file
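A side note, outside the patch itself: choosing values for ~dog and ~bird is done by ear. An optional little helper, assuming the ten-channel resynthesis buffer c from above, that auditions each component in turn for a few seconds so you can note which indices carry the objects you are after:

(
Routine {
    10.do { arg i;
        var synth;
        ("auditioning component " ++ i).postln;
        synth = { PlayBuf.ar(10, c)[i] }.play;   // play only channel i of the resynthesis
        3.wait;                                   // a few seconds each; adjust to taste
        synth.free;
    };
}.play;
)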
diff --git a/release-packaging/ignore/Examples/nmf/nmfmatch-pretrained-piano.scd b/release-packaging/ignore/Examples/nmf/nmfmatch-pretrained-piano.scd
new file mode 100644
index 0000000..d630387
--- /dev/null
+++ b/release-packaging/ignore/Examples/nmf/nmfmatch-pretrained-piano.scd
@@ -0,0 +1,51 @@
+// Using an 88-component piano basis to do polyphonic pitch tracking
+
+//load in the sound and a pretrained basis
+(
+    b = Buffer.read(s,File.realpath(FluidNMFMatch.class.filenameSymbol).dirname.withTrailingSlash ++ "../AudioFiles/Tremblay-SA-UprightPianoPedalWide.wav");
+    c = Buffer.read(s,File.realpath(FluidNMFMatch.class.filenameSymbol).dirname.withTrailingSlash ++ "../AudioFiles/filters/piano-dicts.wav");
+)
+b.play
+c.query
+
+//use the pretrained bases to compute the activation of each note and drive the amplitude of a resynth
+(
+{
+    var source, resynth;
+    source = PlayBuf.ar(2, b,loop:1).sum;
+    resynth = SinOsc.ar((21..108).midicps, 0, FluidNMFMatch.kr(source,c,88,10,4096).madd(0.002)).sum;
+    [source, resynth]
+}.play
+)
+
+
+//now sample and hold the same stream to get notes identified, played and sent back via osc
+(
+{
+    var source, resynth, chain, trig, acts;
+    source = PlayBuf.ar(2,b,loop:1).sum;
+
+    // built-in attack detection, delayed until the stable part of the sound
+    chain = FFT(LocalBuf(256), source);
+    trig = TDelay.kr(Onsets.kr(chain, 0.5),0.1);
+
+    // sample and hold the activation values, scaled and capped, in effect thresholding them
+    acts = Latch.kr(FluidNMFMatch.kr(source,c,88,10,4096).linlin(15,20,0,0.1),trig);
+
+    // resynthesise as in the previous example, with the values sent back to the language
+    resynth = SinOsc.ar((21..108).midicps, 0, acts).sum;
+    SendReply.kr(trig, '/activations', acts);
+    [source, resynth]
+    // [source, T2A.ar(trig)]
+    // resynth
+}.play
+)
+
+// define a receiver for the activations
+(
+    OSCdef(\listener, {|msg|
+        var data = msg[3..];
+        // removes the silent ones and spits out the indices as MIDI note numbers
+        data.collect({arg item, i; if (item > 0.01, {i + 21})}).reject({arg item; item.isNil}).postln;
+    }, '/activations');
+)
\ No newline at end of file
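A side note, outside the patch itself: piano-dicts.wav ships with the package, but a rough sketch of how such an 88-channel basis could be assembled follows, mirroring the JiT classifier above: run a one-component FluidBufNMF over each isolated note and copy the resulting basis into the matching channel of a dictionary buffer. ~noteRecordings, an array of 88 single-note buffers ordered upwards from MIDI note 21, is a hypothetical input you would have to supply yourself, and depending on the FluCoMa version you may need to wait on each process's completion action rather than a bare s.sync.

(
~fftSize = 4096;
~dict = Buffer.alloc(s, (~fftSize / 2 + 1).asInteger, 88);   // one spectral basis per channel
Routine {
    88.do { arg i;
        var oneBasis = Buffer.new(s);
        // approximate this note's spectrum with a single NMF component
        FluidBufNMF.process(s, ~noteRecordings[i], bases: oneBasis, components: 1, windowSize: ~fftSize);
        s.sync;
        // file it under the channel matching its MIDI note (21 + i)
        FluidBufCompose.process(s, oneBasis, numChans: 1, destination: ~dict, destStartChan: i);
        s.sync;
        oneBasis.free;
    };
    "88-note dictionary assembled".postln;
}.play;
)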