From b9a8dd16d9014c11c1d81a56c5a22ee3b3ae0b9f Mon Sep 17 00:00:00 2001 From: Pierre Alexandre Tremblay Date: Tue, 14 Sep 2021 10:33:56 +0100 Subject: [PATCH] replaces the many erroneous references to labels when we meat identifiers, and a few more typos fix #23 --- .../Classes/FluidCorpusBuilders.sc | 22 +++++------ release-packaging/Classes/FluidDataSet.sc | 39 +++++++++---------- .../Classes/FluidDataSetQuery.sc | 1 + .../0-demo-dataset-maker-utilities.scd | 6 +-- .../10a-weighted-MFCCs-comparison.scd | 22 +++++------ .../11-compositing-datasets.scd | 12 +++--- .../12-windowed-clustered-segmentation.scd | 8 ++-- .../HelpSource/Classes/FluidDataSet.schelp | 24 ++++++------ .../Classes/FluidDataSetQuery.schelp | 12 +++--- .../HelpSource/Classes/FluidDataSetWr.schelp | 24 +++++++----- .../Classes/FluidKNNClassifier.schelp | 4 +- .../HelpSource/Classes/FluidLoadFolder.schelp | 8 ++-- .../Classes/FluidProcessSlices.schelp | 18 +++++---- .../Classes/FluidSliceCorpus.schelp | 12 +++--- 14 files changed, 109 insertions(+), 103 deletions(-) diff --git a/release-packaging/Classes/FluidCorpusBuilders.sc b/release-packaging/Classes/FluidCorpusBuilders.sc index efb81de..98ac76a 100644 --- a/release-packaging/Classes/FluidCorpusBuilders.sc +++ b/release-packaging/Classes/FluidCorpusBuilders.sc @@ -1,11 +1,11 @@ FluidLoadFolder { - var path, labelFunc,channelFunc; + var path, idFunc,channelFunc; var < files; var < index; var < buffer; - *new{ |path, labelFunc, channelFunc | - ^super.newCopyArgs(path, labelFunc,channelFunc); + *new{ |path, idFunc, channelFunc | + ^super.newCopyArgs(path, idFunc,channelFunc); } play { |server, action| @@ -25,17 +25,17 @@ FluidLoadFolder { buffer.query; server.sync; this.files.do{|f,i| - var channelMap,label,entry; + var channelMap,identifier,entry; OSCFunc({ - if(labelFunc.notNil) - { label = labelFunc.value(path,i) } - { label = (f.path.basename).asSymbol }; + if(idFunc.notNil) + { identifier = idFunc.value(path,i) } + { identifier = (f.path.basename).asSymbol }; entry = IdentityDictionary(); entry.add(\bounds->startEnd[i]); entry.add(\numchans->f.numChannels); entry.add(\sr->f.sampleRate); entry.add(\path->f.path); - index.add(label->entry); + index.add(identifier->entry); counter = counter + 1; if(counter == (files.size)) {action !? 
action.value(index)}; },"/done",server.addr,argTemplate:["/b_readChannel"]).oneShot; @@ -50,11 +50,11 @@ FluidLoadFolder { FluidSliceCorpus { - var < sliceFunc, labelFunc; + var < sliceFunc, idFunc; var < index; - *new { |sliceFunc, labelFunc| - ^super.newCopyArgs(sliceFunc,labelFunc); + *new { |sliceFunc, idFunc| + ^super.newCopyArgs(sliceFunc,idFunc); } play{ |server,sourceBuffer,bufIdx, action, tasks = 4| diff --git a/release-packaging/Classes/FluidDataSet.sc b/release-packaging/Classes/FluidDataSet.sc index 12c7edb..3ec0061 100644 --- a/release-packaging/Classes/FluidDataSet.sc +++ b/release-packaging/Classes/FluidDataSet.sc @@ -3,51 +3,51 @@ FluidDataSet : FluidDataObject { *new{|server| ^super.new(server) } - addPointMsg{|label,buffer| + addPointMsg{|identifier,buffer| buffer = this.prEncodeBuffer(buffer); - ^this.prMakeMsg(\addPoint,id,label.asSymbol,buffer); + ^this.prMakeMsg(\addPoint,id,identifier.asSymbol,buffer); } - addPoint{|label, buffer, action| + addPoint{|identifier, buffer, action| actions[\addPoint] = [nil,action]; - this.prSendMsg(this.addPointMsg(label,buffer)); + this.prSendMsg(this.addPointMsg(identifier,buffer)); } - getPointMsg{|label,buffer| + getPointMsg{|identifier,buffer| buffer = this.prEncodeBuffer(buffer); - ^this.prMakeMsg(\getPoint,id,label.asSymbol,buffer,["/b_query",buffer.asUGenInput]); + ^this.prMakeMsg(\getPoint,id,identifier.asSymbol,buffer,["/b_query",buffer.asUGenInput]); } - getPoint{|label, buffer, action| + getPoint{|identifier, buffer, action| actions[\getPoint] = [nil,action]; - this.prSendMsg(this.getPointMsg(label,buffer)); + this.prSendMsg(this.getPointMsg(identifier,buffer)); } - updatePointMsg{|label,buffer| + updatePointMsg{|identifier,buffer| buffer = this.prEncodeBuffer(buffer); - ^this.prMakeMsg(\updatePoint,id,label.asSymbol,buffer,["/b_query",buffer.asUGenInput]); + ^this.prMakeMsg(\updatePoint,id,identifier.asSymbol,buffer,["/b_query",buffer.asUGenInput]); } - updatePoint{|label, buffer, action| + updatePoint{|identifier, buffer, action| actions[\updatePoint] = [nil,action]; - this.prSendMsg(this.updatePointMsg(label,buffer)); + this.prSendMsg(this.updatePointMsg(identifier,buffer)); } - deletePointMsg{|label| ^this.prMakeMsg(\deletePoint,id,label.asSymbol);} + deletePointMsg{|identifier| ^this.prMakeMsg(\deletePoint,id,identifier.asSymbol);} - deletePoint{|label, buffer, action| + deletePoint{|identifier, buffer, action| actions[\deletePoint] = [nil,action]; - this.prSendMsg(this.deletePointMsg(label)); + this.prSendMsg(this.deletePointMsg(identifier)); } - setPointMsg{|label,buffer| + setPointMsg{|identifier,buffer| buffer = this.prEncodeBuffer(buffer); - ^this.prMakeMsg(\setPoint,id,label.asSymbol,buffer,["/b_query",buffer.asUGenInput]); + ^this.prMakeMsg(\setPoint,id,identifier.asSymbol,buffer,["/b_query",buffer.asUGenInput]); } - setPoint{|label, buffer, action| + setPoint{|identifier, buffer, action| actions[\setPoint] = [nil,action]; - this.prSendMsg(this.setPointMsg(label,buffer)); + this.prSendMsg(this.setPointMsg(identifier,buffer)); } clearMsg { ^this.prMakeMsg(\clear,id); } @@ -102,4 +102,3 @@ FluidDataSet : FluidDataObject this.prSendMsg(this.getIdsMsg(labelSet)); } } - diff --git a/release-packaging/Classes/FluidDataSetQuery.sc b/release-packaging/Classes/FluidDataSetQuery.sc index c7df3ca..e9e2108 100644 --- a/release-packaging/Classes/FluidDataSetQuery.sc +++ b/release-packaging/Classes/FluidDataSetQuery.sc @@ -1,5 +1,6 @@ FluidDataSetQuery : FluidDataObject { + *new{|server| ^super.new(server) } addColumnMsg { 
|column| ^this.prMakeMsg(\addColumn,id,column); diff --git a/release-packaging/Examples/dataset/0-demo-dataset-maker-utilities.scd b/release-packaging/Examples/dataset/0-demo-dataset-maker-utilities.scd index 2409979..e636297 100644 --- a/release-packaging/Examples/dataset/0-demo-dataset-maker-utilities.scd +++ b/release-packaging/Examples/dataset/0-demo-dataset-maker-utilities.scd @@ -17,13 +17,13 @@ // here we instantiate a process of description and dataset writing, which will run each slice of the previous slice and write the entry. Note the chain of Done.kr triggers. ~extractor = FluidProcessSlices({|src,start,num,data| - var mfcc, stats, writer, flatten,mfccBuf, statsBuf, flatBuf, label, voice; - label = data.key; + var mfcc, stats, writer, flatten,mfccBuf, statsBuf, flatBuf, identifier, voice; + identifier = data.key; voice = data.value[\voice]; mfcc = FluidBufMFCC.kr(src, startFrame:start, numFrames:num, numChans:1, features:~mfccbuf[voice], trig:1, blocking: 1); stats = FluidBufStats.kr(~mfccbuf[voice], stats:~statsbuf[voice], trig:Done.kr(mfcc), blocking: 1); flatten = FluidBufFlatten.kr(~statsbuf[voice], destination:~flatbuf[voice], trig:Done.kr(stats), blocking: 1); - writer = FluidDataSetWr.kr(~ds, label, nil, ~flatbuf[voice], trig: Done.kr(flatten), blocking: 1) + writer = FluidDataSetWr.kr(~ds, identifier, nil, ~flatbuf[voice], trig: Done.kr(flatten), blocking: 1) }); ) diff --git a/release-packaging/Examples/dataset/1-learning examples/10a-weighted-MFCCs-comparison.scd b/release-packaging/Examples/dataset/1-learning examples/10a-weighted-MFCCs-comparison.scd index cff2de7..c14a7b6 100644 --- a/release-packaging/Examples/dataset/1-learning examples/10a-weighted-MFCCs-comparison.scd +++ b/release-packaging/Examples/dataset/1-learning examples/10a-weighted-MFCCs-comparison.scd @@ -20,37 +20,37 @@ // here we instantiate a process of description and dataset writing, as per example 0 ~extractor = FluidProcessSlices({|src,start,num,data| - var label, voice, mfcc, stats, flatten; - label = data.key; + var identifier, voice, mfcc, stats, flatten; + identifier = data.key; voice = data.value[\voice]; mfcc = FluidBufMFCC.kr(src, startFrame:start, numFrames:num, numChans:1, features:~mfccbuf[voice], padding: 2, trig:1, blocking: 1); stats = FluidBufStats.kr(~mfccbuf[voice], stats:~statsbuf[voice], numDerivs: 1, trig:Done.kr(mfcc), blocking: 1); flatten = FluidBufFlatten.kr(~statsbuf[voice], destination:~flatbuf[voice], trig:Done.kr(stats), blocking: 1); - FluidDataSetWr.kr(~ds, label, nil, ~flatbuf[voice], Done.kr(flatten), blocking: 1); + FluidDataSetWr.kr(~ds, identifier, nil, ~flatbuf[voice], Done.kr(flatten), blocking: 1); }); // here we make another processor, this time with doing an amplitude weighing ~extractorW = FluidProcessSlices({|src,start,num,data| - var label, voice, loud, weights, mfcc, stats, flatten; - label = data.key; + var identifier, voice, loud, weights, mfcc, stats, flatten; + identifier = data.key; voice = data.value[\voice]; mfcc = FluidBufMFCC.kr(src, startFrame:start, numFrames:num, numChans:1, features:~mfccbuf[voice], padding: 2, trig:1, blocking: 1); loud = FluidBufLoudness.kr(src, startFrame:start, numFrames:num, numChans:1, features:~loudbuf[voice], padding: 2, trig:Done.kr(mfcc), blocking: 1); weights = FluidBufScale.kr(~loudbuf[voice], numChans: 1, destination: ~weightbuf[voice], inputLow: -70, inputHigh: 0, trig: Done.kr(loud), blocking: 1); stats = FluidBufStats.kr(~mfccbuf[voice], stats:~statsbuf[voice], numDerivs: 1, weights: ~weightbuf[voice], 
trig:Done.kr(weights), blocking: 1); flatten = FluidBufFlatten.kr(~statsbuf[voice], destination:~flatbuf[voice], trig:Done.kr(stats), blocking: 1); - FluidDataSetWr.kr(~dsW, label, nil, ~flatbuf[voice], Done.kr(flatten), blocking: 1); + FluidDataSetWr.kr(~dsW, identifier, nil, ~flatbuf[voice], Done.kr(flatten), blocking: 1); }); // and here we make a little processor for loudness if we want to poke at it ~extractorL = FluidProcessSlices({|src,start,num,data| - var label, voice, loud, stats, flatten; - label = data.key; + var identifier, voice, loud, stats, flatten; + identifier = data.key; voice = data.value[\voice]; loud = FluidBufLoudness.kr(src, startFrame:start, numFrames:num, numChans:1, features:~mfccbuf[voice], trig:1, padding: 2, blocking: 1); stats = FluidBufStats.kr(~mfccbuf[voice], stats:~statsbuf[voice], numDerivs: 1, trig:Done.kr(loud), blocking: 1); flatten = FluidBufFlatten.kr(~statsbuf[voice], destination:~flatbuf[voice], trig:Done.kr(stats), blocking: 1); - FluidDataSetWr.kr(~dsL, label, nil, ~flatbuf[voice], Done.kr(flatten), blocking: 1); + FluidDataSetWr.kr(~dsL, identifier, nil, ~flatbuf[voice], Done.kr(flatten), blocking: 1); }); ) @@ -225,11 +225,11 @@ Routine{ }.play; ) -//explore dynamic range (changing the weigting's value of 0 in lines 44 and 168 will change the various weights given to quieter parts of the signal +//explore dynamic range (changing the weigting's value of 0 in lines 39 and 157 will change the various weights given to quieter parts of the signal ( t = Main.elapsedTime; ~extractorL.play(s,~loader.buffer,~slicer.index,action:{(Main.elapsedTime - t).postln;"Features done".postln}); ) ~norm = FluidNormalize.new(s) ~norm.fit(~dsL) -~norm.dump({|x|x["data_min"][[8,12]].postln;x["data_max"][[8,12]].postln;})//here we extract the stats from the dataset by retrieving the stored maxima of the fitting process in FluidNormalize \ No newline at end of file +~norm.dump({|x|x["data_min"][[8,12]].postln;x["data_max"][[8,12]].postln;})//here we extract the stats from the dataset by retrieving the stored maxima of the fitting process in FluidNormalize diff --git a/release-packaging/Examples/dataset/1-learning examples/11-compositing-datasets.scd b/release-packaging/Examples/dataset/1-learning examples/11-compositing-datasets.scd index 8398b84..cdc613c 100644 --- a/release-packaging/Examples/dataset/1-learning examples/11-compositing-datasets.scd +++ b/release-packaging/Examples/dataset/1-learning examples/11-compositing-datasets.scd @@ -30,27 +30,27 @@ // here we make the full processor building our 3 source datasets ~extractor = FluidProcessSlices({|src,start,num,data| - var label, voice, pitch, pitchweights, pitchstats, pitchflat, loud, statsLoud, flattenLoud, mfcc, mfccweights, mfccstats, mfccflat, writePitch, writeLoud; - label = data.key; + var identifier, voice, pitch, pitchweights, pitchstats, pitchflat, loud, statsLoud, flattenLoud, mfcc, mfccweights, mfccstats, mfccflat, writePitch, writeLoud; + identifier = data.key; voice = data.value[\voice]; // the pitch computation is independant so it starts right away pitch = FluidBufPitch.kr(src, startFrame:start, numFrames:num, numChans:1, features:~pitchbuf[voice], unit: 1, trig:1, blocking: 1); pitchweights = FluidBufThresh.kr(~pitchbuf[voice], numChans: 1, startChan: 1, destination: ~weightPitchbuf[voice], threshold: 0.7, trig:Done.kr(pitch), blocking: 1);//pull down low conf pitchstats = FluidBufStats.kr(~pitchbuf[voice], stats:~statsPitchbuf[voice], numDerivs: 1, weights: ~weightPitchbuf[voice], 
outliersCutoff: 1.5, trig:Done.kr(pitchweights), blocking: 1); pitchflat = FluidBufFlatten.kr(~statsPitchbuf[voice],destination:~flatPitchbuf[voice],trig:Done.kr(pitchstats),blocking: 1); - writePitch = FluidDataSetWr.kr(~pitchDS,label, nil, ~flatPitchbuf[voice], Done.kr(pitchflat),blocking: 1); + writePitch = FluidDataSetWr.kr(~pitchDS,identifier, nil, ~flatPitchbuf[voice], Done.kr(pitchflat),blocking: 1); // the mfcc need loudness to weigh, so let's start with that loud = FluidBufLoudness.kr(src,startFrame:start, numFrames:num, numChans:1, features:~loudbuf[voice], trig:Done.kr(writePitch), blocking: 1);//here trig was 1 //we can now flatten and write Loudness in its own trigger tree statsLoud = FluidBufStats.kr(~loudbuf[voice], stats:~statsLoudbuf[voice], numDerivs: 1, trig:Done.kr(loud), blocking: 1); flattenLoud = FluidBufFlatten.kr(~statsLoudbuf[voice],destination:~flatLoudbuf[voice],trig:Done.kr(statsLoud),blocking: 1); - writeLoud = FluidDataSetWr.kr(~loudDS,label, nil, ~flatLoudbuf[voice], Done.kr(flattenLoud),blocking: 1); + writeLoud = FluidDataSetWr.kr(~loudDS,identifier, nil, ~flatLoudbuf[voice], Done.kr(flattenLoud),blocking: 1); //we can resume from the loud computation trigger mfcc = FluidBufMFCC.kr(src,startFrame:start,numFrames:num,numChans:1,features:~mfccbuf[voice],trig:Done.kr(writeLoud),blocking: 1);//here trig was loud mfccweights = FluidBufScale.kr(~loudbuf[voice],numChans: 1,destination: ~weightMFCCbuf[voice],inputLow: -70,inputHigh: 0, trig: Done.kr(mfcc), blocking: 1); mfccstats = FluidBufStats.kr(~mfccbuf[voice], stats:~statsMFCCbuf[voice], startChan: 1, numDerivs: 1, weights: ~weightMFCCbuf[voice], trig:Done.kr(mfccweights), blocking: 1);//remove mfcc0 and weigh by loudness instead mfccflat = FluidBufFlatten.kr(~statsMFCCbuf[voice],destination:~flatMFCCbuf[voice],trig:Done.kr(mfccstats),blocking: 1); - FluidDataSetWr.kr(~mfccDS,label, nil, ~flatMFCCbuf[voice], Done.kr(mfccflat),blocking: 1); + FluidDataSetWr.kr(~mfccDS,identifier, nil, ~flatMFCCbuf[voice], Done.kr(mfccflat),blocking: 1); }); ) @@ -190,7 +190,7 @@ Routine{ // analyse it as above, using voice 0 in the arrays of buffer to store the info ( { - var label, voice, pitch, pitchweights, pitchstats, pitchflat, loud, statsLoud, flattenLoud, mfcc, mfccweights, mfccstats, mfccflat, writePitch, writeLoud; + var identifier, voice, pitch, pitchweights, pitchstats, pitchflat, loud, statsLoud, flattenLoud, mfcc, mfccweights, mfccstats, mfccflat, writePitch, writeLoud; pitch = FluidBufPitch.kr(~targetsound, numChans:1, features:~pitchbuf[0], unit: 1, trig:1, blocking: 1); pitchweights = FluidBufThresh.kr(~pitchbuf[0], numChans: 1, startChan: 1, destination: ~weightPitchbuf[0], threshold: 0.7, trig:Done.kr(pitch), blocking: 1); pitchstats = FluidBufStats.kr(~pitchbuf[0], stats:~statsPitchbuf[0], numDerivs: 1, weights: ~weightPitchbuf[0], outliersCutoff: 1.5, trig:Done.kr(pitchweights), blocking: 1); diff --git a/release-packaging/Examples/dataset/1-learning examples/12-windowed-clustered-segmentation.scd b/release-packaging/Examples/dataset/1-learning examples/12-windowed-clustered-segmentation.scd index 813a06e..e1dd3b0 100644 --- a/release-packaging/Examples/dataset/1-learning examples/12-windowed-clustered-segmentation.scd +++ b/release-packaging/Examples/dataset/1-learning examples/12-windowed-clustered-segmentation.scd @@ -32,17 +32,17 @@ a = Slider(w, Rect(10, 20, 330, 20)) ~slicesMFCC = FluidDataSet(s); ~slicesShapes = FluidDataSet(s); ~extractor = FluidProcessSlices({|src,start,num,data| - var features, 
stats, writer, flatten,mfccBuf, statsBuf, flatBuf, label, voice; - label = data.key; + var features, stats, writer, flatten,mfccBuf, statsBuf, flatBuf, identifier, voice; + identifier = data.key; voice = data.value[\voice]; features = FluidBufMFCC.kr(src,startFrame:start,numFrames:num,numChans:1, numCoeffs: 20, features:~featuresbuf[voice],trig:1,blocking: 1); stats = FluidBufStats.kr(~featuresbuf[voice],stats:~statsbuf[voice],trig:Done.kr(features),blocking: 1); flatten = FluidBufFlatten.kr(~statsbuf[voice],destination:~flatbuf[voice],trig:Done.kr(stats),blocking: 1); - writer = FluidDataSetWr.kr(~slicesMFCC,label, nil, ~flatbuf[voice], Done.kr(flatten),blocking: 1); + writer = FluidDataSetWr.kr(~slicesMFCC,identifier, nil, ~flatbuf[voice], Done.kr(flatten),blocking: 1); features = FluidBufSpectralShape.kr(src,startFrame:start,numFrames:num,numChans:1, features:~featuresbuf[voice],trig:Done.kr(writer),blocking: 1); stats = FluidBufStats.kr(~featuresbuf[voice],stats:~statsbuf[voice],trig:Done.kr(features),blocking: 1); flatten = FluidBufFlatten.kr(~statsbuf[voice],destination:~flatbuf[voice],trig:Done.kr(stats),blocking: 1); - writer = FluidDataSetWr.kr(~slicesShapes,label, nil, ~flatbuf[voice], Done.kr(flatten),blocking: 1); + writer = FluidDataSetWr.kr(~slicesShapes,identifier, nil, ~flatbuf[voice], Done.kr(flatten),blocking: 1); }); ) diff --git a/release-packaging/HelpSource/Classes/FluidDataSet.schelp b/release-packaging/HelpSource/Classes/FluidDataSet.schelp index fa83eb9..92e7127 100644 --- a/release-packaging/HelpSource/Classes/FluidDataSet.schelp +++ b/release-packaging/HelpSource/Classes/FluidDataSet.schelp @@ -4,7 +4,7 @@ categories:: Libraries>FluidCorpusManipulation related:: Classes/FluidLabelSet, Classes/FluidKDTree, Classes/FluidKMeans ​ DESCRIPTION:: -A server-side container associating labels with multi-dimensional data. +A server-side container associating string identifiers with multi-dimensional data. CLASSMETHODS:: ​ @@ -25,25 +25,25 @@ PRIVATE:: init,id,cache METHOD:: addPoint Add a new point to the data set. The dimensionality of the DataSet is governed by the size of the first point added. -Will report an error if the label already exists, or if the size of the data does not match the dimensionality of the DataSet. -ARGUMENT:: label -A symbol or string with the label for the new point. +Will report an error if the identifier already exists, or if the size of the data does not match the dimensionality of the DataSet. +ARGUMENT:: identifier +A symbol or string with the identifier for the new point. ARGUMENT:: buffer A link::Classes/Buffer:: with the new data point. ARGUMENT:: action A function to run when the point has been added. ​​ METHOD:: updatePoint -Update an existing label's data. Will report an error if the label doesn't exist, or if the size of the data does not match the given dimensionality of the DataSet. +Update an existing identifier's data. Will report an error if the identifier doesn't exist, or if the size of the data does not match the given dimensionality of the DataSet. METHOD:: getPoint -Retrieve a point from the data set into a link::Classes/Buffer::. Will report an error if the label or buffer doesn't exist​. +Retrieve a point from the data set into a link::Classes/Buffer::. Will report an error if the identifier or buffer doesn't exist​. METHOD:: deletePoint -Remove a point from the data set. Will report an error if the label doesn't exist. +Remove a point from the data set. Will report an error if the identifier doesn't exist. 
METHOD:: setPoint -Set the point: if the label exists, this method behaves like updatePoint; if the label doesn't exist, behaves like addPoint. +Set the point: if the identifier exists, this method behaves like updatePoint; if the identifier doesn't exist, behaves like addPoint. ​​ METHOD:: clear Empty the data set. @@ -88,7 +88,7 @@ ARGUMENT:: action A function to run when the export is done. METHOD:: merge -Merge sourceDataSet in the current DataSet. It will update the value of points with the same label if overwrite is set to 1. To add columns instead, see the 'transformJoin' method of link::Classes/FluidDataSetQuery:: +Merge sourceDataSet in the current DataSet. It will update the value of points with the same identifier if overwrite is set to 1. To add columns instead, see the 'transformJoin' method of link::Classes/FluidDataSetQuery:: METHOD:: free Destroy the object on the server. @@ -108,7 +108,7 @@ EXAMPLES:: CODE:: // Create a simple a one-dimensional data set, three ways // Using routine -s.boot; +s.reboot; ( fork{ ~ds = FluidDataSet.new(s); @@ -204,7 +204,7 @@ code:: ~dsB = FluidDataSet.new(s); ) -//feed them items with same dimensions but different labels +//feed them items with same dimensions but different identifiers ~dsA.load(Dictionary.newFrom([\cols, 1, \data, Dictionary.newFrom([\one,1,\two,2])])); ~dsB.load(Dictionary.newFrom([\cols, 1, \data, Dictionary.newFrom([\three,3,\four,4])])); ~dsA.print; @@ -214,7 +214,7 @@ code:: ~dsB.merge(~dsA) ~dsB.print; -//change the content of the dataset to shared labels +//change the content of the dataset to shared identifiers ~dsA.load(Dictionary.newFrom([\cols, 1, \data, Dictionary.newFrom([\three,333,\four,444])])); ~dsB.load(Dictionary.newFrom([\cols, 1, \data, Dictionary.newFrom([\three,3,\four,4])])); ~dsA.print; diff --git a/release-packaging/HelpSource/Classes/FluidDataSetQuery.schelp b/release-packaging/HelpSource/Classes/FluidDataSetQuery.schelp index 6648207..884d3cc 100644 --- a/release-packaging/HelpSource/Classes/FluidDataSetQuery.schelp +++ b/release-packaging/HelpSource/Classes/FluidDataSetQuery.schelp @@ -92,7 +92,7 @@ ARGUMENT:: action Run when done METHOD:: transformJoin -Apply the query to a source link::Classes/FluidDataSet:: and join the resulting subset at the end of the items sharing the same labels in a second source. Items unique to a source dataset will be ignored. To add items at the end of a dataset instead, see the 'merge' method of link::Classes/FluidDataSet:: +Apply the query to a source link::Classes/FluidDataSet:: and join the resulting subset at the end of the items sharing the same identifiers in a second source. Items unique to a source dataset will be ignored. To add items at the end of a dataset instead, see the 'merge' method of link::Classes/FluidDataSet:: ARGUMENT:: source1DataSet Source data, or the DataSet name ARGUMENT:: source2DataSet @@ -108,7 +108,7 @@ EXAMPLES:: code:: s.reboot; -// Create a DataSet with random data +// Create a DataSet with known data ~dataSet= FluidDataSet(s); ( @@ -126,7 +126,7 @@ fork{ } ) -//check the source +//check the source - the column is the integer part of the value, and the row is the fractional part. 
This will help us identify what we kept in our didactic query ~dataSet.print; // Prepare a FluidDataSetQuery object @@ -157,7 +157,7 @@ fork{ STRONG:: Joining Datasets:: code:: -//this is how to join 2 datasets, adding columns to items with the same label +//this is how to join 2 datasets, adding columns to items with the same identifier //create 3 datasets ( ~dsA = FluidDataSet(s); @@ -165,13 +165,13 @@ code:: ~dsC = FluidDataSet(s); ) -//feed them items with almost overlaping label lists but with different dimensions +//feed them items with almost overlaping identifier lists but with different dimensions ~dsA.load(Dictionary.newFrom([\cols, 2, \data, Dictionary.newFrom([\zero, [0,0], \one,[1,11],\two,[2,22], \three,[3,33],\four,[4,44]])])); ~dsB.load(Dictionary.newFrom([\cols, 2, \data, Dictionary.newFrom([\one,[111,1111],\two,[222,2222], \three,[333,3333],\four,[444,4444],\five,[555,5555]])])); ~dsA.print; ~dsB.print; -// no query/filter defined, copies all items with labels common to both, and all of the defined column of the first input +// no query/filter defined, copies all items with identifiers common to both, and all of the defined column of the first input ~joiner = FluidDataSetQuery.new; ~joiner.transformJoin(~dsA,~dsB,~dsC) ~dsC.print diff --git a/release-packaging/HelpSource/Classes/FluidDataSetWr.schelp b/release-packaging/HelpSource/Classes/FluidDataSetWr.schelp index 25f5615..06f8a47 100644 --- a/release-packaging/HelpSource/Classes/FluidDataSetWr.schelp +++ b/release-packaging/HelpSource/Classes/FluidDataSetWr.schelp @@ -6,7 +6,7 @@ related:: Classes/FLuidDataSet DESCRIPTION:: A UGen that adds labelled points to a link::Classes/FluidDataSet:: Internally, this calls code::setPoint::, so IDs that already exist will be overwritten, and new IDs will be added. The actual work is done on the server's command queue, rather than the real-thread. -By default the object takes a control input (code::idNumber::) as a numerical index that gets used for the point labels. This index is used to write each time the Ugen is re-triggered with a zero to non-zero transition. The label is then concatenated with the code::idPrefix:: symbol, which is fixed at instantiation. In this way, one can make custom, incrementing labels, e.g. +By default the object takes a control input (code::idNumber::) as a numerical index that gets used for the point identifiers. This index is used to write each time the Ugen is re-triggered with a zero to non-zero transition. The identifier is then concatenated with the code::idPrefix:: symbol, which is fixed at instantiation. In this way, one can make custom, incrementing identifiers, e.g. code:: FluidDataSetWr.kr(~somedataset,"my_data",PulseCount.kr(trig),~somebuffer,trig) @@ -27,11 +27,11 @@ ARGUMENT:: dataset An instance of link::Classes/FluidDataSet:: or an instance's name. ARGUMENT:: idPrefix -A string or symbol with a prefix for generated labels. +A string or symbol with a prefix for generated identifiers. ARGUMENT:: idNumber ANCHOR::offset:: -An integer with the offset to start labeling from. If the UGen is run in a server-side loop (i.e. repeatedly re-triggered), the generated labels will count upwards from this offset. If nil, then no numerical index will be applied to the generated label (i.e. only the labelPrefix is used). +An integer with the offset to start labeling from. If the UGen is run in a server-side loop (i.e. repeatedly re-triggered), the generated identifiers will count upwards from this offset. 
If nil, then no numerical index will be applied to the generated identifier (i.e. only the idPrefix is used). ARGUMENT:: buf The link::Classes/Buffer:: containing the data point. @@ -48,6 +48,8 @@ EXAMPLES:: code:: s.reboot; + +//make a dataset ~ds = FluidDataSet(s); // write a single point, no counting @@ -58,9 +60,8 @@ s.reboot; }.play(s); ) - +//look ~ds.print; -~ds.clear //Write a 100 points quite fast with server-side triggering ( @@ -81,8 +82,7 @@ OSCFunc({ }.play(s,args:[n:100]); ) -~ds.print; -~ds.clear +//it printed with the return function //Again, but as fast as possible using a feedback of the trigger we are given when the writing is done ( @@ -104,10 +104,9 @@ OSCFunc({ }.play(s,args:[n:100]); ) -~ds.print; +// incremental buffer writing - sky is the limit ~ds.clear -// incremental buffer writing - sky is the limit // start the entry maker, trigging twice a second ( { @@ -125,10 +124,14 @@ OSCFunc({ //print a few times ~ds.print; -//clear before flushing the writing synth +//clear before flushing the writing synth and the process keeps on going ~ds.clear ~ds.print; +//command-period to stop it +~ds.print; +~ds.clear + // circular writing ( { @@ -143,6 +146,7 @@ OSCFunc({ }.play(s); ) +//print regularly to see a specific identifier being overwritten ~ds.print; ~ds.clear diff --git a/release-packaging/HelpSource/Classes/FluidKNNClassifier.schelp b/release-packaging/HelpSource/Classes/FluidKNNClassifier.schelp index 44f86d9..156aa56 100644 --- a/release-packaging/HelpSource/Classes/FluidKNNClassifier.schelp +++ b/release-packaging/HelpSource/Classes/FluidKNNClassifier.schelp @@ -52,7 +52,7 @@ code:: // Make: // - A KNN Classifier -// - A DataSet of example points, and a label set of corresponding labels +// - A DataSet of example points, and a LabelSet of corresponding labels // - A DataSet of test data and a LabelSet for predicted labels ( @@ -87,7 +87,7 @@ d = Dictionary.with( ) -//Fit the classifier to the example DataSet and labels, and then run prediction on the test data into our mapping label set +//Fit the classifier to the example DataSet and LabelSet, and then run prediction on the test data into our mapping LabelSet ( ~classifier.fit(~source,~labels); ~classifier.predict(~test, ~mapping, 1); diff --git a/release-packaging/HelpSource/Classes/FluidLoadFolder.schelp b/release-packaging/HelpSource/Classes/FluidLoadFolder.schelp index 98555cc..ae464e5 100644 --- a/release-packaging/HelpSource/Classes/FluidLoadFolder.schelp +++ b/release-packaging/HelpSource/Classes/FluidLoadFolder.schelp @@ -15,7 +15,7 @@ Construct a new instance ARGUMENT:: path A string pointing to a folder on disk -ARGUMENT:: labelFunc +ARGUMENT:: idFunc A function that determines how the chunks in the index are labelled; default is file name without path. ARGUMENT:: channelFunc @@ -26,7 +26,7 @@ INSTANCEMETHODS:: METHOD:: index A link::Classes/IdentityDictionary:: containing the metadata on the loaded files -The keys of this dictionary are the labels produced by the code::labelFunc:: passed to link::Classes/FluidLoadFolder#*new:: (or the default of the filename if nil). The value for each key is a further dictionary consisting of: +The keys of this dictionary are the identifiers produced by the code::idFunc:: passed to link::Classes/FluidLoadFolder#*new:: (or the default of the filename if nil). 
The value for each key is a further dictionary consisting of: definitionlist:: ## bounds @@ -67,9 +67,9 @@ s.reboot; ~loader.play(s,action:{ |dataDictionary| ("Done loading into" + ~loader.buffer).postln; //we get passed an IdentityDictionary of slice data, let's look at it - dataDictionary.pairsDo{|label,data| + dataDictionary.pairsDo{|identifier,data| //data is also a dictionary - (label ++ '(').post; + (identifier ++ '(').post; data.pairsDo{|k,v| (k ++ ':' + v + ' ').post }; ')'.postln; } diff --git a/release-packaging/HelpSource/Classes/FluidProcessSlices.schelp b/release-packaging/HelpSource/Classes/FluidProcessSlices.schelp index a3cb8dc..b62d78c 100644 --- a/release-packaging/HelpSource/Classes/FluidProcessSlices.schelp +++ b/release-packaging/HelpSource/Classes/FluidProcessSlices.schelp @@ -26,7 +26,7 @@ definitionlist:: ##num || The number of frames to process, in samples ##data -|| anchor::datadict:: An link::Classes/Association:: of the label for this segment, with an link::Classes/IdentityDictionary:: of useful extra data: +|| anchor::datadict:: An link::Classes/Association:: of the identifier for this segment, with an link::Classes/IdentityDictionary:: of useful extra data: definitionlist:: ## sr || The original sample rate of the segment @@ -46,13 +46,13 @@ code:: ~featureBuffers = 4.do{Buffer.new}; ~avgPitch = { |src,start,num,data| - var pitch, stats,statsbuf,label,voice; - label = data.key; + var pitch, stats,statsbuf,identifier,voice; + identifier = data.key; voice = data.value[\voice]; statsbuf = LocalBuf(7); pitch = FluidBufPitch.kr(src,start,num,numChans:1,features:~featurebuffers[voice]); stats = FluidBufStats.kr(~featurebuffers[voice],numChans:1, stats:statsbuf,trig:Done.kr(pitch)); - FluidDataSetWr.kr(~mydataset, label, nil, statsbuf,Done.kr(stats)) + FluidDataSetWr.kr(~mydataset, identifier, nil, statsbuf,Done.kr(stats)) }; :: @@ -68,7 +68,7 @@ ARGUMENT:: sourceBuffer The source link::Classes/Buffer:: containing the audio to process ARGUMENT:: bufIdx -An link::Classes/IdentityDictionary:: specifying labels, boundaries, sample rate and channel count for the segment. See link::Classes/FluidLoadFolder#-index:: for details. +An link::Classes/IdentityDictionary:: specifying identifiers, boundaries, sample rate and channel count for the segment. See link::Classes/FluidLoadFolder#-index:: for details. ARGUMENT:: action A function to run when processing is complete. This gets passed the same link::Classes/Association:: as link::#datadict#the processing function:: @@ -105,17 +105,19 @@ s.reboot; ~subset = IdentityDictionary.newFrom(~slicer.index.asSortedArray[0..7].flatten(1)); //write pitch statistics into a dataset +//definte the extraction function... ( ~extractor = FluidProcessSlices({|src,start,num,data| - var pitch, stats, label,i; + var pitch, stats, identifier,i; i = data.value[\voice]; - label = data.key; + identifier = data.key; pitch = FluidBufPitch.kr(src,start,num,features:~pitchbufs[i]); stats = FluidBufStats.kr(~pitchbufs[i],stats:~statsbufs[i],trig:Done.kr(pitch)); - FluidDataSetWr.kr(~pitchdata,label,nil,buf:~statsbufs[i],trig:Done.kr(stats)) + FluidDataSetWr.kr(~pitchdata,identifier,nil,buf:~statsbufs[i],trig:Done.kr(stats)) }); ) +//... 
and run it ~extractor.play(s,~loader.buffer,~subset,{"Feature extraction done".postln}); //view the data diff --git a/release-packaging/HelpSource/Classes/FluidSliceCorpus.schelp b/release-packaging/HelpSource/Classes/FluidSliceCorpus.schelp index b99ffd2..6e05621 100644 --- a/release-packaging/HelpSource/Classes/FluidSliceCorpus.schelp +++ b/release-packaging/HelpSource/Classes/FluidSliceCorpus.schelp @@ -46,11 +46,11 @@ code:: }); :: -ARGUMENT:: labelFunc +ARGUMENT:: idFunc ANCHOR::labelling:: warning::Not yet implemented:: -Override the default labelling behaviour for slices. The default is to append the original label with code::-:: counting from 1. +Override the default labelling behaviour for slices. The default is to append the original identifier with code::-:: counting from 1. INSTANCEMETHODS:: @@ -68,7 +68,7 @@ The link::Classes/Buffer:: containing the audio to slice ARGUMENT:: bufIdx ANCHOR::indexformat:: -An link::Classes/IdentityDictionary:: that details labels and start-end positions for each chunk in the source buffer. See link::Classes/FluidLoadFolder#index:: +An link::Classes/IdentityDictionary:: that details identifiers and start-end positions for each chunk in the source buffer. See link::Classes/FluidLoadFolder#index:: ARGUMENT:: action A function that runs on complettion, will be passed the link::Classes/IdentityDictionary:: from link::#index:: as an argument. @@ -78,7 +78,7 @@ ANCHOR::ntasks:: The number of parallel processing tasks to run on the server. Default 4. This should probably never be greater than the number of available CPU cores. METHOD:: index -A link::Classes/IdentityDictionary:: containing information about the position of each discovered slice, using labels based on those passed into link::#play:: (see link::#labelling::). This dictionary copies all other entries from the source dictionary on a per-key basis (so you can store arbitary stuff in there should you wish, and it will remain oassciated with its original source chunk). +A link::Classes/IdentityDictionary:: containing information about the position of each discovered slice, using identifiers based on those passed into link::#play:: (see link::#labelling::). This dictionary copies all other entries from the source dictionary on a per-key basis (so you can store arbitary stuff in there should you wish, and it will remain assciated with its original source chunk). EXAMPLES:: @@ -102,9 +102,9 @@ s.reboot ~slicer.play(s,~loader.buffer,~loader.index,{|dataDictionary| "Slicing done".postln; //we get passed an IdentityDictionary of slice data, let's look at it - dataDictionary.pairsDo{|label,data| + dataDictionary.pairsDo{|identifier,data| //data is also a dictionary - (label ++ '(').post; + (identifier ++ '(').post; data.pairsDo{|k,v| (k ++ ':' + v + ' ').post }; ')'.postln; }
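
For reference, a minimal language-side sketch of the identifier-based calls this patch standardises on, modelled on the FluidDataSet help example touched above; the dataset, buffer, value and identifier names are illustrative assumptions, not part of the diff:

code::
// a minimal sketch, assuming a booted server s
(
fork{
	~ds = FluidDataSet.new(s);
	~point = Buffer.alloc(s, 1);          // a one-dimensional data point
	s.sync;
	~point.set(0, 3.14);                  // hypothetical value
	~ds.addPoint(\my_identifier, ~point); // identifiers name points in a DataSet; labels belong to FluidLabelSet
	s.sync;
	~ds.print;                            // should list my_identifier with its single value
	~ds.free; ~point.free;
}
)
::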