Merged clients/inter_client_comms into fix/manip_client_new

nix
Owen Green 6 years ago
commit 2402b71caa

@ -847,6 +847,7 @@ class FluidSCWrapper : public impl::FluidSCWrapperBase<C>
using FloatControlsIter = impl::FloatControlsIter;
using SharedState = std::shared_ptr<WrapperState<C>>;
//I would like to template these to something more scalable, but baby steps
friend class impl::RealTime<C,FluidSCWrapper>;
friend class impl::NonRealTime<C,FluidSCWrapper>;
@ -1185,7 +1186,7 @@ class FluidSCWrapper : public impl::FluidSCWrapperBase<C>
struct MessageDispatch
{
static constexpr size_t Message = N;
FluidSCWrapper* wrapper;
SharedState state;
ArgTuple args;
Ret result;
std::string name;
@ -1226,7 +1227,7 @@ class FluidSCWrapper : public impl::FluidSCWrapperBase<C>
void* msgptr = ft->fRTAlloc(x->mWorld, sizeof(MessageData));
MessageData* msg = new (msgptr) MessageData;
msg->name = '/' + Client::getMessageDescriptors().template name<N>();
msg->wrapper = x;
msg->state = x->state();
ArgTuple& args = msg->args;
// type check OSC message
@ -1306,10 +1307,10 @@ class FluidSCWrapper : public impl::FluidSCWrapperBase<C>
{
MessageData* m = static_cast<MessageData*>(data);
m->result =
ReturnType{invokeImpl<N>(m->wrapper, m->args, IndexList{})};
ReturnType{invokeImpl<N>(m->state, m->args, IndexList{})};
if (!m->result.ok())
printResult(m->wrapper, m->result);
printResult(m->state, m->result);
return true;
},
@ -1321,11 +1322,12 @@ class FluidSCWrapper : public impl::FluidSCWrapperBase<C>
world);
if(m->result.status() != Result::Status::kError)
messageOutput(m->wrapper, m->name, m->result);
messageOutput(m->state, m->name, m->result);
else
{
auto ft = getInterfaceTable();
ft->fSendNodeReply(&m->wrapper->mParent->mNode,
auto ft = getInterfaceTable();
if(m->state->mNodeAlive)
ft->fSendNodeReply(m->state->mNode,
-1, m->name.c_str(),0, nullptr);
}
return true;
@ -1341,54 +1343,71 @@ class FluidSCWrapper : public impl::FluidSCWrapperBase<C>
}
template <size_t N, typename ArgsTuple, size_t... Is> // Call from NRT
static decltype(auto) invokeImpl(FluidSCWrapper* x, ArgsTuple& args,
static decltype(auto) invokeImpl(SharedState& x, ArgsTuple& args,
std::index_sequence<Is...>)
{
return x->client().template invoke<N>(x->client(), std::get<Is>(args)...);
return x->client.template invoke<N>(x->client, std::get<Is>(args)...);
}
template <typename T> // call from RT
static void messageOutput(FluidSCWrapper* x, const std::string& s,
static void messageOutput(SharedState& x, const std::string& s,
MessageResult<T>& result)
{
auto ft = getInterfaceTable();
if(!x->mNodeAlive) return;
// allocate return values
index numArgs = ToFloatArray::allocSize(static_cast<T>(result));
if(numArgs > 2048)
{
std::cout << "ERROR: Message response too big to send (" << asUnsigned(numArgs) * sizeof(float) << " bytes)." << std::endl;
return;
}
float* values = static_cast<float*>(
ft->fRTAlloc(x->mWorld, asUnsigned(numArgs) * sizeof(float)));
ft->fRTAlloc(x->mNode->mWorld, asUnsigned(numArgs) * sizeof(float)));
// copy return data
ToFloatArray::convert(values, static_cast<T>(result));
ft->fSendNodeReply(&x->mParent->mNode, -1, s.c_str(),
ft->fSendNodeReply(x->mNode, -1, s.c_str(),
static_cast<int>(numArgs), values);
}
static void messageOutput(FluidSCWrapper* x, const std::string& s,
static void messageOutput(SharedState& x, const std::string& s,
MessageResult<void>&)
{
auto ft = getInterfaceTable();
ft->fSendNodeReply(&x->mParent->mNode, -1, s.c_str(), 0, nullptr);
if(!x->mNodeAlive) return;
ft->fSendNodeReply(x->mNode, -1, s.c_str(), 0, nullptr);
}
template <typename... Ts>
static void messageOutput(FluidSCWrapper* x, const std::string& s,
static void messageOutput(SharedState& x, const std::string& s,
MessageResult<std::tuple<Ts...>>& result)
{
auto ft = getInterfaceTable();
std::array<index, sizeof...(Ts)> offsets;
index numArgs;
if(!x->mNodeAlive) return;
std::tie(offsets, numArgs) =
ToFloatArray::allocSize(static_cast<std::tuple<Ts...>>(result));
if(numArgs > 2048)
{
std::cout << "ERROR: Message response too big to send (" << asUnsigned(numArgs) * sizeof(float) << " bytes)." << std::endl;
return;
}
float* values = static_cast<float*>(
ft->fRTAlloc(x->mWorld, asUnsigned(numArgs) * sizeof(float)));
ft->fRTAlloc(x->mNode->mWorld, asUnsigned(numArgs) * sizeof(float)));
ToFloatArray::convert(values, std::tuple<Ts...>(result), offsets,
std::index_sequence_for<Ts...>());
ft->fSendNodeReply(&x->mParent->mNode, -1, s.c_str(),
ft->fSendNodeReply(x->mNode, -1, s.c_str(),
static_cast<int>(numArgs), values);
}
@ -1464,14 +1483,14 @@ public:
return p;
}
static void printResult(FluidSCWrapper* x, Result& r)
static void printResult(SharedState& x, Result& r)
{
if (!x) return;
if (!x.get() || !x->mNodeAlive) return;
switch (r.status())
{
case Result::Status::kWarning: {
if (x->mWorld->mVerbosity > 0)
if (x->mNode->mWorld->mVerbosity > 0)
std::cout << "WARNING: " << r.message().c_str() << '\n';
break;
}

@ -115,46 +115,47 @@ FluidSliceCorpus {
}
FluidProcessSlices{
var < featureFunc, labelFunc;
var < index;
var < featureFunc;
*new { |featureFunc, labelFunc|
^super.newCopyArgs(featureFunc,labelFunc);
*new { |featureFunc|
^super.newCopyArgs(featureFunc);
}
play{ |server,sourceBuffer,bufIdx, action|
play{ |server, sourceBuffer, bufIdx, action, tasks = 4|
var counter,perf,jobs,total,uid, completed;
sourceBuffer ?? {"No buffer to slice".error; ^nil};
bufIdx ?? {"No slice point dictionary passed".error;^nil};
server ?? {server = Server.default};
index = IdentityDictionary();
uid = UniqueID.next;
jobs = List.newFrom(bufIdx.keys);
total = jobs.size;
counter = 0;
completed = 0;
perf = {
perf = {|jobID|
var idx,v, k = jobs.pop;
v = bufIdx[k];
counter = counter + 1;
idx = counter;
v[\index] = counter;
v[\voice] = jobID;
OSCFunc({
completed = completed + 1;
("FluidProcessSlices:" + (completed.asString ++ "/" ++ total)).postln;
if(jobs.size > 0){perf.value};
if(completed == total){action !? action.value(index);};
if(jobs.size > 0){perf.value(jobID)};
if(completed == total){action !? action.value(v);};
},"/doneFeature" ++ uid ++ counter,server.addr).oneShot;
{
var numframes,feature;
numframes = v[\bounds].reverse.reduce('-');
feature = featureFunc.value(sourceBuffer, v[\bounds][0],numframes,k,v,counter-1);
feature = featureFunc.value(sourceBuffer, v[\bounds][0], numframes, k->v);
SendReply.kr(Done.kr(feature),'/doneFeature' ++ uid ++ idx);
FreeSelfWhenDone.kr(feature);
}.play(server);
};
4.do{perf.value};
tasks ?? {tasks = 4};
tasks.asInteger.min(jobs.size).do{|jobIDs|perf.value(jobIDs)};
}
}

@ -90,11 +90,6 @@ FluidDataSet : FluidManipulationClient {
this.prSendMsg(\print,[],action,[string(FluidMessageResponse,_,_)]);
}
dump { |action|
action ?? {action = postit};
this.prSendMsg(\dump,[],action,[string(FluidMessageResponse,_,_)]);
}
free {
serverCaches.remove(server,id);
super.free;

@ -25,4 +25,13 @@ FluidKNNClassifier : FluidManipulationClient {
[string(FluidMessageResponse,_,_)]
);
}
read{|filename,action|
this.prSendMsg(\read,[filename.asString],action);
}
write{|filename,action|
this.prSendMsg(\write,[filename.asString],action);
}
}

@ -26,4 +26,13 @@ FluidKNNRegressor : FluidManipulationClient {
this.prSendMsg(\predictPoint, [buffer.asUGenInput, k,uniform], action,
[number(FluidMessageResponse,_,_)]);
}
read{|filename,action|
this.prSendMsg(\read,[filename.asString],action);
}
write{|filename,action|
this.prSendMsg(\write,[filename.asString],action);
}
}

@ -1,40 +1,67 @@
+ FluidManipulationClient {
tmpJSONFilename{
^Platform.defaultTempDir++"tmp_fluid_dataset_"++
Date.localtime.stamp++".json";
^Platform.defaultTempDir++"tmp_fluid_data_"++
Date.localtime.stamp++"_"++UniqueID.next++".json";
}
dump {|action|
var filename = this.tmpJSONFilename;
action ?? {action = postit};
this.write(filename, {
action.value(filename.parseYAMLFile);
File.delete(filename);
});
this.write(filename, {
action.value(this.parseJSON(File.readAllString(filename)));
File.delete(filename);
});
}
load{|dict, action|
var filename = this.tmpJSONFilename;
var str = this.asJSON(dict);
File.use(filename, "w", { |f| f.write(this.asJSON(dict));});
File.use(filename, "wt", { |f| f.write(this.asJSON(dict));});
this.read(filename, {
action.notNil.if{ action.value };
File.delete(filename);
action.notNil.if{ action.value; };
File.delete(filename);
});
}
toDict{|obj|
var converted;
if(obj.class === Event){
converted = obj.as(Dictionary);
converted.keysValuesChange{|k,v|this.toDict(v)}
^converted;
};
if(obj.class === Array){
converted = obj.collect{|v| this.toDict(v)};
^converted;
};
^obj;
}
parseJSON{|jsonStr|
var parsed = jsonStr;
jsonStr.do({|char,pos|
var inString = false;
char.switch(
$",{(jsonStr[pos-1]==$\ && inString).not.if({inString = inString.not})},
${,{ if(inString.not){parsed[pos] = $(} },
$},{ if(inString.not){parsed[pos] = $)} }
)
});
^this.toDict(parsed.interpret);
}
asJSON{|d|
if(d.isString || d.isNumber){^d};
if(d.isKindOf(Dictionary),
if(d.isNumber){^d};
if(d.isString){^d.asCompileString};
if(d.isKindOf(Dictionary))
{
^"{" ++ (
d.keys.asList.collect{|k|
k.asString.asCompileString ++ ":" + this.asJSON(d[k])
}).join(", ") ++ "}"
});
if(d.isKindOf(SequenceableCollection),
};
if(d.isKindOf(SequenceableCollection))
{
^"[" ++ d.collect({|x|this.asJSON(x)}).join(", ")++ "]";
});
};
}
}

@ -1,9 +1,10 @@
// define a few processes
(
~ds = FluidDataSet(s,\test); // still need a name on the server to make sure we do not forget it exists. it is now permanent aka will resist cmd+.
~mfccbuf = Buffer.new;
~statsbuf = Buffer.new;
~flatbuf = Buffer.new;
//define as many buffers as we have parallel voices/threads in the extractor processing (default is 4)
~mfccbuf = 4.collect{Buffer.new};
~statsbuf = 4.collect{Buffer.new};
~flatbuf = 4.collect{Buffer.new};
// here we instantiate a loader which creates a single large buffer with a dictionary of what was included in it
// ~loader = FluidLoadFolder("/Volumes/machins/projets/newsfeed/smallnum/");
@ -15,12 +16,14 @@
});
// here we instantiate a process for description and dataset writing, which will run on each slice produced by the previous slicing step and write the entry. Note the chain of Done.kr triggers.
~extractor = FluidProcessSlices({|src,start, num, idx|
var mfcc, stats, writer, flatten,mfccBuf, statsBuf, flatBuf;
mfcc = FluidBufMFCC.kr(src,startFrame:start,numFrames:num,numChans:1,features:~mfccbuf,trig:1);
stats = FluidBufStats.kr(~mfccbuf,stats:~statsbuf,trig:Done.kr(mfcc));
flatten = FluidBufFlatten.kr(~statsbuf,~flatbuf,trig:Done.kr(stats));
writer = FluidDataSetWr.kr(idx,~flatbuf,~ds,trig:Done.kr(flatten))
~extractor = FluidProcessSlices({|src,start,num,data|
var mfcc, stats, writer, flatten,mfccBuf, statsBuf, flatBuf, label, voice;
label = data.key;
voice = data.value[\voice];
mfcc = FluidBufMFCC.kr(src,startFrame:start,numFrames:num,numChans:1,features:~mfccbuf[voice],trig:1);
stats = FluidBufStats.kr(~mfccbuf[voice],stats:~statsbuf[voice],trig:Done.kr(mfcc));
flatten = FluidBufFlatten.kr(~statsbuf[voice],~flatbuf[voice],trig:Done.kr(stats));
writer = FluidDataSetWr.kr(label,~flatbuf[voice],~ds,trig:Done.kr(flatten))
});
)
@ -118,9 +121,8 @@ t = Main.elapsedTime;
// write the dataset to file with the native JSON
~ds.write("/tmp/sc-dataset.json")
~ds.clear
// open the file in your favourite json editor
// open the file in your default json editor
"open /tmp/sc-dataset.json".unixCmd
//////////////////////////////////////////////////////////////////////////
@ -140,15 +142,15 @@ FluidBufCompose.process(s,~loader.buffer,a,(b-a),numChans: 1, destination: ~targ
(
{
var mfcc, stats, flatten;
mfcc = FluidBufMFCC.kr(~targetsound,features:~mfccbuf,trig:1);
stats = FluidBufStats.kr(~mfccbuf,stats:~statsbuf,trig:Done.kr(mfcc));
flatten = FluidBufFlatten.kr(~statsbuf,~flatbuf,trig:Done.kr(stats));
mfcc = FluidBufMFCC.kr(~targetsound,features:~mfccbuf[0],trig:1);
stats = FluidBufStats.kr(~mfccbuf[0],stats:~statsbuf[0],trig:Done.kr(mfcc));
flatten = FluidBufFlatten.kr(~statsbuf[0],~flatbuf[0],trig:Done.kr(stats));
}.play;
)
//find its nearest neighbours
~friends = Array;
~tree.kNearest(~flatbuf,5,{|x| ~friends = x.postln;})
~tree.kNearest(~flatbuf[0],5,{|x| ~friends = x.postln;})
// play them in a row
(

@ -0,0 +1,45 @@
// create the data dictionary
~data = Dictionary.new
7.do{|i| ~data.add(("entry-"++i).asSymbol -> 10.collect{|j|j/10 + i})}
// nest that dictionary in the dataset format, adding the number of columns
~dict = Dictionary.new
~dict.add(\data -> ~data)
~dict.add(\cols -> 10)
//create a dataset, then load the dictionary
~ds = FluidDataSet.new(s,\simple1data);
~ds.load(~dict)
~ds.print
//fun with kdtree to see it actually works
~kdtree = FluidKDTree.new(s)
~kdtree.fit(~ds,{\done.postln;})
~target = Buffer.loadCollection(s,(4).dup(10));
~kdtree.kNearest(~target,5,{|a|a.postln;})
~kdtree.kNearestDist(~target,5,{|a|a.postln;})
/////////////////////////////////////////////
// creating a labelset the same way
// creating the data dictionary
~data2 = Dictionary.new
7.do{|i| ~data2.add(("entry-"++i).asSymbol -> (if( i.odd, {["odd"]},{["even"]})))}
// nesting again
~dict2 = Dictionary.new
~dict2.add(\data -> ~data2)
~dict2.add(\cols -> 1)
// creating a labelset and loading the dictionary
~ls = FluidLabelSet.new(s,\simplelabel);
~ls.load(~dict2)
~ls.print
// testing with a classifier toy example
~classifier = FluidKNNClassifier.new(s);
~classifier.fit(~ds,~ls, {\done.postln;})
~classifier.predictPoint(~target,2,action: {|x|x.postln;})

@ -82,10 +82,11 @@ b = Buffer.read(s,~randomsoundfile.path,action:{"Sound Loaded".postln});
FluidBufPitch.process(s,b,numFrames:512 * 10,numChans:1,features:~pitchdata,action:{"Pitch Analysis Done".postln});
// Flatten and print the flat buffer. We expect to see larger numbers (20-2000) interleaved with smaller (0-1)
(
FluidBufFlatten.process(s,~pitchdata,~flatdata,action:{
~flatdata.loadToFloatArray(action:{ |a|
a.postln;
})
})
)
::

@ -156,11 +156,7 @@ Routine{
~ds.print //to post window by default, but you can supply a custom action instead
~ds.dump //likewise
//for example
(
~ds.dump{|j|
~dict = j.parseJSON
}
)
~ds.dump{ |d| ~dict = d }
//Now we have a Dictionary of our data and IDs
~dict.postcs

@ -33,10 +33,12 @@ s.reboot;
~ds = FluidDataSet(s,\FluidDataSetWr);
)
(
{
var b = LocalBuf.newFrom([0,1,2,3]);
FreeSelfWhenDone.kr(FluidDataSetWr.kr("help_data_point",b,~ds));
}.play(s);
)
~ds.print;

@ -6,7 +6,6 @@ related:: Classes/FluidLoadFolder, Classes/FluidSliceCorpus,Guides/FluidDecompos
DESCRIPTION::
This class abstracts some of the boilerplate involved in batch processing a sequence of segments in a link::Classes/Buffer:: on the server. It does this by iteratively running a user supplied function and using slice point information passed as an link::Classes/IdentityDictionary:: (see link::Classes/FluidLoadFolder#-index:: for details on the format of this).
CLASSMETHODS::
METHOD:: new
@ -14,7 +13,11 @@ Creates a new instance
ARGUMENT:: featureFunc
ANCHOR::featureFunction::
A function that will perform some processing on a section of a buffer. It is passed the following arguments
A function that will perform some processing on a section of a buffer.
warning::
This function strong::must:: return a link::Classes/UGen:: that sets a code::done:: flag (see link::Classes/Done::), in order for the iteration and housekeeping to work. All code::FluidBuf*:: objects do this.
::
The function is passed the following arguments
definitionlist::
##src
|| The source link::Classes/Buffer:: containing the audio to process
@ -22,24 +25,35 @@ definitionlist::
|| The start frame of the section to process, in samples
##num
|| The number of frames to process, in samples
##label
|| The label for the segment from the supplied dictionary to link::#-play::
##data
|| anchor::datadict:: An link::Classes/Association:: of the label for this segment, with an link::Classes/IdentityDictionary:: of useful extra data:
definitionlist::
## sr
|| The original sample rate of the segment
## numchans
|| The original channel count of the segment
## voice
|| By default link::#-play:: will run multiple jobs in parallel depending on the link::#ntasks#tasks:: argument. This contains the task number, which allows you to maintain a separate set of resources (e.g. temporary link::Classes/Buffer::s) for each task.
## index
|| The absolute count of slices processed.
::
warning::
This function strong::must:: return a link::Classes/UGen:: that sets a code::done:: flag (see link::Classes/Done::), in order for the iteration and housekeeping to work. All code::FluidBuf*:: objects do this.
::
An example function that records statistics about the pitch of a segment in to a link::Classes/FluidDataSet:: could look like
code::
~avgPitch = { |src,start,num,label|
var pitch, stats,statsbuf;
~featureBuffers = 4.do{Buffer.new};
~avgPitch = { |src,start,num,data|
var pitch, stats,statsbuf,label,voice;
label = data.key;
voice = data.value[\voice];
statsbuf = LocalBuf(7);
pitch = FluidBufPitch.kr(src,start,num,features:~someotherbuffer);
stats = FluidBufStats.kr(~someotherbuffer,stats:statsbuf,trig:Done.kr(pitch));
pitch = FluidBufPitch.kr(src,start,num,numChans:1,features:~featurebuffers[voice]);
stats = FluidBufStats.kr(~featurebuffers[voice],numChans:1, stats:statsbuf,trig:Done.kr(pitch));
FluidDataSetWr.kr(label,statsbuf,~mydataset,Done.kr(stats))
}
};
::
INSTANCEMETHODS::
@ -57,7 +71,11 @@ ARGUMENT:: bufIdx
An link::Classes/IdentityDictionary:: specifying labels, boundaries, sample rate and channel count for the segment. See link::Classes/FluidLoadFolder#-index:: for details.
ARGUMENT:: action
A function to run when processing is complete
A function to run when processing is complete. This gets passed the same link::Classes/Association:: as link::#datadict#the processing function::
ARGUMENT:: tasks
ANCHOR::ntasks::
The number of parallel processing tasks to run on the server. Default 4. This should probably never be greater than the number of available CPU cores.
METHOD:: featureFunc
Returns the function used by this instance.
@ -66,7 +84,8 @@ EXAMPLES::
code::
s.reboot;
//Load all the Fluid Corpus Manipulation audio f
//Load all the Fluid Corpus Manipulation audio files
(
~path = File.realpath(FluidLoadFolder.class.filenameSymbol).dirname +/+ "../AudioFiles";
~loader = FluidLoadFolder(~path);
@ -78,19 +97,27 @@ s.reboot;
~pitchbufs = 4.collect{Buffer.new};
~statsbufs = 4.collect{Buffer.new};
)
//segment
~slicer.play(s,~loader.buffer,~loader.index,{|dataDictionary| "Slicing done".postln;});
//In the interests of brevity, let's just take a subset of the slices and process these
~subset = IdentityDictionary.newFrom(~slicer.index.asSortedArray[0..3].flatten(1));
~subset = IdentityDictionary.newFrom(~slicer.index.asSortedArray[0..7].flatten(1));
//write pitch statistics into a dataset
~extractor = FluidProcessSlices({|src,start,num,label,data,i|
var pitch, stats;
pitch = FluidBufPitch.kr(src,start,num,features:~pitchbufs[i]);
stats = FluidBufStats.kr(~pitchbufs[i],stats:~statsbufs[i],trig:Done.kr(pitch));
FluidDataSetWr.kr(label,~statsbufs[i],~pitchdata,Done.kr(stats))
(
~extractor = FluidProcessSlices({|src,start,num,data|
var pitch, stats, label,i;
i = data.value[\voice];
label = data.key;
pitch = FluidBufPitch.kr(src,start,num,features:~pitchbufs[i]);
stats = FluidBufStats.kr(~pitchbufs[i],stats:~statsbufs[i],trig:Done.kr(pitch));
FluidDataSetWr.kr(label,~statsbufs[i],~pitchdata,Done.kr(stats))
});
)
~extractor.play(s,~loader.buffer,~subset,{"Feature extraction done".postln});
//view the data
~pitchdata.print
::

@ -6,7 +6,7 @@ related:: Classes/FluidLoadFolder, Classes/FluidProcessSlices, Classes/FluidBufO
DESCRIPTION::
A utility class that abstracts the boilerplate code involved with batch slicing a buffer containing distinct chunks of audio (a 'corpus' for these purposes).
Whilst this class is designed to be used most easily in conjunction with link::Classes/FluidLoadFolder::, it doesn't have to be. However, it does excpect to be passed an link::Classes/IdentityDictionary:: of a particular format (see link::#indexFormat#description below::).
Whilst this class is designed to be used most easily in conjunction with link::Classes/FluidLoadFolder::, it doesn't have to be. However, it does expect to be passed an link::Classes/IdentityDictionary:: of a particular format (see link::#indexFormat#description below::).
The actual mechanism for doing the slicing is provided by the user, in the form of a function that will form part of a larger link::Classes/Synth:: (see link::#sliceFuncDescription#below::).
@ -88,10 +88,13 @@ s.reboot
~loader.play(s,action:{ |dataDictionary| "Done loading".postln});
)
(
~slicer = FluidSliceCorpus({ |src,start,num,dest|
FluidBufOnsetSlice.kr(src,start,num,indices:dest, threshold:0.7)
});
)
(
~slicer.play(s,~loader.buffer,~loader.index,{|dataDictionary|
"Slicing done".postln;
//we get passed an IdentityDictionary of slice data, let's look at it
@ -102,6 +105,5 @@ s.reboot
')'.postln;
}
});
)
::

Loading…
Cancel
Save