Hi! I’m sonifying a very large dataset of timestamped Linux kernel activity. I want to start with very densely spaced short sounds and then slowly expand the time between events. The trouble I quite naturally run into is that when the time between events is very short, scsynth doesn’t seem to be able to keep up with all the messages and Synths.
I’ve tried adding latency with s.makeBundle which didn’t help (I’m getting “late” messages with increasing lateness until the playrate slows down).
Is there a smart way of doing this with the Client-Server architecture or is going all signal domain the only way to go?
The timecodes are in microseconds; I’m starting off at 1/10th of that speed, i.e. 100,000 events/sec. This code exemplifies the general idea:
(
// Raise server limits before booting; ServerOptions only take effect on (re)boot.
s.options.maxNodes = 8192; // default is 1024 (was 8196 — presumably a typo for the power of two 8192)
s.options.memSize = 256 * 1024; // real-time allocator size; units are KB per ServerOptions docs — TODO confirm
s.reboot; // apply the new options
)
(
// A single filtered click: a one-sample impulse shaped by a resonant
// high-pass filter and a percussive envelope. Frees itself when done.
SynthDef(\tick2, {
	var click, env;
	var rq = \rq.kr(0.1); // filter reciprocal-Q (narrower = more pitched)
	// One-shot percussive envelope; doneAction 2 frees the synth at release end.
	env = Env.perc(\atk.kr(0), \rel.kr(0.05)).ar(2);
	// Impulse.ar(0) emits exactly one unit impulse at synth start.
	click = Impulse.ar(0) * 0.5;
	// Resonant high-pass turns the click into a pitched tick.
	click = RHPF.ar(click, \freq.kr(1000), rq) * env;
	click = Pan2.ar(click, \pan.kr(0)) * \amp.kr(1.0);
	Out.ar(\out.kr(0), click);
}).add;
)
(
// Latency handed to makeBundle so bundles carry a future timestamp.
~latency = 1.0;

// 0.001 works fine, 0.0001 stutters but sounds good, 0.00001 stutters and is distorted
{
	var dt = 0.00001; // seconds between events (1/10th of microsecond resolution)
	// Bounded iteration count — an endless loop froze the system sometimes.
	1000000.do {
		// ~30% of iterations spawn a tick.
		if(rrand(0.0, 1.0) > 0.7) {
			dt.wait; // simulate some calculations happening here
			s.makeBundle(~latency, {
				Synth(\tick2, [
					// (exprand(5, 500) * [-1, 1].choose)
					\freq, rrand(1000, 8000),
					\rq, exprand(0.001, 0.1),
					\out, 0,
					\amp, rrand(0.001, 0.01),
					\rel, dt * 300, //0.01,
					\pan, rrand(-1.0, 1.0)
				]);
			});
		};
		dt.wait;
	}
}.fork;
)