Livecoding machine listening


#1

Hi guys,

I’m moving my first steps into live coding thanks to the beautiful Sean Cotterill tutorials and now I would like my set to send some information via OSC to an external piece of software in order for it to create live visuals.

Some months ago I wrote a SynthDef and some functions to send machine-listening data (like amplitude, centroid, chromas and more) via OSC.

Here’s the code:

(
s.waitForBoot({
	// Address of the external OSC application which will receive
	// the machine-listening data from SC.
	~data_analyzer = NetAddr("127.0.0.1", 15100);
	// Master switch for forwarding data.
	// NOTE: the boolean literal in SuperCollider is lowercase `true`;
	// `True` is the class object, so `~x = True; ~x == True` only worked
	// by accident and `if(~x, ...)` would fail.
	~bool_sendOSC = true;

	~fftWidth = 2048;     // FFT frame size used by the spectral UGens
	~rate = 1;            // reports per second for loud/flat/centroid
	~pitchExecFreq = 10;  // execution frequency (Hz) of the pitch tracker
	~b = Bus.audio(s, 2); // stereo bus the analysis synth reads from

	s.sync;

	// Synth definitions

	SynthDef(\features_onset, {
		| in=0, rate=1 |
		var sig, fft, amplitude;
		var loud, flat, centroid;
		var freq, hasFreq, chroma;
		var onset;

		sig = In.ar(in, 2);

		amplitude = Amplitude.kr(sig);
		fft = FFT(LocalBuf(~fftWidth), sig);
		loud = Loudness.kr(fft);
		# freq, hasFreq = Pitch.kr(sig, execFreq: ~pitchExecFreq);
		flat = SpecFlatness.kr(fft);
		chroma = Chromagram.kr(fft, ~fftWidth, /*tuningbase: 55.0,*/ integrationflag: 0, perframenormalize: 1);
		centroid = SpecCentroid.kr(fft);

		// Onsets are reported immediately via SendTrig (replyID 0).
		onset = Onsets.kr(fft, threshold: 0.3, odftype: 'magsum');
		SendTrig.kr(onset, id: 0);

		// Spectral features, `rate` times per second (replyID 1).
		SendReply.kr(Impulse.kr(rate), '/tr',
			[loud, flat, centroid],
			replyID: 1);

		// Pitch + chromagram, sent only while a pitch is detected (replyID 2).
		SendReply.kr(hasFreq * Impulse.kr(~pitchExecFreq), '/tr',
			[hasFreq, freq] ++ chroma[0..11],
			replyID: 2);
	}).add;

	s.sync;

	// Language-side responder for '/tr' replies.
	// msg layout: [cmdName, nodeID, replyID, values...] -- dispatch on msg[2].
	o = OSCFunc({
		|msg, time, addr, recvPort|
		var data;

		switch( msg[2],
			0, {
				// ONSETS ID -- send a 1 immediately followed by a 0 ("bang")
				if( ~bool_sendOSC, {
					~data_analyzer.sendMsg("/debug/onset", 1);
					~data_analyzer.sendMsg("/debug/onset", 0);
				});
			},
			1, {
				// FEATURES ID -- loudness, flatness, spectral centroid
				data = msg[3..];
				if( ~bool_sendOSC, {
					~data_analyzer.sendMsg("/debug/loud", data[0]);
					~data_analyzer.sendMsg("/debug/flat", data[1]);
					~data_analyzer.sendMsg("/debug/centroid", data[2]);
				});
			},
			2, {
				// PITCH FEATURES ID -- hasFreq, freq, then 12 chroma bins
				data = msg[3..];
				if( ~bool_sendOSC, {
					~sendDataViaOsc.value(data);
				});
			},
			{
				"default".postln;
			}
		);

	},'/tr', s.addr);

	s.sync;

	// Utility: forward pitch/chroma data to the external receiver.
	// data layout: [hasFreq, freq, chroma C..B].
	~sendDataViaOsc = {
		|data|
		var noteNames = ["C", "C#", "D", "D#", "E", "F", "F#", "G", "G#", "A", "A#", "B"];

		~data_analyzer.sendMsg("/debug/hasFreq", data[0]);
		~data_analyzer.sendMsg("/debug/freq", data[1]);

		// chroma data: one message per pitch class
		noteNames.do({ |name, i|
			~data_analyzer.sendMsg("/debug/" ++ name, data[i + 2]);
		});
	};

	s.sync;

	// let's start! The synth reads from ~b; play your sources onto ~b.
	~feature_onset = Synth(\features_onset, [\in, ~b, \rate, ~rate]);
});
)

I would like to use it but I don’t know how.

As you can see the synth must be fed with some audio input.

Is there a way the synth can be fed by ProxySpace audio output?
Should it be the right strategy to get what I want?
Does anybody have experience with this topic?
Maybe there is some Quark I should use instead?

Thank you in advance for your help!


#2

Each ProxySpace output (NodeProxy) has a dedicated Bus it writes its audio data to (from which it is then monitored when .play is called).

Ndef(\a, {SinOsc.ar}); // wrapper for NodeProxy (ProxySpace is a different wrapper style... I find this easier...)

Ndef(\a).bus; // the private Bus to which the proxy's audio is written

Ndef(\a).play; // creates a monitor from the bus to the hardware output.

Your synth could e.g. have an audio input via

// NOTE: SuperCollider line comments use `//`, not `\\` -- the original
// snippet would not parse.
SynthDef(\yoursynth, {
	// [...]
	var in = \in.ar([0, 0]); // a two-channel audio-rate input (NamedControl)
	// [...]
}).add

then you could map the bus of the NodeProxy to that input:

x = Synth(\yoursynth); // instantiate the analysis synth

x.map(\in, Ndef(\a).bus) // route the proxy's bus into the \in audio input

minimal example:

// a test source: two sines amplitude-modulated by slow triangle LFOs
Ndef(\a, {SinOsc.ar([800, 400]) * LFTri.ar([0.25, 0.06], Rand())})

// sending it to bus 100, your analysis synth does not necessarily have to have an output...
// (addAfter the default group so it runs after the proxy has written its audio)
x = {
	var input = \input.ar([0, 0]);
	Amplitude.ar(input); // do some analysis
}.play(outbus: 100, target: s.defaultGroup, addAction: \addAfter)

// map ndef output to analysis input
x.map(\input, Ndef(\a).bus)

// instead of analysing, we just look at the signal written to bus 100
s.scope(2, 100)

hope this helps to get you started


#3

Seems like you should be able to do it all with NodeProxies. Maybe something like this…

// initialize nodeproxy
Ndef(\features_onset).play;

// add sound from first source
Ndef(\features_onset)[0] = \mix -> {Ndef(\source1).ar};

// add sound from second source
Ndef(\features_onset)[1] = \mix -> {Ndef(\source2).ar};

// add analysis as a filter
Ndef(\features_onset).filter(10, {arg in
	
	var rate  = \rate.kr(1);
	var sig, fft, amplitude;
	var loud, flat, centroid;
	var freq, hasFreq, chroma;
	var ph, onset;
		
	sig = in;

	amplitude = Amplitude.kr(sig);
	fft = FFT(LocalBuf(~fftWidth), sig);
	loud = Loudness.kr(fft);
	# freq, hasFreq = Pitch.kr(sig, execFreq:~pitchExecFreq);
	flat = SpecFlatness.kr(fft);
	chroma = Chromagram.kr(fft, ~fftWidth, /*tuningbase:55.0,*/ integrationflag:0, perframenormalize:1);
	centroid = SpecCentroid.kr(fft);

	onset = Onsets.kr(fft, threshold:0.3, odftype:'magsum');
	SendTrig.kr(onset, id:0);

	SendReply.kr(Impulse.kr(rate),'/tr',
		[
			loud, flat, centroid
		],
	replyID:1);

	SendReply.kr(hasFreq * Impulse.kr(~pitchExecFreq),'/tr',
		[
			hasFreq, freq,
			chroma[0], chroma[1], chroma[2],  chroma[3],
			chroma[4], chroma[5], chroma[6],  chroma[7],
			chroma[8], chroma[9], chroma[10], chroma[11],
		],
	replyID:2);
});

#4

You already have an input: sig = In.ar(in, 2);

And you already allocated a bus for it: ~b = Bus.audio(s, 2);

All that remains, then, is:

  1. Play your source signal onto this bus.
  2. Include bus: ~b in the argument list to the features synth.
  3. Make sure the features synth comes at the end of the node tree.

That’s it. You don’t have to re-architect your SynthDef to work with NodeProxy.

hjh


#5

Thank you so much guys.
Thanks to your help I eventually came up with my final solution.

Inside my setup file (100% inspired by the Sean Cotterill one) I've added the synth and the OSC function.

The synth is fed by buses 0 and 1, creates a mix of them, and analyzes the signal.
Then the server sends back to the language some messages containing the analysis data, which the OSC func forwards to another external OSC receiver to create visuals.

Here’s my final setup.scd file:

(
// Server options must be set before boot to take effect.
//increase number of buffers the server has access to for loading samples
s.options.numBuffers = 1024 * 16;
//increase the memory available to the server
s.options.memSize = 8192 * 64;
//boot the server
s.boot;
//display the oscilloscope
s.scope;

// Language-side responder for '/tr' replies from the server.
// msg layout: [cmdName, nodeID, replyID, values...] -- we dispatch on the
// replyID (msg[2]) set by SendTrig/SendReply in \audioAnalyzer below and
// forward the data to an external OSC receiver on port 12000.
// define the OSC function inside the main environment
~oscFunc = OSCFunc({
	|msg, time, addr, recvPort|
	var data_analyzer = NetAddr("127.0.0.1", 12000);
	var data, elapsed;

	switch( msg[2],
		0, {
			// ONSET ID -- send a 1 immediately followed by a 0 ("bang")
			//"onset detected".postln;
			data_analyzer.sendMsg("/debug/onset", 1);
			data_analyzer.sendMsg("/debug/onset", 0);

		},
		1, {
			// FEATURE ID -- loudness, flatness, spectral centroid
			data = msg[3..];
			//"loudness. flatness, centroid info".postln;
			data_analyzer.sendMsg("/debug/loud", data[0]);
			data_analyzer.sendMsg("/debug/flat", data[1]);
			data_analyzer.sendMsg("/debug/centroid", data[2]);
		},
		2, {
			// PITCH FEATURES ID -- hasFreq, freq, then the 12 chroma bins
			data = msg[3..];
			//"pitch info".postln;
			// hasfreq + freq
			data_analyzer.sendMsg("/debug/hasFreq", data[0]);
			data_analyzer.sendMsg("/debug/freq", data[1]);
			// chroma data (one message per pitch class, C through B)
			data_analyzer.sendMsg("/debug/C",  data[2] );
			data_analyzer.sendMsg("/debug/C#", data[3] );
			data_analyzer.sendMsg("/debug/D",  data[4] );
			data_analyzer.sendMsg("/debug/D#", data[5] );
			data_analyzer.sendMsg("/debug/E",  data[6] );
			data_analyzer.sendMsg("/debug/F",  data[7] );
			data_analyzer.sendMsg("/debug/F#", data[8]);
			data_analyzer.sendMsg("/debug/G",  data[9]);
			data_analyzer.sendMsg("/debug/G#", data[10]);
			data_analyzer.sendMsg("/debug/A",  data[11]);
			data_analyzer.sendMsg("/debug/A#", data[12]);
			data_analyzer.sendMsg("/debug/B",  data[13]);
		},
		{
			"default".postln;
		}
	);
},'/tr');

// Build and start the analysis synth once the server is up.
// NOTE(review): the fixed 3-second wait assumes the server has booted by
// then; s.waitForBoot would be more robust -- confirm on the target machine.
Task ({
	// define the "machine listening" synth
	// fftWidth = 2048;
	SynthDef(\audioAnalyzer, {
		| in=0, rate1=1, rate2=1 |
		var sig, fft, amplitude;
		var loud, flat, centroid;
		var freq, hasFreq, chroma;
		var ph, onset;

		// we must use a mono source -- sum the two input channels
		sig = Mix.ar(In.ar(in, 2));

		amplitude = Amplitude.kr(sig);
		fft = FFT(LocalBuf(2048), sig);
		loud = Loudness.kr(fft);
		# freq, hasFreq = Pitch.kr(sig, execFreq:rate2);
		flat = SpecFlatness.kr(fft);
		chroma = Chromagram.kr(fft, 2048,/*tuningbase:55.0,*/integrationflag:0,perframenormalize:1);
		centroid = SpecCentroid.kr(fft);

		// onset reports (replyID 0 on the language side)
		onset = Onsets.kr(fft, threshold:0.1, odftype:'magsum');
		SendTrig.kr(onset, id:0);

		// spectral features, rate1 times per second (replyID 1)
		SendReply.kr(Impulse.kr(rate1),'/tr',
			[
				loud, flat, centroid
			],
			replyID:1);

		// pitch + chromagram, only while a pitch is detected (replyID 2)
		SendReply.kr(hasFreq * Impulse.kr(rate2),'/tr',
			[
				hasFreq, freq,
				chroma[0], chroma[1], chroma[2],  chroma[3],
				chroma[4], chroma[5], chroma[6],  chroma[7],
				chroma[8], chroma[9], chroma[10], chroma[11],
			],
			replyID:2);
	}).add;

	3.wait;

	// and instantiate it just after the default server group
	// NOTE(review): \in is 0 here, i.e. audio bus 0 (the hardware output
	// bus, per the update below) -- map \in to the proxy's bus instead;
	// verify the audio routing on your setup.
	~listenSynth = Synth(\audioAnalyzer, [\in, 0,\rate1, 10, \rate2, 10],target:Server.default.defaultGroup, addAction:\addAfter);
}).start;


//start proxyspace
p=ProxySpace.push(s);
//start tempo clock
p.makeTempoClock;
//give proxyspace a tempo
p.clock.tempo = 2;

// Load samples, SynthDefs and snippets after fixed delays.
// NOTE(review): paths are relative to this file; assumes a ../samples/set1
// folder of sample subfolders exists next to the setup folder.
Task({
	3.wait;
	d = Dictionary.new;
	d.add(\foldernames -> PathName(thisProcess.nowExecutingPath.dirname +/+ "../samples/set1").entries);
	for (0, d[\foldernames].size-1,
		{arg i; d.add(d[\foldernames][i].folderName -> d[\foldernames][i].entries.collect({
			arg sf;
			sf.postln;
			Buffer.read(s,sf.fullPath);
		});
	)});
	5.wait;
	"loading synthdefs".postln;
	("SynthDefs.scd").loadRelative;
	//loads snippets from setup folder
	"loading snippets".postln;
	("Snippets.scd").loadRelative;
	//wait, because otherwise it won't work for some reason
	3.wait;
	//activate StageLimiter - Part of the BatLib quark
	StageLimiter.activate;
	"Setup done!".postln;
}).start;
)

Thank you so much for your help!

UPDATE 24/03/19
I just noticed that using bus 0 as input for the \audioAnalyzer synth is not the correct way to do what I meant. In=0 means it is sampling audio input from the hardware microphone and not from the ProxySpace output bus as I wanted (I wasn’t able to figure out before because the hardware setup in my room is made in a way that the laptop microphone picked up a loud sound from the near speakers so it seemed to work initially).

I’ll continue to work on it in order to find a more correct way to do it.


#6

Bus 0 is hardware output.

You have an ‘in’ argument to the SynthDef, used like In.ar(in, 2) – which is all correct.

So just set ‘in’ to the proxy’s bus: ~listenSynth.set(\in, theProxy.bus). Or wait until the ProxySpace is all set up, and then create ~listenSynth = Synth(\audioAnalyzer, [\in, theProxy.bus, ...]).

hjh