Sub-sample accurate granulation with random periods

hey, here is a version using an array of random values (created by the same 2 ** (mod * modDepth) logic), a “one-shot measure phase” and a “one-shot burst phase”, and deriving triggers from them. You don't have to create the measurePhase and the measureTrigger, but I wanted to show the different ramp and trigger logic for both of them.

Here is the plot:

(
var getSubDivs = { |trigIn, arrayOfSubDivs, duration|
	var trig = Trig1.ar(trigIn, SampleDur.ir);
	var hasTriggered = PulseCount.ar(trig) > 0;
	var subDiv = Ddup(2, Dseq(arrayOfSubDivs, numOfSubDivs)) * duration;
	Duty.ar(subDiv, trig, subDiv) * hasTriggered;
};

var rampOneShot = { |trigIn, duration, cycles = 1|
	var trig = Trig1.ar(trigIn, SampleDur.ir);
	var hasTriggered = PulseCount.ar(trig) > 0;
	var phase = Sweep.ar(trig, 1 / duration).clip(0, cycles);
	phase * hasTriggered;
};

var oneShotRampToTrig = { |phase|
	var compare = phase > 0;
	var delta = HPZ1.ar(compare);
	delta > 0;
};

var oneShotBurstsToTrig = { |stepPhase|
	var phaseStepped = stepPhase.ceil;
	var delta = HPZ1.ar(phaseStepped);
	delta > 0;
};

var randomness = 1;
var numOfSubDivs = 12;
var arrayOfSubDivs = Array.fill(numOfSubDivs, { 2 ** (rrand(-1.0, 1.0) * randomness) } ).normalizeSum;

{
	var initTrigger, duration;
	var measurePhase, measureTrigger;
	var seqOfSubDivs, stepPhase, stepTrigger;

	initTrigger = \trig.tr(1);
	duration = \duration.kr(0.02);

	measurePhase = rampOneShot.(initTrigger, duration).wrap(0, 1);
	measureTrigger = oneShotRampToTrig.(measurePhase);

	seqOfSubDivs = getSubDivs.(initTrigger, arrayOfSubDivs, duration);
	stepPhase = rampOneShot.(initTrigger, seqOfSubDivs, numOfSubDivs);
	stepTrigger = oneShotBurstsToTrig.(stepPhase);
	stepPhase = stepPhase.wrap(0, 1);

	[measurePhase, measureTrigger, stepPhase, stepTrigger];

}.plot(0.021);
)

Here triggered by Pmono:

(
var getSubDivs = { |trig, arrayOfSubDivs, numOfSubDivs, duration|
	var hasTriggered = PulseCount.ar(trig) > 0;
	var subDiv = Ddup(2, (Dseq(arrayOfSubDivs, numOfSubDivs).dpoll * duration));
	Duty.ar(subDiv, trig, subDiv) * hasTriggered;
};

var rampOneShot = { |trig, duration, cycles|
	var hasTriggered = PulseCount.ar(trig) > 0;
	var phase = Sweep.ar(trig, 1 / duration).clip(0, cycles);
	phase * hasTriggered;
};

var oneShotBurstsToTrig = { |phaseScaled|
	var phaseStepped = phaseScaled.ceil;
	var delta = HPZ1.ar(phaseStepped);
	delta > 0;
};

var rampToSlope = { |phase|
	var history = Delay1.ar(phase);
	var delta = (phase - history);
	delta.wrap(-0.5, 0.5);
};

var accum = { |trig|
	Duty.ar(SampleDur.ir, trig, Dseries(0, 1));
};

SynthDef(\burst, {

	var initTrigger, arrayOfSubDivs, numOfSubDivs;
	var seqOfSubDivs, stepPhaseScaled, stepPhase, stepTrigger;
	var stepSlope, windowSlope, accumulator, windowPhase, window;
	var grainSlope, grainPhase, sig;

	initTrigger = Trig1.ar(\trig.tr(0), SampleDur.ir);
	arrayOfSubDivs = \arrayOfSubDivs.kr(Array.fill(16, 1));
	numOfSubDivs = \numOfSubDivs.kr(12);
	
	initTrigger.poll(initTrigger, \initTrig);

	seqOfSubDivs = getSubDivs.(initTrigger, arrayOfSubDivs, numOfSubDivs, \sustain.kr(1));
	stepPhaseScaled = rampOneShot.(initTrigger, seqOfSubDivs, numOfSubDivs);
	stepTrigger = oneShotBurstsToTrig.(stepPhaseScaled);
	stepPhase = stepPhaseScaled.wrap(0, 1);

	stepSlope = rampToSlope.(stepPhase);
	accumulator = accum.(stepTrigger);

	windowSlope = Latch.ar(stepSlope, stepTrigger) / max(0.001, \overlap.kr(0.5));
	windowPhase = (windowSlope * accumulator).clip(0, 1);
	
	window = IEnvGen.ar(Env([0, 1, 0], [0.01, 0.99], [4.0, -4.0]), windowPhase);

	grainSlope = \freq.kr(440) * SampleDur.ir;
	grainPhase = (grainSlope * accumulator).wrap(0, 1);
	sig = sin(grainPhase * 2pi);
	
	sig = sig * window;

	sig = sig * Env.asr(0.001, 1, 0.001).ar(Done.freeSelf, \gate.kr(1));

	sig = sig!2 * 0.1;

	Out.ar(\out.kr(0), sig);
}).add;
)

(
var arrayOfSubDivs = Array.fill(12, { 2 ** rrand(-1.0, 1.0) } ).normalizeSum;
arrayOfSubDivs.debug(\arrayOfSubDivs);

Pdef(\burst,
	Pmono(\burst,

		\trig, 1,
		\legato, 1.0,
		\dur, 4,
		
		\freq, 440,

		\time, Pfunc { |ev| ev.use { ~sustain.value } / thisThread.clock.tempo },

		\arrayOfSubDivs, [arrayOfSubDivs],
		\numOfSubDivs, 12,

		\out, 0,

	),
).play;
)

and implemented in the multichannel setup:

(
var getSubDivs = { |trig, arrayOfSubDivs, numOfSubDivs, duration|
	var hasTriggered = PulseCount.ar(trig) > 0;
	var subDiv = Ddup(2, Dseq(arrayOfSubDivs, numOfSubDivs)) * duration;
	Duty.ar(subDiv, trig, subDiv) * hasTriggered;
};

var rampOneShot = { |trig, duration, cycles|
	var hasTriggered = PulseCount.ar(trig) > 0;
	var phase = Sweep.ar(trig, 1 / duration).clip(0, cycles);
	phase * hasTriggered;
};

var oneShotBurstsToTrig = { |phaseScaled|
	var phaseStepped = phaseScaled.ceil;
	var delta = HPZ1.ar(phaseStepped);
	delta > 0;
};

var rampToSlope = { |phase|
	var history = Delay1.ar(phase);
	var delta = (phase - history);
	delta.wrap(-0.5, 0.5);
};

var multiChannelTrigger = { |numChannels, trig|
	var count = Demand.ar(trig, DC.ar(0), Dseries(0, 1, inf));
	numChannels.collect{ |chan|
		trig * BinaryOpUGen('==', (count + (numChannels - 1 - chan) + 1) % numChannels, 0);
	};
};

var getSubSampleOffset = { |phase, trig|
	var slope = rampToSlope.(phase);
	var sampleCount = phase - (slope < 0) / slope;
	Latch.ar(sampleCount, trig);
};

var accumulatorSubSample = { |trig, subSampleOffset|
	var accum = Duty.ar(SampleDur.ir, trig, Dseries(0, 1));
	accum + subSampleOffset;
};

var multiChannelAccumulator = { |triggers, subSampleOffsets|
	triggers.collect{ |localTrig, i|
		var hasTriggered = PulseCount.ar(localTrig) > 0;
		var localAccum = accumulatorSubSample.(localTrig, subSampleOffsets[i]);
		localAccum * hasTriggered;
	};
};

var numOfSubDivs = 5;
var arrayOfSubDivs = Array.fill(numOfSubDivs, { 2 ** rrand(-1.0, 1.0) } ).normalizeSum;
arrayOfSubDivs.debug(\arrayOfSubDivs);

{
	var numChannels = 5;

	var initTrigger, duration;
	var seqOfSubDivs, stepPhaseScaled, stepPhase, stepTrigger, stepSlope;
	var triggers, subSampleOffsets, accumulator, overlap, maxOverlap;
	var windowSlopes, windowPhases;
	
	initTrigger = Trig1.ar(\trig.tr(1), SampleDur.ir);
	duration = \duration.kr(0.02);

	seqOfSubDivs = getSubDivs.(initTrigger, arrayOfSubDivs, numOfSubDivs, duration);
	stepPhaseScaled = rampOneShot.(initTrigger, seqOfSubDivs, numOfSubDivs);
	stepTrigger = oneShotBurstsToTrig.(stepPhaseScaled);
	stepPhase = stepPhaseScaled.wrap(0, 1);
	stepSlope = rampToSlope.(stepPhase);

	// distribute triggers round-robin across the channels
	triggers = multiChannelTrigger.(numChannels, stepTrigger);

	// calculate sub-sample offset per multichannel trigger
	subSampleOffsets = getSubSampleOffset.(stepPhase, triggers);

	// create a multichannel accumulator with sub-sample accuracy
	accumulator = multiChannelAccumulator.(triggers, subSampleOffsets);
	
	overlap = \overlap.kr(1);
	maxOverlap = min(overlap, numChannels);

	windowSlopes = Latch.ar(stepSlope, triggers) / max(0.001, maxOverlap);
	windowPhases = (windowSlopes * accumulator).clip(0, 1);

	windowPhases.wrap(0, 1);
	
}.plot(0.041);
)

EDIT: and triggered by Pmono:

(
var getSubDivs = { |trig, arrayOfSubDivs, numOfSubDivs, duration|
	var hasTriggered = PulseCount.ar(trig) > 0;
	var subDiv = Ddup(2, (Dseq(arrayOfSubDivs, numOfSubDivs).dpoll * duration));
	Duty.ar(subDiv, trig, subDiv) * hasTriggered;
};

var rampOneShot = { |trig, duration, cycles|
	var hasTriggered = PulseCount.ar(trig) > 0;
	var phase = Sweep.ar(trig, 1 / duration).clip(0, cycles);
	phase * hasTriggered;
};

var oneShotBurstsToTrig = { |phaseScaled|
	var phaseStepped = phaseScaled.ceil;
	var delta = HPZ1.ar(phaseStepped);
	delta > 0;
};

var rampToSlope = { |phase|
	var history = Delay1.ar(phase);
	var delta = (phase - history);
	delta.wrap(-0.5, 0.5);
};

var multiChannelTrigger = { |numChannels, trig|
	var count = Demand.ar(trig, DC.ar(0), Dseries(0, 1, inf));
	numChannels.collect{ |chan|
		trig * BinaryOpUGen('==', (count + (numChannels - 1 - chan) + 1) % numChannels, 0);
	};
};

var getSubSampleOffset = { |phase, trig|
	var slope = rampToSlope.(phase);
	var sampleCount = phase - (slope < 0) / slope;
	Latch.ar(sampleCount, trig);
};

var accumulatorSubSample = { |trig, subSampleOffset|
	var accum = Duty.ar(SampleDur.ir, trig, Dseries(0, 1));
	accum + subSampleOffset;
};

var multiChannelAccumulator = { |triggers, subSampleOffsets|
	triggers.collect{ |localTrig, i|
		var hasTriggered = PulseCount.ar(localTrig) > 0;
		var localAccum = accumulatorSubSample.(localTrig, subSampleOffsets[i]);
		localAccum * hasTriggered;
	};
};

var channelMask = { |triggers, numChannels, channelMask, centerMask|
	var panChannels = Array.series(numChannels, -1 / numChannels, 2 / numChannels).wrap(-1.0, 1.0);
	var panPositions = panChannels.collect { |pos| Dser([pos], channelMask) };
	Demand.ar(triggers, 0, Dseq(panPositions ++ Dser([0], centerMask), inf));
};

SynthDef(\burst, {

	var numChannels = 5;

	var initTrigger, arrayOfSubDivs, numOfSubDivs;
	var seqOfSubDivs, stepPhaseScaled, stepPhase, stepTrigger, stepSlope;
	var triggers, subSampleOffsets, accumulator, overlap, maxOverlap, chanMask;
	var windowSlopes, windowPhases, grainWindows;
	var grainSlope, grainPhases, sigs, sig;

	initTrigger = Trig1.ar(\trig.tr(0), SampleDur.ir);
	arrayOfSubDivs = \arrayOfSubDivs.kr(Array.fill(16, 1));
	numOfSubDivs = \numOfSubDivs.kr(12);
	
	initTrigger.poll(initTrigger, \initTrig);

	seqOfSubDivs = getSubDivs.(initTrigger, arrayOfSubDivs, numOfSubDivs, \sustain.kr(1));
	stepPhaseScaled = rampOneShot.(initTrigger, seqOfSubDivs, numOfSubDivs);
	stepTrigger = oneShotBurstsToTrig.(stepPhaseScaled);
	stepPhase = stepPhaseScaled.wrap(0, 1);
	stepSlope = rampToSlope.(stepPhase);

	// distribute triggers round-robin across the channels
	triggers = multiChannelTrigger.(numChannels, stepTrigger);

	// calculate sub-sample offset per multichannel trigger
	subSampleOffsets = getSubSampleOffset.(stepPhase, triggers);

	// create a multichannel accumulator with sub-sample accuracy
	accumulator = multiChannelAccumulator.(triggers, subSampleOffsets);

	overlap = \overlap.kr(1);
	maxOverlap = min(overlap, numChannels);
	chanMask = channelMask.(triggers, numChannels - 1, \channelMask.kr(1), \centerMask.kr(1));

	windowSlopes = Latch.ar(stepSlope, triggers) / max(0.001, maxOverlap);
	windowPhases = (windowSlopes * accumulator).clip(0, 1);
	grainWindows = IEnvGen.ar(Env([0, 1, 0], [0.01, 0.99], [4.0, -4.0]), windowPhases);

	grainSlope = \freq.kr(440) * SampleDur.ir;
	grainPhases = (grainSlope * accumulator).wrap(0, 1);
	sigs = sin(grainPhases * 2pi);

	sigs = sigs * grainWindows;

	sigs = PanAz.ar(2, sigs, chanMask * \panMax.kr(0.8));
	sig = sigs.sum;

	sig = sig * \amp.kr(-15).dbamp;

	sig = sig * Env.asr(0.001, 1, 0.001).ar(Done.freeSelf, \gate.kr(1));

	sig = LeakDC.ar(sig);
	sig = Limiter.ar(sig);
	Out.ar(\out.kr(0), sig);
}).add;
)

(
var arrayOfSubDivs = Array.fill(12, { 2 ** rrand(-1.0, 1.0) } ).normalizeSum;
arrayOfSubDivs.debug(\arrayOfSubDivs);

Pdef(\burst,
	Pmono(\burst,

		\trig, 1,
		\legato, 1.0,
		\dur, 4,

		\freq, 220,
		\overlap, 1,

		\time, Pfunc { |ev| ev.use { ~sustain.value } / thisThread.clock.tempo },

		\arrayOfSubDivs, [arrayOfSubDivs],
		\numOfSubDivs, 12,

		\amp, -15,
		\out, 0,

	),
).play;
)

I guess this gives a fresh starting point for considering the overlap problem with random durations.


I have raised an issue on GitHub about Sweep not starting at a sample count of 0: Sweep starts at a sample count of 1 when being triggered by Impulse.ar(0) · Issue #6541 · supercollider/supercollider · GitHub

Until it is fixed, it's probably safer to use Duty for all these kinds of ramp divisions:

phase = Duty.ar(SampleDur.ir, trig, Dseries(0, 1)) * (randomPeriod * SampleDur.ir);
instead of
phase = Sweep.ar(trig, randomPeriod);

For the current rampToRandom implementation it seems to work, but I have been working on several other ramp division attempts today where Sweep led to unexpected results and, when swapped with Duty, the results were correct.
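
As a quick sanity check of the difference (a toy sketch of my own, assuming the behaviour reported in the issue above), you can compare the first output samples of both UGens used as per-sample counters:

(
// toy check: Sweep vs Duty as sample counters, both triggered at time zero
{
	var trig = Impulse.ar(0);
	var sweepCount = Sweep.ar(trig, SampleRate.ir);             // reportedly starts counting at 1
	var dutyCount = Duty.ar(SampleDur.ir, trig, Dseries(0, 1));  // starts counting at 0
	[sweepCount, dutyCount];
}.plot(0.0002);
)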

The rampToRandom function would then look like this:

var getRandomPeriods = { |rate, randomness|
	var randomPeriod = Ddup(2, (2 ** (Dwhite(-1.0, 1.0) * randomness))) / rate;
	Duty.ar(randomPeriod, DC.ar(0), 1 / randomPeriod);
};

var rampToRandom = { |rate, randomness|
	var randomPeriod = getRandomPeriods.(rate, randomness);
	var trig = Changed.ar(randomPeriod) > 0;
	var phase = Duty.ar(SampleDur.ir, trig, Dseries(0, 1)) * (randomPeriod * SampleDur.ir);
	(phase - SampleDur.ir).wrap(0, 1);
};

Hi @dietcv,

thanks for the examples! I'm not sure I fully understand the scope of the last two examples.
Regarding the logic of the “one-shot” implementations: is that about creating a fixed random sequence of subdivisions that can be applied to the (multichannel) trigger streams, and thereby predicting and handling the channel distribution for eventual overlap?
Also, that example does not use the rampToRandom update you just optimized? Just trying to make sure I get the various aspects of this.
& thanks for the tireless research!

Hey, with the last two examples I have presented a way to schedule a fixed number of sub-sample accurate ramps from the language. Instead of having an infinite cycling structure, this one schedules a one-shot measure ramp from the language and subdivides it by a fixed number of pre-calculated divisions in the SynthDef. To have random subdivisions which change per measure, you would have to pass another set of random divisions after your first one-shot measure ramp has completed, to be used for subdivision with your second one-shot measure ramp afterwards. To have these processes in sync you have to reset your ramps.
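
Here is a minimal sketch of that idea (my own variation on the Pmono example above, assuming the \burst SynthDef from the previous post; the Pdef name and the per-event Pfunc are mine): a fresh set of normalized random divisions is generated per event, so every one-shot measure ramp gets its own subdivisions.

(
Pdef(\burstRandomPerMeasure,
	Pmono(\burst,

		\trig, 1,
		\legato, 1.0,
		\dur, 4,

		\freq, 440,

		\time, Pfunc { |ev| ev.use { ~sustain.value } / thisThread.clock.tempo },

		// a new array of random divisions for every measure
		\arrayOfSubDivs, Pfunc { [Array.fill(12, { 2 ** rrand(-1.0, 1.0) }).normalizeSum] },
		\numOfSubDivs, 12,

		\out, 0,

	),
).play;
)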

The function which is used in that example has to work differently from rampToRandom. The difference is that you have a fixed amount of time (your measure ramp) and you want to subdivide it by a number of normalized values stored in your array of random values. That's different from cycling ramps with rampToRandom, which calculates the current period from its current trigger frequency and the current random value, without having to subdivide by a number of events.
In addition, the triggers have to be derived differently.

A simplified example: if you subdivide a measure ramp into 8 equal divisions, you want a trigger on the initial start and on the first, second … seventh wrap, but not on the last, which would otherwise derive 9 triggers from your subdivided ramp.
If you use cycling ramps instead of one-shot ramps that's not a problem, because the last trigger of your first measure is the first trigger of your second measure.
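
To make the counting concrete, here is a toy sketch of my own (not part of the examples above) using the same ceil-plus-HPZ1 logic as oneShotBurstsToTrig: a one-shot ramp over 8 equal subdivisions yields exactly 8 triggers, because the final clip at the end of the measure produces no extra one.

(
// toy check: 8 equal subdivisions of a one-shot measure ramp -> 8 triggers, not 9
{
	var duration = 0.01;
	var trig = Impulse.ar(0);
	// scaled one-shot phase, runs from 0 to 8 over the measure and then clips
	var phaseScaled = Sweep.ar(trig, 8 / duration).clip(0, 8);
	// same logic as oneShotBurstsToTrig: a trigger on every integer step of the ceiled phase
	var stepTrigger = HPZ1.ar(phaseScaled.ceil) > 0;
	// PulseCount ends at 8 (normalized here for plotting)
	[phaseScaled / 8, stepTrigger, PulseCount.ar(stepTrigger) / 8];
}.plot(0.012);
)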

You can see that trigger logic on the plot I have shared above.

Both attempts, cycling ramps with rampToRandom and one-shot ramps with getSubDivs and rampOneShot (I think I'll rename that one to rampToBurst, haha), work equally well; it's just a different way of doing things.


For the cycling ramps attempt with rampToRandom, the trigger frequency argument should ideally be latched to the trigger derived from getRandomPeriods, to make sure the trigger frequency doesn't change mid-grain (for example when using Synth.set asynchronously). Let's see if I can find a hack for implementing that.


I think I have found a solution to latch the trigger frequency. The difference from the original version is subtle: I have added an initial trigger, a Latch, and no subtraction of SampleDur.ir before the last wrap. Compare it with the initial one, shared once more down below. To have a closer look I have used the ~zoomIn function shared in this thread: Offset x-axis of function plot - #4 by TXMod

(
var getRandomPeriods = { |rate, randomness|
	var randomPeriod = Ddup(2, (2 ** (Dwhite(-1.0, 1.0) * randomness))) / rate;
	Duty.ar(randomPeriod, DC.ar(0), 1 / randomPeriod);
};

var rampToRandom_NEW = { |rate, randomness|
	var initTrigger = Impulse.ar(0);
	var randomPeriod = getRandomPeriods.(rate, randomness);
	var trig = Changed.ar(randomPeriod) > 0 + initTrigger;
	var accum = Duty.ar(SampleDur.ir, trig, Dseries(0, 1));
	var slope = Latch.ar(randomPeriod, trig) * SampleDur.ir;
	(slope * accum).wrap(0, 1);
};

{
	rampToRandom_NEW.(\tFreq.kr(500), \randomness.kr(1));
}.plot(0.02);
)

// same here for closer inspection of the different parts with the ~zoomIn function

(
var getRandomPeriods = { |rate, randomness|
	var randomPeriod = Ddup(2, (2 ** (Dwhite(-1.0, 1.0) * randomness))) / rate;
	Duty.ar(randomPeriod, DC.ar(0), 1 / randomPeriod);
};

~rampToRandom_NEW = {
	var randomPeriod, trig, accum, slope, phase;

	randomPeriod = getRandomPeriods.(\rate.kr(500), \randomness.kr(1));
	trig = Changed.ar(randomPeriod) > 0 + Impulse.ar(0);
	accum = Duty.ar(SampleDur.ir, trig, Dseries(0, 1));
	slope = Latch.ar(randomPeriod, trig) * SampleDur.ir;
	phase = (slope * accum).wrap(0, 1);

	[randomPeriod / 1500, trig, phase];
};

~zoomIn.(~rampToRandom_NEW, 3);
)

And while trying out different things, I came up with this rampSelfModLatch function:

(
var getPeriods = { |rate|
	 Duty.ar(1 / rate, DC.ar(0), rate);
};

var rampSelfModLatch = { |rate|
	var initTrigger = Impulse.ar(0);
	var period = getPeriods.(rate);
	var trig = Changed.ar(period) > 0 + initTrigger;
	var accum = Duty.ar(SampleDur.ir, trig, Dseries(0, 1));
	var slope = Latch.ar(period, trig) * SampleDur.ir;
	(slope * accum).wrap(0, 1);
};

{
	var rateMod = 2 ** (SinOsc.ar(50) * \randomness.kr(2));
	var rate = \rate.kr(500) * rateMod;
	rampSelfModLatch.(rate);
}.plot(0.02);
)

// same here for closer inspection of the different parts with the ~zoomIn function
(
var getPeriods = { |rate|
	 Duty.ar(1 / rate, DC.ar(0), rate);
};

~rampSelfModLatch = {
	var rate, rateMod, phase;
	var period, trig, slope, accum;

	rateMod = 2 ** (SinOsc.ar(50) * \randomness.kr(2));
	rate = \rate.kr(500) * rateMod;

	period = getPeriods.(rate);
	trig = Changed.ar(period) > 0 + Impulse.ar(0);
	accum = Duty.ar(SampleDur.ir, trig, Dseries(0, 1));
	slope = Latch.ar(period, trig) * SampleDur.ir;
	phase = (slope * accum).wrap(0, 1);

	[period / 2500, trig, phase];
};

~zoomIn.(~rampSelfModLatch, 3);
)

That was the initial attempt:

// initial attempt for closer inspection of the different parts with the ~zoomIn function

(
var getRandomPeriods = { |rate, randomness|
	var randomPeriod = Ddup(2, (2 ** (Dwhite(-1.0, 1.0) * randomness))) / rate;
	Duty.ar(randomPeriod, DC.ar(0), 1 / randomPeriod);
};

~rampToRandom_OLD = { |rate, randomness|
	var randomPeriod = getRandomPeriods.(\tFreq.kr(500), \randomness.kr(1));
	var trig = Changed.ar(randomPeriod) > 0;
	var accum = Duty.ar(SampleDur.ir, trig, Dseries(0, 1));
	var slope = randomPeriod * SampleDur.ir;
	var phase = (accum * slope - SampleDur.ir).wrap(0, 1);
	
	[randomPeriod / 1500, trig, phase];
};

~zoomIn.(~rampToRandom_OLD, 3);
)

The ~zoomIn function used for closer inspection:

(
~zoomIn = { |func, n|

	func.loadToFloatArray(0.021, action: { |array|
		var d, u;
		{
			d = array.as(Array).clump(n).flop; // split into n arrays
			// d[0].size.debug("d[0].size");
			u = ScaledUserViewContainer(nil, Rect(10, 35, 490, 400));
			u.maxZoom = 30; // set higher if you want more zoom range
			u.unscaledDrawFunc = { |view|
				d.do({ |item, i|
					var col = [Color.red, Color.blue, Color.gray][i];
					Pen.color = col;
					Pen.moveTo(0 @ item[0]);
					item.do({ |val, ind|
						var x = ind / item.size;
						var y = (1 - val);
						Pen.lineTo(view.translateScale(Point(x, y)));
					});
					Pen.stroke;
				});
			};
		}.defer // defer gui process
	});
};
)

Could someone have a look at the different examples? What do you think?

For some reason you have to subtract SampleDur.ir at the end, otherwise the accumulated ramps are not correctly distributed across the channels:

(
var rampToTrig = { |phase|
	var history = Delay1.ar(phase);
	var delta = (phase - history);
	var sum = (phase + history);
	var trig = (delta / sum).abs > 0.5;
	Trig1.ar(trig, SampleDur.ir);
};

var rampToSlope = { |phase|
	var history = Delay1.ar(phase);
	var delta = (phase - history);
	delta.wrap(-0.5, 0.5);
};

var multiChannelTrigger = { |numChannels, trig|
	numChannels.collect{ |chan|
		PulseDivider.ar(trig, numChannels, numChannels - 1 - chan);
	};
};

var getSubSampleOffset = { |phase, trig|
	var slope = rampToSlope.(phase);
	var sampleCount = phase - (slope < 0) / slope;
	Latch.ar(sampleCount, trig);
};

var accumulatorSubSample = { |trig, subSampleOffset|
	var accum = Duty.ar(SampleDur.ir, trig, Dseries(0, 1));
	accum + subSampleOffset;
};

var multiChannelAccumulator = { |triggers, subSampleOffsets|
	triggers.collect{ |localTrig, i|
		var hasTriggered = PulseCount.ar(localTrig) > 0;
		var localAccum = accumulatorSubSample.(localTrig, subSampleOffsets[i]);
		localAccum * hasTriggered;
	};
};

var multiChannelDemand = { |triggers, demandUgen, paramRange|
	var demand = demandUgen;
	triggers.collect{ |localTrig|
		Demand.ar(localTrig, 0, demand)
	}.linexp(0, 1, paramRange[0], paramRange[1]);
};

var getRandomPeriods = { |rate, randomness|
	var randomPeriod = Ddup(2, (2 ** (Dwhite(-1.0, 1.0) * randomness))) / rate;
	Duty.ar(randomPeriod, DC.ar(0), 1 / randomPeriod);
};

var rampToRandom_NEW = { |rate, randomness|
	var initTrigger = Impulse.ar(0);
	var randomPeriod = getRandomPeriods.(rate, randomness);
	var trig = Changed.ar(randomPeriod) > 0 + initTrigger;
	var accum = Duty.ar(SampleDur.ir, trig, Dseries(0, 1));
	var slope = randomPeriod * SampleDur.ir;
	(slope * accum - SampleDur.ir).wrap(0, 1);
};

{
	var numChannels = 5;

	var randomness, stepPhase, stepTrigger, stepSlope, subSampleOffsets;
	var triggers, accumulator, overlaps, maxOverlaps;
	var windowSlopes, windowPhases;

	randomness = \randomness.kr(1);

	stepPhase = rampToRandom_NEW.(\tFreq.kr(500), randomness);
	stepTrigger = rampToTrig.(stepPhase);
	stepSlope = rampToSlope.(stepPhase);

	// distribute triggers round-robin across the channels
	triggers = multiChannelTrigger.(numChannels, stepTrigger);

	// calculate sub-sample offset per multichannel trigger
	subSampleOffsets = getSubSampleOffset.(stepPhase, triggers);

	// create a multichannel accumulator with sub-sample accuracy
	accumulator = multiChannelAccumulator.(triggers, subSampleOffsets);

	overlaps = multiChannelDemand.(triggers, Dwhite(0, 1), \overlapRange.kr([1, 1]));
	maxOverlaps = min(overlaps, 2 ** randomness.neg * numChannels);

	windowSlopes = Latch.ar(stepSlope, triggers) / max(0.001, Latch.ar(maxOverlaps, triggers));
	windowPhases = (windowSlopes * accumulator).clip(0, 1);

	windowPhases.wrap(0, 1);

}.plot(0.021);
)

But then, even when the trigger rate is modulated, the result is linear ramps between 0 and 1, correctly distributed across the channels:

(
var rampToTrig = { |phase|
	var history = Delay1.ar(phase);
	var delta = (phase - history);
	var sum = (phase + history);
	var trig = (delta / sum).abs > 0.5;
	Trig1.ar(trig, SampleDur.ir);
};

var rampToSlope = { |phase|
	var history = Delay1.ar(phase);
	var delta = (phase - history);
	delta.wrap(-0.5, 0.5);
};

var multiChannelTrigger = { |numChannels, trig|
	numChannels.collect{ |chan|
		PulseDivider.ar(trig, numChannels, numChannels - 1 - chan);
	};
};

var getSubSampleOffset = { |phase, trig|
	var slope = rampToSlope.(phase);
	var sampleCount = phase - (slope < 0) / slope;
	Latch.ar(sampleCount, trig);
};

var accumulatorSubSample = { |trig, subSampleOffset|
	var accum = Duty.ar(SampleDur.ir, trig, Dseries(0, 1));
	accum + subSampleOffset;
};

var multiChannelAccumulator = { |triggers, subSampleOffsets|
	triggers.collect{ |localTrig, i|
		var hasTriggered = PulseCount.ar(localTrig) > 0;
		var localAccum = accumulatorSubSample.(localTrig, subSampleOffsets[i]);
		localAccum * hasTriggered;
	};
};

var multiChannelDemand = { |triggers, demandUgen, paramRange|
	var demand = demandUgen;
	triggers.collect{ |localTrig|
		Demand.ar(localTrig, 0, demand)
	}.linexp(0, 1, paramRange[0], paramRange[1]);
};

var getRandomPeriods = { |rate, randomness|
	var randomPeriod = Ddup(2, (2 ** (Dwhite(-1.0, 1.0) * randomness))) / rate;
	Duty.ar(randomPeriod, DC.ar(0), 1 / randomPeriod);
};

var rampToRandom_NEW = { |rate, randomness|
	var initTrigger = Impulse.ar(0);
	var randomPeriod = getRandomPeriods.(rate, randomness);
	var trig = Changed.ar(randomPeriod) > 0 + initTrigger;
	var accum = Duty.ar(SampleDur.ir, trig, Dseries(0, 1));
	var slope = randomPeriod * SampleDur.ir;
	(slope * accum - SampleDur.ir).wrap(0, 1);
};

{
	var numChannels = 5;

	var randomness, rateMod, rate, stepPhase, stepTrigger, stepSlope, subSampleOffsets;
	var triggers, accumulator, overlaps, maxOverlaps;
	var windowSlopes, windowPhases;

	randomness = \randomness.kr(0);
	
	rateMod = 2 ** (SinOsc.ar(50) * \index.kr(2));
	rate = \rate.kr(500) * rateMod;

	stepPhase = rampToRandom_NEW.(rate, randomness);
	stepTrigger = rampToTrig.(stepPhase);
	stepSlope = rampToSlope.(stepPhase);

	// distribute triggers round-robin across the channels
	triggers = multiChannelTrigger.(numChannels, stepTrigger);

	// calculate sub-sample offset per multichannel trigger
	subSampleOffsets = getSubSampleOffset.(stepPhase, triggers);

	// create a multichannel accumulator with sub-sample accuracy
	accumulator = multiChannelAccumulator.(triggers, subSampleOffsets);

	overlaps = multiChannelDemand.(triggers, Dwhite(0, 1), \overlapRange.kr([1, 1]));
	maxOverlaps = min(overlaps, 2 ** randomness.neg * numChannels);

	windowSlopes = Latch.ar(stepSlope, triggers) / max(0.001, Latch.ar(maxOverlaps, triggers));
	windowPhases = (windowSlopes * accumulator).clip(0, 1);

	windowPhases.wrap(0, 1);

}.plot(0.021);
)

But the accumulated phase is off by one sample; I have to think about that again.

(
var rampToTrig = { |phase|
	var history = Delay1.ar(phase);
	var delta = (phase - history);
	var sum = (phase + history);
	var trig = (delta / sum).abs > 0.5;
	Trig1.ar(trig, SampleDur.ir);
};

var rampToSlope = { |phase|
	var history = Delay1.ar(phase);
	var delta = (phase - history);
	delta.wrap(-0.5, 0.5);
};

var getSubSampleOffset = { |phase, trig|
	var slope = rampToSlope.(phase);
	var sampleCount = phase - (slope < 0) / slope;
	Latch.ar(sampleCount, trig);
};

var accumulatorSubSample = { |trig, subSampleOffset|
	var accum = Duty.ar(SampleDur.ir, trig, Dseries(0, 1));
	accum + subSampleOffset;
};

var getRandomPeriods = { |rate, randomness|
	var randomPeriod = Ddup(2, (2 ** (Dwhite(-1.0, 1.0) * randomness))) / rate;
	Duty.ar(randomPeriod, DC.ar(0), 1 / randomPeriod);
};

~rampToRandom_NEW = {
	var randomPeriod, trig, accum, slope, phase;
	var stepSlope, stepTrigger, subSampleOffset, accumulator;
	var windowSlope, windowPhase;

	randomPeriod = getRandomPeriods.(\rate.kr(500), \randomness.kr(1));
	trig = Changed.ar(randomPeriod) > 0 + Impulse.ar(0);
	accum = Duty.ar(SampleDur.ir, trig, Dseries(0, 1));
	slope = Latch.ar(randomPeriod, trig) * SampleDur.ir;
	phase = (slope * accum - SampleDur.ir).wrap(0, 1);
	
	stepTrigger = rampToTrig.(phase);
	stepSlope = rampToSlope.(phase);
	
	subSampleOffset = getSubSampleOffset.(phase, stepTrigger);
	accumulator = accumulatorSubSample.(stepTrigger, subSampleOffset);
	
	windowSlope = Latch.ar(stepSlope, stepTrigger) / max(0.001, \overlap.kr(1));
	windowPhase = (windowSlope * accumulator).clip(0, 1).wrap(0, 1);
	
	[phase, windowPhase, stepTrigger];
};

~zoomIn.(~rampToRandom_NEW, 3);
)

Getting surgical here, but I have figured it out now. Let's go through it step by step:

1.) The Duty for the randomPeriods should start right away on initialisation:

var getRandomPeriods = { |rate, randomness|
	var randomPeriod = Ddup(2, (2 ** (Dwhite(-1.0, 1.0) * randomness))) / rate;
	Duty.ar(randomPeriod, DC.ar(0), 1 / randomPeriod);
};

2.) The triggers derived from the randomPeriods should output a trigger on the next random period, therefore you have to use (current - history).abs > 0.5 or Changed.ar(randomPeriod, 0.5) or Changed.ar(randomPeriod) > 0, which outputs a trigger for random periods which are bigger than the value before or smaller than the value before:

next value is bigger:

next value is smaller:

You have to add an initial trigger with Impulse.ar(0) because getRandomPeriods outputs an initial value, so you can't derive a trigger for it via (current - history).abs > 0.5, Changed.ar(randomPeriod, 0.5) or Changed.ar(randomPeriod) > 0.

3.) The accumulator to be used with the latched slope per derived trigger from the random periods should start from -1:

accum = Duty.ar(SampleDur.ir, trig, Dseries(-1, 1)) * (PulseCount.ar(trig) > 0);

and the slope to accumulate the subdivided phases should be latched with the derived trigger from the random periods, so that changing the trigger frequency mid-grain doesn't distort the phase:

slope = Latch.ar(randomPeriod, trig) * SampleDur.ir;

Then you multiply the accumulator by the latched slope and wrap it between 0 and 1:

phase = (slope * accum).wrap(0, 1);

If you do that, the scheduling phasor (here Duty starting from -1, multiplied by the latched slope derived from getRandomPeriods), the accumulated windowPhase (the latched stepSlope derived via rampToSlope, with a trigger derived via rampToTrig, multiplied by the accumulator) and all of the triggers are perfectly in sync.

Latched slope from the scheduling phasor and latched stepSlope for the accumulated window phase (correctly half a sample off):

Derived triggers from randomPeriods and derived triggers from the scheduling phasor (correctly half a sample off):

Accumulated windowPhase and derived triggers:

(
var rampToTrig = { |phase|
	var history = Delay1.ar(phase);
	var delta = (phase - history);
	var sum = (phase + history);
	var trig = (delta / sum).abs > 0.5;
	Trig1.ar(trig, SampleDur.ir);
};

var rampToSlope = { |phase|
	var history = Delay1.ar(phase);
	var delta = (phase - history);
	delta.wrap(-0.5, 0.5);
};

var getSubSampleOffset = { |phase, trig|
	var slope = rampToSlope.(phase);
	var sampleCount = phase - (slope < 0) / slope;
	Latch.ar(sampleCount, trig);
};

var accumulatorSubSample = { |trig, subSampleOffset|
	var accum = Duty.ar(SampleDur.ir, trig, Dseries(0, 1)) * (PulseCount.ar(trig) > 0);
	accum + subSampleOffset;
};

var getRandomPeriods = { |rate, randomness|
	var randomPeriod = Ddup(2, (2 ** (Dwhite(-1.0, 1.0) * randomness))) / rate;
	Duty.ar(randomPeriod, DC.ar(0), 1 / randomPeriod);
};

x = {
	var initTrigger, randomPeriod, trig, accum, slope, phase;
	var stepSlope, stepTrigger, subSampleOffset, accumulator;
	var windowSlope, windowPhase;

	initTrigger = Impulse.ar(0);
	randomPeriod = getRandomPeriods.(\rate.kr(500), \randomness.kr(1));
	trig = Changed.ar(randomPeriod, 0.5) + initTrigger;
	// start Dseries from - 1
	accum = Duty.ar(SampleDur.ir, trig, Dseries(-1, 1)) * (PulseCount.ar(trig) > 0);
	slope = Latch.ar(randomPeriod, trig) * SampleDur.ir;
	// dont subtract SampleDur.ir
	phase = (slope * accum).wrap(0, 1);

	stepTrigger = rampToTrig.(phase);
	stepSlope = rampToSlope.(phase);

	subSampleOffset = getSubSampleOffset.(phase, stepTrigger);
	accumulator = accumulatorSubSample.(stepTrigger, subSampleOffset);

	windowSlope = Latch.ar(stepSlope, stepTrigger) / max(0.001, \overlap.kr(1));
	windowPhase = (windowSlope * accumulator).clip(0, 1).wrap(0, 1);

	[phase, windowPhase]; // phase and accumulated window phase perfectly in sync when starting Duty from -1 !!!!
	//[windowPhase, stepTrigger];

	//[randomPeriod / 1000, trig]; // triggers are correctly derived from randomPeriods !!!!
	//[slope * 50, Latch.ar(stepSlope * 50, stepTrigger)]; // derived slope half a sample later, correct !!!
	//[trig, stepTrigger]; // derived trigger half a sample later, correct !!!
};//.plot(0.0002);

~zoomIn.(x, 2);
)

Here with the multichannel setup:

(
var rampToTrig = { |phase|
	var history = Delay1.ar(phase);
	var delta = (phase - history);
	var sum = (phase + history);
	var trig = (delta / sum).abs > 0.5;
	Trig1.ar(trig, SampleDur.ir);
};

var rampToSlope = { |phase|
	var history = Delay1.ar(phase);
	var delta = (phase - history);
	delta.wrap(-0.5, 0.5);
};

var multiChannelTrigger = { |numChannels, trig|
	numChannels.collect{ |chan|
		PulseDivider.ar(trig, numChannels, numChannels - 1 - chan);
	};
};

var getSubSampleOffset = { |phase, trig|
	var slope = rampToSlope.(phase);
	var sampleCount = phase - (slope < 0) / slope;
	Latch.ar(sampleCount, trig);
};

var accumulatorSubSample = { |trig, subSampleOffset|
	var accum = Duty.ar(SampleDur.ir, trig, Dseries(0, 1));
	accum + subSampleOffset;
};

var multiChannelAccumulator = { |triggers, subSampleOffsets|
	triggers.collect{ |localTrig, i|
		var hasTriggered = PulseCount.ar(localTrig) > 0;
		var localAccum = accumulatorSubSample.(localTrig, subSampleOffsets[i]);
		localAccum * hasTriggered;
	};
};

var multiChannelDemand = { |triggers, demandUgen, paramRange|
	var demand = demandUgen;
	triggers.collect{ |localTrig|
		Demand.ar(localTrig, 0, demand)
	}.linexp(0, 1, paramRange[0], paramRange[1]);
};

var getRandomPeriods = { |rate, randomness|
	var randomPeriod = Ddup(2, (2 ** (Dwhite(-1.0, 1.0) * randomness))) / rate;
	Duty.ar(randomPeriod, DC.ar(0), 1 / randomPeriod);
};

var rampToRandom = { |rate, randomness|
	var initTrigger = Impulse.ar(0);
	var randomPeriod = getRandomPeriods.(rate, randomness);
	var trig = Changed.ar(randomPeriod) > 0 + initTrigger;
	var accum = Duty.ar(SampleDur.ir, trig, Dseries(-1, 1)) * (PulseCount.ar(trig) > 0);
	var slope = Latch.ar(randomPeriod, trig) * SampleDur.ir;
	(slope * accum).wrap(0, 1);
};

{
	var numChannels = 5;

	var randomness, rateMod, rate, stepPhase, stepTrigger, stepSlope, subSampleOffsets;
	var triggers, accumulator, overlaps, maxOverlaps;
	var windowSlopes, windowPhases;

	randomness = \randomness.kr(1);

	stepPhase = rampToRandom.(\rate.kr(500), randomness);
	stepTrigger = rampToTrig.(stepPhase);
	stepSlope = rampToSlope.(stepPhase);

	// distribute triggers round-robin across the channels
	triggers = multiChannelTrigger.(numChannels, stepTrigger);

	// calculate sub-sample offset per multichannel trigger
	subSampleOffsets = getSubSampleOffset.(stepPhase, triggers);

	// create a multichannel accumulator with sub-sample accuracy
	accumulator = multiChannelAccumulator.(triggers, subSampleOffsets);

	overlaps = multiChannelDemand.(triggers, Dwhite(0, 1), \overlapRange.kr([1, 1]));
	maxOverlaps = min(overlaps, 2 ** randomness.neg * numChannels);

	windowSlopes = Latch.ar(stepSlope, triggers) / max(0.001, Latch.ar(maxOverlaps, triggers));
	windowPhases = (windowSlopes * accumulator).clip(0, 1);

	windowPhases.wrap(0, 1);

}.plot(0.021);
)

amazing, sounds really great after trying it out!! also with the implemented tradeoff for overlap!

@dietcv I didn't have the time to follow all the paths you did. Do you plan to publish it as a blog post? There is a lot of material and I get lost sometimes))

Thanks for sharing, well done

After getting the plots right, I have tested the audio and unfortunately it's awful (without any randomness, just regular periods).
I guess the trigger derived from Duty which resets the scheduling phasor (hard sync) causes this nasty behaviour. But you have to use a trigger to reset the scheduling phasor for every new period, otherwise the triggers are not correctly distributed across the channels.
You can uncomment the line with the Phasor inside the rampToRandom function (for no hard sync), but without resetting the scheduling ramps the triggers are not correctly distributed across the channels.

(
var rampToTrig = { |phase|
	var history = Delay1.ar(phase);
	var delta = (phase - history);
	var sum = (phase + history);
	var trig = (delta / sum).abs > 0.5;
	Trig1.ar(trig, SampleDur.ir);
};

var rampToSlope = { |phase|
	var history = Delay1.ar(phase);
	var delta = (phase - history);
	delta.wrap(-0.5, 0.5);
};

var multiChannelTrigger = { |numChannels, trig|
	numChannels.collect{ |chan|
		PulseDivider.ar(trig, numChannels, numChannels - 1 - chan);
	};
};

var getSubSampleOffset = { |phase, trig|
	var slope = rampToSlope.(phase);
	var sampleCount = phase - (slope < 0) / slope;
	Latch.ar(sampleCount, trig);
};

var accumulatorSubSample = { |trig, subSampleOffset|
	var accum = Duty.ar(SampleDur.ir, trig, Dseries(0, 1));
	accum + subSampleOffset;
};

var multiChannelAccumulator = { |triggers, subSampleOffsets|
	triggers.collect{ |localTrig, i|
		var hasTriggered = PulseCount.ar(localTrig) > 0;
		var localAccum = accumulatorSubSample.(localTrig, subSampleOffsets[i]);
		localAccum * hasTriggered;
	};
};

var multiChannelDemand = { |triggers, demandUgen, paramRange|
	var demand = demandUgen;
	triggers.collect{ |localTrig|
		Demand.ar(localTrig, 0, demand)
	}.linexp(0, 1, paramRange[0], paramRange[1]);
};

var getRandomPeriods = { |rate, randomness|
	var randomPeriod = Ddup(2, (2 ** (Dwhite(-1.0, 1.0) * randomness))) / rate;
	Duty.ar(randomPeriod, DC.ar(0), 1 / randomPeriod);
};

var rampToRandom = { |rate, randomness|
	var initTrigger = Impulse.ar(0);
	var randomPeriod = getRandomPeriods.(rate, randomness);
	var trig = Changed.ar(randomPeriod, 0.5) + initTrigger;
	var accum = Duty.ar(SampleDur.ir, trig, Dseries(-1, 1)) * (PulseCount.ar(trig) > 0);
	var slope = Latch.ar(randomPeriod, trig) * SampleDur.ir;
	(slope * accum).wrap(0, 1);
	//(Phasor.ar(DC.ar(0), slope) - SampleDur.ir).wrap(0, 1);
};

var sndBuf = Buffer.loadCollection(s, Signal.sineFill(4096, [1,0], 0!2));

{
	var numChannels = 5;

	var randomness, rateMod, rate, stepPhase, stepTrigger, stepSlope, subSampleOffsets;
	var triggers, accumulator, overlaps, maxOverlaps;
	var windowSlopes, windowPhases;
	var grainWindows, grainSlope, grainPhases;
	var sigs, sig;

	randomness = \randomness.kr(0);

	stepPhase = rampToRandom.(1000, randomness);
	stepTrigger = rampToTrig.(stepPhase);
	stepSlope = rampToSlope.(stepPhase);

	// distribute triggers round-robin across the channels
	triggers = multiChannelTrigger.(numChannels, stepTrigger);

	// calculate sub-sample offset per multichannel trigger
	subSampleOffsets = getSubSampleOffset.(stepPhase, triggers);

	// create a multichannel accumulator with sub-sample accuracy
	accumulator = multiChannelAccumulator.(triggers, subSampleOffsets);

	overlaps = multiChannelDemand.(triggers, Dwhite(0, 1), \overlapRange.kr([1, 1]));
	maxOverlaps = min(overlaps, 2 ** randomness.neg * numChannels);

	windowSlopes = Latch.ar(stepSlope, triggers) / max(0.001, Latch.ar(maxOverlaps, triggers));
	windowPhases = (windowSlopes * accumulator).clip(0, 1);

	grainWindows = IEnvGen.ar(Env([0, 1, 0], [0.5, 0.5], \sin), windowPhases);

	grainSlope = 2000 * SampleDur.ir;
	grainPhases = (grainSlope * accumulator).wrap(0, 1);

	sigs = sin(grainPhases * 2pi);
	sigs = sigs * grainWindows;

	sig = PanAz.ar(2, sigs, \pan.kr(0));
	sig = sigs.sum;

	sig = LeakDC.ar(sig);

	sig!2 * 0.1;

}.play;
)

Ah, really, that's unfortunate :/ I was enjoying the randomness behaviour quite a bit, but with buffered noisy material, so that wasn't a telling test really. …Was just now going to proceed with more tonal stuff.
Edit: I think I see what you mean, there's a kind of zipping sound when modulating the rampToRandom rate.

Open s.freqscope, run the example from above, and you can see and hear the unstable behaviour; then run it again with the Phasor line uncommented.

I think I have figured out at least one of the issues. If you set randomness to 0, Changed won't output a trigger because randomPeriod is a flat line, so the accumulator won't reset perfectly on time. And because the accumulator (slope * accum).wrap(0, 1) is wrapped between 0 and 1, the wrapping takes over, which gets out of sync over a longer period of time, and at a trigger rate of 1000 time passes pretty fast. We have discussed that in the thread about deriving LFOs of slower duration than the original phasor, I guess.
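
A quick toy check of that point (my own sketch, reusing the getRandomPeriods helper from the examples above): with randomness = 0 the period stream is a flat line and Changed never fires, which is also why the examples need the explicit initial Impulse.ar(0).

(
var getRandomPeriods = { |rate, randomness|
	var randomPeriod = Ddup(2, (2 ** (Dwhite(-1.0, 1.0) * randomness))) / rate;
	Duty.ar(randomPeriod, DC.ar(0), 1 / randomPeriod);
};

{
	// channel 1: randomness = 0 -> flat period stream -> no triggers from Changed
	// channel 2: randomness = 1 -> period changes every period -> one trigger per period
	var flatPeriods = getRandomPeriods.(1000, 0);
	var randomPeriods = getRandomPeriods.(1000, 1);
	[Changed.ar(flatPeriods), Changed.ar(randomPeriods)];
}.plot(0.02);
)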


Damn, this is absolutely huge!!!
I have figured out a way to create linear ramps between 0 and 1 with random periods with just one Duty, so everything is demand rate. Maybe this helps to have the scheduling triggers perfectly in sync.

(
var rampToRandom = { |rate, randomness|
	var randomPeriod = Ddup(2, 2 ** (Dwhite(-1.0, 1.0) *  randomness)) / rate;
	Duty.ar(
		// update every sample
		dur: SampleDur.ir,
		// reset every randomPeriod
		reset: randomPeriod,
		// "sample and hold" increment of (SampleDur.ir / randomPeriod) for every randomPeriod
		// to increment for each sample by the sample count multiplied by slope modulus 1
		level: Ddup(SampleRate.ir, SampleDur.ir / randomPeriod) * Dseries(0, 1) % 1
	);
	
};

{
	rampToRandom.(\rate.kr(200), \randomness.kr(1));
}.plot(0.021);
)

I'm happy if you prove me wrong, but after trying a lot of different attempts at linear ramps from 0 to 1 with random periods, I will wrap it up here: none of these can or will work in SC, and I will explain why:

The scheduling phasor (your clock) should be a continuous ramp between 0 and 1.
In this picture, which I have already shared above, you see that the scheduling phasor has a non-zero value at the moment it wraps from 1 to 0. This value (the sub-sample offset) is calculated for each sample frame at which the scheduling phasor wraps, to reset your accumulator to the sub-sample offset, which then drives your grain window for sub-sample accurate granulation.

1.) Your scheduling phasor only has a non-zero value at the moment it wraps around if it's not being reset to 0 by a trigger, and that value is needed to calculate the sub-sample offset. Therefore you cannot reset it by a trigger.

2.) To have precise timing and distribute your linear phases between 0 and 1 correctly round-robin across the channels, the phase wrap of your scheduling phasor and the new slope derived from the random periods have to be 100% in sync. The only way you can ensure this in SC is to reset your scheduling phasor, which doesn't fulfill the first requirement.

3.) In SC calculations are done with 32-bit floats. Every attempt which needs to calculate the new slope of your scheduling phasor on a per-sample level (e.g. Duty) will therefore cause floating-point errors which accumulate over time and give you nasty sounding aliasing (I have tested that with no randomness for rates which divide your sample rate into perfect integers (works, of course) and tried the alternative of rounding your samples per period using .round or .floor, which changes the pitch and isn't as sub-sample accurate as using an ordinary Phasor for the case of no randomness).

The attempts I have tried are:

  • use Duty for random periods, derive a trigger from it with Changed, latch the random periods with that trigger and reset Sweep, plugging the random periods in as an argument directly (doesn't fulfill the first requirement, and Sweep currently has a bug of not starting from sample count 0, so it is unreliable for testing)
  • use Duty for random periods, derive a trigger from it with Changed, latch the random periods with that trigger to be used as an argument for Phasor, without any reset of Phasor (doesn't fulfill the second requirement)
  • use Duty for random periods, derive a trigger from it with Changed, latch the random periods with that trigger to be used as an argument for Phasor, and reset Phasor with that trigger (for randomness > 0 this attempt is timing-inaccurate and therefore doesn't distribute the phases correctly across the channels, and it doesn't fulfill the first requirement)
  • use Duty for random periods, derive a trigger from it with Changed, latch the random periods with that trigger and reset your accumulator multiplied by that random period (doesn't fulfill the first and the third requirement)
  • use Duty for random periods and to accumulate samples multiplied by your random periods, outputting linear ramps between 0 and 1 inside Duty directly, either reset by the random periods themselves or by TDuty (doesn't fulfill the first and the third requirement)
  • use Duty for random periods and to accumulate samples multiplied by your random periods, outputting linear ramps between 0 and 1 inside Duty directly, not reset by a trigger but with modulus 1 (doesn't fulfill the second and the third requirement)

The ways I know to achieve the desired behaviour of your scheduling phasor are to derive a trigger from its own wrap to increment a counter which looks up the next value in a list to be used as its own next slope, or to latch a modulation of its own input. Both are only possible with a single-sample feedback loop (a conceptual sketch follows below).
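
(
// conceptual sketch (my own, not from the post): the "counter that looks up the next
// value in a list" idea, written with a LocalIn/LocalOut feedback loop. As discussed,
// this only behaves as intended with a server block size of 1 (s.options.blockSize = 1),
// like the example at the very end of the thread.
var rampToTrig = { |phase|
	var history = Delay1.ar(phase);
	var delta = (phase - history);
	var sum = (phase + history);
	var trig = (delta / sum).abs > 0.5;
	Trig1.ar(trig, SampleDur.ir);
};

{
	var trig = LocalIn.ar(1) + Impulse.ar(0);
	// the phasor's own wrap trigger demands the next period from a list,
	// which then becomes the phasor's next slope
	var period = Demand.ar(trig, 0, Dseq([0.002, 0.001, 0.004], inf));
	var phase = Phasor.ar(DC.ar(0), SampleDur.ir / period);
	LocalOut.ar(rampToTrig.(phase));
	[phase, trig];
}.plot(0.02);
)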

So yeah, enjoy your sub-sample accurate granulation, but without random periods :)

I've made some more experiments, and it seems I have found a solution which fulfills all three requirements from above for modulating the trigger frequency and getting linear ramps between 0 and 1 which are correctly distributed round-robin across the channels. The requirements once more are:

1.) no phase reset of the grain scheduling ramp (otherwise no sub-sample offset calculation is possible and the output is unstable)
2.) correct round-robin distribution across the channels
3.) no slope calculation on the sample level (otherwise 32-bit float rounding errors)

In this rampAndHold function I have omitted the Dwhite part and plugged the trigger rate into Duty directly, to modulate the trigger frequency with arbitrary LFOs. The important part is to delay the derived stepTrigger for the trigger distribution (multiChannelTrigger) and the stepPhase for the subSampleOffset calculation (getSubSampleOffset) by 2 samples via Delay2, but not the stepPhase for the calculation of the derived slope via rampToSlope.
I'm not completely sure why that's working, but on some plots of the grain scheduling phasor you see a small kink in the phase right after the wrap, and sometimes the derived trigger from Changed is one sample late (I think because the triggers derived via Changed and Duty are not 100% in sync). I guess directly after the kink would be the correct phase for deriving our slope. So if you delay the multiChannelTrigger and the phase for the subSampleOffset calculation by 2 samples, we are out of this unstable region.

Here is the plot of the example, showing that the windowPhases are correctly distributed round-robin across the channels (in this case the modDepth param is also limited to 1, which results in a range between 0.5 and 2 for bipolar modulators when implemented with 2 ** (mod * index)). With this configuration you can then also calculate the maxOverlap as a tradeoff via 2 ** modDepth.neg * numChannels.

(
var rampToTrig = { |phase|
	var history = Delay1.ar(phase);
	var delta = (phase - history);
	var sum = (phase + history);
	var trig = (delta / sum).abs > 0.5;
	Trig1.ar(trig, SampleDur.ir);
};

var rampToSlope = { |phase|
	var history = Delay1.ar(phase);
	var delta = (phase - history);
	delta.wrap(-0.5, 0.5);
};

var multiChannelTrigger = { |numChannels, trig|
	numChannels.collect{ |chan|
		PulseDivider.ar(trig, numChannels, numChannels - 1 - chan);
	};
};

var getSubSampleOffset = { |phase, trig|
	var slope = rampToSlope.(phase);
	var sampleCount = phase - (slope < 0) / slope;
	Latch.ar(sampleCount, trig);
};

var accumulatorSubSample = { |trig, subSampleOffset|
	var accum = Duty.ar(SampleDur.ir, trig, Dseries(0, 1));
	accum + subSampleOffset;
};

var multiChannelAccumulator = { |triggers, subSampleOffsets|
	triggers.collect{ |localTrig, i|
		var hasTriggered = PulseCount.ar(localTrig) > 0;
		var localAccum = accumulatorSubSample.(localTrig, subSampleOffsets[i]);
		localAccum * hasTriggered;
	};
};

var multiChannelDemand = { |triggers, demandUgen, paramRange|
	var demand = demandUgen;
	triggers.collect{ |localTrig|
		Demand.ar(localTrig, 0, demand)
	}.linexp(0, 1, paramRange[0], paramRange[1]);
};

var rampAndHold = { |rate|
	var period = Duty.ar(1 / rate, DC.ar(0), rate);
	var trig = Changed.ar(period) > 0 + Impulse.ar(0);
	var slope = Latch.ar(period, trig) * SampleDur.ir;
	Phasor.ar(DC.ar(0), slope);
};

{
	var numChannels = 5;

	var tFreqMod, modDepth, tFreq, stepPhase, stepTrigger, stepSlope, subSampleOffsets;
	var triggers, accumulator, overlap, maxOverlap;
	var windowSlopes, windowPhases;
	var grainWindows, grainSlope, grainPhases;
	var sigs, sig, modulator;
	
	modDepth = \modMD.kr(1);

	tFreqMod = 2 ** (SinOsc.ar(\modMF.kr(50)) * modDepth);
	tFreq = \tFreq.kr(500) * tFreqMod;
	
	stepPhase = rampAndHold.(tFreq);
	stepTrigger = rampToTrig.(stepPhase);
	stepSlope = rampToSlope.(stepPhase);

	// distribute triggers round-robin across the channels
	triggers = multiChannelTrigger.(numChannels, Delay2.ar(stepTrigger));

	// calculate sub-sample offset per multichannel trigger
	subSampleOffsets = getSubSampleOffset.(Delay2.ar(stepPhase), triggers);

	// create a multichannel accumulator with sub-sample accuracy
	accumulator = multiChannelAccumulator.(triggers, subSampleOffsets);

	overlap = multiChannelDemand.(triggers, Dwhite(0, 1), \overlapRange.kr([1, 1]));
	maxOverlap = min(overlap, 2 ** modDepth.neg * numChannels);

	windowSlopes = Latch.ar(stepSlope, triggers) / max(0.001, maxOverlap);
	windowPhases = (windowSlopes * accumulator).clip(0, 1);
	windowPhases = windowPhases.wrap(0, 1);

}.plot(0.021);
)

Because we don't have a phase reset of the grain scheduling phasor, the output is stable and sub-sample accurate. Here is a test; listen and watch the freqscope:

(
var rampToTrig = { |phase|
	var history = Delay1.ar(phase);
	var delta = (phase - history);
	var sum = (phase + history);
	var trig = (delta / sum).abs > 0.5;
	Trig1.ar(trig, SampleDur.ir);
};

var rampToSlope = { |phase|
	var history = Delay1.ar(phase);
	var delta = (phase - history);
	delta.wrap(-0.5, 0.5);
};

var multiChannelTrigger = { |numChannels, trig|
	numChannels.collect{ |chan|
		PulseDivider.ar(trig, numChannels, numChannels - 1 - chan);
	};
};

var getSubSampleOffset = { |phase, trig|
	var slope = rampToSlope.(phase);
	var sampleCount = phase - (slope < 0) / slope;
	Latch.ar(sampleCount, trig);
};

var accumulatorSubSample = { |trig, subSampleOffset|
	var accum = Duty.ar(SampleDur.ir, trig, Dseries(0, 1));
	accum + subSampleOffset;
};

var multiChannelAccumulator = { |triggers, subSampleOffsets|
	triggers.collect{ |localTrig, i|
		var hasTriggered = PulseCount.ar(localTrig) > 0;
		var localAccum = accumulatorSubSample.(localTrig, subSampleOffsets[i]);
		localAccum * hasTriggered;
	};
};

var multiChannelDemand = { |triggers, demandUgen, paramRange|
	var demand = demandUgen;
	triggers.collect{ |localTrig|
		Demand.ar(localTrig, 0, demand)
	}.linexp(0, 1, paramRange[0], paramRange[1]);
};

var getRandomPeriods = { |rate, randomness|
	var randomPeriod = Ddup(2, (2 ** (Dwhite(-1.0, 1.0) * randomness))) / rate;
	Duty.ar(randomPeriod, DC.ar(0), 1 / randomPeriod);
};

var rampAndHold = { |rate|
	var period = Duty.ar(1 / rate, DC.ar(0), rate);
	var trig = Changed.ar(period) > 0 + Impulse.ar(0);
	var slope = Latch.ar(period, trig) * SampleDur.ir;
	Phasor.ar(DC.ar(0), slope);
};

{
	var numChannels = 5;

	var modDepth, tFreqMod, tFreq, stepPhase, stepTrigger, stepSlope, subSampleOffsets;
	var triggers, accumulator, overlap, maxOverlap;
	var windowSlopes, windowPhases;
	var grainWindows, grainSlope, grainPhases;
	var sigs, sig;

	modDepth = \modMD.kr(0);

	tFreqMod = 2 ** (SinOsc.ar(\modMF.kr(50)) * modDepth);
	tFreq = \tFreq.kr(1013) * tFreqMod;
	
	stepPhase = rampAndHold.(tFreq);
	stepTrigger = rampToTrig.(stepPhase);
	stepSlope = rampToSlope.(stepPhase);

	// distribute triggers round-robin across the channels
	triggers = multiChannelTrigger.(numChannels, Delay2.ar(stepTrigger));

	// calculate sub-sample offset per multichannel trigger
	subSampleOffsets = getSubSampleOffset.(Delay2.ar(stepPhase), triggers);

	// create a multichannel accumulator with sub-sample accuracy
	accumulator = multiChannelAccumulator.(triggers, subSampleOffsets);

	overlap = multiChannelDemand.(triggers, Dwhite(0, 1), \overlapRange.kr([1, 1]));
	maxOverlap = min(overlap, 2 ** modDepth.neg * numChannels);

	windowSlopes = Latch.ar(stepSlope, triggers) / max(0.001, maxOverlap);
	windowPhases = (windowSlopes * accumulator).clip(0, 1);

	grainWindows = IEnvGen.ar(Env([0, 1, 0], [0.5, 0.5], \sin), windowPhases);

	grainSlope = \freq.kr(1868) * SampleDur.ir;
	grainPhases = (grainSlope * accumulator).wrap(0, 1);

	sigs = sin(grainPhases * 2pi);
	sigs = sigs * grainWindows;

	sig = PanAz.ar(2, sigs, \pan.kr(0));
	sig = sigs.sum;

	sig = LeakDC.ar(sig);

	sig!2 * 0.1;

}.play;
)

Will do some more testing!


I couldn't make sense of the two-sample delay and figured out a way where it's just a one-sample delay. The stepPhase has to be delayed by one sample after the rampToSlope calculation (to be used for the multiChannelTrigger and the subSampleOffset calculation):

(
var rampToTrig = { |phase|
	var history = Delay1.ar(phase);
	var delta = (phase - history);
	var sum = (phase + history);
	var trig = (delta / sum).abs > 0.5;
	Trig1.ar(trig, SampleDur.ir);
};

var rampToSlope = { |phase|
	var history = Delay1.ar(phase);
	var delta = (phase - history);
	delta.wrap(-0.5, 0.5);
};

var multiChannelTrigger = { |numChannels, trig|
	numChannels.collect{ |chan|
		PulseDivider.ar(trig, numChannels, numChannels - 1 - chan);
	};
};

var getSubSampleOffset = { |phase, trig|
	var slope = rampToSlope.(phase);
	var sampleCount = phase - (slope < 0) / slope;
	Latch.ar(sampleCount, trig);
};

var accumulatorSubSample = { |trig, subSampleOffset|
	var accum = Duty.ar(SampleDur.ir, trig, Dseries(0, 1));
	accum + subSampleOffset;
};

var multiChannelAccumulator = { |triggers, subSampleOffsets|
	triggers.collect{ |localTrig, i|
		var hasTriggered = PulseCount.ar(localTrig) > 0;
		var localAccum = accumulatorSubSample.(localTrig, subSampleOffsets[i]);
		localAccum * hasTriggered;
	};
};

var multiChannelDemand = { |triggers, demandUgen, paramRange|
	var demand = demandUgen;
	triggers.collect{ |localTrig|
		Demand.ar(localTrig, 0, demand)
	}.linexp(0, 1, paramRange[0], paramRange[1]);
};

var rampAndHold = { |rate|
	var period = Duty.ar(1 / rate, DC.ar(0), rate);
	var trig = Changed.ar(period) > 0 + Impulse.ar(0);
	var slope = Latch.ar(period, trig) * SampleDur.ir;
	Phasor.ar(DC.ar(0), slope);
};

{
	var numChannels = 5;

	var yolo, tFreqMod, modDepth, tFreq, stepPhase, stepTrigger, stepSlope, subSampleOffsets;
	var triggers, accumulator, overlap, maxOverlap;
	var windowSlopes, windowPhases;
	var grainWindows, grainSlope, grainPhases;
	var sigs, sig, modulator;
	
	modDepth = \modMD.kr(1);

	tFreqMod = 2 ** (SinOsc.ar(\modMF.kr(50)) * modDepth);
	tFreq = \tFreq.kr(500) * tFreqMod;
	
	stepPhase = rampAndHold.(tFreq);
	stepSlope = rampToSlope.(stepPhase);
	stepPhase = Delay1.ar(stepPhase);
	stepTrigger = rampToTrig.(stepPhase);
	
	// distribute triggers round-robin across the channels
	triggers = multiChannelTrigger.(numChannels, stepTrigger);

	// calculate sub-sample offset per multichannel trigger
	subSampleOffsets = getSubSampleOffset.(stepPhase, triggers);

	// create a multichannel accumulator with sub-sample accuracy
	accumulator = multiChannelAccumulator.(triggers, subSampleOffsets);

	overlap = multiChannelDemand.(triggers, Dwhite(0, 1), \overlapRange.kr([1, 1]));
	maxOverlap = min(overlap, 2 ** modDepth.neg * numChannels);

	windowSlopes = Latch.ar(stepSlope, triggers) / max(0.001, maxOverlap);
	windowPhases = (windowSlopes * accumulator).clip(0, 1);
	windowPhases = windowPhases.wrap(0, 1);

}.plot(0.041);
)

This probably makes sense if we once more think of the structure we are trying to mimic (but maybe not haha):

I have made some additional tests with blockSize = 1, to exactly replicate this gen version. It's completely the same as using Duty and also needs the stepPhase delayed by one sample after the slope calculation to correctly distribute the triggers:

(
var rampToTrig = { |phase|
	var history = Delay1.ar(phase);
	var delta = (phase - history);
	var sum = (phase + history);
	var trig = (delta / sum).abs > 0.5;
	Trig1.ar(trig, SampleDur.ir);
};

{
	var phase, modulation, trig;

	trig = LocalIn.ar(1) + Impulse.ar(0);

	modulation = 2 ** (WhiteNoise.ar(1) * \randomness.kr(1));
	phase = Phasor.ar(DC.ar(0), \rate.kr(500) * Latch.ar(modulation, trig) * SampleDur.ir);
	trig = rampToTrig.(phase);

	LocalOut.ar(trig);

	[phase, trig];
}.plot(0.02);
)

Interesting, thanks! To be honest I’ve always felt lang-side approaches to granulation to be the most flexible, and usually the most conceptually clear.