SCLang ScopeView prototype

I’d like to scope signals with SC, either internal or external ones. I know ScopeView exists, but unfortunately it seems to be implemented in C++, which I’d rather not touch. I’d like to be able to display RMS/LUFS, and maybe other things like differences between signals, etc.

Here’s a working prototype, which works better than expected. For now it only handles external sources, at one sample per pixel. I need to figure out the best way to display data when the view is zoomed out. If anyone knows how software like Audacity does this, that would help.

To anyone interested,
Regards,
Simon

(
// Parameters
var numChannels = 2;
var plotMaxLength = 2.0;
var externalSource = true;
var inputChannel = 0;

// Advanced parameters
var latencyOffset = 1.2; // not used yet
var server = s;
var chunkSize = 1024;
var rmsWindow = 0.3; // not used yet, reserved for the RMS display
var numChunks = 8;
var viewFps = 60;

// Internal
var win = Window(), view = UserView();
var oscFunc, synth, buffer, bus;
var peaks, rms;

var initBuffer, onBufferReady, updateData, initUI;

// Write the input into the buffer chunk by chunk: Phasor scans one chunk,
// HPZ1 < 0 fires when the phasor wraps, and PulseCount selects which buffer
// partition to write into next. Each completed chunk is announced to sclang
// via SendReply. The three SynthDefs below follow the same scheme.
SynthDef(\externalScopeMono, { |in, out, chunkSize, numChunks|
	var input = SoundIn.ar(in);
	var phase = Phasor.ar(0, 1, 0, chunkSize);
	var trig = HPZ1.ar(phase) < 0;
	var partition = PulseCount.ar(trig) % numChunks;
	BufWr.ar(
		input,
		out,
		phase + (chunkSize * partition)
	);
	SendReply.ar(
		trig,
		'/bufferUpdate',
		partition
	);
}).add;

SynthDef(\internalScopeMono, { |in, out, chunkSize, numChunks|
	var input = In.ar(in);
	var phase = Phasor.ar(0, 1, 0, chunkSize);
	var trig = HPZ1.ar(phase) < 0;
	var partition = PulseCount.ar(trig) % numChunks;
	BufWr.ar(
		input,
		out,
		phase + (chunkSize * partition)
	);
	SendReply.ar(
		trig,
		'/bufferUpdate',
		partition
	);
}).add;

SynthDef(\externalScopeStereo, { |in, out, chunkSize, numChunks|
	var input = SoundIn.ar([in, in + 1]);
	var phase = Phasor.ar(0, 1, 0, chunkSize);
	var trig = HPZ1.ar(phase) < 0;
	var partition = PulseCount.ar(trig) % numChunks;
	BufWr.ar(
		input,
		out,
		phase + (chunkSize * partition)
	);
	SendReply.ar(
		trig,
		'/bufferUpdate',
		partition
	);
}).add;

SynthDef(\internalScopeStereo, { |in, out, chunkSize, numChunks|
	var input = In.ar([in, in + 1]);
	var phase = Phasor.ar(0, 1, 0, chunkSize);
	var trig = HPZ1.ar(phase) < 0;
	var partition = PulseCount.ar(trig) % numChunks;
	BufWr.ar(
		input,
		out,
		phase + (chunkSize * partition)
	);
	SendReply.ar(
		trig,
		'/bufferUpdate',
		partition
	);
}).add;

initUI = {
	view
	.animate_(true)
	.frameRate_(viewFps)
	.drawFunc_({
		var ySize = view.bounds.height / numChannels;
		Pen.strokeColor_(Color.red);
		view.bounds.width.do({ |index|
			numChannels.do({ |chan|
				Pen.moveTo(
					Point(
						index,
						ySize * chan + (ySize / 2)
					)
				);
				Pen.lineTo(
					Point(
						index,
						ySize * chan + (ySize / 2)
						- (
							(ySize / 2)
							* peaks[index * numChannels + chan]
						)
					)
				);
				Pen.stroke;
			});
		});
	});
};

// Shift the history left, then append the newly received chunk at the end
updateData = { |data|
	peaks = peaks.rotate(data.size.neg);
	data.do({ |value, index|
		peaks[peaks.size - data.size + index] = value;
	});
};

onBufferReady = { |b|
	// Reference buffer
	buffer = b;
	// Wait for the SynthDefs to load
	s.sync;
	switch(numChannels)
	{ 1 } {
		if(externalSource) {
			synth = Synth(\externalScopeMono, [
				\in, inputChannel,
				\out, buffer,
				\chunkSize, chunkSize,
				\numChunks, numChunks
			]);
		} {
			synth = Synth(\internalScopeMono, [
				\in, bus,
				\out, buffer,
				\chunkSize, chunkSize,
				\numChunks, numChunks
			]);
		};
	}
	{ 2 } {
		if(externalSource) {
			synth = Synth(\externalScopeStereo, [
				\in, inputChannel,
				\out, buffer,
				\chunkSize, chunkSize,
				\numChunks, numChunks
			]);
		} {
			synth = Synth(\internalScopeStereo, [
				\in, bus,
				\out, buffer,
				\chunkSize, chunkSize,
				\numChunks, numChunks
			]);
		};
	};
	oscFunc = OSCFunc(
		{ |msg|
			// SendReply reports the partition about to be written,
			// so the chunk that just finished is the previous one
			var partition = (msg[3] - 1) % numChunks;
			buffer.getn(
				partition * (chunkSize * numChannels),
				(chunkSize * numChannels),
				updateData
			);
		},
		'/bufferUpdate',
		server.addr
	);
};

initBuffer = {
	// NB: onBufferReady is passed to Buffer.alloc as its completionMessage
	// function, which sclang evaluates while building the alloc message
	// (still inside the waitForBoot routine), so the s.sync inside it is legal
	Buffer.alloc(
		server,
		chunkSize * numChunks,
		numChannels,
		onBufferReady
	);
};

// Boot server
server.waitForBoot({
	// Allocate a private bus when scoping an internal source
	if(externalSource.not) {
		bus = Bus.audio(server, numChannels);
	};
	peaks = Array.fill(
		(
			plotMaxLength
			* server.sampleRate
			* numChannels
		).roundUp.asInteger,
		{ 0 }
	);
	// Reserved for the planned RMS display (not used yet)
	rms = Array.fill(
		(
			plotMaxLength
			* server.sampleRate
			* numChannels
		).roundUp.asInteger,
		{ 0 }
	);
	initUI.value;
	initBuffer.value;
});

win.layout_(
	VLayout()
	.margins_(0)
	.spacing_(0)
	.add(view)
);

win.onClose_({
	synth.free;
	buffer.free;
	oscFunc.clear;
	if(externalSource.not) {
		bus.free;
	};
});

CmdPeriod.doOnce({
	win.close;
});

win.front;
)

I wrote a “minimal” program in C++ and Dear ImGui (JACK API for audio). It displays real-time waveforms from its input.

I wrote a lock-free circular buffer (I’ll “polish” it later) that you can reuse for other things. The annoying parts are already there.

This basic code lets you add what you want without writing the annoying boilerplate. Later I can upload the libraries and CMake files as a git repo. Requires C++20, JACK, Dear ImGui, OpenGL 3, and ImPlot.

Dear ImGui is quite impressive with real-time audio: the visualization refreshes at monitor rate (~16.7 ms per frame at 60 Hz), giving near real-time display of the audio signal.

I hope this helps you build/integrate it into your stuff!

(@Spacechild1 @scztt and others, feel free to do a quick check that I didn’t do something stupid.)


#include "imgui.h"
#include "imgui_impl_glfw.h"
#include "imgui_impl_opengl3.h"
#include "implot.h"
#include <GLFW/glfw3.h>
#include <atomic>
#include <cmath>
#include <cstddef>
#include <jack/jack.h>
#include <memory>
#include <span>
#include <stdio.h>
#include <type_traits>
#include <vector>

template <typename T>
class lock_free_circular_buffer {
public:
    explicit lock_free_circular_buffer(std::size_t size)
        : buffer_(size), max_size_(size), head_(0), tail_(0) {
        static_assert(std::is_arithmetic<T>::value, "T must be a numeric type");
    }

    void push(T value) {
        std::size_t h = head_.load(std::memory_order_relaxed);
        std::size_t next = (h + 1) % max_size_;
        if (next != tail_.load(std::memory_order_acquire)) {
            buffer_[h] = value;
            head_.store(next, std::memory_order_release);
        } else {
            // Buffer full: advance the tail to overwrite the oldest sample.
            std::size_t t = tail_.load(std::memory_order_acquire);
            tail_.store((t + 1) % max_size_, std::memory_order_release);
            buffer_[h] = value;
            head_.store(next, std::memory_order_release);
        }
    }

    bool pop(T& value) noexcept {
        std::size_t tail = tail_.load(std::memory_order_relaxed);
        if (head_.load(std::memory_order_acquire) == tail) {
            return false; 
        }
        value = buffer_[tail];
        tail_.store((tail + 1) % max_size_, std::memory_order_release);
        return true;
    }

    bool peek(std::size_t index, T& value) const noexcept {
        if (index >= size()) {
            return false;
        }
        value = buffer_[(tail_.load(std::memory_order_acquire) + index) % max_size_];
        return true;
    }

    [[nodiscard]] std::size_t size() const noexcept {
        std::size_t head = head_.load(std::memory_order_relaxed);
        std::size_t tail = tail_.load(std::memory_order_relaxed);
        return (head + max_size_ - tail) % max_size_;
    }

    [[nodiscard]] std::size_t capacity() const noexcept { return max_size_; }

    [[nodiscard]] bool is_contiguous() const noexcept {
        return head_.load(std::memory_order_relaxed) >= tail_.load(std::memory_order_relaxed);
    }

    void push_back(const T* first, const T* last) {
        while (first != last) {
            push(*first++);
        }
    }

    // NB: only valid while the stored region is contiguous; once the
    // buffer wraps, this span would run past the end of the storage.
    [[nodiscard]] std::span<const T> get_span() const {
        std::size_t current_tail = tail_.load(std::memory_order_acquire);
        return std::span<const T>(buffer_.data() + current_tail, size());
    }

private:
    std::vector<T> buffer_;
    std::size_t max_size_;
    std::atomic<std::size_t> head_;
    std::atomic<std::size_t> tail_;
};

constexpr float HISTORY_DURATION_SEC = 5.0F;

struct audio_data {
    lock_free_circular_buffer<float> buffer_;
    std::atomic<uint64_t> total_samples_{0};
    int sample_rate_;

    audio_data(std::size_t size, int sr) : buffer_(size), sample_rate_(sr) {}
};

std::unique_ptr<audio_data> global_audio_data;

jack_client_t* client_;
jack_port_t* input_port_;

int jack_callback(jack_nframes_t nframes, void* arg) {
    auto* audio = static_cast<audio_data*>(arg);
    const auto* in =
        static_cast<const jack_default_audio_sample_t*>(jack_port_get_buffer(input_port_, nframes));

    const size_t available_space = audio->buffer_.capacity() - audio->buffer_.size();
    const size_t frames_to_copy = std::min(static_cast<size_t>(nframes), available_space);

    if (audio->buffer_.is_contiguous() && frames_to_copy == nframes) {
        audio->buffer_.push_back(in, in + frames_to_copy);
    } else {
        for (size_t i = 0; i < frames_to_copy; ++i) {
            audio->buffer_.push(in[i]);
        }
    }

    audio->total_samples_.fetch_add(frames_to_copy, std::memory_order_relaxed);

    // Always return 0 (success): a non-zero return tells JACK to
    // deactivate the client, and dropped samples are harmless here.
    return 0;
}

static std::vector<float> x_vals;
static std::vector<float> y_vals;

void show_audio_waveform() {
    if (!global_audio_data) {
        return;
    }

    uint64_t total_samples = global_audio_data->total_samples_.load(std::memory_order_relaxed);
    float current_time = static_cast<float>(total_samples) / global_audio_data->sample_rate_;

    auto buffer_span = global_audio_data->buffer_.get_span();
    std::size_t buffer_size = buffer_span.size();

    if (ImPlot::BeginPlot("Audio Waveform")) {
        ImPlot::SetupAxisLimits(ImAxis_X1, current_time - HISTORY_DURATION_SEC, current_time,
                                ImGuiCond_Always);
        ImPlot::SetupAxisLimits(ImAxis_Y1, -1.0f, 1.0f);

        if (x_vals.size() < buffer_size) {
            x_vals.resize(buffer_size);
            y_vals.resize(buffer_size);
        }

        std::size_t valid_samples = 0;
        for (std::size_t i = 0; i < buffer_size; ++i) {
            float sample;
            if (global_audio_data->buffer_.peek(i, sample)) {
                float sample_time = current_time - (static_cast<float>(buffer_size - i) /
                                                    global_audio_data->sample_rate_);
                x_vals[valid_samples] = sample_time;
                y_vals[valid_samples] = sample;
                ++valid_samples;
            } else {
                break;
            }
        }

        ImPlot::PlotLine("Waveform", x_vals.data(), y_vals.data(), static_cast<int>(valid_samples));
        ImPlot::EndPlot();
    }
}

static void glfw_error_callback(int error, const char* description) {
    fprintf(stderr, "GLFW Error %d: %s\n", error, description);
}

int main(int, char**) {
    glfwSetErrorCallback(glfw_error_callback);
    if (!glfwInit()) {
        return 1;
    }

    const char* glsl_version = "#version 150";
    glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3);
    glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 2);
    glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);
    glfwWindowHint(GLFW_OPENGL_FORWARD_COMPAT, GL_TRUE);
    GLFWwindow* window = glfwCreateWindow(1280, 720, "Audio Waveform", nullptr, nullptr);
    if (window == nullptr) {
        return 1;
    }
    glfwMakeContextCurrent(window);
    glfwSwapInterval(1);

    IMGUI_CHECKVERSION();
    ImGui::CreateContext();
    ImGuiIO& io = ImGui::GetIO();
    io.ConfigFlags |= ImGuiConfigFlags_NavEnableKeyboard;
    io.ConfigFlags |= ImGuiConfigFlags_NavEnableGamepad;
    io.ConfigFlags |= ImGuiConfigFlags_DockingEnable;
    io.ConfigFlags |= ImGuiConfigFlags_ViewportsEnable;

    ImGui::StyleColorsDark();
    ImGui_ImplGlfw_InitForOpenGL(window, true);
    ImGui_ImplOpenGL3_Init(glsl_version);
    ImPlot::CreateContext();

    const char* client_name = "audio_waveform";
    jack_options_t options = JackNullOption;
    jack_status_t status;
    client_ = jack_client_open(client_name, options, &status, nullptr);
    if (!client_) {
        fprintf(stderr, "Could not create JACK client\n");
        return 1;
    }

    int sample_rate = jack_get_sample_rate(client_);
    std::size_t buffer_size = static_cast<std::size_t>(sample_rate * HISTORY_DURATION_SEC);

    try {
        global_audio_data = std::make_unique<audio_data>(buffer_size, sample_rate);
    } catch (const std::bad_alloc& e) {
        fprintf(stderr, "Failed to allocate AudioData: %s\n", e.what());
        jack_client_close(client_);
        return 1;
    }

    jack_set_process_callback(client_, jack_callback, global_audio_data.get());
    input_port_ = jack_port_register(client_, "input", JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput, 0);
    if (!input_port_) {
        fprintf(stderr, "Could not register input port\n");
        jack_client_close(client_);
        return 1;
    }

    if (jack_activate(client_)) {
        fprintf(stderr, "Cannot activate client\n");
        jack_client_close(client_);
        return 1;
    }

    const char** ports =
        jack_get_ports(client_, nullptr, nullptr, JackPortIsPhysical | JackPortIsOutput);
    if (!ports) {
        fprintf(stderr, "No available physical ports\n");
        jack_client_close(client_);
        return 1;
    }

    if (jack_connect(client_, ports[0], jack_port_name(input_port_))) {
        fprintf(stderr, "Cannot connect input port\n");
    }

    jack_free(ports);

    while (!glfwWindowShouldClose(window)) {
        glfwPollEvents();

        ImGui_ImplOpenGL3_NewFrame();
        ImGui_ImplGlfw_NewFrame();
        ImGui::NewFrame();

        ImGui::Begin("Audio Waveform Visualizer");
        show_audio_waveform();
        ImGui::End();

        ImGui::Render();
        int display_w, display_h;
        glfwGetFramebufferSize(window, &display_w, &display_h);
        glViewport(0, 0, display_w, display_h);
        glClearColor(0.1f, 0.1f, 0.1f, 1.00f);
        glClear(GL_COLOR_BUFFER_BIT);
        ImGui_ImplOpenGL3_RenderDrawData(ImGui::GetDrawData());

        if (io.ConfigFlags & ImGuiConfigFlags_ViewportsEnable) {
            GLFWwindow* backup_current_context = glfwGetCurrentContext();
            ImGui::UpdatePlatformWindows();
            ImGui::RenderPlatformWindowsDefault();
            glfwMakeContextCurrent(backup_current_context);
        }

        glfwSwapBuffers(window);
    }

    jack_client_close(client_);

    ImGui_ImplOpenGL3_Shutdown();
    ImGui_ImplGlfw_Shutdown();
    ImPlot::DestroyContext();
    ImGui::DestroyContext();

    glfwDestroyWindow(window);
    glfwTerminate();

    return 0;
}

In case it’s useful, there’s a class ScaledUserViewContainer in the wslib quark which gives you zoom controls on a UserView.

Nice, I’ll have a look!

I was referring to the way samples are averaged when there’s more than one per pixel. What should be drawn?

I used to think Audacity displayed an ‘RMS’ value on top of the sample peaks. But there are two definitions. In DAWs, I think the RMS meter displays the last 300 ms by default (at least REAPER does); that’s an ‘audio perception RMS’. But in Audacity, the RMS is simply the RMS of the samples under each pixel: it doesn’t correspond to a 300 ms window, it can be shorter or longer. That’s the ‘standard RMS’.

I think displaying the first one, regardless of samples per pixel, makes more sense? Using visual hints to figure out what can be improved in terms of sound design is quite new to me.
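
Since the distinction matters for what gets drawn, here’s a minimal language-side sketch of both definitions over a plain array (the 440 Hz test signal and the 400-pixel width are arbitrary assumptions):

(
var sampleRate = 48000;
var samples = Array.fill(sampleRate, { |i| sin(2pi * 440 * i / sampleRate) * 0.5 });
// 'standard RMS' (Audacity-style): one value per pixel,
// window length = samples per pixel
var perPixelRms = { |arr, width|
	var perPixel = (arr.size / width).max(1).asInteger;
	arr.clump(perPixel).collect({ |chunk|
		(chunk.squared.sum / chunk.size).sqrt
	});
};
// 'perceptual RMS': fixed 300 ms window ending at each pixel
var windowedRms = { |arr, windowSeconds, width|
	var windowSize = (windowSeconds * sampleRate).asInteger;
	var step = (arr.size / width).max(1).asInteger;
	(0, step .. arr.size - 1).collect({ |end|
		var slice = arr.copyRange((end - windowSize + 1).max(0), end);
		(slice.squared.sum / slice.size).sqrt
	});
};
perPixelRms.(samples, 400).maxItem.postln;
windowedRms.(samples, 0.3, 400).maxItem.postln;
)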

See also SoundFileView, which has zoom and scroll and displays RMS:
https://docs.supercollider.online/Classes/SoundFileView.html

I’m not sure how quickly that will update - haven’t tested.
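
Basic usage looks something like this (a sketch based on the documented interface; the bundled a11wlk01.wav path is an assumption about the install):

(
var path = Platform.resourceDir +/+ "sounds/a11wlk01.wav";
var file = SoundFile.openRead(path);
var win = Window("SoundFileView", Rect(100, 100, 640, 220)).front;
var view = SoundFileView(win, Rect(10, 10, 620, 200));
view.soundfile = file;          // assign the opened file
view.read(0, file.numFrames);   // load peak data for display
view.refresh;
)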

Maybe you have already seen this? Efficient rendering of waveforms - General Audio Programming - Audacity Forum

At least it’s a pointer toward where to look in the Audacity source code…

Thanks for the help! Now I need to improve my C++ abilities (which shouldn’t be too complicated, because I ain’t got none).

A 2002 paper by Audacity’s creators: A Fast Data Structure for Disk-Based Audio Editing. It doesn’t really go into details, so studying the source code is needed to fully understand how everything works. The idea is that storing audio in a Sequence data structure speeds up operations and caching.

Those blocks can store between k and 2k samples, so when some audio is deleted, for example, a leftover block that falls below k samples can be merged with an adjacent block; since the merged result stays under 3k samples, it can always be re-split into two valid blocks if it exceeds 2k.

I don’t think block-size variability is interesting in terms of scoping (we’re not deleting/adding audio), but caching each block’s sum, min and max definitely helps later on with dynamically displaying the data.

I think the blocks Christopher is talking about are visual blocks, i.e. display resolution, which I think is what you are also after? So like, if you have more than one sample per pixel, find the min and max and draw a vertical line from the min to the max.

here’s my attempt at this algorithm:

~buf = Buffer.alloc(s, 1024*8, 1);

( // synth
{
  var buf = ~buf;
  var in = SoundIn.ar(0);
  RecordBuf.ar(in, buf); // RecordBuf loops over the buffer by default
}.play;
)

( // gui
~win = Window("Scope").front.alwaysOnTop_(true);
~view = UserView(~win, ~win.bounds.copy.origin_(0@0)).resize_(5);
)

( // drawing each sample, scaled to fit
~view.drawFunc_({ |view|
  var width = view.bounds.width;
  var height = view.bounds.height;
  var arr = ~arr ? FloatArray[]; // guard: ~arr is nil until the first buffer read
  var size = arr.size;
  arr.do { |n, i|
    var x = i / size * width;
    var y = n.linlin(-1, 1, height, 0);
    var yzero = 0.5 * height;
    Pen.addRect(Rect(x, min(y, yzero), size.reciprocal * width, abs(yzero - y)));
  };
  Pen.color = Color.gray(0.5);
  Pen.fill;
})
)

( // scope for a little while
fork {
  100.do {
    ((1024*8) / 48000).wait; // one buffer duration, assuming 48 kHz
    ~buf.loadToFloatArray(action: { |arr| ~arr = arr; defer { ~view.refresh } });
  };
};
)

( // now try again but reducing sample points to min/max per pixel
~view.drawFunc_({ |view|
  var width = view.bounds.width;
  var height = view.bounds.height;
  
  var thisArr = ~arr ? FloatArray[]; // guard: ~arr is nil until the first buffer read
  var size = thisArr.size;
  if (size > width) {
    // start each pixel at [inf, -inf] so the first sample always wins
    var newArr = [inf, inf.neg].dup(width);
    thisArr.do { |n, i|
      var newI = (i / size * width).floor;
      var min, max;
      #min, max = newArr[newI];
      min = min(min, n);
      max = max(max, n);
      newArr[newI] = [min, max];
    };
    newArr.do { |arr, i|
      var ymin, ymax;
      var min, max;
      #min, max = arr;
      // larger sample values map to smaller y, so ymax is the rect's top
      ymin = min.linlin(-1, 1, height, 0);
      ymax = max.linlin(-1, 1, height, 0);
      Pen.addRect(Rect(i, ymax, 1, ymin - ymax));
      
    }
  } {
    thisArr.do { |n, i|
      var x = i / size * width;
      var y = n.linlin(-1, 1, height, 0);
      var yzero = 0.5 * height;
      Pen.addRect(Rect(x, min(y, yzero), size.reciprocal * width, abs(yzero - y)));
    };
  };
  Pen.color = Color.gray(0.5);
  Pen.fill;
})
)

I’m sure it’s not the most efficient, but it still improves things. When there are a lot of vertical zig-zags (loud noise), the GUI still gets very slow. Next I would try creating an Image from each buffer frame, which might weirdly be faster…

You raised an important point about it, thanks. The code I shared might miss peaks when there are many samples per pixel, as it plots them sequentially.

Maybe an enhancement could use different colors or different modes. Or better yet, multiple time windows for RMS (short, medium, long), because they’re all useful in various contexts (a sketch follows below).

Another mode could check peak samples first, etc.
One mode could improve performance by processing samples based on display resolution instead of the raw sample rate, while another could sacrifice latency for perceptual information (using a 300 ms+ window for the RMS calculation). The trade-off in the Audacity approach is memory usage and rendering efficiency (not what I initially thought).
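
For the multiple RMS windows, a minimal server-side sketch (the window lengths here are arbitrary) could use RunningSum.rms and scope the three resulting signals:

(
{
	var sig = SoundIn.ar(0);
	// short / medium / long RMS windows, in seconds
	[0.05, 0.3, 1.0].collect({ |win|
		RunningSum.rms(sig, win * SampleRate.ir)
	})
}.scope;
)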

Right, I got back to it! Thanks for your responses; I read them but didn’t answer because I was busy with other things. I’m sorry for not always taking the time to answer, but it doesn’t mean I’m not thankful for the help I receive. In fact, I’m always happy and thankful for your help.

In the long term, I’d like to enhance SC’s visual display abilities, but since I ain’t very smart, I figured I should start with a simpler task.

So here’s a prototype which displays a sound file’s peaks (no real time and no RMS, to keep it simple). As mentioned previously, it draws a line between the min and max peaks.

The idea was to avoid iterating through every sample each time the view has to be updated: when the file is loaded, local minima and maxima are cached according to a certain chunkSize (e.g. 100 samples). When the view is redrawn, it looks at chunks instead of samples.

I don’t have the full picture yet, but this obviously takes less time than looking at each sample (except in extreme cases). I tried to benchmark a simplified version of this algorithm (and will try with this exact algorithm); basically, chunk size, file length and view width interact in a way that makes it difficult to find the right chunk size:

  • X axis: chunk size (1 sample → 1000 samples)
  • Y axis: ratio of the number of operations needed to get the data, compared to fully traversing the samples (1:1 ratio at the right edge)

Changing the file length dilates the graph; changing the window size changes the number of ‘teeth’. The result always looks better (on average) on the left side. But the ‘teeth pattern’ might be an artifact of my simplification of the problem.

Empirically, small windows resize well, but full-screen windows take too long to redraw. My intuition is that providing multiple chunk sizes on top of each other would improve the algorithm (a sketch of that idea follows below). Also, this ‘teeth pattern’, if it is really a thing, could be improved by interleaving several teeth patterns and choosing the right one.
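
Here’s a rough sketch of that multi-chunk idea (all names hypothetical): min/max pairs are precomputed at several resolutions, each level twice as coarse as the one below, so a redraw can pick the coarsest level whose chunks fit inside the queried range:

(
var buildPyramid = { |samples, baseChunk = 64, levels = 4|
	// level 0: one [min, max] pair per baseChunk samples
	var level = samples.clump(baseChunk).collect({ |c| [c.minItem, c.maxItem] });
	var pyramid = [level];
	(levels - 1).do({
		// each higher level merges pairs of entries from the level below
		level = level.clump(2).collect({ |pair|
			[
				pair.collect({ |mm| mm[0] }).minItem,
				pair.collect({ |mm| mm[1] }).maxItem
			]
		});
		pyramid = pyramid.add(level);
	});
	pyramid // pyramid[i] summarises baseChunk * (2 ** i) samples per entry
};
~pyramid = buildPyramid.(Array.fill(20000, { 1.0.rand2 }));
~pyramid.collect({ |l| l.size }).postln; // number of entries per level
)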

// Use this to generate a test array:
// copy-paste a sound file in the same folder
// as this file, indicate its name,
// then evaluate:
(
var fileName = "myFile.wav";
var soundFile = SoundFile.openRead(
	thisProcess.nowExecutingPath.dirname +/+ fileName
);
~testArray = FloatArray.newClear(
	soundFile.numFrames * soundFile.numChannels
);
soundFile.readData(~testArray);
soundFile.close;
)

(
var peakABoo = (
	hasData: false,
	channels: nil,
	datas: nil,
	loadArray: { |self, array, numChannels = 1|
		self[\channels] = array.unlace(numChannels);
		self.generateDatas;
	},
	chunkSize: 20,
	setChunkSize: { |self, size|
		self[\chunkSize] = size;
		if(self[\hasData]) {
			self.generateDatas;
		};
	},
	generateDatas: { |self|
		var nChunks, channelData, min, max, subArray;
		if(self[\channels].notNil) {
			self[\datas] = Array.newClear(self[\channels].size);
			nChunks = (self[\channels][0].size / self[\chunkSize]).roundUp.asInteger;
			self[\channels].do({ |channel, channelIndex|
				channelData = Array.newClear(nChunks);
				nChunks.do({ |chunkIndex|
					min = inf;
					max = inf.neg;
					subArray = channel.copyRange(
						chunkIndex * self[\chunkSize],
						min(
							(chunkIndex * self[\chunkSize]) + (self[\chunkSize] - 1),
							channel.size - 1
						)
					);
					subArray.do({ |data|
						if(min > data) {
							min = data;
						};
						if(max < data) {
							max = data;
						};
					});
					channelData[chunkIndex] = [min, max];
				});
				self[\datas][channelIndex] = channelData.deepCopy;
			});
		};
		self[\hasData] = true;
	},
	getDatas: { |self, startFrame, endFrame|
		var startChunk = 0, overlappedChunks;
		var prevFrames, postFrames;
		var datas, min, max, subArray;
		if(self[\hasData]) {
			datas = Array.newClear(self[\channels].size);
			startFrame = max(0, startFrame);
			endFrame = min(self[\channels][0].size - 1, endFrame);
			// First, check fully overlapped chunks
			while { (startChunk * self[\chunkSize]) < startFrame } {
				startChunk = startChunk + 1;
			};
			prevFrames = startFrame - (startChunk * self[\chunkSize]);
			overlappedChunks = (endFrame - startFrame) + prevFrames;
			postFrames = overlappedChunks % self[\chunkSize];
			overlappedChunks = overlappedChunks - postFrames;
			overlappedChunks = overlappedChunks / self[\chunkSize];
			overlappedChunks = overlappedChunks.asInteger;
			// Special case here: no overlapped chunk
			if(overlappedChunks == 0) {
				self[\channels].do({ |channel, channelIndex|
					min = inf;
					max = inf.neg;
					subArray = channel.copyRange(startFrame, endFrame);
					subArray.do({ |data|
						if(min > data) {
							min = data;
						};
						if(max < data) {
							max = data;
						};
					});
					datas[channelIndex] = [min, max];
				});
			} {
				// Else, at least one chunk is overlapped
				// We need to process channels separately from here
				self[\channels].do({ |channel, channelIndex|
					min = inf;
					max = inf.neg;
					// Find min / max in overlapped chunks
					subArray = self[\datas][channelIndex].copyRange(
						startChunk,
						startChunk + (overlappedChunks - 1)
					);
					subArray.do({ |data|
						if(min > data[0]) {
							min = data[0];
						};
						if(max < data[1]) {
							max = data[1];
						};
					});
					// But min / max might be located outside
					// of overlapped chunks
					if(prevFrames < 0) {
						// Check if min or max COULD be situated here
						if(
							(self[\datas][channelIndex][startChunk - 1][0] < min)
							or:
							{ self[\datas][channelIndex][startChunk - 1][1] > max }
						) {
							// If it might, just check every relevant value
							subArray = channel.copyRange(
								(startChunk * self[\chunkSize]) + prevFrames,
								(startChunk * self[\chunkSize]) - 1
							);
							// /!\ I don't think optimizing here is relevant,
							// so just check both min and max
							subArray.do({ |data|
								if(min > data) {
									min = data;
								};
								if(max < data) {
									max = data;
								};
							});
						};
					};
					// Now the same with the other side
					if(postFrames > 0) {
						if(
							(self[\datas][channelIndex][startChunk + overlappedChunks][0] < min)
							or:
							{ self[\datas][channelIndex][startChunk + overlappedChunks][1] > max }
						) {
							subArray = channel.copyRange(
								((startChunk + overlappedChunks) * self[\chunkSize]) + 1,
								((startChunk + overlappedChunks) * self[\chunkSize]) + postFrames
							);
							subArray.do({ |data|
								if(min > data) {
									min = data;
								};
								if(max < data) {
									max = data;
								};
							});
						};
					};
					datas[channelIndex] = [min, max];
				});
			};
			datas
		};
	},
	getDatasForRange: { |self, range|
		var datas, samplesPerPixel, pixelRemainder;
		var addSampleList, startSample = 0, currentSamplesPerPixel;
		var currentData;
		if(self[\hasData]) {
			datas = Array.fill(
				self[\channels].size, {
					Array.newClear(range)
				}
			);
			// NB: assumes the file has at least `range` samples
			samplesPerPixel = (self[\channels][0].size / range).trunc(1).asInteger;
			pixelRemainder = self[\channels][0].size % samplesPerPixel;
			addSampleList = Array.newClear(pixelRemainder);
			pixelRemainder = (range / pixelRemainder);
			addSampleList.do({ |item, index|
				addSampleList[index] = (pixelRemainder * index).trunc(1).asInteger;
			});
			range.do({ |index|
				currentSamplesPerPixel = samplesPerPixel;
				if(addSampleList.includesEqual(index)) {
					currentSamplesPerPixel = currentSamplesPerPixel + 1;
				};
				currentData = self.getDatas(
					startSample,
					startSample + (currentSamplesPerPixel - 1)
				);
				self[\channels].size.do({ |nChannel|
					datas[nChannel][index] = currentData[nChannel];
				});
				startSample = startSample + currentSamplesPerPixel;
			});
		};
		datas
	},
);
peakABoo.setChunkSize(100);
peakABoo.loadArray(~testArray, 2); // assumes a stereo file; match your file's channel count

UserView()
.background_(Color.black)
.drawFunc_({ |view|
	var datas = peakABoo.getDatasForRange(view.bounds.width);
	var chanHeight = view.bounds.height / datas.size;
	Pen.smoothing = false;
	Pen.width_(1);
	Pen.strokeColor_(Color.cyan);
	datas.do({ |channel, channelIndex|
		channel.do({ |minMax, x|
			Pen.line(
				Point(
					x,
					(chanHeight * channelIndex) +
					(chanHeight * (1 - (minMax[0] / 2 + 0.5)))
				),
				Point(
					x,
					(chanHeight * channelIndex) +
					(chanHeight * (1 - (minMax[1] / 2 + 0.5)))
				)
			);
		});
	});
	Pen.stroke;
})
.front;
)