diff --git a/scripts/audio-capture.sh b/scripts/audio-capture.sh
index b4dfec3..e1ab0aa 100755
--- a/scripts/audio-capture.sh
+++ b/scripts/audio-capture.sh
@@ -3,6 +3,6 @@
 set -eux
 
 sample_rate="44100"
-channels="2"
+channels="1"
 
-arecord -t raw -c ${channels} -f S16_LE -r ${sample_rate}
\ No newline at end of file
+arecord -t raw -c ${channels} -f S16_LE -r ${sample_rate}
diff --git a/scripts/audio-pipe.sh b/scripts/audio-pipe.sh
index f8ae9e4..dd1b504 100755
--- a/scripts/audio-pipe.sh
+++ b/scripts/audio-pipe.sh
@@ -4,6 +4,6 @@ set -eux
 audio_file=$1
 
 sample_rate="44100"
-channels="2"
+channels="1"
 
-ffmpeg -i ${audio_file} -f s16le -ac ${channels} -ar ${sample_rate} -
\ No newline at end of file
+ffmpeg -hide_banner -loglevel error -i ${audio_file} -f s16le -ac ${channels} -ar ${sample_rate} -
diff --git a/scripts/live.py b/scripts/live.py
index b541363..8519af3 100755
--- a/scripts/live.py
+++ b/scripts/live.py
@@ -3,19 +3,14 @@
 import sys
 import threading
 import numpy as np
-import signal
+import signal as sig
+from tones import TONES
+from filters import anti_alias, bandpass_filter, note
+from scipy import signal
+from scipy.signal import butter, lfilter, decimate
+
 import pyqtgraph as pg
-
-data1 = np.random.normal(size=300)
-ptr1 = 0
-
-win = pg.GraphicsLayoutWidget(show=True)
-win.setWindowTitle('pyqtgraph example: Scrolling Plots')
-
-plot = win.addPlot()
-curve = plot.plot(data1)
-
-
+from pyqtgraph.Qt import QtGui, QtWidgets, mkQApp
 keep_running = True
 
 def signal_handler(sig, frame):
@@ -23,6 +18,62 @@ def signal_handler(sig, frame):
     print('SIGINT received. Stopping...')
     keep_running = False
 
+
+class DetectorGui(QtWidgets.QMainWindow):
+    def __init__(self, *args, **kwargs):
+        super(DetectorGui, self).__init__(*args, **kwargs)
+        layout = pg.GraphicsLayoutWidget(show=True)
+        self.setCentralWidget(layout)
+        self.setWindowTitle('SELCAL Detector')
+        self.resize(1280, 800)
+
+        self.plot = layout.addPlot()
+        legend_view = layout.addViewBox()
+
+        legend = pg.LegendItem(offset=(0, 0))
+        legend.setParentItem(legend_view)
+
+        color_map = pg.colormap.get('CET-C6s')
+        colors = color_map.getLookupTable(nPts=len(TONES))
+
+        t = np.linspace(0, 500, 100)
+
+        self.tone_data = {}
+        self.tone_lines = {}
+
+        for tone,color in zip(TONES.keys(), colors):
+            self.tone_data[tone] = np.zeros(int(10000), dtype=np.float64) #np.array([], dtype=np.float64)
+            self.tone_lines[tone] = self.plot.plot(self.tone_data[tone], pen=pg.mkPen(color=color), name=tone)
+            legend.addItem(self.tone_lines[tone], tone)
+
+        self.plot.setLabel('left', 'Signal Correlation')
+        self.plot.setLabel('bottom', 'Time (samples)')
+        self.plot.showGrid(x=True, y=True)
+
+        legend_view.setFixedWidth(80)
+        layout.ci.layout.setColumnFixedWidth(1, 80)
+
+        self.show()
+
+    def set_position(self, pos):
+        pass
+
+    def push_tone(self, tone, value):
+        self.tone_data[tone] = np.roll(self.tone_data[tone], -1)
+        self.tone_data[tone][-1] = value
+        self.tone_lines[tone].setData(self.tone_data[tone])
+
+    def push_tones(self, tone, values):
+        #self.tone_data[tone] = np.append(self.tone_data[tone], values)
+
+        self.tone_data[tone] = np.roll(self.tone_data[tone], -len(values))
+        self.tone_data[tone][-len(values):] = values
+        self.tone_lines[tone].setData(self.tone_data[tone])
+
+
+mkQApp("Correlation matrix display")
+gui = DetectorGui()
+
 
 def read_audio_from_stdin(chunk_size, process_chunk):
     global keep_running
@@ -39,55 +90,43 @@ def read_audio_from_stdin(chunk_size, process_chunk):
         audio_chunk = np.frombuffer(data, dtype=np.int16)
         process_chunk(audio_chunk)
 
+sample_rate = 44100
+note_length = 0.1
+N = 256
+cumsum_convolution = np.ones(N)/N
+
 
 
 def process_audio_chunk(audio_chunk):
-    # Example processing: simply print the chunk
-    global data1, ptr1, curve
-    print(f"Read chunk: {len(audio_chunk)}")
+    global gui
 
-    data1[:-1] = data1[1:] # shift data in the array one sample left
-    # (see also: np.roll)
-    data1[-1] = len(audio_chunk)
+    data = audio_chunk
+    sample_rate = 44100
 
-    ptr1 += 1
-    curve.setData(data1)
-    curve.setPos(ptr1, 0)
+    data, sample_rate, decimation = anti_alias(data, sample_rate, 4800)
+    pure_signals = {tone:note(freq, note_length, rate=sample_rate) for tone,freq in TONES.items()}
+    correlations = {tone:np.abs(signal.correlate(data, pure, mode='same')) for tone,pure in pure_signals.items()}
+    massaged = {tone:decimate(np.convolve(correlation, cumsum_convolution, mode='valid'), 4) for tone,correlation in correlations.items()}
+
+    print('processing done')
+
+    for tone,massage in massaged.items():
+        gui.push_tones(tone, massage )
 
 if __name__ == '__main__':
-    signal.signal(signal.SIGINT, signal_handler)
+    sig.signal(sig.SIGINT, signal_handler)
 
     chunk_duration = 0.1 # seconds
     sample_rate = 44100
-    channels = 2
+    channels = 1
 
     chunk_size = int(sample_rate * chunk_duration) * channels
 
     reader_thread = threading.Thread(target=read_audio_from_stdin, args=(chunk_size, process_audio_chunk))
     reader_thread.daemon = True
    reader_thread.start()
 
+    pg.exec()
+
     # Wait...
     reader_thread.join()
-
-
-'''
-# 1) Simplest approach -- update data in the array such that plot appears to scroll
-# In these examples, the array size is fixed.
-p1 = win.addPlot()
-p2 = win.addPlot()
-data1 = np.random.normal(size=300)
-curve1 = p1.plot(data1)
-curve2 = p2.plot(data1)
-ptr1 = 0
-def update1():
-    global data1, ptr1
-    data1[:-1] = data1[1:] # shift data in the array one sample left
-    # (see also: np.roll)
-    data1[-1] = np.random.normal()
-    curve1.setData(data1)
-
-    ptr1 += 1
-    curve2.setData(data1)
-    curve2.setPos(ptr1, 0)
-'''
\ No newline at end of file