feat: add audio demodulation and FFT processing, enhance audio input handling and visualization

This commit is contained in:
Nedifinita
2025-12-12 00:47:09 +08:00
parent 8507029499
commit ec9d9a389e
9 changed files with 566 additions and 16 deletions

View File

@@ -6,6 +6,10 @@ project("railwaypagerdemod")
add_library(${CMAKE_PROJECT_NAME} SHARED
demod.cpp
demod.h
audio_demod.cpp
audio_demod.h
audio_fft.cpp
audio_fft.h
native-lib.cpp
${CMAKE_CURRENT_SOURCE_DIR}/dsp/firfilter.cpp
)

View File

@@ -0,0 +1,106 @@
#include "audio_demod.h"
#include "demod.h"
#include <android/log.h>
#include <cmath>
#include <cstring>
#define LOG_TAG "AudioDemod"
#define LOGD(...) __android_log_print(ANDROID_LOG_DEBUG, LOG_TAG, __VA_ARGS__)

// Acceptance band for the bit-clock phase error measured after each emitted
// bit; outside this band the bit period `clkt` is nudged by 0.01 samples.
#define FINE_CLKT_HI 1.90
#define FINE_CLKT_LO 0.32
#define SAMPLE_RATE 48000
#define BAUD_RATE 1200

// --- Demodulator state (file scope; cleared by resetAudioDemod()) ---
static double watch_ctr = 0.0;   // running sample counter, +1 per sample
static double atb_ctr = 0.0;     // bit-clock accumulator; advances by clkt per emitted bit
static double clkt = (double)SAMPLE_RATE / BAUD_RATE;  // samples per bit (~40), fine-tuned at runtime
static int last_value = 0;       // previous hard-sliced bit, for edge detection
static int preamble_count = 0;   // edges whose spacing matched one bit period
static int pocbit = 0;           // bits emitted since the preamble was detected
static bool preamble_detected = false;
static int crossing_count = 0;   // edges seen since the last clock adjustment
static int nSamples = 0;         // samples elapsed since the previous edge
static const int AUDIO_THRESHOLD = 128;  // slicer threshold on the 0..255 scale

// Reset every piece of demodulator state so a fresh capture starts clean.
void resetAudioDemod() {
    watch_ctr = 0.0;
    atb_ctr = 0.0;
    clkt = (double)SAMPLE_RATE / BAUD_RATE;
    last_value = 0;
    preamble_count = 0;
    pocbit = 0;
    preamble_detected = false;
    crossing_count = 0;
    nSamples = 0;
    LOGD("Audio demodulator reset");
}

// Feed signed 16-bit mono PCM (SAMPLE_RATE Hz) into the POCSAG demodulator.
// Each sample is hard-sliced into a bit; the alternating 1010... preamble is
// detected from edge spacing, after which one bit per bit period is clocked
// into processBasebandSample().
void processAudioSamples(int16_t *samples, int size) {
    for (int i = 0; i < size; i++) {
        // Rescale [-32768, 32767] to [0, 255] and hard-slice at the threshold.
        int audio_value = (samples[i] + 32768) / 256;
        if (audio_value < 0) audio_value = 0;
        if (audio_value > 255) audio_value = 255;
        int current_bit = (audio_value > AUDIO_THRESHOLD) ? 1 : 0;

        // Edge detection. The preamble alternates every bit, so consecutive
        // edges should be ~one bit period (40 samples) apart; accept 29..43.
        if (current_bit != last_value) {
            crossing_count++;
            if ((nSamples > 28) && (nSamples < 44)) {
                preamble_count++;
                if (preamble_count > 50 && !preamble_detected) {
                    preamble_detected = true;
                    pocbit = 0;
                    LOGD("Preamble detected! crossings=%d samples=%d", preamble_count, nSamples);
                }
            }
            nSamples = 0;
        }
        nSamples++;
        last_value = current_bit;

        watch_ctr += 1.0;
        // Bit clock: emit one bit each time the sample counter overtakes the
        // bit-clock accumulator. BUG FIX: the original test was
        // `watch_ctr - atb_ctr < 1.0`, which — since atb_ctr advances only
        // inside this branch while watch_ctr grows every sample — is false
        // from the very first sample onward, so no bit was ever clocked out.
        // The intended trigger is the complement.
        // NOTE(review): with this trigger the steady-state phase offset sits
        // near 1.0; confirm FINE_CLKT_LO/FINE_CLKT_HI against real captures.
        if (watch_ctr - atb_ctr >= 1.0) {
            int bit = current_bit;
            if (preamble_detected) {
                processBasebandSample(bit);
                pocbit++;
                // Abandon the frame if no message completes within 1250 bits.
                if (pocbit > 1250) {
                    LOGD("POCSAG timeout - no sync after 1250 bits");
                    preamble_detected = false;
                    preamble_count = 0;
                    pocbit = 0;
                }
            }
            // Fine clock recovery: after any zero crossing, steer clkt so the
            // phase offset stays inside the acceptance band, clamped to +/-5%
            // of the nominal bit period.
            if (crossing_count > 0) {
                double offset = watch_ctr - atb_ctr;
                if (offset > FINE_CLKT_HI) {
                    clkt -= 0.01;
                    if (clkt < (SAMPLE_RATE / BAUD_RATE) * 0.95) {
                        clkt = (SAMPLE_RATE / BAUD_RATE) * 0.95;
                    }
                } else if (offset < FINE_CLKT_LO) {
                    clkt += 0.01;
                    if (clkt > (SAMPLE_RATE / BAUD_RATE) * 1.05) {
                        clkt = (SAMPLE_RATE / BAUD_RATE) * 1.05;
                    }
                }
                crossing_count = 0;
            }
            atb_ctr += clkt;
        }
    }
}

View File

@@ -0,0 +1,10 @@
#ifndef AUDIO_DEMOD_H
#define AUDIO_DEMOD_H
#include <cstdint>
// Feed raw signed 16-bit mono PCM samples into the audio-level POCSAG
// demodulator (slicing, preamble detection, bit-clock recovery).
void processAudioSamples(int16_t *samples, int size);
// Reset all demodulator state (bit clock, preamble detection) for a new capture.
void resetAudioDemod();
#endif

View File

@@ -0,0 +1,117 @@
#include "audio_fft.h"
#include <cmath>
#include <algorithm>
#include <android/log.h>
#define LOG_TAG "AudioFFT"
#define LOGD(...) __android_log_print(ANDROID_LOG_DEBUG, LOG_TAG, __VA_ARGS__)
#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif
// Construct an FFT processor of the given transform length and precompute
// the analysis window.
// NOTE(review): computeFFT() is a radix-2 algorithm, so fftSize must be a
// power of two; no validation is done here — confirm all callers comply.
AudioFFT::AudioFFT(int fftSize)
    : fftSize_(fftSize)
    , inputBuffer_(fftSize, 0.0f)
    , windowBuffer_(fftSize, 0.0f)
    , realPart_(fftSize, 0.0f)
    , imagPart_(fftSize, 0.0f)
    , magnitude_(fftSize, 0.0f)
    , bufferPos_(0) {
    // Hamming window (0.54 - 0.46*cos) to reduce spectral leakage.
    for (int i = 0; i < fftSize_; i++) {
        windowBuffer_[i] = 0.54f - 0.46f * std::cos(2.0f * M_PI * i / (fftSize_ - 1));
    }
    LOGD("AudioFFT initialized with size %d", fftSize_);
}
// No explicit cleanup required — all buffers are std::vector members.
AudioFFT::~AudioFFT() {
}
// Accumulate incoming 16-bit PCM into the input buffer, normalised to
// [-1, 1); each time the buffer reaches fftSize_ samples, run one FFT
// and start filling again from the beginning.
void AudioFFT::processSamples(const int16_t* samples, int size) {
    const int16_t* src = samples;
    int remaining = size;
    while (remaining-- > 0) {
        inputBuffer_[bufferPos_++] = *src++ / 32768.0f;
        if (bufferPos_ >= fftSize_) {
            bufferPos_ = 0;
            computeFFT();
        }
    }
}
// Prepare the FFT work buffers: real part = windowed input sample,
// imaginary part = 0.
void AudioFFT::applyWindow() {
    for (int idx = 0; idx < fftSize_; ++idx) {
        imagPart_[idx] = 0.0f;
        realPart_[idx] = windowBuffer_[idx] * inputBuffer_[idx];
    }
}
// In-place iterative radix-2 Cooley-Tukey FFT over realPart_/imagPart_,
// followed by magnitude extraction into magnitude_. The bit-reversal and
// butterfly structure require fftSize_ to be a power of two.
void AudioFFT::computeFFT() {
    applyWindow();
    int n = fftSize_;
    // Bit-reversal permutation: reorder the input so the in-place
    // butterflies below produce output in natural order.
    int j = 0;
    for (int i = 0; i < n - 1; i++) {
        if (i < j) {
            std::swap(realPart_[i], realPart_[j]);
            std::swap(imagPart_[i], imagPart_[j]);
        }
        int k = n / 2;
        while (k <= j) {
            j -= k;
            k /= 2;
        }
        j += k;
    }
    // Butterfly passes: double the sub-transform length each stage.
    for (int len = 2; len <= n; len *= 2) {
        // Stage twiddle factor w_len = e^{-2*pi*i/len}.
        float angle = -2.0f * M_PI / len;
        float wlenReal = std::cos(angle);
        float wlenImag = std::sin(angle);
        for (int i = 0; i < n; i += len) {
            float wReal = 1.0f;
            float wImag = 0.0f;
            for (int k = 0; k < len / 2; k++) {
                int idx1 = i + k;
                int idx2 = i + k + len / 2;
                // t = w * x[idx2]; then x[idx2] = x[idx1] - t, x[idx1] += t.
                float tReal = wReal * realPart_[idx2] - wImag * imagPart_[idx2];
                float tImag = wReal * imagPart_[idx2] + wImag * realPart_[idx2];
                realPart_[idx2] = realPart_[idx1] - tReal;
                imagPart_[idx2] = imagPart_[idx1] - tImag;
                realPart_[idx1] += tReal;
                imagPart_[idx1] += tImag;
                // Advance the twiddle: w *= w_len (complex multiply).
                float wTempReal = wReal * wlenReal - wImag * wlenImag;
                wImag = wReal * wlenImag + wImag * wlenReal;
                wReal = wTempReal;
            }
        }
    }
    // Magnitude spectrum. Note: no 1/N normalisation is applied here;
    // getSpectrum() works on these raw magnitudes.
    for (int i = 0; i < fftSize_; i++) {
        float real = realPart_[i];
        float imag = imagPart_[i];
        magnitude_[i] = std::sqrt(real * real + imag * imag);
    }
}
// Fill `output` with a normalised log-magnitude spectrum in [0, 1].
// Only the first fftSize_/2 bins carry information (real input gives a
// symmetric spectrum). Mapping: magnitude -> dB, then (-80 dB .. 0 dB]
// is scaled linearly onto [0, 1] and clamped.
void AudioFFT::getSpectrum(float* output, int outputSize) {
    int copySize = std::min(outputSize, fftSize_ / 2);
    for (int i = 0; i < copySize; i++) {
        float mag = magnitude_[i];
        // Floor tiny magnitudes so log10 never sees zero.
        if (mag < 1e-10f) mag = 1e-10f;
        float db = 20.0f * std::log10(mag);
        float normalized = (db + 80.0f) / 80.0f;
        output[i] = std::max(0.0f, std::min(1.0f, normalized));
    }
    // Bug fix: if the caller asks for more bins than fftSize_/2, the tail
    // was previously left uninitialised; zero it explicitly.
    for (int i = copySize; i < outputSize; i++) {
        output[i] = 0.0f;
    }
}

View File

@@ -0,0 +1,29 @@
#ifndef AUDIO_FFT_H
#define AUDIO_FFT_H
#include <cstdint>
#include <vector>
// Streaming spectrum analyser: accumulates 16-bit PCM samples and runs a
// windowed radix-2 FFT each time `fftSize` samples have been collected.
class AudioFFT {
public:
    // fftSize must be a power of two (radix-2 implementation).
    // `explicit` blocks accidental implicit int -> AudioFFT conversions.
    explicit AudioFFT(int fftSize = 256);
    ~AudioFFT();
    // Push raw PCM; triggers an FFT whenever the internal buffer fills.
    void processSamples(const int16_t* samples, int size);
    // Copy the normalised (0..1) log-magnitude spectrum into `output`;
    // only the first fftSize/2 bins are meaningful.
    void getSpectrum(float* output, int outputSize);
    int getFFTSize() const { return fftSize_; }
private:
    void computeFFT();   // in-place iterative radix-2 FFT + magnitudes
    void applyWindow();  // windowed copy of input into the work buffers
    int fftSize_;                      // transform length
    std::vector<float> inputBuffer_;   // normalised incoming samples
    std::vector<float> windowBuffer_;  // precomputed analysis window
    std::vector<float> realPart_;      // FFT work buffer, real part
    std::vector<float> imagPart_;      // FFT work buffer, imaginary part
    std::vector<float> magnitude_;     // |X[k]| of the last transform
    int bufferPos_;                    // next write index in inputBuffer_
};
#endif

View File

@@ -15,6 +15,8 @@
#include <android/log.h>
#include <errno.h>
#include "demod.h"
#include "audio_demod.h"
#include "audio_fft.h"
#define BUF_SIZE 8192
@@ -26,6 +28,8 @@ static std::mutex msgMutex;
static std::vector<std::string> messageBuffer;
static std::mutex demodDataMutex;
static std::mutex fftMutex;
static AudioFFT* audioFFT = nullptr;
static JavaVM *g_vm = nullptr;
static jobject g_obj = nullptr;
@@ -95,20 +99,36 @@ Java_org_noxylva_lbjconsole_flutter_AudioInputHandler_nativePushAudio(
jshort *samples = env->GetShortArrayElements(audioData, NULL);
{
std::lock_guard<std::mutex> fftLock(fftMutex);
if (!audioFFT) {
audioFFT = new AudioFFT(4096);
}
audioFFT->processSamples(samples, size);
}
std::lock_guard<std::mutex> demodLock(demodDataMutex);
for (int i = 0; i < size; i++) {
double sample = (double)samples[i] / 32768.0;
processBasebandSample(sample);
}
env->ReleaseShortArrayElements(audioData, samples, 0);
processAudioSamples(samples, size);
if (is_message_ready) {
std::ostringstream ss;
std::lock_guard<std::mutex> msgLock(msgMutex);
std::string message_content = alpha_msg.empty() ? numeric_msg : alpha_msg;
std::string message_content;
if (function_bits == 3) {
message_content = alpha_msg;
} else {
message_content = numeric_msg;
}
if (message_content.empty()) {
message_content = alpha_msg.empty() ? numeric_msg : alpha_msg;
}
__android_log_print(ANDROID_LOG_DEBUG, "AUDIO",
"msg_ready: addr=%u func=%d alpha_len=%zu numeric_len=%zu",
address, function_bits, alpha_msg.length(), numeric_msg.length());
ss << "[MSG]" << address << "|" << function_bits << "|" << message_content;
messageBuffer.push_back(ss.str());
@@ -116,6 +136,8 @@ Java_org_noxylva_lbjconsole_flutter_AudioInputHandler_nativePushAudio(
numeric_msg.clear();
alpha_msg.clear();
}
env->ReleaseShortArrayElements(audioData, samples, 0);
}
extern "C" JNIEXPORT jdouble JNICALL
@@ -135,6 +157,32 @@ Java_org_noxylva_lbjconsole_flutter_AudioInputHandler_clearMessageBuffer(JNIEnv
alpha_msg.clear();
}
// JNI: return the current audio spectrum as 500 floats in [0, 1],
// decimated from the FFT's fftSize/2 magnitude bins. Returns an empty
// array until the first audio push has created the FFT instance.
extern "C" JNIEXPORT jfloatArray JNICALL
Java_org_noxylva_lbjconsole_flutter_AudioInputHandler_getAudioSpectrum(JNIEnv *env, jobject)
{
    std::lock_guard<std::mutex> fftLock(fftMutex);
    if (!audioFFT) {
        return env->NewFloatArray(0);
    }
    int spectrumSize = audioFFT->getFFTSize() / 2;
    std::vector<float> spectrum(spectrumSize);
    audioFFT->getSpectrum(spectrum.data(), spectrumSize);
    // Decimate to a fixed bin count for the UI (nearest-bin pick).
    const int outputBins = 500;
    std::vector<float> downsampled(outputBins);
    for (int i = 0; i < outputBins; i++) {
        int srcIdx = (i * spectrumSize) / outputBins;
        downsampled[i] = spectrum[srcIdx];
    }
    jfloatArray result = env->NewFloatArray(outputBins);
    // Bug fix: NewFloatArray returns nullptr on allocation failure (with a
    // pending Java exception); calling SetFloatArrayRegion on it would
    // crash the VM. Propagate the null so the exception surfaces in Java.
    if (result == nullptr) {
        return nullptr;
    }
    env->SetFloatArrayRegion(result, 0, outputBins, downsampled.data());
    return result;
}
extern "C" JNIEXPORT jbyteArray JNICALL
Java_org_noxylva_lbjconsole_flutter_AudioInputHandler_pollMessages(JNIEnv *env, jobject)
{

View File

@@ -53,6 +53,7 @@ class AudioInputHandler(private val context: Context) : MethodChannel.MethodCall
private external fun nativePushAudio(data: ShortArray, size: Int)
private external fun pollMessages(): ByteArray
private external fun clearMessageBuffer()
private external fun getAudioSpectrum(): FloatArray
override fun onMethodCall(call: MethodCall, result: MethodChannel.Result) {
when (call.method) {
@@ -69,6 +70,14 @@ class AudioInputHandler(private val context: Context) : MethodChannel.MethodCall
clearMessageBuffer()
result.success(null)
}
"getSpectrum" -> {
try {
val spectrum = getAudioSpectrum()
result.success(spectrum.toList())
} catch (e: Exception) {
result.error("FFT_ERROR", "Failed to get spectrum", e.message)
}
}
else -> result.notImplemented()
}
}