Sine generator: prevent overflow of the time accumulator

This commit is contained in:
Phil Schatzmann 2022-03-02 10:48:09 +01:00
parent d067573bdc
commit 5ea9c7b0b9
5 changed files with 1656 additions and 34 deletions

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,61 @@
/**
* @file streams-audiokit-tf.ino
* @author Phil Schatzmann
* @brief We use a Nano BLE Sense which has a microphone as input to feed a Tensorflow Light Model which recognises the words yes and no.
* @version 0.1
* @date 2022-03-01
*
* @copyright Copyright (c) 2022
*
*/
#include "AudioTools.h"
#include "AudioLibs/AudioMP34DT05.h"
#include "AudioLibs/TfLiteAudioOutput.h"
#include "model.h" // tensorflow model
AudioMP34DT05 mic; // Access I2S as stream
TfLiteAudioFeatureProvider fp;  // extracts audio features for the model
TfLiteAudioOutput<4> tfl; // Audio sink (4 = number of output categories)
// Labels for the 4 model output categories, in model output order
const char* kCategoryLabels[4] = {
"silence",
"unknown",
"yes",
"no",
};
StreamCopy copier(tfl, mic); // copy mic to tfl
int channels = 1;  // mono microphone input
int samples_per_second = 16000;  // sample rate expected by the model
/**
 * Callback invoked by the TfLite pipeline when a category is recognized.
 * @param found_command  label of the recognized category (e.g. "yes")
 * @param score          confidence score reported by the model
 * @param is_new_command true when this is a newly detected command
 */
void respondToCommand(const char* found_command, uint8_t score,
                      bool is_new_command) {
  if (is_new_command) {
    char buffer[80];
    // snprintf bounds the write so a long command label cannot overflow buffer
    snprintf(buffer, sizeof(buffer), "Result: %s, score: %d, is_new: %s",
             found_command, score, is_new_command ? "true" : "false");
    Serial.println(buffer);
  }
}
/**
 * Arduino entry point: brings up serial logging, configures the
 * TensorFlow Lite audio sink, then starts the microphone input stream.
 */
void setup() {
  Serial.begin(115200);
  while (!Serial);  // block until the serial monitor is attached
  AudioLogger::instance().begin(Serial, AudioLogger::Info);
  Serial.println("starting...");

  // Wire the feature provider to the model sink (arena: 10 KiB)
  fp.kAudioChannels = channels;
  fp.kAudioSampleFrequency = samples_per_second;
  fp.respondToCommand = respondToCommand;
  tfl.begin(g_model, fp, kCategoryLabels, 10 * 1024);

  // Bring up the microphone with the same channel count / sample rate
  auto micConfig = mic.defaultConfig(RX_MODE);
  micConfig.channels = channels;
  micConfig.sample_rate = samples_per_second;
  mic.begin(micConfig);
  Serial.println("started!");
}
// Arduino main loop: continuously stream microphone samples into the model.
void loop() {
  copier.copy();
}

View File

@ -207,6 +207,7 @@ class SineWaveGenerator : public SoundGenerator<T>{
float angle = double_Pi * m_frequency * m_time + m_phase;
T result = m_amplitude * sin(angle);
m_time += m_deltaTime;
if (m_time > divisor) m_time -= divisor;
return result;
}
@ -217,6 +218,8 @@ class SineWaveGenerator : public SoundGenerator<T>{
float m_deltaTime = 0.0;
float m_phase = 0.0;
float double_Pi = PI * 2.0;
float divisor = 1000000;
void logStatus() {
SoundGenerator<T>::info.logStatus();

View File

@ -374,10 +374,10 @@ class TfLiteAudioFeatureProvider {
// Variables for the model's output categories.
int kSilenceIndex = 0;
int kUnknownIndex = 1;
// Callback method for result
void(*respondToCommand)(const char* found_command, uint8_t score,
bool is_new_command) = nullptr;
void (*respondToCommand)(const char* found_command, uint8_t score,
bool is_new_command) = nullptr;
private:
// int feature_size_;
@ -561,6 +561,7 @@ class TfLiteAudioOutput : public AudioPrint {
}
// Allocate memory from the tensor_arena for the model's tensors.
LOGI("AllocateTensors");
TfLiteStatus allocate_status = interpreter->AllocateTensors();
if (allocate_status != kTfLiteOk) {
LOGE("AllocateTensors() failed");
@ -568,6 +569,7 @@ class TfLiteAudioOutput : public AudioPrint {
}
// Get information about the memory area to use for the model's input.
LOGI("Get Input");
model_input = interpreter->input(0);
if ((model_input->dims->size != 2) || (model_input->dims->data[0] != 1) ||
(model_input->dims->data[1] != (feature_provider->kFeatureSliceCount *
@ -576,8 +578,10 @@ class TfLiteAudioOutput : public AudioPrint {
LOGE("Bad input tensor parameters in model");
return false;
}
LOGI("Get Buffer");
model_input_buffer = model_input->data.int8;
if (model_input_buffer==nullptr){
if (model_input_buffer == nullptr) {
LOGE("model_input_buffer is null");
return false;
}
@ -588,6 +592,7 @@ class TfLiteAudioOutput : public AudioPrint {
// all good if we made it here
is_setup = true;
LOGI("done");
return true;
}
@ -653,37 +658,26 @@ class TfLiteAudioOutput : public AudioPrint {
//
bool setupInterpreter() {
LOGD(LOG_METHOD);
tflite::AllOpsResolver resolver;
// // NOLINTNEXTLINE(runtime-global-variables)
// static tflite::MicroMutableOpResolver<4>
// micro_op_resolver(error_reporter); if (micro_op_resolver.AddBuiltin(
// tflite::BuiltinOperator_DEPTHWISE_CONV_2D,
// tflite::ops::micro::Register_DEPTHWISE_CONV_2D()) != kTfLiteOk)
// {
// return false;
// }
// if (micro_op_resolver.AddBuiltin(
// tflite::BuiltinOperator_FULLY_CONNECTED,
// tflite::ops::micro::Register_FULLY_CONNECTED()) != kTfLiteOk) {
// return false;
// }
// if (micro_op_resolver.AddBuiltin(tflite::BuiltinOperator_SOFTMAX,
// tflite::ops::micro::Register_SOFTMAX())
// !=
// kTfLiteOk) {
// return false;
// }
// if (micro_op_resolver.AddBuiltin(tflite::BuiltinOperator_RESHAPE,
// tflite::ops::micro::Register_RESHAPE())
// !=
// kTfLiteOk) {
// return false;
// }
// tflite::AllOpsResolver resolver;
// NOLINTNEXTLINE(runtime-global-variables)
static tflite::MicroMutableOpResolver<4> micro_op_resolver(error_reporter);
if (micro_op_resolver.AddDepthwiseConv2D() != kTfLiteOk) {
return false;
}
if (micro_op_resolver.AddFullyConnected() != kTfLiteOk) {
return false;
}
if (micro_op_resolver.AddSoftmax() != kTfLiteOk) {
return false;
}
if (micro_op_resolver.AddReshape() != kTfLiteOk) {
return false;
}
// Build an interpreter to run the model with.
static tflite::MicroInterpreter static_interpreter(
p_model, resolver, tensor_arena, kTensorArenaSize, error_reporter);
p_model, micro_op_resolver, tensor_arena, kTensorArenaSize,
error_reporter);
interpreter = &static_interpreter;
return true;
}
@ -739,7 +733,7 @@ class TfLiteAudioOutput : public AudioPrint {
/// Overwrite this method to implement your own handler or provide callback
virtual void respondToCommand(const char* found_command, uint8_t score,
bool is_new_command) {
if (feature_provider->respondToCommand!=nullptr) {
if (feature_provider->respondToCommand != nullptr) {
feature_provider->respondToCommand(found_command, score, is_new_command);
} else {
LOGD(LOG_METHOD);

View File

@ -20,7 +20,7 @@ void setup(){
auto cfg1 = out.defaultConfig();
cfg1.sample_rate = sample_rate;
cfg1.channels = 1;
cfg1.channels = channels;
cfg1.bits_per_sample = 16;
out.begin(cfg1);
}