I'm building a gesture recognizer, and I'm trying to simplify my code base so that it's a bit smoother to read and run. I have two TensorFlow models, "Lmodel.h" and "Rmodel.h", along with the corresponding code for each. So far I've been using one tab for the whole code base plus two other tabs for the models, and calling them individually. Now I'm considering using separate tabs to make individual .cpp files and then calling those from the main code, so everything is streamlined a bit more. Below is what I have currently:
Main code (the void loop hasn't been written yet — I'm just working on getting everything in setup() done: Bluetooth, calling everything, etc.):
#include <ArduinoBLE.h>
#include <Arduino_BMI270_BMM150.h>
#include <Wire.h>
// BLE service/characteristic UUIDs — the central device must use these exact
// strings to discover the peripheral's gesture service.
const char* deviceServiceUuid = "19b10000-e8f2-537e-4f6c-d104768a1214";
const char* deviceServiceCharacteristicUuid = "19b10001-e8f2-537e-4f6c-d104768a1214";
BLEService gestureService(deviceServiceUuid);
// Single-byte characteristic used to publish the recognized gesture index.
BLEByteCharacteristic gestureCharacteristic(deviceServiceCharacteristicUuid, BLERead | BLENotify | BLEWrite);
const float accelerationThreshold = 2.5; // Threshold of significance in G's
const int numSamples = 119; // samples per gesture window (the model's input length)
const unsigned long gestureTimeout = 5000; // Gesture timeout duration in milliseconds
// samplesRead == numSamples means "idle, waiting for motion"; it is reset to 0
// when significant motion opens a new capture window.
int samplesRead = numSamples;
// NOTE(review): the state below is not referenced anywhere in the code shown —
// presumably intended for the not-yet-written loop(); confirm before removing.
bool gestureInProgress = false;
int currentGesture = -1;
int gestureCount = 0;
unsigned long startTime = 0;
unsigned long gestureElapsedTime = 0;
// Initialize the BLE stack and start advertising the gesture service.
// Call order matters here: the advertised service must be set and the
// characteristic added to the service BEFORE the service is registered and
// advertising begins. Halts forever if the radio fails to start.
void setupBluetooth() {
  if (!BLE.begin()) {
    Serial.println("- Starting Bluetooth® Low Energy module failed!");
    while (1); // fatal: nothing useful to do without the radio
  }
  BLE.setLocalName("Peripheral_Device");
  BLE.setAdvertisedService(gestureService);
  gestureService.addCharacteristic(gestureCharacteristic);
  BLE.addService(gestureService);
  // Seed with -1 meaning "no gesture yet"; the characteristic stores a byte,
  // so the central will read this initial value as 255.
  gestureCharacteristic.writeValue(-1);
  BLE.advertise();
  Serial.println("Peripheral Device");
  Serial.println(" ");
}
// Bring up the onboard IMU. Gesture capture is impossible without the
// accelerometer/gyroscope, so a failed init halts the sketch forever.
void setupIMU() {
  if (IMU.begin()) {
    return; // sensor is up — nothing else to configure
  }
  Serial.println("- Failed to initialize IMU!");
  for (;;) {
    // fatal: spin forever
  }
}
// One-time board setup. Serial comes first so that any failure message from
// the Bluetooth or IMU init can actually be printed.
void setup() {
  Serial.begin(9600);
  setupBluetooth();
  setupIMU();
}
void loop() {
  // Handle Bluetooth communication and general functions here.
  // Not yet implemented: polling for a BLE central, triggering gesture
  // capture/inference, and publishing results via gestureCharacteristic.
}
And here's one of the tabs; the two are identical except that each one is built for its own model:
#include <TinyMLShield.h>
#include <TensorFlowLite.h>
#include <tensorflow/lite/micro/all_ops_resolver.h>
#include <tensorflow/lite/micro/micro_error_reporter.h>
#include <tensorflow/lite/micro/micro_interpreter.h>
#include <tensorflow/lite/schema/schema_generated.h>
#include <tensorflow/lite/version.h>
#include "Rmodel.h"
// --- Global state for TensorFlow Lite (Micro), right-hand model ---
tflite::MicroErrorReporter tflErrorReporter;
tflite::AllOpsResolver tflOpsResolver; // registers every op: costs flash, but simple
const tflite::Model* tflModelR = nullptr;
tflite::MicroInterpreter* tflInterpreterR = nullptr;
TfLiteTensor* tflInputTensorR = nullptr;
TfLiteTensor* tflOutputTensorR = nullptr;
// Static memory arena for TFLM; the size may need to be adjusted based on
// the model you are using (AllocateTensors() fails if it is too small).
constexpr int tensorArenaSize = 8 * 1024;
byte tensorArenaR[tensorArenaSize] __attribute__((aligned(16)));
// Output labels, in the same order as the model's output tensor.
const char* gestureNames[] = {
  "TWIST",
  "RAISE",
  "CROSS",
  "FLEX",
};
// constexpr int instead of a #define: typed and scoped, and it avoids the
// signed/unsigned comparison warning when used as an `int` loop bound.
constexpr int NUM_GESTURES = sizeof(gestureNames) / sizeof(gestureNames[0]);
void setupRightHandModel() {
tflModelR = tflite::GetModel(Rmodel);
if (tflModelR->version() != TFLITE_SCHEMA_VERSION) {
Serial.println("Right-hand Model schema mismatch!");
while (1);
}
tflInterpreterR = new tflite::MicroInterpreter(tflModelR, tflOpsResolver, tensorArenaR, tensorArenaSize, &tflErrorReporter);
tflInterpreterR->AllocateTensors();
tflInputTensorR = tflInterpreterR->input(0);
tflOutputTensorR = tflInterpreterR->output(0);
}
int recognizeRightHandGesture() {
float aX, aY, aZ, gX, gY, gZ;
// wait for significant motion
while (samplesRead == numSamples) {
if (IMU.accelerationAvailable()) {
// read the acceleration data
IMU.readAcceleration(aX, aY, aZ);
// sum up the absolutes
float aSum = fabs(aX) + fabs(aY) + fabs(aZ);
// check if it's above the threshold
if (aSum >= accelerationThreshold) {
// reset the sample read count
samplesRead = 0;
break;
}
}
}
// check if the all the required samples have been read since
// the last time the significant motion was detected
while (samplesRead < numSamples) {
// check if new acceleration AND gyroscope data is available
if (IMU.accelerationAvailable() && IMU.gyroscopeAvailable()) {
// read the acceleration and gyroscope data
IMU.readAcceleration(aX, aY, aZ);
IMU.readGyroscope(gX, gY, gZ);
// normalize the IMU data between 0 to 1 and store in the model's
// input tensor
tflInputTensor->data.f[samplesRead * 6 + 0] = (aX + 4.0) / 8.0;
tflInputTensor->data.f[samplesRead * 6 + 1] = (aY + 4.0) / 8.0;
tflInputTensor->data.f[samplesRead * 6 + 2] = (aZ + 4.0) / 8.0;
tflInputTensor->data.f[samplesRead * 6 + 3] = (gX + 2000.0) / 4000.0;
tflInputTensor->data.f[samplesRead * 6 + 4] = (gY + 2000.0) / 4000.0;
tflInputTensor->data.f[samplesRead * 6 + 5] = (gZ + 2000.0) / 4000.0;
samplesRead++;
if (samplesRead == numSamples) {
// Run inferencing
TfLiteStatus invokeStatus = tflInterpreterR->Invoke();
if (invokeStatus != kTfLiteOk) {
Serial.println("Invoke failed!");
while (1);
return;
}
// Loop through the output tensor values from the model
for (int i = 0; i < NUM_GESTURES; i++) {
Serial.print(GESTURES[i]);
Serial.print(": ");
Serial.println(tflOutputTensor->data.f[i], 6);
}
Serial.println();
return gestureIndex;
}
My problem right now is the incorporation of the tabs into the main code, and the fact that the compiler isn't accepting Rmodel.h as something that can be included. I think this should work, because the code I used for the .cpp is something I've used before with a model.h and it worked perfectly, so I'm trying to transfer it over, but as of right now I'm not having much success. Any thoughts?