Are you using Strings? They suck up memory. Also, .print("Failed to initialize Nicla!"); uses twenty-seven bytes of RAM, while .print(F("Failed to initialize Nicla!")); uses twenty-seven bytes less — the F() macro keeps the string literal in flash and saves a lot of RAM.
No Strings are used. I commented out all the print statements and am still getting the same error. Here is the whole code:
#include "Arduino_BHY2.h"
#include "Nicla_System.h"
#include "ArduinoBLE.h"
// #include "tflm_nicla.h"
#include "tensorflow/lite/micro/micro_interpreter.h"
#include "tensorflow/lite/micro/micro_mutable_op_resolver.h"
#include "tensorflow/lite/micro/system_setup.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/micro/micro_log.h"
#include "Arduino.h"
#include "model.h"
/**
* Important that the Arduino include comes last if on the Arduino platform, as it has an `abs()` function
* that will screw with the stdlib abs() function. If need you can use the following lines
* as well to redeclare the abs() function to be compatible
**/
#include "Arduino.h"
#ifdef ARDUINO
#define abs(x) ((x)>0?(x):-(x))
#endif
// Globals, used for compatibility with Arduino-style sketches.
// Kept in an anonymous namespace so they have internal linkage.
namespace {
// Flatbuffer model mapped from the byte array in model.h (set in setup()).
const tflite::Model* model = nullptr;
// Interpreter that runs the model; points at a function-local static in setup().
tflite::MicroInterpreter* interpreter = nullptr;
// Convenience pointers to the model's first input and output tensors.
TfLiteTensor* input = nullptr;
TfLiteTensor* output = nullptr;
// int inference_count = 0;
// Arena size just a round number. The exact arena usage can be determined
// using the RecordingMicroInterpreter.
constexpr int kTensorArenaSize = 13000; // in bytes;
// Keep aligned to 16 bytes for CMSIS
alignas(16) uint8_t tensor_arena[kTensorArenaSize];
} // namespace
// One-time initialization: serial link, BHY2 sensor stack, and the TFLite
// Micro interpreter (op resolver, arena allocation, tensor pointers).
void setup() {
// Baud rate set to 115200; the host-side script must use the same rate.
Serial.begin(115200);
// Bring up the Nicla's BHY2 sensor hub; the sketch is useless without it.
if (!BHY2.begin()) {
// Serial.println("Failed to initialize Nicla!");
while (1);  // halt here — nothing downstream can work
}
// set advertised local name and service UUID:
// while(!Serial);
// Clear the serial receive buffer, just in case bytes are left over
// from a previous session, so loop()'s framing stays aligned.
while (Serial.available() > 0) {
Serial.read();
}
tflite::InitializeTarget();
// Serial.println("TFlite initialized successfully!");
// Map the model flatbuffer compiled into model.h.
model = tflite::GetModel(g_model);
if (model->version() != TFLITE_SCHEMA_VERSION) {
// Serial.print("Model provided is schema version ");
// Serial.print(model->version());
// Serial.print(" not equal to supported version ");
// Serial.println(TFLITE_SCHEMA_VERSION);
// FIX: previously execution fell through and ran an incompatible model.
// Bail out instead; interpreter/input/output stay nullptr.
return;
}
// Register only the ops this model actually uses (keeps flash/RAM small).
// NOLINTNEXTLINE(runtime-global-variables)
static tflite::MicroMutableOpResolver<9> resolver;
resolver.AddConv2D();
resolver.AddExpandDims();
resolver.AddMaxPool2D();
resolver.AddPack();
resolver.AddReshape();
resolver.AddShape();
resolver.AddStridedSlice();
resolver.AddFullyConnected();
resolver.AddRelu();
// Build an interpreter to run the model with. Function-local static so it
// outlives setup() without heap allocation.
static tflite::MicroInterpreter static_interpreter(
model, resolver, tensor_arena, kTensorArenaSize);
interpreter = &static_interpreter;
// Allocate memory from the tensor_arena for the model's tensors.
// If this fails, kTensorArenaSize is likely too small for the model.
TfLiteStatus allocate_status = interpreter->AllocateTensors();
if (allocate_status != kTfLiteOk) {
// Serial.println("AllocateTensors() failed.");
return;  // leave input/output as nullptr; loop() will misbehave — check here first
}
// Obtain pointers to the model's input and output tensors.
input = interpreter->input(0);
output = interpreter->output(0);
// Debug prints of tensor dimensions were removed here; re-add
// Serial.print(input->dims->data[i]) style loops when diagnosing shapes.
}
// Accumulates 9 chunks x 45 floats = one full 405-float model input window.
float storedFloats[405];
// float predefinedFloats[405];
// Next free slot in storedFloats (0, 45, 90, ... 405).
int storedFloatsIndex = 0;
int turnCounter = 0; // Counter to keep track of turns (chunks received so far)
// Main protocol loop: the host streams 45 floats (180 bytes) per turn;
// after 9 turns (405 floats) one inference runs and the output tensor's
// floats are written back over serial.
void loop() {
if (Serial.available() >= 180) { // waiting for 180 bytes (45 floats)
float receivedFloats[45];
// Read 180 bytes into the float array (raw little-endian floats).
Serial.readBytes((char*)&receivedFloats, sizeof(receivedFloats));
// Ensure not to overflow the buffer and store the received floats.
if (storedFloatsIndex + 45 <= 405) {
memcpy(&storedFloats[storedFloatsIndex], &receivedFloats, sizeof(receivedFloats));
storedFloatsIndex += 45; // Update index
}
// Increment the turnCounter
turnCounter++;
// If this was the 9th turn, run inference and send the result back.
if (turnCounter == 9) {
// BUG FIX: input->bytes is a BYTE count, not a float count. The old
// loop copied input->bytes floats — four times too many — overrunning
// both the input tensor and storedFloats[] (a likely crash cause).
size_t numInputFloats = input->bytes / sizeof(float);
if (numInputFloats > 405) {
numInputFloats = 405; // never read past storedFloats
}
for (size_t i = 0; i < numInputFloats; i++) {
input->data.f[i] = storedFloats[i];
// input->data.f[i] = predefinedFloats[i];
}
TfLiteStatus invoke_status = interpreter->Invoke();
if (invoke_status != kTfLiteOk) {
// Serial.println("Invoke failed!");
// NOTE(review): we still send the (stale) output below so the host's
// request/response framing stays in sync — confirm this is intended.
}
// Stream the output tensor back directly; this replaces the previous
// non-standard variable-length array copy (float output_data[N]).
// Assumes the output tensor has >= 3 dims with the element count at
// dims->data[2], as in the original code — TODO confirm model shape.
const int N = output->dims->data[2];
Serial.write((char*)output->data.f, N * sizeof(float));
// Reset the turn counter after sending the response.
turnCounter = 0;
}
// Window complete: restart accumulation for the next 405-float window.
if (storedFloatsIndex == 405) {
storedFloatsIndex = 0;
}
}
}
I am not familiar with the Nicla, but looking at the messages it appears there is a RAM shortage. Switch the board target to the Mega and compile it; it will not run there, but that processor has much more memory, so if it compiles it is almost certainly a RAM shortage problem. Your libraries also use RAM. You can add an external memory device called a FRAM module; it will not be as fast as the internal RAM because of the way you have to access it via I2C or SPI, but a 32K x 8 part is less than $10. I use them in both module form and chip form by swapping the EEPROM out on the RTC module.