Region 'FLASH' overflowed by 7566496 bytes

I was trying to upload this image classification code generated by TinyML into Arduino Nano 33 BLE board and am encountering this error.

The following code has been generated:

/* Edge Impulse ingestion SDK
 * Copyright (c) 2022 EdgeImpulse Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

/* Includes ---------------------------------------------------------------- */
#include <rahulgb-project-1_inferencing.h>
#include <Arduino_OV767X.h>  //Click here to get the library: http://librarymanager/All#Arduino_OV767X

#include <stdint.h>
#include <stdlib.h>

/* Constant variables ------------------------------------------------------- */
// Raw capture resolution delivered by the camera driver (QQVGA).
#define EI_CAMERA_RAW_FRAME_BUFFER_COLS 160
#define EI_CAMERA_RAW_FRAME_BUFFER_ROWS 120

// Round a pointer up to the next 4-byte boundary (identity when already
// aligned).  NOTE(review): the argument is not parenthesized in the
// expansion — pass a simple variable, not a compound expression.
#define DWORD_ALIGN_PTR(a) ((a & 0x3) ? (((uintptr_t)a + 0x4) & ~(uintptr_t)0x3) : a)

/*
 ** NOTE: If you run into TFLite arena allocation issue.
 **
 ** This may be due to dynamic memory fragmentation.
 ** Try defining "-DEI_CLASSIFIER_ALLOCATION_STATIC" in boards.local.txt (create
 ** if it doesn't exist) and copy this file to
 ** `<ARDUINO_CORE_INSTALL_PATH>/arduino/hardware/<mbed_core>/<core_version>/`.
 **
 ** See
 ** (https://support.arduino.cc/hc/en-us/articles/360012076960-Where-are-the-installed-cores-located-)
 ** to find where Arduino installs cores on your machine.
 **
 ** If the problem persists then there's not enough memory for this model and application.
 */

/* Edge Impulse ------------------------------------------------------------- */
// Subclass of the OV767X driver: captures the full VGA sensor frame in
// horizontal strips and downscales each strip on the fly, so the whole
// raw frame never has to fit in RAM (see OV7675::readFrame below).
class OV7675 : public OV767X {
public:
  // Start the sensor (always at full VGA internally) and cache the
  // geometry/pin state needed for strip capture.
  int begin(int resolution, int format, int fps);
  // Capture one frame into `buffer` as RGB565, resized per the
  // file-scope resize_col_sz / resize_row_sz globals.
  void readFrame(void *buffer);

private:
  // Cached pin numbers (configured in begin()).
  int vsyncPin;
  int hrefPin;
  int pclkPin;
  int xclkPin;

  // Direct GPIO input registers + bit masks for fast polled bit-banging.
  volatile uint32_t *vsyncPort;
  uint32_t vsyncMask;
  volatile uint32_t *hrefPort;
  uint32_t hrefMask;
  volatile uint32_t *pclkPort;
  uint32_t pclkMask;

  // Full-sensor geometry recorded from the base driver in begin().
  uint16_t width;
  uint16_t height;
  uint8_t bytes_per_pixel;
  uint16_t bytes_per_row;
  // Strip buffer state: buf_rows raw sensor rows are read per iteration and
  // downscaled to resize_height output rows.
  uint8_t buf_rows;
  uint16_t buf_size;
  uint8_t resize_height;
  uint8_t *raw_buf;   // dword-aligned view into buf_mem
  void *buf_mem;      // raw allocation backing raw_buf (may be unaligned)
  uint8_t *intrp_buf;
  uint8_t *buf_limit;

  void readBuf();
  int allocate_scratch_buffs();
  int deallocate_scratch_buffs();
};

// Width/height pair describing one supported resize target resolution.
typedef struct {
  size_t width;   // columns, in pixels
  size_t height;  // rows, in pixels
} ei_device_resize_resolutions_t;

/**
 * @brief      Check if new serial data is available
 *
 * @return     Returns number of available bytes
 */
int ei_get_serial_available(void) {
  // Delegate to the Arduino Serial driver's receive-buffer count.
  int pending = Serial.available();
  return pending;
}

/**
 * @brief      Get next available byte
 *
 * @return     byte
 */
char ei_get_serial_byte(void) {
  // Serial.read() yields an int (-1 when the buffer is empty); narrow it to
  // char exactly as the original implicit conversion did.
  int raw = Serial.read();
  return (char)raw;
}

/* Private variables ------------------------------------------------------- */
static OV7675 Cam;                   // camera driver instance
static bool is_initialised = false;  // guards double init/deinit of Cam

/*
** @brief points to the output of the capture
** (written by ei_camera_capture, read by ei_camera_cutout_get_data)
*/
static uint8_t *ei_camera_capture_out = NULL;
// File-scope resize state: written by calculate_resize_dimensions() from
// ei_camera_capture(), read by OV7675::readFrame()/allocate_scratch_buffs().
uint32_t resize_col_sz;
uint32_t resize_row_sz;
bool do_resize = false;
bool do_crop = false;

static bool debug_nn = false;  // Set this to true to see e.g. features generated from the raw signal

/* Function definitions ------------------------------------------------------- */
// Forward declarations for the camera helpers defined further down.
bool ei_camera_init(void);
void ei_camera_deinit(void);
bool ei_camera_capture(uint32_t img_width, uint32_t img_height, uint8_t *out_buf);
// Picks the smallest supported resize target containing out_width x out_height.
int calculate_resize_dimensions(uint32_t out_width, uint32_t out_height, uint32_t *resize_col_sz, uint32_t *resize_row_sz, bool *do_resize);
// Bilinear resize / region crop for 8-bpp and RGB565 (16-bpp) images.
void resizeImage(int srcWidth, int srcHeight, uint8_t *srcImage, int dstWidth, int dstHeight, uint8_t *dstImage, int iBpp);
void cropImage(int srcWidth, int srcHeight, uint8_t *srcImage, int startX, int startY, int dstWidth, int dstHeight, uint8_t *dstImage, int iBpp);

/**
* @brief      Arduino setup function
*/
void setup() {
  // put your setup code here, to run once:
  Serial.begin(115200);
  // comment out the below line to cancel the wait for USB connection (needed for native USB)
  while (!Serial)
    ;
  Serial.println("Edge Impulse Inferencing Demo");

  // summary of inferencing settings (from model_metadata.h)
  ei_printf("Inferencing settings:\n");
  ei_printf("\tImage resolution: %dx%d\n", EI_CLASSIFIER_INPUT_WIDTH, EI_CLASSIFIER_INPUT_HEIGHT);
  ei_printf("\tFrame size: %d\n", EI_CLASSIFIER_DSP_INPUT_FRAME_SIZE);
  ei_printf("\tNo. of classes: %d\n", sizeof(ei_classifier_inferencing_categories) / sizeof(ei_classifier_inferencing_categories[0]));
}

/**
* @brief      Get data and run inferencing
*
* @param[in]  debug  Get debug info if true
*/
void loop() {
  bool stop_inferencing = false;

  // Capture -> classify -> print loop; runs until the user sends 'b' or a
  // step fails.  Any `break` falls through to ei_camera_deinit() below.
  while (stop_inferencing == false) {
    ei_printf("\nStarting inferencing in 2 seconds...\n");

    // instead of wait_ms, we'll wait on the signal, this allows threads to cancel us...
    if (ei_sleep(2000) != EI_IMPULSE_OK) {
      break;
    }

    ei_printf("Taking photo...\n");

    if (ei_camera_init() == false) {
      ei_printf("ERR: Failed to initialize image sensor\r\n");
      break;
    }

    // choose resize dimensions
    // NOTE(review): these locals shadow the file-scope resize_col_sz /
    // resize_row_sz / do_resize globals; here they are only used to size the
    // snapshot buffer below.
    uint32_t resize_col_sz;
    uint32_t resize_row_sz;
    bool do_resize = false;
    int res = calculate_resize_dimensions(EI_CLASSIFIER_INPUT_WIDTH, EI_CLASSIFIER_INPUT_HEIGHT, &resize_col_sz, &resize_row_sz, &do_resize);
    if (res) {
      ei_printf("ERR: Failed to calculate resize dimensions (%d)\r\n", res);
      break;
    }

    // Frame buffer for the capture: 2 bytes per pixel (RGB565).
    // Owned by this iteration; freed on every exit path below.
    void *snapshot_mem = NULL;
    uint8_t *snapshot_buf = NULL;
    snapshot_mem = ei_malloc(resize_col_sz * resize_row_sz * 2);
    if (snapshot_mem == NULL) {
      ei_printf("failed to create snapshot_mem\r\n");
      break;
    }
    snapshot_buf = (uint8_t *)DWORD_ALIGN_PTR((uintptr_t)snapshot_mem);

    if (ei_camera_capture(EI_CLASSIFIER_INPUT_WIDTH, EI_CLASSIFIER_INPUT_HEIGHT, snapshot_buf) == false) {
      ei_printf("Failed to capture image\r\n");
      if (snapshot_mem) ei_free(snapshot_mem);
      break;
    }

    // Wrap the captured frame as a signal; get_data converts RGB565 into
    // packed-RGB floats on the fly for the classifier.
    ei::signal_t signal;
    signal.total_length = EI_CLASSIFIER_INPUT_WIDTH * EI_CLASSIFIER_INPUT_HEIGHT;
    signal.get_data = &ei_camera_cutout_get_data;

    // run the impulse: DSP, neural network and the Anomaly algorithm
    ei_impulse_result_t result = { 0 };

    EI_IMPULSE_ERROR ei_error = run_classifier(&signal, &result, debug_nn);
    if (ei_error != EI_IMPULSE_OK) {
      ei_printf("Failed to run impulse (%d)\n", ei_error);
      ei_free(snapshot_mem);
      break;
    }

    // print the predictions
    ei_printf("Predictions (DSP: %d ms., Classification: %d ms., Anomaly: %d ms.): \n",
              result.timing.dsp, result.timing.classification, result.timing.anomaly);
#if EI_CLASSIFIER_OBJECT_DETECTION == 1
    // Object-detection models: print every bounding box with a nonzero score.
    bool bb_found = result.bounding_boxes[0].value > 0;
    for (size_t ix = 0; ix < result.bounding_boxes_count; ix++) {
      auto bb = result.bounding_boxes[ix];
      if (bb.value == 0) {
        continue;
      }

      ei_printf("    %s (%f) [ x: %u, y: %u, width: %u, height: %u ]\n", bb.label, bb.value, bb.x, bb.y, bb.width, bb.height);
    }

    if (!bb_found) {
      ei_printf("    No objects found\n");
    }
#else
    // Classification models: print each class score.
    for (size_t ix = 0; ix < EI_CLASSIFIER_LABEL_COUNT; ix++) {
      ei_printf("    %s: %.5f\n", result.classification[ix].label,
                result.classification[ix].value);
    }
#if EI_CLASSIFIER_HAS_ANOMALY == 1
    ei_printf("    anomaly score: %.3f\n", result.anomaly);
#endif
#endif

    // Drain any pending serial input; a 'b' stops the loop after this pass.
    while (ei_get_serial_available() > 0) {
      if (ei_get_serial_byte() == 'b') {
        ei_printf("Inferencing stopped by user\r\n");
        stop_inferencing = true;
      }
    }
    if (snapshot_mem) ei_free(snapshot_mem);
  }
  // Shut the camera down when the loop exits (user stop or error).
  ei_camera_deinit();
}

/**
 * @brief      Determine whether to resize and to which dimension
 *
 * @param[in]  out_width     width of output image
 * @param[in]  out_height    height of output image
 * @param[out] resize_col_sz       pointer to frame buffer's column/width value
 * @param[out] resize_row_sz       pointer to frame buffer's rows/height value
 * @param[out] do_resize     returns whether to resize (or not)
 *
 */
int calculate_resize_dimensions(uint32_t out_width, uint32_t out_height, uint32_t *resize_col_sz, uint32_t *resize_row_sz, bool *do_resize) {
  // Candidate resize targets in ascending size; the first one large enough
  // to contain the requested output wins.
  // Fix: the original declared `const ei_device_resize_resolutions_t
  // list[list_size]` with a runtime `list_size` — a variable-length array,
  // which is non-standard in C++ and rebuilt on the stack every call.  A
  // static const table is standard-conforming and lives in flash/rodata.
  static const ei_device_resize_resolutions_t list[] = { { 42, 32 }, { 128, 96 } };
  const size_t list_size = sizeof(list) / sizeof(list[0]);

  // (default) conditions: fall back to the raw QQVGA frame, no resize
  *resize_col_sz = EI_CAMERA_RAW_FRAME_BUFFER_COLS;
  *resize_row_sz = EI_CAMERA_RAW_FRAME_BUFFER_ROWS;
  *do_resize = false;

  for (size_t ix = 0; ix < list_size; ix++) {
    if ((out_width <= list[ix].width) && (out_height <= list[ix].height)) {
      *resize_col_sz = list[ix].width;
      *resize_row_sz = list[ix].height;
      *do_resize = true;
      break;
    }
  }

  // Always succeeds; the int return is kept for API compatibility.
  return 0;
}

/**
 * @brief   Setup image sensor & start streaming
 *
 * @retval  false if initialisation failed
 */
bool ei_camera_init(void) {
  // Idempotent: repeated calls after a successful init are no-ops.
  if (is_initialised) {
    return true;
  }

  // VGA downsampled to QQVGA (OV7675), RGB565, 1 fps
  bool started = Cam.begin(QQVGA, RGB565, 1);
  if (!started) {
    ei_printf("ERR: Failed to initialize camera\r\n");
    return false;
  }

  is_initialised = true;
  return true;
}

/**
 * @brief      Stop streaming of sensor data
 */
void ei_camera_deinit(void) {
  // Nothing to do unless the camera was actually started.
  if (!is_initialised) {
    return;
  }
  Cam.end();
  is_initialised = false;
}

/**
 * @brief      Capture, rescale and crop image
 *
 * @param[in]  img_width     width of output image
 * @param[in]  img_height    height of output image
 * @param[in]  out_buf       pointer to store output image, NULL may be used
 *                           when full resolution is expected.
 *
 * @retval     false if not initialised, image captured, rescaled or cropped failed
 *
 */
bool ei_camera_capture(uint32_t img_width, uint32_t img_height, uint8_t *out_buf) {
  if (!is_initialised) {
    ei_printf("ERR: Camera is not initialized\r\n");
    return false;
  }

  if (!out_buf) {
    ei_printf("ERR: invalid parameters\r\n");
    return false;
  }

  // Choose resize dimensions.  This writes the FILE-SCOPE resize_col_sz /
  // resize_row_sz globals that OV7675::readFrame reads to scale each strip.
  int res = calculate_resize_dimensions(img_width, img_height, &resize_col_sz, &resize_row_sz, &do_resize);
  if (res) {
    ei_printf("ERR: Failed to calculate resize dimensions (%d)\r\n", res);
    return false;
  }

  // Fix: re-derive do_crop from scratch on every capture.  Previously the
  // global was only ever set to true and never cleared, so once one capture
  // required cropping every subsequent frame was cropped as well, even when
  // its dimensions matched the resize target exactly.
  do_crop = (img_width != resize_col_sz) || (img_height != resize_row_sz);

  Cam.readFrame(out_buf);  // captures image and resizes

  if (do_crop) {
    // Center-crop the resized frame down to the requested dimensions.
    // Source and destination are the same buffer: cropImage copies
    // row-by-row toward lower addresses, which is safe here.
    uint32_t crop_col_start = (resize_col_sz - img_width) / 2;
    uint32_t crop_row_start = (resize_row_sz - img_height) / 2;

    //ei_printf("crop cols: %d, rows: %d\r\n", img_width, img_height);
    cropImage(resize_col_sz, resize_row_sz,
              out_buf,
              crop_col_start, crop_row_start,
              img_width, img_height,
              out_buf,
              16);  // 16 bpp = RGB565
  }

  // Publish the finished frame for ei_camera_cutout_get_data().
  ei_camera_capture_out = out_buf;

  return true;
}

/**
 * @brief      Convert RGB565 raw camera buffer to RGB888
 *
 * @param[in]   offset       pixel offset of raw buffer
 * @param[in]   length       number of pixels to convert
 * @param[out]  out_buf      pointer to store output image
 */
int ei_camera_cutout_get_data(size_t offset, size_t length, float *out_ptr) {
  // Each pixel occupies two bytes (RGB565, high byte first) in the capture
  // buffer published by ei_camera_capture().
  const uint8_t *src = &ei_camera_capture_out[offset * 2];

  for (size_t ix = 0; ix < length; ix++) {
    // Reassemble the 16-bit RGB565 value from the two raw bytes.
    uint16_t pixel = (ei_camera_capture_out == NULL) ? 0 : 0;  // placeholder removed below
    pixel = (uint16_t)((src[0] << 8) | src[1]);
    src += 2;

    // Expand the 5/6/5-bit channels to 8 bits each.
    uint8_t r = (uint8_t)(((pixel >> 11) & 0x1f) << 3);
    uint8_t g = (uint8_t)(((pixel >> 5) & 0x3f) << 2);
    uint8_t b = (uint8_t)((pixel & 0x1f) << 3);

    // Pack as 0xRRGGBB and hand it to the classifier as a float.
    out_ptr[ix] = (float)((r << 16) + (g << 8) + b);
  }

  // and done!
  return 0;
}

// This include file works in the Arduino environment
// to define the Cortex-M intrinsics
#ifdef __ARM_FEATURE_SIMD32
#include <device.h>
#endif
// This needs to be < 16 or it won't fit. Cortex-M4 only has SIMD for signed multiplies
#define FRAC_BITS 14
#define FRAC_VAL (1 << FRAC_BITS)
#define FRAC_MASK (FRAC_VAL - 1)
//
// Resize
//
// Assumes that the destination buffer is dword-aligned
// Can be used to resize the image smaller or larger
// If resizing much smaller than 1/3 size, then a more robust algorithm should average all of the pixels
// This algorithm uses bilinear interpolation - averages a 2x2 region to generate each new pixel
//
// Optimized for 32-bit MCUs
// supports 8 and 16-bit pixels
/**
 * @brief  Bilinear-resize an image (2x2 neighborhood per output pixel).
 *
 * @param[in]  srcWidth,srcHeight  source dimensions in pixels
 * @param[in]  srcImage            source pixel data
 * @param[in]  dstWidth,dstHeight  destination dimensions in pixels
 * @param[out] dstImage            destination pixel data
 * @param[in]  iBpp                8 (grayscale) or 16 (RGB565); any other
 *                                 value makes the call a no-op
 *
 * Fixed-point arithmetic with FRAC_BITS fractional bits; uses the __SMLAD
 * dual-16-bit multiply-accumulate on Cortex-M4/M7, plain C otherwise.
 * 16-bpp pixels are byte-swapped in/out because the buffer stores RGB565
 * big-endian (see ei_camera_cutout_get_data).
 * NOTE(review): the tx+1 / ty+1 taps can read one pixel past the last
 * source column/row — confirm callers tolerate or pad for this.
 */
void resizeImage(int srcWidth, int srcHeight, uint8_t *srcImage, int dstWidth, int dstHeight, uint8_t *dstImage, int iBpp) {
  uint32_t src_x_accum, src_y_accum;  // accumulators and fractions for scaling the image
  uint32_t x_frac, nx_frac, y_frac, ny_frac;
  int x, y, ty, tx;

  if (iBpp != 8 && iBpp != 16)
    return;
  src_y_accum = FRAC_VAL / 2;  // start at 1/2 pixel in to account for integer downsampling which might miss pixels
  // Fixed-point step per destination pixel, in source-pixel units.
  const uint32_t src_x_frac = (srcWidth * FRAC_VAL) / dstWidth;
  const uint32_t src_y_frac = (srcHeight * FRAC_VAL) / dstHeight;
  // Channel masks for two RGB565 pixels packed in one 32-bit word.
  const uint32_t r_mask = 0xf800f800;
  const uint32_t g_mask = 0x07e007e0;
  const uint32_t b_mask = 0x001f001f;
  uint8_t *s, *d;
  uint16_t *s16, *d16;
  uint32_t x_frac2, y_frac2;  // for 16-bit SIMD
  for (y = 0; y < dstHeight; y++) {
    ty = src_y_accum >> FRAC_BITS;  // src y
    y_frac = src_y_accum & FRAC_MASK;
    src_y_accum += src_y_frac;
    ny_frac = FRAC_VAL - y_frac;         // y fraction and 1.0 - y fraction
    y_frac2 = ny_frac | (y_frac << 16);  // for M4/M4 SIMD
    s = &srcImage[ty * srcWidth];
    s16 = (uint16_t *)&srcImage[ty * srcWidth * 2];
    d = &dstImage[y * dstWidth];
    d16 = (uint16_t *)&dstImage[y * dstWidth * 2];
    src_x_accum = FRAC_VAL / 2;  // start at 1/2 pixel in to account for integer downsampling which might miss pixels
    if (iBpp == 8) {
      for (x = 0; x < dstWidth; x++) {
        uint32_t tx, p00, p01, p10, p11;
        tx = src_x_accum >> FRAC_BITS;
        x_frac = src_x_accum & FRAC_MASK;
        nx_frac = FRAC_VAL - x_frac;  // x fraction and 1.0 - x fraction
        x_frac2 = nx_frac | (x_frac << 16);
        src_x_accum += src_x_frac;
        // 2x2 neighborhood: p00/p10 top pair, p01/p11 bottom pair.
        p00 = s[tx];
        p10 = s[tx + 1];
        p01 = s[tx + srcWidth];
        p11 = s[tx + srcWidth + 1];
#ifdef __ARM_FEATURE_SIMD32
        p00 = __SMLAD(p00 | (p10 << 16), x_frac2, FRAC_VAL / 2) >> FRAC_BITS;  // top line
        p01 = __SMLAD(p01 | (p11 << 16), x_frac2, FRAC_VAL / 2) >> FRAC_BITS;  // bottom line
        p00 = __SMLAD(p00 | (p01 << 16), y_frac2, FRAC_VAL / 2) >> FRAC_BITS;  // combine
#else                                                                          // generic C code
        p00 = ((p00 * nx_frac) + (p10 * x_frac) + FRAC_VAL / 2) >> FRAC_BITS;  // top line
        p01 = ((p01 * nx_frac) + (p11 * x_frac) + FRAC_VAL / 2) >> FRAC_BITS;  // bottom line
        p00 = ((p00 * ny_frac) + (p01 * y_frac) + FRAC_VAL / 2) >> FRAC_BITS;  // combine top + bottom
#endif                                                                         // Cortex-M4/M7
        *d++ = (uint8_t)p00;                                                   // store new pixel
      }                                                                        // for x
    }                                                                          // 8-bpp
    else {                                                                     // RGB565
      for (x = 0; x < dstWidth; x++) {
        uint32_t tx, p00, p01, p10, p11;
        uint32_t r00, r01, r10, r11, g00, g01, g10, g11, b00, b01, b10, b11;
        tx = src_x_accum >> FRAC_BITS;
        x_frac = src_x_accum & FRAC_MASK;
        nx_frac = FRAC_VAL - x_frac;  // x fraction and 1.0 - x fraction
        x_frac2 = nx_frac | (x_frac << 16);
        src_x_accum += src_x_frac;
        // Byte-swap each RGB565 sample back to native order before the math.
        p00 = __builtin_bswap16(s16[tx]);
        p10 = __builtin_bswap16(s16[tx + 1]);
        p01 = __builtin_bswap16(s16[tx + srcWidth]);
        p11 = __builtin_bswap16(s16[tx + srcWidth + 1]);
#ifdef __ARM_FEATURE_SIMD32
        {
          // Pack top (p00|p10) and bottom (p01|p11) pairs, split into
          // channels, interpolate each channel with dual-16-bit MACs.
          // Red is pre-shifted right by 1 to keep the products signed-safe.
          p00 |= (p10 << 16);
          p01 |= (p11 << 16);
          r00 = (p00 & r_mask) >> 1;
          g00 = p00 & g_mask;
          b00 = p00 & b_mask;
          r01 = (p01 & r_mask) >> 1;
          g01 = p01 & g_mask;
          b01 = p01 & b_mask;
          r00 = __SMLAD(r00, x_frac2, FRAC_VAL / 2) >> FRAC_BITS;                // top line
          r01 = __SMLAD(r01, x_frac2, FRAC_VAL / 2) >> FRAC_BITS;                // bottom line
          r00 = __SMLAD(r00 | (r01 << 16), y_frac2, FRAC_VAL / 2) >> FRAC_BITS;  // combine
          g00 = __SMLAD(g00, x_frac2, FRAC_VAL / 2) >> FRAC_BITS;                // top line
          g01 = __SMLAD(g01, x_frac2, FRAC_VAL / 2) >> FRAC_BITS;                // bottom line
          g00 = __SMLAD(g00 | (g01 << 16), y_frac2, FRAC_VAL / 2) >> FRAC_BITS;  // combine
          b00 = __SMLAD(b00, x_frac2, FRAC_VAL / 2) >> FRAC_BITS;                // top line
          b01 = __SMLAD(b01, x_frac2, FRAC_VAL / 2) >> FRAC_BITS;                // bottom line
          b00 = __SMLAD(b00 | (b01 << 16), y_frac2, FRAC_VAL / 2) >> FRAC_BITS;  // combine
        }
#else   // generic C code
        {
          r00 = (p00 & r_mask) >> 1;
          g00 = p00 & g_mask;
          b00 = p00 & b_mask;
          r10 = (p10 & r_mask) >> 1;
          g10 = p10 & g_mask;
          b10 = p10 & b_mask;
          r01 = (p01 & r_mask) >> 1;
          g01 = p01 & g_mask;
          b01 = p01 & b_mask;
          r11 = (p11 & r_mask) >> 1;
          g11 = p11 & g_mask;
          b11 = p11 & b_mask;
          r00 = ((r00 * nx_frac) + (r10 * x_frac) + FRAC_VAL / 2) >> FRAC_BITS;  // top line
          r01 = ((r01 * nx_frac) + (r11 * x_frac) + FRAC_VAL / 2) >> FRAC_BITS;  // bottom line
          r00 = ((r00 * ny_frac) + (r01 * y_frac) + FRAC_VAL / 2) >> FRAC_BITS;  // combine top + bottom
          g00 = ((g00 * nx_frac) + (g10 * x_frac) + FRAC_VAL / 2) >> FRAC_BITS;  // top line
          g01 = ((g01 * nx_frac) + (g11 * x_frac) + FRAC_VAL / 2) >> FRAC_BITS;  // bottom line
          g00 = ((g00 * ny_frac) + (g01 * y_frac) + FRAC_VAL / 2) >> FRAC_BITS;  // combine top + bottom
          b00 = ((b00 * nx_frac) + (b10 * x_frac) + FRAC_VAL / 2) >> FRAC_BITS;  // top line
          b01 = ((b01 * nx_frac) + (b11 * x_frac) + FRAC_VAL / 2) >> FRAC_BITS;  // bottom line
          b00 = ((b00 * ny_frac) + (b01 * y_frac) + FRAC_VAL / 2) >> FRAC_BITS;  // combine top + bottom
        }
#endif  // Cortex-M4/M7
        // Undo the red pre-shift, re-mask each channel, repack and byte-swap
        // back to the buffer's big-endian RGB565 layout.
        r00 = (r00 << 1) & r_mask;
        g00 = g00 & g_mask;
        b00 = b00 & b_mask;
        p00 = (r00 | g00 | b00);                    // re-combine color components
        *d16++ = (uint16_t)__builtin_bswap16(p00);  // store new pixel
      }                                             // for x
    }                                               // 16-bpp
  }                                                 // for y
} /* resizeImage() */
//
// Crop
//
// Assumes that the destination buffer is dword-aligned
// optimized for 32-bit MCUs
// Supports 8 and 16-bit pixels
//
/**
 * @brief  Copy the rectangle (startX, startY, dstWidth, dstHeight) of
 *         srcImage into dstImage.
 *
 * @param[in]  srcWidth,srcHeight  source dimensions in pixels
 * @param[in]  srcImage            source pixel data
 * @param[in]  startX,startY       top-left corner of the crop region
 * @param[in]  dstWidth,dstHeight  crop-region / destination dimensions
 * @param[out] dstImage            destination pixel data (may equal srcImage
 *                                 when cropping in place toward lower offsets)
 * @param[in]  iBpp                8 or 16 bits per pixel; anything else or an
 *                                 out-of-bounds region makes the call a no-op
 *
 * Rows are copied 32 bits at a time when both pointers are suitably
 * aligned, falling back to byte/halfword copies otherwise.
 */
void cropImage(int srcWidth, int srcHeight, uint8_t *srcImage, int startX, int startY, int dstWidth, int dstHeight, uint8_t *dstImage, int iBpp) {
  uint32_t *s32, *d32;
  int x, y;

  if (startX < 0 || startX >= srcWidth || startY < 0 || startY >= srcHeight || (startX + dstWidth) > srcWidth || (startY + dstHeight) > srcHeight)
    return;  // invalid parameters
  if (iBpp != 8 && iBpp != 16)
    return;

  if (iBpp == 8) {
    uint8_t *s, *d;
    for (y = 0; y < dstHeight; y++) {
      s = &srcImage[srcWidth * (y + startY) + startX];
      d = &dstImage[(dstWidth * y)];
      x = 0;
      if ((intptr_t)s & 3 || (intptr_t)d & 3) {  // either src or dst pointer is not aligned
        for (; x < dstWidth; x++) {
          *d++ = *s++;  // have to do it byte-by-byte
        }
      } else {
        // move 4 bytes at a time if aligned or alignment not enforced
        s32 = (uint32_t *)s;
        d32 = (uint32_t *)d;
        for (; x < dstWidth - 3; x += 4) {
          *d32++ = *s32++;
        }
        // any remaining stragglers?
        s = (uint8_t *)s32;
        d = (uint8_t *)d32;
        for (; x < dstWidth; x++) {
          *d++ = *s++;
        }
      }
    }  // for y
  }    // 8-bpp
  else {
    uint16_t *s, *d;
    for (y = 0; y < dstHeight; y++) {
      s = (uint16_t *)&srcImage[2 * srcWidth * (y + startY) + startX * 2];
      d = (uint16_t *)&dstImage[(dstWidth * y * 2)];
      x = 0;
      if ((intptr_t)s & 2 || (intptr_t)d & 2) {  // either src or dst pointer is not aligned
        for (; x < dstWidth; x++) {
          *d++ = *s++;  // have to do it 16-bits at a time
        }
      } else {
        // move 4 bytes at a time if aligned or alignment not enforced
        s32 = (uint32_t *)s;
        d32 = (uint32_t *)d;
        for (; x < dstWidth - 1; x += 2) {  // we can move 2 pixels at a time
          *d32++ = *s32++;
        }
        // any remaining stragglers?
        s = (uint16_t *)s32;
        d = (uint16_t *)d32;
        for (; x < dstWidth; x++) {
          *d++ = *s++;
        }
      }
    }  // for y
  }    // 16-bpp case
} /* cropImage() */

// This sketch only makes sense for camera models — fail the build otherwise.
#if !defined(EI_CLASSIFIER_SENSOR) || EI_CLASSIFIER_SENSOR != EI_CLASSIFIER_SENSOR_CAMERA
#error "Invalid model for current sensor"
#endif

// OV767X camera library override
#include <Arduino.h>
#include <Wire.h>

// nRF52840 helpers: map an Arduino pin to its GPIO bit mask and to the
// port's input register (P0 or P1) for direct polled reads.
#define digitalPinToBitMask(P) (1 << (digitalPinToPinName(P) % 32))
#define portInputRegister(P) ((P == 0) ? &NRF_P0->IN : &NRF_P1->IN)

//
// OV7675::begin()
//
// Extends the OV767X library function. Some private variables are needed
// to use the OV7675::readFrame function.
//
int OV7675::begin(int resolution, int format, int fps) {
  // Configure the sync/clock pins for direct polling in readFrame/readBuf.
  pinMode(OV7670_VSYNC, INPUT);
  pinMode(OV7670_HREF, INPUT);
  pinMode(OV7670_PLK, INPUT);
  pinMode(OV7670_XCLK, OUTPUT);

  // Cache each pin's input register + bit mask so the capture loops can
  // poll raw GPIO instead of calling digitalRead().
  vsyncPort = portInputRegister(digitalPinToPort(OV7670_VSYNC));
  vsyncMask = digitalPinToBitMask(OV7670_VSYNC);
  hrefPort = portInputRegister(digitalPinToPort(OV7670_HREF));
  hrefMask = digitalPinToBitMask(OV7670_HREF);
  pclkPort = portInputRegister(digitalPinToPort(OV7670_PLK));
  pclkMask = digitalPinToBitMask(OV7670_PLK);

  // init driver to use full image sensor size
  // NOTE(review): the `resolution` parameter is ignored — the base driver is
  // always started at VGA and readFrame() downscales in software.
  bool ret = OV767X::begin(VGA, format, fps);
  width = OV767X::width();    // full sensor width
  height = OV767X::height();  // full sensor height
  bytes_per_pixel = OV767X::bytesPerPixel();
  bytes_per_row = width * bytes_per_pixel;  // each pixel is 2 bytes
  resize_height = 2;                        // output rows produced per captured strip

  // Strip buffers are (re)allocated per frame in readFrame().
  buf_mem = NULL;
  raw_buf = NULL;
  intrp_buf = NULL;
  //allocate_scratch_buffs();

  return ret;
} /* OV7675::begin() */

//
// Allocate the strip buffer used by readFrame(): enough raw sensor rows to
// produce `resize_height` output rows at the current resize_row_sz.
// Returns 0 on success, -1 on allocation failure.
//
// Fix: the failure path used to `return false`, which for an int-returning
// function is 0 — identical to the success value, so callers could never
// detect an out-of-memory condition.
//
int OV7675::allocate_scratch_buffs() {
  //ei_printf("allocating buffers..\r\n");
  // Raw rows per strip (depends on the file-scope resize_row_sz global).
  buf_rows = height / resize_row_sz * resize_height;
  buf_size = bytes_per_row * buf_rows;

  buf_mem = ei_malloc(buf_size);
  if (buf_mem == NULL) {
    ei_printf("failed to create buf_mem\r\n");
    return -1;  // was `return false` (== 0), indistinguishable from success
  }
  // NOTE(review): DWORD_ALIGN_PTR may advance up to 4 bytes into the
  // allocation without buf_size being padded — confirm ei_malloc returns
  // 4-byte-aligned blocks (in which case this is a no-op).
  raw_buf = (uint8_t *)DWORD_ALIGN_PTR((uintptr_t)buf_mem);

  //ei_printf("allocating buffers OK\r\n");
  return 0;
}

int OV7675::deallocate_scratch_buffs() {
  //ei_printf("deallocating buffers...\r\n");
  ei_free(buf_mem);
  buf_mem = NULL;

  //ei_printf("deallocating buffers OK\r\n");
  return 0;
}

//
// OV7675::readFrame()
//
// Overrides the OV767X library function. Fixes the camera output to be
// a far more desirable image. This image utilizes the full sensor size
// and has the correct aspect ratio. Since there is limited memory on the
// Nano we bring in only part of the entire sensor at a time and then
// interpolate to a lower resolution.
//
void OV7675::readFrame(void *buffer) {
  allocate_scratch_buffs();

  uint8_t *out = (uint8_t *)buffer;
  noInterrupts();

  // Falling edge indicates start of frame
  while ((*vsyncPort & vsyncMask) == 0)
    ;  // wait for HIGH
  while ((*vsyncPort & vsyncMask) != 0)
    ;  // wait for LOW

  int out_row = 0;
  for (int raw_height = 0; raw_height < height; raw_height += buf_rows) {
    // read in 640xbuf_rows buffer to work with
    readBuf();

    resizeImage(width, buf_rows,
                raw_buf,
                resize_col_sz, resize_height,
                &(out[out_row]),
                16);

    out_row += resize_col_sz * resize_height * bytes_per_pixel; /* resize_col_sz * 2 * 2 */
  }

  interrupts();

  deallocate_scratch_buffs();
} /* OV7675::readFrame() */

//
// OV7675::readBuf()
//
// Extends the OV767X library function. Reads buf_rows VGA rows from the
// image sensor.
//
void OV7675::readBuf() {
  int offset = 0;

  uint32_t ulPin = 33;  // P1.xx set of GPIO is in 'pin' 32 and above
  NRF_GPIO_Type *port;

  // Resolve the GPIO port (P1) whose pins carry the camera data bus, so the
  // whole byte can be sampled with a single register read below.
  port = nrf_gpio_pin_port_decode(&ulPin);

  for (int i = 0; i < buf_rows; i++) {
    // rising edge indicates start of line
    while ((*hrefPort & hrefMask) == 0)
      ;  // wait for HIGH

    for (int col = 0; col < bytes_per_row; col++) {
      // rising edges clock each data byte
      while ((*pclkPort & pclkMask) != 0)
        ;  // wait for LOW

      uint32_t in = port->IN;  // read all bits in parallel

      // Reassemble the camera's 8 data bits from the port word.  The bus is
      // wired non-contiguously, so the two groups are shifted and merged:
      // NOTE(review): the exact bit positions (mask 0x3f03 after >>2) encode
      // the Nano 33 BLE board wiring — confirm against the schematic before
      // reusing on other hardware.
      in >>= 2;         // place bits 0 and 1 at the "bottom" of the register
      in &= 0x3f03;     // isolate the 8 bits we care about
      in |= (in >> 6);  // combine the upper 6 and lower 2 bits

      raw_buf[offset++] = in;

      while ((*pclkPort & pclkMask) == 0)
        ;  // wait for HIGH
    }

    while ((*hrefPort & hrefMask) != 0)
      ;  // wait for LOW
  }
} /* OV7675::readBuf() */

The size of the ino file generated is 26KB.

The error being generated is: region 'FLASH' overflowed by 7566496 bytes

Please help me debug this error.

Welcome to the forum

Your topic was MOVED to its current forum category which is more appropriate than the original as it has nothing to do with Installation and Troubleshooting of the IDE

When do you get the error? Compile / verify or upload?

Source code is compiled to an executable; the 26KB has no relation to the size of the executable.

What is in rahulgb-project-1_inferencing.h? A few megabyte of data? Are there C/CPP files associated with that? Everything counts !!

I have no experience with the Nano 33 BLE so probably can't help further.

I get the error during compile.

The file rahulgb-project-1_inferencing.h has the following contents:

/* Edge Impulse ingestion SDK
 * Copyright (c) 2022 EdgeImpulse Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

#ifndef _INFERENCE_H
#define _INFERENCE_H

// Undefine min/max macros as these conflict with C++ std min/max functions
// these are often included by Arduino cores
#include <Arduino.h>
#include <stdarg.h>
#ifdef min
#undef min
#endif // min
#ifdef max
#undef max
#endif // max
#ifdef round
#undef round
#endif // round
// Similar the ESP32 seems to define this, which is also used as an enum value in TFLite
#ifdef DEFAULT
#undef DEFAULT
#endif // DEFAULT
// Infineon core defines this, conflicts with CMSIS/DSP/Include/dsp/controller_functions.h
#ifdef A0
#undef A0
#endif // A0
#ifdef A1
#undef A1
#endif // A1
#ifdef A2
#undef A2
#endif // A2

/* Includes ---------------------------------------------------------------- */
#include "edge-impulse-sdk/classifier/ei_run_classifier.h"
#include "edge-impulse-sdk/dsp/numpy.hpp"
#include "model-parameters/model_metadata.h"
#include "edge-impulse-sdk/classifier/ei_classifier_smooth.h"

extern void ei_printf(const char *format, ...);

#endif // _INFERENCE_H

It's size is 2KB. There are some header files associated with it.

And what do they contain?

I did not get your question.

Sorry - "that" should have read "what".
Corrected now.

/*
 * Copyright (c) 2022 EdgeImpulse Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an "AS
 * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied. See the License for the specific language
 * governing permissions and limitations under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#ifndef _EDGE_IMPULSE_RUN_CLASSIFIER_H_
#define _EDGE_IMPULSE_RUN_CLASSIFIER_H_

#include "model-parameters/model_metadata.h"

#include "ei_run_dsp.h"
#include "ei_classifier_types.h"
#include "ei_signal_with_axes.h"
#include "ei_performance_calibration.h"

#include "edge-impulse-sdk/porting/ei_classifier_porting.h"

// for the release we'll put an actual studio version here
#ifndef EI_CLASSIFIER_STUDIO_VERSION
#define EI_CLASSIFIER_STUDIO_VERSION 2
#endif

#if EI_CLASSIFIER_STUDIO_VERSION < 3
#include "model-parameters/dsp_blocks.h"
#endif

#if EI_CLASSIFIER_HAS_MODEL_VARIABLES == 1
#include "model-parameters/model_variables.h"
#endif

#if EI_CLASSIFIER_HAS_ANOMALY == 1
#include "model-parameters/anomaly_clusters.h"
#include "inferencing_engines/anomaly.h"
#endif

#if defined(EI_CLASSIFIER_HAS_SAMPLER) && EI_CLASSIFIER_HAS_SAMPLER == 1
#include "ei_sampler.h"
#endif

#if (EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_TFLITE) && (EI_CLASSIFIER_COMPILED != 1)
#include "tflite-model/tflite-trained.h"
#include "edge-impulse-sdk/classifier/inferencing_engines/tflite_micro.h"
#elif EI_CLASSIFIER_COMPILED == 1
#include "edge-impulse-sdk/classifier/inferencing_engines/tflite_eon.h"
#elif EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_TFLITE_FULL
#include "edge-impulse-sdk/classifier/inferencing_engines/tflite_full.h"
#include "tflite-model/tflite-trained.h"
#elif (EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_TENSORRT)
#include "edge-impulse-sdk/classifier/inferencing_engines/tensorrt.h"
#include "tflite-model/onnx-trained.h"
#elif EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_TENSAIFLOW
#include "edge-impulse-sdk/classifier/inferencing_engines/tensaiflow.h"
#elif EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_DRPAI
#include "edge-impulse-sdk/classifier/inferencing_engines/drpai.h"
#elif EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_NONE
// noop
#else
#error "Unknown inferencing engine"
#endif

#if ECM3532
void*   __dso_handle = (void*) &__dso_handle;
#endif

// EI_CLASSIFIER_CALIBRATION_ENABLED needs to be added to new
// model metadata, since we are getting rid of macro for sensors
// (multiple impulses means we can have multiple sensors)
// for now we just enable it if EI_CLASSIFIER_SENSOR is present and
// is microphone (performance calibration only works for mic).
#if defined(EI_CLASSIFIER_SENSOR) && (EI_CLASSIFIER_SENSOR == EI_CLASSIFIER_SENSOR_MICROPHONE)
#define EI_CLASSIFIER_CALIBRATION_ENABLED 1
#else
#define EI_CLASSIFIER_CALIBRATION_ENABLED 0
#endif

#ifdef __cplusplus
namespace {
#endif // __cplusplus

/* Function prototypes ----------------------------------------------------- */
extern "C" EI_IMPULSE_ERROR run_inference(const ei_impulse_t *impulse, ei::matrix_t *fmatrix, ei_impulse_result_t *result, bool debug);
extern "C" EI_IMPULSE_ERROR run_classifier_image_quantized(const ei_impulse_t *impulse, signal_t *signal, ei_impulse_result_t *result, bool debug);
static EI_IMPULSE_ERROR can_run_classifier_image_quantized(const ei_impulse_t *impulse);

/* Private variables ------------------------------------------------------- */

// Running total of feature values written into the static feature matrix by
// successive run_classifier_continuous() slices; reset by run_classifier_init().
static uint64_t classifier_continuous_features_written = 0;
// Performance-calibration post-processor (moving-average / event trigger).
// Allocated by run_classifier_init() when calibration is enabled, freed by
// run_classifier_deinit().
static RecognizeEvents *avg_scores = NULL;

/* Private functions ------------------------------------------------------- */

/* These functions (up to Public functions section) are not exposed to end-user,
therefore changes are allowed. */

/**
 * @brief      Do inferencing over the processed feature matrix
 *
 * @param      impulse  struct with information about model and DSP
 * @param      fmatrix  Processed matrix
 * @param      result   Output classifier results
 * @param[in]  debug    Debug output enable
 *
 * @return     The ei impulse error.
 */
extern "C" EI_IMPULSE_ERROR run_inference(
    const ei_impulse_t *impulse,
    ei::matrix_t *fmatrix,
    ei_impulse_result_t *result,
    bool debug = false)
{
    // Run the neural network on the processed feature matrix. Skipped entirely
    // when no inferencing engine is configured, or for DRP-AI (which is handled
    // through a different code path).
#if (EI_CLASSIFIER_INFERENCING_ENGINE != EI_CLASSIFIER_NONE && EI_CLASSIFIER_INFERENCING_ENGINE != EI_CLASSIFIER_DRPAI)
    EI_IMPULSE_ERROR nn_res = run_nn_inference(impulse, fmatrix, result, debug);
    if (nn_res != EI_IMPULSE_OK) {
        return nn_res;
    }
#endif

#if EI_CLASSIFIER_HAS_ANOMALY == 1
    // Additionally score the same features against the anomaly model, if the
    // impulse has one.
    if (impulse->has_anomaly) {
        EI_IMPULSE_ERROR anomaly_res = inference_anomaly_invoke(impulse, fmatrix, result, debug);
        if (anomaly_res != EI_IMPULSE_OK) {
            return anomaly_res;
        }
    }
#endif

    // Honor a cancellation request raised by the host application.
    if (ei_run_impulse_check_canceled() == EI_IMPULSE_CANCELED) {
        return EI_IMPULSE_CANCELED;
    }

    return EI_IMPULSE_OK;
}

/**
 * @brief      Process a complete impulse
 *
 * @param      impulse  struct with information about model and DSP
 * @param      signal   Sample data
 * @param      result   Output classifier results
 * @param[in]  debug    Debug output enable
 *
 * @return     The ei impulse error.
 */
extern "C" EI_IMPULSE_ERROR process_impulse(const ei_impulse_t *impulse,
                                            signal_t *signal,
                                            ei_impulse_result_t *result,
                                            bool debug = false)
{

#if (EI_CLASSIFIER_TFLITE_INPUT_QUANTIZED == 1 && (EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_TFLITE || EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_TENSAIFLOW)) || EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_DRPAI
    // Shortcut for quantized image models: quantize in place instead of
    // materializing a full float feature matrix (saves a lot of RAM).
    if (can_run_classifier_image_quantized(impulse) == EI_IMPULSE_OK) {
        return run_classifier_image_quantized(impulse, signal, result, debug);
    }
#endif

    memset(result, 0, sizeof(ei_impulse_result_t));

    // Float feature matrix that the DSP blocks fill and the NN consumes.
    ei::matrix_t features_matrix(1, impulse->nn_input_frame_size);

    uint64_t dsp_start_us = ei_read_timer_us();

    size_t out_features_index = 0;

    // Run every DSP block in order; each block appends its output features
    // into the shared feature matrix.
    for (size_t ix = 0; ix < impulse->dsp_blocks_size; ix++) {
        ei_model_dsp_t block = impulse->dsp_blocks[ix];

        // Bounds check before handing the block a window into the matrix.
        if (out_features_index + block.n_output_features > impulse->nn_input_frame_size) {
            ei_printf("ERR: Would write outside feature buffer\n");
            return EI_IMPULSE_DSP_ERROR;
        }

        // 'fm' is a view over the output region for this block (no copy).
        ei::matrix_t fm(1, block.n_output_features, features_matrix.buffer + out_features_index);

#if EIDSP_SIGNAL_C_FN_POINTER
        if (block.axes_size != impulse->raw_samples_per_frame) {
            ei_printf("ERR: EIDSP_SIGNAL_C_FN_POINTER can only be used when all axes are selected for DSP blocks\n");
            return EI_IMPULSE_DSP_ERROR;
        }
        int ret = block.extract_fn(signal, &fm, block.config, impulse->frequency);
#else
        // Wrap the signal so the block only sees the axes it was trained on.
        SignalWithAxes swa(signal, block.axes, block.axes_size, impulse);
        int ret = block.extract_fn(swa.get_signal(), &fm, block.config, impulse->frequency);
#endif

        if (ret != EIDSP_OK) {
            ei_printf("ERR: Failed to run DSP process (%d)\n", ret);
            return EI_IMPULSE_DSP_ERROR;
        }

        if (ei_run_impulse_check_canceled() == EI_IMPULSE_CANCELED) {
            return EI_IMPULSE_CANCELED;
        }

        out_features_index += block.n_output_features;
    }

    // Record DSP timing (microseconds and rounded-down milliseconds).
    result->timing.dsp_us = ei_read_timer_us() - dsp_start_us;
    result->timing.dsp = (int)(result->timing.dsp_us / 1000);

    if (debug) {
        ei_printf("Features (%d ms.): ", result->timing.dsp);
        for (size_t ix = 0; ix < features_matrix.cols; ix++) {
            ei_printf_float(features_matrix.buffer[ix]);
            ei_printf(" ");
        }
        ei_printf("\n");
    }

    if (debug) {
        ei_printf("Running impulse...\n");
    }

    // Hand the completed feature matrix to the NN (and anomaly) stage.
    return run_inference(impulse, &features_matrix, result, debug);

}

/**
 * @brief      Process a complete impulse for continuous inference
 *
 * @param      impulse  struct with information about model and DSP
 * @param      signal   Sample data
 * @param      result   Output classifier results
 * @param[in]  debug    Debug output enable
 *
 * @return     The ei impulse error.
 */
extern "C" EI_IMPULSE_ERROR process_impulse_continuous(const ei_impulse_t *impulse,
                                            signal_t *signal,
                                            ei_impulse_result_t *result,
                                            bool debug,
                                            bool enable_maf)
{

    // Persistent feature matrix: each call contributes one slice of features;
    // inference only runs once a full model window has accumulated.
    static ei::matrix_t static_features_matrix(1, impulse->nn_input_frame_size);
    if (!static_features_matrix.buffer) {
        return EI_IMPULSE_ALLOC_FAILED;
    }

    memset(result, 0, sizeof(ei_impulse_result_t));

    EI_IMPULSE_ERROR ei_impulse_error = EI_IMPULSE_OK;

    uint64_t dsp_start_us = ei_read_timer_us();

    size_t out_features_index = 0;
    // Remember which per-slice extractor ran, so the matching
    // cepstral-mean/variance normalization can be applied before inference.
    bool is_mfcc = false;
    bool is_mfe = false;
    bool is_spectrogram = false;

    for (size_t ix = 0; ix < impulse->dsp_blocks_size; ix++) {
        ei_model_dsp_t block = impulse->dsp_blocks[ix];

        if (out_features_index + block.n_output_features > impulse->nn_input_frame_size) {
            ei_printf("ERR: Would write outside feature buffer\n");
            return EI_IMPULSE_DSP_ERROR;
        }

        // View into the slice's output region of the persistent matrix.
        ei::matrix_t fm(1, block.n_output_features,
                        static_features_matrix.buffer + out_features_index);

        int (*extract_fn_slice)(ei::signal_t *signal, ei::matrix_t *output_matrix, void *config, const float frequency, matrix_size_t *out_matrix_size);

        /* Switch to the slice version of the mfcc feature extract function */
        if (block.extract_fn == extract_mfcc_features) {
            extract_fn_slice = &extract_mfcc_per_slice_features;
            is_mfcc = true;
        }
        else if (block.extract_fn == extract_spectrogram_features) {
            extract_fn_slice = &extract_spectrogram_per_slice_features;
            is_spectrogram = true;
        }
        else if (block.extract_fn == extract_mfe_features) {
            extract_fn_slice = &extract_mfe_per_slice_features;
            is_mfe = true;
        }
        else {
            // Continuous mode only supports audio-style DSP blocks.
            ei_printf("ERR: Unknown extract function, only MFCC, MFE and spectrogram supported\n");
            return EI_IMPULSE_DSP_ERROR;
        }

        matrix_size_t features_written;

#if EIDSP_SIGNAL_C_FN_POINTER
        if (block.axes_size != impulse->raw_samples_per_frame) {
            ei_printf("ERR: EIDSP_SIGNAL_C_FN_POINTER can only be used when all axes are selected for DSP blocks\n");
            return EI_IMPULSE_DSP_ERROR;
        }
        int ret = extract_fn_slice(signal, &fm, block.config, impulse->frequency, &features_written);
#else
        SignalWithAxes swa(signal, block.axes, block.axes_size, impulse);
        int ret = extract_fn_slice(swa.get_signal(), &fm, block.config, impulse->frequency, &features_written);
#endif

        if (ret != EIDSP_OK) {
            ei_printf("ERR: Failed to run DSP process (%d)\n", ret);
            return EI_IMPULSE_DSP_ERROR;
        }

        if (ei_run_impulse_check_canceled() == EI_IMPULSE_CANCELED) {
            return EI_IMPULSE_CANCELED;
        }

        // Track global progress toward a full model window.
        classifier_continuous_features_written += (features_written.rows * features_written.cols);

        out_features_index += block.n_output_features;
    }

    result->timing.dsp_us = ei_read_timer_us() - dsp_start_us;
    result->timing.dsp = (int)(result->timing.dsp_us / 1000);

    if (debug) {
        ei_printf("\r\nFeatures (%d ms.): ", result->timing.dsp);
        for (size_t ix = 0; ix < static_features_matrix.cols; ix++) {
            ei_printf_float(static_features_matrix.buffer[ix]);
            ei_printf(" ");
        }
        ei_printf("\n");
    }

    // Only run inference once a complete window of features has accumulated.
    if (classifier_continuous_features_written >= impulse->nn_input_frame_size) {
        dsp_start_us = ei_read_timer_us();
        ei::matrix_t classify_matrix(1, impulse->nn_input_frame_size);

        /* Create a copy of the matrix for normalization */
        for (size_t m_ix = 0; m_ix < impulse->nn_input_frame_size; m_ix++) {
            classify_matrix.buffer[m_ix] = static_features_matrix.buffer[m_ix];
        }

        // Apply the normalization matching the extractor that produced the
        // features. NOTE(review): uses dsp_blocks[0].config — assumes a single
        // audio DSP block in continuous mode.
        if (is_mfcc) {
            calc_cepstral_mean_and_var_normalization_mfcc(&classify_matrix, impulse->dsp_blocks[0].config);
        }
        else if (is_spectrogram) {
            calc_cepstral_mean_and_var_normalization_spectrogram(&classify_matrix, impulse->dsp_blocks[0].config);
        }
        else if (is_mfe) {
            calc_cepstral_mean_and_var_normalization_mfe(&classify_matrix, impulse->dsp_blocks[0].config);
        }
        result->timing.dsp_us += ei_read_timer_us() - dsp_start_us;
        result->timing.dsp = (int)(result->timing.dsp_us / 1000);

        if (debug) {
            ei_printf("Running impulse...\n");
        }

        ei_impulse_error = run_inference(impulse, &classify_matrix, result, debug);

#if EI_CLASSIFIER_CALIBRATION_ENABLED
        // Post-process microphone results with performance calibration
        // (moving-average filter / event triggering), when configured.
        if (impulse->sensor == EI_CLASSIFIER_SENSOR_MICROPHONE) {
            if((void *)avg_scores != NULL && enable_maf == true) {
                if (enable_maf && !ei_calibration.is_configured) {
                    // perfcal is not configured, print msg first time
                    static bool has_printed_msg = false;

                    if (!has_printed_msg) {
                        ei_printf("WARN: run_classifier_continuous, enable_maf is true, but performance calibration is not configured.\n");
                        ei_printf("       Previously we'd run a moving-average filter over your outputs in this case, but this is now disabled.\n");
                        ei_printf("       Go to 'Performance calibration' in your Edge Impulse project to configure post-processing parameters.\n");
                        ei_printf("       (You can enable this from 'Dashboard' if it's not visible in your project)\n");
                        ei_printf("\n");

                        has_printed_msg = true;
                    }
                }
                else {
                    // perfcal is configured
                    int label_detected = avg_scores->trigger(result->classification);

                    // When an event triggers, snap scores to one-hot so the
                    // detected label reads as full confidence.
                    if (avg_scores->should_boost()) {
                        for (int i = 0; i < impulse->label_count; i++) {
                            if (i == label_detected) {
                                result->classification[i].value = 1.0f;
                            }
                            else {
                                result->classification[i].value = 0.0f;
                            }
                        }
                    }
                }
            }
        }
#endif
    }
    else {
        if (!impulse->object_detection) {
            for (int i = 0; i < impulse->label_count; i++) {
                // set label correctly in the result struct if we have no results (otherwise is nullptr)
                result->classification[i].label = impulse->categories[(uint32_t)i];
            }
        }
    }

    return ei_impulse_error;


}

#if EI_CLASSIFIER_STUDIO_VERSION < 3
/**
 * @brief      Construct impulse from macros - for run_classifer compatibility
 */
extern "C" const ei_impulse_t ei_construct_impulse()
{

// Assemble an ei_impulse_t from the EI_CLASSIFIER_* metadata macros. Kept only
// for backwards compatibility with pre-v3 Studio exports (see the guard above);
// newer exports define ei_default_impulse in model_variables.h instead.
const ei_impulse_t impulse =
    {
    .project_id = EI_CLASSIFIER_PROJECT_ID,
    .project_owner = EI_CLASSIFIER_PROJECT_OWNER,
    .project_name = EI_CLASSIFIER_PROJECT_NAME,

    .deploy_version = EI_CLASSIFIER_PROJECT_DEPLOY_VERSION,
    .nn_input_frame_size = EI_CLASSIFIER_NN_INPUT_FRAME_SIZE,
    .raw_sample_count = EI_CLASSIFIER_RAW_SAMPLE_COUNT,
    .raw_samples_per_frame = EI_CLASSIFIER_RAW_SAMPLES_PER_FRAME,
    .dsp_input_frame_size = (EI_CLASSIFIER_RAW_SAMPLE_COUNT * EI_CLASSIFIER_RAW_SAMPLES_PER_FRAME),

    .input_width = EI_CLASSIFIER_INPUT_WIDTH,
    .input_height = EI_CLASSIFIER_INPUT_HEIGHT,
    .input_frames = EI_CLASSIFIER_INPUT_FRAMES,

    .interval_ms = EI_CLASSIFIER_INTERVAL_MS,
    .label_count = EI_CLASSIFIER_LABEL_COUNT,
    .has_anomaly = EI_CLASSIFIER_HAS_ANOMALY,
    .frequency = EI_CLASSIFIER_FREQUENCY,
    .dsp_blocks_size = ei_dsp_blocks_size,
    .dsp_blocks = ei_dsp_blocks,

// Object-detection models carry extra tensor indices; classification models
// zero these fields out.
#if EI_CLASSIFIER_OBJECT_DETECTION == 1
    .object_detection = true,
    .object_detection_count = EI_CLASSIFIER_OBJECT_DETECTION_COUNT,
    .object_detection_threshold = EI_CLASSIFIER_OBJECT_DETECTION_THRESHOLD,
    .object_detection_last_layer = EI_CLASSIFIER_OBJECT_DETECTION_LAST_LAYER,
    .tflite_output_labels_tensor = EI_CLASSIFIER_TFLITE_OUTPUT_LABELS_TENSOR,
    .tflite_output_score_tensor = EI_CLASSIFIER_TFLITE_OUTPUT_SCORE_TENSOR,
    .tflite_output_data_tensor = EI_CLASSIFIER_TFLITE_OUTPUT_DATA_TENSOR,
#else
    .object_detection = false,
    .object_detection_count = 0,
    .object_detection_threshold = 0.0,
    .object_detection_last_layer = EI_CLASSIFIER_LAST_LAYER_UNKNOWN,
    .tflite_output_labels_tensor = 0,
    .tflite_output_score_tensor = 0,
    .tflite_output_data_tensor = 0,
#endif

#ifdef EI_CLASSIFIER_NN_OUTPUT_COUNT
    .tflite_output_features_count = EI_CLASSIFIER_NN_OUTPUT_COUNT,
#else
    .tflite_output_features_count = 0,
#endif

// Arena size only applies to the TFLite Micro interpreter (non-EON) path.
#if (EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_TFLITE) && (EI_CLASSIFIER_COMPILED != 1)
    .tflite_arena_size = EI_CLASSIFIER_TFLITE_ARENA_SIZE,
#else
    .tflite_arena_size = 0,
#endif

// Point at the serialized model blob appropriate for the selected engine.
#if ((EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_TFLITE) || (EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_TFLITE_FULL)) && (EI_CLASSIFIER_COMPILED != 1)
    .model_arr = trained_tflite,
    .model_arr_size = trained_tflite_len,
#elif (EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_TENSORRT)
    .model_arr = trained_onnx,
    .model_arr_size = trained_onnx_len,
#else
    .model_arr = 0,
    .model_arr_size = 0,
#endif

#if (EI_CLASSIFIER_INFERENCING_ENGINE != EI_CLASSIFIER_NONE)
    .tflite_input_datatype = EI_CLASSIFIER_TFLITE_INPUT_DATATYPE,
    .tflite_input_quantized = EI_CLASSIFIER_TFLITE_INPUT_QUANTIZED,
    .tflite_input_scale = EI_CLASSIFIER_TFLITE_INPUT_SCALE,
    .tflite_input_zeropoint = EI_CLASSIFIER_TFLITE_INPUT_ZEROPOINT,
    .tflite_output_datatype = EI_CLASSIFIER_TFLITE_OUTPUT_DATATYPE,
    .tflite_output_quantized = EI_CLASSIFIER_TFLITE_OUTPUT_QUANTIZED,
    .tflite_output_scale = EI_CLASSIFIER_TFLITE_OUTPUT_SCALE,
    .tflite_output_zeropoint = EI_CLASSIFIER_TFLITE_OUTPUT_ZEROPOINT,
    .inferencing_engine = EI_CLASSIFIER_INFERENCING_ENGINE,
    .compiled = EI_CLASSIFIER_COMPILED,
    .has_tflite_ops_resolver = EI_CLASSIFIER_HAS_TFLITE_OPS_RESOLVER,
#else
    .tflite_input_datatype = 0,
    .tflite_input_quantized = 0,
    .tflite_input_scale = 0,
    .tflite_input_zeropoint = 0,
    .tflite_output_datatype = 0,
    .tflite_output_quantized = 0,
    .tflite_output_scale = 0,
    .tflite_output_zeropoint = 0,
    .inferencing_engine = 0,
    .compiled = 0,
    .has_tflite_ops_resolver = 0,
#endif

    .sensor = EI_CLASSIFIER_SENSOR,
#ifdef EI_CLASSIFIER_FUSION_AXES_STRING
    .fusion_string = EI_CLASSIFIER_FUSION_AXES_STRING,
#else
    .fusion_string = "null",
#endif

    .slice_size = (EI_CLASSIFIER_RAW_SAMPLE_COUNT / EI_CLASSIFIER_SLICES_PER_MODEL_WINDOW),
    .slices_per_model_window = EI_CLASSIFIER_SLICES_PER_MODEL_WINDOW,

// EON-compiled models expose the model through generated functions rather
// than a serialized blob.
#if (EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_TFLITE) && (EI_CLASSIFIER_COMPILED == 1)
    .model_input = &trained_model_input,
    .model_output = &trained_model_output,
    .model_init = &trained_model_init,
    .model_invoke = &trained_model_invoke,
    .model_reset = &trained_model_reset,
#else
    .model_input = NULL,
    .model_output = NULL,
    .model_init =  NULL,
    .model_invoke = NULL,
    .model_reset = NULL,
#endif
    .categories = ei_classifier_inferencing_categories
    };

    // Returned by value; callers hold their own const copy.
    return impulse;
}
#endif

/**
 * Check if the current impulse could be used by 'run_classifier_image_quantized'
 */
/**
 * Check whether this impulse qualifies for 'run_classifier_image_quantized',
 * the low-memory in-place quantization path.
 *
 * @param impulse  Impulse metadata to inspect.
 * @return EI_IMPULSE_OK when eligible, otherwise the reason it is not.
 */
__attribute__((unused)) static EI_IMPULSE_ERROR can_run_classifier_image_quantized(const ei_impulse_t *impulse) {

    // Only the TFLite-family engines support this path (DRP-AI: check later).
    bool engine_supported =
        (impulse->inferencing_engine == EI_CLASSIFIER_TFLITE) ||
        (impulse->inferencing_engine == EI_CLASSIFIER_TENSAIFLOW) ||
        (impulse->inferencing_engine == EI_CLASSIFIER_DRPAI);
    if (!engine_supported) {
        return EI_IMPULSE_UNSUPPORTED_INFERENCING_ENGINE;
    }

    // Anomaly models are out.
    if (impulse->has_anomaly == 1){
        return EI_IMPULSE_ONLY_SUPPORTED_FOR_IMAGES;
    }

    // Need a quantized NN input layer (input is always quantized for DRP-AI)...
    if (impulse->tflite_input_quantized != 1) {
        return EI_IMPULSE_ONLY_SUPPORTED_FOR_IMAGES;
    }

    // ...and exactly one DSP block, which must operate on images.
    bool single_image_block =
        (impulse->dsp_blocks_size == 1) &&
        (impulse->dsp_blocks[0].extract_fn == extract_image_features);
    if (!single_image_block) {
        return EI_IMPULSE_ONLY_SUPPORTED_FOR_IMAGES;
    }

    return EI_IMPULSE_OK;
}

#if EI_CLASSIFIER_TFLITE_INPUT_QUANTIZED == 1 && (EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_TFLITE || EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_TENSAIFLOW || EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_DRPAI)

/**
 * Special function to run the classifier on images, only works on TFLite models (either interpreter or EON or for tensaiflow)
 * that allocates a lot less memory by quantizing in place. This only works if 'can_run_classifier_image_quantized'
 * returns EI_IMPULSE_OK.
 */
extern "C" EI_IMPULSE_ERROR run_classifier_image_quantized(
    const ei_impulse_t *impulse,
    signal_t *signal,
    ei_impulse_result_t *result,
    bool debug = false)
{
    // Guard: only proceed for impulses that qualify for the in-place
    // quantized image path.
    const EI_IMPULSE_ERROR precheck = can_run_classifier_image_quantized(impulse);
    if (precheck != EI_IMPULSE_OK) {
        return precheck;
    }

    // Start from a zeroed result structure, then delegate to the
    // engine-specific quantized image inference.
    memset(result, 0, sizeof(ei_impulse_result_t));
    return run_nn_inference_image_quantized(impulse, signal, result, debug);
}

#endif // #if EI_CLASSIFIER_TFLITE_INPUT_QUANTIZED == 1 && (EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_TFLITE || EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_TENSAIFLOW || EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_DRPAI)

/* Public functions ------------------------------------------------------- */

/* Thread carefully: public functions are not to be changed
to preserve backwards compatibility. */

/**
 * @brief      Init static vars
 */
extern "C" void run_classifier_init()
{
    // Reset continuous-classification state so a new stream starts from an
    // empty feature window.
    classifier_continuous_features_written = 0;
    ei_dsp_clear_continuous_audio_state();

#if EI_CLASSIFIER_CALIBRATION_ENABLED

#if EI_CLASSIFIER_STUDIO_VERSION < 3
        const ei_impulse_t impulse = ei_construct_impulse();
#else
       const ei_impulse_t impulse = ei_default_impulse;
#endif

    const ei_model_performance_calibration_t *calibration = &ei_calibration;

    if(calibration != NULL) {
        // Free any post-processor left from a previous init; otherwise calling
        // run_classifier_init() twice leaked the old RecognizeEvents instance.
        // (delete on NULL is a no-op.)
        delete avg_scores;
        avg_scores = new RecognizeEvents(calibration,
            impulse.label_count, impulse.slice_size, impulse.interval_ms);
    }
#endif
}

/**
 * @brief      Init static vars, for multi-model support
 */
/**
 * @brief      Init static vars, for multi-model support
 *
 * @param      impulse  struct with information about model and DSP
 */
__attribute__((unused)) void run_classifier_init(const ei_impulse_t *impulse)
{
    // Reset continuous-classification state for a fresh stream.
    classifier_continuous_features_written = 0;
    ei_dsp_clear_continuous_audio_state();

#if EI_CLASSIFIER_CALIBRATION_ENABLED
    const ei_model_performance_calibration_t *calibration = &ei_calibration;

    if(calibration != NULL) {
        // Free any post-processor left from a previous init; otherwise repeated
        // init calls leaked the old RecognizeEvents. (delete on NULL is a no-op.)
        delete avg_scores;
        avg_scores = new RecognizeEvents(calibration,
            impulse->label_count, impulse->slice_size, impulse->interval_ms);
    }
#endif
}

/**
 * @brief      Release resources allocated by run_classifier_init().
 *             Safe to call multiple times.
 */
extern "C" void run_classifier_deinit(void)
{
    // delete on NULL is a no-op, so no guard is needed. Resetting the pointer
    // afterwards prevents a double-delete if deinit is called again before the
    // next init.
    delete avg_scores;
    avg_scores = NULL;
}

/**
 * @brief      Fill the complete matrix with sample slices. From there, run inference
 *             on the matrix.
 *
 * @param      signal  Sample data
 * @param      result  Classification output
 * @param[in]  debug   Debug output enable boot
 *
 * @return     The ei impulse error.
 */
extern "C" EI_IMPULSE_ERROR run_classifier_continuous(
    signal_t *signal,
    ei_impulse_result_t *result,
    bool debug = false,
    bool enable_maf = true)
{
    // Resolve the single default impulse for this deployment (constructed from
    // macros on pre-v3 Studio exports, generated directly on newer ones), then
    // delegate to the continuous-processing pipeline.
#if EI_CLASSIFIER_STUDIO_VERSION < 3
        const ei_impulse_t impulse = ei_construct_impulse();
#else
       const ei_impulse_t impulse = ei_default_impulse;
#endif
    return process_impulse_continuous(&impulse, signal, result, debug, enable_maf);
}

/**
 * @brief      Fill the complete matrix with sample slices. From there, run impulse
 *             on the matrix.
 *
 * @param      impulse struct with information about model and DSP
 * @param      signal  Sample data
 * @param      result  Classification output
 * @param[in]  debug   Debug output enable boot
 *
 * @return     The ei impulse error.
 */
__attribute__((unused)) EI_IMPULSE_ERROR run_classifier_continuous(
    const ei_impulse_t *impulse,
    signal_t *signal,
    ei_impulse_result_t *result,
    bool debug = false,
    bool enable_maf = true)
{
    // Multi-impulse variant: caller supplies the impulse explicitly.
    return process_impulse_continuous(impulse, signal, result, debug, enable_maf);
}

/**
 * Run the classifier over a raw features array
 * @param raw_features Raw features array
 * @param raw_features_size Size of the features array
 * @param result Object to store the results in
 * @param debug Whether to show debug messages (default: false)
 */
extern "C" EI_IMPULSE_ERROR run_classifier(
    signal_t *signal,
    ei_impulse_result_t *result,
    bool debug = false)
{
    // Resolve the single default impulse for this deployment, then run the
    // full DSP + inference pipeline over the provided signal.
#if EI_CLASSIFIER_STUDIO_VERSION < 3
        const ei_impulse_t impulse = ei_construct_impulse();
#else
       const ei_impulse_t impulse = ei_default_impulse;
#endif
    return process_impulse(&impulse, signal, result, debug);
}

/**
 * Run the impulse over a raw features array
 * @param impulse struct with information about model and DSP
 * @param raw_features Raw features array
 * @param raw_features_size Size of the features array
 * @param result Object to store the results in
 * @param debug Whether to show debug messages (default: false)
 */
/**
 * Run the impulse over a raw features array (multi-impulse variant).
 * @param impulse struct with information about model and DSP
 * @param signal Sample data
 * @param result Object to store the results in
 * @param debug Whether to show debug messages (default: false)
 */
__attribute__((unused)) EI_IMPULSE_ERROR run_classifier(
    const ei_impulse_t *impulse,
    signal_t *signal,
    ei_impulse_result_t *result,
    bool debug = false)
{
    // Removed the unconditional ei_printf of the project name: it was leftover
    // debug output, printed on every classification even with debug == false,
    // and the default-impulse overload does not print it.
    return process_impulse(impulse, signal, result, debug);
}

/* Deprecated functions ------------------------------------------------------- */

/* These functions are being deprecated and possibly will be removed or moved in future.
Do not use these - if possible, change your code to reflect the upcoming changes. */

#if EIDSP_SIGNAL_C_FN_POINTER == 0

/**
 * Run the impulse, if you provide an instance of sampler it will also persist the data for you
 * @param sampler Instance to an **initialized** sampler
 * @param result Object to store the results in
 * @param data_fn Function to retrieve data from sensors
 * @param debug Whether to log debug messages (default false)
 */
__attribute__((unused)) EI_IMPULSE_ERROR run_impulse(
#if defined(EI_CLASSIFIER_HAS_SAMPLER) && EI_CLASSIFIER_HAS_SAMPLER == 1
        EdgeSampler *sampler,
#endif
        ei_impulse_result_t *result,
#ifdef __MBED__
        mbed::Callback<void(float*, size_t)> data_fn,
#else
        std::function<void(float*, size_t)> data_fn,
#endif
        bool debug = false) {

#if EI_CLASSIFIER_STUDIO_VERSION < 3
        const ei_impulse_t impulse = ei_construct_impulse();
#else
       const ei_impulse_t impulse = ei_default_impulse;
#endif

    // Heap buffer for one full window of raw samples.
    float *x = (float*)calloc(impulse.dsp_input_frame_size, sizeof(float));
    if (!x) {
        return EI_IMPULSE_OUT_OF_MEMORY;
    }

    uint64_t next_tick = 0;

    uint64_t sampling_us_start = ei_read_timer_us();

    // grab some data: one frame per iteration, paced at impulse.interval_ms.
    for (int i = 0; i < impulse.dsp_input_frame_size; i += impulse.raw_samples_per_frame) {
        uint64_t curr_us = ei_read_timer_us() - sampling_us_start;

        next_tick = curr_us + (impulse.interval_ms * 1000);

        data_fn(x + i, impulse.raw_samples_per_frame);
#if defined(EI_CLASSIFIER_HAS_SAMPLER) && EI_CLASSIFIER_HAS_SAMPLER == 1
        // Optionally persist the raw samples as they are collected.
        if (sampler != NULL) {
            sampler->write_sensor_data(x + i, impulse.raw_samples_per_frame);
        }
#endif

        if (ei_run_impulse_check_canceled() == EI_IMPULSE_CANCELED) {
            free(x);
            return EI_IMPULSE_CANCELED;
        }

        // Busy-wait until the next sampling tick to hold the sample rate.
        while (next_tick > ei_read_timer_us() - sampling_us_start);
    }

    result->timing.sampling = (ei_read_timer_us() - sampling_us_start) / 1000;

    // Wrap the raw buffer in a signal_t and run the regular pipeline.
    signal_t signal;
    int err = numpy::signal_from_buffer(x, impulse.dsp_input_frame_size, &signal);
    if (err != 0) {
        free(x);
        ei_printf("ERR: signal_from_buffer failed (%d)\n", err);
        return EI_IMPULSE_DSP_ERROR;
    }

    EI_IMPULSE_ERROR r = run_classifier(&signal, result, debug);
    free(x);
    return r;
}

#if defined(EI_CLASSIFIER_HAS_SAMPLER) && EI_CLASSIFIER_HAS_SAMPLER == 1
/**
 * Run the impulse, does not persist data
 * @param result Object to store the results in
 * @param data_fn Function to retrieve data from sensors
 * @param debug Whether to log debug messages (default false)
 */
__attribute__((unused)) EI_IMPULSE_ERROR run_impulse(
        ei_impulse_result_t *result,
#ifdef __MBED__
        mbed::Callback<void(float*, size_t)> data_fn,
#else
        std::function<void(float*, size_t)> data_fn,
#endif
        bool debug = false) {
    // Convenience overload: no sampler, so nothing is persisted.
    return run_impulse(NULL, result, data_fn, debug);
}
#endif

#endif // #if EIDSP_SIGNAL_C_FN_POINTER == 0

#ifdef __cplusplus
}
#endif // __cplusplus

#endif // _EDGE_IMPULSE_RUN_CLASSIFIER_H_

/*
 * Copyright (c) 2022 EdgeImpulse Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an "AS
 * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied. See the License for the specific language
 * governing permissions and limitations under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#ifndef _EI_CLASSIFIER_SMOOTH_H_
#define _EI_CLASSIFIER_SMOOTH_H_

#if EI_CLASSIFIER_OBJECT_DETECTION != 1

#include <stdint.h>

// State for consensus-based smoothing of classifier outputs. Readings are
// stored as: >= 0 class index, -1 uncertain, -2 anomaly (see smooth_update).
typedef struct ei_classifier_smooth {
    int *last_readings;                 // heap-allocated ring of the most recent readings
    size_t last_readings_size;          // number of entries in last_readings
    uint8_t min_readings_same;          // how many identical readings are needed for consensus
    float classifier_confidence;        // minimum class score for a reading to count
    float anomaly_confidence;           // anomaly score at/above which a reading becomes 'anomaly'
    uint8_t count[EI_CLASSIFIER_LABEL_COUNT + 2] = { 0 };  // per-label tallies + 'uncertain' + 'anomaly' slots
    size_t count_size = EI_CLASSIFIER_LABEL_COUNT + 2;     // size of count[]
} ei_classifier_smooth_t;

/**
 * Initialize a smooth structure. This is useful if you don't want to trust
 * single readings, but rather want consensus
 * (e.g. 7 / 10 readings should be the same before I draw any ML conclusions).
 * This allocates memory on the heap!
 * @param smooth Pointer to an uninitialized ei_classifier_smooth_t struct
 * @param n_readings Number of readings you want to store
 * @param min_readings_same Minimum readings that need to be the same before concluding (needs to be lower than n_readings)
 * @param classifier_confidence Minimum confidence in a class (default 0.8)
 * @param anomaly_confidence Maximum error for anomalies (default 0.3)
 */
/**
 * Initialize a smooth structure. This is useful if you don't want to trust
 * single readings, but rather want consensus
 * (e.g. 7 / 10 readings should be the same before I draw any ML conclusions).
 * This allocates memory on the heap!
 * @param smooth Pointer to an uninitialized ei_classifier_smooth_t struct
 * @param n_readings Number of readings you want to store
 * @param min_readings_same Minimum readings that need to be the same before concluding (needs to be lower than n_readings)
 * @param classifier_confidence Minimum confidence in a class (default 0.8)
 * @param anomaly_confidence Maximum error for anomalies (default 0.3)
 */
void ei_classifier_smooth_init(ei_classifier_smooth_t *smooth, size_t n_readings,
                               uint8_t min_readings_same, float classifier_confidence = 0.8,
                               float anomaly_confidence = 0.3) {
    smooth->last_readings = (int*)ei_malloc(n_readings * sizeof(int));
    if (smooth->last_readings == NULL) {
        // Allocation failed: record an empty window instead of writing through
        // a NULL pointer below (the original dereferenced it unconditionally).
        smooth->last_readings_size = 0;
    }
    else {
        for (size_t ix = 0; ix < n_readings; ix++) {
            smooth->last_readings[ix] = -1; // -1 == uncertain
        }
        smooth->last_readings_size = n_readings;
    }
    smooth->min_readings_same = min_readings_same;
    smooth->classifier_confidence = classifier_confidence;
    smooth->anomaly_confidence = anomaly_confidence;
    smooth->count_size = EI_CLASSIFIER_LABEL_COUNT + 2;
}

/**
 * Call when a new reading comes in.
 * @param smooth Pointer to an initialized ei_classifier_smooth_t struct
 * @param result Pointer to a result structure (after calling ei_run_classifier)
 * @returns Label, either 'uncertain', 'anomaly', or a label from the result struct
 */
const char* ei_classifier_smooth_update(ei_classifier_smooth_t *smooth, ei_impulse_result_t *result) {
    // clear out the count array (count[] is uint8_t, so byte count == slots)
    memset(smooth->count, 0, EI_CLASSIFIER_LABEL_COUNT + 2);

    // roll through the last_readings buffer (shift left by one, oldest drops off)
    numpy::roll(smooth->last_readings, smooth->last_readings_size, -1);

    int reading = -1; // uncertain

    // print the predictions
    // printf("[");
    // Take the last class whose score meets the confidence threshold.
    for (size_t ix = 0; ix < EI_CLASSIFIER_LABEL_COUNT; ix++) {
        if (result->classification[ix].value >= smooth->classifier_confidence) {
            reading = (int)ix;
        }
    }
#if EI_CLASSIFIER_HAS_ANOMALY == 1
    // Anomaly overrides any class reading.
    if (result->anomaly >= smooth->anomaly_confidence) {
        reading = -2; // anomaly
    }
#endif

    // Store the newest reading in the freed-up last slot.
    smooth->last_readings[smooth->last_readings_size - 1] = reading;

    // now count last 10 readings and see what we actually see...
    for (size_t ix = 0; ix < smooth->last_readings_size; ix++) {
        if (smooth->last_readings[ix] >= 0) {
            smooth->count[smooth->last_readings[ix]]++;
        }
        else if (smooth->last_readings[ix] == -1) { // uncertain
            smooth->count[EI_CLASSIFIER_LABEL_COUNT]++;
        }
        else if (smooth->last_readings[ix] == -2) { // anomaly
            smooth->count[EI_CLASSIFIER_LABEL_COUNT + 1]++;
        }
    }

    // then loop over the count and see which is highest
    uint8_t top_result = 0;
    uint8_t top_count = 0;
    bool met_confidence_threshold = false;
    uint8_t confidence_threshold = smooth->min_readings_same; // XX% of windows should be the same
    for (size_t ix = 0; ix < EI_CLASSIFIER_LABEL_COUNT + 2; ix++) {
        if (smooth->count[ix] > top_count) {
            top_result = ix;
            top_count = smooth->count[ix];
        }
        if (smooth->count[ix] >= confidence_threshold) {
            met_confidence_threshold = true;
        }
    }

    // Map the winning slot back to a label: the two extra slots are the
    // 'uncertain' and 'anomaly' sentinels.
    if (met_confidence_threshold) {
        if (top_result == EI_CLASSIFIER_LABEL_COUNT) {
            return "uncertain";
        }
        else if (top_result == EI_CLASSIFIER_LABEL_COUNT + 1) {
            return "anomaly";
        }
        else {
            return result->classification[top_result].label;
        }
    }
    return "uncertain";
}

/**
 * Clear up a smooth structure
 */
/**
 * Clear up a smooth structure. Safe to call more than once: the readings
 * pointer is reset so a second call frees NULL (a no-op) instead of
 * double-freeing the buffer.
 */
void ei_classifier_smooth_free(ei_classifier_smooth_t *smooth) {
    ei_free(smooth->last_readings);
    smooth->last_readings = NULL;
    smooth->last_readings_size = 0;
}

#endif // #if EI_CLASSIFIER_OBJECT_DETECTION != 1

#endif // _EI_CLASSIFIER_SMOOTH_H_

These files have been automatically generated by Edge Impulse.

This topic was automatically closed 180 days after the last reply. New replies are no longer allowed.