[feat][examples] add tensorflow-lite vww demo detection person

This commit is contained in:
qqwang 2022-04-25 20:42:07 +08:00
parent 21621d54cf
commit 014113cd95
18 changed files with 885 additions and 3 deletions

View File

@ -42,7 +42,7 @@
#define RGB565 0
#define UYVY 1
#define YUYV 2
#define FORMAT_SEL RGB565
#define FORMAT_SEL UYVY
#if (IMAGE_SENSOR_USE == IMAGE_SENSOR_BF2013)
#define I2C_CAMERA_ADDR 0x6E
@ -684,8 +684,7 @@ uint8_t image_sensor_init(BL_Fun_Type mjpeg_en, cam_device_t *cam_cfg, mjpeg_dev
cam_register(CAM0_INDEX, "camera0");
struct device *cam0 = device_find("camera0");
if(!cam0)
{
if (!cam0) {
MSG("cam do not find\r\n");
return 1;
}

File diff suppressed because one or more lines are too long

View File

@ -0,0 +1,2 @@
/* Serialized TFLite person-detection model, emitted as a C byte array. */
extern const unsigned char g_person_detect_model_data[];
/* Size of g_person_detect_model_data in bytes. */
extern const unsigned int g_person_detect_model_data_size;

File diff suppressed because one or more lines are too long

View File

@ -0,0 +1,2 @@
/* Serialized visual-wake-word model (50x50 input, INT8-quantized), as a C byte array. */
extern const unsigned char g_vww2_50_50_INT8_model_data[];
/* Size of g_vww2_50_50_INT8_model_data in bytes. */
extern const unsigned int g_vww2_50_50_INT8_model_data_size;

View File

@ -0,0 +1,12 @@
# Board-support code shared by all examples.
set(BSP_COMMON_DIR ${CMAKE_SOURCE_DIR}/bsp/bsp_common)
# Link against the USB device stack and the TensorFlow Lite Micro library.
set(TARGET_REQUIRED_LIBS usb_stack tflite)
# Custom linker script that places the demo's buffers in PSRAM.
set(LINKER_SCRIPT ${CMAKE_CURRENT_SOURCE_DIR}/tflite_camera_psram.ld)
set(TARGET_REQUIRED_PRIVATE_INCLUDE ${BSP_COMMON_DIR}/usb ${BSP_COMMON_DIR}/psram ${BSP_COMMON_DIR}/image_sensor ${BSP_COMMON_DIR}/mcu_lcd ${CMAKE_CURRENT_SOURCE_DIR}/vww)
# Pick up every C++ source under vww/ (model glue code).
# NOTE(review): GLOB_RECURSE is evaluated at configure time; re-run cmake after
# adding new .cc files.
file(GLOB_RECURSE ref
    "${CMAKE_CURRENT_SOURCE_DIR}/vww/*.cc"
)
set(TARGET_REQUIRED_SRCS ${BSP_COMMON_DIR}/usb/usb_dc.c ${BSP_COMMON_DIR}/psram/bsp_sf_psram.c ${BSP_COMMON_DIR}/image_sensor/bsp_image_sensor.c ${BSP_COMMON_DIR}/mcu_lcd/mcu_lcd.c ${BSP_COMMON_DIR}/mcu_lcd/font.c ${BSP_COMMON_DIR}/mcu_lcd/ili9341.c ${ref})
set(mains main.c)
generate_bin()

View File

@ -0,0 +1,174 @@
/**
* @file main.c
* @brief
*
* Copyright (c) 2021 Bouffalolab team
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership. The
* ASF licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
*/
#include "hal_usb.h"
#include "usbd_core.h"
#include "usbd_video.h"
#include "mcu_lcd.h"
#include "hal_dma.h"
#include "hal_cam.h"
#include "hal_mjpeg.h"
#include "bsp_sf_psram.h"
#include "bsp_image_sensor.h"
#include "image_proc.h"
#include "tensorflow/lite/micro/examples/person_detection/main_functions.h"
#define CAMERA_RESOLUTION_X (640)
#define CAMERA_RESOLUTION_Y (480)
#define CAMERA_FRAME_SIZE (CAMERA_RESOLUTION_X * CAMERA_RESOLUTION_Y)
#define CAMERA_WRITE_ADDR (0x26000000)
#define CAMERA_BUFFER_SIZE (0x96000)
#define PERSON_THRESHOLD (-20)
static mjpeg_device_t mjpeg_cfg;
static cam_device_t camera_cfg = {
.software_mode = CAM_MANUAL_MODE,
.frame_mode = CAM_FRAME_INTERLEAVE_MODE,
.yuv_format = CAM_YUV_FORMAT_YUV400_ODD,
.cam_write_ram_addr = CAMERA_WRITE_ADDR,
.cam_write_ram_size = CAMERA_BUFFER_SIZE,
.cam_frame_size = CAMERA_FRAME_SIZE,
.cam_write_ram_addr1 = 0,
.cam_write_ram_size1 = 0,
.cam_frame_size1 = 0,
};
/**
* @brief Convert YUV400 to grayscale image
*
* @param y_400 YUV400 image ptr
* @param gray gray image ptr
* @param len image size
*/
/**
 * @brief Convert a YUV400 (luma-only) buffer to an RGB565 grayscale image.
 *
 * Dark pixels (luma below 0xaf) are brightened by 0x30 before quantization.
 * Each 5-bit level is replicated into the R (bits 11-15), upper G (bits 6-10)
 * and B (bits 0-4) fields of the RGB565 word.
 *
 * @param y_400 source luma buffer
 * @param gray  destination RGB565 buffer
 * @param len   number of pixels
 */
void Y_to_rgb565_gray(uint8_t *y_400, uint16_t *gray, uint32_t len)
{
    for (uint32_t idx = 0; idx < len; idx++) {
        uint8_t luma = y_400[idx];
        uint16_t level;

        if (luma >= 0xaf) {
            level = (uint8_t)(luma >> 3);
        } else {
            /* brighten dark pixels before quantizing to 5 bits */
            level = (uint8_t)((luma + 0x30) >> 3);
        }
        /* level <= 31, so the three fields never overlap */
        gray[idx] = (uint16_t)((level << 11) | (level << 6) | level);
    }
}
/**
 * @brief Downsample a YUV400 image by an integer ratio and convert it to an
 *        RGB565 grayscale image.
 *
 * Takes every `ratio`-th pixel of every `ratio`-th row; the output therefore
 * holds (y400_x/ratio) * (y400_y/ratio) pixels, written row-major.
 *
 * @param y_400  source luma buffer (y400_x * y400_y bytes)
 * @param gray   destination RGB565 buffer
 * @param y400_x source width in pixels
 * @param y400_y source height in pixels
 * @param ratio  integer decimation factor
 */
void y400_to_rgb565_gray(uint8_t *y_400, uint16_t *gray, uint32_t y400_x, uint32_t y400_y, uint32_t ratio)
{
    const uint8_t *cursor = y_400;
    uint32_t out = 0;

    for (uint32_t row = 0; row < y400_y; row += ratio) {
        for (uint32_t col = 0; col < y400_x; col += ratio) {
            uint16_t level = (uint16_t)(*cursor >> 3);
            /* replicate the 5-bit level into the R/G/B fields of RGB565 */
            gray[out++] = (uint16_t)((level << 11) | (level << 6) | level);
            cursor += ratio;
        }
        /* skip the (ratio - 1) rows the outer loop does not sample */
        cursor += y400_x * (ratio - 1);
    }
}
/**
 * @brief Demo entry point: capture frames, run person detection, show the
 *        result and a 160x160 grayscale preview on the ili9341 LCD.
 */
int main(void)
{
    int score = 0;            /* person-ness score produced by loop() */
    uint8_t *picture;         /* raw camera frame returned by the cam driver */
    uint8_t *picture_lcd;     /* 160x160 preview inside PSRAM (offset from image_proc) */
    uint32_t pic_offset;
    uint32_t length;
    /* RGB565 preview buffer placed at a fixed PSRAM address.
     * NOTE(review): assumed not to overlap the camera write buffer at
     * 0x26000000..+0x96000 or image_proc()'s scratch area -- confirm layout. */
    uint16_t *gray_565 = (uint16_t *)(0x26098000);

    bflb_platform_init(0);
    bsp_sf_psram_init(1);

    MSG("Setup LCD\r\n");
    if (lcd_init()) {
        MSG("lcd err \r\n");
    }
    // lcd_set_dir(0, 0);
    lcd_clear(0x0);
    lcd_draw_str_ascii16(0, 0, 0xFFFF, 0x0000, (uint8_t *)"Bouffalo LAB\nVWW DEMO @ BL706", 30);

    // TFLite setup: builds the interpreter and allocates the tensor arena.
    MSG("Setup TFLite\r\n");
    setup();

    if (SUCCESS != image_sensor_init(DISABLE, &camera_cfg, &mjpeg_cfg)) {
        MSG("Init error!\n");
        BL_CASE_FAIL;
        while (1) {
        }
    }

    struct device *cam0 = device_find("camera0");
    device_control(cam0, DEVICE_CTRL_RESUME, NULL);

    while (1) {
        /* busy-wait until the camera driver hands over a complete frame */
        while (SUCCESS != cam_get_one_frame_interleave(&picture, &length)) {
        }
        /* pause capture while we process this frame */
        device_control(cam0, DEVICE_CTRL_SUSPEND, NULL);
        // UART_SendData(0, (uint8_t *)(picture), length);

        //AI data preprocess: resize frame to the model input, get preview offset
        int8_t *scaled_img = image_proc(picture, length, CAMERA_RESOLUTION_X, CAMERA_RESOLUTION_Y, (int *)&pic_offset);
        picture_lcd = (uint8_t *)(0x26000000 + pic_offset);

        /* run one inference; score is written by loop() */
        loop(scaled_img, &score);
        // UART_SendData(0, (uint8_t *)(0x2604b9c4), 160 * 160);

        /* convert the 160x160 preview to RGB565 for the LCD */
        Y_to_rgb565_gray((uint8_t *)(picture_lcd), gray_565, (160 * 160));
        while (lcd_draw_is_busy()) {
        };

        printf("person score %d\r\n\r\n", score);

        /* text is drawn in one orientation, the image in another */
        lcd_set_dir(0, 0);
        if (score > PERSON_THRESHOLD) {
            lcd_draw_str_ascii16(68, 48, 0xFFFF, 0x07e0, (uint8_t *)" PERSON ", 13);
        } else {
            lcd_draw_str_ascii16(68, 48, 0xFFFF, 0xf800, (uint8_t *)" NOPERSON ", 13);
        }
        lcd_set_dir(2, 0);
        lcd_draw_picture_nonblocking(40, 80, (160 + 40 - 1), (160 + 80 - 1), gray_565);

        /* discard any frame captured while we were busy, then resume */
        device_control(cam0, DEVICE_CTRL_CAM_FRAME_DROP, NULL);
        device_control(cam0, DEVICE_CTRL_RESUME, NULL);
    }

    /* unreachable: the capture loop above never exits */
    BL_CASE_SUCCESS;
    while (1) {
        bflb_platform_delay_ms(100);
    }
}

View File

@ -0,0 +1,20 @@
**bsp_common/image_sensor/bsp_image_sensor.c** 中**FORMAT_SEL** 选择 **UYVY**
本实验,通过摄像头采集环境图像,使用 tensorflow-lite 实现了一个可以检测环境中是否有人的 demo。
实验需要在 BL706 AVB 板上连接 ili9341 屏幕,用于显示图像和检测结果。
编译命令:
```bash
$ make build SUPPORT_FLOAT=y BOARD=bl706_avb APP=vww_demo
```
NMSIS加速库:
conv, depthwise_conv
实验现象:
在 log 串口中可以看到 person score 的参数
person score < 0, No Person
person score > 0, Person
同时在 ili9341 屏幕上可以显示图像和检测结果

View File

@ -0,0 +1,220 @@
/****************************************************************************************
 * @file tflite_camera_psram.ld
 *
 * @brief GNU ld linker script for the BL706 tflite camera demo.
 *
 * Copyright (C) BouffaloLab 2018
 *
 ****************************************************************************************
 */

/* configure the CPU type */
OUTPUT_ARCH( "riscv" )
/* link with the standard c library */
INPUT(-lc)
/* link with the standard GCC library */
INPUT(-lgcc)
/* configure the entry point */
ENTRY(_enter)

StackSize = 0x1000; /* 4KB */

MEMORY
{
    /* code executes in place from flash; TCM regions hold latency-critical
     * code/data; ram_memory is the general data/bss/heap region */
    xip_memory  (rx)  : ORIGIN = 0x23000000, LENGTH = 1024K
    itcm_memory (rx)  : ORIGIN = 0x22014000, LENGTH = 16K
    dtcm_memory (rx)  : ORIGIN = 0x42018000, LENGTH = 16K
    ram_memory  (!rx) : ORIGIN = 0x4201C000, LENGTH = 80K
    hbn_memory  (rx)  : ORIGIN = 0x40010000, LENGTH = 0xE00 /* hbn ram 4K used 3.5K*/
}

SECTIONS
{
    PROVIDE(__metal_chicken_bit = 0);

    .text :
    {
        . = ALIGN(4);
        __text_code_start__ = .;

        KEEP (*(.text.metal.init.enter))
        KEEP (*(SORT_NONE(.init)))

        /* section information for finsh shell */
        . = ALIGN(4);
        _shell_command_start = .;
        KEEP(*(shellCommand))
        _shell_command_end = .;

        /* section information for usb desc */
        . = ALIGN(4);
        _usb_desc_start = .;
        KEEP(*(usb_desc))
        . = ALIGN(4);
        _usb_desc_end = .;

        *(.text)
        *(.text.*)

        /* keep flash-driver .rodata out of XIP: these objects are copied to
         * ITCM below so they stay usable while the flash is busy */
        /*put .rodata**/
        *(EXCLUDE_FILE( *bl702_glb*.o* \
                        *bl702_pds*.o* \
                        *bl702_common*.o* \
                        *bl702_sf_cfg*.o* \
                        *bl702_sf_cfg_ext*.o* \
                        *bl702_sf_ctrl*.o* \
                        *bl702_sflash*.o* \
                        *bl702_sflash_ext*.o* \
                        *bl702_xip_sflash*.o* \
                        *bl702_xip_sflash_ext*.o* \
                        *bl702_ef_ctrl*.o*) .rodata*)
        *(.rodata)
        *(.rodata.*)
        *(.srodata)
        *(.srodata.*)

        . = ALIGN(4);
        __text_code_end__ = .;
    } > xip_memory

    . = ALIGN(4);
    __itcm_load_addr = .;

    /* code + const data that must run from ITCM (e.g. flash drivers) */
    .itcm_region : AT (__itcm_load_addr)
    {
        . = ALIGN(4);
        __tcm_code_start__ = .;

        *(.tcm_code.*)
        *(.tcm_const.*)
        *(.sclock_rlt_code.*)
        *(.sclock_rlt_const.*)
        *bl702_glb*.o*(.rodata*)
        *bl702_pds*.o*(.rodata*)
        *bl702_common*.o*(.rodata*)
        *bl702_sf_cfg*.o*(.rodata*)
        *bl702_sf_cfg_ext*.o*(.rodata*)
        *bl702_sf_ctrl*.o*(.rodata*)
        *bl702_sflash*.o*(.rodata*)
        *bl702_sflash_ext*.o*(.rodata*)
        *bl702_xip_sflash*.o*(.rodata*)
        *bl702_xip_sflash_ext*.o*(.rodata*)
        *bl702_ef_ctrl*.o*(.rodata*)

        . = ALIGN(4);
        __tcm_code_end__ = .;
    } > itcm_memory

    __dtcm_load_addr = __itcm_load_addr + SIZEOF(.itcm_region);

    .dtcm_region : AT (__dtcm_load_addr)
    {
        . = ALIGN(4);
        __tcm_data_start__ = .;

        *(.tcm_data)
        /* *finger_print.o(.data*) */

        . = ALIGN(4);
        __tcm_data_end__ = .;
    } > dtcm_memory

    /*************************************************************************/
    /* .stack_dummy section doesn't contains any symbols. It is only
     * used for linker to calculate size of stack sections, and assign
     * values to stack symbols later */
    .stack_dummy (NOLOAD):
    {
        . = ALIGN(0x4);
        . = . + StackSize;
        . = ALIGN(0x4);
    } > dtcm_memory

    /* Set stack top to end of RAM, and stack limit move down by
     * size of stack_dummy section */
    __StackTop = ORIGIN(dtcm_memory) + LENGTH(dtcm_memory);
    PROVIDE( __freertos_irq_stack_top = __StackTop);
    __StackLimit = __StackTop - SIZEOF(.stack_dummy);

    /* Check if data + heap + stack exceeds RAM limit */
    ASSERT(__StackLimit >= __tcm_data_end__, "region RAM overflowed with stack")
    /*************************************************************************/

    __system_ram_load_addr = __dtcm_load_addr + SIZEOF(.dtcm_region);

    .system_ram_data_region : AT (__system_ram_load_addr)
    {
        . = ALIGN(4);
        __system_ram_data_start__ = .;

        *(.system_ram)

        . = ALIGN(4);
        __system_ram_data_end__ = .;
    } > ram_memory

    __ram_load_addr = __system_ram_load_addr + SIZEOF(.system_ram_data_region);

    /* Data section */
    RAM_DATA : AT (__ram_load_addr)
    {
        . = ALIGN(4);
        __ram_data_start__ = .;

        /* gp-relative addressing window for small data */
        PROVIDE( __global_pointer$ = . + 0x800 );

        *(.data)
        *(.data.*)
        *(.sdata)
        *(.sdata.*)
        *(.sdata2)
        *(.sdata2.*)

        . = ALIGN(4);
        __ram_data_end__ = .;
    } > ram_memory

    .bss (NOLOAD) :
    {
        . = ALIGN(4);
        __bss_start__ = .;

        *(.bss*)
        *(.sbss*)
        *(COMMON)

        . = ALIGN(4);
        __bss_end__ = .;
    } > ram_memory

    /* not zeroed at startup */
    .noinit_data (NOLOAD) :
    {
        . = ALIGN(4);
        __noinit_data_start__ = .;

        *(.noinit_data*)

        . = ALIGN(4);
        __noinit_data_end__ = .;
    } > ram_memory

    .heap (NOLOAD):
    {
        . = ALIGN(4);
        __HeapBase = .;

        KEEP(*(.heap*))

        . = ALIGN(4);
        __HeapLimit = .;
    } > ram_memory

    /* heap grows to the end of ram_memory; require at least 1KB of room */
    PROVIDE (__heap_min_size = 0x400);
    __HeapLimit = ORIGIN(ram_memory) + LENGTH(ram_memory);
    ASSERT((__HeapLimit - __HeapBase ) >= __heap_min_size, "heap size is too short.")
}

View File

@ -0,0 +1,25 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/micro/examples/person_detection/detection_responder.h"
#include <stdio.h>
// This dummy implementation writes person and no person scores to the error
// console. Real applications will want to take some custom action instead, and
// should implement their own versions of this function.
// NOTE(review): in this demo the call site in loop() is commented out; the
// result is consumed through the score out-parameter instead.
void RespondToDetection(tflite::ErrorReporter* error_reporter,
                        int8_t person_score, int8_t no_person_score) {
  TF_LITE_REPORT_ERROR(error_reporter, "person score:%d no person score %d",
                       person_score, no_person_score);
}

View File

@ -0,0 +1,152 @@
#include "image_proc.h"
#define ai_input_w 50
#define ai_input_h 50
#define ai_input_c 1
#define PSRAM_START_ADDR (0x26000000)
/* Read pixel (x, y) of channel c from a CHW-laid-out int8 image of
 * hin rows by win columns; returned widened to float. No bounds check. */
static float get_pixel(int8_t *data, int x, int y, int c, int hin, int win)
{
    int idx = (c * hin + y) * win + x;
    return (float)data[idx];
}
/* Write val (truncated to int8) to pixel (x, y) of channel c in a CHW image
 * of cin channels, hin rows, win columns. Out-of-range coordinates are
 * silently ignored. */
static void set_pixel(int8_t *data, int x, int y, int c, float val, int cin, int hin, int win)
{
    int in_range = (x >= 0 && x < win) &&
                   (y >= 0 && y < hin) &&
                   (c >= 0 && c < cin);
    if (in_range) {
        data[(c * hin + y) * win + x] = val;
    }
}
/* Accumulate val into pixel (x, y) of channel c (CHW layout); the addition
 * happens in float and the result is truncated back to int8. No bounds check. */
static void add_pixel(int8_t *data, int x, int y, int c, float val, int hin, int win)
{
    int idx = (c * hin + y) * win + x;
    data[idx] += val;
}
/**
 * @brief Bilinearly resize an unsigned 8-bit image to size_o x size_o,
 *        converting it to signed int8 (x - 128) on the way in.
 *
 * Two-pass separable resize: first horizontally into BL_part
 * (hin rows x size_o cols), then vertically into BL_resized.
 *
 * @param src_8      source image, win*hin*cin bytes, unsigned
 * @param BL_resized destination, size_o*size_o*cin int8 pixels
 * @param win        source width
 * @param hin        source height
 * @param cin        channel count (assumed 1 in this demo -- TODO confirm)
 * @param size_o     output edge length (must be > 1: w/h scale divide by size_o-1)
 * @param buffer     scratch, at least (win*hin + size_o*hin)*cin bytes
 */
void resize_image(uint8_t *src_8, int8_t *BL_resized, int win, int hin, int cin, int size_o, int8_t *buffer)
{
    //int8_t src[win*hin*cin];
    //int8_t BL_part[size_o*hin*cin];
    /* carve the two working images out of the caller-provided scratch buffer */
    int8_t *src = buffer;
    int8_t *BL_part = buffer + win * hin * cin;
    int r, c, k, wout, hout;
    wout = size_o;
    hout = size_o;
    /* map output coordinate -> source coordinate (endpoints align exactly) */
    float w_scale = (float)(win - 1) / (wout - 1);
    float h_scale = (float)(hin - 1) / (hout - 1);
    // normalize image to zero
    for (int i = 0; i < win * hin * cin; i++) {
        src[i] = (int8_t)(src_8[i] - 128);
    }
    /* pass 1: horizontal interpolation, full source height */
    for (k = 0; k < cin; ++k) {
        for (r = 0; r < hin; ++r) {
            for (c = 0; c < wout; ++c) {
                float val = 0;
                if (c == wout - 1 || win == 1) {
                    val = get_pixel(src, win - 1, r, k, hin, win);
                } else {
                    float sx = c * w_scale;
                    int ix = (int)sx;
                    float dx = sx - ix;
                    val = (1 - dx) * get_pixel(src, ix, r, k, hin, win) + dx * get_pixel(src, ix + 1, r, k, hin, win);
                }
                set_pixel(BL_part, c, r, k, val, cin, hin, wout);
            }
        }
    }
    /* pass 2: vertical interpolation; the upper-neighbor term is written
     * first, then the lower-neighbor term is accumulated via add_pixel */
    for (k = 0; k < cin; ++k) {
        for (r = 0; r < hout; ++r) {
            float sy = r * h_scale;
            int iy = (int)sy;
            float dy = sy - iy;
            for (c = 0; c < wout; ++c) {
                float val = (1 - dy) * get_pixel(BL_part, c, iy, k, hin, wout);
                set_pixel(BL_resized, c, r, k, val, cin, hout, wout);
            }
            if (r == hout - 1 || hin == 1)
                continue;
            for (c = 0; c < wout; ++c) {
                float val = dy * get_pixel(BL_part, c, iy + 1, k, hin, wout);
                add_pixel(BL_resized, c, r, k, val, hout, wout);
            }
        }
    }
}
/**
 * @brief Bilinearly resize an already-signed int8 image to size_o x size_o.
 *
 * Identical to resize_image() except the input is already int8, so no
 * -128 offset is applied. Used for the second (160 -> 50) resize stage.
 *
 * @param src_8      source image, win*hin*cin int8 pixels
 * @param BL_resized destination, size_o*size_o*cin int8 pixels
 * @param win        source width
 * @param hin        source height
 * @param cin        channel count (assumed 1 in this demo -- TODO confirm)
 * @param size_o     output edge length (must be > 1)
 * @param buffer     scratch, at least (win*hin + size_o*hin)*cin bytes
 */
void resize_image_i8(int8_t *src_8, int8_t *BL_resized, int win, int hin, int cin, int size_o, int8_t *buffer)
{
    //int8_t src[win*hin*cin];
    //int8_t BL_part[size_o*hin*cin];
    /* carve the two working images out of the caller-provided scratch buffer */
    int8_t *src = buffer;
    int8_t *BL_part = buffer + win * hin * cin;
    int r, c, k, wout, hout;
    wout = size_o;
    hout = size_o;
    float w_scale = (float)(win - 1) / (wout - 1);
    float h_scale = (float)(hin - 1) / (hout - 1);
    // normalize image to zero
    /* input is already signed: plain copy into the scratch image */
    for (int i = 0; i < win * hin * cin; i++) {
        src[i] = (int8_t)(src_8[i]);
    }
    /* pass 1: horizontal interpolation, full source height */
    for (k = 0; k < cin; ++k) {
        for (r = 0; r < hin; ++r) {
            for (c = 0; c < wout; ++c) {
                float val = 0;
                if (c == wout - 1 || win == 1) {
                    val = get_pixel(src, win - 1, r, k, hin, win);
                } else {
                    float sx = c * w_scale;
                    int ix = (int)sx;
                    float dx = sx - ix;
                    val = (1 - dx) * get_pixel(src, ix, r, k, hin, win) + dx * get_pixel(src, ix + 1, r, k, hin, win);
                }
                set_pixel(BL_part, c, r, k, val, cin, hin, wout);
            }
        }
    }
    /* pass 2: vertical interpolation (set upper term, add lower term) */
    for (k = 0; k < cin; ++k) {
        for (r = 0; r < hout; ++r) {
            float sy = r * h_scale;
            int iy = (int)sy;
            float dy = sy - iy;
            for (c = 0; c < wout; ++c) {
                float val = (1 - dy) * get_pixel(BL_part, c, iy, k, hin, wout);
                set_pixel(BL_resized, c, r, k, val, cin, hout, wout);
            }
            if (r == hout - 1 || hin == 1)
                continue;
            for (c = 0; c < wout; ++c) {
                float val = dy * get_pixel(BL_part, c, iy + 1, k, hin, wout);
                add_pixel(BL_resized, c, r, k, val, hout, wout);
            }
        }
    }
}
/**
 * @brief Preprocess a camera frame for inference: resize the full frame to
 *        160x160 (preview) and then to 50x50 signed int8 (model input).
 *
 * All buffers live in PSRAM, allocated bump-pointer style starting right
 * after the raw frame. NOTE(review): assumes the frame sits at the start of
 * PSRAM (BL702_PSRAM_XIP_BASE) -- confirm against the camera config.
 *
 * @param picture             raw frame (luma plane)
 * @param frame_size          frame length in bytes (unused here -- size is
 *                            derived from the resolution parameters)
 * @param CAMERA_RESOLUTION_X frame width
 * @param CAMERA_RESOLUTION_Y frame height
 * @param piture_lcd          out: PSRAM offset of the 160x160 preview
 *                            (sic: "piture" matches the header declaration)
 * @return pointer to the 50x50 int8 model input in PSRAM
 */
int8_t *image_proc(uint8_t *picture, int frame_size, int CAMERA_RESOLUTION_X, int CAMERA_RESOLUTION_Y, int *piture_lcd)
{
    /* bump pointer: skip past the raw frame already in PSRAM */
    int psram_ptr = CAMERA_RESOLUTION_X * CAMERA_RESOLUTION_Y;
    int8_t *resized = (int8_t *)(BL702_PSRAM_XIP_BASE + psram_ptr);
    int resized_size = (int)ai_input_w * (int)ai_input_h * (int)ai_input_c;
    psram_ptr += resized_size;
    int mid_size = 160;
    int8_t *m_resized = (int8_t *)(BL702_PSRAM_XIP_BASE + psram_ptr);
    /* report where the preview lives so main() can display it */
    *piture_lcd = psram_ptr;
    int m_resized_size = (int)mid_size * (int)mid_size * (int)ai_input_c;
    psram_ptr += m_resized_size;
    /* stage 1: full frame (u8) -> 160x160 signed */
    resize_image(picture, m_resized, CAMERA_RESOLUTION_X, CAMERA_RESOLUTION_Y, ai_input_c, mid_size, (int8_t *)(BL702_PSRAM_XIP_BASE + psram_ptr));
    /* stage 2: 160x160 (i8) -> 50x50 model input */
    resize_image_i8(m_resized, resized, mid_size, mid_size, ai_input_c, ai_input_w, (int8_t *)(BL702_PSRAM_XIP_BASE + psram_ptr));
    /* convert the preview back to unsigned for the LCD path */
    for (int i = 0; i < m_resized_size; i++) {
        m_resized[i] = (uint8_t)(((int16_t)m_resized[i]) + 128);
    }
    return resized;
}

View File

@ -0,0 +1,15 @@
/* Header for the camera-frame preprocessing stage of the vww demo.
 * Fix: the original header had no include guard, so including it from more
 * than one translation-unit-visible path would redeclare everything. */
#ifndef IMAGE_PROC_H
#define IMAGE_PROC_H

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include "bsp_sf_psram.h"

#ifdef __cplusplus
extern "C" {
#endif

/**
 * @brief Resize a raw camera frame to the model input (50x50 int8) and a
 *        160x160 preview; returns the model-input pointer and writes the
 *        preview's PSRAM offset through piture_lcd.
 */
int8_t *image_proc(uint8_t *picture, int frame_size, int CAMERA_RESOLUTION_X, int CAMERA_RESOLUTION_Y, int *piture_lcd);
// all of your legacy C code here

#ifdef __cplusplus
}
#endif

#endif /* IMAGE_PROC_H */

View File

@ -0,0 +1,29 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "image_provider.h"
#include "misc.h"
#include "tensorflow/lite/micro/examples/person_detection/model_settings.h"
//#include "tensorflow/lite/micro/examples/person_detection/testdata/person_image_data.h"
//#include "tensorflow/lite/micro/examples/person_detection/testdata/no_person_image_data.h"
// Copies the already-preprocessed frame (scale_img, produced by image_proc)
// into the model's input tensor. This port has no platform image capture
// here; the camera path lives in main.c.
TfLiteStatus GetImage(tflite::ErrorReporter* error_reporter, int image_width,
                      int image_height, int channels, int8_t* image_data, int8_t* scale_img) {
  const int total = image_width * image_height * channels;
  const int8_t* src = scale_img;
  int8_t* dst = image_data;
  for (int remaining = total; remaining > 0; --remaining) {
    *dst++ = *src++;
  }
  return kTfLiteOk;
}

View File

@ -0,0 +1,39 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_IMAGE_PROVIDER_H_
#define TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_IMAGE_PROVIDER_H_
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/micro/micro_error_reporter.h"
// This is an abstraction around an image source like a camera, and is
// expected to return 8-bit sample data. The assumption is that this will be
// called in a low duty-cycle fashion in a low-power application. In these
// cases, the imaging sensor need not be run in a streaming mode, but rather can
// be idled in a relatively low-power mode between calls to GetImage(). The
// assumption is that the overhead and time of bringing the low-power sensor out
// of this standby mode is commensurate with the expected duty cycle of the
// application. The underlying sensor may actually be put into a streaming
// configuration, but the image buffer provided to GetImage should not be
// overwritten by the driver code until the next call to GetImage();
//
// The reference implementation can have no platform-specific dependencies, so
// it just returns a static image. For real applications, you should
// ensure there's a specialized implementation that accesses hardware APIs.
//
// NOTE(review): this port adds a scale_img parameter (not in upstream TFLM):
// the caller supplies the preprocessed frame and GetImage copies it into
// image_data.
TfLiteStatus GetImage(tflite::ErrorReporter *error_reporter, int image_width,
                      int image_height, int channels, int8_t *image_data, int8_t *scale_img);
#endif  // TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_IMAGE_PROVIDER_H_

View File

@ -0,0 +1,123 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "main_functions.h"
#include "image_provider.h"
#include "tensorflow/lite/micro/examples/person_detection/detection_responder.h"
#include "tensorflow/lite/micro/examples/person_detection/model_settings.h"
#include "tensorflow/lite/micro/micro_error_reporter.h"
#include "tensorflow/lite/micro/micro_interpreter.h"
#include "tensorflow/lite/micro/micro_mutable_op_resolver.h"
#include "tensorflow/lite/micro/models/vww2_50_50_INT8_model_data.h"
#include "tensorflow/lite/micro/system_setup.h"
#include "tensorflow/lite/schema/schema_generated.h"
// Referenced by C++ runtime support for static destructors; this bare-metal
// target has no dynamic loader, so a null handle suffices.
void *__dso_handle = NULL;

// Globals, used for compatibility with Arduino-style sketches.
namespace {
tflite::ErrorReporter *error_reporter = nullptr;
const tflite::Model *model = nullptr;
tflite::MicroInterpreter *interpreter = nullptr;
TfLiteTensor *input = nullptr;  // model input tensor, set once in setup()

// In order to use optimized tensorflow lite kernels, a signed int8_t quantized
// model is preferred over the legacy unsigned model format. This means that
// throughout this project, input images must be converted from unisgned to
// signed format. The easiest and quickest way to convert from unsigned to
// signed 8-bit integers is to subtract 128 from the unsigned value to get a
// signed value.

// An area of memory to use for input, output, and intermediate arrays.
constexpr int kTensorArenaSize = 72 * 1024;
static uint8_t tensor_arena[kTensorArenaSize];
}  // namespace
// The name of this function is important for Arduino compatibility.
// The name of this function is important for Arduino compatibility.
//
// One-time initialization: builds the error reporter, loads the vww model,
// registers the ops it needs, allocates the tensor arena, and caches the
// input tensor pointer. Must be called before loop().
void setup()
{
    tflite::InitializeTarget();

    // Set up logging. Google style is to avoid globals or statics because of
    // lifetime uncertainty, but since this has a trivial destructor it's okay.
    // NOLINTNEXTLINE(runtime-global-variables)
    static tflite::MicroErrorReporter micro_error_reporter;
    error_reporter = &micro_error_reporter;

    // Map the model into a usable data structure. This doesn't involve any
    // copying or parsing, it's a very lightweight operation.
    model = tflite::GetModel(g_vww2_50_50_INT8_model_data);
    if (model->version() != TFLITE_SCHEMA_VERSION) {
        TF_LITE_REPORT_ERROR(error_reporter,
                             "Model provided is schema version %d not equal "
                             "to supported version %d.",
                             model->version(), TFLITE_SCHEMA_VERSION);
        return;  // leaves interpreter/input null; loop() must not be called
    }

    // Pull in only the operation implementations we need.
    // This relies on a complete list of all the ops needed by this graph.
    // An easier approach is to just use the AllOpsResolver, but this will
    // incur some penalty in code space for op implementations that are not
    // needed by this graph.
    //
    // tflite::AllOpsResolver resolver;
    // NOLINTNEXTLINE(runtime-global-variables)
    static tflite::MicroMutableOpResolver<7> micro_op_resolver;
    micro_op_resolver.AddAveragePool2D();
    micro_op_resolver.AddConv2D();
    micro_op_resolver.AddDepthwiseConv2D();
    micro_op_resolver.AddReshape();
    micro_op_resolver.AddSoftmax();
    micro_op_resolver.AddPad();
    micro_op_resolver.AddAdd();

    // Build an interpreter to run the model with.
    // NOLINTNEXTLINE(runtime-global-variables)
    static tflite::MicroInterpreter static_interpreter(
        model, micro_op_resolver, tensor_arena, kTensorArenaSize, error_reporter);
    interpreter = &static_interpreter;

    // Allocate memory from the tensor_arena for the model's tensors.
    TfLiteStatus allocate_status = interpreter->AllocateTensors();
    if (allocate_status != kTfLiteOk) {
        TF_LITE_REPORT_ERROR(error_reporter, "AllocateTensors() failed");
        return;
    }

    // Get information about the memory area to use for the model's input.
    input = interpreter->input(0);
}
// The name of this function is important for Arduino compatibility.
void loop(int8_t *scale_img, int *score)
{
// Get image from provider.
if (kTfLiteOk != GetImage(error_reporter, kNumCols, kNumRows, kNumChannels,
input->data.int8, scale_img)) {
TF_LITE_REPORT_ERROR(error_reporter, "Image capture failed.");
}
// Run the model on this input and make sure it succeeds.
if (kTfLiteOk != interpreter->Invoke()) {
TF_LITE_REPORT_ERROR(error_reporter, "Invoke failed.");
}
TfLiteTensor *output = interpreter->output(0);
//printf("TFLite done\r\n");
// Process the inference results.
int8_t person_score = output->data.uint8[kPersonIndex];
int8_t no_person_score = output->data.uint8[kNotAPersonIndex];
//printf("person score %d\r\n\r\n", (int)-1*no_person_score);
//RespondToDetection(error_reporter, person_score, no_person_score);
*score = (int)-1 * no_person_score;
}

View File

@ -0,0 +1,37 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_MAIN_FUNCTIONS_H_
#define TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_MAIN_FUNCTIONS_H_
#include <stdint.h>
// Expose a C friendly interface for main functions.
#ifdef __cplusplus
extern "C" {
#endif
// Initializes all data needed for the example. The name is important, and needs
// to be setup() for Arduino compatibility.
void setup();
// Runs one iteration of data gathering and inference. This should be called
// repeatedly from the application code. The name needs to be loop() for Arduino
// compatibility.
// NOTE(review): this port passes the preprocessed image in via scale_img and
// returns the detection score through *score (negated no-person logit),
// instead of calling RespondToDetection as upstream does.
void loop(int8_t *scale_img, int *score);
#ifdef __cplusplus
}
#endif
#endif  // TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_MAIN_FUNCTIONS_H_

View File

@ -0,0 +1,21 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/micro/examples/person_detection/model_settings.h"

// Human-readable labels for the model's two output classes, indexed by
// kNotAPersonIndex / kPersonIndex from model_settings.h (order must match
// the model's output tensor).
const char* kCategoryLabels[kCategoryCount] = {
    "notperson",
    "person",
};

File diff suppressed because one or more lines are too long