mirror of https://github.com/Bloktard/workshop_stressometer.git
synced 2025-10-29 02:06:01 +00:00

one push to rule them all

This commit is contained in:
parent c75f2b0133
commit 52917778fc

16788  Pics_and_video_and_other/3d_model_figure.stl  Normal file
File diff suppressed because it is too large

BIN  Pics_and_video_and_other/IMG_6420.mov  Normal file
Binary file not shown.

46  Pics_and_video_and_other/Liste stress pas stress.txt  Normal file
@@ -0,0 +1,46 @@
Phrases stressantes (stressful phrases):

"Dépêche-toi, on n’a pas le temps !" (Hurry up, we don't have time!)
"il faut recommencer." (we have to start over.)
"On doit parler, c’est important." (We need to talk, it's important.)
"Tu ne peux pas échouer" (You can't fail)
"C’est maintenant ou jamais." (It's now or never.)
"tout repose sur toi" (everything rests on you)
"Tu me déçois vraiment" (You really disappoint me)
"Tout le monde te regarde" (Everyone is watching you)
"Il faut une solution!" (We need a solution!)

🌿 Phrases apaisantes (calming phrases):

"Prends ton temps,tout va bien" (Take your time, everything is fine)
"tu fais de ton mieux." (you're doing your best.)
"Peu importe le résultat" (The result doesn't matter)
"Je suis là si tu as besoin." (I'm here if you need me.)
"détends-toi." (relax.)
"Tu es en sécurité." (You are safe.)
"pas de stress." (no stress.)
"Je comprends ce que tu ressens" (I understand how you feel)
"Tu n’es pas seul(e)." (You are not alone.)
"tu peux le faire." (you can do it.)
BIN  Pics_and_video_and_other/Stressometer.png  Normal file
Binary file not shown. (Size: 24 KiB)

BIN  Pics_and_video_and_other/cec.jpg  Normal file
Binary file not shown. (Size: 3.7 MiB)

Binary file not shown. (Size: 221 KiB)

BIN  Pics_and_video_and_other/ev.jpg  Normal file
Binary file not shown. (Size: 3.5 MiB)

BIN  Pics_and_video_and_other/ffef.jpg  Normal file
Binary file not shown. (Size: 3.2 MiB)

BIN  Pics_and_video_and_other/image.jpg  Normal file
Binary file not shown. (Size: 3.0 MiB)

40  README.md
@@ -1,2 +1,42 @@
# workshop_stressometer

Using an Arduino Nano 33 BLE Sense Rev2 to train and run our own AI recognition model.

We worked in a team of two: one 42 student and one art-school student.

The brief for this workshop was: AI - Arduino - Psychological issues.

Overall, the goal was to reflect on what the norm is, and on what the norm is inside an AI.

Our approach is rooted in childhood, where most trauma begins.
To illustrate that trauma with an Arduino and an AI, we decided to base our reasoning on "traumatizing" a child.

So we trained an AI with Edge Impulse to recognize a set of predetermined sentences. The trained library is in this repository (the sentences are in French).

We portray the trauma with a stress variable. This variable is influenced by several factors:

- the movement of the Arduino
- the proximity of something near the Arduino
- the sound level the Arduino hears
- the recognition of the sentences

To show the trauma, the stress variable moves up and down and feeds a base-stress variable that starts at 0. Once the base-stress variable rises, the only way to lower it again is to appease the "child".
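As a rough illustration only (the real logic, with its exact thresholds and decay factors, lives in main.py), the update rule can be sketched like this; the function name and constants are simplified stand-ins:

```python
# Simplified sketch of the stress update implemented in main.py (values are illustrative).
def update_stress(target_stress, base_stress, apaisant, stress_val, z_change):
    modifier = 0.0
    if stress_val > 0.2:      # a stressful sentence was recognized
        modifier += stress_val * 50
    if apaisant > 0.2:        # a calming sentence was recognized
        modifier -= apaisant * 50
    if z_change > 0.1:        # the board was moved
        modifier += z_change

    target_stress += modifier
    # Each new stress peak permanently raises the base stress (capped at 50),
    # so the "child" never simply returns to zero on its own.
    base_stress = min(50.0, base_stress + max(0.0, target_stress) * 0.05)
    # Momentary stress decays over time, but never drops below the base stress.
    target_stress = max(base_stress, target_stress * 0.25)
    return target_stress, base_stress
```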
How we trained the AI model:

We made a lot of recordings in Edge Impulse under Data acquisition, with labels such as "Stress" and "calm". We then created an impulse design with an MFCC audio block, added a classifier, and saved the impulse.
Now you can test or retrain every category. Once the results look fine, go to Deployment and choose "Arduino library" (or C++) as the output.
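For reference, the deployed model prints one score per label over serial, and main.py picks the two values out of the buffered lines roughly like this (a simplified extract of the parsing in get_data()):

```python
# Simplified extract: pull "Apaisant: 0.87654" / "Stress: 0.12345" out of one prediction block.
apaisant = stress_val = None
for line in data_buffer:          # data_buffer holds the serial lines of one prediction block
    if "Apaisant:" in line:
        apaisant = float(line.split(":")[1].strip())
    if "Stress:" in line:
        stress_val = float(line.split(":")[1].strip())
```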
How to proceed:

- First, install the AI-trained library in the Arduino IDE.
- Second, push "retrieve_data.ino" to your Arduino board (the board we used is the Arduino Nano 33 BLE Sense Rev2).
- Then, run the code (main.py) from VS Code or any other editor; see the port note just below.
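main.py opens "/dev/ttyACM0" at 9600 baud, so on another machine the port name will differ. A minimal, hypothetical lookup (the matching strings are assumptions, not something the project ships) that could replace the hard-coded port:

```python
# Hypothetical port lookup; main.py itself hard-codes "/dev/ttyACM0".
import serial
from serial.tools import list_ports

candidates = [p.device for p in list_ports.comports()
              if "ACM" in p.device or "usbmodem" in p.device]  # typical Linux / macOS names
if candidates:
    serialCom = serial.Serial(candidates[0], 9600)  # same baud rate as retrieve_data.ino
else:
    print("No Arduino-like serial port found; check the cable and permissions.")
```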
In the end, the project works, but it is open to a lot of improvement.
For example:

- The proximity sensor has no influence on the stress.
- Proximity should lower the base-stress, but it doesn't... (a possible approach is sketched after this list).
- The AI recognition isn't always spot-on, which makes the stress vary when nothing is happening; this comes directly from the AI training in Edge Impulse.
- There is a slight delay in reaction (but nothing too big).
- The sound level doesn't work because the microphone is already used by the AI recognition model.
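Purely as a sketch of that missing piece (not implemented in main.py): retrieve_data.ino already prints the APDS9960 proximity reading on the same "|" line as the gyroscope values, so the PC side could use it to ease the base stress. The threshold and decrement below are illustrative assumptions:

```python
# Hypothetical addition to get_data(): let a close, steady presence "appease the child".
tokens = decoded_bytes.replace("|", "").replace(",", " ").split()
if len(tokens) >= 5:                      # gx, gy, gz, proximity, soundLevel
    proximity = float(tokens[3])          # APDS9960 proximity reading
    if proximity < 50:                    # something (someone) is close; threshold is a guess
        base_stress = max(0.0, base_stress - 0.5)
```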
BIN  ai_trained _library/ei-projet-micro-arduino-arduino-1.0.21.zip  Normal file
Binary file not shown.

227  main.py  Normal file
@@ -0,0 +1,227 @@
from serial.tools import list_ports
import serial
import time
import threading
import pygame
import speech_recognition as sr
import sys
import math

# Shared variables and lock
stress = 0          # Current smoothed stress
target_stress = 0   # Target stress from serial data
base_stress = 0     # Permanent base level of stress
stress_lock = threading.Lock()

# Variables to track sensor history
last_z = None           # Last accelerometer z-value
last_db = None          # Last decibel value
proximity_buffer = []   # Buffer for proximity values
peak_stress = 0         # Track peak stress for base level adjustment


def get_data():
    global target_stress, base_stress, last_z, last_db, proximity_buffer, peak_stress

    # Identify the correct port
    ports = list_ports.comports()
    for port in ports:
        print(port)

    # Open the serial com (adjust the port as needed)
    try:
        serialCom = serial.Serial("/dev/ttyACM0", 9600)
    except serial.SerialException as e:
        print(f"Failed to open serial port: {e}")
        return

    # Toggle DTR to reset the Arduino
    serialCom.setDTR(False)
    time.sleep(1)
    serialCom.flushInput()
    serialCom.setDTR(True)

    # Buffer to accumulate multi-line data
    data_buffer = []

    while True:
        try:
            # Read the line from serial
            s_bytes = serialCom.readline()
            decoded_bytes = s_bytes.decode("utf-8").strip('\r\n')
            data_buffer.append(decoded_bytes)
            print(f"Received: {decoded_bytes}")  # Debug raw input

            # Check if we've reached the end of a prediction block
            if "|" in decoded_bytes:  # Assuming accelerometer data marks the end
                with stress_lock:
                    stress_modifier = 0
                    significant_change = False

                    # Parse the buffered prediction data
                    apaisant = None
                    stress_val = None
                    for line in data_buffer:
                        if "Apaisant:" in line:
                            apaisant = float(line.split(':')[1].strip())
                        if "Stress:" in line:
                            stress_val = float(line.split(':')[1].strip())

                    # Process Apaisant and Stress if available
                    if apaisant is not None and stress_val is not None:
                        print(f"Parsed - Apaisant: {apaisant:.5f}, Stress: {stress_val:.5f}")

                        # High Apaisant reduces stress
                        if apaisant > 0.5:    # Significant calming
                            stress_modifier -= apaisant * 50
                            significant_change = True
                        elif apaisant > 0.2:  # Moderate calming
                            stress_modifier -= apaisant * 20
                            significant_change = True

                        # High Stress increases stress
                        if stress_val > 0.5:    # Significant stress
                            stress_modifier += stress_val * 50
                            significant_change = True
                        elif stress_val > 0.2:  # Moderate stress
                            stress_modifier += stress_val * 20
                            significant_change = True

                    # Parse accelerometer data (e.g., "| 1.04, 0.24, -0.06 0 0")
                    accel_data = [val.strip().replace('|', '') for val in decoded_bytes.split(",")]
                    parsed_values = []
                    for val in accel_data:
                        try:
                            cleaned_val = val.split()[0]
                            parsed_values.append(float(cleaned_val))
                        except (ValueError, IndexError):
                            continue

                    if len(parsed_values) >= 3:  # Accelerometer (x, y, z)
                        x, y, z = parsed_values[:3]
                        if last_z is not None:
                            z_change = abs(z - last_z)
                            if z_change > 0.1:  # Threshold for movement
                                stress_modifier += z_change * 1
                                significant_change = True
                        last_z = z
                        print(f"Accelerometer - X: {x:.2f}, Y: {y:.2f}, Z: {z:.2f}")

                    # Apply the modifier
                    target_stress += stress_modifier

                    # Update peak stress for base level adjustment (with a cap)
                    if target_stress > peak_stress:
                        peak_stress = target_stress
                        base_stress += peak_stress * 0.05   # Small increase
                        base_stress = min(50, base_stress)  # Cap base_stress at 50

                    # Enhanced decay logic
                    if not significant_change:
                        if target_stress > base_stress + 20:  # Stronger decay when far above base
                            target_stress *= 0.15             # Very fast decay (85% reduction)
                        else:
                            target_stress *= 0.25             # Fast decay (75% reduction)
                    else:
                        target_stress *= 0.88                 # Slower decay when active (12% reduction)

                    # Ensure target_stress doesn't drop below base_stress
                    target_stress = max(base_stress, target_stress)
                    base_stress = max(0, base_stress)  # Prevent negative base stress

                    # Clamp target_stress
                    target_stress = max(-100, min(100, target_stress))

                    # Debug print final values
                    print(f"Stress Modifier: {stress_modifier:.2f}, Target Stress: {target_stress:.2f}, Base Stress: {base_stress:.2f}")

                # Clear buffer after processing
                data_buffer = []

        except Exception as e:
            print(f"Error in get_data: {e}")
            break

    serialCom.close()


def long_dot():
    global stress, target_stress

    # Initialize Pygame
    pygame.init()

    # Define constants
    WINDOW_WIDTH = 800
    WINDOW_HEIGHT = 600
    DOT_RADIUS = 3
    BACKGROUND_COLOR = (0, 0, 0)
    DOT_COLOR = (255, 0, 0)
    TRAIL_COLOR = (255, 255, 255)
    WIND_SPEED = 6
    SPEED = 5
    DAMPING_FACTOR = 0.1
    OSCILLATION_FREQ = 1.0  # Fast oscillation

    # Create the screen
    screen = pygame.display.set_mode((WINDOW_WIDTH, WINDOW_HEIGHT))
    pygame.display.set_caption("Dot with Wind Pushed Trail (Smoothed with Fast Oscillation)")

    # Initial dot position
    dot_x = WINDOW_WIDTH // 2
    dot_y = WINDOW_HEIGHT // 2

    # Main game loop
    running = True
    trail = []
    time_elapsed = 0

    while running:
        # Handle events
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                running = False

        # Smooth stress towards target_stress
        with stress_lock:
            current_target_stress = target_stress
        stress += (current_target_stress - stress) * DAMPING_FACTOR

        # Add fast oscillation
        time_elapsed += 1 / 30  # Assuming 30 FPS
        oscillation = math.sin(time_elapsed * OSCILLATION_FREQ * 2 * math.pi) * abs(stress)
        display_stress = oscillation

        # Update dot position
        dot_y = (WINDOW_HEIGHT // 2) - (display_stress * SPEED)
        dot_y = max(0, min(WINDOW_HEIGHT - DOT_RADIUS * 2, dot_y))

        # Add to trail
        trail.append((dot_x, dot_y))
        trail = [(x - WIND_SPEED, y) for (x, y) in trail]

        # Clear the screen
        screen.fill(BACKGROUND_COLOR)

        # Draw the trail
        for (trail_x, trail_y) in trail:
            pygame.draw.circle(screen, TRAIL_COLOR, (int(trail_x) + DOT_RADIUS, int(trail_y) + DOT_RADIUS), DOT_RADIUS)

        # Draw the dot
        pygame.draw.circle(screen, DOT_COLOR, (int(dot_x) + DOT_RADIUS, int(dot_y) + DOT_RADIUS), DOT_RADIUS)

        # Update the screen
        pygame.display.flip()

        # Control the frame rate
        pygame.time.Clock().tick(30)

    pygame.quit()
    sys.exit()


# Create and start the threads
thread1 = threading.Thread(target=get_data, daemon=True)
thread2 = threading.Thread(target=long_dot)

thread1.start()
thread2.start()

thread2.join()
267  retrieve_data.ino  Normal file
@@ -0,0 +1,267 @@
#include "Arduino_BMI270_BMM150.h" // Library for the BMI270 (accelerometer/gyroscope) and BMM150 (magnetometer)
#include <Arduino_APDS9960.h>      // Library for the APDS9960 (light sensor)
#include <PDM.h>                   // Library for the microphone (PDM)

#define MIC_PIN 2 // Microphone connected to pin 2 (PDM interface)

// // Create instances for the sensors
// Arduino_BMI270_BMM150 IMU;  // Instance for the BMI270 IMU (accelerometer/gyroscope + magnetometer)
// Arduino_APDS9960 APDS;      // Instance for the APDS9960 light sensor

short samples[256]; // Buffer for microphone samples

// Number of audio samples read
volatile int samplesRead;

// edge impulse
#include <PDM.h>
#include <PROJET_MICRO_ARDUINO_inferencing.h>

/** Audio buffers, pointers and selectors */
typedef struct {
    int16_t *buffer;
    uint8_t buf_ready;
    uint32_t buf_count;
    uint32_t n_samples;
} inference_t;

static inference_t inference;
static signed short sampleBuffer[2048];
static bool debug_nn = false; // Set this to true to see e.g. features generated from the raw signal

/**
 * @brief Arduino setup function
 */
void setup() {
    // Start serial communication
    Serial.begin(9600);
    while (!Serial);
    Serial.println("Started");

    // Initialize the IMU (BMI270 and BMM150)
    if (!IMU.begin()) {
        Serial.println("Error initializing BMI270 IMU!");
        while (1); // Halt if initialization fails
    }

    // Initialize the light sensor (APDS9960)
    if (!APDS.begin()) {
        Serial.println("Error initializing APDS9960 Light Sensor!");
        while (1); // Halt if initialization fails
    }

    // Initialize the microphone (PDM)
    PDM.onReceive(onPDMData);
    PDM.begin(1, 16000); // Initialize PDM microphone with one channel and 16 kHz sample rate
    PDM.setGain(20);     // Set microphone gain

    // summary of inferencing settings (from model_metadata.h)
    ei_printf("Inferencing settings:\n");
    ei_printf("\tInterval: %.2f ms.\n", (float)EI_CLASSIFIER_INTERVAL_MS);
    ei_printf("\tFrame size: %d\n", EI_CLASSIFIER_DSP_INPUT_FRAME_SIZE);
    ei_printf("\tSample length: %d ms.\n", EI_CLASSIFIER_RAW_SAMPLE_COUNT / 16);
    ei_printf("\tNo. of classes: %d\n", sizeof(ei_classifier_inferencing_categories) / sizeof(ei_classifier_inferencing_categories[0]));

    if (microphone_inference_start(EI_CLASSIFIER_RAW_SAMPLE_COUNT) == false) {
        ei_printf("ERR: Could not allocate audio buffer (size %d), this could be due to the window length of your model\r\n", EI_CLASSIFIER_RAW_SAMPLE_COUNT);
        return;
    }
}

void loop() {
    // Read accelerometer and gyroscope data
    float gx, gy, gz;
    IMU.readGyroscope(gx, gy, gz); // Get gyroscope values

    // Read ambient light level (APDS9960)
    int proximity = 0;
    if (APDS.proximityAvailable()) {
        proximity = APDS.readProximity();
    }

    // Read sound level (from microphone)
    int soundLevel = getSoundLevel();

    // edge impulse
    ei_printf("Starting inferencing in 2 seconds...\n");

    delay(100);

    ei_printf("Recording...\n");

    bool m = microphone_inference_record();
    if (!m) {
        ei_printf("ERR: Failed to record audio...\n");
        return;
    }

    ei_printf("Recording done\n");

    signal_t signal;
    signal.total_length = EI_CLASSIFIER_RAW_SAMPLE_COUNT;
    signal.get_data = &microphone_audio_signal_get_data;
    ei_impulse_result_t result = { 0 };

    EI_IMPULSE_ERROR r = run_classifier(&signal, &result, debug_nn);
    if (r != EI_IMPULSE_OK) {
        ei_printf("ERR: Failed to run classifier (%d)\n", r);
        return;
    }

    // print the predictions
    ei_printf("Predictions ");
    ei_printf("(DSP: %d ms., Classification: %d ms., Anomaly: %d ms.)",
              result.timing.dsp, result.timing.classification, result.timing.anomaly);
    ei_printf(": \n");
    for (size_t ix = 0; ix < EI_CLASSIFIER_LABEL_COUNT; ix++) {
        ei_printf("    %s: %.5f\n", result.classification[ix].label, result.classification[ix].value);
    }
#if EI_CLASSIFIER_HAS_ANOMALY == 1
    ei_printf("    anomaly score: %.3f\n", result.anomaly);
#endif

    Serial.print(" | ");
    Serial.print(gx);
    Serial.print(", ");
    Serial.print(gy);
    Serial.print(", ");
    Serial.print(gz);

    Serial.print("    ");
    Serial.print(proximity);

    Serial.print("    ");
    Serial.println(soundLevel);

    // Wait for a while before collecting the next set of data
    delay(500);
}

// Function to calculate sound level based on microphone data
int getSoundLevel() {
    int soundValue = 0;
    int sampleCount = sizeof(samples) / sizeof(samples[0]);

    // Accumulate absolute values of all microphone samples
    for (int i = 0; i < sampleCount; i++) {
        soundValue += abs(samples[i]);
    }

    // Return the average sound value
    return soundValue / sampleCount;
}

// PDM callback function to collect samples from the microphone
void onPDMData() {
    int bytesRead = PDM.available();
    if (bytesRead) {
        PDM.read(samples, bytesRead); // Read PDM data into the samples buffer
    }
}

static void pdm_data_ready_inference_callback(void)
{
    int bytesAvailable = PDM.available();

    // read into the sample buffer
    int bytesRead = PDM.read((char *)&sampleBuffer[0], bytesAvailable);

    if (inference.buf_ready == 0) {
        for (int i = 0; i < bytesRead >> 1; i++) {
            inference.buffer[inference.buf_count++] = sampleBuffer[i];

            if (inference.buf_count >= inference.n_samples) {
                inference.buf_count = 0;
                inference.buf_ready = 1;
                break;
            }
        }
    }
}

/**
 * @brief Init inferencing struct and setup/start PDM
 *
 * @param[in] n_samples The n samples
 *
 * @return { description_of_the_return_value }
 */
static bool microphone_inference_start(uint32_t n_samples)
{
    inference.buffer = (int16_t *)malloc(n_samples * sizeof(int16_t));

    if (inference.buffer == NULL) {
        return false;
    }

    inference.buf_count = 0;
    inference.n_samples = n_samples;
    inference.buf_ready = 0;

    // configure the data receive callback
    PDM.onReceive(&pdm_data_ready_inference_callback);

    PDM.setBufferSize(4096);

    // initialize PDM with:
    // - one channel (mono mode)
    // - a 16 kHz sample rate
    if (!PDM.begin(1, EI_CLASSIFIER_FREQUENCY)) {
        ei_printf("Failed to start PDM!");
        microphone_inference_end();

        return false;
    }

    // set the gain, defaults to 20
    PDM.setGain(127);

    return true;
}

/**
 * @brief Wait on new data
 *
 * @return True when finished
 */
static bool microphone_inference_record(void)
{
    inference.buf_ready = 0;
    inference.buf_count = 0;

    while (inference.buf_ready == 0) {
        delay(10);
    }

    return true;
}

/**
 * Get raw audio signal data
 */
static int microphone_audio_signal_get_data(size_t offset, size_t length, float *out_ptr)
{
    numpy::int16_to_float(&inference.buffer[offset], out_ptr, length);

    return 0;
}

/**
 * @brief Stop PDM and release buffers
 */
static void microphone_inference_end(void)
{
    PDM.end();
    free(inference.buffer);
}

#if !defined(EI_CLASSIFIER_SENSOR) || EI_CLASSIFIER_SENSOR != EI_CLASSIFIER_SENSOR_MICROPHONE
#error "Invalid model for current sensor."
#endif