Release 1
solasolo committed May 25, 2022
1 parent 744f724 commit b8d298d
Showing 13 changed files with 24,344 additions and 16,117 deletions.
1 change: 1 addition & 0 deletions .gitignore
@@ -1,3 +1,4 @@
__pycache__
.ipynb_checkpoints
.vscode
*.csv
4 changes: 4 additions & 0 deletions Host/PerformanceCounter.py
@@ -2,6 +2,10 @@

class PerformanceCounter:
    def __init__(self):
        self.Reset()


    def Reset(self):
        self.Count = 0
        self.LastTick = time.perf_counter()

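The Frame() method that SensorProcess.py calls is collapsed in this diff. Purely as a guess from its call site ([count, t] = self.Counter.Frame()), a sketch of such a method could look like the following; this is hypothetical, not the committed code:

    def Frame(self):
        # Hypothetical body: count one frame and return the count together with
        # the time elapsed since the previous tick (SensorProcess.AddData prints
        # it as milliseconds via int(t * 1000)).
        now = time.perf_counter()
        elapsed = now - self.LastTick
        self.LastTick = now
        self.Count += 1
        return [self.Count, elapsed]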
26 changes: 24 additions & 2 deletions Host/SensorChart.py
@@ -20,19 +20,41 @@ def Run(self, proc):
        GLUT.glutMainLoop()


-    def Draw(self, data=None):
+    def Draw(self, data=None, data_his=None):
        GL.glClear(GL.GL_COLOR_BUFFER_BIT)
        GL.glBegin(GL.GL_LINES)

        YScale = lambda y: y / 5 * 0.00001

        # Draw Bars
        if data != None:
            for i in range(len(data)):
                x = i * 0.1 - 0.9
-                y = 0.75 + data[i] / 4 * 0.00001
+                y = 0.75 + YScale(data[i])
                c = AxisColor[i % 3]

                GL.glColor3f(c[0], c[1], c[2])
                GL.glVertex3f(x, 0.75, 0)
                GL.glVertex3f(x, y, 0)

        # Draw Curves
        if data_his != None and len(data_his) > 0:
            size = len(data_his)
            for i in range(6):
                c = AxisColor[i % 3]
                y0 = 0.1 if i > 2 else -0.6
                GL.glColor3f(c[0], c[1], c[2])

                px = -1
                py = y0 + YScale(data_his[0][i])
                for k in range(size - 1):
                    x = -1 + (k + 1) * 2 / 120
                    y = y0 + YScale(data_his[k + 1][i])
                    GL.glVertex3f(px, py, 0)
                    GL.glVertex3f(x, y, 0)
                    px = x
                    py = y


        GL.glEnd()
        GL.glFlush()
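As an aside (not part of the commit), the coordinate mapping above is easy to check in plain Python without an OpenGL context: YScale compresses raw integer readings into a small clip-space offset, and the curve x positions step across a 120-frame window starting just right of -1.

# Standalone check of Draw()'s coordinate math -- illustrative only.
YScale = lambda y: y / 5 * 0.00001

print(YScale(32768))                                          # ~0.0655: a full-scale 16-bit reading
print([round(-1 + (k + 1) * 2 / 120, 3) for k in range(3)])   # [-0.983, -0.967, -0.95]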
55 changes: 41 additions & 14 deletions Host/SensorProcess.py
@@ -1,23 +1,25 @@
##

from asyncio.windows_events import NULL
import socket
from unittest import skip
from PerformanceCounter import PerformanceCounter
from SensorChart import SensorChart
from DataFile import DataFile

raw_data = []

class UdpServer:
    def __init__(self):
        self.Sample = []
        self.Data = []
        self.Idle = True
        self.SampleCount = 0

        self.Counter = PerformanceCounter()
        self.Chart = SensorChart()
        self.DataFile = DataFile()

        self.Server = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.Server.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
        self.Server.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 65536 * 16)
-        self.Server.settimeout(1)
+        self.Server.settimeout(0.5)

        ip = self.GetLocalIP("192.168.10")
        print(ip)
@@ -30,30 +32,55 @@ def Run(self):


    def Recv(self):
-        raw_data = [0] * 9
+        self.Data = [0] * 9

        try:
-            data, address = self.Server.recvfrom(36)
+            data, address = self.Server.recvfrom(40)

            #print(len(data), data)
            reader = lambda p: int.from_bytes(data[p:p + 4], byteorder="little", signed=True)

            c = reader(0)
            for i in range(9):
-                raw_data[i] = reader(i * 4)
+                self.Data[i] = reader((i + 1) * 4)

            if self.Idle:
                self.Sample = []
                self.Idle = False

            while self.AddData(c, self.Data[:]) < c:
                continue


        except socket.timeout:
-            raw_data = []
            if not self.Idle:
                if len(self.Sample) == 120:
                    self.SampleCount += 1
                    for d in self.Sample:
                        self.DataFile.Write(d)
                else:
                    self.Sample = []

                print("Sample: ", self.SampleCount)

                self.DataFile.Close()
                self.Counter.Reset()
                self.Idle = True

        except Exception as e:
-            raw_data = []
+            self.Data = []
            print("error", e.args)

-        [count, t] = self.Counter.Frame()
-        print(count, t, raw_data)

-        self.DataFile.Write(raw_data)
-        self.Chart.Draw(raw_data)
    def AddData(self, c, d):
        self.Sample.append(d)

        [count, t] = self.Counter.Frame()
        print(count, int(t * 1000), c, d)

        self.Chart.Draw(d, self.Sample)

        return count


    def GetLocalIP(self, mask):
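For bench testing without the ESP32, a small sender can mimic the packet layout that Recv() expects: a little-endian int32 frame counter followed by nine int32 sensor values, 40 bytes in total (matching recvfrom(40)). The broadcast address and port below are placeholders, since the real ones live in the collapsed setup code; this is a sketch, not part of the commit.

import socket
import struct

BROADCAST = "192.168.10.255"   # placeholder address on the 192.168.10 subnet used above
PORT = 8888                    # placeholder; the actual port is set in the hidden __init__ code

def send_test_frame(sock, counter, values):
    # 10 little-endian int32 values = 40 bytes: [counter, v0 .. v8]
    payload = struct.pack("<10i", counter, *values)
    sock.sendto(payload, (BROADCAST, PORT))

if __name__ == "__main__":
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    s.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
    for n in range(120):                       # one 120-frame gesture sample
        send_test_frame(s, n + 1, [0] * 9)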
16,053 changes: 8,035 additions & 8,018 deletions Host/model.h

Large diffs are not rendered by default.

64 changes: 44 additions & 20 deletions Host/tinyml.py
@@ -21,7 +21,7 @@ def CreateModel():
    model.add(layers.Dense(2, activation='softmax'))

    opt_adam = keras.optimizers.Adam()
-    model.compile(optimizer=opt_adam, loss='categorical_crossentropy', metrics=['categorical_accuracy'])
+    model.compile(optimizer=opt_adam, loss='sparse_categorical_crossentropy', metrics=['sparse_categorical_accuracy'])
    model.summary()

    return model
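The loss change pairs with the new label format in ReadDataFile further down: sparse_categorical_crossentropy takes plain class indices, while the old categorical_crossentropy expected one-hot rows. A small illustration (not part of the commit):

import numpy as np

y_sparse = np.array([1, 0, 1])                  # class indices, as ReadDataFile now appends (1 = circle, 0 = cross)
y_onehot = np.array([[0, 1], [1, 0], [0, 1]])   # one-hot rows, what the old loss expected
# Both encode the same three samples; only the representation differs.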
@@ -37,12 +37,14 @@ def PrepareModel(model):
    y_values = np.random.uniform(low=0, high=1, size=(SAMPLES, 2))
    #y_values = np.random.randn(*y_values.shape)

-    Train(model, x_values, y_values)
+    return x_values, y_values


def TrainModel(model, x, y):
    SampleCount = len(x)

    # split into train, validation, test
-    TRAIN_SPLIT = int(0.8 * SAMPLES)
+    TRAIN_SPLIT = int(0.8 * SampleCount)
    x_train, x_validate = np.split(x, [TRAIN_SPLIT, ])
    y_train, y_validate = np.split(y, [TRAIN_SPLIT, ])
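The fit call itself is collapsed in this diff. A generic Keras training step over the split produced above could look like the sketch below; the epoch and batch-size values are made up for illustration and are not the committed hyperparameters.

history = model.fit(x_train, y_train,
                    epochs=50, batch_size=16,    # placeholder hyperparameters
                    validation_data=(x_validate, y_validate))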

@@ -52,18 +54,18 @@ def TrainModel(model, x, y):
def ConvertModel(model):
    converter = tf.lite.TFLiteConverter.from_keras_model(model)
    tflite_model = converter.convert()

    return tflite_model

def SaveModel(model):
    with open("model.tflite", "wb") as f:
-        f.write(tflite_model)
+        f.write(model)


# Function: Convert some hex value into an array for C programming
-def Hex2H(model, h_model_name):
-    converter = tf.lite.TFLiteConverter.from_keras_model(model)
-    tflite_model = converter.convert()
-
+def Hex2H(model, h_model_name):
    c_str = ''
-    model_len = len(tflite_model)
+    model_len = len(model)

    # Create header guard
    c_str += '#ifndef ' + h_model_name.upper() + '_H\n'
@@ -75,7 +77,7 @@ def Hex2H(model, h_model_name):
    # Declare C variable
    c_str += 'unsigned char ' + h_model_name + '[] = {'
    hex_array = []
-    for i, val in enumerate(tflite_model) :
+    for i, val in enumerate(model) :
        # Construct string from hex
        hex_str = format(val, '#04x')
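For reference (not part of the commit), '#04x' renders each byte as two hex digits with a 0x prefix, which is what lands in the C array:

print(format(26, '#04x'))    # 0x1a
print(format(5, '#04x'))     # 0x05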

@@ -93,23 +95,26 @@ def Hex2H(model, h_model_name):
    c_str += '#endif //' + h_model_name.upper() + '_H\n'

    # Write TFLite model to a C source (or header) file
-    with open(h_model_name + '.h', 'w') as file:
+    base_path = os.path.dirname(__file__)
+    with open(base_path + "/" + h_model_name + '.h', 'w') as file:
        file.write(c_str)
        file.flush()


-def ReadData(file, v):
+def ReadDataFile(file, v):
    size = SAMPLES_PER_GESTURE * 6

    dataX = np.empty([0, size])
-    dataY = np.empty([0, 2])
+    dataY = np.empty([0,])
+    # dataY = np.empty([0, 2])

    base_path = os.path.dirname(__file__)
    file = open(base_path + "/data/" + file, "r")

    data = []
    for line in file.readlines():
        items = line.split()
-        values = [int(v) for v in items ]
+        values = [int(v) / 32768 for v in items ]
        data += values

    count = len(data)
@@ -120,13 +125,32 @@ def ReadData(file, v):
        tmp = np.expand_dims(tmp, axis=0)

        dataX = np.concatenate((dataX, tmp), axis=0)
-        dataY = np.concatenate((dataY, [[0, 1]] if v == 0 else [[1, 0]]))
+        dataY = np.append(dataY, v)
+        #dataY = np.concatenate((dataY, [[0, 1]] if v == 0 else [[1, 0]]))

    return dataX, dataY
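The expected file layout can be inferred from the parsing above: whitespace-separated integer readings that get scaled by 1/32768 and regrouped into windows of SAMPLES_PER_GESTURE * 6 values. A hypothetical generator for a dummy file, assuming SAMPLES_PER_GESTURE is 120 (the 120-frame samples recorded by SensorProcess.py) and six axis values per line:

# Illustrative only: write a dummy gesture file in the format ReadDataFile() parses.
import os
import random

SAMPLES_PER_GESTURE = 120                      # assumption: 1.2 s at 100 Hz
base_path = os.path.dirname(__file__)
with open(base_path + "/data/dummy.csv", "w") as f:
    for _ in range(SAMPLES_PER_GESTURE):       # one gesture window
        frame = [random.randint(-32768, 32767) for _ in range(6)]   # 6-axis int16-range readings
        f.write(" ".join(str(v) for v in frame) + "\n")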


def ReadTrainingData():
    circle_x, circle_y = ReadDataFile("circle.csv", 1)
    cross_x, cross_y = ReadDataFile("cross.csv", 0)

    dataX = np.concatenate((circle_x, cross_x), axis=0)
    dataY = np.concatenate((circle_y, cross_y), axis=0)

    return dataX, dataY
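One side note, not part of the commit: because every circle sample precedes every cross sample in the concatenated arrays, the plain 80/20 np.split in TrainModel() leaves the validation slice dominated by a single gesture. A hypothetical shuffle inside ReadTrainingData(), just before the return, would balance the split:

    # Hypothetical tweak: shuffle so both gestures appear in train and validation data.
    idx = np.random.permutation(len(dataX))
    dataX, dataY = dataX[idx], dataY[idx]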

ReadData("circle.csv", 1)
if __name__ == '__main__':
Model = CreateModel()
#PrepareModel(Model)

x, y = ReadTrainingData()
TrainModel(Model, x, y)

print("==== Convert Model")
tfModel = ConvertModel(Model)

print("==== Make Header File")
Hex2H(tfModel, "model")

Model = CreateModel()
PrepareModel(Model)
ConvertModel(Model)
Hex2H(Model, "model")
print("==== Model Build Finished")
30 changes: 25 additions & 5 deletions README.md
@@ -1,19 +1,39 @@
# TinyML for Predicting Gestures on ESP32-C3
---

## Overview
Recognizes two gesture motions from sampled 6-axis sensor data (accelerometer and gyroscope).
## Hardware
![image](Images/beetle.jpg)
![image](Images/mu9250.jpg)
## Workflow

## Frameworks
TensorFlow
Keras
TensorFlow Lite Micro
## Environment
### Python
Python 3.8
TensorFlow
PyOpenGL

### Arduino
-ESP32 Arduino
-Tensorflow Lite-ESP32
+ESP32 Arduino 2.0.3
+TensorFlowLite_ESP32 0.9.0

## Design
### Data
1.2 s of data per gesture, sampled at 100 Hz (120 frames)

### Model
Linear fully connected network
720-element input vector (120 frames × 6 axes), 2-element output vector

## Workflow
Data collection
Preprocessing
Training
Inference application

## Code
Host
prediect_gesture
