[WIP] bugfixes

Timo Volkmann 2020-12-11 18:38:39 +01:00
parent ae9b08e0c4
commit c3f6a9ff5b
5 changed files with 160 additions and 143 deletions


@@ -18,11 +18,11 @@ func main() {
     service := core.TrackingService(repo, disp, conf)
     go func() {
-        service.NewTracking(core.TCP, core.SERIAL)
+        service.NewTracking(core.TCP)
         time.Sleep(5 * time.Second)
         service.StartRecord()
         time.Sleep(15 * time.Second)
-        service.StopRecord()
+        //service.StopRecord()
     }()
     web.CreateServer(service, disp, conf)


@@ -2,13 +2,11 @@ package core

 import (
     "fmt"
+    "git.timovolkmann.de/gyrogpsc/ublox"
+    "go.bug.st/serial"
     "log"
     "net"
     "os"
-    "time"
-    "git.timovolkmann.de/gyrogpsc/ublox"
-    "go.bug.st/serial"
 )

 type Collector interface {
@@ -74,11 +72,14 @@ func (s *serialCollector) Collect() {
             break
         }
         sd, err := ConvertUbxToSensorData(meas)
-        if err != nil || sd == nil {
+        if err != nil {
             log.Println("convert err:", err, meas, sd)
             continue
         }
+        // skip irrelevant messages
+        if sd == nil {
+            continue
+        }
         err = s.proc.Push(sd)
         if err != nil {
@@ -172,7 +173,7 @@ func (c *tcpCollector) jsonHandler(conn net.Conn) {
             continue
         }
         if !c.active {
-            time.Sleep(50 * time.Millisecond)
+            //time.Sleep(50 * time.Millisecond)
             continue
         }
         err = c.processor.Push(sd)


@@ -4,6 +4,8 @@ import (
     "encoding/json"
     "errors"
     "fmt"
+    "github.com/google/go-cmp/cmp"
+    "github.com/google/go-cmp/cmp/cmpopts"
     "log"
     "sync"
     "time"
@@ -12,10 +14,10 @@ import (

 type pipeline struct {
     active        bool
     record        bool
-    syn           synchronizer
-    agr           aggregator
-    pub           Publisher
-    stor          Storer
+    synchroniz    synchronizer
+    buffer        pipeBuffer
+    publisher     Publisher
+    storer        Storer
     publishTicker *time.Ticker
 }
@@ -29,7 +31,7 @@ func NewPipeline(d Publisher, s Storer, conf *Configuration) *pipeline {
             mutex:        &sync.Mutex{},
             updateTicker: time.NewTicker(time.Duration(conf.Pipeline.SyncUpdateIntervalMs) * time.Millisecond),
         },
-        aggregator{
+        pipeBuffer{
             tcpMutex:    &sync.Mutex{},
             serialMutex: &sync.Mutex{},
         },
@@ -44,7 +46,7 @@ func (p *pipeline) Run() {
     log.Println("pipe: processing service started")
     go func() {
         for p.active {
-            <-p.syn.updateTicker.C
+            <-p.synchroniz.updateTicker.C
             err := p.refreshDelay()
             if err != nil {
                 log.Println(err)
@@ -55,7 +57,7 @@ func (p *pipeline) Run() {
     go func() {
         for p.active {
             <-p.publishTicker.C
-            err := p.Publish()
+            err := p.publish()
             if err != nil && err.Error() != "no data available" {
                 log.Println(err)
             }
@@ -71,38 +73,51 @@ func (p *pipeline) Stop() {
     p.record = false
 }

-func (p *pipeline) Publish() error {
-    p.agr.tcpMutex.Lock()
-    p.agr.serialMutex.Lock()
-    if (p.agr.tcpSensorData == sensorData{} && p.agr.serialSensorData == sensorData{}) {
-        p.agr.tcpMutex.Unlock()
-        p.agr.serialMutex.Unlock()
+func (p *pipeline) publish() error {
+    p.buffer.tcpMutex.Lock()
+    p.buffer.serialMutex.Lock()
+    if (p.buffer.MeasTcp == sensorData{} && p.buffer.MeasSerial == sensorData{}) {
+        p.buffer.tcpMutex.Unlock()
+        p.buffer.serialMutex.Unlock()
         return errors.New("no data available")
     }
+    if cmp.Equal(p.buffer.MeasTcp, p.buffer.LastMeasTcp, cmpopts.IgnoreUnexported(sensorData{})) &&
+        cmp.Equal(p.buffer.MeasSerial, p.buffer.LastMeasSerial, cmpopts.IgnoreUnexported(sensorData{})) {
+        p.buffer.tcpMutex.Unlock()
+        p.buffer.serialMutex.Unlock()
+        return errors.New("same data")
+    }
+    log.Println("")
+    log.Printf("MEAS old: %v", p.buffer.LastMeasTcp)
+    log.Printf("MEAS new: %v", p.buffer.MeasTcp)
+    log.Println("")
+    p.buffer.LastMeasTcp = p.buffer.MeasTcp
+    p.buffer.LastMeasSerial = p.buffer.MeasSerial

-    p.stor.EnqueuePair(p.agr.tcpSensorData, p.agr.serialSensorData)
+    p.storer.EnqueuePair(p.buffer.MeasTcp, p.buffer.MeasSerial)
     data := map[string]sensorData{
-        string(SOURCE_TCP):    p.agr.tcpSensorData,
-        string(SOURCE_SERIAL): p.agr.serialSensorData,
+        string(SOURCE_TCP):    p.buffer.MeasTcp,
+        string(SOURCE_SERIAL): p.buffer.MeasSerial,
     }
-    p.agr.tcpMutex.Unlock()
-    p.agr.serialMutex.Unlock()
+    p.buffer.tcpMutex.Unlock()
+    p.buffer.serialMutex.Unlock()
     jdata, err := json.Marshal(data)
     //log.Println(string(pretty.Pretty(jdata)))
     if err != nil {
         return err
     }
-    p.pub.Publish(string(jdata))
+    p.publisher.Publish(string(jdata))
     return nil
 }

-type aggregator struct {
-    tcpSensorData    sensorData
-    serialSensorData sensorData
+type pipeBuffer struct {
+    MeasTcp        sensorData
+    MeasSerial     sensorData
+    LastMeasTcp    sensorData
+    LastMeasSerial sensorData
     tcpMutex       *sync.Mutex
     serialMutex    *sync.Mutex
 }
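Note on the new duplicate check: go-cmp's cmp.Equal panics when it encounters unexported struct fields unless an option tells it how to handle them, which is presumably why cmpopts.IgnoreUnexported(sensorData{}) is passed above. A minimal, self-contained sketch of that behaviour, using a hypothetical measurement struct in place of sensorData (the fields are invented for illustration):

package main

import (
    "fmt"

    "github.com/google/go-cmp/cmp"
    "github.com/google/go-cmp/cmp/cmpopts"
)

// measurement stands in for sensorData; the fields are made up for this sketch.
type measurement struct {
    Timestamp int64
    Lat, Lon  float64
    source    string // unexported, like sensorData's source field
}

func main() {
    a := measurement{Timestamp: 1, Lat: 52.52, Lon: 13.40, source: "tcp"}
    b := a
    b.source = "serial" // differs only in the unexported field

    // Without an option, cmp.Equal would panic on the unexported field;
    // IgnoreUnexported skips it, so a and b compare as equal here.
    fmt.Println(cmp.Equal(a, b, cmpopts.IgnoreUnexported(measurement{}))) // true
}

With this option the equality check only looks at exported fields, so two buffered measurements that differ only in unexported bookkeeping fields still count as "same data" and are not re-published.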
@@ -117,26 +132,26 @@ type synchronizer struct {

 func (p *pipeline) refreshDelay() error {
     log.Println("refreshing delay....")
-    fmt.Println("Delay TCP/SERIAL", p.syn.tcpSerialDelayMs)
-    p.agr.serialMutex.Lock()
-    p.agr.tcpMutex.Lock()
-    tcpTime := time.Unix(0, p.agr.tcpSensorData.Timestamp)
-    serTime := time.Unix(0, p.agr.serialSensorData.Timestamp)
-    p.agr.serialMutex.Unlock()
-    p.agr.tcpMutex.Unlock()
+    fmt.Println("Delay TCP/SERIAL", p.synchroniz.tcpSerialDelayMs)
+    p.buffer.serialMutex.Lock()
+    p.buffer.tcpMutex.Lock()
+    tcpTime := time.Unix(0, p.buffer.MeasTcp.Timestamp)
+    serTime := time.Unix(0, p.buffer.MeasSerial.Timestamp)
+    p.buffer.serialMutex.Unlock()
+    p.buffer.tcpMutex.Unlock()
     if tcpTime.UnixNano() == 0 || serTime.UnixNano() == 0 {
         return errors.New("no sync possible. check if both collectors running. otherwise check GPS fix")
     }
     currentDelay := tcpTime.Sub(serTime).Milliseconds()
     if currentDelay > 5000 || currentDelay < -5000 {
-        p.syn.tcpSerialDelayMs = 0
+        p.synchroniz.tcpSerialDelayMs = 0
         return errors.New("skipping synchronisation! time not properly configured or facing network problems.")
     }
     log.Println("TCP", tcpTime.String())
     log.Println("SER", serTime.String())
     log.Println("Difference", tcpTime.Sub(serTime).Milliseconds(), "ms")
     delay := tcpTime.Sub(serTime).Milliseconds()
-    p.syn.tcpSerialDelayMs += delay
+    p.synchroniz.tcpSerialDelayMs += delay
     return nil
 }
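For orientation on the delay computation: refreshDelay reads the buffered timestamps with time.Unix(0, ...), which implies sensorData.Timestamp holds Unix nanoseconds, and the TCP/serial offset is simply the difference of the two times in milliseconds (positive when the TCP timestamp is ahead of the serial one). A small self-contained sketch of the same arithmetic with made-up values:

package main

import (
    "fmt"
    "time"
)

func main() {
    // Made-up timestamps in Unix nanoseconds, as the calls to time.Unix(0, ts) imply.
    tcpNanos := int64(1607708315250000000)
    serNanos := int64(1607708315000000000)

    tcpTime := time.Unix(0, tcpNanos)
    serTime := time.Unix(0, serNanos)

    // Same computation as in refreshDelay above.
    delay := tcpTime.Sub(serTime).Milliseconds()
    fmt.Println("Difference", delay, "ms") // Difference 250 ms
}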
@@ -144,9 +159,8 @@ func (p *pipeline) Push(data *sensorData) error {
     if data == nil {
         return errors.New("nil processing not allowed")
     }
-    //log.Println(string(data.source))
-    // TODO: persist data here with current timestamp
-    p.stor.EnqueueRaw(*data)
+    //log.Println("push data to pipe:", string(data.source))
+    p.storer.EnqueueRaw(*data)
     switch data.source {
     case SOURCE_TCP:
         go p.pushTcpDataToBuffer(*data)
@@ -159,20 +173,20 @@ func (p *pipeline) Push(data *sensorData) error {
 }

 func (p *pipeline) pushTcpDataToBuffer(data sensorData) {
-    if p.syn.tcpSerialDelayMs > 0 {
-        time.Sleep(time.Duration(p.syn.tcpSerialDelayMs) * time.Millisecond)
+    if p.synchroniz.tcpSerialDelayMs > 0 {
+        time.Sleep(time.Duration(p.synchroniz.tcpSerialDelayMs) * time.Millisecond)
     }
-    p.agr.tcpMutex.Lock()
-    p.agr.tcpSensorData = p.agr.tcpSensorData.ConsolidateExTime(data)
-    p.agr.tcpMutex.Unlock()
+    p.buffer.tcpMutex.Lock()
+    p.buffer.MeasTcp = p.buffer.MeasTcp.ConsolidateExTime(data)
+    p.buffer.tcpMutex.Unlock()
 }

 func (p *pipeline) pushSerialDataToBuffer(data sensorData) {
-    if p.syn.tcpSerialDelayMs < 0 {
-        time.Sleep(time.Duration(-p.syn.tcpSerialDelayMs) * time.Millisecond)
+    if p.synchroniz.tcpSerialDelayMs < 0 {
+        time.Sleep(time.Duration(-p.synchroniz.tcpSerialDelayMs) * time.Millisecond)
     }
-    p.agr.serialMutex.Lock()
-    p.agr.serialSensorData = p.agr.serialSensorData.ConsolidateEpochsOnly(data)
-    p.agr.serialMutex.Unlock()
+    p.buffer.serialMutex.Lock()
+    p.buffer.MeasSerial = p.buffer.MeasSerial.ConsolidateEpochsOnly(data)
+    p.buffer.serialMutex.Unlock()
 }

 func (p *pipeline) Close() {

go.mod

@@ -5,6 +5,7 @@ go 1.15

 require (
     github.com/gofiber/fiber/v2 v2.2.4
     github.com/gofiber/websocket/v2 v2.0.2
+    github.com/google/go-cmp v0.3.0
     github.com/google/uuid v1.1.2
     github.com/gorilla/websocket v1.4.2
     github.com/spf13/viper v1.7.1

go.sum

@@ -74,6 +74,7 @@ github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5y
 github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
 github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
 github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
+github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY=
 github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
 github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
 github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=