[WIP] eliminated data races & formatted files

This commit is contained in:
Timo Volkmann 2020-12-12 02:29:22 +01:00
parent a4a739c64b
commit 09791727a4
17 changed files with 1158 additions and 910 deletions

1
.gitignore vendored
View File

@ -161,3 +161,4 @@ Temporary Items
.env
gpsconfig.yml
config.yml
_db

View File

@ -18,18 +18,19 @@ func main() {
service := core.TrackingService(repo, disp, conf)
go func() {
service.NewTracking(core.TCP, core.SERIAL)
service.NewSetup(core.TCP, core.SERIAL)
time.Sleep(5 * time.Second)
service.StartRecord()
time.Sleep(20 * time.Second)
time.Sleep(5 * time.Second)
service.StopRecord()
time.Sleep(5 * time.Second)
service.NewTracking(core.TCP)
service.NewTracking(core.SERIAL)
service.NewSetup(core.TCP)
time.Sleep(5 * time.Second)
service.StartRecord()
time.Sleep(20 * time.Second)
time.Sleep(5 * time.Second)
service.StopRecord()
time.Sleep(5 * time.Second)
service.StopAll()
}()
web.CreateServer(service, disp, conf)

View File

@ -7,11 +7,12 @@ import (
"go.bug.st/serial"
"net"
"os"
"sync"
)
type Collector interface {
Collect()
Stop()
Close()
}
type CollectorType uint8
@ -45,10 +46,20 @@ type serialCollector struct {
active bool
proc Pusher
config *Configuration
mu sync.RWMutex
}
// isSerialCollActive reports whether the serial collector is running.
// The active flag is read under the read lock to avoid racing Collect/Close.
func (s *serialCollector) isSerialCollActive() bool {
	s.mu.RLock()
	active := s.active
	s.mu.RUnlock()
	return active
}
func (s *serialCollector) Collect() {
s.mu.Lock()
s.active = true
s.mu.Unlock()
go func() {
logrus.Println("start serial collector")
mode := &serial.Mode{
@ -56,13 +67,13 @@ func (s *serialCollector) Collect() {
}
port, err := serial.Open(s.config.Collectors.SerialCollectorPort, mode)
if err != nil {
logrus.Fatalln(err.Error())
logrus.Fatalln("can't open serial port:", err.Error())
}
defer port.Close()
decoder := ublox.NewDecoder(port)
for s.active {
for s.isSerialCollActive() {
meas, err := decoder.Decode()
if err != nil {
if err.Error() == "NMEA not implemented" {
@ -91,8 +102,12 @@ func (s *serialCollector) Collect() {
}()
}
func (s *serialCollector) Stop() {
func (s *serialCollector) Close() {
s.mu.Lock()
s.active = false
s.mu.Unlock()
}
func newSerial(proc Pusher, config *Configuration) *serialCollector {
@ -113,7 +128,7 @@ func (t *tcpCollector) Collect() {
t.active = true
}
func (t *tcpCollector) Stop() {
func (t *tcpCollector) Close() {
t.active = false
}
@ -126,7 +141,7 @@ func newTcp(proc Pusher, config *Configuration) *tcpCollector {
listener, err := net.Listen("tcp", config.Collectors.TcpCollectorPort)
if err != nil {
fmt.Println("Error listening:", err.Error())
fmt.Println("Error listening:", err)
//os.Exit(1)
}
coll := &tcpCollector{

View File

@ -4,7 +4,7 @@ type Configuration struct {
Collectors struct {
TcpCollectorPort string `mapstructure:"porttcp"`
SerialCollectorPort string `mapstructure:"portserial"`
} `mapstructure:"collectors"`
} `mapstructure:"Collectors"`
Webserver struct {
Port string `mapstructure:"port"`
} `mapstructure:"webserver"`

View File

@ -18,7 +18,8 @@ func NewDispatcher() *dispatcher {
}
func (d *dispatcher) Publish(message string) {
logrus.Debugf("publish to %v listeners:\n%v\n", len(d.listeners), message)
logrus.Debugf("publish to %v listeners:\n%v\n", len(d.listeners))
logrus.Debug(message)
for _, ch := range d.listeners {
ch <- message
}

View File

@ -15,27 +15,23 @@ type Pusher interface {
Push(data *sensorData) error
}
type Storer interface {
EnqueuePair(tcp sensorData, ser sensorData)
EnqueueRaw(data sensorData)
}
type Repo interface {
Save(tracking Tracking) error
LoadAll() ([]TrackingMetadata, error)
Load(id uuid.UUID) (Tracking, error)
}
type Service interface {
AllTrackings()
NewTracking(cols ...CollectorType)
NewSetup(cols ...CollectorType)
StartRecord()
StopRecord()
Reset()
StopAll()
LoadTracking(trackingId uuid.UUID)
DeleteTracking(trackingId uuid.UUID)

View File

@ -1,11 +1,13 @@
package core
import (
"context"
"encoding/json"
"errors"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
"github.com/sirupsen/logrus"
"golang.org/x/sync/semaphore"
"sync"
"time"
)
@ -18,6 +20,8 @@ type pipeline struct {
publisher Publisher
storer Storer
publishTicker *time.Ticker
mu sync.RWMutex
sema *semaphore.Weighted
}
// pipe implements Runner & Pusher
@ -37,30 +41,43 @@ func NewPipeline(d Publisher, s Storer, conf *Configuration) *pipeline {
d,
s,
time.NewTicker(time.Duration(conf.Pipeline.PublishIntervalMs) * time.Millisecond),
sync.RWMutex{},
semaphore.NewWeighted(2),
}
}
// isPipeActive reports whether the pipeline workers should keep running.
// Reads the active flag under the read lock (set by Run, cleared by Close).
func (p *pipeline) isPipeActive() bool {
	p.mu.RLock()
	active := p.active
	p.mu.RUnlock()
	return active
}
func (p *pipeline) Run() {
p.sema.Acquire(context.Background(), 2)
p.mu.Lock()
p.active = true
p.mu.Unlock()
logrus.Println("pipe: processing service started")
go func() {
for p.active {
for p.isPipeActive() {
<-p.synchroniz.updateTicker.C
err := p.refreshDelay()
if err != nil {
logrus.Debugln(err)
}
}
p.sema.Release(1)
logrus.Println("pipe: updater stopped")
}()
go func() {
for p.active {
for p.isPipeActive() {
<-p.publishTicker.C
err := p.publish()
if err != nil && err.Error() != "no data available" {
logrus.Debug(err)
}
}
p.sema.Release(1)
logrus.Println("pipe: publisher stopped")
}()
}
@ -68,7 +85,7 @@ func (p *pipeline) Run() {
// Record enables recording: processed sensor data will be stored into the
// current tracking until StopRecord is called.
// The record flag is written under the pipeline mutex, matching how the
// active flag is guarded (this commit's stated goal is eliminating data
// races). NOTE(review): readers of p.record must also take p.mu for the
// guard to be effective — verify publish/refreshDelay.
func (p *pipeline) Record() {
	p.mu.Lock()
	p.record = true
	p.mu.Unlock()
}
func (p *pipeline) Stop() {
func (p *pipeline) StopRecord() {
p.record = false
}
@ -88,8 +105,10 @@ func (p *pipeline) publish() error {
return errors.New("same data")
}
logrus.Debug("")
logrus.Debugf("MEAS old: %v", p.buffer.LastMeasTcp)
logrus.Debugf("MEAS new: %v", p.buffer.MeasTcp)
logrus.Debugf("SER old: %v", p.buffer.LastMeasSerial)
logrus.Debugf("SER new: %v", p.buffer.MeasSerial)
logrus.Debugf("TCP old: %v", p.buffer.LastMeasTcp)
logrus.Debugf("TCP new: %v", p.buffer.MeasTcp)
logrus.Debug("")
p.buffer.LastMeasTcp = p.buffer.MeasTcp
p.buffer.LastMeasSerial = p.buffer.MeasSerial
@ -142,7 +161,7 @@ func (p *pipeline) refreshDelay() error {
p.buffer.tcpMutex.Unlock()
p.buffer.serialMutex.Unlock()
if tcpTime.UnixNano() == 0 || serTime.UnixNano() == 0 {
return errors.New("no sync possible. check if both collectors running. otherwise check GPS fix")
return errors.New("no sync possible. check if both Collectors running. otherwise check GPS fix")
}
currentDelay := tcpTime.Sub(serTime).Milliseconds()
if currentDelay > 5000 || currentDelay < -5000 {
@ -200,5 +219,7 @@ func (p *pipeline) pushSerialDataToBuffer(data sensorData) {
}
// Close stops both pipeline worker goroutines by clearing the active flag
// under the write lock; the workers observe it via isPipeActive and exit.
func (p *pipeline) Close() {
	p.mu.Lock()
	defer p.mu.Unlock()
	p.active = false
}

View File

@ -11,6 +11,7 @@ type OpMode uint8
const (
STOPPED OpMode = iota
LIVE
RECORDING
REPLAY
)
@ -25,7 +26,7 @@ type trackingService struct {
func TrackingService(r Repo, d Publisher, c *Configuration) *trackingService {
t := &Tracking{}
return &trackingService{
ts := &trackingService{
current: t,
opMode: STOPPED,
config: c,
@ -33,54 +34,88 @@ func TrackingService(r Repo, d Publisher, c *Configuration) *trackingService {
pipe: NewPipeline(d, t, c),
collectors: nil,
}
//ts.pipe.Run()
return ts
}
//const(
// errA error = errors.New("A")
//)
// AllTrackings is part of the Service interface; listing the stored
// trackings is not implemented yet.
func (t *trackingService) AllTrackings() {
	panic("implement me")
}
func (t *trackingService) NewTracking(cols ...CollectorType) {
func (t *trackingService) NewSetup(cols ...CollectorType) {
logrus.Info("SERVICE: NEW SETUP")
if t.opMode == RECORDING {
logrus.Println("trackingservice: no reset while recording")
return
}
if t.opMode == LIVE {
logrus.Println("trackingservice: stop currently running setup before creating new one")
t.StopAll()
}
logrus.Debug("new tracking:", cols)
t.opMode = LIVE
t.collectors = nil
for _, col := range cols {
t.collectors = append(t.collectors, NewCollector(col, t.pipe, t.config))
}
*t.current = emptyTracking()
t.current.collectors = cols
t.safelyReplaceTracking(emptyTracking())
t.current.Collectors = cols
for _, e := range t.collectors {
e.Collect()
}
t.pipe.Run()
//time.Sleep(3 * time.Second)
}
// StartRecord switches the service from LIVE to RECORDING: it stamps the
// current tracking's creation time and tells the pipeline to start recording.
// Does nothing (beyond a log line) unless a live setup is running.
// NOTE(review): t.opMode is read and written without synchronization here —
// possible data race with concurrent service calls; confirm call sites.
func (t *trackingService) StartRecord() {
	logrus.Info("SERVICE: START RECORD")
	switch t.opMode {
	case LIVE:
		t.opMode = RECORDING
		t.current.TimeCreated = time.Now()
		t.pipe.Record()
	default:
		logrus.Println("trackingservice: wrong mode of operation")
	}
}
func (t *trackingService) StopRecord() {
if t.opMode != LIVE {
logrus.Println("trackingservice: wrong mode of operation")
}
t.pipe.Stop()
for _, e := range t.collectors {
e.Stop()
logrus.Info("SERVICE: STOP RECORD")
if t.opMode != RECORDING {
logrus.Println("trackingservice: couldn't stop. not recording")
return
}
t.opMode = LIVE
t.pipe.StopRecord()
m1.Lock()
m2.Lock()
err := t.repo.Save(*t.current)
m2.Unlock()
m1.Unlock()
if err != nil {
logrus.Println(err)
}
t.NewTracking(t.current.collectors...)
t.safelyReplaceTracking(emptyTracking())
}
func (t *trackingService) Reset() {
func (t *trackingService) StopAll() {
logrus.Info("SERVICE: STOP ALL")
if t.opMode == RECORDING {
logrus.Println("trackingservice: stop recording gracefully")
t.StopRecord()
}
t.opMode = STOPPED
t.pipe.Close()
for _, e := range t.collectors {
e.Close()
}
t.collectors = nil
t.safelyReplaceTracking(emptyTracking())
}
func (t *trackingService) DeleteTracking(trackingId uuid.UUID) {
@ -102,3 +137,13 @@ func (t *trackingService) StopReplay() {
func (t *trackingService) LoadTracking(trackingId uuid.UUID) {
}
// safelyReplaceTracking overwrites the current tracking with tr while
// holding both package-level tracking mutexes, so collector goroutines
// cannot append to the old value mid-swap. Lock order is m1 then m2
// everywhere to avoid deadlock; the deferred unlocks fire in the reverse
// order (m2 first), exactly as the explicit unlock sequence did.
func (t *trackingService) safelyReplaceTracking(tr Tracking) {
	m1.Lock()
	defer m1.Unlock()
	m2.Lock()
	defer m2.Unlock()
	*t.current = tr
}

View File

@ -7,22 +7,22 @@ import (
"time"
)
var m1 sync.RWMutex
var m2 sync.RWMutex
type Tracking struct {
TrackingMetadata
Records []recordPair
Rawdata []rawRecord
mu sync.Mutex
}
type TrackingMetadata struct {
UUID uuid.UUID
TimeCreated time.Time
collectors []CollectorType
Collectors []CollectorType
}
func (s *Tracking) EnqueuePair(tcp sensorData, ser sensorData) {
s.mu.Lock()
defer s.mu.Unlock()
rp := recordPair{
RecordTime: time.Now(),
data: map[sourceId]sensorData{
@ -30,19 +30,21 @@ func (s *Tracking) EnqueuePair(tcp sensorData, ser sensorData) {
ser.source: ser,
},
}
m1.Lock()
s.Records = append(s.Records, rp)
logrus.Debugln("tracking Records: len->", len(s.Records))
m1.Unlock()
}
func (s *Tracking) EnqueueRaw(data sensorData) {
s.mu.Lock()
defer s.mu.Unlock()
sr := rawRecord{
time.Now(),
data,
}
m1.Lock()
s.Rawdata = append(s.Rawdata, sr)
logrus.Debugln("raw data points: len->", len(s.Records))
logrus.Debugln("raw data points: len->", len(s.Rawdata))
m1.Unlock()
}
func emptyTracking() Tracking {

1
go.mod
View File

@ -14,4 +14,5 @@ require (
github.com/tidwall/gjson v1.6.0
github.com/tidwall/pretty v1.0.2 // indirect
go.bug.st/serial v1.1.1
golang.org/x/sync v0.0.0-20190423024810-112230192c58
)

1
go.sum
View File

@ -304,6 +304,7 @@ golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=

View File

@ -1,28 +1,168 @@
package storage
import (
"encoding/binary"
"encoding/json"
"git.timovolkmann.de/gyrogpsc/core"
"github.com/dgraph-io/badger/v2"
"github.com/google/uuid"
"github.com/sirupsen/logrus"
"os"
"path/filepath"
)
// Must implement Repo
type badgerStore struct {
db *badger.DB
trackings *badger.DB
records *badger.DB
rawdata *badger.DB
}
func NewRepository(c *core.Configuration) *badgerStore {
db, err := badger.Open(badger.DefaultOptions("."))
if err != nil {
logrus.Warn(err)
}
return &badgerStore{db}
dir, _ := os.Getwd()
logrus.Debug(dir)
if _, err := os.Stat(filepath.Join(dir,"_db")); os.IsNotExist(err) {
os.Mkdir(filepath.Join(dir,"_db"), os.ModePerm)
}
func (r *badgerStore) Save(tracking core.Tracking) error {
panic("implement me")
tr, err := badger.Open(badger.DefaultOptions("_db/trackings"))
dp, err := badger.Open(badger.DefaultOptions("_db/records"))
rd, err := badger.Open(badger.DefaultOptions("_db/raw"))
if err != nil {
logrus.Error(err)
}
return &badgerStore{trackings: tr, records: dp, rawdata: rd}
}
// isDbAvailable reports whether ANY of the three badger databases has been
// closed. NOTE(review): the name is inverted — a true result means the store
// is NOT usable (the caller in Save treats true as "database closed").
// Consider renaming to isDbClosed; left as-is here to keep callers working.
func (r *badgerStore) isDbAvailable() bool {
	return r.trackings.IsClosed() || r.records.IsClosed() || r.rawdata.IsClosed()
}
// Save persists a finished tracking: the paired records into the records DB,
// the raw data points into the rawdata DB, and finally the metadata (keyed by
// creation timestamp) into the trackings DB.
// Returns badger.ErrDBClosed when any backing database is closed, otherwise
// the first marshal/transaction error encountered, or nil on success.
func (r *badgerStore) Save(tr core.Tracking) error {
	// isDbAvailable returns true when a DB is closed (inverted name).
	if ok := r.isDbAvailable(); ok {
		logrus.Error("unable to write to database. database closed!")
		return badger.ErrDBClosed
	}
	ts, err := tr.TimeCreated.MarshalText()
	if err != nil {
		logrus.Error(err, tr)
	}
	logrus.Info("save tracking:", tr.TimeCreated)
	meta, err := json.Marshal(tr.TrackingMetadata)
	if err != nil {
		logrus.Error(err, tr)
		return err
	}
	err = r.records.Update(func(txn *badger.Txn) error {
		for _, v := range tr.Records {
			k := createDataKey(tr.UUID, v.RecordTime.UnixNano())
			j, err := json.Marshal(v)
			if err != nil {
				return err
			}
			// Propagate Set failures instead of silently dropping them.
			if err := txn.Set(k, j); err != nil {
				return err
			}
		}
		return nil
	})
	if err != nil {
		logrus.Error(err, tr)
		return err
	}
	// BUG FIX: raw data belongs in the rawdata DB; it was written to
	// r.records, leaving r.rawdata permanently empty.
	err = r.rawdata.Update(func(txn *badger.Txn) error {
		for _, v := range tr.Rawdata {
			k := createDataKey(tr.UUID, v.Timestamp)
			j, err := json.Marshal(v)
			if err != nil {
				return err
			}
			if err := txn.Set(k, j); err != nil {
				return err
			}
		}
		return nil
	})
	if err != nil {
		logrus.Error(err, tr)
		return err
	}
	err = r.trackings.Update(func(txn *badger.Txn) error {
		return txn.Set(ts, meta)
	})
	if err != nil {
		logrus.Error(err, tr)
		return err
	}
	logrus.Info("successfully saved tracking")
	return nil
}
//func (r *badgerStore) Save(tracking *core.Tracking) error {
// ts, err := tracking.TimeCreated.MarshalText()
// if err != nil {
// logrus.Error(err, tracking)
// }
// logrus.Info("save tracking:", ts)
// meta, err := json.Marshal(tracking.TrackingMetadata)
// if err != nil {
// logrus.Error(err, tracking)
// return err
// }
// wg := sync.WaitGroup{}
// wg.Add(3)
// ch := make(chan error, 3)
// go func() {
// defer wg.Done()
// err = r.records.Update(func(txn *badger.Txn) error {
// for _, v := range tracking.Records {
// k := createDataKey(tracking.UUID, v.RecordTime.UnixNano())
// j, err := json.Marshal(v)
// if err != nil {
// return err
// }
// txn.Set(k, j)
// }
// return nil
// })
// ch <- err
// }()
// go func() {
// defer wg.Done()
// err = r.records.Update(func(txn *badger.Txn) error {
// for _, v := range tracking.Rawdata {
// k := createDataKey(tracking.UUID, v.Timestamp)
// j, err := json.Marshal(v)
// if err != nil {
// return err
// }
// txn.Set(k, j)
// }
// return nil
// })
// ch <- err
// }()
// go func() {
// defer wg.Done()
// err = r.trackings.Update(func(txn *badger.Txn) error {
// err := txn.Set(ts, meta)
// return err
// })
// ch <- err
// }()
// wg.Wait()
// for {
// select {
// case err := <-ch:
// if err != nil {
// logrus.Error(err, tracking)
// return err
// }
// default:
// close(ch)
// break
// }
// }
// return nil
//}
func (r *badgerStore) LoadAll() ([]core.TrackingMetadata, error) {
panic("implement me")
@ -31,3 +171,27 @@ func (r *badgerStore) LoadAll() ([]core.TrackingMetadata, error) {
func (r *badgerStore) Load(id uuid.UUID) (core.Tracking, error) {
panic("implement me")
}
// createDataKey builds a 24-byte composite key for a data point:
// 16-byte binary UUID prefix followed by an 8-byte little-endian timestamp.
// A negative timestamp is logged but still encoded (wrapping into uint64).
func createDataKey(uid uuid.UUID, timestamp int64) []byte {
	// BUG FIX: use the 16-byte binary form of the UUID. The previous
	// MarshalText produced a 36-byte textual UUID, yielding 44-byte keys
	// that unmarshalDataKey (which expects 24 bytes and uuid.FromBytes)
	// could never parse.
	prefix, err := uid.MarshalBinary()
	if err != nil || timestamp < 0 {
		logrus.Error("unable to create key", err)
	}
	suffix := make([]byte, 8)
	binary.LittleEndian.PutUint64(suffix, uint64(timestamp))
	return append(prefix, suffix...)
}

// unmarshalDataKey splits a composite key created by createDataKey back into
// its UUID and timestamp. Panics on keys that do not match the 24-byte layout.
func unmarshalDataKey(key []byte) (uuid.UUID, int64) {
	if len(key) != 24 {
		panic("corrupted key")
	}
	// BUG FIX: the UUID occupies the first 16 bytes and the timestamp the
	// last 8. The previous slices [0:15]/[15:24] were off by one: a 15-byte
	// prefix always fails uuid.FromBytes and a 9-byte suffix misreads the
	// timestamp.
	prefix := key[0:16]
	suffix := key[16:24]
	uid, err := uuid.FromBytes(prefix)
	if err != nil {
		panic("corrupted key")
	}
	timestamp := int64(binary.LittleEndian.Uint64(suffix))
	return uid, timestamp
}