some improvements in logging and naming/structuring

This commit is contained in:
Timo Volkmann 2020-12-12 16:34:35 +01:00
parent 1ea6822202
commit 391020ec47
6 changed files with 329 additions and 308 deletions

View File

@ -1,44 +1,44 @@
package core
import (
"errors"
"github.com/sirupsen/logrus"
"errors"
"github.com/sirupsen/logrus"
)
// dispatcher fans published messages out to all subscribed listeners.
// (Diff artifact removed: the field declarations appeared twice.)
type dispatcher struct {
	listeners map[int16]chan string // subscription id -> delivery channel
	counter   int16                 // next subscription id to hand out
}
// NewDispatcher creates an empty dispatcher with no listeners.
func NewDispatcher() *dispatcher {
	return &dispatcher{
		listeners: make(map[int16]chan string),
		counter:   0,
	}
}
// Publish sends message to every subscribed listener channel.
// The listener channels are unbuffered (see Subscribe), so each send
// blocks until the subscriber receives — a stalled subscriber stalls
// publishing for everyone. NOTE(review): consider a select with a
// default or a buffered channel if that becomes a problem.
func (d *dispatcher) Publish(message string) {
	// fixed format string: the old version had two verbs but one argument
	logrus.Debugf("publish to %v listeners\n", len(d.listeners))
	logrus.Debug(message)
	for _, ch := range d.listeners {
		ch <- message
	}
}
// Subscribe registers a new listener and returns its id together with
// the receive-only channel messages will be delivered on.
// NOTE(review): counter and listeners are not synchronized; concurrent
// Subscribe/Publish/Unsubscribe calls are a data race — confirm all
// callers run on one goroutine or add a mutex.
func (d *dispatcher) Subscribe() (id int16, receiver <-chan string) {
	key := d.counter
	d.counter++
	rec := make(chan string)
	d.listeners[key] = rec
	return key, rec
}
// Unsubscribe removes the listener with the given id and closes its
// channel (so a ranging receiver terminates). Returns an error when no
// such subscription exists.
func (d *dispatcher) Unsubscribe(id int16) error {
	receiver, ok := d.listeners[id]
	if !ok {
		return errors.New("no subscription with id")
	}
	delete(d.listeners, id)
	close(receiver)
	return nil
}

View File

@ -1,174 +1,164 @@
package core
import (
"errors"
"git.timovolkmann.de/gyrogpsc/ublox"
"github.com/sirupsen/logrus"
"github.com/tidwall/gjson"
"math"
"time"
"errors"
"git.timovolkmann.de/gyrogpsc/ublox"
"github.com/sirupsen/logrus"
"github.com/tidwall/gjson"
"math"
"time"
)
type sourceId string
const (
SOURCE_TCP sourceId = "SOURCE_TCP"
SOURCE_SERIAL sourceId = "SOURCE_SERIAL"
SOURCE_TCP sourceId = "SOURCE_TCP"
SOURCE_SERIAL sourceId = "SOURCE_SERIAL"
)
// sensorData is one fused sample from a single input source.
type sensorData struct {
	itow        uint32     // u-blox GPS time of week (ms); 0 means unknown
	source      sourceId   // which input produced this sample (TCP phone / serial GNSS)
	ServerTime  time.Time  // wall-clock time the sample was received here
	Timestamp   int64      // sensor-reported time, UnixNano
	Position    [3]float64 // lat (deg), lon (deg), altitude (m) — see converters below
	Orientation [3]float64 // pitch, roll, heading/yaw in degrees — see converters below
}
type recordPair struct {
RecordTime time.Time
data map[sourceId]sensorData
}
type rawRecord struct {
RecordTime time.Time
sensorData
itow uint32
source sourceId
ServerTime time.Time
Timestamp int64
Position [3]float64
Orientation [3]float64
}
// isSameEpoch reports whether n belongs to the same GPS epoch as s,
// compared via the u-blox time-of-week. An itow of 0 on n means
// "unknown" and never matches.
func (s sensorData) isSameEpoch(n sensorData) bool {
	if n.itow == 0 {
		return false
	}
	return s.itow == n.itow
}
// ConsolidateEpochsOnly merges s into n when both carry the same GPS
// epoch (itow): zero-valued Timestamp/Position/Orientation fields of n
// are filled from s. When the epochs differ, n is returned unchanged.
// Aborts (via checkSources) if the two samples come from different sources.
func (s sensorData) ConsolidateEpochsOnly(n sensorData) sensorData {
	s.checkSources(&n)
	if s.isSameEpoch(n) {
		null := sensorData{}
		if n.Timestamp == null.Timestamp {
			n.Timestamp = s.Timestamp
		}
		if n.Position == null.Position {
			n.Position = s.Position
		}
		if n.Orientation == null.Orientation {
			n.Orientation = s.Orientation
		}
	}
	return n
}
// ConsolidateExTime merges s into n regardless of epoch, filling
// zero-valued Position/Orientation of n from s. Timestamps are left
// untouched. Aborts (via checkSources) on mismatched sources.
func (s sensorData) ConsolidateExTime(n sensorData) sensorData {
	s.checkSources(&n)
	null := sensorData{}
	if n.Position == null.Position {
		n.Position = s.Position
	}
	if n.Orientation == null.Orientation {
		n.Orientation = s.Orientation
	}
	return n
}
// checkSources terminates the program when two samples from different
// sources are about to be consolidated; a zero-valued s is exempt
// (consolidating into an empty accumulator is fine).
// NOTE(review): logrus.Fatalln calls os.Exit — consider returning an
// error instead of killing the process on a data inconsistency.
func (s *sensorData) checkSources(n *sensorData) {
	if (s.source != n.source && *s != sensorData{}) {
		logrus.Println(s)
		logrus.Println(n)
		logrus.Fatalln("Do not consolidate sensorData from different Sources")
	}
}
// Sentinel errors for the converters below.
// (Diff artifact removed: each declaration appeared twice.)
var (
	errNotImplemented = errors.New("message not implemented")
	errRawMessage     = errors.New("raw message")
)
// ConvertUbxToSensorData maps a decoded u-blox message to a sensorData
// sample tagged SOURCE_SERIAL. NAV-PVT/HNR-PVT fill time and position,
// NAV-ATT fills orientation. Raw frames yield (nil, nil) — deliberately
// dropped — and unsupported message types yield errNotImplemented.
func ConvertUbxToSensorData(msg interface{}) (*sensorData, error) {
	sd := &sensorData{
		ServerTime: time.Now(),
		source:     SOURCE_SERIAL,
	}
	switch v := msg.(type) {
	case *ublox.NavPvt:
		sd.itow = v.ITOW_ms
		sd.Timestamp = time.Date(int(v.Year_y), time.Month(v.Month_month), int(v.Day_d), int(v.Hour_h), int(v.Min_min), int(v.Sec_s), int(v.Nano_ns), time.UTC).UnixNano()
		sd.Position[0] = float64(v.Lat_dege7) / 1e+7 // 1e-7 deg -> deg
		sd.Position[1] = float64(v.Lon_dege7) / 1e+7
		sd.Position[2] = float64(v.HMSL_mm) / 1e+3 // mm in m
	case *ublox.HnrPvt:
		sd.itow = v.ITOW_ms
		sd.Timestamp = time.Date(int(v.Year_y), time.Month(v.Month_month), int(v.Day_d), int(v.Hour_h), int(v.Min_min), int(v.Sec_s), int(v.Nano_ns), time.UTC).UnixNano()
		sd.Position[0] = float64(v.Lat_dege7) / 1e+7
		sd.Position[1] = float64(v.Lon_dege7) / 1e+7
		sd.Position[2] = float64(v.HMSL_mm) / 1e+3 // mm in m
	case *ublox.NavAtt:
		sd.itow = v.ITOW_ms
		sd.Orientation[0] = float64(v.Pitch_deg) * 1e-5 // scaled 1e-5 deg -> deg
		sd.Orientation[1] = float64(v.Roll_deg) * 1e-5
		sd.Orientation[2] = float64(v.Heading_deg) * 1e-5
	case *ublox.RawMessage:
		// raw frames carry no decoded fields; silently dropped
		return nil, nil
	default:
		return nil, errNotImplemented
	}
	return sd, nil
}
// ConvertSensorDataPhone decodes a phone JSON payload, dispatching on
// the "os" field: "hyperimu" selects the Android HyperIMU format, any
// other value falls back to the iPhone SensorLog format.
func ConvertSensorDataPhone(jsonData []byte) (*sensorData, error) {
	if gjson.Get(string(jsonData), "os").String() == "hyperimu" {
		return convertAndroidHyperImu(jsonData)
	}
	return convertIPhoneSensorLog(jsonData)
}
// convertIPhoneSensorLog parses an iPhone SensorLog JSON record into a
// sensorData sample tagged SOURCE_TCP. The timestamp arrives as float
// seconds since 1970 and is scaled to UnixNano; orientation angles are
// converted from radians to degrees.
func convertIPhoneSensorLog(jsonData []byte) (*sensorData, error) {
	// convert []byte -> string once instead of once per gjson.Get
	doc := string(jsonData)
	timestamp := gjson.Get(doc, "locationTimestamp_since1970").Float()
	lat := gjson.Get(doc, "locationLatitude").Float()
	lon := gjson.Get(doc, "locationLongitude").Float()
	alt := gjson.Get(doc, "locationAltitude").Float()
	pitch := gjson.Get(doc, "motionPitch").Float() * 180 / math.Pi
	roll := gjson.Get(doc, "motionRoll").Float() * 180 / math.Pi
	yaw := gjson.Get(doc, "motionYaw").Float() * 180 / math.Pi
	sd := &sensorData{
		ServerTime:  time.Now(),
		source:      SOURCE_TCP,
		Timestamp:   int64(timestamp * float64(time.Second)), // s -> ns
		Position:    [3]float64{lat, lon, alt},
		Orientation: [3]float64{pitch, roll, yaw},
	}
	return sd, nil
}
// convertAndroidHyperImu parses an Android HyperIMU JSON record into a
// sensorData sample tagged SOURCE_TCP. HyperIMU timestamps are integer
// milliseconds (scaled to UnixNano); orientation values are used as-is
// (assumed degrees — TODO confirm against the HyperIMU export settings).
func convertAndroidHyperImu(jsonData []byte) (*sensorData, error) {
	// convert []byte -> string once instead of once per gjson.Get
	doc := string(jsonData)
	timestamp := gjson.Get(doc, "Timestamp").Int()
	lat := gjson.Get(doc, "GPS.0").Float()
	lon := gjson.Get(doc, "GPS.1").Float()
	alt := gjson.Get(doc, "GPS.2").Float()
	pitch := gjson.Get(doc, "orientation.0").Float()
	roll := gjson.Get(doc, "orientation.1").Float()
	yaw := gjson.Get(doc, "orientation.2").Float()
	sd := &sensorData{
		ServerTime:  time.Now(),
		source:      SOURCE_TCP,
		Timestamp:   timestamp * int64(time.Millisecond), // ms -> ns
		Position:    [3]float64{lat, lon, alt},
		Orientation: [3]float64{pitch, roll, yaw},
	}
	return sd, nil
}

View File

@ -92,11 +92,11 @@ func (t *trackingService) StopRecord() {
t.opMode = LIVE
t.pipe.StopRecord()
m1.Lock()
m2.Lock()
mRec.Lock()
mRaw.Lock()
err := t.repo.Save(*t.current)
m2.Unlock()
m1.Unlock()
mRaw.Unlock()
mRec.Unlock()
if err != nil {
logrus.Println(err)
@ -140,9 +140,9 @@ func (t *trackingService) LoadTracking(trackingId uuid.UUID) {
}
// safelyReplaceTracking swaps the current tracking while holding both
// the record lock and the raw-data lock, so concurrent Enqueue* calls
// never observe a half-replaced tracking. Lock order (mRec, then mRaw)
// matches StopRecord to avoid deadlock.
func (t *trackingService) safelyReplaceTracking(tr Tracking) {
	mRec.Lock()
	mRaw.Lock()
	*t.current = tr
	mRaw.Unlock()
	mRec.Unlock()
}

View File

@ -1,62 +1,75 @@
package core
import (
"github.com/google/uuid"
"github.com/sirupsen/logrus"
"sync"
"time"
"github.com/google/uuid"
"github.com/sirupsen/logrus"
"sync"
"time"
)
var m1 sync.RWMutex
var m2 sync.RWMutex
// Tracking bundles one recording session: its metadata, the
// consolidated per-epoch record pairs and the raw sensor samples.
// (Diff artifact removed: the fields appeared twice.)
type Tracking struct {
	TrackingMetadata
	Records []recordPair
	Rawdata []rawRecord
}
var mRec sync.RWMutex
var mRaw sync.RWMutex
// TrackingMetadata identifies a tracking session and records which
// collectors contributed to it.
type TrackingMetadata struct {
	UUID        uuid.UUID
	TimeCreated time.Time
	Collectors  []CollectorType
}
// persistence wrapper for sensordata

// recordPair holds one consolidated epoch: one sample per source,
// keyed by the wall-clock time the pair was enqueued.
type recordPair struct {
	RecordTimeKey time.Time // uniqueness ensured through mutex
	DataPair      map[sourceId]sensorData
}

// rawRecord holds a single unconsolidated sample, keyed by the
// wall-clock time it was enqueued.
type rawRecord struct {
	RecordTimeKey time.Time // uniqueness ensured through mutex
	Data          sensorData
}

// END persistence wrapper for sensordata
// EnqueuePair appends a consolidated tcp/serial sample pair to the
// record list. The key is the current time; uniqueness relies on mRec
// serializing appends (time.Now cannot repeat within one critical
// section on this timescale).
func (s *Tracking) EnqueuePair(tcp sensorData, ser sensorData) {
	rp := recordPair{
		RecordTimeKey: time.Now(),
		DataPair: map[sourceId]sensorData{
			tcp.source: tcp,
			ser.source: ser,
		},
	}
	mRec.Lock()
	s.Records = append(s.Records, rp)
	logrus.Debugln("tracking Records: len->", len(s.Records))
	mRec.Unlock()
}
// EnqueueRaw appends a raw sensor sample to the raw-data list.
// BUGFIX: guard Rawdata with mRaw, not mRec — mRec protects Records,
// and callers such as StopRecord/safelyReplaceTracking take mRaw for
// raw data; locking the wrong mutex here left Rawdata unprotected
// against them.
func (s *Tracking) EnqueueRaw(data sensorData) {
	sr := rawRecord{
		time.Now(),
		data,
	}
	mRaw.Lock()
	s.Rawdata = append(s.Rawdata, sr)
	logrus.Debugln("raw data points: len->", len(s.Rawdata))
	mRaw.Unlock()
}
// emptyTracking returns a fresh Tracking with a new random UUID and
// empty (non-nil) record/raw slices, so callers can append and
// serialize without nil checks.
func emptyTracking() Tracking {
	return Tracking{
		TrackingMetadata: TrackingMetadata{
			UUID: uuid.New(),
		},
		Records: []recordPair{},
		Rawdata: []rawRecord{},
	}
}
// isEmpty reports whether the tracking holds neither records nor raw data.
func (s *Tracking) isEmpty() bool {
	return len(s.Rawdata)+len(s.Records) == 0
}

2
go.mod
View File

@ -12,7 +12,7 @@ require (
github.com/spf13/viper v1.7.1
github.com/stretchr/testify v1.6.1 // indirect
github.com/tidwall/gjson v1.6.0
github.com/tidwall/pretty v1.0.2 // indirect
github.com/tidwall/pretty v1.0.2
go.bug.st/serial v1.1.1
golang.org/x/sync v0.0.0-20190423024810-112230192c58
)

View File

@ -1,99 +1,116 @@
package storage
import (
"encoding/binary"
"encoding/json"
"git.timovolkmann.de/gyrogpsc/core"
"github.com/dgraph-io/badger/v2"
"github.com/google/uuid"
"github.com/sirupsen/logrus"
"os"
"path/filepath"
"encoding/binary"
"encoding/json"
"git.timovolkmann.de/gyrogpsc/core"
"github.com/dgraph-io/badger/v2"
"github.com/google/uuid"
"github.com/sirupsen/logrus"
"github.com/tidwall/pretty"
"os"
"path/filepath"
"strconv"
)
// badgerStore persists trackings across three separate badger
// databases. Must implement Repo.
type badgerStore struct {
	trackingsDb *badger.DB // tracking metadata, keyed by creation time
	recordsDb   *badger.DB // consolidated record pairs, keyed by UUID+timestamp
	rawdataDb   *badger.DB // raw sensor samples, keyed by UUID+timestamp
}
// NewRepository opens (creating the _db directory if necessary) the
// three badger databases under the working directory and returns a
// store wrapping them. Open failures are logged, not fatal.
func NewRepository(c *core.Configuration) *badgerStore {
	dir, _ := os.Getwd()
	logrus.Debug(dir)
	if _, err := os.Stat(filepath.Join(dir, "_db")); os.IsNotExist(err) {
		os.Mkdir(filepath.Join(dir, "_db"), os.ModePerm)
	}
	// BUGFIX: check each Open individually — previously only the last
	// err was inspected, silently dropping failures of the first two.
	tr, err := badger.Open(badger.DefaultOptions("_db/trackings"))
	if err != nil {
		logrus.Error(err)
	}
	dp, err := badger.Open(badger.DefaultOptions("_db/records"))
	if err != nil {
		logrus.Error(err)
	}
	rd, err := badger.Open(badger.DefaultOptions("_db/raw"))
	if err != nil {
		logrus.Error(err)
	}
	return &badgerStore{trackingsDb: tr, recordsDb: dp, rawdataDb: rd}
}
// isDbAvailable reports whether ANY of the three databases is closed.
// NOTE(review): the name is inverted — it returns true when the store
// is NOT available; callers (see Save) treat true as "closed". Kept
// for compatibility; consider renaming to isDbClosed.
func (r *badgerStore) isDbAvailable() bool {
	return r.trackingsDb.IsClosed() || r.recordsDb.IsClosed() || r.rawdataDb.IsClosed()
}
// Save persists a tracking: every record pair goes into recordsDb and
// every raw sample into rawdataDb (both keyed by UUID+timestamp via
// createRecordKey), then the metadata into trackingsDb keyed by the
// text-marshaled creation time. Afterwards a best-effort value-log GC
// pass runs on all three databases.
func (r *badgerStore) Save(tr core.Tracking) error {
	if ok := r.isDbAvailable(); ok { // true == some DB is closed
		logrus.Error("unable to write to database. database closed!")
		return badger.ErrDBClosed
	}
	ts, err := tr.TimeCreated.MarshalText()
	if err != nil {
		logrus.Error(err, tr)
	}
	logrus.Info("save tracking:", tr.TimeCreated)
	meta, err := json.Marshal(tr.TrackingMetadata)
	if err != nil {
		logrus.Error(err, tr)
		return err
	}
	err = r.recordsDb.Update(func(txn *badger.Txn) error {
		for _, v := range tr.Records {
			k := createRecordKey(tr.UUID, v.RecordTimeKey.UnixNano())
			j, err := json.Marshal(v.DataPair)
			logrus.Debugln("save record k/v:\n", tr.UUID.String(), strconv.FormatInt(v.RecordTimeKey.UnixNano(), 10))
			logrus.Debugln(string(pretty.Pretty(j)))
			if err != nil {
				return err
			}
			// BUGFIX: propagate Set errors instead of discarding them
			if err := txn.Set(k, j); err != nil {
				return err
			}
		}
		return nil
	})
	if err != nil {
		logrus.Error(err, tr)
		return err
	}
	err = r.rawdataDb.Update(func(txn *badger.Txn) error {
		for _, v := range tr.Rawdata {
			k := createRecordKey(tr.UUID, v.RecordTimeKey.UnixNano())
			j, err := json.Marshal(v)
			logrus.Debugln("save raw k/v:\n", tr.UUID.String(), strconv.FormatInt(v.RecordTimeKey.UnixNano(), 10))
			logrus.Debugln(string(pretty.Pretty(j)))
			if err != nil {
				return err
			}
			// BUGFIX: propagate Set errors instead of discarding them
			if err := txn.Set(k, j); err != nil {
				return err
			}
		}
		return nil
	})
	if err != nil {
		logrus.Error(err, tr)
		return err
	}
	err = r.trackingsDb.Update(func(txn *badger.Txn) error {
		logrus.Debug("save tracking meta k/v:\n", string(ts), string(meta))
		return txn.Set(ts, meta)
	})
	if err != nil {
		logrus.Error(err, tr)
		return err
	}
	r.trackingsDb.PrintHistogram(nil)
	// Best-effort GC: badger returns ErrNoRewrite when there is nothing
	// to collect, so errors here are only logged, never returned.
	dr := 0.5
	err = r.trackingsDb.RunValueLogGC(dr)
	logrus.Debug("DB GC:", err)
	err = r.recordsDb.RunValueLogGC(dr)
	logrus.Debug("DB GC:", err)
	err = r.rawdataDb.RunValueLogGC(dr)
	logrus.Debug("DB GC:", err)
	r.trackingsDb.PrintHistogram(nil)
	logrus.Info("successfully saved tracking") // fixed typo "sucessfully"
	return nil
}
//func (r *badgerStore) Save(tracking *core.Tracking) error {
@ -112,9 +129,9 @@ func (r *badgerStore) Save(tr core.Tracking) error {
// ch := make(chan error, 3)
// go func() {
// defer wg.Done()
// err = r.records.Update(func(txn *badger.Txn) error {
// err = r.recordsDb.Update(func(txn *badger.Txn) error {
// for _, v := range tracking.Records {
// k := createDataKey(tracking.UUID, v.RecordTime.UnixNano())
// k := createRecordKey(tracking.UUID, v.RecordTime.UnixNano())
// j, err := json.Marshal(v)
// if err != nil {
// return err
@ -127,9 +144,9 @@ func (r *badgerStore) Save(tr core.Tracking) error {
// }()
// go func() {
// defer wg.Done()
// err = r.records.Update(func(txn *badger.Txn) error {
// err = r.recordsDb.Update(func(txn *badger.Txn) error {
// for _, v := range tracking.Rawdata {
// k := createDataKey(tracking.UUID, v.Timestamp)
// k := createRecordKey(tracking.UUID, v.Timestamp)
// j, err := json.Marshal(v)
// if err != nil {
// return err
@ -142,7 +159,7 @@ func (r *badgerStore) Save(tr core.Tracking) error {
// }()
// go func() {
// defer wg.Done()
// err = r.trackings.Update(func(txn *badger.Txn) error {
// err = r.trackingsDb.Update(func(txn *badger.Txn) error {
// err := txn.Set(ts, meta)
// return err
// })
@ -165,33 +182,34 @@ func (r *badgerStore) Save(tr core.Tracking) error {
//}
// LoadAll is not yet implemented; it will list metadata for all stored
// trackings.
func (r *badgerStore) LoadAll() ([]core.TrackingMetadata, error) {
	panic("implement me")
}
// Load is not yet implemented; it will reassemble a full Tracking from
// the three databases by UUID.
func (r *badgerStore) Load(id uuid.UUID) (core.Tracking, error) {
	panic("implement me")
}
func createDataKey(uid uuid.UUID, timestamp int64) []byte {
prefix, err := uid.MarshalText()
if err != nil || timestamp < 0 {
logrus.Error("unable to create key", err)
}
suffix := make([]byte, 8)
binary.LittleEndian.PutUint64(suffix, uint64(timestamp))
return append(prefix, suffix...)
func createRecordKey(uid uuid.UUID, timestamp int64) []byte {
prefix, err := uid.MarshalText()
if err != nil || timestamp < 0 {
logrus.Error("unable to create key", err)
}
suffix := make([]byte, 8)
binary.BigEndian.PutUint64(suffix, uint64(timestamp))
return append(prefix, suffix...)
}
// unmarshalDataKey splits a key produced by createRecordKey back into
// its UUID and timestamp.
// BUGFIX: createRecordKey emits 36 bytes of *text* UUID + 8 bytes of
// big-endian timestamp = 44 bytes. The old code expected 24 bytes,
// sliced the UUID one byte short (key[0:15] — even a raw UUID is 16
// bytes), took a 9-byte suffix, and decoded the text form with
// uuid.FromBytes (which expects raw bytes). All of these would panic
// or mis-parse on every real key; corrected to mirror createRecordKey.
func unmarshalDataKey(key []byte) (uuid.UUID, int64) {
	if len(key) != 44 {
		panic("corrupted key")
	}
	prefix := key[:36]  // text-form UUID
	suffix := key[36:44] // big-endian nanosecond timestamp
	uid, err := uuid.ParseBytes(prefix)
	if err != nil {
		panic("corrupted key")
	}
	timestamp := int64(binary.BigEndian.Uint64(suffix))
	return uid, timestamp
}