// reaction/app/persist.go
package app

import (
	"encoding/gob"
	"errors"
	"io"
	"os"
	"time"

	"framagit.org/ppom/reaction/logger"
)
// On-disk database files, relative to the working directory.
const (
	logDBName    = "./reaction-matches.db"     // matches (log entries) DB
	logDBNewName = "./reaction-matches.new.db" // temporary file used while rotating the matches DB
	flushDBName  = "./reaction-flushes.db"     // flushes DB
)
func openDB(path string) (bool, *ReadDB) {
2023-09-06 02:00:33 +02:00
file, err := os.Open(path)
if err != nil {
if errors.Is(err, os.ErrNotExist) {
2023-10-12 12:00:00 +02:00
logger.Printf(logger.WARN, "No DB found at %s. It's ok if this is the first time reaction is running.\n", path)
return true, nil
}
2023-10-12 12:00:00 +02:00
logger.Fatalln("Failed to open DB:", err)
2023-09-06 02:00:33 +02:00
}
return false, &ReadDB{file, gob.NewDecoder(file)}
2023-09-06 02:00:33 +02:00
}
func createDB(path string) *WriteDB {
2023-09-06 02:00:33 +02:00
file, err := os.Create(path)
if err != nil {
2023-10-12 12:00:00 +02:00
logger.Fatalln("Failed to create DB:", err)
2023-09-06 02:00:33 +02:00
}
return &WriteDB{file, gob.NewEncoder(file)}
2023-09-06 02:00:33 +02:00
}
2023-09-09 20:42:47 +02:00
func DatabaseManager(c *Conf) {
logDB, flushDB := c.RotateDB(true)
2023-09-22 23:18:08 +02:00
close(startupMatchesC)
c.manageLogs(logDB, flushDB)
2023-09-06 02:00:33 +02:00
}
func (c *Conf) manageLogs(logDB *WriteDB, flushDB *WriteDB) {
cpt := 0
writeSF2int := make(map[SF]int)
writeCpt := 1
2023-09-06 02:00:33 +02:00
for {
select {
2023-09-25 20:42:42 +02:00
case entry := <-flushToDatabaseC:
flushDB.enc.Encode(entry)
case entry := <-logsC:
encodeOrFatal(logDB.enc, entry, writeSF2int, &writeCpt)
cpt++
// let's say 100 000 entries ~ 10 MB
if cpt == 500_000 {
cpt = 0
2023-10-12 12:00:00 +02:00
logger.Printf(logger.INFO, "Rotating database...")
logDB.file.Close()
flushDB.file.Close()
logDB, flushDB = c.RotateDB(false)
2023-10-12 12:00:00 +02:00
logger.Printf(logger.INFO, "Rotated database")
}
2023-09-06 02:00:33 +02:00
}
}
}
func (c *Conf) RotateDB(startup bool) (*WriteDB, *WriteDB) {
2023-09-06 02:00:33 +02:00
var (
doesntExist bool
err error
logReadDB *ReadDB
flushReadDB *ReadDB
logWriteDB *WriteDB
flushWriteDB *WriteDB
2023-09-06 02:00:33 +02:00
)
doesntExist, logReadDB = openDB(logDBName)
if doesntExist {
return createDB(logDBName), createDB(flushDBName)
}
doesntExist, flushReadDB = openDB(flushDBName)
if doesntExist {
2023-10-12 12:00:00 +02:00
logger.Println(logger.WARN, "Strange! No flushes db, opening /dev/null instead")
doesntExist, flushReadDB = openDB("/dev/null")
if doesntExist {
2023-10-12 12:00:00 +02:00
logger.Fatalln("Opening dummy /dev/null failed")
2023-09-06 02:00:33 +02:00
}
}
logWriteDB = createDB(logDBNewName)
2023-09-06 02:00:33 +02:00
rotateDB(c, logReadDB.dec, flushReadDB.dec, logWriteDB.enc, startup)
2023-09-06 02:00:33 +02:00
err = logReadDB.file.Close()
2023-09-06 02:00:33 +02:00
if err != nil {
2023-10-12 12:00:00 +02:00
logger.Fatalln("Failed to close old DB:", err)
2023-09-06 02:00:33 +02:00
}
// It should be ok to rename an open file
err = os.Rename(logDBNewName, logDBName)
2023-09-06 02:00:33 +02:00
if err != nil {
2023-10-12 12:00:00 +02:00
logger.Fatalln("Failed to replace old DB with new one:", err)
2023-09-06 02:00:33 +02:00
}
err = os.Remove(flushDBName)
if err != nil && !errors.Is(err, os.ErrNotExist) {
2023-10-12 12:00:00 +02:00
logger.Fatalln("Failed to delete old DB:", err)
}
flushWriteDB = createDB(flushDBName)
return logWriteDB, flushWriteDB
2023-09-06 02:00:33 +02:00
}
func rotateDB(c *Conf, logDec *gob.Decoder, flushDec *gob.Decoder, logEnc *gob.Encoder, startup bool) {
// This mapping is a space optimization feature
// It permits to compress stream+filter to a small number (which is a byte in gob)
// We do this only for matches, not for flushes
readSF2int := make(map[int]SF)
writeSF2int := make(map[SF]int)
writeCounter := 1
2023-09-06 02:00:33 +02:00
// This extra code is made to warn only one time for each non-existant filter
discardedEntries := make(map[SF]int)
malformedEntries := 0
defer func() {
for sf, t := range discardedEntries {
if t > 0 {
2023-10-12 12:00:00 +02:00
logger.Printf(logger.WARN, "info discarded %v times from the DBs: stream/filter not found: %s.%s\n", t, sf.s, sf.f)
2023-09-06 02:00:33 +02:00
}
}
if malformedEntries > 0 {
2023-10-12 12:00:00 +02:00
logger.Printf(logger.WARN, "%v malformed entries discarded from the DBs\n", malformedEntries)
2023-09-06 02:00:33 +02:00
}
}()
// pattern, stream, fitler → last flush
flushes := make(map[PSF]time.Time)
for {
2023-10-20 12:00:00 +02:00
var entry LogEntry
var filter *Filter
// decode entry
2023-10-20 12:00:00 +02:00
err := flushDec.Decode(&entry)
2023-09-06 02:00:33 +02:00
if err != nil {
if err == io.EOF {
break
}
malformedEntries++
continue
}
// retrieve related filter
if entry.Stream != "" || entry.Filter != "" {
if stream := c.Streams[entry.Stream]; stream != nil {
filter = stream.Filters[entry.Filter]
}
if filter == nil {
discardedEntries[SF{entry.Stream, entry.Filter}]++
continue
}
2023-09-06 02:00:33 +02:00
}
// store
flushes[PSF{entry.Pattern, entry.Stream, entry.Filter}] = entry.T
2023-09-06 02:00:33 +02:00
}
now := time.Now()
for {
2023-10-20 12:00:00 +02:00
var entry LogEntry
var filter *Filter
2023-09-06 02:00:33 +02:00
// decode entry
2023-10-20 12:00:00 +02:00
err := logDec.Decode(&entry)
2023-09-06 02:00:33 +02:00
if err != nil {
if err == io.EOF {
break
2023-09-06 02:00:33 +02:00
}
malformedEntries++
continue
}
// retrieve related stream & filter
if entry.Stream == "" && entry.Filter == "" {
2023-10-20 12:00:00 +02:00
sf, ok := readSF2int[entry.SF]
if !ok {
discardedEntries[SF{"", ""}]++
continue
}
2023-10-20 12:00:00 +02:00
entry.Stream = sf.s
entry.Filter = sf.f
}
if stream := c.Streams[entry.Stream]; stream != nil {
filter = stream.Filters[entry.Filter]
}
if filter == nil {
discardedEntries[SF{entry.Stream, entry.Filter}]++
continue
}
if entry.SF != 0 {
readSF2int[entry.SF] = SF{entry.Stream, entry.Filter}
2023-09-06 02:00:33 +02:00
}
2023-10-01 12:00:00 +02:00
// check if it hasn't been flushed
lastGlobalFlush := flushes[PSF{entry.Pattern, "", ""}].Unix()
lastLocalFlush := flushes[PSF{entry.Pattern, entry.Stream, entry.Filter}].Unix()
entryTime := entry.T.Unix()
if lastLocalFlush > entryTime || lastGlobalFlush > entryTime {
continue
}
2023-09-06 02:00:33 +02:00
// store matches
if !entry.Exec && entry.T.Add(filter.retryDuration).Unix() > now.Unix() {
if startup {
startupMatchesC <- PFT{entry.Pattern, filter, entry.T}
2023-09-06 02:00:33 +02:00
}
encodeOrFatal(logEnc, entry, writeSF2int, &writeCounter)
2023-09-06 02:00:33 +02:00
}
// replay executions
if entry.Exec && entry.T.Add(*filter.longuestActionDuration).Unix() > now.Unix() {
if startup {
2023-10-01 12:00:00 +02:00
flushToMatchesC <- FlushMatchOrder{entry.Pattern, nil}
filter.sendActions(entry.Pattern, entry.T)
2023-09-06 02:00:33 +02:00
}
encodeOrFatal(logEnc, entry, writeSF2int, &writeCounter)
2023-09-06 02:00:33 +02:00
}
}
}
func encodeOrFatal(enc *gob.Encoder, entry LogEntry, writeSF2int map[SF]int, writeCounter *int) {
sf, ok := writeSF2int[SF{entry.Stream, entry.Filter}]
if ok {
entry.SF = sf
entry.Stream = ""
entry.Filter = ""
} else {
entry.SF = *writeCounter
writeSF2int[SF{entry.Stream, entry.Filter}] = *writeCounter
*writeCounter++
}
err := enc.Encode(entry)
if err != nil {
2023-10-12 12:00:00 +02:00
logger.Fatalln("Failed to write to new DB:", err)
}
}