properly handle timemachine buckets

Sebastien Blot 2022-08-10 12:12:55 +02:00
parent 10c97e46af
commit 9f676844d9
No known key found for this signature in database
GPG key ID: DFC2902F40449F6A
2 changed files with 37 additions and 20 deletions
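The change replaces the single wall-clock expiration with a BlackholeExpiration struct carrying two deadlines: blExpiration is compared against the event's own timestamp (the overflow time, so replayed timemachine events are judged in their own timeline), while cleanupExpiration is compared against wall-clock time by the cleanup goroutine so stale entries are still garbage-collected. Below is a minimal, self-contained sketch of that idea, not the crowdsec code itself; the isBlackholed helper and the sample timestamps are illustrative only.

package main

import (
	"fmt"
	"time"
)

// BlackholeExpiration mirrors the struct added by this commit: one deadline for
// the wall-clock cleanup loop, one for the event-time blackhole check.
type BlackholeExpiration struct {
	blExpiration      time.Time
	cleanupExpiration time.Time
}

// isBlackholed is a hypothetical helper: an event that occurred at eventTime is
// still blackholed while eventTime is before blExpiration.
func isBlackholed(exp BlackholeExpiration, eventTime time.Time) bool {
	return exp.blExpiration.After(eventTime)
}

func main() {
	now := time.Now().UTC()
	// A replayed (timemachine) overflow that happened one hour ago: its blackhole
	// covers one hour of *event* time, while cleanup runs on wall-clock time.
	overflowTime := now.Add(-1 * time.Hour)
	exp := BlackholeExpiration{
		blExpiration:      overflowTime.Add(1 * time.Hour),
		cleanupExpiration: now.Add(1 * time.Hour),
	}

	// An event replayed 30 minutes after the overflow is still blackholed.
	fmt.Println(isBlackholed(exp, overflowTime.Add(30*time.Minute))) // true

	// An event replayed 2 hours after the overflow is not, even though the
	// tracking entry has not been garbage-collected yet (cleanupExpiration
	// is still in the future when compared to wall-clock time).
	fmt.Println(isBlackholed(exp, overflowTime.Add(2*time.Hour))) // false
}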

Changed file 1 of 2:

@@ -9,17 +9,16 @@ import (
 	"gopkg.in/tomb.v2"
 )
 
-type HiddenKey struct {
-	key        string
-	expiration time.Time
-}
-
 type Blackhole struct {
 	duration time.Duration
-	hiddenKeys []HiddenKey
 	DumbProcessor
 }
 
+type BlackholeExpiration struct {
+	blExpiration      time.Time
+	cleanupExpiration time.Time //we need a separate expiration for the cleanup to properly handle timemachine buckets
+}
+
 func NewBlackhole(bucketFactory *BucketFactory) (*Blackhole, error) {
 	duration, err := time.ParseDuration(bucketFactory.Blackhole)
 	if err != nil {
@@ -28,7 +27,6 @@ func NewBlackhole(bucketFactory *BucketFactory) (*Blackhole, error) {
 	}
 	return &Blackhole{
 		duration:      duration,
-		hiddenKeys:    []HiddenKey{},
 		DumbProcessor: DumbProcessor{},
 	}, nil
 }
@@ -46,8 +44,8 @@ func CleanupBlackhole(bucketsTomb *tomb.Tomb) error {
 			return nil
 		case <-ticker.C:
 			BlackholeTracking.Range(func(key, value interface{}) bool {
-				expirationDate := value.(time.Time)
-				if expirationDate.Before(time.Now().UTC()) {
+				cleanupDate := value.(BlackholeExpiration).cleanupExpiration
+				if cleanupDate.Before(time.Now().UTC()) {
 					log.Debugf("Expiring blackhole for %s", key)
 					BlackholeTracking.Delete(key)
 				}
@@ -59,9 +57,11 @@ func CleanupBlackhole(bucketsTomb *tomb.Tomb) error {
 func (bl *Blackhole) OnBucketOverflow(bucketFactory *BucketFactory) func(*Leaky, types.RuntimeAlert, *Queue) (types.RuntimeAlert, *Queue) {
 	return func(leaky *Leaky, alert types.RuntimeAlert, queue *Queue) (types.RuntimeAlert, *Queue) {
-		if expirationDate, ok := BlackholeTracking.Load(leaky.Mapkey); ok {
-			if expirationDate.(time.Time).After(time.Now().UTC()) {
-				leaky.logger.Debugf("Blackhole already triggered for %s", leaky.Mapkey)
+		if expiration, ok := BlackholeTracking.Load(leaky.Mapkey); ok {
+			x := expiration.(BlackholeExpiration)
+			if x.blExpiration.After(leaky.Ovflw_ts) {
+				leaky.logger.Debugf("Blackhole already triggered for %s (remaining : %s)", leaky.Mapkey, x.blExpiration.Sub(time.Now().UTC()))
 				return types.RuntimeAlert{
 					Mapkey: leaky.Mapkey,
 				}, nil
@@ -71,10 +71,12 @@ func (bl *Blackhole) OnBucketOverflow(bucketFactory *BucketFactory) func(*Leaky,
 			}
 		}
 
-		BlackholeTracking.Store(leaky.Mapkey, time.Now().UTC().Add(bl.duration))
-
-		leaky.logger.Debugf("Blackhole triggered for %s", leaky.Mapkey)
+		BlackholeTracking.Store(leaky.Mapkey, BlackholeExpiration{
+			blExpiration:      leaky.Ovflw_ts.Add(bl.duration),
+			cleanupExpiration: time.Now().UTC().Add(bl.duration),
+		})
+		leaky.logger.Debugf("Blackhole triggered for %s (expiration : %s)", leaky.Mapkey, leaky.Ovflw_ts.Add(bl.duration))
 
 		return alert, queue
 	}
 }
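
For reference, a stripped-down sketch of the sweep pattern used by CleanupBlackhole after this change, assuming a local sync.Map standing in for BlackholeTracking and made-up keys; only cleanupExpiration is consulted, so an entry left behind by a replayed bucket is still removed on wall-clock time even though its blExpiration lies far in the past:

package main

import (
	"fmt"
	"sync"
	"time"
)

// Two deadlines, as in the commit: blExpiration is event time, cleanupExpiration
// is wall-clock time and is the only one the sweeper looks at.
type blackholeExpiration struct {
	blExpiration      time.Time
	cleanupExpiration time.Time
}

func main() {
	var tracking sync.Map // stands in for BlackholeTracking

	// Entry left behind by a replayed (timemachine) bucket: its event-time
	// expiration is far in the past, and its wall-clock deadline has also passed.
	tracking.Store("192.168.1.10", blackholeExpiration{
		blExpiration:      time.Now().UTC().Add(-24 * time.Hour),
		cleanupExpiration: time.Now().UTC().Add(-time.Second), // already sweepable
	})

	// The sweep: same shape as the ticker branch of CleanupBlackhole.
	tracking.Range(func(key, value interface{}) bool {
		cleanupDate := value.(blackholeExpiration).cleanupExpiration
		if cleanupDate.Before(time.Now().UTC()) {
			fmt.Printf("Expiring blackhole for %s\n", key)
			tracking.Delete(key)
		}
		return true
	})
}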

Changed file 2 of 2:

@@ -277,7 +277,7 @@ func LoadOrStoreBucketFromHolder(partitionKey string, buckets *Buckets, holder B
 		//once the created goroutine is ready to process event, we can return it
 		<-fresh_bucket.Signal
 	} else {
-		holder.logger.Debugf("Unexpectedly found exisint bucket for %s", partitionKey)
+		holder.logger.Debugf("Unexpectedly found existing bucket for %s", partitionKey)
 		biface = actual
 	}
 	holder.logger.Debugf("Created new bucket %s", partitionKey)
@@ -345,10 +345,25 @@ func PourItemToHolders(parsed types.Event, holders []BucketFactory, buckets *Buc
 			}
 		}
 		buckey := GetKey(holders[idx], groupby)
-		if _, ok := BlackholeTracking.Load(buckey); ok {
-			holders[idx].logger.Tracef("Event is blackholed: %s", buckey)
+		if x, ok := BlackholeTracking.Load(buckey); ok {
+			holders[idx].logger.Debugf("Checking if blackhole has expired for %s", buckey)
+			blackholeExp := x.(BlackholeExpiration)
+			t := time.Now().UTC()
+			if parsed.ExpectMode == TIMEMACHINE {
+				//This is not optimal at all, date enrichment should also set parsed.Time to avoid parsing the date twice
+				err := t.UnmarshalText([]byte(parsed.MarshaledTime))
+				if err != nil {
+					holders[idx].logger.Errorf("failed parsing time : %v", err)
+				}
+				holders[idx].logger.Debugf("Found TIMEMACHINE bucket, using %s as time, comparing against %s", t, blackholeExp.blExpiration)
+			}
+			if blackholeExp.blExpiration.After(t) {
+				holders[idx].logger.Debugf("Event is blackholed: %s (remaining: %s)", buckey, blackholeExp.blExpiration.Sub(t))
 			continue
 		}
+			holders[idx].logger.Debugf("Event is no longer blackholed: %s", buckey)
+			BlackholeTracking.Delete(buckey)
+		}
 		//we need to either find the existing bucket, or create a new one (if it's the first event to hit it for this partition key)
 		bucket, err := LoadOrStoreBucketFromHolder(buckey, buckets, holders[idx], parsed.ExpectMode)
 		if err != nil {
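
The TIMEMACHINE branch above re-parses parsed.MarshaledTime into t. As a small aside, this is how time.Time.UnmarshalText behaves on an RFC 3339 timestamp, which is what the date enrichment is assumed to produce; the sample value is made up:

package main

import (
	"fmt"
	"time"
)

func main() {
	// Illustrative RFC 3339 value standing in for parsed.MarshaledTime.
	marshaled := "2022-08-10T10:12:55Z"
	t := time.Now().UTC() // fallback value if parsing fails, as in the diff
	if err := t.UnmarshalText([]byte(marshaled)); err != nil {
		fmt.Printf("failed parsing time : %v\n", err)
	}
	fmt.Println(t) // 2022-08-10 10:12:55 +0000 UTC
}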