local control flow cleanup (#1215)

Removed redundant/unreachable returns, else branches, type declarations, and unused variables.
This commit is contained in:
mmetc 2022-02-01 22:08:06 +01:00 committed by GitHub
parent d5f17ee377
commit ad28a979e9
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
30 changed files with 187 additions and 242 deletions

View file

@ -15,7 +15,6 @@ import (
"github.com/spf13/cobra"
)
var keyName string
var keyIP string
var keyLength int
var key string

View file

@ -157,10 +157,8 @@ func NewCapiCmd() *cobra.Command {
_, err = Client.Auth.AuthenticateWatcher(context.Background(), t)
if err != nil {
log.Fatalf("Failed to authenticate to Central API (CAPI) : %s", err)
} else {
log.Infof("You can successfully interact with Central API (CAPI)")
}
log.Infof("You can successfully interact with Central API (CAPI)")
},
}
cmdCapi.AddCommand(cmdCapiStatus)

View file

@ -33,11 +33,11 @@ func NewConsoleCmd() *cobra.Command {
var fdErr *fs.PathError
if errors.As(err, &fdErr) {
log.Fatalf("Unable to load Local API : %s", fdErr)
} else if err != nil {
log.Fatalf("Unable to load required Local API Configuration : %s", err)
} else {
log.Fatal("Local API is disabled, please run this command on the local API machine")
}
if err != nil {
log.Fatalf("Unable to load required Local API Configuration : %s", err)
}
log.Fatal("Local API is disabled, please run this command on the local API machine")
}
if csConfig.DisableAPI {
log.Fatal("Local API is disabled, please run this command on the local API machine")

View file

@ -24,7 +24,6 @@ var (
metabaseConfigPath string
metabaseConfigFolder = "metabase/"
metabaseConfigFile = "metabase.yaml"
metabaseImage = "metabase/metabase"
/**/
metabaseListenAddress = "127.0.0.1"
metabaseListenPort = "3000"
@ -33,7 +32,6 @@ var (
forceYes bool
dockerGatewayIPAddr = "172.17.0.1"
/*informations needed to setup a random password on user's behalf*/
)

View file

@ -143,10 +143,7 @@ cscli simulation disable crowdsecurity/ssh-bf`,
if len(args) > 0 {
for _, scenario := range args {
var (
item *cwhub.Item
)
item = cwhub.GetItem(cwhub.SCENARIOS, scenario)
var item = cwhub.GetItem(cwhub.SCENARIOS, scenario)
if item == nil {
log.Errorf("'%s' doesn't exist or is not a scenario", scenario)
continue

View file

@ -201,7 +201,6 @@ func InstallItem(name string, obtype string, force bool) {
}
cwhub.AddItem(obtype, item)
log.Infof("Enabled %s", item.Name)
return
}
func RemoveMany(itemType string, name string) {
@ -557,8 +556,7 @@ func ShowParserMetric(itemName string, metrics map[string]map[string]int) {
//it's a rip of the cli version, but in silent-mode
func silenceInstallItem(name string, obtype string) (string, error) {
var item *cwhub.Item
item = cwhub.GetItem(obtype, name)
var item = cwhub.GetItem(obtype, name)
if item == nil {
return "", fmt.Errorf("error retrieving item")
}

View file

@ -66,16 +66,6 @@ type Flags struct {
type labelsMap map[string]string
type parsers struct {
ctx *parser.UnixParserCtx
povfwctx *parser.UnixParserCtx
stageFiles []parser.Stagefile
povfwStageFiles []parser.Stagefile
nodes []parser.Node
povfwnodes []parser.Node
enricherCtx []parser.EnricherCtx
}
// Return new parsers
// nodes and povfwnodes are already initialized in parser.LoadStages
func newParsers() *parser.Parsers {

View file

@ -218,11 +218,10 @@ func GetMetrics(sources []DataSource, aggregated bool) error {
}
for _, metric := range metrics {
if err := prometheus.Register(metric); err != nil {
if _, ok := err.(prometheus.AlreadyRegisteredError); ok {
//ignore the error
} else {
if _, ok := err.(prometheus.AlreadyRegisteredError); !ok {
return errors.Wrapf(err, "could not register metrics for datasource %s", sources[i].GetName())
}
//ignore the error
}
}

View file

@ -182,9 +182,8 @@ wowo: ajsajasjas
}
if !strings.Contains(err.Error(), test.ExpectedError) {
t.Fatalf("%s : expected error '%s' in '%s'", test.TestName, test.ExpectedError, err.Error())
} else {
continue
}
continue
} else {
if err != nil {
t.Fatalf("%s : unexpected error '%s'", test.TestName, err)
@ -289,9 +288,8 @@ func TestLoadAcquisitionFromFile(t *testing.T) {
}
if !strings.Contains(err.Error(), test.ExpectedError) {
t.Fatalf("%s : expected error '%s' in '%s'", test.TestName, test.ExpectedError, err.Error())
} else {
continue
}
continue
} else {
if err != nil {
t.Fatalf("%s : unexpected error '%s'", test.TestName, err)

View file

@ -492,9 +492,8 @@ stream_name: test_stream`),
if test.expectedResLen != -1 {
if test.expectedResLen != len(rcvd_evts) {
t.Fatalf("%s : expected %d results got %d -> %v", test.name, test.expectedResLen, len(rcvd_evts), rcvd_evts)
} else {
dbgLogger.Debugf("got %d expected messages", len(rcvd_evts))
}
dbgLogger.Debugf("got %d expected messages", len(rcvd_evts))
}
if len(test.expectedResMessages) != 0 {
res := test.expectedResMessages
@ -504,9 +503,8 @@ stream_name: test_stream`),
}
if res[0] != v.Line.Raw {
t.Fatalf("result %d/%d : expected '%s', received '%s' (recvd:%d, expected:%d)", idx, len(rcvd_evts), res[0], v.Line.Raw, len(rcvd_evts), len(test.expectedResMessages))
} else {
dbgLogger.Debugf("got message '%s'", res[0])
}
dbgLogger.Debugf("got message '%s'", res[0])
res = res[1:]
}
if len(res) != 0 {
@ -853,9 +851,8 @@ func TestOneShotAcquisition(t *testing.T) {
}
if res[0] != v.Line.Raw {
t.Fatalf("result %d/%d : expected '%s', received '%s' (recvd:%d, expected:%d)", idx, len(rcvd_evts), res[0], v.Line.Raw, len(rcvd_evts), len(test.expectedResMessages))
} else {
dbgLogger.Debugf("got message '%s'", res[0])
}
dbgLogger.Debugf("got message '%s'", res[0])
res = res[1:]
}
if len(res) != 0 {

View file

@ -111,8 +111,7 @@ func (t *JWTTransport) refreshJwtToken() error {
/*
we don't use the main client, so let's build the body
*/
var buf io.ReadWriter
buf = &bytes.Buffer{}
var buf io.ReadWriter = &bytes.Buffer{}
enc := json.NewEncoder(buf)
enc.SetEscapeHTML(false)
err = enc.Encode(auth)

View file

@ -78,9 +78,8 @@ func (c *ApiClient) Do(ctx context.Context, req *http.Request, v interface{}) (*
if url, err := url.Parse(e.URL); err == nil {
e.URL = url.String()
return newResponse(resp), e
} else {
return newResponse(resp), err
}
return newResponse(resp), err
}
return newResponse(resp), err
}

View file

@ -20,12 +20,11 @@ import (
func FormatOneAlert(alert *ent.Alert) *models.Alert {
var outputAlert models.Alert
var machineID string
startAt := alert.StartedAt.String()
StopAt := alert.StoppedAt.String()
if alert.Edges.Owner == nil {
machineID = "N/A"
} else {
machineID := "N/A"
if alert.Edges.Owner != nil {
machineID = alert.Edges.Owner.MachineId
}

View file

@ -42,10 +42,9 @@ func (c *Config) LoadSimulation() error {
rcfg, err := ioutil.ReadFile(c.ConfigPaths.SimulationFilePath)
if err != nil {
return errors.Wrapf(err, "while reading '%s'", c.ConfigPaths.SimulationFilePath)
} else {
if err := yaml.UnmarshalStrict(rcfg, &simCfg); err != nil {
return fmt.Errorf("while unmarshaling simulation file '%s' : %s", c.ConfigPaths.SimulationFilePath, err)
}
}
if err := yaml.UnmarshalStrict(rcfg, &simCfg); err != nil {
return fmt.Errorf("while unmarshaling simulation file '%s' : %s", c.ConfigPaths.SimulationFilePath, err)
}
if simCfg.Simulation == nil {
simCfg.Simulation = new(bool)

View file

@ -443,7 +443,7 @@ func DumpTree(parser_results ParserResults, bucket_pour BucketPourInfo, opts Dum
fmt.Printf("\t├ Scenarios\n")
}
bnames := make([]string, 0, len(state[tstamp]["buckets"]))
for k, _ := range state[tstamp]["buckets"] {
for k := range state[tstamp]["buckets"] {
//there is a trick : to know if an event succesfully exit the parsers, we check if it reached the pour() phase
//we thus use a fake stage "buckets" and a fake parser "OK" to know if it entered
if k == "OK" {

View file

@ -173,13 +173,10 @@ func GetItemByPath(itemType string, itemPath string) (*Item, error) {
if m := GetItemMap(itemType); m != nil {
if v, ok := m[finalName]; ok {
return &v, nil
} else {
return nil, fmt.Errorf("%s not found in %s", finalName, itemType)
}
} else {
return nil, fmt.Errorf("item type %s doesn't exist", itemType)
return nil, fmt.Errorf("%s not found in %s", finalName, itemType)
}
return nil, fmt.Errorf("item type %s doesn't exist", itemType)
}
func GetItem(itemType string, itemName string) *Item {
@ -213,25 +210,21 @@ func DisplaySummary() {
//returns: human-text, Enabled, Warning, Unmanaged
func ItemStatus(v Item) (string, bool, bool, bool) {
var Ok, Warning, Managed bool
var strret string
if !v.Installed {
strret = "disabled"
Ok = false
} else {
strret := "disabled"
Ok := false
if v.Installed {
Ok = true
strret = "enabled"
}
Managed := true
if v.Local {
Managed = false
strret += ",local"
} else {
Managed = true
}
//tainted or out of date
Warning := false
if v.Tainted {
Warning = true
strret += ",tainted"

View file

@ -22,8 +22,6 @@ import (
- Upgrade collection
*/
var testDataFolder = "."
func TestItemStatus(t *testing.T) {
cfg := test_prepenv()
@ -43,7 +41,7 @@ func TestItemStatus(t *testing.T) {
}
//Get item : good and bad
for k, _ := range x {
for k := range x {
item := GetItem(COLLECTIONS, k)
if item == nil {
t.Fatalf("expected item")

View file

@ -85,36 +85,37 @@ func DownloadLatest(hub *csconfig.Hub, target Item, overwrite bool, updateOnly b
for idx, ptr := range tmp {
ptrtype := ItemTypes[idx]
for _, p := range ptr {
if val, ok := hubIdx[ptrtype][p]; ok {
if !val.Installed && updateOnly {
log.Debugf("skipping upgrade of %s : not installed", target.Name)
continue
}
log.Debugf("Download %s sub-item : %s %s (%t -> %t)", target.Name, ptrtype, p, target.Installed, updateOnly)
//recurse as it's a collection
if ptrtype == COLLECTIONS {
log.Tracef("collection, recurse")
hubIdx[ptrtype][p], err = DownloadLatest(hub, val, overwrite, updateOnly)
if err != nil {
return target, errors.Wrap(err, fmt.Sprintf("while downloading %s", val.Name))
}
}
item, err := DownloadItem(hub, val, overwrite)
val, ok := hubIdx[ptrtype][p]
if !ok {
return target, fmt.Errorf("required %s %s of %s doesn't exist, abort", ptrtype, p, target.Name)
}
if !val.Installed && updateOnly {
log.Debugf("skipping upgrade of %s : not installed", target.Name)
continue
}
log.Debugf("Download %s sub-item : %s %s (%t -> %t)", target.Name, ptrtype, p, target.Installed, updateOnly)
//recurse as it's a collection
if ptrtype == COLLECTIONS {
log.Tracef("collection, recurse")
hubIdx[ptrtype][p], err = DownloadLatest(hub, val, overwrite, updateOnly)
if err != nil {
return target, errors.Wrap(err, fmt.Sprintf("while downloading %s", val.Name))
}
// We need to enable an item when it has been added to a collection since latest release of the collection.
// We check if val.Downloaded is false because maybe the item has been disabled by the user.
if !item.Installed && !val.Downloaded {
if item, err = EnableItem(hub, item); err != nil {
return target, errors.Wrapf(err, "enabling '%s'", item.Name)
}
}
hubIdx[ptrtype][p] = item
} else {
return target, fmt.Errorf("required %s %s of %s doesn't exist, abort", ptrtype, p, target.Name)
}
item, err := DownloadItem(hub, val, overwrite)
if err != nil {
return target, errors.Wrap(err, fmt.Sprintf("while downloading %s", val.Name))
}
// We need to enable an item when it has been added to a collection since latest release of the collection.
// We check if val.Downloaded is false because maybe the item has been disabled by the user.
if !item.Installed && !val.Downloaded {
if item, err = EnableItem(hub, item); err != nil {
return target, errors.Wrapf(err, "enabling '%s'", item.Name)
}
}
hubIdx[ptrtype][p] = item
}
}
target, err = DownloadItem(hub, target, overwrite)
@ -245,11 +246,10 @@ func downloadData(dataFolder string, force bool, reader io.Reader) error {
data := &types.DataSet{}
err = dec.Decode(data)
if err != nil {
if err == io.EOF {
break
} else {
if err != io.EOF {
return errors.Wrap(err, "while reading file")
}
break
}
download := false

View file

@ -21,7 +21,7 @@ import (
)
/*the walk/parser_visit function can't receive extra args*/
var hubdir, installdir, indexpath string
var hubdir, installdir string
func parser_visit(path string, f os.FileInfo, err error) error {
@ -193,27 +193,26 @@ func parser_visit(path string, f os.FileInfo, err error) error {
if sha != val.Digest {
//log.Printf("matching filenames, wrong hash %s != %s -- %s", sha, val.Digest, spew.Sdump(v))
continue
} else {
/*we got an exact match, update struct*/
if !inhub {
log.Tracef("found exact match for %s, version is %s, latest is %s", v.Name, version, v.Version)
v.LocalPath = path
v.LocalVersion = version
v.Tainted = false
v.Downloaded = true
/*if we're walking the hub, present file doesn't means installed file*/
v.Installed = true
v.LocalHash = sha
x := strings.Split(path, "/")
target.FileName = x[len(x)-1]
}
if version == v.Version {
log.Tracef("%s is up-to-date", v.Name)
v.UpToDate = true
}
match = true
break
}
/*we got an exact match, update struct*/
if !inhub {
log.Tracef("found exact match for %s, version is %s, latest is %s", v.Name, version, v.Version)
v.LocalPath = path
v.LocalVersion = version
v.Tainted = false
v.Downloaded = true
/*if we're walking the hub, present file doesn't means installed file*/
v.Installed = true
v.LocalHash = sha
x := strings.Split(path, "/")
target.FileName = x[len(x)-1]
}
if version == v.Version {
log.Tracef("%s is up-to-date", v.Name)
v.UpToDate = true
}
match = true
break
}
if !match {
log.Tracef("got tainted match for %s : %s", v.Name, path)
@ -257,44 +256,46 @@ func CollecDepsCheck(v *Item) error {
for idx, ptr := range tmp {
ptrtype := ItemTypes[idx]
for _, p := range ptr {
if val, ok := hubIdx[ptrtype][p]; ok {
log.Tracef("check %s installed:%t", val.Name, val.Installed)
if !v.Installed {
continue
}
if val.Type == COLLECTIONS {
log.Tracef("collec, recurse.")
if err := CollecDepsCheck(&val); err != nil {
return fmt.Errorf("sub collection %s is broken : %s", val.Name, err)
}
hubIdx[ptrtype][p] = val
}
//propagate the state of sub-items to set
if val.Tainted {
v.Tainted = true
return fmt.Errorf("tainted %s %s, tainted.", ptrtype, p)
} else if !val.Installed && v.Installed {
v.Tainted = true
return fmt.Errorf("missing %s %s, tainted.", ptrtype, p)
} else if !val.UpToDate {
v.UpToDate = false
return fmt.Errorf("outdated %s %s", ptrtype, p)
}
skip := false
for idx := range val.BelongsToCollections {
if val.BelongsToCollections[idx] == v.Name {
skip = true
}
}
if !skip {
val.BelongsToCollections = append(val.BelongsToCollections, v.Name)
}
hubIdx[ptrtype][p] = val
log.Tracef("checking for %s - tainted:%t uptodate:%t", p, v.Tainted, v.UpToDate)
} else {
val, ok := hubIdx[ptrtype][p]
if !ok {
log.Fatalf("Referred %s %s in collection %s doesn't exist.", ptrtype, p, v.Name)
}
log.Tracef("check %s installed:%t", val.Name, val.Installed)
if !v.Installed {
continue
}
if val.Type == COLLECTIONS {
log.Tracef("collec, recurse.")
if err := CollecDepsCheck(&val); err != nil {
return fmt.Errorf("sub collection %s is broken : %s", val.Name, err)
}
hubIdx[ptrtype][p] = val
}
//propagate the state of sub-items to set
if val.Tainted {
v.Tainted = true
return fmt.Errorf("tainted %s %s, tainted.", ptrtype, p)
}
if !val.Installed && v.Installed {
v.Tainted = true
return fmt.Errorf("missing %s %s, tainted.", ptrtype, p)
}
if !val.UpToDate {
v.UpToDate = false
return fmt.Errorf("outdated %s %s", ptrtype, p)
}
skip := false
for idx := range val.BelongsToCollections {
if val.BelongsToCollections[idx] == v.Name {
skip = true
}
}
if !skip {
val.BelongsToCollections = append(val.BelongsToCollections, v.Name)
}
hubIdx[ptrtype][p] = val
log.Tracef("checking for %s - tainted:%t uptodate:%t", p, v.Tainted, v.UpToDate)
}
}
}
@ -304,7 +305,6 @@ func CollecDepsCheck(v *Item) error {
func SyncDir(hub *csconfig.Hub, dir string) (error, []string) {
hubdir = hub.HubDir
installdir = hub.ConfigDir
indexpath = hub.HubIndexFile
warnings := []string{}
/*For each, scan PARSERS, PARSERS_OVFLW, SCENARIOS and COLLECTIONS last*/

View file

@ -19,13 +19,10 @@ type Blackhole struct {
}
func NewBlackhole(bucketFactory *BucketFactory) (*Blackhole, error) {
var duration time.Duration
if d, err := time.ParseDuration(bucketFactory.Blackhole); err != nil {
duration, err := time.ParseDuration(bucketFactory.Blackhole)
if err != nil {
bucketFactory.logger.Warning("Blackhole duration not valid, using 1h")
return nil, fmt.Errorf("blackhole duration not valid '%s'", bucketFactory.Blackhole)
} else {
duration = d
}
return &Blackhole{
duration: duration,

View file

@ -154,12 +154,11 @@ func testFile(t *testing.T, file string, bs string, holders []BucketFactory, res
tf := TestFile{}
err = dec.Decode(&tf)
if err != nil {
if err == io.EOF {
log.Warningf("end of test file")
} else {
if err != io.EOF {
t.Errorf("Failed to load testfile '%s' yaml error : %v", file, err)
return false
}
log.Warningf("end of test file")
}
var latest_ts time.Time
for _, in := range tf.Lines {
@ -232,19 +231,17 @@ POLL_AGAIN:
log.Infof("dumped bucket to %s", tmpFile)
}
return true
} else {
log.Warningf("%d results to check against %d expected results", len(results), len(tf.Results))
if len(tf.Results) != len(results) {
if dump {
if tmpFile, err = DumpBucketsStateAt(latest_ts, ".", buckets); err != nil {
t.Fatalf("Failed dumping bucket state : %s", err)
}
log.Infof("dumped bucket to %s", tmpFile)
}
log.Warningf("%d results to check against %d expected results", len(results), len(tf.Results))
if len(tf.Results) != len(results) {
if dump {
if tmpFile, err = DumpBucketsStateAt(latest_ts, ".", buckets); err != nil {
t.Fatalf("Failed dumping bucket state : %s", err)
}
log.Errorf("results / expected count doesn't match results = %d / expected = %d", len(results), len(tf.Results))
return false
log.Infof("dumped bucket to %s", tmpFile)
}
log.Errorf("results / expected count doesn't match results = %d / expected = %d", len(results), len(tf.Results))
return false
}
checkresultsloop:
for eidx, out := range results {
@ -260,29 +257,27 @@ POLL_AGAIN:
log.Printf("Here ?")
continue
}
//Scenario
//Scenario
if *out.Overflow.Alert.Scenario != *expected.Overflow.Alert.Scenario {
log.Errorf("(scenario) %v != %v", *out.Overflow.Alert.Scenario, *expected.Overflow.Alert.Scenario)
continue
} else {
log.Infof("(scenario) %v == %v", *out.Overflow.Alert.Scenario, *expected.Overflow.Alert.Scenario)
}
log.Infof("(scenario) %v == %v", *out.Overflow.Alert.Scenario, *expected.Overflow.Alert.Scenario)
//EventsCount
if *out.Overflow.Alert.EventsCount != *expected.Overflow.Alert.EventsCount {
log.Errorf("(EventsCount) %d != %d", *out.Overflow.Alert.EventsCount, *expected.Overflow.Alert.EventsCount)
continue
} else {
log.Infof("(EventsCount) %d == %d", *out.Overflow.Alert.EventsCount, *expected.Overflow.Alert.EventsCount)
}
log.Infof("(EventsCount) %d == %d", *out.Overflow.Alert.EventsCount, *expected.Overflow.Alert.EventsCount)
//Sources
if !reflect.DeepEqual(out.Overflow.Sources, expected.Overflow.Sources) {
log.Errorf("(Sources %s != %s)", spew.Sdump(out.Overflow.Sources), spew.Sdump(expected.Overflow.Sources))
continue
} else {
log.Infof("(Sources: %s == %s)", spew.Sdump(out.Overflow.Sources), spew.Sdump(expected.Overflow.Sources))
}
log.Infof("(Sources: %s == %s)", spew.Sdump(out.Overflow.Sources), spew.Sdump(expected.Overflow.Sources))
}
//Events
// if !reflect.DeepEqual(out.Overflow.Alert.Events, expected.Overflow.Alert.Events) {
@ -309,9 +304,7 @@ POLL_AGAIN:
log.Errorf("we got: %s", spew.Sdump(results))
log.Errorf("we expected: %s", spew.Sdump(tf.Results))
return false
} else {
log.Warningf("entry valid at end of loop")
}
log.Warningf("entry valid at end of loop")
}
return false
}

View file

@ -156,13 +156,12 @@ func LoadBuckets(cscfg *csconfig.CrowdsecServiceCfg, files []string, tomb *tomb.
bucketFactory := BucketFactory{}
err = dec.Decode(&bucketFactory)
if err != nil {
if err == io.EOF {
log.Tracef("End of yaml file")
break
} else {
if err != io.EOF {
log.Errorf("Bad yaml in %s : %v", f, err)
return nil, nil, fmt.Errorf("bad yaml in %s : %v", f, err)
}
log.Tracef("End of yaml file")
break
}
bucketFactory.DataDir = cscfg.DataDir
//check empty

View file

@ -59,15 +59,15 @@ func GarbageCollectBuckets(deadline time.Time, buckets *Buckets) error {
toflush = append(toflush, key)
val.tomb.Kill(nil)
return true
} else {
val.logger.Tracef("(%s) not dead, count:%f capacity:%f", val.First_ts, tokat, tokcapa)
}
val.logger.Tracef("(%s) not dead, count:%f capacity:%f", val.First_ts, tokat, tokcapa)
if _, ok := serialized[key]; ok {
log.Errorf("entry %s already exists", key)
return false
} else {
log.Debugf("serialize %s of %s : %s", val.Name, val.Uuid, val.Mapkey)
}
log.Debugf("serialize %s of %s : %s", val.Name, val.Uuid, val.Mapkey)
return true
})
log.Infof("Cleaned %d buckets", len(toflush))
@ -118,15 +118,14 @@ func DumpBucketsStateAt(deadline time.Time, outputdir string, buckets *Buckets)
val.logger.Debugf("UNDERFLOW : first_ts:%s tokens_at:%f capcity:%f", val.First_ts, tokat, tokcapa)
discard += 1
return true
} else {
val.logger.Debugf("(%s) not dead, count:%f capacity:%f", val.First_ts, tokat, tokcapa)
}
val.logger.Debugf("(%s) not dead, count:%f capacity:%f", val.First_ts, tokat, tokcapa)
if _, ok := serialized[key]; ok {
log.Errorf("entry %s already exists", key)
return false
} else {
log.Debugf("serialize %s of %s : %s", val.Name, val.Uuid, val.Mapkey)
}
log.Debugf("serialize %s of %s : %s", val.Name, val.Uuid, val.Mapkey)
val.SerializedState = val.Limiter.Dump()
serialized[key] = *val
return true

View file

@ -51,13 +51,12 @@ func (u *OverflowFilter) OnBucketOverflow(Bucket *BucketFactory) func(*Leaky, ty
}
/*filter returned false, event is blackholded*/
if !element {
l.logger.Infof("Event is discard by overflow filter (%s)", u.Filter)
l.logger.Infof("Event is discarded by overflow filter (%s)", u.Filter)
return types.RuntimeAlert{
Mapkey: l.Mapkey,
}, nil
} else {
l.logger.Tracef("Event is not discard by overflow filter (%s)", u.Filter)
}
l.logger.Tracef("Event is not discarded by overflow filter (%s)", u.Filter)
return s, q
}
}

View file

@ -64,16 +64,14 @@ func SourceFromEvent(evt types.Event, leaky *Leaky) (map[string]models.Source, e
src := models.Source{}
switch leaky.scopeType.Scope {
case types.Range, types.Ip:
if v, ok := evt.Meta["source_ip"]; ok {
if net.ParseIP(v) == nil {
return srcs, fmt.Errorf("scope is %s but '%s' isn't a valid ip", leaky.scopeType.Scope, v)
} else {
src.IP = v
}
} else {
v, ok := evt.Meta["source_ip"]
if !ok {
return srcs, fmt.Errorf("scope is %s but Meta[source_ip] doesn't exist", leaky.scopeType.Scope)
}
if net.ParseIP(v) == nil {
return srcs, fmt.Errorf("scope is %s but '%s' isn't a valid ip", leaky.scopeType.Scope, v)
}
src.IP = v
src.Scope = &leaky.scopeType.Scope
if v, ok := evt.Enriched["ASNumber"]; ok {
src.AsNumber = v
@ -104,7 +102,8 @@ func SourceFromEvent(evt types.Event, leaky *Leaky) (map[string]models.Source, e
_, ipNet, err := net.ParseCIDR(v)
if err != nil {
return srcs, fmt.Errorf("Declared range %s of %s can't be parsed", v, src.IP)
} else if ipNet != nil {
}
if ipNet != nil {
src.Range = ipNet.String()
leaky.logger.Tracef("Valid range from %s : %s", src.IP, src.Range)
}
@ -116,23 +115,22 @@ func SourceFromEvent(evt types.Event, leaky *Leaky) (map[string]models.Source, e
}
srcs[*src.Value] = src
default:
if leaky.scopeType.RunTimeFilter != nil {
retValue, err := expr.Run(leaky.scopeType.RunTimeFilter, exprhelpers.GetExprEnv(map[string]interface{}{"evt": &evt}))
if err != nil {
return srcs, errors.Wrapf(err, "while running scope filter")
}
value, ok := retValue.(string)
if !ok {
value = ""
}
src.Value = &value
src.Scope = new(string)
*src.Scope = leaky.scopeType.Scope
srcs[*src.Value] = src
} else {
if leaky.scopeType.RunTimeFilter == nil {
return srcs, fmt.Errorf("empty scope information")
}
retValue, err := expr.Run(leaky.scopeType.RunTimeFilter, exprhelpers.GetExprEnv(map[string]interface{}{"evt": &evt}))
if err != nil {
return srcs, errors.Wrapf(err, "while running scope filter")
}
value, ok := retValue.(string)
if !ok {
value = ""
}
src.Value = &value
src.Scope = new(string)
*src.Scope = leaky.scopeType.Scope
srcs[*src.Value] = src
}
return srcs, nil
}

View file

@ -43,9 +43,8 @@ func (u *CancelOnFilter) OnBucketPour(bucketFactory *BucketFactory) func(types.E
leaky.logger.Debugf("reset_filter matched, kill bucket")
leaky.Suicide <- true
return nil //counter intuitively, we need to keep the message so that it doesn't trigger an endless loop
} else {
leaky.logger.Debugf("reset_filter didn't match")
}
leaky.logger.Debugf("reset_filter didn't match")
}
return &msg
}

View file

@ -97,7 +97,6 @@ func (lim *Limiter) Load(st Lstate) {
lim.tokens = st.Tokens
lim.last = st.Last
lim.lastEvent = st.LastEvent
return
}
// Limit returns the maximum overall event rate.

View file

@ -81,7 +81,7 @@ type RuntimeAlert struct {
func (r RuntimeAlert) GetSources() []string {
ret := make([]string, 0)
for key, _ := range r.Sources {
for key := range r.Sources {
ret = append(ret, key)
}
return ret

View file

@ -38,17 +38,19 @@ func Addr2Ints(any string) (int, int64, int64, int64, int64, error) {
return -1, 0, 0, 0, 0, errors.Wrapf(err, "while parsing range %s", any)
}
return Range2Ints(*net)
} else {
ip := net.ParseIP(any)
if ip == nil {
return -1, 0, 0, 0, 0, fmt.Errorf("invalid address")
}
sz, start, end, err := IP2Ints(ip)
if err != nil {
return -1, 0, 0, 0, 0, errors.Wrapf(err, "while parsing ip %s", any)
}
return sz, start, end, start, end, nil
}
ip := net.ParseIP(any)
if ip == nil {
return -1, 0, 0, 0, 0, fmt.Errorf("invalid address")
}
sz, start, end, err := IP2Ints(ip)
if err != nil {
return -1, 0, 0, 0, 0, errors.Wrapf(err, "while parsing ip %s", any)
}
return sz, start, end, start, end, nil
}
/*size (16|4), nw_start, suffix_start, nw_end, suffix_end, error*/

View file

@ -96,9 +96,8 @@ func (n *EmailPlugin) Notify(ctx context.Context, notification *protobufs.Notifi
err = email.Send(smtpClient)
if err != nil {
return &protobufs.Empty{}, err
} else {
logger.Info(fmt.Sprintf("sent email to %v", cfg.ReceiverEmails))
}
logger.Info(fmt.Sprintf("sent email to %v", cfg.ReceiverEmails))
return &protobufs.Empty{}, nil
}