fix linter warnings : dead code, simplification

Author: Thibault bui Koechlin 2020-05-20 10:49:17 +02:00
parent 4bed585d6b
commit db9e1e280d
24 changed files with 110 additions and 115 deletions
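
Two linter patterns account for most of the diff: errcheck-style fixes, where an error return that used to be silently discarded is now checked and logged or propagated, and gosimple-style fixes, where comparisons like 'x == true' / 'x == false' collapse to 'x' / '!x'. A minimal standalone sketch of both patterns (hypothetical names, not code from this repository):

    package main

    import (
        "fmt"
        "log"
        "os"
    )

    // save shows the errcheck pattern: the error return of os.WriteFile
    // used to be discarded; now it is checked and wrapped.
    func save(path string, data []byte) error {
        // before: os.WriteFile(path, data, 0644)
        if err := os.WriteFile(path, data, 0644); err != nil {
            return fmt.Errorf("failed to write %s : %s", path, err)
        }
        return nil
    }

    func main() {
        verbose := true
        // before: if verbose == true { ... }
        if verbose {
            log.Println("saving example file")
        }
        if err := save("/tmp/example.txt", []byte("hello")); err != nil {
            log.Fatalf("save : %s", err)
        }
    }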

@@ -265,7 +265,9 @@ cscli api credentials # Display your API credentials
 	cmdAPI.AddCommand(cmdAPICreds)
 	cmdAPIEnroll.Flags().StringVarP(&userID, "user", "u", "", "User ID (required)")
-	cmdAPIEnroll.MarkFlagRequired("user")
+	if err := cmdAPIEnroll.MarkFlagRequired("user"); err != nil {
+		log.Errorf("'user' flag : %s", err)
+	}
 	cmdAPI.AddCommand(cmdAPIEnroll)
 	cmdAPI.AddCommand(cmdAPIResetPassword)
 	cmdAPI.AddCommand(cmdAPIRegister)

@@ -164,15 +164,6 @@ func BanAdd(target string, duration string, reason string, action string) error
 	return nil
 }
-func banFlush() error {
-	allBa := types.BanApplication{}
-	records := dbctx.Db.Delete(&allBa)
-	if records.Error != nil {
-		return records.Error
-	}
-	return nil
-}
 func NewBanCmds() *cobra.Command {
 	/*TODO : add a remediation type*/
 	var cmdBan = &cobra.Command{

@@ -16,10 +16,8 @@ import (
 /*CliCfg is the cli configuration structure, might be unexported*/
 type cliConfig struct {
 	configured          bool
-	simulation          bool      /*are we in simulation mode*/
 	configFolder        string    `yaml:"cliconfig,omitempty"` /*overload ~/.cscli/*/
 	output              string    /*output is human, json*/
-	logLevel            log.Level /*debug,info,warning,error*/
 	hubFolder           string
 	InstallFolder       string    `yaml:"installdir"` /*/etc/crowdsec/*/
 	BackendPluginFolder string    `yaml:"backend"`

@@ -128,7 +128,7 @@ func downloadMetabaseDB(force bool) error {
 	metabaseDBSubpath := path.Join(metabaseDbPath, "metabase.db")
 	_, err := os.Stat(metabaseDBSubpath)
-	if err == nil && force == false {
+	if err == nil && !force {
 		log.Printf("%s exists, skip.", metabaseDBSubpath)
 		return nil
 	}

@@ -41,23 +41,6 @@ func InstallItem(name string, obtype string) {
 	/*iterate of pkg index data*/
 }
-func InstallScenario(name string) {
-	InstallItem(name, cwhub.SCENARIOS)
-}
-func InstallCollection(name string) {
-	InstallItem(name, cwhub.COLLECTIONS)
-}
-func InstallParser(name string) {
-	InstallItem(name, cwhub.PARSERS)
-}
-func InstallPostoverflow(name string) {
-	InstallItem(name, cwhub.PARSERS_OVFLW)
-}
 func NewInstallCmd() *cobra.Command {
 	/* ---- INSTALL COMMAND */

@@ -135,5 +135,7 @@ API interaction:
 	rootCmd.AddCommand(NewDashboardCmd())
 	rootCmd.AddCommand(NewInspectCmd())
-	rootCmd.Execute()
+	if err := rootCmd.Execute(); err != nil {
+		log.Fatalf("While executing root command : %s", err)
+	}
 }

@@ -51,9 +51,9 @@ func UpgradeConfig(ttype string, name string) {
 		}
 		cwhub.HubIdx[ttype][v.Name] = v
 	}
-	if found == false {
+	if !found {
 		log.Errorf("Didn't find %s", name)
-	} else if updated == 0 && found == true {
+	} else if updated == 0 && found {
 		log.Errorf("Nothing to update")
 	} else if updated != 0 {
 		log.Infof("Upgraded %d items", updated)
@@ -93,14 +93,14 @@ cscli upgrade --force # Overwrite tainted configuration
 			return nil
 		},
 		Run: func(cmd *cobra.Command, args []string) {
-			if upgrade_all == false && len(args) < 2 {
+			if !upgrade_all && len(args) < 2 {
 				_ = cmd.Help()
 				return
 			}
 			if err := cwhub.GetHubIdx(); err != nil {
 				log.Fatalf("Failed to get Hub index : %v", err)
 			}
-			if upgrade_all == true && len(args) == 0 {
+			if upgrade_all && len(args) == 0 {
 				log.Warningf("Upgrade all : parsers, scenarios, collections.")
 				UpgradeConfig(cwhub.PARSERS, "")
 				UpgradeConfig(cwhub.PARSERS_OVFLW, "")
@@ -127,7 +127,7 @@ cscli upgrade --force # Overwrite tainted configuration
 			if len(args) == 1 {
 				UpgradeConfig(cwhub.PARSERS, args[0])
 				//UpgradeConfig(cwhub.PARSERS_OVFLW, "")
-			} else if upgrade_all == true {
+			} else if upgrade_all {
 				UpgradeConfig(cwhub.PARSERS, "")
 			} else {
 				_ = cmd.Help()
@@ -148,7 +148,7 @@ cscli upgrade --force # Overwrite tainted configuration
 			}
 			if len(args) == 1 {
 				UpgradeConfig(cwhub.SCENARIOS, args[0])
-			} else if upgrade_all == true {
+			} else if upgrade_all {
 				UpgradeConfig(cwhub.SCENARIOS, "")
 			} else {
 				_ = cmd.Help()
@@ -170,7 +170,7 @@ cscli upgrade --force # Overwrite tainted configuration
 			}
 			if len(args) == 1 {
 				UpgradeConfig(cwhub.COLLECTIONS, args[0])
-			} else if upgrade_all == true {
+			} else if upgrade_all {
 				UpgradeConfig(cwhub.COLLECTIONS, "")
 			} else {
 				_ = cmd.Help()
@@ -193,7 +193,7 @@ cscli upgrade --force # Overwrite tainted configuration
 			}
 			if len(args) == 1 {
 				UpgradeConfig(cwhub.PARSERS_OVFLW, args[0])
-			} else if upgrade_all == true {
+			} else if upgrade_all {
 				UpgradeConfig(cwhub.PARSERS_OVFLW, "")
 			} else {
 				_ = cmd.Help()

@@ -98,7 +98,7 @@ func main() {
 	log.Infof("Crowdwatch %s", cwversion.VersionStr())
-	if cConfig.Prometheus == true {
+	if cConfig.Prometheus {
 		registerPrometheus()
 		cConfig.Profiling = true
 	}
@@ -117,7 +117,7 @@ func main() {
 	}
 	/*enable profiling*/
-	if cConfig.Profiling == true {
+	if cConfig.Profiling {
 		go runTachymeter(cConfig.HTTPListen)
 		parserCTX.Profiling = true
 		postOverflowCTX.Profiling = true
@@ -233,7 +233,7 @@ func main() {
 			log.Fatalf("unable to restore buckets : %s", err)
 		}
 	}
-	if cConfig.Profiling == true {
+	if cConfig.Profiling {
 		//force the profiling in all buckets
 		for holderIndex := range holders {
 			holders[holderIndex].Profiling = true

@@ -10,9 +10,10 @@ import (
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/client_golang/prometheus/promhttp"
-	log "github.com/sirupsen/logrus"
 	"net/http"
+	log "github.com/sirupsen/logrus"
 	"runtime"
 )
@@ -67,7 +68,7 @@ var globalBucketPourOk = prometheus.NewCounter(
 func dumpMetrics() {
-	if cConfig.DumpBuckets == true {
+	if cConfig.DumpBuckets {
 		log.Infof("!! Dumping buckets state")
 		if err := leaky.DumpBucketsStateAt("buckets_state.json", time.Now(), buckets); err != nil {
 			log.Fatalf("Failed dumping bucket state : %s", err)
@@ -98,7 +99,9 @@ func dumpMetrics() {
 			log.Infof("Lines never poured : %d (%.2f%%)", linesPouredKO, float64(linesPouredKO)/float64(linesPouredOK)*100.0)
 		}
 		log.Infof("Writting metrics dump to %s", cConfig.WorkingFolder+"/crowdsec.profile")
-		prometheus.WriteToTextfile(cConfig.WorkingFolder+"/crowdsec.profile", prometheus.DefaultGatherer)
+		if err := prometheus.WriteToTextfile(cConfig.WorkingFolder+"/crowdsec.profile", prometheus.DefaultGatherer); err != nil {
+			log.Errorf("failed to write metrics to %s : %s", cConfig.WorkingFolder+"/crowdsec.profile", err)
+		}
 	}
 }

@@ -26,7 +26,7 @@ LOOP:
 			if cConfig.Profiling {
 				start = time.Now()
 			}
-			if event.Process == false {
+			if !event.Process {
 				if cConfig.Profiling {
 					atomic.AddUint64(&linesReadKO, 1)
 				}
@@ -43,7 +43,7 @@ LOOP:
 				log.Errorf("failed parsing : %v\n", error)
 				return errors.New("parsing failed :/")
 			}
-			if parsed.Process == false {
+			if !parsed.Process {
 				if cConfig.Profiling {
 					globalParserHitsKo.With(prometheus.Labels{"source": event.Line.Src}).Inc()
 					atomic.AddUint64(&linesParsedKO, 1)
@@ -57,7 +57,7 @@ LOOP:
 				atomic.AddUint64(&linesParsedOK, 1)
 			}
 			processCPT++
-			if parsed.Whitelisted == true {
+			if parsed.Whitelisted {
 				log.Debugf("event whitelisted, discard")
 				continue
 			}

@@ -37,7 +37,9 @@ LOOP:
 					log.Warningf("Failed to unmarshal time from event '%s' : %s", parsed.MarshaledTime, err)
 				} else {
 					log.Warningf("Starting buckets garbage collection ...")
-					leaky.GarbageCollectBuckets(*z, buckets)
+					if err = leaky.GarbageCollectBuckets(*z, buckets); err != nil {
+						return fmt.Errorf("failed to start bucket GC : %s", err)
+					}
 				}
 			}
 		}
@@ -57,7 +59,9 @@ LOOP:
 			if cConfig.Profiling {
 				bucketStat.AddTime(time.Since(start))
 			}
-			lastProcessedItem.UnmarshalText([]byte(parsed.MarshaledTime))
+			if err := lastProcessedItem.UnmarshalText([]byte(parsed.MarshaledTime)); err != nil {
+				return fmt.Errorf("failed to unmarshal item : %s", err)
+			}
 		}
 	}
 	log.Infof("Sending signal Bucketify")

@@ -117,7 +117,7 @@ func serveOneTimeRun(outputRunner outputs.Output) error {
 func serve(outputRunner outputs.Output) error {
 	var err error
-	if cConfig.Daemonize == true {
+	if cConfig.Daemonize {
 		if err = serveDaemon(); err != nil {
 			return fmt.Errorf(err.Error())
 		}

@@ -82,7 +82,7 @@ func (c *Crowdwatch) GetOPT() error {
 	flag.Parse()
-	if *printVersion == true {
+	if *printVersion {
 		cwversion.Show()
 		os.Exit(0)
 	}
@@ -112,7 +112,7 @@ func (c *Crowdwatch) GetOPT() error {
 	if *AcquisitionFile != "" {
 		c.AcquisitionFile = *AcquisitionFile
 	}
-	if *dumpMode == true {
+	if *dumpMode {
 		c.DumpBuckets = true
 	}
 	if *prometheus {

@@ -187,7 +187,7 @@ func parser_visit(path string, f os.FileInfo, err error) error {
 	}
 	//if it's not a symlink and not in hub, it's a local file, don't bother
-	if local == true && inhub == false {
+	if local && !inhub {
 		log.Debugf("%s is a local file, skip", path)
 		skippedLocal++
 		// log.Printf("local scenario, skip.")
@@ -252,8 +252,7 @@ func parser_visit(path string, f os.FileInfo, err error) error {
 				continue
 			} else {
 				/*we got an exact match, update struct*/
-				// log.Printf("got exact match")
-				if inhub == false {
+				if !inhub {
 					log.Debugf("found exact match for %s, version is %s, latest is %s", v.Name, version, v.Version)
 					v.LocalPath = path
 					v.LocalVersion = version
@@ -275,11 +274,11 @@ func parser_visit(path string, f os.FileInfo, err error) error {
 			}
 		}
-		if match == false {
+		if !match {
 			log.Debugf("got tainted match for %s : %s", v.Name, path)
 			skippedTainted += 1
 			//the file and the stage is right, but the hash is wrong, it has been tainted by user
-			if inhub == false {
+			if !inhub {
 				v.LocalPath = path
 				v.Installed = true
 			}
@@ -321,13 +320,13 @@ func CollecDepsCheck(v *Item) error {
 			}
 			//propagate the state of sub-items to set
-			if val.Tainted == true {
+			if val.Tainted {
 				v.Tainted = true
 				return fmt.Errorf("tainted %s %s, tainted.", ptrtype, p)
-			} else if val.Installed == false && v.Installed == true {
+			} else if !val.Installed && v.Installed {
 				v.Tainted = true
 				return fmt.Errorf("missing %s %s, tainted.", ptrtype, p)
-			} else if val.UpToDate == false {
+			} else if !val.UpToDate {
 				v.UpToDate = false
 				return fmt.Errorf("outdated %s %s", ptrtype, p)
 			}
@@ -553,14 +552,14 @@ func DisableItem(target Item, tdir string, hdir string, purge bool) (Item, error
 func EnableItem(target Item, tdir string, hdir string) (Item, error) {
 	parent_dir := filepath.Clean(tdir + "/" + target.Type + "/" + target.Stage + "/")
 	/*create directories if needed*/
-	if target.Installed == true {
-		if target.Tainted == true {
+	if target.Installed {
+		if target.Tainted {
 			return target, fmt.Errorf("%s is tainted, won't enable unless --force", target.Name)
 		}
-		if target.Local == true {
+		if target.Local {
 			return target, fmt.Errorf("%s is local, won't enable", target.Name)
 		}
-		if target.UpToDate == true {
+		if target.UpToDate {
 			log.Debugf("%s is installed and up-to-date, skip.", target.Name)
 			return target, nil
 		}
@@ -690,7 +689,9 @@ func DownloadItem(target Item, tdir string, overwrite bool) (Item, error) {
 		return target, err
 	}
 	h := sha256.New()
-	h.Write([]byte(body))
+	if _, err := h.Write([]byte(body)); err != nil {
+		return target, fmt.Errorf("%s : failed to write : %s", target.Name, err)
+	}
 	meow := fmt.Sprintf("%x", h.Sum(nil))
 	if meow != target.Versions[target.Version].Digest {
 		log.Errorf("Downloaded version doesn't match index, please 'hub update'")
@@ -737,7 +738,7 @@ func ItemStatus(v Item) (string, bool, bool, bool) {
 	var Ok, Warning, Managed bool
 	var strret string
-	if v.Installed == false {
+	if !v.Installed {
 		strret = "disabled"
 		Ok = false
 	} else {
@@ -745,7 +746,7 @@
 		strret = "enabled"
 	}
-	if v.Local == true {
+	if v.Local {
 		Managed = false
 		strret += ",local"
 	} else {
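
The DownloadItem hunk above hashes the downloaded body with sha256 and compares the hex digest against the hub index before accepting the file. A self-contained sketch of that verification, with the Write error checked as in this commit (verifyDigest is a hypothetical helper, not part of cwhub); note that hash.Hash.Write is documented to never return an error, so the check exists mainly to satisfy the linter:

    package main

    import (
        "crypto/sha256"
        "fmt"
    )

    // verifyDigest recomputes the sha256 of body and compares it to the
    // expected hex digest, mirroring the shape of the DownloadItem check.
    func verifyDigest(body []byte, expected string) error {
        h := sha256.New()
        if _, err := h.Write(body); err != nil {
            return fmt.Errorf("failed to write : %s", err)
        }
        if got := fmt.Sprintf("%x", h.Sum(nil)); got != expected {
            return fmt.Errorf("digest mismatch : got %s, want %s", got, expected)
        }
        return nil
    }

    func main() {
        body := []byte("hello")
        want := fmt.Sprintf("%x", sha256.Sum256(body))
        fmt.Println(verifyDigest(body, want)) // <nil>
    }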

@@ -134,7 +134,9 @@ func (b *BackendManager) DeleteAll() error {
 // Insert the signal for the plugin specified in the config["plugin"] parameter
 func (b *BackendManager) InsertOnePlugin(sig types.SignalOccurence, pluginName string) error {
 	if val, ok := b.backendPlugins[pluginName]; ok {
-		val.funcs.Insert(sig)
+		if err := val.funcs.Insert(sig); err != nil {
+			return fmt.Errorf("failed to load %s : %s", pluginName, err)
+		}
 	} else {
 		return fmt.Errorf("plugin '%s' not loaded", pluginName)
 	}

@@ -135,7 +135,7 @@ func FromFactory(g BucketFactory) *Leaky {
 	} else {
 		limiter = rate.NewLimiter(rate.Every(g.leakspeed), g.Capacity)
 	}
-	if g.Profiling == true {
+	if g.Profiling {
 		BucketsInstanciation.With(prometheus.Labels{"name": g.Name}).Inc()
 	}
 	//create the leaky bucket per se
@@ -208,7 +208,7 @@ func LeakRoutine(l *Leaky) {
 			l.logger.Tracef("Pour event: %s", spew.Sdump(msg))
 			l.logger.Debugf("Pouring event.")
-			if l.Profiling == true {
+			if l.Profiling {
 				BucketsPour.With(prometheus.Labels{"name": l.Name, "source": msg.Line.Src}).Inc()
 			}
 			l.Pour(l, msg) // glue for now
@@ -236,7 +236,7 @@ func LeakRoutine(l *Leaky) {
 			l.logger.Tracef("Overflow event: %s", spew.Sdump(types.Event{Overflow: sig}))
 			mt, _ := l.Ovflw_ts.MarshalText()
 			l.logger.Tracef("overflow time : %s", mt)
-			if l.Profiling == true {
+			if l.Profiling {
 				BucketsOverflow.With(prometheus.Labels{"name": l.Name}).Inc()
 			}
 			l.AllOut <- types.Event{Overflow: sig, Type: types.OVFLW, MarshaledTime: string(mt)}
@@ -249,7 +249,7 @@ func LeakRoutine(l *Leaky) {
 			sig := types.SignalOccurence{MapKey: l.Mapkey}
 			if l.timedOverflow {
-				if l.Profiling == true {
+				if l.Profiling {
 					BucketsOverflow.With(prometheus.Labels{"name": l.Name}).Inc()
 				}
 				sig = FormatOverflow(l, ofw)

@@ -82,7 +82,10 @@ func testOneBucket(t *testing.T, dir string) error {
 		files = append(files, x.Filename)
 	}
 	holders, response, err := LoadBuckets(files)
-	if testFile(t, dir+"/test.yaml", dir+"/in-buckets_state.json", holders, response) == false {
+	if err != nil {
+		t.Fatalf("failed loading bucket : %s", err)
+	}
+	if !testFile(t, dir+"/test.yaml", dir+"/in-buckets_state.json", holders, response) {
 		t.Fatalf("the test failed")
 	}
 	return nil
@@ -241,7 +244,7 @@ POLL_AGAIN:
 	//CheckFailed:
-	if valid == true {
+	if valid {
 		log.Warningf("The test is valid, remove entry %d from expects, and %d from t.Results", eidx, ridx)
 		//don't do this at home : delete current element from list and redo
 		results[eidx] = results[len(results)-1]
@@ -252,7 +255,7 @@ POLL_AGAIN:
 			}
 		}
 	}
-	if valid == false {
+	if !valid {
 		t.Fatalf("mismatching entries left")
 	} else {
 		log.Warningf("entry valid at end of loop")

@@ -189,7 +189,7 @@ func LoadBucketDir(dir string) ([]BucketFactory, chan types.Event, error) {
 /* Init recursively process yaml files from a directory and loads them as BucketFactory */
 func LoadBucket(g *BucketFactory) error {
 	var err error
-	if g.Debug == true {
+	if g.Debug {
 		var clog = logrus.New()
 		clog.SetFormatter(&log.TextFormatter{FullTimestamp: true})
 		clog.SetLevel(log.DebugLevel)
@@ -326,12 +326,12 @@ func LoadBucketsState(file string, buckets *Buckets, holders []BucketFactory) er
 			tbucket.Total_count = v.Total_count
 			buckets.Bucket_map.Store(k, tbucket)
 			go LeakRoutine(tbucket)
-			_ = <-tbucket.Signal
+			<-tbucket.Signal
 			found = true
 			break
 		}
 	}
-	if found == false {
+	if !found {
 		log.Fatalf("Unable to find holder for bucket %s : %s", k, spew.Sdump(v))
 	}
 }
@@ -530,7 +530,7 @@ func PourItemToHolders(parsed types.Event, holders []BucketFactory, buckets *Buc
 			go LeakRoutine(fresh_bucket)
 			log.Debugf("Created new bucket %s", buckey)
 			//wait for signal to be opened
-			_ = <-fresh_bucket.Signal
+			<-fresh_bucket.Signal
 			continue
 		}
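
The two '_ = <-...Signal' cleanups above rely on the fact that a bare receive already discards the received value, so assigning it to the blank identifier is pure noise (the gosimple "unnecessary blank identifier" check). A tiny sketch of the same synchronization idiom:

    package main

    import "fmt"

    func main() {
        ready := make(chan bool)
        go func() { ready <- true }()
        // before: _ = <-ready
        <-ready // block until the goroutine signals; the value is unused
        fmt.Println("bucket routine signaled")
    }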

@@ -50,7 +50,7 @@ func (u *OverflowFilter) OnBucketOverflow(Bucket *BucketFactory) func(*Leaky, ty
 			return s, q
 		}
 		/*filter returned false, event is blackholded*/
-		if element == false {
+		if !element {
 			l.logger.Infof("Event is discard by overflow filter (%s)", u.Filter)
 			return types.SignalOccurence{
 				MapKey: l.Mapkey,

@@ -35,11 +35,11 @@ func OvflwToOrder(sig types.SignalOccurence, prof types.Profile) (*types.BanOrde
 	var warn error
 	//Identify remediation type
-	if prof.Remediation.Ban == true {
+	if prof.Remediation.Ban {
 		ordr.MeasureType = "ban"
-	} else if prof.Remediation.Slow == true {
+	} else if prof.Remediation.Slow {
 		ordr.MeasureType = "slow"
-	} else if prof.Remediation.Captcha == true {
+	} else if prof.Remediation.Captcha {
 		ordr.MeasureType = "captcha"
 	} else {
 		/*if the profil has no remediation, no order */
@@ -120,10 +120,10 @@ func (o *Output) ProcessOutput(sig types.SignalOccurence, profiles []types.Profi
 			logger.Warningf("failed to run filter : %v", err)
 			continue
 		}
-		switch output.(type) {
+		switch out := output.(type) {
 		case bool:
 			/* filter returned false, don't process Node */
-			if output.(bool) == false {
+			if !out {
 				logger.Debugf("eval(FALSE) '%s'", profile.Filter)
 				continue
 			}
@@ -158,7 +158,9 @@ func (o *Output) ProcessOutput(sig types.SignalOccurence, profiles []types.Profi
 		// if ApiPush is nil (not specified in profile configuration) we use global api config (from default.yaml)
 		if profile.ApiPush == nil {
 			if o.API != nil { // if API is not nil, we can push
-				o.API.AppendSignal((sig))
+				if err = o.API.AppendSignal((sig)); err != nil {
+					return fmt.Errorf("failed to append signal : %s", err)
+				}
 			}
 		}
 		for _, outputConfig := range profile.OutputConfigs {
@@ -173,7 +175,9 @@ func (o *Output) ProcessOutput(sig types.SignalOccurence, profiles []types.Profi
 					continue
 				}
 			}
-			o.bManager.InsertOnePlugin(sig, pluginName)
+			if err = o.bManager.InsertOnePlugin(sig, pluginName); err != nil {
+				return fmt.Errorf("failed to insert plugin %s : %s", pluginName, err)
+			}
 		}
 	}
 }
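
The 'switch out := output.(type)' form above binds the asserted value inside the switch, so each case already sees it with the concrete type and the repeated 'output.(bool)' assertion disappears. A minimal sketch (describe and its cases are illustrative, not repository code):

    package main

    import "fmt"

    // describe binds the switched value so no case needs to re-assert it.
    func describe(output interface{}) string {
        switch out := output.(type) {
        case bool:
            if !out {
                return "filter returned false"
            }
            return "filter returned true"
        case string:
            return "string result: " + out
        default:
            return fmt.Sprintf("unexpected type %T", out)
        }
    }

    func main() {
        fmt.Println(describe(false))
        fmt.Println(describe("captcha"))
    }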

@@ -92,7 +92,7 @@ func (n *Node) validate(pctx *UnixParserCtx) error {
 				break
 			}
 		}
-		if method_found == false {
+		if !method_found {
 			return fmt.Errorf("the method '%s' doesn't exist", static.Method)
 		}
 	} else {
@@ -120,10 +120,10 @@ func (n *Node) process(p *types.Event, ctx UnixParserCtx) (bool, error) {
 			clog.Debugf("Event leaving node : ko")
 			return false, nil
 		}
-		switch output.(type) {
+		switch out := output.(type) {
 		case bool:
 			/* filter returned false, don't process Node */
-			if output.(bool) == false {
+			if !out {
 				NodeState = false
 				clog.Debugf("eval(FALSE) '%s'", n.Filter)
 				clog.Debugf("Event leaving node : ko")
@@ -142,7 +142,7 @@ func (n *Node) process(p *types.Event, ctx UnixParserCtx) (bool, error) {
 		NodeState = true
 	}
-	if n.Profiling == true && n.Name != "" {
+	if n.Profiling && n.Name != "" {
 		NodesHits.With(prometheus.Labels{"source": p.Line.Src, "name": n.Name}).Inc()
 	}
 	set := false
@@ -188,14 +188,14 @@ func (n *Node) process(p *types.Event, ctx UnixParserCtx) (bool, error) {
 			switch output.(type) {
 			case bool:
 				/* filter returned false, don't process Node */
-				if output.(bool) == true {
+				if output.(bool) {
 					clog.Infof("Event is whitelisted by Expr !")
 					p.Whitelisted = true
 					set = true
 				}
 			}
 		}
-		if set == true {
+		if set {
 			p.WhiteListReason = n.Whitelist.Reason
 			/*huglily wipe the ban order if the event is whitelisted and it's an overflow */
 			if p.Type == types.OVFLW { /*don't do this at home kids */
@@ -217,7 +217,7 @@ func (n *Node) process(p *types.Event, ctx UnixParserCtx) (bool, error) {
 				return false, err
 			}
 			clog.Tracef("\tsub-node (%s) ret : %v (strategy:%s)", leaf.rn, ret, n.OnSuccess)
-			if ret == true {
+			if ret {
 				NodeState = true
 				/* if chil is successful, stop processing */
 				if n.OnSuccess == "next_stage" {
@@ -280,15 +280,15 @@ func (n *Node) process(p *types.Event, ctx UnixParserCtx) (bool, error) {
 	}
 	//grok or leafs failed, don't process statics
-	if NodeState == false {
-		if n.Profiling == true && n.Name != "" {
+	if !NodeState {
+		if n.Profiling && n.Name != "" {
 			NodesHitsKo.With(prometheus.Labels{"source": p.Line.Src, "name": n.Name}).Inc()
 		}
 		clog.Debugf("Event leaving node : ko")
 		return NodeState, nil
 	}
-	if n.Profiling == true && n.Name != "" {
+	if n.Profiling && n.Name != "" {
 		NodesHitsOk.With(prometheus.Labels{"source": p.Line.Src, "name": n.Name}).Inc()
 	}
 	if len(n.Statics) > 0 {
@@ -302,7 +302,7 @@ func (n *Node) process(p *types.Event, ctx UnixParserCtx) (bool, error) {
 		clog.Tracef("! No node statics")
 	}
-	if NodeState == true {
+	if NodeState {
 		clog.Debugf("Event leaving node : ok")
 		log.Tracef("node is successful, check strategy")
 		if n.OnSuccess == "next_stage" {
@@ -336,7 +336,7 @@ func (n *Node) compile(pctx *UnixParserCtx) error {
 	log.Debugf("compile, node is %s", n.Stage)
 	/* if the node has debugging enabled, create a specific logger with debug
 	   that will be used only for processing this node ;) */
-	if n.Debug == true {
+	if n.Debug {
 		var clog = logrus.New()
 		clog.SetLevel(log.DebugLevel)
 		n.logger = clog.WithFields(log.Fields{
@@ -414,10 +414,10 @@ func (n *Node) compile(pctx *UnixParserCtx) error {
 	if len(n.SuccessNodes) > 0 {
 		for idx, _ := range n.SuccessNodes {
 			/*propagate debug/stats to child nodes*/
-			if n.SuccessNodes[idx].Debug == false && n.Debug == true {
+			if !n.SuccessNodes[idx].Debug && n.Debug {
 				n.SuccessNodes[idx].Debug = true
 			}
-			if n.SuccessNodes[idx].Profiling == false && n.Profiling == true {
+			if !n.SuccessNodes[idx].Profiling && n.Profiling {
 				n.SuccessNodes[idx].Profiling = true
 			}
 			n.SuccessNodes[idx].Stage = n.Stage
@@ -468,7 +468,7 @@ func (n *Node) compile(pctx *UnixParserCtx) error {
 		valid = true
 	}
-	if valid == false {
+	if !valid {
 		/* node is empty, error force return */
 		n.logger.Infof("Node is empty: %s", spew.Sdump(n))
 		n.Stage = ""

@@ -48,7 +48,7 @@ func testOneParser(t *testing.T, dir string) error {
 	var p UnixParser
 	var pctx *UnixParserCtx
 	var err error
-	var pnodes []Node = make([]Node, 0)
+	var pnodes []Node
 	log.SetLevel(log.DebugLevel)
@@ -64,7 +64,7 @@ func testOneParser(t *testing.T, dir string) error {
 	//Init the enricher
 	pplugins, err := Loadplugin(datadir)
 	if err != nil {
-		return fmt.Errorf("Failed to load plugin geoip : %v", err)
+		return fmt.Errorf("failed to load plugin geoip : %v", err)
 	}
 	ECTX = append(ECTX, pplugins)
 	log.Debugf("Geoip ctx : %v", ECTX)
@@ -92,7 +92,7 @@ func testOneParser(t *testing.T, dir string) error {
 	pnodes, err = LoadStages(parser_configs, pctx)
 	if err != nil {
-		return fmt.Errorf("Unable to load parser config : %s", err)
+		return fmt.Errorf("unable to load parser config : %s", err)
 	}
 	//TBD: Load post overflows
@@ -238,7 +238,7 @@ func testFile(t *testing.T, file string, pctx UnixParserCtx, nodes []Node) bool
 		CheckFailed:
-			if valid == true {
+			if valid {
 				//log.Infof("Found result [%s], skip", spew.Sdump(tf.Results[ridx]))
 				log.Warningf("The test is valid, remove entry %d from expects, and %d from t.Results", eidx, ridx)
 				//don't do this at home : delete current element from list and redo

@@ -170,7 +170,7 @@ func ProcessStatics(statics []types.ExtraField, p *types.Event, clog *logrus.Ent
 				clog.Warningf("method '%s' doesn't exist", static.Method)
 			}
 		}
-		if processed == false {
+		if !processed {
 			clog.Warningf("method '%s' doesn't exist", static.Method)
 		}
 	} else if static.Parsed != "" {
@@ -278,7 +278,7 @@ func /*(u types.UnixParser)*/ Parse(ctx UnixParserCtx, xp types.Event, nodes []N
 			continue
 		}
 		clog.Tracef("Processing node %d/%d -> %s", idx, len(nodes), node.rn)
-		if ctx.Profiling == true {
+		if ctx.Profiling {
 			node.Profiling = true
 		}
 		ret, err := node.process(&event, ctx)
@@ -286,10 +286,10 @@ func /*(u types.UnixParser)*/ Parse(ctx UnixParserCtx, xp types.Event, nodes []N
 			clog.Fatalf("Error while processing node : %v", err)
 		}
 		clog.Tracef("node (%s) ret : %v", node.rn, ret)
-		if ret == true {
+		if ret {
 			isStageOK = true
 		}
-		if ret == true && node.OnSuccess == "next_stage" {
+		if ret && node.OnSuccess == "next_stage" {
 			clog.Debugf("node successful, stop end stage %s", stage)
 			break
 		}
@@ -299,7 +299,7 @@ func /*(u types.UnixParser)*/ Parse(ctx UnixParserCtx, xp types.Event, nodes []N
 			break
 		}
 	}
-	if isStageOK == false {
+	if !isStageOK {
 		log.Debugf("Log didn't finish stage %s", event.Stage)
 		event.Process = false
 		return event, nil

@@ -472,6 +472,8 @@ func BenchmarkWaitNNoDelay(b *testing.B) {
 	b.ReportAllocs()
 	b.ResetTimer()
 	for i := 0; i < b.N; i++ {
-		lim.WaitN(ctx, 1)
+		if err := lim.WaitN(ctx, 1); err != nil {
+			b.Errorf("failed limiter : %s", err)
+		}
 	}
 }