light pkg/parser cleanup (#2279)

* pkg/parser: clean up imports
* remove duplicate import
* simplify boolean expression
* don't check length before range
* if..else if.. -> switch/case
* errors.Wrap -> fmt.Errorf
* typo, lint
* redundant break
mmetc 2023-06-13 13:16:13 +02:00 committed by GitHub
parent 76429f033a
commit b9a3acb03f
13 changed files with 108 additions and 115 deletions
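The cleanups listed above are mechanical Go refactors. As a rough illustration of the main patterns — a standalone sketch with made-up names, not code taken from this commit:

package main

import "fmt"

// if..else if.. -> switch/case: an expressionless switch replaces a chain of
// else-if branches (the same shape as printStaticTarget in runtime.go below).
func describe(method, parsed string) string {
	switch {
	case method != "":
		return method
	case parsed != "":
		return fmt.Sprintf(".Parsed[%s]", parsed)
	default:
		return "?"
	}
}

// don't check length before range: ranging over a nil or empty slice is a
// no-op, so a surrounding `if len(items) > 0` only adds indentation.
func sum(items []int) int {
	total := 0
	for _, v := range items {
		total += v
	}
	return total
}

// simplify boolean expression: `a && b && c || c && !a` factors into
// `c && (b || !a)`, the shape of the statics/whitelist condition in node.go.
func shouldApplyStatics(hasWhitelist, isWhitelisted bool, statics int) bool {
	return statics > 0 && (isWhitelisted || !hasWhitelist)
}

func main() {
	fmt.Println(describe("", "request"))            // .Parsed[request]
	fmt.Println(sum(nil))                           // 0
	fmt.Println(shouldApplyStatics(true, false, 3)) // false
}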


@@ -1,11 +1,12 @@
package parser
import (
"github.com/crowdsecurity/crowdsec/pkg/types"
log "github.com/sirupsen/logrus"
"github.com/crowdsecurity/crowdsec/pkg/types"
)
/* should be part of a packaged shared with enrich/geoip.go */
/* should be part of a package shared with enrich/geoip.go */
type EnrichFunc func(string, *types.Event, interface{}, *log.Entry) (map[string]string, error)
type InitFunc func(map[string]string) (interface{}, error)


@@ -3,9 +3,10 @@ package parser
import (
"time"
log "github.com/sirupsen/logrus"
expr "github.com/crowdsecurity/crowdsec/pkg/exprhelpers"
"github.com/crowdsecurity/crowdsec/pkg/types"
log "github.com/sirupsen/logrus"
)
func parseDateWithFormat(date, format string) (string, time.Time) {


@@ -3,9 +3,9 @@ package parser
import (
"net"
"github.com/crowdsecurity/crowdsec/pkg/types"
log "github.com/sirupsen/logrus"
//"github.com/crowdsecurity/crowdsec/pkg/parser"
"github.com/crowdsecurity/crowdsec/pkg/types"
)
/* All plugins must export a list of function pointers for exported symbols */


@@ -5,11 +5,11 @@ import (
"net"
"strconv"
"github.com/crowdsecurity/crowdsec/pkg/types"
log "github.com/sirupsen/logrus"
"github.com/oschwald/geoip2-golang"
"github.com/oschwald/maxminddb-golang"
log "github.com/sirupsen/logrus"
"github.com/crowdsecurity/crowdsec/pkg/types"
)
func IpToRange(field string, p *types.Event, ctx interface{}, plog *log.Entry) (map[string]string, error) {


@@ -3,8 +3,9 @@ package parser
import (
"encoding/json"
"github.com/crowdsecurity/crowdsec/pkg/types"
log "github.com/sirupsen/logrus"
"github.com/crowdsecurity/crowdsec/pkg/types"
)
func unmarshalJSON(field string, p *types.Event, ctx interface{}, plog *log.Entry) (map[string]string, error) {


@@ -4,6 +4,7 @@ import (
"time"
"github.com/antonmedv/expr/vm"
"github.com/crowdsecurity/grokky"
)


@@ -1,24 +1,24 @@
package parser
import (
"errors"
"fmt"
"net"
"strings"
"time"
"github.com/antonmedv/expr"
"github.com/crowdsecurity/grokky"
"github.com/pkg/errors"
"github.com/antonmedv/expr/vm"
"github.com/davecgh/go-spew/spew"
"github.com/prometheus/client_golang/prometheus"
log "github.com/sirupsen/logrus"
yaml "gopkg.in/yaml.v2"
"github.com/antonmedv/expr/vm"
"github.com/crowdsecurity/grokky"
"github.com/crowdsecurity/crowdsec/pkg/cache"
"github.com/crowdsecurity/crowdsec/pkg/exprhelpers"
"github.com/crowdsecurity/crowdsec/pkg/types"
"github.com/davecgh/go-spew/spew"
"github.com/prometheus/client_golang/prometheus"
"github.com/sirupsen/logrus"
log "github.com/sirupsen/logrus"
)
type Node struct {
@@ -356,29 +356,27 @@ func (n *Node) process(p *types.Event, ctx UnixParserCtx, expressionEnv map[stri
}
//Iterate on leafs
if len(n.LeavesNodes) > 0 {
for _, leaf := range n.LeavesNodes {
ret, err := leaf.process(p, ctx, cachedExprEnv)
if err != nil {
clog.Tracef("\tNode (%s) failed : %v", leaf.rn, err)
clog.Debugf("Event leaving node : ko")
return false, err
}
clog.Tracef("\tsub-node (%s) ret : %v (strategy:%s)", leaf.rn, ret, n.OnSuccess)
if ret {
NodeState = true
/* if child is successful, stop processing */
if n.OnSuccess == "next_stage" {
clog.Debugf("child is success, OnSuccess=next_stage, skip")
break
}
} else if !NodeHasOKGrok {
/*
If the parent node has a successful grok pattern, it's state will stay successful even if one or more chil fails.
If the parent node is a skeleton node (no grok pattern), then at least one child must be successful for it to be a success.
*/
NodeState = false
for _, leaf := range n.LeavesNodes {
ret, err := leaf.process(p, ctx, cachedExprEnv)
if err != nil {
clog.Tracef("\tNode (%s) failed : %v", leaf.rn, err)
clog.Debugf("Event leaving node : ko")
return false, err
}
clog.Tracef("\tsub-node (%s) ret : %v (strategy:%s)", leaf.rn, ret, n.OnSuccess)
if ret {
NodeState = true
/* if child is successful, stop processing */
if n.OnSuccess == "next_stage" {
clog.Debugf("child is success, OnSuccess=next_stage, skip")
break
}
} else if !NodeHasOKGrok {
/*
If the parent node has a successful grok pattern, it's state will stay successful even if one or more chil fails.
If the parent node is a skeleton node (no grok pattern), then at least one child must be successful for it to be a success.
*/
NodeState = false
}
}
/*todo : check if a node made the state change ?*/
@@ -398,10 +396,11 @@ func (n *Node) process(p *types.Event, ctx UnixParserCtx, expressionEnv map[stri
if n.Name != "" {
NodesHitsOk.With(prometheus.Labels{"source": p.Line.Src, "type": p.Line.Module, "name": n.Name}).Inc()
}
/*
Please kill me. this is to apply statics when the node *has* whitelists that successfully matched the node.
This is to apply statics when the node *has* whitelists that successfully matched the node.
*/
if hasWhitelist && isWhitelisted && len(n.Statics) > 0 || len(n.Statics) > 0 && !hasWhitelist {
if len(n.Statics) > 0 && (isWhitelisted || !hasWhitelist) {
clog.Debugf("+ Processing %d statics", len(n.Statics))
// if all else is good in whitelist, process node's statics
err := n.ProcessStatics(n.Statics, p)
@@ -449,8 +448,8 @@ func (n *Node) compile(pctx *UnixParserCtx, ectx EnricherCtx) error {
/* if the node has debugging enabled, create a specific logger with debug
that will be used only for processing this node ;) */
if n.Debug {
var clog = logrus.New()
if err := types.ConfigureLogger(clog); err != nil {
var clog = log.New()
if err = types.ConfigureLogger(clog); err != nil {
log.Fatalf("While creating bucket-specific logger : %s", err)
}
clog.SetLevel(log.DebugLevel)
@@ -489,7 +488,7 @@ func (n *Node) compile(pctx *UnixParserCtx, ectx EnricherCtx) error {
/* handle pattern_syntax and groks */
for _, pattern := range n.SubGroks {
n.Logger.Tracef("Adding subpattern '%s' : '%s'", pattern.Key, pattern.Value)
if err := pctx.Grok.Add(pattern.Key.(string), pattern.Value.(string)); err != nil {
if err = pctx.Grok.Add(pattern.Key.(string), pattern.Value.(string)); err != nil {
if errors.Is(err, grokky.ErrAlreadyExist) {
n.Logger.Warningf("grok '%s' already registred", pattern.Key)
continue
@@ -532,20 +531,18 @@ func (n *Node) compile(pctx *UnixParserCtx, ectx EnricherCtx) error {
n.Grok.RunTimeValue, err = expr.Compile(n.Grok.ExpValue,
exprhelpers.GetExprOptions(map[string]interface{}{"evt": &types.Event{}})...)
if err != nil {
return errors.Wrap(err, "while compiling grok's expression")
return fmt.Errorf("while compiling grok's expression: %w", err)
}
}
/* load grok statics */
if len(n.Grok.Statics) > 0 {
//compile expr statics if present
for idx := range n.Grok.Statics {
if n.Grok.Statics[idx].ExpValue != "" {
n.Grok.Statics[idx].RunTimeValue, err = expr.Compile(n.Grok.Statics[idx].ExpValue,
exprhelpers.GetExprOptions(map[string]interface{}{"evt": &types.Event{}})...)
if err != nil {
return err
}
//compile expr statics if present
for idx := range n.Grok.Statics {
if n.Grok.Statics[idx].ExpValue != "" {
n.Grok.Statics[idx].RunTimeValue, err = expr.Compile(n.Grok.Statics[idx].ExpValue,
exprhelpers.GetExprOptions(map[string]interface{}{"evt": &types.Event{}})...)
if err != nil {
return err
}
}
valid = true
@@ -556,54 +553,53 @@ func (n *Node) compile(pctx *UnixParserCtx, ectx EnricherCtx) error {
n.Stash[i].ValueExpression, err = expr.Compile(stash.Value,
exprhelpers.GetExprOptions(map[string]interface{}{"evt": &types.Event{}})...)
if err != nil {
return errors.Wrap(err, "while compiling stash value expression")
return fmt.Errorf("while compiling stash value expression: %w", err)
}
n.Stash[i].KeyExpression, err = expr.Compile(stash.Key,
exprhelpers.GetExprOptions(map[string]interface{}{"evt": &types.Event{}})...)
if err != nil {
return errors.Wrap(err, "while compiling stash key expression")
return fmt.Errorf("while compiling stash key expression: %w", err)
}
n.Stash[i].TTLVal, err = time.ParseDuration(stash.TTL)
if err != nil {
return errors.Wrap(err, "while parsing stash ttl")
return fmt.Errorf("while parsing stash ttl: %w", err)
}
logLvl := n.Logger.Logger.GetLevel()
//init the cache, does it make sense to create it here just to be sure everything is fine ?
if err := cache.CacheInit(cache.CacheCfg{
if err = cache.CacheInit(cache.CacheCfg{
Size: n.Stash[i].MaxMapSize,
TTL: n.Stash[i].TTLVal,
Name: n.Stash[i].Name,
Strategy: n.Stash[i].Strategy,
LogLevel: &logLvl,
}); err != nil {
return errors.Wrap(err, "while initializing cache")
return fmt.Errorf("while initializing cache: %w", err)
}
}
/* compile leafs if present */
if len(n.LeavesNodes) > 0 {
for idx := range n.LeavesNodes {
if n.LeavesNodes[idx].Name == "" {
n.LeavesNodes[idx].Name = fmt.Sprintf("child-%s", n.Name)
}
/*propagate debug/stats to child nodes*/
if !n.LeavesNodes[idx].Debug && n.Debug {
n.LeavesNodes[idx].Debug = true
}
if !n.LeavesNodes[idx].Profiling && n.Profiling {
n.LeavesNodes[idx].Profiling = true
}
n.LeavesNodes[idx].Stage = n.Stage
err = n.LeavesNodes[idx].compile(pctx, ectx)
if err != nil {
return err
}
for idx := range n.LeavesNodes {
if n.LeavesNodes[idx].Name == "" {
n.LeavesNodes[idx].Name = fmt.Sprintf("child-%s", n.Name)
}
/*propagate debug/stats to child nodes*/
if !n.LeavesNodes[idx].Debug && n.Debug {
n.LeavesNodes[idx].Debug = true
}
if !n.LeavesNodes[idx].Profiling && n.Profiling {
n.LeavesNodes[idx].Profiling = true
}
n.LeavesNodes[idx].Stage = n.Stage
err = n.LeavesNodes[idx].compile(pctx, ectx)
if err != nil {
return err
}
valid = true
}
/* load statics if present */
for idx := range n.Statics {
if n.Statics[idx].ExpValue != "" {
@@ -622,6 +618,7 @@ func (n *Node) compile(pctx *UnixParserCtx, ectx EnricherCtx) error {
n.Logger.Debugf("adding ip %s to whitelists", net.ParseIP(v))
valid = true
}
for _, v := range n.Whitelist.Cidrs {
_, tnet, err := net.ParseCIDR(v)
if err != nil {
@@ -631,6 +628,7 @@ func (n *Node) compile(pctx *UnixParserCtx, ectx EnricherCtx) error {
n.Logger.Debugf("adding cidr %s to whitelists", tnet)
valid = true
}
for _, filter := range n.Whitelist.Exprs {
expression := &ExprWhitelist{}
expression.Filter, err = expr.Compile(filter, exprhelpers.GetExprOptions(map[string]interface{}{"evt": &types.Event{}})...)
@@ -656,5 +654,6 @@ func (n *Node) compile(pctx *UnixParserCtx, ectx EnricherCtx) error {
if err := n.validate(pctx, ectx); err != nil {
return err
}
return nil
}
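
A note on the errors.Wrap -> fmt.Errorf changes in the hunks above: wrapping with the %w verb keeps the underlying error reachable through errors.Is / errors.As, which this file relies on for grokky.ErrAlreadyExist. A minimal standalone sketch with hypothetical names, not the actual crowdsec code:

package main

import (
	"errors"
	"fmt"
)

// errAlreadyExist stands in for grokky.ErrAlreadyExist in this sketch.
var errAlreadyExist = errors.New("already exist")

// addPattern pretends the pattern was registered earlier.
func addPattern(name string) error {
	return errAlreadyExist
}

// compileGrok wraps the failure with %w, as the cleanup does with fmt.Errorf.
func compileGrok(name string) error {
	if err := addPattern(name); err != nil {
		return fmt.Errorf("while adding pattern '%s': %w", name, err)
	}
	return nil
}

func main() {
	err := compileGrok("SYSLOGBASE")
	// errors.Is walks the chain created by %w, so the sentinel stays visible.
	fmt.Println(errors.Is(err, errAlreadyExist)) // true
}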


@@ -63,6 +63,5 @@ func TestParserConfigs(t *testing.T) {
if CfgTests[idx].Valid == false && err == nil {
t.Fatalf("Valid: (%d/%d) expected error", idx+1, len(CfgTests))
}
}
}


@@ -11,11 +11,12 @@ import (
"strings"
"testing"
"github.com/crowdsecurity/crowdsec/pkg/exprhelpers"
"github.com/crowdsecurity/crowdsec/pkg/types"
"github.com/davecgh/go-spew/spew"
log "github.com/sirupsen/logrus"
"gopkg.in/yaml.v2"
"github.com/crowdsecurity/crowdsec/pkg/exprhelpers"
"github.com/crowdsecurity/crowdsec/pkg/types"
)
type TestFile struct {
@@ -54,7 +55,6 @@ func TestParser(t *testing.T) {
}
}
}
}
func BenchmarkParser(t *testing.B) {
@@ -90,7 +90,6 @@ func BenchmarkParser(t *testing.B) {
}
func testOneParser(pctx *UnixParserCtx, ectx EnricherCtx, dir string, b *testing.B) error {
var (
err error
pnodes []Node
@@ -112,7 +111,7 @@ func testOneParser(pctx *UnixParserCtx, ectx EnricherCtx, dir string, b *testing
if err != nil {
panic(err)
}
if err := yaml.UnmarshalStrict(out.Bytes(), &parser_configs); err != nil {
if err = yaml.UnmarshalStrict(out.Bytes(), &parser_configs); err != nil {
return fmt.Errorf("failed unmarshaling %s : %s", parser_cfg_file, err)
}
@@ -399,7 +398,7 @@ func TestGeneratePatternsDoc(t *testing.T) {
t.Fatal("failed to write to file")
}
for _, k := range p {
if _, err := f.WriteString(fmt.Sprintf("## %s\n\nPattern :\n```\n%s\n```\n\n", k.Key, k.Value)); err != nil {
if _, err := fmt.Fprintf(f, "## %s\n\nPattern :\n```\n%s\n```\n\n", k.Key, k.Value); err != nil {
t.Fatal("failed to write to file")
}
fmt.Printf("%v\t%v\n", k.Key, k.Value)
@@ -414,5 +413,4 @@ func TestGeneratePatternsDoc(t *testing.T) {
t.Fatal("failed to write to file")
}
f.Close()
}


@@ -9,24 +9,21 @@ import (
"errors"
"fmt"
"reflect"
"strconv"
"strings"
"sync"
"time"
"github.com/crowdsecurity/crowdsec/pkg/types"
"strconv"
"github.com/antonmedv/expr"
"github.com/mohae/deepcopy"
"github.com/prometheus/client_golang/prometheus"
log "github.com/sirupsen/logrus"
"github.com/antonmedv/expr"
"github.com/crowdsecurity/crowdsec/pkg/types"
)
/* ok, this is kinda experimental, I don't know how bad of an idea it is .. */
func SetTargetByName(target string, value string, evt *types.Event) bool {
if evt == nil {
return false
}
@@ -71,8 +68,6 @@ func SetTargetByName(target string, value string, evt *types.Event) bool {
tmp = reflect.Indirect(tmp)
}
iter = tmp
//nolint: gosimple
break
case reflect.Ptr:
tmp := iter.Elem()
iter = reflect.Indirect(tmp.FieldByName(f))
@@ -95,18 +90,18 @@
}
func printStaticTarget(static ExtraField) string {
if static.Method != "" {
switch {
case static.Method != "":
return static.Method
} else if static.Parsed != "" {
case static.Parsed != "":
return fmt.Sprintf(".Parsed[%s]", static.Parsed)
} else if static.Meta != "" {
case static.Meta != "":
return fmt.Sprintf(".Meta[%s]", static.Meta)
} else if static.Enriched != "" {
case static.Enriched != "":
return fmt.Sprintf(".Enriched[%s]", static.Enriched)
} else if static.TargetByName != "" {
case static.TargetByName != "":
return static.TargetByName
} else {
default:
return "?"
}
}
@@ -195,7 +190,6 @@ func (n *Node) ProcessStatics(statics []ExtraField, event *types.Event) error {
} else {
clog.Fatal("unable to process static : unknown target")
}
}
return nil
}
@@ -355,10 +349,8 @@ func Parse(ctx UnixParserCtx, xp types.Event, nodes []Node) (types.Event, error)
event.Process = false
return event, nil
}
}
event.Process = true
return event, nil
}


@@ -109,17 +109,16 @@ func LoadStages(stageFiles []Stagefile, pctx *UnixParserCtx, ectx EnricherCtx) (
continue
}
if len(node.Data) > 0 {
for _, data := range node.Data {
err = exprhelpers.FileInit(pctx.DataFolder, data.DestPath, data.Type)
if err != nil {
log.Error(err)
}
if data.Type == "regexp" { //cache only makes sense for regexp
exprhelpers.RegexpCacheInit(data.DestPath, *data)
}
for _, data := range node.Data {
err = exprhelpers.FileInit(pctx.DataFolder, data.DestPath, data.Type)
if err != nil {
log.Error(err)
}
if data.Type == "regexp" { //cache only makes sense for regexp
exprhelpers.RegexpCacheInit(data.DestPath, *data)
}
}
nodes = append(nodes, node)
nodesCount++
}


@@ -7,12 +7,13 @@ import (
"sort"
"strings"
log "github.com/sirupsen/logrus"
"github.com/crowdsecurity/grokky"
"github.com/crowdsecurity/crowdsec/pkg/csconfig"
"github.com/crowdsecurity/crowdsec/pkg/cwhub"
"github.com/crowdsecurity/crowdsec/pkg/fflag"
"github.com/crowdsecurity/grokky"
log "github.com/sirupsen/logrus"
)
type UnixParserCtx struct {
@@ -117,7 +118,7 @@ func LoadParsers(cConfig *csconfig.Config, parsers *Parsers) (*Parsers, error) {
parsers.EnricherCtx, err = Loadplugin(cConfig.Crowdsec.DataDir)
if err != nil {
return parsers, fmt.Errorf("Failed to load enrich plugin : %v", err)
return parsers, fmt.Errorf("failed to load enrich plugin : %v", err)
}
/*
@@ -135,8 +136,8 @@ func LoadParsers(cConfig *csconfig.Config, parsers *Parsers) (*Parsers, error) {
log.Infof("Loading postoverflow parsers")
parsers.Povfwnodes, err = LoadStages(parsers.PovfwStageFiles, parsers.Povfwctx, parsers.EnricherCtx)
} else {
parsers.Povfwnodes = []Node{}
log.Infof("No postoverflow parsers to load")
parsers.Povfwnodes = []Node{}
}
if err != nil {


@@ -4,6 +4,7 @@ import (
"net"
"github.com/antonmedv/expr/vm"
"github.com/crowdsecurity/crowdsec/pkg/exprhelpers"
)