Add debug expr (#168)
* debug expr in node.go * update documentation Co-authored-by: AlteredCoder <AlteredCoder> Co-authored-by: Thibault bui Koechlin <thibault@crowdsec.net>
This commit is contained in:
parent
085dcc5eb6
commit
d8f0f5a3a9
|
@ -3,7 +3,7 @@
|
||||||
The output mechanism is composed of plugins. In order to store the bans for {{blockers.htmlname}}, at least one backend plugin must be loaded. Else, bans will not be stored and decisions cannot be applied.
|
The output mechanism is composed of plugins. In order to store the bans for {{blockers.htmlname}}, at least one backend plugin must be loaded. Else, bans will not be stored and decisions cannot be applied.
|
||||||
|
|
||||||
|
|
||||||
Currently the supported {{backend.name}} are SQLite (default) and MySQL.
|
Currently the supported backends are SQLite (default) and MySQL.
|
||||||
|
|
||||||
In order to filter which signals will be sent to which plugin, {{crowdsec.name}} uses a system of `profiles` that allows you to granularly process your bans and signals.
|
In order to filter which signals will be sent to which plugin, {{crowdsec.name}} uses a system of `profiles` that allows you to granularly process your bans and signals.
|
||||||
|
|
||||||
|
|
|
@ -142,7 +142,37 @@ debug: true|false
|
||||||
_default: false_
|
_default: false_
|
||||||
|
|
||||||
If set to `true`, enables node-level debugging.
|
If set to `true`, enables node-level debugging.
|
||||||
It is meant to help understanding parser node behaviour by providing contextual logging.
|
It is meant to help you understand parser node behavior by providing contextual logging:
|
||||||
|
|
||||||
|
<details>
|
||||||
|
<summary>assignments made by statics</summary>
|
||||||
|
```
|
||||||
|
DEBU[31-07-2020 16:36:28] + Processing 4 statics id=withered-rain name=crowdsecurity/nginx-logs stage=s01-parse
|
||||||
|
DEBU[31-07-2020 16:36:28] .Meta[service] = 'http' id=withered-rain name=crowdsecurity/nginx-logs stage=s01-parse
|
||||||
|
DEBU[31-07-2020 16:36:28] .Meta[source_ip] = '127.0.0.1' id=withered-rain name=crowdsecurity/nginx-logs stage=s01-parse
|
||||||
|
DEBU[31-07-2020 16:36:28] .Meta[http_status] = '200' id=withered-rain name=crowdsecurity/nginx-logs stage=s01-parse
|
||||||
|
DEBU[31-07-2020 16:36:28] .Meta[http_path] = '/' id=withered-rain name=crowdsecurity/nginx-logs stage=s01-parse
|
||||||
|
```
|
||||||
|
</details>
|
||||||
|
<details>
|
||||||
|
<summary>assignments made by grok pattern</summary>
|
||||||
|
```
|
||||||
|
DEBU[31-07-2020 16:36:28] + Grok 'NGINXACCESS' returned 10 entries to merge in Parsed id=dark-glitter name=child-crowdsecurity/nginx-logs stage=s01-parse
|
||||||
|
DEBU[31-07-2020 16:36:28] .Parsed['time_local'] = '21/Jul/2020:16:13:05 +0200' id=dark-glitter name=child-crowdsecurity/nginx-logs stage=s01-parse
|
||||||
|
DEBU[31-07-2020 16:36:28] .Parsed['method'] = 'GET' id=dark-glitter name=child-crowdsecurity/nginx-logs stage=s01-parse
|
||||||
|
DEBU[31-07-2020 16:36:28] .Parsed['request'] = '/' id=dark-glitter name=child-crowdsecurity/nginx-logs stage=s01-parse
|
||||||
|
DEBU[31-07-2020 16:36:28] .Parsed['http_user_agent'] = 'curl/7.58.0' id=dark-glitter name=child-crowdsecurity/nginx-logs stage=s01-parse
|
||||||
|
DEBU[31-07-2020 16:36:28] .Parsed['remote_addr'] = '127.0.0.1' id=dark-glitter name=child-crowdsecurity/nginx-logs stage=s01-parse
|
||||||
|
```
|
||||||
|
</details>
|
||||||
|
<details>
|
||||||
|
<summary>debug of filters and expression results</summary>
|
||||||
|
```
|
||||||
|
DEBU[31-07-2020 16:36:28] eval(evt.Parsed.program == 'nginx') = TRUE id=withered-rain name=crowdsecurity/nginx-logs stage=s01-parse
|
||||||
|
DEBU[31-07-2020 16:36:28] eval variables: id=withered-rain name=crowdsecurity/nginx-logs stage=s01-parse
|
||||||
|
DEBU[31-07-2020 16:36:28] evt.Parsed.program = 'nginx' id=withered-rain name=crowdsecurity/nginx-logs stage=s01-parse
|
||||||
|
```
|
||||||
|
</details>
|
||||||
|
|
||||||
|
|
||||||
### filter
|
### filter
|
||||||
|
|
|
@ -311,7 +311,16 @@ _default: false_
|
||||||
|
|
||||||
|
|
||||||
If set to `true`, enables scenario-level debugging.
|
If set to `true`, enables scenario-level debugging.
|
||||||
It is meant to help understanding scenario behavior by providing contextual logging.
|
It is meant to help you understand scenario behavior by providing contextual logging:
|
||||||
|
|
||||||
|
<summary>debug of filters and expression results</summary>
|
||||||
|
```
|
||||||
|
DEBU[31-07-2020 16:34:58] eval(evt.Meta.log_type in ["http_access-log", "http_error-log"] && any(File("bad_user_agents.txt"), {evt.Parsed.http_user_agent contains #})) = TRUE cfg=still-feather file=config/scenarios/http-bad-user-agent.yaml name=crowdsecurity/http-bad-user-agent
|
||||||
|
DEBU[31-07-2020 16:34:58] eval variables: cfg=still-feather file=config/scenarios/http-bad-user-agent.yaml name=crowdsecurity/http-bad-user-agent
|
||||||
|
DEBU[31-07-2020 16:34:58] evt.Meta.log_type = 'http_access-log' cfg=still-feather file=config/scenarios/http-bad-user-agent.yaml name=crowdsecurity/http-bad-user-agent
|
||||||
|
DEBU[31-07-2020 16:34:58] evt.Parsed.http_user_agent = 'Mozilla/5.00 (Nikto/2.1.5) (Evasions:None) (Test:002810)' cfg=still-feather file=config/scenarios/http-bad-user-agent.yaml name=crowdsecurity/http-bad-user-agent
|
||||||
|
```
|
||||||
|
</details>
|
||||||
|
|
||||||
### reprocess
|
### reprocess
|
||||||
|
|
||||||
|
|
120
pkg/exprhelpers/visitor.go
Normal file
120
pkg/exprhelpers/visitor.go
Normal file
|
@ -0,0 +1,120 @@
|
||||||
|
package exprhelpers
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/antonmedv/expr/parser"
|
||||||
|
"github.com/sirupsen/logrus"
|
||||||
|
|
||||||
|
"github.com/antonmedv/expr"
|
||||||
|
"github.com/antonmedv/expr/ast"
|
||||||
|
"github.com/antonmedv/expr/vm"
|
||||||
|
)
|
||||||
|
|
||||||
|
/*
visitor is used to reconstruct each variable — an identifier plus its chain of
properties — referenced in an expr filter.

The reconstructed variables let us debug an expr filter by displaying the
content of every variable present in the filter.
*/
type visitor struct {
	newVar     bool     // true while an identifier has been seen and its variable not yet flushed to vars
	currentID  string   // identifier (first level of the struct) currently being reconstructed
	properties []string // properties accumulated for currentID (e.g. "Parsed", "program")
	vars       []string // fully reconstructed variables (e.g. "evt.Parsed.program")
}
|
||||||
|
|
||||||
|
// Enter is required to satisfy the ast.Visitor interface but is never used.
func (v *visitor) Enter(node *ast.Node) {}
|
||||||
|
|
||||||
|
/*
Exit is called by ast.Walk(node, visitor) each time a node is exited.
From the node information we retrieve the identifier (first level of the
struct) and its properties, to reconstruct the complete variable.
*/
func (v *visitor) Exit(node *ast.Node) {
	if n, ok := (*node).(*ast.IdentifierNode); ok {
		if !v.newVar {
			// first identifier of a variable: start collecting its properties
			v.newVar = true
			v.currentID = n.Value
		} else {
			// a new identifier begins: flush the previously collected
			// variable into vars before starting over
			v.newVar = false
			fullVar := fmt.Sprintf("%s.%s", v.currentID, strings.Join(v.properties, "."))
			v.vars = append(v.vars, fullVar)
			v.properties = []string{}
			v.currentID = n.Value
		}
	} else if n, ok := (*node).(*ast.PropertyNode); ok {
		// property access on the current identifier (e.g. ".Parsed")
		v.properties = append(v.properties, n.Property)
	}
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
Build reconstruct all the variables used in a filter (to display their content later).
|
||||||
|
*/
|
||||||
|
func (v *visitor) Build(filter string, exprEnv expr.Option) (*ExprDebugger, error) {
|
||||||
|
var expressions []*expression
|
||||||
|
ret := &ExprDebugger{
|
||||||
|
filter: filter,
|
||||||
|
}
|
||||||
|
v.newVar = false
|
||||||
|
tree, err := parser.Parse(filter)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
ast.Walk(&tree.Node, v)
|
||||||
|
fullVar := fmt.Sprintf("%s.%s", v.currentID, strings.Join(v.properties, "."))
|
||||||
|
v.vars = append(v.vars, fullVar)
|
||||||
|
v.properties = []string{}
|
||||||
|
v.currentID = ""
|
||||||
|
for _, variable := range v.vars {
|
||||||
|
debugFilter, err := expr.Compile(variable, exprEnv)
|
||||||
|
tmpExpression := &expression{
|
||||||
|
variable,
|
||||||
|
debugFilter,
|
||||||
|
}
|
||||||
|
expressions = append(expressions, tmpExpression)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("compilation of variable '%s' failed: %v", variable, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
ret.expression = expressions
|
||||||
|
return ret, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExprDebugger contains the list of expressions to be run when debugging an expression filter.
type ExprDebugger struct {
	filter     string        // the original filter the expressions were extracted from
	expression []*expression // one compiled expression per variable used in the filter
}
|
||||||
|
|
||||||
|
// expression is the structure that represents a variable in both its string
// and compiled formats.
type expression struct {
	Str      string      // the variable as written in the filter, e.g. "evt.Parsed.program"
	Compiled *vm.Program // the compiled expr program that evaluates the variable
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
Run display the content of each variable of a filter by evaluating them with expr,
|
||||||
|
again the expr environment given in parameter
|
||||||
|
*/
|
||||||
|
func (e *ExprDebugger) Run(logger *logrus.Entry, filterResult bool, exprEnv map[string]interface{}) {
|
||||||
|
logger.Debugf("eval(%s) = %s", e.filter, strings.ToUpper(strconv.FormatBool(filterResult)))
|
||||||
|
logger.Debugf("eval variables:")
|
||||||
|
for _, expression := range e.expression {
|
||||||
|
debug, err := expr.Run(expression.Compiled, exprEnv)
|
||||||
|
if err != nil {
|
||||||
|
logger.Errorf("unable to print debug expression for '%s': %s", expression.Str, err)
|
||||||
|
}
|
||||||
|
logger.Debugf(" %s = '%s'", expression.Str, debug)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewDebugger is the exported function that build the debuggers expressions
|
||||||
|
func NewDebugger(filter string, exprEnv expr.Option) (*ExprDebugger, error) {
|
||||||
|
visitor := &visitor{}
|
||||||
|
exprDebugger, err := visitor.Build(filter, exprEnv)
|
||||||
|
return exprDebugger, err
|
||||||
|
}
|
|
@ -31,36 +31,37 @@ import (
|
||||||
// BucketFactory struct holds all fields for any bucket configuration. This is to have a
|
// BucketFactory struct holds all fields for any bucket configuration. This is to have a
|
||||||
// generic struct for buckets. This can be seen as a bucket factory.
|
// generic struct for buckets. This can be seen as a bucket factory.
|
||||||
type BucketFactory struct {
|
type BucketFactory struct {
|
||||||
FormatVersion string `yaml:"format"`
|
FormatVersion string `yaml:"format"`
|
||||||
Author string `yaml:"author"`
|
Author string `yaml:"author"`
|
||||||
Description string `yaml:"description"`
|
Description string `yaml:"description"`
|
||||||
References []string `yaml:"references"`
|
References []string `yaml:"references"`
|
||||||
Type string `yaml:"type"` //Type can be : leaky, counter, trigger. It determines the main bucket characteristics
|
Type string `yaml:"type"` //Type can be : leaky, counter, trigger. It determines the main bucket characteristics
|
||||||
Name string `yaml:"name"` //Name of the bucket, used later in log and user-messages. Should be unique
|
Name string `yaml:"name"` //Name of the bucket, used later in log and user-messages. Should be unique
|
||||||
Capacity int `yaml:"capacity"` //Capacity is applicable to leaky buckets and determines the "burst" capacity
|
Capacity int `yaml:"capacity"` //Capacity is applicable to leaky buckets and determines the "burst" capacity
|
||||||
LeakSpeed string `yaml:"leakspeed"` //Leakspeed is a float representing how many events per second leak out of the bucket
|
LeakSpeed string `yaml:"leakspeed"` //Leakspeed is a float representing how many events per second leak out of the bucket
|
||||||
Duration string `yaml:"duration"` //Duration allows 'counter' buckets to have a fixed life-time
|
Duration string `yaml:"duration"` //Duration allows 'counter' buckets to have a fixed life-time
|
||||||
Filter string `yaml:"filter"` //Filter is an expr that determines if an event is elligible for said bucket. Filter is evaluated against the Event struct
|
Filter string `yaml:"filter"` //Filter is an expr that determines if an event is elligible for said bucket. Filter is evaluated against the Event struct
|
||||||
GroupBy string `yaml:"groupby,omitempty"` //groupy is an expr that allows to determine the partitions of the bucket. A common example is the source_ip
|
GroupBy string `yaml:"groupby,omitempty"` //groupy is an expr that allows to determine the partitions of the bucket. A common example is the source_ip
|
||||||
Distinct string `yaml:"distinct"` //Distinct, when present, adds a `Pour()` processor that will only pour uniq items (based on uniq_filter expr result)
|
Distinct string `yaml:"distinct"` //Distinct, when present, adds a `Pour()` processor that will only pour uniq items (based on uniq_filter expr result)
|
||||||
Debug bool `yaml:"debug"` //Debug, when set to true, will enable debugging for _this_ scenario specifically
|
Debug bool `yaml:"debug"` //Debug, when set to true, will enable debugging for _this_ scenario specifically
|
||||||
Labels map[string]string `yaml:"labels"` //Labels is K:V list aiming at providing context the overflow
|
Labels map[string]string `yaml:"labels"` //Labels is K:V list aiming at providing context the overflow
|
||||||
Blackhole string `yaml:"blackhole,omitempty"` //Blackhole is a duration that, if present, will prevent same bucket partition to overflow more often than $duration
|
Blackhole string `yaml:"blackhole,omitempty"` //Blackhole is a duration that, if present, will prevent same bucket partition to overflow more often than $duration
|
||||||
logger *log.Entry `yaml:"-"` //logger is bucket-specific logger (used by Debug as well)
|
logger *log.Entry `yaml:"-"` //logger is bucket-specific logger (used by Debug as well)
|
||||||
Reprocess bool `yaml:"reprocess"` //Reprocess, if true, will for the bucket to be re-injected into processing chain
|
Reprocess bool `yaml:"reprocess"` //Reprocess, if true, will for the bucket to be re-injected into processing chain
|
||||||
CacheSize int `yaml:"cache_size"` //CacheSize, if > 0, limits the size of in-memory cache of the bucket
|
CacheSize int `yaml:"cache_size"` //CacheSize, if > 0, limits the size of in-memory cache of the bucket
|
||||||
Profiling bool `yaml:"profiling"` //Profiling, if true, will make the bucket record pours/overflows/etc.
|
Profiling bool `yaml:"profiling"` //Profiling, if true, will make the bucket record pours/overflows/etc.
|
||||||
OverflowFilter string `yaml:"overflow_filter"` //OverflowFilter if present, is a filter that must return true for the overflow to go through
|
OverflowFilter string `yaml:"overflow_filter"` //OverflowFilter if present, is a filter that must return true for the overflow to go through
|
||||||
BucketName string `yaml:"-"`
|
BucketName string `yaml:"-"`
|
||||||
Filename string `yaml:"-"`
|
Filename string `yaml:"-"`
|
||||||
RunTimeFilter *vm.Program `json:"-"`
|
RunTimeFilter *vm.Program `json:"-"`
|
||||||
RunTimeGroupBy *vm.Program `json:"-"`
|
ExprDebugger *exprhelpers.ExprDebugger `yaml:"-" json:"-"` // used to debug expression by printing the content of each variable of the expression
|
||||||
Data []*types.DataSource `yaml:"data,omitempty"`
|
RunTimeGroupBy *vm.Program `json:"-"`
|
||||||
leakspeed time.Duration //internal representation of `Leakspeed`
|
Data []*types.DataSource `yaml:"data,omitempty"`
|
||||||
duration time.Duration //internal representation of `Duration`
|
leakspeed time.Duration //internal representation of `Leakspeed`
|
||||||
ret chan types.Event //the bucket-specific output chan for overflows
|
duration time.Duration //internal representation of `Duration`
|
||||||
processors []Processor //processors is the list of hooks for pour/overflow/create (cf. uniq, blackhole etc.)
|
ret chan types.Event //the bucket-specific output chan for overflows
|
||||||
output bool //??
|
processors []Processor //processors is the list of hooks for pour/overflow/create (cf. uniq, blackhole etc.)
|
||||||
|
output bool //??
|
||||||
}
|
}
|
||||||
|
|
||||||
func ValidateFactory(b *BucketFactory) error {
|
func ValidateFactory(b *BucketFactory) error {
|
||||||
|
@ -232,6 +233,12 @@ func LoadBucket(g *BucketFactory, dataFolder string) error {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("invalid filter '%s' in %s : %v", g.Filter, g.Filename, err)
|
return fmt.Errorf("invalid filter '%s' in %s : %v", g.Filter, g.Filename, err)
|
||||||
}
|
}
|
||||||
|
if g.Debug {
|
||||||
|
g.ExprDebugger, err = exprhelpers.NewDebugger(g.Filter, expr.Env(exprhelpers.GetExprEnv(map[string]interface{}{"evt": &types.Event{}})))
|
||||||
|
if err != nil {
|
||||||
|
log.Errorf("unable to build debug filter for '%s' : %s", g.Filter, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
if g.GroupBy != "" {
|
if g.GroupBy != "" {
|
||||||
g.RunTimeGroupBy, err = expr.Compile(g.GroupBy, expr.Env(exprhelpers.GetExprEnv(map[string]interface{}{"evt": &types.Event{}})))
|
g.RunTimeGroupBy, err = expr.Compile(g.GroupBy, expr.Env(exprhelpers.GetExprEnv(map[string]interface{}{"evt": &types.Event{}})))
|
||||||
|
@ -500,13 +507,13 @@ func PourItemToHolders(parsed types.Event, holders []BucketFactory, buckets *Buc
|
||||||
holder.logger.Errorf("unexpected non-bool return : %T", output)
|
holder.logger.Errorf("unexpected non-bool return : %T", output)
|
||||||
log.Fatalf("Filter issue")
|
log.Fatalf("Filter issue")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if holder.Debug {
|
||||||
|
holder.ExprDebugger.Run(holder.logger, condition, exprhelpers.GetExprEnv(map[string]interface{}{"evt": &parsed}))
|
||||||
|
}
|
||||||
if !condition {
|
if !condition {
|
||||||
holder.logger.Debugf("eval(FALSE) %s", holder.Filter)
|
holder.logger.Debugf("Event leaving node : ko")
|
||||||
//log.Debugf("%s -> FALSE", holder.Filter)
|
|
||||||
//holder.logger.Debugf("Filter eval failed")
|
|
||||||
continue
|
continue
|
||||||
} else {
|
|
||||||
holder.logger.Debugf("eval(TRUE) %s", holder.Filter)
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -5,6 +5,7 @@ import (
|
||||||
"net"
|
"net"
|
||||||
|
|
||||||
"github.com/antonmedv/expr"
|
"github.com/antonmedv/expr"
|
||||||
|
|
||||||
"github.com/antonmedv/expr/vm"
|
"github.com/antonmedv/expr/vm"
|
||||||
"github.com/crowdsecurity/crowdsec/pkg/exprhelpers"
|
"github.com/crowdsecurity/crowdsec/pkg/exprhelpers"
|
||||||
"github.com/crowdsecurity/crowdsec/pkg/types"
|
"github.com/crowdsecurity/crowdsec/pkg/types"
|
||||||
|
@ -36,9 +37,9 @@ type Node struct {
|
||||||
rn string //this is only for us in debug, a random generated name for each node
|
rn string //this is only for us in debug, a random generated name for each node
|
||||||
//Filter is executed at runtime (with current log line as context)
|
//Filter is executed at runtime (with current log line as context)
|
||||||
//and must succeed or node is exited
|
//and must succeed or node is exited
|
||||||
Filter string `yaml:"filter,omitempty"`
|
Filter string `yaml:"filter,omitempty"`
|
||||||
RunTimeFilter *vm.Program `yaml:"-" json:"-"` //the actual compiled filter
|
RunTimeFilter *vm.Program `yaml:"-" json:"-"` //the actual compiled filter
|
||||||
|
ExprDebugger *exprhelpers.ExprDebugger `yaml:"-" json:"-"` //used to debug expression by printing the content of each variable of the expression
|
||||||
//If node has leafs, execute all of them until one asks for a 'break'
|
//If node has leafs, execute all of them until one asks for a 'break'
|
||||||
SuccessNodes []Node `yaml:"nodes,omitempty"`
|
SuccessNodes []Node `yaml:"nodes,omitempty"`
|
||||||
//Flag used to describe when to 'break' or return an 'error'
|
//Flag used to describe when to 'break' or return an 'error'
|
||||||
|
@ -121,11 +122,13 @@ func (n *Node) process(p *types.Event, ctx UnixParserCtx) (bool, error) {
|
||||||
clog.Debugf("Event leaving node : ko")
|
clog.Debugf("Event leaving node : ko")
|
||||||
return false, nil
|
return false, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
switch out := output.(type) {
|
switch out := output.(type) {
|
||||||
case bool:
|
case bool:
|
||||||
/* filter returned false, don't process Node */
|
if n.Debug {
|
||||||
|
n.ExprDebugger.Run(clog, out, exprhelpers.GetExprEnv(map[string]interface{}{"evt": p}))
|
||||||
|
}
|
||||||
if !out {
|
if !out {
|
||||||
clog.Debugf("eval(FALSE) '%s'", n.Filter)
|
|
||||||
clog.Debugf("Event leaving node : ko")
|
clog.Debugf("Event leaving node : ko")
|
||||||
return false, nil
|
return false, nil
|
||||||
}
|
}
|
||||||
|
@ -135,7 +138,6 @@ func (n *Node) process(p *types.Event, ctx UnixParserCtx) (bool, error) {
|
||||||
return false, nil
|
return false, nil
|
||||||
}
|
}
|
||||||
NodeState = true
|
NodeState = true
|
||||||
clog.Debugf("eval(TRUE) '%s'", n.Filter)
|
|
||||||
} else {
|
} else {
|
||||||
clog.Debugf("Node has not filter, enter")
|
clog.Debugf("Node has not filter, enter")
|
||||||
NodeState = true
|
NodeState = true
|
||||||
|
@ -183,7 +185,7 @@ func (n *Node) process(p *types.Event, ctx UnixParserCtx) (bool, error) {
|
||||||
}
|
}
|
||||||
/* run whitelist expression tests anyway */
|
/* run whitelist expression tests anyway */
|
||||||
for eidx, e := range n.Whitelist.B_Exprs {
|
for eidx, e := range n.Whitelist.B_Exprs {
|
||||||
output, err := expr.Run(e, exprhelpers.GetExprEnv(map[string]interface{}{"evt": p}))
|
output, err := expr.Run(e.Filter, exprhelpers.GetExprEnv(map[string]interface{}{"evt": p}))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
clog.Warningf("failed to run whitelist expr : %v", err)
|
clog.Warningf("failed to run whitelist expr : %v", err)
|
||||||
clog.Debugf("Event leaving node : ko")
|
clog.Debugf("Event leaving node : ko")
|
||||||
|
@ -191,7 +193,9 @@ func (n *Node) process(p *types.Event, ctx UnixParserCtx) (bool, error) {
|
||||||
}
|
}
|
||||||
switch out := output.(type) {
|
switch out := output.(type) {
|
||||||
case bool:
|
case bool:
|
||||||
/* filter returned false, don't process Node */
|
if n.Debug {
|
||||||
|
e.ExprDebugger.Run(clog, out, exprhelpers.GetExprEnv(map[string]interface{}{"evt": p}))
|
||||||
|
}
|
||||||
if out {
|
if out {
|
||||||
clog.Debugf("Event is whitelisted by Expr !")
|
clog.Debugf("Event is whitelisted by Expr !")
|
||||||
p.Whitelisted = true
|
p.Whitelisted = true
|
||||||
|
@ -377,6 +381,14 @@ func (n *Node) compile(pctx *UnixParserCtx) error {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("compilation of '%s' failed: %v", n.Filter, err)
|
return fmt.Errorf("compilation of '%s' failed: %v", n.Filter, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if n.Debug {
|
||||||
|
n.ExprDebugger, err = exprhelpers.NewDebugger(n.Filter, expr.Env(exprhelpers.GetExprEnv(map[string]interface{}{"evt": &types.Event{}})))
|
||||||
|
if err != nil {
|
||||||
|
log.Errorf("unable to build debug filter for '%s' : %s", n.Filter, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/* handle pattern_syntax and groks */
|
/* handle pattern_syntax and groks */
|
||||||
|
@ -474,13 +486,18 @@ func (n *Node) compile(pctx *UnixParserCtx) error {
|
||||||
n.logger.Debugf("adding cidr %s to whitelists", tnet)
|
n.logger.Debugf("adding cidr %s to whitelists", tnet)
|
||||||
valid = true
|
valid = true
|
||||||
}
|
}
|
||||||
for _, v := range n.Whitelist.Exprs {
|
for _, filter := range n.Whitelist.Exprs {
|
||||||
expr, err := expr.Compile(v, expr.Env(exprhelpers.GetExprEnv(map[string]interface{}{"evt": &types.Event{}})))
|
expression := &types.ExprWhitelist{}
|
||||||
|
expression.Filter, err = expr.Compile(filter, expr.Env(exprhelpers.GetExprEnv(map[string]interface{}{"evt": &types.Event{}})))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
n.logger.Fatalf("Unable to compile whitelist expression '%s' : %v.", v, err)
|
n.logger.Fatalf("Unable to compile whitelist expression '%s' : %v.", filter, err)
|
||||||
}
|
}
|
||||||
n.Whitelist.B_Exprs = append(n.Whitelist.B_Exprs, expr)
|
expression.ExprDebugger, err = exprhelpers.NewDebugger(filter, expr.Env(exprhelpers.GetExprEnv(map[string]interface{}{"evt": &types.Event{}})))
|
||||||
n.logger.Debugf("adding expression %s to whitelists", v)
|
if err != nil {
|
||||||
|
log.Errorf("unable to build debug filter for '%s' : %s", filter, err)
|
||||||
|
}
|
||||||
|
n.Whitelist.B_Exprs = append(n.Whitelist.B_Exprs, expression)
|
||||||
|
n.logger.Debugf("adding expression %s to whitelists", filter)
|
||||||
valid = true
|
valid = true
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -48,6 +48,7 @@ func LoadStages(stageFiles []Stagefile, pctx *UnixParserCtx) ([]Node, error) {
|
||||||
log.Warningf("skip non yaml : %s", stageFile.Filename)
|
log.Warningf("skip non yaml : %s", stageFile.Filename)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
log.Debugf("loading parser file '%s'", stageFile)
|
||||||
st, err := os.Stat(stageFile.Filename)
|
st, err := os.Stat(stageFile.Filename)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("failed to stat %s : %v", stageFile, err)
|
return nil, fmt.Errorf("failed to stat %s : %v", stageFile, err)
|
||||||
|
|
|
@ -4,6 +4,7 @@ import (
|
||||||
"net"
|
"net"
|
||||||
|
|
||||||
"github.com/antonmedv/expr/vm"
|
"github.com/antonmedv/expr/vm"
|
||||||
|
"github.com/crowdsecurity/crowdsec/pkg/exprhelpers"
|
||||||
)
|
)
|
||||||
|
|
||||||
type Whitelist struct {
|
type Whitelist struct {
|
||||||
|
@ -13,5 +14,10 @@ type Whitelist struct {
|
||||||
Cidrs []string `yaml:"cidr,omitempty"`
|
Cidrs []string `yaml:"cidr,omitempty"`
|
||||||
B_Cidrs []*net.IPNet
|
B_Cidrs []*net.IPNet
|
||||||
Exprs []string `yaml:"expression,omitempty"`
|
Exprs []string `yaml:"expression,omitempty"`
|
||||||
B_Exprs []*vm.Program
|
B_Exprs []*ExprWhitelist
|
||||||
|
}
|
||||||
|
|
||||||
|
type ExprWhitelist struct {
|
||||||
|
Filter *vm.Program
|
||||||
|
ExprDebugger *exprhelpers.ExprDebugger // used to debug expression by printing the content of each variable of the expression
|
||||||
}
|
}
|
||||||
|
|
Loading…
Reference in a new issue