diff --git a/docs/references/output.md b/docs/references/output.md
index 8831f785f..bb7e750e4 100644
--- a/docs/references/output.md
+++ b/docs/references/output.md
@@ -3,7 +3,7 @@
The output mechanism is composed of plugins. In order to store the bans for {{blockers.htmlname}}, at least one backend plugin must be loaded. Else, bans will not be stored and decisions cannot be applied.
-Currently the supported {{backend.name}} are SQLite (default) and MySQL.
+Currently the supported backends are SQLite (default) and MySQL.
In order to filter which signals will be sent to which plugin, {{crowdsec.name}} use a system of `profile` that can allow to granularly process your bans and signals.
diff --git a/docs/references/parsers.md b/docs/references/parsers.md
index d887fbe1f..ea5e24d91 100644
--- a/docs/references/parsers.md
+++ b/docs/references/parsers.md
@@ -142,7 +142,37 @@ debug: true|false
_default: false_
If set to to `true`, enabled node level debugging.
-It is meant to help understanding parser node behaviour by providing contextual logging.
+It is meant to help understand parser node behavior by providing contextual logging:
+
+
+ assignments made by statics
+```
+DEBU[31-07-2020 16:36:28] + Processing 4 statics id=withered-rain name=crowdsecurity/nginx-logs stage=s01-parse
+DEBU[31-07-2020 16:36:28] .Meta[service] = 'http' id=withered-rain name=crowdsecurity/nginx-logs stage=s01-parse
+DEBU[31-07-2020 16:36:28] .Meta[source_ip] = '127.0.0.1' id=withered-rain name=crowdsecurity/nginx-logs stage=s01-parse
+DEBU[31-07-2020 16:36:28] .Meta[http_status] = '200' id=withered-rain name=crowdsecurity/nginx-logs stage=s01-parse
+DEBU[31-07-2020 16:36:28] .Meta[http_path] = '/' id=withered-rain name=crowdsecurity/nginx-logs stage=s01-parse
+```
+
+
+ assignments made by grok pattern
+```
+DEBU[31-07-2020 16:36:28] + Grok 'NGINXACCESS' returned 10 entries to merge in Parsed id=dark-glitter name=child-crowdsecurity/nginx-logs stage=s01-parse
+DEBU[31-07-2020 16:36:28] .Parsed['time_local'] = '21/Jul/2020:16:13:05 +0200' id=dark-glitter name=child-crowdsecurity/nginx-logs stage=s01-parse
+DEBU[31-07-2020 16:36:28] .Parsed['method'] = 'GET' id=dark-glitter name=child-crowdsecurity/nginx-logs stage=s01-parse
+DEBU[31-07-2020 16:36:28] .Parsed['request'] = '/' id=dark-glitter name=child-crowdsecurity/nginx-logs stage=s01-parse
+DEBU[31-07-2020 16:36:28] .Parsed['http_user_agent'] = 'curl/7.58.0' id=dark-glitter name=child-crowdsecurity/nginx-logs stage=s01-parse
+DEBU[31-07-2020 16:36:28] .Parsed['remote_addr'] = '127.0.0.1' id=dark-glitter name=child-crowdsecurity/nginx-logs stage=s01-parse
+```
+
+
+ debug of filters and expression results
+```
+DEBU[31-07-2020 16:36:28] eval(evt.Parsed.program == 'nginx') = TRUE id=withered-rain name=crowdsecurity/nginx-logs stage=s01-parse
+DEBU[31-07-2020 16:36:28] eval variables: id=withered-rain name=crowdsecurity/nginx-logs stage=s01-parse
+DEBU[31-07-2020 16:36:28] evt.Parsed.program = 'nginx' id=withered-rain name=crowdsecurity/nginx-logs stage=s01-parse
+```
+
### filter
diff --git a/docs/references/scenarios.md b/docs/references/scenarios.md
index 51634bfac..0f744d3c5 100644
--- a/docs/references/scenarios.md
+++ b/docs/references/scenarios.md
@@ -311,7 +311,16 @@ _default: false_
If set to to `true`, enabled scenario level debugging.
-It is meant to help understanding scenario behavior by providing contextual logging.
+It is meant to help understand scenario behavior by providing contextual logging:
+
+debug of filters and expression results
+```
+DEBU[31-07-2020 16:34:58] eval(evt.Meta.log_type in ["http_access-log", "http_error-log"] && any(File("bad_user_agents.txt"), {evt.Parsed.http_user_agent contains #})) = TRUE cfg=still-feather file=config/scenarios/http-bad-user-agent.yaml name=crowdsecurity/http-bad-user-agent
+DEBU[31-07-2020 16:34:58] eval variables: cfg=still-feather file=config/scenarios/http-bad-user-agent.yaml name=crowdsecurity/http-bad-user-agent
+DEBU[31-07-2020 16:34:58] evt.Meta.log_type = 'http_access-log' cfg=still-feather file=config/scenarios/http-bad-user-agent.yaml name=crowdsecurity/http-bad-user-agent
+DEBU[31-07-2020 16:34:58] evt.Parsed.http_user_agent = 'Mozilla/5.00 (Nikto/2.1.5) (Evasions:None) (Test:002810)' cfg=still-feather file=config/scenarios/http-bad-user-agent.yaml name=crowdsecurity/http-bad-user-agent
+```
+
### reprocess
diff --git a/pkg/exprhelpers/visitor.go b/pkg/exprhelpers/visitor.go
new file mode 100644
index 000000000..1028b02ed
--- /dev/null
+++ b/pkg/exprhelpers/visitor.go
@@ -0,0 +1,120 @@
+package exprhelpers
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+
+ "github.com/antonmedv/expr/parser"
+ "github.com/sirupsen/logrus"
+
+ "github.com/antonmedv/expr"
+ "github.com/antonmedv/expr/ast"
+ "github.com/antonmedv/expr/vm"
+)
+
+/*
+Visitor is used to reconstruct variables along with the properties referenced in an expr filter.
+Thus, we can debug an expr filter by displaying the contents of all variables present in the filter.
+*/
+type visitor struct {
+ newVar bool
+ currentID string
+ properties []string
+ vars []string
+}
+
+/*
+Enter should be present for the interface but is never used
+*/
+func (v *visitor) Enter(node *ast.Node) {}
+
+/*
+Exit is called when running ast.Walk(node, visitor), each time a node exits.
+So we have the node information and we can get the identifier (first level of the struct)
+and its properties to reconstruct the complete variable.
+*/
+func (v *visitor) Exit(node *ast.Node) {
+ if n, ok := (*node).(*ast.IdentifierNode); ok {
+ if !v.newVar {
+ v.newVar = true
+ v.currentID = n.Value
+ } else {
+ v.newVar = false
+ fullVar := fmt.Sprintf("%s.%s", v.currentID, strings.Join(v.properties, "."))
+ v.vars = append(v.vars, fullVar)
+ v.properties = []string{}
+ v.currentID = n.Value
+ }
+ } else if n, ok := (*node).(*ast.PropertyNode); ok {
+ v.properties = append(v.properties, n.Property)
+ }
+}
+
+/*
+Build reconstructs all the variables used in a filter (to display their content later).
+*/
+func (v *visitor) Build(filter string, exprEnv expr.Option) (*ExprDebugger, error) {
+ var expressions []*expression
+ ret := &ExprDebugger{
+ filter: filter,
+ }
+ v.newVar = false
+ tree, err := parser.Parse(filter)
+ if err != nil {
+ return nil, err
+ }
+ ast.Walk(&tree.Node, v)
+ fullVar := fmt.Sprintf("%s.%s", v.currentID, strings.Join(v.properties, "."))
+ v.vars = append(v.vars, fullVar)
+ v.properties = []string{}
+ v.currentID = ""
+ for _, variable := range v.vars {
+ debugFilter, err := expr.Compile(variable, exprEnv)
+ tmpExpression := &expression{
+ variable,
+ debugFilter,
+ }
+ expressions = append(expressions, tmpExpression)
+ if err != nil {
+ return nil, fmt.Errorf("compilation of variable '%s' failed: %v", variable, err)
+ }
+ }
+ ret.expression = expressions
+ return ret, nil
+}
+
+// ExprDebugger contains the list of expressions to be run when debugging an expression filter
+type ExprDebugger struct {
+ filter string
+ expression []*expression
+}
+
+// expression is the structure that represents the variable in string and compiled format
+type expression struct {
+ Str string
+ Compiled *vm.Program
+}
+
+/*
+Run displays the content of each variable of a filter by evaluating them with expr,
+against the expr environment given in parameter
+*/
+func (e *ExprDebugger) Run(logger *logrus.Entry, filterResult bool, exprEnv map[string]interface{}) {
+ logger.Debugf("eval(%s) = %s", e.filter, strings.ToUpper(strconv.FormatBool(filterResult)))
+ logger.Debugf("eval variables:")
+ for _, expression := range e.expression {
+ debug, err := expr.Run(expression.Compiled, exprEnv)
+ if err != nil {
+ logger.Errorf("unable to print debug expression for '%s': %s", expression.Str, err)
+ }
+ logger.Debugf(" %s = '%s'", expression.Str, debug)
+ }
+}
+
+// NewDebugger is the exported function that builds the debugger expressions
+func NewDebugger(filter string, exprEnv expr.Option) (*ExprDebugger, error) {
+ visitor := &visitor{}
+ exprDebugger, err := visitor.Build(filter, exprEnv)
+ return exprDebugger, err
+}
diff --git a/pkg/leakybucket/manager.go b/pkg/leakybucket/manager.go
index f6ec8c8ca..7a8f2a5a6 100644
--- a/pkg/leakybucket/manager.go
+++ b/pkg/leakybucket/manager.go
@@ -31,36 +31,37 @@ import (
// BucketFactory struct holds all fields for any bucket configuration. This is to have a
// generic struct for buckets. This can be seen as a bucket factory.
type BucketFactory struct {
- FormatVersion string `yaml:"format"`
- Author string `yaml:"author"`
- Description string `yaml:"description"`
- References []string `yaml:"references"`
- Type string `yaml:"type"` //Type can be : leaky, counter, trigger. It determines the main bucket characteristics
- Name string `yaml:"name"` //Name of the bucket, used later in log and user-messages. Should be unique
- Capacity int `yaml:"capacity"` //Capacity is applicable to leaky buckets and determines the "burst" capacity
- LeakSpeed string `yaml:"leakspeed"` //Leakspeed is a float representing how many events per second leak out of the bucket
- Duration string `yaml:"duration"` //Duration allows 'counter' buckets to have a fixed life-time
- Filter string `yaml:"filter"` //Filter is an expr that determines if an event is elligible for said bucket. Filter is evaluated against the Event struct
- GroupBy string `yaml:"groupby,omitempty"` //groupy is an expr that allows to determine the partitions of the bucket. A common example is the source_ip
- Distinct string `yaml:"distinct"` //Distinct, when present, adds a `Pour()` processor that will only pour uniq items (based on uniq_filter expr result)
- Debug bool `yaml:"debug"` //Debug, when set to true, will enable debugging for _this_ scenario specifically
- Labels map[string]string `yaml:"labels"` //Labels is K:V list aiming at providing context the overflow
- Blackhole string `yaml:"blackhole,omitempty"` //Blackhole is a duration that, if present, will prevent same bucket partition to overflow more often than $duration
- logger *log.Entry `yaml:"-"` //logger is bucket-specific logger (used by Debug as well)
- Reprocess bool `yaml:"reprocess"` //Reprocess, if true, will for the bucket to be re-injected into processing chain
- CacheSize int `yaml:"cache_size"` //CacheSize, if > 0, limits the size of in-memory cache of the bucket
- Profiling bool `yaml:"profiling"` //Profiling, if true, will make the bucket record pours/overflows/etc.
- OverflowFilter string `yaml:"overflow_filter"` //OverflowFilter if present, is a filter that must return true for the overflow to go through
- BucketName string `yaml:"-"`
- Filename string `yaml:"-"`
- RunTimeFilter *vm.Program `json:"-"`
- RunTimeGroupBy *vm.Program `json:"-"`
- Data []*types.DataSource `yaml:"data,omitempty"`
- leakspeed time.Duration //internal representation of `Leakspeed`
- duration time.Duration //internal representation of `Duration`
- ret chan types.Event //the bucket-specific output chan for overflows
- processors []Processor //processors is the list of hooks for pour/overflow/create (cf. uniq, blackhole etc.)
- output bool //??
+ FormatVersion string `yaml:"format"`
+ Author string `yaml:"author"`
+ Description string `yaml:"description"`
+ References []string `yaml:"references"`
+ Type string `yaml:"type"` //Type can be : leaky, counter, trigger. It determines the main bucket characteristics
+ Name string `yaml:"name"` //Name of the bucket, used later in log and user-messages. Should be unique
+ Capacity int `yaml:"capacity"` //Capacity is applicable to leaky buckets and determines the "burst" capacity
+ LeakSpeed string `yaml:"leakspeed"` //Leakspeed is a float representing how many events per second leak out of the bucket
+ Duration string `yaml:"duration"` //Duration allows 'counter' buckets to have a fixed life-time
+ Filter string `yaml:"filter"` //Filter is an expr that determines if an event is elligible for said bucket. Filter is evaluated against the Event struct
+ GroupBy string `yaml:"groupby,omitempty"` //groupy is an expr that allows to determine the partitions of the bucket. A common example is the source_ip
+ Distinct string `yaml:"distinct"` //Distinct, when present, adds a `Pour()` processor that will only pour uniq items (based on uniq_filter expr result)
+ Debug bool `yaml:"debug"` //Debug, when set to true, will enable debugging for _this_ scenario specifically
+ Labels map[string]string `yaml:"labels"` //Labels is K:V list aiming at providing context the overflow
+ Blackhole string `yaml:"blackhole,omitempty"` //Blackhole is a duration that, if present, will prevent same bucket partition to overflow more often than $duration
+ logger *log.Entry `yaml:"-"` //logger is bucket-specific logger (used by Debug as well)
+ Reprocess bool `yaml:"reprocess"` //Reprocess, if true, will for the bucket to be re-injected into processing chain
+ CacheSize int `yaml:"cache_size"` //CacheSize, if > 0, limits the size of in-memory cache of the bucket
+ Profiling bool `yaml:"profiling"` //Profiling, if true, will make the bucket record pours/overflows/etc.
+ OverflowFilter string `yaml:"overflow_filter"` //OverflowFilter if present, is a filter that must return true for the overflow to go through
+ BucketName string `yaml:"-"`
+ Filename string `yaml:"-"`
+ RunTimeFilter *vm.Program `json:"-"`
+ ExprDebugger *exprhelpers.ExprDebugger `yaml:"-" json:"-"` // used to debug expression by printing the content of each variable of the expression
+ RunTimeGroupBy *vm.Program `json:"-"`
+ Data []*types.DataSource `yaml:"data,omitempty"`
+ leakspeed time.Duration //internal representation of `Leakspeed`
+ duration time.Duration //internal representation of `Duration`
+ ret chan types.Event //the bucket-specific output chan for overflows
+ processors []Processor //processors is the list of hooks for pour/overflow/create (cf. uniq, blackhole etc.)
+ output bool //??
}
func ValidateFactory(b *BucketFactory) error {
@@ -232,6 +233,12 @@ func LoadBucket(g *BucketFactory, dataFolder string) error {
if err != nil {
return fmt.Errorf("invalid filter '%s' in %s : %v", g.Filter, g.Filename, err)
}
+ if g.Debug {
+ g.ExprDebugger, err = exprhelpers.NewDebugger(g.Filter, expr.Env(exprhelpers.GetExprEnv(map[string]interface{}{"evt": &types.Event{}})))
+ if err != nil {
+ log.Errorf("unable to build debug filter for '%s' : %s", g.Filter, err)
+ }
+ }
if g.GroupBy != "" {
g.RunTimeGroupBy, err = expr.Compile(g.GroupBy, expr.Env(exprhelpers.GetExprEnv(map[string]interface{}{"evt": &types.Event{}})))
@@ -500,13 +507,13 @@ func PourItemToHolders(parsed types.Event, holders []BucketFactory, buckets *Buc
holder.logger.Errorf("unexpected non-bool return : %T", output)
log.Fatalf("Filter issue")
}
+
+ if holder.Debug {
+ holder.ExprDebugger.Run(holder.logger, condition, exprhelpers.GetExprEnv(map[string]interface{}{"evt": &parsed}))
+ }
if !condition {
- holder.logger.Debugf("eval(FALSE) %s", holder.Filter)
- //log.Debugf("%s -> FALSE", holder.Filter)
- //holder.logger.Debugf("Filter eval failed")
+ holder.logger.Debugf("Event leaving node : ko")
continue
- } else {
- holder.logger.Debugf("eval(TRUE) %s", holder.Filter)
}
}
diff --git a/pkg/parser/node.go b/pkg/parser/node.go
index 157da54b7..aa71ade19 100644
--- a/pkg/parser/node.go
+++ b/pkg/parser/node.go
@@ -5,6 +5,7 @@ import (
"net"
"github.com/antonmedv/expr"
+
"github.com/antonmedv/expr/vm"
"github.com/crowdsecurity/crowdsec/pkg/exprhelpers"
"github.com/crowdsecurity/crowdsec/pkg/types"
@@ -36,9 +37,9 @@ type Node struct {
rn string //this is only for us in debug, a random generated name for each node
//Filter is executed at runtime (with current log line as context)
//and must succeed or node is exited
- Filter string `yaml:"filter,omitempty"`
- RunTimeFilter *vm.Program `yaml:"-" json:"-"` //the actual compiled filter
-
+ Filter string `yaml:"filter,omitempty"`
+ RunTimeFilter *vm.Program `yaml:"-" json:"-"` //the actual compiled filter
+ ExprDebugger *exprhelpers.ExprDebugger `yaml:"-" json:"-"` //used to debug expression by printing the content of each variable of the expression
//If node has leafs, execute all of them until one asks for a 'break'
SuccessNodes []Node `yaml:"nodes,omitempty"`
//Flag used to describe when to 'break' or return an 'error'
@@ -121,11 +122,13 @@ func (n *Node) process(p *types.Event, ctx UnixParserCtx) (bool, error) {
clog.Debugf("Event leaving node : ko")
return false, nil
}
+
switch out := output.(type) {
case bool:
- /* filter returned false, don't process Node */
+ if n.Debug {
+ n.ExprDebugger.Run(clog, out, exprhelpers.GetExprEnv(map[string]interface{}{"evt": p}))
+ }
if !out {
- clog.Debugf("eval(FALSE) '%s'", n.Filter)
clog.Debugf("Event leaving node : ko")
return false, nil
}
@@ -135,7 +138,6 @@ func (n *Node) process(p *types.Event, ctx UnixParserCtx) (bool, error) {
return false, nil
}
NodeState = true
- clog.Debugf("eval(TRUE) '%s'", n.Filter)
} else {
clog.Debugf("Node has not filter, enter")
NodeState = true
@@ -183,7 +185,7 @@ func (n *Node) process(p *types.Event, ctx UnixParserCtx) (bool, error) {
}
/* run whitelist expression tests anyway */
for eidx, e := range n.Whitelist.B_Exprs {
- output, err := expr.Run(e, exprhelpers.GetExprEnv(map[string]interface{}{"evt": p}))
+ output, err := expr.Run(e.Filter, exprhelpers.GetExprEnv(map[string]interface{}{"evt": p}))
if err != nil {
clog.Warningf("failed to run whitelist expr : %v", err)
clog.Debugf("Event leaving node : ko")
@@ -191,7 +193,9 @@ func (n *Node) process(p *types.Event, ctx UnixParserCtx) (bool, error) {
}
switch out := output.(type) {
case bool:
- /* filter returned false, don't process Node */
+ if n.Debug {
+ e.ExprDebugger.Run(clog, out, exprhelpers.GetExprEnv(map[string]interface{}{"evt": p}))
+ }
if out {
clog.Debugf("Event is whitelisted by Expr !")
p.Whitelisted = true
@@ -377,6 +381,14 @@ func (n *Node) compile(pctx *UnixParserCtx) error {
if err != nil {
return fmt.Errorf("compilation of '%s' failed: %v", n.Filter, err)
}
+
+ if n.Debug {
+ n.ExprDebugger, err = exprhelpers.NewDebugger(n.Filter, expr.Env(exprhelpers.GetExprEnv(map[string]interface{}{"evt": &types.Event{}})))
+ if err != nil {
+ log.Errorf("unable to build debug filter for '%s' : %s", n.Filter, err)
+ }
+ }
+
}
/* handle pattern_syntax and groks */
@@ -474,13 +486,18 @@ func (n *Node) compile(pctx *UnixParserCtx) error {
n.logger.Debugf("adding cidr %s to whitelists", tnet)
valid = true
}
- for _, v := range n.Whitelist.Exprs {
- expr, err := expr.Compile(v, expr.Env(exprhelpers.GetExprEnv(map[string]interface{}{"evt": &types.Event{}})))
+ for _, filter := range n.Whitelist.Exprs {
+ expression := &types.ExprWhitelist{}
+ expression.Filter, err = expr.Compile(filter, expr.Env(exprhelpers.GetExprEnv(map[string]interface{}{"evt": &types.Event{}})))
if err != nil {
- n.logger.Fatalf("Unable to compile whitelist expression '%s' : %v.", v, err)
+ n.logger.Fatalf("Unable to compile whitelist expression '%s' : %v.", filter, err)
}
- n.Whitelist.B_Exprs = append(n.Whitelist.B_Exprs, expr)
- n.logger.Debugf("adding expression %s to whitelists", v)
+ expression.ExprDebugger, err = exprhelpers.NewDebugger(filter, expr.Env(exprhelpers.GetExprEnv(map[string]interface{}{"evt": &types.Event{}})))
+ if err != nil {
+ log.Errorf("unable to build debug filter for '%s' : %s", filter, err)
+ }
+ n.Whitelist.B_Exprs = append(n.Whitelist.B_Exprs, expression)
+ n.logger.Debugf("adding expression %s to whitelists", filter)
valid = true
}
diff --git a/pkg/parser/stage.go b/pkg/parser/stage.go
index f26594a95..e2da66e59 100644
--- a/pkg/parser/stage.go
+++ b/pkg/parser/stage.go
@@ -48,6 +48,7 @@ func LoadStages(stageFiles []Stagefile, pctx *UnixParserCtx) ([]Node, error) {
log.Warningf("skip non yaml : %s", stageFile.Filename)
continue
}
+ log.Debugf("loading parser file '%s'", stageFile)
st, err := os.Stat(stageFile.Filename)
if err != nil {
return nil, fmt.Errorf("failed to stat %s : %v", stageFile, err)
diff --git a/pkg/types/whitelist.go b/pkg/types/whitelist.go
index 8c856fd26..bc8723222 100644
--- a/pkg/types/whitelist.go
+++ b/pkg/types/whitelist.go
@@ -4,6 +4,7 @@ import (
"net"
"github.com/antonmedv/expr/vm"
+ "github.com/crowdsecurity/crowdsec/pkg/exprhelpers"
)
type Whitelist struct {
@@ -13,5 +14,10 @@ type Whitelist struct {
Cidrs []string `yaml:"cidr,omitempty"`
B_Cidrs []*net.IPNet
Exprs []string `yaml:"expression,omitempty"`
- B_Exprs []*vm.Program
+ B_Exprs []*ExprWhitelist
+}
+
+type ExprWhitelist struct {
+ Filter *vm.Program
+ ExprDebugger *exprhelpers.ExprDebugger // used to debug expression by printing the content of each variable of the expression
}