Dedicated whitelist metrics (#2813)

* add proper whitelist metrics: both a dedicated table and an extension to the acquis metrics to track discarded/whitelisted lines
Thibault "bui" Koechlin 2024-02-06 18:04:17 +01:00 committed by GitHub
parent 4e724f6c0a
commit 3208a40ef3
8 changed files with 178 additions and 58 deletions


@@ -11,7 +11,7 @@ run:
linters-settings:
cyclop:
# lower this after refactoring
max-complexity: 66
max-complexity: 70
gci:
sections:
@@ -22,11 +22,11 @@ linters-settings:
gocognit:
# lower this after refactoring
min-complexity: 145
min-complexity: 150
gocyclo:
# lower this after refactoring
min-complexity: 64
min-complexity: 70
funlen:
# Checks the number of lines in a function.


@@ -21,21 +21,22 @@ import (
)
type (
statAcquis map[string]map[string]int
statParser map[string]map[string]int
statBucket map[string]map[string]int
statLapi map[string]map[string]int
statLapiMachine map[string]map[string]map[string]int
statLapiBouncer map[string]map[string]map[string]int
statAcquis map[string]map[string]int
statParser map[string]map[string]int
statBucket map[string]map[string]int
statWhitelist map[string]map[string]map[string]int
statLapi map[string]map[string]int
statLapiMachine map[string]map[string]map[string]int
statLapiBouncer map[string]map[string]map[string]int
statLapiDecision map[string]struct {
NonEmpty int
Empty int
}
statDecision map[string]map[string]map[string]int
statDecision map[string]map[string]map[string]int
statAppsecEngine map[string]map[string]int
statAppsecRule map[string]map[string]map[string]int
statAlert map[string]int
statStash map[string]struct {
statAppsecRule map[string]map[string]map[string]int
statAlert map[string]int
statStash map[string]struct {
Type string
Count int
}
@@ -62,6 +63,7 @@ func NewMetricStore() metricStore {
"stash": statStash{},
"appsec-engine": statAppsecEngine{},
"appsec-rule": statAppsecRule{},
"whitelists": statWhitelist{},
}
}
@@ -111,6 +113,7 @@ func (ms metricStore) Fetch(url string) error {
mAppsecRule := ms["appsec-rule"].(statAppsecRule)
mAlert := ms["alerts"].(statAlert)
mStash := ms["stash"].(statStash)
mWhitelist := ms["whitelists"].(statWhitelist)
for idx, fam := range result {
if !strings.HasPrefix(fam.Name, "cs_") {
@@ -160,7 +163,9 @@ func (ms metricStore) Fetch(url string) error {
ival := int(fval)
switch fam.Name {
/*buckets*/
//
// buckets
//
case "cs_bucket_created_total":
if _, ok := mBucket[name]; !ok {
mBucket[name] = make(map[string]int)
@@ -190,7 +195,9 @@ func (ms metricStore) Fetch(url string) error {
mBucket[name] = make(map[string]int)
}
mBucket[name]["underflow"] += ival
/*acquis*/
//
// parsers
//
case "cs_parser_hits_total":
if _, ok := mAcquis[source]; !ok {
mAcquis[source] = make(map[string]int)
@@ -221,6 +228,33 @@ func (ms metricStore) Fetch(url string) error {
mParser[name] = make(map[string]int)
}
mParser[name]["unparsed"] += ival
//
// whitelists
//
case "cs_node_wl_hits_total":
if _, ok := mWhitelist[name]; !ok {
mWhitelist[name] = make(map[string]map[string]int)
}
if _, ok := mWhitelist[name][reason]; !ok {
mWhitelist[name][reason] = make(map[string]int)
}
mWhitelist[name][reason]["hits"] += ival
case "cs_node_wl_hits_ok_total":
if _, ok := mWhitelist[name]; !ok {
mWhitelist[name] = make(map[string]map[string]int)
}
if _, ok := mWhitelist[name][reason]; !ok {
mWhitelist[name][reason] = make(map[string]int)
}
mWhitelist[name][reason]["whitelisted"] += ival
// track as well whitelisted lines at acquis level
if _, ok := mAcquis[source]; !ok {
mAcquis[source] = make(map[string]int)
}
mAcquis[source]["whitelisted"] += ival
//
// lapi
//
case "cs_lapi_route_requests_total":
if _, ok := mLapi[route]; !ok {
mLapi[route] = make(map[string]int)
@@ -256,6 +290,9 @@ func (ms metricStore) Fetch(url string) error {
x.NonEmpty += ival
}
mLapiDecision[bouncer] = x
//
// decisions
//
case "cs_active_decisions":
if _, ok := mDecision[reason]; !ok {
mDecision[reason] = make(map[string]map[string]int)
@@ -265,15 +302,18 @@ func (ms metricStore) Fetch(url string) error {
}
mDecision[reason][origin][action] += ival
case "cs_alerts":
/*if _, ok := mAlert[scenario]; !ok {
mAlert[scenario] = make(map[string]int)
}*/
mAlert[reason] += ival
//
// stash
//
case "cs_cache_size":
mStash[name] = struct {
Type string
Count int
}{Type: mtype, Count: ival}
//
// appsec
//
case "cs_appsec_reqs_total":
if _, ok := mAppsecEngine[metric.Labels["appsec_engine"]]; !ok {
mAppsecEngine[metric.Labels["appsec_engine"]] = make(map[string]int, 0)
@@ -392,15 +432,15 @@ func (cli *cliMetrics) show(sections []string, url string, noUnit bool) error {
func (cli *cliMetrics) NewCommand() *cobra.Command {
var (
url string
url string
noUnit bool
)
cmd := &cobra.Command{
Use: "metrics",
Short: "Display crowdsec prometheus metrics.",
Long: `Fetch metrics from a Local API server and display them`,
Example: `# Show all Metrics, skip empty tables (same as "cscli metrics show")
Use: "metrics",
Short: "Display crowdsec prometheus metrics.",
Long: `Fetch metrics from a Local API server and display them`,
Example: `# Show all Metrics, skip empty tables (same as "cscli metrics show")
cscli metrics
# Show only some metrics, connect to a different url
@@ -431,7 +471,7 @@ func (cli *cliMetrics) expandSectionGroups(args []string) []string {
for _, section := range args {
switch section {
case "engine":
ret = append(ret, "acquisition", "parsers", "buckets", "stash")
ret = append(ret, "acquisition", "parsers", "buckets", "stash", "whitelists")
case "lapi":
ret = append(ret, "alerts", "decisions", "lapi", "lapi-bouncer", "lapi-decisions", "lapi-machine")
case "appsec":
@@ -446,15 +486,15 @@ func (cli *cliMetrics) expandSectionGroups(args []string) []string {
func (cli *cliMetrics) newShowCmd() *cobra.Command {
var (
url string
url string
noUnit bool
)
cmd := &cobra.Command{
Use: "show [type]...",
Short: "Display all or part of the available metrics.",
Long: `Fetch metrics from a Local API server and display them, optionally filtering on specific types.`,
Example: `# Show all Metrics, skip empty tables
Use: "show [type]...",
Short: "Display all or part of the available metrics.",
Long: `Fetch metrics from a Local API server and display them, optionally filtering on specific types.`,
Example: `# Show all Metrics, skip empty tables
cscli metrics show
# Use an alias: "engine", "lapi" or "appsec" to show a group of metrics
@@ -482,9 +522,9 @@ cscli metrics show acquisition parsers buckets stash -o json`,
func (cli *cliMetrics) list() error {
type metricType struct {
Type string `json:"type" yaml:"type"`
Title string `json:"title" yaml:"title"`
Description string `json:"description" yaml:"description"`
Type string `json:"type" yaml:"type"`
Title string `json:"title" yaml:"title"`
Description string `json:"description" yaml:"description"`
}
var allMetrics []metricType
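
Note: the new switch cases above fold the two whitelist counter families into a nested map keyed first by whitelist node name, then by reason. A minimal, self-contained sketch of that bookkeeping (illustrative only, not the actual cscli code; the node name "mywl" and the reason string are invented):

// Sketch of the accumulation done by the "whitelists" cases in Fetch():
// statWhitelist is map[whitelist name]map[reason]map[counter]int, and the
// "ok" counter also feeds the new "whitelisted" column of the acquis table.
package main

import "fmt"

type statWhitelist map[string]map[string]map[string]int

func record(m statWhitelist, name, reason, counter string, val int) {
	if _, ok := m[name]; !ok {
		m[name] = make(map[string]map[string]int)
	}
	if _, ok := m[name][reason]; !ok {
		m[name][reason] = make(map[string]int)
	}
	m[name][reason][counter] += val
}

func main() {
	m := statWhitelist{}
	// cs_node_wl_hits_total increments "hits", cs_node_wl_hits_ok_total "whitelisted"
	record(m, "mywl", "private ranges", "hits", 10)
	record(m, "mywl", "private ranges", "whitelisted", 2)
	fmt.Println(m) // map[mywl:map[private ranges:map[hits:10 whitelisted:2]]]
}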


@@ -45,6 +45,38 @@ func lapiMetricsToTable(t *table.Table, stats map[string]map[string]map[string]i
return numRows
}
func wlMetricsToTable(t *table.Table, stats map[string]map[string]map[string]int, noUnit bool) (int, error) {
if t == nil {
return 0, fmt.Errorf("nil table")
}
numRows := 0
for _, name := range maptools.SortedKeys(stats) {
for _, reason := range maptools.SortedKeys(stats[name]) {
row := make([]string, 4)
row[0] = name
row[1] = reason
row[2] = "-"
row[3] = "-"
for _, action := range maptools.SortedKeys(stats[name][reason]) {
value := stats[name][reason][action]
if action == "whitelisted" {
row[3] = fmt.Sprintf("%d", value)
} else if action == "hits" {
row[2] = fmt.Sprintf("%d", value)
} else {
log.Debugf("unexpected counter '%s' for whitelists = %d", action, value)
}
}
t.AddRow(row...)
numRows++
}
}
return numRows, nil
}
func metricsToTable(t *table.Table, stats map[string]map[string]int, keys []string, noUnit bool) (int, error) {
if t == nil {
return 0, fmt.Errorf("nil table")
@@ -95,7 +127,7 @@ func (s statBucket) Table(out io.Writer, noUnit bool, showEmpty bool) {
log.Warningf("while collecting bucket stats: %s", err)
} else if numRows > 0 || showEmpty {
title, _ := s.Description()
renderTableTitle(out, "\n" + title + ":")
renderTableTitle(out, "\n"+title+":")
t.Render()
}
}
@@ -108,16 +140,16 @@ func (s statAcquis) Description() (string, string) {
func (s statAcquis) Table(out io.Writer, noUnit bool, showEmpty bool) {
t := newTable(out)
t.SetRowLines(false)
t.SetHeaders("Source", "Lines read", "Lines parsed", "Lines unparsed", "Lines poured to bucket")
t.SetHeaders("Source", "Lines read", "Lines parsed", "Lines unparsed", "Lines poured to bucket", "Lines whitelisted")
t.SetAlignment(table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft)
keys := []string{"reads", "parsed", "unparsed", "pour"}
keys := []string{"reads", "parsed", "unparsed", "pour", "whitelisted"}
if numRows, err := metricsToTable(t, s, keys, noUnit); err != nil {
log.Warningf("while collecting acquis stats: %s", err)
} else if numRows > 0 || showEmpty {
title, _ := s.Description()
renderTableTitle(out, "\n" + title + ":")
renderTableTitle(out, "\n"+title+":")
t.Render()
}
}
@@ -137,7 +169,7 @@ func (s statAppsecEngine) Table(out io.Writer, noUnit bool, showEmpty bool) {
log.Warningf("while collecting appsec stats: %s", err)
} else if numRows > 0 || showEmpty {
title, _ := s.Description()
renderTableTitle(out, "\n" + title + ":")
renderTableTitle(out, "\n"+title+":")
t.Render()
}
}
@@ -156,7 +188,7 @@ func (s statAppsecRule) Table(out io.Writer, noUnit bool, showEmpty bool) {
keys := []string{"triggered"}
if numRows, err := metricsToTable(t, appsecEngineRulesStats, keys, noUnit); err != nil {
log.Warningf("while collecting appsec rules stats: %s", err)
} else if numRows > 0 || showEmpty{
} else if numRows > 0 || showEmpty {
renderTableTitle(out, fmt.Sprintf("\nAppsec '%s' Rules Metrics:", appsecEngine))
t.Render()
}
@@ -164,6 +196,26 @@ func (s statAppsecRule) Table(out io.Writer, noUnit bool, showEmpty bool) {
}
func (s statWhitelist) Description() (string, string) {
return "Whitelist Metrics",
`Tracks the number of events processed and possibly whitelisted by each parser whitelist.`
}
func (s statWhitelist) Table(out io.Writer, noUnit bool, showEmpty bool) {
t := newTable(out)
t.SetRowLines(false)
t.SetHeaders("Whitelist", "Reason", "Hits", "Whitelisted")
t.SetAlignment(table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft)
if numRows, err := wlMetricsToTable(t, s, noUnit); err != nil {
log.Warningf("while collecting whitelist stats: %s", err)
} else if numRows > 0 || showEmpty {
title, _ := s.Description()
renderTableTitle(out, "\n"+title+":")
t.Render()
}
}
func (s statParser) Description() (string, string) {
return "Parser Metrics",
`Tracks the number of events processed by each parser and indicates success or failure. Zero parsed lines means the parser(s) failed. Non-zero unparsed lines are fine as crowdsec selects relevant lines.`
@@ -181,7 +233,7 @@ func (s statParser) Table(out io.Writer, noUnit bool, showEmpty bool) {
log.Warningf("while collecting parsers stats: %s", err)
} else if numRows > 0 || showEmpty {
title, _ := s.Description()
renderTableTitle(out, "\n" + title + ":")
renderTableTitle(out, "\n"+title+":")
t.Render()
}
}
@@ -213,7 +265,7 @@ func (s statStash) Table(out io.Writer, noUnit bool, showEmpty bool) {
}
if numRows > 0 || showEmpty {
title, _ := s.Description()
renderTableTitle(out, "\n" + title + ":")
renderTableTitle(out, "\n"+title+":")
t.Render()
}
}
@@ -254,7 +306,7 @@ func (s statLapi) Table(out io.Writer, noUnit bool, showEmpty bool) {
if numRows > 0 || showEmpty {
title, _ := s.Description()
renderTableTitle(out, "\n" + title + ":")
renderTableTitle(out, "\n"+title+":")
t.Render()
}
}
@@ -272,9 +324,9 @@ func (s statLapiMachine) Table(out io.Writer, noUnit bool, showEmpty bool) {
numRows := lapiMetricsToTable(t, s)
if numRows > 0 || showEmpty{
if numRows > 0 || showEmpty {
title, _ := s.Description()
renderTableTitle(out, "\n" + title + ":")
renderTableTitle(out, "\n"+title+":")
t.Render()
}
}
@@ -294,7 +346,7 @@ func (s statLapiBouncer) Table(out io.Writer, noUnit bool, showEmpty bool) {
if numRows > 0 || showEmpty {
title, _ := s.Description()
renderTableTitle(out, "\n" + title + ":")
renderTableTitle(out, "\n"+title+":")
t.Render()
}
}
@@ -320,9 +372,9 @@ func (s statLapiDecision) Table(out io.Writer, noUnit bool, showEmpty bool) {
numRows++
}
if numRows > 0 || showEmpty{
if numRows > 0 || showEmpty {
title, _ := s.Description()
renderTableTitle(out, "\n" + title + ":")
renderTableTitle(out, "\n"+title+":")
t.Render()
}
}
@@ -353,9 +405,9 @@ func (s statDecision) Table(out io.Writer, noUnit bool, showEmpty bool) {
}
}
if numRows > 0 || showEmpty{
if numRows > 0 || showEmpty {
title, _ := s.Description()
renderTableTitle(out, "\n" + title + ":")
renderTableTitle(out, "\n"+title+":")
t.Render()
}
}
@@ -380,9 +432,9 @@ func (s statAlert) Table(out io.Writer, noUnit bool, showEmpty bool) {
numRows++
}
if numRows > 0 || showEmpty{
if numRows > 0 || showEmpty {
title, _ := s.Description()
renderTableTitle(out, "\n" + title + ":")
renderTableTitle(out, "\n"+title+":")
t.Render()
}
}
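
Note: a rough stand-in for the column layout produced by wlMetricsToTable/statWhitelist.Table, using only the standard library. The real code goes through cscli's internal table helper; the row values here are invented:

package main

import (
	"fmt"
	"os"
	"text/tabwriter"
)

func main() {
	// Same four columns as statWhitelist.Table: Whitelist, Reason, Hits, Whitelisted.
	w := tabwriter.NewWriter(os.Stdout, 0, 4, 2, ' ', 0)
	fmt.Fprintln(w, "Whitelist\tReason\tHits\tWhitelisted")
	fmt.Fprintln(w, "mywl\tprivate ranges\t10\t2")
	w.Flush()
	// Output:
	// Whitelist  Reason          Hits  Whitelisted
	// mywl       private ranges  10    2
}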


@@ -161,7 +161,7 @@ func registerPrometheus(config *csconfig.PrometheusCfg) {
leaky.BucketsUnderflow, leaky.BucketsCanceled, leaky.BucketsInstantiation, leaky.BucketsOverflow,
v1.LapiRouteHits,
leaky.BucketsCurrentCount,
cache.CacheMetrics, exprhelpers.RegexpCacheMetrics,
cache.CacheMetrics, exprhelpers.RegexpCacheMetrics, parser.NodesWlHitsOk, parser.NodesWlHits,
)
} else {
log.Infof("Loading prometheus collectors")
@@ -170,7 +170,7 @@ func registerPrometheus(config *csconfig.PrometheusCfg) {
globalCsInfo, globalParsingHistogram, globalPourHistogram,
v1.LapiRouteHits, v1.LapiMachineHits, v1.LapiBouncerHits, v1.LapiNilDecisions, v1.LapiNonNilDecisions, v1.LapiResponseTime,
leaky.BucketsPour, leaky.BucketsUnderflow, leaky.BucketsCanceled, leaky.BucketsInstantiation, leaky.BucketsOverflow, leaky.BucketsCurrentCount,
globalActiveDecisions, globalAlerts,
globalActiveDecisions, globalAlerts, parser.NodesWlHitsOk, parser.NodesWlHits,
cache.CacheMetrics, exprhelpers.RegexpCacheMetrics,
)
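
Note: both collector sets in registerPrometheus (presumably the aggregated set and the full set under the else branch) now include parser.NodesWlHitsOk and parser.NodesWlHits, so the whitelist counters are exposed on /metrics whichever prometheus level the engine is configured with.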


@@ -168,9 +168,9 @@ func (n *Node) process(p *types.Event, ctx UnixParserCtx, expressionEnv map[stri
NodesHits.With(prometheus.Labels{"source": p.Line.Src, "type": p.Line.Module, "name": n.Name}).Inc()
}
exprErr := error(nil)
isWhitelisted := n.CheckIPsWL(p.ParseIPSources())
isWhitelisted := n.CheckIPsWL(p)
if !isWhitelisted {
isWhitelisted, exprErr = n.CheckExprWL(cachedExprEnv)
isWhitelisted, exprErr = n.CheckExprWL(cachedExprEnv, p)
}
if exprErr != nil {
// Previous code returned nil if there was an error, so we keep this behavior


@@ -221,6 +221,24 @@ var NodesHitsKo = prometheus.NewCounterVec(
[]string{"source", "type", "name"},
)
//
var NodesWlHitsOk = prometheus.NewCounterVec(
prometheus.CounterOpts{
Name: "cs_node_wl_hits_ok_total",
Help: "Total events successfully whitelisted by node.",
},
[]string{"source", "type", "name", "reason"},
)
var NodesWlHits = prometheus.NewCounterVec(
prometheus.CounterOpts{
Name: "cs_node_wl_hits_total",
Help: "Total events processed by whitelist node.",
},
[]string{"source", "type", "name", "reason"},
)
func stageidx(stage string, stages []string) int {
for i, v := range stages {
if stage == v {
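
Note: a standalone sketch of how one of these counter vectors behaves once registered and incremented. The label values and the HTTP wiring here are illustrative only; in crowdsec the registration happens in registerPrometheus (see above):

package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

// Same shape as parser.NodesWlHits: a counter vector with four labels.
var nodesWlHits = prometheus.NewCounterVec(
	prometheus.CounterOpts{
		Name: "cs_node_wl_hits_total",
		Help: "Total events processed by whitelist node.",
	},
	[]string{"source", "type", "name", "reason"},
)

func main() {
	prometheus.MustRegister(nodesWlHits)
	nodesWlHits.With(prometheus.Labels{
		"source": "/var/log/nginx/access.log", // e.g. p.Line.Src
		"type":   "file",                      // e.g. p.Line.Module
		"name":   "mywl",                      // whitelist node name
		"reason": "private ranges",            // whitelist reason
	}).Inc()

	// A scrape of /metrics would then contain, among others:
	// cs_node_wl_hits_total{name="mywl",reason="private ranges",source="/var/log/nginx/access.log",type="file"} 1
	http.Handle("/metrics", promhttp.Handler())
	log.Fatal(http.ListenAndServe(":2112", nil))
}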


@@ -8,6 +8,7 @@ import (
"github.com/antonmedv/expr/vm"
"github.com/crowdsecurity/crowdsec/pkg/exprhelpers"
"github.com/crowdsecurity/crowdsec/pkg/types"
"github.com/prometheus/client_golang/prometheus"
)
type Whitelist struct {
@@ -36,11 +37,13 @@ func (n *Node) ContainsIPLists() bool {
return len(n.Whitelist.B_Ips) > 0 || len(n.Whitelist.B_Cidrs) > 0
}
func (n *Node) CheckIPsWL(srcs []net.IP) bool {
func (n *Node) CheckIPsWL(p *types.Event) bool {
srcs := p.ParseIPSources()
isWhitelisted := false
if !n.ContainsIPLists() {
return isWhitelisted
}
NodesWlHits.With(prometheus.Labels{"source": p.Line.Src, "type": p.Line.Module, "name": n.Name, "reason": n.Whitelist.Reason}).Inc()
for _, src := range srcs {
if isWhitelisted {
break
@@ -62,15 +65,19 @@ func (n *Node) CheckIPsWL(srcs []net.IP) bool {
n.Logger.Tracef("whitelist: %s not in [%s]", src, v)
}
}
if isWhitelisted {
NodesWlHitsOk.With(prometheus.Labels{"source": p.Line.Src, "type": p.Line.Module, "name": n.Name, "reason": n.Whitelist.Reason}).Inc()
}
return isWhitelisted
}
func (n *Node) CheckExprWL(cachedExprEnv map[string]interface{}) (bool, error) {
func (n *Node) CheckExprWL(cachedExprEnv map[string]interface{}, p *types.Event) (bool, error) {
isWhitelisted := false
if !n.ContainsExprLists() {
return false, nil
}
NodesWlHits.With(prometheus.Labels{"source": p.Line.Src, "type": p.Line.Module, "name": n.Name, "reason": n.Whitelist.Reason}).Inc()
/* run whitelist expression tests anyway */
for eidx, e := range n.Whitelist.B_Exprs {
//if we already know the event is whitelisted, skip the rest of the expressions
@@ -94,6 +101,9 @@ func (n *Node) CheckExprWL(cachedExprEnv map[string]interface{}) (bool, error) {
n.Logger.Errorf("unexpected type %T (%v) while running '%s'", output, output, n.Whitelist.Exprs[eidx])
}
}
if isWhitelisted {
NodesWlHitsOk.With(prometheus.Labels{"source": p.Line.Src, "type": p.Line.Module, "name": n.Name, "reason": n.Whitelist.Reason}).Inc()
}
return isWhitelisted, nil
}
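
Note: the name and reason labels recorded above come straight from the whitelist node's configuration. For reference, a parser whitelist is declared roughly as follows; this is a hedged example following the usual crowdsec whitelist syntax, with an invented node name and values:

name: mywl                       # becomes the "name" label / Whitelist column
description: "ignore trusted internal traffic"
whitelist:
  reason: "private ranges"       # becomes the "reason" label / Reason column
  ip:
    - "192.168.1.10"
  cidr:
    - "10.0.0.0/8"
  expression:
    - evt.Parsed.source_ip == '127.0.0.1'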


@@ -289,9 +289,9 @@ func TestWhitelistCheck(t *testing.T) {
var err error
node.Whitelist = tt.whitelist
node.CompileWLs()
isWhitelisted := node.CheckIPsWL(tt.event.ParseIPSources())
isWhitelisted := node.CheckIPsWL(tt.event)
if !isWhitelisted {
isWhitelisted, err = node.CheckExprWL(map[string]interface{}{"evt": tt.event})
isWhitelisted, err = node.CheckExprWL(map[string]interface{}{"evt": tt.event}, tt.event)
}
require.NoError(t, err)
require.Equal(t, tt.expected, isWhitelisted)
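
Note: with the "whitelists" section registered in the metric store and added to the engine group, the new table should be reachable with either of the following (illustrative invocations):

cscli metrics show whitelists
cscli metrics show engine   # engine now expands to acquisition, parsers, buckets, stash, whitelists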