make message better

This commit is contained in:
Thibault "bui" Koechlin 2020-05-27 16:31:08 +02:00 committed by Farica
parent c03235ed64
commit 1abdfc9b10
14 changed files with 265 additions and 97 deletions


@@ -44,21 +44,21 @@ Besides detecting and stopping attacks in real time based on your logs, it allow
### Out of the box detection
<details>
<summary>Baseline detection is effective out-of-the-box, no fine-tuning required (click me!)</summary>
<summary>Baseline detection is effective out-of-the-box, no fine-tuning required (click to expand)</summary>
<img src="https://github.com/crowdsecurity/crowdsec/blob/master/docs/assets/images/out-of-the-box-protection.gif">
</details>
### Easy blocker deployment
<details>
<summary>It's trivial to add blockers to enforce decisions of crowdsec (click me!)</summary>
<summary>It's trivial to add blockers to enforce decisions of crowdsec (click to expand)</summary>
<img src="https://github.com/crowdsecurity/crowdsec/blob/master/docs/assets/images/blocker-installation.gif">
</details>
### Easy dashboard access
<details>
<summary>It's easy to deploy a metabase interface to view your data simply with cscli (click me!)</summary>
<summary>It's easy to deploy a metabase interface to view your data simply with cscli (click to expand)</summary>
<img src="https://github.com/crowdsecurity/crowdsec/blob/master/docs/assets/images/cscli-metabase.gif">
</details>


@@ -105,12 +105,12 @@ func main() {
log.Infof("Loading grok library")
/* load base regexps for two grok parsers */
parserCTX, err = p.Init(map[string]interface{}{"patterns": cConfig.ConfigFolder + string("/patterns/")})
parserCTX, err = p.Init(map[string]interface{}{"patterns": cConfig.ConfigFolder + string("/patterns/"), "data": cConfig.DataFolder})
if err != nil {
log.Errorf("failed to initialize parser : %v", err)
return
}
postOverflowCTX, err = p.Init(map[string]interface{}{"patterns": cConfig.ConfigFolder + string("/patterns/")})
postOverflowCTX, err = p.Init(map[string]interface{}{"patterns": cConfig.ConfigFolder + string("/patterns/"), "data": cConfig.DataFolder})
if err != nil {
log.Errorf("failed to initialize postoverflow : %v", err)
return
@@ -204,11 +204,11 @@ func main() {
for _, scenarios := range CustomScenarios {
bucketFiles = append(bucketFiles, scenarios.Filename)
}
holders, outputEventChan, err = leaky.LoadBuckets(bucketFiles)
holders, outputEventChan, err = leaky.LoadBuckets(bucketFiles, cConfig.DataFolder)
} else {
log.Infof("Loading scenarios")
holders, outputEventChan, err = leaky.Init(map[string]string{"patterns": cConfig.ConfigFolder + "/scenarios/"})
holders, outputEventChan, err = leaky.Init(map[string]string{"patterns": cConfig.ConfigFolder + "/scenarios/", "data": cConfig.DataFolder})
}
if err != nil {
log.Fatalf("Scenario loading failed : %v", err)


@@ -3,8 +3,7 @@ filenames:
- ./tests/nginx/nginx.log
#this is not a syslog log; indicate which kind of log it is
labels:
prog_name: nginx
type: nginx_raw_log
type: nginx
---
filenames:
- /var/log/auth.log
@@ -15,38 +14,5 @@ labels:
---
filename: /var/log/apache2/*.log
labels:
prog_name: apache2
type: nginx_raw_log
---
filenames:
- ./tests/tcpdump.out
- /root/granola/tcpdump.out
labels:
prog_name: tcpdump
type: tcpdump_raw_log
---
filename: ./tests/apache.log
labels:
prog_name: apache2
---
filename: ./tests/nginx.log
labels:
prog_name: nginx
# #list of files to be tailed
# #it's ok to add files that don't exist, they will just be skipped :)
# - /var/log/nginx/*.log
# - /root/granola/tcpdump.out
# - /var/log/auth.log
# - tests/*.log
# - tests/tcpdump.out
# - tests/nginx/nginx.log
# # for honeypots
# - /data/logs/*.log
# - /var/log/tcpdump.out
# - /var/log/auth.log
# - /var/log/syslog
type: apache2
---


@@ -1,6 +1,7 @@
package cwhub
import (
"bytes"
"crypto/sha256"
"encoding/json"
"errors"
@@ -15,8 +16,10 @@ import (
"path/filepath"
"strings"
"github.com/crowdsecurity/crowdsec/pkg/types"
"github.com/enescakir/emoji"
log "github.com/sirupsen/logrus"
"gopkg.in/yaml.v2"
)
var PARSERS = "parsers"
@@ -495,12 +498,14 @@ func LoadPkgIndex(buff []byte) (map[string]map[string]Item, error) {
//DisableItem to disable an item managed by the hub, removes the symlink
func DisableItem(target Item, tdir string, hdir string, purge bool) (Item, error) {
syml := tdir + "/" + target.Type + "/" + target.Stage + "/" + target.FileName
syml, err := filepath.Abs(tdir + "/" + target.Type + "/" + target.Stage + "/" + target.FileName)
if err != nil {
return Item{}, err
}
if target.Local {
return target, fmt.Errorf("%s isn't managed by hub. Please delete manually", target.Name)
}
var err error
/*for COLLECTIONS, disable sub-items*/
if target.Type == COLLECTIONS {
var tmp = [][]string{target.Parsers, target.PostOverflows, target.Scenarios, target.Collections}
@@ -534,8 +539,12 @@ func DisableItem(target Item, tdir string, hdir string, purge bool) (Item, error
if err != nil {
return target, fmt.Errorf("unable to read symlink of %s (%s)", target.Name, syml)
}
if hubpath != filepath.Clean(hdir+"/"+target.RemotePath) {
log.Warningf("%s (%s) isn't a symlink to %s", target.Name, syml, filepath.Clean(hdir+"/"+target.RemotePath))
absPath, err := filepath.Abs(hdir + "/" + target.RemotePath)
if err != nil {
return target, err
}
if hubpath != absPath {
log.Warningf("%s (%s) isn't a symlink to %s", target.Name, syml, absPath)
return target, fmt.Errorf("%s isn't managed by hub", target.Name)
}
@@ -740,6 +749,23 @@ func DownloadItem(target Item, tdir string, overwrite bool, dataFolder string) (
target.Tainted = false
target.UpToDate = true
dec := yaml.NewDecoder(bytes.NewReader(body))
for {
data := &types.DataSet{}
err = dec.Decode(data)
if err != nil {
if err == io.EOF {
break
} else {
return target, fmt.Errorf("unable to read file %s data: %s", tdir+"/"+target.RemotePath, err)
}
}
err = types.GetData(data.Data, dataFolder)
if err != nil {
return target, fmt.Errorf("unable to get data: %s", err)
}
}
return target, nil
}
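
The loop added above treats the downloaded item as a multi-document YAML stream and fetches every data file it declares. A minimal standalone sketch of the same decode pattern, assuming an illustrative data section (the URL and file name are made up):

package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/crowdsecurity/crowdsec/pkg/types"
	"gopkg.in/yaml.v2"
)

func main() {
	// a hub item may carry an optional 'data' section in any of its YAML documents
	body := []byte(`
data:
  - source_url: https://example.com/bad_ips.txt
    dest_file: bad_ips.txt
    type: string
`)
	dec := yaml.NewDecoder(bytes.NewReader(body))
	for {
		set := &types.DataSet{}
		if err := dec.Decode(set); err != nil {
			if err == io.EOF {
				break // end of the YAML stream
			}
			panic(err)
		}
		// DownloadItem would now call types.GetData(set.Data, dataFolder)
		fmt.Printf("%d data file(s) declared\n", len(set.Data))
	}
}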


@@ -26,7 +26,7 @@ var (
Tag string // = "dev"
GoVersion string // = "1.13"
Constraint_parser = ">= 1.0, < 2.0"
Constraint_scenario = ">= 1.0, < 2.0"
Constraint_scenario = ">= 1.0, < 3.0"
Constraint_api = "v1"
Constraint_acquis = ">= 1.0, < 2.0"
)


@@ -1,12 +1,19 @@
package exprhelpers
import (
"bufio"
"os"
"path"
"regexp"
"strconv"
"strings"
log "github.com/sirupsen/logrus"
)
var dataFile map[string][]string
var dataFileRegex map[string][]*regexp.Regexp
func Atof(x string) float64 {
log.Debugf("debug atof %s", x)
ret, err := strconv.ParseFloat(x, 64)
@@ -26,9 +33,71 @@ func EndsWith(s string, suff string) bool {
func GetExprEnv(ctx map[string]interface{}) map[string]interface{} {
var ExprLib = map[string]interface{}{"Atof": Atof, "JsonExtract": JsonExtract, "JsonExtractLib": JsonExtractLib}
var ExprLib = map[string]interface{}{"Atof": Atof, "JsonExtract": JsonExtract, "JsonExtractLib": JsonExtractLib, "File": File, "RegexpInFile": RegexpInFile}
for k, v := range ctx {
ExprLib[k] = v
}
return ExprLib
}
func Init() error {
log.Infof("Expr helper initiated")
dataFile = make(map[string][]string)
dataFileRegex = make(map[string][]*regexp.Regexp)
return nil
}
func FileInit(fileFolder string, filename string, fileType string) error {
filepath := path.Join(fileFolder, filename)
file, err := os.Open(filepath)
if err != nil {
log.Fatal(err)
}
defer file.Close()
if _, ok := dataFile[filename]; !ok {
dataFile[filename] = []string{}
}
if fileType == "" {
fileType = "string"
}
scanner := bufio.NewScanner(file)
for scanner.Scan() {
switch fileType {
case "regex":
dataFileRegex[filename] = append(dataFileRegex[filename], regexp.MustCompile(scanner.Text()))
case "regexp":
dataFileRegex[filename] = append(dataFileRegex[filename], regexp.MustCompile(scanner.Text()))
case "string":
dataFile[filename] = append(dataFile[filename], scanner.Text())
default:
log.Errorf("unknown data type '%s' for : '%s'", fileType, filename)
}
}
if err := scanner.Err(); err != nil {
log.Fatal(err)
}
return nil
}
func File(filename string) []string {
if _, ok := dataFile[filename]; ok {
return dataFile[filename]
}
log.Errorf("file '%s' not found for expr library", filename)
return []string{}
}
func RegexpInFile(data string, filename string) bool {
if _, ok := dataFileRegex[filename]; ok {
for _, re := range dataFileRegex[filename] {
if re.Match([]byte(data)) {
return true
}
}
} else {
log.Errorf("file '%s' not found for expr library", filename)
}
return false
}
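
Taken together, the new helpers are used in this order: Init() once at load time, FileInit() for each data file a scenario or parser declares, then File() / RegexpInFile() from expr filters at runtime. A minimal sketch, assuming a hypothetical ./data/backdoors.txt containing one regexp per line:

package main

import (
	"fmt"

	"github.com/crowdsecurity/crowdsec/pkg/exprhelpers"
)

func main() {
	if err := exprhelpers.Init(); err != nil {
		panic(err)
	}
	// register ./data/backdoors.txt as a list of regexps (hypothetical file)
	if err := exprhelpers.FileInit("./data", "backdoors.txt", "regex"); err != nil {
		panic(err)
	}
	// this is what a scenario filter such as
	//   RegexpInFile(evt.Parsed.request, 'backdoors.txt')
	// ends up calling at runtime
	fmt.Println(exprhelpers.RegexpInFile("/cgi-bin/login.php", "backdoors.txt"))
}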


@@ -76,7 +76,7 @@ func testOneBucket(t *testing.T, dir string) error {
for _, x := range stages {
files = append(files, x.Filename)
}
holders, response, err := LoadBuckets(files)
holders, response, err := LoadBuckets(files, dir+"/data")
if err != nil {
t.Fatalf("failed loading bucket : %s", err)
}


@@ -17,7 +17,6 @@ import (
"github.com/davecgh/go-spew/spew"
"github.com/prometheus/client_golang/prometheus"
"github.com/sirupsen/logrus"
log "github.com/sirupsen/logrus"
"github.com/antonmedv/expr"
@@ -31,35 +30,36 @@ import (
// BucketFactory struct holds all fields for any bucket configuration. It acts as a
// generic description of a bucket, i.e. a bucket factory.
type BucketFactory struct {
FormatVersion string `yaml:"format"`
Author string `yaml:"author"`
Description string `yaml:"description"`
References []string `yaml:"references"`
Type string `yaml:"type"` //Type can be: leaky, counter or trigger. It determines the main bucket characteristics
Name string `yaml:"name"` //Name of the bucket, used later in logs and user messages. Should be unique
Capacity int `yaml:"capacity"` //Capacity is applicable to leaky buckets and determines the "burst" capacity
LeakSpeed string `yaml:"leakspeed"` //LeakSpeed is a float representing how many events per second leak out of the bucket
Duration string `yaml:"duration"` //Duration allows 'counter' buckets to have a fixed lifetime
Filter string `yaml:"filter"` //Filter is an expr that determines if an event is eligible for said bucket. Filter is evaluated against the Event struct
GroupBy string `yaml:"groupby,omitempty"` //GroupBy is an expr used to determine the partitions of the bucket. A common example is the source_ip
Distinct string `yaml:"distinct"` //Distinct, when present, adds a `Pour()` processor that will only pour unique items (based on the uniq_filter expr result)
Debug bool `yaml:"debug"` //Debug, when set to true, will enable debugging for _this_ scenario specifically
Labels map[string]string `yaml:"labels"` //Labels is a K:V list aiming at providing context to the overflow
Blackhole string `yaml:"blackhole,omitempty"` //Blackhole is a duration that, if present, will prevent the same bucket partition from overflowing more often than $duration
logger *log.Entry `yaml:"-"` //logger is a bucket-specific logger (used by Debug as well)
Reprocess bool `yaml:"reprocess"` //Reprocess, if true, will force the bucket to be re-injected into the processing chain
CacheSize int `yaml:"cache_size"` //CacheSize, if > 0, limits the size of the bucket's in-memory cache
Profiling bool `yaml:"profiling"` //Profiling, if true, will make the bucket record pours/overflows/etc.
OverflowFilter string `yaml:"overflow_filter"` //OverflowFilter, if present, is a filter that must return true for the overflow to go through
BucketName string `yaml:"-"`
Filename string `yaml:"-"`
RunTimeFilter *vm.Program `json:"-"`
RunTimeGroupBy *vm.Program `json:"-"`
leakspeed time.Duration //internal representation of `Leakspeed`
duration time.Duration //internal representation of `Duration`
ret chan types.Event //the bucket-specific output chan for overflows
processors []Processor //processors is the list of hooks for pour/overflow/create (cf. uniq, blackhole etc.)
output bool //??
FormatVersion string `yaml:"format"`
Author string `yaml:"author"`
Description string `yaml:"description"`
References []string `yaml:"references"`
Type string `yaml:"type"` //Type can be: leaky, counter or trigger. It determines the main bucket characteristics
Name string `yaml:"name"` //Name of the bucket, used later in logs and user messages. Should be unique
Capacity int `yaml:"capacity"` //Capacity is applicable to leaky buckets and determines the "burst" capacity
LeakSpeed string `yaml:"leakspeed"` //LeakSpeed is a float representing how many events per second leak out of the bucket
Duration string `yaml:"duration"` //Duration allows 'counter' buckets to have a fixed lifetime
Filter string `yaml:"filter"` //Filter is an expr that determines if an event is eligible for said bucket. Filter is evaluated against the Event struct
GroupBy string `yaml:"groupby,omitempty"` //GroupBy is an expr used to determine the partitions of the bucket. A common example is the source_ip
Distinct string `yaml:"distinct"` //Distinct, when present, adds a `Pour()` processor that will only pour unique items (based on the uniq_filter expr result)
Debug bool `yaml:"debug"` //Debug, when set to true, will enable debugging for _this_ scenario specifically
Labels map[string]string `yaml:"labels"` //Labels is a K:V list aiming at providing context to the overflow
Blackhole string `yaml:"blackhole,omitempty"` //Blackhole is a duration that, if present, will prevent the same bucket partition from overflowing more often than $duration
logger *log.Entry `yaml:"-"` //logger is a bucket-specific logger (used by Debug as well)
Reprocess bool `yaml:"reprocess"` //Reprocess, if true, will force the bucket to be re-injected into the processing chain
CacheSize int `yaml:"cache_size"` //CacheSize, if > 0, limits the size of the bucket's in-memory cache
Profiling bool `yaml:"profiling"` //Profiling, if true, will make the bucket record pours/overflows/etc.
OverflowFilter string `yaml:"overflow_filter"` //OverflowFilter, if present, is a filter that must return true for the overflow to go through
BucketName string `yaml:"-"`
Filename string `yaml:"-"`
RunTimeFilter *vm.Program `json:"-"`
RunTimeGroupBy *vm.Program `json:"-"`
Data []*types.DataSource `yaml:"data,omitempty"`
leakspeed time.Duration //internal representation of `Leakspeed`
duration time.Duration //internal representation of `Duration`
ret chan types.Event //the bucket-specific output chan for overflows
processors []Processor //processors is the list of hooks for pour/overflow/create (cf. uniq, blackhole etc.)
output bool //??
}
func ValidateFactory(b *BucketFactory) error {
@@ -101,16 +101,21 @@ func ValidateFactory(b *BucketFactory) error {
/* Init recursively processes yaml files from a directory and loads them as BucketFactory */
func Init(cfg map[string]string) ([]BucketFactory, chan types.Event, error) {
return LoadBucketDir(cfg["patterns"])
return LoadBucketDir(cfg["patterns"], cfg["data"])
}
func LoadBuckets(files []string) ([]BucketFactory, chan types.Event, error) {
func LoadBuckets(files []string, dataFolder string) ([]BucketFactory, chan types.Event, error) {
var (
ret []BucketFactory = []BucketFactory{}
response chan types.Event
)
var seed namegenerator.Generator = namegenerator.NewNameGenerator(time.Now().UTC().UnixNano())
err := exprhelpers.Init()
if err != nil {
return nil, nil, err
}
response = make(chan types.Event, 1)
for _, f := range files {
log.Debugf("Loading %s", f)
@@ -160,7 +165,7 @@ func LoadBuckets(files []string) ([]BucketFactory, chan types.Event, error) {
g.Filename = filepath.Clean(f)
g.BucketName = seed.Generate()
g.ret = response
err = LoadBucket(&g)
err = LoadBucket(&g, dataFolder)
if err != nil {
log.Errorf("Failed to load bucket : %v", err)
return nil, nil, fmt.Errorf("loadBucket failed : %v", err)
@@ -172,7 +177,7 @@ func LoadBuckets(files []string) ([]BucketFactory, chan types.Event, error) {
return ret, response, nil
}
func LoadBucketDir(dir string) ([]BucketFactory, chan types.Event, error) {
func LoadBucketDir(dir string, dataFolder string) ([]BucketFactory, chan types.Event, error) {
var (
filenames []string
)
@@ -183,14 +188,14 @@ func LoadBucketDir(dir string) ([]BucketFactory, chan types.Event, error) {
for _, f := range files {
filenames = append(filenames, dir+f.Name())
}
return LoadBuckets(filenames)
return LoadBuckets(filenames, dataFolder)
}
/* Init recursively processes yaml files from a directory and loads them as BucketFactory */
func LoadBucket(g *BucketFactory) error {
func LoadBucket(g *BucketFactory, dataFolder string) error {
var err error
if g.Debug {
var clog = logrus.New()
var clog = log.New()
clog.SetFormatter(&log.TextFormatter{FullTimestamp: true})
clog.SetLevel(log.DebugLevel)
g.logger = clog.WithFields(log.Fields{
@@ -275,6 +280,15 @@ func LoadBucket(g *BucketFactory) error {
g.processors = append(g.processors, blackhole)
}
if len(g.Data) > 0 {
for _, data := range g.Data {
err = exprhelpers.FileInit(dataFolder, data.DestPath, data.Type)
if err != nil {
log.Errorf(err.Error())
}
}
}
g.output = false
if err := ValidateFactory(g); err != nil {
return fmt.Errorf("invalid bucket from %s : %v", g.Filename, err)


@@ -53,7 +53,8 @@ type Node struct {
//Statics can be present in any type of node and is executed last
Statics []types.ExtraField `yaml:"statics,omitempty"`
//Whitelists
Whitelist types.Whitelist `yaml:"whitelist,omitempty"`
Whitelist types.Whitelist `yaml:"whitelist,omitempty"`
Data []*types.DataSource `yaml:"data,omitempty"`
}
func (n *Node) validate(pctx *UnixParserCtx) error {


@@ -149,7 +149,7 @@ func prepTests() (*UnixParserCtx, error) {
/* this should be refactored to 2 lines :p */
// Init the parser
pctx, err = p.Init(map[string]interface{}{"patterns": cfgdir + string("/patterns/")})
pctx, err = p.Init(map[string]interface{}{"patterns": cfgdir + string("/patterns/"), "data": datadir})
if err != nil {
return nil, fmt.Errorf("failed to initialize parser : %v", err)
}


@@ -19,6 +19,7 @@ import (
"time"
"github.com/crowdsecurity/crowdsec/pkg/cwversion"
"github.com/crowdsecurity/crowdsec/pkg/exprhelpers"
log "github.com/sirupsen/logrus"
@@ -42,6 +43,10 @@ func LoadStages(stageFiles []Stagefile, pctx *UnixParserCtx) ([]Node, error) {
tmpstages := make(map[string]bool)
pctx.Stages = []string{}
err := exprhelpers.Init()
if err != nil {
return nil, err
}
for _, stageFile := range stageFiles {
if !strings.HasSuffix(stageFile.Filename, ".yaml") {
log.Warningf("skip non yaml : %s", stageFile.Filename)
@@ -109,6 +114,15 @@ func LoadStages(stageFiles []Stagefile, pctx *UnixParserCtx) ([]Node, error) {
if node.Stage == "" {
continue
}
if len(node.Data) > 0 {
for _, data := range node.Data {
err = exprhelpers.FileInit(pctx.DataFolder, data.DestPath, data.Type)
if err != nil {
log.Errorf(err.Error())
}
}
}
nodes = append(nodes, node)
nodesCount++
}


@@ -12,9 +12,10 @@ type UnixParser struct {
}
type UnixParserCtx struct {
Grok grokky.Host
Stages []string
Profiling bool
Grok grokky.Host
Stages []string
Profiling bool
DataFolder string
}
func (u UnixParser) IsParsable(ctx interface{}, l types.Line) (bool, error) {
@@ -28,6 +29,7 @@ func (u UnixParser) Init(c map[string]interface{}) (*UnixParserCtx, error) {
if err != nil {
return nil, err
}
r.DataFolder = c["data"].(string)
for _, f := range files {
log.Debugf("Loading %s", f.Name())
if err := r.Grok.AddFromFile(c["patterns"].(string) + f.Name()); err != nil {

pkg/types/dataset.go (new file, 74 lines)

@@ -0,0 +1,74 @@
package types
import (
"fmt"
"io/ioutil"
"net/http"
"os"
"path"
log "github.com/sirupsen/logrus"
)
type DataSource struct {
SourceURL string `yaml:"source_url"`
DestPath string `yaml:"dest_file"`
Type string `yaml:"type"`
}
type DataSet struct {
Data []*DataSource `yaml:"data,omitempty"`
}
func downloadFile(url string, destPath string) error {
log.Debugf("downloading %s in %s", url, destPath)
req, err := http.NewRequest("GET", url, nil)
if err != nil {
return err
}
resp, err := http.DefaultClient.Do(req)
if err != nil {
return err
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return err
}
if resp.StatusCode != 200 {
return fmt.Errorf("download response 'HTTP %d' : %s", resp.StatusCode, string(body))
}
file, err := os.OpenFile(destPath, os.O_RDWR|os.O_CREATE, 0644)
if err != nil {
return err
}
_, err = file.WriteString(string(body))
if err != nil {
return err
}
err = file.Sync()
if err != nil {
return err
}
return nil
}
func GetData(data []*DataSource, dataDir string) error {
for _, dataS := range data {
destPath := path.Join(dataDir, dataS.DestPath)
log.Infof("downloading data '%s' in '%s'", dataS.SourceURL, destPath)
err := downloadFile(dataS.SourceURL, destPath)
if err != nil {
return err
}
}
return nil
}
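
GetData is the common entry point: cwhub calls it at install/upgrade time, and the bucket/parser loaders then rely on the files it has materialized in the data folder. A usage sketch with made-up sources and destination directory:

package main

import (
	"github.com/crowdsecurity/crowdsec/pkg/types"
	log "github.com/sirupsen/logrus"
)

func main() {
	// hypothetical data sources; GetData joins dest_file onto the data dir
	// and downloads each source_url into it
	sources := []*types.DataSource{
		{SourceURL: "https://example.com/bad_ips.txt", DestPath: "bad_ips.txt", Type: "string"},
	}
	if err := types.GetData(sources, "/tmp/crowdsec-data"); err != nil {
		log.Fatalf("data download failed: %s", err)
	}
}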


@@ -30,6 +30,7 @@ DATA_DIR="$BASE/data"
LOG_DIR="$BASE/logs/"
CONFIG_DIR="$BASE/config"
CONFIG_FILE="$BASE/dev.yaml"
CSCLI_DIR="$CONFIG_DIR/crowdsec-cli"
PARSER_DIR="$CONFIG_DIR/parsers"
PARSER_S00="$PARSER_DIR/s00-raw"
@@ -81,10 +82,8 @@ copy_files() {
setup() {
$BASE/cscli -c "$CSCLI_DIR" config installdir "$CONFIG_DIR"
$BASE/cscli -c "$CSCLI_DIR" config backend "$PLUGIN_BACKEND_DIR"
$BASE/cscli -c "$CSCLI_DIR" update
$BASE/cscli -c "$CSCLI_DIR" install collection crowdsecurity/linux
$BASE/cscli -c "$CONFIG_FILE" update
$BASE/cscli -c "$CONFIG_FILE" install collection crowdsecurity/linux
}
@@ -96,7 +95,10 @@ main() {
copy_files
log_info "Files copied"
log_info "Setting up configurations"
CURRENT_PWD=$(pwd)
cd $BASE
setup
cd $CURRENT_PWD
gen_sqlite_config
log_info "Environment is ready in $BASE"
}