LGTM fixes + avoid capitalizing fmt.Errorf strings

Thibault bui Koechlin 2020-05-19 21:31:06 +02:00
parent b8fb2ab555
commit 604b97a519
21 changed files with 101 additions and 586 deletions
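
For context: Go linters (golint, and the LGTM checks this commit answers) flag `fmt.Errorf`/`errors.New` strings that start with a capital letter, because error messages are usually wrapped into longer messages where a capitalized fragment reads badly. A minimal sketch of the rationale, with hypothetical names, not code from this commit:

```go
package main

import (
	"errors"
	"fmt"
)

// loadConfig is a made-up helper that always fails, so main can show
// how a lowercase error string composes when the caller wraps it.
func loadConfig(path string) error {
	return fmt.Errorf("unable to load api config %s : %s", path, errors.New("no such file"))
}

func main() {
	if err := loadConfig("/etc/crowdsec/api.yaml"); err != nil {
		// prints: startup failed: unable to load api config /etc/crowdsec/api.yaml : no such file
		fmt.Println(fmt.Errorf("startup failed: %s", err))
	}
}
```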

View file

@ -120,6 +120,9 @@ func pullTOP() error {
}
var signalOcc types.SignalOccurence
signalOcc, err = simpleBanToSignal(item["range_ip"], item["scenario"], item["expiration"], item["action"], item["as_name"], item["as_num"], item["country"], "api")
if err != nil {
return fmt.Errorf("failed to convert ban to signal : %s", err)
}
if err := outputCTX.Insert(signalOcc); err != nil {
log.Fatalf("Unable to write pull to sqliteDB : %+s", err.Error())
}

View file

@ -85,7 +85,7 @@ func copyFile(sourceSymLink, destinationFile string) (err error) {
if !sourceFileStat.Mode().IsRegular() {
// cannot copy non-regular files (e.g., directories,
// symlinks, devices, etc.)
return fmt.Errorf("CopyFile: non-regular source file %s (%q)", sourceFileStat.Name(), sourceFileStat.Mode().String())
return fmt.Errorf("copyFile: non-regular source file %s (%q)", sourceFileStat.Name(), sourceFileStat.Mode().String())
}
destinationFileStat, err := os.Stat(destinationFile)
if err != nil {
@ -94,7 +94,7 @@ func copyFile(sourceSymLink, destinationFile string) (err error) {
}
} else {
if !(destinationFileStat.Mode().IsRegular()) {
return fmt.Errorf("CopyFile: non-regular destination file %s (%q)", destinationFileStat.Name(), destinationFileStat.Mode().String())
return fmt.Errorf("copyFile: non-regular destination file %s (%q)", destinationFileStat.Name(), destinationFileStat.Mode().String())
}
if os.SameFile(sourceFileStat, destinationFileStat) {
return
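
The two hunks above keep `copyFile`'s guard against non-regular files; a reduced, self-contained sketch of that check (helper name and path are hypothetical):

```go
package main

import (
	"fmt"
	"log"
	"os"
)

// mustBeRegular mirrors the guard above: only plain files may be copied.
// Note that os.Stat follows symlinks, so the check applies to the target.
func mustBeRegular(path string) error {
	st, err := os.Stat(path)
	if err != nil {
		return err
	}
	if !st.Mode().IsRegular() {
		return fmt.Errorf("copyFile: non-regular source file %s (%q)", st.Name(), st.Mode().String())
	}
	return nil
}

func main() {
	if err := mustBeRegular(os.TempDir()); err != nil { // a directory, so this errors
		log.Println(err)
	}
}
```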
@ -143,7 +143,7 @@ func restoreFromDirectory(source string) error {
/*restore the local and tainted items*/
files, err := ioutil.ReadDir(itemDirectory)
if err != nil {
return fmt.Errorf("Failed enumerating files of %s : %s", itemDirectory, err)
return fmt.Errorf("failed enumerating files of %s : %s", itemDirectory, err)
}
for _, file := range files {
//dir are stages, keep track
@ -154,12 +154,12 @@ func restoreFromDirectory(source string) error {
stagedir := fmt.Sprintf("%s/%s/%s/", config.InstallFolder, itype, stage)
log.Debugf("Found stage %s in %s, target directory : %s", stage, itype, stagedir)
if err = os.MkdirAll(stagedir, os.ModePerm); err != nil {
return fmt.Errorf("Error while creating stage directory %s : %s", stagedir, err)
return fmt.Errorf("error while creating stage directory %s : %s", stagedir, err)
}
/*find items*/
ifiles, err := ioutil.ReadDir(itemDirectory + "/" + stage + "/")
if err != nil {
return fmt.Errorf("Failed enumerating files of %s : %s", itemDirectory+"/"+stage, err)
return fmt.Errorf("failed enumerating files of %s : %s", itemDirectory+"/"+stage, err)
}
//finally copy item
for _, tfile := range ifiles {
@ -183,7 +183,7 @@ func restoreFromDirectory(source string) error {
// - if not, restore
// -> try login
if err := restoreAPICreds(source); err != nil {
return fmt.Errorf("Failed to restore api credentials : %s", err)
return fmt.Errorf("failed to restore api credentials : %s", err)
}
/*
Restore acquis
@ -206,7 +206,7 @@ func restoreAPICreds(source string) error {
api := &cwapi.ApiCtx{}
if err = api.LoadConfig(apiyaml); err != nil {
return fmt.Errorf("Unable to load api config %s : %s", apiyaml, err)
return fmt.Errorf("unable to load api config %s : %s", apiyaml, err)
}
if api.Creds.User != "" {
log.Infof("Credentials present in existing configuration, try before override")
@ -221,23 +221,23 @@ func restoreAPICreds(source string) error {
/*existing config isn't good, override it !*/
ret, err := ioutil.ReadFile(path.Join(source, "api_creds.json"))
if err != nil {
return fmt.Errorf("Failed to read api creds from save : %s", err)
return fmt.Errorf("failed to read api creds from save : %s", err)
}
if err := json.Unmarshal(ret, &api.Creds); err != nil {
return fmt.Errorf("Failed unmarshaling saved credentials : %s", err)
return fmt.Errorf("failed unmarshaling saved credentials : %s", err)
}
api.CfgUser = api.Creds.User
api.CfgPassword = api.Creds.Password
/*override the existing yaml file*/
if err := api.WriteConfig(apiyaml); err != nil {
return fmt.Errorf("Failed writing to %s : %s", apiyaml, err)
return fmt.Errorf("failed writing to %s : %s", apiyaml, err)
} else {
log.Infof("Overwritting %s with backup info", apiyaml)
}
/*reload to check everything is safe*/
if err = api.LoadConfig(apiyaml); err != nil {
return fmt.Errorf("Unable to load api config %s : %s", apiyaml, err)
return fmt.Errorf("unable to load api config %s : %s", apiyaml, err)
}
if err := api.Signin(); err != nil {
@ -262,7 +262,7 @@ func backupToDirectory(target string) error {
return fmt.Errorf("%s already exists", target)
}
if err = os.MkdirAll(target, os.ModePerm); err != nil {
return fmt.Errorf("Error while creating %s : %s", target, err)
return fmt.Errorf("error while creating %s : %s", target, err)
}
/*
backup configurations :
@ -276,7 +276,7 @@ func backupToDirectory(target string) error {
if _, ok := cwhub.HubIdx[itemType]; ok {
itemDirectory = fmt.Sprintf("%s/%s/", target, itemType)
if err := os.MkdirAll(itemDirectory, os.ModePerm); err != nil {
return fmt.Errorf("Error while creating %s : %s", itemDirectory, err)
return fmt.Errorf("error while creating %s : %s", itemDirectory, err)
}
upstreamParsers = []string{}
stage := ""
@ -297,7 +297,7 @@ func backupToDirectory(target string) error {
stage = "/" + tmp[len(tmp)-2] + "/"
fstagedir := fmt.Sprintf("%s%s", itemDirectory, stage)
if err := os.MkdirAll(fstagedir, os.ModePerm); err != nil {
return fmt.Errorf("Error while creating stage dir %s : %s", fstagedir, err)
return fmt.Errorf("error while creating stage dir %s : %s", fstagedir, err)
}
}
clog.Debugf("[%s] : backing up file (tainted:%t local:%t up-to-date:%t)", k, v.Tainted, v.Local, v.UpToDate)

View file

@ -51,11 +51,11 @@ func simpleBanToSignal(targetIP string, reason string, expirationStr string, act
var parsedIP net.IP
var parsedRange *net.IPNet
if strings.Contains(targetIP, "/") {
if parsedIP, parsedRange, err = net.ParseCIDR(targetIP); err != nil {
if _, parsedRange, err = net.ParseCIDR(targetIP); err != nil {
return signalOcc, fmt.Errorf("'%s' is not a valid CIDR", targetIP)
}
if parsedRange == nil {
return signalOcc, fmt.Errorf("Unable to parse network : %s", err)
return signalOcc, fmt.Errorf("unable to parse network : %s", err)
}
banApp.StartIp = types.IP2Int(parsedRange.IP)
banApp.EndIp = types.IP2Int(types.LastAddress(parsedRange))
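
Besides the message casing, this hunk drops the unused first return of `net.ParseCIDR` in favor of the blank identifier, which is what LGTM flagged. A standalone sketch of the pattern (the CIDR value is arbitrary):

```go
package main

import (
	"fmt"
	"log"
	"net"
)

func main() {
	// Only the *net.IPNet is needed, so the parsed IP is discarded with
	// the blank identifier rather than assigned and left unused.
	_, network, err := net.ParseCIDR("192.168.1.0/24")
	if err != nil {
		log.Fatalf("'%s' is not a valid CIDR", "192.168.1.0/24")
	}
	fmt.Println("first address:", network.IP) // 192.168.1.0
	fmt.Println("mask:", network.Mask)        // ffffff00
}
```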
@ -87,7 +87,7 @@ func BanList() error {
if atTime != "" {
_, at = parser.GenDateParse(atTime)
if at.IsZero() {
return fmt.Errorf("Unable to parse date '%s'", atTime)
return fmt.Errorf("unable to parse date '%s'", atTime)
}
}
ret, err := outputCTX.ReadAT(at)
@ -150,7 +150,7 @@ func BanAdd(target string, duration string, reason string, action string) error
signalOcc, err = simpleBanToSignal(target, reason, duration, action, "", "", "", "cli")
if err != nil {
return fmt.Errorf("Unable to insert ban : %v", err)
return fmt.Errorf("unable to insert ban : %v", err)
}
err = outputCTX.Insert(signalOcc)
if err != nil {

View file

@ -12,6 +12,7 @@ import (
"net/http"
"os"
"path"
"strings"
"time"
"github.com/crowdsecurity/crowdsec/pkg/cwversion"
@ -168,6 +169,9 @@ func extractMetabaseDB(buf *bytes.Reader) error {
log.Fatal(err)
}
for _, f := range r.File {
if strings.Contains(f.Name, "..") {
return fmt.Errorf("invalid path '%s' in archive", f.Name)
}
tfname := fmt.Sprintf("%s/%s", metabaseDbPath, f.Name)
log.Debugf("%s -> %d", f.Name, f.UncompressedSize64)
if f.UncompressedSize64 == 0 {
@ -175,17 +179,17 @@ func extractMetabaseDB(buf *bytes.Reader) error {
}
tfd, err := os.OpenFile(tfname, os.O_RDWR|os.O_TRUNC|os.O_CREATE, 0644)
if err != nil {
return fmt.Errorf("Failed opening target file '%s' : %s", tfname, err)
return fmt.Errorf("failed opening target file '%s' : %s", tfname, err)
}
rc, err := f.Open()
if err != nil {
return fmt.Errorf("While opening zip content %s : %s", f.Name, err)
return fmt.Errorf("while opening zip content %s : %s", f.Name, err)
}
written, err := io.Copy(tfd, rc)
if err == io.EOF {
log.Printf("files finished ok")
} else if err != nil {
return fmt.Errorf("While copying content to %s : %s", tfname, err)
return fmt.Errorf("while copying content to %s : %s", tfname, err)
}
log.Infof("written %d bytes to %s", written, tfname)
rc.Close()
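
The `strings.Contains(f.Name, "..")` check added above guards against "zip slip": a crafted archive entry like `../../etc/passwd` must not escape the extraction directory. A reduced sketch under that reading (helper and paths are hypothetical):

```go
package main

import (
	"fmt"
	"strings"
)

// safeTarget rejects any archive entry whose name contains "..",
// before a destination path is ever built from it.
func safeTarget(baseDir, entryName string) (string, error) {
	if strings.Contains(entryName, "..") {
		return "", fmt.Errorf("invalid path '%s' in archive", entryName)
	}
	return fmt.Sprintf("%s/%s", baseDir, entryName), nil
}

func main() {
	for _, name := range []string{"metabase.db", "../../etc/passwd"} {
		if target, err := safeTarget("/metabase-data", name); err != nil {
			fmt.Println(err) // the second entry is refused
		} else {
			fmt.Println("extract to", target)
		}
	}
}
```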
@ -240,15 +244,15 @@ func resetMetabasePassword(newpassword string) error {
httpClient := http.Client{Timeout: 20 * time.Second}
resp, err := httpClient.Do(sessionreq)
if err != nil {
return fmt.Errorf("While trying to do rescan api call to metabase : %s", err)
return fmt.Errorf("while trying to do rescan api call to metabase : %s", err)
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return fmt.Errorf("While reading rescan api call response : %s", err)
return fmt.Errorf("while reading rescan api call response : %s", err)
}
if resp.StatusCode != 200 {
return fmt.Errorf("Got '%s' (http:%d) while trying to rescan metabase", string(body), resp.StatusCode)
return fmt.Errorf("got '%s' (http:%d) while trying to rescan metabase", string(body), resp.StatusCode)
}
/*update password*/
sessionreq, err = httpctx.New().Put(metabaseURIUpdatepwd).BodyJSON(map[string]string{
@ -261,7 +265,7 @@ func resetMetabasePassword(newpassword string) error {
httpClient = http.Client{Timeout: 20 * time.Second}
resp, err = httpClient.Do(sessionreq)
if err != nil {
return fmt.Errorf("While trying to reset metabase password : %s", err)
return fmt.Errorf("while trying to reset metabase password : %s", err)
}
defer resp.Body.Close()
body, err = ioutil.ReadAll(resp.Body)
@ -282,11 +286,11 @@ func startMetabase() error {
ctx := context.Background()
cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
if err != nil {
return fmt.Errorf("Failed to create docker client : %s", err)
return fmt.Errorf("failed to create docker client : %s", err)
}
if err := cli.ContainerStart(ctx, metabaseContainerID, types.ContainerStartOptions{}); err != nil {
return fmt.Errorf("Failed while starting %s : %s", metabaseContainerID, err)
return fmt.Errorf("failed while starting %s : %s", metabaseContainerID, err)
}
return nil
@ -297,17 +301,17 @@ func stopMetabase(remove bool) error {
ctx := context.Background()
cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
if err != nil {
return fmt.Errorf("Failed to create docker client : %s", err)
return fmt.Errorf("failed to create docker client : %s", err)
}
var to time.Duration = 20 * time.Second
if err := cli.ContainerStop(ctx, metabaseContainerID, &to); err != nil {
return fmt.Errorf("Failed while stopping %s : %s", metabaseContainerID, err)
return fmt.Errorf("failed while stopping %s : %s", metabaseContainerID, err)
}
if remove {
log.Printf("Removing docker metabase %s", metabaseContainerID)
if err := cli.ContainerRemove(ctx, metabaseContainerID, types.ContainerRemoveOptions{}); err != nil {
return fmt.Errorf("Failed remove container %s : %s", metabaseContainerID, err)
return fmt.Errorf("failed remove container %s : %s", metabaseContainerID, err)
}
}
return nil
@ -317,13 +321,13 @@ func createMetabase() error {
ctx := context.Background()
cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
if err != nil {
return fmt.Errorf("Failed to start docker client : %s", err)
return fmt.Errorf("failed to start docker client : %s", err)
}
log.Printf("Pulling docker image %s", metabaseImage)
reader, err := cli.ImagePull(ctx, metabaseImage, types.ImagePullOptions{})
if err != nil {
return fmt.Errorf("Failed to pull docker image : %s", err)
return fmt.Errorf("failed to pull docker image : %s", err)
}
defer reader.Close()
scanner := bufio.NewScanner(reader)
@ -361,11 +365,11 @@ func createMetabase() error {
log.Printf("Creating container")
resp, err := cli.ContainerCreate(ctx, dockerConfig, hostConfig, nil, metabaseContainerID)
if err != nil {
return fmt.Errorf("Failed to create container : %s", err)
return fmt.Errorf("failed to create container : %s", err)
}
log.Printf("Starting container")
if err := cli.ContainerStart(ctx, resp.ID, types.ContainerStartOptions{}); err != nil {
return fmt.Errorf("Failed to start docker container : %s", err)
return fmt.Errorf("failed to start docker container : %s", err)
}
return nil
}

View file

@ -20,7 +20,6 @@ func RemoveMany(ttype string, name string) {
if err != nil {
log.Fatalf("unable to disable %s : %v", v.Name, err)
}
disabled += 1
cwhub.HubIdx[ttype][v.Name] = v
return
} else if name == "" && remove_all {

View file

@ -20,7 +20,7 @@ Fetches the [.index.json](https://github.com/crowdsecurity/hub/blob/master/.inde
Args: cobra.ExactArgs(0),
PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
if !config.configured {
return fmt.Errorf("You must configure cli before interacting with hub.")
return fmt.Errorf("you must configure cli before interacting with hub.")
}
return nil
},

View file

@ -309,7 +309,9 @@ func main() {
//Init the acquisition : from cli or from acquis.yaml file
acquisitionCTX, err = loadAcquisition()
if err != nil {
log.Fatalf("Failed to start acquisition : %s", err)
}
//start reading in the background
acquisition.AcquisStartReading(acquisitionCTX, inputLineChan, &acquisTomb)

View file

@ -201,7 +201,7 @@ func AcquisReadOneFile(ctx FileCtx, output chan types.Event, AcquisTomb *tomb.To
if ctx.Type != FILETYPE {
log.Errorf("Can't tail %s type for %+v", ctx.Type, ctx.Filenames)
return fmt.Errorf("Can't tail %s type for %+v", ctx.Type, ctx.Filenames)
return fmt.Errorf("can't tail %s type for %+v", ctx.Type, ctx.Filenames)
}
log.Infof("Starting tail of %s", ctx.Filename)
timeout := time.Tick(20 * time.Second)

View file

@ -59,10 +59,10 @@ type PullResp struct {
func (ctx *ApiCtx) WriteConfig(cfg string) error {
ret, err := yaml.Marshal(ctx)
if err != nil {
return fmt.Errorf("Failed to marshal config : %s", err)
return fmt.Errorf("failed to marshal config : %s", err)
}
if err := ioutil.WriteFile(cfg, ret, 0600); err != nil {
return fmt.Errorf("Failed to write api file %s : %s", cfg, ret)
return fmt.Errorf("failed to write api file %s : %s", cfg, ret)
}
return nil
}

View file

@ -36,7 +36,9 @@ func (ctx *ApiCtx) pushSignals() error {
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return fmt.Errorf("failed to read body : %s", err)
}
log.Debugf("api push signal: HTTP Code: %+v | Body: %s \n", resp.StatusCode, string(body))
if resp.StatusCode != 200 {
if resp.StatusCode == 401 && !ctx.tokenExpired {
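
The added hunk checks the error from `ioutil.ReadAll` before `body` is used, instead of silently acting on a possibly-truncated response. A minimal sketch of the same pattern (the URL is a placeholder):

```go
package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"net/http"
)

func main() {
	resp, err := http.Get("https://example.com/")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	// The read itself can fail (timeout, truncated stream), so the
	// error is handled before the body bytes are trusted.
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		log.Fatalf("failed to read body : %s", err)
	}
	fmt.Printf("HTTP %d, %d bytes\n", resp.StatusCode, len(body))
}
```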

View file

@ -1,498 +0,0 @@
{
"collections" : {
"crowdsec/linux" : {
"path" : "collections/crowdsec/linux.yaml",
"version" : "0.1",
"versions" : {
"0.1" : { "digest" : "1fc917c7ad66487470e466c0ad40ddd45b9f7730a4b43e1b2542627f0596bbdc", "deprecated" : false }
},
"description" : "generic linux : ssh/nginx/apache + ssh/http scenarios",
"author" : "crowdsec",
"tags" : null,
"parsers" : ["crowdsec/apache2-logs", "crowdsec/sshd-logs", "crowdsec/nginx-logs"],
"scenarios" : ["crowdsec/ssh_enum"]
}
},
"parsers": {
"crowdsec/apache2-logs": {
"path": "parsers/s01-parse/crowdsec/apache2-logs.yaml",
"stage": "s01-parse",
"version": "0.2",
"versions": {
"0.1": {
"digest": "e09bb847fb9a80aedaa4b682309b7e5876398a9a28c28911d969c5dd4aa2c0cf",
"deprecated": false
},
"0.2": {
"digest": "809d2de8c0a9bb7faa69cd53fd2f78bb4fb67b8e85a61b7179243913073890b8",
"deprecated": false
}
},
"description": "Parse Apache2 access and error logs",
"author": "crowdsec",
"tags": null
},
"crowdsec/cowrie-logs": {
"path": "parsers/s01-parse/crowdsec/cowrie-logs.yaml",
"stage": "s01-parse",
"version": "0.2",
"versions": {
"0.1": {
"digest": "5914721479adf812e27fa7d8ef7d533698d773faa863e658c9a9a9b996a2008e",
"deprecated": false
},
"0.2": {
"digest": "86240cc3887580304a1662213ba08e5993d790dcb14b3f08576cb988e449b8b2",
"deprecated": false
}
},
"description": "Parse cowrie honeypots logs",
"author": "crowdsec",
"tags": null
},
"crowdsec/geoip": {
"path": "parsers/s02-enrich/crowdsec/geoip.yaml",
"stage": "s02-enrich",
"version": "0.2",
"versions": {
"0.1": {
"digest": "a80dd157205988b209c95017af56adcd415f7d05e2106d255853016d3068d993",
"deprecated": false
},
"0.2": {
"digest": "9546892698b3e52ee2ad835521093e11edef9c3bbd86a30c8a6b25bc2f732721",
"deprecated": false
}
},
"description": "Enrich geolocalisation data associated to the source IP",
"author": "crowdsec",
"tags": null
},
"crowdsec/http-logs": {
"path": "parsers/s02-enrich/crowdsec/http-logs.yaml",
"stage": "s02-enrich",
"version": "0.2",
"versions": {
"0.1": {
"digest": "17c20627760a32f372fabacc1933ed53ad533bc3cb6b36dc9f2237e768798abe",
"deprecated": false
},
"0.2": {
"digest": "a9c76d274bf69c3e64c486a162f589355c3a53978c2bc2b34dbdaa8c5d65b73c",
"deprecated": false
}
},
"description": "Parse more Specifically HTTP logs, such as HTTP Code, HTTP path, HTTP args and if its a static ressource",
"author": "crowdsec",
"tags": null
},
"crowdsec/mysql-logs": {
"path": "parsers/s01-parse/crowdsec/mysql-logs.yaml",
"stage": "s01-parse",
"version": "0.2",
"versions": {
"0.1": {
"digest": "b5bf9052c14f6a5887804247f58088d9da364b923d61a14791722f7a695e99e4",
"deprecated": false
},
"0.2": {
"digest": "f3975dea7bb749ee0e0bd8b8f444af2f5bb028afd5f78c4198daf2de8c17a9e8",
"deprecated": false
}
},
"description": "Parse MySQL logs",
"author": "crowdsec",
"tags": null
},
"crowdsec/naxsi-logs": {
"path": "parsers/s02-enrich/crowdsec/naxsi-logs.yaml",
"stage": "s02-enrich",
"version": "0.2",
"versions": {
"0.1": {
"digest": "75b0ef4d320aced623327bca496f77d606e2449990dd0f6ef849aa9aaf91aad2",
"deprecated": false
},
"0.2": {
"digest": "a93b89b1cb2a9d61d61c50c6dd4c89707d770c7e9c504d8683d802bb1ec57d07",
"deprecated": false
}
},
"description": "Enrich logs if its from NAXSI",
"author": "crowdsec",
"tags": null
},
"crowdsec/nginx-logs": {
"path": "parsers/s01-parse/crowdsec/nginx-logs.yaml",
"stage": "s01-parse",
"version": "0.2",
"versions": {
"0.1": {
"digest": "86c5d6cb6671f1c233b06b0afbd43a33740dd55df004ae01ff43714d2ca822bf",
"deprecated": false
},
"0.2": {
"digest": "36200096b897494563d31f38bee86c22868ac9bd54b74591398474547d968339",
"deprecated": false
}
},
"description": "Parse nginx access and error logs",
"author": "crowdsec",
"tags": null
},
"crowdsec/skip-pretag": {
"path": "parsers/s00-raw/crowdsec/skip-pretag.yaml",
"stage": "s00-raw",
"version": "0.1",
"versions": {
"0.1": {
"digest": "c43d625b9854a5d66a5227068e943a77d57111b3411262a856a4d3c9415dd6c4",
"deprecated": false
}
},
"author": "crowdsec",
"tags": null
},
"crowdsec/smb-logs": {
"path": "parsers/s01-parse/crowdsec/smb-logs.yaml",
"stage": "s01-parse",
"version": "0.2",
"versions": {
"0.1": {
"digest": "edba72ee6bdbfad7d453e8564de4c6cfbaa3f99c907f3ad9da3e8d499f6d264d",
"deprecated": false
},
"0.2": {
"digest": "86a5cfaf053da6a820fb6f3679633dce76dc6b75a3f84cf18b1502d8c0d2a519",
"deprecated": false
}
},
"description": "Parse SMB logs",
"author": "crowdsec",
"tags": null
},
"crowdsec/sshd-logs": {
"path": "parsers/s01-parse/crowdsec/sshd-logs.yaml",
"stage": "s01-parse",
"version": "0.2",
"versions": {
"0.1": {
"digest": "ede920fb15f97c8fe559e2687d200232074ea2d76e57a80db147451e5fded359",
"deprecated": false
},
"0.2": {
"digest": "43c2602153722d2bfc8f1851278469fa7838a82ce752ce1bbdde192299a93c6d",
"deprecated": false
}
},
"description": "Parse openSSH logs",
"author": "crowdsec",
"tags": null
},
"crowdsec/syslog-parse": {
"path": "parsers/s00-raw/crowdsec/syslog-parse.yaml",
"stage": "s00-raw",
"version": "0.2",
"versions": {
"0.1": {
"digest": "ea6d39fdfd9c73ece96bd57ecdff952e6db99e4d1652f3c1b74ed9d52d185846",
"deprecated": false
},
"0.2": {
"digest": "98feb5259f175e0e17db44bc911ef458f9f55c5b524fa2e201847e16f4e83a1b",
"deprecated": false
}
},
"author": "crowdsec",
"tags": null
},
"crowdsec/tcpdump-logs": {
"path": "parsers/s01-parse/crowdsec/tcpdump-logs.yaml",
"stage": "s01-parse",
"version": "0.2",
"versions": {
"0.1": {
"digest": "f3a55b79061bc1dbfce85855363b73a09e7cce5c0ff9972bdb4f7ec7fabcd9f8",
"deprecated": false
},
"0.2": {
"digest": "8d0dc2230eefc35d9c7aec97cbf95a824fbdd66582aa4e5ededf17131ecd6103",
"deprecated": false
}
},
"description": "Parse tcpdump raw logs",
"author": "crowdsec",
"tags": null
},
"crowdsec/timemachine": {
"path": "parsers/s02-enrich/crowdsec/timemachine.yaml",
"stage": "s02-enrich",
"version": "0.1",
"versions": {
"0.1": {
"digest": "cd9f202305b3210511bce32950e0e06ce416391ab53875cc17d5f6aecc8bbf19",
"deprecated": false
}
},
"author": "crowdsec",
"tags": null
}
},
"postoverflows": {
"crowdsec/rdns": {
"path": "postoverflows/s00-enrich/crowdsec/rdns.yaml",
"stage": "s00-enrich",
"version": "0.2",
"versions": {
"0.1": {
"digest": "d04e28fa2c74f4c1ba3f1daeeeaa8a95858f620e7587123cde224b6b376ad16a",
"deprecated": false
},
"0.2": {
"digest": "e1f7905318e7d8c432e4cf1428e3e7c943aec7c625a5d598e5b26b36a6231f1e",
"deprecated": false
}
},
"description": "Lookup the DNS assiocated to the source IP only for overflows",
"author": "crowdsec",
"tags": null
}
},
"scenarios": {
"crowdsec/counters": {
"path": "scenarios/crowdsec/counters.yaml",
"version": "0.2",
"versions": {
"0.1": {
"digest": "edd898e179c89ddc85890e702dc2975ecf411546fa3082b8f190ccb5d7304aa8",
"deprecated": false
},
"0.2": {
"digest": "04ef21d6f7f48d66119098e8ecd23b5c1107e8fdd274ffddb5f8309252c1dfd1",
"deprecated": false
}
},
"description": "Count unique ssh bruteforces",
"author": "crowdsec",
"tags": [
"ssh"
]
},
"crowdsec/double_drop": {
"path": "scenarios/crowdsec/double_drop.yaml",
"version": "0.1",
"versions": {
"0.1": {
"digest": "0f6bd279437d9ef8061d8b69c6567c0389101811cc741a2ad766ffee1f7a8dc6",
"deprecated": false
}
},
"description": "Ban a range if more than 5 ips from it are banned at a time",
"author": "crowdsec",
"tags": null
},
"crowdsec/http_404_scan": {
"path": "scenarios/crowdsec/http_404_scan.yaml",
"version": "0.3",
"versions": {
"0.1": {
"digest": "4224c98f088b553cf65db1608dc448ee5e679de31437bfe2f65352362c66b24f",
"deprecated": false
},
"0.2": {
"digest": "62768595d349c174078057534ebc21de37560a258b98fbc63ddc5106edb4db40",
"deprecated": false
},
"0.3": {
"digest": "9ec1df959e637d08d6fc969bbfa94deba72230cb1cb528ecba4180b62670032a",
"deprecated": false
}
},
"description": "Detect multiple unique 404 from a single ip",
"author": "crowdsec",
"tags": [
"http",
"scan"
]
},
"crowdsec/http_aggressive_crawl": {
"path": "scenarios/crowdsec/http_aggressive_crawl.yaml",
"version": "0.1",
"versions": {
"0.1": {
"digest": "e0b6a1c40f8009bec4698fb0562ad34d8159aa7e1006dedbd9d28c397ab4db1a",
"deprecated": false
}
},
"description": "Detect aggressive crawl from multiple ips",
"author": "crowdsec",
"tags": [
"http",
"distributed_crawl"
]
},
"crowdsec/http_distributed_crawl": {
"path": "scenarios/crowdsec/http_distributed_crawl.yaml",
"version": "0.2",
"versions": {
"0.1": {
"digest": "8eb442380f5a996a4ccba30b6dd39391ea021c0dead7cb3b7a7eea8f216a468f",
"deprecated": false
},
"0.2": {
"digest": "bf778e2c091bb9099a019317311a191ece7b027389231f13a2c684f647e06a66",
"deprecated": false
}
},
"description": "an aggressive crawl distributed amongst several ips",
"author": "crowdsec",
"tags": [
"http",
"distributed_crawl"
]
},
"crowdsec/mysql_bf": {
"path": "scenarios/crowdsec/mysql_bf.yaml",
"version": "0.2",
"versions": {
"0.1": {
"digest": "058a37a9d144c25586c6cb6f5cd471436bd8adb87f54e66a0a7dfc3509bb20d0",
"deprecated": false
},
"0.2": {
"digest": "74356430e1ff91b08b95e213e5fc8bb7b9894a3f131ffc31a6507cbfba7f2abb",
"deprecated": false
}
},
"description": "Detect mysql bruteforce",
"author": "crowdsec",
"tags": [
"mysql",
"bruteforce"
]
},
"crowdsec/naxsi": {
"path": "scenarios/crowdsec/naxsi.yaml",
"version": "0.2",
"versions": {
"0.1": {
"digest": "7004c206a2fc5e4f786ae226ebca142a5eb372bb22b56276811bf2b43b9e8c22",
"deprecated": false
},
"0.2": {
"digest": "16838eae3b5515e732084e1508518ecdc8c35968631d617f10314e5d95950493",
"deprecated": false
}
},
"description": "Detect custom blacklist triggered in naxsi",
"author": "crowdsec",
"tags": [
"http",
"scan"
]
},
"crowdsec/smb_bf": {
"path": "scenarios/crowdsec/smb_bf.yaml",
"version": "0.1",
"versions": {
"0.1": {
"digest": "0078c276a111618d89203fac5e192d2564d186b9da7575e9cd75a186ca573e72",
"deprecated": false
}
},
"description": "Detect smb bruteforce",
"author": "crowdsec",
"tags": [
"smb",
"bruteforce"
]
},
"crowdsec/ssh_bf": {
"path": "scenarios/crowdsec/ssh_bf.yaml",
"version": "0.2",
"versions": {
"0.1": {
"digest": "252354885e933ed8f6fb255c764d15e529c285443eee5efac3bc3d801f2789fe",
"deprecated": false
},
"0.2": {
"digest": "8e4bf46e185e8a0764535bf84ba5d8a5515e266272a363c8f8929fc85dbc4609",
"deprecated": false
}
},
"description": "Detect ssh user enum bruteforce",
"author": "crowdsec",
"tags": [
"ssh",
"bruteforce"
]
},
"crowdsec/ssh_enum": {
"path": "scenarios/crowdsec/ssh_enum.yaml",
"version": "0.1",
"versions": {
"0.1": {
"digest": "335776aafa070073abdc1c9cf333c5fd2513c982443a29476e0b31c339b6b17f",
"deprecated": false
}
},
"description": "Detect ssh user enum bruteforce",
"author": "crowdsec",
"tags": [
"ssh",
"bruteforce"
]
},
"crowdsec/tcpdump": {
"path": "scenarios/crowdsec/tcpdump.yaml",
"version": "0.2",
"versions": {
"0.1": {
"digest": "2fe9e4ce72a8552bfd65d2d28759e4724bd0a85c716685d9d9b992f9cecb5a1f",
"deprecated": false
},
"0.2": {
"digest": "fe9392749ad32925ebd7a5c776bbde8527a1a02f8a531de04da51726bdb54bcb",
"deprecated": false
}
},
"description": "Detect new connection with tcpdump",
"author": "crowdsec",
"tags": [
"tcp"
]
},
"crowdsec/telnet_bf": {
"path": "scenarios/crowdsec/telnet_bf.yaml",
"version": "0.1",
"versions": {
"0.1": {
"digest": "c0dcbfcfc86f3f3ecbc4888e78e06f322ca7d4dc11fd6604893f76bb52ca6c9d",
"deprecated": false
}
},
"description": "detect telnet bruteforce",
"author": "crowdsec",
"tags": [
"telnet",
"bruteforce"
]
},
"crowdsec/wordpress_bf": {
"path": "scenarios/crowdsec/wordpress_bf.yaml",
"version": "0.1",
"versions": {
"0.1": {
"digest": "a89253d2f02f0dc0bfecd85998ba5dd45eecf94929c1fa058ef9fe1646b511d9",
"deprecated": false
}
},
"description": "detect wordpress bruteforce",
"author": "crowdsec",
"tags": [
"http",
"bruteforce"
]
}
}
}

View file

@ -153,7 +153,7 @@ func parser_visit(path string, f os.FileInfo, err error) error {
ftype = COLLECTIONS
stage = ""
} else if ftype != PARSERS && ftype != PARSERS_OVFLW /*its a PARSER / PARSER_OVFLW with a stage */ {
return fmt.Errorf("Unknown prefix in %s : fname:%s, fauthor:%s, stage:%s, ftype:%s", path, fname, fauthor, stage, ftype)
return fmt.Errorf("unknown prefix in %s : fname:%s, fauthor:%s, stage:%s, ftype:%s", path, fname, fauthor, stage, ftype)
}
log.Debugf("CORRECTED [%s] by [%s] in stage [%s] of type [%s]", fname, fauthor, stage, ftype)
@ -179,7 +179,7 @@ func parser_visit(path string, f os.FileInfo, err error) error {
log.Infof("%s is a symlink to %s that doesn't exist, deleting symlink", path, hubpath)
//remove the symlink
if err = os.Remove(path); err != nil {
return fmt.Errorf("Failed to unlink %s: %+v", path, err)
return fmt.Errorf("failed to unlink %s: %+v", path, err)
}
return nil
}
@ -453,7 +453,7 @@ func LoadPkgIndex(buff []byte) (map[string]map[string]Item, error) {
var RawIndex map[string]map[string]Item
if err = json.Unmarshal(buff, &RawIndex); err != nil {
return nil, fmt.Errorf("Failed to unmarshal index : %v", err)
return nil, fmt.Errorf("failed to unmarshal index : %v", err)
}
/*Iterate over the different types to complete struct */
@ -532,7 +532,7 @@ func DisableItem(target Item, tdir string, hdir string, purge bool) (Item, error
//remove the symlink
if err = os.Remove(syml); err != nil {
return target, fmt.Errorf("Failed to unlink %s: %+v", syml, err)
return target, fmt.Errorf("failed to unlink %s: %+v", syml, err)
}
log.Infof("Removed symlink [%s] : %s", target.Name, syml)
}
@ -542,7 +542,7 @@ func DisableItem(target Item, tdir string, hdir string, purge bool) (Item, error
hubpath := hdir + "/" + target.RemotePath
//if purge, disable hub file
if err = os.Remove(hubpath); err != nil {
return target, fmt.Errorf("Failed to purge hub file %s: %+v", hubpath, err)
return target, fmt.Errorf("failed to purge hub file %s: %+v", hubpath, err)
}
target.Downloaded = false
log.Infof("Removed source file [%s] : %s", target.Name, hubpath)
@ -568,7 +568,7 @@ func EnableItem(target Item, tdir string, hdir string) (Item, error) {
if _, err := os.Stat(parent_dir); os.IsNotExist(err) {
log.Printf("%s doesn't exist, create", parent_dir)
if err := os.MkdirAll(parent_dir, os.ModePerm); err != nil {
return target, fmt.Errorf("Unable to create parent directories")
return target, fmt.Errorf("unable to create parent directories")
}
}
if _, err := os.Lstat(parent_dir + "/" + target.FileName); os.IsNotExist(err) {
@ -582,11 +582,11 @@ func EnableItem(target Item, tdir string, hdir string) (Item, error) {
HubIdx[ptrtype][p], err = EnableItem(val, Installdir, Hubdir)
if err != nil {
log.Errorf("Encountered error while installing sub-item %s %s : %s.", ptrtype, p, err)
return target, fmt.Errorf("Encountered error while install %s for %s, abort.", val.Name, target.Name)
return target, fmt.Errorf("encountered error while install %s for %s, abort.", val.Name, target.Name)
}
} else {
//log.Errorf("Referred %s %s in collection %s doesn't exist.", ptrtype, p, target.Name)
return target, fmt.Errorf("Required %s %s of %s doesn't exist, abort.", ptrtype, p, target.Name)
return target, fmt.Errorf("required %s %s of %s doesn't exist, abort.", ptrtype, p, target.Name)
}
}
}
@ -603,7 +603,7 @@ func EnableItem(target Item, tdir string, hdir string) (Item, error) {
err = os.Symlink(srcPath, dstPath)
if err != nil {
log.Fatalf("Failed to symlink %s to %s : %v", srcPath, dstPath, err)
return target, fmt.Errorf("Failed to symlink %s to %s", srcPath, dstPath)
return target, fmt.Errorf("failed to symlink %s to %s", srcPath, dstPath)
}
log.Printf("Enabled %s : %s", target.Type, target.Name)
} else {
@ -630,21 +630,24 @@ func DownloadLatest(target Item, tdir string, overwrite bool) (Item, error) {
HubIdx[ptrtype][p], err = DownloadLatest(val, tdir, overwrite)
if err != nil {
log.Errorf("Encountered error while downloading sub-item %s %s : %s.", ptrtype, p, err)
return target, fmt.Errorf("Encountered error while downloading %s for %s, abort.", val.Name, target.Name)
return target, fmt.Errorf("encountered error while downloading %s for %s, abort", val.Name, target.Name)
}
}
HubIdx[ptrtype][p], err = DownloadItem(val, tdir, overwrite)
if err != nil {
log.Errorf("Encountered error while downloading sub-item %s %s : %s.", ptrtype, p, err)
return target, fmt.Errorf("Encountered error while downloading %s for %s, abort.", val.Name, target.Name)
return target, fmt.Errorf("encountered error while downloading %s for %s, abort", val.Name, target.Name)
}
} else {
//log.Errorf("Referred %s %s in collection %s doesn't exist.", ptrtype, p, target.Name)
return target, fmt.Errorf("Required %s %s of %s doesn't exist, abort.", ptrtype, p, target.Name)
return target, fmt.Errorf("required %s %s of %s doesn't exist, abort", ptrtype, p, target.Name)
}
}
}
target, err = DownloadItem(target, tdir, overwrite)
if err != nil {
return target, fmt.Errorf("failed to download item : %s", err)
}
} else {
return DownloadItem(target, tdir, overwrite)
}
@ -702,7 +705,7 @@ func DownloadItem(target Item, tdir string, overwrite bool) (Item, error) {
if _, err = os.Stat(parent_dir); os.IsNotExist(err) {
log.Debugf("%s doesn't exist, create", parent_dir)
if err := os.MkdirAll(parent_dir, os.ModePerm); err != nil {
return target, fmt.Errorf("Unable to create parent directories")
return target, fmt.Errorf("unable to create parent directories")
}
}
/*check actual file*/
@ -715,12 +718,12 @@ func DownloadItem(target Item, tdir string, overwrite bool) (Item, error) {
f, err := os.OpenFile(tdir+"/"+target.RemotePath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644)
if err != nil {
return target, fmt.Errorf("Failed to open destination file %s : %v", tdir+"/"+target.RemotePath, err)
return target, fmt.Errorf("failed to open destination file %s : %v", tdir+"/"+target.RemotePath, err)
}
defer f.Close()
_, err = f.WriteString(string(body))
if err != nil {
return target, fmt.Errorf("Failed to write destination file %s : %v", tdir+"/"+target.RemotePath, err)
return target, fmt.Errorf("failed to write destination file %s : %v", tdir+"/"+target.RemotePath, err)
}
target.Downloaded = true
target.Tainted = false

View file

@ -49,11 +49,11 @@ func VersionStr() string {
func Statisfies(strvers string, constraint string) (bool, error) {
vers, err := version.NewVersion(strvers)
if err != nil {
return false, fmt.Errorf("Failed to parse '%s' : %v", strvers, err)
return false, fmt.Errorf("failed to parse '%s' : %v", strvers, err)
}
constraints, err := version.NewConstraint(constraint)
if err != nil {
return false, fmt.Errorf("Failed to parse constraint '%s'", constraint)
return false, fmt.Errorf("failed to parse constraint '%s'", constraint)
}
if !constraints.Check(vers) {
return false, nil
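
`Statisfies` appears to build on the hashicorp/go-version library; assuming that, here is a short, self-contained usage sketch of the same version/constraint calls (values are arbitrary):

```go
package main

import (
	"fmt"
	"log"

	version "github.com/hashicorp/go-version"
)

func main() {
	vers, err := version.NewVersion("1.0.4")
	if err != nil {
		log.Fatalf("failed to parse '%s' : %v", "1.0.4", err)
	}
	constraints, err := version.NewConstraint(">= 1.0, < 2.0")
	if err != nil {
		log.Fatalf("failed to parse constraint '%s'", ">= 1.0, < 2.0")
	}
	// Check reports whether the version satisfies every constraint.
	fmt.Println("satisfied:", constraints.Check(vers)) // true
}
```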

View file

@ -23,7 +23,7 @@ func NewBlackhole(g *BucketFactory) (*Blackhole, error) {
var duration time.Duration
if d, err := time.ParseDuration(g.Blackhole); err != nil {
g.logger.Warning("Blackhole duration not valid, using 1h")
return nil, fmt.Errorf("Blackhole duration not valid '%s'", g.Blackhole)
return nil, fmt.Errorf("blackhole duration not valid '%s'", g.Blackhole)
} else {
duration = d
}

View file

@ -64,37 +64,37 @@ type BucketFactory struct {
func ValidateFactory(b *BucketFactory) error {
if b.Name == "" {
return fmt.Errorf("A bucket must have name")
return fmt.Errorf("bucket must have name")
}
if b.Description == "" {
return fmt.Errorf("Description is mandatory")
return fmt.Errorf("description is mandatory")
}
if b.Type == "leaky" {
if b.Capacity <= 0 { //capacity must be a positive int
return fmt.Errorf("Bad capacity for leaky '%d'", b.Capacity)
return fmt.Errorf("bad capacity for leaky '%d'", b.Capacity)
}
if b.LeakSpeed == "" {
return fmt.Errorf("leakspeed can't be empty for leaky")
}
if b.leakspeed == 0 {
return fmt.Errorf("Bad leakspeed for leaky '%s'", b.LeakSpeed)
return fmt.Errorf("bad leakspeed for leaky '%s'", b.LeakSpeed)
}
} else if b.Type == "counter" {
if b.Duration == "" {
return fmt.Errorf("Duration ca't be empty for counter")
return fmt.Errorf("duration ca't be empty for counter")
}
if b.duration == 0 {
return fmt.Errorf("Bad duration for counter bucket '%d'", b.duration)
return fmt.Errorf("bad duration for counter bucket '%d'", b.duration)
}
if b.Capacity != -1 {
return fmt.Errorf("Counter bucket must have -1 capacity")
return fmt.Errorf("counter bucket must have -1 capacity")
}
} else if b.Type == "trigger" {
if b.Capacity != 0 {
return fmt.Errorf("Trigger bucket must have 0 capacity")
return fmt.Errorf("trigger bucket must have 0 capacity")
}
} else {
return fmt.Errorf("Unknown bucket type '%s'", b.Type)
return fmt.Errorf("unknown bucket type '%s'", b.Type)
}
return nil
}
@ -136,13 +136,13 @@ func LoadBuckets(files []string) ([]BucketFactory, chan types.Event, error) {
break
} else {
log.Errorf("Bad yaml in %s : %v", f, err)
return nil, nil, fmt.Errorf("Bad yaml in %s : %v", f, err)
return nil, nil, fmt.Errorf("bad yaml in %s : %v", f, err)
}
}
//check empty
if g.Name == "" {
log.Errorf("Won't load nameless bucket")
return nil, nil, fmt.Errorf("Nameless bucket")
return nil, nil, fmt.Errorf("nameless bucket")
}
//check compat
if g.FormatVersion == "" {
@ -163,7 +163,7 @@ func LoadBuckets(files []string) ([]BucketFactory, chan types.Event, error) {
err = LoadBucket(&g)
if err != nil {
log.Errorf("Failed to load bucket : %v", err)
return nil, nil, fmt.Errorf("LoadBucket failed : %v", err)
return nil, nil, fmt.Errorf("loadBucket failed : %v", err)
}
ret = append(ret, g)
}
@ -209,30 +209,30 @@ func LoadBucket(g *BucketFactory) error {
if g.LeakSpeed != "" {
if g.leakspeed, err = time.ParseDuration(g.LeakSpeed); err != nil {
return fmt.Errorf("Bad leakspeed '%s' in %s : %v", g.LeakSpeed, g.Filename, err)
return fmt.Errorf("bad leakspeed '%s' in %s : %v", g.LeakSpeed, g.Filename, err)
}
} else {
g.leakspeed = time.Duration(0)
}
if g.Duration != "" {
if g.duration, err = time.ParseDuration(g.Duration); err != nil {
return fmt.Errorf("Invalid Duration '%s' in %s : %v", g.Duration, g.Filename, err)
return fmt.Errorf("invalid Duration '%s' in %s : %v", g.Duration, g.Filename, err)
}
}
if g.Filter == "" {
g.logger.Warningf("Bucket without filter, abort.")
return fmt.Errorf("Bucket without filter directive.")
return fmt.Errorf("bucket without filter directive.")
}
g.RunTimeFilter, err = expr.Compile(g.Filter, expr.Env(exprhelpers.GetExprEnv(map[string]interface{}{"evt": &types.Event{}})))
if err != nil {
return fmt.Errorf("Invalid filter '%s' in %s : %v", g.Filter, g.Filename, err)
return fmt.Errorf("invalid filter '%s' in %s : %v", g.Filter, g.Filename, err)
}
if g.GroupBy != "" {
g.RunTimeGroupBy, err = expr.Compile(g.GroupBy, expr.Env(exprhelpers.GetExprEnv(map[string]interface{}{"evt": &types.Event{}})))
if err != nil {
return fmt.Errorf("Invalid groupby '%s' in %s : %v", g.GroupBy, g.Filename, err)
return fmt.Errorf("invalid groupby '%s' in %s : %v", g.GroupBy, g.Filename, err)
}
}
@ -247,7 +247,7 @@ func LoadBucket(g *BucketFactory) error {
case "counter":
g.processors = append(g.processors, &DumbProcessor{})
default:
return fmt.Errorf("Invalid type '%s' in %s : %v", g.Type, g.Filename, err)
return fmt.Errorf("invalid type '%s' in %s : %v", g.Type, g.Filename, err)
}
if g.Distinct != "" {
@ -260,7 +260,7 @@ func LoadBucket(g *BucketFactory) error {
filovflw, err := NewOverflowFilter(g)
if err != nil {
g.logger.Errorf("Error creating overflow_filter : %s", err)
return fmt.Errorf("Error creating overflow_filter : %s", err)
return fmt.Errorf("error creating overflow_filter : %s", err)
}
g.processors = append(g.processors, filovflw)
}
@ -270,14 +270,14 @@ func LoadBucket(g *BucketFactory) error {
blackhole, err := NewBlackhole(g)
if err != nil {
g.logger.Errorf("Error creating blackhole : %s", err)
return fmt.Errorf("Error creating blackhole : %s", err)
return fmt.Errorf("error creating blackhole : %s", err)
}
g.processors = append(g.processors, blackhole)
}
g.output = false
if err := ValidateFactory(g); err != nil {
return fmt.Errorf("Invalid bucket from %s : %v", g.Filename, err)
return fmt.Errorf("invalid bucket from %s : %v", g.Filename, err)
}
return nil

View file

@ -31,7 +31,7 @@ func NewOverflowFilter(g *BucketFactory) (*OverflowFilter, error) {
"queue": &Queue{}, "signal": &types.SignalOccurence{}, "leaky": &Leaky{}})))
if err != nil {
g.logger.Errorf("Unable to compile filter : %v", err)
return nil, fmt.Errorf("Unable to compile filter : %v", err)
return nil, fmt.Errorf("unable to compile filter : %v", err)
}
return &u, nil
}
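
The overflow filter is compiled with what looks like the antonmedv/expr library; under that assumption, a self-contained sketch of compile-once, run-per-event (the field names are made up):

```go
package main

import (
	"fmt"
	"log"

	"github.com/antonmedv/expr"
)

func main() {
	env := map[string]interface{}{"service": "ssh", "queue_len": 12}
	// Compile against a typed environment once, then evaluate per event.
	program, err := expr.Compile(`service == "ssh" && queue_len > 10`, expr.Env(env))
	if err != nil {
		log.Fatalf("unable to compile filter : %v", err)
	}
	out, err := expr.Run(program, env)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("filter matched:", out) // true
}
```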

View file

@ -56,7 +56,7 @@ func OvflwToOrder(sig types.SignalOccurence, prof types.Profile) (*types.BanOrde
ordr.Scope = v
asn, err := strconv.Atoi(sig.Source.AutonomousSystemNumber)
if err != nil {
warn = fmt.Errorf("Invalid as number : %s : %s", sig.Source.AutonomousSystemNumber, err)
warn = fmt.Errorf("invalid as number : %s : %s", sig.Source.AutonomousSystemNumber, err)
}
ordr.TargetAS = asn
ordr.TargetASName = sig.Source.AutonomousSystemOrganization

View file

@ -60,7 +60,7 @@ func (n *Node) validate(pctx *UnixParserCtx) error {
//stage is being set automagically
if n.Stage == "" {
return fmt.Errorf("Stage needs to be an existing stage")
return fmt.Errorf("stage needs to be an existing stage")
}
/* "" behaves like continue */

View file

@ -48,14 +48,14 @@ func LoadStages(stageFiles []Stagefile, pctx *UnixParserCtx) ([]Node, error) {
}
st, err := os.Stat(stageFile.Filename)
if err != nil {
return nil, fmt.Errorf("Failed to stat %s : %v", stageFile, err)
return nil, fmt.Errorf("failed to stat %s : %v", stageFile, err)
}
if st.IsDir() {
continue
}
yamlFile, err := os.Open(stageFile.Filename)
if err != nil {
return nil, fmt.Errorf("Can't access parsing configuration file %s : %s", stageFile.Filename, err)
return nil, fmt.Errorf("can't access parsing configuration file %s : %s", stageFile.Filename, err)
}
//process the yaml
dec := yaml.NewDecoder(yamlFile)
@ -128,7 +128,7 @@ func LoadStageDir(dir string, pctx *UnixParserCtx) ([]Node, error) {
m, err := filepath.Glob(dir + "/*/*")
if err != nil {
return nil, fmt.Errorf("Unable to find configs in '%s' : %v", dir, err)
return nil, fmt.Errorf("unable to find configs in '%s' : %v", dir, err)
}
for _, f := range m {
tmp := Stagefile{}

View file

@ -18,7 +18,7 @@ func (c *Context) DeleteBan(target string) (int, error) {
}
return int(ret.RowsAffected), nil
}
return 0, fmt.Errorf("No target provided")
return 0, fmt.Errorf("no target provided")
}
func (c *Context) DeleteAll() error {

View file

@ -89,7 +89,7 @@ func (c *Context) GetBansAt(at time.Time) ([]map[string]string, error) {
*/
ret := c.Db.Table("ban_applications").Order("updated_at desc").Where(`ip_text = ? AND strftime("%s", until) >= strftime("%s", ?) AND strftime("%s", created_at) < strftime("%s", ?) AND deleted_at is NULL`, ba.IpText, at, at).Count(&count)
if ret.Error != nil {
return nil, fmt.Errorf("Failed to fetch records count for %s : %v", ba.IpText, ret.Error)
return nil, fmt.Errorf("failed to fetch records count for %s : %v", ba.IpText, ret.Error)
}
sOs := []types.SignalOccurence{}
nbSo := 0