diff --git a/.golangci.yml b/.golangci.yml
index c09c15772..3047402bf 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -71,6 +71,7 @@ linters:
   # - exportloopref # checks for pointers to enclosing loop variables
   # - funlen # Tool for detection of long functions
   # - gochecknoinits # Checks that no init functions are present in Go code
+  # - gocritic # Provides diagnostics that check for bugs, performance and style issues.
   # - goheader # Checks is file header matches to pattern
   # - gomoddirectives # Manage the use of 'replace', 'retract', and 'excludes' directives in go.mod.
   # - gomodguard # Allow and block list linter for direct Go module dependencies. This is different from depguard where there are different block types for example version constraints and module recommendations.
@@ -84,6 +85,7 @@ linters:
   # - logrlint # Check logr arguments.
   # - makezero # Finds slice declarations with non-zero initial length
   # - misspell # Finds commonly misspelled English words in comments
+  # - nilerr # Finds the code that returns nil even if it checks that the error is not nil.
   # - nolintlint # Reports ill-formed or insufficient nolint directives
   # - predeclared # find code that shadows one of Go's predeclared identifiers
   # - reassign # Checks that package variables are not reassigned
@@ -107,14 +109,12 @@ linters:
   - errorlint # errorlint is a linter for that can be used to find code that will cause problems with the error wrapping scheme introduced in Go 1.13.
   - exhaustive # check exhaustiveness of enum switch statements
   - gci # Gci control golang package import order and make it always deterministic.
-  - gocritic # Provides diagnostics that check for bugs, performance and style issues.
   - godot # Check if comments end in a period
   - gofmt # Gofmt checks whether code was gofmt-ed. By default this tool runs with -s option to check for code simplification
   - goimports # In addition to fixing imports, goimports also formats your code in the same style as gofmt.
   - gosec # (gas): Inspects source code for security problems
   - lll # Reports long lines
   - nakedret # Finds naked returns in functions greater than a specified function length
-  - nilerr # Finds the code that returns nil even if it checks that the error is not nil.
   - nonamedreturns # Reports all named returns
   - nosprintfhostport # Checks for misuse of Sprintf to construct a host with port in a URL.
   - promlinter # Check Prometheus metrics naming via promlint
@@ -199,3 +199,23 @@ issues:
     - linters:
         - errcheck
       text: "Error return value of `.*` is not checked"
+
+  #
+  # gocritic
+  #
+
+    - linters:
+        - gocritic
+      text: "ifElseChain: rewrite if-else to switch statement"
+
+    - linters:
+        - gocritic
+      text: "captLocal: `.*' should not be capitalized"
+
+    - linters:
+        - gocritic
+      text: "appendAssign: append result not assigned to the same slice"
+
+    - linters:
+        - gocritic
+      text: "commentFormatting: put a space between `//` and comment text"
diff --git a/cmd/crowdsec-cli/alerts.go b/cmd/crowdsec-cli/alerts.go
index 3437a9536..c7061570f 100644
--- a/cmd/crowdsec-cli/alerts.go
+++ b/cmd/crowdsec-cli/alerts.go
@@ -189,31 +189,27 @@ cscli alerts list --type ban`,
 			if *alertListFilter.Until == "" {
 				alertListFilter.Until = nil
-			} else {
+			} else if strings.HasSuffix(*alertListFilter.Until, "d") {
 				/*time.ParseDuration support hours 'h' as bigger unit, let's make the user's life easier*/
-				if strings.HasSuffix(*alertListFilter.Until, "d") {
-					realDuration := strings.TrimSuffix(*alertListFilter.Until, "d")
-					days, err := strconv.Atoi(realDuration)
-					if err != nil {
-						printHelp(cmd)
-						log.Fatalf("Can't parse duration %s, valid durations format: 1d, 4h, 4h15m", *alertListFilter.Until)
-					}
-					*alertListFilter.Until = fmt.Sprintf("%d%s", days*24, "h")
+				realDuration := strings.TrimSuffix(*alertListFilter.Until, "d")
+				days, err := strconv.Atoi(realDuration)
+				if err != nil {
+					printHelp(cmd)
+					log.Fatalf("Can't parse duration %s, valid durations format: 1d, 4h, 4h15m", *alertListFilter.Until)
 				}
+				*alertListFilter.Until = fmt.Sprintf("%d%s", days*24, "h")
 			}
 
 			if *alertListFilter.Since == "" {
 				alertListFilter.Since = nil
-			} else {
+			} else if strings.HasSuffix(*alertListFilter.Since, "d") {
 				/*time.ParseDuration support hours 'h' as bigger unit, let's make the user's life easier*/
-				if strings.HasSuffix(*alertListFilter.Since, "d") {
-					realDuration := strings.TrimSuffix(*alertListFilter.Since, "d")
-					days, err := strconv.Atoi(realDuration)
-					if err != nil {
-						printHelp(cmd)
-						log.Fatalf("Can't parse duration %s, valid durations format: 1d, 4h, 4h15m", *alertListFilter.Since)
-					}
-					*alertListFilter.Since = fmt.Sprintf("%d%s", days*24, "h")
+				realDuration := strings.TrimSuffix(*alertListFilter.Since, "d")
+				days, err := strconv.Atoi(realDuration)
+				if err != nil {
+					printHelp(cmd)
+					log.Fatalf("Can't parse duration %s, valid durations format: 1d, 4h, 4h15m", *alertListFilter.Since)
 				}
+				*alertListFilter.Since = fmt.Sprintf("%d%s", days*24, "h")
 			}
 
 			if *alertListFilter.IncludeCAPI {
diff --git a/cmd/crowdsec-cli/decisions.go b/cmd/crowdsec-cli/decisions.go
index 9c8dc0ec1..22563f714 100644
--- a/cmd/crowdsec-cli/decisions.go
+++ b/cmd/crowdsec-cli/decisions.go
@@ -179,31 +179,28 @@ cscli decisions list -t ban
 			/* nullify the empty entries to avoid bad filter */
 			if *filter.Until == "" {
 				filter.Until = nil
-			} else {
+			} else if strings.HasSuffix(*filter.Until, "d") {
 				/*time.ParseDuration support hours 'h' as bigger unit, let's make the user's life easier*/
-				if strings.HasSuffix(*filter.Until, "d") {
-					realDuration := strings.TrimSuffix(*filter.Until, "d")
-					days, err := strconv.Atoi(realDuration)
-					if err != nil {
-						printHelp(cmd)
-						log.Fatalf("Can't parse duration %s, valid durations format: 1d, 4h, 4h15m", *filter.Until)
-					}
-					*filter.Until = fmt.Sprintf("%d%s", days*24, "h")
+				realDuration := strings.TrimSuffix(*filter.Until, "d")
+				days, err := strconv.Atoi(realDuration)
+				if err != nil {
+					printHelp(cmd)
+					log.Fatalf("Can't parse duration %s, valid durations format: 1d, 4h, 4h15m", *filter.Until)
 				}
+				*filter.Until = fmt.Sprintf("%d%s", days*24, "h")
 			}
+
 			if *filter.Since == "" {
 				filter.Since = nil
-			} else {
+			} else if strings.HasSuffix(*filter.Since, "d") {
 				/*time.ParseDuration support hours 'h' as bigger unit, let's make the user's life easier*/
-				if strings.HasSuffix(*filter.Since, "d") {
-					realDuration := strings.TrimSuffix(*filter.Since, "d")
-					days, err := strconv.Atoi(realDuration)
-					if err != nil {
-						printHelp(cmd)
-						log.Fatalf("Can't parse duration %s, valid durations format: 1d, 4h, 4h15m", *filter.Until)
-					}
-					*filter.Since = fmt.Sprintf("%d%s", days*24, "h")
+				realDuration := strings.TrimSuffix(*filter.Since, "d")
+				days, err := strconv.Atoi(realDuration)
+				if err != nil {
+					printHelp(cmd)
+					log.Fatalf("Can't parse duration %s, valid durations format: 1d, 4h, 4h15m", *filter.Until)
 				}
+				*filter.Since = fmt.Sprintf("%d%s", days*24, "h")
 			}
 			if *filter.IncludeCAPI {
 				*filter.Limit = 0
diff --git a/cmd/crowdsec-cli/main.go b/cmd/crowdsec-cli/main.go
index 33b59d0aa..0b2b86548 100644
--- a/cmd/crowdsec-cli/main.go
+++ b/cmd/crowdsec-cli/main.go
@@ -113,7 +113,7 @@ title: %s
 `
 	name := filepath.Base(filename)
 	base := strings.TrimSuffix(name, path.Ext(name))
-	return fmt.Sprintf(header, base, strings.Replace(base, "_", " ", -1))
+	return fmt.Sprintf(header, base, strings.ReplaceAll(base, "_", " "))
 }
 
 func linkHandler(name string) string {
diff --git a/pkg/acquisition/modules/cloudwatch/cloudwatch.go b/pkg/acquisition/modules/cloudwatch/cloudwatch.go
index e7981863e..9a3a66d5e 100644
--- a/pkg/acquisition/modules/cloudwatch/cloudwatch.go
+++ b/pkg/acquisition/modules/cloudwatch/cloudwatch.go
@@ -292,7 +292,7 @@ func (cw *CloudwatchSource) WatchLogGroupForStreams(out chan LogStreamTailConfig
 					return false
 				}
 				cw.logger.Tracef("stream %s is elligible for monitoring", *event.LogStreamName)
-				//the stream has been update recently, check if we should monitor it
+				//the stream has been updated recently, check if we should monitor it
 				var expectMode int
 				if !cw.Config.UseTimeMachine {
 					expectMode = leaky.LIVE
@@ -329,7 +329,7 @@
 	}
 }
 
-//LogStreamManager receives the potential streams to monitor, and start a go routine when needed
+//LogStreamManager receives the potential streams to monitor, and starts a go routine when needed
 func (cw *CloudwatchSource) LogStreamManager(in chan LogStreamTailConfig, outChan chan types.Event) error {
 
 	cw.logger.Debugf("starting to monitor streams for %s", cw.Config.GroupName)
@@ -350,11 +350,9 @@ func (cw *CloudwatchSource) LogStreamManager(in chan LogStreamTailConfig, outCha
 			match, err := regexp.Match(*cw.Config.StreamRegexp, []byte(newStream.StreamName))
 			if err != nil {
 				cw.logger.Warningf("invalid regexp : %s", err)
-			} else {
-				if !match {
-					cw.logger.Tracef("stream %s doesn't match %s", newStream.StreamName, *cw.Config.StreamRegexp)
-					continue
-				}
+			} else if !match {
+				cw.logger.Tracef("stream %s doesn't match %s", newStream.StreamName, *cw.Config.StreamRegexp)
+				continue
 			}
 		}
 
@@ -414,7 +412,7 @@
 
 func (cw *CloudwatchSource) TailLogStream(cfg *LogStreamTailConfig, outChan chan types.Event) error {
 	var startFrom *string
-	var lastReadMessage time.Time = time.Now().UTC()
+	lastReadMessage := time.Now().UTC()
 	ticker := time.NewTicker(cfg.PollStreamInterval)
 	//resume at existing index if we already had
 	streamIndexMutex.Lock()
diff --git a/pkg/acquisition/modules/journalctl/journalctl.go b/pkg/acquisition/modules/journalctl/journalctl.go
index 2e7bfb4da..674563753 100644
--- a/pkg/acquisition/modules/journalctl/journalctl.go
+++ b/pkg/acquisition/modules/journalctl/journalctl.go
@@ -54,7 +54,8 @@ func readLine(scanner *bufio.Scanner, out chan string, errChan chan error) error
 	if errChan != nil && scanner.Err() != nil {
 		errChan <- scanner.Err()
 		close(errChan)
-		return nil //error is already consumed by runJournalCtl
+		// the error is already consumed by runJournalCtl
+		return nil //nolint:nilerr
 	}
 	if errChan != nil {
 		close(errChan)
diff --git a/pkg/acquisition/modules/syslog/internal/parser/rfc3164/parse_test.go b/pkg/acquisition/modules/syslog/internal/parser/rfc3164/parse_test.go
index 7f9a6666a..04b730735 100644
--- a/pkg/acquisition/modules/syslog/internal/parser/rfc3164/parse_test.go
+++ b/pkg/acquisition/modules/syslog/internal/parser/rfc3164/parse_test.go
@@ -38,10 +38,8 @@ func TestPri(t *testing.T) {
 		} else {
 			if test.expectedErr != "" {
 				t.Errorf("expected error %s, got no error", test.expectedErr)
-			} else {
-				if r.PRI != test.expected {
-					t.Errorf("expected %d, got %d", test.expected, r.PRI)
-				}
+			} else if r.PRI != test.expected {
+				t.Errorf("expected %d, got %d", test.expected, r.PRI)
 			}
 		}
 	})
@@ -85,10 +83,8 @@ func TestTimestamp(t *testing.T) {
 		} else {
 			if test.expectedErr != "" {
 				t.Errorf("expected error %s, got no error", test.expectedErr)
-			} else {
-				if r.Timestamp.Format(time.RFC3339) != test.expected {
-					t.Errorf("expected %s, got %s", test.expected, r.Timestamp.Format(time.RFC3339))
-				}
+			} else if r.Timestamp.Format(time.RFC3339) != test.expected {
+				t.Errorf("expected %s, got %s", test.expected, r.Timestamp.Format(time.RFC3339))
 			}
 		}
 	})
@@ -141,10 +137,8 @@ func TestHostname(t *testing.T) {
 		} else {
 			if test.expectedErr != "" {
 				t.Errorf("expected error %s, got no error", test.expectedErr)
-			} else {
-				if r.Hostname != test.expected {
-					t.Errorf("expected %s, got %s", test.expected, r.Hostname)
-				}
+			} else if r.Hostname != test.expected {
+				t.Errorf("expected %s, got %s", test.expected, r.Hostname)
 			}
 		}
 	})
@@ -228,10 +222,8 @@ func TestMessage(t *testing.T) {
 		} else {
 			if test.expectedErr != "" {
 				t.Errorf("expected error %s, got no error", test.expectedErr)
-			} else {
-				if r.Message != test.expected {
-					t.Errorf("expected message %s, got %s", test.expected, r.Tag)
-				}
+			} else if r.Message != test.expected {
+				t.Errorf("expected message %s, got %s", test.expected, r.Tag)
 			}
 		}
 	})
diff --git a/pkg/acquisition/modules/syslog/internal/parser/rfc5424/parse_test.go b/pkg/acquisition/modules/syslog/internal/parser/rfc5424/parse_test.go
index 56eb1a645..af123ad8d 100644
--- a/pkg/acquisition/modules/syslog/internal/parser/rfc5424/parse_test.go
+++ b/pkg/acquisition/modules/syslog/internal/parser/rfc5424/parse_test.go
@@ -38,10 +38,8 @@ func TestPri(t *testing.T) {
 		} else {
 			if test.expectedErr != "" {
 				t.Errorf("expected error %s, got no error", test.expectedErr)
-			} else {
-				if r.PRI != test.expected {
-					t.Errorf("expected %d, got %d", test.expected, r.PRI)
-				}
+			} else if r.PRI != test.expected {
+				t.Errorf("expected %d, got %d", test.expected, r.PRI)
 			}
 		}
 	})
@@ -94,10 +92,8 @@ func TestHostname(t *testing.T) {
 		} else {
 			if test.expectedErr != "" {
 				t.Errorf("expected error %s, got no error", test.expectedErr)
-			} else {
-				if r.Hostname != test.expected {
-					t.Errorf("expected %s, got %s", test.expected, r.Hostname)
-				}
+			} else if r.Hostname != test.expected {
+				t.Errorf("expected %s, got %s", test.expected, r.Hostname)
 			}
 		}
 	})
diff --git a/pkg/apiserver/middlewares/v1/api_key.go b/pkg/apiserver/middlewares/v1/api_key.go
index e129f20cc..503f4d43d 100644
--- a/pkg/apiserver/middlewares/v1/api_key.go
+++ b/pkg/apiserver/middlewares/v1/api_key.go
@@ -115,17 +115,15 @@ func (a *APIKey) MiddlewareFunc() gin.HandlerFunc {
 			c.JSON(http.StatusForbidden, gin.H{"message": "access forbidden"})
 			c.Abort()
 			return
-		} else {
+		} else if bouncer.AuthType != types.TlsAuthType {
 			//bouncer was found in DB
-			if bouncer.AuthType != types.TlsAuthType {
-				log.WithFields(log.Fields{
-					"ip": c.ClientIP(),
-					"cn": extractedCN,
-				}).Errorf("bouncer isn't allowed to auth by TLS")
-				c.JSON(http.StatusForbidden, gin.H{"message": "access forbidden"})
-				c.Abort()
-				return
-			}
+			log.WithFields(log.Fields{
+				"ip": c.ClientIP(),
+				"cn": extractedCN,
+			}).Errorf("bouncer isn't allowed to auth by TLS")
+			c.JSON(http.StatusForbidden, gin.H{"message": "access forbidden"})
+			c.Abort()
+			return
 		}
 	} else {
 		//API Key Authentication
diff --git a/pkg/csconfig/api.go b/pkg/csconfig/api.go
index 59aec40fe..0f94b725d 100644
--- a/pkg/csconfig/api.go
+++ b/pkg/csconfig/api.go
@@ -77,7 +77,7 @@ func (l *LocalApiClientCfg) Load() error {
 
 	if l.Credentials != nil && l.Credentials.URL != "" {
 		if !strings.HasSuffix(l.Credentials.URL, "/") {
-			l.Credentials.URL = l.Credentials.URL + "/"
+			l.Credentials.URL += "/"
 		}
 	}
 
diff --git a/pkg/csplugin/broker.go b/pkg/csplugin/broker.go
index 944d84202..b06554ab8 100644
--- a/pkg/csplugin/broker.go
+++ b/pkg/csplugin/broker.go
@@ -329,7 +329,7 @@ func (pb *PluginBroker) pushNotificationsToPlugin(pluginName string, alerts []*m
 			},
 		)
 		if err == nil {
-			return err
+			return nil
 		}
 		log.WithField("plugin", pluginName).Errorf("%s error, retry num %d", err, i)
 		time.Sleep(backoffDuration)
diff --git a/pkg/cwhub/loader.go b/pkg/cwhub/loader.go
index 5ba322292..496ef42a5 100644
--- a/pkg/cwhub/loader.go
+++ b/pkg/cwhub/loader.go
@@ -169,12 +169,10 @@ func parser_visit(path string, f os.DirEntry, err error) error {
 				log.Tracef("marking %s as downloaded", v.Name)
 				v.Downloaded = true
 			}
-		} else {
+		} else if CheckSuffix(hubpath, v.RemotePath) {
 			//wrong file
 			/////.yaml
-			if CheckSuffix(hubpath, v.RemotePath) {
-				continue
-			}
+			continue
 		}
 		sha, err := getSHA256(path)
 		if err != nil {
diff --git a/pkg/exprhelpers/jsonextract.go b/pkg/exprhelpers/jsonextract.go
index 1dfbb5754..61c927524 100644
--- a/pkg/exprhelpers/jsonextract.go
+++ b/pkg/exprhelpers/jsonextract.go
@@ -52,7 +52,7 @@ func JsonExtractUnescape(jsblob string, target ...string) string {
 
 func JsonExtract(jsblob string, target string) string {
 	if !strings.HasPrefix(target, "[") {
-		target = strings.Replace(target, "[", ".[", -1)
+		target = strings.ReplaceAll(target, "[", ".[")
 	}
 
 	fullpath := strings.Split(target, ".")
@@ -62,7 +62,7 @@ func JsonExtract(jsblob string, target string) string {
 
 func jsonExtractType(jsblob string, target string, t jsonparser.ValueType) ([]byte, error) {
 	if !strings.HasPrefix(target, "[") {
-		target = strings.Replace(target, "[", ".[", -1)
+		target = strings.ReplaceAll(target, "[", ".[")
 	}
 
 	fullpath := strings.Split(target, ".")
diff --git a/pkg/hubtest/coverage.go b/pkg/hubtest/coverage.go
index 3bfe04914..eeff24b57 100644
--- a/pkg/hubtest/coverage.go
+++ b/pkg/hubtest/coverage.go
@@ -152,8 +152,8 @@ func (h *HubTest) GetScenariosCoverage() ([]ScenarioCoverage, error) {
 					coverage[idx].PresentIn[assert] = true
 					continue
 				}
-				fixedProbingWord := strings.Replace(pcover.Scenario, "probbing", "probing", -1)
-				fixedProbingAssert := strings.Replace(scanner_name, "probbing", "probing", -1)
+				fixedProbingWord := strings.ReplaceAll(pcover.Scenario, "probbing", "probing")
+				fixedProbingAssert := strings.ReplaceAll(scanner_name, "probbing", "probing")
 				if fixedProbingWord == fixedProbingAssert {
 					coverage[idx].TestsCount++
 					coverage[idx].PresentIn[assert] = true
diff --git a/pkg/leakybucket/manager_run.go b/pkg/leakybucket/manager_run.go
index 95271c987..585996791 100644
--- a/pkg/leakybucket/manager_run.go
+++ b/pkg/leakybucket/manager_run.go
@@ -131,7 +131,7 @@ func DumpBucketsStateAt(deadline time.Time, outputdir string, buckets *Buckets)
 	})
 	bbuckets, err := json.MarshalIndent(serialized, "", " ")
 	if err != nil {
-		log.Fatalf("Failed to unmarshal buckets : %s", err)
+		return "", fmt.Errorf("Failed to unmarshal buckets : %s", err)
 	}
 	size, err := tmpFd.Write(bbuckets)
 	if err != nil {
diff --git a/pkg/parser/enrich_dns.go b/pkg/parser/enrich_dns.go
index d333f40ad..5bbc0e94d 100644
--- a/pkg/parser/enrich_dns.go
+++ b/pkg/parser/enrich_dns.go
@@ -19,7 +19,7 @@ func reverse_dns(field string, p *types.Event, ctx interface{}, plog *log.Entry)
 	rets, err := net.LookupAddr(field)
 	if err != nil {
 		plog.Debugf("failed to resolve '%s'", field)
-		return nil, nil
+		return nil, nil //nolint:nilerr
 	}
 	//When using the host C library resolver, at most one result will be returned. To bypass the host resolver, use a custom Resolver.
 	ret["reverse_dns"] = rets[0]
diff --git a/pkg/parser/enrich_geoip.go b/pkg/parser/enrich_geoip.go
index 0f96d4fb9..8c25bef44 100644
--- a/pkg/parser/enrich_geoip.go
+++ b/pkg/parser/enrich_geoip.go
@@ -51,7 +51,7 @@ func GeoIpASN(field string, p *types.Event, ctx interface{}, plog *log.Entry) (m
 	record, err := ctx.(*geoip2.Reader).ASN(ip)
 	if err != nil {
 		plog.Errorf("Unable to enrich ip '%s'", field)
-		return nil, nil
+		return nil, nil //nolint:nilerr
 	}
 	ret["ASNNumber"] = fmt.Sprintf("%d", record.AutonomousSystemNumber)
 	ret["ASNumber"] = fmt.Sprintf("%d", record.AutonomousSystemNumber)
@@ -75,7 +75,7 @@ func GeoIpCity(field string, p *types.Event, ctx interface{}, plog *log.Entry) (
 	record, err := ctx.(*geoip2.Reader).City(ip)
 	if err != nil {
 		plog.Debugf("Unable to enrich ip '%s'", ip)
-		return nil, nil
+		return nil, nil //nolint:nilerr
 	}
 	if record.Country.IsoCode != "" {
 		ret["IsoCode"] = record.Country.IsoCode
diff --git a/pkg/parser/node.go b/pkg/parser/node.go
index 96107f19f..38af6fb35 100644
--- a/pkg/parser/node.go
+++ b/pkg/parser/node.go
@@ -302,14 +302,12 @@ func (n *Node) process(p *types.Event, ctx UnixParserCtx, expressionEnv map[stri
 					clog.Debugf("child is success, OnSuccess=next_stage, skip")
 					break
 				}
-			} else {
+			} else if !NodeHasOKGrok {
 				/*
 					If the parent node has a successful grok pattern, it's state will stay successful even if one or more chil fails.
 					If the parent node is a skeleton node (no grok pattern), then at least one child must be successful for it to be a success.
 				*/
-				if !NodeHasOKGrok {
-					NodeState = false
-				}
+				NodeState = false
 			}
 		}
 	}
diff --git a/pkg/parser/parsing_test.go b/pkg/parser/parsing_test.go
index 7fe99b5ee..bebee3376 100644
--- a/pkg/parser/parsing_test.go
+++ b/pkg/parser/parsing_test.go
@@ -375,7 +375,7 @@ func TestGeneratePatternsDoc(t *testing.T) {
 	i := 0
 	for key, val := range pctx.Grok {
 		p[i] = Pair{key, val}
-		p[i].Value = strings.Replace(p[i].Value, "{%{", "\\{\\%\\{", -1)
+		p[i].Value = strings.ReplaceAll(p[i].Value, "{%{", "\\{\\%\\{")
 		i++
 	}
 	sort.Sort(p)
diff --git a/pkg/types/utils.go b/pkg/types/utils.go
index c460bc7e7..342fa6372 100644
--- a/pkg/types/utils.go
+++ b/pkg/types/utils.go
@@ -210,10 +210,9 @@ func CopyFile(sourceSymLink, destinationFile string) (err error) {
 			return
 		}
 	}
-	if err = os.Link(sourceFile, destinationFile); err == nil {
-		return
+	if err = os.Link(sourceFile, destinationFile); err != nil {
+		err = copyFileContents(sourceFile, destinationFile)
 	}
-	err = copyFileContents(sourceFile, destinationFile)
 	return
 }
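For reviewers unfamiliar with the recurring change above (cscli, cloudwatch, api_key, loader, node): it is the if-else collapse suggested by gocritic's ifElseChain/nesting diagnostics, while the nilerr findings are silenced case by case with //nolint:nilerr. A minimal, self-contained sketch of the resulting shape; the helper name and messages are illustrative, not code from this PR:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// daysToHours rewrites "3d" as "72h" so time.ParseDuration can handle it.
// Before the refactor the suffix check sat in a nested `if` inside a bare
// `else`; hoisting it into an `else if` removes one nesting level, which is
// the pattern gocritic flags and this PR applies.
func daysToHours(d string) (string, error) {
	if d == "" {
		return "", nil
	} else if strings.HasSuffix(d, "d") {
		days, err := strconv.Atoi(strings.TrimSuffix(d, "d"))
		if err != nil {
			return "", fmt.Errorf("can't parse duration %s, valid formats: 1d, 4h, 4h15m", d)
		}
		return fmt.Sprintf("%dh", days*24), nil
	}
	return d, nil
}

func main() {
	fmt.Println(daysToHours("3d")) // prints: 72h <nil>
}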