enable staticcheck linter; fixes (#1806)

- explicitly ignore returned parameters
- replace Walk with faster WalkDir
- log path error during hub dir sync
- colorize static unit tests
- remove duplicate import in crowdsec/main.go
- fix typos
- func tests: default datasource in tests/var/log instead of /tmp
- action setup-go v3
This commit is contained in:
mmetc 2022-10-14 16:12:21 +02:00 committed by GitHub
parent 708fa8280a
commit 8fecc2c00b
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
16 changed files with 54 additions and 60 deletions

View file

@ -15,7 +15,7 @@ jobs:
runs-on: windows-2019 runs-on: windows-2019
steps: steps:
- name: Set up Go 1.19 - name: Set up Go 1.19
uses: actions/setup-go@v1 uses: actions/setup-go@v3
with: with:
go-version: 1.19 go-version: 1.19
id: go id: go

View file

@ -24,7 +24,7 @@ jobs:
runs-on: ${{ matrix.os }} runs-on: ${{ matrix.os }}
steps: steps:
- name: Set up Go 1.19 - name: Set up Go 1.19
uses: actions/setup-go@v1 uses: actions/setup-go@v3
with: with:
go-version: 1.19 go-version: 1.19
id: go id: go

View file

@ -132,7 +132,9 @@ jobs:
- name: Build and run tests (static) - name: Build and run tests (static)
run: | run: |
make clean build test BUILD_STATIC=yes make clean build BUILD_STATIC=yes
make test \
| richgo testfilter
- name: Upload unit coverage to Codecov - name: Upload unit coverage to Codecov
uses: codecov/codecov-action@v2 uses: codecov/codecov-action@v2

View file

@ -30,7 +30,7 @@ jobs:
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- name: Set up Go 1.19 - name: Set up Go 1.19
uses: actions/setup-go@v1 uses: actions/setup-go@v3
with: with:
go-version: 1.19 go-version: 1.19
id: go id: go

View file

@ -5,6 +5,7 @@ run:
- pkg/time/rate - pkg/time/rate
skip-files: skip-files:
- pkg/database/ent/generate.go - pkg/database/ent/generate.go
- pkg/yamlpatch/merge_test.go
linters-settings: linters-settings:
gocyclo: gocyclo:
@ -97,7 +98,6 @@ linters:
- errchkjson # Checks types passed to the json encoding functions. Reports unsupported types and optionally reports occations, where the check for the returned error can be omitted. - errchkjson # Checks types passed to the json encoding functions. Reports unsupported types and optionally reports occations, where the check for the returned error can be omitted.
- errorlint # errorlint is a linter for that can be used to find code that will cause problems with the error wrapping scheme introduced in Go 1.13. - errorlint # errorlint is a linter for that can be used to find code that will cause problems with the error wrapping scheme introduced in Go 1.13.
- exhaustive # check exhaustiveness of enum switch statements - exhaustive # check exhaustiveness of enum switch statements
- forcetypeassert # finds forced type assertions
- gci # Gci control golang package import order and make it always deterministic. - gci # Gci control golang package import order and make it always deterministic.
- gocritic # Provides diagnostics that check for bugs, performance and style issues. - gocritic # Provides diagnostics that check for bugs, performance and style issues.
- godot # Check if comments end in a period - godot # Check if comments end in a period
@ -143,6 +143,7 @@ linters:
# #
- cyclop # checks function and package cyclomatic complexity - cyclop # checks function and package cyclomatic complexity
- dupl # Tool for code clone detection - dupl # Tool for code clone detection
- forcetypeassert # finds forced type assertions
- gocognit # Computes and checks the cognitive complexity of functions - gocognit # Computes and checks the cognitive complexity of functions
- gocyclo # Computes and checks the cyclomatic complexity of functions - gocyclo # Computes and checks the cyclomatic complexity of functions
- godox # Tool for detection of FIXME, TODO and other comment keywords - godox # Tool for detection of FIXME, TODO and other comment keywords
@ -189,20 +190,3 @@ issues:
- linters: - linters:
- errcheck - errcheck
text: "Error return value of `.*` is not checked" text: "Error return value of `.*` is not checked"
#
# staticcheck
#
- linters:
- staticcheck
text: "SA4009: argument .* is overwritten before first use"
- linters:
- staticcheck
text: "SA4009.related information.: assignment to .*"
- linters:
- staticcheck
text: "SA4006: this value of .* is never used"
- linters:
- staticcheck
text: "SA1006: printf-style function with dynamic format string and no further arguments should use print-style function instead"

View file

@ -3,29 +3,26 @@ package main
import ( import (
"flag" "flag"
"fmt" "fmt"
_ "net/http/pprof"
"os" "os"
"runtime" "runtime"
"sort" "sort"
"strings" "strings"
_ "net/http/pprof"
"time" "time"
"github.com/confluentinc/bincover" "github.com/confluentinc/bincover"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
"gopkg.in/tomb.v2"
"github.com/crowdsecurity/crowdsec/pkg/acquisition" "github.com/crowdsecurity/crowdsec/pkg/acquisition"
"github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/csconfig"
"github.com/crowdsecurity/crowdsec/pkg/csplugin" "github.com/crowdsecurity/crowdsec/pkg/csplugin"
"github.com/crowdsecurity/crowdsec/pkg/cwhub" "github.com/crowdsecurity/crowdsec/pkg/cwhub"
"github.com/crowdsecurity/crowdsec/pkg/cwversion" "github.com/crowdsecurity/crowdsec/pkg/cwversion"
"github.com/crowdsecurity/crowdsec/pkg/leakybucket" "github.com/crowdsecurity/crowdsec/pkg/leakybucket"
leaky "github.com/crowdsecurity/crowdsec/pkg/leakybucket"
"github.com/crowdsecurity/crowdsec/pkg/parser" "github.com/crowdsecurity/crowdsec/pkg/parser"
"github.com/crowdsecurity/crowdsec/pkg/types" "github.com/crowdsecurity/crowdsec/pkg/types"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
"gopkg.in/tomb.v2"
) )
var ( var (
@ -43,8 +40,8 @@ var (
/*the state of acquisition*/ /*the state of acquisition*/
dataSources []acquisition.DataSource dataSources []acquisition.DataSource
/*the state of the buckets*/ /*the state of the buckets*/
holders []leaky.BucketFactory holders []leakybucket.BucketFactory
buckets *leaky.Buckets buckets *leakybucket.Buckets
outputEventChan chan types.Event //the buckets init returns its own chan that is used for multiplexing outputEventChan chan types.Event //the buckets init returns its own chan that is used for multiplexing
/*settings*/ /*settings*/
lastProcessedItem time.Time /*keep track of last item timestamp in time-machine. it is used to GC buckets when we dump them.*/ lastProcessedItem time.Time /*keep track of last item timestamp in time-machine. it is used to GC buckets when we dump them.*/
@ -122,13 +119,13 @@ func LoadBuckets(cConfig *csconfig.Config) error {
files = append(files, hubScenarioItem.LocalPath) files = append(files, hubScenarioItem.LocalPath)
} }
} }
buckets = leaky.NewBuckets() buckets = leakybucket.NewBuckets()
log.Infof("Loading %d scenario files", len(files)) log.Infof("Loading %d scenario files", len(files))
holders, outputEventChan, err = leaky.LoadBuckets(cConfig.Crowdsec, files, &bucketsTomb, buckets) holders, outputEventChan, err = leakybucket.LoadBuckets(cConfig.Crowdsec, files, &bucketsTomb, buckets)
if err != nil { if err != nil {
return fmt.Errorf("Scenario loading failed : %v", err) return fmt.Errorf("scenario loading failed: %v", err)
} }
if cConfig.Prometheus != nil && cConfig.Prometheus.Enabled { if cConfig.Prometheus != nil && cConfig.Prometheus.Enabled {

View file

@ -152,8 +152,8 @@ func registerPrometheus(config *csconfig.PrometheusCfg) {
config.ListenPort = 6060 config.ListenPort = 6060
} }
/*Registering prometheus*/ // Registering prometheus
/*If in aggregated mode, do not register events associated to a source, keeps cardinality low*/ // If in aggregated mode, do not register events associated with a source, to keep the cardinality low
if config.Level == "aggregated" { if config.Level == "aggregated" {
log.Infof("Loading aggregated prometheus collectors") log.Infof("Loading aggregated prometheus collectors")
prometheus.MustRegister(globalParserHits, globalParserHitsOk, globalParserHitsKo, prometheus.MustRegister(globalParserHits, globalParserHitsOk, globalParserHitsKo,

View file

@ -32,14 +32,13 @@ func debugHandler(sig os.Signal, cConfig *csconfig.Config) error {
// todo: properly stop acquis with the tail readers // todo: properly stop acquis with the tail readers
if tmpFile, err = leaky.DumpBucketsStateAt(time.Now().UTC(), cConfig.Crowdsec.BucketStateDumpDir, buckets); err != nil { if tmpFile, err = leaky.DumpBucketsStateAt(time.Now().UTC(), cConfig.Crowdsec.BucketStateDumpDir, buckets); err != nil {
log.Warningf("Failed dumping bucket state : %s", err) log.Warningf("Failed to dump bucket state : %s", err)
} }
if err := leaky.ShutdownAllBuckets(buckets); err != nil { if err := leaky.ShutdownAllBuckets(buckets); err != nil {
log.Warningf("while shutting down routines : %s", err) log.Warningf("Failed to shut down routines : %s", err)
} }
log.Printf("Shutdown is finished, buckets are in %s", tmpFile)
log.Printf("shutdown is finished buckets are in %s", tmpFile)
return nil return nil
} }

View file

@ -399,7 +399,7 @@ func TestAlertsGetAsMachine(t *testing.T) {
} }
//fail //fail
_, resp, err = client.Alerts.GetByID(context.Background(), 2) _, _, err = client.Alerts.GetByID(context.Background(), 2)
assert.Contains(t, fmt.Sprintf("%s", err), "API error: object not found") assert.Contains(t, fmt.Sprintf("%s", err), "API error: object not found")
} }

View file

@ -77,7 +77,7 @@ func TestApiAuth(t *testing.T) {
t.Fatalf("new api client: %s", err) t.Fatalf("new api client: %s", err)
} }
_, resp, err = newcli.Decisions.List(context.Background(), alert) _, _, err = newcli.Decisions.List(context.Background(), alert)
require.Error(t, err) require.Error(t, err)
log.Infof("--> %s", err) log.Infof("--> %s", err)

View file

@ -112,13 +112,13 @@ func (c *Controller) sendAlertToPluginChannel(alert *models.Alert, profileID uin
} }
} }
// CreateAlert : write received alerts in body to the database // CreateAlert writes the alerts received in the body to the database
func (c *Controller) CreateAlert(gctx *gin.Context) { func (c *Controller) CreateAlert(gctx *gin.Context) {
var input models.AddAlertsRequest var input models.AddAlertsRequest
claims := jwt.ExtractClaims(gctx) claims := jwt.ExtractClaims(gctx)
/*TBD : use defines rather than hardcoded key to find back owner*/ // TBD: use defined rather than hardcoded key to find back owner
machineID := claims["id"].(string) machineID := claims["id"].(string)
if err := gctx.ShouldBindJSON(&input); err != nil { if err := gctx.ShouldBindJSON(&input); err != nil {
@ -200,7 +200,7 @@ func (c *Controller) CreateAlert(gctx *gin.Context) {
gctx.JSON(http.StatusCreated, alerts) gctx.JSON(http.StatusCreated, alerts)
} }
// FindAlerts : return alerts from database based on the specified filter // FindAlerts: returns alerts from the database based on the specified filter
func (c *Controller) FindAlerts(gctx *gin.Context) { func (c *Controller) FindAlerts(gctx *gin.Context) {
result, err := c.DBClient.QueryAlertWithFilter(gctx.Request.URL.Query()) result, err := c.DBClient.QueryAlertWithFilter(gctx.Request.URL.Query())
if err != nil { if err != nil {
@ -217,7 +217,7 @@ func (c *Controller) FindAlerts(gctx *gin.Context) {
gctx.JSON(http.StatusOK, data) gctx.JSON(http.StatusOK, data)
} }
// FindAlertByID return the alert associated to the ID // FindAlertByID returns the alert associated with the ID
func (c *Controller) FindAlertByID(gctx *gin.Context) { func (c *Controller) FindAlertByID(gctx *gin.Context) {
alertIDStr := gctx.Param("alert_id") alertIDStr := gctx.Param("alert_id")
alertID, err := strconv.Atoi(alertIDStr) alertID, err := strconv.Atoi(alertIDStr)
@ -239,7 +239,7 @@ func (c *Controller) FindAlertByID(gctx *gin.Context) {
gctx.JSON(http.StatusOK, data) gctx.JSON(http.StatusOK, data)
} }
// DeleteAlerts : delete alerts from database based on the specified filter // DeleteAlerts deletes alerts from the database based on the specified filter
func (c *Controller) DeleteAlerts(gctx *gin.Context) { func (c *Controller) DeleteAlerts(gctx *gin.Context) {
incomingIP := gctx.ClientIP() incomingIP := gctx.ClientIP()
if incomingIP != "127.0.0.1" && incomingIP != "::1" && !networksContainIP(c.TrustedIPs, incomingIP) { if incomingIP != "127.0.0.1" && incomingIP != "::1" && !networksContainIP(c.TrustedIPs, incomingIP) {

View file

@ -10,7 +10,7 @@ import (
func (c *Controller) HeartBeat(gctx *gin.Context) { func (c *Controller) HeartBeat(gctx *gin.Context) {
claims := jwt.ExtractClaims(gctx) claims := jwt.ExtractClaims(gctx)
/*TBD : use defines rather than hardcoded key to find back owner*/ // TBD: use defined rather than hardcoded key to find back owner
machineID := claims["id"].(string) machineID := claims["id"].(string)
if err := c.DBClient.UpdateMachineLastHeartBeat(machineID); err != nil { if err := c.DBClient.UpdateMachineLastHeartBeat(machineID); err != nil {

View file

@ -552,7 +552,7 @@ func (t *HubTestItem) Run() error {
// assert parsers // assert parsers
if !t.Config.IgnoreParsers { if !t.Config.IgnoreParsers {
assertFileStat, err := os.Stat(t.ParserAssert.File) _, err := os.Stat(t.ParserAssert.File)
if os.IsNotExist(err) { if os.IsNotExist(err) {
parserAssertFile, err := os.Create(t.ParserAssert.File) parserAssertFile, err := os.Create(t.ParserAssert.File)
if err != nil { if err != nil {
@ -560,7 +560,7 @@ func (t *HubTestItem) Run() error {
} }
parserAssertFile.Close() parserAssertFile.Close()
} }
assertFileStat, err = os.Stat(t.ParserAssert.File) assertFileStat, err := os.Stat(t.ParserAssert.File)
if err != nil { if err != nil {
return fmt.Errorf("error while stats '%s': %s", t.ParserAssert.File, err) return fmt.Errorf("error while stats '%s': %s", t.ParserAssert.File, err)
} }
@ -588,7 +588,7 @@ func (t *HubTestItem) Run() error {
nbScenario += 1 nbScenario += 1
} }
if nbScenario > 0 { if nbScenario > 0 {
assertFileStat, err := os.Stat(t.ScenarioAssert.File) _, err := os.Stat(t.ScenarioAssert.File)
if os.IsNotExist(err) { if os.IsNotExist(err) {
scenarioAssertFile, err := os.Create(t.ScenarioAssert.File) scenarioAssertFile, err := os.Create(t.ScenarioAssert.File)
if err != nil { if err != nil {
@ -596,7 +596,7 @@ func (t *HubTestItem) Run() error {
} }
scenarioAssertFile.Close() scenarioAssertFile.Close()
} }
assertFileStat, err = os.Stat(t.ScenarioAssert.File) assertFileStat, err := os.Stat(t.ScenarioAssert.File)
if err != nil { if err != nil {
return fmt.Errorf("error while stats '%s': %s", t.ScenarioAssert.File, err) return fmt.Errorf("error while stats '%s': %s", t.ScenarioAssert.File, err)
} }

View file

@ -17,7 +17,7 @@ import (
/*the walk/parser_visit function can't receive extra args*/ /*the walk/parser_visit function can't receive extra args*/
var hubdir, installdir string var hubdir, installdir string
func parser_visit(path string, f os.FileInfo, err error) error { func parser_visit(path string, f os.DirEntry, err error) error {
var target Item var target Item
var local bool var local bool
@ -28,6 +28,12 @@ func parser_visit(path string, f os.FileInfo, err error) error {
var fauthor string var fauthor string
var stage string var stage string
if err != nil {
log.Warningf("error while syncing hub dir: %v", err)
// there is a path error, we ignore the file
return nil
}
path, err = filepath.Abs(path) path, err = filepath.Abs(path)
if err != nil { if err != nil {
return err return err
@ -96,7 +102,7 @@ func parser_visit(path string, f os.FileInfo, err error) error {
when the collection is installed, both files are created when the collection is installed, both files are created
*/ */
//non symlinks are local user files or hub files //non symlinks are local user files or hub files
if f.Mode()&os.ModeSymlink == 0 { if f.Type() & os.ModeSymlink == 0 {
local = true local = true
log.Tracef("%s isn't a symlink", path) log.Tracef("%s isn't a symlink", path)
} else { } else {
@ -309,7 +315,7 @@ func SyncDir(hub *csconfig.Hub, dir string) (error, []string) {
if err != nil { if err != nil {
log.Errorf("failed %s : %s", cpath, err) log.Errorf("failed %s : %s", cpath, err)
} }
err = filepath.Walk(cpath, parser_visit) err = filepath.WalkDir(cpath, parser_visit)
if err != nil { if err != nil {
return err, warnings return err, warnings
} }

View file

@ -225,7 +225,7 @@ POLL_AGAIN:
log.Warning("Test is successful") log.Warning("Test is successful")
if dump { if dump {
if tmpFile, err = DumpBucketsStateAt(latest_ts, ".", buckets); err != nil { if tmpFile, err = DumpBucketsStateAt(latest_ts, ".", buckets); err != nil {
t.Fatalf("Failed dumping bucket state : %s", err) t.Fatalf("Failed to dump bucket state: %s", err)
} }
log.Infof("dumped bucket to %s", tmpFile) log.Infof("dumped bucket to %s", tmpFile)
} }
@ -235,7 +235,7 @@ POLL_AGAIN:
if len(tf.Results) != len(results) { if len(tf.Results) != len(results) {
if dump { if dump {
if tmpFile, err = DumpBucketsStateAt(latest_ts, ".", buckets); err != nil { if tmpFile, err = DumpBucketsStateAt(latest_ts, ".", buckets); err != nil {
t.Fatalf("Failed dumping bucket state : %s", err) t.Fatalf("Failed to dump bucket state: %s", err)
} }
log.Infof("dumped bucket to %s", tmpFile) log.Infof("dumped bucket to %s", tmpFile)
} }

View file

@ -60,8 +60,14 @@ config_generate() {
"${CONFIG_DIR}/" "${CONFIG_DIR}/"
# the default acquis file contains files that are not readable by everyone # the default acquis file contains files that are not readable by everyone
# We use a noop configuration that forces nevertheless crowdsec to keep watching touch "$LOG_DIR/empty.log"
echo '{"filenames":["/tmp/should-not-exist.log"],"labels":{"type":"syslog"},"force_inotify":true}' > "${CONFIG_DIR}/acquis.yaml" cat <<-EOT >"$CONFIG_DIR/acquis.yaml"
source: file
filenames:
- $LOG_DIR/empty.log
labels:
type: syslog
EOT
cp ../plugins/notifications/*/{http,email,slack,splunk,dummy}.yaml \ cp ../plugins/notifications/*/{http,email,slack,splunk,dummy}.yaml \
"${CONFIG_DIR}/notifications/" "${CONFIG_DIR}/notifications/"