diff --git a/cmd/crowdsec-cli/explain.go b/cmd/crowdsec-cli/explain.go
index 79428ad4e..900a75104 100644
--- a/cmd/crowdsec-cli/explain.go
+++ b/cmd/crowdsec-cli/explain.go
@@ -60,7 +60,7 @@ cscli explain --dsn "file://myfile.log" --type nginx
 		if logFile != "" {
 			absolutePath, err := filepath.Abs(logFile)
 			if err != nil {
-				log.Fatalf("unable to get absolue path of '%s', exiting", logFile)
+				log.Fatalf("unable to get absolute path of '%s', exiting", logFile)
 			}
 			dsn = fmt.Sprintf("file://%s", absolutePath)
 			lineCount := types.GetLineCountForFile(absolutePath)
diff --git a/cmd/crowdsec-cli/hubtest.go b/cmd/crowdsec-cli/hubtest.go
index d554172e4..00a544437 100644
--- a/cmd/crowdsec-cli/hubtest.go
+++ b/cmd/crowdsec-cli/hubtest.go
@@ -463,7 +463,7 @@ cscli hubtest create my-scenario-test --parsers crowdsecurity/nginx --scenarios
 				status = emoji.GreenCircle.String()
 				parserTested += 1
 			}
-			table.Append([]string{test.Parser, status, fmt.Sprintf("%d times (accross %d tests)", test.TestsCount, len(test.PresentIn))})
+			table.Append([]string{test.Parser, status, fmt.Sprintf("%d times (across %d tests)", test.TestsCount, len(test.PresentIn))})
 		}
 		table.Render()
 	}
@@ -482,7 +482,7 @@ cscli hubtest create my-scenario-test --parsers crowdsecurity/nginx --scenarios
 			if test.TestsCount > 0 {
 				status = emoji.GreenCircle.String()
 			}
-			table.Append([]string{test.Scenario, status, fmt.Sprintf("%d times (accross %d tests)", test.TestsCount, len(test.PresentIn))})
+			table.Append([]string{test.Scenario, status, fmt.Sprintf("%d times (across %d tests)", test.TestsCount, len(test.PresentIn))})
 		}
 		table.Render()
 	}
diff --git a/cmd/crowdsec-cli/metrics.go b/cmd/crowdsec-cli/metrics.go
index 0f4fe0f65..2ac6278ad 100644
--- a/cmd/crowdsec-cli/metrics.go
+++ b/cmd/crowdsec-cli/metrics.go
@@ -23,7 +23,7 @@ import (
 
 func lapiMetricsToTable(table *tablewriter.Table, stats map[string]map[string]map[string]int) error {
 	//stats : machine -> route -> method -> count
-	/*we want consistant display order*/
+	/*we want consistent display order*/
 	machineKeys := []string{}
 	for k := range stats {
 		machineKeys = append(machineKeys, k)
diff --git a/pkg/apiserver/apic.go b/pkg/apiserver/apic.go
index 56138c3ef..71dcfe4a1 100644
--- a/pkg/apiserver/apic.go
+++ b/pkg/apiserver/apic.go
@@ -417,7 +417,7 @@ func (a *apic) PullTop() error {
 		return errors.Wrap(err, "get stream")
 	}
 	a.startup = false
-	/*to count additions/deletions accross lists*/
+	/*to count additions/deletions across lists*/
 	add_counters, delete_counters := makeAddAndDeleteCounters()
 
 	// process deleted decisions
diff --git a/pkg/cstest/hubtest_item.go b/pkg/cstest/hubtest_item.go
index e12576f75..daedde733 100644
--- a/pkg/cstest/hubtest_item.go
+++ b/pkg/cstest/hubtest_item.go
@@ -390,7 +390,7 @@ func (t *HubTestItem) InstallHub() error {
 			if err := cwhub.DownloadDataIfNeeded(t.RuntimeHubConfig, item, true); err != nil {
 				return fmt.Errorf("unable to download data for parser '%s': %+v", parserName, err)
 			}
-			log.Debugf("parser '%s' installed succesfully in runtime environment", parserName)
+			log.Debugf("parser '%s' installed successfully in runtime environment", parserName)
 		}
 	}
 
@@ -401,7 +401,7 @@ func (t *HubTestItem) InstallHub() error {
 			if err := cwhub.DownloadDataIfNeeded(t.RuntimeHubConfig, item, true); err != nil {
 				return fmt.Errorf("unable to download data for parser '%s': %+v", scenarioName, err)
 			}
-			log.Debugf("scenario '%s' installed succesfully in runtime environment", scenarioName)
+			log.Debugf("scenario '%s' installed successfully in runtime environment", scenarioName)
 		}
 	}
 
@@ -412,7 +412,7 @@ func (t *HubTestItem) InstallHub() error {
 			if err := cwhub.DownloadDataIfNeeded(t.RuntimeHubConfig, item, true); err != nil {
 				return fmt.Errorf("unable to download data for parser '%s': %+v", postoverflowName, err)
 			}
-			log.Debugf("postoverflow '%s' installed succesfully in runtime environment", postoverflowName)
+			log.Debugf("postoverflow '%s' installed successfully in runtime environment", postoverflowName)
 		}
 	}
 
diff --git a/pkg/cstest/parser_assert.go b/pkg/cstest/parser_assert.go
index e6638079d..ba057d16e 100644
--- a/pkg/cstest/parser_assert.go
+++ b/pkg/cstest/parser_assert.go
@@ -309,7 +309,7 @@ func DumpTree(parser_results ParserResults, bucket_pour BucketPourInfo, opts Dum
 			state[evt.Line.Time] = make(map[string]map[string]ParserResult)
 			assoc[evt.Line.Time] = evt.Line.Raw
 		}
-		//there is a trick : to know if an event succesfully exit the parsers, we check if it reached the pour() phase
+		//there is a trick : to know if an event successfully exits the parsers, we check if it reached the pour() phase
 		//we thus use a fake stage "buckets" and a fake parser "OK" to know if it entered
 		if _, ok := state[evt.Line.Time]["buckets"]; !ok {
 			state[evt.Line.Time]["buckets"] = make(map[string]ParserResult)
@@ -330,7 +330,7 @@ func DumpTree(parser_results ParserResults, bucket_pour BucketPourInfo, opts Dum
 		fmt.Printf("line: %s\n", rawstr)
 		skeys := make([]string, 0, len(state[tstamp]))
 		for k := range state[tstamp] {
-			//there is a trick : to know if an event succesfully exit the parsers, we check if it reached the pour() phase
+			//there is a trick : to know if an event successfully exits the parsers, we check if it reached the pour() phase
 			//we thus use a fake stage "buckets" and a fake parser "OK" to know if it entered
 			if k == "buckets" {
 				continue
@@ -444,7 +444,7 @@ func DumpTree(parser_results ParserResults, bucket_pour BucketPourInfo, opts Dum
 		}
 		bnames := make([]string, 0, len(state[tstamp]["buckets"]))
 		for k := range state[tstamp]["buckets"] {
-			//there is a trick : to know if an event succesfully exit the parsers, we check if it reached the pour() phase
+			//there is a trick : to know if an event successfully exits the parsers, we check if it reached the pour() phase
 			//we thus use a fake stage "buckets" and a fake parser "OK" to know if it entered
 			if k == "OK" {
 				continue
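The three corrected comments above all describe the same trick in DumpTree. Below is a minimal sketch of that idea, assuming the time -> stage -> parser layout the dump code uses; `markPoured` and `reachedPour` are hypothetical helpers for illustration, not the actual cstest API:

```go
package main

import "fmt"

// ParserResult is a stand-in for the real cstest type; only the shape matters here.
type ParserResult struct{ Success bool }

// state mirrors the dump structure: line time -> stage -> parser -> result.
var state = map[string]map[string]map[string]ParserResult{}

// markPoured (hypothetical) records the fake "buckets" stage with the fake "OK"
// parser, meaning the event survived every parser stage and reached pour().
func markPoured(lineTime string) {
	if _, ok := state[lineTime]; !ok {
		state[lineTime] = map[string]map[string]ParserResult{}
	}
	state[lineTime]["buckets"] = map[string]ParserResult{
		"OK": {Success: true},
	}
}

// reachedPour (hypothetical) checks the marker when walking the tree.
func reachedPour(lineTime string) bool {
	_, ok := state[lineTime]["buckets"]["OK"]
	return ok
}

func main() {
	markPoured("2022-01-01T00:00:00Z")
	fmt.Println(reachedPour("2022-01-01T00:00:00Z")) // true
	fmt.Println(reachedPour("unseen line"))          // false
}
```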
diff --git a/pkg/cwhub/download.go b/pkg/cwhub/download.go
index ba3324bb1..6d498ea5c 100644
--- a/pkg/cwhub/download.go
+++ b/pkg/cwhub/download.go
@@ -69,7 +69,7 @@ func DownloadHubIdx(hub *csconfig.Hub) ([]byte, error) {
 
 	wsize, err := file.WriteString(string(body))
 	if err != nil {
-		return nil, errors.Wrap(err, "while writting hub index file")
+		return nil, errors.Wrap(err, "while writing hub index file")
 	}
 	log.Infof("Wrote new %d bytes index to %s", wsize, hub.HubIndexFile)
 	return body, nil
@@ -208,7 +208,7 @@ func DownloadItem(hub *csconfig.Hub, target Item, overwrite bool) (Item, error)
 	defer f.Close()
 	_, err = f.WriteString(string(body))
 	if err != nil {
-		return target, errors.Wrap(err, "while writting file")
+		return target, errors.Wrap(err, "while writing file")
 	}
 	target.Downloaded = true
 	target.Tainted = false
diff --git a/pkg/database/alerts.go b/pkg/database/alerts.go
index 5304ab7e9..e1373712a 100644
--- a/pkg/database/alerts.go
+++ b/pkg/database/alerts.go
@@ -299,7 +299,7 @@ func (c *Client) CreateAlertBulk(machineId string, alertList []*models.Alert) ([
 	ret := []string{}
 	bulkSize := 20
 
-	c.Log.Debugf("writting %d items", len(alertList))
+	c.Log.Debugf("writing %d items", len(alertList))
 	bulk := make([]*ent.AlertCreate, 0, bulkSize)
 	alertDecisions := make([][]*ent.Decision, 0, bulkSize)
 	for i, alertItem := range alertList {
@@ -922,7 +922,7 @@ func (c *Client) FlushAlerts(MaxAge string, MaxItems int) error {
 	}
 	if MaxItems > 0 {
 		//We get the highest id for the alerts
-		//We substract MaxItems to avoid deleting alerts that are not old enough
+		//We subtract MaxItems to avoid deleting alerts that are not old enough
 		//This gives us the oldest alert that we want to keep
 		//We then delete all the alerts with an id lower than this one
 		//We can do this because the id is auto-increment, and the database won't reuse the same id twice
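The comment block corrected above spells out how FlushAlerts honors MaxItems. A hedged sketch of just that arithmetic follows; `keepCutoff` and its inputs are hypothetical stand-ins, not the real ent-based queries:

```go
package main

import "fmt"

// keepCutoff sketches the strategy from the FlushAlerts comments: alert ids
// auto-increment and are never reused, so the newest maxItems alerts are those
// with id >= maxID-maxItems, and every alert with a lower id can be deleted.
// In the real code maxID would come from a "highest id" query.
func keepCutoff(maxID, maxItems int) int {
	// Highest id minus MaxItems gives the oldest alert we want to keep.
	cutoff := maxID - maxItems
	if cutoff < 0 {
		cutoff = 0
	}
	return cutoff
}

func main() {
	// With ids up to 1000 and MaxItems=200, delete alerts with id < 800.
	fmt.Println(keepCutoff(1000, 200)) // 800
}
```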
diff --git a/pkg/leakybucket/buckets_test.go b/pkg/leakybucket/buckets_test.go
index b8c565c85..5875a7fc1 100644
--- a/pkg/leakybucket/buckets_test.go
+++ b/pkg/leakybucket/buckets_test.go
@@ -223,7 +223,7 @@ POLL_AGAIN:
 	for {
 		if len(tf.Results) == 0 && len(results) == 0 {
-			log.Warningf("Test is successfull")
+			log.Warningf("Test is successful")
 			if dump {
 				if tmpFile, err = DumpBucketsStateAt(latest_ts, ".", buckets); err != nil {
 					t.Fatalf("Failed dumping bucket state : %s", err)
 				}
diff --git a/pkg/parser/node.go b/pkg/parser/node.go
index 6790f092e..69afdc4a3 100644
--- a/pkg/parser/node.go
+++ b/pkg/parser/node.go
@@ -32,9 +32,9 @@ type Node struct {
 	Rerferences []string `yaml:"references,omitempty"`
 	//if debug is present in the node, keep its specific Logger in runtime structure
 	Logger *log.Entry `yaml:"-"`
-	//This is mostly a hack to make writting less repetive.
+	//This is mostly a hack to make writing less repetitive.
 	//relying on stage, we know which field to parse, and we
-	//can as well promote log to next stage on success
+	//can also promote log to next stage on success
 	Stage string `yaml:"stage,omitempty"`
 	//OnSuccess allows to tag a node to be able to move log to next stage on success
 	OnSuccess string `yaml:"onsuccess,omitempty"`
@@ -259,7 +259,7 @@ func (n *Node) process(p *types.Event, ctx UnixParserCtx) (bool, error) {
 		}
 		grok := n.Grok.RunTimeRegexp.Parse(gstr)
 		if len(grok) > 0 {
-			/*tag explicitely that the *current* node had a successful grok pattern. it's important to know success state*/
+			/*tag explicitly that the *current* node had a successful grok pattern. it's important to know success state*/
 			NodeHasOKGrok = true
 			clog.Debugf("+ Grok '%s' returned %d entries to merge in Parsed", groklabel, len(grok))
 			//We managed to grok stuff, merged into parse
@@ -301,7 +301,7 @@ func (n *Node) process(p *types.Event, ctx UnixParserCtx) (bool, error) {
 		}
 	} else {
 		/*
-			If the parent node has a successful grok pattern, it's state will stay successfull even if one or more chil fails.
+			If the parent node has a successful grok pattern, its state will stay successful even if one or more children fail.
 			If the parent node is a skeleton node (no grok pattern), then at least one child must be successful for it to be a success.
 		*/
 		if !NodeHasOKGrok {
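The comment corrected above encodes a success rule for parser nodes. Here is a minimal sketch of that rule under stated assumptions; `nodeSucceeded` is a hypothetical helper distilling the logic, not the real `process()` control flow:

```go
package main

import "fmt"

// nodeSucceeded sketches the rule from the corrected comment: if the node
// itself had a successful grok pattern (NodeHasOKGrok), its state stays
// successful even when every child fails; a skeleton node (no grok pattern)
// needs at least one successful child to be a success.
func nodeSucceeded(nodeHasOKGrok bool, childResults []bool) bool {
	if nodeHasOKGrok {
		return true
	}
	for _, ok := range childResults {
		if ok {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(nodeSucceeded(true, []bool{false, false}))  // true: own grok matched
	fmt.Println(nodeSucceeded(false, []bool{false, true}))  // true: one child succeeded
	fmt.Println(nodeSucceeded(false, []bool{false, false})) // false: skeleton, no child ok
}
```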
diff --git a/pkg/types/grok_pattern.go b/pkg/types/grok_pattern.go
index 69a803224..53e2765a4 100644
--- a/pkg/types/grok_pattern.go
+++ b/pkg/types/grok_pattern.go
@@ -36,6 +36,6 @@ type GrokPattern struct {
 	//the output of the expression is going to be the source for regexp
 	ExpValue string `yaml:"expression,omitempty"`
 	RunTimeValue *vm.Program `json:"-"` //the actual compiled filter
-	//a grok can contain statics that apply if pattern is successfull
+	//a grok can contain statics that apply if pattern is successful
 	Statics []ExtraField `yaml:"statics,omitempty"`
 }
diff --git a/tests/bats.mk b/tests/bats.mk
index 3f57d9ec9..d648e680b 100644
--- a/tests/bats.mk
+++ b/tests/bats.mk
@@ -63,7 +63,7 @@ endef
 bats-all: bats-clean bats-build bats-fixture bats-test bats-test-hub
 
 # Source this to run the scripts outside of the Makefile
-# Old version of make don't have $(file) directive
+# Old versions of make don't have the $(file) directive
 bats-environment: export ENV:=$(ENV)
 bats-environment:
 	@echo "$${ENV}" > $(TEST_DIR)/.environment.sh
diff --git a/tests/lib/db/instance-sqlite b/tests/lib/db/instance-sqlite
index 5603e0e01..7f32b80c0 100755
--- a/tests/lib/db/instance-sqlite
+++ b/tests/lib/db/instance-sqlite
@@ -30,7 +30,7 @@ exec_sql() {
 [ -z "${CONFIG_YAML-}" ] && die "\$CONFIG_YAML must be defined."
 
 # ---------------------------
-# In most cases this called with setup argument, and it shouldn't fail for missinf config file.
+# In most cases this is called with the setup argument, and it shouldn't fail for a missing config file.
 if [ -f "${CONFIG_YAML}" ] ; then
     DATA_DIR=$(yq '.config_paths.data_dir' <"${CONFIG_YAML}")
     DB_FILE="${DATA_DIR}/crowdsec.db"
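For context on the corrected grok_pattern.go comment, a hedged sketch of what "statics that apply if pattern is successful" means. The types below are simplified stand-ins for the real ones in pkg/types, and `applyStatics` is hypothetical, not the actual grok engine:

```go
package main

import "fmt"

// Simplified stand-ins for the real types; only the statics behavior is shown.
type ExtraField struct{ Parsed, Value string }

type GrokPattern struct {
	Statics []ExtraField
}

// applyStatics illustrates the corrected comment: statics only apply when the
// pattern was successful, i.e. when grok extracted at least one capture.
func applyStatics(g GrokPattern, captures map[string]string) {
	if len(captures) == 0 {
		return // pattern did not match: skip statics
	}
	for _, s := range g.Statics {
		captures[s.Parsed] = s.Value
	}
}

func main() {
	g := GrokPattern{Statics: []ExtraField{{Parsed: "service", Value: "nginx"}}}
	caps := map[string]string{"remote_addr": "127.0.0.1"}
	applyStatics(g, caps)
	fmt.Println(caps) // statics merged because the pattern matched
}
```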