diff --git a/.github/workflows/basic_functionnals_tests.yml b/.github/workflows/basic_functionnals_tests.yml deleted file mode 100644 index bb4e629d3..000000000 --- a/.github/workflows/basic_functionnals_tests.yml +++ /dev/null @@ -1,86 +0,0 @@ -name: Basic functionals tests - -on: - push: - branches: [ master ] - pull_request: - branches: [ master ] - -jobs: - build: - name: Install generated release and perform basic tests - runs-on: ubuntu-latest - steps: - - name: Set up Go 1.13 - uses: actions/setup-go@v1 - with: - go-version: 1.13 - id: go - - name: Check out code into the Go module directory - uses: actions/checkout@v2 - - name: Build release - run: BUILD_VERSION=xxx make release - # - name: Cache release directory - # uses: actions/cache@v2 - # with: - # path: ./crowdsec-xxx - # key: ${{ runner.os }}-crowdsec-xxx - - name: Install release - run: | - cd crowdsec-xxx - sudo ./wizard.sh --bininstall - sudo cscli update - sudo sed -i 's/api: true/api: false/g' /etc/crowdsec/config/default.yaml - - name: Install collection - run: | - sudo cscli list -a - sudo cscli install parser crowdsecurity/syslog-logs crowdsecurity/sshd-logs crowdsecurity/dateparse-enrich - sudo cscli install scenario crowdsecurity/ssh-bf - - name: Crowdsec Startup check - run: | - sudo crowdsec -c /etc/crowdsec/config/user.yaml -t - - name: Generate fake ssh bf logs - run: | - for i in `seq 1 10` ; do - echo `date '+%b %d %H:%M:%S '`'sd-126005 sshd[12422]: Invalid user netflix from 1.1.1.172 port 35424' >> ssh-bf.log - done; - - name: Process ssh-bf logs in time-machine - run: | - sudo crowdsec -c /etc/crowdsec/config/user.yaml -file ./ssh-bf.log -type syslog - - name: Cscli ban list check - run: | - sudo cscli ban list - sudo cscli ban list -o json | jq -e '.[].iptext == "1.1.1.172"' - sudo cscli ban list --range 1.1.1.0/24 -o json | jq -e '.[].iptext == "1.1.1.172"' - - name: Cscli ban del check - run: | - sudo cscli ban del ip 1.1.1.172 - sudo cscli -c /etc/crowdsec/config/user.yaml ban list -o json | jq -e '. 
== null' - - name: Service start - run: | - sudo rm -f /etc/crowdsec/config/acquis.yaml - touch /tmp/test.log - echo "filename: /tmp/test.log" | sudo tee -a /etc/crowdsec/config/acquis.yaml > /dev/null - echo "labels:" | sudo tee -a /etc/crowdsec/config/acquis.yaml > /dev/null - echo " type: syslog" | sudo tee -a /etc/crowdsec/config/acquis.yaml > /dev/null - sudo systemctl restart crowdsec - - name: Service status check - run: | - sleep 3 - sudo cat /var/log/crowdsec.log - sudo systemctl status crowdsec - sudo cscli metrics - - name: Inject logs - run: | - cat ssh-bf.log >> /tmp/test.log - sleep 1 - - name: Check results - run: | - sudo cscli ban list - sudo cscli ban list -o json | jq -e '.[].iptext == "1.1.1.172"' - sudo cat /var/log/crowdsec.log - - name: Check metrics - run: | - sudo cscli metrics - - diff --git a/.github/workflows/ci_functests-install.yml b/.github/workflows/ci_functests-install.yml new file mode 100644 index 000000000..375a2d611 --- /dev/null +++ b/.github/workflows/ci_functests-install.yml @@ -0,0 +1,90 @@ +name: Hub-CI + +on: + push: + branches: + - wip_lapi + - master + pull_request: + branches: + - wip_lapi + - master + +jobs: + build: + name: Install generated release and perform basic tests + runs-on: ubuntu-latest + steps: + - name: Set up Go 1.13 + uses: actions/setup-go@v1 + with: + go-version: 1.13 + id: go + - name: Check out code into the Go module directory + uses: actions/checkout@v2 + - name: Build release + run: BUILD_VERSION=xxx make release + - name: Install release + run: | + cd crowdsec-xxx + sudo bash -x ./wizard.sh --bininstall + sudo cscli machines add -a + - name: Post-installation check + run: | + sudo cscli hub update + - name: Install collection + run: | + sudo cscli hub list -a + sudo cscli parsers install crowdsecurity/syslog-logs crowdsecurity/sshd-logs crowdsecurity/dateparse-enrich + sudo cscli scenarios install crowdsecurity/ssh-bf + - name: Crowdsec start service + run: | + sudo systemctl start crowdsec + - name: Generate fake ssh bf logs + run: | + for i in `seq 1 10` ; do + echo `date '+%b %d %H:%M:%S '`'sd-126005 sshd[12422]: Invalid user netflix from 1.1.1.172 port 35424' >> ssh-bf.log + done; + - name: Process ssh-bf logs in time-machine + run: | + sudo crowdsec -file ./ssh-bf.log -type syslog -no-api + - name: Cscli ban list check + #check that we got the expected ban and that the filters are working properly + run: | + sudo cscli decisions list + sudo cscli decisions list -o=json | jq -e '.[].decisions[0].value == "1.1.1.172"' + sudo cscli decisions list -r 1.1.1.0/24 -o=json | jq -e '.[].decisions[0].value == "1.1.1.172"' + sudo cscli decisions list -r 1.1.2.0/24 -o=json | jq -e '. == null' + sudo cscli decisions list -i 1.1.1.172 -o=json | jq -e '.[].decisions[0].value == "1.1.1.172"' + sudo cscli decisions list -i 1.1.1.173 -o=json | jq -e '. == null' + - name: Cscli ban del check + #check that the delete is working and that filters are working properly + run: | + sudo cscli decisions delete -i 1.1.1.173 + sudo cscli decisions list -o=json | jq -e '.[].decisions[0].value == "1.1.1.172"' + sudo cscli decisions delete -i 1.1.1.172 + sudo cscli decisions list -o=json | jq -e '. 
== null' + - name: Metrics check + run: | + sudo cscli metrics + - name: Service stop & config change + #shutdown the service, edit that acquisition.yaml + run: | + sudo systemctl stop crowdsec + echo "" | sudo tee -a /etc/crowdsec/acquis.yaml > /dev/null + echo "filename: /tmp/test.log" | sudo tee -a /etc/crowdsec/acquis.yaml > /dev/null + echo "labels:" | sudo tee -a /etc/crowdsec/acquis.yaml > /dev/null + echo " type: syslog" | sudo tee -a /etc/crowdsec/acquis.yaml > /dev/null + touch /tmp/test.log + - name: Service start & check + run: | + sudo systemctl start crowdsec || sudo journalctl -xe + - name: Trigger events via normal acquisition + run: | + cat ssh-bf.log >> /tmp/test.log + sleep 1 + - name: Check results + run: | + sudo cscli decisions list -o=json | jq -e '.[].decisions[0].value == "1.1.1.172"' + + diff --git a/.github/workflows/ci_go-test.yml b/.github/workflows/ci_go-test.yml index 10bdfe4cc..28c5c13f0 100644 --- a/.github/workflows/ci_go-test.yml +++ b/.github/workflows/ci_go-test.yml @@ -27,9 +27,9 @@ jobs: uses: jandelgado/gcov2lcov-action@v1.0.2 with: infile: coverage.out - outfile: coverage.lcov + outfile: coverage.txt - name: Coveralls uses: coverallsapp/github-action@master with: github-token: ${{ secrets.GITHUB_TOKEN }} - path-to-lcov: coverage.lcov + path-to-lcov: coverage.txt diff --git a/.github/workflows/ci_golangci-lint.yml b/.github/workflows/ci_golangci-lint.yml index 8052f9797..0f45afaf7 100644 --- a/.github/workflows/ci_golangci-lint.yml +++ b/.github/workflows/ci_golangci-lint.yml @@ -20,3 +20,5 @@ jobs: # Optional: golangci-lint command line arguments. args: --issues-exit-code=0 --timeout 5m only-new-issues: true + + diff --git a/.github/workflows/hub_tests.yml b/.github/workflows/ci_hub-tests.yml similarity index 76% rename from .github/workflows/hub_tests.yml rename to .github/workflows/ci_hub-tests.yml index aa80ae2d7..4b59c77a4 100644 --- a/.github/workflows/hub_tests.yml +++ b/.github/workflows/ci_hub-tests.yml @@ -1,4 +1,4 @@ -name: Hub tests +name: Hub-CI on: push: @@ -33,27 +33,26 @@ jobs: git clone https://github.com/crowdsecurity/hub-tests.git cd hub-tests make - - id: keydb - uses: pozetroninc/github-action-get-latest-release@master + - uses: oprypin/find-latest-tag@v1 with: - owner: crowdsecurity - repo: crowdsec - excludes: prerelease, draft + repository: crowdsecurity/crowdsec # The repository to scan. + releases-only: false # We know that all relevant tags have a GitHub release for them. + id: crowdsec # The step ID to refer to later. - name: Create crowdsec test env with all parsers from the release run: | - cd crowdsec-${{ steps.keydb.outputs.release }} + cd crowdsec-${{ steps.crowdsec.outputs.tag }} ./test_env.sh cd tests for i in `./cscli -c dev.yaml list parsers -a -o json | jq -r ".[].name" ` ; do ./cscli -c dev.yaml install parser $i ; done - name: Setup hub ci in crowdsec - working-directory: ./crowdsec-${{ steps.keydb.outputs.release }}/tests/ + working-directory: ./crowdsec-${{ steps.crowdsec.outputs.tag }}/tests/ run: | cp -R ../../hub-tests/tests . cp ../../hub-tests/main . 
- name: Run the HUB CI - working-directory: ./crowdsec-${{ steps.keydb.outputs.release }}/tests/ + working-directory: ./crowdsec-${{ steps.crowdsec.outputs.tag }}/tests/ run: | for i in `find ./tests -mindepth 1 -maxdepth 1 -type d` ; do echo "::group::Test-${i}" ; diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 000000000..b18642f12 --- /dev/null +++ b/Dockerfile @@ -0,0 +1,22 @@ +ARG GOVERSION=1.14 + +FROM golang:${GOVERSION}-alpine AS build + +WORKDIR /go/src/crowdsec +COPY . . + +RUN apk update && apk add git jq gcc libc-dev make bash gettext +RUN BUILD_VERSION="$(git describe --tags `git rev-list --tags --max-count=1`)" make release +RUN /bin/bash wizard.sh --docker-mode +RUN cscli hub update && cscli collections install crowdsecurity/linux + +FROM alpine:latest +COPY --from=build /etc/crowdsec /etc/crowdsec +COPY --from=build /var/lib/crowdsec /var/lib/crowdsec +COPY --from=build /usr/local/bin/crowdsec /usr/local/bin/crowdsec +COPY --from=build /usr/local/bin/cscli /usr/local/bin/cscli +COPY --from=build /go/src/crowdsec/docker/docker_start.sh / +COPY --from=build /go/src/crowdsec/docker/config.yaml /etc/crowdsec/config.yaml + + +ENTRYPOINT /bin/sh docker_start.sh \ No newline at end of file diff --git a/Makefile b/Makefile index 217c571ba..a7e68b2e8 100644 --- a/Makefile +++ b/Makefile @@ -3,7 +3,6 @@ CFG_PREFIX = $(PREFIX)"/etc/crowdsec/" BIN_PREFIX = $(PREFIX)"/usr/local/bin/" DATA_PREFIX = $(PREFIX)"/var/run/crowdsec/" -PLUGIN_FOLDER="./plugins" PID_DIR = $(PREFIX)"/var/run/" CROWDSEC_FOLDER = "./cmd/crowdsec" CSCLI_FOLDER = "./cmd/crowdsec-cli/" @@ -14,14 +13,13 @@ BUILD_CMD="build" GOARCH=amd64 GOOS=linux - -#Current versioning information from env +#Golang version info GO_MAJOR_VERSION = $(shell go version | cut -c 14- | cut -d' ' -f1 | cut -d'.' -f1) GO_MINOR_VERSION = $(shell go version | cut -c 14- | cut -d' ' -f1 | cut -d'.' 
-f2) MINIMUM_SUPPORTED_GO_MAJOR_VERSION = 1 MINIMUM_SUPPORTED_GO_MINOR_VERSION = 13 GO_VERSION_VALIDATION_ERR_MSG = Golang version ($(BUILD_GOVERSION)) is not supported, please use least $(MINIMUM_SUPPORTED_GO_MAJOR_VERSION).$(MINIMUM_SUPPORTED_GO_MINOR_VERSION) - +#Current versioning information from env BUILD_VERSION?="$(shell git describe --tags `git rev-list --tags --max-count=1`)" BUILD_GOVERSION="$(shell go version | cut -d " " -f3 | sed -r 's/[go]+//g')" BUILD_CODENAME=$(shell cat RELEASE.json | jq -r .CodeName) @@ -36,20 +34,20 @@ RELDIR = crowdsec-$(BUILD_VERSION) all: clean test build -build: clean goversion crowdsec cscli +build: goversion crowdsec cscli static: goversion crowdsec_static cscli_static goversion: @if [ $(GO_MAJOR_VERSION) -gt $(MINIMUM_SUPPORTED_GO_MAJOR_VERSION) ]; then \ - exit 0 ;\ - elif [ $(GO_MAJOR_VERSION) -lt $(MINIMUM_SUPPORTED_GO_MAJOR_VERSION) ]; then \ - echo '$(GO_VERSION_VALIDATION_ERR_MSG)';\ - exit 1; \ - elif [ $(GO_MINOR_VERSION) -lt $(MINIMUM_SUPPORTED_GO_MINOR_VERSION) ] ; then \ - echo '$(GO_VERSION_VALIDATION_ERR_MSG)';\ - exit 1; \ - fi + exit 0 ;\ + elif [ $(GO_MAJOR_VERSION) -lt $(MINIMUM_SUPPORTED_GO_MAJOR_VERSION) ]; then \ + echo '$(GO_VERSION_VALIDATION_ERR_MSG)';\ + exit 1; \ + elif [ $(GO_MINOR_VERSION) -lt $(MINIMUM_SUPPORTED_GO_MINOR_VERSION) ] ; then \ + echo '$(GO_VERSION_VALIDATION_ERR_MSG)';\ + exit 1; \ + fi hubci: @rm -rf crowdsec-xxx hub-tests @@ -69,26 +67,50 @@ clean: @rm -f $(CSCLI_BIN) @rm -f *.log -cscli: goversion +cscli: +ifeq ($(lastword $(RESPECT_VERSION)), $(CURRENT_GOVERSION)) @make -C $(CSCLI_FOLDER) build --no-print-directory +else + @echo "Required golang version is $(REQUIRE_GOVERSION). The current one is $(CURRENT_GOVERSION). Exiting.." + @exit 1; +endif - -crowdsec: goversion +crowdsec: +ifeq ($(lastword $(RESPECT_VERSION)), $(CURRENT_GOVERSION)) @make -C $(CROWDSEC_FOLDER) build --no-print-directory - @bash ./scripts/build_plugins.sh +else + @echo "Required golang version is $(REQUIRE_GOVERSION). The current one is $(CURRENT_GOVERSION). Exiting.." + @exit 1; +endif -cscli_static: goversion +cscli_static: +ifeq ($(lastword $(RESPECT_VERSION)), $(CURRENT_GOVERSION)) @make -C $(CSCLI_FOLDER) static --no-print-directory +else + @echo "Required golang version is $(REQUIRE_GOVERSION). The current one is $(CURRENT_GOVERSION). Exiting.." + @exit 1; +endif -crowdsec_static: goversion +crowdsec_static: +ifeq ($(lastword $(RESPECT_VERSION)), $(CURRENT_GOVERSION)) @make -C $(CROWDSEC_FOLDER) static --no-print-directory +else + @echo "Required golang version is $(REQUIRE_GOVERSION). The current one is $(CURRENT_GOVERSION). Exiting.." + @exit 1; +endif + #.PHONY: test test: +ifeq ($(lastword $(RESPECT_VERSION)), $(CURRENT_GOVERSION)) @make -C $(CROWDSEC_FOLDER) test --no-print-directory +else + @echo "Required golang version is $(REQUIRE_GOVERSION). The current one is $(CURRENT_GOVERSION). Exiting.." 
+ @exit 1; +endif .PHONY: uninstall uninstall: @@ -110,7 +132,4 @@ release: check_release build @cp -R ./config/ $(RELDIR) @cp wizard.sh $(RELDIR) @cp scripts/test_env.sh $(RELDIR) - @bash ./scripts/build_plugins.sh - @mkdir -p "$(RELDIR)/plugins/backend" - @find ./plugins -type f -name "*.so" -exec install -Dm 644 {} "$(RELDIR)/{}" \; || exiting @tar cvzf crowdsec-release.tgz $(RELDIR) diff --git a/cmd/crowdsec-cli/alerts.go b/cmd/crowdsec-cli/alerts.go new file mode 100644 index 000000000..13adafad3 --- /dev/null +++ b/cmd/crowdsec-cli/alerts.go @@ -0,0 +1,451 @@ +package main + +import ( + "context" + "encoding/json" + "fmt" + "net/url" + "os" + "strconv" + "strings" + "time" + + "github.com/crowdsecurity/crowdsec/pkg/apiclient" + "github.com/crowdsecurity/crowdsec/pkg/cwversion" + "github.com/crowdsecurity/crowdsec/pkg/models" + "github.com/go-openapi/strfmt" + "github.com/olekukonko/tablewriter" + log "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + "gopkg.in/yaml.v2" +) + +var printMachine bool +var limit *int + +func DecisionsFromAlert(alert *models.Alert) string { + ret := "" + var decMap = make(map[string]int) + for _, decision := range alert.Decisions { + k := *decision.Type + if *decision.Simulated { + k = fmt.Sprintf("(simul)%s", k) + } + v := decMap[k] + decMap[k] = v + 1 + } + for k, v := range decMap { + if len(ret) > 0 { + ret += " " + } + ret += fmt.Sprintf("%s:%d", k, v) + } + return ret +} + +func AlertsToTable(alerts *models.GetAlertsResponse, printMachine bool) error { + + if csConfig.Cscli.Output == "raw" { + if printMachine { + fmt.Printf("id,scope,value,reason,country,as,decisions,created_at,machine\n") + } else { + fmt.Printf("id,scope,value,reason,country,as,decisions,created_at\n") + } + for _, alertItem := range *alerts { + if printMachine { + fmt.Printf("%v,%v,%v,%v,%v,%v,%v,%v,%v\n", + alertItem.ID, + *alertItem.Source.Scope, + *alertItem.Source.Value, + *alertItem.Scenario, + alertItem.Source.Cn, + alertItem.Source.AsNumber+" "+alertItem.Source.AsName, + DecisionsFromAlert(alertItem), + *alertItem.StartAt, + alertItem.MachineID) + } else { + fmt.Printf("%v,%v,%v,%v,%v,%v,%v,%v\n", + alertItem.ID, + *alertItem.Source.Scope, + *alertItem.Source.Value, + *alertItem.Scenario, + alertItem.Source.Cn, + alertItem.Source.AsNumber+" "+alertItem.Source.AsName, + DecisionsFromAlert(alertItem), + *alertItem.StartAt) + } + + } + } else if csConfig.Cscli.Output == "json" { + x, _ := json.MarshalIndent(alerts, "", " ") + fmt.Printf("%s", string(x)) + } else if csConfig.Cscli.Output == "human" { + + table := tablewriter.NewWriter(os.Stdout) + if printMachine { + table.SetHeader([]string{"ID", "value", "reason", "country", "as", "decisions", "created_at", "machine"}) + } else { + table.SetHeader([]string{"ID", "value", "reason", "country", "as", "decisions", "created_at"}) + } + + if len(*alerts) == 0 { + fmt.Println("No active alerts") + return nil + } + for _, alertItem := range *alerts { + + displayVal := *alertItem.Source.Scope + if *alertItem.Source.Value != "" { + displayVal += ":" + *alertItem.Source.Value + } + if printMachine { + table.Append([]string{ + strconv.Itoa(int(alertItem.ID)), + displayVal, + *alertItem.Scenario, + alertItem.Source.Cn, + alertItem.Source.AsNumber + " " + alertItem.Source.AsName, + DecisionsFromAlert(alertItem), + *alertItem.StartAt, + alertItem.MachineID, + }) + } else { + table.Append([]string{ + strconv.Itoa(int(alertItem.ID)), + displayVal, + *alertItem.Scenario, + alertItem.Source.Cn, + alertItem.Source.AsNumber + " " + 
alertItem.Source.AsName, + DecisionsFromAlert(alertItem), + *alertItem.StartAt, + }) + } + } + table.Render() // Send output + } + return nil +} + +func DisplayOneAlert(alert *models.Alert, withDetail bool) error { + if csConfig.Cscli.Output == "human" { + fmt.Printf("\n################################################################################################\n\n") + scopeAndValue := *alert.Source.Scope + if *alert.Source.Value != "" { + scopeAndValue += ":" + *alert.Source.Value + } + fmt.Printf(" - ID : %d\n", alert.ID) + fmt.Printf(" - Date : %s\n", alert.CreatedAt) + fmt.Printf(" - Machine : %s\n", alert.MachineID) + fmt.Printf(" - Simulation : %v\n", *alert.Simulated) + fmt.Printf(" - Reason : %s\n", *alert.Scenario) + fmt.Printf(" - Events Count : %d\n", *alert.EventsCount) + fmt.Printf(" - Scope:Value: %s\n", scopeAndValue) + fmt.Printf(" - Country : %s\n", alert.Source.Cn) + fmt.Printf(" - AS : %s\n\n", alert.Source.AsName) + foundActive := false + table := tablewriter.NewWriter(os.Stdout) + table.SetHeader([]string{"ID", "scope:value", "action", "expiration", "created_at"}) + for _, decision := range alert.Decisions { + parsedDuration, err := time.ParseDuration(*decision.Duration) + if err != nil { + log.Errorf(err.Error()) + } + expire := time.Now().Add(parsedDuration) + if time.Now().After(expire) { + continue + } + foundActive = true + scopeAndValue := *decision.Scope + if *decision.Value != "" { + scopeAndValue += ":" + *decision.Value + } + table.Append([]string{ + strconv.Itoa(int(decision.ID)), + scopeAndValue, + *decision.Type, + *decision.Duration, + alert.CreatedAt, + }) + } + if foundActive { + fmt.Printf(" - Active Decisions :\n") + table.Render() // Send output + } + + if withDetail { + fmt.Printf("\n - Events :\n") + for _, event := range alert.Events { + fmt.Printf("\n- Date: %s\n", *event.Timestamp) + table = tablewriter.NewWriter(os.Stdout) + table.SetHeader([]string{"Key", "Value"}) + for _, meta := range event.Meta { + table.Append([]string{ + meta.Key, + meta.Value, + }) + } + table.Render() // Send output + } + } + } + return nil +} + +func NewAlertsCmd() *cobra.Command { + /* ---- ALERTS COMMAND */ + var cmdAlerts = &cobra.Command{ + Use: "alerts [action]", + Short: "Manage alerts", + Args: cobra.MinimumNArgs(1), + PersistentPreRun: func(cmd *cobra.Command, args []string) { + var err error + if csConfig.API.Client == nil { + log.Fatalln("There is no configuration on 'api_client:'") + } + if csConfig.API.Client.Credentials == nil { + log.Fatalf("Please provide credentials for the API in '%s'", csConfig.API.Client.CredentialsFilePath) + } + apiURL, err := url.Parse(csConfig.API.Client.Credentials.URL) + if err != nil { + log.Fatalf("parsing api url: %s", apiURL) + } + Client, err = apiclient.NewClient(&apiclient.Config{ + MachineID: csConfig.API.Client.Credentials.Login, + Password: strfmt.Password(csConfig.API.Client.Credentials.Password), + UserAgent: fmt.Sprintf("crowdsec/%s", cwversion.VersionStr()), + URL: apiURL, + VersionPrefix: "v1", + }) + + if err != nil { + log.Fatalf("new api client: %s", err.Error()) + } + }, + } + + var alertListFilter = apiclient.AlertsListOpts{ + ScopeEquals: new(string), + ValueEquals: new(string), + ScenarioEquals: new(string), + IPEquals: new(string), + RangeEquals: new(string), + Since: new(string), + Until: new(string), + TypeEquals: new(string), + } + limit = new(int) + var cmdAlertsList = &cobra.Command{ + Use: "list [filters]", + Short: "List alerts", + Example: `cscli alerts list +cscli alerts list --ip 1.2.3.4 
+cscli alerts list --range 1.2.3.0/24
+cscli alerts list -s crowdsecurity/ssh-bf
+cscli alerts list --type ban`,
+		Run: func(cmd *cobra.Command, args []string) {
+			var err error
+
+			if err := manageCliDecisionAlerts(alertListFilter.IPEquals, alertListFilter.RangeEquals,
+				alertListFilter.ScopeEquals, alertListFilter.ValueEquals); err != nil {
+				_ = cmd.Help()
+				log.Fatalf("%s", err)
+			}
+			if limit != nil {
+				alertListFilter.Limit = limit
+			}
+
+			if *alertListFilter.Until == "" {
+				alertListFilter.Until = nil
+			} else {
+				/*time.ParseDuration supports hours 'h' as its biggest unit, let's make the user's life easier*/
+				if strings.HasSuffix(*alertListFilter.Until, "d") {
+					realDuration := strings.TrimSuffix(*alertListFilter.Until, "d")
+					days, err := strconv.Atoi(realDuration)
+					if err != nil {
+						cmd.Help()
+						log.Fatalf("Can't parse duration %s, valid durations format: 1d, 4h, 4h15m", *alertListFilter.Until)
+					}
+					*alertListFilter.Until = fmt.Sprintf("%d%s", days*24, "h")
+				}
+			}
+			if *alertListFilter.Since == "" {
+				alertListFilter.Since = nil
+			} else {
+				/*time.ParseDuration supports hours 'h' as its biggest unit, let's make the user's life easier*/
+				if strings.HasSuffix(*alertListFilter.Since, "d") {
+					realDuration := strings.TrimSuffix(*alertListFilter.Since, "d")
+					days, err := strconv.Atoi(realDuration)
+					if err != nil {
+						cmd.Help()
+						log.Fatalf("Can't parse duration %s, valid durations format: 1d, 4h, 4h15m", *alertListFilter.Since)
+					}
+					*alertListFilter.Since = fmt.Sprintf("%d%s", days*24, "h")
+				}
+			}
+			if *alertListFilter.TypeEquals == "" {
+				alertListFilter.TypeEquals = nil
+			}
+			if *alertListFilter.ScopeEquals == "" {
+				alertListFilter.ScopeEquals = nil
+			}
+			if *alertListFilter.ValueEquals == "" {
+				alertListFilter.ValueEquals = nil
+			}
+			if *alertListFilter.ScenarioEquals == "" {
+				alertListFilter.ScenarioEquals = nil
+			}
+			if *alertListFilter.IPEquals == "" {
+				alertListFilter.IPEquals = nil
+			}
+			if *alertListFilter.RangeEquals == "" {
+				alertListFilter.RangeEquals = nil
+			}
+			alerts, _, err := Client.Alerts.List(context.Background(), alertListFilter)
+			if err != nil {
+				log.Fatalf("Unable to list alerts : %v", err.Error())
+			}
+
+			err = AlertsToTable(alerts, printMachine)
+			if err != nil {
+				log.Fatalf("unable to list alerts : %v", err.Error())
+			}
+		},
+	}
+	cmdAlertsList.Flags().SortFlags = false
+	cmdAlertsList.Flags().StringVar(alertListFilter.Until, "until", "", "restrict to alerts older than until (ie. 4h, 30d)")
+	cmdAlertsList.Flags().StringVar(alertListFilter.Since, "since", "", "restrict to alerts newer than since (ie. 4h, 30d)")
+	cmdAlertsList.Flags().StringVarP(alertListFilter.IPEquals, "ip", "i", "", "restrict to alerts from this source ip (shorthand for --scope ip --value <IP>)")
+	cmdAlertsList.Flags().StringVarP(alertListFilter.ScenarioEquals, "scenario", "s", "", "the scenario (ie. crowdsecurity/ssh-bf)")
+	cmdAlertsList.Flags().StringVarP(alertListFilter.RangeEquals, "range", "r", "", "restrict to alerts from this range (shorthand for --scope range --value <RANGE>)")
+	cmdAlertsList.Flags().StringVar(alertListFilter.TypeEquals, "type", "", "restrict to alerts with given decision type (ie. ban, captcha)")
+	cmdAlertsList.Flags().StringVar(alertListFilter.ScopeEquals, "scope", "", "restrict to alerts of this scope (ie. ip,range)")
+	cmdAlertsList.Flags().StringVarP(alertListFilter.ValueEquals, "value", "v", "", "the value to match for in the specified scope")
+	cmdAlertsList.Flags().BoolVarP(&printMachine, "machine", "m", false, "print machines that sent alerts")
+	cmdAlertsList.Flags().IntVarP(limit, "limit", "l", 50, "limit size of alerts list table (0 to view all alerts)")
+	cmdAlerts.AddCommand(cmdAlertsList)
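The `--since`/`--until` handling above duplicates the same day-suffix conversion, because time.ParseDuration has no day unit. A minimal standalone sketch of that conversion, extracted into a helper (hypothetical name day2hour, not part of this diff):

    package main

    import (
    	"fmt"
    	"strconv"
    	"strings"
    )

    // day2hour rewrites a "d" (day) suffix into hours, since
    // time.ParseDuration only understands units up to "h".
    func day2hour(value string) (string, error) {
    	if !strings.HasSuffix(value, "d") {
    		return value, nil
    	}
    	days, err := strconv.Atoi(strings.TrimSuffix(value, "d"))
    	if err != nil {
    		return "", fmt.Errorf("can't parse duration %s, valid durations format: 1d, 4h, 4h15m", value)
    	}
    	return fmt.Sprintf("%dh", days*24), nil
    }

    func main() {
    	s, _ := day2hour("30d")
    	fmt.Println(s) // 720h
    }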
+
+	var ActiveDecision *bool
+	var AlertDeleteAll bool
+	var alertDeleteFilter = apiclient.AlertsDeleteOpts{
+		ScopeEquals:    new(string),
+		ValueEquals:    new(string),
+		ScenarioEquals: new(string),
+		IPEquals:       new(string),
+		RangeEquals:    new(string),
+	}
+	var cmdAlertsDelete = &cobra.Command{
+		Use: "delete [filters] [--all]",
+		Short: `Delete alerts
+/!\ This command can only be used on the same machine as the local API.`,
+		Example: `cscli alerts delete --ip 1.2.3.4
+cscli alerts delete --range 1.2.3.0/24
+cscli alerts delete -s crowdsecurity/ssh-bf`,
+		Args: cobra.ExactArgs(0),
+		PreRun: func(cmd *cobra.Command, args []string) {
+			if AlertDeleteAll {
+				return
+			}
+			if *alertDeleteFilter.ScopeEquals == "" && *alertDeleteFilter.ValueEquals == "" &&
+				*alertDeleteFilter.ScenarioEquals == "" && *alertDeleteFilter.IPEquals == "" &&
+				*alertDeleteFilter.RangeEquals == "" {
+				_ = cmd.Usage()
+				log.Fatalln("At least one filter or --all must be specified")
+			}
+		},
+		Run: func(cmd *cobra.Command, args []string) {
+			var err error
+
+			if !AlertDeleteAll {
+				if err := manageCliDecisionAlerts(alertDeleteFilter.IPEquals, alertDeleteFilter.RangeEquals,
+					alertDeleteFilter.ScopeEquals, alertDeleteFilter.ValueEquals); err != nil {
+					_ = cmd.Help()
+					log.Fatalf("%s", err)
+				}
+				if ActiveDecision != nil {
+					alertDeleteFilter.ActiveDecisionEquals = ActiveDecision
+				}
+
+				if *alertDeleteFilter.ScopeEquals == "" {
+					alertDeleteFilter.ScopeEquals = nil
+				}
+				if *alertDeleteFilter.ValueEquals == "" {
+					alertDeleteFilter.ValueEquals = nil
+				}
+				if *alertDeleteFilter.ScenarioEquals == "" {
+					alertDeleteFilter.ScenarioEquals = nil
+				}
+				if *alertDeleteFilter.IPEquals == "" {
+					alertDeleteFilter.IPEquals = nil
+				}
+				if *alertDeleteFilter.RangeEquals == "" {
+					alertDeleteFilter.RangeEquals = nil
+				}
+			} else {
+				alertDeleteFilter = apiclient.AlertsDeleteOpts{}
+			}
+			alerts, _, err := Client.Alerts.Delete(context.Background(), alertDeleteFilter)
+			if err != nil {
+				log.Fatalf("Unable to delete alerts : %v", err.Error())
+			}
+			log.Infof("%s alert(s) deleted", alerts.NbDeleted)
+
+		},
+	}
+	cmdAlertsDelete.Flags().SortFlags = false
+	cmdAlertsDelete.Flags().StringVar(alertDeleteFilter.ScopeEquals, "scope", "", "the scope (ie. ip,range)")
+	cmdAlertsDelete.Flags().StringVarP(alertDeleteFilter.ValueEquals, "value", "v", "", "the value to match for in the specified scope")
+	cmdAlertsDelete.Flags().StringVarP(alertDeleteFilter.ScenarioEquals, "scenario", "s", "", "the scenario (ie. crowdsecurity/ssh-bf)")
+	cmdAlertsDelete.Flags().StringVarP(alertDeleteFilter.IPEquals, "ip", "i", "", "Source ip (shorthand for --scope ip --value <IP>)")
+	cmdAlertsDelete.Flags().StringVarP(alertDeleteFilter.RangeEquals, "range", "r", "", "Range source ip (shorthand for --scope range --value <RANGE>)")
+	cmdAlertsDelete.Flags().BoolVarP(&AlertDeleteAll, "all", "a", false, "delete all alerts")
+
+	cmdAlerts.AddCommand(cmdAlertsDelete)
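Both the list and delete commands above repeat the same empty-string-to-nil normalization so that unset filters are omitted from the API query. A minimal sketch of that pattern as a helper (hypothetical name nilIfEmpty, not part of this diff):

    package main

    import "fmt"

    // nilIfEmpty returns nil for an unset (empty) filter value, so a
    // client building the query string can skip the parameter entirely;
    // the fields start as new(string) only so cobra has a flag target.
    func nilIfEmpty(s *string) *string {
    	if s == nil || *s == "" {
    		return nil
    	}
    	return s
    }

    func main() {
    	scope := ""
    	fmt.Println(nilIfEmpty(&scope)) // <nil>
    	scope = "ip"
    	fmt.Println(*nilIfEmpty(&scope)) // ip
    }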
+
+	var details bool
+	var cmdAlertsInspect = &cobra.Command{
+		Use:     "inspect <alert_id>",
+		Short:   `Show info about an alert`,
+		Example: `cscli alerts inspect 123`,
+		Run: func(cmd *cobra.Command, args []string) {
+			if len(args) == 0 {
+				_ = cmd.Help()
+				return
+			}
+			for _, alertID := range args {
+				id, err := strconv.Atoi(alertID)
+				if err != nil {
+					log.Fatalf("bad alert id %s", alertID)
+					continue
+				}
+				alert, _, err := Client.Alerts.GetByID(context.Background(), id)
+				if err != nil {
+					log.Fatalf("can't find alert with id %s: %s", alertID, err)
+				}
+				switch csConfig.Cscli.Output {
+				case "human":
+					if err := DisplayOneAlert(alert, details); err != nil {
+						continue
+					}
+				case "json":
+					data, err := json.MarshalIndent(alert, "", " ")
+					if err != nil {
+						log.Fatalf("unable to marshal alert with id %s: %s", alertID, err)
+					}
+					fmt.Printf("%s\n", string(data))
+				case "raw":
+					data, err := yaml.Marshal(alert)
+					if err != nil {
+						log.Fatalf("unable to marshal alert with id %s: %s", alertID, err)
+					}
+					fmt.Printf("%s\n", string(data))
+				}
+			}
+		},
+	}
+	cmdAlertsInspect.Flags().SortFlags = false
+	cmdAlertsInspect.Flags().BoolVarP(&details, "details", "d", false, "show alerts with events")
+
+	cmdAlerts.AddCommand(cmdAlertsInspect)
+
+	return cmdAlerts
+}
diff --git a/cmd/crowdsec-cli/api.go b/cmd/crowdsec-cli/api.go
deleted file mode 100644
index 7ed1519dd..000000000
--- a/cmd/crowdsec-cli/api.go
+++ /dev/null
@@ -1,289 +0,0 @@
-package main
-
-import (
-	"encoding/json"
-	"fmt"
-	"io/ioutil"
-	"math/rand"
-	"path"
-	"strings"
-	"time"
-
-	"github.com/crowdsecurity/crowdsec/pkg/cwhub"
-	"github.com/crowdsecurity/crowdsec/pkg/outputs"
-	"github.com/crowdsecurity/crowdsec/pkg/types"
-
-	"github.com/denisbrodbeck/machineid"
-	log "github.com/sirupsen/logrus"
-	"github.com/spf13/cobra"
-	"gopkg.in/yaml.v2"
-)
-
-var (
-	upper  = "ABCDEFGHIJKLMNOPQRSTUVWXY"
-	lower  = "abcdefghijklmnopqrstuvwxyz"
-	digits = "0123456789"
-)
-
-var (
-	userID    string // for flag parsing
-	outputCTX *outputs.Output
-)
-
-const (
-	uuid          = "/proc/sys/kernel/random/uuid"
-	apiConfigFile = "api.yaml"
-)
-
-func dumpCredentials() error {
-	if config.output == "json" {
-		credsYaml, err := json.Marshal(&outputCTX.API.Creds)
-		if err != nil {
-			log.Fatalf("Can't marshal credentials : %v", err)
-		}
-		fmt.Printf("%s\n", string(credsYaml))
-	} else {
-		credsYaml, err := yaml.Marshal(&outputCTX.API.Creds)
-		if err != nil {
-			log.Fatalf("Can't marshal credentials : %v", err)
-		}
-		fmt.Printf("%s\n", string(credsYaml))
-	}
-	return nil
-}
-
-func generatePassword(passwordLength int) string {
-	rand.Seed(time.Now().UnixNano())
-	charset := upper + lower + digits
-
-	buf := make([]byte, passwordLength)
-	buf[0] = digits[rand.Intn(len(digits))]
-	buf[1] = upper[rand.Intn(len(upper))]
-	buf[2] = lower[rand.Intn(len(lower))]
-
-	for i := 3; i < passwordLength; i++ {
-		buf[i] = charset[rand.Intn(len(charset))]
-	}
-	rand.Shuffle(len(buf), func(i, j int) {
-		buf[i], buf[j] = buf[j], buf[i]
-	})
-
-	return string(buf)
-}
-
-func pullTOP() error {
-	/*profile from cwhub*/
-	var profiles []string
-	if _, ok := 
cwhub.HubIdx[cwhub.SCENARIOS]; !ok || len(cwhub.HubIdx[cwhub.SCENARIOS]) == 0 { - log.Errorf("no loaded scenarios, can't fill profiles") - return fmt.Errorf("no profiles") - } - for _, item := range cwhub.HubIdx[cwhub.SCENARIOS] { - if item.Tainted || !item.Installed { - continue - } - profiles = append(profiles, item.Name) - } - outputCTX.API.Creds.Profile = strings.Join(profiles[:], ",") - if err := outputCTX.API.Signin(); err != nil { - log.Fatalf(err.Error()) - } - - ret, err := outputCTX.API.PullTop() - if err != nil { - log.Fatalf(err.Error()) - } - log.Warningf("api pull returned %d entries", len(ret)) - for _, item := range ret { - if _, ok := item["range_ip"]; !ok { - continue - } - if _, ok := item["scenario"]; !ok { - continue - } - - if _, ok := item["action"]; !ok { - continue - } - if _, ok := item["expiration"]; !ok { - continue - } - if _, ok := item["country"]; !ok { - item["country"] = "" - } - if _, ok := item["as_org"]; !ok { - item["as_org"] = "" - } - if _, ok := item["as_num"]; !ok { - item["as_num"] = "" - } - var signalOcc types.SignalOccurence - signalOcc, err = simpleBanToSignal(item["range_ip"], item["scenario"], item["expiration"], item["action"], item["as_name"], item["as_num"], item["country"], "api") - if err != nil { - return fmt.Errorf("failed to convert ban to signal : %s", err) - } - if err := outputCTX.Insert(signalOcc); err != nil { - log.Fatalf("Unable to write pull to Database : %+s", err.Error()) - } - } - outputCTX.Flush() - log.Infof("Wrote %d bans from api to database.", len(ret)) - return nil -} - -func NewAPICmd() *cobra.Command { - - var cmdAPI = &cobra.Command{ - Use: "api [action]", - Short: "Crowdsec API interaction", - Long: ` -Allow to register your machine into crowdsec API to send and receive signal. - `, - Example: ` -cscli api register # Register to Crowdsec API -cscli api pull # Pull malevolant IPs from Crowdsec API -cscli api reset # Reset your machines credentials -cscli api enroll # Enroll your machine to the user account you created on Crowdsec backend -cscli api credentials # Display your API credentials -`, - Args: cobra.MinimumNArgs(1), - PersistentPreRunE: func(cmd *cobra.Command, args []string) error { - var err error - if !config.configured { - return fmt.Errorf("you must configure cli before interacting with hub") - } - - outputConfig := outputs.OutputFactory{ - BackendFolder: config.BackendPluginFolder, - Flush: false, - } - outputCTX, err = outputs.NewOutput(&outputConfig) - if err != nil { - return err - } - - err = outputCTX.LoadAPIConfig(path.Join(config.InstallFolder, apiConfigFile)) - if err != nil { - return err - } - return nil - }, - } - - var cmdAPIRegister = &cobra.Command{ - Use: "register", - Short: "Register on Crowdsec API", - Long: `This command will register your machine to crowdsec API to allow you to receive list of malveolent IPs. 
- The printed machine_id and password should be added to your api.yaml file.`, - Example: `cscli api register`, - Args: cobra.MinimumNArgs(0), - Run: func(cmd *cobra.Command, args []string) { - id, err := machineid.ID() - if err != nil { - log.Debugf("failed to get machine-id with usual files : %s", err) - } - if id == "" || err != nil { - bID, err := ioutil.ReadFile(uuid) - if err != nil { - log.Fatalf("can'get a valid machine_id") - } - id = string(bID) - id = strings.ReplaceAll(id, "-", "")[:32] - } - password := generatePassword(64) - - if err := outputCTX.API.RegisterMachine(fmt.Sprintf("%s%s", id, generatePassword(16)), password); err != nil { - log.Fatalf(err.Error()) - } - fmt.Printf("machine_id: %s\n", outputCTX.API.Creds.User) - fmt.Printf("password: %s\n", outputCTX.API.Creds.Password) - fmt.Printf("#You need to append these credentials in /etc/crowdsec/config/api.yaml") - }, - } - - var cmdAPIEnroll = &cobra.Command{ - Use: "enroll", - Short: "Associate your machine to an existing crowdsec user", - Long: `Enrolling your machine into your user account will allow for more accurate lists and threat detection. See website to create user account.`, - Example: `cscli api enroll -u 1234567890ffff`, - Args: cobra.MinimumNArgs(0), - Run: func(cmd *cobra.Command, args []string) { - if err := outputCTX.API.Signin(); err != nil { - log.Fatalf("unable to signin : %s", err) - } - if err := outputCTX.API.Enroll(userID); err != nil { - log.Fatalf(err.Error()) - } - }, - } - - var cmdAPIResetPassword = &cobra.Command{ - Use: "reset", - Short: "Reset password on CrowdSec API", - Long: `Attempts to reset your credentials to the API.`, - Example: `cscli api reset`, - Args: cobra.MinimumNArgs(0), - Run: func(cmd *cobra.Command, args []string) { - id, err := machineid.ID() - if err != nil { - log.Debugf("failed to get machine-id with usual files : %s", err) - } - if id == "" || err != nil { - bID, err := ioutil.ReadFile(uuid) - if err != nil { - log.Fatalf("can'get a valid machine_id") - } - id = string(bID) - id = strings.ReplaceAll(id, "-", "")[:32] - } - - password := generatePassword(64) - if err := outputCTX.API.ResetPassword(fmt.Sprintf("%s%s", id, generatePassword(16)), password); err != nil { - log.Fatalf(err.Error()) - } - fmt.Printf("machine_id: %s\n", outputCTX.API.Creds.User) - fmt.Printf("password: %s\n", outputCTX.API.Creds.Password) - }, - } - - var cmdAPIPull = &cobra.Command{ - Use: "pull", - Short: "Pull crowdsec API TopX", - Long: `Pulls a list of malveolent IPs relevant to your situation and add them into the local ban database.`, - Example: `cscli api pull`, - Args: cobra.MinimumNArgs(0), - Run: func(cmd *cobra.Command, args []string) { - if err := cwhub.GetHubIdx(); err != nil { - log.Fatalf(err.Error()) - } - err := pullTOP() - if err != nil { - log.Fatalf(err.Error()) - } - }, - } - - var cmdAPICreds = &cobra.Command{ - Use: "credentials", - Short: "Display api credentials", - Long: ``, - Example: `cscli api credentials`, - Args: cobra.MinimumNArgs(0), - Run: func(cmd *cobra.Command, args []string) { - if err := dumpCredentials(); err != nil { - log.Fatalf(err.Error()) - } - }, - } - - cmdAPI.AddCommand(cmdAPICreds) - cmdAPIEnroll.Flags().StringVarP(&userID, "user", "u", "", "User ID (required)") - if err := cmdAPIEnroll.MarkFlagRequired("user"); err != nil { - log.Errorf("'user' flag : %s", err) - } - cmdAPI.AddCommand(cmdAPIEnroll) - cmdAPI.AddCommand(cmdAPIResetPassword) - cmdAPI.AddCommand(cmdAPIRegister) - cmdAPI.AddCommand(cmdAPIPull) - return cmdAPI -} diff --git 
a/cmd/crowdsec-cli/backup-restore.go b/cmd/crowdsec-cli/backup-restore.go deleted file mode 100644 index 3cd14026d..000000000 --- a/cmd/crowdsec-cli/backup-restore.go +++ /dev/null @@ -1,556 +0,0 @@ -package main - -import ( - "encoding/json" - "fmt" - "io" - "io/ioutil" - "os" - "path" - "path/filepath" - "strings" - - "github.com/crowdsecurity/crowdsec/pkg/cwapi" - "github.com/crowdsecurity/crowdsec/pkg/cwhub" - "github.com/crowdsecurity/crowdsec/pkg/outputs" - log "github.com/sirupsen/logrus" - "github.com/spf13/cobra" -) - -//it's a rip of the cli version, but in silent-mode -func silenceInstallItem(name string, obtype string) (string, error) { - for _, it := range cwhub.HubIdx[obtype] { - if it.Name == name { - if download_only && it.Downloaded && it.UpToDate { - return fmt.Sprintf("%s is already downloaded and up-to-date", it.Name), nil - } - it, err := cwhub.DownloadLatest(it, cwhub.Hubdir, force_install, config.DataFolder) - if err != nil { - return "", fmt.Errorf("error while downloading %s : %v", it.Name, err) - } - cwhub.HubIdx[obtype][it.Name] = it - if download_only { - return fmt.Sprintf("Downloaded %s to %s", it.Name, cwhub.Hubdir+"/"+it.RemotePath), nil - } - it, err = cwhub.EnableItem(it, cwhub.Installdir, cwhub.Hubdir) - if err != nil { - return "", fmt.Errorf("error while enabled %s : %v", it.Name, err) - } - cwhub.HubIdx[obtype][it.Name] = it - - return fmt.Sprintf("Enabled %s", it.Name), nil - } - } - return "", fmt.Errorf("%s not found in hub index", name) -} - -/*help to copy the file, ioutil doesn't offer the feature*/ - -func copyFileContents(src, dst string) (err error) { - in, err := os.Open(src) - if err != nil { - return - } - defer in.Close() - out, err := os.Create(dst) - if err != nil { - return - } - defer func() { - cerr := out.Close() - if err == nil { - err = cerr - } - }() - if _, err = io.Copy(out, in); err != nil { - return - } - err = out.Sync() - return -} - -/*copy the file, ioutile doesn't offer the feature*/ -func copyFile(sourceSymLink, destinationFile string) (err error) { - - sourceFile, err := filepath.EvalSymlinks(sourceSymLink) - if err != nil { - log.Infof("Not a symlink : %s", err) - sourceFile = sourceSymLink - } - - sourceFileStat, err := os.Stat(sourceFile) - if err != nil { - return - } - if !sourceFileStat.Mode().IsRegular() { - // cannot copy non-regular files (e.g., directories, - // symlinks, devices, etc.) - return fmt.Errorf("copyFile: non-regular source file %s (%q)", sourceFileStat.Name(), sourceFileStat.Mode().String()) - } - destinationFileStat, err := os.Stat(destinationFile) - if err != nil { - if !os.IsNotExist(err) { - return - } - } else { - if !(destinationFileStat.Mode().IsRegular()) { - return fmt.Errorf("copyFile: non-regular destination file %s (%q)", destinationFileStat.Name(), destinationFileStat.Mode().String()) - } - if os.SameFile(sourceFileStat, destinationFileStat) { - return - } - } - if err = os.Link(sourceFile, destinationFile); err == nil { - return - } - err = copyFileContents(sourceFile, destinationFile) - return -} - -/*given a backup directory, restore configs (parser,collections..) both tainted and untainted. 
-as well attempts to restore api credentials after verifying the existing ones aren't good -finally restores the acquis.yaml file*/ -func restoreFromDirectory(source string) error { - var err error - - /*restore simulation configuration*/ - backSimul := fmt.Sprintf("%s/simulation.yaml", source) - if _, err = os.Stat(backSimul); err == nil { - if err = copyFile(backSimul, config.SimulationCfgPath); err != nil { - return fmt.Errorf("failed copy %s to %s : %s", backSimul, config.SimulationCfgPath, err) - } - } - - /*restore scenarios etc.*/ - for _, itype := range cwhub.ItemTypes { - itemDirectory := fmt.Sprintf("%s/%s/", source, itype) - if _, err = os.Stat(itemDirectory); err != nil { - log.Infof("no %s in backup", itype) - continue - } - /*restore the upstream items*/ - upstreamListFN := fmt.Sprintf("%s/upstream-%s.json", itemDirectory, itype) - file, err := ioutil.ReadFile(upstreamListFN) - if err != nil { - return fmt.Errorf("error while opening %s : %s", upstreamListFN, err) - } - var upstreamList []string - err = json.Unmarshal([]byte(file), &upstreamList) - if err != nil { - return fmt.Errorf("error unmarshaling %s : %s", upstreamListFN, err) - } - for _, toinstall := range upstreamList { - label, err := silenceInstallItem(toinstall, itype) - if err != nil { - log.Errorf("Error while installing %s : %s", toinstall, err) - } else if label != "" { - log.Infof("Installed %s : %s", toinstall, label) - } else { - log.Printf("Installed %s : ok", toinstall) - } - } - /*restore the local and tainted items*/ - files, err := ioutil.ReadDir(itemDirectory) - if err != nil { - return fmt.Errorf("failed enumerating files of %s : %s", itemDirectory, err) - } - for _, file := range files { - //dir are stages, keep track - if !file.IsDir() { - continue - } - stage := file.Name() - stagedir := fmt.Sprintf("%s/%s/%s/", config.InstallFolder, itype, stage) - log.Debugf("Found stage %s in %s, target directory : %s", stage, itype, stagedir) - if err = os.MkdirAll(stagedir, os.ModePerm); err != nil { - return fmt.Errorf("error while creating stage directory %s : %s", stagedir, err) - } - /*find items*/ - ifiles, err := ioutil.ReadDir(itemDirectory + "/" + stage + "/") - if err != nil { - return fmt.Errorf("failed enumerating files of %s : %s", itemDirectory+"/"+stage, err) - } - //finaly copy item - for _, tfile := range ifiles { - log.Infof("Going to restore local/tainted [%s]", tfile.Name()) - sourceFile := fmt.Sprintf("%s/%s/%s", itemDirectory, stage, tfile.Name()) - destinationFile := fmt.Sprintf("%s%s", stagedir, tfile.Name()) - if err = copyFile(sourceFile, destinationFile); err != nil { - return fmt.Errorf("failed copy %s %s to %s : %s", itype, sourceFile, destinationFile, err) - } - log.Infof("restored %s to %s", sourceFile, destinationFile) - } - } - } - /*restore api credentials*/ - //check if credentials exists : - // - if no, restore - // - if yes, try them : - // - if it works, left untouched - // - if not, restore - // -> try login - if err := restoreAPICreds(source); err != nil { - return fmt.Errorf("failed to restore api credentials : %s", err) - } - /* - Restore acquis - */ - yamlAcquisFile := fmt.Sprintf("%s/acquis.yaml", config.InstallFolder) - bac := fmt.Sprintf("%s/acquis.yaml", source) - if err = copyFile(bac, yamlAcquisFile); err != nil { - return fmt.Errorf("failed copy %s to %s : %s", bac, yamlAcquisFile, err) - } - log.Infof("Restore acquis to %s", yamlAcquisFile) - - /* Restore plugins configuration */ - var pluginsConfigFile []string - walkErr := 
filepath.Walk(fmt.Sprintf("%s/plugins/backend/", source), func(path string, info os.FileInfo, err error) error { - if err != nil { - return fmt.Errorf("walk error : %s", err) - } - fi, err := os.Stat(path) - if err != nil { - return fmt.Errorf("unable to stats file '%s' : %s", path, err) - } - mode := fi.Mode() - if mode.IsRegular() { - pluginsConfigFile = append(pluginsConfigFile, path) - } - return nil - }) - if walkErr != nil { - return fmt.Errorf("error while listing folder '%s' : %s", fmt.Sprintf("%s/plugins/backend/", source), walkErr) - } - - if err := os.MkdirAll(outputCTX.Config.BackendFolder, os.ModePerm); err != nil { - return fmt.Errorf("error while creating backup folder dir %s : %s", outputCTX.Config.BackendFolder, err) - } - - for _, file := range pluginsConfigFile { - _, filename := path.Split(file) - backupFile := fmt.Sprintf("%s/%s", outputCTX.Config.BackendFolder, filename) - log.Printf("Restoring '%s' to '%s'", file, backupFile) - if err := copyFile(file, backupFile); err != nil { - return fmt.Errorf("error while copying '%s' to '%s' : %s", file, backupFile, err) - } - } - - return nil -} - -func restoreAPICreds(source string) error { - var err error - - /*check existing configuration*/ - apiyaml := path.Join(config.InstallFolder, apiConfigFile) - - api := &cwapi.ApiCtx{} - if err = api.LoadConfig(apiyaml); err != nil { - return fmt.Errorf("unable to load api config %s : %s", apiyaml, err) - } - if api.Creds.User != "" { - log.Infof("Credentials present in existing configuration, try before override") - err := api.Signin() - if err == nil { - log.Infof("Credentials present allow authentication, don't override !") - return nil - } else { - log.Infof("Credentials aren't valid : %s", err) - } - } - /*existing config isn't good, override it !*/ - ret, err := ioutil.ReadFile(path.Join(source, "api_creds.json")) - if err != nil { - return fmt.Errorf("failed to read api creds from save : %s", err) - } - if err := json.Unmarshal(ret, &api.Creds); err != nil { - return fmt.Errorf("failed unmarshaling saved credentials : %s", err) - } - api.CfgUser = api.Creds.User - api.CfgPassword = api.Creds.Password - /*override the existing yaml file*/ - if err := api.WriteConfig(apiyaml); err != nil { - return fmt.Errorf("failed writing to %s : %s", apiyaml, err) - } else { - log.Infof("Overwritting %s with backup info", apiyaml) - } - - /*reload to check everything is safe*/ - if err = api.LoadConfig(apiyaml); err != nil { - return fmt.Errorf("unable to load api config %s : %s", apiyaml, err) - } - - if err := api.Signin(); err != nil { - log.Errorf("Failed to authenticate after credentials restaurtion : %v", err) - } else { - log.Infof("Successfully auth to API after credentials restauration") - } - - return nil -} - -func backupToDirectory(target string) error { - var itemDirectory string - var upstreamParsers []string - var err error - if target == "" { - return fmt.Errorf("target directory can't be empty") - } - log.Warningf("Starting configuration backup") - _, err = os.Stat(target) - if err == nil { - return fmt.Errorf("%s already exists", target) - } - if err = os.MkdirAll(target, os.ModePerm); err != nil { - return fmt.Errorf("error while creating %s : %s", target, err) - } - /* - backup configurations : - - parers, scenarios, collections, postoverflows - - simulation configuration - */ - if config.SimulationCfgPath != "" { - backSimul := fmt.Sprintf("%s/simulation.yaml", target) - if err = copyFile(config.SimulationCfgPath, backSimul); err != nil { - return fmt.Errorf("failed copy 
%s to %s : %s", config.SimulationCfgPath, backSimul, err) - } - } - - for _, itemType := range cwhub.ItemTypes { - clog := log.WithFields(log.Fields{ - "type": itemType, - }) - if _, ok := cwhub.HubIdx[itemType]; ok { - itemDirectory = fmt.Sprintf("%s/%s/", target, itemType) - if err := os.MkdirAll(itemDirectory, os.ModePerm); err != nil { - return fmt.Errorf("error while creating %s : %s", itemDirectory, err) - } - upstreamParsers = []string{} - stage := "" - for k, v := range cwhub.HubIdx[itemType] { - clog = clog.WithFields(log.Fields{ - "file": v.Name, - }) - if !v.Installed { //only backup installed ones - clog.Debugf("[%s] : not installed", k) - continue - } - - //for the local/tainted ones, we backup the full file - if v.Tainted || v.Local || !v.UpToDate { - //we need to backup stages for parsers - if itemType == cwhub.PARSERS || itemType == cwhub.PARSERS_OVFLW { - tmp := strings.Split(v.LocalPath, "/") - stage = "/" + tmp[len(tmp)-2] + "/" - fstagedir := fmt.Sprintf("%s%s", itemDirectory, stage) - if err := os.MkdirAll(fstagedir, os.ModePerm); err != nil { - return fmt.Errorf("error while creating stage dir %s : %s", fstagedir, err) - } - } - clog.Debugf("[%s] : backuping file (tainted:%t local:%t up-to-date:%t)", k, v.Tainted, v.Local, v.UpToDate) - tfile := fmt.Sprintf("%s%s%s", itemDirectory, stage, v.FileName) - //clog.Infof("item : %s", spew.Sdump(v)) - if err = copyFile(v.LocalPath, tfile); err != nil { - return fmt.Errorf("failed copy %s %s to %s : %s", itemType, v.LocalPath, tfile, err) - } - clog.Infof("local/tainted saved %s to %s", v.LocalPath, tfile) - continue - } - clog.Debugf("[%s] : from hub, just backup name (up-to-date:%t)", k, v.UpToDate) - clog.Infof("saving, version:%s, up-to-date:%t", v.Version, v.UpToDate) - upstreamParsers = append(upstreamParsers, v.Name) - } - //write the upstream items - upstreamParsersFname := fmt.Sprintf("%s/upstream-%s.json", itemDirectory, itemType) - upstreamParsersContent, err := json.MarshalIndent(upstreamParsers, "", " ") - if err != nil { - return fmt.Errorf("failed marshaling upstream parsers : %s", err) - } - err = ioutil.WriteFile(upstreamParsersFname, upstreamParsersContent, 0644) - if err != nil { - return fmt.Errorf("unable to write to %s %s : %s", itemType, upstreamParsersFname, err) - } - clog.Infof("Wrote %d entries for %s to %s", len(upstreamParsers), itemType, upstreamParsersFname) - - } else { - clog.Infof("No %s to backup.", itemType) - } - } - /* - Backup acquis - */ - yamlAcquisFile := fmt.Sprintf("%s/acquis.yaml", config.InstallFolder) - bac := fmt.Sprintf("%s/acquis.yaml", target) - if err = copyFile(yamlAcquisFile, bac); err != nil { - return fmt.Errorf("failed copy %s to %s : %s", yamlAcquisFile, bac, err) - } - log.Infof("Saved acquis to %s", bac) - /* - Backup default.yaml - */ - defyaml := fmt.Sprintf("%s/default.yaml", config.InstallFolder) - bac = fmt.Sprintf("%s/default.yaml", target) - if err = copyFile(defyaml, bac); err != nil { - return fmt.Errorf("failed copy %s to %s : %s", yamlAcquisFile, bac, err) - } - log.Infof("Saved default yaml to %s", bac) - /* - Backup API info - */ - if outputCTX == nil { - log.Fatalf("no API output context, won't save api credentials") - } - outputCTX.API = &cwapi.ApiCtx{} - if err = outputCTX.API.LoadConfig(path.Join(config.InstallFolder, apiConfigFile)); err != nil { - return fmt.Errorf("unable to load api config %s : %s", path.Join(config.InstallFolder, apiConfigFile), err) - } - credsYaml, err := json.Marshal(&outputCTX.API.Creds) - if err != nil { - log.Fatalf("can't 
marshal credentials : %v", err) - } - apiCredsDumped := fmt.Sprintf("%s/api_creds.json", target) - err = ioutil.WriteFile(apiCredsDumped, credsYaml, 0600) - if err != nil { - return fmt.Errorf("unable to write credentials to %s : %s", apiCredsDumped, err) - } - log.Infof("Saved configuration to %s", target) - - /* Backup plugins configuration */ - var pluginsConfigFile []string - walkErr := filepath.Walk(outputCTX.Config.BackendFolder, func(path string, info os.FileInfo, err error) error { - if err != nil { - return fmt.Errorf("walk error : %s", err) - } - fi, err := os.Stat(path) - if err != nil { - return fmt.Errorf("unable to stats file '%s' : %s", path, err) - } - mode := fi.Mode() - if mode.IsRegular() { - pluginsConfigFile = append(pluginsConfigFile, path) - } - return nil - }) - if walkErr != nil { - return fmt.Errorf("error while listing folder '%s' : %s", outputCTX.Config.BackendFolder, walkErr) - } - - targetDir := fmt.Sprintf("%s/plugins/backend/", target) - if err := os.MkdirAll(targetDir, os.ModePerm); err != nil { - return fmt.Errorf("error while creating backup folder dir %s : %s", targetDir, err) - } - - for _, file := range pluginsConfigFile { - _, filename := path.Split(file) - backupFile := fmt.Sprintf("%s/plugins/backend/%s", target, filename) - if err := copyFile(file, backupFile); err != nil { - return fmt.Errorf("unable to copy file '%s' to '%s' : %s", file, backupFile, err) - } - } - - return nil -} - -func NewBackupCmd() *cobra.Command { - var cmdBackup = &cobra.Command{ - Use: "backup [save|restore] ", - Short: "Backup or restore configuration (api, parsers, scenarios etc.) to/from directory", - Long: `This command is here to help you save and/or restore crowdsec configurations to simple replication`, - Example: `cscli backup save ./my-backup -cscli backup restore ./my-backup`, - PersistentPreRunE: func(cmd *cobra.Command, args []string) error { - if !config.configured { - return fmt.Errorf("you must configure cli before interacting with hub") - } - return nil - }, - } - - var cmdBackupSave = &cobra.Command{ - Use: "save ", - Short: "Backup configuration (api, parsers, scenarios etc.) to directory", - Long: `backup command will try to save all relevant informations to crowdsec config, including : - -- List of scenarios, parsers, postoverflows and collections that are up-to-date - -- Actual backup of tainted/local/out-of-date scenarios, parsers, postoverflows and collections - -- Backup of API credentials - -- Backup of acquisition configuration - - `, - Example: `cscli backup save ./my-backup`, - Args: cobra.ExactArgs(1), - PersistentPreRunE: func(cmd *cobra.Command, args []string) error { - if !config.configured { - return fmt.Errorf("you must configure cli before interacting with hub") - } - return nil - }, - Run: func(cmd *cobra.Command, args []string) { - var err error - - outputConfig := outputs.OutputFactory{ - BackendFolder: config.BackendPluginFolder, - Flush: false, - } - outputCTX, err = outputs.NewOutput(&outputConfig) - if err != nil { - log.Fatalf("Failed to load output plugins : %v", err) - } - if err := cwhub.GetHubIdx(); err != nil { - log.Fatalf("Failed to get Hub index : %v", err) - } - if err := backupToDirectory(args[0]); err != nil { - log.Fatalf("Failed backuping to %s : %s", args[0], err) - } - }, - } - cmdBackup.AddCommand(cmdBackupSave) - - var cmdBackupRestore = &cobra.Command{ - Use: "restore ", - Short: "Restore configuration (api, parsers, scenarios etc.) 
from directory", - Long: `restore command will try to restore all saved information from to yor local setup, including : - -- Installation of up-to-date scenarios/parsers/... via cscli - -- Restauration of tainted/local/out-of-date scenarios/parsers/... file - -- Restauration of API credentials (if the existing ones aren't working) - -- Restauration of acqusition configuration -`, - Example: `cscli backup restore ./my-backup`, - Args: cobra.ExactArgs(1), - PersistentPreRunE: func(cmd *cobra.Command, args []string) error { - if !config.configured { - return fmt.Errorf("you must configure cli before interacting with hub") - } - return nil - }, - Run: func(cmd *cobra.Command, args []string) { - var err error - - outputConfig := outputs.OutputFactory{ - BackendFolder: config.BackendPluginFolder, - Flush: false, - } - outputCTX, err = outputs.NewOutput(&outputConfig) - if err != nil { - log.Fatalf("Failed to load output plugins : %v", err) - } - - if err := cwhub.GetHubIdx(); err != nil { - log.Fatalf("failed to get Hub index : %v", err) - } - if err := restoreFromDirectory(args[0]); err != nil { - log.Fatalf("failed restoring from %s : %s", args[0], err) - } - }, - } - cmdBackup.AddCommand(cmdBackupRestore) - - return cmdBackup -} diff --git a/cmd/crowdsec-cli/ban.go b/cmd/crowdsec-cli/ban.go deleted file mode 100644 index f408ccc81..000000000 --- a/cmd/crowdsec-cli/ban.go +++ /dev/null @@ -1,470 +0,0 @@ -package main - -import ( - "encoding/json" - "fmt" - "net" - "os" - "strconv" - "strings" - "time" - - "github.com/crowdsecurity/crowdsec/pkg/outputs" - "github.com/crowdsecurity/crowdsec/pkg/parser" - "github.com/crowdsecurity/crowdsec/pkg/types" - - "github.com/olekukonko/tablewriter" - log "github.com/sirupsen/logrus" - "github.com/spf13/cobra" -) - -var remediationType string -var atTime string - -//user supplied filters -var ipFilter, rangeFilter, reasonFilter, countryFilter, asFilter string -var displayLimit int -var displayAPI, displayALL bool - -func simpleBanToSignal(targetIP string, reason string, expirationStr string, action string, asName string, asNum string, country string, banSource string) (types.SignalOccurence, error) { - var signalOcc types.SignalOccurence - - expiration, err := time.ParseDuration(expirationStr) - if err != nil { - return signalOcc, err - } - - asOrgInt := 0 - if asNum != "" { - asOrgInt, err = strconv.Atoi(asNum) - if err != nil { - log.Infof("Invalid as value %s : %s", asNum, err) - } - } - - banApp := types.BanApplication{ - MeasureSource: banSource, - MeasureType: action, - Until: time.Now().Add(expiration), - IpText: targetIP, - TargetCN: country, - TargetAS: asOrgInt, - TargetASName: asName, - Reason: reason, - } - var parsedIP net.IP - var parsedRange *net.IPNet - if strings.Contains(targetIP, "/") { - if _, parsedRange, err = net.ParseCIDR(targetIP); err != nil { - return signalOcc, fmt.Errorf("'%s' is not a valid CIDR", targetIP) - } - if parsedRange == nil { - return signalOcc, fmt.Errorf("unable to parse network : %s", err) - } - banApp.StartIp = types.IP2Int(parsedRange.IP) - banApp.EndIp = types.IP2Int(types.LastAddress(parsedRange)) - } else { - parsedIP = net.ParseIP(targetIP) - if parsedIP == nil { - return signalOcc, fmt.Errorf("'%s' is not a valid IP", targetIP) - } - banApp.StartIp = types.IP2Int(parsedIP) - banApp.EndIp = types.IP2Int(parsedIP) - } - - var banApps = make([]types.BanApplication, 0) - banApps = append(banApps, banApp) - signalOcc = types.SignalOccurence{ - Scenario: reason, - Events_count: 1, - Start_at: time.Now(), - 
Stop_at: time.Now(), - BanApplications: banApps, - Source_ip: targetIP, - Source_AutonomousSystemNumber: asNum, - Source_AutonomousSystemOrganization: asName, - Source_Country: country, - } - return signalOcc, nil -} - -func filterBans(bans []map[string]string) ([]map[string]string, error) { - - var retBans []map[string]string - - for _, ban := range bans { - var banIP net.IP - var banRange *net.IPNet - var keep bool = true - var err error - - if ban["iptext"] != "" { - if strings.Contains(ban["iptext"], "/") { - log.Debugf("%s is a range", ban["iptext"]) - banIP, banRange, err = net.ParseCIDR(ban["iptext"]) - if err != nil { - log.Warningf("failed to parse range '%s' from database : %s", ban["iptext"], err) - } - } else { - log.Debugf("%s is IP", ban["iptext"]) - banIP = net.ParseIP(ban["iptext"]) - } - } - - if ipFilter != "" { - var filterBinIP net.IP = net.ParseIP(ipFilter) - - if banRange != nil { - if banRange.Contains(filterBinIP) { - log.Debugf("[keep] ip filter is set, and range contains ip") - keep = true - } else { - log.Debugf("[discard] ip filter is set, and range doesn't contain ip") - keep = false - } - } else { - if ipFilter == ban["iptext"] { - log.Debugf("[keep] (ip) %s == %s", ipFilter, ban["iptext"]) - keep = true - } else { - log.Debugf("[discard] (ip) %s == %s", ipFilter, ban["iptext"]) - keep = false - } - } - } - if rangeFilter != "" { - _, filterBinRange, err := net.ParseCIDR(rangeFilter) - if err != nil { - return nil, fmt.Errorf("failed to parse range '%s' : %s", rangeFilter, err) - } - if filterBinRange.Contains(banIP) { - log.Debugf("[keep] range filter %s contains %s", rangeFilter, banIP.String()) - keep = true - } else { - log.Debugf("[discard] range filter %s doesn't contain %s", rangeFilter, banIP.String()) - keep = false - } - } - if reasonFilter != "" { - if strings.Contains(ban["reason"], reasonFilter) { - log.Debugf("[keep] reason filter %s matches %s", reasonFilter, ban["reason"]) - keep = true - } else { - log.Debugf("[discard] reason filter %s doesn't match %s", reasonFilter, ban["reason"]) - keep = false - } - } - - if countryFilter != "" { - if ban["cn"] == countryFilter { - log.Debugf("[keep] country filter %s matches %s", countryFilter, ban["cn"]) - keep = true - } else { - log.Debugf("[discard] country filter %s matches %s", countryFilter, ban["cn"]) - keep = false - } - } - - if asFilter != "" { - if strings.Contains(ban["as"], asFilter) { - log.Debugf("[keep] AS filter %s matches %s", asFilter, ban["as"]) - keep = true - } else { - log.Debugf("[discard] AS filter %s doesn't match %s", asFilter, ban["as"]) - keep = false - } - } - - if keep { - retBans = append(retBans, ban) - } else { - log.Debugf("[discard] discard %v", ban) - } - } - return retBans, nil -} - -func BanList() error { - at := time.Now() - if atTime != "" { - _, at = parser.GenDateParse(atTime) - if at.IsZero() { - return fmt.Errorf("unable to parse date '%s'", atTime) - } - } - ret, err := outputCTX.ReadAT(at) - if err != nil { - return fmt.Errorf("unable to get records from Database : %v", err) - } - ret, err = filterBans(ret) - if err != nil { - log.Errorf("Error while filtering : %s", err) - } - if config.output == "raw" { - fmt.Printf("source,ip,reason,bans,action,country,as,events_count,expiration\n") - for _, rm := range ret { - fmt.Printf("%s,%s,%s,%s,%s,%s,%s,%s,%s\n", rm["source"], rm["iptext"], rm["reason"], rm["bancount"], rm["action"], rm["cn"], rm["as"], rm["events_count"], rm["until"]) - } - } else if config.output == "json" { - x, _ := json.MarshalIndent(ret, "", 
" ") - fmt.Printf("%s", string(x)) - } else if config.output == "human" { - - uniqAS := map[string]bool{} - uniqCN := map[string]bool{} - - table := tablewriter.NewWriter(os.Stdout) - table.SetHeader([]string{"Source", "Ip", "Reason", "Bans", "Action", "Country", "AS", "Events", "Expiration"}) - - dispcount := 0 - apicount := 0 - for _, rm := range ret { - if !displayAPI && rm["source"] == "api" { - apicount++ - if _, ok := uniqAS[rm["as"]]; !ok { - uniqAS[rm["as"]] = true - } - if _, ok := uniqCN[rm["cn"]]; !ok { - uniqCN[rm["cn"]] = true - } - } - if displayALL { - if rm["source"] == "api" { - if displayAPI { - table.Append([]string{rm["source"], rm["iptext"], rm["reason"], rm["bancount"], rm["action"], rm["cn"], rm["as"], rm["events_count"], rm["until"]}) - dispcount++ - continue - } - } else { - table.Append([]string{rm["source"], rm["iptext"], rm["reason"], rm["bancount"], rm["action"], rm["cn"], rm["as"], rm["events_count"], rm["until"]}) - dispcount++ - continue - } - } else if dispcount < displayLimit { - if displayAPI { - if rm["source"] == "api" { - table.Append([]string{rm["source"], rm["iptext"], rm["reason"], rm["bancount"], rm["action"], rm["cn"], rm["as"], rm["events_count"], rm["until"]}) - dispcount++ - continue - } - } else { - if rm["source"] != "api" { - table.Append([]string{rm["source"], rm["iptext"], rm["reason"], rm["bancount"], rm["action"], rm["cn"], rm["as"], rm["events_count"], rm["until"]}) - dispcount++ - continue - } - } - } - } - if dispcount > 0 { - if !displayAPI { - fmt.Printf("%d local decisions:\n", dispcount) - } else if displayAPI && !displayALL { - fmt.Printf("%d decision from API\n", dispcount) - } else if displayALL && displayAPI { - fmt.Printf("%d decision from crowdsec and API\n", dispcount) - } - table.Render() // Send output - if dispcount > displayLimit && !displayALL { - fmt.Printf("Additional records stripped.\n") - } - } else { - if displayAPI { - fmt.Println("No API decisions") - } else { - fmt.Println("No local decisions") - } - } - if !displayAPI { - fmt.Printf("And %d records from API, %d distinct AS, %d distinct countries\n", apicount, len(uniqAS), len(uniqCN)) - } - } - return nil -} - -func BanAdd(target string, duration string, reason string, action string) error { - var signalOcc types.SignalOccurence - var err error - - signalOcc, err = simpleBanToSignal(target, reason, duration, action, "", "", "", "cli") - if err != nil { - return fmt.Errorf("unable to insert ban : %v", err) - } - err = outputCTX.Insert(signalOcc) - if err != nil { - return err - } - err = outputCTX.Flush() - if err != nil { - return err - } - log.Infof("%s %s for %s (%s)", action, target, duration, reason) - return nil -} - -func NewBanCmds() *cobra.Command { - /*TODO : add a remediation type*/ - var cmdBan = &cobra.Command{ - Use: "ban [command] ", - Short: "Manage bans/mitigations", - Long: `This is the main interaction point with local ban database for humans. 
- -You can add/delete/list or flush current bans in your local ban DB.`, - Args: cobra.MinimumNArgs(1), - PersistentPreRunE: func(cmd *cobra.Command, args []string) error { - var err error - if !config.configured { - return fmt.Errorf("you must configure cli before using bans") - } - - outputConfig := outputs.OutputFactory{ - BackendFolder: config.BackendPluginFolder, - Flush: false, - } - - outputCTX, err = outputs.NewOutput(&outputConfig) - if err != nil { - return fmt.Errorf(err.Error()) - } - return nil - }, - } - cmdBan.PersistentFlags().StringVar(&remediationType, "remediation", "ban", "Set specific remediation type : ban|slow|captcha") - cmdBan.Flags().SortFlags = false - cmdBan.PersistentFlags().SortFlags = false - - var cmdBanAdd = &cobra.Command{ - Use: "add [ip|range] ", - Short: "Adds a ban against a given ip/range for the provided duration", - Long: ` -Allows to add a ban against a specific ip or range target for a specific duration. - -The duration argument can be expressed in seconds(s), minutes(m) or hours (h). - -See [time.ParseDuration](https://golang.org/pkg/time/#ParseDuration) for more informations.`, - Example: `cscli ban add ip 1.2.3.4 24h "scan" -cscli ban add range 1.2.3.0/24 24h "the whole range"`, - Args: cobra.MinimumNArgs(4), - } - cmdBan.AddCommand(cmdBanAdd) - var cmdBanAddIp = &cobra.Command{ - Use: "ip ", - Short: "Adds the specific ip to the ban db", - Long: `Duration must be [time.ParseDuration](https://golang.org/pkg/time/#ParseDuration), expressed in s/m/h.`, - Example: `cscli ban add ip 1.2.3.4 12h "the scan"`, - Args: cobra.MinimumNArgs(3), - Run: func(cmd *cobra.Command, args []string) { - reason := strings.Join(args[2:], " ") - if err := BanAdd(args[0], args[1], reason, remediationType); err != nil { - log.Fatalf("failed to add ban to database : %v", err) - } - }, - } - cmdBanAdd.AddCommand(cmdBanAddIp) - var cmdBanAddRange = &cobra.Command{ - Use: "range ", - Short: "Adds the specific ip to the ban db", - Long: `Duration must be [time.ParseDuration](https://golang.org/pkg/time/#ParseDuration) compatible, expressed in s/m/h.`, - Example: `cscli ban add range 1.2.3.0/24 12h "the whole range"`, - Args: cobra.MinimumNArgs(3), - Run: func(cmd *cobra.Command, args []string) { - reason := strings.Join(args[2:], " ") - if err := BanAdd(args[0], args[1], reason, remediationType); err != nil { - log.Fatalf("failed to add ban to database : %v", err) - } - }, - } - cmdBanAdd.AddCommand(cmdBanAddRange) - var cmdBanDel = &cobra.Command{ - Use: "del [command] ", - Short: "Delete bans from db", - Long: "The removal of the bans can be applied on a single IP address or directly on a IP range.", - Example: `cscli ban del ip 1.2.3.4 -cscli ban del range 1.2.3.0/24`, - Args: cobra.MinimumNArgs(2), - } - cmdBan.AddCommand(cmdBanDel) - - var cmdBanFlush = &cobra.Command{ - Use: "flush", - Short: "Fush ban DB", - Example: `cscli ban flush`, - Args: cobra.NoArgs, - Run: func(cmd *cobra.Command, args []string) { - if err := outputCTX.DeleteAll(); err != nil { - log.Fatalf(err.Error()) - } - log.Printf("Ban DB flushed") - }, - } - cmdBan.AddCommand(cmdBanFlush) - var cmdBanDelIp = &cobra.Command{ - Use: "ip ", - Short: "Delete bans for given ip from db", - Example: `cscli ban del ip 1.2.3.4`, - Args: cobra.ExactArgs(1), - Run: func(cmd *cobra.Command, args []string) { - count, err := outputCTX.Delete(args[0]) - if err != nil { - log.Fatalf("failed to delete %s : %v", args[0], err) - } - log.Infof("Deleted %d entries", count) - }, - } - cmdBanDel.AddCommand(cmdBanDelIp) - var 
cmdBanDelRange = &cobra.Command{ - Use: "range ", - Short: "Delete bans for given ip from db", - Example: `cscli ban del range 1.2.3.0/24`, - Args: cobra.ExactArgs(1), - Run: func(cmd *cobra.Command, args []string) { - count, err := outputCTX.Delete(args[0]) - if err != nil { - log.Fatalf("failed to delete %s : %v", args[0], err) - } - log.Infof("Deleted %d entries", count) - }, - } - cmdBanDel.AddCommand(cmdBanDelRange) - - var cmdBanList = &cobra.Command{ - Use: "list", - Short: "List local or api bans/remediations", - Long: `List the bans, by default only local decisions. - -If --all/-a is specified, bans will be displayed without limit (--limit). -Default limit is 50. - -Time can be specified with --at and support a variety of date formats: - - Jan 2 15:04:05 - - Mon Jan 02 15:04:05.000000 2006 - - 2006-01-02T15:04:05Z07:00 - - 2006/01/02 - - 2006/01/02 15:04 - - 2006-01-02 - - 2006-01-02 15:04 -`, - Example: `ban list --range 0.0.0.0/0 : will list all - ban list --country CN - ban list --reason crowdsecurity/http-probing - ban list --as OVH`, - Args: cobra.ExactArgs(0), - Run: func(cmd *cobra.Command, args []string) { - if err := BanList(); err != nil { - log.Fatalf("failed to list bans : %v", err) - } - }, - } - cmdBanList.PersistentFlags().StringVar(&atTime, "at", "", "List bans at given time") - cmdBanList.PersistentFlags().BoolVarP(&displayALL, "all", "a", false, "List bans without limit") - cmdBanList.PersistentFlags().BoolVarP(&displayAPI, "api", "", false, "List as well bans received from API") - cmdBanList.PersistentFlags().StringVar(&ipFilter, "ip", "", "List bans for given IP") - cmdBanList.PersistentFlags().StringVar(&rangeFilter, "range", "", "List bans belonging to given range") - cmdBanList.PersistentFlags().StringVar(&reasonFilter, "reason", "", "List bans containing given reason") - cmdBanList.PersistentFlags().StringVar(&countryFilter, "country", "", "List bans belonging to given country code") - cmdBanList.PersistentFlags().StringVar(&asFilter, "as", "", "List bans belonging to given AS name") - cmdBanList.PersistentFlags().IntVar(&displayLimit, "limit", 50, "Limit of bans to display (default 50)") - - cmdBan.AddCommand(cmdBanList) - return cmdBan -} diff --git a/cmd/crowdsec-cli/bouncers.go b/cmd/crowdsec-cli/bouncers.go new file mode 100644 index 000000000..018c5ce8e --- /dev/null +++ b/cmd/crowdsec-cli/bouncers.go @@ -0,0 +1,153 @@ +package main + +import ( + "encoding/json" + "fmt" + "os" + "time" + + middlewares "github.com/crowdsecurity/crowdsec/pkg/apiserver/middlewares/v1" + "github.com/crowdsecurity/crowdsec/pkg/database" + "github.com/enescakir/emoji" + "github.com/olekukonko/tablewriter" + log "github.com/sirupsen/logrus" + "github.com/spf13/cobra" +) + +var keyName string +var keyIP string +var keyLength int + +func NewBouncersCmd() *cobra.Command { + /* ---- DECISIONS COMMAND */ + var cmdBouncers = &cobra.Command{ + Use: "bouncers [action]", + Short: "Manage bouncers", + Long: ` +Bouncers Management. 
+ +To list/add/delete bouncers +`, + Args: cobra.MinimumNArgs(1), + PersistentPreRun: func(cmd *cobra.Command, args []string) { + var err error + dbClient, err = database.NewClient(csConfig.DbConfig) + if err != nil { + log.Fatalf("unable to create new database client: %s", err) + } + }, + } + + var cmdBouncersList = &cobra.Command{ + Use: "list", + Short: "List bouncers", + Long: `List bouncers`, + Example: `cscli bouncers list`, + Args: cobra.ExactArgs(0), + Run: func(cmd *cobra.Command, arg []string) { + blockers, err := dbClient.ListBouncers() + if err != nil { + log.Errorf("unable to list bouncers: %s", err) + } + if csConfig.Cscli.Output == "human" { + + table := tablewriter.NewWriter(os.Stdout) + table.SetCenterSeparator("") + table.SetColumnSeparator("") + + table.SetHeaderAlignment(tablewriter.ALIGN_LEFT) + table.SetAlignment(tablewriter.ALIGN_LEFT) + table.SetHeader([]string{"Name", "IP Address", "Valid", "Last API pull", "Type", "Version"}) + for _, b := range blockers { + var revoked string + if !b.Revoked { + revoked = fmt.Sprintf("%s", emoji.CheckMark) + } else { + revoked = fmt.Sprintf("%s", emoji.Prohibited) + } + table.Append([]string{b.Name, b.IPAddress, revoked, b.LastPull.Format(time.RFC3339), b.Type, b.Version}) + } + table.Render() + } else if csConfig.Cscli.Output == "json" { + x, err := json.MarshalIndent(blockers, "", " ") + if err != nil { + log.Fatalf("failed to marshal bouncers: %s", err) + } + fmt.Printf("%s", string(x)) + } else if csConfig.Cscli.Output == "raw" { + for _, b := range blockers { + var revoked string + if !b.Revoked { + revoked = "validated" + } else { + revoked = "pending" + } + fmt.Printf("%s,%s,%s,%s,%s\n", b.Name, b.IPAddress, revoked, b.LastPull.Format(time.RFC3339), b.Version) + } + } + }, + } + cmdBouncers.AddCommand(cmdBouncersList) + + var cmdBouncersAdd = &cobra.Command{ + Use: "add MyBouncerName [--length 16]", + Short: "add bouncer", + Long: `add bouncer`, + Example: `cscli bouncers add MyBouncerName +cscli bouncers add MyBouncerName -l 24`, + Args: cobra.ExactArgs(1), + Run: func(cmd *cobra.Command, arg []string) { + keyName := arg[0] + if keyName == "" { + log.Errorf("Please provide a name for the api key") + return + } + apiKey, err := middlewares.GenerateAPIKey(keyLength) + if err != nil { + log.Errorf("unable to generate api key: %s", err) + return + } + err = dbClient.CreateBouncer(keyName, keyIP, middlewares.HashSHA512(apiKey)) + if err != nil { + log.Errorf("unable to create bouncer: %s", err) + return + } + + if csConfig.Cscli.Output == "human" { + fmt.Printf("Api key for '%s':\n\n", keyName) + fmt.Printf(" %s\n\n", apiKey) + fmt.Print("Please keep this key since you will not be able to retrieve it!\n") + } else if csConfig.Cscli.Output == "raw" { + fmt.Printf("%s", apiKey) + } else if csConfig.Cscli.Output == "json" { + j, err := json.Marshal(apiKey) + if err != nil { + log.Fatalf("unable to marshal api key") + } + fmt.Printf("%s", string(j)) + } + }, + } + cmdBouncersAdd.Flags().IntVarP(&keyLength, "length", "l", 16, "length of the api key") + cmdBouncers.AddCommand(cmdBouncersAdd) + + var cmdBouncersDelete = &cobra.Command{ + Use: "delete MyBouncerName", + Short: "delete bouncer", + Args: cobra.ExactArgs(1), + Run: func(cmd *cobra.Command, arg []string) { + keyName := arg[0] + if keyName == "" { + log.Errorf("Please provide a bouncer name") + return + } + err := dbClient.DeleteBouncer(keyName) + if err != nil { + log.Errorf("unable to delete bouncer: %s", err) + return + } + }, + } + cmdBouncers.AddCommand(cmdBouncersDelete) + return 
cmdBouncers +} diff --git a/cmd/crowdsec-cli/capi.go b/cmd/crowdsec-cli/capi.go new file mode 100644 index 000000000..7e7bc85ed --- /dev/null +++ b/cmd/crowdsec-cli/capi.go @@ -0,0 +1,161 @@ +package main + +import ( + "context" + "fmt" + "io/ioutil" + "net/http/httputil" + "net/url" + + "github.com/crowdsecurity/crowdsec/pkg/apiclient" + "github.com/crowdsecurity/crowdsec/pkg/csconfig" + "github.com/crowdsecurity/crowdsec/pkg/cwhub" + "github.com/crowdsecurity/crowdsec/pkg/cwversion" + "github.com/crowdsecurity/crowdsec/pkg/models" + "github.com/go-openapi/strfmt" + log "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + + "gopkg.in/yaml.v2" +) + +var CAPIURLPrefix string = "v2" +var CAPIBaseURL string = "https://api.crowdsec.net/" + +func NewCapiCmd() *cobra.Command { + var cmdCapi = &cobra.Command{ + Use: "capi [action]", + Short: "Manage interaction with Central API (CAPI)", + Args: cobra.MinimumNArgs(1), + PersistentPreRunE: func(cmd *cobra.Command, args []string) error { + if csConfig.API.Server == nil { + log.Fatalln("There is no API->server configuration") + } + if csConfig.API.Server.OnlineClient == nil { + log.Fatalf("no configuration for crowdsec API in '%s'", *csConfig.Self) + } + + return nil + }, + } + + var cmdCapiRegister = &cobra.Command{ + Use: "register", + Short: "Register to Central API (CAPI)", + Args: cobra.MinimumNArgs(0), + Run: func(cmd *cobra.Command, args []string) { + var err error + + id, err := generateID() + if err != nil { + log.Fatalf("unable to generate machine id: %s", err) + } + password := strfmt.Password(generatePassword(passwordLength)) + apiurl, err := url.Parse(CAPIBaseURL) + if err != nil { + log.Fatalf("unable to parse api url %s : %s", CAPIBaseURL, err) + } + _, err = apiclient.RegisterClient(&apiclient.Config{ + MachineID: id, + Password: password, + UserAgent: fmt.Sprintf("crowdsec/%s", cwversion.VersionStr()), + URL: apiurl, + VersionPrefix: CAPIURLPrefix, + }, nil) + + if err != nil { + log.Fatalf("api client register ('%s'): %s", CAPIBaseURL, err) + } + log.Printf("Successfully registered to Central API (CAPI)") + + var dumpFile string + + if outputFile != "" { + dumpFile = outputFile + } else if csConfig.API.Server.OnlineClient.CredentialsFilePath != "" { + dumpFile = csConfig.API.Server.OnlineClient.CredentialsFilePath + } else { + dumpFile = "" + } + apiCfg := csconfig.ApiCredentialsCfg{ + Login: id, + Password: password.String(), + URL: CAPIBaseURL, + } + apiConfigDump, err := yaml.Marshal(apiCfg) + if err != nil { + log.Fatalf("unable to marshal api credentials: %s", err) + } + if dumpFile != "" { + err = ioutil.WriteFile(dumpFile, apiConfigDump, 0600) + if err != nil { + log.Fatalf("write api credentials in '%s' failed: %s", dumpFile, err) + } + log.Printf("API credentials dumped to '%s'", dumpFile) + } else { + fmt.Printf("%s\n", string(apiConfigDump)) + } + + log.Warningf("Run 'systemctl reload crowdsec' for the new configuration to be effective") + }, + } + cmdCapiRegister.Flags().StringVarP(&outputFile, "file", "f", "", "output file destination") + cmdCapi.AddCommand(cmdCapiRegister) + + var cmdCapiStatus = &cobra.Command{ + Use: "status", + Short: "Check status with the Central API (CAPI)", + Args: cobra.MinimumNArgs(0), + Run: func(cmd *cobra.Command, args []string) { + var err error + if csConfig.API.Server == nil { + log.Fatalln("There is no configuration on 'api_client:'") + } + if csConfig.API.Server.OnlineClient == nil { + log.Fatalf("Please provide credentials for the API in '%s'", 
csConfig.API.Server.OnlineClient.CredentialsFilePath) + } + + if csConfig.API.Server.OnlineClient.Credentials == nil { + log.Fatalf("no credentials for crowdsec API in '%s'", csConfig.API.Server.OnlineClient.CredentialsFilePath) + } + + password := strfmt.Password(csConfig.API.Server.OnlineClient.Credentials.Password) + apiurl, err := url.Parse(csConfig.API.Server.OnlineClient.Credentials.URL) + if err != nil { + log.Fatalf("parsing api url ('%s'): %s", csConfig.API.Server.OnlineClient.Credentials.URL, err) + } + if err := cwhub.GetHubIdx(csConfig.Cscli); err != nil { + log.Fatalf("Failed to load hub index : %s", err) + } + scenarios, err := cwhub.GetUpstreamInstalledScenariosAsString() + if err != nil { + log.Fatalf("failed to get scenarios : %s", err.Error()) + } + + Client, err = apiclient.NewDefaultClient(apiurl, CAPIURLPrefix, fmt.Sprintf("crowdsec/%s", cwversion.VersionStr()), nil) + if err != nil { + log.Fatalf("init default client: %s", err) + } + t := models.WatcherAuthRequest{ + MachineID: &csConfig.API.Server.OnlineClient.Credentials.Login, + Password: &password, + Scenarios: scenarios, + } + log.Infof("Loaded credentials from %s", csConfig.API.Server.OnlineClient.CredentialsFilePath) + log.Infof("Trying to authenticate with username %s on %s", csConfig.API.Server.OnlineClient.Credentials.Login, apiurl) + resp, err := Client.Auth.AuthenticateWatcher(context.Background(), t) + if err != nil { + log.Fatalf("Failed to authenticate to Central API (CAPI) : %s", err) + } else { + log.Infof("You can successfully interact with Central API (CAPI)") + } + for k, v := range resp.Response.Header { + log.Debugf("[headers] %s : %s", k, v) + } + dump, _ := httputil.DumpResponse(resp.Response, true) + log.Debugf("Response: %s", string(dump)) + }, + } + cmdCapi.AddCommand(cmdCapiStatus) + return cmdCapi +} diff --git a/cmd/crowdsec-cli/collections.go b/cmd/crowdsec-cli/collections.go new file mode 100644 index 000000000..409b831d3 --- /dev/null +++ b/cmd/crowdsec-cli/collections.go @@ -0,0 +1,139 @@ +package main + +import ( + "fmt" + + "github.com/crowdsecurity/crowdsec/pkg/cwhub" + + log "github.com/sirupsen/logrus" + + "github.com/spf13/cobra" +) + +func NewCollectionsCmd() *cobra.Command { + var cmdCollections = &cobra.Command{ + Use: "collections [action]", + Short: "Manage collections from hub", + Long: `Install/Remove/Upgrade/Inspect collections from the CrowdSec Hub.`, + /*TBD fix help*/ + Args: cobra.MinimumNArgs(1), + PersistentPreRunE: func(cmd *cobra.Command, args []string) error { + if csConfig.Cscli == nil { + return fmt.Errorf("you must configure cli before interacting with hub") + } + + if err := setHubBranch(); err != nil { + return fmt.Errorf("error while setting hub branch: %s", err) + } + return nil + }, + PersistentPostRun: func(cmd *cobra.Command, args []string) { + if cmd.Name() == "inspect" || cmd.Name() == "list" { + return + } + log.Infof("Run 'systemctl reload crowdsec' for the new configuration to be effective.") + }, + } + + var cmdCollectionsInstall = &cobra.Command{ + Use: "install collection", + Short: "Install given collection(s)", + Long: `Fetch and install given collection(s) from hub`, + Example: `cscli collections install crowdsec/xxx crowdsec/xyz`, + Args: cobra.MinimumNArgs(1), + Run: func(cmd *cobra.Command, args []string) { + if err := cwhub.GetHubIdx(csConfig.Cscli); err != nil { + log.Fatalf("failed to get Hub index : %v", err) + } + for _, name := range args { + InstallItem(name, cwhub.COLLECTIONS, forceInstall) + } + }, + } + 
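A note on the `cscli bouncers add` flow above: the command generates a random API key and stores only its SHA-512 digest via middlewares.HashSHA512, which is why the plaintext key cannot be retrieved after creation. Below is a minimal, self-contained sketch of what such helpers could look like, assuming thin wrappers over crypto/rand and crypto/sha512; the generateAPIKey/hashSHA512 names, signatures, and hex encoding are illustrative and not taken from the middlewares package.

// Sketch only: plausible shape of the API key helpers, under the
// assumptions stated above. Not the actual middlewares implementation.
package main

import (
	"crypto/rand"
	"crypto/sha512"
	"encoding/hex"
	"fmt"
)

// generateAPIKey returns a hex-encoded random key of n bytes.
func generateAPIKey(n int) (string, error) {
	b := make([]byte, n)
	if _, err := rand.Read(b); err != nil {
		return "", err
	}
	return hex.EncodeToString(b), nil
}

// hashSHA512 returns the hex digest that would be stored server side;
// only the hash reaches the database.
func hashSHA512(key string) string {
	sum := sha512.Sum512([]byte(key))
	return hex.EncodeToString(sum[:])
}

func main() {
	key, err := generateAPIKey(16)
	if err != nil {
		panic(err)
	}
	fmt.Println("api key :", key)
	fmt.Println("stored  :", hashSHA512(key))
}

Storing only the digest means a leaked database does not expose usable keys: a bouncer must present the original key, which the API hashes and compares against the stored value.
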
cmdCollectionsInstall.PersistentFlags().BoolVarP(&downloadOnly, "download-only", "d", false, "Only download packages, don't enable") + cmdCollectionsInstall.PersistentFlags().BoolVar(&forceInstall, "force", false, "Force install : Overwrite tainted and outdated files") + cmdCollections.AddCommand(cmdCollectionsInstall) + + var cmdCollectionsRemove = &cobra.Command{ + Use: "remove collection", + Short: "Remove given collection(s)", + Long: `Remove given collection(s) from hub`, + Example: `cscli collections remove crowdsec/xxx crowdsec/xyz`, + Args: cobra.MinimumNArgs(1), + Run: func(cmd *cobra.Command, args []string) { + if err := cwhub.GetHubIdx(csConfig.Cscli); err != nil { + log.Fatalf("Failed to get Hub index : %v", err) + } + + if removeAll { + RemoveMany(cwhub.COLLECTIONS, "") + } else { + for _, name := range args { + RemoveMany(cwhub.COLLECTIONS, name) + } + } + }, + } + cmdCollectionsRemove.PersistentFlags().BoolVar(&purgeRemove, "purge", false, "Delete source file too") + cmdCollectionsRemove.PersistentFlags().BoolVar(&removeAll, "all", false, "Delete all the files in selected scope") + cmdCollections.AddCommand(cmdCollectionsRemove) + + var cmdCollectionsUpgrade = &cobra.Command{ + Use: "upgrade collection", + Short: "Upgrade given collection(s)", + Long: `Fetch and upgrade given collection(s) from hub`, + Example: `cscli collections upgrade crowdsec/xxx crowdsec/xyz`, + Args: cobra.MinimumNArgs(1), + Run: func(cmd *cobra.Command, args []string) { + if err := cwhub.GetHubIdx(csConfig.Cscli); err != nil { + log.Fatalf("Failed to get Hub index : %v", err) + } + if upgradeAll { + UpgradeConfig(cwhub.COLLECTIONS, "", forceUpgrade) + } else { + for _, name := range args { + UpgradeConfig(cwhub.COLLECTIONS, name, forceUpgrade) + } + } + }, + } + cmdCollectionsUpgrade.PersistentFlags().BoolVarP(&upgradeAll, "all", "a", false, "Upgrade all the enabled collections") + cmdCollectionsUpgrade.PersistentFlags().BoolVar(&forceUpgrade, "force", false, "Force upgrade : Overwrite tainted and outdated files") + cmdCollections.AddCommand(cmdCollectionsUpgrade) + + var cmdCollectionsInspect = &cobra.Command{ + Use: "inspect collection", + Short: "Inspect given collection", + Long: `Inspect given collection`, + Example: `cscli collections inspect crowdsec/xxx crowdsec/xyz`, + Args: cobra.MinimumNArgs(1), + Run: func(cmd *cobra.Command, args []string) { + if err := cwhub.GetHubIdx(csConfig.Cscli); err != nil { + log.Fatalf("failed to get Hub index : %v", err) + } + for _, name := range args { + InspectItem(name, cwhub.COLLECTIONS) + } + }, + } + cmdCollectionsInspect.PersistentFlags().StringVarP(&prometheusURL, "url", "u", "http://127.0.0.1:6060/metrics", "Prometheus url") + cmdCollections.AddCommand(cmdCollectionsInspect) + + var cmdCollectionsList = &cobra.Command{ + Use: "list collection [-a]", + Short: "List all collections or given one", + Long: `List all collections or given one`, + Example: `cscli collections list`, + Args: cobra.ExactArgs(0), + Run: func(cmd *cobra.Command, args []string) { + if err := cwhub.GetHubIdx(csConfig.Cscli); err != nil { + log.Fatalf("failed to get Hub index : %v", err) + } + ListItem(cwhub.COLLECTIONS, args) + }, + } + cmdCollectionsList.PersistentFlags().BoolVarP(&listAll, "all", "a", false, "List disabled items as well") + cmdCollections.AddCommand(cmdCollectionsList) + + return cmdCollections +} diff --git a/cmd/crowdsec-cli/config.go b/cmd/crowdsec-cli/config.go index 0952fcce3..f383ba9c0 100644 --- a/cmd/crowdsec-cli/config.go +++ 
b/cmd/crowdsec-cli/config.go @@ -1,39 +1,206 @@ package main import ( + "encoding/json" "fmt" + "io/ioutil" + "os" "github.com/crowdsecurity/crowdsec/pkg/csconfig" - + "github.com/crowdsecurity/crowdsec/pkg/cwhub" + "github.com/crowdsecurity/crowdsec/pkg/types" log "github.com/sirupsen/logrus" "github.com/spf13/cobra" "gopkg.in/yaml.v2" ) -/*CliCfg is the cli configuration structure, might be unexported*/ -type cliConfig struct { - configured bool - ConfigFilePath string `yaml:"config_file"` - configFolder string - output string - HubFolder string `yaml:"hub_folder"` - InstallFolder string - BackendPluginFolder string `yaml:"backend_folder"` - DataFolder string `yaml:"data_folder"` - SimulationCfgPath string `yaml:"simulation_path,omitempty"` - SimulationCfg *csconfig.SimulationConfig +type OldAPICfg struct { + MachineID string `json:"machine_id"` + Password string `json:"password"` +} + +/* Backup crowdsec configurations to directory : + +- Main config (config.yaml) +- Profiles config (profiles.yaml) +- Simulation config (simulation.yaml) +- Backup of API credentials (local API and online API) +- List of scenarios, parsers, postoverflows and collections that are up-to-date +- Tainted/local/out-of-date scenarios, parsers, postoverflows and collections +*/ +func backupConfigToDirectory(dirPath string) error { + var err error + + if dirPath == "" { + return fmt.Errorf("directory path can't be empty") + } + log.Infof("Starting configuration backup") + _, err = os.Stat(dirPath) + if err == nil { + return fmt.Errorf("%s already exists", dirPath) + } + if err = os.MkdirAll(dirPath, os.ModePerm); err != nil { + return fmt.Errorf("error while creating %s : %s", dirPath, err) + } + + if csConfig.ConfigPaths.SimulationFilePath != "" { + backupSimulation := fmt.Sprintf("%s/simulation.yaml", dirPath) + if err = types.CopyFile(csConfig.ConfigPaths.SimulationFilePath, backupSimulation); err != nil { + return fmt.Errorf("failed copy %s to %s : %s", csConfig.ConfigPaths.SimulationFilePath, backupSimulation, err) + } + log.Infof("Saved simulation to %s", backupSimulation) + } + if csConfig.Crowdsec != nil && csConfig.Crowdsec.AcquisitionFilePath != "" { + backupAcquisition := fmt.Sprintf("%s/acquis.yaml", dirPath) + if err = types.CopyFile(csConfig.Crowdsec.AcquisitionFilePath, backupAcquisition); err != nil { + return fmt.Errorf("failed copy %s to %s : %s", csConfig.Crowdsec.AcquisitionFilePath, backupAcquisition, err) + } + log.Infof("Saved acquis to %s", backupAcquisition) + } + if ConfigFilePath != "" { + backupMain := fmt.Sprintf("%s/config.yaml", dirPath) + if err = types.CopyFile(ConfigFilePath, backupMain); err != nil { + return fmt.Errorf("failed copy %s to %s : %s", ConfigFilePath, backupMain, err) + } + log.Infof("Saved default yaml to %s", backupMain) + } + if csConfig.API != nil && csConfig.API.Server != nil && csConfig.API.Server.OnlineClient != nil && csConfig.API.Server.OnlineClient.CredentialsFilePath != "" { + backupCAPICreds := fmt.Sprintf("%s/online_api_credentials.yaml", dirPath) + if err = types.CopyFile(csConfig.API.Server.OnlineClient.CredentialsFilePath, backupCAPICreds); err != nil { + return fmt.Errorf("failed copy %s to %s : %s", csConfig.API.Server.OnlineClient.CredentialsFilePath, backupCAPICreds, err) + } + log.Infof("Saved online API credentials to %s", backupCAPICreds) + } + if csConfig.API != nil && csConfig.API.Client != nil && csConfig.API.Client.CredentialsFilePath != "" { + backupLAPICreds := fmt.Sprintf("%s/local_api_credentials.yaml", dirPath) + if err = 
types.CopyFile(csConfig.API.Client.CredentialsFilePath, backupLAPICreds); err != nil { + return fmt.Errorf("failed copy %s to %s : %s", csConfig.API.Client.CredentialsFilePath, backupLAPICreds, err) + } + log.Infof("Saved local API credentials to %s", backupLAPICreds) + } + if csConfig.API != nil && csConfig.API.Server != nil && csConfig.API.Server.ProfilesPath != "" { + backupProfiles := fmt.Sprintf("%s/profiles.yaml", dirPath) + if err = types.CopyFile(csConfig.API.Server.ProfilesPath, backupProfiles); err != nil { + return fmt.Errorf("failed copy %s to %s : %s", csConfig.API.Server.ProfilesPath, backupProfiles, err) + } + log.Infof("Saved profiles to %s", backupProfiles) + } + + if err = BackupHub(dirPath); err != nil { + return fmt.Errorf("failed to backup hub config : %s", err) + } + + return nil +} + +/* Restore crowdsec configurations from directory : + +- Main config (config.yaml) +- Profiles config (profiles.yaml) +- Simulation config (simulation.yaml) +- Backup of API credentials (local API and online API) +- List of scenarios, parsers, postoverflows and collections that are up-to-date +- Tainted/local/out-of-date scenarios, parsers, postoverflows and collections +*/ +func restoreConfigFromDirectory(dirPath string) error { + var err error + + if !restoreOldBackup { + backupMain := fmt.Sprintf("%s/config.yaml", dirPath) + if _, err = os.Stat(backupMain); err == nil { + if csConfig.ConfigPaths != nil && csConfig.ConfigPaths.ConfigDir != "" { + if err = types.CopyFile(backupMain, csConfig.ConfigPaths.ConfigDir); err != nil { + return fmt.Errorf("failed copy %s to %s : %s", backupMain, csConfig.ConfigPaths.ConfigDir, err) + } + } + } + + // Now that we have config.yaml, we should regenerate the config struct to have the right paths etc. + ConfigFilePath = fmt.Sprintf("%s/config.yaml", csConfig.ConfigPaths.ConfigDir) + initConfig() + + backupCAPICreds := fmt.Sprintf("%s/online_api_credentials.yaml", dirPath) + if _, err = os.Stat(backupCAPICreds); err == nil { + if err = types.CopyFile(backupCAPICreds, csConfig.API.Server.OnlineClient.CredentialsFilePath); err != nil { + return fmt.Errorf("failed copy %s to %s : %s", backupCAPICreds, csConfig.API.Server.OnlineClient.CredentialsFilePath, err) + } + } + + backupLAPICreds := fmt.Sprintf("%s/local_api_credentials.yaml", dirPath) + if _, err = os.Stat(backupLAPICreds); err == nil { + if err = types.CopyFile(backupLAPICreds, csConfig.API.Client.CredentialsFilePath); err != nil { + return fmt.Errorf("failed copy %s to %s : %s", backupLAPICreds, csConfig.API.Client.CredentialsFilePath, err) + } + } + + backupProfiles := fmt.Sprintf("%s/profiles.yaml", dirPath) + if _, err = os.Stat(backupProfiles); err == nil { + if err = types.CopyFile(backupProfiles, csConfig.API.Server.ProfilesPath); err != nil { + return fmt.Errorf("failed copy %s to %s : %s", backupProfiles, csConfig.API.Server.ProfilesPath, err) + } + } + } else { + var oldAPICfg OldAPICfg + backupOldAPICfg := fmt.Sprintf("%s/api_creds.json", dirPath) + + jsonFile, err := os.Open(backupOldAPICfg) + if err != nil { + log.Warningf("failed to open %s : %s", backupOldAPICfg, err) + } else { + byteValue, _ := ioutil.ReadAll(jsonFile) + err = json.Unmarshal(byteValue, &oldAPICfg) + if err != nil { + return fmt.Errorf("failed to load json file %s : %s", backupOldAPICfg, err) + } + + apiCfg := csconfig.ApiCredentialsCfg{ + Login: oldAPICfg.MachineID, + Password: oldAPICfg.Password, + URL: CAPIBaseURL, + } + apiConfigDump, err := yaml.Marshal(apiCfg) + if err != nil { + return fmt.Errorf("unable to dump api 
credentials: %s", err) + } + apiConfigDumpFile := fmt.Sprintf("%s/online_api_credentials.yaml", csConfig.ConfigPaths.ConfigDir) + if csConfig.API.Server.OnlineClient != nil && csConfig.API.Server.OnlineClient.CredentialsFilePath != "" { + apiConfigDumpFile = csConfig.API.Server.OnlineClient.CredentialsFilePath + } + err = ioutil.WriteFile(apiConfigDumpFile, apiConfigDump, 0644) + if err != nil { + return fmt.Errorf("write api credentials in '%s' failed: %s", apiConfigDumpFile, err) + } + log.Infof("Saved API credentials to %s", apiConfigDumpFile) + } + } + + backupSimulation := fmt.Sprintf("%s/simulation.yaml", dirPath) + if _, err = os.Stat(backupSimulation); err == nil { + if err = types.CopyFile(backupSimulation, csConfig.ConfigPaths.SimulationFilePath); err != nil { + return fmt.Errorf("failed copy %s to %s : %s", backupSimulation, csConfig.ConfigPaths.SimulationFilePath, err) + } + } + + backupAcquisition := fmt.Sprintf("%s/acquis.yaml", dirPath) + if _, err = os.Stat(backupAcquisition); err == nil { + if err = types.CopyFile(backupAcquisition, csConfig.Crowdsec.AcquisitionFilePath); err != nil { + return fmt.Errorf("failed copy %s to %s : %s", backupAcquisition, csConfig.Crowdsec.AcquisitionFilePath, err) + } + } + + if err = RestoreHub(dirPath); err != nil { + return fmt.Errorf("failed to restore hub config : %s", err) + } + + return nil } func NewConfigCmd() *cobra.Command { var cmdConfig = &cobra.Command{ - Use: "config [command] ", - Short: "Allows to view/edit cscli config", - Long: `Allow to configure database plugin path and installation directory. -If no commands are specified, config is in interactive mode.`, - Example: `- cscli config show -- cscli config prompt`, - Args: cobra.ExactArgs(1), + Use: "config [command]", + Short: "Allows to view current config", + Args: cobra.ExactArgs(0), } var cmdConfigShow = &cobra.Command{ Use: "show", @@ -41,21 +208,128 @@ If no commands are specified, config is in interactive mode.`, Long: `Displays the current cli configuration.`, Args: cobra.ExactArgs(0), Run: func(cmd *cobra.Command, args []string) { - if config.output == "json" { - log.WithFields(log.Fields{ - "crowdsec_configuration_file": config.ConfigFilePath, - "backend_folder": config.BackendPluginFolder, - "data_folder": config.DataFolder, - }).Warning("Current config") - } else { - x, err := yaml.Marshal(config) - if err != nil { - log.Fatalf("failed to marshal current configuration : %v", err) + switch csConfig.Cscli.Output { + case "human": + fmt.Printf("Global:\n") + fmt.Printf(" - Configuration Folder : %s\n", csConfig.ConfigPaths.ConfigDir) + fmt.Printf(" - Data Folder : %s\n", csConfig.ConfigPaths.DataDir) + fmt.Printf(" - Log Folder : %s\n", csConfig.Common.LogDir) + fmt.Printf(" - Hub Folder : %s\n", csConfig.ConfigPaths.HubDir) + fmt.Printf(" - Simulation File : %s\n", csConfig.ConfigPaths.SimulationFilePath) + fmt.Printf(" - Log level : %s\n", csConfig.Common.LogLevel) + fmt.Printf(" - Log Media : %s\n", csConfig.Common.LogMedia) + fmt.Printf("Crowdsec:\n") + fmt.Printf(" - Acquisition File : %s\n", csConfig.Crowdsec.AcquisitionFilePath) + fmt.Printf(" - Parsers routines : %d\n", csConfig.Crowdsec.ParserRoutinesCount) + fmt.Printf("cscli:\n") + fmt.Printf(" - Output : %s\n", csConfig.Cscli.Output) + fmt.Printf(" - Hub Branch : %s\n", csConfig.Cscli.HubBranch) + fmt.Printf(" - Hub Folder : %s\n", csConfig.Cscli.HubDir) + fmt.Printf("API Client:\n") + fmt.Printf(" - URL : %s\n", csConfig.API.Client.Credentials.URL) + fmt.Printf(" - Login : %s\n", 
csConfig.API.Client.Credentials.Login) + fmt.Printf(" - Credentials File : %s\n", csConfig.API.Client.CredentialsFilePath) + fmt.Printf("Local API Server:\n") + fmt.Printf(" - Listen URL : %s\n", csConfig.API.Server.ListenURI) + fmt.Printf(" - Profile File : %s\n", csConfig.API.Server.ProfilesPath) + if csConfig.API.Server.TLS != nil { + if csConfig.API.Server.TLS.CertFilePath != "" { + fmt.Printf(" - Cert File : %s\n", csConfig.API.Server.TLS.CertFilePath) + } + if csConfig.API.Server.TLS.KeyFilePath != "" { + fmt.Printf(" - Key File : %s\n", csConfig.API.Server.TLS.KeyFilePath) + } } - fmt.Printf("%s", x) + fmt.Printf(" - Database:\n") + fmt.Printf(" - Type : %s\n", csConfig.DbConfig.Type) + switch csConfig.DbConfig.Type { + case "sqlite": + fmt.Printf(" - Path : %s\n", csConfig.DbConfig.DbPath) + case "mysql", "postgresql", "postgres": + fmt.Printf(" - Host : %s\n", csConfig.DbConfig.Host) + fmt.Printf(" - Port : %d\n", csConfig.DbConfig.Port) + fmt.Printf(" - User : %s\n", csConfig.DbConfig.User) + fmt.Printf(" - DB Name : %s\n", csConfig.DbConfig.DbName) + } + if csConfig.DbConfig.Flush != nil { + if *csConfig.DbConfig.Flush.MaxAge != "" { + fmt.Printf(" - Flush age : %s\n", *csConfig.DbConfig.Flush.MaxAge) + } + if *csConfig.DbConfig.Flush.MaxItems != 0 { + fmt.Printf(" - Flush size : %d\n", *csConfig.DbConfig.Flush.MaxItems) + } + } + + fmt.Printf("Central API:\n") + fmt.Printf(" - URL : %s\n", csConfig.API.Server.OnlineClient.Credentials.URL) + fmt.Printf(" - Login : %s\n", csConfig.API.Server.OnlineClient.Credentials.Login) + fmt.Printf(" - Credentials File : %s\n", csConfig.API.Server.OnlineClient.CredentialsFilePath) + case "json": + data, err := json.MarshalIndent(csConfig, "", " ") + if err != nil { + log.Fatalf("failed to marshal configuration: %s", err) + } + fmt.Printf("%s\n", string(data)) + case "raw": + data, err := yaml.Marshal(csConfig) + if err != nil { + log.Fatalf("failed to marshal configuration: %s", err) + } + fmt.Printf("%s\n", string(data)) } }, } cmdConfig.AddCommand(cmdConfigShow) + + var cmdConfigBackup = &cobra.Command{ + Use: "backup ", + Short: "Backup current config", + Long: `Backup the current crowdsec configuration including : + +- Main config (config.yaml) +- Simulation config (simulation.yaml) +- Profiles config (profiles.yaml) +- List of scenarios, parsers, postoverflows and collections that are up-to-date +- Tainted/local/out-of-date scenarios, parsers, postoverflows and collections +- Backup of API credentials (local API and online API)`, + Example: `cscli config backup ./my-backup`, + Args: cobra.ExactArgs(1), + Run: func(cmd *cobra.Command, args []string) { + var err error + if err = cwhub.GetHubIdx(csConfig.Cscli); err != nil { + log.Fatalf("failed to get Hub index : %v", err) + } + if err = backupConfigToDirectory(args[0]); err != nil { + log.Fatalf("Failed to backup configurations: %s", err) + } + }, + } + cmdConfig.AddCommand(cmdConfigBackup) + + var cmdConfigRestore = &cobra.Command{ + Use: "restore ", + Short: "Restore config in backup ", + Long: `Restore the crowdsec configuration from specified backup including: + +- Main config (config.yaml) +- Simulation config (simulation.yaml) +- Profiles config (profiles.yaml) +- List of scenarios, parsers, postoverflows and collections that are up-to-date +- Tainted/local/out-of-date scenarios, parsers, postoverflows and collections +- Backup of API credentials (local API and online API)`, + Args: cobra.ExactArgs(1), + Run: func(cmd *cobra.Command, args []string) { + var err error + if err = 
cwhub.GetHubIdx(csConfig.Cscli); err != nil { + log.Fatalf("failed to get Hub index : %v", err) + } + if err := restoreConfigFromDirectory(args[0]); err != nil { + log.Fatalf("failed restoring configurations from %s : %s", args[0], err) + } + }, + } + cmdConfigRestore.PersistentFlags().BoolVar(&restoreOldBackup, "old-backup", false, "To use when you are upgrading crowdsec v0.X to v1.X and you need to restore backup from v0.X") + cmdConfig.AddCommand(cmdConfigRestore) + return cmdConfig } diff --git a/cmd/crowdsec-cli/dashboard.go b/cmd/crowdsec-cli/dashboard.go index 37f36d17c..af7317b18 100644 --- a/cmd/crowdsec-cli/dashboard.go +++ b/cmd/crowdsec-cli/dashboard.go @@ -1,59 +1,56 @@ package main import ( - "archive/zip" - "bufio" - "bytes" - "context" - "encoding/json" "fmt" - "io" - "io/ioutil" - "net/http" "os" - "path" - "strings" - "time" + "path/filepath" + + "github.com/AlecAivazis/survey/v2" + "github.com/crowdsecurity/crowdsec/pkg/metabase" - "github.com/crowdsecurity/crowdsec/pkg/cwversion" - "github.com/dghubble/sling" - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/mount" - "github.com/docker/docker/client" - "github.com/docker/go-connections/nat" log "github.com/sirupsen/logrus" "github.com/spf13/cobra" ) var ( - metabaseImage = "metabase/metabase" - metabaseDbURI = "https://crowdsec-statics-assets.s3-eu-west-1.amazonaws.com/metabase.db.zip" - metabaseDbPath = "/var/lib/crowdsec/data" + metabaseUser = "crowdsec@crowdsec.net" + metabasePassword string + metabaseDbPath string + metabaseConfigPath string + metabaseConfigFolder = "metabase/" + metabaseConfigFile = "metabase.yaml" + metabaseImage = "metabase/metabase" /**/ metabaseListenAddress = "127.0.0.1" metabaseListenPort = "3000" metabaseContainerID = "/crowdsec-metabase" + + forceYes bool + + dockerGatewayIPAddr = "172.17.0.1" /*informations needed to setup a random password on user's behalf*/ - metabaseURI = "http://localhost:3000/api/" - metabaseURISession = "session" - metabaseURIRescan = "database/2/rescan_values" - metabaseURIUpdatepwd = "user/1/password" - defaultPassword = "c6cmetabase" - defaultEmail = "metabase@crowdsec.net" ) func NewDashboardCmd() *cobra.Command { /* ---- UPDATE COMMAND */ var cmdDashboard = &cobra.Command{ - Use: "dashboard", - Short: "Start a dashboard (metabase) container.", - Long: `Start a metabase container exposing dashboards and metrics.`, + Use: "dashboard [command]", + Short: "Manage your metabase dashboard container", + Long: `Install/Start/Stop/Remove a metabase container exposing dashboard and metrics.`, Args: cobra.ExactArgs(1), - Example: `cscli dashboard setup + Example: ` +cscli dashboard setup cscli dashboard start cscli dashboard stop -cscli dashboard setup --force`, +cscli dashboard remove +`, + PersistentPreRun: func(cmd *cobra.Command, args []string) { + metabaseConfigFolderPath := filepath.Join(csConfig.ConfigPaths.ConfigDir, metabaseConfigFolder) + metabaseConfigPath = filepath.Join(metabaseConfigFolderPath, metabaseConfigFile) + if err := os.MkdirAll(metabaseConfigFolderPath, os.ModePerm); err != nil { + log.Fatalf(err.Error()) + } + }, } var force bool @@ -62,33 +59,42 @@ cscli dashboard setup --force`, Short: "Setup a metabase container.", Long: `Perform a metabase docker setup, download standard dashboards, create a fresh user and start the container`, Args: cobra.ExactArgs(0), - Example: `cscli dashboard setup -cscli dashboard setup --force -cscli dashboard setup -l 0.0.0.0 -p 443 + Example: ` 
+cscli dashboard setup +cscli dashboard setup --listen 0.0.0.0 +cscli dashboard setup -l 0.0.0.0 -p 443 --password `, Run: func(cmd *cobra.Command, args []string) { - if err := downloadMetabaseDB(force); err != nil { - log.Fatalf("Failed to download metabase DB : %s", err) + if metabaseDbPath == "" { + metabaseDbPath = csConfig.ConfigPaths.DataDir } - log.Infof("Downloaded metabase DB") - if err := createMetabase(); err != nil { - log.Fatalf("Failed to start metabase container : %s", err) + + if metabasePassword == "" { + metabasePassword = generatePassword(16) } - log.Infof("Started metabase") - newpassword := generatePassword(64) - if err := resetMetabasePassword(newpassword); err != nil { - log.Fatalf("Failed to reset password : %s", err) + mb, err := metabase.SetupMetabase(csConfig.API.Server.DbConfig, metabaseListenAddress, metabaseListenPort, metabaseUser, metabasePassword, metabaseDbPath) + if err != nil { + log.Fatalf(err.Error()) } - log.Infof("Setup finished") - log.Infof("url : http://%s:%s", metabaseListenAddress, metabaseListenPort) - log.Infof("username: %s", defaultEmail) - log.Infof("password: %s", newpassword) + + if err := mb.DumpConfig(metabaseConfigPath); err != nil { + log.Fatalf(err.Error()) + } + + log.Infof("Metabase is ready") + fmt.Println() + fmt.Printf("\tURL : '%s'\n", mb.Config.ListenURL) + fmt.Printf("\tusername : '%s'\n", mb.Config.Username) + fmt.Printf("\tpassword : '%s'\n", mb.Config.Password) }, } cmdDashSetup.Flags().BoolVarP(&force, "force", "f", false, "Force setup : override existing files.") - cmdDashSetup.Flags().StringVarP(&metabaseDbPath, "dir", "d", metabaseDbPath, "Shared directory with metabase container.") + cmdDashSetup.Flags().StringVarP(&metabaseDbPath, "dir", "d", "", "Shared directory with metabase container.") cmdDashSetup.Flags().StringVarP(&metabaseListenAddress, "listen", "l", metabaseListenAddress, "Listen address of container") cmdDashSetup.Flags().StringVarP(&metabaseListenPort, "port", "p", metabaseListenPort, "Listen port of container") + //cmdDashSetup.Flags().StringVarP(&metabaseUser, "user", "u", "crowdsec@crowdsec.net", "metabase user") + cmdDashSetup.Flags().StringVar(&metabasePassword, "password", "", "metabase password") + cmdDashboard.AddCommand(cmdDashSetup) var cmdDashStart = &cobra.Command{ @@ -97,7 +103,11 @@ cscli dashboard setup -l 0.0.0.0 -p 443 Long: `Stats the metabase container using docker.`, Args: cobra.ExactArgs(0), Run: func(cmd *cobra.Command, args []string) { - if err := startMetabase(); err != nil { + mb, err := metabase.NewMetabase(metabaseConfigPath) + if err != nil { + log.Fatalf(err.Error()) + } + if err := mb.Container.Start(); err != nil { log.Fatalf("Failed to start metabase container : %s", err) } log.Infof("Started metabase") @@ -106,270 +116,69 @@ cscli dashboard setup -l 0.0.0.0 -p 443 } cmdDashboard.AddCommand(cmdDashStart) - var remove bool var cmdDashStop = &cobra.Command{ Use: "stop", Short: "Stops the metabase container.", Long: `Stops the metabase container using docker.`, Args: cobra.ExactArgs(0), Run: func(cmd *cobra.Command, args []string) { - if err := stopMetabase(remove); err != nil { - log.Fatalf("Failed to stop metabase container : %s", err) + if err := metabase.StopContainer(metabaseContainerID); err != nil { + log.Fatalf("unable to stop container '%s': %s", metabaseContainerID, err) } }, } - cmdDashStop.Flags().BoolVarP(&remove, "remove", "r", false, "remove (docker rm) container as well.") cmdDashboard.AddCommand(cmdDashStop) + + var cmdDashRemove = &cobra.Command{ + Use: 
"remove", + Short: "removes the metabase container.", + Long: `removes the metabase container using docker.`, + Args: cobra.ExactArgs(0), + Example: ` +cscli dashboard remove +cscli dashboard remove --force + `, + Run: func(cmd *cobra.Command, args []string) { + answer := true + if !forceYes { + prompt := &survey.Confirm{ + Message: "Do you really want to remove crowdsec dashboard? (all your changes will be lost)", + Default: true, + } + if err := survey.AskOne(prompt, &answer); err != nil { + log.Fatalf("unable to ask to force: %s", err) + } + } + + if answer { + if metabase.IsContainerExist(metabaseContainerID) { + log.Debugf("Stopping container %s", metabaseContainerID) + if err := metabase.StopContainer(metabaseContainerID); err != nil { + log.Warningf("unable to stop container '%s': %s", metabaseContainerID, err) + } + log.Debugf("Removing container %s", metabaseContainerID) + if err := metabase.RemoveContainer(metabaseContainerID); err != nil { + log.Warningf("unable to remove container '%s': %s", metabaseContainerID, err) + } + log.Infof("container %s stopped & removed", metabaseContainerID) + } + log.Debugf("Removing database %s", csConfig.ConfigPaths.DataDir) + if err := metabase.RemoveDatabase(csConfig.ConfigPaths.DataDir); err != nil { + log.Warningf("failed to remove metabase internal db : %s", err) + } + if force { + log.Debugf("Removing image %s", metabaseImage) + if err := metabase.RemoveImageContainer(metabaseImage); err != nil { + log.Warningf("Failed to remove metabase container %s : %s", metabaseImage, err) + + } + } + } + }, + } + cmdDashRemove.Flags().BoolVarP(&force, "force", "f", false, "Force remove : stop the container if running and remove.") + cmdDashRemove.Flags().BoolVarP(&forceYes, "yes", "y", false, "force yes") + cmdDashboard.AddCommand(cmdDashRemove) + return cmdDashboard } - -func downloadMetabaseDB(force bool) error { - - metabaseDBSubpath := path.Join(metabaseDbPath, "metabase.db") - - _, err := os.Stat(metabaseDBSubpath) - if err == nil && !force { - log.Printf("%s exists, skip.", metabaseDBSubpath) - return nil - } - - if err := os.MkdirAll(metabaseDBSubpath, 0755); err != nil { - return fmt.Errorf("failed to create %s : %s", metabaseDBSubpath, err) - } - - req, err := http.NewRequest("GET", metabaseDbURI, nil) - if err != nil { - return fmt.Errorf("failed to build request to fetch metabase db : %s", err) - } - //This needs to be removed once we move the zip out of github - req.Header.Add("Accept", `application/vnd.github.v3.raw`) - resp, err := http.DefaultClient.Do(req) - if err != nil { - return fmt.Errorf("failed request to fetch metabase db : %s", err) - } - if resp.StatusCode != 200 { - return fmt.Errorf("got http %d while requesting metabase db %s, stop", resp.StatusCode, metabaseDbURI) - } - defer resp.Body.Close() - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return fmt.Errorf("failed request read while fetching metabase db : %s", err) - } - - log.Printf("Got %d bytes archive", len(body)) - if err := extractMetabaseDB(bytes.NewReader(body)); err != nil { - return fmt.Errorf("while extracting zip : %s", err) - } - return nil -} - -func extractMetabaseDB(buf *bytes.Reader) error { - r, err := zip.NewReader(buf, int64(buf.Len())) - if err != nil { - log.Fatal(err) - } - for _, f := range r.File { - if strings.Contains(f.Name, "..") { - return fmt.Errorf("invalid path '%s' in archive", f.Name) - } - tfname := fmt.Sprintf("%s/%s", metabaseDbPath, f.Name) - log.Debugf("%s -> %d", f.Name, f.UncompressedSize64) - if f.UncompressedSize64 
== 0 { - continue - } - tfd, err := os.OpenFile(tfname, os.O_RDWR|os.O_TRUNC|os.O_CREATE, 0644) - if err != nil { - return fmt.Errorf("failed opening target file '%s' : %s", tfname, err) - } - rc, err := f.Open() - if err != nil { - return fmt.Errorf("while opening zip content %s : %s", f.Name, err) - } - written, err := io.Copy(tfd, rc) - if err == io.EOF { - log.Printf("files finished ok") - } else if err != nil { - return fmt.Errorf("while copying content to %s : %s", tfname, err) - } - log.Infof("written %d bytes to %s", written, tfname) - rc.Close() - } - return nil -} - -func resetMetabasePassword(newpassword string) error { - - httpctx := sling.New().Base(metabaseURI).Set("User-Agent", fmt.Sprintf("Crowdsec/%s", cwversion.VersionStr())) - - log.Printf("Waiting for metabase API to be up (can take up to a minute)") - for { - sessionreq, err := httpctx.New().Post(metabaseURISession).BodyJSON(map[string]string{"username": defaultEmail, "password": defaultPassword}).Request() - if err != nil { - return fmt.Errorf("api signin: HTTP request creation failed: %s", err) - } - httpClient := http.Client{Timeout: 20 * time.Second} - resp, err := httpClient.Do(sessionreq) - if err != nil { - fmt.Printf(".") - log.Debugf("While waiting for metabase to be up : %s", err) - time.Sleep(1 * time.Second) - continue - } - defer resp.Body.Close() - fmt.Printf("\n") - log.Printf("Metabase API is up") - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return fmt.Errorf("metabase session unable to read API response body: '%s'", err) - } - if resp.StatusCode != 200 { - return fmt.Errorf("metabase session http error (%d): %s", resp.StatusCode, string(body)) - } - log.Printf("Successfully authenticated") - jsonResp := make(map[string]string) - err = json.Unmarshal(body, &jsonResp) - if err != nil { - return fmt.Errorf("failed to unmarshal metabase api response '%s': %s", string(body), err.Error()) - } - log.Debugf("unmarshaled response : %v", jsonResp) - httpctx = httpctx.Set("Cookie", fmt.Sprintf("metabase.SESSION=%s", jsonResp["id"])) - break - } - - /*rescan values*/ - sessionreq, err := httpctx.New().Post(metabaseURIRescan).Request() - if err != nil { - return fmt.Errorf("metabase rescan_values http error : %s", err) - } - httpClient := http.Client{Timeout: 20 * time.Second} - resp, err := httpClient.Do(sessionreq) - if err != nil { - return fmt.Errorf("while trying to do rescan api call to metabase : %s", err) - } - defer resp.Body.Close() - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return fmt.Errorf("while reading rescan api call response : %s", err) - } - if resp.StatusCode != 200 { - return fmt.Errorf("got '%s' (http:%d) while trying to rescan metabase", string(body), resp.StatusCode) - } - /*update password*/ - sessionreq, err = httpctx.New().Put(metabaseURIUpdatepwd).BodyJSON(map[string]string{ - "id": "1", - "password": newpassword, - "old_password": defaultPassword}).Request() - if err != nil { - return fmt.Errorf("metabase password change http error : %s", err) - } - httpClient = http.Client{Timeout: 20 * time.Second} - resp, err = httpClient.Do(sessionreq) - if err != nil { - return fmt.Errorf("while trying to reset metabase password : %s", err) - } - defer resp.Body.Close() - body, err = ioutil.ReadAll(resp.Body) - if err != nil { - return fmt.Errorf("while reading from %s: '%s'", metabaseURIUpdatepwd, err) - } - if resp.StatusCode != 200 { - log.Printf("Got %s (http:%d) while trying to reset password.", string(body), resp.StatusCode) - log.Printf("Password has probably 
already been changed.") - log.Printf("Use the dashboard install command to reset existing setup.") - return fmt.Errorf("got http error %d on %s : %s", resp.StatusCode, metabaseURIUpdatepwd, string(body)) - } - log.Printf("Changed password !") - return nil -} - -func startMetabase() error { - ctx := context.Background() - cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation()) - if err != nil { - return fmt.Errorf("failed to create docker client : %s", err) - } - - if err := cli.ContainerStart(ctx, metabaseContainerID, types.ContainerStartOptions{}); err != nil { - return fmt.Errorf("failed while starting %s : %s", metabaseContainerID, err) - } - - return nil -} - -func stopMetabase(remove bool) error { - log.Printf("Stop docker metabase %s", metabaseContainerID) - ctx := context.Background() - cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation()) - if err != nil { - return fmt.Errorf("failed to create docker client : %s", err) - } - var to time.Duration = 20 * time.Second - if err := cli.ContainerStop(ctx, metabaseContainerID, &to); err != nil { - return fmt.Errorf("failed while stopping %s : %s", metabaseContainerID, err) - } - - if remove { - log.Printf("Removing docker metabase %s", metabaseContainerID) - if err := cli.ContainerRemove(ctx, metabaseContainerID, types.ContainerRemoveOptions{}); err != nil { - return fmt.Errorf("failed remove container %s : %s", metabaseContainerID, err) - } - } - return nil -} - -func createMetabase() error { - ctx := context.Background() - cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation()) - if err != nil { - return fmt.Errorf("failed to start docker client : %s", err) - } - - log.Printf("Pulling docker image %s", metabaseImage) - reader, err := cli.ImagePull(ctx, metabaseImage, types.ImagePullOptions{}) - if err != nil { - return fmt.Errorf("failed to pull docker image : %s", err) - } - defer reader.Close() - scanner := bufio.NewScanner(reader) - for scanner.Scan() { - fmt.Print(".") - } - if err := scanner.Err(); err != nil { - return fmt.Errorf("failed to read imagepull reader: %s", err) - } - fmt.Print("\n") - - hostConfig := &container.HostConfig{ - PortBindings: nat.PortMap{ - "3000/tcp": []nat.PortBinding{ - { - HostIP: metabaseListenAddress, - HostPort: metabaseListenPort, - }, - }, - }, - Mounts: []mount.Mount{ - { - Type: mount.TypeBind, - Source: metabaseDbPath, - Target: "/metabase-data", - }, - }, - } - dockerConfig := &container.Config{ - Image: metabaseImage, - Tty: true, - Env: []string{"MB_DB_FILE=/metabase-data/metabase.db"}, - } - - log.Printf("Creating container") - resp, err := cli.ContainerCreate(ctx, dockerConfig, hostConfig, nil, metabaseContainerID) - if err != nil { - return fmt.Errorf("failed to create container : %s", err) - } - log.Printf("Starting container") - if err := cli.ContainerStart(ctx, resp.ID, types.ContainerStartOptions{}); err != nil { - return fmt.Errorf("failed to start docker container : %s", err) - } - return nil -} diff --git a/cmd/crowdsec-cli/decisions.go b/cmd/crowdsec-cli/decisions.go new file mode 100644 index 000000000..8a7a9788f --- /dev/null +++ b/cmd/crowdsec-cli/decisions.go @@ -0,0 +1,443 @@ +package main + +import ( + "context" + "encoding/json" + "fmt" + "net/url" + "os" + "strconv" + "strings" + "time" + + "github.com/crowdsecurity/crowdsec/pkg/apiclient" + "github.com/crowdsecurity/crowdsec/pkg/cwversion" + "github.com/crowdsecurity/crowdsec/pkg/database" + 
"github.com/crowdsecurity/crowdsec/pkg/models" + "github.com/crowdsecurity/crowdsec/pkg/types" + "github.com/go-openapi/strfmt" + "github.com/olekukonko/tablewriter" + log "github.com/sirupsen/logrus" + "github.com/spf13/cobra" +) + +var Client *apiclient.ApiClient + +func DecisionsToTable(alerts *models.GetAlertsResponse) error { + /*here we cheat a bit : to make it more readable for the user, we dedup some entries*/ + var spamLimit map[string]bool = make(map[string]bool) + + /*process in reverse order to keep the latest item only*/ + for aIdx := len(*alerts) - 1; aIdx >= 0; aIdx-- { + alertItem := (*alerts)[aIdx] + newDecisions := make([]*models.Decision, 0) + for _, decisionItem := range alertItem.Decisions { + spamKey := fmt.Sprintf("%t:%s:%s:%s", *decisionItem.Simulated, *decisionItem.Type, *decisionItem.Scope, *decisionItem.Value) + if _, ok := spamLimit[spamKey]; ok { + continue + } + spamLimit[spamKey] = true + newDecisions = append(newDecisions, decisionItem) + } + alertItem.Decisions = newDecisions + } + if csConfig.Cscli.Output == "raw" { + fmt.Printf("id,source,ip,reason,action,country,as,events_count,expiration,simulated,alert_id\n") + for _, alertItem := range *alerts { + for _, decisionItem := range alertItem.Decisions { + fmt.Printf("%v,%v,%v,%v,%v,%v,%v,%v,%v,%v,%v\n", + decisionItem.ID, + *decisionItem.Origin, + *decisionItem.Scope+":"+*decisionItem.Value, + *decisionItem.Scenario, + *decisionItem.Type, + alertItem.Source.Cn, + alertItem.Source.AsNumber+" "+alertItem.Source.AsName, + *alertItem.EventsCount, + *decisionItem.Duration, + *decisionItem.Simulated, + alertItem.ID) + } + } + } else if csConfig.Cscli.Output == "json" { + x, _ := json.MarshalIndent(alerts, "", " ") + fmt.Printf("%s", string(x)) + } else if csConfig.Cscli.Output == "human" { + + table := tablewriter.NewWriter(os.Stdout) + table.SetHeader([]string{"ID", "Source", "Scope:Value", "Reason", "Action", "Country", "AS", "Events", "expiration", "Alert ID"}) + + if len(*alerts) == 0 { + fmt.Println("No active decisions") + return nil + } + + for _, alertItem := range *alerts { + for _, decisionItem := range alertItem.Decisions { + if *alertItem.Simulated { + *decisionItem.Type = fmt.Sprintf("(simul)%s", *decisionItem.Type) + } + table.Append([]string{ + strconv.Itoa(int(decisionItem.ID)), + *decisionItem.Origin, + *decisionItem.Scope + ":" + *decisionItem.Value, + *decisionItem.Scenario, + *decisionItem.Type, + alertItem.Source.Cn, + alertItem.Source.AsNumber + " " + alertItem.Source.AsName, + strconv.Itoa(int(*alertItem.EventsCount)), + *decisionItem.Duration, + strconv.Itoa(int(alertItem.ID)), + }) + } + } + table.Render() // Send output + } + return nil +} + +func NewDecisionsCmd() *cobra.Command { + /* ---- DECISIONS COMMAND */ + var cmdDecisions = &cobra.Command{ + Use: "decisions [action]", + Short: "Manage decisions", + Long: `Add/List/Delete decisions from LAPI`, + Example: `cscli decisions [action] [filter]`, + /*TBD example*/ + Args: cobra.MinimumNArgs(1), + PersistentPreRun: func(cmd *cobra.Command, args []string) { + if csConfig.API.Client == nil { + log.Fatalln("There is no configuration on 'api_client:'") + } + if csConfig.API.Client.Credentials == nil { + log.Fatalf("Please provide credentials for the API in '%s'", csConfig.API.Client.CredentialsFilePath) + } + password := strfmt.Password(csConfig.API.Client.Credentials.Password) + apiurl, err := url.Parse(csConfig.API.Client.Credentials.URL) + if err != nil { + log.Fatalf("parsing api url ('%s'): %s", csConfig.API.Client.Credentials.URL, 
err) + } + Client, err = apiclient.NewClient(&apiclient.Config{ + MachineID: csConfig.API.Client.Credentials.Login, + Password: password, + UserAgent: fmt.Sprintf("crowdsec/%s", cwversion.VersionStr()), + URL: apiurl, + VersionPrefix: "v1", + }) + if err != nil { + log.Fatalf("creating api client : %s", err) + } + }, + } + + var filter = apiclient.AlertsListOpts{ + ValueEquals: new(string), + ScopeEquals: new(string), + ScenarioEquals: new(string), + IPEquals: new(string), + RangeEquals: new(string), + Since: new(string), + Until: new(string), + TypeEquals: new(string), + IncludeCAPI: new(bool), + } + NoSimu := new(bool) + var cmdDecisionsList = &cobra.Command{ + Use: "list [options]", + Short: "List decisions from LAPI", + Example: `cscli decisions list -i 1.2.3.4 +cscli decisions list -r 1.2.3.0/24 +cscli decisions list -s crowdsecurity/ssh-bf +cscli decisions list -t ban +`, + Args: cobra.ExactArgs(0), + Run: func(cmd *cobra.Command, args []string) { + var err error + /*take care of shorthand options*/ + if err := manageCliDecisionAlerts(filter.IPEquals, filter.RangeEquals, filter.ScopeEquals, filter.ValueEquals); err != nil { + log.Fatalf("%s", err) + } + filter.ActiveDecisionEquals = new(bool) + *filter.ActiveDecisionEquals = true + if NoSimu != nil && *NoSimu { + filter.IncludeSimulated = new(bool) /*false by default; the field starts out nil, so allocate it instead of dereferencing*/ + } + /*nullify the empty entries to avoid a bad filter*/ + if *filter.Until == "" { + filter.Until = nil + } else { + /*time.ParseDuration only supports 'h' as its biggest unit, so convert days to hours for the user*/ + if strings.HasSuffix(*filter.Until, "d") { + realDuration := strings.TrimSuffix(*filter.Until, "d") + days, err := strconv.Atoi(realDuration) + if err != nil { + cmd.Help() + log.Fatalf("Can't parse duration %s, valid durations format: 1d, 4h, 4h15m", *filter.Until) + } + *filter.Until = fmt.Sprintf("%d%s", days*24, "h") + } + } + if *filter.Since == "" { + filter.Since = nil + } else { + /*time.ParseDuration only supports 'h' as its biggest unit, so convert days to hours for the user*/ + if strings.HasSuffix(*filter.Since, "d") { + realDuration := strings.TrimSuffix(*filter.Since, "d") + days, err := strconv.Atoi(realDuration) + if err != nil { + cmd.Help() + log.Fatalf("Can't parse duration %s, valid durations format: 1d, 4h, 4h15m", *filter.Since) + } + *filter.Since = fmt.Sprintf("%d%s", days*24, "h") + } + } + if *filter.TypeEquals == "" { + filter.TypeEquals = nil + } + if *filter.ValueEquals == "" { + filter.ValueEquals = nil + } + if *filter.ScopeEquals == "" { + filter.ScopeEquals = nil + } + if *filter.ScenarioEquals == "" { + filter.ScenarioEquals = nil + } + if *filter.IPEquals == "" { + filter.IPEquals = nil + } + if *filter.RangeEquals == "" { + filter.RangeEquals = nil + } + alerts, _, err := Client.Alerts.List(context.Background(), filter) + if err != nil { + log.Fatalf("Unable to list decisions : %v", err.Error()) + } + + err = DecisionsToTable(alerts) + if err != nil { + log.Fatalf("unable to display decisions : %v", err.Error()) + } + }, + } + cmdDecisionsList.Flags().SortFlags = false + cmdDecisionsList.Flags().BoolVarP(filter.IncludeCAPI, "all", "a", false, "Include decisions from Central API") + cmdDecisionsList.Flags().StringVar(filter.Since, "since", "", "restrict to alerts newer than since (ie. 4h, 30d)") + cmdDecisionsList.Flags().StringVar(filter.Until, "until", "", "restrict to alerts older than until (ie. 4h, 30d)") + cmdDecisionsList.Flags().StringVarP(filter.TypeEquals, "type", "t", "", "restrict to this decision type (ie. ban,captcha)") + cmdDecisionsList.Flags().StringVar(filter.ScopeEquals, "scope", "", "restrict to this scope (ie. ip,range,session)") + cmdDecisionsList.Flags().StringVarP(filter.ValueEquals, "value", "v", "", "restrict to this value (ie. 1.2.3.4,userName)") + cmdDecisionsList.Flags().StringVarP(filter.ScenarioEquals, "scenario", "s", "", "restrict to this scenario (ie. crowdsecurity/ssh-bf)") + cmdDecisionsList.Flags().StringVarP(filter.IPEquals, "ip", "i", "", "restrict to alerts from this source ip (shorthand for --scope ip --value <IP>)") + cmdDecisionsList.Flags().StringVarP(filter.RangeEquals, "range", "r", "", "restrict to alerts from this source range (shorthand for --scope range --value <RANGE>)") + cmdDecisionsList.Flags().BoolVar(NoSimu, "no-simu", false, "exclude decisions in simulation mode") + cmdDecisions.AddCommand(cmdDecisionsList)
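Editor's aside, not part of the patch: the day-to-hours rewrite in the list command above exists because time.ParseDuration has no 'd' unit. A minimal standalone sketch of the same conversion, with a hypothetical helper name:

// daysToHours rewrites a "30d"-style duration into the "720h" form that
// time.ParseDuration accepts; other strings ("4h", "4h15m") pass through as-is.
func daysToHours(d string) (string, error) {
	if !strings.HasSuffix(d, "d") {
		return d, nil
	}
	days, err := strconv.Atoi(strings.TrimSuffix(d, "d"))
	if err != nil {
		return "", fmt.Errorf("can't parse duration %s, valid formats: 1d, 4h, 4h15m", d)
	}
	return fmt.Sprintf("%dh", days*24), nil
}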
+ + var ( + addIP string + addRange string + addDuration string + addValue string + addScope string + addReason string + addType string + ) + + var cmdDecisionsAdd = &cobra.Command{ + Use: "add [options]", + Short: "Add decision to LAPI", + Example: `cscli decisions add --ip 1.2.3.4 +cscli decisions add --range 1.2.3.0/24 +cscli decisions add --ip 1.2.3.4 --duration 24h --type captcha +cscli decisions add --scope username --value foobar +`, + /*TBD : fix long and example*/ + Args: cobra.ExactArgs(0), + Run: func(cmd *cobra.Command, args []string) { + var startIP, endIP int64 + var err error + var ip, ipRange string + alerts := models.AddAlertsRequest{} + origin := "cscli" + capacity := int32(0) + leakSpeed := "0" + eventsCount := int32(1) + empty := "" + simulated := false + startAt := time.Now().Format(time.RFC3339) + stopAt := time.Now().Format(time.RFC3339) + createdAt := time.Now().Format(time.RFC3339) + + /*take care of shorthand options*/ + if err := manageCliDecisionAlerts(&addIP, &addRange, &addScope, &addValue); err != nil { + log.Fatalf("%s", err) + } + + if addIP != "" { + addValue = addIP + addScope = types.Ip + } else if addRange != "" { + addValue = addRange + addScope = types.Range + } else if addValue == "" { + cmd.Help() + log.Errorf("Missing arguments, a value is required (--ip, --range or --scope and --value)") + return + } + + if addScope == types.Ip { + startIP, endIP, err = database.GetIpsFromIpRange(addValue + "/32") + if err != nil { + log.Fatalf("unable to parse IP : '%s'", addValue) + } + } + if addScope == types.Range { + startIP, endIP, err = database.GetIpsFromIpRange(addValue) + if err != nil { + log.Fatalf("unable to parse Range : '%s'", addValue) + } + ipRange = addValue + } + + if addReason == "" { + addReason = fmt.Sprintf("manual '%s' from '%s'", addType, csConfig.API.Client.Credentials.Login) + } + + decision := models.Decision{ + Duration: &addDuration, + Scope: &addScope, + Value: &addValue, + Type: &addType, + Scenario: &addReason, + Origin: &origin, + StartIP: startIP, + EndIP: endIP, + } + alert := models.Alert{ + Capacity: &capacity, + Decisions: []*models.Decision{&decision}, + Events: []*models.Event{}, + EventsCount: &eventsCount, + Leakspeed: &leakSpeed, + Message: &addReason, + ScenarioHash: &empty, + Scenario: &addReason, + ScenarioVersion: &empty, + Simulated: &simulated, + Source: &models.Source{ + AsName: empty, + AsNumber: empty, + Cn: empty, + IP: ip, + Range: ipRange, + Scope: &addScope, + Value: &addValue, + }, + StartAt: &startAt, + StopAt: &stopAt, + CreatedAt: createdAt, + } + alerts = append(alerts, &alert) + + _, _, err = Client.Alerts.Add(context.Background(), alerts) + if err != nil { +
log.Fatalf("%s", err) + } + + log.Info("Decision successfully added") + }, + } + + cmdDecisionsAdd.Flags().SortFlags = false + cmdDecisionsAdd.Flags().StringVarP(&addIP, "ip", "i", "", "Source ip (shorthand for --scope ip --value <IP>)") + cmdDecisionsAdd.Flags().StringVarP(&addRange, "range", "r", "", "Range source ip (shorthand for --scope range --value <RANGE>)") + cmdDecisionsAdd.Flags().StringVarP(&addDuration, "duration", "d", "4h", "Decision duration (ie. 1h,4h,30m)") + cmdDecisionsAdd.Flags().StringVarP(&addValue, "value", "v", "", "The value (ie. --scope username --value foobar)") + cmdDecisionsAdd.Flags().StringVar(&addScope, "scope", types.Ip, "Decision scope (ie. ip,range,username)") + cmdDecisionsAdd.Flags().StringVarP(&addReason, "reason", "R", "", "Decision reason (ie. scenario-name)") + cmdDecisionsAdd.Flags().StringVarP(&addType, "type", "t", "ban", "Decision type (ie. ban,captcha,throttle)") + cmdDecisions.AddCommand(cmdDecisionsAdd)
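// Editor's note (an inference from the surrounding code, not part of the
// patch): DecisionsDeleteOpts below, like AlertsListOpts above, uses pointer
// fields so that an unset filter can be nil'ed out and omitted from the
// generated query string entirely, rather than being sent as an empty parameter.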
+ + var delFilter = apiclient.DecisionsDeleteOpts{ + ScopeEquals: new(string), + ValueEquals: new(string), + TypeEquals: new(string), + IPEquals: new(string), + RangeEquals: new(string), + } + var delDecisionId string + var delDecisionAll bool + var cmdDecisionsDelete = &cobra.Command{ + Use: "delete [options]", + Short: "Delete decisions", + Example: `cscli decisions delete -r 1.2.3.0/24 +cscli decisions delete -i 1.2.3.4 +cscli decisions delete -s crowdsecurity/ssh-bf +cscli decisions delete --id 42 +cscli decisions delete --type captcha +`, + /*TBD : redo the Long/Example*/ + PreRun: func(cmd *cobra.Command, args []string) { + if delDecisionAll { + return + } + if *delFilter.ScopeEquals == "" && *delFilter.ValueEquals == "" && + *delFilter.TypeEquals == "" && *delFilter.IPEquals == "" && + *delFilter.RangeEquals == "" && delDecisionId == "" { + cmd.Usage() + log.Fatalln("At least one filter or --all must be specified") + } + }, + Run: func(cmd *cobra.Command, args []string) { + var err error + var decisions *models.DeleteDecisionResponse + + /*take care of shorthand options*/ + if err := manageCliDecisionAlerts(delFilter.IPEquals, delFilter.RangeEquals, delFilter.ScopeEquals, delFilter.ValueEquals); err != nil { + log.Fatalf("%s", err) + } + if *delFilter.ScopeEquals == "" { + delFilter.ScopeEquals = nil + } + if *delFilter.ValueEquals == "" { + delFilter.ValueEquals = nil + } + + if *delFilter.TypeEquals == "" { + delFilter.TypeEquals = nil + } + + if *delFilter.IPEquals == "" { + delFilter.IPEquals = nil + } + + if *delFilter.RangeEquals == "" { + delFilter.RangeEquals = nil + } + + if delDecisionId == "" { + decisions, _, err = Client.Decisions.Delete(context.Background(), delFilter) + if err != nil { + log.Fatalf("Unable to delete decisions : %v", err.Error()) + } + } else { + decisions, _, err = Client.Decisions.DeleteOne(context.Background(), delDecisionId) + if err != nil { + log.Fatalf("Unable to delete decision : %v", err.Error()) + } + } + log.Infof("%s decision(s) deleted", decisions.NbDeleted) + }, + } + + cmdDecisionsDelete.Flags().SortFlags = false + cmdDecisionsDelete.Flags().StringVarP(delFilter.IPEquals, "ip", "i", "", "Source ip (shorthand for --scope ip --value <IP>)") + cmdDecisionsDelete.Flags().StringVarP(delFilter.RangeEquals, "range", "r", "", "Range source ip (shorthand for --scope range --value <RANGE>)") + cmdDecisionsDelete.Flags().StringVar(&delDecisionId, "id", "", "decision id") + cmdDecisionsDelete.Flags().StringVarP(delFilter.TypeEquals, "type", "t", "", "the decision type (ie. ban,captcha)") + cmdDecisionsDelete.Flags().StringVarP(delFilter.ValueEquals, "value", "v", "", "the value to match for in the specified scope") + cmdDecisionsDelete.Flags().BoolVar(&delDecisionAll, "all", false, "delete all decisions") + cmdDecisions.AddCommand(cmdDecisionsDelete) + + return cmdDecisions +}
diff --git a/cmd/crowdsec-cli/hub.go b/cmd/crowdsec-cli/hub.go new file mode 100644 index 000000000..e680dfe48 --- /dev/null +++ b/cmd/crowdsec-cli/hub.go @@ -0,0 +1,87 @@ +package main + +import ( + "fmt" + + "github.com/crowdsecurity/crowdsec/pkg/cwhub" + + log "github.com/sirupsen/logrus" + "github.com/spf13/cobra" +) + +func NewHubCmd() *cobra.Command { + /* ---- HUB COMMAND */ + var cmdHub = &cobra.Command{ + Use: "hub [action]", + Short: "Manage Hub", + Long: ` +Hub management + +List/update parsers/scenarios/postoverflows/collections from [Crowdsec Hub](https://hub.crowdsec.net). +The hub is managed by cscli; to get the latest hub files from [Crowdsec Hub](https://hub.crowdsec.net), you need to update. + `, + Example: ` +cscli hub list # List all installed configurations +cscli hub update # Download list of available configurations from the hub + `, + Args: cobra.ExactArgs(0), + PersistentPreRunE: func(cmd *cobra.Command, args []string) error { + if csConfig.Cscli == nil { + return fmt.Errorf("you must configure cli before interacting with hub") + } + + return nil + }, + } + cmdHub.PersistentFlags().StringVarP(&cwhub.HubBranch, "branch", "b", "", "Use given branch from hub") + + var cmdHubList = &cobra.Command{ + Use: "list [-a]", + Short: "List installed configs", + Args: cobra.ExactArgs(0), + Run: func(cmd *cobra.Command, args []string) { + if err := cwhub.GetHubIdx(csConfig.Cscli); err != nil { + log.Fatalf("Failed to get Hub index : %v", err) + } + + cwhub.DisplaySummary() + log.Printf("PARSERS:") + ListItem(cwhub.PARSERS, args) + log.Printf("SCENARIOS:") + ListItem(cwhub.SCENARIOS, args) + log.Printf("COLLECTIONS:") + ListItem(cwhub.COLLECTIONS, args) + log.Printf("POSTOVERFLOWS:") + ListItem(cwhub.PARSERS_OVFLW, args) + }, + } + cmdHub.PersistentFlags().BoolVarP(&listAll, "all", "a", false, "Also list disabled items") + cmdHub.AddCommand(cmdHubList) + + var cmdHubUpdate = &cobra.Command{ + Use: "update", + Short: "Fetch available configs from hub", + Long: ` +Fetches the [.index.json](https://github.com/crowdsecurity/hub/blob/master/.index.json) file from hub, containing the list of available configs.
+`, + Args: cobra.ExactArgs(0), + PersistentPreRunE: func(cmd *cobra.Command, args []string) error { + if csConfig.Cscli == nil { + return fmt.Errorf("you must configure cli before interacting with hub") + } + + if err := setHubBranch(); err != nil { + return fmt.Errorf("error while setting hub branch: %s", err) + } + return nil + }, + Run: func(cmd *cobra.Command, args []string) { + if err := cwhub.UpdateHubIdx(csConfig.Cscli); err != nil { + log.Fatalf("Failed to get Hub index : %v", err) + } + }, + } + cmdHub.AddCommand(cmdHubUpdate) + + return cmdHub +} diff --git a/cmd/crowdsec-cli/inspect.go b/cmd/crowdsec-cli/inspect.go deleted file mode 100644 index d1c6cec04..000000000 --- a/cmd/crowdsec-cli/inspect.go +++ /dev/null @@ -1,110 +0,0 @@ -package main - -import ( - "fmt" - - "github.com/crowdsecurity/crowdsec/pkg/cwhub" - "gopkg.in/yaml.v2" - - log "github.com/sirupsen/logrus" - - "github.com/spf13/cobra" -) - -func InspectItem(name string, objectType string) { - - for _, hubItem := range cwhub.HubIdx[objectType] { - if hubItem.Name != name { - continue - } - buff, err := yaml.Marshal(hubItem) - if err != nil { - log.Fatalf("unable to marshal item : %s", err) - } - fmt.Printf("%s", string(buff)) - } -} - -func NewInspectCmd() *cobra.Command { - var cmdInspect = &cobra.Command{ - Use: "inspect [type] [config]", - Short: "Inspect configuration(s)", - Long: ` -Inspect give you full detail about local installed configuration. - -[type] must be parser, scenario, postoverflow, collection. - -[config_name] must be a valid config name from [Crowdsec Hub](https://hub.crowdsec.net) or locally installed. -`, - Example: `cscli inspect parser crowdsec/xxx -cscli inspect collection crowdsec/xxx`, - Args: cobra.MinimumNArgs(1), - PersistentPreRunE: func(cmd *cobra.Command, args []string) error { - if !config.configured { - return fmt.Errorf("you must configure cli before interacting with hub") - } - return nil - }, - } - - var cmdInspectParser = &cobra.Command{ - Use: "parser [config]", - Short: "Inspect given log parser", - Long: `Inspect given parser from hub`, - Example: `cscli inspect parser crowdsec/xxx`, - Args: cobra.MinimumNArgs(1), - Run: func(cmd *cobra.Command, args []string) { - if err := cwhub.GetHubIdx(); err != nil { - log.Fatalf("failed to get Hub index : %v", err) - } - InspectItem(args[0], cwhub.PARSERS) - }, - } - cmdInspect.AddCommand(cmdInspectParser) - var cmdInspectScenario = &cobra.Command{ - Use: "scenario [config]", - Short: "Inspect given scenario", - Long: `Inspect given scenario from hub`, - Example: `cscli inspect scenario crowdsec/xxx`, - Args: cobra.MinimumNArgs(1), - Run: func(cmd *cobra.Command, args []string) { - if err := cwhub.GetHubIdx(); err != nil { - log.Fatalf("failed to get Hub index : %v", err) - } - InspectItem(args[0], cwhub.SCENARIOS) - }, - } - cmdInspect.AddCommand(cmdInspectScenario) - - var cmdInspectCollection = &cobra.Command{ - Use: "collection [config]", - Short: "Inspect given collection", - Long: `Inspect given collection from hub`, - Example: `cscli inspect collection crowdsec/xxx`, - Args: cobra.MinimumNArgs(1), - Run: func(cmd *cobra.Command, args []string) { - if err := cwhub.GetHubIdx(); err != nil { - log.Fatalf("failed to get Hub index : %v", err) - } - InspectItem(args[0], cwhub.COLLECTIONS) - }, - } - cmdInspect.AddCommand(cmdInspectCollection) - - var cmdInspectPostoverflow = &cobra.Command{ - Use: "postoverflow [config]", - Short: "Inspect given postoverflow parser", - Long: `Inspect given postoverflow from hub.`, - Example: `cscli 
inspect postoverflow crowdsec/xxx`, - Args: cobra.MinimumNArgs(1), - Run: func(cmd *cobra.Command, args []string) { - if err := cwhub.GetHubIdx(); err != nil { - log.Fatalf("failed to get Hub index : %v", err) - } - InspectItem(args[0], cwhub.PARSERS_OVFLW) - }, - } - cmdInspect.AddCommand(cmdInspectPostoverflow) - - return cmdInspect -} diff --git a/cmd/crowdsec-cli/install.go b/cmd/crowdsec-cli/install.go deleted file mode 100644 index bcc476321..000000000 --- a/cmd/crowdsec-cli/install.go +++ /dev/null @@ -1,148 +0,0 @@ -package main - -import ( - "fmt" - - "github.com/crowdsecurity/crowdsec/pkg/cwhub" - - log "github.com/sirupsen/logrus" - - "github.com/spf13/cobra" -) - -var download_only, force_install bool - -func InstallItem(name string, obtype string) { - for _, it := range cwhub.HubIdx[obtype] { - if it.Name == name { - if download_only && it.Downloaded && it.UpToDate { - log.Warningf("%s is already downloaded and up-to-date", it.Name) - return - } - it, err := cwhub.DownloadLatest(it, cwhub.Hubdir, force_install, config.DataFolder) - if err != nil { - log.Fatalf("error while downloading %s : %v", it.Name, err) - } - cwhub.HubIdx[obtype][it.Name] = it - if download_only { - log.Infof("Downloaded %s to %s", it.Name, cwhub.Hubdir+"/"+it.RemotePath) - return - } - it, err = cwhub.EnableItem(it, cwhub.Installdir, cwhub.Hubdir) - if err != nil { - log.Fatalf("error while enabled %s : %v.", it.Name, err) - } - cwhub.HubIdx[obtype][it.Name] = it - log.Infof("Enabled %s", it.Name) - return - } - } - log.Warningf("%s not found in hub index", name) - /*iterate of pkg index data*/ -} - -func NewInstallCmd() *cobra.Command { - /* ---- INSTALL COMMAND */ - - var cmdInstall = &cobra.Command{ - Use: "install [type] [config]", - Short: "Install configuration(s) from hub", - Long: ` -Install configuration from the CrowdSec Hub. - -In order to download latest versions of configuration, -you should [update cscli](./cscli_update.md). - -[type] must be parser, scenario, postoverflow, collection. - -[config_name] must be a valid config name from [Crowdsec Hub](https://hub.crowdsec.net). 
-`, - Example: `cscli install [type] [config_name]`, - Args: cobra.MinimumNArgs(1), - PersistentPreRunE: func(cmd *cobra.Command, args []string) error { - if !config.configured { - return fmt.Errorf("you must configure cli before interacting with hub") - } - - if err := setHubBranch(); err != nil { - return fmt.Errorf("error while setting hub branch: %s", err) - } - return nil - }, - PersistentPostRun: func(cmd *cobra.Command, args []string) { - log.Infof("Run 'systemctl reload crowdsec' for the new configuration to be effective.") - }, - } - cmdInstall.PersistentFlags().BoolVarP(&download_only, "download-only", "d", false, "Only download packages, don't enable") - cmdInstall.PersistentFlags().BoolVar(&force_install, "force", false, "Force install : Overwrite tainted and outdated files") - - var cmdInstallParser = &cobra.Command{ - Use: "parser [config]", - Short: "Install given parser", - Long: `Fetch and install given parser from hub`, - Example: `cscli install parser crowdsec/xxx`, - Args: cobra.MinimumNArgs(1), - Run: func(cmd *cobra.Command, args []string) { - if err := cwhub.GetHubIdx(); err != nil { - log.Fatalf("failed to get Hub index : %v", err) - } - for _, name := range args { - InstallItem(name, cwhub.PARSERS) - } - }, - } - cmdInstall.AddCommand(cmdInstallParser) - var cmdInstallScenario = &cobra.Command{ - Use: "scenario [config]", - Short: "Install given scenario", - Long: `Fetch and install given scenario from hub`, - Example: `cscli install scenario crowdsec/xxx`, - Args: cobra.MinimumNArgs(1), - Run: func(cmd *cobra.Command, args []string) { - if err := cwhub.GetHubIdx(); err != nil { - log.Fatalf("failed to get Hub index : %v", err) - } - for _, name := range args { - InstallItem(name, cwhub.SCENARIOS) - } - }, - } - cmdInstall.AddCommand(cmdInstallScenario) - - var cmdInstallCollection = &cobra.Command{ - Use: "collection [config]", - Short: "Install given collection", - Long: `Fetch and install given collection from hub`, - Example: `cscli install collection crowdsec/xxx`, - Args: cobra.MinimumNArgs(1), - Run: func(cmd *cobra.Command, args []string) { - if err := cwhub.GetHubIdx(); err != nil { - log.Fatalf("failed to get Hub index : %v", err) - } - for _, name := range args { - InstallItem(name, cwhub.COLLECTIONS) - } - }, - } - cmdInstall.AddCommand(cmdInstallCollection) - - var cmdInstallPostoverflow = &cobra.Command{ - Use: "postoverflow [config]", - Short: "Install given postoverflow parser", - Long: `Fetch and install given postoverflow from hub. 
-As a reminder, postoverflows are parsing configuration that will occur after the overflow (before a decision is applied).`, - Example: `cscli install collection crowdsec/xxx`, - Args: cobra.MinimumNArgs(1), - Run: func(cmd *cobra.Command, args []string) { - if err := cwhub.GetHubIdx(); err != nil { - log.Fatalf("failed to get Hub index : %v", err) - } - for _, name := range args { - InstallItem(name, cwhub.PARSERS_OVFLW) - } - }, - } - cmdInstall.AddCommand(cmdInstallPostoverflow) - - return cmdInstall -}
diff --git a/cmd/crowdsec-cli/lapi.go b/cmd/crowdsec-cli/lapi.go new file mode 100644 index 000000000..81f158e8c --- /dev/null +++ b/cmd/crowdsec-cli/lapi.go @@ -0,0 +1,167 @@ +package main + +import ( + "context" + "fmt" + "io/ioutil" + "net/http/httputil" + "net/url" + "strings" + + "github.com/crowdsecurity/crowdsec/pkg/apiclient" + "github.com/crowdsecurity/crowdsec/pkg/csconfig" + "github.com/crowdsecurity/crowdsec/pkg/cwhub" + "github.com/crowdsecurity/crowdsec/pkg/cwversion" + "github.com/crowdsecurity/crowdsec/pkg/models" + "github.com/go-openapi/strfmt" + log "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + "gopkg.in/yaml.v2" +) + +var LAPIURLPrefix string = "v1" + +func NewLapiCmd() *cobra.Command { + var cmdLapi = &cobra.Command{ + Use: "lapi [action]", + Short: "Manage interaction with Local API (LAPI)", + Args: cobra.MinimumNArgs(1), + PersistentPreRunE: func(cmd *cobra.Command, args []string) error { + if csConfig.API.Client == nil { + log.Fatalln("There is no API->client configuration") + } + if csConfig.API.Client.Credentials == nil { + log.Fatalf("no configuration for crowdsec API in '%s'", *csConfig.Self) + } + return nil + }, + } + + var cmdLapiRegister = &cobra.Command{ + Use: "register", + Short: "Register a machine to Local API (LAPI)", + Long: `Register your machine to the Local API (LAPI). +Keep in mind the machine needs to be validated by an administrator on the LAPI side to be effective.`, + Args: cobra.MinimumNArgs(0), + Run: func(cmd *cobra.Command, args []string) { + var err error + id, err := generateID() + if err != nil { + log.Fatalf("unable to generate machine id: %s", err) + } + password := strfmt.Password(generatePassword(passwordLength)) + if apiURL == "" { + if csConfig.API.Client != nil && csConfig.API.Client.Credentials != nil && csConfig.API.Client.Credentials.URL != "" { + apiURL = csConfig.API.Client.Credentials.URL + } else { + log.Fatalf("No Local API URL.
Please provide it in your configuration or with the -u parameter") + } + } + /*URL needs to end with /, but user doesn't care*/ + if !strings.HasSuffix(apiURL, "/") { + apiURL += "/" + } + /*URL needs to start with http://, but user doesn't care*/ + if !strings.HasPrefix(apiURL, "http://") && !strings.HasPrefix(apiURL, "https://") { + apiURL = "http://" + apiURL + } + apiurl, err := url.Parse(apiURL) + if err != nil { + log.Fatalf("parsing api url: %s", err) + } + _, err = apiclient.RegisterClient(&apiclient.Config{ + MachineID: id, + Password: password, + UserAgent: fmt.Sprintf("crowdsec/%s", cwversion.VersionStr()), + URL: apiurl, + VersionPrefix: LAPIURLPrefix, + }, nil) + + if err != nil { + log.Fatalf("api client register: %s", err) + } + + var dumpFile string + if outputFile != "" { + dumpFile = outputFile + } else if csConfig.API.Client.CredentialsFilePath != "" { + dumpFile = csConfig.API.Client.CredentialsFilePath + } else { + dumpFile = "" + } + apiCfg := csconfig.ApiCredentialsCfg{ + Login: id, + Password: password.String(), + URL: apiURL, + } + apiConfigDump, err := yaml.Marshal(apiCfg) + if err != nil { + log.Fatalf("unable to marshal api credentials: %s", err) + } + if dumpFile != "" { + err = ioutil.WriteFile(dumpFile, apiConfigDump, 0644) + if err != nil { + log.Fatalf("write api credentials in '%s' failed: %s", dumpFile, err) + } + log.Printf("API credentials dumped to '%s'", dumpFile) + } else { + fmt.Printf("%s\n", string(apiConfigDump)) + } + log.Warningf("Run 'systemctl reload crowdsec' for the new configuration to be effective") + }, + } + cmdLapiRegister.Flags().StringVarP(&apiURL, "url", "u", "", "URL of the API (ie. http://127.0.0.1)") + cmdLapiRegister.Flags().StringVarP(&outputFile, "file", "f", "", "output file destination") + cmdLapi.AddCommand(cmdLapiRegister) + + var cmdLapiStatus = &cobra.Command{ + Use: "status", + Short: "Check authentication to Local API (LAPI)", + Args: cobra.MinimumNArgs(0), + Run: func(cmd *cobra.Command, args []string) { + var err error + + password := strfmt.Password(csConfig.API.Client.Credentials.Password) + apiurl, err := url.Parse(csConfig.API.Client.Credentials.URL) + login := csConfig.API.Client.Credentials.Login + if err != nil { + log.Fatalf("parsing api url ('%s'): %s", apiurl, err) + } + if err := cwhub.GetHubIdx(csConfig.Cscli); err != nil { + log.Fatalf("Failed to load hub index : %s", err) + } + scenarios, err := cwhub.GetUpstreamInstalledScenariosAsString() + if err != nil { + log.Fatalf("failed to get scenarios : %s", err.Error()) + } + + Client, err = apiclient.NewDefaultClient(apiurl, + LAPIURLPrefix, + fmt.Sprintf("crowdsec/%s", cwversion.VersionStr()), + nil) + if err != nil { + log.Fatalf("init default client: %s", err) + } + t := models.WatcherAuthRequest{ + MachineID: &login, + Password: &password, + Scenarios: scenarios, + } + log.Infof("Loaded credentials from %s", csConfig.API.Client.CredentialsFilePath) + log.Infof("Trying to authenticate with username %s on %s", login, apiurl) + resp, err := Client.Auth.AuthenticateWatcher(context.Background(), t) + if err != nil { + log.Fatalf("Failed to authenticate to Local API (LAPI) : %s", err) + } else { + log.Infof("You can successfully interact with Local API (LAPI)") + } + for k, v := range resp.Response.Header { + log.Debugf("[headers] %s : %s", k, v) + } + dump, _ := httputil.DumpResponse(resp.Response, true) + log.Debugf("Response: %s", string(dump)) + }, + } + cmdLapi.AddCommand(cmdLapiStatus) + return cmdLapi +} diff --git a/cmd/crowdsec-cli/list.go 
b/cmd/crowdsec-cli/list.go deleted file mode 100644 index 5e9f5502e..000000000 --- a/cmd/crowdsec-cli/list.go +++ /dev/null @@ -1,153 +0,0 @@ -package main - -import ( - "encoding/json" - "fmt" - "os" - - "github.com/crowdsecurity/crowdsec/pkg/cwhub" - - "github.com/enescakir/emoji" - "github.com/olekukonko/tablewriter" - log "github.com/sirupsen/logrus" - "github.com/spf13/cobra" -) - -var listAll bool - -func doListing(ttype string, args []string) { - - var pkgst []map[string]string - - if len(args) == 1 { - pkgst = cwhub.HubStatus(ttype, args[0], listAll) - } else { - pkgst = cwhub.HubStatus(ttype, "", listAll) - } - - if config.output == "human" { - - table := tablewriter.NewWriter(os.Stdout) - table.SetCenterSeparator("") - table.SetColumnSeparator("") - - table.SetHeaderAlignment(tablewriter.ALIGN_LEFT) - table.SetAlignment(tablewriter.ALIGN_LEFT) - table.SetHeader([]string{"Name", fmt.Sprintf("%v Status", emoji.Package), "Version", "Local Path"}) - for _, v := range pkgst { - table.Append([]string{v["name"], v["utf8_status"], v["local_version"], v["local_path"]}) - } - table.Render() - } else if config.output == "json" { - x, err := json.MarshalIndent(pkgst, "", " ") - if err != nil { - log.Fatalf("failed to unmarshal") - } - fmt.Printf("%s", string(x)) - } else if config.output == "raw" { - for _, v := range pkgst { - fmt.Printf("%s %s\n", v["name"], v["description"]) - } - } -} - -func NewListCmd() *cobra.Command { - /* ---- LIST COMMAND */ - var cmdList = &cobra.Command{ - Use: "list [-a]", - Short: "List enabled configs", - Long: ` -List enabled configurations (parser/scenarios/collections) on your host. - -It is possible to list also configuration from [Crowdsec Hub](https://hub.crowdsec.net) with the '-a' options. - -[type] must be parsers, scenarios, postoverflows, collections - `, - Example: `cscli list # List all local configurations -cscli list [type] # List all local configuration of type [type] -cscli list -a # List all local and remote configurations - `, - Args: cobra.ExactArgs(0), - PersistentPreRunE: func(cmd *cobra.Command, args []string) error { - if !config.configured { - return fmt.Errorf("you must configure cli before interacting with hub") - } - - return nil - }, - Run: func(cmd *cobra.Command, args []string) { - if err := cwhub.GetHubIdx(); err != nil { - log.Fatalf("Failed to get Hub index : %v", err) - } - - cwhub.DisplaySummary() - log.Printf("PARSERS:") - doListing(cwhub.PARSERS, args) - log.Printf("SCENARIOS:") - doListing(cwhub.SCENARIOS, args) - log.Printf("COLLECTIONS:") - doListing(cwhub.COLLECTIONS, args) - log.Printf("POSTOVERFLOWS:") - doListing(cwhub.PARSERS_OVFLW, args) - }, - } - cmdList.PersistentFlags().BoolVarP(&listAll, "all", "a", false, "List as well disabled items") - - var cmdListParsers = &cobra.Command{ - Use: "parsers [-a]", - Short: "List enabled parsers", - Long: ``, - Args: cobra.ExactArgs(0), - Run: func(cmd *cobra.Command, args []string) { - if err := cwhub.GetHubIdx(); err != nil { - log.Fatalf("Failed to get Hub index : %v", err) - } - doListing(cwhub.PARSERS, args) - }, - } - cmdList.AddCommand(cmdListParsers) - - var cmdListScenarios = &cobra.Command{ - Use: "scenarios [-a]", - Short: "List enabled scenarios", - Long: ``, - Args: cobra.ExactArgs(0), - Run: func(cmd *cobra.Command, args []string) { - if err := cwhub.GetHubIdx(); err != nil { - log.Fatalf("Failed to get Hub index : %v", err) - } - doListing(cwhub.SCENARIOS, args) - }, - } - cmdList.AddCommand(cmdListScenarios) - - var cmdListCollections = &cobra.Command{ - Use: 
"collections [-a]", - Short: "List enabled collections", - Long: ``, - Args: cobra.ExactArgs(0), - Run: func(cmd *cobra.Command, args []string) { - if err := cwhub.GetHubIdx(); err != nil { - log.Fatalf("Failed to get Hub index : %v", err) - } - doListing(cwhub.COLLECTIONS, args) - }, - } - cmdList.AddCommand(cmdListCollections) - - var cmdListPostoverflows = &cobra.Command{ - Use: "postoverflows [-a]", - Short: "List enabled postoverflow parsers", - Long: ``, - Args: cobra.ExactArgs(0), - Run: func(cmd *cobra.Command, args []string) { - if err := cwhub.GetHubIdx(); err != nil { - log.Fatalf("Failed to get Hub index : %v", err) - } - doListing(cwhub.PARSERS_OVFLW, args) - }, - } - cmdList.AddCommand(cmdListPostoverflows) - - return cmdList -} diff --git a/cmd/crowdsec-cli/machines.go b/cmd/crowdsec-cli/machines.go new file mode 100644 index 000000000..14d5f13eb --- /dev/null +++ b/cmd/crowdsec-cli/machines.go @@ -0,0 +1,305 @@ +package main + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "math/rand" + "os" + "strings" + "time" + + "github.com/AlecAivazis/survey/v2" + "github.com/crowdsecurity/crowdsec/pkg/csconfig" + "github.com/crowdsecurity/crowdsec/pkg/database" + "github.com/denisbrodbeck/machineid" + "github.com/enescakir/emoji" + "github.com/go-openapi/strfmt" + "github.com/olekukonko/tablewriter" + "github.com/pkg/errors" + log "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + "gopkg.in/yaml.v2" +) + +var machineID string +var machinePassword string +var interactive bool +var apiURL string +var outputFile string +var forceAdd bool +var autoAdd bool + +var ( + passwordLength = 64 + upper = "ABCDEFGHIJKLMNOPQRSTUVWXY" + lower = "abcdefghijklmnopqrstuvwxyz" + digits = "0123456789" +) + +const ( + uuid = "/proc/sys/kernel/random/uuid" +) + +func generatePassword(length int) string { + rand.Seed(time.Now().UnixNano()) + charset := upper + lower + digits + + buf := make([]byte, length) + buf[0] = digits[rand.Intn(len(digits))] + buf[1] = upper[rand.Intn(len(upper))] + buf[2] = lower[rand.Intn(len(lower))] + + for i := 3; i < length; i++ { + buf[i] = charset[rand.Intn(len(charset))] + } + rand.Shuffle(len(buf), func(i, j int) { + buf[i], buf[j] = buf[j], buf[i] + }) + + return string(buf) +} + +func generateID() (string, error) { + id, err := machineid.ID() + if err != nil { + log.Debugf("failed to get machine-id with usual files : %s", err) + } + if id == "" || err != nil { + bID, err := ioutil.ReadFile(uuid) + if err != nil { + return "", errors.Wrap(err, "generating machine id") + } + id = string(bID) + id = strings.ReplaceAll(id, "-", "")[:32] + } + id = fmt.Sprintf("%s%s", id, generatePassword(16)) + return id, nil +} + +func NewMachinesCmd() *cobra.Command { + /* ---- DECISIONS COMMAND */ + var cmdMachines = &cobra.Command{ + Use: "machines [action]", + Short: "Manage local API machines", + Long: ` +Machines Management. 
+ +func NewMachinesCmd() *cobra.Command { + /* ---- MACHINES COMMAND */ + var cmdMachines = &cobra.Command{ + Use: "machines [action]", + Short: "Manage local API machines", + Long: ` +Machine management. + +List/add/delete/register/validate machines. +`, + Example: `cscli machines [action]`, + } + + var cmdMachinesList = &cobra.Command{ + Use: "list", + Short: "List machines", + Long: `List machines`, + Example: `cscli machines list`, + Args: cobra.MaximumNArgs(1), + PersistentPreRun: func(cmd *cobra.Command, args []string) { + var err error + dbClient, err = database.NewClient(csConfig.DbConfig) + if err != nil { + log.Fatalf("unable to create new database client: %s", err) + } + }, + Run: func(cmd *cobra.Command, args []string) { + machines, err := dbClient.ListMachines() + if err != nil { + log.Errorf("unable to list machines: %s", err) + } + if csConfig.Cscli.Output == "human" { + table := tablewriter.NewWriter(os.Stdout) + table.SetCenterSeparator("") + table.SetColumnSeparator("") + + table.SetHeaderAlignment(tablewriter.ALIGN_LEFT) + table.SetAlignment(tablewriter.ALIGN_LEFT) + table.SetHeader([]string{"Name", "IP Address", "Last Update", "Status", "Version"}) + for _, w := range machines { + var validated string + if w.IsValidated { + validated = fmt.Sprintf("%s", emoji.CheckMark) + } else { + validated = fmt.Sprintf("%s", emoji.Prohibited) + } + table.Append([]string{w.MachineId, w.IpAddress, w.UpdatedAt.Format(time.RFC3339), validated, w.Version}) + } + table.Render() + } else if csConfig.Cscli.Output == "json" { + x, err := json.MarshalIndent(machines, "", " ") + if err != nil { + log.Fatalf("failed to marshal") + } + fmt.Printf("%s", string(x)) + } else if csConfig.Cscli.Output == "raw" { + for _, w := range machines { + var validated string + if w.IsValidated { + validated = "true" + } else { + validated = "false" + } + fmt.Printf("%s,%s,%s,%s,%s\n", w.MachineId, w.IpAddress, w.UpdatedAt.Format(time.RFC3339), validated, w.Version) + } + } else { + log.Errorf("unknown output '%s'", csConfig.Cscli.Output) + } + }, + } + cmdMachines.AddCommand(cmdMachinesList) + + var cmdMachinesAdd = &cobra.Command{ + Use: "add", + Short: "add a machine to the database.", + Long: `Register a new machine in the database.
cscli should be on the same machine as LAPI.`, + Example: ` +cscli machines add --auto +cscli machines add MyTestMachine --auto +cscli machines add MyTestMachine --password MyPassword +`, + PersistentPreRun: func(cmd *cobra.Command, args []string) { + var err error + dbClient, err = database.NewClient(csConfig.DbConfig) + if err != nil { + log.Fatalf("unable to create new database client: %s", err) + } + }, + Run: func(cmd *cobra.Command, args []string) { + var dumpFile string + var err error + + // create the machine ID if not specified by the user + if len(args) == 0 { + if !autoAdd { + err = cmd.Help() + if err != nil { + log.Fatalf("unable to print help(): %s", err) + } + return + } + machineID, err = generateID() + if err != nil { + log.Fatalf("unable to generate machine id : %s", err) + } + } else { + machineID = args[0] + } + + /*pick the destination file for the generated credentials*/ + if outputFile != "" { + dumpFile = outputFile + } else if csConfig.API.Client.CredentialsFilePath != "" { + dumpFile = csConfig.API.Client.CredentialsFilePath + } + + // create the password if not specified by the user + if machinePassword == "" && !interactive { + if !autoAdd { + err = cmd.Help() + if err != nil { + log.Fatalf("unable to print help(): %s", err) + } + return + } + machinePassword = generatePassword(passwordLength) + } else if machinePassword == "" && interactive { + qs := &survey.Password{ + Message: "Please provide a password for the machine", + } + survey.AskOne(qs, &machinePassword) + } + password := strfmt.Password(machinePassword) + _, err = dbClient.CreateMachine(&machineID, &password, "", true, forceAdd) + if err != nil { + log.Fatalf("unable to create machine: %s", err) + } + log.Infof("Machine '%s' created successfully", machineID) + + if apiURL == "" { + if csConfig.API.Client != nil && csConfig.API.Client.Credentials != nil && csConfig.API.Client.Credentials.URL != "" { + apiURL = csConfig.API.Client.Credentials.URL + } else if csConfig.API.Server != nil && csConfig.API.Server.ListenURI != "" { + apiURL = "http://" + csConfig.API.Server.ListenURI + } else { + log.Fatalf("unable to dump an api URL.
Please provide it in your configuration or with the -u parameter") + } + } + apiCfg := csconfig.ApiCredentialsCfg{ + Login: machineID, + Password: password.String(), + URL: apiURL, + } + apiConfigDump, err := yaml.Marshal(apiCfg) + if err != nil { + log.Fatalf("unable to marshal api credentials: %s", err) + } + if dumpFile != "" { + err = ioutil.WriteFile(dumpFile, apiConfigDump, 0644) + if err != nil { + log.Fatalf("write api credentials in '%s' failed: %s", dumpFile, err) + } + log.Printf("API credentials dumped to '%s'", dumpFile) + } else { + fmt.Printf("%s\n", string(apiConfigDump)) + } + }, + } + cmdMachinesAdd.Flags().StringVarP(&machinePassword, "password", "p", "", "machine password to login to the API") + cmdMachinesAdd.Flags().StringVarP(&outputFile, "file", "f", "", "output file destination") + cmdMachinesAdd.Flags().StringVarP(&apiURL, "url", "u", "", "URL of the local API") + cmdMachinesAdd.Flags().BoolVarP(&interactive, "interactive", "i", false, "interactive mode to enter the password") + cmdMachinesAdd.Flags().BoolVarP(&autoAdd, "auto", "a", false, "add the machine automatically (will also generate the username if not provided)") + cmdMachinesAdd.Flags().BoolVar(&forceAdd, "force", false, "will force add the machine if it already exists") + cmdMachines.AddCommand(cmdMachinesAdd) + + var cmdMachinesDelete = &cobra.Command{ + Use: "delete --machine MyTestMachine", + Short: "delete machines", + Example: `cscli machines delete <machine_name>`, + Args: cobra.ExactArgs(1), + PersistentPreRun: func(cmd *cobra.Command, args []string) { + var err error + dbClient, err = database.NewClient(csConfig.DbConfig) + if err != nil { + log.Fatalf("unable to create new database client: %s", err) + } + }, + Run: func(cmd *cobra.Command, args []string) { + machineID = args[0] + err := dbClient.DeleteWatcher(machineID) + if err != nil { + log.Errorf("unable to delete machine: %s", err) + return + } + log.Infof("machine '%s' deleted successfully", machineID) + }, + } + cmdMachinesDelete.Flags().StringVarP(&machineID, "machine", "m", "", "machine to delete") + cmdMachines.AddCommand(cmdMachinesDelete) + + var cmdMachinesValidate = &cobra.Command{ + Use: "validate", + Short: "validate a machine to access the local API", + Long: `validate a machine to access the local API.`, + Example: `cscli machines validate <machine_name>`, + Args: cobra.ExactArgs(1), + PersistentPreRun: func(cmd *cobra.Command, args []string) { + var err error + dbClient, err = database.NewClient(csConfig.DbConfig) + if err != nil { + log.Fatalf("unable to create new database client: %s", err) + } + }, + Run: func(cmd *cobra.Command, args []string) { + machineID = args[0] + if err := dbClient.ValidateMachine(machineID); err != nil { + log.Fatalf("unable to validate machine '%s': %s", machineID, err) + } + log.Infof("machine '%s' validated successfully", machineID) + }, + } + cmdMachines.AddCommand(cmdMachinesValidate) + + return cmdMachines +}
diff --git a/cmd/crowdsec-cli/main.go b/cmd/crowdsec-cli/main.go index 057df3f28..8f0cdfdb1 100644 --- a/cmd/crowdsec-cli/main.go +++ b/cmd/crowdsec-cli/main.go @@ -1,26 +1,40 @@ package main import ( - "os/user" - "path/filepath" - "strings" - "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/cwhub" "github.com/crowdsecurity/crowdsec/pkg/cwversion" + "github.com/crowdsecurity/crowdsec/pkg/database" log "github.com/sirupsen/logrus" "github.com/spf13/cobra" "github.com/spf13/cobra/doc" ) -var dbg_lvl, nfo_lvl, wrn_lvl, err_lvl bool +var trace_lvl, dbg_lvl, nfo_lvl, wrn_lvl,
err_lvl bool -var config cliConfig +var ConfigFilePath string +var csConfig *csconfig.GlobalConfig +var dbClient *database.Client + +var OutputFormat string + +var downloadOnly bool +var forceInstall bool +var forceUpgrade bool +var removeAll bool +var purgeRemove bool +var upgradeAll bool +var listAll bool +var restoreOldBackup bool + +var prometheusURL string func initConfig() { - if dbg_lvl { + if trace_lvl { + log.SetLevel(log.TraceLevel) + } else if dbg_lvl { log.SetLevel(log.DebugLevel) } else if nfo_lvl { log.SetLevel(log.InfoLevel) @@ -29,42 +43,33 @@ func initConfig() { } else if err_lvl { log.SetLevel(log.ErrorLevel) } - if config.output == "json" { + + csConfig = csconfig.NewConfig() + + log.Debugf("Using %s as configuration file", ConfigFilePath) + if err := csConfig.LoadConfigurationFile(ConfigFilePath); err != nil { + log.Fatalf(err.Error()) + } + if cwhub.HubBranch == "" && csConfig.Cscli.HubBranch != "" { + cwhub.HubBranch = csConfig.Cscli.HubBranch + } + if OutputFormat != "" { + csConfig.Cscli.Output = OutputFormat + if OutputFormat != "json" && OutputFormat != "raw" && OutputFormat != "human" { + log.Fatalf("output format %s unknown", OutputFormat) + } + } + if csConfig.Cscli.Output == "" { + csConfig.Cscli.Output = "human" + } + + if csConfig.Cscli.Output == "json" { log.SetLevel(log.WarnLevel) log.SetFormatter(&log.JSONFormatter{}) - } else if config.output == "raw" { + } else if csConfig.Cscli.Output == "raw" { log.SetLevel(log.ErrorLevel) } - csConfig := csconfig.NewCrowdSecConfig() - if err := csConfig.LoadConfigurationFile(&config.ConfigFilePath); err != nil { - log.Fatalf(err.Error()) - } - config.configFolder = filepath.Clean(csConfig.CsCliFolder) - - if strings.HasPrefix(config.configFolder, "~/") { - usr, err := user.Current() - if err != nil { - log.Fatalf("failed to resolve path ~/ : %s", err) - } - config.configFolder = usr.HomeDir + "/" + config.configFolder[2:] - } - - /*read config*/ - config.InstallFolder = filepath.Clean(csConfig.ConfigFolder) - config.HubFolder = filepath.Clean(config.configFolder + "/hub/") - if csConfig.OutputConfig == nil { - log.Fatalf("Missing backend plugin configuration in %s", config.ConfigFilePath) - } - config.BackendPluginFolder = filepath.Clean(csConfig.OutputConfig.BackendFolder) - config.DataFolder = filepath.Clean(csConfig.DataFolder) - // - cwhub.Installdir = config.InstallFolder - cwhub.Cfgdir = config.configFolder - cwhub.Hubdir = config.HubFolder - config.configured = true - config.SimulationCfg = csConfig.SimulationCfg - config.SimulationCfgPath = csConfig.SimulationCfgPath } func main() { @@ -74,22 +79,8 @@ func main() { Short: "cscli allows you to manage crowdsec", Long: `cscli is the main command to interact with your crowdsec service, scenarios & db. It is meant to allow you to manage bans, parsers/scenarios/etc, api and generally manage you crowdsec setup.`, - Example: `View/Add/Remove bans: - - cscli ban list - - cscli ban add ip 1.2.3.4 24h 'go away' - - cscli ban del 1.2.3.4 - -View/Add/Upgrade/Remove scenarios and parsers: - - cscli list - - cscli install collection crowdsec/linux-web - - cscli remove scenario crowdsec/ssh_enum - - cscli upgrade --all - -API interaction: - - cscli api pull - - cscli api register - `} - /*TODO : add a remediation type*/ + /*TBD examples*/ + } var cmdDocGen = &cobra.Command{ Use: "doc", Short: "Generate the documentation in `./doc/`. 
Directory must exist.", @@ -97,7 +88,7 @@ API interaction: Hidden: true, Run: func(cmd *cobra.Command, args []string) { if err := doc.GenMarkdownTree(rootCmd, "./doc/"); err != nil { - log.Fatalf("Failed to generate cobra doc") + log.Fatalf("Failed to generate cobra doc: %s", err.Error()) } }, } @@ -114,16 +105,15 @@ API interaction: } rootCmd.AddCommand(cmdVersion) - //rootCmd.PersistentFlags().BoolVarP(&config.simulation, "simulate", "s", false, "No action; perform a simulation of events that would occur based on the current arguments.") - rootCmd.PersistentFlags().StringVarP(&config.ConfigFilePath, "config", "c", "/etc/crowdsec/config/default.yaml", "path to crowdsec config file") - - rootCmd.PersistentFlags().StringVarP(&config.output, "output", "o", "human", "Output format : human, json, raw.") + rootCmd.PersistentFlags().StringVarP(&ConfigFilePath, "config", "c", "/etc/crowdsec/config.yaml", "path to crowdsec config file") + rootCmd.PersistentFlags().StringVarP(&OutputFormat, "output", "o", "", "Output format : human, json, raw.") rootCmd.PersistentFlags().BoolVar(&dbg_lvl, "debug", false, "Set logging to debug.") rootCmd.PersistentFlags().BoolVar(&nfo_lvl, "info", false, "Set logging to info.") rootCmd.PersistentFlags().BoolVar(&wrn_lvl, "warning", false, "Set logging to warning.") rootCmd.PersistentFlags().BoolVar(&err_lvl, "error", false, "Set logging to error.") + rootCmd.PersistentFlags().BoolVar(&trace_lvl, "trace", false, "Set logging to trace.") - rootCmd.PersistentFlags().StringVar(&cwhub.HubBranch, "branch", "master", "Override hub branch on github") + rootCmd.PersistentFlags().StringVar(&cwhub.HubBranch, "branch", "", "Override hub branch on github") if err := rootCmd.PersistentFlags().MarkHidden("branch"); err != nil { log.Fatalf("failed to make branch hidden : %s", err) } @@ -132,19 +122,22 @@ API interaction: rootCmd.Flags().SortFlags = false rootCmd.PersistentFlags().SortFlags = false - rootCmd.AddCommand(NewBanCmds()) rootCmd.AddCommand(NewConfigCmd()) - rootCmd.AddCommand(NewInstallCmd()) - rootCmd.AddCommand(NewListCmd()) - rootCmd.AddCommand(NewRemoveCmd()) - rootCmd.AddCommand(NewUpdateCmd()) - rootCmd.AddCommand(NewUpgradeCmd()) - rootCmd.AddCommand(NewAPICmd()) + rootCmd.AddCommand(NewHubCmd()) rootCmd.AddCommand(NewMetricsCmd()) - rootCmd.AddCommand(NewBackupCmd()) rootCmd.AddCommand(NewDashboardCmd()) - rootCmd.AddCommand(NewInspectCmd()) + rootCmd.AddCommand(NewDecisionsCmd()) + rootCmd.AddCommand(NewAlertsCmd()) + // rootCmd.AddCommand(NewInspectCmd()) rootCmd.AddCommand(NewSimulationCmds()) + rootCmd.AddCommand(NewBouncersCmd()) + rootCmd.AddCommand(NewMachinesCmd()) + rootCmd.AddCommand(NewParsersCmd()) + rootCmd.AddCommand(NewScenariosCmd()) + rootCmd.AddCommand(NewCollectionsCmd()) + rootCmd.AddCommand(NewPostOverflowsCmd()) + rootCmd.AddCommand(NewCapiCmd()) + rootCmd.AddCommand(NewLapiCmd()) if err := rootCmd.Execute(); err != nil { log.Fatalf("While executing root command : %s", err) }
diff --git a/cmd/crowdsec-cli/metrics.go b/cmd/crowdsec-cli/metrics.go index 6bbae936b..2946706ff 100644 --- a/cmd/crowdsec-cli/metrics.go +++ b/cmd/crowdsec-cli/metrics.go @@ -10,6 +10,7 @@ import ( "strings" "time" + "github.com/crowdsecurity/crowdsec/pkg/types" log "github.com/sirupsen/logrus" "gopkg.in/yaml.v2" @@ -19,6 +20,37 @@ import ( "github.com/spf13/cobra" ) +func lapiMetricsToTable(table *tablewriter.Table, stats map[string]map[string]map[string]int) error { + + //stats : machine -> route -> method -> count + /*we want consistent display order*/ +
machineKeys := []string{} + for k := range stats { + machineKeys = append(machineKeys, k) + } + sort.Strings(machineKeys) + + for _, machine := range machineKeys { + //oneRow : route -> method -> count + machineRow := stats[machine] + for routeName, route := range machineRow { + for methodName, count := range route { + row := []string{} + row = append(row, machine) + row = append(row, routeName) + row = append(row, methodName) + if count != 0 { + row = append(row, fmt.Sprintf("%d", count)) + } else { + row = append(row, "-") + } + table.Append(row) + } + } + } + return nil +} + func metricsToTable(table *tablewriter.Table, stats map[string]map[string]int, keys []string) error { var sortedKeys []string @@ -65,6 +97,7 @@ func ShowPrometheus(url string) { transport.ResponseHeaderTimeout = time.Minute go func() { + defer types.CatchPanic("crowdsec/ShowPrometheus") err := prom2json.FetchMetricFamilies(url, mfChan, transport) if err != nil { log.Fatalf("failed to fetch prometheus metrics : %v", err) @@ -77,14 +110,22 @@ func ShowPrometheus(url string) { } log.Debugf("Finished reading prometheus output, %d entries", len(result)) /*walk*/ + lapi_decisions_stats := map[string]struct { + NonEmpty int + Empty int + }{} acquis_stats := map[string]map[string]int{} parsers_stats := map[string]map[string]int{} buckets_stats := map[string]map[string]int{} + lapi_stats := map[string]map[string]int{} + lapi_machine_stats := map[string]map[string]map[string]int{} + lapi_bouncer_stats := map[string]map[string]map[string]int{} + for idx, fam := range result { if !strings.HasPrefix(fam.Name, "cs_") { continue } - log.Debugf("round %d", idx) + log.Tracef("round %d", idx) for _, m := range fam.Metrics { metric := m.(prom2json.Metric) name, ok := metric.Labels["name"] @@ -96,6 +137,12 @@ func ShowPrometheus(url string) { log.Debugf("no source in Metric %v", metric.Labels) } value := m.(prom2json.Metric).Value + machine := metric.Labels["machine"] + bouncer := metric.Labels["bouncer"] + + route := metric.Labels["route"] + method := metric.Labels["method"] + fval, err := strconv.ParseFloat(value, 32) if err != nil { log.Errorf("Unexpected int value %s : %s", value, err) @@ -163,13 +210,48 @@ func ShowPrometheus(url string) { parsers_stats[name] = make(map[string]int) } parsers_stats[name]["unparsed"] += ival + case "cs_lapi_route_requests_total": + if _, ok := lapi_stats[route]; !ok { + lapi_stats[route] = make(map[string]int) + } + lapi_stats[route][method] += ival + case "cs_lapi_machine_requests_total": + if _, ok := lapi_machine_stats[machine]; !ok { + lapi_machine_stats[machine] = make(map[string]map[string]int) + } + if _, ok := lapi_machine_stats[machine][route]; !ok { + lapi_machine_stats[machine][route] = make(map[string]int) + } + lapi_machine_stats[machine][route][method] += ival + case "cs_lapi_bouncer_requests_total": + if _, ok := lapi_bouncer_stats[bouncer]; !ok { + lapi_bouncer_stats[bouncer] = make(map[string]map[string]int) + } + if _, ok := lapi_bouncer_stats[bouncer][route]; !ok { + lapi_bouncer_stats[bouncer][route] = make(map[string]int) + } + lapi_bouncer_stats[bouncer][route][method] += ival + case "cs_lapi_decisions_ko_total", "cs_lapi_decisions_ok_total": + if _, ok := lapi_decisions_stats[bouncer]; !ok { + lapi_decisions_stats[bouncer] = struct { + NonEmpty int + Empty int + }{} + } + x := lapi_decisions_stats[bouncer] + if fam.Name == "cs_lapi_decisions_ko_total" { + x.Empty += ival + } else if fam.Name == "cs_lapi_decisions_ok_total" { + x.NonEmpty += ival + } + 
lapi_decisions_stats[bouncer] = x default: continue } } } - if config.output == "human" { + if csConfig.Cscli.Output == "human" { acquisTable := tablewriter.NewWriter(os.Stdout) acquisTable.SetHeader([]string{"Source", "Lines read", "Lines parsed", "Lines unparsed", "Lines poured to bucket"}) @@ -191,22 +273,93 @@ func ShowPrometheus(url string) { log.Warningf("while collecting acquis stats : %s", err) } - log.Printf("Buckets Metrics:") - bucketsTable.Render() - log.Printf("Acquisition Metrics:") - acquisTable.Render() - log.Printf("Parser Metrics:") - parsersTable.Render() - } else if config.output == "json" { - for _, val := range []map[string]map[string]int{acquis_stats, parsers_stats, buckets_stats} { + lapiMachinesTable := tablewriter.NewWriter(os.Stdout) + lapiMachinesTable.SetHeader([]string{"Machine", "Route", "Method", "Hits"}) + if err := lapiMetricsToTable(lapiMachinesTable, lapi_machine_stats); err != nil { + log.Warningf("while collecting machine lapi stats : %s", err) + } + + //lapiMetricsToTable + lapiBouncersTable := tablewriter.NewWriter(os.Stdout) + lapiBouncersTable.SetHeader([]string{"Bouncer", "Route", "Method", "Hits"}) + if err := lapiMetricsToTable(lapiBouncersTable, lapi_bouncer_stats); err != nil { + log.Warningf("while collecting bouncer lapi stats : %s", err) + } + + lapiDecisionsTable := tablewriter.NewWriter(os.Stdout) + lapiDecisionsTable.SetHeader([]string{"Bouncer", "Empty answers", "Non-empty answers"}) + for bouncer, hits := range lapi_decisions_stats { + row := []string{} + row = append(row, bouncer) + row = append(row, fmt.Sprintf("%d", hits.Empty)) + row = append(row, fmt.Sprintf("%d", hits.NonEmpty)) + lapiDecisionsTable.Append(row) + } + + /*unfortunately, we can't reuse metricsToTable as the structure is too different :/*/ + lapiTable := tablewriter.NewWriter(os.Stdout) + lapiTable.SetHeader([]string{"Route", "Method", "Hits"}) + sortedKeys := []string{} + for akey := range lapi_stats { + sortedKeys = append(sortedKeys, akey) + } + sort.Strings(sortedKeys) + for _, alabel := range sortedKeys { + astats := lapi_stats[alabel] + subKeys := []string{} + for skey := range astats { + subKeys = append(subKeys, skey) + } + sort.Strings(subKeys) + for _, sl := range subKeys { + row := []string{} + row = append(row, alabel) + row = append(row, sl) + row = append(row, fmt.Sprintf("%d", astats[sl])) + lapiTable.Append(row) + } + } + + if bucketsTable.NumLines() > 0 { + log.Printf("Buckets Metrics:") + bucketsTable.Render() + } + if acquisTable.NumLines() > 0 { + log.Printf("Acquisition Metrics:") + acquisTable.Render() + } + if parsersTable.NumLines() > 0 { + log.Printf("Parser Metrics:") + parsersTable.Render() + } + if lapiTable.NumLines() > 0 { + log.Printf("Local Api Metrics:") + lapiTable.Render() + } + if lapiMachinesTable.NumLines() > 0 { + log.Printf("Local Api Machines Metrics:") + lapiMachinesTable.Render() + } + if lapiBouncersTable.NumLines() > 0 { + log.Printf("Local Api Bouncers Metrics:") + lapiBouncersTable.Render() + } + + if lapiDecisionsTable.NumLines() > 0 { + log.Printf("Local Api Bouncers Decisions:") + lapiDecisionsTable.Render() + } + + } else if csConfig.Cscli.Output == "json" { + for _, val := range []interface{}{acquis_stats, parsers_stats, buckets_stats, lapi_stats, lapi_bouncer_stats, lapi_machine_stats, lapi_decisions_stats} { x, err := json.MarshalIndent(val, "", " ") if err != nil { log.Fatalf("failed to unmarshal metrics : %v", err) } fmt.Printf("%s\n", string(x)) } - } else if config.output == "raw" { - for _, val := range 
-var purl string - func NewMetricsCmd() *cobra.Command { /* ---- UPDATE COMMAND */ var cmdMetrics = &cobra.Command{ @@ -226,10 +377,10 @@ func NewMetricsCmd() *cobra.Command { Long: `Fetch metrics from the prometheus server and display them in a human-friendly way`, Args: cobra.ExactArgs(0), Run: func(cmd *cobra.Command, args []string) { - ShowPrometheus(purl) + ShowPrometheus(prometheusURL) }, } - cmdMetrics.PersistentFlags().StringVarP(&purl, "url", "u", "http://127.0.0.1:6060/metrics", "Prometheus url") + cmdMetrics.PersistentFlags().StringVarP(&prometheusURL, "url", "u", "http://127.0.0.1:6060/metrics", "Prometheus url") return cmdMetrics }
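The three new command files that follow (parsers, postoverflows, scenarios) share one cobra layout; here is a minimal sketch of that pattern, with illustrative names that are not part of the patch:

    package main

    import (
        "fmt"

        "github.com/spf13/cobra"
    )

    var configured bool // stand-in for the real configuration check

    func newThingsCmd() *cobra.Command {
        cmd := &cobra.Command{
            Use:  "things [action]",
            Args: cobra.MinimumNArgs(1),
            // refuse to run until the CLI is configured, as the real commands do
            PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
                if !configured {
                    return fmt.Errorf("you must configure cli before interacting with hub")
                }
                return nil
            },
        }
        install := &cobra.Command{
            Use:  "install [config]",
            Args: cobra.MinimumNArgs(1),
            Run:  func(cmd *cobra.Command, args []string) { /* call into cwhub here */ },
        }
        cmd.AddCommand(install)
        return cmd
    }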
log.Fatalf("Failed to get Hub index : %v", err) + } + + if removeAll { + RemoveMany(cwhub.PARSERS, "") + } else { + for _, name := range args { + RemoveMany(cwhub.PARSERS, name) + } + } + }, + } + cmdParsersRemove.PersistentFlags().BoolVar(&purgeRemove, "purge", false, "Delete source file too") + cmdParsersRemove.PersistentFlags().BoolVar(&removeAll, "all", false, "Delete all the parsers") + cmdParsers.AddCommand(cmdParsersRemove) + + var cmdParsersUpgrade = &cobra.Command{ + Use: "upgrade [config]", + Short: "Upgrade given parser(s)", + Long: `Fetch and upgrade given parser(s) from hub`, + Example: `cscli parsers upgrade crowdsec/xxx crowdsec/xyz`, + Args: cobra.MinimumNArgs(1), + Run: func(cmd *cobra.Command, args []string) { + if err := cwhub.GetHubIdx(csConfig.Cscli); err != nil { + log.Fatalf("Failed to get Hub index : %v", err) + } + if upgradeAll { + UpgradeConfig(cwhub.PARSERS, "", forceUpgrade) + } else { + for _, name := range args { + UpgradeConfig(cwhub.PARSERS, name, forceUpgrade) + } + } + }, + } + cmdParsersUpgrade.PersistentFlags().BoolVar(&upgradeAll, "all", false, "Upgrade all the parsers") + cmdParsersUpgrade.PersistentFlags().BoolVar(&forceUpgrade, "force", false, "Force install : Overwrite tainted and outdated files") + cmdParsers.AddCommand(cmdParsersUpgrade) + + var cmdParsersInspect = &cobra.Command{ + Use: "inspect [name]", + Short: "Inspect given parser", + Long: `Inspect given parser`, + Example: `cscli parsers inspect crowdsec/xxx`, + Args: cobra.MinimumNArgs(1), + Run: func(cmd *cobra.Command, args []string) { + if err := cwhub.GetHubIdx(csConfig.Cscli); err != nil { + log.Fatalf("failed to get Hub index : %v", err) + } + InspectItem(args[0], cwhub.PARSERS) + }, + } + cmdParsersInspect.PersistentFlags().StringVarP(&prometheusURL, "url", "u", "http://127.0.0.1:6060/metrics", "Prometheus url") + cmdParsers.AddCommand(cmdParsersInspect) + + var cmdParsersList = &cobra.Command{ + Use: "list [name]", + Short: "List all parsers or given one", + Long: `List all parsers or given one`, + Example: `cscli parsers list +cscli parser list crowdsecurity/xxx`, + Run: func(cmd *cobra.Command, args []string) { + if err := cwhub.GetHubIdx(csConfig.Cscli); err != nil { + log.Fatalf("failed to get Hub index : %v", err) + } + ListItem(cwhub.PARSERS, args) + }, + } + cmdParsersList.PersistentFlags().BoolVarP(&listAll, "all", "a", false, "List as well disabled items") + cmdParsers.AddCommand(cmdParsersList) + + return cmdParsers +} diff --git a/cmd/crowdsec-cli/postoverflows.go b/cmd/crowdsec-cli/postoverflows.go new file mode 100644 index 000000000..016012861 --- /dev/null +++ b/cmd/crowdsec-cli/postoverflows.go @@ -0,0 +1,139 @@ +package main + +import ( + "fmt" + + "github.com/crowdsecurity/crowdsec/pkg/cwhub" + + log "github.com/sirupsen/logrus" + + "github.com/spf13/cobra" +) + +func NewPostOverflowsCmd() *cobra.Command { + var cmdPostOverflows = &cobra.Command{ + Use: "postoverflows [action] [config]", + Short: "Install/Remove/Upgrade/Inspect postoverflow(s) from hub", + Example: `cscli postoverflows install crowdsecurity/cdn-whitelist + cscli postoverflows inspect crowdsecurity/cdn-whitelist + cscli postoverflows upgrade crowdsecurity/cdn-whitelist + cscli postoverflows list + cscli postoverflows remove crowdsecurity/cdn-whitelist`, + Args: cobra.MinimumNArgs(1), + PersistentPreRunE: func(cmd *cobra.Command, args []string) error { + if csConfig.Cscli == nil { + return fmt.Errorf("you must configure cli before interacting with hub") + } + + if err := setHubBranch(); err != nil 
diff --git a/cmd/crowdsec-cli/postoverflows.go b/cmd/crowdsec-cli/postoverflows.go new file mode 100644 index 000000000..016012861 --- /dev/null +++ b/cmd/crowdsec-cli/postoverflows.go @@ -0,0 +1,139 @@ +package main + +import ( + "fmt" + + "github.com/crowdsecurity/crowdsec/pkg/cwhub" + + log "github.com/sirupsen/logrus" + + "github.com/spf13/cobra" +) + +func NewPostOverflowsCmd() *cobra.Command { + var cmdPostOverflows = &cobra.Command{ + Use: "postoverflows [action] [config]", + Short: "Install/Remove/Upgrade/Inspect postoverflow(s) from hub", + Example: `cscli postoverflows install crowdsecurity/cdn-whitelist + cscli postoverflows inspect crowdsecurity/cdn-whitelist + cscli postoverflows upgrade crowdsecurity/cdn-whitelist + cscli postoverflows list + cscli postoverflows remove crowdsecurity/cdn-whitelist`, + Args: cobra.MinimumNArgs(1), + PersistentPreRunE: func(cmd *cobra.Command, args []string) error { + if csConfig.Cscli == nil { + return fmt.Errorf("you must configure cli before interacting with hub") + } + + if err := setHubBranch(); err != nil { + return fmt.Errorf("error while setting hub branch: %s", err) + } + return nil + }, + PersistentPostRun: func(cmd *cobra.Command, args []string) { + if cmd.Name() == "inspect" || cmd.Name() == "list" { + return + } + log.Infof("Run 'systemctl reload crowdsec' for the new configuration to be effective.") + }, + } + + var cmdPostOverflowsInstall = &cobra.Command{ + Use: "install [config]", + Short: "Install given postoverflow(s)", + Long: `Fetch and install given postoverflow(s) from hub`, + Example: `cscli postoverflows install crowdsec/xxx crowdsec/xyz`, + Args: cobra.MinimumNArgs(1), + Run: func(cmd *cobra.Command, args []string) { + if err := cwhub.GetHubIdx(csConfig.Cscli); err != nil { + log.Fatalf("failed to get Hub index : %v", err) + } + for _, name := range args { + InstallItem(name, cwhub.PARSERS_OVFLW, forceInstall) + } + }, + } + cmdPostOverflowsInstall.PersistentFlags().BoolVarP(&downloadOnly, "download-only", "d", false, "Only download packages, don't enable") + cmdPostOverflowsInstall.PersistentFlags().BoolVar(&forceInstall, "force", false, "Force install : Overwrite tainted and outdated files") + cmdPostOverflows.AddCommand(cmdPostOverflowsInstall) + + var cmdPostOverflowsRemove = &cobra.Command{ + Use: "remove [config]", + Short: "Remove given postoverflow(s)", + Long: `Remove given postoverflow(s)`, + Example: `cscli postoverflows remove crowdsec/xxx crowdsec/xyz`, + Args: cobra.MinimumNArgs(1), + Run: func(cmd *cobra.Command, args []string) { + if err := cwhub.GetHubIdx(csConfig.Cscli); err != nil { + log.Fatalf("Failed to get Hub index : %v", err) + } + + if removeAll { + RemoveMany(cwhub.PARSERS_OVFLW, "") + } else { + for _, name := range args { + RemoveMany(cwhub.PARSERS_OVFLW, name) + } + } + }, + } + cmdPostOverflowsRemove.PersistentFlags().BoolVar(&purgeRemove, "purge", false, "Delete source file in ~/.cscli/hub/ too") + cmdPostOverflowsRemove.PersistentFlags().BoolVar(&removeAll, "all", false, "Delete all the files in selected scope") + cmdPostOverflows.AddCommand(cmdPostOverflowsRemove) + + var cmdPostOverflowsUpgrade = &cobra.Command{ + Use: "upgrade [config]", + Short: "Upgrade given postoverflow(s)", + Long: `Fetch and upgrade given postoverflow(s) from hub`, + Example: `cscli postoverflows upgrade crowdsec/xxx crowdsec/xyz`, + Args: cobra.MinimumNArgs(1), + Run: func(cmd *cobra.Command, args []string) { + if err := cwhub.GetHubIdx(csConfig.Cscli); err != nil { + log.Fatalf("Failed to get Hub index : %v", err) + } + if upgradeAll { + UpgradeConfig(cwhub.PARSERS_OVFLW, "", forceUpgrade) + } else { + for _, name := range args { + UpgradeConfig(cwhub.PARSERS_OVFLW, name, forceUpgrade) + } + } + }, + } + cmdPostOverflowsUpgrade.PersistentFlags().BoolVar(&upgradeAll, "all", false, "Upgrade all the postoverflows") + cmdPostOverflowsUpgrade.PersistentFlags().BoolVar(&forceUpgrade, "force", false, "Force upgrade : Overwrite tainted and outdated files") + cmdPostOverflows.AddCommand(cmdPostOverflowsUpgrade) + + var cmdPostOverflowsInspect = &cobra.Command{ + Use: "inspect [config]", + Short: "Inspect given postoverflow", + Long: `Inspect given postoverflow`, + Example: `cscli postoverflows inspect crowdsec/xxx`, + Args: cobra.MinimumNArgs(1), + Run: func(cmd *cobra.Command, args []string) { + if err := cwhub.GetHubIdx(csConfig.Cscli); err != nil { + log.Fatalf("failed to get Hub index : %v", err) + } + InspectItem(args[0], cwhub.PARSERS_OVFLW) + }, + } + cmdPostOverflows.AddCommand(cmdPostOverflowsInspect) + + var
cmdPostOverflowsList = &cobra.Command{ + Use: "list [config]", + Short: "List all postoverflows or given one", + Long: `List all postoverflows or given one`, + Example: `cscli postoverflows list +cscli postoverflows list crowdsecurity/xxx`, + Run: func(cmd *cobra.Command, args []string) { + if err := cwhub.GetHubIdx(csConfig.Cscli); err != nil { + log.Fatalf("failed to get Hub index : %v", err) + } + ListItem(cwhub.PARSERS_OVFLW, args) + }, + } + cmdPostOverflowsList.PersistentFlags().BoolVarP(&listAll, "all", "a", false, "List as well disabled items") + cmdPostOverflows.AddCommand(cmdPostOverflowsList) + + return cmdPostOverflows +} diff --git a/cmd/crowdsec-cli/remove.go b/cmd/crowdsec-cli/remove.go deleted file mode 100644 index 48fbe2cda..000000000 --- a/cmd/crowdsec-cli/remove.go +++ /dev/null @@ -1,147 +0,0 @@ -package main - -import ( - "fmt" - - "github.com/crowdsecurity/crowdsec/pkg/cwhub" - - log "github.com/sirupsen/logrus" - "github.com/spf13/cobra" -) - -var purge_remove, remove_all bool - -func RemoveMany(ttype string, name string) { - var err error - var disabled int - for _, v := range cwhub.HubIdx[ttype] { - if name != "" && v.Name == name { - v, err = cwhub.DisableItem(v, cwhub.Installdir, cwhub.Hubdir, purge_remove) - if err != nil { - log.Fatalf("unable to disable %s : %v", v.Name, err) - } - cwhub.HubIdx[ttype][v.Name] = v - return - } else if name == "" && remove_all { - v, err = cwhub.DisableItem(v, cwhub.Installdir, cwhub.Hubdir, purge_remove) - if err != nil { - log.Fatalf("unable to disable %s : %v", v.Name, err) - } - cwhub.HubIdx[ttype][v.Name] = v - disabled += 1 - } - } - if name != "" && !remove_all { - log.Errorf("%s not found", name) - return - } - log.Infof("Disabled %d items", disabled) -} - -func NewRemoveCmd() *cobra.Command { - - var cmdRemove = &cobra.Command{ - Use: "remove [type] ", - Short: "Remove/disable configuration(s)", - Long: ` - Remove local configuration. - -[type] must be parser, scenario, postoverflow, collection - -[config_name] must be a valid config name from [Crowdsec Hub](https://hub.crowdsec.net) or locally installed. 
- `, - Example: `cscli remove [type] [config_name]`, - Args: cobra.MinimumNArgs(1), - PersistentPreRunE: func(cmd *cobra.Command, args []string) error { - if !config.configured { - return fmt.Errorf("you must configure cli before interacting with hub") - } - return nil - }, - PersistentPostRun: func(cmd *cobra.Command, args []string) { - log.Infof("Run 'systemctl reload crowdsec' for the new configuration to be effective.") - }, - } - cmdRemove.PersistentFlags().BoolVar(&purge_remove, "purge", false, "Delete source file in ~/.cscli/hub/ too") - cmdRemove.PersistentFlags().BoolVar(&remove_all, "all", false, "Delete all the files in selected scope") - var cmdRemoveParser = &cobra.Command{ - Use: "parser ", - Short: "Remove/disable parser", - Long: ` must be a valid parser.`, - Args: cobra.MinimumNArgs(0), - Run: func(cmd *cobra.Command, args []string) { - if err := cwhub.GetHubIdx(); err != nil { - log.Fatalf("Failed to get Hub index : %v", err) - } - - if remove_all { - RemoveMany(cwhub.PARSERS, "") - } else { - for _, name := range args { - RemoveMany(cwhub.PARSERS, name) - } - } - }, - } - cmdRemove.AddCommand(cmdRemoveParser) - var cmdRemoveScenario = &cobra.Command{ - Use: "scenario [config]", - Short: "Remove/disable scenario", - Long: ` must be a valid scenario.`, - Args: cobra.MinimumNArgs(0), - Run: func(cmd *cobra.Command, args []string) { - if err := cwhub.GetHubIdx(); err != nil { - log.Fatalf("Failed to get Hub index : %v", err) - } - if remove_all { - RemoveMany(cwhub.SCENARIOS, "") - } else { - for _, name := range args { - RemoveMany(cwhub.SCENARIOS, name) - } - } - }, - } - cmdRemove.AddCommand(cmdRemoveScenario) - var cmdRemoveCollection = &cobra.Command{ - Use: "collection [config]", - Short: "Remove/disable collection", - Long: ` must be a valid collection.`, - Args: cobra.MinimumNArgs(0), - Run: func(cmd *cobra.Command, args []string) { - if err := cwhub.GetHubIdx(); err != nil { - log.Fatalf("Failed to get Hub index : %v", err) - } - if remove_all { - RemoveMany(cwhub.COLLECTIONS, "") - } else { - for _, name := range args { - RemoveMany(cwhub.COLLECTIONS, name) - } - } - }, - } - cmdRemove.AddCommand(cmdRemoveCollection) - - var cmdRemovePostoverflow = &cobra.Command{ - Use: "postoverflow [config]", - Short: "Remove/disable postoverflow parser", - Long: ` must be a valid collection.`, - Args: cobra.MinimumNArgs(0), - Run: func(cmd *cobra.Command, args []string) { - if err := cwhub.GetHubIdx(); err != nil { - log.Fatalf("Failed to get Hub index : %v", err) - } - if remove_all { - RemoveMany(cwhub.PARSERS_OVFLW, "") - } else { - for _, name := range args { - RemoveMany(cwhub.PARSERS_OVFLW, name) - } - } - }, - } - cmdRemove.AddCommand(cmdRemovePostoverflow) - - return cmdRemove -} diff --git a/cmd/crowdsec-cli/scenarios.go b/cmd/crowdsec-cli/scenarios.go new file mode 100644 index 000000000..33f92f382 --- /dev/null +++ b/cmd/crowdsec-cli/scenarios.go @@ -0,0 +1,141 @@ +package main + +import ( + "fmt" + + "github.com/crowdsecurity/crowdsec/pkg/cwhub" + + log "github.com/sirupsen/logrus" + + "github.com/spf13/cobra" +) + +func NewScenariosCmd() *cobra.Command { + var cmdScenarios = &cobra.Command{ + Use: "scenarios [action] [config]", + Short: "Install/Remove/Upgrade/Inspect scenario(s) from hub", + Example: `cscli scenarios list [-a] +cscli scenarios install crowdsecurity/ssh-bf +cscli scenarios inspect crowdsecurity/ssh-bf +cscli scenarios upgrade crowdsecurity/ssh-bf +cscli scenarios remove crowdsecurity/ssh-bf +`, + Args: cobra.MinimumNArgs(1), + PersistentPreRunE: 
func(cmd *cobra.Command, args []string) error { + if csConfig.Cscli == nil { + return fmt.Errorf("you must configure cli before interacting with hub") + } + + if err := setHubBranch(); err != nil { + return fmt.Errorf("error while setting hub branch: %s", err) + } + return nil + }, + PersistentPostRun: func(cmd *cobra.Command, args []string) { + if cmd.Name() == "inspect" || cmd.Name() == "list" { + return + } + log.Infof("Run 'systemctl reload crowdsec' for the new configuration to be effective.") + }, + } + + var cmdScenariosInstall = &cobra.Command{ + Use: "install [config]", + Short: "Install given scenario(s)", + Long: `Fetch and install given scenario(s) from hub`, + Example: `cscli scenarios install crowdsec/xxx crowdsec/xyz`, + Args: cobra.MinimumNArgs(1), + Run: func(cmd *cobra.Command, args []string) { + if err := cwhub.GetHubIdx(csConfig.Cscli); err != nil { + log.Fatalf("failed to get Hub index : %v", err) + } + for _, name := range args { + InstallItem(name, cwhub.SCENARIOS, forceInstall) + } + }, + } + cmdScenariosInstall.PersistentFlags().BoolVarP(&downloadOnly, "download-only", "d", false, "Only download packages, don't enable") + cmdScenariosInstall.PersistentFlags().BoolVar(&forceInstall, "force", false, "Force install : Overwrite tainted and outdated files") + cmdScenarios.AddCommand(cmdScenariosInstall) + + var cmdScenariosRemove = &cobra.Command{ + Use: "remove [config]", + Short: "Remove given scenario(s)", + Long: `Remove given scenario(s)`, + Example: `cscli scenarios remove crowdsec/xxx crowdsec/xyz`, + Args: cobra.MinimumNArgs(1), + Run: func(cmd *cobra.Command, args []string) { + if err := cwhub.GetHubIdx(csConfig.Cscli); err != nil { + log.Fatalf("Failed to get Hub index : %v", err) + } + + if removeAll { + RemoveMany(cwhub.SCENARIOS, "") + } else { + for _, name := range args { + RemoveMany(cwhub.SCENARIOS, name) + } + } + }, + } + cmdScenariosRemove.PersistentFlags().BoolVar(&purgeRemove, "purge", false, "Delete source file in ~/.cscli/hub/ too") + cmdScenariosRemove.PersistentFlags().BoolVar(&removeAll, "all", false, "Delete all the files in selected scope") + cmdScenarios.AddCommand(cmdScenariosRemove) + + var cmdScenariosUpgrade = &cobra.Command{ + Use: "upgrade [config]", + Short: "Upgrade given scenario(s)", + Long: `Fetch and upgrade given scenario(s) from hub`, + Example: `cscli scenarios upgrade crowdsec/xxx crowdsec/xyz`, + Args: cobra.MinimumNArgs(1), + Run: func(cmd *cobra.Command, args []string) { + if err := cwhub.GetHubIdx(csConfig.Cscli); err != nil { + log.Fatalf("Failed to get Hub index : %v", err) + } + if upgradeAll { + UpgradeConfig(cwhub.SCENARIOS, "", forceUpgrade) + } else { + for _, name := range args { + UpgradeConfig(cwhub.SCENARIOS, name, forceUpgrade) + } + } + }, + } + cmdScenariosUpgrade.PersistentFlags().BoolVar(&upgradeAll, "all", false, "Upgrade all the scenarios") + cmdScenariosUpgrade.PersistentFlags().BoolVar(&forceUpgrade, "force", false, "Force upgrade : Overwrite tainted and outdated files") + cmdScenarios.AddCommand(cmdScenariosUpgrade) + + var cmdScenariosInspect = &cobra.Command{ + Use: "inspect [config]", + Short: "Inspect given scenario", + Long: `Inspect given scenario`, + Example: `cscli scenarios inspect crowdsec/xxx`, + Args: cobra.MinimumNArgs(1), + Run: func(cmd *cobra.Command, args []string) { + if err := cwhub.GetHubIdx(csConfig.Cscli); err != nil { + log.Fatalf("failed to get Hub index : %v", err) + } + InspectItem(args[0], cwhub.SCENARIOS) + }, + } +
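+ //inspect renders live metrics for the item (see ShowMetrics in utils.go below), hence the prometheus endpoint flag registered here: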
cmdScenariosInspect.PersistentFlags().StringVarP(&prometheusURL, "url", "u", "http://127.0.0.1:6060/metrics", "Prometheus url") + cmdScenarios.AddCommand(cmdScenariosInspect) + + var cmdScenariosList = &cobra.Command{ + Use: "list [config]", + Short: "List all scenario(s) or given one", + Long: `List all scenario(s) or given one`, + Example: `cscli scenarios list +cscli scenarios list crowdsecurity/xxx`, + Run: func(cmd *cobra.Command, args []string) { + if err := cwhub.GetHubIdx(csConfig.Cscli); err != nil { + log.Fatalf("failed to get Hub index : %v", err) + } + ListItem(cwhub.SCENARIOS, args) + }, + } + cmdScenariosList.PersistentFlags().BoolVarP(&listAll, "all", "a", false, "List as well disabled items") + cmdScenarios.AddCommand(cmdScenariosList) + + return cmdScenarios +} diff --git a/cmd/crowdsec-cli/simulation.go b/cmd/crowdsec-cli/simulation.go index 4df8a8158..a6339212c 100644 --- a/cmd/crowdsec-cli/simulation.go +++ b/cmd/crowdsec-cli/simulation.go @@ -11,24 +11,25 @@ import ( ) func addToExclusion(name string) error { - config.SimulationCfg.Exclusions = append(config.SimulationCfg.Exclusions, name) + csConfig.Crowdsec.SimulationConfig.Exclusions = append(csConfig.Crowdsec.SimulationConfig.Exclusions, name) return nil } func removeFromExclusion(name string) error { - index := indexOf(name, config.SimulationCfg.Exclusions) + index := indexOf(name, csConfig.Crowdsec.SimulationConfig.Exclusions) // Remove element from the slice - config.SimulationCfg.Exclusions[index] = config.SimulationCfg.Exclusions[len(config.SimulationCfg.Exclusions)-1] - config.SimulationCfg.Exclusions[len(config.SimulationCfg.Exclusions)-1] = "" - config.SimulationCfg.Exclusions = config.SimulationCfg.Exclusions[:len(config.SimulationCfg.Exclusions)-1] + csConfig.Crowdsec.SimulationConfig.Exclusions[index] = csConfig.Crowdsec.SimulationConfig.Exclusions[len(csConfig.Crowdsec.SimulationConfig.Exclusions)-1] + csConfig.Crowdsec.SimulationConfig.Exclusions[len(csConfig.Crowdsec.SimulationConfig.Exclusions)-1] = "" + csConfig.Crowdsec.SimulationConfig.Exclusions = csConfig.Crowdsec.SimulationConfig.Exclusions[:len(csConfig.Crowdsec.SimulationConfig.Exclusions)-1] return nil } func enableGlobalSimulation() error { - config.SimulationCfg.Simulation = true - config.SimulationCfg.Exclusions = []string{} + csConfig.Crowdsec.SimulationConfig.Simulation = new(bool) + *csConfig.Crowdsec.SimulationConfig.Simulation = true + csConfig.Crowdsec.SimulationConfig.Exclusions = []string{} if err := dumpSimulationFile(); err != nil { log.Fatalf("unable to dump simulation file: %s", err.Error()) @@ -40,28 +41,31 @@ func enableGlobalSimulation() error { } func dumpSimulationFile() error { - newConfigSim, err := yaml.Marshal(config.SimulationCfg) + newConfigSim, err := yaml.Marshal(csConfig.Crowdsec.SimulationConfig) if err != nil { return fmt.Errorf("unable to marshal simulation configuration: %s", err) } - err = ioutil.WriteFile(config.SimulationCfgPath, newConfigSim, 0644) + err = ioutil.WriteFile(csConfig.ConfigPaths.SimulationFilePath, newConfigSim, 0644) if err != nil { - return fmt.Errorf("write simulation config in '%s' : %s", config.SimulationCfgPath, err) + return fmt.Errorf("write simulation config in '%s' failed: %s", csConfig.ConfigPaths.SimulationFilePath, err) } + log.Debugf("updated simulation file %s", csConfig.ConfigPaths.SimulationFilePath) return nil } func disableGlobalSimulation() error { - config.SimulationCfg.Simulation = false - config.SimulationCfg.Exclusions = []string{} - newConfigSim, err := 
yaml.Marshal(config.SimulationCfg) + csConfig.Crowdsec.SimulationConfig.Simulation = new(bool) + *csConfig.Crowdsec.SimulationConfig.Simulation = false + + csConfig.Crowdsec.SimulationConfig.Exclusions = []string{} + newConfigSim, err := yaml.Marshal(csConfig.Crowdsec.SimulationConfig) if err != nil { return fmt.Errorf("unable to marshal new simulation configuration: %s", err) } - err = ioutil.WriteFile(config.SimulationCfgPath, newConfigSim, 0644) + err = ioutil.WriteFile(csConfig.ConfigPaths.SimulationFilePath, newConfigSim, 0644) if err != nil { - return fmt.Errorf("unable to write new simulation config in '%s' : %s", config.SimulationCfgPath, err) + return fmt.Errorf("unable to write new simulation config in '%s' : %s", csConfig.ConfigPaths.SimulationFilePath, err) } log.Printf("global simulation: disabled") @@ -69,23 +73,23 @@ func disableGlobalSimulation() error { } func simulationStatus() error { - if config.SimulationCfg == nil { + if csConfig.Crowdsec.SimulationConfig == nil { log.Printf("global simulation: disabled (configuration file is missing)") return nil } - if config.SimulationCfg.Simulation { + if *csConfig.Crowdsec.SimulationConfig.Simulation { log.Println("global simulation: enabled") - if len(config.SimulationCfg.Exclusions) > 0 { + if len(csConfig.Crowdsec.SimulationConfig.Exclusions) > 0 { log.Println("Scenarios not in simulation mode :") - for _, scenario := range config.SimulationCfg.Exclusions { + for _, scenario := range csConfig.Crowdsec.SimulationConfig.Exclusions { log.Printf(" - %s", scenario) } } } else { log.Println("global simulation: disabled") - if len(config.SimulationCfg.Exclusions) > 0 { + if len(csConfig.Crowdsec.SimulationConfig.Exclusions) > 0 { log.Println("Scenarios in simulation mode :") - for _, scenario := range config.SimulationCfg.Exclusions { + for _, scenario := range csConfig.Crowdsec.SimulationConfig.Exclusions { log.Printf(" - %s", scenario) } } @@ -95,55 +99,59 @@ func simulationStatus() error { func NewSimulationCmds() *cobra.Command { var cmdSimulation = &cobra.Command{ - Use: "simulation enable|disable [scenario_name]", - Short: "", - Long: ``, + Use: "simulation [command]", + Short: "Manage simulation status of scenarios", + Example: `cscli simulation status +cscli simulation enable crowdsecurity/ssh-bf +cscli simulation disable crowdsecurity/ssh-bf`, PersistentPreRunE: func(cmd *cobra.Command, args []string) error { - if !config.configured { + if csConfig.Cscli == nil { return fmt.Errorf("you must configure cli before using simulation") } return nil }, PersistentPostRun: func(cmd *cobra.Command, args []string) { - log.Infof("Run 'systemctl reload crowdsec' for the new configuration to be effective.") + if cmd.Name() != "status" { + log.Infof("Run 'systemctl reload crowdsec' for the new configuration to be effective.") + } }, } cmdSimulation.Flags().SortFlags = false cmdSimulation.PersistentFlags().SortFlags = false + var forceGlobalSimulation bool var cmdSimulationEnable = &cobra.Command{ - Use: "enable [scenario_name]", + Use: "enable [scenario] [-global]", Short: "Enable the simulation, globally or on specified scenarios", - Long: ``, Example: `cscli simulation enable`, Run: func(cmd *cobra.Command, args []string) { - if err := cwhub.GetHubIdx(); err != nil { + if err := cwhub.GetHubIdx(csConfig.Cscli); err != nil { log.Fatalf("failed to get Hub index : %v", err) } if len(args) > 0 { for _, scenario := range args { - var v cwhub.Item - var ok bool - if _, ok = cwhub.HubIdx[cwhub.SCENARIOS]; ok { - if v, ok = 
cwhub.HubIdx[cwhub.SCENARIOS][scenario]; !ok { - log.Errorf("'%s' isn't present in hub index", scenario) - continue - } - if !v.Installed { - log.Warningf("'%s' isn't enabled", scenario) - } + var ( + item *cwhub.Item + ) + item = cwhub.GetItem(cwhub.SCENARIOS, scenario) + if item == nil { + log.Errorf("'%s' doesn't exist or is not a scenario", scenario) + continue } - isExcluded := inSlice(scenario, config.SimulationCfg.Exclusions) - if config.SimulationCfg.Simulation && !isExcluded { + if !item.Installed { + log.Warningf("'%s' isn't enabled", scenario) + } + isExcluded := inSlice(scenario, csConfig.Crowdsec.SimulationConfig.Exclusions) + if *csConfig.Crowdsec.SimulationConfig.Simulation && !isExcluded { log.Warningf("global simulation is already enabled") continue } - if !config.SimulationCfg.Simulation && isExcluded { + if !*csConfig.Crowdsec.SimulationConfig.Simulation && isExcluded { log.Warningf("simulation for '%s' already enabled", scenario) continue } - if config.SimulationCfg.Simulation && isExcluded { + if *csConfig.Crowdsec.SimulationConfig.Simulation && isExcluded { if err := removeFromExclusion(scenario); err != nil { log.Fatalf(err.Error()) } @@ -158,29 +166,31 @@ func NewSimulationCmds() *cobra.Command { if err := dumpSimulationFile(); err != nil { log.Fatalf("simulation enable: %s", err.Error()) } - } else { + } else if forceGlobalSimulation { if err := enableGlobalSimulation(); err != nil { log.Fatalf("unable to enable global simulation mode : %s", err.Error()) } + } else { + cmd.Help() } }, } + cmdSimulationEnable.Flags().BoolVarP(&forceGlobalSimulation, "global", "g", false, "Enable global simulation (reverse mode)") cmdSimulation.AddCommand(cmdSimulationEnable) var cmdSimulationDisable = &cobra.Command{ - Use: "disable [scenario_name]", + Use: "disable [scenario]", Short: "Disable the simulation mode. 
Disable only specified scenarios", - Long: ``, Example: `cscli simulation disable`, Run: func(cmd *cobra.Command, args []string) { if len(args) > 0 { for _, scenario := range args { - isExcluded := inSlice(scenario, config.SimulationCfg.Exclusions) - if !config.SimulationCfg.Simulation && !isExcluded { + isExcluded := inSlice(scenario, csConfig.Crowdsec.SimulationConfig.Exclusions) + if !*csConfig.Crowdsec.SimulationConfig.Simulation && !isExcluded { log.Warningf("%s isn't in simulation mode", scenario) continue } - if !config.SimulationCfg.Simulation && isExcluded { + if !*csConfig.Crowdsec.SimulationConfig.Simulation && isExcluded { if err := removeFromExclusion(scenario); err != nil { log.Fatalf(err.Error()) } @@ -199,19 +209,21 @@ func NewSimulationCmds() *cobra.Command { if err := dumpSimulationFile(); err != nil { log.Fatalf("simulation disable: %s", err.Error()) } - } else { + } else if forceGlobalSimulation { if err := disableGlobalSimulation(); err != nil { log.Fatalf("unable to disable global simulation mode : %s", err.Error()) } + } else { + cmd.Help() } }, } + cmdSimulationDisable.Flags().BoolVarP(&forceGlobalSimulation, "global", "g", false, "Disable global simulation (reverse mode)") cmdSimulation.AddCommand(cmdSimulationDisable) var cmdSimulationStatus = &cobra.Command{ Use: "status", Short: "Show simulation mode status", - Long: ``, Example: `cscli simulation status`, Run: func(cmd *cobra.Command, args []string) { if err := simulationStatus(); err != nil { diff --git a/cmd/crowdsec-cli/update.go b/cmd/crowdsec-cli/update.go deleted file mode 100644 index 15f241c28..000000000 --- a/cmd/crowdsec-cli/update.go +++ /dev/null @@ -1,38 +0,0 @@ -package main - -import ( - "fmt" - - "github.com/crowdsecurity/crowdsec/pkg/cwhub" - - log "github.com/sirupsen/logrus" - "github.com/spf13/cobra" -) - -func NewUpdateCmd() *cobra.Command { - /* ---- UPDATE COMMAND */ - var cmdUpdate = &cobra.Command{ - Use: "update", - Short: "Fetch available configs from hub", - Long: ` -Fetches the [.index.json](https://github.com/crowdsecurity/hub/blob/master/.index.json) file from hub, containing the list of available configs. 
-`, - Args: cobra.ExactArgs(0), - PersistentPreRunE: func(cmd *cobra.Command, args []string) error { - if !config.configured { - return fmt.Errorf("you must configure cli before interacting with hub") - } - - if err := setHubBranch(); err != nil { - return fmt.Errorf("error while setting hub branch: %s", err) - } - return nil - }, - Run: func(cmd *cobra.Command, args []string) { - if err := cwhub.UpdateHubIdx(); err != nil { - log.Fatalf("Failed to get Hub index : %v", err) - } - }, - } - return cmdUpdate -} diff --git a/cmd/crowdsec-cli/upgrade.go b/cmd/crowdsec-cli/upgrade.go deleted file mode 100644 index 6c121167f..000000000 --- a/cmd/crowdsec-cli/upgrade.go +++ /dev/null @@ -1,214 +0,0 @@ -package main - -import ( - "fmt" - - "github.com/crowdsecurity/crowdsec/pkg/cwhub" - - "github.com/enescakir/emoji" - log "github.com/sirupsen/logrus" - "github.com/spf13/cobra" -) - -var upgrade_all, force_upgrade bool - -func UpgradeConfig(ttype string, name string) { - var err error - var updated int - var found bool - - for _, v := range cwhub.HubIdx[ttype] { - //name mismatch - if name != "" && name != v.Name { - continue - } - if !v.Installed { - log.Debugf("skip %s, not installed", v.Name) - continue - } - if !v.Downloaded { - log.Warningf("%s : not downloaded, please install.", v.Name) - continue - } - found = true - if v.UpToDate && !force_upgrade { - log.Infof("%s : up-to-date", v.Name) - continue - } - v, err = cwhub.DownloadLatest(v, cwhub.Hubdir, force_upgrade, config.DataFolder) - if err != nil { - log.Fatalf("%s : download failed : %v", v.Name, err) - } - if !v.UpToDate { - if v.Tainted && !force_upgrade { - log.Infof("%v %s is tainted, --force to overwrite", emoji.Warning, v.Name) - continue - } else if v.Local { - log.Infof("%v %s is local", emoji.Prohibited, v.Name) - continue - } - } else { - log.Infof("%v %s : updated", emoji.Package, v.Name) - updated += 1 - } - cwhub.HubIdx[ttype][v.Name] = v - } - if !found { - log.Errorf("Didn't find %s", name) - } else if updated == 0 && found { - log.Errorf("Nothing to update") - } else if updated != 0 { - log.Infof("Upgraded %d items", updated) - } - -} - -func NewUpgradeCmd() *cobra.Command { - - var cmdUpgrade = &cobra.Command{ - Use: "upgrade [type] [config]", - Short: "Upgrade configuration(s)", - Long: ` -Upgrade configuration from the CrowdSec Hub. - -In order to upgrade latest versions of configuration, -the Hub cache should be [updated](./cscli_update.md). - -Tainted configuration will not be updated (use --force to update them). - -[type] must be parser, scenario, postoverflow, collection. - -[config_name] must be a valid config name from [Crowdsec Hub](https://hub.crowdsec.net). 
- - - `, - Example: `cscli upgrade [type] [config_name] -cscli upgrade --all # Upgrade all configurations types -cscli upgrade --force # Overwrite tainted configuration - `, - - Args: cobra.MinimumNArgs(0), - PersistentPreRunE: func(cmd *cobra.Command, args []string) error { - if !config.configured { - return fmt.Errorf("you must configure cli before interacting with hub") - } - - if err := setHubBranch(); err != nil { - return fmt.Errorf("error while setting hub branch: %s", err) - } - return nil - }, - Run: func(cmd *cobra.Command, args []string) { - if !upgrade_all && len(args) < 2 { - _ = cmd.Help() - return - } - if err := cwhub.GetHubIdx(); err != nil { - log.Fatalf("Failed to get Hub index : %v", err) - } - if upgrade_all && len(args) == 0 { - log.Warningf("Upgrade all : parsers, scenarios, collections.") - UpgradeConfig(cwhub.PARSERS, "") - UpgradeConfig(cwhub.PARSERS_OVFLW, "") - UpgradeConfig(cwhub.SCENARIOS, "") - UpgradeConfig(cwhub.COLLECTIONS, "") - } - //fmt.Println("upgrade all ?!: " + strings.Join(args, " ")) - }, - PersistentPostRun: func(cmd *cobra.Command, args []string) { - log.Infof("Run 'systemctl reload crowdsec' for the new configuration to be effective.") - }, - } - cmdUpgrade.PersistentFlags().BoolVar(&upgrade_all, "all", false, "Upgrade all configuration in scope") - cmdUpgrade.PersistentFlags().BoolVar(&force_upgrade, "force", false, "Overwrite existing files, even if tainted") - var cmdUpgradeParser = &cobra.Command{ - Use: "parser [config]", - Short: "Upgrade parser configuration(s)", - Long: `Upgrade one or more parser configurations`, - Example: ` - cscli upgrade parser crowdsec/apache-logs - - cscli upgrade parser -all - - cscli upgrade parser crowdsec/apache-logs --force`, - Args: cobra.MinimumNArgs(0), - Run: func(cmd *cobra.Command, args []string) { - if err := cwhub.GetHubIdx(); err != nil { - log.Fatalf("Failed to get Hub index : %v", err) - } - if upgrade_all { - UpgradeConfig(cwhub.PARSERS, "") - } else { - for _, name := range args { - UpgradeConfig(cwhub.PARSERS, name) - } - } - - }, - } - cmdUpgrade.AddCommand(cmdUpgradeParser) - var cmdUpgradeScenario = &cobra.Command{ - Use: "scenario [config]", - Short: "Upgrade scenario configuration(s)", - Long: `Upgrade one or more scenario configurations`, - Example: ` - cscli upgrade scenario -all - - cscli upgrade scenario crowdsec/http-404 --force `, - Args: cobra.MinimumNArgs(0), - Run: func(cmd *cobra.Command, args []string) { - if err := cwhub.GetHubIdx(); err != nil { - log.Fatalf("Failed to get Hub index : %v", err) - } - if upgrade_all { - UpgradeConfig(cwhub.SCENARIOS, "") - } else { - for _, name := range args { - UpgradeConfig(cwhub.SCENARIOS, name) - } - } - }, - } - cmdUpgrade.AddCommand(cmdUpgradeScenario) - var cmdUpgradeCollection = &cobra.Command{ - Use: "collection [config]", - Short: "Upgrade collection configuration(s)", - Long: `Upgrade one or more collection configurations`, - Example: ` - cscli upgrade collection crowdsec/apache-lamp - - cscli upgrade collection -all - - cscli upgrade collection crowdsec/apache-lamp --force`, - Args: cobra.MinimumNArgs(0), - Run: func(cmd *cobra.Command, args []string) { - if err := cwhub.GetHubIdx(); err != nil { - log.Fatalf("Failed to get Hub index : %v", err) - } - if upgrade_all { - UpgradeConfig(cwhub.COLLECTIONS, "") - } else { - for _, name := range args { - UpgradeConfig(cwhub.COLLECTIONS, name) - } - } - }, - } - cmdUpgrade.AddCommand(cmdUpgradeCollection) - - var cmdUpgradePostoverflow = &cobra.Command{ - Use: "postoverflow [config]", - 
Short: "Upgrade postoverflow parser configuration(s)", - Long: `Upgrade one or more postoverflow parser configurations`, - Example: ` - cscli upgrade postoverflow crowdsec/enrich-rdns - - cscli upgrade postoverflow -all - - cscli upgrade postoverflow crowdsec/enrich-rdns --force`, - Args: cobra.MinimumNArgs(0), - Run: func(cmd *cobra.Command, args []string) { - if err := cwhub.GetHubIdx(); err != nil { - log.Fatalf("Failed to get Hub index : %v", err) - } - if upgrade_all { - UpgradeConfig(cwhub.PARSERS_OVFLW, "") - } else { - for _, name := range args { - UpgradeConfig(cwhub.PARSERS_OVFLW, name) - } - } - }, - } - cmdUpgrade.AddCommand(cmdUpgradePostoverflow) - return cmdUpgrade -} diff --git a/cmd/crowdsec-cli/utils.go b/cmd/crowdsec-cli/utils.go index 75353a1e5..45e37d8cb 100644 --- a/cmd/crowdsec-cli/utils.go +++ b/cmd/crowdsec-cli/utils.go @@ -1,10 +1,26 @@ package main import ( + "encoding/json" + "fmt" + "io/ioutil" + "net" + "net/http" + "os" + "strconv" + "strings" + "time" + "github.com/crowdsecurity/crowdsec/pkg/cwhub" "github.com/crowdsecurity/crowdsec/pkg/cwversion" + "github.com/crowdsecurity/crowdsec/pkg/types" + "github.com/enescakir/emoji" + "github.com/olekukonko/tablewriter" + dto "github.com/prometheus/client_model/go" + "github.com/prometheus/prom2json" log "github.com/sirupsen/logrus" "golang.org/x/mod/semver" + "gopkg.in/yaml.v2" ) func inSlice(s string, slice []string) bool { @@ -25,6 +41,32 @@ func indexOf(s string, slice []string) int { return -1 } +func manageCliDecisionAlerts(ip *string, ipRange *string, scope *string, value *string) error { + + /*if a range is provided, change the scope*/ + if *ipRange != "" { + _, _, err := net.ParseCIDR(*ipRange) + if err != nil { + return fmt.Errorf("%s isn't a valid range", *ipRange) + } + } + if *ip != "" { + ipRepr := net.ParseIP(*ip) + if ipRepr == nil { + return fmt.Errorf("%s isn't a valid ip", *ip) + } + } + + //avoid confusion on scope (ip vs Ip and range vs Range) + switch strings.ToLower(*scope) { + case "ip": + *scope = types.Ip + case "range": + *scope = types.Range + } + return nil +} + func setHubBranch() error { /* if no branch has been specified in flags for the hub, then use the one corresponding to crowdsec version @@ -51,3 +93,549 @@ func setHubBranch() error { } return nil } + +func ListItem(itemType string, args []string) { + + var hubStatus []map[string]string + + if len(args) == 1 { + hubStatus = cwhub.HubStatus(itemType, args[0], listAll) + } else { + hubStatus = cwhub.HubStatus(itemType, "", listAll) + } + + if csConfig.Cscli.Output == "human" { + + table := tablewriter.NewWriter(os.Stdout) + table.SetCenterSeparator("") + table.SetColumnSeparator("") + + table.SetHeaderAlignment(tablewriter.ALIGN_LEFT) + table.SetAlignment(tablewriter.ALIGN_LEFT) + table.SetHeader([]string{"Name", fmt.Sprintf("%v Status", emoji.Package), "Version", "Local Path"}) + for _, v := range hubStatus { + table.Append([]string{v["name"], v["utf8_status"], v["local_version"], v["local_path"]}) + } + table.Render() + } else if csConfig.Cscli.Output == "json" { + x, err := json.MarshalIndent(hubStatus, "", " ") + if err != nil { + log.Fatalf("failed to unmarshal") + } + fmt.Printf("%s", string(x)) + } else if csConfig.Cscli.Output == "raw" { + for _, v := range hubStatus { + fmt.Printf("%s %s\n", v["name"], v["description"]) + } + } +} + +func InstallItem(name string, obtype string, force bool) { + it := cwhub.GetItem(obtype, name) + if it == nil { + log.Fatalf("unable to retrive item : %s", name) + } + item := *it + if 
downloadOnly && item.Downloaded && item.UpToDate { + log.Warningf("%s is already downloaded and up-to-date", item.Name) + if !force { + return + } + } + item, err := cwhub.DownloadLatest(csConfig.Cscli, item, forceInstall) + if err != nil { + log.Fatalf("error while downloading %s : %v", item.Name, err) + } + cwhub.AddItem(obtype, item) + if downloadOnly { + log.Infof("Downloaded %s to %s", item.Name, csConfig.Cscli.HubDir+"/"+item.RemotePath) + return + } + item, err = cwhub.EnableItem(csConfig.Cscli, item) + if err != nil { + log.Fatalf("error while enabled %s : %v.", item.Name, err) + } + cwhub.AddItem(obtype, item) + log.Infof("Enabled %s", item.Name) + return +} + +func RemoveMany(itemType string, name string) { + var err error + var disabled int + if name != "" { + it := cwhub.GetItem(itemType, name) + if it == nil { + log.Fatalf("unable to retrieve: %s", name) + } + item := *it + item, err = cwhub.DisableItem(csConfig.Cscli, item, purgeRemove) + if err != nil { + log.Fatalf("unable to disable %s : %v", item.Name, err) + } + cwhub.AddItem(itemType, item) + return + } else if name == "" && removeAll { + for _, v := range cwhub.GetItemMap(itemType) { + v, err = cwhub.DisableItem(csConfig.Cscli, v, purgeRemove) + if err != nil { + log.Fatalf("unable to disable %s : %v", v.Name, err) + } + cwhub.AddItem(itemType, v) + disabled++ + } + } + if name != "" && !removeAll { + log.Errorf("%s not found", name) + return + } + log.Infof("Disabled %d items", disabled) +} + +func UpgradeConfig(itemType string, name string, force bool) { + var err error + var updated int + var found bool + + for _, v := range cwhub.GetItemMap(itemType) { + if name != "" && name != v.Name { + continue + } + if !v.Installed { + log.Tracef("skip %s, not installed", v.Name) + if !force { + continue + } + } + if !v.Downloaded { + log.Warningf("%s : not downloaded, please install.", v.Name) + if !force { + continue + } + } + found = true + if v.UpToDate { + log.Infof("%s : up-to-date", v.Name) + if !force { + continue + } + } + v, err = cwhub.DownloadLatest(csConfig.Cscli, v, forceUpgrade) + if err != nil { + log.Fatalf("%s : download failed : %v", v.Name, err) + } + if !v.UpToDate { + if v.Tainted { + log.Infof("%v %s is tainted, --force to overwrite", emoji.Warning, v.Name) + } else if v.Local { + log.Infof("%v %s is local", emoji.Prohibited, v.Name) + } + } else { + log.Infof("%v %s : updated", emoji.Package, v.Name) + updated++ + } + cwhub.AddItem(itemType, v) + } + if !found { + log.Errorf("Didn't find %s", name) + } else if updated == 0 && found { + log.Errorf("Nothing to update") + } else if updated != 0 { + log.Infof("Upgraded %d items", updated) + } + +} + +func InspectItem(name string, objecitemType string) { + + hubItem := cwhub.GetItem(objecitemType, name) + if hubItem == nil { + log.Fatalf("unable to retrieve item.") + } + buff, err := yaml.Marshal(*hubItem) + if err != nil { + log.Fatalf("unable to marshal item : %s", err) + } + fmt.Printf("%s", string(buff)) + + fmt.Printf("\nCurrent metrics : \n\n") + ShowMetrics(hubItem) + +} + +func ShowMetrics(hubItem *cwhub.Item) { + switch hubItem.Type { + case cwhub.PARSERS: + metrics := GetParserMetric(prometheusURL, hubItem.Name) + ShowParserMetric(hubItem.Name, metrics) + case cwhub.SCENARIOS: + metrics := GetScenarioMetric(prometheusURL, hubItem.Name) + ShowScenarioMetric(hubItem.Name, metrics) + case cwhub.COLLECTIONS: + for _, item := range hubItem.Parsers { + metrics := GetParserMetric(prometheusURL, item) + ShowParserMetric(item, metrics) + } + for _, item := 
+func ShowMetrics(hubItem *cwhub.Item) { + switch hubItem.Type { + case cwhub.PARSERS: + metrics := GetParserMetric(prometheusURL, hubItem.Name) + ShowParserMetric(hubItem.Name, metrics) + case cwhub.SCENARIOS: + metrics := GetScenarioMetric(prometheusURL, hubItem.Name) + ShowScenarioMetric(hubItem.Name, metrics) + case cwhub.COLLECTIONS: + for _, item := range hubItem.Parsers { + metrics := GetParserMetric(prometheusURL, item) + ShowParserMetric(item, metrics) + } + for _, item := range hubItem.Scenarios { + metrics := GetScenarioMetric(prometheusURL, item) + ShowScenarioMetric(item, metrics) + } + for _, item := range hubItem.Collections { + subItem := cwhub.GetItem(cwhub.COLLECTIONS, item) + if subItem == nil { + log.Fatalf("unable to retrieve item '%s' from collection '%s'", item, hubItem.Name) + } + ShowMetrics(subItem) + } + default: + log.Errorf("item of type '%s' is unknown", hubItem.Type) + } +} + +/*This is a complete rip from prom2json*/ +func GetParserMetric(url string, itemName string) map[string]map[string]int { + stats := make(map[string]map[string]int) + + result := GetPrometheusMetric(url) + for idx, fam := range result { + if !strings.HasPrefix(fam.Name, "cs_") { + continue + } + log.Tracef("round %d", idx) + for _, m := range fam.Metrics { + metric := m.(prom2json.Metric) + name, ok := metric.Labels["name"] + if !ok { + log.Debugf("no name in Metric %v", metric.Labels) + } + if name != itemName { + continue + } + source, ok := metric.Labels["source"] + if !ok { + log.Debugf("no source in Metric %v", metric.Labels) + } + value := m.(prom2json.Metric).Value + fval, err := strconv.ParseFloat(value, 32) + if err != nil { + log.Errorf("Unexpected value %s : %s", value, err) + continue + } + ival := int(fval) + + switch fam.Name { + case "cs_reader_hits_total": + if _, ok := stats[source]; !ok { + stats[source] = make(map[string]int) + stats[source]["parsed"] = 0 + stats[source]["reads"] = 0 + stats[source]["unparsed"] = 0 + stats[source]["hits"] = 0 + } + stats[source]["reads"] += ival + case "cs_parser_hits_ok_total": + if _, ok := stats[source]; !ok { + stats[source] = make(map[string]int) + } + stats[source]["parsed"] += ival + case "cs_parser_hits_ko_total": + if _, ok := stats[source]; !ok { + stats[source] = make(map[string]int) + } + stats[source]["unparsed"] += ival + case "cs_node_hits_total": + if _, ok := stats[source]; !ok { + stats[source] = make(map[string]int) + } + stats[source]["hits"] += ival + case "cs_node_hits_ok_total": + if _, ok := stats[source]; !ok { + stats[source] = make(map[string]int) + } + stats[source]["parsed"] += ival + case "cs_node_hits_ko_total": + if _, ok := stats[source]; !ok { + stats[source] = make(map[string]int) + } + stats[source]["unparsed"] += ival + default: + continue + } + } + } + return stats +} + +func GetScenarioMetric(url string, itemName string) map[string]int { + stats := make(map[string]int) + + stats["instantiation"] = 0 + stats["curr_count"] = 0 + stats["overflow"] = 0 + stats["pour"] = 0 + stats["underflow"] = 0 + + result := GetPrometheusMetric(url) + for idx, fam := range result { + if !strings.HasPrefix(fam.Name, "cs_") { + continue + } + log.Tracef("round %d", idx) + for _, m := range fam.Metrics { + metric := m.(prom2json.Metric) + name, ok := metric.Labels["name"] + if !ok { + log.Debugf("no name in Metric %v", metric.Labels) + } + if name != itemName { + continue + } + value := m.(prom2json.Metric).Value + fval, err := strconv.ParseFloat(value, 32) + if err != nil { + log.Errorf("Unexpected value %s : %s", value, err) + continue + } + ival := int(fval) + + switch fam.Name { + case "cs_bucket_created_total": + stats["instantiation"] += ival + case "cs_buckets": + stats["curr_count"] += ival + case "cs_bucket_overflowed_total": + stats["overflow"] += ival + case "cs_bucket_poured_total": + stats["pour"] += ival + case "cs_bucket_underflowed_total": + stats["underflow"] += ival + default: + continue + } + } + } + return stats +} + 
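+ //note: prom2json families carry Metrics as []interface{}; each element type-asserts to prom2json.Metric, whose Labels map and Value are strings, hence the ParseFloat calls above.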
+func GetPrometheusMetric(url string) []*prom2json.Family { + mfChan := make(chan *dto.MetricFamily, 1024) + + // Start with the DefaultTransport for sane defaults. + transport := http.DefaultTransport.(*http.Transport).Clone() + // Conservatively disable HTTP keep-alives as this program will only + // ever need a single HTTP request. + transport.DisableKeepAlives = true + // Timeout early if the server doesn't even return the headers. + transport.ResponseHeaderTimeout = time.Minute + + go func() { + defer types.CatchPanic("crowdsec/GetPrometheusMetric") + err := prom2json.FetchMetricFamilies(url, mfChan, transport) + if err != nil { + log.Fatalf("failed to fetch prometheus metrics : %v", err) + } + }() + + result := []*prom2json.Family{} + for mf := range mfChan { + result = append(result, prom2json.NewFamily(mf)) + } + log.Debugf("Finished reading prometheus output, %d entries", len(result)) + + return result +} + +func ShowScenarioMetric(itemName string, metrics map[string]int) { + if metrics["instantiation"] == 0 { + return + } + table := tablewriter.NewWriter(os.Stdout) + table.SetHeader([]string{"Current Count", "Overflows", "Instantiated", "Poured", "Expired"}) + table.Append([]string{fmt.Sprintf("%d", metrics["curr_count"]), fmt.Sprintf("%d", metrics["overflow"]), fmt.Sprintf("%d", metrics["instantiation"]), fmt.Sprintf("%d", metrics["pour"]), fmt.Sprintf("%d", metrics["underflow"])}) + + fmt.Printf(" - (Scenario) %s: \n", itemName) + table.Render() + fmt.Println() +} + +func ShowParserMetric(itemName string, metrics map[string]map[string]int) { + skip := true + + table := tablewriter.NewWriter(os.Stdout) + table.SetHeader([]string{"Parsers", "Hits", "Parsed", "Unparsed"}) + for source, stats := range metrics { + if stats["hits"] > 0 { + table.Append([]string{source, fmt.Sprintf("%d", stats["hits"]), fmt.Sprintf("%d", stats["parsed"]), fmt.Sprintf("%d", stats["unparsed"])}) + skip = false + } + } + if !skip { + fmt.Printf(" - (Parser) %s: \n", itemName) + table.Render() + fmt.Println() + } +} + +//it's a rip of the cli version, but in silent-mode +func silenceInstallItem(name string, obtype string) (string, error) { + item := cwhub.GetItem(obtype, name) + if item == nil { + return "", fmt.Errorf("error retrieving item") + } + it := *item + if downloadOnly && it.Downloaded && it.UpToDate { + return fmt.Sprintf("%s is already downloaded and up-to-date", it.Name), nil + } + it, err := cwhub.DownloadLatest(csConfig.Cscli, it, forceInstall) + if err != nil { + return "", fmt.Errorf("error while downloading %s : %v", it.Name, err) + } + if err := cwhub.AddItem(obtype, it); err != nil { + return "", err + } + + if downloadOnly { + return fmt.Sprintf("Downloaded %s to %s", it.Name, csConfig.Cscli.HubDir+"/"+it.RemotePath), nil + } + it, err = cwhub.EnableItem(csConfig.Cscli, it) + if err != nil { + return "", fmt.Errorf("error while enabling %s : %v", it.Name, err) + } + if err := cwhub.AddItem(obtype, it); err != nil { + return "", err + } + return fmt.Sprintf("Enabled %s", it.Name), nil +} + 
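+ //RestoreHub and BackupHub below share a simple on-disk layout per item type: + // <dir>/<type>/upstream-<type>.json : names of intact hub items, reinstalled from the hub on restore + // <dir>/<type>/<stage>/<file> : full copies of local/tainted items, copied back verbatim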
+func RestoreHub(dirPath string) error { + var err error + + for _, itype := range cwhub.ItemTypes { + itemDirectory := fmt.Sprintf("%s/%s/", dirPath, itype) + if _, err = os.Stat(itemDirectory); err != nil { + log.Infof("no %s in backup", itype) + continue + } + /*restore the upstream items*/ + upstreamListFN := fmt.Sprintf("%s/upstream-%s.json", itemDirectory, itype) + file, err := ioutil.ReadFile(upstreamListFN) + if err != nil { + return fmt.Errorf("error while opening %s : %s", upstreamListFN, err) + } + var upstreamList []string + err = json.Unmarshal(file, &upstreamList) + if err != nil { + return fmt.Errorf("error unmarshaling %s : %s", upstreamListFN, err) + } + for _, toinstall := range upstreamList { + label, err := silenceInstallItem(toinstall, itype) + if err != nil { + log.Errorf("Error while installing %s : %s", toinstall, err) + } else if label != "" { + log.Infof("Installed %s : %s", toinstall, label) + } else { + log.Printf("Installed %s : ok", toinstall) + } + } + /*restore the local and tainted items*/ + files, err := ioutil.ReadDir(itemDirectory) + if err != nil { + return fmt.Errorf("failed enumerating files of %s : %s", itemDirectory, err) + } + for _, file := range files { + //dirs are stages, keep track of them + if !file.IsDir() { + continue + } + stage := file.Name() + stagedir := fmt.Sprintf("%s/%s/%s/", csConfig.ConfigPaths.ConfigDir, itype, stage) + log.Debugf("Found stage %s in %s, target directory : %s", stage, itype, stagedir) + if err = os.MkdirAll(stagedir, os.ModePerm); err != nil { + return fmt.Errorf("error while creating stage directory %s : %s", stagedir, err) + } + /*find items*/ + ifiles, err := ioutil.ReadDir(itemDirectory + "/" + stage + "/") + if err != nil { + return fmt.Errorf("failed enumerating files of %s : %s", itemDirectory+"/"+stage, err) + } + //finally copy the items + for _, tfile := range ifiles { + log.Infof("Going to restore local/tainted [%s]", tfile.Name()) + sourceFile := fmt.Sprintf("%s/%s/%s", itemDirectory, stage, tfile.Name()) + destinationFile := fmt.Sprintf("%s%s", stagedir, tfile.Name()) + if err = types.CopyFile(sourceFile, destinationFile); err != nil { + return fmt.Errorf("failed copy %s %s to %s : %s", itype, sourceFile, destinationFile, err) + } + log.Infof("restored %s to %s", sourceFile, destinationFile) + } + } + } + return nil +} + 
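A hypothetical round-trip with these two helpers; the scratch directory is illustrative:

    if err := BackupHub("/tmp/hub-backup"); err != nil {
        log.Fatalf("backup failed : %s", err)
    }
    if err := RestoreHub("/tmp/hub-backup"); err != nil {
        log.Fatalf("restore failed : %s", err)
    }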
+func BackupHub(dirPath string) error { + var err error + var itemDirectory string + var upstreamParsers []string + + for _, itemType := range cwhub.ItemTypes { + clog := log.WithFields(log.Fields{ + "type": itemType, + }) + itemMap := cwhub.GetItemMap(itemType) + if itemMap != nil { + itemDirectory = fmt.Sprintf("%s/%s/", dirPath, itemType) + if err := os.MkdirAll(itemDirectory, os.ModePerm); err != nil { + return fmt.Errorf("error while creating %s : %s", itemDirectory, err) + } + upstreamParsers = []string{} + for k, v := range itemMap { + clog = clog.WithFields(log.Fields{ + "file": v.Name, + }) + if !v.Installed { //only backup installed ones + clog.Debugf("[%s] : not installed", k) + continue + } + + //for the local/tainted ones, we backup the full file + if v.Tainted || v.Local || !v.UpToDate { + //we need to backup stages for parsers + if itemType == cwhub.PARSERS || itemType == cwhub.PARSERS_OVFLW { + fstagedir := fmt.Sprintf("%s%s", itemDirectory, v.Stage) + if err := os.MkdirAll(fstagedir, os.ModePerm); err != nil { + return fmt.Errorf("error while creating stage dir %s : %s", fstagedir, err) + } + } + clog.Debugf("[%s] : backing up file (tainted:%t local:%t up-to-date:%t)", k, v.Tainted, v.Local, v.UpToDate) + tfile := fmt.Sprintf("%s%s%s", itemDirectory, v.Stage, v.FileName) + if err = types.CopyFile(v.LocalPath, tfile); err != nil { + return fmt.Errorf("failed copy %s %s to %s : %s", itemType, v.LocalPath, tfile, err) + } + clog.Infof("local/tainted saved %s to %s", v.LocalPath, tfile) + continue + } + clog.Debugf("[%s] : from hub, just backup name (up-to-date:%t)", k, v.UpToDate) + clog.Infof("saving, version:%s, up-to-date:%t", v.Version, v.UpToDate) + upstreamParsers = append(upstreamParsers, v.Name) + } + //write the upstream items + upstreamParsersFname := fmt.Sprintf("%s/upstream-%s.json", itemDirectory, itemType) + upstreamParsersContent, err := json.MarshalIndent(upstreamParsers, "", " ") + if err != nil { + return fmt.Errorf("failed marshaling upstream parsers : %s", err) + } + err = ioutil.WriteFile(upstreamParsersFname, upstreamParsersContent, 0644) + if err != nil { + return fmt.Errorf("unable to write to %s %s : %s", itemType, upstreamParsersFname, err) + } + clog.Infof("Wrote %d entries for %s to %s", len(upstreamParsers), itemType, upstreamParsersFname) + + } else { + clog.Infof("No %s to backup.", itemType) + } + } + + return nil +} diff --git a/cmd/crowdsec/api.go b/cmd/crowdsec/api.go new file mode 100644 index 000000000..44e79eef5 --- /dev/null +++ b/cmd/crowdsec/api.go @@ -0,0 +1,36 @@ +package main + +import ( + "fmt" + + "github.com/crowdsecurity/crowdsec/pkg/apiserver" + "github.com/crowdsecurity/crowdsec/pkg/types" + log "github.com/sirupsen/logrus" +) + +func initAPIServer() (*apiserver.APIServer, error) { + apiServer, err := apiserver.NewServer(cConfig.API.Server) + if err != nil { + return nil, fmt.Errorf("unable to run local API: %s", err) + } + + return apiServer, nil +} + +func serveAPIServer(apiServer *apiserver.APIServer) { + apiTomb.Go(func() error { + defer types.CatchPanic("crowdsec/serveAPIServer") + go func() { + defer types.CatchPanic("crowdsec/runAPIServer") + if err := apiServer.Run(); err != nil { + log.Fatalf(err.Error()) + } + }() + <-apiTomb.Dying() // block until the tomb starts dying + log.Infof("serve: shutting down api server") + if err := apiServer.Shutdown(); err != nil { + return err + } + return nil + }) +} diff --git a/cmd/crowdsec/crowdsec.go b/cmd/crowdsec/crowdsec.go new file mode 100644 index 000000000..b20110edf --- /dev/null +++ b/cmd/crowdsec/crowdsec.go @@ -0,0 +1,166 @@ +package main + +import ( + "fmt" + "time" + + "github.com/crowdsecurity/crowdsec/pkg/acquisition" + "github.com/crowdsecurity/crowdsec/pkg/cwhub" + "github.com/crowdsecurity/crowdsec/pkg/exprhelpers" + leaky "github.com/crowdsecurity/crowdsec/pkg/leakybucket" + "github.com/crowdsecurity/crowdsec/pkg/parser" + "github.com/crowdsecurity/crowdsec/pkg/types" + log "github.com/sirupsen/logrus" +) + +func initCrowdsec() (*parser.Parsers, error) { + err := exprhelpers.Init() + if err != nil { + return &parser.Parsers{}, fmt.Errorf("Failed to init expr helpers : %s", err) + } + + // Populate cwhub package tools + if err := cwhub.GetHubIdx(cConfig.Cscli); err != nil { + return &parser.Parsers{}, fmt.Errorf("Failed to load hub index : %s", err) + } + + // Start loading configs + csParsers := newParsers() + if csParsers, err = parser.LoadParsers(cConfig, csParsers); err != nil { + return &parser.Parsers{}, fmt.Errorf("Failed to load parsers: %s", err) + } + + if err := LoadBuckets(cConfig); err != nil { + return &parser.Parsers{}, fmt.Errorf("Failed to load scenarios: %s", err) + } + + if err := LoadAcquisition(cConfig); err != nil { + return &parser.Parsers{}, fmt.Errorf("Error while loading acquisition config : %s", err) + } + return csParsers, nil +} + 
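+ //Runtime data flow: acquisition feeds raw lines into inputLineChan; parser routines turn them into events on inputEventChan; bucket routines pour those events and emit overflows on outputEventChan; output routines push overflows to the local API.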
+	for i := 0; i < cConfig.Crowdsec.ParserRoutinesCount; i++ {
+		parsersTomb.Go(func() error {
+			defer types.CatchPanic("crowdsec/runParse")
+			err := runParse(inputLineChan, inputEventChan, *parsers.Ctx, parsers.Nodes)
+			if err != nil {
+				log.Fatalf("starting parse error : %s", err)
+				return err
+			}
+			return nil
+		})
+	}
+
+	for i := 0; i < cConfig.Crowdsec.BucketsRoutinesCount; i++ {
+		bucketsTomb.Go(func() error {
+			defer types.CatchPanic("crowdsec/runPour")
+			err := runPour(inputEventChan, holders, buckets)
+			if err != nil {
+				log.Fatalf("starting pour error : %s", err)
+				return err
+			}
+			return nil
+		})
+	}
+	for i := 0; i < cConfig.Crowdsec.OutputRoutinesCount; i++ {
+
+		outputsTomb.Go(func() error {
+			defer types.CatchPanic("crowdsec/runOutput")
+			err := runOutput(inputEventChan, outputEventChan, buckets, *parsers.Povfwctx, parsers.Povfwnodes, *cConfig.API.Client.Credentials)
+			if err != nil {
+				log.Fatalf("starting outputs error : %s", err)
+				return err
+			}
+			return nil
+		})
+	}
+	log.Warningf("Starting processing data")
+
+	if err := acquisition.StartAcquisition(dataSources, inputLineChan, &acquisTomb); err != nil {
+		log.Fatalf("starting acquisition error : %s", err)
+		return err
+	}
+
+	return nil
+}
+
+func serveCrowdsec(parsers *parser.Parsers) {
+	crowdsecTomb.Go(func() error {
+		defer types.CatchPanic("crowdsec/serveCrowdsec")
+		go func() {
+			defer types.CatchPanic("crowdsec/runCrowdsec")
+			runCrowdsec(parsers)
+		}()
+
+		/*we should stop in two cases :
+		- crowdsecTomb has been Killed() : it might be a shutdown or a reload, so stop
+		- acquisTomb is dead : it means we were in "cat" mode and the files are done being read, so quit
+		*/
+		waitOnTomb()
+		log.Debugf("Shutting down crowdsec routines")
+		if err := ShutdownCrowdsecRoutines(); err != nil {
+			log.Fatalf("unable to shutdown crowdsec routines: %s", err)
+		}
+		log.Debugf("everything is dead, return crowdsecTomb")
+		return nil
+	})
+}
+
+func waitOnTomb() {
+	for {
+		select {
+		case <-acquisTomb.Dead():
+			/*if it's the acquisition dying, it means we were in "cat" mode.
+			while shutting down, we need to give all buckets time to process in-flight data*/
+			log.Warningf("Acquisition is finished, shutting down")
+			bucketCount := leaky.LeakyRoutineCount
+			rounds := 0
+			successiveStillRounds := 0
+			/*
+				While it might make sense to shut down parsers/buckets/etc. as soon as acquisition is finished,
+				we might still have some pending buckets : buckets that overflowed, but whose LeakRoutines are still alive because they
+				are waiting to be able to "commit" (push to api). This can happen specifically in a context where a lot of logs
+				are going to trigger overflows (ie. trigger buckets with ~100% of the logs triggering an overflow).
+
+				To avoid this (which would mean that we would "lose" some overflows), let's monitor the number of live buckets.
+				However, because of the blackhole mechanism, you can't really wait for the number of LeakRoutines to go to zero (we might have to wait $blackhole_duration).
+
+				So : we are waiting for the number of buckets to stop decreasing before returning. "how long" we should wait is a bit of a tricky question,
+				as some operations (ie. 
reverse dns or such in post-overflow) can take some time :) + */ + for { + currBucketCount := leaky.LeakyRoutineCount + + if currBucketCount == 0 { + /*no bucket to wait on*/ + break + } + if currBucketCount != bucketCount { + if rounds == 0 || rounds%2 == 0 { + log.Printf("Still %d live LeakRoutines, waiting (was %d)", currBucketCount, bucketCount) + } + bucketCount = currBucketCount + successiveStillRounds = 0 + } else { + if successiveStillRounds > 1 { + log.Printf("LeakRoutines commit over.") + break + } + successiveStillRounds++ + } + rounds++ + time.Sleep(5 * time.Second) + } + return + case <-crowdsecTomb.Dying(): + log.Infof("Crowdsec engine shutting down") + return + } + } +} diff --git a/cmd/crowdsec/main.go b/cmd/crowdsec/main.go index dc2e61f56..546b0d9e4 100644 --- a/cmd/crowdsec/main.go +++ b/cmd/crowdsec/main.go @@ -1,22 +1,23 @@ package main import ( + "flag" "fmt" - "syscall" + "os" + "strings" _ "net/http/pprof" "time" + "sort" + "github.com/crowdsecurity/crowdsec/pkg/acquisition" "github.com/crowdsecurity/crowdsec/pkg/csconfig" + "github.com/crowdsecurity/crowdsec/pkg/cwhub" "github.com/crowdsecurity/crowdsec/pkg/cwversion" - "github.com/crowdsecurity/crowdsec/pkg/exprhelpers" leaky "github.com/crowdsecurity/crowdsec/pkg/leakybucket" - "github.com/crowdsecurity/crowdsec/pkg/outputs" "github.com/crowdsecurity/crowdsec/pkg/parser" "github.com/crowdsecurity/crowdsec/pkg/types" - "github.com/pkg/errors" - "github.com/sevlyar/go-daemon" log "github.com/sirupsen/logrus" @@ -25,102 +26,108 @@ import ( var ( /*tombs for the parser, buckets and outputs.*/ - acquisTomb tomb.Tomb - parsersTomb tomb.Tomb - bucketsTomb tomb.Tomb - outputsTomb tomb.Tomb + acquisTomb tomb.Tomb + parsersTomb tomb.Tomb + bucketsTomb tomb.Tomb + outputsTomb tomb.Tomb + apiTomb tomb.Tomb + crowdsecTomb tomb.Tomb + + disableAPI bool + disableAgent bool + + flags *Flags + /*global crowdsec config*/ - cConfig *csconfig.CrowdSec + cConfig *csconfig.GlobalConfig /*the state of acquisition*/ - acquisitionCTX *acquisition.FileAcquisCtx + dataSources []acquisition.DataSource /*the state of the buckets*/ holders []leaky.BucketFactory buckets *leaky.Buckets outputEventChan chan types.Event //the buckets init returns its own chan that is used for multiplexing - /*the state of outputs*/ - OutputRunner *outputs.Output - outputProfiles []types.Profile - /*the state of the parsers*/ - parserCTX *parser.UnixParserCtx - postOverflowCTX *parser.UnixParserCtx - parserNodes []parser.Node - postOverflowNodes []parser.Node /*settings*/ lastProcessedItem time.Time /*keep track of last item timestamp in time-machine. 
it is used to GC buckets when we dump them.*/ ) -func LoadParsers(cConfig *csconfig.CrowdSec) error { - var p parser.UnixParser - var err error - - parserNodes = make([]parser.Node, 0) - postOverflowNodes = make([]parser.Node, 0) - - log.Infof("Loading grok library") - /* load base regexps for two grok parsers */ - parserCTX, err = p.Init(map[string]interface{}{"patterns": cConfig.ConfigFolder + string("/patterns/"), "data": cConfig.DataFolder}) - if err != nil { - return fmt.Errorf("failed to load parser patterns : %v", err) - } - postOverflowCTX, err = p.Init(map[string]interface{}{"patterns": cConfig.ConfigFolder + string("/patterns/"), "data": cConfig.DataFolder}) - if err != nil { - return fmt.Errorf("failed to load postovflw parser patterns : %v", err) - } - - /* - Load enrichers - */ - log.Infof("Loading enrich plugins") - parserPlugins, err := parser.Loadplugin(cConfig.DataFolder) - if err != nil { - return fmt.Errorf("Failed to load enrich plugin : %v", err) - } - parser.ECTX = []parser.EnricherCtx{parserPlugins} - - /* - Load the actual parsers - */ - - log.Infof("Loading parsers") - parserNodes, err = parser.LoadStageDir(cConfig.ConfigFolder+"/parsers/", parserCTX) - - if err != nil { - return fmt.Errorf("failed to load parser config : %v", err) - } - - log.Infof("Loading postoverflow parsers") - postOverflowNodes, err = parser.LoadStageDir(cConfig.ConfigFolder+"/postoverflows/", postOverflowCTX) - - if err != nil { - return fmt.Errorf("failed to load postoverflow config : %v", err) - } - - if cConfig.Profiling { - parserCTX.Profiling = true - postOverflowCTX.Profiling = true - } - - return nil +type Flags struct { + ConfigFile string + TraceLevel bool + DebugLevel bool + InfoLevel bool + PrintVersion bool + SingleFilePath string + SingleJournalctlFilter string + SingleFileType string + SingleFileJsonOutput string + TestMode bool + DisableAgent bool + DisableAPI bool } -func GetEnabledScenarios() string { - /*keep track of scenarios name for consensus profiling*/ - var scenariosEnabled string - for _, x := range holders { - if scenariosEnabled != "" { - scenariosEnabled += "," +type parsers struct { + ctx *parser.UnixParserCtx + povfwctx *parser.UnixParserCtx + stageFiles []parser.Stagefile + povfwStageFiles []parser.Stagefile + nodes []parser.Node + povfwnodes []parser.Node + enricherCtx []parser.EnricherCtx +} + +// Return new parsers +// nodes and povfwnodes are already initialized in parser.LoadStages +func newParsers() *parser.Parsers { + parsers := &parser.Parsers{ + Ctx: &parser.UnixParserCtx{}, + Povfwctx: &parser.UnixParserCtx{}, + StageFiles: make([]parser.Stagefile, 0), + PovfwStageFiles: make([]parser.Stagefile, 0), + } + for _, itemType := range []string{cwhub.PARSERS, cwhub.PARSERS_OVFLW} { + for _, hubParserItem := range cwhub.GetItemMap(itemType) { + if hubParserItem.Installed { + stagefile := parser.Stagefile{ + Filename: hubParserItem.LocalPath, + Stage: hubParserItem.Stage, + } + if itemType == cwhub.PARSERS { + parsers.StageFiles = append(parsers.StageFiles, stagefile) + } + if itemType == cwhub.PARSERS_OVFLW { + parsers.PovfwStageFiles = append(parsers.PovfwStageFiles, stagefile) + } + } } - scenariosEnabled += x.Name } - return scenariosEnabled + if parsers.StageFiles != nil { + sort.Slice(parsers.StageFiles, func(i, j int) bool { + return parsers.StageFiles[i].Filename < parsers.StageFiles[j].Filename + }) + } + if parsers.PovfwStageFiles != nil { + sort.Slice(parsers.PovfwStageFiles, func(i, j int) bool { + return parsers.PovfwStageFiles[i].Filename < 
parsers.PovfwStageFiles[j].Filename + }) + } + + return parsers } -func LoadBuckets(cConfig *csconfig.CrowdSec) error { +func LoadBuckets(cConfig *csconfig.GlobalConfig) error { - var err error + var ( + err error + files []string + ) + for _, hubScenarioItem := range cwhub.GetItemMap(cwhub.SCENARIOS) { + if hubScenarioItem.Installed { + files = append(files, hubScenarioItem.LocalPath) + } + } - log.Infof("Loading scenarios") - holders, outputEventChan, err = leaky.Init(map[string]string{"patterns": cConfig.ConfigFolder + "/scenarios/", "data": cConfig.DataFolder}) + log.Infof("Loading %d scenario files", len(files)) + holders, outputEventChan, err = leaky.LoadBuckets(cConfig.Crowdsec, files) if err != nil { return fmt.Errorf("Scenario loading failed : %v", err) @@ -128,13 +135,13 @@ func LoadBuckets(cConfig *csconfig.CrowdSec) error { buckets = leaky.NewBuckets() /*restore as well previous state if present*/ - if cConfig.RestoreMode != "" { - log.Warningf("Restoring buckets state from %s", cConfig.RestoreMode) - if err := leaky.LoadBucketsState(cConfig.RestoreMode, buckets, holders); err != nil { + if cConfig.Crowdsec.BucketStateFile != "" { + log.Warningf("Restoring buckets state from %s", cConfig.Crowdsec.BucketStateFile) + if err := leaky.LoadBucketsState(cConfig.Crowdsec.BucketStateFile, buckets, holders); err != nil { return fmt.Errorf("unable to restore buckets : %s", err) } } - if cConfig.Profiling { + if cConfig.Prometheus != nil && cConfig.Prometheus.Enabled { for holderIndex := range holders { holders[holderIndex].Profiling = true } @@ -142,98 +149,127 @@ func LoadBuckets(cConfig *csconfig.CrowdSec) error { return nil } -func LoadOutputs(cConfig *csconfig.CrowdSec) error { +func LoadAcquisition(cConfig *csconfig.GlobalConfig) error { var err error - /* - Load output profiles - */ - log.Infof("Loading output profiles") - outputProfiles, err = outputs.LoadOutputProfiles(cConfig.ConfigFolder + "/profiles.yaml") - if err != nil || len(outputProfiles) == 0 { - return fmt.Errorf("Failed to load output profiles : %v", err) - } - //If the user is providing a single file (ie forensic mode), don't flush expired records - if cConfig.SingleFile != "" { - log.Infof("forensic mode, disable flush") - cConfig.OutputConfig.Flush = false + if flags.SingleFilePath != "" || flags.SingleJournalctlFilter != "" { + + tmpCfg := acquisition.DataSourceCfg{} + tmpCfg.Mode = acquisition.CAT_MODE + tmpCfg.Labels = map[string]string{"type": flags.SingleFileType} + + if flags.SingleFilePath != "" { + tmpCfg.Filename = flags.SingleFilePath + } else if flags.SingleJournalctlFilter != "" { + tmpCfg.JournalctlFilters = strings.Split(flags.SingleJournalctlFilter, " ") + } + + datasrc, err := acquisition.DataSourceConfigure(tmpCfg) + if err != nil { + return fmt.Errorf("while configuring specified file datasource : %s", err) + } + if dataSources == nil { + dataSources = make([]acquisition.DataSource, 0) + } + dataSources = append(dataSources, datasrc) } else { - cConfig.OutputConfig.Flush = true - } - OutputRunner, err = outputs.NewOutput(cConfig.OutputConfig) - if err != nil { - return fmt.Errorf("output plugins initialization error : %s", err.Error()) - } - - if err := OutputRunner.StartAutoCommit(); err != nil { - return errors.Wrap(err, "failed to start autocommit") - } - - /* Init the API connector */ - if cConfig.APIMode { - log.Infof("Loading API client") - var apiConfig = map[string]string{ - "path": cConfig.ConfigFolder + "/api.yaml", - "profile": GetEnabledScenarios(), - } - if err := 
OutputRunner.InitAPI(apiConfig); err != nil { - return fmt.Errorf("failed to load api : %s", err) + dataSources, err = acquisition.LoadAcquisitionFromFile(cConfig.Crowdsec) + if err != nil { + log.Fatalf("While loading acquisition configuration : %s", err) } } + return nil } -func LoadAcquisition(cConfig *csconfig.CrowdSec) error { - var err error - //Init the acqusition : from cli or from acquis.yaml file - acquisitionCTX, err = acquisition.LoadAcquisitionConfig(cConfig) - if err != nil { - return fmt.Errorf("Failed to start acquisition : %s", err) - } - return nil +func (f *Flags) Parse() { + + flag.StringVar(&f.ConfigFile, "c", "/etc/crowdsec/config.yaml", "configuration file") + flag.BoolVar(&f.TraceLevel, "trace", false, "VERY verbose") + flag.BoolVar(&f.DebugLevel, "debug", false, "print debug-level on stdout") + flag.BoolVar(&f.InfoLevel, "info", false, "print info-level on stdout") + flag.BoolVar(&f.PrintVersion, "version", false, "display version") + flag.StringVar(&f.SingleFilePath, "file", "", "Process a single file in time-machine") + flag.StringVar(&f.SingleJournalctlFilter, "jfilter", "", "Process a single journalctl output in time-machine") + flag.StringVar(&f.SingleFileType, "type", "", "Labels.type for file in time-machine") + flag.BoolVar(&f.TestMode, "t", false, "only test configs") + flag.BoolVar(&f.DisableAgent, "no-cs", false, "disable crowdsec agent") + flag.BoolVar(&f.DisableAPI, "no-api", false, "disable local API") + + flag.Parse() } -func StartProcessingRoutines(cConfig *csconfig.CrowdSec) (chan types.Event, error) { +// LoadConfig return configuration parsed from configuration file +func LoadConfig(config *csconfig.GlobalConfig) error { - acquisTomb = tomb.Tomb{} - parsersTomb = tomb.Tomb{} - bucketsTomb = tomb.Tomb{} - outputsTomb = tomb.Tomb{} + if flags.ConfigFile != "" { + if err := config.LoadConfigurationFile(flags.ConfigFile); err != nil { + return fmt.Errorf("while loading configuration : %s", err) + } + } else { + log.Warningf("no configuration file provided") + } + disableAPI = flags.DisableAPI + disableAgent = flags.DisableAgent - inputLineChan := make(chan types.Event) - inputEventChan := make(chan types.Event) - - //start go-routines for parsing, buckets pour and ouputs. 
- for i := 0; i < cConfig.NbParsers; i++ { - parsersTomb.Go(func() error { - err := runParse(inputLineChan, inputEventChan, *parserCTX, parserNodes) - if err != nil { - log.Errorf("runParse error : %s", err) - return err - } - return nil - }) + if !disableAPI && (cConfig.API == nil || cConfig.API.Server == nil) { + log.Errorf("no API server configuration found, will not start the local API") + disableAPI = true } - bucketsTomb.Go(func() error { - err := runPour(inputEventChan, holders, buckets) - if err != nil { - log.Errorf("runPour error : %s", err) - return err - } - return nil - }) + if !disableAgent && cConfig.Crowdsec == nil { + log.Errorf("no configuration found crowdsec agent, will not start the agent") + disableAgent = true + } - outputsTomb.Go(func() error { - err := runOutput(inputEventChan, outputEventChan, holders, buckets, *postOverflowCTX, postOverflowNodes, outputProfiles, OutputRunner) - if err != nil { - log.Errorf("runPour error : %s", err) - return err - } - return nil - }) + if !disableAgent && (cConfig.API == nil || cConfig.API.Client == nil || cConfig.API.Client.Credentials == nil) { + log.Fatalf("missing local API credentials for crowdsec agent, abort") + } - return inputLineChan, nil + if disableAPI && disableAgent { + log.Fatalf("You must run at least the API Server or crowdsec") + } + + if flags.SingleFilePath != "" { + if flags.SingleFileType == "" { + return fmt.Errorf("-file requires -type") + } + } + + if flags.SingleJournalctlFilter != "" { + if flags.SingleFileType == "" { + return fmt.Errorf("-jfilter requires -type") + } + } + + if flags.DebugLevel { + logLevel := log.DebugLevel + config.Common.LogLevel = &logLevel + } + if flags.InfoLevel || config.Common.LogLevel == nil { + logLevel := log.InfoLevel + config.Common.LogLevel = &logLevel + } + if flags.TraceLevel { + logLevel := log.TraceLevel + config.Common.LogLevel = &logLevel + } + + if flags.TestMode && !disableAgent { + config.Crowdsec.LintOnly = true + } + + if flags.SingleFilePath != "" || flags.SingleJournalctlFilter != "" { + config.API.Server.OnlineClient = nil + /*if the api is disabled as well, just read file and exit, don't daemonize*/ + if disableAPI { + config.Common.Daemonize = false + } + config.Common.LogMedia = "stdout" + log.Infof("single file mode : log_media=%s daemonize=%t", config.Common.LogMedia, config.Common.Daemonize) + } + + return nil } func main() { @@ -241,114 +277,47 @@ func main() { err error ) - cConfig = csconfig.NewCrowdSecConfig() + defer types.CatchPanic("crowdsec/main") + cConfig = csconfig.NewConfig() // Handle command line arguments - if err := cConfig.LoadConfig(); err != nil { + flags = &Flags{} + flags.Parse() + if flags.PrintVersion { + cwversion.Show() + os.Exit(0) + } + + if err := LoadConfig(cConfig); err != nil { log.Fatalf(err.Error()) } // Configure logging - if err = types.SetDefaultLoggerConfig(cConfig.LogMode, cConfig.LogFolder, cConfig.LogLevel); err != nil { + if err = types.SetDefaultLoggerConfig(cConfig.Common.LogMedia, cConfig.Common.LogDir, *cConfig.Common.LogLevel); err != nil { log.Fatal(err.Error()) } - daemonCTX := &daemon.Context{ - PidFileName: cConfig.PIDFolder + "/crowdsec.pid", - PidFilePerm: 0644, - WorkDir: "./", - Umask: 027, - } - if cConfig.Daemonize { - daemon.SetSigHandler(termHandler, syscall.SIGTERM) - daemon.SetSigHandler(reloadHandler, syscall.SIGHUP) - daemon.SetSigHandler(debugHandler, syscall.SIGUSR1) - - d, err := daemonCTX.Reborn() - if err != nil { - log.Fatalf("unable to run daemon: %s ", err.Error()) - } - if d != 
nil { - return - } - } - log.Infof("Crowdsec %s", cwversion.VersionStr()) + if !flags.DisableAPI && (cConfig.API == nil || cConfig.API.Server == nil) { + log.Errorf("no API server configuration found, will not start the local API") + flags.DisableAPI = true + } + + if !flags.DisableAgent && cConfig.Crowdsec == nil { + log.Errorf("no configuration found crowdsec agent, will not start the agent") + flags.DisableAgent = true + } + + if !flags.DisableAgent && (cConfig.API == nil || cConfig.API.Client == nil || cConfig.API.Client.Credentials == nil) { + log.Fatalf("missing local API credentials for crowdsec agent, abort") + } // Enable profiling early - if cConfig.Prometheus { - registerPrometheus(cConfig.PrometheusMode) - cConfig.Profiling = true - } - if cConfig.Profiling { - go runTachymeter(cConfig.HTTPListen) + if cConfig.Prometheus != nil { + go registerPrometheus(cConfig.Prometheus) } - err = exprhelpers.Init() - if err != nil { - log.Fatalf("Failed to init expr helpers : %s", err) + if err := Serve(); err != nil { + log.Fatalf(err.Error()) } - // Start loading configs - if err := LoadParsers(cConfig); err != nil { - log.Fatalf("Failed to load parsers: %s", err) - } - - if err := LoadBuckets(cConfig); err != nil { - log.Fatalf("Failed to load scenarios: %s", err) - } - - if err := LoadOutputs(cConfig); err != nil { - log.Fatalf("failed to initialize outputs : %s", err) - } - - if err := LoadAcquisition(cConfig); err != nil { - log.Fatalf("Error while loading acquisition config : %s", err) - } - - /* if it's just linting, we're done */ - if cConfig.Linter { - return - } - - /*if the user is in "single file mode" (might be writting scenario or parsers), - allow loading **without** parsers or scenarios */ - if cConfig.SingleFile == "" { - if len(parserNodes) == 0 { - log.Fatalf("no parser(s) loaded, abort.") - } - - if len(holders) == 0 { - log.Fatalf("no bucket(s) loaded, abort.") - } - - if len(outputProfiles) == 0 { - log.Fatalf("no output profile(s) loaded, abort.") - } - } - - //Start the background routines that comunicate via chan - log.Infof("Starting processing routines") - inputLineChan, err := StartProcessingRoutines(cConfig) - if err != nil { - log.Fatalf("failed to start processing routines : %s", err) - } - - //Fire! 
- log.Warningf("Starting processing data") - - acquisition.AcquisStartReading(acquisitionCTX, inputLineChan, &acquisTomb) - - if !cConfig.Daemonize { - if err = serveOneTimeRun(*OutputRunner); err != nil { - log.Errorf(err.Error()) - } else { - return - } - } else { - defer daemonCTX.Release() //nolint:errcheck // won't bother checking this error in defer statement - err = daemon.ServeSignals() - if err != nil { - log.Fatalf("serveDaemon error : %s", err.Error()) - } - } } diff --git a/cmd/crowdsec/metrics.go b/cmd/crowdsec/metrics.go index 501609eaf..35d6ac366 100644 --- a/cmd/crowdsec/metrics.go +++ b/cmd/crowdsec/metrics.go @@ -1,33 +1,22 @@ package main import ( + "fmt" "time" "github.com/crowdsecurity/crowdsec/pkg/acquisition" + v1 "github.com/crowdsecurity/crowdsec/pkg/apiserver/controllers/v1" + "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/cwversion" leaky "github.com/crowdsecurity/crowdsec/pkg/leakybucket" "github.com/crowdsecurity/crowdsec/pkg/parser" - "github.com/jamiealquiza/tachymeter" + "github.com/crowdsecurity/crowdsec/pkg/types" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" "net/http" log "github.com/sirupsen/logrus" - - "runtime" -) - -var ( - parseStat *tachymeter.Tachymeter - bucketStat *tachymeter.Tachymeter - outputStat *tachymeter.Tachymeter - linesReadOK uint64 - linesReadKO uint64 - linesParsedOK uint64 - linesParsedKO uint64 - linesPouredOK uint64 - linesPouredKO uint64 ) /*prometheus*/ @@ -79,69 +68,49 @@ func dumpMetrics() { var tmpFile string var err error - if cConfig.DumpBuckets { + if cConfig.Crowdsec.BucketStateDumpDir != "" { log.Infof("!! Dumping buckets state") - if tmpFile, err = leaky.DumpBucketsStateAt(time.Now(), buckets); err != nil { + if tmpFile, err = leaky.DumpBucketsStateAt(time.Now(), cConfig.Crowdsec.BucketStateDumpDir, buckets); err != nil { log.Fatalf("Failed dumping bucket state : %s", err) } log.Infof("Buckets state dumped to %s", tmpFile) } +} - if cConfig.Profiling { - var memoryStats runtime.MemStats - runtime.ReadMemStats(&memoryStats) - - log.Infof("parser evt/s : %s", parseStat.Calc()) - log.Infof("bucket pour evt/s : %s", bucketStat.Calc()) - log.Infof("outputs evt/s : %s", outputStat.Calc()) - log.Infof("Alloc = %v MiB", bToMb(memoryStats.Alloc)) - log.Infof("TotalAlloc = %v MiB", bToMb(memoryStats.TotalAlloc)) - log.Infof("Sys = %v MiB", bToMb(memoryStats.Sys)) - log.Infof("NumGC = %v", memoryStats.NumGC) - log.Infof("Lines read ok : %d", linesReadOK) - if linesReadKO > 0 { - log.Infof("Lines discarded : %d (%.2f%%)", linesReadKO, float64(linesReadKO)/float64(linesReadOK)*100.0) - } - log.Infof("Lines parsed ok : %d", linesParsedOK) - if linesParsedKO > 0 { - log.Infof("Lines unparsed : %d (%.2f%%)", linesParsedKO, float64(linesParsedKO)/float64(linesParsedOK)*100.0) - } - log.Infof("Lines poured ok : %d", linesPouredOK) - if linesPouredKO > 0 { - log.Infof("Lines never poured : %d (%.2f%%)", linesPouredKO, float64(linesPouredKO)/float64(linesPouredOK)*100.0) - } - log.Infof("Writting metrics dump to %s", cConfig.WorkingFolder+"/crowdsec.profile") - if err := prometheus.WriteToTextfile(cConfig.WorkingFolder+"/crowdsec.profile", prometheus.DefaultGatherer); err != nil { - log.Errorf("failed to write metrics to %s : %s", cConfig.WorkingFolder+"/crowdsec.profile", err) - } +func registerPrometheus(config *csconfig.PrometheusCfg) { + if !config.Enabled { + return + } + if config.ListenAddr == "" { + log.Warningf("prometheus is 
enabled, but the listen address is empty, using '127.0.0.1'") + config.ListenAddr = "127.0.0.1" + } + if config.ListenPort == 0 { + log.Warningf("prometheus is enabled, but the listen port is empty, using '6060'") + config.ListenPort = 6060 } -} -func runTachymeter(HTTPListen string) { - log.Warningf("Starting profiling and http server") - /*Tachymeter for global perfs */ - parseStat = tachymeter.New(&tachymeter.Config{Size: 100}) - bucketStat = tachymeter.New(&tachymeter.Config{Size: 100}) - outputStat = tachymeter.New(&tachymeter.Config{Size: 100}) - log.Fatal(http.ListenAndServe(HTTPListen, nil)) -} - -func registerPrometheus(mode string) { + defer types.CatchPanic("crowdsec/registerPrometheus") /*Registering prometheus*/ /*If in aggregated mode, do not register events associated to a source, keeps cardinality low*/ - if mode == "aggregated" { + if config.Level == "aggregated" { log.Infof("Loading aggregated prometheus collectors") prometheus.MustRegister(globalParserHits, globalParserHitsOk, globalParserHitsKo, acquisition.ReaderHits, globalCsInfo, leaky.BucketsUnderflow, leaky.BucketsInstanciation, leaky.BucketsOverflow, + v1.LapiRouteHits, leaky.BucketsCurrentCount) } else { log.Infof("Loading prometheus collectors") prometheus.MustRegister(globalParserHits, globalParserHitsOk, globalParserHitsKo, parser.NodesHits, parser.NodesHitsOk, parser.NodesHitsKo, acquisition.ReaderHits, globalCsInfo, + v1.LapiRouteHits, v1.LapiMachineHits, v1.LapiBouncerHits, v1.LapiNilDecisions, v1.LapiNonNilDecisions, leaky.BucketsPour, leaky.BucketsUnderflow, leaky.BucketsInstanciation, leaky.BucketsOverflow, leaky.BucketsCurrentCount) } http.Handle("/metrics", promhttp.Handler()) + if err := http.ListenAndServe(fmt.Sprintf("%s:%d", config.ListenAddr, config.ListenPort), nil); err != nil { + log.Warningf("prometheus: %s", err) + } } diff --git a/cmd/crowdsec/output.go b/cmd/crowdsec/output.go index 5ff0f64dd..83d51759e 100644 --- a/cmd/crowdsec/output.go +++ b/cmd/crowdsec/output.go @@ -1,79 +1,158 @@ package main import ( + "context" "fmt" - - log "github.com/sirupsen/logrus" - + "net/url" + "sync" "time" + "github.com/crowdsecurity/crowdsec/pkg/apiclient" + "github.com/crowdsecurity/crowdsec/pkg/csconfig" + "github.com/crowdsecurity/crowdsec/pkg/cwhub" + "github.com/crowdsecurity/crowdsec/pkg/cwversion" leaky "github.com/crowdsecurity/crowdsec/pkg/leakybucket" - "github.com/crowdsecurity/crowdsec/pkg/outputs" + "github.com/crowdsecurity/crowdsec/pkg/models" "github.com/crowdsecurity/crowdsec/pkg/parser" "github.com/crowdsecurity/crowdsec/pkg/types" + "github.com/go-openapi/strfmt" + "github.com/pkg/errors" + log "github.com/sirupsen/logrus" ) -func runOutput(input chan types.Event, overflow chan types.Event, holders []leaky.BucketFactory, buckets *leaky.Buckets, - poctx parser.UnixParserCtx, ponodes []parser.Node, outputProfiles []types.Profile, output *outputs.Output) error { - var ( - //action string - start time.Time - ) +func dedupAlerts(alerts []types.RuntimeAlert) ([]*models.Alert, error) { + var dedupCache []*models.Alert + + for idx, alert := range alerts { + log.Tracef("alert %d/%d", idx, len(alerts)) + /*if we have more than one source, we need to dedup */ + if len(alert.Sources) == 0 || len(alert.Sources) == 1 { + dedupCache = append(dedupCache, alert.Alert) + continue + } + for k, src := range alert.Sources { + refsrc := *alert.Alert //copy + log.Tracef("source[%s]", k) + refsrc.Source = &src + dedupCache = append(dedupCache, &refsrc) + } + } + if len(dedupCache) != len(alerts) { + 
log.Tracef("went from %d to %d alerts", len(alerts), len(dedupCache))
+	}
+	return dedupCache, nil
+}
+
+func PushAlerts(alerts []types.RuntimeAlert, client *apiclient.ApiClient) error {
+	ctx := context.Background()
+	alertsToPush, err := dedupAlerts(alerts)
+
+	if err != nil {
+		return errors.Wrap(err, "failed to transform alerts for api")
+	}
+	_, _, err = client.Alerts.Add(ctx, alertsToPush)
+	if err != nil {
+		return errors.Wrap(err, "failed sending alert to LAPI")
+	}
+	return nil
+}
+
+func runOutput(input chan types.Event, overflow chan types.Event, buckets *leaky.Buckets,
+	postOverflowCTX parser.UnixParserCtx, postOverflowNodes []parser.Node, apiConfig csconfig.ApiCredentialsCfg) error {
+
+	var err error
+	ticker := time.NewTicker(1 * time.Second)
+
+	var cache []types.RuntimeAlert
+	var cacheMutex sync.Mutex
+
+	scenarios, err := cwhub.GetUpstreamInstalledScenariosAsString()
+	if err != nil {
+		return errors.Wrap(err, "loading list of installed hub scenarios")
+	}
+
+	apiURL, err := url.Parse(apiConfig.URL)
+	if err != nil {
+		return errors.Wrapf(err, "parsing api url ('%s')", apiConfig.URL)
+	}
+
+	password := strfmt.Password(apiConfig.Password)
+
+	Client, err := apiclient.NewClient(&apiclient.Config{
+		MachineID:      apiConfig.Login,
+		Password:       password,
+		Scenarios:      scenarios,
+		UserAgent:      fmt.Sprintf("crowdsec/%s", cwversion.VersionStr()),
+		URL:            apiURL,
+		VersionPrefix:  "v1",
+		UpdateScenario: cwhub.GetUpstreamInstalledScenariosAsString,
+	})
+	if err != nil {
+		return errors.Wrap(err, "new client api")
+	}
+	if _, err = Client.Auth.AuthenticateWatcher(context.Background(), models.WatcherAuthRequest{
+		MachineID: &apiConfig.Login,
+		Password:  &password,
+		Scenarios: scenarios,
+	}); err != nil {
+		return errors.Wrapf(err, "authenticate watcher (%s)", apiConfig.Login)
+	}
 LOOP:
 	for {
 		select {
-		case <-outputsTomb.Dying():
-			log.Infof("Flushing outputs")
-			output.FlushAll()
-			log.Debugf("Shuting down output routines")
-			if err := output.Shutdown(); err != nil {
-				log.Errorf("error while in output shutdown: %s", err)
+		case <-ticker.C:
+			if len(cache) > 0 {
+				cacheMutex.Lock()
+				cachecopy := cache
+				newcache := make([]types.RuntimeAlert, 0)
+				cache = newcache
+				cacheMutex.Unlock()
+				if err := PushAlerts(cachecopy, Client); err != nil {
+					log.Errorf("while pushing to api : %s", err)
+				}
+			}
+		case <-outputsTomb.Dying():
+			if len(cache) > 0 {
+				cacheMutex.Lock()
+				cachecopy := cache
+				newcache := make([]types.RuntimeAlert, 0)
+				cache = newcache
+				cacheMutex.Unlock()
+				if err := PushAlerts(cachecopy, Client); err != nil {
+					log.Errorf("while pushing leftovers to api : %s", err)
+				}
 			}
-			log.Infof("Done shutdown down output")
 			break LOOP
 		case event := <-overflow:
-			//if global simulation -> everything is simulation unless told otherwise
-			if cConfig.SimulationCfg != nil && cConfig.SimulationCfg.Simulation {
-				event.Overflow.Simulation = true
-			}
-			if cConfig.Profiling {
-				start = time.Now()
-			}
+			/*if the alert is empty and the mapkey is present, the overflow is just there to clean up the bucket*/
+			if event.Overflow.Alert == nil && event.Overflow.Mapkey != "" {
+				buckets.Bucket_map.Delete(event.Overflow.Mapkey)
+				break
+			}
 			if event.Overflow.Reprocess {
 				log.Debugf("Overflow being reprocessed.")
 				input <- event
 			}
-			/* process post overflow parser nodes */
-			event, err := parser.Parse(poctx, event, ponodes)
+			event, err := parser.Parse(postOverflowCTX, event, postOverflowNodes)
 			if err != nil {
 				return fmt.Errorf("postoverflow failed : %s", err)
 			}
-			//check scenarios in 
simulation - if cConfig.SimulationCfg != nil { - for _, scenario_name := range cConfig.SimulationCfg.Exclusions { - if event.Overflow.Scenario == scenario_name { - event.Overflow.Simulation = !event.Overflow.Simulation - } - } + log.Printf("%s", *event.Overflow.Alert.Message) + if event.Overflow.Whitelisted { + log.Printf("[%s] is whitelisted, skip.", *event.Overflow.Alert.Message) + continue } + cacheMutex.Lock() + cache = append(cache, event.Overflow) + cacheMutex.Unlock() - if event.Overflow.Scenario == "" && event.Overflow.MapKey != "" { - //log.Infof("Deleting expired entry %s", event.Overflow.MapKey) - buckets.Bucket_map.Delete(event.Overflow.MapKey) - } else { - /*let's handle output profiles */ - if err := output.ProcessOutput(event.Overflow, outputProfiles); err != nil { - log.Warningf("Error while processing overflow/output : %s", err) - } - } - } - if cConfig.Profiling { - outputStat.AddTime(time.Since(start)) } } + + ticker.Stop() return nil } diff --git a/cmd/crowdsec/parse.go b/cmd/crowdsec/parse.go index ff92d3806..e472f3c86 100644 --- a/cmd/crowdsec/parse.go +++ b/cmd/crowdsec/parse.go @@ -2,8 +2,6 @@ package main import ( "errors" - "sync/atomic" - "time" "github.com/prometheus/client_golang/prometheus" log "github.com/sirupsen/logrus" @@ -13,8 +11,6 @@ import ( ) func runParse(input chan types.Event, output chan types.Event, parserCTX parser.UnixParserCtx, nodes []parser.Node) error { - var start time.Time - var discardCPT, processCPT int LOOP: for { @@ -23,18 +19,9 @@ LOOP: log.Infof("Killing parser routines") break LOOP case event := <-input: - if cConfig.Profiling { - start = time.Now() - } if !event.Process { - if cConfig.Profiling { - atomic.AddUint64(&linesReadKO, 1) - } continue } - if cConfig.Profiling { - atomic.AddUint64(&linesReadOK, 1) - } globalParserHits.With(prometheus.Labels{"source": event.Line.Src}).Inc() /* parse the log using magic */ @@ -44,30 +31,16 @@ LOOP: return errors.New("parsing failed :/") } if !parsed.Process { - if cConfig.Profiling { - atomic.AddUint64(&linesParsedKO, 1) - } globalParserHitsKo.With(prometheus.Labels{"source": event.Line.Src}).Inc() log.Debugf("Discarding line %+v", parsed) - discardCPT++ continue } - if cConfig.Profiling { - atomic.AddUint64(&linesParsedOK, 1) - } globalParserHitsOk.With(prometheus.Labels{"source": event.Line.Src}).Inc() - processCPT++ if parsed.Whitelisted { log.Debugf("event whitelisted, discard") continue } - if processCPT%1000 == 0 { - log.Debugf("%d lines processed, %d lines discarded (unparsed)", processCPT, discardCPT) - } output <- parsed - if cConfig.Profiling { - parseStat.AddTime(time.Since(start)) - } } } return nil diff --git a/cmd/crowdsec/pour.go b/cmd/crowdsec/pour.go index b80fc6740..4b47c018e 100644 --- a/cmd/crowdsec/pour.go +++ b/cmd/crowdsec/pour.go @@ -2,7 +2,6 @@ package main import ( "fmt" - "sync/atomic" "time" leaky "github.com/crowdsecurity/crowdsec/pkg/leakybucket" @@ -12,34 +11,29 @@ import ( func runPour(input chan types.Event, holders []leaky.BucketFactory, buckets *leaky.Buckets) error { var ( - start time.Time count int ) -LOOP: for { //bucket is now ready select { case <-bucketsTomb.Dying(): - log.Infof("Exiting pour routine") - - break LOOP + log.Infof("Bucket routine exiting") + return nil case parsed := <-input: count++ - if cConfig.Profiling { - start = time.Now() - } - if count%5000 == 0 { log.Warningf("%d existing LeakyRoutine", leaky.LeakyRoutineCount) //when in forensics mode, garbage collect buckets - if parsed.MarshaledTime != "" && cConfig.SingleFile != "" { - 
var z *time.Time = &time.Time{} - if err := z.UnmarshalText([]byte(parsed.MarshaledTime)); err != nil { - log.Warningf("Failed to unmarshal time from event '%s' : %s", parsed.MarshaledTime, err) - } else { - log.Warningf("Starting buckets garbage collection ...") - if err = leaky.GarbageCollectBuckets(*z, buckets); err != nil { - return fmt.Errorf("failed to start bucket GC : %s", err) + if cConfig.Crowdsec.BucketsGCEnabled { + if parsed.MarshaledTime != "" { + var z *time.Time = &time.Time{} + if err := z.UnmarshalText([]byte(parsed.MarshaledTime)); err != nil { + log.Warningf("Failed to unmarshal time from event '%s' : %s", parsed.MarshaledTime, err) + } else { + log.Warningf("Starting buckets garbage collection ...") + if err = leaky.GarbageCollectBuckets(*z, buckets); err != nil { + return fmt.Errorf("failed to start bucket GC : %s", err) + } } } } @@ -52,22 +46,14 @@ LOOP: } if poured { globalBucketPourOk.Inc() - atomic.AddUint64(&linesPouredOK, 1) } else { globalBucketPourKo.Inc() - atomic.AddUint64(&linesPouredKO, 1) - } - if cConfig.Profiling { - bucketStat.AddTime(time.Since(start)) } if len(parsed.MarshaledTime) != 0 { if err := lastProcessedItem.UnmarshalText([]byte(parsed.MarshaledTime)); err != nil { - log.Debugf("failed to unmarshal time from event : %s", err) + log.Warningf("failed to unmarshal time from event : %s", err) } } - } } - log.Infof("Sending signal Bucketify") - return nil } diff --git a/cmd/crowdsec/serve.go b/cmd/crowdsec/serve.go index be076be2d..44a66fba4 100644 --- a/cmd/crowdsec/serve.go +++ b/cmd/crowdsec/serve.go @@ -1,15 +1,20 @@ package main import ( + "fmt" "os" + "os/signal" + "syscall" "time" - "github.com/crowdsecurity/crowdsec/pkg/acquisition" - leaky "github.com/crowdsecurity/crowdsec/pkg/leakybucket" - "github.com/crowdsecurity/crowdsec/pkg/outputs" - log "github.com/sirupsen/logrus" + "github.com/coreos/go-systemd/daemon" + "github.com/pkg/errors" - "github.com/sevlyar/go-daemon" + leaky "github.com/crowdsecurity/crowdsec/pkg/leakybucket" + "github.com/crowdsecurity/crowdsec/pkg/types" + log "github.com/sirupsen/logrus" + "gopkg.in/tomb.v2" + //"github.com/sevlyar/go-daemon" ) //debugHandler is kept as a dev convenience : it shuts down and serialize internal state @@ -17,11 +22,11 @@ func debugHandler(sig os.Signal) error { var tmpFile string var err error //stop go routines - if err := ShutdownRoutines(); err != nil { + if err := ShutdownCrowdsecRoutines(); err != nil { log.Warningf("Failed to shut down routines: %s", err) } //todo : properly stop acquis with the tail readers - if tmpFile, err = leaky.DumpBucketsStateAt(time.Now(), buckets); err != nil { + if tmpFile, err = leaky.DumpBucketsStateAt(time.Now(), cConfig.Crowdsec.BucketStateDumpDir, buckets); err != nil { log.Warningf("Failed dumping bucket state : %s", err) } if err := leaky.ShutdownAllBuckets(buckets); err != nil { @@ -34,154 +39,237 @@ func debugHandler(sig os.Signal) error { func reloadHandler(sig os.Signal) error { var tmpFile string var err error + //stop go routines - if err := ShutdownRoutines(); err != nil { - log.Fatalf("Failed to shut down routines: %s", err) - } - if tmpFile, err = leaky.DumpBucketsStateAt(time.Now(), buckets); err != nil { - log.Fatalf("Failed dumping bucket state : %s", err) + if !disableAgent { + if err := shutdownCrowdsec(); err != nil { + log.Fatalf("Failed to shut down crowdsec routines: %s", err) + } + if cConfig.Crowdsec != nil && cConfig.Crowdsec.BucketStateDumpDir != "" { + if tmpFile, err = leaky.DumpBucketsStateAt(time.Now(), 
cConfig.Crowdsec.BucketStateDumpDir, buckets); err != nil { + log.Fatalf("Failed dumping bucket state : %s", err) + } + } + + if err := leaky.ShutdownAllBuckets(buckets); err != nil { + log.Fatalf("while shutting down routines : %s", err) + } } - if err := leaky.ShutdownAllBuckets(buckets); err != nil { - log.Fatalf("while shutting down routines : %s", err) - } - //reload the simulation state - if err := cConfig.LoadSimulation(); err != nil { - log.Errorf("reload error (simulation) : %s", err) + if !disableAPI { + if err := shutdownAPI(); err != nil { + log.Fatalf("Failed to shut down api routines: %s", err) + } } - //reload all and start processing again :) - if err := LoadParsers(cConfig); err != nil { - log.Fatalf("Failed to load parsers: %s", err) + /* + re-init tombs + */ + acquisTomb = tomb.Tomb{} + parsersTomb = tomb.Tomb{} + bucketsTomb = tomb.Tomb{} + outputsTomb = tomb.Tomb{} + apiTomb = tomb.Tomb{} + crowdsecTomb = tomb.Tomb{} + + if err := LoadConfig(cConfig); err != nil { + log.Fatalf(err.Error()) + } + // Configure logging + if err = types.SetDefaultLoggerConfig(cConfig.Common.LogMedia, cConfig.Common.LogDir, *cConfig.Common.LogLevel); err != nil { + log.Fatal(err.Error()) } - if err := LoadBuckets(cConfig); err != nil { - log.Fatalf("Failed to load scenarios: %s", err) + if !disableAPI { + apiServer, err := initAPIServer() + if err != nil { + return fmt.Errorf("unable to init api server: %s", err) + } - } - //restore bucket state - log.Warningf("Restoring buckets state from %s", tmpFile) - if err := leaky.LoadBucketsState(tmpFile, buckets, holders); err != nil { - log.Fatalf("unable to restore buckets : %s", err) + serveAPIServer(apiServer) } - if err := LoadOutputs(cConfig); err != nil { - log.Fatalf("failed to initialize outputs : %s", err) + if !disableAgent { + csParsers, err := initCrowdsec() + if err != nil { + return fmt.Errorf("unable to init crowdsec: %s", err) + } + //restore bucket state + if tmpFile != "" { + log.Warningf("Restoring buckets state from %s", tmpFile) + if err := leaky.LoadBucketsState(tmpFile, buckets, holders); err != nil { + log.Fatalf("unable to restore buckets : %s", err) + } + } + //reload the simulation state + if err := cConfig.LoadSimulation(); err != nil { + log.Errorf("reload error (simulation) : %s", err) + } + serveCrowdsec(csParsers) } - if err := LoadAcquisition(cConfig); err != nil { - log.Fatalf("Error while loading acquisition config : %s", err) - } - //Start the background routines that comunicate via chan - log.Infof("Starting processing routines") - inputLineChan, err := StartProcessingRoutines(cConfig) - if err != nil { - log.Fatalf("failed to start processing routines : %s", err) - } - - //Fire! 
- log.Warningf("Starting processing data") - - acquisition.AcquisStartReading(acquisitionCTX, inputLineChan, &acquisTomb) - log.Printf("Reload is finished") //delete the tmp file, it's safe now :) - if err := os.Remove(tmpFile); err != nil { - log.Warningf("Failed to delete temp file (%s) : %s", tmpFile, err) + if tmpFile != "" { + if err := os.Remove(tmpFile); err != nil { + log.Warningf("Failed to delete temp file (%s) : %s", tmpFile, err) + } } return nil } -func ShutdownRoutines() error { +func ShutdownCrowdsecRoutines() error { var reterr error + log.Debugf("Shutting down crowdsec sub-routines") acquisTomb.Kill(nil) - log.Infof("waiting for acquisition to finish") + log.Debugf("waiting for acquisition to finish") if err := acquisTomb.Wait(); err != nil { log.Warningf("Acquisition returned error : %s", err) reterr = err } - log.Infof("acquisition is finished, wait for parser/bucket/ouputs.") + log.Debugf("acquisition is finished, wait for parser/bucket/ouputs.") parsersTomb.Kill(nil) if err := parsersTomb.Wait(); err != nil { log.Warningf("Parsers returned error : %s", err) reterr = err } - log.Infof("parsers is done") + log.Debugf("parsers is done") bucketsTomb.Kill(nil) if err := bucketsTomb.Wait(); err != nil { log.Warningf("Buckets returned error : %s", err) reterr = err } - log.Infof("buckets is done") + log.Debugf("buckets is done") outputsTomb.Kill(nil) if err := outputsTomb.Wait(); err != nil { log.Warningf("Ouputs returned error : %s", err) reterr = err } - log.Infof("outputs are done") + log.Debugf("outputs are done") + //everything is dead johny + crowdsecTomb.Kill(nil) + return reterr } -func termHandler(sig os.Signal) error { - log.Infof("Shutting down routines") - if err := ShutdownRoutines(); err != nil { - log.Errorf("Error encountered while shutting down routines : %s", err) +func shutdownAPI() error { + log.Debugf("shutting down api via Tomb") + apiTomb.Kill(nil) + if err := apiTomb.Wait(); err != nil { + return err } - log.Warningf("all routines are done, bye.") - return daemon.ErrStop + log.Debugf("done") + return nil } -func serveOneTimeRun(outputRunner outputs.Output) error { - if err := acquisTomb.Wait(); err != nil { - log.Warningf("acquisition returned error : %s", err) +func shutdownCrowdsec() error { + log.Debugf("shutting down crowdsec via Tomb") + crowdsecTomb.Kill(nil) + if err := crowdsecTomb.Wait(); err != nil { + return err } - log.Infof("acquisition is finished, wait for parser/bucket/ouputs.") + log.Debugf("done") + return nil +} - /* - While it might make sense to want to shut-down parser/buckets/etc. as soon as acquisition is finished, - we might have some pending buckets : buckets that overflowed, but which LeakRoutine are still alive because they - are waiting to be able to "commit" (push to db). This can happens specifically in a context where a lot of logs - are going to trigger overflow (ie. trigger buckets with ~100% of the logs triggering an overflow). +func termHandler(sig os.Signal) error { + if err := shutdownCrowdsec(); err != nil { + log.Errorf("Error encountered while shutting down crowdsec: %s", err) + } + if err := shutdownAPI(); err != nil { + log.Errorf("Error encountered while shutting down api: %s", err) + } + log.Debugf("termHandler done") + return nil +} - To avoid this (which would mean that we would "lose" some overflows), let's monitor the number of live buckets. - However, because of the blackhole mechanism, you can't really wait for the number of LeakRoutine to go to zero (we might have to wait $blackhole_duration). 
+func HandleSignals() { + signalChan := make(chan os.Signal, 1) + signal.Notify(signalChan, + syscall.SIGHUP, + syscall.SIGTERM) - So : we are waiting for the number of buckets to stop decreasing before returning. "how long" we should wait is a bit of the trick question, - as some operations (ie. reverse dns or such in post-overflow) can take some time :) - */ - - bucketCount := leaky.LeakyRoutineCount - rounds := 0 - successiveStillRounds := 0 - for { - rounds++ - time.Sleep(5 * time.Second) - currBucketCount := leaky.LeakyRoutineCount - if currBucketCount != bucketCount { - if rounds == 0 || rounds%2 == 0 { - log.Printf("Still %d live LeakRoutines, waiting (was %d)", currBucketCount, bucketCount) + exitChan := make(chan int) + go func() { + defer types.CatchPanic("crowdsec/HandleSignals") + for { + s := <-signalChan + switch s { + // kill -SIGHUP XXXX + case syscall.SIGHUP: + log.Warningf("SIGHUP received, reloading") + if err := reloadHandler(s); err != nil { + log.Fatalf("Reload handler failure : %s", err) + } + // kill -SIGTERM XXXX + case syscall.SIGTERM: + log.Warningf("SIGTERM received, shutting down") + if err := termHandler(s); err != nil { + log.Fatalf("Term handler failure : %s", err) + } + exitChan <- 0 } - bucketCount = currBucketCount - successiveStillRounds = 0 - } else { - if successiveStillRounds > 1 { - log.Printf("LeakRoutines commit over.") - break - } - successiveStillRounds++ + } + }() + + code := <-exitChan + log.Warningf("Crowdsec service shutting down") + os.Exit(code) +} + +func Serve() error { + acquisTomb = tomb.Tomb{} + parsersTomb = tomb.Tomb{} + bucketsTomb = tomb.Tomb{} + outputsTomb = tomb.Tomb{} + apiTomb = tomb.Tomb{} + crowdsecTomb = tomb.Tomb{} + + if !disableAPI { + apiServer, err := initAPIServer() + if err != nil { + return errors.Wrap(err, "api server init") + } + if !flags.TestMode { + serveAPIServer(apiServer) } } - time.Sleep(5 * time.Second) - - // wait for the parser to parse all events - if err := ShutdownRoutines(); err != nil { - log.Errorf("failed shutting down routines : %s", err) + if !disableAgent { + csParsers, err := initCrowdsec() + if err != nil { + return errors.Wrap(err, "crowdsec init") + } + /* if it's just linting, we're done */ + if !flags.TestMode { + serveCrowdsec(csParsers) + } + } + if flags.TestMode { + log.Infof("test done") + os.Exit(0) + } + + if cConfig.Common != nil && cConfig.Common.Daemonize { + sent, err := daemon.SdNotify(false, daemon.SdNotifyReady) + if !sent || err != nil { + log.Errorf("Failed to notify(sent: %v): %v", sent, err) + } + /*wait for signals*/ + HandleSignals() + } else { + for { + select { + case <-apiTomb.Dead(): + log.Infof("api shutdown") + os.Exit(0) + case <-crowdsecTomb.Dead(): + log.Infof("crowdsec shutdown") + os.Exit(0) + } + } } - dumpMetrics() - outputRunner.Flush() - log.Warningf("all routines are done, bye.") return nil } diff --git a/config/api.yaml b/config/api.yaml deleted file mode 100644 index 08577237b..000000000 --- a/config/api.yaml +++ /dev/null @@ -1,8 +0,0 @@ -version: v1 -url: https://tmsov6x2n9.execute-api.eu-west-1.amazonaws.com -signin_path: signin -push_path: signals -pull_path: pull -enroll_path: enroll -reset_pwd_path: resetpassword -register_path: register diff --git a/config/config.yaml b/config/config.yaml new file mode 100644 index 000000000..36917e74e --- /dev/null +++ b/config/config.yaml @@ -0,0 +1,49 @@ +common: + daemonize: true + pid_dir: /var/run/ + log_media: file + log_level: info + log_dir: /var/log/ + working_dir: . 
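+# nb : this file can be validated without starting the service : crowdsec -c /etc/crowdsec/config.yaml -t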
+config_paths: + config_dir: /etc/crowdsec/ + data_dir: /var/lib/crowdsec/data/ + simulation_path: /etc/crowdsec/simulation.yaml + hub_dir: /etc/crowdsec/hub/ + index_path: /etc/crowdsec/hub/.index.json +crowdsec_service: + acquisition_path: /etc/crowdsec/acquis.yaml + parser_routines: 1 +cscli: + output: human + hub_branch: wip_lapi +db_config: + log_level: info + type: sqlite + db_path: /var/lib/crowdsec/data/crowdsec.db + #user: + #password: + #db_name: + #host: + #port: + flush: + max_items: 5000 + max_age: 7d +api: + client: + insecure_skip_verify: true + credentials_path: /etc/crowdsec/local_api_credentials.yaml + server: + log_level: info + listen_uri: localhost:8080 + profiles_path: /etc/crowdsec/profiles.yaml + online_client: # Crowdsec API credentials (to push signals and receive bad IPs) + credentials_path: /etc/crowdsec/online_api_credentials.yaml +# tls: +# cert_file: /etc/crowdsec/ssl/cert.pem +# key_file: /etc/crowdsec/ssl/key.pem +prometheus: + enabled: true + level: full + listen_addr: 127.0.0.1 + listen_port: 6060 diff --git a/config/crowdsec.service b/config/crowdsec.service index 6a12bc6f3..6718aaf17 100644 --- a/config/crowdsec.service +++ b/config/crowdsec.service @@ -1,13 +1,13 @@ [Unit] -Description=Crowdwatch agent +Description=Crowdsec agent After=syslog.target network.target remote-fs.target nss-lookup.target [Service] -Type=forking -PIDFile=${PID}/crowdsec.pid -#ExecStartPre=${BIN} -c ${CFG}/default.yaml -t -ExecStart=${BIN} -c ${CFG}/default.yaml -ExecStartPost=/bin/sleep 0.1 +Type=notify +PIDFile=/var/run/crowdsec.pid +ExecStartPre=/usr/local/bin/crowdsec -c /etc/crowdsec/config.yaml -t +ExecStart=/usr/local/bin/crowdsec -c /etc/crowdsec/config.yaml +#ExecStartPost=/bin/sleep 0.1 ExecReload=/bin/kill -HUP $MAINPID [Install] diff --git a/config/dev.yaml b/config/dev.yaml index 906ed34f5..9d0175988 100644 --- a/config/dev.yaml +++ b/config/dev.yaml @@ -1,18 +1,43 @@ -working_dir: "." -data_dir: "./data" -config_dir: "./config" -pid_dir: "./" -cscli_dir: "./config/crowdsec-cli" -log_dir: "./logs" -log_mode: "stdout" -log_level: info -prometheus: false -simulation_path: ./config/simulation.yaml -profiling: false -apimode: false -plugin: - backend: "./config/plugins/backend" - max_records: 10000 - #30 days = 720 hours - max_records_age: 720h - \ No newline at end of file +common: + daemonize: true + pid_dir: /tmp/ + log_media: stdout + log_level: info + working_dir: . 
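+# dev profile : only a subset of the keys is set here, see config/config.yaml for the full layout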
+config_paths: + config_dir: ./config + data_dir: ./data/ + #simulation_path: /etc/crowdsec/config/simulation.yaml + #hub_dir: /etc/crowdsec/hub/ + #index_path: ./config/hub/.index.json +crowdsec_service: + #acquisition_path: ./config/acquis.yaml + parser_routines: 1 +cscli: + output: human +db_config: + type: sqlite + db_path: ./data/crowdsec.db + user: root + password: crowdsec + db_name: crowdsec + host: "172.17.0.2" + port: 3306 + flush: + #max_items: 10000 + #max_age: 168h +api: + client: + credentials_path: ./config/local_api_credentials.yaml + server: + #insecure_skip_verify: true + listen_uri: localhost:8081 + profiles_path: ./config/profiles.yaml + tls: + #cert_file: ./cert.pem + #key_file: ./key.pem + online_client: # Crowdsec API + credentials_path: ./config/online_api_credentials.yaml +prometheus: + enabled: true + level: full diff --git a/config/local_api_credentials.yaml b/config/local_api_credentials.yaml new file mode 100644 index 000000000..c009e7c5d --- /dev/null +++ b/config/local_api_credentials.yaml @@ -0,0 +1 @@ +url: http://localhost:8080 \ No newline at end of file diff --git a/docs/cheat_sheets/usecase_howto.md b/config/online_api_credentials.yaml similarity index 100% rename from docs/cheat_sheets/usecase_howto.md rename to config/online_api_credentials.yaml diff --git a/config/plugins/backend/database.yaml b/config/plugins/backend/database.yaml deleted file mode 100644 index d4ca52252..000000000 --- a/config/plugins/backend/database.yaml +++ /dev/null @@ -1,19 +0,0 @@ -name: database -path: /usr/local/lib/crowdsec/plugins/backend/database.so -config: - ## DB type supported (mysql, sqlite) - ## By default it using sqlite - type: sqlite - - ## mysql options - # db_host: localhost - # db_username: crowdsec - # db_password: crowdsec - # db_name: crowdsec - - ## sqlite options - db_path: /var/lib/crowdsec/data/crowdsec.db - - ## Other options - flush: true - # debug: true diff --git a/config/prod.yaml b/config/prod.yaml deleted file mode 100644 index 0a2edc640..000000000 --- a/config/prod.yaml +++ /dev/null @@ -1,18 +0,0 @@ -working_dir: /tmp/ -data_dir: ${DATA} -config_dir: ${CFG} -pid_dir: ${PID} -log_dir: /var/log/ -cscli_dir: ${CFG}/cscli -simulation_path: ${CFG}/simulation.yaml -log_mode: file -log_level: info -profiling: false -apimode: true -daemon: true -prometheus: true -#for prometheus agent / golang debugging -http_listen: 127.0.0.1:6060 -plugin: - backend: "/etc/crowdsec/plugins/backend" - max_records_age: 720h diff --git a/config/profiles.yaml b/config/profiles.yaml index 8507839d2..d9a12c9c5 100644 --- a/config/profiles.yaml +++ b/config/profiles.yaml @@ -1,26 +1,8 @@ -profile: default_remediation -filter: "sig.Labels.remediation == 'true' && not sig.Whitelisted" -api: true # If no api: specified, will use the default config in default.yaml -remediation: - ban: true - slow: true - captcha: true - duration: 4h -outputs: - - plugin: database ---- -profile: default_notification -filter: "sig.Labels.remediation != 'true'" -#remediation is empty, it means non taken -api: false -outputs: - - plugin: database # If we do not want to push, we can remove this line and the next one - store: false ---- -profile: send_false_positif_to_API -filter: "sig.Whitelisted == true && sig.Labels.remediation == 'true'" -#remediation is empty, it means non taken -api: true -outputs: - - plugin: database # If we do not want to push, we can remove this line and the next one - store: false \ No newline at end of file +name: default_ip_remediation +#debug: true +filters: + - 
Alert.Remediation == true && Alert.GetScope() == "Ip"
+decisions:
+ - type: ban
+   duration: 1h
+on_success: break
diff --git a/config/user.yaml b/config/user.yaml
index 61631fa74..4f66e6611 100644
--- a/config/user.yaml
+++ b/config/user.yaml
@@ -1,16 +1,40 @@
-working_dir: /tmp/
-data_dir: ${DATA}
-config_dir: ${CFG}
-pid_dir: ${PID}
-log_dir: /var/log/
-cscli_dir: ${CFG}/cscli
-log_mode: stdout
-log_level: info
-profiling: false
-apimode: false
-daemon: false
-prometheus: false
-#for prometheus agent / golang debugging
-http_listen: 127.0.0.1:6060
-plugin:
-  backend: "/etc/crowdsec/plugins/backend"
+common:
+  daemonize: false
+  pid_dir: /var/run/
+  log_media: stdout
+  log_level: info
+  log_dir: /var/log/
+  working_dir: .
+config_paths:
+  config_dir: /etc/crowdsec/
+  data_dir: /var/lib/crowdsec/data
+  #simulation_path: /etc/crowdsec/config/simulation.yaml
+  #hub_dir: /etc/crowdsec/hub/
+  #index_path: ./config/hub/.index.json
+crowdsec_service:
+  #acquisition_path: ./config/acquis.yaml
+  parser_routines: 1
+cscli:
+  output: human
+db_config:
+  type: sqlite
+  db_path: /var/lib/crowdsec/data/crowdsec.db
+  user: crowdsec
+  #log_level: info
+  password: crowdsec
+  db_name: crowdsec
+  host: "127.0.0.1"
+  port: 3306
+api:
+  client:
+    insecure_skip_verify: true # default true
+    credentials_path: /etc/crowdsec/local_api_credentials.yaml
+  server:
+    #log_level: info
+    listen_uri: localhost:8080
+    profiles_path: /etc/crowdsec/profiles.yaml
+    online_client: # Crowdsec API
+      credentials_path: /etc/crowdsec/online_api_credentials.yaml
+prometheus:
+  enabled: true
+  level: full
diff --git a/data/GeoLite2-ASN.mmdb b/data/GeoLite2-ASN.mmdb
deleted file mode 100644
index 8d16066cc..000000000
Binary files a/data/GeoLite2-ASN.mmdb and /dev/null differ
diff --git a/data/GeoLite2-City.mmdb b/data/GeoLite2-City.mmdb
deleted file mode 100644
index 6ab05184b..000000000
Binary files a/data/GeoLite2-City.mmdb and /dev/null differ
diff --git a/docker/README.md b/docker/README.md
new file mode 100644
index 000000000..702adda58
--- /dev/null
+++ b/docker/README.md
@@ -0,0 +1,129 @@
+# Crowdsec
+
+Crowdsec - An open-source, lightweight agent to detect and respond to bad behaviours. It also automatically benefits from our global community-wide IP reputation database.
+
+## Getting Started
+
+Before you start using the docker image, we suggest you read our documentation to understand all [crowdsec concepts](https://docs.crowdsec.net/).
+
+### Prerequisites
+
+In order to run this container you'll need docker installed.
+
+* [Windows](https://docs.docker.com/windows/started)
+* [OS X](https://docs.docker.com/mac/started/)
+* [Linux](https://docs.docker.com/linux/started/)
+
+### How to use?
+
+#### Build
+
+```shell
+git clone https://github.com/crowdsecurity/crowdsec.git && cd crowdsec
+docker build -t crowdsec .
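+# optional sanity check -- assumes the crowdsec binary is on the PATH inside the image :
+docker run --rm crowdsec crowdsec -version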
+```
+
+#### Run
+
+The container is built with a specific docker [configuration](https://github.com/crowdsecurity/crowdsec/blob/master/docker/config.yaml) :
+
+You should apply the following configuration before starting it :
+
+* Specify collections|scenarios|parsers/postoverflows to install via the environment variables (by default [`crowdsecurity/linux`](https://hub.crowdsec.net/author/crowdsecurity/collections/linux) is installed)
+* Mount volumes to specify your configuration
+* Mount volumes to specify your log files that should be ingested by crowdsec (set up in acquis.yaml)
+* Mount other volumes : if you want to share the database for example
+
+```shell
+docker run -d -v config.yaml:/etc/crowdsec/config.yaml \
+    -v acquis.yaml:/etc/crowdsec/acquis.yaml \
+    -e COLLECTIONS="crowdsecurity/sshd" \
+    -v /var/log/auth.log:/var/log/auth.log \
+    -v /path/mycustom.log:/var/log/mycustom.log \
+    --name crowdsec crowdsec
+```
+
+#### Example
+
+I have my own configuration :
+```shell
+user@cs ~/crowdsec/config $ ls
+acquis.yaml  config.yaml
+```
+
+Here is my acquis.yaml file:
+```yaml
+filenames:
+ - /logs/auth.log
+ - /logs/syslog
+labels:
+  type: syslog
+---
+filename: /logs/apache2/*.log
+labels:
+  type: apache2
+```
+
+So, I want to run crowdsec with :
+
+* My configuration files
+* Ingest the log files specified in acquis.yaml
+* Share the crowdsec sqlite database with my host (you need to create the empty file first, otherwise docker will create a directory instead of a simple file)
+* Expose the local API through the host (listens by default on `8080`)
+* Expose the prometheus handler through the host (listens by default on `6060`)
+
+```shell
+touch /path/myDatabase.db
+docker run -d -v config.yaml:/etc/crowdsec/config.yaml \
+    -v acquis.yaml:/etc/crowdsec/acquis.yaml \
+    -v /var/log/auth.log:/logs/auth.log \
+    -v /var/log/syslog.log:/logs/syslog.log \
+    -v /var/log/apache:/logs/apache \
+    -v /path/myDatabase.db:/var/lib/crowdsec/data/crowdsec.db \
+    -e COLLECTIONS="crowdsecurity/apache2 crowdsecurity/sshd" \
+    -p 8080:8080 -p 6060:6060 \
+    --name crowdsec crowdsec
+```
+
+### Environment Variables
+
+* `COLLECTIONS` - Collections to install from the [hub](https://hub.crowdsec.net/browse/#collections), separated by spaces : `-e COLLECTIONS="crowdsecurity/linux crowdsecurity/apache2"`
+* `SCENARIOS` - Scenarios to install from the [hub](https://hub.crowdsec.net/browse/#configurations), separated by spaces : `-e SCENARIOS="crowdsecurity/http-bad-user-agent crowdsecurity/http-xss-probing"`
+* `PARSERS` - Parsers to install from the [hub](https://hub.crowdsec.net/browse/#configurations), separated by spaces : `-e PARSERS="crowdsecurity/http-logs crowdsecurity/modsecurity"`
+* `POSTOVERFLOWS` - Postoverflows to install from the [hub](https://hub.crowdsec.net/browse/#configurations), separated by spaces : `-e POSTOVERFLOWS="crowdsecurity/cdn-whitelist"`
+* `CONFIG_FILE` - Configuration file (default: `/etc/crowdsec/config.yaml`) : `-e CONFIG_FILE=""`
+* `FILE_PATH` - Process a single file in time-machine : `-e FILE_PATH=""`
+* `JOURNALCTL_FILTER` - Process a single journalctl output in time-machine : `-e JOURNALCTL_FILTER=""`
+* `TYPE` - [`Labels.type`](https://docs.crowdsec.net/Crowdsec/v1/references/acquisition/) for file in time-machine : `-e TYPE=""`
+* `TEST_MODE` - Only test configs (default: `false`) : `-e TEST_MODE=""`
+* `DISABLE_AGENT` - Disable the agent (default: `false`) : `-e DISABLE_AGENT=""`
+* `DISABLE_API` - Disable the local API (default: `false`) : `-e DISABLE_API=""`
+* `REGISTER_TO_ONLINE_API` - Register to
Online API (default: `false`) : `-e REGISTER_TO_ONLINE_API=""` +* `LEVEL_TRACE` - Trace-level (VERY verbose) on stdout (default: `false`) : `-e LEVEL_TRACE=""` +* `LEVEL_DEBUG` - Debug-level on stdout (default: `false`) : `-e LEVEL_DEBUG=""` +* `LEVEL_INFO` - Info-level on stdout (default: `false`) : `-e LEVEL_INFO=""` + +### Volumes + +* `/var/lib/crowdsec/data/` - Directory where all crowdsec data (Databases) is located + +* `/etc/crowdsec/` - Directory where all crowdsec configurations are located + +#### Useful File Locations + +* `/usr/local/bin/crowdsec` - Crowdsec binary + +* `/usr/local/bin/cscli` - Crowdsec CLI binary to interact with crowdsec + +## Find Us + +* [GitHub](https://github.com/crowdsecurity/crowdsec) + +## Contributing + +Please read [contributing](https://docs.crowdsec.net/Crowdsec/v1/contributing/) for details on our code of conduct, and the process for submitting pull requests to us. + +## License + +This project is licensed under the MIT License - see the [LICENSE](https://github.com/crowdsecurity/crowdsec/blob/master/LICENSE) file for details. \ No newline at end of file diff --git a/docker/config.yaml b/docker/config.yaml new file mode 100644 index 000000000..1b6864f43 --- /dev/null +++ b/docker/config.yaml @@ -0,0 +1,49 @@ +common: + daemonize: false + pid_dir: /var/run/ + log_media: stdout + log_level: info + log_dir: /var/log/ + working_dir: . +config_paths: + config_dir: /etc/crowdsec/ + data_dir: /var/lib/crowdsec/data/ + simulation_path: /etc/crowdsec/simulation.yaml + hub_dir: /etc/crowdsec/hub/ + index_path: /etc/crowdsec/hub/.index.json +crowdsec_service: + acquisition_path: /etc/crowdsec/acquis.yaml + parser_routines: 1 +cscli: + output: human + hub_branch: wip_lapi +db_config: + log_level: info + type: sqlite + db_path: /var/lib/crowdsec/data/crowdsec.db + #user: + #password: + #db_name: + #host: + #port: + flush: + max_items: 5000 + max_age: 7d +api: + client: + insecure_skip_verify: true + credentials_path: /etc/crowdsec/local_api_credentials.yaml + server: + log_level: info + listen_uri: 0.0.0.0:8080 + profiles_path: /etc/crowdsec/profiles.yaml + online_client: # Crowdsec API credentials (to push signals and receive bad IPs) + #credentials_path: /etc/crowdsec/online_api_credentials.yaml +# tls: +# cert_file: /etc/crowdsec/ssl/cert.pem +# key_file: /etc/crowdsec/ssl/key.pem +prometheus: + enabled: true + level: full + listen_addr: 0.0.0.0 + listen_port: 6060 diff --git a/docker/docker_start.sh b/docker/docker_start.sh new file mode 100644 index 000000000..ea807fb65 --- /dev/null +++ b/docker/docker_start.sh @@ -0,0 +1,65 @@ +#!/bin/sh + +# Check if the container has already been started +cscli machines list | grep 127.0.0.1 +if [ $? == 1 ]; then + cscli machines add --force --auto -f /etc/crowdsec/local_api_credentials.yaml +fi + +if [ "$REGISTER_TO_ONLINE_API" == "true" ] || [ "$REGISTER_TO_ONLINE_API" == "TRUE" ] && [ "$CONFIG_FILE" == "" ] ; then + cat /etc/crowdsec/config.yaml | grep online_api_credentials.yaml + if [ $? 
== 1 ]; then
+        sed -ri 's/^(\s*)(#credentials_path\s*:\s*$)/\1credentials_path: \/etc\/crowdsec\/online_api_credentials.yaml/' /etc/crowdsec/config.yaml
+        cscli capi register > /etc/crowdsec/online_api_credentials.yaml
+    fi
+fi
+
+## Install collections, parsers & scenarios
+cscli hub update
+cscli collections upgrade crowdsecurity/linux
+if [ "$COLLECTIONS" != "" ]; then
+    cscli collections install $COLLECTIONS
+fi
+if [ "$PARSERS" != "" ]; then
+    cscli parsers install $PARSERS
+fi
+if [ "$SCENARIOS" != "" ]; then
+    cscli scenarios install $SCENARIOS
+fi
+if [ "$POSTOVERFLOWS" != "" ]; then
+    cscli postoverflows install $POSTOVERFLOWS
+fi
+
+ARGS=""
+if [ "$CONFIG_FILE" != "" ]; then
+    ARGS="-c $CONFIG_FILE"
+fi
+if [ "$FILE_PATH" != "" ]; then
+    ARGS="$ARGS -file $FILE_PATH"
+fi
+if [ "$JOURNALCTL_FILTER" != "" ]; then
+    ARGS="$ARGS -jfilter $JOURNALCTL_FILTER"
+fi
+if [ "$TYPE" != "" ]; then
+    ARGS="$ARGS -type $TYPE"
+fi
+if [ "$TEST_MODE" == "true" ] || [ "$TEST_MODE" == "TRUE" ]; then
+    ARGS="$ARGS -t"
+fi
+if [ "$DISABLE_AGENT" == "true" ] || [ "$DISABLE_AGENT" == "TRUE" ]; then
+    ARGS="$ARGS -no-cs"
+fi
+if [ "$DISABLE_API" == "true" ] || [ "$DISABLE_API" == "TRUE" ]; then
+    ARGS="$ARGS -no-api"
+fi
+if [ "$LEVEL_TRACE" == "true" ] || [ "$LEVEL_TRACE" == "TRUE" ]; then
+    ARGS="$ARGS -trace"
+fi
+if [ "$LEVEL_DEBUG" == "true" ] || [ "$LEVEL_DEBUG" == "TRUE" ]; then
+    ARGS="$ARGS -debug"
+fi
+if [ "$LEVEL_INFO" == "true" ] || [ "$LEVEL_INFO" == "TRUE" ]; then
+    ARGS="$ARGS -info"
+fi
+
+exec crowdsec $ARGS
\ No newline at end of file
diff --git a/docs/about.md b/docs/about.md
deleted file mode 100644
index 7a0ef4e88..000000000
--- a/docs/about.md
+++ /dev/null
@@ -1,3 +0,0 @@
-# Crowdsec
-
-{{ macros_info() }}
diff --git a/docs/assets/images/crowdsec2.png b/docs/assets/images/crowdsec2.png
index 27302f9b8..bbf619a73 100644
Binary files a/docs/assets/images/crowdsec2.png and b/docs/assets/images/crowdsec2.png differ
diff --git a/docs/assets/images/crowdsec_architecture.png b/docs/assets/images/crowdsec_architecture.png
deleted file mode 100644
index 25f351198..000000000
Binary files a/docs/assets/images/crowdsec_architecture.png and /dev/null differ
diff --git a/docs/assets/images/crowdsec_logo.png b/docs/assets/images/crowdsec_logo.png
deleted file mode 100644
index 6b2953dac..000000000
Binary files a/docs/assets/images/crowdsec_logo.png and /dev/null differ
diff --git a/docs/assets/images/hub_parser.png b/docs/assets/images/hub_parser.png
deleted file mode 100644
index 24efc3716..000000000
Binary files a/docs/assets/images/hub_parser.png and /dev/null differ
diff --git a/docs/assets/images/hub_scenario.png b/docs/assets/images/hub_scenario.png
deleted file mode 100644
index 19a3ba1f3..000000000
Binary files a/docs/assets/images/hub_scenario.png and /dev/null differ
diff --git a/docs/bouncers/index.md b/docs/bouncers/index.md
deleted file mode 100644
index 55d3618fa..000000000
--- a/docs/bouncers/index.md
+++ /dev/null
@@ -1,12 +0,0 @@
-# bouncers
-
-
-{{bouncers.Name}} are standalone software pieces in charge of acting upon blocked IPs.
-
-They can either be within the applicative stack, or work out of band :
-
-[nginx bouncer](https://github.com/crowdsecurity/cs-nginx-bouncer) will check every unknown IP against the database before letting go through or serving a *403* to the user, while a [netfilter bouncer](https://github.com/crowdsecurity/cs-netfilter-bouncer) will simply "add" malevolent IPs to nftables/ipset set of blacklisted IPs.
-
-
-You can explore [available {{bouncers.name}} on the hub]({{hub.plugins_url}}), and find below a few of the "main" {{bouncers.name}}.
-
diff --git a/docs/faq.md b/docs/faq.md
new file mode 100644
index 000000000..846efc098
--- /dev/null
+++ b/docs/faq.md
@@ -0,0 +1,135 @@
+# FREQUENTLY ASKED QUESTIONS
+
+## What is {{v1X.crowdsec.name}} ?
+
+{{v1X.crowdsec.Name}} is an open-source security software. See the [overview](/#what-is-crowdsec)
+
+
+## What language is it written in ?
+
+{{v1X.crowdsec.Name}} is written in [Golang](https://golang.org/)
+
+## What license is {{v1X.crowdsec.name}} released under ?
+
+{{v1X.crowdsec.Name}} is under [MIT license]({{v1X.crowdsec.url}}/blob/master/LICENSE)
+
+## Which information is sent to the APIs ?
+
+Our aim is to build a strong community that can share malevolent attackers' IPs; for that, we need to collect the bans triggered locally by each user.
+
+The signal sent by your {{v1X.crowdsec.name}} to the central API contains only meta-data about the attack :
+
+ - Attacker IP
+ - Scenario name
+ - Time of start/end of attack
+
+Your logs are not sent to our central API, only meta-data about blocked attacks will be.
+
+## What is the performance impact ?
+
+As {{v1X.crowdsec.name}} only works on logs, it shouldn't impact your production.
+When it comes to {{v1X.bouncers.name}}, it should perform **one** request to the database when a **new** IP is discovered and thus has minimal performance impact.
+
+## How fast is it ?
+
+{{v1X.crowdsec.name}} can easily handle several thousands of events per second on a rich pipeline (multiple parsers, geoip enrichment, scenarios and so on). Logs are a good fit for sharding by default, so it is definitely the way to go if you need to handle higher throughput.
+
+If you need help for large scale deployment, please get in touch with us on the {{v1X.doc.discourse}}, we love challenges ;)
+
+## What backend database does {{v1X.crowdsec.Name}} support and how to switch ?
+
+{{v1X.crowdsec.name}} versions up to v0.3.X support SQLite (default) and MySQL databases.
+See [backend configuration](/Crowdsec/v0/references/output/#switching-backend-database) for relevant configuration. MySQL here is more suitable for distributed architectures where bouncers across the applicative stack need to access a centralized ban database.
+
+{{v1X.crowdsec.name}} versions 1.X and later support SQLite (default), MySQL and PostgreSQL databases.
+See [databases configuration](/Crowdsec/v1/user_guide/database/) for relevant configuration. Thanks to the {{v1X.lapi.Htmlname}}, distributed architectures can be handled even with an SQLite database.
+
+SQLite is the default as it's suitable for standalone/single-machine setups.
+
+## How to control granularity of actions ? (whitelists, simulation etc.)
+
+{{v1X.crowdsec.name}} supports both [whitelists](/Crowdsec/v1/write_configurations/whitelist/) and [simulation](/Crowdsec/v1/references/simulation/) :
+
+ - Whitelists allow you to "discard" events or overflows
+ - Simulation allows you to simply cancel the decision that is going to be taken, but keep track of it
+
+## How to add whitelists ?
+
+You can follow this [guide](/Crowdsec/v1/write_configurations/whitelist/)
+
+## How to set up proxy ?
+
+Setting up a proxy works out of the box: the [net/http golang library](https://golang.org/src/net/http/transport.go) can handle these environment variables:
+
+* `HTTP_PROXY`
+* `HTTPS_PROXY`
+* `NO_PROXY`
+
+Since {{v1X.cli.name}} uses `sudo`, you just need to add this line in `visudo` after setting up the previous environment variables:
+
+```
+Defaults env_keep += "HTTP_PROXY HTTPS_PROXY NO_PROXY"
+```
+
+## How to report a bug ?
+
+To report a bug, please open an issue on the [repository]({{v1X.crowdsec.bugreport}})
+
+## What about false positives ?
+
+Several initiatives have been taken to tackle false positives as early as possible :
+
+ - The scenarios published on the hub are tailored to favor low false positive rates
+ - You can find [generic whitelists](https://hub.crowdsec.net/author/crowdsecurity/collections/whitelist-good-actors) that should cover most common cases (SEO whitelists, CDN whitelists etc.)
+ - The [simulation configuration](/Crowdsec/v1/references/simulation/) allows you to keep a tight control over scenarios and their false positives
+
+
+## I need some help
+
+Feel free to ask for help on the {{v1X.doc.community}}.
+
+## How to use crowdsec on raspberry pi OS (formerly known as Raspbian)
+
+Please keep in mind that raspberry pi OS is designed to work on all
+raspberry pi versions. Even though the port target is known as armhf, it's
+not exactly the same target as the Debian armhf port.
+
+The best way to have a crowdsec version for such an architecture is to
+do:
+1. install golang (all versions from 1.13 will do)
+2. `export GOARCH=arm`
+3. `export CGO=1`
+4. Update the GOARCH variable in the Makefile to `arm`
+5. install the arm gcc cross compiler (on Debian the package is gcc-arm-linux-gnueabihf)
+6. Compile crowdsec using the usual `make` command
+
+
+
diff --git a/docs/getting_started/FAQ.md b/docs/getting_started/FAQ.md
deleted file mode 100644
index ab00e5cfa..000000000
--- a/docs/getting_started/FAQ.md
+++ /dev/null
@@ -1,137 +0,0 @@
-# FREQUENTLY ASKED QUESTIONS
-
-## What is {{crowdsec.name}} ?
-
-{{crowdsec.Name}} is a security open-source software. See the [overview](/#what-is-crowdsec)
-
-
-## What language is it written in ?
-
-{{crowdsec.Name}} is written in [Golang](https://golang.org/)
-
-## What licence is {{crowdsec.name}} released under ?
-
-{{crowdsec.Name}} is under [MIT license]({{crowdsec.url}}/blob/master/LICENSE)
-
-## Which information is sent to the APIs ?
-
-Our aim is to build a strong community that can share malevolent attackers IPs, for that we need to collect the bans triggered locally by each user.
-
-The alert sent by your {{crowdsec.name}} to the central API only contains only meta-data about the attack :
-
- - Attacker IP
- - Scenario name
- - Time of start/end of attack
-
-Your logs are not sent to our central API, only meta-data about blocked attacks will be.
-
-When your crowdsec is authenticating to the central API, it as well sends the list of the scenarios you have enabled from the hub. This is used by us to provide you the most accurate consensus list, so that we can provide you IPs that have triggered scenario(s) that you are interested into as well.
-
-
-## What is the performance impact ?
-
-As {{crowdsec.name}} only works on logs, it shouldn't impact your production.
-When it comes to {{bouncers.name}}, it should perform **one** request to the database when a **new** IP is discovered thus have minimal performance impact.
-
-## How fast is it ?
- -{{crowdsec.name}} can easily handle several thousands of events per second on a rich pipeline (multiple parsers, geoip enrichment, scenarios and so on). Logs are a good fit for sharding by default, so it is definitely the way to go if you need to handle higher throughput. - -If you need help for large scale deployment, please get in touch with us on the {{doc.discourse}}, we love challenges ;) - -## What backend database does {{crowdsec.Name}} supports and how to switch ? - -Currently (0.3.0), {{crowdsec.name}} supports SQLite (default) and MySQL databases. -See [backend configuration](/references/output/#switching-backend-database) for relevant configuration. - -SQLite is the default backend as it's suitable for standalone/single-machine setups. -On the other hand, MySQL is more suitable for distributed architectures where bouncers across the applicative stack need to access a centralized ban database. - -## How to control granularity of actions ? (whitelists, learning etc.) - -{{crowdsec.name}} support both [whitelists]((/write_configurations/whitelist/) and [learning](/guide/crowdsec/simulation/) : - - - Whitelists allows you to "discard" events or overflows - - Learning allows you to simply cancel the decision that is going to be taken, but keep track of it - -## How to add whitelists ? - -You can follow this [guide](/write_configurations/whitelist/) - -## How to set up proxy ? - -Setting up a proxy works out of the box, the [net/http golang library](https://golang.org/src/net/http/transport.go) can handle those environment variables: - -* `HTTP_PROXY` -* `HTTPS_PROXY` -* `NO_PROXY` - -Since {{cli.name}} uses `sudo`, you just this line in `visudo` after setting up the previous environment variables: - -``` -Defaults env_keep += "HTTP_PROXY HTTPS_PROXY NO_PROXY" -``` - -## How to report a bug ? - -To report a bug, please open an issue on the [repository]({{crowdsec.bugreport}}) - -## What about false positives ? - -Several initiatives have been taken to tackle the false positives approach as early as possible : - - - The scenarios published on the hub are tailored to favor low false positive rates - - You can find [generic whitelists](https://hub.crowdsec.net/author/crowdsecurity/collections/whitelist-good-actors) that should allow to cover most common cases (SEO whitelists, CDN whitelists etc.) - - The [simulation configuration](/guide/crowdsec/simulation/) allows you to keep a tight control over scenario and their false positives - - -## I need some help - -Feel free to ask for some help to the {{doc.community}}. - -## I don't see anything in the dashboard ! - -Whenever in doubt with what is being processed or not, check [cscli metrics](/observability/command_line/). It should allow you to check : - - if the logs are properly read - - if the logs are properly parsed - - if the scenarios are being triggered - -If logs are being read, parsed and overflows are being triggered, but still nothing appears in the dashboard, ask for some help on discourse or gitter ! - - -## I have installed crowdsec and it detect attacks, but nothing is blocked ! - -Keep in mind that {{crowdsec.Htmlname}} is only in charge of the detection. The decision/remediation is applied by {{bouncers.Htmlname}}. -If you don't install any bouncer, you will detect attack, but not block them. Explore the [bouncers in the hub]({{bouncers.url}}) to find the relevant ones ! 
- - - - diff --git a/docs/getting_started/installation.md b/docs/getting_started/installation.md deleted file mode 100644 index 7828fbc59..000000000 --- a/docs/getting_started/installation.md +++ /dev/null @@ -1,139 +0,0 @@ -# Installation - -Fetch {{crowdsec.name}}'s latest version [here]({{crowdsec.download_url}}). - -```bash -tar xvzf crowdsec-release.tgz -``` -```bash -cd crowdsec-v0.X.X -``` - -A {{wizard.name}} is provided to help you deploy {{crowdsec.name}} and {{cli.name}}. - -## Using the interactive wizard - -``` -sudo {{wizard.bin}} -i -``` - -![crowdsec](../assets/images/crowdsec_install.gif) - -The {{wizard.name}} is going to guide you through the following steps : - - - detect services that are present on your machine - - detect selected services logs - - suggest collections (parsers and scenarios) to deploy - - deploy & configure {{crowdsec.name}} in order to watch selected logs for selected scenarios - -The process should take less than a minute, [please report if there are any issues]({{wizard.bugreport}}). - -You are then ready to [take a tour](/getting_started/crowdsec-tour/) of your freshly deployed {{crowdsec.name}} ! - -## Binary installation - -> you of little faith - -``` -sudo {{wizard.bin}} --bininstall -``` - -This will deploy a valid/empty {{crowdsec.name}} configuration files and binaries. -Beware, in this state, {{crowdsec.name}} won't monitor/detect anything unless configured. - -``` -cscli install collection crowdsecurity/linux -``` - - -Installing at least the `crowdsecurity/linux` collection will provide you : - - - syslog parser - - geoip enrichment - - date parsers - - -You will need as well to configure your {{ref.acquis}} file to feed {{crowdsec.name}} some logs. - - - - - -## From source - -!!! warning "Requirements" - - * [Go](https://golang.org/doc/install) v1.13+ - * `git clone {{crowdsec.url}}` - * [jq](https://stedolan.github.io/jq/download/) - - -Go in {{crowdsec.name}} folder and build the binaries : - -```bash -cd crowdsec -``` -```bash -make build -``` - - -{{crowdsec.name}} bin will be located in `./cmd/crowdsec/crowdsec` and {{cli.name}} bin in `cmd/crowdsec-cli/{{cli.bin}}` - -Now, you can install either with [interactive wizard](#using-the-interactive-wizard) or the [unattended mode](#using-unattended-mode). - - -# Upgrading - -The wizard itself comes with a `--upgrade` option, that will upgrade existing crowdsec version. - -If you have installed crowdsec `v0.1.0` and you downloaded `v0.1.1`, you can run `sudo ./wizard.sh --upgrade` from the extracted `v0.1.1` version. (_note_: the wizard doesn't *yet* download the latest version, you have to download it) - - -The wizard takes care of backing up configurations on your behalf, and puts them into an archive : - - - backup your parsers,scenarios,collections, either from hub or your local ones - - simulation configuration - - API credentials - - acquisition.yaml file - - plugin(s) configuration - -It will then install the new/current crowdsec version, and restore everything that has been backed up! - - -```bash -$ sudo ./wizard.sh --upgrade -[10/05/2020:11:27:34 AM][INF] crowdsec_wizard: Backing up existing configuration -WARN[0000] Starting configuration backup -INFO[0000] saving, version:0.1, up-to-date:true file=crowdsecurity/syslog-logs type=parsers -... 
-INFO[0000] Wrote 7 entries for parsers to /tmp/tmp.z54P27aaW0/parsers//upstream-parsers.json file=crowdsecurity/geoip-enrich type=parsers -INFO[0000] Wrote 0 entries for postoverflows to /tmp/tmp.z54P27aaW0/postoverflows//upstream-postoverflows.json file=crowdsecurity/seo-bots-whitelist type=postoverflows -INFO[0000] Wrote 9 entries for scenarios to /tmp/tmp.z54P27aaW0/scenarios//upstream-scenarios.json file=crowdsecurity/smb-bf type=scenarios -INFO[0000] Wrote 4 entries for collections to /tmp/tmp.z54P27aaW0/collections//upstream-collections.json file=crowdsecurity/vsftpd type=collections -INFO[0000] Saved acquis to /tmp/tmp.z54P27aaW0/acquis.yaml -INFO[0000] Saved default yaml to /tmp/tmp.z54P27aaW0/default.yaml -INFO[0000] Saved configuration to /tmp/tmp.z54P27aaW0 -INFO[0000] Stop docker metabase /crowdsec-metabase -[10/05/2020:11:27:36 AM][INF] crowdsec_wizard: Removing crowdsec binaries -[10/05/2020:11:27:36 AM][INF] crowdsec_wizard: crowdsec successfully uninstalled -[10/05/2020:11:27:36 AM][INF] crowdsec_wizard: Installing crowdsec -... -[10/05/2020:11:27:36 AM][INF] crowdsec_wizard: Restoring configuration -... -INFO[0004] Restore acquis to /etc/crowdsec/config/acquis.yaml -INFO[0004] Restoring '/tmp/tmp.z54P27aaW0/plugins/backend/database.yaml' to '/etc/crowdsec/plugins/backend/database.yaml' -[10/05/2020:11:27:41 AM][INF] crowdsec_wizard: Restoring saved database -[10/05/2020:11:27:41 AM][INF] crowdsec_wizard: Finished, restarting - -``` - -As usual, if you experience any issues, let us know :) - -# Uninstalling - -You can uninstall crowdsec using the wizard : `sudo ./wizard.sh --uninstall` - - - - diff --git a/docs/getting_started/known_issues.md b/docs/getting_started/known_issues.md deleted file mode 100644 index d780336d6..000000000 --- a/docs/getting_started/known_issues.md +++ /dev/null @@ -1,15 +0,0 @@ -# Known bugs and limitations - -## Some users experience crash on 32bits architecture - -For now, on 32bit architecture there's a alignment bug in the way -https://github.com/jamiealquiza/tachymeter library uses the [sync package](https://golang.org/pkg/sync/atomic/#pkg-note-BUG) that prevents crowdsec from running properly with prometheus gathering metrics. - -All versions v0.3.X up to v0.3.5 are affected. - -The workaround is to disable prometheus until the bug is fixed. For -doing this you'll have to set `prometheus` to `false` in the file -`/etc/crowdsec/config/default.yaml`. - -We are working on solving this issue by getting rid of the culprit -library. diff --git a/docs/guide/cscli.md b/docs/guide/cscli.md deleted file mode 100644 index 3aafd5710..000000000 --- a/docs/guide/cscli.md +++ /dev/null @@ -1,39 +0,0 @@ -`{{cli.bin}}` is the utility that will help you to manage {{crowdsec.name}}. This tools has the following functionalities: - - - [manage bans]({{ cli.ban_doc }}) - - [backup and restore configuration]({{ cli.backup_doc }}) - - [display metrics]({{ cli.metrics_doc }}) - - [install configurations]({{ cli.install_doc }}) - - [remove configurations]({{ cli.remove_doc }}) - - [update configurations]({{ cli.update_doc }}) - - [upgrade configurations]({{ cli.upgrade_doc }}) - - [list configurations]({{ cli.list_doc }}) - - [interact with CrowdSec API]({{ cli.api_doc }}) - - [manage simulation]({{cli.simulation_doc}}) - - Take a look at the [dedicated documentation]({{cli.main_doc}}) - -## Overview - -{{cli.name}} configuration location is `/etc/crowdsec/cscli/`. - -In this folder, we store the {{cli.name}} configuration and the hub cache files. 
- -## Config - -The {{cli.name}} configuration is light for now, stored in `/etc/crowdsec/cscli/config`. - -```yaml -installdir: /etc/crowdsec/config # {{crowdsec.name}} configuration location -backend: /etc/crowdsec/plugins/backend # path to the backend plugin used -``` - -For {{cli.name}} to be able to pull the {{api.topX.htmlname}}, you need a valid API configuration in [api.yaml](/guide/crowdsec/overview/#apiyaml). - - -## Hub cache - -- `.index.json`: The file containing the metadata of all the existing {{collections.htmlname}}, {{parsers.htmlname}} and {{scenarios.htmlname}} stored in the {{hub.htmlname}}. -- `hub/*`: Folder containing all the {{collections.htmlname}}, {{parsers.htmlname}} and {{scenarios.htmlname}} stored in the {{hub.htmlname}}. - -This is used to manage configurations from the {{cli.name}} \ No newline at end of file diff --git a/docs/guide/overview.md b/docs/guide/overview.md deleted file mode 100644 index 516f5431e..000000000 --- a/docs/guide/overview.md +++ /dev/null @@ -1,10 +0,0 @@ - -When talking about {{crowdsec.name}} or {{cli.name}} configurations, most of things are going to gravitate around {{parsers.htmlname}}, {{scenarios.htmlname}} and {{collections.htmlname}}. - -In most common setup, all these configurations should be found on the {{hub.htmlname}} and installed with {{cli.name}}. - -It is important to keep those configurations up-to-date via the `{{cli.name}} upgrade` command. - -See the [{{cli.name}} list](/cheat_sheets/cscli-collections-tour/) command to view the state of your deployed configurations. - - diff --git a/docs/index.md b/docs/index.md index 49316069d..f26b02c6b 100644 --- a/docs/index.md +++ b/docs/index.md @@ -1,65 +1,41 @@ -
[[Hub]]({{hub.url}}) [[Releases]]({{crowdsec.download_url}})
+
[[Hub]]({{v1X.hub.url}}) [[Releases]]({{v1X.crowdsec.download_url}})
-# What is {{crowdsec.Name}} ?
-[{{crowdsec.Name}}]({{crowdsec.url}}) is an open-source and lightweight software that allows you to detect peers with malevolent behaviors and block them (using {{bouncers.Htmlname}}) from accessing your systems at various levels (infrastructural, system, applicative).
+!!! warning
+    For crowdsec versions `< 1.0` please refer to [v0.3.X](/Crowdsec/v0/)
-To achieve this, {{crowdsec.Name}} reads logs from different sources (files, streams ...) to parse, normalize and enrich them before matching them to threats patterns called scenarios.
+    For crowdsec versions `>= 1.0` please refer to [v1.X](/Crowdsec/v1/)
-{{crowdsec.Name}} is a modular and plug-able framework, it ships a large variety of [well known popular scenarios](https://hub.crowdsec.net/browse/#configurations); users can choose what scenarios they want to be protected from as well as easily adding new custom ones to better fit their environment.
+# What is {{v1X.crowdsec.Name}} ?
-Detected malevolent peers can then be prevented from accessing your resources by deploying [bouncers]({{hub.plugins_url}}) at various levels (applicative, system, infrastructural) of your stack.
+[{{v1X.crowdsec.Name}}]({{v1X.crowdsec.url}}) is an open-source and lightweight software that allows you to detect peers with malevolent behaviors and block them from accessing your systems at various levels (infrastructural, system, applicative).
+
+To achieve this, {{v1X.crowdsec.Name}} reads logs from different sources (files, streams ...) to parse, normalize and enrich them before matching them to threat patterns called scenarios.
+
+{{v1X.crowdsec.Name}} is a modular and pluggable framework; it ships a large variety of [well known popular scenarios](https://hub.crowdsec.net/browse/#configurations); users can choose what scenarios they want to be protected from as well as easily adding new custom ones to better fit their environment.
+
+Detected malevolent peers can then be prevented from accessing your resources by deploying [bouncers]({{v1X.hub.bouncers_url}}) at various levels (applicative, system, infrastructural) of your stack.
 
 One of the advantages of Crowdsec when compared to other solutions is its crowd-sourced aspect : Meta information about detected attacks (source IP, time and triggered scenario) are sent to a central API and then shared amongst all users.
 
 Thanks to this, besides detecting and stopping attacks in real time based on your logs, it allows you to preemptively block known bad actors from accessing your information system.
-## Components
-
-{{crowdsec.name}} ecosystem is based on the following components :
-
- - [{{crowdsec.Name}}]({{crowdsec.url}}) is the lightweight service that processes logs and keeps track of attacks.
- - [{{cli.name}}]({{cli.main_doc}}) is the command line interface for humans, it allows you to view, add, or remove bans as well as to install, find, or update scenarios and parsers
- - [{{bouncers.name}}]({{hub.plugins_url}}) are the components that block malevolent traffic, and can be deployed anywhere in your stack
-
-## Architecture
-
-![Architecture](assets/images/crowdsec_architecture.png)
-
-
-## Core concepts
-
-{{crowdsec.name}} relies on {{parsers.htmlname}} to normalize and enrich logs, and {{scenarios.htmlname}} to detect attacks, often bundled together in {{collections.htmlname}} to form a coherent configuration set.
For example the collection [`crowdsecurity/nginx`](https://hub.crowdsec.net/author/crowdsecurity/collections/nginx) contains all the necessary parsers and scenarios to deal with nginx logs and the common attacks that can be seen on http servers.
-
-All of those are represented as YAML files, that can be found, shared and kept up-to-date thanks to the {{hub.htmlname}}, or [easily hand-crafted](/write_configurations/scenarios/) to address specific needs.
-
-
 ## Main features
 
-{{crowdsec.Name}}, besides the core "detect and react" mechanism, is committed to a few other key points :
+{{v0X.crowdsec.Name}}, besides the core "detect and react" mechanism, is committed to a few other key points :
 
- - **Easy Installation** : The provided wizard allows a [trivial deployment](/getting_started/installation/#using-the-interactive-wizard) on most standard setups
- - **Easy daily operations** : Using [cscli](/cscli/cscli_upgrade/) and the {{hub.htmlname}}, keeping your detection mechanisms up-to-date is trivial
- - **Observability** : Providing strongs insights on what is going on and what {{crowdsec.name}} is doing :
-    - Humans have [access to a trivially deployable web interface](/observability/dashboard/)
-    - OPs have [access to detailed prometheus metrics](/observability/prometheus/)
-    - Admins have [a friendly command-line interface tool](/observability/command_line/)
+ - **Easy Installation** : The provided wizard allows a [trivial deployment](/Crowdsec/v1/getting_started/installation/#using-the-interactive-wizard) on most standard setups
+ - **Easy daily operations** : Using [cscli](/Crowdsec/v1/cscli/cscli_upgrade/) and the {{v0X.hub.htmlname}}, keeping your detection mechanisms up-to-date is trivial
+ - **Reproducibility** : Crowdsec can run not only against live logs, but also against cold logs. This makes it a lot easier to detect potential false positives, perform forensics or generate reports
+ - **Observability** : Providing strong insights on what is going on and what {{v0X.crowdsec.name}} is doing :
+    - Humans have [access to a trivially deployable web interface](/Crowdsec/v1/observability/dashboard/)
+    - OPs have [access to detailed prometheus metrics](/Crowdsec/v1/observability/prometheus/)
+    - Admins have [a friendly command-line interface tool](/Crowdsec/v1/observability/command_line/)
 
-## Moving forward
+## About this documentation
 
-To learn more about {{crowdsec.name}} and give it a try, please see :
+This document is split according to major {{v1X.crowdsec.Name}} versions :
 
- - [How to install {{crowdsec.name}}](/getting_started/installation/)
- - [Take a quick tour of {{crowdsec.name}} and {{cli.name}} features](/getting_started/crowdsec-tour/)
- - [Observability of {{crowdsec.name}}](/observability/overview/)
- - [Understand {{crowdsec.name}} configuration](/getting_started/concepts/)
- - [Deploy {{bouncers.name}} to stop malevolent peers](/bouncers/)
- - [FAQ](getting_started/FAQ/)
- - [Known bugs and limitations](/getting_started/known_issues)
-
-Don't hesitate to reach out if you're facing issues :
-
- - [report a bug](https://github.com/crowdsecurity/crowdsec/issues/new?assignees=&labels=bug&template=bug_report.md&title=Bug%2F)
- - [suggest an improvement](https://github.com/crowdsecurity/crowdsec/issues/new?assignees=&labels=enhancement&template=feature_request.md&title=Improvment%2F)
- - [ask for help on the forums](https://discourse.crowdsec.net)
+ - [Crowdsec v0](/Crowdsec/v0/) Refers to versions `0.3.X`, before the local API was introduced.
(_note: this is going to be deprecated and you are strongly encouraged to migrate to versions 1.X_)
+ - [Crowdsec v1](/Crowdsec/v1/) Refers to versions `1.X`; it is the current version
\ No newline at end of file
diff --git a/docs/observability/overview.md b/docs/observability/overview.md
deleted file mode 100644
index 9abed3188..000000000
--- a/docs/observability/overview.md
+++ /dev/null
@@ -1,13 +0,0 @@
-# Observability Overview
-
-Observability in security software is crucial, especially when this software might take important decision such as blocking IP addresses.
-
-We attempt to provide good observability of {{crowdsec.name}}'s behavior :
-
- - {{crowdsec.name}} itself exposes a [prometheus instrumentation](/observability/prometheus/)
- - {{cli.Name}} allows you to view part of prometheus metrics in [cli (`{{cli.bin}} metrics`)](/observability/command_line/)
- - {{crowdsec.name}} logging is contextualized for easy processing
- - for **humans**, {{cli.name}} allows you to trivially start a service [exposing dashboards](/observability/dashboard/) (using [metabase](https://www.metabase.com/))
-
-Furthermore, most of {{crowdsec.name}} configuration should allow you to enable partial debug (ie. per-scenario, per-parser etc.)
-
diff --git a/docs/requirements.txt b/docs/requirements.txt
index f07e15654..ce6f64836 100644
--- a/docs/requirements.txt
+++ b/docs/requirements.txt
@@ -1,24 +1,27 @@
--i https://pypi.org/simple/
 click==7.1.1
 future==0.18.2
-jinja2==2.11.1
+Jinja2==2.11.1
 joblib==0.14.1
 livereload==2.6.1
-lunr[languages]==0.5.6
-markdown==3.2.1
-markupsafe==1.1.1
-mkdocs-material==4.6.3
+lunr==0.5.6
+Markdown==3.2.1
+MarkupSafe==1.1.1
 mkdocs==1.1
-mkdocs-macros-plugin==0.4.6
+mkdocs-macros-plugin==0.4.18
+mkdocs-material==6.1.0
+mkdocs-material-extensions==1.0.1
+mkdocs-monorepo-plugin==0.4.11
+mkdocs-redirects==1.0.1
 nltk==3.5b1
-prompt-toolkit==3.0.5
-pygments==2.6.1
-pymdown-extensions==7.0rc1
+prompt-toolkit==2.0.10
+Pygments==2.6.1
+pymdown-extensions==7.0
 python-markdown-math==0.6
-pyyaml==5.3.1
+PyYAML==5.3.1
 regex==2020.2.20
+repackage==0.7.3
 six==1.14.0
+termcolor==1.1.0
 tornado==6.0.4
 tqdm==4.43.0
 wcwidth==0.1.9
-
diff --git a/docs/assets/images/blocker-installation.gif b/docs/v0.3.X/docs/assets/images/blocker-installation.gif
similarity index 100%
rename from docs/assets/images/blocker-installation.gif
rename to docs/v0.3.X/docs/assets/images/blocker-installation.gif
diff --git a/docs/assets/images/crowdsec_logo1.png b/docs/v0.3.X/docs/assets/images/crowdsec2.png
similarity index 100%
rename from docs/assets/images/crowdsec_logo1.png
rename to docs/v0.3.X/docs/assets/images/crowdsec2.png
diff --git a/docs/v0.3.X/docs/assets/images/crowdsec_architecture.png b/docs/v0.3.X/docs/assets/images/crowdsec_architecture.png
new file mode 100644
index 000000000..5e5e6184d
Binary files /dev/null and b/docs/v0.3.X/docs/assets/images/crowdsec_architecture.png differ
diff --git a/docs/assets/images/crowdsec_install.gif b/docs/v0.3.X/docs/assets/images/crowdsec_install.gif
similarity index 100%
rename from docs/assets/images/crowdsec_install.gif
rename to docs/v0.3.X/docs/assets/images/crowdsec_install.gif
diff --git a/docs/v0.3.X/docs/assets/images/crowdsec_logo1.png b/docs/v0.3.X/docs/assets/images/crowdsec_logo1.png
new file mode 100644
index 000000000..c9142c134
Binary files /dev/null and b/docs/v0.3.X/docs/assets/images/crowdsec_logo1.png differ
diff --git a/docs/assets/images/cscli-metabase.gif b/docs/v0.3.X/docs/assets/images/cscli-metabase.gif
similarity index 100%
rename from
docs/assets/images/cscli-metabase.gif
rename to docs/v0.3.X/docs/assets/images/cscli-metabase.gif
diff --git a/docs/assets/images/dashboard_view.png b/docs/v0.3.X/docs/assets/images/dashboard_view.png
similarity index 100%
rename from docs/assets/images/dashboard_view.png
rename to docs/v0.3.X/docs/assets/images/dashboard_view.png
diff --git a/docs/assets/images/dashboard_view2.png b/docs/v0.3.X/docs/assets/images/dashboard_view2.png
similarity index 100%
rename from docs/assets/images/dashboard_view2.png
rename to docs/v0.3.X/docs/assets/images/dashboard_view2.png
diff --git a/docs/assets/images/grafana_details.png b/docs/v0.3.X/docs/assets/images/grafana_details.png
similarity index 100%
rename from docs/assets/images/grafana_details.png
rename to docs/v0.3.X/docs/assets/images/grafana_details.png
diff --git a/docs/assets/images/grafana_insight.png b/docs/v0.3.X/docs/assets/images/grafana_insight.png
similarity index 100%
rename from docs/assets/images/grafana_insight.png
rename to docs/v0.3.X/docs/assets/images/grafana_insight.png
diff --git a/docs/assets/images/grafana_overview.png b/docs/v0.3.X/docs/assets/images/grafana_overview.png
similarity index 100%
rename from docs/assets/images/grafana_overview.png
rename to docs/v0.3.X/docs/assets/images/grafana_overview.png
diff --git a/docs/assets/images/out-of-the-box-protection.gif b/docs/v0.3.X/docs/assets/images/out-of-the-box-protection.gif
similarity index 100%
rename from docs/assets/images/out-of-the-box-protection.gif
rename to docs/v0.3.X/docs/assets/images/out-of-the-box-protection.gif
diff --git a/docs/v0.3.X/docs/bouncers/index.md b/docs/v0.3.X/docs/bouncers/index.md
new file mode 100644
index 000000000..97c3b0152
--- /dev/null
+++ b/docs/v0.3.X/docs/bouncers/index.md
@@ -0,0 +1,12 @@
+# bouncers
+
+
+{{v0X.bouncers.Name}} are standalone software pieces in charge of acting upon blocked IPs.
+
+They can either be within the applicative stack, or work out of band :
+
+[nginx blocker](https://github.com/crowdsecurity/cs-nginx-blocker) will check every unknown IP against the database before letting it go through or serving a *403* to the user, while a [netfilter blocker](https://github.com/crowdsecurity/cs-netfilter-blocker) will simply "add" malevolent IPs to nftables/ipset set of blacklisted IPs.
+
+
+You can explore [available {{v0X.bouncers.name}} on the hub]({{v0X.hub.plugins_url}}), and find below a few of the "main" {{v0X.bouncers.name}} :
+
diff --git a/docs/cheat_sheets/ban-mgmt.md b/docs/v0.3.X/docs/cheat_sheets/ban-mgmt.md
similarity index 78%
rename from docs/cheat_sheets/ban-mgmt.md
rename to docs/v0.3.X/docs/cheat_sheets/ban-mgmt.md
index 02d99d1c8..e074fbceb 100644
--- a/docs/cheat_sheets/ban-mgmt.md
+++ b/docs/v0.3.X/docs/cheat_sheets/ban-mgmt.md
@@ -1,11 +1,11 @@
 !!! info
-    Please see your local `{{cli.bin}} help ban` for up-to-date documentation.
+    Please see your local `{{v0X.cli.bin}} help ban` for up-to-date documentation.
 
 ## List bans
 
 ```bash
-{{cli.bin}} ban list
+{{v0X.cli.bin}} ban list
 ```
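For scripting, the same listing can also be requested as machine-readable JSON. A minimal sketch (the `-o json` and `--range` flags are the ones exercised in the project's own CI workflows; piping through `jq`, which we assume to be installed, is only for illustration):

```bash
# list current bans as JSON and extract the banned addresses
{{v0X.cli.bin}} ban list -o json | jq '.[].iptext'
# restrict the listing to decisions covering a given range
{{v0X.cli.bin}} ban list --range 1.2.3.0/24 -o json
```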
@@ -28,8 +28,8 @@ And 64 records from API, 32 distinct AS, 19 distinct countries
- `SOURCE` is the source of the decision :
-    - "local" : the decision has been taken by {{crowdsec.name}}
-    - "cli" : the decision has been made with {{cli.name}} (ie. `{{cli.name}} ban ip 1.2.3.4 24h "because"`)
+    - "local" : the decision has been taken by {{v0X.crowdsec.name}}
+    - "cli" : the decision has been made with {{v0X.cli.name}} (ie. `{{v0X.cli.name}} ban ip 1.2.3.4 24h "because"`)
     - "api" : the decision has been pushed to you by the API (because there is a consensus about this ip)
 - `IP` is the IP or the IP range impacted by the decision
 - `REASON` is the scenario that was triggered (or human-supplied reason)
@@ -38,7 +38,7 @@
 - `EXPIRATION` is the time left on remediation
 
-Check [command usage](/cscli/cscli_ban_list/) for additional filtering and output control flags.
+Check [command usage](/Crowdsec/v0/cscli/cscli_ban_list/) for additional filtering and output control flags.
 
 ## Delete a ban
 
@@ -46,13 +46,13 @@ Check [command usage](/cscli/cscli_ban_list/) for additional filtering and outpu
 
 > delete the ban on IP `1.2.3.4`
 
 ```bash
-{{cli.bin}} ban del ip 1.2.3.4
+{{v0X.cli.bin}} ban del ip 1.2.3.4
 ```
 
 > delete the ban on range 1.2.3.0/24
 
 ```bash
-{{cli.bin}} ban del range 1.2.3.0/24
+{{v0X.cli.bin}} ban del range 1.2.3.0/24
 ```
 
@@ -61,13 +61,13 @@ Check [command usage](/cscli/cscli_ban_list/) for additional filtering and outpu
 
 > Add a ban on IP `1.2.3.4` for 24 hours, with reason 'web bruteforce'
 
 ```bash
-{{cli.bin}} ban add ip 1.2.3.4 24h "web bruteforce"
+{{v0X.cli.bin}} ban add ip 1.2.3.4 24h "web bruteforce"
 ```
 
 > Add a ban on range `1.2.3.0/24` for 24 hours, with reason 'web bruteforce'
 
 ```bash
-{{cli.bin}} ban add range 1.2.3.0/24 "web bruteforce"
+{{v0X.cli.bin}} ban add range 1.2.3.0/24 24h "web bruteforce"
 ```
 
@@ -77,7 +77,7 @@ Check [command usage](/cscli/cscli_ban_list/) for additional filtering and outpu
 
 > Flush all the existing bans
 
 ```bash
-{{cli.bin}} ban flush
+{{v0X.cli.bin}} ban flush
 ```
 
 !!! warning
diff --git a/docs/cheat_sheets/config-mgmt.md b/docs/v0.3.X/docs/cheat_sheets/config-mgmt.md
similarity index 78%
rename from docs/cheat_sheets/config-mgmt.md
rename to docs/v0.3.X/docs/cheat_sheets/config-mgmt.md
index b4ea65b69..c5eaf848e 100644
--- a/docs/cheat_sheets/config-mgmt.md
+++ b/docs/v0.3.X/docs/cheat_sheets/config-mgmt.md
@@ -1,4 +1,4 @@
-{{cli.bin}} allows you install, list, upgrade and remove configurations : parsers, enrichment, scenarios.
+{{v0X.cli.bin}} allows you to install, list, upgrade and remove configurations : parsers, enrichment, scenarios.
 
 !!! warning
     If you're not running the latest CrowdSec version, configurations might not be the latest available. `cscli` will use the branch of the corresponding CrowdSec version to download and install configurations from the hub (it will use the `master` branch if you are on the latest CrowdSec version).
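As a sketch of what this looks like in practice, the branch used by `cscli` can be pinned in the `cscli` section of the main configuration file; the `hub_branch` key is the one that appears in the docker configuration shown earlier in this changeset, and pinning it to `master` here is only an illustrative assumption:

```yaml
cscli:
  output: human
  # branch of the hub repository that cscli downloads configurations from
  hub_branch: master
```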
@@ -9,26 +9,26 @@ _Parsers, Scenarios and Enrichers are often bundled together in "collections" to Parsers, scenarios, enrichers and collections all follow the same principle : - - `{{cli.bin}} install parser crowdsec/nginx-logs` - - `{{cli.bin}} update collection crowdsec/base-http-scenarios` - - `{{cli.bin}} remove scenario crowdsec/mysql-bf` + - `{{v0X.cli.bin}} install parser crowdsec/nginx-logs` + - `{{v0X.cli.bin}} update collection crowdsec/base-http-scenarios` + - `{{v0X.cli.bin}} remove scenario crowdsec/mysql-bf` -> Please see your local `{{cli.bin}} help` for up-to-date documentation +> Please see your local `{{v0X.cli.bin}} help` for up-to-date documentation ## List configurations ``` -{{cli.bin}} list +{{v0X.cli.bin}} list ``` **note** `-a` allows for listing of uninstalled configurations as well
- {{cli.name}} list example + {{v0X.cli.name}} list example ```bash -$ {{cli.bin}} list +$ {{v0X.cli.bin}} list INFO[0000] Loaded 9 collecs, 14 parsers, 12 scenarios, 1 post-overflow parsers INFO[0000] PARSERS: -------------------------------------------------------------------------------------------------------------------- @@ -67,29 +67,29 @@ INFO[0000] POSTOVERFLOWS: -For {{parsers.htmlname}}, {{scenarios.htmlname}}, {{collections.htmlname}} the outputs include, beside the version, the path and the name, a `STATUS` column : +For {{v0X.parsers.htmlname}}, {{v0X.scenarios.htmlname}}, {{v0X.collections.htmlname}} the outputs include, beside the version, the path and the name, a `STATUS` column : - `✔️ enabled` : configuration is up-to-date - `⚠️ enabled,outdated` : a newer version is available - - `🚫 enabled,local` : configuration is not managed by {{cli.name}} + - `🚫 enabled,local` : configuration is not managed by {{v0X.cli.name}} - `⚠️ enabled,tainted` : configuration has been locally modified -(see `{{cli.name}} upgrade` to upgrade/sync your configurations with {{hub.htmlname}}) +(see `{{v0X.cli.name}} upgrade` to upgrade/sync your configurations with {{v0X.hub.htmlname}}) ## Install new configurations -`{{cli.bin}} install parser|scenario|postoverflow [--force]` +`{{v0X.cli.bin}} install parser|scenario|postoverflow [--force]` - - `{{cli.bin}} install parser crowdsec/nginx-logs` - - `{{cli.bin}} install scenario crowdsec/http-scan-uniques_404` + - `{{v0X.cli.bin}} install parser crowdsec/nginx-logs` + - `{{v0X.cli.bin}} install scenario crowdsec/http-scan-uniques_404` ## Remove configurations -`{{cli.bin}} remove parser|scenario|postoverflow [--force]` +`{{v0X.cli.bin}} remove parser|scenario|postoverflow [--force]` ## Upgrade configurations @@ -97,19 +97,19 @@ For {{parsers.htmlname}}, {{scenarios.htmlname}}, {{collections.htmlname}} the o > upgrade a specific scenario ``` -{{cli.bin}} upgrade scenario crowdsec/http-scan-uniques_404 +{{v0X.cli.bin}} upgrade scenario crowdsec/http-scan-uniques_404 ``` > upgrade **all** scenarios ``` -{{cli.bin}} upgrade scenario --all +{{v0X.cli.bin}} upgrade scenario --all ``` > upgrade **all** configurations (parsers, scenarios, collections, postoverflows) ``` -{{cli.bin}} upgrade --all +{{v0X.cli.bin}} upgrade --all ``` diff --git a/docs/cheat_sheets/debugging_configs.md b/docs/v0.3.X/docs/cheat_sheets/debugging_configs.md similarity index 91% rename from docs/cheat_sheets/debugging_configs.md rename to docs/v0.3.X/docs/cheat_sheets/debugging_configs.md index 83b029e61..be0489235 100644 --- a/docs/cheat_sheets/debugging_configs.md +++ b/docs/v0.3.X/docs/cheat_sheets/debugging_configs.md @@ -25,12 +25,12 @@ WARN[05-08-2020 16:16:12] 182.x.x.x triggered a 4h0m0s ip ban remediation for [c - `-type` must respect expected log type (ie. `nginx` `syslog` etc.) - `-file` must point to a flat file or a gzip file -When processing logs like this, {{crowdsec.name}} runs in "time machine" mode, and relies on the timestamps *in* the logs to evaluate scenarios. You will most likely need the `crowdsecurity/dateparse-enrich` parser for this. +When processing logs like this, {{v0X.crowdsec.name}} runs in "time machine" mode, and relies on the timestamps *in* the logs to evaluate scenarios. You will most likely need the `crowdsecurity/dateparse-enrich` parser for this. 
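As a concrete sketch of this time-machine workflow (assuming a syslog-formatted cold log such as the `ssh-bf.log` file used in the project's own CI, and the default `user.yaml` configuration path used throughout these docs):

```bash
# install the date parsing enricher, so timestamps are read from the log lines themselves
cscli install parser crowdsecurity/dateparse-enrich
# replay the cold log file in time-machine mode
crowdsec -c /etc/crowdsec/config/user.yaml -file ./ssh-bf.log -type syslog
```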
## Testing configurations on live system
 
-If you're playing around with parser/scenarios on a live system, you can use the `-t` (lint) option of {{crowdsec.Name}} to check your configurations validity before restarting/reloading services :
+If you're playing around with parsers/scenarios on a live system, you can use the `-t` (lint) option of {{v0X.crowdsec.Name}} to check your configurations' validity before restarting/reloading services :
 
 ```bash
 $ emacs /etc/crowdsec/config/scenarios/ssh-bf.yaml
@@ -91,7 +91,7 @@ DEBU[05-08-2020 16:02:26] evt.Parsed.static_ressource = 'false' cfg=blac
 
 # Test environments
 
-From a [{{crowdsec.name}} release archive]({{crowdsec.download_url}}), you can deploy a test (non-root) environment that is very suitable to write/debug/test parsers and scenarios. Environment is deployed using `./test_env.sh` script from tgz directory, and creates a test environment in `./tests` :
+From a [{{v0X.crowdsec.name}} release archive]({{v0X.crowdsec.download_url}}), you can deploy a test (non-root) environment that is very suitable to write/debug/test parsers and scenarios. The environment is deployed using the `./test_env.sh` script from the tgz directory, and creates a test environment in `./tests` :
 
 ```bash
 $ cd crowdsec-v0.3.0/
diff --git a/docs/v0.3.X/docs/cheat_sheets/usecase_howto.md b/docs/v0.3.X/docs/cheat_sheets/usecase_howto.md
new file mode 100644
index 000000000..e69de29bb
diff --git a/docs/contributing.md b/docs/v0.3.X/docs/contributing.md
similarity index 56%
rename from docs/contributing.md
rename to docs/v0.3.X/docs/contributing.md
index 85c79b3e4..5906a5776 100644
--- a/docs/contributing.md
+++ b/docs/v0.3.X/docs/contributing.md
@@ -9,8 +9,8 @@ Help us improve the software and the user experience, to make the internet a saf
 
 If you spotted some mistakes in the documentation or have improvement suggestions, you can :
 
- - open a {{doc.new_issue}} if you are comfortable with github
- - let us know on {{doc.discourse}} if you want to discuss about it
+ - open a {{v0X.doc.new_issue}} if you are comfortable with github
+ - let us know on {{v0X.doc.discourse}} if you want to discuss it
 
 Let us as well know if you have some improvement suggestions !
 
@@ -18,13 +18,13 @@ Let us as well know if you have some improvement suggestions !
 
 ## Contributing to the code
 
- - If you want to report a bug, you can use [the github bugtracker]({{crowdsec.bugreport}})
- - If you want to suggest an improvement you can use either [the github bugtracker]({{crowdsec.bugreport}}) or the {{doc.discourse}} if you want to discuss
+ - If you want to report a bug, you can use [the github bugtracker]({{v0X.crowdsec.bugreport}})
+ - If you want to suggest an improvement you can use either [the github bugtracker]({{v0X.crowdsec.bugreport}}) or the {{v0X.doc.discourse}} if you want to discuss
 
 ## Contributing to the parsers/scenarios
 
-If you want to contribute your parser or scenario to the community and have them appear on the {{hub.htmlname}}, you should [open a merge request](https://github.com/crowdsecurity/hub/pulls) on the hub.
+If you want to contribute your parser or scenario to the community and have them appear on the {{v0X.hub.htmlname}}, you should [open a merge request](https://github.com/crowdsecurity/hub/pulls) on the hub.
 
-We are currently working on a proper [CI](https://en.wikipedia.org/wiki/Continuous_integration) for the {{hub.htmlname}}, so for now all contribution are subject to peer-review, please bear with us !
+We are currently working on a proper [CI](https://en.wikipedia.org/wiki/Continuous_integration) for the {{v0X.hub.htmlname}}, so for now all contributions are subject to peer-review, please bear with us !
diff --git a/docs/cscli/cscli.md b/docs/v0.3.X/docs/cscli/cscli.md
similarity index 100%
rename from docs/cscli/cscli.md
rename to docs/v0.3.X/docs/cscli/cscli.md
diff --git a/docs/cscli/cscli_api.md b/docs/v0.3.X/docs/cscli/cscli_api.md
similarity index 99%
rename from docs/cscli/cscli_api.md
rename to docs/v0.3.X/docs/cscli/cscli_api.md
index 3ef59263e..e8b0843ed 100644
--- a/docs/cscli/cscli_api.md
+++ b/docs/v0.3.X/docs/cscli/cscli_api.md
@@ -5,7 +5,7 @@ Crowdsec API interaction
 
 ### Synopsis
 
-Allow to register your machine into crowdsec API to send and receive alert.
+Allows you to register your machine with the crowdsec API to send and receive signals.
 
 ### Examples
 
diff --git a/docs/cscli/cscli_api_credentials.md b/docs/v0.3.X/docs/cscli/cscli_api_credentials.md
similarity index 100%
rename from docs/cscli/cscli_api_credentials.md
rename to docs/v0.3.X/docs/cscli/cscli_api_credentials.md
diff --git a/docs/cscli/cscli_api_enroll.md b/docs/v0.3.X/docs/cscli/cscli_api_enroll.md
similarity index 100%
rename from docs/cscli/cscli_api_enroll.md
rename to docs/v0.3.X/docs/cscli/cscli_api_enroll.md
diff --git a/docs/cscli/cscli_api_pull.md b/docs/v0.3.X/docs/cscli/cscli_api_pull.md
similarity index 100%
rename from docs/cscli/cscli_api_pull.md
rename to docs/v0.3.X/docs/cscli/cscli_api_pull.md
diff --git a/docs/cscli/cscli_api_register.md b/docs/v0.3.X/docs/cscli/cscli_api_register.md
similarity index 100%
rename from docs/cscli/cscli_api_register.md
rename to docs/v0.3.X/docs/cscli/cscli_api_register.md
diff --git a/docs/cscli/cscli_api_reset.md b/docs/v0.3.X/docs/cscli/cscli_api_reset.md
similarity index 100%
rename from docs/cscli/cscli_api_reset.md
rename to docs/v0.3.X/docs/cscli/cscli_api_reset.md
diff --git a/docs/cscli/cscli_backup.md b/docs/v0.3.X/docs/cscli/cscli_backup.md
similarity index 100%
rename from docs/cscli/cscli_backup.md
rename to docs/v0.3.X/docs/cscli/cscli_backup.md
diff --git a/docs/cscli/cscli_backup_restore.md b/docs/v0.3.X/docs/cscli/cscli_backup_restore.md
similarity index 100%
rename from docs/cscli/cscli_backup_restore.md
rename to docs/v0.3.X/docs/cscli/cscli_backup_restore.md
diff --git a/docs/cscli/cscli_backup_save.md b/docs/v0.3.X/docs/cscli/cscli_backup_save.md
similarity index 100%
rename from docs/cscli/cscli_backup_save.md
rename to docs/v0.3.X/docs/cscli/cscli_backup_save.md
diff --git a/docs/cscli/cscli_ban.md b/docs/v0.3.X/docs/cscli/cscli_ban.md
similarity index 100%
rename from docs/cscli/cscli_ban.md
rename to docs/v0.3.X/docs/cscli/cscli_ban.md
diff --git a/docs/cscli/cscli_ban_add.md b/docs/v0.3.X/docs/cscli/cscli_ban_add.md
similarity index 100%
rename from docs/cscli/cscli_ban_add.md
rename to docs/v0.3.X/docs/cscli/cscli_ban_add.md
diff --git a/docs/cscli/cscli_ban_add_ip.md b/docs/v0.3.X/docs/cscli/cscli_ban_add_ip.md
similarity index 100%
rename from docs/cscli/cscli_ban_add_ip.md
rename to docs/v0.3.X/docs/cscli/cscli_ban_add_ip.md
diff --git a/docs/cscli/cscli_ban_add_range.md b/docs/v0.3.X/docs/cscli/cscli_ban_add_range.md
similarity index 100%
rename from docs/cscli/cscli_ban_add_range.md
rename to docs/v0.3.X/docs/cscli/cscli_ban_add_range.md
diff --git a/docs/cscli/cscli_ban_del.md b/docs/v0.3.X/docs/cscli/cscli_ban_del.md
similarity index 100%
rename from docs/cscli/cscli_ban_del.md
rename to docs/v0.3.X/docs/cscli/cscli_ban_del.md
diff
--git a/docs/cscli/cscli_ban_del_ip.md b/docs/v0.3.X/docs/cscli/cscli_ban_del_ip.md similarity index 100% rename from docs/cscli/cscli_ban_del_ip.md rename to docs/v0.3.X/docs/cscli/cscli_ban_del_ip.md diff --git a/docs/cscli/cscli_ban_del_range.md b/docs/v0.3.X/docs/cscli/cscli_ban_del_range.md similarity index 100% rename from docs/cscli/cscli_ban_del_range.md rename to docs/v0.3.X/docs/cscli/cscli_ban_del_range.md diff --git a/docs/cscli/cscli_ban_flush.md b/docs/v0.3.X/docs/cscli/cscli_ban_flush.md similarity index 100% rename from docs/cscli/cscli_ban_flush.md rename to docs/v0.3.X/docs/cscli/cscli_ban_flush.md diff --git a/docs/cscli/cscli_ban_list.md b/docs/v0.3.X/docs/cscli/cscli_ban_list.md similarity index 100% rename from docs/cscli/cscli_ban_list.md rename to docs/v0.3.X/docs/cscli/cscli_ban_list.md diff --git a/docs/cscli/cscli_config.md b/docs/v0.3.X/docs/cscli/cscli_config.md similarity index 100% rename from docs/cscli/cscli_config.md rename to docs/v0.3.X/docs/cscli/cscli_config.md diff --git a/docs/cscli/cscli_config_show.md b/docs/v0.3.X/docs/cscli/cscli_config_show.md similarity index 100% rename from docs/cscli/cscli_config_show.md rename to docs/v0.3.X/docs/cscli/cscli_config_show.md diff --git a/docs/cscli/cscli_dashboard.md b/docs/v0.3.X/docs/cscli/cscli_dashboard.md similarity index 100% rename from docs/cscli/cscli_dashboard.md rename to docs/v0.3.X/docs/cscli/cscli_dashboard.md diff --git a/docs/cscli/cscli_dashboard_setup.md b/docs/v0.3.X/docs/cscli/cscli_dashboard_setup.md similarity index 100% rename from docs/cscli/cscli_dashboard_setup.md rename to docs/v0.3.X/docs/cscli/cscli_dashboard_setup.md diff --git a/docs/cscli/cscli_dashboard_start.md b/docs/v0.3.X/docs/cscli/cscli_dashboard_start.md similarity index 100% rename from docs/cscli/cscli_dashboard_start.md rename to docs/v0.3.X/docs/cscli/cscli_dashboard_start.md diff --git a/docs/cscli/cscli_dashboard_stop.md b/docs/v0.3.X/docs/cscli/cscli_dashboard_stop.md similarity index 100% rename from docs/cscli/cscli_dashboard_stop.md rename to docs/v0.3.X/docs/cscli/cscli_dashboard_stop.md diff --git a/docs/cscli/cscli_inspect.md b/docs/v0.3.X/docs/cscli/cscli_inspect.md similarity index 100% rename from docs/cscli/cscli_inspect.md rename to docs/v0.3.X/docs/cscli/cscli_inspect.md diff --git a/docs/cscli/cscli_inspect_collection.md b/docs/v0.3.X/docs/cscli/cscli_inspect_collection.md similarity index 100% rename from docs/cscli/cscli_inspect_collection.md rename to docs/v0.3.X/docs/cscli/cscli_inspect_collection.md diff --git a/docs/cscli/cscli_inspect_parser.md b/docs/v0.3.X/docs/cscli/cscli_inspect_parser.md similarity index 100% rename from docs/cscli/cscli_inspect_parser.md rename to docs/v0.3.X/docs/cscli/cscli_inspect_parser.md diff --git a/docs/cscli/cscli_inspect_postoverflow.md b/docs/v0.3.X/docs/cscli/cscli_inspect_postoverflow.md similarity index 100% rename from docs/cscli/cscli_inspect_postoverflow.md rename to docs/v0.3.X/docs/cscli/cscli_inspect_postoverflow.md diff --git a/docs/cscli/cscli_inspect_scenario.md b/docs/v0.3.X/docs/cscli/cscli_inspect_scenario.md similarity index 100% rename from docs/cscli/cscli_inspect_scenario.md rename to docs/v0.3.X/docs/cscli/cscli_inspect_scenario.md diff --git a/docs/cscli/cscli_install.md b/docs/v0.3.X/docs/cscli/cscli_install.md similarity index 100% rename from docs/cscli/cscli_install.md rename to docs/v0.3.X/docs/cscli/cscli_install.md diff --git a/docs/cscli/cscli_install_collection.md b/docs/v0.3.X/docs/cscli/cscli_install_collection.md similarity 
similarity index 100%
rename from docs/cscli/cscli_install_collection.md
rename to docs/v0.3.X/docs/cscli/cscli_install_collection.md
diff --git a/docs/cscli/cscli_install_parser.md b/docs/v0.3.X/docs/cscli/cscli_install_parser.md
similarity index 100%
rename from docs/cscli/cscli_install_parser.md
rename to docs/v0.3.X/docs/cscli/cscli_install_parser.md
diff --git a/docs/cscli/cscli_install_postoverflow.md b/docs/v0.3.X/docs/cscli/cscli_install_postoverflow.md
similarity index 100%
rename from docs/cscli/cscli_install_postoverflow.md
rename to docs/v0.3.X/docs/cscli/cscli_install_postoverflow.md
diff --git a/docs/cscli/cscli_install_scenario.md b/docs/v0.3.X/docs/cscli/cscli_install_scenario.md
similarity index 100%
rename from docs/cscli/cscli_install_scenario.md
rename to docs/v0.3.X/docs/cscli/cscli_install_scenario.md
diff --git a/docs/cscli/cscli_list.md b/docs/v0.3.X/docs/cscli/cscli_list.md
similarity index 100%
rename from docs/cscli/cscli_list.md
rename to docs/v0.3.X/docs/cscli/cscli_list.md
diff --git a/docs/cscli/cscli_list_collections.md b/docs/v0.3.X/docs/cscli/cscli_list_collections.md
similarity index 100%
rename from docs/cscli/cscli_list_collections.md
rename to docs/v0.3.X/docs/cscli/cscli_list_collections.md
diff --git a/docs/cscli/cscli_list_parsers.md b/docs/v0.3.X/docs/cscli/cscli_list_parsers.md
similarity index 100%
rename from docs/cscli/cscli_list_parsers.md
rename to docs/v0.3.X/docs/cscli/cscli_list_parsers.md
diff --git a/docs/cscli/cscli_list_postoverflows.md b/docs/v0.3.X/docs/cscli/cscli_list_postoverflows.md
similarity index 100%
rename from docs/cscli/cscli_list_postoverflows.md
rename to docs/v0.3.X/docs/cscli/cscli_list_postoverflows.md
diff --git a/docs/cscli/cscli_list_scenarios.md b/docs/v0.3.X/docs/cscli/cscli_list_scenarios.md
similarity index 100%
rename from docs/cscli/cscli_list_scenarios.md
rename to docs/v0.3.X/docs/cscli/cscli_list_scenarios.md
diff --git a/docs/cscli/cscli_metrics.md b/docs/v0.3.X/docs/cscli/cscli_metrics.md
similarity index 100%
rename from docs/cscli/cscli_metrics.md
rename to docs/v0.3.X/docs/cscli/cscli_metrics.md
diff --git a/docs/cscli/cscli_remove.md b/docs/v0.3.X/docs/cscli/cscli_remove.md
similarity index 100%
rename from docs/cscli/cscli_remove.md
rename to docs/v0.3.X/docs/cscli/cscli_remove.md
diff --git a/docs/cscli/cscli_remove_collection.md b/docs/v0.3.X/docs/cscli/cscli_remove_collection.md
similarity index 100%
rename from docs/cscli/cscli_remove_collection.md
rename to docs/v0.3.X/docs/cscli/cscli_remove_collection.md
diff --git a/docs/cscli/cscli_remove_parser.md b/docs/v0.3.X/docs/cscli/cscli_remove_parser.md
similarity index 100%
rename from docs/cscli/cscli_remove_parser.md
rename to docs/v0.3.X/docs/cscli/cscli_remove_parser.md
diff --git a/docs/cscli/cscli_remove_postoverflow.md b/docs/v0.3.X/docs/cscli/cscli_remove_postoverflow.md
similarity index 100%
rename from docs/cscli/cscli_remove_postoverflow.md
rename to docs/v0.3.X/docs/cscli/cscli_remove_postoverflow.md
diff --git a/docs/cscli/cscli_remove_scenario.md b/docs/v0.3.X/docs/cscli/cscli_remove_scenario.md
similarity index 100%
rename from docs/cscli/cscli_remove_scenario.md
rename to docs/v0.3.X/docs/cscli/cscli_remove_scenario.md
diff --git a/docs/cscli/cscli_simulation.md b/docs/v0.3.X/docs/cscli/cscli_simulation.md
similarity index 100%
rename from docs/cscli/cscli_simulation.md
rename to docs/v0.3.X/docs/cscli/cscli_simulation.md
diff --git a/docs/cscli/cscli_simulation_disable.md b/docs/v0.3.X/docs/cscli/cscli_simulation_disable.md
similarity index 100%
rename from docs/cscli/cscli_simulation_disable.md
rename to docs/v0.3.X/docs/cscli/cscli_simulation_disable.md
diff --git a/docs/cscli/cscli_simulation_enable.md b/docs/v0.3.X/docs/cscli/cscli_simulation_enable.md
similarity index 100%
rename from docs/cscli/cscli_simulation_enable.md
rename to docs/v0.3.X/docs/cscli/cscli_simulation_enable.md
diff --git a/docs/cscli/cscli_simulation_status.md b/docs/v0.3.X/docs/cscli/cscli_simulation_status.md
similarity index 100%
rename from docs/cscli/cscli_simulation_status.md
rename to docs/v0.3.X/docs/cscli/cscli_simulation_status.md
diff --git a/docs/cscli/cscli_update.md b/docs/v0.3.X/docs/cscli/cscli_update.md
similarity index 100%
rename from docs/cscli/cscli_update.md
rename to docs/v0.3.X/docs/cscli/cscli_update.md
diff --git a/docs/cscli/cscli_upgrade.md b/docs/v0.3.X/docs/cscli/cscli_upgrade.md
similarity index 100%
rename from docs/cscli/cscli_upgrade.md
rename to docs/v0.3.X/docs/cscli/cscli_upgrade.md
diff --git a/docs/cscli/cscli_upgrade_collection.md b/docs/v0.3.X/docs/cscli/cscli_upgrade_collection.md
similarity index 100%
rename from docs/cscli/cscli_upgrade_collection.md
rename to docs/v0.3.X/docs/cscli/cscli_upgrade_collection.md
diff --git a/docs/cscli/cscli_upgrade_parser.md b/docs/v0.3.X/docs/cscli/cscli_upgrade_parser.md
similarity index 100%
rename from docs/cscli/cscli_upgrade_parser.md
rename to docs/v0.3.X/docs/cscli/cscli_upgrade_parser.md
diff --git a/docs/cscli/cscli_upgrade_postoverflow.md b/docs/v0.3.X/docs/cscli/cscli_upgrade_postoverflow.md
similarity index 100%
rename from docs/cscli/cscli_upgrade_postoverflow.md
rename to docs/v0.3.X/docs/cscli/cscli_upgrade_postoverflow.md
diff --git a/docs/cscli/cscli_upgrade_scenario.md b/docs/v0.3.X/docs/cscli/cscli_upgrade_scenario.md
similarity index 100%
rename from docs/cscli/cscli_upgrade_scenario.md
rename to docs/v0.3.X/docs/cscli/cscli_upgrade_scenario.md
diff --git a/docs/getting_started/concepts.md b/docs/v0.3.X/docs/getting_started/concepts.md
similarity index 86%
rename from docs/getting_started/concepts.md
rename to docs/v0.3.X/docs/getting_started/concepts.md
index 7413ea951..475a96bb1 100644
--- a/docs/getting_started/concepts.md
+++ b/docs/v0.3.X/docs/getting_started/concepts.md
@@ -1,9 +1,9 @@
-{{crowdsec.Name}}'s main goal is to crunch logs to detect things (duh).
+{{v0X.crowdsec.Name}}'s main goal is to crunch logs to detect things (duh).
You will find below an introduction to the concepts that are frequently used within the documentation.
## Acquisition
-[Acquistion configuration](/guide/crowdsec/acquisition/) defines which streams of information {{crowdsec.name}} is going to process.
+[Acquisition configuration](/Crowdsec/v0/guide/crowdsec/acquisition/) defines which streams of information {{v0X.crowdsec.name}} is going to process.
At the time of writing, it's mostly files, but it should be more or less any kind of stream, such as a kafka topic or a cloudtrail.
@@ -27,23 +27,23 @@ labels:
The `labels` part is here to tag the incoming logs with a type. `labels.type` are used by the parsers to know which logs to process.
-## Parsers [[reference](/references/parsers/)]
+## Parsers [[reference](/Crowdsec/v0/references/parsers/)]
For logs to be able to be exploited and analyzed, they need to be parsed and normalized, and this is where parsers are used.
A parser is a YAML configuration file that describes how a string is being parsed. Said string can be a log line, or a field extracted from a previous parser.
While a lot of parsers rely on the **GROK** approach (a.k.a regular expression named capture groups), parsers can as well reference enrichment modules to allow specific data processing.
-A parser usually has a specific scope. For example, if you are using [nginx](https://nginx.org), you will probably want to use the `crowdsecurity/nginx-logs` which allows your {{crowdsec.name}} setup to parse nginx's access and error logs.
+A parser usually has a specific scope. For example, if you are using [nginx](https://nginx.org), you will probably want to use the `crowdsecurity/nginx-logs` parser, which allows your {{v0X.crowdsec.name}} setup to parse nginx's access and error logs.
Parsers are organized into stages to allow pipelines and branching in parsing.
-See the [{{hub.name}}]({{hub.url}}) to explore parsers, or see below some examples :
+See the [{{v0X.hub.name}}]({{v0X.hub.url}}) to explore parsers, or see some examples below :
- [apache2 access/error log parser](https://github.com/crowdsecurity/hub/blob/master/parsers/s01-parse/crowdsecurity/apache2-logs.yaml)
- [iptables logs parser](https://github.com/crowdsecurity/hub/blob/master/parsers/s01-parse/crowdsecurity/iptables-logs.yaml)
- [http logs post-processing](https://github.com/crowdsecurity/hub/blob/master/parsers/s02-enrich/crowdsecurity/http-logs.yaml)
-You can as well [write your own](/write_configurations/parsers/) !
+You can also [write your own](/Crowdsec/v0/write_configurations/parsers/) !
## Stages
@@ -66,15 +66,15 @@ Every event starts in the first stage, and will move to the next stage once it h
## Enrichers
-Enrichment is the action of adding extra context to an event based on the information we already have, so that better decision can later be taken. In most cases, you should be able to find the relevant enrichers on our {{hub.htmlname}}.
+Enrichment is the action of adding extra context to an event based on the information we already have, so that better decisions can later be taken. In most cases, you should be able to find the relevant enrichers on our {{v0X.hub.htmlname}}.
A common/simple type of enrichment would be [geoip-enrich](https://github.com/crowdsecurity/hub/blob/master/parsers/s02-enrich/crowdsecurity/geoip-enrich.yaml) of an event (adding information such as : origin country, origin AS and origin IP range to an event).
-Once again, you should be able to find the ones you're looking for on the {{hub.htmlname}} !
+Once again, you should be able to find the ones you're looking for on the {{v0X.hub.htmlname}} !
-## Scenarios [[reference](/references/scenarios/)]
+## Scenarios [[reference](/Crowdsec/v0/references/scenarios/)]
-Scenarios is the expression of a heuristic that allows you to qualify a specific event (usually an attack).It is a YAML file that describes a set of events characterizing a scenario. Scenarios in {{crowdsec.name}} gravitate around the [leaky bucket](https://en.wikipedia.org/wiki/Leaky_bucket) principle.
+A scenario is the expression of a heuristic that allows you to qualify a specific event (usually an attack). It is a YAML file that describes a set of events characterizing a scenario. Scenarios in {{v0X.crowdsec.name}} gravitate around the [leaky bucket](https://en.wikipedia.org/wiki/Leaky_bucket) principle.
A scenario description includes at least :
@@ -84,13 +84,13 @@ A scenario description includes at least :
The description allows for many other rules to be specified (blackhole, distinct filters etc.), to allow rather complex scenarios.
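As a reference point, here is a minimal sketch of what such a scenario file can look like; it is modeled on the hub's `crowdsecurity/ssh-bf` scenario, but the filter expression and numeric values below are illustrative rather than the hub's exact definition:

```yaml
# Leaky bucket sketch: the bucket overflows once 6 matching events from
# the same source IP land within the leak window (values illustrative).
type: leaky
name: crowdsecurity/ssh-bf
description: "Detect ssh bruteforce"
filter: "evt.Meta.log_type == 'ssh_failed-auth'"
groupby: "evt.Meta.source_ip"
capacity: 5
leakspeed: "10s"
blackhole: 1m
labels:
  remediation: true
```

The `labels.remediation: true` part is what the default output profile shown later in this document (`filter: "sig.Labels.remediation == 'true'"`) keys on to decide that an overflow deserves a ban.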
-See the [{{hub.name}}]({{hub.url}}) to explore scenarios and their capabilities, or see below some examples :
+See the [{{v0X.hub.name}}]({{v0X.hub.url}}) to explore scenarios and their capabilities, or see some examples below :
- [ssh bruteforce detection](https://github.com/crowdsecurity/hub/blob/master/scenarios/crowdsecurity/ssh-bf.yaml)
- [distinct http-404 scan](https://github.com/crowdsecurity/hub/blob/master/scenarios/crowdsecurity/http-scan-uniques_404.yaml)
- [iptables port scan](https://github.com/crowdsecurity/hub/blob/master/scenarios/crowdsecurity/iptables-scan-multi_ports.yaml)
-You can as well [write your own](/write_configurations/scenarios/) !
+You can also [write your own](/Crowdsec/v0/write_configurations/scenarios/) !
## Collections
@@ -98,11 +98,11 @@ You can as well [write your own](/write_configurations/scenarios/) !
To make user's life easier, "collections" are available, which are just a bundle of parsers and scenarios. In this way, if you want to cover basic use-cases of let's say "nginx", you can just install the `crowdsecurity/nginx` collection that is composed of `crowdsecurity/nginx-logs` parser, as well as generic http scenarios such as `crowdsecurity/base-http-scenarios`.
-As usual, those can be found on the {{hub.htmlname}} !
+As usual, those can be found on the {{v0X.hub.htmlname}} !
## Event
-The objects that are processed within {{crowdsec.name}} are named "Events".
+The objects that are processed within {{v0X.crowdsec.name}} are named "Events".
An Event can be a log line, or an overflow result. This object layout evolves around a few important items :
- `Parsed` is an associative array that will be used during parsing to store temporary variables or processing results.
diff --git a/docs/getting_started/crowdsec-tour.md b/docs/v0.3.X/docs/getting_started/crowdsec-tour.md
similarity index 87%
rename from docs/getting_started/crowdsec-tour.md
rename to docs/v0.3.X/docs/getting_started/crowdsec-tour.md
index 4da4c70cf..527550565 100644
--- a/docs/getting_started/crowdsec-tour.md
+++ b/docs/v0.3.X/docs/getting_started/crowdsec-tour.md
@@ -4,18 +4,18 @@
> List installed parsers/scenarios/collections/enricher
```bash
-{{cli.bin}} list
+{{v0X.cli.bin}} list
```
-On the machine where you deployed {{crowdsec.name}}, type `{{cli.bin}} list` to see deployed configurations.
-This list represents the parsers, scenarios and/or collections that you deployed. They represent what your {{crowdsec.name}} setup can read (logs) and detect (scenarios).
+On the machine where you deployed {{v0X.crowdsec.name}}, type `{{v0X.cli.bin}} list` to see deployed configurations.
+This list represents the parsers, scenarios and/or collections that you deployed. They represent what your {{v0X.crowdsec.name}} setup can read (logs) and detect (scenarios).
-Check [{{cli.name}} configuration](/guide/cscli/) management for more !
+Check [{{v0X.cli.name}} configuration](/Crowdsec/v0/guide/cscli/) management for more !
output example
```bash
-bui@sd:~$ {{cli.bin}} list
+bui@sd:~$ {{v0X.cli.bin}} list
INFO[0000] Loaded 9 collecs, 14 parsers, 12 scenarios, 1 post-overflow parsers
INFO[0000] PARSERS:
--------------------------------------------------------------------------------------------------------------------
@@ -63,9 +63,9 @@ INFO[0000] POSTOVERFLOWS:
## Finding configurations
-{{crowdsec.Name}} efficiency is dictated by installed parsers and scenarios, so [take a look at the {{hub.name}}]({{hub.url}}) to find the appropriated ones !
+{{v0X.crowdsec.Name}} efficiency is dictated by installed parsers and scenarios, so [take a look at the {{v0X.hub.name}}]({{v0X.hub.url}}) to find the appropriate ones !
-If you didn't perform the setup with the wizard, or if you are reading logs from other machines, you will have to pick the right {{collections.htmlname}}. This will ensure that {{crowdsec.name}} can parse the logs and has the corresponding scenarios.
+If you didn't perform the setup with the wizard, or if you are reading logs from other machines, you will have to pick the right {{v0X.collections.htmlname}}. This will ensure that {{v0X.crowdsec.name}} can parse the logs and has the corresponding scenarios.
For example, if you're processing [nginx](http://nginx.org) logs, you might want to install the [nginx collection](https://hub.crowdsec.net/author/crowdsecurity/collections/nginx).
@@ -73,7 +73,7 @@ A collection can be installed by typing `cscli install collection crowdsecurity/
In the same spirit, the [crowdsecurity/sshd](https://hub.crowdsec.net/author/crowdsecurity/collections/sshd)'s collection will fit most sshd setups !
-While {{crowdsec.name}} is running, a quick look at [`cscli metrics`](/observability/command_line/) should help you ensure that your log sources are correctly parsed.
+While {{v0X.crowdsec.name}} is running, a quick look at [`cscli metrics`](/Crowdsec/v0/observability/command_line/) should help you ensure that your log sources are correctly parsed.
## List existing bans
@@ -81,20 +81,20 @@ While {{crowdsec.name}} is running, a quick look at [`cscli metrics`](/observabi
> List current bans
```bash
-{{cli.bin}} ban list
+{{v0X.cli.bin}} ban list
```
-On the machine where you deployed {{crowdsec.name}}, type `{{cli.bin}} ban list` to see existing bans.
-If you just deployed {{crowdsec.name}}, the list might be empty, but don't worry, it simply means you haven't yet been attacked, congrats!
+On the machine where you deployed {{v0X.crowdsec.name}}, type `{{v0X.cli.bin}} ban list` to see existing bans.
+If you just deployed {{v0X.crowdsec.name}}, the list might be empty, but don't worry, it simply means you haven't yet been attacked, congrats!
-Check [{{cli.name}} ban](/cheat_sheets/ban-mgmt/) management for more !
+Check [{{v0X.cli.name}} ban](/Crowdsec/v0/cheat_sheets/ban-mgmt/) management for more !
output example
```bash
-bui@sd:~$ {{cli.bin}} ban list
+bui@sd:~$ {{v0X.cli.bin}} ban list
7 local decisions:
+--------+----------------+--------------------------------+------+--------+---------+--------------------------------+--------+------------+
| SOURCE | IP | REASON | BANS | ACTION | COUNTRY | AS | EVENTS | EXPIRATION |
@@ -117,17 +117,17 @@ There are different bans sources:
- local : bans triggered locally
- api : bans fetched from the API as part of the global consensus
- - cli : bans added via `{{cli.bin}} ban add`
+ - cli : bans added via `{{v0X.cli.bin}} ban add`
## Monitor on-going activity (prometheus)
> List metrics
```bash
-{{cli.bin}} metrics
+{{v0X.cli.bin}} metrics
```
-The metrics displayed are extracted from {{crowdsec.name}} prometheus.
+The metrics displayed are extracted from {{v0X.crowdsec.name}}'s prometheus agent.
The indicators are grouped by scope :
- Buckets : Know which buckets are created and/or overflew (scenario efficiency)
@@ -138,7 +138,7 @@ The indicators are grouped by scope :
output example
```bash
-bui@sd:~$ {{cli.bin}} metrics
+bui@sd:~$ {{v0X.cli.bin}} metrics
INFO[0000] Buckets Metrics:
+---------------------------------+-----------+--------------+--------+---------+
| BUCKET | OVERFLOWS | INSTANTIATED | POURED | EXPIRED |
@@ -179,9 +179,9 @@ INFO[0000] Parser Metrics:
## Monitor on-going activity (log files)
-The {{crowdsec.main_log}} file will tell you what is going on and when an IP is blocked.
+The {{v0X.crowdsec.main_log}} file will tell you what is going on and when an IP is blocked.
-Check [{{crowdsec.name}} monitoring](/observability/overview/) for more !
+Check [{{v0X.crowdsec.name}} monitoring](/Crowdsec/v0/observability/overview/) for more !
diff --git a/docs/v0.3.X/docs/getting_started/installation.md b/docs/v0.3.X/docs/getting_started/installation.md
new file mode 100644
index 000000000..ac99cd301
--- /dev/null
+++ b/docs/v0.3.X/docs/getting_started/installation.md
@@ -0,0 +1,83 @@
+# Installation
+
+Fetch {{v0X.crowdsec.name}}'s latest version [here]({{v0X.crowdsec.download_url}}).
+
+```bash
+tar xvzf crowdsec-release.tgz
+```
+```bash
+cd crowdsec-v0.X.X
+```
+
+A {{v0X.wizard.name}} is provided to help you deploy {{v0X.crowdsec.name}} and {{v0X.cli.name}}.
+
+## Using the interactive wizard
+
+```
+sudo {{v0X.wizard.bin}} -i
+```
+
+![crowdsec](../assets/images/crowdsec_install.gif)
+
+The {{v0X.wizard.name}} is going to guide you through the following steps :
+
+ - detect services that are present on your machine
+ - detect selected services logs
+ - suggest collections (parsers and scenarios) to deploy
+ - deploy & configure {{v0X.crowdsec.name}} in order to watch selected logs for selected scenarios
+
+The process should take less than a minute, [please report if there are any issues]({{v0X.wizard.bugreport}}).
+
+You are then ready to [take a tour](/Crowdsec/v0/getting_started/crowdsec-tour/) of your freshly deployed {{v0X.crowdsec.name}} !
+
+## Binary installation
+
+> you of little faith
+
+```
+sudo {{v0X.wizard.bin}} --bininstall
+```
+
+This will deploy valid (but empty) {{v0X.crowdsec.name}} configuration files and binaries.
+Beware, in this state, {{v0X.crowdsec.name}} won't monitor/detect anything unless configured.
+
+```
+cscli install collection crowdsecurity/linux
+```
+
+Installing at least the `crowdsecurity/linux` collection will provide you with :
+
+ - syslog parser
+ - geoip enrichment
+ - date parsers
+
+You will also need to configure your {{v0X.ref.acquis}} file to feed some logs to {{v0X.crowdsec.name}}.
+
+## From source
+
+!!! warning "Requirements"
+
+    * [Go](https://golang.org/doc/install) v1.13+
+    * `git clone {{v0X.crowdsec.url}}`
+    * [jq](https://stedolan.github.io/jq/download/)
+
+Go into the {{v0X.crowdsec.name}} folder and build the binaries :
+
+```bash
+cd crowdsec
+```
+```bash
+make build
+```
+
+The {{v0X.crowdsec.name}} binary will be located in `./cmd/crowdsec/crowdsec` and the {{v0X.cli.name}} binary in `cmd/crowdsec-cli/{{v0X.cli.bin}}`
+
+Now, you can install either with the [interactive wizard](#using-the-interactive-wizard) or in [unattended mode](#using-unattended-mode).
\ No newline at end of file
diff --git a/docs/guide/crowdsec/acquisition.md b/docs/v0.3.X/docs/guide/crowdsec/acquisition.md
similarity index 80%
rename from docs/guide/crowdsec/acquisition.md
rename to docs/v0.3.X/docs/guide/crowdsec/acquisition.md
index b7d728d78..a08a0c701 100644
--- a/docs/guide/crowdsec/acquisition.md
+++ b/docs/v0.3.X/docs/guide/crowdsec/acquisition.md
@@ -1,12 +1,12 @@
!!! info
-    Please note that the `{{config.acquis_path}}` should be auto generated by the {{wizard.name}} in most case.
+    Please note that the `{{v0X.config.acquis_path}}` should be auto-generated by the {{v0X.wizard.name}} in most cases.
The acquisition configuration specifies lists of files to monitor and associated "labels".
The `type` label is mandatory as it's later used in the process to determine which parser(s) can handle lines coming from this source.
-Acquisition can be found in `{{config.acquis_path}}`, for example :
+Acquisition can be found in `{{v0X.config.acquis_path}}`, for example :
Acquisition example
```yaml
@@ -29,7 +29,7 @@ labels:
### At startup
-At startup, you will see the monitored files in `{{crowdsec.main_log}}` :
+At startup, you will see the monitored files in `{{v0X.crowdsec.main_log}}` :
```
...
time="30-04-2020 08:57:25" level=info msg="Opening file '/var/log/kern.log' (pat
@@ -44,20 +44,20 @@ time="30-04-2020 08:57:25" level=info msg="Opening file '/var/log/kern.log' (pat
### At runtime
-{{cli.name}} allows you to view {{crowdsec.name}} metrics info via the `metrics` command.
+{{v0X.cli.name}} allows you to view {{v0X.crowdsec.name}} metrics info via the `metrics` command.
This allows you to see how many lines are coming from each source, and if they are parsed correctly.
You can see those metrics with the following command:
```
-{{cli.bin}} metrics
+{{v0X.cli.bin}} metrics
```
- {{cli.name}} metrics example
+ {{v0X.cli.name}} metrics example
```bash
-## {{cli.bin}} metrics
+## {{v0X.cli.bin}} metrics
...
INFO[0000] Acquisition Metrics:
+------------------------------------------+------------+--------------+----------------+------------------------+
@@ -78,6 +78,6 @@ INFO[0000] Acquisition Metrics:
!!! info
-    All these metrics are actually coming from {{crowdsec.name}}'s prometheus agent. See [prometheus](/observability/prometheus/) directly for more insights.
+    All these metrics are actually coming from {{v0X.crowdsec.name}}'s prometheus agent. See [prometheus](/Crowdsec/v0/observability/prometheus/) directly for more insights.
diff --git a/docs/guide/crowdsec/enrichers.md b/docs/v0.3.X/docs/guide/crowdsec/enrichers.md
similarity index 64%
rename from docs/guide/crowdsec/enrichers.md
rename to docs/v0.3.X/docs/guide/crowdsec/enrichers.md
index 2915bb4f3..3bb6f4512 100644
--- a/docs/guide/crowdsec/enrichers.md
+++ b/docs/v0.3.X/docs/guide/crowdsec/enrichers.md
@@ -1,6 +1,6 @@
-Enrichers are basically {{parsers.htmlname}} that can rely on external methods to provide extra contextual information to the event. The enrichers are usually in the `s02-enrich` {{stage.htmlname}} (after most of the parsing happened).
+Enrichers are basically {{v0X.parsers.htmlname}} that can rely on external methods to provide extra contextual information to the event. The enrichers are usually in the `s02-enrich` {{v0X.stage.htmlname}} (after most of the parsing happened).
-Enrichers functions should all accept a string as a parameter, and return an associative string array, that will be automatically merged into the `Enriched` map of the {{event.htmlname}}.
+Enricher functions should all accept a string as a parameter, and return an associative string array that will be automatically merged into the `Enriched` map of the {{v0X.event.htmlname}}.
!!! warning
    At the time of writing, enrichers plugin mechanism implementation is still ongoing (read: the list of available enrichment methods is currently hardcoded).
@@ -15,7 +15,7 @@ It exposes three methods : `GeoIpCity` `GeoIpASN` and `IpToRange` that are used
Enrichers can be installed as any other parsers with the following command:
```
-{{cli.bin}} install parser crowdsecurity/geoip-enrich
+{{v0X.cli.bin}} install parser crowdsecurity/geoip-enrich
```
-Take a tour at the {{hub.htmlname}} to find them !
+Take a tour of the {{v0X.hub.htmlname}} to find them !
diff --git a/docs/guide/crowdsec/overview.md b/docs/v0.3.X/docs/guide/crowdsec/overview.md
similarity index 55%
rename from docs/guide/crowdsec/overview.md
rename to docs/v0.3.X/docs/guide/crowdsec/overview.md
index 814a61107..7e0e51d32 100644
--- a/docs/guide/crowdsec/overview.md
+++ b/docs/v0.3.X/docs/guide/crowdsec/overview.md
@@ -1,5 +1,5 @@
-{{crowdsec.name}} configuration lives under `{{config.crowdsec_dir}}` and should be as :
+{{v0X.crowdsec.name}} configuration lives under `{{v0X.config.crowdsec_dir}}` and should look as follows :
## default.yaml
@@ -36,13 +36,13 @@ plugin:
The working directory where Prometheus will write metrics in text file.
#### `data_dir:`
-Directory where {{crowdsec.Name}} will install its data ({{crowdsec.Name}} database for example).
+Directory where {{v0X.crowdsec.Name}} will install its data ({{v0X.crowdsec.Name}} database for example).
#### `pid_dir:`
-To specify where {{crowdsec.Name}} PID file will be stored.
+To specify where the {{v0X.crowdsec.Name}} PID file will be stored.
#### `config_dir:`
-To specify where {{crowdsec.Name}} configuration will be stored.
+To specify where {{v0X.crowdsec.Name}} configuration will be stored.
#### `log_dir:`
To specify where the logs should be stored.
@@ -62,13 +62,13 @@ To specify the logging level, available levels:
* `error`
#### `profiling:`
-To enable or disable the profiling in {{crowdsec.Name}}.
+To enable or disable the profiling in {{v0X.crowdsec.Name}}.
#### `apimode:`
-To enable or disable alerts sending to the {{api.htmlname}}.
+To enable or disable sending signals to the {{v0X.api.htmlname}}.
#### `daemon:`
-To enable or disable {{crowdsec.Name}} daemon mode.
+To enable or disable {{v0X.crowdsec.Name}} daemon mode.
#### `prometheus:`
To enable or disable Prometheus metrics.
@@ -77,24 +77,24 @@ To enable or disable Prometheus metrics.
If `prometheus` is enabled, and is set to `aggregated`, will restrict prometheus metrics to global ones. All metrics containing a source as a label will be unregistered. Meant to keep cardinality low when relevant.
#### `http_listen:`
-To configure the Prometheus service listening `address:port` or {{crowdsec.Name}} profiling
+To configure the Prometheus service listening `address:port` or {{v0X.crowdsec.Name}} profiling
#### `plugin:`
-To specify the directories where {{ref.output}} plugins will be stored :
-* `backend:` : the path where all {{crowdsec.Name}} backend plugins (database output, ...) will be located.
+To specify the directories where {{v0X.ref.output}} plugins will be stored :
+* `backend:` : the path where all {{v0X.crowdsec.Name}} backend plugins (database output, ...) will be located.
## acquis.yaml
-This is the file that tells which streams (or files) {{crowdsec.name}} is reading, and their types (so that it knows how to parse them). If you're lucky, this file should be auto-generated by the wizard.
+This is the file that tells which streams (or files) {{v0X.crowdsec.name}} is reading, and their types (so that it knows how to parse them). If you're lucky, this file should be auto-generated by the wizard.
-You can find details on the configuration file format [here](/guide/crowdsec/acquisition/).
+You can find details on the configuration file format [here](/Crowdsec/v0/guide/crowdsec/acquisition/).
## api.yaml
Name is self-explanatory : it holds API configuration.
-This file should never be edited by a human : the wizard will deploy safe default for it, and {{cli.name}} will alter it on your behalf when you register or enroll your machine.
+This file should never be edited by a human : the wizard will deploy safe defaults for it, and {{v0X.cli.name}} will alter it on your behalf when you register or enroll your machine.
You can look into it, and you should see :
@@ -103,11 +103,11 @@ You can look into it, and you should see :
To get new credentials :
```bash
-{{cli.name}} api register
+{{v0X.cli.name}} api register
```
Or if you loose your credentials:
```bash
-{{cli.name}} api reset
+{{v0X.cli.name}} api reset
```
@@ -116,25 +116,25 @@ Or if you loose your credentials:
The profiles is what allows you to decide how do you react when a scenario is triggered :
- do you notify yourself on mattermost/slack ?
- - do you push the alert to a database so that your bouncers can stop the IP from continuing its attack ?
+ - do you push the signal to a database so that your bouncers can stop the IP from continuing its attack ?
- - do you want to avoid pushing this alert to the API ?
+ - do you want to avoid pushing this signal to the API ?
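To make those questions concrete before the full output reference later in this document, here is a hedged sketch of a profile that answers them with "keep track locally, but don't ban and don't push to the API"; the profile name and values are illustrative, and the field layout follows the samples shown in the output reference below:

```yaml
# Illustrative "observe only" profile: store matching overflows in the
# local backend but never send them to the crowdsec API (names/values
# are examples, not a shipped default).
profile: notify_only
filter: "sig.Labels.remediation != 'true'"
api: false # don't push these signals to the crowdsec API
outputs:
  - plugin: database
    store: true
```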
Behind the scenes, the "profiles" system actually allows you to dispatch an event/overflow to various output plugins.
-You can find details on the configuration file format of {{ref.output}}.
+You can find details on the configuration file format of {{v0X.ref.output}}.
## parsers/
-This directory holds all the {{parsers.htmlname}} that are enabled on your system.
+This directory holds all the {{v0X.parsers.htmlname}} that are enabled on your system.
-The parsers are organized in {{stage.htmlname}} (which are just folders) and the {{parsers.htmlname}} themselves are yaml files.
+The parsers are organized in {{v0X.stage.htmlname}} (which are just folders) and the {{v0X.parsers.htmlname}} themselves are yaml files.
## scenarios/
-This directory holds all the {{scenarios.htmlname}} that are enabled on your system.
+This directory holds all the {{v0X.scenarios.htmlname}} that are enabled on your system.
-The {{scenarios.htmlname}} are yaml files.
+The {{v0X.scenarios.htmlname}} are yaml files.
diff --git a/docs/guide/crowdsec/parsers.md b/docs/v0.3.X/docs/guide/crowdsec/parsers.md
similarity index 77%
rename from docs/guide/crowdsec/parsers.md
rename to docs/v0.3.X/docs/guide/crowdsec/parsers.md
index 9d98c9fa9..788e73bb7 100644
--- a/docs/guide/crowdsec/parsers.md
+++ b/docs/v0.3.X/docs/guide/crowdsec/parsers.md
@@ -1,23 +1,23 @@
## Listing installed parsers
-{{parsers.Htmlname}} are yaml files in `{{config.crowdsec_dir}}parsers//parser.yaml`.
+{{v0X.parsers.Htmlname}} are yaml files in `{{v0X.config.crowdsec_dir}}parsers//parser.yaml`.
!!! info
-    Alphabetical file order dictates the order of {{stage.htmlname}} and the orders of parsers within stage.
+    Alphabetical file order dictates the order of {{v0X.stage.htmlname}} and the order of parsers within each stage.
You can use the following command to view installed parsers:
```
-{{cli.bin}} list parsers
+{{v0X.cli.bin}} list parsers
```
- {{cli.name}} list example
+ {{v0X.cli.name}} list example
```bash
-# {{cli.name}} list parsers
+# {{v0X.cli.name}} list parsers
INFO[0000] Loaded 9 collecs, 14 parsers, 12 scenarios, 1 post-overflow parsers
--------------------------------------------------------------------------------------------------------------------
NAME 📦 STATUS VERSION LOCAL PATH
@@ -40,12 +40,10 @@ INFO[0000] Loaded 9 collecs, 14 parsers, 12 scenarios, 1 post-overflow parsers
### From the hub
-[{{hub.name}}]({{hub.parsers_url}}) allows you to find needed scenarios, just paste the command on your machine :
-
-![Hub Screenshot](/assets/images/hub_parser.png)
+{{v0X.hub.htmlname}} allows you to find needed parsers.
```bash
-# {{cli.name}} install parser crowdsec/nginx-logs
+# {{v0X.cli.name}} install parser crowdsec/nginx-logs
INFO[0000] Loaded 9 collecs, 14 parsers, 12 scenarios, 1 post-overflow parsers
INFO[0000] crowdsec/nginx-logs : OK
INFO[0000] Enabled parsers : crowdsec/nginx-logs
@@ -55,13 +53,13 @@ INFO[0000] Enabled crowdsec/nginx-logs
### Your own parsers
-[Write your parser configuration](/write_configurations/parsers/) and deploy yaml file in `{{config.crowdsec_dir}}parsers//`.
+[Write your parser configuration](/Crowdsec/v0/write_configurations/parsers/) and deploy the yaml file in `{{v0X.config.crowdsec_dir}}parsers//`.
## Monitoring parsers behavior
-{{cli.name}} allows you to view {{crowdsec.name}} metrics info via the `metrics` command.
+{{v0X.cli.name}} allows you to view {{v0X.crowdsec.name}} metrics info via the `metrics` command.
This allows you to see how many logs were ingested and then parsed or unparsed by said parser.
You can see those metrics with the following command:
@@ -70,10 +68,10 @@ cscli metrics
```
- {{cli.name}} metrics example
+ {{v0X.cli.name}} metrics example
```bash
-# {{cli.name}} metrics
+# {{v0X.cli.name}} metrics
...
INFO[0000] Parser Metrics:
+---------------------------+--------+--------+----------+
@@ -97,5 +95,5 @@ INFO[0000] Parser Metrics:
## Going further
-If you're interested into [understanding how parsers are made](/references/parsers/) or writing your own, please have a look at [this page](/write_configurations/parsers/).
+If you're interested in [understanding how parsers are made](/Crowdsec/v0/references/parsers/) or writing your own, please have a look at [this page](/Crowdsec/v0/write_configurations/parsers/).
diff --git a/docs/guide/crowdsec/scenarios.md b/docs/v0.3.X/docs/guide/crowdsec/scenarios.md
similarity index 78%
rename from docs/guide/crowdsec/scenarios.md
rename to docs/v0.3.X/docs/guide/crowdsec/scenarios.md
index f97ea96f5..3316a2ceb 100644
--- a/docs/guide/crowdsec/scenarios.md
+++ b/docs/v0.3.X/docs/guide/crowdsec/scenarios.md
@@ -4,24 +4,24 @@
Scenarios lead to the instantiation, and sometime the overflow, of buckets. When a bucket "overflows", the scenario is considered as having been realized.
-This event leads to the creation of a new {{event.htmlname}} that describes the scenario that just happened (via a {{signal.htmlname}}).
+This event leads to the creation of a new {{v0X.event.htmlname}} that describes the scenario that just happened (via a {{v0X.signal.htmlname}}).
## Listing installed scenarios
-scenarios are yaml files in `{{config.crowdsec_dir}}scenarios/.yaml`.
+Scenarios are yaml files in `{{v0X.config.crowdsec_dir}}scenarios/.yaml`.
You can view installed scenarios with the following command:
```
-{{cli.bin}} list scenarios
+{{v0X.cli.bin}} list scenarios
```
- {{cli.name}} list example
+ {{v0X.cli.name}} list example
```bash
-# {{cli.name}} list scenarios
+# {{v0X.cli.name}} list scenarios
INFO[0000] Loaded 9 collecs, 14 parsers, 12 scenarios, 1 post-overflow parsers
-----------------------------------------------------------------------------------------------------------------------------
NAME 📦 STATUS VERSION LOCAL PATH
@@ -39,13 +39,11 @@ INFO[0000] Loaded 9 collecs, 14 parsers, 12 scenarios, 1 post-overflow parsers
### From the hub
-[{{hub.name}}]({{hub.scenarios_url}}) allows you to find needed scenarios, just paste the command on your machine :
-
-![Hub Screenshot](/assets/images/hub_scenario.png)
+{{v0X.hub.htmlname}} allows you to find needed scenarios.
```bash
-# {{cli.name}} install scenario crowdsec/ssh-bf
+# {{v0X.cli.name}} install scenario crowdsec/ssh-bf
INFO[0000] Loaded 9 collecs, 14 parsers, 12 scenarios, 1 post-overflow parsers
INFO[0000] crowdsec/ssh-bf : OK
INFO[0000] Enabled scenarios : crowdsec/ssh-bf
@@ -55,27 +53,27 @@ INFO[0000] Enabled crowdsec/ssh-bf
### Your own scenarios
-[Write your scenario configuration](/write_configurations/scenarios/) and deploy yaml file in `{{config.crowdsec_dir}}scenarios/`.
+[Write your scenario configuration](/Crowdsec/v0/write_configurations/scenarios/) and deploy the yaml file in `{{v0X.config.crowdsec_dir}}scenarios/`.
## Monitoring scenarios behavior
-{{cli.name}} allows you to view {{crowdsec.name}} metrics info via the `metrics` command.
+{{v0X.cli.name}} allows you to view {{v0X.crowdsec.name}} metrics info via the `metrics` command.
This allows you to see how many "buckets" associated to each scenario have been created (an event eligible from said scenario has arrived), poured (how many subsequent events have been pushed to said bucket), overflowed (the scenario happened) or underflow (there was not enough event to make the bucket overflow, and it thus expired after a while).
You can see those metrics with the following command:
```
-{{cli.bin}} metrics
+{{v0X.cli.bin}} metrics
```
- {{cli.name}} metrics example
+ {{v0X.cli.name}} metrics example
```bash
-# {{cli.name}} metrics
+# {{v0X.cli.name}} metrics
INFO[0000] Buckets Metrics:
+------------------------------------+-----------+--------------+--------+---------+
| BUCKET | OVERFLOWS | INSTANTIATED | POURED | EXPIRED |
diff --git a/docs/guide/crowdsec/simulation.md b/docs/v0.3.X/docs/guide/crowdsec/simulation.md
similarity index 78%
rename from docs/guide/crowdsec/simulation.md
rename to docs/v0.3.X/docs/guide/crowdsec/simulation.md
index 0b05ddab2..5dfd27241 100644
--- a/docs/guide/crowdsec/simulation.md
+++ b/docs/v0.3.X/docs/guide/crowdsec/simulation.md
@@ -5,13 +5,13 @@ You can tag some (or all) scenarios as being in **simulation mode**, which is es
-!!! warning "Simulation vs [Whitelists](/write_configurations/whitelist/)"
-    Simulation and [Whitelists](/write_configurations/whitelist/) are not to be mixed. [Whitelists](/write_configurations/whitelist/) allows you to purely discard an overflow or a log, while simulation will only "cancel" the action against a peer, while keeping track of events and overflows.
+!!! warning "Simulation vs [Whitelists](/Crowdsec/v0/write_configurations/whitelist/)"
+    Simulation and [Whitelists](/Crowdsec/v0/write_configurations/whitelist/) are not to be mixed. [Whitelists](/Crowdsec/v0/write_configurations/whitelist/) allow you to purely discard an overflow or a log, while simulation only "cancels" the action against a peer while keeping track of events and overflows.
When this happens, the scenarios are still triggered, but the action is prefixed with `simulation:`, which means that bouncers won't take action against the peer(s) that triggered the scenario.
-Simulation can be managed with [cscli simulation](/cscli/cscli_simulation/) command, and allows you to have settings such as _"all in simulation except scenarios X,Y,Z"_ or _"only scenarios X,Y,Z in simulation mode"_ :
+Simulation can be managed with the [cscli simulation](/Crowdsec/v0/cscli/cscli_simulation/) command, and allows you to have settings such as _"all in simulation except scenarios X,Y,Z"_ or _"only scenarios X,Y,Z in simulation mode"_ :
- `cscli simulation enable` : Globally enables simulation (all scenarios will be in simulation mode)
- `cscli simulation enable author/my_scenario` : Enables simulation only for a specific scenario
diff --git a/docs/v0.3.X/docs/guide/cscli.md b/docs/v0.3.X/docs/guide/cscli.md
new file mode 100644
index 000000000..12ef513d1
--- /dev/null
+++ b/docs/v0.3.X/docs/guide/cscli.md
@@ -0,0 +1,39 @@
+`{{v0X.cli.bin}}` is the utility that will help you to manage {{v0X.crowdsec.name}}. This tool has the following functionalities:
+
+ - [manage bans]({{v0X.cli.ban_doc}})
+ - [backup and restore configuration]({{v0X.cli.backup_doc}})
+ - [display metrics]({{v0X.cli.metrics_doc}})
+ - [install configurations]({{v0X.cli.install_doc}})
+ - [remove configurations]({{v0X.cli.remove_doc}})
+ - [update configurations]({{v0X.cli.update_doc}})
+ - [upgrade configurations]({{v0X.cli.upgrade_doc}})
+ - [list configurations]({{v0X.cli.list_doc}})
+ - [interact with CrowdSec API]({{v0X.cli.api_doc}})
+ - [manage simulation]({{v0X.cli.simulation_doc}})
+
+ Take a look at the [dedicated documentation]({{v0X.cli.main_doc}})
+
+## Overview
+
+{{v0X.cli.name}} configuration location is `/etc/crowdsec/cscli/`.
+
+In this folder, we store the {{v0X.cli.name}} configuration and the hub cache files.
+
+## Config
+
+The {{v0X.cli.name}} configuration is light for now, stored in `/etc/crowdsec/cscli/config`.
+
+```yaml
+installdir: /etc/crowdsec/config # {{v0X.crowdsec.name}} configuration location
+backend: /etc/crowdsec/plugins/backend # path to the backend plugin used
+```
+
+For {{v0X.cli.name}} to be able to pull the {{v0X.api.topX.htmlname}}, you need a valid API configuration in [api.yaml](/Crowdsec/v0/guide/crowdsec/overview/#apiyaml).
+
+## Hub cache
+
+- `.index.json`: The file containing the metadata of all the existing {{v0X.collections.htmlname}}, {{v0X.parsers.htmlname}} and {{v0X.scenarios.htmlname}} stored in the {{v0X.hub.htmlname}}.
+- `hub/*`: Folder containing all the {{v0X.collections.htmlname}}, {{v0X.parsers.htmlname}} and {{v0X.scenarios.htmlname}} stored in the {{v0X.hub.htmlname}}.
+
+This is used to manage configurations from the {{v0X.cli.name}}.
\ No newline at end of file
diff --git a/docs/v0.3.X/docs/guide/overview.md b/docs/v0.3.X/docs/guide/overview.md
new file mode 100644
index 000000000..744025dff
--- /dev/null
+++ b/docs/v0.3.X/docs/guide/overview.md
@@ -0,0 +1,10 @@
+
+When talking about {{v0X.crowdsec.name}} or {{v0X.cli.name}} configurations, most things gravitate around {{v0X.parsers.htmlname}}, {{v0X.scenarios.htmlname}} and {{v0X.collections.htmlname}}.
+
+In the most common setups, all these configurations should be found on the {{v0X.hub.htmlname}} and installed with {{v0X.cli.name}}.
+
+It is important to keep those configurations up-to-date via the `{{v0X.cli.name}} upgrade` command.
+
+See the [{{v0X.cli.name}} list](/Crowdsec/v0/cheat_sheets/cscli-collections-tour/) command to view the state of your deployed configurations.
+
diff --git a/docs/v0.3.X/docs/index.md b/docs/v0.3.X/docs/index.md
new file mode 100644
index 000000000..9222d5b83
--- /dev/null
+++ b/docs/v0.3.X/docs/index.md
@@ -0,0 +1,31 @@
+[[Hub]]({{v0X.hub.url}}) [[Releases]]({{v0X.crowdsec.download_url}})
+
+# Architecture
+
+![Architecture](assets/images/crowdsec_architecture.png)
+
+## Components
+
+The {{v0X.crowdsec.name}} ecosystem is based on the following components :
+
+ - [{{v0X.crowdsec.Name}}]({{v0X.crowdsec.url}}) is the lightweight service that processes logs and keeps track of attacks.
+ - [{{v0X.cli.name}}]({{v0X.cli.main_doc}}) is the command line interface for humans; it allows you to view, add, or remove bans as well as to install, find, or update scenarios and parsers
+ - [{{v0X.bouncers.name}}]({{v0X.hub.plugins_url}}) are the components that block malevolent traffic, and can be deployed anywhere in your stack
+
+## Moving forward
+
+To learn more about {{v0X.crowdsec.name}} and give it a try, please see :
+
+ - [How to install {{v0X.crowdsec.name}}](/Crowdsec/v0/getting_started/installation/)
+ - [Take a quick tour of {{v0X.crowdsec.name}} and {{v0X.cli.name}} features](/Crowdsec/v0/getting_started/crowdsec-tour/)
+ - [Observability of {{v0X.crowdsec.name}}](/Crowdsec/v0/observability/overview/)
+ - [Understand {{v0X.crowdsec.name}} configuration](/Crowdsec/v0/getting_started/concepts/)
+ - [Deploy {{v0X.bouncers.name}} to stop malevolent peers](/Crowdsec/v0/bouncers/)
+ - [FAQ](/faq/)
+
+Don't hesitate to reach out if you're facing issues :
+
+ - [report a bug](https://github.com/crowdsecurity/crowdsec/issues/new?assignees=&labels=bug&template=bug_report.md&title=Bug%2F)
+ - [suggest an improvement](https://github.com/crowdsecurity/crowdsec/issues/new?assignees=&labels=enhancement&template=feature_request.md&title=Improvment%2F)
+ - [ask for help on the forums](https://discourse.crowdsec.net)
diff --git a/docs/v0.3.X/docs/migration.md b/docs/v0.3.X/docs/migration.md
new file mode 100644
index 000000000..b04cc008e
--- /dev/null
+++ b/docs/v0.3.X/docs/migration.md
@@ -0,0 +1,72 @@
+# Migration from v0.X to v1.X
+
+!!! warning
+    Migrating to V1.X will impact the following (any change you made will be lost and must be adapted to the new configuration) :
+    * Database model : your existing database will be lost, a new one will be created in the V1.
+    * {{v1X.crowdsec.Name}} configuration :
+        * `/etc/crowdsec/config/default.yaml` : check [new format](/Crowdsec/v1/references/crowdsec-config/#configuration-format)
+        * `/etc/crowdsec/config/profiles.yaml` : check [new format](/Crowdsec/v1/references/profiles/#profiles-configurations)
+
+To upgrade {{v0X.crowdsec.name}} from v0.X to v1, we'll follow these steps :
+
+#### Back up configuration
+
+```
+sudo cscli backup save /tmp/crowdsec_backup
+sudo cp -R /etc/crowdsec/config/patterns /tmp/crowdsec_backup
+```
+
+#### Uninstall old version & install new
+
+Download the latest V1 {{v0X.crowdsec.name}} version [here]({{v0X.crowdsec.download_url}})
+
+```
+tar xvzf crowdsec-release.tgz
+cd crowdsec-v1*/
+sudo ./wizard.sh --uninstall
+sudo ./wizard.sh --bininstall
+```
+
+!!! warning
+    Don't forget to remove {{v0X.metabase.name}} dashboard if you installed it manually (without {{v0X.cli.name}}).
+
+#### Restore configuration
+
+!!! warning
+    Before restoring the old backup, if you have `local` or `tainted` postoverflows, be aware that they are no longer compatible. You should update the syntax (the community and we are available to help you with this part).
+```
+sudo cscli hub update
+sudo cscli config restore --old-backup /tmp/crowdsec_backup/
+sudo cp -R /tmp/crowdsec_backup/patterns /etc/crowdsec/
+```
+
+#### Start & health check
+
+Finally, you will be able to start the {{v0X.crowdsec.name}} service.
+Before that, just check if {{v1X.lapi.name}} and {{v0X.api.name}} are correctly configured.
+
+```
+ubuntu@ip-:~$ sudo cscli lapi status
+INFO[0000] Loaded credentials from /etc/crowdsec/local_api_credentials.yaml
+INFO[0000] Trying to authenticate with username 941c3fxxxxxxxxxxxxxxxxxxxxxx on http://localhost:8080/
+INFO[0000] You can successfully interact with Local API (LAPI)
+
+ubuntu@ip-:~$ sudo cscli capi status
+INFO[0000] Loaded credentials from /etc/crowdsec/online_api_credentials.yaml
+INFO[0000] Trying to authenticate with username 941c3fxxxxxxxxxxxxxxxxxxxxxxx on https://api.crowdsec.net/
+INFO[0000] You can successfully interact with Central API (CAPI)
+
+ubuntu@ip-:~$ sudo systemctl start crowdsec.service
+ubuntu@ip-:~$ sudo systemctl status crowdsec.service
+```
+
+You can even check logs (located by default here: `/var/log/crowdsec.log` & `/var/log/crowdsec_api.log`).
+
+You can now navigate the documentation to learn new {{v0X.cli.name}} commands to interact with crowdsec.
+
+#### Upgrade {{v0X.bouncers.name}}
+
+If you were using **{{v0X.bouncers.name}}** (formerly called **blocker(s)**), you need to replace them with the new compatible {{v0X.bouncers.name}}, available on the [hub](https://hub.crowdsec.net/browse/#bouncers) (selecting `agent version` to `v1`).
+
+Depending on your bouncer type (netfilter, nginx, wordpress, etc.), you need to replace them with the new {{v0X.bouncers.name}} available on the hub; the {{v0X.bouncers.name}} documentation will help you install them easily.
+
+We're also available to help you upgrade your {{v0X.bouncers.name}} (on [discourse](https://discourse.crowdsec.net/) or [gitter](https://gitter.im/crowdsec-project/community)).
\ No newline at end of file
diff --git a/docs/observability/command_line.md b/docs/v0.3.X/docs/observability/command_line.md
similarity index 92%
rename from docs/observability/command_line.md
rename to docs/v0.3.X/docs/observability/command_line.md
index f775f7371..85ee51ebf 100644
--- a/docs/observability/command_line.md
+++ b/docs/v0.3.X/docs/observability/command_line.md
@@ -1,8 +1,8 @@
```bash
-{{cli.name}} metrics
+{{v0X.cli.name}} metrics
```
-This command provides an overview of {{crowdsec.name}} statistics provided by [prometheus client](/observability/prometheus/). By default it assumes that the {{crowdsec.name}} is installed on the same machine.
+This command provides an overview of {{v0X.crowdsec.name}} statistics provided by [prometheus client](/Crowdsec/v0/observability/prometheus/). By default it assumes that {{v0X.crowdsec.name}} is installed on the same machine.
The metrics are split in 3 main sections :
@@ -20,7 +20,7 @@ The metrics are split in 3 main sections :
As well, if you have scenarios that are never instantiated, it might be a hint that they are not relevant to your configuration.
- {{cli.name}} metrics example
+ {{v0X.cli.name}} metrics example
```bash
INFO[0000] Buckets Metrics:
+-----------------------------------------+-----------+--------------+--------+---------+
diff --git a/docs/observability/dashboard.md b/docs/v0.3.X/docs/observability/dashboard.md
similarity index 65%
rename from docs/observability/dashboard.md
rename to docs/v0.3.X/docs/observability/dashboard.md
index 5f8800e00..4a4909a23 100644
--- a/docs/observability/dashboard.md
+++ b/docs/v0.3.X/docs/observability/dashboard.md
@@ -1,10 +1,10 @@
!!! warning "SQLite & MySQL"
-    The default database of {{crowdsec.Name}} is SQLite. While MySQL is supported as well (>= 0.3.0), it is not in the scope of this documentation.
+    The default database of {{v0X.crowdsec.Name}} is SQLite. While MySQL is supported as well (>= 0.3.0), it is not in the scope of this documentation.
-The {{cli.name}} command `{{cli.bin}} dashboard setup` will use [docker](https://docs.docker.com/get-docker/) to install [metabase docker image](https://hub.docker.com/r/metabase/metabase/) and fetch our metabase template to have a configured and ready dashboard.
+The {{v0X.cli.name}} command `{{v0X.cli.bin}} dashboard setup` will use [docker](https://docs.docker.com/get-docker/) to install the [metabase docker image](https://hub.docker.com/r/metabase/metabase/) and fetch our metabase template to give you a configured and ready dashboard.
## Deployment
@@ -12,7 +12,7 @@ The {{cli.name}} command `{{cli.bin}} dashboard setup` will use [docker](https:/
The metabase dashboard can be setup with :
```bash
-{{cli.bin}} dashboard setup
+{{v0X.cli.bin}} dashboard setup
```
@@ -20,7 +20,7 @@
- {{cli.name}} dashboard setup output
+ {{v0X.cli.name}} dashboard setup output
```bash
INFO[0000] /var/lib/crowdsec/data/metabase.db exists, skip.
@@ -46,7 +46,7 @@ INFO[0034] password: W1XJb8iw1A02U5nW7xxxxXXXxxXXXxXXxxXXXxxxXxXxXxXPdbvQdLlshqq
the `dashboard setup` command will output generated credentials for metabase.
Don't forget to save those !
-Now you can connect to your dashboard, sign-in with your saved credentials then click on {{crowdsec.Name}} Dashboard to get this:
+Now you can connect to your dashboard, sign in with your saved credentials, then click on {{v0X.crowdsec.Name}} Dashboard to get this:
![Dashboard_view](../assets/images/dashboard_view.png)
@@ -54,8 +54,8 @@
-Dashboard docker image can be managed by {{cli.name}} and docker cli also. Look at the {{cli.name}} help command using
+The dashboard docker image can also be managed with {{v0X.cli.name}} and the docker cli. Look at the {{v0X.cli.name}} help command using
```bash
-{{cli.bin}} dashboard -h
+{{v0X.cli.bin}} dashboard -h
```
\ No newline at end of file
diff --git a/docs/v0.3.X/docs/observability/logs.md b/docs/v0.3.X/docs/observability/logs.md
new file mode 100644
index 000000000..a7691d5a3
--- /dev/null
+++ b/docs/v0.3.X/docs/observability/logs.md
@@ -0,0 +1,30 @@
+Logs concern everything that happens to {{v0X.crowdsec.Name}} itself (startup, configuration, events like an IP ban or an alert, shutdown, and so on).
+
+By default, logs are written to `{{v0X.crowdsec.main_log}}`, in text format.
+
+    Logs example
+
+```bash
+time="12-05-2020 15:34:21" level=info msg="setting loglevel to info"
+time="12-05-2020 15:34:21" level=info msg="Crowdsec v0.0.19-9ae496aa9cfd008513976a096accc7cfc43f2d9b"
+time="12-05-2020 15:34:21" level=warning msg="Loading prometheus collectors"
+time="12-05-2020 15:34:23" level=warning msg="no version in /etc/crowdsec/config/parsers/s00-raw/syslog-logs.yaml, assuming '1.0'"
+time="12-05-2020 15:34:23" level=warning msg="Starting profiling and http server"
+time="12-05-2020 15:34:24" level=warning msg="no version in /etc/crowdsec/config/parsers/s00-raw/syslog-logs.yaml, assuming '1.0'"
+time="12-05-2020 15:34:24" level=info msg="Node has no name,author or description. Skipping."
+time="12-05-2020 15:34:24" level=info msg="Loading 2 parser nodes" file=/etc/crowdsec/config/parsers/s00-raw/syslog-logs.yaml
+time="12-05-2020 15:34:24" level=warning msg="no version in /etc/crowdsec/config/parsers/s01-parse/nginx-logs.yaml, assuming '1.0'"
+time="12-05-2020 15:34:24" level=info msg="Loading 3 parser nodes" file=/etc/crowdsec/config/parsers/s01-parse/nginx-logs.yaml
+time="12-05-2020 15:34:24" level=warning msg="no version in /etc/crowdsec/config/parsers/s01-parse/sshd-logs.yaml, assuming '1.0'"
+time="13-05-2020 17:42:53" level=warning msg="24 existing LeakyRoutine"
+time="13-05-2020 18:02:51" level=info msg="Flushed 1 expired entries from Ban Application"
+time="13-05-2020 18:12:46" level=info msg="Flushed 1 expired entries from Ban Application"
+time="13-05-2020 18:20:29" level=warning msg="11.11.11.11 triggered a 4h0m0s ip ban remediation for [crowdsecurity/ssh-bf]" bucket_id=winter-shadow event_time="2020-05-13 18:20:29.855776892 +0200 CEST m=+96112.558589990" scenario=crowdsecurity/ssh-bf source_ip=11.11.11.11
+time="13-05-2020 18:31:26" level=warning msg="22.22.22.22 triggered a 4h0m0s ip ban remediation for [crowdsecurity/ssh-bf]" bucket_id=dry-mountain event_time="2020-05-13 18:31:26.989769738 +0200 CEST m=+96769.692582872" scenario=crowdsecurity/ssh-bf source_ip=22.22.22.22
+time="13-05-2020 18:41:10" level=warning msg="16 existing LeakyRoutine"
+time="13-05-2020 18:46:19" level=warning msg="33.33.33.33 triggered a 4h0m0s ip ban remediation for [crowdsecurity/iptables-scan-multi_ports]" bucket_id=holy-paper event_time="2020-05-13 18:46:19.825693323 +0200 CEST m=+97662.528506421" scenario=crowdsecurity/iptables-scan-multi_ports source_ip=33.33.33.33
+```
\ No newline at end of file diff --git a/docs/v0.3.X/docs/observability/overview.md b/docs/v0.3.X/docs/observability/overview.md new file mode 100644 index 000000000..9763a03c8 --- /dev/null +++ b/docs/v0.3.X/docs/observability/overview.md @@ -0,0 +1,13 @@ +# Observability Overview + +Observability in security software is crucial, especially when this software might take important decision such as blocking IP addresses. + +We attempt to provide good observability of {{v0X.crowdsec.name}}'s behavior : + + - {{v0X.crowdsec.name}} itself exposes a [prometheus instrumentation](/Crowdsec/v0/observability/prometheus/) + - {{v0X.cli.Name}} allows you to view part of prometheus metrics in [cli (`{{v0X.cli.bin}} metrics`)](/Crowdsec/v0/observability/command_line/) + - {{v0X.crowdsec.name}} logging is contextualized for easy processing + - for **humans**, {{v0X.cli.name}} allows you to trivially start a service [exposing dashboards](/Crowdsec/v0/observability/dashboard/) (using [metabase](https://www.metabase.com/)) + +Furthermore, most of {{v0X.crowdsec.name}} configuration should allow you to enable partial debug (ie. per-scenario, per-parser etc.) + diff --git a/docs/observability/prometheus.md b/docs/v0.3.X/docs/observability/prometheus.md similarity index 86% rename from docs/observability/prometheus.md rename to docs/v0.3.X/docs/observability/prometheus.md index 9891d7aaf..a43dd185e 100644 --- a/docs/observability/prometheus.md +++ b/docs/v0.3.X/docs/observability/prometheus.md @@ -1,13 +1,13 @@ -{{crowdsec.name}} can expose a {{prometheus.htmlname}} endpoint for collection (on `http://127.0.0.1:6060/metrics` by default). +{{v0X.crowdsec.name}} can expose a {{v0X.prometheus.htmlname}} endpoint for collection (on `http://127.0.0.1:6060/metrics` by default). -The goal of this endpoint, besides the usual resources consumption monitoring, aims at offering a view of {{crowdsec.name}} "applicative" behavior : +The goal of this endpoint, besides the usual resources consumption monitoring, aims at offering a view of {{v0X.crowdsec.name}} "applicative" behavior : - is it processing a lot of logs ? is it parsing them successfully ? - are a lot of scenarios being triggered ? - are a lot of IPs banned ? - etc. -All the counters are "since {{crowdsec.name}} start". +All the counters are "since {{v0X.crowdsec.name}} start". ### Metrics details @@ -72,7 +72,7 @@ cs_node_hits_ko_total{name="crowdsecurity/http-logs",source="/var/log/nginx/erro #### Info - - `cs_info` : Information about {{crowdsec.name}} (software version) + - `cs_info` : Information about {{v0X.crowdsec.name}} (software version) ### Exploitation with prometheus server & grafana diff --git a/docs/references/output.md b/docs/v0.3.X/docs/references/output.md similarity index 73% rename from docs/references/output.md rename to docs/v0.3.X/docs/references/output.md index d033d8aef..658ace32f 100644 --- a/docs/references/output.md +++ b/docs/v0.3.X/docs/references/output.md @@ -1,11 +1,11 @@ # Output -The output mechanism is composed of plugins. In order to store the bans for {{bouncers.htmlname}}, at least one backend plugin must be loaded. Else, bans will not be stored and decisions cannot be applied. +The output mechanism is composed of plugins. In order to store the bans for {{v0X.bouncers.htmlname}}, at least one backend plugin must be loaded. Else, bans will not be stored and decisions cannot be applied. Currently the supported backends are SQLite (default) and MySQL. 
-In order to filter which alerts will be sent to which plugin, {{crowdsec.name}} use a system of `profile` that can allow to granularly process your bans and alerts.
+In order to filter which signals will be sent to which plugin, {{v0X.crowdsec.name}} uses a system of `profile` that allows you to granularly process your bans and signals.
 
 ## Profile
 
@@ -14,7 +14,7 @@ Here is a sample of a profile configuration:
 ```yaml
 profile: 
 filter: ""
-api: true # default true : send alert to crowdsec API
+api: true # default true : send signal to crowdsec API
 outputs:  # here choose your output plugins for this profile
  - plugin: plugin1
    custom_config: 
@@ -22,7 +22,7 @@ outputs:  # here choose your output plugins for this profile
 
 ```
 
-The default configuration that is deployed with {{crowdsec.name}} is the following one:
+The default configuration that is deployed with {{v0X.crowdsec.name}} is the following one:
 ```yaml
 profile: default_remediation
 filter: "sig.Labels.remediation == 'true'"
@@ -44,13 +44,13 @@ outputs:
 store: false
 ```
 
-Here we can use {{filter.htmlname}} like in parsers and scenarios with the {{signal.htmlname}} object to choose which alert will be process by which plugin.
+Here we can use {{v0X.filter.htmlname}} like in parsers and scenarios with the {{v0X.signal.htmlname}} object to choose which signal will be processed by which plugin.
 
 
# Backend database configuration
 
-The `/etc/crowdsec/plugins/backend/database.yaml` file allows you to configure to which backend database you'd like to write. {{crowdsec.Name}} support SQLite and MySQL via [gorm](https://gorm.io/docs/).
+The `/etc/crowdsec/plugins/backend/database.yaml` file allows you to configure which backend database you'd like to write to. {{v0X.crowdsec.Name}} supports SQLite and MySQL via [gorm](https://gorm.io/docs/).
 
 ```yaml
 name: database
@@ -121,12 +121,12 @@ config:
 
# Plugins
 
-Plugins configuration file are stored in `{{plugins.configpath}}`. {{crowdsec.Name}} will scan this folder to load all the plugins. Each configuration file should provide the path to the plugin binary. By default they are stored in `{{plugins.binpath}}`.
+Plugin configuration files are stored in `{{v0X.plugins.configpath}}`. {{v0X.crowdsec.Name}} will scan this folder to load all the plugins. Each configuration file should provide the path to the plugin binary. By default, they are stored in `{{v0X.plugins.binpath}}`.
 
 !!! info
-    If you want crowdsec to not load a plugin, `mv` or `rm` its configuration file in `{{plugins.configpath}}`
+    If you don't want crowdsec to load a plugin, `mv` or `rm` its configuration file in `{{v0X.plugins.configpath}}`
 
-Here is a sample of a plugin configuration file stored in `{{plugins.configpath}}`:
+Here is a sample of a plugin configuration file stored in `{{v0X.plugins.configpath}}`:
 
 ```yaml
 name: 
 path: # 
diff --git a/docs/references/parsers.md b/docs/v0.3.X/docs/references/parsers.md
similarity index 91%
rename from docs/references/parsers.md
rename to docs/v0.3.X/docs/references/parsers.md
index ba354b705..abca9a9c8 100644
--- a/docs/references/parsers.md
+++ b/docs/v0.3.X/docs/references/parsers.md
@@ -1,11 +1,11 @@
 
-A parser is a YAML configuration file that describes how a string is being parsed. Said string can be a log line, or a field extracted from a previous parser. 
While a lot of parsers rely on the **GROK** approach (a.k.a regular expression named capture groups), parsers can as well reference enrichment modules to allow specific data processing, or use specific {{expr.htmlname}} feature to perform parsing on specific data, such as JSON. +A parser is a YAML configuration file that describes how a string is being parsed. Said string can be a log line, or a field extracted from a previous parser. While a lot of parsers rely on the **GROK** approach (a.k.a regular expression named capture groups), parsers can as well reference enrichment modules to allow specific data processing, or use specific {{v0X.expr.htmlname}} feature to perform parsing on specific data, such as JSON. Parsers are organized into stages to allow pipelines and branching in parsing. -See the [{{hub.name}}]({{hub.url}}) to explore parsers, or see below some examples : +See the [{{v0X.hub.name}}]({{v0X.hub.url}}) to explore parsers, or see below some examples : - [apache2 access/error log parser](https://github.com/crowdsecurity/hub/blob/master/parsers/s01-parse/crowdsecurity/apache2-logs.yaml) - [iptables logs parser](https://github.com/crowdsecurity/hub/blob/master/parsers/s01-parse/crowdsecurity/iptables-logs.yaml) @@ -14,7 +14,7 @@ See the [{{hub.name}}]({{hub.url}}) to explore parsers, or see below some exampl ## Stages -Stages concept is central to data parsing in {{crowdsec.name}}, as it allows to have various "steps" of parsing. All parsers belong to a given stage. While users can add or modify the stages order, the following stages exist : +Stages concept is central to data parsing in {{v0X.crowdsec.name}}, as it allows to have various "steps" of parsing. All parsers belong to a given stage. While users can add or modify the stages order, the following stages exist : - `s00-raw` : low level parser, such as syslog - `s01-parse` : most of the services parsers (ssh, nginx etc.) @@ -65,8 +65,8 @@ statics: expression: "evt.Parsed.src_ip" ``` -The parser nodes are processed sequentially based on the alphabetical order of {{stage.htmlname}} and subsequent files. -If the node is considered successful (grok is present and returned data or no grok is present) and "onsuccess" equals to `next_stage`, then the {{event.name}} is moved to the next stage. +The parser nodes are processed sequentially based on the alphabetical order of {{v0X.stage.htmlname}} and subsequent files. +If the node is considered successful (grok is present and returned data or no grok is present) and "onsuccess" equals to `next_stage`, then the {{v0X.event.name}} is moved to the next stage. ## Parser trees @@ -200,7 +200,7 @@ DEBU[31-07-2020 16:36:28] evt.Parsed.program = 'nginx' id=withe filter: expression ``` -`filter` must be a valid {{expr.htmlname}} expression that will be evaluated against the {{event.htmlname}}. +`filter` must be a valid {{v0X.expr.htmlname}} expression that will be evaluated against the {{v0X.event.htmlname}}. If `filter` evaluation returns true or is absent, node will be processed. @@ -228,11 +228,11 @@ grok: apply_on: source_field ``` -The `grok` structure in a node represent a regular expression with capture group (grok pattern) that must be applied on a field of {{event.name}}. +The `grok` structure in a node represent a regular expression with capture group (grok pattern) that must be applied on a field of {{v0X.event.name}}. 
The pattern can : - - be imported by name (if present within the core of {{crowdsec.name}}) + - be imported by name (if present within the core of {{v0X.crowdsec.name}}) - defined in place In both case, the pattern must be a valid RE2 expression. @@ -268,7 +268,7 @@ onsuccess: next_stage|continue _default: continue_ -if set to `next_stage` and the node is considered successful, the {{event.name}} will be moved directly to next stage without processing other nodes in the current stage. +if set to `next_stage` and the node is considered successful, the {{v0X.event.name}} will be moved directly to next stage without processing other nodes in the current stage. ### pattern_syntax @@ -306,7 +306,7 @@ Each entry of the list is composed of a target (where to write) and a source (wh **Target** -The target aims at being any part of the {{event.htmlname}} object, and can be expressed in different ways : +The target aims at being any part of the {{v0X.event.htmlname}} object, and can be expressed in different ways : - `meta: ` - `parsed: ` @@ -319,7 +319,7 @@ The target aims at being any part of the {{event.htmlname}} object, and can be e **Source** - The source itself can be either a static value, or an {{expr.htmlname}} result : + The source itself can be either a static value, or an {{v0X.expr.htmlname}} result : ```yaml statics: @@ -341,7 +341,7 @@ data: ``` `data` allows user to specify an external source of data. -This section is only relevant when `cscli` is used to install parser from hub, as it will download the `source_url` and store it to `dest_file`. When the parser is not installed from the hub, {{crowdsec.name}} won't download the URL, but the file must exist for the parser to be loaded correctly. +This section is only relevant when `cscli` is used to install parser from hub, as it will download the `source_url` and store it to `dest_file`. When the parser is not installed from the hub, {{v0X.crowdsec.name}} won't download the URL, but the file must exist for the parser to be loaded correctly. The `type` is mandatory if you want to evaluate the data in the file, and should be `regex` for valid (re2) regular expression per line or `string` for string per line. The regexps will be compiled, the strings will be loaded into a list and both will be kept in memory. diff --git a/docs/references/plugins_api.md b/docs/v0.3.X/docs/references/plugins_api.md similarity index 95% rename from docs/references/plugins_api.md rename to docs/v0.3.X/docs/references/plugins_api.md index 23885906d..a807c32bd 100644 --- a/docs/references/plugins_api.md +++ b/docs/v0.3.X/docs/references/plugins_api.md @@ -1,7 +1,7 @@ ## Foreword Output plugins handle Signal Occurences resulting from bucket overflows. -This allows to either make a simple notification/alerting plugin or fully manage a backend (this is what {{crowdsec.name}} uses to manage SQLite and MySQL). +This allows to either make a simple notification/alerting plugin or fully manage a backend (this is what {{v0X.crowdsec.name}} uses to manage SQLite and MySQL). You can create your own plugins to perform specific actions when a scenario is triggered. @@ -27,7 +27,7 @@ type Backend interface { > Startup/shutdown methods - `Init` : called at startup time and receives the custom configuration as a string map. Errors aren't fatal, but plugin will be discarded. 
- - `Shutdown` : called when {{crowdsec.Name}} is shutting down or restarting
+ - `Shutdown` : called when {{v0X.crowdsec.Name}} is shutting down or restarting
 
 > Writing/Deleting events
 
@@ -42,7 +42,7 @@ type Backend interface {
 
 > Backend
 
- - `Flush` is called regulary by crowdsec for each plugin that received events. For example it will be called after each write in `cscli` (as it's one-shot) and every few hundreds of ms / few events in {{crowdsec.name}} itself. It might be a good place to deal with slower write operations.
+ - `Flush` is called regularly by crowdsec for each plugin that received events. For example, it will be called after each write in `cscli` (as it's one-shot) and every few hundred ms / few events in {{v0X.crowdsec.name}} itself. It might be a good place to deal with slower write operations.
 
 ## Configurations
 
diff --git a/docs/references/scenarios.md b/docs/v0.3.X/docs/references/scenarios.md
similarity index 84%
rename from docs/references/scenarios.md
rename to docs/v0.3.X/docs/references/scenarios.md
index e0bb81693..c23ca77bc 100644
--- a/docs/references/scenarios.md
+++ b/docs/v0.3.X/docs/references/scenarios.md
@@ -3,18 +3,18 @@
 
 Scenarios are YAML files that allow you to detect and qualify a specific behavior, usually an attack.
 
-Scenarios receive {{event.htmlname}}(s) and can produce {{overflow.htmlname}}(s) using the [leaky bucket](https://en.wikipedia.org/wiki/Leaky_bucket) algorithm.
+Scenarios receive {{v0X.event.htmlname}}(s) and can produce {{v0X.overflow.htmlname}}(s) using the [leaky bucket](https://en.wikipedia.org/wiki/Leaky_bucket) algorithm.
 
-As an {{event.htmlname}} can be the representation of a log line, or an overflow, it allows scenarios to process both logs or overflows to allow inference.
+As an {{v0X.event.htmlname}} can be the representation of a log line, or an overflow, it allows scenarios to process both logs and overflows to allow inference.
 
 Scenarios can be of different types (leaky, trigger, counter), and are based on various factors, such as :
 
 - the speed/frequency of the [leaky bucket](https://en.wikipedia.org/wiki/Leaky_bucket)
 - the capacity of the [leaky bucket](https://en.wikipedia.org/wiki/Leaky_bucket)
- - the characteristic(s) of eligible {{event.htmlname}}(s) : "log type XX with field YY set to ZZ"
- - various filters/directives that can alter the bucket's behavior, such as [groupby](/references/scenarios/#groupby), [distinct](references/scenarios/#distinct) or [blackhole](/references/scenarios/#blackhole)
+ - the characteristic(s) of eligible {{v0X.event.htmlname}}(s) : "log type XX with field YY set to ZZ"
+ - various filters/directives that can alter the bucket's behavior, such as [groupby](/Crowdsec/v0/references/scenarios/#groupby), [distinct](/Crowdsec/v0/references/scenarios/#distinct) or [blackhole](/Crowdsec/v0/references/scenarios/#blackhole)
 
-Behind the scenes, {{crowdsec.name}} is going to create one or more buckets when events with matching characteristics arrive to the scenario. When any of these buckets overflows, the scenario has been triggered.
+Behind the scenes, {{v0X.crowdsec.name}} is going to create one or more buckets when events with matching characteristics arrive in the scenario. When any of these buckets overflows, the scenario has been triggered.
 
 _Bucket partitioning_ : One scenario usually leads to the creation of many buckets, as each bucket only tracks a specific subset of events. For example, if we are tracking brute-force, each "offending peer" gets its own bucket.
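To make the partitioning concrete, here is a minimal leaky-bucket sketch (an editor's illustration, not a hub scenario; the name, filter value and thresholds are hypothetical): the `groupby` expression gives each source IP its own bucket, so only that peer's own events can make it overflow.

```yaml
# illustrative sketch of per-IP bucket partitioning, not a real hub scenario
type: leaky
name: me/example-ssh-bf
description: "example: per-source-IP ssh bruteforce tracking"
filter: "evt.Meta.log_type == 'ssh_failed-auth'"
groupby: evt.Meta.source_ip   # one bucket per offending source IP
capacity: 5                   # a 6th event before the bucket leaks triggers an overflow
leakspeed: 10s
labels:
  remediation: true
```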
@@ -52,7 +52,7 @@ labels:
 
 ### TimeMachine
 
-{{crowdsec.name}} can be used not only to process live logs, but as well to process "cold" logs (think forensics).
+{{v0X.crowdsec.name}} can be used not only to process live logs, but as well to process "cold" logs (think forensics).
 For this to work, the date/time from the log must have been properly parsed so that the scenario's temporal aspect can work properly.
 This relies on the [dateparser enrichment](https://github.com/crowdsecurity/hub/blob/master/parsers/s02-enrich/crowdsecurity/dateparse-enrich.yaml)
 
@@ -68,9 +68,9 @@ type: leaky|trigger|counter
 
 Defines the type of the bucket. Currently three types are supported :
 
- - `leaky` : a [leaky bucket](https://en.wikipedia.org/wiki/Leaky_bucket) that must be configured with a {{capacity.htmlname}} and a {{leakspeed.htmlname}}
+ - `leaky` : a [leaky bucket](https://en.wikipedia.org/wiki/Leaky_bucket) that must be configured with a {{v0X.capacity.htmlname}} and a {{v0X.leakspeed.htmlname}}
 - `trigger` : a bucket that overflows as soon as an event is poured (it's like a leaky bucket with a capacity of 0)
- - `counter` : a bucket that only overflows every {{duration.htmlname}}. It's especially useful to count things.
+ - `counter` : a bucket that only overflows every {{v0X.duration.htmlname}}. It's especially useful to count things.
 
 ### name & description
 
@@ -90,7 +90,7 @@ The name must be unique (and will define the scenario's name in the hub), and th
 
filter: expression
```
 
-`filter` must be a valid {{expr.htmlname}} expression that will be evaluated against the {{event.htmlname}}.
+`filter` must be a valid {{v0X.expr.htmlname}} expression that will be evaluated against the {{v0X.event.htmlname}}.
 
 If `filter` evaluation returns true or is absent, the event will be poured into the bucket.
 
@@ -139,7 +139,7 @@ groupby: evt.Meta.source_ip
```
 
-an {{expr.htmlname}} that must return a string. This string will be used as to partition the buckets.
+an {{v0X.expr.htmlname}} that must return a string. This string will be used to partition the buckets.
 
 Examples :
 
@@ -174,7 +174,7 @@ distinct: evt.Meta.http_path
```
 
-an {{expr.htmlname}} that must return a string. The event will be poured **only** if the string is not already present in the bucket.
+an {{v0X.expr.htmlname}} that must return a string. The event will be poured **only** if the string is not already present in the bucket.
 
 Examples :
 
@@ -340,7 +340,7 @@ It is useful when you want to have further scenarios that will rely on past-over
 
cache_size: 5
```
 
-By default, a bucket holds {{capacity.htmlname}} events "in memory".
+By default, a bucket holds {{v0X.capacity.htmlname}} events "in memory".
 However, for a number of cases, you don't want this, as it might lead to excessive memory consumption.
 By setting `cache_size` to a positive integer, we can control the maximum in-memory cache size of the bucket, without changing its capacity and such. This is especially useful when using `counter` buckets over long durations that might end up counting (and thus storing in memory) a large number of events.
 
@@ -352,7 +352,7 @@ By setting `cache_size` to a positive integer, we can control the maximum in-mem
 
overflow_filter: any(queue.Queue, { .Enriched.IsInEU == "true" })
```
 
-`overflow_filter` is an {{expr.htmlname}} that is run when the bucket overflows.
+`overflow_filter` is an {{v0X.expr.htmlname}} that is run when the bucket overflows.
 If this expression is present and returns false, the overflow will be discarded.
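For instance, a sketch like the one below (the `.Parsed.request` field is hypothetical and depends on what your parser captures) would keep only overflows in which at least one queued event targeted an admin path:

```yaml
# hypothetical: discard the overflow unless an /admin URI was seen in the queue
overflow_filter: any(queue.Queue, { .Parsed.request contains "/admin" })
```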
@@ -366,7 +366,7 @@ data:
 ```
 
 `data` allows the user to specify an external source of data.
-This section is only relevant when `cscli` is used to install scenario from hub, as ill download the `source_url` and store it to `dest_file`. When the scenario is not installed from the hub, {{crowdsec.name}} won't download the URL, but the file must exist for the scenario to be loaded correctly.
+This section is only relevant when `cscli` is used to install a scenario from the hub, as it will download the `source_url` and store it to `dest_file`. When the scenario is not installed from the hub, {{v0X.crowdsec.name}} won't download the URL, but the file must exist for the scenario to be loaded correctly.
 The `type` is mandatory if you want to evaluate the data in the file, and should be `regex` for valid (re2) regular expression per line or `string` for string per line. The regexps will be compiled, the strings will be loaded into a list and both will be kept in memory.
 Without specifying a `type`, the file will be downloaded and stored as file and not in memory.
diff --git a/docs/v0.3.X/docs/write_configurations/acquisition.md b/docs/v0.3.X/docs/write_configurations/acquisition.md
new file mode 100644
index 000000000..ad49db74d
--- /dev/null
+++ b/docs/v0.3.X/docs/write_configurations/acquisition.md
@@ -0,0 +1,41 @@
+# Write the acquisition file (optional for test)
+
+In order for your log to be processed by the right parser, it must match the filter that you will configure in your parser file.
+There are two options:
+
+ - Your logs are written by a syslog server, so you just have to install the [syslog parser](https://master.d3padiiorjhf1k.amplifyapp.com/author/crowdsecurity/configurations/syslog-logs)
+ - Your logs are read from a log file. Please add this kind of configuration in your `acquis.yaml` file:
+
+ⓘ the `type` will be matched by the parser's `filter` in stage `s01-parse`.
+
+
+```yaml
+---
+filename: <path_to_your_log_file>
+labels:
+  type: <type_of_your_log>
+
+```
+Here is an example:
+
+<details>
+  <summary>Nginx acquisition</summary>
+
+```yaml
+---
+filename: /var/log/nginx/access.log
+labels:
+  type: nginx
+```
+
+</details>
+
+<details>
+  <summary>Nginx parser filter</summary>
+
+```yaml
+---
+filter: evt.Parsed.program == 'nginx'
+```
+
+</details>
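A quick way to check that the acquisition `type` and the parser's `filter` line up is to replay the log file in single-shot (time machine) mode, following the `-file`/`-type` usage shown elsewhere in this repository; the paths below are illustrative:

```bash
# replay the nginx access log; -type must match the acquisition label so that
# the s01-parse filter (evt.Parsed.program == 'nginx') selects the lines
sudo crowdsec -c /etc/crowdsec/config/user.yaml -file /var/log/nginx/access.log -type nginx
```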
diff --git a/docs/v0.3.X/docs/write_configurations/expressions.md b/docs/v0.3.X/docs/write_configurations/expressions.md new file mode 100644 index 000000000..17a9a18db --- /dev/null +++ b/docs/v0.3.X/docs/write_configurations/expressions.md @@ -0,0 +1,62 @@ +# Expressions + +> [antonmedv/expr](https://github.com/antonmedv/expr) - Expression evaluation engine for Go: fast, non-Turing complete, dynamic typing, static typing + +Several places of {{v0X.crowdsec.name}}'s configuration use [expr](https://github.com/antonmedv/expr), notably : + + - {{v0X.filter.Htmlname}} that are used to determine events eligibility in {{v0X.parsers.htmlname}} and {{v0X.scenarios.htmlname}} or `profiles` + - {{v0X.statics.Htmlname}} use expr in the `expression` directive, to compute complex values + - {{v0X.whitelists.Htmlname}} rely on `expression` directive to allow more complex whitelists filters + +To learn more about [expr](https://github.com/antonmedv/expr), [check the github page of the project](https://github.com/antonmedv/expr/blob/master/docs/Language-Definition.md). + + +When {{v0X.crowdsec.name}} relies on `expr`, a context is provided to let the expression access relevant objects : + + - `evt.` is the representation of the current {{v0X.event.htmlname}} and is the most relevant object + - in [profiles](/Crowdsec/v0/references/output/#profile), {{v0X.signal.htmlname}} is accessible via the `sig.` object + +If the `debug` is enabled (in the scenario or parser where expr is used), additional debug will be displayed regarding evaluated expressions. + + +# Helpers + +In order to makes its use in {{v0X.crowdsec.name}} more efficient, we added a few helpers that are documented bellow. + +## Atof(string) float64 + +Parses a string representation of a float number to an actual float number (binding on `strconv.ParseFloat`) + +> Atof(evt.Parsed.tcp_port) + + +## JsonExtract(JsonBlob, FieldName) string + +Extract the `FieldName` from the `JsonBlob` and returns it as a string. (binding on [jsonparser](https://github.com/buger/jsonparser/)) + +> JsonExtract(evt.Parsed.some_json_blob, "foo.bar[0].one_item") + +## File(FileName) []string + +Returns the content of `FileName` as an array of string, while providing cache mechanism. + +> evt.Parsed.some_field in File('some_patterns.txt') +> any(File('rdns_seo_bots.txt'), { evt.Enriched.reverse_dns endsWith #}) + +## RegexpInFile(StringToMatch, FileName) bool + +Returns `true` if the `StringToMatch` is matched by one of the expressions contained in `FileName` (uses RE2 regexp engine). + +> RegexpInFile( evt.Enriched.reverse_dns, 'my_legit_seo_whitelists.txt') + +## Upper(string) string + +Returns the uppercase version of the string + +> Upper("yop") + +## IpInRange(IPStr, RangeStr) bool + +Returns true if the IP `IPStr` is contained in the IP range `RangeStr` (uses `net.ParseCIDR`) + +> IpInRange("1.2.3.4", "1.2.3.0/24") diff --git a/docs/write_configurations/parsers.md b/docs/v0.3.X/docs/write_configurations/parsers.md similarity index 89% rename from docs/write_configurations/parsers.md rename to docs/v0.3.X/docs/write_configurations/parsers.md index 2e143150d..3846b89c1 100644 --- a/docs/write_configurations/parsers.md +++ b/docs/v0.3.X/docs/write_configurations/parsers.md @@ -1,8 +1,8 @@ -# Writing {{crowdsec.Name}} parser +# Writing {{v0X.crowdsec.Name}} parser !!! warning "Parser dependency" The crowdsecurity/syslog-logs parsers is needed by the core parsing - engine. Deletion or modification of this could result of {{crowdsec.name}} + engine. 
Deletion or modification of this could result of {{v0X.crowdsec.name}} being unable to parse logs, so this should be done very carefully. > In the current example, we'll write a parser for the logs produced by `iptables` (netfilter) with the `-j LOG` target. @@ -29,10 +29,10 @@ statics: value: yes ``` - - a {{filter.htmlname}} : if the expression is `true`, the event will enter the parser, otherwise, it won't - - a {{onsuccess.htmlname}} : defines what happens when the {{event.htmlname}} was successfully parsed : shall we continue ? shall we move to next stage ? etc. + - a {{v0X.filter.htmlname}} : if the expression is `true`, the event will enter the parser, otherwise, it won't + - a {{v0X.onsuccess.htmlname}} : defines what happens when the {{v0X.event.htmlname}} was successfully parsed : shall we continue ? shall we move to next stage ? etc. - a name & a description - - some {{statics.htmlname}} that will modify the {{event.htmlname}} + - some {{v0X.statics.htmlname}} that will modify the {{v0X.event.htmlname}} - a `debug` flag that allows to enable local debugging information. @@ -49,9 +49,9 @@ May 11 16:23:50 sd-126005 kernel: [47615902.763137] IN=enp1s0 OUT= MAC=00:08:a2: For example it can be `~/crowdsec-v0.0.19/tests/config/parsers/s01-parser/myparser.yaml`, or `/etc/crowdsec/config/parsers/s01-parser/myparser.yaml`. - The {{stage.htmlname}} directory might not exist, don't forget to create it. + The {{v0X.stage.htmlname}} directory might not exist, don't forget to create it. -(deployment is assuming [you're using a test environment](/write_configurations/requirements/)) +(deployment is assuming [you're using a test environment](/Crowdsec/v0/write_configurations/requirements/)) Setting up our new parser : ```bash @@ -85,11 +85,11 @@ DEBU[0000] move Event from stage s01-parser to s02-enrich id=dark-water name=me We can see our "mock" parser is working, let's see what happened : - The event enter the node - - The `filter` returned true (`1 == 1`) so the {{event.htmlname}} will be processed + - The `filter` returned true (`1 == 1`) so the {{v0X.event.htmlname}} will be processed - Our grok pattern (just a `.*` capture) "worked" and captured data (the whole line actually) - - The grok captures (under the name "some_data") are merged into the `.Parsed` map of the {{event.htmlname}} - - The {{statics.htmlname}} section is processed, and `.Parsed[is_my_service]` is set to `yes` - - The {{event.htmlname}} leaves the parser successfully, and because "next_stage" is set, we move the event to the next "stage" + - The grok captures (under the name "some_data") are merged into the `.Parsed` map of the {{v0X.event.htmlname}} + - The {{v0X.statics.htmlname}} section is processed, and `.Parsed[is_my_service]` is set to `yes` + - The {{v0X.event.htmlname}} leaves the parser successfully, and because "next_stage" is set, we move the event to the next "stage" ## Writing the GROK pattern @@ -198,7 +198,7 @@ statics: ### filter -We changed the {{filter.htmlname}} to correctly filter on the program name. +We changed the {{v0X.filter.htmlname}} to correctly filter on the program name. 
In the current example, our logs are produced by the kernel (netfilter), and thus the program is `kernel` : ```bash @@ -214,7 +214,7 @@ We are setting various entries to static or dynamic values to give "context" to - `.Meta.source_ip` is set the the source ip captured `.Parsed.src_ip` - `.Meta.service` is set the the result of an expression that relies on the GROK output (`proto` field) -Look into dedicated {{statics.htmlname}} documentation to know more about its possibilities. +Look into dedicated {{v0X.statics.htmlname}} documentation to know more about its possibilities. ### Testing our finalized parser @@ -252,8 +252,8 @@ DEBU[0000] move Event from stage s01-parser to s02-enrich id=shy-forest name=cr ## Closing word -We have now a fully functional parser for {{crowdsec.name}} ! -We can either deploy it to our production systems to do stuff, or even better, contribute to the {{hub.htmlname}} ! +We have now a fully functional parser for {{v0X.crowdsec.name}} ! +We can either deploy it to our production systems to do stuff, or even better, contribute to the {{v0X.hub.htmlname}} ! -If you want to know more about directives and possibilities, take a look at [the parser reference documentation](/references/parsers/) ! +If you want to know more about directives and possibilities, take a look at [the parser reference documentation](/Crowdsec/v0/references/parsers/) ! diff --git a/docs/write_configurations/requirements.md b/docs/v0.3.X/docs/write_configurations/requirements.md similarity index 92% rename from docs/write_configurations/requirements.md rename to docs/v0.3.X/docs/write_configurations/requirements.md index a892e5cc9..68f695c00 100644 --- a/docs/write_configurations/requirements.md +++ b/docs/v0.3.X/docs/write_configurations/requirements.md @@ -5,7 +5,7 @@ ## Create the test environment -First of all, please [download the latest release of {{crowdsec.name}}](https://github.com/crowdsecurity/crowdsec/releases). +First of all, please [download the latest release of {{v0X.crowdsec.name}}](https://github.com/crowdsecurity/crowdsec/releases). Then run the following commands: @@ -22,7 +22,7 @@ cd ./crowdsec-vX.Y/ cd ./tests/ ``` -The `./test_env.sh` script creates a local (non privileged) working environement for {{crowdsec.name}} and {{cli.name}}. +The `./test_env.sh` script creates a local (non privileged) working environement for {{v0X.crowdsec.name}} and {{v0X.cli.name}}. The deployed environment is intended to write and test parsers and scenarios easily. @@ -80,12 +80,12 @@ s02-enrich/rdns.yaml - The preliminary stage (`s00-raw`) is mostly the one that will parse the structure of the log. This is where [syslog-logs](https://hub.crowdsec.net/author/crowdsecurity/configurations/syslog-logs) are parsed for example. Such a parser will parse the syslog header to detect the program source. -- The main stage (`s01-parse`) is the one that will parse actual applications logs and output parsed data and static assigned values. There is one parser for each type of software. To parse the logs, regexp or GROK pattern are used. If the parser is configured to go to the [`next_stage`](/references/parsers/#onsuccess), then it will be process by the `enrichment` stage. +- The main stage (`s01-parse`) is the one that will parse actual applications logs and output parsed data and static assigned values. There is one parser for each type of software. To parse the logs, regexp or GROK pattern are used. 
If the parser is configured to go to the [`next_stage`](/Crowdsec/v0/references/parsers/#onsuccess), then it will be process by the `enrichment` stage. - The enrichment (`s02-enrich`) stage is the one that will enrich the normalized log (we call it an event now that it is normalized) in order to get more information for the heuristic process. This stage can be composed of grok patterns and so on, but as well of plugins that can be writen by the community (geiop enrichment, rdns ...) for example [geoip-enrich](https://hub.crowdsec.net/author/crowdsecurity/configurations/geoip-enrich). -You can now jump to the next step : [writing our own parser !](/write_configurations/parsers/) +You can now jump to the next step : [writing our own parser !](/Crowdsec/v0/write_configurations/parsers/) ### Custom stage diff --git a/docs/write_configurations/scenarios.md b/docs/v0.3.X/docs/write_configurations/scenarios.md similarity index 93% rename from docs/write_configurations/scenarios.md rename to docs/v0.3.X/docs/write_configurations/scenarios.md index f822be380..7769289af 100644 --- a/docs/write_configurations/scenarios.md +++ b/docs/v0.3.X/docs/write_configurations/scenarios.md @@ -1,9 +1,9 @@ -# Writing {{crowdsec.Name}} scenarios +# Writing {{v0X.crowdsec.Name}} scenarios !!! info Please ensure that you have working env or setup test environment before writing your scenario. - Ensure that [your logs are properly parsed](/write_configurations/parsers/). + Ensure that [your logs are properly parsed](/Crowdsec/v0/write_configurations/parsers/). Have some sample logs at hand reach to test your scenario as you progress. @@ -33,7 +33,7 @@ labels: type: my_test ``` - - a {{filter.htmlname}} : if the expression is `true`, the event will enter the scenario, otherwise, it won't + - a {{v0X.filter.htmlname}} : if the expression is `true`, the event will enter the scenario, otherwise, it won't - a name & a description - a capacity for our [Leaky Bucket](https://en.wikipedia.org/wiki/Leaky_bucket) - a leak speed for our [Leaky Bucket](https://en.wikipedia.org/wiki/Leaky_bucket) @@ -94,13 +94,13 @@ We can see our "mock" scenario is working, let's see what happened : - The first event (parsed line) is processed : - - The `filter` returned true (`evt.Meta.log_type == 'iptables_drop'`) so the {{event.htmlname}} will be processed by our bucket - - The bucket is instantiated in {{timeMachine.htmlname}} mode, and its creation date is set to the timestamp from the first log - - The {{event.htmlname}} is poured in the actual bucket + - The `filter` returned true (`evt.Meta.log_type == 'iptables_drop'`) so the {{v0X.event.htmlname}} will be processed by our bucket + - The bucket is instantiated in {{v0X.timeMachine.htmlname}} mode, and its creation date is set to the timestamp from the first log + - The {{v0X.event.htmlname}} is poured in the actual bucket - The second event is processed - The `filter` is still true, and the event is poured - - As our bucket's capacity is `1`, pouring this second overflow leads to an {{overflow.htmlname}} + - As our bucket's capacity is `1`, pouring this second overflow leads to an {{v0X.overflow.htmlname}} - Because we set a blackhole directive of `1 minute`, we remember to prevent this bucket to overflowing again for the next minute The overflow itself is produced and we get this message : @@ -135,7 +135,7 @@ labels: What did we change ? 
- - we added a meaningful filter : we are only going to look into `iptables_drop` events, and only take care of `tcp` ones (see the parser we wrote in the [previous step](/write_configurations/parsers/)) + - we added a meaningful filter : we are only going to look into `iptables_drop` events, and only take care of `tcp` ones (see the parser we wrote in the [previous step](/Crowdsec/v0/write_configurations/parsers/)) - we added a `groupby` directive : it's going to ensure that each offending peer get its own bucket @@ -296,17 +296,17 @@ labels: ``` -Adding `remediation: true` into the labels tells {{crowdsec.name}} that we should write a ban for the IP when the scenario is triggered ! +Adding `remediation: true` into the labels tells {{v0X.crowdsec.name}} that we should write a ban for the IP when the scenario is triggered ! Let's try : - - I copied the yaml file to a production system (`/etc/crowdsec/config/scenarios/mytest.yaml`) - - I restart {{crowdsec.name}} (`systemctl reload crowdsec`) + - I copied the yaml file to a production system (`/etc/crowdsec/crowdsec/scenarios/mytest.yaml`) + - I restart {{v0X.crowdsec.name}} (`systemctl reload crowdsec`) Let's check if it seems correctly enabled : ```bash -$ {{cli.bin}} list +$ {{v0X.cli.bin}} list ... INFO[0000] SCENARIOS: ---------------------------------------------------------------------------------------------------------------------------------- @@ -318,7 +318,7 @@ INFO[0000] SCENARIOS: ``` -Let's launch (from an external machine, as {{crowdsec.name}} ignores events from private IPs by default) a real port-scan with a good old `nmap` : +Let's launch (from an external machine, as {{v0X.crowdsec.name}} ignores events from private IPs by default) a real port-scan with a good old `nmap` : ```bash sudo nmap -sS xx.xx.xx.xx @@ -333,7 +333,7 @@ $ tail -f /var/log/crowdsec.log time="12-05-2020 12:31:43" level=warning msg="xx.xx.16.6 triggered a 4h0m0s ip ban remediation for [me/my-cool-scenario]" bucket_id=wispy-breeze event_time="2020-05-12 12:31:43.953498645 +0200 CEST m=+64.533521568" scenario=me/my-cool-scenario source_ip=xx.xx.16.6 ... 
^C -$ {{cli.bin}} ban list +$ {{v0X.cli.bin}} ban list INFO[0000] backend plugin 'database' loaded 8 local decisions: +--------+-----------------+----------------------+------+--------+---------+--------------------------+--------+------------+ diff --git a/docs/write_configurations/whitelist.md b/docs/v0.3.X/docs/write_configurations/whitelist.md similarity index 94% rename from docs/write_configurations/whitelist.md rename to docs/v0.3.X/docs/write_configurations/whitelist.md index ae0e74136..7ea157aea 100644 --- a/docs/write_configurations/whitelist.md +++ b/docs/v0.3.X/docs/write_configurations/whitelist.md @@ -14,7 +14,7 @@ The whitelist can be based on several criteria : - specific ip address : if the event/overflow IP is the same, event is whitelisted - ip ranges : if the event/overflow IP belongs to this range, event is whitelisted - - a list of {{expr.htmlname}} expressions : if any expression returns true, event is whitelisted + - a list of {{v0X.expr.htmlname}} expressions : if any expression returns true, event is whitelisted Here is an example showcasing configuration : @@ -62,7 +62,7 @@ time="07-07-2020 16:13:16" level=warning msg="80.x.x.x triggered a 4h0m0s ip ban time="07-07-2020 16:13:17" level=warning msg="80.x.x.x triggered a 4h0m0s ip ban remediation for [crowdsecurity/http-crawl-non_statics]" bucket_id=purple-snowflake event_time="2020-07-07 16:13:17.353641625 +0200 CEST m=+358820.187621068" scenario=crowdsecurity/http-crawl-non_statics source_ip=80.x.x.x time="07-07-2020 16:13:18" level=warning msg="80.x.x.x triggered a 4h0m0s ip ban remediation for [crowdsecurity/http-sensitive-files]" bucket_id=small-hill event_time="2020-07-07 16:13:18.005919055 +0200 CEST m=+358820.839898498" scenario=crowdsecurity/http-sensitive-files source_ip=80.x.x.x ^C -$ {{cli.bin}} ban list +$ {{v0X.cli.bin}} ban list 4 local decisions: +--------+---------------+-----------------------------------+------+--------+---------+---------------------------+--------+------------+ | SOURCE | IP | REASON | BANS | ACTION | COUNTRY | AS | EVENTS | EXPIRATION | @@ -75,7 +75,7 @@ $ {{cli.bin}} ban list ### Create the whitelist by IP -Let's create a `/etc/crowdsec/config/parsers/s02-enrich/mywhitelists.yaml` file with the following content : +Let's create a `/etc/crowdsec/crowdsec/parsers/s02-enrich/mywhitelists.yaml` file with the following content : ```yaml name: crowdsecurity/whitelists @@ -86,7 +86,7 @@ whitelist: - "80.x.x.x" ``` -and reload {{crowdsec.name}} : `sudo systemctl reload crowdsec` +and reload {{v0X.crowdsec.name}} : `sudo systemctl reload crowdsec` ### Test the whitelist @@ -102,7 +102,7 @@ And we don't get bans : $ tail -f /var/log/crowdsec.log ... ^C -$ {{cli.bin}} ban list +$ {{v0X.cli.bin}} ban list No local decisions. And 21 records from API, 15 distinct AS, 12 distinct countries @@ -126,7 +126,7 @@ whitelist: - evt.Parsed.http_user_agent == 'MySecretUserAgent' ``` -again, let's restart {{crowdsec.name}} ! +again, let's restart {{v0X.crowdsec.name}} ! For the record, I edited nikto's configuration to use 'MySecretUserAgent' as user-agent, and thus : @@ -164,7 +164,7 @@ whitelist: - evt.Enriched.reverse_dns endsWith '.asnieres.rev.numericable.fr.' ``` -After reloading {{crowdsec.name}}, and launching (again!) nikto : +After reloading {{v0X.crowdsec.name}}, and launching (again!) 
nikto : ```bash nikto -host myfqdn.com diff --git a/docs/v0.3.X/mkdocs.yml b/docs/v0.3.X/mkdocs.yml new file mode 100644 index 000000000..19f0f835a --- /dev/null +++ b/docs/v0.3.X/mkdocs.yml @@ -0,0 +1,56 @@ +site_name: Crowdsec/v0 +nav: + - Home: index.md + - Getting Started: + - Concepts & Glossary : getting_started/concepts.md + - Install Crowdsec : getting_started/installation.md + - Crowdsec Tour: getting_started/crowdsec-tour.md + - Guide: + - Overview: guide/crowdsec/overview.md + - Acquisition: guide/crowdsec/acquisition.md + - Parsers: guide/crowdsec/parsers.md + - Enrichers: guide/crowdsec/enrichers.md + - Scenarios: guide/crowdsec/scenarios.md + - Cscli: guide/cscli.md + - Simulation Mode: guide/crowdsec/simulation.md + - Cheat Sheets: + - Ban Management: cheat_sheets/ban-mgmt.md + - Configuration Management: cheat_sheets/config-mgmt.md + - Debugging Parsers & Scenarios: cheat_sheets/debugging_configs.md + - Observability: + - Overview: observability/overview.md + - Logs: observability/logs.md + - Metrics: + - Prometheus: observability/prometheus.md + - Command line: observability/command_line.md + - Dashboard: observability/dashboard.md + - References: + - Parsers format: references/parsers.md + - Scenarios format: references/scenarios.md + - Outputs format: references/output.md + - Write Configurations: + - Requirements: write_configurations/requirements.md + - Acquisition: write_configurations/acquisition.md + - Parsers: write_configurations/parsers.md + - Scenarios: write_configurations/scenarios.md + - Whitelists: write_configurations/whitelist.md + - Expressions: write_configurations/expressions.md + - bouncers: bouncers/index.md + - Contributing: + - General: contributing.md + - Writing Output Plugins: references/plugins_api.md + - Cscli commands: + - Cscli: cscli/cscli.md + - API: cscli/cscli_api.md + - Backup: cscli/cscli_backup.md + - Bans: cscli/cscli_ban.md + - Metrics: cscli/cscli_metrics.md + - Update: cscli/cscli_update.md + - Install configurations: cscli/cscli_install.md + - Remove configurations: cscli/cscli_remove.md + - Upgrade configurations: cscli/cscli_upgrade.md + - List configurations: cscli/cscli_list.md + - Inspect configurations: cscli/cscli_inspect.md + - Manage simulation: cscli/cscli_simulation.md + - Dashboard: cscli/cscli_dashboard.md + - Upgrade V0.X to V1.X: migration.md diff --git a/docs/v1.X/docs/about.md b/docs/v1.X/docs/about.md new file mode 100644 index 000000000..a0c46d408 --- /dev/null +++ b/docs/v1.X/docs/about.md @@ -0,0 +1,3 @@ +# Crowdsec + +{{macros_info() }} diff --git a/docs/v1.X/docs/admin_guide/architecture.md b/docs/v1.X/docs/admin_guide/architecture.md new file mode 100644 index 000000000..e69de29bb diff --git a/docs/v1.X/docs/admin_guide/services_configuration.md b/docs/v1.X/docs/admin_guide/services_configuration.md new file mode 100644 index 000000000..e69de29bb diff --git a/docs/v1.X/docs/assets/images/blocker-installation.gif b/docs/v1.X/docs/assets/images/blocker-installation.gif new file mode 100644 index 000000000..9846e97fc Binary files /dev/null and b/docs/v1.X/docs/assets/images/blocker-installation.gif differ diff --git a/docs/v1.X/docs/assets/images/crowdsec2.png b/docs/v1.X/docs/assets/images/crowdsec2.png new file mode 100644 index 000000000..bbf619a73 Binary files /dev/null and b/docs/v1.X/docs/assets/images/crowdsec2.png differ diff --git a/docs/v1.X/docs/assets/images/crowdsec_architecture.png b/docs/v1.X/docs/assets/images/crowdsec_architecture.png new file mode 100644 index 000000000..4764c0d2c Binary 
files /dev/null and b/docs/v1.X/docs/assets/images/crowdsec_architecture.png differ diff --git a/docs/v1.X/docs/assets/images/crowdsec_install.gif b/docs/v1.X/docs/assets/images/crowdsec_install.gif new file mode 100644 index 000000000..ceddd7f1d Binary files /dev/null and b/docs/v1.X/docs/assets/images/crowdsec_install.gif differ diff --git a/docs/v1.X/docs/assets/images/crowdsec_logo1.png b/docs/v1.X/docs/assets/images/crowdsec_logo1.png new file mode 100644 index 000000000..c9142c134 Binary files /dev/null and b/docs/v1.X/docs/assets/images/crowdsec_logo1.png differ diff --git a/docs/v1.X/docs/assets/images/cscli-metabase.gif b/docs/v1.X/docs/assets/images/cscli-metabase.gif new file mode 100644 index 000000000..b21d41191 Binary files /dev/null and b/docs/v1.X/docs/assets/images/cscli-metabase.gif differ diff --git a/docs/v1.X/docs/assets/images/dashboard_view.png b/docs/v1.X/docs/assets/images/dashboard_view.png new file mode 100644 index 000000000..6db945c8c Binary files /dev/null and b/docs/v1.X/docs/assets/images/dashboard_view.png differ diff --git a/docs/v1.X/docs/assets/images/dashboard_view2.png b/docs/v1.X/docs/assets/images/dashboard_view2.png new file mode 100644 index 000000000..6a91381eb Binary files /dev/null and b/docs/v1.X/docs/assets/images/dashboard_view2.png differ diff --git a/docs/v1.X/docs/assets/images/grafana_details.png b/docs/v1.X/docs/assets/images/grafana_details.png new file mode 100644 index 000000000..bf6b504f5 Binary files /dev/null and b/docs/v1.X/docs/assets/images/grafana_details.png differ diff --git a/docs/v1.X/docs/assets/images/grafana_insight.png b/docs/v1.X/docs/assets/images/grafana_insight.png new file mode 100644 index 000000000..8a1c6af85 Binary files /dev/null and b/docs/v1.X/docs/assets/images/grafana_insight.png differ diff --git a/docs/v1.X/docs/assets/images/grafana_overview.png b/docs/v1.X/docs/assets/images/grafana_overview.png new file mode 100644 index 000000000..52de69b81 Binary files /dev/null and b/docs/v1.X/docs/assets/images/grafana_overview.png differ diff --git a/docs/v1.X/docs/assets/images/out-of-the-box-protection.gif b/docs/v1.X/docs/assets/images/out-of-the-box-protection.gif new file mode 100644 index 000000000..a309f794c Binary files /dev/null and b/docs/v1.X/docs/assets/images/out-of-the-box-protection.gif differ diff --git a/docs/v1.X/docs/bouncers/index.md b/docs/v1.X/docs/bouncers/index.md new file mode 100644 index 000000000..8dd839ba6 --- /dev/null +++ b/docs/v1.X/docs/bouncers/index.md @@ -0,0 +1,25 @@ +# Bouncers + + +{{v1X.bouncers.Name}} are standalone software pieces in charge of acting upon blocked IPs. + +They can either be within the applicative stack, or work out of band : + +[nginx bouncer](https://github.com/crowdsecurity/cs-nginx-bouncer) will check every unknown IP against the database before letting go through or serving a *403* to the user, while a [firewall bouncer](https://github.com/crowdsecurity/cs-firewall-bouncer) will simply "add" malevolent IPs to nftables/ipset set of blacklisted IPs. + + +You can explore [available {{v1X.bouncers.name}} on the hub]({{v1X.hub.bouncers_url}}). + + +To be able for your {{v1X.bouncers.Name}} to communicate with the local API, you have to generate an API token with `cscli` and put it in your {{v1X.bouncers.Name}} configuration file: + +```bash +$ cscli bouncers add testBouncer +Api key for 'testBouncer': + + 6dcfe93f18675265e905aef390330a35 + +Please keep this key since you will not be able to retrive it! 
+``` + +Note: this command must be run on the server where the local API is installed (or at least with a cscli that has valid credentials to communicate with the database used by the API). \ No newline at end of file diff --git a/docs/v1.X/docs/contributing.md b/docs/v1.X/docs/contributing.md new file mode 100644 index 000000000..dbb26fbfe --- /dev/null +++ b/docs/v1.X/docs/contributing.md @@ -0,0 +1,76 @@ +# Contributing + +You have an idea, a suggestion or you spotted a mistake ? +Help us improve the software and the user experience, to make the internet a safer place together ! + + + +## Contributing to the documentation + +If you spotted some mistakes in the documentation or have improvement suggestions, you can : + + - open a {{v1X.doc.new_issue}} if you are comfortable with github + - let us know on {{v1X.doc.discourse}} if you want to discuss about it + +Let us as well know if you have some improvement suggestions ! + + + +## Contributing to the code + + - If you want to report a bug, you can use [the github bugtracker]({{v1X.crowdsec.bugreport}}) + - If you want to suggest an improvement you can use either [the github bugtracker]({{v1X.crowdsec.bugreport}}) or the {{v1X.doc.discourse}} if you want to discuss + + +## Contributing to the parsers/scenarios + +If you want to contribute your parser or scenario to the community and have them appear on the {{v1X.hub.htmlname}}, you should [open a merge request](https://github.com/crowdsecurity/hub/pulls) on the hub. + +We are currently working on a proper [CI](https://en.wikipedia.org/wiki/Continuous_integration) for the {{v1X.hub.htmlname}}, so for now all contribution are subject to peer-review, please bear with us ! + +## Contacting the team + +If you want to contact us using non-public media, you can contact us on `support` AT `crowdsec` DOT `net` with the following gpg-key : + +``` +-----BEGIN PGP PUBLIC KEY BLOCK----- +mQGNBF+VOSUBDADP6bxKDv88CdLBNhQMFNI37LE82vyfIAQmrGszON1m1EtL/LSQ +b/vC9mmlkUmJHM+bdxJ0BSl/xlWwrXjHVpaZNoluQDngVUe62cybN4tpFCvtVTMr +lo4Y0UhETgOmBFxaQLVd7Xc/jaSZGoHtSzh9hpGHg9pKrcYviG0MR173JYQfilw3 +L8yJ+K/oUUpvh2MRRwXiCNUVLtTppb7oxlcdExb0Px2PcaC34e/M30xFwiu7VJFj +0D7IIdKs6gvZuqwkNSUBF8/jtuzzM/YGzJHIdvOj15z+81/o/e6p3xvY/IKmyXC/ +1FMD8f4g5T/5fNDVq6QgJLel/g0bJ+kG75ccXfY45xKFo/YhdQ2Wg9JQX5Yjc5k7 +5AI0iuJjatXlym2Ek1niPEqR5H0C/KXFG4mPyCu9wzJu11jtY34e5TNYl9DA31F6 +81BbMmVFg4EbhYSN/2DuxpCvt2qQpk33bmdT7tFWcd2hYB/bSq2f8+K6ho50Sqwk +PK68LNZzi5ZXqGEAEQEAAbQnQ3Jvd2RTZWMgc3VwcG9ydCA8c3VwcG9ydEBjcm93 +ZHNlYy5uZXQ+iQHUBBMBCgA+FiEEpRXNfWM+DON/Satp2MpQXYwzLTEFAl+VOSUC +GwMFCQPCZwAFCwkIBwIGFQoJCAsCBBYCAwECHgECF4AACgkQ2MpQXYwzLTEhuwwA +wWdsuSrTC4ryVOYnfHRcT2b/rfbJXIUYXqAy75qsdUGwvueYdYSBMCMXqRB65J+J +juofCF0kTQKuhjtyJezwUfr5C+Sd08JWlZwf9F7CO83/ztLOPIUUp69H3m9heW7C ++A/Lpq3epALytC/QSkDHYnKBBZbLhoR/7WXhdLFvh+A475/ggn4GAOnZMg8WULpR +Kisu1GbEBPcVr1Xl6VTYVX5ghA/1W2WTY/NxAcLhCiJO/ENeka7xy4EKdCE5pDxM +QO/fnpCHsWDIHTxpCx+JAhdkb2BIvzSiF2+o+9y+vwzcPxdGemx7y8MjSGXIp1xp +TJparq309nljh+wqI6w/K+NjzNn/qJL0tpGqiHQXtYDbi86KaAXT9IYCGAIP36w8 +XUHYGgo0s6zMEP1NEFHWAgGy5elO403vm+NO5vpHv59FTjgoK2UcjeSjqtAYwzvc +bWQ6wZHwhoqD0WevFcAMmgdbebyOdPoA7+8eCPnkjER4eKxE23ffFU75HDuQNRYk +uQGNBF+VOSUBDADNHEm33IcwhO+uJQxjKtcF0DdAMqbjU5cXxeryo1i7A1WkTH5/ +wHfyJAmtLrY4abkQ1LEJ4bMYKdJz2vmvWq0fKCAXC18yLnxU+l0Ld4tWME8hJ/Wh +p+aePsW5BdLpHQeqmQ5MCsw1cZllbURcee22hLJ/PIM2bRsZp7goSj4wXBFjhJyq +EepVmasI17dBbIBFWBSSIJW4UnSBk+Zqbj6C6PDmsket68qcEebsqduWXPxegAzh +IIFD2qhC5t+nn5i+hPwKZN5ZYLQJeAjI4Z7wi3FIBZCzZ214421BbohxPo+GKkFp 
+mUQ7ZrIa+goHXAcj6ZHMeNNP0lsJRl91lK6NVu3p+Ygl0+wbMOAqDRguMfFdbnV8 +gcoYpAyk4YFCfgVQLuKGaYcGjcMP8+nZnPsbaTwbUKkjDAUo+JGmrB4XyAQPugZq +TiUN+lYgTs0cJALEQkKTh2w10TPyV6/YsYDSSnwJeVDIpNCQVg5EB0eRvhaCs9fd +dVni1C5RMcb+Q4MAEQEAAYkBvAQYAQoAJhYhBKUVzX1jPgzjf0mradjKUF2MMy0x +BQJflTklAhsMBQkDwmcAAAoJENjKUF2MMy0xkIcL/johqZbyHskQIaTfQUgASbbu +bdLXSrIkB8Ort9WULxdqs8hveFy6RjXFJWFitFHk46Bj6FJ1ZykfozL+k9uOGrL9 +lBk1e3bhqMVhW1o00DufgawNU2FU9NuH/rCuGpum9DE0cc1fFmQ3pjeiHV55GYxr +BGuyyals1ORwK06h+1VFMHrGB12SR7Imgo7FWuexhgLyOK4t1MXg3E4h72qaowpj +5B45qG9jUXgIFKR1D8G8tPeDYLbd37pskNDFozzfAe/H2fqmEjQxMLHrk7J8I3wQ +FPvKIvUF8M3NqZjyaFSiisOn32AS3RAsI8RuD4T2XgpE2L6e29u3RpJkvhPbcAN6 +w0W8yw3z1/2uHSvYbwoH1cn4akAikYR9aVVHv86AvNlr0BguqWdzEfiGT6mcJ/hH +2sGQJ1nJRgGpAlx/2HpsLJxhJwLVbXSDSk6Bu2T9G/VIda95niVgq6MfE9GSS+MS +ucVcwqjIXn/9V6+pFZ11soXNKuTk4Wx+uO2r/i5bVA== +=Edl+ +-----END PGP PUBLIC KEY BLOCK----- +``` diff --git a/docs/v1.X/docs/cscli/cscli.md b/docs/v1.X/docs/cscli/cscli.md new file mode 100644 index 000000000..4cddd1aa3 --- /dev/null +++ b/docs/v1.X/docs/cscli/cscli.md @@ -0,0 +1,39 @@ +## cscli + +cscli allows you to manage crowdsec + +### Synopsis + +cscli is the main command to interact with your crowdsec service, scenarios & db. +It is meant to allow you to manage bans, parsers/scenarios/etc, api and generally manage you crowdsec setup. + +### Options + +``` + -c, --config string path to crowdsec config file (default "/etc/crowdsec/config.yaml") + -o, --output string Output format : human, json, raw. + --debug Set logging to debug. + --info Set logging to info. + --warning Set logging to warning. + --error Set logging to error. + --trace Set logging to trace. + -h, --help help for cscli +``` + +### SEE ALSO + +* [cscli alerts](cscli_alerts.md) - Manage alerts +* [cscli bouncers](cscli_bouncers.md) - Manage bouncers +* [cscli collections](cscli_collections.md) - Manage collections from hub +* [cscli config](cscli_config.md) - Allows to view current config +* [cscli dashboard](cscli_dashboard.md) - Manage your metabase dashboard container +* [cscli decisions](cscli_decisions.md) - Manage decisions +* [cscli hub](cscli_hub.md) - Manage Hub +* [cscli machines](cscli_machines.md) - Manage local API machines +* [cscli metrics](cscli_metrics.md) - Display crowdsec prometheus metrics. +* [cscli parsers](cscli_parsers.md) - Install/Remove/Upgrade/Inspect parser(s) from hub +* [cscli postoverflows](cscli_postoverflows.md) - Install/Remove/Upgrade/Inspect postoverflow(s) from hub +* [cscli scenarios](cscli_scenarios.md) - Install/Remove/Upgrade/Inspect scenario(s) from hub +* [cscli simulation](cscli_simulation.md) - Manage simulation status of scenarios + + diff --git a/docs/v1.X/docs/cscli/cscli_alerts.md b/docs/v1.X/docs/cscli/cscli_alerts.md new file mode 100644 index 000000000..ee8ee72ca --- /dev/null +++ b/docs/v1.X/docs/cscli/cscli_alerts.md @@ -0,0 +1,34 @@ +## cscli alerts + +Manage alerts + +### Synopsis + +Manage alerts + +### Options + +``` + -h, --help help for alerts +``` + +### Options inherited from parent commands + +``` + -c, --config string path to crowdsec config file (default "/etc/crowdsec/config.yaml") + --debug Set logging to debug. + --error Set logging to error. + --info Set logging to info. + -o, --output string Output format : human, json, raw. + --trace Set logging to trace. + --warning Set logging to warning. 
+``` + +### SEE ALSO + +* [cscli](cscli.md) - cscli allows you to manage crowdsec +* [cscli alerts delete](cscli_alerts_delete.md) - Delete alerts +/!\ This command can be use only on the same machine than the local API. +* [cscli alerts list](cscli_alerts_list.md) - List alerts + + diff --git a/docs/v1.X/docs/cscli/cscli_alerts_delete.md b/docs/v1.X/docs/cscli/cscli_alerts_delete.md new file mode 100644 index 000000000..2b3ef50df --- /dev/null +++ b/docs/v1.X/docs/cscli/cscli_alerts_delete.md @@ -0,0 +1,50 @@ +## cscli alerts delete + +Delete alerts +/!\ This command can be use only on the same machine than the local API. + +### Synopsis + +Delete alerts +/!\ This command can be use only on the same machine than the local API. + +``` +cscli alerts delete [filters] [--all] [flags] +``` + +### Examples + +``` +cscli alerts delete --ip 1.2.3.4 +cscli alerts delete --range 1.2.3.0/24 +cscli alerts delete -s crowdsecurity/ssh-bf" +``` + +### Options + +``` + --scope string the scope (ie. ip,range) + -v, --value string the value to match for in the specified scope + -s, --scenario string the scenario (ie. crowdsecurity/ssh-bf) + -i, --ip string Source ip (shorthand for --scope ip --value ) + -r, --range string Range source ip (shorthand for --scope range --value ) + -h, --help help for delete +``` + +### Options inherited from parent commands + +``` + -c, --config string path to crowdsec config file (default "/etc/crowdsec/config.yaml") + --debug Set logging to debug. + --error Set logging to error. + --info Set logging to info. + -o, --output string Output format : human, json, raw. + --trace Set logging to trace. + --warning Set logging to warning. +``` + +### SEE ALSO + +* [cscli alerts](cscli_alerts.md) - Manage alerts + + diff --git a/docs/v1.X/docs/cscli/cscli_alerts_list.md b/docs/v1.X/docs/cscli/cscli_alerts_list.md new file mode 100644 index 000000000..b3c8ace00 --- /dev/null +++ b/docs/v1.X/docs/cscli/cscli_alerts_list.md @@ -0,0 +1,53 @@ +## cscli alerts list + +List alerts + +### Synopsis + +List alerts + +``` +cscli alerts list [filters] [flags] +``` + +### Examples + +``` +cscli alerts list +cscli alerts list --ip 1.2.3.4 +cscli alerts list --range 1.2.3.0/24 +cscli alerts list -s crowdsecurity/ssh-bf +cscli alerts list --type ban +``` + +### Options + +``` + --until string restrict to alerts older than until (ie. 4h, 30d) + --since string restrict to alerts newer than since (ie. 4h, 30d) + -i, --ip string restrict to alerts from this source ip (shorthand for --scope ip --value ) + -s, --scenario string the scenario (ie. crowdsecurity/ssh-bf) + -r, --range string restrict to alerts from this range (shorthand for --scope range --value ) + --type string restrict to alerts with given decision type (ie. ban, captcha) + --scope string restrict to alerts of this scope (ie. ip,range) + -v, --value string the value to match for in the specified scope + -h, --help help for list +``` + +### Options inherited from parent commands + +``` + -c, --config string path to crowdsec config file (default "/etc/crowdsec/config.yaml") + --debug Set logging to debug. + --error Set logging to error. + --info Set logging to info. + -o, --output string Output format : human, json, raw. + --trace Set logging to trace. + --warning Set logging to warning. 
+``` + +### SEE ALSO + +* [cscli alerts](cscli_alerts.md) - Manage alerts + + diff --git a/docs/v1.X/docs/cscli/cscli_bouncers.md b/docs/v1.X/docs/cscli/cscli_bouncers.md new file mode 100644 index 000000000..0d70c3603 --- /dev/null +++ b/docs/v1.X/docs/cscli/cscli_bouncers.md @@ -0,0 +1,38 @@ +## cscli bouncers + +Manage bouncers + +### Synopsis + + +Bouncers Management. + +To list/add/delete bouncers + + +### Options + +``` + -h, --help help for bouncers +``` + +### Options inherited from parent commands + +``` + -c, --config string path to crowdsec config file (default "/etc/crowdsec/config.yaml") + --debug Set logging to debug. + --error Set logging to error. + --info Set logging to info. + -o, --output string Output format : human, json, raw. + --trace Set logging to trace. + --warning Set logging to warning. +``` + +### SEE ALSO + +* [cscli](cscli.md) - cscli allows you to manage crowdsec +* [cscli bouncers add](cscli_bouncers_add.md) - add bouncer +* [cscli bouncers delete](cscli_bouncers_delete.md) - delete bouncer +* [cscli bouncers list](cscli_bouncers_list.md) - List bouncers + + diff --git a/docs/v1.X/docs/cscli/cscli_bouncers_add.md b/docs/v1.X/docs/cscli/cscli_bouncers_add.md new file mode 100644 index 000000000..04b189a12 --- /dev/null +++ b/docs/v1.X/docs/cscli/cscli_bouncers_add.md @@ -0,0 +1,43 @@ +## cscli bouncers add + +add bouncer + +### Synopsis + +add bouncer + +``` +cscli bouncers add MyBouncerName [--length 16] [flags] +``` + +### Examples + +``` +cscli bouncers add MyBouncerName +cscli bouncers add MyBouncerName -l 24 +``` + +### Options + +``` + -h, --help help for add + -l, --length int length of the api key (default 16) +``` + +### Options inherited from parent commands + +``` + -c, --config string path to crowdsec config file (default "/etc/crowdsec/config.yaml") + --debug Set logging to debug. + --error Set logging to error. + --info Set logging to info. + -o, --output string Output format : human, json, raw. + --trace Set logging to trace. + --warning Set logging to warning. +``` + +### SEE ALSO + +* [cscli bouncers](cscli_bouncers.md) - Manage bouncers + + diff --git a/docs/v1.X/docs/cscli/cscli_bouncers_delete.md b/docs/v1.X/docs/cscli/cscli_bouncers_delete.md new file mode 100644 index 000000000..737c98e9f --- /dev/null +++ b/docs/v1.X/docs/cscli/cscli_bouncers_delete.md @@ -0,0 +1,35 @@ +## cscli bouncers delete + +delete bouncer + +### Synopsis + +delete bouncer + +``` +cscli bouncers delete MyBouncerName [flags] +``` + +### Options + +``` + -h, --help help for delete +``` + +### Options inherited from parent commands + +``` + -c, --config string path to crowdsec config file (default "/etc/crowdsec/config.yaml") + --debug Set logging to debug. + --error Set logging to error. + --info Set logging to info. + -o, --output string Output format : human, json, raw. + --trace Set logging to trace. + --warning Set logging to warning. 
+``` + +### SEE ALSO + +* [cscli bouncers](cscli_bouncers.md) - Manage bouncers + + diff --git a/docs/v1.X/docs/cscli/cscli_bouncers_list.md b/docs/v1.X/docs/cscli/cscli_bouncers_list.md new file mode 100644 index 000000000..4a2b8426b --- /dev/null +++ b/docs/v1.X/docs/cscli/cscli_bouncers_list.md @@ -0,0 +1,41 @@ +## cscli bouncers list + +List bouncers + +### Synopsis + +List bouncers + +``` +cscli bouncers list [flags] +``` + +### Examples + +``` +cscli bouncers list +``` + +### Options + +``` + -h, --help help for list +``` + +### Options inherited from parent commands + +``` + -c, --config string path to crowdsec config file (default "/etc/crowdsec/config.yaml") + --debug Set logging to debug. + --error Set logging to error. + --info Set logging to info. + -o, --output string Output format : human, json, raw. + --trace Set logging to trace. + --warning Set logging to warning. +``` + +### SEE ALSO + +* [cscli bouncers](cscli_bouncers.md) - Manage bouncers + + diff --git a/docs/v1.X/docs/cscli/cscli_collections.md b/docs/v1.X/docs/cscli/cscli_collections.md new file mode 100644 index 000000000..61646547b --- /dev/null +++ b/docs/v1.X/docs/cscli/cscli_collections.md @@ -0,0 +1,36 @@ +## cscli collections + +Manage collections from hub + +### Synopsis + +Install/Remove/Upgrade/Inspect collections from the CrowdSec Hub. + +### Options + +``` + -h, --help help for collections +``` + +### Options inherited from parent commands + +``` + -c, --config string path to crowdsec config file (default "/etc/crowdsec/config.yaml") + --debug Set logging to debug. + --error Set logging to error. + --info Set logging to info. + -o, --output string Output format : human, json, raw. + --trace Set logging to trace. + --warning Set logging to warning. +``` + +### SEE ALSO + +* [cscli](cscli.md) - cscli allows you to manage crowdsec +* [cscli collections inspect](cscli_collections_inspect.md) - Inspect given collection +* [cscli collections install](cscli_collections_install.md) - Install given collection(s) +* [cscli collections list](cscli_collections_list.md) - List all collections or given one +* [cscli collections remove](cscli_collections_remove.md) - Remove given collection(s) +* [cscli collections upgrade](cscli_collections_upgrade.md) - Upgrade given collection(s) + + diff --git a/docs/v1.X/docs/cscli/cscli_collections_inspect.md b/docs/v1.X/docs/cscli/cscli_collections_inspect.md new file mode 100644 index 000000000..e8367c34b --- /dev/null +++ b/docs/v1.X/docs/cscli/cscli_collections_inspect.md @@ -0,0 +1,43 @@ +## cscli collections inspect + +Inspect given collection + +### Synopsis + +Inspect given collection + +``` +cscli collections inspect collection [flags] +``` + +### Examples + +``` +cscli collections inspect crowdsec/xxx crowdsec/xyz +``` + +### Options + +``` + -h, --help help for inspect + -u, --url string Prometheus url (default "http://127.0.0.1:6060/metrics") +``` + +### Options inherited from parent commands + +``` + -b, --branch string Use given branch from hub + -c, --config string path to crowdsec config file (default "/etc/crowdsec/config.yaml") + --debug Set logging to debug. + --error Set logging to error. + --info Set logging to info. + -o, --output string Output format : human, json, raw. + --trace Set logging to trace. + --warning Set logging to warning. 
+``` + +### SEE ALSO + +* [cscli collections](cscli_collections.md) - Manage collections from hub + + diff --git a/docs/v1.X/docs/cscli/cscli_collections_install.md b/docs/v1.X/docs/cscli/cscli_collections_install.md new file mode 100644 index 000000000..bf3e13b40 --- /dev/null +++ b/docs/v1.X/docs/cscli/cscli_collections_install.md @@ -0,0 +1,44 @@ +## cscli collections install + +Install given collection(s) + +### Synopsis + +Fetch and install given collection(s) from hub + +``` +cscli collections install collection [flags] +``` + +### Examples + +``` +cscli collections install crowdsec/xxx crowdsec/xyz +``` + +### Options + +``` + -d, --download-only Only download packages, don't enable + --force Force install : Overwrite tainted and outdated files + -h, --help help for install +``` + +### Options inherited from parent commands + +``` + -b, --branch string Use given branch from hub + -c, --config string path to crowdsec config file (default "/etc/crowdsec/config.yaml") + --debug Set logging to debug. + --error Set logging to error. + --info Set logging to info. + -o, --output string Output format : human, json, raw. + --trace Set logging to trace. + --warning Set logging to warning. +``` + +### SEE ALSO + +* [cscli collections](cscli_collections.md) - Manage collections from hub + + diff --git a/docs/v1.X/docs/cscli/cscli_collections_list.md b/docs/v1.X/docs/cscli/cscli_collections_list.md new file mode 100644 index 000000000..85e5c938e --- /dev/null +++ b/docs/v1.X/docs/cscli/cscli_collections_list.md @@ -0,0 +1,43 @@ +## cscli collections list + +List all collections or given one + +### Synopsis + +List all collections or given one + +``` +cscli collections list collection [-a] [flags] +``` + +### Examples + +``` +cscli collections list +``` + +### Options + +``` + -a, --all List as well disabled items + -h, --help help for list +``` + +### Options inherited from parent commands + +``` + -b, --branch string Use given branch from hub + -c, --config string path to crowdsec config file (default "/etc/crowdsec/config.yaml") + --debug Set logging to debug. + --error Set logging to error. + --info Set logging to info. + -o, --output string Output format : human, json, raw. + --trace Set logging to trace. + --warning Set logging to warning. +``` + +### SEE ALSO + +* [cscli collections](cscli_collections.md) - Manage collections from hub + + diff --git a/docs/v1.X/docs/cscli/cscli_collections_remove.md b/docs/v1.X/docs/cscli/cscli_collections_remove.md new file mode 100644 index 000000000..f402d74d5 --- /dev/null +++ b/docs/v1.X/docs/cscli/cscli_collections_remove.md @@ -0,0 +1,44 @@ +## cscli collections remove + +Remove given collection(s) + +### Synopsis + +Remove given collection(s) from hub + +``` +cscli collections remove collection [flags] +``` + +### Examples + +``` +cscli collections remove crowdsec/xxx crowdsec/xyz +``` + +### Options + +``` + --all Delete all the files in selected scope + -h, --help help for remove + --purge Delete source file too +``` + +### Options inherited from parent commands + +``` + -b, --branch string Use given branch from hub + -c, --config string path to crowdsec config file (default "/etc/crowdsec/config.yaml") + --debug Set logging to debug. + --error Set logging to error. + --info Set logging to info. + -o, --output string Output format : human, json, raw. + --trace Set logging to trace. + --warning Set logging to warning. 
+```
+
+### SEE ALSO
+
+* [cscli collections](cscli_collections.md) - Manage collections from hub
+
+
diff --git a/docs/v1.X/docs/cscli/cscli_collections_upgrade.md b/docs/v1.X/docs/cscli/cscli_collections_upgrade.md
new file mode 100644
index 000000000..796602c52
--- /dev/null
+++ b/docs/v1.X/docs/cscli/cscli_collections_upgrade.md
@@ -0,0 +1,44 @@
+## cscli collections upgrade
+
+Upgrade given collection(s)
+
+### Synopsis
+
+Fetch and upgrade given collection(s) from hub
+
+```
+cscli collections upgrade collection [flags]
+```
+
+### Examples
+
+```
+cscli collections upgrade crowdsec/xxx crowdsec/xyz
+```
+
+### Options
+
+```
+ -d, --download-only Only download packages, don't enable
+ --force Force install : Overwrite tainted and outdated files
+ -h, --help help for upgrade
+```
+
+### Options inherited from parent commands
+
+```
+ -b, --branch string Use given branch from hub
+ -c, --config string path to crowdsec config file (default "/etc/crowdsec/config.yaml")
+ --debug Set logging to debug.
+ --error Set logging to error.
+ --info Set logging to info.
+ -o, --output string Output format : human, json, raw.
+ --trace Set logging to trace.
+ --warning Set logging to warning.
+```
+
+### SEE ALSO
+
+* [cscli collections](cscli_collections.md) - Manage collections from hub
+
+
diff --git a/docs/v1.X/docs/cscli/cscli_config.md b/docs/v1.X/docs/cscli/cscli_config.md
new file mode 100644
index 000000000..febdb19a2
--- /dev/null
+++ b/docs/v1.X/docs/cscli/cscli_config.md
@@ -0,0 +1,32 @@
+## cscli config
+
+Allows you to view the current config
+
+### Synopsis
+
+Allows you to view the current config
+
+### Options
+
+```
+ -h, --help help for config
+```
+
+### Options inherited from parent commands
+
+```
+ -c, --config string path to crowdsec config file (default "/etc/crowdsec/config.yaml")
+ --debug Set logging to debug.
+ --error Set logging to error.
+ --info Set logging to info.
+ -o, --output string Output format : human, json, raw.
+ --trace Set logging to trace.
+ --warning Set logging to warning.
+```
+
+### SEE ALSO
+
+* [cscli](cscli.md) - cscli allows you to manage crowdsec
+* [cscli config show](cscli_config_show.md) - Displays current config
+
+
diff --git a/docs/v1.X/docs/cscli/cscli_config_show.md b/docs/v1.X/docs/cscli/cscli_config_show.md
new file mode 100644
index 000000000..0b2711ce5
--- /dev/null
+++ b/docs/v1.X/docs/cscli/cscli_config_show.md
@@ -0,0 +1,35 @@
+## cscli config show
+
+Displays current config
+
+### Synopsis
+
+Displays the current cli configuration.
+
+```
+cscli config show [flags]
+```
+
+### Options
+
+```
+ -h, --help help for show
+```
+
+### Options inherited from parent commands
+
+```
+ -c, --config string path to crowdsec config file (default "/etc/crowdsec/config.yaml")
+ --debug Set logging to debug.
+ --error Set logging to error.
+ --info Set logging to info.
+ -o, --output string Output format : human, json, raw.
+ --trace Set logging to trace.
+ --warning Set logging to warning.
+```
+
+### SEE ALSO
+
+* [cscli config](cscli_config.md) - Allows you to view the current config
+
+
diff --git a/docs/v1.X/docs/cscli/cscli_dashboard.md b/docs/v1.X/docs/cscli/cscli_dashboard.md
new file mode 100644
index 000000000..b5a722df9
--- /dev/null
+++ b/docs/v1.X/docs/cscli/cscli_dashboard.md
@@ -0,0 +1,46 @@
+## cscli dashboard
+
+Manage your metabase dashboard container
+
+### Synopsis
+
+Install/Start/Stop/Remove a metabase container exposing dashboard and metrics.
+ +### Examples + +``` + +cscli dashboard setup +cscli dashboard start +cscli dashboard stop +cscli dashboard remove + +``` + +### Options + +``` + -h, --help help for dashboard +``` + +### Options inherited from parent commands + +``` + -c, --config string path to crowdsec config file (default "/etc/crowdsec/config.yaml") + --debug Set logging to debug. + --error Set logging to error. + --info Set logging to info. + -o, --output string Output format : human, json, raw. + --trace Set logging to trace. + --warning Set logging to warning. +``` + +### SEE ALSO + +* [cscli](cscli.md) - cscli allows you to manage crowdsec +* [cscli dashboard remove](cscli_dashboard_remove.md) - removes the metabase container. +* [cscli dashboard setup](cscli_dashboard_setup.md) - Setup a metabase container. +* [cscli dashboard start](cscli_dashboard_start.md) - Start the metabase container. +* [cscli dashboard stop](cscli_dashboard_stop.md) - Stops the metabase container. + + diff --git a/docs/v1.X/docs/cscli/cscli_dashboard_remove.md b/docs/v1.X/docs/cscli/cscli_dashboard_remove.md new file mode 100644 index 000000000..71a665685 --- /dev/null +++ b/docs/v1.X/docs/cscli/cscli_dashboard_remove.md @@ -0,0 +1,45 @@ +## cscli dashboard remove + +removes the metabase container. + +### Synopsis + +removes the metabase container using docker. + +``` +cscli dashboard remove [flags] +``` + +### Examples + +``` + +cscli dashboard remove +cscli dashboard remove --force + +``` + +### Options + +``` + -f, --force Force remove : stop the container if running and remove. + -h, --help help for remove +``` + +### Options inherited from parent commands + +``` + -c, --config string path to crowdsec config file (default "/etc/crowdsec/config.yaml") + --debug Set logging to debug. + --error Set logging to error. + --info Set logging to info. + -o, --output string Output format : human, json, raw. + --trace Set logging to trace. + --warning Set logging to warning. +``` + +### SEE ALSO + +* [cscli dashboard](cscli_dashboard.md) - Manage your metabase dashboard container + + diff --git a/docs/v1.X/docs/cscli/cscli_dashboard_setup.md b/docs/v1.X/docs/cscli/cscli_dashboard_setup.md new file mode 100644 index 000000000..538e732da --- /dev/null +++ b/docs/v1.X/docs/cscli/cscli_dashboard_setup.md @@ -0,0 +1,49 @@ +## cscli dashboard setup + +Setup a metabase container. + +### Synopsis + +Perform a metabase docker setup, download standard dashboards, create a fresh user and start the container + +``` +cscli dashboard setup [flags] +``` + +### Examples + +``` + +cscli dashboard setup +cscli dashboard setup --listen 0.0.0.0 +cscli dashboard setup -l 0.0.0.0 -p 443 + +``` + +### Options + +``` + -d, --dir string Shared directory with metabase container. (default "/var/lib/crowdsec/data") + -f, --force Force setup : override existing files. + -h, --help help for setup + -l, --listen string Listen address of container (default "127.0.0.1") + -p, --port string Listen port of container (default "3000") +``` + +### Options inherited from parent commands + +``` + -c, --config string path to crowdsec config file (default "/etc/crowdsec/config.yaml") + --debug Set logging to debug. + --error Set logging to error. + --info Set logging to info. + -o, --output string Output format : human, json, raw. + --trace Set logging to trace. + --warning Set logging to warning. 
+```
+
+### SEE ALSO
+
+* [cscli dashboard](cscli_dashboard.md) - Manage your metabase dashboard container
+
+
diff --git a/docs/v1.X/docs/cscli/cscli_dashboard_start.md b/docs/v1.X/docs/cscli/cscli_dashboard_start.md
new file mode 100644
index 000000000..fbfa46024
--- /dev/null
+++ b/docs/v1.X/docs/cscli/cscli_dashboard_start.md
@@ -0,0 +1,35 @@
+## cscli dashboard start
+
+Start the metabase container.
+
+### Synopsis
+
+Starts the metabase container using docker.
+
+```
+cscli dashboard start [flags]
+```
+
+### Options
+
+```
+ -h, --help help for start
+```
+
+### Options inherited from parent commands
+
+```
+ -c, --config string path to crowdsec config file (default "/etc/crowdsec/config.yaml")
+ --debug Set logging to debug.
+ --error Set logging to error.
+ --info Set logging to info.
+ -o, --output string Output format : human, json, raw.
+ --trace Set logging to trace.
+ --warning Set logging to warning.
+```
+
+### SEE ALSO
+
+* [cscli dashboard](cscli_dashboard.md) - Manage your metabase dashboard container
+
+
diff --git a/docs/v1.X/docs/cscli/cscli_dashboard_stop.md b/docs/v1.X/docs/cscli/cscli_dashboard_stop.md
new file mode 100644
index 000000000..5c65fa380
--- /dev/null
+++ b/docs/v1.X/docs/cscli/cscli_dashboard_stop.md
@@ -0,0 +1,35 @@
+## cscli dashboard stop
+
+Stops the metabase container.
+
+### Synopsis
+
+Stops the metabase container using docker.
+
+```
+cscli dashboard stop [flags]
+```
+
+### Options
+
+```
+ -h, --help help for stop
+```
+
+### Options inherited from parent commands
+
+```
+ -c, --config string path to crowdsec config file (default "/etc/crowdsec/config.yaml")
+ --debug Set logging to debug.
+ --error Set logging to error.
+ --info Set logging to info.
+ -o, --output string Output format : human, json, raw.
+ --trace Set logging to trace.
+ --warning Set logging to warning.
+```
+
+### SEE ALSO
+
+* [cscli dashboard](cscli_dashboard.md) - Manage your metabase dashboard container
+
+
diff --git a/docs/v1.X/docs/cscli/cscli_decisions.md b/docs/v1.X/docs/cscli/cscli_decisions.md
new file mode 100644
index 000000000..cd7798792
--- /dev/null
+++ b/docs/v1.X/docs/cscli/cscli_decisions.md
@@ -0,0 +1,40 @@
+## cscli decisions
+
+Manage decisions
+
+### Synopsis
+
+Add/List/Delete decisions from LAPI
+
+### Examples
+
+```
+cscli decisions [action] [filter]
+```
+
+### Options
+
+```
+ -h, --help help for decisions
+```
+
+### Options inherited from parent commands
+
+```
+ -c, --config string path to crowdsec config file (default "/etc/crowdsec/config.yaml")
+ --debug Set logging to debug.
+ --error Set logging to error.
+ --info Set logging to info.
+ -o, --output string Output format : human, json, raw.
+ --trace Set logging to trace.
+ --warning Set logging to warning.
+``` + +### SEE ALSO + +* [cscli](cscli.md) - cscli allows you to manage crowdsec +* [cscli decisions add](cscli_decisions_add.md) - Add decision to LAPI +* [cscli decisions delete](cscli_decisions_delete.md) - Delete decisions +* [cscli decisions list](cscli_decisions_list.md) - List decisions from LAPI + + diff --git a/docs/v1.X/docs/cscli/cscli_decisions_add.md b/docs/v1.X/docs/cscli/cscli_decisions_add.md new file mode 100644 index 000000000..be66ffa2f --- /dev/null +++ b/docs/v1.X/docs/cscli/cscli_decisions_add.md @@ -0,0 +1,52 @@ +## cscli decisions add + +Add decision to LAPI + +### Synopsis + +Add decision to LAPI + +``` +cscli decisions add [options] [flags] +``` + +### Examples + +``` +cscli decisions add --ip 1.2.3.4 +cscli decisions add --range 1.2.3.0/24 +cscli decisions add --ip 1.2.3.4 --duration 24h --type captcha +cscli decisions add --scope username --value foobar + +``` + +### Options + +``` + -i, --ip string Source ip (shorthand for --scope ip --value ) + -r, --range string Range source ip (shorthand for --scope range --value ) + -d, --duration string Decision duration (ie. 1h,4h,30m) (default "4h") + -v, --value string The value (ie. --scope username --value foobar) + --scope string Decision scope (ie. ip,range,username) (default "Ip") + -R, --reason string Decision reason (ie. scenario-name) + -t, --type string Decision type (ie. ban,captcha,throttle) (default "ban") + -h, --help help for add +``` + +### Options inherited from parent commands + +``` + -c, --config string path to crowdsec config file (default "/etc/crowdsec/config.yaml") + --debug Set logging to debug. + --error Set logging to error. + --info Set logging to info. + -o, --output string Output format : human, json, raw. + --trace Set logging to trace. + --warning Set logging to warning. +``` + +### SEE ALSO + +* [cscli decisions](cscli_decisions.md) - Manage decisions + + diff --git a/docs/v1.X/docs/cscli/cscli_decisions_delete.md b/docs/v1.X/docs/cscli/cscli_decisions_delete.md new file mode 100644 index 000000000..4bc8d564a --- /dev/null +++ b/docs/v1.X/docs/cscli/cscli_decisions_delete.md @@ -0,0 +1,52 @@ +## cscli decisions delete + +Delete decisions + +### Synopsis + +Delete decisions + +``` +cscli decisions delete [options] [flags] +``` + +### Examples + +``` +cscli decisions delete -r 1.2.3.0/24 +cscli decisions delete -i 1.2.3.4 +cscli decisions delete -s crowdsecurity/ssh-bf +cscli decisions delete --id 42 +cscli decisions delete --type captcha + +``` + +### Options + +``` + -i, --ip string Source ip (shorthand for --scope ip --value ) + -r, --range string Range source ip (shorthand for --scope range --value ) + --id string decision id + -t, --type string the decision type (ie. ban,captcha) + -v, --value string the value to match for in the specified scope + --all delete all decisions + -h, --help help for delete +``` + +### Options inherited from parent commands + +``` + -c, --config string path to crowdsec config file (default "/etc/crowdsec/config.yaml") + --debug Set logging to debug. + --error Set logging to error. + --info Set logging to info. + -o, --output string Output format : human, json, raw. + --trace Set logging to trace. + --warning Set logging to warning. 
+```
+
+### SEE ALSO
+
+* [cscli decisions](cscli_decisions.md) - Manage decisions
+
+
diff --git a/docs/v1.X/docs/cscli/cscli_decisions_list.md b/docs/v1.X/docs/cscli/cscli_decisions_list.md
new file mode 100644
index 000000000..738e0b4d0
--- /dev/null
+++ b/docs/v1.X/docs/cscli/cscli_decisions_list.md
@@ -0,0 +1,54 @@
+## cscli decisions list
+
+List decisions from LAPI
+
+### Synopsis
+
+List decisions from LAPI
+
+```
+cscli decisions list [options] [flags]
+```
+
+### Examples
+
+```
+cscli decisions list -i 1.2.3.4
+cscli decisions list -r 1.2.3.0/24
+cscli decisions list -s crowdsecurity/ssh-bf
+cscli decisions list -t ban
+
+```
+
+### Options
+
+```
+ --since string restrict to alerts newer than since (ie. 4h, 30d)
+ --until string restrict to alerts older than until (ie. 4h, 30d)
+ -t, --type string restrict to this decision type (ie. ban,captcha)
+ --scope string restrict to this scope (ie. ip,range,session)
+ -v, --value string restrict to this value (ie. 1.2.3.4,userName)
+ -s, --scenario string restrict to this scenario (ie. crowdsecurity/ssh-bf)
+ -i, --ip string restrict to alerts from this source ip (shorthand for --scope ip --value )
+ -r, --range string restrict to alerts from this source range (shorthand for --scope range --value )
+ --no-simu exclude decisions in simulation mode
+ -h, --help help for list
+```
+
+### Options inherited from parent commands
+
+```
+ -c, --config string path to crowdsec config file (default "/etc/crowdsec/config.yaml")
+ --debug Set logging to debug.
+ --error Set logging to error.
+ --info Set logging to info.
+ -o, --output string Output format : human, json, raw.
+ --trace Set logging to trace.
+ --warning Set logging to warning.
+```
+
+### SEE ALSO
+
+* [cscli decisions](cscli_decisions.md) - Manage decisions
+
+
diff --git a/docs/v1.X/docs/cscli/cscli_hub.md b/docs/v1.X/docs/cscli/cscli_hub.md
new file mode 100644
index 000000000..e6f5f1d92
--- /dev/null
+++ b/docs/v1.X/docs/cscli/cscli_hub.md
@@ -0,0 +1,48 @@
+## cscli hub
+
+Manage Hub
+
+### Synopsis
+
+
+Hub management
+
+List/update parsers/scenarios/postoverflows/collections from [Crowdsec Hub](https://hub.crowdsec.net).
+The hub is managed by cscli; to get the latest hub files from [Crowdsec Hub](https://hub.crowdsec.net), you need to update.
+
+
+### Examples
+
+```
+
+cscli hub list # List all installed configurations
+cscli hub update # Download list of available configurations from the hub
+
+```
+
+### Options
+
+```
+ -a, --all List as well disabled items
+ -h, --help help for hub
+```
+
+### Options inherited from parent commands
+
+```
+ -c, --config string path to crowdsec config file (default "/etc/crowdsec/config.yaml")
+ --debug Set logging to debug.
+ --error Set logging to error.
+ --info Set logging to info.
+ -o, --output string Output format : human, json, raw.
+ --trace Set logging to trace.
+ --warning Set logging to warning.
+``` + +### SEE ALSO + +* [cscli](cscli.md) - cscli allows you to manage crowdsec +* [cscli hub list](cscli_hub_list.md) - List installed configs +* [cscli hub update](cscli_hub_update.md) - Fetch available configs from hub + + diff --git a/docs/v1.X/docs/cscli/cscli_hub_list.md b/docs/v1.X/docs/cscli/cscli_hub_list.md new file mode 100644 index 000000000..c8beb2c73 --- /dev/null +++ b/docs/v1.X/docs/cscli/cscli_hub_list.md @@ -0,0 +1,37 @@ +## cscli hub list + +List installed configs + +### Synopsis + +List installed configs + +``` +cscli hub list [-a] [flags] +``` + +### Options + +``` + -h, --help help for list +``` + +### Options inherited from parent commands + +``` + -a, --all List as well disabled items + -b, --branch string Use given branch from hub + -c, --config string path to crowdsec config file (default "/etc/crowdsec/config.yaml") + --debug Set logging to debug. + --error Set logging to error. + --info Set logging to info. + -o, --output string Output format : human, json, raw. + --trace Set logging to trace. + --warning Set logging to warning. +``` + +### SEE ALSO + +* [cscli hub](cscli_hub.md) - Manage Hub + + diff --git a/docs/v1.X/docs/cscli/cscli_hub_update.md b/docs/v1.X/docs/cscli/cscli_hub_update.md new file mode 100644 index 000000000..5a45f2d84 --- /dev/null +++ b/docs/v1.X/docs/cscli/cscli_hub_update.md @@ -0,0 +1,39 @@ +## cscli hub update + +Fetch available configs from hub + +### Synopsis + + +Fetches the [.index.json](https://github.com/crowdsecurity/hub/blob/master/.index.json) file from hub, containing the list of available configs. + + +``` +cscli hub update [flags] +``` + +### Options + +``` + -h, --help help for update +``` + +### Options inherited from parent commands + +``` + -a, --all List as well disabled items + -b, --branch string Use given branch from hub + -c, --config string path to crowdsec config file (default "/etc/crowdsec/config.yaml") + --debug Set logging to debug. + --error Set logging to error. + --info Set logging to info. + -o, --output string Output format : human, json, raw. + --trace Set logging to trace. + --warning Set logging to warning. +``` + +### SEE ALSO + +* [cscli hub](cscli_hub.md) - Manage Hub + + diff --git a/docs/v1.X/docs/cscli/cscli_machines.md b/docs/v1.X/docs/cscli/cscli_machines.md new file mode 100644 index 000000000..c298a28cb --- /dev/null +++ b/docs/v1.X/docs/cscli/cscli_machines.md @@ -0,0 +1,46 @@ +## cscli machines + +Manage local API machines + +### Synopsis + + +Machines Management. + +To list/add/delete machines + + +### Examples + +``` +cscli machines [action] +``` + +### Options + +``` + -h, --help help for machines +``` + +### Options inherited from parent commands + +``` + -c, --config string path to crowdsec config file (default "/etc/crowdsec/config.yaml") + --debug Set logging to debug. + --error Set logging to error. + --info Set logging to info. + -o, --output string Output format : human, json, raw. + --trace Set logging to trace. + --warning Set logging to warning. +``` + +### SEE ALSO + +* [cscli](cscli.md) - cscli allows you to manage crowdsec +* [cscli machines add](cscli_machines_add.md) - add machine to the database. 
+* [cscli machines delete](cscli_machines_delete.md) - delete machines
+* [cscli machines list](cscli_machines_list.md) - List machines
+* [cscli machines register](cscli_machines_register.md) - register a machine to a remote API
+* [cscli machines validate](cscli_machines_validate.md) - validate a machine to access the local API
+
+
diff --git a/docs/v1.X/docs/cscli/cscli_machines_add.md b/docs/v1.X/docs/cscli/cscli_machines_add.md
new file mode 100644
index 000000000..bf53382eb
--- /dev/null
+++ b/docs/v1.X/docs/cscli/cscli_machines_add.md
@@ -0,0 +1,50 @@
+## cscli machines add
+
+add machine to the database.
+
+### Synopsis
+
+Register a new machine in the database. cscli should be on the same machine as LAPI.
+
+```
+cscli machines add [flags]
+```
+
+### Examples
+
+```
+cscli machines add -m MyTestMachine
+cscli machines add --machine TestMachine --password password
+
+```
+
+### Options
+
+```
+ -a, --auto add the machine automatically (generate the machine ID and the password)
+ -f, --file string output file destination
+ --force will force if the machine was already added
+ -h, --help help for add
+ -i, --interactive interactive mode to enter the machine ID and password
+ -m, --machine string machine ID to login to the API
+ -p, --password string machine password to login to the API
+ -u, --url string URL of the API
+```
+
+### Options inherited from parent commands
+
+```
+ -c, --config string path to crowdsec config file (default "/etc/crowdsec/config.yaml")
+ --debug Set logging to debug.
+ --error Set logging to error.
+ --info Set logging to info.
+ -o, --output string Output format : human, json, raw.
+ --trace Set logging to trace.
+ --warning Set logging to warning.
+```
+
+### SEE ALSO
+
+* [cscli machines](cscli_machines.md) - Manage local API machines
+
+
diff --git a/docs/v1.X/docs/cscli/cscli_machines_delete.md b/docs/v1.X/docs/cscli/cscli_machines_delete.md
new file mode 100644
index 000000000..79755cb90
--- /dev/null
+++ b/docs/v1.X/docs/cscli/cscli_machines_delete.md
@@ -0,0 +1,42 @@
+## cscli machines delete
+
+delete machines
+
+### Synopsis
+
+delete machines
+
+```
+cscli machines delete --machine MyTestMachine [flags]
+```
+
+### Examples
+
+```
+cscli machines delete --machine test
+```
+
+### Options
+
+```
+ -h, --help help for delete
+ -m, --machine string machine to delete
+```
+
+### Options inherited from parent commands
+
+```
+ -c, --config string path to crowdsec config file (default "/etc/crowdsec/config.yaml")
+ --debug Set logging to debug.
+ --error Set logging to error.
+ --info Set logging to info.
+ -o, --output string Output format : human, json, raw.
+ --trace Set logging to trace.
+ --warning Set logging to warning.
+```
+
+### SEE ALSO
+
+* [cscli machines](cscli_machines.md) - Manage local API machines
+
+
diff --git a/docs/v1.X/docs/cscli/cscli_machines_list.md b/docs/v1.X/docs/cscli/cscli_machines_list.md
new file mode 100644
index 000000000..6cef8fe28
--- /dev/null
+++ b/docs/v1.X/docs/cscli/cscli_machines_list.md
@@ -0,0 +1,41 @@
+## cscli machines list
+
+List machines
+
+### Synopsis
+
+List machines
+
+```
+cscli machines list [flags]
+```
+
+### Examples
+
+```
+cscli machines list
+```
+
+### Options
+
+```
+ -h, --help help for list
+```
+
+### Options inherited from parent commands
+
+```
+ -c, --config string path to crowdsec config file (default "/etc/crowdsec/config.yaml")
+ --debug Set logging to debug.
+ --error Set logging to error.
+ --info Set logging to info.
+ -o, --output string Output format : human, json, raw.
+ --trace Set logging to trace.
+ --warning Set logging to warning.
+```
+
+### SEE ALSO
+
+* [cscli machines](cscli_machines.md) - Manage local API machines
+
+
diff --git a/docs/v1.X/docs/cscli/cscli_machines_register.md b/docs/v1.X/docs/cscli/cscli_machines_register.md
new file mode 100644
index 000000000..e95d90dc6
--- /dev/null
+++ b/docs/v1.X/docs/cscli/cscli_machines_register.md
@@ -0,0 +1,44 @@
+## cscli machines register
+
+register a machine to a remote API
+
+### Synopsis
+
+register a machine to a remote API.
+/!\ The machine will not be validated. You have to connect to the remote API server and run 'cscli machines validate -m '
+
+```
+cscli machines register -u http://127.0.0.1:8080/ [flags]
+```
+
+### Examples
+
+```
+cscli machines register
+```
+
+### Options
+
+```
+ -f, --file string output file destination
+ -h, --help help for register
+ -u, --url string URL of the API
+```
+
+### Options inherited from parent commands
+
+```
+ -c, --config string path to crowdsec config file (default "/etc/crowdsec/config.yaml")
+ --debug Set logging to debug.
+ --error Set logging to error.
+ --info Set logging to info.
+ -o, --output string Output format : human, json, raw.
+ --trace Set logging to trace.
+ --warning Set logging to warning.
+```
+
+### SEE ALSO
+
+* [cscli machines](cscli_machines.md) - Manage local API machines
+
+
diff --git a/docs/v1.X/docs/cscli/cscli_machines_validate.md b/docs/v1.X/docs/cscli/cscli_machines_validate.md
new file mode 100644
index 000000000..cbf208159
--- /dev/null
+++ b/docs/v1.X/docs/cscli/cscli_machines_validate.md
@@ -0,0 +1,42 @@
+## cscli machines validate
+
+validate a machine to access the local API
+
+### Synopsis
+
+validate a machine to access the local API.
+
+```
+cscli machines validate [flags]
+```
+
+### Examples
+
+```
+cscli machines validate --machine test
+```
+
+### Options
+
+```
+ -h, --help help for validate
+ -m, --machine string machine to validate
+```
+
+### Options inherited from parent commands
+
+```
+ -c, --config string path to crowdsec config file (default "/etc/crowdsec/config.yaml")
+ --debug Set logging to debug.
+ --error Set logging to error.
+ --info Set logging to info.
+ -o, --output string Output format : human, json, raw.
+ --trace Set logging to trace.
+ --warning Set logging to warning.
+```
+
+### SEE ALSO
+
+* [cscli machines](cscli_machines.md) - Manage local API machines
+
+
diff --git a/docs/v1.X/docs/cscli/cscli_metrics.md b/docs/v1.X/docs/cscli/cscli_metrics.md
new file mode 100644
index 000000000..0e129ad03
--- /dev/null
+++ b/docs/v1.X/docs/cscli/cscli_metrics.md
@@ -0,0 +1,36 @@
+## cscli metrics
+
+Display crowdsec prometheus metrics.
+
+### Synopsis
+
+Fetch metrics from the prometheus server and display them in a human-friendly way
+
+```
+cscli metrics [flags]
+```
+
+### Options
+
+```
+ -h, --help help for metrics
+ -u, --url string Prometheus url (default "http://127.0.0.1:6060/metrics")
+```
+
+### Options inherited from parent commands
+
+```
+ -c, --config string path to crowdsec config file (default "/etc/crowdsec/config.yaml")
+ --debug Set logging to debug.
+ --error Set logging to error.
+ --info Set logging to info.
+ -o, --output string Output format : human, json, raw.
+ --trace Set logging to trace.
+ --warning Set logging to warning.
+``` + +### SEE ALSO + +* [cscli](cscli.md) - cscli allows you to manage crowdsec + + diff --git a/docs/v1.X/docs/cscli/cscli_parsers.md b/docs/v1.X/docs/cscli/cscli_parsers.md new file mode 100644 index 000000000..1dc40a604 --- /dev/null +++ b/docs/v1.X/docs/cscli/cscli_parsers.md @@ -0,0 +1,47 @@ +## cscli parsers + +Install/Remove/Upgrade/Inspect parser(s) from hub + +### Synopsis + +Install/Remove/Upgrade/Inspect parser(s) from hub + +### Examples + +``` +cscli parsers install crowdsecurity/sshd-logs +cscli parsers inspect crowdsecurity/sshd-logs +cscli parsers upgrade crowdsecurity/sshd-logs +cscli parsers list +cscli parsers remove crowdsecurity/sshd-logs + +``` + +### Options + +``` + -h, --help help for parsers +``` + +### Options inherited from parent commands + +``` + -c, --config string path to crowdsec config file (default "/etc/crowdsec/config.yaml") + --debug Set logging to debug. + --error Set logging to error. + --info Set logging to info. + -o, --output string Output format : human, json, raw. + --trace Set logging to trace. + --warning Set logging to warning. +``` + +### SEE ALSO + +* [cscli](cscli.md) - cscli allows you to manage crowdsec +* [cscli parsers inspect](cscli_parsers_inspect.md) - Inspect given parser +* [cscli parsers install](cscli_parsers_install.md) - Install given parser(s) +* [cscli parsers list](cscli_parsers_list.md) - List all parsers or given one +* [cscli parsers remove](cscli_parsers_remove.md) - Remove given parser(s) +* [cscli parsers upgrade](cscli_parsers_upgrade.md) - Upgrade given parser(s) + + diff --git a/docs/v1.X/docs/cscli/cscli_parsers_inspect.md b/docs/v1.X/docs/cscli/cscli_parsers_inspect.md new file mode 100644 index 000000000..db18ec222 --- /dev/null +++ b/docs/v1.X/docs/cscli/cscli_parsers_inspect.md @@ -0,0 +1,43 @@ +## cscli parsers inspect + +Inspect given parser + +### Synopsis + +Inspect given parser + +``` +cscli parsers inspect [name] [flags] +``` + +### Examples + +``` +cscli parsers inspect crowdsec/xxx +``` + +### Options + +``` + -h, --help help for inspect + -u, --url string Prometheus url (default "http://127.0.0.1:6060/metrics") +``` + +### Options inherited from parent commands + +``` + -b, --branch string Use given branch from hub + -c, --config string path to crowdsec config file (default "/etc/crowdsec/config.yaml") + --debug Set logging to debug. + --error Set logging to error. + --info Set logging to info. + -o, --output string Output format : human, json, raw. + --trace Set logging to trace. + --warning Set logging to warning. +``` + +### SEE ALSO + +* [cscli parsers](cscli_parsers.md) - Install/Remove/Upgrade/Inspect parser(s) from hub + + diff --git a/docs/v1.X/docs/cscli/cscli_parsers_install.md b/docs/v1.X/docs/cscli/cscli_parsers_install.md new file mode 100644 index 000000000..18d72cadb --- /dev/null +++ b/docs/v1.X/docs/cscli/cscli_parsers_install.md @@ -0,0 +1,44 @@ +## cscli parsers install + +Install given parser(s) + +### Synopsis + +Fetch and install given parser(s) from hub + +``` +cscli parsers install [config] [flags] +``` + +### Examples + +``` +cscli parsers install crowdsec/xxx crowdsec/xyz +``` + +### Options + +``` + -d, --download-only Only download packages, don't enable + --force Force install : Overwrite tainted and outdated files + -h, --help help for install +``` + +### Options inherited from parent commands + +``` + -b, --branch string Use given branch from hub + -c, --config string path to crowdsec config file (default "/etc/crowdsec/config.yaml") + --debug Set logging to debug. 
+ --error Set logging to error.
+ --info Set logging to info.
+ -o, --output string Output format : human, json, raw.
+ --trace Set logging to trace.
+ --warning Set logging to warning.
+```
+
+### SEE ALSO
+
+* [cscli parsers](cscli_parsers.md) - Install/Remove/Upgrade/Inspect parser(s) from hub
+
+
diff --git a/docs/v1.X/docs/cscli/cscli_parsers_list.md b/docs/v1.X/docs/cscli/cscli_parsers_list.md
new file mode 100644
index 000000000..86f3ae1dd
--- /dev/null
+++ b/docs/v1.X/docs/cscli/cscli_parsers_list.md
@@ -0,0 +1,44 @@
+## cscli parsers list
+
+List all parsers or given one
+
+### Synopsis
+
+List all parsers or given one
+
+```
+cscli parsers list [name] [flags]
+```
+
+### Examples
+
+```
+cscli parsers list
+cscli parsers list crowdsecurity/xxx
+```
+
+### Options
+
+```
+ -a, --all List as well disabled items
+ -h, --help help for list
+```
+
+### Options inherited from parent commands
+
+```
+ -b, --branch string Use given branch from hub
+ -c, --config string path to crowdsec config file (default "/etc/crowdsec/config.yaml")
+ --debug Set logging to debug.
+ --error Set logging to error.
+ --info Set logging to info.
+ -o, --output string Output format : human, json, raw.
+ --trace Set logging to trace.
+ --warning Set logging to warning.
+```
+
+### SEE ALSO
+
+* [cscli parsers](cscli_parsers.md) - Install/Remove/Upgrade/Inspect parser(s) from hub
+
+
diff --git a/docs/v1.X/docs/cscli/cscli_parsers_remove.md b/docs/v1.X/docs/cscli/cscli_parsers_remove.md
new file mode 100644
index 000000000..1baa68d86
--- /dev/null
+++ b/docs/v1.X/docs/cscli/cscli_parsers_remove.md
@@ -0,0 +1,44 @@
+## cscli parsers remove
+
+Remove given parser(s)
+
+### Synopsis
+
+Remove given parser(s) from hub
+
+```
+cscli parsers remove [config] [flags]
+```
+
+### Examples
+
+```
+cscli parsers remove crowdsec/xxx crowdsec/xyz
+```
+
+### Options
+
+```
+ --all Delete all the parsers
+ -h, --help help for remove
+ --purge Delete source file too
+```
+
+### Options inherited from parent commands
+
+```
+ -b, --branch string Use given branch from hub
+ -c, --config string path to crowdsec config file (default "/etc/crowdsec/config.yaml")
+ --debug Set logging to debug.
+ --error Set logging to error.
+ --info Set logging to info.
+ -o, --output string Output format : human, json, raw.
+ --trace Set logging to trace.
+ --warning Set logging to warning.
+```
+
+### SEE ALSO
+
+* [cscli parsers](cscli_parsers.md) - Install/Remove/Upgrade/Inspect parser(s) from hub
+
+
diff --git a/docs/v1.X/docs/cscli/cscli_parsers_upgrade.md b/docs/v1.X/docs/cscli/cscli_parsers_upgrade.md
new file mode 100644
index 000000000..7e5d2d3cf
--- /dev/null
+++ b/docs/v1.X/docs/cscli/cscli_parsers_upgrade.md
@@ -0,0 +1,44 @@
+## cscli parsers upgrade
+
+Upgrade given parser(s)
+
+### Synopsis
+
+Fetch and upgrade given parser(s) from hub
+
+```
+cscli parsers upgrade [config] [flags]
+```
+
+### Examples
+
+```
+cscli parsers upgrade crowdsec/xxx crowdsec/xyz
+```
+
+### Options
+
+```
+ --all Upgrade all the parsers
+ --force Force install : Overwrite tainted and outdated files
+ -h, --help help for upgrade
+```
+
+### Options inherited from parent commands
+
+```
+ -b, --branch string Use given branch from hub
+ -c, --config string path to crowdsec config file (default "/etc/crowdsec/config.yaml")
+ --debug Set logging to debug.
+ --error Set logging to error.
+ --info Set logging to info.
+ -o, --output string Output format : human, json, raw.
+ --trace Set logging to trace.
+ --warning Set logging to warning.
+``` + +### SEE ALSO + +* [cscli parsers](cscli_parsers.md) - Install/Remove/Upgrade/Inspect parser(s) from hub + + diff --git a/docs/v1.X/docs/cscli/cscli_postoverflows.md b/docs/v1.X/docs/cscli/cscli_postoverflows.md new file mode 100644 index 000000000..287d35222 --- /dev/null +++ b/docs/v1.X/docs/cscli/cscli_postoverflows.md @@ -0,0 +1,46 @@ +## cscli postoverflows + +Install/Remove/Upgrade/Inspect postoverflow(s) from hub + +### Synopsis + +Install/Remove/Upgrade/Inspect postoverflow(s) from hub + +### Examples + +``` +cscli postoverflows install crowdsecurity/cdn-whitelist + cscli postoverflows inspect crowdsecurity/cdn-whitelist + cscli postoverflows upgrade crowdsecurity/cdn-whitelist + cscli postoverflows list + cscli postoverflows remove crowdsecurity/cdn-whitelist +``` + +### Options + +``` + -h, --help help for postoverflows +``` + +### Options inherited from parent commands + +``` + -c, --config string path to crowdsec config file (default "/etc/crowdsec/config.yaml") + --debug Set logging to debug. + --error Set logging to error. + --info Set logging to info. + -o, --output string Output format : human, json, raw. + --trace Set logging to trace. + --warning Set logging to warning. +``` + +### SEE ALSO + +* [cscli](cscli.md) - cscli allows you to manage crowdsec +* [cscli postoverflows inspect](cscli_postoverflows_inspect.md) - Inspect given postoverflow +* [cscli postoverflows list](cscli_postoverflows_list.md) - List all postoverflows or given one +* [cscli postoverflows postoverflows](cscli_postoverflows_postoverflows.md) - Install given postoverflow(s) +* [cscli postoverflows remove](cscli_postoverflows_remove.md) - Remove given postoverflow(s) +* [cscli postoverflows upgrade](cscli_postoverflows_upgrade.md) - Upgrade given postoverflow(s) + + diff --git a/docs/v1.X/docs/cscli/cscli_postoverflows_inspect.md b/docs/v1.X/docs/cscli/cscli_postoverflows_inspect.md new file mode 100644 index 000000000..cbfa2528d --- /dev/null +++ b/docs/v1.X/docs/cscli/cscli_postoverflows_inspect.md @@ -0,0 +1,42 @@ +## cscli postoverflows inspect + +Inspect given postoverflow + +### Synopsis + +Inspect given postoverflow + +``` +cscli postoverflows inspect [config] [flags] +``` + +### Examples + +``` +cscli postoverflows inspect crowdsec/xxx crowdsec/xyz +``` + +### Options + +``` + -h, --help help for inspect +``` + +### Options inherited from parent commands + +``` + -b, --branch string Use given branch from hub + -c, --config string path to crowdsec config file (default "/etc/crowdsec/config.yaml") + --debug Set logging to debug. + --error Set logging to error. + --info Set logging to info. + -o, --output string Output format : human, json, raw. + --trace Set logging to trace. + --warning Set logging to warning. 
+``` + +### SEE ALSO + +* [cscli postoverflows](cscli_postoverflows.md) - Install/Remove/Upgrade/Inspect postoverflow(s) from hub + + diff --git a/docs/v1.X/docs/cscli/cscli_postoverflows_list.md b/docs/v1.X/docs/cscli/cscli_postoverflows_list.md new file mode 100644 index 000000000..ed8f2f364 --- /dev/null +++ b/docs/v1.X/docs/cscli/cscli_postoverflows_list.md @@ -0,0 +1,44 @@ +## cscli postoverflows list + +List all postoverflows or given one + +### Synopsis + +List all postoverflows or given one + +``` +cscli postoverflows list [config] [flags] +``` + +### Examples + +``` +cscli postoverflows list +cscli postoverflows list crowdsecurity/xxx +``` + +### Options + +``` + -a, --all List as well disabled items + -h, --help help for list +``` + +### Options inherited from parent commands + +``` + -b, --branch string Use given branch from hub + -c, --config string path to crowdsec config file (default "/etc/crowdsec/config.yaml") + --debug Set logging to debug. + --error Set logging to error. + --info Set logging to info. + -o, --output string Output format : human, json, raw. + --trace Set logging to trace. + --warning Set logging to warning. +``` + +### SEE ALSO + +* [cscli postoverflows](cscli_postoverflows.md) - Install/Remove/Upgrade/Inspect postoverflow(s) from hub + + diff --git a/docs/v1.X/docs/cscli/cscli_postoverflows_postoverflows.md b/docs/v1.X/docs/cscli/cscli_postoverflows_postoverflows.md new file mode 100644 index 000000000..c1afa8986 --- /dev/null +++ b/docs/v1.X/docs/cscli/cscli_postoverflows_postoverflows.md @@ -0,0 +1,44 @@ +## cscli postoverflows postoverflows + +Install given postoverflow(s) + +### Synopsis + +Fetch and install given postoverflow(s) from hub + +``` +cscli postoverflows postoverflows [config] [flags] +``` + +### Examples + +``` +cscli postoverflows install crowdsec/xxx crowdsec/xyz +``` + +### Options + +``` + -d, --download-only Only download packages, don't enable + --force Force install : Overwrite tainted and outdated files + -h, --help help for postoverflows +``` + +### Options inherited from parent commands + +``` + -b, --branch string Use given branch from hub + -c, --config string path to crowdsec config file (default "/etc/crowdsec/config.yaml") + --debug Set logging to debug. + --error Set logging to error. + --info Set logging to info. + -o, --output string Output format : human, json, raw. + --trace Set logging to trace. + --warning Set logging to warning. +``` + +### SEE ALSO + +* [cscli postoverflows](cscli_postoverflows.md) - Install/Remove/Upgrade/Inspect postoverflow(s) from hub + + diff --git a/docs/v1.X/docs/cscli/cscli_postoverflows_remove.md b/docs/v1.X/docs/cscli/cscli_postoverflows_remove.md new file mode 100644 index 000000000..0b4fab93c --- /dev/null +++ b/docs/v1.X/docs/cscli/cscli_postoverflows_remove.md @@ -0,0 +1,44 @@ +## cscli postoverflows remove + +Remove given postoverflow(s) + +### Synopsis + +remove given postoverflow(s) + +``` +cscli postoverflows remove [config] [flags] +``` + +### Examples + +``` +cscli postoverflows remove crowdsec/xxx crowdsec/xyz +``` + +### Options + +``` + --all Delete all the files in selected scope + -h, --help help for remove + --purge Delete source file in ~/.cscli/hub/ too +``` + +### Options inherited from parent commands + +``` + -b, --branch string Use given branch from hub + -c, --config string path to crowdsec config file (default "/etc/crowdsec/config.yaml") + --debug Set logging to debug. + --error Set logging to error. + --info Set logging to info. 
+ -o, --output string Output format : human, json, raw. + --trace Set logging to trace. + --warning Set logging to warning. +``` + +### SEE ALSO + +* [cscli postoverflows](cscli_postoverflows.md) - Install/Remove/Upgrade/Inspect postoverflow(s) from hub + + diff --git a/docs/v1.X/docs/cscli/cscli_postoverflows_upgrade.md b/docs/v1.X/docs/cscli/cscli_postoverflows_upgrade.md new file mode 100644 index 000000000..1881906cf --- /dev/null +++ b/docs/v1.X/docs/cscli/cscli_postoverflows_upgrade.md @@ -0,0 +1,44 @@ +## cscli postoverflows upgrade + +Upgrade given postoverflow(s) + +### Synopsis + +Fetch and Upgrade given postoverflow(s) from hub + +``` +cscli postoverflows upgrade [config] [flags] +``` + +### Examples + +``` +cscli postoverflows upgrade crowdsec/xxx crowdsec/xyz +``` + +### Options + +``` + -d, --download-only Only download packages, don't enable + --force Force install : Overwrite tainted and outdated files + -h, --help help for upgrade +``` + +### Options inherited from parent commands + +``` + -b, --branch string Use given branch from hub + -c, --config string path to crowdsec config file (default "/etc/crowdsec/config.yaml") + --debug Set logging to debug. + --error Set logging to error. + --info Set logging to info. + -o, --output string Output format : human, json, raw. + --trace Set logging to trace. + --warning Set logging to warning. +``` + +### SEE ALSO + +* [cscli postoverflows](cscli_postoverflows.md) - Install/Remove/Upgrade/Inspect postoverflow(s) from hub + + diff --git a/docs/v1.X/docs/cscli/cscli_scenarios.md b/docs/v1.X/docs/cscli/cscli_scenarios.md new file mode 100644 index 000000000..4912451c6 --- /dev/null +++ b/docs/v1.X/docs/cscli/cscli_scenarios.md @@ -0,0 +1,47 @@ +## cscli scenarios + +Install/Remove/Upgrade/Inspect scenario(s) from hub + +### Synopsis + +Install/Remove/Upgrade/Inspect scenario(s) from hub + +### Examples + +``` +cscli scenarios list [-a] +cscli scenarios install crowdsecurity/ssh-bf +cscli scenarios inspect crowdsecurity/ssh-bf +cscli scenarios upgrade crowdsecurity/ssh-bf +cscli scenarios remove crowdsecurity/ssh-bf + +``` + +### Options + +``` + -h, --help help for scenarios +``` + +### Options inherited from parent commands + +``` + -c, --config string path to crowdsec config file (default "/etc/crowdsec/config.yaml") + --debug Set logging to debug. + --error Set logging to error. + --info Set logging to info. + -o, --output string Output format : human, json, raw. + --trace Set logging to trace. + --warning Set logging to warning. 
+``` + +### SEE ALSO + +* [cscli](cscli.md) - cscli allows you to manage crowdsec +* [cscli scenarios inspect](cscli_scenarios_inspect.md) - Inspect given scenario +* [cscli scenarios install](cscli_scenarios_install.md) - Install given scenario(s) +* [cscli scenarios list](cscli_scenarios_list.md) - List all scenario(s) or given one +* [cscli scenarios remove](cscli_scenarios_remove.md) - Remove given scenario(s) +* [cscli scenarios upgrade](cscli_scenarios_upgrade.md) - Upgrade given scenario(s) + + diff --git a/docs/v1.X/docs/cscli/cscli_scenarios_inspect.md b/docs/v1.X/docs/cscli/cscli_scenarios_inspect.md new file mode 100644 index 000000000..297d4d721 --- /dev/null +++ b/docs/v1.X/docs/cscli/cscli_scenarios_inspect.md @@ -0,0 +1,43 @@ +## cscli scenarios inspect + +Inspect given scenario + +### Synopsis + +Inspect given scenario + +``` +cscli scenarios inspect [config] [flags] +``` + +### Examples + +``` +cscli scenarios inspect crowdsec/xxx +``` + +### Options + +``` + -h, --help help for inspect + -u, --url string Prometheus url (default "http://127.0.0.1:6060/metrics") +``` + +### Options inherited from parent commands + +``` + -b, --branch string Use given branch from hub + -c, --config string path to crowdsec config file (default "/etc/crowdsec/config.yaml") + --debug Set logging to debug. + --error Set logging to error. + --info Set logging to info. + -o, --output string Output format : human, json, raw. + --trace Set logging to trace. + --warning Set logging to warning. +``` + +### SEE ALSO + +* [cscli scenarios](cscli_scenarios.md) - Install/Remove/Upgrade/Inspect scenario(s) from hub + + diff --git a/docs/v1.X/docs/cscli/cscli_scenarios_install.md b/docs/v1.X/docs/cscli/cscli_scenarios_install.md new file mode 100644 index 000000000..d8fecad22 --- /dev/null +++ b/docs/v1.X/docs/cscli/cscli_scenarios_install.md @@ -0,0 +1,44 @@ +## cscli scenarios install + +Install given scenario(s) + +### Synopsis + +Fetch and install given scenario(s) from hub + +``` +cscli scenarios install [config] [flags] +``` + +### Examples + +``` +cscli scenarios install crowdsec/xxx crowdsec/xyz +``` + +### Options + +``` + -d, --download-only Only download packages, don't enable + --force Force install : Overwrite tainted and outdated files + -h, --help help for install +``` + +### Options inherited from parent commands + +``` + -b, --branch string Use given branch from hub + -c, --config string path to crowdsec config file (default "/etc/crowdsec/config.yaml") + --debug Set logging to debug. + --error Set logging to error. + --info Set logging to info. + -o, --output string Output format : human, json, raw. + --trace Set logging to trace. + --warning Set logging to warning. 
+``` + +### SEE ALSO + +* [cscli scenarios](cscli_scenarios.md) - Install/Remove/Upgrade/Inspect scenario(s) from hub + + diff --git a/docs/v1.X/docs/cscli/cscli_scenarios_list.md b/docs/v1.X/docs/cscli/cscli_scenarios_list.md new file mode 100644 index 000000000..af9449012 --- /dev/null +++ b/docs/v1.X/docs/cscli/cscli_scenarios_list.md @@ -0,0 +1,44 @@ +## cscli scenarios list + +List all scenario(s) or given one + +### Synopsis + +List all scenario(s) or given one + +``` +cscli scenarios list [config] [flags] +``` + +### Examples + +``` +cscli scenarios list +cscli scenarios list crowdsecurity/xxx +``` + +### Options + +``` + -a, --all List as well disabled items + -h, --help help for list +``` + +### Options inherited from parent commands + +``` + -b, --branch string Use given branch from hub + -c, --config string path to crowdsec config file (default "/etc/crowdsec/config.yaml") + --debug Set logging to debug. + --error Set logging to error. + --info Set logging to info. + -o, --output string Output format : human, json, raw. + --trace Set logging to trace. + --warning Set logging to warning. +``` + +### SEE ALSO + +* [cscli scenarios](cscli_scenarios.md) - Install/Remove/Upgrade/Inspect scenario(s) from hub + + diff --git a/docs/v1.X/docs/cscli/cscli_scenarios_remove.md b/docs/v1.X/docs/cscli/cscli_scenarios_remove.md new file mode 100644 index 000000000..364d47252 --- /dev/null +++ b/docs/v1.X/docs/cscli/cscli_scenarios_remove.md @@ -0,0 +1,44 @@ +## cscli scenarios remove + +Remove given scenario(s) + +### Synopsis + +remove given scenario(s) + +``` +cscli scenarios remove [config] [flags] +``` + +### Examples + +``` +cscli scenarios remove crowdsec/xxx crowdsec/xyz +``` + +### Options + +``` + --all Delete all the files in selected scope + -h, --help help for remove + --purge Delete source file in ~/.cscli/hub/ too +``` + +### Options inherited from parent commands + +``` + -b, --branch string Use given branch from hub + -c, --config string path to crowdsec config file (default "/etc/crowdsec/config.yaml") + --debug Set logging to debug. + --error Set logging to error. + --info Set logging to info. + -o, --output string Output format : human, json, raw. + --trace Set logging to trace. + --warning Set logging to warning. +``` + +### SEE ALSO + +* [cscli scenarios](cscli_scenarios.md) - Install/Remove/Upgrade/Inspect scenario(s) from hub + + diff --git a/docs/v1.X/docs/cscli/cscli_scenarios_upgrade.md b/docs/v1.X/docs/cscli/cscli_scenarios_upgrade.md new file mode 100644 index 000000000..c8a45fdf6 --- /dev/null +++ b/docs/v1.X/docs/cscli/cscli_scenarios_upgrade.md @@ -0,0 +1,44 @@ +## cscli scenarios upgrade + +Upgrade given scenario(s) + +### Synopsis + +Fetch and Upgrade given scenario(s) from hub + +``` +cscli scenarios upgrade [config] [flags] +``` + +### Examples + +``` +cscli scenarios upgrade crowdsec/xxx crowdsec/xyz +``` + +### Options + +``` + -d, --download-only Only download packages, don't enable + --force Force install : Overwrite tainted and outdated files + -h, --help help for upgrade +``` + +### Options inherited from parent commands + +``` + -b, --branch string Use given branch from hub + -c, --config string path to crowdsec config file (default "/etc/crowdsec/config.yaml") + --debug Set logging to debug. + --error Set logging to error. + --info Set logging to info. + -o, --output string Output format : human, json, raw. + --trace Set logging to trace. + --warning Set logging to warning. 
+``` + +### SEE ALSO + +* [cscli scenarios](cscli_scenarios.md) - Install/Remove/Upgrade/Inspect scenario(s) from hub + + diff --git a/docs/v1.X/docs/cscli/cscli_simulation.md b/docs/v1.X/docs/cscli/cscli_simulation.md new file mode 100644 index 000000000..5ee1ebafd --- /dev/null +++ b/docs/v1.X/docs/cscli/cscli_simulation.md @@ -0,0 +1,42 @@ +## cscli simulation + +Manage simulation status of scenarios + +### Synopsis + +Manage simulation status of scenarios + +### Examples + +``` +cscli simulation status +cscli simulation enable crowdsecurity/ssh-bf +cscli simulation disable crowdsecurity/ssh-bf +``` + +### Options + +``` + -h, --help help for simulation +``` + +### Options inherited from parent commands + +``` + -c, --config string path to crowdsec config file (default "/etc/crowdsec/config.yaml") + --debug Set logging to debug. + --error Set logging to error. + --info Set logging to info. + -o, --output string Output format : human, json, raw. + --trace Set logging to trace. + --warning Set logging to warning. +``` + +### SEE ALSO + +* [cscli](cscli.md) - cscli allows you to manage crowdsec +* [cscli simulation disable](cscli_simulation_disable.md) - Disable the simulation mode. Disable only specified scenarios +* [cscli simulation enable](cscli_simulation_enable.md) - Enable the simulation, globally or on specified scenarios +* [cscli simulation status](cscli_simulation_status.md) - Show simulation mode status + + diff --git a/docs/v1.X/docs/cscli/cscli_simulation_disable.md b/docs/v1.X/docs/cscli/cscli_simulation_disable.md new file mode 100644 index 000000000..57a8fe79e --- /dev/null +++ b/docs/v1.X/docs/cscli/cscli_simulation_disable.md @@ -0,0 +1,41 @@ +## cscli simulation disable + +Disable the simulation mode. Disable only specified scenarios + +### Synopsis + +Disable the simulation mode. Disable only specified scenarios + +``` +cscli simulation disable [scenario] [flags] +``` + +### Examples + +``` +cscli simulation disable +``` + +### Options + +``` + -h, --help help for disable +``` + +### Options inherited from parent commands + +``` + -c, --config string path to crowdsec config file (default "/etc/crowdsec/config.yaml") + --debug Set logging to debug. + --error Set logging to error. + --info Set logging to info. + -o, --output string Output format : human, json, raw. + --trace Set logging to trace. + --warning Set logging to warning. +``` + +### SEE ALSO + +* [cscli simulation](cscli_simulation.md) - Manage simulation status of scenarios + + diff --git a/docs/v1.X/docs/cscli/cscli_simulation_enable.md b/docs/v1.X/docs/cscli/cscli_simulation_enable.md new file mode 100644 index 000000000..040c96748 --- /dev/null +++ b/docs/v1.X/docs/cscli/cscli_simulation_enable.md @@ -0,0 +1,41 @@ +## cscli simulation enable + +Enable the simulation, globally or on specified scenarios + +### Synopsis + +Enable the simulation, globally or on specified scenarios + +``` +cscli simulation enable [scenario] [flags] +``` + +### Examples + +``` +cscli simulation enable +``` + +### Options + +``` + -h, --help help for enable +``` + +### Options inherited from parent commands + +``` + -c, --config string path to crowdsec config file (default "/etc/crowdsec/config.yaml") + --debug Set logging to debug. + --error Set logging to error. + --info Set logging to info. + -o, --output string Output format : human, json, raw. + --trace Set logging to trace. + --warning Set logging to warning. 
+``` + +### SEE ALSO + +* [cscli simulation](cscli_simulation.md) - Manage simulation status of scenarios + + diff --git a/docs/v1.X/docs/cscli/cscli_simulation_status.md b/docs/v1.X/docs/cscli/cscli_simulation_status.md new file mode 100644 index 000000000..6c59544be --- /dev/null +++ b/docs/v1.X/docs/cscli/cscli_simulation_status.md @@ -0,0 +1,41 @@ +## cscli simulation status + +Show simulation mode status + +### Synopsis + +Show simulation mode status + +``` +cscli simulation status [flags] +``` + +### Examples + +``` +cscli simulation status +``` + +### Options + +``` + -h, --help help for status +``` + +### Options inherited from parent commands + +``` + -c, --config string path to crowdsec config file (default "/etc/crowdsec/config.yaml") + --debug Set logging to debug. + --error Set logging to error. + --info Set logging to info. + -o, --output string Output format : human, json, raw. + --trace Set logging to trace. + --warning Set logging to warning. +``` + +### SEE ALSO + +* [cscli simulation](cscli_simulation.md) - Manage simulation status of scenarios + + diff --git a/docs/v1.X/docs/docker/README.md b/docs/v1.X/docs/docker/README.md new file mode 100644 index 000000000..702adda58 --- /dev/null +++ b/docs/v1.X/docs/docker/README.md @@ -0,0 +1,129 @@ +# Crowdsec + +Crowdsec - An open-source, lightweight agent to detect and respond to bad behaviours. It also automatically benefits from our global community-wide IP reputation database. + +## Getting Started + +Before starting using docker image, we suggest you to read our documentation to understand all [crowdsec concepts](https://docs.crowdsec.net/). + +### Prerequisities + + +In order to run this container you'll need docker installed. + +* [Windows](https://docs.docker.com/windows/started) +* [OS X](https://docs.docker.com/mac/started/) +* [Linux](https://docs.docker.com/linux/started/) + +### How to use ? + +#### Build + +```shell +git clone https://github.com/crowdsecurity/crowdsec.git && cd crowdsec +docker build -t crowdsec . 
+```
+
+#### Run
+
+The container is built with a specific docker [configuration](https://github.com/crowdsecurity/crowdsec/blob/master/docker/config.yaml) :
+
+You should apply the following configuration before starting it :
+
+* Specify collections|scenarios|parsers/postoverflows to install via the environment variables (by default [`crowdsecurity/linux`](https://hub.crowdsec.net/author/crowdsecurity/collections/linux) is installed)
+* Mount volumes to specify your configuration
+* Mount volumes to specify your log files that should be ingested by crowdsec (set up in acquis.yaml)
+* Mount other volumes : if you want to share the database for example
+
+```shell
+docker run -d -v $(pwd)/config.yaml:/etc/crowdsec/config.yaml \
+    -v $(pwd)/acquis.yaml:/etc/crowdsec/acquis.yaml \
+    -e COLLECTIONS="crowdsecurity/sshd" \
+    -v /var/log/auth.log:/var/log/auth.log \
+    -v /path/mycustom.log:/var/log/mycustom.log \
+    --name crowdsec crowdsec
+```
+
+#### Example
+
+I have my own configuration :
+```shell
+user@cs ~/crowdsec/config $ ls
+acquis.yaml  config.yaml
+```
+
+Here is my acquis.yaml file:
+```yaml
+filenames:
+ - /logs/auth.log
+ - /logs/syslog
+labels:
+  type: syslog
+---
+filename: /logs/apache2/*.log
+labels:
+  type: apache2
+```
+
+So, I want to run crowdsec with :
+
+* My configuration files
+* The logs specified in acquis.yaml ingested
+* The crowdsec sqlite database shared with my host (you need to create the empty file first, otherwise docker will create a directory instead of a simple file)
+* The local API exposed on the host (listens by default on `8080`)
+* The prometheus handler exposed on the host (listens by default on `6060`)
+
+```shell
+touch /path/myDatabase.db
+docker run -d -v $(pwd)/config.yaml:/etc/crowdsec/config.yaml \
+    -v $(pwd)/acquis.yaml:/etc/crowdsec/acquis.yaml \
+    -v /var/log/auth.log:/logs/auth.log \
+    -v /var/log/syslog.log:/logs/syslog.log \
+    -v /var/log/apache:/logs/apache \
+    -v /path/myDatabase.db:/var/lib/crowdsec/data/crowdsec.db \
+    -e COLLECTIONS="crowdsecurity/apache2 crowdsecurity/sshd" \
+    -p 8080:8080 -p 6060:6060 \
+    --name crowdsec crowdsec
+```
+
+### Environment Variables
+
+* `COLLECTIONS` - Collections to install from the [hub](https://hub.crowdsec.net/browse/#collections), separated by space : `-e COLLECTIONS="crowdsecurity/linux crowdsecurity/apache2"`
+* `SCENARIOS` - Scenarios to install from the [hub](https://hub.crowdsec.net/browse/#configurations), separated by space : `-e SCENARIOS="crowdsecurity/http-bad-user-agent crowdsecurity/http-xss-probing"`
+* `PARSERS` - Parsers to install from the [hub](https://hub.crowdsec.net/browse/#configurations), separated by space : `-e PARSERS="crowdsecurity/http-logs crowdsecurity/modsecurity"`
+* `POSTOVERFLOWS` - Postoverflows to install from the [hub](https://hub.crowdsec.net/browse/#configurations), separated by space : `-e POSTOVERFLOWS="crowdsecurity/cdn-whitelist"`
+* `CONFIG_FILE` - Configuration file (default: `/etc/crowdsec/config.yaml`) : `-e CONFIG_FILE=""`
+* `FILE_PATH` - Process a single file in time-machine : `-e FILE_PATH=""`
+* `JOURNALCTL_FILTER` - Process a single journalctl output in time-machine : `-e JOURNALCTL_FILTER=""`
+* `TYPE` - [`Labels.type`](https://docs.crowdsec.net/Crowdsec/v1/references/acquisition/) for file in time-machine : `-e TYPE=""`
+* `TEST_MODE` - Only test configs (default: `false`) : `-e TEST_MODE=""`
+* `DISABLE_AGENT` - Disable the agent (default: `false`) : `-e DISABLE_AGENT=""`
+* `DISABLE_LOCAL_API` - Disable the local API (default: `false`) : `-e DISABLE_LOCAL_API=""`
+* `REGISTER_TO_ONLINE_API` - Register to
Online API (default: `false`) : `-e REGISTER_TO_ONLINE_API=""`
+* `LEVEL_TRACE` - Trace-level (VERY verbose) on stdout (default: `false`) : `-e LEVEL_TRACE=""`
+* `LEVEL_DEBUG` - Debug-level on stdout (default: `false`) : `-e LEVEL_DEBUG=""`
+* `LEVEL_INFO` - Info-level on stdout (default: `false`) : `-e LEVEL_INFO=""`
+
+### Volumes
+
+* `/var/lib/crowdsec/data/` - Directory where all crowdsec data (Databases) is located
+
+* `/etc/crowdsec/` - Directory where all crowdsec configurations are located
+
+#### Useful File Locations
+
+* `/usr/local/bin/crowdsec` - Crowdsec binary
+
+* `/usr/local/bin/cscli` - Crowdsec CLI binary to interact with crowdsec
+
+## Find Us
+
+* [GitHub](https://github.com/crowdsecurity/crowdsec)
+
+## Contributing
+
+Please read [contributing](https://docs.crowdsec.net/Crowdsec/v1/contributing/) for details on our code of conduct, and the process for submitting pull requests to us.
+
+## License
+
+This project is licensed under the MIT License - see the [LICENSE](https://github.com/crowdsecurity/crowdsec/blob/master/LICENSE) file for details.
\ No newline at end of file
diff --git a/docs/v1.X/docs/getting_started/concepts.md b/docs/v1.X/docs/getting_started/concepts.md
new file mode 100644
index 000000000..ef8b57167
--- /dev/null
+++ b/docs/v1.X/docs/getting_started/concepts.md
@@ -0,0 +1,171 @@
+
+# Global overview
+
+{{v1X.crowdsec.Name}} runtime revolves around a few simple concepts :
+
+ - It reads logs (defined via the {{v1X.ref.acquis}} config)
+ - Those logs are parsed via {{v1X.ref.parsers}} and eventually enriched
+ - Those normalized logs are then matched against the {{v1X.ref.scenarios}} that the user deployed
+ - When a scenario is "triggered", {{v1X.crowdsec.Name}} generates an {{v1X.alert.Htmlname}} and eventually one or more associated {{v1X.decision.Htmlname}} :
+    - The alert is here mostly for traceability, and will stay even after the decision expires
+    - The decision, on the other hand, is short-lived, and tells *what* action should be taken against the offending ip/range/user...
+ - This information (the alert, the associated decisions) is then sent to crowdsec's {{v1X.lapi.htmlname}} and stored in the database
+
+As you might have guessed by now, {{v1X.crowdsec.Name}} itself does the detection part and stores those decisions.
+Then, {{v1X.bouncers.htmlname}} can "consume" those decisions (via the very same {{v1X.lapi.htmlname}}) and apply some actual remediation.
+
+## Crowd sourced aspect
+
+ [[References](/Crowdsec/v1/getting_started/crowd-power)]
+
+Whenever the {{v1X.lapi.htmlname}} receives an alert with associated decisions, the meta information about the alert is shared with our central api :
+
+ - The source ip that triggered the alert
+ - The scenario that was triggered
+ - The timestamp of the attack
+
+This is the only information sent to our API. It is then processed on our side so that we can redistribute relevant blocklists to all the participants.
+
+# Configuration items
+
+## Acquisition
+
+[[References](/Crowdsec/v1/references/acquisition/)]
+
+Acquisition configuration defines which streams of information {{v1X.crowdsec.name}} is going to process.
+
+At the time of writing it's mostly files or journald, but it could be more or less any kind of stream, such as a kafka topic or a cloudtrail.
+
+Acquisition configuration always contains a stream (ie. a file to tail) and a tag (ie. "these are in syslog format", "these are non-syslog nginx logs").
+
+File acquisition configuration is defined as :
+
+```yaml
+filenames:
+  - /var/log/auth.log
+labels:
+  type: syslog
+```
+
+The `labels` part is here to tag the incoming logs with a type. `labels.type` is used by the parsers to know which logs to process.
+
+## Stages
+
+[[References](/Crowdsec/v1/references/parsers/#stages)]
+
+The concept of stages is central to data parsing in {{v1X.crowdsec.name}}, as it allows various "steps" of parsing. All parsers belong to a given stage. While users can add or modify the stages order, the following stages exist :
+
+ - `s00-raw` : low level parsers, such as syslog
+ - `s01-parse` : most of the services parsers (ssh, nginx etc.)
+ - `s02-enrich` : enrichment that requires parsed events (ie. geoip-enrichment) or generic parsers that apply on parsed logs (ie. second stage http parser)
+
+
+Every event starts in the first stage, and will move to the next stage once it has been successfully processed by a parser that has the `onsuccess` directive set to `next_stage`, and so on until it reaches the last stage, when it's going to start to be matched against scenarios. Thus an sshd log might follow this pipeline :
+
+ - `s00-raw` : be parsed by `crowdsecurity/syslog-logs` (will move the event to the next stage)
+ - `s01-parse` : be parsed by `crowdsecurity/sshd-logs` (will move the event to the next stage)
+ - `s02-enrich` : be parsed by `crowdsecurity/geoip-enrich` and `crowdsecurity/dateparse-enrich`
+
+## Parsers
+
+[[References](/Crowdsec/v1/references/parsers/)]
+
+For logs to be exploited and analyzed, they need to be parsed and normalized, and this is where parsers are used.
+
+A parser is a YAML configuration file that describes how a string is being parsed. Said string can be a log line, or a field extracted from a previous parser. While a lot of parsers rely on the **GROK** approach (a.k.a regular expression named capture groups), parsers can as well reference enrichment modules to allow specific data processing.
+
+A parser usually has a specific scope. For example, if you are using [nginx](https://nginx.org), you will probably want to use the `crowdsecurity/nginx-logs` parser, which allows your {{v1X.crowdsec.name}} setup to parse nginx's access and error logs.
+
+Parsers are organized into stages to allow pipelines and branching in parsing.
+
+See the [{{v1X.hub.name}}]({{v1X.hub.url}}) to explore parsers, or see below some examples :
+
+ - [apache2 access/error log parser](https://github.com/crowdsecurity/hub/blob/master/parsers/s01-parse/crowdsecurity/apache2-logs.yaml)
+ - [iptables logs parser](https://github.com/crowdsecurity/hub/blob/master/parsers/s01-parse/crowdsecurity/iptables-logs.yaml)
+ - [http logs post-processing](https://github.com/crowdsecurity/hub/blob/master/parsers/s02-enrich/crowdsecurity/http-logs.yaml)
+
+You can as well [write your own](/Crowdsec/v1/write_configurations/parsers/) !
+
+
+
+
+
+## Enrichers
+
+[[References](/Crowdsec/v1/references/enrichers/)]
+
+Enrichment is the action of adding extra context to an event based on the information we already have, so that better decisions can later be taken. In most cases, you should be able to find the relevant enrichers on our {{v1X.hub.htmlname}}.
+
+A common/simple type of enrichment would be [geoip-enrich](https://github.com/crowdsecurity/hub/blob/master/parsers/s02-enrich/crowdsecurity/geoip-enrich.yaml) of an event (adding information such as : origin country, origin AS and origin IP range to an event).
+
+Once again, you should be able to find the ones you're looking for on the {{v1X.hub.htmlname}} !
+
+## Scenarios
+
+[[References](/Crowdsec/v1/references/scenarios/)]
+
+A scenario is the expression of a heuristic that allows you to qualify a specific event (usually an attack). It is a YAML file that describes a set of events characterizing a scenario. Scenarios in {{v1X.crowdsec.name}} gravitate around the [leaky bucket](https://en.wikipedia.org/wiki/Leaky_bucket) principle.
+
+A scenario description includes at least :
+
+ - Event eligibility rules. (For example if we're writing an ssh bruteforce detection we only focus on logs of type `ssh_failed_auth`)
+ - Bucket configuration such as the leak speed or its capacity (in our same ssh bruteforce example, we might allow 1 failed auth per 10s and no more than 5 in a short amount of time: `leakspeed: 10s` `capacity: 5`)
+ - Aggregation rules : per source ip or per other criteria (in our ssh bruteforce example, we will group per source ip)
+
+The description allows for many other rules to be specified (blackhole, distinct filters etc.), to allow rather complex scenarios.
+
+See the [{{v1X.hub.name}}]({{v1X.hub.url}}) to explore scenarios and their capabilities, or see below some examples :
+
+ - [ssh bruteforce detection](https://github.com/crowdsecurity/hub/blob/master/scenarios/crowdsecurity/ssh-bf.yaml)
+ - [distinct http-404 scan](https://github.com/crowdsecurity/hub/blob/master/scenarios/crowdsecurity/http-scan-uniques_404.yaml)
+ - [iptables port scan](https://github.com/crowdsecurity/hub/blob/master/scenarios/crowdsecurity/iptables-scan-multi_ports.yaml)
+
+You can as well [write your own](/Crowdsec/v1/write_configurations/scenarios/) !
+
+
+## Collections
+
+[[References](/Crowdsec/v1/references/collections/)]
+
+To make users' lives easier, "collections" are available, which are just a bundle of parsers and scenarios.
+In this way, if you want to cover basic use-cases of let's say "nginx", you can just install the `crowdsecurity/nginx` collection, composed of the `crowdsecurity/nginx-logs` parser as well as generic http scenarios such as `crowdsecurity/base-http-scenarios`.
+
+As usual, those can be found on the {{v1X.hub.htmlname}} !
+
+## PostOverflows
+
+[[References](/Crowdsec/v1/references/postoverflows)]
+
+A postoverflow is a parser that will be applied on overflows (scenario results) before the decision is written to the local DB or pushed to the API. Parsers in postoverflows are meant to be used for "expensive" enrichment/parsing processes that you do not want to perform on all incoming events, but rather on decisions that are about to be taken.
+
+An example could be a slack/mattermost enrichment plugin that requires human confirmation before applying the decision, or reverse-dns lookup operations.
+
+
+# Runtime items
+
+## Events
+
+[[References](/Crowdsec/v1/references/events)]
+
+An `Event` is the runtime representation of an item being processed by crowdsec : it can be a log line being parsed, or an overflow being reprocessed.
+
+The `Event` object is modified by parsers, scenarios, and directly via user [statics expressions](/Crowdsec/v1/references/parsers/#statics) (for example).
+
+
+## Alerts
+
+[[References](/Crowdsec/v1/references/alerts)]
+
+An `Alert` is the runtime representation of a bucket overflow being processed by crowdsec : it is embedded in an Event.
+
+The `Alert` object is modified by post-overflows and {{v1X.profiles.htmlname}}.
+
+## Decisions
+
+[[References](/Crowdsec/v1/references/decisions)]
+
+A `Decision` is the representation of the consequence of a bucket overflow : a decision against an IP, a range, an AS, a Country, a User, a Session etc.
+
+`Decisions` are generated by the Local API (LAPI) when an `Alert` is received, according to the existing {{v1X.profiles.htmlname}}.
\ No newline at end of file
diff --git a/docs/v1.X/docs/getting_started/crowdsec-tour.md b/docs/v1.X/docs/getting_started/crowdsec-tour.md
new file mode 100644
index 000000000..a319704ed
--- /dev/null
+++ b/docs/v1.X/docs/getting_started/crowdsec-tour.md
@@ -0,0 +1,189 @@
+
+## List installed configurations
+
+```bash
+{{v1X.cli.bin}} hub list
+
+```
+
+On the machine where you deployed {{v1X.crowdsec.name}}, type `{{v1X.cli.bin}} hub list` to see the installed configurations.
+This list represents the parsers, scenarios and/or collections that you deployed. They represent what your {{v1X.crowdsec.name}} setup can read (logs) and detect (scenarios). `{{v1X.cli.bin}} hub list -a` will list all available configurations in the hub.
+
+
+Check [{{v1X.cli.name}} configuration](/Crowdsec/v1/user_guide/cscli/) management for more !
+
+
+ output example +```bash +$ ./cscli -c dev.yaml hub list +INFO[0000] Loaded 13 collecs, 17 parsers, 20 scenarios, 3 post-overflow parsers +INFO[0000] unmanaged items : 7 local, 0 tainted +INFO[0000] PARSERS: +---------------------------------------------------------------------------------------------------------------------------------------------------------------- + NAME 📦 STATUS VERSION LOCAL PATH +---------------------------------------------------------------------------------------------------------------------------------------------------------------- + crowdsecurity/syslog-logs ✔️ enabled 0.1 /.../config/parsers/s00-raw/syslog-logs.yaml + crowdsecurity/dateparse-enrich ✔️ enabled 0.1 /.../config/parsers/s02-enrich/dateparse-enrich.yaml + crowdsecurity/geoip-enrich ✔️ enabled 0.2 /.../config/parsers/s02-enrich/geoip-enrich.yaml + crowdsecurity/sshd-logs ✔️ enabled 0.1 /.../config/parsers/s01-parse/sshd-logs.yaml +---------------------------------------------------------------------------------------------------------------------------------------------------------------- +INFO[0000] SCENARIOS: +----------------------------------------------------------------------------------------------------------------------------------- + NAME 📦 STATUS VERSION LOCAL PATH +----------------------------------------------------------------------------------------------------------------------------------- + crowdsecurity/ssh-bf ✔️ enabled 0.1 /.../config/scenarios/ssh-bf.yaml +----------------------------------------------------------------------------------------------------------------------------------- +INFO[0000] COLLECTIONS: +----------------------------------------------------------------------------------------------------------------------------------- + NAME 📦 STATUS VERSION LOCAL PATH +----------------------------------------------------------------------------------------------------------------------------------- + crowdsecurity/sshd ✔️ enabled 0.1 /.../config/collections/sshd.yaml + crowdsecurity/linux ✔️ enabled 0.2 /.../config/collections/linux.yaml +----------------------------------------------------------------------------------------------------------------------------------- +INFO[0000] POSTOVERFLOWS: +-------------------------------------- + NAME 📦 STATUS VERSION LOCAL PATH +-------------------------------------- +-------------------------------------- +``` +
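+
+To refresh the local index before browsing what the hub offers, a typical sequence looks like this (a minimal sketch; both commands are used elsewhere in this documentation):
+
+```bash
+# fetch the latest hub index, then list every available configuration
+sudo cscli hub update
+sudo cscli hub list -a
+```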
+ +## List active decisions + + +```bash +{{v1X.cli.bin}} decisions list +``` + +If you just deployed {{v1X.crowdsec.name}}, the list might be empty, but don't worry, it simply means you haven't yet been attacked, congrats! + +Check [{{v1X.cli.name}} decisions](/Crowdsec/v1/user_guide/decision_management/) management for more ! + + +
+ output example +```bash +$ cscli decisions list ++----+----------+-------------+----------------------+--------+---------+----+--------+------------------+ +| ID | SOURCE | SCOPE:VALUE | REASON | ACTION | COUNTRY | AS | EVENTS | EXPIRATION | ++----+----------+-------------+----------------------+--------+---------+----+--------+------------------+ +| 1 | crowdsec | Ip:1.2.3.6 | crowdsecurity/ssh-bf | ban | US | | 6 | 59m48.467053872s | +| 2 | cscli | Ip:1.2.3.4 | | ban | | | 1 | 3h59m57.671401352s | ++----+----------+-------------+----------------------+--------+---------+----+--------+--------------------+ +``` +
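+
+Decisions can also be managed by hand (the `cscli` source in the list right below). A minimal sketch; the exact flags are documented by `cscli decisions add --help`:
+
+```bash
+# manually ban an IP for four hours...
+sudo cscli decisions add --ip 1.2.3.4 --duration 4h
+# ...then lift the ban
+sudo cscli decisions delete --ip 1.2.3.4
+```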
+
+There are different ban sources:
+
+ - crowdsec : bans triggered locally
+ - api : bans fetched from the API as part of the global consensus
+ - cscli : bans added via `{{v1X.cli.bin}} decisions add`
+
+
+## List alerts
+
+
+```bash
+{{v1X.cli.bin}} alerts list
+```
+
+While decisions are no longer shown once they expire (or are manually deleted), the alerts remain visible, allowing you to keep track of past decisions.
+Here you will see the alerts even if the associated decisions have expired.
+
+
+ output example +```bash +$ cscli alerts list --since 1h ++----+-------------+----------------------------+---------+----+-----------+---------------------------+ +| ID | SCOPE:VALUE | REASON | COUNTRY | AS | DECISIONS | CREATED AT | ++----+-------------+----------------------------+---------+----+-----------+---------------------------+ +| 5 | Ip:1.2.3.6 | crowdsecurity/ssh-bf (0.1) | US | | ban:1 | 2020-10-29T11:33:36+01:00 | ++----+-------------+----------------------------+---------+----+-----------+---------------------------+ + +``` +
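+
+You can also narrow the list down or dig into a single alert; a short sketch, assuming the filter flags follow the same pattern as `--since` above (see `cscli alerts list --help`):
+
+```bash
+# only the alerts concerning a given source IP
+sudo cscli alerts list --ip 1.2.3.6
+# full detail of one alert, by the ID shown in the first column
+sudo cscli alerts inspect -d 5
+```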
+
+
+## Monitor on-going activity (prometheus)
+
+```bash
+{{v1X.cli.bin}} metrics
+```
+
+The metrics displayed are extracted from {{v1X.crowdsec.name}}'s prometheus instrumentation.
+The indicators are grouped by scope :
+
+ - Buckets : Know which buckets are created and/or overflowed (scenario efficiency)
+ - Acquisition : Know which files produce logs and whether they are parsed (and poured into buckets)
+ - Parser : Know how frequently the individual parsers are triggered and their success rate
+ - Local Api Metrics : Know how often each endpoint of crowdsec's local API has been used
+
+
+ output example + +```bash +$ {{v1X.cli.bin}} metrics +INFO[0000] Buckets Metrics: ++--------------------------------+---------------+-----------+--------------+--------+---------+ +| BUCKET | CURRENT COUNT | OVERFLOWS | INSTANCIATED | POURED | EXPIRED | ++--------------------------------+---------------+-----------+--------------+--------+---------+ +| crowdsecurity/ssh-bf | 1 | 1 | 2 | 10 | - | +| crowdsecurity/ssh-bf_user-enum | 1 | - | 1 | 1 | - | ++--------------------------------+---------------+-----------+--------------+--------+---------+ +INFO[0000] Acquisition Metrics: ++-------------------+------------+--------------+----------------+------------------------+ +| SOURCE | LINES READ | LINES PARSED | LINES UNPARSED | LINES POURED TO BUCKET | ++-------------------+------------+--------------+----------------+------------------------+ +| /tmp/test.log | 10 | 10 | - | 11 | +| /var/log/auth.log | 2 | - | 2 | - | +| /var/log/syslog | 4 | - | 4 | - | ++-------------------+------------+--------------+----------------+------------------------+ +INFO[0000] Parser Metrics: ++--------------------------------+------+--------+----------+ +| PARSERS | HITS | PARSED | UNPARSED | ++--------------------------------+------+--------+----------+ +| child-crowdsecurity/sshd-logs | 10 | 10 | - | +| crowdsecurity/dateparse-enrich | 10 | 10 | - | +| crowdsecurity/geoip-enrich | 10 | 10 | - | +| crowdsecurity/sshd-logs | 10 | 10 | - | +| crowdsecurity/syslog-logs | 16 | 16 | - | ++--------------------------------+------+--------+----------+ +INFO[0000] Local Api Metrics: ++--------------------+--------+------+ +| ROUTE | METHOD | HITS | ++--------------------+--------+------+ +| /v1/alerts | GET | 2 | +| /v1/alerts | POST | 2 | +| /v1/watchers/login | POST | 4 | ++--------------------+--------+------+ +``` + +
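+
+The same counters are available in raw prometheus format on the endpoint crowdsec exposes (by default `http://127.0.0.1:6060/metrics`, see the prometheus documentation). A quick way to peek at them:
+
+```bash
+# dump the crowdsec-specific metrics from the prometheus endpoint
+curl -s http://127.0.0.1:6060/metrics | grep '^cs_'
+```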
+
+## Deploy dashboard
+
+```bash
+cscli dashboard setup --listen 0.0.0.0
+```
+
+A docker {{v1X.metabase.Htmlname}} container can be deployed with `cscli dashboard`.
+It requires docker, [installation instructions are available here](https://docs.docker.com/engine/install/).
+
+## Logs
+
+```bash
+tail -f /var/log/crowdsec.log
+```
+
+ - `/var/log/crowdsec.log` is the main log, it shows ongoing decisions and acquisition/parsing/scenario errors.
+ - `/var/log/crowdsec_api.log` is the access log of the local api (LAPI)
+
+## Installing collections
+
+```bash
+cscli collections install crowdsecurity/nginx
+```
+
+Collections are bundles of parsers/scenarios that form a coherent ensemble to analyze/detect attacks for a specific service. They are the most common way to deploy configurations.
+
+They can be found and browsed on the {{v1X.hub.htmlname}}
diff --git a/docs/v1.X/docs/getting_started/installation.md b/docs/v1.X/docs/getting_started/installation.md
new file mode 100644
index 000000000..87665a732
--- /dev/null
+++ b/docs/v1.X/docs/getting_started/installation.md
@@ -0,0 +1,86 @@
+# Installation
+
+Fetch {{v1X.crowdsec.name}}'s latest version [here]({{v1X.crowdsec.download_url}}).
+
+```bash
+tar xvzf crowdsec-release.tgz
+```
+```bash
+cd crowdsec-v1.X.X
+```
+
+A {{v1X.wizard.name}} is provided to help you deploy {{v1X.crowdsec.name}} and {{v1X.cli.name}}.
+
+## Using the interactive wizard
+
+```
+sudo {{v1X.wizard.bin}} -i
+```
+
+![crowdsec](../assets/images/crowdsec_install.gif)
+
+The {{v1X.wizard.name}} is going to guide you through the following steps :
+
+ - detect services that are present on your machine
+ - detect the logs of the selected services
+ - suggest collections (parsers and scenarios) to deploy
+ - deploy & configure {{v1X.crowdsec.name}} in order to watch the selected logs for the selected scenarios
+
+The process should take less than a minute, [please report if there are any issues]({{v1X.wizard.bugreport}}).
+
+You are then ready to [take a tour](/Crowdsec/v1/getting_started/crowdsec-tour/) of your freshly deployed {{v1X.crowdsec.name}} !
+
+!!! info
+    Keep in mind that {{v1X.crowdsec.name}} is only in charge of the "detection", and won't block anything on its own. You need to deploy a {{v1X.bouncers.Htmlname}} to "apply" decisions.
+
+## Binary installation
+
+> you of little faith
+
+```
+sudo {{v1X.wizard.bin}} --bininstall
+```
+
+This will deploy valid (but empty) {{v1X.crowdsec.name}} configuration files and the binaries.
+Beware, in this state, {{v1X.crowdsec.name}} won't monitor/detect anything unless configured.
+
+```
+cscli collections install crowdsecurity/linux
+```
+
+
+Installing at least the `crowdsecurity/linux` collection will provide you with :
+
+ - syslog parser
+ - geoip enrichment
+ - date parsers
+
+
+You will also need to configure your {{v1X.ref.acquis}} file to feed {{v1X.crowdsec.name}} some logs.
+
+
+
+
+
+## From source
+
+!!! warning "Requirements"
+
+    * [Go](https://golang.org/doc/install) v1.13+
+    * `git clone {{v1X.crowdsec.url}}`
+    * [jq](https://stedolan.github.io/jq/download/)
+
+
+Go into the {{v1X.crowdsec.name}} folder and build the binaries :
+
+```bash
+cd crowdsec
+```
+```bash
+make build
+```
+
+
+The {{v1X.crowdsec.name}} binary will be located in `./cmd/crowdsec/crowdsec` and the {{v1X.cli.name}} binary in `cmd/crowdsec-cli/{{v1X.cli.bin}}`
+
+Now, you can install either with the [interactive wizard](#using-the-interactive-wizard) or in [unattended mode](#using-unattended-mode).
\ No newline at end of file diff --git a/docs/v1.X/docs/index.md b/docs/v1.X/docs/index.md new file mode 100644 index 000000000..01d5e76d4 --- /dev/null +++ b/docs/v1.X/docs/index.md @@ -0,0 +1,32 @@ +
[[Hub]]({{v1X.hub.url}}) [[Releases]]({{v1X.crowdsec.download_url}})
+
+# Architecture
+
+![Architecture](assets/images/crowdsec_architecture.png)
+
+
+## Components
+
+The {{v1X.crowdsec.name}} ecosystem is based on the following components :
+
+ - [{{v1X.crowdsec.Name}}]({{v1X.crowdsec.url}}) is the lightweight service that processes logs and keeps track of attacks.
+ - [{{v1X.lapi.Name}}]({{v1X.lapi.url}}) is a core component of crowdsec-agent that exposes a local API to interact with crowdsec-agent.
+ - [{{v1X.cli.name}}]({{v1X.cli.main_doc}}) is the command line interface for humans; it allows you to view, add, or remove bans as well as to install, find, or update scenarios and parsers
+ - [{{v1X.bouncers.name}}]({{v1X.hub.bouncers_url}}) are the components that block malevolent traffic, and can be deployed anywhere in your stack
+
+## Moving forward
+
+To learn more about {{v1X.crowdsec.name}} and give it a try, please see :
+
+ - [How to install {{v1X.crowdsec.name}}](/Crowdsec/v1/getting_started/installation/)
+ - [Take a quick tour of {{v1X.crowdsec.name}} and {{v1X.cli.name}} features](/Crowdsec/v1/getting_started/crowdsec-tour/)
+ - [Observability of {{v1X.crowdsec.name}}](/Crowdsec/v1/observability/overview/)
+ - [Understand {{v1X.crowdsec.name}} configuration](/Crowdsec/v1/getting_started/concepts/)
+ - [Deploy {{v1X.bouncers.name}} to stop malevolent peers](/Crowdsec/v1/bouncers/)
+ - [FAQ](/faq/)
+
+Don't hesitate to reach out if you're facing issues :
+
+ - [report a bug](https://github.com/crowdsecurity/crowdsec/issues/new?assignees=&labels=bug&template=bug_report.md&title=Bug%2F)
+ - [suggest an improvement](https://github.com/crowdsecurity/crowdsec/issues/new?assignees=&labels=enhancement&template=feature_request.md&title=Improvment%2F)
+ - [ask for help on the forums](https://discourse.crowdsec.net)
\ No newline at end of file
diff --git a/docs/v1.X/docs/localAPI/index.md b/docs/v1.X/docs/localAPI/index.md
new file mode 100644
index 000000000..3b1ca4985
--- /dev/null
+++ b/docs/v1.X/docs/localAPI/index.md
@@ -0,0 +1,99 @@
+# Local API
+
+The Local API (LAPI) is a core component of {{v1X.crowdsec.name}} and has a few essential missions :
+
+ - Allow crowdsec machines to push alerts & decisions to a database
+ - Allow bouncers to consume said alerts & decisions from the database
+ - Allow `cscli` to view, add, or delete decisions
+
+
+## Authentication
+
+There are two kinds of authentication to the local API :
+
+ - {{v1X.bouncers.Name}} : they authenticate with a simple API key and can only read decisions
+
+ - Machines : they authenticate with a login & password and can not only read decisions, but create new ones
+
+
+### {{v1X.bouncers.Name}}
+
+To register a bouncer with your API, you need to run the following command on the server where the API is installed:
+
+```bash
+$ cscli bouncers add testBouncer
+```
+
+and keep the generated API key to use it in your {{v1X.bouncers.Name}} configuration file.
+
+### Machines
+
+To allow a machine to communicate with the local API, the machine needs to be validated by an administrator of the local API.
+
+There are two ways to register a crowdsec machine with a local API.
+
+* You can create a machine directly on the API server; it will be automatically validated. Run the following command on the server where the API is installed:
+
+```bash
+$ cscli machines add testMachine
+```
+
+If your crowdsec runs on the same server as the local API, then your credentials file will be generated automatically; otherwise you will have to copy/paste them into your remote crowdsec credentials file (`/etc/crowdsec/local_api_credentials.yaml`)
+
+* You can use `cscli` to register with the API server:
+
+```
+cscli lapi register -u <api_url>
+```
+
+And validate it with `cscli` on the server where the API is installed:
+
+```
+cscli machines validate <machine_name>
+```
+
+!!! tip
+    You can use `cscli machines list` to list all the machines registered to the API, and view the ones that are not validated yet.
+
+## Configuration
+
+### Client
+
+By default, `crowdsec` and `cscli` use `127.0.0.1:8080` as the default local API. But you might want to use a remote API and therefore configure a different endpoint for your API client.
+
+#### Register to a remote API server
+
+* On the remote crowdsec server, run:
+
+```
+$ cscli lapi register -u http://<host>:<port>
+```
+
+* On the local API server, validate the machine by running the command:
+
+```
+$ cscli machines validate <machine_name>
+```
+
+
+### Server
+
+#### Configure listen URL
+
+If you want your local API to be used by a remote crowdsec, it is possible to modify the URL it listens on.
+You just have to modify the [`listen_uri` option](/Crowdsec/v1/references/crowdsec-config/#listen_uri) in the main configuration file.
+Then see [how to configure your crowdsec to use a remote API](/Crowdsec/v1/localAPI/#register-to-a-remote-api-server).
+
+
+#### Enable SSL
+
+The most common use case of the local API is to listen on localhost. In that case there's no need for
+configuring any ssl layer. In some cases, the local API will listen for other crowdsec installations that
+will report their triggered scenarios. In that case, the endpoint may be configured with ssl.
+You can see how to configure SSL on your local API [here](/Crowdsec/v1/references/crowdsec-config/#tls).
+
+
+See the [Local API public documentation]({{v1X.lapi.swagger}}).
+
+
+
diff --git a/docs/v1.X/docs/migration.md b/docs/v1.X/docs/migration.md
new file mode 100644
index 000000000..efff12a70
--- /dev/null
+++ b/docs/v1.X/docs/migration.md
@@ -0,0 +1,72 @@
+# Migration from v0.X to v1.X
+
+!!! warning
+    Migrating to V1.X will impact the following (any change you made will be lost and must be adapted to the new configuration) :
+    * Database model : your existing database will be lost, a new one will be created in the V1.
+    * {{v1X.crowdsec.Name}} configuration :
+        * `/etc/crowdsec/config/default.yaml` : check [new format](/Crowdsec/v1/references/crowdsec-config/#configuration-format)
+        * `/etc/crowdsec/config/profiles.yaml` : check [new format](/Crowdsec/v1/references/profiles/#profiles-configurations)
+
+To upgrade {{v1X.crowdsec.name}} from v0.X to v1, we'll follow these steps :
+
+#### Back up configuration
+
+```
+sudo cscli backup save /tmp/crowdsec_backup
+sudo cp -R /etc/crowdsec/config/patterns /tmp/crowdsec_backup
+```
+
+#### Uninstall old version & install new
+
+Download the latest V1 {{v1X.crowdsec.name}} version [here]({{v1X.crowdsec.download_url}})
+
+```
+tar xvzf crowdsec-release.tgz
+cd crowdsec-v1*/
+sudo ./wizard.sh --uninstall
+sudo ./wizard.sh --bininstall
+```
+
+!!! warning
+    Don't forget to remove the {{v1X.metabase.name}} dashboard if you installed it manually (without {{v1X.cli.name}}).
+
+#### Restore configuration
+
+!!! warning
+    Before restoring the old backup, if you have `local` or `tainted` postoverflows, be aware that they are no longer compatible. You should update the syntax (the community and we are available to help you with this part).
+```
+sudo cscli hub update
+sudo cscli config restore --old-backup /tmp/crowdsec_backup/
+sudo cp -R /tmp/crowdsec_backup/patterns /etc/crowdsec/
+```
+
+#### Start & health check
+
+Finally, you will be able to start the {{v1X.crowdsec.name}} service. Before that, just check that {{v1X.lapi.name}} and {{v1X.api.name}} are correctly configured.
+
+```
+ubuntu@ip-:~$ sudo cscli lapi status
+INFO[0000] Loaded credentials from /etc/crowdsec/local_api_credentials.yaml
+INFO[0000] Trying to authenticate with username 941c3fxxxxxxxxxxxxxxxxxxxxxx on http://localhost:8080/
+INFO[0000] You can successfully interact with Local API (LAPI)
+
+ubuntu@ip-:~$ sudo cscli capi status
+INFO[0000] Loaded credentials from /etc/crowdsec/online_api_credentials.yaml
+INFO[0000] Trying to authenticate with username 941c3fxxxxxxxxxxxxxxxxxxxxxxx on https://api.crowdsec.net/
+INFO[0000] You can successfully interact with Central API (CAPI)
+
+ubuntu@ip-:~$ sudo systemctl start crowdsec.service
+ubuntu@ip-:~$ sudo systemctl status crowdsec.service
+```
+
+You can also check the logs (located by default at `/var/log/crowdsec.log` & `/var/log/crowdsec_api.log`).
+
+You can now browse the documentation to learn the new {{v1X.cli.name}} commands to interact with crowdsec.
+
+#### Upgrade {{v1X.bouncers.name}}
+
+If you were using **{{v1X.bouncers.name}}** (formerly called **blocker(s)**), you need to replace them with the new compatible {{v1X.bouncers.name}}, available on the [hub](https://hub.crowdsec.net/browse/#bouncers) (selecting `agent version` `v1`).
+
+Depending on your bouncer type (netfilter, nginx, wordpress etc.), replace it with the new {{v1X.bouncers.name}} available on the hub; the {{v1X.bouncers.name}} documentation will help you install it easily.
+
+We're also available to help (on [discourse](https://discourse.crowdsec.net/) or [gitter](https://gitter.im/crowdsec-project/community)) upgrading your {{v1X.bouncers.name}}.
\ No newline at end of file
diff --git a/docs/v1.X/docs/observability/command_line.md b/docs/v1.X/docs/observability/command_line.md
new file mode 100644
index 000000000..beadd83a7
--- /dev/null
+++ b/docs/v1.X/docs/observability/command_line.md
@@ -0,0 +1,61 @@
+```bash
+{{v1X.cli.name}} metrics
+```
+
+This command provides an overview of {{v1X.crowdsec.name}} statistics provided by the [prometheus client](/Crowdsec/v1/observability/prometheus/). By default it assumes that {{v1X.crowdsec.name}} is installed on the same machine.
+
+The metrics are split into 3 main sections :
+
+ - Acquisition metrics : How many lines were read from which sources, how many were successfully or unsuccessfully parsed, and how many of those lines ultimately ended up being poured to a bucket.
+ - Parser metrics : How many lines were fed (eligible) to each parser, and how many of those were successfully or unsuccessfully parsed.
+ - Bucket metrics : How many times each scenario led to a bucket instantiation, and for each of those :
+    - how many times it overflowed
+    - how many times it expired (underflows)
+    - how many subsequent events were poured to said bucket
+
+!!! hint
+    These metrics should help you identify potential configuration errors.
+
+    For example, if you have a source that has mostly unparsed logs, you know you might be missing some parsers.
+    Likewise, if you have scenarios that are never instantiated, it might be a hint that they are not relevant to your configuration.
+
+ {{v1X.cli.name}} metrics example +```bash +INFO[0000] Buckets Metrics: ++-----------------------------------------+-----------+--------------+--------+---------+ +| BUCKET | OVERFLOWS | INSTANTIATED | POURED | EXPIRED | ++-----------------------------------------+-----------+--------------+--------+---------+ +| crowdsecurity/http-scan-uniques_404 | - | 8 | 9 | 8 | +| crowdsecurity/iptables-scan-multi_ports | 1 | 8306 | 9097 | 8288 | +| crowdsecurity/ssh-bf | 42 | 281 | 1434 | 238 | +| crowdsecurity/ssh-bf_user-enum | 13 | 659 | 777 | 646 | +| crowdsecurity/http-crawl-non_statics | - | 10 | 12 | 10 | ++-----------------------------------------+-----------+--------------+--------+---------+ +INFO[0000] Acquisition Metrics: ++------------------------------------------+------------+--------------+----------------+------------------------+ +| SOURCE | LINES READ | LINES PARSED | LINES UNPARSED | LINES POURED TO BUCKET | ++------------------------------------------+------------+--------------+----------------+------------------------+ +| /var/log/nginx/https.access.log | 25 | 25 | - | 7 | +| /var/log/kern.log | 18078 | 18078 | - | 4066 | +| /var/log/syslog | 18499 | 18078 | 421 | 5031 | +| /var/log/auth.log | 6086 | 1434 | 4652 | 2211 | +| /var/log/nginx/error.log | 170243 | 169632 | 611 | - | +| /var/log/nginx/http.access.log | 44 | 44 | - | 14 | ++------------------------------------------+------------+--------------+----------------+------------------------+ +INFO[0000] Parser Metrics: ++--------------------------------+--------+--------+----------+ +| PARSERS | HITS | PARSED | UNPARSED | ++--------------------------------+--------+--------+----------+ +| crowdsecurity/geoip-enrich | 37659 | 37659 | 0 | +| crowdsecurity/http-logs | 169701 | 27 | 169674 | +| crowdsecurity/iptables-logs | 36156 | 36156 | 0 | +| crowdsecurity/nginx-logs | 170316 | 169701 | 615 | +| crowdsecurity/non-syslog | 170312 | 170312 | 0 | +| crowdsecurity/sshd-logs | 6053 | 1434 | 4619 | +| crowdsecurity/syslog-logs | 42663 | 42663 | 0 | +| crowdsecurity/dateparse-enrich | 207291 | 207291 | 0 | ++--------------------------------+--------+--------+----------+ + +``` +
\ No newline at end of file diff --git a/docs/v1.X/docs/observability/dashboard.md b/docs/v1.X/docs/observability/dashboard.md new file mode 100644 index 000000000..8fcad2a67 --- /dev/null +++ b/docs/v1.X/docs/observability/dashboard.md @@ -0,0 +1,80 @@ + +!!! warning "MySQL & PostgreSQL" + MySQL and PostgreSQL are currently not supported by `cscli dashboard`. It means that you can run cscli dashboard only if you use `SQLite` (default) as storage database with your local API. + + + +The {{v1X.cli.name}} command `{{v1X.cli.bin}} dashboard setup` will use [docker](https://docs.docker.com/get-docker/) to install [metabase docker image](https://hub.docker.com/r/metabase/metabase/) and fetch our metabase template to have a configured and ready dashboard. + + +## Setup +> Setup and Start crowdsec metabase dashboard + +```bash +{{v1X.cli.bin}} dashboard setup +``` + +Optional arguments: + + - `-l` |`--listen` : ip address to listen on for docker (default is `127.0.0.1`) + - `-p` |`--port` : port to listen on for docker (default is `8080`) + - `--password` : password for metabase user (default is generated randomly) + - `-f` | `--force` : override existing setup + + + +
+ {{v1X.cli.name}} dashboard setup + +```bash +INFO[0000] Pulling docker image metabase/metabase +........... +INFO[0002] creating container '/crowdsec-metabase' +INFO[0002] Waiting for metabase API to be up (can take up to a minute) +.............. +INFO[0051] Metabase is ready + + URL : 'http://127.0.0.1:3000' + username : 'crowdsec@crowdsec.net' + password : '' + +``` +
+
+!!! tip "Protip"
+    The `dashboard setup` command will output the generated credentials for metabase.
+    Those are stored in `/etc/crowdsec/metabase/metabase.yaml`.
+
+Now you can connect to your dashboard, sign in with your saved credentials, then click on {{v1X.crowdsec.Name}} Dashboard to get this:
+
+
+The dashboard docker image can also be managed with {{v1X.cli.name}} or the docker cli. Look at the {{v1X.cli.name}} help using
+
+```bash
+{{v1X.cli.bin}} dashboard -h
+```
+
+## Remove the dashboard
+> Remove crowdsec metabase dashboard
+
+```bash
+{{v1X.cli.bin}} dashboard remove [-f]
+```
+Optional arguments:
+
+- `-f` | `--force` : will force remove the dashboard
+
+## Stop the dashboard
+> Stop crowdsec metabase dashboard
+
+```bash
+{{v1X.cli.bin}} dashboard stop
+```
+
+## Start the dashboard
+> Start crowdsec metabase dashboard
+
+```bash
+{{v1X.cli.bin}} dashboard start
+```
+
diff --git a/docs/observability/logs.md b/docs/v1.X/docs/observability/logs.md
similarity index 91%
rename from docs/observability/logs.md
rename to docs/v1.X/docs/observability/logs.md
index 4122c244c..35c5c3f38 100644
--- a/docs/observability/logs.md
+++ b/docs/v1.X/docs/observability/logs.md
@@ -1,6 +1,6 @@
-Logs concern everything that happens to {{crowdsec.Name}} itself (startup, configuration, events like IP ban or an alert, shutdown, and so on).
+Logs concern everything that happens to {{v1X.crowdsec.Name}} itself (startup, configuration, events like IP ban or an alert, shutdown, and so on).
-By default, logs are written to `{{crowdsec.main_log}}`, in text format.
+By default, logs are written to `/var/log/crowdsec.log`, in text format.
Logs example
diff --git a/docs/v1.X/docs/observability/overview.md b/docs/v1.X/docs/observability/overview.md
new file mode 100644
index 000000000..8cd9565ec
--- /dev/null
+++ b/docs/v1.X/docs/observability/overview.md
@@ -0,0 +1,13 @@
+# Observability Overview
+
+Observability in security software is crucial, especially when this software might take important decisions such as blocking IP addresses.
+
+We attempt to provide good observability of {{v1X.crowdsec.name}}'s behavior :
+
+ - {{v1X.crowdsec.name}} itself exposes a [prometheus instrumentation](/Crowdsec/v1/observability/prometheus/)
+ - {{v1X.cli.Name}} allows you to view part of the prometheus metrics in the [cli (`{{v1X.cli.bin}} metrics`)](/Crowdsec/v1/observability/command_line/)
+ - {{v1X.crowdsec.name}} logging is contextualized for easy processing
+ - for **humans**, {{v1X.cli.name}} allows you to trivially start a service [exposing dashboards](/Crowdsec/v1/observability/dashboard/) (using [metabase](https://www.metabase.com/))
+
+Furthermore, most of {{v1X.crowdsec.name}}'s configuration should allow you to enable partial debug (ie. per-scenario, per-parser etc.)
+
diff --git a/docs/v1.X/docs/observability/prometheus.md b/docs/v1.X/docs/observability/prometheus.md
new file mode 100644
index 000000000..21d57fce7
--- /dev/null
+++ b/docs/v1.X/docs/observability/prometheus.md
@@ -0,0 +1,85 @@
+{{v1X.crowdsec.name}} can expose a {{v1X.prometheus.htmlname}} endpoint for collection (on `http://127.0.0.1:6060/metrics` by default).
+
+Besides the usual resource consumption monitoring, this endpoint aims at offering a view of {{v1X.crowdsec.name}}'s "applicative" behavior :
+
+ - is it processing a lot of logs ? is it parsing them successfully ?
+ - are a lot of scenarios being triggered ?
+ - are a lot of IPs banned ?
+ - etc.
+
+All the counters are "since {{v1X.crowdsec.name}} start".
+
+### Metrics details
+
+#### Scenarios
+
+ - `cs_buckets` : number of buckets that currently exist
+ - `cs_bucket_created_total` : total number of instantiations of each scenario
+ - `cs_bucket_overflowed_total` : total number of overflows of each scenario
+ - `cs_bucket_underflowed_total` : total number of underflows of each scenario (bucket was created but expired because of a lack of events)
+ - `cs_bucket_poured_total` : total number of events poured to each scenario, with the source as a complementary key
+
+ example + + +``` +#2030 lines from `/var/log/nginx/access.log` were poured to `crowdsecurity/http-scan-uniques_404` scenario +cs_bucket_poured_total{name="crowdsecurity/http-scan-uniques_404",source="/var/log/nginx/access.log"} 2030 +``` + +
+
+
+#### Parsers
+ - `cs_node_hits_total` : how many times an event from a specific source was processed by a parser node :
+
+ example + + +``` +# 235 lines from `auth.log` were processed by the `crowdsecurity/dateparse-enrich` parser +cs_node_hits_total{name="crowdsecurity/dateparse-enrich",source="/var/log/auth.log"} 235 +``` + +
+
+ - `cs_node_hits_ko_total` : how many times an event from a specific source was unsuccessfully parsed by a specific parser
+
+ example + + +``` +# 2112 lines from `error.log` failed to be parsed by `crowdsecurity/http-logs` +cs_node_hits_ko_total{name="crowdsecurity/http-logs",source="/var/log/nginx/error.log"} 2112 +``` + +
+
+ - `cs_node_hits_ok_total` : how many times an event from a specific source was successfully parsed by a specific parser
+
+ - `cs_parser_hits_total` : how many times an event from a source has hit the parser
+ - `cs_parser_hits_ok_total` : how many times an event from a source was successfully parsed
+ - `cs_parser_hits_ko_total` : how many times an event from a source was unsuccessfully parsed
+
+
+#### Acquisition
+
+ - `cs_reader_hits_total` : how many events were read from a specific source
+
+#### Info
+
+ - `cs_info` : Information about {{v1X.crowdsec.name}} (software version)
+
+### Exploitation with prometheus server & grafana
+
+Those metrics can be scraped by a [prometheus server](https://prometheus.io/docs/introduction/overview/#architecture) and visualized with [grafana](https://grafana.com/). They [can be downloaded here](https://github.com/crowdsecurity/grafana-dashboards) :
+
+![Overview](../assets/images/grafana_overview.png)
+
+![Insight](../assets/images/grafana_insight.png)
+
+![Details](../assets/images/grafana_details.png)
\ No newline at end of file
diff --git a/docs/v1.X/docs/references/acquisition.md b/docs/v1.X/docs/references/acquisition.md
new file mode 100644
index 000000000..19f5453e4
--- /dev/null
+++ b/docs/v1.X/docs/references/acquisition.md
@@ -0,0 +1,49 @@
+# Acquisition format
+
+The `/etc/crowdsec/acquis.yaml` file defines which files are read by crowdsec at runtime.
+The file is a list of objects representing groups of files to read, with the following properties.
+
+At least one of :
+
+ - filename: a string representing the path to a file (globbing supported)
+ - filenames: a list of strings representing paths to files (globbing supported)
+ - journalctl_filter: a list of strings passed as arguments to `journalctl`
+
+And a `labels` object with a field `type` indicating the log's type :
+```yaml
+filenames:
+  - /var/log/nginx/access-*.log
+  - /var/log/nginx/error.log
+labels:
+  type: nginx
+---
+filenames:
+  - /var/log/auth.log
+labels:
+  type: syslog
+---
+journalctl_filter:
+  - "_SYSTEMD_UNIT=ssh.service"
+labels:
+  type: syslog
+
+```
+
+The `labels.type` is *important* as it is what will determine which parser will try to process the logs.
+
+The log won't be processed by the syslog parser if its type is not syslog :
+```bash
+$ cat /etc/crowdsec/parsers/s00-raw/syslog-logs.yaml
+filter: "evt.Line.Labels.type == 'syslog'"
+...
+```
+
+On the other hand, nginx tends to write its own logs without using syslog :
+```bash
+$ cat /etc/crowdsec/parsers/s01-parse/nginx-logs.yaml
+filter: "evt.Parsed.program startsWith 'nginx'"
+...
+```
+
+If for example your nginx was logging via syslog, you would need to set its `labels.type` to `syslog` so that it's first parsed by the syslog parser, and *then* by the nginx parser (notice they are in different stages).
+
diff --git a/docs/v1.X/docs/references/alerts.md b/docs/v1.X/docs/references/alerts.md
new file mode 100644
index 000000000..6102bcab3
--- /dev/null
+++ b/docs/v1.X/docs/references/alerts.md
@@ -0,0 +1,8 @@
+# Alerts
+
+An `Alert` is the runtime representation of a bucket overflow.
+
+The representation of the object can be found here :
+
+[Alert object documentation](https://pkg.go.dev/github.com/crowdsecurity/crowdsec/pkg/types#RuntimeAlert)
+
+
diff --git a/docs/v1.X/docs/references/collections.md b/docs/v1.X/docs/references/collections.md
new file mode 100644
index 000000000..48a78b3e0
--- /dev/null
+++ b/docs/v1.X/docs/references/collections.md
@@ -0,0 +1,29 @@
+# Collections
+
+Collections are bundles of parsers, scenarios and postoverflows that form a coherent package.
+Collections are present in `/etc/crowdsec/collections/` and follow this format :
+
+> `/etc/crowdsec/collections/linux.yaml`
+
+```yaml
+#the list of parsers it contains
+parsers:
+  - crowdsecurity/syslog-logs
+  - crowdsecurity/geoip-enrich
+  - crowdsecurity/dateparse-enrich
+#the list of collections it contains
+collections:
+  - crowdsecurity/sshd
+# the list of postoverflows it contains
+# postoverflows:
+#   - crowdsecurity/seo-bots-whitelist
+# the list of scenarios it contains
+# scenarios:
+#   - crowdsecurity/http-crawl-non_statics
+description: "core linux support : syslog+geoip+ssh"
+author: crowdsecurity
+tags:
+  - linux
+```
+
+Collections mostly exist as a convenience for the user when using the hub.
diff --git a/docs/v1.X/docs/references/crowdsec-config.md b/docs/v1.X/docs/references/crowdsec-config.md
new file mode 100644
index 000000000..5324ddfc4
--- /dev/null
+++ b/docs/v1.X/docs/references/crowdsec-config.md
@@ -0,0 +1,400 @@
+# Crowdsec configuration
+
+{{v1X.crowdsec.Name}} has a main `yaml` configuration file, usually located in `/etc/crowdsec/config.yaml`.
+
+## Configuration example
+
+ Default configuration + +```yaml +common: + daemonize: true + pid_dir: /var/run/ + log_media: file + log_level: info + log_dir: /var/log/ + working_dir: . +config_paths: + config_dir: /etc/crowdsec/ + data_dir: /var/lib/crowdsec/data/ + simulation_path: /etc/crowdsec/simulation.yaml + hub_dir: /etc/crowdsec/hub/ + index_path: /etc/crowdsec/hub/.index.json +crowdsec_service: + acquisition_path: /etc/crowdsec/acquis.yaml + parser_routines: 1 + buckets_routines: 1 + output_routines: 1 +cscli: + output: human + hub_branch: wip_lapi +db_config: + log_level: info + type: sqlite + db_path: /var/lib/crowdsec/data/crowdsec.db + #user: + #password: + #db_name: + #host: + #port: + flush: + max_items: 5000 + max_age: 7d +api: + client: + insecure_skip_verify: true + credentials_path: /etc/crowdsec/local_api_credentials.yaml + server: + log_level: info + listen_uri: localhost:8080 + profiles_path: /etc/crowdsec/profiles.yaml + online_client: # Crowdsec API + credentials_path: /etc/crowdsec/online_api_credentials.yaml +# tls: +# cert_file: /etc/crowdsec/ssl/cert.pem +# key_file: /etc/crowdsec/ssl/key.pem +prometheus: + enabled: true + level: full + listen_addr: 127.0.0.1 + listen_port: 6060 +``` + +
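+
+After editing this file, you can ask {{v1X.crowdsec.name}} to validate it without actually starting the service. A minimal sketch, relying on the same test flag the docker image uses for its `TEST_MODE` variable:
+
+```bash
+# parse the configuration, report any error, and exit
+sudo crowdsec -c /etc/crowdsec/config.yaml -t
+```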
+
+
+## Configuration format
+
+```yaml
+common:
+  daemonize: (true|false)
+  pid_dir:
+  log_media: (file|stdout)
+  log_level: (error|info|debug|trace)
+  log_dir:
+  working_dir:
+config_paths:
+  config_dir:
+  data_dir:
+  simulation_path:
+  hub_dir:
+  index_path:
+crowdsec_service:
+  acquisition_path:
+  parser_routines:
+  buckets_routines:
+  output_routines:
+cscli:
+  output: (human|json|raw)
+  hub_branch:
+db_config:
+  type:
+  db_path:
+  user:      # for mysql/pgsql
+  password:  # for mysql/pgsql
+  db_name:   # for mysql/pgsql
+  host:      # for mysql/pgsql
+  port:      # for mysql/pgsql
+  flush:
+    max_items:
+    max_age:
+api:
+  client:
+    insecure_skip_verify: (true|false)
+    credentials_path:
+  server:
+    log_level: (error|info|debug|trace)
+    listen_uri:  # host:port
+    profiles_path:
+    online_client:
+      credentials_path:
+    tls:
+      cert_file:
+      key_file:
+prometheus:
+  enabled: (true|false)
+  level: (full|aggregated)
+  listen_addr:
+  listen_port:
+```
+
+## Configuration directives
+
+### `common`
+
+```yaml
+common:
+  daemonize: (true|false)
+  pid_dir:
+  log_media: (file|stdout)
+  log_level: (error|info|debug|trace)
+  log_dir:
+  working_dir:
+```
+
+#### `daemonize`
+> bool
+
+Whether to daemonize the crowdsec daemon.
+
+#### `pid_dir`
+> string
+
+Folder to store the PID file in.
+
+#### `log_media`
+> string
+
+Log media. Can be `stdout` or `file`.
+
+#### `log_level`
+> string
+
+Log level. Can be `error`, `info`, `debug`, `trace`.
+
+#### `log_dir`
+> string
+
+Folder to write the log file to.
+
+!!! warning
+    Works only with `log_media = file`.
+
+#### `working_dir`
+> string
+
+Current working directory.
+
+
+### `config_paths`
+
+This section contains most paths to various sub configuration items.
+
+
+```yaml
+config_paths:
+  config_dir:
+  data_dir:
+  simulation_path:
+  hub_dir:
+  index_path:
+```
+
+#### `config_dir`
+> string
+
+The main configuration directory of crowdsec.
+
+#### `data_dir`
+> string
+
+This is where crowdsec is going to store data, such as files downloaded by scenarios, the geolocation database, the metabase configuration database, or even the SQLite database.
+
+#### `simulation_path`
+> string
+
+The path to the {{v1X.simulation.htmlname}} configuration.
+
+#### `hub_dir`
+> string
+
+The directory where `cscli` will store parsers, scenarios, collections and such.
+
+#### `index_path`
+> string
+
+Path to the `.index.json` file downloaded by `cscli` to know the list of available configurations.
+
+
+### `crowdsec_service`
+
+This section is only used by the crowdsec agent.
+
+
+```yaml
+crowdsec_service:
+  acquisition_path:
+  parser_routines:
+  buckets_routines:
+  output_routines:
+```
+
+
+#### `parser_routines`
+> int
+
+Number of dedicated goroutines for parsing files.
+
+#### `buckets_routines`
+> int
+
+Number of dedicated goroutines for managing live buckets.
+
+#### `output_routines`
+> int
+
+Number of dedicated goroutines for pushing data to the local api.
+
+#### `acquisition_path`
+> string
+
+Path to the yaml file describing the logs that need to be read.
+
+
+### `cscli`
+
+This section is only used by `cscli`.
+
+```yaml
+cscli:
+  output: (human|json|raw)
+  hub_branch:
+```
+
+#### `output`
+> string
+
+The default output format (human, json or raw).
+
+#### `hub_branch`
+> string
+
+The git branch from which `cscli` is going to fetch configurations.
+
+## `db_config`
+
+Please refer to the [database configuration](/Crowdsec/v1/references/database).
+
+## `api`
+
+The api section is used by `cscli`, `crowdsec` and the local API.
+
+```yaml
+api:
+  client:
+    insecure_skip_verify: (true|false)
+    credentials_path:
+  server:
+    log_level: (error|info|debug|trace)
+    listen_uri:  # host:port
+    profiles_path:
+    online_client:
+      credentials_path:
+    tls:
+      cert_file:
+      key_file:
+```
+
+### `client`
+
+The client subsection is used by `crowdsec` and `cscli` to read and write decisions to the local API.
+
+```yaml
+client:
+  insecure_skip_verify: (true|false)
+  credentials_path:
+```
+
+#### `insecure_skip_verify`
+>bool
+
+Allows the use of https with self-signed certificates.
+
+#### `credentials_path`
+>string
+
+Path to the credentials file (contains the API url + login/password).
+
+### `server`
+
+The `server` subsection is the local API configuration.
+
+```yaml
+server:
+  log_level: (error|info|debug|trace)
+  listen_uri:  # host:port
+  profiles_path:
+  online_client:
+    credentials_path:
+  tls:
+    cert_file:
+    key_file:
+```
+
+#### `listen_uri`
+> string
+
+Address and port listen configuration, in the form `host:port`.
+
+#### `profiles_path`
+> string
+
+The path to the {{v1X.profiles.htmlname}} configuration.
+
+#### `online_client`
+
+Configuration to push signals and receive bad IPs from the Crowdsec API.
+
+```yaml
+online_client:
+  credentials_path:
+```
+
+##### `credentials_path`
+> string
+
+Path to a file containing credentials for the Central API.
+
+#### `tls`
+
+If present, holds paths to the certificate and key files.
+
+```yaml
+tls:
+  cert_file:
+  key_file:
+```
+
+##### `cert_file`
+> string
+
+Path to the certificate file.
+
+##### `key_file`
+> string
+
+Path to the certificate key file.
+
+### `prometheus`
+
+This section is used by the local API and crowdsec.
+
+```yaml
+prometheus:
+  enabled: (true|false)
+  level: (full|aggregated)
+  listen_addr:
+  listen_port:
+```
+
+
+#### `enabled`
+> bool
+
+Enables/disables the prometheus instrumentation.
+
+#### `level`
+> string
+
+Can be `full` (all metrics) or `aggregated` (minimal metrics that keep cardinality low).
+
+#### `listen_addr`
+> string
+
+Prometheus listen url.
+
+#### `listen_port`
+> int
+
+Prometheus listen port.
diff --git a/docs/v1.X/docs/references/database.md b/docs/v1.X/docs/references/database.md
new file mode 100644
index 000000000..61faf5980
--- /dev/null
+++ b/docs/v1.X/docs/references/database.md
@@ -0,0 +1,334 @@
+
+# Database
+
+The database is mostly used by the {{v1X.lapi.htmlname}} but also by {{v1X.cli.user_guide}} for some tasks.
+
+Currently, 3 databases are supported:
+
+- `sqlite` (default database)
+
+- `mysql`
+
+- `postgresql`
+
+
+!!! warning
+    It is recommended to use `mysql` or `postgresql` if you expect to have a lot of traffic on the API.
+
+
+The database configuration can be found in the `crowdsec` configuration file (default: {{v1X.config.crowdsec_config_file}}).
+
+It's located under the `db_config` block.
+
+## Configuration Examples
+
+ SQLite + +```yaml +db_config: + type: sqlite + db_path: /var/lib/crowdsec/data/crowdsec.db + flush: + max_items: 5000 + max_age: 7d +``` +
+
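+
+For a quick sanity check of the default SQLite backend, the database file can be queried directly (a minimal sketch, assuming the default `db_path` and the ent-generated `alerts` table):
+
+```
+sqlite3 /var/lib/crowdsec/data/crowdsec.db 'select count(*) from alerts;'
+```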
+MySQL + +```yaml +db_config: + type: mysql + user: crowdsec + password: crowdsecpassword + db_name: crowdsec + host: "127.0.0.1" + port: 3306 + flush: + max_items: 5000 + max_age: 7d +``` +
+
+
 +PostgreSQL + +```yaml +db_config: + type: postgresql + user: crowdsec + password: crowdsecpassword + db_name: crowdsec + host: "127.0.0.1" + port: 5432 + flush: + max_items: 5000 + max_age: 7d +``` + +
+ +## Configuration Format + +### `db_config` +> Contains the configuration of the database + +```yaml +db_config: + type: + + db_path: # for sqlite + + user: # for mysql/pgsql + password: # for mysql/pgsql + db_name: # for mysql/pgsql + host: # for mysql/pgsql + port: # for mysql/pgsql + flush: + max_items: + max_age: +``` + + +## Configuration Directives + +### `type` + +```yaml +db_config: + type: sqlite +``` + +The `type` of database to use. It can be: + +- `sqlite` +- `mysql` +- `postgresql` + +### `db_path` + +```yaml +db_config: + type: sqlite + db_path: "/var/lib/crowdsec/data/crowdsec.db" +``` + +The path to the database file (only if the type of database is `sqlite`). + +### `user` + +```yaml +db_config: + type: mysql|postgresql + + user: foo +``` +The username to connect to the database with (only if the type of database is `mysql` or `postgresql`). + +### `password` + +```yaml +db_config: + type: mysql|postgresql + + password: foobar +``` +The password to connect to the database with (only if the type of database is `mysql` or `postgresql`). + +### `db_name` + +```yaml +db_config: + type: mysql|postgresql + + db_name: crowdsec +``` +The name of the database to connect to (only if the type of database is `mysql` or `postgresql`). + +### `host` + +```yaml +db_config: + type: mysql|postgresql + + host: "127.0.0.1" +``` +The host to connect to (only if the type of database is `mysql` or `postgresql`). + +### `port` + +```yaml +db_config: + type: mysql|postgresql + + port: 3306 +``` +The port to connect to (only if the type of database is `mysql` or `postgresql`). + +### `flush` + +```yaml +flush: + max_items: + max_age: +``` + +#### `max_items` +> int + +Maximum number of alerts kept in the database. + +#### `max_age` +> string + +Alert retention time. + +Supported units: + + - `s`: seconds + + - `m`: minutes + + - `h`: hours + + - `d`: days + + +# Behind the scenes + +{{v1X.crowdsec.name}} uses the [ent framework](https://entgo.io/) to manage the database.
+ +This is the schema of the database (as seen by `entc describe`) + +``` +Alert: + +-----------------+-----------+--------+----------+----------+---------+---------------+-----------+----------------------------------+------------+ + | Field | Type | Unique | Optional | Nillable | Default | UpdateDefault | Immutable | StructTag | Validators | + +-----------------+-----------+--------+----------+----------+---------+---------------+-----------+----------------------------------+------------+ + | id | | false | false | false | false | false | false | json:"id,omitempty" | 0 | + | created_at | time.Time | false | false | false | true | false | false | json:"created_at,omitempty" | 0 | + | updated_at | time.Time | false | false | false | true | false | false | json:"updated_at,omitempty" | 0 | + | scenario | string | false | false | false | false | false | false | json:"scenario,omitempty" | 0 | + | bucketId | string | false | true | false | true | false | false | json:"bucketId,omitempty" | 0 | + | message | string | false | true | false | true | false | false | json:"message,omitempty" | 0 | + | eventsCount | int32 | false | true | false | true | false | false | json:"eventsCount,omitempty" | 0 | + | startedAt | time.Time | false | true | false | true | false | false | json:"startedAt,omitempty" | 0 | + | stoppedAt | time.Time | false | true | false | true | false | false | json:"stoppedAt,omitempty" | 0 | + | sourceIp | string | false | true | false | false | false | false | json:"sourceIp,omitempty" | 0 | + | sourceRange | string | false | true | false | false | false | false | json:"sourceRange,omitempty" | 0 | + | sourceAsNumber | string | false | true | false | false | false | false | json:"sourceAsNumber,omitempty" | 0 | + | sourceAsName | string | false | true | false | false | false | false | json:"sourceAsName,omitempty" | 0 | + | sourceCountry | string | false | true | false | false | false | false | json:"sourceCountry,omitempty" | 0 | + | sourceLatitude | float32 | false | true | false | false | false | false | json:"sourceLatitude,omitempty" | 0 | + | sourceLongitude | float32 | false | true | false | false | false | false | json:"sourceLongitude,omitempty" | 0 | + | sourceScope | string | false | true | false | false | false | false | json:"sourceScope,omitempty" | 0 | + | sourceValue | string | false | true | false | false | false | false | json:"sourceValue,omitempty" | 0 | + | capacity | int32 | false | true | false | false | false | false | json:"capacity,omitempty" | 0 | + | leakSpeed | string | false | true | false | false | false | false | json:"leakSpeed,omitempty" | 0 | + | scenarioVersion | string | false | true | false | false | false | false | json:"scenarioVersion,omitempty" | 0 | + | scenarioHash | string | false | true | false | false | false | false | json:"scenarioHash,omitempty" | 0 | + | simulated | bool | false | false | false | true | false | false | json:"simulated,omitempty" | 0 | + +-----------------+-----------+--------+----------+----------+---------+---------------+-----------+----------------------------------+------------+ + +-----------+----------+---------+---------+----------+--------+----------+ + | Edge | Type | Inverse | BackRef | Relation | Unique | Optional | + +-----------+----------+---------+---------+----------+--------+----------+ + | owner | Machine | true | alerts | M2O | true | true | + | decisions | Decision | false | | O2M | false | true | + | events | Event | false | | O2M | false | true | + | metas | Meta | false | | O2M | false 
| true | + +-----------+----------+---------+---------+----------+--------+----------+ + +Bouncer: + +------------+-----------+--------+----------+----------+---------+---------------+-----------+-----------------------------+------------+ + | Field | Type | Unique | Optional | Nillable | Default | UpdateDefault | Immutable | StructTag | Validators | + +------------+-----------+--------+----------+----------+---------+---------------+-----------+-----------------------------+------------+ + | id | | false | false | false | false | false | false | json:"id,omitempty" | 0 | + | created_at | time.Time | false | false | false | true | false | false | json:"created_at,omitempty" | 0 | + | updated_at | time.Time | false | false | false | true | false | false | json:"updated_at,omitempty" | 0 | + | name | string | true | false | false | false | false | false | json:"name,omitempty" | 0 | + | api_key | string | false | false | false | false | false | false | json:"api_key,omitempty" | 0 | + | revoked | bool | false | false | false | false | false | false | json:"revoked,omitempty" | 0 | + | ip_address | string | false | true | false | true | false | false | json:"ip_address,omitempty" | 0 | + | type | string | false | true | false | false | false | false | json:"type,omitempty" | 0 | + | version | string | false | true | false | false | false | false | json:"version,omitempty" | 0 | + | until | time.Time | false | true | false | true | false | false | json:"until,omitempty" | 0 | + | last_pull | time.Time | false | false | false | true | false | false | json:"last_pull,omitempty" | 0 | + +------------+-----------+--------+----------+----------+---------+---------------+-----------+-----------------------------+------------+ + +Decision: + +------------+-----------+--------+----------+----------+---------+---------------+-----------+-----------------------------+------------+ + | Field | Type | Unique | Optional | Nillable | Default | UpdateDefault | Immutable | StructTag | Validators | + +------------+-----------+--------+----------+----------+---------+---------------+-----------+-----------------------------+------------+ + | id | | false | false | false | false | false | false | json:"id,omitempty" | 0 | + | created_at | time.Time | false | false | false | true | false | false | json:"created_at,omitempty" | 0 | + | updated_at | time.Time | false | false | false | true | false | false | json:"updated_at,omitempty" | 0 | + | until | time.Time | false | false | false | false | false | false | json:"until,omitempty" | 0 | + | scenario | string | false | false | false | false | false | false | json:"scenario,omitempty" | 0 | + | type | string | false | false | false | false | false | false | json:"type,omitempty" | 0 | + | start_ip | int64 | false | true | false | false | false | false | json:"start_ip,omitempty" | 0 | + | end_ip | int64 | false | true | false | false | false | false | json:"end_ip,omitempty" | 0 | + | scope | string | false | false | false | false | false | false | json:"scope,omitempty" | 0 | + | value | string | false | false | false | false | false | false | json:"value,omitempty" | 0 | + | origin | string | false | false | false | false | false | false | json:"origin,omitempty" | 0 | + | simulated | bool | false | false | false | true | false | false | json:"simulated,omitempty" | 0 | + +------------+-----------+--------+----------+----------+---------+---------------+-----------+-----------------------------+------------+ + 
+-------+-------+---------+-----------+----------+--------+----------+ + | Edge | Type | Inverse | BackRef | Relation | Unique | Optional | + +-------+-------+---------+-----------+----------+--------+----------+ + | owner | Alert | true | decisions | M2O | true | true | + +-------+-------+---------+-----------+----------+--------+----------+ + +Event: + +------------+-----------+--------+----------+----------+---------+---------------+-----------+-----------------------------+------------+ + | Field | Type | Unique | Optional | Nillable | Default | UpdateDefault | Immutable | StructTag | Validators | + +------------+-----------+--------+----------+----------+---------+---------------+-----------+-----------------------------+------------+ + | id | | false | false | false | false | false | false | json:"id,omitempty" | 0 | + | created_at | time.Time | false | false | false | true | false | false | json:"created_at,omitempty" | 0 | + | updated_at | time.Time | false | false | false | true | false | false | json:"updated_at,omitempty" | 0 | + | time | time.Time | false | false | false | false | false | false | json:"time,omitempty" | 0 | + | serialized | string | false | false | false | false | false | false | json:"serialized,omitempty" | 0 | + +------------+-----------+--------+----------+----------+---------+---------------+-----------+-----------------------------+------------+ + +-------+-------+---------+---------+----------+--------+----------+ + | Edge | Type | Inverse | BackRef | Relation | Unique | Optional | + +-------+-------+---------+---------+----------+--------+----------+ + | owner | Alert | true | events | M2O | true | true | + +-------+-------+---------+---------+----------+--------+----------+ + +Machine: + +-------------+-----------+--------+----------+----------+---------+---------------+-----------+------------------------------+------------+ + | Field | Type | Unique | Optional | Nillable | Default | UpdateDefault | Immutable | StructTag | Validators | + +-------------+-----------+--------+----------+----------+---------+---------------+-----------+------------------------------+------------+ + | id | | false | false | false | false | false | false | json:"id,omitempty" | 0 | + | created_at | time.Time | false | false | false | true | false | false | json:"created_at,omitempty" | 0 | + | updated_at | time.Time | false | false | false | true | false | false | json:"updated_at,omitempty" | 0 | + | machineId | string | true | false | false | false | false | false | json:"machineId,omitempty" | 0 | + | password | string | false | false | false | false | false | false | json:"password,omitempty" | 0 | + | ipAddress | string | false | false | false | false | false | false | json:"ipAddress,omitempty" | 0 | + | scenarios | string | false | true | false | false | false | false | json:"scenarios,omitempty" | 0 | + | version | string | false | true | false | false | false | false | json:"version,omitempty" | 0 | + | isValidated | bool | false | false | false | true | false | false | json:"isValidated,omitempty" | 0 | + | status | string | false | true | false | false | false | false | json:"status,omitempty" | 0 | + +-------------+-----------+--------+----------+----------+---------+---------------+-----------+------------------------------+------------+ + +--------+-------+---------+---------+----------+--------+----------+ + | Edge | Type | Inverse | BackRef | Relation | Unique | Optional | + +--------+-------+---------+---------+----------+--------+----------+ + | alerts | 
Alert | false | | O2M | false | true | + +--------+-------+---------+---------+----------+--------+----------+ + +Meta: + +------------+-----------+--------+----------+----------+---------+---------------+-----------+-----------------------------+------------+ + | Field | Type | Unique | Optional | Nillable | Default | UpdateDefault | Immutable | StructTag | Validators | + +------------+-----------+--------+----------+----------+---------+---------------+-----------+-----------------------------+------------+ + | id | | false | false | false | false | false | false | json:"id,omitempty" | 0 | + | created_at | time.Time | false | false | false | true | false | false | json:"created_at,omitempty" | 0 | + | updated_at | time.Time | false | false | false | true | false | false | json:"updated_at,omitempty" | 0 | + | key | string | false | false | false | false | false | false | json:"key,omitempty" | 0 | + | value | string | false | false | false | false | false | false | json:"value,omitempty" | 0 | + +------------+-----------+--------+----------+----------+---------+---------------+-----------+-----------------------------+------------+ + +-------+-------+---------+---------+----------+--------+----------+ + | Edge | Type | Inverse | BackRef | Relation | Unique | Optional | + +-------+-------+---------+---------+----------+--------+----------+ + | owner | Alert | true | metas | M2O | true | true | + +-------+-------+---------+---------+----------+--------+----------+ + +``` diff --git a/docs/v1.X/docs/references/decisions.md b/docs/v1.X/docs/references/decisions.md new file mode 100644 index 000000000..7eeea7707 --- /dev/null +++ b/docs/v1.X/docs/references/decisions.md @@ -0,0 +1,10 @@ +# Decisions + +A `Decision` is the runtime representation of a bucket overflow consequence: an action being taken against an IP, a range, a user, etc. + +The representation of the object can be found here: + +[Decision object documentation](https://pkg.go.dev/github.com/crowdsecurity/crowdsec/pkg/models#Decision) + +Those objects are not meant to be manipulated directly by parsers and such, but rather to be consumed by the {{v1X.bouncers.htmlname}} via the {{v1X.lapi.htmlname}}. + diff --git a/docs/v1.X/docs/references/enrichers.md b/docs/v1.X/docs/references/enrichers.md new file mode 100644 index 000000000..b269d3e8a --- /dev/null +++ b/docs/v1.X/docs/references/enrichers.md @@ -0,0 +1,23 @@ +# Enrichers + +Enrichers are {{v1X.parsers.htmlname}} that can rely on external methods to provide extra contextual information to the event. The enrichers are usually in the `s02-enrich` {{v1X.stage.htmlname}} (after most of the parsing has happened). + +Enricher functions should all accept a string as a parameter, and return an associative string array that will be automatically merged into the `Enriched` map of the {{v1X.event.htmlname}}. + +!!! warning + At the time of writing, the enrichers plugin mechanism is still being implemented (read: the list of available enrichment methods is currently hardcoded). + + +As an example, let's look at the geoip-enrich parser/enricher: + +It relies on [the geolite2 data created by maxmind](https://www.maxmind.com) and the [geoip2 golang module](https://github.com/oschwald/geoip2-golang) to provide the actual data. + + +It exposes three methods: `GeoIpCity`, `GeoIpASN` and `IpToRange`, which are used by the `crowdsecurity/geoip-enrich` parser.
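+
+As a rough illustration of how such a method is wired into a parser, here is a simplified sketch in the spirit of `crowdsecurity/geoip-enrich` (the filter and the copied field are illustrative, not the verbatim hub configuration):
+
+```yaml
+#call the GeoIpCity enrichment method on the source IP, then copy
+#one of the values it merged into the Enriched map
+filter: "'source_ip' in evt.Meta"
+name: example/geoip-enrich-sketch
+statics:
+  - method: GeoIpCity
+    expression: evt.Meta.source_ip
+  - meta: IsoCode
+    expression: evt.Enriched.IsoCode
+```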
+Enrichers can be installed like any other parser with the following command: + +``` +{{v1X.cli.bin}} parsers install crowdsecurity/geoip-enrich +``` + +Take a tour of the {{v1X.hub.htmlname}} to find them! diff --git a/docs/v1.X/docs/references/events.md b/docs/v1.X/docs/references/events.md new file mode 100644 index 000000000..8302709cb --- /dev/null +++ b/docs/v1.X/docs/references/events.md @@ -0,0 +1,42 @@ +# Events + +An `Event` is the runtime representation of an item being processed by crowdsec: it can be a log line being parsed, or an overflow being reprocessed. + +The `Event` object is modified by parsers, scenarios, and directly via user [statics expressions](/Crowdsec/v1/references/parsers/#statics) (for example). + +The representation of the object can be found here: + +[Event object documentation](https://pkg.go.dev/github.com/crowdsecurity/crowdsec/pkg/types#Event) + +## LOG relevant fields + + - `Type` is `types.LOG` + - `Whitelisted`: if `true` the LOG or OVFLW will be dropped + - `Line`: representation of the raw line + - `Raw`: the raw line representation + - `Src`: a label for the source + - `Time`: acquisition timestamp + - `Labels`: the static labels (from acquis.yaml) associated with the source + - `Process`: if set to false, processing of the line will stop + - `Parsed`: a `map[string]string` that can be used during parsing and enrichment. This is where GROK patterns will output their captures by default + - `Enriched`: a `map[string]string` that can be used during parsing and enrichment. This is where enrichment functions will output their captures by default + - `Meta`: a `map[string]string` that can be used to store *important* information about a log. This map is serialized into the database when the event is stored. + - `Overflow`: representation of an Overflow if `Type` is set to `OVFLW` + - `Time`: processing timestamp + - `StrTime`: string representation of the log timestamp. Can be set by parsers that capture the timestamp in logs. Will be automatically processed by `crowdsecurity/dateparse-enrich` when processing logs in forensic mode to set `MarshaledTime` + - `MarshaledTime`: if non-empty, the event's timestamp that will be used when processing buckets (for forensic mode) + +## OVERFLOW relevant fields + + - `Type` is `types.OVFLW` + - `Whitelisted`: if `true` the LOG or OVFLW will be dropped + - `Overflow`: representation of an Overflow if `Type` is set to `OVFLW` + - `Time`: processing timestamp + - `StrTime`: string representation of the log timestamp. Can be set by parsers that capture the timestamp in logs. Will be automatically processed by `crowdsecurity/dateparse-enrich` when processing logs in forensic mode to set `MarshaledTime` + - `MarshaledTime`: if non-empty, the event's timestamp that will be used when processing buckets (for forensic mode) + - `Overflow`: + - `Whitelisted`: if true the OVFLW will be dropped + - `Reprocess`: if true, the OVFLW will be reprocessed (inference) + - `Sources`: a `map[string]models.Source` representing the distinct sources that triggered the overflow, with their types and values. + - `Alert` and `APIAlerts`: representation of the signals that will be sent to LAPI.
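+
+As an illustration of how these fields are typically consumed, a parser node's `filter` (see the expressions reference) can combine them; the values below are purely illustrative:
+
+```yaml
+#only process syslog-acquired lines that a previous stage
+#attributed to the sshd program
+filter: "evt.Line.Labels.type == 'syslog' && evt.Parsed.program == 'sshd'"
+```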
+ diff --git a/docs/write_configurations/expressions.md b/docs/v1.X/docs/references/expressions.md similarity index 67% rename from docs/write_configurations/expressions.md rename to docs/v1.X/docs/references/expressions.md index 1974deead..412ef581d 100644 --- a/docs/write_configurations/expressions.md +++ b/docs/v1.X/docs/references/expressions.md @@ -2,26 +2,26 @@ > [antonmedv/expr](https://github.com/antonmedv/expr) - Expression evaluation engine for Go: fast, non-Turing complete, dynamic typing, static typing -Several places of {{crowdsec.name}}'s configuration use [expr](https://github.com/antonmedv/expr), notably : +Several places of {{v1X.crowdsec.name}}'s configuration use [expr](https://github.com/antonmedv/expr), notably: - - {{filter.Htmlname}} that are used to determine events eligibility in {{parsers.htmlname}} and {{scenarios.htmlname}} or `profiles` - - {{statics.Htmlname}} use expr in the `expression` directive, to compute complex values - - {{whitelists.Htmlname}} rely on `expression` directive to allow more complex whitelists filters + - {{v1X.filter.Htmlname}} that are used to determine event eligibility in {{v1X.parsers.htmlname}} and {{v1X.scenarios.htmlname}} or `profiles` + - {{v1X.statics.Htmlname}} use expr in the `expression` directive, to compute complex values + - {{v1X.whitelists.Htmlname}} rely on the `expression` directive to allow more complex whitelist filters To learn more about [expr](https://github.com/antonmedv/expr), [check the github page of the project](https://github.com/antonmedv/expr/blob/master/docs/Language-Definition.md). -When {{crowdsec.name}} relies on `expr`, a context is provided to let the expression access relevant objects : +When {{v1X.crowdsec.name}} relies on `expr`, a context is provided to let the expression access relevant objects: - - `evt.` is the representation of the current {{event.htmlname}} and is the most relevant object - - in [profiles](/references/output/#profile), {{signal.htmlname}} is accessible via the `sig.` object + - `evt.` is the representation of the current {{v1X.event.htmlname}} and is the most relevant object + - in [profiles](/Crowdsec/v1/references/profiles/), {{v1X.alert.htmlname}} is accessible via the `sig.` object If the `debug` is enabled (in the scenario or parser where expr is used), additional debug will be displayed regarding evaluated expressions. # Helpers -In order to makes its use in {{crowdsec.name}} more efficient, we added a few helpers that are documented bellow. +In order to make its use in {{v1X.crowdsec.name}} more efficient, we added a few helpers that are documented below. ## Atof(string) float64 diff --git a/docs/v1.X/docs/references/parsers.md b/docs/v1.X/docs/references/parsers.md new file mode 100644 index 000000000..ff139129c --- /dev/null +++ b/docs/v1.X/docs/references/parsers.md @@ -0,0 +1,360 @@ +## Understanding parsers + + +A parser is a YAML configuration file that describes how a string is being parsed. Said string can be a log line, or a field extracted from a previous parser. While a lot of parsers rely on the **GROK** approach (a.k.a. regular expressions with named capture groups), parsers can also reference enrichment modules to allow specific data processing, or use specific {{v1X.expr.htmlname}} features to perform parsing of specific data, such as JSON. + +Parsers are organized into stages to allow pipelines and branching in parsing.
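+
+Concretely, stages map to directories that are processed in alphabetical order (an illustrative layout; the stage names follow the hub's conventions):
+
+```
+/etc/crowdsec/parsers/
+├── s00-raw/
+├── s01-parse/
+└── s02-enrich/
+```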
+ +See the [{{v1X.hub.name}}]({{v1X.hub.url}}) to explore parsers, or see some examples below: + + - [apache2 access/error log parser](https://github.com/crowdsecurity/hub/blob/master/parsers/s01-parse/crowdsecurity/apache2-logs.yaml) + - [iptables logs parser](https://github.com/crowdsecurity/hub/blob/master/parsers/s01-parse/crowdsecurity/iptables-logs.yaml) + - [http logs post-processing](https://github.com/crowdsecurity/hub/blob/master/parsers/s02-enrich/crowdsecurity/http-logs.yaml) + +The parsers usually reside in `/etc/crowdsec/parsers//`. + + +## Parser configuration format + +A parser node might look like: +```yaml +onsuccess: next_stage +debug: true +filter: "evt.Parsed.program == 'kernel'" +name: crowdsecurity/demo-iptables +description: "Parse iptables drop logs" +pattern_syntax: + MYCAP: ".*" +grok: + pattern: ^xxheader %{MYCAP:extracted_value} trailing stuff$ + apply_on: evt.Parsed.some_field +statics: + - parsed: something + expression: JsonExtract(evt.Event.extracted_value, "nested.an_array[0]") + - meta: log_type + value: parsed_testlog + - meta: source_ip + expression: "evt.Parsed.src_ip" +``` + +The parser nodes are processed sequentially based on the alphabetical order of {{v1X.stage.htmlname}} and subsequent files. +If the node is considered successful (grok is present and returned data or no grok is present) and "onsuccess" equals `next_stage`, then the {{v1X.event.name}} is moved to the next stage. + +## Parser trees + +A parser node can contain sub-nodes, to provide proper branching (on top of stages). +It can be useful when you want to apply different parsing based on different criteria, or when you have a set of candidate parsers that you want to apply to an event: + +```yaml +#This first node will capture/extract some value +filter: "evt.Line.Labels.type == 'type1'" +name: tests/base-grok-root +pattern_syntax: + MYCAP: ".*" +grok: + pattern: ^... %{MYCAP:extracted_value} ...$ + apply_on: Line.Raw +statics: + - meta: state + value: root-done + - meta: state_sub + expression: evt.Parsed.extracted_value +--- +#and this node will apply different patterns to it +filter: "evt.Line.Labels.type == 'type1' && evt.Meta.state == 'root-done'" +name: tests/base-grok-leafs +onsuccess: next_stage +#the sub-nodes will process the result of the master node +nodes: + - filter: "evt.Parsed.extracted_value == 'VALUE1'" + debug: true + statics: + - meta: final_state + value: leaf1 + - filter: "evt.Parsed.extracted_value == 'VALUE2'" + debug: true + statics: + - meta: final_state + value: leaf2 +``` + +The logic is that the `tests/base-grok-root` node will be processed first and will alter the event (here mostly by extracting some text from the `Line.Raw` field into `Parsed` thanks to the `grok` pattern and the `statics` directive). + +The event will then continue its life and be parsed by the following `tests/base-grok-leafs` node. +This node has `onsuccess` set to `next_stage`, which means that if the node is successful, the event will be moved to the next stage. + +This node actually consists of two sub-nodes that have different conditions (branching) to allow differential treatment of said event. + +A real-life example can be seen when it comes to parsing HTTP logs. +HTTP ACCESS and ERROR logs often have different formats, and thus our "nginx" parser needs to handle both formats: +
+ Nginx parser + +```yaml +filter: "evt.Parsed.program == 'nginx'" +onsuccess: next_stage +name: crowdsecurity/nginx-logs +nodes: + - grok: + #this is the access log + name: NGINXACCESS + apply_on: message + statics: + - meta: log_type + value: http_access-log + - target: evt.StrTime + expression: evt.Parsed.time_local + - grok: + # and this one the error log + name: NGINXERROR + apply_on: message + statics: + - meta: log_type + value: http_error-log + - target: evt.StrTime + expression: evt.Parsed.time +# these ones apply for both grok patterns +statics: + - meta: service + value: http + - meta: source_ip + expression: "evt.Parsed.remote_addr" + - meta: http_status + expression: "evt.Parsed.status" + - meta: http_path + expression: "evt.Parsed.request" +``` +
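+
+To exercise such a parser, you can replay a sample log file in forensic mode (a sketch: the file path is an assumption, and the `-type` value must match the acquisition label your parser pipeline expects):
+
+```
+sudo crowdsec -file /tmp/nginx_access.log -type nginx -no-api
+```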
+ +## Parser directives + +### `debug` + +```yaml +debug: true|false +``` +_default: false_ + +If set to `true`, enables node-level debugging. +It is meant to help understand parser node behavior by providing contextual logging: + +
+ assignments made by statics +``` +DEBU[31-07-2020 16:36:28] + Processing 4 statics id=withered-rain name=crowdsecurity/nginx-logs stage=s01-parse +DEBU[31-07-2020 16:36:28] .Meta[service] = 'http' id=withered-rain name=crowdsecurity/nginx-logs stage=s01-parse +DEBU[31-07-2020 16:36:28] .Meta[source_ip] = '127.0.0.1' id=withered-rain name=crowdsecurity/nginx-logs stage=s01-parse +DEBU[31-07-2020 16:36:28] .Meta[http_status] = '200' id=withered-rain name=crowdsecurity/nginx-logs stage=s01-parse +DEBU[31-07-2020 16:36:28] .Meta[http_path] = '/' id=withered-rain name=crowdsecurity/nginx-logs stage=s01-parse +``` +
+
+ assignments made by grok pattern +``` +DEBU[31-07-2020 16:36:28] + Grok 'NGINXACCESS' returned 10 entries to merge in Parsed id=dark-glitter name=child-crowdsecurity/nginx-logs stage=s01-parse +DEBU[31-07-2020 16:36:28] .Parsed['time_local'] = '21/Jul/2020:16:13:05 +0200' id=dark-glitter name=child-crowdsecurity/nginx-logs stage=s01-parse +DEBU[31-07-2020 16:36:28] .Parsed['method'] = 'GET' id=dark-glitter name=child-crowdsecurity/nginx-logs stage=s01-parse +DEBU[31-07-2020 16:36:28] .Parsed['request'] = '/' id=dark-glitter name=child-crowdsecurity/nginx-logs stage=s01-parse +DEBU[31-07-2020 16:36:28] .Parsed['http_user_agent'] = 'curl/7.58.0' id=dark-glitter name=child-crowdsecurity/nginx-logs stage=s01-parse +DEBU[31-07-2020 16:36:28] .Parsed['remote_addr'] = '127.0.0.1' id=dark-glitter name=child-crowdsecurity/nginx-logs stage=s01-parse +``` +
+
+ debug of filters and expression results +``` +DEBU[31-07-2020 16:36:28] eval(evt.Parsed.program == 'nginx') = TRUE id=withered-rain name=crowdsecurity/nginx-logs stage=s01-parse +DEBU[31-07-2020 16:36:28] eval variables: id=withered-rain name=crowdsecurity/nginx-logs stage=s01-parse +DEBU[31-07-2020 16:36:28] evt.Parsed.program = 'nginx' id=withered-rain name=crowdsecurity/nginx-logs stage=s01-parse +``` +
+ + +### `filter` + +```yaml +filter: expression +``` + +`filter` must be a valid {{v1X.expr.htmlname}} expression that will be evaluated against the {{v1X.event.htmlname}}. + +If `filter` evaluation returns true or is absent, the node will be processed. + +If `filter` returns `false` or a non-boolean, the node won't be processed. + +Here is the [expr documentation](https://github.com/antonmedv/expr/tree/master/docs). + +Examples: + + - `filter: "evt.Meta.foo == 'test'"` + - `filter: "evt.Meta.bar == 'test' && evt.Meta.foo == 'test2'"` + + +### `grok` + +```yaml +grok: + name: NAMED_EXISTING_PATTERN + apply_on: source_field +``` + +```yaml +grok: + pattern: ^a valid RE2 expression with %{CAPTURE:field}$ + apply_on: source_field +``` + +The `grok` structure in a node represents a regular expression with capture groups (grok pattern) that must be applied to a field of the {{v1X.event.name}}. + +The pattern can either: + + - be imported by name (if present within the core of {{v1X.crowdsec.name}}) + - be defined in place + +In both cases, the pattern must be a valid RE2 expression. +The field(s) returned by the regular expression are going to be merged into the `Parsed` associative array of the `Event`. + + + +### `name` + +```yaml +name: explicit_string +``` + +The *mandatory* name of the node. If not present, the node will be skipped at runtime. +It is used for example in debug logs to help you track things. + +### `nodes` + +```yaml +nodes: + - filter: ... + grok: ... +``` + +`nodes` is a list of parser nodes, allowing you to build trees. +Each subnode must be valid, and if any of the subnodes succeed, the whole node is considered successful. + +### `onsuccess` + +``` +onsuccess: next_stage|continue +``` + +_default: continue_ + +If set to `next_stage` and the node is considered successful, the {{v1X.event.name}} will be moved directly to the next stage without processing other nodes in the current stage. + +### `pattern_syntax` + +```yaml +pattern_syntax: + CAPTURE_NAME: VALID_RE2_EXPRESSION +``` + +`pattern_syntax` allows the user to define named capture group expressions for future use in grok patterns. +The regexp must be a valid RE2 expression. + +```yaml +pattern_syntax: + MYCAP: ".*" +grok: + pattern: ^xxheader %{MYCAP:extracted_value} trailing stuff$ + apply_on: Line.Raw +``` + + +### `statics` + +```yaml +statics: + - target: evt.Meta.target_field + value: static_value + - meta: target_field + expression: evt.Meta.target_field + ' this_is' + ' a dynamic expression' + - enriched: target_field + value: static_value +``` + +`statics` is a list of directives that will be executed when the node is considered successful. +Each entry of the list is composed of a target (where to write) and a source (what data to write).
+ +#### `target` + +The target can be any part of the {{v1X.event.htmlname}} object, and can be expressed in different ways: + +- `meta: ` +- `parsed: ` +- `enriched: ` +- a dynamic target (please note that the **current** event is accessible via the `evt.` variable): + - `target: evt.Meta.foobar` + - `target: Meta.foobar` + - `target: evt.StrTime` + + +#### `value` + + The source itself can be either a static value, or an {{v1X.expr.htmlname}} result: + +```yaml +statics: + - meta: target_field + value: static_value + - meta: target_field + expression: evt.Meta.another_field + - meta: target_field + expression: evt.Meta.target_field + ' this_is' + ' a dynamic expression' +``` + +##### `value` +> string + +A static value. + +##### `expression` +> string + +A valid [`expr`](https://github.com/antonmedv/expr) expression to evaluate. +The result of the evaluation will be set in the target field. + +### `data` + +```yaml +data: + - source_url: https://URL/TO/FILE + dest_file: LOCAL_FILENAME + type: (regexp|string) +``` + +`data` allows the user to specify an external source of data. +This section is only relevant when `cscli` is used to install a parser from the hub, as it will download the `source_url` and store it to `dest_file`. When the parser is not installed from the hub, {{v1X.crowdsec.name}} won't download the URL, but the file must exist for the parser to be loaded correctly. + +The `type` is mandatory if you want to evaluate the data in the file, and should be `regexp` for one valid (re2) regular expression per line, or `string` for one string per line. +The regexps will be compiled, the strings will be loaded into a list, and both will be kept in memory. +Without specifying a `type`, the file will be downloaded and stored as a file, not loaded in memory. + + +```yaml +name: crowdsecurity/cdn-whitelist +... +data: + - source_url: https://www.cloudflare.com/ips-v4 + dest_file: cloudflare_ips.txt + type: string +``` + + +## Parser concepts + +### Success and failure + +A parser is considered "successful" if: + + - A grok pattern was present and successfully matched + - No grok pattern was present + + +### Patterns documentation + + +You can find [exhaustive patterns documentation here](/Crowdsec/v1/references/patterns-documentation). diff --git a/docs/v1.X/docs/references/patterns-documentation.md b/docs/v1.X/docs/references/patterns-documentation.md new file mode 100644 index 000000000..2c3af424e --- /dev/null +++ b/docs/v1.X/docs/references/patterns-documentation.md @@ -0,0 +1,2795 @@ +# Patterns documentation + +Here you will find the generated documentation of all the patterns loaded by crowdsec. +They are sorted by pattern length, and are meant to be used in parsers, in the form `%{PATTERN_NAME}`. + + +## MONGO3_SEVERITY + +Pattern : +``` +\w +``` + +## GREEDYDATA + +Pattern : +``` +.* +``` + +## DATA + +Pattern : +``` +.*?
+``` + +## NOTSPACE + +Pattern : +``` +\S+ +``` + +## SPACE + +Pattern : +``` +\s* +``` + +## RAIL_ACTION + +Pattern : +``` +\w+ +``` + +## JAVALOGMESSAGE + +Pattern : +``` +(.*) +``` + +## DAY2 + +Pattern : +``` +\d{2} +``` + +## NOTDQUOTE + +Pattern : +``` +[^"]* +``` + +## RAILS_CONSTROLLER + +Pattern : +``` +[^#]+ +``` + +## RUUID + +Pattern : +``` +\s{32} +``` + +## SYSLOG5424PRINTASCII + +Pattern : +``` +[!-~]+ +``` + +## BACULA_VERSION + +Pattern : +``` +%{USER} +``` + +## WORD + +Pattern : +``` +\b\w+\b +``` + +## BACULA_JOB + +Pattern : +``` +%{USER} +``` + +## CRON_ACTION + +Pattern : +``` +[A-Z ]+ +``` + +## BACULA_VOLUME + +Pattern : +``` +%{USER} +``` + +## BACULA_DEVICE + +Pattern : +``` +%{USER} +``` + +## TZ + +Pattern : +``` +[A-Z]{3} +``` + +## NUMTZ + +Pattern : +``` +[+-]\d{4} +``` + +## MONGO3_COMPONENT + +Pattern : +``` +%{WORD}|- +``` + +## MONGO_WORDDASH + +Pattern : +``` +\b[\w-]+\b +``` + +## NAGIOS_TYPE_HOST_ALERT + +Pattern : +``` +HOST ALERT +``` + +## NONNEGINT + +Pattern : +``` +\b[0-9]+\b +``` + +## MINUTE + +Pattern : +``` +[0-5][0-9] +``` + +## BACULA_DEVICEPATH + +Pattern : +``` +%{UNIXPATH} +``` + +## SYSLOGHOST + +Pattern : +``` +%{IPORHOST} +``` + +## REDISLOG1 + +Pattern : +``` +%{REDISLOG} +``` + +## USER + +Pattern : +``` +%{USERNAME} +``` + +## NUMBER + +Pattern : +``` +%{BASE10NUM} +``` + +## SYSLOG5424SD + +Pattern : +``` +\[%{DATA}\]+ +``` + +## ISO8601_SECOND + +Pattern : +``` +%{SECOND}|60 +``` + +## NGUSER + +Pattern : +``` +%{NGUSERNAME} +``` + +## MONTHNUM2 + +Pattern : +``` +0[1-9]|1[0-2] +``` + +## BACULA_HOST + +Pattern : +``` +[a-zA-Z0-9-]+ +``` + +## EXIM_PID + +Pattern : +``` +\[%{POSINT}\] +``` + +## NAGIOS_TYPE_SERVICE_ALERT + +Pattern : +``` +SERVICE ALERT +``` + +## YEAR + +Pattern : +``` +(?:\d\d){1,2} +``` + +## MONTHNUM + +Pattern : +``` +0?[1-9]|1[0-2] +``` + +## CISCO_XLATE_TYPE + +Pattern : +``` +static|dynamic +``` + +## RAILS_CONTEXT + +Pattern : +``` +(?:%{DATA}\n)* +``` + +## BACULA_LOG_ENDPRUNE + +Pattern : +``` +End auto prune. +``` + +## POSINT + +Pattern : +``` +\b[1-9][0-9]*\b +``` + +## INT + +Pattern : +``` +[+-]?(?:[0-9]+) +``` + +## USERNAME + +Pattern : +``` +[a-zA-Z0-9._-]+ +``` + +## IP + +Pattern : +``` +%{IPV6}|%{IPV4} +``` + +## QS + +Pattern : +``` +%{QUOTEDSTRING} +``` + +## MODSECRULEVERS + +Pattern : +``` +\[ver "[^"]+"\] +``` + +## NAGIOS_TYPE_EXTERNAL_COMMAND + +Pattern : +``` +EXTERNAL COMMAND +``` + +## NAGIOS_EC_ENABLE_SVC_CHECK + +Pattern : +``` +ENABLE_SVC_CHECK +``` + +## IPORHOST + +Pattern : +``` +%{IP}|%{HOSTNAME} +``` + +## NAGIOS_EC_ENABLE_HOST_CHECK + +Pattern : +``` +ENABLE_HOST_CHECK +``` + +## NAGIOS_TYPE_HOST_NOTIFICATION + +Pattern : +``` +HOST NOTIFICATION +``` + +## NAGIOS_EC_DISABLE_SVC_CHECK + +Pattern : +``` +DISABLE_SVC_CHECK +``` + +## NAGIOS_TYPE_PASSIVE_HOST_CHECK + +Pattern : +``` +PASSIVE HOST CHECK +``` + +## NAGIOS_TYPE_HOST_EVENT_HANDLER + +Pattern : +``` +HOST EVENT HANDLER +``` + +## HOUR + +Pattern : +``` +2[0123]|[01]?[0-9] +``` + +## DATESTAMP + +Pattern : +``` +%{DATE}[- ]%{TIME} +``` + +## NAGIOS_TYPE_CURRENT_HOST_STATE + +Pattern : +``` +CURRENT HOST STATE +``` + +## NAGIOS_EC_DISABLE_HOST_CHECK + +Pattern : +``` +DISABLE_HOST_CHECK +``` + +## NGUSERNAME + +Pattern : +``` +[a-zA-Z\.\@\-\+_%]+ +``` + +## NAGIOS_TYPE_HOST_FLAPPING_ALERT + +Pattern : +``` +HOST FLAPPING ALERT +``` + +## NAGIOS_TYPE_HOST_DOWNTIME_ALERT + +Pattern : +``` +HOST DOWNTIME ALERT +``` + +## JAVAFILE + +Pattern : +``` +(?:[A-Za-z0-9_. 
-]+) +``` + +## NAGIOS_TYPE_SERVICE_NOTIFICATION + +Pattern : +``` +SERVICE NOTIFICATION +``` + +## BACULA_LOG_BEGIN_PRUNE_FILES + +Pattern : +``` +Begin pruning Files. +``` + +## NAGIOS_TYPE_CURRENT_SERVICE_STATE + +Pattern : +``` +CURRENT SERVICE STATE +``` + +## NAGIOS_TYPE_PASSIVE_SERVICE_CHECK + +Pattern : +``` +PASSIVE SERVICE CHECK +``` + +## NAGIOS_TYPE_TIMEPERIOD_TRANSITION + +Pattern : +``` +TIMEPERIOD TRANSITION +``` + +## HOSTPORT + +Pattern : +``` +%{IPORHOST}:%{POSINT} +``` + +## NAGIOS_TYPE_SERVICE_EVENT_HANDLER + +Pattern : +``` +SERVICE EVENT HANDLER +``` + +## NAGIOS_EC_SCHEDULE_HOST_DOWNTIME + +Pattern : +``` +SCHEDULE_HOST_DOWNTIME +``` + +## EXIM_FLAGS + +Pattern : +``` +(<=|[-=>*]>|[*]{2}|==) +``` + +## NAGIOS_TYPE_SERVICE_DOWNTIME_ALERT + +Pattern : +``` +SERVICE DOWNTIME ALERT +``` + +## EXIM_SUBJECT + +Pattern : +``` +(T=%{QS:exim_subject}) +``` + +## PATH + +Pattern : +``` +%{UNIXPATH}|%{WINPATH} +``` + +## NAGIOS_TYPE_SERVICE_FLAPPING_ALERT + +Pattern : +``` +SERVICE FLAPPING ALERT +``` + +## SSHD_CORRUPT_MAC + +Pattern : +``` +Corrupted MAC on input +``` + +## BACULA_LOG_NOPRUNE_JOBS + +Pattern : +``` +No Jobs found to prune. +``` + +## HTTPDUSER + +Pattern : +``` +%{EMAILADDRESS}|%{USER} +``` + +## BACULA_LOG_NOPRUNE_FILES + +Pattern : +``` +No Files found to prune. +``` + +## NAGIOS_EC_ENABLE_SVC_NOTIFICATIONS + +Pattern : +``` +ENABLE_SVC_NOTIFICATIONS +``` + +## BACULA_CAPACITY + +Pattern : +``` +%{INT}{1,3}(,%{INT}{3})* +``` + +## EXIM_PROTOCOL + +Pattern : +``` +(P=%{NOTSPACE:protocol}) +``` + +## URIPROTO + +Pattern : +``` +[A-Za-z]+(\+[A-Za-z+]+)? +``` + +## PROG + +Pattern : +``` +[\x21-\x5a\x5c\x5e-\x7e]+ +``` + +## NAGIOS_EC_ENABLE_HOST_NOTIFICATIONS + +Pattern : +``` +ENABLE_HOST_NOTIFICATIONS +``` + +## NAGIOS_EC_PROCESS_HOST_CHECK_RESULT + +Pattern : +``` +PROCESS_HOST_CHECK_RESULT +``` + +## BACULA_LOG_VSS + +Pattern : +``` +(Generate )?VSS (Writer)? +``` + +## NAGIOS_EC_DISABLE_SVC_NOTIFICATIONS + +Pattern : +``` +DISABLE_SVC_NOTIFICATIONS +``` + +## NAGIOS_EC_SCHEDULE_SERVICE_DOWNTIME + +Pattern : +``` +SCHEDULE_SERVICE_DOWNTIME +``` + +## MONGO_QUERY + +Pattern : +``` +\{ \{ .* \} ntoreturn: \} +``` + +## URIPATHPARAM + +Pattern : +``` +%{URIPATH}(?:%{URIPARAM})? 
+``` + +## NAGIOS_EC_DISABLE_HOST_NOTIFICATIONS + +Pattern : +``` +DISABLE_HOST_NOTIFICATIONS +``` + +## UNIXPATH + +Pattern : +``` +(/([\w_%!$@:.,~-]+|\\.)*)+ +``` + +## KITCHEN + +Pattern : +``` +\d{1,2}:\d{2}(AM|PM|am|pm) +``` + +## NAGIOSTIME + +Pattern : +``` +\[%{NUMBER:nagios_epoch}\] +``` + +## EMAILLOCALPART + +Pattern : +``` +[a-zA-Z][a-zA-Z0-9_.+-=:]+ +``` + +## JAVATHREAD + +Pattern : +``` +(?:[A-Z]{2}-Processor[\d]+) +``` + +## TIME + +Pattern : +``` +%{HOUR}:%{MINUTE}:%{SECOND} +``` + +## EXIM_MSG_SIZE + +Pattern : +``` +(S=%{NUMBER:exim_msg_size}) +``` + +## RUBY_LOGLEVEL + +Pattern : +``` +DEBUG|FATAL|ERROR|WARN|INFO +``` + +## BASE16NUM + +Pattern : +``` +[+-]?(?:0x)?(?:[0-9A-Fa-f]+) +``` + +## ISO8601_TIMEZONE + +Pattern : +``` +Z|[+-]%{HOUR}(?::?%{MINUTE}) +``` + +## REDISTIMESTAMP + +Pattern : +``` +%{MONTHDAY} %{MONTH} %{TIME} +``` + +## NAGIOS_EC_PROCESS_SERVICE_CHECK_RESULT + +Pattern : +``` +PROCESS_SERVICE_CHECK_RESULT +``` + +## SSHD_PACKET_CORRUPT + +Pattern : +``` +Disconnecting: Packet corrupt +``` + +## SYSLOG5424PRI + +Pattern : +``` +<%{NONNEGINT:syslog5424_pri}> +``` + +## EMAILADDRESS + +Pattern : +``` +%{EMAILLOCALPART}@%{HOSTNAME} +``` + +## MODSECRULEID + +Pattern : +``` +\[id %{QUOTEDSTRING:ruleid}\] +``` + +## SYSLOGTIMESTAMP + +Pattern : +``` +%{MONTH} +%{MONTHDAY} %{TIME} +``` + +## NAGIOS_EC_ENABLE_HOST_SVC_NOTIFICATIONS + +Pattern : +``` +ENABLE_HOST_SVC_NOTIFICATIONS +``` + +## NAGIOS_EC_DISABLE_HOST_SVC_NOTIFICATIONS + +Pattern : +``` +DISABLE_HOST_SVC_NOTIFICATIONS +``` + +## EXIM_HEADER_ID + +Pattern : +``` +(id=%{NOTSPACE:exim_header_id}) +``` + +## URIHOST + +Pattern : +``` +%{IPORHOST}(?::%{POSINT:port})? +``` + +## DATE + +Pattern : +``` +%{DATE_US}|%{DATE_EU}|%{DATE_X} +``` + +## SSHD_TUNN_TIMEOUT + +Pattern : +``` +Timeout, client not responding. +``` + +## MCOLLECTIVEAUDIT + +Pattern : +``` +%{TIMESTAMP_ISO8601:timestamp}: +``` + +## CISCOTAG + +Pattern : +``` +[A-Z0-9]+-%{INT}-(?:[A-Z0-9_]+) +``` + +## MODSECRULEREV + +Pattern : +``` +\[rev %{QUOTEDSTRING:rulerev}\] +``` + +## HAPROXYCAPTUREDREQUESTHEADERS + +Pattern : +``` +%{DATA:captured_request_headers} +``` + +## CISCO_INTERVAL + +Pattern : +``` +first hit|%{INT}-second interval +``` + +## DATE_X + +Pattern : +``` +%{YEAR}/%{MONTHNUM2}/%{MONTHDAY} +``` + +## SSHD_INIT + +Pattern : +``` +%{SSHD_LISTEN}|%{SSHD_TERMINATE} +``` + +## WINPATH + +Pattern : +``` +(?:[A-Za-z]+:|\\)(?:\\[^\\?*]*)+ +``` + +## HAPROXYCAPTUREDRESPONSEHEADERS + +Pattern : +``` +%{DATA:captured_response_headers} +``` + +## MODSECURI + +Pattern : +``` +\[uri ["']%{DATA:targeturi}["']\] +``` + +## CISCO_DIRECTION + +Pattern : +``` +Inbound|inbound|Outbound|outbound +``` + +## MODSECRULEDATA + +Pattern : +``` +\[data %{QUOTEDSTRING:ruledata}\] +``` + +## MODSECRULELINE + +Pattern : +``` +\[line %{QUOTEDSTRING:ruleline}\] +``` + +## MODSECRULEFILE + +Pattern : +``` +\[file %{QUOTEDSTRING:rulefile}\] +``` + +## SECOND + +Pattern : +``` +(?:[0-5]?[0-9]|60)(?:[:.,][0-9]+)? +``` + +## BACULA_LOG_CANCELLING + +Pattern : +``` +Cancelling duplicate JobId=%{INT}. 
+``` + +## MODSECRULEMSG + +Pattern : +``` +\[msg %{QUOTEDSTRING:rulemessage}\] +``` + +## SSHD_TUNN_ERR3 + +Pattern : +``` +error: bind: Address already in use +``` + +## BACULA_LOG_STARTRESTORE + +Pattern : +``` +Start Restore Job %{BACULA_JOB:job} +``` + +## SYSLOGLINE + +Pattern : +``` +%{SYSLOGBASE2} %{GREEDYDATA:message} +``` + +## COMMONMAC + +Pattern : +``` +(?:[A-Fa-f0-9]{2}:){5}[A-Fa-f0-9]{2} +``` + +## WINDOWSMAC + +Pattern : +``` +(?:[A-Fa-f0-9]{2}-){5}[A-Fa-f0-9]{2} +``` + +## SYSLOGPROG + +Pattern : +``` +%{PROG:program}(?:\[%{POSINT:pid}\])? +``` + +## JAVAMETHOD + +Pattern : +``` +(?:()|[a-zA-Z$_][a-zA-Z$_0-9]*) +``` + +## DATE_US + +Pattern : +``` +%{MONTHNUM}[/-]%{MONTHDAY}[/-]%{YEAR} +``` + +## CISCOMAC + +Pattern : +``` +(?:[A-Fa-f0-9]{4}\.){2}[A-Fa-f0-9]{4} +``` + +## MODSECUID + +Pattern : +``` +\[unique_id %{QUOTEDSTRING:uniqueid}\] +``` + +## MAC + +Pattern : +``` +%{CISCOMAC}|%{WINDOWSMAC}|%{COMMONMAC} +``` + +## ELB_URIPATHPARAM + +Pattern : +``` +%{URIPATH:path}(?:%{URIPARAM:params})? +``` + +## BACULA_LOG_NOPRIOR + +Pattern : +``` +No prior Full backup Job record found. +``` + +## MODSECMATCHOFFSET + +Pattern : +``` +\[offset %{QUOTEDSTRING:matchoffset}\] +``` + +## BACULA_TIMESTAMP + +Pattern : +``` +%{MONTHDAY}-%{MONTH} %{HOUR}:%{MINUTE} +``` + +## MODSECHOSTNAME + +Pattern : +``` +\[hostname ['"]%{DATA:targethost}["']\] +``` + +## TTY + +Pattern : +``` +/dev/(pts|tty([pq])?)(\w+)?/?(?:[0-9]+) +``` + +## DATE_EU + +Pattern : +``` +%{MONTHDAY}[./-]%{MONTHNUM}[./-]%{YEAR} +``` + +## URIPATH + +Pattern : +``` +(?:/[A-Za-z0-9$.+!*'(){},~:;=@#%_\-]*)+ +``` + +## HTTPD_ERRORLOG + +Pattern : +``` +%{HTTPD20_ERRORLOG}|%{HTTPD24_ERRORLOG} +``` + +## MONTHDAY + +Pattern : +``` +(?:0[1-9])|(?:[12][0-9])|(?:3[01])|[1-9] +``` + +## BACULA_LOG_USEDEVICE + +Pattern : +``` +Using Device \"%{BACULA_DEVICE:device}\" +``` + +## RFC822Z + +Pattern : +``` +[0-3]\d %{MONTH} %{YEAR} %{TIME} %{NUMTZ} +``` + +## MODSECRULESEVERITY + +Pattern : +``` +\[severity ["']%{WORD:ruleseverity}["']\] +``` + +## ANSIC + +Pattern : +``` +%{DAY} %{MONTH} [_123]\d %{TIME} %{YEAR}" +``` + +## GENERICAPACHEERROR + +Pattern : +``` +%{APACHEERRORPREFIX} %{GREEDYDATA:message} +``` + +## SSHD_CONN_CLOSE + +Pattern : +``` +Connection closed by %{IP:sshd_client_ip}$ +``` + +## CISCOTIMESTAMP + +Pattern : +``` +%{MONTH} +%{MONTHDAY}(?: %{YEAR})? %{TIME} +``` + +## APACHEERRORTIME + +Pattern : +``` +%{DAY} %{MONTH} %{MONTHDAY} %{TIME} %{YEAR} +``` + +## CISCOFW104004 + +Pattern : +``` +\((?:Primary|Secondary)\) Switching to OK\. +``` + +## HTTPDATE + +Pattern : +``` +%{MONTHDAY}/%{MONTH}/%{YEAR}:%{TIME} %{INT} +``` + +## HTTPDERROR_DATE + +Pattern : +``` +%{DAY} %{MONTH} %{MONTHDAY} %{TIME} %{YEAR} +``` + +## EXIM_QT + +Pattern : +``` +((\d+y)?(\d+w)?(\d+d)?(\d+h)?(\d+m)?(\d+s)?) +``` + +## BACULA_LOG_NOJOBSTAT + +Pattern : +``` +Fatal error: No Job status returned from FD. 
+``` + +## NAGIOS_WARNING + +Pattern : +``` +Warning:%{SPACE}%{GREEDYDATA:nagios_message} +``` + +## EXIM_MSGID + +Pattern : +``` +[0-9A-Za-z]{6}-[0-9A-Za-z]{6}-[0-9A-Za-z]{2} +``` + +## BASE10NUM + +Pattern : +``` +[+-]?(?:(?:[0-9]+(?:\.[0-9]+)?)|(?:\.[0-9]+)) +``` + +## REDISLOG + +Pattern : +``` +\[%{POSINT:pid}\] %{REDISTIMESTAMP:time} \*\s +``` + +## URIPARAM + +Pattern : +``` +\?[A-Za-z0-9$.+!*'|(){},~@#%&/=:;_?\-\[\]<>]* +``` + +## COMBINEDAPACHELOG + +Pattern : +``` +%{COMMONAPACHELOG} %{QS:referrer} %{QS:agent} +``` + +## SYSLOGFACILITY + +Pattern : +``` +<%{NONNEGINT:facility}.%{NONNEGINT:priority}> +``` + +## RFC1123 + +Pattern : +``` +%{DAY}, [0-3]\d %{MONTH} %{YEAR} %{TIME} %{TZ} +``` + +## UNIXDATE + +Pattern : +``` +%{DAY} %{MONTH} [_123]\d %{TIME} %{TZ} %{YEAR} +``` + +## RFC850 + +Pattern : +``` +%{DAY}, [0-3]\d-%{MONTH}-%{YEAR} %{TIME} %{TZ} +``` + +## SYSLOG5424LINE + +Pattern : +``` +%{SYSLOG5424BASE} +%{GREEDYDATA:syslog5424_msg} +``` + +## CISCOFW104003 + +Pattern : +``` +\((?:Primary|Secondary)\) Switching to FAILED\. +``` + +## RUBYDATE + +Pattern : +``` +%{DAY} %{MONTH} [0-3]\d %{TIME} %{NUMTZ} %{YEAR} +``` + +## BACULA_LOG_NOOPEN + +Pattern : +``` +\s+Cannot open %{DATA}: ERR=%{GREEDYDATA:berror} +``` + +## BACULA_LOG_STARTJOB + +Pattern : +``` +Start Backup JobId %{INT}, Job=%{BACULA_JOB:job} +``` + +## DATESTAMP_RFC822 + +Pattern : +``` +%{DAY} %{MONTH} %{MONTHDAY} %{YEAR} %{TIME} %{TZ} +``` + +## DATESTAMP_OTHER + +Pattern : +``` +%{DAY} %{MONTH} %{MONTHDAY} %{TIME} %{TZ} %{YEAR} +``` + +## RFC3339 + +Pattern : +``` +%{YEAR}-[01]\d-[0-3]\dT%{TIME}%{ISO8601_TIMEZONE} +``` + +## RFC1123Z + +Pattern : +``` +%{DAY}, [0-3]\d %{MONTH} %{YEAR} %{TIME} %{NUMTZ} +``` + +## BACULA_LOG_NOSTAT + +Pattern : +``` +\s+Could not stat %{DATA}: ERR=%{GREEDYDATA:berror} +``` + +## SSHD_TERMINATE + +Pattern : +``` +Received signal %{NUMBER:sshd_signal}; terminating. +``` + +## UUID + +Pattern : +``` +[A-Fa-f0-9]{8}-(?:[A-Fa-f0-9]{4}-){3}[A-Fa-f0-9]{12} +``` + +## SSHD_LOGOUT_ERR + +Pattern : +``` +syslogin_perform_logout: logout\(\) returned an error +``` + +## RCONTROLLER + +Pattern : +``` +%{RAILS_CONSTROLLER:controller}#%{RAIL_ACTION:action} +``` + +## JAVACLASS + +Pattern : +``` +(?:[a-zA-Z$_][a-zA-Z$_0-9]*\.)*[a-zA-Z$_][a-zA-Z$_0-9]* +``` + +## DATESTAMP_EVENTLOG + +Pattern : +``` +%{YEAR}%{MONTHNUM2}%{MONTHDAY}%{HOUR}%{MINUTE}%{SECOND} +``` + +## NGINXERRTIME + +Pattern : +``` +%{YEAR}/%{MONTHNUM2}/%{DAY2} %{HOUR}:%{MINUTE}:%{SECOND} +``` + +## BACULA_LOG_BEGIN_PRUNE_JOBS + +Pattern : +``` +Begin pruning Jobs older than %{INT} month %{INT} days . +``` + +## RFC3339NANO + +Pattern : +``` +%{YEAR}-[01]\d-[0-3]\dT%{TIME}\.\d{9}%{ISO8601_TIMEZONE} +``` + +## BACULA_LOG_MARKCANCEL + +Pattern : +``` +JobId %{INT}, Job %{BACULA_JOB:job} marked to be canceled. +``` + +## BACULA_LOG_NEW_VOLUME + +Pattern : +``` +Created new Volume \"%{BACULA_VOLUME:volume}\" in catalog. 
+``` + +## SSHD_TCPWRAP_FAIL5 + +Pattern : +``` +warning: can't get client address: Connection reset by peer +``` + +## EXIM_INTERFACE + +Pattern : +``` +(I=\[%{IP:exim_interface}\](:%{NUMBER:exim_interface_port})) +``` + +## BACULA_LOG_NOOPENDIR + +Pattern : +``` +\s+Could not open directory %{DATA}: ERR=%{GREEDYDATA:berror} +``` + +## BACULA_LOG_CLIENT_RBJ + +Pattern : +``` +shell command: run ClientRunBeforeJob \"%{GREEDYDATA:runjob}\" +``` + +## SSHD_IDENT_FAIL + +Pattern : +``` +Did not receive identification string from %{IP:sshd_client_ip} +``` + +## DATESTAMP_RFC2822 + +Pattern : +``` +%{DAY}, %{MONTHDAY} %{MONTH} %{YEAR} %{TIME} %{ISO8601_TIMEZONE} +``` + +## BACULA_LOG_MAXSTART + +Pattern : +``` +Fatal error: Job canceled because max start delay time exceeded. +``` + +## QUOTEDSTRING + +Pattern : +``` +("(\\.|[^\\"]+)+")|""|('(\\.|[^\\']+)+')|''|(`(\\.|[^\\`]+)+`)|`` +``` + +## REDISLOG2 + +Pattern : +``` +%{POSINT:pid}:M %{REDISTIMESTAMP:time} [*#] %{GREEDYDATA:message} +``` + +## BACULA_LOG_PRUNED_JOBS + +Pattern : +``` +Pruned %{INT} Jobs* for client %{BACULA_HOST:client} from catalog. +``` + +## RT_FLOW_EVENT + +Pattern : +``` +(RT_FLOW_SESSION_CREATE|RT_FLOW_SESSION_CLOSE|RT_FLOW_SESSION_DENY) +``` + +## BACULA_LOG_NOSUIT + +Pattern : +``` +No prior or suitable Full backup found in catalog. Doing FULL backup. +``` + +## CISCOFW302010 + +Pattern : +``` +%{INT:connection_count} in use, %{INT:connection_count_max} most used +``` + +## SSHD_INVAL_USER + +Pattern : +``` +Invalid user\s*%{USERNAME:sshd_invalid_user}? from %{IP:sshd_client_ip} +``` + +## SSHD_SESSION_CLOSE + +Pattern : +``` +pam_unix\(sshd:session\): session closed for user %{USERNAME:sshd_user} +``` + +## MONGO_LOG + +Pattern : +``` +%{SYSLOGTIMESTAMP:timestamp} \[%{WORD:component}\] %{GREEDYDATA:message} +``` + +## BACULA_LOG_READYAPPEND + +Pattern : +``` +Ready to append to end of Volume \"%{BACULA_VOLUME:volume}\" size=%{INT} +``` + +## CRONLOG + +Pattern : +``` +%{SYSLOGBASE} \(%{USER:user}\) %{CRON_ACTION:action} \(%{DATA:message}\) +``` + +## BACULA_LOG_JOB + +Pattern : +``` +(Error: )?Bacula %{BACULA_HOST} %{BACULA_VERSION} \(%{BACULA_VERSION}\): +``` + +## SSHD_LISTEN + +Pattern : +``` +Server listening on %{IP:sshd_listen_ip} port %{NUMBER:sshd_listen_port}. +``` + +## URI + +Pattern : +``` +%{URIPROTO}://(?:%{USER}(?::[^@]*)?@)?(?:%{URIHOST})?(?:%{URIPATHPARAM})? +``` + +## RAILS3 + +Pattern : +``` +%{RAILS3HEAD}(?:%{RPROCESSING})?%{RAILS_CONTEXT:context}(?:%{RAILS3FOOT})? +``` + +## BASE16FLOAT + +Pattern : +``` +\b[+-]?(?:0x)?(?:(?:[0-9A-Fa-f]+(?:\.[0-9A-Fa-f]*)?)|(?:\.[0-9A-Fa-f]+))\b +``` + +## HAPROXYTIME + +Pattern : +``` +%{HOUR:haproxy_hour}:%{MINUTE:haproxy_minute}(?::%{SECOND:haproxy_second}) +``` + +## CISCOFW104001 + +Pattern : +``` +\((?:Primary|Secondary)\) Switching to ACTIVE - %{GREEDYDATA:switch_reason} +``` + +## CATALINA_DATESTAMP + +Pattern : +``` +%{MONTH} %{MONTHDAY}, 20%{YEAR} %{HOUR}:?%{MINUTE}(?::?%{SECOND}) (?:AM|PM) +``` + +## CISCOFW105008 + +Pattern : +``` +\((?:Primary|Secondary)\) Testing [Ii]nterface %{GREEDYDATA:interface_name} +``` + +## HOSTNAME + +Pattern : +``` +\b[0-9A-Za-z][0-9A-Za-z-]{0,62}(?:\.[0-9A-Za-z][0-9A-Za-z-]{0,62})*(\.?|\b) +``` + +## CISCOFW104002 + +Pattern : +``` +\((?:Primary|Secondary)\) Switching to STANDBY - %{GREEDYDATA:switch_reason} +``` + +## BACULA_LOG_VOLUME_PREVWRITTEN + +Pattern : +``` +Volume \"%{BACULA_VOLUME:volume}\" previously written, moving to end of data. 
+``` + +## SSHD_BAD_VERSION + +Pattern : +``` +Bad protocol version identification '%{GREEDYDATA}' from %{IP:sshd_client_ip} +``` + +## BACULA_LOG_PRUNED_FILES + +Pattern : +``` +Pruned Files from %{INT} Jobs* for client %{BACULA_HOST:client} from catalog. +``` + +## SSHD_BADL_PREAUTH + +Pattern : +``` +Bad packet length %{NUMBER:sshd_packet_length}. \[%{GREEDYDATA:sshd_privsep}\] +``` + +## CATALINALOG + +Pattern : +``` +%{CATALINA_DATESTAMP:timestamp} %{JAVACLASS:class} %{JAVALOGMESSAGE:logmessage} +``` + +## RAILS_TIMESTAMP + +Pattern : +``` +%{YEAR}-%{MONTHNUM}-%{MONTHDAY} %{HOUR}:%{MINUTE}:%{SECOND} %{ISO8601_TIMEZONE} +``` + +## SSHD_TUNN_ERR1 + +Pattern : +``` +error: connect_to %{IP:sshd_listen_ip} port %{NUMBER:sshd_listen_port}: failed. +``` + +## EXIM_DATE + +Pattern : +``` +%{YEAR:exim_year}-%{MONTHNUM:exim_month}-%{MONTHDAY:exim_day} %{TIME:exim_time} +``` + +## BACULA_LOG_DUPLICATE + +Pattern : +``` +Fatal error: JobId %{INT:duplicate} already running. Duplicate job not allowed. +``` + +## SSHD_REFUSE_CONN + +Pattern : +``` +refused connect from %{DATA:sshd_client_hostname} \(%{IPORHOST:sshd_client_ip}\) +``` + +## SSHD_TOOMANY_AUTH + +Pattern : +``` +Disconnecting: Too many authentication failures for %{USERNAME:sshd_invalid_user} +``` + +## BACULA_LOG_ALL_RECORDS_PRUNED + +Pattern : +``` +All records pruned from Volume \"%{BACULA_VOLUME:volume}\"; marking it \"Purged\" +``` + +## SSHD_DISR_PREAUTH + +Pattern : +``` +Disconnecting: %{GREEDYDATA:sshd_disconnect_status} \[%{GREEDYDATA:sshd_privsep}\] +``` + +## MCOLLECTIVE + +Pattern : +``` +., \[%{TIMESTAMP_ISO8601:timestamp} #%{POSINT:pid}\]%{SPACE}%{LOGLEVEL:event_level} +``` + +## BACULA_LOG_DIFF_FS + +Pattern : +``` +\s+%{UNIXPATH} is a different filesystem. Will not descend from %{UNIXPATH} into it. +``` + +## SSHD_TUNN_ERR2 + +Pattern : +``` +error: channel_setup_fwd_listener: cannot listen to port: %{NUMBER:sshd_listen_port} +``` + +## CISCOFW321001 + +Pattern : +``` +Resource '%{WORD:resource_name}' limit of %{POSINT:resource_limit} reached for system +``` + +## BACULA_LOG_NO_AUTH + +Pattern : +``` +Fatal error: Unable to authenticate with File daemon at %{HOSTNAME}. Possible causes: +``` + +## POSTGRESQL + +Pattern : +``` +%{DATESTAMP:timestamp} %{TZ} %{DATA:user_id} %{GREEDYDATA:connection_id} %{POSINT:pid} +``` + +## ELB_REQUEST_LINE + +Pattern : +``` +(?:%{WORD:verb} %{ELB_URI:request}(?: HTTP/%{NUMBER:httpversion})?|%{DATA:rawrequest}) +``` + +## SSHD_SESSION_OPEN + +Pattern : +``` +pam_unix\(sshd:session\): session opened for user %{USERNAME:sshd_user} by \(uid=\d+\) +``` + +## TOMCAT_DATESTAMP + +Pattern : +``` +20%{YEAR}-%{MONTHNUM}-%{MONTHDAY} %{HOUR}:?%{MINUTE}(?::?%{SECOND}) %{ISO8601_TIMEZONE} +``` + +## S3_REQUEST_LINE + +Pattern : +``` +(?:%{WORD:verb} %{NOTSPACE:request}(?: HTTP/%{NUMBER:httpversion})?|%{DATA:rawrequest}) +``` + +## RAILS3FOOT + +Pattern : +``` +Completed %{NUMBER:response}%{DATA} in %{NUMBER:totalms}ms %{RAILS3PROFILE}%{GREEDYDATA} +``` + +## CISCOFW105004 + +Pattern : +``` +\((?:Primary|Secondary)\) Monitoring on [Ii]nterface %{GREEDYDATA:interface_name} normal +``` + +## CISCOFW105003 + +Pattern : +``` +\((?:Primary|Secondary)\) Monitoring on [Ii]nterface %{GREEDYDATA:interface_name} waiting +``` + +## BACULA_LOG_JOBEND + +Pattern : +``` +Job write elapsed time = %{DATA:elapsed}, Transfer rate = %{NUMBER} (K|M|G)? Bytes/second +``` + +## TIMESTAMP_ISO8601 + +Pattern : +``` +%{YEAR}-%{MONTHNUM}-%{MONTHDAY}[T ]%{HOUR}:?%{MINUTE}(?::?%{SECOND})?%{ISO8601_TIMEZONE}? 
+``` + +## SYSLOGBASE + +Pattern : +``` +%{SYSLOGTIMESTAMP:timestamp} (?:%{SYSLOGFACILITY} )?%{SYSLOGHOST:logsource} %{SYSLOGPROG}: +``` + +## SSHD_TUNN_ERR4 + +Pattern : +``` +error: channel_setup_fwd_listener_tcpip: cannot listen to port: %{NUMBER:sshd_listen_port} +``` + +## MODSECPREFIX + +Pattern : +``` +%{APACHEERRORPREFIX} ModSecurity: %{NOTSPACE:modsecseverity}\. %{GREEDYDATA:modsecmessage} +``` + +## JAVASTACKTRACEPART + +Pattern : +``` +%{SPACE}at %{JAVACLASS:class}\.%{JAVAMETHOD:method}\(%{JAVAFILE:file}(?::%{NUMBER:line})?\) +``` + +## EXIM_REMOTE_HOST + +Pattern : +``` +(H=(%{NOTSPACE:remote_hostname} )?(\(%{NOTSPACE:remote_heloname}\) )?\[%{IP:remote_host}\]) +``` + +## ELB_URI + +Pattern : +``` +%{URIPROTO:proto}://(?:%{USER}(?::[^@]*)?@)?(?:%{URIHOST:urihost})?(?:%{ELB_URIPATHPARAM})? +``` + +## DAY + +Pattern : +``` +Mon(?:day)?|Tue(?:sday)?|Wed(?:nesday)?|Thu(?:rsday)?|Fri(?:day)?|Sat(?:urday)?|Sun(?:day)? +``` + +## SSHD_TUNN + +Pattern : +``` +%{SSHD_TUNN_ERR1}|%{SSHD_TUNN_ERR2}|%{SSHD_TUNN_ERR3}|%{SSHD_TUNN_ERR4}|%{SSHD_TUNN_TIMEOUT} +``` + +## SSHD_SESSION_FAIL + +Pattern : +``` +pam_systemd\(sshd:session\): Failed to release session: %{GREEDYDATA:sshd_disconnect_status} +``` + +## BACULA_LOG_NOJOBS + +Pattern : +``` +There are no more Jobs associated with Volume \"%{BACULA_VOLUME:volume}\". Marking it purged. +``` + +## RPROCESSING + +Pattern : +``` +\W*Processing by %{RCONTROLLER} as %{NOTSPACE:format}(?:\W*Parameters: \{\%\{DATA:params}}\W*)? +``` + +## CISCOFW105009 + +Pattern : +``` +\((?:Primary|Secondary)\) Testing on [Ii]nterface %{GREEDYDATA:interface_name} (?:Passed|Failed) +``` + +## SSHD_LOG + +Pattern : +``` +%{SSHD_INIT}|%{SSHD_NORMAL_LOG}|%{SSHD_PROBE_LOG}|%{SSHD_CORRUPTED}|%{SSHD_TUNN}|%{SSHD_PREAUTH} +``` + +## SSHD_DISC_PREAUTH + +Pattern : +``` +Disconnected from %{IP:sshd_client_ip} port %{NUMBER:sshd_port}\s*(?:\[%{GREEDYDATA:sshd_privsep}\]|) +``` + +## SSHD_REST_PREAUTH + +Pattern : +``` +Connection reset by %{IP:sshd_client_ip} port %{NUMBER:sshd_port}\s*(?:\[%{GREEDYDATA:sshd_privsep}\]|) +``` + +## TOMCATLOG + +Pattern : +``` +%{TOMCAT_DATESTAMP:timestamp} \| %{LOGLEVEL:level} \| %{JAVACLASS:class} - %{JAVALOGMESSAGE:logmessage} +``` + +## SSHD_CLOS_PREAUTH + +Pattern : +``` +Connection closed by %{IP:sshd_client_ip} port %{NUMBER:sshd_port}\s*(?:\[%{GREEDYDATA:sshd_privsep}\]|) +``` + +## CISCO_TAGGED_SYSLOG + +Pattern : +``` +^<%{POSINT:syslog_pri}>%{CISCOTIMESTAMP:timestamp}( %{SYSLOGHOST:sysloghost})? ?: %%{CISCOTAG:ciscotag}: +``` + +## SSHD_INVA_PREAUTH + +Pattern : +``` +input_userauth_request: invalid user %{USERNAME:sshd_invalid_user}?\s*(?:\[%{GREEDYDATA:sshd_privsep}\]|) +``` + +## RAILS3HEAD + +Pattern : +``` +(?m)Started %{WORD:verb} "%{URIPATHPARAM:request}" for %{IPORHOST:clientip} at %{RAILS_TIMESTAMP:timestamp} +``` + +## CISCOFW105005 + +Pattern : +``` +\((?:Primary|Secondary)\) Lost Failover communications with mate on [Ii]nterface %{GREEDYDATA:interface_name} +``` + +## BACULA_LOG_NEW_LABEL + +Pattern : +``` +Labeled new Volume \"%{BACULA_VOLUME:volume}\" on device \"%{BACULA_DEVICE:device}\" \(%{BACULA_DEVICEPATH}\). 
+``` + +## NAGIOS_EC_LINE_ENABLE_HOST_CHECK + +Pattern : +``` +%{NAGIOS_TYPE_EXTERNAL_COMMAND:nagios_type}: %{NAGIOS_EC_ENABLE_HOST_CHECK:nagios_command};%{DATA:nagios_hostname} +``` + +## COWRIE_NEW_CO + +Pattern : +``` +New connection: %{IPV4:source_ip}:[0-9]+ \(%{IPV4:dest_ip}:%{INT:dest_port}\) \[session: %{DATA:telnet_session}\]$ +``` + +## CISCO_ACTION + +Pattern : +``` +Built|Teardown|Deny|Denied|denied|requested|permitted|denied by ACL|discarded|est-allowed|Dropping|created|deleted +``` + +## NAGIOS_EC_LINE_DISABLE_HOST_CHECK + +Pattern : +``` +%{NAGIOS_TYPE_EXTERNAL_COMMAND:nagios_type}: %{NAGIOS_EC_DISABLE_HOST_CHECK:nagios_command};%{DATA:nagios_hostname} +``` + +## CISCOFW402117 + +Pattern : +``` +%{WORD:protocol}: Received a non-IPSec packet \(protocol= %{WORD:orig_protocol}\) from %{IP:src_ip} to %{IP:dst_ip} +``` + +## BACULA_LOG_WROTE_LABEL + +Pattern : +``` +Wrote label to prelabeled Volume \"%{BACULA_VOLUME:volume}\" on device \"%{BACULA_DEVICE}\" \(%{BACULA_DEVICEPATH}\) +``` + +## RAILS3PROFILE + +Pattern : +``` +(?:\(Views: %{NUMBER:viewms}ms \| ActiveRecord: %{NUMBER:activerecordms}ms|\(ActiveRecord: %{NUMBER:activerecordms}ms)? +``` + +## CISCOFW500004 + +Pattern : +``` +%{CISCO_REASON:reason} for protocol=%{WORD:protocol}, from %{IP:src_ip}/%{INT:src_port} to %{IP:dst_ip}/%{INT:dst_port} +``` + +## NAGIOS_TIMEPERIOD_TRANSITION + +Pattern : +``` +%{NAGIOS_TYPE_TIMEPERIOD_TRANSITION:nagios_type}: %{DATA:nagios_service};%{DATA:nagios_unknown1};%{DATA:nagios_unknown2} +``` + +## NAGIOS_PASSIVE_HOST_CHECK + +Pattern : +``` +%{NAGIOS_TYPE_PASSIVE_HOST_CHECK:nagios_type}: %{DATA:nagios_hostname};%{DATA:nagios_state};%{GREEDYDATA:nagios_comment} +``` + +## NAGIOS_HOST_DOWNTIME_ALERT + +Pattern : +``` +%{NAGIOS_TYPE_HOST_DOWNTIME_ALERT:nagios_type}: %{DATA:nagios_hostname};%{DATA:nagios_state};%{GREEDYDATA:nagios_comment} +``` + +## NAGIOS_HOST_FLAPPING_ALERT + +Pattern : +``` +%{NAGIOS_TYPE_HOST_FLAPPING_ALERT:nagios_type}: %{DATA:nagios_hostname};%{DATA:nagios_state};%{GREEDYDATA:nagios_message} +``` + +## HTTPD20_ERRORLOG + +Pattern : +``` +\[%{HTTPDERROR_DATE:timestamp}\] \[%{LOGLEVEL:loglevel}\] (?:\[client %{IPORHOST:clientip}\] ){0,1}%{GREEDYDATA:errormsg} +``` + +## NGINXERROR + +Pattern : +``` +%{NGINXERRTIME:time} \[%{LOGLEVEL:loglevel}\] %{NONNEGINT:pid}#%{NONNEGINT:tid}: (\*%{NONNEGINT:cid} )?%{GREEDYDATA:message} +``` + +## MYSQL_AUTH_FAIL + +Pattern : +``` +%{TIMESTAMP_ISO8601:time} %{NUMBER} \[Note\] Access denied for user '%{DATA:user}'@'%{IP:source_ip}' \(using password: YES\) +``` + +## BACULA_LOG_MAX_CAPACITY + +Pattern : +``` +User defined maximum volume capacity %{BACULA_CAPACITY} exceeded on device \"%{BACULA_DEVICE:device}\" \(%{BACULA_DEVICEPATH}\) +``` + +## NAGIOS_EC_LINE_ENABLE_HOST_NOTIFICATIONS + +Pattern : +``` +%{NAGIOS_TYPE_EXTERNAL_COMMAND:nagios_type}: %{NAGIOS_EC_ENABLE_HOST_NOTIFICATIONS:nagios_command};%{GREEDYDATA:nagios_hostname} +``` + +## HAPROXYDATE + +Pattern : +``` +%{MONTHDAY:haproxy_monthday}/%{MONTH:haproxy_month}/%{YEAR:haproxy_year}:%{HAPROXYTIME:haproxy_time}.%{INT:haproxy_milliseconds} +``` + +## CISCOFW106021 + +Pattern : +``` +%{CISCO_ACTION:action} %{WORD:protocol} reverse path check from %{IP:src_ip} to %{IP:dst_ip} on interface %{GREEDYDATA:interface} +``` + +## RUBY_LOGGER + +Pattern : +``` +[DFEWI], \[%{TIMESTAMP_ISO8601:timestamp} #%{POSINT:pid}\] *%{RUBY_LOGLEVEL:loglevel} -- +%{DATA:progname}: %{GREEDYDATA:message} +``` + +## NAGIOS_EC_LINE_DISABLE_HOST_NOTIFICATIONS + +Pattern : +``` 
+%{NAGIOS_TYPE_EXTERNAL_COMMAND:nagios_type}: %{NAGIOS_EC_DISABLE_HOST_NOTIFICATIONS:nagios_command};%{GREEDYDATA:nagios_hostname} +``` + +## CISCOFW110002 + +Pattern : +``` +%{CISCO_REASON:reason} for %{WORD:protocol} from %{DATA:src_interface}:%{IP:src_ip}/%{INT:src_port} to %{IP:dst_ip}/%{INT:dst_port} +``` + +## NAGIOS_EC_LINE_ENABLE_HOST_SVC_NOTIFICATIONS + +Pattern : +``` +%{NAGIOS_TYPE_EXTERNAL_COMMAND:nagios_type}: %{NAGIOS_EC_ENABLE_HOST_SVC_NOTIFICATIONS:nagios_command};%{GREEDYDATA:nagios_hostname} +``` + +## NAGIOS_EC_LINE_DISABLE_HOST_SVC_NOTIFICATIONS + +Pattern : +``` +%{NAGIOS_TYPE_EXTERNAL_COMMAND:nagios_type}: %{NAGIOS_EC_DISABLE_HOST_SVC_NOTIFICATIONS:nagios_command};%{GREEDYDATA:nagios_hostname} +``` + +## SSHD_RMAP_FAIL + +Pattern : +``` +reverse mapping checking getaddrinfo for %{HOSTNAME:sshd_client_hostname} \[%{IP:sshd_client_ip}\] failed - POSSIBLE BREAK-IN ATTEMPT! +``` + +## HAPROXYHTTP + +Pattern : +``` +(?:%{SYSLOGTIMESTAMP:syslog_timestamp}|%{TIMESTAMP_ISO8601:timestamp8601}) %{IPORHOST:syslog_server} %{SYSLOGPROG}: %{HAPROXYHTTPBASE} +``` + +## SSHD_USER_FAIL + +Pattern : +``` +Failed password for invalid user %{USERNAME:sshd_invalid_user} from %{IP:sshd_client_ip} port %{NUMBER:sshd_port} %{WORD:sshd_protocol} +``` + +## SYSLOGBASE2 + +Pattern : +``` +(?:%{SYSLOGTIMESTAMP:timestamp}|%{TIMESTAMP_ISO8601:timestamp8601}) (?:%{SYSLOGFACILITY} )?%{SYSLOGHOST:logsource}+(?: %{SYSLOGPROG}:|) +``` + +## SSHD_NORMAL_LOG + +Pattern : +``` +%{SSHD_SUCCESS}|%{SSHD_DISCONNECT}|%{SSHD_CONN_CLOSE}|%{SSHD_SESSION_OPEN}|%{SSHD_SESSION_CLOSE}|%{SSHD_SESSION_FAIL}|%{SSHD_LOGOUT_ERR} +``` + +## SSHD_FAIL + +Pattern : +``` +Failed %{WORD:sshd_auth_type} for %{USERNAME:sshd_invalid_user} from %{IP:sshd_client_ip} port %{NUMBER:sshd_port} %{WORD:sshd_protocol} +``` + +## NAGIOS_EC_LINE_ENABLE_SVC_CHECK + +Pattern : +``` +%{NAGIOS_TYPE_EXTERNAL_COMMAND:nagios_type}: %{NAGIOS_EC_ENABLE_SVC_CHECK:nagios_command};%{DATA:nagios_hostname};%{DATA:nagios_service} +``` + +## NAGIOS_EC_LINE_DISABLE_SVC_CHECK + +Pattern : +``` +%{NAGIOS_TYPE_EXTERNAL_COMMAND:nagios_type}: %{NAGIOS_EC_DISABLE_SVC_CHECK:nagios_command};%{DATA:nagios_hostname};%{DATA:nagios_service} +``` + +## CISCO_REASON + +Pattern : +``` +Duplicate TCP SYN|Failed to locate egress interface|Invalid transport field|No matching connection|DNS Response|DNS Query|(?:%{WORD}\s*)* +``` + +## SSHD_CORRUPTED + +Pattern : +``` +%{SSHD_IDENT_FAIL}|%{SSHD_MAPB_FAIL}|%{SSHD_RMAP_FAIL}|%{SSHD_TOOMANY_AUTH}|%{SSHD_CORRUPT_MAC}|%{SSHD_PACKET_CORRUPT}|%{SSHD_BAD_VERSION} +``` + +## BACULA_LOG_NO_CONNECT + +Pattern : +``` +Warning: bsock.c:127 Could not connect to (Client: %{BACULA_HOST:client}|Storage daemon) on %{HOSTNAME}:%{POSINT}. ERR=%{GREEDYDATA:berror} +``` + +## SSHD_DISCONNECT + +Pattern : +``` +Received disconnect from %{IP:sshd_client_ip} port %{NUMBER:sshd_port}:%{NUMBER:sshd_disconnect_code}: %{GREEDYDATA:sshd_disconnect_status} +``` + +## SSHD_MAPB_FAIL + +Pattern : +``` +Address %{IP:sshd_client_ip} maps to %{HOSTNAME:sshd_client_hostname}, but this does not map back to the address - POSSIBLE BREAK-IN ATTEMPT! +``` + +## SSHD_TCPWRAP_FAIL2 + +Pattern : +``` +warning: %{DATA:sshd_tcpd_file}, line %{NUMBER}: host name/address mismatch: %{IPORHOST:sshd_client_ip} != %{HOSTNAME:sshd_paranoid_hostname} +``` + +## MONGO3_LOG + +Pattern : +``` +%{TIMESTAMP_ISO8601:timestamp} %{MONGO3_SEVERITY:severity} %{MONGO3_COMPONENT:component}%{SPACE}(?:\[%{DATA:context}\])? 
%{GREEDYDATA:message} +``` + +## BACULA_LOG_FATAL_CONN + +Pattern : +``` +Fatal error: bsock.c:133 Unable to connect to (Client: %{BACULA_HOST:client}|Storage daemon) on %{HOSTNAME}:%{POSINT}. ERR=%{GREEDYDATA:berror} +``` + +## SSHD_TCPWRAP_FAIL4 + +Pattern : +``` +warning: %{DATA:sshd_tcpd_file}, line %{NUMBER}: host name/name mismatch: reverse lookup results in non-FQDN %{HOSTNAME:sshd_paranoid_hostname} +``` + +## CISCOFW710001_710002_710003_710005_710006 + +Pattern : +``` +%{WORD:protocol} (?:request|access) %{CISCO_ACTION:action} from %{IP:src_ip}/%{INT:src_port} to %{DATA:dst_interface}:%{IP:dst_ip}/%{INT:dst_port} +``` + +## NAGIOS_PASSIVE_SERVICE_CHECK + +Pattern : +``` +%{NAGIOS_TYPE_PASSIVE_SERVICE_CHECK:nagios_type}: %{DATA:nagios_hostname};%{DATA:nagios_service};%{DATA:nagios_state};%{GREEDYDATA:nagios_comment} +``` + +## NAGIOS_SERVICE_FLAPPING_ALERT + +Pattern : +``` +%{NAGIOS_TYPE_SERVICE_FLAPPING_ALERT:nagios_type}: %{DATA:nagios_hostname};%{DATA:nagios_service};%{DATA:nagios_state};%{GREEDYDATA:nagios_message} +``` + +## NAGIOS_SERVICE_DOWNTIME_ALERT + +Pattern : +``` +%{NAGIOS_TYPE_SERVICE_DOWNTIME_ALERT:nagios_type}: %{DATA:nagios_hostname};%{DATA:nagios_service};%{DATA:nagios_state};%{GREEDYDATA:nagios_comment} +``` + +## TCPDUMP_OUTPUT + +Pattern : +``` +%{GREEDYDATA:timestamp} IP %{IPORHOST:source_ip}\.%{INT:source_port} > %{IPORHOST:dest_ip}\.%{INT:dest_port}: Flags \[%{GREEDYDATA:tcpflags}\], seq +``` + +## SSHD_TCPWRAP_FAIL1 + +Pattern : +``` +warning: %{DATA:sshd_tcpd_file}, line %{NUMBER}: can't verify hostname: getaddrinfo\(%{DATA:sshd_paranoid_hostname}, %{DATA:sshd_sa_family}\) failed +``` + +## SSHD_FAIL_PREAUTH + +Pattern : +``` +fatal: Unable to negotiate with %{IP:sshd_client_ip} port %{NUMBER:sshd_port}:\s*%{GREEDYDATA:sshd_disconnect_status}? \[%{GREEDYDATA:sshd_privsep}\] +``` + +## NAGIOS_EC_LINE_ENABLE_SVC_NOTIFICATIONS + +Pattern : +``` +%{NAGIOS_TYPE_EXTERNAL_COMMAND:nagios_type}: %{NAGIOS_EC_ENABLE_SVC_NOTIFICATIONS:nagios_command};%{DATA:nagios_hostname};%{GREEDYDATA:nagios_service} +``` + +## SSHD_TCPWRAP_FAIL3 + +Pattern : +``` +warning: %{DATA:sshd_tcpd_file}, line %{NUMBER}: host name/name mismatch: %{HOSTNAME:sshd_paranoid_hostname_1} != %{HOSTNAME:sshd_paranoid_hostname_2} +``` + +## NAGIOS_EC_LINE_DISABLE_SVC_NOTIFICATIONS + +Pattern : +``` +%{NAGIOS_TYPE_EXTERNAL_COMMAND:nagios_type}: %{NAGIOS_EC_DISABLE_SVC_NOTIFICATIONS:nagios_command};%{DATA:nagios_hostname};%{GREEDYDATA:nagios_service} +``` + +## NAGIOS_HOST_EVENT_HANDLER + +Pattern : +``` +%{NAGIOS_TYPE_HOST_EVENT_HANDLER:nagios_type}: %{DATA:nagios_hostname};%{DATA:nagios_state};%{DATA:nagios_statelevel};%{DATA:nagios_event_handler_name} +``` + +## CISCOFW313001_313004_313008 + +Pattern : +``` +%{CISCO_ACTION:action} %{WORD:protocol} type=%{INT:icmp_type}, code=%{INT:icmp_code} from %{IP:src_ip} on interface %{DATA:interface}( to %{IP:dst_ip})? +``` + +## BACULA_LOG_END_VOLUME + +Pattern : +``` +End of medium on Volume \"%{BACULA_VOLUME:volume}\" Bytes=%{BACULA_CAPACITY} Blocks=%{BACULA_CAPACITY} at %{MONTHDAY}-%{MONTH}-%{YEAR} %{HOUR}:%{MINUTE}. 
+``` + +## SSHD_SUCCESS + +Pattern : +``` +Accepted %{WORD:sshd_auth_type} for %{USERNAME:sshd_user} from %{IP:sshd_client_ip} port %{NUMBER:sshd_port} %{WORD:sshd_protocol}: %{GREEDYDATA:sshd_cipher} +``` + +## SMB_AUTH_FAIL + +Pattern : +``` +Auth:%{GREEDYDATA} user \[%{DATA:smb_domain}\]\\\[%{DATA:user}\]%{GREEDYDATA} status \[NT_STATUS_NO_SUCH_USER\]%{GREEDYDATA} remote host \[ipv4:%{IP:ip_source} +``` + +## BACULA_LOG_NEW_MOUNT + +Pattern : +``` +New volume \"%{BACULA_VOLUME:volume}\" mounted on device \"%{BACULA_DEVICE:device}\" \(%{BACULA_DEVICEPATH}\) at %{MONTHDAY}-%{MONTH}-%{YEAR} %{HOUR}:%{MINUTE}. +``` + +## NAGIOS_HOST_ALERT + +Pattern : +``` +%{NAGIOS_TYPE_HOST_ALERT:nagios_type}: %{DATA:nagios_hostname};%{DATA:nagios_state};%{DATA:nagios_statelevel};%{NUMBER:nagios_attempt};%{GREEDYDATA:nagios_message} +``` + +## NAGIOS_HOST_NOTIFICATION + +Pattern : +``` +%{NAGIOS_TYPE_HOST_NOTIFICATION:nagios_type}: %{DATA:nagios_notifyname};%{DATA:nagios_hostname};%{DATA:nagios_state};%{DATA:nagios_contact};%{GREEDYDATA:nagios_message} +``` + +## SYSLOGPAMSESSION + +Pattern : +``` +%{SYSLOGBASE} %{GREEDYDATA:message}%{WORD:pam_module}\(%{DATA:pam_caller}\): session %{WORD:pam_session_state} for user %{USERNAME:username}(?: by %{GREEDYDATA:pam_by})? +``` + +## NAGIOS_CURRENT_HOST_STATE + +Pattern : +``` +%{NAGIOS_TYPE_CURRENT_HOST_STATE:nagios_type}: %{DATA:nagios_hostname};%{DATA:nagios_state};%{DATA:nagios_statetype};%{DATA:nagios_statecode};%{GREEDYDATA:nagios_message} +``` + +## CISCOFW419002 + +Pattern : +``` +%{CISCO_REASON:reason} from %{DATA:src_interface}:%{IP:src_ip}/%{INT:src_port} to %{DATA:dst_interface}:%{IP:dst_ip}/%{INT:dst_port} with different initial sequence number +``` + +## IPV4 + +Pattern : +``` +(?:(?:[0-1]?[0-9]{1,2}|2[0-4][0-9]|25[0-5])[.](?:[0-1]?[0-9]{1,2}|2[0-4][0-9]|25[0-5])[.](?:[0-1]?[0-9]{1,2}|2[0-4][0-9]|25[0-5])[.](?:[0-1]?[0-9]{1,2}|2[0-4][0-9]|25[0-5])) +``` + +## SSHD_FAI2_PREAUTH + +Pattern : +``` +fatal: %{GREEDYDATA:sshd_fatal_status}: Connection from %{IP:sshd_client_ip} port %{NUMBER:sshd_port}:\s*%{GREEDYDATA:sshd_disconnect_status}? \[%{GREEDYDATA:sshd_privsep}\] +``` + +## APACHEERRORPREFIX + +Pattern : +``` +\[%{APACHEERRORTIME:timestamp}\] \[%{NOTSPACE:apacheseverity}\] (\[pid %{INT}:tid %{INT}\] )?\[client %{IPORHOST:sourcehost}(:%{INT:source_port})?\] (\[client %{IPORHOST}\])? +``` + +## NAGIOS_SERVICE_EVENT_HANDLER + +Pattern : +``` +%{NAGIOS_TYPE_SERVICE_EVENT_HANDLER:nagios_type}: %{DATA:nagios_hostname};%{DATA:nagios_service};%{DATA:nagios_state};%{DATA:nagios_statelevel};%{DATA:nagios_event_handler_name} +``` + +## NAGIOS_EC_LINE_PROCESS_HOST_CHECK_RESULT + +Pattern : +``` +%{NAGIOS_TYPE_EXTERNAL_COMMAND:nagios_type}: %{NAGIOS_EC_PROCESS_HOST_CHECK_RESULT:nagios_command};%{DATA:nagios_hostname};%{DATA:nagios_state};%{GREEDYDATA:nagios_check_result} +``` + +## SSHD_PROBE_LOG + +Pattern : +``` +%{SSHD_REFUSE_CONN}|%{SSHD_TCPWRAP_FAIL1}|%{SSHD_TCPWRAP_FAIL2}|%{SSHD_TCPWRAP_FAIL3}|%{SSHD_TCPWRAP_FAIL4}|%{SSHD_TCPWRAP_FAIL5}|%{SSHD_FAIL}|%{SSHD_USER_FAIL}|%{SSHD_INVAL_USER} +``` + +## NAXSI_EXLOG + +Pattern : +``` +^NAXSI_EXLOG: ip=%{IPORHOST:naxsi_src_ip}&server=%{IPORHOST:naxsi_dst_ip}&uri=%{PATH:http_path}&id=%{INT:naxsi_id}&zone=%{WORD:naxsi_zone}&var_name=%{DATA:naxsi_var_name}&content= +``` + +## SSHD_RECE_PREAUTH + +Pattern : +``` +(?:error: |)Received disconnect from %{IP:sshd_client_ip} port %{NUMBER:sshd_port}:%{NUMBER:sshd_disconnect_code}: %{GREEDYDATA:sshd_disconnect_status}? 
\[%{GREEDYDATA:sshd_privsep}\] +``` + +## MONTH + +Pattern : +``` +\bJan(?:uary|uar)?|Feb(?:ruary|ruar)?|M(?:a|ä)?r(?:ch|z)?|Apr(?:il)?|Ma(?:y|i)?|Jun(?:e|i)?|Jul(?:y)?|Aug(?:ust)?|Sep(?:tember)?|O(?:c|k)?t(?:ober)?|Nov(?:ember)?|De(?:c|z)(?:ember)?\b +``` + +## CISCOFW419001 + +Pattern : +``` +%{CISCO_ACTION:action} %{WORD:protocol} packet from %{DATA:src_interface}:%{IP:src_ip}/%{INT:src_port} to %{DATA:dst_interface}:%{IP:dst_ip}/%{INT:dst_port}, reason: %{GREEDYDATA:reason} +``` + +## NAGIOS_SERVICE_ALERT + +Pattern : +``` +%{NAGIOS_TYPE_SERVICE_ALERT:nagios_type}: %{DATA:nagios_hostname};%{DATA:nagios_service};%{DATA:nagios_state};%{DATA:nagios_statelevel};%{NUMBER:nagios_attempt};%{GREEDYDATA:nagios_message} +``` + +## CISCOFW106015 + +Pattern : +``` +%{CISCO_ACTION:action} %{WORD:protocol} \(%{DATA:policy_id}\) from %{IP:src_ip}/%{INT:src_port} to %{IP:dst_ip}/%{INT:dst_port} flags %{DATA:tcp_flags} on interface %{GREEDYDATA:interface} +``` + +## CISCOFW602303_602304 + +Pattern : +``` +%{WORD:protocol}: An %{CISCO_DIRECTION:direction} %{GREEDYDATA:tunnel_type} SA \(SPI= %{DATA:spi}\) between %{IP:src_ip} and %{IP:dst_ip} \(user= %{DATA:user}\) has been %{CISCO_ACTION:action} +``` + +## NAGIOS_SERVICE_NOTIFICATION + +Pattern : +``` +%{NAGIOS_TYPE_SERVICE_NOTIFICATION:nagios_type}: %{DATA:nagios_notifyname};%{DATA:nagios_hostname};%{DATA:nagios_service};%{DATA:nagios_state};%{DATA:nagios_contact};%{GREEDYDATA:nagios_message} +``` + +## RT_FLOW3 + +Pattern : +``` +%{RT_FLOW_EVENT:event}: session denied %{IP:src-ip}/%{INT:src-port}->%{IP:dst-ip}/%{INT:dst-port} %{DATA:service} %{INT:protocol-id}\(\d\) %{DATA:policy-name} %{DATA:from-zone} %{DATA:to-zone} .* +``` + +## NAGIOS_CURRENT_SERVICE_STATE + +Pattern : +``` +%{NAGIOS_TYPE_CURRENT_SERVICE_STATE:nagios_type}: %{DATA:nagios_hostname};%{DATA:nagios_service};%{DATA:nagios_state};%{DATA:nagios_statetype};%{DATA:nagios_statecode};%{GREEDYDATA:nagios_message} +``` + +## CISCOFW713172 + +Pattern : +``` +Group = %{GREEDYDATA:group}, IP = %{IP:src_ip}, Automatic NAT Detection Status:\s+Remote end\s*%{DATA:is_remote_natted}\s*behind a NAT device\s+This\s+end\s*%{DATA:is_local_natted}\s*behind a NAT device +``` + +## NAGIOS_EC_LINE_PROCESS_SERVICE_CHECK_RESULT + +Pattern : +``` +%{NAGIOS_TYPE_EXTERNAL_COMMAND:nagios_type}: %{NAGIOS_EC_PROCESS_SERVICE_CHECK_RESULT:nagios_command};%{DATA:nagios_hostname};%{DATA:nagios_service};%{DATA:nagios_state};%{GREEDYDATA:nagios_check_result} +``` + +## CISCOFW402119 + +Pattern : +``` +%{WORD:protocol}: Received an %{WORD:orig_protocol} packet \(SPI= %{DATA:spi}, sequence number= %{DATA:seq_num}\) from %{IP:src_ip} \(user= %{DATA:user}\) to %{IP:dst_ip} that failed anti-replay checking +``` + +## SSHD_PREAUTH + +Pattern : +``` +%{SSHD_DISC_PREAUTH}|%{SSHD_RECE_PREAUTH}|%{SSHD_MAXE_PREAUTH}|%{SSHD_DISR_PREAUTH}|%{SSHD_INVA_PREAUTH}|%{SSHD_REST_PREAUTH}|%{SSHD_FAIL_PREAUTH}|%{SSHD_CLOS_PREAUTH}|%{SSHD_FAI2_PREAUTH}|%{SSHD_BADL_PREAUTH} +``` + +## COMMONAPACHELOG + +Pattern : +``` +%{IPORHOST:clientip} %{HTTPDUSER:ident} %{USER:auth} \[%{HTTPDATE:timestamp}\] "(?:%{WORD:verb} %{NOTSPACE:request}(?: HTTP/%{NUMBER:httpversion})?|%{DATA:rawrequest})" %{NUMBER:response} (?:%{NUMBER:bytes}|-) +``` + +## SSHD_MAXE_PREAUTH + +Pattern : +``` +error: maximum authentication attempts exceeded for (?:invalid user |)%{USERNAME:sshd_invalid_user} from %{IP:sshd_client_ip} port %{NUMBER:sshd_port} %{WORD:sshd_protocol}\s*(?:\[%{GREEDYDATA:sshd_privsep}\]|) +``` + +## CISCOFW106001 + +Pattern : +``` 
+%{CISCO_DIRECTION:direction} %{WORD:protocol} connection %{CISCO_ACTION:action} from %{IP:src_ip}/%{INT:src_port} to %{IP:dst_ip}/%{INT:dst_port} flags %{GREEDYDATA:tcp_flags} on interface %{GREEDYDATA:interface} +``` + +## LOGLEVEL + +Pattern : +``` +[Aa]lert|ALERT|[Tt]race|TRACE|[Dd]ebug|DEBUG|[Nn]otice|NOTICE|[Ii]nfo|INFO|[Ww]arn?(?:ing)?|WARN?(?:ING)?|[Ee]rr?(?:or)?|ERR?(?:OR)?|[Cc]rit?(?:ical)?|CRIT?(?:ICAL)?|[Ff]atal|FATAL|[Ss]evere|SEVERE|EMERG(?:ENCY)?|[Ee]merg(?:ency)? +``` + +## CISCOFW305011 + +Pattern : +``` +%{CISCO_ACTION:action} %{CISCO_XLATE_TYPE:xlate_type} %{WORD:protocol} translation from %{DATA:src_interface}:%{IP:src_ip}(/%{INT:src_port})?(\(%{DATA:src_fwuser}\))? to %{DATA:src_xlated_interface}:%{IP:src_xlated_ip}/%{DATA:src_xlated_port} +``` + +## MONGO_SLOWQUERY + +Pattern : +``` +%{WORD} %{MONGO_WORDDASH:database}\.%{MONGO_WORDDASH:collection} %{WORD}: %{MONGO_QUERY:query} %{WORD}:%{NONNEGINT:ntoreturn} %{WORD}:%{NONNEGINT:ntoskip} %{WORD}:%{NONNEGINT:nscanned}.*nreturned:%{NONNEGINT:nreturned}..+ %{POSINT:duration}ms +``` + +## NAXSI_FMT + +Pattern : +``` +^NAXSI_FMT: ip=%{IPORHOST:src_ip}&server=%{IPORHOST:target_ip}&uri=%{PATH:http_path}&learning=\d&vers=%{DATA:naxsi_version}&total_processed=\d+&total_blocked=\d+&block=\d+(&cscore\d=%{WORD:score_label}&score\d=%{INT:score})+&zone0=%{WORD:zone} +``` + +## CISCOFW106014 + +Pattern : +``` +%{CISCO_ACTION:action} %{CISCO_DIRECTION:direction} %{WORD:protocol} src %{DATA:src_interface}:%{IP:src_ip}(\(%{DATA:src_fwuser}\))? dst %{DATA:dst_interface}:%{IP:dst_ip}(\(%{DATA:dst_fwuser}\))? \(type %{INT:icmp_type}, code %{INT:icmp_code}\) +``` + +## NGINXACCESS + +Pattern : +``` +%{IPORHOST:remote_addr} - %{NGUSER:remote_user} \[%{HTTPDATE:time_local}\] "%{WORD:method} %{URIPATHPARAM:request} HTTP/%{NUMBER:http_version}" %{NUMBER:status} %{NUMBER:body_bytes_sent} "%{NOTDQUOTE:http_referer}" "%{NOTDQUOTE:http_user_agent}" +``` + +## EXIM_EXCLUDE_TERMS + +Pattern : +``` +(Message is frozen|(Start|End) queue run| Warning: | retry time not reached | no (IP address|host name) found for (IP address|host) | unexpected disconnection while reading SMTP command | no immediate delivery: |another process is handling this message) +``` + +## CISCOFW302020_302021 + +Pattern : +``` +%{CISCO_ACTION:action}(?: %{CISCO_DIRECTION:direction})? %{WORD:protocol} connection for faddr %{IP:dst_ip}/%{INT:icmp_seq_num}(?:\(%{DATA:fwuser}\))? gaddr %{IP:src_xlated_ip}/%{INT:icmp_code_xlated} laddr %{IP:src_ip}/%{INT:icmp_code}( \(%{DATA:user}\))? +``` + +## CISCOFW106006_106007_106010 + +Pattern : +``` +%{CISCO_ACTION:action} %{CISCO_DIRECTION:direction} %{WORD:protocol} (?:from|src) %{IP:src_ip}/%{INT:src_port}(\(%{DATA:src_fwuser}\))? (?:to|dst) %{IP:dst_ip}/%{INT:dst_port}(\(%{DATA:dst_fwuser}\))? (?:on interface %{DATA:interface}|due to %{CISCO_REASON:reason}) +``` + +## HTTPD24_ERRORLOG + +Pattern : +``` +\[%{HTTPDERROR_DATE:timestamp}\] \[%{WORD:module}:%{LOGLEVEL:loglevel}\] \[pid %{POSINT:pid}:tid %{NUMBER:tid}\]( \(%{POSINT:proxy_errorcode}\)%{DATA:proxy_errormessage}:)?( \[client %{IPORHOST:client}:%{POSINT:clientport}\])? 
%{DATA:errorcode}: %{GREEDYDATA:message} +``` + +## MODSECAPACHEERROR + +Pattern : +``` +%{MODSECPREFIX} %{MODSECRULEFILE} %{MODSECRULELINE} (?:%{MODSECMATCHOFFSET} )?(?:%{MODSECRULEID} )?(?:%{MODSECRULEREV} )?(?:%{MODSECRULEMSG} )?(?:%{MODSECRULEDATA} )?(?:%{MODSECRULESEVERITY} )?(?:%{MODSECRULEVERS} )?%{MODSECRULETAGS}%{MODSECHOSTNAME} %{MODSECURI} %{MODSECUID} +``` + +## NAGIOS_EC_LINE_SCHEDULE_HOST_DOWNTIME + +Pattern : +``` +%{NAGIOS_TYPE_EXTERNAL_COMMAND:nagios_type}: %{NAGIOS_EC_SCHEDULE_HOST_DOWNTIME:nagios_command};%{DATA:nagios_hostname};%{NUMBER:nagios_start_time};%{NUMBER:nagios_end_time};%{NUMBER:nagios_fixed};%{NUMBER:nagios_trigger_id};%{NUMBER:nagios_duration};%{DATA:author};%{DATA:comment} +``` + +## SYSLOG5424BASE + +Pattern : +``` +%{SYSLOG5424PRI}%{NONNEGINT:syslog5424_ver} +(?:%{TIMESTAMP_ISO8601:syslog5424_ts}|-) +(?:%{HOSTNAME:syslog5424_host}|-) +(-|%{SYSLOG5424PRINTASCII:syslog5424_app}) +(-|%{SYSLOG5424PRINTASCII:syslog5424_proc}) +(-|%{SYSLOG5424PRINTASCII:syslog5424_msgid}) +(?:%{SYSLOG5424SD:syslog5424_sd}|-|) +``` + +## CISCOFW106100_2_3 + +Pattern : +``` +access-list %{NOTSPACE:policy_id} %{CISCO_ACTION:action} %{WORD:protocol} for user '%{DATA:src_fwuser}' %{DATA:src_interface}/%{IP:src_ip}\(%{INT:src_port}\) -> %{DATA:dst_interface}/%{IP:dst_ip}\(%{INT:dst_port}\) hit-cnt %{INT:hit_count} %{CISCO_INTERVAL:interval} \[%{DATA:hashcode1}, %{DATA:hashcode2}\] +``` + +## CISCOFW106100 + +Pattern : +``` +access-list %{NOTSPACE:policy_id} %{CISCO_ACTION:action} %{WORD:protocol} %{DATA:src_interface}/%{IP:src_ip}\(%{INT:src_port}\)(\(%{DATA:src_fwuser}\))? -> %{DATA:dst_interface}/%{IP:dst_ip}\(%{INT:dst_port}\)(\(%{DATA:src_fwuser}\))? hit-cnt %{INT:hit_count} %{CISCO_INTERVAL:interval} \[%{DATA:hashcode1}, %{DATA:hashcode2}\] +``` + +## RT_FLOW2 + +Pattern : +``` +%{RT_FLOW_EVENT:event}: session created %{IP:src-ip}/%{INT:src-port}->%{IP:dst-ip}/%{INT:dst-port} %{DATA:service} %{IP:nat-src-ip}/%{INT:nat-src-port}->%{IP:nat-dst-ip}/%{INT:nat-dst-port} %{DATA:src-nat-rule-name} %{DATA:dst-nat-rule-name} %{INT:protocol-id} %{DATA:policy-name} %{DATA:from-zone} %{DATA:to-zone} %{INT:session-id} .* +``` + +## CISCOFW733100 + +Pattern : +``` +\[\s*%{DATA:drop_type}\s*\] drop %{DATA:drop_rate_id} exceeded. Current burst rate is %{INT:drop_rate_current_burst} per second, max configured rate is %{INT:drop_rate_max_burst}; Current average rate is %{INT:drop_rate_current_avg} per second, max configured rate is %{INT:drop_rate_max_avg}; Cumulative total count is %{INT:drop_total_count} +``` + +## CISCOFW106023 + +Pattern : +``` +%{CISCO_ACTION:action}( protocol)? %{WORD:protocol} src %{DATA:src_interface}:%{DATA:src_ip}(/%{INT:src_port})?(\(%{DATA:src_fwuser}\))? dst %{DATA:dst_interface}:%{DATA:dst_ip}(/%{INT:dst_port})?(\(%{DATA:dst_fwuser}\))?( \(type %{INT:icmp_type}, code %{INT:icmp_code}\))? by access-group "?%{DATA:policy_id}"? 
\[%{DATA:hashcode1}, %{DATA:hashcode2}\] +``` + +## ELB_ACCESS_LOG + +Pattern : +``` +%{TIMESTAMP_ISO8601:timestamp} %{NOTSPACE:elb} %{IP:clientip}:%{INT:clientport:int} (?:(%{IP:backendip}:?:%{INT:backendport:int})|-) %{NUMBER:request_processing_time:float} %{NUMBER:backend_processing_time:float} %{NUMBER:response_processing_time:float} %{INT:response:int} %{INT:backend_response:int} %{INT:received_bytes:int} %{INT:bytes:int} "%{ELB_REQUEST_LINE}" +``` + +## MODSECRULETAGS + +Pattern : +``` +(?:\[tag %{QUOTEDSTRING:ruletag0}\] )?(?:\[tag %{QUOTEDSTRING:ruletag1}\] )?(?:\[tag %{QUOTEDSTRING:ruletag2}\] )?(?:\[tag %{QUOTEDSTRING:ruletag3}\] )?(?:\[tag %{QUOTEDSTRING:ruletag4}\] )?(?:\[tag %{QUOTEDSTRING:ruletag5}\] )?(?:\[tag %{QUOTEDSTRING:ruletag6}\] )?(?:\[tag %{QUOTEDSTRING:ruletag7}\] )?(?:\[tag %{QUOTEDSTRING:ruletag8}\] )?(?:\[tag %{QUOTEDSTRING:ruletag9}\] )?(?:\[tag %{QUOTEDSTRING}\] )* +``` + +## RT_FLOW1 + +Pattern : +``` +%{RT_FLOW_EVENT:event}: %{GREEDYDATA:close-reason}: %{IP:src-ip}/%{INT:src-port}->%{IP:dst-ip}/%{INT:dst-port} %{DATA:service} %{IP:nat-src-ip}/%{INT:nat-src-port}->%{IP:nat-dst-ip}/%{INT:nat-dst-port} %{DATA:src-nat-rule-name} %{DATA:dst-nat-rule-name} %{INT:protocol-id} %{DATA:policy-name} %{DATA:from-zone} %{DATA:to-zone} %{INT:session-id} \d+\(%{DATA:sent}\) \d+\(%{DATA:received}\) %{INT:elapsed-time} .* +``` + +## BRO_CONN + +Pattern : +``` +%{NUMBER:ts}\t%{NOTSPACE:uid}\t%{IP:orig_h}\t%{INT:orig_p}\t%{IP:resp_h}\t%{INT:resp_p}\t%{WORD:proto}\t%{GREEDYDATA:service}\t%{NUMBER:duration}\t%{NUMBER:orig_bytes}\t%{NUMBER:resp_bytes}\t%{GREEDYDATA:conn_state}\t%{GREEDYDATA:local_orig}\t%{GREEDYDATA:missed_bytes}\t%{GREEDYDATA:history}\t%{GREEDYDATA:orig_pkts}\t%{GREEDYDATA:orig_ip_bytes}\t%{GREEDYDATA:resp_pkts}\t%{GREEDYDATA:resp_ip_bytes}\t%{GREEDYDATA:tunnel_parents} +``` + +## S3_ACCESS_LOG + +Pattern : +``` +%{WORD:owner} %{NOTSPACE:bucket} \[%{HTTPDATE:timestamp}\] %{IP:clientip} %{NOTSPACE:requester} %{NOTSPACE:request_id} %{NOTSPACE:operation} %{NOTSPACE:key} (?:"%{S3_REQUEST_LINE}"|-) (?:%{INT:response:int}|-) (?:-|%{NOTSPACE:error_code}) (?:%{INT:bytes:int}|-) (?:%{INT:object_size:int}|-) (?:%{INT:request_time_ms:int}|-) (?:%{INT:turnaround_time_ms:int}|-) (?:%{QS:referrer}|-) (?:"?%{QS:agent}"?|-) (?:-|%{NOTSPACE:version_id}) +``` + +## BRO_DNS + +Pattern : +``` +%{NUMBER:ts}\t%{NOTSPACE:uid}\t%{IP:orig_h}\t%{INT:orig_p}\t%{IP:resp_h}\t%{INT:resp_p}\t%{WORD:proto}\t%{INT:trans_id}\t%{GREEDYDATA:query}\t%{GREEDYDATA:qclass}\t%{GREEDYDATA:qclass_name}\t%{GREEDYDATA:qtype}\t%{GREEDYDATA:qtype_name}\t%{GREEDYDATA:rcode}\t%{GREEDYDATA:rcode_name}\t%{GREEDYDATA:AA}\t%{GREEDYDATA:TC}\t%{GREEDYDATA:RD}\t%{GREEDYDATA:RA}\t%{GREEDYDATA:Z}\t%{GREEDYDATA:answers}\t%{GREEDYDATA:TTLs}\t%{GREEDYDATA:rejected} +``` + +## CISCOFW302013_302014_302015_302016 + +Pattern : +``` +%{CISCO_ACTION:action}(?: %{CISCO_DIRECTION:direction})? %{WORD:protocol} connection %{INT:connection_id} for %{DATA:src_interface}:%{IP:src_ip}/%{INT:src_port}( \(%{IP:src_mapped_ip}/%{INT:src_mapped_port}\))?(\(%{DATA:src_fwuser}\))? to %{DATA:dst_interface}:%{IP:dst_ip}/%{INT:dst_port}( \(%{IP:dst_mapped_ip}/%{INT:dst_mapped_port}\))?(\(%{DATA:dst_fwuser}\))?( duration %{TIME:duration} bytes %{INT:bytes})?(?: %{CISCO_REASON:reason})?( \(%{DATA:user}\))? 
+``` + +## SHOREWALL + +Pattern : +``` +(%{SYSLOGTIMESTAMP:timestamp}) (%{WORD:nf_host}) kernel:.*Shorewall:(%{WORD:nf_action1})?:(%{WORD:nf_action2})?.*IN=(%{USERNAME:nf_in_interface})?.*(OUT= *MAC=(%{COMMONMAC:nf_dst_mac}):(%{COMMONMAC:nf_src_mac})?|OUT=%{USERNAME:nf_out_interface}).*SRC=(%{IPV4:nf_src_ip}).*DST=(%{IPV4:nf_dst_ip}).*LEN=(%{WORD:nf_len}).*?TOS=(%{WORD:nf_tos}).*?PREC=(%{WORD:nf_prec}).*?TTL=(%{INT:nf_ttl}).*?ID=(%{INT:nf_id}).*?PROTO=(%{WORD:nf_protocol}).*?SPT=(%{INT:nf_src_port}?.*DPT=%{INT:nf_dst_port}?.*) +``` + +## HAPROXYTCP + +Pattern : +``` +(?:%{SYSLOGTIMESTAMP:syslog_timestamp}|%{TIMESTAMP_ISO8601:timestamp8601}) %{IPORHOST:syslog_server} %{SYSLOGPROG}: %{IP:client_ip}:%{INT:client_port} \[%{HAPROXYDATE:accept_date}\] %{NOTSPACE:frontend_name} %{NOTSPACE:backend_name}/%{NOTSPACE:server_name} %{INT:time_queue}/%{INT:time_backend_connect}/%{NOTSPACE:time_duration} %{NOTSPACE:bytes_read} %{NOTSPACE:termination_state} %{INT:actconn}/%{INT:feconn}/%{INT:beconn}/%{INT:srvconn}/%{NOTSPACE:retries} %{INT:srv_queue}/%{INT:backend_queue} +``` + +## CISCOFW313005 + +Pattern : +``` +%{CISCO_REASON:reason} for %{WORD:protocol} error message: %{WORD:err_protocol} src %{DATA:err_src_interface}:%{IP:err_src_ip}(\(%{DATA:err_src_fwuser}\))? dst %{DATA:err_dst_interface}:%{IP:err_dst_ip}(\(%{DATA:err_dst_fwuser}\))? \(type %{INT:err_icmp_type}, code %{INT:err_icmp_code}\) on %{DATA:interface} interface\. Original IP payload: %{WORD:protocol} src %{IP:orig_src_ip}/%{INT:orig_src_port}(\(%{DATA:orig_src_fwuser}\))? dst %{IP:orig_dst_ip}/%{INT:orig_dst_port}(\(%{DATA:orig_dst_fwuser}\))? +``` + +## BRO_FILES + +Pattern : +``` +%{NUMBER:ts}\t%{NOTSPACE:fuid}\t%{IP:tx_hosts}\t%{IP:rx_hosts}\t%{NOTSPACE:conn_uids}\t%{GREEDYDATA:source}\t%{GREEDYDATA:depth}\t%{GREEDYDATA:analyzers}\t%{GREEDYDATA:mime_type}\t%{GREEDYDATA:filename}\t%{GREEDYDATA:duration}\t%{GREEDYDATA:local_orig}\t%{GREEDYDATA:is_orig}\t%{GREEDYDATA:seen_bytes}\t%{GREEDYDATA:total_bytes}\t%{GREEDYDATA:missing_bytes}\t%{GREEDYDATA:overflow_bytes}\t%{GREEDYDATA:timedout}\t%{GREEDYDATA:parent_fuid}\t%{GREEDYDATA:md5}\t%{GREEDYDATA:sha1}\t%{GREEDYDATA:sha256}\t%{GREEDYDATA:extracted} +``` + +## BRO_HTTP + +Pattern : +``` +%{NUMBER:ts}\t%{NOTSPACE:uid}\t%{IP:orig_h}\t%{INT:orig_p}\t%{IP:resp_h}\t%{INT:resp_p}\t%{INT:trans_depth}\t%{GREEDYDATA:method}\t%{GREEDYDATA:domain}\t%{GREEDYDATA:uri}\t%{GREEDYDATA:referrer}\t%{GREEDYDATA:user_agent}\t%{NUMBER:request_body_len}\t%{NUMBER:response_body_len}\t%{GREEDYDATA:status_code}\t%{GREEDYDATA:status_msg}\t%{GREEDYDATA:info_code}\t%{GREEDYDATA:info_msg}\t%{GREEDYDATA:filename}\t%{GREEDYDATA:bro_tags}\t%{GREEDYDATA:username}\t%{GREEDYDATA:password}\t%{GREEDYDATA:proxied}\t%{GREEDYDATA:orig_fuids}\t%{GREEDYDATA:orig_mime_types}\t%{GREEDYDATA:resp_fuids}\t%{GREEDYDATA:resp_mime_types} +``` + +## NETSCREENSESSIONLOG + +Pattern : +``` +%{SYSLOGTIMESTAMP:date} %{IPORHOST:device} %{IPORHOST}: NetScreen device_id=%{WORD:device_id}%{DATA}: start_time=%{QUOTEDSTRING:start_time} duration=%{INT:duration} policy_id=%{INT:policy_id} service=%{DATA:service} proto=%{INT:proto} src zone=%{WORD:src_zone} dst zone=%{WORD:dst_zone} action=%{WORD:action} sent=%{INT:sent} rcvd=%{INT:rcvd} src=%{IPORHOST:src_ip} dst=%{IPORHOST:dst_ip} src_port=%{INT:src_port} dst_port=%{INT:dst_port} src-xlated ip=%{IPORHOST:src_xlated_ip} port=%{INT:src_xlated_port} dst-xlated ip=%{IPORHOST:dst_xlated_ip} port=%{INT:dst_xlated_port} session_id=%{INT:session_id} reason=%{GREEDYDATA:reason} +``` + +## HAPROXYHTTPBASE + 
+Pattern : +``` +%{IP:client_ip}:%{INT:client_port} \[%{HAPROXYDATE:accept_date}\] %{NOTSPACE:frontend_name} %{NOTSPACE:backend_name}/%{NOTSPACE:server_name} %{INT:time_request}/%{INT:time_queue}/%{INT:time_backend_connect}/%{INT:time_backend_response}/%{NOTSPACE:time_duration} %{INT:http_status_code} %{NOTSPACE:bytes_read} %{DATA:captured_request_cookie} %{DATA:captured_response_cookie} %{NOTSPACE:termination_state} %{INT:actconn}/%{INT:feconn}/%{INT:beconn}/%{INT:srvconn}/%{NOTSPACE:retries} %{INT:srv_queue}/%{INT:backend_queue} (\\{\%\{HAPROXYCAPTUREDREQUESTHEADERS}\})?( )?(\\{\%\{HAPROXYCAPTUREDRESPONSEHEADERS}\})?( )?"(|(%{WORD:http_verb} (%{URIPROTO:http_proto}://)?(?:%{USER:http_user}(?::[^@]*)?@)?(?:%{URIHOST:http_host})?(?:%{URIPATHPARAM:http_request})?( HTTP/%{NUMBER:http_version})?))?" +``` + +## BACULA_LOGLINE + +Pattern : +``` +%{BACULA_TIMESTAMP:bts} %{BACULA_HOST:hostname} JobId %{INT:jobid}: (%{BACULA_LOG_MAX_CAPACITY}|%{BACULA_LOG_END_VOLUME}|%{BACULA_LOG_NEW_VOLUME}|%{BACULA_LOG_NEW_LABEL}|%{BACULA_LOG_WROTE_LABEL}|%{BACULA_LOG_NEW_MOUNT}|%{BACULA_LOG_NOOPEN}|%{BACULA_LOG_NOOPENDIR}|%{BACULA_LOG_NOSTAT}|%{BACULA_LOG_NOJOBS}|%{BACULA_LOG_ALL_RECORDS_PRUNED}|%{BACULA_LOG_BEGIN_PRUNE_JOBS}|%{BACULA_LOG_BEGIN_PRUNE_FILES}|%{BACULA_LOG_PRUNED_JOBS}|%{BACULA_LOG_PRUNED_FILES}|%{BACULA_LOG_ENDPRUNE}|%{BACULA_LOG_STARTJOB}|%{BACULA_LOG_STARTRESTORE}|%{BACULA_LOG_USEDEVICE}|%{BACULA_LOG_DIFF_FS}|%{BACULA_LOG_JOBEND}|%{BACULA_LOG_NOPRUNE_JOBS}|%{BACULA_LOG_NOPRUNE_FILES}|%{BACULA_LOG_VOLUME_PREVWRITTEN}|%{BACULA_LOG_READYAPPEND}|%{BACULA_LOG_CANCELLING}|%{BACULA_LOG_MARKCANCEL}|%{BACULA_LOG_CLIENT_RBJ}|%{BACULA_LOG_VSS}|%{BACULA_LOG_MAXSTART}|%{BACULA_LOG_DUPLICATE}|%{BACULA_LOG_NOJOBSTAT}|%{BACULA_LOG_FATAL_CONN}|%{BACULA_LOG_NO_CONNECT}|%{BACULA_LOG_NO_AUTH}|%{BACULA_LOG_NOSUIT}|%{BACULA_LOG_JOB}|%{BACULA_LOG_NOPRIOR}) +``` + +## NAGIOSLOGLINE + +Pattern : +``` +%{NAGIOSTIME} (?:%{NAGIOS_WARNING}|%{NAGIOS_CURRENT_SERVICE_STATE}|%{NAGIOS_CURRENT_HOST_STATE}|%{NAGIOS_SERVICE_NOTIFICATION}|%{NAGIOS_HOST_NOTIFICATION}|%{NAGIOS_SERVICE_ALERT}|%{NAGIOS_HOST_ALERT}|%{NAGIOS_SERVICE_FLAPPING_ALERT}|%{NAGIOS_HOST_FLAPPING_ALERT}|%{NAGIOS_SERVICE_DOWNTIME_ALERT}|%{NAGIOS_HOST_DOWNTIME_ALERT}|%{NAGIOS_PASSIVE_SERVICE_CHECK}|%{NAGIOS_PASSIVE_HOST_CHECK}|%{NAGIOS_SERVICE_EVENT_HANDLER}|%{NAGIOS_HOST_EVENT_HANDLER}|%{NAGIOS_TIMEPERIOD_TRANSITION}|%{NAGIOS_EC_LINE_DISABLE_SVC_CHECK}|%{NAGIOS_EC_LINE_ENABLE_SVC_CHECK}|%{NAGIOS_EC_LINE_DISABLE_HOST_CHECK}|%{NAGIOS_EC_LINE_ENABLE_HOST_CHECK}|%{NAGIOS_EC_LINE_PROCESS_HOST_CHECK_RESULT}|%{NAGIOS_EC_LINE_PROCESS_SERVICE_CHECK_RESULT}|%{NAGIOS_EC_LINE_SCHEDULE_HOST_DOWNTIME}|%{NAGIOS_EC_LINE_DISABLE_HOST_SVC_NOTIFICATIONS}|%{NAGIOS_EC_LINE_ENABLE_HOST_SVC_NOTIFICATIONS}|%{NAGIOS_EC_LINE_DISABLE_HOST_NOTIFICATIONS}|%{NAGIOS_EC_LINE_ENABLE_HOST_NOTIFICATIONS}|%{NAGIOS_EC_LINE_DISABLE_SVC_NOTIFICATIONS}|%{NAGIOS_EC_LINE_ENABLE_SVC_NOTIFICATIONS}) +``` + +## IPV6 + +Pattern : +``` 
+((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:)))(%.+)? +``` + + +# Documentation generation +This documentation is generated by `pkg/parser` : `GO_WANT_TEST_DOC=1 go test -run TestGeneratePatternsDoc` diff --git a/docs/v1.X/docs/references/plugins_api.md b/docs/v1.X/docs/references/plugins_api.md new file mode 100644 index 000000000..89bd3c160 --- /dev/null +++ b/docs/v1.X/docs/references/plugins_api.md @@ -0,0 +1,178 @@ +## Foreword + +Output plugins handle `SignalOccurence` objects resulting from bucket overflows. +This allows you to either build a simple notification/alerting plugin or fully manage a backend (this is what {{v1X.crowdsec.name}} uses to manage SQLite and MySQL). + +You can create your own plugins to perform specific actions when a scenario is triggered. + +The plugin itself will be compiled into a `.so` and will have its dedicated configuration. + +## Interface + +Plugins are written in Go and must conform to the following interface : + +```go +type Backend interface { + Insert(types.SignalOccurence) error + ReadAT(time.Time) ([]map[string]string, error) + Delete(string) (int, error) + Init(map[string]string) error + Flush() error + Shutdown() error + DeleteAll() error + StartAutoCommit() error +} +``` + +> Startup/shutdown methods + + - `Init` : called at startup time and receives the custom configuration as a string map. Errors aren't fatal, but the plugin will be discarded. + - `Shutdown` : called when {{v1X.crowdsec.Name}} is shutting down or restarting + + +> Writing/Deleting events + + - `Insert` : called every time an overflow happens, receives the `SignalOccurence` as a single parameter. Returned errors are non-fatal and will be logged at warning level. + - `Delete` : called to delete existing bans. Receives the exact `ip_text` (ban target) to delete. Only used by `cscli ban del`, and only relevant for read/write plugins such as database ones. + - `DeleteAll` : called to delete *all* existing bans. Only used by `cscli ban flush`, and only relevant for read/write plugins such as database ones. + +> Reading events + + - `ReadAT` : returns the list of bans that were active at the given time. The following keys are relevant in the returned list : source, iptext, reason, bancount, action, cn, as, events_count, until. Only used by `cscli ban list`, and only relevant for read/write plugins such as database ones. + +> Backend + + - `Flush` is called regularly by crowdsec for each plugin that received events.
For example, it will be called after each write in `cscli` (as it's one-shot) and every few hundred ms / few events in {{v1X.crowdsec.name}} itself. It might be a good place to deal with slower write operations. + + +## Configurations + +Each plugin has its own configuration file : + +```bash +$ cat config/plugins/backend/dummy.yaml +# name of the plugin, used by profiles.yaml +name: dummy +# path to the .so +path: ./plugins/backend/dummy.so +# your plugin specific configuration +config: + some_parameter: some value + other_parameter: more data + token: fooobarjajajajaja +``` + + +## Dummy plugin + +```go +package main + +import ( + "time" + + "github.com/crowdsecurity/crowdsec/pkg/types" + log "github.com/sirupsen/logrus" +) + +// This is where you would hold your plugin-specific context +type pluginDummy struct { + // some persistent data +} + +func (p *pluginDummy) Shutdown() error { + return nil +} + +func (p *pluginDummy) StartAutoCommit() error { + return nil +} + +func (p *pluginDummy) Init(config map[string]string) error { + log.Infof("pluginDummy config : %+v ", config) + return nil +} + +func (p *pluginDummy) Delete(target string) (int, error) { + return 0, nil +} + +func (p *pluginDummy) DeleteAll() error { + return nil +} + +func (p *pluginDummy) Insert(sig types.SignalOccurence) error { + log.Infof("insert signal : %+v", sig) + return nil +} + +func (p *pluginDummy) Flush() error { + return nil +} + +func (p *pluginDummy) ReadAT(timeAT time.Time) ([]map[string]string, error) { + return nil, nil +} + +// New is used by the plugin system to get the context +func New() interface{} { + return &pluginDummy{} +} + +// empty main function is mandatory since we are in a main package +func main() {} +``` + + +## Building plugin + +```bash +$ go build -buildmode=plugin -o dummy.so +``` + + +## Testing plugin + +
+Get a test environment from a fresh crowdsec release : + +```bash +$ cd crowdsec-v0.3.0 +$ ./test_env.sh +$ cd tests +```
+ + +```bash +$ cp ../../plugins/backend/dummy/dummy.so ./plugins/backend/ +$ cat > config/plugins/backend/dummy.yaml +name: dummy +path: ./plugins/backend/dummy.so +config: + some_parameter: some value + other_parameter: more data + token: fooobarjajajajaja +$ ./crowdsec -c dev.yaml -file test.log -type mylog +... +INFO[06-08-2020 17:21:30] pluginDummy config : map[flush:false max_records:10000 max_records_age:720h other_parameter:more data some_parameter:some value token:fooobarjajajajaja] +... +INFO[06-08-2020 17:21:30] Starting processing routines +... +INFO[06-08-2020 17:21:30] Processing Overflow ... +INFO[06-08-2020 17:21:30] insert signal : {Model:{ID:0 CreatedAt:0001-01-01 00:00:00 +0000 UTC UpdatedAt:0001-01-01 00:00:00 +0000 UTC DeletedAt:} MapKey:97872dfae02c523577eff8ec8e19706eec5fa21e Scenario:trigger on stuff Bucket_id:summer-field Alert_message:0.0.0.0 performed 'trigger on stuff' (1 events over 59ns) at 2020-08-06 17:21:30.491000439 +0200 CEST m=+0.722674306 Events_count:1 Events_sequence:[{Model:{ID:0 CreatedAt:0001-01-01 00:00:00 +0000 UTC UpdatedAt:0001-01-01 00:00:00 +0000 UTC DeletedAt:} Time:2020-08-06 17:21:30.491000368 +0200 CEST m=+0.722674247 Source:{Model:{ID:0 CreatedAt:0001-01-01 00:00:00 +0000 UTC UpdatedAt:0001-01-01 00:00:00 +0000 UTC DeletedAt:} Ip:0.0.0.0 Range:{IP: Mask:} AutonomousSystemNumber:0 AutonomousSystemOrganization: Country: Latitude:0 Longitude:0 Flags:map[]} Source_ip:0.0.0.0 Source_range: Source_AutonomousSystemNumber:0 Source_AutonomousSystemOrganization: Source_Country: SignalOccurenceID:0 Serialized:{"ASNNumber":"0","IsInEU":"false","command":"...","cwd":"...":"...","orig_uid":"...","orig_user":"...","parent":"bash","service":"...","source_ip":"...","user":"..."}}] Start_at:2020-08-06 17:21:30.491000368 +0200 CEST m=+0.722674247 BanApplications:[] Stop_at:2020-08-06 17:21:30.491000439 +0200 CEST m=+0.722674306 Source:0xc000248410 Source_ip:0.0.0.0 Source_range: Source_AutonomousSystemNumber:0 Source_AutonomousSystemOrganization: Source_Country: Source_Latitude:0 Source_Longitude:0 Sources:map[0.0.0.0:{Model:{ID:0 CreatedAt:0001-01-01 00:00:00 +0000 UTC UpdatedAt:0001-01-01 00:00:00 +0000 UTC DeletedAt:} Ip:0.0.0.0 Range:{IP: Mask:} AutonomousSystemNumber:0 AutonomousSystemOrganization: Country: Latitude:0 Longitude:0 Flags:map[]}] Dest_ip: Capacity:0 Leak_speed:0s Whitelisted:false Simulation:false Reprocess:false Labels:map[type:foobar]} +... +``` + + +## Notes + + - All the calls to the plugin methods are blocking. If you need to perform long-running operations, it's the plugin's task to handle the background processing with [tombs](https://godoc.org/gopkg.in/tomb.v2) or such. + - Due to [a golang limitation](https://github.com/golang/go/issues/31354) you might have to build crowdsec in the same environment as the plugins. + + + diff --git a/docs/v1.X/docs/references/postoverflows.md b/docs/v1.X/docs/references/postoverflows.md new file mode 100644 index 000000000..4c6119d3d --- /dev/null +++ b/docs/v1.X/docs/references/postoverflows.md @@ -0,0 +1,7 @@ +# Post Overflows + +PostOverflows are a secondary parsing phase that happens *after* a bucket has overflowed. +It behaves exactly like [Normal Parsing](/Crowdsec/v1/references/parsers/). However, instead of receiving {{v1X.event.htmlname}} with logs, the parsers receive events with {{v1X.alert.htmlname}} representing the overflows. + +The configuration resides in `/etc/crowdsec/postoverflows/`.
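+ +A minimal sketch of what such a postoverflow could look like (the name, and the assumption that the rdns enrichment filled `evt.Enriched.reverse_dns`, are illustrative, not a stock configuration) : + +```yaml +name: me/ignore-crawlers +description: "hypothetical postoverflow that discards overflows coming from search engine crawlers" +whitelist: + reason: "search engine crawler" + expression: + #assumes the rdns enrichment ran and filled this field + - evt.Enriched.reverse_dns endsWith '.googlebot.com.' +```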
+ diff --git a/docs/v1.X/docs/references/profiles.md b/docs/v1.X/docs/references/profiles.md new file mode 100644 index 000000000..872df9922 --- /dev/null +++ b/docs/v1.X/docs/references/profiles.md @@ -0,0 +1,92 @@ +# Profiles configurations + +The profiles configuration (`/etc/crowdsec/profiles.yaml`) allows you to configure what kind of remediation is applied when a scenario is triggered. + +The configuration file is a yaml file that looks like : + +```yaml +name: enforce_mfa +#debug: true +filters: + - 'Alert.Remediation == true && Alert.GetScenario() == "crowdsecurity/ssh-enforce-mfa" && Alert.GetScope() == "username"' +decisions: #remediation vs decision + - type: enforce_mfa + scope: "username" + duration: 1h +on_success: continue +--- +name: default_ip_remediation +#debug: true +filters: +# try types.Ip here :) + - Alert.Remediation == true && Alert.GetScope() == "Ip" +decisions: + - type: ban + duration: 1h +on_success: break +``` + +Each YAML object in the file describes a profile, made of the following directives : + +## Name + +```yaml +name: foobar +``` + +A label for the profile (used in logging). + +## Debug + +```yaml +debug: true +``` + +A boolean flag that provides contextual debug. + +## Filters + +```yaml +filters: + - Alert.Remediation == true && Alert.GetScope() == "Session" + - Alert.Remediation == true && Alert.GetScope() == "Ip" +``` + +If any `filter` of the list returns `true`, the profile is eligible and the `decisions` will be applied. + +## Decisions + +```yaml +decisions: + - type: captcha + duration: 1h + scope: custom_app1_captcha + - type: ban + duration: 2h +``` + +If the profile applies, decision objects will be created for each of the sources that triggered the scenario. + +It is a list of `models.Decision` objects. The following fields, when present, allow you to alter the resulting decision : + + - `scope` : defines the scope of the resulting decision + - `duration` : defines how long the decision will be valid + - `type` : defines the type of the remediation that will be applied by available {{v1X.bouncers.htmlname}}, for example `ban`, `captcha` + - `value` : defines a hardcoded value for the decision (ie. `1.2.3.4`) + +## on_success + +```yaml +on_success: break +``` + +If the profile applies and `on_success` is set to `break`, decision processing will stop here and the following profiles won't be evaluated. + +## on_failure + +```yaml +on_failure: break +``` + +If the profile didn't apply and `on_failure` is set to `break`, decision processing will stop here and the following profiles won't be evaluated. + diff --git a/docs/v1.X/docs/references/scenarios.md b/docs/v1.X/docs/references/scenarios.md new file mode 100644 index 000000000..71a74d84f --- /dev/null +++ b/docs/v1.X/docs/references/scenarios.md @@ -0,0 +1,477 @@ +## Understanding scenarios + + +Scenarios are YAML files that allow you to detect and qualify a specific behavior, usually an attack. + +Scenarios receive {{v1X.event.htmlname}}(s) and can produce {{v1X.alert.htmlname}}(s) using the [leaky bucket](https://en.wikipedia.org/wiki/Leaky_bucket) algorithm. + +As an {{v1X.event.htmlname}} can represent a log line or an overflow, scenarios can process both logs and overflows, allowing inference.
+ +Scenarios can be of different types (leaky, trigger, counter), and are based on various factors, such as : + + - the speed/frequency of the [leaky bucket](https://en.wikipedia.org/wiki/Leaky_bucket) + - the capacity of the [leaky bucket](https://en.wikipedia.org/wiki/Leaky_bucket) + - the characteristic(s) of eligible {{v1X.event.htmlname}}(s) : "log type XX with field YY set to ZZ" + - various filters/directives that can alter the bucket's behavior, such as [groupby](/Crowdsec/v1/references/scenarios/#groupby), [distinct](/Crowdsec/v1/references/scenarios/#distinct) or [blackhole](/Crowdsec/v1/references/scenarios/#blackhole) + +Behind the scenes, {{v1X.crowdsec.name}} creates one or more buckets when events with matching characteristics reach the scenario. When any of these buckets overflows, the scenario is triggered. + +_Bucket partitioning_ : one scenario usually leads to the creation of many buckets, as each bucket only tracks a specific subset of events. For example, if we are tracking brute-force, each "offending peer" gets its own bucket. + + +A way to detect an HTTP scanner might be to track the number of distinct non-existing pages it's requesting, and the scenario might look like this : + + +```yaml +#the bucket type : leaky, trigger, counter +type: leaky +#name and description for humans +name: crowdsecurity/http-scan-uniques_404 +description: "Detect multiple unique 404 from a single ip" +#a filter to know which events are eligible +filter: "evt.Meta.service == 'http' && evt.Meta.http_status in ['404', '403', '400']" +#how we are going to partition buckets +groupby: "evt.Meta.source_ip" +#we are only interested in counting UNIQUE/DISTINCT requested URLs +distinct: "evt.Meta.http_path" +#we specify the bucket capacity and leak speed +capacity: 5 +leakspeed: "10s" +#this will prevent the same bucket from overflowing more often than every 5 minutes +blackhole: 5m +#some labels to give context to the overflow +labels: + service: http + type: scan + #yes we want to ban people triggering this + remediation: true +``` + + +## Scenario concepts + +### TimeMachine + +{{v1X.crowdsec.name}} can be used not only to process live logs, but also to process "cold" logs (think forensics). + +For this to work, the date/time from the log must have been properly parsed, so that the scenario's temporal logic applies correctly. This relies on the [date-parse enrichment](https://github.com/crowdsecurity/hub/blob/master/parsers/s02-enrich/crowdsecurity/dateparse-enrich.yaml) : the field `evt.StrTime` must have been filled with a string that represents the date & time. The date-parse enrichment supports a large variety of formats. + + +## Scenario directives + +### type + + +```yaml +type: leaky|trigger|counter +``` + +Defines the type of the bucket. Currently three types are supported : + + - `leaky` : a [leaky bucket](https://en.wikipedia.org/wiki/Leaky_bucket) that must be configured with a {{v1X.capacity.htmlname}} and a {{v1X.leakspeed.htmlname}} + - `trigger` : a bucket that overflows as soon as an event is poured (it's like a leaky bucket with a capacity of 0) + - `counter` : a bucket that only overflows every {{v1X.duration.htmlname}}. It's especially useful to count things. + +### name & description + +```yaml +name: my_author_name/my_scenario_name +description: A scenario that detects XXXX behavior +``` + + +Mandatory `name` and `description` for said scenario.
+The name must be unique (and will define the scenario's name in the hub), and the description must be a quick sentence describing what it detects. + + +### filter + +```yaml +filter: expression +``` + +`filter` must be a valid {{v1X.expr.htmlname}} expression that will be evaluated against the {{v1X.event.htmlname}}. + +If the `filter` evaluation returns true or the filter is absent, the event will be poured into the bucket. + +If `filter` returns `false` or a non-boolean, the event will be skipped for this bucket. + +Here is the [expr documentation](https://github.com/antonmedv/expr/tree/master/docs). + +Examples : + + - `evt.Meta.log_type == 'telnet_new_session'` + - `evt.Meta.log_type in ['http_access-log', 'http_error-log'] && evt.Parsed.static_ressource == 'false'` + - `evt.Meta.log_type == 'ssh_failed-auth'` + + +### duration + +```yaml +duration: 45s +duration: 10m +``` + +(applicable to `counter` buckets only) + +A duration after which the bucket will overflow. +The format must be compatible with [golang ParseDuration format](https://golang.org/pkg/time/#ParseDuration). + +Examples : + +```yaml +type: counter +name: crowdsecurity/ban-reports-ssh_bf_report +description: "Count unique ips performing ssh bruteforce" +filter: "evt.Overflow.Scenario == 'ssh_bruteforce'" +distinct: "evt.Overflow.Source_ip" +capacity: -1 +duration: 10m +labels: + service: ssh +``` + + +### groupby + +```yaml +groupby: evt.Meta.source_ip +``` + + +An {{v1X.expr.htmlname}} that must return a string. This string will be used to partition the buckets. + + +Examples : + +Here, each `source_ip` will get its own bucket. + +```yaml +type: leaky +... +groupby: evt.Meta.source_ip +... +``` + + + +Here, each unique combo of `source_ip` + `target_username` will get its own bucket. + +```yaml +type: leaky +... +groupby: evt.Meta.source_ip + '--' + evt.Parsed.target_username +... +``` + + + +### distinct + + +```yaml +distinct: evt.Meta.http_path +``` + + +An {{v1X.expr.htmlname}} that must return a string. The event will be poured **only** if the string is not already present in the bucket. + +Examples : + +This will ensure that events that keep triggering the same `.Meta.http_path` will be poured only once. + +```yaml +type: leaky +... +distinct: "evt.Meta.http_path" +... +``` + +In the logs, you can see it like this (for example from the iptables-logs portscan detection) : + +```bash +DEBU[2020-05-13T11:29:51+02:00] Uniq(7681) : ok buck.. +DEBU[2020-05-13T11:29:51+02:00] Uniq(7681) : ko, discard event buck.. +``` + +The first event was poured (the value `7681` was not yet present in the bucket), while the second one was discarded because the value was already present in the bucket. + + +### capacity + +```yaml +capacity: 5 +``` + + +(Applies only to `leaky` buckets) + +A positive integer representing the bucket capacity. +If there are more than `capacity` items in the bucket, it will overflow. + + +### cache_size + +```yaml +cache_size: 10 +``` + +A positive integer representing the number of events we hold in the bucket. All events are still accounted for by the bucket; this only prevents the bucket's event history from growing larger than `cache_size`. When the bucket reaches `cache_size` events, events are discarded on a first-in first-out basis. + + +### leakspeed + +```yaml +leakspeed: "10s" +``` + +(Applies only to `leaky` buckets) + +A duration that represents how often an event leaks from the bucket. + +Must be compatible with [golang ParseDuration format](https://golang.org/pkg/time/#ParseDuration).
+ + +Example : + +Here the bucket will leak one item every 10 seconds, and can hold up to 5 items before overflowing. + +```yaml +type: leaky +... +leakspeed: "10s" +capacity: 5 +... +``` + + +### labels + +```yaml +labels: + service: ssh + type: bruteforce + remediation: true +``` + +Labels are a list of `label: value` pairs that provide context to an overflow. +The labels are (currently) not stored in the database, nor are they sent to the API. + +Special labels : + + - The **remediation** label, if set to `true`, indicates that the originating IP should be banned. + - The **scope** label can be set to `ip` or `range` when **remediation** is set to `true`, and indicates to which scope the decision should apply. If a scenario sets **remediation** to `true` and **scope** to `range`, and the range of the IP could be determined by the GeoIP library, the whole range to which the IP belongs will be banned. + + +Example : + +The IP that triggered the overflow (`.Meta.source_ip`) will be banned. +```yaml +type: leaky +... +labels: + service: ssh + type: bruteforce + remediation: true +``` + +The range to which the offending IP belongs (`.Meta.source_ip`) will be banned. +```yaml +type: leaky +... +labels: + type: distributed_attack + remediation: true + scope: range +``` + +### blackhole + +```yaml +blackhole: 10m +``` + +A duration for which a bucket will be "silenced" after overflowing. +This is intended to limit or avoid spam from buckets that might be triggered very rapidly. + +The blackhole only applies to the individual bucket rather than the whole scenario. + +Must be compatible with [golang ParseDuration format](https://golang.org/pkg/time/#ParseDuration). + +Example : + +The same `source_ip` won't be able to trigger this overflow more than once every 10 minutes. +The potential overflows in the meanwhile will be discarded (but will still appear in logs as being blackholed). + +```yaml +type: trigger +... +blackhole: 10m +groupby: evt.Meta.source_ip +``` + +### debug + +```yaml +debug: true|false +``` + +_default: false_ + + +If set to `true`, enables scenario-level debugging. +It is meant to help understand scenario behavior by providing contextual logging : + +debug of filters and expression results +``` +DEBU[31-07-2020 16:34:58] eval(evt.Meta.log_type in ["http_access-log", "http_error-log"] && any(File("bad_user_agents.txt"), {evt.Parsed.http_user_agent contains #})) = TRUE cfg=still-feather file=config/scenarios/http-bad-user-agent.yaml name=crowdsecurity/http-bad-user-agent +DEBU[31-07-2020 16:34:58] eval variables: cfg=still-feather file=config/scenarios/http-bad-user-agent.yaml name=crowdsecurity/http-bad-user-agent +DEBU[31-07-2020 16:34:58] evt.Meta.log_type = 'http_access-log' cfg=still-feather file=config/scenarios/http-bad-user-agent.yaml name=crowdsecurity/http-bad-user-agent +DEBU[31-07-2020 16:34:58] evt.Parsed.http_user_agent = 'Mozilla/5.00 (Nikto/2.1.5) (Evasions:None) (Test:002810)' cfg=still-feather file=config/scenarios/http-bad-user-agent.yaml name=crowdsecurity/http-bad-user-agent +```
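+ +As a quick, hypothetical workflow (the file name is illustrative) : set `debug: true` on a local copy of the scenario, then replay a log file in time-machine mode to observe these lines : + +```bash +#illustrative : replay a log file against the local configuration, +#with the local API disabled, to trigger the scenario's debug output +sudo crowdsec -file ./test.log -type syslog -no-api +```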
+ +### reprocess + +```yaml +reprocess: true|false +``` + +_default: false_ + +If set to `true`, the resulting overflow will be sent again into the scenario/parsing pipeline. +It is useful when you want further scenarios to rely on past overflows to take decisions. + + +### cache_size + +```yaml +cache_size: 5 +``` + +By default, a bucket holds {{v1X.capacity.htmlname}} events "in memory". +However, in a number of cases, you don't want this, as it might lead to excessive memory consumption. + +By setting `cache_size` to a positive integer, we can control the maximum in-memory cache size of the bucket, without changing its capacity. This is especially useful when using `counter` buckets on long durations that might end up counting (and thus storing in memory) a large number of events. + + +### overflow_filter + +```yaml +overflow_filter: any(queue.Queue, { .Enriched.IsInEU == "true" }) +``` + +`overflow_filter` is an {{v1X.expr.htmlname}} that is run when the bucket overflows. +If this expression is present and returns false, the overflow will be discarded. + + +### data + +```yaml +data: + - source_url: https://URL/TO/FILE + dest_file: LOCAL_FILENAME + [type: (regexp|string)] +``` + +`data` allows the user to specify an external source of data. +This section is only relevant when `cscli` is used to install a scenario from the hub, as it will download the `source_url` and store it to `dest_file`. When the scenario is not installed from the hub, {{v1X.crowdsec.name}} won't download the URL, but the file must exist for the scenario to be loaded correctly. +The `type` is mandatory if you want to evaluate the data in the file, and should be `regexp` for a valid (re2) regular expression per line or `string` for a string per line. +The regexps will be compiled, the strings will be loaded into a list, and both will be kept in memory. +Without a `type`, the file will be downloaded and stored as a file, not loaded in memory. + + +```yaml +name: crowdsecurity/cdn-whitelist +... +data: + - source_url: https://www.cloudflare.com/ips-v4 + dest_file: cloudflare_ips.txt + type: string +``` + + +### format + +```yaml +format: 2.0 +``` + +{{v1X.crowdsec.name}} has a notion of format support for parsers & scenarios for compatibility management. +Running `cscli version` will show you the compatibility matrix : + +```bash +$ cscli version +2020/11/05 09:35:05 version: v0.3.6-183e34c966c475e0d2cdb3c60d0b7426499aa573 +2020/11/05 09:35:05 Codename: beta +2020/11/05 09:35:05 BuildDate: 2020-11-04_17:56:46 +2020/11/05 09:35:05 GoVersion: 1.13 +2020/11/05 09:35:05 Constraint_parser: >= 1.0, < 2.0 +2020/11/05 09:35:05 Constraint_scenario: >= 1.0, < 3.0 +2020/11/05 09:35:05 Constraint_api: v1 +2020/11/05 09:35:05 Constraint_acquis: >= 1.0, < 2.0 +``` + +### Scope + +```yaml +scope: + type: Range + expression: evt.Parsed.mySourceRange +``` + +While most scenarios might focus on IPs, {{v1X.crowdsec.name}} and {{v1X.bouncers.name}} can work with any scope.
+The `scope` directive allows you to override the default scope :
+
+ - `type` is a string representing the scope name
+ - `expression` is an `expr` expression that will be evaluated to fetch the value
+
+
+Let's imagine a scenario such as :
+
+```yaml
+# ssh bruteforce
+type: leaky
+name: crowdsecurity/ssh-enforce-mfa
+description: "Enforce mfa on users that have been bruteforced"
+filter: "evt.Meta.log_type == 'ssh_failed-auth'"
+leakspeed: "10s"
+capacity: 5
+groupby: evt.Meta.source_ip
+blackhole: 1m
+labels:
+  service: ssh
+  type: bruteforce
+  remediation: true
+scope:
+  type: username
+  expression: evt.Meta.target_user
+```
+
+and a profile such as :
+
+```yaml
+name: enforce_mfa
+filters:
+  - 'Alert.Remediation == true && Alert.GetScope() == "username"'
+decisions:
+  - type: enforce_mfa
+    scope: "username"
+    duration: 1h
+on_success: continue
+```
+
+The resulting overflow will be :
+
+```bash
+$ ./cscli -c dev.yaml decisions list
++----+----------+---------------+-------------------------------+-------------+---------+----+--------+------------------+
+| ID | SOURCE | SCOPE:VALUE | REASON | ACTION | COUNTRY | AS | EVENTS | EXPIRATION |
++----+----------+---------------+-------------------------------+-------------+---------+----+--------+------------------+
+| 2 | crowdsec | username:rura | crowdsecurity/ssh-enforce-mfa | enforce_mfa | | | 6 | 59m46.121840343s |
+```
+
diff --git a/docs/v1.X/docs/references/simulation.md b/docs/v1.X/docs/references/simulation.md
new file mode 100644
index 000000000..828e7cfe3
--- /dev/null
+++ b/docs/v1.X/docs/references/simulation.md
@@ -0,0 +1,13 @@
+# Simulation
+
+The simulation config is in `/etc/crowdsec/simulation.yaml` and looks like :
+
+```yaml
+#if simulation is set to 'true' here, *all* scenarios will be in simulation unless in exclusion list
+simulation: false
+#exclusions to the policy - here, the scenarios that are in simulation mode
+exclusions:
+- crowdsecurity/ssh-bf
+
+```
+
diff --git a/docs/v1.X/docs/references/stages.md b/docs/v1.X/docs/references/stages.md
new file mode 100644
index 000000000..205348492
--- /dev/null
+++ b/docs/v1.X/docs/references/stages.md
@@ -0,0 +1,24 @@
+# Stages
+
+Parsers are organized into "stages" (named using a "sXX-" convention) to allow pipelines and branching in parsing. Each parser belongs to a stage, and can trigger the next stage when successful. At the time of writing, the parsers are organized around 3 stages :
+
+ - `s00-raw` : low-level parsers, such as syslog
+ - `s01-parse` : most of the service parsers (ssh, nginx etc.)
+ - `s02-enrich` : enrichment that requires parsed events (ie. geoip-enrichment) or generic parsers that apply on parsed logs (ie. second stage http parser)
+
+The number and structure of stages can be altered by the user; the directory structure and its alphabetical order dictate the order in which stages and parsers are processed.
+
+Every event starts in the first stage, and will move to the next stage once it has been successfully processed by a parser that has the `onsuccess` directive set to `next_stage`, and so on until it reaches the last stage, after which it is matched against scenarios.
+
+## Default stages
+
+- The preliminary stage (`s00-raw`) is mostly the one that will parse the structure of the log. This is where [syslog-logs](https://hub.crowdsec.net/author/crowdsecurity/configurations/syslog-logs) are parsed for example. Such a parser will parse the syslog header to detect the program source.
+
+- The main stage (`s01-parse`) is the one that will parse actual application logs and output parsed data and statically assigned values. There is one parser for each type of software. To parse the logs, regexps or GROK patterns are used. If the parser is configured to go to the [`next_stage`](/Crowdsec/v1/references/parsers/#onsuccess), it will then be processed by the `enrichment` stage.
+
+- The enrichment (`s02-enrich`) stage is the one that will enrich the normalized log (we call it an event now that it is normalized) in order to get more information for the heuristic process. This stage can be composed of grok patterns and so on, but also of plugins written by the community (geoip enrichment, rdns ...), for example [geoip-enrich](https://hub.crowdsec.net/author/crowdsecurity/configurations/geoip-enrich).
+
+
+## Custom stage
+
+It is possible to write a custom stage. If you want some specific parsing or enrichment to be done after the `s02-enrich` stage, you can do so by creating a new folder `s03-` (and so on). The configurations created in this folder will process the events configured to go to `next_stage` in the `s02-enrich` stage.
\ No newline at end of file
diff --git a/docs/v1.X/docs/user_guide/bouncer_machine_management.md b/docs/v1.X/docs/user_guide/bouncer_machine_management.md
new file mode 100644
index 000000000..f51a7bdf1
--- /dev/null
+++ b/docs/v1.X/docs/user_guide/bouncer_machine_management.md
@@ -0,0 +1,121 @@
+# Bouncers & Machines management
+
+Crowdsec is composed of different components that communicate via a local API.
+To access this API, the various components (crowdsec agent, cscli and bouncers) need to be authenticated.
+
+!!! info
+    This documentation should be relevant mostly for administrators that want to set up distributed architectures. Users with a single-machine setup can likely skip this part.
+
+
+There are two kinds of access to the local API :
+
+ - `machines` : a login/password authentication used by {{v1X.cli.name}} and {{v1X.crowdsec.name}}; it allows posting, getting and deleting decisions and alerts.
+ - `bouncers` : a token authentication used by {{v1X.bouncers.htmlname}} to query the decisions; it only allows reading decisions and alerts.
+
+## Bouncers authentication
+
+!!! warning
+    The `cscli bouncers` command interacts directly with the database (bouncers add and delete are not implemented in the API), and thus it must have a correct database configuration.
+
+```bash
+$ cscli bouncers list
+```
+
+
+You can view the registered bouncers with `list`, as well as add or delete them :
+
+```bash
+$ cscli bouncers add mybouncersname
+Api key for 'mybouncersname':
+
+   23........b5a0c
+
+Please keep this key since will not be able to retrive it!
+$ cscli bouncers delete mybouncersname
+```
+
+The API KEY must be kept and given to the {{v1X.bouncers.htmlname}}.
+
+ cscli bouncers example +```bash +$ sudo cscli bouncers add mybouncersname +Api key for 'mybouncersname': + + 23........b5a0c + +Please keep this key since will not be able to retrive it! +$ sudo cscli bouncers list +----------------------------------------------------------------------------- + NAME IP ADDRESS VALID LAST API PULL TYPE VERSION +----------------------------------------------------------------------------- + mybouncersname ✔️ 2020-11-01T11:45:05+01:00 +----------------------------------------------------------------------------- +$ sudo cscli bouncers add jlkqweq +Api key for 'jlkqweq': + + a7........efdc9c + +Please keep this key since will not be able to retrive it! +$ sudo cscli bouncers delete mybouncersname +$ sudo cscli bouncers list +---------------------------------------------------------------------- + NAME IP ADDRESS VALID LAST API PULL TYPE VERSION +---------------------------------------------------------------------- + jlkqweq ✔️ 2020-11-01T11:49:32+01:00 +---------------------------------------------------------------------- +``` + +
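+For illustration, here is the shape of the configuration through which a bouncer typically consumes this key. This is a minimal sketch : the file path and the exact key names vary from one {{v1X.bouncers.htmlname}} to another, so refer to your bouncer's own documentation :
+
+```yaml
+# hypothetical bouncer configuration file, e.g. /etc/crowdsec/bouncers/mybouncer.yaml
+# URL of the local API to query for decisions
+api_url: http://127.0.0.1:8080/
+# the API key generated by 'cscli bouncers add mybouncersname'
+api_key: 23........b5a0c
+```
+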
+ +## Machines authentication + +!!! warning + The `cscli machines` command interacts directly with the database (machines add and delete are not implemented in API), and thus it must have correct database configuration. + +```bash +$ cscli machines list +``` + +You can view the registered machines with `list`, as well as add or delete them : + +```bash +$ cscli machines add -m mytestmachine -a +INFO[0004] Machine 'mytestmachine' created successfully +INFO[0004] API credentials dumped to '/etc/crowdsec/local_api_credentials.yaml' +$ cscli machines delete -m 82929df7ee394b73b81252fe3b4e5020 +``` + + +
+    cscli machines example
+
+```bash
+$ cscli machines list
+----------------------------------------------------------------------------------------------------------------------------------
+ NAME IP ADDRESS LAST UPDATE STATUS VERSION
+----------------------------------------------------------------------------------------------------------------------------------
+ 82929df7ee394b73b81252fe3b4e5020 127.0.0.1 2020-10-31T14:06:32+01:00 ✔️ v0.3.6-3d6ce33908409f2a830af6551a7f5e37f2a4728f
+----------------------------------------------------------------------------------------------------------------------------------
+$ cscli machines add -m mytestmachine -a
+INFO[0004] Machine 'mytestmachine' created successfully
+INFO[0004] API credentials dumped to '/etc/crowdsec/local_api_credentials.yaml'
+$ sudo cscli machines list
+----------------------------------------------------------------------------------------------------------------------------------
+ NAME IP ADDRESS LAST UPDATE STATUS VERSION
+----------------------------------------------------------------------------------------------------------------------------------
+ 82929df7ee394b73b81252fe3b4e5020 127.0.0.1 2020-10-31T14:06:32+01:00 ✔️ v0.3.6-3d6ce33908409f2a830af6551a7f5e37f2a4728f
+ mytestmachine 127.0.0.1 2020-11-01T11:37:19+01:00 ✔️ v0.3.6-6a18458badf8ae5fed8d5f1bb96fc7a59c96163c
+----------------------------------------------------------------------------------------------------------------------------------
+$ cscli machines delete -m 82929df7ee394b73b81252fe3b4e5020
+$ cscli machines list
+---------------------------------------------------------------------------------------------------------
+ NAME IP ADDRESS LAST UPDATE STATUS VERSION
+---------------------------------------------------------------------------------------------------------
+ mytestmachine 127.0.0.1 2020-11-01T11:37:19+01:00 ✔️ v0.3.6-6a18458badf8ae5fed8d5f1bb96fc7a59c96163c
+---------------------------------------------------------------------------------------------------------
+
+```
+
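+For reference, the credentials file dumped by `cscli machines add` is a small yaml file that the agent and `cscli` use to authenticate against the local API. A sketch with placeholder values (the url reflects the default local API listen address) :
+
+```yaml
+# /etc/crowdsec/local_api_credentials.yaml (placeholder values)
+url: http://127.0.0.1:8080
+login: mytestmachine
+password: ...
+```
+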
diff --git a/docs/v1.X/docs/user_guide/configurations_management/acquisition.md b/docs/v1.X/docs/user_guide/configurations_management/acquisition.md
new file mode 100644
index 000000000..bc36a1d67
--- /dev/null
+++ b/docs/v1.X/docs/user_guide/configurations_management/acquisition.md
@@ -0,0 +1,87 @@
+!!! info
+
+    Please note that the `{{v1X.config.acquis_path}}` should be auto-generated by the {{v1X.wizard.name}} in most cases.
+
+The acquisition configuration specifies lists of logs that {{v1X.crowdsec.name}} will ingest and feed to parsers.
+Acquisition provides two pieces of information about a given log :
+
+ - its source (a path to a file, or a journalctl filter)
+ - its type, given in the form of a label
+
+The `type` label is crucial as it's later used in the process to determine which parser(s) can handle lines coming from this source.
+
+Acquisition can be found in `{{v1X.config.acquis_path}}`, for example :
+ Acquisition example +```yaml +filenames: + - /var/log/nginx/access-*.log + - /var/log/nginx/error.log +labels: + type: nginx +--- +filenames: + - /var/log/auth.log +labels: + type: syslog +--- +journalctl_filter: + - "_SYSTEMD_UNIT=ssh.service" +labels: + type: syslog +``` +
+ + +## Testing and viewing acquisition + +### At startup + +At startup, you will see the monitored files in `/var/log/crowdsec.log` : + +``` +... +INFO[23-11-2020 15:21:17] [file datasource] opening file '/tmp/test.log' +WARN[23-11-2020 15:21:17] [file datasource] no results for /tmp/ratata.log +INFO[23-11-2020 15:21:17] [journald datasource] Configured with filters : [--follow _SYSTEMD_UNIT=ssh.service] +... +``` + +### At runtime + +{{v1X.cli.name}} allows you to view {{v1X.crowdsec.name}} metrics info via the `metrics` command. +This allows you to see how many lines are coming from each source, and if they are parsed correctly. + +You can see those metrics with the following command: +``` +{{v1X.cli.bin}} metrics +``` + + +
+ {{v1X.cli.name}} metrics example + +```bash +## {{v1X.cli.bin}} metrics +... +INFO[0000] Acquisition Metrics: ++--------------------------------------+------------+--------------+----------------+------------------------+ +| SOURCE | LINES READ | LINES PARSED | LINES UNPARSED | LINES POURED TO BUCKET | ++--------------------------------------+------------+--------------+----------------+------------------------+ +| /tmp/test.log | 10 | 10 | - | 11 | +| journalctl-_SYSTEMD_UNIT=ssh.service | 36 | 12 | 24 | 17 | ++--------------------------------------+------------+--------------+----------------+------------------------+ +... +``` + +
+ + +!!! info + + All these metrics are actually coming from {{v1X.crowdsec.name}}'s prometheus agent. See [prometheus](/Crowdsec/v1/observability/prometheus/) directly for more insights. + + +## Reference documentation + +[Link to acquisition reference documentation](/Crowdsec/v1/references/acquisition/) diff --git a/docs/v1.X/docs/user_guide/configurations_management/collections.md b/docs/v1.X/docs/user_guide/configurations_management/collections.md new file mode 100644 index 000000000..b533f800b --- /dev/null +++ b/docs/v1.X/docs/user_guide/configurations_management/collections.md @@ -0,0 +1,138 @@ + +{{v1X.hub.htmlname}} allows you to find needed collections. + +## Installing collections + +```bash +$ cscli collections install crowdsecurity/whitelist-good-actors +``` + +
+ {{v1X.cli.name}} collection install example + +```bash +$ cscli collections install crowdsecurity/whitelist-good-actors +INFO[0000] crowdsecurity/seo-bots-whitelist : OK +INFO[0000] downloading data 'https://raw.githubusercontent.com/crowdsecurity/sec-lists/master/whitelists/benign_bots/search_engine_crawlers/rdns_seo_bots.txt' in '/var/lib/crowdsec/data/rdns_seo_bots.txt' +INFO[0001] downloading data 'https://raw.githubusercontent.com/crowdsecurity/sec-lists/master/whitelists/benign_bots/search_engine_crawlers/rnds_seo_bots.regex' in '/var/lib/crowdsec/data/rdns_seo_bots.regex' +INFO[0002] downloading data 'https://raw.githubusercontent.com/crowdsecurity/sec-lists/master/whitelists/benign_bots/search_engine_crawlers/ip_seo_bots.txt' in '/var/lib/crowdsec/data/ip_seo_bots.txt' +INFO[0002] crowdsecurity/cdn-whitelist : OK +INFO[0002] downloading data 'https://www.cloudflare.com/ips-v4' in '/var/lib/crowdsec/data/cloudflare_ips.txt' +INFO[0003] crowdsecurity/rdns : OK +INFO[0003] crowdsecurity/whitelist-good-actors : OK +INFO[0003] /etc/crowdsec/postoverflows/s01-whitelist doesn't exist, create +INFO[0003] Enabled postoverflows : crowdsecurity/seo-bots-whitelist +INFO[0003] Enabled postoverflows : crowdsecurity/cdn-whitelist +INFO[0003] /etc/crowdsec/postoverflows/s00-enrich doesn't exist, create +INFO[0003] Enabled postoverflows : crowdsecurity/rdns +INFO[0003] Enabled collections : crowdsecurity/whitelist-good-actors +INFO[0003] Enabled crowdsecurity/whitelist-good-actors +INFO[0003] Run 'systemctl reload crowdsec' for the new configuration to be effective. +$ systemctl reload crowdsec +``` +
+ + +## Listing installed collections + +```bash +$ {{v1X.cli.bin}} collections list +``` + +
+ cscli collections list example + +```bash +$ cscli collections list +------------------------------------------------------------------------------------------------------------- + NAME 📦 STATUS VERSION LOCAL PATH +------------------------------------------------------------------------------------------------------------- + crowdsecurity/nginx ✔️ enabled 0.1 /etc/crowdsec/collections/nginx.yaml + crowdsecurity/base-http-scenarios ✔️ enabled 0.1 /etc/crowdsec/collections/base-http-scenarios.yaml + crowdsecurity/sshd ✔️ enabled 0.1 /etc/crowdsec/collections/sshd.yaml + crowdsecurity/linux ✔️ enabled 0.2 /etc/crowdsec/collections/linux.yaml +------------------------------------------------------------------------------------------------------------- +``` + +
+ +## Upgrading installed collections + +```bash +$ {{v1X.cli.bin}} hub update +$ {{v1X.cli.bin}} collections upgrade crowdsecurity/sshd +``` + +Collection upgrade allows you to upgrade an existing collection (and its items) to the latest version. + + +
+ cscli collections upgrade example + +```bash +$ cscli collections upgrade crowdsecurity/sshd +INFO[0000] crowdsecurity/sshd : up-to-date +WARN[0000] crowdsecurity/sshd-logs : overwrite +WARN[0000] crowdsecurity/ssh-bf : overwrite +WARN[0000] crowdsecurity/sshd : overwrite +INFO[0000] 📦 crowdsecurity/sshd : updated +INFO[0000] Upgraded 1 items +INFO[0000] Run 'systemctl reload crowdsec' for the new configuration to be effective. +$ systemctl reload crowdsec + +``` + +
+ +## Monitoring collections + +```bash +$ cscli collections inspect crowdsecurity/sshd +``` + +Collections inspect will give you detailed information about a given collection, including versioning information *and* runtime metrics (fetched from prometheus). + +
+ cscli collections inspect example + +```bash +$ cscli collections inspect crowdsecurity/sshd +type: collections +name: crowdsecurity/sshd +filename: sshd.yaml +description: 'sshd support : parser and brute-force detection' +author: crowdsecurity +belongs_to_collections: +- crowdsecurity/linux +- crowdsecurity/linux +remote_path: collections/crowdsecurity/sshd.yaml +version: "0.1" +local_path: /etc/crowdsec/collections/sshd.yaml +localversion: "0.1" +localhash: 21159aeb87529efcf1a5033f720413d5321a6451bab679a999f7f01a7aa972b3 +installed: true +downloaded: true +uptodate: true +tainted: false +local: false +parsers: +- crowdsecurity/sshd-logs +scenarios: +- crowdsecurity/ssh-bf + +Current metrics : + + - (Scenario) crowdsecurity/ssh-bf: ++---------------+-----------+--------------+--------+---------+ +| CURRENT COUNT | OVERFLOWS | INSTANCIATED | POURED | EXPIRED | ++---------------+-----------+--------------+--------+---------+ +| 0 | 1 | 2 | 10 | 1 | ++---------------+-----------+--------------+--------+---------+ + +``` + +
+
+## Reference documentation
+
+[Link to collections reference documentation](/Crowdsec/v1/references/collections/)
diff --git a/docs/v1.X/docs/user_guide/configurations_management/enrichers.md b/docs/v1.X/docs/user_guide/configurations_management/enrichers.md
new file mode 100644
index 000000000..c9aa22495
--- /dev/null
+++ b/docs/v1.X/docs/user_guide/configurations_management/enrichers.md
@@ -0,0 +1,26 @@
+Enrichers are basically {{v1X.parsers.htmlname}} that can rely on external methods to provide extra contextual information to the event. The enrichers usually live in the `s02-enrich` {{v1X.stage.htmlname}} (after most of the parsing happened).
+
+Enricher functions should all accept a string as a parameter, and return an associative string array that will be automatically merged into the `Enriched` map of the {{v1X.event.htmlname}}.
+
+!!! warning
+    At the time of writing, the enricher plugin mechanism is still being implemented (read: the list of available enrichment methods is currently hardcoded).
+
+
+As an example, let's look at the geoip-enrich parser/enricher :
+
+It relies on [the geolite2 data created by maxmind](https://www.maxmind.com) and the [geoip2 golang module](https://github.com/oschwald/geoip2-golang) to provide the actual data.
+
+
+It exposes three methods : `GeoIpCity`, `GeoIpASN` and `IpToRange`, which are used by `crowdsecurity/geoip-enrich`.
+Enrichers can be installed like any other parser with the following command:
+
+```
+{{v1X.cli.bin}} parsers install crowdsecurity/geoip-enrich
+```
+
+Take a tour of the {{v1X.hub.htmlname}} to find them !
+
+## Reference documentation
+
+[Link to enrichers reference documentation](/Crowdsec/v1/references/enrichers/)
+
diff --git a/docs/v1.X/docs/user_guide/configurations_management/local-configurations.md b/docs/v1.X/docs/user_guide/configurations_management/local-configurations.md
new file mode 100644
index 000000000..1301e7bec
--- /dev/null
+++ b/docs/v1.X/docs/user_guide/configurations_management/local-configurations.md
@@ -0,0 +1,4 @@
+# Managing local configurations
+
+
+
diff --git a/docs/v1.X/docs/user_guide/configurations_management/parsers.md b/docs/v1.X/docs/user_guide/configurations_management/parsers.md
new file mode 100644
index 000000000..e0e6a9ecb
--- /dev/null
+++ b/docs/v1.X/docs/user_guide/configurations_management/parsers.md
@@ -0,0 +1,133 @@
+{{v1X.hub.htmlname}} allows you to find needed parsers.
+
+## Installing parsers
+
+```bash
+$ cscli parsers install crowdsecurity/sshd-logs
+```
+
+ cscli parsers install example + +```bash +$ cscli parsers install crowdsecurity/iptables-logs +INFO[0000] crowdsecurity/iptables-logs : OK +INFO[0000] Enabled parsers : crowdsecurity/iptables-logs +INFO[0000] Enabled crowdsecurity/iptables-logs +INFO[0000] Run 'systemctl reload crowdsec' for the new configuration to be effective. +``` +
+
+## Listing installed parsers
+
+```bash
+cscli parsers list
+```
+
+{{v1X.parsers.Htmlname}} are yaml files in `{{v1X.config.crowdsec_dir}}parsers/<stage>/parser.yaml`.
+
+
+
+
+ cscli parsers list example + +```bash +$ cscli parsers list +-------------------------------------------------------------------------------------------------------------- + NAME 📦 STATUS VERSION LOCAL PATH +-------------------------------------------------------------------------------------------------------------- + crowdsecurity/whitelists ✔️ enabled 0.1 /etc/crowdsec/parsers/s02-enrich/whitelists.yaml + crowdsecurity/dateparse-enrich ✔️ enabled 0.1 /etc/crowdsec/parsers/s02-enrich/dateparse-enrich.yaml + crowdsecurity/iptables-logs ✔️ enabled 0.1 /etc/crowdsec/parsers/s01-parse/iptables-logs.yaml + crowdsecurity/syslog-logs ✔️ enabled 0.1 /etc/crowdsec/parsers/s00-raw/syslog-logs.yaml + crowdsecurity/sshd-logs ✔️ enabled 0.1 /etc/crowdsec/parsers/s01-parse/sshd-logs.yaml + crowdsecurity/geoip-enrich ✔️ enabled 0.2 /etc/crowdsec/parsers/s02-enrich/geoip-enrich.yaml + crowdsecurity/http-logs ✔️ enabled 0.2 /etc/crowdsec/parsers/s02-enrich/http-logs.yaml + crowdsecurity/nginx-logs ✔️ enabled 0.1 /etc/crowdsec/parsers/s01-parse/nginx-logs.yaml +-------------------------------------------------------------------------------------------------------------- + +``` + +
+ + +## Upgrading installed parsers + +```bash +$ {{v1X.cli.bin}} parsers upgrade crowdsecurity/sshd-logs +``` + +Parsers upgrade allows you to upgrade an existing parser to the latest version. + +
+ cscli parsers upgrade example + +```bash +$ cscli collections upgrade crowdsecurity/sshd +INFO[0000] crowdsecurity/sshd : up-to-date +WARN[0000] crowdsecurity/sshd-logs : overwrite +WARN[0000] crowdsecurity/ssh-bf : overwrite +WARN[0000] crowdsecurity/sshd : overwrite +INFO[0000] 📦 crowdsecurity/sshd : updated +INFO[0000] Upgraded 1 items +INFO[0000] Run 'systemctl reload crowdsec' for the new configuration to be effective. + +``` + +
+ +## Monitoring parsers + +```bash +$ cscli collections inspect crowdsecurity/sshd +``` + +Collections inspect will give you detailed information about a given collection, including versioning information *and* runtime metrics (fetched from prometheus). + + +
+ cscli collections inspect example + +```bash +$ cscli collections inspect crowdsecurity/sshd +type: collections +name: crowdsecurity/sshd +filename: sshd.yaml +description: 'sshd support : parser and brute-force detection' +author: crowdsecurity +belongs_to_collections: +- crowdsecurity/linux +- crowdsecurity/linux +remote_path: collections/crowdsecurity/sshd.yaml +version: "0.1" +local_path: /etc/crowdsec/collections/sshd.yaml +localversion: "0.1" +localhash: 21159aeb87529efcf1a5033f720413d5321a6451bab679a999f7f01a7aa972b3 +installed: true +downloaded: true +uptodate: true +tainted: false +local: false +parsers: +- crowdsecurity/sshd-logs +scenarios: +- crowdsecurity/ssh-bf + +Current metrics : + + - (Scenario) crowdsecurity/ssh-bf: ++---------------+-----------+--------------+--------+---------+ +| CURRENT COUNT | OVERFLOWS | INSTANCIATED | POURED | EXPIRED | ++---------------+-----------+--------------+--------+---------+ +| 0 | 1 | 2 | 10 | 1 | ++---------------+-----------+--------------+--------+---------+ + +``` + +
+ +## Reference documentation + +[Link to parsers reference documentation](/Crowdsec/v1/references/parsers/) + diff --git a/docs/v1.X/docs/user_guide/configurations_management/scenarios.md b/docs/v1.X/docs/user_guide/configurations_management/scenarios.md new file mode 100644 index 000000000..4f1973566 --- /dev/null +++ b/docs/v1.X/docs/user_guide/configurations_management/scenarios.md @@ -0,0 +1,132 @@ +{{v1X.hub.htmlname}} allows you to find needed scenarios. + +## Installing scenarios + +```bash +$ cscli scenarios install crowdsecurity/http-bf-wordpress_bf +``` + +
+ cscli scenarios install example + +```bash +$ cscli scenarios install crowdsecurity/http-bf-wordpress_bf +INFO[0000] crowdsecurity/http-bf-wordpress_bf : OK +INFO[0000] Enabled scenarios : crowdsecurity/http-bf-wordpress_bf +INFO[0000] Enabled crowdsecurity/http-bf-wordpress_bf +INFO[0000] Run 'systemctl reload crowdsec' for the new configuration to be effective. +$ systemctl reload crowdsec +``` + +
+ + +## Listing installed scenarios + +```bash +cscli scenarios list +``` + +{{v1X.scenarios.Htmlname}} are yaml files in `{{v1X.config.crowdsec_dir}}scenarios/`. + + +
+ cscli scenarios list example + +```bash +$ cscli scenarios list +--------------------------------------------------------------------------------------------------------------------------- + NAME 📦 STATUS VERSION LOCAL PATH +--------------------------------------------------------------------------------------------------------------------------- + crowdsecurity/ssh-bf ✔️ enabled 0.1 /etc/crowdsec/scenarios/ssh-bf.yaml + crowdsecurity/http-bf-wordpress_bf ✔️ enabled 0.1 /etc/crowdsec/scenarios/http-bf-wordpress_bf.yaml + crowdsecurity/http-crawl-non_statics ✔️ enabled 0.2 /etc/crowdsec/scenarios/http-crawl-non_statics.yaml + crowdsecurity/http-probing ✔️ enabled 0.1 /etc/crowdsec/scenarios/http-probing.yaml + crowdsecurity/http-sensitive-files ✔️ enabled 0.2 /etc/crowdsec/scenarios/http-sensitive-files.yaml + crowdsecurity/http-bad-user-agent ✔️ enabled 0.2 /etc/crowdsec/scenarios/http-bad-user-agent.yaml + crowdsecurity/http-path-traversal-probing ✔️ enabled 0.2 /etc/crowdsec/scenarios/http-path-traversal-probing.yaml + crowdsecurity/http-sqli-probing ✔️ enabled 0.2 /etc/crowdsec/scenarios/http-sqli-probing.yaml + crowdsecurity/http-backdoors-attempts ✔️ enabled 0.2 /etc/crowdsec/scenarios/http-backdoors-attempts.yaml + crowdsecurity/http-xss-probing ✔️ enabled 0.2 /etc/crowdsec/scenarios/http-xss-probing.yaml +--------------------------------------------------------------------------------------------------------------------------- + +``` + +
+
+
+## Upgrading installed scenarios
+
+```bash
+$ cscli scenarios upgrade crowdsecurity/ssh-bf
+```
+
+Scenarios upgrade allows you to upgrade an existing scenario to the latest version.
+
+ cscli scenarios upgrade example + +```bash +$ cscli scenarios upgrade crowdsecurity/ssh-bf +INFO[0000] crowdsecurity/ssh-bf : up-to-date +WARN[0000] crowdsecurity/ssh-bf : overwrite +INFO[0000] 📦 crowdsecurity/ssh-bf : updated +INFO[0000] Upgraded 1 items +INFO[0000] Run 'systemctl reload crowdsec' for the new configuration to be effective. +``` + +
+
+## Monitoring scenarios
+
+```bash
+$ cscli scenarios inspect crowdsecurity/ssh-bf
+```
+
+Scenarios inspect will give you detailed information about a given scenario, including versioning information *and* runtime metrics (fetched from prometheus).
+
+
+ cscli collections inspect example + +```bash +$ cscli collections inspect crowdsecurity/sshd +type: collections +name: crowdsecurity/sshd +filename: sshd.yaml +description: 'sshd support : parser and brute-force detection' +author: crowdsecurity +belongs_to_collections: +- crowdsecurity/linux +- crowdsecurity/linux +remote_path: collections/crowdsecurity/sshd.yaml +version: "0.1" +local_path: /etc/crowdsec/collections/sshd.yaml +localversion: "0.1" +localhash: 21159aeb87529efcf1a5033f720413d5321a6451bab679a999f7f01a7aa972b3 +installed: true +downloaded: true +uptodate: true +tainted: false +local: false +parsers: +- crowdsecurity/sshd-logs +scenarios: +- crowdsecurity/ssh-bf + +Current metrics : + + - (Scenario) crowdsecurity/ssh-bf: ++---------------+-----------+--------------+--------+---------+ +| CURRENT COUNT | OVERFLOWS | INSTANCIATED | POURED | EXPIRED | ++---------------+-----------+--------------+--------+---------+ +| 0 | 1 | 2 | 10 | 1 | ++---------------+-----------+--------------+--------+---------+ + +``` + +
+
+## Reference documentation
+
+[Link to scenarios reference documentation](/Crowdsec/v1/references/scenarios/)
diff --git a/docs/v1.X/docs/user_guide/cscli.md b/docs/v1.X/docs/user_guide/cscli.md
new file mode 100644
index 000000000..bb5d19f2c
--- /dev/null
+++ b/docs/v1.X/docs/user_guide/cscli.md
@@ -0,0 +1,17 @@
+# Overview
+
+`{{v1X.cli.name}}` is the utility that will help you to manage {{v1X.crowdsec.name}}. This tool has the following functionalities:
+
+ - manage [decisions](/Crowdsec/v1/cscli/cscli_decisions/) and [alerts](/Crowdsec/v1/cscli/cscli_alerts/) : This is how you monitor ongoing remediations and detections
+ - manage configurations such as [collections](/Crowdsec/v1/cscli/cscli_collections/), [parsers](/Crowdsec/v1/cscli/cscli_parsers/), [scenarios](/Crowdsec/v1/cscli/cscli_scenarios/) : This is how you install/update {{v1X.crowdsec.htmlname}}'s detection capabilities and manage whitelists
+ - interact with the [hub](/Crowdsec/v1/cscli/cscli_hub/) to find new configurations or update existing ones
+ - manage local API (LAPI) [bouncers](/Crowdsec/v1/cscli/cscli_bouncers/) and [machines](/Crowdsec/v1/cscli/cscli_machines/) : This allows you to manage LAPI credentials; this is how you make {{v1X.crowdsec.htmlname}} and bouncers communicate
+ - observe crowdsec via [metrics](/Crowdsec/v1/cscli/cscli_metrics/) or the [dashboard](/Crowdsec/v1/cscli/cscli_dashboard/) : This is how you gain real-time observability
+ - manage [simulation](/Crowdsec/v1/cscli/cscli_simulation/) configurations, allowing you to disable/modify remediation triggered by specific scenarios
+
+
+Take a look at the [dedicated documentation](/Crowdsec/v1/cscli/cscli)
+
+# Configuration
+
+`{{v1X.cli.name}}` shares the configuration file of {{v1X.crowdsec.name}}, usually in `/etc/crowdsec/config.yaml`
diff --git a/docs/v1.X/docs/user_guide/database.md b/docs/v1.X/docs/user_guide/database.md
new file mode 100644
index 000000000..b0f28023b
--- /dev/null
+++ b/docs/v1.X/docs/user_guide/database.md
@@ -0,0 +1,34 @@
+# Databases
+
+By default, the crowdsec Local API uses `SQLite` as backend storage. But in case you expect a lot of traffic on your local API, you should use `MySQL` or `PostgreSQL`.
+
+For `SQLite`, there is nothing to do on your side in order to make it work with crowdsec. But for `MySQL` and `PostgreSQL`, you have to create the database and the user.
+
+## MySQL
+
+Connect to your `MySQL` server and run the following commands:
+
+```
+mysql> CREATE DATABASE crowdsec;
+mysql> CREATE USER 'crowdsec'@'%' IDENTIFIED BY '';
+mysql> GRANT ALL PRIVILEGES ON crowdsec.* TO 'crowdsec'@'%';
+mysql> FLUSH PRIVILEGES;
+```
+
+Then edit `{{v1X.config.crowdsec_config_file}}` to update the [`db_config`](/Crowdsec/v1/references/database/#db_config) part.
+
+You can now start/restart crowdsec.
+
+## PostgreSQL
+
+Connect to your `PostgreSQL` server and run the following commands:
+
+```
+postgres=# CREATE DATABASE crowdsec;
+postgres=# CREATE USER crowdsec WITH PASSWORD '';
+postgres=# GRANT ALL PRIVILEGES ON DATABASE crowdsec TO crowdsec;
+```
+
+Then edit `{{v1X.config.crowdsec_config_file}}` to update the [`db_config`](/Crowdsec/v1/references/database/#db_config) part.
+
+You can now start/restart crowdsec.
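+As an illustration, the `db_config` section of `{{v1X.config.crowdsec_config_file}}` would then look roughly like this for MySQL. This is a sketch with placeholder values; see the reference documentation linked above for the authoritative list of fields :
+
+```yaml
+db_config:
+  type: mysql
+  user: crowdsec
+  # the password you set when creating the database user
+  password: "<password>"
+  db_name: crowdsec
+  host: 127.0.0.1
+  port: 3306
+```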
\ No newline at end of file
diff --git a/docs/v1.X/docs/user_guide/debugging_configs.md b/docs/v1.X/docs/user_guide/debugging_configs.md
new file mode 100644
index 000000000..13d170774
--- /dev/null
+++ b/docs/v1.X/docs/user_guide/debugging_configs.md
@@ -0,0 +1,124 @@
+# Debugging Scenarios and Parsers
+
+## General Advice
+
+When trying to debug a parser or a scenario :
+
+ - Work on "cold logs" (with the `-file` and `-type` options) rather than live ones
+ - Use the `/etc/crowdsec/config/user.yaml` configuration file to have logs on stdout
+
+## Using user-mode configuration
+
+```bash
+crowdsec -c /etc/crowdsec/config/user.yaml -file mylogs.log.gz -type syslog
+INFO[05-08-2020 16:15:47] Crowdsec v0.3.0-rc3-7525f11975a0107746213862dc41c69e00122ac7
+INFO[05-08-2020 16:15:47] Loading grok library
+...
+WARN[05-08-2020 16:16:12] 182.x.x.x triggered a 4h0m0s ip ban remediation for [crowdsecurity/http-probing] bucket_id=misty-moon event_time="2019-01-01 22:58:32 +0100 CET" scenario=crowdsecurity/http-probing source_ip=182.x.x.x
+...
+```
+
+ - `/etc/crowdsec/config/user.yaml` disables daemonization and pushes logs to stdout/stderr
+ - `-type` must respect the expected log type (ie. `nginx`, `syslog`, etc.)
+ - `-file` must point to a flat file or a gzip file
+
+When processing logs like this, {{v1X.crowdsec.name}} runs in "time machine" mode, and relies on the timestamps *in* the logs to evaluate scenarios. You will most likely need the `crowdsecurity/dateparse-enrich` parser for this.
+
+
+## Testing configurations on a live system
+
+If you're playing around with parsers/scenarios on a live system, you can use the `-t` (lint) option of {{v1X.crowdsec.Name}} to check the validity of your configuration before restarting/reloading services :
+
+```bash
+$ emacs /etc/crowdsec/config/scenarios/ssh-bf.yaml
+...
+$ crowdsec -c /etc/crowdsec/config/user.yaml -t
+INFO[06-08-2020 13:36:04] Crowdsec v0.3.0-rc3-4cffef42732944d4b81b3e62a03d4040ad74f185
+...
+ERRO[06-08-2020 13:36:05] Bad yaml in /etc/crowdsec/config/scenarios/ssh-bf.yaml : yaml: unmarshal errors:
+  line 2: field typex not found in type leakybucket.BucketFactory
+FATA[06-08-2020 13:36:05] Failed to load scenarios: Scenario loading failed : bad yaml in /etc/crowdsec/config/scenarios/ssh-bf.yaml : yaml: unmarshal errors:
+  line 2: field typex not found in type leakybucket.BucketFactory
+```
+
+Using this, you won't have to kill your running service before you know the scenarios/parsers are at least syntactically correct.
+
+
+## Using debug
+
+Both scenarios and parsers support a `debug: true|false` option which produces useful debug output.
+
+ Debug parsing output (expand) +```bash +DEBU[05-08-2020 15:25:36] eval(evt.Parsed.program == 'nginx') = TRUE id=lively-smoke name=crowdsecurity/nginx-logs stage=s01-parse +DEBU[05-08-2020 15:25:36] eval variables: id=lively-smoke name=crowdsecurity/nginx-logs stage=s01-parse +DEBU[05-08-2020 15:25:36] evt.Parsed.program = 'nginx' id=lively-smoke name=crowdsecurity/nginx-logs stage=s01-parse +DEBU[05-08-2020 15:25:36] Event entering node id=icy-dew name=child-crowdsecurity/nginx-logs stage=s01-parse +DEBU[05-08-2020 15:25:36] + Grok 'NGINXACCESS' returned 10 entries to merge in Parsed id=icy-dew name=child-crowdsecurity/nginx-logs stage=s01-parse +DEBU[05-08-2020 15:25:36] .Parsed['request'] = '/data.php' id=icy-dew name=child-crowdsecurity/nginx-logs stage=s01-parse +DEBU[05-08-2020 15:25:36] .Parsed['http_user_agent'] = 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:52.0) Gecko/20100101 Firefox/52.0' id=icy-dew name=child-crowdsecurity/nginx-logs stage=s01-parse +DEBU[05-08-2020 15:25:36] .Parsed['http_referer'] = '-' id=icy-dew name=child-crowdsecurity/nginx-logs stage=s01-parse +DEBU[05-08-2020 15:25:36] .Parsed['remote_addr'] = '123.x.x.x' id=icy-dew name=child-crowdsecurity/nginx-logs stage=s01-parse +DEBU[05-08-2020 15:25:36] .Parsed['remote_user'] = '-' id=icy-dew name=child-crowdsecurity/nginx-logs stage=s01-parse +DEBU[05-08-2020 15:25:36] .Parsed['time_local'] = '01/Jan/2019:01:39:06 +0100' id=icy-dew name=child-crowdsecurity/nginx-logs stage=s01-parse +DEBU[05-08-2020 15:25:36] .Parsed['method'] = 'POST' id=icy-dew name=child-crowdsecurity/nginx-logs stage=s01-parse +DEBU[05-08-2020 15:25:36] .Parsed['body_bytes_sent'] = '162' id=icy-dew name=child-crowdsecurity/nginx-logs stage=s01-parse +DEBU[05-08-2020 15:25:36] .Parsed['http_version'] = '1.1' id=icy-dew name=child-crowdsecurity/nginx-logs stage=s01-parse +DEBU[05-08-2020 15:25:36] .Parsed['status'] = '404' id=icy-dew name=child-crowdsecurity/nginx-logs stage=s01-parse +DEBU[05-08-2020 15:25:36] .Meta[log_type] = 'http_access-log' id=icy-dew name=child-crowdsecurity/nginx-logs stage=s01-parse +DEBU[05-08-2020 15:25:36] evt.StrTime = '01/Jan/2019:01:39:06 +0100' id=icy-dew name=child-crowdsecurity/nginx-logs stage=s01-parse +DEBU[05-08-2020 15:25:36] Event leaving node : ok id=icy-dew name=child-crowdsecurity/nginx-logs stage=s01-parse +DEBU[05-08-2020 15:25:36] child is success, OnSuccess=next_stage, skip id=lively-smoke name=crowdsecurity/nginx-logs stage=s01-parse +``` +
+ + +
+ Debug scenario output (expand) +```bash +DEBU[05-08-2020 16:02:26] eval(evt.Meta.service == 'http' && evt.Meta.http_status in ['404', '403', '400'] && evt.Parsed.static_ressource == 'false') = TRUE cfg=black-wave file=config/scenarios/http-probing.yaml name=crowdsecurity/http-probing +DEBU[05-08-2020 16:02:26] eval variables: cfg=black-wave file=config/scenarios/http-probing.yaml name=crowdsecurity/http-probing +DEBU[05-08-2020 16:02:26] evt.Meta.service = 'http' cfg=black-wave file=config/scenarios/http-probing.yaml name=crowdsecurity/http-probing +DEBU[05-08-2020 16:02:26] evt.Meta.http_status = '404' cfg=black-wave file=config/scenarios/http-probing.yaml name=crowdsecurity/http-probing +DEBU[05-08-2020 16:02:26] evt.Parsed.static_ressource = 'false' cfg=black-wave file=config/scenarios/http-probing.yaml name=crowdsecurity/http-probing +``` +
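+To obtain this kind of output, add the flag at the top level of the parser or scenario you are working on. A minimal sketch on a scenario (remember to remove it once you're done, as it is quite verbose) :
+
+```yaml
+type: leaky
+#enable the contextual debug logging shown above for this scenario only
+debug: true
+...
+```
+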
+
+
+# Test environments
+
+From a [{{v1X.crowdsec.name}} release archive]({{v1X.crowdsec.download_url}}), you can deploy a test (non-root) environment that is well suited for writing/debugging/testing parsers and scenarios. The environment is deployed using the `./test_env.sh` script from the tgz directory, which creates a test environment in `./tests` :
+
+```bash
+$ cd crowdsec-v0.3.0/
+$ ./test_env.sh
+...
+[08/05/2020:04:19:18 PM][INFO] Setting up configurations
+INFO[0000] Wrote new 75065 bytes index to config/crowdsec-cli/.index.json
+INFO[0000] crowdsecurity/syslog-logs : OK
+INFO[0000] crowdsecurity/geoip-enrich : OK
+...
+INFO[0007] Enabled collections : crowdsecurity/linux
+INFO[0007] Enabled crowdsecurity/linux
+[08/05/2020:04:19:26 PM][INFO] Environment is ready in /home/bui/github/crowdsec/crowdsec/crowdsec-v0.3.0/tests
+$ cd tests
+$ ./cscli -c dev.yaml list
+...
+INFO[0000] PARSERS:
+-------------------------------------------------------------------------------------------------------
+ NAME 📦 STATUS VERSION LOCAL PATH
+-------------------------------------------------------------------------------------------------------
+ crowdsecurity/geoip-enrich ✔️ enabled 0.2 config/parsers/s02-enrich/geoip-enrich.yaml
+ crowdsecurity/syslog-logs ✔️ enabled 0.3 config/parsers/s00-raw/syslog-logs.yaml
+ crowdsecurity/sshd-logs ✔️ enabled 0.2 config/parsers/s01-parse/sshd-logs.yaml
+ crowdsecurity/dateparse-enrich ✔️ enabled 0.1 config/parsers/s02-enrich/dateparse-enrich.yaml
+-------------------------------------------------------------------------------------------------------
+...
+$ ./crowdsec -c dev.yaml -file sshd.log -type syslog
+INFO[05-08-2020 16:23:32] Crowdsec v0.3.0-rc3-7525f11975a0107746213862dc41c69e00122ac7
+INFO[05-08-2020 16:23:32] Loading grok library
+...
+```
+
+
diff --git a/docs/v1.X/docs/user_guide/decision_management.md b/docs/v1.X/docs/user_guide/decision_management.md
new file mode 100644
index 000000000..22394daee
--- /dev/null
+++ b/docs/v1.X/docs/user_guide/decision_management.md
@@ -0,0 +1,102 @@
+!!! info
+
+    Please see your local `{{v1X.cli.bin}} help decisions` for up-to-date documentation.
+
+## List active decisions
+
+```bash
+{{v1X.cli.bin}} decisions list
+```
+
+    example
+```bash
+bui@sd:~$ cscli decisions list
++-----+-----------+-------------+----------------------------------+--------+---------+-------------------------+--------+--------------------+
+| ID | SOURCE | SCOPE:VALUE | REASON | ACTION | COUNTRY | AS | EVENTS | EXPIRATION |
++-----+-----------+-------------+----------------------------------+--------+---------+-------------------------+--------+--------------------+
+| 1 | crowdsec | Ip:1.2.3.4 | crowdsecurity/ssh-bf (v0.5) | ban | CN | No.31,Jin-rong Street | 6 | 3h59m14.803995692s |
+| 2 | crowdsec | Ip:1.2.3.4 | crowdsecurity/ssh-bf (v0.5) | ban | CN | No.31,Jin-rong Street | 6 | 3h59m14.803995692s |
+| 3 | cscli | Ip:1.2.3.4 | manual ban | ban | | | 1 | 3h59m14.803995692s |
+| 4 | cscli | Ip:1.2.3.5 | manual ban | ban | | | 1 | 3h59m58.986924109s |
++-----+-----------+-------------+----------------------------------+--------+---------+-------------------------+--------+--------------------+
+
+
+
+```
+
+ - `SOURCE` : the source of the decision:
+     - `crowdsec` : decision from the crowdsec agent
+     - `cscli` : decision from `cscli` (manual decision)
+     - `api` : decision from the crowdsec API
+ - `SCOPE:VALUE` is the target of the decision :
+     - "scope" : the scope of the decision (`ip`, `range`, `user` ...)
+     - "value" : the value the decision applies to (an IP, a range, a username ...)
+ - `REASON` is the scenario that was triggered (or a human-supplied reason)
+ - `ACTION` is the type of the decision (`ban`, `captcha` ...)
+ - `COUNTRY` and `AS` are provided by GeoIP enrichment if present
+ - `EVENTS` : the number of events that triggered this decision
+ - `EXPIRATION` is the time left on the remediation
+
+
+Check [command usage](/Crowdsec/v1/cscli/cscli_decisions_list/) for additional filtering and output control flags.
+
+
+## Add a decision
+ * default `duration`: `4h`
+ * default `type` : `ban`
+
+
+> Add a decision (ban) on IP `1.2.3.4` for 24 hours, with reason 'web bruteforce'
+
+```bash
+{{v1X.cli.bin}} decisions add --ip 1.2.3.4 --duration 24h --reason "web bruteforce"
+```
+
+> Add a decision (ban) on range `1.2.3.0/24` for 4 hours, with reason 'web bruteforce'
+
+```bash
+{{v1X.cli.bin}} decisions add --range 1.2.3.0/24 --reason "web bruteforce"
+```
+
+
+> Add a decision (captcha) on IP `1.2.3.4` for 4 hours (default duration), with reason 'web bruteforce'
+
+```bash
+{{v1X.cli.bin}} decisions add --ip 1.2.3.4 --reason "web bruteforce" --type captcha
+```
+
+
+
+## Delete a decision
+
+> Delete the decision on IP `1.2.3.4`
+
+```bash
+{{v1X.cli.bin}} decisions delete --ip 1.2.3.4
+```
+
+> Delete the decision on range `1.2.3.0/24`
+
+```bash
+{{v1X.cli.bin}} decisions delete --range 1.2.3.0/24
+```
+
+
+
+
+
+## Delete all existing bans
+
+> Flush all the existing bans
+
+```bash
+{{v1X.cli.bin}} decisions delete --all
+```
+
+!!! warning
+    This will remove any existing ban as well
+
+
+
diff --git a/docs/v1.X/docs/user_guide/forensic_mode.md b/docs/v1.X/docs/user_guide/forensic_mode.md
new file mode 100644
index 000000000..8c53b3509
--- /dev/null
+++ b/docs/v1.X/docs/user_guide/forensic_mode.md
@@ -0,0 +1,191 @@
+## Forensic mode
+
+While {{v1X.crowdsec.name}} can be used to monitor "live" logs, it can also be used on cold logs.
+It is a *great* way to test scenarios, detect false positives & false negatives, or simply generate reporting for a past time period.
+
+When doing so, {{v1X.crowdsec.name}} will read the logs and extract timestamps from them, so that the scenarios/buckets can be evaluated with the logs' timestamps. The resulting overflows will be pushed to the API like any other alert, but with the timestamps of the logs, allowing you to view the alerts in their original timeline.
+
+
+You can run :
+
+```bash
+crowdsec -c /etc/crowdsec/user.yaml -file /path/to/your/log/file.log -type log_file_type
+```
+
+Where `-file` points to the log file you want to process, and `-type` is similar to what you would put in your acquisition's label field, for example :
+
+```bash
+crowdsec -c /etc/crowdsec/user.yaml -file /var/log/nginx/2019.log -type nginx
+crowdsec -c /etc/crowdsec/user.yaml -file /var/log/sshd-2019.log -type syslog
+crowdsec -c /etc/crowdsec/user.yaml -jfilter "_SYSTEMD_UNIT=ssh.service --since yesterday" -type syslog
+```
+
+When running crowdsec in forensic mode, the alerts will be displayed on stdout, as well as pushed to the database :
+
+```bash
+# crowdsec -c /etc/crowdsec/user.yaml -file /var/log/nginx/nginx-2019.log.1 -type nginx
+...
+INFO[13-11-2020 13:05:23] Ip 123.206.50.249 performed 'crowdsecurity/http-probing' (11 events over 6s) at 2019-01-01 01:37:32 +0100 CET
+INFO[13-11-2020 13:05:23] Ip 123.206.50.249 performed 'crowdsecurity/http-backdoors-attempts' (2 events over 1s) at 2019-01-01 01:37:33 +0100 CET
+INFO[13-11-2020 13:05:24] (14baeedafc1e44c08b806fc0c1cd92c4/crowdsec) crowdsecurity/http-probing by ip 123.206.50.249 (CN) : 1h ban on Ip 123.206.50.249
+INFO[13-11-2020 13:05:24] (14baeedafc1e44c08b806fc0c1cd92c4/crowdsec) crowdsecurity/http-backdoors-attempts by ip 123.206.50.249 (CN) : 1h ban on Ip 123.206.50.249
+...
+```
+
+And as these alerts are also pushed to the database, it means you can view them in metabase, or using cscli !
+
+
+## Injecting alerts into an existing database
+
+If you already have a crowdsec/Local API instance running and want to inject events into the existing database, you can run crowdsec directly :
+
+```bash
+crowdsec -file ~/logs/nginx/access.log -type nginx --no-api
+```
+
+Crowdsec will process `~/logs/nginx/access.log` and push alerts to the Local API configured in your default configuration file (`/etc/crowdsec/config.yaml`, see `api.client.credentials_path`).
+
+## Injecting alerts into a new database - no local instance running
+
+If you don't have a service currently running, you can run crowdsec directly :
+
+```bash
+crowdsec -file ~/logs/nginx/access.log -type nginx
+```
+
+Crowdsec will start a Local API and process `~/logs/nginx/access.log`.
+
+
+## Injecting alerts into a new database - while a local instance is running
+
+If you have a local instance running and you don't want to pollute your existing database, we are going to configure a separate instance of the Local API & database.
+
+Let's copy the existing configuration to edit it :
+
+```bash
+$ cp /etc/crowdsec/config.yaml ./forensic.yaml
+$ emacs ./forensic.yaml
+```
+
+In our file, let's edit the local API & database config to ensure we're not going to pollute existing data :
+
+```bash
+$ emacs ./forensic.yaml
+...
+db_config:
+  type: sqlite
+  # we edit the db_path to point to a different SQLite database
+  db_path: /var/lib/crowdsec/data/crowdsec_alt.db
+  # let's comment out the auto-flush (database garbage collection)
+  #flush:
+  #  max_items: 5000
+  #  max_age: 7d
+
+...
+api:
+  client:
+    # we edit credentials_path to point to a local file
+    credentials_path: /tmp/local_api_credentials.yaml
+  server:
+    # we edit the listen_uri so that it doesn't try to listen on the same port as the existing Local API
+    listen_uri: localhost:8081
+```
+
+With the following edits, we ensure that :
+
+ - The SQLite database path is different : it avoids conflicts if you already had one running locally
+ - The local API credentials path is changed : we're going to register our machine to the ephemeral Local API
+ - The listen URI of the local API is changed : it avoids listen-port conflicts in case you already had one running locally
+ - The `flush` section is commented out : it ensures the database garbage collector won't run and delete the old events you're injecting ;)
+
+
+
+Let's create the new database and register a machine to it :
+
+```bash
+$ touch /tmp/local_api_credentials.yaml
+$ cscli -c forensic.yaml machines add --auto
+INFO[0000] Machine '...' created successfully
+INFO[0000] API credentials dumped to '/tmp/local_api_credentials.yaml'
+$ cat /tmp/local_api_credentials.yaml
+url: http://localhost:8081
+login: ...
+password: ...
+```
+
+Now we can start the new Local API and crowdsec :
+
+```bash
+$ crowdsec -c ./forensic.yaml -file ~/github/crowdsec/OLDS/LOGS/nginx/10k_ACCESS_LOGS.log -type nginx
+...
+INFO[15-11-2020 10:09:20] Ip x.x.x.x performed 'crowdsecurity/http-bad-user-agent' (2 events over 0s) at 2017-10-21 13:58:38 +0200 CEST
+INFO[15-11-2020 10:09:20] Ip y.y.y.y performed 'crowdsecurity/http-probing' (11 events over 0s) at 2017-10-23 12:00:34 +0200 CEST
+...
+```
+
+And we can even fire up a dedicated dashboard to view the data :
+
+```bash
+$ cscli -c forensic.yaml dashboard setup
+INFO[0000] /var/lib/crowdsec/data/metabase.db exists, skip.
+INFO[0000] Pulling docker image metabase/metabase:v0.37.0.2
+...
+INFO[0001] creating container '/crowdsec-metabase'
+INFO[0002] waiting for metabase to be up (can take up to a minute)
+.........
+INFO[0040] Metabase is ready
+
+	URL : 'http://127.0.0.1:3000'
+	username : 'crowdsec@crowdsec.net'
+	password : ...
+```
+
+## Injecting alerts into a new database - dev env
+
+From a fresh release :
+
+```bash
+$ tar xvzf crowdsec-release.tgz
+$ cd crowdsec-v1.0.0-rc
+$ ./test_env.sh
+$ cd tests
+```
+
+Install the needed collection(s) :
+
+```bash
+$ ./cscli -c dev.yaml collections install crowdsecurity/nginx
+```
+
+And we can process logs :
+
+```bash
+$ ./crowdsec -c dev.yaml -file ~/github/crowdsec/OLDS/LOGS/nginx/10k_ACCESS_LOGS.log -type nginx
+INFO[0000] single file mode : log_media=stdout daemonize=true
+INFO[15-11-2020 11:18:27] Crowdsec v1.0.0-rc-0ecb142dfffc89b019b6d9044cb7cc5569d12c70
+INFO[15-11-2020 11:18:38] Ip x.x.x.x performed 'crowdsecurity/http-sensitive-files' (5 events over 4s) at 2017-10-23 12:35:54 +0200 CEST
+INFO[15-11-2020 11:18:39] (test/crowdsec) crowdsecurity/http-probing by ip x.x.x.x (DE) : 1h ban on Ip x.x.x.x
+```
+
+And we can then query the local API (while leaving {{v1X.crowdsec.name}} running) :
+```bash
+$ ./cscli -c dev.yaml alerts list
++----+--------------------+---------------------------------------+---------+--------------+-----------+--------------------------------+
+| ID | VALUE | REASON | COUNTRY | AS | DECISIONS | CREATED AT |
++----+--------------------+---------------------------------------+---------+--------------+-----------+--------------------------------+
+| 28 | Ip:x.x.x.x | crowdsecurity/http-crawl-non_statics | DE | Linode, LLC | ban:1 | 2017-10-23 12:36:48 +0200 |
+| | | | | | | +0200 |
+| 27 | Ip:x.x.x.x | crowdsecurity/http-sensitive-files | DE | Linode, LLC | ban:1 | 2017-10-23 12:35:50 +0200 |
+| | | | | | | +0200 |
+
+```
+
+Or even start a dashboard to view the data :
+
+```bash
+$ sudo ./cscli dashboard setup
+...
+INFO[0002] waiting for metabase to be up (can take up to a minute)
+........
+
+```
\ No newline at end of file
diff --git a/docs/v1.X/docs/user_guide/simulation_mode.md b/docs/v1.X/docs/user_guide/simulation_mode.md
new file mode 100644
index 000000000..62b1543a8
--- /dev/null
+++ b/docs/v1.X/docs/user_guide/simulation_mode.md
@@ -0,0 +1,32 @@
+# Simulation
+
+```bash
+$ cscli simulation status
+INFO[0000] global simulation: disabled
+INFO[0000] Scenarios in simulation mode :
+INFO[0000]   - crowdsecurity/ssh-bf
+```
+
+`cscli simulation` allows you to manage a list of scenarios whose remediation is "simulated" : the decisions won't be effective (but will still be shown by `cscli decisions list`). This configuration is kept in `/etc/crowdsec/simulation.yaml`.
+
+You can add scenarios to and remove them from the simulation list :
+
+```bash
+$ cscli simulation enable crowdsecurity/ssh-bf
+INFO[0000] simulation mode for 'crowdsecurity/ssh-bf' enabled
+INFO[0000] Run 'systemctl reload crowdsec' for the new configuration to be effective.
+$ systemctl reload crowdsec
+$ tail -f /var/log/crowdsec.log
+...
+time="01-11-2020 14:08:58" level=info msg="Ip 1.2.3.6 performed 'crowdsecurity/ssh-bf' (6 events over 986.769µs) at 2020-11-01 14:08:58.575885389 +0100 CET m=+437.524832750"
+time="01-11-2020 14:08:58" level=info msg="Ip 1.2.3.6 decision : 1h (simulation) ban"
+$ cscli decisions list
++----+----------+--------------+-----------------------------------+------------+---------+----+--------+------------------+
+| ID | SOURCE | SCOPE:VALUE | REASON | ACTION | COUNTRY | AS | EVENTS | EXPIRATION |
++----+----------+--------------+-----------------------------------+------------+---------+----+--------+------------------+
+| 4 | crowdsec | Ip:1.2.3.6 | crowdsecurity/ssh-bf | (simul)ban | US | | 6 | 59m38.293036072s |
++----+----------+--------------+-----------------------------------+------------+---------+----+--------+------------------+
+
+```
+
+You can also turn on "global simulation" : in this case, only the scenarios in the exclusion list will have their decisions applied.
\ No newline at end of file
diff --git a/docs/v1.X/docs/write_configurations/acquisition.md b/docs/v1.X/docs/write_configurations/acquisition.md
new file mode 100644
index 000000000..c361fc6bf
--- /dev/null
+++ b/docs/v1.X/docs/write_configurations/acquisition.md
@@ -0,0 +1,34 @@
+# Write the acquisition file (optional for test)
+
+In order for your log to be processed by the right parser, it must match the filter that you will configure in your parser file.
+
+The filters of the parsers in the first (`s00-raw`) stage will usually check `evt.Line.Labels.type`, which is the label of your acquisition file.
+
+With an acquisition file like this :
+
+```yaml
+filename: /path/to/log/file.log
+labels:
+  type: my_program
+```
+
+ - The log line will enter the parsing pipeline with `evt.Line.Labels.type` set to `my_program`
+ - The parsers in the 1st stage (`s00-raw`) deal with the raw format, and the program name will end up in `evt.Parsed.program`
+ - When the log line arrives at the main parsing stage (`s01-parse`), `evt.Parsed.program` will be `my_program`
+
+
+For example, this acquisition file :
+
+```yaml
+filename: /var/log/nginx/access.log
+labels:
+  type: nginx
+```
+
+will be read by this parser :
+
+```yaml
+filter: "evt.Parsed.program startsWith 'nginx'"
+onsuccess: next_stage
+...
+```
diff --git a/docs/v1.X/docs/write_configurations/parsers.md b/docs/v1.X/docs/write_configurations/parsers.md
new file mode 100644
index 000000000..2d07cf4f4
--- /dev/null
+++ b/docs/v1.X/docs/write_configurations/parsers.md
@@ -0,0 +1,259 @@
+# Writing a {{v1X.crowdsec.Name}} parser
+
+!!! warning "Parser dependency"
+    The crowdsecurity/syslog-logs parser is needed by the core parsing
+    engine. Deleting or modifying it could result in {{v1X.crowdsec.name}}
+    being unable to parse logs, so this should be done very carefully.
+
+> In the current example, we'll write a parser for the logs produced by `iptables` (netfilter) with the `-j LOG` target.
+> This document aims at detailing the process of writing and testing new parsers.
+
+## Base parser file
+
+The simplest parser can be defined as :
+
+
+```yaml
+filter: 1 == 1
+debug: true
+onsuccess: next_stage
+name: me/myparser
+description: a cool parser for my service
+grok:
+#our grok pattern : capture .*
+  pattern: ^%{DATA:some_data}$
+#the field to which we apply the grok pattern : the log message itself
+  apply_on: message
+statics:
+  - parsed: is_my_service
+    value: yes
+```
+
+ - a {{v1X.filter.htmlname}} : if the expression is `true`, the event will enter the parser, otherwise, it won't
+ - an {{v1X.onsuccess.htmlname}} : defines what happens when the {{v1X.event.htmlname}} was successfully parsed : shall we continue ? shall we move to the next stage ? etc.
+ - a name & a description
+ - some {{v1X.statics.htmlname}} that will modify the {{v1X.event.htmlname}}
+ - a `debug` flag that enables local debugging information.
+
+
+We are going to use the following sample log as an example :
+```bash
+May 11 16:23:43 sd-126005 kernel: [47615895.771900] IN=enp1s0 OUT= MAC=00:08:a2:0c:1f:12:00:c8:8b:e2:d6:87:08:00 SRC=99.99.99.99 DST=127.0.0.1 LEN=40 TOS=0x00 PREC=0x00 TTL=245 ID=51006 PROTO=TCP SPT=45225 DPT=8888 WINDOW=1024 RES=0x00 SYN URGP=0
+May 11 16:23:50 sd-126005 kernel: [47615902.763137] IN=enp1s0 OUT= MAC=00:08:a2:0c:1f:12:00:c8:8b:e2:d6:87:08:00 SRC=44.44.44.44 DST=127.0.0.1 LEN=60 TOS=0x00 PREC=0x00 TTL=49 ID=17451 DF PROTO=TCP SPT=53668 DPT=80 WINDOW=14600 RES=0x00 SYN URGP=0
+```
+
+## Trying our mock parser
+
+!!! warning
+    Your yaml file must be in the `config/parsers/s01-parser/` directory.
+
+    For example it can be `~/crowdsec-v0.0.19/tests/config/parsers/s01-parser/myparser.yaml`, or `/etc/crowdsec/config/parsers/s01-parser/myparser.yaml`.
+
+    The {{v1X.stage.htmlname}} directory might not exist; don't forget to create it.
+
+(deployment assumes [you're using a test environment](/Crowdsec/v1/write_configurations/requirements/))
+
+Setting up our new parser :
+```bash
+cd crowdsec-v0.X.Y/tests
+mkdir -p config/parsers/s01-parser
+cp myparser.yaml config/parsers/s01-parser/
+./crowdsec -c ./dev.yaml -file ./x.log -type foobar
+```
+ Expected output + +```bash +INFO[0000] setting loglevel to info +INFO[11-05-2020 15:48:28] Crowdsec v0.0.18-6b1281ba76819fed4b89247a5a673c592a3a9f88 +... +DEBU[0000] Event entering node id=dark-water name=me/myparser stage=s01-parser +DEBU[0000] eval(TRUE) '1 == 1' id=dark-water name=me/myparser stage=s01-parser +DEBU[0000] no ip in event, cidr/ip whitelists not checked id=dark-water name=me/myparser stage=s01-parser +DEBU[0000] + Grok '' returned 1 entries to merge in Parsed id=dark-water name=me/myparser stage=s01-parser +DEBU[0000] .Parsed['some_data'] = 'May 11 16:23:41 sd-126005 kernel: [47615893.721616] IN=enp1s0 OUT= MAC=00:08:a2:0c:1f:12:00:c8:8b:e2:d6:87:08:00 SRC=99.99.99.99 DST=127.0.0.1 LEN=40 TOS=0x00 PREC=0x00 TTL=245 ID=54555 PROTO=TCP SPT=45225 DPT=8080 WINDOW=1024 RES=0x00 SYN URGP=0 ' id=dark-water name=me/myparser stage=s01-parser +DEBU[0000] + Processing 1 statics id=dark-water name=me/myparser stage=s01-parser +DEBU[0000] .Parsed[is_my_service] = 'yes' id=dark-water name=me/myparser stage=s01-parser +DEBU[0000] Event leaving node : ok id=dark-water name=me/myparser stage=s01-parser +DEBU[0000] move Event from stage s01-parser to s02-enrich id=dark-water name=me/myparser stage=s01-parser +... +``` +
+
+
+We can see our "mock" parser is working, let's see what happened :
+
+ - The event enters the node
+ - The `filter` returned true (`1 == 1`) so the {{v1X.event.htmlname}} will be processed
+ - Our grok pattern (just a `.*` capture) "worked" and captured data (the whole line actually)
+ - The grok captures (under the name "some_data") are merged into the `.Parsed` map of the {{v1X.event.htmlname}}
+ - The {{v1X.statics.htmlname}} section is processed, and `.Parsed[is_my_service]` is set to `yes`
+ - The {{v1X.event.htmlname}} leaves the parser successfully, and because "next_stage" is set, we move the event to the next "stage"
+
+## Writing the GROK pattern
+
+We are going to write a parser for `iptables` logs, which look like this :
+
+```
+May 11 16:23:43 sd-126005 kernel: [47615895.771900] IN=enp1s0 OUT= MAC=00:08:a2:0c:1f:12:00:c8:8b:e2:d6:87:08:00 SRC=99.99.99.99 DST=127.0.0.1 LEN=40 TOS=0x00 PREC=0x00 TTL=245 ID=51006 PROTO=TCP SPT=45225 DPT=8888 WINDOW=1024 RES=0x00 SYN URGP=0
+May 11 16:23:50 sd-126005 kernel: [47615902.763137] IN=enp1s0 OUT= MAC=00:08:a2:0c:1f:12:00:c8:8b:e2:d6:87:08:00 SRC=44.44.44.44 DST=127.0.0.1 LEN=60 TOS=0x00 PREC=0x00 TTL=49 ID=17451 DF PROTO=TCP SPT=53668 DPT=80 WINDOW=14600 RES=0x00 SYN URGP=0
+
+```
+
+Using an [online grok debugger](https://grokdebug.herokuapp.com/) or an [online regex debugger](https://www.debuggex.com/), we come up with the following grok pattern :
+
+`\[%{DATA}\]+.*(%{WORD:action})? IN=%{WORD:int_eth} OUT= MAC=%{IP}:%{MAC} SRC=%{IP:src_ip} DST=%{IP:dst_ip} LEN=%{INT:length}.*PROTO=%{WORD:proto} SPT=%{INT:src_port} DPT=%{INT:dst_port}.*`
+
+!!! warning
+    Check if the pattern you are looking for is not already present in the [patterns configuration](https://github.com/crowdsecurity/crowdsec/tree/master/config/patterns).
+
+
+## Test our new pattern
+
+Now, let's integrate our GROK pattern within our YAML :
+
+```yaml
+#let's set onsuccess to "next_stage" : if the log is parsed, we can consider it has been dealt with
+onsuccess: next_stage
+#debug, for reasons (don't do this in production)
+debug: true
+#for now, accept all events (we will restrict the filter when finalizing the parser)
+filter: "1 == 1"
+#name and description:
+name: crowdsecurity/iptables-logs
+description: "Parse iptables drop logs"
+grok:
+#our grok pattern
+  pattern: \[%{DATA}\]+.*(%{WORD:action})? IN=%{WORD:int_eth} OUT= MAC=%{IP}:%{MAC} SRC=%{IP:src_ip} DST=%{IP:dst_ip} LEN=%{INT:length}.*PROTO=%{WORD:proto} SPT=%{INT:src_port} DPT=%{INT:dst_port}.*
+#the field to which we apply the grok pattern : the log message itself
+  apply_on: message
+statics:
+  - parsed: is_my_service
+    value: yes
+```
+
+
+```bash
+./crowdsec -c ./dev.yaml -file ./x.log -type foobar
+```
+
+
+ Expected output + +```bash +INFO[0000] setting loglevel to info +INFO[11-05-2020 16:18:58] Crowdsec v0.0.18-6b1281ba76819fed4b89247a5a673c592a3a9f88 +... +DEBU[0000] Event entering node id=lingering-breeze name=crowdsecurity/iptables-logs stage=s01-parser +DEBU[0000] eval(TRUE) '1 == 1' id=lingering-breeze name=crowdsecurity/iptables-logs stage=s01-parser +DEBU[0000] no ip in event, cidr/ip whitelists not checked id=lingering-breeze name=crowdsecurity/iptables-logs stage=s01-parser +DEBU[0000] + Grok '' returned 8 entries to merge in Parsed id=lingering-breeze name=crowdsecurity/iptables-logs stage=s01-parser +DEBU[0000] .Parsed['dst_port'] = '8080' id=lingering-breeze name=crowdsecurity/iptables-logs stage=s01-parser +DEBU[0000] .Parsed['action'] = '' id=lingering-breeze name=crowdsecurity/iptables-logs stage=s01-parser +DEBU[0000] .Parsed['int_eth'] = 'enp1s0' id=lingering-breeze name=crowdsecurity/iptables-logs stage=s01-parser +DEBU[0000] .Parsed['src_ip'] = '99.99.99.99' id=lingering-breeze name=crowdsecurity/iptables-logs stage=s01-parser +DEBU[0000] .Parsed['dst_ip'] = '127.0.0.1' id=lingering-breeze name=crowdsecurity/iptables-logs stage=s01-parser +DEBU[0000] .Parsed['length'] = '40' id=lingering-breeze name=crowdsecurity/iptables-logs stage=s01-parser +DEBU[0000] .Parsed['proto'] = 'TCP' id=lingering-breeze name=crowdsecurity/iptables-logs stage=s01-parser +DEBU[0000] .Parsed['src_port'] = '45225' id=lingering-breeze name=crowdsecurity/iptables-logs stage=s01-parser +DEBU[0000] + Processing 1 statics id=lingering-breeze name=crowdsecurity/iptables-logs stage=s01-parser +DEBU[0000] .Parsed[is_my_service] = 'yes' id=lingering-breeze name=crowdsecurity/iptables-logs stage=s01-parser +DEBU[0000] Event leaving node : ok id=lingering-breeze name=crowdsecurity/iptables-logs stage=s01-parser +DEBU[0000] move Event from stage s01-parser to s02-enrich id=lingering-breeze name=crowdsecurity/iptables-logs stage=s01-parser +... +``` + +
+
+What changed ? We can now see that the fragments captured by the GROK pattern are merged into the `Parsed` map !
+We now have parsed data, only a few more changes and we will be done :)
+
+## Finalizing our parser
+
+```yaml
+#let's set onsuccess to "next_stage" : if the log is parsed, we can consider it has been dealt with
+onsuccess: next_stage
+#debug, for reasons (don't do this in production)
+debug: true
+#as seen in our sample log, those logs are processed by the system and have a progname set to 'kernel'
+filter: "evt.Parsed.program == 'kernel'"
+#name and description:
+name: crowdsecurity/iptables-logs
+description: "Parse iptables drop logs"
+grok:
+#our grok pattern
+  pattern: \[%{DATA}\]+.*(%{WORD:action})? IN=%{WORD:int_eth} OUT= MAC=%{IP}:%{MAC} SRC=%{IP:src_ip} DST=%{IP:dst_ip} LEN=%{INT:length}.*PROTO=%{WORD:proto} SPT=%{INT:src_port} DPT=%{INT:dst_port}.*
+#the field to which we apply the grok pattern : the log message itself
+  apply_on: message
+statics:
+  - meta: log_type
+    value: iptables_drop
+  - meta: service
+    expression: "evt.Parsed.proto == 'TCP' ? 'tcp' : 'unknown'"
+  - meta: source_ip
+    expression: "evt.Parsed.src_ip"
+```
+
+### filter
+
+We changed the {{v1X.filter.htmlname}} to correctly filter on the program name.
+In the current example, our logs are produced by the kernel (netfilter), and thus the program is `kernel` :
+
+```bash
+tail -f /var/log/kern.log
+May 11 16:23:50 sd-126005 kernel: [47615902.763137] IN=enp1s0 OUT= MAC=00:08:a2:0c:1f:12:00:c8:8b:e2:d6:87:08:00 SRC=44.44.44.44 DST=127.0.0.1 LEN=60 TOS=0x00 PREC=0x00 TTL=49 ID=17451 DF PROTO=TCP SPT=53668 DPT=80 WINDOW=14600 RES=0x00 SYN URGP=0
+```
+
+### statics
+
+We are setting various entries to static or dynamic values to give "context" to the log :
+
+ - `.Meta.log_type` is set to `iptables_drop` (so that we can later filter events coming from this parser)
+ - `.Meta.source_ip` is set to the source IP captured in `.Parsed.src_ip`
+ - `.Meta.service` is set to the result of an expression that relies on the GROK output (the `proto` field)
+
+Look into the dedicated {{v1X.statics.htmlname}} documentation to learn more about its possibilities.
+
+
+### Testing our finalized parser
+
+
+```bash
+./crowdsec -c ./dev.yaml -file ./x.log -type kernel
+```
+
+
+ Expected output +```bash +... +DEBU[0000] Event entering node id=shy-forest name=crowdsecurity/iptables-logs stage=s01-parser +DEBU[0000] eval(TRUE) 'evt.Parsed.program == 'kernel'' id=shy-forest name=crowdsecurity/iptables-logs stage=s01-parser +DEBU[0000] no ip in event, cidr/ip whitelists not checked id=shy-forest name=crowdsecurity/iptables-logs stage=s01-parser +DEBU[0000] + Grok '' returned 8 entries to merge in Parsed id=shy-forest name=crowdsecurity/iptables-logs stage=s01-parser +DEBU[0000] .Parsed['src_port'] = '45225' id=shy-forest name=crowdsecurity/iptables-logs stage=s01-parser +DEBU[0000] .Parsed['dst_port'] = '8118' id=shy-forest name=crowdsecurity/iptables-logs stage=s01-parser +DEBU[0000] .Parsed['action'] = '' id=shy-forest name=crowdsecurity/iptables-logs stage=s01-parser +DEBU[0000] .Parsed['int_eth'] = 'enp1s0' id=shy-forest name=crowdsecurity/iptables-logs stage=s01-parser +DEBU[0000] .Parsed['src_ip'] = '44.44.44.44' id=shy-forest name=crowdsecurity/iptables-logs stage=s01-parser +DEBU[0000] .Parsed['dst_ip'] = '127.0.0.1' id=shy-forest name=crowdsecurity/iptables-logs stage=s01-parser +DEBU[0000] .Parsed['length'] = '40' id=shy-forest name=crowdsecurity/iptables-logs stage=s01-parser +DEBU[0000] .Parsed['proto'] = 'TCP' id=shy-forest name=crowdsecurity/iptables-logs stage=s01-parser +DEBU[0000] + Processing 3 statics id=shy-forest name=crowdsecurity/iptables-logs stage=s01-parser +DEBU[0000] .Meta[log_type] = 'iptables_drop' id=shy-forest name=crowdsecurity/iptables-logs stage=s01-parser +DEBU[0000] .Meta[service] = 'tcp' id=shy-forest name=crowdsecurity/iptables-logs stage=s01-parser +DEBU[0000] .Meta[source_ip] = '44.44.44.44' id=shy-forest name=crowdsecurity/iptables-logs stage=s01-parser +DEBU[0000] Event leaving node : ok id=shy-forest name=crowdsecurity/iptables-logs stage=s01-parser +DEBU[0000] move Event from stage s01-parser to s02-enrich id=shy-forest name=crowdsecurity/iptables-logs stage=s01-parser +... +``` +
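+
+Note that instead of an inline `pattern`, the `grok` section of a parser can also reference an existing pattern by `name` (see [the parser reference documentation](/Crowdsec/v1/references/parsers/)). This is handy when the pattern you need is already present in the patterns configuration mentioned earlier. A minimal sketch, assuming a pattern registered under the hypothetical name `MY_IPTABLES_DROP` :
+
+```yaml
+onsuccess: next_stage
+filter: "evt.Parsed.program == 'kernel'"
+name: crowdsecurity/iptables-logs
+description: "Parse iptables drop logs"
+grok:
+  #reference a pattern registered in the patterns configuration (hypothetical name, for illustration)
+  name: MY_IPTABLES_DROP
+  apply_on: message
+statics:
+  - meta: log_type
+    value: iptables_drop
+```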
+
+## Closing word
+
+We now have a fully functional parser for {{v1X.crowdsec.name}} !
+We can either deploy it to our production systems, or even better, contribute it to the {{v1X.hub.htmlname}} !
+
+If you want to know more about directives and possibilities, take a look at [the parser reference documentation](/Crowdsec/v1/references/parsers/) !
+
diff --git a/docs/v1.X/docs/write_configurations/requirements.md b/docs/v1.X/docs/write_configurations/requirements.md
new file mode 100644
index 000000000..f9321c1db
--- /dev/null
+++ b/docs/v1.X/docs/write_configurations/requirements.md
@@ -0,0 +1,72 @@
+# Requirements
+
+> - Having read and understood [`crowdsec` concepts](/Crowdsec/v1/getting_started/concepts/)
+
+> - Some requirements are needed in order to be able to write your own end-to-end configurations.
+
+> - Throughout this documentation, we are going to show, as an example, how we wrote a full port-scan detection scenario (from acquisition to scenario, including the parser)
+
+
+## Create the test environment
+
+First of all, please [download the latest release of {{v1X.crowdsec.name}}](https://github.com/crowdsecurity/crowdsec/releases).
+
+Then run the following commands:
+
+```bash
+tar xzvf crowdsec-release.tgz
+```
+```bash
+cd ./crowdsec-vX.Y/
+```
+```bash
+./test_env.sh # the -o flag is optional, the default output directory is "./tests/"
+```
+```bash
+cd ./tests/
+```
+
+The `./test_env.sh` script creates a local (non-privileged) working environment for {{v1X.crowdsec.name}} and {{v1X.cli.name}}.
+The deployed environment is intended for writing and testing parsers and scenarios easily.
+
+
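+For reference, the layout it deploys looks roughly like this (a sketch based on the output shown below ; exact content may vary across versions) :
+
+```
+tests/
+├── crowdsec                      # the local crowdsec binary
+├── dev.yaml                      # configuration used with ./crowdsec -c ./dev.yaml
+├── config/
+│   ├── parsers/                  # where we will drop our parsers (per-stage sub-directories)
+│   ├── scenarios/                # where we will drop our scenarios
+│   ├── collections/
+│   ├── hub/                      # local hub index
+│   └── local_api_credentials.yaml
+└── data/                         # data files (eg. GeoIP databases)
+```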
+
+ Example
+
+```bash
+$ tar xzvf ./crowdsec-release.tgz
+$ cd ./crowdsec-v*/
+$ ./test_env.sh
+[12/11/2020:11:45:19][INFO] Creating test arboresence in /tmp/crowdsec-v1.0.0/tests
+[12/11/2020:11:45:19][INFO] Arboresence created
+[12/11/2020:11:45:19][INFO] Copying needed files for tests environment
+[12/11/2020:11:45:19][INFO] Files copied
+[12/11/2020:11:45:19][INFO] Setting up configurations
+INFO[0000] Machine 'test' created successfully
+INFO[0000] API credentials dumped to '/tmp/crowdsec-v1.0.0/tests/config/local_api_credentials.yaml'
+INFO[0000] Wrote new 73826 bytes index to /tmp/crowdsec-v1.0.0/tests/config/hub/.index.json
+INFO[0000] crowdsecurity/syslog-logs : OK
+INFO[0000] crowdsecurity/geoip-enrich : OK
+INFO[0000] downloading data 'https://crowdsec-statics-assets.s3-eu-west-1.amazonaws.com/GeoLite2-City.mmdb' in '/tmp/crowdsec-v1.0.0/tests/data/GeoLite2-City.mmdb'
+INFO[0002] downloading data 'https://crowdsec-statics-assets.s3-eu-west-1.amazonaws.com/GeoLite2-ASN.mmdb' in '/tmp/crowdsec-v1.0.0/tests/data/GeoLite2-ASN.mmdb'
+INFO[0003] crowdsecurity/dateparse-enrich : OK
+INFO[0003] crowdsecurity/sshd-logs : OK
+INFO[0004] crowdsecurity/ssh-bf : OK
+INFO[0004] crowdsecurity/sshd : OK
+WARN[0004] crowdsecurity/sshd : overwrite
+INFO[0004] crowdsecurity/linux : OK
+INFO[0004] /tmp/crowdsec-v1.0.0/tests/config/collections doesn't exist, create
+INFO[0004] Enabled parsers : crowdsecurity/syslog-logs
+INFO[0004] Enabled parsers : crowdsecurity/geoip-enrich
+INFO[0004] Enabled parsers : crowdsecurity/dateparse-enrich
+INFO[0004] Enabled parsers : crowdsecurity/sshd-logs
+INFO[0004] Enabled scenarios : crowdsecurity/ssh-bf
+INFO[0004] Enabled collections : crowdsecurity/sshd
+INFO[0004] Enabled collections : crowdsecurity/linux
+INFO[0004] Enabled crowdsecurity/linux
+INFO[0004] Run 'systemctl reload crowdsec' for the new configuration to be effective.
+[12/11/2020:11:45:25][INFO] Environment is ready in /tmp/crowdsec-v1.0.0/tests
+
+```
+
+
diff --git a/docs/v1.X/docs/write_configurations/scenarios.md b/docs/v1.X/docs/write_configurations/scenarios.md
new file mode 100644
index 000000000..5b3cbd04c
--- /dev/null
+++ b/docs/v1.X/docs/write_configurations/scenarios.md
@@ -0,0 +1,348 @@
+# Writing {{v1X.crowdsec.Name}} scenarios
+
+!!! info
+    Please ensure that you have a working environment or [set up a test environment](/Crowdsec/v1/write_configurations/requirements/) before writing your scenario.
+
+    Ensure that [your logs are properly parsed](/Crowdsec/v1/write_configurations/parsers/).
+
+    Have some sample logs at hand to test your scenario as you progress.
+
+
+> In the current example, we'll write a scenario to detect port scans relying on the logs produced by `iptables` (netfilter) with the `-j LOG` target.
+
+> This document aims at detailing the process of writing and testing new scenarios.
+
+> If you're writing a scenario for existing logs, [take a look at the taxonomy](https://hub.crowdsec.net/fields) to find your way !
+
+
+## Base scenario file
+
+
+A rudimentary scenario can be defined as :
+!!! warning
+    Your yaml file must be in the `config/scenarios/` directory.
+
+```yaml
+type: leaky
+debug: true
+name: me/my-cool-scenario
+description: "detect cool stuff"
+filter: evt.Meta.log_type == 'iptables_drop'
+capacity: 1
+leakspeed: 1m
+blackhole: 1m
+labels:
+  type: my_test
+```
+
+ - a {{v1X.filter.htmlname}} : if the expression is `true`, the event will enter the scenario, otherwise, it won't
+ - a name & a description
+ - a capacity for our [Leaky Bucket](https://en.wikipedia.org/wiki/Leaky_bucket)
+ - a leak speed for our [Leaky Bucket](https://en.wikipedia.org/wiki/Leaky_bucket)
+ - a blackhole duration (it prevents the same bucket from overflowing too often, to limit spam)
+ - some labels to qualify the events that just happened
+ - a `debug` flag that lets you enable local debugging information.
+
+
+We are going to use the following sample log in our example :
+
+```bash
+May 12 09:40:15 sd-126005 kernel: [47678084.929208] IN=enp1s0 OUT= MAC=00:08:a2:0c:1f:12:00:c8:8b:e2:d6:87:08:00 SRC=66.66.66.66 DST=127.0.0.1 LEN=40 TOS=0x08 PREC=0x20 TTL=244 ID=54321 PROTO=TCP SPT=42403 DPT=7681 WINDOW=65535 RES=0x00 SYN URGP=0
+May 12 09:40:15 sd-126005 kernel: [47678084.929245] IN=enp1s0 OUT= MAC=00:08:a2:0c:1f:12:00:c8:8b:e2:d6:87:08:00 SRC=99.99.99.99 DST=127.0.0.1 LEN=40 TOS=0x08 PREC=0x20 TTL=244 ID=54321 PROTO=TCP SPT=42403 DPT=7681 WINDOW=65535 RES=0x00 SYN URGP=0
+May 12 09:40:16 sd-126005 kernel: [47678084.929208] IN=enp1s0 OUT= MAC=00:08:a2:0c:1f:12:00:c8:8b:e2:d6:87:08:00 SRC=99.99.99.99 DST=127.0.0.1 LEN=40 TOS=0x08 PREC=0x20 TTL=244 ID=54321 PROTO=TCP SPT=42403 DPT=7681 WINDOW=65535 RES=0x00 SYN URGP=0
+May 12 09:40:16 sd-126005 kernel: [47678084.929208] IN=enp1s0 OUT= MAC=00:08:a2:0c:1f:12:00:c8:8b:e2:d6:87:08:00 SRC=44.44.44.44 DST=127.0.0.1 LEN=40 TOS=0x08 PREC=0x20 TTL=244 ID=54321 PROTO=TCP SPT=42403 DPT=7681 WINDOW=65535 RES=0x00 SYN URGP=0
+```
+
+## Let's try our mock scenario
+
+!!! info
+    This assumes that you've followed the previous tutorial and that your iptables logs are properly parsed.
+
+
+```bash
+./crowdsec -c ./dev.yaml -file ./x.log -type syslog
+```
+
+
+ Expected output +```bash +DEBU[04-08-2020 10:44:26] eval(evt.Meta.log_type == 'iptables_drop') = TRUE cfg=shy-dust file=config/scenarios/iptables-scan.yaml name=me/my-cool-scenario +DEBU[04-08-2020 10:44:26] eval variables: cfg=shy-dust file=config/scenarios/iptables-scan.yaml name=me/my-cool-scenario +DEBU[04-08-2020 10:44:26] evt.Meta.log_type = 'iptables_drop' cfg=shy-dust file=config/scenarios/iptables-scan.yaml name=me/my-cool-scenario +... +DEBU[04-08-2020 10:44:26] eval(evt.Meta.log_type == 'iptables_drop') = TRUE cfg=shy-dust file=config/scenarios/iptables-scan.yaml name=me/my-cool-scenario +DEBU[04-08-2020 10:44:26] eval variables: cfg=shy-dust file=config/scenarios/iptables-scan.yaml name=me/my-cool-scenario +DEBU[04-08-2020 10:44:26] evt.Meta.log_type = 'iptables_drop' cfg=shy-dust file=config/scenarios/iptables-scan.yaml name=me/my-cool-scenario +... +DEBU[04-08-2020 10:44:26] Overflow (start: 2020-05-12 09:40:15 +0000 UTC, end: 2020-05-12 09:40:15 +0000 UTC) bucket_id=sparkling-thunder capacity=1 cfg=shy-dust file=config/scenarios/iptables-scan.yaml name=me/my-cool-scenario partition=ea2fed6bf8bb70d462ef8acacc4c96f5f8754413 +DEBU[04-08-2020 10:44:26] Adding overflow to blackhole (2020-05-12 09:40:15 +0000 UTC) bucket_id=sparkling-thunder capacity=1 cfg=shy-dust file=config/scenarios/iptables-scan.yaml name=me/my-cool-scenario partition=ea2fed6bf8bb70d462ef8acacc4c96f5f8754413 +DEBU[04-08-2020 10:44:26] eval(evt.Meta.log_type == 'iptables_drop') = TRUE cfg=shy-dust file=config/scenarios/iptables-scan.yaml name=me/my-cool-scenario +DEBU[04-08-2020 10:44:26] eval variables: cfg=shy-dust file=config/scenarios/iptables-scan.yaml name=me/my-cool-scenario +DEBU[04-08-2020 10:44:26] evt.Meta.log_type = 'iptables_drop' cfg=shy-dust file=config/scenarios/iptables-scan.yaml name=me/my-cool-scenario +DEBU[04-08-2020 10:44:26] Bucket ea2fed6bf8bb70d462ef8acacc4c96f5f8754413 found dead, cleanup the body bucket_id=sparkling-thunder capacity=1 cfg=shy-dust file=config/scenarios/iptables-scan.yaml name=me/my-cool-scenario partition=ea2fed6bf8bb70d462ef8acacc4c96f5f8754413 +WARN[04-08-2020 10:44:26] read 4 lines file=./x.log +... +INFO[04-08-2020 10:44:26] Processing Overflow with no decisions 2 IPs performed 'me/my-cool-scenario' (2 events over 0s) at 2020-05-12 09:40:15 +0000 UTC bucket_id=sparkling-thunder event_time="2020-05-12 09:40:15 +0000 UTC" scenario=me/my-cool-scenario source_ip=66.66.66.66 +... +DEBU[04-08-2020 10:44:26] Overflow discarded, still blackholed for 59s bucket_id=long-pine capacity=1 cfg=shy-dust file=config/scenarios/iptables-scan.yaml name=me/my-cool-scenario partition=ea2fed6bf8bb70d462ef8acacc4c96f5f8754413 +DEBU[04-08-2020 10:44:26] Overflow has been discard (*leakybucket.Blackhole) bucket_id=long-pine capacity=1 cfg=shy-dust file=config/scenarios/iptables-scan.yaml name=me/my-cool-scenario partition=ea2fed6bf8bb70d462ef8acacc4c96f5f8754413 +... +``` +
+
+
+We can see our "mock" scenario is working, let's see what happened :
+
+- The first event (parsed line) is processed :
+
+    - The `filter` returned true (`evt.Meta.log_type == 'iptables_drop'`) so the {{v1X.event.htmlname}} will be processed by our bucket
+    - The bucket is instantiated in {{v1X.timeMachine.htmlname}} mode, and its creation date is set to the timestamp from the first log
+    - The {{v1X.event.htmlname}} is poured into the actual bucket
+
+- The second event is processed :
+    - The `filter` is still true, and the event is poured
+    - As our bucket's capacity is `1`, pouring this second event makes the bucket overflow, which leads to an {{v1X.alert.htmlname}}
+    - Because we set a blackhole directive of `1 minute`, we prevent this bucket from overflowing again for the next minute
+
+The overflow itself is produced and we get this message :
+
+```
+INFO[12-05-2020 11:22:17] Processing Overflow with no decisions 2 IPs performed 'me/my-cool-scenario' (2 events over 0s) at 2020-05-12 09:40:15 +0000 UTC bucket_id=withered-brook event_time="2020-05-12 09:40:15 +0000 UTC" scenario=me/my-cool-scenario source_ip=66.66.66.66
+
+```
+
+!!! warning
+    While it "worked", we can see the first issue : the offending IP is reported to be `66.66.66.66`, but there are actually 3 IPs involved (`66.66.66.66`, `99.99.99.99` and `44.44.44.44`). To make sense, our port-scan detection should only consider events coming from a single IP !
+
+
+## One step forward : peer attribution
+
+Let's evolve our scenario to be closer to something meaningful :
+
+
+```yaml
+type: leaky
+debug: true
+name: me/my-cool-scenario
+description: "detect cool stuff"
+filter: "evt.Meta.log_type == 'iptables_drop' && evt.Meta.service == 'tcp'"
+groupby: evt.Meta.source_ip
+capacity: 1
+leakspeed: 1m
+blackhole: 1m
+labels:
+  type: my_test
+```
+
+What did we change ?
+
+ - we added a meaningful filter : we are only going to look into `iptables_drop` events, and only take care of `tcp` ones (see the parser we wrote in the [previous step](/Crowdsec/v1/write_configurations/parsers/))
+ - we added a `groupby` directive : it's going to ensure that each offending peer gets its own bucket
+
+
+Let's try again !
+
+```bash
+./crowdsec -c ./dev.yaml -file ./x.log -type syslog
+```
+
+ Expected output +```bash +... +DEBU[2020-05-12T11:25:20+02:00] eval(TRUE) evt.Meta.log_type == 'iptables_drop' && evt.Meta.service == 'tcp' cfg=holy-breeze file=config/scenarios/mytest.yaml name=me/my-cool-scenario +DEBU[2020-05-12T11:25:20+02:00] Leaky routine starting, lifetime : 2m0s bucket_id=cold-lake capacity=1 cfg=holy-breeze file=config/scenarios/mytest.yaml name=me/my-cool-scenario partition=2308799e2cc5b57331df10eb93a495aff7725922 +... +DEBU[2020-05-12T11:25:20+02:00] eval(TRUE) evt.Meta.log_type == 'iptables_drop' && evt.Meta.service == 'tcp' cfg=holy-breeze file=config/scenarios/mytest.yaml name=me/my-cool-scenario +DEBU[2020-05-12T11:25:20+02:00] Instanciating TimeMachine bucket cfg=holy-breeze file=config/scenarios/mytest.yaml name=me/my-cool-scenario +DEBU[2020-05-12T11:25:20+02:00] Leaky routine starting, lifetime : 2m0s bucket_id=muddy-haze capacity=1 cfg=holy-breeze file=config/scenarios/mytest.yaml name=me/my-cool-scenario partition=6236f134d0f34d0061748c065bdcb64d8ac6dc54 +... +INFO[12-05-2020 11:25:20] node warning : no remediation bucket_id=muddy-haze event_time="2020-05-12 09:40:16 +0000 UTC" scenario=me/my-cool-scenario source_ip=99.99.99.99 +INFO[12-05-2020 11:25:20] Processing Overflow with no decisions 99.99.99.99 performed 'me/my-cool-scenario' (2 events over 1s) at 2020-05-12 09:40:16 +0000 UTC bucket_id=muddy-haze event_time="2020-05-12 09:40:16 +0000 UTC" scenario=me/my-cool-scenario source_ip=99.99.99.99 +... + +``` +
+
+Let's see what happened :
+
+ - Thanks to our `groupby` key, we now see two different partition keys appearing (`partition=...`).
+ This means that each peer gets its own bucket, and a "unique key" is derived from the groupby field value (here : the source IP)
+
+ - We see that we only have one overflow, and it correctly concerns `99.99.99.99` (it's the one that actually triggered two events). This is again thanks to the groupby key
+
+
+## One step forward : unique ports
+
+
+
+Is it done ? Not yet, but we're getting close !
+
+To really qualify a port-scan, we want to rely on the number of unique probed ports. Let's arbitrarily decide that a port-scan is : "One peer trying to probe AT LEAST 15 different ports within a few seconds"
+
+Our evolved scenario is now :
+
+```yaml
+type: leaky
+debug: true
+name: me/my-cool-scenario
+description: "detect cool stuff"
+filter: "evt.Meta.log_type == 'iptables_drop' && evt.Meta.service == 'tcp'"
+groupby: evt.Meta.source_ip
+distinct: evt.Parsed.dst_port
+capacity: 15
+leakspeed: 5s
+blackhole: 1m
+labels:
+  type: scan
+  service: tcp
+
+```
+
+What did we change ?
+
+ - We added a `distinct` directive on `evt.Parsed.dst_port`. It makes the bucket discard any event whose `evt.Parsed.dst_port` has already been seen (yes, like `DISTINCT` in SQL)
+ - We changed `capacity` and `leakspeed` to be more relevant to our target
+ - We fixed the `labels` so that the event makes sense !
+
+
+Let's see what it changes :
+
+```bash
+./crowdsec -c ./dev.yaml -file ./x.log -type syslog
+```
+
+ Expected output +```bash +... +DEBU[2020-05-12T11:49:01+02:00] eval(TRUE) evt.Meta.log_type == 'iptables_drop' && evt.Meta.service == 'tcp' cfg=dark-pond file=config/scenarios/mytest.yaml name=me/my-cool-scenario +DEBU[2020-05-12T11:49:01+02:00] Instantiating TimeMachine bucket cfg=dark-pond file=config/scenarios/mytest.yaml name=me/my-cool-scenario +DEBU[2020-05-12T11:49:01+02:00] Leaky routine starting, lifetime : 1m20s bucket_id=nameless-feather capacity=15 cfg=dark-pond file=config/scenarios/mytest.yaml name=me/my-cool-scenario partition=2308799e2cc5b57331df10eb93a495aff7725922 +DEBU[2020-05-12T11:49:01+02:00] Uniq 'evt.Parsed.dst_port' -> '7681' bucket_id=nameless-feather capacity=15 cfg=dark-pond file=config/scenarios/mytest.yaml name=me/my-cool-scenario partition=2308799e2cc5b57331df10eb93a495aff7725922 +DEBU[2020-05-12T11:49:01+02:00] Uniq(7681) : false, discard bucket_id=nameless-feather capacity=15 cfg=dark-pond file=config/scenarios/mytest.yaml name=me/my-cool-scenario partition=2308799e2cc5b57331df10eb93a495aff7725922 +DEBU[2020-05-12T11:49:01+02:00] Pouring event bucket_id=nameless-feather capacity=15 cfg=dark-pond file=config/scenarios/mytest.yaml name=me/my-cool-scenario partition=2308799e2cc5b57331df10eb93a495aff7725922 +... + +``` +
+
+
+ - We can see that the second event was discarded, because it had the same destination port as the first one
+ - No overflow was produced
+
+
+## Is it really working ?
+
+Ok, **fingers crossed**, our thing should be working.
+
+Let's grab some real-life logs !
+
+```bash
+$ wc -l kern.log
+78215 kern.log
+$ head -n1 kern.log
+May 11 06:25:20 sd-126005 kernel: ...
+$ tail -n1 kern.log
+May 12 12:09:00 sd-126005 kernel: ...
+```
+
+We have around 80k lines covering about 24h of logs, let's try !
+
+```bash
+./crowdsec -c ./dev.yaml -file ./kern.log -type syslog
+```
+
+ Expected output +```bash +INFO[0000] setting loglevel to info +INFO[12-05-2020 11:50:38] Crowdsec v0.0.18-f672dbb4aec29ca2b24080a33d4d92eb9d4441cc +... +INFO[12-05-2020 11:50:42] node warning : no remediation bucket_id=sparkling-violet event_time="2020-05-11 10:41:45 +0000 UTC" scenario=me/my-cool-scenario source_ip=xx.xx.xx.xx +INFO[12-05-2020 11:50:42] Processing Overflow with no decisions xx.xx.xx.xx performed 'me/my-cool-scenario' (16 events over 0s) at 2020-05-11 10:41:45 +0000 UTC bucket_id=sparkling-violet event_time="2020-05-11 10:41:45 +0000 UTC" scenario=me/my-cool-scenario source_ip=xx.xx.xx.xx +... +INFO[12-05-2020 11:50:43] node warning : no remediation bucket_id=quiet-leaf event_time="2020-05-11 11:34:11 +0000 UTC" scenario=me/my-cool-scenario source_ip=yy.yy.yy.yy +INFO[12-05-2020 11:50:43] Processing Overflow with no decisions yy.yy.yy.yy performed 'me/my-cool-scenario' (16 events over 2s) at 2020-05-11 11:34:11 +0000 UTC bucket_id=quiet-leaf event_time="2020-05-11 11:34:11 +0000 UTC" scenario=me/my-cool-scenario source_ip=yy.yy.yy.yy +... +WARN[12-05-2020 11:51:05] read 78215 lines file=./kern.log +... +``` +
+
+It seems to work correctly !
+
+
+## Hold my beer and watch this
+
+
+Once we have acquired confidence in our scenario and want it to trigger some bans, we can simply add :
+
+
+```yaml
+type: leaky
+debug: true
+name: me/my-cool-scenario
+description: "detect cool stuff"
+filter: "evt.Meta.log_type == 'iptables_drop' && evt.Meta.service == 'tcp'"
+groupby: evt.Meta.source_ip
+distinct: evt.Parsed.dst_port
+capacity: 15
+leakspeed: 5s
+blackhole: 1m
+labels:
+  type: scan
+  service: tcp
+  remediation: true
+  scope: ip
+```
+
+
+Adding `remediation: true` to the labels tells {{v1X.crowdsec.name}} that we should write a ban for the IP when the scenario is triggered !
+
+Let's try :
+
+ - I copied the yaml file to a production system (`/etc/crowdsec/config/scenarios/mytest.yaml`)
+ - I reloaded {{v1X.crowdsec.name}} (`systemctl reload crowdsec`)
+
+Let's check if it seems correctly enabled :
+
+```bash
+$ {{v1X.cli.bin}} list
+...
+INFO[0000] SCENARIOS:
+----------------------------------------------------------------------------------------------------------------------------------
+ NAME                 📦 STATUS         VERSION  LOCAL PATH
+----------------------------------------------------------------------------------------------------------------------------------
+...
+ mytest.yaml          🚫  enabled,local          /etc/crowdsec/config/scenarios/mytest.yaml
+...
+```
+
+
+Let's launch (from an external machine, as {{v1X.crowdsec.name}} ignores events from private IPs by default) a real port-scan with a good old `nmap` :
+
+```bash
+sudo nmap -sS xx.xx.xx.xx
+```
+
+
+and on our server :
+
+```bash
+$ tail -f /var/log/crowdsec.log
+...
+time="12-05-2020 12:31:43" level=warning msg="xx.xx.16.6 triggered a 4h0m0s ip ban remediation for [me/my-cool-scenario]" bucket_id=wispy-breeze event_time="2020-05-12 12:31:43.953498645 +0200 CEST m=+64.533521568" scenario=me/my-cool-scenario source_ip=xx.xx.16.6
+...
+^C
+$ {{v1X.cli.bin}} ban list
+INFO[0000] backend plugin 'database' loaded
+8 local decisions:
++--------+-----------------+----------------------+------+--------+---------+--------------------------+--------+------------+
+| SOURCE | IP              | REASON               | BANS | ACTION | COUNTRY | AS                       | EVENTS | EXPIRATION |
++--------+-----------------+----------------------+------+--------+---------+--------------------------+--------+------------+
+| local  | xx.xx.xx.xx     | me/my-cool-scenario  | 4    | ban    | FR      | 21502 SFR SA             | 79     | 3h58m27s   |
+...
+```
+
+It worked !!!
diff --git a/docs/v1.X/docs/write_configurations/whitelist.md b/docs/v1.X/docs/write_configurations/whitelist.md
new file mode 100644
index 000000000..0c7595e70
--- /dev/null
+++ b/docs/v1.X/docs/write_configurations/whitelist.md
@@ -0,0 +1,184 @@
+# What are whitelists
+
+Whitelists are special parsers that allow you to "discard" events, and can exist at two different stages :
+
+ - *Parser whitelists* : Allow you to discard an event at parse time, so that it never hits the buckets.
+ - *PostOverflow whitelists* : These are checked *after* the overflow happens. They are best suited for whitelisting processes that can be expensive (such as performing a reverse DNS lookup on an IP, or a `whois` of an IP).
+
+!!! info
+    While the whitelists are the same for parsers and postoverflows, beware that field names might change.
+    The source IP is usually in `evt.Meta.source_ip` for a log event, but in `evt.Overflow.Source_ip` for an overflow
+
+
+The whitelist can be based on several criteria :
+
+ - specific ip address : if the event/overflow IP is the same, the event is whitelisted
+ - ip ranges : if the event/overflow IP belongs to this range, the event is whitelisted
+ - a list of {{v1X.expr.htmlname}} expressions : if any expression returns true, the event is whitelisted
+
+Here is an example showcasing the configuration :
+
+```yaml
+name: crowdsecurity/my-whitelists
+description: "Whitelist events from my ipv4 addresses"
+#it's a normal parser, so we can restrict its scope with filter
+filter: "1 == 1"
+whitelist:
+  reason: "my ipv4 ranges"
+  ip:
+    - "127.0.0.1"
+  cidr:
+    - "192.168.0.0/16"
+    - "10.0.0.0/8"
+    - "172.16.0.0/12"
+  expression:
+    #beware, this one will work *only* if you enabled the reverse dns (crowdsecurity/rdns) enrichment postoverflow parser
+    - evt.Enriched.reverse_dns endsWith ".mycoolorg.com."
+    #this one will work *only* if you enabled the geoip (crowdsecurity/geoip-enrich) enrichment parser
+    - evt.Enriched.IsoCode == 'FR'
+```
+
+
+# Whitelists in parsing
+
+When a whitelist is present in parsing (`/etc/crowdsec/config/parsers/...`), the event is checked against it and, if whitelisted, discarded before being poured into any bucket. These whitelists intentionally generate no logs and are useful to discard noisy false-positive sources.
+
+## Whitelist by ip
+
+Let's assume we have a setup with a `crowdsecurity/nginx` collection enabled and no whitelists.
+
+Thus, if I "attack" myself :
+
+```bash
+nikto -host myfqdn.com
+```
+
+my own IP will be flagged as being an attacker :
+
+```bash
+$ tail -f /var/log/crowdsec.log
+time="07-07-2020 16:13:16" level=warning msg="80.x.x.x triggered a 4h0m0s ip ban remediation for [crowdsecurity/http-bad-user-agent]" bucket_id=cool-smoke event_time="2020-07-07 16:13:16.579581642 +0200 CEST m=+358819.413561109" scenario=crowdsecurity/http-bad-user-agent source_ip=80.x.x.x
+time="07-07-2020 16:13:16" level=warning msg="80.x.x.x triggered a 4h0m0s ip ban remediation for [crowdsecurity/http-probing]" bucket_id=green-silence event_time="2020-07-07 16:13:16.737579458 +0200 CEST m=+358819.571558901" scenario=crowdsecurity/http-probing source_ip=80.x.x.x
+time="07-07-2020 16:13:17" level=warning msg="80.x.x.x triggered a 4h0m0s ip ban remediation for [crowdsecurity/http-crawl-non_statics]" bucket_id=purple-snowflake event_time="2020-07-07 16:13:17.353641625 +0200 CEST m=+358820.187621068" scenario=crowdsecurity/http-crawl-non_statics source_ip=80.x.x.x
+time="07-07-2020 16:13:18" level=warning msg="80.x.x.x triggered a 4h0m0s ip ban remediation for [crowdsecurity/http-sensitive-files]" bucket_id=small-hill event_time="2020-07-07 16:13:18.005919055 +0200 CEST m=+358820.839898498" scenario=crowdsecurity/http-sensitive-files source_ip=80.x.x.x
+^C
+$ {{v1X.cli.bin}} ban list
+4 local decisions:
++--------+---------------+-----------------------------------+------+--------+---------+---------------------------+--------+------------+
+| SOURCE | IP            | REASON                            | BANS | ACTION | COUNTRY | AS                        | EVENTS | EXPIRATION |
++--------+---------------+-----------------------------------+------+--------+---------+---------------------------+--------+------------+
+| local  | 80.x.x.x      | crowdsecurity/http-bad-user-agent | 4    | ban    | FR      | 21502 SFR SA              | 60     | 3h59m3s    |
+...
+
+```
+
+
+### Create the whitelist by IP
+
+Let's create a `/etc/crowdsec/config/parsers/s02-enrich/mywhitelists.yaml` file with the following content :
+
+```yaml
+name: crowdsecurity/whitelists
+description: "Whitelist events from my ip addresses"
+whitelist:
+  reason: "my ip ranges"
+  ip:
+    - "80.x.x.x"
+```
+
+and reload {{v1X.crowdsec.name}} : `sudo systemctl reload crowdsec`
+
+### Test the whitelist
+
+Now, if we restart our attack :
+
+```bash
+nikto -host myfqdn.com
+```
+
+we don't get any bans :
+
+```bash
+$ tail -f /var/log/crowdsec.log
+...
+^C
+$ {{v1X.cli.bin}} ban list
+No local decisions.
+And 21 records from API, 15 distinct AS, 12 distinct countries
+
+```
+
+Here, we don't get *any* logs, as the events have been discarded at parsing time.
+
+
+## Create whitelist by expression
+
+Now, let's make something more tricky : let's whitelist a **specific** user-agent (of course, it's just an example, don't do this at home !). The [hub's taxonomy](https://hub.crowdsec.net/fields) will help us find which data is present in which field.
+
+Let's change our whitelist to :
+
+```yaml
+name: crowdsecurity/whitelists
+description: "Whitelist events from a specific user-agent"
+whitelist:
+  reason: "whitelisted user-agent"
+  expression:
+    - evt.Parsed.http_user_agent == 'MySecretUserAgent'
+```
+
+Again, let's reload {{v1X.crowdsec.name}} !
+
+For the record, I edited nikto's configuration to use 'MySecretUserAgent' as its user-agent, and thus :
+
+```bash
+nikto -host myfqdn.com
+```
+
+```bash
+$ tail -f /var/log/crowdsec.log
+...
+time="07-05-2020 09:39:09" level=info msg="Event is whitelisted by Expr !" filter= name=solitary-leaf stage=s02-enrich
+...
+```
+
+
+# Whitelists in PostOverflows
+
+Whitelists in PostOverflows are applied *after* the bucket overflow happens.
+They have the advantage of being triggered only when we are about to take a decision about an IP or range, and thus run a lot less often.
+
+A good example is the [crowdsecurity/whitelist-good-actors](https://hub.crowdsec.net/author/crowdsecurity/collections/whitelist-good-actors) collection.
+
+But let's craft ours based on our previous example !
+First of all, install the [crowdsecurity/rdns postoverflow](https://hub.crowdsec.net/author/crowdsecurity/configurations/rdns) : it will be in charge of enriching overflows with the reverse dns information of the offending IP.
+
+Let's put the following file in `/etc/crowdsec/config/postoverflows/s01-whitelists/mywhitelists.yaml` :
+
+```yaml
+name: me/my_cool_whitelist
+description: let's whitelist our own reverse dns
+whitelist:
+  reason: dont ban my ISP
+  expression:
+    #this is the reverse of my ip, you can get it by performing a "host" command on your public IP for example
+    - evt.Enriched.reverse_dns endsWith '.asnieres.rev.numericable.fr.'
+```
+
+After reloading {{v1X.crowdsec.name}}, and launching (again!)
nikto :
+
+```bash
+nikto -host myfqdn.com
+```
+
+
+```bash
+$ tail -f /var/log/crowdsec.log
+time="07-07-2020 17:11:09" level=info msg="Ban for 80.x.x.x whitelisted, reason [dont ban my ISP]" id=cold-sunset name=me/my_cool_whitelist stage=s01
+time="07-07-2020 17:11:09" level=info msg="node warning : no remediation" bucket_id=blue-cloud event_time="2020-07-07 17:11:09.175068053 +0200 CEST m=+2308.040825320" scenario=crowdsecurity/http-probing source_ip=80.x.x.x
+time="07-07-2020 17:11:09" level=info msg="Processing Overflow with no decisions 80.x.x.x performed 'crowdsecurity/http-probing' (11 events over 313.983994ms) at 2020-07-07 17:11:09.175068053 +0200 CEST m=+2308.040825320" bucket_id=blue-cloud event_time="2020-07-07 17:11:09.175068053 +0200 CEST m=+2308.040825320" scenario=crowdsecurity/http-probing source_ip=80.x.x.x
+...
+
+```
+
+This time, we can see that logs are being produced when the event is discarded.
+
diff --git a/docs/v1.X/mkdocs.yml b/docs/v1.X/mkdocs.yml
new file mode 100644
index 000000000..d354d8dd6
--- /dev/null
+++ b/docs/v1.X/mkdocs.yml
@@ -0,0 +1,84 @@
+site_name: Crowdsec/v1
+nav:
+  - Home: index.md
+  - Getting Started:
+#    - Glossary: getting_started/glossary.md
+    - Concepts : getting_started/concepts.md
+    - Install Crowdsec : getting_started/installation.md
+    - Crowdsec Tour: getting_started/crowdsec-tour.md
+  - User guide:
+    - CLI: user_guide/cscli.md
+    - Configurations management:
+      - Acquisition: user_guide/configurations_management/acquisition.md
+      - Collections: user_guide/configurations_management/collections.md
+      - Parsers: user_guide/configurations_management/parsers.md
+      - Enrichers: user_guide/configurations_management/enrichers.md
+      - Scenarios: user_guide/configurations_management/scenarios.md
+      - Managing local/custom configuration: user_guide/configurations_management/local-configurations.md
+    - Decisions Management: user_guide/decision_management.md
+    - Bouncers & machines management: user_guide/bouncer_machine_management.md
+    - Databases: user_guide/database.md
+    - Simulation Management: user_guide/simulation_mode.md
+    - Crowdsec forensic mode: user_guide/forensic_mode.md
+    - Debugging: user_guide/debugging_configs.md
+    - Docker: docker/README.md
+  - CLI:
+    - Cscli: cscli/cscli.md
+    - Alerts: cscli/cscli_alerts.md
+    - Bouncers: cscli/cscli_bouncers.md
+    - Collections: cscli/cscli_collections.md
+    - Config: cscli/cscli_config.md
+    - Dashboard: cscli/cscli_dashboard.md
+    - Decisions: cscli/cscli_decisions.md
+    - Hub: cscli/cscli_hub.md
+    - Machines: cscli/cscli_machines.md
+    - Metrics: cscli/cscli_metrics.md
+    - Parsers: cscli/cscli_parsers.md
+    - Postoverflows: cscli/cscli_postoverflows.md
+    - Scenarios: cscli/cscli_scenarios.md
+    - Simulation: cscli/cscli_simulation.md
+  - Local API: localAPI/index.md
+  - Observability:
+    - Overview: observability/overview.md
+    - Logs: observability/logs.md
+    - Metrics:
+      - Prometheus: observability/prometheus.md
+      - Command line: observability/command_line.md
+    - Dashboard: observability/dashboard.md
+  - Bouncers: bouncers/index.md
+  - References:
+    - Parsers, Scenarios etc.:
+      - Stages format: references/stages.md
+      - Parsers format: references/parsers.md
+      - Scenarios format: references/scenarios.md
+      - PostOverflows format: references/postoverflows.md
+      - Enrichers format: references/enrichers.md
+      - Collections format: references/collections.md
+      - Expressions helpers: references/expressions.md
+      - Patterns references: references/patterns-documentation.md
+    - Configuration files:
+      - Configuration format:
references/crowdsec-config.md + - Database format: references/database.md + - Acquisition format: references/acquisition.md + - Profiles format: references/profiles.md + - Simulation configuration: references/simulation.md + - Runtime objects: + - Event object: references/events.md + - Alert object: references/alerts.md + - Decision object: references/decisions.md + - Write Configurations: + - Requirements: write_configurations/requirements.md + - Acquisition: write_configurations/acquisition.md + - Parsers: write_configurations/parsers.md + - Scenarios: write_configurations/scenarios.md + - Whitelists: write_configurations/whitelist.md + - Admin Guide: + - Services Configuration: admin_guide/services_configuration.md + - Architecture: admin_guide/architecture.md + - Contributing: + - General: contributing.md + - Reporting bugs: contributing.md + - Asking questions: contributing.md + - Publishing parsers & scenarios: contributing.md + - Upgrade V0.X to V1.X: migration.md + diff --git a/docs/write_configurations/acquisition.md b/docs/write_configurations/acquisition.md deleted file mode 100644 index 23d35de49..000000000 --- a/docs/write_configurations/acquisition.md +++ /dev/null @@ -1,89 +0,0 @@ -# What is acquisition - -Acquisition configuration (`/etc/crowdsec/config/acquis.yaml`) defines what log files are read by {{crowdsec.name}}. As log files determines what {{crowdsec.name}} can detect, it is a simple yet crucial part of the configuration. - -This `acquisition.yaml` file is composed of sections that look like this : - -```yaml -filename: /var/log/myservice/foobar.log -labels: - type: myservice ---- -filenames: - - /var/log/myservice/*.log - - /var/log/something/else.log -labels: - type: myservice -``` - -Each section indicate : - - - path(s) to a log file (or a regular expression for globing) - - label(s) indicating the log's type - -While the path(s) is straightforward, the `labels->type` will depend on log's format. -If you're using syslog format, `labels->type` can simply be set to `syslog`, as it contains the program name itself. If your logs are written directly by a daemon (ie. nginx) with its own format, it must be set accordingly to the parser : `nginx` for nginx etc. - -If you don't know to which value you need to set the `labels->type`. First check if logs are written in syslog format (it's the case for a lot of services on linux) : in this case simply set it to `syslog`. If the service **- and only if -** the service write its own logs, have a look at the associated parser : - -```bash -$ head /etc/crowdsec/config/parsers/s01-parse/mysql-logs.yaml -... -#labels->type must be set to 'mysql' -filter: "evt.Parsed.program == 'mysql'" -... -``` - -!!! warning - Properly picking the log type is crucial. If the `labels->type` is wrong, your logs won't be parsed and thus will be discarded. You can see if your logs are parsed from `cscli metrics`. - - -```yaml ---- -filename: -labels: - type: - -``` -Here are some examples: - -
- Nginx acquisition - -```yaml ---- -filename: /var/log/nginx/*.log -labels: - type: nginx -``` - -
- - - -
- sshd acquisition - -```yaml -#Generated acquisition file - wizard.sh (service: sshd) / files : /var/log/auth.log -filenames: - - /var/log/auth.log -labels: - type: syslog ---- -``` - -
- -
- mysql acquisition - -```yaml -#Generated acquisition file - wizard.sh (service: mysql) / files : /var/log/mysql/error.log -filenames: - - /var/log/mysql/error.log -labels: - type: mysql -``` - -
diff --git a/go.mod b/go.mod index fcb446bff..9df0cc912 100644 --- a/go.mod +++ b/go.mod @@ -3,10 +3,16 @@ module github.com/crowdsecurity/crowdsec go 1.13 require ( + github.com/AlecAivazis/survey/v2 v2.2.1 + github.com/KyleBanks/depth v1.2.1 // indirect github.com/Microsoft/go-winio v0.4.14 // indirect - github.com/antonmedv/expr v1.8.2 + github.com/antonmedv/expr v1.8.9 + github.com/appleboy/gin-jwt/v2 v2.6.4 + github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6 // indirect + github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef // indirect github.com/buger/jsonparser v1.0.0 github.com/containerd/containerd v1.3.4 // indirect + github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e github.com/davecgh/go-spew v1.1.1 github.com/denisbrodbeck/machineid v1.0.1 github.com/dghubble/sling v1.3.0 @@ -15,37 +21,64 @@ require ( github.com/docker/go-connections v0.4.0 github.com/docker/go-units v0.4.0 // indirect github.com/enescakir/emoji v1.0.0 + github.com/facebook/ent v0.5.0 + github.com/gin-gonic/gin v1.6.3 + github.com/go-co-op/gocron v0.3.3 + github.com/go-openapi/analysis v0.19.12 // indirect + github.com/go-openapi/errors v0.19.8 + github.com/go-openapi/jsonpointer v0.19.5 // indirect + github.com/go-openapi/loads v0.19.6 // indirect + github.com/go-openapi/runtime v0.19.24 // indirect + github.com/go-openapi/spec v0.19.13 // indirect + github.com/go-openapi/strfmt v0.19.10 + github.com/go-openapi/swag v0.19.11 + github.com/go-openapi/validate v0.19.12 + github.com/go-playground/validator/v10 v10.4.1 // indirect + github.com/go-sql-driver/mysql v1.5.1-0.20200311113236-681ffa848bae + github.com/google/go-querystring v1.0.0 github.com/goombaio/namegenerator v0.0.0-20181006234301-989e774b106e - github.com/hashicorp/go-version v1.2.0 + github.com/hashicorp/go-version v1.2.1 github.com/jamiealquiza/tachymeter v2.0.0+incompatible github.com/jinzhu/gorm v1.9.12 github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 // indirect + github.com/lib/pq v1.8.0 github.com/logrusorgru/grokky v0.0.0-20180829062225-47edf017d42c + github.com/mailru/easyjson v0.7.6 // indirect github.com/mattn/go-runewidth v0.0.9 // indirect github.com/mattn/go-sqlite3 v2.0.3+incompatible + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.1 // indirect + github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 github.com/nxadm/tail v1.4.4 github.com/olekukonko/tablewriter v0.0.4 github.com/opencontainers/go-digest v1.0.0-rc1 // indirect github.com/opencontainers/image-spec v1.0.1 // indirect github.com/oschwald/geoip2-golang v1.4.0 github.com/oschwald/maxminddb-golang v1.6.0 - github.com/pkg/errors v0.8.1 - github.com/prometheus/client_golang v1.5.1 + github.com/pkg/errors v0.9.1 + github.com/prometheus/client_golang v1.8.0 github.com/prometheus/client_model v0.2.0 - github.com/prometheus/common v0.9.1 + github.com/prometheus/common v0.15.0 github.com/prometheus/prom2json v1.3.0 + github.com/rogpeppe/godef v1.1.2 // indirect github.com/sevlyar/go-daemon v0.1.5 - github.com/sirupsen/logrus v1.5.0 - github.com/spf13/cobra v0.0.7 - github.com/stretchr/testify v1.5.1 + github.com/sirupsen/logrus v1.7.0 + github.com/spf13/cobra v1.1.1 + github.com/stretchr/testify v1.6.1 + github.com/ugorji/go v1.2.0 // indirect + github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77 // indirect + go.mongodb.org/mongo-driver v1.4.3 // indirect + golang.org/x/crypto v0.0.0-20201116153603-4be66e5b6582 golang.org/x/lint 
v0.0.0-20200302205851-738671d3881b // indirect - golang.org/x/mod v0.2.0 - golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4 + golang.org/x/mod v0.3.0 + golang.org/x/net v0.0.0-20201110031124-69a78807bb2b // indirect + golang.org/x/sys v0.0.0-20201116161645-c061ba923fbb + golang.org/x/text v0.3.4 // indirect golang.org/x/time v0.0.0-20191024005414-555d28b269f0 - golang.org/x/tools v0.0.0-20200422022333-3d57cf2e726e // indirect + google.golang.org/protobuf v1.25.0 // indirect gopkg.in/natefinch/lumberjack.v2 v2.0.0 gopkg.in/tomb.v2 v2.0.0-20161208151619-d5d1b5820637 - gopkg.in/yaml.v2 v2.2.8 + gopkg.in/yaml.v2 v2.3.0 ) replace golang.org/x/time/rate => github.com/crowdsecurity/crowdsec/pkg/time/rate v0.0.0 diff --git a/go.sum b/go.sum index beef02757..af6b6c826 100644 --- a/go.sum +++ b/go.sum @@ -1,9 +1,41 @@ +9fans.net/go v0.0.0-20181112161441-237454027057 h1:OcHlKWkAMJEF1ndWLGxp5dnJQkYM/YImUOvsBoz6h5E= +9fans.net/go v0.0.0-20181112161441-237454027057/go.mod h1:diCsxrliIURU9xsYtjCp5AbpQKqdhKmf0ujWDUSkfoY= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/AlecAivazis/survey v1.8.8 h1:Y4yypp763E8cbqb5RBqZhGgkCFLRFnbRBHrxnpMMsgQ= +github.com/AlecAivazis/survey/v2 v2.2.1 h1:QnyYpsSkNw0Zgf51CMF3vzDjCuhB+ANPH8jp3FN+4EM= +github.com/AlecAivazis/survey/v2 v2.2.1/go.mod h1:9FJRdMdDm8rnT+zHVbvQT2RTSTLq0Ttd6q3Vl2fahjk= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= +github.com/DATA-DOG/go-sqlmock v1.5.0/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= +github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= +github.com/KyleBanks/depth v1.2.1/go.mod h1:jzSb9d0L43HxTQfT+oSA1EEp2q+ne2uh6XgeJcm8brE= github.com/Microsoft/go-winio v0.4.14 h1:+hMXMk01us9KgxGb7ftKQt2Xpf5hH/yky+TDA+qxleU= github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= +github.com/Netflix/go-expect v0.0.0-20180615182759-c93bf25de8e8/go.mod h1:oX5x61PbNXchhh0oikYAH+4Pcfw5LKv21+Jnpr6r6Pc= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/PuerkitoBio/purell v1.1.0/go.mod 
h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= +github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= +github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= +github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= +github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= +github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 h1:JYp7IbQjafoB+tBA3gMyHYHrpOtNuDiK/uB5uXxq5wM= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= @@ -11,29 +43,70 @@ github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRF github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d h1:UQZhZ2O0vMHr2cI+DC1Mbh0TJxzA3RcLoMsFw+aXw7E= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= github.com/antonmedv/expr v1.8.2 h1:BfkVHGudYqq7jp3Ji33kTn+qZ9D19t/Mndg0ag/Ycq4= github.com/antonmedv/expr v1.8.2/go.mod h1:5qsM3oLGDND7sDmQGDXHkYfkjYMUX14qsgqmHhwGEk8= +github.com/antonmedv/expr v1.8.9 h1:O9stiHmHHww9b4ozhPx7T6BK7fXfOCHJ8ybxf0833zw= +github.com/antonmedv/expr v1.8.9/go.mod h1:5qsM3oLGDND7sDmQGDXHkYfkjYMUX14qsgqmHhwGEk8= +github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/appleboy/gin-jwt v1.0.1 h1:cbLi6ol8KhWUoCBvLFCGpssiBB2iM9eXWwmI9SDG+W0= +github.com/appleboy/gin-jwt v2.5.0+incompatible h1:oLQTP1fiGDoDKoC2UDqXD9iqCP44ABIZMMenfH/xCqw= +github.com/appleboy/gin-jwt/v2 v2.6.4 h1:4YlMh3AjCFnuIRiL27b7TXns7nLx8tU/TiSgh40RRUI= +github.com/appleboy/gin-jwt/v2 v2.6.4/go.mod h1:CZpq1cRw+kqi0+yD2CwVw7VGXrrx4AqBdeZnwxVmoAs= +github.com/appleboy/gofight/v2 v2.1.2/go.mod h1:frW+U1QZEdDgixycTj4CygQ48yLTUhplt43+Wczp3rw= +github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A= +github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod 
h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/asaskevich/govalidator v0.0.0-20200108200545-475eaeb16496/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg= +github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535 h1:4daAzAu0S6Vi7/lbWECcX0j45yZReDZ56BQsrVBOEEY= +github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg= +github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef h1:46PFijGLmAjMPwCCCo7Jf0W6f9slllCkkv7vyc1yOSg= +github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= +github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= +github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.34.28/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48= +github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= github.com/buger/jsonparser v1.0.0 h1:etJTGF5ESxjI0Ic2UaLQs2LQQpa8G9ykQScukbh4L8A= github.com/buger/jsonparser v1.0.0/go.mod h1:tgcrVJ81GPSF0mz+0nu1Xaz0fazGPrmmJfJtxjbHhUQ= +github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= +github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= +github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= github.com/containerd/containerd v1.3.4 h1:3o0smo5SKY7H6AJCmJhsnCjR2/V2T8VmiHt7seN2/kI= github.com/containerd/containerd v1.3.4/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-semver v0.2.0/go.mod 
h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e h1:Wf6HqHfScWJN9/ZjdUKyjop4mf3Qdd+1TvvltAvM3m8= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.0 h1:EoUDS0afbrsXAZ9YQ9jdu/mZ2sXgT1/2yyNng4PGlyM= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v0.0.0-20161028175848-04cdfd42973b/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= @@ -43,6 +116,7 @@ github.com/denisbrodbeck/machineid v1.0.1/go.mod h1:dJUwb7PTidGDeYyUBmXZ2GphQBbj github.com/denisenkom/go-mssqldb v0.0.0-20191124224453-732737034ffd/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU= github.com/dghubble/sling v1.3.0 h1:pZHjCJq4zJvc6qVQ5wN1jo5oNZlNE0+8T/h0XeXBUKU= github.com/dghubble/sling v1.3.0/go.mod h1:XXShWaBWKzNLhu2OxikSNFrlsvowtz4kyRuXUG7oQKY= +github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug= @@ -53,260 +127,857 @@ github.com/docker/docker v17.12.0-ce-rc1.0.20200419140219-55e6d7d36faf+incompati github.com/docker/docker v17.12.0-ce-rc1.0.20200419140219-55e6d7d36faf+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= +github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= +github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= +github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= +github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= github.com/enescakir/emoji v1.0.0 
h1:W+HsNql8swfCQFtioDGDHCHri8nudlK1n5p2rHCJoog= github.com/enescakir/emoji v1.0.0/go.mod h1:Bt1EKuLnKDTYpLALApstIkAjdDrS/8IAgTkKp+WKFD0= +github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5/go.mod h1:a2zkGnVExMxdzMo3M0Hi/3sEU+cWnZpSni0O6/Yb/P0= +github.com/facebook/ent v0.5.0 h1:NlDQDxJi1X6+20CCjRQgu8UqvRhQNm5ocPBCQYdxC/8= +github.com/facebook/ent v0.5.0/go.mod h1:HrrMNGsvgZoGQ74PGBQJ9r9WNOVMqKQefcOJFXuOUlw= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= +github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/gdamore/encoding v1.0.0/go.mod h1:alR0ol34c49FCSBLjhosxzcPHQbf2trDkoo5dl+VrEg= github.com/gdamore/tcell v1.3.0/go.mod h1:Hjvr+Ofd+gLglo7RYKxxnzCBmev3BzsS67MebKS4zMM= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE= +github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= +github.com/gin-gonic/gin v1.6.3 h1:ahKqKTFpO5KTPHxWZjEdPScmYaGtLo8Y4DMHoEsnp14= +github.com/gin-gonic/gin v1.6.3/go.mod h1:75u5sXoLsGZoRN5Sgbi1eraJ4GU3++wFwWzhwvtwp4M= +github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= +github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= +github.com/go-bindata/go-bindata v1.0.1-0.20190711162640-ee3c2418e368/go.mod h1:7xCgX1lzlrXPHkfvn3EhumqHkmSlzt8at9q7v0ax19c= +github.com/go-co-op/gocron v0.3.2 h1:dij1B64q5euTenSBBv7Aoaqtr/pOEDmPv3Iz8qhQ8J0= +github.com/go-co-op/gocron v0.3.2/go.mod h1:Y9PWlYqDChf2Nbgg7kfS+ZsXHDTZbMZYPEQ0MILqH+M= +github.com/go-co-op/gocron v0.3.3 h1:QnarcMZWWKrEP25uCbtDiLsnnGw+PhCjL3wNITdWJOs= +github.com/go-co-op/gocron v0.3.3/go.mod h1:Y9PWlYqDChf2Nbgg7kfS+ZsXHDTZbMZYPEQ0MILqH+M= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI= +github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= +github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= +github.com/go-openapi/analysis 
v0.19.2/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk= +github.com/go-openapi/analysis v0.19.4/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk= +github.com/go-openapi/analysis v0.19.5/go.mod h1:hkEAkxagaIvIP7VTn8ygJNkd4kAYON2rCu0v0ObL0AU= +github.com/go-openapi/analysis v0.19.10 h1:5BHISBAXOc/aJK25irLZnx2D3s6WyYaY9D4gmuz9fdE= +github.com/go-openapi/analysis v0.19.10/go.mod h1:qmhS3VNFxBlquFJ0RGoDtylO9y4pgTAUNE9AEEMdlJQ= +github.com/go-openapi/analysis v0.19.12 h1:zmopuQzzL2egB/47YhmOJGHK6c+sFEZpD04UgG9ySKo= +github.com/go-openapi/analysis v0.19.12/go.mod h1:qmhS3VNFxBlquFJ0RGoDtylO9y4pgTAUNE9AEEMdlJQ= +github.com/go-openapi/errors v0.17.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= +github.com/go-openapi/errors v0.18.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= +github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= +github.com/go-openapi/errors v0.19.3/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= +github.com/go-openapi/errors v0.19.6/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= +github.com/go-openapi/errors v0.19.7 h1:Lcq+o0mSwCLKACMxZhreVHigB9ebghJ/lrmeaqASbjo= +github.com/go-openapi/errors v0.19.7/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= +github.com/go-openapi/errors v0.19.8 h1:doM+tQdZbUm9gydV9yR+iQNmztbjj7I3sW4sIcAwIzc= +github.com/go-openapi/errors v0.19.8/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= +github.com/go-openapi/inflect v0.19.0/go.mod h1:lHpZVlpIQqLyKwJ4N+YSc9hchQy/i12fJykb83CRBH4= +github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= +github.com/go-openapi/jsonpointer v0.18.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= +github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= +github.com/go-openapi/jsonpointer v0.19.3 h1:gihV7YNZK1iK6Tgwwsxo2rJbD1GTbdm72325Bq8FI3w= +github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY= +github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= +github.com/go-openapi/jsonreference v0.18.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= +github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= +github.com/go-openapi/jsonreference v0.19.3 h1:5cxNfTy0UVC3X8JL5ymxzyoUZmo8iZb+jeTWn7tUa8o= +github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= +github.com/go-openapi/jsonreference v0.19.4 h1:3Vw+rh13uq2JFNxgnMTGE1rnoieU9FmyE1gvnyylsYg= +github.com/go-openapi/jsonreference v0.19.4/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg= +github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= +github.com/go-openapi/loads v0.18.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= +github.com/go-openapi/loads v0.19.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= +github.com/go-openapi/loads v0.19.2/go.mod h1:QAskZPMX5V0C2gvfkGZzJlINuP7Hx/4+ix5jWFxsNPs= +github.com/go-openapi/loads v0.19.3/go.mod h1:YVfqhUCdahYwR3f3iiwQLhicVRvLlU/WO5WPaZvcvSI= +github.com/go-openapi/loads v0.19.5 h1:jZVYWawIQiA1NBnHla28ktg6hrcfTHsCE+3QLVRBIls= +github.com/go-openapi/loads v0.19.5/go.mod h1:dswLCAdonkRufe/gSUC3gN8nTSaB9uaS2es0x5/IbjY= 
+github.com/go-openapi/loads v0.19.6 h1:6IAtnx22MNSjPocZZ2sV7EjgF6wW5rDC9r6ZkNxjiN8= +github.com/go-openapi/loads v0.19.6/go.mod h1:brCsvE6j8mnbmGBh103PT/QLHfbyDxA4hsKvYBNEGVc= +github.com/go-openapi/runtime v0.0.0-20180920151709-4f900dc2ade9/go.mod h1:6v9a6LTXWQCdL8k1AO3cvqx5OtZY/Y9wKTgaoP6YRfA= +github.com/go-openapi/runtime v0.19.0/go.mod h1:OwNfisksmmaZse4+gpV3Ne9AyMOlP1lt4sK4FXt0O64= +github.com/go-openapi/runtime v0.19.4/go.mod h1:X277bwSUBxVlCYR3r7xgZZGKVvBd/29gLDlFGtJ8NL4= +github.com/go-openapi/runtime v0.19.15/go.mod h1:dhGWCTKRXlAfGnQG0ONViOZpjfg0m2gUt9nTQPQZuoo= +github.com/go-openapi/runtime v0.19.16 h1:tQMAY5s5BfmmCC31+ufDCsGrr8iO1A8UIdYfDo5ADvs= +github.com/go-openapi/runtime v0.19.16/go.mod h1:5P9104EJgYcizotuXhEuUrzVc+j1RiSjahULvYmlv98= +github.com/go-openapi/runtime v0.19.24 h1:TqagMVlRAOTwllE/7hNKx6rQ10O6T8ZzeJdMjSTKaD4= +github.com/go-openapi/runtime v0.19.24/go.mod h1:Lm9YGCeecBnUUkFTxPC4s1+lwrkJ0pthx8YvyjCfkgk= +github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= +github.com/go-openapi/spec v0.18.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= +github.com/go-openapi/spec v0.19.2/go.mod h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY= +github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= +github.com/go-openapi/spec v0.19.6/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk= +github.com/go-openapi/spec v0.19.8 h1:qAdZLh1r6QF/hI/gTq+TJTvsQUodZsM7KLqkAJdiJNg= +github.com/go-openapi/spec v0.19.8/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk= +github.com/go-openapi/spec v0.19.13 h1:AcZVcWsrfW7LqyHKVbTZYpFF7jQcMxmAsWrw2p/b9ew= +github.com/go-openapi/spec v0.19.13/go.mod h1:gwrgJS15eCUgjLpMjBJmbZezCsw88LmgeEip0M63doA= +github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= +github.com/go-openapi/strfmt v0.18.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= +github.com/go-openapi/strfmt v0.19.0/go.mod h1:+uW+93UVvGGq2qGaZxdDeJqSAqBqBdl+ZPMF/cC8nDY= +github.com/go-openapi/strfmt v0.19.2/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU= +github.com/go-openapi/strfmt v0.19.3/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU= +github.com/go-openapi/strfmt v0.19.4/go.mod h1:eftuHTlB/dI8Uq8JJOyRlieZf+WkkxUuk0dgdHXr2Qk= +github.com/go-openapi/strfmt v0.19.5 h1:0utjKrw+BAh8s57XE9Xz8DUBsVvPmRUB6styvl9wWIM= +github.com/go-openapi/strfmt v0.19.5/go.mod h1:eftuHTlB/dI8Uq8JJOyRlieZf+WkkxUuk0dgdHXr2Qk= +github.com/go-openapi/strfmt v0.19.10 h1:FEv6Pt/V4wLwP4vOCZbWlpfmi8kj4UiRip34IDE6SGw= +github.com/go-openapi/strfmt v0.19.10/go.mod h1:qBBipho+3EoIqn6YDI+4RnQEtj6jT/IdKm+PAlXxSUc= +github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= +github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= +github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.19.7/go.mod h1:ao+8BpOPyKdpQz3AOJfbeEVpLmWAvlT1IfTe5McPyhY= +github.com/go-openapi/swag v0.19.9 h1:1IxuqvBUU3S2Bi4YC7tlP9SJF1gVpCvqN0T2Qof4azE= +github.com/go-openapi/swag v0.19.9/go.mod h1:ao+8BpOPyKdpQz3AOJfbeEVpLmWAvlT1IfTe5McPyhY= +github.com/go-openapi/swag v0.19.11 h1:RFTu/dlFySpyVvJDfp/7674JY4SDglYWKztbiIGFpmc= +github.com/go-openapi/swag v0.19.11/go.mod h1:Uc0gKkdR+ojzsEpjh39QChyu92vPgIr72POcgHMAgSY= +github.com/go-openapi/validate v0.18.0/go.mod 
h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= +github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA= +github.com/go-openapi/validate v0.19.3/go.mod h1:90Vh6jjkTn+OT1Eefm0ZixWNFjhtOH7vS9k0lo6zwJo= +github.com/go-openapi/validate v0.19.10/go.mod h1:RKEZTUWDkxKQxN2jDT7ZnZi2bhZlbNMAuKvKB+IaGx8= +github.com/go-openapi/validate v0.19.11 h1:8lCr0b9lNWKjVjW/hSZZvltUy+bULl7vbnCTsOzlhPo= +github.com/go-openapi/validate v0.19.11/go.mod h1:Rzou8hA/CBw8donlS6WNEUQupNvUZ0waH08tGe6kAQ4= +github.com/go-openapi/validate v0.19.12 h1:mPLM/bfbd00PGOCJlU0yJL7IulkZ+q9VjPv7U11RMQQ= +github.com/go-openapi/validate v0.19.12/go.mod h1:Rzou8hA/CBw8donlS6WNEUQupNvUZ0waH08tGe6kAQ4= +github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= +github.com/go-playground/locales v0.13.0 h1:HyWk6mgj5qFqCT5fjGBuRArbVDfE4hi8+e8ceBS/t7Q= +github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= +github.com/go-playground/universal-translator v0.17.0 h1:icxd5fm+REJzpZx7ZfpaD876Lmtgy7VtROAbHHXk8no= +github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA= +github.com/go-playground/validator/v10 v10.2.0 h1:KgJ0snyC2R9VXYN2rneOtQcw5aHQB1Vv0sFl1UcHBOY= +github.com/go-playground/validator/v10 v10.2.0/go.mod h1:uOYAAleCW8F/7oMFd6aG0GOhaH6EGOAJShg8Id5JGkI= +github.com/go-playground/validator/v10 v10.4.1 h1:pH2c5ADXtd66mxoE0Zm9SUhxE20r7aM3F26W0hOn+GE= +github.com/go-playground/validator/v10 v10.4.1/go.mod h1:nlOn6nFhuKACm19sB/8EGNn9GlaMV7XkbRSipzJ0Ii4= +github.com/go-redis/redis v6.15.5+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA= +github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-sql-driver/mysql v1.4.1 h1:g24URVg0OFbNUTx9qqY1IRZ9D9z3iPyi5zKhQZpNwpA= github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= +github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= +github.com/go-sql-driver/mysql v1.5.1-0.20200311113236-681ffa848bae h1:L6V0ANsMIMdLgXly241UXhXNFWYgXbgjHupTAAURrV0= +github.com/go-sql-driver/mysql v1.5.1-0.20200311113236-681ffa848bae/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= +github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0= +github.com/gobuffalo/depgen v0.0.0-20190329151759-d478694a28d3/go.mod h1:3STtPUQYuzV0gBVOY3vy6CfMm/ljR4pABfrTeHNLHUY= +github.com/gobuffalo/depgen v0.1.0/go.mod h1:+ifsuy7fhi15RWncXQQKjWS9JPkdah5sZvtHc2RXGlg= +github.com/gobuffalo/envy v1.6.15/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= +github.com/gobuffalo/envy v1.7.0/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= +github.com/gobuffalo/flect v0.1.0/go.mod h1:d2ehjJqGOH/Kjqcoz+F7jHTBbmDb38yXA598Hb50EGs= +github.com/gobuffalo/flect v0.1.1/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI= +github.com/gobuffalo/flect v0.1.3/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI= +github.com/gobuffalo/genny v0.0.0-20190329151137-27723ad26ef9/go.mod h1:rWs4Z12d1Zbf19rlsn0nurr75KqhYp52EAGGxTbBhNk= +github.com/gobuffalo/genny v0.0.0-20190403191548-3ca520ef0d9e/go.mod h1:80lIj3kVJWwOrXWWMRzzdhW3DsrdjILVil/SFKBzF28= +github.com/gobuffalo/genny v0.1.0/go.mod 
h1:XidbUqzak3lHdS//TPu2OgiFB+51Ur5f7CSnXZ/JDvo= +github.com/gobuffalo/genny v0.1.1/go.mod h1:5TExbEyY48pfunL4QSXxlDOmdsD44RRq4mVZ0Ex28Xk= +github.com/gobuffalo/gitgen v0.0.0-20190315122116-cc086187d211/go.mod h1:vEHJk/E9DmhejeLeNt7UVvlSGv3ziL+djtTr3yyzcOw= +github.com/gobuffalo/gogen v0.0.0-20190315121717-8f38393713f5/go.mod h1:V9QVDIxsgKNZs6L2IYiGR8datgMhB577vzTDqypH360= +github.com/gobuffalo/gogen v0.1.0/go.mod h1:8NTelM5qd8RZ15VjQTFkAW6qOMx5wBbW4dSCS3BY8gg= +github.com/gobuffalo/gogen v0.1.1/go.mod h1:y8iBtmHmGc4qa3urIyo1shvOD8JftTtfcKi+71xfDNE= +github.com/gobuffalo/logger v0.0.0-20190315122211-86e12af44bc2/go.mod h1:QdxcLw541hSGtBnhUc4gaNIXRjiDppFGaDqzbrBd3v8= +github.com/gobuffalo/mapi v1.0.1/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= +github.com/gobuffalo/mapi v1.0.2/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= +github.com/gobuffalo/packd v0.0.0-20190315124812-a385830c7fc0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4= +github.com/gobuffalo/packd v0.1.0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4= +github.com/gobuffalo/packr/v2 v2.0.9/go.mod h1:emmyGweYTm6Kdper+iywB6YK5YzuKchGtJQZ0Odn4pQ= +github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/VCm/3ptBN+0= +github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw= +github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3 h1:gyjaxf+svBWX08ZjK86iN9geUJF0H6gp2IRKX6Nf6/I= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod 
h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/goombaio/namegenerator v0.0.0-20181006234301-989e774b106e h1:XmA6L9IPRdUr28a+SK/oMchGgQy159wvzXA5tJ7l+40= github.com/goombaio/namegenerator v0.0.0-20181006234301-989e774b106e/go.mod h1:AFIo+02s+12CEg8Gzz9kzhCbmbq6JcKNrhHffCGA9z4= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= +github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.0/go.mod 
h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= +github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= +github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= +github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= +github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= +github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-version v1.2.0 h1:3vNe/fWF5CBgRIguda1meWhsZHy3m8gCJ5wx+dIzX/E= github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go-version v1.2.1 h1:zEfKbn2+PDgroKdiOzqiE8rsmLqU2uwi5PB5pBJ3TkI= +github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= +github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= +github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= +github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= +github.com/hinshun/vt10x v0.0.0-20180616224451-1954e6464174/go.mod h1:DqJ97dSdRW1W22yXSB90986pcOyQ7r45iio1KN2ez1A= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= github.com/inconshreveable/mousetrap 
v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= github.com/jamiealquiza/tachymeter v2.0.0+incompatible h1:mGiF1DGo8l6vnGT8FXNNcIXht/YmjzfraiUprXYwJ6g= github.com/jamiealquiza/tachymeter v2.0.0+incompatible/go.mod h1:Ayf6zPZKEnLsc3winWEXJRkTBhdHo58HODAu1oFJkYU= +github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jinzhu/gorm v1.9.12 h1:Drgk1clyWT9t9ERbzHza6Mj/8FY/CqMyVzOiHviMo6Q= github.com/jinzhu/gorm v1.9.12/go.mod h1:vhTjlKSJUTWNtcbQtrMBFCxy7eXTzeCAzfL5fBZT/Qs= github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E= github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc= github.com/jinzhu/now v1.0.1/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8= +github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= +github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68= +github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 h1:iQTw/8FWTuc7uiaSepXwyf3o52HaUYcV+Tu66S3F5GA= github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8= +github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4= +github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA= +github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs= +github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/gotool v1.0.0/go.mod 
h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2 h1:DB17ag19krx9CFsz4o3enTrPXyIXCl+2iCXH/aMAp9s= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.4/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/leodido/go-urn v1.2.0 h1:hpXL4XnriNwQ/ABnpepYM/1vCLWNDfUNts8dX3xTG6Y= +github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= +github.com/lib/pq v1.1.1 h1:sJZmqHoEaY7f+NPP8pgLB/WxulyR3fewgCM2qaSlBb4= github.com/lib/pq v1.1.1/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.8.0 h1:9xohqzkUwzR4Ga4ivdTcawVS89YSDVxXMa3xJX3cGzg= +github.com/lib/pq v1.8.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= +github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= github.com/logrusorgru/grokky v0.0.0-20180829062225-47edf017d42c h1:S3P1IbG7Z7V2p9juEttr1oRwozZd2kxw+RQiYBYB1wQ= github.com/logrusorgru/grokky v0.0.0-20180829062225-47edf017d42c/go.mod h1:YnDG6D6tn35XF4NJXUtoqoC84FYlBPao8PZ8QzN4Zxo= github.com/lucasb-eyer/go-colorful v1.0.2/go.mod h1:0MS4r+7BZKSJ5mw4/S5MPN+qHFF1fYclkSPilDOKW0s= github.com/lucasb-eyer/go-colorful v1.0.3/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0= +github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.7.1 h1:mdxE1MF9o53iCb2Ghj1VfWvh7ZOwHpnVG/xwXrV90U8= +github.com/mailru/easyjson v0.7.1/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= +github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA= +github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod 
h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE= +github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-colorable v0.1.2 h1:/bC9yWikZXAL9uJdulbSfyVNIR3n3trXl+v8+1sx8mU= +github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-runewidth v0.0.8 h1:3tS41NlGYSmhhe/8fhGRzc+z3AYCw1Fe1WAyLuujKs0= github.com/mattn/go-runewidth v0.0.8/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0= github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +github.com/mattn/go-sqlite3 v1.14.4/go.mod h1:WVKg1VTActs4Qso6iwGbiFih2UIHo0ENGwNd0Lj+XmI= +github.com/mattn/go-sqlite3 v1.14.5 h1:1IdxlwTNazvbKJQSxoJ5/9ECbEeaTTyeU7sEAZ5KKTQ= +github.com/mattn/go-sqlite3 v1.14.5/go.mod h1:WVKg1VTActs4Qso6iwGbiFih2UIHo0ENGwNd0Lj+XmI= github.com/mattn/go-sqlite3 v2.0.1+incompatible/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= github.com/mattn/go-sqlite3 v2.0.3+incompatible h1:gXHsfypPkaMZrKbD5209QV9jbUTJKjyR5WD3HYQSd+U= github.com/mattn/go-sqlite3 v2.0.3+incompatible/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b h1:j7+1HpAFS1zy5+Q4qx1fWh90gTKwiN4QCGoY9TWyyO4= +github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= +github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= +github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= +github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.3.2 h1:mRS76wmkOn3KkKAyXDu42V+6ebnXWIztFSYGN7GeoRg= +github.com/mitchellh/mapstructure v1.3.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= 
+github.com/mitchellh/mapstructure v1.3.3 h1:SzB1nHZ2Xi+17FP0zVQBHIZqvwRN9408fJO8h+eeNA8= +github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 h1:RWengNIwukTxcDr9M+97sNutRR1RKhG96O6jWumTTnw= +github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8= +github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg= +github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU= +github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k= +github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w= +github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= +github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= +github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs= +github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= +github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= github.com/olekukonko/tablewriter v0.0.4 h1:vHD/YYe1Wolo78koG299f7V/VAS08c6IpCLn+Ejf/w8= github.com/olekukonko/tablewriter v0.0.4/go.mod h1:zq6QwlOf5SlnkVbMSr5EoBv3636FWnp+qbPhuoO21uA= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ= 
github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI= github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis= +github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= +github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxSfWAKL3wpBW7V8scJMt8N8gnaMCS9E/cA= +github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= +github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= +github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= github.com/oschwald/geoip2-golang v1.4.0 h1:5RlrjCgRyIGDz/mBmPfnAF4h8k0IAcRv9PvrpOfz+Ug= github.com/oschwald/geoip2-golang v1.4.0/go.mod h1:8QwxJvRImBH+Zl6Aa6MaIcs5YdlZSTKtzmPGzQqi9ng= github.com/oschwald/maxminddb-golang v1.6.0 h1:KAJSjdHQ8Kv45nFIbtoLGrGWqHFajOIm7skTyz/+Dls= github.com/oschwald/maxminddb-golang v1.6.0/go.mod h1:DUJFucBg2cvqx42YmDa/+xHvb0elJtOm3o4aFQ/nb/w= +github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= +github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pelletier/go-toml v1.4.0/go.mod h1:PN7xzY2wHTK0K9p34ErDQMlFxa51Fk0OUruD3k1mMwo= +github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE= +github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= +github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= +github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= 
github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og= github.com/prometheus/client_golang v1.5.1 h1:bdHYieyGlH+6OLEk2YQha8THib30KP0/yD0YH9m6xcA= github.com/prometheus/client_golang v1.5.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= +github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_golang v1.8.0 h1:zvJNkoCFAnYFNC24FV8nW4JdRJ3GIFcLbg65lL/JDcw= +github.com/prometheus/client_golang v1.8.0/go.mod h1:O9VU6huf47PktckDQfMTX0Y8tY0/7TSWwj+ITvv0TnM= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= github.com/prometheus/common v0.9.1 h1:KOMtN28tlbam3/7ZKEYKHhKoJZYYj3gMH4uc62x7X7U= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= +github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= +github.com/prometheus/common v0.14.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= +github.com/prometheus/common v0.15.0 h1:4fgOnadei3EZvgRwxJ7RMpG1k1pOZth5Pc13tyspaKM= +github.com/prometheus/common v0.15.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.8 h1:+fpWZdT24pJBiqJdAwYBjPSk+5YmQzYNPYzQsdzLkt8= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= +github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.2.0 h1:wH4vA7pcjKuZzjF7lM8awk4fnuJO6idemZXoKnULUx4= +github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/prom2json v1.3.0 
h1:BlqrtbT9lLH3ZsOVhXPsHzFrApCTKRifB7gjJuypu6Y= github.com/prometheus/prom2json v1.3.0/go.mod h1:rMN7m0ApCowcoDlypBHlkNbp5eJQf/+1isKykIP5ZnM= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= +github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rivo/tview v0.0.0-20200219210816-cd38d7432498/go.mod h1:6lkG1x+13OShEf0EaOCaTQYyB7d5nSbb181KtjlS+84= github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/godef v1.1.2 h1:c5mCx0EcCORJOdVMREX7Lgh1raTxAHFmOfXdEB9u8Jw= +github.com/rogpeppe/godef v1.1.2/go.mod h1:WtY9A/ovuQ+UakAJ1/CEqwwulX/WJjb2kgkokCHi/GY= github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= github.com/sanity-io/litter v1.2.0/go.mod h1:JF6pZUFgu2Q0sBZ+HSV35P8TVPI1TTzEwyu9FXAw2W4= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/sevlyar/go-daemon v0.1.5 h1:Zy/6jLbM8CfqJ4x4RPr7MJlSKt90f00kNM1D401C+Qk= github.com/sevlyar/go-daemon v0.1.5/go.mod h1:6dJpPatBT9eUwM5VCw9Bt6CdX9Tk6UWvhW3MebLDRKE= github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.5.0 h1:1N5EYkVAPEywqZRJd7cwnRtCb6xJx7NH3T3WUTF980Q= github.com/sirupsen/logrus v1.5.0/go.mod h1:+F7Ogzej0PZc/94MaYx/nvG9jOFMD2osvC3s+Squfpo= +github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/sirupsen/logrus v1.7.0 h1:ShrD1U9pZB12TX0cVy0DtePoCH97K8EtX+mg7ZARUtM= +github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero 
v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.7 h1:FfTH+vuMXOas8jmfb5/M7dzEYx7LpcLb7a0LPe34uOU= github.com/spf13/cobra v0.0.7/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= +github.com/spf13/cobra v1.1.1 h1:KfztREH0tPxJJ+geloSLaAkaPkr4ki2Er5quFV1TDo4= +github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= +github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= +github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= +github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= +github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/testify v0.0.0-20161117074351-18a02ba4a312/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.2.1/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= +github.com/tidwall/gjson v1.6.0/go.mod h1:P256ACg0Mn+j1RXIDXoss50DeIABTYK1PULOJHhxOls= +github.com/tidwall/match v1.0.1/go.mod h1:LujAq0jyVjBy028G1WhWfIzbpQfMO8bBZ6Tyb0+pL9E= +github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= +github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= +github.com/ugorji/go v1.1.7 h1:/68gy2h+1mWMrwZFeD1kQialdSzAb432dtpeJ42ovdo= +github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= 
+github.com/ugorji/go v1.2.0 h1:6eXlzYLLwZwXroJx9NyqbYcbv/d93twiOzQLDewE6qM= +github.com/ugorji/go v1.2.0/go.mod h1:1ny++pKMXhLWrwWV5Nf+CbOuZJhMoaFD+0GMFfd8fEc= +github.com/ugorji/go/codec v1.1.7 h1:2SvQaVZ1ouYrrKKwoSk2pzd4A9evlKJb9oTL+OaLUSs= +github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= +github.com/ugorji/go/codec v1.2.0 h1:As6RccOIlbm9wHuWYMlB30dErcI+4WiKWsYsmPkyrUw= +github.com/ugorji/go/codec v1.2.0/go.mod h1:dXvG35r7zTX6QImXOSFhGMmKtX+wJ7VTWzGvYQGIjBs= +github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw= +github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= +github.com/xdg/stringprep v0.0.0-20180714160509-73f8eece6fdc/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= +go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= +go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= +go.mongodb.org/mongo-driver v1.3.0/go.mod h1:MSWZXKOynuguX+JSvwP8i+58jYCXxbia8HS3gZBapIE= +go.mongodb.org/mongo-driver v1.3.4 h1:zs/dKNwX0gYUtzwrN9lLiR15hCO0nDwQj5xXx+vjCdE= +go.mongodb.org/mongo-driver v1.3.4/go.mod h1:MSWZXKOynuguX+JSvwP8i+58jYCXxbia8HS3gZBapIE= +go.mongodb.org/mongo-driver v1.4.2/go.mod h1:WcMNYLx/IlOxLe6JRJiv2uXuCz6zBLndR4SoGjYphSc= +go.mongodb.org/mongo-driver v1.4.3 h1:moga+uhicpVshTyaqY9L23E6QqwcHRUv1sqyOsoyOO8= +go.mongodb.org/mongo-driver v1.4.3/go.mod h1:WcMNYLx/IlOxLe6JRJiv2uXuCz6zBLndR4SoGjYphSc= +go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= +go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.13.0/go.mod 
h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190530122614-20be4c3c3ed5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191205180655-e7c4368fe9dd h1:GGJVjV8waZKRHrgwvtH66z9ZGVurTD1MT0n1Bb+q4aM= golang.org/x/crypto v0.0.0-20191205180655-e7c4368fe9dd/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201116153603-4be66e5b6582 h1:0WDrJ1E7UolDk1KhTXxxw3Fc8qtk5x7dHP431KHEJls= +golang.org/x/crypto v0.0.0-20201116153603-4be66e5b6582/go.mod h1:tCqSYrHVcf3i63Co2FzBkTCo2gdF6Zak62921dSfraU= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3 h1:XQyxROzUlZH+WIQwySDgnISgOivlhjIEwaQaJEJrrN0= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint 
v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20200302205851-738671d3881b h1:Wh+f8QHJXR411sJR8/vRBTZ7YapZaRvUcLFFJhusH0k= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0 h1:KU7oHjnv3XNWfa5COkzUifxZmxp1TyI7ImMXqFxLwvQ= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190320064053-1272bf9dcd53/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980 h1:dfGZHvZk057jK2MCeWus/TowKpJ8y4AmooUzdBSR9GU= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod 
h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b h1:0mm1VjtFUOIlE1SbDlwjYaDxZVDP2S5ou6y0gSgXHu8= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200602114024-627f9648deb9 h1:pNX+40auqi2JqRfOP1akLGtYcn15TUbkhwuCO3foqqM= +golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202 h1:VvcQYSHwXgi7W+TpUR6A9g6Up98WAHf3f/ulnJ62IyA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b h1:uwuIcX0g4Yl1NC5XAz37xsr2lTtcqevgzYNVt49waME= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190321052220-f7bb7a8bee54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190419153524-e8e3143a4f4a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190530182044-ad28b68e88f1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190531175056-4c3a928424d2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190626150813-e07cf5db2756/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191224085550-c709ea063b76/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82 h1:ywK/j/KkyTHcdyYSZNXGjMwgmDSfjglYZ3vStQ/gSCU= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4 h1:sfkvUWPNGwSV+8/fNqctR5lS2AqCSqYwXdrjCxp/dXo= golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd h1:xhmwyvizuTgC2qz7ZlMluP20uW+C3Rm0FD/WLDX8884= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201015000850-e3ed0017c211/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201116161645-c061ba923fbb h1:+EHGEcgeA7ESswi5i4ojbo7sRzlz7vWoxFGcMuEZtu8= +golang.org/x/sys v0.0.0-20201116161645-c061ba923fbb/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/term v0.0.0-20201113234701-d7a72108b828 h1:htWEtQEuEVJ4tU/Ngx7Cd/4Q7e3A5Up1owgyBtVsTwk= +golang.org/x/term v0.0.0-20201113234701-d7a72108b828/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4 h1:0YWbFKbhXG/wIiuHDSKpS0Iy7FSA+u45VtBMfQcFTTc= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0 h1:/5xXl8Y5W96D+TtHSlonuFqGHIWVuyCkGJLwGh9JJFs= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190125232054-d66bd3c5d5a6/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd h1:/e+gpKk9r3dJobndpTytxS2gOy6m5uvpg+ISQoEcusQ= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190329151228-23e29df326fe/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190416151739-9c9e1878f421/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190420181800-aa740d480789/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190531172133-b3315ee88b7d/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools 
v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200226224502-204d844ad48d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200422022333-3d57cf2e726e h1:3Dzrrxi54Io7Aoyb0PYLsI47K2TxkRQg+cqUn+m04do= golang.org/x/tools v0.0.0-20200422022333-3d57cf2e726e/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 
h1:Nw54tB0rB7hY/N0NQvRW8DG4Yk3Q6T9cu9RcFQDu1tc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a h1:Ob5/580gVHBJZgXnff1cZDbG+xLtMVE5mDRTe+nIsX4= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.0 h1:G+97AoqBnmZIT91cLG/EkCoK9NSelj64P8bOHHNmGn0= google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.21.1 h1:j6XxA85m/6txkUCHvzlV5f+HBNl/1r5cZ2A/3IEFOO8= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0 h1:rRYRFMVgRv6E0D70Skyfsr28tDXIuuPZyWGMPdMcnXg= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyzM= +google.golang.org/protobuf v1.23.0/go.mod 
h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= +gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8= gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= @@ -314,6 +985,7 @@ gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkep gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/tomb.v2 v2.0.0-20161208151619-d5d1b5820637 h1:yiW+nvdHb9LVqSHQBXfZCieqV4fzYhNBql77zY0ykqs= gopkg.in/tomb.v2 v2.0.0-20161208151619-d5d1b5820637/go.mod h1:BHsqpu/nsuzkT5BpiH1EMZPLyqSMM8JbIavyFACoFNk= +gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= @@ -321,4 +993,18 @@ gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c h1:grhR+C34yXImVGp7EzNk+DTIk+323eIUWOmEevy6bDo= +gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools 
v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= +sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU= diff --git a/mkdocs.yml b/mkdocs.yml index 0a0c8334f..02995e2fe 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -1,76 +1,14 @@ site_name: Crowdsec nav: - Home: index.md - - Getting Started: - - Installation : getting_started/installation.md - - Crowdsec Tour: getting_started/crowdsec-tour.md - - Concepts & Glossary : getting_started/concepts.md - - FAQ: getting_started/FAQ.md - - Guide: - - Overview: guide/crowdsec/overview.md - - Acquisition: guide/crowdsec/acquisition.md - - Parsers: guide/crowdsec/parsers.md - - Enrichers: guide/crowdsec/enrichers.md - - Scenarios: guide/crowdsec/scenarios.md - - Cscli: guide/cscli.md - - Simulation Mode: guide/crowdsec/simulation.md - - Cheat Sheets: - - Ban Management: cheat_sheets/ban-mgmt.md - - Configuration Management: cheat_sheets/config-mgmt.md - - Hub's taxonomy: https://hub.crowdsec.net/fields - - Debugging Parsers & Scenarios: cheat_sheets/debugging_configs.md - - Observability: - - Overview: observability/overview.md - - Logs: observability/logs.md - - Metrics: - - Prometheus: observability/prometheus.md - - Command line: observability/command_line.md - - Dashboard: observability/dashboard.md - - References: - - Parsers format: references/parsers.md - - Scenarios format: references/scenarios.md - - Outputs format: references/output.md - - Write Configurations: - - Requirements: write_configurations/requirements.md - - Acquisition: write_configurations/acquisition.md - - Parsers: write_configurations/parsers.md - - Scenarios: write_configurations/scenarios.md - - Whitelists: write_configurations/whitelist.md - - Expressions: write_configurations/expressions.md - - Bouncers: - - Overview : bouncers/index.md - - Nginx: - - Installation : "https://github.com/crowdsecurity/cs-nginx-blocker#installation" - - Configuration : "https://github.com/crowdsecurity/cs-nginx-blocker#configuration" - - Netfilter: - - Installation : "https://github.com/crowdsecurity/cs-netfilter-blocker#installation" - - Configuration : "https://github.com/crowdsecurity/cs-netfilter-blocker#configuration" - - Custom: - - Installation: "https://github.com/crowdsecurity/cs-custom-blocker#installation" - - Configuration: "https://github.com/crowdsecurity/cs-custom-blocker#configuration" - - Cloudflare: - - Installation: "https://github.com/crowdsecurity/cs-cloudflare-blocker#installation" - - Configuration: "https://github.com/crowdsecurity/cs-cloudflare-blocker#configuration" - - Wordpress: - - Installation : "https://github.com/crowdsecurity/cs-wordpress-plugin/#installation" - - Configuration : "https://github.com/crowdsecurity/cs-wordpress-plugin/#configuration" - - Contributing: - - General: contributing/ - - Writing Output Plugins: references/plugins_api.md - - Cscli commands: - - API: cscli/cscli_api.md - - Backup: cscli/cscli_backup.md - - Bans: cscli/cscli_ban.md - - Metrics: 
cscli/cscli_metrics.md - - Update: cscli/cscli_update.md - - Install configurations: cscli/cscli_install.md - - Remove configurations: cscli/cscli_remove.md - - Upgrade configurations: cscli/cscli_upgrade.md - - List configurations: cscli/cscli_list.md - - Inspect configurations: cscli/cscli_inspect.md - - Manage simulation: cscli/cscli_simulation.md - - Dashboard: cscli/cscli_dashboard.md - - About: about.md + - Crowdsec v0: '!include ./docs/v0.3.X/mkdocs.yml' + - Crowdsec v1 : '!include ./docs/v1.X/mkdocs.yml' + - Developers : https://crowdsecurity.github.io/api_doc/index.html?urls.primaryName=LAPI" target="_blank + - Hub : https://hub.crowdsec.net/" target="_blank + - Releases : https://github.com/crowdsecurity/crowdsec/releases" target="_blank + - FAQ: faq.md + + markdown_extensions: - codehilite: guess_lang: false @@ -79,8 +17,12 @@ markdown_extensions: - admonition theme: name: material - logo: assets/images/crowdsec_logo1.png - favicon: assets/images/crowdsec_logo1.png + logo: assets/images/crowdsec2.png + favicon: assets/images/crowdsec2.png + features: + - navigation.tabs + - navigation.expand + - navigation.instant palette: primary: "#3d85c6" language: en @@ -94,169 +36,327 @@ repo_name: GitHub plugins: - search - macros + - monorepo google_analytics: - UA-164365852-3 - auto -extra: - doc: - new_issue: "[new documentation issue](https://github.com/crowdsecurity/crowdsec/issues/new)" - discourse: "[CrowdSecurity discourse](http://discourse.crowdsec.net)" - community: "[community](http://discourse.crowdsec.net)" - hub: - name: Crowdsec Hub - htmlname: "[Crowdsec Hub](https://hub.crowdsec.net/)" - url: "https://hub.crowdsec.net/" - plugins_url: "https://hub.crowdsec.net/browse/#bouncers" - scenarios_url: "https://hub.crowdsec.net/browse/#configurations" - parsers_url: "https://hub.crowdsec.net/browse/#configurations" - collections_url: "https://hub.crowdsec.net/browse/#collections" - crowdsec: - name: Crowdsec - Name: Crowdsec - bin: crowdsec-agent - path: /usr/bin/crowdsec-agent - url: https://github.com/crowdsecurity/crowdsec - bugreport: "https://github.com/crowdsecurity/crowdsec/issues" - main_log: "/var/log/crowdsec.log" - download_url: https://github.com/crowdsecurity/crowdsec/releases/latest - cli: - name: cscli - Name: Cscli - bin: cscli - path: /usr/bin/cscli - main_doc: /cscli/cscli/ - url: "https://github.com/crowdsecurity/crowdsec" - bugreport: "https://github.com/crowdsecurity/crowdsec/issues" - api_doc: /cscli/cscli_api/ - ban_doc: /cscli/cscli_ban/ - metrics_doc: /cscli/cscli_metrics/ - remove_doc: /cscli/cscli_remove/ - install_doc: /cscli/cscli_install/ - list_doc: /cscli/cscli_list/ - update_doc: /cscli/cscli_update/ - upgrade_doc: /cscli/cscli_upgrade/ - backup_doc: /cscli/cscli_backup/ - simulation_doc: /cscli/cscli_simulation/ - config: - cli_dir: /etc/crowdsec/cscli/ - crowdsec_dir: "/etc/crowdsec/config/" - acquis_path: "/etc/crowdsec/config/acquis.yaml" - bouncers: - name: bouncers - Name: Bouncers - url: "https://hub.crowdsec.net/browse/#bouncers" - htmlname: "[bouncers](/bouncers/)" - Htmlname: "[Bouncers](/bouncers/)" - plugins: - name: backend plugins - configpath: "/etc/crowdsec/plugins/backend/" - binpath: "/usr/local/lib/crowdsec/plugins/" - metabase: - name: metabase - url: https://github.com/crowdsecurity/crowdsec - wizard: - name: wizard - url: "https://github.com/crowdsecurity/crowdsec" - bin: "./wizard.sh" - bugreport: "https://github.com/crowdsecurity/crowdsec/issues" - ref: - parser: "[parser](/references/parsers/)" - Parser: 
"[Parser](/references/parsers/)" - scenario: "[scenarios](/references/scenarios/)" - Scenario: "[Scenarios](/references/scenarios/)" - acquis: "[acquisition](/guide/crowdsec/acquisition/)" - Acquis: "[Acquisition](/guide/crowdsec/acquisition/)" - output: "[output](/references/output/)" - Output: "[Output](/references/output/)" -# All the technical terms - event: - name: event - Name: Event - htmlname: "[event](/getting_started/concepts/#event)" - Htmlname: "[Event](/getting_started/concepts/#event)" - expr: - name: expr - Name: Expr - htmlname: "[expr](/write_configurations/expressions/)" - Htmlname: "[Expr](/write_configurations/expressions/)" - filter: - name: filter - Name: Filter - htmlname: "[filter](/references/parsers/#filter)" - Htmlname: "[Filter](/references/parsers/#filter)" - onsuccess: - name: onsuccess - Name: Onsuccess - htmlname: "[onsuccess](/references/parsers/#onsuccess)" - Htmlname: "[Onsuccess](/references/parsers/#onsuccess)" - statics: - name: statics - Name: Statics - htmlname: "[statics](/references/parsers/#statics)" - Htmlname: "[Statics](/references/parsers/#statics)" - parsers: - name: parsers - Name: Parsers - htmlname: "[parsers](/getting_started/concepts/#parser)" - Htmlname: "[Parsers](/getting_started/concepts/#parser)" - scenarios: - name: scenarios - Name: Scenarios - htmlname: "[scenarios](/getting_started/concepts/#scenario)" - Htmlname: "[Scenarios](/getting_started/concepts/#scenario)" - collections: - name: collections - Name: Collections - htmlname: "[collections](/getting_started/concepts/#collections)" - Htmlname: "[Collections](/getting_started/concepts/#collections)" - timeMachine: - name: timeMachine - Name: TimeMachine - htmlname: "[timeMachine](/getting_started/concepts/#timemachine)" - Htmlname: "[TimeMachine](/getting_started/concepts/#timemachine)" - overflow: - name: overflow - Name: Overflow - htmlname: "[overflow](/getting_started/concepts/#overflow-or-signaloccurence)" - Htmlname: "[Overflow](/getting_started/concepts/#overflow-or-signaloccurence)" - whitelists: - name: whitelists - Name: Whitelists - htmlname: "[whitelists](/write_configurations/whitelist/)" - Htmlname: "[Whitelists](/write_configurations/whitelist/)" - signal: - name: alert - Name: Alert - htmlname: "[alert](/getting_started/concepts/#overflow-or-signaloccurence)" - Htmlname: "[Alert](/getting_started/concepts/#overflow-or-signaloccurence)" -#scenario stuff - stage: - name: stage - Name: Stage - htmlname: "[stage](/getting_started/concepts/#stages)" - Htmlname: "[Stage](/getting_started/concepts/#stages)" - leakspeed: - name: leakspeed - Name: Leakspeed - htmlname: "[leakspeed](/references/scenarios/#leakspeed)" - Htmlname: "[Leakspeed](/references/scenarios/#leakspeed)" - capacity: - name: capacity - Name: Capacity - htmlname: "[capacity](/references/scenarios/#capacity)" - Htmlname: "[Capacity](/references/scenarios/#capacity)" - duration: - name: duration - Name: Duration - htmlname: "[duration](/references/scenarios/#duration)" - Htmlname: "[Duration](/references/scenarios/#duration)" - prometheus: - name: prometheus - htmlname: "[prometheus](https://github.com/prometheus/client_golang)" - api: - name: API - htmlname: "[API](TBD)" - topX: - name: topX - htmlname: "[topX](TBD)" + +extra: + swagger_url: "https://raw.githubusercontent.com/crowdsecurity/crowdsec/wip_lapi/pkg/models/localapi_swagger.yaml" + v0X: + doc: + new_issue: "[new documentation issue](https://github.com/crowdsecurity/crowdsec/issues/new)" + discourse: "[CrowdSecurity 
discourse](http://discourse.crowdsec.net)" + community: "[community](http://discourse.crowdsec.net)" + hub: + name: Crowdsec Hub + htmlname: "[Crowdsec Hub](https://hub.crowdsec.net/)" + url: "https://hub.crowdsec.net/" + plugins_url: "https://hub.crowdsec.net/browse/#bouncers" + scenarios_url: "https://hub.crowdsec.net/browse/#configurations" + parsers_url: "https://hub.crowdsec.net/browse/#configurations" + collections_url: "https://hub.crowdsec.net/browse/#collections" + crowdsec: + name: Crowdsec + Name: Crowdsec + bin: crowdsec-agent + path: /usr/bin/crowdsec-agent + url: https://github.com/crowdsecurity/crowdsec + bugreport: "https://github.com/crowdsecurity/crowdsec/issues" + main_log: "/var/log/crowdsec.log" + download_url: https://github.com/crowdsecurity/crowdsec/releases + cli: + name: cscli + Name: cscli + main_doc: /Crowdsec/v0/cscli/cscli/ + url: "https://github.com/crowdsecurity/crowdsec" + bugreport: "https://github.com/crowdsecurity/crowdsec/issues" + # alerts_doc: /Crowdsec/v1/cscli/cscli_alerts/ + # decisions_doc: /Crowdsec/v1/cscli/cscli_decisions/ + # collections_doc: /Crowdsec/v1/cscli/cscli_collections/ + # parsers_doc: /Crowdsec/v1/cscli/cscli_parsers/ + # scenarios_doc: /Crowdsec/v1/cscli/cscli_scenarios/ + + # api_doc: /Crowdsec/v0/cscli/cscli_api/ + # ban_doc: /Crowdsec/v0/cscli/cscli_ban/ + # metrics_doc: /Crowdsec/v0/cscli/cscli_metrics/ + # remove_doc: /Crowdsec/v0/cscli/cscli_remove/ + # install_doc: /Crowdsec/v0/cscli/cscli_install/ + # list_doc: /Crowdsec/v0/cscli/cscli_list/ + # update_doc: /Crowdsec/v0/cscli/cscli_update/ + # upgrade_doc: /Crowdsec/v0/cscli/cscli_upgrade/ + # backup_doc: /Crowdsec/v0/cscli/cscli_backup/ + # simulation_doc: /Crowdsec/v0/cscli/cscli_simulation/ + config: + cli_dir: /etc/crowdsec/cscli/ + crowdsec_dir: "/etc/crowdsec/config/" + acquis_path: "/etc/crowdsec/config/acquis.yaml" + bouncers: + name: bouncers + Name: bouncers + url: "https://hub.crowdsec.net/browse/#bouncers" + htmlname: "[bouncers](/Crowdsec/v0/bouncers/)" + Htmlname: "[Bouncers](/Crowdsec/v0/bouncers/)" + plugins: + name: backend plugins + configpath: "/etc/crowdsec/plugins/backend/" + binpath: "/usr/local/lib/crowdsec/plugins/" + metabase: + name: metabase + url: https://github.com/crowdsecurity/crowdsec + wizard: + name: wizard + url: "https://github.com/crowdsecurity/crowdsec" + bin: "./wizard.sh" + bugreport: "https://github.com/crowdsecurity/crowdsec/issues" + ref: + parser: "[parser](/Crowdsec/v0/references/parsers/)" + Parser: "[Parser](/Crowdsec/v0/references/parsers/)" + scenario: "[scenarios](/Crowdsec/v0/references/scenarios/)" + Scenario: "[Scenarios](/Crowdsec/v0/references/scenarios/)" + acquis: "[acquisition](/Crowdsec/v0/guide/crowdsec/acquisition/)" + Acquis: "[Acquisition](/Crowdsec/v0/guide/crowdsec/acquisition/)" + output: "[output](/Crowdsec/v0/references/output/)" + Output: "[Output](/Crowdsec/v0/references/output/)" + # All the technical terms + event: + name: event + Name: Event + htmlname: "[event](/Crowdsec/v0/getting_started/concepts/#event)" + Htmlname: "[Event](/Crowdsec/v0/getting_started/concepts/#event)" + expr: + name: expr + Name: Expr + htmlname: "[expr](/Crowdsec/v0/write_configurations/expressions/)" + Htmlname: "[Expr](/Crowdsec/v0/write_configurations/expressions/)" + filter: + name: filter + Name: Filter + htmlname: "[filter](/Crowdsec/v0/references/parsers/#filter)" + Htmlname: "[Filter](/Crowdsec/v0/references/parsers/#filter)" + onsuccess: + name: onsuccess + Name: Onsuccess + htmlname: 
"[onsuccess](/Crowdsec/v0/references/parsers/#onsuccess)" + Htmlname: "[Onsuccess](/Crowdsec/v0/references/parsers/#onsuccess)" + statics: + name: statics + Name: Statics + htmlname: "[statics](/Crowdsec/v0/references/parsers/#statics)" + Htmlname: "[Statics](/Crowdsec/v0/references/parsers/#statics)" + parsers: + name: parsers + Name: Parsers + htmlname: "[parsers](/Crowdsec/v0/getting_started/concepts/#parsers)" + Htmlname: "[Parsers](/Crowdsec/v0/getting_started/concepts/#parsers)" + scenarios: + name: scenarios + Name: Scenarios + htmlname: "[scenarios](/Crowdsec/v0/getting_started/concepts/#scenarios)" + Htmlname: "[Scenarios](/Crowdsec/v0/getting_started/concepts/#scenarios)" + collections: + name: collections + Name: Collections + htmlname: "[collections](/Crowdsec/v0/getting_started/concepts/#collections)" + Htmlname: "[Collections](/Crowdsec/v0/getting_started/concepts/#collections)" + timeMachine: + name: timeMachine + Name: TimeMachine + htmlname: "[timeMachine](/Crowdsec/v0/getting_started/concepts/#timemachine)" + Htmlname: "[TimeMachine](/Crowdsec/v0/getting_started/concepts/#timemachine)" + overflow: + name: overflow + Name: Overflow + htmlname: "[overflow](/Crowdsec/v0/getting_started/concepts/#overflow-or-signaloccurence)" + Htmlname: "[Overflow](/Crowdsec/v0/getting_started/concepts/#overflow-or-signaloccurence)" + whitelists: + name: whitelists + Name: Whitelists + htmlname: "[whitelists](/Crowdsec/v0/write_configurations/whitelist/)" + Htmlname: "[Whitelists](/Crowdsec/v0/write_configurations/whitelist/)" + signal: + name: signal + Name: Signal + htmlname: "[signal](/Crowdsec/v0/getting_started/concepts/#overflow-or-signaloccurence)" + Htmlname: "[Signal](/Crowdsec/v0/getting_started/concepts/#overflow-or-signaloccurence)" + #scenario stuff + stage: + name: stage + Name: Stage + htmlname: "[stage](/Crowdsec/v0/getting_started/concepts/#stages)" + Htmlname: "[Stage](/Crowdsec/v0/getting_started/concepts/#stages)" + leakspeed: + name: leakspeed + Name: Leakspeed + htmlname: "[leakspeed](/Crowdsec/v0/references/scenarios/#leakspeed)" + Htmlname: "[Leakspeed](/Crowdsec/v0/references/scenarios/#leakspeed)" + capacity: + name: capacity + Name: Capacity + htmlname: "[capacity](/Crowdsec/v0/references/scenarios/#capacity)" + Htmlname: "[Capacity](/Crowdsec/v0/references/scenarios/#capacity)" + duration: + name: duration + Name: Duration + htmlname: "[duration](/Crowdsec/v0/references/scenarios/#duration)" + Htmlname: "[Duration](/Crowdsec/v0/references/scenarios/#duration)" + prometheus: + name: prometheus + htmlname: "[prometheus](https://github.com/prometheus/client_golang)" + api: + name: API + htmlname: "[API](TBD)" + topX: + name: topX + htmlname: "[topX](TBD)" +############################################################### +### Bellow are defines for V1 and later of the documentation ## +############################################################### + v1X: + doc: + new_issue: "[new documentation issue](https://github.com/crowdsecurity/crowdsec/issues/new)" + discourse: "[CrowdSecurity discourse](http://discourse.crowdsec.net)" + community: "[community](http://discourse.crowdsec.net)" + hub: + name: Crowdsec Hub + htmlname: "[Crowdsec Hub](https://hub.crowdsec.net/)" + url: "https://hub.crowdsec.net/" + bouncers_url: "https://hub.crowdsec.net/browse/#bouncers" + scenarios_url: "https://hub.crowdsec.net/browse/#configurations" + parsers_url: "https://hub.crowdsec.net/browse/#configurations" + collections_url: "https://hub.crowdsec.net/browse/#collections" + crowdsec: + 
name: crowdsec-agent + Name: Crowdsec-agent + url: https://github.com/crowdsecurity/crowdsec + bugreport: "https://github.com/crowdsecurity/crowdsec/issues" + download_url: https://github.com/crowdsecurity/crowdsec/releases + lapi: + name: local API (LAPI) + Name: Local API (LAPI) + Htmlname: "[Local API](/Crowdsec/v1/localAPI/)" + htmlname: "[local API](/Crowdsec/v1/localAPI/)" + url: /Crowdsec/v1/localAPI/ + swagger: https://crowdsecurity.github.io/api_doc/index.html?urls.primaryName=LAPI + cli: + name: cscli + Name: Cscli + bin: cscli + user_guide: "[cscli](/Crowdsec/v1/user_guide/cscli/)" + config: + crowdsec_dir: "/etc/crowdsec/config/" + acquis_path: "/etc/crowdsec/config/acquis.yaml" + crowdsec_config_file: "/etc/crowdsec/config.yaml" + bouncers: + name: bouncers + Name: Bouncers + url: "https://hub.crowdsec.net/browse/#bouncers" + htmlname: "[bouncers](/Crowdsec/v1/bouncers/)" + Htmlname: "[bouncers](/Crowdsec/v1/bouncers/)" + metabase: + name: metabase + htmlName: "[dashboard](/Crowdsec/v1/observability/dashboard)" + wizard: + name: wizard + bin: "./wizard.sh" + bugreport: "https://github.com/crowdsecurity/crowdsec/issues" + ref: + acquis: "[acquisition](/Crowdsec/v1/user_guide/configurations_management/acquisition/)" + parsers: "[parsers](/Crowdsec/v1/references/parsers/)" + scenarios: "[scenarios](/Crowdsec/v1/references/scenarios/)" + # All the technical terms + event: + name: event + Name: Event + htmlname: "[event](/Crowdsec/v1/getting_started/concepts/#events)" + Htmlname: "[Event](/Crowdsec/v1/getting_started/concepts/#events)" + expr: + name: expr + Name: Expr + htmlname: "[expr](/Crowdsec/v1/references/expressions/)" + Htmlname: "[Expr](/Crowdsec/v1/references/expressions/)" + filter: + name: filter + Name: Filter + htmlname: "[filter](/Crowdsec/v1/references/parsers/#filter)" + Htmlname: "[Filter](/Crowdsec/v1/references/parsers/#filter)" + onsuccess: + name: onsuccess + Name: Onsuccess + htmlname: "[onsuccess](/Crowdsec/v1/references/parsers/#onsuccess)" + Htmlname: "[Onsuccess](/Crowdsec/v1/references/parsers/#onsuccess)" + profiles: + htmlname: "[profiles](/Crowdsec/v1/references/profiles/)" + simulation: + htmlname: "[simulation](/Crowdsec/v1/references/simulation/)" + statics: + name: statics + Name: Statics + htmlname: "[statics](/Crowdsec/v1/references/parsers/#statics)" + Htmlname: "[Statics](/Crowdsec/v1/references/parsers/#statics)" + parsers: + name: parsers + Name: Parsers + htmlname: "[parsers](/Crowdsec/v1/getting_started/concepts/#parsers)" + Htmlname: "[Parsers](/Crowdsec/v1/getting_started/concepts/#parsers)" + scenarios: + name: scenarios + Name: Scenarios + htmlname: "[scenarios](/Crowdsec/v1/getting_started/concepts/#scenarios)" + Htmlname: "[Scenarios](/Crowdsec/v1/getting_started/concepts/#scenarios)" + collections: + name: collections + Name: Collections + htmlname: "[collections](/Crowdsec/v1/getting_started/concepts/#collections)" + Htmlname: "[Collections](/Crowdsec/v1/getting_started/concepts/#collections)" + timeMachine: + name: timeMachine + Name: TimeMachine + htmlname: "[timeMachine](/Crowdsec/v1/getting_started/concepts/#timemachine)" + Htmlname: "[TimeMachine](/Crowdsec/v1/getting_started/concepts/#timemachine)" + alert: + name: alert + Name: Alert + htmlname: "[alert](/Crowdsec/v1/getting_started/concepts/#alerts)" + Htmlname: "[Alert](/Crowdsec/v1/getting_started/concepts/#alerts)" + decision: + name: decision + Name: Decision + htmlname: "[decision](/Crowdsec/v1/getting_started/concepts/#decisions)" + Htmlname: "[Decision](/Crowdsec/v1/getting_started/concepts/#decisions)" + whitelists: + name: whitelists + Name: Whitelists + htmlname: "[whitelists](/Crowdsec/v1/write_configurations/whitelist/)" + Htmlname: "[Whitelists](/Crowdsec/v1/write_configurations/whitelist/)" + #scenario stuff + stage: + name: stage + Name: Stage + htmlname: "[stage](/Crowdsec/v1/getting_started/concepts/#stages)" + Htmlname: "[Stage](/Crowdsec/v1/getting_started/concepts/#stages)" + leakspeed: + name: leakspeed + Name: Leakspeed + htmlname: "[leakspeed](/Crowdsec/v1/references/scenarios/#leakspeed)" + Htmlname: "[Leakspeed](/Crowdsec/v1/references/scenarios/#leakspeed)" + capacity: + name: capacity + Name: Capacity + htmlname: "[capacity](/Crowdsec/v1/references/scenarios/#capacity)" + Htmlname: "[Capacity](/Crowdsec/v1/references/scenarios/#capacity)" + duration: + name: duration + Name: Duration + htmlname: "[duration](/Crowdsec/v1/references/scenarios/#duration)" + Htmlname: "[Duration](/Crowdsec/v1/references/scenarios/#duration)" + prometheus: + name: prometheus + htmlname: "[prometheus](https://github.com/prometheus/client_golang)" + api: + name: API + htmlname: "[API](TBD)" + topX: + name: topX + htmlname: "[topX](TBD)"
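Since the `macros` plugin stays enabled, every key under `extra` above is exposed as a Jinja2 variable inside the documentation pages (the usual mkdocs-macros behaviour of making `extra` keys directly accessible). A minimal sketch of how a v1 page could consume the nested `v1X` values — the page path and the sentence are illustrative, only the variable names come from the config above:

```markdown
<!-- docs/v1.X/some_page.md (hypothetical page) -->
{{v1X.crowdsec.Name}} exposes its decisions through the {{v1X.lapi.htmlname}},
and available {{v1X.bouncers.name}} are listed on {{v1X.hub.htmlname}}.
```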
"[Decision](/Crowdsec/v1/getting_started/concepts/#decisions)" + whitelists: + name: whitelists + Name: Whitelists + htmlname: "[whitelists](/Crowdsec/v1/write_configurations/whitelist/)" + Htmlname: "[Whitelists](/Crowdsec/v1/write_configurations/whitelist/)" + #scenario stuff + stage: + name: stage + Name: Stage + htmlname: "[stage](/Crowdsec/v1/getting_started/concepts/#stages)" + Htmlname: "[Stage](/Crowdsec/v1/getting_started/concepts/#stages)" + leakspeed: + name: leakspeed + Name: Leakspeed + htmlname: "[leakspeed](/Crowdsec/v1/references/scenarios/#leakspeed)" + Htmlname: "[Leakspeed](/Crowdsec/v1/references/scenarios/#leakspeed)" + capacity: + name: capacity + Name: Capacity + htmlname: "[capacity](/Crowdsec/v1/references/scenarios/#capacity)" + Htmlname: "[Capacity](/Crowdsec/v1/references/scenarios/#capacity)" + duration: + name: duration + Name: Duration + htmlname: "[duration](/Crowdsec/v1/references/scenarios/#duration)" + Htmlname: "[Duration](/Crowdsec/v1/references/scenarios/#duration)" + prometheus: + name: prometheus + htmlname: "[prometheus](https://github.com/prometheus/client_golang)" + api: + name: API + htmlname: "[API](TBD)" + topX: + name: topX + htmlname: "[topX](TBD)" diff --git a/pkg/acquisition/acquisition.go b/pkg/acquisition/acquisition.go new file mode 100644 index 000000000..18476e871 --- /dev/null +++ b/pkg/acquisition/acquisition.go @@ -0,0 +1,154 @@ +package acquisition + +import ( + "fmt" + "io" + "os" + + "github.com/crowdsecurity/crowdsec/pkg/csconfig" + "github.com/crowdsecurity/crowdsec/pkg/types" + "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" + log "github.com/sirupsen/logrus" + "gopkg.in/yaml.v2" + + tomb "gopkg.in/tomb.v2" +) + +var ReaderHits = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "cs_reader_hits_total", + Help: "Total lines where read.", + }, + []string{"source"}, +) + +/* + current limits : + - The acquisition is not yet modular (cf. traefik/yaegi), but we start with an interface to pave the road for it. + - The configuration item unmarshaled (DataSourceCfg) isn't generic neither yet. + - This changes should be made when we're ready to have acquisition managed by the hub & cscli + once this change is done, we might go for the following configuration format instead : + ```yaml + --- + type: nginx + source: journald + filter: "PROG=nginx" + --- + type: nginx + source: files + filenames: + - "/var/log/nginx/*.log" + ``` +*/ + +/* Approach + +We support acquisition in two modes : + - tail mode : we're following a stream of info (tail -f $src). this is used when monitoring live logs + - cat mode : we're reading a file/source one-shot (cat $src), and scenarios will match the timestamp extracted from logs. + +One DataSourceCfg can lead to multiple goroutines, hence the Tombs passing around to allow proper tracking. +tail mode shouldn't return except on errors or when externally killed via tombs. +cat mode will return once source has been exhausted. + + + TBD in current iteration : + - how to deal with "file was not present at startup but might appear later" ? +*/ + +var TAIL_MODE = "tail" +var CAT_MODE = "cat" + +type DataSourceCfg struct { + Mode string `yaml:"mode,omitempty"` //tail|cat|... 
+ +type DataSource interface { + Configure(DataSourceCfg) error + /*the readers must watch the tomb (especially in tail mode) to know when to shut down. + the tomb is also used to trigger a general shutdown when a datasource errors */ + StartReading(chan types.Event, *tomb.Tomb) error + Mode() string //return CAT_MODE or TAIL_MODE + //Not sure it makes sense to make those funcs part of the interface. + //While 'cat' and 'tail' are the only two modes we see now, other modes might appear + //StartTail(chan types.Event, *tomb.Tomb) error + //StartCat(chan types.Event, *tomb.Tomb) error +} + +func DataSourceConfigure(config DataSourceCfg) (DataSource, error) { + if config.Mode == "" { /*default mode is tail*/ + config.Mode = TAIL_MODE + } + + if len(config.Filename) > 0 || len(config.Filenames) > 0 { /*it's file acquisition*/ + + fileSrc := new(FileSource) + if err := fileSrc.Configure(config); err != nil { + return nil, errors.Wrap(err, "configuring file datasource") + } + return fileSrc, nil + } else if len(config.JournalctlFilters) > 0 { /*it's journald acquisition*/ + + journaldSrc := new(JournaldSource) + if err := journaldSrc.Configure(config); err != nil { + return nil, errors.Wrap(err, "configuring journald datasource") + } + return journaldSrc, nil + } else { + return nil, fmt.Errorf("empty filename(s) and journalctl filter, malformed datasource") + } +} + +func LoadAcquisitionFromFile(config *csconfig.CrowdsecServiceCfg) ([]DataSource, error) { + + var sources []DataSource + + yamlFile, err := os.Open(config.AcquisitionFilePath) + if err != nil { + return nil, errors.Wrapf(err, "can't open %s", config.AcquisitionFilePath) + } + dec := yaml.NewDecoder(yamlFile) + dec.SetStrict(true) + for { + sub := DataSourceCfg{} + err = dec.Decode(&sub) + if err != nil { + if err == io.EOF { + log.Tracef("End of yaml file") + break + } + return nil, errors.Wrap(err, fmt.Sprintf("failed to yaml decode %s", config.AcquisitionFilePath)) + } + src, err := DataSourceConfigure(sub) + if err != nil { + log.Warningf("while configuring datasource : %s", err) + continue + } + sources = append(sources, src) + } + return sources, nil +} + +func StartAcquisition(sources []DataSource, output chan types.Event, AcquisTomb *tomb.Tomb) error { + + for i := 0; i < len(sources); i++ { + subsrc := sources[i] //ensure it's a copy + log.Debugf("starting one source %d/%d ->> %T", i, len(sources), subsrc) + AcquisTomb.Go(func() error { + defer types.CatchPanic("crowdsec/acquis") + if err := subsrc.StartReading(output, AcquisTomb); err != nil { + return err + } + return nil + }) + } + /*return only when acquisition is over (cat) or never (tail)*/ + err := AcquisTomb.Wait() + return err +}
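Put together, a minimal sketch of how a caller might wire the pieces above — the acquisition path is an illustrative value and `evt.Line.Raw` is an assumption about the `types.Event` layout:

```go
package main

import (
	"log"

	"github.com/crowdsecurity/crowdsec/pkg/acquisition"
	"github.com/crowdsecurity/crowdsec/pkg/csconfig"
	"github.com/crowdsecurity/crowdsec/pkg/types"
	tomb "gopkg.in/tomb.v2"
)

func main() {
	cfg := &csconfig.CrowdsecServiceCfg{AcquisitionFilePath: "/etc/crowdsec/config/acquis.yaml"}
	sources, err := acquisition.LoadAcquisitionFromFile(cfg)
	if err != nil {
		log.Fatalf("loading acquisition : %s", err)
	}
	events := make(chan types.Event)
	acquisTomb := tomb.Tomb{}
	// consume whatever the datasources push
	go func() {
		for evt := range events {
			log.Printf("got line : %s", evt.Line.Raw) // Line.Raw is an assumption
		}
	}()
	// returns once cat-mode sources are exhausted ; blocks forever for tail-mode ones
	if err := acquisition.StartAcquisition(sources, events, &acquisTomb); err != nil {
		log.Fatalf("acquisition error : %s", err)
	}
}
```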
diff --git a/pkg/acquisition/acquisition_test.go b/pkg/acquisition/acquisition_test.go new file mode 100644 index 000000000..9af19de3b --- /dev/null +++ b/pkg/acquisition/acquisition_test.go @@ -0,0 +1,138 @@ +package acquisition + +import ( + "fmt" + "testing" + "time" + + "github.com/stretchr/testify/assert" + + "github.com/crowdsecurity/crowdsec/pkg/csconfig" + "github.com/crowdsecurity/crowdsec/pkg/types" + log "github.com/sirupsen/logrus" + tomb "gopkg.in/tomb.v2" +) + +func TestConfigLoading(t *testing.T) { + //bad filename + cfg := csconfig.CrowdsecServiceCfg{ + AcquisitionFilePath: "./tests/xxx.yaml", + } + _, err := LoadAcquisitionFromFile(&cfg) + assert.Contains(t, fmt.Sprintf("%s", err), "can't open ./tests/xxx.yaml: open ./tests/xxx.yaml: no such file or directory") + //bad config file + cfg = csconfig.CrowdsecServiceCfg{ + AcquisitionFilePath: "./tests/test.log", + } + _, err = LoadAcquisitionFromFile(&cfg) + assert.Contains(t, fmt.Sprintf("%s", err), "failed to yaml decode ./tests/test.log: yaml: unmarshal errors") + //correct config file + cfg = csconfig.CrowdsecServiceCfg{ + AcquisitionFilePath: "./tests/acquis_test.yaml", + } + srcs, err := LoadAcquisitionFromFile(&cfg) + if err != nil { + t.Fatalf("unexpected error : %s", err) + } + assert.Equal(t, len(srcs), 1) +} + +func TestDataSourceConfigure(t *testing.T) { + tests := []struct { + cfg DataSourceCfg + //tombState + config_error string + read_error string + tomb_error string + lines int + }{ + { //missing filename(s) + cfg: DataSourceCfg{ + Mode: CAT_MODE, + }, + config_error: "empty filename(s) and journalctl filter, malformed datasource", + }, + { //missing filename(s) + cfg: DataSourceCfg{ + Mode: TAIL_MODE, + }, + config_error: "empty filename(s) and journalctl filter, malformed datasource", + }, + { //bad mode(s) + cfg: DataSourceCfg{ + Filename: "./tests/test.log", + Mode: "ratata", + }, + config_error: "configuring file datasource: unknown mode ratata for file acquisition", + }, + { //ok test + cfg: DataSourceCfg{ + Mode: CAT_MODE, + Filename: "./tests/test.log", + }, + }, + { //missing mode, defaults to TAIL_MODE + cfg: DataSourceCfg{ + Filename: "./tests/test.log", + }, + }, + { //ok test for journalctl + cfg: DataSourceCfg{ + Mode: CAT_MODE, + JournalctlFilters: []string{"-test.run=TestSimJournalctlCatOneLine", "--"}, + }, + }, + } + + for tidx, test := range tests { + + srcs, err := DataSourceConfigure(test.cfg) + if test.config_error != "" { + assert.Contains(t, fmt.Sprintf("%s", err), test.config_error) + log.Infof("expected config error ok : %s", test.config_error) + continue + } else { + if err != nil { + t.Fatalf("%d/%d unexpected config error %s", tidx, len(tests), err) + } + } + + //check we got the expected mode + if tests[tidx].cfg.Mode == "" { + tests[tidx].cfg.Mode = TAIL_MODE + } + assert.Equal(t, srcs.Mode(), tests[tidx].cfg.Mode) + + out := make(chan types.Event) + tomb := tomb.Tomb{} + + go func() { + err = StartAcquisition([]DataSource{srcs}, out, &tomb) + if test.read_error != "" { + assert.Contains(t, fmt.Sprintf("%s", err), test.read_error) + log.Infof("expected read error ok : %s", test.read_error) + } else { + if err != nil { + log.Fatalf("%d/%d unexpected read error %s", tidx, len(tests), err) + } + } + }() + + log.Printf("killing acquisition") + //we're actually not interested in the result :) + tomb.Kill(nil) + time.Sleep(1 * time.Second) + + if test.tomb_error != "" { + assert.Contains(t, fmt.Sprintf("%s", tomb.Err()), test.tomb_error) + log.Infof("expected tomb error ok : %s", test.tomb_error) + continue + } else { + if tomb.Err() != nil { + t.Fatalf("%d/%d unexpected tomb error %s", tidx, len(tests), tomb.Err()) + } + } + + } + +} diff --git a/pkg/acquisition/file_reader.go b/pkg/acquisition/file_reader.go index a4ee92233..c525b3188 100644 --- a/pkg/acquisition/file_reader.go +++ b/pkg/acquisition/file_reader.go @@ -3,257 +3,158 @@ package acquisition import ( "bufio" "compress/gzip" - "encoding/json" - "errors" "fmt" - "io" "os" - "strings" - - "github.com/crowdsecurity/crowdsec/pkg/csconfig" - leaky "github.com/crowdsecurity/crowdsec/pkg/leakybucket" -
"github.com/crowdsecurity/crowdsec/pkg/types" - - tomb "gopkg.in/tomb.v2" - "gopkg.in/yaml.v2" - - //"log" "path/filepath" + "strings" "time" - "github.com/prometheus/client_golang/prometheus" - log "github.com/sirupsen/logrus" + "github.com/pkg/errors" "golang.org/x/sys/unix" + leaky "github.com/crowdsecurity/crowdsec/pkg/leakybucket" + + "github.com/crowdsecurity/crowdsec/pkg/types" "github.com/nxadm/tail" + log "github.com/sirupsen/logrus" + + "github.com/prometheus/client_golang/prometheus" + tomb "gopkg.in/tomb.v2" ) -type Acquisition interface { - Init(map[string]interface{}) (interface{}, error) - ReadOne(interface{}) (string, error) +type FileSource struct { + Config DataSourceCfg + tails []*tail.Tail + Files []string } -type FileCtx struct { - Type string `yaml:"type,omitempty"` //file|bin|... - Mode string `yaml:"mode,omitempty"` //tail|cat|... - Filename string `yaml:"filename,omitempty"` - Filenames []string `yaml:"filenames,omitempty"` - tail *tail.Tail - - Labels map[string]string `yaml:"labels,omitempty"` - Profiling bool `yaml:"profiling,omitempty"` -} - -type FileAcquisCtx struct { - Files []FileCtx - Profiling bool -} - -const ( - TAILMODE = "tail" - CATMODE = "cat" -) - -const ( - FILETYPE = "file" - BINTYPE = "bin" -) - -var ReaderHits = prometheus.NewCounterVec( - prometheus.CounterOpts{ - Name: "cs_reader_hits_total", - Help: "Total lines where read.", - }, - []string{"source"}, -) - -func LoadAcquisitionConfig(cConfig *csconfig.CrowdSec) (*FileAcquisCtx, error) { - var acquisitionCTX *FileAcquisCtx - var err error - /*Init the acqusition : from cli or from acquis.yaml file*/ - if cConfig.SingleFile != "" { - var input FileCtx - input.Filename = cConfig.SingleFile - input.Mode = CATMODE - input.Labels = make(map[string]string) - input.Labels["type"] = cConfig.SingleFileLabel - acquisitionCTX, err = InitReaderFromFileCtx([]FileCtx{input}) - } else { /* Init file reader if we tail */ - acquisitionCTX, err = InitReader(cConfig.AcquisitionFile) - } - if err != nil { - return nil, fmt.Errorf("unable to start file acquisition, bailout %v", err) - } - if acquisitionCTX == nil { - return nil, fmt.Errorf("no inputs to process") - } - if cConfig.Profiling { - acquisitionCTX.Profiling = true +func (f *FileSource) Configure(Config DataSourceCfg) error { + f.Config = Config + if len(Config.Filename) == 0 && len(Config.Filenames) == 0 { + return fmt.Errorf("no filename or filenames") } - return acquisitionCTX, nil -} - -func InitReader(cfg string) (*FileAcquisCtx, error) { - var files []FileCtx - - yamlFile, err := os.Open(cfg) - if err != nil { - log.Errorf("Can't access acquisition configuration file with '%v'.", err) - return nil, err + //let's deal with the array no matter what + if len(Config.Filename) != 0 { + Config.Filenames = append(Config.Filenames, Config.Filename) } - //process the yaml - dec := yaml.NewDecoder(yamlFile) - dec.SetStrict(true) - for { - t := FileCtx{} - err = dec.Decode(&t) + + for _, fexpr := range Config.Filenames { + files, err := filepath.Glob(fexpr) if err != nil { - if err == io.EOF { - log.Tracef("End of yaml file") - break - } - log.Fatalf("Error decoding acquisition configuration file with '%s': %v", cfg, err) - break + return errors.Wrapf(err, "while globbing %s", fexpr) } - files = append(files, t) - } - return InitReaderFromFileCtx(files) -} - -//InitReader iterates over the FileCtx objects of cfg and resolves globbing to open files -func InitReaderFromFileCtx(files []FileCtx) (*FileAcquisCtx, error) { - - var ctx *FileAcquisCtx = 
&FileAcquisCtx{} - - for _, t := range files { - //defaults to file type in tail mode. - if t.Type == "" { - t.Type = FILETYPE - } - if t.Mode == "" { - t.Mode = TAILMODE - } - //minimalist sanity check - if t.Filename == "" && len(t.Filenames) == 0 { - log.Infof("No filename or filenames, skipping empty item %+v", t) - continue - } - if len(t.Labels) == 0 { - log.Infof("Acquisition has no tags, skipping empty item %+v", t) + if len(files) == 0 { + log.Warningf("[file datasource] no results for %s", fexpr) continue } - if len(t.Filename) > 0 { - t.Filenames = append(t.Filenames, t.Filename) - } - var opcpt int - //open the files indicated by `filename` and `filesnames` - for _, fglob := range t.Filenames { - opcpt = 0 - files, err := filepath.Glob(fglob) - if err != nil { - log.Errorf("error while globing '%s' : %v", fglob, err) - return nil, err + for _, file := range files { + /*check that we can read said file*/ + if err := unix.Access(file, unix.R_OK); err != nil { + return fmt.Errorf("unable to open %s : %s", file, err) } - if len(files) == 0 { - log.Errorf("nothing to glob for '%s'", fglob) - continue - } - for _, file := range files { - /*check that we can read said file*/ - if err := unix.Access(file, unix.R_OK); err != nil { - log.Errorf("Unable to open file [%s] : %v", file, err) + log.Infof("[file datasource] opening file '%s'", file) + + if f.Config.Mode == TAIL_MODE { + tail, err := tail.TailFile(file, tail.Config{ReOpen: true, Follow: true, Poll: true, Location: &tail.SeekInfo{Offset: 0, Whence: 2}}) + if err != nil { + log.Errorf("[file datasource] skipping %s : %v", file, err) continue } - log.Infof("Opening file '%s' (pattern:%s)", file, fglob) - fdesc := t - fdesc.Filename = file - fdesc.Filenames = []string{} - - switch t.Type { - case FILETYPE: - if t.Mode == TAILMODE { - fdesc.tail, err = tail.TailFile(file, tail.Config{ReOpen: true, Follow: true, Poll: true, Location: &tail.SeekInfo{Offset: 0, Whence: 2}}) - if err != nil { - log.Errorf("skipping '%s' : %v", file, err) - continue - } - } - case BINTYPE: - - default: - log.Fatalf("unexpected type %s for %+v", t.Type, t.Filenames) + f.Files = append(f.Files, file) + f.tails = append(f.tails, tail) + } else if f.Config.Mode == CAT_MODE { + //simply check that the file exists, it will be read differently + if _, err := os.Stat(file); err != nil { + return fmt.Errorf("can't open file %s : %s", file, err) } - opcpt++ - ctx.Files = append(ctx.Files, fdesc) + f.Files = append(f.Files, file) + } else { + return fmt.Errorf("unknown mode %s for file acquisition", f.Config.Mode) } + } - log.Debugf("'%v' opened %d files", t.Filenames, opcpt) } - return ctx, nil + if len(f.Files) == 0 { + return fmt.Errorf("no files to read for %+v", Config.Filenames) + } + + return nil } -//let's return an array of chans for signaling for now -func AcquisStartReading(ctx *FileAcquisCtx, output chan types.Event, AcquisTomb *tomb.Tomb) { +func (f *FileSource) Mode() string { + return f.Config.Mode +} - if len(ctx.Files) == 0 { - log.Errorf("No files to read") +func (f *FileSource) StartReading(out chan types.Event, t *tomb.Tomb) error { + + if f.Config.Mode == CAT_MODE { + return f.StartCat(out, t) + } else if f.Config.Mode == TAIL_MODE { + return f.StartTail(out, t) + } else { + return fmt.Errorf("unknown mode '%s' for file acquisition", f.Config.Mode) } - /* start one go routine reading for each file, and pushing to chan output */ - for idx, fctx := range ctx.Files { - if ctx.Profiling { - fctx.Profiling = true - } - fctx := fctx - mode := "?" 
- switch fctx.Mode { - case TAILMODE: - mode = "tail" - AcquisTomb.Go(func() error { - return AcquisReadOneFile(fctx, output, AcquisTomb) - }) - case CATMODE: - mode = "cat" - AcquisTomb.Go(func() error { - return ReadAtOnce(fctx, output, AcquisTomb) - }) - default: - log.Fatalf("unknown read mode %s for %+v", fctx.Mode, fctx.Filenames) - } - log.Printf("starting (%s) reader file %d/%d : %s", mode, idx, len(ctx.Files), fctx.Filename) - } - log.Printf("Started %d routines for polling/read", len(ctx.Files)) } /*A tail-mode file reader (tail) */ -func AcquisReadOneFile(ctx FileCtx, output chan types.Event, AcquisTomb *tomb.Tomb) error { - clog := log.WithFields(log.Fields{ - "acquisition file": ctx.Filename, - }) - if ctx.Type != FILETYPE { - log.Errorf("Can't tail %s type for %+v", ctx.Type, ctx.Filenames) - return fmt.Errorf("can't tail %s type for %+v", ctx.Type, ctx.Filenames) +func (f *FileSource) StartTail(output chan types.Event, AcquisTomb *tomb.Tomb) error { + log.Debugf("starting file tail with %d items", len(f.tails)) + for i := 0; i < len(f.tails); i++ { + idx := i + log.Debugf("starting %d", idx) + AcquisTomb.Go(func() error { + defer types.CatchPanic("crowdsec/acquis/tailfile") + return f.TailOneFile(output, AcquisTomb, idx) + }) } - log.Infof("Starting tail of %s", ctx.Filename) - timeout := time.Tick(20 * time.Second) -LOOP: + return nil +} + +/*A one shot file reader (cat) */ +func (f *FileSource) StartCat(output chan types.Event, AcquisTomb *tomb.Tomb) error { + for i := 0; i < len(f.Files); i++ { + idx := i + log.Debugf("starting %d", idx) + AcquisTomb.Go(func() error { + defer types.CatchPanic("crowdsec/acquis/catfile") + return f.CatOneFile(output, AcquisTomb, idx) + }) + } + return nil +} + +/*A tail-mode file reader (tail) */ +func (f *FileSource) TailOneFile(output chan types.Event, AcquisTomb *tomb.Tomb, idx int) error { + + file := f.Files[idx] + tail := f.tails[idx] + + clog := log.WithFields(log.Fields{ + "acquisition file": f.Files[idx], + }) + clog.Debugf("starting") + + timeout := time.Tick(1 * time.Second) + for { l := types.Line{} select { case <-AcquisTomb.Dying(): //we are being killed by main - clog.Infof("Killing acquistion routine") - if err := ctx.tail.Stop(); err != nil { + clog.Infof("file datasource %s stopping", file) + if err := tail.Stop(); err != nil { clog.Errorf("error in stop : %s", err) } - break LOOP - case <-ctx.tail.Tomb.Dying(): //our tailer is dying - clog.Warningf("Reader is dying/dead") - return errors.New("reader is dead") - case line := <-ctx.tail.Lines: + return nil + case <-tail.Tomb.Dying(): //our tailer is dying + clog.Warningf("File reader of %s died", file) + AcquisTomb.Kill(fmt.Errorf("dead reader for %s", file)) + return fmt.Errorf("reader for %s is dead", file) + case line := <-tail.Lines: if line == nil { clog.Debugf("Nil line") - return errors.New("Tail is empty") + return fmt.Errorf("tail for %s is empty", file) } if line.Err != nil { log.Warningf("fetch error : %v", line.Err) @@ -262,33 +163,29 @@ LOOP: if line.Text == "" { //skip empty lines continue } - ReaderHits.With(prometheus.Labels{"source": ctx.Filename}).Inc() + ReaderHits.With(prometheus.Labels{"source": file}).Inc() l.Raw = line.Text - l.Labels = ctx.Labels + l.Labels = f.Config.Labels l.Time = line.Time - l.Src = ctx.Filename + l.Src = file l.Process = true //we're tailing, it must be real time logs + log.Debugf("pushing %+v", l) output <- types.Event{Line: l, Process: true, Type: types.LOG, ExpectMode: leaky.LIVE} case <-timeout: //time out, shall we do stuff 
? - clog.Tracef("timeout") + clog.Debugf("timeout") } } - return nil } /*A one shot file reader (cat) */ -func ReadAtOnce(ctx FileCtx, output chan types.Event, AcquisTomb *tomb.Tomb) error { +func (f *FileSource) CatOneFile(output chan types.Event, AcquisTomb *tomb.Tomb, idx int) error { var scanner *bufio.Scanner - if len(ctx.Filenames) > 0 { - log.Errorf("no multi-file support for this mode.") - return fmt.Errorf("no multi-file support for this mode") - } - log.Infof("reading %s at once", ctx.Filename) - file := ctx.Filename + log.Infof("reading %s at once", f.Files[idx]) + file := f.Files[idx] clog := log.WithFields(log.Fields{ "file": file, @@ -297,58 +194,34 @@ func ReadAtOnce(ctx FileCtx, output chan types.Event, AcquisTomb *tomb.Tomb) err defer fd.Close() if err != nil { clog.Errorf("Failed opening file: %s", err) - return err + return errors.Wrapf(err, "failed opening %s", f.Files[idx]) } - if ctx.Type == FILETYPE { - if strings.HasSuffix(file, ".gz") { - gz, err := gzip.NewReader(fd) - if err != nil { - clog.Errorf("Failed to read gz file: %s", err) - return err - } - defer gz.Close() - scanner = bufio.NewScanner(gz) - - } else { - scanner = bufio.NewScanner(fd) - } - scanner.Split(bufio.ScanLines) - count := 0 - for scanner.Scan() { - count++ - l := types.Line{} - l.Raw = scanner.Text() - l.Time = time.Now() - l.Src = file - l.Labels = ctx.Labels - l.Process = true - //we're reading logs at once, it must be time-machine buckets - output <- types.Event{Line: l, Process: true, Type: types.LOG, ExpectMode: leaky.TIMEMACHINE} - } - clog.Warningf("read %d lines", count) - } else if ctx.Type == BINTYPE { - /*BINTYPE is only overflows for now*/ - dec := json.NewDecoder(fd) - count := 0 - for { - var p types.Event - if err := dec.Decode(&p); err == io.EOF { - break - } else if err != nil { - log.Warningf("While reading %s : %s", fd.Name(), err) - continue - } - count++ - p.Type = types.OVFLW - p.Process = true - //we're reading logs at once, it must be time-machine buckets - p.ExpectMode = leaky.TIMEMACHINE - output <- p - } - clog.Warningf("unmarshaled %d events", count) + if strings.HasSuffix(file, ".gz") { + gz, err := gzip.NewReader(fd) + if err != nil { + clog.Errorf("Failed to read gz file: %s", err) + return errors.Wrapf(err, "failed to read gz %s", f.Files[idx]) + } + defer gz.Close() + scanner = bufio.NewScanner(gz) + } else { + scanner = bufio.NewScanner(fd) } - clog.Infof("force commit") + scanner.Split(bufio.ScanLines) + for scanner.Scan() { + log.Tracef("line %s", scanner.Text()) + l := types.Line{} + l.Raw = scanner.Text() + l.Time = time.Now() + l.Src = file + l.Labels = f.Config.Labels + l.Process = true + ReaderHits.With(prometheus.Labels{"source": file}).Inc() + //we're reading logs at once, it must be time-machine buckets + output <- types.Event{Line: l, Process: true, Type: types.LOG, ExpectMode: leaky.TIMEMACHINE} + } + AcquisTomb.Kill(nil) return nil } diff --git a/pkg/acquisition/file_reader_test.go b/pkg/acquisition/file_reader_test.go index 1f8d7b090..6151dd742 100644 --- a/pkg/acquisition/file_reader_test.go +++ b/pkg/acquisition/file_reader_test.go @@ -3,169 +3,225 @@ package acquisition import ( "fmt" "os" + "strings" "testing" "time" - "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/types" log "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" - "gopkg.in/tomb.v2" + tomb "gopkg.in/tomb.v2" ) -func TestLoadAcquisitionConfig(t *testing.T) { - testFilePath := "./tests/test.log" +func TestAcquisCat(t 
*testing.T) { tests := []struct { - csConfig *csconfig.CrowdSec - result *FileAcquisCtx - err string + cfg DataSourceCfg + //tombState + config_error string + read_error string + tomb_error string + lines int }{ - { - csConfig: &csconfig.CrowdSec{ - SingleFile: testFilePath, - SingleFileLabel: "my_test_log", - Profiling: false, + { //missing filename(s) + cfg: DataSourceCfg{ + Mode: CAT_MODE, }, - result: &FileAcquisCtx{ - Files: []FileCtx{ - { - Type: "file", - Mode: "cat", - Filename: testFilePath, - Filenames: []string{}, - Labels: map[string]string{ - "type": "my_test_log", - }, - Profiling: false, - }, - }, - Profiling: false, - }, - err: "", + config_error: "no filename or filenames", }, - { - csConfig: &csconfig.CrowdSec{ - SingleFile: testFilePath, - SingleFileLabel: "my_test_log", - Profiling: true, + { //forbidden file + cfg: DataSourceCfg{ + Mode: CAT_MODE, + Filename: "/etc/shadow", }, - result: &FileAcquisCtx{ - Files: []FileCtx{ - { - Type: "file", - Mode: "cat", - Filename: testFilePath, - Filenames: []string{}, - Labels: map[string]string{ - "type": "my_test_log", - }, - Profiling: false, - }, - }, - Profiling: true, + config_error: "unable to open /etc/shadow : permission denied", + }, + { //bad glob pattern + cfg: DataSourceCfg{ + Filename: "[a-", + Mode: CAT_MODE, }, - err: "", + config_error: "while globbing [a-: syntax error in pattern", + }, + { //nonexistent file + cfg: DataSourceCfg{ + Filename: "/does/not/exists", + Mode: CAT_MODE, + }, + config_error: "no files to read for [/does/not/exists]", + }, + { //ok file + cfg: DataSourceCfg{ + Filename: "./tests/test.log", + Mode: CAT_MODE, + }, + lines: 1, + }, + { //invalid gz + cfg: DataSourceCfg{ + Filename: "./tests/badlog.gz", + Mode: CAT_MODE, + }, + lines: 0, + tomb_error: "failed to read gz ./tests/badlog.gz: EOF", + }, + { //good gz + cfg: DataSourceCfg{ + Filename: "./tests/test.log.gz", + Mode: CAT_MODE, + }, + lines: 1, + }, } - for _, test := range tests { - result, err := LoadAcquisitionConfig(test.csConfig) - assert.Equal(t, test.result, result) - if test.err == "" && err == nil { + for tidx, test := range tests { + fileSrc := new(FileSource) + err := fileSrc.Configure(test.cfg) + if test.config_error != "" { + assert.Contains(t, fmt.Sprintf("%s", err), test.config_error) + log.Infof("expected config error ok : %s", test.config_error) continue - } - assert.EqualError(t, err, test.err) - } -} - -func TestAcquisStartReadingTailKilled(t *testing.T) { - acquisFilePath := "./tests/acquis_test_log.yaml" - csConfig := &csconfig.CrowdSec{ - AcquisitionFile: acquisFilePath, - Profiling: false, - } - fCTX, err := LoadAcquisitionConfig(csConfig) - if err != nil { - t.Fatalf(err.Error()) - } - outputChan := make(chan types.Event) - acquisTomb := tomb.Tomb{} - - AcquisStartReading(fCTX, outputChan, &acquisTomb) - if !acquisTomb.Alive() { - t.Fatal("acquisition tomb is not alive") - } - - time.Sleep(500 * time.Millisecond) - filename := "./tests/test.log" - - f, err := os.OpenFile(filename, os.O_APPEND|os.O_WRONLY, 0644) - if err != nil { - t.Fatal(err) - } - for i := 0; i < 5; i++ { - _, err := f.WriteString(fmt.Sprintf("ratata%d\n", i)) - if err != nil { - t.Fatal(err) - } - } - f.Close() - - time.Sleep(500 * time.Millisecond) - reads := 0 -L: - for { - select { - case <-outputChan: - reads++ - if reads == 2 { - acquisTomb.Kill(nil) - time.Sleep(100 * time.Millisecond) + } else { + if err != nil { + t.Fatalf("%d/%d unexpected config error %s", tidx, len(tests), err) } - case <-time.After(1 * time.Second): - break L } + + out :=
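// config_error, read_error and tomb_error above correspond to the three
// stages at which a datasource can fail: Configure(), the synchronous part
// of StartReading(), and the asynchronous death of the reader goroutines
// as reported through the tomb.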
make(chan types.Event) + tomb := tomb.Tomb{} + count := 0 + + err = fileSrc.StartReading(out, &tomb) + if test.read_error != "" { + assert.Contains(t, fmt.Sprintf("%s", err), test.read_error) + log.Infof("expected read error ok : %s", test.read_error) + continue + } else { + if err != nil { + t.Fatalf("%d/%d unexpected read error %s", tidx, len(tests), err) + } + } + + READLOOP: + for { + select { + case <-out: + count++ + case <-time.After(1 * time.Second): + break READLOOP + } + } + + if count != test.lines { + t.Fatalf("%d/%d expected %d line read, got %d", tidx, len(tests), test.lines, count) + } + + if test.tomb_error != "" { + assert.Contains(t, fmt.Sprintf("%s", tomb.Err()), test.tomb_error) + log.Infof("expected tomb error ok : %s", test.read_error) + continue + } else { + if tomb.Err() != nil { + t.Fatalf("%d/%d unexpected tomb error %s", tidx, len(tests), tomb.Err()) + } + } + } - log.Printf("-> %d", reads) - if reads != 2 { - t.Fatal() - } - - f, err = os.OpenFile(filename, os.O_CREATE|os.O_WRONLY, 0644) - if err != nil { - t.Fatal(err) - } - _, err = f.WriteString("one log line\n") - if err != nil { - t.Fatal(err) - } - f.Close() } -func TestAcquisStartReadingTail(t *testing.T) { - acquisFilePath := "./tests/acquis_test_log.yaml" - filename := "./tests/test.log" - csConfig := &csconfig.CrowdSec{ - AcquisitionFile: acquisFilePath, - Profiling: false, +func TestTailKill(t *testing.T) { + cfg := DataSourceCfg{ + Filename: "./tests/test.log", + Mode: TAIL_MODE, } - fCTX, err := LoadAcquisitionConfig(csConfig) + + fileSrc := new(FileSource) + err := fileSrc.Configure(cfg) if err != nil { - t.Fatalf(err.Error()) - } - outputChan := make(chan types.Event) - acquisTomb := tomb.Tomb{} - - AcquisStartReading(fCTX, outputChan, &acquisTomb) - if !acquisTomb.Alive() { - t.Fatal("acquisition tomb is not alive") + t.Fatalf("unexpected config error %s", err) } - time.Sleep(500 * time.Millisecond) + out := make(chan types.Event) + tb := tomb.Tomb{} - f, err := os.OpenFile(filename, os.O_APPEND|os.O_WRONLY, 0644) + err = fileSrc.StartReading(out, &tb) + if err != nil { + t.Fatalf("unexpected read error %s", err) + } + time.Sleep(1 * time.Second) + if tb.Err() != tomb.ErrStillAlive { + t.Fatalf("unexpected tomb error %s (should be alive)", tb.Err()) + } + //kill it :> + tb.Kill(nil) + time.Sleep(1 * time.Second) + if tb.Err() != nil { + t.Fatalf("unexpected tomb error %s (should be dead)", tb.Err()) + } + +} + +func TestTailKillBis(t *testing.T) { + cfg := DataSourceCfg{ + Filename: "./tests/test.log", + Mode: TAIL_MODE, + } + + fileSrc := new(FileSource) + err := fileSrc.Configure(cfg) + if err != nil { + t.Fatalf("unexpected config error %s", err) + } + + out := make(chan types.Event) + tb := tomb.Tomb{} + + err = fileSrc.StartReading(out, &tb) + if err != nil { + t.Fatalf("unexpected read error %s", err) + } + time.Sleep(1 * time.Second) + if tb.Err() != tomb.ErrStillAlive { + t.Fatalf("unexpected tomb error %s (should be alive)", tb.Err()) + } + //kill the underlying tomb of tailer + fileSrc.tails[0].Kill(fmt.Errorf("ratata")) + time.Sleep(1 * time.Second) + //it can be two errors : + if !strings.Contains(fmt.Sprintf("%s", tb.Err()), "dead reader for ./tests/test.log") && + !strings.Contains(fmt.Sprintf("%s", tb.Err()), "tail for ./tests/test.log is empty") { + t.Fatalf("unexpected error : %s", tb.Err()) + } + +} + +func TestTailRuntime(t *testing.T) { + //log.SetLevel(log.TraceLevel) + + cfg := DataSourceCfg{ + Filename: "./tests/test.log", + Mode: TAIL_MODE, + } + + fileSrc := 
new(FileSource) + err := fileSrc.Configure(cfg) + if err != nil { + t.Fatalf("unexpected config error %s", err) + } + + out := make(chan types.Event) + tb := tomb.Tomb{} + count := 0 + + err = fileSrc.StartReading(out, &tb) + if err != nil { + t.Fatalf("unexpected read error %s", err) + } + + time.Sleep(1 * time.Second) + //write data + f, err := os.OpenFile(cfg.Filename, os.O_APPEND|os.O_WRONLY, 0644) if err != nil { t.Fatal(err) } @@ -177,30 +233,26 @@ func TestAcquisStartReadingTail(t *testing.T) { } f.Close() - time.Sleep(500 * time.Millisecond) - reads := 0 -L: +READLOOP: for { select { - case <-outputChan: - reads++ - //log.Printf("evt %+v", evt) + case <-out: + count++ case <-time.After(1 * time.Second): - break L + break READLOOP } } - log.Printf("-> %d", reads) - if reads != 5 { - t.Fatal() + if count != 5 { + t.Fatalf("expected %d line read, got %d", 5, count) } - acquisTomb.Kill(nil) - if err := acquisTomb.Wait(); err != nil { - t.Fatalf("Acquisition returned error : %s", err) + if tb.Err() != tomb.ErrStillAlive { + t.Fatalf("unexpected tomb error %s", tb.Err()) } - f, err = os.OpenFile(filename, os.O_TRUNC|os.O_WRONLY, 0644) + /*reset the file*/ + f, err = os.OpenFile(cfg.Filename, os.O_CREATE|os.O_WRONLY, 0644) if err != nil { t.Fatal(err) } @@ -211,105 +263,121 @@ L: f.Close() } -func TestAcquisStartReadingCat(t *testing.T) { - testFilePath := "./tests/test.log" +func TestAcquisTail(t *testing.T) { - f, err := os.OpenFile(testFilePath, os.O_TRUNC|os.O_WRONLY, 0644) - if err != nil { - t.Fatal(err) + tests := []struct { + cfg DataSourceCfg + //tombState + config_error string + read_error string + tomb_error string + lines int + }{ + { //missing filename(s) + cfg: DataSourceCfg{ + Mode: TAIL_MODE, + }, + config_error: "no filename or filenames", + }, + { //forbidden file + cfg: DataSourceCfg{ + Mode: TAIL_MODE, + Filename: "/etc/shadow", + }, + config_error: "unable to open /etc/shadow : permission denied", + }, + { //bad glob pattern + cfg: DataSourceCfg{ + Filename: "[a-", + Mode: TAIL_MODE, + }, + config_error: "while globbing [a-: syntax error in pattern", + }, + { //nonexistent file + cfg: DataSourceCfg{ + Filename: "/does/not/exists", + Mode: TAIL_MODE, + }, + config_error: "no files to read for [/does/not/exists]", + }, + { //ok file + cfg: DataSourceCfg{ + Filename: "./tests/test.log", + Mode: TAIL_MODE, + }, + lines: 0, + tomb_error: "still alive", + }, + { //invalid gz + cfg: DataSourceCfg{ + Filename: "./tests/badlog.gz", + Mode: TAIL_MODE, + }, + lines: 0, + tomb_error: "still alive", + }, + { //good gz + cfg: DataSourceCfg{ + Filename: "./tests/test.log.gz", + Mode: TAIL_MODE, + }, + lines: 0, + tomb_error: "still alive", + }, } - for i := 0; i < 5; i++ { - _, err := f.WriteString(fmt.Sprintf("ratata%d\n", i)) - if err != nil { - t.Fatal(err) + + for tidx, test := range tests { + fileSrc := new(FileSource) + err := fileSrc.Configure(test.cfg) + if test.config_error != "" { + assert.Contains(t, fmt.Sprintf("%s", err), test.config_error) + log.Infof("expected config error ok : %s", test.config_error) + continue + } else { + if err != nil { + t.Fatalf("%d/%d unexpected config error %s", tidx, len(tests), err) + } } - } - f.Close() - csConfig := &csconfig.CrowdSec{ - SingleFile: testFilePath, - SingleFileLabel: "my_test_log", - Profiling: false, - } - fCTX, err := LoadAcquisitionConfig(csConfig) - if err != nil { - t.Fatalf(err.Error()) - } - outputChan := make(chan types.Event) - acquisTomb := tomb.Tomb{} + out := make(chan types.Event) + tomb := tomb.Tomb{} + count := 0 - 
AcquisStartReading(fCTX, outputChan, &acquisTomb) - if !acquisTomb.Alive() { - t.Fatal("acquisition tomb is not alive") - } - - time.Sleep(500 * time.Millisecond) - reads := 0 -L: - for { - select { - case <-outputChan: - reads++ - case <-time.After(1 * time.Second): - break L + err = fileSrc.StartReading(out, &tomb) + if test.read_error != "" { + assert.Contains(t, fmt.Sprintf("%s", err), test.read_error) + log.Infof("expected read error ok : %s", test.read_error) + continue + } else { + if err != nil { + t.Fatalf("%d/%d unexpected read error %s", tidx, len(tests), err) + } } + + READLOOP: + for { + select { + case <-out: + count++ + case <-time.After(1 * time.Second): + break READLOOP + } + } + + if count != test.lines { + t.Fatalf("%d/%d expected %d line read, got %d", tidx, len(tests), test.lines, count) + } + + if test.tomb_error != "" { + assert.Contains(t, fmt.Sprintf("%s", tomb.Err()), test.tomb_error) + log.Infof("expected tomb error ok : %s", test.read_error) + continue + } else { + if tomb.Err() != nil { + t.Fatalf("%d/%d unexpected tomb error %s", tidx, len(tests), tomb.Err()) + } + } + } - log.Printf("-> %d", reads) - if reads != 5 { - t.Fatal() - } - - acquisTomb.Kill(nil) - if err := acquisTomb.Wait(); err != nil { - t.Fatalf("Acquisition returned error : %s", err) - } - - f, err = os.OpenFile(testFilePath, os.O_TRUNC|os.O_WRONLY, 0644) - if err != nil { - t.Fatal(err) - } - _, err = f.WriteString("one log line\n") - if err != nil { - t.Fatal(err) - } - f.Close() -} - -func TestAcquisStartReadingGzCat(t *testing.T) { - testFilePath := "./tests/test.log.gz" - - csConfig := &csconfig.CrowdSec{ - SingleFile: testFilePath, - SingleFileLabel: "my_test_log", - Profiling: false, - } - fCTX, err := LoadAcquisitionConfig(csConfig) - if err != nil { - t.Fatalf(err.Error()) - } - outputChan := make(chan types.Event) - acquisTomb := tomb.Tomb{} - - AcquisStartReading(fCTX, outputChan, &acquisTomb) - if !acquisTomb.Alive() { - t.Fatal("acquisition tomb is not alive") - } - - time.Sleep(500 * time.Millisecond) - reads := 0 -L: - for { - select { - case <-outputChan: - reads++ - case <-time.After(1 * time.Second): - break L - } - } - - log.Printf("-> %d", reads) - if reads != 1 { - t.Fatal() - } } diff --git a/pkg/acquisition/journalctl_reader.go b/pkg/acquisition/journalctl_reader.go new file mode 100644 index 000000000..089a907b3 --- /dev/null +++ b/pkg/acquisition/journalctl_reader.go @@ -0,0 +1,174 @@ +package acquisition + +import ( + "bufio" + "encoding/json" + "fmt" + "io" + "os/exec" + "strings" + "time" + + log "github.com/sirupsen/logrus" + + leaky "github.com/crowdsecurity/crowdsec/pkg/leakybucket" + "github.com/crowdsecurity/crowdsec/pkg/types" + "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" + tomb "gopkg.in/tomb.v2" +) + +/* + journald/systemd support : + + systemd has its own logging system, which stores files in non-text mode. 
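    (The journal is binary, so it can't be tailed like a plain log file; in
    tail mode the reader below effectively spawns `journalctl --follow <filters>`
    and consumes its stdout. Any concrete filter value appearing in examples,
    e.g. _SYSTEMD_UNIT=ssh.service, is illustrative.)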
+ To be able to read those, we're going to read the output of journalctl, see https://github.com/crowdsecurity/crowdsec/issues/423 + + + TBD : + - handle journalctl errors +*/ + +type JournaldSource struct { + Config DataSourceCfg + Cmd *exec.Cmd + Stdout io.ReadCloser + Stderr io.ReadCloser + Decoder *json.Decoder + SrcName string +} + +var JOURNALD_CMD = "journalctl" +var JOURNALD_DEFAULT_TAIL_ARGS = []string{"--follow"} +var JOURNALD_DEFAULT_CAT_ARGS = []string{} + +func (j *JournaldSource) Configure(config DataSourceCfg) error { + var journalArgs []string + + j.Config = config + if config.JournalctlFilters == nil { + return fmt.Errorf("journalctl_filter shouldn't be empty") + } + + if j.Config.Mode == TAIL_MODE { + journalArgs = JOURNALD_DEFAULT_TAIL_ARGS + } else if j.Config.Mode == CAT_MODE { + journalArgs = JOURNALD_DEFAULT_CAT_ARGS + } else { + return fmt.Errorf("unknown mode '%s' for journald source", j.Config.Mode) + } + journalArgs = append(journalArgs, config.JournalctlFilters...) + + j.Cmd = exec.Command(JOURNALD_CMD, journalArgs...) + j.Stderr, _ = j.Cmd.StderrPipe() + j.Stdout, _ = j.Cmd.StdoutPipe() + j.SrcName = fmt.Sprintf("journalctl-%s", strings.Join(config.JournalctlFilters, ".")) + log.Infof("[journald datasource] Configured with filters : %+v", journalArgs) + log.Debugf("cmd path : %s", j.Cmd.Path) + log.Debugf("cmd args : %+v", j.Cmd.Args) + + return nil +} + +func (j *JournaldSource) Mode() string { + return j.Config.Mode +} + +func (j *JournaldSource) readOutput(out chan types.Event, t *tomb.Tomb) error { + + /* + todo : handle the channel + */ + clog := log.WithFields(log.Fields{ + "acquisition file": j.SrcName, + }) + if err := j.Cmd.Start(); err != nil { + clog.Errorf("failed to start journalctl: %s", err) + return errors.Wrapf(err, "starting journalctl (%s)", j.SrcName) + } + + readErr := make(chan error) + + /*read stderr*/ + go func() { + scanner := bufio.NewScanner(j.Stderr) + if scanner == nil { + readErr <- fmt.Errorf("failed to create stderr scanner") + return + } + for scanner.Scan() { + txt := scanner.Text() + clog.Warningf("got stderr message : %s", txt) + readErr <- fmt.Errorf(txt) + } + }() + /*read stdout*/ + go func() { + scanner := bufio.NewScanner(j.Stdout) + if scanner == nil { + readErr <- fmt.Errorf("failed to create stdout scanner") + return + } + for scanner.Scan() { + l := types.Line{} + ReaderHits.With(prometheus.Labels{"source": j.SrcName}).Inc() + l.Raw = scanner.Text() + clog.Debugf("getting one line : %s", l.Raw) + l.Labels = j.Config.Labels + l.Time = time.Now() + l.Src = j.SrcName + l.Process = true + evt := types.Event{Line: l, Process: true, Type: types.LOG, ExpectMode: leaky.LIVE} + out <- evt + } + clog.Debugf("finished reading from journalctl") + if err := scanner.Err(); err != nil { + clog.Debugf("got an error while reading %s : %s", j.SrcName, err) + readErr <- err + return + } + readErr <- nil + }() + + for { + select { + case <-t.Dying(): + clog.Debugf("journalctl datasource %s stopping", j.SrcName) + return nil + case err := <-readErr: + clog.Debugf("the subroutine returned, leave as well") + if err != nil { + clog.Warningf("journalctl reader error : %s", err) + t.Kill(err) + } + return err + } + } +} + +func (j *JournaldSource) StartReading(out chan types.Event, t *tomb.Tomb) error { + + if j.Config.Mode == CAT_MODE { + return j.StartCat(out, t) + } else if j.Config.Mode == TAIL_MODE { + return j.StartTail(out, t) + } else { + return fmt.Errorf("unknown mode '%s' for file acquisition", j.Config.Mode) + } +} + +func (j 
*JournaldSource) StartCat(out chan types.Event, t *tomb.Tomb) error { + t.Go(func() error { + defer types.CatchPanic("crowdsec/acquis/tailjournalctl") + return j.readOutput(out, t) + }) + return nil +} + +func (j *JournaldSource) StartTail(out chan types.Event, t *tomb.Tomb) error { + t.Go(func() error { + defer types.CatchPanic("crowdsec/acquis/catjournalctl") + return j.readOutput(out, t) + }) + return nil +} diff --git a/pkg/acquisition/journalctl_reader_test.go b/pkg/acquisition/journalctl_reader_test.go new file mode 100644 index 000000000..00e43a697 --- /dev/null +++ b/pkg/acquisition/journalctl_reader_test.go @@ -0,0 +1,238 @@ +package acquisition + +import ( + "fmt" + "os" + "testing" + "time" + + "github.com/crowdsecurity/crowdsec/pkg/types" + log "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" + tomb "gopkg.in/tomb.v2" +) + +/* + As we can't decently run journalctl in the CI but we still need to test the command execution aspect : + - we create tests 'output only' (cf. TestSimJournalctlCat) that just produce outputs + - we run ourselves (os.Args[0]) with specific args to call specific 'output only' tests + - and this is how we test the behavior +*/ + +//14 lines of sshd logs +var testjournalctl_output_1 string = `-- Logs begin at Fri 2019-07-26 17:13:13 CEST, end at Mon 2020-11-23 09:17:34 CET. -- +Nov 22 11:22:19 zeroed sshd[1480]: Invalid user wqeqwe from 127.0.0.1 port 55818 +Nov 22 11:22:23 zeroed sshd[1480]: Failed password for invalid user wqeqwe from 127.0.0.1 port 55818 ssh2 +Nov 22 11:23:22 zeroed sshd[1769]: Invalid user wqeqwe1 from 127.0.0.1 port 55824 +Nov 22 11:23:24 zeroed sshd[1769]: Disconnecting invalid user wqeqwe1 127.0.0.1 port 55824: Too many authentication failures [preauth] +Nov 22 11:23:24 zeroed sshd[1777]: Invalid user wqeqwe2 from 127.0.0.1 port 55826 +Nov 22 11:23:25 zeroed sshd[1777]: Disconnecting invalid user wqeqwe2 127.0.0.1 port 55826: Too many authentication failures [preauth] +Nov 22 11:23:25 zeroed sshd[1780]: Invalid user wqeqwe3 from 127.0.0.1 port 55828 +Nov 22 11:23:26 zeroed sshd[1780]: Disconnecting invalid user wqeqwe3 127.0.0.1 port 55828: Too many authentication failures [preauth] +Nov 22 11:23:26 zeroed sshd[1786]: Invalid user wqeqwe4 from 127.0.0.1 port 55830 +Nov 22 11:23:27 zeroed sshd[1786]: Failed password for invalid user wqeqwe4 from 127.0.0.1 port 55830 ssh2 +Nov 22 11:23:27 zeroed sshd[1786]: Disconnecting invalid user wqeqwe4 127.0.0.1 port 55830: Too many authentication failures [preauth] +Nov 22 11:23:27 zeroed sshd[1791]: Invalid user wqeqwe5 from 127.0.0.1 port 55834 +Nov 22 11:23:27 zeroed sshd[1791]: Failed password for invalid user wqeqwe5 from 127.0.0.1 port 55834 ssh2 +` + +func TestSimJournalctlCat(t *testing.T) { + if os.Getenv("GO_WANT_TEST_OUTPUT") != "1" { + return + } + defer os.Exit(0) + fmt.Print(testjournalctl_output_1) +} + +func TestSimJournalctlCatError(t *testing.T) { + if os.Getenv("GO_WANT_TEST_OUTPUT") != "1" { + return + } + defer os.Exit(0) + fmt.Print("this is a single line being produced") + log.Warningf("this is an error message") +} + +func TestSimJournalctlCatOneLine(t *testing.T) { + if os.Getenv("GO_WANT_TEST_OUTPUT") != "1" { + return + } + defer os.Exit(0) + fmt.Print("this is a single line being produced") +} + +func TestJournaldTail(t *testing.T) { + tests := []struct { + cfg DataSourceCfg + config_error string + read_error string + tomb_error string + lines int + }{ + { //missing filename(s) + cfg: DataSourceCfg{ + Mode: TAIL_MODE, + }, + config_error: 
"journalctl_filter shouldn't be empty", + }, + { //bad mode + cfg: DataSourceCfg{ + Mode: "ratatata", + JournalctlFilters: []string{"-test.run=DoesNotExist", "--"}, + }, + /*here would actually be the journalctl error message on bad args, but you get the point*/ + config_error: "unknown mode 'ratatata' for journald source", + }, + { //wrong arguments + cfg: DataSourceCfg{ + Mode: TAIL_MODE, + JournalctlFilters: []string{"--this-is-bad-option", "--"}, + }, + /*here would actually be the journalctl error message on bad args, but you get the point*/ + tomb_error: "flag provided but not defined: -this-is-bad-option", + }, + } + + //we're actually using tests to do this, hold my beer and watch this + JOURNALD_CMD = os.Args[0] + JOURNALD_DEFAULT_TAIL_ARGS = []string{} + + for tidx, test := range tests { + journalSrc := new(JournaldSource) + err := journalSrc.Configure(test.cfg) + if test.config_error != "" { + assert.Contains(t, fmt.Sprintf("%s", err), test.config_error) + log.Infof("expected config error ok : %s", test.config_error) + continue + } else { + if err != nil { + t.Fatalf("%d/%d unexpected config error %s", tidx, len(tests), err) + } + } + + assert.Equal(t, journalSrc.Mode(), test.cfg.Mode) + + //this tells our fake tests to produce data + journalSrc.Cmd.Env = []string{"GO_WANT_TEST_OUTPUT=1"} + + out := make(chan types.Event) + tomb := tomb.Tomb{} + count := 0 + + //start consuming the data before we start the prog, so that chan isn't full + go func() { + for { + select { + case <-out: + count++ + case <-time.After(1 * time.Second): + return + } + } + }() + + err = journalSrc.StartReading(out, &tomb) + if test.read_error != "" { + assert.Contains(t, fmt.Sprintf("%s", err), test.read_error) + log.Infof("expected read error ok : %s", test.read_error) + continue + } else { + if err != nil { + t.Fatalf("%d/%d unexpected read error %s", tidx, len(tests), err) + } + } + + time.Sleep(2 * time.Second) + log.Printf("now let's check number of lines & errors") + if count != test.lines { + t.Fatalf("%d/%d expected %d line read, got %d", tidx, len(tests), test.lines, count) + } + + if test.tomb_error != "" { + assert.Contains(t, fmt.Sprintf("%s", tomb.Err()), test.tomb_error) + log.Infof("expected tomb error ok : %s", test.read_error) + continue + } else { + if tomb.Err() != nil { + t.Fatalf("%d/%d unexpected tomb error %s", tidx, len(tests), tomb.Err()) + } + } + + } +} + +func TestJournaldSimple(t *testing.T) { + JOURNALD_CMD = os.Args[0] + JOURNALD_DEFAULT_TAIL_ARGS = []string{} + jBaseCfg := DataSourceCfg{ + JournalctlFilters: []string{"-test.run=TestSimJournalctlCat", "--"}, + Mode: CAT_MODE, + } + + journalSrc := new(JournaldSource) + err := journalSrc.Configure(jBaseCfg) + if err != nil { + t.Fatalf("configuring journalctl : %s", err) + } + journalSrc.Cmd.Env = []string{"GO_WANT_TEST_OUTPUT=1"} + + out := make(chan types.Event) + tomb := tomb.Tomb{} + count := 0 + + //start the reading : it doesn't give hand back before it's done + err = journalSrc.StartReading(out, &tomb) + if err != nil { + t.Fatalf("unexpected read error %s", err) + } + +RLOOP: + for { + select { + case <-out: + count++ + case <-time.After(1 * time.Second): + break RLOOP + } + } + //we expect 14 lines to be read + assert.Equal(t, 14, count) + +} + +func TestJournalctlKill(t *testing.T) { + cfg := DataSourceCfg{ + Mode: CAT_MODE, + JournalctlFilters: []string{"-test.run=TestSimJournalctlCatOneLine", "--"}, + } + //we're actually using tests to do this, hold my beer and watch this + JOURNALD_CMD = os.Args[0] + 
JOURNALD_DEFAULT_TAIL_ARGS = []string{} + + log.SetLevel(log.TraceLevel) + journalSrc := new(JournaldSource) + err := journalSrc.Configure(cfg) + if err != nil { + t.Fatalf("unexpected config error %s", err) + } + journalSrc.Cmd.Env = []string{"GO_WANT_TEST_OUTPUT=1"} + + out := make(chan types.Event) + tb := tomb.Tomb{} + + err = journalSrc.StartReading(out, &tb) + if err != nil { + t.Fatalf("unexpected read error %s", err) + } + time.Sleep(1 * time.Second) + if tb.Err() != tomb.ErrStillAlive { + t.Fatalf("unexpected tomb error %s (should be alive)", tb.Err()) + } + //kill it :> + tb.Kill(nil) + time.Sleep(1 * time.Second) + if tb.Err() != nil { + t.Fatalf("unexpected tomb error %s (should be dead)", tb.Err()) + } + +} diff --git a/pkg/acquisition/tests/acquis_test_log.yaml b/pkg/acquisition/tests/acquis_test.yaml similarity index 100% rename from pkg/acquisition/tests/acquis_test_log.yaml rename to pkg/acquisition/tests/acquis_test.yaml diff --git a/pkg/acquisition/tests/badlog.gz b/pkg/acquisition/tests/badlog.gz new file mode 100644 index 000000000..e69de29bb diff --git a/pkg/acquisition/tests/test.log b/pkg/acquisition/tests/test.log index e69de29bb..6347c5c19 100644 --- a/pkg/acquisition/tests/test.log +++ b/pkg/acquisition/tests/test.log @@ -0,0 +1 @@ +one log line diff --git a/pkg/apiclient/alerts_service.go b/pkg/apiclient/alerts_service.go new file mode 100644 index 000000000..07a9af74a --- /dev/null +++ b/pkg/apiclient/alerts_service.go @@ -0,0 +1,123 @@ +package apiclient + +import ( + "context" + "fmt" + + "github.com/crowdsecurity/crowdsec/pkg/models" + qs "github.com/google/go-querystring/query" +) + +// type ApiAlerts service + +type AlertsService service + +type AlertsListOpts struct { + ScopeEquals *string `url:"scope,omitempty"` + ValueEquals *string `url:"value,omitempty"` + ScenarioEquals *string `url:"scenario,omitempty"` + IPEquals *string `url:"ip,omitempty"` + RangeEquals *string `url:"range,omitempty"` + Since *string `url:"since,omitempty"` + TypeEquals *string `url:"decision_type,omitempty"` + Until *string `url:"until,omitempty"` + IncludeSimulated *bool `url:"simulated,omitempty"` + ActiveDecisionEquals *bool `url:"has_active_decision,omitempty"` + IncludeCAPI *bool `url:"include_capi,omitempty"` + Limit *int `url:"limit,omitempty"` + ListOpts +} + +type AlertsDeleteOpts struct { + ScopeEquals *string `url:"scope,omitempty"` + ValueEquals *string `url:"value,omitempty"` + ScenarioEquals *string `url:"scenario,omitempty"` + IPEquals *string `url:"ip,omitempty"` + RangeEquals *string `url:"range,omitempty"` + Since *string `url:"since,omitempty"` + Until *string `url:"until,omitempty"` + ActiveDecisionEquals *bool `url:"has_active_decision,omitempty"` + SourceEquals *string `url:"alert_source,omitempty"` + ListOpts +} + +func (s *AlertsService) Add(ctx context.Context, alerts models.AddAlertsRequest) (*models.AddAlertsResponse, *Response, error) { + + var added_ids models.AddAlertsResponse + + u := fmt.Sprintf("%s/alerts", s.client.URLPrefix) + req, err := s.client.NewRequest("POST", u, &alerts) + if err != nil { + return nil, nil, err + } + + resp, err := s.client.Do(ctx, req, &added_ids) + if err != nil { + return nil, resp, err + } + return &added_ids, resp, nil +} + +//to demo query arguments +func (s *AlertsService) List(ctx context.Context, opts AlertsListOpts) (*models.GetAlertsResponse, *Response, error) { + var alerts models.GetAlertsResponse + var URI string + u := fmt.Sprintf("%s/alerts", s.client.URLPrefix) + params, err := qs.Values(opts) + if err 
!= nil { + return nil, nil, err + } + if len(params) > 0 { + URI = fmt.Sprintf("%s?%s", u, params.Encode()) + } else { + URI = u + } + + req, err := s.client.NewRequest("GET", URI, nil) + if err != nil { + return nil, nil, err + } + + resp, err := s.client.Do(ctx, req, &alerts) + if err != nil { + return nil, resp, err + } + return &alerts, resp, nil +} + +//to demo query arguments +func (s *AlertsService) Delete(ctx context.Context, opts AlertsDeleteOpts) (*models.DeleteAlertsResponse, *Response, error) { + var alerts models.DeleteAlertsResponse + params, err := qs.Values(opts) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("%s/alerts?%s", s.client.URLPrefix, params.Encode()) + + req, err := s.client.NewRequest("DELETE", u, nil) + if err != nil { + return nil, nil, err + } + + resp, err := s.client.Do(ctx, req, &alerts) + if err != nil { + return nil, resp, err + } + return &alerts, resp, nil +} + +func (s *AlertsService) GetByID(ctx context.Context, alertID int) (*models.Alert, *Response, error) { + var alert models.Alert + u := fmt.Sprintf("%s/alerts/%d", s.client.URLPrefix, alertID) + + req, err := s.client.NewRequest("GET", u, nil) + if err != nil { + return nil, nil, err + } + + resp, err := s.client.Do(ctx, req, &alert) + if err != nil { + return nil, resp, err + } + return &alert, resp, nil +} diff --git a/pkg/apiclient/auth.go b/pkg/apiclient/auth.go new file mode 100644 index 000000000..7c4bcd687 --- /dev/null +++ b/pkg/apiclient/auth.go @@ -0,0 +1,224 @@ +package apiclient + +import ( + "bytes" + "encoding/json" + "time" + + //"errors" + "fmt" + "io" + "net/http" + "net/http/httputil" + "net/url" + + "github.com/crowdsecurity/crowdsec/pkg/models" + "github.com/go-openapi/strfmt" + "github.com/pkg/errors" + log "github.com/sirupsen/logrus" + //"google.golang.org/appengine/log" +) + +type APIKeyTransport struct { + APIKey string + // Transport is the underlying HTTP transport to use when making requests. + // It will default to http.DefaultTransport if nil. + Transport http.RoundTripper + URL *url.URL + VersionPrefix string + UserAgent string +} + +// RoundTrip implements the RoundTripper interface. +func (t *APIKeyTransport) RoundTrip(req *http.Request) (*http.Response, error) { + if t.APIKey == "" { + return nil, errors.New("t.APIKey is empty") + } + + // We must make a copy of the Request so + // that we don't modify the Request we were given. This is required by the + // specification of http.RoundTripper. + req = cloneRequest(req) + req.Header.Add("X-Api-Key", t.APIKey) + if t.UserAgent != "" { + req.Header.Add("User-Agent", t.UserAgent) + } + log.Debugf("req-api: %s %s", req.Method, req.URL.String()) + if log.GetLevel() >= log.TraceLevel { + dump, _ := httputil.DumpRequest(req, true) + log.Tracef("auth-api request: %s", string(dump)) + } + // Make the HTTP request. 
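// Usage sketch for this transport (the key value is illustrative):
//
//   t := &APIKeyTransport{APIKey: "s3cr3t"}
//   client := t.Client() // every request then carries "X-Api-Key: s3cr3t"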
+ resp, err := t.transport().RoundTrip(req) + if err != nil { + log.Errorf("auth-api: auth with api key failed, returned nil response, error: %s", err) + return resp, err + } + if log.GetLevel() >= log.TraceLevel { + dump, _ := httputil.DumpResponse(resp, true) + log.Tracef("auth-api response: %s", string(dump)) + } + + log.Debugf("resp-api: http %d", resp.StatusCode) + + return resp, err +} + +func (t *APIKeyTransport) Client() *http.Client { + return &http.Client{Transport: t} +} + +func (t *APIKeyTransport) transport() http.RoundTripper { + if t.Transport != nil { + return t.Transport + } + return http.DefaultTransport +} + +type JWTTransport struct { + MachineID *string + Password *strfmt.Password + token string + Expiration time.Time + Scenarios []string + URL *url.URL + VersionPrefix string + UserAgent string + // Transport is the underlying HTTP transport to use when making requests. + // It will default to http.DefaultTransport if nil. + Transport http.RoundTripper + UpdateScenario func() ([]string, error) +} + +func (t *JWTTransport) refreshJwtToken() error { + var err error + if t.UpdateScenario != nil { + t.Scenarios, err = t.UpdateScenario() + if err != nil { + return fmt.Errorf("can't update scenario list: %s", err) + } + log.Infof("scenario list updated for '%s'", *t.MachineID) + } + + var auth = models.WatcherAuthRequest{ + MachineID: t.MachineID, + Password: t.Password, + Scenarios: t.Scenarios, + } + + var response models.WatcherAuthResponse + + /* + we don't use the main client, so let's build the body + */ + var buf io.ReadWriter + buf = &bytes.Buffer{} + enc := json.NewEncoder(buf) + enc.SetEscapeHTML(false) + err = enc.Encode(auth) + if err != nil { + return errors.Wrap(err, "could not encode jwt auth body") + } + req, err := http.NewRequest("POST", fmt.Sprintf("%s%s/watchers/login", t.URL, t.VersionPrefix), buf) + if err != nil { + return errors.Wrap(err, "could not create request") + } + req.Header.Add("Content-Type", "application/json") + client := &http.Client{} + if t.UserAgent != "" { + req.Header.Add("User-Agent", t.UserAgent) + } + if log.GetLevel() >= log.TraceLevel { + dump, _ := httputil.DumpRequest(req, true) + log.Tracef("auth-jwt request: %s", string(dump)) + } + + log.Debugf("auth-jwt(auth): %s %s", req.Method, req.URL.String()) + + resp, err := client.Do(req) + if err != nil { + return errors.Wrap(err, "could not get jwt token") + } + log.Debugf("auth-jwt : http %d", resp.StatusCode) + + if log.GetLevel() >= log.TraceLevel { + dump, _ := httputil.DumpResponse(resp, true) + log.Tracef("auth-jwt response: %s", string(dump)) + } + + defer resp.Body.Close() + + if resp.StatusCode < 200 || resp.StatusCode >= 300 { + return fmt.Errorf("received response status %q when fetching %v", resp.Status, req.URL) + } + + if err := json.NewDecoder(resp.Body).Decode(&response); err != nil { + return errors.Wrap(err, "unable to decode response") + } + if err := t.Expiration.UnmarshalText([]byte(response.Expire)); err != nil { + return errors.Wrap(err, "unable to parse jwt expiration") + } + t.token = response.Token + + log.Debugf("token %s will expire on %s", t.token, t.Expiration.String()) + return nil +} + +// RoundTrip implements the RoundTripper interface.
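//
// Wiring it up, as a sketch (values illustrative; lapiURL is an assumed
// *url.URL pointing at the local API, with a trailing slash):
//
//   machineID, password := "test", strfmt.Password("s3cr3t")
//   t := &JWTTransport{
//       MachineID:     &machineID,
//       Password:      &password,
//       URL:           lapiURL,
//       VersionPrefix: "v1",
//   }
//   client := t.Client() // logs in lazily, then sends "Authorization: Bearer <token>"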
+func (t *JWTTransport) RoundTrip(req *http.Request) (*http.Response, error) { + if t.token == "" || t.Expiration.Add(-time.Minute).Before(time.Now()) { + if err := t.refreshJwtToken(); err != nil { + return nil, err + } + } + + // We must make a copy of the Request so + // that we don't modify the Request we were given. This is required by the + // specification of http.RoundTripper. + req = cloneRequest(req) + req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", t.token)) + log.Debugf("req-jwt: %s %s", req.Method, req.URL.String()) + if log.GetLevel() >= log.TraceLevel { + dump, _ := httputil.DumpRequest(req, true) + log.Tracef("req-jwt: %s", string(dump)) + } + if t.UserAgent != "" { + req.Header.Add("User-Agent", t.UserAgent) + } + // Make the HTTP request. + resp, err := t.transport().RoundTrip(req) + if log.GetLevel() >= log.TraceLevel { + dump, _ := httputil.DumpResponse(resp, true) + log.Tracef("resp-jwt: %s", string(dump)) + } + if err != nil { + return resp, errors.Wrapf(err, "performing jwt auth") + } + log.Debugf("resp-jwt: %d", resp.StatusCode) + return resp, nil +} + +func (t *JWTTransport) Client() *http.Client { + return &http.Client{Transport: t} +} + +func (t *JWTTransport) transport() http.RoundTripper { + if t.Transport != nil { + return t.Transport + } + return http.DefaultTransport +} + +// cloneRequest returns a clone of the provided *http.Request. The clone is a +// shallow copy of the struct and its Header map. +func cloneRequest(r *http.Request) *http.Request { + // shallow copy of the struct + r2 := new(http.Request) + *r2 = *r + // deep copy of the Header + r2.Header = make(http.Header, len(r.Header)) + for k, s := range r.Header { + r2.Header[k] = append([]string(nil), s...) + } + return r2 +} diff --git a/pkg/apiclient/auth_service.go b/pkg/apiclient/auth_service.go new file mode 100644 index 000000000..cfb4b4803 --- /dev/null +++ b/pkg/apiclient/auth_service.go @@ -0,0 +1,57 @@ +package apiclient + +import ( + "context" + "fmt" + + "github.com/crowdsecurity/crowdsec/pkg/models" +) + +// type ApiAlerts service + +type AuthService service + +func (s *AuthService) UnregisterWatcher(ctx context.Context) (*Response, error) { + + u := fmt.Sprintf("%s/watchers", s.client.URLPrefix) + req, err := s.client.NewRequest("DELETE", u, nil) + if err != nil { + return nil, err + } + + resp, err := s.client.Do(ctx, req, nil) + if err != nil { + return resp, err + } + return resp, nil +} + +func (s *AuthService) RegisterWatcher(ctx context.Context, registration models.WatcherRegistrationRequest) (*Response, error) { + + u := fmt.Sprintf("%s/watchers", s.client.URLPrefix) + + req, err := s.client.NewRequest("POST", u, ®istration) + if err != nil { + return nil, err + } + + resp, err := s.client.Do(ctx, req, nil) + if err != nil { + return resp, err + } + return resp, nil +} + +func (s *AuthService) AuthenticateWatcher(ctx context.Context, auth models.WatcherAuthRequest) (*Response, error) { + u := fmt.Sprintf("%s/watchers/login", s.client.URLPrefix) + req, err := s.client.NewRequest("POST", u, &auth) + if err != nil { + return nil, err + } + + resp, err := s.client.Do(ctx, req, nil) + if err != nil { + return resp, err + } + return resp, nil +} diff --git a/pkg/apiclient/client.go b/pkg/apiclient/client.go new file mode 100644 index 000000000..8b3f03ba3 --- /dev/null +++ b/pkg/apiclient/client.go @@ -0,0 +1,142 @@ +package apiclient + +import ( + "context" + "crypto/tls" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "net/url" + + 
"github.com/crowdsecurity/crowdsec/pkg/models" + "github.com/pkg/errors" +) + +var ( + InsecureSkipVerify = true +) + +type ApiClient struct { + /*The http client used to make requests*/ + client *http.Client + /*Reuse a single struct instead of allocating one for each service on the heap.*/ + common service + /*config stuff*/ + BaseURL *url.URL + URLPrefix string + UserAgent string + /*exposed Services*/ + Decisions *DecisionsService + Alerts *AlertsService + Auth *AuthService + Metrics *MetricsService + Signal *SignalService +} + +type service struct { + client *ApiClient +} + +func NewClient(config *Config) (*ApiClient, error) { + t := &JWTTransport{ + MachineID: &config.MachineID, + Password: &config.Password, + Scenarios: config.Scenarios, + URL: config.URL, + UserAgent: config.UserAgent, + VersionPrefix: config.VersionPrefix, + UpdateScenario: config.UpdateScenario, + } + http.DefaultTransport.(*http.Transport).TLSClientConfig = &tls.Config{InsecureSkipVerify: InsecureSkipVerify} + c := &ApiClient{client: t.Client(), BaseURL: config.URL, UserAgent: config.UserAgent, URLPrefix: config.VersionPrefix} + c.common.client = c + c.Decisions = (*DecisionsService)(&c.common) + c.Alerts = (*AlertsService)(&c.common) + c.Auth = (*AuthService)(&c.common) + c.Metrics = (*MetricsService)(&c.common) + c.Signal = (*SignalService)(&c.common) + + return c, nil +} + +func NewDefaultClient(URL *url.URL, prefix string, userAgent string, client *http.Client) (*ApiClient, error) { + if client == nil { + client = &http.Client{} + } + http.DefaultTransport.(*http.Transport).TLSClientConfig = &tls.Config{InsecureSkipVerify: InsecureSkipVerify} + c := &ApiClient{client: client, BaseURL: URL, UserAgent: userAgent, URLPrefix: prefix} + c.common.client = c + c.Decisions = (*DecisionsService)(&c.common) + c.Alerts = (*AlertsService)(&c.common) + c.Auth = (*AuthService)(&c.common) + c.Metrics = (*MetricsService)(&c.common) + c.Signal = (*SignalService)(&c.common) + return c, nil +} + +func RegisterClient(config *Config, client *http.Client) (*ApiClient, error) { + if client == nil { + client = &http.Client{} + } + http.DefaultTransport.(*http.Transport).TLSClientConfig = &tls.Config{InsecureSkipVerify: InsecureSkipVerify} + c := &ApiClient{client: client, BaseURL: config.URL, UserAgent: config.UserAgent, URLPrefix: config.VersionPrefix} + c.common.client = c + c.Decisions = (*DecisionsService)(&c.common) + c.Alerts = (*AlertsService)(&c.common) + c.Auth = (*AuthService)(&c.common) + + _, err := c.Auth.RegisterWatcher(context.Background(), models.WatcherRegistrationRequest{MachineID: &config.MachineID, Password: &config.Password}) + if err != nil { + return c, errors.Wrapf(err, "api register (%s): %s", c.BaseURL, err) + } + + return c, nil + +} + +type Response struct { + Response *http.Response + //add our pagination stuff + //NextPage int + //... 
+} + +type ErrorResponse struct { + models.ErrorResponse +} + +func (e *ErrorResponse) Error() string { + return fmt.Sprintf("API error (%s) : %s", *e.Message, e.Errors) +} + +func newResponse(r *http.Response) *Response { + response := &Response{Response: r} + //response.populatePageValues() + return response +} + +func CheckResponse(r *http.Response) error { + if c := r.StatusCode; 200 <= c && c <= 299 { + return nil + } + errorResponse := &ErrorResponse{} + data, err := ioutil.ReadAll(r.Body) + if err == nil && data != nil { + json.Unmarshal(data, errorResponse) + } + return errorResponse +} + +type ListOpts struct { + //Page int + //PerPage int +} + +type DeleteOpts struct { + //?? +} + +type AddOpts struct { + //?? +} diff --git a/pkg/apiclient/client_http.go b/pkg/apiclient/client_http.go new file mode 100644 index 000000000..dffadb5ba --- /dev/null +++ b/pkg/apiclient/client_http.go @@ -0,0 +1,103 @@ +package apiclient + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strings" +) + +func (c *ApiClient) NewRequest(method, url string, body interface{}) (*http.Request, error) { + if !strings.HasSuffix(c.BaseURL.Path, "/") { + return nil, fmt.Errorf("BaseURL must have a trailing slash, but %q does not", c.BaseURL) + } + u, err := c.BaseURL.Parse(url) + if err != nil { + return nil, err + } + + var buf io.ReadWriter + if body != nil { + buf = &bytes.Buffer{} + enc := json.NewEncoder(buf) + enc.SetEscapeHTML(false) + err := enc.Encode(body) + if err != nil { + return nil, err + } + } + + req, err := http.NewRequest(method, u.String(), buf) + if err != nil { + return nil, err + } + + if body != nil { + req.Header.Set("Content-Type", "application/json") + } + + return req, nil +} + +func (c *ApiClient) Do(ctx context.Context, req *http.Request, v interface{}) (*Response, error) { + if ctx == nil { + return nil, errors.New("context must be non-nil") + } + req = req.WithContext(ctx) + + // Check rate limit + + if c.UserAgent != "" { + req.Header.Add("User-Agent", c.UserAgent) + } + + resp, err := c.client.Do(req) + + if err != nil { + // If we got an error, and the context has been canceled, + // the context's error is probably more useful. + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + } + + // If the error type is *url.Error, sanitize its URL before returning. 
+ if e, ok := err.(*url.Error); ok { + if url, err := url.Parse(e.URL); err == nil { + e.URL = url.String() + return nil, e + } + } + + return nil, err + } + + response := newResponse(resp) + + err = CheckResponse(resp) + if err != nil { + return response, err + } + + if v != nil { + if w, ok := v.(io.Writer); ok { + io.Copy(w, resp.Body) + } else { + decErr := json.NewDecoder(resp.Body).Decode(v) + if decErr == io.EOF { + decErr = nil // ignore EOF errors caused by empty response body + } + if decErr != nil { + err = decErr + } + } + } + + return response, err +} diff --git a/pkg/apiclient/config.go b/pkg/apiclient/config.go new file mode 100644 index 000000000..b87a7088b --- /dev/null +++ b/pkg/apiclient/config.go @@ -0,0 +1,17 @@ +package apiclient + +import ( + "net/url" + + "github.com/go-openapi/strfmt" +) + +type Config struct { + MachineID string + Password strfmt.Password + Scenarios []string + URL *url.URL + VersionPrefix string + UserAgent string + UpdateScenario func() ([]string, error) +} diff --git a/pkg/apiclient/decisions_service.go b/pkg/apiclient/decisions_service.go new file mode 100644 index 000000000..ed5f1da71 --- /dev/null +++ b/pkg/apiclient/decisions_service.go @@ -0,0 +1,118 @@ +package apiclient + +import ( + "context" + "fmt" + + "github.com/crowdsecurity/crowdsec/pkg/models" + qs "github.com/google/go-querystring/query" +) + +type DecisionsService service + +type DecisionsListOpts struct { + ScopeEquals *string `url:"scope,omitempty"` + ValueEquals *string `url:"value,omitempty"` + TypeEquals *string `url:"type,omitempty"` + IPEquals *string `url:"ip,omitempty"` + RangeEquals *string `url:"range,omitempty"` + ListOpts +} + +type DecisionsDeleteOpts struct { + ScopeEquals *string `url:"scope,omitempty"` + ValueEquals *string `url:"value,omitempty"` + TypeEquals *string `url:"type,omitempty"` + IPEquals *string `url:"ip,omitempty"` + RangeEquals *string `url:"range,omitempty"` + ListOpts +} + +//to demo query arguments +func (s *DecisionsService) List(ctx context.Context, opts DecisionsListOpts) (*models.GetDecisionsResponse, *Response, error) { + var decisions models.GetDecisionsResponse + params, err := qs.Values(opts) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("%s/decisions/?%s", s.client.URLPrefix, params.Encode()) + + req, err := s.client.NewRequest("GET", u, nil) + if err != nil { + return nil, nil, err + } + + resp, err := s.client.Do(ctx, req, &decisions) + if err != nil { + return nil, resp, err + } + return &decisions, resp, nil +} + +func (s *DecisionsService) GetStream(ctx context.Context, startup bool) (*models.DecisionsStreamResponse, *Response, error) { + var decisions models.DecisionsStreamResponse + + u := fmt.Sprintf("%s/decisions/stream?startup=%t", s.client.URLPrefix, startup) + req, err := s.client.NewRequest("GET", u, nil) + if err != nil { + return nil, nil, err + } + + resp, err := s.client.Do(ctx, req, &decisions) + if err != nil { + return nil, resp, err + } + + return &decisions, resp, nil +} + +func (s *DecisionsService) StopStream(ctx context.Context) (*Response, error) { + + u := fmt.Sprintf("%s/decisions", s.client.URLPrefix) + req, err := s.client.NewRequest("DELETE", u, nil) + if err != nil { + return nil, err + } + + resp, err := s.client.Do(ctx, req, nil) + if err != nil { + return resp, err + } + return resp, nil +} + +func (s *DecisionsService) Delete(ctx context.Context, opts DecisionsDeleteOpts) (*models.DeleteDecisionResponse, *Response, error) { + var deleteDecisionResponse 
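// How the *Opts structs above become query strings via go-querystring,
// as a sketch:
//
//   ip := "1.2.3.4"
//   params, _ := qs.Values(DecisionsListOpts{IPEquals: &ip})
//   // params.Encode() == "ip=1.2.3.4"; nil fields are dropped by omitempty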
models.DeleteDecisionResponse + params, err := qs.Values(opts) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("%s/decisions?%s", s.client.URLPrefix, params.Encode()) + + req, err := s.client.NewRequest("DELETE", u, nil) + if err != nil { + return nil, nil, err + } + + resp, err := s.client.Do(ctx, req, &deleteDecisionResponse) + if err != nil { + return nil, resp, err + } + return &deleteDecisionResponse, resp, nil +} + +func (s *DecisionsService) DeleteOne(ctx context.Context, decision_id string) (*models.DeleteDecisionResponse, *Response, error) { + var deleteDecisionResponse models.DeleteDecisionResponse + u := fmt.Sprintf("%s/decisions/%s", s.client.URLPrefix, decision_id) + + req, err := s.client.NewRequest("DELETE", u, nil) + if err != nil { + return nil, nil, err + } + + resp, err := s.client.Do(ctx, req, &deleteDecisionResponse) + if err != nil { + return nil, resp, err + } + return &deleteDecisionResponse, resp, nil +} diff --git a/pkg/apiclient/metrics.go b/pkg/apiclient/metrics.go new file mode 100644 index 000000000..3e655425c --- /dev/null +++ b/pkg/apiclient/metrics.go @@ -0,0 +1,26 @@ +package apiclient + +import ( + "context" + "fmt" + + "github.com/crowdsecurity/crowdsec/pkg/models" +) + +type MetricsService service + +func (s *MetricsService) Add(ctx context.Context, metrics *models.Metrics) (interface{}, *Response, error) { + var response interface{} + + u := fmt.Sprintf("%s/metrics/", s.client.URLPrefix) + req, err := s.client.NewRequest("POST", u, &metrics) + if err != nil { + return nil, nil, err + } + + resp, err := s.client.Do(ctx, req, &response) + if err != nil { + return nil, resp, err + } + return &response, resp, nil +} diff --git a/pkg/apiclient/signal.go b/pkg/apiclient/signal.go new file mode 100644 index 000000000..4d29148e4 --- /dev/null +++ b/pkg/apiclient/signal.go @@ -0,0 +1,29 @@ +package apiclient + +import ( + "context" + "fmt" + "log" + + "github.com/crowdsecurity/crowdsec/pkg/models" + "github.com/pkg/errors" +) + +type SignalService service + +func (s *SignalService) Add(ctx context.Context, signals *models.AddSignalsRequest) (interface{}, *Response, error) { + var response interface{} + + u := fmt.Sprintf("%s/signals", s.client.URLPrefix) + req, err := s.client.NewRequest("POST", u, &signals) + if err != nil { + return nil, nil, errors.Wrap(err, "while building request") + } + + resp, err := s.client.Do(ctx, req, &response) + if err != nil { + return nil, resp, errors.Wrap(err, "while performing request") + } + log.Printf("Signal push response : http %s", resp.Response.Status) + return &response, resp, nil +} diff --git a/pkg/apiserver/alerts_test.go b/pkg/apiserver/alerts_test.go new file mode 100644 index 000000000..becf128cd --- /dev/null +++ b/pkg/apiserver/alerts_test.go @@ -0,0 +1,542 @@ +package apiserver + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "net/http/httptest" + "strings" + "testing" + "time" + + "github.com/crowdsecurity/crowdsec/pkg/models" + "github.com/gin-gonic/gin" + + log "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" +) + +func InitMachineTest() (*gin.Engine, models.WatcherAuthResponse, error) { + router, err := NewAPITest() + if err != nil { + return nil, models.WatcherAuthResponse{}, fmt.Errorf("unable to run local API: %s", err) + } + + body, err := CreateTestMachine(router) + if err != nil { + return nil, models.WatcherAuthResponse{}, fmt.Errorf("%s", err.Error()) + } + + err = ValidateMachine("test") + if err != nil { + log.Fatalln(err.Error()) + } + + w := 
httptest.NewRecorder() + req, _ := http.NewRequest("POST", "/v1/watchers/login", strings.NewReader(body)) + req.Header.Add("User-Agent", UserAgent) + router.ServeHTTP(w, req) + + loginResp := models.WatcherAuthResponse{} + err = json.NewDecoder(w.Body).Decode(&loginResp) + if err != nil { + log.Fatalln(err.Error()) + } + + return router, loginResp, nil +} + +func TestSimulatedAlert(t *testing.T) { + router, loginResp, err := InitMachineTest() + if err != nil { + log.Fatalln(err.Error()) + } + + alertContentBytes, err := ioutil.ReadFile("./tests/alert_minibulk+simul.json") + if err != nil { + log.Fatal(err) + } + alertContent := string(alertContentBytes) + + w := httptest.NewRecorder() + req, _ := http.NewRequest("POST", "/v1/alerts", strings.NewReader(alertContent)) + req.Header.Add("User-Agent", UserAgent) + req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", loginResp.Token)) + router.ServeHTTP(w, req) + + //exclude decision in simulation mode + w = httptest.NewRecorder() + req, _ = http.NewRequest("GET", "/v1/alerts?simulated=false", strings.NewReader(alertContent)) + req.Header.Add("User-Agent", UserAgent) + req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", loginResp.Token)) + router.ServeHTTP(w, req) + assert.Equal(t, 200, w.Code) + assert.Contains(t, w.Body.String(), `"message":"Ip 91.121.79.178 performed crowdsecurity/ssh-bf (6 events over `) + assert.NotContains(t, w.Body.String(), `"message":"Ip 91.121.79.179 performed crowdsecurity/ssh-bf (6 events over `) + //include decision in simulation mode + w = httptest.NewRecorder() + req, _ = http.NewRequest("GET", "/v1/alerts?simulated=true", strings.NewReader(alertContent)) + req.Header.Add("User-Agent", UserAgent) + req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", loginResp.Token)) + router.ServeHTTP(w, req) + assert.Equal(t, 200, w.Code) + assert.Contains(t, w.Body.String(), `"message":"Ip 91.121.79.178 performed crowdsecurity/ssh-bf (6 events over `) + assert.Contains(t, w.Body.String(), `"message":"Ip 91.121.79.179 performed crowdsecurity/ssh-bf (6 events over `) +} + +func TestCreateAlert(t *testing.T) { + router, loginResp, err := InitMachineTest() + if err != nil { + log.Fatalln(err.Error()) + } + + // Create Alert with invalid format + w := httptest.NewRecorder() + req, _ := http.NewRequest("POST", "/v1/alerts", strings.NewReader("test")) + req.Header.Add("User-Agent", UserAgent) + req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", loginResp.Token)) + router.ServeHTTP(w, req) + + assert.Equal(t, 400, w.Code) + assert.Equal(t, "{\"message\":\"invalid character 'e' in literal true (expecting 'r')\"}", w.Body.String()) + + // Create Alert with invalid input + alertContentBytes, err := ioutil.ReadFile("./tests/invalidAlert_sample.json") + if err != nil { + log.Fatal(err) + } + alertContent := string(alertContentBytes) + + w = httptest.NewRecorder() + req, _ = http.NewRequest("POST", "/v1/alerts", strings.NewReader(alertContent)) + req.Header.Add("User-Agent", UserAgent) + req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", loginResp.Token)) + router.ServeHTTP(w, req) + + assert.Equal(t, 500, w.Code) + assert.Equal(t, "{\"message\":\"validation failure list:\\nscenario in body is required\\nscenario_hash in body is required\\nscenario_version in body is required\\nsimulated in body is required\\nsource in body is required\"}", w.Body.String()) + + // Create Valid Alert + alertContentBytes, err = ioutil.ReadFile("./tests/alert_sample.json") + if err != nil { + log.Fatal(err) + } + alertContent = 
string(alertContentBytes) + + w = httptest.NewRecorder() + req, _ = http.NewRequest("POST", "/v1/alerts", strings.NewReader(alertContent)) + req.Header.Add("User-Agent", UserAgent) + req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", loginResp.Token)) + router.ServeHTTP(w, req) + + assert.Equal(t, 200, w.Code) + assert.Equal(t, "[\"1\"]", w.Body.String()) +} + +func TestAlertListFilters(t *testing.T) { + router, loginResp, err := InitMachineTest() + if err != nil { + log.Fatalln(err.Error()) + } + + alertContentBytes, err := ioutil.ReadFile("./tests/alert_ssh-bf.json") + if err != nil { + log.Fatal(err) + } + + alerts := make([]*models.Alert, 0) + if err := json.Unmarshal(alertContentBytes, &alerts); err != nil { + log.Fatal(err) + } + + for _, alert := range alerts { + *alert.StartAt = time.Now().Format(time.RFC3339) + *alert.StopAt = time.Now().Format(time.RFC3339) + } + + alertContent, err := json.Marshal(alerts) + if err != nil { + log.Fatal(err) + } + + //create one alert + w := httptest.NewRecorder() + req, _ := http.NewRequest("POST", "/v1/alerts", strings.NewReader(string(alertContent))) + req.Header.Add("User-Agent", UserAgent) + req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", loginResp.Token)) + router.ServeHTTP(w, req) + + //bad filter + w = httptest.NewRecorder() + req, _ = http.NewRequest("GET", "/v1/alerts?test=test", strings.NewReader(string(alertContent))) + req.Header.Add("User-Agent", UserAgent) + req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", loginResp.Token)) + router.ServeHTTP(w, req) + assert.Equal(t, 500, w.Code) + assert.Equal(t, "{\"message\":\"Filter parameter 'test' is unknown (=test): invalid filter\"}", w.Body.String()) + + //get without filters + w = httptest.NewRecorder() + req, _ = http.NewRequest("GET", "/v1/alerts", nil) + req.Header.Add("User-Agent", UserAgent) + req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", loginResp.Token)) + router.ServeHTTP(w, req) + assert.Equal(t, 200, w.Code) + //check alert and decision + assert.Contains(t, w.Body.String(), "Ip 91.121.79.195 performed 'crowdsecurity/ssh-bf' (6 events over ") + assert.Contains(t, w.Body.String(), `scope":"Ip","simulated":false,"start_ip":1534676931,"type":"ban","value":"91.121.79.195"`) + + //test decision_type filter (ok) + w = httptest.NewRecorder() + req, _ = http.NewRequest("GET", "/v1/alerts?decision_type=ban", nil) + req.Header.Add("User-Agent", UserAgent) + req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", loginResp.Token)) + router.ServeHTTP(w, req) + assert.Equal(t, 200, w.Code) + assert.Contains(t, w.Body.String(), "Ip 91.121.79.195 performed 'crowdsecurity/ssh-bf' (6 events over ") + assert.Contains(t, w.Body.String(), `scope":"Ip","simulated":false,"start_ip":1534676931,"type":"ban","value":"91.121.79.195"`) + + //test decision_type filter (bad value) + w = httptest.NewRecorder() + req, _ = http.NewRequest("GET", "/v1/alerts?decision_type=ratata", nil) + req.Header.Add("User-Agent", UserAgent) + req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", loginResp.Token)) + router.ServeHTTP(w, req) + assert.Equal(t, 200, w.Code) + assert.Equal(t, "null", w.Body.String()) + + //test scope (ok) + w = httptest.NewRecorder() + req, _ = http.NewRequest("GET", "/v1/alerts?scope=Ip", nil) + req.Header.Add("User-Agent", UserAgent) + req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", loginResp.Token)) + router.ServeHTTP(w, req) + assert.Equal(t, 200, w.Code) + assert.Contains(t, w.Body.String(), "Ip 91.121.79.195 performed 'crowdsecurity/ssh-bf' (6 
events over ") + assert.Contains(t, w.Body.String(), `scope":"Ip","simulated":false,"start_ip":1534676931,"type":"ban","value":"91.121.79.195"`) + + //test scope (bad value) + w = httptest.NewRecorder() + req, _ = http.NewRequest("GET", "/v1/alerts?scope=rarara", nil) + req.Header.Add("User-Agent", UserAgent) + req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", loginResp.Token)) + router.ServeHTTP(w, req) + assert.Equal(t, 200, w.Code) + assert.Equal(t, "null", w.Body.String()) + + //test scenario (ok) + w = httptest.NewRecorder() + req, _ = http.NewRequest("GET", "/v1/alerts?scenario=crowdsecurity/ssh-bf", nil) + req.Header.Add("User-Agent", UserAgent) + req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", loginResp.Token)) + router.ServeHTTP(w, req) + assert.Equal(t, 200, w.Code) + assert.Contains(t, w.Body.String(), "Ip 91.121.79.195 performed 'crowdsecurity/ssh-bf' (6 events over ") + assert.Contains(t, w.Body.String(), `scope":"Ip","simulated":false,"start_ip":1534676931,"type":"ban","value":"91.121.79.195"`) + + //test scenario (bad value) + w = httptest.NewRecorder() + req, _ = http.NewRequest("GET", "/v1/alerts?scenario=crowdsecurity/nope", nil) + req.Header.Add("User-Agent", UserAgent) + req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", loginResp.Token)) + router.ServeHTTP(w, req) + assert.Equal(t, 200, w.Code) + assert.Equal(t, "null", w.Body.String()) + + //test ip (ok) + w = httptest.NewRecorder() + req, _ = http.NewRequest("GET", "/v1/alerts?ip=91.121.79.195", nil) + req.Header.Add("User-Agent", UserAgent) + req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", loginResp.Token)) + router.ServeHTTP(w, req) + assert.Equal(t, 200, w.Code) + assert.Contains(t, w.Body.String(), "Ip 91.121.79.195 performed 'crowdsecurity/ssh-bf' (6 events over ") + assert.Contains(t, w.Body.String(), `scope":"Ip","simulated":false,"start_ip":1534676931,"type":"ban","value":"91.121.79.195"`) + + //test ip (bad value) + w = httptest.NewRecorder() + req, _ = http.NewRequest("GET", "/v1/alerts?ip=99.122.77.195", nil) + req.Header.Add("User-Agent", UserAgent) + req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", loginResp.Token)) + router.ServeHTTP(w, req) + assert.Equal(t, 200, w.Code) + assert.Equal(t, "null", w.Body.String()) + + //test ip (invalid value) + w = httptest.NewRecorder() + req, _ = http.NewRequest("GET", "/v1/alerts?ip=gruueq", nil) + req.Header.Add("User-Agent", UserAgent) + req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", loginResp.Token)) + router.ServeHTTP(w, req) + assert.Equal(t, 500, w.Code) + assert.Equal(t, `{"message":"unable to parse 'gruueq': %!s(\u003cnil\u003e): invalid ip address / range"}`, w.Body.String()) + + //test range (ok) + w = httptest.NewRecorder() + req, _ = http.NewRequest("GET", "/v1/alerts?range=91.121.79.0/24", nil) + req.Header.Add("User-Agent", UserAgent) + req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", loginResp.Token)) + router.ServeHTTP(w, req) + assert.Equal(t, 200, w.Code) + assert.Contains(t, w.Body.String(), "Ip 91.121.79.195 performed 'crowdsecurity/ssh-bf' (6 events over ") + assert.Contains(t, w.Body.String(), `scope":"Ip","simulated":false,"start_ip":1534676931,"type":"ban","value":"91.121.79.195"`) + + //test range + w = httptest.NewRecorder() + req, _ = http.NewRequest("GET", "/v1/alerts?range=99.122.77.0/24", nil) + req.Header.Add("User-Agent", UserAgent) + req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", loginResp.Token)) + router.ServeHTTP(w, req) + assert.Equal(t, 200, w.Code) + 
assert.Equal(t, "null", w.Body.String()) + + //test range (invalid value) + w = httptest.NewRecorder() + req, _ = http.NewRequest("GET", "/v1/alerts?range=ratata", nil) + req.Header.Add("User-Agent", UserAgent) + req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", loginResp.Token)) + router.ServeHTTP(w, req) + assert.Equal(t, 500, w.Code) + assert.Equal(t, `{"message":"unable to convert 'ratata' to int interval: 'ratata' is not a valid CIDR: invalid ip address / range"}`, w.Body.String()) + + //test since (ok) + w = httptest.NewRecorder() + req, _ = http.NewRequest("GET", "/v1/alerts?since=1h", nil) + req.Header.Add("User-Agent", UserAgent) + req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", loginResp.Token)) + router.ServeHTTP(w, req) + assert.Equal(t, 200, w.Code) + assert.Contains(t, w.Body.String(), "Ip 91.121.79.195 performed 'crowdsecurity/ssh-bf' (6 events over ") + assert.Contains(t, w.Body.String(), `scope":"Ip","simulated":false,"start_ip":1534676931,"type":"ban","value":"91.121.79.195"`) + + //test since (ok but yelds no results) + w = httptest.NewRecorder() + req, _ = http.NewRequest("GET", "/v1/alerts?since=1ns", nil) + req.Header.Add("User-Agent", UserAgent) + req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", loginResp.Token)) + router.ServeHTTP(w, req) + assert.Equal(t, 200, w.Code) + assert.Equal(t, "null", w.Body.String()) + + //test since (invalid value) + w = httptest.NewRecorder() + req, _ = http.NewRequest("GET", "/v1/alerts?since=1zuzu", nil) + req.Header.Add("User-Agent", UserAgent) + req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", loginResp.Token)) + router.ServeHTTP(w, req) + assert.Equal(t, 500, w.Code) + assert.Contains(t, w.Body.String(), `{"message":"while parsing duration: time: unknown unit`) + + //test until (ok) + w = httptest.NewRecorder() + req, _ = http.NewRequest("GET", "/v1/alerts?until=1ns", nil) + req.Header.Add("User-Agent", UserAgent) + req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", loginResp.Token)) + router.ServeHTTP(w, req) + assert.Equal(t, 200, w.Code) + assert.Contains(t, w.Body.String(), "Ip 91.121.79.195 performed 'crowdsecurity/ssh-bf' (6 events over ") + assert.Contains(t, w.Body.String(), `scope":"Ip","simulated":false,"start_ip":1534676931,"type":"ban","value":"91.121.79.195"`) + + //test until (ok but no return) + w = httptest.NewRecorder() + req, _ = http.NewRequest("GET", "/v1/alerts?until=1m", nil) + req.Header.Add("User-Agent", UserAgent) + req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", loginResp.Token)) + router.ServeHTTP(w, req) + assert.Equal(t, 200, w.Code) + assert.Equal(t, "null", w.Body.String()) + + //test until (invalid value) + w = httptest.NewRecorder() + req, _ = http.NewRequest("GET", "/v1/alerts?until=1zuzu", nil) + req.Header.Add("User-Agent", UserAgent) + req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", loginResp.Token)) + router.ServeHTTP(w, req) + assert.Equal(t, 500, w.Code) + assert.Contains(t, w.Body.String(), `{"message":"while parsing duration: time: unknown unit`) + + //test simulated (ok) + w = httptest.NewRecorder() + req, _ = http.NewRequest("GET", "/v1/alerts?simulated=true", nil) + req.Header.Add("User-Agent", UserAgent) + req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", loginResp.Token)) + router.ServeHTTP(w, req) + assert.Equal(t, 200, w.Code) + assert.Contains(t, w.Body.String(), "Ip 91.121.79.195 performed 'crowdsecurity/ssh-bf' (6 events over ") + assert.Contains(t, w.Body.String(), 
`scope":"Ip","simulated":false,"start_ip":1534676931,"type":"ban","value":"91.121.79.195"`) + + //test simulated (ok) + w = httptest.NewRecorder() + req, _ = http.NewRequest("GET", "/v1/alerts?simulated=false", nil) + req.Header.Add("User-Agent", UserAgent) + req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", loginResp.Token)) + router.ServeHTTP(w, req) + assert.Equal(t, 200, w.Code) + assert.Contains(t, w.Body.String(), "Ip 91.121.79.195 performed 'crowdsecurity/ssh-bf' (6 events over ") + assert.Contains(t, w.Body.String(), `scope":"Ip","simulated":false,"start_ip":1534676931,"type":"ban","value":"91.121.79.195"`) + + //test has active decision + w = httptest.NewRecorder() + req, _ = http.NewRequest("GET", "/v1/alerts?has_active_decision=true", nil) + req.Header.Add("User-Agent", UserAgent) + req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", loginResp.Token)) + router.ServeHTTP(w, req) + assert.Equal(t, 200, w.Code) + assert.Contains(t, w.Body.String(), "Ip 91.121.79.195 performed 'crowdsecurity/ssh-bf' (6 events over ") + assert.Contains(t, w.Body.String(), `scope":"Ip","simulated":false,"start_ip":1534676931,"type":"ban","value":"91.121.79.195"`) + + //test has active decision + w = httptest.NewRecorder() + req, _ = http.NewRequest("GET", "/v1/alerts?has_active_decision=false", nil) + req.Header.Add("User-Agent", UserAgent) + req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", loginResp.Token)) + router.ServeHTTP(w, req) + assert.Equal(t, 200, w.Code) + assert.Equal(t, "null", w.Body.String()) + + //test has active decision (invalid value) + w = httptest.NewRecorder() + req, _ = http.NewRequest("GET", "/v1/alerts?has_active_decision=ratatqata", nil) + req.Header.Add("User-Agent", UserAgent) + req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", loginResp.Token)) + router.ServeHTTP(w, req) + assert.Equal(t, 500, w.Code) + assert.Equal(t, `{"message":"'ratatqata' is not a boolean: strconv.ParseBool: parsing \"ratatqata\": invalid syntax: unable to parse type"}`, w.Body.String()) + +} + +func TestAlertBulkInsert(t *testing.T) { + router, loginResp, err := InitMachineTest() + if err != nil { + log.Fatalln(err.Error()) + } + + //insert a bulk of 20 alerts to trigger bulk insert + alertContentBytes, err := ioutil.ReadFile("./tests/alert_bulk.json") + if err != nil { + log.Fatal(err) + } + alertContent := string(alertContentBytes) + + w := httptest.NewRecorder() + req, _ := http.NewRequest("POST", "/v1/alerts", strings.NewReader(alertContent)) + req.Header.Add("User-Agent", UserAgent) + req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", loginResp.Token)) + router.ServeHTTP(w, req) + + w = httptest.NewRecorder() + req, _ = http.NewRequest("GET", "/v1/alerts", strings.NewReader(alertContent)) + req.Header.Add("User-Agent", UserAgent) + req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", loginResp.Token)) + router.ServeHTTP(w, req) + assert.Equal(t, 200, w.Code) +} + +func TestListAlert(t *testing.T) { + router, loginResp, err := InitMachineTest() + if err != nil { + log.Fatalln(err.Error()) + } + + alertContentBytes, err := ioutil.ReadFile("./tests/alert_sample.json") + if err != nil { + log.Fatal(err) + } + alertContent := string(alertContentBytes) + + w := httptest.NewRecorder() + req, _ := http.NewRequest("POST", "/v1/alerts", strings.NewReader(alertContent)) + req.Header.Add("User-Agent", UserAgent) + req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", loginResp.Token)) + router.ServeHTTP(w, req) + + // List Alert with invalid filter + w = 
httptest.NewRecorder() + req, _ = http.NewRequest("GET", "/v1/alerts?test=test", nil) + req.Header.Add("User-Agent", UserAgent) + req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", loginResp.Token)) + router.ServeHTTP(w, req) + assert.Equal(t, 500, w.Code) + assert.Equal(t, "{\"message\":\"Filter parameter 'test' is unknown (=test): invalid filter\"}", w.Body.String()) + + // List Alert + w = httptest.NewRecorder() + req, _ = http.NewRequest("GET", "/v1/alerts", nil) + req.Header.Add("User-Agent", UserAgent) + req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", loginResp.Token)) + router.ServeHTTP(w, req) + + assert.Equal(t, 200, w.Code) + assert.Contains(t, w.Body.String(), "crowdsecurity/test") +} + +func TestCreateAlertErrors(t *testing.T) { + router, loginResp, err := InitMachineTest() + if err != nil { + log.Fatalln(err.Error()) + } + + alertContentBytes, err := ioutil.ReadFile("./tests/alert_sample.json") + if err != nil { + log.Fatal(err) + } + alertContent := string(alertContentBytes) + + //test invalid bearer + w := httptest.NewRecorder() + req, _ := http.NewRequest("POST", "/v1/alerts", strings.NewReader(alertContent)) + req.Header.Add("User-Agent", UserAgent) + req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", "ratata")) + router.ServeHTTP(w, req) + assert.Equal(t, 401, w.Code) + + //test invalid bearer + w = httptest.NewRecorder() + req, _ = http.NewRequest("POST", "/v1/alerts", strings.NewReader(alertContent)) + req.Header.Add("User-Agent", UserAgent) + req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", loginResp.Token+"s")) + router.ServeHTTP(w, req) + assert.Equal(t, 401, w.Code) + +} + +func TestDeleteAlert(t *testing.T) { + router, loginResp, err := InitMachineTest() + if err != nil { + log.Fatalln(err.Error()) + } + + alertContentBytes, err := ioutil.ReadFile("./tests/alert_sample.json") + if err != nil { + log.Fatal(err) + } + alertContent := string(alertContentBytes) + + w := httptest.NewRecorder() + req, _ := http.NewRequest("POST", "/v1/alerts", strings.NewReader(alertContent)) + req.Header.Add("User-Agent", UserAgent) + req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", loginResp.Token)) + router.ServeHTTP(w, req) + + // Fail Delete Alert + w = httptest.NewRecorder() + req, _ = http.NewRequest("DELETE", "/v1/alerts", strings.NewReader("")) + req.Header.Add("User-Agent", UserAgent) + req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", loginResp.Token)) + req.RemoteAddr = "127.0.0.2:4242" + router.ServeHTTP(w, req) + + assert.Equal(t, 403, w.Code) + assert.Equal(t, `{"message":"access forbidden from this IP (127.0.0.2)"}`, w.Body.String()) + + // Delete Alert + w = httptest.NewRecorder() + req, _ = http.NewRequest("DELETE", "/v1/alerts", strings.NewReader("")) + req.Header.Add("User-Agent", UserAgent) + req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", loginResp.Token)) + req.RemoteAddr = "127.0.0.1:4242" + router.ServeHTTP(w, req) + assert.Equal(t, 200, w.Code) + assert.Equal(t, `{"message":"1 deleted alerts"}`, w.Body.String()) +} diff --git a/pkg/apiserver/api_key_test.go b/pkg/apiserver/api_key_test.go new file mode 100644 index 000000000..0e91b79fd --- /dev/null +++ b/pkg/apiserver/api_key_test.go @@ -0,0 +1,52 @@ +package apiserver + +import ( + "net/http" + "net/http/httptest" + "strings" + "testing" + + log "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" +) + +func TestAPIKey(t *testing.T) { + router, err := NewAPITest() + if err != nil { + log.Fatalf("unable to run local API: %s", err) + } + + 
APIKey, err := CreateTestBouncer() + if err != nil { + log.Fatalf("%s", err.Error()) + } + // Login with empty token + w := httptest.NewRecorder() + req, _ := http.NewRequest("GET", "/v1/decisions", strings.NewReader("")) + req.Header.Add("User-Agent", UserAgent) + router.ServeHTTP(w, req) + + assert.Equal(t, 403, w.Code) + assert.Equal(t, "{\"message\":\"access forbidden\"}", w.Body.String()) + + // Login with invalid token + w = httptest.NewRecorder() + req, _ = http.NewRequest("GET", "/v1/decisions", strings.NewReader("")) + req.Header.Add("User-Agent", UserAgent) + req.Header.Add("X-Api-Key", "a1b2c3d4e5f6") + router.ServeHTTP(w, req) + + assert.Equal(t, 403, w.Code) + assert.Equal(t, "{\"message\":\"access forbidden\"}", w.Body.String()) + + // Login with valid token + w = httptest.NewRecorder() + req, _ = http.NewRequest("GET", "/v1/decisions", strings.NewReader("")) + req.Header.Add("User-Agent", UserAgent) + req.Header.Add("X-Api-Key", APIKey) + router.ServeHTTP(w, req) + + assert.Equal(t, 200, w.Code) + assert.Equal(t, "null", w.Body.String()) + +} diff --git a/pkg/apiserver/apic.go b/pkg/apiserver/apic.go new file mode 100644 index 000000000..c56df04c0 --- /dev/null +++ b/pkg/apiserver/apic.go @@ -0,0 +1,371 @@ +package apiserver + +import ( + "context" + "fmt" + "net/url" + "strings" + "sync" + "time" + + "github.com/crowdsecurity/crowdsec/pkg/apiclient" + "github.com/crowdsecurity/crowdsec/pkg/apiserver/controllers" + "github.com/crowdsecurity/crowdsec/pkg/csconfig" + "github.com/crowdsecurity/crowdsec/pkg/cwversion" + "github.com/crowdsecurity/crowdsec/pkg/database" + "github.com/crowdsecurity/crowdsec/pkg/models" + "github.com/crowdsecurity/crowdsec/pkg/types" + "github.com/go-openapi/strfmt" + "github.com/pkg/errors" + log "github.com/sirupsen/logrus" + + "gopkg.in/tomb.v2" +) + +const ( + PullInterval = "2h" + PushInterval = "30s" + MetricsInterval = "30m" +) + +type apic struct { + pullInterval time.Duration + pushInterval time.Duration + metricsInterval time.Duration + dbClient *database.Client + apiClient *apiclient.ApiClient + alertToPush chan []*models.Alert + mu sync.Mutex + pushTomb tomb.Tomb + pullTomb tomb.Tomb + metricsTomb tomb.Tomb + startup bool + credentials *csconfig.ApiCredentialsCfg + scenarioList []string +} + +func IsInSlice(a string, b []string) bool { + for _, v := range b { + if a == v { + return true + } + } + return false +} + +func (a *apic) FetchScenariosListFromDB() ([]string, error) { + scenarios := make([]string, 0) + machines, err := a.dbClient.ListMachines() + if err != nil { + return nil, errors.Wrap(err, "while listing machines") + } + //merge all scenarios together + for _, v := range machines { + machineScenarios := strings.Split(v.Scenarios, ",") + log.Debugf("%d scenarios for machine %d", len(machineScenarios), v.ID) + for _, sv := range machineScenarios { + if !IsInSlice(sv, scenarios) && sv != "" { + scenarios = append(scenarios, sv) + } + } + } + log.Debugf("Returning list of scenarios : %+v", scenarios) + return scenarios, nil +} + +func AlertToSignal(alert *models.Alert) *models.AddSignalsRequestItem { + return &models.AddSignalsRequestItem{ + Message: alert.Message, + Scenario: alert.Scenario, + ScenarioHash: alert.ScenarioHash, + ScenarioVersion: alert.ScenarioVersion, + Source: alert.Source, + StartAt: alert.StartAt, + StopAt: alert.StopAt, + CreatedAt: alert.CreatedAt, + MachineID: alert.MachineID, + } +} + +func NewAPIC(config *csconfig.OnlineApiClientCfg, dbClient *database.Client) (*apic, error) { + var err error + ret := 
&apic{ + alertToPush: make(chan []*models.Alert), + dbClient: dbClient, + mu: sync.Mutex{}, + startup: true, + credentials: config.Credentials, + pullTomb: tomb.Tomb{}, + pushTomb: tomb.Tomb{}, + metricsTomb: tomb.Tomb{}, + scenarioList: make([]string, 0), + } + + ret.pullInterval, err = time.ParseDuration(PullInterval) + if err != nil { + return ret, err + } + ret.pushInterval, err = time.ParseDuration(PushInterval) + if err != nil { + return ret, err + } + ret.metricsInterval, err = time.ParseDuration(MetricsInterval) + if err != nil { + return ret, err + } + + password := strfmt.Password(config.Credentials.Password) + apiURL, err := url.Parse(config.Credentials.URL) + if err != nil { + return nil, errors.Wrapf(err, "while parsing '%s'", config.Credentials.URL) + } + ret.scenarioList, err = ret.FetchScenariosListFromDB() + if err != nil { + return nil, errors.Wrap(err, "while fetching scenarios from db") + } + ret.apiClient, err = apiclient.NewClient(&apiclient.Config{ + MachineID: config.Credentials.Login, + Password: password, + UserAgent: fmt.Sprintf("crowdsec/%s", cwversion.VersionStr()), + URL: apiURL, + VersionPrefix: "v2", + Scenarios: ret.scenarioList, + UpdateScenario: ret.FetchScenariosListFromDB, + }) + return ret, nil +} + +func (a *apic) Push() error { + defer types.CatchPanic("lapi/pushToAPIC") + + var cache models.AddSignalsRequest + ticker := time.NewTicker(a.pushInterval) + log.Infof("start crowdsec api push (interval: %s)", PushInterval) + + for { + select { + case <-a.pushTomb.Dying(): // if one apic routine is dying, do we kill the others? + a.pullTomb.Kill(nil) + a.metricsTomb.Kill(nil) + log.Infof("push tomb is dying, sending cache (%d elements) before exiting", len(cache)) + if len(cache) == 0 { + return nil + } + err := a.Send(&cache) + return err + case <-ticker.C: + if len(cache) > 0 { + a.mu.Lock() + cacheCopy := cache + cache = make(models.AddSignalsRequest, 0) + a.mu.Unlock() + log.Infof("Signal push: %d signals to push", len(cacheCopy)) + err := a.Send(&cacheCopy) + if err != nil { + log.Errorf("while sending signal to Central API : %s", err) + log.Debugf("dump: %+v", cacheCopy) + /* + even in case of error, we don't want to return here, or we need to kill everything. + this go-routine is in charge of pushing the signals to LAPI and is emptying the CAPIChan + */ + } + } + case alerts := <-a.alertToPush: + var signals []*models.AddSignalsRequestItem + for _, alert := range alerts { + signals = append(signals, AlertToSignal(alert)) + } + a.mu.Lock() + cache = append(cache, signals...) + a.mu.Unlock() + } + } +} + +func (a *apic) Send(cache *models.AddSignalsRequest) error { + /*we do have a problem with this : + The apic.Push background routine reads from alertToPush chan. + This chan is filled by Controller.CreateAlert + + If the chan apic.Send hangs, the alertToPush chan will become full, + with means that Controller.CreateAlert is going to hang, blocking API worker(s). + + So instead, we prefer to cancel write. + + I don't know enough about gin to tell how much of an issue it can be. 
+	*/
+	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	defer cancel()
+	_, _, err := a.apiClient.Signal.Add(ctx, cache)
+	return err
+}
+
+func (a *apic) PullTop() error {
+	var err error
+
+	data, _, err := a.apiClient.Decisions.GetStream(context.Background(), a.startup)
+	if err != nil {
+		return errors.Wrap(err, "get stream")
+	}
+	if a.startup {
+		a.startup = false
+	}
+	// process deleted decisions
+	var filter map[string][]string
+	for _, decision := range data.Deleted {
+		if strings.ToLower(*decision.Scope) == "ip" {
+			filter = make(map[string][]string, 1)
+			filter["value"] = []string{*decision.Value}
+		} else {
+			filter = make(map[string][]string, 3)
+			filter["value"] = []string{*decision.Value}
+			filter["type"] = []string{*decision.Type}
+			filter["scope"] = []string{*decision.Scope}
+		}
+
+		nbDeleted, err := a.dbClient.SoftDeleteDecisionsWithFilter(filter)
+		if err != nil {
+			return err
+		}
+
+		log.Printf("pull top: deleted %s entries", nbDeleted)
+	}
+
+	alertCreated, err := a.dbClient.Ent.Alert.
+		Create().
+		SetScenario(fmt.Sprintf("update : +%d/-%d IPs", len(data.New), len(data.Deleted))).
+		SetSourceScope("Community blocklist").
+		Save(a.dbClient.CTX)
+	if err != nil {
+		return errors.Wrap(err, "create alert from crowdsec-api")
+	}
+
+	// process new decisions
+	for _, decision := range data.New {
+		/*ensure scope makes sense no matter what consensus gives*/
+		if strings.ToLower(*decision.Scope) == "ip" {
+			*decision.Scope = types.Ip
+		} else if strings.ToLower(*decision.Scope) == "range" {
+			*decision.Scope = types.Range
+		}
+
+		duration, err := time.ParseDuration(*decision.Duration)
+		if err != nil {
+			return errors.Wrapf(err, "parse decision duration '%s'", *decision.Duration)
+		}
+		startIP, endIP, err := controllers.GetIpsFromIpRange(*decision.Value)
+		if err != nil {
+			return errors.Wrapf(err, "ip to int '%s'", *decision.Value)
+		}
+
+		_, err = a.dbClient.Ent.Decision.Create().
+			SetUntil(time.Now().Add(duration)).
+			SetScenario(*decision.Scenario).
+			SetType(*decision.Type).
+			SetStartIP(startIP).
+			SetEndIP(endIP).
+			SetValue(*decision.Value).
+			SetScope(*decision.Scope).
+			SetOrigin(*decision.Origin).
+			SetOwner(alertCreated).Save(a.dbClient.CTX)
+		if err != nil {
+			return errors.Wrap(err, "decision creation from crowdsec-api")
+		}
+	}
+	log.Printf("pull top: added %d entries", len(data.New))
+	return nil
+}
+
+func (a *apic) Pull() error {
+	defer types.CatchPanic("lapi/pullFromAPIC")
+	log.Infof("start crowdsec api pull (interval: %s)", PullInterval)
+	var err error
+
+	scenario := a.scenarioList
+	for {
+		if len(scenario) > 0 {
+			break
+		}
+		log.Warningf("scenario list is empty, will not pull yet")
+		time.Sleep(1 * time.Second)
+		scenario, err = a.FetchScenariosListFromDB()
+		if err != nil {
+			log.Errorf("unable to fetch scenarios from db: %s", err)
+		}
+	}
+	if err := a.PullTop(); err != nil {
+		log.Errorf("capi pull top: %s", err)
+	}
+	ticker := time.NewTicker(a.pullInterval)
+	for {
+		select {
+		case <-ticker.C:
+			if err := a.PullTop(); err != nil {
+				log.Errorf("capi pull top: %s", err)
+				continue
+			}
+		case <-a.pullTomb.Dying(): // if one apic routine is dying, do we kill the others?
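+			// killing the sibling tombs propagates the shutdown to the other apic routines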
+			a.metricsTomb.Kill(nil)
+			a.pushTomb.Kill(nil)
+			return nil
+		}
+	}
+}
+
+func (a *apic) SendMetrics() error {
+	defer types.CatchPanic("lapi/metricsToAPIC")
+
+	log.Infof("start crowdsec api send metrics (interval: %s)", MetricsInterval)
+	ticker := time.NewTicker(a.metricsInterval)
+	for {
+		select {
+		case <-ticker.C:
+			version := cwversion.VersionStr()
+			metric := &models.Metrics{
+				ApilVersion: &version,
+				Machines:    make([]*models.MetricsSoftInfo, 0),
+				Bouncers:    make([]*models.MetricsSoftInfo, 0),
+			}
+			machines, err := a.dbClient.ListMachines()
+			if err != nil {
+				return err
+			}
+			bouncers, err := a.dbClient.ListBouncers()
+			if err != nil {
+				return err
+			}
+			// build the models.Metrics payload from the registered machines and bouncers
+			for _, machine := range machines {
+				m := &models.MetricsSoftInfo{
+					Version: machine.Version,
+					Name:    machine.MachineId,
+				}
+				metric.Machines = append(metric.Machines, m)
+			}
+
+			for _, bouncer := range bouncers {
+				m := &models.MetricsSoftInfo{
+					Version: bouncer.Version,
+					Name:    bouncer.Type,
+				}
+				metric.Bouncers = append(metric.Bouncers, m)
+			}
+			_, _, err = a.apiClient.Metrics.Add(context.Background(), metric)
+			if err != nil {
+				return errors.Wrap(err, "sending metrics failed")
+			}
+			log.Infof("capi metrics: metrics sent successfully")
+		case <-a.metricsTomb.Dying(): // if one apic routine is dying, do we kill the others?
+			a.pullTomb.Kill(nil)
+			a.pushTomb.Kill(nil)
+			return nil
+		}
+	}
+}
+
+func (a *apic) Shutdown() {
+	a.pushTomb.Kill(nil)
+	a.pullTomb.Kill(nil)
+	a.metricsTomb.Kill(nil)
+}
diff --git a/pkg/apiserver/apiserver.go b/pkg/apiserver/apiserver.go
new file mode 100644
index 000000000..112f38cb6
--- /dev/null
+++ b/pkg/apiserver/apiserver.go
@@ -0,0 +1,220 @@
+package apiserver
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"net/http"
+	"os"
+	"time"
+
+	"github.com/crowdsecurity/crowdsec/pkg/apiserver/controllers"
+	"github.com/crowdsecurity/crowdsec/pkg/csconfig"
+	"github.com/crowdsecurity/crowdsec/pkg/database"
+	"github.com/crowdsecurity/crowdsec/pkg/types"
+	"github.com/gin-gonic/gin"
+	"github.com/go-co-op/gocron"
+	"github.com/pkg/errors"
+	log "github.com/sirupsen/logrus"
+	"gopkg.in/tomb.v2"
+)
+
+var (
+	keyLength = 32
+)
+
+type APIServer struct {
+	URL            string
+	TLS            *csconfig.TLSCfg
+	dbClient       *database.Client
+	logFile        string
+	ctx            context.Context
+	controller     *controllers.Controller
+	flushScheduler *gocron.Scheduler
+	router         *gin.Engine
+	httpServer     *http.Server
+	apic           *apic
+	httpServerTomb tomb.Tomb
+}
+
+func NewServer(config *csconfig.LocalApiServerCfg) (*APIServer, error) {
+	var flushScheduler *gocron.Scheduler
+	dbClient, err := database.NewClient(config.DbConfig)
+	if err != nil {
+		return &APIServer{}, fmt.Errorf("unable to init database client: %s", err)
+	}
+
+	if config.DbConfig.Flush != nil {
+		flushScheduler, err = dbClient.StartFlushScheduler(config.DbConfig.Flush)
+		if err != nil {
+			return &APIServer{}, err
+		}
+	}
+
+	logFile := ""
+	if config.LogDir != "" {
+		logFile = fmt.Sprintf("%s/crowdsec_api.log", config.LogDir)
+	}
+
+	if log.GetLevel() < log.DebugLevel {
+		gin.SetMode(gin.ReleaseMode)
+	}
+	log.Debugf("starting router, logging to %s", logFile)
+	router := gin.New()
+
+	/*The logger that will be used by handlers*/
+	clog := log.New()
+	if err := types.ConfigureLogger(clog); err != nil {
+		return nil, errors.Wrap(err, "while configuring gin logger")
+	}
+	if config.LogLevel != nil {
+		clog.SetLevel(*config.LogLevel)
+	}
+
+	gin.DefaultErrorWriter = clog.Writer()
+
+	// Logging to a file.
+	if logFile != "" {
+		file, err := os.Create(logFile)
+		if err != nil {
+			return &APIServer{}, errors.Wrapf(err, "creating api access log file: %s", logFile)
+		}
+		gin.DefaultWriter = io.MultiWriter(file, os.Stdout)
+	}
+
+	router.Use(gin.LoggerWithFormatter(func(param gin.LogFormatterParams) string {
+		return fmt.Sprintf("%s - [%s] \"%s %s %s %d %s \"%s\" %s\"\n",
+			param.ClientIP,
+			param.TimeStamp.Format(time.RFC1123),
+			param.Method,
+			param.Path,
+			param.Request.Proto,
+			param.StatusCode,
+			param.Latency,
+			param.Request.UserAgent(),
+			param.ErrorMessage,
+		)
+	}))
+
+	router.NoRoute(func(c *gin.Context) {
+		c.JSON(http.StatusNotFound, gin.H{"message": "Page or Method not found"})
+	})
+	router.Use(gin.Recovery())
+	controller := &controllers.Controller{
+		DBClient: dbClient,
+		Ectx:     context.Background(),
+		Router:   router,
+		Profiles: config.Profiles,
+		Log:      clog,
+	}
+
+	var apiClient *apic
+
+	if config.OnlineClient != nil {
+		log.Printf("Loading CAPI pusher")
+		apiClient, err = NewAPIC(config.OnlineClient, dbClient)
+		if err != nil {
+			return &APIServer{}, err
+		}
+		controller.CAPIChan = apiClient.alertToPush
+	} else {
+		apiClient = nil
+		controller.CAPIChan = nil
+	}
+
+	if err := controller.Init(); err != nil {
+		return &APIServer{}, err
+	}
+
+	return &APIServer{
+		URL:            config.ListenURI,
+		TLS:            config.TLS,
+		logFile:        logFile,
+		dbClient:       dbClient,
+		controller:     controller,
+		flushScheduler: flushScheduler,
+		router:         router,
+		apic:           apiClient,
+		httpServerTomb: tomb.Tomb{},
+	}, nil
+
+}
+
+func (s *APIServer) Router() (*gin.Engine, error) {
+	return s.router, nil
+}
+
+func (s *APIServer) Run() error {
+	defer types.CatchPanic("lapi/runServer")
+
+	s.httpServer = &http.Server{
+		Addr:    s.URL,
+		Handler: s.router,
+	}
+
+	if s.apic != nil {
+		s.apic.pushTomb.Go(func() error {
+			if err := s.apic.Push(); err != nil {
+				log.Errorf("capi push: %s", err)
+				return err
+			}
+			return nil
+		})
+		s.apic.pullTomb.Go(func() error {
+			if err := s.apic.Pull(); err != nil {
+				log.Errorf("capi pull: %s", err)
+				return err
+			}
+			return nil
+		})
+		s.apic.metricsTomb.Go(func() error {
+			if err := s.apic.SendMetrics(); err != nil {
+				log.Errorf("capi metrics: %s", err)
+				return err
+			}
+			return nil
+		})
+	}
+
+	s.httpServerTomb.Go(func() error {
+		go func() {
+			if s.TLS != nil && s.TLS.CertFilePath != "" && s.TLS.KeyFilePath != "" {
+				if err := s.httpServer.ListenAndServeTLS(s.TLS.CertFilePath, s.TLS.KeyFilePath); err != nil {
+					log.Fatal(err.Error())
+				}
+			} else {
+				if err := s.httpServer.ListenAndServe(); err != http.ErrServerClosed {
+					log.Fatal(err.Error())
+				}
+			}
+		}()
+		<-s.httpServerTomb.Dying()
+		log.Infof("run: shutting down api server")
+		if err := s.Shutdown(); err != nil {
+			log.Errorf("while shutting down API server: %s", err)
+			return err
+		}
+		return nil
+	})
+
+	return nil
+}
+
+func (s *APIServer) Close() {
+	if s.apic != nil {
+		s.apic.Shutdown() // stop apic first since it uses the dbClient
+	}
+	s.dbClient.Ent.Close()
+	if s.flushScheduler != nil {
+		s.flushScheduler.Stop()
+	}
+}
+
+func (s *APIServer) Shutdown() error {
+	s.Close()
+	if err := s.httpServer.Shutdown(context.TODO()); err != nil {
+		return err
+	}
+	return nil
+}
diff --git a/pkg/apiserver/apiserver_test.go b/pkg/apiserver/apiserver_test.go
new file mode 100644
index 000000000..f0b7da3f7
--- /dev/null
+++ b/pkg/apiserver/apiserver_test.go
@@ -0,0 +1,155 @@
+package apiserver
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/http"
"net/http/httptest" + "os" + "strings" + "testing" + + middlewares "github.com/crowdsecurity/crowdsec/pkg/apiserver/middlewares/v1" + "github.com/crowdsecurity/crowdsec/pkg/cwversion" + "github.com/crowdsecurity/crowdsec/pkg/models" + "github.com/go-openapi/strfmt" + + "github.com/crowdsecurity/crowdsec/pkg/csconfig" + "github.com/crowdsecurity/crowdsec/pkg/database" + "github.com/gin-gonic/gin" + + log "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" +) + +var testMachineID = "test" +var testPassword = strfmt.Password("test") +var MachineTest = models.WatcherAuthRequest{ + MachineID: &testMachineID, + Password: &testPassword, +} + +var UserAgent = fmt.Sprintf("crowdsec-test/%s", cwversion.Version) + +func LoadTestConfig() csconfig.GlobalConfig { + config := csconfig.GlobalConfig{} + maxAge := "1h" + flushConfig := csconfig.FlushDBCfg{ + MaxAge: &maxAge, + } + dbconfig := csconfig.DatabaseCfg{ + Type: "sqlite", + DbPath: "./ent", + Flush: &flushConfig, + } + apiServerConfig := csconfig.LocalApiServerCfg{ + ListenURI: "http://127.0.0.1:8080", + DbConfig: &dbconfig, + ProfilesPath: "./tests/profiles.yaml", + } + apiConfig := csconfig.APICfg{ + Server: &apiServerConfig, + } + config.API = &apiConfig + if err := config.API.Server.LoadProfiles(); err != nil { + log.Fatalf("failed to load profiles: %s", err) + } + return config +} + +func NewAPITest() (*gin.Engine, error) { + config := LoadTestConfig() + + os.Remove("./ent") + apiServer, err := NewServer(config.API.Server) + if err != nil { + return nil, fmt.Errorf("unable to run local API: %s", err) + } + log.Printf("Creating new API server") + gin.SetMode(gin.TestMode) + router, err := apiServer.Router() + if err != nil { + return nil, fmt.Errorf("unable to run local API: %s", err) + } + return router, nil +} + +func ValidateMachine(machineID string) error { + config := LoadTestConfig() + dbClient, err := database.NewClient(config.API.Server.DbConfig) + if err != nil { + return fmt.Errorf("unable to create new database client: %s", err) + } + if err := dbClient.ValidateMachine(machineID); err != nil { + return fmt.Errorf("unable to validate machine: %s", err) + } + return nil +} + +func CreateTestMachine(router *gin.Engine) (string, error) { + b, err := json.Marshal(MachineTest) + if err != nil { + return "", fmt.Errorf("unable to marshal MachineTest") + } + body := string(b) + + w := httptest.NewRecorder() + req, _ := http.NewRequest("POST", "/v1/watchers", strings.NewReader(body)) + req.Header.Set("User-Agent", UserAgent) + router.ServeHTTP(w, req) + return body, nil +} + +func CreateTestBouncer() (string, error) { + config := LoadTestConfig() + + dbClient, err := database.NewClient(config.API.Server.DbConfig) + if err != nil { + log.Fatalf("unable to create new database client: %s", err) + } + apiKey, err := middlewares.GenerateAPIKey(keyLength) + if err != nil { + return "", fmt.Errorf("unable to generate api key: %s", err) + } + err = dbClient.CreateBouncer("test", "127.0.0.1", middlewares.HashSHA512(apiKey)) + if err != nil { + return "", fmt.Errorf("unable to create blocker: %s", err) + } + + return apiKey, nil +} + +func TestWithWrongDBConfig(t *testing.T) { + config := LoadTestConfig() + config.API.Server.DbConfig.Type = "test" + apiServer, err := NewServer(config.API.Server) + + assert.Equal(t, apiServer, &APIServer{}) + assert.Equal(t, "unable to init database client: unknown database type", err.Error()) +} + +func TestWithWrongFlushConfig(t *testing.T) { + config := LoadTestConfig() + maxItems := -1 + 
config.API.Server.DbConfig.Flush.MaxItems = &maxItems + apiServer, err := NewServer(config.API.Server) + + assert.Equal(t, apiServer, &APIServer{}) + assert.Equal(t, "max_items can't be zero or negative number", err.Error()) +} + +func TestUnknownPath(t *testing.T) { + router, err := NewAPITest() + if err != nil { + log.Fatalf("unable to run local API: %s", err) + } + + w := httptest.NewRecorder() + req, _ := http.NewRequest("GET", "/test", nil) + req.Header.Set("User-Agent", UserAgent) + router.ServeHTTP(w, req) + + assert.Equal(t, 404, w.Code) + assert.Equal(t, "{\"message\":\"Page or Method not found\"}", w.Body.String()) + +} diff --git a/pkg/apiserver/controllers/controller.go b/pkg/apiserver/controllers/controller.go new file mode 100644 index 000000000..bf7dd764e --- /dev/null +++ b/pkg/apiserver/controllers/controller.go @@ -0,0 +1,108 @@ +package controllers + +import ( + "context" + + v1 "github.com/crowdsecurity/crowdsec/pkg/apiserver/controllers/v1" + "github.com/crowdsecurity/crowdsec/pkg/csconfig" + "github.com/crowdsecurity/crowdsec/pkg/database" + "github.com/crowdsecurity/crowdsec/pkg/models" + "github.com/gin-gonic/gin" + log "github.com/sirupsen/logrus" +) + +type Controller struct { + Ectx context.Context + DBClient *database.Client + Router *gin.Engine + Profiles []*csconfig.ProfileCfg + CAPIChan chan []*models.Alert + Log *log.Logger +} + +func (c *Controller) Init() error { + if err := c.NewV1(); err != nil { + return err + } + + /* if we have a V2, just add + + if err := c.NewV2(); err != nil { + return err + } + + */ + + return nil +} + +func (c *Controller) NewV1() error { + handlerV1, err := v1.New(c.DBClient, c.Ectx, c.Profiles, c.CAPIChan) + if err != nil { + return err + } + + c.Router.Use(v1.PrometheusMiddleware()) + groupV1 := c.Router.Group("/v1") + groupV1.POST("/watchers", handlerV1.CreateMachine) + groupV1.POST("/watchers/login", handlerV1.Middlewares.JWT.Middleware.LoginHandler) + + jwtAuth := groupV1.Group("") + jwtAuth.GET("/refresh_token", handlerV1.Middlewares.JWT.Middleware.RefreshHandler) + jwtAuth.Use(handlerV1.Middlewares.JWT.Middleware.MiddlewareFunc(), v1.PrometheusMachinesMiddleware()) + { + jwtAuth.POST("/alerts", handlerV1.CreateAlert) + jwtAuth.GET("/alerts", handlerV1.FindAlerts) + jwtAuth.HEAD("/alerts", handlerV1.FindAlerts) + jwtAuth.GET("/alerts/:alert_id", handlerV1.FindAlertByID) + jwtAuth.HEAD("/alerts/:alert_id", handlerV1.FindAlertByID) + jwtAuth.DELETE("/alerts", handlerV1.DeleteAlerts) + jwtAuth.DELETE("/decisions", handlerV1.DeleteDecisions) + jwtAuth.DELETE("/decisions/:decision_id", handlerV1.DeleteDecisionById) + } + + apiKeyAuth := groupV1.Group("") + apiKeyAuth.Use(handlerV1.Middlewares.APIKey.MiddlewareFunc(), v1.PrometheusBouncersMiddleware()) + { + apiKeyAuth.GET("/decisions", handlerV1.GetDecision) + apiKeyAuth.HEAD("/decisions", handlerV1.GetDecision) + apiKeyAuth.GET("/decisions/stream", handlerV1.StreamDecision) + apiKeyAuth.HEAD("/decisions/stream", handlerV1.StreamDecision) + } + + return nil +} + +/* +func (c *Controller) NewV2() error { + handlerV2, err := v2.New(c.DBClient, c.Ectx) + if err != nil { + return err + } + + v2 := c.Router.Group("/v2") + v2.POST("/watchers", handlerV2.CreateMachine) + v2.POST("/watchers/login", handlerV2.Middlewares.JWT.Middleware.LoginHandler) + + jwtAuth := v2.Group("") + jwtAuth.GET("/refresh_token", handlerV2.Middlewares.JWT.Middleware.RefreshHandler) + jwtAuth.Use(handlerV2.Middlewares.JWT.Middleware.MiddlewareFunc()) + { + jwtAuth.POST("/alerts", handlerV2.CreateAlert) + 
jwtAuth.GET("/alerts", handlerV2.FindAlerts) + jwtAuth.DELETE("/alerts", handlerV2.DeleteAlerts) + jwtAuth.DELETE("/decisions", handlerV2.DeleteDecisions) + jwtAuth.DELETE("/decisions/:decision_id", handlerV2.DeleteDecisionById) + } + + apiKeyAuth := v2.Group("") + apiKeyAuth.Use(handlerV2.Middlewares.APIKey.MiddlewareFuncV2()) + { + apiKeyAuth.GET("/decisions", handlerV2.GetDecision) + apiKeyAuth.GET("/decisions/stream", handlerV2.StreamDecision) + } + + return nil +} + +*/ diff --git a/pkg/apiserver/controllers/utils.go b/pkg/apiserver/controllers/utils.go new file mode 100644 index 000000000..1af63478d --- /dev/null +++ b/pkg/apiserver/controllers/utils.go @@ -0,0 +1,65 @@ +package controllers + +import ( + "encoding/binary" + "fmt" + "net" +) + +func IP2Int(ip net.IP) uint32 { + if len(ip) == 16 { + return binary.BigEndian.Uint32(ip[12:16]) + } + return binary.BigEndian.Uint32(ip) +} + +func Int2ip(nn uint32) net.IP { + ip := make(net.IP, 4) + binary.BigEndian.PutUint32(ip, nn) + return ip +} + +func IsIpv4(host string) bool { + return net.ParseIP(host) != nil +} + +//Stolen from : https://github.com/llimllib/ipaddress/ +// Return the final address of a net range. Convert to IPv4 if possible, +// otherwise return an ipv6 +func LastAddress(n *net.IPNet) net.IP { + ip := n.IP.To4() + if ip == nil { + ip = n.IP + return net.IP{ + ip[0] | ^n.Mask[0], ip[1] | ^n.Mask[1], ip[2] | ^n.Mask[2], + ip[3] | ^n.Mask[3], ip[4] | ^n.Mask[4], ip[5] | ^n.Mask[5], + ip[6] | ^n.Mask[6], ip[7] | ^n.Mask[7], ip[8] | ^n.Mask[8], + ip[9] | ^n.Mask[9], ip[10] | ^n.Mask[10], ip[11] | ^n.Mask[11], + ip[12] | ^n.Mask[12], ip[13] | ^n.Mask[13], ip[14] | ^n.Mask[14], + ip[15] | ^n.Mask[15]} + } + + return net.IPv4( + ip[0]|^n.Mask[0], + ip[1]|^n.Mask[1], + ip[2]|^n.Mask[2], + ip[3]|^n.Mask[3]) +} + +func GetIpsFromIpRange(host string) (int64, int64, error) { + var ipStart int64 + var ipEnd int64 + var err error + var parsedRange *net.IPNet + + if _, parsedRange, err = net.ParseCIDR(host); err != nil { + return ipStart, ipEnd, fmt.Errorf("'%s' is not a valid CIDR", host) + } + if parsedRange == nil { + return ipStart, ipEnd, fmt.Errorf("unable to parse network : %s", err) + } + ipStart = int64(IP2Int(parsedRange.IP)) + ipEnd = int64(IP2Int(LastAddress(parsedRange))) + + return ipStart, ipEnd, nil +} diff --git a/pkg/apiserver/controllers/v1/alerts.go b/pkg/apiserver/controllers/v1/alerts.go new file mode 100644 index 000000000..9d5289447 --- /dev/null +++ b/pkg/apiserver/controllers/v1/alerts.go @@ -0,0 +1,212 @@ +package v1 + +import ( + "encoding/json" + "fmt" + "net/http" + "strconv" + "time" + + jwt "github.com/appleboy/gin-jwt/v2" + + "github.com/crowdsecurity/crowdsec/pkg/csprofiles" + "github.com/crowdsecurity/crowdsec/pkg/database/ent" + "github.com/crowdsecurity/crowdsec/pkg/models" + "github.com/crowdsecurity/crowdsec/pkg/types" + "github.com/gin-gonic/gin" + "github.com/go-openapi/strfmt" + log "github.com/sirupsen/logrus" +) + +func FormatOneAlert(alert *ent.Alert) *models.Alert { + var outputAlert models.Alert + var machineID string + startAt := alert.StartedAt.String() + StopAt := alert.StoppedAt.String() + if alert.Edges.Owner == nil { + machineID = "N/A" + } else { + machineID = alert.Edges.Owner.MachineId + } + + outputAlert = models.Alert{ + ID: int64(alert.ID), + MachineID: machineID, + CreatedAt: alert.CreatedAt.Format(time.RFC3339), + Scenario: &alert.Scenario, + ScenarioVersion: &alert.ScenarioVersion, + ScenarioHash: &alert.ScenarioHash, + Message: &alert.Message, + EventsCount: 
&alert.EventsCount, + StartAt: &startAt, + StopAt: &StopAt, + Capacity: &alert.Capacity, + Leakspeed: &alert.LeakSpeed, + Simulated: &alert.Simulated, + Source: &models.Source{ + Scope: &alert.SourceScope, + Value: &alert.SourceValue, + IP: alert.SourceIp, + Range: alert.SourceRange, + AsNumber: alert.SourceAsNumber, + AsName: alert.SourceAsName, + Cn: alert.SourceCountry, + Latitude: alert.SourceLatitude, + Longitude: alert.SourceLongitude, + }, + } + for _, eventItem := range alert.Edges.Events { + var Metas models.Meta + timestamp := eventItem.Time.String() + if err := json.Unmarshal([]byte(eventItem.Serialized), &Metas); err != nil { + log.Errorf("unable to unmarshall events meta '%s' : %s", eventItem.Serialized, err) + } + outputAlert.Events = append(outputAlert.Events, &models.Event{ + Timestamp: ×tamp, + Meta: Metas, + }) + } + for _, metaItem := range alert.Edges.Metas { + outputAlert.Meta = append(outputAlert.Meta, &models.MetaItems0{ + Key: metaItem.Key, + Value: metaItem.Value, + }) + } + for _, decisionItem := range alert.Edges.Decisions { + duration := decisionItem.Until.Sub(time.Now()).String() + outputAlert.Decisions = append(outputAlert.Decisions, &models.Decision{ + Duration: &duration, // transform into time.Time ? + Scenario: &decisionItem.Scenario, + Type: &decisionItem.Type, + StartIP: decisionItem.StartIP, + EndIP: decisionItem.EndIP, + Scope: &decisionItem.Scope, + Value: &decisionItem.Value, + Origin: &decisionItem.Origin, + Simulated: outputAlert.Simulated, + ID: int64(decisionItem.ID), + }) + } + return &outputAlert +} + +// FormatAlerts : Format results from the database to be swagger model compliant +func FormatAlerts(result []*ent.Alert) models.AddAlertsRequest { + var data models.AddAlertsRequest + for _, alertItem := range result { + data = append(data, FormatOneAlert(alertItem)) + } + return data +} + +// CreateAlert : write received alerts in body to the database +func (c *Controller) CreateAlert(gctx *gin.Context) { + defer types.CatchPanic("crowdsec/controllersV1/CreateAlert") + + var input models.AddAlertsRequest + + claims := jwt.ExtractClaims(gctx) + /*TBD : use defines rather than hardcoded key to find back owner*/ + machineID := claims["id"].(string) + + if err := gctx.ShouldBindJSON(&input); err != nil { + gctx.JSON(http.StatusBadRequest, gin.H{"message": err.Error()}) + return + } + if err := input.Validate(strfmt.Default); err != nil { + c.HandleDBErrors(gctx, err) + return + } + + for _, alert := range input { + if len(alert.Decisions) > 0 { + log.Debugf("alert %s already has decisions, don't apply profiles", *alert.Message) + } else { + decisions, err := csprofiles.EvaluateProfiles(c.Profiles, alert) + if err != nil { + gctx.JSON(http.StatusInternalServerError, gin.H{"message": err.Error()}) + return + } + alert.Decisions = decisions + } + } + + alerts, err := c.DBClient.CreateAlert(machineID, input) + if err != nil { + c.HandleDBErrors(gctx, err) + return + } + for _, alert := range input { + alert.MachineID = machineID + } + select { + case c.CAPIChan <- input: + log.Debugf("alert send to CAPI channel") + default: + log.Warningf("Cannot send alert to Central API channel") + } + + gctx.JSON(http.StatusOK, alerts) + return +} + +// FindAlerts : return alerts from database based on the specified filter +func (c *Controller) FindAlerts(gctx *gin.Context) { + defer types.CatchPanic("crowdsec/controllersV1/FindAlerts") + result, err := c.DBClient.QueryAlertWithFilter(gctx.Request.URL.Query()) + if err != nil { + c.HandleDBErrors(gctx, err) + return 
+ } + data := FormatAlerts(result) + + if gctx.Request.Method == "HEAD" { + gctx.String(http.StatusOK, "") + return + } + gctx.JSON(http.StatusOK, data) + return +} + +// FindAlertByID return the alert assiocated to the ID +func (c *Controller) FindAlertByID(gctx *gin.Context) { + defer types.CatchPanic("crowdsec/controllersV1/FindAlertByID") + + alertIDStr := gctx.Param("alert_id") + alertID, err := strconv.Atoi(alertIDStr) + if err != nil { + gctx.JSON(http.StatusBadRequest, gin.H{"message": "alert_id must be valid integer"}) + return + } + result, err := c.DBClient.GetAlertByID(alertID) + if err != nil { + c.HandleDBErrors(gctx, err) + return + } + data := FormatOneAlert(result) + + if gctx.Request.Method == "HEAD" { + gctx.String(http.StatusOK, "") + return + } + gctx.JSON(http.StatusOK, data) + return +} + +// DeleteAlerts : delete alerts from database based on the specified filter +func (c *Controller) DeleteAlerts(gctx *gin.Context) { + defer types.CatchPanic("crowdsec/controllersV1/DeleteAlerts") + + if gctx.ClientIP() != "127.0.0.1" && gctx.ClientIP() != "::1" { + gctx.JSON(http.StatusForbidden, gin.H{"message": fmt.Sprintf("access forbidden from this IP (%s)", gctx.ClientIP())}) + return + } + var err error + deleted, err := c.DBClient.DeleteAlertWithFilter(gctx.Request.URL.Query()) + if err != nil { + c.HandleDBErrors(gctx, err) + } + + gctx.JSON(http.StatusOK, gin.H{"message": fmt.Sprintf("%d deleted alerts", len(deleted))}) + return +} diff --git a/pkg/apiserver/controllers/v1/controller.go b/pkg/apiserver/controllers/v1/controller.go new file mode 100644 index 000000000..8a11ec84c --- /dev/null +++ b/pkg/apiserver/controllers/v1/controller.go @@ -0,0 +1,36 @@ +package v1 + +import ( + "context" + + middlewares "github.com/crowdsecurity/crowdsec/pkg/apiserver/middlewares/v1" + "github.com/crowdsecurity/crowdsec/pkg/csconfig" + "github.com/crowdsecurity/crowdsec/pkg/database" + "github.com/crowdsecurity/crowdsec/pkg/models" +) + +type Controller struct { + Ectx context.Context + DBClient *database.Client + APIKeyHeader string + Middlewares *middlewares.Middlewares + Profiles []*csconfig.ProfileCfg + CAPIChan chan []*models.Alert +} + +func New(dbClient *database.Client, ctx context.Context, profiles []*csconfig.ProfileCfg, capiChan chan []*models.Alert) (*Controller, error) { + var err error + v1 := &Controller{ + Ectx: ctx, + DBClient: dbClient, + APIKeyHeader: middlewares.APIKeyHeader, + Profiles: profiles, + CAPIChan: capiChan, + } + v1.Middlewares, err = middlewares.NewMiddlewares(dbClient) + if err != nil { + return v1, err + } + + return v1, nil +} diff --git a/pkg/apiserver/controllers/v1/decisions.go b/pkg/apiserver/controllers/v1/decisions.go new file mode 100644 index 000000000..f734291b5 --- /dev/null +++ b/pkg/apiserver/controllers/v1/decisions.go @@ -0,0 +1,217 @@ +package v1 + +import ( + "crypto/sha512" + "fmt" + "net/http" + "strconv" + "time" + + "github.com/crowdsecurity/crowdsec/pkg/database/ent" + "github.com/crowdsecurity/crowdsec/pkg/models" + "github.com/crowdsecurity/crowdsec/pkg/types" + "github.com/gin-gonic/gin" + log "github.com/sirupsen/logrus" +) + +func FormatDecisions(decisions []*ent.Decision) ([]*models.Decision, error) { + var results []*models.Decision + for _, dbDecision := range decisions { + duration := dbDecision.Until.Sub(time.Now()).String() + decision := models.Decision{ + ID: int64(dbDecision.ID), + Duration: &duration, + EndIP: dbDecision.EndIP, + StartIP: dbDecision.StartIP, + Scenario: &dbDecision.Scenario, + Scope: 
+
+func (c *Controller) GetDecision(gctx *gin.Context) {
+ defer types.CatchPanic("crowdsec/controllersV1/GetDecision")
+ var err error
+ var results []*models.Decision
+ var data []*ent.Decision
+
+ data, err = c.DBClient.QueryDecisionWithFilter(gctx.Request.URL.Query())
+ if err != nil {
+ c.HandleDBErrors(gctx, err)
+ return
+ }
+
+ results, err = FormatDecisions(data)
+ if err != nil {
+ gctx.JSON(http.StatusInternalServerError, gin.H{"message": err.Error()})
+ return
+ }
+ /*let's follow a naive logic: when a bouncer queries /decisions, if the answer is empty, we assume there is no decision for this ip/user/...,
+ but if it's non-empty, it means there are one or more decisions for this target*/
+ if len(results) > 0 {
+ PrometheusBouncersHasNonEmptyDecision(gctx)
+ } else {
+ PrometheusBouncersHasEmptyDecision(gctx)
+ }
+
+ if gctx.Request.Method == "HEAD" {
+ gctx.String(http.StatusOK, "")
+ return
+ }
+ gctx.JSON(http.StatusOK, results)
+ return
+}
+
+func (c *Controller) DeleteDecisionById(gctx *gin.Context) {
+ defer types.CatchPanic("crowdsec/controllersV1/DeleteDecisionById")
+ var err error
+
+ decisionIDStr := gctx.Param("decision_id")
+ decisionID, err := strconv.Atoi(decisionIDStr)
+ if err != nil {
+ gctx.JSON(http.StatusBadRequest, gin.H{"message": "decision_id must be valid integer"})
+ return
+ }
+ err = c.DBClient.SoftDeleteDecisionByID(decisionID)
+ if err != nil {
+ c.HandleDBErrors(gctx, err)
+ return
+ }
+
+ deleteDecisionResp := models.DeleteDecisionResponse{
+ NbDeleted: "1",
+ }
+
+ gctx.JSON(http.StatusOK, deleteDecisionResp)
+ return
+}
+
+func (c *Controller) DeleteDecisions(gctx *gin.Context) {
+ defer types.CatchPanic("crowdsec/controllersV1/DeleteDecisions")
+ var err error
+
+ nbDeleted, err := c.DBClient.SoftDeleteDecisionsWithFilter(gctx.Request.URL.Query())
+ if err != nil {
+ c.HandleDBErrors(gctx, err)
+ return
+ }
+ deleteDecisionResp := models.DeleteDecisionResponse{
+ NbDeleted: nbDeleted,
+ }
+
+ gctx.JSON(http.StatusOK, deleteDecisionResp)
+ return
+}
+
+func (c *Controller) StreamDecision(gctx *gin.Context) {
+ defer types.CatchPanic("crowdsec/controllersV1/StreamDecision")
+ var data []*ent.Decision
+ ret := make(map[string][]*models.Decision, 0)
+ ret["new"] = []*models.Decision{}
+ ret["deleted"] = []*models.Decision{}
+
+ val := gctx.Request.Header.Get(c.APIKeyHeader)
+ hashedKey := sha512.New()
+ hashedKey.Write([]byte(val))
+ hashStr := fmt.Sprintf("%x", hashedKey.Sum(nil))
+ bouncerInfo, err := c.DBClient.SelectBouncer(hashStr)
+ if err != nil {
+ if _, ok := err.(*ent.NotFoundError); ok {
+ gctx.JSON(http.StatusForbidden, gin.H{"message": err.Error()})
+ } else {
+ gctx.JSON(http.StatusUnauthorized, gin.H{"message": "not allowed"})
+ }
+ return
+ }
+
+ if bouncerInfo == nil {
+ gctx.JSON(http.StatusUnauthorized, gin.H{"message": "not allowed"})
+ return
+ }
+
+ // if the bouncer just started, return all decisions
+ if val, ok := gctx.Request.URL.Query()["startup"]; ok {
+ if val[0] == "true" {
+ data, err := c.DBClient.QueryAllDecisions()
+ if err != nil {
+ log.Errorf("failed querying decisions: %v", err)
+ gctx.JSON(http.StatusInternalServerError, gin.H{"message": err.Error()})
+ return
+ }
+ ret["new"], err = FormatDecisions(data)
+ if err != nil {
+ log.Errorf("unable to format decisions for '%s' : %v", bouncerInfo.Name, err)
+ gctx.JSON(http.StatusInternalServerError, gin.H{"message": err.Error()})
+ return
+ }
+
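+ // on startup the bouncer rebuilds its local state from scratch: every
+ // active decision is returned in "new", and expired ones are returned
+ // in "deleted" below so stale entries can be purged.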
+ // getting expired decisions
+ data, err = c.DBClient.QueryExpiredDecisions()
+ if err != nil {
+ log.Errorf("unable to query expired decision for '%s' : %v", bouncerInfo.Name, err)
+ gctx.JSON(http.StatusInternalServerError, gin.H{"message": err.Error()})
+ return
+ }
+ ret["deleted"], err = FormatDecisions(data)
+ if err != nil {
+ log.Errorf("unable to format expired decision for '%s' : %v", bouncerInfo.Name, err)
+ gctx.JSON(http.StatusInternalServerError, gin.H{"message": err.Error()})
+ return
+ }
+
+ if err := c.DBClient.UpdateBouncerLastPull(time.Now(), bouncerInfo.ID); err != nil {
+ log.Errorf("unable to update bouncer '%s' pull: %v", bouncerInfo.Name, err)
+ gctx.JSON(http.StatusInternalServerError, gin.H{"message": err.Error()})
+ return
+ }
+ if gctx.Request.Method == "HEAD" {
+ gctx.String(http.StatusOK, "")
+ return
+ }
+ gctx.JSON(http.StatusOK, ret)
+ return
+ }
+ }
+
+ // getting new decisions
+ data, err = c.DBClient.QueryNewDecisionsSince(bouncerInfo.LastPull)
+ if err != nil {
+ log.Errorf("unable to query new decision for '%s' : %v", bouncerInfo.Name, err)
+ gctx.JSON(http.StatusInternalServerError, gin.H{"message": err.Error()})
+ return
+ }
+ ret["new"], err = FormatDecisions(data)
+ if err != nil {
+ log.Errorf("unable to format new decision for '%s' : %v", bouncerInfo.Name, err)
+ gctx.JSON(http.StatusInternalServerError, gin.H{"message": err.Error()})
+ return
+ }
+
+ // getting expired decisions
+ data, err = c.DBClient.QueryExpiredDecisionsSince(bouncerInfo.LastPull.Add(-2 * time.Second)) // do we want to give exactly lastPull time?
+ if err != nil {
+ log.Errorf("unable to query expired decision for '%s' : %v", bouncerInfo.Name, err)
+ gctx.JSON(http.StatusInternalServerError, gin.H{"message": err.Error()})
+ return
+ }
+ ret["deleted"], err = FormatDecisions(data)
+ if err != nil {
+ log.Errorf("unable to format expired decision for '%s' : %v", bouncerInfo.Name, err)
+ gctx.JSON(http.StatusInternalServerError, gin.H{"message": err.Error()})
+ return
+ }
+
+ if err := c.DBClient.UpdateBouncerLastPull(time.Now(), bouncerInfo.ID); err != nil {
+ log.Errorf("unable to update bouncer '%s' pull: %v", bouncerInfo.Name, err)
+ gctx.JSON(http.StatusInternalServerError, gin.H{"message": err.Error()})
+ return
+ }
+
+ gctx.JSON(http.StatusOK, ret)
+ return
+}
diff --git a/pkg/apiserver/controllers/v1/errors.go b/pkg/apiserver/controllers/v1/errors.go
new file mode 100644
index 000000000..4aaf1aaf7
--- /dev/null
+++ b/pkg/apiserver/controllers/v1/errors.go
@@ -0,0 +1,35 @@
+package v1
+
+import (
+ "net/http"
+
+ "github.com/crowdsecurity/crowdsec/pkg/database"
+ "github.com/gin-gonic/gin"
+ "github.com/pkg/errors"
+)
+
+func (c *Controller) HandleDBErrors(gctx *gin.Context, err error) {
+ switch errors.Cause(err) {
+ case database.UserExists:
+ gctx.JSON(http.StatusForbidden, gin.H{"message": err.Error()})
+ return
+ case database.HashError:
+ gctx.JSON(http.StatusBadRequest, gin.H{"message": err.Error()})
+ return
+ case database.InsertFail:
+ gctx.JSON(http.StatusInternalServerError, gin.H{"message": err.Error()})
+ return
+ case database.QueryFail:
+ gctx.JSON(http.StatusInternalServerError, gin.H{"message": err.Error()})
+ return
+ case database.ParseTimeFail:
+ gctx.JSON(http.StatusInternalServerError, gin.H{"message": err.Error()})
+ return
+ case database.ParseDurationFail:
+ gctx.JSON(http.StatusInternalServerError, gin.H{"message": err.Error()})
+ return
+ default:
+ 
gctx.JSON(http.StatusInternalServerError, gin.H{"message": err.Error()}) + return + } +} diff --git a/pkg/apiserver/controllers/v1/machines.go b/pkg/apiserver/controllers/v1/machines.go new file mode 100644 index 000000000..645eb636b --- /dev/null +++ b/pkg/apiserver/controllers/v1/machines.go @@ -0,0 +1,33 @@ +package v1 + +import ( + "net/http" + + "github.com/crowdsecurity/crowdsec/pkg/models" + "github.com/crowdsecurity/crowdsec/pkg/types" + "github.com/gin-gonic/gin" + "github.com/go-openapi/strfmt" +) + +func (c *Controller) CreateMachine(gctx *gin.Context) { + defer types.CatchPanic("crowdsec/controllersV1/CreateMachine") + var err error + var input models.WatcherRegistrationRequest + if err = gctx.ShouldBindJSON(&input); err != nil { + gctx.JSON(http.StatusBadRequest, gin.H{"message": err.Error()}) + return + } + if err = input.Validate(strfmt.Default); err != nil { + c.HandleDBErrors(gctx, err) + return + } + + _, err = c.DBClient.CreateMachine(input.MachineID, input.Password, gctx.ClientIP(), false, false) + if err != nil { + c.HandleDBErrors(gctx, err) + return + } + + gctx.Status(http.StatusOK) + return +} diff --git a/pkg/apiserver/controllers/v1/metrics.go b/pkg/apiserver/controllers/v1/metrics.go new file mode 100644 index 000000000..33d13d3ca --- /dev/null +++ b/pkg/apiserver/controllers/v1/metrics.go @@ -0,0 +1,107 @@ +package v1 + +import ( + jwt "github.com/appleboy/gin-jwt/v2" + "github.com/gin-gonic/gin" + "github.com/prometheus/client_golang/prometheus" +) + +/*prometheus*/ +var LapiRouteHits = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "cs_lapi_route_requests_total", + Help: "Number of calls to each route.", + }, + []string{"route", "method"}, +) + +/*hits per machine*/ +var LapiMachineHits = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "cs_lapi_machine_requests_total", + Help: "Number of calls for each machine.", + }, + []string{"machine", "route", "method"}, +) + +/*hits per bouncer*/ +var LapiBouncerHits = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "cs_lapi_bouncer_requests_total", + Help: "Number of calls for each bouncer.", + }, + []string{"bouncer", "route", "method"}, +) + +/* keep track of the number of calls (per bouncer) that lead to nil/non-nil responses. 
+while it's not exact, it's a good way to know - when a bouncer is broken - what rate of ok/ko answers it gets from lapi*/
+var LapiNilDecisions = prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Name: "cs_lapi_decisions_ko_total",
+ Help: "Number of calls to /decisions that returned nil result.",
+ },
+ []string{"bouncer"},
+)
+
+/*non-nil answers per bouncer*/
+var LapiNonNilDecisions = prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Name: "cs_lapi_decisions_ok_total",
+ Help: "Number of calls to /decisions that returned non-nil result.",
+ },
+ []string{"bouncer"},
+)
+
+func PrometheusBouncersHasEmptyDecision(c *gin.Context) {
+ name, ok := c.Get("BOUNCER_NAME")
+ if ok {
+ LapiNilDecisions.With(prometheus.Labels{
+ "bouncer": name.(string)}).Inc()
+ }
+}
+
+func PrometheusBouncersHasNonEmptyDecision(c *gin.Context) {
+ name, ok := c.Get("BOUNCER_NAME")
+ if ok {
+ LapiNonNilDecisions.With(prometheus.Labels{
+ "bouncer": name.(string)}).Inc()
+ }
+}
+
+func PrometheusMachinesMiddleware() gin.HandlerFunc {
+ return func(c *gin.Context) {
+ claims := jwt.ExtractClaims(c)
+ if claims != nil {
+ if rawID, ok := claims["id"]; ok {
+ machineID := rawID.(string)
+ LapiMachineHits.With(prometheus.Labels{
+ "machine": machineID,
+ "route": c.Request.URL.Path,
+ "method": c.Request.Method}).Inc()
+ }
+ }
+ c.Next()
+ }
+}
+
+func PrometheusBouncersMiddleware() gin.HandlerFunc {
+ return func(c *gin.Context) {
+ name, ok := c.Get("BOUNCER_NAME")
+ if ok {
+ LapiBouncerHits.With(prometheus.Labels{
+ "bouncer": name.(string),
+ "route": c.Request.URL.Path,
+ "method": c.Request.Method}).Inc()
+ }
+ c.Next()
+ }
+}
+
+func PrometheusMiddleware() gin.HandlerFunc {
+ return func(c *gin.Context) {
+ LapiRouteHits.With(prometheus.Labels{
+ "route": c.Request.URL.Path,
+ "method": c.Request.Method}).Inc()
+ c.Next()
+ }
+}
diff --git a/pkg/apiserver/decisions_test.go b/pkg/apiserver/decisions_test.go
new file mode 100644
index 000000000..7d467005f
--- /dev/null
+++ b/pkg/apiserver/decisions_test.go
@@ -0,0 +1,453 @@
+package apiserver
+
+import (
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "net/http/httptest"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/crowdsecurity/crowdsec/pkg/models"
+ log "github.com/sirupsen/logrus"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestDeleteDecisionRange(t *testing.T) {
+ router, loginResp, err := InitMachineTest()
+ if err != nil {
+ log.Fatalln(err.Error())
+ }
+
+ // Create Valid Alert
+ alertContentBytes, err := ioutil.ReadFile("./tests/alert_minibulk.json")
+ if err != nil {
+ log.Fatal(err)
+ }
+ alerts := make([]*models.Alert, 0)
+ if err := json.Unmarshal(alertContentBytes, &alerts); err != nil {
+ log.Fatal(err)
+ }
+
+ for _, alert := range alerts {
+ *alert.StartAt = time.Now().Format(time.RFC3339)
+ *alert.StopAt = time.Now().Format(time.RFC3339)
+ }
+
+ alertContent, err := json.Marshal(alerts)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ w := httptest.NewRecorder()
+ req, _ := http.NewRequest("POST", "/v1/alerts", strings.NewReader(string(alertContent)))
+ req.Header.Add("User-Agent", UserAgent)
+ req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", loginResp.Token))
+ router.ServeHTTP(w, req)
+
+ // delete by wrong range
+ w = httptest.NewRecorder()
+ req, _ = http.NewRequest("DELETE", "/v1/decisions?range=1.2.3.0/24", strings.NewReader(""))
+ req.Header.Add("User-Agent", UserAgent)
+ req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", loginResp.Token))
+ router.ServeHTTP(w, req)
+ assert.Equal(t, 200, w.Code)
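+ // 1.2.3.0/24 does not match any stored decision, so nothing is deleted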
assert.Equal(t, `{"nbDeleted":"0"}`, w.Body.String()) + + // delete by range + w = httptest.NewRecorder() + req, _ = http.NewRequest("DELETE", "/v1/decisions?range=91.121.79.0/24", strings.NewReader("")) + req.Header.Add("User-Agent", UserAgent) + req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", loginResp.Token)) + router.ServeHTTP(w, req) + assert.Equal(t, 200, w.Code) + assert.Equal(t, `{"nbDeleted":"2"}`, w.Body.String()) + + // delete by range : ensure it was already deleted + w = httptest.NewRecorder() + req, _ = http.NewRequest("DELETE", "/v1/decisions?range=91.121.79.0/24", strings.NewReader("")) + req.Header.Add("User-Agent", UserAgent) + req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", loginResp.Token)) + router.ServeHTTP(w, req) + assert.Equal(t, 200, w.Code) + assert.Equal(t, `{"nbDeleted":"0"}`, w.Body.String()) +} + +func TestDeleteDecisionFilter(t *testing.T) { + router, loginResp, err := InitMachineTest() + if err != nil { + log.Fatalln(err.Error()) + } + + // Create Valid Alert + alertContentBytes, err := ioutil.ReadFile("./tests/alert_minibulk.json") + if err != nil { + log.Fatal(err) + } + alerts := make([]*models.Alert, 0) + if err := json.Unmarshal(alertContentBytes, &alerts); err != nil { + log.Fatal(err) + } + + for _, alert := range alerts { + *alert.StartAt = time.Now().Format(time.RFC3339) + *alert.StopAt = time.Now().Format(time.RFC3339) + } + + alertContent, err := json.Marshal(alerts) + if err != nil { + log.Fatal(err) + } + + w := httptest.NewRecorder() + req, _ := http.NewRequest("POST", "/v1/alerts", strings.NewReader(string(alertContent))) + req.Header.Add("User-Agent", UserAgent) + req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", loginResp.Token)) + router.ServeHTTP(w, req) + + // delete by ip wrong + w = httptest.NewRecorder() + req, _ = http.NewRequest("DELETE", "/v1/decisions?ip=1.2.3.4", strings.NewReader("")) + req.Header.Add("User-Agent", UserAgent) + req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", loginResp.Token)) + router.ServeHTTP(w, req) + assert.Equal(t, 200, w.Code) + assert.Equal(t, `{"nbDeleted":"0"}`, w.Body.String()) + + // delete by ip good + w = httptest.NewRecorder() + req, _ = http.NewRequest("DELETE", "/v1/decisions?ip=91.121.79.179", strings.NewReader("")) + req.Header.Add("User-Agent", UserAgent) + req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", loginResp.Token)) + router.ServeHTTP(w, req) + assert.Equal(t, 200, w.Code) + assert.Equal(t, `{"nbDeleted":"1"}`, w.Body.String()) + + // delete by scope/value + w = httptest.NewRecorder() + req, _ = http.NewRequest("DELETE", "/v1/decisions?scope=Ip&value=91.121.79.178", strings.NewReader("")) + req.Header.Add("User-Agent", UserAgent) + req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", loginResp.Token)) + router.ServeHTTP(w, req) + assert.Equal(t, 200, w.Code) + assert.Equal(t, `{"nbDeleted":"1"}`, w.Body.String()) +} + +func TestGetDecisionFilters(t *testing.T) { + router, loginResp, err := InitMachineTest() + if err != nil { + log.Fatalln(err.Error()) + } + + // Create Valid Alert + alertContentBytes, err := ioutil.ReadFile("./tests/alert_minibulk.json") + if err != nil { + log.Fatal(err) + } + alerts := make([]*models.Alert, 0) + if err := json.Unmarshal(alertContentBytes, &alerts); err != nil { + log.Fatal(err) + } + + for _, alert := range alerts { + *alert.StartAt = time.Now().Format(time.RFC3339) + *alert.StopAt = time.Now().Format(time.RFC3339) + } + + alertContent, err := json.Marshal(alerts) + if err != nil { + log.Fatal(err) + } + + 
w := httptest.NewRecorder() + req, _ := http.NewRequest("POST", "/v1/alerts", strings.NewReader(string(alertContent))) + req.Header.Add("User-Agent", UserAgent) + req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", loginResp.Token)) + router.ServeHTTP(w, req) + + APIKey, err := CreateTestBouncer() + if err != nil { + log.Fatalf("%s", err.Error()) + } + + // Get Decision + w = httptest.NewRecorder() + req, _ = http.NewRequest("GET", "/v1/decisions", strings.NewReader("")) + req.Header.Add("User-Agent", UserAgent) + req.Header.Add("X-Api-Key", APIKey) + router.ServeHTTP(w, req) + assert.Equal(t, 200, w.Code) + assert.Contains(t, w.Body.String(), `"end_ip":1534676915,"id":1,"origin":"crowdsec","scenario":"crowdsecurity/ssh-bf","scope":"Ip","start_ip":1534676915,"type":"ban","value":"91.121.79.179"`) + assert.Contains(t, w.Body.String(), `"end_ip":1534676914,"id":2,"origin":"crowdsec","scenario":"crowdsecurity/ssh-bf","scope":"Ip","start_ip":1534676914,"type":"ban","value":"91.121.79.178"`) + + // Get Decision : type filter + w = httptest.NewRecorder() + req, _ = http.NewRequest("GET", "/v1/decisions?type=ban", strings.NewReader("")) + req.Header.Add("User-Agent", UserAgent) + req.Header.Add("X-Api-Key", APIKey) + router.ServeHTTP(w, req) + assert.Equal(t, 200, w.Code) + assert.Contains(t, w.Body.String(), `"end_ip":1534676915,"id":1,"origin":"crowdsec","scenario":"crowdsecurity/ssh-bf","scope":"Ip","start_ip":1534676915,"type":"ban","value":"91.121.79.179"`) + assert.Contains(t, w.Body.String(), `"end_ip":1534676914,"id":2,"origin":"crowdsec","scenario":"crowdsecurity/ssh-bf","scope":"Ip","start_ip":1534676914,"type":"ban","value":"91.121.79.178"`) + + // Get Decision : scope/value + w = httptest.NewRecorder() + req, _ = http.NewRequest("GET", "/v1/decisions?scope=Ip&value=91.121.79.179", strings.NewReader("")) + req.Header.Add("User-Agent", UserAgent) + req.Header.Add("X-Api-Key", APIKey) + router.ServeHTTP(w, req) + assert.Equal(t, 200, w.Code) + assert.Contains(t, w.Body.String(), `"end_ip":1534676915,"id":1,"origin":"crowdsec","scenario":"crowdsecurity/ssh-bf","scope":"Ip","start_ip":1534676915,"type":"ban","value":"91.121.79.179"`) + assert.NotContains(t, w.Body.String(), `"end_ip":1534676914,"id":2,"origin":"crowdsec","scenario":"crowdsecurity/ssh-bf","scope":"Ip","start_ip":1534676914,"type":"ban","value":"91.121.79.178"`) + + // Get Decision : ip filter + w = httptest.NewRecorder() + req, _ = http.NewRequest("GET", "/v1/decisions?ip=91.121.79.179", strings.NewReader("")) + req.Header.Add("User-Agent", UserAgent) + req.Header.Add("X-Api-Key", APIKey) + router.ServeHTTP(w, req) + assert.Equal(t, 200, w.Code) + assert.Contains(t, w.Body.String(), `"end_ip":1534676915,"id":1,"origin":"crowdsec","scenario":"crowdsecurity/ssh-bf","scope":"Ip","start_ip":1534676915,"type":"ban","value":"91.121.79.179"`) + assert.NotContains(t, w.Body.String(), `"end_ip":1534676914,"id":2,"origin":"crowdsec","scenario":"crowdsecurity/ssh-bf","scope":"Ip","start_ip":1534676914,"type":"ban","value":"91.121.79.178"`) + + // Get decision : by range + w = httptest.NewRecorder() + req, _ = http.NewRequest("GET", "/v1/decisions?range=91.121.79.0/24", strings.NewReader("")) + req.Header.Add("User-Agent", UserAgent) + req.Header.Add("X-Api-Key", APIKey) + router.ServeHTTP(w, req) + assert.Equal(t, 200, w.Code) + assert.Contains(t, w.Body.String(), `"end_ip":1534676915,"id":1,"origin":"crowdsec","scenario":"crowdsecurity/ssh-bf","scope":"Ip","start_ip":1534676915,"type":"ban","value":"91.121.79.179"`) + 
assert.Contains(t, w.Body.String(), `"end_ip":1534676914,"id":2,"origin":"crowdsec","scenario":"crowdsecurity/ssh-bf","scope":"Ip","start_ip":1534676914,"type":"ban","value":"91.121.79.178"`)
+}
+
+func TestGetDecision(t *testing.T) {
+ router, loginResp, err := InitMachineTest()
+ if err != nil {
+ log.Fatalln(err.Error())
+ }
+
+ // Create Valid Alert
+ alertContentBytes, err := ioutil.ReadFile("./tests/alert_sample.json")
+ if err != nil {
+ log.Fatal(err)
+ }
+ alerts := make([]*models.Alert, 0)
+ if err := json.Unmarshal(alertContentBytes, &alerts); err != nil {
+ log.Fatal(err)
+ }
+
+ for _, alert := range alerts {
+ *alert.StartAt = time.Now().Format(time.RFC3339)
+ *alert.StopAt = time.Now().Format(time.RFC3339)
+ }
+
+ alertContent, err := json.Marshal(alerts)
+ if err != nil {
+ log.Fatal(err)
+ }
+ w := httptest.NewRecorder()
+ req, _ := http.NewRequest("POST", "/v1/alerts", strings.NewReader(string(alertContent)))
+ req.Header.Add("User-Agent", UserAgent)
+ req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", loginResp.Token))
+ router.ServeHTTP(w, req)
+
+ APIKey, err := CreateTestBouncer()
+ if err != nil {
+ log.Fatalf("%s", err.Error())
+ }
+
+ // Get Decision with invalid filter
+ w = httptest.NewRecorder()
+ req, _ = http.NewRequest("GET", "/v1/decisions?test=test", strings.NewReader(""))
+ req.Header.Add("User-Agent", UserAgent)
+ req.Header.Add("X-Api-Key", APIKey)
+ router.ServeHTTP(w, req)
+
+ assert.Equal(t, 500, w.Code)
+ assert.Equal(t, "{\"message\":\"'test' doesn't exist: invalid filter\"}", w.Body.String())
+
+ // Get Decision
+ w = httptest.NewRecorder()
+ req, _ = http.NewRequest("GET", "/v1/decisions", strings.NewReader(""))
+ req.Header.Add("User-Agent", UserAgent)
+ req.Header.Add("X-Api-Key", APIKey)
+ router.ServeHTTP(w, req)
+
+ assert.Equal(t, 200, w.Code)
+ assert.Contains(t, w.Body.String(), "\"end_ip\":2130706433,\"id\":1,\"origin\":\"test\",\"scenario\":\"crowdsecurity/test\",\"scope\":\"ip\",\"start_ip\":2130706433,\"type\":\"ban\",\"value\":\"127.0.0.1\"}]")
+
+}
+
+func TestDeleteDecisionByID(t *testing.T) {
+ router, loginResp, err := InitMachineTest()
+ if err != nil {
+ log.Fatalln(err.Error())
+ }
+
+ // Create Valid Alert
+ alertContentBytes, err := ioutil.ReadFile("./tests/alert_sample.json")
+ if err != nil {
+ log.Fatal(err)
+ }
+ alerts := make([]*models.Alert, 0)
+ if err := json.Unmarshal(alertContentBytes, &alerts); err != nil {
+ log.Fatal(err)
+ }
+
+ for _, alert := range alerts {
+ *alert.StartAt = time.Now().Format(time.RFC3339)
+ *alert.StopAt = time.Now().Format(time.RFC3339)
+ }
+
+ alertContent, err := json.Marshal(alerts)
+ if err != nil {
+ log.Fatal(err)
+ }
+ w := httptest.NewRecorder()
+ req, _ := http.NewRequest("POST", "/v1/alerts", strings.NewReader(string(alertContent)))
+ req.Header.Add("User-Agent", UserAgent)
+ req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", loginResp.Token))
+ router.ServeHTTP(w, req)
+
+ // Delete decision with an invalid ID
+ w = httptest.NewRecorder()
+ req, _ = http.NewRequest("DELETE", "/v1/decisions/test", strings.NewReader(""))
+ req.Header.Add("User-Agent", UserAgent)
+ req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", loginResp.Token))
+ router.ServeHTTP(w, req)
+
+ assert.Equal(t, 400, w.Code)
+ assert.Equal(t, "{\"message\":\"decision_id must be valid integer\"}", w.Body.String())
+
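+ // deletion goes through SoftDeleteDecisionByID (a soft delete), and an
+ // unknown ID surfaces as an explicit error rather than a silent no-op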
+ // Delete decision with an ID that does not exist
+ w = httptest.NewRecorder()
+ req, _ = http.NewRequest("DELETE", "/v1/decisions/100", strings.NewReader(""))
+ req.Header.Add("User-Agent", UserAgent)
+ req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", loginResp.Token))
+ router.ServeHTTP(w, req)
+
+ assert.Equal(t, 500, w.Code)
+ assert.Equal(t, "{\"message\":\"decision with id '100' doesn't exist: unable to delete\"}", w.Body.String())
+
+ // Delete decision with a valid ID
+ w = httptest.NewRecorder()
+ req, _ = http.NewRequest("DELETE", "/v1/decisions/1", strings.NewReader(""))
+ req.Header.Add("User-Agent", UserAgent)
+ req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", loginResp.Token))
+ router.ServeHTTP(w, req)
+
+ assert.Equal(t, 200, w.Code)
+ assert.Equal(t, "{\"nbDeleted\":\"1\"}", w.Body.String())
+
+}
+
+func TestDeleteDecision(t *testing.T) {
+ router, loginResp, err := InitMachineTest()
+ if err != nil {
+ log.Fatalln(err.Error())
+ }
+
+ // Create Valid Alert
+ alertContentBytes, err := ioutil.ReadFile("./tests/alert_sample.json")
+ if err != nil {
+ log.Fatal(err)
+ }
+ alerts := make([]*models.Alert, 0)
+ if err := json.Unmarshal(alertContentBytes, &alerts); err != nil {
+ log.Fatal(err)
+ }
+
+ for _, alert := range alerts {
+ *alert.StartAt = time.Now().Format(time.RFC3339)
+ *alert.StopAt = time.Now().Format(time.RFC3339)
+ }
+
+ alertContent, err := json.Marshal(alerts)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ w := httptest.NewRecorder()
+ req, _ := http.NewRequest("POST", "/v1/alerts", strings.NewReader(string(alertContent)))
+ req.Header.Add("User-Agent", UserAgent)
+ req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", loginResp.Token))
+ router.ServeHTTP(w, req)
+
+ // Delete decisions with an invalid filter
+ w = httptest.NewRecorder()
+ req, _ = http.NewRequest("DELETE", "/v1/decisions?test=test", strings.NewReader(""))
+ req.Header.Add("User-Agent", UserAgent)
+ req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", loginResp.Token))
+ router.ServeHTTP(w, req)
+
+ assert.Equal(t, 500, w.Code)
+ assert.Equal(t, "{\"message\":\"'test' doesn't exist: invalid filter\"}", w.Body.String())
+
+ // Delete all decisions
+ w = httptest.NewRecorder()
+ req, _ = http.NewRequest("DELETE", "/v1/decisions", strings.NewReader(""))
+ req.Header.Add("User-Agent", UserAgent)
+ req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", loginResp.Token))
+ router.ServeHTTP(w, req)
+
+ assert.Equal(t, 200, w.Code)
+ assert.Equal(t, "{\"nbDeleted\":\"1\"}", w.Body.String())
+
+}
+
+func TestStreamDecision(t *testing.T) {
+ router, loginResp, err := InitMachineTest()
+ if err != nil {
+ log.Fatalln(err.Error())
+ }
+
+ // Create Valid Alert
+ alertContentBytes, err := ioutil.ReadFile("./tests/alert_sample.json")
+ if err != nil {
+ log.Fatal(err)
+ }
+ alerts := make([]*models.Alert, 0)
+ if err := json.Unmarshal(alertContentBytes, &alerts); err != nil {
+ log.Fatal(err)
+ }
+
+ for _, alert := range alerts {
+ *alert.StartAt = time.Now().Format(time.RFC3339)
+ *alert.StopAt = time.Now().Format(time.RFC3339)
+ }
+
+ alertContent, err := json.Marshal(alerts)
+ if err != nil {
+ log.Fatal(err)
+ }
+ w := httptest.NewRecorder()
+ req, _ := http.NewRequest("POST", "/v1/alerts", strings.NewReader(string(alertContent)))
+ req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", loginResp.Token))
+ router.ServeHTTP(w, req)
+
+ APIKey, err := CreateTestBouncer()
+ if err != nil {
+ log.Fatalf("%s", err.Error())
+ }
+
+ // Get Stream
+ w = httptest.NewRecorder()
+ req, _ = http.NewRequest("GET", "/v1/decisions/stream", strings.NewReader(""))
+ req.Header.Add("X-Api-Key", APIKey)
+ router.ServeHTTP(w, req)
+
+ assert.Equal(t, 200, w.Code)
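+ // nothing new since this bouncer's last pull and no startup flag is
+ // set, so both the "new" and "deleted" lists come back null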
"{\"deleted\":null,\"new\":null}", w.Body.String()) + + // Get Stream just startup + w = httptest.NewRecorder() + req, _ = http.NewRequest("GET", "/v1/decisions/stream?startup=true", strings.NewReader("")) + req.Header.Add("X-Api-Key", APIKey) + router.ServeHTTP(w, req) + + assert.Equal(t, 200, w.Code) + assert.Contains(t, w.Body.String(), "\"end_ip\":2130706433,\"id\":1,\"origin\":\"test\",\"scenario\":\"crowdsecurity/test\",\"scope\":\"ip\",\"start_ip\":2130706433,\"type\":\"ban\",\"value\":\"127.0.0.1\"}]}") +} diff --git a/pkg/apiserver/jwt_test.go b/pkg/apiserver/jwt_test.go new file mode 100644 index 000000000..f959da820 --- /dev/null +++ b/pkg/apiserver/jwt_test.go @@ -0,0 +1,95 @@ +package apiserver + +import ( + "net/http" + "net/http/httptest" + "strings" + "testing" + + log "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" +) + +func TestLogin(t *testing.T) { + router, err := NewAPITest() + if err != nil { + log.Fatalf("unable to run local API: %s", err) + } + + body, err := CreateTestMachine(router) + if err != nil { + log.Fatalln(err.Error()) + } + + // Login with machine not validated yet + w := httptest.NewRecorder() + req, _ := http.NewRequest("POST", "/v1/watchers/login", strings.NewReader(body)) + req.Header.Add("User-Agent", UserAgent) + router.ServeHTTP(w, req) + + assert.Equal(t, 401, w.Code) + assert.Equal(t, "{\"code\":401,\"message\":\"machine test not validated\"}", w.Body.String()) + + // Login with machine not exist + w = httptest.NewRecorder() + req, _ = http.NewRequest("POST", "/v1/watchers/login", strings.NewReader("{\"machine_id\": \"test1\", \"password\": \"test1\"}")) + req.Header.Add("User-Agent", UserAgent) + router.ServeHTTP(w, req) + + assert.Equal(t, 401, w.Code) + assert.Equal(t, "{\"code\":401,\"message\":\"ent: machine not found\"}", w.Body.String()) + + // Login with invalid body + w = httptest.NewRecorder() + req, _ = http.NewRequest("POST", "/v1/watchers/login", strings.NewReader("test")) + req.Header.Add("User-Agent", UserAgent) + router.ServeHTTP(w, req) + + assert.Equal(t, 401, w.Code) + assert.Equal(t, "{\"code\":401,\"message\":\"missing : invalid character 'e' in literal true (expecting 'r')\"}", w.Body.String()) + + // Login with invalid format + w = httptest.NewRecorder() + req, _ = http.NewRequest("POST", "/v1/watchers/login", strings.NewReader("{\"machine_id\": \"test1\"}")) + req.Header.Add("User-Agent", UserAgent) + router.ServeHTTP(w, req) + + assert.Equal(t, 401, w.Code) + assert.Equal(t, "{\"code\":401,\"message\":\"input format error\"}", w.Body.String()) + + //Validate machine + err = ValidateMachine("test") + if err != nil { + log.Fatalln(err.Error()) + } + + // Login with invalid password + w = httptest.NewRecorder() + req, _ = http.NewRequest("POST", "/v1/watchers/login", strings.NewReader("{\"machine_id\": \"test\", \"password\": \"test1\"}")) + req.Header.Add("User-Agent", UserAgent) + router.ServeHTTP(w, req) + + assert.Equal(t, 401, w.Code) + assert.Equal(t, "{\"code\":401,\"message\":\"incorrect Username or Password\"}", w.Body.String()) + + // Login with valid machine + w = httptest.NewRecorder() + req, _ = http.NewRequest("POST", "/v1/watchers/login", strings.NewReader(body)) + req.Header.Add("User-Agent", UserAgent) + router.ServeHTTP(w, req) + + assert.Equal(t, 200, w.Code) + assert.Contains(t, w.Body.String(), "\"token\"") + assert.Contains(t, w.Body.String(), "\"expire\"") + + // Login with valid machine + scenarios + w = httptest.NewRecorder() + req, _ = http.NewRequest("POST", 
"/v1/watchers/login", strings.NewReader("{\"machine_id\": \"test\", \"password\": \"test\", \"scenarios\": [\"crowdsecurity/test\", \"crowdsecurity/test2\"]}")) + req.Header.Add("User-Agent", UserAgent) + router.ServeHTTP(w, req) + + assert.Equal(t, 200, w.Code) + assert.Contains(t, w.Body.String(), "\"token\"") + assert.Contains(t, w.Body.String(), "\"expire\"") + +} diff --git a/pkg/apiserver/machines_test.go b/pkg/apiserver/machines_test.go new file mode 100644 index 000000000..1ae05f0c0 --- /dev/null +++ b/pkg/apiserver/machines_test.go @@ -0,0 +1,79 @@ +package apiserver + +import ( + "encoding/json" + "net/http" + "net/http/httptest" + "strings" + "testing" + + log "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" +) + +func TestCreateMachine(t *testing.T) { + router, err := NewAPITest() + if err != nil { + log.Fatalf("unable to run local API: %s", err) + } + + // Create machine with invalid format + w := httptest.NewRecorder() + req, _ := http.NewRequest("POST", "/v1/watchers", strings.NewReader("test")) + req.Header.Add("User-Agent", UserAgent) + router.ServeHTTP(w, req) + + assert.Equal(t, 400, w.Code) + assert.Equal(t, "{\"message\":\"invalid character 'e' in literal true (expecting 'r')\"}", w.Body.String()) + + // Create machine with invalid input + w = httptest.NewRecorder() + req, _ = http.NewRequest("POST", "/v1/watchers", strings.NewReader("{\"test\": \"test\"}")) + req.Header.Add("User-Agent", UserAgent) + router.ServeHTTP(w, req) + + assert.Equal(t, 500, w.Code) + assert.Equal(t, "{\"message\":\"validation failure list:\\nmachine_id in body is required\\npassword in body is required\"}", w.Body.String()) + + // Create machine + b, err := json.Marshal(MachineTest) + if err != nil { + log.Fatalf("unable to marshal MachineTest") + } + body := string(b) + + w = httptest.NewRecorder() + req, _ = http.NewRequest("POST", "/v1/watchers", strings.NewReader(body)) + req.Header.Add("User-Agent", UserAgent) + router.ServeHTTP(w, req) + + assert.Equal(t, 200, w.Code) + assert.Equal(t, "", w.Body.String()) + +} + +func TestCreateMachineAlreadyExist(t *testing.T) { + router, err := NewAPITest() + if err != nil { + log.Fatalf("unable to run local API: %s", err) + } + + body, err := CreateTestMachine(router) + if err != nil { + log.Fatalln(err.Error()) + } + + w := httptest.NewRecorder() + req, _ := http.NewRequest("POST", "/v1/watchers", strings.NewReader(body)) + req.Header.Add("User-Agent", UserAgent) + router.ServeHTTP(w, req) + + w = httptest.NewRecorder() + req, _ = http.NewRequest("POST", "/v1/watchers", strings.NewReader(body)) + req.Header.Add("User-Agent", UserAgent) + router.ServeHTTP(w, req) + + assert.Equal(t, 403, w.Code) + assert.Equal(t, "{\"message\":\"user 'test': user already exist\"}", w.Body.String()) + +} diff --git a/pkg/apiserver/middlewares/v1/api_key.go b/pkg/apiserver/middlewares/v1/api_key.go new file mode 100644 index 000000000..9cb975017 --- /dev/null +++ b/pkg/apiserver/middlewares/v1/api_key.go @@ -0,0 +1,114 @@ +package v1 + +import ( + "crypto/rand" + "crypto/sha512" + "encoding/hex" + "fmt" + "net/http" + "strings" + + "github.com/crowdsecurity/crowdsec/pkg/database" + "github.com/crowdsecurity/crowdsec/pkg/types" + "github.com/gin-gonic/gin" + log "github.com/sirupsen/logrus" +) + +var ( + APIKeyHeader = "X-Api-Key" +) + +type APIKey struct { + HeaderName string + DbClient *database.Client +} + +func GenerateAPIKey(n int) (string, error) { + bytes := make([]byte, n) + if _, err := rand.Read(bytes); err != nil { + return "", err + } + 
return hex.EncodeToString(bytes), nil +} + +func NewAPIKey(dbClient *database.Client) *APIKey { + return &APIKey{ + HeaderName: APIKeyHeader, + DbClient: dbClient, + } +} + +func HashSHA512(str string) string { + hashedKey := sha512.New() + hashedKey.Write([]byte(str)) + + hashStr := fmt.Sprintf("%x", hashedKey.Sum(nil)) + + return hashStr +} + +func (a *APIKey) MiddlewareFunc() gin.HandlerFunc { + return func(c *gin.Context) { + defer types.CatchPanic("crowdsec/middlewaresV1/api_key/MiddlewareFunc") + val, ok := c.Request.Header[APIKeyHeader] + if !ok { + c.JSON(http.StatusForbidden, gin.H{"message": "access forbidden"}) + c.Abort() + return + } + + hashStr := HashSHA512(val[0]) + bouncer, err := a.DbClient.SelectBouncer(hashStr) + if err != nil { + log.Errorf("auth api key error: %s", err) + c.JSON(http.StatusForbidden, gin.H{"message": "access forbidden"}) + c.Abort() + return + } + + if bouncer == nil { + c.JSON(http.StatusForbidden, gin.H{"message": "access forbidden"}) + c.Abort() + return + } + + c.Set("BOUNCER_NAME", bouncer.Name) + + if bouncer.IPAddress == "" { + err = a.DbClient.UpdateBouncerIP(c.ClientIP(), bouncer.ID) + if err != nil { + log.Errorf("Failed to update ip address for '%s': %s\n", bouncer.Name, err) + c.JSON(http.StatusForbidden, gin.H{"message": "access forbidden"}) + c.Abort() + return + } + } + + if bouncer.IPAddress != c.ClientIP() && bouncer.IPAddress != "" { + log.Warningf("new IP address detected for bouncer '%s': %s (old: %s)", bouncer.Name, c.ClientIP(), bouncer.IPAddress) + err = a.DbClient.UpdateBouncerIP(c.ClientIP(), bouncer.ID) + if err != nil { + log.Errorf("Failed to update ip address for '%s': %s\n", bouncer.Name, err) + c.JSON(http.StatusForbidden, gin.H{"message": "access forbidden"}) + c.Abort() + return + } + } + + useragent := strings.Split(c.Request.UserAgent(), "/") + + if len(useragent) != 2 { + log.Warningf("bad user agent '%s' from '%s'", c.Request.UserAgent(), c.ClientIP()) + useragent = []string{c.Request.UserAgent(), "N/A"} + } + + if err := a.DbClient.UpdateBouncerTypeAndVersion(useragent[0], useragent[1], bouncer.ID); err != nil { + log.Errorf("failed to update bouncer version and type from '%s' (%s): %s", c.Request.UserAgent(), c.ClientIP(), err) + c.JSON(http.StatusForbidden, gin.H{"message": "bad user agent"}) + c.Abort() + return + } + + c.Next() + } +} diff --git a/pkg/apiserver/middlewares/v1/jwt.go b/pkg/apiserver/middlewares/v1/jwt.go new file mode 100644 index 000000000..c344a0100 --- /dev/null +++ b/pkg/apiserver/middlewares/v1/jwt.go @@ -0,0 +1,195 @@ +package v1 + +import ( + "crypto/rand" + "fmt" + "os" + "strings" + "time" + + "errors" + + jwt "github.com/appleboy/gin-jwt/v2" + "github.com/crowdsecurity/crowdsec/pkg/database" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/machine" + "github.com/crowdsecurity/crowdsec/pkg/models" + "github.com/crowdsecurity/crowdsec/pkg/types" + "github.com/gin-gonic/gin" + "github.com/go-openapi/strfmt" + log "github.com/sirupsen/logrus" + "golang.org/x/crypto/bcrypt" +) + +var identityKey = "id" + +type JWT struct { + Middleware *jwt.GinJWTMiddleware + DbClient *database.Client +} + +func PayloadFunc(data interface{}) jwt.MapClaims { + if value, ok := data.(*models.WatcherAuthRequest); ok { + return jwt.MapClaims{ + identityKey: &value.MachineID, + } + } + return jwt.MapClaims{} +} + +func IdentityHandler(c *gin.Context) interface{} { + claims := jwt.ExtractClaims(c) + machineId := claims[identityKey].(string) + return &models.WatcherAuthRequest{ + MachineID: &machineId, + } +} 
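+
+// Note: PayloadFunc stores a pointer to the machine ID, but the claims
+// are serialized as plain JSON inside the token, so when IdentityHandler
+// reads them back claims[identityKey] is an ordinary string; that is why
+// the .(string) assertion above is safe once a token has been issued.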
+
+func (j *JWT) Authenticator(c *gin.Context) (interface{}, error) {
+ defer types.CatchPanic("crowdsec/middlewaresV1/jwt/Authenticator")
+ var loginInput models.WatcherAuthRequest
+ var scenarios string
+ var err error
+ if err := c.ShouldBindJSON(&loginInput); err != nil {
+ return "", errors.New(fmt.Sprintf("missing : %v", err.Error()))
+ }
+ if err := loginInput.Validate(strfmt.Default); err != nil {
+ return "", errors.New("input format error")
+ }
+ machineID := *loginInput.MachineID
+ password := *loginInput.Password
+ scenariosInput := loginInput.Scenarios
+
+ machine, err := j.DbClient.Ent.Machine.Query().
+ Where(machine.MachineId(machineID)).
+ First(j.DbClient.CTX)
+ if err != nil {
+ log.Errorf("machine login error for %s : %+v", machineID, err)
+ return nil, err
+ }
+
+ if machine == nil {
+ log.Errorf("Nothing for '%s'", machineID)
+ return nil, jwt.ErrFailedAuthentication
+ }
+
+ if !machine.IsValidated {
+ return nil, fmt.Errorf("machine %s not validated", machineID)
+ }
+
+ if err = bcrypt.CompareHashAndPassword([]byte(machine.Password), []byte(password)); err != nil {
+ return nil, jwt.ErrFailedAuthentication
+ }
+
+ if len(scenariosInput) > 0 {
+ for _, scenario := range scenariosInput {
+ if scenarios == "" {
+ scenarios = scenario
+ } else {
+ scenarios += "," + scenario
+ }
+ }
+ err = j.DbClient.UpdateMachineScenarios(scenarios, machine.ID)
+ if err != nil {
+ log.Errorf("Failed to update scenarios list for '%s': %s\n", machineID, err)
+ return nil, jwt.ErrFailedAuthentication
+ }
+ }
+
+ if machine.IpAddress == "" {
+ err = j.DbClient.UpdateMachineIP(c.ClientIP(), machine.ID)
+ if err != nil {
+ log.Errorf("Failed to update ip address for '%s': %s\n", machineID, err)
+ return nil, jwt.ErrFailedAuthentication
+ }
+ }
+
+ if machine.IpAddress != c.ClientIP() && machine.IpAddress != "" {
+ log.Warningf("new IP address detected for machine '%s': %s (old: %s)", machine.MachineId, c.ClientIP(), machine.IpAddress)
+ err = j.DbClient.UpdateMachineIP(c.ClientIP(), machine.ID)
+ if err != nil {
+ log.Errorf("Failed to update ip address for '%s': %s\n", machine.MachineId, err)
+ return nil, jwt.ErrFailedAuthentication
+ }
+ }
+
+ useragent := strings.Split(c.Request.UserAgent(), "/")
+ if len(useragent) != 2 {
+ log.Warningf("bad user agent '%s' from '%s'", c.Request.UserAgent(), c.ClientIP())
+ return nil, jwt.ErrFailedAuthentication
+ }
+
+ if err := j.DbClient.UpdateMachineVersion(useragent[1], machine.ID); err != nil {
+ log.Errorf("unable to update machine '%s' version '%s': %s", machine.MachineId, useragent[1], err)
+ log.Errorf("bad user agent from : %s", c.ClientIP())
+ return nil, jwt.ErrFailedAuthentication
+ }
+
+ return &models.WatcherAuthRequest{
+ MachineID: &machineID,
+ }, nil
+
+}
+
+func Authorizator(data interface{}, c *gin.Context) bool {
+ return true
+}
+
+func Unauthorized(c *gin.Context, code int, message string) {
+ c.JSON(code, gin.H{
+ "code": code,
+ "message": message,
+ })
+}
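+
+// Note: when CS_LAPI_SECRET is unset, NewJWT below falls back to an
+// 8-byte random secret generated at startup, so issued tokens become
+// invalid whenever the process restarts; operators who need tokens to
+// survive restarts should set CS_LAPI_SECRET explicitly.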
+
+func NewJWT(dbClient *database.Client) (*JWT, error) {
+ // Get the secret from the environment variable "CS_LAPI_SECRET"
+ var (
+ secret []byte
+ )
+
+ secret_string := os.Getenv("CS_LAPI_SECRET")
+
+ if secret_string == "" {
+ secret = make([]byte, 8)
+ if n, err := rand.Reader.Read(secret); err != nil {
+ log.Fatalf("Unable to generate a new random seed for JWT generation")
+ } else {
+ if n != 8 {
+ log.Errorf("Not enough entropy at random seed generation for JWT generation")
+ }
+ }
+ } else {
+ secret = []byte(secret_string)
+ }
+
+ jwtMiddleware := &JWT{
+ DbClient: dbClient,
+ }
+
+ ret, err := jwt.New(&jwt.GinJWTMiddleware{
+ Realm: "Crowdsec API local",
+ Key: secret,
+ Timeout: time.Hour,
+ MaxRefresh: time.Hour,
+ IdentityKey: identityKey,
+ PayloadFunc: PayloadFunc,
+ IdentityHandler: IdentityHandler,
+ Authenticator: jwtMiddleware.Authenticator,
+ Authorizator: Authorizator,
+ Unauthorized: Unauthorized,
+ TokenLookup: "header: Authorization, query: token, cookie: jwt",
+ TokenHeadName: "Bearer",
+ TimeFunc: time.Now,
+ })
+ if err != nil {
+ return &JWT{}, err
+ }
+
+ errInit := ret.MiddlewareInit()
+ if errInit != nil {
+ return &JWT{}, fmt.Errorf("authMiddleware.MiddlewareInit() error: %s", errInit.Error())
+ }
+
+ return &JWT{Middleware: ret}, nil
+}
diff --git a/pkg/apiserver/middlewares/v1/middlewares.go b/pkg/apiserver/middlewares/v1/middlewares.go
new file mode 100644
index 000000000..7777f5857
--- /dev/null
+++ b/pkg/apiserver/middlewares/v1/middlewares.go
@@ -0,0 +1,23 @@
+package v1
+
+import "github.com/crowdsecurity/crowdsec/pkg/database"
+
+type Middlewares struct {
+ APIKey *APIKey
+ JWT *JWT
+}
+
+func NewMiddlewares(dbClient *database.Client) (*Middlewares, error) {
+ var err error
+
+ ret := &Middlewares{}
+
+ ret.JWT, err = NewJWT(dbClient)
+ if err != nil {
+ return &Middlewares{}, err
+ }
+
+ ret.APIKey = NewAPIKey(dbClient)
+
+ return ret, nil
+}
diff --git a/pkg/apiserver/tests/alertWithInvalidMachineID_sample.json b/pkg/apiserver/tests/alertWithInvalidMachineID_sample.json
new file mode 100644
index 000000000..ecc937480
--- /dev/null
+++ b/pkg/apiserver/tests/alertWithInvalidMachineID_sample.json
@@ -0,0 +1,61 @@
+[
+ {
+ "id": 1,
+ "machine_id": "test1",
+ "capacity": 1,
+ "created_at": "2020-10-09T10:00:10Z",
+ "decisions": [
+ {
+ "id": 1,
+ "duration": "1h",
+ "start_ip": 2130706433,
+ "end_ip": 2130706433,
+ "origin": "test",
+ "scenario": "crowdsecurity/test",
+ "scope": "ip",
+ "value": "127.0.0.1",
+ "type": "ban"
+ }
+ ],
+ "Events": [
+ {
+ "meta": [
+ {
+ "key": "test",
+ "value": "test"
+ }
+ ],
+ "timestamp": "2020-10-09T10:00:01Z"
+ }
+ ],
+ "events_count": 1,
+ "labels": [
+ "test"
+ ],
+ "leakspeed": "0.5s",
+ "message": "test",
+ "meta": [
+ {
+ "key": "test",
+ "value": "test"
+ }
+ ],
+ "scenario": "crowdsecurity/test",
+ "scenario_hash": "hashtest",
+ "scenario_version": "v1",
+ "simulated": false,
+ "source": {
+ "as_name": "test",
+ "as_number": "0123456",
+ "cn": "france",
+ "ip": "127.0.0.1",
+ "latitude": 46.227638,
+ "longitude": 2.213749,
+ "range": "127.0.0.1/32",
+ "scope": "ip",
+ "value": "127.0.0.1"
+ },
+ "start_at": "2020-10-09T10:00:01Z",
+ "stop_at": "2020-10-09T10:00:05Z"
+ }
+]
\ No newline at end of file
diff --git a/pkg/apiserver/tests/alert_bulk.json b/pkg/apiserver/tests/alert_bulk.json
new file mode 100644
index 000000000..1144417db
--- /dev/null
+++ b/pkg/apiserver/tests/alert_bulk.json
@@ -0,0 +1,22 @@
+[
+ {"capacity":5,"decisions":null,"events":[{"meta":[{"key":"ASNOrg","value":"OVH SAS"},{"key":"SourceRange","value":"91.121.72.0/21"},{"key":"target_user","value":"root"},{"key":"source_ip","value":"91.121.79.195"},{"key":"IsoCode","value":"FR"},{"key":"ASNNumber","value":"16276"},{"key":"service","value":"ssh"},{"key":"log_type","value":"ssh_failed-auth"},{"key":"IsInEU","value":"true"}],"timestamp":"2020-10-02T17:09:08Z"},{"meta":[{"key":"source_ip","value":"91.121.79.195"},{"key":"IsoCode","value":"FR"},{"key":"log_type","value":"ssh_failed-auth"},{"key":"service","value":"ssh"},{"key":"IsInEU","value":"true"},{"key":"ASNNumber","value":"16276"},{"key":"ASNOrg","value":"OVH 
SAS"},{"key":"SourceRange","value":"91.121.72.0/21"},{"key":"target_user","value":"root"}],"timestamp":"2020-10-02T17:09:08Z"},{"meta":[{"key":"target_user","value":"root"},{"key":"IsInEU","value":"true"},{"key":"ASNNumber","value":"16276"},{"key":"service","value":"ssh"},{"key":"log_type","value":"ssh_failed-auth"},{"key":"source_ip","value":"91.121.79.195"},{"key":"IsoCode","value":"FR"},{"key":"ASNOrg","value":"OVH SAS"},{"key":"SourceRange","value":"91.121.72.0/21"}],"timestamp":"2020-10-02T17:09:08Z"},{"meta":[{"key":"ASNNumber","value":"16276"},{"key":"ASNOrg","value":"OVH SAS"},{"key":"service","value":"ssh"},{"key":"source_ip","value":"91.121.79.195"},{"key":"IsoCode","value":"FR"},{"key":"SourceRange","value":"91.121.72.0/21"},{"key":"target_user","value":"root"},{"key":"log_type","value":"ssh_failed-auth"},{"key":"IsInEU","value":"true"}],"timestamp":"2020-10-02T17:09:08Z"},{"meta":[{"key":"SourceRange","value":"91.121.72.0/21"},{"key":"target_user","value":"root"},{"key":"service","value":"ssh"},{"key":"log_type","value":"ssh_failed-auth"},{"key":"source_ip","value":"91.121.79.195"},{"key":"IsoCode","value":"FR"},{"key":"IsInEU","value":"true"},{"key":"ASNNumber","value":"16276"},{"key":"ASNOrg","value":"OVH SAS"}],"timestamp":"2020-10-02T17:09:08Z"},{"meta":[{"key":"log_type","value":"ssh_failed-auth"},{"key":"source_ip","value":"91.121.79.195"},{"key":"ASNNumber","value":"16276"},{"key":"ASNOrg","value":"OVH SAS"},{"key":"SourceRange","value":"91.121.72.0/21"},{"key":"target_user","value":"root"},{"key":"service","value":"ssh"},{"key":"IsoCode","value":"FR"},{"key":"IsInEU","value":"true"}],"timestamp":"2020-10-02T17:09:08Z"}],"events_count":6,"labels":null,"leakspeed":"10s","message":"Ip 91.121.79.195 performed 'crowdsecurity/ssh-bf' (6 events over 46.375699ms) at 2020-10-26 12:52:58.200237122 +0100 CET m=+8.191478202","remediation":true,"scenario":"crowdsecurity/ssh-bf","scenario_hash":"4441dcff07020f6690d998b7101e642359ba405c2abb83565bbbdcee36de280f","scenario_version":"0.1","simulated":false,"source":{"as_name":"OVH SAS","cn":"FR","ip":"91.121.79.195","latitude":50.646,"longitude":3.0758,"range":"91.121.72.0/21","scope":"Ip","value":"91.121.79.195"},"start_at":"2020-10-26T12:52:58.153861334+01:00","stop_at":"2020-10-26T12:52:58.200236582+01:00"}, + 
{"capacity":5,"decisions":null,"events":[{"meta":[{"key":"IsoCode","value":"US"},{"key":"IsInEU","value":"false"},{"key":"ASNNumber","value":"0"},{"key":"target_user","value":"ruru"},{"key":"service","value":"ssh"},{"key":"log_type","value":"ssh_failed-auth"},{"key":"source_ip","value":"1.2.3.4"}],"timestamp":"2020-10-02T17:09:08Z"},{"meta":[{"key":"IsInEU","value":"false"},{"key":"ASNNumber","value":"0"},{"key":"target_user","value":"ruru"},{"key":"service","value":"ssh"},{"key":"log_type","value":"ssh_failed-auth"},{"key":"source_ip","value":"1.2.3.4"},{"key":"IsoCode","value":"US"}],"timestamp":"2020-10-02T17:09:08Z"},{"meta":[{"key":"target_user","value":"ruru"},{"key":"service","value":"ssh"},{"key":"log_type","value":"ssh_failed-auth"},{"key":"source_ip","value":"1.2.3.4"},{"key":"IsoCode","value":"US"},{"key":"IsInEU","value":"false"},{"key":"ASNNumber","value":"0"}],"timestamp":"2020-10-02T17:09:08Z"},{"meta":[{"key":"target_user","value":"ruru"},{"key":"service","value":"ssh"},{"key":"log_type","value":"ssh_failed-auth"},{"key":"source_ip","value":"1.2.3.4"},{"key":"IsoCode","value":"US"},{"key":"IsInEU","value":"false"},{"key":"ASNNumber","value":"0"}],"timestamp":"2020-10-02T17:09:08Z"},{"meta":[{"key":"service","value":"ssh"},{"key":"log_type","value":"ssh_failed-auth"},{"key":"source_ip","value":"1.2.3.4"},{"key":"IsoCode","value":"US"},{"key":"IsInEU","value":"false"},{"key":"ASNNumber","value":"0"},{"key":"target_user","value":"ruru"}],"timestamp":"2020-10-02T17:09:08Z"},{"meta":[{"key":"log_type","value":"ssh_failed-auth"},{"key":"source_ip","value":"1.2.3.4"},{"key":"IsoCode","value":"US"},{"key":"IsInEU","value":"false"},{"key":"ASNNumber","value":"0"},{"key":"target_user","value":"ruru"},{"key":"service","value":"ssh"}],"timestamp":"2020-10-02T17:09:08Z"}],"events_count":6,"labels":null,"leakspeed":"10s","message":"Ip 1.2.3.4 performed 'crowdsecurity/ssh-bf' (6 events over 41.41343ms) at 2020-10-26 12:54:48.786745305 +0100 CET m=+118.777986380","remediation":true,"scenario":"crowdsecurity/ssh-bf","scenario_hash":"4441dcff07020f6690d998b7101e642359ba405c2abb83565bbbdcee36de280f","scenario_version":"0.1","simulated":false,"source":{"cn":"US","ip":"1.2.3.4","latitude":47.913,"longitude":-122.3042,"scope":"Ip","value":"1.2.3.4"},"start_at":"2020-10-26T12:54:48.745331839+01:00","stop_at":"2020-10-26T12:54:48.786744746+01:00"}, + 
{"capacity":5,"decisions":null,"events":[{"meta":[{"key":"target_user","value":"rura"},{"key":"service","value":"ssh"},{"key":"log_type","value":"ssh_failed-auth"},{"key":"source_ip","value":"1.2.3.6"},{"key":"IsoCode","value":"US"},{"key":"IsInEU","value":"false"},{"key":"ASNNumber","value":"0"}],"timestamp":"2020-10-02T17:09:08Z"},{"meta":[{"key":"source_ip","value":"1.2.3.6"},{"key":"IsoCode","value":"US"},{"key":"IsInEU","value":"false"},{"key":"ASNNumber","value":"0"},{"key":"target_user","value":"rura"},{"key":"service","value":"ssh"},{"key":"log_type","value":"ssh_failed-auth"}],"timestamp":"2020-10-02T17:09:08Z"},{"meta":[{"key":"service","value":"ssh"},{"key":"log_type","value":"ssh_failed-auth"},{"key":"source_ip","value":"1.2.3.6"},{"key":"IsoCode","value":"US"},{"key":"IsInEU","value":"false"},{"key":"ASNNumber","value":"0"},{"key":"target_user","value":"rura"}],"timestamp":"2020-10-02T17:09:08Z"},{"meta":[{"key":"service","value":"ssh"},{"key":"log_type","value":"ssh_failed-auth"},{"key":"source_ip","value":"1.2.3.6"},{"key":"IsoCode","value":"US"},{"key":"IsInEU","value":"false"},{"key":"ASNNumber","value":"0"},{"key":"target_user","value":"rura"}],"timestamp":"2020-10-02T17:09:08Z"},{"meta":[{"key":"target_user","value":"rura"},{"key":"service","value":"ssh"},{"key":"log_type","value":"ssh_failed-auth"},{"key":"source_ip","value":"1.2.3.6"},{"key":"IsoCode","value":"US"},{"key":"IsInEU","value":"false"},{"key":"ASNNumber","value":"0"}],"timestamp":"2020-10-02T17:09:08Z"},{"meta":[{"key":"target_user","value":"rura"},{"key":"service","value":"ssh"},{"key":"log_type","value":"ssh_failed-auth"},{"key":"source_ip","value":"1.2.3.6"},{"key":"IsoCode","value":"US"},{"key":"IsInEU","value":"false"},{"key":"ASNNumber","value":"0"}],"timestamp":"2020-10-02T17:09:08Z"}],"events_count":6,"labels":null,"leakspeed":"10s","message":"Ip 1.2.3.6 performed 'crowdsecurity/ssh-bf' (6 events over 33.162937ms) at 2020-10-26 12:55:33.554883657 +0100 CET m=+163.546124740","remediation":true,"scenario":"crowdsecurity/ssh-bf","scenario_hash":"4441dcff07020f6690d998b7101e642359ba405c2abb83565bbbdcee36de280f","scenario_version":"0.1","simulated":false,"source":{"cn":"US","ip":"1.2.3.6","latitude":47.913,"longitude":-122.3042,"scope":"Ip","value":"1.2.3.6"},"start_at":"2020-10-26T12:55:33.521720645+01:00","stop_at":"2020-10-26T12:55:33.554882819+01:00"}, + {"capacity":5,"decisions":null,"events":[{"meta":[{"key":"ASNOrg","value":"OVH SAS"},{"key":"SourceRange","value":"91.121.72.0/21"},{"key":"target_user","value":"root"},{"key":"source_ip","value":"91.121.79.194"},{"key":"IsoCode","value":"FR"},{"key":"ASNNumber","value":"16276"},{"key":"service","value":"ssh"},{"key":"log_type","value":"ssh_failed-auth"},{"key":"IsInEU","value":"true"}],"timestamp":"2020-10-02T17:09:08Z"},{"meta":[{"key":"source_ip","value":"91.121.79.194"},{"key":"IsoCode","value":"FR"},{"key":"log_type","value":"ssh_failed-auth"},{"key":"service","value":"ssh"},{"key":"IsInEU","value":"true"},{"key":"ASNNumber","value":"16276"},{"key":"ASNOrg","value":"OVH SAS"},{"key":"SourceRange","value":"91.121.72.0/21"},{"key":"target_user","value":"root"}],"timestamp":"2020-10-02T17:09:08Z"},{"meta":[{"key":"target_user","value":"root"},{"key":"IsInEU","value":"true"},{"key":"ASNNumber","value":"16276"},{"key":"service","value":"ssh"},{"key":"log_type","value":"ssh_failed-auth"},{"key":"source_ip","value":"91.121.79.194"},{"key":"IsoCode","value":"FR"},{"key":"ASNOrg","value":"OVH 
SAS"},{"key":"SourceRange","value":"91.121.72.0/21"}],"timestamp":"2020-10-02T17:09:08Z"},{"meta":[{"key":"ASNNumber","value":"16276"},{"key":"ASNOrg","value":"OVH SAS"},{"key":"service","value":"ssh"},{"key":"source_ip","value":"91.121.79.194"},{"key":"IsoCode","value":"FR"},{"key":"SourceRange","value":"91.121.72.0/21"},{"key":"target_user","value":"root"},{"key":"log_type","value":"ssh_failed-auth"},{"key":"IsInEU","value":"true"}],"timestamp":"2020-10-02T17:09:08Z"},{"meta":[{"key":"SourceRange","value":"91.121.72.0/21"},{"key":"target_user","value":"root"},{"key":"service","value":"ssh"},{"key":"log_type","value":"ssh_failed-auth"},{"key":"source_ip","value":"91.121.79.194"},{"key":"IsoCode","value":"FR"},{"key":"IsInEU","value":"true"},{"key":"ASNNumber","value":"16276"},{"key":"ASNOrg","value":"OVH SAS"}],"timestamp":"2020-10-02T17:09:08Z"},{"meta":[{"key":"log_type","value":"ssh_failed-auth"},{"key":"source_ip","value":"91.121.79.194"},{"key":"ASNNumber","value":"16276"},{"key":"ASNOrg","value":"OVH SAS"},{"key":"SourceRange","value":"91.121.72.0/21"},{"key":"target_user","value":"root"},{"key":"service","value":"ssh"},{"key":"IsoCode","value":"FR"},{"key":"IsInEU","value":"true"}],"timestamp":"2020-10-02T17:09:08Z"}],"events_count":6,"labels":null,"leakspeed":"10s","message":"Ip 91.121.79.194 performed 'crowdsecurity/ssh-bf' (6 events over 46.375699ms) at 2020-10-26 12:52:58.200237122 +0100 CET m=+8.191478202","remediation":true,"scenario":"crowdsecurity/ssh-bf","scenario_hash":"4441dcff07020f6690d998b7101e642359ba405c2abb83565bbbdcee36de280f","scenario_version":"0.1","simulated":false,"source":{"as_name":"OVH SAS","cn":"FR","ip":"91.121.79.194","latitude":50.646,"longitude":3.0758,"range":"91.121.72.0/21","scope":"Ip","value":"91.121.79.194"},"start_at":"2020-10-26T12:52:58.153861334+01:00","stop_at":"2020-10-26T12:52:58.200236582+01:00"}, + {"capacity":5,"decisions":null,"events":[{"meta":[{"key":"ASNOrg","value":"OVH SAS"},{"key":"SourceRange","value":"91.121.72.0/21"},{"key":"target_user","value":"root"},{"key":"source_ip","value":"91.121.79.193"},{"key":"IsoCode","value":"FR"},{"key":"ASNNumber","value":"16276"},{"key":"service","value":"ssh"},{"key":"log_type","value":"ssh_failed-auth"},{"key":"IsInEU","value":"true"}],"timestamp":"2020-10-02T17:09:08Z"},{"meta":[{"key":"source_ip","value":"91.121.79.193"},{"key":"IsoCode","value":"FR"},{"key":"log_type","value":"ssh_failed-auth"},{"key":"service","value":"ssh"},{"key":"IsInEU","value":"true"},{"key":"ASNNumber","value":"16276"},{"key":"ASNOrg","value":"OVH SAS"},{"key":"SourceRange","value":"91.121.72.0/21"},{"key":"target_user","value":"root"}],"timestamp":"2020-10-02T17:09:08Z"},{"meta":[{"key":"target_user","value":"root"},{"key":"IsInEU","value":"true"},{"key":"ASNNumber","value":"16276"},{"key":"service","value":"ssh"},{"key":"log_type","value":"ssh_failed-auth"},{"key":"source_ip","value":"91.121.79.193"},{"key":"IsoCode","value":"FR"},{"key":"ASNOrg","value":"OVH SAS"},{"key":"SourceRange","value":"91.121.72.0/21"}],"timestamp":"2020-10-02T17:09:08Z"},{"meta":[{"key":"ASNNumber","value":"16276"},{"key":"ASNOrg","value":"OVH 
SAS"},{"key":"service","value":"ssh"},{"key":"source_ip","value":"91.121.79.193"},{"key":"IsoCode","value":"FR"},{"key":"SourceRange","value":"91.121.72.0/21"},{"key":"target_user","value":"root"},{"key":"log_type","value":"ssh_failed-auth"},{"key":"IsInEU","value":"true"}],"timestamp":"2020-10-02T17:09:08Z"},{"meta":[{"key":"SourceRange","value":"91.121.72.0/21"},{"key":"target_user","value":"root"},{"key":"service","value":"ssh"},{"key":"log_type","value":"ssh_failed-auth"},{"key":"source_ip","value":"91.121.79.193"},{"key":"IsoCode","value":"FR"},{"key":"IsInEU","value":"true"},{"key":"ASNNumber","value":"16276"},{"key":"ASNOrg","value":"OVH SAS"}],"timestamp":"2020-10-02T17:09:08Z"},{"meta":[{"key":"log_type","value":"ssh_failed-auth"},{"key":"source_ip","value":"91.121.79.193"},{"key":"ASNNumber","value":"16276"},{"key":"ASNOrg","value":"OVH SAS"},{"key":"SourceRange","value":"91.121.72.0/21"},{"key":"target_user","value":"root"},{"key":"service","value":"ssh"},{"key":"IsoCode","value":"FR"},{"key":"IsInEU","value":"true"}],"timestamp":"2020-10-02T17:09:08Z"}],"events_count":6,"labels":null,"leakspeed":"10s","message":"Ip 91.121.79.193 performed crowdsecurity/ssh-bf (6 events over 46.375699ms) at 2020-10-26 12:52:58.200237122 +0100 CET m=+8.191478202","remediation":true,"scenario":"crowdsecurity/ssh-bf","scenario_hash":"4441dcff07020f6690d998b7101e642359ba405c2abb83565bbbdcee36de280f","scenario_version":"0.1","simulated":false,"source":{"as_name":"OVH SAS","cn":"FR","ip":"91.121.79.193","latitude":50.646,"longitude":3.0758,"range":"91.121.72.0/21","scope":"Ip","value":"91.121.79.193"},"start_at":"2020-10-26T12:52:58.153861334+01:00","stop_at":"2020-10-26T12:52:58.200236582+01:00"}, + {"capacity":5,"decisions":null,"events":[{"meta":[{"key":"ASNOrg","value":"OVH SAS"},{"key":"SourceRange","value":"91.121.72.0/21"},{"key":"target_user","value":"root"},{"key":"source_ip","value":"91.121.79.192"},{"key":"IsoCode","value":"FR"},{"key":"ASNNumber","value":"16276"},{"key":"service","value":"ssh"},{"key":"log_type","value":"ssh_failed-auth"},{"key":"IsInEU","value":"true"}],"timestamp":"2020-10-02T17:09:08Z"},{"meta":[{"key":"source_ip","value":"91.121.79.192"},{"key":"IsoCode","value":"FR"},{"key":"log_type","value":"ssh_failed-auth"},{"key":"service","value":"ssh"},{"key":"IsInEU","value":"true"},{"key":"ASNNumber","value":"16276"},{"key":"ASNOrg","value":"OVH SAS"},{"key":"SourceRange","value":"91.121.72.0/21"},{"key":"target_user","value":"root"}],"timestamp":"2020-10-02T17:09:08Z"},{"meta":[{"key":"target_user","value":"root"},{"key":"IsInEU","value":"true"},{"key":"ASNNumber","value":"16276"},{"key":"service","value":"ssh"},{"key":"log_type","value":"ssh_failed-auth"},{"key":"source_ip","value":"91.121.79.192"},{"key":"IsoCode","value":"FR"},{"key":"ASNOrg","value":"OVH SAS"},{"key":"SourceRange","value":"91.121.72.0/21"}],"timestamp":"2020-10-02T17:09:08Z"},{"meta":[{"key":"ASNNumber","value":"16276"},{"key":"ASNOrg","value":"OVH 
SAS"},{"key":"service","value":"ssh"},{"key":"source_ip","value":"91.121.79.192"},{"key":"IsoCode","value":"FR"},{"key":"SourceRange","value":"91.121.72.0/21"},{"key":"target_user","value":"root"},{"key":"log_type","value":"ssh_failed-auth"},{"key":"IsInEU","value":"true"}],"timestamp":"2020-10-02T17:09:08Z"},{"meta":[{"key":"SourceRange","value":"91.121.72.0/21"},{"key":"target_user","value":"root"},{"key":"service","value":"ssh"},{"key":"log_type","value":"ssh_failed-auth"},{"key":"source_ip","value":"91.121.79.192"},{"key":"IsoCode","value":"FR"},{"key":"IsInEU","value":"true"},{"key":"ASNNumber","value":"16276"},{"key":"ASNOrg","value":"OVH SAS"}],"timestamp":"2020-10-02T17:09:08Z"},{"meta":[{"key":"log_type","value":"ssh_failed-auth"},{"key":"source_ip","value":"91.121.79.192"},{"key":"ASNNumber","value":"16276"},{"key":"ASNOrg","value":"OVH SAS"},{"key":"SourceRange","value":"91.121.72.0/21"},{"key":"target_user","value":"root"},{"key":"service","value":"ssh"},{"key":"IsoCode","value":"FR"},{"key":"IsInEU","value":"true"}],"timestamp":"2020-10-02T17:09:08Z"}],"events_count":6,"labels":null,"leakspeed":"10s","message":"Ip 91.121.79.192 performed crowdsecurity/ssh-bf (6 events over 46.375699ms) at 2020-10-26 12:52:58.200237122 +0100 CET m=+8.191478202","remediation":true,"scenario":"crowdsecurity/ssh-bf","scenario_hash":"4441dcff07020f6690d998b7101e642359ba405c2abb83565bbbdcee36de280f","scenario_version":"0.1","simulated":false,"source":{"as_name":"OVH SAS","cn":"FR","ip":"91.121.79.192","latitude":50.646,"longitude":3.0758,"range":"91.121.72.0/21","scope":"Ip","value":"91.121.79.192"},"start_at":"2020-10-26T12:52:58.153861334+01:00","stop_at":"2020-10-26T12:52:58.200236582+01:00"}, + {"capacity":5,"decisions":null,"events":[{"meta":[{"key":"ASNOrg","value":"OVH SAS"},{"key":"SourceRange","value":"91.121.72.0/21"},{"key":"target_user","value":"root"},{"key":"source_ip","value":"91.121.79.191"},{"key":"IsoCode","value":"FR"},{"key":"ASNNumber","value":"16276"},{"key":"service","value":"ssh"},{"key":"log_type","value":"ssh_failed-auth"},{"key":"IsInEU","value":"true"}],"timestamp":"2020-10-02T17:09:08Z"},{"meta":[{"key":"source_ip","value":"91.121.79.191"},{"key":"IsoCode","value":"FR"},{"key":"log_type","value":"ssh_failed-auth"},{"key":"service","value":"ssh"},{"key":"IsInEU","value":"true"},{"key":"ASNNumber","value":"16276"},{"key":"ASNOrg","value":"OVH SAS"},{"key":"SourceRange","value":"91.121.72.0/21"},{"key":"target_user","value":"root"}],"timestamp":"2020-10-02T17:09:08Z"},{"meta":[{"key":"target_user","value":"root"},{"key":"IsInEU","value":"true"},{"key":"ASNNumber","value":"16276"},{"key":"service","value":"ssh"},{"key":"log_type","value":"ssh_failed-auth"},{"key":"source_ip","value":"91.121.79.191"},{"key":"IsoCode","value":"FR"},{"key":"ASNOrg","value":"OVH SAS"},{"key":"SourceRange","value":"91.121.72.0/21"}],"timestamp":"2020-10-02T17:09:08Z"},{"meta":[{"key":"ASNNumber","value":"16276"},{"key":"ASNOrg","value":"OVH 
SAS"},{"key":"service","value":"ssh"},{"key":"source_ip","value":"91.121.79.191"},{"key":"IsoCode","value":"FR"},{"key":"SourceRange","value":"91.121.72.0/21"},{"key":"target_user","value":"root"},{"key":"log_type","value":"ssh_failed-auth"},{"key":"IsInEU","value":"true"}],"timestamp":"2020-10-02T17:09:08Z"},{"meta":[{"key":"SourceRange","value":"91.121.72.0/21"},{"key":"target_user","value":"root"},{"key":"service","value":"ssh"},{"key":"log_type","value":"ssh_failed-auth"},{"key":"source_ip","value":"91.121.79.191"},{"key":"IsoCode","value":"FR"},{"key":"IsInEU","value":"true"},{"key":"ASNNumber","value":"16276"},{"key":"ASNOrg","value":"OVH SAS"}],"timestamp":"2020-10-02T17:09:08Z"},{"meta":[{"key":"log_type","value":"ssh_failed-auth"},{"key":"source_ip","value":"91.121.79.191"},{"key":"ASNNumber","value":"16276"},{"key":"ASNOrg","value":"OVH SAS"},{"key":"SourceRange","value":"91.121.72.0/21"},{"key":"target_user","value":"root"},{"key":"service","value":"ssh"},{"key":"IsoCode","value":"FR"},{"key":"IsInEU","value":"true"}],"timestamp":"2020-10-02T17:09:08Z"}],"events_count":6,"labels":null,"leakspeed":"10s","message":"Ip 91.121.79.191 performed crowdsecurity/ssh-bf (6 events over 46.375699ms) at 2020-10-26 12:52:58.200237122 +0100 CET m=+8.191478202","remediation":true,"scenario":"crowdsecurity/ssh-bf","scenario_hash":"4441dcff07020f6690d998b7101e642359ba405c2abb83565bbbdcee36de280f","scenario_version":"0.1","simulated":false,"source":{"as_name":"OVH SAS","cn":"FR","ip":"91.121.79.191","latitude":50.646,"longitude":3.0758,"range":"91.121.72.0/21","scope":"Ip","value":"91.121.79.191"},"start_at":"2020-10-26T12:52:58.153861334+01:00","stop_at":"2020-10-26T12:52:58.200236582+01:00"}, + {"capacity":5,"decisions":null,"events":[{"meta":[{"key":"ASNOrg","value":"OVH SAS"},{"key":"SourceRange","value":"91.121.72.0/21"},{"key":"target_user","value":"root"},{"key":"source_ip","value":"91.121.79.190"},{"key":"IsoCode","value":"FR"},{"key":"ASNNumber","value":"16276"},{"key":"service","value":"ssh"},{"key":"log_type","value":"ssh_failed-auth"},{"key":"IsInEU","value":"true"}],"timestamp":"2020-10-02T17:09:08Z"},{"meta":[{"key":"source_ip","value":"91.121.79.190"},{"key":"IsoCode","value":"FR"},{"key":"log_type","value":"ssh_failed-auth"},{"key":"service","value":"ssh"},{"key":"IsInEU","value":"true"},{"key":"ASNNumber","value":"16276"},{"key":"ASNOrg","value":"OVH SAS"},{"key":"SourceRange","value":"91.121.72.0/21"},{"key":"target_user","value":"root"}],"timestamp":"2020-10-02T17:09:08Z"},{"meta":[{"key":"target_user","value":"root"},{"key":"IsInEU","value":"true"},{"key":"ASNNumber","value":"16276"},{"key":"service","value":"ssh"},{"key":"log_type","value":"ssh_failed-auth"},{"key":"source_ip","value":"91.121.79.190"},{"key":"IsoCode","value":"FR"},{"key":"ASNOrg","value":"OVH SAS"},{"key":"SourceRange","value":"91.121.72.0/21"}],"timestamp":"2020-10-02T17:09:08Z"},{"meta":[{"key":"ASNNumber","value":"16276"},{"key":"ASNOrg","value":"OVH 
SAS"},{"key":"service","value":"ssh"},{"key":"source_ip","value":"91.121.79.190"},{"key":"IsoCode","value":"FR"},{"key":"SourceRange","value":"91.121.72.0/21"},{"key":"target_user","value":"root"},{"key":"log_type","value":"ssh_failed-auth"},{"key":"IsInEU","value":"true"}],"timestamp":"2020-10-02T17:09:08Z"},{"meta":[{"key":"SourceRange","value":"91.121.72.0/21"},{"key":"target_user","value":"root"},{"key":"service","value":"ssh"},{"key":"log_type","value":"ssh_failed-auth"},{"key":"source_ip","value":"91.121.79.190"},{"key":"IsoCode","value":"FR"},{"key":"IsInEU","value":"true"},{"key":"ASNNumber","value":"16276"},{"key":"ASNOrg","value":"OVH SAS"}],"timestamp":"2020-10-02T17:09:08Z"},{"meta":[{"key":"log_type","value":"ssh_failed-auth"},{"key":"source_ip","value":"91.121.79.190"},{"key":"ASNNumber","value":"16276"},{"key":"ASNOrg","value":"OVH SAS"},{"key":"SourceRange","value":"91.121.72.0/21"},{"key":"target_user","value":"root"},{"key":"service","value":"ssh"},{"key":"IsoCode","value":"FR"},{"key":"IsInEU","value":"true"}],"timestamp":"2020-10-02T17:09:08Z"}],"events_count":6,"labels":null,"leakspeed":"10s","message":"Ip 91.121.79.190 performed crowdsecurity/ssh-bf (6 events over 46.375699ms) at 2020-10-26 12:52:58.200237122 +0100 CET m=+8.191478202","remediation":true,"scenario":"crowdsecurity/ssh-bf","scenario_hash":"4441dcff07020f6690d998b7101e642359ba405c2abb83565bbbdcee36de280f","scenario_version":"0.1","simulated":false,"source":{"as_name":"OVH SAS","cn":"FR","ip":"91.121.79.190","latitude":50.646,"longitude":3.0758,"range":"91.121.72.0/21","scope":"Ip","value":"91.121.79.190"},"start_at":"2020-10-26T12:52:58.153861334+01:00","stop_at":"2020-10-26T12:52:58.200236582+01:00"}, + {"capacity":5,"decisions":null,"events":[{"meta":[{"key":"ASNOrg","value":"OVH SAS"},{"key":"SourceRange","value":"91.121.72.0/21"},{"key":"target_user","value":"root"},{"key":"source_ip","value":"91.121.79.189"},{"key":"IsoCode","value":"FR"},{"key":"ASNNumber","value":"16276"},{"key":"service","value":"ssh"},{"key":"log_type","value":"ssh_failed-auth"},{"key":"IsInEU","value":"true"}],"timestamp":"2020-10-02T17:09:08Z"},{"meta":[{"key":"source_ip","value":"91.121.79.189"},{"key":"IsoCode","value":"FR"},{"key":"log_type","value":"ssh_failed-auth"},{"key":"service","value":"ssh"},{"key":"IsInEU","value":"true"},{"key":"ASNNumber","value":"16276"},{"key":"ASNOrg","value":"OVH SAS"},{"key":"SourceRange","value":"91.121.72.0/21"},{"key":"target_user","value":"root"}],"timestamp":"2020-10-02T17:09:08Z"},{"meta":[{"key":"target_user","value":"root"},{"key":"IsInEU","value":"true"},{"key":"ASNNumber","value":"16276"},{"key":"service","value":"ssh"},{"key":"log_type","value":"ssh_failed-auth"},{"key":"source_ip","value":"91.121.79.189"},{"key":"IsoCode","value":"FR"},{"key":"ASNOrg","value":"OVH SAS"},{"key":"SourceRange","value":"91.121.72.0/21"}],"timestamp":"2020-10-02T17:09:08Z"},{"meta":[{"key":"ASNNumber","value":"16276"},{"key":"ASNOrg","value":"OVH 
SAS"},{"key":"service","value":"ssh"},{"key":"source_ip","value":"91.121.79.189"},{"key":"IsoCode","value":"FR"},{"key":"SourceRange","value":"91.121.72.0/21"},{"key":"target_user","value":"root"},{"key":"log_type","value":"ssh_failed-auth"},{"key":"IsInEU","value":"true"}],"timestamp":"2020-10-02T17:09:08Z"},{"meta":[{"key":"SourceRange","value":"91.121.72.0/21"},{"key":"target_user","value":"root"},{"key":"service","value":"ssh"},{"key":"log_type","value":"ssh_failed-auth"},{"key":"source_ip","value":"91.121.79.189"},{"key":"IsoCode","value":"FR"},{"key":"IsInEU","value":"true"},{"key":"ASNNumber","value":"16276"},{"key":"ASNOrg","value":"OVH SAS"}],"timestamp":"2020-10-02T17:09:08Z"},{"meta":[{"key":"log_type","value":"ssh_failed-auth"},{"key":"source_ip","value":"91.121.79.189"},{"key":"ASNNumber","value":"16276"},{"key":"ASNOrg","value":"OVH SAS"},{"key":"SourceRange","value":"91.121.72.0/21"},{"key":"target_user","value":"root"},{"key":"service","value":"ssh"},{"key":"IsoCode","value":"FR"},{"key":"IsInEU","value":"true"}],"timestamp":"2020-10-02T17:09:08Z"}],"events_count":6,"labels":null,"leakspeed":"10s","message":"Ip 91.121.79.189 performed crowdsecurity/ssh-bf (6 events over 46.375699ms) at 2020-10-26 12:52:58.200237122 +0100 CET m=+8.191478202","remediation":true,"scenario":"crowdsecurity/ssh-bf","scenario_hash":"4441dcff07020f6690d998b7101e642359ba405c2abb83565bbbdcee36de280f","scenario_version":"0.1","simulated":false,"source":{"as_name":"OVH SAS","cn":"FR","ip":"91.121.79.189","latitude":50.646,"longitude":3.0758,"range":"91.121.72.0/21","scope":"Ip","value":"91.121.79.189"},"start_at":"2020-10-26T12:52:58.153861334+01:00","stop_at":"2020-10-26T12:52:58.200236582+01:00"}, + {"capacity":5,"decisions":null,"events":[{"meta":[{"key":"ASNOrg","value":"OVH SAS"},{"key":"SourceRange","value":"91.121.72.0/21"},{"key":"target_user","value":"root"},{"key":"source_ip","value":"91.121.79.188"},{"key":"IsoCode","value":"FR"},{"key":"ASNNumber","value":"16276"},{"key":"service","value":"ssh"},{"key":"log_type","value":"ssh_failed-auth"},{"key":"IsInEU","value":"true"}],"timestamp":"2020-10-02T17:09:08Z"},{"meta":[{"key":"source_ip","value":"91.121.79.188"},{"key":"IsoCode","value":"FR"},{"key":"log_type","value":"ssh_failed-auth"},{"key":"service","value":"ssh"},{"key":"IsInEU","value":"true"},{"key":"ASNNumber","value":"16276"},{"key":"ASNOrg","value":"OVH SAS"},{"key":"SourceRange","value":"91.121.72.0/21"},{"key":"target_user","value":"root"}],"timestamp":"2020-10-02T17:09:08Z"},{"meta":[{"key":"target_user","value":"root"},{"key":"IsInEU","value":"true"},{"key":"ASNNumber","value":"16276"},{"key":"service","value":"ssh"},{"key":"log_type","value":"ssh_failed-auth"},{"key":"source_ip","value":"91.121.79.188"},{"key":"IsoCode","value":"FR"},{"key":"ASNOrg","value":"OVH SAS"},{"key":"SourceRange","value":"91.121.72.0/21"}],"timestamp":"2020-10-02T17:09:08Z"},{"meta":[{"key":"ASNNumber","value":"16276"},{"key":"ASNOrg","value":"OVH 
SAS"},{"key":"service","value":"ssh"},{"key":"source_ip","value":"91.121.79.188"},{"key":"IsoCode","value":"FR"},{"key":"SourceRange","value":"91.121.72.0/21"},{"key":"target_user","value":"root"},{"key":"log_type","value":"ssh_failed-auth"},{"key":"IsInEU","value":"true"}],"timestamp":"2020-10-02T17:09:08Z"},{"meta":[{"key":"SourceRange","value":"91.121.72.0/21"},{"key":"target_user","value":"root"},{"key":"service","value":"ssh"},{"key":"log_type","value":"ssh_failed-auth"},{"key":"source_ip","value":"91.121.79.188"},{"key":"IsoCode","value":"FR"},{"key":"IsInEU","value":"true"},{"key":"ASNNumber","value":"16276"},{"key":"ASNOrg","value":"OVH SAS"}],"timestamp":"2020-10-02T17:09:08Z"},{"meta":[{"key":"log_type","value":"ssh_failed-auth"},{"key":"source_ip","value":"91.121.79.188"},{"key":"ASNNumber","value":"16276"},{"key":"ASNOrg","value":"OVH SAS"},{"key":"SourceRange","value":"91.121.72.0/21"},{"key":"target_user","value":"root"},{"key":"service","value":"ssh"},{"key":"IsoCode","value":"FR"},{"key":"IsInEU","value":"true"}],"timestamp":"2020-10-02T17:09:08Z"}],"events_count":6,"labels":null,"leakspeed":"10s","message":"Ip 91.121.79.188 performed crowdsecurity/ssh-bf (6 events over 46.375699ms) at 2020-10-26 12:52:58.200237122 +0100 CET m=+8.191478202","remediation":true,"scenario":"crowdsecurity/ssh-bf","scenario_hash":"4441dcff07020f6690d998b7101e642359ba405c2abb83565bbbdcee36de280f","scenario_version":"0.1","simulated":false,"source":{"as_name":"OVH SAS","cn":"FR","ip":"91.121.79.188","latitude":50.646,"longitude":3.0758,"range":"91.121.72.0/21","scope":"Ip","value":"91.121.79.188"},"start_at":"2020-10-26T12:52:58.153861334+01:00","stop_at":"2020-10-26T12:52:58.200236582+01:00"}, + {"capacity":5,"decisions":null,"events":[{"meta":[{"key":"ASNOrg","value":"OVH SAS"},{"key":"SourceRange","value":"91.121.72.0/21"},{"key":"target_user","value":"root"},{"key":"source_ip","value":"91.121.79.187"},{"key":"IsoCode","value":"FR"},{"key":"ASNNumber","value":"16276"},{"key":"service","value":"ssh"},{"key":"log_type","value":"ssh_failed-auth"},{"key":"IsInEU","value":"true"}],"timestamp":"2020-10-02T17:09:08Z"},{"meta":[{"key":"source_ip","value":"91.121.79.187"},{"key":"IsoCode","value":"FR"},{"key":"log_type","value":"ssh_failed-auth"},{"key":"service","value":"ssh"},{"key":"IsInEU","value":"true"},{"key":"ASNNumber","value":"16276"},{"key":"ASNOrg","value":"OVH SAS"},{"key":"SourceRange","value":"91.121.72.0/21"},{"key":"target_user","value":"root"}],"timestamp":"2020-10-02T17:09:08Z"},{"meta":[{"key":"target_user","value":"root"},{"key":"IsInEU","value":"true"},{"key":"ASNNumber","value":"16276"},{"key":"service","value":"ssh"},{"key":"log_type","value":"ssh_failed-auth"},{"key":"source_ip","value":"91.121.79.187"},{"key":"IsoCode","value":"FR"},{"key":"ASNOrg","value":"OVH SAS"},{"key":"SourceRange","value":"91.121.72.0/21"}],"timestamp":"2020-10-02T17:09:08Z"},{"meta":[{"key":"ASNNumber","value":"16276"},{"key":"ASNOrg","value":"OVH 
SAS"},{"key":"service","value":"ssh"},{"key":"source_ip","value":"91.121.79.187"},{"key":"IsoCode","value":"FR"},{"key":"SourceRange","value":"91.121.72.0/21"},{"key":"target_user","value":"root"},{"key":"log_type","value":"ssh_failed-auth"},{"key":"IsInEU","value":"true"}],"timestamp":"2020-10-02T17:09:08Z"},{"meta":[{"key":"SourceRange","value":"91.121.72.0/21"},{"key":"target_user","value":"root"},{"key":"service","value":"ssh"},{"key":"log_type","value":"ssh_failed-auth"},{"key":"source_ip","value":"91.121.79.187"},{"key":"IsoCode","value":"FR"},{"key":"IsInEU","value":"true"},{"key":"ASNNumber","value":"16276"},{"key":"ASNOrg","value":"OVH SAS"}],"timestamp":"2020-10-02T17:09:08Z"},{"meta":[{"key":"log_type","value":"ssh_failed-auth"},{"key":"source_ip","value":"91.121.79.187"},{"key":"ASNNumber","value":"16276"},{"key":"ASNOrg","value":"OVH SAS"},{"key":"SourceRange","value":"91.121.72.0/21"},{"key":"target_user","value":"root"},{"key":"service","value":"ssh"},{"key":"IsoCode","value":"FR"},{"key":"IsInEU","value":"true"}],"timestamp":"2020-10-02T17:09:08Z"}],"events_count":6,"labels":null,"leakspeed":"10s","message":"Ip 91.121.79.187 performed crowdsecurity/ssh-bf (6 events over 46.375699ms) at 2020-10-26 12:52:58.200237122 +0100 CET m=+8.191478202","remediation":true,"scenario":"crowdsecurity/ssh-bf","scenario_hash":"4441dcff07020f6690d998b7101e642359ba405c2abb83565bbbdcee36de280f","scenario_version":"0.1","simulated":false,"source":{"as_name":"OVH SAS","cn":"FR","ip":"91.121.79.187","latitude":50.646,"longitude":3.0758,"range":"91.121.72.0/21","scope":"Ip","value":"91.121.79.187"},"start_at":"2020-10-26T12:52:58.153861334+01:00","stop_at":"2020-10-26T12:52:58.200236582+01:00"}, + {"capacity":5,"decisions":null,"events":[{"meta":[{"key":"ASNOrg","value":"OVH SAS"},{"key":"SourceRange","value":"91.121.72.0/21"},{"key":"target_user","value":"root"},{"key":"source_ip","value":"91.121.79.186"},{"key":"IsoCode","value":"FR"},{"key":"ASNNumber","value":"16276"},{"key":"service","value":"ssh"},{"key":"log_type","value":"ssh_failed-auth"},{"key":"IsInEU","value":"true"}],"timestamp":"2020-10-02T17:09:08Z"},{"meta":[{"key":"source_ip","value":"91.121.79.186"},{"key":"IsoCode","value":"FR"},{"key":"log_type","value":"ssh_failed-auth"},{"key":"service","value":"ssh"},{"key":"IsInEU","value":"true"},{"key":"ASNNumber","value":"16276"},{"key":"ASNOrg","value":"OVH SAS"},{"key":"SourceRange","value":"91.121.72.0/21"},{"key":"target_user","value":"root"}],"timestamp":"2020-10-02T17:09:08Z"},{"meta":[{"key":"target_user","value":"root"},{"key":"IsInEU","value":"true"},{"key":"ASNNumber","value":"16276"},{"key":"service","value":"ssh"},{"key":"log_type","value":"ssh_failed-auth"},{"key":"source_ip","value":"91.121.79.186"},{"key":"IsoCode","value":"FR"},{"key":"ASNOrg","value":"OVH SAS"},{"key":"SourceRange","value":"91.121.72.0/21"}],"timestamp":"2020-10-02T17:09:08Z"},{"meta":[{"key":"ASNNumber","value":"16276"},{"key":"ASNOrg","value":"OVH 
SAS"},{"key":"service","value":"ssh"},{"key":"source_ip","value":"91.121.79.186"},{"key":"IsoCode","value":"FR"},{"key":"SourceRange","value":"91.121.72.0/21"},{"key":"target_user","value":"root"},{"key":"log_type","value":"ssh_failed-auth"},{"key":"IsInEU","value":"true"}],"timestamp":"2020-10-02T17:09:08Z"},{"meta":[{"key":"SourceRange","value":"91.121.72.0/21"},{"key":"target_user","value":"root"},{"key":"service","value":"ssh"},{"key":"log_type","value":"ssh_failed-auth"},{"key":"source_ip","value":"91.121.79.186"},{"key":"IsoCode","value":"FR"},{"key":"IsInEU","value":"true"},{"key":"ASNNumber","value":"16276"},{"key":"ASNOrg","value":"OVH SAS"}],"timestamp":"2020-10-02T17:09:08Z"},{"meta":[{"key":"log_type","value":"ssh_failed-auth"},{"key":"source_ip","value":"91.121.79.186"},{"key":"ASNNumber","value":"16276"},{"key":"ASNOrg","value":"OVH SAS"},{"key":"SourceRange","value":"91.121.72.0/21"},{"key":"target_user","value":"root"},{"key":"service","value":"ssh"},{"key":"IsoCode","value":"FR"},{"key":"IsInEU","value":"true"}],"timestamp":"2020-10-02T17:09:08Z"}],"events_count":6,"labels":null,"leakspeed":"10s","message":"Ip 91.121.79.186 performed crowdsecurity/ssh-bf (6 events over 46.375699ms) at 2020-10-26 12:52:58.200237122 +0100 CET m=+8.191478202","remediation":true,"scenario":"crowdsecurity/ssh-bf","scenario_hash":"4441dcff07020f6690d998b7101e642359ba405c2abb83565bbbdcee36de280f","scenario_version":"0.1","simulated":false,"source":{"as_name":"OVH SAS","cn":"FR","ip":"91.121.79.186","latitude":50.646,"longitude":3.0758,"range":"91.121.72.0/21","scope":"Ip","value":"91.121.79.186"},"start_at":"2020-10-26T12:52:58.153861334+01:00","stop_at":"2020-10-26T12:52:58.200236582+01:00"}, + {"capacity":5,"decisions":null,"events":[{"meta":[{"key":"ASNOrg","value":"OVH SAS"},{"key":"SourceRange","value":"91.121.72.0/21"},{"key":"target_user","value":"root"},{"key":"source_ip","value":"91.121.79.185"},{"key":"IsoCode","value":"FR"},{"key":"ASNNumber","value":"16276"},{"key":"service","value":"ssh"},{"key":"log_type","value":"ssh_failed-auth"},{"key":"IsInEU","value":"true"}],"timestamp":"2020-10-02T17:09:08Z"},{"meta":[{"key":"source_ip","value":"91.121.79.185"},{"key":"IsoCode","value":"FR"},{"key":"log_type","value":"ssh_failed-auth"},{"key":"service","value":"ssh"},{"key":"IsInEU","value":"true"},{"key":"ASNNumber","value":"16276"},{"key":"ASNOrg","value":"OVH SAS"},{"key":"SourceRange","value":"91.121.72.0/21"},{"key":"target_user","value":"root"}],"timestamp":"2020-10-02T17:09:08Z"},{"meta":[{"key":"target_user","value":"root"},{"key":"IsInEU","value":"true"},{"key":"ASNNumber","value":"16276"},{"key":"service","value":"ssh"},{"key":"log_type","value":"ssh_failed-auth"},{"key":"source_ip","value":"91.121.79.185"},{"key":"IsoCode","value":"FR"},{"key":"ASNOrg","value":"OVH SAS"},{"key":"SourceRange","value":"91.121.72.0/21"}],"timestamp":"2020-10-02T17:09:08Z"},{"meta":[{"key":"ASNNumber","value":"16276"},{"key":"ASNOrg","value":"OVH 
SAS"},{"key":"service","value":"ssh"},{"key":"source_ip","value":"91.121.79.185"},{"key":"IsoCode","value":"FR"},{"key":"SourceRange","value":"91.121.72.0/21"},{"key":"target_user","value":"root"},{"key":"log_type","value":"ssh_failed-auth"},{"key":"IsInEU","value":"true"}],"timestamp":"2020-10-02T17:09:08Z"},{"meta":[{"key":"SourceRange","value":"91.121.72.0/21"},{"key":"target_user","value":"root"},{"key":"service","value":"ssh"},{"key":"log_type","value":"ssh_failed-auth"},{"key":"source_ip","value":"91.121.79.185"},{"key":"IsoCode","value":"FR"},{"key":"IsInEU","value":"true"},{"key":"ASNNumber","value":"16276"},{"key":"ASNOrg","value":"OVH SAS"}],"timestamp":"2020-10-02T17:09:08Z"},{"meta":[{"key":"log_type","value":"ssh_failed-auth"},{"key":"source_ip","value":"91.121.79.185"},{"key":"ASNNumber","value":"16276"},{"key":"ASNOrg","value":"OVH SAS"},{"key":"SourceRange","value":"91.121.72.0/21"},{"key":"target_user","value":"root"},{"key":"service","value":"ssh"},{"key":"IsoCode","value":"FR"},{"key":"IsInEU","value":"true"}],"timestamp":"2020-10-02T17:09:08Z"}],"events_count":6,"labels":null,"leakspeed":"10s","message":"Ip 91.121.79.185 performed crowdsecurity/ssh-bf (6 events over 46.375699ms) at 2020-10-26 12:52:58.200237122 +0100 CET m=+8.191478202","remediation":true,"scenario":"crowdsecurity/ssh-bf","scenario_hash":"4441dcff07020f6690d998b7101e642359ba405c2abb83565bbbdcee36de280f","scenario_version":"0.1","simulated":false,"source":{"as_name":"OVH SAS","cn":"FR","ip":"91.121.79.185","latitude":50.646,"longitude":3.0758,"range":"91.121.72.0/21","scope":"Ip","value":"91.121.79.185"},"start_at":"2020-10-26T12:52:58.153861334+01:00","stop_at":"2020-10-26T12:52:58.200236582+01:00"}, + {"capacity":5,"decisions":null,"events":[{"meta":[{"key":"ASNOrg","value":"OVH SAS"},{"key":"SourceRange","value":"91.121.72.0/21"},{"key":"target_user","value":"root"},{"key":"source_ip","value":"91.121.79.184"},{"key":"IsoCode","value":"FR"},{"key":"ASNNumber","value":"16276"},{"key":"service","value":"ssh"},{"key":"log_type","value":"ssh_failed-auth"},{"key":"IsInEU","value":"true"}],"timestamp":"2020-10-02T17:09:08Z"},{"meta":[{"key":"source_ip","value":"91.121.79.184"},{"key":"IsoCode","value":"FR"},{"key":"log_type","value":"ssh_failed-auth"},{"key":"service","value":"ssh"},{"key":"IsInEU","value":"true"},{"key":"ASNNumber","value":"16276"},{"key":"ASNOrg","value":"OVH SAS"},{"key":"SourceRange","value":"91.121.72.0/21"},{"key":"target_user","value":"root"}],"timestamp":"2020-10-02T17:09:08Z"},{"meta":[{"key":"target_user","value":"root"},{"key":"IsInEU","value":"true"},{"key":"ASNNumber","value":"16276"},{"key":"service","value":"ssh"},{"key":"log_type","value":"ssh_failed-auth"},{"key":"source_ip","value":"91.121.79.184"},{"key":"IsoCode","value":"FR"},{"key":"ASNOrg","value":"OVH SAS"},{"key":"SourceRange","value":"91.121.72.0/21"}],"timestamp":"2020-10-02T17:09:08Z"},{"meta":[{"key":"ASNNumber","value":"16276"},{"key":"ASNOrg","value":"OVH 
SAS"},{"key":"service","value":"ssh"},{"key":"source_ip","value":"91.121.79.184"},{"key":"IsoCode","value":"FR"},{"key":"SourceRange","value":"91.121.72.0/21"},{"key":"target_user","value":"root"},{"key":"log_type","value":"ssh_failed-auth"},{"key":"IsInEU","value":"true"}],"timestamp":"2020-10-02T17:09:08Z"},{"meta":[{"key":"SourceRange","value":"91.121.72.0/21"},{"key":"target_user","value":"root"},{"key":"service","value":"ssh"},{"key":"log_type","value":"ssh_failed-auth"},{"key":"source_ip","value":"91.121.79.184"},{"key":"IsoCode","value":"FR"},{"key":"IsInEU","value":"true"},{"key":"ASNNumber","value":"16276"},{"key":"ASNOrg","value":"OVH SAS"}],"timestamp":"2020-10-02T17:09:08Z"},{"meta":[{"key":"log_type","value":"ssh_failed-auth"},{"key":"source_ip","value":"91.121.79.184"},{"key":"ASNNumber","value":"16276"},{"key":"ASNOrg","value":"OVH SAS"},{"key":"SourceRange","value":"91.121.72.0/21"},{"key":"target_user","value":"root"},{"key":"service","value":"ssh"},{"key":"IsoCode","value":"FR"},{"key":"IsInEU","value":"true"}],"timestamp":"2020-10-02T17:09:08Z"}],"events_count":6,"labels":null,"leakspeed":"10s","message":"Ip 91.121.79.184 performed crowdsecurity/ssh-bf (6 events over 46.375699ms) at 2020-10-26 12:52:58.200237122 +0100 CET m=+8.191478202","remediation":true,"scenario":"crowdsecurity/ssh-bf","scenario_hash":"4441dcff07020f6690d998b7101e642359ba405c2abb83565bbbdcee36de280f","scenario_version":"0.1","simulated":false,"source":{"as_name":"OVH SAS","cn":"FR","ip":"91.121.79.184","latitude":50.646,"longitude":3.0758,"range":"91.121.72.0/21","scope":"Ip","value":"91.121.79.184"},"start_at":"2020-10-26T12:52:58.153861334+01:00","stop_at":"2020-10-26T12:52:58.200236582+01:00"}, + {"capacity":5,"decisions":null,"events":[{"meta":[{"key":"ASNOrg","value":"OVH SAS"},{"key":"SourceRange","value":"91.121.72.0/21"},{"key":"target_user","value":"root"},{"key":"source_ip","value":"91.121.79.183"},{"key":"IsoCode","value":"FR"},{"key":"ASNNumber","value":"16276"},{"key":"service","value":"ssh"},{"key":"log_type","value":"ssh_failed-auth"},{"key":"IsInEU","value":"true"}],"timestamp":"2020-10-02T17:09:08Z"},{"meta":[{"key":"source_ip","value":"91.121.79.183"},{"key":"IsoCode","value":"FR"},{"key":"log_type","value":"ssh_failed-auth"},{"key":"service","value":"ssh"},{"key":"IsInEU","value":"true"},{"key":"ASNNumber","value":"16276"},{"key":"ASNOrg","value":"OVH SAS"},{"key":"SourceRange","value":"91.121.72.0/21"},{"key":"target_user","value":"root"}],"timestamp":"2020-10-02T17:09:08Z"},{"meta":[{"key":"target_user","value":"root"},{"key":"IsInEU","value":"true"},{"key":"ASNNumber","value":"16276"},{"key":"service","value":"ssh"},{"key":"log_type","value":"ssh_failed-auth"},{"key":"source_ip","value":"91.121.79.183"},{"key":"IsoCode","value":"FR"},{"key":"ASNOrg","value":"OVH SAS"},{"key":"SourceRange","value":"91.121.72.0/21"}],"timestamp":"2020-10-02T17:09:08Z"},{"meta":[{"key":"ASNNumber","value":"16276"},{"key":"ASNOrg","value":"OVH 
SAS"},{"key":"service","value":"ssh"},{"key":"source_ip","value":"91.121.79.183"},{"key":"IsoCode","value":"FR"},{"key":"SourceRange","value":"91.121.72.0/21"},{"key":"target_user","value":"root"},{"key":"log_type","value":"ssh_failed-auth"},{"key":"IsInEU","value":"true"}],"timestamp":"2020-10-02T17:09:08Z"},{"meta":[{"key":"SourceRange","value":"91.121.72.0/21"},{"key":"target_user","value":"root"},{"key":"service","value":"ssh"},{"key":"log_type","value":"ssh_failed-auth"},{"key":"source_ip","value":"91.121.79.183"},{"key":"IsoCode","value":"FR"},{"key":"IsInEU","value":"true"},{"key":"ASNNumber","value":"16276"},{"key":"ASNOrg","value":"OVH SAS"}],"timestamp":"2020-10-02T17:09:08Z"},{"meta":[{"key":"log_type","value":"ssh_failed-auth"},{"key":"source_ip","value":"91.121.79.183"},{"key":"ASNNumber","value":"16276"},{"key":"ASNOrg","value":"OVH SAS"},{"key":"SourceRange","value":"91.121.72.0/21"},{"key":"target_user","value":"root"},{"key":"service","value":"ssh"},{"key":"IsoCode","value":"FR"},{"key":"IsInEU","value":"true"}],"timestamp":"2020-10-02T17:09:08Z"}],"events_count":6,"labels":null,"leakspeed":"10s","message":"Ip 91.121.79.183 performed crowdsecurity/ssh-bf (6 events over 46.375699ms) at 2020-10-26 12:52:58.200237122 +0100 CET m=+8.191478202","remediation":true,"scenario":"crowdsecurity/ssh-bf","scenario_hash":"4441dcff07020f6690d998b7101e642359ba405c2abb83565bbbdcee36de280f","scenario_version":"0.1","simulated":false,"source":{"as_name":"OVH SAS","cn":"FR","ip":"91.121.79.183","latitude":50.646,"longitude":3.0758,"range":"91.121.72.0/21","scope":"Ip","value":"91.121.79.183"},"start_at":"2020-10-26T12:52:58.153861334+01:00","stop_at":"2020-10-26T12:52:58.200236582+01:00"}, + {"capacity":5,"decisions":null,"events":[{"meta":[{"key":"ASNOrg","value":"OVH SAS"},{"key":"SourceRange","value":"91.121.72.0/21"},{"key":"target_user","value":"root"},{"key":"source_ip","value":"91.121.79.182"},{"key":"IsoCode","value":"FR"},{"key":"ASNNumber","value":"16276"},{"key":"service","value":"ssh"},{"key":"log_type","value":"ssh_failed-auth"},{"key":"IsInEU","value":"true"}],"timestamp":"2020-10-02T17:09:08Z"},{"meta":[{"key":"source_ip","value":"91.121.79.182"},{"key":"IsoCode","value":"FR"},{"key":"log_type","value":"ssh_failed-auth"},{"key":"service","value":"ssh"},{"key":"IsInEU","value":"true"},{"key":"ASNNumber","value":"16276"},{"key":"ASNOrg","value":"OVH SAS"},{"key":"SourceRange","value":"91.121.72.0/21"},{"key":"target_user","value":"root"}],"timestamp":"2020-10-02T17:09:08Z"},{"meta":[{"key":"target_user","value":"root"},{"key":"IsInEU","value":"true"},{"key":"ASNNumber","value":"16276"},{"key":"service","value":"ssh"},{"key":"log_type","value":"ssh_failed-auth"},{"key":"source_ip","value":"91.121.79.182"},{"key":"IsoCode","value":"FR"},{"key":"ASNOrg","value":"OVH SAS"},{"key":"SourceRange","value":"91.121.72.0/21"}],"timestamp":"2020-10-02T17:09:08Z"},{"meta":[{"key":"ASNNumber","value":"16276"},{"key":"ASNOrg","value":"OVH 
SAS"},{"key":"service","value":"ssh"},{"key":"source_ip","value":"91.121.79.182"},{"key":"IsoCode","value":"FR"},{"key":"SourceRange","value":"91.121.72.0/21"},{"key":"target_user","value":"root"},{"key":"log_type","value":"ssh_failed-auth"},{"key":"IsInEU","value":"true"}],"timestamp":"2020-10-02T17:09:08Z"},{"meta":[{"key":"SourceRange","value":"91.121.72.0/21"},{"key":"target_user","value":"root"},{"key":"service","value":"ssh"},{"key":"log_type","value":"ssh_failed-auth"},{"key":"source_ip","value":"91.121.79.182"},{"key":"IsoCode","value":"FR"},{"key":"IsInEU","value":"true"},{"key":"ASNNumber","value":"16276"},{"key":"ASNOrg","value":"OVH SAS"}],"timestamp":"2020-10-02T17:09:08Z"},{"meta":[{"key":"log_type","value":"ssh_failed-auth"},{"key":"source_ip","value":"91.121.79.182"},{"key":"ASNNumber","value":"16276"},{"key":"ASNOrg","value":"OVH SAS"},{"key":"SourceRange","value":"91.121.72.0/21"},{"key":"target_user","value":"root"},{"key":"service","value":"ssh"},{"key":"IsoCode","value":"FR"},{"key":"IsInEU","value":"true"}],"timestamp":"2020-10-02T17:09:08Z"}],"events_count":6,"labels":null,"leakspeed":"10s","message":"Ip 91.121.79.182 performed crowdsecurity/ssh-bf (6 events over 46.375699ms) at 2020-10-26 12:52:58.200237122 +0100 CET m=+8.191478202","remediation":true,"scenario":"crowdsecurity/ssh-bf","scenario_hash":"4441dcff07020f6690d998b7101e642359ba405c2abb83565bbbdcee36de280f","scenario_version":"0.1","simulated":false,"source":{"as_name":"OVH SAS","cn":"FR","ip":"91.121.79.182","latitude":50.646,"longitude":3.0758,"range":"91.121.72.0/21","scope":"Ip","value":"91.121.79.182"},"start_at":"2020-10-26T12:52:58.153861334+01:00","stop_at":"2020-10-26T12:52:58.200236582+01:00"}, + {"capacity":5,"decisions":null,"events":[{"meta":[{"key":"ASNOrg","value":"OVH SAS"},{"key":"SourceRange","value":"91.121.72.0/21"},{"key":"target_user","value":"root"},{"key":"source_ip","value":"91.121.79.181"},{"key":"IsoCode","value":"FR"},{"key":"ASNNumber","value":"16276"},{"key":"service","value":"ssh"},{"key":"log_type","value":"ssh_failed-auth"},{"key":"IsInEU","value":"true"}],"timestamp":"2020-10-02T17:09:08Z"},{"meta":[{"key":"source_ip","value":"91.121.79.181"},{"key":"IsoCode","value":"FR"},{"key":"log_type","value":"ssh_failed-auth"},{"key":"service","value":"ssh"},{"key":"IsInEU","value":"true"},{"key":"ASNNumber","value":"16276"},{"key":"ASNOrg","value":"OVH SAS"},{"key":"SourceRange","value":"91.121.72.0/21"},{"key":"target_user","value":"root"}],"timestamp":"2020-10-02T17:09:08Z"},{"meta":[{"key":"target_user","value":"root"},{"key":"IsInEU","value":"true"},{"key":"ASNNumber","value":"16276"},{"key":"service","value":"ssh"},{"key":"log_type","value":"ssh_failed-auth"},{"key":"source_ip","value":"91.121.79.181"},{"key":"IsoCode","value":"FR"},{"key":"ASNOrg","value":"OVH SAS"},{"key":"SourceRange","value":"91.121.72.0/21"}],"timestamp":"2020-10-02T17:09:08Z"},{"meta":[{"key":"ASNNumber","value":"16276"},{"key":"ASNOrg","value":"OVH 
SAS"},{"key":"service","value":"ssh"},{"key":"source_ip","value":"91.121.79.181"},{"key":"IsoCode","value":"FR"},{"key":"SourceRange","value":"91.121.72.0/21"},{"key":"target_user","value":"root"},{"key":"log_type","value":"ssh_failed-auth"},{"key":"IsInEU","value":"true"}],"timestamp":"2020-10-02T17:09:08Z"},{"meta":[{"key":"SourceRange","value":"91.121.72.0/21"},{"key":"target_user","value":"root"},{"key":"service","value":"ssh"},{"key":"log_type","value":"ssh_failed-auth"},{"key":"source_ip","value":"91.121.79.181"},{"key":"IsoCode","value":"FR"},{"key":"IsInEU","value":"true"},{"key":"ASNNumber","value":"16276"},{"key":"ASNOrg","value":"OVH SAS"}],"timestamp":"2020-10-02T17:09:08Z"},{"meta":[{"key":"log_type","value":"ssh_failed-auth"},{"key":"source_ip","value":"91.121.79.181"},{"key":"ASNNumber","value":"16276"},{"key":"ASNOrg","value":"OVH SAS"},{"key":"SourceRange","value":"91.121.72.0/21"},{"key":"target_user","value":"root"},{"key":"service","value":"ssh"},{"key":"IsoCode","value":"FR"},{"key":"IsInEU","value":"true"}],"timestamp":"2020-10-02T17:09:08Z"}],"events_count":6,"labels":null,"leakspeed":"10s","message":"Ip 91.121.79.181 performed crowdsecurity/ssh-bf (6 events over 46.375699ms) at 2020-10-26 12:52:58.200237122 +0100 CET m=+8.191478202","remediation":true,"scenario":"crowdsecurity/ssh-bf","scenario_hash":"4441dcff07020f6690d998b7101e642359ba405c2abb83565bbbdcee36de280f","scenario_version":"0.1","simulated":false,"source":{"as_name":"OVH SAS","cn":"FR","ip":"91.121.79.181","latitude":50.646,"longitude":3.0758,"range":"91.121.72.0/21","scope":"Ip","value":"91.121.79.181"},"start_at":"2020-10-26T12:52:58.153861334+01:00","stop_at":"2020-10-26T12:52:58.200236582+01:00"}, + {"capacity":5,"decisions":null,"events":[{"meta":[{"key":"ASNOrg","value":"OVH SAS"},{"key":"SourceRange","value":"91.121.72.0/21"},{"key":"target_user","value":"root"},{"key":"source_ip","value":"91.121.79.180"},{"key":"IsoCode","value":"FR"},{"key":"ASNNumber","value":"16276"},{"key":"service","value":"ssh"},{"key":"log_type","value":"ssh_failed-auth"},{"key":"IsInEU","value":"true"}],"timestamp":"2020-10-02T17:09:08Z"},{"meta":[{"key":"source_ip","value":"91.121.79.180"},{"key":"IsoCode","value":"FR"},{"key":"log_type","value":"ssh_failed-auth"},{"key":"service","value":"ssh"},{"key":"IsInEU","value":"true"},{"key":"ASNNumber","value":"16276"},{"key":"ASNOrg","value":"OVH SAS"},{"key":"SourceRange","value":"91.121.72.0/21"},{"key":"target_user","value":"root"}],"timestamp":"2020-10-02T17:09:08Z"},{"meta":[{"key":"target_user","value":"root"},{"key":"IsInEU","value":"true"},{"key":"ASNNumber","value":"16276"},{"key":"service","value":"ssh"},{"key":"log_type","value":"ssh_failed-auth"},{"key":"source_ip","value":"91.121.79.180"},{"key":"IsoCode","value":"FR"},{"key":"ASNOrg","value":"OVH SAS"},{"key":"SourceRange","value":"91.121.72.0/21"}],"timestamp":"2020-10-02T17:09:08Z"},{"meta":[{"key":"ASNNumber","value":"16276"},{"key":"ASNOrg","value":"OVH 
SAS"},{"key":"service","value":"ssh"},{"key":"source_ip","value":"91.121.79.180"},{"key":"IsoCode","value":"FR"},{"key":"SourceRange","value":"91.121.72.0/21"},{"key":"target_user","value":"root"},{"key":"log_type","value":"ssh_failed-auth"},{"key":"IsInEU","value":"true"}],"timestamp":"2020-10-02T17:09:08Z"},{"meta":[{"key":"SourceRange","value":"91.121.72.0/21"},{"key":"target_user","value":"root"},{"key":"service","value":"ssh"},{"key":"log_type","value":"ssh_failed-auth"},{"key":"source_ip","value":"91.121.79.180"},{"key":"IsoCode","value":"FR"},{"key":"IsInEU","value":"true"},{"key":"ASNNumber","value":"16276"},{"key":"ASNOrg","value":"OVH SAS"}],"timestamp":"2020-10-02T17:09:08Z"},{"meta":[{"key":"log_type","value":"ssh_failed-auth"},{"key":"source_ip","value":"91.121.79.180"},{"key":"ASNNumber","value":"16276"},{"key":"ASNOrg","value":"OVH SAS"},{"key":"SourceRange","value":"91.121.72.0/21"},{"key":"target_user","value":"root"},{"key":"service","value":"ssh"},{"key":"IsoCode","value":"FR"},{"key":"IsInEU","value":"true"}],"timestamp":"2020-10-02T17:09:08Z"}],"events_count":6,"labels":null,"leakspeed":"10s","message":"Ip 91.121.79.180 performed crowdsecurity/ssh-bf (6 events over 46.375699ms) at 2020-10-26 12:52:58.200237122 +0100 CET m=+8.191478202","remediation":true,"scenario":"crowdsecurity/ssh-bf","scenario_hash":"4441dcff07020f6690d998b7101e642359ba405c2abb83565bbbdcee36de280f","scenario_version":"0.1","simulated":false,"source":{"as_name":"OVH SAS","cn":"FR","ip":"91.121.79.180","latitude":50.646,"longitude":3.0758,"range":"91.121.72.0/21","scope":"Ip","value":"91.121.79.180"},"start_at":"2020-10-26T12:52:58.153861334+01:00","stop_at":"2020-10-26T12:52:58.200236582+01:00"}, + {"capacity":5,"decisions":null,"events":[{"meta":[{"key":"ASNOrg","value":"OVH SAS"},{"key":"SourceRange","value":"91.121.72.0/21"},{"key":"target_user","value":"root"},{"key":"source_ip","value":"91.121.79.179"},{"key":"IsoCode","value":"FR"},{"key":"ASNNumber","value":"16276"},{"key":"service","value":"ssh"},{"key":"log_type","value":"ssh_failed-auth"},{"key":"IsInEU","value":"true"}],"timestamp":"2020-10-02T17:09:08Z"},{"meta":[{"key":"source_ip","value":"91.121.79.179"},{"key":"IsoCode","value":"FR"},{"key":"log_type","value":"ssh_failed-auth"},{"key":"service","value":"ssh"},{"key":"IsInEU","value":"true"},{"key":"ASNNumber","value":"16276"},{"key":"ASNOrg","value":"OVH SAS"},{"key":"SourceRange","value":"91.121.72.0/21"},{"key":"target_user","value":"root"}],"timestamp":"2020-10-02T17:09:08Z"},{"meta":[{"key":"target_user","value":"root"},{"key":"IsInEU","value":"true"},{"key":"ASNNumber","value":"16276"},{"key":"service","value":"ssh"},{"key":"log_type","value":"ssh_failed-auth"},{"key":"source_ip","value":"91.121.79.179"},{"key":"IsoCode","value":"FR"},{"key":"ASNOrg","value":"OVH SAS"},{"key":"SourceRange","value":"91.121.72.0/21"}],"timestamp":"2020-10-02T17:09:08Z"},{"meta":[{"key":"ASNNumber","value":"16276"},{"key":"ASNOrg","value":"OVH 
SAS"},{"key":"service","value":"ssh"},{"key":"source_ip","value":"91.121.79.179"},{"key":"IsoCode","value":"FR"},{"key":"SourceRange","value":"91.121.72.0/21"},{"key":"target_user","value":"root"},{"key":"log_type","value":"ssh_failed-auth"},{"key":"IsInEU","value":"true"}],"timestamp":"2020-10-02T17:09:08Z"},{"meta":[{"key":"SourceRange","value":"91.121.72.0/21"},{"key":"target_user","value":"root"},{"key":"service","value":"ssh"},{"key":"log_type","value":"ssh_failed-auth"},{"key":"source_ip","value":"91.121.79.179"},{"key":"IsoCode","value":"FR"},{"key":"IsInEU","value":"true"},{"key":"ASNNumber","value":"16276"},{"key":"ASNOrg","value":"OVH SAS"}],"timestamp":"2020-10-02T17:09:08Z"},{"meta":[{"key":"log_type","value":"ssh_failed-auth"},{"key":"source_ip","value":"91.121.79.179"},{"key":"ASNNumber","value":"16276"},{"key":"ASNOrg","value":"OVH SAS"},{"key":"SourceRange","value":"91.121.72.0/21"},{"key":"target_user","value":"root"},{"key":"service","value":"ssh"},{"key":"IsoCode","value":"FR"},{"key":"IsInEU","value":"true"}],"timestamp":"2020-10-02T17:09:08Z"}],"events_count":6,"labels":null,"leakspeed":"10s","message":"Ip 91.121.79.179 performed crowdsecurity/ssh-bf (6 events over 46.375699ms) at 2020-10-26 12:52:58.200237122 +0100 CET m=+8.191478202","remediation":true,"scenario":"crowdsecurity/ssh-bf","scenario_hash":"4441dcff07020f6690d998b7101e642359ba405c2abb83565bbbdcee36de280f","scenario_version":"0.1","simulated":false,"source":{"as_name":"OVH SAS","cn":"FR","ip":"91.121.79.179","latitude":50.646,"longitude":3.0758,"range":"91.121.72.0/21","scope":"Ip","value":"91.121.79.179"},"start_at":"2020-10-26T12:52:58.153861334+01:00","stop_at":"2020-10-26T12:52:58.200236582+01:00"}, + {"capacity":5,"decisions":null,"events":[{"meta":[{"key":"ASNOrg","value":"OVH SAS"},{"key":"SourceRange","value":"91.121.72.0/21"},{"key":"target_user","value":"root"},{"key":"source_ip","value":"91.121.79.178"},{"key":"IsoCode","value":"FR"},{"key":"ASNNumber","value":"16276"},{"key":"service","value":"ssh"},{"key":"log_type","value":"ssh_failed-auth"},{"key":"IsInEU","value":"true"}],"timestamp":"2020-10-02T17:09:08Z"},{"meta":[{"key":"source_ip","value":"91.121.79.178"},{"key":"IsoCode","value":"FR"},{"key":"log_type","value":"ssh_failed-auth"},{"key":"service","value":"ssh"},{"key":"IsInEU","value":"true"},{"key":"ASNNumber","value":"16276"},{"key":"ASNOrg","value":"OVH SAS"},{"key":"SourceRange","value":"91.121.72.0/21"},{"key":"target_user","value":"root"}],"timestamp":"2020-10-02T17:09:08Z"},{"meta":[{"key":"target_user","value":"root"},{"key":"IsInEU","value":"true"},{"key":"ASNNumber","value":"16276"},{"key":"service","value":"ssh"},{"key":"log_type","value":"ssh_failed-auth"},{"key":"source_ip","value":"91.121.79.178"},{"key":"IsoCode","value":"FR"},{"key":"ASNOrg","value":"OVH SAS"},{"key":"SourceRange","value":"91.121.72.0/21"}],"timestamp":"2020-10-02T17:09:08Z"},{"meta":[{"key":"ASNNumber","value":"16276"},{"key":"ASNOrg","value":"OVH 
SAS"},{"key":"service","value":"ssh"},{"key":"source_ip","value":"91.121.79.178"},{"key":"IsoCode","value":"FR"},{"key":"SourceRange","value":"91.121.72.0/21"},{"key":"target_user","value":"root"},{"key":"log_type","value":"ssh_failed-auth"},{"key":"IsInEU","value":"true"}],"timestamp":"2020-10-02T17:09:08Z"},{"meta":[{"key":"SourceRange","value":"91.121.72.0/21"},{"key":"target_user","value":"root"},{"key":"service","value":"ssh"},{"key":"log_type","value":"ssh_failed-auth"},{"key":"source_ip","value":"91.121.79.178"},{"key":"IsoCode","value":"FR"},{"key":"IsInEU","value":"true"},{"key":"ASNNumber","value":"16276"},{"key":"ASNOrg","value":"OVH SAS"}],"timestamp":"2020-10-02T17:09:08Z"},{"meta":[{"key":"log_type","value":"ssh_failed-auth"},{"key":"source_ip","value":"91.121.79.178"},{"key":"ASNNumber","value":"16276"},{"key":"ASNOrg","value":"OVH SAS"},{"key":"SourceRange","value":"91.121.72.0/21"},{"key":"target_user","value":"root"},{"key":"service","value":"ssh"},{"key":"IsoCode","value":"FR"},{"key":"IsInEU","value":"true"}],"timestamp":"2020-10-02T17:09:08Z"}],"events_count":6,"labels":null,"leakspeed":"10s","message":"Ip 91.121.79.178 performed crowdsecurity/ssh-bf (6 events over 46.375699ms) at 2020-10-26 12:52:58.200237122 +0100 CET m=+8.191478202","remediation":true,"scenario":"crowdsecurity/ssh-bf","scenario_hash":"4441dcff07020f6690d998b7101e642359ba405c2abb83565bbbdcee36de280f","scenario_version":"0.1","simulated":false,"source":{"as_name":"OVH SAS","cn":"FR","ip":"91.121.79.178","latitude":50.646,"longitude":3.0758,"range":"91.121.72.0/21","scope":"Ip","value":"91.121.79.178"},"start_at":"2020-10-26T12:52:58.153861334+01:00","stop_at":"2020-10-26T12:52:58.200236582+01:00"} +] diff --git a/pkg/apiserver/tests/alert_minibulk+simul.json b/pkg/apiserver/tests/alert_minibulk+simul.json new file mode 100644 index 000000000..55f4a9471 --- /dev/null +++ b/pkg/apiserver/tests/alert_minibulk+simul.json @@ -0,0 +1,4 @@ +[ + {"capacity":5,"decisions":null,"events":[{"meta":[{"key":"ASNOrg","value":"OVH SAS"},{"key":"SourceRange","value":"91.121.72.0/21"},{"key":"target_user","value":"root"},{"key":"source_ip","value":"91.121.79.179"},{"key":"IsoCode","value":"FR"},{"key":"ASNNumber","value":"16276"},{"key":"service","value":"ssh"},{"key":"log_type","value":"ssh_failed-auth"},{"key":"IsInEU","value":"true"}],"timestamp":"2020-10-02T17:09:08Z"},{"meta":[{"key":"source_ip","value":"91.121.79.179"},{"key":"IsoCode","value":"FR"},{"key":"log_type","value":"ssh_failed-auth"},{"key":"service","value":"ssh"},{"key":"IsInEU","value":"true"},{"key":"ASNNumber","value":"16276"},{"key":"ASNOrg","value":"OVH SAS"},{"key":"SourceRange","value":"91.121.72.0/21"},{"key":"target_user","value":"root"}],"timestamp":"2020-10-02T17:09:08Z"},{"meta":[{"key":"target_user","value":"root"},{"key":"IsInEU","value":"true"},{"key":"ASNNumber","value":"16276"},{"key":"service","value":"ssh"},{"key":"log_type","value":"ssh_failed-auth"},{"key":"source_ip","value":"91.121.79.179"},{"key":"IsoCode","value":"FR"},{"key":"ASNOrg","value":"OVH SAS"},{"key":"SourceRange","value":"91.121.72.0/21"}],"timestamp":"2020-10-02T17:09:08Z"},{"meta":[{"key":"ASNNumber","value":"16276"},{"key":"ASNOrg","value":"OVH 
SAS"},{"key":"service","value":"ssh"},{"key":"source_ip","value":"91.121.79.179"},{"key":"IsoCode","value":"FR"},{"key":"SourceRange","value":"91.121.72.0/21"},{"key":"target_user","value":"root"},{"key":"log_type","value":"ssh_failed-auth"},{"key":"IsInEU","value":"true"}],"timestamp":"2020-10-02T17:09:08Z"},{"meta":[{"key":"SourceRange","value":"91.121.72.0/21"},{"key":"target_user","value":"root"},{"key":"service","value":"ssh"},{"key":"log_type","value":"ssh_failed-auth"},{"key":"source_ip","value":"91.121.79.179"},{"key":"IsoCode","value":"FR"},{"key":"IsInEU","value":"true"},{"key":"ASNNumber","value":"16276"},{"key":"ASNOrg","value":"OVH SAS"}],"timestamp":"2020-10-02T17:09:08Z"},{"meta":[{"key":"log_type","value":"ssh_failed-auth"},{"key":"source_ip","value":"91.121.79.179"},{"key":"ASNNumber","value":"16276"},{"key":"ASNOrg","value":"OVH SAS"},{"key":"SourceRange","value":"91.121.72.0/21"},{"key":"target_user","value":"root"},{"key":"service","value":"ssh"},{"key":"IsoCode","value":"FR"},{"key":"IsInEU","value":"true"}],"timestamp":"2020-10-02T17:09:08Z"}],"events_count":6,"labels":null,"leakspeed":"10s","message":"Ip 91.121.79.179 performed crowdsecurity/ssh-bf (6 events over 46.375699ms) at 2020-10-26 12:52:58.200237122 +0100 CET m=+8.191478202","remediation":true,"scenario":"crowdsecurity/ssh-bf","scenario_hash":"4441dcff07020f6690d998b7101e642359ba405c2abb83565bbbdcee36de280f","scenario_version":"0.1","simulated":true,"source":{"as_name":"OVH SAS","cn":"FR","ip":"91.121.79.179","latitude":50.646,"longitude":3.0758,"range":"91.121.72.0/21","scope":"Ip","value":"91.121.79.179"},"start_at":"2020-10-26T12:52:58.153861334+01:00","stop_at":"2020-10-26T12:52:58.200236582+01:00"}, + {"capacity":5,"decisions":null,"events":[{"meta":[{"key":"ASNOrg","value":"OVH SAS"},{"key":"SourceRange","value":"91.121.72.0/21"},{"key":"target_user","value":"root"},{"key":"source_ip","value":"91.121.79.178"},{"key":"IsoCode","value":"FR"},{"key":"ASNNumber","value":"16276"},{"key":"service","value":"ssh"},{"key":"log_type","value":"ssh_failed-auth"},{"key":"IsInEU","value":"true"}],"timestamp":"2020-10-02T17:09:08Z"},{"meta":[{"key":"source_ip","value":"91.121.79.178"},{"key":"IsoCode","value":"FR"},{"key":"log_type","value":"ssh_failed-auth"},{"key":"service","value":"ssh"},{"key":"IsInEU","value":"true"},{"key":"ASNNumber","value":"16276"},{"key":"ASNOrg","value":"OVH SAS"},{"key":"SourceRange","value":"91.121.72.0/21"},{"key":"target_user","value":"root"}],"timestamp":"2020-10-02T17:09:08Z"},{"meta":[{"key":"target_user","value":"root"},{"key":"IsInEU","value":"true"},{"key":"ASNNumber","value":"16276"},{"key":"service","value":"ssh"},{"key":"log_type","value":"ssh_failed-auth"},{"key":"source_ip","value":"91.121.79.178"},{"key":"IsoCode","value":"FR"},{"key":"ASNOrg","value":"OVH SAS"},{"key":"SourceRange","value":"91.121.72.0/21"}],"timestamp":"2020-10-02T17:09:08Z"},{"meta":[{"key":"ASNNumber","value":"16276"},{"key":"ASNOrg","value":"OVH 
SAS"},{"key":"service","value":"ssh"},{"key":"source_ip","value":"91.121.79.178"},{"key":"IsoCode","value":"FR"},{"key":"SourceRange","value":"91.121.72.0/21"},{"key":"target_user","value":"root"},{"key":"log_type","value":"ssh_failed-auth"},{"key":"IsInEU","value":"true"}],"timestamp":"2020-10-02T17:09:08Z"},{"meta":[{"key":"SourceRange","value":"91.121.72.0/21"},{"key":"target_user","value":"root"},{"key":"service","value":"ssh"},{"key":"log_type","value":"ssh_failed-auth"},{"key":"source_ip","value":"91.121.79.178"},{"key":"IsoCode","value":"FR"},{"key":"IsInEU","value":"true"},{"key":"ASNNumber","value":"16276"},{"key":"ASNOrg","value":"OVH SAS"}],"timestamp":"2020-10-02T17:09:08Z"},{"meta":[{"key":"log_type","value":"ssh_failed-auth"},{"key":"source_ip","value":"91.121.79.178"},{"key":"ASNNumber","value":"16276"},{"key":"ASNOrg","value":"OVH SAS"},{"key":"SourceRange","value":"91.121.72.0/21"},{"key":"target_user","value":"root"},{"key":"service","value":"ssh"},{"key":"IsoCode","value":"FR"},{"key":"IsInEU","value":"true"}],"timestamp":"2020-10-02T17:09:08Z"}],"events_count":6,"labels":null,"leakspeed":"10s","message":"Ip 91.121.79.178 performed crowdsecurity/ssh-bf (6 events over 46.375699ms) at 2020-10-26 12:52:58.200237122 +0100 CET m=+8.191478202","remediation":true,"scenario":"crowdsecurity/ssh-bf","scenario_hash":"4441dcff07020f6690d998b7101e642359ba405c2abb83565bbbdcee36de280f","scenario_version":"0.1","simulated":false,"source":{"as_name":"OVH SAS","cn":"FR","ip":"91.121.79.178","latitude":50.646,"longitude":3.0758,"range":"91.121.72.0/21","scope":"Ip","value":"91.121.79.178"},"start_at":"2020-10-26T12:52:58.153861334+01:00","stop_at":"2020-10-26T12:52:58.200236582+01:00"} +] diff --git a/pkg/apiserver/tests/alert_minibulk.json b/pkg/apiserver/tests/alert_minibulk.json new file mode 100644 index 000000000..5d609f4fd --- /dev/null +++ b/pkg/apiserver/tests/alert_minibulk.json @@ -0,0 +1,4 @@ +[ + {"capacity":5,"decisions":null,"events":[{"meta":[{"key":"ASNOrg","value":"OVH SAS"},{"key":"SourceRange","value":"91.121.72.0/21"},{"key":"target_user","value":"root"},{"key":"source_ip","value":"91.121.79.179"},{"key":"IsoCode","value":"FR"},{"key":"ASNNumber","value":"16276"},{"key":"service","value":"ssh"},{"key":"log_type","value":"ssh_failed-auth"},{"key":"IsInEU","value":"true"}],"timestamp":"2020-10-02T17:09:08Z"},{"meta":[{"key":"source_ip","value":"91.121.79.179"},{"key":"IsoCode","value":"FR"},{"key":"log_type","value":"ssh_failed-auth"},{"key":"service","value":"ssh"},{"key":"IsInEU","value":"true"},{"key":"ASNNumber","value":"16276"},{"key":"ASNOrg","value":"OVH SAS"},{"key":"SourceRange","value":"91.121.72.0/21"},{"key":"target_user","value":"root"}],"timestamp":"2020-10-02T17:09:08Z"},{"meta":[{"key":"target_user","value":"root"},{"key":"IsInEU","value":"true"},{"key":"ASNNumber","value":"16276"},{"key":"service","value":"ssh"},{"key":"log_type","value":"ssh_failed-auth"},{"key":"source_ip","value":"91.121.79.179"},{"key":"IsoCode","value":"FR"},{"key":"ASNOrg","value":"OVH SAS"},{"key":"SourceRange","value":"91.121.72.0/21"}],"timestamp":"2020-10-02T17:09:08Z"},{"meta":[{"key":"ASNNumber","value":"16276"},{"key":"ASNOrg","value":"OVH 
SAS"},{"key":"service","value":"ssh"},{"key":"source_ip","value":"91.121.79.179"},{"key":"IsoCode","value":"FR"},{"key":"SourceRange","value":"91.121.72.0/21"},{"key":"target_user","value":"root"},{"key":"log_type","value":"ssh_failed-auth"},{"key":"IsInEU","value":"true"}],"timestamp":"2020-10-02T17:09:08Z"},{"meta":[{"key":"SourceRange","value":"91.121.72.0/21"},{"key":"target_user","value":"root"},{"key":"service","value":"ssh"},{"key":"log_type","value":"ssh_failed-auth"},{"key":"source_ip","value":"91.121.79.179"},{"key":"IsoCode","value":"FR"},{"key":"IsInEU","value":"true"},{"key":"ASNNumber","value":"16276"},{"key":"ASNOrg","value":"OVH SAS"}],"timestamp":"2020-10-02T17:09:08Z"},{"meta":[{"key":"log_type","value":"ssh_failed-auth"},{"key":"source_ip","value":"91.121.79.179"},{"key":"ASNNumber","value":"16276"},{"key":"ASNOrg","value":"OVH SAS"},{"key":"SourceRange","value":"91.121.72.0/21"},{"key":"target_user","value":"root"},{"key":"service","value":"ssh"},{"key":"IsoCode","value":"FR"},{"key":"IsInEU","value":"true"}],"timestamp":"2020-10-02T17:09:08Z"}],"events_count":6,"labels":null,"leakspeed":"10s","message":"Ip 91.121.79.179 performed crowdsecurity/ssh-bf (6 events over 46.375699ms) at 2020-10-26 12:52:58.200237122 +0100 CET m=+8.191478202","remediation":true,"scenario":"crowdsecurity/ssh-bf","scenario_hash":"4441dcff07020f6690d998b7101e642359ba405c2abb83565bbbdcee36de280f","scenario_version":"0.1","simulated":false,"source":{"as_name":"OVH SAS","cn":"FR","ip":"91.121.79.179","latitude":50.646,"longitude":3.0758,"range":"91.121.72.0/21","scope":"Ip","value":"91.121.79.179"},"start_at":"2020-10-26T12:52:58.153861334+01:00","stop_at":"2020-10-26T12:52:58.200236582+01:00"}, + {"capacity":5,"decisions":null,"events":[{"meta":[{"key":"ASNOrg","value":"OVH SAS"},{"key":"SourceRange","value":"91.121.72.0/21"},{"key":"target_user","value":"root"},{"key":"source_ip","value":"91.121.79.178"},{"key":"IsoCode","value":"FR"},{"key":"ASNNumber","value":"16276"},{"key":"service","value":"ssh"},{"key":"log_type","value":"ssh_failed-auth"},{"key":"IsInEU","value":"true"}],"timestamp":"2020-10-02T17:09:08Z"},{"meta":[{"key":"source_ip","value":"91.121.79.178"},{"key":"IsoCode","value":"FR"},{"key":"log_type","value":"ssh_failed-auth"},{"key":"service","value":"ssh"},{"key":"IsInEU","value":"true"},{"key":"ASNNumber","value":"16276"},{"key":"ASNOrg","value":"OVH SAS"},{"key":"SourceRange","value":"91.121.72.0/21"},{"key":"target_user","value":"root"}],"timestamp":"2020-10-02T17:09:08Z"},{"meta":[{"key":"target_user","value":"root"},{"key":"IsInEU","value":"true"},{"key":"ASNNumber","value":"16276"},{"key":"service","value":"ssh"},{"key":"log_type","value":"ssh_failed-auth"},{"key":"source_ip","value":"91.121.79.178"},{"key":"IsoCode","value":"FR"},{"key":"ASNOrg","value":"OVH SAS"},{"key":"SourceRange","value":"91.121.72.0/21"}],"timestamp":"2020-10-02T17:09:08Z"},{"meta":[{"key":"ASNNumber","value":"16276"},{"key":"ASNOrg","value":"OVH 
SAS"},{"key":"service","value":"ssh"},{"key":"source_ip","value":"91.121.79.178"},{"key":"IsoCode","value":"FR"},{"key":"SourceRange","value":"91.121.72.0/21"},{"key":"target_user","value":"root"},{"key":"log_type","value":"ssh_failed-auth"},{"key":"IsInEU","value":"true"}],"timestamp":"2020-10-02T17:09:08Z"},{"meta":[{"key":"SourceRange","value":"91.121.72.0/21"},{"key":"target_user","value":"root"},{"key":"service","value":"ssh"},{"key":"log_type","value":"ssh_failed-auth"},{"key":"source_ip","value":"91.121.79.178"},{"key":"IsoCode","value":"FR"},{"key":"IsInEU","value":"true"},{"key":"ASNNumber","value":"16276"},{"key":"ASNOrg","value":"OVH SAS"}],"timestamp":"2020-10-02T17:09:08Z"},{"meta":[{"key":"log_type","value":"ssh_failed-auth"},{"key":"source_ip","value":"91.121.79.178"},{"key":"ASNNumber","value":"16276"},{"key":"ASNOrg","value":"OVH SAS"},{"key":"SourceRange","value":"91.121.72.0/21"},{"key":"target_user","value":"root"},{"key":"service","value":"ssh"},{"key":"IsoCode","value":"FR"},{"key":"IsInEU","value":"true"}],"timestamp":"2020-10-02T17:09:08Z"}],"events_count":6,"labels":null,"leakspeed":"10s","message":"Ip 91.121.79.178 performed crowdsecurity/ssh-bf (6 events over 46.375699ms) at 2020-10-26 12:52:58.200237122 +0100 CET m=+8.191478202","remediation":true,"scenario":"crowdsecurity/ssh-bf","scenario_hash":"4441dcff07020f6690d998b7101e642359ba405c2abb83565bbbdcee36de280f","scenario_version":"0.1","simulated":false,"source":{"as_name":"OVH SAS","cn":"FR","ip":"91.121.79.178","latitude":50.646,"longitude":3.0758,"range":"91.121.72.0/21","scope":"Ip","value":"91.121.79.178"},"start_at":"2020-10-26T12:52:58.153861334+01:00","stop_at":"2020-10-26T12:52:58.200236582+01:00"} +] diff --git a/pkg/apiserver/tests/alert_sample.json b/pkg/apiserver/tests/alert_sample.json new file mode 100644 index 000000000..d76396197 --- /dev/null +++ b/pkg/apiserver/tests/alert_sample.json @@ -0,0 +1,61 @@ +[ + { + "id": 42, + "machine_id": "test", + "capacity": 1, + "created_at": "2020-10-09T10:00:10Z", + "decisions": [ + { + "id": 1, + "duration": "1h", + "start_ip": 2130706433, + "end_ip": 2130706433, + "origin": "test", + "scenario": "crowdsecurity/test", + "scope": "ip", + "value": "127.0.0.1", + "type": "ban" + } + ], + "Events": [ + { + "meta": [ + { + "key": "test", + "value": "test" + } + ], + "timestamp": "2020-10-09T10:00:01Z" + } + ], + "events_count": 1, + "labels": [ + "test" + ], + "leakspeed": "0.5s", + "message": "test", + "meta": [ + { + "key": "test", + "value": "test" + } + ], + "scenario": "crowdsecurity/test", + "scenario_hash": "hashtest", + "scenario_version": "v1", + "simulated": false, + "source": { + "as_name": "test", + "as_number": "0123456", + "cn": "france", + "ip": "127.0.0.1", + "latitude": 46.227638, + "logitude": 2.213749, + "range": "127.0.0.1/32", + "scope": "ip", + "value": "127.0.0.1" + }, + "start_at": "2020-10-09T10:00:01Z", + "stop_at": "2020-10-09T10:00:05Z" + } +] diff --git a/pkg/apiserver/tests/alert_ssh-bf.json b/pkg/apiserver/tests/alert_ssh-bf.json new file mode 100644 index 000000000..b78be56eb --- /dev/null +++ b/pkg/apiserver/tests/alert_ssh-bf.json @@ -0,0 +1,275 @@ +[ + { + "capacity": 5, + "decisions": null, + "events": [ + { + "meta": [ + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "target_user", + "value": "root" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + 
"key": "source_ip", + "value": "91.121.79.195" + }, + { + "key": "IsInEU", + "value": "true" + }, + { + "key": "SourceRange", + "value": "91.121.72.0/21" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "target_user", + "value": "root" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "source_ip", + "value": "91.121.79.195" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "SourceRange", + "value": "91.121.72.0/21" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "IsInEU", + "value": "true" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "service", + "value": "ssh" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "IsInEU", + "value": "true" + }, + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "target_user", + "value": "root" + }, + { + "key": "source_ip", + "value": "91.121.79.195" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "SourceRange", + "value": "91.121.72.0/21" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "SourceRange", + "value": "91.121.72.0/21" + }, + { + "key": "target_user", + "value": "root" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "source_ip", + "value": "91.121.79.195" + }, + { + "key": "IsInEU", + "value": "true" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "target_user", + "value": "root" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "source_ip", + "value": "91.121.79.195" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "IsInEU", + "value": "true" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "SourceRange", + "value": "91.121.72.0/21" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "SourceRange", + "value": "91.121.72.0/21" + }, + { + "key": "target_user", + "value": "root" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "source_ip", + "value": "91.121.79.195" + }, + { + "key": "IsInEU", + "value": "true" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + } + ], + "events_count": 6, + "labels": null, + "leakspeed": "10s", + "message": "Ip 91.121.79.195 performed 'crowdsecurity/ssh-bf' (6 events over 30.18165ms) at 2020-10-26 09:50:32.055535505 +0100 CET m=+6.235529150", + "remediation": true, + "scenario": "crowdsecurity/ssh-bf", + "scenario_hash": "4441dcff07020f6690d998b7101e642359ba405c2abb83565bbbdcee36de280f", + "scenario_version": "0.1", + "simulated": false, + "source": { + "as_name": "OVH SAS", + "cn": "FR", + "ip": "91.121.79.195", + "latitude": 50.646, + "longitude": 3.0758, + "range": "91.121.72.0/21", + "scope": "Ip", + "value": "91.121.79.195" + }, + "start_at": "2020-10-26T09:50:32.025353849+01:00", + "stop_at": 
"2020-10-26T09:50:32.055534398+01:00" + } +] diff --git a/pkg/apiserver/tests/invalidAlert_sample.json b/pkg/apiserver/tests/invalidAlert_sample.json new file mode 100644 index 000000000..84c26a81e --- /dev/null +++ b/pkg/apiserver/tests/invalidAlert_sample.json @@ -0,0 +1,45 @@ +[ + { + "id": 1, + "machine_id": "test", + "capacity": 1, + "decisions": [ + { + "id": 1, + "duration": "1h", + "start_ip": 2130706433, + "end_ip": 2130706433, + "origin": "test", + "scenario": "crowdsecurity/test", + "scope": "ip", + "value": "127.0.0.1", + "type": "ban" + } + ], + "Events": [ + { + "meta": [ + { + "key": "test", + "value": "test" + } + ], + "timestamp": "2020-10-09T10:00:01Z" + } + ], + "events_count": 1, + "labels": [ + "test" + ], + "leakspeed": "0.5s", + "message": "test", + "meta": [ + { + "key": "test", + "value": "test" + } + ], + "start_at": "2020-10-09T10:00:01Z", + "stop_at": "2020-10-09T10:00:05Z" + } +] \ No newline at end of file diff --git a/pkg/apiserver/tests/profiles.yaml b/pkg/apiserver/tests/profiles.yaml new file mode 100644 index 000000000..5727f4edd --- /dev/null +++ b/pkg/apiserver/tests/profiles.yaml @@ -0,0 +1,31 @@ + +name: enforce_mfa +#debug: true +filters: + - 'Alert.Remediation == true && Alert.GetScenario() == "crowdsecurity/ssh-enforce-mfa" && Alert.GetScope() == "username"' +decisions: #remediation vs decision + - type: enforce_mfa + scope: "username" + duration: 1h +on_success: continue +--- +name: default_ip_remediation +#debug: true +filters: +# try types.Ip here :) + - Alert.Remediation == true && Alert.GetScope() == "Ip" +decisions: + - type: ban + duration: 1h +on_success: break +--- +#this one won't be reached ^^ +name: default_ip_remediation_2 +#debug: true +filters: +# try types.Ip here :) + - Alert.Remediation == true && Alert.GetScope() == "Ip" +decisions: + - type: ratatatata + duration: 1h +on_success: break diff --git a/pkg/apiserver/utils_test.go b/pkg/apiserver/utils_test.go new file mode 100644 index 000000000..f5bd08223 --- /dev/null +++ b/pkg/apiserver/utils_test.go @@ -0,0 +1,54 @@ +package apiserver + +import ( + "net" + "testing" + + "github.com/crowdsecurity/crowdsec/pkg/apiserver/controllers" + log "github.com/sirupsen/logrus" + + "github.com/stretchr/testify/assert" +) + +func TestIP2Int(t *testing.T) { + ipInt := controllers.IP2Int(net.ParseIP("127.0.0.1")) + assert.Equal(t, uint32(2130706433), ipInt) + + ipInt = controllers.IP2Int([]byte{127, 0, 0, 1}) + assert.Equal(t, uint32(2130706433), ipInt) +} + +func TestInt2IP(t *testing.T) { + IP := controllers.Int2ip(uint32(2130706433)) + assert.Equal(t, "127.0.0.1", IP.String()) +} + +func TestIsIPv4(t *testing.T) { + IsIpv4 := controllers.IsIpv4("127.0.0.1") + assert.Equal(t, true, IsIpv4) + + IsIpv4 = controllers.IsIpv4("127.0.0") + assert.Equal(t, false, IsIpv4) +} + +func TestLastAddress(t *testing.T) { + _, ipv4Net, err := net.ParseCIDR("192.168.0.1/24") + if err != nil { + log.Fatal(err) + } + + lastAddress := controllers.LastAddress(ipv4Net) + assert.Equal(t, "192.168.0.255", lastAddress.String()) +} + +func TestGetIpsFromIpRange(t *testing.T) { + IPStart, IPEnd, err := controllers.GetIpsFromIpRange("192.168.0.1/65") + assert.Equal(t, "'192.168.0.1/65' is not a valid CIDR", err.Error()) + assert.Equal(t, int64(0), IPStart) + assert.Equal(t, int64(0), IPEnd) + + IPStart, IPEnd, err = controllers.GetIpsFromIpRange("192.168.0.1/24") + assert.Equal(t, nil, err) + assert.Equal(t, int64(3232235520), IPStart) + assert.Equal(t, int64(3232235775), IPEnd) +} diff --git a/pkg/csconfig/api.go 
diff --git a/pkg/csconfig/api.go b/pkg/csconfig/api.go new file mode 100644 index 000000000..40180090e --- /dev/null +++ b/pkg/csconfig/api.go @@ -0,0 +1,44 @@ +package csconfig + +import log "github.com/sirupsen/logrus" + +type APICfg struct { + Client *LocalApiClientCfg `yaml:"client"` + Server *LocalApiServerCfg `yaml:"server"` +} + +type ApiCredentialsCfg struct { + URL string `yaml:"url,omitempty" json:"url,omitempty"` + Login string `yaml:"login,omitempty" json:"login,omitempty"` + Password string `yaml:"password,omitempty" json:"-"` +} + +/*global api config (for lapi->oapi)*/ +type OnlineApiClientCfg struct { + CredentialsFilePath string `yaml:"credentials_path,omitempty"` //credentials are edited by the software, so they are stored in a separate file + Credentials *ApiCredentialsCfg `yaml:"-"` +} + +/*local api config (for crowdsec/cscli->lapi)*/ +type LocalApiClientCfg struct { + CredentialsFilePath string `yaml:"credentials_path,omitempty"` //credentials are edited by the software, so they are stored in a separate file + Credentials *ApiCredentialsCfg `yaml:"-"` + InsecureSkipVerify *bool `yaml:"insecure_skip_verify"` // skip TLS certificate verification (e.g. self-signed certificates) +} + +/*local api service configuration*/ +type LocalApiServerCfg struct { + ListenURI string `yaml:"listen_uri,omitempty"` //127.0.0.1:8080 + TLS *TLSCfg `yaml:"tls"` + DbConfig *DatabaseCfg `yaml:"-"` + LogDir string `yaml:"-"` + OnlineClient *OnlineApiClientCfg `yaml:"online_client"` + ProfilesPath string `yaml:"profiles_path,omitempty"` + Profiles []*ProfileCfg `yaml:"-"` + LogLevel *log.Level `yaml:"log_level"` +} + +type TLSCfg struct { + CertFilePath string `yaml:"cert_file"` + KeyFilePath string `yaml:"key_file"` +} diff --git a/pkg/csconfig/common.go b/pkg/csconfig/common.go new file mode 100644 index 000000000..abe7b202f --- /dev/null +++ b/pkg/csconfig/common.go @@ -0,0 +1,13 @@ +package csconfig + +import log "github.com/sirupsen/logrus" + +/*daemonization/service related stuff*/ +type CommonCfg struct { + Daemonize bool + PidDir string `yaml:"pid_dir"` + LogMedia string `yaml:"log_media"` + LogDir string `yaml:"log_dir,omitempty"` //if LogMedia = file + LogLevel *log.Level `yaml:"log_level"` + WorkingDir string `yaml:"working_dir,omitempty"` ///var/run +}
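ApiCredentialsCfg above is deliberately kept out of the main configuration file: only a credentials_path is recorded there, and the secrets file itself is parsed separately. A self-contained sketch of that mapping (the local struct mirrors the yaml tags above, and the inline YAML content is made up for illustration):

```go
package main

import (
	"fmt"
	"log"

	"gopkg.in/yaml.v2"
)

// Local mirror of csconfig.ApiCredentialsCfg's yaml tags.
type apiCredentials struct {
	URL      string `yaml:"url,omitempty"`
	Login    string `yaml:"login,omitempty"`
	Password string `yaml:"password,omitempty"`
}

func main() {
	// Typical shape of a lapi-secrets.yaml / online-api-secrets.yaml file.
	raw := []byte("url: http://127.0.0.1:8080/\nlogin: machine1\npassword: s3cret\n")

	creds := apiCredentials{}
	// UnmarshalStrict rejects unknown keys, as the config loader below does.
	if err := yaml.UnmarshalStrict(raw, &creds); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", creds)
}
```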
`yaml:"config_dir,omitempty"` - AcquisitionFile string `yaml:"acquis_path,omitempty"` - SingleFile string //for forensic mode - SingleFileLabel string //for forensic mode - PIDFolder string `yaml:"pid_dir,omitempty"` - LogFolder string `yaml:"log_dir,omitempty"` - LogMode string `yaml:"log_mode,omitempty"` //like file, syslog or stdout ? - LogLevel log.Level `yaml:"log_level,omitempty"` //trace,debug,info,warning,error - Daemonize bool `yaml:"daemon,omitempty"` //true -> go background - Profiling bool `yaml:"profiling,omitempty"` //true -> enable runtime profiling - APIMode bool `yaml:"apimode,omitempty"` //true -> enable api push - CsCliFolder string `yaml:"cscli_dir"` //cscli folder - NbParsers int `yaml:"parser_routines"` //the number of go routines to start for parsing - SimulationCfgPath string `yaml:"simulation_path,omitempty"` - SimulationCfg *SimulationConfig - Linter bool - Prometheus bool - PrometheusMode string `yaml:"prometheus_mode"` - HTTPListen string `yaml:"http_listen,omitempty"` - RestoreMode string - DumpBuckets bool - OutputConfig *outputs.OutputFactory `yaml:"plugin"` -} - -// NewCrowdSecConfig create a new crowdsec configuration with default configuration -func NewCrowdSecConfig() *CrowdSec { - return &CrowdSec{ - LogLevel: log.InfoLevel, - Daemonize: false, - Profiling: false, - WorkingFolder: "/tmp/", - DataFolder: "/var/lib/crowdsec/data/", - ConfigFolder: "/etc/crowdsec/config/", - PIDFolder: "/var/run/", - LogFolder: "/var/log/", - LogMode: "stdout", - APIMode: false, - NbParsers: 1, - Prometheus: false, - HTTPListen: "127.0.0.1:6060", - } -} - -func (c *CrowdSec) LoadSimulation() error { - if c.SimulationCfgPath != "" { - rcfg, err := ioutil.ReadFile(c.SimulationCfgPath) - if err != nil { - return fmt.Errorf("while reading '%s' : %s", c.SimulationCfgPath, err) - } - simCfg := SimulationConfig{} - if err := yaml.UnmarshalStrict(rcfg, &simCfg); err != nil { - return fmt.Errorf("while parsing '%s' : %s", c.SimulationCfgPath, err) - } - c.SimulationCfg = &simCfg +func (c *GlobalConfig) Dump() error { + out, err := yaml.Marshal(c) + if err != nil { + return errors.Wrap(err, "failed marshaling config") } + fmt.Printf("%s", string(out)) return nil } -func (c *CrowdSec) LoadConfigurationFile(configFile *string) error { - /*overriden by cfg file*/ +func (c *GlobalConfig) LoadConfigurationFile(path string) error { - if *configFile != "" { - rcfg, err := ioutil.ReadFile(*configFile) - if err != nil { - return fmt.Errorf("read '%s' : %s", *configFile, err) - } - if err := yaml.UnmarshalStrict(rcfg, c); err != nil { - return fmt.Errorf("parse '%s' : %s", *configFile, err) - } - if c.AcquisitionFile == "" { - c.AcquisitionFile = filepath.Clean(c.ConfigFolder + "/acquis.yaml") - } + fcontent, err := ioutil.ReadFile(path) + if err != nil { + return errors.Wrap(err, "failed to read config file") + } + err = yaml.UnmarshalStrict(fcontent, c) + if err != nil { + return errors.Wrap(err, "failed unmarshaling config") + } + path, err = filepath.Abs(path) + if err != nil { + return errors.Wrap(err, "failed to load absolute path") + } + c.Self = &path + if err := c.LoadConfiguration(); err != nil { + return errors.Wrap(err, "failed to load sub configurations") + } + + return nil +} + +func (c *GlobalConfig) LoadConfiguration() error { + if c.ConfigPaths.ConfigDir == "" { + return fmt.Errorf("please provide a configuration directory with the 'config_dir' directive in the 'config_paths' section") + } + + if c.ConfigPaths.DataDir == "" { + return fmt.Errorf("please provide a data 
directory with the 'data_dir' directive in the 'config_paths' section") + } + + if c.ConfigPaths.HubDir == "" { + c.ConfigPaths.HubDir = filepath.Clean(c.ConfigPaths.ConfigDir + "/hub") + } + + if c.ConfigPaths.HubIndexFile == "" { + c.ConfigPaths.HubIndexFile = filepath.Clean(c.ConfigPaths.HubDir + "/.index.json") } if err := c.LoadSimulation(); err != nil { - return fmt.Errorf("loading simulation config : %s", err) - } - return nil -} - -// LoadConfig return configuration parsed from command line and configuration file -func (c *CrowdSec) LoadConfig() error { - AcquisitionFile := flag.String("acquis", "", "path to acquis.yaml") - configFile := flag.String("c", "/etc/crowdsec/config/default.yaml", "configuration file") - printTrace := flag.Bool("trace", false, "VERY verbose") - printDebug := flag.Bool("debug", false, "print debug-level on stdout") - printInfo := flag.Bool("info", false, "print info-level on stdout") - printVersion := flag.Bool("version", false, "display version") - APIMode := flag.Bool("api", false, "perform pushes to api") - profileMode := flag.Bool("profile", false, "Enable performance profiling") - catFile := flag.String("file", "", "Process a single file in time-machine") - catFileType := flag.String("type", "", "Labels.type for file in time-machine") - daemonMode := flag.Bool("daemon", false, "Daemonize, go background, drop PID file, log to file") - testMode := flag.Bool("t", false, "only test configs") - prometheus := flag.Bool("prometheus-metrics", false, "expose http prometheus collector (see http_listen)") - restoreMode := flag.String("restore-state", "", "[dev] restore buckets state from json file") - dumpMode := flag.Bool("dump-state", false, "[dev] Dump bucket state at the end of run.") - - flag.Parse() - - if *printVersion { - cwversion.Show() - os.Exit(0) + return err } - if *catFile != "" { - if *catFileType == "" { - return fmt.Errorf("-file requires -type") + if c.Crowdsec != nil { + if c.Crowdsec.AcquisitionFilePath == "" { + c.Crowdsec.AcquisitionFilePath = filepath.Clean(c.ConfigPaths.ConfigDir + "/acquis.yaml") + } + c.Crowdsec.ConfigDir = c.ConfigPaths.ConfigDir + c.Crowdsec.DataDir = c.ConfigPaths.DataDir + c.Crowdsec.HubDir = c.ConfigPaths.HubDir + c.Crowdsec.HubIndexFile = c.ConfigPaths.HubIndexFile + if c.Crowdsec.ParserRoutinesCount <= 0 { + c.Crowdsec.ParserRoutinesCount = 1 + } + + if c.Crowdsec.BucketsRoutinesCount <= 0 { + c.Crowdsec.BucketsRoutinesCount = 1 + } + + if c.Crowdsec.OutputRoutinesCount <= 0 { + c.Crowdsec.OutputRoutinesCount = 1 } - c.SingleFile = *catFile - c.SingleFileLabel = *catFileType } - if err := c.LoadConfigurationFile(configFile); err != nil { - return fmt.Errorf("Error while loading configuration : %s", err) + if err := c.CleanupPaths(); err != nil { + return errors.Wrap(err, "invalid config") } - if *AcquisitionFile != "" { - c.AcquisitionFile = *AcquisitionFile - } - if *dumpMode { - c.DumpBuckets = true - } - if *prometheus { - c.Prometheus = true - } - if *testMode { - c.Linter = true - } - /*overriden by cmdline*/ - if *daemonMode { - c.Daemonize = true - } - if *profileMode { - c.Profiling = true - } - if *printDebug { - c.LogLevel = log.DebugLevel - } - if *printInfo { - c.LogLevel = log.InfoLevel - } - if *printTrace { - c.LogLevel = log.TraceLevel - } - if *APIMode { - c.APIMode = true + if c.Cscli != nil { + c.Cscli.DbConfig = c.DbConfig + c.Cscli.ConfigDir = c.ConfigPaths.ConfigDir + c.Cscli.DataDir = c.ConfigPaths.DataDir + c.Cscli.HubDir = c.ConfigPaths.HubDir + c.Cscli.HubIndexFile = 
c.ConfigPaths.HubIndexFile } - if *restoreMode != "" { - c.RestoreMode = *restoreMode + if c.API.Client != nil && c.API.Client.CredentialsFilePath != "" { + fcontent, err := ioutil.ReadFile(c.API.Client.CredentialsFilePath) + if err != nil { + return errors.Wrap(err, fmt.Sprintf("failed to read api client credential configuration file '%s'", c.API.Client.CredentialsFilePath)) + } + err = yaml.UnmarshalStrict(fcontent, &c.API.Client.Credentials) + if err != nil { + return errors.Wrap(err, fmt.Sprintf("failed unmarshaling api client credential configuration file '%s'", c.API.Client.CredentialsFilePath)) + } + if c.API.Client.Credentials != nil && c.API.Client.Credentials.URL != "" { + if !strings.HasSuffix(c.API.Client.Credentials.URL, "/") { + c.API.Client.Credentials.URL = c.API.Client.Credentials.URL + "/" + } + } + if c.API.Client.InsecureSkipVerify == nil { + apiclient.InsecureSkipVerify = true + } else { + apiclient.InsecureSkipVerify = *c.API.Client.InsecureSkipVerify + } + } + + if c.API.Server != nil { + c.API.Server.DbConfig = c.DbConfig + c.API.Server.LogDir = c.Common.LogDir + if err := c.API.Server.LoadProfiles(); err != nil { + return errors.Wrap(err, "while loading profiles for LAPI") + } + if c.API.Server.OnlineClient != nil && c.API.Server.OnlineClient.CredentialsFilePath != "" { + c.API.Server.OnlineClient.Credentials = new(ApiCredentialsCfg) + fcontent, err := ioutil.ReadFile(c.API.Server.OnlineClient.CredentialsFilePath) + if err != nil { + return errors.Wrap(err, fmt.Sprintf("failed to read api server credentials configuration file '%s'", c.API.Server.OnlineClient.CredentialsFilePath)) + } + err = yaml.UnmarshalStrict(fcontent, c.API.Server.OnlineClient.Credentials) + if err != nil { + return errors.Wrap(err, fmt.Sprintf("failed unmarshaling api server credentials configuration file '%s'", c.API.Server.OnlineClient.CredentialsFilePath)) + } + if c.API.Server.OnlineClient.Credentials == nil { + log.Debugf("online credentials not found in '%s', will not use crowdsec api", c.API.Server.OnlineClient.CredentialsFilePath) + } + } + } + + return nil +} + +func (c *GlobalConfig) LoadSimulation() error { + if c.ConfigPaths == nil { + return fmt.Errorf("ConfigPaths is empty") + } + + simCfg := SimulationConfig{} + + if c.ConfigPaths.SimulationFilePath == "" { + c.ConfigPaths.SimulationFilePath = filepath.Clean(c.ConfigPaths.ConfigDir + "/simulation.yaml") + } + + rcfg, err := ioutil.ReadFile(c.ConfigPaths.SimulationFilePath) + if err != nil { + return errors.Wrapf(err, "while reading '%s'", c.ConfigPaths.SimulationFilePath) + } else { + if err := yaml.UnmarshalStrict(rcfg, &simCfg); err != nil { + return fmt.Errorf("while unmarshaling simulation file '%s' : %s", c.ConfigPaths.SimulationFilePath, err) + } + } + if simCfg.Simulation == nil { + simCfg.Simulation = new(bool) + } + if c.Crowdsec != nil { + c.Crowdsec.SimulationConfig = &simCfg + } + if c.Cscli != nil { + c.Cscli.SimulationConfig = &simCfg + } + return nil +} + +func NewConfig() *GlobalConfig { + cfg := GlobalConfig{} + return &cfg +} + +func NewDefaultConfig() *GlobalConfig { + logLevel := log.InfoLevel + CommonCfg := CommonCfg{ + Daemonize: false, + PidDir: "/tmp/", + LogMedia: "stdout", + //LogDir unneeded + LogLevel: &logLevel, + WorkingDir: ".", + } + prometheus := PrometheusCfg{ + Enabled: true, + Level: "full", + } + configPaths := ConfigurationPaths{ + ConfigDir: "/etc/crowdsec/", + DataDir: "/var/lib/crowdsec/data/", + SimulationFilePath: "/etc/crowdsec/config/simulation.yaml", + HubDir: "/etc/crowdsec/hub", + 
HubIndexFile: "/etc/crowdsec/hub/.index.json", + } + crowdsecCfg := CrowdsecServiceCfg{ + AcquisitionFilePath: "/etc/crowdsec/config/acquis.yaml", + ParserRoutinesCount: 1, + } + + cscliCfg := CscliCfg{ + Output: "human", + } + + apiCfg := APICfg{ + Client: &LocalApiClientCfg{ + CredentialsFilePath: "/etc/crowdsec/config/lapi-secrets.yaml", + }, + Server: &LocalApiServerCfg{ + ListenURI: "127.0.0.1:8080", + OnlineClient: &OnlineApiClientCfg{ + CredentialsFilePath: "/etc/crowdsec/config/online-api-secrets.yaml", + }, + }, + } + + dbConfig := DatabaseCfg{ + Type: "sqlite", + DbPath: "/var/lib/crowdsec/data/crowdsec.db", + } + + globalCfg := GlobalConfig{ + Common: &CommonCfg, + Prometheus: &prometheus, + Crowdsec: &crowdsecCfg, + Cscli: &cscliCfg, + API: &apiCfg, + ConfigPaths: &configPaths, + DbConfig: &dbConfig, + } + + return &globalCfg +} + +func (c *GlobalConfig) CleanupPaths() error { + var err error + + if c.Common != nil { + var CommonCleanup = []*string{ + &c.Common.PidDir, + &c.Common.LogDir, + &c.Common.WorkingDir, + } + for _, k := range CommonCleanup { + *k, err = filepath.Abs(*k) + if err != nil { + return errors.Wrap(err, "failed to clean path") + } + } + } + + if c.Crowdsec != nil { + var crowdsecCleanup = []*string{ + &c.Crowdsec.AcquisitionFilePath, + } + for _, k := range crowdsecCleanup { + *k, err = filepath.Abs(*k) + if err != nil { + return errors.Wrap(err, "failed to clean path") + } + } + } + + if c.ConfigPaths != nil { + var configPathsCleanup = []*string{ + &c.ConfigPaths.HubDir, + &c.ConfigPaths.HubIndexFile, + &c.ConfigPaths.ConfigDir, + &c.ConfigPaths.DataDir, + &c.ConfigPaths.SimulationFilePath, + } + for _, k := range configPathsCleanup { + *k, err = filepath.Abs(*k) + if err != nil { + return errors.Wrap(err, "failed to clean path") + } + } } return nil diff --git a/pkg/csconfig/config_paths.go b/pkg/csconfig/config_paths.go new file mode 100644 index 000000000..c6c4a6faf --- /dev/null +++ b/pkg/csconfig/config_paths.go @@ -0,0 +1,9 @@ +package csconfig + +type ConfigurationPaths struct { + ConfigDir string `yaml:"config_dir"` + DataDir string `yaml:"data_dir,omitempty"` + SimulationFilePath string `yaml:"simulation_path,omitempty"` + HubIndexFile string `yaml:"index_path,omitempty"` //path of the .index.json + HubDir string `yaml:"hub_dir,omitempty"` +} diff --git a/pkg/csconfig/config_test.go b/pkg/csconfig/config_test.go index 130afb1e0..c12e52d6a 100644 --- a/pkg/csconfig/config_test.go +++ b/pkg/csconfig/config_test.go @@ -1,43 +1,199 @@ package csconfig import ( - "flag" - "os" + "fmt" + "strings" "testing" - "github.com/crowdsecurity/crowdsec/pkg/outputs" log "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" ) +func TestDefaultConfig(t *testing.T) { + x := NewDefaultConfig() + x.Dump() +} + +func TestNormalLoad(t *testing.T) { + + x := NewConfig() + err := x.LoadConfigurationFile("./tests/config.yaml") + if err != nil { + t.Fatalf("unexpected error %s", err) + } + + x = NewConfig() + err = x.LoadConfigurationFile("./tests/xxx.yaml") + if fmt.Sprintf("%s", err) != "failed to read config file: open ./tests/xxx.yaml: no such file or directory" { + t.Fatalf("unexpected error %s", err) + } + + x = NewConfig() + err = x.LoadConfigurationFile("./tests/simulation.yaml") + if !strings.HasPrefix(fmt.Sprintf("%s", err), "failed unmarshaling config: yaml: unmarshal error") { + t.Fatalf("unexpected error %s", err) + } + +} + +func TestCleanupPaths(t *testing.T) { + tests := []struct { + name string + Input *GlobalConfig + expectedResult 
*GlobalConfig + err string + }{ + { + name: "daemon cleanup", + Input: &GlobalConfig{ + Common: &CommonCfg{ + PidDir: "////tmp//", + LogDir: "/////tmp///", + WorkingDir: "/////tmp///", + }, + }, + expectedResult: &GlobalConfig{ + Common: &CommonCfg{ + PidDir: "/tmp", + LogDir: "/tmp", + WorkingDir: "/tmp", + }, + }, + }, + // + { + name: "crowdsec cleanup", + Input: &GlobalConfig{ + Crowdsec: &CrowdsecServiceCfg{ + AcquisitionFilePath: "////tmp//x.yaml", + }, + }, + expectedResult: &GlobalConfig{ + Crowdsec: &CrowdsecServiceCfg{ + AcquisitionFilePath: "/tmp/x.yaml", + }, + }, + }, + // + { + name: "config paths cleanup", + Input: &GlobalConfig{ + ConfigPaths: &ConfigurationPaths{ + HubDir: "////tmp//", + HubIndexFile: "////tmp//x.yaml", + ConfigDir: "////tmp//", + DataDir: "////tmp//", + SimulationFilePath: "//tmp///toto.yaml", + }, + }, + expectedResult: &GlobalConfig{ + ConfigPaths: &ConfigurationPaths{ + HubDir: "/tmp", + HubIndexFile: "/tmp/x.yaml", + ConfigDir: "/tmp", + DataDir: "/tmp", + SimulationFilePath: "/tmp/toto.yaml", + }, + }, + }, + } + for idx, test := range tests { + err := test.Input.CleanupPaths() + if test.err != "" { + if !strings.HasPrefix(fmt.Sprintf("%s", err), test.err) { + t.Fatalf("%d/%d expected err %s got %s", idx, len(tests), test.err, fmt.Sprintf("%s", err)) + } + } + isOk := assert.Equal(t, test.expectedResult, test.Input) + if !isOk { + t.Fatalf("%d/%d failed test", idx, len(tests)) + } + } +} + +func TestSimulationLoading(t *testing.T) { + tests := []struct { + name string + Input *GlobalConfig + expectedResult *SimulationConfig + err string + }{ + { + name: "basic valid simulation", + Input: &GlobalConfig{ + ConfigPaths: &ConfigurationPaths{ + SimulationFilePath: "./tests/simulation.yaml", + }, + Crowdsec: &CrowdsecServiceCfg{}, + }, + expectedResult: &SimulationConfig{Simulation: new(bool)}, + }, + { + name: "basic bad file name", + Input: &GlobalConfig{ + ConfigPaths: &ConfigurationPaths{ + SimulationFilePath: "./tests/xxx.yaml", + }, + Crowdsec: &CrowdsecServiceCfg{}, + }, + err: "while reading './tests/xxx.yaml': open ./tests/xxx.yaml: no such file or directory", + }, + { + name: "basic nil config", + Input: &GlobalConfig{ + ConfigPaths: &ConfigurationPaths{ + SimulationFilePath: "", + }, + Crowdsec: &CrowdsecServiceCfg{}, + }, + }, + { + name: "basic bad file content", + Input: &GlobalConfig{ + ConfigPaths: &ConfigurationPaths{ + SimulationFilePath: "./tests/config.yaml", + }, + Crowdsec: &CrowdsecServiceCfg{}, + }, + err: "while unmarshaling simulation file './tests/config.yaml' : yaml: unmarshal errors", + }, + } + + for idx, test := range tests { + err := test.Input.LoadSimulation() + if err == nil && test.err != "" { + t.Fatalf("%d/%d expected error, didn't get it", idx, len(tests)) + } else if test.err != "" { + if !strings.HasPrefix(fmt.Sprintf("%s", err), test.err) { + t.Fatalf("%d/%d expected '%s' got '%s'", idx, len(tests), + test.err, + fmt.Sprintf("%s", err)) + } + } + + isOk := assert.Equal(t, test.expectedResult, test.Input.Crowdsec.SimulationConfig) + if !isOk { + t.Fatalf("test '%s' failed", test.name) + } + + } + +} + func TestNewCrowdSecConfig(t *testing.T) { tests := []struct { name string - expectedResult *CrowdSec + expectedResult *GlobalConfig err string }{ { - name: "new configuration: basic", - expectedResult: &CrowdSec{ - LogLevel: log.InfoLevel, - Daemonize: false, - Profiling: false, - WorkingFolder: "/tmp/", - DataFolder: "/var/lib/crowdsec/data/", - ConfigFolder: "/etc/crowdsec/config/", - PIDFolder: "/var/run/", - 
LogFolder: "/var/log/", - LogMode: "stdout", - APIMode: false, - NbParsers: 1, - Prometheus: false, - HTTPListen: "127.0.0.1:6060", - }, - err: "", + name: "new configuration: basic", + expectedResult: &GlobalConfig{}, + err: "", }, } for _, test := range tests { - result := NewCrowdSecConfig() + result := NewConfig() isOk := assert.Equal(t, test.expectedResult, result) if !isOk { t.Fatalf("test '%s' failed", test.name) @@ -46,366 +202,3 @@ func TestNewCrowdSecConfig(t *testing.T) { } } - -func TestLoadConfig(t *testing.T) { - tests := []struct { - name string - expectedResult *CrowdSec - Args []string - err string - }{ - { - name: "load configuration: basic", - expectedResult: &CrowdSec{ - LogLevel: log.InfoLevel, - Daemonize: true, - Profiling: true, - WorkingFolder: "./tests/", - DataFolder: "./tests/", - ConfigFolder: "./tests/", - PIDFolder: "./tests/", - LogFolder: "./tests/", - LogMode: "stdout", - APIMode: true, - NbParsers: 1, - Prometheus: true, - HTTPListen: "127.0.0.1:6060", - AcquisitionFile: "tests/acquis.yaml", - CsCliFolder: "./tests/cscli/", - SimulationCfg: &SimulationConfig{ - Simulation: false, - Exclusions: nil, - }, - SimulationCfgPath: "./tests/simulation.yaml", - OutputConfig: &outputs.OutputFactory{ - BackendFolder: "./tests/plugins/backend", - MaxRecords: "", - MaxRecordsAge: "720h", - Flush: false, - Debug: false, - }, - }, - Args: []string{ - "crowdsec", - "-c", - "./tests/config.yaml", - }, - err: "", - }, - { - name: "load configuration: with -file", - expectedResult: &CrowdSec{ - LogLevel: log.InfoLevel, - SingleFile: "./tests/test.file", - SingleFileLabel: "test", - Daemonize: true, - Profiling: true, - WorkingFolder: "./tests/", - DataFolder: "./tests/", - ConfigFolder: "./tests/", - PIDFolder: "./tests/", - LogFolder: "./tests/", - LogMode: "stdout", - APIMode: true, - NbParsers: 1, - Prometheus: true, - HTTPListen: "127.0.0.1:6060", - AcquisitionFile: "tests/acquis.yaml", - CsCliFolder: "./tests/cscli/", - SimulationCfg: &SimulationConfig{ - Simulation: false, - Exclusions: nil, - }, - SimulationCfgPath: "./tests/simulation.yaml", - OutputConfig: &outputs.OutputFactory{ - BackendFolder: "./tests/plugins/backend", - MaxRecords: "", - MaxRecordsAge: "720h", - Flush: false, - Debug: false, - }, - }, - Args: []string{ - "crowdsec", - "-c", - "./tests/config.yaml", - "-file", - "./tests/test.file", - "-type", - "test", - }, - err: "", - }, - { - name: "load configuration: with -file without -type", - expectedResult: &CrowdSec{ - LogLevel: log.InfoLevel, - Daemonize: false, - Profiling: false, - WorkingFolder: "/tmp/", - DataFolder: "/var/lib/crowdsec/data/", - ConfigFolder: "/etc/crowdsec/config/", - PIDFolder: "/var/run/", - LogFolder: "/var/log/", - LogMode: "stdout", - APIMode: false, - NbParsers: 1, - Prometheus: false, - HTTPListen: "127.0.0.1:6060", - }, - Args: []string{ - "crowdsec", - "-c", - "./tests/config.yaml", - "-file", - "./tests/test.file", - }, - err: "-file requires -type", - }, - { - name: "load configuration: all flags set", - expectedResult: &CrowdSec{ - LogLevel: log.TraceLevel, - Daemonize: true, - Profiling: true, - WorkingFolder: "./tests/", - DataFolder: "./tests/", - ConfigFolder: "./tests/", - PIDFolder: "./tests/", - LogFolder: "./tests/", - LogMode: "stdout", - APIMode: true, - Linter: true, - NbParsers: 1, - Prometheus: true, - HTTPListen: "127.0.0.1:6060", - AcquisitionFile: "./tests/acquis.yaml", - CsCliFolder: "./tests/cscli/", - SimulationCfg: &SimulationConfig{ - Simulation: false, - Exclusions: nil, - }, - 
SimulationCfgPath: "./tests/simulation.yaml", - OutputConfig: &outputs.OutputFactory{ - BackendFolder: "./tests/plugins/backend", - MaxRecords: "", - MaxRecordsAge: "720h", - Flush: false, - Debug: false, - }, - RestoreMode: "./tests/states.json", - DumpBuckets: true, - }, - Args: []string{ - "crowdsec", - "-c", - "./tests/config.yaml", - "-acquis", - "./tests/acquis.yaml", - "-dump-state", - "-prometheus-metrics", - "-t", - "-daemon", - "-profile", - "-debug", - "-trace", - "-info", - "-restore-state", - "./tests/states.json", - "-api", - }, - err: "", - }, - { - name: "load configuration: bad config file", - expectedResult: &CrowdSec{ - LogLevel: log.InfoLevel, - Daemonize: true, - Profiling: true, - WorkingFolder: "./tests/", - DataFolder: "./tests/", - ConfigFolder: "./tests/", - PIDFolder: "./tests/", - LogFolder: "./tests/", - LogMode: "stdout", - APIMode: true, - Linter: false, - NbParsers: 1, - Prometheus: true, - HTTPListen: "127.0.0.1:6060", - CsCliFolder: "./tests/cscli/", - SimulationCfgPath: "./tests/simulation.yaml", - OutputConfig: &outputs.OutputFactory{ - BackendFolder: "./tests/plugins/backend", - MaxRecords: "", - MaxRecordsAge: "720h", - Flush: false, - Debug: false, - }, - }, - Args: []string{ - "crowdsec", - "-c", - "./tests/bad_config.yaml", - }, - err: "Error while loading configuration : parse './tests/bad_config.yaml' : yaml: unmarshal errors:\n line 1: field non_existing_field not found in type csconfig.CrowdSec", - }, - { - name: "load configuration: bad simulation file", - expectedResult: &CrowdSec{ - LogLevel: log.InfoLevel, - Daemonize: true, - Profiling: true, - WorkingFolder: "./tests/", - DataFolder: "./tests/", - ConfigFolder: "./tests/", - PIDFolder: "./tests/", - LogFolder: "./tests/", - LogMode: "stdout", - APIMode: true, - Linter: false, - NbParsers: 1, - Prometheus: true, - AcquisitionFile: "tests/acquis.yaml", - HTTPListen: "127.0.0.1:6060", - CsCliFolder: "./tests/cscli/", - SimulationCfgPath: "./tests/bad_simulation.yaml", - OutputConfig: &outputs.OutputFactory{ - BackendFolder: "./tests/plugins/backend", - MaxRecords: "", - MaxRecordsAge: "720h", - Flush: false, - Debug: false, - }, - }, - Args: []string{ - "crowdsec", - "-c", - "./tests/bad_config_simulation.yaml", - }, - err: `Error while loading configuration : loading simulation config : while parsing './tests/bad_simulation.yaml' : yaml: unmarshal errors: - line 1: field test not found in type csconfig.SimulationConfig`, - }, - { - name: "load configuration: bad config file", - expectedResult: &CrowdSec{ - LogLevel: log.InfoLevel, - Daemonize: true, - Profiling: true, - WorkingFolder: "./tests/", - DataFolder: "./tests/", - ConfigFolder: "./tests/", - PIDFolder: "./tests/", - LogFolder: "./tests/", - LogMode: "stdout", - APIMode: true, - Linter: false, - NbParsers: 1, - Prometheus: true, - HTTPListen: "127.0.0.1:6060", - CsCliFolder: "./tests/cscli/", - SimulationCfgPath: "./tests/simulation.yaml", - OutputConfig: &outputs.OutputFactory{ - BackendFolder: "./tests/plugins/backend", - MaxRecords: "", - MaxRecordsAge: "720h", - Flush: false, - Debug: false, - }, - }, - Args: []string{ - "crowdsec", - "-c", - "./tests/bad_config.yaml", - }, - err: "Error while loading configuration : parse './tests/bad_config.yaml' : yaml: unmarshal errors:\n line 1: field non_existing_field not found in type csconfig.CrowdSec", - }, - { - name: "load configuration: non exist simulation file", - expectedResult: &CrowdSec{ - LogLevel: log.InfoLevel, - Daemonize: true, - Profiling: true, - WorkingFolder: 
"./tests/", - DataFolder: "./tests/", - ConfigFolder: "./tests/", - PIDFolder: "./tests/", - LogFolder: "./tests/", - LogMode: "stdout", - APIMode: true, - Linter: false, - NbParsers: 1, - Prometheus: true, - AcquisitionFile: "tests/acquis.yaml", - HTTPListen: "127.0.0.1:6060", - CsCliFolder: "./tests/cscli/", - SimulationCfgPath: "./tests/non_exist.yaml", - OutputConfig: &outputs.OutputFactory{ - BackendFolder: "./tests/plugins/backend", - MaxRecords: "", - MaxRecordsAge: "720h", - Flush: false, - Debug: false, - }, - }, - Args: []string{ - "crowdsec", - "-c", - "./tests/bad_config_simulation_1.yaml", - }, - err: "Error while loading configuration : loading simulation config : while reading './tests/non_exist.yaml' : open ./tests/non_exist.yaml: no such file or directory", - }, - { - name: "load configuration: non existent configuration file", - expectedResult: &CrowdSec{ - LogLevel: log.InfoLevel, - Daemonize: false, - Profiling: false, - WorkingFolder: "/tmp/", - DataFolder: "/var/lib/crowdsec/data/", - ConfigFolder: "/etc/crowdsec/config/", - PIDFolder: "/var/run/", - LogFolder: "/var/log/", - LogMode: "stdout", - APIMode: false, - NbParsers: 1, - Prometheus: false, - HTTPListen: "127.0.0.1:6060", - }, - Args: []string{ - "crowdsec", - "-c", - "./tests/non_exist.yaml", - }, - err: "Error while loading configuration : read './tests/non_exist.yaml' : open ./tests/non_exist.yaml: no such file or directory", - }, - } - - oldArgs := os.Args - defer func() { os.Args = oldArgs }() - for _, test := range tests { - log.Printf("testing '%s'", test.name) - flag.CommandLine = flag.NewFlagSet(test.Args[0], flag.ExitOnError) - result := NewCrowdSecConfig() - os.Args = test.Args - err := result.LoadConfig() - - if test.err != "" { - if err == nil { - t.Fatalf("test '%s' should returned an error", test.name) - } - isOk := assert.EqualErrorf(t, err, test.err, "") - if !isOk { - t.Fatalf("test '%s' failed", test.name) - } - } - if test.err == "" && err != nil { - t.Fatalf("test '%s' return an error : %s", test.name, err) - } - isOk := assert.Equal(t, test.expectedResult, result) - if !isOk { - t.Fatalf("test '%s' failed", test.name) - } - log.Infof("test '%s' : OK", test.name) - } -} diff --git a/pkg/csconfig/crowdsec_service.go b/pkg/csconfig/crowdsec_service.go new file mode 100644 index 000000000..9455a77a7 --- /dev/null +++ b/pkg/csconfig/crowdsec_service.go @@ -0,0 +1,20 @@ +package csconfig + +/*Configurations needed for crowdsec to load parser/scenarios/... 
+ acquisition*/ +type CrowdsecServiceCfg struct { + AcquisitionFilePath string `yaml:"acquisition_path,omitempty"` + ParserRoutinesCount int `yaml:"parser_routines"` + BucketsRoutinesCount int `yaml:"buckets_routines"` + OutputRoutinesCount int `yaml:"output_routines"` + SimulationConfig *SimulationConfig `yaml:"-"` + LintOnly bool `yaml:"-"` //if set to true, exit after loading configs + BucketStateFile string `yaml:"state_input_file,omitempty"` //if we need to deserialize buckets at startup + BucketStateDumpDir string `yaml:"state_output_dir,omitempty"` //if we need to serialize buckets on shutdown + BucketsGCEnabled bool `yaml:"-"` //we need to garbage collect buckets when in forensic mode + + HubDir string `yaml:"-"` + DataDir string `yaml:"-"` + ConfigDir string `yaml:"-"` + HubIndexFile string `yaml:"-"` + SimulationFilePath string `yaml:"-"` +} diff --git a/pkg/csconfig/cscli.go b/pkg/csconfig/cscli.go new file mode 100644 index 000000000..be5bd1f29 --- /dev/null +++ b/pkg/csconfig/cscli.go @@ -0,0 +1,14 @@ +package csconfig + +/*cscli specific config, such as hub directory*/ +type CscliCfg struct { + Output string `yaml:"output,omitempty"` + HubBranch string `yaml:"hub_branch"` + SimulationConfig *SimulationConfig `yaml:"-"` + DbConfig *DatabaseCfg `yaml:"-"` + HubDir string `yaml:"-"` + DataDir string `yaml:"-"` + ConfigDir string `yaml:"-"` + HubIndexFile string `yaml:"-"` + SimulationFilePath string `yaml:"-"` +} diff --git a/pkg/csconfig/database.go b/pkg/csconfig/database.go new file mode 100644 index 000000000..9ca2dd1c8 --- /dev/null +++ b/pkg/csconfig/database.go @@ -0,0 +1,20 @@ +package csconfig + +import log "github.com/sirupsen/logrus" + +type DatabaseCfg struct { + User string `yaml:"user"` + Password string `yaml:"password"` + DbName string `yaml:"db_name"` + Host string `yaml:"host"` + Port int `yaml:"port"` + DbPath string `yaml:"db_path"` + Type string `yaml:"type"` + Flush *FlushDBCfg `yaml:"flush"` + LogLevel *log.Level `yaml:"log_level"` +} + +type FlushDBCfg struct { + MaxItems *int `yaml:"max_items"` + MaxAge *string `yaml:"max_age"` +} diff --git a/pkg/csconfig/profiles.go b/pkg/csconfig/profiles.go new file mode 100644 index 000000000..9e4b3ff92 --- /dev/null +++ b/pkg/csconfig/profiles.go @@ -0,0 +1,78 @@ +package csconfig + +import ( + "fmt" + "io" + "os" + + "github.com/antonmedv/expr" + "github.com/antonmedv/expr/vm" + "github.com/crowdsecurity/crowdsec/pkg/exprhelpers" + "github.com/crowdsecurity/crowdsec/pkg/models" + "github.com/pkg/errors" + log "github.com/sirupsen/logrus" + "gopkg.in/yaml.v2" +) + +//Profile structure(s) are used by the local API to "decide" what kind of decision should be applied when a scenario with an active remediation has been triggered +type ProfileCfg struct { + Name string `yaml:"name,omitempty"` + Debug *bool `yaml:"debug,omitempty"` + Filters []string `yaml:"filters,omitempty"` //A list of OR'ed expressions. 
Each is evaluated against the models.Alert object. + RuntimeFilters []*vm.Program `json:"-"` + DebugFilters []*exprhelpers.ExprDebugger `json:"-"` + Decisions []models.Decision `yaml:"decisions,omitempty"` + OnSuccess string `yaml:"on_success,omitempty"` //continue or break + OnFailure string `yaml:"on_failure,omitempty"` //continue or break +} + +func (c *LocalApiServerCfg) LoadProfiles() error { + if c.ProfilesPath == "" { + return fmt.Errorf("empty profiles path") + } + + yamlFile, err := os.Open(c.ProfilesPath) + if err != nil { + return errors.Wrapf(err, "while opening %s", c.ProfilesPath) + } + + //process the yaml + dec := yaml.NewDecoder(yamlFile) + dec.SetStrict(true) + for { + t := ProfileCfg{} + err = dec.Decode(&t) + if err != nil { + if err == io.EOF { + break + } + return errors.Wrapf(err, "while decoding %s", c.ProfilesPath) + } + c.Profiles = append(c.Profiles, &t) + } + + for pIdx, profile := range c.Profiles { + var runtimeFilter *vm.Program + var debugFilter *exprhelpers.ExprDebugger + + c.Profiles[pIdx].RuntimeFilters = make([]*vm.Program, len(profile.Filters)) + c.Profiles[pIdx].DebugFilters = make([]*exprhelpers.ExprDebugger, len(profile.Filters)) + + for fIdx, filter := range profile.Filters { + if runtimeFilter, err = expr.Compile(filter, expr.Env(exprhelpers.GetExprEnv(map[string]interface{}{"Alert": &models.Alert{}}))); err != nil { + return errors.Wrapf(err, "Error compiling filter of %s", profile.Name) + } + c.Profiles[pIdx].RuntimeFilters[fIdx] = runtimeFilter + if debugFilter, err = exprhelpers.NewDebugger(filter, expr.Env(exprhelpers.GetExprEnv(map[string]interface{}{"Alert": &models.Alert{}}))); err != nil { + log.Debugf("Error compiling debug filter of %s : %s", profile.Name, err) + // Don't fail if we can't compile the filter - for now + // return errors.Wrapf(err, "Error compiling debug filter of %s", profile.Name) + } + c.Profiles[pIdx].DebugFilters[fIdx] = debugFilter + } + } + if len(c.Profiles) == 0 { + return fmt.Errorf("zero profiles loaded for LAPI") + } + return nil +} diff --git a/pkg/csconfig/prometheus.go b/pkg/csconfig/prometheus.go new file mode 100644 index 000000000..c93e02f23 --- /dev/null +++ b/pkg/csconfig/prometheus.go @@ -0,0 +1,9 @@ +package csconfig + +/**/ +type PrometheusCfg struct { + Enabled bool `yaml:"enabled"` + Level string `yaml:"level"` //aggregated|full + ListenAddr string `yaml:"listen_addr"` + ListenPort int `yaml:"listen_port"` +} diff --git a/pkg/csconfig/simulation.go b/pkg/csconfig/simulation.go new file mode 100644 index 000000000..6a0a99ffc --- /dev/null +++ b/pkg/csconfig/simulation.go @@ -0,0 +1,21 @@ +package csconfig + +type SimulationConfig struct { + Simulation *bool `yaml:"simulation"` + Exclusions []string `yaml:"exclusions,omitempty"` +} + +func (s *SimulationConfig) IsSimulated(scenario string) bool { + var simulated bool + + if s.Simulation != nil && *s.Simulation { + simulated = true + } + for _, excluded := range s.Exclusions { + if excluded == scenario { + simulated = !simulated + break + } + } + return simulated +}
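IsSimulated reads as: the global simulation flag sets the default, and each scenario listed in exclusions inverts it. A small sketch of both directions (a made-up program, using only the types declared above):

```go
package main

import (
	"fmt"

	"github.com/crowdsecurity/crowdsec/pkg/csconfig"
)

func main() {
	on := true
	cfg := csconfig.SimulationConfig{
		Simulation: &on,
		// Exclusions invert the global flag for the listed scenarios.
		Exclusions: []string{"crowdsecurity/ssh-bf"},
	}
	fmt.Println(cfg.IsSimulated("crowdsecurity/http-probing")) // true: simulated globally
	fmt.Println(cfg.IsSimulated("crowdsecurity/ssh-bf"))       // false: excluded, so enforced
}
```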
diff --git a/pkg/csconfig/tests/bad_config.yaml b/pkg/csconfig/tests/bad_config.yaml deleted file mode 100644 index bda935b58..000000000 --- a/pkg/csconfig/tests/bad_config.yaml +++ /dev/null @@ -1,19 +0,0 @@ -non_existing_field: "" -working_dir: ./tests/ -data_dir: ./tests/ -config_dir: ./tests/ -pid_dir: ./tests/ -log_dir: ./tests/ -cscli_dir: ./tests/cscli/ -simulation_path: ./tests/simulation.yaml -log_mode: stdout -log_level: info -profiling: true -apimode: true -daemon: true -prometheus: true -#for prometheus agent / golang debugging -http_listen: 127.0.0.1:6060 -plugin: - backend: "./tests/plugins/backend" - max_records_age: 720h diff --git a/pkg/csconfig/tests/bad_config_simulation.yaml b/pkg/csconfig/tests/bad_config_simulation.yaml deleted file mode 100644 index 58685f896..000000000 --- a/pkg/csconfig/tests/bad_config_simulation.yaml +++ /dev/null @@ -1,18 +0,0 @@ -working_dir: ./tests/ -data_dir: ./tests/ -config_dir: ./tests/ -pid_dir: ./tests/ -log_dir: ./tests/ -cscli_dir: ./tests/cscli/ -simulation_path: ./tests/bad_simulation.yaml -log_mode: stdout -log_level: info -profiling: true -apimode: true -daemon: true -prometheus: true -#for prometheus agent / golang debugging -http_listen: 127.0.0.1:6060 -plugin: - backend: "./tests/plugins/backend" - max_records_age: 720h diff --git a/pkg/csconfig/tests/bad_config_simulation_1.yaml b/pkg/csconfig/tests/bad_config_simulation_1.yaml deleted file mode 100644 index b34d8153e..000000000 --- a/pkg/csconfig/tests/bad_config_simulation_1.yaml +++ /dev/null @@ -1,18 +0,0 @@ -working_dir: ./tests/ -data_dir: ./tests/ -config_dir: ./tests/ -pid_dir: ./tests/ -log_dir: ./tests/ -cscli_dir: ./tests/cscli/ -simulation_path: ./tests/non_exist.yaml -log_mode: stdout -log_level: info -profiling: true -apimode: true -daemon: true -prometheus: true -#for prometheus agent / golang debugging -http_listen: 127.0.0.1:6060 -plugin: - backend: "./tests/plugins/backend" - max_records_age: 720h diff --git a/pkg/csconfig/tests/bad_simulation.yaml b/pkg/csconfig/tests/bad_simulation.yaml deleted file mode 100644 index 128e78a31..000000000 --- a/pkg/csconfig/tests/bad_simulation.yaml +++ /dev/null @@ -1 +0,0 @@ -test: "" \ No newline at end of file diff --git a/pkg/csconfig/tests/config.yaml b/pkg/csconfig/tests/config.yaml index 4c762313d..a8833f541 100644 --- a/pkg/csconfig/tests/config.yaml +++ b/pkg/csconfig/tests/config.yaml @@ -1,18 +1,38 @@ -working_dir: ./tests/ -data_dir: ./tests/ -config_dir: ./tests/ -pid_dir: ./tests/ -log_dir: ./tests/ -cscli_dir: ./tests/cscli/ -simulation_path: ./tests/simulation.yaml -log_mode: stdout -log_level: info -profiling: true -apimode: true -daemon: true -prometheus: true -#for prometheus agent / golang debugging -http_listen: 127.0.0.1:6060 -plugin: - backend: "./tests/plugins/backend" - max_records_age: 720h +common: + daemonize: false + pid_dir: /tmp/ + log_media: stdout + log_level: info + working_dir: . +prometheus: + enabled: true + level: full +crowdsec_service: +# acquisition_path: ./config/acquis.yaml + parser_routines: 1 +cscli: + output: human +db_config: + user: "" + password: "" + db_name: "" + host: "" + port: 0 + db_path: ./crowdsec.db + type: sqlite +api: + client: + credentials_path: ./tests/lapi-secrets.yaml + server: + profiles_path: ./tests/profiles.yaml + listen_uri: 127.0.0.1:8080 + tls: null + online_client: + credentials_path: ./tests/online-api-secrets.yaml +config_paths: + config_dir: ./tests + data_dir: . + simulation_path: ./tests/simulation.yaml + index_path: ./tests/hub/.index.json + hub_dir: ./tests/hub + 
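For orientation, the test configuration above is consumed through GlobalConfig; a minimal sketch of the load path exercised by TestNormalLoad (the path is the test fixture's):

```go
package main

import (
	"log"

	"github.com/crowdsecurity/crowdsec/pkg/csconfig"
)

func main() {
	cfg := csconfig.NewConfig()
	// LoadConfigurationFile parses the YAML strictly, then wires up the
	// sub-sections (config paths, simulation, API client/server, profiles).
	if err := cfg.LoadConfigurationFile("./tests/config.yaml"); err != nil {
		log.Fatalf("loading configuration: %s", err)
	}
	// Dump re-serializes the effective configuration to stdout.
	if err := cfg.Dump(); err != nil {
		log.Fatal(err)
	}
}
```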
diff --git a/pkg/csconfig/tests/lapi-secrets.yaml b/pkg/csconfig/tests/lapi-secrets.yaml new file mode 100644 index 000000000..e69de29bb diff --git a/pkg/csconfig/tests/online-api-secrets.yaml b/pkg/csconfig/tests/online-api-secrets.yaml new file mode 100644 index 000000000..e69de29bb diff --git a/pkg/csconfig/tests/profiles.yaml b/pkg/csconfig/tests/profiles.yaml new file mode 100644 index 000000000..5727f4edd --- /dev/null +++ b/pkg/csconfig/tests/profiles.yaml @@ -0,0 +1,31 @@ + +name: enforce_mfa +#debug: true +filters: + - 'Alert.Remediation == true && Alert.GetScenario() == "crowdsecurity/ssh-enforce-mfa" && Alert.GetScope() == "username"' +decisions: #remediation vs decision + - type: enforce_mfa + scope: "username" + duration: 1h +on_success: continue +--- +name: default_ip_remediation +#debug: true +filters: +# try types.Ip here :) + - Alert.Remediation == true && Alert.GetScope() == "Ip" +decisions: + - type: ban + duration: 1h +on_success: break +--- +#this one won't be reached ^^ +name: default_ip_remediation_2 +#debug: true +filters: +# try types.Ip here :) + - Alert.Remediation == true && Alert.GetScope() == "Ip" +decisions: + - type: ratatatata + duration: 1h +on_success: break diff --git a/pkg/csprofiles/csprofiles.go b/pkg/csprofiles/csprofiles.go new file mode 100644 index 000000000..5a8340ed2 --- /dev/null +++ b/pkg/csprofiles/csprofiles.go @@ -0,0 +1,145 @@ +package csprofiles + +import ( + "fmt" + "net" + + "github.com/antonmedv/expr" + "github.com/crowdsecurity/crowdsec/pkg/csconfig" + "github.com/crowdsecurity/crowdsec/pkg/exprhelpers" + "github.com/crowdsecurity/crowdsec/pkg/models" + "github.com/crowdsecurity/crowdsec/pkg/types" + "github.com/pkg/errors" + log "github.com/sirupsen/logrus" +) + +func GenerateDecisionFromProfile(Profile *csconfig.ProfileCfg, Alert *models.Alert) ([]*models.Decision, error) { + var decisions []*models.Decision + + for _, refDecision := range Profile.Decisions { + decision := models.Decision{} + /*the reference decision from profile is in simulated mode */ + if refDecision.Simulated != nil && *refDecision.Simulated { + decision.Simulated = new(bool) + *decision.Simulated = true + /*the event is already in simulation mode */ + } else if Alert.Simulated != nil && *Alert.Simulated { + decision.Simulated = new(bool) + *decision.Simulated = true + } + /*If the profile specifies a scope, this will prevail. + If not, we're going to get the scope from the source itself*/ + decision.Scope = new(string) + if refDecision.Scope != nil && *refDecision.Scope != "" { + *decision.Scope = *refDecision.Scope + } else { + *decision.Scope = *Alert.Source.Scope + } + /*some fields are populated from the reference object : duration, scope, type*/ + decision.Duration = new(string) + *decision.Duration = *refDecision.Duration + decision.Type = new(string) + *decision.Type = *refDecision.Type + + /*for the others, let's populate it from the alert and its source*/ + decision.Value = new(string) + *decision.Value = *Alert.Source.Value + + if *decision.Scope == types.Ip { + srcAddr := net.ParseIP(Alert.Source.IP) + if srcAddr == nil { + return nil, fmt.Errorf("can't parse ip %s", Alert.Source.IP) + } + decision.StartIP = int64(types.IP2Int(srcAddr)) + decision.EndIP = decision.StartIP + } else if *decision.Scope == types.Range { + /*here we're asked to ban a full range. 
let's keep in mind that it's not always possible : + - the alert is about an IP, but the geolite enrichment failed + - the alert is about an IP, but the geolite enrichment isn't present + - the alert is about a range, in this case it should succeed + */ + if Alert.Source.Range != "" { + srcAddr, srcRange, err := net.ParseCIDR(Alert.Source.Range) + if err != nil { + log.Warningf("Profile [%s] requires a Range decision, but can't parse '%s' from '%s'", + Profile.Name, *Alert.Source.Value, *Alert.Scenario) + continue + } + decision.StartIP = int64(types.IP2Int(srcAddr)) + decision.EndIP = int64(types.IP2Int(types.LastAddress(srcRange))) + decision.Value = new(string) + *decision.Value = Alert.Source.Range + } else { + log.Warningf("Profile [%s] requires a Range decision, but range information is missing from %s", Profile.Name, *Alert.Scenario) + continue + } + } + decision.Origin = new(string) + *decision.Origin = "crowdsec" + if refDecision.Origin != nil { + *decision.Origin = fmt.Sprintf("%s/%s", *decision.Origin, *refDecision.Origin) + } + decision.Scenario = new(string) + *decision.Scenario = *Alert.Scenario + decisions = append(decisions, &decision) + } + return decisions, nil +} + +var clog *log.Entry + +//EvaluateProfiles is going to evaluate an Alert against a set of profiles to generate Decisions +func EvaluateProfiles(Profiles []*csconfig.ProfileCfg, Alert *models.Alert) ([]*models.Decision, error) { + var decisions []*models.Decision + + if clog == nil { + xlog := log.New() + if err := types.ConfigureLogger(xlog); err != nil { + log.Fatalf("While creating profiles-specific logger : %s", err) + } + xlog.SetLevel(log.TraceLevel) + clog = xlog.WithFields(log.Fields{ + "type": "profile", + }) + } + + if !Alert.Remediation { + return nil, nil + } +PROFILE_LOOP: + for _, profile := range Profiles { + for eIdx, expression := range profile.RuntimeFilters { + output, err := expr.Run(expression, exprhelpers.GetExprEnv(map[string]interface{}{"Alert": Alert})) + if err != nil { + log.Warningf("failed to run profile filter expr : %v", err) + return nil, errors.Wrapf(err, "while running expression %s", profile.Filters[eIdx]) + } + switch out := output.(type) { + case bool: + if out { + /*the expression matched, create the associated decision*/ + subdecisions, err := GenerateDecisionFromProfile(profile, Alert) + if err != nil { + return nil, errors.Wrapf(err, "while generating decision from profile %s", profile.Name) + } + decisions = append(decisions, subdecisions...)
+ } else { + if profile.Debug != nil && *profile.Debug { + profile.DebugFilters[eIdx].Run(clog, false, exprhelpers.GetExprEnv(map[string]interface{}{"Alert": Alert})) + } + log.Debugf("Profile %s filter is unsuccessful", profile.Name) + if profile.OnFailure == "break" { + break PROFILE_LOOP + } + } + default: + return nil, fmt.Errorf("unexpected type %T (%v) while running '%s'", output, output, profile.Filters[eIdx]) + + } + if profile.OnSuccess == "break" { + break PROFILE_LOOP + } + } + } + return decisions, nil +} diff --git a/pkg/cwapi/auth.go b/pkg/cwapi/auth.go deleted file mode 100644 index f62e665b6..000000000 --- a/pkg/cwapi/auth.go +++ /dev/null @@ -1,218 +0,0 @@ -package cwapi - -import ( - "fmt" - "io/ioutil" - "net/http" - "regexp" - "strings" - "time" - - "github.com/crowdsecurity/crowdsec/pkg/cwversion" - "github.com/crowdsecurity/crowdsec/pkg/types" - - log "github.com/sirupsen/logrus" - "gopkg.in/yaml.v2" - - "github.com/dghubble/sling" - "gopkg.in/tomb.v2" -) - -type ApiCtx struct { - /*config*/ - ApiVersion string `yaml:"version"` - PullPath string `yaml:"pull_path"` - PushPath string `yaml:"push_path"` - SigninPath string `yaml:"signin_path"` - RegisterPath string `yaml:"register_path"` - ResetPwdPath string `yaml:"reset_pwd_path"` - EnrollPath string `yaml:"enroll_path"` - BaseURL string `yaml:"url"` - CfgUser string `yaml:"machine_id"` - CfgPassword string `yaml:"password"` - Creds ApiCreds `yaml:"-"` - /*mostly for mocking/faking api*/ - Muted bool `yaml:"-"` - DebugDump bool `yaml:"-"` - /*runtime*/ - tokenExpired bool `yaml:"-"` - toPush []types.Event `yaml:"-"` - Http *sling.Sling `yaml:"-"` - PusherTomb tomb.Tomb `yaml:"-"` -} - -type ApiCreds struct { - User string `json:"machine_id" yaml:"machine_id"` - Password string `json:"password" yaml:"password"` - Profile string `json:"profile,omitempty" yaml:"profile,omitempty"` -} - -type ApiResp struct { - StatusCode int `json:"statusCode"` - Error string `json:"error"` - Message string `json:"message"` -} - -type PullResp struct { - StatusCode int `json:"statusCode"` - Body []map[string]string `json:"message"` -} - -func (ctx *ApiCtx) WriteConfig(cfg string) error { - ret, err := yaml.Marshal(ctx) - if err != nil { - return fmt.Errorf("failed to marshal config : %s", err) - } - if err := ioutil.WriteFile(cfg, ret, 0600); err != nil { - return fmt.Errorf("failed to write api file %s : %s", cfg, ret) - } - return nil -} - -func (ctx *ApiCtx) LoadConfig(cfg string) error { - rcfg, err := ioutil.ReadFile(cfg) - if err != nil { - return fmt.Errorf("api load configuration: unable to read configuration file '%s' : %s", cfg, err) - } - if err := yaml.UnmarshalStrict(rcfg, &ctx); err != nil { - return fmt.Errorf("api load configuration: unable to unmarshall configuration file '%s' : %s", cfg, err) - } - if ctx.ApiVersion != cwversion.Constraint_api { - return fmt.Errorf("api load configuration: cscli version only supports %s api, not %s", cwversion.Constraint_api, ctx.ApiVersion) - } - ctx.Creds.User = ctx.CfgUser - ctx.Creds.Password = ctx.CfgPassword - - /* - For sling, if a path starts with '/', it's an absolute path, and it will get rid of the 'prefix', - leading to bad urls - */ - if strings.HasPrefix(ctx.PullPath, "/") || - strings.HasPrefix(ctx.PushPath, "/") || - strings.HasPrefix(ctx.SigninPath, "/") || - strings.HasPrefix(ctx.RegisterPath, "/") || - strings.HasPrefix(ctx.ResetPwdPath, "/") || - strings.HasPrefix(ctx.EnrollPath, "/") { - log.Warningf("!API paths must not be prefixed by /") - } - - httpClient := 
&http.Client{Timeout: 20 * time.Second} - - ctx.Http = sling.New().Client(httpClient).Base(ctx.BaseURL+"/"+ctx.ApiVersion+"/").Set("User-Agent", fmt.Sprintf("Crowdsec/%s", cwversion.VersionStr())) - log.Printf("api load configuration: configuration loaded successfully (base:%s)", ctx.BaseURL+"/"+ctx.ApiVersion+"/") - return nil -} - -func (ctx *ApiCtx) Init(cfg string, profile string) error { - var err error - - err = ctx.LoadConfig(cfg) - if err != nil { - return err - } - ctx.Creds.Profile = profile - ctx.toPush = make([]types.Event, 0) - err = ctx.Signin() - if err != nil { - return err - } - //start the background go-routine - ctx.PusherTomb.Go(func() error { - err := ctx.pushLoop() - if err != nil { - log.Errorf("api push error : %s", err) - return err - } - return nil - }) - return nil -} - -func (ctx *ApiCtx) Shutdown() error { - ctx.PusherTomb.Kill(nil) - log.Infof("Waiting for API routine to finish") - if err := ctx.PusherTomb.Wait(); err != nil { - return fmt.Errorf("API routine returned error : %s", err) - } - return nil -} - -func (ctx *ApiCtx) Signin() error { - if ctx.Creds.User == "" || ctx.Creds.Password == "" { - return fmt.Errorf("api signin: missing credentials in api.yaml") - } - jsonResp := &ApiResp{} - errResp := &ApiResp{} - - resp, err := ctx.Http.New().Post(ctx.SigninPath).BodyJSON(ctx.Creds).Receive(jsonResp, errResp) - if err != nil { - return fmt.Errorf("api signin: HTTP request creation failed: %s", err) - } - - if resp.StatusCode != 200 { - return fmt.Errorf("api signin: return bad HTTP code (%d): %s", resp.StatusCode, errResp.Message) - } - if jsonResp.Message == "" || jsonResp.StatusCode != 200 { - return fmt.Errorf("api signin failed") - } - - ctx.Http = ctx.Http.Set("Authorization", jsonResp.Message) - - log.Printf("api signin: signed in successfuly") - return nil -} - -func (ctx *ApiCtx) RegisterMachine(machineID string, password string) error { - if !validate(machineID) { - log.Fatalf("Machine ID %s is not compliant to '^[a-zA-Z0-9]+$'", machineID) - } - - ctx.Creds.User = machineID - ctx.Creds.Password = password - jsonResp := &ApiResp{} - errResp := &ApiResp{} - resp, err := ctx.Http.New().Post(ctx.RegisterPath).BodyJSON(ctx.Creds).Receive(jsonResp, errResp) - if err != nil { - return fmt.Errorf("api register machine: HTTP request creation failed: %s", err) - } - - if resp.StatusCode != 200 { - return fmt.Errorf("api register machine: return bad HTTP code (%d) : %s", resp.StatusCode, errResp.Message) - } - - if jsonResp.Message == "" || jsonResp.Message != "OK" || jsonResp.StatusCode != 200 { - return fmt.Errorf("api register machine failed") - } - return nil -} - -func (ctx *ApiCtx) ResetPassword(machineID string, password string) error { - if !validate(machineID) { - log.Fatalf("Machine ID %s is not compliant to '^[a-zA-Z0-9]+$'", machineID) - } - - ctx.Creds.User = machineID - ctx.Creds.Password = password - jsonResp := &ApiResp{} - errResp := &ApiResp{} - - data := map[string]string{"machine_id": ctx.Creds.User, "password": ctx.Creds.Password} - resp, err := ctx.Http.New().Post(ctx.ResetPwdPath).BodyJSON(data).Receive(jsonResp, errResp) - if err != nil { - return fmt.Errorf("api reset password: HTTP request creation failed: %s", err) - } - - if resp.StatusCode != 200 { - return fmt.Errorf("api reset password: return bad HTTP code (%d): %s", resp.StatusCode, errResp.Message) - } - - if jsonResp.Message == "" || jsonResp.StatusCode != 200 { - return fmt.Errorf("api reset password failed") - } - return nil -} - -func validate(machineID string) bool { 
- re := regexp.MustCompile("^[a-zA-Z0-9]+$") - return re.MatchString(machineID) -} diff --git a/pkg/cwapi/auth_test.go b/pkg/cwapi/auth_test.go deleted file mode 100644 index 29e9432dd..000000000 --- a/pkg/cwapi/auth_test.go +++ /dev/null @@ -1,425 +0,0 @@ -package cwapi - -import ( - "io/ioutil" - "os" - "testing" - - "github.com/dghubble/sling" - log "github.com/sirupsen/logrus" - "github.com/stretchr/testify/assert" - "gopkg.in/yaml.v2" -) - -func assertConfigFileEqual(t *testing.T, filepath1 string, filepath2 string) { - file1, err := ioutil.ReadFile(filepath1) - if err != nil { - t.Fatalf("unable to read file '%s': %s", filepath1, err) - } - apiCtx1 := &ApiCtx{} - if err := yaml.UnmarshalStrict(file1, &apiCtx1); err != nil { - t.Fatalf("unable to unmarshall configuration file '%s' : %s", filepath1, err) - } - - file2, err := ioutil.ReadFile(filepath2) - if err != nil { - t.Fatalf("unable to read file '%s': %s", filepath2, err) - } - apiCtx2 := &ApiCtx{} - if err := yaml.UnmarshalStrict(file2, &apiCtx2); err != nil { - t.Fatalf("unable to unmarshall configuration file '%s' : %s", filepath2, err) - } - assert.Equal(t, apiCtx1, apiCtx2) -} - -func TestWriteConfig(t *testing.T) { - tests := []struct { - name string - configPath string - compareToFile string - expectedErr bool - givenAPICtx *ApiCtx - }{ - { - name: "basic write config", - configPath: "./tests/tmp_api_config.yaml", - compareToFile: "./tests/api_config.yaml", - expectedErr: false, - givenAPICtx: &ApiCtx{ - ApiVersion: "v1", - PullPath: "pull", - PushPath: "signals", - SigninPath: "signin", - RegisterPath: "register", - ResetPwdPath: "resetpassword", - EnrollPath: "enroll", - BaseURL: "https://my_testendpoint.com", - CfgUser: "test", - CfgPassword: "test", - Creds: ApiCreds{ - User: "test", - Password: "test", - }, - Muted: false, - DebugDump: false, - Http: sling.New().Client(newMockClient()).Base(apiBaseURL), - }, - }, - } - - for _, test := range tests { - err := test.givenAPICtx.WriteConfig(test.configPath) - if test.expectedErr && err == nil { - t.Fatalf("test '%s' should return an error", test.name) - } - if !test.expectedErr && err != nil { - t.Fatalf("test '%s' returned an error", test.name) - } - if test.expectedErr { - continue - } - - assertConfigFileEqual(t, test.configPath, test.compareToFile) - os.Remove(test.configPath) - } - -} - -func TestLoadConfig(t *testing.T) { - tests := []struct { - name string - configPath string - expectedErr bool - expectedAPICtx *ApiCtx - }{ - { - name: "basic load config", - configPath: "./tests/api_config.yaml", - expectedErr: false, - expectedAPICtx: &ApiCtx{ - ApiVersion: "v1", - PullPath: "pull", - PushPath: "signals", - SigninPath: "signin", - RegisterPath: "register", - ResetPwdPath: "resetpassword", - EnrollPath: "enroll", - BaseURL: "https://my_testendpoint.com", - CfgUser: "test", - CfgPassword: "test", - Creds: ApiCreds{ - User: "test", - Password: "test", - }, - Muted: false, - DebugDump: false, - Http: sling.New().Client(newMockClient()).Base(apiBaseURL), - }, - }, - { - name: "load config with bad api version", - configPath: "./tests/api_config_bad_api_version.yaml", - expectedErr: true, - }, - { - name: "load config with bad format file", - configPath: "./tests/api_config_bad_format.yaml", - expectedErr: true, - }, - } - - for _, test := range tests { - apiCtx := &ApiCtx{} - err := apiCtx.LoadConfig(test.configPath) - if test.expectedErr && err == nil { - t.Fatalf("test '%s' should return an error", test.name) - } - if !test.expectedErr && err != nil { - 
t.Fatalf("test '%s' return an error : %s", test.name, err) - } - if test.expectedErr { - continue - } - apiCtx.Http = test.expectedAPICtx.Http // if we don't do that, assert will fail - assert.Equal(t, test.expectedAPICtx, apiCtx) - } -} - -func TestSignin(t *testing.T) { - - tests := []struct { - name string - givenAPICtx *ApiCtx - expectedErr bool - }{ - { - name: "basic api signin", - expectedErr: false, - givenAPICtx: &ApiCtx{ - ApiVersion: "v1", - SigninPath: "signin", - BaseURL: "https://my_testendpoint.com", - CfgUser: "machine_id", - CfgPassword: "machine_password", - Creds: ApiCreds{ - User: "machine_id", - Password: "machine_password", - Profile: "crowdsec/test1,crowdsec/test2", - }, - Http: sling.New().Client(newMockClient()).Base(apiBaseURL), - }, - }, - { - name: "api signin missing credentials", - expectedErr: true, - givenAPICtx: &ApiCtx{ - ApiVersion: "v1", - SigninPath: "signin", - BaseURL: "https://my_testendpoint.com", - CfgUser: "machine_id", - CfgPassword: "machine_password", - Creds: ApiCreds{ - Profile: "crowdsec/test1,crowdsec/test2", - }, - Http: sling.New().Client(newMockClient()).Base(apiBaseURL), - }, - }, - { - name: "api signin unknown api PATH", - expectedErr: true, - givenAPICtx: &ApiCtx{ - ApiVersion: "v1", - SigninPath: "unknown_path", - BaseURL: "https://my_testendpoint.com", - CfgUser: "machine_id", - CfgPassword: "machine_password", - Creds: ApiCreds{ - User: "machine_id", - Password: "machine_password", - Profile: "crowdsec/test1,crowdsec/test2", - }, - Http: sling.New().Client(newMockClient()).Base(apiBaseURL), - }, - }, - { - name: "api signin malformed response", - expectedErr: true, - givenAPICtx: &ApiCtx{ - ApiVersion: "v1", - SigninPath: "malformed_response", - BaseURL: "https://my_testendpoint.com", - CfgUser: "machine_id", - CfgPassword: "machine_password", - Creds: ApiCreds{ - User: "machine_id", - Password: "machine_password", - Profile: "crowdsec/test1,crowdsec/test2", - }, - Http: sling.New().Client(newMockClient()).Base(apiBaseURL), - }, - }, - { - name: "api signin bad response", - expectedErr: true, - givenAPICtx: &ApiCtx{ - ApiVersion: "v1", - SigninPath: "bad_response", - BaseURL: "https://my_testendpoint.com", - CfgUser: "machine_id", - CfgPassword: "machine_password", - Creds: ApiCreds{ - User: "machine_id", - Password: "machine_password", - Profile: "crowdsec/test1,crowdsec/test2", - }, - Http: sling.New().Client(newMockClient()).Base(apiBaseURL), - }, - }, - } - - for _, test := range tests { - err := test.givenAPICtx.Signin() - if !test.expectedErr && err != nil { - t.Fatalf("test '%s' failed : %s", test.name, err) - } - if test.expectedErr && err == nil { - t.Fatalf("test '%s' should return an err", test.name) - } - log.Printf("test '%s' : OK", test.name) - } - -} - -func TestRegisterMachine(t *testing.T) { - - tests := []struct { - name string - givenAPICtx *ApiCtx - expectedErr bool - expectedAPICtx *ApiCtx - expectedAPICreds *ApiCreds - }{ - { - name: "basic api register machine", - expectedErr: false, - givenAPICtx: &ApiCtx{ - ApiVersion: "v1", - RegisterPath: "register", - BaseURL: "https://my_testendpoint.com", - CfgUser: "machineid", - CfgPassword: "machine_password", - Creds: ApiCreds{ - Profile: "crowdsec/test1,crowdsec/test2", - }, - Http: sling.New().Client(newMockClient()).Base(apiBaseURL), - }, - expectedAPICreds: &ApiCreds{ - User: "machineid", - Password: "machine_password", - Profile: "crowdsec/test1,crowdsec/test2", - }, - }, - { - name: "api register unknown api PATH", - expectedErr: true, - givenAPICtx: 
&ApiCtx{ - ApiVersion: "v1", - RegisterPath: "unknown_path", - BaseURL: "https://my_testendpoint.com", - CfgUser: "machineid", - CfgPassword: "machine_password", - Creds: ApiCreds{ - User: "machineid", - Password: "machine_password", - Profile: "crowdsec/test1,crowdsec/test2", - }, - Http: sling.New().Client(newMockClient()).Base(apiBaseURL), - }, - }, - { - name: "api register bad response", - expectedErr: true, - givenAPICtx: &ApiCtx{ - ApiVersion: "v1", - RegisterPath: "bad_response", - BaseURL: "https://my_testendpoint.com", - CfgUser: "machineid", - CfgPassword: "machine_password", - Creds: ApiCreds{ - Profile: "crowdsec/test1,crowdsec/test2", - }, - Http: sling.New().Client(newMockClient()).Base(apiBaseURL), - }, - }, - } - - for _, test := range tests { - log.Printf("test '%s'", test.name) - err := test.givenAPICtx.RegisterMachine(test.givenAPICtx.CfgUser, test.givenAPICtx.CfgPassword) - if !test.expectedErr && err != nil { - t.Fatalf("test '%s' failed : %s", test.name, err) - } - if test.expectedErr && err == nil { - t.Fatalf("test '%s' should return an err", test.name) - } - if test.expectedAPICreds != nil { - assert.Equal(t, *test.expectedAPICreds, test.givenAPICtx.Creds) - } - log.Printf("test '%s' : OK", test.name) - } - -} - -func TestResetPassword(t *testing.T) { - - tests := []struct { - name string - givenAPICtx *ApiCtx - expectedErr bool - expectedAPICtx *ApiCtx - expectedAPICreds *ApiCreds - }{ - { - name: "basic api machine reset password", - expectedErr: false, - givenAPICtx: &ApiCtx{ - ApiVersion: "v1", - ResetPwdPath: "resetpassword", - BaseURL: "https://my_testendpoint.com", - CfgUser: "machineid", - CfgPassword: "new_machine_password", - Creds: ApiCreds{ - Profile: "crowdsec/test1,crowdsec/test2", - }, - Http: sling.New().Client(newMockClient()).Base(apiBaseURL), - }, - expectedAPICreds: &ApiCreds{ - User: "machineid", - Password: "new_machine_password", - Profile: "crowdsec/test1,crowdsec/test2", - }, - }, - { - name: "api reset password unknown api PATH", - expectedErr: true, - givenAPICtx: &ApiCtx{ - ApiVersion: "v1", - ResetPwdPath: "unknown_path", - BaseURL: "https://my_testendpoint.com", - CfgUser: "machineid", - CfgPassword: "machine_password", - Creds: ApiCreds{ - User: "machineid", - Password: "machine_password", - Profile: "crowdsec/test1,crowdsec/test2", - }, - Http: sling.New().Client(newMockClient()).Base(apiBaseURL), - }, - }, - { - name: "api reset password bad response", - expectedErr: true, - givenAPICtx: &ApiCtx{ - ApiVersion: "v1", - ResetPwdPath: "bad_response", - BaseURL: "https://my_testendpoint.com", - CfgUser: "machineid", - CfgPassword: "machine_password", - Creds: ApiCreds{ - Profile: "crowdsec/test1,crowdsec/test2", - }, - Http: sling.New().Client(newMockClient()).Base(apiBaseURL), - }, - }, - { - name: "api reset password unknown user", - expectedErr: true, - givenAPICtx: &ApiCtx{ - ApiVersion: "v1", - ResetPwdPath: "resestpassword_unknown_user", - BaseURL: "https://my_testendpoint.com", - CfgUser: "machineid", - CfgPassword: "machine_password", - Creds: ApiCreds{ - Profile: "crowdsec/test1,crowdsec/test2", - }, - Http: sling.New().Client(newMockClient()).Base(apiBaseURL), - }, - }, - } - - for _, test := range tests { - err := test.givenAPICtx.ResetPassword(test.givenAPICtx.CfgUser, test.givenAPICtx.CfgPassword) - if !test.expectedErr && err != nil { - t.Fatalf("test '%s' failed : %s", test.name, err) - } - if test.expectedErr && err == nil { - t.Fatalf("test '%s' should return an err", test.name) - } - if test.expectedAPICreds != nil { 
- assert.Equal(t, *test.expectedAPICreds, test.givenAPICtx.Creds) - } - log.Printf("test '%s' : OK", test.name) - } - -} diff --git a/pkg/cwapi/enroll.go b/pkg/cwapi/enroll.go deleted file mode 100644 index 3c3d4188e..000000000 --- a/pkg/cwapi/enroll.go +++ /dev/null @@ -1,27 +0,0 @@ -package cwapi - -import ( - "fmt" - - log "github.com/sirupsen/logrus" -) - -func (ctx *ApiCtx) Enroll(userID string) error { - toPush := map[string]string{"user_id": userID} - jsonResp := &ApiResp{} - errResp := &ApiResp{} - - resp, err := ctx.Http.New().Post(ctx.EnrollPath).BodyJSON(&toPush).Receive(jsonResp, errResp) - if err != nil { - return fmt.Errorf("api enroll: HTTP request creation failed: %s", err) - } - - if resp.StatusCode != 200 { - return fmt.Errorf("api enroll: user '%s' return bad HTTP code (%d): %s", userID, resp.StatusCode, errResp.Message) - } - if jsonResp.Message == "" || jsonResp.Message != "OK" || jsonResp.StatusCode != 200 { - return fmt.Errorf("api user enroll failed") - } - log.Printf("user '%s' is enrolled successfully", string(userID)) - return nil -} diff --git a/pkg/cwapi/enroll_test.go b/pkg/cwapi/enroll_test.go deleted file mode 100644 index 4c4ccc350..000000000 --- a/pkg/cwapi/enroll_test.go +++ /dev/null @@ -1,103 +0,0 @@ -package cwapi - -import ( - "testing" - - "github.com/dghubble/sling" - log "github.com/sirupsen/logrus" -) - -func TestEnroll(t *testing.T) { - - tests := []struct { - name string - givenAPICtx *ApiCtx - expectedErr bool - userID string - }{ - { - name: "basic api user enroll", - expectedErr: false, - userID: "1234", - givenAPICtx: &ApiCtx{ - ApiVersion: "v1", - EnrollPath: "enroll", - BaseURL: "https://my_testendpoint.com", - CfgUser: "machine_id", - CfgPassword: "machine_password", - Creds: ApiCreds{ - User: "machine_id", - Password: "machine_password", - Profile: "crowdsec/test1,crowdsec/test2", - }, - Http: sling.New().Client(newMockClient()).Base(apiBaseURL), - }, - }, - { - name: "api signin unknown api PATH", - expectedErr: true, - userID: "1234", - givenAPICtx: &ApiCtx{ - ApiVersion: "v1", - EnrollPath: "unknown_path", - BaseURL: "https://my_testendpoint.com", - CfgUser: "machine_id", - CfgPassword: "machine_password", - Creds: ApiCreds{ - User: "machine_id", - Password: "machine_password", - Profile: "crowdsec/test1,crowdsec/test2", - }, - Http: sling.New().Client(newMockClient()).Base(apiBaseURL), - }, - }, - { - name: "api signin malformed response", - expectedErr: true, - userID: "1234", - givenAPICtx: &ApiCtx{ - ApiVersion: "v1", - EnrollPath: "malformed_response", - BaseURL: "https://my_testendpoint.com", - CfgUser: "machine_id", - CfgPassword: "machine_password", - Creds: ApiCreds{ - User: "machine_id", - Password: "machine_password", - Profile: "crowdsec/test1,crowdsec/test2", - }, - Http: sling.New().Client(newMockClient()).Base(apiBaseURL), - }, - }, - { - name: "api signin bad response", - expectedErr: true, - userID: "1234", - givenAPICtx: &ApiCtx{ - ApiVersion: "v1", - EnrollPath: "bad_response", - BaseURL: "https://my_testendpoint.com", - CfgUser: "machine_id", - CfgPassword: "machine_password", - Creds: ApiCreds{ - User: "machine_id", - Password: "machine_password", - Profile: "crowdsec/test1,crowdsec/test2", - }, - Http: sling.New().Client(newMockClient()).Base(apiBaseURL), - }, - }, - } - - for _, test := range tests { - err := test.givenAPICtx.Enroll(test.userID) - if !test.expectedErr && err != nil { - t.Fatalf("test '%s' failed : %s", test.name, err) - } - if test.expectedErr && err == nil { - t.Fatalf("test '%s' should return 
an err", test.name) - } - log.Printf("test '%s' : OK", test.name) - } - -} diff --git a/pkg/cwapi/pull.go b/pkg/cwapi/pull.go deleted file mode 100644 index 39b6a949d..000000000 --- a/pkg/cwapi/pull.go +++ /dev/null @@ -1,24 +0,0 @@ -package cwapi - -import ( - "fmt" - - log "github.com/sirupsen/logrus" -) - -func (ctx *ApiCtx) PullTop() ([]map[string]string, error) { - top := &PullResp{} - errResp := &ApiResp{} - - resp, err := ctx.Http.New().Get(ctx.PullPath).Receive(top, errResp) - if err != nil { - return nil, fmt.Errorf("api pull: HTTP request creation failed: %s", err) - } - - if resp.StatusCode != 200 { - return nil, fmt.Errorf("api pull: return bad HTTP code (%d): %s", resp.StatusCode, errResp.Message) - } - - log.Debugf("api pull: response : %+v", top.Body) - return top.Body, nil -} diff --git a/pkg/cwapi/pull_test.go b/pkg/cwapi/pull_test.go deleted file mode 100644 index 4e52a9c41..000000000 --- a/pkg/cwapi/pull_test.go +++ /dev/null @@ -1,80 +0,0 @@ -package cwapi - -import ( - "encoding/json" - "testing" - - "github.com/dghubble/sling" - log "github.com/sirupsen/logrus" - "github.com/stretchr/testify/assert" -) - -func TestPullTop(t *testing.T) { - - tests := []struct { - name string - givenAPICtx *ApiCtx - expectedErr bool - expectedResult string - }{ - { - name: "basic api pull", - expectedErr: false, - givenAPICtx: &ApiCtx{ - ApiVersion: "v1", - PullPath: "pull", - SigninPath: "signin", - BaseURL: "https://my_testendpoint.com", - CfgUser: "machine_id", - CfgPassword: "machine_password", - Creds: ApiCreds{ - User: "machine_id", - Password: "machine_password", - Profile: "crowdsec/test1,crowdsec/test2", - }, - Http: sling.New().Client(newMockClient()).Base(apiBaseURL), - }, - expectedResult: pullResponse, - }, - { - name: "basic api pull return non 200 Code", - expectedErr: true, - givenAPICtx: &ApiCtx{ - ApiVersion: "v1", - PullPath: "unknown_path", - SigninPath: "signin", - BaseURL: "https://my_testendpoint.com", - CfgUser: "machine_id", - CfgPassword: "machine_password", - Creds: ApiCreds{ - User: "machine_id", - Password: "machine_password", - Profile: "crowdsec/test1,crowdsec/test2", - }, - Http: sling.New().Client(newMockClient()).Base(apiBaseURL), - }, - expectedResult: pullResponse, - }, - } - - for _, test := range tests { - apiResponse := &PullResp{} - err := json.Unmarshal([]byte(test.expectedResult), apiResponse) - if err != nil { - t.Fatalf("unable to unmarshall expected result : %s", err) - } - result, err := test.givenAPICtx.PullTop() - if !test.expectedErr && err != nil { - t.Fatalf("test '%s' failed : %s", test.name, err) - } - if test.expectedErr && err == nil { - t.Fatalf("test '%s' should return an err", test.name) - } - if test.expectedErr { - continue - } - assert.Equal(t, apiResponse.Body, result) - log.Printf("test '%s' : OK", test.name) - } - -} diff --git a/pkg/cwapi/signals.go b/pkg/cwapi/signals.go deleted file mode 100644 index 1e2fc1f9f..000000000 --- a/pkg/cwapi/signals.go +++ /dev/null @@ -1,107 +0,0 @@ -package cwapi - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "time" - - "github.com/crowdsecurity/crowdsec/pkg/types" - - log "github.com/sirupsen/logrus" -) - -func (ctx *ApiCtx) AppendSignal(sig types.SignalOccurence) error { - ctx.toPush = append(ctx.toPush, types.Event{Overflow: sig}) - log.Debugf("api append signal: adding new signal (cache size : %d): %+v", len(ctx.toPush), sig) - return nil -} - -func (ctx *ApiCtx) pushSignals() error { - if len(ctx.toPush) == 0 { - return nil - } - jsonResp := &ApiResp{} - errResp := &ApiResp{} - 
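//the PUT below sends the whole signal cache; a 401 response triggers a single re-signin (guarded by tokenExpired) followed by one retry of the push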
resp, err := ctx.Http.New().Put(ctx.PushPath).BodyJSON(&ctx.toPush).Receive(jsonResp, errResp) - if err != nil { - return fmt.Errorf("api push signal: HTTP request creation failed: %s", err) - } - log.Debugf("api push signal: HTTP Code: %+v \n", resp.StatusCode) - if resp.StatusCode != 200 { - if resp.StatusCode == 401 && !ctx.tokenExpired { - log.Printf("api push signal: expired token, resigning to API") - ctx.tokenExpired = true - err := ctx.Signin() - if err != nil { - return err - } - log.Printf("api push signal: token renewed. Pushing signals") - err = ctx.pushSignals() - if err != nil { - return fmt.Errorf("api push signal: unable to renew api session token: %s", err.Error()) - } - } else { - return fmt.Errorf("api push signal: return bad HTTP code (%d): %s", resp.StatusCode, errResp.Message) - } - } - - if resp.StatusCode != 401 && (jsonResp.Message == "" || jsonResp.Message != "OK" || jsonResp.StatusCode != 200) { - return fmt.Errorf("api push failed") - } - - if len(ctx.toPush) > 0 { - log.Infof("api push signal: pushed %d signals successfully", len(ctx.toPush)) - } - ctx.toPush = make([]types.Event, 0) - ctx.tokenExpired = false - return nil -} - -func (ctx *ApiCtx) Flush() error { - - /*flag can be activated to dump to local file*/ - if ctx.DebugDump { - log.Warningf("api flush: dumping api cache to ./api-dump.json") - x, err := json.MarshalIndent(ctx.toPush, "", " ") - if err != nil { - return fmt.Errorf("api flush: failed to marshal data: %s", err) - } - if err := ioutil.WriteFile("./api-dump.json", x, 0755); err != nil { - return fmt.Errorf("api flush: failed to write marshaled data : %s", err) - } - } - - //pretend we did stuff - if ctx.Muted { - return nil - } - if err := ctx.pushSignals(); err != nil { - log.Errorf("api flush: fail to push signals: %s", err) - } - return nil -} - -//This one is called on a regular basis (decided by init) and push stacked events to API -func (ctx *ApiCtx) pushLoop() error { - log.Debugf("api push loop: running with a ticker every 2 minutes") - ticker := time.NewTicker(2 * time.Minute) - - for { - select { - case <-ticker.C: //push data. 
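//each tick flushes the signal cache; an empty cache is skipped to avoid pointless HTTP calls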
- if len(ctx.toPush) == 0 { - log.Debugf("api push loop: nothing to push") - continue - } - err := ctx.Flush() - if err != nil { - log.Errorf("api push loop: %s", err.Error()) - } - case <-ctx.PusherTomb.Dying(): //we are being killed by main - log.Infof("Killing api routine") - return nil - } - } - -} diff --git a/pkg/cwapi/signals_test.go b/pkg/cwapi/signals_test.go deleted file mode 100644 index 3e2405140..000000000 --- a/pkg/cwapi/signals_test.go +++ /dev/null @@ -1,248 +0,0 @@ -package cwapi - -import ( - "testing" - "time" - - "github.com/crowdsecurity/crowdsec/pkg/types" - "github.com/dghubble/sling" - log "github.com/sirupsen/logrus" -) - -var signalList = []types.Event{ - { - Overflow: types.SignalOccurence{ - Scenario: "crowdsec/test", - Bucket_id: "1234", - Events_count: 1, - Events_sequence: []types.EventSequence{}, - Start_at: time.Now(), - BanApplications: []types.BanApplication{}, - Stop_at: time.Now(), - Source_ip: "1.2.3.4", - Source_range: "1.2.3.0/24", - Source_AutonomousSystemNumber: "1234", - Source_AutonomousSystemOrganization: "TestAS", - Source_Country: "FR", - Dest_ip: "1.2.3.5", - Capacity: 1, - Whitelisted: false, - Simulation: false, - }, - }, - { - Overflow: types.SignalOccurence{ - Scenario: "crowdsec/test", - Bucket_id: "1235", - Events_count: 1, - Events_sequence: []types.EventSequence{}, - Start_at: time.Now(), - BanApplications: []types.BanApplication{}, - Stop_at: time.Now(), - Source_ip: "1.2.3.5", - Source_range: "1.2.3.0/24", - Source_AutonomousSystemNumber: "1234", - Source_AutonomousSystemOrganization: "TestAS", - Source_Country: "FR", - Dest_ip: "1.2.3.6", - Capacity: 1, - Whitelisted: false, - Simulation: false, - }, - }, -} - -func TestPushSignal(t *testing.T) { - - tests := []struct { - name string - givenAPICtx *ApiCtx - expectedErr bool - }{ - { - name: "basic api push signal", - expectedErr: false, - givenAPICtx: &ApiCtx{ - ApiVersion: "v1", - PushPath: "signals", - BaseURL: "https://my_testendpoint.com", - CfgUser: "machine_id", - CfgPassword: "machine_password", - Creds: ApiCreds{ - User: "machine_id", - Password: "machine_password", - Profile: "crowdsec/test1,crowdsec/test2", - }, - toPush: signalList, - Http: sling.New().Client(newMockClient()).Base(apiBaseURL), - }, - }, - { - name: "api push signal unknown api PATH", - expectedErr: true, - givenAPICtx: &ApiCtx{ - ApiVersion: "v1", - PushPath: "unknown_path", - BaseURL: "https://my_testendpoint.com", - CfgUser: "machine_id", - CfgPassword: "machine_password", - Creds: ApiCreds{ - User: "machine_id", - Password: "machine_password", - Profile: "crowdsec/test1,crowdsec/test2", - }, - toPush: signalList, - Http: sling.New().Client(newMockClient()).Base(apiBaseURL), - }, - }, - { - name: "api push signal malformed response", - expectedErr: true, - givenAPICtx: &ApiCtx{ - ApiVersion: "v1", - PushPath: "malformed_response", - BaseURL: "https://my_testendpoint.com", - CfgUser: "machine_id", - CfgPassword: "machine_password", - Creds: ApiCreds{ - User: "machine_id", - Password: "machine_password", - Profile: "crowdsec/test1,crowdsec/test2", - }, - toPush: signalList, - Http: sling.New().Client(newMockClient()).Base(apiBaseURL), - }, - }, - { - name: "api push signal bad response", - expectedErr: true, - givenAPICtx: &ApiCtx{ - ApiVersion: "v1", - PushPath: "bad_response", - BaseURL: "https://my_testendpoint.com", - CfgUser: "machine_id", - CfgPassword: "machine_password", - Creds: ApiCreds{ - User: "machine_id", - Password: "machine_password", - Profile: "crowdsec/test1,crowdsec/test2", - }, - 
toPush: signalList, - Http: sling.New().Client(newMockClient()).Base(apiBaseURL), - }, - }, - { - name: "api push signal empty signal list", - expectedErr: false, - givenAPICtx: &ApiCtx{ - ApiVersion: "v1", - PushPath: "signals", - BaseURL: "https://my_testendpoint.com", - CfgUser: "machine_id", - CfgPassword: "machine_password", - Creds: ApiCreds{ - User: "machine_id", - Password: "machine_password", - Profile: "crowdsec/test1,crowdsec/test2", - }, - toPush: []types.Event{}, - Http: sling.New().Client(newMockClient()).Base(apiBaseURL), - }, - }, - { - name: "api push signal expired token", - expectedErr: false, - givenAPICtx: &ApiCtx{ - ApiVersion: "v1", - PushPath: "signals_token_expired", - SigninPath: "signin", - BaseURL: "https://my_testendpoint.com", - CfgUser: "machine_id", - CfgPassword: "machine_password", - Creds: ApiCreds{ - User: "machine_id", - Password: "machine_password", - Profile: "crowdsec/test1,crowdsec/test2", - }, - toPush: signalList, - tokenExpired: false, - Http: sling.New().Client(newMockClient()).Base(apiBaseURL), - }, - }, - { - name: "api push signal unable to renew expired token", - expectedErr: true, - givenAPICtx: &ApiCtx{ - ApiVersion: "v1", - PushPath: "signals_token_renew_fail", - SigninPath: "signin", - BaseURL: "https://my_testendpoint.com", - CfgUser: "machine_id", - CfgPassword: "machine_password", - Creds: ApiCreds{ - User: "machine_id", - Password: "machine_password", - Profile: "crowdsec/test1,crowdsec/test2", - }, - toPush: signalList, - tokenExpired: false, - Http: sling.New().Client(newMockClient()).Base(apiBaseURL), - }, - }, - { - name: "api push signal bad response code", - expectedErr: true, - givenAPICtx: &ApiCtx{ - ApiVersion: "v1", - PushPath: "signals_bad_response_code", - SigninPath: "signin", - BaseURL: "https://my_testendpoint.com", - CfgUser: "machine_id", - CfgPassword: "machine_password", - Creds: ApiCreds{ - User: "machine_id", - Password: "machine_password", - Profile: "crowdsec/test1,crowdsec/test2", - }, - toPush: signalList, - tokenExpired: false, - Http: sling.New().Client(newMockClient()).Base(apiBaseURL), - }, - }, - { - name: "api push signal signin while token expired failed", - expectedErr: true, - givenAPICtx: &ApiCtx{ - ApiVersion: "v1", - PushPath: "signals_token_expired", - SigninPath: "bad_response", - BaseURL: "https://my_testendpoint.com", - CfgUser: "machine_id", - CfgPassword: "machine_password", - Creds: ApiCreds{ - User: "machine_id", - Password: "machine_password", - Profile: "crowdsec/test1,crowdsec/test2", - }, - toPush: signalList, - tokenExpired: false, - Http: sling.New().Client(newMockClient()).Base(apiBaseURL), - }, - }, - } - - for _, test := range tests { - err := test.givenAPICtx.pushSignals() - if !test.expectedErr && err != nil { - t.Fatalf("test '%s' failed : %s", test.name, err) - } - if test.expectedErr && err == nil { - t.Fatalf("test '%s' should return an err", test.name) - } - if test.expectedErr { - continue - } - log.Printf("test '%s' : OK", test.name) - } - -} diff --git a/pkg/cwapi/tests/api_config.yaml b/pkg/cwapi/tests/api_config.yaml deleted file mode 100644 index b6a7145c4..000000000 --- a/pkg/cwapi/tests/api_config.yaml +++ /dev/null @@ -1,10 +0,0 @@ -version: v1 -url: https://my_testendpoint.com -signin_path: signin -push_path: signals -pull_path: pull -enroll_path: enroll -reset_pwd_path: resetpassword -register_path: register -machine_id: test -password: test \ No newline at end of file diff --git a/pkg/cwapi/tests/api_config_bad_api_version.yaml 
b/pkg/cwapi/tests/api_config_bad_api_version.yaml deleted file mode 100644 index defbd9f55..000000000 --- a/pkg/cwapi/tests/api_config_bad_api_version.yaml +++ /dev/null @@ -1,10 +0,0 @@ -version: v0 -url: https://test_endpoint -signin_path: signin -push_path: signals -pull_path: pull -enroll_path: enroll -reset_pwd_path: resetpassword -register_path: register -machine_id: test -password: test \ No newline at end of file diff --git a/pkg/cwapi/tests/api_config_bad_format.yaml b/pkg/cwapi/tests/api_config_bad_format.yaml deleted file mode 100644 index ca142b8ce..000000000 --- a/pkg/cwapi/tests/api_config_bad_format.yaml +++ /dev/null @@ -1,11 +0,0 @@ -bad_key: test -version: v1 -url: https://test_endpoint -signin_path: signin -push_path: signals -pull_path: pull -enroll_path: enroll -reset_pwd_path: resetpassword -register_path: register -machine_id: test -password: test \ No newline at end of file diff --git a/pkg/cwapi/utils_test.go b/pkg/cwapi/utils_test.go deleted file mode 100644 index b07ee2842..000000000 --- a/pkg/cwapi/utils_test.go +++ /dev/null @@ -1,129 +0,0 @@ -package cwapi - -import ( - "fmt" - "io/ioutil" - "net/http" - "strings" - "time" -) - -const apiVersion = "v1" -const apiURL = "https://my_test_endpoint" - -var apiBaseURL = fmt.Sprintf("%s/%s/", apiURL, apiVersion) - -var pullResponse = `{ - "statusCode" : 200, - "message": - [ - { - "range_ip" : "1.2.3.4", - "country" : "FR", - "as_org" : "Test", - "as_num" : "1234", - "action" : "ban", - "reason": "crowdsec/test" - }, - { - "range_ip" : "1.2.3.5", - "country" : "FR", - "as_org" : "Test", - "as_num" : "1235", - "action" : "ban", - "reason": "crowdsec/test" - }, - { - "range_ip" : "1.2.3.6", - "country" : "FR", - "as_org" : "Test", - "as_num" : "1236", - "action" : "ban", - "reason": "crowdsec/test" - } - ] -}` - -type mockTransport struct { - nbTryPushTokenOK int // to test token expiration - nbTryPushTokenNOK int -} - -func newMockTransport() http.RoundTripper { - return &mockTransport{} -} - -func newMockClient() *http.Client { - return &http.Client{ - Transport: newMockTransport(), - Timeout: time.Second * 20, - } -} - -// Implement http.RoundTripper -func (t *mockTransport) RoundTrip(req *http.Request) (*http.Response, error) { - var responseBody string - var statusCode int - // Create mocked http.Response - response := &http.Response{ - Header: make(http.Header), - Request: req, - } - response.Header.Set("Content-Type", "application/json") - switch req.URL.Path { - case "/v1/signin": - responseBody = `{"statusCode": 200, "message": "crowdsec_api_token"}` - statusCode = 200 - case "/v1/register": - responseBody = `{"statusCode": 200, "message": "OK"}` - statusCode = 200 - case "/v1/signals": - responseBody = `{"statusCode": 200, "message": "OK"}` - statusCode = 200 - case "/v1/pull": - responseBody = pullResponse - statusCode = 200 - case "/v1/signals_token_expired": - if t.nbTryPushTokenOK == 0 { - responseBody = `{"statusCode": 200, "message": "crowdsec_api_token"}` - statusCode = 401 - t.nbTryPushTokenOK++ - } else { - responseBody = `{"statusCode": 200, "message": "OK"}` - statusCode = 200 - } - case "/v1/signals_token_renew_fail": - if t.nbTryPushTokenNOK == 0 { - responseBody = `{"statusCode": 200, "message": "crowdsec_api_token"}` - statusCode = 401 - t.nbTryPushTokenNOK++ - } else { - responseBody = `{"statusCode": 500, "message": "token expired"}` - statusCode = 500 - } - case "/v1/signals_bad_response_code": - responseBody = `{"statusCode": 200, "message": "OK"}` - statusCode = 500 - case "/v1/enroll": - 
responseBody = `{"statusCode": 200, "message": "OK"}` - statusCode = 200 - case "/v1/resetpassword": - responseBody = `{"statusCode": 200, "message": "password updated successfully"}` - statusCode = 200 - case "/v1/resetpassword_unknown_user": - responseBody = `{"statusCode": 500, "message": "User not found"}` - statusCode = 200 - case "/v1/unknown_path": - statusCode = 404 - responseBody = `{"error": "unknown URI"}` - case "/v1/malformed_response": - statusCode = 200 - responseBody = `{"statusCode" : 200, "msg" : "api_token"` - case "/v1/bad_response": - statusCode = 200 - responseBody = `{"statusCode" : 200, "msg" : "api_token"}` - } - response.StatusCode = statusCode - response.Body = ioutil.NopCloser(strings.NewReader(responseBody)) - return response, nil -} diff --git a/pkg/cwhub/cwhub.go b/pkg/cwhub/cwhub.go index 93be0fb10..2341453dd 100644 --- a/pkg/cwhub/cwhub.go +++ b/pkg/cwhub/cwhub.go @@ -2,32 +2,32 @@ package cwhub import ( "crypto/sha256" - "errors" + "path/filepath" + "strings" + + //"errors" "fmt" "io" "os" "github.com/enescakir/emoji" + "github.com/pkg/errors" log "github.com/sirupsen/logrus" ) +/*managed configuration types*/ var PARSERS = "parsers" var PARSERS_OVFLW = "postoverflows" var SCENARIOS = "scenarios" var COLLECTIONS = "collections" - var ItemTypes = []string{PARSERS, PARSERS_OVFLW, SCENARIOS, COLLECTIONS} -var HubIdx map[string]map[string]Item - -var Installdir = "/etc/crowdsec/" -var Hubdir = "/etc/crowdsec/cscli/hub/" -var Cfgdir = "/etc/crowdsec/cscli/" +var hubIdx map[string]map[string]Item var RawFileURLTemplate = "https://raw.githubusercontent.com/crowdsecurity/hub/%s/%s" -var HubIndexFile = ".index.json" var HubBranch = "master" +var HubIndexFile = ".index.json" type ItemVersion struct { Digest string @@ -96,9 +96,80 @@ func getSHA256(filepath string) (string, error) { return fmt.Sprintf("%x", h.Sum(nil)), nil } +func GetItemMap(itemType string) map[string]Item { + var m map[string]Item + var ok bool + + if m, ok = hubIdx[itemType]; !ok { + return nil + } + return m +} + +//GetItemByPath retrieves the item from hubIdx based on the path. To achieve this it will resolve symlink to find associated hub item. 
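+//Installed items are symlinks into the hub directory whose target encodes "author/name"; a plain file is simply looked up under its base name.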
+func GetItemByPath(itemType string, itemPath string) (*Item, error) { + /*try to resolve symlink*/ + finalName := "" + f, err := os.Lstat(itemPath) + if err != nil { + return nil, errors.Wrapf(err, "while performing lstat on %s", itemPath) + } + + if f.Mode()&os.ModeSymlink == 0 { + /*not a symlink : the filename itself is the key*/ + finalName = filepath.Base(itemPath) + } else { + /*resolve the symlink to hub file*/ + pathInHub, err := os.Readlink(itemPath) + if err != nil { + return nil, errors.Wrapf(err, "while reading symlink of %s", itemPath) + } + //extract author from path + fname := filepath.Base(pathInHub) + author := filepath.Base(filepath.Dir(pathInHub)) + //trim yaml suffix + fname = strings.TrimSuffix(fname, ".yaml") + fname = strings.TrimSuffix(fname, ".yml") + finalName = fmt.Sprintf("%s/%s", author, fname) + } + + /*look the resolved name up in the index*/ + if m := GetItemMap(itemType); m != nil { + if v, ok := m[finalName]; ok { + return &v, nil + } else { + return nil, fmt.Errorf("%s not found in %s", finalName, itemType) + } + } else { + return nil, fmt.Errorf("item type %s doesn't exist", itemType) + } + +} + +func GetItem(itemType string, itemName string) *Item { + if m, ok := GetItemMap(itemType)[itemName]; ok { + return &m + } + return nil +} + +func AddItem(itemType string, item Item) error { + in := false + for _, itype := range ItemTypes { + if itype == itemType { + in = true + } + } + if !in { + return fmt.Errorf("ItemType %s is unknown", itemType) + } + hubIdx[itemType][item.Name] = item + return nil +} + func DisplaySummary() { - log.Printf("Loaded %d collecs, %d parsers, %d scenarios, %d post-overflow parsers", len(HubIdx[COLLECTIONS]), - len(HubIdx[PARSERS]), len(HubIdx[SCENARIOS]), len(HubIdx[PARSERS_OVFLW])) + log.Printf("Loaded %d collecs, %d parsers, %d scenarios, %d post-overflow parsers", len(hubIdx[COLLECTIONS]), + len(hubIdx[PARSERS]), len(hubIdx[SCENARIOS]), len(hubIdx[PARSERS_OVFLW])) if skippedLocal > 0 || skippedTainted > 0 { log.Printf("unmanaged items : %d local, %d tainted", skippedLocal, skippedTainted) } @@ -128,47 +199,75 @@ func ItemStatus(v Item) (string, bool, bool, bool) { if v.Tainted { Warning = true strret += ",tainted" - } else if !v.UpToDate { + } else if !v.UpToDate && !v.Local { strret += ",update-available" Warning = true } return strret, Ok, Warning, Managed } +func GetUpstreamInstalledScenariosAsString() ([]string, error) { + var retStr []string + + items, err := GetUpstreamInstalledScenarios() + if err != nil { + return nil, errors.Wrap(err, "while fetching scenarios") + } + for _, it := range items { + retStr = append(retStr, it.Name) + } + return retStr, nil +} + +func GetUpstreamInstalledScenarios() ([]Item, error) { + var retItems []Item + + if _, ok := hubIdx[SCENARIOS]; !ok { + return nil, fmt.Errorf("no scenarios in hubIdx") + } + for _, item := range hubIdx[SCENARIOS] { + if item.Installed && !item.Tainted { + retItems = append(retItems, item) + } + } + return retItems, nil +} + //Returns a list of entries for packages : name, status, local_path, local_version, utf8_status (fancy) -func HubStatus(itype string, name string, list_all bool) []map[string]string { - if _, ok := HubIdx[itype]; !ok { - log.Errorf("type %s doesn't exist", itype) +func HubStatus(itemType string, name string, listAll bool) []map[string]string { + if _, ok := hubIdx[itemType]; !ok { + log.Errorf("type %s doesn't exist", itemType) + return nil } - var mli []map[string]string + var ret []map[string]string
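+ //one map per item : name, status, local_version, local_path, description, plus a utf8_status variant for display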
/*remember, you do it for the user :)*/ - for _, v := range HubIdx[itype] { - if name != "" && name != v.Name { + for _, item := range hubIdx[itemType] { + if name != "" && name != item.Name { //user has required a specific name continue } //Only enabled items ? - if !list_all && !v.Installed { + if !listAll && !item.Installed { continue } //Check the item status - st, ok, warning, managed := ItemStatus(v) + status, ok, warning, managed := ItemStatus(item) tmp := make(map[string]string) - tmp["name"] = v.Name - tmp["status"] = st - tmp["local_version"] = v.LocalVersion - tmp["local_path"] = v.LocalPath - tmp["description"] = v.Description - if !managed || !v.Installed { - tmp["utf8_status"] = fmt.Sprintf("%v %s", emoji.Prohibited, st) + tmp["name"] = item.Name + tmp["status"] = status + tmp["local_version"] = item.LocalVersion + tmp["local_path"] = item.LocalPath + tmp["description"] = item.Description + if !managed || !item.Installed { + tmp["utf8_status"] = fmt.Sprintf("%v %s", emoji.Prohibited, status) } else if warning { - tmp["utf8_status"] = fmt.Sprintf("%v %s", emoji.Warning, st) + tmp["utf8_status"] = fmt.Sprintf("%v %s", emoji.Warning, status) } else if ok { - tmp["utf8_status"] = fmt.Sprintf("%v %s", emoji.CheckMark, st) + tmp["utf8_status"] = fmt.Sprintf("%v %s", emoji.CheckMark, status) } - mli = append(mli, tmp) + ret = append(ret, tmp) } - return mli + return ret } diff --git a/pkg/cwhub/cwhub_test.go b/pkg/cwhub/cwhub_test.go index 02e601c04..6d7d85d8e 100644 --- a/pkg/cwhub/cwhub_test.go +++ b/pkg/cwhub/cwhub_test.go @@ -1,6 +1,7 @@ package cwhub import ( + "fmt" "io/ioutil" "net/http" "os" @@ -8,6 +9,7 @@ import ( "strings" "testing" + "github.com/crowdsecurity/crowdsec/pkg/csconfig" log "github.com/sirupsen/logrus" ) @@ -22,90 +24,202 @@ import ( var testDataFolder = "." 
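+// The tests below exercise the hub API after its move from package globals to an explicit csconfig-driven setup. A minimal usage sketch (the collection name is a placeholder, not a real hub item):
+//
+//	cfg := csconfig.NewConfig()
+//	cfg.Cscli = &csconfig.CscliCfg{ConfigDir: "./install", HubDir: "./hubdir", HubIndexFile: "./hubdir/.index.json"}
+//	if err := UpdateHubIdx(cfg.Cscli); err != nil { /* index download or local sync failed */ }
+//	if err := GetHubIdx(cfg.Cscli); err != nil { /* loading the on-disk index failed */ }
+//	item := GetItem(COLLECTIONS, "some/collection") // returns nil when the item is unknown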
-func TestIndexDownload(t *testing.T) { +func TestItemStatus(t *testing.T) { + cfg := test_prepenv() - os.RemoveAll(Cfgdir) - test_prepenv() - - err := LoadHubIdx() + err := UpdateHubIdx(cfg.Cscli) //DownloadHubIdx() if err != nil { - t.Fatalf("failed to download index") + t.Fatalf("failed to download index : %s", err) } - if err := GetHubIdx(); err != nil { - t.Fatalf("failed to load hub index") + if err := GetHubIdx(cfg.Cscli); err != nil { + t.Fatalf("failed to load hub index : %s", err) + } + + //get existing map + x := GetItemMap(COLLECTIONS) + if len(x) == 0 { + t.Fatalf("expected non empty result") + } + + //Get item : good and bad + for k, _ := range x { + item := GetItem(COLLECTIONS, k) + if item == nil { + t.Fatalf("expected item") + } + item.Installed = true + item.UpToDate = false + item.Local = false + item.Tainted = false + txt, _, _, _ := ItemStatus(*item) + if txt != "enabled,update-available" { + log.Fatalf("got '%s'", txt) + } + + item.Installed = false + item.UpToDate = false + item.Local = true + item.Tainted = false + txt, _, _, _ = ItemStatus(*item) + if txt != "disabled,local" { + log.Fatalf("got '%s'", txt) + } + + break + } + DisplaySummary() +} + +func TestGetters(t *testing.T) { + cfg := test_prepenv() + + err := UpdateHubIdx(cfg.Cscli) + //DownloadHubIdx() + if err != nil { + t.Fatalf("failed to download index : %s", err) + } + if err := GetHubIdx(cfg.Cscli); err != nil { + t.Fatalf("failed to load hub index : %s", err) + } + + //get non existing map + empty := GetItemMap("ratata") + if empty != nil { + t.Fatalf("expected nil result") + } + //get existing map + x := GetItemMap(COLLECTIONS) + if len(x) == 0 { + t.Fatalf("expected non empty result") + } + + //Get item : good and bad + for k, _ := range x { + empty := GetItem(COLLECTIONS, k+"nope") + if empty != nil { + t.Fatalf("expected empty item") + } + + item := GetItem(COLLECTIONS, k) + if item == nil { + t.Fatalf("expected non empty item") + } + + //Add item and get it + item.Name += "nope" + if err := AddItem(COLLECTIONS, *item); err != nil { + t.Fatalf("didn't expect error : %s", err) + } + + newitem := GetItem(COLLECTIONS, item.Name) + if newitem == nil { + t.Fatalf("expected non empty item") + } + + //Add bad item + if err := AddItem("ratata", *item); err != nil { + if fmt.Sprintf("%s", err) != "ItemType ratata is unknown" { + t.Fatalf("unexpected error") + } + } else { + t.Fatalf("Expected error") + } + + break + } + +} + +func TestIndexDownload(t *testing.T) { + + cfg := test_prepenv() + + err := UpdateHubIdx(cfg.Cscli) + //DownloadHubIdx() + if err != nil { + t.Fatalf("failed to download index : %s", err) + } + if err := GetHubIdx(cfg.Cscli); err != nil { + t.Fatalf("failed to load hub index : %s", err) } } -func test_prepenv() { +func test_prepenv() *csconfig.GlobalConfig { log.SetLevel(log.DebugLevel) - Cfgdir = filepath.Clean("./cscli") - Installdir = filepath.Clean("./install") - Hubdir = filepath.Clean("./hubdir") + var cfg = csconfig.NewConfig() + cfg.Cscli = &csconfig.CscliCfg{} + cfg.Cscli.ConfigDir, _ = filepath.Abs("./install") + cfg.Cscli.HubDir, _ = filepath.Abs("./hubdir") + cfg.Cscli.HubIndexFile = filepath.Clean("./hubdir/.index.json") //Mock the http client http.DefaultClient.Transport = newMockTransport() - if err := os.RemoveAll(Cfgdir); err != nil { - log.Fatalf("failed to remove %s : %s", Installdir, err) + if err := os.RemoveAll(cfg.Cscli.ConfigDir); err != nil { + log.Fatalf("failed to remove %s : %s", cfg.Cscli.ConfigDir, err) } - if err := os.MkdirAll(Cfgdir, 0700); err != nil 
{ + if err := os.MkdirAll(cfg.Cscli.ConfigDir, 0700); err != nil { log.Fatalf("mkdir : %s", err) } - if err := UpdateHubIdx(); err != nil { + + if err := os.RemoveAll(cfg.Cscli.HubDir); err != nil { + log.Fatalf("failed to remove %s : %s", cfg.Cscli.HubDir, err) + } + if err := os.MkdirAll(cfg.Cscli.HubDir, 0700); err != nil { + log.Fatalf("failed to mkdir %s : %s", cfg.Cscli.HubDir, err) + } + + if err := UpdateHubIdx(cfg.Cscli); err != nil { log.Fatalf("failed to download index : %s", err) } - if err := os.RemoveAll(Installdir); err != nil { - log.Fatalf("failed to remove %s : %s", Installdir, err) - } - if err := os.MkdirAll(Installdir, 0700); err != nil { - log.Fatalf("failed to mkdir %s : %s", Installdir, err) - } - if err := os.RemoveAll(Hubdir); err != nil { - log.Fatalf("failed to remove %s : %s", Hubdir, err) - } - if err := os.MkdirAll(Hubdir, 0700); err != nil { - log.Fatalf("failed to mkdir %s : %s", Hubdir, err) - } + // if err := os.RemoveAll(cfg.Cscli.InstallDir); err != nil { + // log.Fatalf("failed to remove %s : %s", cfg.Cscli.InstallDir, err) + // } + // if err := os.MkdirAll(cfg.Cscli.InstallDir, 0700); err != nil { + // log.Fatalf("failed to mkdir %s : %s", cfg.Cscli.InstallDir, err) + // } + return cfg } -func testInstallItem(t *testing.T, item Item) { +func testInstallItem(cfg *csconfig.CscliCfg, t *testing.T, item Item) { + //Install the parser - item, err := DownloadLatest(item, Hubdir, false, testDataFolder) + item, err := DownloadLatest(cfg, item, false) if err != nil { t.Fatalf("error while downloading %s : %v", item.Name, err) } - if err := LocalSync(); err != nil { + if err := LocalSync(cfg); err != nil { t.Fatalf("taint: failed to run localSync : %s", err) } - if !HubIdx[item.Type][item.Name].UpToDate { + if !hubIdx[item.Type][item.Name].UpToDate { t.Fatalf("download: %s should be up-to-date", item.Name) } - if HubIdx[item.Type][item.Name].Installed { + if hubIdx[item.Type][item.Name].Installed { t.Fatalf("download: %s should not be install", item.Name) } - if HubIdx[item.Type][item.Name].Tainted { + if hubIdx[item.Type][item.Name].Tainted { t.Fatalf("download: %s should not be tainted", item.Name) } - item, err = EnableItem(item, Installdir, Hubdir) + item, err = EnableItem(cfg, item) if err != nil { t.Fatalf("error while enabled %s : %v.", item.Name, err) } - if err := LocalSync(); err != nil { + if err := LocalSync(cfg); err != nil { t.Fatalf("taint: failed to run localSync : %s", err) } - if !HubIdx[item.Type][item.Name].Installed { + if !hubIdx[item.Type][item.Name].Installed { t.Fatalf("install: %s should be install", item.Name) } } -func testTaintItem(t *testing.T, item Item) { - if HubIdx[item.Type][item.Name].Tainted { +func testTaintItem(cfg *csconfig.CscliCfg, t *testing.T, item Item) { + if hubIdx[item.Type][item.Name].Tainted { t.Fatalf("pre-taint: %s should not be tainted", item.Name) } f, err := os.OpenFile(item.LocalPath, os.O_APPEND|os.O_WRONLY, 0600) @@ -118,71 +232,71 @@ func testTaintItem(t *testing.T, item Item) { } f.Close() //Local sync and check status - if err := LocalSync(); err != nil { + if err := LocalSync(cfg); err != nil { t.Fatalf("taint: failed to run localSync : %s", err) } - if !HubIdx[item.Type][item.Name].Tainted { + if !hubIdx[item.Type][item.Name].Tainted { t.Fatalf("taint: %s should be tainted", item.Name) } } -func testUpdateItem(t *testing.T, item Item) { +func testUpdateItem(cfg *csconfig.CscliCfg, t *testing.T, item Item) { - if HubIdx[item.Type][item.Name].UpToDate { + if hubIdx[item.Type][item.Name].UpToDate { 
t.Fatalf("update: %s should NOT be up-to-date", item.Name) } //Update it + check status - item, err := DownloadLatest(item, Hubdir, true, testDataFolder) + item, err := DownloadLatest(cfg, item, true) if err != nil { t.Fatalf("failed to update %s : %s", item.Name, err) } //Local sync and check status - if err := LocalSync(); err != nil { + if err := LocalSync(cfg); err != nil { t.Fatalf("failed to run localSync : %s", err) } - if !HubIdx[item.Type][item.Name].UpToDate { + if !hubIdx[item.Type][item.Name].UpToDate { t.Fatalf("update: %s should be up-to-date", item.Name) } - if HubIdx[item.Type][item.Name].Tainted { + if hubIdx[item.Type][item.Name].Tainted { t.Fatalf("update: %s should not be tainted anymore", item.Name) } } -func testDisableItem(t *testing.T, item Item) { +func testDisableItem(cfg *csconfig.CscliCfg, t *testing.T, item Item) { if !item.Installed { t.Fatalf("disable: %s should be installed", item.Name) } //Remove - item, err := DisableItem(item, Installdir, Hubdir, false) + item, err := DisableItem(cfg, item, false) if err != nil { t.Fatalf("failed to disable item : %v", err) } //Local sync and check status - if err := LocalSync(); err != nil { + if err := LocalSync(cfg); err != nil { t.Fatalf("failed to run localSync : %s", err) } - if HubIdx[item.Type][item.Name].Tainted { + if hubIdx[item.Type][item.Name].Tainted { t.Fatalf("disable: %s should not be tainted anymore", item.Name) } - if HubIdx[item.Type][item.Name].Installed { + if hubIdx[item.Type][item.Name].Installed { t.Fatalf("disable: %s should not be installed anymore", item.Name) } - if !HubIdx[item.Type][item.Name].Downloaded { + if !hubIdx[item.Type][item.Name].Downloaded { t.Fatalf("disable: %s should still be downloaded", item.Name) } //Purge - item, err = DisableItem(item, Installdir, Hubdir, true) + item, err = DisableItem(cfg, item, true) if err != nil { t.Fatalf("failed to purge item : %v", err) } //Local sync and check status - if err := LocalSync(); err != nil { + if err := LocalSync(cfg); err != nil { t.Fatalf("failed to run localSync : %s", err) } - if HubIdx[item.Type][item.Name].Installed { + if hubIdx[item.Type][item.Name].Installed { t.Fatalf("disable: %s should not be installed anymore", item.Name) } - if HubIdx[item.Type][item.Name].Downloaded { + if hubIdx[item.Type][item.Name].Downloaded { t.Fatalf("disable: %s should not be downloaded", item.Name) } } @@ -198,23 +312,23 @@ func TestInstallParser(t *testing.T) { - check its status - remove it */ - test_prepenv() + cfg := test_prepenv() - if err := GetHubIdx(); err != nil { + if err := GetHubIdx(cfg.Cscli); err != nil { t.Fatalf("failed to load hub index") } //map iteration is random by itself - for _, it := range HubIdx[PARSERS] { - testInstallItem(t, it) - it = HubIdx[PARSERS][it.Name] + for _, it := range hubIdx[PARSERS] { + testInstallItem(cfg.Cscli, t, it) + it = hubIdx[PARSERS][it.Name] _ = HubStatus(PARSERS, it.Name, false) - testTaintItem(t, it) - it = HubIdx[PARSERS][it.Name] + testTaintItem(cfg.Cscli, t, it) + it = hubIdx[PARSERS][it.Name] _ = HubStatus(PARSERS, it.Name, false) - testUpdateItem(t, it) - it = HubIdx[PARSERS][it.Name] - testDisableItem(t, it) - it = HubIdx[PARSERS][it.Name] + testUpdateItem(cfg.Cscli, t, it) + it = hubIdx[PARSERS][it.Name] + testDisableItem(cfg.Cscli, t, it) + it = hubIdx[PARSERS][it.Name] break } @@ -231,21 +345,22 @@ func TestInstallCollection(t *testing.T) { - check its status - remove it */ - test_prepenv() + cfg := test_prepenv() - if err := GetHubIdx(); err != nil { + if err := 
GetHubIdx(cfg.Cscli); err != nil { t.Fatalf("failed to load hub index") } //map iteration is random by itself - for _, it := range HubIdx[COLLECTIONS] { - testInstallItem(t, it) - it = HubIdx[COLLECTIONS][it.Name] - testTaintItem(t, it) - it = HubIdx[COLLECTIONS][it.Name] - testUpdateItem(t, it) - it = HubIdx[COLLECTIONS][it.Name] - testDisableItem(t, it) - it = HubIdx[COLLECTIONS][it.Name] + for _, it := range hubIdx[COLLECTIONS] { + testInstallItem(cfg.Cscli, t, it) + it = hubIdx[COLLECTIONS][it.Name] + testTaintItem(cfg.Cscli, t, it) + it = hubIdx[COLLECTIONS][it.Name] + testUpdateItem(cfg.Cscli, t, it) + it = hubIdx[COLLECTIONS][it.Name] + testDisableItem(cfg.Cscli, t, it) + + it = hubIdx[COLLECTIONS][it.Name] x := HubStatus(COLLECTIONS, it.Name, false) log.Printf("%+v", x) break diff --git a/pkg/cwhub/download.go b/pkg/cwhub/download.go index dc8773cfb..4bc8b6274 100644 --- a/pkg/cwhub/download.go +++ b/pkg/cwhub/download.go @@ -3,138 +3,119 @@ package cwhub import ( "bytes" "crypto/sha256" - "errors" + + //"errors" + "github.com/pkg/errors" + + //"errors" "fmt" "io" "io/ioutil" "net/http" "os" - "path" "strings" + "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/types" log "github.com/sirupsen/logrus" "gopkg.in/yaml.v2" ) -func LoadHubIdx() error { - bidx, err := ioutil.ReadFile(path.Join(Cfgdir, "/.index.json")) +func UpdateHubIdx(cscli *csconfig.CscliCfg) error { + + bidx, err := DownloadHubIdx(cscli) if err != nil { - return err + return errors.Wrap(err, "failed to download index") } ret, err := LoadPkgIndex(bidx) if err != nil { if !errors.Is(err, ReferenceMissingError) { - log.Fatalf("Unable to load freshly downloaded index : %v.", err) + return errors.Wrap(err, "failed to read index") } } - HubIdx = ret - if err := LocalSync(); err != nil { - log.Fatalf("Failed to sync Hub index with local deployment : %v", err) + hubIdx = ret + if err := LocalSync(cscli); err != nil { + return errors.Wrap(err, "failed to sync") } return nil } -func UpdateHubIdx() error { - - bidx, err := DownloadHubIdx() - if err != nil { - log.Fatalf("Unable to download index : %v.", err) - } - ret, err := LoadPkgIndex(bidx) - if err != nil { - if !errors.Is(err, ReferenceMissingError) { - log.Fatalf("Unable to load freshly downloaded index : %v.", err) - } - } - HubIdx = ret - if err := LocalSync(); err != nil { - log.Fatalf("Failed to sync Hub index with local deployment : %v", err) - } - return nil -} - -func DownloadHubIdx() ([]byte, error) { +func DownloadHubIdx(cscli *csconfig.CscliCfg) ([]byte, error) { + log.Debugf("fetching index from branch %s (%s)", HubBranch, fmt.Sprintf(RawFileURLTemplate, HubBranch, HubIndexFile)) req, err := http.NewRequest("GET", fmt.Sprintf(RawFileURLTemplate, HubBranch, HubIndexFile), nil) if err != nil { - log.Errorf("failed request : %s", err) - return nil, err + return nil, errors.Wrap(err, "failed to build request for hub index") } resp, err := http.DefaultClient.Do(req) if err != nil { - log.Errorf("failed request Do : %s", err) - return nil, err + return nil, errors.Wrap(err, "failed http request for hub index") } if resp.StatusCode != 200 { - log.Errorf("got code %d while requesting %s, abort", resp.StatusCode, - fmt.Sprintf(RawFileURLTemplate, HubBranch, HubIndexFile)) - return nil, fmt.Errorf("bad http code") + return nil, fmt.Errorf("bad http code %d while requesting %s", resp.StatusCode, req.URL.String()) } defer resp.Body.Close() body, err := ioutil.ReadAll(resp.Body) if err != nil { - log.Errorf("failed request reqd: %s", 
err) - return nil, err + return nil, errors.Wrap(err, "failed to read response body for hub index") } - //os.Remove(path.Join(configFolder, GitIndexFile)) - file, err := os.OpenFile(path.Join(Cfgdir, "/.index.json"), os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644) + file, err := os.OpenFile(cscli.HubIndexFile, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644) if err != nil { - log.Fatalf(err.Error()) + return nil, errors.Wrap(err, "while opening hub index file") } defer file.Close() wsize, err := file.WriteString(string(body)) if err != nil { - log.Fatalf(err.Error()) + return nil, errors.Wrap(err, "while writing hub index file") } - log.Infof("Wrote new %d bytes index to %s", wsize, path.Join(Cfgdir, "/.index.json")) + log.Infof("Wrote new %d bytes index to %s", wsize, cscli.HubIndexFile) return body, nil } //DownloadLatest will download the latest version of Item to the tdir directory -func DownloadLatest(target Item, tdir string, overwrite bool, dataFolder string) (Item, error) { +func DownloadLatest(cscli *csconfig.CscliCfg, target Item, overwrite bool) (Item, error) { var err error + log.Debugf("Downloading %s %s", target.Type, target.Name) if target.Type == COLLECTIONS { var tmp = [][]string{target.Parsers, target.PostOverflows, target.Scenarios, target.Collections} for idx, ptr := range tmp { ptrtype := ItemTypes[idx] for _, p := range ptr { - if val, ok := HubIdx[ptrtype][p]; ok { + if val, ok := hubIdx[ptrtype][p]; ok { log.Debugf("Download %s sub-item : %s %s", target.Name, ptrtype, p) //recurse as it's a collection if ptrtype == COLLECTIONS { log.Tracef("collection, recurse") - HubIdx[ptrtype][p], err = DownloadLatest(val, tdir, overwrite, dataFolder) + hubIdx[ptrtype][p], err = DownloadLatest(cscli, val, overwrite) if err != nil { - log.Errorf("Encountered error while downloading sub-item %s %s : %s.", ptrtype, p, err) - return target, fmt.Errorf("encountered error while downloading %s for %s, abort", val.Name, target.Name) + return target, errors.Wrap(err, fmt.Sprintf("while downloading %s", val.Name)) } } - HubIdx[ptrtype][p], err = DownloadItem(val, tdir, overwrite, dataFolder) + hubIdx[ptrtype][p], err = DownloadItem(cscli, val, overwrite) if err != nil { - log.Errorf("Encountered error while downloading sub-item %s %s : %s.", ptrtype, p, err) - return target, fmt.Errorf("encountered error while downloading %s for %s, abort", val.Name, target.Name) + return target, errors.Wrap(err, fmt.Sprintf("while downloading %s", val.Name)) } } else { - //log.Errorf("Referred %s %s in collection %s doesn't exist.", ptrtype, p, target.Name) return target, fmt.Errorf("required %s %s of %s doesn't exist, abort", ptrtype, p, target.Name) } } } - target, err = DownloadItem(target, tdir, overwrite, dataFolder) + target, err = DownloadItem(cscli, target, overwrite) if err != nil { return target, fmt.Errorf("failed to download item : %s", err) } } else { - return DownloadItem(target, tdir, overwrite, dataFolder) + return DownloadItem(cscli, target, overwrite) } return target, nil } -func DownloadItem(target Item, tdir string, overwrite bool, dataFolder string) (Item, error) { +func DownloadItem(cscli *csconfig.CscliCfg, target Item, overwrite bool) (Item, error) { + var tdir = cscli.HubDir + var dataFolder = cscli.DataDir /*if user didn't --force, don't overwrite local, tainted, up-to-date files*/ if !overwrite { if target.Tainted { @@ -147,37 +128,31 @@ func DownloadItem(target Item, tdir string, overwrite bool, dataFolder string) ( } } - //log.Infof("Downloading %s to %s", target.Name, tdir) - uri := 
fmt.Sprintf(RawFileURLTemplate, HubBranch, target.RemotePath) - req, err := http.NewRequest("GET", uri, nil) + req, err := http.NewRequest("GET", fmt.Sprintf(RawFileURLTemplate, HubBranch, target.RemotePath), nil) if err != nil { - log.Errorf("%s : request creation failed : %s", target.Name, err) - return target, err + return target, errors.Wrap(err, fmt.Sprintf("while building request for %s", target.RemotePath)) } resp, err := http.DefaultClient.Do(req) if err != nil { - log.Errorf("%s : request failed : %s", target.Name, err) - return target, err + return target, errors.Wrap(err, fmt.Sprintf("while downloading %s", req.URL.String())) } if resp.StatusCode != 200 { - log.Errorf("%s : We got an error reaching %s : %d", target.Name, uri, resp.StatusCode) - return target, fmt.Errorf("bad http code") + return target, fmt.Errorf("bad http code %d for %s", resp.StatusCode, req.URL.String()) } defer resp.Body.Close() body, err := ioutil.ReadAll(resp.Body) if err != nil { - log.Errorf("%s : failed request read: %s", target.Name, err) - return target, err + return target, errors.Wrap(err, fmt.Sprintf("while reading %s", req.URL.String())) } h := sha256.New() if _, err := h.Write([]byte(body)); err != nil { - return target, fmt.Errorf("%s : failed to write : %s", target.Name, err) + return target, errors.Wrap(err, fmt.Sprintf("while hashing %s", target.Name)) } meow := fmt.Sprintf("%x", h.Sum(nil)) if meow != target.Versions[target.Version].Digest { log.Errorf("Downloaded version doesn't match index, please 'hub update'") log.Debugf("got %s, expected %s", meow, target.Versions[target.Version].Digest) - return target, fmt.Errorf("invalid download hash") + return target, fmt.Errorf("invalid download hash for %s", target.Name) } //all good, install //check if parent dir exists @@ -188,7 +163,7 @@ func DownloadItem(target Item, tdir string, overwrite bool, dataFolder string) ( if _, err = os.Stat(parent_dir); os.IsNotExist(err) { log.Debugf("%s doesn't exist, create", parent_dir) if err := os.MkdirAll(parent_dir, os.ModePerm); err != nil { - return target, fmt.Errorf("unable to create parent directories") + return target, errors.Wrap(err, "while creating parent directories") } } /*check actual file*/ @@ -201,12 +176,12 @@ func DownloadItem(target Item, tdir string, overwrite bool, dataFolder string) ( f, err := os.OpenFile(tdir+"/"+target.RemotePath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644) if err != nil { - return target, fmt.Errorf("failed to open destination file %s : %v", tdir+"/"+target.RemotePath, err) + return target, errors.Wrap(err, "while opening file") } defer f.Close() _, err = f.WriteString(string(body)) if err != nil { - return target, fmt.Errorf("failed to write destination file %s : %v", tdir+"/"+target.RemotePath, err) + return target, errors.Wrap(err, "while writing file") } target.Downloaded = true target.Tainted = false @@ -220,14 +195,14 @@ func DownloadItem(target Item, tdir string, overwrite bool, dataFolder string) ( if err == io.EOF { break } else { - return target, fmt.Errorf("unable to read file %s data: %s", tdir+"/"+target.RemotePath, err) + return target, errors.Wrap(err, "while reading file") } } err = types.GetData(data.Data, dataFolder) if err != nil { - return target, fmt.Errorf("unable to get data: %s", err) + return target, errors.Wrap(err, "while getting data") } } - HubIdx[target.Type][target.Name] = target + hubIdx[target.Type][target.Name] = target return target, nil } diff --git a/pkg/cwhub/download_test.go b/pkg/cwhub/download_test.go new file mode 100644 index 
000000000..46c544335 --- /dev/null +++ b/pkg/cwhub/download_test.go @@ -0,0 +1,35 @@ +package cwhub + +import ( + "fmt" + "strings" + "testing" + + "github.com/crowdsecurity/crowdsec/pkg/csconfig" +) + +func TestDownloadHubIdx(t *testing.T) { + back := RawFileURLTemplate + //bad url template + RawFileURLTemplate = "x" + ret, err := DownloadHubIdx(&csconfig.CscliCfg{}) + if err == nil || !strings.HasPrefix(fmt.Sprintf("%s", err), "failed to build request for hub index: parse ") { + t.Fatalf("unexpected error %s", err) + } + //bad domain + RawFileURLTemplate = "https://baddomain/crowdsecurity/hub/%s/%s" + ret, err = DownloadHubIdx(&csconfig.CscliCfg{}) + if err == nil || !strings.HasPrefix(fmt.Sprintf("%s", err), "failed http request for hub index: Get") { + t.Fatalf("unexpected error %s", err) + } + + //bad target path + RawFileURLTemplate = back + ret, err = DownloadHubIdx(&csconfig.CscliCfg{HubIndexFile: "/does/not/exist/index.json"}) + if err == nil || !strings.HasPrefix(fmt.Sprintf("%s", err), "while opening hub index file: open /does/not/exist/index.json:") { + t.Fatalf("unexpected error %s", err) + } + + RawFileURLTemplate = back + fmt.Printf("->%+v", ret) +} diff --git a/pkg/cwhub/install.go b/pkg/cwhub/install.go index 68c57b031..7d66c7bf2 100644 --- a/pkg/cwhub/install.go +++ b/pkg/cwhub/install.go @@ -5,11 +5,16 @@ import ( "os" "path/filepath" + "github.com/crowdsecurity/crowdsec/pkg/csconfig" + "github.com/pkg/errors" log "github.com/sirupsen/logrus" ) //DisableItem to disable an item managed by the hub, removes the symlink if purge is true -func DisableItem(target Item, tdir string, hdir string, purge bool) (Item, error) { +func DisableItem(cscli *csconfig.CscliCfg, target Item, purge bool) (Item, error) { + var tdir = cscli.ConfigDir + var hdir = cscli.HubDir + syml, err := filepath.Abs(tdir + "/" + target.Type + "/" + target.Stage + "/" + target.FileName) if err != nil { return Item{}, err @@ -24,10 +29,10 @@ func DisableItem(target Item, tdir string, hdir string, purge bool) (Item, error for idx, ptr := range tmp { ptrtype := ItemTypes[idx] for _, p := range ptr { - if val, ok := HubIdx[ptrtype][p]; ok { - HubIdx[ptrtype][p], err = DisableItem(val, Installdir, Hubdir, false) + if val, ok := hubIdx[ptrtype][p]; ok { + hubIdx[ptrtype][p], err = DisableItem(cscli, val, purge) if err != nil { - log.Errorf("Encountered error while disabling %s %s : %s.", ptrtype, p, err) + return target, errors.Wrap(err, fmt.Sprintf("while disabling %s", p)) } } else { log.Errorf("Referred %s %s in collection %s doesn't exist.", ptrtype, p, target.Name) @@ -39,21 +44,22 @@ func DisableItem(target Item, tdir string, hdir string, purge bool) (Item, error stat, err := os.Lstat(syml) if os.IsNotExist(err) { - log.Warningf("%s (%s) doesn't exist, can't disable", target.Name, syml) - //return target, nil //fmt.Errorf("'%s' doesn't exist", syml) + if !purge { //we only accept to "delete" non existing items if it's a purge + return target, fmt.Errorf("can't delete %s : %s doesn't exist", target.Name, syml) + } } else { - //if it's managed by hub, it's a symlink to Hubdir / ... + //if it's managed by hub, it's a symlink to csconfig.GConfig.Cscli.HubDir / ...
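The symlink check that follows is the heart of the hub install model: EnableItem links the pristine copy under HubDir into ConfigDir, and DisableItem refuses to remove anything that is not a symlink resolving into the hub tree. A self-contained sketch of that ownership test, assuming illustrative paths rather than crowdsec defaults:

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

// isHubManaged mirrors the check DisableItem performs: the installed file
// must be a symlink, and its target must resolve inside hubDir.
func isHubManaged(installedPath, hubDir string) (bool, error) {
	fi, err := os.Lstat(installedPath)
	if err != nil {
		return false, err
	}
	if fi.Mode()&os.ModeSymlink == 0 {
		return false, nil // plain file: local or tainted, not hub-managed
	}
	dst, err := os.Readlink(installedPath)
	if err != nil {
		return false, err
	}
	abs, err := filepath.Abs(dst)
	if err != nil {
		return false, err
	}
	return strings.HasPrefix(abs, hubDir), nil
}

func main() {
	// hypothetical paths, for illustration only
	ok, err := isHubManaged("/etc/crowdsec/config/scenarios/ssh-bf.yaml",
		"/etc/crowdsec/config/hub")
	fmt.Println(ok, err)
}
```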
if stat.Mode()&os.ModeSymlink == 0 { log.Warningf("%s (%s) isn't a symlink, can't disable", target.Name, syml) return target, fmt.Errorf("%s isn't managed by hub", target.Name) } hubpath, err := os.Readlink(syml) if err != nil { - return target, fmt.Errorf("unable to read symlink of %s (%s)", target.Name, syml) + return target, errors.Wrap(err, "while reading symlink") } absPath, err := filepath.Abs(hdir + "/" + target.RemotePath) if err != nil { - return target, err + return target, errors.Wrap(err, "while resolving absolute path") } if hubpath != absPath { log.Warningf("%s (%s) isn't a symlink to %s", target.Name, syml, absPath) @@ -62,7 +68,7 @@ func DisableItem(target Item, tdir string, hdir string, purge bool) (Item, error //remove the symlink if err = os.Remove(syml); err != nil { - return target, fmt.Errorf("failed to unlink %s: %+v", syml, err) + return target, errors.Wrap(err, "while removing symlink") } log.Infof("Removed symlink [%s] : %s", target.Name, syml) } @@ -72,16 +78,18 @@ func DisableItem(target Item, tdir string, hdir string, purge bool) (Item, error hubpath := hdir + "/" + target.RemotePath //if purge, disable hub file if err = os.Remove(hubpath); err != nil { - return target, fmt.Errorf("failed to purge hub file %s: %+v", hubpath, err) + return target, errors.Wrap(err, "while removing file") } target.Downloaded = false log.Infof("Removed source file [%s] : %s", target.Name, hubpath) } - HubIdx[target.Type][target.Name] = target + hubIdx[target.Type][target.Name] = target return target, nil } -func EnableItem(target Item, tdir string, hdir string) (Item, error) { +func EnableItem(cscli *csconfig.CscliCfg, target Item) (Item, error) { + var tdir = cscli.ConfigDir + var hdir = cscli.HubDir var err error parent_dir := filepath.Clean(tdir + "/" + target.Type + "/" + target.Stage + "/") /*create directories if needed*/ @@ -94,14 +102,14 @@ func EnableItem(target Item, tdir string, hdir string) (Item, error) { } /* if it's a collection, check sub-items even if the collection file itself is up-to-date */ if target.UpToDate && target.Type != COLLECTIONS { - log.Debugf("%s is installed and up-to-date, skip.", target.Name) + log.Tracef("%s is installed and up-to-date, skip.", target.Name) return target, nil } } if _, err := os.Stat(parent_dir); os.IsNotExist(err) { log.Printf("%s doesn't exist, create", parent_dir) if err := os.MkdirAll(parent_dir, os.ModePerm); err != nil { - return target, fmt.Errorf("unable to create parent directories") + return target, errors.Wrap(err, "while creating directory") } } @@ -111,14 +119,12 @@ func EnableItem(target Item, tdir string, hdir string) (Item, error) { for idx, ptr := range tmp { ptrtype := ItemTypes[idx] for _, p := range ptr { - if val, ok := HubIdx[ptrtype][p]; ok { - HubIdx[ptrtype][p], err = EnableItem(val, Installdir, Hubdir) + if val, ok := hubIdx[ptrtype][p]; ok { + hubIdx[ptrtype][p], err = EnableItem(cscli, val) if err != nil { - log.Errorf("Encountered error while installing sub-item %s %s : %s.", ptrtype, p, err) - return target, fmt.Errorf("encountered error while install %s for %s, abort.", val.Name, target.Name) + return target, errors.Wrap(err, fmt.Sprintf("while installing %s", p)) } } else { - //log.Errorf("Referred %s %s in collection %s doesn't exist.", ptrtype, p, target.Name) return target, fmt.Errorf("required %s %s of %s doesn't exist, abort.", ptrtype, p, target.Name) } } @@ -129,16 +135,15 @@ func EnableItem(target Item, tdir string, hdir string) (Item, error) { //tdir+target.RemotePath srcPath, err := filepath.Abs(hdir +
"/" + target.RemotePath) if err != nil { - return target, fmt.Errorf("failed to resolve %s : %s", hdir+"/"+target.RemotePath, err) + return target, errors.Wrap(err, "while getting source path") } dstPath, err := filepath.Abs(parent_dir + "/" + target.FileName) if err != nil { - return target, fmt.Errorf("failed to resolve %s : %s", parent_dir+"/"+target.FileName, err) + return target, errors.Wrap(err, "while getting destination path") } err = os.Symlink(srcPath, dstPath) if err != nil { - log.Fatalf("Failed to symlink %s to %s : %v", srcPath, dstPath, err) - return target, fmt.Errorf("failed to symlink %s to %s", srcPath, dstPath) + return target, errors.Wrap(err, fmt.Sprintf("while creating symlink from %s to %s", srcPath, dstPath)) } log.Printf("Enabled %s : %s", target.Type, target.Name) } else { @@ -146,6 +151,6 @@ func EnableItem(target Item, tdir string, hdir string) (Item, error) { return target, nil } target.Installed = true - HubIdx[target.Type][target.Name] = target + hubIdx[target.Type][target.Name] = target return target, nil } diff --git a/pkg/cwhub/loader.go b/pkg/cwhub/loader.go index 8d25bc9b0..ff605bc63 100644 --- a/pkg/cwhub/loader.go +++ b/pkg/cwhub/loader.go @@ -2,20 +2,26 @@ package cwhub import ( "encoding/json" - "errors" + //"errors" "fmt" "io/ioutil" "sort" + "github.com/pkg/errors" + //"log" "os" "path/filepath" "strings" + "github.com/crowdsecurity/crowdsec/pkg/csconfig" log "github.com/sirupsen/logrus" ) +/*the walk/parser_visit function can't receive extra args*/ +var hubdir, installdir, indexpath string + func parser_visit(path string, f os.FileInfo, err error) error { var target Item @@ -26,6 +32,11 @@ func parser_visit(path string, f os.FileInfo, err error) error { var ftype string var fauthor string var stage string + + path, err = filepath.Abs(path) + if err != nil { + return err + } //we only care about files if f == nil || f.IsDir() { return nil @@ -37,37 +48,36 @@ func parser_visit(path string, f os.FileInfo, err error) error { subs := strings.Split(path, "/") - log.Tracef("path:%s, hubdir:%s, installdir:%s", path, Hubdir, Installdir) + log.Tracef("path:%s, hubdir:%s, installdir:%s", path, hubdir, installdir) /*we're in hub (~/.cscli/hub/)*/ - if strings.HasPrefix(path, Hubdir) { + if strings.HasPrefix(path, hubdir) { + log.Tracef("in hub dir") inhub = true - //~/.cscli/hub/parsers/s00-raw/crowdsec/skip-pretag.yaml - //~/.cscli/hub/scenarios/crowdsec/ssh_bf.yaml - //~/.cscli/hub/profiles/crowdsec/linux.yaml + //.../hub/parsers/s00-raw/crowdsec/skip-pretag.yaml + //.../hub/scenarios/crowdsec/ssh_bf.yaml + //.../hub/profiles/crowdsec/linux.yaml if len(subs) < 4 { - log.Fatalf("path is too short : %s", path) + log.Fatalf("path is too short : %s (%d)", path, len(subs)) } fname = subs[len(subs)-1] fauthor = subs[len(subs)-2] stage = subs[len(subs)-3] ftype = subs[len(subs)-4] - log.Tracef("HUBB check [%s] by [%s] in stage [%s] of type [%s]", fname, fauthor, stage, ftype) - - } else if strings.HasPrefix(path, Installdir) { /*we're in install /etc/crowdsec//... */ + } else if strings.HasPrefix(path, installdir) { /*we're in install /etc/crowdsec//... 
*/ + log.Tracef("in install dir") if len(subs) < 3 { - log.Fatalf("path is too short : %s", path) + log.Fatalf("path is too short : %s (%d)", path, len(subs)) } - ///etc/.../parser/stage/file.yaml - ///etc/.../postoverflow/stage/file.yaml - ///etc/.../scenarios/scenar.yaml - ///etc/.../collections/linux.yaml //file is empty + ///.../config/parser/stage/file.yaml + ///.../config/postoverflow/stage/file.yaml + ///.../config/scenarios/scenar.yaml + ///.../config/collections/linux.yaml //file is empty fname = subs[len(subs)-1] stage = subs[len(subs)-2] ftype = subs[len(subs)-3] fauthor = "" - log.Tracef("INSTALL check [%s] by [%s] in stage [%s] of type [%s]", fname, fauthor, stage, ftype) } else { - log.Errorf("unknown prefix in %s (not install:%s and not hub:%s)", path, Installdir, Hubdir) + return fmt.Errorf("File '%s' is not from hub '%s' nor from the configuration directory '%s'", path, hubdir, installdir) } //log.Printf("%s -> name:%s stage:%s", path, fname, stage) @@ -78,7 +88,7 @@ func parser_visit(path string, f os.FileInfo, err error) error { ftype = COLLECTIONS stage = "" } else if ftype != PARSERS && ftype != PARSERS_OVFLW /*its a PARSER / PARSER_OVFLW with a stage */ { - return fmt.Errorf("unknown prefix in %s : fname:%s, fauthor:%s, stage:%s, ftype:%s", path, fname, fauthor, stage, ftype) + return fmt.Errorf("unknown configuration type for file '%s'", path) } log.Tracef("CORRECTED [%s] by [%s] in stage [%s] of type [%s]", fname, fauthor, stage, ftype) @@ -113,7 +123,7 @@ func parser_visit(path string, f os.FileInfo, err error) error { //if it's not a symlink and not in hub, it's a local file, don't bother if local && !inhub { - log.Debugf("%s is a local file, skip", path) + log.Tracef("%s is a local file, skip", path) skippedLocal++ // log.Printf("local scenario, skip.") target.Name = fname @@ -126,14 +136,14 @@ func parser_visit(path string, f os.FileInfo, err error) error { x := strings.Split(path, "/") target.FileName = x[len(x)-1] - HubIdx[ftype][fname] = target + hubIdx[ftype][fname] = target return nil } //try to find which configuration item it is log.Tracef("check [%s] of %s", fname, ftype) match := false - for k, v := range HubIdx[ftype] { + for k, v := range hubIdx[ftype] { log.Tracef("check [%s] vs [%s] : %s", fname, v.RemotePath, ftype+"/"+stage+"/"+fname+".yaml") if fname != v.FileName { log.Tracef("%s != %s (filename)", fname, v.FileName) @@ -153,7 +163,7 @@ func parser_visit(path string, f os.FileInfo, err error) error { if v.Name+".yaml" != fauthor+"/"+fname { continue } - if path == Hubdir+"/"+v.RemotePath { + if path == hubdir+"/"+v.RemotePath { log.Tracef("marking %s as downloaded", v.Name) v.Downloaded = true } @@ -185,7 +195,7 @@ func parser_visit(path string, f os.FileInfo, err error) error { } else { /*we got an exact match, update struct*/ if !inhub { - log.Debugf("found exact match for %s, version is %s, latest is %s", v.Name, version, v.Version) + log.Tracef("found exact match for %s, version is %s, latest is %s", v.Name, version, v.Version) v.LocalPath = path v.LocalVersion = version v.Tainted = false @@ -205,7 +215,7 @@ func parser_visit(path string, f os.FileInfo, err error) error { } } if !match { - log.Debugf("got tainted match for %s : %s", v.Name, path) + log.Tracef("got tainted match for %s : %s", v.Name, path) skippedTainted += 1 //the file and the stage is right, but the hash is wrong, it has been tainted by user if !inhub { @@ -220,8 +230,12 @@ func parser_visit(path string, f os.FileInfo, err error) error { target.FileName = x[len(x)-1] } - 
//update the entry - HubIdx[ftype][k] = v + //update the entry if appropriate + if _, ok := hubIdx[ftype][k]; !ok { + hubIdx[ftype][k] = v + } else if !inhub { + hubIdx[ftype][k] = v + } return nil } log.Infof("Ignoring file %s of type %s", path, ftype) @@ -236,7 +250,7 @@ func CollecDepsCheck(v *Item) error { for idx, ptr := range tmp { ptrtype := ItemTypes[idx] for _, p := range ptr { - if val, ok := HubIdx[ptrtype][p]; ok { + if val, ok := hubIdx[ptrtype][p]; ok { log.Tracef("check %s installed:%t", val.Name, val.Installed) if !v.Installed { continue @@ -246,7 +260,7 @@ func CollecDepsCheck(v *Item) error { if err := CollecDepsCheck(&val); err != nil { return fmt.Errorf("sub collection %s is broken : %s", val.Name, err) } - HubIdx[ptrtype][p] = val + hubIdx[ptrtype][p] = val } //propagate the state of sub-items to set @@ -260,8 +274,16 @@ func CollecDepsCheck(v *Item) error { v.UpToDate = false return fmt.Errorf("outdated %s %s", ptrtype, p) } - val.BelongsToCollections = append(val.BelongsToCollections, v.Name) - HubIdx[ptrtype][p] = val + skip := false + for idx := range val.BelongsToCollections { + if val.BelongsToCollections[idx] == v.Name { + skip = true + } + } + if !skip { + val.BelongsToCollections = append(val.BelongsToCollections, v.Name) + } + hubIdx[ptrtype][p] = val log.Tracef("checking for %s - tainted:%t uptodate:%t", p, v.Tainted, v.UpToDate) } else { log.Fatalf("Referred %s %s in collection %s doesn't exist.", ptrtype, p, v.Name) @@ -272,35 +294,54 @@ func CollecDepsCheck(v *Item) error { return nil } -/* Updates the infos from HubInit() with the local state */ -func LocalSync() error { - skippedLocal = 0 - skippedTainted = 0 +func SyncDir(cscli *csconfig.CscliCfg, dir string) error { + hubdir = cscli.HubDir + installdir = cscli.ConfigDir + indexpath = cscli.HubIndexFile + /*For each, scan PARSERS, PARSERS_OVFLW, SCENARIOS and COLLECTIONS last*/ for _, scan := range ItemTypes { - /*Scan install and Hubdir to get local status*/ - for _, dir := range []string{Installdir, Hubdir} { - //walk the user's directory - err := filepath.Walk(dir+"/"+scan, parser_visit) - if err != nil { - return err - } + cpath, err := filepath.Abs(fmt.Sprintf("%s/%s", dir, scan)) + if err != nil { + log.Errorf("failed %s : %s", cpath, err) } + err = filepath.Walk(cpath, parser_visit) + if err != nil { + return err + } + } - for k, v := range HubIdx[COLLECTIONS] { + for k, v := range hubIdx[COLLECTIONS] { if err := CollecDepsCheck(&v); err != nil { log.Infof("dependency issue %s : %s", v.Name, err) } - HubIdx[COLLECTIONS][k] = v + hubIdx[COLLECTIONS][k] = v } return nil } -func GetHubIdx() error { +/* Updates the infos from HubInit() with the local state */ +func LocalSync(cscli *csconfig.CscliCfg) error { + skippedLocal = 0 + skippedTainted = 0 - bidx, err := ioutil.ReadFile(Cfgdir + "/.index.json") + for _, dir := range []string{cscli.ConfigDir, cscli.HubDir} { + log.Debugf("scanning %s", dir) + if err := SyncDir(cscli, dir); err != nil { + return fmt.Errorf("failed to scan %s : %s", dir, err) + } + } + + return nil +} + +func GetHubIdx(cscli *csconfig.CscliCfg) error { + + log.Debugf("loading hub idx %s", cscli.HubIndexFile) + bidx, err := ioutil.ReadFile(cscli.HubIndexFile) if err != nil { + return errors.Wrap(err, "unable to read index file") - log.Fatalf("Unable to read downloaded index : %v.
Please run update", err) } ret, err := LoadPkgIndex(bidx) @@ -310,8 +351,8 @@ func GetHubIdx() error { } return err } - HubIdx = ret - if err := LocalSync(); err != nil { + hubIdx = ret + if err := LocalSync(cscli); err != nil { log.Fatalf("Failed to sync Hub index with local deployment : %v", err) } return nil @@ -327,9 +368,11 @@ func LoadPkgIndex(buff []byte) (map[string]map[string]Item, error) { return nil, fmt.Errorf("failed to unmarshal index : %v", err) } + log.Debugf("%d item types in hub index", len(ItemTypes)) /*Iterate over the different types to complete struct */ for _, itemType := range ItemTypes { /*complete struct*/ + log.Tracef("%d item", len(RawIndex[itemType])) for idx, item := range RawIndex[itemType] { item.Name = idx item.Type = itemType diff --git a/pkg/cwplugin/backend.go b/pkg/cwplugin/backend.go deleted file mode 100644 index 0e608f346..000000000 --- a/pkg/cwplugin/backend.go +++ /dev/null @@ -1,231 +0,0 @@ -package cwplugin - -import ( - "fmt" - "io/ioutil" - "os" - "path/filepath" - "plugin" - "time" - - "github.com/crowdsecurity/crowdsec/pkg/types" - log "github.com/sirupsen/logrus" - "gopkg.in/yaml.v2" -) - -// the structure returned by the function New() of the plugin must match this interface -type Backend interface { - Insert(types.SignalOccurence) error - ReadAT(time.Time) ([]map[string]string, error) - Delete(string) (int, error) - Init(map[string]string) error - Flush() error - Shutdown() error - DeleteAll() error - StartAutoCommit() error -} - -type BackendPlugin struct { - Name string `yaml:"name"` - Path string `yaml:"path"` - ConfigFilePath string - //Config is passed to the backend plugin. - //It contains specific plugin config + plugin config from main yaml file - Config map[string]string `yaml:"config"` - ID string - funcs Backend -} - -type BackendManager struct { - backendPlugins map[string]BackendPlugin -} - -func NewBackendPlugin(outputConfig map[string]string) (*BackendManager, error) { - var files []string - var backendManager = &BackendManager{} - var path string - - if v, ok := outputConfig["backend"]; ok { - path = v - } else { - return nil, fmt.Errorf("missing 'backend' (path to backend plugins)") - } - err := filepath.Walk(path, func(path string, info os.FileInfo, err error) error { - if filepath.Ext(path) == ".yaml" { - files = append(files, path) - } - return nil - }) - if err != nil { - panic(err) - } - - backendManager.backendPlugins = make(map[string]BackendPlugin, len(files)) - - for _, file := range files { - var newPlugin BackendPlugin - log.Debugf("opening plugin '%s'", file) - bConfig, err := ioutil.ReadFile(file) - if err != nil { - log.Errorf("unable to open file '%s' : %s, skipping", file, err) - continue - } - if err := yaml.UnmarshalStrict(bConfig, &newPlugin); err != nil { - log.Errorf("parsing '%s' yaml error : %s, skipping", file, err) - continue - } - - plug, err := plugin.Open(newPlugin.Path) - if err != nil { - return nil, err - } - //Lookup a function called 'New' to get the plugin interface - symbol, err := plug.Lookup("New") - if err != nil { - return nil, fmt.Errorf("no 'New' function in plugin : %s", err) - } - symNew, ok := symbol.(func() interface{}) - if !ok { - log.Errorf("plugin '%s' do not implement a GetFunctions() that return a list of string, skipping", file) - continue - } - - // cast the return interface to Backend interface - plugNew := symNew() - bInterface, ok := plugNew.(Backend) - if !ok { - return nil, fmt.Errorf("unexpected '%s' type (%T), skipping", newPlugin.Name, plugNew) - } - - // Add the 
interface and Init() - newPlugin.funcs = bInterface - // Merge backend config from main config file - if v, ok := outputConfig["debug"]; ok && v == "true" { - newPlugin.Config["debug"] = "true" - } - - if v, ok := outputConfig["max_records"]; ok { - newPlugin.Config["max_records"] = v - } - - if v, ok := outputConfig["max_records_age"]; ok { - newPlugin.Config["max_records_age"] = v - } - - if v, ok := outputConfig["flush"]; ok && v != "" { - newPlugin.Config["flush"] = v - } - err = newPlugin.funcs.Init(newPlugin.Config) - if err != nil { - return nil, fmt.Errorf("plugin '%s' init error : %s", newPlugin.Name, err) - } - log.Debugf("backend plugin '%s' loaded", newPlugin.Name) - backendManager.backendPlugins[newPlugin.Name] = newPlugin - - } - log.Debugf("loaded %d backend plugins", len(backendManager.backendPlugins)) - if len(backendManager.backendPlugins) == 0 { - return nil, fmt.Errorf("no plugins loaded from %s", path) - } - return backendManager, nil -} - -func (b *BackendManager) Delete(target string) (int, error) { - var err error - var nbDel int - for _, plugin := range b.backendPlugins { - nbDel, err = plugin.funcs.Delete(target) - if err != nil { - return 0, fmt.Errorf("failed to delete : %s", err) - } - } - return nbDel, nil -} - -func (b *BackendManager) Shutdown() error { - var err error - for _, plugin := range b.backendPlugins { - err = plugin.funcs.Shutdown() - if err != nil { - return fmt.Errorf("failed to shutdown : %s", err) - } - } - return nil -} - -func (b *BackendManager) DeleteAll() error { - var err error - for _, plugin := range b.backendPlugins { - err = plugin.funcs.DeleteAll() - if err != nil { - return fmt.Errorf("failed to delete : %s", err) - } - } - return nil -} - -// Insert the signal for the plugin specified in the config["plugin"] parameter -func (b *BackendManager) InsertOnePlugin(sig types.SignalOccurence, pluginName string) error { - if val, ok := b.backendPlugins[pluginName]; ok { - if err := val.funcs.Insert(sig); err != nil { - return fmt.Errorf("failed to load %s : %s", pluginName, err) - } - } else { - return fmt.Errorf("plugin '%s' not loaded", pluginName) - } - return nil -} - -// Insert the signal for all the plugins -func (b *BackendManager) Insert(sig types.SignalOccurence) error { - var err error - for _, plugin := range b.backendPlugins { - err = plugin.funcs.Insert(sig) - if err != nil { - return fmt.Errorf("flushing backend plugin '%s' failed: %s", plugin.Name, err) - } - } - - return nil -} - -func (b *BackendManager) IsBackendPlugin(plugin string) bool { - if _, ok := b.backendPlugins[plugin]; ok { - return true - } - return false -} - -func (b *BackendManager) StartAutoCommit() error { - var err error - for _, plugin := range b.backendPlugins { - err = plugin.funcs.StartAutoCommit() - if err != nil { - return err - } - } - return nil -} - -func (b *BackendManager) ReadAT(timeAT time.Time) ([]map[string]string, error) { - var ret []map[string]string - for _, plugin := range b.backendPlugins { - subret, err := plugin.funcs.ReadAT(timeAT) - ret = append(ret, subret...) 
- if err != nil { - return nil, err - } - } - return ret, nil -} - -func (b *BackendManager) Flush() error { - var err error - for _, plugin := range b.backendPlugins { - err = plugin.funcs.Flush() - if err != nil { - return fmt.Errorf("flushing backend plugin '%s' failed: %s", plugin.Name, err) - } - } - return nil -} diff --git a/pkg/cwplugin/notification.go b/pkg/cwplugin/notification.go deleted file mode 100644 index dc97f8a83..000000000 --- a/pkg/cwplugin/notification.go +++ /dev/null @@ -1,4 +0,0 @@ -package cwplugin - -type NotificationManager struct { -} diff --git a/pkg/cwversion/version.go b/pkg/cwversion/version.go index b4b68498d..df87ecbd8 100644 --- a/pkg/cwversion/version.go +++ b/pkg/cwversion/version.go @@ -33,6 +33,15 @@ var ( Constraint_acquis = ">= 1.0, < 2.0" ) +func ShowStr() string { + ret := "" + ret += fmt.Sprintf("version: %s-%s\n", Version, Tag) + ret += fmt.Sprintf("Codename: %s\n", Codename) + ret += fmt.Sprintf("BuildDate: %s\n", BuildDate) + ret += fmt.Sprintf("GoVersion: %s\n", GoVersion) + return ret +} + func Show() { log.Printf("version: %s-%s", Version, Tag) log.Printf("Codename: %s", Codename) @@ -78,7 +87,7 @@ func Latest() (string, error) { return "", err } if _, ok := latest["name"]; !ok { - return "", fmt.Errorf("unable to find latest release name from github api") + return "", fmt.Errorf("unable to find latest release name from github api: %+v", latest) } return latest["name"].(string), nil diff --git a/pkg/database/alerts.go b/pkg/database/alerts.go new file mode 100644 index 000000000..91e354961 --- /dev/null +++ b/pkg/database/alerts.go @@ -0,0 +1,573 @@ +package database + +import ( + "encoding/json" + "fmt" + "strconv" + "strings" + "time" + + "github.com/crowdsecurity/crowdsec/pkg/database/ent" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/alert" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/decision" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/event" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/meta" + "github.com/crowdsecurity/crowdsec/pkg/models" + "github.com/crowdsecurity/crowdsec/pkg/types" + "github.com/davecgh/go-spew/spew" + "github.com/pkg/errors" + log "github.com/sirupsen/logrus" +) + +const ( + paginationSize = 100 // page size used when querying alerts, to avoid 'too many SQL variables' + defaultLimit = 100 // default number of alerts returned by a query + bulkSize = 50 // bulk size when creating alerts +) + +func formatAlertAsString(machineId string, alert *models.Alert) []string { + var retStr []string + + /**/ + src := "" + if alert.Source != nil { + if *alert.Source.Scope == types.Ip { + src = fmt.Sprintf("ip %s", *alert.Source.Value) + if alert.Source.Cn != "" { + src += " (" + alert.Source.Cn + if alert.Source.AsNumber != "" { + src += "/" + alert.Source.AsNumber + } + src += ")" + } + } else if *alert.Source.Scope == types.Range { + src = fmt.Sprintf("range %s", *alert.Source.Value) + if alert.Source.Cn != "" { + src += " (" + alert.Source.Cn + if alert.Source.AsNumber != "" { + src += "/" + alert.Source.AsNumber + } + src += ")" + } + } else { + src = fmt.Sprintf("%s %s", *alert.Source.Scope, *alert.Source.Value) + } + } else { + src = "empty source" + } + + /**/ + reason := "" + if *alert.Scenario != "" { + reason = fmt.Sprintf("%s by %s", *alert.Scenario, src) + } else if *alert.Message != "" { + reason = fmt.Sprintf("%s by %s", *alert.Message, src) + } else { + reason = fmt.Sprintf("empty scenario by %s", src) + } + + if len(alert.Decisions) > 0 { + for _, decisionItem := range alert.Decisions
{ + decision := "" + if alert.Simulated != nil && *alert.Simulated { + decision = "(simulated alert)" + } else if decisionItem.Simulated != nil && *decisionItem.Simulated { + decision = "(simulated decision)" + } + if log.GetLevel() >= log.DebugLevel { + /*spew is expensive*/ + log.Debugf("%s", spew.Sdump(decisionItem)) + } + decision += fmt.Sprintf("%s %s on %s %s", *decisionItem.Duration, + *decisionItem.Type, *decisionItem.Scope, *decisionItem.Value) + retStr = append(retStr, + fmt.Sprintf("(%s/%s) %s : %s", machineId, + *decisionItem.Origin, reason, decision)) + } + } else { + retStr = append(retStr, fmt.Sprintf("(%s) alert : %s", machineId, reason)) + } + return retStr +} + +func (c *Client) CreateAlert(machineID string, alertList []*models.Alert) ([]string, error) { + pageStart := 0 + pageEnd := bulkSize + ret := []string{} + for { + if pageEnd >= len(alertList) { + results, err := c.CreateAlertBulk(machineID, alertList[pageStart:]) + if err != nil { + return []string{}, fmt.Errorf("unable to create alerts: %s", err) + } + ret = append(ret, results...) + break + } + results, err := c.CreateAlertBulk(machineID, alertList[pageStart:pageEnd]) + if err != nil { + return []string{}, fmt.Errorf("unable to create alerts: %s", err) + } + ret = append(ret, results...) + pageStart += bulkSize + pageEnd += bulkSize + } + return ret, nil +} + +func (c *Client) CreateAlertBulk(machineId string, alertList []*models.Alert) ([]string, error) { + var decisions []*ent.Decision + var metas []*ent.Meta + var events []*ent.Event + + ret := []string{} + bulkSize := 20 + + c.Log.Debugf("writing %d items", len(alertList)) + bulk := make([]*ent.AlertCreate, 0, bulkSize) + for i, alertItem := range alertList { + owner, err := c.QueryMachineByID(machineId) + if err != nil { + if errors.Cause(err) != UserNotExists { + return []string{}, errors.Wrapf(QueryFail, "machine '%s': %s", alertItem.MachineID, err) + } + log.Debugf("CreateAlertBulk: Machine Id %s doesn't exist", machineId) + owner = nil + } + startAtTime, err := time.Parse(time.RFC3339, *alertItem.StartAt) + if err != nil { + return []string{}, errors.Wrapf(ParseTimeFail, "start_at field time '%s': %s", *alertItem.StartAt, err) + } + + stopAtTime, err := time.Parse(time.RFC3339, *alertItem.StopAt) + if err != nil { + return []string{}, errors.Wrapf(ParseTimeFail, "stop_at field time '%s': %s", *alertItem.StopAt, err) + } + /*display proper alert in logs*/ + for _, disp := range formatAlertAsString(machineId, alertItem) { + log.Info(disp) + } + + if len(alertItem.Events) > 0 { + eventBulk := make([]*ent.EventCreate, len(alertItem.Events)) + for i, eventItem := range alertItem.Events { + ts, err := time.Parse(time.RFC3339, *eventItem.Timestamp) + if err != nil { + return []string{}, errors.Wrapf(ParseTimeFail, "event timestamp '%s' : %s", *eventItem.Timestamp, err) + } + marshallMetas, err := json.Marshal(eventItem.Meta) + if err != nil { + return []string{}, errors.Wrapf(MarshalFail, "event meta '%v' : %s", eventItem.Meta, err) + } + + eventBulk[i] = c.Ent.Event.Create(). + SetTime(ts). + SetSerialized(string(marshallMetas)) + } + events, err = c.Ent.Event.CreateBulk(eventBulk...).Save(c.CTX) + if err != nil { + return []string{}, errors.Wrapf(BulkError, "creating alert events: %s", err) + } + } + + if len(alertItem.Meta) > 0 { + metaBulk := make([]*ent.MetaCreate, len(alertItem.Meta)) + for i, metaItem := range alertItem.Meta { + metaBulk[i] = c.Ent.Meta.Create(). + SetKey(metaItem.Key).
+ SetValue(metaItem.Value) + } + metas, err = c.Ent.Meta.CreateBulk(metaBulk...).Save(c.CTX) + if err != nil { + return []string{}, errors.Wrapf(BulkError, "creating alert meta: %s", err) + } + } + + ts, err := time.Parse(time.RFC3339, *alertItem.StopAt) + if err != nil { + log.Errorf("While parsing StopAt of item %s : %s", *alertItem.StopAt, err) + ts = time.Now() + } + if len(alertItem.Decisions) > 0 { + decisionBulk := make([]*ent.DecisionCreate, len(alertItem.Decisions)) + for i, decisionItem := range alertItem.Decisions { + + duration, err := time.ParseDuration(*decisionItem.Duration) + if err != nil { + return []string{}, errors.Wrapf(ParseDurationFail, "decision duration '%v' : %s", decisionItem.Duration, err) + } + decisionBulk[i] = c.Ent.Decision.Create(). + SetUntil(ts.Add(duration)). + SetScenario(*decisionItem.Scenario). + SetType(*decisionItem.Type). + SetStartIP(decisionItem.StartIP). + SetEndIP(decisionItem.EndIP). + SetValue(*decisionItem.Value). + SetScope(*decisionItem.Scope). + SetOrigin(*decisionItem.Origin). + SetSimulated(*alertItem.Simulated) + } + decisions, err = c.Ent.Decision.CreateBulk(decisionBulk...).Save(c.CTX) + if err != nil { + return []string{}, errors.Wrapf(BulkError, "creating alert decisions: %s", err) + + } + } + alertB := c.Ent.Alert. + Create(). + SetScenario(*alertItem.Scenario). + SetMessage(*alertItem.Message). + SetEventsCount(*alertItem.EventsCount). + SetStartedAt(startAtTime). + SetStoppedAt(stopAtTime). + SetSourceScope(*alertItem.Source.Scope). + SetSourceValue(*alertItem.Source.Value). + SetSourceIp(alertItem.Source.IP). + SetSourceRange(alertItem.Source.Range). + SetSourceAsNumber(alertItem.Source.AsNumber). + SetSourceAsName(alertItem.Source.AsName). + SetSourceCountry(alertItem.Source.Cn). + SetSourceLatitude(alertItem.Source.Latitude). + SetSourceLongitude(alertItem.Source.Longitude). + SetCapacity(*alertItem.Capacity). + SetLeakSpeed(*alertItem.Leakspeed). + SetSimulated(*alertItem.Simulated). + SetScenarioVersion(*alertItem.ScenarioVersion). + SetScenarioHash(*alertItem.ScenarioHash). + AddDecisions(decisions...). + AddEvents(events...). + AddMetas(metas...)
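CreateAlert above walks alertList in fixed windows of bulkSize (50) and hands each window to CreateAlertBulk, which in turn batches the ent inserts again (20 AlertCreate rows per CreateBulk) to stay under sqlite's SQL-variable limit. The window arithmetic in isolation, as a runnable sketch (chunk and the example sizes are illustrative, not part of the package):

```go
package main

import "fmt"

// chunk reproduces CreateAlert's pageStart/pageEnd walk: fixed-size
// windows over n items, the last one possibly shorter.
func chunk(n, size int, fn func(lo, hi int)) {
	for lo := 0; lo < n; lo += size {
		hi := lo + size
		if hi > n {
			hi = n
		}
		fn(lo, hi)
	}
}

func main() {
	// 7 alerts with a bulk size of 3 -> windows [0:3] [3:6] [6:7]
	chunk(7, 3, func(lo, hi int) { fmt.Printf("bulk [%d:%d]\n", lo, hi) })
}
```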
+ + if owner != nil { + alertB.SetOwner(owner) + } + bulk = append(bulk, alertB) + + if len(bulk) == bulkSize { + alerts, err := c.Ent.Alert.CreateBulk(bulk...).Save(c.CTX) + if err != nil { + return []string{}, errors.Wrapf(BulkError, "creating alert : %s", err) + } + for _, alert := range alerts { + ret = append(ret, strconv.Itoa(alert.ID)) + } + + if len(alertList)-i <= bulkSize { + bulk = make([]*ent.AlertCreate, 0, (len(alertList) - i)) + } else { + bulk = make([]*ent.AlertCreate, 0, bulkSize) + } + } + } + + alerts, err := c.Ent.Alert.CreateBulk(bulk...).Save(c.CTX) + if err != nil { + return []string{}, errors.Wrapf(BulkError, "creating alert : %s", err) + } + + for _, alert := range alerts { + ret = append(ret, strconv.Itoa(alert.ID)) + } + + return ret, nil +} + +func BuildAlertRequestFromFilter(alerts *ent.AlertQuery, filter map[string][]string) (*ent.AlertQuery, error) { + var err error + var startIP, endIP int64 + var hasActiveDecision bool + + /*the simulated filter is a bit different : if it's not present *or* set to false, specifically exclude records with simulated to true */ + if v, ok := filter["simulated"]; ok { + if v[0] == "false" { + alerts = alerts.Where(alert.SimulatedEQ(false)) + } + delete(filter, "simulated") + } + + for param, value := range filter { + switch param { + case "scope": + var scope string = value[0] + if strings.ToLower(scope) == "ip" { + scope = types.Ip + } else if strings.ToLower(scope) == "range" { + scope = types.Range + } + alerts = alerts.Where(alert.SourceScopeEQ(scope)) + case "value": + alerts = alerts.Where(alert.SourceValueEQ(value[0])) + case "scenario": + alerts = alerts.Where(alert.ScenarioEQ(value[0])) + case "ip": + isValidIP := IsIpv4(value[0]) + if !isValidIP { + return nil, errors.Wrapf(InvalidIPOrRange, "unable to parse '%s': %s", value[0], err) + } + startIP, endIP, err = GetIpsFromIpRange(value[0] + "/32") + if err != nil { + return nil, errors.Wrapf(InvalidIPOrRange, "unable to convert '%s' to int interval: %s", value[0], err) + } + case "range": + startIP, endIP, err = GetIpsFromIpRange(value[0]) + if err != nil { + return nil, errors.Wrapf(InvalidIPOrRange, "unable to convert '%s' to int interval: %s", value[0], err) + } + case "since": + duration, err := types.ParseDuration(value[0]) + if err != nil { + return nil, errors.Wrap(err, "while parsing duration") + } + since := time.Now().Add(-duration) + if since.IsZero() { + return nil, fmt.Errorf("Empty time now() - %s", since.String()) + } + alerts = alerts.Where(alert.StartedAtGTE(since)) + case "created_before": + duration, err := types.ParseDuration(value[0]) + if err != nil { + return nil, errors.Wrap(err, "while parsing duration") + } + since := time.Now().Add(-duration) + if since.IsZero() { + return nil, fmt.Errorf("Empty time now() - %s", since.String()) + } + alerts = alerts.Where(alert.CreatedAtLTE(since)) + case "until": + duration, err := types.ParseDuration(value[0]) + if err != nil { + return nil, errors.Wrap(err, "while parsing duration") + } + until := time.Now().Add(-duration) + if until.IsZero() { + return nil, fmt.Errorf("Empty time now() - %s", until.String()) + } + alerts = alerts.Where(alert.StartedAtLTE(until)) + case "decision_type": + alerts = alerts.Where(alert.HasDecisionsWith(decision.TypeEQ(value[0]))) + case "include_capi": //allows to exclude one or more specific origins + if value[0] == "false" { + alerts = alerts.Where(alert.HasDecisionsWith(decision.OriginNEQ("CAPI"))) + } else if value[0] != "true" { + log.Errorf("Invalid bool '%s' for 
include_capi", value[0]) + } + case "has_active_decision": + if hasActiveDecision, err = strconv.ParseBool(value[0]); err != nil { + return nil, errors.Wrapf(ParseType, "'%s' is not a boolean: %s", value[0], err) + } + if hasActiveDecision { + alerts = alerts.Where(alert.HasDecisionsWith(decision.UntilGTE(time.Now()))) + } else { + alerts = alerts.Where(alert.Not(alert.HasDecisions())) + } + case "limit": + continue + case "sort": + continue + default: + return nil, errors.Wrapf(InvalidFilter, "Filter parameter '%s' is unknown (=%s)", param, value[0]) + } + } + if startIP != 0 && endIP != 0 { + /*the user is checking for a single IP*/ + if startIP == endIP { + //DECISION_START <= IP_Q <= DECISION_END + alerts = alerts.Where(alert.And( + alert.HasDecisionsWith(decision.StartIPLTE(startIP)), + alert.HasDecisionsWith(decision.EndIPGTE(endIP)), + )) + } else { /*the user is checking for a RANGE */ + //START_Q >= DECISION_START AND END_Q <= DECISION_END + alerts = alerts.Where(alert.And( + alert.HasDecisionsWith(decision.StartIPGTE(startIP)), + alert.HasDecisionsWith(decision.EndIPLTE(endIP)), + )) + } + } + return alerts, nil +} + +func (c *Client) TotalAlerts() (int, error) { + return c.Ent.Alert.Query().Count(c.CTX) +} + +func (c *Client) QueryAlertWithFilter(filter map[string][]string) ([]*ent.Alert, error) { + sort := "DESC" // we sort by desc by default + if val, ok := filter["sort"]; ok { + if val[0] != "ASC" && val[0] != "DESC" { + log.Errorf("invalid 'sort' parameter: %s", val) + } else { + sort = val[0] + } + } + limit := defaultLimit + if val, ok := filter["limit"]; ok { + limitConv, err := strconv.Atoi(val[0]) + if err != nil { + return []*ent.Alert{}, errors.Wrapf(QueryFail, "bad limit in parameters: %s", val) + } + limit = limitConv + + } + offset := 0 + ret := make([]*ent.Alert, 0) + for { + alerts := c.Ent.Alert.Query() + alerts, err := BuildAlertRequestFromFilter(alerts, filter) + if err != nil { + return []*ent.Alert{}, err + } + alerts = alerts. + WithDecisions(). + WithEvents(). + WithMetas(). + WithOwner() + if sort == "ASC" { + alerts = alerts.Order(ent.Asc(alert.FieldCreatedAt)) + } else { + alerts = alerts.Order(ent.Desc(alert.FieldCreatedAt)) + } + if limit == 0 { + limit, err = alerts.Count(c.CTX) + if err != nil { + return []*ent.Alert{}, fmt.Errorf("unable to count nb alerts: %s", err) + } + } + result, err := alerts.Limit(paginationSize).Offset(offset).All(c.CTX) + if err != nil { + return []*ent.Alert{}, errors.Wrapf(QueryFail, "pagination size: %d, offset: %d: %s", paginationSize, offset, err) + } + if diff := limit - len(ret); diff < paginationSize { + if len(result) < diff { + ret = append(ret, result...) + log.Debugf("Pagination done, %d < %d", len(result), diff) + break + } + ret = append(ret, result[0:diff]...) + } else { + ret = append(ret, result...) + } + if len(ret) == limit || len(ret) == 0 { + log.Debugf("Pagination done len(ret) = %d", len(ret)) + break + } + offset += paginationSize + } + + return ret, nil +} + +func (c *Client) DeleteAlertGraph(alertItem *ent.Alert) error { + // delete the associated events + _, err := c.Ent.Event.Delete(). + Where(event.HasOwnerWith(alert.IDEQ(alertItem.ID))).Exec(c.CTX) + if err != nil { + log.Warningf("DeleteAlertGraph : %s", err) + return errors.Wrapf(DeleteFail, "event with alert ID '%d'", alertItem.ID) + } + + // delete the associated meta + _, err = c.Ent.Meta.Delete().
+ Where(meta.HasOwnerWith(alert.IDEQ(alertItem.ID))).Exec(c.CTX) + if err != nil { + log.Warningf("DeleteAlertGraph : %s", err) + return errors.Wrapf(DeleteFail, "meta with alert ID '%d'", alertItem.ID) + } + + // delete the associated decisions + _, err = c.Ent.Decision.Delete(). + Where(decision.HasOwnerWith(alert.IDEQ(alertItem.ID))).Exec(c.CTX) + if err != nil { + log.Warningf("DeleteAlertGraph : %s", err) + return errors.Wrapf(DeleteFail, "decision with alert ID '%d'", alertItem.ID) + } + + // delete the alert + err = c.Ent.Alert.DeleteOne(alertItem).Exec(c.CTX) + if err != nil { + log.Warningf("DeleteAlertGraph : %s", err) + return errors.Wrapf(DeleteFail, "alert with ID '%d'", alertItem.ID) + } + + return nil +} + +func (c *Client) DeleteAlertWithFilter(filter map[string][]string) ([]*ent.Alert, error) { + var err error + + // Get all the alerts that match the filter + alertsToDelete, err := c.QueryAlertWithFilter(filter) + if err != nil { + return []*ent.Alert{}, errors.Wrap(err, "unable to query alerts to delete") + } + + for _, alertItem := range alertsToDelete { + err = c.DeleteAlertGraph(alertItem) + if err != nil { + log.Warningf("DeleteAlertWithFilter : %s", err) + return []*ent.Alert{}, errors.Wrapf(DeleteFail, "event with alert ID '%d'", alertItem.ID) + } + } + return alertsToDelete, nil +} + +func (c *Client) FlushAlerts(MaxAge string, MaxItems int) error { + var deletedByAge int + var deletedByNbItem int + var totalAlerts int + var err error + totalAlerts, err = c.TotalAlerts() + if err != nil { + log.Warningf("FlushAlerts (max items count) : %s", err) + return errors.Wrap(err, "unable to get alerts count") + } + if MaxAge != "" { + filter := map[string][]string{ + "created_before": {MaxAge}, + } + deleted, err := c.DeleteAlertWithFilter(filter) + if err != nil { + log.Warningf("FlushAlerts (max age) : %s", err) + return errors.Wrapf(err, "unable to flush alerts with filter until: %s", MaxAge) + } + deletedByAge = len(deleted) + } + if MaxItems > 0 { + if totalAlerts > MaxItems { + nbToDelete := totalAlerts - MaxItems + alerts, err := c.QueryAlertWithFilter(map[string][]string{ + "sort": {"ASC"}, + "limit": {strconv.Itoa(nbToDelete)}, + }) // we want to delete older alerts if we reach the max number of items + if err != nil { + log.Warningf("FlushAlerts (max items query) : %s", err) + return errors.Wrap(err, "unable to get all alerts") + } + for itemNb, alert := range alerts { + if itemNb < nbToDelete { + err := c.DeleteAlertGraph(alert) + if err != nil { + log.Warningf("FlushAlerts : %s", err) + return errors.Wrap(err, "unable to flush alert") + } + deletedByNbItem++ + } + } + } + } + if deletedByNbItem > 0 { + log.Infof("flushed %d/%d alerts because max number of alerts has been reached (%d max)", deletedByNbItem, totalAlerts, MaxItems) + } + if deletedByAge > 0 { + log.Infof("flushed %d/%d alerts because they were created %s ago or more", deletedByAge, totalAlerts, MaxAge) + } + return nil +} + +func (c *Client) GetAlertByID(alertID int) (*ent.Alert, error) { + alert, err := c.Ent.Alert.Query().Where(alert.IDEQ(alertID)).WithDecisions().WithEvents().WithMetas().WithOwner().First(c.CTX) + if err != nil { + log.Warningf("GetAlertByID : %s", err) + return &ent.Alert{}, errors.Wrapf(QueryFail, "alert id '%d'", alertID) + } + return alert, nil +} diff --git a/pkg/database/bouncers.go b/pkg/database/bouncers.go new file mode 100644 index 000000000..ecedcecd4 --- /dev/null +++ b/pkg/database/bouncers.go @@ -0,0 +1,78 @@ +package database + +import ( + "fmt" + "time" + + "github.com/crowdsecurity/crowdsec/pkg/database/ent" +
"github.com/crowdsecurity/crowdsec/pkg/database/ent/bouncer" + "github.com/pkg/errors" +) + +func (c *Client) SelectBouncer(apiKeyHash string) (*ent.Bouncer, error) { + result, err := c.Ent.Bouncer.Query().Where(bouncer.APIKeyEQ(apiKeyHash)).First(c.CTX) + if err != nil { + return &ent.Bouncer{}, errors.Wrapf(QueryFail, "select bouncer: %s", err) + } + + return result, nil +} + +func (c *Client) ListBouncers() ([]*ent.Bouncer, error) { + result, err := c.Ent.Bouncer.Query().All(c.CTX) + if err != nil { + return []*ent.Bouncer{}, errors.Wrapf(QueryFail, "listing bouncer: %s", err) + } + return result, nil +} + +func (c *Client) CreateBouncer(name string, ipAddr string, apiKey string) error { + _, err := c.Ent.Bouncer. + Create(). + SetName(name). + SetAPIKey(apiKey). + SetRevoked(false). + Save(c.CTX) + if err != nil { + return fmt.Errorf("unable to save api key in database: %s", err) + } + return nil +} + +func (c *Client) DeleteBouncer(name string) error { + _, err := c.Ent.Bouncer. + Delete(). + Where(bouncer.NameEQ(name)). + Exec(c.CTX) + if err != nil { + return fmt.Errorf("unable to save api key in database: %s", err) + } + return nil +} + +func (c *Client) UpdateBouncerLastPull(lastPull time.Time, ID int) error { + _, err := c.Ent.Bouncer.UpdateOneID(ID). + SetLastPull(lastPull). + Save(c.CTX) + if err != nil { + return fmt.Errorf("unable to update machine in database: %s", err) + } + return nil +} + +func (c *Client) UpdateBouncerIP(ipAddr string, ID int) error { + _, err := c.Ent.Bouncer.UpdateOneID(ID).SetIPAddress(ipAddr).Save(c.CTX) + if err != nil { + return fmt.Errorf("unable to update bouncer ip address in database: %s", err) + } + return nil +} + +func (c *Client) UpdateBouncerTypeAndVersion(bType string, version string, ID int) error { + _, err := c.Ent.Bouncer.UpdateOneID(ID).SetVersion(version).SetType(bType).Save(c.CTX) + if err != nil { + return fmt.Errorf("unable to update bouncer type and version in database: %s", err) + } + return nil +} diff --git a/pkg/database/commit.go b/pkg/database/commit.go deleted file mode 100644 index 4e8a33b3a..000000000 --- a/pkg/database/commit.go +++ /dev/null @@ -1,56 +0,0 @@ -package database - -import ( - "time" - - log "github.com/sirupsen/logrus" -) - -/*Flush doesn't do anything here : we are not using transactions or such, nothing to "flush" per se*/ -func (c *Context) Flush() error { - return nil -} - -func (c *Context) StartAutoCommit() error { - //TBD : we shouldn't start auto-commit if we are in cli mode ? 
- c.PusherTomb.Go(func() error { - c.autoCommit() - return nil - }) - return nil -} - -func (c *Context) autoCommit() { - log.Debugf("starting autocommit") - cleanUpTicker := time.NewTicker(1 * time.Minute) - expireTicker := time.NewTicker(1 * time.Second) - if !c.flush { - log.Debugf("flush is disabled") - } - for { - select { - case <-c.PusherTomb.Dying(): - //we need to shutdown - log.Infof("database routine shutdown") - if err := c.Flush(); err != nil { - log.Errorf("error while flushing records: %s", err) - } - if err := c.Db.Close(); err != nil { - log.Errorf("error while closing db : %s", err) - } - return - case <-expireTicker.C: - if _, err := c.DeleteExpired(); err != nil { - log.Errorf("Error while deleting expired records: %s", err) - } - case <-cleanUpTicker.C: - if _, err := c.CleanUpRecordsByCount(); err != nil { - log.Errorf("error in max records cleanup : %s", err) - } - if _, err := c.CleanUpRecordsByAge(); err != nil { - log.Errorf("error in old records cleanup : %s", err) - - } - } - } -} diff --git a/pkg/database/database.go b/pkg/database/database.go index a7b2123d3..07e28205e 100644 --- a/pkg/database/database.go +++ b/pkg/database/database.go @@ -1,112 +1,88 @@ package database import ( + "context" "fmt" - "strconv" - "sync" "time" + "github.com/crowdsecurity/crowdsec/pkg/csconfig" + "github.com/crowdsecurity/crowdsec/pkg/database/ent" "github.com/crowdsecurity/crowdsec/pkg/types" - log "github.com/sirupsen/logrus" - - "github.com/jinzhu/gorm" - _ "github.com/jinzhu/gorm/dialects/mysql" - _ "github.com/jinzhu/gorm/dialects/sqlite" + "github.com/go-co-op/gocron" + _ "github.com/go-sql-driver/mysql" + _ "github.com/lib/pq" _ "github.com/mattn/go-sqlite3" - "gopkg.in/tomb.v2" + "github.com/pkg/errors" + log "github.com/sirupsen/logrus" ) -type Context struct { - Db *gorm.DB //Pointer to database - lastCommit time.Time - flush bool - count int32 - lock sync.Mutex //booboo - PusherTomb tomb.Tomb - //to manage auto cleanup : max number of records *or* oldest - maxEventRetention int - maxDurationRetention time.Duration +type Client struct { + Ent *ent.Client + CTX context.Context + Log *log.Logger } -func checkConfig(cfg map[string]string) error { - switch dbType := cfg["type"]; dbType { +func NewClient(config *csconfig.DatabaseCfg) (*Client, error) { + var client *ent.Client + var err error + if config == nil { + return &Client{}, fmt.Errorf("DB config is empty") + } + switch config.Type { case "sqlite": - if val, ok := cfg["db_path"]; !ok || val == "" { - return fmt.Errorf("please specify a 'db_path' to SQLite db in the configuration") + client, err = ent.Open("sqlite3", fmt.Sprintf("file:%s?_busy_timeout=100000&_fk=1", config.DbPath)) + if err != nil { + return &Client{}, fmt.Errorf("failed opening connection to sqlite: %v", err) } case "mysql": - if val, ok := cfg["db_host"]; !ok || val == "" { - return fmt.Errorf("please specify a 'db_host' to MySQL db in the configuration") + client, err = ent.Open("mysql", fmt.Sprintf("%s:%s@tcp(%s:%d)/%s?parseTime=True", config.User, config.Password, config.Host, config.Port, config.DbName)) + if err != nil { + return &Client{}, fmt.Errorf("failed opening connection to mysql: %v", err) } - - if val, ok := cfg["db_username"]; !ok || val == "" { - return fmt.Errorf("please specify a 'db_username' to MySQL db in the configuration") - } - - if val, ok := cfg["db_password"]; !ok || val == "" { - return fmt.Errorf("please specify a 'db_password' to MySQL db in the configuration") - } - - if val, ok := cfg["db_name"]; !ok || val == "" { - 
return fmt.Errorf("please specify a 'db_name' to MySQL db in the configuration") + case "postgres", "postgresql": + client, err = ent.Open("postgres", fmt.Sprintf("host=%s port=%d user=%s dbname=%s password=%s", config.Host, config.Port, config.User, config.DbName, config.Password)) + if err != nil { + return &Client{}, fmt.Errorf("failed opening connection to postgres: %v", err) } default: - return fmt.Errorf("please specify a proper 'type' to the database configuration ") + return &Client{}, fmt.Errorf("unknown database type") } - return nil + /*The logger that will be used by db operations*/ + clog := log.New() + if err := types.ConfigureLogger(clog); err != nil { + return nil, errors.Wrap(err, "while configuring db logger") + } + if config.LogLevel != nil { + clog.SetLevel(*config.LogLevel) + if *config.LogLevel >= log.TraceLevel { + log.Debugf("Enabling request debug") + client = client.Debug() + } + } + if err = client.Schema.Create(context.Background()); err != nil { + return nil, fmt.Errorf("failed creating schema resources: %v", err) + } + return &Client{Ent: client, CTX: context.Background(), Log: clog}, nil } -func NewDatabase(cfg map[string]string) (*Context, error) { - var err error - c := &Context{} - - if err = checkConfig(cfg); err != nil { - return nil, fmt.Errorf("bad database configuration : %v", err) +func (c *Client) StartFlushScheduler(config *csconfig.FlushDBCfg) (*gocron.Scheduler, error) { + maxItems := 0 + maxAge := "" + if config.MaxItems != nil && *config.MaxItems <= 0 { + return nil, fmt.Errorf("max_items can't be zero or negative number") + } + if config.MaxItems != nil { + maxItems = *config.MaxItems } - if cfg["type"] == "sqlite" { - c.Db, err = gorm.Open("sqlite3", cfg["db_path"]+"?_busy_timeout=10000") - if err != nil { - return nil, fmt.Errorf("failed to open %s : %s", cfg["db_path"], err) - } + if config.MaxAge != nil && *config.MaxAge != "" { + maxAge = *config.MaxAge } + // Init & Start cronjob every minute + scheduler := gocron.NewScheduler(time.UTC) + scheduler.Every(1).Minute().Do(c.FlushAlerts, maxAge, maxItems) + scheduler.StartAsync() - if cfg["type"] == "mysql" { - gormArg := cfg["db_username"] + ":" + cfg["db_password"] + "@(" + cfg["db_host"] + ")/" + cfg["db_name"] + "?charset=utf8&parseTime=True&loc=Local" - c.Db, err = gorm.Open("mysql", gormArg) - if err != nil { - return nil, fmt.Errorf("failed to open %s database : %s", cfg["db_name"], err) - } - } - - if v, ok := cfg["max_records"]; ok { - c.maxEventRetention, err = strconv.Atoi(v) - if err != nil { - log.Errorf("Ignoring invalid max_records '%s' : %s", v, err) - } - } - if v, ok := cfg["max_records_age"]; ok { - c.maxDurationRetention, err = time.ParseDuration(v) - if err != nil { - log.Errorf("Ignoring invalid duration '%s' : %s", v, err) - } - } - - if val, ok := cfg["debug"]; ok && val == "true" { - log.Infof("Enabling debug for %s", cfg["type"]) - c.Db.LogMode(true) - } - - c.flush, err = strconv.ParseBool(cfg["flush"]) - if err != nil { - return nil, fmt.Errorf("failed to parse 'flush' value %s : %s", cfg["flush"], err) - } - // Migrate the schema - c.Db.AutoMigrate(&types.EventSequence{}, &types.SignalOccurence{}, &types.BanApplication{}) - c.Db.Model(&types.SignalOccurence{}).Related(&types.EventSequence{}) - c.Db.Model(&types.SignalOccurence{}).Related(&types.BanApplication{}) - - c.lastCommit = time.Now() - return c, nil + return scheduler, nil } diff --git a/pkg/database/database_test.go b/pkg/database/database_test.go deleted file mode 100644 index 8ac605d8b..000000000 --- 
a/pkg/database/database_test.go +++ /dev/null @@ -1,157 +0,0 @@ -package database - -import ( - "net" - "testing" - "time" - - "github.com/crowdsecurity/crowdsec/pkg/types" -) - -func genSignalOccurence(ip string) types.SignalOccurence { - target_ip := net.ParseIP(ip) - - Ban := types.BanApplication{ - MeasureType: "ban", - MeasureSource: "local", - //for 10 minutes - Until: time.Now().Add(10 * time.Minute), - StartIp: types.IP2Int(target_ip), - EndIp: types.IP2Int(target_ip), - TargetCN: "FR", - TargetAS: 1234, - TargetASName: "Random AS", - IpText: target_ip.String(), - Reason: "A reason", - Scenario: "A scenario", - } - Signal := types.SignalOccurence{ - MapKey: "lala", - Scenario: "old_overflow", - //a few minutes ago - Start_at: time.Now().Add(-10 * time.Minute), - Stop_at: time.Now().Add(-5 * time.Minute), - BanApplications: []types.BanApplication{Ban}, - } - return Signal -} - -func TestCreateDB(t *testing.T) { - - var CfgTests = []struct { - cfg map[string]string - valid bool - }{ - {map[string]string{ - "type": "sqlite", - "db_path": "./test.db", - "max_records": "1000", - "max_records_age": "72h", - "debug": "false", - "flush": "true", - }, true}, - - //bad type - {map[string]string{ - "type": "inexistant_DB", - "db_path": "./test.db", - "max_records": "1000", - "debug": "false", - "flush": "true", - }, false}, - - //missing db_path - {map[string]string{ - "type": "sqlite", - "max_records": "1000", - "debug": "false", - "flush": "true", - }, false}, - - //valid mysql, but won't be able to connect and thus fail - {map[string]string{ - "type": "mysql", - "db_host": "localhost", - "db_username": "crowdsec", - "db_password": "password", - "db_name": "crowdsec", - "max_records": "1000", - "debug": "false", - "flush": "true", - }, false}, - - //mysql : missing host - {map[string]string{ - "type": "mysql", - "db_username": "crowdsec", - "db_password": "password", - "db_name": "crowdsec", - "max_records": "1000", - "debug": "false", - "flush": "true", - }, false}, - - //mysql : missing username - {map[string]string{ - "type": "mysql", - "db_host": "localhost", - "db_password": "password", - "db_name": "crowdsec", - "max_records": "1000", - "debug": "false", - "flush": "true", - }, false}, - - //mysql : missing password - {map[string]string{ - "type": "mysql", - "db_host": "localhost", - "db_username": "crowdsec", - "db_name": "crowdsec", - "max_records": "1000", - "debug": "false", - "flush": "true", - }, false}, - - //mysql : missing db_name - {map[string]string{ - "type": "mysql", - "db_host": "localhost", - "db_username": "crowdsec", - "db_password": "password", - "max_records": "1000", - "debug": "false", - "flush": "true", - }, false}, - - //sqlite : bad bools - {map[string]string{ - "type": "sqlite", - "db_path": "./test.db", - "max_records": "1000", - "max_records_age": "72h", - "debug": "false", - "flush": "ratata", - }, false}, - } - - for idx, TestCase := range CfgTests { - ctx, err := NewDatabase(TestCase.cfg) - if TestCase.valid { - if err != nil { - t.Fatalf("didn't expect error (case %d/%d) : %s", idx, len(CfgTests), err) - } - if ctx == nil { - t.Fatalf("didn't expect empty ctx (case %d/%d)", idx, len(CfgTests)) - } - } else { - if err == nil { - t.Fatalf("expected error (case %d/%d)", idx, len(CfgTests)) - } - if ctx != nil { - t.Fatalf("expected nil ctx (case %d/%d)", idx, len(CfgTests)) - } - } - } - -} diff --git a/pkg/database/decisions.go b/pkg/database/decisions.go new file mode 100644 index 000000000..2f2ec9e8f --- /dev/null +++ b/pkg/database/decisions.go @@ -0,0 
+1,282 @@ +package database + +import ( + "fmt" + "strings" + "time" + + "strconv" + + "github.com/crowdsecurity/crowdsec/pkg/database/ent" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/decision" + "github.com/crowdsecurity/crowdsec/pkg/types" + "github.com/pkg/errors" + log "github.com/sirupsen/logrus" +) + +func BuildDecisionRequestWithFilter(query *ent.DecisionQuery, filter map[string][]string) (*ent.DecisionQuery, error) { + var err error + var startIP, endIP int64 + + /*the simulated filter is a bit different : if it's not present *or* set to false, specifically exclude records with simulated to true */ + if v, ok := filter["simulated"]; ok { + if v[0] == "false" { + query = query.Where(decision.SimulatedEQ(false)) + } + delete(filter, "simulated") + } else { + query = query.Where(decision.SimulatedEQ(false)) + } + + for param, value := range filter { + switch param { + case "scope": + var scope string = value[0] + if strings.ToLower(scope) == "ip" { + scope = types.Ip + } else if strings.ToLower(scope) == "range" { + scope = types.Range + } + query = query.Where(decision.ScopeEQ(scope)) + case "value": + query = query.Where(decision.ValueEQ(value[0])) + case "type": + query = query.Where(decision.TypeEQ(value[0])) + case "ip": + isValidIP := IsIpv4(value[0]) + if !isValidIP { + return nil, errors.Wrapf(InvalidIPOrRange, "unable to parse '%s': %s", value[0], err) + } + startIP, endIP, err = GetIpsFromIpRange(value[0] + "/32") + if err != nil { + return nil, errors.Wrapf(InvalidIPOrRange, "unable to convert '%s' to int interval: %s", value[0], err) + } + case "range": + startIP, endIP, err = GetIpsFromIpRange(value[0]) + if err != nil { + return nil, errors.Wrapf(InvalidIPOrRange, "unable to convert '%s' to int interval: %s", value[0], err) + } + default: + return query, errors.Wrapf(InvalidFilter, "'%s' doesn't exist", param) + } + } + + if startIP != 0 && endIP != 0 { + /*the user is checking for a single IP*/ + if startIP == endIP { + //DECISION_START <= IP_Q <= DECISION_END + query = query.Where(decision.And( + decision.StartIPLTE(startIP), + decision.EndIPGTE(endIP), + )) + } else { /*the user is checking for a RANGE */ + //START_Q >= DECISION_START AND END_Q <= DECISION_END + query = query.Where(decision.And( + decision.StartIPGTE(startIP), + decision.EndIPLTE(endIP), + )) + } + } + return query, nil +} + +func (c *Client) QueryDecisionWithFilter(filter map[string][]string) ([]*ent.Decision, error) { + var data []*ent.Decision + var err error + + decisions := c.Ent.Decision.Query().
+ Where(decision.UntilGTE(time.Now())) + + decisions, err = BuildDecisionRequestWithFilter(decisions, filter) + if err != nil { + return []*ent.Decision{}, err + } + + err = decisions.Select( + decision.FieldID, + decision.FieldUntil, + decision.FieldScenario, + decision.FieldType, + decision.FieldStartIP, + decision.FieldEndIP, + decision.FieldValue, + decision.FieldScope, + decision.FieldOrigin, + ).Scan(c.CTX, &data) + if err != nil { + log.Warningf("QueryDecisionWithFilter : %s", err) + return []*ent.Decision{}, errors.Wrap(QueryFail, "query decision failed") + } + + return data, nil +} + +func (c *Client) QueryAllDecisions() ([]*ent.Decision, error) { + data, err := c.Ent.Decision.Query().Where(decision.UntilGT(time.Now())).All(c.CTX) + if err != nil { + log.Warningf("QueryAllDecisions : %s", err) + return []*ent.Decision{}, errors.Wrap(QueryFail, "get all decisions") + } + return data, nil +} + +func (c *Client) QueryExpiredDecisions() ([]*ent.Decision, error) { + data, err := c.Ent.Decision.Query().Where(decision.UntilLT(time.Now())).All(c.CTX) + if err != nil { + log.Warningf("QueryExpiredDecisions : %s", err) + return []*ent.Decision{}, errors.Wrap(QueryFail, "expired decisions") + } + return data, nil +} + +func (c *Client) QueryExpiredDecisionsSince(since time.Time) ([]*ent.Decision, error) { + data, err := c.Ent.Decision.Query().Where(decision.UntilLT(time.Now())).Where(decision.UntilGT(since)).All(c.CTX) + if err != nil { + log.Warningf("QueryExpiredDecisionsSince : %s", err) + return []*ent.Decision{}, errors.Wrap(QueryFail, "expired decisions") + } + return data, nil +} + +func (c *Client) QueryNewDecisionsSince(since time.Time) ([]*ent.Decision, error) { + data, err := c.Ent.Decision.Query().Where(decision.CreatedAtGT(since)).All(c.CTX) + if err != nil { + log.Warningf("QueryNewDecisionsSince : %s", err) + return []*ent.Decision{}, errors.Wrapf(QueryFail, "new decisions since '%s'", since.String()) + } + return data, nil +} + +func (c *Client) DeleteDecisionById(decisionId int) error { + err := c.Ent.Decision.DeleteOneID(decisionId).Exec(c.CTX) + if err != nil { + log.Warningf("DeleteDecisionById : %s", err) + return errors.Wrapf(DeleteFail, "decision with id '%d' doesn't exist", decisionId) + } + return nil +} + +func (c *Client) DeleteDecisionsWithFilter(filter map[string][]string) (string, error) { + var err error + var startIP, endIP int64 + + decisions := c.Ent.Decision.Delete() + + for param, value := range filter { + switch param { + case "scope": + decisions = decisions.Where(decision.ScopeEQ(value[0])) + case "value": + decisions = decisions.Where(decision.ValueEQ(value[0])) + case "type": + decisions = decisions.Where(decision.TypeEQ(value[0])) + case "ip": + isValidIP := IsIpv4(value[0]) + if !isValidIP { + return "0", errors.Wrap(InvalidIPOrRange, fmt.Sprintf("unable to parse '%s'", value[0])) + } + startIP, endIP, err = GetIpsFromIpRange(value[0] + "/32") + if err != nil { + return "0", errors.Wrap(InvalidIPOrRange, fmt.Sprintf("unable to convert '%s' to int interval: %s", value[0], err)) + } + case "range": + startIP, endIP, err = GetIpsFromIpRange(value[0]) + if err != nil { + return "0", errors.Wrap(InvalidIPOrRange, fmt.Sprintf("unable to convert '%s' to int interval: %s", value[0], err)) + } + default: + return "0", errors.Wrap(InvalidFilter, fmt.Sprintf("'%s' doesn't exist", param)) + } + + if startIP != 0 && endIP != 0 { + /*the user is checking for a single IP*/ + if startIP == endIP { + //DECISION_START <= IP_Q <= DECISION_END + decisions =
decisions.Where(decision.And( + decision.StartIPLTE(startIP), + decision.EndIPGTE(endIP), + )) + } else { /*the user is checking for a RANGE */ + //START_Q >= DECISION_START AND END_Q <= DECISION_END + decisions = decisions.Where(decision.And( + decision.StartIPGTE(startIP), + decision.EndIPLTE(endIP), + )) + } + } + } + + nbDeleted, err := decisions.Exec(c.CTX) + if err != nil { + log.Warningf("DeleteDecisionsWithFilter : %s", err) + return "0", errors.Wrap(DeleteFail, "decisions with provided filter") + } + return strconv.Itoa(nbDeleted), nil +} + +// SoftDeleteDecisionsWithFilter updates the expiration time to now() for the decisions matching the filter +func (c *Client) SoftDeleteDecisionsWithFilter(filter map[string][]string) (string, error) { + var err error + var startIP, endIP int64 + + decisions := c.Ent.Decision.Update().Where(decision.UntilGT(time.Now())) + for param, value := range filter { + switch param { + case "scope": + decisions = decisions.Where(decision.ScopeEQ(value[0])) + case "value": + decisions = decisions.Where(decision.ValueEQ(value[0])) + case "type": + decisions = decisions.Where(decision.TypeEQ(value[0])) + case "ip": + isValidIP := IsIpv4(value[0]) + if !isValidIP { + return "0", errors.Wrapf(InvalidIPOrRange, "unable to parse '%s'", value[0]) + } + startIP, endIP, err = GetIpsFromIpRange(value[0] + "/32") + if err != nil { + return "0", errors.Wrapf(InvalidIPOrRange, "unable to convert '%s' to int interval: %s", value[0], err) + } + case "range": + startIP, endIP, err = GetIpsFromIpRange(value[0]) + if err != nil { + return "0", errors.Wrapf(InvalidIPOrRange, "unable to convert '%s' to int interval: %s", value[0], err) + } + default: + return "0", errors.Wrapf(InvalidFilter, "'%s' doesn't exist", param) + } + + if startIP != 0 && endIP != 0 { + /*the user is checking for a single IP*/ + if startIP == endIP { + //DECISION_START <= IP_Q <= DECISION_END + decisions = decisions.Where(decision.And( + decision.StartIPLTE(startIP), + decision.EndIPGTE(endIP), + )) + } else { /*the user is checking for a RANGE */ + //START_Q >= DECISION_START AND END_Q <= DECISION_END + decisions = decisions.Where(decision.And( + decision.StartIPGTE(startIP), + decision.EndIPLTE(endIP), + )) + } + } + } + nbDeleted, err := decisions.SetUntil(time.Now()).Save(c.CTX) + if err != nil { + log.Warningf("SoftDeleteDecisionsWithFilter : %s", err) + return "0", errors.Wrap(DeleteFail, "soft delete decisions with provided filter") + } + return strconv.Itoa(nbDeleted), nil +} + +// SoftDeleteDecisionByID sets the expiration of a decision to now() +func (c *Client) SoftDeleteDecisionByID(decisionID int) error { + nbUpdated, err := c.Ent.Decision.Update().Where(decision.IDEQ(decisionID)).SetUntil(time.Now()).Save(c.CTX) + if err != nil || nbUpdated == 0 { + log.Warningf("SoftDeleteDecisionByID : %v (nb soft deleted: %d)", err, nbUpdated) + return errors.Wrapf(DeleteFail, "decision with id '%d' doesn't exist", decisionID) + } + return nil +} diff --git a/pkg/database/delete.go b/pkg/database/delete.go deleted file mode 100644 index 711e9143f..000000000 --- a/pkg/database/delete.go +++ /dev/null @@ -1,158 +0,0 @@ -package database - -import ( - "fmt" - "time" - - "github.com/pkg/errors" - - "github.com/crowdsecurity/crowdsec/pkg/types" - log "github.com/sirupsen/logrus" -) - -/*try to delete entries with matching fields */ -func (c *Context) DeleteBan(target string) (int, error) { - - if target != "" { - ret := c.Db.Delete(types.BanApplication{}, "ip_text = ?", target) - if ret.Error != nil
{ - log.Errorf("Failed to delete record with BanTarget %s : %v", target, ret.Error) - return 0, ret.Error - } - return int(ret.RowsAffected), nil - } - return 0, fmt.Errorf("no target provided") -} - -func (c *Context) DeleteAll() error { - allBa := types.BanApplication{} - records := c.Db.Delete(&allBa) - if records.Error != nil { - return records.Error - } - return nil -} - -func (c *Context) HardDeleteAll() error { - allBa := types.BanApplication{} - records := c.Db.Unscoped().Delete(&allBa) - if records.Error != nil { - return records.Error - } - return nil -} - -func (c *Context) DeleteExpired() (int, error) { - c.lock.Lock() - defer c.lock.Unlock() - //Delete the expired records - now := time.Now() - count := 0 - if c.flush { - retx := c.Db.Delete(types.BanApplication{}, "until < ?", now) - if retx.Error != nil { - return 0, retx.Error - } - if retx.RowsAffected > 0 { - log.Infof("Flushed %d expired entries from Ban Application", retx.RowsAffected) - count = int(retx.RowsAffected) - } - } - return count, nil -} - -func (c *Context) CleanUpRecordsByAge() (int, error) { - //let's fetch all expired records that are more than XX days olds - sos := []types.BanApplication{} - - if c.maxDurationRetention == 0 { - return 0, nil - } - - //look for soft-deleted events that are OLDER than maxDurationRetention - ret := c.Db.Unscoped().Table("ban_applications").Where("deleted_at is not NULL"). - Where("until < ?", time.Now().Add(-c.maxDurationRetention)). - Order("updated_at desc").Find(&sos) - - if ret.Error != nil { - return 0, errors.Wrap(ret.Error, "failed to get count of old records") - } - - //no events elligible - if len(sos) == 0 || ret.RowsAffected == 0 { - log.Debugf("no event older than %s", c.maxDurationRetention.String()) - return 0, nil - } - - /*This is clearly suboptimal, and 'left join' and stuff gives way better results, but doesn't seem to behave equally on sqlite and mysql*/ - delRecords := 0 - for _, record := range sos { - copy := record - if ret := c.Db.Unscoped().Table("signal_occurences").Where("ID = ?", copy.SignalOccurenceID).Delete(&types.SignalOccurence{}); ret.Error != nil { - return 0, errors.Wrap(ret.Error, "failed to clean signal_occurences") - } - if ret := c.Db.Unscoped().Table("event_sequences").Where("signal_occurence_id = ?", copy.SignalOccurenceID).Delete(&types.EventSequence{}); ret.Error != nil { - return 0, errors.Wrap(ret.Error, "failed to clean event_sequences") - } - if ret := c.Db.Unscoped().Table("ban_applications").Delete(©); ret.Error != nil { - return 0, errors.Wrap(ret.Error, "failed to clean ban_applications") - } - delRecords++ - } - log.Printf("max_records_age: deleting %d events (max age:%s)", delRecords, c.maxDurationRetention) - return delRecords, nil -} - -func (c *Context) CleanUpRecordsByCount() (int, error) { - var count int - - if c.maxEventRetention <= 0 { - return 0, nil - } - - ret := c.Db.Unscoped().Table("ban_applications").Count(&count) - - if ret.Error != nil { - return 0, errors.Wrap(ret.Error, "failed to get bans count") - } - if count < c.maxEventRetention { - log.Debugf("%d < %d, don't cleanup", count, c.maxEventRetention) - return 0, nil - } - - sos := []types.BanApplication{} - now := time.Now() - /*get soft deleted records oldest to youngest*/ - //records := c.Db.Unscoped().Table("ban_applications").Where("deleted_at is not NULL").Where(`strftime("%s", deleted_at) < strftime("%s", "now")`).Find(&sos) - records := c.Db.Unscoped().Table("ban_applications").Where("deleted_at is not NULL").Where("deleted_at < ?", 
now).Find(&sos) - if records.Error != nil { - return 0, errors.Wrap(records.Error, "failed to list expired bans for flush") - } - - //let's do it in a single transaction - delRecords := 0 - for _, ld := range sos { - copy := ld - if ret := c.Db.Unscoped().Table("signal_occurences").Where("ID = ?", copy.SignalOccurenceID).Delete(&types.SignalOccurence{}); ret.Error != nil { - return 0, errors.Wrap(ret.Error, "failed to clean signal_occurences") - } - if ret := c.Db.Unscoped().Table("event_sequences").Where("signal_occurence_id = ?", copy.SignalOccurenceID).Delete(&types.EventSequence{}); ret.Error != nil { - return 0, errors.Wrap(ret.Error, "failed to clean event_sequences") - } - if ret := c.Db.Unscoped().Table("ban_applications").Delete(©); ret.Error != nil { - return 0, errors.Wrap(ret.Error, "failed to clean ban_applications") - } - //we need to delete associations : event_sequences, signal_occurences - delRecords++ - //let's delete as well the associated event_sequence - if count-delRecords <= c.maxEventRetention { - break - } - } - if len(sos) > 0 { - log.Printf("max_records: deleting %d events. (%d soft-deleted)", delRecords, len(sos)) - } else { - log.Debugf("didn't find any record to clean") - } - return delRecords, nil -} diff --git a/pkg/database/delete_test.go b/pkg/database/delete_test.go deleted file mode 100644 index 2c18a908c..000000000 --- a/pkg/database/delete_test.go +++ /dev/null @@ -1,346 +0,0 @@ -package database - -import ( - "fmt" - "testing" - "time" -) - -func TestNoCleanUpParams(t *testing.T) { - validCfg := map[string]string{ - "type": "sqlite", - "db_path": "./test.db", - "debug": "false", - "max_records": "0", - "max_records_age": "0s", - "flush": "true", - } - ctx, err := NewDatabase(validCfg) - if err != nil || ctx == nil { - t.Fatalf("failed to create simple sqlite") - } - - if err := ctx.HardDeleteAll(); err != nil { - t.Fatalf("failed to flush existing bans") - } - - freshRecordsCount := 12 - - for i := 0; i < freshRecordsCount; i++ { - //this one expires in the future - OldSignal := genSignalOccurence(fmt.Sprintf("2.2.2.%d", i)) - - OldSignal.BanApplications[0].Until = time.Now().Add(1 * time.Hour) - if err = ctx.WriteBanApplication(OldSignal.BanApplications[0]); err != nil { - t.Fatalf("Failed to insert old signal : %s", err) - } - } - - bans, err := ctx.GetBansAt(time.Now()) - if err != nil { - t.Fatalf("%s", err) - } - if len(bans) != freshRecordsCount { - t.Fatalf("expected %d, got %d", freshRecordsCount, len(bans)) - } - - //Cleanup by age should hard delete old records - deleted, err := ctx.CleanUpRecordsByCount() - if err != nil { - t.Fatalf("error %s", err) - } - if deleted != 0 { - t.Fatalf("unexpected %d deleted events", deleted) - } - - //Cleanup by age should hard delete old records - deleted, err = ctx.CleanUpRecordsByAge() - if err != nil { - t.Fatalf("error %s", err) - } - if deleted != 0 { - t.Fatalf("unexpected %d deleted events ", deleted) - } - -} - -func TestNoCleanUp(t *testing.T) { - validCfg := map[string]string{ - "type": "sqlite", - "db_path": "./test.db", - "debug": "false", - "max_records": "1000", - "max_records_age": "24h", - "flush": "true", - } - ctx, err := NewDatabase(validCfg) - if err != nil || ctx == nil { - t.Fatalf("failed to create simple sqlite") - } - - if err := ctx.HardDeleteAll(); err != nil { - t.Fatalf("failed to flush existing bans") - } - - freshRecordsCount := 12 - - for i := 0; i < freshRecordsCount; i++ { - //this one expires in the future - OldSignal := genSignalOccurence(fmt.Sprintf("2.2.2.%d", i)) - - 
OldSignal.BanApplications[0].Until = time.Now().Add(1 * time.Hour) - if err = ctx.WriteBanApplication(OldSignal.BanApplications[0]); err != nil { - t.Fatalf("Failed to insert old signal : %s", err) - } - } - - bans, err := ctx.GetBansAt(time.Now()) - if err != nil { - t.Fatalf("%s", err) - } - if len(bans) != freshRecordsCount { - t.Fatalf("expected %d, got %d", freshRecordsCount, len(bans)) - } - - //Cleanup by age should hard delete old records - deleted, err := ctx.CleanUpRecordsByCount() - if err != nil { - t.Fatalf("error %s", err) - } - if deleted != 0 { - t.Fatalf("unexpected %d deleted events", deleted) - } - - //Cleanup by age should hard delete old records - deleted, err = ctx.CleanUpRecordsByAge() - if err != nil { - t.Fatalf("error %s", err) - } - if deleted != 0 { - t.Fatalf("unexpected %d deleted events ", deleted) - } - -} - -func TestCleanUpByCount(t *testing.T) { - //plan : - // - insert one current event - // - insert 150 old events - // - check DeletedExpired behavior - // - check CleanUpByCount behavior - - maxCount := 72 - validCfg := map[string]string{ - "type": "sqlite", - "db_path": "./test.db", - //that's 15 days - "max_records": fmt.Sprintf("%d", maxCount), - "debug": "false", - "flush": "true", - } - ctx, err := NewDatabase(validCfg) - if err != nil || ctx == nil { - t.Fatalf("failed to create simple sqlite") - } - - if err := ctx.HardDeleteAll(); err != nil { - t.Fatalf("failed to flush existing bans") - } - - freshRecordsCount := 12 - - for i := 0; i < freshRecordsCount; i++ { - //this one expires in the future - OldSignal := genSignalOccurence(fmt.Sprintf("2.2.2.%d", i)) - OldSignal.BanApplications[0].Until = time.Now().Add(1 * time.Hour) - if err = ctx.WriteSignal(OldSignal); err != nil { - t.Fatalf("Failed to insert old signal : %s", err) - } - } - - oldRecordsCount := 136 - - for i := 0; i < oldRecordsCount; i++ { - OldSignal := genSignalOccurence(fmt.Sprintf("1.2.3.%d", i)) - //let's make the event a month old - OldSignal.Start_at = time.Now().Add(-30 * 24 * time.Hour) - OldSignal.Stop_at = time.Now().Add(-30 * 24 * time.Hour) - //ban was like for an hour - OldSignal.BanApplications[0].Until = time.Now().Add(-30*24*time.Hour + 1*time.Hour) - //write the old signal - if err = ctx.WriteSignal(OldSignal); err != nil { - t.Fatalf("Failed to insert old signal : %s", err) - } - } - - evtsCount := 0 - ret := ctx.Db.Unscoped().Table("ban_applications").Count(&evtsCount) - if ret.Error != nil { - t.Fatalf("got err : %s", ret.Error) - } - if evtsCount != oldRecordsCount+freshRecordsCount { - t.Fatalf("got %d events", evtsCount) - } - - //if we call DeleteExpired, it will soft deleted those events in the past - - softDeleted, err := ctx.DeleteExpired() - - if err != nil { - t.Fatalf("%s", err) - } - - if softDeleted != oldRecordsCount { - t.Fatalf("%d deleted records", softDeleted) - } - - //we should be left with *one* non-deleted record - evtsCount = 0 - ret = ctx.Db.Table("ban_applications").Where("deleted_at is NULL").Count(&evtsCount) - if ret.Error != nil { - t.Fatalf("got err : %s", ret.Error) - } - if evtsCount != freshRecordsCount { - t.Fatalf("got %d events", evtsCount) - } - - evtsCount = 0 - ret = ctx.Db.Table("ban_applications").Where("deleted_at is not NULL").Count(&evtsCount) - if ret.Error != nil { - t.Fatalf("got err : %s", ret.Error) - } - if evtsCount != oldRecordsCount { - t.Fatalf("got %d events", evtsCount) - } - - //ctx.Db.LogMode(true) - - //Cleanup by age should hard delete old records - deleted, err := ctx.CleanUpRecordsByCount() - if err != 
nil { - t.Fatalf("error %s", err) - } - if deleted != (oldRecordsCount+freshRecordsCount)-maxCount { - t.Fatalf("unexpected %d deleted events (expected: %d)", deleted, oldRecordsCount-maxCount) - } - - //and now we should have *one* record left ! - evtsCount = 0 - ret = ctx.Db.Unscoped().Table("ban_applications").Count(&evtsCount) - if ret.Error != nil { - t.Fatalf("got err : %s", ret.Error) - } - if evtsCount != maxCount { - t.Fatalf("got %d events", evtsCount) - } -} - -func TestCleanUpByAge(t *testing.T) { - //plan : - // - insert one current event - // - insert 150 old events - // - check DeletedExpired behavior - // - check CleanUpByAge behavior - - validCfg := map[string]string{ - "type": "sqlite", - "db_path": "./test.db", - //that's 15 days - "max_records_age": "360h", - "debug": "false", - "flush": "true", - } - ctx, err := NewDatabase(validCfg) - if err != nil || ctx == nil { - t.Fatalf("failed to create simple sqlite") - } - - if err := ctx.HardDeleteAll(); err != nil { - t.Fatalf("failed to flush existing bans") - } - - freshRecordsCount := 8 - - for i := 0; i < freshRecordsCount; i++ { - //this one expires in the future - OldSignal := genSignalOccurence(fmt.Sprintf("2.2.2.%d", i)) - OldSignal.BanApplications[0].Until = time.Now().Add(1 * time.Hour) - if err = ctx.WriteSignal(OldSignal); err != nil { - t.Fatalf("Failed to insert old signal : %s", err) - } - } - - oldRecordsCount := 150 - - for i := 0; i < oldRecordsCount; i++ { - OldSignal := genSignalOccurence(fmt.Sprintf("1.2.3.%d", i)) - //let's make the event a month old - OldSignal.Start_at = time.Now().Add(-30 * 24 * time.Hour) - OldSignal.Stop_at = time.Now().Add(-30 * 24 * time.Hour) - //ban was like for an hour - OldSignal.BanApplications[0].Until = time.Now().Add(-30*24*time.Hour + 1*time.Hour) - //write the old signal - if err = ctx.WriteSignal(OldSignal); err != nil { - t.Fatalf("Failed to insert old signal : %s", err) - } - } - - evtsCount := 0 - ret := ctx.Db.Unscoped().Table("ban_applications").Count(&evtsCount) - if ret.Error != nil { - t.Fatalf("got err : %s", ret.Error) - } - if evtsCount != oldRecordsCount+freshRecordsCount { - t.Fatalf("got %d events", evtsCount) - } - - //if we call DeleteExpired, it will soft deleted those events in the past - - softDeleted, err := ctx.DeleteExpired() - - if err != nil { - t.Fatalf("%s", err) - } - - if softDeleted != oldRecordsCount { - t.Fatalf("%d deleted records", softDeleted) - } - - //we should be left with *one* non-deleted record - evtsCount = 0 - ret = ctx.Db.Table("ban_applications").Where("deleted_at is NULL").Count(&evtsCount) - if ret.Error != nil { - t.Fatalf("got err : %s", ret.Error) - } - if evtsCount != freshRecordsCount { - t.Fatalf("got %d events", evtsCount) - } - - evtsCount = 0 - ret = ctx.Db.Table("ban_applications").Where("deleted_at is not NULL").Count(&evtsCount) - if ret.Error != nil { - t.Fatalf("got err : %s", ret.Error) - } - if evtsCount != oldRecordsCount { - t.Fatalf("got %d events", evtsCount) - } - - //Cleanup by age should hard delete old records - deleted, err := ctx.CleanUpRecordsByAge() - if err != nil { - t.Fatal() - } - if deleted != oldRecordsCount { - t.Fatalf("unexpected %d deleted events", deleted) - } - - //and now we should have *one* record left ! 
- evtsCount = 0 - ret = ctx.Db.Unscoped().Table("ban_applications").Count(&evtsCount) - if ret.Error != nil { - t.Fatalf("got err : %s", ret.Error) - } - if evtsCount != freshRecordsCount { - t.Fatalf("got %d events", evtsCount) - } -} diff --git a/pkg/database/ent/alert.go b/pkg/database/ent/alert.go new file mode 100644 index 000000000..b9656345e --- /dev/null +++ b/pkg/database/ent/alert.go @@ -0,0 +1,394 @@ +// Code generated by entc, DO NOT EDIT. + +package ent + +import ( + "fmt" + "strings" + "time" + + "github.com/crowdsecurity/crowdsec/pkg/database/ent/alert" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/machine" + "github.com/facebook/ent/dialect/sql" +) + +// Alert is the model entity for the Alert schema. +type Alert struct { + config `json:"-"` + // ID of the ent. + ID int `json:"id,omitempty"` + // CreatedAt holds the value of the "created_at" field. + CreatedAt time.Time `json:"created_at,omitempty"` + // UpdatedAt holds the value of the "updated_at" field. + UpdatedAt time.Time `json:"updated_at,omitempty"` + // Scenario holds the value of the "scenario" field. + Scenario string `json:"scenario,omitempty"` + // BucketId holds the value of the "bucketId" field. + BucketId string `json:"bucketId,omitempty"` + // Message holds the value of the "message" field. + Message string `json:"message,omitempty"` + // EventsCount holds the value of the "eventsCount" field. + EventsCount int32 `json:"eventsCount,omitempty"` + // StartedAt holds the value of the "startedAt" field. + StartedAt time.Time `json:"startedAt,omitempty"` + // StoppedAt holds the value of the "stoppedAt" field. + StoppedAt time.Time `json:"stoppedAt,omitempty"` + // SourceIp holds the value of the "sourceIp" field. + SourceIp string `json:"sourceIp,omitempty"` + // SourceRange holds the value of the "sourceRange" field. + SourceRange string `json:"sourceRange,omitempty"` + // SourceAsNumber holds the value of the "sourceAsNumber" field. + SourceAsNumber string `json:"sourceAsNumber,omitempty"` + // SourceAsName holds the value of the "sourceAsName" field. + SourceAsName string `json:"sourceAsName,omitempty"` + // SourceCountry holds the value of the "sourceCountry" field. + SourceCountry string `json:"sourceCountry,omitempty"` + // SourceLatitude holds the value of the "sourceLatitude" field. + SourceLatitude float32 `json:"sourceLatitude,omitempty"` + // SourceLongitude holds the value of the "sourceLongitude" field. + SourceLongitude float32 `json:"sourceLongitude,omitempty"` + // SourceScope holds the value of the "sourceScope" field. + SourceScope string `json:"sourceScope,omitempty"` + // SourceValue holds the value of the "sourceValue" field. + SourceValue string `json:"sourceValue,omitempty"` + // Capacity holds the value of the "capacity" field. + Capacity int32 `json:"capacity,omitempty"` + // LeakSpeed holds the value of the "leakSpeed" field. + LeakSpeed string `json:"leakSpeed,omitempty"` + // ScenarioVersion holds the value of the "scenarioVersion" field. + ScenarioVersion string `json:"scenarioVersion,omitempty"` + // ScenarioHash holds the value of the "scenarioHash" field. + ScenarioHash string `json:"scenarioHash,omitempty"` + // Simulated holds the value of the "simulated" field. + Simulated bool `json:"simulated,omitempty"` + // Edges holds the relations/edges for other nodes in the graph. + // The values are being populated by the AlertQuery when eager-loading is set. 
+ Edges AlertEdges `json:"edges"` + machine_alerts *int +} + +// AlertEdges holds the relations/edges for other nodes in the graph. +type AlertEdges struct { + // Owner holds the value of the owner edge. + Owner *Machine + // Decisions holds the value of the decisions edge. + Decisions []*Decision + // Events holds the value of the events edge. + Events []*Event + // Metas holds the value of the metas edge. + Metas []*Meta + // loadedTypes holds the information for reporting if a + // type was loaded (or requested) in eager-loading or not. + loadedTypes [4]bool +} + +// OwnerOrErr returns the Owner value or an error if the edge +// was not loaded in eager-loading, or loaded but was not found. +func (e AlertEdges) OwnerOrErr() (*Machine, error) { + if e.loadedTypes[0] { + if e.Owner == nil { + // The edge owner was loaded in eager-loading, + // but was not found. + return nil, &NotFoundError{label: machine.Label} + } + return e.Owner, nil + } + return nil, &NotLoadedError{edge: "owner"} +} + +// DecisionsOrErr returns the Decisions value or an error if the edge +// was not loaded in eager-loading. +func (e AlertEdges) DecisionsOrErr() ([]*Decision, error) { + if e.loadedTypes[1] { + return e.Decisions, nil + } + return nil, &NotLoadedError{edge: "decisions"} +} + +// EventsOrErr returns the Events value or an error if the edge +// was not loaded in eager-loading. +func (e AlertEdges) EventsOrErr() ([]*Event, error) { + if e.loadedTypes[2] { + return e.Events, nil + } + return nil, &NotLoadedError{edge: "events"} +} + +// MetasOrErr returns the Metas value or an error if the edge +// was not loaded in eager-loading. +func (e AlertEdges) MetasOrErr() ([]*Meta, error) { + if e.loadedTypes[3] { + return e.Metas, nil + } + return nil, &NotLoadedError{edge: "metas"} +} + +// scanValues returns the types for scanning values from sql.Rows. +func (*Alert) scanValues() []interface{} { + return []interface{}{ + &sql.NullInt64{}, // id + &sql.NullTime{}, // created_at + &sql.NullTime{}, // updated_at + &sql.NullString{}, // scenario + &sql.NullString{}, // bucketId + &sql.NullString{}, // message + &sql.NullInt64{}, // eventsCount + &sql.NullTime{}, // startedAt + &sql.NullTime{}, // stoppedAt + &sql.NullString{}, // sourceIp + &sql.NullString{}, // sourceRange + &sql.NullString{}, // sourceAsNumber + &sql.NullString{}, // sourceAsName + &sql.NullString{}, // sourceCountry + &sql.NullFloat64{}, // sourceLatitude + &sql.NullFloat64{}, // sourceLongitude + &sql.NullString{}, // sourceScope + &sql.NullString{}, // sourceValue + &sql.NullInt64{}, // capacity + &sql.NullString{}, // leakSpeed + &sql.NullString{}, // scenarioVersion + &sql.NullString{}, // scenarioHash + &sql.NullBool{}, // simulated + } +} + +// fkValues returns the types for scanning foreign-keys values from sql.Rows. +func (*Alert) fkValues() []interface{} { + return []interface{}{ + &sql.NullInt64{}, // machine_alerts + } +} + +// assignValues assigns the values that were returned from sql.Rows (after scanning) +// to the Alert fields. 
+func (a *Alert) assignValues(values ...interface{}) error { + if m, n := len(values), len(alert.Columns); m < n { + return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) + } + value, ok := values[0].(*sql.NullInt64) + if !ok { + return fmt.Errorf("unexpected type %T for field id", value) + } + a.ID = int(value.Int64) + values = values[1:] + if value, ok := values[0].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field created_at", values[0]) + } else if value.Valid { + a.CreatedAt = value.Time + } + if value, ok := values[1].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field updated_at", values[1]) + } else if value.Valid { + a.UpdatedAt = value.Time + } + if value, ok := values[2].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field scenario", values[2]) + } else if value.Valid { + a.Scenario = value.String + } + if value, ok := values[3].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field bucketId", values[3]) + } else if value.Valid { + a.BucketId = value.String + } + if value, ok := values[4].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field message", values[4]) + } else if value.Valid { + a.Message = value.String + } + if value, ok := values[5].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field eventsCount", values[5]) + } else if value.Valid { + a.EventsCount = int32(value.Int64) + } + if value, ok := values[6].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field startedAt", values[6]) + } else if value.Valid { + a.StartedAt = value.Time + } + if value, ok := values[7].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field stoppedAt", values[7]) + } else if value.Valid { + a.StoppedAt = value.Time + } + if value, ok := values[8].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field sourceIp", values[8]) + } else if value.Valid { + a.SourceIp = value.String + } + if value, ok := values[9].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field sourceRange", values[9]) + } else if value.Valid { + a.SourceRange = value.String + } + if value, ok := values[10].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field sourceAsNumber", values[10]) + } else if value.Valid { + a.SourceAsNumber = value.String + } + if value, ok := values[11].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field sourceAsName", values[11]) + } else if value.Valid { + a.SourceAsName = value.String + } + if value, ok := values[12].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field sourceCountry", values[12]) + } else if value.Valid { + a.SourceCountry = value.String + } + if value, ok := values[13].(*sql.NullFloat64); !ok { + return fmt.Errorf("unexpected type %T for field sourceLatitude", values[13]) + } else if value.Valid { + a.SourceLatitude = float32(value.Float64) + } + if value, ok := values[14].(*sql.NullFloat64); !ok { + return fmt.Errorf("unexpected type %T for field sourceLongitude", values[14]) + } else if value.Valid { + a.SourceLongitude = float32(value.Float64) + } + if value, ok := values[15].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field sourceScope", values[15]) + } else if value.Valid { + a.SourceScope = value.String + } + if value, ok := values[16].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field sourceValue", values[16]) + } else if value.Valid { + 
a.SourceValue = value.String + } + if value, ok := values[17].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field capacity", values[17]) + } else if value.Valid { + a.Capacity = int32(value.Int64) + } + if value, ok := values[18].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field leakSpeed", values[18]) + } else if value.Valid { + a.LeakSpeed = value.String + } + if value, ok := values[19].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field scenarioVersion", values[19]) + } else if value.Valid { + a.ScenarioVersion = value.String + } + if value, ok := values[20].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field scenarioHash", values[20]) + } else if value.Valid { + a.ScenarioHash = value.String + } + if value, ok := values[21].(*sql.NullBool); !ok { + return fmt.Errorf("unexpected type %T for field simulated", values[21]) + } else if value.Valid { + a.Simulated = value.Bool + } + values = values[22:] + if len(values) == len(alert.ForeignKeys) { + if value, ok := values[0].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for edge-field machine_alerts", value) + } else if value.Valid { + a.machine_alerts = new(int) + *a.machine_alerts = int(value.Int64) + } + } + return nil +} + +// QueryOwner queries the owner edge of the Alert. +func (a *Alert) QueryOwner() *MachineQuery { + return (&AlertClient{config: a.config}).QueryOwner(a) +} + +// QueryDecisions queries the decisions edge of the Alert. +func (a *Alert) QueryDecisions() *DecisionQuery { + return (&AlertClient{config: a.config}).QueryDecisions(a) +} + +// QueryEvents queries the events edge of the Alert. +func (a *Alert) QueryEvents() *EventQuery { + return (&AlertClient{config: a.config}).QueryEvents(a) +} + +// QueryMetas queries the metas edge of the Alert. +func (a *Alert) QueryMetas() *MetaQuery { + return (&AlertClient{config: a.config}).QueryMetas(a) +} + +// Update returns a builder for updating this Alert. +// Note that, you need to call Alert.Unwrap() before calling this method, if this Alert +// was returned from a transaction, and the transaction was committed or rolled back. +func (a *Alert) Update() *AlertUpdateOne { + return (&AlertClient{config: a.config}).UpdateOne(a) +} + +// Unwrap unwraps the entity that was returned from a transaction after it was closed, +// so that all next queries will be executed through the driver which created the transaction. +func (a *Alert) Unwrap() *Alert { + tx, ok := a.config.driver.(*txDriver) + if !ok { + panic("ent: Alert is not a transactional entity") + } + a.config.driver = tx.drv + return a +} + +// String implements the fmt.Stringer. 
+func (a *Alert) String() string { + var builder strings.Builder + builder.WriteString("Alert(") + builder.WriteString(fmt.Sprintf("id=%v", a.ID)) + builder.WriteString(", created_at=") + builder.WriteString(a.CreatedAt.Format(time.ANSIC)) + builder.WriteString(", updated_at=") + builder.WriteString(a.UpdatedAt.Format(time.ANSIC)) + builder.WriteString(", scenario=") + builder.WriteString(a.Scenario) + builder.WriteString(", bucketId=") + builder.WriteString(a.BucketId) + builder.WriteString(", message=") + builder.WriteString(a.Message) + builder.WriteString(", eventsCount=") + builder.WriteString(fmt.Sprintf("%v", a.EventsCount)) + builder.WriteString(", startedAt=") + builder.WriteString(a.StartedAt.Format(time.ANSIC)) + builder.WriteString(", stoppedAt=") + builder.WriteString(a.StoppedAt.Format(time.ANSIC)) + builder.WriteString(", sourceIp=") + builder.WriteString(a.SourceIp) + builder.WriteString(", sourceRange=") + builder.WriteString(a.SourceRange) + builder.WriteString(", sourceAsNumber=") + builder.WriteString(a.SourceAsNumber) + builder.WriteString(", sourceAsName=") + builder.WriteString(a.SourceAsName) + builder.WriteString(", sourceCountry=") + builder.WriteString(a.SourceCountry) + builder.WriteString(", sourceLatitude=") + builder.WriteString(fmt.Sprintf("%v", a.SourceLatitude)) + builder.WriteString(", sourceLongitude=") + builder.WriteString(fmt.Sprintf("%v", a.SourceLongitude)) + builder.WriteString(", sourceScope=") + builder.WriteString(a.SourceScope) + builder.WriteString(", sourceValue=") + builder.WriteString(a.SourceValue) + builder.WriteString(", capacity=") + builder.WriteString(fmt.Sprintf("%v", a.Capacity)) + builder.WriteString(", leakSpeed=") + builder.WriteString(a.LeakSpeed) + builder.WriteString(", scenarioVersion=") + builder.WriteString(a.ScenarioVersion) + builder.WriteString(", scenarioHash=") + builder.WriteString(a.ScenarioHash) + builder.WriteString(", simulated=") + builder.WriteString(fmt.Sprintf("%v", a.Simulated)) + builder.WriteByte(')') + return builder.String() +} + +// Alerts is a parsable slice of Alert. +type Alerts []*Alert + +func (a Alerts) config(cfg config) { + for _i := range a { + a[_i].config = cfg + } +} diff --git a/pkg/database/ent/alert/alert.go b/pkg/database/ent/alert/alert.go new file mode 100644 index 000000000..130d7ef66 --- /dev/null +++ b/pkg/database/ent/alert/alert.go @@ -0,0 +1,164 @@ +// Code generated by entc, DO NOT EDIT. + +package alert + +import ( + "time" +) + +const ( + // Label holds the string label denoting the alert type in the database. + Label = "alert" + // FieldID holds the string denoting the id field in the database. + FieldID = "id" + // FieldCreatedAt holds the string denoting the created_at field in the database. + FieldCreatedAt = "created_at" + // FieldUpdatedAt holds the string denoting the updated_at field in the database. + FieldUpdatedAt = "updated_at" + // FieldScenario holds the string denoting the scenario field in the database. + FieldScenario = "scenario" + // FieldBucketId holds the string denoting the bucketid field in the database. + FieldBucketId = "bucket_id" + // FieldMessage holds the string denoting the message field in the database. + FieldMessage = "message" + // FieldEventsCount holds the string denoting the eventscount field in the database. + FieldEventsCount = "events_count" + // FieldStartedAt holds the string denoting the startedat field in the database. + FieldStartedAt = "started_at" + // FieldStoppedAt holds the string denoting the stoppedat field in the database. 
+ FieldStoppedAt = "stopped_at" + // FieldSourceIp holds the string denoting the sourceip field in the database. + FieldSourceIp = "source_ip" + // FieldSourceRange holds the string denoting the sourcerange field in the database. + FieldSourceRange = "source_range" + // FieldSourceAsNumber holds the string denoting the sourceasnumber field in the database. + FieldSourceAsNumber = "source_as_number" + // FieldSourceAsName holds the string denoting the sourceasname field in the database. + FieldSourceAsName = "source_as_name" + // FieldSourceCountry holds the string denoting the sourcecountry field in the database. + FieldSourceCountry = "source_country" + // FieldSourceLatitude holds the string denoting the sourcelatitude field in the database. + FieldSourceLatitude = "source_latitude" + // FieldSourceLongitude holds the string denoting the sourcelongitude field in the database. + FieldSourceLongitude = "source_longitude" + // FieldSourceScope holds the string denoting the sourcescope field in the database. + FieldSourceScope = "source_scope" + // FieldSourceValue holds the string denoting the sourcevalue field in the database. + FieldSourceValue = "source_value" + // FieldCapacity holds the string denoting the capacity field in the database. + FieldCapacity = "capacity" + // FieldLeakSpeed holds the string denoting the leakspeed field in the database. + FieldLeakSpeed = "leak_speed" + // FieldScenarioVersion holds the string denoting the scenarioversion field in the database. + FieldScenarioVersion = "scenario_version" + // FieldScenarioHash holds the string denoting the scenariohash field in the database. + FieldScenarioHash = "scenario_hash" + // FieldSimulated holds the string denoting the simulated field in the database. + FieldSimulated = "simulated" + + // EdgeOwner holds the string denoting the owner edge name in mutations. + EdgeOwner = "owner" + // EdgeDecisions holds the string denoting the decisions edge name in mutations. + EdgeDecisions = "decisions" + // EdgeEvents holds the string denoting the events edge name in mutations. + EdgeEvents = "events" + // EdgeMetas holds the string denoting the metas edge name in mutations. + EdgeMetas = "metas" + + // Table holds the table name of the alert in the database. + Table = "alerts" + // OwnerTable is the table the holds the owner relation/edge. + OwnerTable = "alerts" + // OwnerInverseTable is the table name for the Machine entity. + // It exists in this package in order to avoid circular dependency with the "machine" package. + OwnerInverseTable = "machines" + // OwnerColumn is the table column denoting the owner relation/edge. + OwnerColumn = "machine_alerts" + // DecisionsTable is the table the holds the decisions relation/edge. + DecisionsTable = "decisions" + // DecisionsInverseTable is the table name for the Decision entity. + // It exists in this package in order to avoid circular dependency with the "decision" package. + DecisionsInverseTable = "decisions" + // DecisionsColumn is the table column denoting the decisions relation/edge. + DecisionsColumn = "alert_decisions" + // EventsTable is the table the holds the events relation/edge. + EventsTable = "events" + // EventsInverseTable is the table name for the Event entity. + // It exists in this package in order to avoid circular dependency with the "event" package. + EventsInverseTable = "events" + // EventsColumn is the table column denoting the events relation/edge. + EventsColumn = "alert_events" + // MetasTable is the table the holds the metas relation/edge. 
+ MetasTable = "meta" + // MetasInverseTable is the table name for the Meta entity. + // It exists in this package in order to avoid circular dependency with the "meta" package. + MetasInverseTable = "meta" + // MetasColumn is the table column denoting the metas relation/edge. + MetasColumn = "alert_metas" +) + +// Columns holds all SQL columns for alert fields. +var Columns = []string{ + FieldID, + FieldCreatedAt, + FieldUpdatedAt, + FieldScenario, + FieldBucketId, + FieldMessage, + FieldEventsCount, + FieldStartedAt, + FieldStoppedAt, + FieldSourceIp, + FieldSourceRange, + FieldSourceAsNumber, + FieldSourceAsName, + FieldSourceCountry, + FieldSourceLatitude, + FieldSourceLongitude, + FieldSourceScope, + FieldSourceValue, + FieldCapacity, + FieldLeakSpeed, + FieldScenarioVersion, + FieldScenarioHash, + FieldSimulated, +} + +// ForeignKeys holds the SQL foreign-keys that are owned by the Alert type. +var ForeignKeys = []string{ + "machine_alerts", +} + +// ValidColumn reports if the column name is valid (part of the table columns). +func ValidColumn(column string) bool { + for i := range Columns { + if column == Columns[i] { + return true + } + } + for i := range ForeignKeys { + if column == ForeignKeys[i] { + return true + } + } + return false +} + +var ( + // DefaultCreatedAt holds the default value on creation for the created_at field. + DefaultCreatedAt func() time.Time + // DefaultUpdatedAt holds the default value on creation for the updated_at field. + DefaultUpdatedAt func() time.Time + // DefaultBucketId holds the default value on creation for the bucketId field. + DefaultBucketId string + // DefaultMessage holds the default value on creation for the message field. + DefaultMessage string + // DefaultEventsCount holds the default value on creation for the eventsCount field. + DefaultEventsCount int32 + // DefaultStartedAt holds the default value on creation for the startedAt field. + DefaultStartedAt func() time.Time + // DefaultStoppedAt holds the default value on creation for the stoppedAt field. + DefaultStoppedAt func() time.Time + // DefaultSimulated holds the default value on creation for the simulated field. + DefaultSimulated bool +) diff --git a/pkg/database/ent/alert/where.go b/pkg/database/ent/alert/where.go new file mode 100644 index 000000000..a40588664 --- /dev/null +++ b/pkg/database/ent/alert/where.go @@ -0,0 +1,2709 @@ +// Code generated by entc, DO NOT EDIT. + +package alert + +import ( + "time" + + "github.com/crowdsecurity/crowdsec/pkg/database/ent/predicate" + "github.com/facebook/ent/dialect/sql" + "github.com/facebook/ent/dialect/sql/sqlgraph" +) + +// ID filters vertices based on their identifier. +func ID(id int) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldID), id)) + }) +} + +// IDEQ applies the EQ predicate on the ID field. +func IDEQ(id int) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldID), id)) + }) +} + +// IDNEQ applies the NEQ predicate on the ID field. +func IDNEQ(id int) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldID), id)) + }) +} + +// IDIn applies the In predicate on the ID field. +func IDIn(ids ...int) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. 
+ if len(ids) == 0 { + s.Where(sql.False()) + return + } + v := make([]interface{}, len(ids)) + for i := range v { + v[i] = ids[i] + } + s.Where(sql.In(s.C(FieldID), v...)) + }) +} + +// IDNotIn applies the NotIn predicate on the ID field. +func IDNotIn(ids ...int) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(ids) == 0 { + s.Where(sql.False()) + return + } + v := make([]interface{}, len(ids)) + for i := range v { + v[i] = ids[i] + } + s.Where(sql.NotIn(s.C(FieldID), v...)) + }) +} + +// IDGT applies the GT predicate on the ID field. +func IDGT(id int) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldID), id)) + }) +} + +// IDGTE applies the GTE predicate on the ID field. +func IDGTE(id int) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldID), id)) + }) +} + +// IDLT applies the LT predicate on the ID field. +func IDLT(id int) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldID), id)) + }) +} + +// IDLTE applies the LTE predicate on the ID field. +func IDLTE(id int) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldID), id)) + }) +} + +// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ. +func CreatedAt(v time.Time) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldCreatedAt), v)) + }) +} + +// UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ. +func UpdatedAt(v time.Time) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldUpdatedAt), v)) + }) +} + +// Scenario applies equality check predicate on the "scenario" field. It's identical to ScenarioEQ. +func Scenario(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldScenario), v)) + }) +} + +// BucketId applies equality check predicate on the "bucketId" field. It's identical to BucketIdEQ. +func BucketId(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldBucketId), v)) + }) +} + +// Message applies equality check predicate on the "message" field. It's identical to MessageEQ. +func Message(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldMessage), v)) + }) +} + +// EventsCount applies equality check predicate on the "eventsCount" field. It's identical to EventsCountEQ. +func EventsCount(v int32) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldEventsCount), v)) + }) +} + +// StartedAt applies equality check predicate on the "startedAt" field. It's identical to StartedAtEQ. +func StartedAt(v time.Time) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldStartedAt), v)) + }) +} + +// StoppedAt applies equality check predicate on the "stoppedAt" field. It's identical to StoppedAtEQ. +func StoppedAt(v time.Time) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldStoppedAt), v)) + }) +} + +// SourceIp applies equality check predicate on the "sourceIp" field. It's identical to SourceIpEQ. 
+func SourceIp(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldSourceIp), v)) + }) +} + +// SourceRange applies equality check predicate on the "sourceRange" field. It's identical to SourceRangeEQ. +func SourceRange(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldSourceRange), v)) + }) +} + +// SourceAsNumber applies equality check predicate on the "sourceAsNumber" field. It's identical to SourceAsNumberEQ. +func SourceAsNumber(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldSourceAsNumber), v)) + }) +} + +// SourceAsName applies equality check predicate on the "sourceAsName" field. It's identical to SourceAsNameEQ. +func SourceAsName(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldSourceAsName), v)) + }) +} + +// SourceCountry applies equality check predicate on the "sourceCountry" field. It's identical to SourceCountryEQ. +func SourceCountry(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldSourceCountry), v)) + }) +} + +// SourceLatitude applies equality check predicate on the "sourceLatitude" field. It's identical to SourceLatitudeEQ. +func SourceLatitude(v float32) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldSourceLatitude), v)) + }) +} + +// SourceLongitude applies equality check predicate on the "sourceLongitude" field. It's identical to SourceLongitudeEQ. +func SourceLongitude(v float32) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldSourceLongitude), v)) + }) +} + +// SourceScope applies equality check predicate on the "sourceScope" field. It's identical to SourceScopeEQ. +func SourceScope(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldSourceScope), v)) + }) +} + +// SourceValue applies equality check predicate on the "sourceValue" field. It's identical to SourceValueEQ. +func SourceValue(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldSourceValue), v)) + }) +} + +// Capacity applies equality check predicate on the "capacity" field. It's identical to CapacityEQ. +func Capacity(v int32) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldCapacity), v)) + }) +} + +// LeakSpeed applies equality check predicate on the "leakSpeed" field. It's identical to LeakSpeedEQ. +func LeakSpeed(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldLeakSpeed), v)) + }) +} + +// ScenarioVersion applies equality check predicate on the "scenarioVersion" field. It's identical to ScenarioVersionEQ. +func ScenarioVersion(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldScenarioVersion), v)) + }) +} + +// ScenarioHash applies equality check predicate on the "scenarioHash" field. It's identical to ScenarioHashEQ. +func ScenarioHash(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldScenarioHash), v)) + }) +} + +// Simulated applies equality check predicate on the "simulated" field. It's identical to SimulatedEQ. 
+func Simulated(v bool) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldSimulated), v)) + }) +} + +// CreatedAtEQ applies the EQ predicate on the "created_at" field. +func CreatedAtEQ(v time.Time) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldCreatedAt), v)) + }) +} + +// CreatedAtNEQ applies the NEQ predicate on the "created_at" field. +func CreatedAtNEQ(v time.Time) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldCreatedAt), v)) + }) +} + +// CreatedAtIn applies the In predicate on the "created_at" field. +func CreatedAtIn(vs ...time.Time) predicate.Alert { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Alert(func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(v) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.In(s.C(FieldCreatedAt), v...)) + }) +} + +// CreatedAtNotIn applies the NotIn predicate on the "created_at" field. +func CreatedAtNotIn(vs ...time.Time) predicate.Alert { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Alert(func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(v) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.NotIn(s.C(FieldCreatedAt), v...)) + }) +} + +// CreatedAtGT applies the GT predicate on the "created_at" field. +func CreatedAtGT(v time.Time) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldCreatedAt), v)) + }) +} + +// CreatedAtGTE applies the GTE predicate on the "created_at" field. +func CreatedAtGTE(v time.Time) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldCreatedAt), v)) + }) +} + +// CreatedAtLT applies the LT predicate on the "created_at" field. +func CreatedAtLT(v time.Time) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldCreatedAt), v)) + }) +} + +// CreatedAtLTE applies the LTE predicate on the "created_at" field. +func CreatedAtLTE(v time.Time) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldCreatedAt), v)) + }) +} + +// UpdatedAtEQ applies the EQ predicate on the "updated_at" field. +func UpdatedAtEQ(v time.Time) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldUpdatedAt), v)) + }) +} + +// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field. +func UpdatedAtNEQ(v time.Time) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldUpdatedAt), v)) + }) +} + +// UpdatedAtIn applies the In predicate on the "updated_at" field. +func UpdatedAtIn(vs ...time.Time) predicate.Alert { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Alert(func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(v) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.In(s.C(FieldUpdatedAt), v...)) + }) +} + +// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field. 
+func UpdatedAtNotIn(vs ...time.Time) predicate.Alert { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Alert(func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(v) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.NotIn(s.C(FieldUpdatedAt), v...)) + }) +} + +// UpdatedAtGT applies the GT predicate on the "updated_at" field. +func UpdatedAtGT(v time.Time) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldUpdatedAt), v)) + }) +} + +// UpdatedAtGTE applies the GTE predicate on the "updated_at" field. +func UpdatedAtGTE(v time.Time) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldUpdatedAt), v)) + }) +} + +// UpdatedAtLT applies the LT predicate on the "updated_at" field. +func UpdatedAtLT(v time.Time) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldUpdatedAt), v)) + }) +} + +// UpdatedAtLTE applies the LTE predicate on the "updated_at" field. +func UpdatedAtLTE(v time.Time) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldUpdatedAt), v)) + }) +} + +// ScenarioEQ applies the EQ predicate on the "scenario" field. +func ScenarioEQ(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldScenario), v)) + }) +} + +// ScenarioNEQ applies the NEQ predicate on the "scenario" field. +func ScenarioNEQ(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldScenario), v)) + }) +} + +// ScenarioIn applies the In predicate on the "scenario" field. +func ScenarioIn(vs ...string) predicate.Alert { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Alert(func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(v) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.In(s.C(FieldScenario), v...)) + }) +} + +// ScenarioNotIn applies the NotIn predicate on the "scenario" field. +func ScenarioNotIn(vs ...string) predicate.Alert { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Alert(func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(v) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.NotIn(s.C(FieldScenario), v...)) + }) +} + +// ScenarioGT applies the GT predicate on the "scenario" field. +func ScenarioGT(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldScenario), v)) + }) +} + +// ScenarioGTE applies the GTE predicate on the "scenario" field. +func ScenarioGTE(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldScenario), v)) + }) +} + +// ScenarioLT applies the LT predicate on the "scenario" field. +func ScenarioLT(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldScenario), v)) + }) +} + +// ScenarioLTE applies the LTE predicate on the "scenario" field. 
+func ScenarioLTE(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldScenario), v)) + }) +} + +// ScenarioContains applies the Contains predicate on the "scenario" field. +func ScenarioContains(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.Contains(s.C(FieldScenario), v)) + }) +} + +// ScenarioHasPrefix applies the HasPrefix predicate on the "scenario" field. +func ScenarioHasPrefix(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.HasPrefix(s.C(FieldScenario), v)) + }) +} + +// ScenarioHasSuffix applies the HasSuffix predicate on the "scenario" field. +func ScenarioHasSuffix(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.HasSuffix(s.C(FieldScenario), v)) + }) +} + +// ScenarioEqualFold applies the EqualFold predicate on the "scenario" field. +func ScenarioEqualFold(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.EqualFold(s.C(FieldScenario), v)) + }) +} + +// ScenarioContainsFold applies the ContainsFold predicate on the "scenario" field. +func ScenarioContainsFold(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.ContainsFold(s.C(FieldScenario), v)) + }) +} + +// BucketIdEQ applies the EQ predicate on the "bucketId" field. +func BucketIdEQ(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldBucketId), v)) + }) +} + +// BucketIdNEQ applies the NEQ predicate on the "bucketId" field. +func BucketIdNEQ(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldBucketId), v)) + }) +} + +// BucketIdIn applies the In predicate on the "bucketId" field. +func BucketIdIn(vs ...string) predicate.Alert { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Alert(func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(v) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.In(s.C(FieldBucketId), v...)) + }) +} + +// BucketIdNotIn applies the NotIn predicate on the "bucketId" field. +func BucketIdNotIn(vs ...string) predicate.Alert { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Alert(func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(v) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.NotIn(s.C(FieldBucketId), v...)) + }) +} + +// BucketIdGT applies the GT predicate on the "bucketId" field. +func BucketIdGT(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldBucketId), v)) + }) +} + +// BucketIdGTE applies the GTE predicate on the "bucketId" field. +func BucketIdGTE(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldBucketId), v)) + }) +} + +// BucketIdLT applies the LT predicate on the "bucketId" field. +func BucketIdLT(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldBucketId), v)) + }) +} + +// BucketIdLTE applies the LTE predicate on the "bucketId" field. 
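
The scenario field gets the full set of string predicates, including the case-insensitive *Fold variants. A short sketch (same illustrative imports as the previous example) that narrows alerts to hub scenarios matching ssh-bf in any case:

// sshBfAlerts combines two string predicates; arguments passed to
// Where() are ANDed together by the generated query builder.
func sshBfAlerts(ctx context.Context, client *ent.Client) ([]*ent.Alert, error) {
	return client.Alert.Query().
		Where(
			alert.ScenarioHasPrefix("crowdsecurity/"),
			alert.ScenarioContainsFold("ssh-bf"),
		).
		All(ctx)
}
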
+func BucketIdLTE(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldBucketId), v)) + }) +} + +// BucketIdContains applies the Contains predicate on the "bucketId" field. +func BucketIdContains(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.Contains(s.C(FieldBucketId), v)) + }) +} + +// BucketIdHasPrefix applies the HasPrefix predicate on the "bucketId" field. +func BucketIdHasPrefix(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.HasPrefix(s.C(FieldBucketId), v)) + }) +} + +// BucketIdHasSuffix applies the HasSuffix predicate on the "bucketId" field. +func BucketIdHasSuffix(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.HasSuffix(s.C(FieldBucketId), v)) + }) +} + +// BucketIdIsNil applies the IsNil predicate on the "bucketId" field. +func BucketIdIsNil() predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.IsNull(s.C(FieldBucketId))) + }) +} + +// BucketIdNotNil applies the NotNil predicate on the "bucketId" field. +func BucketIdNotNil() predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.NotNull(s.C(FieldBucketId))) + }) +} + +// BucketIdEqualFold applies the EqualFold predicate on the "bucketId" field. +func BucketIdEqualFold(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.EqualFold(s.C(FieldBucketId), v)) + }) +} + +// BucketIdContainsFold applies the ContainsFold predicate on the "bucketId" field. +func BucketIdContainsFold(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.ContainsFold(s.C(FieldBucketId), v)) + }) +} + +// MessageEQ applies the EQ predicate on the "message" field. +func MessageEQ(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldMessage), v)) + }) +} + +// MessageNEQ applies the NEQ predicate on the "message" field. +func MessageNEQ(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldMessage), v)) + }) +} + +// MessageIn applies the In predicate on the "message" field. +func MessageIn(vs ...string) predicate.Alert { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Alert(func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(v) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.In(s.C(FieldMessage), v...)) + }) +} + +// MessageNotIn applies the NotIn predicate on the "message" field. +func MessageNotIn(vs ...string) predicate.Alert { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Alert(func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(v) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.NotIn(s.C(FieldMessage), v...)) + }) +} + +// MessageGT applies the GT predicate on the "message" field. +func MessageGT(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldMessage), v)) + }) +} + +// MessageGTE applies the GTE predicate on the "message" field. 
+func MessageGTE(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldMessage), v)) + }) +} + +// MessageLT applies the LT predicate on the "message" field. +func MessageLT(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldMessage), v)) + }) +} + +// MessageLTE applies the LTE predicate on the "message" field. +func MessageLTE(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldMessage), v)) + }) +} + +// MessageContains applies the Contains predicate on the "message" field. +func MessageContains(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.Contains(s.C(FieldMessage), v)) + }) +} + +// MessageHasPrefix applies the HasPrefix predicate on the "message" field. +func MessageHasPrefix(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.HasPrefix(s.C(FieldMessage), v)) + }) +} + +// MessageHasSuffix applies the HasSuffix predicate on the "message" field. +func MessageHasSuffix(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.HasSuffix(s.C(FieldMessage), v)) + }) +} + +// MessageIsNil applies the IsNil predicate on the "message" field. +func MessageIsNil() predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.IsNull(s.C(FieldMessage))) + }) +} + +// MessageNotNil applies the NotNil predicate on the "message" field. +func MessageNotNil() predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.NotNull(s.C(FieldMessage))) + }) +} + +// MessageEqualFold applies the EqualFold predicate on the "message" field. +func MessageEqualFold(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.EqualFold(s.C(FieldMessage), v)) + }) +} + +// MessageContainsFold applies the ContainsFold predicate on the "message" field. +func MessageContainsFold(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.ContainsFold(s.C(FieldMessage), v)) + }) +} + +// EventsCountEQ applies the EQ predicate on the "eventsCount" field. +func EventsCountEQ(v int32) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldEventsCount), v)) + }) +} + +// EventsCountNEQ applies the NEQ predicate on the "eventsCount" field. +func EventsCountNEQ(v int32) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldEventsCount), v)) + }) +} + +// EventsCountIn applies the In predicate on the "eventsCount" field. +func EventsCountIn(vs ...int32) predicate.Alert { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Alert(func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(v) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.In(s.C(FieldEventsCount), v...)) + }) +} + +// EventsCountNotIn applies the NotIn predicate on the "eventsCount" field. +func EventsCountNotIn(vs ...int32) predicate.Alert { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Alert(func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. 
+ if len(v) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.NotIn(s.C(FieldEventsCount), v...)) + }) +} + +// EventsCountGT applies the GT predicate on the "eventsCount" field. +func EventsCountGT(v int32) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldEventsCount), v)) + }) +} + +// EventsCountGTE applies the GTE predicate on the "eventsCount" field. +func EventsCountGTE(v int32) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldEventsCount), v)) + }) +} + +// EventsCountLT applies the LT predicate on the "eventsCount" field. +func EventsCountLT(v int32) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldEventsCount), v)) + }) +} + +// EventsCountLTE applies the LTE predicate on the "eventsCount" field. +func EventsCountLTE(v int32) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldEventsCount), v)) + }) +} + +// EventsCountIsNil applies the IsNil predicate on the "eventsCount" field. +func EventsCountIsNil() predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.IsNull(s.C(FieldEventsCount))) + }) +} + +// EventsCountNotNil applies the NotNil predicate on the "eventsCount" field. +func EventsCountNotNil() predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.NotNull(s.C(FieldEventsCount))) + }) +} + +// StartedAtEQ applies the EQ predicate on the "startedAt" field. +func StartedAtEQ(v time.Time) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldStartedAt), v)) + }) +} + +// StartedAtNEQ applies the NEQ predicate on the "startedAt" field. +func StartedAtNEQ(v time.Time) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldStartedAt), v)) + }) +} + +// StartedAtIn applies the In predicate on the "startedAt" field. +func StartedAtIn(vs ...time.Time) predicate.Alert { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Alert(func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(v) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.In(s.C(FieldStartedAt), v...)) + }) +} + +// StartedAtNotIn applies the NotIn predicate on the "startedAt" field. +func StartedAtNotIn(vs ...time.Time) predicate.Alert { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Alert(func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(v) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.NotIn(s.C(FieldStartedAt), v...)) + }) +} + +// StartedAtGT applies the GT predicate on the "startedAt" field. +func StartedAtGT(v time.Time) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldStartedAt), v)) + }) +} + +// StartedAtGTE applies the GTE predicate on the "startedAt" field. +func StartedAtGTE(v time.Time) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldStartedAt), v)) + }) +} + +// StartedAtLT applies the LT predicate on the "startedAt" field. 
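
Optional columns such as bucketId and eventsCount additionally get IsNil/NotNil predicates, which map to IS NULL / IS NOT NULL rather than value comparisons. A sketch counting alerts that actually overflowed a bucket (threshold illustrative, same assumed imports):

// noisyAlerts counts alerts with at least 10 events and a bucket attached.
func noisyAlerts(ctx context.Context, client *ent.Client) (int, error) {
	return client.Alert.Query().
		Where(
			alert.EventsCountGTE(10), // eventsCount is int32 in the generated schema
			alert.BucketIdNotNil(),
		).
		Count(ctx)
}
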
+func StartedAtLT(v time.Time) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldStartedAt), v)) + }) +} + +// StartedAtLTE applies the LTE predicate on the "startedAt" field. +func StartedAtLTE(v time.Time) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldStartedAt), v)) + }) +} + +// StartedAtIsNil applies the IsNil predicate on the "startedAt" field. +func StartedAtIsNil() predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.IsNull(s.C(FieldStartedAt))) + }) +} + +// StartedAtNotNil applies the NotNil predicate on the "startedAt" field. +func StartedAtNotNil() predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.NotNull(s.C(FieldStartedAt))) + }) +} + +// StoppedAtEQ applies the EQ predicate on the "stoppedAt" field. +func StoppedAtEQ(v time.Time) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldStoppedAt), v)) + }) +} + +// StoppedAtNEQ applies the NEQ predicate on the "stoppedAt" field. +func StoppedAtNEQ(v time.Time) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldStoppedAt), v)) + }) +} + +// StoppedAtIn applies the In predicate on the "stoppedAt" field. +func StoppedAtIn(vs ...time.Time) predicate.Alert { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Alert(func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(v) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.In(s.C(FieldStoppedAt), v...)) + }) +} + +// StoppedAtNotIn applies the NotIn predicate on the "stoppedAt" field. +func StoppedAtNotIn(vs ...time.Time) predicate.Alert { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Alert(func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(v) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.NotIn(s.C(FieldStoppedAt), v...)) + }) +} + +// StoppedAtGT applies the GT predicate on the "stoppedAt" field. +func StoppedAtGT(v time.Time) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldStoppedAt), v)) + }) +} + +// StoppedAtGTE applies the GTE predicate on the "stoppedAt" field. +func StoppedAtGTE(v time.Time) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldStoppedAt), v)) + }) +} + +// StoppedAtLT applies the LT predicate on the "stoppedAt" field. +func StoppedAtLT(v time.Time) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldStoppedAt), v)) + }) +} + +// StoppedAtLTE applies the LTE predicate on the "stoppedAt" field. +func StoppedAtLTE(v time.Time) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldStoppedAt), v)) + }) +} + +// StoppedAtIsNil applies the IsNil predicate on the "stoppedAt" field. +func StoppedAtIsNil() predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.IsNull(s.C(FieldStoppedAt))) + }) +} + +// StoppedAtNotNil applies the NotNil predicate on the "stoppedAt" field. 
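
startedAt and stoppedAt bound the event window of an alert, so range queries typically pair a lower bound on one with an upper bound on the other. A sketch selecting alerts whose window overlaps [since, until] (same assumed imports; the helper name is illustrative):

// overlappingAlerts returns alerts active at some point inside [since, until].
func overlappingAlerts(ctx context.Context, client *ent.Client, since, until time.Time) ([]*ent.Alert, error) {
	return client.Alert.Query().
		Where(
			alert.StartedAtLTE(until),
			alert.StoppedAtGTE(since),
		).
		All(ctx)
}
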
+func StoppedAtNotNil() predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.NotNull(s.C(FieldStoppedAt))) + }) +} + +// SourceIpEQ applies the EQ predicate on the "sourceIp" field. +func SourceIpEQ(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldSourceIp), v)) + }) +} + +// SourceIpNEQ applies the NEQ predicate on the "sourceIp" field. +func SourceIpNEQ(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldSourceIp), v)) + }) +} + +// SourceIpIn applies the In predicate on the "sourceIp" field. +func SourceIpIn(vs ...string) predicate.Alert { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Alert(func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(v) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.In(s.C(FieldSourceIp), v...)) + }) +} + +// SourceIpNotIn applies the NotIn predicate on the "sourceIp" field. +func SourceIpNotIn(vs ...string) predicate.Alert { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Alert(func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(v) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.NotIn(s.C(FieldSourceIp), v...)) + }) +} + +// SourceIpGT applies the GT predicate on the "sourceIp" field. +func SourceIpGT(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldSourceIp), v)) + }) +} + +// SourceIpGTE applies the GTE predicate on the "sourceIp" field. +func SourceIpGTE(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldSourceIp), v)) + }) +} + +// SourceIpLT applies the LT predicate on the "sourceIp" field. +func SourceIpLT(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldSourceIp), v)) + }) +} + +// SourceIpLTE applies the LTE predicate on the "sourceIp" field. +func SourceIpLTE(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldSourceIp), v)) + }) +} + +// SourceIpContains applies the Contains predicate on the "sourceIp" field. +func SourceIpContains(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.Contains(s.C(FieldSourceIp), v)) + }) +} + +// SourceIpHasPrefix applies the HasPrefix predicate on the "sourceIp" field. +func SourceIpHasPrefix(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.HasPrefix(s.C(FieldSourceIp), v)) + }) +} + +// SourceIpHasSuffix applies the HasSuffix predicate on the "sourceIp" field. +func SourceIpHasSuffix(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.HasSuffix(s.C(FieldSourceIp), v)) + }) +} + +// SourceIpIsNil applies the IsNil predicate on the "sourceIp" field. +func SourceIpIsNil() predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.IsNull(s.C(FieldSourceIp))) + }) +} + +// SourceIpNotNil applies the NotNil predicate on the "sourceIp" field. 
+func SourceIpNotNil() predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.NotNull(s.C(FieldSourceIp))) + }) +} + +// SourceIpEqualFold applies the EqualFold predicate on the "sourceIp" field. +func SourceIpEqualFold(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.EqualFold(s.C(FieldSourceIp), v)) + }) +} + +// SourceIpContainsFold applies the ContainsFold predicate on the "sourceIp" field. +func SourceIpContainsFold(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.ContainsFold(s.C(FieldSourceIp), v)) + }) +} + +// SourceRangeEQ applies the EQ predicate on the "sourceRange" field. +func SourceRangeEQ(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldSourceRange), v)) + }) +} + +// SourceRangeNEQ applies the NEQ predicate on the "sourceRange" field. +func SourceRangeNEQ(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldSourceRange), v)) + }) +} + +// SourceRangeIn applies the In predicate on the "sourceRange" field. +func SourceRangeIn(vs ...string) predicate.Alert { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Alert(func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(v) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.In(s.C(FieldSourceRange), v...)) + }) +} + +// SourceRangeNotIn applies the NotIn predicate on the "sourceRange" field. +func SourceRangeNotIn(vs ...string) predicate.Alert { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Alert(func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(v) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.NotIn(s.C(FieldSourceRange), v...)) + }) +} + +// SourceRangeGT applies the GT predicate on the "sourceRange" field. +func SourceRangeGT(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldSourceRange), v)) + }) +} + +// SourceRangeGTE applies the GTE predicate on the "sourceRange" field. +func SourceRangeGTE(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldSourceRange), v)) + }) +} + +// SourceRangeLT applies the LT predicate on the "sourceRange" field. +func SourceRangeLT(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldSourceRange), v)) + }) +} + +// SourceRangeLTE applies the LTE predicate on the "sourceRange" field. +func SourceRangeLTE(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldSourceRange), v)) + }) +} + +// SourceRangeContains applies the Contains predicate on the "sourceRange" field. +func SourceRangeContains(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.Contains(s.C(FieldSourceRange), v)) + }) +} + +// SourceRangeHasPrefix applies the HasPrefix predicate on the "sourceRange" field. 
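
These sourceIp / sourceRange predicates are the storage-side building blocks for IP and range filtering in the new local API. A sketch of an exact-IP lookup (same assumed imports; the helper name is illustrative):

// alertsForIP fetches every alert whose source IP matches exactly.
func alertsForIP(ctx context.Context, client *ent.Client, ip string) ([]*ent.Alert, error) {
	return client.Alert.Query().
		Where(alert.SourceIpEQ(ip)).
		All(ctx)
}
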
+func SourceRangeHasPrefix(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.HasPrefix(s.C(FieldSourceRange), v)) + }) +} + +// SourceRangeHasSuffix applies the HasSuffix predicate on the "sourceRange" field. +func SourceRangeHasSuffix(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.HasSuffix(s.C(FieldSourceRange), v)) + }) +} + +// SourceRangeIsNil applies the IsNil predicate on the "sourceRange" field. +func SourceRangeIsNil() predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.IsNull(s.C(FieldSourceRange))) + }) +} + +// SourceRangeNotNil applies the NotNil predicate on the "sourceRange" field. +func SourceRangeNotNil() predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.NotNull(s.C(FieldSourceRange))) + }) +} + +// SourceRangeEqualFold applies the EqualFold predicate on the "sourceRange" field. +func SourceRangeEqualFold(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.EqualFold(s.C(FieldSourceRange), v)) + }) +} + +// SourceRangeContainsFold applies the ContainsFold predicate on the "sourceRange" field. +func SourceRangeContainsFold(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.ContainsFold(s.C(FieldSourceRange), v)) + }) +} + +// SourceAsNumberEQ applies the EQ predicate on the "sourceAsNumber" field. +func SourceAsNumberEQ(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldSourceAsNumber), v)) + }) +} + +// SourceAsNumberNEQ applies the NEQ predicate on the "sourceAsNumber" field. +func SourceAsNumberNEQ(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldSourceAsNumber), v)) + }) +} + +// SourceAsNumberIn applies the In predicate on the "sourceAsNumber" field. +func SourceAsNumberIn(vs ...string) predicate.Alert { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Alert(func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(v) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.In(s.C(FieldSourceAsNumber), v...)) + }) +} + +// SourceAsNumberNotIn applies the NotIn predicate on the "sourceAsNumber" field. +func SourceAsNumberNotIn(vs ...string) predicate.Alert { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Alert(func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(v) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.NotIn(s.C(FieldSourceAsNumber), v...)) + }) +} + +// SourceAsNumberGT applies the GT predicate on the "sourceAsNumber" field. +func SourceAsNumberGT(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldSourceAsNumber), v)) + }) +} + +// SourceAsNumberGTE applies the GTE predicate on the "sourceAsNumber" field. +func SourceAsNumberGTE(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldSourceAsNumber), v)) + }) +} + +// SourceAsNumberLT applies the LT predicate on the "sourceAsNumber" field. 
+func SourceAsNumberLT(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldSourceAsNumber), v)) + }) +} + +// SourceAsNumberLTE applies the LTE predicate on the "sourceAsNumber" field. +func SourceAsNumberLTE(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldSourceAsNumber), v)) + }) +} + +// SourceAsNumberContains applies the Contains predicate on the "sourceAsNumber" field. +func SourceAsNumberContains(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.Contains(s.C(FieldSourceAsNumber), v)) + }) +} + +// SourceAsNumberHasPrefix applies the HasPrefix predicate on the "sourceAsNumber" field. +func SourceAsNumberHasPrefix(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.HasPrefix(s.C(FieldSourceAsNumber), v)) + }) +} + +// SourceAsNumberHasSuffix applies the HasSuffix predicate on the "sourceAsNumber" field. +func SourceAsNumberHasSuffix(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.HasSuffix(s.C(FieldSourceAsNumber), v)) + }) +} + +// SourceAsNumberIsNil applies the IsNil predicate on the "sourceAsNumber" field. +func SourceAsNumberIsNil() predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.IsNull(s.C(FieldSourceAsNumber))) + }) +} + +// SourceAsNumberNotNil applies the NotNil predicate on the "sourceAsNumber" field. +func SourceAsNumberNotNil() predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.NotNull(s.C(FieldSourceAsNumber))) + }) +} + +// SourceAsNumberEqualFold applies the EqualFold predicate on the "sourceAsNumber" field. +func SourceAsNumberEqualFold(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.EqualFold(s.C(FieldSourceAsNumber), v)) + }) +} + +// SourceAsNumberContainsFold applies the ContainsFold predicate on the "sourceAsNumber" field. +func SourceAsNumberContainsFold(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.ContainsFold(s.C(FieldSourceAsNumber), v)) + }) +} + +// SourceAsNameEQ applies the EQ predicate on the "sourceAsName" field. +func SourceAsNameEQ(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldSourceAsName), v)) + }) +} + +// SourceAsNameNEQ applies the NEQ predicate on the "sourceAsName" field. +func SourceAsNameNEQ(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldSourceAsName), v)) + }) +} + +// SourceAsNameIn applies the In predicate on the "sourceAsName" field. +func SourceAsNameIn(vs ...string) predicate.Alert { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Alert(func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(v) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.In(s.C(FieldSourceAsName), v...)) + }) +} + +// SourceAsNameNotIn applies the NotIn predicate on the "sourceAsName" field. +func SourceAsNameNotIn(vs ...string) predicate.Alert { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Alert(func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. 
+ if len(v) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.NotIn(s.C(FieldSourceAsName), v...)) + }) +} + +// SourceAsNameGT applies the GT predicate on the "sourceAsName" field. +func SourceAsNameGT(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldSourceAsName), v)) + }) +} + +// SourceAsNameGTE applies the GTE predicate on the "sourceAsName" field. +func SourceAsNameGTE(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldSourceAsName), v)) + }) +} + +// SourceAsNameLT applies the LT predicate on the "sourceAsName" field. +func SourceAsNameLT(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldSourceAsName), v)) + }) +} + +// SourceAsNameLTE applies the LTE predicate on the "sourceAsName" field. +func SourceAsNameLTE(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldSourceAsName), v)) + }) +} + +// SourceAsNameContains applies the Contains predicate on the "sourceAsName" field. +func SourceAsNameContains(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.Contains(s.C(FieldSourceAsName), v)) + }) +} + +// SourceAsNameHasPrefix applies the HasPrefix predicate on the "sourceAsName" field. +func SourceAsNameHasPrefix(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.HasPrefix(s.C(FieldSourceAsName), v)) + }) +} + +// SourceAsNameHasSuffix applies the HasSuffix predicate on the "sourceAsName" field. +func SourceAsNameHasSuffix(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.HasSuffix(s.C(FieldSourceAsName), v)) + }) +} + +// SourceAsNameIsNil applies the IsNil predicate on the "sourceAsName" field. +func SourceAsNameIsNil() predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.IsNull(s.C(FieldSourceAsName))) + }) +} + +// SourceAsNameNotNil applies the NotNil predicate on the "sourceAsName" field. +func SourceAsNameNotNil() predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.NotNull(s.C(FieldSourceAsName))) + }) +} + +// SourceAsNameEqualFold applies the EqualFold predicate on the "sourceAsName" field. +func SourceAsNameEqualFold(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.EqualFold(s.C(FieldSourceAsName), v)) + }) +} + +// SourceAsNameContainsFold applies the ContainsFold predicate on the "sourceAsName" field. +func SourceAsNameContainsFold(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.ContainsFold(s.C(FieldSourceAsName), v)) + }) +} + +// SourceCountryEQ applies the EQ predicate on the "sourceCountry" field. +func SourceCountryEQ(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldSourceCountry), v)) + }) +} + +// SourceCountryNEQ applies the NEQ predicate on the "sourceCountry" field. +func SourceCountryNEQ(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldSourceCountry), v)) + }) +} + +// SourceCountryIn applies the In predicate on the "sourceCountry" field. 
+func SourceCountryIn(vs ...string) predicate.Alert { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Alert(func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(v) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.In(s.C(FieldSourceCountry), v...)) + }) +} + +// SourceCountryNotIn applies the NotIn predicate on the "sourceCountry" field. +func SourceCountryNotIn(vs ...string) predicate.Alert { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Alert(func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(v) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.NotIn(s.C(FieldSourceCountry), v...)) + }) +} + +// SourceCountryGT applies the GT predicate on the "sourceCountry" field. +func SourceCountryGT(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldSourceCountry), v)) + }) +} + +// SourceCountryGTE applies the GTE predicate on the "sourceCountry" field. +func SourceCountryGTE(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldSourceCountry), v)) + }) +} + +// SourceCountryLT applies the LT predicate on the "sourceCountry" field. +func SourceCountryLT(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldSourceCountry), v)) + }) +} + +// SourceCountryLTE applies the LTE predicate on the "sourceCountry" field. +func SourceCountryLTE(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldSourceCountry), v)) + }) +} + +// SourceCountryContains applies the Contains predicate on the "sourceCountry" field. +func SourceCountryContains(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.Contains(s.C(FieldSourceCountry), v)) + }) +} + +// SourceCountryHasPrefix applies the HasPrefix predicate on the "sourceCountry" field. +func SourceCountryHasPrefix(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.HasPrefix(s.C(FieldSourceCountry), v)) + }) +} + +// SourceCountryHasSuffix applies the HasSuffix predicate on the "sourceCountry" field. +func SourceCountryHasSuffix(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.HasSuffix(s.C(FieldSourceCountry), v)) + }) +} + +// SourceCountryIsNil applies the IsNil predicate on the "sourceCountry" field. +func SourceCountryIsNil() predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.IsNull(s.C(FieldSourceCountry))) + }) +} + +// SourceCountryNotNil applies the NotNil predicate on the "sourceCountry" field. +func SourceCountryNotNil() predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.NotNull(s.C(FieldSourceCountry))) + }) +} + +// SourceCountryEqualFold applies the EqualFold predicate on the "sourceCountry" field. +func SourceCountryEqualFold(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.EqualFold(s.C(FieldSourceCountry), v)) + }) +} + +// SourceCountryContainsFold applies the ContainsFold predicate on the "sourceCountry" field. 
+func SourceCountryContainsFold(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.ContainsFold(s.C(FieldSourceCountry), v)) + }) +} + +// SourceLatitudeEQ applies the EQ predicate on the "sourceLatitude" field. +func SourceLatitudeEQ(v float32) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldSourceLatitude), v)) + }) +} + +// SourceLatitudeNEQ applies the NEQ predicate on the "sourceLatitude" field. +func SourceLatitudeNEQ(v float32) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldSourceLatitude), v)) + }) +} + +// SourceLatitudeIn applies the In predicate on the "sourceLatitude" field. +func SourceLatitudeIn(vs ...float32) predicate.Alert { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Alert(func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(v) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.In(s.C(FieldSourceLatitude), v...)) + }) +} + +// SourceLatitudeNotIn applies the NotIn predicate on the "sourceLatitude" field. +func SourceLatitudeNotIn(vs ...float32) predicate.Alert { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Alert(func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(v) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.NotIn(s.C(FieldSourceLatitude), v...)) + }) +} + +// SourceLatitudeGT applies the GT predicate on the "sourceLatitude" field. +func SourceLatitudeGT(v float32) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldSourceLatitude), v)) + }) +} + +// SourceLatitudeGTE applies the GTE predicate on the "sourceLatitude" field. +func SourceLatitudeGTE(v float32) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldSourceLatitude), v)) + }) +} + +// SourceLatitudeLT applies the LT predicate on the "sourceLatitude" field. +func SourceLatitudeLT(v float32) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldSourceLatitude), v)) + }) +} + +// SourceLatitudeLTE applies the LTE predicate on the "sourceLatitude" field. +func SourceLatitudeLTE(v float32) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldSourceLatitude), v)) + }) +} + +// SourceLatitudeIsNil applies the IsNil predicate on the "sourceLatitude" field. +func SourceLatitudeIsNil() predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.IsNull(s.C(FieldSourceLatitude))) + }) +} + +// SourceLatitudeNotNil applies the NotNil predicate on the "sourceLatitude" field. +func SourceLatitudeNotNil() predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.NotNull(s.C(FieldSourceLatitude))) + }) +} + +// SourceLongitudeEQ applies the EQ predicate on the "sourceLongitude" field. +func SourceLongitudeEQ(v float32) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldSourceLongitude), v)) + }) +} + +// SourceLongitudeNEQ applies the NEQ predicate on the "sourceLongitude" field. 
+func SourceLongitudeNEQ(v float32) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldSourceLongitude), v)) + }) +} + +// SourceLongitudeIn applies the In predicate on the "sourceLongitude" field. +func SourceLongitudeIn(vs ...float32) predicate.Alert { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Alert(func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(v) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.In(s.C(FieldSourceLongitude), v...)) + }) +} + +// SourceLongitudeNotIn applies the NotIn predicate on the "sourceLongitude" field. +func SourceLongitudeNotIn(vs ...float32) predicate.Alert { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Alert(func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(v) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.NotIn(s.C(FieldSourceLongitude), v...)) + }) +} + +// SourceLongitudeGT applies the GT predicate on the "sourceLongitude" field. +func SourceLongitudeGT(v float32) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldSourceLongitude), v)) + }) +} + +// SourceLongitudeGTE applies the GTE predicate on the "sourceLongitude" field. +func SourceLongitudeGTE(v float32) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldSourceLongitude), v)) + }) +} + +// SourceLongitudeLT applies the LT predicate on the "sourceLongitude" field. +func SourceLongitudeLT(v float32) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldSourceLongitude), v)) + }) +} + +// SourceLongitudeLTE applies the LTE predicate on the "sourceLongitude" field. +func SourceLongitudeLTE(v float32) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldSourceLongitude), v)) + }) +} + +// SourceLongitudeIsNil applies the IsNil predicate on the "sourceLongitude" field. +func SourceLongitudeIsNil() predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.IsNull(s.C(FieldSourceLongitude))) + }) +} + +// SourceLongitudeNotNil applies the NotNil predicate on the "sourceLongitude" field. +func SourceLongitudeNotNil() predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.NotNull(s.C(FieldSourceLongitude))) + }) +} + +// SourceScopeEQ applies the EQ predicate on the "sourceScope" field. +func SourceScopeEQ(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldSourceScope), v)) + }) +} + +// SourceScopeNEQ applies the NEQ predicate on the "sourceScope" field. +func SourceScopeNEQ(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldSourceScope), v)) + }) +} + +// SourceScopeIn applies the In predicate on the "sourceScope" field. +func SourceScopeIn(vs ...string) predicate.Alert { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Alert(func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. 
+ if len(v) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.In(s.C(FieldSourceScope), v...)) + }) +} + +// SourceScopeNotIn applies the NotIn predicate on the "sourceScope" field. +func SourceScopeNotIn(vs ...string) predicate.Alert { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Alert(func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(v) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.NotIn(s.C(FieldSourceScope), v...)) + }) +} + +// SourceScopeGT applies the GT predicate on the "sourceScope" field. +func SourceScopeGT(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldSourceScope), v)) + }) +} + +// SourceScopeGTE applies the GTE predicate on the "sourceScope" field. +func SourceScopeGTE(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldSourceScope), v)) + }) +} + +// SourceScopeLT applies the LT predicate on the "sourceScope" field. +func SourceScopeLT(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldSourceScope), v)) + }) +} + +// SourceScopeLTE applies the LTE predicate on the "sourceScope" field. +func SourceScopeLTE(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldSourceScope), v)) + }) +} + +// SourceScopeContains applies the Contains predicate on the "sourceScope" field. +func SourceScopeContains(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.Contains(s.C(FieldSourceScope), v)) + }) +} + +// SourceScopeHasPrefix applies the HasPrefix predicate on the "sourceScope" field. +func SourceScopeHasPrefix(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.HasPrefix(s.C(FieldSourceScope), v)) + }) +} + +// SourceScopeHasSuffix applies the HasSuffix predicate on the "sourceScope" field. +func SourceScopeHasSuffix(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.HasSuffix(s.C(FieldSourceScope), v)) + }) +} + +// SourceScopeIsNil applies the IsNil predicate on the "sourceScope" field. +func SourceScopeIsNil() predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.IsNull(s.C(FieldSourceScope))) + }) +} + +// SourceScopeNotNil applies the NotNil predicate on the "sourceScope" field. +func SourceScopeNotNil() predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.NotNull(s.C(FieldSourceScope))) + }) +} + +// SourceScopeEqualFold applies the EqualFold predicate on the "sourceScope" field. +func SourceScopeEqualFold(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.EqualFold(s.C(FieldSourceScope), v)) + }) +} + +// SourceScopeContainsFold applies the ContainsFold predicate on the "sourceScope" field. +func SourceScopeContainsFold(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.ContainsFold(s.C(FieldSourceScope), v)) + }) +} + +// SourceValueEQ applies the EQ predicate on the "sourceValue" field. +func SourceValueEQ(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldSourceValue), v)) + }) +} + +// SourceValueNEQ applies the NEQ predicate on the "sourceValue" field. 
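
The float32 sourceLatitude / sourceLongitude predicates above compose into a rough geographic bounding-box filter. A sketch (coordinates illustrative, same assumed imports):

// alertsInBox matches alerts geolocated inside a fixed bounding box.
func alertsInBox(ctx context.Context, client *ent.Client) ([]*ent.Alert, error) {
	return client.Alert.Query().
		Where(
			alert.SourceLatitudeGTE(41.0), alert.SourceLatitudeLTE(51.0),
			alert.SourceLongitudeGTE(-5.0), alert.SourceLongitudeLTE(9.0),
		).
		All(ctx)
}
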
+func SourceValueNEQ(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldSourceValue), v)) + }) +} + +// SourceValueIn applies the In predicate on the "sourceValue" field. +func SourceValueIn(vs ...string) predicate.Alert { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Alert(func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(v) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.In(s.C(FieldSourceValue), v...)) + }) +} + +// SourceValueNotIn applies the NotIn predicate on the "sourceValue" field. +func SourceValueNotIn(vs ...string) predicate.Alert { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Alert(func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(v) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.NotIn(s.C(FieldSourceValue), v...)) + }) +} + +// SourceValueGT applies the GT predicate on the "sourceValue" field. +func SourceValueGT(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldSourceValue), v)) + }) +} + +// SourceValueGTE applies the GTE predicate on the "sourceValue" field. +func SourceValueGTE(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldSourceValue), v)) + }) +} + +// SourceValueLT applies the LT predicate on the "sourceValue" field. +func SourceValueLT(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldSourceValue), v)) + }) +} + +// SourceValueLTE applies the LTE predicate on the "sourceValue" field. +func SourceValueLTE(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldSourceValue), v)) + }) +} + +// SourceValueContains applies the Contains predicate on the "sourceValue" field. +func SourceValueContains(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.Contains(s.C(FieldSourceValue), v)) + }) +} + +// SourceValueHasPrefix applies the HasPrefix predicate on the "sourceValue" field. +func SourceValueHasPrefix(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.HasPrefix(s.C(FieldSourceValue), v)) + }) +} + +// SourceValueHasSuffix applies the HasSuffix predicate on the "sourceValue" field. +func SourceValueHasSuffix(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.HasSuffix(s.C(FieldSourceValue), v)) + }) +} + +// SourceValueIsNil applies the IsNil predicate on the "sourceValue" field. +func SourceValueIsNil() predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.IsNull(s.C(FieldSourceValue))) + }) +} + +// SourceValueNotNil applies the NotNil predicate on the "sourceValue" field. +func SourceValueNotNil() predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.NotNull(s.C(FieldSourceValue))) + }) +} + +// SourceValueEqualFold applies the EqualFold predicate on the "sourceValue" field. 
+func SourceValueEqualFold(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.EqualFold(s.C(FieldSourceValue), v)) + }) +} + +// SourceValueContainsFold applies the ContainsFold predicate on the "sourceValue" field. +func SourceValueContainsFold(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.ContainsFold(s.C(FieldSourceValue), v)) + }) +} + +// CapacityEQ applies the EQ predicate on the "capacity" field. +func CapacityEQ(v int32) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldCapacity), v)) + }) +} + +// CapacityNEQ applies the NEQ predicate on the "capacity" field. +func CapacityNEQ(v int32) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldCapacity), v)) + }) +} + +// CapacityIn applies the In predicate on the "capacity" field. +func CapacityIn(vs ...int32) predicate.Alert { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Alert(func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(v) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.In(s.C(FieldCapacity), v...)) + }) +} + +// CapacityNotIn applies the NotIn predicate on the "capacity" field. +func CapacityNotIn(vs ...int32) predicate.Alert { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Alert(func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(v) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.NotIn(s.C(FieldCapacity), v...)) + }) +} + +// CapacityGT applies the GT predicate on the "capacity" field. +func CapacityGT(v int32) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldCapacity), v)) + }) +} + +// CapacityGTE applies the GTE predicate on the "capacity" field. +func CapacityGTE(v int32) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldCapacity), v)) + }) +} + +// CapacityLT applies the LT predicate on the "capacity" field. +func CapacityLT(v int32) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldCapacity), v)) + }) +} + +// CapacityLTE applies the LTE predicate on the "capacity" field. +func CapacityLTE(v int32) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldCapacity), v)) + }) +} + +// CapacityIsNil applies the IsNil predicate on the "capacity" field. +func CapacityIsNil() predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.IsNull(s.C(FieldCapacity))) + }) +} + +// CapacityNotNil applies the NotNil predicate on the "capacity" field. +func CapacityNotNil() predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.NotNull(s.C(FieldCapacity))) + }) +} + +// LeakSpeedEQ applies the EQ predicate on the "leakSpeed" field. +func LeakSpeedEQ(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldLeakSpeed), v)) + }) +} + +// LeakSpeedNEQ applies the NEQ predicate on the "leakSpeed" field. 
+func LeakSpeedNEQ(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldLeakSpeed), v)) + }) +} + +// LeakSpeedIn applies the In predicate on the "leakSpeed" field. +func LeakSpeedIn(vs ...string) predicate.Alert { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Alert(func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(v) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.In(s.C(FieldLeakSpeed), v...)) + }) +} + +// LeakSpeedNotIn applies the NotIn predicate on the "leakSpeed" field. +func LeakSpeedNotIn(vs ...string) predicate.Alert { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Alert(func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(v) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.NotIn(s.C(FieldLeakSpeed), v...)) + }) +} + +// LeakSpeedGT applies the GT predicate on the "leakSpeed" field. +func LeakSpeedGT(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldLeakSpeed), v)) + }) +} + +// LeakSpeedGTE applies the GTE predicate on the "leakSpeed" field. +func LeakSpeedGTE(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldLeakSpeed), v)) + }) +} + +// LeakSpeedLT applies the LT predicate on the "leakSpeed" field. +func LeakSpeedLT(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldLeakSpeed), v)) + }) +} + +// LeakSpeedLTE applies the LTE predicate on the "leakSpeed" field. +func LeakSpeedLTE(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldLeakSpeed), v)) + }) +} + +// LeakSpeedContains applies the Contains predicate on the "leakSpeed" field. +func LeakSpeedContains(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.Contains(s.C(FieldLeakSpeed), v)) + }) +} + +// LeakSpeedHasPrefix applies the HasPrefix predicate on the "leakSpeed" field. +func LeakSpeedHasPrefix(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.HasPrefix(s.C(FieldLeakSpeed), v)) + }) +} + +// LeakSpeedHasSuffix applies the HasSuffix predicate on the "leakSpeed" field. +func LeakSpeedHasSuffix(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.HasSuffix(s.C(FieldLeakSpeed), v)) + }) +} + +// LeakSpeedIsNil applies the IsNil predicate on the "leakSpeed" field. +func LeakSpeedIsNil() predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.IsNull(s.C(FieldLeakSpeed))) + }) +} + +// LeakSpeedNotNil applies the NotNil predicate on the "leakSpeed" field. +func LeakSpeedNotNil() predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.NotNull(s.C(FieldLeakSpeed))) + }) +} + +// LeakSpeedEqualFold applies the EqualFold predicate on the "leakSpeed" field. +func LeakSpeedEqualFold(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.EqualFold(s.C(FieldLeakSpeed), v)) + }) +} + +// LeakSpeedContainsFold applies the ContainsFold predicate on the "leakSpeed" field. 
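
Individual predicates also nest inside the And/Or/Not combinators that ent generates at the top of a where.go file; those helpers sit outside the lines shown in this hunk, so their presence here is an assumption about the rest of the file. A sketch combining the capacity and leakSpeed predicates (values illustrative):

// bigOrDefaultBuckets matches alerts from large buckets, or from buckets
// with no recorded capacity but a 10s leak speed.
func bigOrDefaultBuckets(ctx context.Context, client *ent.Client) ([]*ent.Alert, error) {
	return client.Alert.Query().
		Where(alert.Or(
			alert.CapacityGTE(5),
			alert.And(alert.CapacityIsNil(), alert.LeakSpeedEQ("10s")),
		)).
		All(ctx)
}
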
+func LeakSpeedContainsFold(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.ContainsFold(s.C(FieldLeakSpeed), v)) + }) +} + +// ScenarioVersionEQ applies the EQ predicate on the "scenarioVersion" field. +func ScenarioVersionEQ(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldScenarioVersion), v)) + }) +} + +// ScenarioVersionNEQ applies the NEQ predicate on the "scenarioVersion" field. +func ScenarioVersionNEQ(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldScenarioVersion), v)) + }) +} + +// ScenarioVersionIn applies the In predicate on the "scenarioVersion" field. +func ScenarioVersionIn(vs ...string) predicate.Alert { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Alert(func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(v) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.In(s.C(FieldScenarioVersion), v...)) + }) +} + +// ScenarioVersionNotIn applies the NotIn predicate on the "scenarioVersion" field. +func ScenarioVersionNotIn(vs ...string) predicate.Alert { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Alert(func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(v) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.NotIn(s.C(FieldScenarioVersion), v...)) + }) +} + +// ScenarioVersionGT applies the GT predicate on the "scenarioVersion" field. +func ScenarioVersionGT(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldScenarioVersion), v)) + }) +} + +// ScenarioVersionGTE applies the GTE predicate on the "scenarioVersion" field. +func ScenarioVersionGTE(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldScenarioVersion), v)) + }) +} + +// ScenarioVersionLT applies the LT predicate on the "scenarioVersion" field. +func ScenarioVersionLT(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldScenarioVersion), v)) + }) +} + +// ScenarioVersionLTE applies the LTE predicate on the "scenarioVersion" field. +func ScenarioVersionLTE(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldScenarioVersion), v)) + }) +} + +// ScenarioVersionContains applies the Contains predicate on the "scenarioVersion" field. +func ScenarioVersionContains(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.Contains(s.C(FieldScenarioVersion), v)) + }) +} + +// ScenarioVersionHasPrefix applies the HasPrefix predicate on the "scenarioVersion" field. +func ScenarioVersionHasPrefix(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.HasPrefix(s.C(FieldScenarioVersion), v)) + }) +} + +// ScenarioVersionHasSuffix applies the HasSuffix predicate on the "scenarioVersion" field. +func ScenarioVersionHasSuffix(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.HasSuffix(s.C(FieldScenarioVersion), v)) + }) +} + +// ScenarioVersionIsNil applies the IsNil predicate on the "scenarioVersion" field. 
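
The *Fold variants are the case-insensitive forms of EQ and Contains. A short sketch under the same assumptions as the capacity example (hypothetical client and ctx, same imports):

// zeroDotVersions returns alerts from 0.x scenario versions with a 10s leak speed,
// matching the leak speed case-insensitively.
func zeroDotVersions(ctx context.Context, client *ent.Client) ([]*ent.Alert, error) {
	return client.Alert.Query().
		Where(
			alert.ScenarioVersionHasPrefix("0."),
			alert.LeakSpeedEqualFold("10s"),
		).
		All(ctx)
}
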
+func ScenarioVersionIsNil() predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.IsNull(s.C(FieldScenarioVersion))) + }) +} + +// ScenarioVersionNotNil applies the NotNil predicate on the "scenarioVersion" field. +func ScenarioVersionNotNil() predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.NotNull(s.C(FieldScenarioVersion))) + }) +} + +// ScenarioVersionEqualFold applies the EqualFold predicate on the "scenarioVersion" field. +func ScenarioVersionEqualFold(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.EqualFold(s.C(FieldScenarioVersion), v)) + }) +} + +// ScenarioVersionContainsFold applies the ContainsFold predicate on the "scenarioVersion" field. +func ScenarioVersionContainsFold(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.ContainsFold(s.C(FieldScenarioVersion), v)) + }) +} + +// ScenarioHashEQ applies the EQ predicate on the "scenarioHash" field. +func ScenarioHashEQ(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldScenarioHash), v)) + }) +} + +// ScenarioHashNEQ applies the NEQ predicate on the "scenarioHash" field. +func ScenarioHashNEQ(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldScenarioHash), v)) + }) +} + +// ScenarioHashIn applies the In predicate on the "scenarioHash" field. +func ScenarioHashIn(vs ...string) predicate.Alert { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Alert(func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(v) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.In(s.C(FieldScenarioHash), v...)) + }) +} + +// ScenarioHashNotIn applies the NotIn predicate on the "scenarioHash" field. +func ScenarioHashNotIn(vs ...string) predicate.Alert { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Alert(func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(v) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.NotIn(s.C(FieldScenarioHash), v...)) + }) +} + +// ScenarioHashGT applies the GT predicate on the "scenarioHash" field. +func ScenarioHashGT(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldScenarioHash), v)) + }) +} + +// ScenarioHashGTE applies the GTE predicate on the "scenarioHash" field. +func ScenarioHashGTE(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldScenarioHash), v)) + }) +} + +// ScenarioHashLT applies the LT predicate on the "scenarioHash" field. +func ScenarioHashLT(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldScenarioHash), v)) + }) +} + +// ScenarioHashLTE applies the LTE predicate on the "scenarioHash" field. +func ScenarioHashLTE(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldScenarioHash), v)) + }) +} + +// ScenarioHashContains applies the Contains predicate on the "scenarioHash" field. 
+func ScenarioHashContains(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.Contains(s.C(FieldScenarioHash), v)) + }) +} + +// ScenarioHashHasPrefix applies the HasPrefix predicate on the "scenarioHash" field. +func ScenarioHashHasPrefix(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.HasPrefix(s.C(FieldScenarioHash), v)) + }) +} + +// ScenarioHashHasSuffix applies the HasSuffix predicate on the "scenarioHash" field. +func ScenarioHashHasSuffix(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.HasSuffix(s.C(FieldScenarioHash), v)) + }) +} + +// ScenarioHashIsNil applies the IsNil predicate on the "scenarioHash" field. +func ScenarioHashIsNil() predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.IsNull(s.C(FieldScenarioHash))) + }) +} + +// ScenarioHashNotNil applies the NotNil predicate on the "scenarioHash" field. +func ScenarioHashNotNil() predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.NotNull(s.C(FieldScenarioHash))) + }) +} + +// ScenarioHashEqualFold applies the EqualFold predicate on the "scenarioHash" field. +func ScenarioHashEqualFold(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.EqualFold(s.C(FieldScenarioHash), v)) + }) +} + +// ScenarioHashContainsFold applies the ContainsFold predicate on the "scenarioHash" field. +func ScenarioHashContainsFold(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.ContainsFold(s.C(FieldScenarioHash), v)) + }) +} + +// SimulatedEQ applies the EQ predicate on the "simulated" field. +func SimulatedEQ(v bool) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldSimulated), v)) + }) +} + +// SimulatedNEQ applies the NEQ predicate on the "simulated" field. +func SimulatedNEQ(v bool) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldSimulated), v)) + }) +} + +// HasOwner applies the HasEdge predicate on the "owner" edge. +func HasOwner() predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(OwnerTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, OwnerTable, OwnerColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasOwnerWith applies the HasEdge predicate on the "owner" edge with a given conditions (other predicates). +func HasOwnerWith(preds ...predicate.Machine) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(OwnerInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, OwnerTable, OwnerColumn), + ) + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// HasDecisions applies the HasEdge predicate on the "decisions" edge. +func HasDecisions() predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(DecisionsTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, DecisionsTable, DecisionsColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasDecisionsWith applies the HasEdge predicate on the "decisions" edge with a given conditions (other predicates). 
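
HasOwner and HasOwnerWith express graph traversals as predicates: an alert matches when its owning machine satisfies the nested conditions. Together with And, Or and Not (defined at the end of this file) they keep compound filters composable. A sketch, assuming the standard generated machine.IDEQ predicate:

import (
	"github.com/crowdsecurity/crowdsec/pkg/database/ent/machine"
)

// realAlertsFromMachine returns non-simulated alerts owned by the given machine.
func realAlertsFromMachine(ctx context.Context, client *ent.Client, machineID int) ([]*ent.Alert, error) {
	return client.Alert.Query().
		Where(alert.And(
			alert.Not(alert.SimulatedEQ(true)),
			alert.HasOwnerWith(machine.IDEQ(machineID)),
		)).
		All(ctx)
}
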
+func HasDecisionsWith(preds ...predicate.Decision) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(DecisionsInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, DecisionsTable, DecisionsColumn), + ) + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// HasEvents applies the HasEdge predicate on the "events" edge. +func HasEvents() predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(EventsTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, EventsTable, EventsColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasEventsWith applies the HasEdge predicate on the "events" edge with a given conditions (other predicates). +func HasEventsWith(preds ...predicate.Event) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(EventsInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, EventsTable, EventsColumn), + ) + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// HasMetas applies the HasEdge predicate on the "metas" edge. +func HasMetas() predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(MetasTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, MetasTable, MetasColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasMetasWith applies the HasEdge predicate on the "metas" edge with a given conditions (other predicates). +func HasMetasWith(preds ...predicate.Meta) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(MetasInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, MetasTable, MetasColumn), + ) + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// And groups list of predicates with the AND operator between them. +func And(predicates ...predicate.Alert) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s1 := s.Clone().SetP(nil) + for _, p := range predicates { + p(s1) + } + s.Where(s1.P()) + }) +} + +// Or groups list of predicates with the OR operator between them. +func Or(predicates ...predicate.Alert) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s1 := s.Clone().SetP(nil) + for i, p := range predicates { + if i > 0 { + s1.Or() + } + p(s1) + } + s.Where(s1.P()) + }) +} + +// Not applies the not operator on the given predicate. +func Not(p predicate.Alert) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + p(s.Not()) + }) +} diff --git a/pkg/database/ent/alert_create.go b/pkg/database/ent/alert_create.go new file mode 100644 index 000000000..de5daa144 --- /dev/null +++ b/pkg/database/ent/alert_create.go @@ -0,0 +1,838 @@ +// Code generated by entc, DO NOT EDIT. 
+ +package ent + +import ( + "context" + "errors" + "fmt" + "time" + + "github.com/crowdsecurity/crowdsec/pkg/database/ent/alert" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/decision" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/event" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/machine" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/meta" + "github.com/facebook/ent/dialect/sql/sqlgraph" + "github.com/facebook/ent/schema/field" +) + +// AlertCreate is the builder for creating a Alert entity. +type AlertCreate struct { + config + mutation *AlertMutation + hooks []Hook +} + +// SetCreatedAt sets the created_at field. +func (ac *AlertCreate) SetCreatedAt(t time.Time) *AlertCreate { + ac.mutation.SetCreatedAt(t) + return ac +} + +// SetNillableCreatedAt sets the created_at field if the given value is not nil. +func (ac *AlertCreate) SetNillableCreatedAt(t *time.Time) *AlertCreate { + if t != nil { + ac.SetCreatedAt(*t) + } + return ac +} + +// SetUpdatedAt sets the updated_at field. +func (ac *AlertCreate) SetUpdatedAt(t time.Time) *AlertCreate { + ac.mutation.SetUpdatedAt(t) + return ac +} + +// SetNillableUpdatedAt sets the updated_at field if the given value is not nil. +func (ac *AlertCreate) SetNillableUpdatedAt(t *time.Time) *AlertCreate { + if t != nil { + ac.SetUpdatedAt(*t) + } + return ac +} + +// SetScenario sets the scenario field. +func (ac *AlertCreate) SetScenario(s string) *AlertCreate { + ac.mutation.SetScenario(s) + return ac +} + +// SetBucketId sets the bucketId field. +func (ac *AlertCreate) SetBucketId(s string) *AlertCreate { + ac.mutation.SetBucketId(s) + return ac +} + +// SetNillableBucketId sets the bucketId field if the given value is not nil. +func (ac *AlertCreate) SetNillableBucketId(s *string) *AlertCreate { + if s != nil { + ac.SetBucketId(*s) + } + return ac +} + +// SetMessage sets the message field. +func (ac *AlertCreate) SetMessage(s string) *AlertCreate { + ac.mutation.SetMessage(s) + return ac +} + +// SetNillableMessage sets the message field if the given value is not nil. +func (ac *AlertCreate) SetNillableMessage(s *string) *AlertCreate { + if s != nil { + ac.SetMessage(*s) + } + return ac +} + +// SetEventsCount sets the eventsCount field. +func (ac *AlertCreate) SetEventsCount(i int32) *AlertCreate { + ac.mutation.SetEventsCount(i) + return ac +} + +// SetNillableEventsCount sets the eventsCount field if the given value is not nil. +func (ac *AlertCreate) SetNillableEventsCount(i *int32) *AlertCreate { + if i != nil { + ac.SetEventsCount(*i) + } + return ac +} + +// SetStartedAt sets the startedAt field. +func (ac *AlertCreate) SetStartedAt(t time.Time) *AlertCreate { + ac.mutation.SetStartedAt(t) + return ac +} + +// SetNillableStartedAt sets the startedAt field if the given value is not nil. +func (ac *AlertCreate) SetNillableStartedAt(t *time.Time) *AlertCreate { + if t != nil { + ac.SetStartedAt(*t) + } + return ac +} + +// SetStoppedAt sets the stoppedAt field. +func (ac *AlertCreate) SetStoppedAt(t time.Time) *AlertCreate { + ac.mutation.SetStoppedAt(t) + return ac +} + +// SetNillableStoppedAt sets the stoppedAt field if the given value is not nil. +func (ac *AlertCreate) SetNillableStoppedAt(t *time.Time) *AlertCreate { + if t != nil { + ac.SetStoppedAt(*t) + } + return ac +} + +// SetSourceIp sets the sourceIp field. +func (ac *AlertCreate) SetSourceIp(s string) *AlertCreate { + ac.mutation.SetSourceIp(s) + return ac +} + +// SetNillableSourceIp sets the sourceIp field if the given value is not nil. 
+func (ac *AlertCreate) SetNillableSourceIp(s *string) *AlertCreate { + if s != nil { + ac.SetSourceIp(*s) + } + return ac +} + +// SetSourceRange sets the sourceRange field. +func (ac *AlertCreate) SetSourceRange(s string) *AlertCreate { + ac.mutation.SetSourceRange(s) + return ac +} + +// SetNillableSourceRange sets the sourceRange field if the given value is not nil. +func (ac *AlertCreate) SetNillableSourceRange(s *string) *AlertCreate { + if s != nil { + ac.SetSourceRange(*s) + } + return ac +} + +// SetSourceAsNumber sets the sourceAsNumber field. +func (ac *AlertCreate) SetSourceAsNumber(s string) *AlertCreate { + ac.mutation.SetSourceAsNumber(s) + return ac +} + +// SetNillableSourceAsNumber sets the sourceAsNumber field if the given value is not nil. +func (ac *AlertCreate) SetNillableSourceAsNumber(s *string) *AlertCreate { + if s != nil { + ac.SetSourceAsNumber(*s) + } + return ac +} + +// SetSourceAsName sets the sourceAsName field. +func (ac *AlertCreate) SetSourceAsName(s string) *AlertCreate { + ac.mutation.SetSourceAsName(s) + return ac +} + +// SetNillableSourceAsName sets the sourceAsName field if the given value is not nil. +func (ac *AlertCreate) SetNillableSourceAsName(s *string) *AlertCreate { + if s != nil { + ac.SetSourceAsName(*s) + } + return ac +} + +// SetSourceCountry sets the sourceCountry field. +func (ac *AlertCreate) SetSourceCountry(s string) *AlertCreate { + ac.mutation.SetSourceCountry(s) + return ac +} + +// SetNillableSourceCountry sets the sourceCountry field if the given value is not nil. +func (ac *AlertCreate) SetNillableSourceCountry(s *string) *AlertCreate { + if s != nil { + ac.SetSourceCountry(*s) + } + return ac +} + +// SetSourceLatitude sets the sourceLatitude field. +func (ac *AlertCreate) SetSourceLatitude(f float32) *AlertCreate { + ac.mutation.SetSourceLatitude(f) + return ac +} + +// SetNillableSourceLatitude sets the sourceLatitude field if the given value is not nil. +func (ac *AlertCreate) SetNillableSourceLatitude(f *float32) *AlertCreate { + if f != nil { + ac.SetSourceLatitude(*f) + } + return ac +} + +// SetSourceLongitude sets the sourceLongitude field. +func (ac *AlertCreate) SetSourceLongitude(f float32) *AlertCreate { + ac.mutation.SetSourceLongitude(f) + return ac +} + +// SetNillableSourceLongitude sets the sourceLongitude field if the given value is not nil. +func (ac *AlertCreate) SetNillableSourceLongitude(f *float32) *AlertCreate { + if f != nil { + ac.SetSourceLongitude(*f) + } + return ac +} + +// SetSourceScope sets the sourceScope field. +func (ac *AlertCreate) SetSourceScope(s string) *AlertCreate { + ac.mutation.SetSourceScope(s) + return ac +} + +// SetNillableSourceScope sets the sourceScope field if the given value is not nil. +func (ac *AlertCreate) SetNillableSourceScope(s *string) *AlertCreate { + if s != nil { + ac.SetSourceScope(*s) + } + return ac +} + +// SetSourceValue sets the sourceValue field. +func (ac *AlertCreate) SetSourceValue(s string) *AlertCreate { + ac.mutation.SetSourceValue(s) + return ac +} + +// SetNillableSourceValue sets the sourceValue field if the given value is not nil. +func (ac *AlertCreate) SetNillableSourceValue(s *string) *AlertCreate { + if s != nil { + ac.SetSourceValue(*s) + } + return ac +} + +// SetCapacity sets the capacity field. +func (ac *AlertCreate) SetCapacity(i int32) *AlertCreate { + ac.mutation.SetCapacity(i) + return ac +} + +// SetNillableCapacity sets the capacity field if the given value is not nil. 
+func (ac *AlertCreate) SetNillableCapacity(i *int32) *AlertCreate { + if i != nil { + ac.SetCapacity(*i) + } + return ac +} + +// SetLeakSpeed sets the leakSpeed field. +func (ac *AlertCreate) SetLeakSpeed(s string) *AlertCreate { + ac.mutation.SetLeakSpeed(s) + return ac +} + +// SetNillableLeakSpeed sets the leakSpeed field if the given value is not nil. +func (ac *AlertCreate) SetNillableLeakSpeed(s *string) *AlertCreate { + if s != nil { + ac.SetLeakSpeed(*s) + } + return ac +} + +// SetScenarioVersion sets the scenarioVersion field. +func (ac *AlertCreate) SetScenarioVersion(s string) *AlertCreate { + ac.mutation.SetScenarioVersion(s) + return ac +} + +// SetNillableScenarioVersion sets the scenarioVersion field if the given value is not nil. +func (ac *AlertCreate) SetNillableScenarioVersion(s *string) *AlertCreate { + if s != nil { + ac.SetScenarioVersion(*s) + } + return ac +} + +// SetScenarioHash sets the scenarioHash field. +func (ac *AlertCreate) SetScenarioHash(s string) *AlertCreate { + ac.mutation.SetScenarioHash(s) + return ac +} + +// SetNillableScenarioHash sets the scenarioHash field if the given value is not nil. +func (ac *AlertCreate) SetNillableScenarioHash(s *string) *AlertCreate { + if s != nil { + ac.SetScenarioHash(*s) + } + return ac +} + +// SetSimulated sets the simulated field. +func (ac *AlertCreate) SetSimulated(b bool) *AlertCreate { + ac.mutation.SetSimulated(b) + return ac +} + +// SetNillableSimulated sets the simulated field if the given value is not nil. +func (ac *AlertCreate) SetNillableSimulated(b *bool) *AlertCreate { + if b != nil { + ac.SetSimulated(*b) + } + return ac +} + +// SetOwnerID sets the owner edge to Machine by id. +func (ac *AlertCreate) SetOwnerID(id int) *AlertCreate { + ac.mutation.SetOwnerID(id) + return ac +} + +// SetNillableOwnerID sets the owner edge to Machine by id if the given value is not nil. +func (ac *AlertCreate) SetNillableOwnerID(id *int) *AlertCreate { + if id != nil { + ac = ac.SetOwnerID(*id) + } + return ac +} + +// SetOwner sets the owner edge to Machine. +func (ac *AlertCreate) SetOwner(m *Machine) *AlertCreate { + return ac.SetOwnerID(m.ID) +} + +// AddDecisionIDs adds the decisions edge to Decision by ids. +func (ac *AlertCreate) AddDecisionIDs(ids ...int) *AlertCreate { + ac.mutation.AddDecisionIDs(ids...) + return ac +} + +// AddDecisions adds the decisions edges to Decision. +func (ac *AlertCreate) AddDecisions(d ...*Decision) *AlertCreate { + ids := make([]int, len(d)) + for i := range d { + ids[i] = d[i].ID + } + return ac.AddDecisionIDs(ids...) +} + +// AddEventIDs adds the events edge to Event by ids. +func (ac *AlertCreate) AddEventIDs(ids ...int) *AlertCreate { + ac.mutation.AddEventIDs(ids...) + return ac +} + +// AddEvents adds the events edges to Event. +func (ac *AlertCreate) AddEvents(e ...*Event) *AlertCreate { + ids := make([]int, len(e)) + for i := range e { + ids[i] = e[i].ID + } + return ac.AddEventIDs(ids...) +} + +// AddMetaIDs adds the metas edge to Meta by ids. +func (ac *AlertCreate) AddMetaIDs(ids ...int) *AlertCreate { + ac.mutation.AddMetaIDs(ids...) + return ac +} + +// AddMetas adds the metas edges to Meta. +func (ac *AlertCreate) AddMetas(m ...*Meta) *AlertCreate { + ids := make([]int, len(m)) + for i := range m { + ids[i] = m[i].ID + } + return ac.AddMetaIDs(ids...) +} + +// Mutation returns the AlertMutation object of the builder. +func (ac *AlertCreate) Mutation() *AlertMutation { + return ac.mutation +} + +// Save creates the Alert in the database. 
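
Taken together, the setters give AlertCreate a fluent shape: required fields use plain Set*, every optional field has a SetNillable* twin that silently skips nil pointers, and edges are attached either by ID or by entity. A minimal creation sketch before Save below (client, ctx and owner are hypothetical):

func createAlert(ctx context.Context, client *ent.Client, owner *ent.Machine) (*ent.Alert, error) {
	return client.Alert.Create().
		SetScenario("crowdsecurity/ssh-bf"). // the one field check() insists on
		SetOwner(owner).                     // optional M2O edge to the owning machine
		SetNillableMessage(nil).             // no-op: nil pointers are skipped
		Save(ctx)                            // defaults() backfills timestamps first
}
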
+func (ac *AlertCreate) Save(ctx context.Context) (*Alert, error) { + var ( + err error + node *Alert + ) + ac.defaults() + if len(ac.hooks) == 0 { + if err = ac.check(); err != nil { + return nil, err + } + node, err = ac.sqlSave(ctx) + } else { + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*AlertMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err = ac.check(); err != nil { + return nil, err + } + ac.mutation = mutation + node, err = ac.sqlSave(ctx) + mutation.done = true + return node, err + }) + for i := len(ac.hooks) - 1; i >= 0; i-- { + mut = ac.hooks[i](mut) + } + if _, err := mut.Mutate(ctx, ac.mutation); err != nil { + return nil, err + } + } + return node, err +} + +// SaveX calls Save and panics if Save returns an error. +func (ac *AlertCreate) SaveX(ctx context.Context) *Alert { + v, err := ac.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// defaults sets the default values of the builder before save. +func (ac *AlertCreate) defaults() { + if _, ok := ac.mutation.CreatedAt(); !ok { + v := alert.DefaultCreatedAt() + ac.mutation.SetCreatedAt(v) + } + if _, ok := ac.mutation.UpdatedAt(); !ok { + v := alert.DefaultUpdatedAt() + ac.mutation.SetUpdatedAt(v) + } + if _, ok := ac.mutation.BucketId(); !ok { + v := alert.DefaultBucketId + ac.mutation.SetBucketId(v) + } + if _, ok := ac.mutation.Message(); !ok { + v := alert.DefaultMessage + ac.mutation.SetMessage(v) + } + if _, ok := ac.mutation.EventsCount(); !ok { + v := alert.DefaultEventsCount + ac.mutation.SetEventsCount(v) + } + if _, ok := ac.mutation.StartedAt(); !ok { + v := alert.DefaultStartedAt() + ac.mutation.SetStartedAt(v) + } + if _, ok := ac.mutation.StoppedAt(); !ok { + v := alert.DefaultStoppedAt() + ac.mutation.SetStoppedAt(v) + } + if _, ok := ac.mutation.Simulated(); !ok { + v := alert.DefaultSimulated + ac.mutation.SetSimulated(v) + } +} + +// check runs all checks and user-defined validators on the builder. 
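
Because defaults() runs before validation, the created_at, updated_at and simulated requirements enforced by the check method that follows are normally satisfied automatically, leaving scenario as the only field a caller must set. A sketch of surfacing the resulting error, assuming ValidationError implements error as in standard entc output:

// Hypothetical error handling; errors and fmt come from the standard library.
if _, err := client.Alert.Create().Save(ctx); err != nil {
	var verr *ent.ValidationError
	if errors.As(err, &verr) {
		fmt.Printf("missing required field: %s\n", verr.Name) // prints "scenario"
	}
}
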
+func (ac *AlertCreate) check() error { + if _, ok := ac.mutation.CreatedAt(); !ok { + return &ValidationError{Name: "created_at", err: errors.New("ent: missing required field \"created_at\"")} + } + if _, ok := ac.mutation.UpdatedAt(); !ok { + return &ValidationError{Name: "updated_at", err: errors.New("ent: missing required field \"updated_at\"")} + } + if _, ok := ac.mutation.Scenario(); !ok { + return &ValidationError{Name: "scenario", err: errors.New("ent: missing required field \"scenario\"")} + } + if _, ok := ac.mutation.Simulated(); !ok { + return &ValidationError{Name: "simulated", err: errors.New("ent: missing required field \"simulated\"")} + } + return nil +} + +func (ac *AlertCreate) sqlSave(ctx context.Context) (*Alert, error) { + _node, _spec := ac.createSpec() + if err := sqlgraph.CreateNode(ctx, ac.driver, _spec); err != nil { + if cerr, ok := isSQLConstraintError(err); ok { + err = cerr + } + return nil, err + } + id := _spec.ID.Value.(int64) + _node.ID = int(id) + return _node, nil +} + +func (ac *AlertCreate) createSpec() (*Alert, *sqlgraph.CreateSpec) { + var ( + _node = &Alert{config: ac.config} + _spec = &sqlgraph.CreateSpec{ + Table: alert.Table, + ID: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: alert.FieldID, + }, + } + ) + if value, ok := ac.mutation.CreatedAt(); ok { + _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: alert.FieldCreatedAt, + }) + _node.CreatedAt = value + } + if value, ok := ac.mutation.UpdatedAt(); ok { + _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: alert.FieldUpdatedAt, + }) + _node.UpdatedAt = value + } + if value, ok := ac.mutation.Scenario(); ok { + _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: alert.FieldScenario, + }) + _node.Scenario = value + } + if value, ok := ac.mutation.BucketId(); ok { + _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: alert.FieldBucketId, + }) + _node.BucketId = value + } + if value, ok := ac.mutation.Message(); ok { + _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: alert.FieldMessage, + }) + _node.Message = value + } + if value, ok := ac.mutation.EventsCount(); ok { + _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ + Type: field.TypeInt32, + Value: value, + Column: alert.FieldEventsCount, + }) + _node.EventsCount = value + } + if value, ok := ac.mutation.StartedAt(); ok { + _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: alert.FieldStartedAt, + }) + _node.StartedAt = value + } + if value, ok := ac.mutation.StoppedAt(); ok { + _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: alert.FieldStoppedAt, + }) + _node.StoppedAt = value + } + if value, ok := ac.mutation.SourceIp(); ok { + _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: alert.FieldSourceIp, + }) + _node.SourceIp = value + } + if value, ok := ac.mutation.SourceRange(); ok { + _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: alert.FieldSourceRange, + }) + _node.SourceRange = value + } + if value, ok := ac.mutation.SourceAsNumber(); ok { + _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ + Type: 
field.TypeString, + Value: value, + Column: alert.FieldSourceAsNumber, + }) + _node.SourceAsNumber = value + } + if value, ok := ac.mutation.SourceAsName(); ok { + _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: alert.FieldSourceAsName, + }) + _node.SourceAsName = value + } + if value, ok := ac.mutation.SourceCountry(); ok { + _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: alert.FieldSourceCountry, + }) + _node.SourceCountry = value + } + if value, ok := ac.mutation.SourceLatitude(); ok { + _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ + Type: field.TypeFloat32, + Value: value, + Column: alert.FieldSourceLatitude, + }) + _node.SourceLatitude = value + } + if value, ok := ac.mutation.SourceLongitude(); ok { + _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ + Type: field.TypeFloat32, + Value: value, + Column: alert.FieldSourceLongitude, + }) + _node.SourceLongitude = value + } + if value, ok := ac.mutation.SourceScope(); ok { + _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: alert.FieldSourceScope, + }) + _node.SourceScope = value + } + if value, ok := ac.mutation.SourceValue(); ok { + _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: alert.FieldSourceValue, + }) + _node.SourceValue = value + } + if value, ok := ac.mutation.Capacity(); ok { + _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ + Type: field.TypeInt32, + Value: value, + Column: alert.FieldCapacity, + }) + _node.Capacity = value + } + if value, ok := ac.mutation.LeakSpeed(); ok { + _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: alert.FieldLeakSpeed, + }) + _node.LeakSpeed = value + } + if value, ok := ac.mutation.ScenarioVersion(); ok { + _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: alert.FieldScenarioVersion, + }) + _node.ScenarioVersion = value + } + if value, ok := ac.mutation.ScenarioHash(); ok { + _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: alert.FieldScenarioHash, + }) + _node.ScenarioHash = value + } + if value, ok := ac.mutation.Simulated(); ok { + _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ + Type: field.TypeBool, + Value: value, + Column: alert.FieldSimulated, + }) + _node.Simulated = value + } + if nodes := ac.mutation.OwnerIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: alert.OwnerTable, + Columns: []string{alert.OwnerColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: machine.FieldID, + }, + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges = append(_spec.Edges, edge) + } + if nodes := ac.mutation.DecisionsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: alert.DecisionsTable, + Columns: []string{alert.DecisionsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: decision.FieldID, + }, + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges = append(_spec.Edges, edge) + } + if nodes := ac.mutation.EventsIDs(); 
len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: alert.EventsTable, + Columns: []string{alert.EventsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: event.FieldID, + }, + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges = append(_spec.Edges, edge) + } + if nodes := ac.mutation.MetasIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: alert.MetasTable, + Columns: []string{alert.MetasColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: meta.FieldID, + }, + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges = append(_spec.Edges, edge) + } + return _node, _spec +} + +// AlertCreateBulk is the builder for creating a bulk of Alert entities. +type AlertCreateBulk struct { + config + builders []*AlertCreate +} + +// Save creates the Alert entities in the database. +func (acb *AlertCreateBulk) Save(ctx context.Context) ([]*Alert, error) { + specs := make([]*sqlgraph.CreateSpec, len(acb.builders)) + nodes := make([]*Alert, len(acb.builders)) + mutators := make([]Mutator, len(acb.builders)) + for i := range acb.builders { + func(i int, root context.Context) { + builder := acb.builders[i] + builder.defaults() + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*AlertMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err := builder.check(); err != nil { + return nil, err + } + builder.mutation = mutation + nodes[i], specs[i] = builder.createSpec() + var err error + if i < len(mutators)-1 { + _, err = mutators[i+1].Mutate(root, acb.builders[i+1].mutation) + } else { + // Invoke the actual operation on the latest mutation in the chain. + if err = sqlgraph.BatchCreate(ctx, acb.driver, &sqlgraph.BatchCreateSpec{Nodes: specs}); err != nil { + if cerr, ok := isSQLConstraintError(err); ok { + err = cerr + } + } + } + mutation.done = true + if err != nil { + return nil, err + } + id := specs[i].ID.Value.(int64) + nodes[i].ID = int(id) + return nodes[i], nil + }) + for i := len(builder.hooks) - 1; i >= 0; i-- { + mut = builder.hooks[i](mut) + } + mutators[i] = mut + }(i, ctx) + } + if len(mutators) > 0 { + if _, err := mutators[0].Mutate(ctx, acb.builders[0].mutation); err != nil { + return nil, err + } + } + return nodes, nil +} + +// SaveX calls Save and panics if Save returns an error. +func (acb *AlertCreateBulk) SaveX(ctx context.Context) []*Alert { + v, err := acb.Save(ctx) + if err != nil { + panic(err) + } + return v +} diff --git a/pkg/database/ent/alert_delete.go b/pkg/database/ent/alert_delete.go new file mode 100644 index 000000000..2532aa915 --- /dev/null +++ b/pkg/database/ent/alert_delete.go @@ -0,0 +1,109 @@ +// Code generated by entc, DO NOT EDIT. + +package ent + +import ( + "context" + "fmt" + + "github.com/crowdsecurity/crowdsec/pkg/database/ent/alert" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/predicate" + "github.com/facebook/ent/dialect/sql" + "github.com/facebook/ent/dialect/sql/sqlgraph" + "github.com/facebook/ent/schema/field" +) + +// AlertDelete is the builder for deleting a Alert entity. 
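
AlertCreateBulk chains one mutator per builder, so per-entity hooks and checks still run, and only the last link in the chain issues the batched INSERT via sqlgraph.BatchCreate. A sketch, assuming the conventional entc CreateBulk accessor on the alert client (not shown in this hunk):

func seedAlerts(ctx context.Context, client *ent.Client, scenarios []string) ([]*ent.Alert, error) {
	builders := make([]*ent.AlertCreate, 0, len(scenarios))
	for _, s := range scenarios {
		builders = append(builders, client.Alert.Create().SetScenario(s))
	}
	return client.Alert.CreateBulk(builders...).Save(ctx) // one INSERT for all rows
}
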
+type AlertDelete struct { + config + hooks []Hook + mutation *AlertMutation + predicates []predicate.Alert +} + +// Where adds a new predicate to the delete builder. +func (ad *AlertDelete) Where(ps ...predicate.Alert) *AlertDelete { + ad.predicates = append(ad.predicates, ps...) + return ad +} + +// Exec executes the deletion query and returns how many vertices were deleted. +func (ad *AlertDelete) Exec(ctx context.Context) (int, error) { + var ( + err error + affected int + ) + if len(ad.hooks) == 0 { + affected, err = ad.sqlExec(ctx) + } else { + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*AlertMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + ad.mutation = mutation + affected, err = ad.sqlExec(ctx) + mutation.done = true + return affected, err + }) + for i := len(ad.hooks) - 1; i >= 0; i-- { + mut = ad.hooks[i](mut) + } + if _, err := mut.Mutate(ctx, ad.mutation); err != nil { + return 0, err + } + } + return affected, err +} + +// ExecX is like Exec, but panics if an error occurs. +func (ad *AlertDelete) ExecX(ctx context.Context) int { + n, err := ad.Exec(ctx) + if err != nil { + panic(err) + } + return n +} + +func (ad *AlertDelete) sqlExec(ctx context.Context) (int, error) { + _spec := &sqlgraph.DeleteSpec{ + Node: &sqlgraph.NodeSpec{ + Table: alert.Table, + ID: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: alert.FieldID, + }, + }, + } + if ps := ad.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + return sqlgraph.DeleteNodes(ctx, ad.driver, _spec) +} + +// AlertDeleteOne is the builder for deleting a single Alert entity. +type AlertDeleteOne struct { + ad *AlertDelete +} + +// Exec executes the deletion query. +func (ado *AlertDeleteOne) Exec(ctx context.Context) error { + n, err := ado.ad.Exec(ctx) + switch { + case err != nil: + return err + case n == 0: + return &NotFoundError{alert.Label} + default: + return nil + } +} + +// ExecX is like Exec, but panics if an error occurs. +func (ado *AlertDeleteOne) ExecX(ctx context.Context) { + ado.ad.ExecX(ctx) +} diff --git a/pkg/database/ent/alert_query.go b/pkg/database/ent/alert_query.go new file mode 100644 index 000000000..55d196953 --- /dev/null +++ b/pkg/database/ent/alert_query.go @@ -0,0 +1,1150 @@ +// Code generated by entc, DO NOT EDIT. + +package ent + +import ( + "context" + "database/sql/driver" + "errors" + "fmt" + "math" + + "github.com/crowdsecurity/crowdsec/pkg/database/ent/alert" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/decision" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/event" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/machine" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/meta" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/predicate" + "github.com/facebook/ent/dialect/sql" + "github.com/facebook/ent/dialect/sql/sqlgraph" + "github.com/facebook/ent/schema/field" +) + +// AlertQuery is the builder for querying Alert entities. +type AlertQuery struct { + config + limit *int + offset *int + order []OrderFunc + unique []string + predicates []predicate.Alert + // eager-loading edges. + withOwner *MachineQuery + withDecisions *DecisionQuery + withEvents *EventQuery + withMetas *MetaQuery + withFKs bool + // intermediate query (i.e. traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Where adds a new predicate for the builder. 
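
The delete builder above mirrors the query builder: predicates narrow the target set and Exec reports how many rows were removed. A sketch against the hypothetical client used in the earlier examples:

func purgeScenario(ctx context.Context, client *ent.Client, scenario string) (int, error) {
	return client.Alert.Delete().
		Where(alert.ScenarioEQ(scenario)).
		Exec(ctx) // number of deleted alerts
}
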
+func (aq *AlertQuery) Where(ps ...predicate.Alert) *AlertQuery { + aq.predicates = append(aq.predicates, ps...) + return aq +} + +// Limit adds a limit step to the query. +func (aq *AlertQuery) Limit(limit int) *AlertQuery { + aq.limit = &limit + return aq +} + +// Offset adds an offset step to the query. +func (aq *AlertQuery) Offset(offset int) *AlertQuery { + aq.offset = &offset + return aq +} + +// Order adds an order step to the query. +func (aq *AlertQuery) Order(o ...OrderFunc) *AlertQuery { + aq.order = append(aq.order, o...) + return aq +} + +// QueryOwner chains the current query on the owner edge. +func (aq *AlertQuery) QueryOwner() *MachineQuery { + query := &MachineQuery{config: aq.config} + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := aq.prepareQuery(ctx); err != nil { + return nil, err + } + selector := aq.sqlQuery() + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(alert.Table, alert.FieldID, selector), + sqlgraph.To(machine.Table, machine.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, alert.OwnerTable, alert.OwnerColumn), + ) + fromU = sqlgraph.SetNeighbors(aq.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// QueryDecisions chains the current query on the decisions edge. +func (aq *AlertQuery) QueryDecisions() *DecisionQuery { + query := &DecisionQuery{config: aq.config} + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := aq.prepareQuery(ctx); err != nil { + return nil, err + } + selector := aq.sqlQuery() + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(alert.Table, alert.FieldID, selector), + sqlgraph.To(decision.Table, decision.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, alert.DecisionsTable, alert.DecisionsColumn), + ) + fromU = sqlgraph.SetNeighbors(aq.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// QueryEvents chains the current query on the events edge. +func (aq *AlertQuery) QueryEvents() *EventQuery { + query := &EventQuery{config: aq.config} + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := aq.prepareQuery(ctx); err != nil { + return nil, err + } + selector := aq.sqlQuery() + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(alert.Table, alert.FieldID, selector), + sqlgraph.To(event.Table, event.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, alert.EventsTable, alert.EventsColumn), + ) + fromU = sqlgraph.SetNeighbors(aq.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// QueryMetas chains the current query on the metas edge. +func (aq *AlertQuery) QueryMetas() *MetaQuery { + query := &MetaQuery{config: aq.config} + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := aq.prepareQuery(ctx); err != nil { + return nil, err + } + selector := aq.sqlQuery() + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(alert.Table, alert.FieldID, selector), + sqlgraph.To(meta.Table, meta.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, alert.MetasTable, alert.MetasColumn), + ) + fromU = sqlgraph.SetNeighbors(aq.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// First returns the first Alert entity in the query. Returns *NotFoundError when no alert was found. 
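
The Query* methods turn an alert query into a query over its neighbors, so traversals chain without hand-written joins. A sketch fetching the decisions attached to one alert, assuming the standard generated alert.IDEQ predicate:

func decisionsForAlert(ctx context.Context, client *ent.Client, alertID int) ([]*ent.Decision, error) {
	return client.Alert.Query().
		Where(alert.IDEQ(alertID)).
		QueryDecisions(). // pivot from alerts to their decisions
		All(ctx)
}
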
+func (aq *AlertQuery) First(ctx context.Context) (*Alert, error) { + nodes, err := aq.Limit(1).All(ctx) + if err != nil { + return nil, err + } + if len(nodes) == 0 { + return nil, &NotFoundError{alert.Label} + } + return nodes[0], nil +} + +// FirstX is like First, but panics if an error occurs. +func (aq *AlertQuery) FirstX(ctx context.Context) *Alert { + node, err := aq.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return node +} + +// FirstID returns the first Alert id in the query. Returns *NotFoundError when no id was found. +func (aq *AlertQuery) FirstID(ctx context.Context) (id int, err error) { + var ids []int + if ids, err = aq.Limit(1).IDs(ctx); err != nil { + return + } + if len(ids) == 0 { + err = &NotFoundError{alert.Label} + return + } + return ids[0], nil +} + +// FirstXID is like FirstID, but panics if an error occurs. +func (aq *AlertQuery) FirstXID(ctx context.Context) int { + id, err := aq.FirstID(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return id +} + +// Only returns the only Alert entity in the query, returns an error if not exactly one entity was returned. +func (aq *AlertQuery) Only(ctx context.Context) (*Alert, error) { + nodes, err := aq.Limit(2).All(ctx) + if err != nil { + return nil, err + } + switch len(nodes) { + case 1: + return nodes[0], nil + case 0: + return nil, &NotFoundError{alert.Label} + default: + return nil, &NotSingularError{alert.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. +func (aq *AlertQuery) OnlyX(ctx context.Context) *Alert { + node, err := aq.Only(ctx) + if err != nil { + panic(err) + } + return node +} + +// OnlyID returns the only Alert id in the query, returns an error if not exactly one id was returned. +func (aq *AlertQuery) OnlyID(ctx context.Context) (id int, err error) { + var ids []int + if ids, err = aq.Limit(2).IDs(ctx); err != nil { + return + } + switch len(ids) { + case 1: + id = ids[0] + case 0: + err = &NotFoundError{alert.Label} + default: + err = &NotSingularError{alert.Label} + } + return +} + +// OnlyIDX is like OnlyID, but panics if an error occurs. +func (aq *AlertQuery) OnlyIDX(ctx context.Context) int { + id, err := aq.OnlyID(ctx) + if err != nil { + panic(err) + } + return id +} + +// All executes the query and returns a list of Alerts. +func (aq *AlertQuery) All(ctx context.Context) ([]*Alert, error) { + if err := aq.prepareQuery(ctx); err != nil { + return nil, err + } + return aq.sqlAll(ctx) +} + +// AllX is like All, but panics if an error occurs. +func (aq *AlertQuery) AllX(ctx context.Context) []*Alert { + nodes, err := aq.All(ctx) + if err != nil { + panic(err) + } + return nodes +} + +// IDs executes the query and returns a list of Alert ids. +func (aq *AlertQuery) IDs(ctx context.Context) ([]int, error) { + var ids []int + if err := aq.Select(alert.FieldID).Scan(ctx, &ids); err != nil { + return nil, err + } + return ids, nil +} + +// IDsX is like IDs, but panics if an error occurs. +func (aq *AlertQuery) IDsX(ctx context.Context) []int { + ids, err := aq.IDs(ctx) + if err != nil { + panic(err) + } + return ids +} + +// Count returns the count of the given query. +func (aq *AlertQuery) Count(ctx context.Context) (int, error) { + if err := aq.prepareQuery(ctx); err != nil { + return 0, err + } + return aq.sqlCount(ctx) +} + +// CountX is like Count, but panics if an error occurs. 
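
First, Only, All, IDs, Count and Exist are the terminal steps of a query; each has an X twin that panics instead of returning the error. A small sketch of the non-panicking forms:

func countSSHBF(ctx context.Context, client *ent.Client) (int, error) {
	return client.Alert.Query().
		Where(alert.ScenarioEQ("crowdsecurity/ssh-bf")).
		Count(ctx)
}
// First returns *NotFoundError when nothing matches; FirstX would panic instead.
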
+func (aq *AlertQuery) CountX(ctx context.Context) int { + count, err := aq.Count(ctx) + if err != nil { + panic(err) + } + return count +} + +// Exist returns true if the query has elements in the graph. +func (aq *AlertQuery) Exist(ctx context.Context) (bool, error) { + if err := aq.prepareQuery(ctx); err != nil { + return false, err + } + return aq.sqlExist(ctx) +} + +// ExistX is like Exist, but panics if an error occurs. +func (aq *AlertQuery) ExistX(ctx context.Context) bool { + exist, err := aq.Exist(ctx) + if err != nil { + panic(err) + } + return exist +} + +// Clone returns a duplicate of the query builder, including all associated steps. It can be +// used to prepare common query builders and use them differently after the clone is made. +func (aq *AlertQuery) Clone() *AlertQuery { + return &AlertQuery{ + config: aq.config, + limit: aq.limit, + offset: aq.offset, + order: append([]OrderFunc{}, aq.order...), + unique: append([]string{}, aq.unique...), + predicates: append([]predicate.Alert{}, aq.predicates...), + // clone intermediate query. + sql: aq.sql.Clone(), + path: aq.path, + } +} + +// WithOwner tells the query-builder to eager-loads the nodes that are connected to +// the "owner" edge. The optional arguments used to configure the query builder of the edge. +func (aq *AlertQuery) WithOwner(opts ...func(*MachineQuery)) *AlertQuery { + query := &MachineQuery{config: aq.config} + for _, opt := range opts { + opt(query) + } + aq.withOwner = query + return aq +} + +// WithDecisions tells the query-builder to eager-loads the nodes that are connected to +// the "decisions" edge. The optional arguments used to configure the query builder of the edge. +func (aq *AlertQuery) WithDecisions(opts ...func(*DecisionQuery)) *AlertQuery { + query := &DecisionQuery{config: aq.config} + for _, opt := range opts { + opt(query) + } + aq.withDecisions = query + return aq +} + +// WithEvents tells the query-builder to eager-loads the nodes that are connected to +// the "events" edge. The optional arguments used to configure the query builder of the edge. +func (aq *AlertQuery) WithEvents(opts ...func(*EventQuery)) *AlertQuery { + query := &EventQuery{config: aq.config} + for _, opt := range opts { + opt(query) + } + aq.withEvents = query + return aq +} + +// WithMetas tells the query-builder to eager-loads the nodes that are connected to +// the "metas" edge. The optional arguments used to configure the query builder of the edge. +func (aq *AlertQuery) WithMetas(opts ...func(*MetaQuery)) *AlertQuery { + query := &MetaQuery{config: aq.config} + for _, opt := range opts { + opt(query) + } + aq.withMetas = query + return aq +} + +// GroupBy used to group vertices by one or more fields/columns. +// It is often used with aggregate functions, like: count, max, mean, min, sum. +// +// Example: +// +// var v []struct { +// CreatedAt time.Time `json:"created_at,omitempty"` +// Count int `json:"count,omitempty"` +// } +// +// client.Alert.Query(). +// GroupBy(alert.FieldCreatedAt). +// Aggregate(ent.Count()). +// Scan(ctx, &v) +// +func (aq *AlertQuery) GroupBy(field string, fields ...string) *AlertGroupBy { + group := &AlertGroupBy{config: aq.config} + group.fields = append([]string{field}, fields...) + group.path = func(ctx context.Context) (prev *sql.Selector, err error) { + if err := aq.prepareQuery(ctx); err != nil { + return nil, err + } + return aq.sqlQuery(), nil + } + return group +} + +// Select one or more fields from the given query. 
+// +// Example: +// +// var v []struct { +// CreatedAt time.Time `json:"created_at,omitempty"` +// } +// +// client.Alert.Query(). +// Select(alert.FieldCreatedAt). +// Scan(ctx, &v) +// +func (aq *AlertQuery) Select(field string, fields ...string) *AlertSelect { + selector := &AlertSelect{config: aq.config} + selector.fields = append([]string{field}, fields...) + selector.path = func(ctx context.Context) (prev *sql.Selector, err error) { + if err := aq.prepareQuery(ctx); err != nil { + return nil, err + } + return aq.sqlQuery(), nil + } + return selector +} + +func (aq *AlertQuery) prepareQuery(ctx context.Context) error { + if aq.path != nil { + prev, err := aq.path(ctx) + if err != nil { + return err + } + aq.sql = prev + } + return nil +} + +func (aq *AlertQuery) sqlAll(ctx context.Context) ([]*Alert, error) { + var ( + nodes = []*Alert{} + withFKs = aq.withFKs + _spec = aq.querySpec() + loadedTypes = [4]bool{ + aq.withOwner != nil, + aq.withDecisions != nil, + aq.withEvents != nil, + aq.withMetas != nil, + } + ) + if aq.withOwner != nil { + withFKs = true + } + if withFKs { + _spec.Node.Columns = append(_spec.Node.Columns, alert.ForeignKeys...) + } + _spec.ScanValues = func() []interface{} { + node := &Alert{config: aq.config} + nodes = append(nodes, node) + values := node.scanValues() + if withFKs { + values = append(values, node.fkValues()...) + } + return values + } + _spec.Assign = func(values ...interface{}) error { + if len(nodes) == 0 { + return fmt.Errorf("ent: Assign called without calling ScanValues") + } + node := nodes[len(nodes)-1] + node.Edges.loadedTypes = loadedTypes + return node.assignValues(values...) + } + if err := sqlgraph.QueryNodes(ctx, aq.driver, _spec); err != nil { + return nil, err + } + if len(nodes) == 0 { + return nodes, nil + } + + if query := aq.withOwner; query != nil { + ids := make([]int, 0, len(nodes)) + nodeids := make(map[int][]*Alert) + for i := range nodes { + if fk := nodes[i].machine_alerts; fk != nil { + ids = append(ids, *fk) + nodeids[*fk] = append(nodeids[*fk], nodes[i]) + } + } + query.Where(machine.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return nil, err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return nil, fmt.Errorf(`unexpected foreign-key "machine_alerts" returned %v`, n.ID) + } + for i := range nodes { + nodes[i].Edges.Owner = n + } + } + } + + if query := aq.withDecisions; query != nil { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[int]*Alert) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + } + query.withFKs = true + query.Where(predicate.Decision(func(s *sql.Selector) { + s.Where(sql.InValues(alert.DecisionsColumn, fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + return nil, err + } + for _, n := range neighbors { + fk := n.alert_decisions + if fk == nil { + return nil, fmt.Errorf(`foreign-key "alert_decisions" is nil for node %v`, n.ID) + } + node, ok := nodeids[*fk] + if !ok { + return nil, fmt.Errorf(`unexpected foreign-key "alert_decisions" returned %v for node %v`, *fk, n.ID) + } + node.Edges.Decisions = append(node.Edges.Decisions, n) + } + } + + if query := aq.withEvents; query != nil { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[int]*Alert) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + } + query.withFKs = true + query.Where(predicate.Event(func(s *sql.Selector) { + 
s.Where(sql.InValues(alert.EventsColumn, fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + return nil, err + } + for _, n := range neighbors { + fk := n.alert_events + if fk == nil { + return nil, fmt.Errorf(`foreign-key "alert_events" is nil for node %v`, n.ID) + } + node, ok := nodeids[*fk] + if !ok { + return nil, fmt.Errorf(`unexpected foreign-key "alert_events" returned %v for node %v`, *fk, n.ID) + } + node.Edges.Events = append(node.Edges.Events, n) + } + } + + if query := aq.withMetas; query != nil { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[int]*Alert) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + } + query.withFKs = true + query.Where(predicate.Meta(func(s *sql.Selector) { + s.Where(sql.InValues(alert.MetasColumn, fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + return nil, err + } + for _, n := range neighbors { + fk := n.alert_metas + if fk == nil { + return nil, fmt.Errorf(`foreign-key "alert_metas" is nil for node %v`, n.ID) + } + node, ok := nodeids[*fk] + if !ok { + return nil, fmt.Errorf(`unexpected foreign-key "alert_metas" returned %v for node %v`, *fk, n.ID) + } + node.Edges.Metas = append(node.Edges.Metas, n) + } + } + + return nodes, nil +} + +func (aq *AlertQuery) sqlCount(ctx context.Context) (int, error) { + _spec := aq.querySpec() + return sqlgraph.CountNodes(ctx, aq.driver, _spec) +} + +func (aq *AlertQuery) sqlExist(ctx context.Context) (bool, error) { + n, err := aq.sqlCount(ctx) + if err != nil { + return false, fmt.Errorf("ent: check existence: %v", err) + } + return n > 0, nil +} + +func (aq *AlertQuery) querySpec() *sqlgraph.QuerySpec { + _spec := &sqlgraph.QuerySpec{ + Node: &sqlgraph.NodeSpec{ + Table: alert.Table, + Columns: alert.Columns, + ID: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: alert.FieldID, + }, + }, + From: aq.sql, + Unique: true, + } + if ps := aq.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if limit := aq.limit; limit != nil { + _spec.Limit = *limit + } + if offset := aq.offset; offset != nil { + _spec.Offset = *offset + } + if ps := aq.order; len(ps) > 0 { + _spec.Order = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector, alert.ValidColumn) + } + } + } + return _spec +} + +func (aq *AlertQuery) sqlQuery() *sql.Selector { + builder := sql.Dialect(aq.driver.Dialect()) + t1 := builder.Table(alert.Table) + selector := builder.Select(t1.Columns(alert.Columns...)...).From(t1) + if aq.sql != nil { + selector = aq.sql + selector.Select(selector.Columns(alert.Columns...)...) + } + for _, p := range aq.predicates { + p(selector) + } + for _, p := range aq.order { + p(selector, alert.ValidColumn) + } + if offset := aq.offset; offset != nil { + // limit is mandatory for offset clause. We start + // with default value, and override it below if needed. + selector.Offset(*offset).Limit(math.MaxInt32) + } + if limit := aq.limit; limit != nil { + selector.Limit(*limit) + } + return selector +} + +// AlertGroupBy is the builder for group-by Alert entities. +type AlertGroupBy struct { + config + fields []string + fns []AggregateFunc + // intermediate query (i.e. traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Aggregate adds the given aggregation functions to the group-by query. 
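
The edge-loading passes above are what the With* options switch on: one extra query per requested edge, grouped by foreign key and attached to node.Edges, instead of one query per row. From the caller's side (same hypothetical client):

func alertsWithNeighbors(ctx context.Context, client *ent.Client) ([]*ent.Alert, error) {
	alerts, err := client.Alert.Query().
		WithOwner().
		WithDecisions().
		All(ctx)
	if err != nil {
		return nil, err
	}
	for _, a := range alerts {
		_ = a.Edges.Owner     // *ent.Machine; nil when machine_alerts is NULL
		_ = a.Edges.Decisions // []*ent.Decision, filled by the pass above
	}
	return alerts, nil
}
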
+func (agb *AlertGroupBy) Aggregate(fns ...AggregateFunc) *AlertGroupBy { + agb.fns = append(agb.fns, fns...) + return agb +} + +// Scan applies the group-by query and scan the result into the given value. +func (agb *AlertGroupBy) Scan(ctx context.Context, v interface{}) error { + query, err := agb.path(ctx) + if err != nil { + return err + } + agb.sql = query + return agb.sqlScan(ctx, v) +} + +// ScanX is like Scan, but panics if an error occurs. +func (agb *AlertGroupBy) ScanX(ctx context.Context, v interface{}) { + if err := agb.Scan(ctx, v); err != nil { + panic(err) + } +} + +// Strings returns list of strings from group-by. It is only allowed when querying group-by with one field. +func (agb *AlertGroupBy) Strings(ctx context.Context) ([]string, error) { + if len(agb.fields) > 1 { + return nil, errors.New("ent: AlertGroupBy.Strings is not achievable when grouping more than 1 field") + } + var v []string + if err := agb.Scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// StringsX is like Strings, but panics if an error occurs. +func (agb *AlertGroupBy) StringsX(ctx context.Context) []string { + v, err := agb.Strings(ctx) + if err != nil { + panic(err) + } + return v +} + +// String returns a single string from group-by. It is only allowed when querying group-by with one field. +func (agb *AlertGroupBy) String(ctx context.Context) (_ string, err error) { + var v []string + if v, err = agb.Strings(ctx); err != nil { + return + } + switch len(v) { + case 1: + return v[0], nil + case 0: + err = &NotFoundError{alert.Label} + default: + err = fmt.Errorf("ent: AlertGroupBy.Strings returned %d results when one was expected", len(v)) + } + return +} + +// StringX is like String, but panics if an error occurs. +func (agb *AlertGroupBy) StringX(ctx context.Context) string { + v, err := agb.String(ctx) + if err != nil { + panic(err) + } + return v +} + +// Ints returns list of ints from group-by. It is only allowed when querying group-by with one field. +func (agb *AlertGroupBy) Ints(ctx context.Context) ([]int, error) { + if len(agb.fields) > 1 { + return nil, errors.New("ent: AlertGroupBy.Ints is not achievable when grouping more than 1 field") + } + var v []int + if err := agb.Scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// IntsX is like Ints, but panics if an error occurs. +func (agb *AlertGroupBy) IntsX(ctx context.Context) []int { + v, err := agb.Ints(ctx) + if err != nil { + panic(err) + } + return v +} + +// Int returns a single int from group-by. It is only allowed when querying group-by with one field. +func (agb *AlertGroupBy) Int(ctx context.Context) (_ int, err error) { + var v []int + if v, err = agb.Ints(ctx); err != nil { + return + } + switch len(v) { + case 1: + return v[0], nil + case 0: + err = &NotFoundError{alert.Label} + default: + err = fmt.Errorf("ent: AlertGroupBy.Ints returned %d results when one was expected", len(v)) + } + return +} + +// IntX is like Int, but panics if an error occurs. +func (agb *AlertGroupBy) IntX(ctx context.Context) int { + v, err := agb.Int(ctx) + if err != nil { + panic(err) + } + return v +} + +// Float64s returns list of float64s from group-by. It is only allowed when querying group-by with one field. 
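+//
+// A hedged single-field sketch: Float64s is only valid when exactly one field
+// was passed to GroupBy (here the float32 sourceLatitude column, scanned into
+// float64 values).
+//
+//	vs, err := client.Alert.Query().
+//		GroupBy(alert.FieldSourceLatitude).
+//		Float64s(ctx)
+//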
+func (agb *AlertGroupBy) Float64s(ctx context.Context) ([]float64, error) { + if len(agb.fields) > 1 { + return nil, errors.New("ent: AlertGroupBy.Float64s is not achievable when grouping more than 1 field") + } + var v []float64 + if err := agb.Scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// Float64sX is like Float64s, but panics if an error occurs. +func (agb *AlertGroupBy) Float64sX(ctx context.Context) []float64 { + v, err := agb.Float64s(ctx) + if err != nil { + panic(err) + } + return v +} + +// Float64 returns a single float64 from group-by. It is only allowed when querying group-by with one field. +func (agb *AlertGroupBy) Float64(ctx context.Context) (_ float64, err error) { + var v []float64 + if v, err = agb.Float64s(ctx); err != nil { + return + } + switch len(v) { + case 1: + return v[0], nil + case 0: + err = &NotFoundError{alert.Label} + default: + err = fmt.Errorf("ent: AlertGroupBy.Float64s returned %d results when one was expected", len(v)) + } + return +} + +// Float64X is like Float64, but panics if an error occurs. +func (agb *AlertGroupBy) Float64X(ctx context.Context) float64 { + v, err := agb.Float64(ctx) + if err != nil { + panic(err) + } + return v +} + +// Bools returns list of bools from group-by. It is only allowed when querying group-by with one field. +func (agb *AlertGroupBy) Bools(ctx context.Context) ([]bool, error) { + if len(agb.fields) > 1 { + return nil, errors.New("ent: AlertGroupBy.Bools is not achievable when grouping more than 1 field") + } + var v []bool + if err := agb.Scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// BoolsX is like Bools, but panics if an error occurs. +func (agb *AlertGroupBy) BoolsX(ctx context.Context) []bool { + v, err := agb.Bools(ctx) + if err != nil { + panic(err) + } + return v +} + +// Bool returns a single bool from group-by. It is only allowed when querying group-by with one field. +func (agb *AlertGroupBy) Bool(ctx context.Context) (_ bool, err error) { + var v []bool + if v, err = agb.Bools(ctx); err != nil { + return + } + switch len(v) { + case 1: + return v[0], nil + case 0: + err = &NotFoundError{alert.Label} + default: + err = fmt.Errorf("ent: AlertGroupBy.Bools returned %d results when one was expected", len(v)) + } + return +} + +// BoolX is like Bool, but panics if an error occurs. +func (agb *AlertGroupBy) BoolX(ctx context.Context) bool { + v, err := agb.Bool(ctx) + if err != nil { + panic(err) + } + return v +} + +func (agb *AlertGroupBy) sqlScan(ctx context.Context, v interface{}) error { + for _, f := range agb.fields { + if !alert.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("invalid field %q for group-by", f)} + } + } + selector := agb.sqlQuery() + if err := selector.Err(); err != nil { + return err + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := agb.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +func (agb *AlertGroupBy) sqlQuery() *sql.Selector { + selector := agb.sql + columns := make([]string, 0, len(agb.fields)+len(agb.fns)) + columns = append(columns, agb.fields...) + for _, fn := range agb.fns { + columns = append(columns, fn(selector, alert.ValidColumn)) + } + return selector.Select(columns...).GroupBy(agb.fields...) +} + +// AlertSelect is the builder for select fields of Alert entities. +type AlertSelect struct { + config + fields []string + // intermediate query (i.e. traversal path). 
+ sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Scan applies the selector query and scan the result into the given value. +func (as *AlertSelect) Scan(ctx context.Context, v interface{}) error { + query, err := as.path(ctx) + if err != nil { + return err + } + as.sql = query + return as.sqlScan(ctx, v) +} + +// ScanX is like Scan, but panics if an error occurs. +func (as *AlertSelect) ScanX(ctx context.Context, v interface{}) { + if err := as.Scan(ctx, v); err != nil { + panic(err) + } +} + +// Strings returns list of strings from selector. It is only allowed when selecting one field. +func (as *AlertSelect) Strings(ctx context.Context) ([]string, error) { + if len(as.fields) > 1 { + return nil, errors.New("ent: AlertSelect.Strings is not achievable when selecting more than 1 field") + } + var v []string + if err := as.Scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// StringsX is like Strings, but panics if an error occurs. +func (as *AlertSelect) StringsX(ctx context.Context) []string { + v, err := as.Strings(ctx) + if err != nil { + panic(err) + } + return v +} + +// String returns a single string from selector. It is only allowed when selecting one field. +func (as *AlertSelect) String(ctx context.Context) (_ string, err error) { + var v []string + if v, err = as.Strings(ctx); err != nil { + return + } + switch len(v) { + case 1: + return v[0], nil + case 0: + err = &NotFoundError{alert.Label} + default: + err = fmt.Errorf("ent: AlertSelect.Strings returned %d results when one was expected", len(v)) + } + return +} + +// StringX is like String, but panics if an error occurs. +func (as *AlertSelect) StringX(ctx context.Context) string { + v, err := as.String(ctx) + if err != nil { + panic(err) + } + return v +} + +// Ints returns list of ints from selector. It is only allowed when selecting one field. +func (as *AlertSelect) Ints(ctx context.Context) ([]int, error) { + if len(as.fields) > 1 { + return nil, errors.New("ent: AlertSelect.Ints is not achievable when selecting more than 1 field") + } + var v []int + if err := as.Scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// IntsX is like Ints, but panics if an error occurs. +func (as *AlertSelect) IntsX(ctx context.Context) []int { + v, err := as.Ints(ctx) + if err != nil { + panic(err) + } + return v +} + +// Int returns a single int from selector. It is only allowed when selecting one field. +func (as *AlertSelect) Int(ctx context.Context) (_ int, err error) { + var v []int + if v, err = as.Ints(ctx); err != nil { + return + } + switch len(v) { + case 1: + return v[0], nil + case 0: + err = &NotFoundError{alert.Label} + default: + err = fmt.Errorf("ent: AlertSelect.Ints returned %d results when one was expected", len(v)) + } + return +} + +// IntX is like Int, but panics if an error occurs. +func (as *AlertSelect) IntX(ctx context.Context) int { + v, err := as.Int(ctx) + if err != nil { + panic(err) + } + return v +} + +// Float64s returns list of float64s from selector. It is only allowed when selecting one field. +func (as *AlertSelect) Float64s(ctx context.Context) ([]float64, error) { + if len(as.fields) > 1 { + return nil, errors.New("ent: AlertSelect.Float64s is not achievable when selecting more than 1 field") + } + var v []float64 + if err := as.Scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// Float64sX is like Float64s, but panics if an error occurs. 
+func (as *AlertSelect) Float64sX(ctx context.Context) []float64 { + v, err := as.Float64s(ctx) + if err != nil { + panic(err) + } + return v +} + +// Float64 returns a single float64 from selector. It is only allowed when selecting one field. +func (as *AlertSelect) Float64(ctx context.Context) (_ float64, err error) { + var v []float64 + if v, err = as.Float64s(ctx); err != nil { + return + } + switch len(v) { + case 1: + return v[0], nil + case 0: + err = &NotFoundError{alert.Label} + default: + err = fmt.Errorf("ent: AlertSelect.Float64s returned %d results when one was expected", len(v)) + } + return +} + +// Float64X is like Float64, but panics if an error occurs. +func (as *AlertSelect) Float64X(ctx context.Context) float64 { + v, err := as.Float64(ctx) + if err != nil { + panic(err) + } + return v +} + +// Bools returns list of bools from selector. It is only allowed when selecting one field. +func (as *AlertSelect) Bools(ctx context.Context) ([]bool, error) { + if len(as.fields) > 1 { + return nil, errors.New("ent: AlertSelect.Bools is not achievable when selecting more than 1 field") + } + var v []bool + if err := as.Scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// BoolsX is like Bools, but panics if an error occurs. +func (as *AlertSelect) BoolsX(ctx context.Context) []bool { + v, err := as.Bools(ctx) + if err != nil { + panic(err) + } + return v +} + +// Bool returns a single bool from selector. It is only allowed when selecting one field. +func (as *AlertSelect) Bool(ctx context.Context) (_ bool, err error) { + var v []bool + if v, err = as.Bools(ctx); err != nil { + return + } + switch len(v) { + case 1: + return v[0], nil + case 0: + err = &NotFoundError{alert.Label} + default: + err = fmt.Errorf("ent: AlertSelect.Bools returned %d results when one was expected", len(v)) + } + return +} + +// BoolX is like Bool, but panics if an error occurs. +func (as *AlertSelect) BoolX(ctx context.Context) bool { + v, err := as.Bool(ctx) + if err != nil { + panic(err) + } + return v +} + +func (as *AlertSelect) sqlScan(ctx context.Context, v interface{}) error { + for _, f := range as.fields { + if !alert.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("invalid field %q for selection", f)} + } + } + rows := &sql.Rows{} + query, args := as.sqlQuery().Query() + if err := as.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +func (as *AlertSelect) sqlQuery() sql.Querier { + selector := as.sql + selector.Select(selector.Columns(as.fields...)...) + return selector +} diff --git a/pkg/database/ent/alert_update.go b/pkg/database/ent/alert_update.go new file mode 100644 index 000000000..48b4daef9 --- /dev/null +++ b/pkg/database/ent/alert_update.go @@ -0,0 +1,2323 @@ +// Code generated by entc, DO NOT EDIT. + +package ent + +import ( + "context" + "fmt" + "time" + + "github.com/crowdsecurity/crowdsec/pkg/database/ent/alert" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/decision" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/event" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/machine" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/meta" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/predicate" + "github.com/facebook/ent/dialect/sql" + "github.com/facebook/ent/dialect/sql/sqlgraph" + "github.com/facebook/ent/schema/field" +) + +// AlertUpdate is the builder for updating Alert entities. 
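+//
+// A usage sketch (the scenario value is an assumption): update every alert
+// matching a predicate and collect the number of affected rows.
+//
+//	n, err := client.Alert.Update().
+//		Where(alert.ScenarioEQ("crowdsecurity/ssh-bf")).
+//		SetSimulated(true).
+//		Save(ctx)
+//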
+type AlertUpdate struct { + config + hooks []Hook + mutation *AlertMutation + predicates []predicate.Alert +} + +// Where adds a new predicate for the builder. +func (au *AlertUpdate) Where(ps ...predicate.Alert) *AlertUpdate { + au.predicates = append(au.predicates, ps...) + return au +} + +// SetCreatedAt sets the created_at field. +func (au *AlertUpdate) SetCreatedAt(t time.Time) *AlertUpdate { + au.mutation.SetCreatedAt(t) + return au +} + +// SetNillableCreatedAt sets the created_at field if the given value is not nil. +func (au *AlertUpdate) SetNillableCreatedAt(t *time.Time) *AlertUpdate { + if t != nil { + au.SetCreatedAt(*t) + } + return au +} + +// SetUpdatedAt sets the updated_at field. +func (au *AlertUpdate) SetUpdatedAt(t time.Time) *AlertUpdate { + au.mutation.SetUpdatedAt(t) + return au +} + +// SetNillableUpdatedAt sets the updated_at field if the given value is not nil. +func (au *AlertUpdate) SetNillableUpdatedAt(t *time.Time) *AlertUpdate { + if t != nil { + au.SetUpdatedAt(*t) + } + return au +} + +// SetScenario sets the scenario field. +func (au *AlertUpdate) SetScenario(s string) *AlertUpdate { + au.mutation.SetScenario(s) + return au +} + +// SetBucketId sets the bucketId field. +func (au *AlertUpdate) SetBucketId(s string) *AlertUpdate { + au.mutation.SetBucketId(s) + return au +} + +// SetNillableBucketId sets the bucketId field if the given value is not nil. +func (au *AlertUpdate) SetNillableBucketId(s *string) *AlertUpdate { + if s != nil { + au.SetBucketId(*s) + } + return au +} + +// ClearBucketId clears the value of bucketId. +func (au *AlertUpdate) ClearBucketId() *AlertUpdate { + au.mutation.ClearBucketId() + return au +} + +// SetMessage sets the message field. +func (au *AlertUpdate) SetMessage(s string) *AlertUpdate { + au.mutation.SetMessage(s) + return au +} + +// SetNillableMessage sets the message field if the given value is not nil. +func (au *AlertUpdate) SetNillableMessage(s *string) *AlertUpdate { + if s != nil { + au.SetMessage(*s) + } + return au +} + +// ClearMessage clears the value of message. +func (au *AlertUpdate) ClearMessage() *AlertUpdate { + au.mutation.ClearMessage() + return au +} + +// SetEventsCount sets the eventsCount field. +func (au *AlertUpdate) SetEventsCount(i int32) *AlertUpdate { + au.mutation.ResetEventsCount() + au.mutation.SetEventsCount(i) + return au +} + +// SetNillableEventsCount sets the eventsCount field if the given value is not nil. +func (au *AlertUpdate) SetNillableEventsCount(i *int32) *AlertUpdate { + if i != nil { + au.SetEventsCount(*i) + } + return au +} + +// AddEventsCount adds i to eventsCount. +func (au *AlertUpdate) AddEventsCount(i int32) *AlertUpdate { + au.mutation.AddEventsCount(i) + return au +} + +// ClearEventsCount clears the value of eventsCount. +func (au *AlertUpdate) ClearEventsCount() *AlertUpdate { + au.mutation.ClearEventsCount() + return au +} + +// SetStartedAt sets the startedAt field. +func (au *AlertUpdate) SetStartedAt(t time.Time) *AlertUpdate { + au.mutation.SetStartedAt(t) + return au +} + +// SetNillableStartedAt sets the startedAt field if the given value is not nil. +func (au *AlertUpdate) SetNillableStartedAt(t *time.Time) *AlertUpdate { + if t != nil { + au.SetStartedAt(*t) + } + return au +} + +// ClearStartedAt clears the value of startedAt. +func (au *AlertUpdate) ClearStartedAt() *AlertUpdate { + au.mutation.ClearStartedAt() + return au +} + +// SetStoppedAt sets the stoppedAt field. 
+func (au *AlertUpdate) SetStoppedAt(t time.Time) *AlertUpdate { + au.mutation.SetStoppedAt(t) + return au +} + +// SetNillableStoppedAt sets the stoppedAt field if the given value is not nil. +func (au *AlertUpdate) SetNillableStoppedAt(t *time.Time) *AlertUpdate { + if t != nil { + au.SetStoppedAt(*t) + } + return au +} + +// ClearStoppedAt clears the value of stoppedAt. +func (au *AlertUpdate) ClearStoppedAt() *AlertUpdate { + au.mutation.ClearStoppedAt() + return au +} + +// SetSourceIp sets the sourceIp field. +func (au *AlertUpdate) SetSourceIp(s string) *AlertUpdate { + au.mutation.SetSourceIp(s) + return au +} + +// SetNillableSourceIp sets the sourceIp field if the given value is not nil. +func (au *AlertUpdate) SetNillableSourceIp(s *string) *AlertUpdate { + if s != nil { + au.SetSourceIp(*s) + } + return au +} + +// ClearSourceIp clears the value of sourceIp. +func (au *AlertUpdate) ClearSourceIp() *AlertUpdate { + au.mutation.ClearSourceIp() + return au +} + +// SetSourceRange sets the sourceRange field. +func (au *AlertUpdate) SetSourceRange(s string) *AlertUpdate { + au.mutation.SetSourceRange(s) + return au +} + +// SetNillableSourceRange sets the sourceRange field if the given value is not nil. +func (au *AlertUpdate) SetNillableSourceRange(s *string) *AlertUpdate { + if s != nil { + au.SetSourceRange(*s) + } + return au +} + +// ClearSourceRange clears the value of sourceRange. +func (au *AlertUpdate) ClearSourceRange() *AlertUpdate { + au.mutation.ClearSourceRange() + return au +} + +// SetSourceAsNumber sets the sourceAsNumber field. +func (au *AlertUpdate) SetSourceAsNumber(s string) *AlertUpdate { + au.mutation.SetSourceAsNumber(s) + return au +} + +// SetNillableSourceAsNumber sets the sourceAsNumber field if the given value is not nil. +func (au *AlertUpdate) SetNillableSourceAsNumber(s *string) *AlertUpdate { + if s != nil { + au.SetSourceAsNumber(*s) + } + return au +} + +// ClearSourceAsNumber clears the value of sourceAsNumber. +func (au *AlertUpdate) ClearSourceAsNumber() *AlertUpdate { + au.mutation.ClearSourceAsNumber() + return au +} + +// SetSourceAsName sets the sourceAsName field. +func (au *AlertUpdate) SetSourceAsName(s string) *AlertUpdate { + au.mutation.SetSourceAsName(s) + return au +} + +// SetNillableSourceAsName sets the sourceAsName field if the given value is not nil. +func (au *AlertUpdate) SetNillableSourceAsName(s *string) *AlertUpdate { + if s != nil { + au.SetSourceAsName(*s) + } + return au +} + +// ClearSourceAsName clears the value of sourceAsName. +func (au *AlertUpdate) ClearSourceAsName() *AlertUpdate { + au.mutation.ClearSourceAsName() + return au +} + +// SetSourceCountry sets the sourceCountry field. +func (au *AlertUpdate) SetSourceCountry(s string) *AlertUpdate { + au.mutation.SetSourceCountry(s) + return au +} + +// SetNillableSourceCountry sets the sourceCountry field if the given value is not nil. +func (au *AlertUpdate) SetNillableSourceCountry(s *string) *AlertUpdate { + if s != nil { + au.SetSourceCountry(*s) + } + return au +} + +// ClearSourceCountry clears the value of sourceCountry. +func (au *AlertUpdate) ClearSourceCountry() *AlertUpdate { + au.mutation.ClearSourceCountry() + return au +} + +// SetSourceLatitude sets the sourceLatitude field. +func (au *AlertUpdate) SetSourceLatitude(f float32) *AlertUpdate { + au.mutation.ResetSourceLatitude() + au.mutation.SetSourceLatitude(f) + return au +} + +// SetNillableSourceLatitude sets the sourceLatitude field if the given value is not nil. 
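+//
+// The SetNillable* helpers are convenient when mapping optional API fields;
+// a sketch assuming req is a request struct holding a *float32 Latitude:
+//
+//	au.SetNillableSourceLatitude(req.Latitude)
+//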
+func (au *AlertUpdate) SetNillableSourceLatitude(f *float32) *AlertUpdate { + if f != nil { + au.SetSourceLatitude(*f) + } + return au +} + +// AddSourceLatitude adds f to sourceLatitude. +func (au *AlertUpdate) AddSourceLatitude(f float32) *AlertUpdate { + au.mutation.AddSourceLatitude(f) + return au +} + +// ClearSourceLatitude clears the value of sourceLatitude. +func (au *AlertUpdate) ClearSourceLatitude() *AlertUpdate { + au.mutation.ClearSourceLatitude() + return au +} + +// SetSourceLongitude sets the sourceLongitude field. +func (au *AlertUpdate) SetSourceLongitude(f float32) *AlertUpdate { + au.mutation.ResetSourceLongitude() + au.mutation.SetSourceLongitude(f) + return au +} + +// SetNillableSourceLongitude sets the sourceLongitude field if the given value is not nil. +func (au *AlertUpdate) SetNillableSourceLongitude(f *float32) *AlertUpdate { + if f != nil { + au.SetSourceLongitude(*f) + } + return au +} + +// AddSourceLongitude adds f to sourceLongitude. +func (au *AlertUpdate) AddSourceLongitude(f float32) *AlertUpdate { + au.mutation.AddSourceLongitude(f) + return au +} + +// ClearSourceLongitude clears the value of sourceLongitude. +func (au *AlertUpdate) ClearSourceLongitude() *AlertUpdate { + au.mutation.ClearSourceLongitude() + return au +} + +// SetSourceScope sets the sourceScope field. +func (au *AlertUpdate) SetSourceScope(s string) *AlertUpdate { + au.mutation.SetSourceScope(s) + return au +} + +// SetNillableSourceScope sets the sourceScope field if the given value is not nil. +func (au *AlertUpdate) SetNillableSourceScope(s *string) *AlertUpdate { + if s != nil { + au.SetSourceScope(*s) + } + return au +} + +// ClearSourceScope clears the value of sourceScope. +func (au *AlertUpdate) ClearSourceScope() *AlertUpdate { + au.mutation.ClearSourceScope() + return au +} + +// SetSourceValue sets the sourceValue field. +func (au *AlertUpdate) SetSourceValue(s string) *AlertUpdate { + au.mutation.SetSourceValue(s) + return au +} + +// SetNillableSourceValue sets the sourceValue field if the given value is not nil. +func (au *AlertUpdate) SetNillableSourceValue(s *string) *AlertUpdate { + if s != nil { + au.SetSourceValue(*s) + } + return au +} + +// ClearSourceValue clears the value of sourceValue. +func (au *AlertUpdate) ClearSourceValue() *AlertUpdate { + au.mutation.ClearSourceValue() + return au +} + +// SetCapacity sets the capacity field. +func (au *AlertUpdate) SetCapacity(i int32) *AlertUpdate { + au.mutation.ResetCapacity() + au.mutation.SetCapacity(i) + return au +} + +// SetNillableCapacity sets the capacity field if the given value is not nil. +func (au *AlertUpdate) SetNillableCapacity(i *int32) *AlertUpdate { + if i != nil { + au.SetCapacity(*i) + } + return au +} + +// AddCapacity adds i to capacity. +func (au *AlertUpdate) AddCapacity(i int32) *AlertUpdate { + au.mutation.AddCapacity(i) + return au +} + +// ClearCapacity clears the value of capacity. +func (au *AlertUpdate) ClearCapacity() *AlertUpdate { + au.mutation.ClearCapacity() + return au +} + +// SetLeakSpeed sets the leakSpeed field. +func (au *AlertUpdate) SetLeakSpeed(s string) *AlertUpdate { + au.mutation.SetLeakSpeed(s) + return au +} + +// SetNillableLeakSpeed sets the leakSpeed field if the given value is not nil. +func (au *AlertUpdate) SetNillableLeakSpeed(s *string) *AlertUpdate { + if s != nil { + au.SetLeakSpeed(*s) + } + return au +} + +// ClearLeakSpeed clears the value of leakSpeed. 
+func (au *AlertUpdate) ClearLeakSpeed() *AlertUpdate { + au.mutation.ClearLeakSpeed() + return au +} + +// SetScenarioVersion sets the scenarioVersion field. +func (au *AlertUpdate) SetScenarioVersion(s string) *AlertUpdate { + au.mutation.SetScenarioVersion(s) + return au +} + +// SetNillableScenarioVersion sets the scenarioVersion field if the given value is not nil. +func (au *AlertUpdate) SetNillableScenarioVersion(s *string) *AlertUpdate { + if s != nil { + au.SetScenarioVersion(*s) + } + return au +} + +// ClearScenarioVersion clears the value of scenarioVersion. +func (au *AlertUpdate) ClearScenarioVersion() *AlertUpdate { + au.mutation.ClearScenarioVersion() + return au +} + +// SetScenarioHash sets the scenarioHash field. +func (au *AlertUpdate) SetScenarioHash(s string) *AlertUpdate { + au.mutation.SetScenarioHash(s) + return au +} + +// SetNillableScenarioHash sets the scenarioHash field if the given value is not nil. +func (au *AlertUpdate) SetNillableScenarioHash(s *string) *AlertUpdate { + if s != nil { + au.SetScenarioHash(*s) + } + return au +} + +// ClearScenarioHash clears the value of scenarioHash. +func (au *AlertUpdate) ClearScenarioHash() *AlertUpdate { + au.mutation.ClearScenarioHash() + return au +} + +// SetSimulated sets the simulated field. +func (au *AlertUpdate) SetSimulated(b bool) *AlertUpdate { + au.mutation.SetSimulated(b) + return au +} + +// SetNillableSimulated sets the simulated field if the given value is not nil. +func (au *AlertUpdate) SetNillableSimulated(b *bool) *AlertUpdate { + if b != nil { + au.SetSimulated(*b) + } + return au +} + +// SetOwnerID sets the owner edge to Machine by id. +func (au *AlertUpdate) SetOwnerID(id int) *AlertUpdate { + au.mutation.SetOwnerID(id) + return au +} + +// SetNillableOwnerID sets the owner edge to Machine by id if the given value is not nil. +func (au *AlertUpdate) SetNillableOwnerID(id *int) *AlertUpdate { + if id != nil { + au = au.SetOwnerID(*id) + } + return au +} + +// SetOwner sets the owner edge to Machine. +func (au *AlertUpdate) SetOwner(m *Machine) *AlertUpdate { + return au.SetOwnerID(m.ID) +} + +// AddDecisionIDs adds the decisions edge to Decision by ids. +func (au *AlertUpdate) AddDecisionIDs(ids ...int) *AlertUpdate { + au.mutation.AddDecisionIDs(ids...) + return au +} + +// AddDecisions adds the decisions edges to Decision. +func (au *AlertUpdate) AddDecisions(d ...*Decision) *AlertUpdate { + ids := make([]int, len(d)) + for i := range d { + ids[i] = d[i].ID + } + return au.AddDecisionIDs(ids...) +} + +// AddEventIDs adds the events edge to Event by ids. +func (au *AlertUpdate) AddEventIDs(ids ...int) *AlertUpdate { + au.mutation.AddEventIDs(ids...) + return au +} + +// AddEvents adds the events edges to Event. +func (au *AlertUpdate) AddEvents(e ...*Event) *AlertUpdate { + ids := make([]int, len(e)) + for i := range e { + ids[i] = e[i].ID + } + return au.AddEventIDs(ids...) +} + +// AddMetaIDs adds the metas edge to Meta by ids. +func (au *AlertUpdate) AddMetaIDs(ids ...int) *AlertUpdate { + au.mutation.AddMetaIDs(ids...) + return au +} + +// AddMetas adds the metas edges to Meta. +func (au *AlertUpdate) AddMetas(m ...*Meta) *AlertUpdate { + ids := make([]int, len(m)) + for i := range m { + ids[i] = m[i].ID + } + return au.AddMetaIDs(ids...) +} + +// Mutation returns the AlertMutation object of the builder. +func (au *AlertUpdate) Mutation() *AlertMutation { + return au.mutation +} + +// ClearOwner clears the "owner" edge to type Machine. 
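+//
+// Edge mutations chain like field setters; a sketch (the IDs are assumptions):
+//
+//	client.Alert.Update().
+//		ClearOwner().
+//		RemoveDecisionIDs(1, 2).
+//		Save(ctx)
+//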
+func (au *AlertUpdate) ClearOwner() *AlertUpdate { + au.mutation.ClearOwner() + return au +} + +// ClearDecisions clears all "decisions" edges to type Decision. +func (au *AlertUpdate) ClearDecisions() *AlertUpdate { + au.mutation.ClearDecisions() + return au +} + +// RemoveDecisionIDs removes the decisions edge to Decision by ids. +func (au *AlertUpdate) RemoveDecisionIDs(ids ...int) *AlertUpdate { + au.mutation.RemoveDecisionIDs(ids...) + return au +} + +// RemoveDecisions removes decisions edges to Decision. +func (au *AlertUpdate) RemoveDecisions(d ...*Decision) *AlertUpdate { + ids := make([]int, len(d)) + for i := range d { + ids[i] = d[i].ID + } + return au.RemoveDecisionIDs(ids...) +} + +// ClearEvents clears all "events" edges to type Event. +func (au *AlertUpdate) ClearEvents() *AlertUpdate { + au.mutation.ClearEvents() + return au +} + +// RemoveEventIDs removes the events edge to Event by ids. +func (au *AlertUpdate) RemoveEventIDs(ids ...int) *AlertUpdate { + au.mutation.RemoveEventIDs(ids...) + return au +} + +// RemoveEvents removes events edges to Event. +func (au *AlertUpdate) RemoveEvents(e ...*Event) *AlertUpdate { + ids := make([]int, len(e)) + for i := range e { + ids[i] = e[i].ID + } + return au.RemoveEventIDs(ids...) +} + +// ClearMetas clears all "metas" edges to type Meta. +func (au *AlertUpdate) ClearMetas() *AlertUpdate { + au.mutation.ClearMetas() + return au +} + +// RemoveMetaIDs removes the metas edge to Meta by ids. +func (au *AlertUpdate) RemoveMetaIDs(ids ...int) *AlertUpdate { + au.mutation.RemoveMetaIDs(ids...) + return au +} + +// RemoveMetas removes metas edges to Meta. +func (au *AlertUpdate) RemoveMetas(m ...*Meta) *AlertUpdate { + ids := make([]int, len(m)) + for i := range m { + ids[i] = m[i].ID + } + return au.RemoveMetaIDs(ids...) +} + +// Save executes the query and returns the number of rows/vertices matched by this operation. +func (au *AlertUpdate) Save(ctx context.Context) (int, error) { + var ( + err error + affected int + ) + if len(au.hooks) == 0 { + affected, err = au.sqlSave(ctx) + } else { + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*AlertMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + au.mutation = mutation + affected, err = au.sqlSave(ctx) + mutation.done = true + return affected, err + }) + for i := len(au.hooks) - 1; i >= 0; i-- { + mut = au.hooks[i](mut) + } + if _, err := mut.Mutate(ctx, au.mutation); err != nil { + return 0, err + } + } + return affected, err +} + +// SaveX is like Save, but panics if an error occurs. +func (au *AlertUpdate) SaveX(ctx context.Context) int { + affected, err := au.Save(ctx) + if err != nil { + panic(err) + } + return affected +} + +// Exec executes the query. +func (au *AlertUpdate) Exec(ctx context.Context) error { + _, err := au.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. 
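+//
+// Like the other X-suffixed helpers in this package, ExecX converts the error
+// return into a panic; it suits call sites that treat failure as a programming
+// error (tests, one-off scripts).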
+func (au *AlertUpdate) ExecX(ctx context.Context) { + if err := au.Exec(ctx); err != nil { + panic(err) + } +} + +func (au *AlertUpdate) sqlSave(ctx context.Context) (n int, err error) { + _spec := &sqlgraph.UpdateSpec{ + Node: &sqlgraph.NodeSpec{ + Table: alert.Table, + Columns: alert.Columns, + ID: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: alert.FieldID, + }, + }, + } + if ps := au.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := au.mutation.CreatedAt(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: alert.FieldCreatedAt, + }) + } + if value, ok := au.mutation.UpdatedAt(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: alert.FieldUpdatedAt, + }) + } + if value, ok := au.mutation.Scenario(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: alert.FieldScenario, + }) + } + if value, ok := au.mutation.BucketId(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: alert.FieldBucketId, + }) + } + if au.mutation.BucketIdCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Column: alert.FieldBucketId, + }) + } + if value, ok := au.mutation.Message(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: alert.FieldMessage, + }) + } + if au.mutation.MessageCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Column: alert.FieldMessage, + }) + } + if value, ok := au.mutation.EventsCount(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeInt32, + Value: value, + Column: alert.FieldEventsCount, + }) + } + if value, ok := au.mutation.AddedEventsCount(); ok { + _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ + Type: field.TypeInt32, + Value: value, + Column: alert.FieldEventsCount, + }) + } + if au.mutation.EventsCountCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeInt32, + Column: alert.FieldEventsCount, + }) + } + if value, ok := au.mutation.StartedAt(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: alert.FieldStartedAt, + }) + } + if au.mutation.StartedAtCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Column: alert.FieldStartedAt, + }) + } + if value, ok := au.mutation.StoppedAt(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: alert.FieldStoppedAt, + }) + } + if au.mutation.StoppedAtCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Column: alert.FieldStoppedAt, + }) + } + if value, ok := au.mutation.SourceIp(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: alert.FieldSourceIp, + }) + } + if au.mutation.SourceIpCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Column: alert.FieldSourceIp, + }) + } + if 
value, ok := au.mutation.SourceRange(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: alert.FieldSourceRange, + }) + } + if au.mutation.SourceRangeCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Column: alert.FieldSourceRange, + }) + } + if value, ok := au.mutation.SourceAsNumber(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: alert.FieldSourceAsNumber, + }) + } + if au.mutation.SourceAsNumberCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Column: alert.FieldSourceAsNumber, + }) + } + if value, ok := au.mutation.SourceAsName(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: alert.FieldSourceAsName, + }) + } + if au.mutation.SourceAsNameCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Column: alert.FieldSourceAsName, + }) + } + if value, ok := au.mutation.SourceCountry(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: alert.FieldSourceCountry, + }) + } + if au.mutation.SourceCountryCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Column: alert.FieldSourceCountry, + }) + } + if value, ok := au.mutation.SourceLatitude(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeFloat32, + Value: value, + Column: alert.FieldSourceLatitude, + }) + } + if value, ok := au.mutation.AddedSourceLatitude(); ok { + _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ + Type: field.TypeFloat32, + Value: value, + Column: alert.FieldSourceLatitude, + }) + } + if au.mutation.SourceLatitudeCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeFloat32, + Column: alert.FieldSourceLatitude, + }) + } + if value, ok := au.mutation.SourceLongitude(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeFloat32, + Value: value, + Column: alert.FieldSourceLongitude, + }) + } + if value, ok := au.mutation.AddedSourceLongitude(); ok { + _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ + Type: field.TypeFloat32, + Value: value, + Column: alert.FieldSourceLongitude, + }) + } + if au.mutation.SourceLongitudeCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeFloat32, + Column: alert.FieldSourceLongitude, + }) + } + if value, ok := au.mutation.SourceScope(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: alert.FieldSourceScope, + }) + } + if au.mutation.SourceScopeCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Column: alert.FieldSourceScope, + }) + } + if value, ok := au.mutation.SourceValue(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: alert.FieldSourceValue, + }) + } + if au.mutation.SourceValueCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Column: alert.FieldSourceValue, + }) + } + if value, ok := 
au.mutation.Capacity(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeInt32, + Value: value, + Column: alert.FieldCapacity, + }) + } + if value, ok := au.mutation.AddedCapacity(); ok { + _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ + Type: field.TypeInt32, + Value: value, + Column: alert.FieldCapacity, + }) + } + if au.mutation.CapacityCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeInt32, + Column: alert.FieldCapacity, + }) + } + if value, ok := au.mutation.LeakSpeed(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: alert.FieldLeakSpeed, + }) + } + if au.mutation.LeakSpeedCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Column: alert.FieldLeakSpeed, + }) + } + if value, ok := au.mutation.ScenarioVersion(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: alert.FieldScenarioVersion, + }) + } + if au.mutation.ScenarioVersionCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Column: alert.FieldScenarioVersion, + }) + } + if value, ok := au.mutation.ScenarioHash(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: alert.FieldScenarioHash, + }) + } + if au.mutation.ScenarioHashCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Column: alert.FieldScenarioHash, + }) + } + if value, ok := au.mutation.Simulated(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeBool, + Value: value, + Column: alert.FieldSimulated, + }) + } + if au.mutation.OwnerCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: alert.OwnerTable, + Columns: []string{alert.OwnerColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: machine.FieldID, + }, + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := au.mutation.OwnerIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: alert.OwnerTable, + Columns: []string{alert.OwnerColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: machine.FieldID, + }, + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if au.mutation.DecisionsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: alert.DecisionsTable, + Columns: []string{alert.DecisionsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: decision.FieldID, + }, + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := au.mutation.RemovedDecisionsIDs(); len(nodes) > 0 && !au.mutation.DecisionsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: alert.DecisionsTable, + Columns: []string{alert.DecisionsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: decision.FieldID, + }, + }, + } + for _, k := range nodes { + 
edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := au.mutation.DecisionsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: alert.DecisionsTable, + Columns: []string{alert.DecisionsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: decision.FieldID, + }, + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if au.mutation.EventsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: alert.EventsTable, + Columns: []string{alert.EventsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: event.FieldID, + }, + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := au.mutation.RemovedEventsIDs(); len(nodes) > 0 && !au.mutation.EventsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: alert.EventsTable, + Columns: []string{alert.EventsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: event.FieldID, + }, + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := au.mutation.EventsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: alert.EventsTable, + Columns: []string{alert.EventsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: event.FieldID, + }, + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if au.mutation.MetasCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: alert.MetasTable, + Columns: []string{alert.MetasColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: meta.FieldID, + }, + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := au.mutation.RemovedMetasIDs(); len(nodes) > 0 && !au.mutation.MetasCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: alert.MetasTable, + Columns: []string{alert.MetasColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: meta.FieldID, + }, + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := au.mutation.MetasIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: alert.MetasTable, + Columns: []string{alert.MetasColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: meta.FieldID, + }, + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if n, err = sqlgraph.UpdateNodes(ctx, au.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{alert.Label} + } else if cerr, ok := isSQLConstraintError(err); ok { + err = cerr + } + return 0, err + } + return 
n, nil +} + +// AlertUpdateOne is the builder for updating a single Alert entity. +type AlertUpdateOne struct { + config + hooks []Hook + mutation *AlertMutation +} + +// SetCreatedAt sets the created_at field. +func (auo *AlertUpdateOne) SetCreatedAt(t time.Time) *AlertUpdateOne { + auo.mutation.SetCreatedAt(t) + return auo +} + +// SetNillableCreatedAt sets the created_at field if the given value is not nil. +func (auo *AlertUpdateOne) SetNillableCreatedAt(t *time.Time) *AlertUpdateOne { + if t != nil { + auo.SetCreatedAt(*t) + } + return auo +} + +// SetUpdatedAt sets the updated_at field. +func (auo *AlertUpdateOne) SetUpdatedAt(t time.Time) *AlertUpdateOne { + auo.mutation.SetUpdatedAt(t) + return auo +} + +// SetNillableUpdatedAt sets the updated_at field if the given value is not nil. +func (auo *AlertUpdateOne) SetNillableUpdatedAt(t *time.Time) *AlertUpdateOne { + if t != nil { + auo.SetUpdatedAt(*t) + } + return auo +} + +// SetScenario sets the scenario field. +func (auo *AlertUpdateOne) SetScenario(s string) *AlertUpdateOne { + auo.mutation.SetScenario(s) + return auo +} + +// SetBucketId sets the bucketId field. +func (auo *AlertUpdateOne) SetBucketId(s string) *AlertUpdateOne { + auo.mutation.SetBucketId(s) + return auo +} + +// SetNillableBucketId sets the bucketId field if the given value is not nil. +func (auo *AlertUpdateOne) SetNillableBucketId(s *string) *AlertUpdateOne { + if s != nil { + auo.SetBucketId(*s) + } + return auo +} + +// ClearBucketId clears the value of bucketId. +func (auo *AlertUpdateOne) ClearBucketId() *AlertUpdateOne { + auo.mutation.ClearBucketId() + return auo +} + +// SetMessage sets the message field. +func (auo *AlertUpdateOne) SetMessage(s string) *AlertUpdateOne { + auo.mutation.SetMessage(s) + return auo +} + +// SetNillableMessage sets the message field if the given value is not nil. +func (auo *AlertUpdateOne) SetNillableMessage(s *string) *AlertUpdateOne { + if s != nil { + auo.SetMessage(*s) + } + return auo +} + +// ClearMessage clears the value of message. +func (auo *AlertUpdateOne) ClearMessage() *AlertUpdateOne { + auo.mutation.ClearMessage() + return auo +} + +// SetEventsCount sets the eventsCount field. +func (auo *AlertUpdateOne) SetEventsCount(i int32) *AlertUpdateOne { + auo.mutation.ResetEventsCount() + auo.mutation.SetEventsCount(i) + return auo +} + +// SetNillableEventsCount sets the eventsCount field if the given value is not nil. +func (auo *AlertUpdateOne) SetNillableEventsCount(i *int32) *AlertUpdateOne { + if i != nil { + auo.SetEventsCount(*i) + } + return auo +} + +// AddEventsCount adds i to eventsCount. +func (auo *AlertUpdateOne) AddEventsCount(i int32) *AlertUpdateOne { + auo.mutation.AddEventsCount(i) + return auo +} + +// ClearEventsCount clears the value of eventsCount. +func (auo *AlertUpdateOne) ClearEventsCount() *AlertUpdateOne { + auo.mutation.ClearEventsCount() + return auo +} + +// SetStartedAt sets the startedAt field. +func (auo *AlertUpdateOne) SetStartedAt(t time.Time) *AlertUpdateOne { + auo.mutation.SetStartedAt(t) + return auo +} + +// SetNillableStartedAt sets the startedAt field if the given value is not nil. +func (auo *AlertUpdateOne) SetNillableStartedAt(t *time.Time) *AlertUpdateOne { + if t != nil { + auo.SetStartedAt(*t) + } + return auo +} + +// ClearStartedAt clears the value of startedAt. +func (auo *AlertUpdateOne) ClearStartedAt() *AlertUpdateOne { + auo.mutation.ClearStartedAt() + return auo +} + +// SetStoppedAt sets the stoppedAt field. 
+func (auo *AlertUpdateOne) SetStoppedAt(t time.Time) *AlertUpdateOne { + auo.mutation.SetStoppedAt(t) + return auo +} + +// SetNillableStoppedAt sets the stoppedAt field if the given value is not nil. +func (auo *AlertUpdateOne) SetNillableStoppedAt(t *time.Time) *AlertUpdateOne { + if t != nil { + auo.SetStoppedAt(*t) + } + return auo +} + +// ClearStoppedAt clears the value of stoppedAt. +func (auo *AlertUpdateOne) ClearStoppedAt() *AlertUpdateOne { + auo.mutation.ClearStoppedAt() + return auo +} + +// SetSourceIp sets the sourceIp field. +func (auo *AlertUpdateOne) SetSourceIp(s string) *AlertUpdateOne { + auo.mutation.SetSourceIp(s) + return auo +} + +// SetNillableSourceIp sets the sourceIp field if the given value is not nil. +func (auo *AlertUpdateOne) SetNillableSourceIp(s *string) *AlertUpdateOne { + if s != nil { + auo.SetSourceIp(*s) + } + return auo +} + +// ClearSourceIp clears the value of sourceIp. +func (auo *AlertUpdateOne) ClearSourceIp() *AlertUpdateOne { + auo.mutation.ClearSourceIp() + return auo +} + +// SetSourceRange sets the sourceRange field. +func (auo *AlertUpdateOne) SetSourceRange(s string) *AlertUpdateOne { + auo.mutation.SetSourceRange(s) + return auo +} + +// SetNillableSourceRange sets the sourceRange field if the given value is not nil. +func (auo *AlertUpdateOne) SetNillableSourceRange(s *string) *AlertUpdateOne { + if s != nil { + auo.SetSourceRange(*s) + } + return auo +} + +// ClearSourceRange clears the value of sourceRange. +func (auo *AlertUpdateOne) ClearSourceRange() *AlertUpdateOne { + auo.mutation.ClearSourceRange() + return auo +} + +// SetSourceAsNumber sets the sourceAsNumber field. +func (auo *AlertUpdateOne) SetSourceAsNumber(s string) *AlertUpdateOne { + auo.mutation.SetSourceAsNumber(s) + return auo +} + +// SetNillableSourceAsNumber sets the sourceAsNumber field if the given value is not nil. +func (auo *AlertUpdateOne) SetNillableSourceAsNumber(s *string) *AlertUpdateOne { + if s != nil { + auo.SetSourceAsNumber(*s) + } + return auo +} + +// ClearSourceAsNumber clears the value of sourceAsNumber. +func (auo *AlertUpdateOne) ClearSourceAsNumber() *AlertUpdateOne { + auo.mutation.ClearSourceAsNumber() + return auo +} + +// SetSourceAsName sets the sourceAsName field. +func (auo *AlertUpdateOne) SetSourceAsName(s string) *AlertUpdateOne { + auo.mutation.SetSourceAsName(s) + return auo +} + +// SetNillableSourceAsName sets the sourceAsName field if the given value is not nil. +func (auo *AlertUpdateOne) SetNillableSourceAsName(s *string) *AlertUpdateOne { + if s != nil { + auo.SetSourceAsName(*s) + } + return auo +} + +// ClearSourceAsName clears the value of sourceAsName. +func (auo *AlertUpdateOne) ClearSourceAsName() *AlertUpdateOne { + auo.mutation.ClearSourceAsName() + return auo +} + +// SetSourceCountry sets the sourceCountry field. +func (auo *AlertUpdateOne) SetSourceCountry(s string) *AlertUpdateOne { + auo.mutation.SetSourceCountry(s) + return auo +} + +// SetNillableSourceCountry sets the sourceCountry field if the given value is not nil. +func (auo *AlertUpdateOne) SetNillableSourceCountry(s *string) *AlertUpdateOne { + if s != nil { + auo.SetSourceCountry(*s) + } + return auo +} + +// ClearSourceCountry clears the value of sourceCountry. +func (auo *AlertUpdateOne) ClearSourceCountry() *AlertUpdateOne { + auo.mutation.ClearSourceCountry() + return auo +} + +// SetSourceLatitude sets the sourceLatitude field. 
+func (auo *AlertUpdateOne) SetSourceLatitude(f float32) *AlertUpdateOne { + auo.mutation.ResetSourceLatitude() + auo.mutation.SetSourceLatitude(f) + return auo +} + +// SetNillableSourceLatitude sets the sourceLatitude field if the given value is not nil. +func (auo *AlertUpdateOne) SetNillableSourceLatitude(f *float32) *AlertUpdateOne { + if f != nil { + auo.SetSourceLatitude(*f) + } + return auo +} + +// AddSourceLatitude adds f to sourceLatitude. +func (auo *AlertUpdateOne) AddSourceLatitude(f float32) *AlertUpdateOne { + auo.mutation.AddSourceLatitude(f) + return auo +} + +// ClearSourceLatitude clears the value of sourceLatitude. +func (auo *AlertUpdateOne) ClearSourceLatitude() *AlertUpdateOne { + auo.mutation.ClearSourceLatitude() + return auo +} + +// SetSourceLongitude sets the sourceLongitude field. +func (auo *AlertUpdateOne) SetSourceLongitude(f float32) *AlertUpdateOne { + auo.mutation.ResetSourceLongitude() + auo.mutation.SetSourceLongitude(f) + return auo +} + +// SetNillableSourceLongitude sets the sourceLongitude field if the given value is not nil. +func (auo *AlertUpdateOne) SetNillableSourceLongitude(f *float32) *AlertUpdateOne { + if f != nil { + auo.SetSourceLongitude(*f) + } + return auo +} + +// AddSourceLongitude adds f to sourceLongitude. +func (auo *AlertUpdateOne) AddSourceLongitude(f float32) *AlertUpdateOne { + auo.mutation.AddSourceLongitude(f) + return auo +} + +// ClearSourceLongitude clears the value of sourceLongitude. +func (auo *AlertUpdateOne) ClearSourceLongitude() *AlertUpdateOne { + auo.mutation.ClearSourceLongitude() + return auo +} + +// SetSourceScope sets the sourceScope field. +func (auo *AlertUpdateOne) SetSourceScope(s string) *AlertUpdateOne { + auo.mutation.SetSourceScope(s) + return auo +} + +// SetNillableSourceScope sets the sourceScope field if the given value is not nil. +func (auo *AlertUpdateOne) SetNillableSourceScope(s *string) *AlertUpdateOne { + if s != nil { + auo.SetSourceScope(*s) + } + return auo +} + +// ClearSourceScope clears the value of sourceScope. +func (auo *AlertUpdateOne) ClearSourceScope() *AlertUpdateOne { + auo.mutation.ClearSourceScope() + return auo +} + +// SetSourceValue sets the sourceValue field. +func (auo *AlertUpdateOne) SetSourceValue(s string) *AlertUpdateOne { + auo.mutation.SetSourceValue(s) + return auo +} + +// SetNillableSourceValue sets the sourceValue field if the given value is not nil. +func (auo *AlertUpdateOne) SetNillableSourceValue(s *string) *AlertUpdateOne { + if s != nil { + auo.SetSourceValue(*s) + } + return auo +} + +// ClearSourceValue clears the value of sourceValue. +func (auo *AlertUpdateOne) ClearSourceValue() *AlertUpdateOne { + auo.mutation.ClearSourceValue() + return auo +} + +// SetCapacity sets the capacity field. +func (auo *AlertUpdateOne) SetCapacity(i int32) *AlertUpdateOne { + auo.mutation.ResetCapacity() + auo.mutation.SetCapacity(i) + return auo +} + +// SetNillableCapacity sets the capacity field if the given value is not nil. +func (auo *AlertUpdateOne) SetNillableCapacity(i *int32) *AlertUpdateOne { + if i != nil { + auo.SetCapacity(*i) + } + return auo +} + +// AddCapacity adds i to capacity. +func (auo *AlertUpdateOne) AddCapacity(i int32) *AlertUpdateOne { + auo.mutation.AddCapacity(i) + return auo +} + +// ClearCapacity clears the value of capacity. +func (auo *AlertUpdateOne) ClearCapacity() *AlertUpdateOne { + auo.mutation.ClearCapacity() + return auo +} + +// SetLeakSpeed sets the leakSpeed field. 
+func (auo *AlertUpdateOne) SetLeakSpeed(s string) *AlertUpdateOne { + auo.mutation.SetLeakSpeed(s) + return auo +} + +// SetNillableLeakSpeed sets the leakSpeed field if the given value is not nil. +func (auo *AlertUpdateOne) SetNillableLeakSpeed(s *string) *AlertUpdateOne { + if s != nil { + auo.SetLeakSpeed(*s) + } + return auo +} + +// ClearLeakSpeed clears the value of leakSpeed. +func (auo *AlertUpdateOne) ClearLeakSpeed() *AlertUpdateOne { + auo.mutation.ClearLeakSpeed() + return auo +} + +// SetScenarioVersion sets the scenarioVersion field. +func (auo *AlertUpdateOne) SetScenarioVersion(s string) *AlertUpdateOne { + auo.mutation.SetScenarioVersion(s) + return auo +} + +// SetNillableScenarioVersion sets the scenarioVersion field if the given value is not nil. +func (auo *AlertUpdateOne) SetNillableScenarioVersion(s *string) *AlertUpdateOne { + if s != nil { + auo.SetScenarioVersion(*s) + } + return auo +} + +// ClearScenarioVersion clears the value of scenarioVersion. +func (auo *AlertUpdateOne) ClearScenarioVersion() *AlertUpdateOne { + auo.mutation.ClearScenarioVersion() + return auo +} + +// SetScenarioHash sets the scenarioHash field. +func (auo *AlertUpdateOne) SetScenarioHash(s string) *AlertUpdateOne { + auo.mutation.SetScenarioHash(s) + return auo +} + +// SetNillableScenarioHash sets the scenarioHash field if the given value is not nil. +func (auo *AlertUpdateOne) SetNillableScenarioHash(s *string) *AlertUpdateOne { + if s != nil { + auo.SetScenarioHash(*s) + } + return auo +} + +// ClearScenarioHash clears the value of scenarioHash. +func (auo *AlertUpdateOne) ClearScenarioHash() *AlertUpdateOne { + auo.mutation.ClearScenarioHash() + return auo +} + +// SetSimulated sets the simulated field. +func (auo *AlertUpdateOne) SetSimulated(b bool) *AlertUpdateOne { + auo.mutation.SetSimulated(b) + return auo +} + +// SetNillableSimulated sets the simulated field if the given value is not nil. +func (auo *AlertUpdateOne) SetNillableSimulated(b *bool) *AlertUpdateOne { + if b != nil { + auo.SetSimulated(*b) + } + return auo +} + +// SetOwnerID sets the owner edge to Machine by id. +func (auo *AlertUpdateOne) SetOwnerID(id int) *AlertUpdateOne { + auo.mutation.SetOwnerID(id) + return auo +} + +// SetNillableOwnerID sets the owner edge to Machine by id if the given value is not nil. +func (auo *AlertUpdateOne) SetNillableOwnerID(id *int) *AlertUpdateOne { + if id != nil { + auo = auo.SetOwnerID(*id) + } + return auo +} + +// SetOwner sets the owner edge to Machine. +func (auo *AlertUpdateOne) SetOwner(m *Machine) *AlertUpdateOne { + return auo.SetOwnerID(m.ID) +} + +// AddDecisionIDs adds the decisions edge to Decision by ids. +func (auo *AlertUpdateOne) AddDecisionIDs(ids ...int) *AlertUpdateOne { + auo.mutation.AddDecisionIDs(ids...) + return auo +} + +// AddDecisions adds the decisions edges to Decision. +func (auo *AlertUpdateOne) AddDecisions(d ...*Decision) *AlertUpdateOne { + ids := make([]int, len(d)) + for i := range d { + ids[i] = d[i].ID + } + return auo.AddDecisionIDs(ids...) +} + +// AddEventIDs adds the events edge to Event by ids. +func (auo *AlertUpdateOne) AddEventIDs(ids ...int) *AlertUpdateOne { + auo.mutation.AddEventIDs(ids...) + return auo +} + +// AddEvents adds the events edges to Event. +func (auo *AlertUpdateOne) AddEvents(e ...*Event) *AlertUpdateOne { + ids := make([]int, len(e)) + for i := range e { + ids[i] = e[i].ID + } + return auo.AddEventIDs(ids...) +} + +// AddMetaIDs adds the metas edge to Meta by ids. 
+func (auo *AlertUpdateOne) AddMetaIDs(ids ...int) *AlertUpdateOne { + auo.mutation.AddMetaIDs(ids...) + return auo +} + +// AddMetas adds the metas edges to Meta. +func (auo *AlertUpdateOne) AddMetas(m ...*Meta) *AlertUpdateOne { + ids := make([]int, len(m)) + for i := range m { + ids[i] = m[i].ID + } + return auo.AddMetaIDs(ids...) +} + +// Mutation returns the AlertMutation object of the builder. +func (auo *AlertUpdateOne) Mutation() *AlertMutation { + return auo.mutation +} + +// ClearOwner clears the "owner" edge to type Machine. +func (auo *AlertUpdateOne) ClearOwner() *AlertUpdateOne { + auo.mutation.ClearOwner() + return auo +} + +// ClearDecisions clears all "decisions" edges to type Decision. +func (auo *AlertUpdateOne) ClearDecisions() *AlertUpdateOne { + auo.mutation.ClearDecisions() + return auo +} + +// RemoveDecisionIDs removes the decisions edge to Decision by ids. +func (auo *AlertUpdateOne) RemoveDecisionIDs(ids ...int) *AlertUpdateOne { + auo.mutation.RemoveDecisionIDs(ids...) + return auo +} + +// RemoveDecisions removes decisions edges to Decision. +func (auo *AlertUpdateOne) RemoveDecisions(d ...*Decision) *AlertUpdateOne { + ids := make([]int, len(d)) + for i := range d { + ids[i] = d[i].ID + } + return auo.RemoveDecisionIDs(ids...) +} + +// ClearEvents clears all "events" edges to type Event. +func (auo *AlertUpdateOne) ClearEvents() *AlertUpdateOne { + auo.mutation.ClearEvents() + return auo +} + +// RemoveEventIDs removes the events edge to Event by ids. +func (auo *AlertUpdateOne) RemoveEventIDs(ids ...int) *AlertUpdateOne { + auo.mutation.RemoveEventIDs(ids...) + return auo +} + +// RemoveEvents removes events edges to Event. +func (auo *AlertUpdateOne) RemoveEvents(e ...*Event) *AlertUpdateOne { + ids := make([]int, len(e)) + for i := range e { + ids[i] = e[i].ID + } + return auo.RemoveEventIDs(ids...) +} + +// ClearMetas clears all "metas" edges to type Meta. +func (auo *AlertUpdateOne) ClearMetas() *AlertUpdateOne { + auo.mutation.ClearMetas() + return auo +} + +// RemoveMetaIDs removes the metas edge to Meta by ids. +func (auo *AlertUpdateOne) RemoveMetaIDs(ids ...int) *AlertUpdateOne { + auo.mutation.RemoveMetaIDs(ids...) + return auo +} + +// RemoveMetas removes metas edges to Meta. +func (auo *AlertUpdateOne) RemoveMetas(m ...*Meta) *AlertUpdateOne { + ids := make([]int, len(m)) + for i := range m { + ids[i] = m[i].ID + } + return auo.RemoveMetaIDs(ids...) +} + +// Save executes the query and returns the updated entity. +func (auo *AlertUpdateOne) Save(ctx context.Context) (*Alert, error) { + var ( + err error + node *Alert + ) + if len(auo.hooks) == 0 { + node, err = auo.sqlSave(ctx) + } else { + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*AlertMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + auo.mutation = mutation + node, err = auo.sqlSave(ctx) + mutation.done = true + return node, err + }) + for i := len(auo.hooks) - 1; i >= 0; i-- { + mut = auo.hooks[i](mut) + } + if _, err := mut.Mutate(ctx, auo.mutation); err != nil { + return nil, err + } + } + return node, err +} + +// SaveX is like Save, but panics if an error occurs. +func (auo *AlertUpdateOne) SaveX(ctx context.Context) *Alert { + node, err := auo.Save(ctx) + if err != nil { + panic(err) + } + return node +} + +// Exec executes the query on the entity. 
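+//
+// Exec is a thin wrapper over Save for callers that do not need the updated
+// entity back; an illustrative sketch (`auo` and `ctx` assumed):
+//
+//	if err := auo.Exec(ctx); err != nil {
+//		// handle the update failure
+//	}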
+func (auo *AlertUpdateOne) Exec(ctx context.Context) error { + _, err := auo.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (auo *AlertUpdateOne) ExecX(ctx context.Context) { + if err := auo.Exec(ctx); err != nil { + panic(err) + } +} + +func (auo *AlertUpdateOne) sqlSave(ctx context.Context) (_node *Alert, err error) { + _spec := &sqlgraph.UpdateSpec{ + Node: &sqlgraph.NodeSpec{ + Table: alert.Table, + Columns: alert.Columns, + ID: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: alert.FieldID, + }, + }, + } + id, ok := auo.mutation.ID() + if !ok { + return nil, &ValidationError{Name: "ID", err: fmt.Errorf("missing Alert.ID for update")} + } + _spec.Node.ID.Value = id + if value, ok := auo.mutation.CreatedAt(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: alert.FieldCreatedAt, + }) + } + if value, ok := auo.mutation.UpdatedAt(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: alert.FieldUpdatedAt, + }) + } + if value, ok := auo.mutation.Scenario(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: alert.FieldScenario, + }) + } + if value, ok := auo.mutation.BucketId(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: alert.FieldBucketId, + }) + } + if auo.mutation.BucketIdCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Column: alert.FieldBucketId, + }) + } + if value, ok := auo.mutation.Message(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: alert.FieldMessage, + }) + } + if auo.mutation.MessageCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Column: alert.FieldMessage, + }) + } + if value, ok := auo.mutation.EventsCount(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeInt32, + Value: value, + Column: alert.FieldEventsCount, + }) + } + if value, ok := auo.mutation.AddedEventsCount(); ok { + _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ + Type: field.TypeInt32, + Value: value, + Column: alert.FieldEventsCount, + }) + } + if auo.mutation.EventsCountCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeInt32, + Column: alert.FieldEventsCount, + }) + } + if value, ok := auo.mutation.StartedAt(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: alert.FieldStartedAt, + }) + } + if auo.mutation.StartedAtCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Column: alert.FieldStartedAt, + }) + } + if value, ok := auo.mutation.StoppedAt(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: alert.FieldStoppedAt, + }) + } + if auo.mutation.StoppedAtCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Column: alert.FieldStoppedAt, + }) + } + if value, ok := auo.mutation.SourceIp(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + 
Column: alert.FieldSourceIp, + }) + } + if auo.mutation.SourceIpCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Column: alert.FieldSourceIp, + }) + } + if value, ok := auo.mutation.SourceRange(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: alert.FieldSourceRange, + }) + } + if auo.mutation.SourceRangeCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Column: alert.FieldSourceRange, + }) + } + if value, ok := auo.mutation.SourceAsNumber(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: alert.FieldSourceAsNumber, + }) + } + if auo.mutation.SourceAsNumberCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Column: alert.FieldSourceAsNumber, + }) + } + if value, ok := auo.mutation.SourceAsName(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: alert.FieldSourceAsName, + }) + } + if auo.mutation.SourceAsNameCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Column: alert.FieldSourceAsName, + }) + } + if value, ok := auo.mutation.SourceCountry(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: alert.FieldSourceCountry, + }) + } + if auo.mutation.SourceCountryCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Column: alert.FieldSourceCountry, + }) + } + if value, ok := auo.mutation.SourceLatitude(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeFloat32, + Value: value, + Column: alert.FieldSourceLatitude, + }) + } + if value, ok := auo.mutation.AddedSourceLatitude(); ok { + _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ + Type: field.TypeFloat32, + Value: value, + Column: alert.FieldSourceLatitude, + }) + } + if auo.mutation.SourceLatitudeCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeFloat32, + Column: alert.FieldSourceLatitude, + }) + } + if value, ok := auo.mutation.SourceLongitude(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeFloat32, + Value: value, + Column: alert.FieldSourceLongitude, + }) + } + if value, ok := auo.mutation.AddedSourceLongitude(); ok { + _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ + Type: field.TypeFloat32, + Value: value, + Column: alert.FieldSourceLongitude, + }) + } + if auo.mutation.SourceLongitudeCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeFloat32, + Column: alert.FieldSourceLongitude, + }) + } + if value, ok := auo.mutation.SourceScope(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: alert.FieldSourceScope, + }) + } + if auo.mutation.SourceScopeCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Column: alert.FieldSourceScope, + }) + } + if value, ok := auo.mutation.SourceValue(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + 
Column: alert.FieldSourceValue, + }) + } + if auo.mutation.SourceValueCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Column: alert.FieldSourceValue, + }) + } + if value, ok := auo.mutation.Capacity(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeInt32, + Value: value, + Column: alert.FieldCapacity, + }) + } + if value, ok := auo.mutation.AddedCapacity(); ok { + _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ + Type: field.TypeInt32, + Value: value, + Column: alert.FieldCapacity, + }) + } + if auo.mutation.CapacityCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeInt32, + Column: alert.FieldCapacity, + }) + } + if value, ok := auo.mutation.LeakSpeed(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: alert.FieldLeakSpeed, + }) + } + if auo.mutation.LeakSpeedCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Column: alert.FieldLeakSpeed, + }) + } + if value, ok := auo.mutation.ScenarioVersion(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: alert.FieldScenarioVersion, + }) + } + if auo.mutation.ScenarioVersionCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Column: alert.FieldScenarioVersion, + }) + } + if value, ok := auo.mutation.ScenarioHash(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: alert.FieldScenarioHash, + }) + } + if auo.mutation.ScenarioHashCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Column: alert.FieldScenarioHash, + }) + } + if value, ok := auo.mutation.Simulated(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeBool, + Value: value, + Column: alert.FieldSimulated, + }) + } + if auo.mutation.OwnerCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: alert.OwnerTable, + Columns: []string{alert.OwnerColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: machine.FieldID, + }, + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := auo.mutation.OwnerIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: alert.OwnerTable, + Columns: []string{alert.OwnerColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: machine.FieldID, + }, + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if auo.mutation.DecisionsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: alert.DecisionsTable, + Columns: []string{alert.DecisionsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: decision.FieldID, + }, + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := auo.mutation.RemovedDecisionsIDs(); len(nodes) > 0 && !auo.mutation.DecisionsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + 
Table: alert.DecisionsTable, + Columns: []string{alert.DecisionsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: decision.FieldID, + }, + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := auo.mutation.DecisionsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: alert.DecisionsTable, + Columns: []string{alert.DecisionsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: decision.FieldID, + }, + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if auo.mutation.EventsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: alert.EventsTable, + Columns: []string{alert.EventsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: event.FieldID, + }, + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := auo.mutation.RemovedEventsIDs(); len(nodes) > 0 && !auo.mutation.EventsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: alert.EventsTable, + Columns: []string{alert.EventsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: event.FieldID, + }, + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := auo.mutation.EventsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: alert.EventsTable, + Columns: []string{alert.EventsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: event.FieldID, + }, + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if auo.mutation.MetasCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: alert.MetasTable, + Columns: []string{alert.MetasColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: meta.FieldID, + }, + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := auo.mutation.RemovedMetasIDs(); len(nodes) > 0 && !auo.mutation.MetasCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: alert.MetasTable, + Columns: []string{alert.MetasColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: meta.FieldID, + }, + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := auo.mutation.MetasIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: alert.MetasTable, + Columns: []string{alert.MetasColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: meta.FieldID, + }, + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + 
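+	// All field mutations and edge diffs are now encoded in _spec; delegate
+	// the actual UPDATE to sqlgraph and scan the refreshed row into _node.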
_node = &Alert{config: auo.config} + _spec.Assign = _node.assignValues + _spec.ScanValues = _node.scanValues() + if err = sqlgraph.UpdateNode(ctx, auo.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{alert.Label} + } else if cerr, ok := isSQLConstraintError(err); ok { + err = cerr + } + return nil, err + } + return _node, nil +} diff --git a/pkg/database/ent/bouncer.go b/pkg/database/ent/bouncer.go new file mode 100644 index 000000000..e1041f40c --- /dev/null +++ b/pkg/database/ent/bouncer.go @@ -0,0 +1,177 @@ +// Code generated by entc, DO NOT EDIT. + +package ent + +import ( + "fmt" + "strings" + "time" + + "github.com/crowdsecurity/crowdsec/pkg/database/ent/bouncer" + "github.com/facebook/ent/dialect/sql" +) + +// Bouncer is the model entity for the Bouncer schema. +type Bouncer struct { + config `json:"-"` + // ID of the ent. + ID int `json:"id,omitempty"` + // CreatedAt holds the value of the "created_at" field. + CreatedAt time.Time `json:"created_at,omitempty"` + // UpdatedAt holds the value of the "updated_at" field. + UpdatedAt time.Time `json:"updated_at,omitempty"` + // Name holds the value of the "name" field. + Name string `json:"name,omitempty"` + // APIKey holds the value of the "api_key" field. + APIKey string `json:"api_key,omitempty"` + // Revoked holds the value of the "revoked" field. + Revoked bool `json:"revoked,omitempty"` + // IPAddress holds the value of the "ip_address" field. + IPAddress string `json:"ip_address,omitempty"` + // Type holds the value of the "type" field. + Type string `json:"type,omitempty"` + // Version holds the value of the "version" field. + Version string `json:"version,omitempty"` + // Until holds the value of the "until" field. + Until time.Time `json:"until,omitempty"` + // LastPull holds the value of the "last_pull" field. + LastPull time.Time `json:"last_pull,omitempty"` +} + +// scanValues returns the types for scanning values from sql.Rows. +func (*Bouncer) scanValues() []interface{} { + return []interface{}{ + &sql.NullInt64{}, // id + &sql.NullTime{}, // created_at + &sql.NullTime{}, // updated_at + &sql.NullString{}, // name + &sql.NullString{}, // api_key + &sql.NullBool{}, // revoked + &sql.NullString{}, // ip_address + &sql.NullString{}, // type + &sql.NullString{}, // version + &sql.NullTime{}, // until + &sql.NullTime{}, // last_pull + } +} + +// assignValues assigns the values that were returned from sql.Rows (after scanning) +// to the Bouncer fields. 
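+// The assignment is positional: the indices must stay in sync with
+// bouncer.Columns and with the scanner types returned by scanValues above.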
+func (b *Bouncer) assignValues(values ...interface{}) error { + if m, n := len(values), len(bouncer.Columns); m < n { + return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) + } + value, ok := values[0].(*sql.NullInt64) + if !ok { + return fmt.Errorf("unexpected type %T for field id", value) + } + b.ID = int(value.Int64) + values = values[1:] + if value, ok := values[0].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field created_at", values[0]) + } else if value.Valid { + b.CreatedAt = value.Time + } + if value, ok := values[1].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field updated_at", values[1]) + } else if value.Valid { + b.UpdatedAt = value.Time + } + if value, ok := values[2].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field name", values[2]) + } else if value.Valid { + b.Name = value.String + } + if value, ok := values[3].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field api_key", values[3]) + } else if value.Valid { + b.APIKey = value.String + } + if value, ok := values[4].(*sql.NullBool); !ok { + return fmt.Errorf("unexpected type %T for field revoked", values[4]) + } else if value.Valid { + b.Revoked = value.Bool + } + if value, ok := values[5].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field ip_address", values[5]) + } else if value.Valid { + b.IPAddress = value.String + } + if value, ok := values[6].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field type", values[6]) + } else if value.Valid { + b.Type = value.String + } + if value, ok := values[7].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field version", values[7]) + } else if value.Valid { + b.Version = value.String + } + if value, ok := values[8].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field until", values[8]) + } else if value.Valid { + b.Until = value.Time + } + if value, ok := values[9].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field last_pull", values[9]) + } else if value.Valid { + b.LastPull = value.Time + } + return nil +} + +// Update returns a builder for updating this Bouncer. +// Note that, you need to call Bouncer.Unwrap() before calling this method, if this Bouncer +// was returned from a transaction, and the transaction was committed or rolled back. +func (b *Bouncer) Update() *BouncerUpdateOne { + return (&BouncerClient{config: b.config}).UpdateOne(b) +} + +// Unwrap unwraps the entity that was returned from a transaction after it was closed, +// so that all next queries will be executed through the driver which created the transaction. +func (b *Bouncer) Unwrap() *Bouncer { + tx, ok := b.config.driver.(*txDriver) + if !ok { + panic("ent: Bouncer is not a transactional entity") + } + b.config.driver = tx.drv + return b +} + +// String implements the fmt.Stringer. 
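+//
+// The output is a single-line dump of all fields, e.g. (values illustrative):
+//
+//	Bouncer(id=1, created_at=Mon Jan  2 15:04:05 2006, ..., name=my-bouncer, revoked=false, ...)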
+func (b *Bouncer) String() string { + var builder strings.Builder + builder.WriteString("Bouncer(") + builder.WriteString(fmt.Sprintf("id=%v", b.ID)) + builder.WriteString(", created_at=") + builder.WriteString(b.CreatedAt.Format(time.ANSIC)) + builder.WriteString(", updated_at=") + builder.WriteString(b.UpdatedAt.Format(time.ANSIC)) + builder.WriteString(", name=") + builder.WriteString(b.Name) + builder.WriteString(", api_key=") + builder.WriteString(b.APIKey) + builder.WriteString(", revoked=") + builder.WriteString(fmt.Sprintf("%v", b.Revoked)) + builder.WriteString(", ip_address=") + builder.WriteString(b.IPAddress) + builder.WriteString(", type=") + builder.WriteString(b.Type) + builder.WriteString(", version=") + builder.WriteString(b.Version) + builder.WriteString(", until=") + builder.WriteString(b.Until.Format(time.ANSIC)) + builder.WriteString(", last_pull=") + builder.WriteString(b.LastPull.Format(time.ANSIC)) + builder.WriteByte(')') + return builder.String() +} + +// Bouncers is a parsable slice of Bouncer. +type Bouncers []*Bouncer + +func (b Bouncers) config(cfg config) { + for _i := range b { + b[_i].config = cfg + } +} diff --git a/pkg/database/ent/bouncer/bouncer.go b/pkg/database/ent/bouncer/bouncer.go new file mode 100644 index 000000000..6ed228a27 --- /dev/null +++ b/pkg/database/ent/bouncer/bouncer.go @@ -0,0 +1,75 @@ +// Code generated by entc, DO NOT EDIT. + +package bouncer + +import ( + "time" +) + +const ( + // Label holds the string label denoting the bouncer type in the database. + Label = "bouncer" + // FieldID holds the string denoting the id field in the database. + FieldID = "id" + // FieldCreatedAt holds the string denoting the created_at field in the database. + FieldCreatedAt = "created_at" + // FieldUpdatedAt holds the string denoting the updated_at field in the database. + FieldUpdatedAt = "updated_at" + // FieldName holds the string denoting the name field in the database. + FieldName = "name" + // FieldAPIKey holds the string denoting the api_key field in the database. + FieldAPIKey = "api_key" + // FieldRevoked holds the string denoting the revoked field in the database. + FieldRevoked = "revoked" + // FieldIPAddress holds the string denoting the ip_address field in the database. + FieldIPAddress = "ip_address" + // FieldType holds the string denoting the type field in the database. + FieldType = "type" + // FieldVersion holds the string denoting the version field in the database. + FieldVersion = "version" + // FieldUntil holds the string denoting the until field in the database. + FieldUntil = "until" + // FieldLastPull holds the string denoting the last_pull field in the database. + FieldLastPull = "last_pull" + + // Table holds the table name of the bouncer in the database. + Table = "bouncers" +) + +// Columns holds all SQL columns for bouncer fields. +var Columns = []string{ + FieldID, + FieldCreatedAt, + FieldUpdatedAt, + FieldName, + FieldAPIKey, + FieldRevoked, + FieldIPAddress, + FieldType, + FieldVersion, + FieldUntil, + FieldLastPull, +} + +// ValidColumn reports if the column name is valid (part of the table columns). +func ValidColumn(column string) bool { + for i := range Columns { + if column == Columns[i] { + return true + } + } + return false +} + +var ( + // DefaultCreatedAt holds the default value on creation for the created_at field. + DefaultCreatedAt func() time.Time + // DefaultUpdatedAt holds the default value on creation for the updated_at field. 
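+	// Like the other hooks in this block, this is left nil here and is
+	// typically assigned from the schema defaults by the generated runtime
+	// initialization.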
+ DefaultUpdatedAt func() time.Time + // DefaultIPAddress holds the default value on creation for the ip_address field. + DefaultIPAddress string + // DefaultUntil holds the default value on creation for the until field. + DefaultUntil func() time.Time + // DefaultLastPull holds the default value on creation for the last_pull field. + DefaultLastPull func() time.Time +) diff --git a/pkg/database/ent/bouncer/where.go b/pkg/database/ent/bouncer/where.go new file mode 100644 index 000000000..4672e9514 --- /dev/null +++ b/pkg/database/ent/bouncer/where.go @@ -0,0 +1,1124 @@ +// Code generated by entc, DO NOT EDIT. + +package bouncer + +import ( + "time" + + "github.com/crowdsecurity/crowdsec/pkg/database/ent/predicate" + "github.com/facebook/ent/dialect/sql" +) + +// ID filters vertices based on their identifier. +func ID(id int) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldID), id)) + }) +} + +// IDEQ applies the EQ predicate on the ID field. +func IDEQ(id int) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldID), id)) + }) +} + +// IDNEQ applies the NEQ predicate on the ID field. +func IDNEQ(id int) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldID), id)) + }) +} + +// IDIn applies the In predicate on the ID field. +func IDIn(ids ...int) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(ids) == 0 { + s.Where(sql.False()) + return + } + v := make([]interface{}, len(ids)) + for i := range v { + v[i] = ids[i] + } + s.Where(sql.In(s.C(FieldID), v...)) + }) +} + +// IDNotIn applies the NotIn predicate on the ID field. +func IDNotIn(ids ...int) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(ids) == 0 { + s.Where(sql.False()) + return + } + v := make([]interface{}, len(ids)) + for i := range v { + v[i] = ids[i] + } + s.Where(sql.NotIn(s.C(FieldID), v...)) + }) +} + +// IDGT applies the GT predicate on the ID field. +func IDGT(id int) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldID), id)) + }) +} + +// IDGTE applies the GTE predicate on the ID field. +func IDGTE(id int) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldID), id)) + }) +} + +// IDLT applies the LT predicate on the ID field. +func IDLT(id int) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldID), id)) + }) +} + +// IDLTE applies the LTE predicate on the ID field. +func IDLTE(id int) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldID), id)) + }) +} + +// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ. +func CreatedAt(v time.Time) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldCreatedAt), v)) + }) +} + +// UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ. 
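+//
+// Field predicates feed the query builder's Where clause; an illustrative
+// sketch (`client`, `ctx` and `ts` are assumed):
+//
+//	same, err := client.Bouncer.Query().
+//		Where(bouncer.UpdatedAt(ts)).
+//		All(ctx)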
+func UpdatedAt(v time.Time) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldUpdatedAt), v)) + }) +} + +// Name applies equality check predicate on the "name" field. It's identical to NameEQ. +func Name(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldName), v)) + }) +} + +// APIKey applies equality check predicate on the "api_key" field. It's identical to APIKeyEQ. +func APIKey(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldAPIKey), v)) + }) +} + +// Revoked applies equality check predicate on the "revoked" field. It's identical to RevokedEQ. +func Revoked(v bool) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldRevoked), v)) + }) +} + +// IPAddress applies equality check predicate on the "ip_address" field. It's identical to IPAddressEQ. +func IPAddress(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldIPAddress), v)) + }) +} + +// Type applies equality check predicate on the "type" field. It's identical to TypeEQ. +func Type(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldType), v)) + }) +} + +// Version applies equality check predicate on the "version" field. It's identical to VersionEQ. +func Version(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldVersion), v)) + }) +} + +// Until applies equality check predicate on the "until" field. It's identical to UntilEQ. +func Until(v time.Time) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldUntil), v)) + }) +} + +// LastPull applies equality check predicate on the "last_pull" field. It's identical to LastPullEQ. +func LastPull(v time.Time) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldLastPull), v)) + }) +} + +// CreatedAtEQ applies the EQ predicate on the "created_at" field. +func CreatedAtEQ(v time.Time) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldCreatedAt), v)) + }) +} + +// CreatedAtNEQ applies the NEQ predicate on the "created_at" field. +func CreatedAtNEQ(v time.Time) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldCreatedAt), v)) + }) +} + +// CreatedAtIn applies the In predicate on the "created_at" field. +func CreatedAtIn(vs ...time.Time) predicate.Bouncer { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Bouncer(func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(v) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.In(s.C(FieldCreatedAt), v...)) + }) +} + +// CreatedAtNotIn applies the NotIn predicate on the "created_at" field. +func CreatedAtNotIn(vs ...time.Time) predicate.Bouncer { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Bouncer(func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. 
+ if len(v) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.NotIn(s.C(FieldCreatedAt), v...)) + }) +} + +// CreatedAtGT applies the GT predicate on the "created_at" field. +func CreatedAtGT(v time.Time) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldCreatedAt), v)) + }) +} + +// CreatedAtGTE applies the GTE predicate on the "created_at" field. +func CreatedAtGTE(v time.Time) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldCreatedAt), v)) + }) +} + +// CreatedAtLT applies the LT predicate on the "created_at" field. +func CreatedAtLT(v time.Time) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldCreatedAt), v)) + }) +} + +// CreatedAtLTE applies the LTE predicate on the "created_at" field. +func CreatedAtLTE(v time.Time) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldCreatedAt), v)) + }) +} + +// UpdatedAtEQ applies the EQ predicate on the "updated_at" field. +func UpdatedAtEQ(v time.Time) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldUpdatedAt), v)) + }) +} + +// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field. +func UpdatedAtNEQ(v time.Time) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldUpdatedAt), v)) + }) +} + +// UpdatedAtIn applies the In predicate on the "updated_at" field. +func UpdatedAtIn(vs ...time.Time) predicate.Bouncer { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Bouncer(func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(v) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.In(s.C(FieldUpdatedAt), v...)) + }) +} + +// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field. +func UpdatedAtNotIn(vs ...time.Time) predicate.Bouncer { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Bouncer(func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(v) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.NotIn(s.C(FieldUpdatedAt), v...)) + }) +} + +// UpdatedAtGT applies the GT predicate on the "updated_at" field. +func UpdatedAtGT(v time.Time) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldUpdatedAt), v)) + }) +} + +// UpdatedAtGTE applies the GTE predicate on the "updated_at" field. +func UpdatedAtGTE(v time.Time) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldUpdatedAt), v)) + }) +} + +// UpdatedAtLT applies the LT predicate on the "updated_at" field. +func UpdatedAtLT(v time.Time) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldUpdatedAt), v)) + }) +} + +// UpdatedAtLTE applies the LTE predicate on the "updated_at" field. +func UpdatedAtLTE(v time.Time) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldUpdatedAt), v)) + }) +} + +// NameEQ applies the EQ predicate on the "name" field. 
+func NameEQ(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldName), v)) + }) +} + +// NameNEQ applies the NEQ predicate on the "name" field. +func NameNEQ(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldName), v)) + }) +} + +// NameIn applies the In predicate on the "name" field. +func NameIn(vs ...string) predicate.Bouncer { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Bouncer(func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(v) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.In(s.C(FieldName), v...)) + }) +} + +// NameNotIn applies the NotIn predicate on the "name" field. +func NameNotIn(vs ...string) predicate.Bouncer { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Bouncer(func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(v) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.NotIn(s.C(FieldName), v...)) + }) +} + +// NameGT applies the GT predicate on the "name" field. +func NameGT(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldName), v)) + }) +} + +// NameGTE applies the GTE predicate on the "name" field. +func NameGTE(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldName), v)) + }) +} + +// NameLT applies the LT predicate on the "name" field. +func NameLT(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldName), v)) + }) +} + +// NameLTE applies the LTE predicate on the "name" field. +func NameLTE(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldName), v)) + }) +} + +// NameContains applies the Contains predicate on the "name" field. +func NameContains(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.Contains(s.C(FieldName), v)) + }) +} + +// NameHasPrefix applies the HasPrefix predicate on the "name" field. +func NameHasPrefix(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.HasPrefix(s.C(FieldName), v)) + }) +} + +// NameHasSuffix applies the HasSuffix predicate on the "name" field. +func NameHasSuffix(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.HasSuffix(s.C(FieldName), v)) + }) +} + +// NameEqualFold applies the EqualFold predicate on the "name" field. +func NameEqualFold(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.EqualFold(s.C(FieldName), v)) + }) +} + +// NameContainsFold applies the ContainsFold predicate on the "name" field. +func NameContainsFold(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.ContainsFold(s.C(FieldName), v)) + }) +} + +// APIKeyEQ applies the EQ predicate on the "api_key" field. +func APIKeyEQ(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldAPIKey), v)) + }) +} + +// APIKeyNEQ applies the NEQ predicate on the "api_key" field. 
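+//
+// For example (illustrative; `client`, `ctx` and `current` assumed):
+//
+//	others, err := client.Bouncer.Query().
+//		Where(bouncer.APIKeyNEQ(current)).
+//		All(ctx)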
+func APIKeyNEQ(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldAPIKey), v)) + }) +} + +// APIKeyIn applies the In predicate on the "api_key" field. +func APIKeyIn(vs ...string) predicate.Bouncer { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Bouncer(func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(v) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.In(s.C(FieldAPIKey), v...)) + }) +} + +// APIKeyNotIn applies the NotIn predicate on the "api_key" field. +func APIKeyNotIn(vs ...string) predicate.Bouncer { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Bouncer(func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(v) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.NotIn(s.C(FieldAPIKey), v...)) + }) +} + +// APIKeyGT applies the GT predicate on the "api_key" field. +func APIKeyGT(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldAPIKey), v)) + }) +} + +// APIKeyGTE applies the GTE predicate on the "api_key" field. +func APIKeyGTE(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldAPIKey), v)) + }) +} + +// APIKeyLT applies the LT predicate on the "api_key" field. +func APIKeyLT(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldAPIKey), v)) + }) +} + +// APIKeyLTE applies the LTE predicate on the "api_key" field. +func APIKeyLTE(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldAPIKey), v)) + }) +} + +// APIKeyContains applies the Contains predicate on the "api_key" field. +func APIKeyContains(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.Contains(s.C(FieldAPIKey), v)) + }) +} + +// APIKeyHasPrefix applies the HasPrefix predicate on the "api_key" field. +func APIKeyHasPrefix(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.HasPrefix(s.C(FieldAPIKey), v)) + }) +} + +// APIKeyHasSuffix applies the HasSuffix predicate on the "api_key" field. +func APIKeyHasSuffix(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.HasSuffix(s.C(FieldAPIKey), v)) + }) +} + +// APIKeyEqualFold applies the EqualFold predicate on the "api_key" field. +func APIKeyEqualFold(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.EqualFold(s.C(FieldAPIKey), v)) + }) +} + +// APIKeyContainsFold applies the ContainsFold predicate on the "api_key" field. +func APIKeyContainsFold(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.ContainsFold(s.C(FieldAPIKey), v)) + }) +} + +// RevokedEQ applies the EQ predicate on the "revoked" field. +func RevokedEQ(v bool) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldRevoked), v)) + }) +} + +// RevokedNEQ applies the NEQ predicate on the "revoked" field. 
+func RevokedNEQ(v bool) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldRevoked), v)) + }) +} + +// IPAddressEQ applies the EQ predicate on the "ip_address" field. +func IPAddressEQ(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldIPAddress), v)) + }) +} + +// IPAddressNEQ applies the NEQ predicate on the "ip_address" field. +func IPAddressNEQ(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldIPAddress), v)) + }) +} + +// IPAddressIn applies the In predicate on the "ip_address" field. +func IPAddressIn(vs ...string) predicate.Bouncer { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Bouncer(func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(v) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.In(s.C(FieldIPAddress), v...)) + }) +} + +// IPAddressNotIn applies the NotIn predicate on the "ip_address" field. +func IPAddressNotIn(vs ...string) predicate.Bouncer { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Bouncer(func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(v) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.NotIn(s.C(FieldIPAddress), v...)) + }) +} + +// IPAddressGT applies the GT predicate on the "ip_address" field. +func IPAddressGT(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldIPAddress), v)) + }) +} + +// IPAddressGTE applies the GTE predicate on the "ip_address" field. +func IPAddressGTE(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldIPAddress), v)) + }) +} + +// IPAddressLT applies the LT predicate on the "ip_address" field. +func IPAddressLT(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldIPAddress), v)) + }) +} + +// IPAddressLTE applies the LTE predicate on the "ip_address" field. +func IPAddressLTE(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldIPAddress), v)) + }) +} + +// IPAddressContains applies the Contains predicate on the "ip_address" field. +func IPAddressContains(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.Contains(s.C(FieldIPAddress), v)) + }) +} + +// IPAddressHasPrefix applies the HasPrefix predicate on the "ip_address" field. +func IPAddressHasPrefix(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.HasPrefix(s.C(FieldIPAddress), v)) + }) +} + +// IPAddressHasSuffix applies the HasSuffix predicate on the "ip_address" field. +func IPAddressHasSuffix(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.HasSuffix(s.C(FieldIPAddress), v)) + }) +} + +// IPAddressIsNil applies the IsNil predicate on the "ip_address" field. +func IPAddressIsNil() predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.IsNull(s.C(FieldIPAddress))) + }) +} + +// IPAddressNotNil applies the NotNil predicate on the "ip_address" field. 
+func IPAddressNotNil() predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.NotNull(s.C(FieldIPAddress))) + }) +} + +// IPAddressEqualFold applies the EqualFold predicate on the "ip_address" field. +func IPAddressEqualFold(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.EqualFold(s.C(FieldIPAddress), v)) + }) +} + +// IPAddressContainsFold applies the ContainsFold predicate on the "ip_address" field. +func IPAddressContainsFold(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.ContainsFold(s.C(FieldIPAddress), v)) + }) +} + +// TypeEQ applies the EQ predicate on the "type" field. +func TypeEQ(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldType), v)) + }) +} + +// TypeNEQ applies the NEQ predicate on the "type" field. +func TypeNEQ(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldType), v)) + }) +} + +// TypeIn applies the In predicate on the "type" field. +func TypeIn(vs ...string) predicate.Bouncer { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Bouncer(func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(v) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.In(s.C(FieldType), v...)) + }) +} + +// TypeNotIn applies the NotIn predicate on the "type" field. +func TypeNotIn(vs ...string) predicate.Bouncer { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Bouncer(func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(v) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.NotIn(s.C(FieldType), v...)) + }) +} + +// TypeGT applies the GT predicate on the "type" field. +func TypeGT(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldType), v)) + }) +} + +// TypeGTE applies the GTE predicate on the "type" field. +func TypeGTE(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldType), v)) + }) +} + +// TypeLT applies the LT predicate on the "type" field. +func TypeLT(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldType), v)) + }) +} + +// TypeLTE applies the LTE predicate on the "type" field. +func TypeLTE(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldType), v)) + }) +} + +// TypeContains applies the Contains predicate on the "type" field. +func TypeContains(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.Contains(s.C(FieldType), v)) + }) +} + +// TypeHasPrefix applies the HasPrefix predicate on the "type" field. +func TypeHasPrefix(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.HasPrefix(s.C(FieldType), v)) + }) +} + +// TypeHasSuffix applies the HasSuffix predicate on the "type" field. +func TypeHasSuffix(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.HasSuffix(s.C(FieldType), v)) + }) +} + +// TypeIsNil applies the IsNil predicate on the "type" field. 
+func TypeIsNil() predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.IsNull(s.C(FieldType))) + }) +} + +// TypeNotNil applies the NotNil predicate on the "type" field. +func TypeNotNil() predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.NotNull(s.C(FieldType))) + }) +} + +// TypeEqualFold applies the EqualFold predicate on the "type" field. +func TypeEqualFold(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.EqualFold(s.C(FieldType), v)) + }) +} + +// TypeContainsFold applies the ContainsFold predicate on the "type" field. +func TypeContainsFold(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.ContainsFold(s.C(FieldType), v)) + }) +} + +// VersionEQ applies the EQ predicate on the "version" field. +func VersionEQ(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldVersion), v)) + }) +} + +// VersionNEQ applies the NEQ predicate on the "version" field. +func VersionNEQ(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldVersion), v)) + }) +} + +// VersionIn applies the In predicate on the "version" field. +func VersionIn(vs ...string) predicate.Bouncer { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Bouncer(func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(v) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.In(s.C(FieldVersion), v...)) + }) +} + +// VersionNotIn applies the NotIn predicate on the "version" field. +func VersionNotIn(vs ...string) predicate.Bouncer { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Bouncer(func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(v) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.NotIn(s.C(FieldVersion), v...)) + }) +} + +// VersionGT applies the GT predicate on the "version" field. +func VersionGT(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldVersion), v)) + }) +} + +// VersionGTE applies the GTE predicate on the "version" field. +func VersionGTE(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldVersion), v)) + }) +} + +// VersionLT applies the LT predicate on the "version" field. +func VersionLT(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldVersion), v)) + }) +} + +// VersionLTE applies the LTE predicate on the "version" field. +func VersionLTE(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldVersion), v)) + }) +} + +// VersionContains applies the Contains predicate on the "version" field. +func VersionContains(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.Contains(s.C(FieldVersion), v)) + }) +} + +// VersionHasPrefix applies the HasPrefix predicate on the "version" field. 
+func VersionHasPrefix(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.HasPrefix(s.C(FieldVersion), v)) + }) +} + +// VersionHasSuffix applies the HasSuffix predicate on the "version" field. +func VersionHasSuffix(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.HasSuffix(s.C(FieldVersion), v)) + }) +} + +// VersionIsNil applies the IsNil predicate on the "version" field. +func VersionIsNil() predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.IsNull(s.C(FieldVersion))) + }) +} + +// VersionNotNil applies the NotNil predicate on the "version" field. +func VersionNotNil() predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.NotNull(s.C(FieldVersion))) + }) +} + +// VersionEqualFold applies the EqualFold predicate on the "version" field. +func VersionEqualFold(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.EqualFold(s.C(FieldVersion), v)) + }) +} + +// VersionContainsFold applies the ContainsFold predicate on the "version" field. +func VersionContainsFold(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.ContainsFold(s.C(FieldVersion), v)) + }) +} + +// UntilEQ applies the EQ predicate on the "until" field. +func UntilEQ(v time.Time) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldUntil), v)) + }) +} + +// UntilNEQ applies the NEQ predicate on the "until" field. +func UntilNEQ(v time.Time) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldUntil), v)) + }) +} + +// UntilIn applies the In predicate on the "until" field. +func UntilIn(vs ...time.Time) predicate.Bouncer { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Bouncer(func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(v) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.In(s.C(FieldUntil), v...)) + }) +} + +// UntilNotIn applies the NotIn predicate on the "until" field. +func UntilNotIn(vs ...time.Time) predicate.Bouncer { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Bouncer(func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(v) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.NotIn(s.C(FieldUntil), v...)) + }) +} + +// UntilGT applies the GT predicate on the "until" field. +func UntilGT(v time.Time) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldUntil), v)) + }) +} + +// UntilGTE applies the GTE predicate on the "until" field. +func UntilGTE(v time.Time) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldUntil), v)) + }) +} + +// UntilLT applies the LT predicate on the "until" field. +func UntilLT(v time.Time) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldUntil), v)) + }) +} + +// UntilLTE applies the LTE predicate on the "until" field. 
+func UntilLTE(v time.Time) predicate.Bouncer {
+ return predicate.Bouncer(func(s *sql.Selector) {
+ s.Where(sql.LTE(s.C(FieldUntil), v))
+ })
+}
+
+// UntilIsNil applies the IsNil predicate on the "until" field.
+func UntilIsNil() predicate.Bouncer {
+ return predicate.Bouncer(func(s *sql.Selector) {
+ s.Where(sql.IsNull(s.C(FieldUntil)))
+ })
+}
+
+// UntilNotNil applies the NotNil predicate on the "until" field.
+func UntilNotNil() predicate.Bouncer {
+ return predicate.Bouncer(func(s *sql.Selector) {
+ s.Where(sql.NotNull(s.C(FieldUntil)))
+ })
+}
+
+// LastPullEQ applies the EQ predicate on the "last_pull" field.
+func LastPullEQ(v time.Time) predicate.Bouncer {
+ return predicate.Bouncer(func(s *sql.Selector) {
+ s.Where(sql.EQ(s.C(FieldLastPull), v))
+ })
+}
+
+// LastPullNEQ applies the NEQ predicate on the "last_pull" field.
+func LastPullNEQ(v time.Time) predicate.Bouncer {
+ return predicate.Bouncer(func(s *sql.Selector) {
+ s.Where(sql.NEQ(s.C(FieldLastPull), v))
+ })
+}
+
+// LastPullIn applies the In predicate on the "last_pull" field.
+func LastPullIn(vs ...time.Time) predicate.Bouncer {
+ v := make([]interface{}, len(vs))
+ for i := range v {
+ v[i] = vs[i]
+ }
+ return predicate.Bouncer(func(s *sql.Selector) {
+ // if no arguments were provided, append the FALSE constant,
+ // since we can't apply "IN ()". This will make this predicate falsy.
+ if len(v) == 0 {
+ s.Where(sql.False())
+ return
+ }
+ s.Where(sql.In(s.C(FieldLastPull), v...))
+ })
+}
+
+// LastPullNotIn applies the NotIn predicate on the "last_pull" field.
+func LastPullNotIn(vs ...time.Time) predicate.Bouncer {
+ v := make([]interface{}, len(vs))
+ for i := range v {
+ v[i] = vs[i]
+ }
+ return predicate.Bouncer(func(s *sql.Selector) {
+ // if no arguments were provided, append the FALSE constant,
+ // since we can't apply "IN ()". This will make this predicate falsy.
+ if len(v) == 0 {
+ s.Where(sql.False())
+ return
+ }
+ s.Where(sql.NotIn(s.C(FieldLastPull), v...))
+ })
+}
+
+// LastPullGT applies the GT predicate on the "last_pull" field.
+func LastPullGT(v time.Time) predicate.Bouncer {
+ return predicate.Bouncer(func(s *sql.Selector) {
+ s.Where(sql.GT(s.C(FieldLastPull), v))
+ })
+}
+
+// LastPullGTE applies the GTE predicate on the "last_pull" field.
+func LastPullGTE(v time.Time) predicate.Bouncer {
+ return predicate.Bouncer(func(s *sql.Selector) {
+ s.Where(sql.GTE(s.C(FieldLastPull), v))
+ })
+}
+
+// LastPullLT applies the LT predicate on the "last_pull" field.
+func LastPullLT(v time.Time) predicate.Bouncer {
+ return predicate.Bouncer(func(s *sql.Selector) {
+ s.Where(sql.LT(s.C(FieldLastPull), v))
+ })
+}
+
+// LastPullLTE applies the LTE predicate on the "last_pull" field.
+func LastPullLTE(v time.Time) predicate.Bouncer {
+ return predicate.Bouncer(func(s *sql.Selector) {
+ s.Where(sql.LTE(s.C(FieldLastPull), v))
+ })
+}
+
+// And groups list of predicates with the AND operator between them.
+func And(predicates ...predicate.Bouncer) predicate.Bouncer {
+ return predicate.Bouncer(func(s *sql.Selector) {
+ s1 := s.Clone().SetP(nil)
+ for _, p := range predicates {
+ p(s1)
+ }
+ s.Where(s1.P())
+ })
+}
+
+// Or groups list of predicates with the OR operator between them.
+func Or(predicates ...predicate.Bouncer) predicate.Bouncer {
+ return predicate.Bouncer(func(s *sql.Selector) {
+ s1 := s.Clone().SetP(nil)
+ for i, p := range predicates {
+ if i > 0 {
+ s1.Or()
+ }
+ p(s1)
+ }
+ s.Where(s1.P())
+ })
+}
+
+// Not applies the not operator on the given predicate.
+func Not(p predicate.Bouncer) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + p(s.Not()) + }) +} diff --git a/pkg/database/ent/bouncer_create.go b/pkg/database/ent/bouncer_create.go new file mode 100644 index 000000000..1186343b1 --- /dev/null +++ b/pkg/database/ent/bouncer_create.go @@ -0,0 +1,408 @@ +// Code generated by entc, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "time" + + "github.com/crowdsecurity/crowdsec/pkg/database/ent/bouncer" + "github.com/facebook/ent/dialect/sql/sqlgraph" + "github.com/facebook/ent/schema/field" +) + +// BouncerCreate is the builder for creating a Bouncer entity. +type BouncerCreate struct { + config + mutation *BouncerMutation + hooks []Hook +} + +// SetCreatedAt sets the created_at field. +func (bc *BouncerCreate) SetCreatedAt(t time.Time) *BouncerCreate { + bc.mutation.SetCreatedAt(t) + return bc +} + +// SetNillableCreatedAt sets the created_at field if the given value is not nil. +func (bc *BouncerCreate) SetNillableCreatedAt(t *time.Time) *BouncerCreate { + if t != nil { + bc.SetCreatedAt(*t) + } + return bc +} + +// SetUpdatedAt sets the updated_at field. +func (bc *BouncerCreate) SetUpdatedAt(t time.Time) *BouncerCreate { + bc.mutation.SetUpdatedAt(t) + return bc +} + +// SetNillableUpdatedAt sets the updated_at field if the given value is not nil. +func (bc *BouncerCreate) SetNillableUpdatedAt(t *time.Time) *BouncerCreate { + if t != nil { + bc.SetUpdatedAt(*t) + } + return bc +} + +// SetName sets the name field. +func (bc *BouncerCreate) SetName(s string) *BouncerCreate { + bc.mutation.SetName(s) + return bc +} + +// SetAPIKey sets the api_key field. +func (bc *BouncerCreate) SetAPIKey(s string) *BouncerCreate { + bc.mutation.SetAPIKey(s) + return bc +} + +// SetRevoked sets the revoked field. +func (bc *BouncerCreate) SetRevoked(b bool) *BouncerCreate { + bc.mutation.SetRevoked(b) + return bc +} + +// SetIPAddress sets the ip_address field. +func (bc *BouncerCreate) SetIPAddress(s string) *BouncerCreate { + bc.mutation.SetIPAddress(s) + return bc +} + +// SetNillableIPAddress sets the ip_address field if the given value is not nil. +func (bc *BouncerCreate) SetNillableIPAddress(s *string) *BouncerCreate { + if s != nil { + bc.SetIPAddress(*s) + } + return bc +} + +// SetType sets the type field. +func (bc *BouncerCreate) SetType(s string) *BouncerCreate { + bc.mutation.SetType(s) + return bc +} + +// SetNillableType sets the type field if the given value is not nil. +func (bc *BouncerCreate) SetNillableType(s *string) *BouncerCreate { + if s != nil { + bc.SetType(*s) + } + return bc +} + +// SetVersion sets the version field. +func (bc *BouncerCreate) SetVersion(s string) *BouncerCreate { + bc.mutation.SetVersion(s) + return bc +} + +// SetNillableVersion sets the version field if the given value is not nil. +func (bc *BouncerCreate) SetNillableVersion(s *string) *BouncerCreate { + if s != nil { + bc.SetVersion(*s) + } + return bc +} + +// SetUntil sets the until field. +func (bc *BouncerCreate) SetUntil(t time.Time) *BouncerCreate { + bc.mutation.SetUntil(t) + return bc +} + +// SetNillableUntil sets the until field if the given value is not nil. +func (bc *BouncerCreate) SetNillableUntil(t *time.Time) *BouncerCreate { + if t != nil { + bc.SetUntil(*t) + } + return bc +} + +// SetLastPull sets the last_pull field. 
+func (bc *BouncerCreate) SetLastPull(t time.Time) *BouncerCreate { + bc.mutation.SetLastPull(t) + return bc +} + +// SetNillableLastPull sets the last_pull field if the given value is not nil. +func (bc *BouncerCreate) SetNillableLastPull(t *time.Time) *BouncerCreate { + if t != nil { + bc.SetLastPull(*t) + } + return bc +} + +// Mutation returns the BouncerMutation object of the builder. +func (bc *BouncerCreate) Mutation() *BouncerMutation { + return bc.mutation +} + +// Save creates the Bouncer in the database. +func (bc *BouncerCreate) Save(ctx context.Context) (*Bouncer, error) { + var ( + err error + node *Bouncer + ) + bc.defaults() + if len(bc.hooks) == 0 { + if err = bc.check(); err != nil { + return nil, err + } + node, err = bc.sqlSave(ctx) + } else { + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*BouncerMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err = bc.check(); err != nil { + return nil, err + } + bc.mutation = mutation + node, err = bc.sqlSave(ctx) + mutation.done = true + return node, err + }) + for i := len(bc.hooks) - 1; i >= 0; i-- { + mut = bc.hooks[i](mut) + } + if _, err := mut.Mutate(ctx, bc.mutation); err != nil { + return nil, err + } + } + return node, err +} + +// SaveX calls Save and panics if Save returns an error. +func (bc *BouncerCreate) SaveX(ctx context.Context) *Bouncer { + v, err := bc.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// defaults sets the default values of the builder before save. +func (bc *BouncerCreate) defaults() { + if _, ok := bc.mutation.CreatedAt(); !ok { + v := bouncer.DefaultCreatedAt() + bc.mutation.SetCreatedAt(v) + } + if _, ok := bc.mutation.UpdatedAt(); !ok { + v := bouncer.DefaultUpdatedAt() + bc.mutation.SetUpdatedAt(v) + } + if _, ok := bc.mutation.IPAddress(); !ok { + v := bouncer.DefaultIPAddress + bc.mutation.SetIPAddress(v) + } + if _, ok := bc.mutation.Until(); !ok { + v := bouncer.DefaultUntil() + bc.mutation.SetUntil(v) + } + if _, ok := bc.mutation.LastPull(); !ok { + v := bouncer.DefaultLastPull() + bc.mutation.SetLastPull(v) + } +} + +// check runs all checks and user-defined validators on the builder. 
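+
+// Usage sketch (hypothetical, for illustration only): creating a bouncer through
+// this builder. It assumes a configured *Client named `client` and a
+// context.Context named `ctx`; fields left unset (created_at, updated_at,
+// ip_address, until, last_pull) are filled in by defaults() above, so only the
+// required name/api_key/revoked fields must be set before Save.
+//
+//	b, err := client.Bouncer.Create().
+//		SetName("firewall-bouncer").
+//		SetAPIKey("s3cr3t-key").
+//		SetRevoked(false).
+//		Save(ctx)
+//	if err != nil {
+//		log.Fatalf("creating bouncer: %v", err)
+//	}
+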
+func (bc *BouncerCreate) check() error { + if _, ok := bc.mutation.CreatedAt(); !ok { + return &ValidationError{Name: "created_at", err: errors.New("ent: missing required field \"created_at\"")} + } + if _, ok := bc.mutation.UpdatedAt(); !ok { + return &ValidationError{Name: "updated_at", err: errors.New("ent: missing required field \"updated_at\"")} + } + if _, ok := bc.mutation.Name(); !ok { + return &ValidationError{Name: "name", err: errors.New("ent: missing required field \"name\"")} + } + if _, ok := bc.mutation.APIKey(); !ok { + return &ValidationError{Name: "api_key", err: errors.New("ent: missing required field \"api_key\"")} + } + if _, ok := bc.mutation.Revoked(); !ok { + return &ValidationError{Name: "revoked", err: errors.New("ent: missing required field \"revoked\"")} + } + if _, ok := bc.mutation.LastPull(); !ok { + return &ValidationError{Name: "last_pull", err: errors.New("ent: missing required field \"last_pull\"")} + } + return nil +} + +func (bc *BouncerCreate) sqlSave(ctx context.Context) (*Bouncer, error) { + _node, _spec := bc.createSpec() + if err := sqlgraph.CreateNode(ctx, bc.driver, _spec); err != nil { + if cerr, ok := isSQLConstraintError(err); ok { + err = cerr + } + return nil, err + } + id := _spec.ID.Value.(int64) + _node.ID = int(id) + return _node, nil +} + +func (bc *BouncerCreate) createSpec() (*Bouncer, *sqlgraph.CreateSpec) { + var ( + _node = &Bouncer{config: bc.config} + _spec = &sqlgraph.CreateSpec{ + Table: bouncer.Table, + ID: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: bouncer.FieldID, + }, + } + ) + if value, ok := bc.mutation.CreatedAt(); ok { + _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: bouncer.FieldCreatedAt, + }) + _node.CreatedAt = value + } + if value, ok := bc.mutation.UpdatedAt(); ok { + _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: bouncer.FieldUpdatedAt, + }) + _node.UpdatedAt = value + } + if value, ok := bc.mutation.Name(); ok { + _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: bouncer.FieldName, + }) + _node.Name = value + } + if value, ok := bc.mutation.APIKey(); ok { + _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: bouncer.FieldAPIKey, + }) + _node.APIKey = value + } + if value, ok := bc.mutation.Revoked(); ok { + _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ + Type: field.TypeBool, + Value: value, + Column: bouncer.FieldRevoked, + }) + _node.Revoked = value + } + if value, ok := bc.mutation.IPAddress(); ok { + _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: bouncer.FieldIPAddress, + }) + _node.IPAddress = value + } + if value, ok := bc.mutation.GetType(); ok { + _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: bouncer.FieldType, + }) + _node.Type = value + } + if value, ok := bc.mutation.Version(); ok { + _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: bouncer.FieldVersion, + }) + _node.Version = value + } + if value, ok := bc.mutation.Until(); ok { + _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: bouncer.FieldUntil, + }) + _node.Until = value + } + if value, ok := bc.mutation.LastPull(); ok { + _spec.Fields = 
append(_spec.Fields, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: bouncer.FieldLastPull, + }) + _node.LastPull = value + } + return _node, _spec +} + +// BouncerCreateBulk is the builder for creating a bulk of Bouncer entities. +type BouncerCreateBulk struct { + config + builders []*BouncerCreate +} + +// Save creates the Bouncer entities in the database. +func (bcb *BouncerCreateBulk) Save(ctx context.Context) ([]*Bouncer, error) { + specs := make([]*sqlgraph.CreateSpec, len(bcb.builders)) + nodes := make([]*Bouncer, len(bcb.builders)) + mutators := make([]Mutator, len(bcb.builders)) + for i := range bcb.builders { + func(i int, root context.Context) { + builder := bcb.builders[i] + builder.defaults() + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*BouncerMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err := builder.check(); err != nil { + return nil, err + } + builder.mutation = mutation + nodes[i], specs[i] = builder.createSpec() + var err error + if i < len(mutators)-1 { + _, err = mutators[i+1].Mutate(root, bcb.builders[i+1].mutation) + } else { + // Invoke the actual operation on the latest mutation in the chain. + if err = sqlgraph.BatchCreate(ctx, bcb.driver, &sqlgraph.BatchCreateSpec{Nodes: specs}); err != nil { + if cerr, ok := isSQLConstraintError(err); ok { + err = cerr + } + } + } + mutation.done = true + if err != nil { + return nil, err + } + id := specs[i].ID.Value.(int64) + nodes[i].ID = int(id) + return nodes[i], nil + }) + for i := len(builder.hooks) - 1; i >= 0; i-- { + mut = builder.hooks[i](mut) + } + mutators[i] = mut + }(i, ctx) + } + if len(mutators) > 0 { + if _, err := mutators[0].Mutate(ctx, bcb.builders[0].mutation); err != nil { + return nil, err + } + } + return nodes, nil +} + +// SaveX calls Save and panics if Save returns an error. +func (bcb *BouncerCreateBulk) SaveX(ctx context.Context) []*Bouncer { + v, err := bcb.Save(ctx) + if err != nil { + panic(err) + } + return v +} diff --git a/pkg/database/ent/bouncer_delete.go b/pkg/database/ent/bouncer_delete.go new file mode 100644 index 000000000..9596532ce --- /dev/null +++ b/pkg/database/ent/bouncer_delete.go @@ -0,0 +1,109 @@ +// Code generated by entc, DO NOT EDIT. + +package ent + +import ( + "context" + "fmt" + + "github.com/crowdsecurity/crowdsec/pkg/database/ent/bouncer" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/predicate" + "github.com/facebook/ent/dialect/sql" + "github.com/facebook/ent/dialect/sql/sqlgraph" + "github.com/facebook/ent/schema/field" +) + +// BouncerDelete is the builder for deleting a Bouncer entity. +type BouncerDelete struct { + config + hooks []Hook + mutation *BouncerMutation + predicates []predicate.Bouncer +} + +// Where adds a new predicate to the delete builder. +func (bd *BouncerDelete) Where(ps ...predicate.Bouncer) *BouncerDelete { + bd.predicates = append(bd.predicates, ps...) + return bd +} + +// Exec executes the deletion query and returns how many vertices were deleted. 
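+
+// Usage sketch (hypothetical): creating several bouncers in one batch through
+// BouncerCreateBulk above. It assumes the Bouncer client exposes Create and
+// CreateBulk constructors following the same pattern shown for the Alert client
+// later in client.go; each builder runs its own defaults() and check() before the
+// single BatchCreate statement is issued.
+//
+//	bouncers, err := client.Bouncer.CreateBulk(
+//		client.Bouncer.Create().SetName("b1").SetAPIKey("key-1").SetRevoked(false),
+//		client.Bouncer.Create().SetName("b2").SetAPIKey("key-2").SetRevoked(false),
+//	).Save(ctx)
+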
+func (bd *BouncerDelete) Exec(ctx context.Context) (int, error) { + var ( + err error + affected int + ) + if len(bd.hooks) == 0 { + affected, err = bd.sqlExec(ctx) + } else { + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*BouncerMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + bd.mutation = mutation + affected, err = bd.sqlExec(ctx) + mutation.done = true + return affected, err + }) + for i := len(bd.hooks) - 1; i >= 0; i-- { + mut = bd.hooks[i](mut) + } + if _, err := mut.Mutate(ctx, bd.mutation); err != nil { + return 0, err + } + } + return affected, err +} + +// ExecX is like Exec, but panics if an error occurs. +func (bd *BouncerDelete) ExecX(ctx context.Context) int { + n, err := bd.Exec(ctx) + if err != nil { + panic(err) + } + return n +} + +func (bd *BouncerDelete) sqlExec(ctx context.Context) (int, error) { + _spec := &sqlgraph.DeleteSpec{ + Node: &sqlgraph.NodeSpec{ + Table: bouncer.Table, + ID: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: bouncer.FieldID, + }, + }, + } + if ps := bd.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + return sqlgraph.DeleteNodes(ctx, bd.driver, _spec) +} + +// BouncerDeleteOne is the builder for deleting a single Bouncer entity. +type BouncerDeleteOne struct { + bd *BouncerDelete +} + +// Exec executes the deletion query. +func (bdo *BouncerDeleteOne) Exec(ctx context.Context) error { + n, err := bdo.bd.Exec(ctx) + switch { + case err != nil: + return err + case n == 0: + return &NotFoundError{bouncer.Label} + default: + return nil + } +} + +// ExecX is like Exec, but panics if an error occurs. +func (bdo *BouncerDeleteOne) ExecX(ctx context.Context) { + bdo.bd.ExecX(ctx) +} diff --git a/pkg/database/ent/bouncer_query.go b/pkg/database/ent/bouncer_query.go new file mode 100644 index 000000000..330bb94bc --- /dev/null +++ b/pkg/database/ent/bouncer_query.go @@ -0,0 +1,880 @@ +// Code generated by entc, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "math" + + "github.com/crowdsecurity/crowdsec/pkg/database/ent/bouncer" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/predicate" + "github.com/facebook/ent/dialect/sql" + "github.com/facebook/ent/dialect/sql/sqlgraph" + "github.com/facebook/ent/schema/field" +) + +// BouncerQuery is the builder for querying Bouncer entities. +type BouncerQuery struct { + config + limit *int + offset *int + order []OrderFunc + unique []string + predicates []predicate.Bouncer + // intermediate query (i.e. traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Where adds a new predicate for the builder. +func (bq *BouncerQuery) Where(ps ...predicate.Bouncer) *BouncerQuery { + bq.predicates = append(bq.predicates, ps...) + return bq +} + +// Limit adds a limit step to the query. +func (bq *BouncerQuery) Limit(limit int) *BouncerQuery { + bq.limit = &limit + return bq +} + +// Offset adds an offset step to the query. +func (bq *BouncerQuery) Offset(offset int) *BouncerQuery { + bq.offset = &offset + return bq +} + +// Order adds an order step to the query. +func (bq *BouncerQuery) Order(o ...OrderFunc) *BouncerQuery { + bq.order = append(bq.order, o...) + return bq +} + +// First returns the first Bouncer entity in the query. Returns *NotFoundError when no bouncer was found. 
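+
+// Usage sketch (hypothetical): deleting expired bouncers with the delete builder,
+// filtered by the generated `until` predicates. It assumes `client` and `ctx` as
+// above and a Delete constructor on the Bouncer client (the usual entc pattern);
+// Exec reports how many rows were removed.
+//
+//	n, err := client.Bouncer.Delete().
+//		Where(bouncer.UntilLTE(time.Now())).
+//		Exec(ctx)
+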
+func (bq *BouncerQuery) First(ctx context.Context) (*Bouncer, error) { + nodes, err := bq.Limit(1).All(ctx) + if err != nil { + return nil, err + } + if len(nodes) == 0 { + return nil, &NotFoundError{bouncer.Label} + } + return nodes[0], nil +} + +// FirstX is like First, but panics if an error occurs. +func (bq *BouncerQuery) FirstX(ctx context.Context) *Bouncer { + node, err := bq.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return node +} + +// FirstID returns the first Bouncer id in the query. Returns *NotFoundError when no id was found. +func (bq *BouncerQuery) FirstID(ctx context.Context) (id int, err error) { + var ids []int + if ids, err = bq.Limit(1).IDs(ctx); err != nil { + return + } + if len(ids) == 0 { + err = &NotFoundError{bouncer.Label} + return + } + return ids[0], nil +} + +// FirstXID is like FirstID, but panics if an error occurs. +func (bq *BouncerQuery) FirstXID(ctx context.Context) int { + id, err := bq.FirstID(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return id +} + +// Only returns the only Bouncer entity in the query, returns an error if not exactly one entity was returned. +func (bq *BouncerQuery) Only(ctx context.Context) (*Bouncer, error) { + nodes, err := bq.Limit(2).All(ctx) + if err != nil { + return nil, err + } + switch len(nodes) { + case 1: + return nodes[0], nil + case 0: + return nil, &NotFoundError{bouncer.Label} + default: + return nil, &NotSingularError{bouncer.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. +func (bq *BouncerQuery) OnlyX(ctx context.Context) *Bouncer { + node, err := bq.Only(ctx) + if err != nil { + panic(err) + } + return node +} + +// OnlyID returns the only Bouncer id in the query, returns an error if not exactly one id was returned. +func (bq *BouncerQuery) OnlyID(ctx context.Context) (id int, err error) { + var ids []int + if ids, err = bq.Limit(2).IDs(ctx); err != nil { + return + } + switch len(ids) { + case 1: + id = ids[0] + case 0: + err = &NotFoundError{bouncer.Label} + default: + err = &NotSingularError{bouncer.Label} + } + return +} + +// OnlyIDX is like OnlyID, but panics if an error occurs. +func (bq *BouncerQuery) OnlyIDX(ctx context.Context) int { + id, err := bq.OnlyID(ctx) + if err != nil { + panic(err) + } + return id +} + +// All executes the query and returns a list of Bouncers. +func (bq *BouncerQuery) All(ctx context.Context) ([]*Bouncer, error) { + if err := bq.prepareQuery(ctx); err != nil { + return nil, err + } + return bq.sqlAll(ctx) +} + +// AllX is like All, but panics if an error occurs. +func (bq *BouncerQuery) AllX(ctx context.Context) []*Bouncer { + nodes, err := bq.All(ctx) + if err != nil { + panic(err) + } + return nodes +} + +// IDs executes the query and returns a list of Bouncer ids. +func (bq *BouncerQuery) IDs(ctx context.Context) ([]int, error) { + var ids []int + if err := bq.Select(bouncer.FieldID).Scan(ctx, &ids); err != nil { + return nil, err + } + return ids, nil +} + +// IDsX is like IDs, but panics if an error occurs. +func (bq *BouncerQuery) IDsX(ctx context.Context) []int { + ids, err := bq.IDs(ctx) + if err != nil { + panic(err) + } + return ids +} + +// Count returns the count of the given query. +func (bq *BouncerQuery) Count(ctx context.Context) (int, error) { + if err := bq.prepareQuery(ctx); err != nil { + return 0, err + } + return bq.sqlCount(ctx) +} + +// CountX is like Count, but panics if an error occurs. 
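+
+// Usage sketch (hypothetical): combining the generated field predicates with
+// And/Or from the bouncer package when querying. `client` and `ctx` are assumed
+// as above; First returns a *NotFoundError if nothing matches. Note that passing
+// several predicates to Where already ANDs them, so the explicit And is only for
+// illustration.
+//
+//	b, err := client.Bouncer.Query().
+//		Where(bouncer.And(
+//			bouncer.TypeEqualFold("firewall"),
+//			bouncer.Or(
+//				bouncer.VersionHasPrefix("v1."),
+//				bouncer.VersionIsNil(),
+//			),
+//		)).
+//		First(ctx)
+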
+func (bq *BouncerQuery) CountX(ctx context.Context) int { + count, err := bq.Count(ctx) + if err != nil { + panic(err) + } + return count +} + +// Exist returns true if the query has elements in the graph. +func (bq *BouncerQuery) Exist(ctx context.Context) (bool, error) { + if err := bq.prepareQuery(ctx); err != nil { + return false, err + } + return bq.sqlExist(ctx) +} + +// ExistX is like Exist, but panics if an error occurs. +func (bq *BouncerQuery) ExistX(ctx context.Context) bool { + exist, err := bq.Exist(ctx) + if err != nil { + panic(err) + } + return exist +} + +// Clone returns a duplicate of the query builder, including all associated steps. It can be +// used to prepare common query builders and use them differently after the clone is made. +func (bq *BouncerQuery) Clone() *BouncerQuery { + return &BouncerQuery{ + config: bq.config, + limit: bq.limit, + offset: bq.offset, + order: append([]OrderFunc{}, bq.order...), + unique: append([]string{}, bq.unique...), + predicates: append([]predicate.Bouncer{}, bq.predicates...), + // clone intermediate query. + sql: bq.sql.Clone(), + path: bq.path, + } +} + +// GroupBy used to group vertices by one or more fields/columns. +// It is often used with aggregate functions, like: count, max, mean, min, sum. +// +// Example: +// +// var v []struct { +// CreatedAt time.Time `json:"created_at,omitempty"` +// Count int `json:"count,omitempty"` +// } +// +// client.Bouncer.Query(). +// GroupBy(bouncer.FieldCreatedAt). +// Aggregate(ent.Count()). +// Scan(ctx, &v) +// +func (bq *BouncerQuery) GroupBy(field string, fields ...string) *BouncerGroupBy { + group := &BouncerGroupBy{config: bq.config} + group.fields = append([]string{field}, fields...) + group.path = func(ctx context.Context) (prev *sql.Selector, err error) { + if err := bq.prepareQuery(ctx); err != nil { + return nil, err + } + return bq.sqlQuery(), nil + } + return group +} + +// Select one or more fields from the given query. +// +// Example: +// +// var v []struct { +// CreatedAt time.Time `json:"created_at,omitempty"` +// } +// +// client.Bouncer.Query(). +// Select(bouncer.FieldCreatedAt). +// Scan(ctx, &v) +// +func (bq *BouncerQuery) Select(field string, fields ...string) *BouncerSelect { + selector := &BouncerSelect{config: bq.config} + selector.fields = append([]string{field}, fields...) + selector.path = func(ctx context.Context) (prev *sql.Selector, err error) { + if err := bq.prepareQuery(ctx); err != nil { + return nil, err + } + return bq.sqlQuery(), nil + } + return selector +} + +func (bq *BouncerQuery) prepareQuery(ctx context.Context) error { + if bq.path != nil { + prev, err := bq.path(ctx) + if err != nil { + return err + } + bq.sql = prev + } + return nil +} + +func (bq *BouncerQuery) sqlAll(ctx context.Context) ([]*Bouncer, error) { + var ( + nodes = []*Bouncer{} + _spec = bq.querySpec() + ) + _spec.ScanValues = func() []interface{} { + node := &Bouncer{config: bq.config} + nodes = append(nodes, node) + values := node.scanValues() + return values + } + _spec.Assign = func(values ...interface{}) error { + if len(nodes) == 0 { + return fmt.Errorf("ent: Assign called without calling ScanValues") + } + node := nodes[len(nodes)-1] + return node.assignValues(values...) 
+ } + if err := sqlgraph.QueryNodes(ctx, bq.driver, _spec); err != nil { + return nil, err + } + if len(nodes) == 0 { + return nodes, nil + } + return nodes, nil +} + +func (bq *BouncerQuery) sqlCount(ctx context.Context) (int, error) { + _spec := bq.querySpec() + return sqlgraph.CountNodes(ctx, bq.driver, _spec) +} + +func (bq *BouncerQuery) sqlExist(ctx context.Context) (bool, error) { + n, err := bq.sqlCount(ctx) + if err != nil { + return false, fmt.Errorf("ent: check existence: %v", err) + } + return n > 0, nil +} + +func (bq *BouncerQuery) querySpec() *sqlgraph.QuerySpec { + _spec := &sqlgraph.QuerySpec{ + Node: &sqlgraph.NodeSpec{ + Table: bouncer.Table, + Columns: bouncer.Columns, + ID: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: bouncer.FieldID, + }, + }, + From: bq.sql, + Unique: true, + } + if ps := bq.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if limit := bq.limit; limit != nil { + _spec.Limit = *limit + } + if offset := bq.offset; offset != nil { + _spec.Offset = *offset + } + if ps := bq.order; len(ps) > 0 { + _spec.Order = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector, bouncer.ValidColumn) + } + } + } + return _spec +} + +func (bq *BouncerQuery) sqlQuery() *sql.Selector { + builder := sql.Dialect(bq.driver.Dialect()) + t1 := builder.Table(bouncer.Table) + selector := builder.Select(t1.Columns(bouncer.Columns...)...).From(t1) + if bq.sql != nil { + selector = bq.sql + selector.Select(selector.Columns(bouncer.Columns...)...) + } + for _, p := range bq.predicates { + p(selector) + } + for _, p := range bq.order { + p(selector, bouncer.ValidColumn) + } + if offset := bq.offset; offset != nil { + // limit is mandatory for offset clause. We start + // with default value, and override it below if needed. + selector.Offset(*offset).Limit(math.MaxInt32) + } + if limit := bq.limit; limit != nil { + selector.Limit(*limit) + } + return selector +} + +// BouncerGroupBy is the builder for group-by Bouncer entities. +type BouncerGroupBy struct { + config + fields []string + fns []AggregateFunc + // intermediate query (i.e. traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Aggregate adds the given aggregation functions to the group-by query. +func (bgb *BouncerGroupBy) Aggregate(fns ...AggregateFunc) *BouncerGroupBy { + bgb.fns = append(bgb.fns, fns...) + return bgb +} + +// Scan applies the group-by query and scan the result into the given value. +func (bgb *BouncerGroupBy) Scan(ctx context.Context, v interface{}) error { + query, err := bgb.path(ctx) + if err != nil { + return err + } + bgb.sql = query + return bgb.sqlScan(ctx, v) +} + +// ScanX is like Scan, but panics if an error occurs. +func (bgb *BouncerGroupBy) ScanX(ctx context.Context, v interface{}) { + if err := bgb.Scan(ctx, v); err != nil { + panic(err) + } +} + +// Strings returns list of strings from group-by. It is only allowed when querying group-by with one field. +func (bgb *BouncerGroupBy) Strings(ctx context.Context) ([]string, error) { + if len(bgb.fields) > 1 { + return nil, errors.New("ent: BouncerGroupBy.Strings is not achievable when grouping more than 1 field") + } + var v []string + if err := bgb.Scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// StringsX is like Strings, but panics if an error occurs. 
+func (bgb *BouncerGroupBy) StringsX(ctx context.Context) []string { + v, err := bgb.Strings(ctx) + if err != nil { + panic(err) + } + return v +} + +// String returns a single string from group-by. It is only allowed when querying group-by with one field. +func (bgb *BouncerGroupBy) String(ctx context.Context) (_ string, err error) { + var v []string + if v, err = bgb.Strings(ctx); err != nil { + return + } + switch len(v) { + case 1: + return v[0], nil + case 0: + err = &NotFoundError{bouncer.Label} + default: + err = fmt.Errorf("ent: BouncerGroupBy.Strings returned %d results when one was expected", len(v)) + } + return +} + +// StringX is like String, but panics if an error occurs. +func (bgb *BouncerGroupBy) StringX(ctx context.Context) string { + v, err := bgb.String(ctx) + if err != nil { + panic(err) + } + return v +} + +// Ints returns list of ints from group-by. It is only allowed when querying group-by with one field. +func (bgb *BouncerGroupBy) Ints(ctx context.Context) ([]int, error) { + if len(bgb.fields) > 1 { + return nil, errors.New("ent: BouncerGroupBy.Ints is not achievable when grouping more than 1 field") + } + var v []int + if err := bgb.Scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// IntsX is like Ints, but panics if an error occurs. +func (bgb *BouncerGroupBy) IntsX(ctx context.Context) []int { + v, err := bgb.Ints(ctx) + if err != nil { + panic(err) + } + return v +} + +// Int returns a single int from group-by. It is only allowed when querying group-by with one field. +func (bgb *BouncerGroupBy) Int(ctx context.Context) (_ int, err error) { + var v []int + if v, err = bgb.Ints(ctx); err != nil { + return + } + switch len(v) { + case 1: + return v[0], nil + case 0: + err = &NotFoundError{bouncer.Label} + default: + err = fmt.Errorf("ent: BouncerGroupBy.Ints returned %d results when one was expected", len(v)) + } + return +} + +// IntX is like Int, but panics if an error occurs. +func (bgb *BouncerGroupBy) IntX(ctx context.Context) int { + v, err := bgb.Int(ctx) + if err != nil { + panic(err) + } + return v +} + +// Float64s returns list of float64s from group-by. It is only allowed when querying group-by with one field. +func (bgb *BouncerGroupBy) Float64s(ctx context.Context) ([]float64, error) { + if len(bgb.fields) > 1 { + return nil, errors.New("ent: BouncerGroupBy.Float64s is not achievable when grouping more than 1 field") + } + var v []float64 + if err := bgb.Scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// Float64sX is like Float64s, but panics if an error occurs. +func (bgb *BouncerGroupBy) Float64sX(ctx context.Context) []float64 { + v, err := bgb.Float64s(ctx) + if err != nil { + panic(err) + } + return v +} + +// Float64 returns a single float64 from group-by. It is only allowed when querying group-by with one field. +func (bgb *BouncerGroupBy) Float64(ctx context.Context) (_ float64, err error) { + var v []float64 + if v, err = bgb.Float64s(ctx); err != nil { + return + } + switch len(v) { + case 1: + return v[0], nil + case 0: + err = &NotFoundError{bouncer.Label} + default: + err = fmt.Errorf("ent: BouncerGroupBy.Float64s returned %d results when one was expected", len(v)) + } + return +} + +// Float64X is like Float64, but panics if an error occurs. +func (bgb *BouncerGroupBy) Float64X(ctx context.Context) float64 { + v, err := bgb.Float64(ctx) + if err != nil { + panic(err) + } + return v +} + +// Bools returns list of bools from group-by. It is only allowed when querying group-by with one field. 
+func (bgb *BouncerGroupBy) Bools(ctx context.Context) ([]bool, error) { + if len(bgb.fields) > 1 { + return nil, errors.New("ent: BouncerGroupBy.Bools is not achievable when grouping more than 1 field") + } + var v []bool + if err := bgb.Scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// BoolsX is like Bools, but panics if an error occurs. +func (bgb *BouncerGroupBy) BoolsX(ctx context.Context) []bool { + v, err := bgb.Bools(ctx) + if err != nil { + panic(err) + } + return v +} + +// Bool returns a single bool from group-by. It is only allowed when querying group-by with one field. +func (bgb *BouncerGroupBy) Bool(ctx context.Context) (_ bool, err error) { + var v []bool + if v, err = bgb.Bools(ctx); err != nil { + return + } + switch len(v) { + case 1: + return v[0], nil + case 0: + err = &NotFoundError{bouncer.Label} + default: + err = fmt.Errorf("ent: BouncerGroupBy.Bools returned %d results when one was expected", len(v)) + } + return +} + +// BoolX is like Bool, but panics if an error occurs. +func (bgb *BouncerGroupBy) BoolX(ctx context.Context) bool { + v, err := bgb.Bool(ctx) + if err != nil { + panic(err) + } + return v +} + +func (bgb *BouncerGroupBy) sqlScan(ctx context.Context, v interface{}) error { + for _, f := range bgb.fields { + if !bouncer.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("invalid field %q for group-by", f)} + } + } + selector := bgb.sqlQuery() + if err := selector.Err(); err != nil { + return err + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := bgb.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +func (bgb *BouncerGroupBy) sqlQuery() *sql.Selector { + selector := bgb.sql + columns := make([]string, 0, len(bgb.fields)+len(bgb.fns)) + columns = append(columns, bgb.fields...) + for _, fn := range bgb.fns { + columns = append(columns, fn(selector, bouncer.ValidColumn)) + } + return selector.Select(columns...).GroupBy(bgb.fields...) +} + +// BouncerSelect is the builder for select fields of Bouncer entities. +type BouncerSelect struct { + config + fields []string + // intermediate query (i.e. traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Scan applies the selector query and scan the result into the given value. +func (bs *BouncerSelect) Scan(ctx context.Context, v interface{}) error { + query, err := bs.path(ctx) + if err != nil { + return err + } + bs.sql = query + return bs.sqlScan(ctx, v) +} + +// ScanX is like Scan, but panics if an error occurs. +func (bs *BouncerSelect) ScanX(ctx context.Context, v interface{}) { + if err := bs.Scan(ctx, v); err != nil { + panic(err) + } +} + +// Strings returns list of strings from selector. It is only allowed when selecting one field. +func (bs *BouncerSelect) Strings(ctx context.Context) ([]string, error) { + if len(bs.fields) > 1 { + return nil, errors.New("ent: BouncerSelect.Strings is not achievable when selecting more than 1 field") + } + var v []string + if err := bs.Scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// StringsX is like Strings, but panics if an error occurs. +func (bs *BouncerSelect) StringsX(ctx context.Context) []string { + v, err := bs.Strings(ctx) + if err != nil { + panic(err) + } + return v +} + +// String returns a single string from selector. It is only allowed when selecting one field. 
+func (bs *BouncerSelect) String(ctx context.Context) (_ string, err error) { + var v []string + if v, err = bs.Strings(ctx); err != nil { + return + } + switch len(v) { + case 1: + return v[0], nil + case 0: + err = &NotFoundError{bouncer.Label} + default: + err = fmt.Errorf("ent: BouncerSelect.Strings returned %d results when one was expected", len(v)) + } + return +} + +// StringX is like String, but panics if an error occurs. +func (bs *BouncerSelect) StringX(ctx context.Context) string { + v, err := bs.String(ctx) + if err != nil { + panic(err) + } + return v +} + +// Ints returns list of ints from selector. It is only allowed when selecting one field. +func (bs *BouncerSelect) Ints(ctx context.Context) ([]int, error) { + if len(bs.fields) > 1 { + return nil, errors.New("ent: BouncerSelect.Ints is not achievable when selecting more than 1 field") + } + var v []int + if err := bs.Scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// IntsX is like Ints, but panics if an error occurs. +func (bs *BouncerSelect) IntsX(ctx context.Context) []int { + v, err := bs.Ints(ctx) + if err != nil { + panic(err) + } + return v +} + +// Int returns a single int from selector. It is only allowed when selecting one field. +func (bs *BouncerSelect) Int(ctx context.Context) (_ int, err error) { + var v []int + if v, err = bs.Ints(ctx); err != nil { + return + } + switch len(v) { + case 1: + return v[0], nil + case 0: + err = &NotFoundError{bouncer.Label} + default: + err = fmt.Errorf("ent: BouncerSelect.Ints returned %d results when one was expected", len(v)) + } + return +} + +// IntX is like Int, but panics if an error occurs. +func (bs *BouncerSelect) IntX(ctx context.Context) int { + v, err := bs.Int(ctx) + if err != nil { + panic(err) + } + return v +} + +// Float64s returns list of float64s from selector. It is only allowed when selecting one field. +func (bs *BouncerSelect) Float64s(ctx context.Context) ([]float64, error) { + if len(bs.fields) > 1 { + return nil, errors.New("ent: BouncerSelect.Float64s is not achievable when selecting more than 1 field") + } + var v []float64 + if err := bs.Scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// Float64sX is like Float64s, but panics if an error occurs. +func (bs *BouncerSelect) Float64sX(ctx context.Context) []float64 { + v, err := bs.Float64s(ctx) + if err != nil { + panic(err) + } + return v +} + +// Float64 returns a single float64 from selector. It is only allowed when selecting one field. +func (bs *BouncerSelect) Float64(ctx context.Context) (_ float64, err error) { + var v []float64 + if v, err = bs.Float64s(ctx); err != nil { + return + } + switch len(v) { + case 1: + return v[0], nil + case 0: + err = &NotFoundError{bouncer.Label} + default: + err = fmt.Errorf("ent: BouncerSelect.Float64s returned %d results when one was expected", len(v)) + } + return +} + +// Float64X is like Float64, but panics if an error occurs. +func (bs *BouncerSelect) Float64X(ctx context.Context) float64 { + v, err := bs.Float64(ctx) + if err != nil { + panic(err) + } + return v +} + +// Bools returns list of bools from selector. It is only allowed when selecting one field. 
+func (bs *BouncerSelect) Bools(ctx context.Context) ([]bool, error) { + if len(bs.fields) > 1 { + return nil, errors.New("ent: BouncerSelect.Bools is not achievable when selecting more than 1 field") + } + var v []bool + if err := bs.Scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// BoolsX is like Bools, but panics if an error occurs. +func (bs *BouncerSelect) BoolsX(ctx context.Context) []bool { + v, err := bs.Bools(ctx) + if err != nil { + panic(err) + } + return v +} + +// Bool returns a single bool from selector. It is only allowed when selecting one field. +func (bs *BouncerSelect) Bool(ctx context.Context) (_ bool, err error) { + var v []bool + if v, err = bs.Bools(ctx); err != nil { + return + } + switch len(v) { + case 1: + return v[0], nil + case 0: + err = &NotFoundError{bouncer.Label} + default: + err = fmt.Errorf("ent: BouncerSelect.Bools returned %d results when one was expected", len(v)) + } + return +} + +// BoolX is like Bool, but panics if an error occurs. +func (bs *BouncerSelect) BoolX(ctx context.Context) bool { + v, err := bs.Bool(ctx) + if err != nil { + panic(err) + } + return v +} + +func (bs *BouncerSelect) sqlScan(ctx context.Context, v interface{}) error { + for _, f := range bs.fields { + if !bouncer.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("invalid field %q for selection", f)} + } + } + rows := &sql.Rows{} + query, args := bs.sqlQuery().Query() + if err := bs.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +func (bs *BouncerSelect) sqlQuery() sql.Querier { + selector := bs.sql + selector.Select(selector.Columns(bs.fields...)...) + return selector +} diff --git a/pkg/database/ent/bouncer_update.go b/pkg/database/ent/bouncer_update.go new file mode 100644 index 000000000..fcf4970e0 --- /dev/null +++ b/pkg/database/ent/bouncer_update.go @@ -0,0 +1,675 @@ +// Code generated by entc, DO NOT EDIT. + +package ent + +import ( + "context" + "fmt" + "time" + + "github.com/crowdsecurity/crowdsec/pkg/database/ent/bouncer" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/predicate" + "github.com/facebook/ent/dialect/sql" + "github.com/facebook/ent/dialect/sql/sqlgraph" + "github.com/facebook/ent/schema/field" +) + +// BouncerUpdate is the builder for updating Bouncer entities. +type BouncerUpdate struct { + config + hooks []Hook + mutation *BouncerMutation + predicates []predicate.Bouncer +} + +// Where adds a new predicate for the builder. +func (bu *BouncerUpdate) Where(ps ...predicate.Bouncer) *BouncerUpdate { + bu.predicates = append(bu.predicates, ps...) + return bu +} + +// SetCreatedAt sets the created_at field. +func (bu *BouncerUpdate) SetCreatedAt(t time.Time) *BouncerUpdate { + bu.mutation.SetCreatedAt(t) + return bu +} + +// SetNillableCreatedAt sets the created_at field if the given value is not nil. +func (bu *BouncerUpdate) SetNillableCreatedAt(t *time.Time) *BouncerUpdate { + if t != nil { + bu.SetCreatedAt(*t) + } + return bu +} + +// SetUpdatedAt sets the updated_at field. +func (bu *BouncerUpdate) SetUpdatedAt(t time.Time) *BouncerUpdate { + bu.mutation.SetUpdatedAt(t) + return bu +} + +// SetNillableUpdatedAt sets the updated_at field if the given value is not nil. +func (bu *BouncerUpdate) SetNillableUpdatedAt(t *time.Time) *BouncerUpdate { + if t != nil { + bu.SetUpdatedAt(*t) + } + return bu +} + +// SetName sets the name field. 
+func (bu *BouncerUpdate) SetName(s string) *BouncerUpdate { + bu.mutation.SetName(s) + return bu +} + +// SetAPIKey sets the api_key field. +func (bu *BouncerUpdate) SetAPIKey(s string) *BouncerUpdate { + bu.mutation.SetAPIKey(s) + return bu +} + +// SetRevoked sets the revoked field. +func (bu *BouncerUpdate) SetRevoked(b bool) *BouncerUpdate { + bu.mutation.SetRevoked(b) + return bu +} + +// SetIPAddress sets the ip_address field. +func (bu *BouncerUpdate) SetIPAddress(s string) *BouncerUpdate { + bu.mutation.SetIPAddress(s) + return bu +} + +// SetNillableIPAddress sets the ip_address field if the given value is not nil. +func (bu *BouncerUpdate) SetNillableIPAddress(s *string) *BouncerUpdate { + if s != nil { + bu.SetIPAddress(*s) + } + return bu +} + +// ClearIPAddress clears the value of ip_address. +func (bu *BouncerUpdate) ClearIPAddress() *BouncerUpdate { + bu.mutation.ClearIPAddress() + return bu +} + +// SetType sets the type field. +func (bu *BouncerUpdate) SetType(s string) *BouncerUpdate { + bu.mutation.SetType(s) + return bu +} + +// SetNillableType sets the type field if the given value is not nil. +func (bu *BouncerUpdate) SetNillableType(s *string) *BouncerUpdate { + if s != nil { + bu.SetType(*s) + } + return bu +} + +// ClearType clears the value of type. +func (bu *BouncerUpdate) ClearType() *BouncerUpdate { + bu.mutation.ClearType() + return bu +} + +// SetVersion sets the version field. +func (bu *BouncerUpdate) SetVersion(s string) *BouncerUpdate { + bu.mutation.SetVersion(s) + return bu +} + +// SetNillableVersion sets the version field if the given value is not nil. +func (bu *BouncerUpdate) SetNillableVersion(s *string) *BouncerUpdate { + if s != nil { + bu.SetVersion(*s) + } + return bu +} + +// ClearVersion clears the value of version. +func (bu *BouncerUpdate) ClearVersion() *BouncerUpdate { + bu.mutation.ClearVersion() + return bu +} + +// SetUntil sets the until field. +func (bu *BouncerUpdate) SetUntil(t time.Time) *BouncerUpdate { + bu.mutation.SetUntil(t) + return bu +} + +// SetNillableUntil sets the until field if the given value is not nil. +func (bu *BouncerUpdate) SetNillableUntil(t *time.Time) *BouncerUpdate { + if t != nil { + bu.SetUntil(*t) + } + return bu +} + +// ClearUntil clears the value of until. +func (bu *BouncerUpdate) ClearUntil() *BouncerUpdate { + bu.mutation.ClearUntil() + return bu +} + +// SetLastPull sets the last_pull field. +func (bu *BouncerUpdate) SetLastPull(t time.Time) *BouncerUpdate { + bu.mutation.SetLastPull(t) + return bu +} + +// SetNillableLastPull sets the last_pull field if the given value is not nil. +func (bu *BouncerUpdate) SetNillableLastPull(t *time.Time) *BouncerUpdate { + if t != nil { + bu.SetLastPull(*t) + } + return bu +} + +// Mutation returns the BouncerMutation object of the builder. +func (bu *BouncerUpdate) Mutation() *BouncerMutation { + return bu.mutation +} + +// Save executes the query and returns the number of rows/vertices matched by this operation. 
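+
+// Usage sketch (hypothetical): bulk-updating every row matched by the predicates
+// with this update builder; Save returns the number of affected rows. `client`
+// and `ctx` are assumed as above, and the Update constructor on the Bouncer
+// client follows the usual entc pattern.
+//
+//	n, err := client.Bouncer.Update().
+//		Where(bouncer.LastPullLT(time.Now().Add(-24 * time.Hour))).
+//		SetRevoked(true).
+//		Save(ctx)
+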
+func (bu *BouncerUpdate) Save(ctx context.Context) (int, error) { + var ( + err error + affected int + ) + if len(bu.hooks) == 0 { + affected, err = bu.sqlSave(ctx) + } else { + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*BouncerMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + bu.mutation = mutation + affected, err = bu.sqlSave(ctx) + mutation.done = true + return affected, err + }) + for i := len(bu.hooks) - 1; i >= 0; i-- { + mut = bu.hooks[i](mut) + } + if _, err := mut.Mutate(ctx, bu.mutation); err != nil { + return 0, err + } + } + return affected, err +} + +// SaveX is like Save, but panics if an error occurs. +func (bu *BouncerUpdate) SaveX(ctx context.Context) int { + affected, err := bu.Save(ctx) + if err != nil { + panic(err) + } + return affected +} + +// Exec executes the query. +func (bu *BouncerUpdate) Exec(ctx context.Context) error { + _, err := bu.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (bu *BouncerUpdate) ExecX(ctx context.Context) { + if err := bu.Exec(ctx); err != nil { + panic(err) + } +} + +func (bu *BouncerUpdate) sqlSave(ctx context.Context) (n int, err error) { + _spec := &sqlgraph.UpdateSpec{ + Node: &sqlgraph.NodeSpec{ + Table: bouncer.Table, + Columns: bouncer.Columns, + ID: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: bouncer.FieldID, + }, + }, + } + if ps := bu.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := bu.mutation.CreatedAt(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: bouncer.FieldCreatedAt, + }) + } + if value, ok := bu.mutation.UpdatedAt(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: bouncer.FieldUpdatedAt, + }) + } + if value, ok := bu.mutation.Name(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: bouncer.FieldName, + }) + } + if value, ok := bu.mutation.APIKey(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: bouncer.FieldAPIKey, + }) + } + if value, ok := bu.mutation.Revoked(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeBool, + Value: value, + Column: bouncer.FieldRevoked, + }) + } + if value, ok := bu.mutation.IPAddress(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: bouncer.FieldIPAddress, + }) + } + if bu.mutation.IPAddressCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Column: bouncer.FieldIPAddress, + }) + } + if value, ok := bu.mutation.GetType(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: bouncer.FieldType, + }) + } + if bu.mutation.TypeCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Column: bouncer.FieldType, + }) + } + if value, ok := bu.mutation.Version(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: bouncer.FieldVersion, + }) + } + if bu.mutation.VersionCleared() { + 
_spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Column: bouncer.FieldVersion, + }) + } + if value, ok := bu.mutation.Until(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: bouncer.FieldUntil, + }) + } + if bu.mutation.UntilCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Column: bouncer.FieldUntil, + }) + } + if value, ok := bu.mutation.LastPull(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: bouncer.FieldLastPull, + }) + } + if n, err = sqlgraph.UpdateNodes(ctx, bu.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{bouncer.Label} + } else if cerr, ok := isSQLConstraintError(err); ok { + err = cerr + } + return 0, err + } + return n, nil +} + +// BouncerUpdateOne is the builder for updating a single Bouncer entity. +type BouncerUpdateOne struct { + config + hooks []Hook + mutation *BouncerMutation +} + +// SetCreatedAt sets the created_at field. +func (buo *BouncerUpdateOne) SetCreatedAt(t time.Time) *BouncerUpdateOne { + buo.mutation.SetCreatedAt(t) + return buo +} + +// SetNillableCreatedAt sets the created_at field if the given value is not nil. +func (buo *BouncerUpdateOne) SetNillableCreatedAt(t *time.Time) *BouncerUpdateOne { + if t != nil { + buo.SetCreatedAt(*t) + } + return buo +} + +// SetUpdatedAt sets the updated_at field. +func (buo *BouncerUpdateOne) SetUpdatedAt(t time.Time) *BouncerUpdateOne { + buo.mutation.SetUpdatedAt(t) + return buo +} + +// SetNillableUpdatedAt sets the updated_at field if the given value is not nil. +func (buo *BouncerUpdateOne) SetNillableUpdatedAt(t *time.Time) *BouncerUpdateOne { + if t != nil { + buo.SetUpdatedAt(*t) + } + return buo +} + +// SetName sets the name field. +func (buo *BouncerUpdateOne) SetName(s string) *BouncerUpdateOne { + buo.mutation.SetName(s) + return buo +} + +// SetAPIKey sets the api_key field. +func (buo *BouncerUpdateOne) SetAPIKey(s string) *BouncerUpdateOne { + buo.mutation.SetAPIKey(s) + return buo +} + +// SetRevoked sets the revoked field. +func (buo *BouncerUpdateOne) SetRevoked(b bool) *BouncerUpdateOne { + buo.mutation.SetRevoked(b) + return buo +} + +// SetIPAddress sets the ip_address field. +func (buo *BouncerUpdateOne) SetIPAddress(s string) *BouncerUpdateOne { + buo.mutation.SetIPAddress(s) + return buo +} + +// SetNillableIPAddress sets the ip_address field if the given value is not nil. +func (buo *BouncerUpdateOne) SetNillableIPAddress(s *string) *BouncerUpdateOne { + if s != nil { + buo.SetIPAddress(*s) + } + return buo +} + +// ClearIPAddress clears the value of ip_address. +func (buo *BouncerUpdateOne) ClearIPAddress() *BouncerUpdateOne { + buo.mutation.ClearIPAddress() + return buo +} + +// SetType sets the type field. +func (buo *BouncerUpdateOne) SetType(s string) *BouncerUpdateOne { + buo.mutation.SetType(s) + return buo +} + +// SetNillableType sets the type field if the given value is not nil. +func (buo *BouncerUpdateOne) SetNillableType(s *string) *BouncerUpdateOne { + if s != nil { + buo.SetType(*s) + } + return buo +} + +// ClearType clears the value of type. +func (buo *BouncerUpdateOne) ClearType() *BouncerUpdateOne { + buo.mutation.ClearType() + return buo +} + +// SetVersion sets the version field. 
+func (buo *BouncerUpdateOne) SetVersion(s string) *BouncerUpdateOne { + buo.mutation.SetVersion(s) + return buo +} + +// SetNillableVersion sets the version field if the given value is not nil. +func (buo *BouncerUpdateOne) SetNillableVersion(s *string) *BouncerUpdateOne { + if s != nil { + buo.SetVersion(*s) + } + return buo +} + +// ClearVersion clears the value of version. +func (buo *BouncerUpdateOne) ClearVersion() *BouncerUpdateOne { + buo.mutation.ClearVersion() + return buo +} + +// SetUntil sets the until field. +func (buo *BouncerUpdateOne) SetUntil(t time.Time) *BouncerUpdateOne { + buo.mutation.SetUntil(t) + return buo +} + +// SetNillableUntil sets the until field if the given value is not nil. +func (buo *BouncerUpdateOne) SetNillableUntil(t *time.Time) *BouncerUpdateOne { + if t != nil { + buo.SetUntil(*t) + } + return buo +} + +// ClearUntil clears the value of until. +func (buo *BouncerUpdateOne) ClearUntil() *BouncerUpdateOne { + buo.mutation.ClearUntil() + return buo +} + +// SetLastPull sets the last_pull field. +func (buo *BouncerUpdateOne) SetLastPull(t time.Time) *BouncerUpdateOne { + buo.mutation.SetLastPull(t) + return buo +} + +// SetNillableLastPull sets the last_pull field if the given value is not nil. +func (buo *BouncerUpdateOne) SetNillableLastPull(t *time.Time) *BouncerUpdateOne { + if t != nil { + buo.SetLastPull(*t) + } + return buo +} + +// Mutation returns the BouncerMutation object of the builder. +func (buo *BouncerUpdateOne) Mutation() *BouncerMutation { + return buo.mutation +} + +// Save executes the query and returns the updated entity. +func (buo *BouncerUpdateOne) Save(ctx context.Context) (*Bouncer, error) { + var ( + err error + node *Bouncer + ) + if len(buo.hooks) == 0 { + node, err = buo.sqlSave(ctx) + } else { + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*BouncerMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + buo.mutation = mutation + node, err = buo.sqlSave(ctx) + mutation.done = true + return node, err + }) + for i := len(buo.hooks) - 1; i >= 0; i-- { + mut = buo.hooks[i](mut) + } + if _, err := mut.Mutate(ctx, buo.mutation); err != nil { + return nil, err + } + } + return node, err +} + +// SaveX is like Save, but panics if an error occurs. +func (buo *BouncerUpdateOne) SaveX(ctx context.Context) *Bouncer { + node, err := buo.Save(ctx) + if err != nil { + panic(err) + } + return node +} + +// Exec executes the query on the entity. +func (buo *BouncerUpdateOne) Exec(ctx context.Context) error { + _, err := buo.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. 
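+
+// Usage sketch (hypothetical): updating a single entity. A BouncerUpdateOne is
+// normally obtained from the client; the UpdateOneID constructor used here is an
+// assumption based on the usual entc-generated client API, not shown in this
+// diff.
+//
+//	b, err := client.Bouncer.UpdateOneID(id).
+//		ClearUntil().
+//		SetLastPull(time.Now()).
+//		Save(ctx)
+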
+func (buo *BouncerUpdateOne) ExecX(ctx context.Context) { + if err := buo.Exec(ctx); err != nil { + panic(err) + } +} + +func (buo *BouncerUpdateOne) sqlSave(ctx context.Context) (_node *Bouncer, err error) { + _spec := &sqlgraph.UpdateSpec{ + Node: &sqlgraph.NodeSpec{ + Table: bouncer.Table, + Columns: bouncer.Columns, + ID: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: bouncer.FieldID, + }, + }, + } + id, ok := buo.mutation.ID() + if !ok { + return nil, &ValidationError{Name: "ID", err: fmt.Errorf("missing Bouncer.ID for update")} + } + _spec.Node.ID.Value = id + if value, ok := buo.mutation.CreatedAt(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: bouncer.FieldCreatedAt, + }) + } + if value, ok := buo.mutation.UpdatedAt(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: bouncer.FieldUpdatedAt, + }) + } + if value, ok := buo.mutation.Name(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: bouncer.FieldName, + }) + } + if value, ok := buo.mutation.APIKey(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: bouncer.FieldAPIKey, + }) + } + if value, ok := buo.mutation.Revoked(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeBool, + Value: value, + Column: bouncer.FieldRevoked, + }) + } + if value, ok := buo.mutation.IPAddress(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: bouncer.FieldIPAddress, + }) + } + if buo.mutation.IPAddressCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Column: bouncer.FieldIPAddress, + }) + } + if value, ok := buo.mutation.GetType(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: bouncer.FieldType, + }) + } + if buo.mutation.TypeCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Column: bouncer.FieldType, + }) + } + if value, ok := buo.mutation.Version(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: bouncer.FieldVersion, + }) + } + if buo.mutation.VersionCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Column: bouncer.FieldVersion, + }) + } + if value, ok := buo.mutation.Until(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: bouncer.FieldUntil, + }) + } + if buo.mutation.UntilCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Column: bouncer.FieldUntil, + }) + } + if value, ok := buo.mutation.LastPull(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: bouncer.FieldLastPull, + }) + } + _node = &Bouncer{config: buo.config} + _spec.Assign = _node.assignValues + _spec.ScanValues = _node.scanValues() + if err = sqlgraph.UpdateNode(ctx, buo.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{bouncer.Label} + } else if cerr, ok := isSQLConstraintError(err); 
ok { + err = cerr + } + return nil, err + } + return _node, nil +} diff --git a/pkg/database/ent/client.go b/pkg/database/ent/client.go new file mode 100644 index 000000000..87aedaac0 --- /dev/null +++ b/pkg/database/ent/client.go @@ -0,0 +1,809 @@ +// Code generated by entc, DO NOT EDIT. + +package ent + +import ( + "context" + "fmt" + "log" + + "github.com/crowdsecurity/crowdsec/pkg/database/ent/migrate" + + "github.com/crowdsecurity/crowdsec/pkg/database/ent/alert" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/bouncer" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/decision" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/event" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/machine" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/meta" + + "github.com/facebook/ent/dialect" + "github.com/facebook/ent/dialect/sql" + "github.com/facebook/ent/dialect/sql/sqlgraph" +) + +// Client is the client that holds all ent builders. +type Client struct { + config + // Schema is the client for creating, migrating and dropping schema. + Schema *migrate.Schema + // Alert is the client for interacting with the Alert builders. + Alert *AlertClient + // Bouncer is the client for interacting with the Bouncer builders. + Bouncer *BouncerClient + // Decision is the client for interacting with the Decision builders. + Decision *DecisionClient + // Event is the client for interacting with the Event builders. + Event *EventClient + // Machine is the client for interacting with the Machine builders. + Machine *MachineClient + // Meta is the client for interacting with the Meta builders. + Meta *MetaClient +} + +// NewClient creates a new client configured with the given options. +func NewClient(opts ...Option) *Client { + cfg := config{log: log.Println, hooks: &hooks{}} + cfg.options(opts...) + client := &Client{config: cfg} + client.init() + return client +} + +func (c *Client) init() { + c.Schema = migrate.NewSchema(c.driver) + c.Alert = NewAlertClient(c.config) + c.Bouncer = NewBouncerClient(c.config) + c.Decision = NewDecisionClient(c.config) + c.Event = NewEventClient(c.config) + c.Machine = NewMachineClient(c.config) + c.Meta = NewMetaClient(c.config) +} + +// Open opens a database/sql.DB specified by the driver name and +// the data source name, and returns a new client attached to it. +// Optional parameters can be added for configuring the client. +func Open(driverName, dataSourceName string, options ...Option) (*Client, error) { + switch driverName { + case dialect.MySQL, dialect.Postgres, dialect.SQLite: + drv, err := sql.Open(driverName, dataSourceName) + if err != nil { + return nil, err + } + return NewClient(append(options, Driver(drv))...), nil + default: + return nil, fmt.Errorf("unsupported driver: %q", driverName) + } +} + +// Tx returns a new transactional client. The provided context +// is used until the transaction is committed or rolled back. 
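
The Open helper above only recognizes the mysql, postgres and sqlite3 dialects. A minimal bootstrap sketch, assuming an SQLite driver such as mattn/go-sqlite3 is linked in; the DSN and the Schema.Create auto-migration call follow entc's usual conventions and are assumptions, not part of this hunk:

package main

import (
	"context"
	"log"

	"github.com/crowdsecurity/crowdsec/pkg/database/ent"
	_ "github.com/mattn/go-sqlite3" // assumed driver; registers the "sqlite3" dialect
)

func main() {
	client, err := ent.Open("sqlite3", "file:crowdsec.db?cache=shared&_fk=1")
	if err != nil {
		log.Fatalf("failed opening ent client: %v", err)
	}
	defer client.Close()

	// Schema.Create is entc's usual auto-migration entry point (assumed here;
	// the migrate package is not shown in this diff).
	if err := client.Schema.Create(context.Background()); err != nil {
		log.Fatalf("failed creating schema resources: %v", err)
	}
}
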
+func (c *Client) Tx(ctx context.Context) (*Tx, error) { + if _, ok := c.driver.(*txDriver); ok { + return nil, fmt.Errorf("ent: cannot start a transaction within a transaction") + } + tx, err := newTx(ctx, c.driver) + if err != nil { + return nil, fmt.Errorf("ent: starting a transaction: %v", err) + } + cfg := config{driver: tx, log: c.log, debug: c.debug, hooks: c.hooks} + return &Tx{ + ctx: ctx, + config: cfg, + Alert: NewAlertClient(cfg), + Bouncer: NewBouncerClient(cfg), + Decision: NewDecisionClient(cfg), + Event: NewEventClient(cfg), + Machine: NewMachineClient(cfg), + Meta: NewMetaClient(cfg), + }, nil +} + +// BeginTx returns a transactional client with options. +func (c *Client) BeginTx(ctx context.Context, opts *sql.TxOptions) (*Tx, error) { + if _, ok := c.driver.(*txDriver); ok { + return nil, fmt.Errorf("ent: cannot start a transaction within a transaction") + } + tx, err := c.driver.(*sql.Driver).BeginTx(ctx, opts) + if err != nil { + return nil, fmt.Errorf("ent: starting a transaction: %v", err) + } + cfg := config{driver: &txDriver{tx: tx, drv: c.driver}, log: c.log, debug: c.debug, hooks: c.hooks} + return &Tx{ + config: cfg, + Alert: NewAlertClient(cfg), + Bouncer: NewBouncerClient(cfg), + Decision: NewDecisionClient(cfg), + Event: NewEventClient(cfg), + Machine: NewMachineClient(cfg), + Meta: NewMetaClient(cfg), + }, nil +} + +// Debug returns a new debug-client. It's used to get verbose logging on specific operations. +// +// client.Debug(). +// Alert. +// Query(). +// Count(ctx) +// +func (c *Client) Debug() *Client { + if c.debug { + return c + } + cfg := config{driver: dialect.Debug(c.driver, c.log), log: c.log, debug: true, hooks: c.hooks} + client := &Client{config: cfg} + client.init() + return client +} + +// Close closes the database connection and prevents new queries from starting. +func (c *Client) Close() error { + return c.driver.Close() +} + +// Use adds the mutation hooks to all the entity clients. +// In order to add hooks to a specific client, call: `client.Node.Use(...)`. +func (c *Client) Use(hooks ...Hook) { + c.Alert.Use(hooks...) + c.Bouncer.Use(hooks...) + c.Decision.Use(hooks...) + c.Event.Use(hooks...) + c.Machine.Use(hooks...) + c.Meta.Use(hooks...) +} + +// AlertClient is a client for the Alert schema. +type AlertClient struct { + config +} + +// NewAlertClient returns a client for the Alert from the given config. +func NewAlertClient(c config) *AlertClient { + return &AlertClient{config: c} +} + +// Use adds a list of mutation hooks to the hooks stack. +// A call to `Use(f, g, h)` equals to `alert.Hooks(f(g(h())))`. +func (c *AlertClient) Use(hooks ...Hook) { + c.hooks.Alert = append(c.hooks.Alert, hooks...) +} + +// Create returns a create builder for Alert. +func (c *AlertClient) Create() *AlertCreate { + mutation := newAlertMutation(c.config, OpCreate) + return &AlertCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// BulkCreate returns a builder for creating a bulk of Alert entities. +func (c *AlertClient) CreateBulk(builders ...*AlertCreate) *AlertCreateBulk { + return &AlertCreateBulk{config: c.config, builders: builders} +} + +// Update returns an update builder for Alert. +func (c *AlertClient) Update() *AlertUpdate { + mutation := newAlertMutation(c.config, OpUpdate) + return &AlertUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOne returns an update builder for the given entity. 
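
Tx and BeginTx above return a transactional client whose per-entity clients all share one tx-scoped driver. A sketch of the usual commit/rollback discipline; doWork is a placeholder, fmt is assumed imported, and Commit/Rollback live in the generated tx.go, which is outside this hunk:

func createInTx(ctx context.Context, client *ent.Client, doWork func(*ent.Tx) error) error {
	tx, err := client.Tx(ctx)
	if err != nil {
		return err
	}
	if err := doWork(tx); err != nil {
		// Roll back and surface the original error (Rollback assumed from the generated Tx).
		if rerr := tx.Rollback(); rerr != nil {
			return fmt.Errorf("%v: rolling back transaction: %v", err, rerr)
		}
		return err
	}
	return tx.Commit() // Commit assumed from the generated Tx
}
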
+func (c *AlertClient) UpdateOne(a *Alert) *AlertUpdateOne { + mutation := newAlertMutation(c.config, OpUpdateOne, withAlert(a)) + return &AlertUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOneID returns an update builder for the given id. +func (c *AlertClient) UpdateOneID(id int) *AlertUpdateOne { + mutation := newAlertMutation(c.config, OpUpdateOne, withAlertID(id)) + return &AlertUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// Delete returns a delete builder for Alert. +func (c *AlertClient) Delete() *AlertDelete { + mutation := newAlertMutation(c.config, OpDelete) + return &AlertDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// DeleteOne returns a delete builder for the given entity. +func (c *AlertClient) DeleteOne(a *Alert) *AlertDeleteOne { + return c.DeleteOneID(a.ID) +} + +// DeleteOneID returns a delete builder for the given id. +func (c *AlertClient) DeleteOneID(id int) *AlertDeleteOne { + builder := c.Delete().Where(alert.ID(id)) + builder.mutation.id = &id + builder.mutation.op = OpDeleteOne + return &AlertDeleteOne{builder} +} + +// Query returns a query builder for Alert. +func (c *AlertClient) Query() *AlertQuery { + return &AlertQuery{config: c.config} +} + +// Get returns a Alert entity by its id. +func (c *AlertClient) Get(ctx context.Context, id int) (*Alert, error) { + return c.Query().Where(alert.ID(id)).Only(ctx) +} + +// GetX is like Get, but panics if an error occurs. +func (c *AlertClient) GetX(ctx context.Context, id int) *Alert { + obj, err := c.Get(ctx, id) + if err != nil { + panic(err) + } + return obj +} + +// QueryOwner queries the owner edge of a Alert. +func (c *AlertClient) QueryOwner(a *Alert) *MachineQuery { + query := &MachineQuery{config: c.config} + query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + id := a.ID + step := sqlgraph.NewStep( + sqlgraph.From(alert.Table, alert.FieldID, id), + sqlgraph.To(machine.Table, machine.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, alert.OwnerTable, alert.OwnerColumn), + ) + fromV = sqlgraph.Neighbors(a.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// QueryDecisions queries the decisions edge of a Alert. +func (c *AlertClient) QueryDecisions(a *Alert) *DecisionQuery { + query := &DecisionQuery{config: c.config} + query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + id := a.ID + step := sqlgraph.NewStep( + sqlgraph.From(alert.Table, alert.FieldID, id), + sqlgraph.To(decision.Table, decision.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, alert.DecisionsTable, alert.DecisionsColumn), + ) + fromV = sqlgraph.Neighbors(a.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// QueryEvents queries the events edge of a Alert. +func (c *AlertClient) QueryEvents(a *Alert) *EventQuery { + query := &EventQuery{config: c.config} + query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + id := a.ID + step := sqlgraph.NewStep( + sqlgraph.From(alert.Table, alert.FieldID, id), + sqlgraph.To(event.Table, event.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, alert.EventsTable, alert.EventsColumn), + ) + fromV = sqlgraph.Neighbors(a.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// QueryMetas queries the metas edge of a Alert. 
+func (c *AlertClient) QueryMetas(a *Alert) *MetaQuery { + query := &MetaQuery{config: c.config} + query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + id := a.ID + step := sqlgraph.NewStep( + sqlgraph.From(alert.Table, alert.FieldID, id), + sqlgraph.To(meta.Table, meta.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, alert.MetasTable, alert.MetasColumn), + ) + fromV = sqlgraph.Neighbors(a.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// Hooks returns the client hooks. +func (c *AlertClient) Hooks() []Hook { + return c.hooks.Alert +} + +// BouncerClient is a client for the Bouncer schema. +type BouncerClient struct { + config +} + +// NewBouncerClient returns a client for the Bouncer from the given config. +func NewBouncerClient(c config) *BouncerClient { + return &BouncerClient{config: c} +} + +// Use adds a list of mutation hooks to the hooks stack. +// A call to `Use(f, g, h)` equals to `bouncer.Hooks(f(g(h())))`. +func (c *BouncerClient) Use(hooks ...Hook) { + c.hooks.Bouncer = append(c.hooks.Bouncer, hooks...) +} + +// Create returns a create builder for Bouncer. +func (c *BouncerClient) Create() *BouncerCreate { + mutation := newBouncerMutation(c.config, OpCreate) + return &BouncerCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// BulkCreate returns a builder for creating a bulk of Bouncer entities. +func (c *BouncerClient) CreateBulk(builders ...*BouncerCreate) *BouncerCreateBulk { + return &BouncerCreateBulk{config: c.config, builders: builders} +} + +// Update returns an update builder for Bouncer. +func (c *BouncerClient) Update() *BouncerUpdate { + mutation := newBouncerMutation(c.config, OpUpdate) + return &BouncerUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOne returns an update builder for the given entity. +func (c *BouncerClient) UpdateOne(b *Bouncer) *BouncerUpdateOne { + mutation := newBouncerMutation(c.config, OpUpdateOne, withBouncer(b)) + return &BouncerUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOneID returns an update builder for the given id. +func (c *BouncerClient) UpdateOneID(id int) *BouncerUpdateOne { + mutation := newBouncerMutation(c.config, OpUpdateOne, withBouncerID(id)) + return &BouncerUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// Delete returns a delete builder for Bouncer. +func (c *BouncerClient) Delete() *BouncerDelete { + mutation := newBouncerMutation(c.config, OpDelete) + return &BouncerDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// DeleteOne returns a delete builder for the given entity. +func (c *BouncerClient) DeleteOne(b *Bouncer) *BouncerDeleteOne { + return c.DeleteOneID(b.ID) +} + +// DeleteOneID returns a delete builder for the given id. +func (c *BouncerClient) DeleteOneID(id int) *BouncerDeleteOne { + builder := c.Delete().Where(bouncer.ID(id)) + builder.mutation.id = &id + builder.mutation.op = OpDeleteOne + return &BouncerDeleteOne{builder} +} + +// Query returns a query builder for Bouncer. +func (c *BouncerClient) Query() *BouncerQuery { + return &BouncerQuery{config: c.config} +} + +// Get returns a Bouncer entity by its id. +func (c *BouncerClient) Get(ctx context.Context, id int) (*Bouncer, error) { + return c.Query().Where(bouncer.ID(id)).Only(ctx) +} + +// GetX is like Get, but panics if an error occurs. 
+func (c *BouncerClient) GetX(ctx context.Context, id int) *Bouncer { + obj, err := c.Get(ctx, id) + if err != nil { + panic(err) + } + return obj +} + +// Hooks returns the client hooks. +func (c *BouncerClient) Hooks() []Hook { + return c.hooks.Bouncer +} + +// DecisionClient is a client for the Decision schema. +type DecisionClient struct { + config +} + +// NewDecisionClient returns a client for the Decision from the given config. +func NewDecisionClient(c config) *DecisionClient { + return &DecisionClient{config: c} +} + +// Use adds a list of mutation hooks to the hooks stack. +// A call to `Use(f, g, h)` equals to `decision.Hooks(f(g(h())))`. +func (c *DecisionClient) Use(hooks ...Hook) { + c.hooks.Decision = append(c.hooks.Decision, hooks...) +} + +// Create returns a create builder for Decision. +func (c *DecisionClient) Create() *DecisionCreate { + mutation := newDecisionMutation(c.config, OpCreate) + return &DecisionCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// BulkCreate returns a builder for creating a bulk of Decision entities. +func (c *DecisionClient) CreateBulk(builders ...*DecisionCreate) *DecisionCreateBulk { + return &DecisionCreateBulk{config: c.config, builders: builders} +} + +// Update returns an update builder for Decision. +func (c *DecisionClient) Update() *DecisionUpdate { + mutation := newDecisionMutation(c.config, OpUpdate) + return &DecisionUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOne returns an update builder for the given entity. +func (c *DecisionClient) UpdateOne(d *Decision) *DecisionUpdateOne { + mutation := newDecisionMutation(c.config, OpUpdateOne, withDecision(d)) + return &DecisionUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOneID returns an update builder for the given id. +func (c *DecisionClient) UpdateOneID(id int) *DecisionUpdateOne { + mutation := newDecisionMutation(c.config, OpUpdateOne, withDecisionID(id)) + return &DecisionUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// Delete returns a delete builder for Decision. +func (c *DecisionClient) Delete() *DecisionDelete { + mutation := newDecisionMutation(c.config, OpDelete) + return &DecisionDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// DeleteOne returns a delete builder for the given entity. +func (c *DecisionClient) DeleteOne(d *Decision) *DecisionDeleteOne { + return c.DeleteOneID(d.ID) +} + +// DeleteOneID returns a delete builder for the given id. +func (c *DecisionClient) DeleteOneID(id int) *DecisionDeleteOne { + builder := c.Delete().Where(decision.ID(id)) + builder.mutation.id = &id + builder.mutation.op = OpDeleteOne + return &DecisionDeleteOne{builder} +} + +// Query returns a query builder for Decision. +func (c *DecisionClient) Query() *DecisionQuery { + return &DecisionQuery{config: c.config} +} + +// Get returns a Decision entity by its id. +func (c *DecisionClient) Get(ctx context.Context, id int) (*Decision, error) { + return c.Query().Where(decision.ID(id)).Only(ctx) +} + +// GetX is like Get, but panics if an error occurs. +func (c *DecisionClient) GetX(ctx context.Context, id int) *Decision { + obj, err := c.Get(ctx, id) + if err != nil { + panic(err) + } + return obj +} + +// QueryOwner queries the owner edge of a Decision. 
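
The DeleteOneID pattern above pre-wires both the id and the OpDeleteOne op into the mutation before handing back the one-shot builder. A sketch of consuming it, assuming the generated DecisionDeleteOne exposes the usual Exec terminator (that builder is defined outside this hunk):

// deleteDecision is illustrative only.
func deleteDecision(ctx context.Context, client *ent.Client, id int) error {
	return client.Decision.DeleteOneID(id).Exec(ctx) // Exec assumed from the generated delete builder
}
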
+func (c *DecisionClient) QueryOwner(d *Decision) *AlertQuery { + query := &AlertQuery{config: c.config} + query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + id := d.ID + step := sqlgraph.NewStep( + sqlgraph.From(decision.Table, decision.FieldID, id), + sqlgraph.To(alert.Table, alert.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, decision.OwnerTable, decision.OwnerColumn), + ) + fromV = sqlgraph.Neighbors(d.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// Hooks returns the client hooks. +func (c *DecisionClient) Hooks() []Hook { + return c.hooks.Decision +} + +// EventClient is a client for the Event schema. +type EventClient struct { + config +} + +// NewEventClient returns a client for the Event from the given config. +func NewEventClient(c config) *EventClient { + return &EventClient{config: c} +} + +// Use adds a list of mutation hooks to the hooks stack. +// A call to `Use(f, g, h)` equals to `event.Hooks(f(g(h())))`. +func (c *EventClient) Use(hooks ...Hook) { + c.hooks.Event = append(c.hooks.Event, hooks...) +} + +// Create returns a create builder for Event. +func (c *EventClient) Create() *EventCreate { + mutation := newEventMutation(c.config, OpCreate) + return &EventCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// BulkCreate returns a builder for creating a bulk of Event entities. +func (c *EventClient) CreateBulk(builders ...*EventCreate) *EventCreateBulk { + return &EventCreateBulk{config: c.config, builders: builders} +} + +// Update returns an update builder for Event. +func (c *EventClient) Update() *EventUpdate { + mutation := newEventMutation(c.config, OpUpdate) + return &EventUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOne returns an update builder for the given entity. +func (c *EventClient) UpdateOne(e *Event) *EventUpdateOne { + mutation := newEventMutation(c.config, OpUpdateOne, withEvent(e)) + return &EventUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOneID returns an update builder for the given id. +func (c *EventClient) UpdateOneID(id int) *EventUpdateOne { + mutation := newEventMutation(c.config, OpUpdateOne, withEventID(id)) + return &EventUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// Delete returns a delete builder for Event. +func (c *EventClient) Delete() *EventDelete { + mutation := newEventMutation(c.config, OpDelete) + return &EventDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// DeleteOne returns a delete builder for the given entity. +func (c *EventClient) DeleteOne(e *Event) *EventDeleteOne { + return c.DeleteOneID(e.ID) +} + +// DeleteOneID returns a delete builder for the given id. +func (c *EventClient) DeleteOneID(id int) *EventDeleteOne { + builder := c.Delete().Where(event.ID(id)) + builder.mutation.id = &id + builder.mutation.op = OpDeleteOne + return &EventDeleteOne{builder} +} + +// Query returns a query builder for Event. +func (c *EventClient) Query() *EventQuery { + return &EventQuery{config: c.config} +} + +// Get returns a Event entity by its id. +func (c *EventClient) Get(ctx context.Context, id int) (*Event, error) { + return c.Query().Where(event.ID(id)).Only(ctx) +} + +// GetX is like Get, but panics if an error occurs. +func (c *EventClient) GetX(ctx context.Context, id int) *Event { + obj, err := c.Get(ctx, id) + if err != nil { + panic(err) + } + return obj +} + +// QueryOwner queries the owner edge of a Event. 
+func (c *EventClient) QueryOwner(e *Event) *AlertQuery { + query := &AlertQuery{config: c.config} + query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + id := e.ID + step := sqlgraph.NewStep( + sqlgraph.From(event.Table, event.FieldID, id), + sqlgraph.To(alert.Table, alert.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, event.OwnerTable, event.OwnerColumn), + ) + fromV = sqlgraph.Neighbors(e.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// Hooks returns the client hooks. +func (c *EventClient) Hooks() []Hook { + return c.hooks.Event +} + +// MachineClient is a client for the Machine schema. +type MachineClient struct { + config +} + +// NewMachineClient returns a client for the Machine from the given config. +func NewMachineClient(c config) *MachineClient { + return &MachineClient{config: c} +} + +// Use adds a list of mutation hooks to the hooks stack. +// A call to `Use(f, g, h)` equals to `machine.Hooks(f(g(h())))`. +func (c *MachineClient) Use(hooks ...Hook) { + c.hooks.Machine = append(c.hooks.Machine, hooks...) +} + +// Create returns a create builder for Machine. +func (c *MachineClient) Create() *MachineCreate { + mutation := newMachineMutation(c.config, OpCreate) + return &MachineCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// BulkCreate returns a builder for creating a bulk of Machine entities. +func (c *MachineClient) CreateBulk(builders ...*MachineCreate) *MachineCreateBulk { + return &MachineCreateBulk{config: c.config, builders: builders} +} + +// Update returns an update builder for Machine. +func (c *MachineClient) Update() *MachineUpdate { + mutation := newMachineMutation(c.config, OpUpdate) + return &MachineUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOne returns an update builder for the given entity. +func (c *MachineClient) UpdateOne(m *Machine) *MachineUpdateOne { + mutation := newMachineMutation(c.config, OpUpdateOne, withMachine(m)) + return &MachineUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOneID returns an update builder for the given id. +func (c *MachineClient) UpdateOneID(id int) *MachineUpdateOne { + mutation := newMachineMutation(c.config, OpUpdateOne, withMachineID(id)) + return &MachineUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// Delete returns a delete builder for Machine. +func (c *MachineClient) Delete() *MachineDelete { + mutation := newMachineMutation(c.config, OpDelete) + return &MachineDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// DeleteOne returns a delete builder for the given entity. +func (c *MachineClient) DeleteOne(m *Machine) *MachineDeleteOne { + return c.DeleteOneID(m.ID) +} + +// DeleteOneID returns a delete builder for the given id. +func (c *MachineClient) DeleteOneID(id int) *MachineDeleteOne { + builder := c.Delete().Where(machine.ID(id)) + builder.mutation.id = &id + builder.mutation.op = OpDeleteOne + return &MachineDeleteOne{builder} +} + +// Query returns a query builder for Machine. +func (c *MachineClient) Query() *MachineQuery { + return &MachineQuery{config: c.config} +} + +// Get returns a Machine entity by its id. +func (c *MachineClient) Get(ctx context.Context, id int) (*Machine, error) { + return c.Query().Where(machine.ID(id)).Only(ctx) +} + +// GetX is like Get, but panics if an error occurs. 
+func (c *MachineClient) GetX(ctx context.Context, id int) *Machine { + obj, err := c.Get(ctx, id) + if err != nil { + panic(err) + } + return obj +} + +// QueryAlerts queries the alerts edge of a Machine. +func (c *MachineClient) QueryAlerts(m *Machine) *AlertQuery { + query := &AlertQuery{config: c.config} + query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + id := m.ID + step := sqlgraph.NewStep( + sqlgraph.From(machine.Table, machine.FieldID, id), + sqlgraph.To(alert.Table, alert.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, machine.AlertsTable, machine.AlertsColumn), + ) + fromV = sqlgraph.Neighbors(m.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// Hooks returns the client hooks. +func (c *MachineClient) Hooks() []Hook { + return c.hooks.Machine +} + +// MetaClient is a client for the Meta schema. +type MetaClient struct { + config +} + +// NewMetaClient returns a client for the Meta from the given config. +func NewMetaClient(c config) *MetaClient { + return &MetaClient{config: c} +} + +// Use adds a list of mutation hooks to the hooks stack. +// A call to `Use(f, g, h)` equals to `meta.Hooks(f(g(h())))`. +func (c *MetaClient) Use(hooks ...Hook) { + c.hooks.Meta = append(c.hooks.Meta, hooks...) +} + +// Create returns a create builder for Meta. +func (c *MetaClient) Create() *MetaCreate { + mutation := newMetaMutation(c.config, OpCreate) + return &MetaCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// BulkCreate returns a builder for creating a bulk of Meta entities. +func (c *MetaClient) CreateBulk(builders ...*MetaCreate) *MetaCreateBulk { + return &MetaCreateBulk{config: c.config, builders: builders} +} + +// Update returns an update builder for Meta. +func (c *MetaClient) Update() *MetaUpdate { + mutation := newMetaMutation(c.config, OpUpdate) + return &MetaUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOne returns an update builder for the given entity. +func (c *MetaClient) UpdateOne(m *Meta) *MetaUpdateOne { + mutation := newMetaMutation(c.config, OpUpdateOne, withMeta(m)) + return &MetaUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOneID returns an update builder for the given id. +func (c *MetaClient) UpdateOneID(id int) *MetaUpdateOne { + mutation := newMetaMutation(c.config, OpUpdateOne, withMetaID(id)) + return &MetaUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// Delete returns a delete builder for Meta. +func (c *MetaClient) Delete() *MetaDelete { + mutation := newMetaMutation(c.config, OpDelete) + return &MetaDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// DeleteOne returns a delete builder for the given entity. +func (c *MetaClient) DeleteOne(m *Meta) *MetaDeleteOne { + return c.DeleteOneID(m.ID) +} + +// DeleteOneID returns a delete builder for the given id. +func (c *MetaClient) DeleteOneID(id int) *MetaDeleteOne { + builder := c.Delete().Where(meta.ID(id)) + builder.mutation.id = &id + builder.mutation.op = OpDeleteOne + return &MetaDeleteOne{builder} +} + +// Query returns a query builder for Meta. +func (c *MetaClient) Query() *MetaQuery { + return &MetaQuery{config: c.config} +} + +// Get returns a Meta entity by its id. +func (c *MetaClient) Get(ctx context.Context, id int) (*Meta, error) { + return c.Query().Where(meta.ID(id)).Only(ctx) +} + +// GetX is like Get, but panics if an error occurs. 
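
QueryAlerts above is the generated edge walker from a Machine to its Alerts; combined with AlertClient.QueryDecisions it gives two-hop graph traversal without hand-written joins. A sketch, assuming the usual generated All terminator on queries:

func decisionsForMachine(ctx context.Context, client *ent.Client, m *ent.Machine) ([]*ent.Decision, error) {
	alerts, err := client.Machine.QueryAlerts(m).All(ctx) // All assumed from the generated query API
	if err != nil {
		return nil, err
	}
	var out []*ent.Decision
	for _, a := range alerts {
		// Second hop: each alert's O2M decisions edge.
		ds, err := client.Alert.QueryDecisions(a).All(ctx)
		if err != nil {
			return nil, err
		}
		out = append(out, ds...)
	}
	return out, nil
}
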
+func (c *MetaClient) GetX(ctx context.Context, id int) *Meta { + obj, err := c.Get(ctx, id) + if err != nil { + panic(err) + } + return obj +} + +// QueryOwner queries the owner edge of a Meta. +func (c *MetaClient) QueryOwner(m *Meta) *AlertQuery { + query := &AlertQuery{config: c.config} + query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + id := m.ID + step := sqlgraph.NewStep( + sqlgraph.From(meta.Table, meta.FieldID, id), + sqlgraph.To(alert.Table, alert.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, meta.OwnerTable, meta.OwnerColumn), + ) + fromV = sqlgraph.Neighbors(m.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// Hooks returns the client hooks. +func (c *MetaClient) Hooks() []Hook { + return c.hooks.Meta +} diff --git a/pkg/database/ent/config.go b/pkg/database/ent/config.go new file mode 100644 index 000000000..caef01e1e --- /dev/null +++ b/pkg/database/ent/config.go @@ -0,0 +1,64 @@ +// Code generated by entc, DO NOT EDIT. + +package ent + +import ( + "github.com/facebook/ent" + "github.com/facebook/ent/dialect" +) + +// Option function to configure the client. +type Option func(*config) + +// Config is the configuration for the client and its builder. +type config struct { + // driver used for executing database requests. + driver dialect.Driver + // debug enable a debug logging. + debug bool + // log used for logging on debug mode. + log func(...interface{}) + // hooks to execute on mutations. + hooks *hooks +} + +// hooks per client, for fast access. +type hooks struct { + Alert []ent.Hook + Bouncer []ent.Hook + Decision []ent.Hook + Event []ent.Hook + Machine []ent.Hook + Meta []ent.Hook +} + +// Options applies the options on the config object. +func (c *config) options(opts ...Option) { + for _, opt := range opts { + opt(c) + } + if c.debug { + c.driver = dialect.Debug(c.driver, c.log) + } +} + +// Debug enables debug logging on the ent.Driver. +func Debug() Option { + return func(c *config) { + c.debug = true + } +} + +// Log sets the logging function for debug mode. +func Log(fn func(...interface{})) Option { + return func(c *config) { + c.log = fn + } +} + +// Driver configures the client driver. +func Driver(driver dialect.Driver) Option { + return func(c *config) { + c.driver = driver + } +} diff --git a/pkg/database/ent/context.go b/pkg/database/ent/context.go new file mode 100644 index 000000000..b432b5961 --- /dev/null +++ b/pkg/database/ent/context.go @@ -0,0 +1,33 @@ +// Code generated by entc, DO NOT EDIT. + +package ent + +import ( + "context" +) + +type clientCtxKey struct{} + +// FromContext returns the Client stored in a context, or nil if there isn't one. +func FromContext(ctx context.Context) *Client { + c, _ := ctx.Value(clientCtxKey{}).(*Client) + return c +} + +// NewContext returns a new context with the given Client attached. +func NewContext(parent context.Context, c *Client) context.Context { + return context.WithValue(parent, clientCtxKey{}, c) +} + +type txCtxKey struct{} + +// TxFromContext returns the Tx stored in a context, or nil if there isn't one. +func TxFromContext(ctx context.Context) *Tx { + tx, _ := ctx.Value(txCtxKey{}).(*Tx) + return tx +} + +// NewTxContext returns a new context with the given Client attached. 
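
context.go above is the standard ent plumbing for threading a client (or Tx) through a context, e.g. into HTTP handlers or mutation hooks. A minimal sketch reusing the Query().Count(ctx) shape from the Debug doc comment; errors is assumed imported, and the NewContext call would sit near startup:

// At startup (sketch): ctx = ent.NewContext(ctx, client)

func alertCount(ctx context.Context) (int, error) {
	c := ent.FromContext(ctx)
	if c == nil {
		return 0, errors.New("no ent client in context")
	}
	return c.Alert.Query().Count(ctx)
}
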
+func NewTxContext(parent context.Context, tx *Tx) context.Context { + return context.WithValue(parent, txCtxKey{}, tx) +} diff --git a/pkg/database/ent/decision.go b/pkg/database/ent/decision.go new file mode 100644 index 000000000..c2abb2020 --- /dev/null +++ b/pkg/database/ent/decision.go @@ -0,0 +1,236 @@ +// Code generated by entc, DO NOT EDIT. + +package ent + +import ( + "fmt" + "strings" + "time" + + "github.com/crowdsecurity/crowdsec/pkg/database/ent/alert" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/decision" + "github.com/facebook/ent/dialect/sql" +) + +// Decision is the model entity for the Decision schema. +type Decision struct { + config `json:"-"` + // ID of the ent. + ID int `json:"id,omitempty"` + // CreatedAt holds the value of the "created_at" field. + CreatedAt time.Time `json:"created_at,omitempty"` + // UpdatedAt holds the value of the "updated_at" field. + UpdatedAt time.Time `json:"updated_at,omitempty"` + // Until holds the value of the "until" field. + Until time.Time `json:"until,omitempty"` + // Scenario holds the value of the "scenario" field. + Scenario string `json:"scenario,omitempty"` + // Type holds the value of the "type" field. + Type string `json:"type,omitempty"` + // StartIP holds the value of the "start_ip" field. + StartIP int64 `json:"start_ip,omitempty"` + // EndIP holds the value of the "end_ip" field. + EndIP int64 `json:"end_ip,omitempty"` + // Scope holds the value of the "scope" field. + Scope string `json:"scope,omitempty"` + // Value holds the value of the "value" field. + Value string `json:"value,omitempty"` + // Origin holds the value of the "origin" field. + Origin string `json:"origin,omitempty"` + // Simulated holds the value of the "simulated" field. + Simulated bool `json:"simulated,omitempty"` + // Edges holds the relations/edges for other nodes in the graph. + // The values are being populated by the DecisionQuery when eager-loading is set. + Edges DecisionEdges `json:"edges"` + alert_decisions *int +} + +// DecisionEdges holds the relations/edges for other nodes in the graph. +type DecisionEdges struct { + // Owner holds the value of the owner edge. + Owner *Alert + // loadedTypes holds the information for reporting if a + // type was loaded (or requested) in eager-loading or not. + loadedTypes [1]bool +} + +// OwnerOrErr returns the Owner value or an error if the edge +// was not loaded in eager-loading, or loaded but was not found. +func (e DecisionEdges) OwnerOrErr() (*Alert, error) { + if e.loadedTypes[0] { + if e.Owner == nil { + // The edge owner was loaded in eager-loading, + // but was not found. + return nil, &NotFoundError{label: alert.Label} + } + return e.Owner, nil + } + return nil, &NotLoadedError{edge: "owner"} +} + +// scanValues returns the types for scanning values from sql.Rows. +func (*Decision) scanValues() []interface{} { + return []interface{}{ + &sql.NullInt64{}, // id + &sql.NullTime{}, // created_at + &sql.NullTime{}, // updated_at + &sql.NullTime{}, // until + &sql.NullString{}, // scenario + &sql.NullString{}, // type + &sql.NullInt64{}, // start_ip + &sql.NullInt64{}, // end_ip + &sql.NullString{}, // scope + &sql.NullString{}, // value + &sql.NullString{}, // origin + &sql.NullBool{}, // simulated + } +} + +// fkValues returns the types for scanning foreign-keys values from sql.Rows. 
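
OwnerOrErr above distinguishes "edge never requested" (NotLoadedError) from "requested but dangling" (NotFoundError). A sketch of consuming it, assuming the generated DecisionQuery exposes the usual WithOwner eager-loading option and All terminator (both defined outside this hunk), with fmt assumed imported:

func printOwners(ctx context.Context, client *ent.Client) error {
	ds, err := client.Decision.Query().WithOwner().All(ctx) // WithOwner/All assumed
	if err != nil {
		return err
	}
	for _, d := range ds {
		owner, err := d.Edges.OwnerOrErr()
		if err != nil {
			continue // edge not loaded, or foreign key points at a missing alert
		}
		fmt.Printf("decision %d belongs to alert %d\n", d.ID, owner.ID)
	}
	return nil
}
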
+func (*Decision) fkValues() []interface{} { + return []interface{}{ + &sql.NullInt64{}, // alert_decisions + } +} + +// assignValues assigns the values that were returned from sql.Rows (after scanning) +// to the Decision fields. +func (d *Decision) assignValues(values ...interface{}) error { + if m, n := len(values), len(decision.Columns); m < n { + return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) + } + value, ok := values[0].(*sql.NullInt64) + if !ok { + return fmt.Errorf("unexpected type %T for field id", value) + } + d.ID = int(value.Int64) + values = values[1:] + if value, ok := values[0].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field created_at", values[0]) + } else if value.Valid { + d.CreatedAt = value.Time + } + if value, ok := values[1].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field updated_at", values[1]) + } else if value.Valid { + d.UpdatedAt = value.Time + } + if value, ok := values[2].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field until", values[2]) + } else if value.Valid { + d.Until = value.Time + } + if value, ok := values[3].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field scenario", values[3]) + } else if value.Valid { + d.Scenario = value.String + } + if value, ok := values[4].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field type", values[4]) + } else if value.Valid { + d.Type = value.String + } + if value, ok := values[5].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field start_ip", values[5]) + } else if value.Valid { + d.StartIP = value.Int64 + } + if value, ok := values[6].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field end_ip", values[6]) + } else if value.Valid { + d.EndIP = value.Int64 + } + if value, ok := values[7].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field scope", values[7]) + } else if value.Valid { + d.Scope = value.String + } + if value, ok := values[8].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field value", values[8]) + } else if value.Valid { + d.Value = value.String + } + if value, ok := values[9].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field origin", values[9]) + } else if value.Valid { + d.Origin = value.String + } + if value, ok := values[10].(*sql.NullBool); !ok { + return fmt.Errorf("unexpected type %T for field simulated", values[10]) + } else if value.Valid { + d.Simulated = value.Bool + } + values = values[11:] + if len(values) == len(decision.ForeignKeys) { + if value, ok := values[0].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for edge-field alert_decisions", value) + } else if value.Valid { + d.alert_decisions = new(int) + *d.alert_decisions = int(value.Int64) + } + } + return nil +} + +// QueryOwner queries the owner edge of the Decision. +func (d *Decision) QueryOwner() *AlertQuery { + return (&DecisionClient{config: d.config}).QueryOwner(d) +} + +// Update returns a builder for updating this Decision. +// Note that, you need to call Decision.Unwrap() before calling this method, if this Decision +// was returned from a transaction, and the transaction was committed or rolled back. 
+func (d *Decision) Update() *DecisionUpdateOne { + return (&DecisionClient{config: d.config}).UpdateOne(d) +} + +// Unwrap unwraps the entity that was returned from a transaction after it was closed, +// so that all next queries will be executed through the driver which created the transaction. +func (d *Decision) Unwrap() *Decision { + tx, ok := d.config.driver.(*txDriver) + if !ok { + panic("ent: Decision is not a transactional entity") + } + d.config.driver = tx.drv + return d +} + +// String implements the fmt.Stringer. +func (d *Decision) String() string { + var builder strings.Builder + builder.WriteString("Decision(") + builder.WriteString(fmt.Sprintf("id=%v", d.ID)) + builder.WriteString(", created_at=") + builder.WriteString(d.CreatedAt.Format(time.ANSIC)) + builder.WriteString(", updated_at=") + builder.WriteString(d.UpdatedAt.Format(time.ANSIC)) + builder.WriteString(", until=") + builder.WriteString(d.Until.Format(time.ANSIC)) + builder.WriteString(", scenario=") + builder.WriteString(d.Scenario) + builder.WriteString(", type=") + builder.WriteString(d.Type) + builder.WriteString(", start_ip=") + builder.WriteString(fmt.Sprintf("%v", d.StartIP)) + builder.WriteString(", end_ip=") + builder.WriteString(fmt.Sprintf("%v", d.EndIP)) + builder.WriteString(", scope=") + builder.WriteString(d.Scope) + builder.WriteString(", value=") + builder.WriteString(d.Value) + builder.WriteString(", origin=") + builder.WriteString(d.Origin) + builder.WriteString(", simulated=") + builder.WriteString(fmt.Sprintf("%v", d.Simulated)) + builder.WriteByte(')') + return builder.String() +} + +// Decisions is a parsable slice of Decision. +type Decisions []*Decision + +func (d Decisions) config(cfg config) { + for _i := range d { + d[_i].config = cfg + } +} diff --git a/pkg/database/ent/decision/decision.go b/pkg/database/ent/decision/decision.go new file mode 100644 index 000000000..e48b04dc5 --- /dev/null +++ b/pkg/database/ent/decision/decision.go @@ -0,0 +1,94 @@ +// Code generated by entc, DO NOT EDIT. + +package decision + +import ( + "time" +) + +const ( + // Label holds the string label denoting the decision type in the database. + Label = "decision" + // FieldID holds the string denoting the id field in the database. + FieldID = "id" + // FieldCreatedAt holds the string denoting the created_at field in the database. + FieldCreatedAt = "created_at" + // FieldUpdatedAt holds the string denoting the updated_at field in the database. + FieldUpdatedAt = "updated_at" + // FieldUntil holds the string denoting the until field in the database. + FieldUntil = "until" + // FieldScenario holds the string denoting the scenario field in the database. + FieldScenario = "scenario" + // FieldType holds the string denoting the type field in the database. + FieldType = "type" + // FieldStartIP holds the string denoting the start_ip field in the database. + FieldStartIP = "start_ip" + // FieldEndIP holds the string denoting the end_ip field in the database. + FieldEndIP = "end_ip" + // FieldScope holds the string denoting the scope field in the database. + FieldScope = "scope" + // FieldValue holds the string denoting the value field in the database. + FieldValue = "value" + // FieldOrigin holds the string denoting the origin field in the database. + FieldOrigin = "origin" + // FieldSimulated holds the string denoting the simulated field in the database. + FieldSimulated = "simulated" + + // EdgeOwner holds the string denoting the owner edge name in mutations. 
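
The Update/Unwrap pairing above matters when an entity came out of a transaction: the entity keeps the tx-scoped driver, so Unwrap must rebind it to the root driver before further builders run, exactly as the doc comment warns. A sketch; the field setters are elided and Save is assumed from the generated DecisionUpdateOne:

func refreshAfterTx(ctx context.Context, d *ent.Decision) (*ent.Decision, error) {
	// Unwrap panics if d did not come from a transaction, so only call it on
	// entities returned by tx-scoped builders. Without it, the update below
	// would run against the already-closed tx driver.
	d = d.Unwrap()
	return d.Update().Save(ctx) // setters elided; Save assumed from the generated update builder
}
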
+ EdgeOwner = "owner" + + // Table holds the table name of the decision in the database. + Table = "decisions" + // OwnerTable is the table the holds the owner relation/edge. + OwnerTable = "decisions" + // OwnerInverseTable is the table name for the Alert entity. + // It exists in this package in order to avoid circular dependency with the "alert" package. + OwnerInverseTable = "alerts" + // OwnerColumn is the table column denoting the owner relation/edge. + OwnerColumn = "alert_decisions" +) + +// Columns holds all SQL columns for decision fields. +var Columns = []string{ + FieldID, + FieldCreatedAt, + FieldUpdatedAt, + FieldUntil, + FieldScenario, + FieldType, + FieldStartIP, + FieldEndIP, + FieldScope, + FieldValue, + FieldOrigin, + FieldSimulated, +} + +// ForeignKeys holds the SQL foreign-keys that are owned by the Decision type. +var ForeignKeys = []string{ + "alert_decisions", +} + +// ValidColumn reports if the column name is valid (part of the table columns). +func ValidColumn(column string) bool { + for i := range Columns { + if column == Columns[i] { + return true + } + } + for i := range ForeignKeys { + if column == ForeignKeys[i] { + return true + } + } + return false +} + +var ( + // DefaultCreatedAt holds the default value on creation for the created_at field. + DefaultCreatedAt func() time.Time + // DefaultUpdatedAt holds the default value on creation for the updated_at field. + DefaultUpdatedAt func() time.Time + // DefaultSimulated holds the default value on creation for the simulated field. + DefaultSimulated bool +) diff --git a/pkg/database/ent/decision/where.go b/pkg/database/ent/decision/where.go new file mode 100644 index 000000000..ae53e41c3 --- /dev/null +++ b/pkg/database/ent/decision/where.go @@ -0,0 +1,1208 @@ +// Code generated by entc, DO NOT EDIT. + +package decision + +import ( + "time" + + "github.com/crowdsecurity/crowdsec/pkg/database/ent/predicate" + "github.com/facebook/ent/dialect/sql" + "github.com/facebook/ent/dialect/sql/sqlgraph" +) + +// ID filters vertices based on their identifier. +func ID(id int) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldID), id)) + }) +} + +// IDEQ applies the EQ predicate on the ID field. +func IDEQ(id int) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldID), id)) + }) +} + +// IDNEQ applies the NEQ predicate on the ID field. +func IDNEQ(id int) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldID), id)) + }) +} + +// IDIn applies the In predicate on the ID field. +func IDIn(ids ...int) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(ids) == 0 { + s.Where(sql.False()) + return + } + v := make([]interface{}, len(ids)) + for i := range v { + v[i] = ids[i] + } + s.Where(sql.In(s.C(FieldID), v...)) + }) +} + +// IDNotIn applies the NotIn predicate on the ID field. +func IDNotIn(ids ...int) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. 
+ if len(ids) == 0 { + s.Where(sql.False()) + return + } + v := make([]interface{}, len(ids)) + for i := range v { + v[i] = ids[i] + } + s.Where(sql.NotIn(s.C(FieldID), v...)) + }) +} + +// IDGT applies the GT predicate on the ID field. +func IDGT(id int) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldID), id)) + }) +} + +// IDGTE applies the GTE predicate on the ID field. +func IDGTE(id int) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldID), id)) + }) +} + +// IDLT applies the LT predicate on the ID field. +func IDLT(id int) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldID), id)) + }) +} + +// IDLTE applies the LTE predicate on the ID field. +func IDLTE(id int) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldID), id)) + }) +} + +// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ. +func CreatedAt(v time.Time) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldCreatedAt), v)) + }) +} + +// UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ. +func UpdatedAt(v time.Time) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldUpdatedAt), v)) + }) +} + +// Until applies equality check predicate on the "until" field. It's identical to UntilEQ. +func Until(v time.Time) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldUntil), v)) + }) +} + +// Scenario applies equality check predicate on the "scenario" field. It's identical to ScenarioEQ. +func Scenario(v string) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldScenario), v)) + }) +} + +// Type applies equality check predicate on the "type" field. It's identical to TypeEQ. +func Type(v string) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldType), v)) + }) +} + +// StartIP applies equality check predicate on the "start_ip" field. It's identical to StartIPEQ. +func StartIP(v int64) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldStartIP), v)) + }) +} + +// EndIP applies equality check predicate on the "end_ip" field. It's identical to EndIPEQ. +func EndIP(v int64) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldEndIP), v)) + }) +} + +// Scope applies equality check predicate on the "scope" field. It's identical to ScopeEQ. +func Scope(v string) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldScope), v)) + }) +} + +// Value applies equality check predicate on the "value" field. It's identical to ValueEQ. +func Value(v string) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldValue), v)) + }) +} + +// Origin applies equality check predicate on the "origin" field. It's identical to OriginEQ. +func Origin(v string) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldOrigin), v)) + }) +} + +// Simulated applies equality check predicate on the "simulated" field. It's identical to SimulatedEQ. 
+func Simulated(v bool) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldSimulated), v)) + }) +} + +// CreatedAtEQ applies the EQ predicate on the "created_at" field. +func CreatedAtEQ(v time.Time) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldCreatedAt), v)) + }) +} + +// CreatedAtNEQ applies the NEQ predicate on the "created_at" field. +func CreatedAtNEQ(v time.Time) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldCreatedAt), v)) + }) +} + +// CreatedAtIn applies the In predicate on the "created_at" field. +func CreatedAtIn(vs ...time.Time) predicate.Decision { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Decision(func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(v) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.In(s.C(FieldCreatedAt), v...)) + }) +} + +// CreatedAtNotIn applies the NotIn predicate on the "created_at" field. +func CreatedAtNotIn(vs ...time.Time) predicate.Decision { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Decision(func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(v) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.NotIn(s.C(FieldCreatedAt), v...)) + }) +} + +// CreatedAtGT applies the GT predicate on the "created_at" field. +func CreatedAtGT(v time.Time) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldCreatedAt), v)) + }) +} + +// CreatedAtGTE applies the GTE predicate on the "created_at" field. +func CreatedAtGTE(v time.Time) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldCreatedAt), v)) + }) +} + +// CreatedAtLT applies the LT predicate on the "created_at" field. +func CreatedAtLT(v time.Time) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldCreatedAt), v)) + }) +} + +// CreatedAtLTE applies the LTE predicate on the "created_at" field. +func CreatedAtLTE(v time.Time) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldCreatedAt), v)) + }) +} + +// UpdatedAtEQ applies the EQ predicate on the "updated_at" field. +func UpdatedAtEQ(v time.Time) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldUpdatedAt), v)) + }) +} + +// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field. +func UpdatedAtNEQ(v time.Time) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldUpdatedAt), v)) + }) +} + +// UpdatedAtIn applies the In predicate on the "updated_at" field. +func UpdatedAtIn(vs ...time.Time) predicate.Decision { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Decision(func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(v) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.In(s.C(FieldUpdatedAt), v...)) + }) +} + +// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field. 
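
All of these helpers build predicate.Decision closures over the SQL selector, so they compose freely. A filtering sketch, assuming the generated DecisionQuery's variadic Where applies AND semantics and exposes the usual All terminator, with time and this decision package assumed imported:

func recentRealDecisions(ctx context.Context, client *ent.Client) ([]*ent.Decision, error) {
	since := time.Now().Add(-24 * time.Hour)
	return client.Decision.Query().
		Where( // variadic predicates AND together in ent's generated queries
			decision.Value("192.0.2.1"), // placeholder value for illustration
			decision.Simulated(false),
			decision.CreatedAtGT(since),
		).
		All(ctx) // All assumed from the generated query API
}
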
+func UpdatedAtNotIn(vs ...time.Time) predicate.Decision { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Decision(func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(v) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.NotIn(s.C(FieldUpdatedAt), v...)) + }) +} + +// UpdatedAtGT applies the GT predicate on the "updated_at" field. +func UpdatedAtGT(v time.Time) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldUpdatedAt), v)) + }) +} + +// UpdatedAtGTE applies the GTE predicate on the "updated_at" field. +func UpdatedAtGTE(v time.Time) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldUpdatedAt), v)) + }) +} + +// UpdatedAtLT applies the LT predicate on the "updated_at" field. +func UpdatedAtLT(v time.Time) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldUpdatedAt), v)) + }) +} + +// UpdatedAtLTE applies the LTE predicate on the "updated_at" field. +func UpdatedAtLTE(v time.Time) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldUpdatedAt), v)) + }) +} + +// UntilEQ applies the EQ predicate on the "until" field. +func UntilEQ(v time.Time) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldUntil), v)) + }) +} + +// UntilNEQ applies the NEQ predicate on the "until" field. +func UntilNEQ(v time.Time) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldUntil), v)) + }) +} + +// UntilIn applies the In predicate on the "until" field. +func UntilIn(vs ...time.Time) predicate.Decision { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Decision(func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(v) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.In(s.C(FieldUntil), v...)) + }) +} + +// UntilNotIn applies the NotIn predicate on the "until" field. +func UntilNotIn(vs ...time.Time) predicate.Decision { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Decision(func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(v) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.NotIn(s.C(FieldUntil), v...)) + }) +} + +// UntilGT applies the GT predicate on the "until" field. +func UntilGT(v time.Time) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldUntil), v)) + }) +} + +// UntilGTE applies the GTE predicate on the "until" field. +func UntilGTE(v time.Time) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldUntil), v)) + }) +} + +// UntilLT applies the LT predicate on the "until" field. +func UntilLT(v time.Time) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldUntil), v)) + }) +} + +// UntilLTE applies the LTE predicate on the "until" field. 
+func UntilLTE(v time.Time) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldUntil), v)) + }) +} + +// ScenarioEQ applies the EQ predicate on the "scenario" field. +func ScenarioEQ(v string) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldScenario), v)) + }) +} + +// ScenarioNEQ applies the NEQ predicate on the "scenario" field. +func ScenarioNEQ(v string) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldScenario), v)) + }) +} + +// ScenarioIn applies the In predicate on the "scenario" field. +func ScenarioIn(vs ...string) predicate.Decision { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Decision(func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(v) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.In(s.C(FieldScenario), v...)) + }) +} + +// ScenarioNotIn applies the NotIn predicate on the "scenario" field. +func ScenarioNotIn(vs ...string) predicate.Decision { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Decision(func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(v) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.NotIn(s.C(FieldScenario), v...)) + }) +} + +// ScenarioGT applies the GT predicate on the "scenario" field. +func ScenarioGT(v string) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldScenario), v)) + }) +} + +// ScenarioGTE applies the GTE predicate on the "scenario" field. +func ScenarioGTE(v string) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldScenario), v)) + }) +} + +// ScenarioLT applies the LT predicate on the "scenario" field. +func ScenarioLT(v string) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldScenario), v)) + }) +} + +// ScenarioLTE applies the LTE predicate on the "scenario" field. +func ScenarioLTE(v string) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldScenario), v)) + }) +} + +// ScenarioContains applies the Contains predicate on the "scenario" field. +func ScenarioContains(v string) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.Contains(s.C(FieldScenario), v)) + }) +} + +// ScenarioHasPrefix applies the HasPrefix predicate on the "scenario" field. +func ScenarioHasPrefix(v string) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.HasPrefix(s.C(FieldScenario), v)) + }) +} + +// ScenarioHasSuffix applies the HasSuffix predicate on the "scenario" field. +func ScenarioHasSuffix(v string) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.HasSuffix(s.C(FieldScenario), v)) + }) +} + +// ScenarioEqualFold applies the EqualFold predicate on the "scenario" field. +func ScenarioEqualFold(v string) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.EqualFold(s.C(FieldScenario), v)) + }) +} + +// ScenarioContainsFold applies the ContainsFold predicate on the "scenario" field. 
+func ScenarioContainsFold(v string) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.ContainsFold(s.C(FieldScenario), v)) + }) +} + +// TypeEQ applies the EQ predicate on the "type" field. +func TypeEQ(v string) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldType), v)) + }) +} + +// TypeNEQ applies the NEQ predicate on the "type" field. +func TypeNEQ(v string) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldType), v)) + }) +} + +// TypeIn applies the In predicate on the "type" field. +func TypeIn(vs ...string) predicate.Decision { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Decision(func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(v) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.In(s.C(FieldType), v...)) + }) +} + +// TypeNotIn applies the NotIn predicate on the "type" field. +func TypeNotIn(vs ...string) predicate.Decision { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Decision(func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(v) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.NotIn(s.C(FieldType), v...)) + }) +} + +// TypeGT applies the GT predicate on the "type" field. +func TypeGT(v string) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldType), v)) + }) +} + +// TypeGTE applies the GTE predicate on the "type" field. +func TypeGTE(v string) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldType), v)) + }) +} + +// TypeLT applies the LT predicate on the "type" field. +func TypeLT(v string) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldType), v)) + }) +} + +// TypeLTE applies the LTE predicate on the "type" field. +func TypeLTE(v string) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldType), v)) + }) +} + +// TypeContains applies the Contains predicate on the "type" field. +func TypeContains(v string) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.Contains(s.C(FieldType), v)) + }) +} + +// TypeHasPrefix applies the HasPrefix predicate on the "type" field. +func TypeHasPrefix(v string) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.HasPrefix(s.C(FieldType), v)) + }) +} + +// TypeHasSuffix applies the HasSuffix predicate on the "type" field. +func TypeHasSuffix(v string) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.HasSuffix(s.C(FieldType), v)) + }) +} + +// TypeEqualFold applies the EqualFold predicate on the "type" field. +func TypeEqualFold(v string) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.EqualFold(s.C(FieldType), v)) + }) +} + +// TypeContainsFold applies the ContainsFold predicate on the "type" field. 
+func TypeContainsFold(v string) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.ContainsFold(s.C(FieldType), v)) + }) +} + +// StartIPEQ applies the EQ predicate on the "start_ip" field. +func StartIPEQ(v int64) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldStartIP), v)) + }) +} + +// StartIPNEQ applies the NEQ predicate on the "start_ip" field. +func StartIPNEQ(v int64) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldStartIP), v)) + }) +} + +// StartIPIn applies the In predicate on the "start_ip" field. +func StartIPIn(vs ...int64) predicate.Decision { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Decision(func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(v) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.In(s.C(FieldStartIP), v...)) + }) +} + +// StartIPNotIn applies the NotIn predicate on the "start_ip" field. +func StartIPNotIn(vs ...int64) predicate.Decision { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Decision(func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(v) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.NotIn(s.C(FieldStartIP), v...)) + }) +} + +// StartIPGT applies the GT predicate on the "start_ip" field. +func StartIPGT(v int64) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldStartIP), v)) + }) +} + +// StartIPGTE applies the GTE predicate on the "start_ip" field. +func StartIPGTE(v int64) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldStartIP), v)) + }) +} + +// StartIPLT applies the LT predicate on the "start_ip" field. +func StartIPLT(v int64) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldStartIP), v)) + }) +} + +// StartIPLTE applies the LTE predicate on the "start_ip" field. +func StartIPLTE(v int64) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldStartIP), v)) + }) +} + +// StartIPIsNil applies the IsNil predicate on the "start_ip" field. +func StartIPIsNil() predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.IsNull(s.C(FieldStartIP))) + }) +} + +// StartIPNotNil applies the NotNil predicate on the "start_ip" field. +func StartIPNotNil() predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.NotNull(s.C(FieldStartIP))) + }) +} + +// EndIPEQ applies the EQ predicate on the "end_ip" field. +func EndIPEQ(v int64) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldEndIP), v)) + }) +} + +// EndIPNEQ applies the NEQ predicate on the "end_ip" field. +func EndIPNEQ(v int64) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldEndIP), v)) + }) +} + +// EndIPIn applies the In predicate on the "end_ip" field. 
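// Note the guard repeated in every In/NotIn predicate above: with zero
// arguments the generated code emits sql.False() rather than invalid
// "IN ()" SQL, so an empty filter simply matches nothing. A sketch, with
// the same assumed "client"/"ctx" as above:
//
//	none, err := client.Decision.Query().
//		Where(decision.TypeIn()). // no values: WHERE FALSE, returns []
//		All(ctx)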
+func EndIPIn(vs ...int64) predicate.Decision { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Decision(func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(v) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.In(s.C(FieldEndIP), v...)) + }) +} + +// EndIPNotIn applies the NotIn predicate on the "end_ip" field. +func EndIPNotIn(vs ...int64) predicate.Decision { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Decision(func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(v) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.NotIn(s.C(FieldEndIP), v...)) + }) +} + +// EndIPGT applies the GT predicate on the "end_ip" field. +func EndIPGT(v int64) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldEndIP), v)) + }) +} + +// EndIPGTE applies the GTE predicate on the "end_ip" field. +func EndIPGTE(v int64) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldEndIP), v)) + }) +} + +// EndIPLT applies the LT predicate on the "end_ip" field. +func EndIPLT(v int64) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldEndIP), v)) + }) +} + +// EndIPLTE applies the LTE predicate on the "end_ip" field. +func EndIPLTE(v int64) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldEndIP), v)) + }) +} + +// EndIPIsNil applies the IsNil predicate on the "end_ip" field. +func EndIPIsNil() predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.IsNull(s.C(FieldEndIP))) + }) +} + +// EndIPNotNil applies the NotNil predicate on the "end_ip" field. +func EndIPNotNil() predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.NotNull(s.C(FieldEndIP))) + }) +} + +// ScopeEQ applies the EQ predicate on the "scope" field. +func ScopeEQ(v string) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldScope), v)) + }) +} + +// ScopeNEQ applies the NEQ predicate on the "scope" field. +func ScopeNEQ(v string) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldScope), v)) + }) +} + +// ScopeIn applies the In predicate on the "scope" field. +func ScopeIn(vs ...string) predicate.Decision { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Decision(func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(v) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.In(s.C(FieldScope), v...)) + }) +} + +// ScopeNotIn applies the NotIn predicate on the "scope" field. +func ScopeNotIn(vs ...string) predicate.Decision { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Decision(func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. 
+ if len(v) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.NotIn(s.C(FieldScope), v...)) + }) +} + +// ScopeGT applies the GT predicate on the "scope" field. +func ScopeGT(v string) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldScope), v)) + }) +} + +// ScopeGTE applies the GTE predicate on the "scope" field. +func ScopeGTE(v string) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldScope), v)) + }) +} + +// ScopeLT applies the LT predicate on the "scope" field. +func ScopeLT(v string) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldScope), v)) + }) +} + +// ScopeLTE applies the LTE predicate on the "scope" field. +func ScopeLTE(v string) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldScope), v)) + }) +} + +// ScopeContains applies the Contains predicate on the "scope" field. +func ScopeContains(v string) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.Contains(s.C(FieldScope), v)) + }) +} + +// ScopeHasPrefix applies the HasPrefix predicate on the "scope" field. +func ScopeHasPrefix(v string) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.HasPrefix(s.C(FieldScope), v)) + }) +} + +// ScopeHasSuffix applies the HasSuffix predicate on the "scope" field. +func ScopeHasSuffix(v string) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.HasSuffix(s.C(FieldScope), v)) + }) +} + +// ScopeEqualFold applies the EqualFold predicate on the "scope" field. +func ScopeEqualFold(v string) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.EqualFold(s.C(FieldScope), v)) + }) +} + +// ScopeContainsFold applies the ContainsFold predicate on the "scope" field. +func ScopeContainsFold(v string) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.ContainsFold(s.C(FieldScope), v)) + }) +} + +// ValueEQ applies the EQ predicate on the "value" field. +func ValueEQ(v string) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldValue), v)) + }) +} + +// ValueNEQ applies the NEQ predicate on the "value" field. +func ValueNEQ(v string) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldValue), v)) + }) +} + +// ValueIn applies the In predicate on the "value" field. +func ValueIn(vs ...string) predicate.Decision { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Decision(func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(v) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.In(s.C(FieldValue), v...)) + }) +} + +// ValueNotIn applies the NotIn predicate on the "value" field. +func ValueNotIn(vs ...string) predicate.Decision { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Decision(func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(v) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.NotIn(s.C(FieldValue), v...)) + }) +} + +// ValueGT applies the GT predicate on the "value" field. 
+func ValueGT(v string) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldValue), v)) + }) +} + +// ValueGTE applies the GTE predicate on the "value" field. +func ValueGTE(v string) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldValue), v)) + }) +} + +// ValueLT applies the LT predicate on the "value" field. +func ValueLT(v string) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldValue), v)) + }) +} + +// ValueLTE applies the LTE predicate on the "value" field. +func ValueLTE(v string) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldValue), v)) + }) +} + +// ValueContains applies the Contains predicate on the "value" field. +func ValueContains(v string) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.Contains(s.C(FieldValue), v)) + }) +} + +// ValueHasPrefix applies the HasPrefix predicate on the "value" field. +func ValueHasPrefix(v string) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.HasPrefix(s.C(FieldValue), v)) + }) +} + +// ValueHasSuffix applies the HasSuffix predicate on the "value" field. +func ValueHasSuffix(v string) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.HasSuffix(s.C(FieldValue), v)) + }) +} + +// ValueEqualFold applies the EqualFold predicate on the "value" field. +func ValueEqualFold(v string) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.EqualFold(s.C(FieldValue), v)) + }) +} + +// ValueContainsFold applies the ContainsFold predicate on the "value" field. +func ValueContainsFold(v string) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.ContainsFold(s.C(FieldValue), v)) + }) +} + +// OriginEQ applies the EQ predicate on the "origin" field. +func OriginEQ(v string) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldOrigin), v)) + }) +} + +// OriginNEQ applies the NEQ predicate on the "origin" field. +func OriginNEQ(v string) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldOrigin), v)) + }) +} + +// OriginIn applies the In predicate on the "origin" field. +func OriginIn(vs ...string) predicate.Decision { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Decision(func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(v) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.In(s.C(FieldOrigin), v...)) + }) +} + +// OriginNotIn applies the NotIn predicate on the "origin" field. +func OriginNotIn(vs ...string) predicate.Decision { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Decision(func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(v) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.NotIn(s.C(FieldOrigin), v...)) + }) +} + +// OriginGT applies the GT predicate on the "origin" field. 
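// The Contains/HasPrefix/HasSuffix and *Fold variants above give substring
// and case-insensitive matching on the string columns. A sketch, "client"
// and "ctx" assumed as before:
//
//	ips, err := client.Decision.Query().
//		Where(decision.ScopeEqualFold("ip")). // matches "Ip", "IP", "ip", ...
//		All(ctx)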
+func OriginGT(v string) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldOrigin), v)) + }) +} + +// OriginGTE applies the GTE predicate on the "origin" field. +func OriginGTE(v string) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldOrigin), v)) + }) +} + +// OriginLT applies the LT predicate on the "origin" field. +func OriginLT(v string) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldOrigin), v)) + }) +} + +// OriginLTE applies the LTE predicate on the "origin" field. +func OriginLTE(v string) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldOrigin), v)) + }) +} + +// OriginContains applies the Contains predicate on the "origin" field. +func OriginContains(v string) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.Contains(s.C(FieldOrigin), v)) + }) +} + +// OriginHasPrefix applies the HasPrefix predicate on the "origin" field. +func OriginHasPrefix(v string) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.HasPrefix(s.C(FieldOrigin), v)) + }) +} + +// OriginHasSuffix applies the HasSuffix predicate on the "origin" field. +func OriginHasSuffix(v string) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.HasSuffix(s.C(FieldOrigin), v)) + }) +} + +// OriginEqualFold applies the EqualFold predicate on the "origin" field. +func OriginEqualFold(v string) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.EqualFold(s.C(FieldOrigin), v)) + }) +} + +// OriginContainsFold applies the ContainsFold predicate on the "origin" field. +func OriginContainsFold(v string) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.ContainsFold(s.C(FieldOrigin), v)) + }) +} + +// SimulatedEQ applies the EQ predicate on the "simulated" field. +func SimulatedEQ(v bool) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldSimulated), v)) + }) +} + +// SimulatedNEQ applies the NEQ predicate on the "simulated" field. +func SimulatedNEQ(v bool) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldSimulated), v)) + }) +} + +// HasOwner applies the HasEdge predicate on the "owner" edge. +func HasOwner() predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(OwnerTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, OwnerTable, OwnerColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasOwnerWith applies the HasEdge predicate on the "owner" edge with a given conditions (other predicates). +func HasOwnerWith(preds ...predicate.Alert) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(OwnerInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, OwnerTable, OwnerColumn), + ) + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// And groups list of predicates with the AND operator between them. 
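// The combinators defined next (And, Or, Not) nest arbitrary predicates; a
// sketch with the usual assumed "client"/"ctx":
//
//	hits, err := client.Decision.Query().
//		Where(decision.Or(
//			decision.ScopeEQ("Ip"),
//			decision.ScopeEQ("Range"),
//		)).
//		All(ctx)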
+func And(predicates ...predicate.Decision) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s1 := s.Clone().SetP(nil) + for _, p := range predicates { + p(s1) + } + s.Where(s1.P()) + }) +} + +// Or groups list of predicates with the OR operator between them. +func Or(predicates ...predicate.Decision) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s1 := s.Clone().SetP(nil) + for i, p := range predicates { + if i > 0 { + s1.Or() + } + p(s1) + } + s.Where(s1.P()) + }) +} + +// Not applies the not operator on the given predicate. +func Not(p predicate.Decision) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + p(s.Not()) + }) +} diff --git a/pkg/database/ent/decision_create.go b/pkg/database/ent/decision_create.go new file mode 100644 index 000000000..a2e9adbea --- /dev/null +++ b/pkg/database/ent/decision_create.go @@ -0,0 +1,446 @@ +// Code generated by entc, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "time" + + "github.com/crowdsecurity/crowdsec/pkg/database/ent/alert" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/decision" + "github.com/facebook/ent/dialect/sql/sqlgraph" + "github.com/facebook/ent/schema/field" +) + +// DecisionCreate is the builder for creating a Decision entity. +type DecisionCreate struct { + config + mutation *DecisionMutation + hooks []Hook +} + +// SetCreatedAt sets the created_at field. +func (dc *DecisionCreate) SetCreatedAt(t time.Time) *DecisionCreate { + dc.mutation.SetCreatedAt(t) + return dc +} + +// SetNillableCreatedAt sets the created_at field if the given value is not nil. +func (dc *DecisionCreate) SetNillableCreatedAt(t *time.Time) *DecisionCreate { + if t != nil { + dc.SetCreatedAt(*t) + } + return dc +} + +// SetUpdatedAt sets the updated_at field. +func (dc *DecisionCreate) SetUpdatedAt(t time.Time) *DecisionCreate { + dc.mutation.SetUpdatedAt(t) + return dc +} + +// SetNillableUpdatedAt sets the updated_at field if the given value is not nil. +func (dc *DecisionCreate) SetNillableUpdatedAt(t *time.Time) *DecisionCreate { + if t != nil { + dc.SetUpdatedAt(*t) + } + return dc +} + +// SetUntil sets the until field. +func (dc *DecisionCreate) SetUntil(t time.Time) *DecisionCreate { + dc.mutation.SetUntil(t) + return dc +} + +// SetScenario sets the scenario field. +func (dc *DecisionCreate) SetScenario(s string) *DecisionCreate { + dc.mutation.SetScenario(s) + return dc +} + +// SetType sets the type field. +func (dc *DecisionCreate) SetType(s string) *DecisionCreate { + dc.mutation.SetType(s) + return dc +} + +// SetStartIP sets the start_ip field. +func (dc *DecisionCreate) SetStartIP(i int64) *DecisionCreate { + dc.mutation.SetStartIP(i) + return dc +} + +// SetNillableStartIP sets the start_ip field if the given value is not nil. +func (dc *DecisionCreate) SetNillableStartIP(i *int64) *DecisionCreate { + if i != nil { + dc.SetStartIP(*i) + } + return dc +} + +// SetEndIP sets the end_ip field. +func (dc *DecisionCreate) SetEndIP(i int64) *DecisionCreate { + dc.mutation.SetEndIP(i) + return dc +} + +// SetNillableEndIP sets the end_ip field if the given value is not nil. +func (dc *DecisionCreate) SetNillableEndIP(i *int64) *DecisionCreate { + if i != nil { + dc.SetEndIP(*i) + } + return dc +} + +// SetScope sets the scope field. +func (dc *DecisionCreate) SetScope(s string) *DecisionCreate { + dc.mutation.SetScope(s) + return dc +} + +// SetValue sets the value field. 
+func (dc *DecisionCreate) SetValue(s string) *DecisionCreate { + dc.mutation.SetValue(s) + return dc +} + +// SetOrigin sets the origin field. +func (dc *DecisionCreate) SetOrigin(s string) *DecisionCreate { + dc.mutation.SetOrigin(s) + return dc +} + +// SetSimulated sets the simulated field. +func (dc *DecisionCreate) SetSimulated(b bool) *DecisionCreate { + dc.mutation.SetSimulated(b) + return dc +} + +// SetNillableSimulated sets the simulated field if the given value is not nil. +func (dc *DecisionCreate) SetNillableSimulated(b *bool) *DecisionCreate { + if b != nil { + dc.SetSimulated(*b) + } + return dc +} + +// SetOwnerID sets the owner edge to Alert by id. +func (dc *DecisionCreate) SetOwnerID(id int) *DecisionCreate { + dc.mutation.SetOwnerID(id) + return dc +} + +// SetNillableOwnerID sets the owner edge to Alert by id if the given value is not nil. +func (dc *DecisionCreate) SetNillableOwnerID(id *int) *DecisionCreate { + if id != nil { + dc = dc.SetOwnerID(*id) + } + return dc +} + +// SetOwner sets the owner edge to Alert. +func (dc *DecisionCreate) SetOwner(a *Alert) *DecisionCreate { + return dc.SetOwnerID(a.ID) +} + +// Mutation returns the DecisionMutation object of the builder. +func (dc *DecisionCreate) Mutation() *DecisionMutation { + return dc.mutation +} + +// Save creates the Decision in the database. +func (dc *DecisionCreate) Save(ctx context.Context) (*Decision, error) { + var ( + err error + node *Decision + ) + dc.defaults() + if len(dc.hooks) == 0 { + if err = dc.check(); err != nil { + return nil, err + } + node, err = dc.sqlSave(ctx) + } else { + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*DecisionMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err = dc.check(); err != nil { + return nil, err + } + dc.mutation = mutation + node, err = dc.sqlSave(ctx) + mutation.done = true + return node, err + }) + for i := len(dc.hooks) - 1; i >= 0; i-- { + mut = dc.hooks[i](mut) + } + if _, err := mut.Mutate(ctx, dc.mutation); err != nil { + return nil, err + } + } + return node, err +} + +// SaveX calls Save and panics if Save returns an error. +func (dc *DecisionCreate) SaveX(ctx context.Context) *Decision { + v, err := dc.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// defaults sets the default values of the builder before save. +func (dc *DecisionCreate) defaults() { + if _, ok := dc.mutation.CreatedAt(); !ok { + v := decision.DefaultCreatedAt() + dc.mutation.SetCreatedAt(v) + } + if _, ok := dc.mutation.UpdatedAt(); !ok { + v := decision.DefaultUpdatedAt() + dc.mutation.SetUpdatedAt(v) + } + if _, ok := dc.mutation.Simulated(); !ok { + v := decision.DefaultSimulated + dc.mutation.SetSimulated(v) + } +} + +// check runs all checks and user-defined validators on the builder. 
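// Save first applies the defaults above (created_at, updated_at, simulated)
// and then the required-field validation in check below, so a caller only
// needs to set the remaining fields. A creation sketch ("client"/"ctx"
// assumed, values illustrative):
//
//	d, err := client.Decision.Create().
//		SetUntil(time.Now().Add(4 * time.Hour)).
//		SetScenario("crowdsecurity/ssh-bf").
//		SetType("ban").
//		SetScope("Ip").
//		SetValue("1.1.1.172").
//		SetOrigin("crowdsec").
//		Save(ctx)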
+func (dc *DecisionCreate) check() error { + if _, ok := dc.mutation.CreatedAt(); !ok { + return &ValidationError{Name: "created_at", err: errors.New("ent: missing required field \"created_at\"")} + } + if _, ok := dc.mutation.UpdatedAt(); !ok { + return &ValidationError{Name: "updated_at", err: errors.New("ent: missing required field \"updated_at\"")} + } + if _, ok := dc.mutation.Until(); !ok { + return &ValidationError{Name: "until", err: errors.New("ent: missing required field \"until\"")} + } + if _, ok := dc.mutation.Scenario(); !ok { + return &ValidationError{Name: "scenario", err: errors.New("ent: missing required field \"scenario\"")} + } + if _, ok := dc.mutation.GetType(); !ok { + return &ValidationError{Name: "type", err: errors.New("ent: missing required field \"type\"")} + } + if _, ok := dc.mutation.Scope(); !ok { + return &ValidationError{Name: "scope", err: errors.New("ent: missing required field \"scope\"")} + } + if _, ok := dc.mutation.Value(); !ok { + return &ValidationError{Name: "value", err: errors.New("ent: missing required field \"value\"")} + } + if _, ok := dc.mutation.Origin(); !ok { + return &ValidationError{Name: "origin", err: errors.New("ent: missing required field \"origin\"")} + } + if _, ok := dc.mutation.Simulated(); !ok { + return &ValidationError{Name: "simulated", err: errors.New("ent: missing required field \"simulated\"")} + } + return nil +} + +func (dc *DecisionCreate) sqlSave(ctx context.Context) (*Decision, error) { + _node, _spec := dc.createSpec() + if err := sqlgraph.CreateNode(ctx, dc.driver, _spec); err != nil { + if cerr, ok := isSQLConstraintError(err); ok { + err = cerr + } + return nil, err + } + id := _spec.ID.Value.(int64) + _node.ID = int(id) + return _node, nil +} + +func (dc *DecisionCreate) createSpec() (*Decision, *sqlgraph.CreateSpec) { + var ( + _node = &Decision{config: dc.config} + _spec = &sqlgraph.CreateSpec{ + Table: decision.Table, + ID: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: decision.FieldID, + }, + } + ) + if value, ok := dc.mutation.CreatedAt(); ok { + _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: decision.FieldCreatedAt, + }) + _node.CreatedAt = value + } + if value, ok := dc.mutation.UpdatedAt(); ok { + _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: decision.FieldUpdatedAt, + }) + _node.UpdatedAt = value + } + if value, ok := dc.mutation.Until(); ok { + _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: decision.FieldUntil, + }) + _node.Until = value + } + if value, ok := dc.mutation.Scenario(); ok { + _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: decision.FieldScenario, + }) + _node.Scenario = value + } + if value, ok := dc.mutation.GetType(); ok { + _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: decision.FieldType, + }) + _node.Type = value + } + if value, ok := dc.mutation.StartIP(); ok { + _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ + Type: field.TypeInt64, + Value: value, + Column: decision.FieldStartIP, + }) + _node.StartIP = value + } + if value, ok := dc.mutation.EndIP(); ok { + _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ + Type: field.TypeInt64, + Value: value, + Column: decision.FieldEndIP, + }) + _node.EndIP = value + } + if value, ok := dc.mutation.Scope(); ok { + 
_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: decision.FieldScope, + }) + _node.Scope = value + } + if value, ok := dc.mutation.Value(); ok { + _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: decision.FieldValue, + }) + _node.Value = value + } + if value, ok := dc.mutation.Origin(); ok { + _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: decision.FieldOrigin, + }) + _node.Origin = value + } + if value, ok := dc.mutation.Simulated(); ok { + _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ + Type: field.TypeBool, + Value: value, + Column: decision.FieldSimulated, + }) + _node.Simulated = value + } + if nodes := dc.mutation.OwnerIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: decision.OwnerTable, + Columns: []string{decision.OwnerColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: alert.FieldID, + }, + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges = append(_spec.Edges, edge) + } + return _node, _spec +} + +// DecisionCreateBulk is the builder for creating a bulk of Decision entities. +type DecisionCreateBulk struct { + config + builders []*DecisionCreate +} + +// Save creates the Decision entities in the database. +func (dcb *DecisionCreateBulk) Save(ctx context.Context) ([]*Decision, error) { + specs := make([]*sqlgraph.CreateSpec, len(dcb.builders)) + nodes := make([]*Decision, len(dcb.builders)) + mutators := make([]Mutator, len(dcb.builders)) + for i := range dcb.builders { + func(i int, root context.Context) { + builder := dcb.builders[i] + builder.defaults() + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*DecisionMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err := builder.check(); err != nil { + return nil, err + } + builder.mutation = mutation + nodes[i], specs[i] = builder.createSpec() + var err error + if i < len(mutators)-1 { + _, err = mutators[i+1].Mutate(root, dcb.builders[i+1].mutation) + } else { + // Invoke the actual operation on the latest mutation in the chain. + if err = sqlgraph.BatchCreate(ctx, dcb.driver, &sqlgraph.BatchCreateSpec{Nodes: specs}); err != nil { + if cerr, ok := isSQLConstraintError(err); ok { + err = cerr + } + } + } + mutation.done = true + if err != nil { + return nil, err + } + id := specs[i].ID.Value.(int64) + nodes[i].ID = int(id) + return nodes[i], nil + }) + for i := len(builder.hooks) - 1; i >= 0; i-- { + mut = builder.hooks[i](mut) + } + mutators[i] = mut + }(i, ctx) + } + if len(mutators) > 0 { + if _, err := mutators[0].Mutate(ctx, dcb.builders[0].mutation); err != nil { + return nil, err + } + } + return nodes, nil +} + +// SaveX calls Save and panics if Save returns an error. +func (dcb *DecisionCreateBulk) SaveX(ctx context.Context) []*Decision { + v, err := dcb.Save(ctx) + if err != nil { + panic(err) + } + return v +} diff --git a/pkg/database/ent/decision_delete.go b/pkg/database/ent/decision_delete.go new file mode 100644 index 000000000..58bca2292 --- /dev/null +++ b/pkg/database/ent/decision_delete.go @@ -0,0 +1,109 @@ +// Code generated by entc, DO NOT EDIT. 
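// The delete builder in this file supports predicate-filtered bulk deletes,
// and Exec reports how many rows were removed. A typical expiry flush, with
// the assumed "client"/"ctx" as before:
//
//	n, err := client.Decision.Delete().
//		Where(decision.UntilLT(time.Now())).
//		Exec(ctx)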
+ +package ent + +import ( + "context" + "fmt" + + "github.com/crowdsecurity/crowdsec/pkg/database/ent/decision" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/predicate" + "github.com/facebook/ent/dialect/sql" + "github.com/facebook/ent/dialect/sql/sqlgraph" + "github.com/facebook/ent/schema/field" +) + +// DecisionDelete is the builder for deleting a Decision entity. +type DecisionDelete struct { + config + hooks []Hook + mutation *DecisionMutation + predicates []predicate.Decision +} + +// Where adds a new predicate to the delete builder. +func (dd *DecisionDelete) Where(ps ...predicate.Decision) *DecisionDelete { + dd.predicates = append(dd.predicates, ps...) + return dd +} + +// Exec executes the deletion query and returns how many vertices were deleted. +func (dd *DecisionDelete) Exec(ctx context.Context) (int, error) { + var ( + err error + affected int + ) + if len(dd.hooks) == 0 { + affected, err = dd.sqlExec(ctx) + } else { + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*DecisionMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + dd.mutation = mutation + affected, err = dd.sqlExec(ctx) + mutation.done = true + return affected, err + }) + for i := len(dd.hooks) - 1; i >= 0; i-- { + mut = dd.hooks[i](mut) + } + if _, err := mut.Mutate(ctx, dd.mutation); err != nil { + return 0, err + } + } + return affected, err +} + +// ExecX is like Exec, but panics if an error occurs. +func (dd *DecisionDelete) ExecX(ctx context.Context) int { + n, err := dd.Exec(ctx) + if err != nil { + panic(err) + } + return n +} + +func (dd *DecisionDelete) sqlExec(ctx context.Context) (int, error) { + _spec := &sqlgraph.DeleteSpec{ + Node: &sqlgraph.NodeSpec{ + Table: decision.Table, + ID: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: decision.FieldID, + }, + }, + } + if ps := dd.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + return sqlgraph.DeleteNodes(ctx, dd.driver, _spec) +} + +// DecisionDeleteOne is the builder for deleting a single Decision entity. +type DecisionDeleteOne struct { + dd *DecisionDelete +} + +// Exec executes the deletion query. +func (ddo *DecisionDeleteOne) Exec(ctx context.Context) error { + n, err := ddo.dd.Exec(ctx) + switch { + case err != nil: + return err + case n == 0: + return &NotFoundError{decision.Label} + default: + return nil + } +} + +// ExecX is like Exec, but panics if an error occurs. +func (ddo *DecisionDeleteOne) ExecX(ctx context.Context) { + ddo.dd.ExecX(ctx) +} diff --git a/pkg/database/ent/decision_query.go b/pkg/database/ent/decision_query.go new file mode 100644 index 000000000..14b3f28da --- /dev/null +++ b/pkg/database/ent/decision_query.go @@ -0,0 +1,957 @@ +// Code generated by entc, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "math" + + "github.com/crowdsecurity/crowdsec/pkg/database/ent/alert" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/decision" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/predicate" + "github.com/facebook/ent/dialect/sql" + "github.com/facebook/ent/dialect/sql/sqlgraph" + "github.com/facebook/ent/schema/field" +) + +// DecisionQuery is the builder for querying Decision entities. +type DecisionQuery struct { + config + limit *int + offset *int + order []OrderFunc + unique []string + predicates []predicate.Decision + // eager-loading edges. 
+ withOwner *AlertQuery + withFKs bool + // intermediate query (i.e. traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Where adds a new predicate for the builder. +func (dq *DecisionQuery) Where(ps ...predicate.Decision) *DecisionQuery { + dq.predicates = append(dq.predicates, ps...) + return dq +} + +// Limit adds a limit step to the query. +func (dq *DecisionQuery) Limit(limit int) *DecisionQuery { + dq.limit = &limit + return dq +} + +// Offset adds an offset step to the query. +func (dq *DecisionQuery) Offset(offset int) *DecisionQuery { + dq.offset = &offset + return dq +} + +// Order adds an order step to the query. +func (dq *DecisionQuery) Order(o ...OrderFunc) *DecisionQuery { + dq.order = append(dq.order, o...) + return dq +} + +// QueryOwner chains the current query on the owner edge. +func (dq *DecisionQuery) QueryOwner() *AlertQuery { + query := &AlertQuery{config: dq.config} + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := dq.prepareQuery(ctx); err != nil { + return nil, err + } + selector := dq.sqlQuery() + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(decision.Table, decision.FieldID, selector), + sqlgraph.To(alert.Table, alert.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, decision.OwnerTable, decision.OwnerColumn), + ) + fromU = sqlgraph.SetNeighbors(dq.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// First returns the first Decision entity in the query. Returns *NotFoundError when no decision was found. +func (dq *DecisionQuery) First(ctx context.Context) (*Decision, error) { + nodes, err := dq.Limit(1).All(ctx) + if err != nil { + return nil, err + } + if len(nodes) == 0 { + return nil, &NotFoundError{decision.Label} + } + return nodes[0], nil +} + +// FirstX is like First, but panics if an error occurs. +func (dq *DecisionQuery) FirstX(ctx context.Context) *Decision { + node, err := dq.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return node +} + +// FirstID returns the first Decision id in the query. Returns *NotFoundError when no id was found. +func (dq *DecisionQuery) FirstID(ctx context.Context) (id int, err error) { + var ids []int + if ids, err = dq.Limit(1).IDs(ctx); err != nil { + return + } + if len(ids) == 0 { + err = &NotFoundError{decision.Label} + return + } + return ids[0], nil +} + +// FirstXID is like FirstID, but panics if an error occurs. +func (dq *DecisionQuery) FirstXID(ctx context.Context) int { + id, err := dq.FirstID(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return id +} + +// Only returns the only Decision entity in the query, returns an error if not exactly one entity was returned. +func (dq *DecisionQuery) Only(ctx context.Context) (*Decision, error) { + nodes, err := dq.Limit(2).All(ctx) + if err != nil { + return nil, err + } + switch len(nodes) { + case 1: + return nodes[0], nil + case 0: + return nil, &NotFoundError{decision.Label} + default: + return nil, &NotSingularError{decision.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. +func (dq *DecisionQuery) OnlyX(ctx context.Context) *Decision { + node, err := dq.Only(ctx) + if err != nil { + panic(err) + } + return node +} + +// OnlyID returns the only Decision id in the query, returns an error if not exactly one id was returned. 
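// First and Only differ in cardinality: First takes the first match (Limit 1
// under the hood), while Only also fails with *NotSingularError when more
// than one row matches. A sketch ("client"/"ctx" assumed):
//
//	d, err := client.Decision.Query().
//		Where(decision.ValueEQ("1.1.1.172")).
//		Only(ctx) // error unless exactly one decision matches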
+func (dq *DecisionQuery) OnlyID(ctx context.Context) (id int, err error) { + var ids []int + if ids, err = dq.Limit(2).IDs(ctx); err != nil { + return + } + switch len(ids) { + case 1: + id = ids[0] + case 0: + err = &NotFoundError{decision.Label} + default: + err = &NotSingularError{decision.Label} + } + return +} + +// OnlyIDX is like OnlyID, but panics if an error occurs. +func (dq *DecisionQuery) OnlyIDX(ctx context.Context) int { + id, err := dq.OnlyID(ctx) + if err != nil { + panic(err) + } + return id +} + +// All executes the query and returns a list of Decisions. +func (dq *DecisionQuery) All(ctx context.Context) ([]*Decision, error) { + if err := dq.prepareQuery(ctx); err != nil { + return nil, err + } + return dq.sqlAll(ctx) +} + +// AllX is like All, but panics if an error occurs. +func (dq *DecisionQuery) AllX(ctx context.Context) []*Decision { + nodes, err := dq.All(ctx) + if err != nil { + panic(err) + } + return nodes +} + +// IDs executes the query and returns a list of Decision ids. +func (dq *DecisionQuery) IDs(ctx context.Context) ([]int, error) { + var ids []int + if err := dq.Select(decision.FieldID).Scan(ctx, &ids); err != nil { + return nil, err + } + return ids, nil +} + +// IDsX is like IDs, but panics if an error occurs. +func (dq *DecisionQuery) IDsX(ctx context.Context) []int { + ids, err := dq.IDs(ctx) + if err != nil { + panic(err) + } + return ids +} + +// Count returns the count of the given query. +func (dq *DecisionQuery) Count(ctx context.Context) (int, error) { + if err := dq.prepareQuery(ctx); err != nil { + return 0, err + } + return dq.sqlCount(ctx) +} + +// CountX is like Count, but panics if an error occurs. +func (dq *DecisionQuery) CountX(ctx context.Context) int { + count, err := dq.Count(ctx) + if err != nil { + panic(err) + } + return count +} + +// Exist returns true if the query has elements in the graph. +func (dq *DecisionQuery) Exist(ctx context.Context) (bool, error) { + if err := dq.prepareQuery(ctx); err != nil { + return false, err + } + return dq.sqlExist(ctx) +} + +// ExistX is like Exist, but panics if an error occurs. +func (dq *DecisionQuery) ExistX(ctx context.Context) bool { + exist, err := dq.Exist(ctx) + if err != nil { + panic(err) + } + return exist +} + +// Clone returns a duplicate of the query builder, including all associated steps. It can be +// used to prepare common query builders and use them differently after the clone is made. +func (dq *DecisionQuery) Clone() *DecisionQuery { + return &DecisionQuery{ + config: dq.config, + limit: dq.limit, + offset: dq.offset, + order: append([]OrderFunc{}, dq.order...), + unique: append([]string{}, dq.unique...), + predicates: append([]predicate.Decision{}, dq.predicates...), + // clone intermediate query. + sql: dq.sql.Clone(), + path: dq.path, + } +} + +// WithOwner tells the query-builder to eager-loads the nodes that are connected to +// the "owner" edge. The optional arguments used to configure the query builder of the edge. +func (dq *DecisionQuery) WithOwner(opts ...func(*AlertQuery)) *DecisionQuery { + query := &AlertQuery{config: dq.config} + for _, opt := range opts { + opt(query) + } + dq.withOwner = query + return dq +} + +// GroupBy used to group vertices by one or more fields/columns. +// It is often used with aggregate functions, like: count, max, mean, min, sum. +// +// Example: +// +// var v []struct { +// CreatedAt time.Time `json:"created_at,omitempty"` +// Count int `json:"count,omitempty"` +// } +// +// client.Decision.Query(). 
+// GroupBy(decision.FieldCreatedAt). +// Aggregate(ent.Count()). +// Scan(ctx, &v) +// +func (dq *DecisionQuery) GroupBy(field string, fields ...string) *DecisionGroupBy { + group := &DecisionGroupBy{config: dq.config} + group.fields = append([]string{field}, fields...) + group.path = func(ctx context.Context) (prev *sql.Selector, err error) { + if err := dq.prepareQuery(ctx); err != nil { + return nil, err + } + return dq.sqlQuery(), nil + } + return group +} + +// Select one or more fields from the given query. +// +// Example: +// +// var v []struct { +// CreatedAt time.Time `json:"created_at,omitempty"` +// } +// +// client.Decision.Query(). +// Select(decision.FieldCreatedAt). +// Scan(ctx, &v) +// +func (dq *DecisionQuery) Select(field string, fields ...string) *DecisionSelect { + selector := &DecisionSelect{config: dq.config} + selector.fields = append([]string{field}, fields...) + selector.path = func(ctx context.Context) (prev *sql.Selector, err error) { + if err := dq.prepareQuery(ctx); err != nil { + return nil, err + } + return dq.sqlQuery(), nil + } + return selector +} + +func (dq *DecisionQuery) prepareQuery(ctx context.Context) error { + if dq.path != nil { + prev, err := dq.path(ctx) + if err != nil { + return err + } + dq.sql = prev + } + return nil +} + +func (dq *DecisionQuery) sqlAll(ctx context.Context) ([]*Decision, error) { + var ( + nodes = []*Decision{} + withFKs = dq.withFKs + _spec = dq.querySpec() + loadedTypes = [1]bool{ + dq.withOwner != nil, + } + ) + if dq.withOwner != nil { + withFKs = true + } + if withFKs { + _spec.Node.Columns = append(_spec.Node.Columns, decision.ForeignKeys...) + } + _spec.ScanValues = func() []interface{} { + node := &Decision{config: dq.config} + nodes = append(nodes, node) + values := node.scanValues() + if withFKs { + values = append(values, node.fkValues()...) + } + return values + } + _spec.Assign = func(values ...interface{}) error { + if len(nodes) == 0 { + return fmt.Errorf("ent: Assign called without calling ScanValues") + } + node := nodes[len(nodes)-1] + node.Edges.loadedTypes = loadedTypes + return node.assignValues(values...) 
+ } + if err := sqlgraph.QueryNodes(ctx, dq.driver, _spec); err != nil { + return nil, err + } + if len(nodes) == 0 { + return nodes, nil + } + + if query := dq.withOwner; query != nil { + ids := make([]int, 0, len(nodes)) + nodeids := make(map[int][]*Decision) + for i := range nodes { + if fk := nodes[i].alert_decisions; fk != nil { + ids = append(ids, *fk) + nodeids[*fk] = append(nodeids[*fk], nodes[i]) + } + } + query.Where(alert.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return nil, err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return nil, fmt.Errorf(`unexpected foreign-key "alert_decisions" returned %v`, n.ID) + } + for i := range nodes { + nodes[i].Edges.Owner = n + } + } + } + + return nodes, nil +} + +func (dq *DecisionQuery) sqlCount(ctx context.Context) (int, error) { + _spec := dq.querySpec() + return sqlgraph.CountNodes(ctx, dq.driver, _spec) +} + +func (dq *DecisionQuery) sqlExist(ctx context.Context) (bool, error) { + n, err := dq.sqlCount(ctx) + if err != nil { + return false, fmt.Errorf("ent: check existence: %v", err) + } + return n > 0, nil +} + +func (dq *DecisionQuery) querySpec() *sqlgraph.QuerySpec { + _spec := &sqlgraph.QuerySpec{ + Node: &sqlgraph.NodeSpec{ + Table: decision.Table, + Columns: decision.Columns, + ID: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: decision.FieldID, + }, + }, + From: dq.sql, + Unique: true, + } + if ps := dq.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if limit := dq.limit; limit != nil { + _spec.Limit = *limit + } + if offset := dq.offset; offset != nil { + _spec.Offset = *offset + } + if ps := dq.order; len(ps) > 0 { + _spec.Order = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector, decision.ValidColumn) + } + } + } + return _spec +} + +func (dq *DecisionQuery) sqlQuery() *sql.Selector { + builder := sql.Dialect(dq.driver.Dialect()) + t1 := builder.Table(decision.Table) + selector := builder.Select(t1.Columns(decision.Columns...)...).From(t1) + if dq.sql != nil { + selector = dq.sql + selector.Select(selector.Columns(decision.Columns...)...) + } + for _, p := range dq.predicates { + p(selector) + } + for _, p := range dq.order { + p(selector, decision.ValidColumn) + } + if offset := dq.offset; offset != nil { + // limit is mandatory for offset clause. We start + // with default value, and override it below if needed. + selector.Offset(*offset).Limit(math.MaxInt32) + } + if limit := dq.limit; limit != nil { + selector.Limit(*limit) + } + return selector +} + +// DecisionGroupBy is the builder for group-by Decision entities. +type DecisionGroupBy struct { + config + fields []string + fns []AggregateFunc + // intermediate query (i.e. traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Aggregate adds the given aggregation functions to the group-by query. +func (dgb *DecisionGroupBy) Aggregate(fns ...AggregateFunc) *DecisionGroupBy { + dgb.fns = append(dgb.fns, fns...) + return dgb +} + +// Scan applies the group-by query and scan the result into the given value. +func (dgb *DecisionGroupBy) Scan(ctx context.Context, v interface{}) error { + query, err := dgb.path(ctx) + if err != nil { + return err + } + dgb.sql = query + return dgb.sqlScan(ctx, v) +} + +// ScanX is like Scan, but panics if an error occurs. 
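// Besides Scan, the group-by builder exposes typed single-column helpers
// (Strings, Ints, Float64s, Bools) defined below; they are only valid when
// grouping by exactly one field. A sketch ("client"/"ctx" assumed):
//
//	scenarios, err := client.Decision.Query().
//		GroupBy(decision.FieldScenario).
//		Strings(ctx) // distinct scenario values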
+func (dgb *DecisionGroupBy) ScanX(ctx context.Context, v interface{}) { + if err := dgb.Scan(ctx, v); err != nil { + panic(err) + } +} + +// Strings returns list of strings from group-by. It is only allowed when querying group-by with one field. +func (dgb *DecisionGroupBy) Strings(ctx context.Context) ([]string, error) { + if len(dgb.fields) > 1 { + return nil, errors.New("ent: DecisionGroupBy.Strings is not achievable when grouping more than 1 field") + } + var v []string + if err := dgb.Scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// StringsX is like Strings, but panics if an error occurs. +func (dgb *DecisionGroupBy) StringsX(ctx context.Context) []string { + v, err := dgb.Strings(ctx) + if err != nil { + panic(err) + } + return v +} + +// String returns a single string from group-by. It is only allowed when querying group-by with one field. +func (dgb *DecisionGroupBy) String(ctx context.Context) (_ string, err error) { + var v []string + if v, err = dgb.Strings(ctx); err != nil { + return + } + switch len(v) { + case 1: + return v[0], nil + case 0: + err = &NotFoundError{decision.Label} + default: + err = fmt.Errorf("ent: DecisionGroupBy.Strings returned %d results when one was expected", len(v)) + } + return +} + +// StringX is like String, but panics if an error occurs. +func (dgb *DecisionGroupBy) StringX(ctx context.Context) string { + v, err := dgb.String(ctx) + if err != nil { + panic(err) + } + return v +} + +// Ints returns list of ints from group-by. It is only allowed when querying group-by with one field. +func (dgb *DecisionGroupBy) Ints(ctx context.Context) ([]int, error) { + if len(dgb.fields) > 1 { + return nil, errors.New("ent: DecisionGroupBy.Ints is not achievable when grouping more than 1 field") + } + var v []int + if err := dgb.Scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// IntsX is like Ints, but panics if an error occurs. +func (dgb *DecisionGroupBy) IntsX(ctx context.Context) []int { + v, err := dgb.Ints(ctx) + if err != nil { + panic(err) + } + return v +} + +// Int returns a single int from group-by. It is only allowed when querying group-by with one field. +func (dgb *DecisionGroupBy) Int(ctx context.Context) (_ int, err error) { + var v []int + if v, err = dgb.Ints(ctx); err != nil { + return + } + switch len(v) { + case 1: + return v[0], nil + case 0: + err = &NotFoundError{decision.Label} + default: + err = fmt.Errorf("ent: DecisionGroupBy.Ints returned %d results when one was expected", len(v)) + } + return +} + +// IntX is like Int, but panics if an error occurs. +func (dgb *DecisionGroupBy) IntX(ctx context.Context) int { + v, err := dgb.Int(ctx) + if err != nil { + panic(err) + } + return v +} + +// Float64s returns list of float64s from group-by. It is only allowed when querying group-by with one field. +func (dgb *DecisionGroupBy) Float64s(ctx context.Context) ([]float64, error) { + if len(dgb.fields) > 1 { + return nil, errors.New("ent: DecisionGroupBy.Float64s is not achievable when grouping more than 1 field") + } + var v []float64 + if err := dgb.Scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// Float64sX is like Float64s, but panics if an error occurs. +func (dgb *DecisionGroupBy) Float64sX(ctx context.Context) []float64 { + v, err := dgb.Float64s(ctx) + if err != nil { + panic(err) + } + return v +} + +// Float64 returns a single float64 from group-by. It is only allowed when querying group-by with one field. 
+func (dgb *DecisionGroupBy) Float64(ctx context.Context) (_ float64, err error) { + var v []float64 + if v, err = dgb.Float64s(ctx); err != nil { + return + } + switch len(v) { + case 1: + return v[0], nil + case 0: + err = &NotFoundError{decision.Label} + default: + err = fmt.Errorf("ent: DecisionGroupBy.Float64s returned %d results when one was expected", len(v)) + } + return +} + +// Float64X is like Float64, but panics if an error occurs. +func (dgb *DecisionGroupBy) Float64X(ctx context.Context) float64 { + v, err := dgb.Float64(ctx) + if err != nil { + panic(err) + } + return v +} + +// Bools returns list of bools from group-by. It is only allowed when querying group-by with one field. +func (dgb *DecisionGroupBy) Bools(ctx context.Context) ([]bool, error) { + if len(dgb.fields) > 1 { + return nil, errors.New("ent: DecisionGroupBy.Bools is not achievable when grouping more than 1 field") + } + var v []bool + if err := dgb.Scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// BoolsX is like Bools, but panics if an error occurs. +func (dgb *DecisionGroupBy) BoolsX(ctx context.Context) []bool { + v, err := dgb.Bools(ctx) + if err != nil { + panic(err) + } + return v +} + +// Bool returns a single bool from group-by. It is only allowed when querying group-by with one field. +func (dgb *DecisionGroupBy) Bool(ctx context.Context) (_ bool, err error) { + var v []bool + if v, err = dgb.Bools(ctx); err != nil { + return + } + switch len(v) { + case 1: + return v[0], nil + case 0: + err = &NotFoundError{decision.Label} + default: + err = fmt.Errorf("ent: DecisionGroupBy.Bools returned %d results when one was expected", len(v)) + } + return +} + +// BoolX is like Bool, but panics if an error occurs. +func (dgb *DecisionGroupBy) BoolX(ctx context.Context) bool { + v, err := dgb.Bool(ctx) + if err != nil { + panic(err) + } + return v +} + +func (dgb *DecisionGroupBy) sqlScan(ctx context.Context, v interface{}) error { + for _, f := range dgb.fields { + if !decision.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("invalid field %q for group-by", f)} + } + } + selector := dgb.sqlQuery() + if err := selector.Err(); err != nil { + return err + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := dgb.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +func (dgb *DecisionGroupBy) sqlQuery() *sql.Selector { + selector := dgb.sql + columns := make([]string, 0, len(dgb.fields)+len(dgb.fns)) + columns = append(columns, dgb.fields...) + for _, fn := range dgb.fns { + columns = append(columns, fn(selector, decision.ValidColumn)) + } + return selector.Select(columns...).GroupBy(dgb.fields...) +} + +// DecisionSelect is the builder for select fields of Decision entities. +type DecisionSelect struct { + config + fields []string + // intermediate query (i.e. traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Scan applies the selector query and scan the result into the given value. +func (ds *DecisionSelect) Scan(ctx context.Context, v interface{}) error { + query, err := ds.path(ctx) + if err != nil { + return err + } + ds.sql = query + return ds.sqlScan(ctx, v) +} + +// ScanX is like Scan, but panics if an error occurs. +func (ds *DecisionSelect) ScanX(ctx context.Context, v interface{}) { + if err := ds.Scan(ctx, v); err != nil { + panic(err) + } +} + +// Strings returns list of strings from selector. 
It is only allowed when selecting one field. +func (ds *DecisionSelect) Strings(ctx context.Context) ([]string, error) { + if len(ds.fields) > 1 { + return nil, errors.New("ent: DecisionSelect.Strings is not achievable when selecting more than 1 field") + } + var v []string + if err := ds.Scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// StringsX is like Strings, but panics if an error occurs. +func (ds *DecisionSelect) StringsX(ctx context.Context) []string { + v, err := ds.Strings(ctx) + if err != nil { + panic(err) + } + return v +} + +// String returns a single string from selector. It is only allowed when selecting one field. +func (ds *DecisionSelect) String(ctx context.Context) (_ string, err error) { + var v []string + if v, err = ds.Strings(ctx); err != nil { + return + } + switch len(v) { + case 1: + return v[0], nil + case 0: + err = &NotFoundError{decision.Label} + default: + err = fmt.Errorf("ent: DecisionSelect.Strings returned %d results when one was expected", len(v)) + } + return +} + +// StringX is like String, but panics if an error occurs. +func (ds *DecisionSelect) StringX(ctx context.Context) string { + v, err := ds.String(ctx) + if err != nil { + panic(err) + } + return v +} + +// Ints returns list of ints from selector. It is only allowed when selecting one field. +func (ds *DecisionSelect) Ints(ctx context.Context) ([]int, error) { + if len(ds.fields) > 1 { + return nil, errors.New("ent: DecisionSelect.Ints is not achievable when selecting more than 1 field") + } + var v []int + if err := ds.Scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// IntsX is like Ints, but panics if an error occurs. +func (ds *DecisionSelect) IntsX(ctx context.Context) []int { + v, err := ds.Ints(ctx) + if err != nil { + panic(err) + } + return v +} + +// Int returns a single int from selector. It is only allowed when selecting one field. +func (ds *DecisionSelect) Int(ctx context.Context) (_ int, err error) { + var v []int + if v, err = ds.Ints(ctx); err != nil { + return + } + switch len(v) { + case 1: + return v[0], nil + case 0: + err = &NotFoundError{decision.Label} + default: + err = fmt.Errorf("ent: DecisionSelect.Ints returned %d results when one was expected", len(v)) + } + return +} + +// IntX is like Int, but panics if an error occurs. +func (ds *DecisionSelect) IntX(ctx context.Context) int { + v, err := ds.Int(ctx) + if err != nil { + panic(err) + } + return v +} + +// Float64s returns list of float64s from selector. It is only allowed when selecting one field. +func (ds *DecisionSelect) Float64s(ctx context.Context) ([]float64, error) { + if len(ds.fields) > 1 { + return nil, errors.New("ent: DecisionSelect.Float64s is not achievable when selecting more than 1 field") + } + var v []float64 + if err := ds.Scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// Float64sX is like Float64s, but panics if an error occurs. +func (ds *DecisionSelect) Float64sX(ctx context.Context) []float64 { + v, err := ds.Float64s(ctx) + if err != nil { + panic(err) + } + return v +} + +// Float64 returns a single float64 from selector. It is only allowed when selecting one field. 
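// DecisionSelect mirrors the group-by helpers for plain column projections.
// A sketch that pulls a single column ("client"/"ctx" assumed):
//
//	values, err := client.Decision.Query().
//		Where(decision.ScopeEQ("Ip")).
//		Select(decision.FieldValue).
//		Strings(ctx)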
+func (ds *DecisionSelect) Float64(ctx context.Context) (_ float64, err error) { + var v []float64 + if v, err = ds.Float64s(ctx); err != nil { + return + } + switch len(v) { + case 1: + return v[0], nil + case 0: + err = &NotFoundError{decision.Label} + default: + err = fmt.Errorf("ent: DecisionSelect.Float64s returned %d results when one was expected", len(v)) + } + return +} + +// Float64X is like Float64, but panics if an error occurs. +func (ds *DecisionSelect) Float64X(ctx context.Context) float64 { + v, err := ds.Float64(ctx) + if err != nil { + panic(err) + } + return v +} + +// Bools returns list of bools from selector. It is only allowed when selecting one field. +func (ds *DecisionSelect) Bools(ctx context.Context) ([]bool, error) { + if len(ds.fields) > 1 { + return nil, errors.New("ent: DecisionSelect.Bools is not achievable when selecting more than 1 field") + } + var v []bool + if err := ds.Scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// BoolsX is like Bools, but panics if an error occurs. +func (ds *DecisionSelect) BoolsX(ctx context.Context) []bool { + v, err := ds.Bools(ctx) + if err != nil { + panic(err) + } + return v +} + +// Bool returns a single bool from selector. It is only allowed when selecting one field. +func (ds *DecisionSelect) Bool(ctx context.Context) (_ bool, err error) { + var v []bool + if v, err = ds.Bools(ctx); err != nil { + return + } + switch len(v) { + case 1: + return v[0], nil + case 0: + err = &NotFoundError{decision.Label} + default: + err = fmt.Errorf("ent: DecisionSelect.Bools returned %d results when one was expected", len(v)) + } + return +} + +// BoolX is like Bool, but panics if an error occurs. +func (ds *DecisionSelect) BoolX(ctx context.Context) bool { + v, err := ds.Bool(ctx) + if err != nil { + panic(err) + } + return v +} + +func (ds *DecisionSelect) sqlScan(ctx context.Context, v interface{}) error { + for _, f := range ds.fields { + if !decision.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("invalid field %q for selection", f)} + } + } + rows := &sql.Rows{} + query, args := ds.sqlQuery().Query() + if err := ds.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +func (ds *DecisionSelect) sqlQuery() sql.Querier { + selector := ds.sql + selector.Select(selector.Columns(ds.fields...)...) + return selector +} diff --git a/pkg/database/ent/decision_update.go b/pkg/database/ent/decision_update.go new file mode 100644 index 000000000..4aa681304 --- /dev/null +++ b/pkg/database/ent/decision_update.go @@ -0,0 +1,798 @@ +// Code generated by entc, DO NOT EDIT. + +package ent + +import ( + "context" + "fmt" + "time" + + "github.com/crowdsecurity/crowdsec/pkg/database/ent/alert" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/decision" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/predicate" + "github.com/facebook/ent/dialect/sql" + "github.com/facebook/ent/dialect/sql/sqlgraph" + "github.com/facebook/ent/schema/field" +) + +// DecisionUpdate is the builder for updating Decision entities. +type DecisionUpdate struct { + config + hooks []Hook + mutation *DecisionMutation + predicates []predicate.Decision +} + +// Where adds a new predicate for the builder. +func (du *DecisionUpdate) Where(ps ...predicate.Decision) *DecisionUpdate { + du.predicates = append(du.predicates, ps...) + return du +} + +// SetCreatedAt sets the created_at field. 
+func (du *DecisionUpdate) SetCreatedAt(t time.Time) *DecisionUpdate { + du.mutation.SetCreatedAt(t) + return du +} + +// SetNillableCreatedAt sets the created_at field if the given value is not nil. +func (du *DecisionUpdate) SetNillableCreatedAt(t *time.Time) *DecisionUpdate { + if t != nil { + du.SetCreatedAt(*t) + } + return du +} + +// SetUpdatedAt sets the updated_at field. +func (du *DecisionUpdate) SetUpdatedAt(t time.Time) *DecisionUpdate { + du.mutation.SetUpdatedAt(t) + return du +} + +// SetNillableUpdatedAt sets the updated_at field if the given value is not nil. +func (du *DecisionUpdate) SetNillableUpdatedAt(t *time.Time) *DecisionUpdate { + if t != nil { + du.SetUpdatedAt(*t) + } + return du +} + +// SetUntil sets the until field. +func (du *DecisionUpdate) SetUntil(t time.Time) *DecisionUpdate { + du.mutation.SetUntil(t) + return du +} + +// SetScenario sets the scenario field. +func (du *DecisionUpdate) SetScenario(s string) *DecisionUpdate { + du.mutation.SetScenario(s) + return du +} + +// SetType sets the type field. +func (du *DecisionUpdate) SetType(s string) *DecisionUpdate { + du.mutation.SetType(s) + return du +} + +// SetStartIP sets the start_ip field. +func (du *DecisionUpdate) SetStartIP(i int64) *DecisionUpdate { + du.mutation.ResetStartIP() + du.mutation.SetStartIP(i) + return du +} + +// SetNillableStartIP sets the start_ip field if the given value is not nil. +func (du *DecisionUpdate) SetNillableStartIP(i *int64) *DecisionUpdate { + if i != nil { + du.SetStartIP(*i) + } + return du +} + +// AddStartIP adds i to start_ip. +func (du *DecisionUpdate) AddStartIP(i int64) *DecisionUpdate { + du.mutation.AddStartIP(i) + return du +} + +// ClearStartIP clears the value of start_ip. +func (du *DecisionUpdate) ClearStartIP() *DecisionUpdate { + du.mutation.ClearStartIP() + return du +} + +// SetEndIP sets the end_ip field. +func (du *DecisionUpdate) SetEndIP(i int64) *DecisionUpdate { + du.mutation.ResetEndIP() + du.mutation.SetEndIP(i) + return du +} + +// SetNillableEndIP sets the end_ip field if the given value is not nil. +func (du *DecisionUpdate) SetNillableEndIP(i *int64) *DecisionUpdate { + if i != nil { + du.SetEndIP(*i) + } + return du +} + +// AddEndIP adds i to end_ip. +func (du *DecisionUpdate) AddEndIP(i int64) *DecisionUpdate { + du.mutation.AddEndIP(i) + return du +} + +// ClearEndIP clears the value of end_ip. +func (du *DecisionUpdate) ClearEndIP() *DecisionUpdate { + du.mutation.ClearEndIP() + return du +} + +// SetScope sets the scope field. +func (du *DecisionUpdate) SetScope(s string) *DecisionUpdate { + du.mutation.SetScope(s) + return du +} + +// SetValue sets the value field. +func (du *DecisionUpdate) SetValue(s string) *DecisionUpdate { + du.mutation.SetValue(s) + return du +} + +// SetOrigin sets the origin field. +func (du *DecisionUpdate) SetOrigin(s string) *DecisionUpdate { + du.mutation.SetOrigin(s) + return du +} + +// SetSimulated sets the simulated field. +func (du *DecisionUpdate) SetSimulated(b bool) *DecisionUpdate { + du.mutation.SetSimulated(b) + return du +} + +// SetNillableSimulated sets the simulated field if the given value is not nil. +func (du *DecisionUpdate) SetNillableSimulated(b *bool) *DecisionUpdate { + if b != nil { + du.SetSimulated(*b) + } + return du +} + +// SetOwnerID sets the owner edge to Alert by id. +func (du *DecisionUpdate) SetOwnerID(id int) *DecisionUpdate { + du.mutation.SetOwnerID(id) + return du +} + +// SetNillableOwnerID sets the owner edge to Alert by id if the given value is not nil. 
+func (du *DecisionUpdate) SetNillableOwnerID(id *int) *DecisionUpdate { + if id != nil { + du = du.SetOwnerID(*id) + } + return du +} + +// SetOwner sets the owner edge to Alert. +func (du *DecisionUpdate) SetOwner(a *Alert) *DecisionUpdate { + return du.SetOwnerID(a.ID) +} + +// Mutation returns the DecisionMutation object of the builder. +func (du *DecisionUpdate) Mutation() *DecisionMutation { + return du.mutation +} + +// ClearOwner clears the "owner" edge to type Alert. +func (du *DecisionUpdate) ClearOwner() *DecisionUpdate { + du.mutation.ClearOwner() + return du +} + +// Save executes the query and returns the number of rows/vertices matched by this operation. +func (du *DecisionUpdate) Save(ctx context.Context) (int, error) { + var ( + err error + affected int + ) + if len(du.hooks) == 0 { + affected, err = du.sqlSave(ctx) + } else { + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*DecisionMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + du.mutation = mutation + affected, err = du.sqlSave(ctx) + mutation.done = true + return affected, err + }) + for i := len(du.hooks) - 1; i >= 0; i-- { + mut = du.hooks[i](mut) + } + if _, err := mut.Mutate(ctx, du.mutation); err != nil { + return 0, err + } + } + return affected, err +} + +// SaveX is like Save, but panics if an error occurs. +func (du *DecisionUpdate) SaveX(ctx context.Context) int { + affected, err := du.Save(ctx) + if err != nil { + panic(err) + } + return affected +} + +// Exec executes the query. +func (du *DecisionUpdate) Exec(ctx context.Context) error { + _, err := du.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (du *DecisionUpdate) ExecX(ctx context.Context) { + if err := du.Exec(ctx); err != nil { + panic(err) + } +} + +func (du *DecisionUpdate) sqlSave(ctx context.Context) (n int, err error) { + _spec := &sqlgraph.UpdateSpec{ + Node: &sqlgraph.NodeSpec{ + Table: decision.Table, + Columns: decision.Columns, + ID: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: decision.FieldID, + }, + }, + } + if ps := du.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := du.mutation.CreatedAt(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: decision.FieldCreatedAt, + }) + } + if value, ok := du.mutation.UpdatedAt(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: decision.FieldUpdatedAt, + }) + } + if value, ok := du.mutation.Until(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: decision.FieldUntil, + }) + } + if value, ok := du.mutation.Scenario(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: decision.FieldScenario, + }) + } + if value, ok := du.mutation.GetType(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: decision.FieldType, + }) + } + if value, ok := du.mutation.StartIP(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeInt64, + Value: value, + Column: decision.FieldStartIP, + }) + } + if value, ok := du.mutation.AddedStartIP(); ok { + _spec.Fields.Add = 
append(_spec.Fields.Add, &sqlgraph.FieldSpec{ + Type: field.TypeInt64, + Value: value, + Column: decision.FieldStartIP, + }) + } + if du.mutation.StartIPCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeInt64, + Column: decision.FieldStartIP, + }) + } + if value, ok := du.mutation.EndIP(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeInt64, + Value: value, + Column: decision.FieldEndIP, + }) + } + if value, ok := du.mutation.AddedEndIP(); ok { + _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ + Type: field.TypeInt64, + Value: value, + Column: decision.FieldEndIP, + }) + } + if du.mutation.EndIPCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeInt64, + Column: decision.FieldEndIP, + }) + } + if value, ok := du.mutation.Scope(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: decision.FieldScope, + }) + } + if value, ok := du.mutation.Value(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: decision.FieldValue, + }) + } + if value, ok := du.mutation.Origin(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: decision.FieldOrigin, + }) + } + if value, ok := du.mutation.Simulated(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeBool, + Value: value, + Column: decision.FieldSimulated, + }) + } + if du.mutation.OwnerCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: decision.OwnerTable, + Columns: []string{decision.OwnerColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: alert.FieldID, + }, + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := du.mutation.OwnerIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: decision.OwnerTable, + Columns: []string{decision.OwnerColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: alert.FieldID, + }, + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if n, err = sqlgraph.UpdateNodes(ctx, du.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{decision.Label} + } else if cerr, ok := isSQLConstraintError(err); ok { + err = cerr + } + return 0, err + } + return n, nil +} + +// DecisionUpdateOne is the builder for updating a single Decision entity. +type DecisionUpdateOne struct { + config + hooks []Hook + mutation *DecisionMutation +} + +// SetCreatedAt sets the created_at field. +func (duo *DecisionUpdateOne) SetCreatedAt(t time.Time) *DecisionUpdateOne { + duo.mutation.SetCreatedAt(t) + return duo +} + +// SetNillableCreatedAt sets the created_at field if the given value is not nil. +func (duo *DecisionUpdateOne) SetNillableCreatedAt(t *time.Time) *DecisionUpdateOne { + if t != nil { + duo.SetCreatedAt(*t) + } + return duo +} + +// SetUpdatedAt sets the updated_at field. 
+func (duo *DecisionUpdateOne) SetUpdatedAt(t time.Time) *DecisionUpdateOne { + duo.mutation.SetUpdatedAt(t) + return duo +} + +// SetNillableUpdatedAt sets the updated_at field if the given value is not nil. +func (duo *DecisionUpdateOne) SetNillableUpdatedAt(t *time.Time) *DecisionUpdateOne { + if t != nil { + duo.SetUpdatedAt(*t) + } + return duo +} + +// SetUntil sets the until field. +func (duo *DecisionUpdateOne) SetUntil(t time.Time) *DecisionUpdateOne { + duo.mutation.SetUntil(t) + return duo +} + +// SetScenario sets the scenario field. +func (duo *DecisionUpdateOne) SetScenario(s string) *DecisionUpdateOne { + duo.mutation.SetScenario(s) + return duo +} + +// SetType sets the type field. +func (duo *DecisionUpdateOne) SetType(s string) *DecisionUpdateOne { + duo.mutation.SetType(s) + return duo +} + +// SetStartIP sets the start_ip field. +func (duo *DecisionUpdateOne) SetStartIP(i int64) *DecisionUpdateOne { + duo.mutation.ResetStartIP() + duo.mutation.SetStartIP(i) + return duo +} + +// SetNillableStartIP sets the start_ip field if the given value is not nil. +func (duo *DecisionUpdateOne) SetNillableStartIP(i *int64) *DecisionUpdateOne { + if i != nil { + duo.SetStartIP(*i) + } + return duo +} + +// AddStartIP adds i to start_ip. +func (duo *DecisionUpdateOne) AddStartIP(i int64) *DecisionUpdateOne { + duo.mutation.AddStartIP(i) + return duo +} + +// ClearStartIP clears the value of start_ip. +func (duo *DecisionUpdateOne) ClearStartIP() *DecisionUpdateOne { + duo.mutation.ClearStartIP() + return duo +} + +// SetEndIP sets the end_ip field. +func (duo *DecisionUpdateOne) SetEndIP(i int64) *DecisionUpdateOne { + duo.mutation.ResetEndIP() + duo.mutation.SetEndIP(i) + return duo +} + +// SetNillableEndIP sets the end_ip field if the given value is not nil. +func (duo *DecisionUpdateOne) SetNillableEndIP(i *int64) *DecisionUpdateOne { + if i != nil { + duo.SetEndIP(*i) + } + return duo +} + +// AddEndIP adds i to end_ip. +func (duo *DecisionUpdateOne) AddEndIP(i int64) *DecisionUpdateOne { + duo.mutation.AddEndIP(i) + return duo +} + +// ClearEndIP clears the value of end_ip. +func (duo *DecisionUpdateOne) ClearEndIP() *DecisionUpdateOne { + duo.mutation.ClearEndIP() + return duo +} + +// SetScope sets the scope field. +func (duo *DecisionUpdateOne) SetScope(s string) *DecisionUpdateOne { + duo.mutation.SetScope(s) + return duo +} + +// SetValue sets the value field. +func (duo *DecisionUpdateOne) SetValue(s string) *DecisionUpdateOne { + duo.mutation.SetValue(s) + return duo +} + +// SetOrigin sets the origin field. +func (duo *DecisionUpdateOne) SetOrigin(s string) *DecisionUpdateOne { + duo.mutation.SetOrigin(s) + return duo +} + +// SetSimulated sets the simulated field. +func (duo *DecisionUpdateOne) SetSimulated(b bool) *DecisionUpdateOne { + duo.mutation.SetSimulated(b) + return duo +} + +// SetNillableSimulated sets the simulated field if the given value is not nil. +func (duo *DecisionUpdateOne) SetNillableSimulated(b *bool) *DecisionUpdateOne { + if b != nil { + duo.SetSimulated(*b) + } + return duo +} + +// SetOwnerID sets the owner edge to Alert by id. +func (duo *DecisionUpdateOne) SetOwnerID(id int) *DecisionUpdateOne { + duo.mutation.SetOwnerID(id) + return duo +} + +// SetNillableOwnerID sets the owner edge to Alert by id if the given value is not nil. +func (duo *DecisionUpdateOne) SetNillableOwnerID(id *int) *DecisionUpdateOne { + if id != nil { + duo = duo.SetOwnerID(*id) + } + return duo +} + +// SetOwner sets the owner edge to Alert. 
+func (duo *DecisionUpdateOne) SetOwner(a *Alert) *DecisionUpdateOne { + return duo.SetOwnerID(a.ID) +} + +// Mutation returns the DecisionMutation object of the builder. +func (duo *DecisionUpdateOne) Mutation() *DecisionMutation { + return duo.mutation +} + +// ClearOwner clears the "owner" edge to type Alert. +func (duo *DecisionUpdateOne) ClearOwner() *DecisionUpdateOne { + duo.mutation.ClearOwner() + return duo +} + +// Save executes the query and returns the updated entity. +func (duo *DecisionUpdateOne) Save(ctx context.Context) (*Decision, error) { + var ( + err error + node *Decision + ) + if len(duo.hooks) == 0 { + node, err = duo.sqlSave(ctx) + } else { + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*DecisionMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + duo.mutation = mutation + node, err = duo.sqlSave(ctx) + mutation.done = true + return node, err + }) + for i := len(duo.hooks) - 1; i >= 0; i-- { + mut = duo.hooks[i](mut) + } + if _, err := mut.Mutate(ctx, duo.mutation); err != nil { + return nil, err + } + } + return node, err +} + +// SaveX is like Save, but panics if an error occurs. +func (duo *DecisionUpdateOne) SaveX(ctx context.Context) *Decision { + node, err := duo.Save(ctx) + if err != nil { + panic(err) + } + return node +} + +// Exec executes the query on the entity. +func (duo *DecisionUpdateOne) Exec(ctx context.Context) error { + _, err := duo.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (duo *DecisionUpdateOne) ExecX(ctx context.Context) { + if err := duo.Exec(ctx); err != nil { + panic(err) + } +} + +func (duo *DecisionUpdateOne) sqlSave(ctx context.Context) (_node *Decision, err error) { + _spec := &sqlgraph.UpdateSpec{ + Node: &sqlgraph.NodeSpec{ + Table: decision.Table, + Columns: decision.Columns, + ID: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: decision.FieldID, + }, + }, + } + id, ok := duo.mutation.ID() + if !ok { + return nil, &ValidationError{Name: "ID", err: fmt.Errorf("missing Decision.ID for update")} + } + _spec.Node.ID.Value = id + if value, ok := duo.mutation.CreatedAt(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: decision.FieldCreatedAt, + }) + } + if value, ok := duo.mutation.UpdatedAt(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: decision.FieldUpdatedAt, + }) + } + if value, ok := duo.mutation.Until(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: decision.FieldUntil, + }) + } + if value, ok := duo.mutation.Scenario(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: decision.FieldScenario, + }) + } + if value, ok := duo.mutation.GetType(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: decision.FieldType, + }) + } + if value, ok := duo.mutation.StartIP(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeInt64, + Value: value, + Column: decision.FieldStartIP, + }) + } + if value, ok := duo.mutation.AddedStartIP(); ok { + _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ + Type: field.TypeInt64, + Value: value, + Column: decision.FieldStartIP, + }) + } + if 
duo.mutation.StartIPCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeInt64, + Column: decision.FieldStartIP, + }) + } + if value, ok := duo.mutation.EndIP(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeInt64, + Value: value, + Column: decision.FieldEndIP, + }) + } + if value, ok := duo.mutation.AddedEndIP(); ok { + _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ + Type: field.TypeInt64, + Value: value, + Column: decision.FieldEndIP, + }) + } + if duo.mutation.EndIPCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeInt64, + Column: decision.FieldEndIP, + }) + } + if value, ok := duo.mutation.Scope(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: decision.FieldScope, + }) + } + if value, ok := duo.mutation.Value(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: decision.FieldValue, + }) + } + if value, ok := duo.mutation.Origin(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: decision.FieldOrigin, + }) + } + if value, ok := duo.mutation.Simulated(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeBool, + Value: value, + Column: decision.FieldSimulated, + }) + } + if duo.mutation.OwnerCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: decision.OwnerTable, + Columns: []string{decision.OwnerColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: alert.FieldID, + }, + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := duo.mutation.OwnerIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: decision.OwnerTable, + Columns: []string{decision.OwnerColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: alert.FieldID, + }, + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + _node = &Decision{config: duo.config} + _spec.Assign = _node.assignValues + _spec.ScanValues = _node.scanValues() + if err = sqlgraph.UpdateNode(ctx, duo.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{decision.Label} + } else if cerr, ok := isSQLConstraintError(err); ok { + err = cerr + } + return nil, err + } + return _node, nil +} diff --git a/pkg/database/ent/ent.go b/pkg/database/ent/ent.go new file mode 100644 index 000000000..587180b3c --- /dev/null +++ b/pkg/database/ent/ent.go @@ -0,0 +1,270 @@ +// Code generated by entc, DO NOT EDIT. + +package ent + +import ( + "errors" + "fmt" + "strings" + + "github.com/facebook/ent" + "github.com/facebook/ent/dialect" + "github.com/facebook/ent/dialect/sql" + "github.com/facebook/ent/dialect/sql/sqlgraph" +) + +// ent aliases to avoid import conflict in user's code. +type ( + Op = ent.Op + Hook = ent.Hook + Value = ent.Value + Query = ent.Query + Policy = ent.Policy + Mutator = ent.Mutator + Mutation = ent.Mutation + MutateFunc = ent.MutateFunc +) + +// OrderFunc applies an ordering on the sql selector. 
+type OrderFunc func(*sql.Selector, func(string) bool) + +// Asc applies the given fields in ASC order. +func Asc(fields ...string) OrderFunc { + return func(s *sql.Selector, check func(string) bool) { + for _, f := range fields { + if check(f) { + s.OrderBy(sql.Asc(f)) + } else { + s.AddError(&ValidationError{Name: f, err: fmt.Errorf("invalid field %q for ordering", f)}) + } + } + } +} + +// Desc applies the given fields in DESC order. +func Desc(fields ...string) OrderFunc { + return func(s *sql.Selector, check func(string) bool) { + for _, f := range fields { + if check(f) { + s.OrderBy(sql.Desc(f)) + } else { + s.AddError(&ValidationError{Name: f, err: fmt.Errorf("invalid field %q for ordering", f)}) + } + } + } +} + +// AggregateFunc applies an aggregation step on the group-by traversal/selector. +type AggregateFunc func(*sql.Selector, func(string) bool) string + +// As is a pseudo aggregation function for renaming other functions with custom names. For example: +// +// GroupBy(field1, field2). +// Aggregate(ent.As(ent.Sum(field1), "sum_field1"), ent.As(ent.Sum(field2), "sum_field2")). +// Scan(ctx, &v) +// +func As(fn AggregateFunc, end string) AggregateFunc { + return func(s *sql.Selector, check func(string) bool) string { + return sql.As(fn(s, check), end) + } +} + +// Count applies the "count" aggregation function on each group. +func Count() AggregateFunc { + return func(s *sql.Selector, _ func(string) bool) string { + return sql.Count("*") + } +} + +// Max applies the "max" aggregation function on the given field of each group. +func Max(field string) AggregateFunc { + return func(s *sql.Selector, check func(string) bool) string { + if !check(field) { + s.AddError(&ValidationError{Name: field, err: fmt.Errorf("invalid field %q for grouping", field)}) + return "" + } + return sql.Max(s.C(field)) + } +} + +// Mean applies the "mean" aggregation function on the given field of each group. +func Mean(field string) AggregateFunc { + return func(s *sql.Selector, check func(string) bool) string { + if !check(field) { + s.AddError(&ValidationError{Name: field, err: fmt.Errorf("invalid field %q for grouping", field)}) + return "" + } + return sql.Avg(s.C(field)) + } +} + +// Min applies the "min" aggregation function on the given field of each group. +func Min(field string) AggregateFunc { + return func(s *sql.Selector, check func(string) bool) string { + if !check(field) { + s.AddError(&ValidationError{Name: field, err: fmt.Errorf("invalid field %q for grouping", field)}) + return "" + } + return sql.Min(s.C(field)) + } +} + +// Sum applies the "sum" aggregation function on the given field of each group. +func Sum(field string) AggregateFunc { + return func(s *sql.Selector, check func(string) bool) string { + if !check(field) { + s.AddError(&ValidationError{Name: field, err: fmt.Errorf("invalid field %q for grouping", field)}) + return "" + } + return sql.Sum(s.C(field)) + } +} + +// ValidationError is returned when validating a field fails. +type ValidationError struct { + Name string // Field or edge name. + err error +} + +// Error implements the error interface. +func (e *ValidationError) Error() string { + return e.err.Error() +} + +// Unwrap implements the errors.Wrapper interface. +func (e *ValidationError) Unwrap() error { + return e.err +} +
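A short sketch of the Asc/Desc ordering helpers defined above (illustrative; client and ctx are assumed to exist):

	// Fetch the ten most recently created decisions, newest first.
	latest, err := client.Decision.Query().
		Order(ent.Desc(decision.FieldCreatedAt)).
		Limit(10).
		All(ctx)

+// IsValidationError returns a boolean indicating whether the error is a validation error.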
+func IsValidationError(err error) bool { + if err == nil { + return false + } + var e *ValidationError + return errors.As(err, &e) +} + +// NotFoundError is returned when trying to fetch a specific entity and it was not found in the database. +type NotFoundError struct { + label string +} + +// Error implements the error interface. +func (e *NotFoundError) Error() string { + return "ent: " + e.label + " not found" +} + +// IsNotFound returns a boolean indicating whether the error is a not found error. +func IsNotFound(err error) bool { + if err == nil { + return false + } + var e *NotFoundError + return errors.As(err, &e) +} + +// MaskNotFound masks not found errors. +func MaskNotFound(err error) error { + if IsNotFound(err) { + return nil + } + return err +} + +// NotSingularError is returned when trying to fetch a singular entity and more than one was found in the database. +type NotSingularError struct { + label string +} + +// Error implements the error interface. +func (e *NotSingularError) Error() string { + return "ent: " + e.label + " not singular" +} + +// IsNotSingular returns a boolean indicating whether the error is a not singular error. +func IsNotSingular(err error) bool { + if err == nil { + return false + } + var e *NotSingularError + return errors.As(err, &e) +} + +// NotLoadedError is returned when trying to get a node that was not loaded by the query. +type NotLoadedError struct { + edge string +} + +// Error implements the error interface. +func (e *NotLoadedError) Error() string { + return "ent: " + e.edge + " edge was not loaded" +} + +// IsNotLoaded returns a boolean indicating whether the error is a not loaded error. +func IsNotLoaded(err error) bool { + if err == nil { + return false + } + var e *NotLoadedError + return errors.As(err, &e) +} + +// ConstraintError is returned when trying to create/update one or more entities and +// one or more of their constraints failed. For example, violation of edge or +// field uniqueness. +type ConstraintError struct { + msg string + wrap error +} + +// Error implements the error interface. +func (e ConstraintError) Error() string { + return "ent: constraint failed: " + e.msg +} + +// Unwrap implements the errors.Wrapper interface. +func (e *ConstraintError) Unwrap() error { + return e.wrap +} + +// IsConstraintError returns a boolean indicating whether the error is a constraint failure. +func IsConstraintError(err error) bool { + if err == nil { + return false + } + var e *ConstraintError + return errors.As(err, &e) +} + +func isSQLConstraintError(err error) (*ConstraintError, bool) { + var ( + msg = err.Error() + // error format per dialect. + errors = [...]string{ + "Error 1062", // MySQL 1062 error (ER_DUP_ENTRY). + "UNIQUE constraint failed", // SQLite. + "duplicate key value violates unique constraint", // PostgreSQL. + } + ) + if _, ok := err.(*sqlgraph.ConstraintError); ok { + return &ConstraintError{msg, err}, true + } + for i := range errors { + if strings.Contains(msg, errors[i]) { + return &ConstraintError{msg, err}, true + } + } + return nil, false +} +
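A sketch of how callers typically branch on the error helpers above (illustrative only; client, ctx, and id are assumed):

	d, err := client.Decision.Get(ctx, id)
	switch {
	case ent.IsNotFound(err):
		// No decision with this id exists.
		log.Printf("decision %d not found", id)
	case ent.IsConstraintError(err):
		log.Printf("constraint violation: %v", err)
	case err != nil:
		log.Fatalf("query failed: %v", err)
	default:
		log.Printf("decision value: %s", d.Value)
	}

+// rollback calls tx.Rollback and wraps the given error with the rollback error, if one occurred.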
+func rollback(tx dialect.Tx, err error) error { + if rerr := tx.Rollback(); rerr != nil { + err = fmt.Errorf("%s: %v", err.Error(), rerr) + } + if err, ok := isSQLConstraintError(err); ok { + return err + } + return err +} diff --git a/pkg/database/ent/enttest/enttest.go b/pkg/database/ent/enttest/enttest.go new file mode 100644 index 000000000..eaf258a47 --- /dev/null +++ b/pkg/database/ent/enttest/enttest.go @@ -0,0 +1,78 @@ +// Code generated by entc, DO NOT EDIT. + +package enttest + +import ( + "context" + + "github.com/crowdsecurity/crowdsec/pkg/database/ent" + // required by schema hooks. + _ "github.com/crowdsecurity/crowdsec/pkg/database/ent/runtime" + + "github.com/facebook/ent/dialect/sql/schema" +) + +type ( + // TestingT is the interface that is shared between + // testing.T and testing.B and used by enttest. + TestingT interface { + FailNow() + Error(...interface{}) + } + + // Option configures client creation. + Option func(*options) + + options struct { + opts []ent.Option + migrateOpts []schema.MigrateOption + } +) + +// WithOptions forwards options to client creation. +func WithOptions(opts ...ent.Option) Option { + return func(o *options) { + o.opts = append(o.opts, opts...) + } +} + +// WithMigrateOptions forwards options to auto migration. +func WithMigrateOptions(opts ...schema.MigrateOption) Option { + return func(o *options) { + o.migrateOpts = append(o.migrateOpts, opts...) + } +} + +func newOptions(opts []Option) *options { + o := &options{} + for _, opt := range opts { + opt(o) + } + return o +} + +// Open calls ent.Open and auto-runs the migration. +func Open(t TestingT, driverName, dataSourceName string, opts ...Option) *ent.Client { + o := newOptions(opts) + c, err := ent.Open(driverName, dataSourceName, o.opts...) + if err != nil { + t.Error(err) + t.FailNow() + } + if err := c.Schema.Create(context.Background(), o.migrateOpts...); err != nil { + t.Error(err) + t.FailNow() + } + return c +} + +// NewClient calls ent.NewClient and auto-runs the migration. +func NewClient(t TestingT, opts ...Option) *ent.Client { + o := newOptions(opts) + c := ent.NewClient(o.opts...) + if err := c.Schema.Create(context.Background(), o.migrateOpts...); err != nil { + t.Error(err) + t.FailNow() + } + return c +} (A usage sketch of these test helpers follows the Event model below.) diff --git a/pkg/database/ent/event.go b/pkg/database/ent/event.go new file mode 100644 index 000000000..f10f0dfba --- /dev/null +++ b/pkg/database/ent/event.go @@ -0,0 +1,166 @@ +// Code generated by entc, DO NOT EDIT. + +package ent + +import ( + "fmt" + "strings" + "time" + + "github.com/crowdsecurity/crowdsec/pkg/database/ent/alert" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/event" + "github.com/facebook/ent/dialect/sql" +) + +// Event is the model entity for the Event schema. +type Event struct { + config `json:"-"` + // ID of the ent. + ID int `json:"id,omitempty"` + // CreatedAt holds the value of the "created_at" field. + CreatedAt time.Time `json:"created_at,omitempty"` + // UpdatedAt holds the value of the "updated_at" field. + UpdatedAt time.Time `json:"updated_at,omitempty"` + // Time holds the value of the "time" field. + Time time.Time `json:"time,omitempty"` + // Serialized holds the value of the "serialized" field. + Serialized string `json:"serialized,omitempty"` + // Edges holds the relations/edges for other nodes in the graph. + // The values are being populated by the EventQuery when eager-loading is set. + Edges EventEdges `json:"edges"` + alert_events *int +} +
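As referenced above, a hypothetical test sketch for the enttest helpers; the sqlite3 driver name, the in-memory DSN, and a matching blank driver import are assumptions, not part of this patch:

	func TestWithEnt(t *testing.T) {
		// Open creates the client and runs the schema migration, failing the test on error.
		client := enttest.Open(t, "sqlite3", "file:ent?mode=memory&cache=shared&_fk=1")
		defer client.Close()
		// ... exercise client.Decision, client.Event, and friends ...
	}

+// EventEdges holds the relations/edges for other nodes in the graph.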
+type EventEdges struct { + // Owner holds the value of the owner edge. + Owner *Alert + // loadedTypes holds the information for reporting if a + // type was loaded (or requested) in eager-loading or not. + loadedTypes [1]bool +} + +// OwnerOrErr returns the Owner value or an error if the edge +// was not loaded in eager-loading, or loaded but was not found. +func (e EventEdges) OwnerOrErr() (*Alert, error) { + if e.loadedTypes[0] { + if e.Owner == nil { + // The edge owner was loaded in eager-loading, + // but was not found. + return nil, &NotFoundError{label: alert.Label} + } + return e.Owner, nil + } + return nil, &NotLoadedError{edge: "owner"} +} + +// scanValues returns the types for scanning values from sql.Rows. +func (*Event) scanValues() []interface{} { + return []interface{}{ + &sql.NullInt64{}, // id + &sql.NullTime{}, // created_at + &sql.NullTime{}, // updated_at + &sql.NullTime{}, // time + &sql.NullString{}, // serialized + } +} + +// fkValues returns the types for scanning foreign-key values from sql.Rows. +func (*Event) fkValues() []interface{} { + return []interface{}{ + &sql.NullInt64{}, // alert_events + } +} + +// assignValues assigns the values that were returned from sql.Rows (after scanning) +// to the Event fields. +func (e *Event) assignValues(values ...interface{}) error { + if m, n := len(values), len(event.Columns); m < n { + return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) + } + value, ok := values[0].(*sql.NullInt64) + if !ok { + return fmt.Errorf("unexpected type %T for field id", value) + } + e.ID = int(value.Int64) + values = values[1:] + if value, ok := values[0].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field created_at", values[0]) + } else if value.Valid { + e.CreatedAt = value.Time + } + if value, ok := values[1].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field updated_at", values[1]) + } else if value.Valid { + e.UpdatedAt = value.Time + } + if value, ok := values[2].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field time", values[2]) + } else if value.Valid { + e.Time = value.Time + } + if value, ok := values[3].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field serialized", values[3]) + } else if value.Valid { + e.Serialized = value.String + } + values = values[4:] + if len(values) == len(event.ForeignKeys) { + if value, ok := values[0].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for edge-field alert_events", value) + } else if value.Valid { + e.alert_events = new(int) + *e.alert_events = int(value.Int64) + } + } + return nil +} + +// QueryOwner queries the owner edge of the Event. +func (e *Event) QueryOwner() *AlertQuery { + return (&EventClient{config: e.config}).QueryOwner(e) +} + +// Update returns a builder for updating this Event. +// Note that you need to call Event.Unwrap() before calling this method, if this Event +// was returned from a transaction, and the transaction was committed or rolled back. +func (e *Event) Update() *EventUpdateOne { + return (&EventClient{config: e.config}).UpdateOne(e) +} + +// Unwrap unwraps the entity that was returned from a transaction after it was closed, +// so that all next queries will be executed through the driver which created the transaction. +func (e *Event) Unwrap() *Event { + tx, ok := e.config.driver.(*txDriver) + if !ok { + panic("ent: Event is not a transactional entity") + } + e.config.driver = tx.drv + return e +} + +// String implements the fmt.Stringer.
+func (e *Event) String() string { + var builder strings.Builder + builder.WriteString("Event(") + builder.WriteString(fmt.Sprintf("id=%v", e.ID)) + builder.WriteString(", created_at=") + builder.WriteString(e.CreatedAt.Format(time.ANSIC)) + builder.WriteString(", updated_at=") + builder.WriteString(e.UpdatedAt.Format(time.ANSIC)) + builder.WriteString(", time=") + builder.WriteString(e.Time.Format(time.ANSIC)) + builder.WriteString(", serialized=") + builder.WriteString(e.Serialized) + builder.WriteByte(')') + return builder.String() +} + +// Events is a parsable slice of Event. +type Events []*Event + +func (e Events) config(cfg config) { + for _i := range e { + e[_i].config = cfg + } +} diff --git a/pkg/database/ent/event/event.go b/pkg/database/ent/event/event.go new file mode 100644 index 000000000..9ae9d43e8 --- /dev/null +++ b/pkg/database/ent/event/event.go @@ -0,0 +1,73 @@ +// Code generated by entc, DO NOT EDIT. + +package event + +import ( + "time" +) + +const ( + // Label holds the string label denoting the event type in the database. + Label = "event" + // FieldID holds the string denoting the id field in the database. + FieldID = "id" + // FieldCreatedAt holds the string denoting the created_at field in the database. + FieldCreatedAt = "created_at" + // FieldUpdatedAt holds the string denoting the updated_at field in the database. + FieldUpdatedAt = "updated_at" + // FieldTime holds the string denoting the time field in the database. + FieldTime = "time" + // FieldSerialized holds the string denoting the serialized field in the database. + FieldSerialized = "serialized" + + // EdgeOwner holds the string denoting the owner edge name in mutations. + EdgeOwner = "owner" + + // Table holds the table name of the event in the database. + Table = "events" + // OwnerTable is the table that holds the owner relation/edge. + OwnerTable = "events" + // OwnerInverseTable is the table name for the Alert entity. + // It exists in this package in order to avoid circular dependency with the "alert" package. + OwnerInverseTable = "alerts" + // OwnerColumn is the table column denoting the owner relation/edge. + OwnerColumn = "alert_events" +) + +// Columns holds all SQL columns for event fields. +var Columns = []string{ + FieldID, + FieldCreatedAt, + FieldUpdatedAt, + FieldTime, + FieldSerialized, +} + +// ForeignKeys holds the SQL foreign-keys that are owned by the Event type. +var ForeignKeys = []string{ + "alert_events", +} + +// ValidColumn reports if the column name is valid (part of the table columns). +func ValidColumn(column string) bool { + for i := range Columns { + if column == Columns[i] { + return true + } + } + for i := range ForeignKeys { + if column == ForeignKeys[i] { + return true + } + } + return false +} + +var ( + // DefaultCreatedAt holds the default value on creation for the created_at field. + DefaultCreatedAt func() time.Time + // DefaultUpdatedAt holds the default value on creation for the updated_at field. + DefaultUpdatedAt func() time.Time + // SerializedValidator is a validator for the "serialized" field. It is called by the builders before save. + SerializedValidator func(string) error +) diff --git a/pkg/database/ent/event/where.go b/pkg/database/ent/event/where.go new file mode 100644 index 000000000..8e057d7a3 --- /dev/null +++ b/pkg/database/ent/event/where.go @@ -0,0 +1,521 @@ +// Code generated by entc, DO NOT EDIT.
+ +package event + +import ( + "time" + + "github.com/crowdsecurity/crowdsec/pkg/database/ent/predicate" + "github.com/facebook/ent/dialect/sql" + "github.com/facebook/ent/dialect/sql/sqlgraph" +) + +// ID filters vertices based on their identifier. +func ID(id int) predicate.Event { + return predicate.Event(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldID), id)) + }) +} + +// IDEQ applies the EQ predicate on the ID field. +func IDEQ(id int) predicate.Event { + return predicate.Event(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldID), id)) + }) +} + +// IDNEQ applies the NEQ predicate on the ID field. +func IDNEQ(id int) predicate.Event { + return predicate.Event(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldID), id)) + }) +} + +// IDIn applies the In predicate on the ID field. +func IDIn(ids ...int) predicate.Event { + return predicate.Event(func(s *sql.Selector) { + // if no arguments were provided, append the FALSE constant, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(ids) == 0 { + s.Where(sql.False()) + return + } + v := make([]interface{}, len(ids)) + for i := range v { + v[i] = ids[i] + } + s.Where(sql.In(s.C(FieldID), v...)) + }) +} + +// IDNotIn applies the NotIn predicate on the ID field. +func IDNotIn(ids ...int) predicate.Event { + return predicate.Event(func(s *sql.Selector) { + // if no arguments were provided, append the FALSE constant, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(ids) == 0 { + s.Where(sql.False()) + return + } + v := make([]interface{}, len(ids)) + for i := range v { + v[i] = ids[i] + } + s.Where(sql.NotIn(s.C(FieldID), v...)) + }) +} + +// IDGT applies the GT predicate on the ID field. +func IDGT(id int) predicate.Event { + return predicate.Event(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldID), id)) + }) +} + +// IDGTE applies the GTE predicate on the ID field. +func IDGTE(id int) predicate.Event { + return predicate.Event(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldID), id)) + }) +} + +// IDLT applies the LT predicate on the ID field. +func IDLT(id int) predicate.Event { + return predicate.Event(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldID), id)) + }) +} + +// IDLTE applies the LTE predicate on the ID field. +func IDLTE(id int) predicate.Event { + return predicate.Event(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldID), id)) + }) +} + +// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ. +func CreatedAt(v time.Time) predicate.Event { + return predicate.Event(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldCreatedAt), v)) + }) +} + +// UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ. +func UpdatedAt(v time.Time) predicate.Event { + return predicate.Event(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldUpdatedAt), v)) + }) +} + +// Time applies equality check predicate on the "time" field. It's identical to TimeEQ. +func Time(v time.Time) predicate.Event { + return predicate.Event(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldTime), v)) + }) +} + +// Serialized applies equality check predicate on the "serialized" field. It's identical to SerializedEQ. +func Serialized(v string) predicate.Event { + return predicate.Event(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldSerialized), v)) + }) +} + +// CreatedAtEQ applies the EQ predicate on the "created_at" field.
+func CreatedAtEQ(v time.Time) predicate.Event { + return predicate.Event(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldCreatedAt), v)) + }) +} + +// CreatedAtNEQ applies the NEQ predicate on the "created_at" field. +func CreatedAtNEQ(v time.Time) predicate.Event { + return predicate.Event(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldCreatedAt), v)) + }) +} + +// CreatedAtIn applies the In predicate on the "created_at" field. +func CreatedAtIn(vs ...time.Time) predicate.Event { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Event(func(s *sql.Selector) { + // if no arguments were provided, append the FALSE constant, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(v) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.In(s.C(FieldCreatedAt), v...)) + }) +} + +// CreatedAtNotIn applies the NotIn predicate on the "created_at" field. +func CreatedAtNotIn(vs ...time.Time) predicate.Event { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Event(func(s *sql.Selector) { + // if no arguments were provided, append the FALSE constant, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(v) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.NotIn(s.C(FieldCreatedAt), v...)) + }) +} + +// CreatedAtGT applies the GT predicate on the "created_at" field. +func CreatedAtGT(v time.Time) predicate.Event { + return predicate.Event(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldCreatedAt), v)) + }) +} + +// CreatedAtGTE applies the GTE predicate on the "created_at" field. +func CreatedAtGTE(v time.Time) predicate.Event { + return predicate.Event(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldCreatedAt), v)) + }) +} + +// CreatedAtLT applies the LT predicate on the "created_at" field. +func CreatedAtLT(v time.Time) predicate.Event { + return predicate.Event(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldCreatedAt), v)) + }) +} + +// CreatedAtLTE applies the LTE predicate on the "created_at" field. +func CreatedAtLTE(v time.Time) predicate.Event { + return predicate.Event(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldCreatedAt), v)) + }) +} + +// UpdatedAtEQ applies the EQ predicate on the "updated_at" field. +func UpdatedAtEQ(v time.Time) predicate.Event { + return predicate.Event(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldUpdatedAt), v)) + }) +} + +// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field. +func UpdatedAtNEQ(v time.Time) predicate.Event { + return predicate.Event(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldUpdatedAt), v)) + }) +} + +// UpdatedAtIn applies the In predicate on the "updated_at" field. +func UpdatedAtIn(vs ...time.Time) predicate.Event { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Event(func(s *sql.Selector) { + // if no arguments were provided, append the FALSE constant, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(v) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.In(s.C(FieldUpdatedAt), v...)) + }) +} + +// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field. +func UpdatedAtNotIn(vs ...time.Time) predicate.Event { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Event(func(s *sql.Selector) { + // if no arguments were provided, append the FALSE constant, + // since we can't apply "IN ()".
This will make this predicate falsy. + if len(v) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.NotIn(s.C(FieldUpdatedAt), v...)) + }) +} + +// UpdatedAtGT applies the GT predicate on the "updated_at" field. +func UpdatedAtGT(v time.Time) predicate.Event { + return predicate.Event(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldUpdatedAt), v)) + }) +} + +// UpdatedAtGTE applies the GTE predicate on the "updated_at" field. +func UpdatedAtGTE(v time.Time) predicate.Event { + return predicate.Event(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldUpdatedAt), v)) + }) +} + +// UpdatedAtLT applies the LT predicate on the "updated_at" field. +func UpdatedAtLT(v time.Time) predicate.Event { + return predicate.Event(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldUpdatedAt), v)) + }) +} + +// UpdatedAtLTE applies the LTE predicate on the "updated_at" field. +func UpdatedAtLTE(v time.Time) predicate.Event { + return predicate.Event(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldUpdatedAt), v)) + }) +} + +// TimeEQ applies the EQ predicate on the "time" field. +func TimeEQ(v time.Time) predicate.Event { + return predicate.Event(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldTime), v)) + }) +} + +// TimeNEQ applies the NEQ predicate on the "time" field. +func TimeNEQ(v time.Time) predicate.Event { + return predicate.Event(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldTime), v)) + }) +} + +// TimeIn applies the In predicate on the "time" field. +func TimeIn(vs ...time.Time) predicate.Event { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Event(func(s *sql.Selector) { + // if no arguments were provided, append the FALSE constant, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(v) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.In(s.C(FieldTime), v...)) + }) +} + +// TimeNotIn applies the NotIn predicate on the "time" field. +func TimeNotIn(vs ...time.Time) predicate.Event { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Event(func(s *sql.Selector) { + // if no arguments were provided, append the FALSE constant, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(v) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.NotIn(s.C(FieldTime), v...)) + }) +} + +// TimeGT applies the GT predicate on the "time" field. +func TimeGT(v time.Time) predicate.Event { + return predicate.Event(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldTime), v)) + }) +} + +// TimeGTE applies the GTE predicate on the "time" field. +func TimeGTE(v time.Time) predicate.Event { + return predicate.Event(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldTime), v)) + }) +} + +// TimeLT applies the LT predicate on the "time" field. +func TimeLT(v time.Time) predicate.Event { + return predicate.Event(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldTime), v)) + }) +} + +// TimeLTE applies the LTE predicate on the "time" field. +func TimeLTE(v time.Time) predicate.Event { + return predicate.Event(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldTime), v)) + }) +} + +// SerializedEQ applies the EQ predicate on the "serialized" field. +func SerializedEQ(v string) predicate.Event { + return predicate.Event(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldSerialized), v)) + }) +} + +// SerializedNEQ applies the NEQ predicate on the "serialized" field.
+func SerializedNEQ(v string) predicate.Event { + return predicate.Event(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldSerialized), v)) + }) +} + +// SerializedIn applies the In predicate on the "serialized" field. +func SerializedIn(vs ...string) predicate.Event { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Event(func(s *sql.Selector) { + // if no arguments were provided, append the FALSE constant, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(v) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.In(s.C(FieldSerialized), v...)) + }) +} + +// SerializedNotIn applies the NotIn predicate on the "serialized" field. +func SerializedNotIn(vs ...string) predicate.Event { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Event(func(s *sql.Selector) { + // if no arguments were provided, append the FALSE constant, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(v) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.NotIn(s.C(FieldSerialized), v...)) + }) +} + +// SerializedGT applies the GT predicate on the "serialized" field. +func SerializedGT(v string) predicate.Event { + return predicate.Event(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldSerialized), v)) + }) +} + +// SerializedGTE applies the GTE predicate on the "serialized" field. +func SerializedGTE(v string) predicate.Event { + return predicate.Event(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldSerialized), v)) + }) +} + +// SerializedLT applies the LT predicate on the "serialized" field. +func SerializedLT(v string) predicate.Event { + return predicate.Event(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldSerialized), v)) + }) +} + +// SerializedLTE applies the LTE predicate on the "serialized" field. +func SerializedLTE(v string) predicate.Event { + return predicate.Event(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldSerialized), v)) + }) +} + +// SerializedContains applies the Contains predicate on the "serialized" field. +func SerializedContains(v string) predicate.Event { + return predicate.Event(func(s *sql.Selector) { + s.Where(sql.Contains(s.C(FieldSerialized), v)) + }) +} + +// SerializedHasPrefix applies the HasPrefix predicate on the "serialized" field. +func SerializedHasPrefix(v string) predicate.Event { + return predicate.Event(func(s *sql.Selector) { + s.Where(sql.HasPrefix(s.C(FieldSerialized), v)) + }) +} + +// SerializedHasSuffix applies the HasSuffix predicate on the "serialized" field. +func SerializedHasSuffix(v string) predicate.Event { + return predicate.Event(func(s *sql.Selector) { + s.Where(sql.HasSuffix(s.C(FieldSerialized), v)) + }) +} + +// SerializedEqualFold applies the EqualFold predicate on the "serialized" field. +func SerializedEqualFold(v string) predicate.Event { + return predicate.Event(func(s *sql.Selector) { + s.Where(sql.EqualFold(s.C(FieldSerialized), v)) + }) +} + +// SerializedContainsFold applies the ContainsFold predicate on the "serialized" field. +func SerializedContainsFold(v string) predicate.Event { + return predicate.Event(func(s *sql.Selector) { + s.Where(sql.ContainsFold(s.C(FieldSerialized), v)) + }) +} +
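To make the string predicates above concrete, a small illustrative query (client and ctx are assumed; the searched value is only an example):

	// Count events whose serialized payload mentions a given IP.
	n, err := client.Event.Query().
		Where(event.SerializedContains("1.1.1.172")).
		Count(ctx)

+// HasOwner applies the HasEdge predicate on the "owner" edge.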
+func HasOwner() predicate.Event { + return predicate.Event(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(OwnerTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, OwnerTable, OwnerColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasOwnerWith applies the HasEdge predicate on the "owner" edge with given conditions (other predicates). +func HasOwnerWith(preds ...predicate.Alert) predicate.Event { + return predicate.Event(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(OwnerInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, OwnerTable, OwnerColumn), + ) + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// And groups a list of predicates with the AND operator between them. +func And(predicates ...predicate.Event) predicate.Event { + return predicate.Event(func(s *sql.Selector) { + s1 := s.Clone().SetP(nil) + for _, p := range predicates { + p(s1) + } + s.Where(s1.P()) + }) +} + +// Or groups a list of predicates with the OR operator between them. +func Or(predicates ...predicate.Event) predicate.Event { + return predicate.Event(func(s *sql.Selector) { + s1 := s.Clone().SetP(nil) + for i, p := range predicates { + if i > 0 { + s1.Or() + } + p(s1) + } + s.Where(s1.P()) + }) +} + +// Not applies the not operator on the given predicate. +func Not(p predicate.Event) predicate.Event { + return predicate.Event(func(s *sql.Selector) { + p(s.Not()) + }) +} diff --git a/pkg/database/ent/event_create.go b/pkg/database/ent/event_create.go new file mode 100644 index 000000000..ca8c8153d --- /dev/null +++ b/pkg/database/ent/event_create.go @@ -0,0 +1,310 @@ +// Code generated by entc, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "time" + + "github.com/crowdsecurity/crowdsec/pkg/database/ent/alert" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/event" + "github.com/facebook/ent/dialect/sql/sqlgraph" + "github.com/facebook/ent/schema/field" +) + +// EventCreate is the builder for creating an Event entity. +type EventCreate struct { + config + mutation *EventMutation + hooks []Hook +} + +// SetCreatedAt sets the created_at field. +func (ec *EventCreate) SetCreatedAt(t time.Time) *EventCreate { + ec.mutation.SetCreatedAt(t) + return ec +} + +// SetNillableCreatedAt sets the created_at field if the given value is not nil. +func (ec *EventCreate) SetNillableCreatedAt(t *time.Time) *EventCreate { + if t != nil { + ec.SetCreatedAt(*t) + } + return ec +} + +// SetUpdatedAt sets the updated_at field. +func (ec *EventCreate) SetUpdatedAt(t time.Time) *EventCreate { + ec.mutation.SetUpdatedAt(t) + return ec +} + +// SetNillableUpdatedAt sets the updated_at field if the given value is not nil. +func (ec *EventCreate) SetNillableUpdatedAt(t *time.Time) *EventCreate { + if t != nil { + ec.SetUpdatedAt(*t) + } + return ec +} + +// SetTime sets the time field. +func (ec *EventCreate) SetTime(t time.Time) *EventCreate { + ec.mutation.SetTime(t) + return ec +} + +// SetSerialized sets the serialized field. +func (ec *EventCreate) SetSerialized(s string) *EventCreate { + ec.mutation.SetSerialized(s) + return ec +} + +// SetOwnerID sets the owner edge to Alert by id. +func (ec *EventCreate) SetOwnerID(id int) *EventCreate { + ec.mutation.SetOwnerID(id) + return ec +} +
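Finally, a hedged sketch combining field and edge predicates from this file; client and ctx are assumed, and alert.SimulatedEQ is an assumed predicate on the alert package rather than something shown in this diff:

	// Events from the last hour whose owning alert is marked simulated.
	evts, err := client.Event.Query().
		Where(
			event.TimeGTE(time.Now().Add(-time.Hour)),
			event.HasOwnerWith(alert.SimulatedEQ(true)),
		).
		All(ctx)

+// SetNillableOwnerID sets the owner edge to Alert by id if the given value is not nil.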
+func (ec *EventCreate) SetNillableOwnerID(id *int) *EventCreate { + if id != nil { + ec = ec.SetOwnerID(*id) + } + return ec +} + +// SetOwner sets the owner edge to Alert. +func (ec *EventCreate) SetOwner(a *Alert) *EventCreate { + return ec.SetOwnerID(a.ID) +} + +// Mutation returns the EventMutation object of the builder. +func (ec *EventCreate) Mutation() *EventMutation { + return ec.mutation +} + +// Save creates the Event in the database. +func (ec *EventCreate) Save(ctx context.Context) (*Event, error) { + var ( + err error + node *Event + ) + ec.defaults() + if len(ec.hooks) == 0 { + if err = ec.check(); err != nil { + return nil, err + } + node, err = ec.sqlSave(ctx) + } else { + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*EventMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err = ec.check(); err != nil { + return nil, err + } + ec.mutation = mutation + node, err = ec.sqlSave(ctx) + mutation.done = true + return node, err + }) + for i := len(ec.hooks) - 1; i >= 0; i-- { + mut = ec.hooks[i](mut) + } + if _, err := mut.Mutate(ctx, ec.mutation); err != nil { + return nil, err + } + } + return node, err +} + +// SaveX calls Save and panics if Save returns an error. +func (ec *EventCreate) SaveX(ctx context.Context) *Event { + v, err := ec.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// defaults sets the default values of the builder before save. +func (ec *EventCreate) defaults() { + if _, ok := ec.mutation.CreatedAt(); !ok { + v := event.DefaultCreatedAt() + ec.mutation.SetCreatedAt(v) + } + if _, ok := ec.mutation.UpdatedAt(); !ok { + v := event.DefaultUpdatedAt() + ec.mutation.SetUpdatedAt(v) + } +} + +// check runs all checks and user-defined validators on the builder. 
+func (ec *EventCreate) check() error { + if _, ok := ec.mutation.CreatedAt(); !ok { + return &ValidationError{Name: "created_at", err: errors.New("ent: missing required field \"created_at\"")} + } + if _, ok := ec.mutation.UpdatedAt(); !ok { + return &ValidationError{Name: "updated_at", err: errors.New("ent: missing required field \"updated_at\"")} + } + if _, ok := ec.mutation.Time(); !ok { + return &ValidationError{Name: "time", err: errors.New("ent: missing required field \"time\"")} + } + if _, ok := ec.mutation.Serialized(); !ok { + return &ValidationError{Name: "serialized", err: errors.New("ent: missing required field \"serialized\"")} + } + if v, ok := ec.mutation.Serialized(); ok { + if err := event.SerializedValidator(v); err != nil { + return &ValidationError{Name: "serialized", err: fmt.Errorf("ent: validator failed for field \"serialized\": %w", err)} + } + } + return nil +} + +func (ec *EventCreate) sqlSave(ctx context.Context) (*Event, error) { + _node, _spec := ec.createSpec() + if err := sqlgraph.CreateNode(ctx, ec.driver, _spec); err != nil { + if cerr, ok := isSQLConstraintError(err); ok { + err = cerr + } + return nil, err + } + id := _spec.ID.Value.(int64) + _node.ID = int(id) + return _node, nil +} + +func (ec *EventCreate) createSpec() (*Event, *sqlgraph.CreateSpec) { + var ( + _node = &Event{config: ec.config} + _spec = &sqlgraph.CreateSpec{ + Table: event.Table, + ID: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: event.FieldID, + }, + } + ) + if value, ok := ec.mutation.CreatedAt(); ok { + _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: event.FieldCreatedAt, + }) + _node.CreatedAt = value + } + if value, ok := ec.mutation.UpdatedAt(); ok { + _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: event.FieldUpdatedAt, + }) + _node.UpdatedAt = value + } + if value, ok := ec.mutation.Time(); ok { + _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: event.FieldTime, + }) + _node.Time = value + } + if value, ok := ec.mutation.Serialized(); ok { + _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: event.FieldSerialized, + }) + _node.Serialized = value + } + if nodes := ec.mutation.OwnerIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: event.OwnerTable, + Columns: []string{event.OwnerColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: alert.FieldID, + }, + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges = append(_spec.Edges, edge) + } + return _node, _spec +} + +// EventCreateBulk is the builder for creating a bulk of Event entities. +type EventCreateBulk struct { + config + builders []*EventCreate +} + +// Save creates the Event entities in the database. 
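+//
+// A minimal sketch, assuming a client and ctx in scope and illustrative
+// field values (CreateBulk is the generated client helper that feeds this
+// builder):
+//
+//	events, err := client.Event.CreateBulk(
+//		client.Event.Create().SetTime(time.Now()).SetSerialized("..."),
+//		client.Event.Create().SetTime(time.Now()).SetSerialized("..."),
+//	).Save(ctx)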
+func (ecb *EventCreateBulk) Save(ctx context.Context) ([]*Event, error) { + specs := make([]*sqlgraph.CreateSpec, len(ecb.builders)) + nodes := make([]*Event, len(ecb.builders)) + mutators := make([]Mutator, len(ecb.builders)) + for i := range ecb.builders { + func(i int, root context.Context) { + builder := ecb.builders[i] + builder.defaults() + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*EventMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err := builder.check(); err != nil { + return nil, err + } + builder.mutation = mutation + nodes[i], specs[i] = builder.createSpec() + var err error + if i < len(mutators)-1 { + _, err = mutators[i+1].Mutate(root, ecb.builders[i+1].mutation) + } else { + // Invoke the actual operation on the latest mutation in the chain. + if err = sqlgraph.BatchCreate(ctx, ecb.driver, &sqlgraph.BatchCreateSpec{Nodes: specs}); err != nil { + if cerr, ok := isSQLConstraintError(err); ok { + err = cerr + } + } + } + mutation.done = true + if err != nil { + return nil, err + } + id := specs[i].ID.Value.(int64) + nodes[i].ID = int(id) + return nodes[i], nil + }) + for i := len(builder.hooks) - 1; i >= 0; i-- { + mut = builder.hooks[i](mut) + } + mutators[i] = mut + }(i, ctx) + } + if len(mutators) > 0 { + if _, err := mutators[0].Mutate(ctx, ecb.builders[0].mutation); err != nil { + return nil, err + } + } + return nodes, nil +} + +// SaveX calls Save and panics if Save returns an error. +func (ecb *EventCreateBulk) SaveX(ctx context.Context) []*Event { + v, err := ecb.Save(ctx) + if err != nil { + panic(err) + } + return v +} diff --git a/pkg/database/ent/event_delete.go b/pkg/database/ent/event_delete.go new file mode 100644 index 000000000..71a7753b8 --- /dev/null +++ b/pkg/database/ent/event_delete.go @@ -0,0 +1,109 @@ +// Code generated by entc, DO NOT EDIT. + +package ent + +import ( + "context" + "fmt" + + "github.com/crowdsecurity/crowdsec/pkg/database/ent/event" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/predicate" + "github.com/facebook/ent/dialect/sql" + "github.com/facebook/ent/dialect/sql/sqlgraph" + "github.com/facebook/ent/schema/field" +) + +// EventDelete is the builder for deleting a Event entity. +type EventDelete struct { + config + hooks []Hook + mutation *EventMutation + predicates []predicate.Event +} + +// Where adds a new predicate to the delete builder. +func (ed *EventDelete) Where(ps ...predicate.Event) *EventDelete { + ed.predicates = append(ed.predicates, ps...) + return ed +} + +// Exec executes the deletion query and returns how many vertices were deleted. +func (ed *EventDelete) Exec(ctx context.Context) (int, error) { + var ( + err error + affected int + ) + if len(ed.hooks) == 0 { + affected, err = ed.sqlExec(ctx) + } else { + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*EventMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + ed.mutation = mutation + affected, err = ed.sqlExec(ctx) + mutation.done = true + return affected, err + }) + for i := len(ed.hooks) - 1; i >= 0; i-- { + mut = ed.hooks[i](mut) + } + if _, err := mut.Mutate(ctx, ed.mutation); err != nil { + return 0, err + } + } + return affected, err +} + +// ExecX is like Exec, but panics if an error occurs. 
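+//
+// e.g. a sketch of deleting by predicate (client, ctx and the predicate
+// value are assumptions for illustration):
+//
+//	n := client.Event.Delete().
+//		Where(event.SerializedContains("test")).
+//		ExecX(ctx)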
+func (ed *EventDelete) ExecX(ctx context.Context) int { + n, err := ed.Exec(ctx) + if err != nil { + panic(err) + } + return n +} + +func (ed *EventDelete) sqlExec(ctx context.Context) (int, error) { + _spec := &sqlgraph.DeleteSpec{ + Node: &sqlgraph.NodeSpec{ + Table: event.Table, + ID: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: event.FieldID, + }, + }, + } + if ps := ed.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + return sqlgraph.DeleteNodes(ctx, ed.driver, _spec) +} + +// EventDeleteOne is the builder for deleting a single Event entity. +type EventDeleteOne struct { + ed *EventDelete +} + +// Exec executes the deletion query. +func (edo *EventDeleteOne) Exec(ctx context.Context) error { + n, err := edo.ed.Exec(ctx) + switch { + case err != nil: + return err + case n == 0: + return &NotFoundError{event.Label} + default: + return nil + } +} + +// ExecX is like Exec, but panics if an error occurs. +func (edo *EventDeleteOne) ExecX(ctx context.Context) { + edo.ed.ExecX(ctx) +} diff --git a/pkg/database/ent/event_query.go b/pkg/database/ent/event_query.go new file mode 100644 index 000000000..002c3f88c --- /dev/null +++ b/pkg/database/ent/event_query.go @@ -0,0 +1,957 @@ +// Code generated by entc, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "math" + + "github.com/crowdsecurity/crowdsec/pkg/database/ent/alert" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/event" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/predicate" + "github.com/facebook/ent/dialect/sql" + "github.com/facebook/ent/dialect/sql/sqlgraph" + "github.com/facebook/ent/schema/field" +) + +// EventQuery is the builder for querying Event entities. +type EventQuery struct { + config + limit *int + offset *int + order []OrderFunc + unique []string + predicates []predicate.Event + // eager-loading edges. + withOwner *AlertQuery + withFKs bool + // intermediate query (i.e. traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Where adds a new predicate for the builder. +func (eq *EventQuery) Where(ps ...predicate.Event) *EventQuery { + eq.predicates = append(eq.predicates, ps...) + return eq +} + +// Limit adds a limit step to the query. +func (eq *EventQuery) Limit(limit int) *EventQuery { + eq.limit = &limit + return eq +} + +// Offset adds an offset step to the query. +func (eq *EventQuery) Offset(offset int) *EventQuery { + eq.offset = &offset + return eq +} + +// Order adds an order step to the query. +func (eq *EventQuery) Order(o ...OrderFunc) *EventQuery { + eq.order = append(eq.order, o...) + return eq +} + +// QueryOwner chains the current query on the owner edge. +func (eq *EventQuery) QueryOwner() *AlertQuery { + query := &AlertQuery{config: eq.config} + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := eq.prepareQuery(ctx); err != nil { + return nil, err + } + selector := eq.sqlQuery() + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(event.Table, event.FieldID, selector), + sqlgraph.To(alert.Table, alert.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, event.OwnerTable, event.OwnerColumn), + ) + fromU = sqlgraph.SetNeighbors(eq.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// First returns the first Event entity in the query. Returns *NotFoundError when no event was found. 
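+//
+// A short usage sketch (client, ctx and the predicate value are assumptions):
+//
+//	ev, err := client.Event.Query().
+//		Where(event.SerializedContains("ssh")).
+//		First(ctx)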
+func (eq *EventQuery) First(ctx context.Context) (*Event, error) {
+	nodes, err := eq.Limit(1).All(ctx)
+	if err != nil {
+		return nil, err
+	}
+	if len(nodes) == 0 {
+		return nil, &NotFoundError{event.Label}
+	}
+	return nodes[0], nil
+}
+
+// FirstX is like First, but panics if an error occurs.
+func (eq *EventQuery) FirstX(ctx context.Context) *Event {
+	node, err := eq.First(ctx)
+	if err != nil && !IsNotFound(err) {
+		panic(err)
+	}
+	return node
+}
+
+// FirstID returns the first Event id in the query. Returns *NotFoundError when no id was found.
+func (eq *EventQuery) FirstID(ctx context.Context) (id int, err error) {
+	var ids []int
+	if ids, err = eq.Limit(1).IDs(ctx); err != nil {
+		return
+	}
+	if len(ids) == 0 {
+		err = &NotFoundError{event.Label}
+		return
+	}
+	return ids[0], nil
+}
+
+// FirstXID is like FirstID, but panics if an error occurs.
+func (eq *EventQuery) FirstXID(ctx context.Context) int {
+	id, err := eq.FirstID(ctx)
+	if err != nil && !IsNotFound(err) {
+		panic(err)
+	}
+	return id
+}
+
+// Only returns the only Event entity in the query, and returns an error if not exactly one entity was returned.
+func (eq *EventQuery) Only(ctx context.Context) (*Event, error) {
+	nodes, err := eq.Limit(2).All(ctx)
+	if err != nil {
+		return nil, err
+	}
+	switch len(nodes) {
+	case 1:
+		return nodes[0], nil
+	case 0:
+		return nil, &NotFoundError{event.Label}
+	default:
+		return nil, &NotSingularError{event.Label}
+	}
+}
+
+// OnlyX is like Only, but panics if an error occurs.
+func (eq *EventQuery) OnlyX(ctx context.Context) *Event {
+	node, err := eq.Only(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return node
+}
+
+// OnlyID returns the only Event id in the query, and returns an error if not exactly one id was returned.
+func (eq *EventQuery) OnlyID(ctx context.Context) (id int, err error) {
+	var ids []int
+	if ids, err = eq.Limit(2).IDs(ctx); err != nil {
+		return
+	}
+	switch len(ids) {
+	case 1:
+		id = ids[0]
+	case 0:
+		err = &NotFoundError{event.Label}
+	default:
+		err = &NotSingularError{event.Label}
+	}
+	return
+}
+
+// OnlyIDX is like OnlyID, but panics if an error occurs.
+func (eq *EventQuery) OnlyIDX(ctx context.Context) int {
+	id, err := eq.OnlyID(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return id
+}
+
+// All executes the query and returns a list of Events.
+func (eq *EventQuery) All(ctx context.Context) ([]*Event, error) {
+	if err := eq.prepareQuery(ctx); err != nil {
+		return nil, err
+	}
+	return eq.sqlAll(ctx)
+}
+
+// AllX is like All, but panics if an error occurs.
+func (eq *EventQuery) AllX(ctx context.Context) []*Event {
+	nodes, err := eq.All(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return nodes
+}
+
+// IDs executes the query and returns a list of Event ids.
+func (eq *EventQuery) IDs(ctx context.Context) ([]int, error) {
+	var ids []int
+	if err := eq.Select(event.FieldID).Scan(ctx, &ids); err != nil {
+		return nil, err
+	}
+	return ids, nil
+}
+
+// IDsX is like IDs, but panics if an error occurs.
+func (eq *EventQuery) IDsX(ctx context.Context) []int {
+	ids, err := eq.IDs(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return ids
+}
+
+// Count returns the count of the given query.
+func (eq *EventQuery) Count(ctx context.Context) (int, error) {
+	if err := eq.prepareQuery(ctx); err != nil {
+		return 0, err
+	}
+	return eq.sqlCount(ctx)
+}
+
+// CountX is like Count, but panics if an error occurs.
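+//
+// e.g. total := client.Event.Query().CountX(ctx) // client and ctx assumed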
+func (eq *EventQuery) CountX(ctx context.Context) int {
+	count, err := eq.Count(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return count
+}
+
+// Exist returns true if the query has elements in the graph.
+func (eq *EventQuery) Exist(ctx context.Context) (bool, error) {
+	if err := eq.prepareQuery(ctx); err != nil {
+		return false, err
+	}
+	return eq.sqlExist(ctx)
+}
+
+// ExistX is like Exist, but panics if an error occurs.
+func (eq *EventQuery) ExistX(ctx context.Context) bool {
+	exist, err := eq.Exist(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return exist
+}
+
+// Clone returns a duplicate of the query builder, including all associated steps. It can be
+// used to prepare common query builders and use them differently after the clone is made.
+func (eq *EventQuery) Clone() *EventQuery {
+	return &EventQuery{
+		config:     eq.config,
+		limit:      eq.limit,
+		offset:     eq.offset,
+		order:      append([]OrderFunc{}, eq.order...),
+		unique:     append([]string{}, eq.unique...),
+		predicates: append([]predicate.Event{}, eq.predicates...),
+		// clone intermediate query.
+		sql:  eq.sql.Clone(),
+		path: eq.path,
+	}
+}
+
+// WithOwner tells the query-builder to eager-load the nodes that are connected to
+// the "owner" edge. The optional arguments are used to configure the query builder of the edge.
+func (eq *EventQuery) WithOwner(opts ...func(*AlertQuery)) *EventQuery {
+	query := &AlertQuery{config: eq.config}
+	for _, opt := range opts {
+		opt(query)
+	}
+	eq.withOwner = query
+	return eq
+}
+
+// GroupBy is used to group vertices by one or more fields/columns.
+// It is often used with aggregate functions, like: count, max, mean, min, sum.
+//
+// Example:
+//
+//	var v []struct {
+//		CreatedAt time.Time `json:"created_at,omitempty"`
+//		Count int `json:"count,omitempty"`
+//	}
+//
+//	client.Event.Query().
+//		GroupBy(event.FieldCreatedAt).
+//		Aggregate(ent.Count()).
+//		Scan(ctx, &v)
+//
+func (eq *EventQuery) GroupBy(field string, fields ...string) *EventGroupBy {
+	group := &EventGroupBy{config: eq.config}
+	group.fields = append([]string{field}, fields...)
+	group.path = func(ctx context.Context) (prev *sql.Selector, err error) {
+		if err := eq.prepareQuery(ctx); err != nil {
+			return nil, err
+		}
+		return eq.sqlQuery(), nil
+	}
+	return group
+}
+
+// Select one or more fields from the given query.
+//
+// Example:
+//
+//	var v []struct {
+//		CreatedAt time.Time `json:"created_at,omitempty"`
+//	}
+//
+//	client.Event.Query().
+//		Select(event.FieldCreatedAt).
+//		Scan(ctx, &v)
+//
+func (eq *EventQuery) Select(field string, fields ...string) *EventSelect {
+	selector := &EventSelect{config: eq.config}
+	selector.fields = append([]string{field}, fields...)
+	selector.path = func(ctx context.Context) (prev *sql.Selector, err error) {
+		if err := eq.prepareQuery(ctx); err != nil {
+			return nil, err
+		}
+		return eq.sqlQuery(), nil
+	}
+	return selector
+}
+
+func (eq *EventQuery) prepareQuery(ctx context.Context) error {
+	if eq.path != nil {
+		prev, err := eq.path(ctx)
+		if err != nil {
+			return err
+		}
+		eq.sql = prev
+	}
+	return nil
+}
+
+func (eq *EventQuery) sqlAll(ctx context.Context) ([]*Event, error) {
+	var (
+		nodes       = []*Event{}
+		withFKs     = eq.withFKs
+		_spec       = eq.querySpec()
+		loadedTypes = [1]bool{
+			eq.withOwner != nil,
+		}
+	)
+	if eq.withOwner != nil {
+		withFKs = true
+	}
+	if withFKs {
+		_spec.Node.Columns = append(_spec.Node.Columns, event.ForeignKeys...)
+ } + _spec.ScanValues = func() []interface{} { + node := &Event{config: eq.config} + nodes = append(nodes, node) + values := node.scanValues() + if withFKs { + values = append(values, node.fkValues()...) + } + return values + } + _spec.Assign = func(values ...interface{}) error { + if len(nodes) == 0 { + return fmt.Errorf("ent: Assign called without calling ScanValues") + } + node := nodes[len(nodes)-1] + node.Edges.loadedTypes = loadedTypes + return node.assignValues(values...) + } + if err := sqlgraph.QueryNodes(ctx, eq.driver, _spec); err != nil { + return nil, err + } + if len(nodes) == 0 { + return nodes, nil + } + + if query := eq.withOwner; query != nil { + ids := make([]int, 0, len(nodes)) + nodeids := make(map[int][]*Event) + for i := range nodes { + if fk := nodes[i].alert_events; fk != nil { + ids = append(ids, *fk) + nodeids[*fk] = append(nodeids[*fk], nodes[i]) + } + } + query.Where(alert.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return nil, err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return nil, fmt.Errorf(`unexpected foreign-key "alert_events" returned %v`, n.ID) + } + for i := range nodes { + nodes[i].Edges.Owner = n + } + } + } + + return nodes, nil +} + +func (eq *EventQuery) sqlCount(ctx context.Context) (int, error) { + _spec := eq.querySpec() + return sqlgraph.CountNodes(ctx, eq.driver, _spec) +} + +func (eq *EventQuery) sqlExist(ctx context.Context) (bool, error) { + n, err := eq.sqlCount(ctx) + if err != nil { + return false, fmt.Errorf("ent: check existence: %v", err) + } + return n > 0, nil +} + +func (eq *EventQuery) querySpec() *sqlgraph.QuerySpec { + _spec := &sqlgraph.QuerySpec{ + Node: &sqlgraph.NodeSpec{ + Table: event.Table, + Columns: event.Columns, + ID: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: event.FieldID, + }, + }, + From: eq.sql, + Unique: true, + } + if ps := eq.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if limit := eq.limit; limit != nil { + _spec.Limit = *limit + } + if offset := eq.offset; offset != nil { + _spec.Offset = *offset + } + if ps := eq.order; len(ps) > 0 { + _spec.Order = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector, event.ValidColumn) + } + } + } + return _spec +} + +func (eq *EventQuery) sqlQuery() *sql.Selector { + builder := sql.Dialect(eq.driver.Dialect()) + t1 := builder.Table(event.Table) + selector := builder.Select(t1.Columns(event.Columns...)...).From(t1) + if eq.sql != nil { + selector = eq.sql + selector.Select(selector.Columns(event.Columns...)...) + } + for _, p := range eq.predicates { + p(selector) + } + for _, p := range eq.order { + p(selector, event.ValidColumn) + } + if offset := eq.offset; offset != nil { + // limit is mandatory for offset clause. We start + // with default value, and override it below if needed. + selector.Offset(*offset).Limit(math.MaxInt32) + } + if limit := eq.limit; limit != nil { + selector.Limit(*limit) + } + return selector +} + +// EventGroupBy is the builder for group-by Event entities. +type EventGroupBy struct { + config + fields []string + fns []AggregateFunc + // intermediate query (i.e. traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Aggregate adds the given aggregation functions to the group-by query. +func (egb *EventGroupBy) Aggregate(fns ...AggregateFunc) *EventGroupBy { + egb.fns = append(egb.fns, fns...) 
+	return egb
+}
+
+// Scan applies the group-by query and scans the result into the given value.
+func (egb *EventGroupBy) Scan(ctx context.Context, v interface{}) error {
+	query, err := egb.path(ctx)
+	if err != nil {
+		return err
+	}
+	egb.sql = query
+	return egb.sqlScan(ctx, v)
+}
+
+// ScanX is like Scan, but panics if an error occurs.
+func (egb *EventGroupBy) ScanX(ctx context.Context, v interface{}) {
+	if err := egb.Scan(ctx, v); err != nil {
+		panic(err)
+	}
+}
+
+// Strings returns list of strings from group-by. It is only allowed when querying group-by with one field.
+func (egb *EventGroupBy) Strings(ctx context.Context) ([]string, error) {
+	if len(egb.fields) > 1 {
+		return nil, errors.New("ent: EventGroupBy.Strings is not achievable when grouping more than 1 field")
+	}
+	var v []string
+	if err := egb.Scan(ctx, &v); err != nil {
+		return nil, err
+	}
+	return v, nil
+}
+
+// StringsX is like Strings, but panics if an error occurs.
+func (egb *EventGroupBy) StringsX(ctx context.Context) []string {
+	v, err := egb.Strings(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return v
+}
+
+// String returns a single string from group-by. It is only allowed when querying group-by with one field.
+func (egb *EventGroupBy) String(ctx context.Context) (_ string, err error) {
+	var v []string
+	if v, err = egb.Strings(ctx); err != nil {
+		return
+	}
+	switch len(v) {
+	case 1:
+		return v[0], nil
+	case 0:
+		err = &NotFoundError{event.Label}
+	default:
+		err = fmt.Errorf("ent: EventGroupBy.Strings returned %d results when one was expected", len(v))
+	}
+	return
+}
+
+// StringX is like String, but panics if an error occurs.
+func (egb *EventGroupBy) StringX(ctx context.Context) string {
+	v, err := egb.String(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return v
+}
+
+// Ints returns list of ints from group-by. It is only allowed when querying group-by with one field.
+func (egb *EventGroupBy) Ints(ctx context.Context) ([]int, error) {
+	if len(egb.fields) > 1 {
+		return nil, errors.New("ent: EventGroupBy.Ints is not achievable when grouping more than 1 field")
+	}
+	var v []int
+	if err := egb.Scan(ctx, &v); err != nil {
+		return nil, err
+	}
+	return v, nil
+}
+
+// IntsX is like Ints, but panics if an error occurs.
+func (egb *EventGroupBy) IntsX(ctx context.Context) []int {
+	v, err := egb.Ints(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return v
+}
+
+// Int returns a single int from group-by. It is only allowed when querying group-by with one field.
+func (egb *EventGroupBy) Int(ctx context.Context) (_ int, err error) {
+	var v []int
+	if v, err = egb.Ints(ctx); err != nil {
+		return
+	}
+	switch len(v) {
+	case 1:
+		return v[0], nil
+	case 0:
+		err = &NotFoundError{event.Label}
+	default:
+		err = fmt.Errorf("ent: EventGroupBy.Ints returned %d results when one was expected", len(v))
+	}
+	return
+}
+
+// IntX is like Int, but panics if an error occurs.
+func (egb *EventGroupBy) IntX(ctx context.Context) int {
+	v, err := egb.Int(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return v
+}
+
+// Float64s returns list of float64s from group-by. It is only allowed when querying group-by with one field.
+func (egb *EventGroupBy) Float64s(ctx context.Context) ([]float64, error) {
+	if len(egb.fields) > 1 {
+		return nil, errors.New("ent: EventGroupBy.Float64s is not achievable when grouping more than 1 field")
+	}
+	var v []float64
+	if err := egb.Scan(ctx, &v); err != nil {
+		return nil, err
+	}
+	return v, nil
+}
+
+// Float64sX is like Float64s, but panics if an error occurs.
+func (egb *EventGroupBy) Float64sX(ctx context.Context) []float64 {
+	v, err := egb.Float64s(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return v
+}
+
+// Float64 returns a single float64 from group-by. It is only allowed when querying group-by with one field.
+func (egb *EventGroupBy) Float64(ctx context.Context) (_ float64, err error) {
+	var v []float64
+	if v, err = egb.Float64s(ctx); err != nil {
+		return
+	}
+	switch len(v) {
+	case 1:
+		return v[0], nil
+	case 0:
+		err = &NotFoundError{event.Label}
+	default:
+		err = fmt.Errorf("ent: EventGroupBy.Float64s returned %d results when one was expected", len(v))
+	}
+	return
+}
+
+// Float64X is like Float64, but panics if an error occurs.
+func (egb *EventGroupBy) Float64X(ctx context.Context) float64 {
+	v, err := egb.Float64(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return v
+}
+
+// Bools returns list of bools from group-by. It is only allowed when querying group-by with one field.
+func (egb *EventGroupBy) Bools(ctx context.Context) ([]bool, error) {
+	if len(egb.fields) > 1 {
+		return nil, errors.New("ent: EventGroupBy.Bools is not achievable when grouping more than 1 field")
+	}
+	var v []bool
+	if err := egb.Scan(ctx, &v); err != nil {
+		return nil, err
+	}
+	return v, nil
+}
+
+// BoolsX is like Bools, but panics if an error occurs.
+func (egb *EventGroupBy) BoolsX(ctx context.Context) []bool {
+	v, err := egb.Bools(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return v
+}
+
+// Bool returns a single bool from group-by. It is only allowed when querying group-by with one field.
+func (egb *EventGroupBy) Bool(ctx context.Context) (_ bool, err error) {
+	var v []bool
+	if v, err = egb.Bools(ctx); err != nil {
+		return
+	}
+	switch len(v) {
+	case 1:
+		return v[0], nil
+	case 0:
+		err = &NotFoundError{event.Label}
+	default:
+		err = fmt.Errorf("ent: EventGroupBy.Bools returned %d results when one was expected", len(v))
+	}
+	return
+}
+
+// BoolX is like Bool, but panics if an error occurs.
+func (egb *EventGroupBy) BoolX(ctx context.Context) bool {
+	v, err := egb.Bool(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return v
+}
+
+func (egb *EventGroupBy) sqlScan(ctx context.Context, v interface{}) error {
+	for _, f := range egb.fields {
+		if !event.ValidColumn(f) {
+			return &ValidationError{Name: f, err: fmt.Errorf("invalid field %q for group-by", f)}
+		}
+	}
+	selector := egb.sqlQuery()
+	if err := selector.Err(); err != nil {
+		return err
+	}
+	rows := &sql.Rows{}
+	query, args := selector.Query()
+	if err := egb.driver.Query(ctx, query, args, rows); err != nil {
+		return err
+	}
+	defer rows.Close()
+	return sql.ScanSlice(rows, v)
+}
+
+func (egb *EventGroupBy) sqlQuery() *sql.Selector {
+	selector := egb.sql
+	columns := make([]string, 0, len(egb.fields)+len(egb.fns))
+	columns = append(columns, egb.fields...)
+	for _, fn := range egb.fns {
+		columns = append(columns, fn(selector, event.ValidColumn))
+	}
+	return selector.Select(columns...).GroupBy(egb.fields...)
+}
+
+// EventSelect is the builder for select fields of Event entities.
+type EventSelect struct {
+	config
+	fields []string
+	// intermediate query (i.e. traversal path).
+	sql  *sql.Selector
+	path func(context.Context) (*sql.Selector, error)
+}
+
+// Scan applies the selector query and scans the result into the given value.
+func (es *EventSelect) Scan(ctx context.Context, v interface{}) error {
+	query, err := es.path(ctx)
+	if err != nil {
+		return err
+	}
+	es.sql = query
+	return es.sqlScan(ctx, v)
+}
+
+// ScanX is like Scan, but panics if an error occurs.
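+//
+// A sketch of scanning one selected column into a slice (the client, ctx
+// and payloads names are assumptions):
+//
+//	var payloads []string
+//	client.Event.Query().
+//		Select(event.FieldSerialized).
+//		ScanX(ctx, &payloads)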
+func (es *EventSelect) ScanX(ctx context.Context, v interface{}) { + if err := es.Scan(ctx, v); err != nil { + panic(err) + } +} + +// Strings returns list of strings from selector. It is only allowed when selecting one field. +func (es *EventSelect) Strings(ctx context.Context) ([]string, error) { + if len(es.fields) > 1 { + return nil, errors.New("ent: EventSelect.Strings is not achievable when selecting more than 1 field") + } + var v []string + if err := es.Scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// StringsX is like Strings, but panics if an error occurs. +func (es *EventSelect) StringsX(ctx context.Context) []string { + v, err := es.Strings(ctx) + if err != nil { + panic(err) + } + return v +} + +// String returns a single string from selector. It is only allowed when selecting one field. +func (es *EventSelect) String(ctx context.Context) (_ string, err error) { + var v []string + if v, err = es.Strings(ctx); err != nil { + return + } + switch len(v) { + case 1: + return v[0], nil + case 0: + err = &NotFoundError{event.Label} + default: + err = fmt.Errorf("ent: EventSelect.Strings returned %d results when one was expected", len(v)) + } + return +} + +// StringX is like String, but panics if an error occurs. +func (es *EventSelect) StringX(ctx context.Context) string { + v, err := es.String(ctx) + if err != nil { + panic(err) + } + return v +} + +// Ints returns list of ints from selector. It is only allowed when selecting one field. +func (es *EventSelect) Ints(ctx context.Context) ([]int, error) { + if len(es.fields) > 1 { + return nil, errors.New("ent: EventSelect.Ints is not achievable when selecting more than 1 field") + } + var v []int + if err := es.Scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// IntsX is like Ints, but panics if an error occurs. +func (es *EventSelect) IntsX(ctx context.Context) []int { + v, err := es.Ints(ctx) + if err != nil { + panic(err) + } + return v +} + +// Int returns a single int from selector. It is only allowed when selecting one field. +func (es *EventSelect) Int(ctx context.Context) (_ int, err error) { + var v []int + if v, err = es.Ints(ctx); err != nil { + return + } + switch len(v) { + case 1: + return v[0], nil + case 0: + err = &NotFoundError{event.Label} + default: + err = fmt.Errorf("ent: EventSelect.Ints returned %d results when one was expected", len(v)) + } + return +} + +// IntX is like Int, but panics if an error occurs. +func (es *EventSelect) IntX(ctx context.Context) int { + v, err := es.Int(ctx) + if err != nil { + panic(err) + } + return v +} + +// Float64s returns list of float64s from selector. It is only allowed when selecting one field. +func (es *EventSelect) Float64s(ctx context.Context) ([]float64, error) { + if len(es.fields) > 1 { + return nil, errors.New("ent: EventSelect.Float64s is not achievable when selecting more than 1 field") + } + var v []float64 + if err := es.Scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// Float64sX is like Float64s, but panics if an error occurs. +func (es *EventSelect) Float64sX(ctx context.Context) []float64 { + v, err := es.Float64s(ctx) + if err != nil { + panic(err) + } + return v +} + +// Float64 returns a single float64 from selector. It is only allowed when selecting one field. 
+func (es *EventSelect) Float64(ctx context.Context) (_ float64, err error) { + var v []float64 + if v, err = es.Float64s(ctx); err != nil { + return + } + switch len(v) { + case 1: + return v[0], nil + case 0: + err = &NotFoundError{event.Label} + default: + err = fmt.Errorf("ent: EventSelect.Float64s returned %d results when one was expected", len(v)) + } + return +} + +// Float64X is like Float64, but panics if an error occurs. +func (es *EventSelect) Float64X(ctx context.Context) float64 { + v, err := es.Float64(ctx) + if err != nil { + panic(err) + } + return v +} + +// Bools returns list of bools from selector. It is only allowed when selecting one field. +func (es *EventSelect) Bools(ctx context.Context) ([]bool, error) { + if len(es.fields) > 1 { + return nil, errors.New("ent: EventSelect.Bools is not achievable when selecting more than 1 field") + } + var v []bool + if err := es.Scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// BoolsX is like Bools, but panics if an error occurs. +func (es *EventSelect) BoolsX(ctx context.Context) []bool { + v, err := es.Bools(ctx) + if err != nil { + panic(err) + } + return v +} + +// Bool returns a single bool from selector. It is only allowed when selecting one field. +func (es *EventSelect) Bool(ctx context.Context) (_ bool, err error) { + var v []bool + if v, err = es.Bools(ctx); err != nil { + return + } + switch len(v) { + case 1: + return v[0], nil + case 0: + err = &NotFoundError{event.Label} + default: + err = fmt.Errorf("ent: EventSelect.Bools returned %d results when one was expected", len(v)) + } + return +} + +// BoolX is like Bool, but panics if an error occurs. +func (es *EventSelect) BoolX(ctx context.Context) bool { + v, err := es.Bool(ctx) + if err != nil { + panic(err) + } + return v +} + +func (es *EventSelect) sqlScan(ctx context.Context, v interface{}) error { + for _, f := range es.fields { + if !event.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("invalid field %q for selection", f)} + } + } + rows := &sql.Rows{} + query, args := es.sqlQuery().Query() + if err := es.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +func (es *EventSelect) sqlQuery() sql.Querier { + selector := es.sql + selector.Select(selector.Columns(es.fields...)...) + return selector +} diff --git a/pkg/database/ent/event_update.go b/pkg/database/ent/event_update.go new file mode 100644 index 000000000..5ebc11678 --- /dev/null +++ b/pkg/database/ent/event_update.go @@ -0,0 +1,496 @@ +// Code generated by entc, DO NOT EDIT. + +package ent + +import ( + "context" + "fmt" + "time" + + "github.com/crowdsecurity/crowdsec/pkg/database/ent/alert" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/event" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/predicate" + "github.com/facebook/ent/dialect/sql" + "github.com/facebook/ent/dialect/sql/sqlgraph" + "github.com/facebook/ent/schema/field" +) + +// EventUpdate is the builder for updating Event entities. +type EventUpdate struct { + config + hooks []Hook + mutation *EventMutation + predicates []predicate.Event +} + +// Where adds a new predicate for the builder. +func (eu *EventUpdate) Where(ps ...predicate.Event) *EventUpdate { + eu.predicates = append(eu.predicates, ps...) + return eu +} + +// SetCreatedAt sets the created_at field. 
+func (eu *EventUpdate) SetCreatedAt(t time.Time) *EventUpdate { + eu.mutation.SetCreatedAt(t) + return eu +} + +// SetNillableCreatedAt sets the created_at field if the given value is not nil. +func (eu *EventUpdate) SetNillableCreatedAt(t *time.Time) *EventUpdate { + if t != nil { + eu.SetCreatedAt(*t) + } + return eu +} + +// SetUpdatedAt sets the updated_at field. +func (eu *EventUpdate) SetUpdatedAt(t time.Time) *EventUpdate { + eu.mutation.SetUpdatedAt(t) + return eu +} + +// SetNillableUpdatedAt sets the updated_at field if the given value is not nil. +func (eu *EventUpdate) SetNillableUpdatedAt(t *time.Time) *EventUpdate { + if t != nil { + eu.SetUpdatedAt(*t) + } + return eu +} + +// SetTime sets the time field. +func (eu *EventUpdate) SetTime(t time.Time) *EventUpdate { + eu.mutation.SetTime(t) + return eu +} + +// SetSerialized sets the serialized field. +func (eu *EventUpdate) SetSerialized(s string) *EventUpdate { + eu.mutation.SetSerialized(s) + return eu +} + +// SetOwnerID sets the owner edge to Alert by id. +func (eu *EventUpdate) SetOwnerID(id int) *EventUpdate { + eu.mutation.SetOwnerID(id) + return eu +} + +// SetNillableOwnerID sets the owner edge to Alert by id if the given value is not nil. +func (eu *EventUpdate) SetNillableOwnerID(id *int) *EventUpdate { + if id != nil { + eu = eu.SetOwnerID(*id) + } + return eu +} + +// SetOwner sets the owner edge to Alert. +func (eu *EventUpdate) SetOwner(a *Alert) *EventUpdate { + return eu.SetOwnerID(a.ID) +} + +// Mutation returns the EventMutation object of the builder. +func (eu *EventUpdate) Mutation() *EventMutation { + return eu.mutation +} + +// ClearOwner clears the "owner" edge to type Alert. +func (eu *EventUpdate) ClearOwner() *EventUpdate { + eu.mutation.ClearOwner() + return eu +} + +// Save executes the query and returns the number of rows/vertices matched by this operation. +func (eu *EventUpdate) Save(ctx context.Context) (int, error) { + var ( + err error + affected int + ) + if len(eu.hooks) == 0 { + if err = eu.check(); err != nil { + return 0, err + } + affected, err = eu.sqlSave(ctx) + } else { + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*EventMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err = eu.check(); err != nil { + return 0, err + } + eu.mutation = mutation + affected, err = eu.sqlSave(ctx) + mutation.done = true + return affected, err + }) + for i := len(eu.hooks) - 1; i >= 0; i-- { + mut = eu.hooks[i](mut) + } + if _, err := mut.Mutate(ctx, eu.mutation); err != nil { + return 0, err + } + } + return affected, err +} + +// SaveX is like Save, but panics if an error occurs. +func (eu *EventUpdate) SaveX(ctx context.Context) int { + affected, err := eu.Save(ctx) + if err != nil { + panic(err) + } + return affected +} + +// Exec executes the query. +func (eu *EventUpdate) Exec(ctx context.Context) error { + _, err := eu.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (eu *EventUpdate) ExecX(ctx context.Context) { + if err := eu.Exec(ctx); err != nil { + panic(err) + } +} + +// check runs all checks and user-defined validators on the builder. 
+func (eu *EventUpdate) check() error { + if v, ok := eu.mutation.Serialized(); ok { + if err := event.SerializedValidator(v); err != nil { + return &ValidationError{Name: "serialized", err: fmt.Errorf("ent: validator failed for field \"serialized\": %w", err)} + } + } + return nil +} + +func (eu *EventUpdate) sqlSave(ctx context.Context) (n int, err error) { + _spec := &sqlgraph.UpdateSpec{ + Node: &sqlgraph.NodeSpec{ + Table: event.Table, + Columns: event.Columns, + ID: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: event.FieldID, + }, + }, + } + if ps := eu.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := eu.mutation.CreatedAt(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: event.FieldCreatedAt, + }) + } + if value, ok := eu.mutation.UpdatedAt(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: event.FieldUpdatedAt, + }) + } + if value, ok := eu.mutation.Time(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: event.FieldTime, + }) + } + if value, ok := eu.mutation.Serialized(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: event.FieldSerialized, + }) + } + if eu.mutation.OwnerCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: event.OwnerTable, + Columns: []string{event.OwnerColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: alert.FieldID, + }, + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := eu.mutation.OwnerIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: event.OwnerTable, + Columns: []string{event.OwnerColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: alert.FieldID, + }, + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if n, err = sqlgraph.UpdateNodes(ctx, eu.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{event.Label} + } else if cerr, ok := isSQLConstraintError(err); ok { + err = cerr + } + return 0, err + } + return n, nil +} + +// EventUpdateOne is the builder for updating a single Event entity. +type EventUpdateOne struct { + config + hooks []Hook + mutation *EventMutation +} + +// SetCreatedAt sets the created_at field. +func (euo *EventUpdateOne) SetCreatedAt(t time.Time) *EventUpdateOne { + euo.mutation.SetCreatedAt(t) + return euo +} + +// SetNillableCreatedAt sets the created_at field if the given value is not nil. +func (euo *EventUpdateOne) SetNillableCreatedAt(t *time.Time) *EventUpdateOne { + if t != nil { + euo.SetCreatedAt(*t) + } + return euo +} + +// SetUpdatedAt sets the updated_at field. +func (euo *EventUpdateOne) SetUpdatedAt(t time.Time) *EventUpdateOne { + euo.mutation.SetUpdatedAt(t) + return euo +} + +// SetNillableUpdatedAt sets the updated_at field if the given value is not nil. +func (euo *EventUpdateOne) SetNillableUpdatedAt(t *time.Time) *EventUpdateOne { + if t != nil { + euo.SetUpdatedAt(*t) + } + return euo +} + +// SetTime sets the time field. 
+func (euo *EventUpdateOne) SetTime(t time.Time) *EventUpdateOne { + euo.mutation.SetTime(t) + return euo +} + +// SetSerialized sets the serialized field. +func (euo *EventUpdateOne) SetSerialized(s string) *EventUpdateOne { + euo.mutation.SetSerialized(s) + return euo +} + +// SetOwnerID sets the owner edge to Alert by id. +func (euo *EventUpdateOne) SetOwnerID(id int) *EventUpdateOne { + euo.mutation.SetOwnerID(id) + return euo +} + +// SetNillableOwnerID sets the owner edge to Alert by id if the given value is not nil. +func (euo *EventUpdateOne) SetNillableOwnerID(id *int) *EventUpdateOne { + if id != nil { + euo = euo.SetOwnerID(*id) + } + return euo +} + +// SetOwner sets the owner edge to Alert. +func (euo *EventUpdateOne) SetOwner(a *Alert) *EventUpdateOne { + return euo.SetOwnerID(a.ID) +} + +// Mutation returns the EventMutation object of the builder. +func (euo *EventUpdateOne) Mutation() *EventMutation { + return euo.mutation +} + +// ClearOwner clears the "owner" edge to type Alert. +func (euo *EventUpdateOne) ClearOwner() *EventUpdateOne { + euo.mutation.ClearOwner() + return euo +} + +// Save executes the query and returns the updated entity. +func (euo *EventUpdateOne) Save(ctx context.Context) (*Event, error) { + var ( + err error + node *Event + ) + if len(euo.hooks) == 0 { + if err = euo.check(); err != nil { + return nil, err + } + node, err = euo.sqlSave(ctx) + } else { + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*EventMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err = euo.check(); err != nil { + return nil, err + } + euo.mutation = mutation + node, err = euo.sqlSave(ctx) + mutation.done = true + return node, err + }) + for i := len(euo.hooks) - 1; i >= 0; i-- { + mut = euo.hooks[i](mut) + } + if _, err := mut.Mutate(ctx, euo.mutation); err != nil { + return nil, err + } + } + return node, err +} + +// SaveX is like Save, but panics if an error occurs. +func (euo *EventUpdateOne) SaveX(ctx context.Context) *Event { + node, err := euo.Save(ctx) + if err != nil { + panic(err) + } + return node +} + +// Exec executes the query on the entity. +func (euo *EventUpdateOne) Exec(ctx context.Context) error { + _, err := euo.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (euo *EventUpdateOne) ExecX(ctx context.Context) { + if err := euo.Exec(ctx); err != nil { + panic(err) + } +} + +// check runs all checks and user-defined validators on the builder. 
+func (euo *EventUpdateOne) check() error { + if v, ok := euo.mutation.Serialized(); ok { + if err := event.SerializedValidator(v); err != nil { + return &ValidationError{Name: "serialized", err: fmt.Errorf("ent: validator failed for field \"serialized\": %w", err)} + } + } + return nil +} + +func (euo *EventUpdateOne) sqlSave(ctx context.Context) (_node *Event, err error) { + _spec := &sqlgraph.UpdateSpec{ + Node: &sqlgraph.NodeSpec{ + Table: event.Table, + Columns: event.Columns, + ID: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: event.FieldID, + }, + }, + } + id, ok := euo.mutation.ID() + if !ok { + return nil, &ValidationError{Name: "ID", err: fmt.Errorf("missing Event.ID for update")} + } + _spec.Node.ID.Value = id + if value, ok := euo.mutation.CreatedAt(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: event.FieldCreatedAt, + }) + } + if value, ok := euo.mutation.UpdatedAt(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: event.FieldUpdatedAt, + }) + } + if value, ok := euo.mutation.Time(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: event.FieldTime, + }) + } + if value, ok := euo.mutation.Serialized(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: event.FieldSerialized, + }) + } + if euo.mutation.OwnerCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: event.OwnerTable, + Columns: []string{event.OwnerColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: alert.FieldID, + }, + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := euo.mutation.OwnerIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: event.OwnerTable, + Columns: []string{event.OwnerColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: alert.FieldID, + }, + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + _node = &Event{config: euo.config} + _spec.Assign = _node.assignValues + _spec.ScanValues = _node.scanValues() + if err = sqlgraph.UpdateNode(ctx, euo.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{event.Label} + } else if cerr, ok := isSQLConstraintError(err); ok { + err = cerr + } + return nil, err + } + return _node, nil +} diff --git a/pkg/database/ent/hook/hook.go b/pkg/database/ent/hook/hook.go new file mode 100644 index 000000000..964665d4c --- /dev/null +++ b/pkg/database/ent/hook/hook.go @@ -0,0 +1,264 @@ +// Code generated by entc, DO NOT EDIT. + +package hook + +import ( + "context" + "fmt" + + "github.com/crowdsecurity/crowdsec/pkg/database/ent" +) + +// The AlertFunc type is an adapter to allow the use of ordinary +// function as Alert mutator. +type AlertFunc func(context.Context, *ent.AlertMutation) (ent.Value, error) + +// Mutate calls f(ctx, m). +func (f AlertFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { + mv, ok := m.(*ent.AlertMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T. 
expect *ent.AlertMutation", m) + } + return f(ctx, mv) +} + +// The BouncerFunc type is an adapter to allow the use of ordinary +// function as Bouncer mutator. +type BouncerFunc func(context.Context, *ent.BouncerMutation) (ent.Value, error) + +// Mutate calls f(ctx, m). +func (f BouncerFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { + mv, ok := m.(*ent.BouncerMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.BouncerMutation", m) + } + return f(ctx, mv) +} + +// The DecisionFunc type is an adapter to allow the use of ordinary +// function as Decision mutator. +type DecisionFunc func(context.Context, *ent.DecisionMutation) (ent.Value, error) + +// Mutate calls f(ctx, m). +func (f DecisionFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { + mv, ok := m.(*ent.DecisionMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.DecisionMutation", m) + } + return f(ctx, mv) +} + +// The EventFunc type is an adapter to allow the use of ordinary +// function as Event mutator. +type EventFunc func(context.Context, *ent.EventMutation) (ent.Value, error) + +// Mutate calls f(ctx, m). +func (f EventFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { + mv, ok := m.(*ent.EventMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.EventMutation", m) + } + return f(ctx, mv) +} + +// The MachineFunc type is an adapter to allow the use of ordinary +// function as Machine mutator. +type MachineFunc func(context.Context, *ent.MachineMutation) (ent.Value, error) + +// Mutate calls f(ctx, m). +func (f MachineFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { + mv, ok := m.(*ent.MachineMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.MachineMutation", m) + } + return f(ctx, mv) +} + +// The MetaFunc type is an adapter to allow the use of ordinary +// function as Meta mutator. +type MetaFunc func(context.Context, *ent.MetaMutation) (ent.Value, error) + +// Mutate calls f(ctx, m). +func (f MetaFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { + mv, ok := m.(*ent.MetaMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.MetaMutation", m) + } + return f(ctx, mv) +} + +// Condition is a hook condition function. +type Condition func(context.Context, ent.Mutation) bool + +// And groups conditions with the AND operator. +func And(first, second Condition, rest ...Condition) Condition { + return func(ctx context.Context, m ent.Mutation) bool { + if !first(ctx, m) || !second(ctx, m) { + return false + } + for _, cond := range rest { + if !cond(ctx, m) { + return false + } + } + return true + } +} + +// Or groups conditions with the OR operator. +func Or(first, second Condition, rest ...Condition) Condition { + return func(ctx context.Context, m ent.Mutation) bool { + if first(ctx, m) || second(ctx, m) { + return true + } + for _, cond := range rest { + if cond(ctx, m) { + return true + } + } + return false + } +} + +// Not negates a given condition. +func Not(cond Condition) Condition { + return func(ctx context.Context, m ent.Mutation) bool { + return !cond(ctx, m) + } +} + +// HasOp is a condition testing mutation operation. +func HasOp(op ent.Op) Condition { + return func(_ context.Context, m ent.Mutation) bool { + return m.Op().Is(op) + } +} + +// HasAddedFields is a condition validating `.AddedField` on fields. 
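+//
+// For example, running a hook only when a given field is part of the
+// mutation (auditHook is a hypothetical ent.Hook):
+//
+//	hook.If(auditHook, hook.HasAddedFields("serialized"))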
+func HasAddedFields(field string, fields ...string) Condition {
+	return func(_ context.Context, m ent.Mutation) bool {
+		if _, exists := m.AddedField(field); !exists {
+			return false
+		}
+		for _, field := range fields {
+			if _, exists := m.AddedField(field); !exists {
+				return false
+			}
+		}
+		return true
+	}
+}
+
+// HasClearedFields is a condition validating `.FieldCleared` on fields.
+func HasClearedFields(field string, fields ...string) Condition {
+	return func(_ context.Context, m ent.Mutation) bool {
+		if exists := m.FieldCleared(field); !exists {
+			return false
+		}
+		for _, field := range fields {
+			if exists := m.FieldCleared(field); !exists {
+				return false
+			}
+		}
+		return true
+	}
+}
+
+// HasFields is a condition validating `.Field` on fields.
+func HasFields(field string, fields ...string) Condition {
+	return func(_ context.Context, m ent.Mutation) bool {
+		if _, exists := m.Field(field); !exists {
+			return false
+		}
+		for _, field := range fields {
+			if _, exists := m.Field(field); !exists {
+				return false
+			}
+		}
+		return true
+	}
+}
+
+// If executes the given hook under condition.
+//
+//	hook.If(ComputeAverage, And(HasFields(...), HasAddedFields(...)))
+//
+func If(hk ent.Hook, cond Condition) ent.Hook {
+	return func(next ent.Mutator) ent.Mutator {
+		return ent.MutateFunc(func(ctx context.Context, m ent.Mutation) (ent.Value, error) {
+			if cond(ctx, m) {
+				return hk(next).Mutate(ctx, m)
+			}
+			return next.Mutate(ctx, m)
+		})
+	}
+}
+
+// On executes the given hook only for the given operation.
+//
+//	hook.On(Log, ent.Delete|ent.Create)
+//
+func On(hk ent.Hook, op ent.Op) ent.Hook {
+	return If(hk, HasOp(op))
+}
+
+// Unless skips the given hook only for the given operation.
+//
+//	hook.Unless(Log, ent.Update|ent.UpdateOne)
+//
+func Unless(hk ent.Hook, op ent.Op) ent.Hook {
+	return If(hk, Not(HasOp(op)))
+}
+
+// Reject returns a hook that rejects all operations that match op.
+//
+//	func (T) Hooks() []ent.Hook {
+//		return []ent.Hook{
+//			Reject(ent.Delete|ent.Update),
+//		}
+//	}
+//
+func Reject(op ent.Op) ent.Hook {
+	hk := func(ent.Mutator) ent.Mutator {
+		return ent.MutateFunc(func(_ context.Context, m ent.Mutation) (ent.Value, error) {
+			return nil, fmt.Errorf("%s operation is not allowed", m.Op())
+		})
+	}
+	return On(hk, op)
+}
+
+// Chain acts as a list of hooks and is effectively immutable.
+// Once created, it will always hold the same set of hooks in the same order.
+type Chain struct {
+	hooks []ent.Hook
+}
+
+// NewChain creates a new chain of hooks.
+func NewChain(hooks ...ent.Hook) Chain {
+	return Chain{append([]ent.Hook(nil), hooks...)}
+}
+
+// Hook chains the list of hooks and returns the final hook.
+func (c Chain) Hook() ent.Hook {
+	return func(mutator ent.Mutator) ent.Mutator {
+		for i := len(c.hooks) - 1; i >= 0; i-- {
+			mutator = c.hooks[i](mutator)
+		}
+		return mutator
+	}
+}
+
+// Append extends a chain, adding the specified hooks
+// as the last ones in the mutation flow.
+func (c Chain) Append(hooks ...ent.Hook) Chain {
+	newHooks := make([]ent.Hook, 0, len(c.hooks)+len(hooks))
+	newHooks = append(newHooks, c.hooks...)
+	newHooks = append(newHooks, hooks...)
+	return Chain{newHooks}
+}
+
+// Extend extends a chain, adding the hooks of the given chain
+// as the last ones in the mutation flow.
+func (c Chain) Extend(chain Chain) Chain {
+	return c.Append(chain.hooks...)
+} diff --git a/pkg/database/ent/machine.go b/pkg/database/ent/machine.go new file mode 100644 index 000000000..1efb9386f --- /dev/null +++ b/pkg/database/ent/machine.go @@ -0,0 +1,192 @@ +// Code generated by entc, DO NOT EDIT. + +package ent + +import ( + "fmt" + "strings" + "time" + + "github.com/crowdsecurity/crowdsec/pkg/database/ent/machine" + "github.com/facebook/ent/dialect/sql" +) + +// Machine is the model entity for the Machine schema. +type Machine struct { + config `json:"-"` + // ID of the ent. + ID int `json:"id,omitempty"` + // CreatedAt holds the value of the "created_at" field. + CreatedAt time.Time `json:"created_at,omitempty"` + // UpdatedAt holds the value of the "updated_at" field. + UpdatedAt time.Time `json:"updated_at,omitempty"` + // MachineId holds the value of the "machineId" field. + MachineId string `json:"machineId,omitempty"` + // Password holds the value of the "password" field. + Password string `json:"-"` + // IpAddress holds the value of the "ipAddress" field. + IpAddress string `json:"ipAddress,omitempty"` + // Scenarios holds the value of the "scenarios" field. + Scenarios string `json:"scenarios,omitempty"` + // Version holds the value of the "version" field. + Version string `json:"version,omitempty"` + // IsValidated holds the value of the "isValidated" field. + IsValidated bool `json:"isValidated,omitempty"` + // Status holds the value of the "status" field. + Status string `json:"status,omitempty"` + // Edges holds the relations/edges for other nodes in the graph. + // The values are being populated by the MachineQuery when eager-loading is set. + Edges MachineEdges `json:"edges"` +} + +// MachineEdges holds the relations/edges for other nodes in the graph. +type MachineEdges struct { + // Alerts holds the value of the alerts edge. + Alerts []*Alert + // loadedTypes holds the information for reporting if a + // type was loaded (or requested) in eager-loading or not. + loadedTypes [1]bool +} + +// AlertsOrErr returns the Alerts value or an error if the edge +// was not loaded in eager-loading. +func (e MachineEdges) AlertsOrErr() ([]*Alert, error) { + if e.loadedTypes[0] { + return e.Alerts, nil + } + return nil, &NotLoadedError{edge: "alerts"} +} + +// scanValues returns the types for scanning values from sql.Rows. +func (*Machine) scanValues() []interface{} { + return []interface{}{ + &sql.NullInt64{}, // id + &sql.NullTime{}, // created_at + &sql.NullTime{}, // updated_at + &sql.NullString{}, // machineId + &sql.NullString{}, // password + &sql.NullString{}, // ipAddress + &sql.NullString{}, // scenarios + &sql.NullString{}, // version + &sql.NullBool{}, // isValidated + &sql.NullString{}, // status + } +} + +// assignValues assigns the values that were returned from sql.Rows (after scanning) +// to the Machine fields. 
+func (m *Machine) assignValues(values ...interface{}) error { + if m, n := len(values), len(machine.Columns); m < n { + return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) + } + value, ok := values[0].(*sql.NullInt64) + if !ok { + return fmt.Errorf("unexpected type %T for field id", value) + } + m.ID = int(value.Int64) + values = values[1:] + if value, ok := values[0].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field created_at", values[0]) + } else if value.Valid { + m.CreatedAt = value.Time + } + if value, ok := values[1].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field updated_at", values[1]) + } else if value.Valid { + m.UpdatedAt = value.Time + } + if value, ok := values[2].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field machineId", values[2]) + } else if value.Valid { + m.MachineId = value.String + } + if value, ok := values[3].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field password", values[3]) + } else if value.Valid { + m.Password = value.String + } + if value, ok := values[4].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field ipAddress", values[4]) + } else if value.Valid { + m.IpAddress = value.String + } + if value, ok := values[5].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field scenarios", values[5]) + } else if value.Valid { + m.Scenarios = value.String + } + if value, ok := values[6].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field version", values[6]) + } else if value.Valid { + m.Version = value.String + } + if value, ok := values[7].(*sql.NullBool); !ok { + return fmt.Errorf("unexpected type %T for field isValidated", values[7]) + } else if value.Valid { + m.IsValidated = value.Bool + } + if value, ok := values[8].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field status", values[8]) + } else if value.Valid { + m.Status = value.String + } + return nil +} + +// QueryAlerts queries the alerts edge of the Machine. +func (m *Machine) QueryAlerts() *AlertQuery { + return (&MachineClient{config: m.config}).QueryAlerts(m) +} + +// Update returns a builder for updating this Machine. +// Note that, you need to call Machine.Unwrap() before calling this method, if this Machine +// was returned from a transaction, and the transaction was committed or rolled back. +func (m *Machine) Update() *MachineUpdateOne { + return (&MachineClient{config: m.config}).UpdateOne(m) +} + +// Unwrap unwraps the entity that was returned from a transaction after it was closed, +// so that all next queries will be executed through the driver which created the transaction. +func (m *Machine) Unwrap() *Machine { + tx, ok := m.config.driver.(*txDriver) + if !ok { + panic("ent: Machine is not a transactional entity") + } + m.config.driver = tx.drv + return m +} + +// String implements the fmt.Stringer. 
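
`Unwrap` above is easy to miss: an entity saved through a transaction keeps the tx driver, so it must be detached before issuing further queries once the tx is closed. A hedged sketch (the generated `client.Tx` helper and the field values are assumptions for illustration):

```go
package example

import (
	"context"

	"github.com/crowdsecurity/crowdsec/pkg/database/ent"
)

func createInTx(ctx context.Context, client *ent.Client) (*ent.Machine, error) {
	tx, err := client.Tx(ctx)
	if err != nil {
		return nil, err
	}
	m, err := tx.Machine.Create().
		SetMachineId("test-machine").
		SetPassword("bcrypt-hash-here").
		SetIpAddress("127.0.0.1").
		Save(ctx)
	if err != nil {
		tx.Rollback() // best effort; the original error is what matters
		return nil, err
	}
	if err := tx.Commit(); err != nil {
		return nil, err
	}
	// Detach from the committed tx so later builders use the root driver.
	return m.Unwrap(), nil
}
```
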
+func (m *Machine) String() string { + var builder strings.Builder + builder.WriteString("Machine(") + builder.WriteString(fmt.Sprintf("id=%v", m.ID)) + builder.WriteString(", created_at=") + builder.WriteString(m.CreatedAt.Format(time.ANSIC)) + builder.WriteString(", updated_at=") + builder.WriteString(m.UpdatedAt.Format(time.ANSIC)) + builder.WriteString(", machineId=") + builder.WriteString(m.MachineId) + builder.WriteString(", password=") + builder.WriteString(", ipAddress=") + builder.WriteString(m.IpAddress) + builder.WriteString(", scenarios=") + builder.WriteString(m.Scenarios) + builder.WriteString(", version=") + builder.WriteString(m.Version) + builder.WriteString(", isValidated=") + builder.WriteString(fmt.Sprintf("%v", m.IsValidated)) + builder.WriteString(", status=") + builder.WriteString(m.Status) + builder.WriteByte(')') + return builder.String() +} + +// Machines is a parsable slice of Machine. +type Machines []*Machine + +func (m Machines) config(cfg config) { + for _i := range m { + m[_i].config = cfg + } +} diff --git a/pkg/database/ent/machine/machine.go b/pkg/database/ent/machine/machine.go new file mode 100644 index 000000000..472df83cc --- /dev/null +++ b/pkg/database/ent/machine/machine.go @@ -0,0 +1,80 @@ +// Code generated by entc, DO NOT EDIT. + +package machine + +import ( + "time" +) + +const ( + // Label holds the string label denoting the machine type in the database. + Label = "machine" + // FieldID holds the string denoting the id field in the database. + FieldID = "id" + // FieldCreatedAt holds the string denoting the created_at field in the database. + FieldCreatedAt = "created_at" + // FieldUpdatedAt holds the string denoting the updated_at field in the database. + FieldUpdatedAt = "updated_at" + // FieldMachineId holds the string denoting the machineid field in the database. + FieldMachineId = "machine_id" + // FieldPassword holds the string denoting the password field in the database. + FieldPassword = "password" + // FieldIpAddress holds the string denoting the ipaddress field in the database. + FieldIpAddress = "ip_address" + // FieldScenarios holds the string denoting the scenarios field in the database. + FieldScenarios = "scenarios" + // FieldVersion holds the string denoting the version field in the database. + FieldVersion = "version" + // FieldIsValidated holds the string denoting the isvalidated field in the database. + FieldIsValidated = "is_validated" + // FieldStatus holds the string denoting the status field in the database. + FieldStatus = "status" + + // EdgeAlerts holds the string denoting the alerts edge name in mutations. + EdgeAlerts = "alerts" + + // Table holds the table name of the machine in the database. + Table = "machines" + // AlertsTable is the table the holds the alerts relation/edge. + AlertsTable = "alerts" + // AlertsInverseTable is the table name for the Alert entity. + // It exists in this package in order to avoid circular dependency with the "alert" package. + AlertsInverseTable = "alerts" + // AlertsColumn is the table column denoting the alerts relation/edge. + AlertsColumn = "machine_alerts" +) + +// Columns holds all SQL columns for machine fields. +var Columns = []string{ + FieldID, + FieldCreatedAt, + FieldUpdatedAt, + FieldMachineId, + FieldPassword, + FieldIpAddress, + FieldScenarios, + FieldVersion, + FieldIsValidated, + FieldStatus, +} + +// ValidColumn reports if the column name is valid (part of the table columns). 
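
Two details worth noting in the constants above: mixed-case schema fields are mapped to snake_case columns (`machineId` → `machine_id`), and the sensitive `password` field is excluded both from JSON (`json:"-"`) and from the `String()` output. `ValidColumn` can guard dynamic column names such as a user-supplied sort key; a small sketch (the wrapper itself is hypothetical):

```go
package example

import (
	"fmt"

	"github.com/crowdsecurity/crowdsec/pkg/database/ent/machine"
)

// safeOrderColumn rejects anything that is not a real machines column.
func safeOrderColumn(col string) (string, error) {
	if !machine.ValidColumn(col) {
		return "", fmt.Errorf("unknown machine column %q", col)
	}
	return col, nil // e.g. machine.FieldMachineId == "machine_id"
}
```
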
+func ValidColumn(column string) bool { + for i := range Columns { + if column == Columns[i] { + return true + } + } + return false +} + +var ( + // DefaultCreatedAt holds the default value on creation for the created_at field. + DefaultCreatedAt func() time.Time + // DefaultUpdatedAt holds the default value on creation for the updated_at field. + DefaultUpdatedAt func() time.Time + // ScenariosValidator is a validator for the "scenarios" field. It is called by the builders before save. + ScenariosValidator func(string) error + // DefaultIsValidated holds the default value on creation for the isValidated field. + DefaultIsValidated bool +) diff --git a/pkg/database/ent/machine/where.go b/pkg/database/ent/machine/where.go new file mode 100644 index 000000000..e4533e9f0 --- /dev/null +++ b/pkg/database/ent/machine/where.go @@ -0,0 +1,1091 @@ +// Code generated by entc, DO NOT EDIT. + +package machine + +import ( + "time" + + "github.com/crowdsecurity/crowdsec/pkg/database/ent/predicate" + "github.com/facebook/ent/dialect/sql" + "github.com/facebook/ent/dialect/sql/sqlgraph" +) + +// ID filters vertices based on their identifier. +func ID(id int) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldID), id)) + }) +} + +// IDEQ applies the EQ predicate on the ID field. +func IDEQ(id int) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldID), id)) + }) +} + +// IDNEQ applies the NEQ predicate on the ID field. +func IDNEQ(id int) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldID), id)) + }) +} + +// IDIn applies the In predicate on the ID field. +func IDIn(ids ...int) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(ids) == 0 { + s.Where(sql.False()) + return + } + v := make([]interface{}, len(ids)) + for i := range v { + v[i] = ids[i] + } + s.Where(sql.In(s.C(FieldID), v...)) + }) +} + +// IDNotIn applies the NotIn predicate on the ID field. +func IDNotIn(ids ...int) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(ids) == 0 { + s.Where(sql.False()) + return + } + v := make([]interface{}, len(ids)) + for i := range v { + v[i] = ids[i] + } + s.Where(sql.NotIn(s.C(FieldID), v...)) + }) +} + +// IDGT applies the GT predicate on the ID field. +func IDGT(id int) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldID), id)) + }) +} + +// IDGTE applies the GTE predicate on the ID field. +func IDGTE(id int) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldID), id)) + }) +} + +// IDLT applies the LT predicate on the ID field. +func IDLT(id int) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldID), id)) + }) +} + +// IDLTE applies the LTE predicate on the ID field. +func IDLTE(id int) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldID), id)) + }) +} + +// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ. 
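
As the inline comments above explain, `IDIn`/`IDNotIn` with zero arguments compile to SQL `FALSE` rather than the invalid `IN ()`, so passing an empty slice is safe and simply matches nothing. A short sketch:

```go
package example

import (
	"context"

	"github.com/crowdsecurity/crowdsec/pkg/database/ent"
	"github.com/crowdsecurity/crowdsec/pkg/database/ent/machine"
)

func machinesByIDs(ctx context.Context, client *ent.Client, ids []int) ([]*ent.Machine, error) {
	// With len(ids) == 0 the predicate renders as FALSE, so this returns
	// an empty slice instead of a SQL syntax error.
	return client.Machine.Query().
		Where(machine.IDIn(ids...)).
		All(ctx)
}
```
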
+func CreatedAt(v time.Time) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldCreatedAt), v)) + }) +} + +// UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ. +func UpdatedAt(v time.Time) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldUpdatedAt), v)) + }) +} + +// MachineId applies equality check predicate on the "machineId" field. It's identical to MachineIdEQ. +func MachineId(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldMachineId), v)) + }) +} + +// Password applies equality check predicate on the "password" field. It's identical to PasswordEQ. +func Password(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldPassword), v)) + }) +} + +// IpAddress applies equality check predicate on the "ipAddress" field. It's identical to IpAddressEQ. +func IpAddress(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldIpAddress), v)) + }) +} + +// Scenarios applies equality check predicate on the "scenarios" field. It's identical to ScenariosEQ. +func Scenarios(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldScenarios), v)) + }) +} + +// Version applies equality check predicate on the "version" field. It's identical to VersionEQ. +func Version(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldVersion), v)) + }) +} + +// IsValidated applies equality check predicate on the "isValidated" field. It's identical to IsValidatedEQ. +func IsValidated(v bool) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldIsValidated), v)) + }) +} + +// Status applies equality check predicate on the "status" field. It's identical to StatusEQ. +func Status(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldStatus), v)) + }) +} + +// CreatedAtEQ applies the EQ predicate on the "created_at" field. +func CreatedAtEQ(v time.Time) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldCreatedAt), v)) + }) +} + +// CreatedAtNEQ applies the NEQ predicate on the "created_at" field. +func CreatedAtNEQ(v time.Time) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldCreatedAt), v)) + }) +} + +// CreatedAtIn applies the In predicate on the "created_at" field. +func CreatedAtIn(vs ...time.Time) predicate.Machine { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Machine(func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(v) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.In(s.C(FieldCreatedAt), v...)) + }) +} + +// CreatedAtNotIn applies the NotIn predicate on the "created_at" field. +func CreatedAtNotIn(vs ...time.Time) predicate.Machine { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Machine(func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. 
+ if len(v) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.NotIn(s.C(FieldCreatedAt), v...)) + }) +} + +// CreatedAtGT applies the GT predicate on the "created_at" field. +func CreatedAtGT(v time.Time) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldCreatedAt), v)) + }) +} + +// CreatedAtGTE applies the GTE predicate on the "created_at" field. +func CreatedAtGTE(v time.Time) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldCreatedAt), v)) + }) +} + +// CreatedAtLT applies the LT predicate on the "created_at" field. +func CreatedAtLT(v time.Time) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldCreatedAt), v)) + }) +} + +// CreatedAtLTE applies the LTE predicate on the "created_at" field. +func CreatedAtLTE(v time.Time) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldCreatedAt), v)) + }) +} + +// UpdatedAtEQ applies the EQ predicate on the "updated_at" field. +func UpdatedAtEQ(v time.Time) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldUpdatedAt), v)) + }) +} + +// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field. +func UpdatedAtNEQ(v time.Time) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldUpdatedAt), v)) + }) +} + +// UpdatedAtIn applies the In predicate on the "updated_at" field. +func UpdatedAtIn(vs ...time.Time) predicate.Machine { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Machine(func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(v) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.In(s.C(FieldUpdatedAt), v...)) + }) +} + +// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field. +func UpdatedAtNotIn(vs ...time.Time) predicate.Machine { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Machine(func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(v) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.NotIn(s.C(FieldUpdatedAt), v...)) + }) +} + +// UpdatedAtGT applies the GT predicate on the "updated_at" field. +func UpdatedAtGT(v time.Time) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldUpdatedAt), v)) + }) +} + +// UpdatedAtGTE applies the GTE predicate on the "updated_at" field. +func UpdatedAtGTE(v time.Time) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldUpdatedAt), v)) + }) +} + +// UpdatedAtLT applies the LT predicate on the "updated_at" field. +func UpdatedAtLT(v time.Time) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldUpdatedAt), v)) + }) +} + +// UpdatedAtLTE applies the LTE predicate on the "updated_at" field. +func UpdatedAtLTE(v time.Time) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldUpdatedAt), v)) + }) +} + +// MachineIdEQ applies the EQ predicate on the "machineId" field. 
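
Predicates passed together to `Where` are AND-ed, so the comparison helpers above combine naturally into a half-open time window. A sketch (the window itself is an illustrative choice):

```go
package example

import (
	"context"
	"time"

	"github.com/crowdsecurity/crowdsec/pkg/database/ent"
	"github.com/crowdsecurity/crowdsec/pkg/database/ent/machine"
)

func updatedBetween(ctx context.Context, client *ent.Client, since, until time.Time) ([]*ent.Machine, error) {
	return client.Machine.Query().
		Where(
			machine.UpdatedAtGTE(since), // since <= updated_at
			machine.UpdatedAtLT(until),  // updated_at < until
		).
		All(ctx)
}
```
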
+func MachineIdEQ(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldMachineId), v)) + }) +} + +// MachineIdNEQ applies the NEQ predicate on the "machineId" field. +func MachineIdNEQ(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldMachineId), v)) + }) +} + +// MachineIdIn applies the In predicate on the "machineId" field. +func MachineIdIn(vs ...string) predicate.Machine { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Machine(func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(v) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.In(s.C(FieldMachineId), v...)) + }) +} + +// MachineIdNotIn applies the NotIn predicate on the "machineId" field. +func MachineIdNotIn(vs ...string) predicate.Machine { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Machine(func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(v) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.NotIn(s.C(FieldMachineId), v...)) + }) +} + +// MachineIdGT applies the GT predicate on the "machineId" field. +func MachineIdGT(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldMachineId), v)) + }) +} + +// MachineIdGTE applies the GTE predicate on the "machineId" field. +func MachineIdGTE(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldMachineId), v)) + }) +} + +// MachineIdLT applies the LT predicate on the "machineId" field. +func MachineIdLT(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldMachineId), v)) + }) +} + +// MachineIdLTE applies the LTE predicate on the "machineId" field. +func MachineIdLTE(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldMachineId), v)) + }) +} + +// MachineIdContains applies the Contains predicate on the "machineId" field. +func MachineIdContains(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.Contains(s.C(FieldMachineId), v)) + }) +} + +// MachineIdHasPrefix applies the HasPrefix predicate on the "machineId" field. +func MachineIdHasPrefix(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.HasPrefix(s.C(FieldMachineId), v)) + }) +} + +// MachineIdHasSuffix applies the HasSuffix predicate on the "machineId" field. +func MachineIdHasSuffix(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.HasSuffix(s.C(FieldMachineId), v)) + }) +} + +// MachineIdEqualFold applies the EqualFold predicate on the "machineId" field. +func MachineIdEqualFold(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.EqualFold(s.C(FieldMachineId), v)) + }) +} + +// MachineIdContainsFold applies the ContainsFold predicate on the "machineId" field. +func MachineIdContainsFold(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.ContainsFold(s.C(FieldMachineId), v)) + }) +} + +// PasswordEQ applies the EQ predicate on the "password" field. 
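
Every string field also gets substring and case-folding variants, as generated above for `machineId`. A sketch of a case-insensitive agent search (the search term is illustrative):

```go
package example

import (
	"context"

	"github.com/crowdsecurity/crowdsec/pkg/database/ent"
	"github.com/crowdsecurity/crowdsec/pkg/database/ent/machine"
)

func findAgents(ctx context.Context, client *ent.Client, term string) ([]*ent.Machine, error) {
	return client.Machine.Query().
		Where(machine.MachineIdContainsFold(term)). // substring match, case-insensitive
		All(ctx)
}
```
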
+func PasswordEQ(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldPassword), v)) + }) +} + +// PasswordNEQ applies the NEQ predicate on the "password" field. +func PasswordNEQ(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldPassword), v)) + }) +} + +// PasswordIn applies the In predicate on the "password" field. +func PasswordIn(vs ...string) predicate.Machine { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Machine(func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(v) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.In(s.C(FieldPassword), v...)) + }) +} + +// PasswordNotIn applies the NotIn predicate on the "password" field. +func PasswordNotIn(vs ...string) predicate.Machine { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Machine(func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(v) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.NotIn(s.C(FieldPassword), v...)) + }) +} + +// PasswordGT applies the GT predicate on the "password" field. +func PasswordGT(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldPassword), v)) + }) +} + +// PasswordGTE applies the GTE predicate on the "password" field. +func PasswordGTE(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldPassword), v)) + }) +} + +// PasswordLT applies the LT predicate on the "password" field. +func PasswordLT(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldPassword), v)) + }) +} + +// PasswordLTE applies the LTE predicate on the "password" field. +func PasswordLTE(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldPassword), v)) + }) +} + +// PasswordContains applies the Contains predicate on the "password" field. +func PasswordContains(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.Contains(s.C(FieldPassword), v)) + }) +} + +// PasswordHasPrefix applies the HasPrefix predicate on the "password" field. +func PasswordHasPrefix(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.HasPrefix(s.C(FieldPassword), v)) + }) +} + +// PasswordHasSuffix applies the HasSuffix predicate on the "password" field. +func PasswordHasSuffix(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.HasSuffix(s.C(FieldPassword), v)) + }) +} + +// PasswordEqualFold applies the EqualFold predicate on the "password" field. +func PasswordEqualFold(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.EqualFold(s.C(FieldPassword), v)) + }) +} + +// PasswordContainsFold applies the ContainsFold predicate on the "password" field. +func PasswordContainsFold(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.ContainsFold(s.C(FieldPassword), v)) + }) +} + +// IpAddressEQ applies the EQ predicate on the "ipAddress" field. 
+func IpAddressEQ(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldIpAddress), v)) + }) +} + +// IpAddressNEQ applies the NEQ predicate on the "ipAddress" field. +func IpAddressNEQ(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldIpAddress), v)) + }) +} + +// IpAddressIn applies the In predicate on the "ipAddress" field. +func IpAddressIn(vs ...string) predicate.Machine { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Machine(func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(v) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.In(s.C(FieldIpAddress), v...)) + }) +} + +// IpAddressNotIn applies the NotIn predicate on the "ipAddress" field. +func IpAddressNotIn(vs ...string) predicate.Machine { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Machine(func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(v) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.NotIn(s.C(FieldIpAddress), v...)) + }) +} + +// IpAddressGT applies the GT predicate on the "ipAddress" field. +func IpAddressGT(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldIpAddress), v)) + }) +} + +// IpAddressGTE applies the GTE predicate on the "ipAddress" field. +func IpAddressGTE(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldIpAddress), v)) + }) +} + +// IpAddressLT applies the LT predicate on the "ipAddress" field. +func IpAddressLT(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldIpAddress), v)) + }) +} + +// IpAddressLTE applies the LTE predicate on the "ipAddress" field. +func IpAddressLTE(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldIpAddress), v)) + }) +} + +// IpAddressContains applies the Contains predicate on the "ipAddress" field. +func IpAddressContains(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.Contains(s.C(FieldIpAddress), v)) + }) +} + +// IpAddressHasPrefix applies the HasPrefix predicate on the "ipAddress" field. +func IpAddressHasPrefix(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.HasPrefix(s.C(FieldIpAddress), v)) + }) +} + +// IpAddressHasSuffix applies the HasSuffix predicate on the "ipAddress" field. +func IpAddressHasSuffix(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.HasSuffix(s.C(FieldIpAddress), v)) + }) +} + +// IpAddressEqualFold applies the EqualFold predicate on the "ipAddress" field. +func IpAddressEqualFold(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.EqualFold(s.C(FieldIpAddress), v)) + }) +} + +// IpAddressContainsFold applies the ContainsFold predicate on the "ipAddress" field. 
+func IpAddressContainsFold(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.ContainsFold(s.C(FieldIpAddress), v)) + }) +} + +// ScenariosEQ applies the EQ predicate on the "scenarios" field. +func ScenariosEQ(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldScenarios), v)) + }) +} + +// ScenariosNEQ applies the NEQ predicate on the "scenarios" field. +func ScenariosNEQ(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldScenarios), v)) + }) +} + +// ScenariosIn applies the In predicate on the "scenarios" field. +func ScenariosIn(vs ...string) predicate.Machine { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Machine(func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(v) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.In(s.C(FieldScenarios), v...)) + }) +} + +// ScenariosNotIn applies the NotIn predicate on the "scenarios" field. +func ScenariosNotIn(vs ...string) predicate.Machine { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Machine(func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(v) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.NotIn(s.C(FieldScenarios), v...)) + }) +} + +// ScenariosGT applies the GT predicate on the "scenarios" field. +func ScenariosGT(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldScenarios), v)) + }) +} + +// ScenariosGTE applies the GTE predicate on the "scenarios" field. +func ScenariosGTE(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldScenarios), v)) + }) +} + +// ScenariosLT applies the LT predicate on the "scenarios" field. +func ScenariosLT(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldScenarios), v)) + }) +} + +// ScenariosLTE applies the LTE predicate on the "scenarios" field. +func ScenariosLTE(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldScenarios), v)) + }) +} + +// ScenariosContains applies the Contains predicate on the "scenarios" field. +func ScenariosContains(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.Contains(s.C(FieldScenarios), v)) + }) +} + +// ScenariosHasPrefix applies the HasPrefix predicate on the "scenarios" field. +func ScenariosHasPrefix(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.HasPrefix(s.C(FieldScenarios), v)) + }) +} + +// ScenariosHasSuffix applies the HasSuffix predicate on the "scenarios" field. +func ScenariosHasSuffix(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.HasSuffix(s.C(FieldScenarios), v)) + }) +} + +// ScenariosIsNil applies the IsNil predicate on the "scenarios" field. +func ScenariosIsNil() predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.IsNull(s.C(FieldScenarios))) + }) +} + +// ScenariosNotNil applies the NotNil predicate on the "scenarios" field. 
+func ScenariosNotNil() predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.NotNull(s.C(FieldScenarios))) + }) +} + +// ScenariosEqualFold applies the EqualFold predicate on the "scenarios" field. +func ScenariosEqualFold(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.EqualFold(s.C(FieldScenarios), v)) + }) +} + +// ScenariosContainsFold applies the ContainsFold predicate on the "scenarios" field. +func ScenariosContainsFold(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.ContainsFold(s.C(FieldScenarios), v)) + }) +} + +// VersionEQ applies the EQ predicate on the "version" field. +func VersionEQ(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldVersion), v)) + }) +} + +// VersionNEQ applies the NEQ predicate on the "version" field. +func VersionNEQ(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldVersion), v)) + }) +} + +// VersionIn applies the In predicate on the "version" field. +func VersionIn(vs ...string) predicate.Machine { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Machine(func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(v) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.In(s.C(FieldVersion), v...)) + }) +} + +// VersionNotIn applies the NotIn predicate on the "version" field. +func VersionNotIn(vs ...string) predicate.Machine { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Machine(func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(v) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.NotIn(s.C(FieldVersion), v...)) + }) +} + +// VersionGT applies the GT predicate on the "version" field. +func VersionGT(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldVersion), v)) + }) +} + +// VersionGTE applies the GTE predicate on the "version" field. +func VersionGTE(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldVersion), v)) + }) +} + +// VersionLT applies the LT predicate on the "version" field. +func VersionLT(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldVersion), v)) + }) +} + +// VersionLTE applies the LTE predicate on the "version" field. +func VersionLTE(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldVersion), v)) + }) +} + +// VersionContains applies the Contains predicate on the "version" field. +func VersionContains(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.Contains(s.C(FieldVersion), v)) + }) +} + +// VersionHasPrefix applies the HasPrefix predicate on the "version" field. +func VersionHasPrefix(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.HasPrefix(s.C(FieldVersion), v)) + }) +} + +// VersionHasSuffix applies the HasSuffix predicate on the "version" field. 
+func VersionHasSuffix(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.HasSuffix(s.C(FieldVersion), v)) + }) +} + +// VersionIsNil applies the IsNil predicate on the "version" field. +func VersionIsNil() predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.IsNull(s.C(FieldVersion))) + }) +} + +// VersionNotNil applies the NotNil predicate on the "version" field. +func VersionNotNil() predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.NotNull(s.C(FieldVersion))) + }) +} + +// VersionEqualFold applies the EqualFold predicate on the "version" field. +func VersionEqualFold(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.EqualFold(s.C(FieldVersion), v)) + }) +} + +// VersionContainsFold applies the ContainsFold predicate on the "version" field. +func VersionContainsFold(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.ContainsFold(s.C(FieldVersion), v)) + }) +} + +// IsValidatedEQ applies the EQ predicate on the "isValidated" field. +func IsValidatedEQ(v bool) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldIsValidated), v)) + }) +} + +// IsValidatedNEQ applies the NEQ predicate on the "isValidated" field. +func IsValidatedNEQ(v bool) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldIsValidated), v)) + }) +} + +// StatusEQ applies the EQ predicate on the "status" field. +func StatusEQ(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldStatus), v)) + }) +} + +// StatusNEQ applies the NEQ predicate on the "status" field. +func StatusNEQ(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldStatus), v)) + }) +} + +// StatusIn applies the In predicate on the "status" field. +func StatusIn(vs ...string) predicate.Machine { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Machine(func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(v) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.In(s.C(FieldStatus), v...)) + }) +} + +// StatusNotIn applies the NotIn predicate on the "status" field. +func StatusNotIn(vs ...string) predicate.Machine { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Machine(func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(v) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.NotIn(s.C(FieldStatus), v...)) + }) +} + +// StatusGT applies the GT predicate on the "status" field. +func StatusGT(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldStatus), v)) + }) +} + +// StatusGTE applies the GTE predicate on the "status" field. +func StatusGTE(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldStatus), v)) + }) +} + +// StatusLT applies the LT predicate on the "status" field. 
+func StatusLT(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldStatus), v)) + }) +} + +// StatusLTE applies the LTE predicate on the "status" field. +func StatusLTE(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldStatus), v)) + }) +} + +// StatusContains applies the Contains predicate on the "status" field. +func StatusContains(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.Contains(s.C(FieldStatus), v)) + }) +} + +// StatusHasPrefix applies the HasPrefix predicate on the "status" field. +func StatusHasPrefix(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.HasPrefix(s.C(FieldStatus), v)) + }) +} + +// StatusHasSuffix applies the HasSuffix predicate on the "status" field. +func StatusHasSuffix(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.HasSuffix(s.C(FieldStatus), v)) + }) +} + +// StatusIsNil applies the IsNil predicate on the "status" field. +func StatusIsNil() predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.IsNull(s.C(FieldStatus))) + }) +} + +// StatusNotNil applies the NotNil predicate on the "status" field. +func StatusNotNil() predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.NotNull(s.C(FieldStatus))) + }) +} + +// StatusEqualFold applies the EqualFold predicate on the "status" field. +func StatusEqualFold(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.EqualFold(s.C(FieldStatus), v)) + }) +} + +// StatusContainsFold applies the ContainsFold predicate on the "status" field. +func StatusContainsFold(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.ContainsFold(s.C(FieldStatus), v)) + }) +} + +// HasAlerts applies the HasEdge predicate on the "alerts" edge. +func HasAlerts() predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(AlertsTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, AlertsTable, AlertsColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasAlertsWith applies the HasEdge predicate on the "alerts" edge with a given conditions (other predicates). +func HasAlertsWith(preds ...predicate.Alert) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(AlertsInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, AlertsTable, AlertsColumn), + ) + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// And groups list of predicates with the AND operator between them. +func And(predicates ...predicate.Machine) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s1 := s.Clone().SetP(nil) + for _, p := range predicates { + p(s1) + } + s.Where(s1.P()) + }) +} + +// Or groups list of predicates with the OR operator between them. +func Or(predicates ...predicate.Machine) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s1 := s.Clone().SetP(nil) + for i, p := range predicates { + if i > 0 { + s1.Or() + } + p(s1) + } + s.Where(s1.P()) + }) +} + +// Not applies the not operator on the given predicate. 
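
`And`, `Or`, and `Not` (just below) compose arbitrary predicate trees by cloning the selector, while `HasAlerts`/`HasAlertsWith` pull the O2M edge in through a neighbor step. A hedged composition sketch; the particular filter is made up:

```go
package example

import (
	"context"

	"github.com/crowdsecurity/crowdsec/pkg/database/ent"
	"github.com/crowdsecurity/crowdsec/pkg/database/ent/machine"
)

func noisyValidatedMachines(ctx context.Context, client *ent.Client) ([]*ent.Machine, error) {
	return client.Machine.Query().
		Where(
			machine.Or(
				machine.IsValidated(true),
				machine.Not(machine.StatusIsNil()),
			),
			machine.HasAlerts(), // only machines that raised at least one alert
		).
		All(ctx)
}
```
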
+func Not(p predicate.Machine) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + p(s.Not()) + }) +} diff --git a/pkg/database/ent/machine_create.go b/pkg/database/ent/machine_create.go new file mode 100644 index 000000000..e81185109 --- /dev/null +++ b/pkg/database/ent/machine_create.go @@ -0,0 +1,418 @@ +// Code generated by entc, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "time" + + "github.com/crowdsecurity/crowdsec/pkg/database/ent/alert" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/machine" + "github.com/facebook/ent/dialect/sql/sqlgraph" + "github.com/facebook/ent/schema/field" +) + +// MachineCreate is the builder for creating a Machine entity. +type MachineCreate struct { + config + mutation *MachineMutation + hooks []Hook +} + +// SetCreatedAt sets the created_at field. +func (mc *MachineCreate) SetCreatedAt(t time.Time) *MachineCreate { + mc.mutation.SetCreatedAt(t) + return mc +} + +// SetNillableCreatedAt sets the created_at field if the given value is not nil. +func (mc *MachineCreate) SetNillableCreatedAt(t *time.Time) *MachineCreate { + if t != nil { + mc.SetCreatedAt(*t) + } + return mc +} + +// SetUpdatedAt sets the updated_at field. +func (mc *MachineCreate) SetUpdatedAt(t time.Time) *MachineCreate { + mc.mutation.SetUpdatedAt(t) + return mc +} + +// SetNillableUpdatedAt sets the updated_at field if the given value is not nil. +func (mc *MachineCreate) SetNillableUpdatedAt(t *time.Time) *MachineCreate { + if t != nil { + mc.SetUpdatedAt(*t) + } + return mc +} + +// SetMachineId sets the machineId field. +func (mc *MachineCreate) SetMachineId(s string) *MachineCreate { + mc.mutation.SetMachineId(s) + return mc +} + +// SetPassword sets the password field. +func (mc *MachineCreate) SetPassword(s string) *MachineCreate { + mc.mutation.SetPassword(s) + return mc +} + +// SetIpAddress sets the ipAddress field. +func (mc *MachineCreate) SetIpAddress(s string) *MachineCreate { + mc.mutation.SetIpAddress(s) + return mc +} + +// SetScenarios sets the scenarios field. +func (mc *MachineCreate) SetScenarios(s string) *MachineCreate { + mc.mutation.SetScenarios(s) + return mc +} + +// SetNillableScenarios sets the scenarios field if the given value is not nil. +func (mc *MachineCreate) SetNillableScenarios(s *string) *MachineCreate { + if s != nil { + mc.SetScenarios(*s) + } + return mc +} + +// SetVersion sets the version field. +func (mc *MachineCreate) SetVersion(s string) *MachineCreate { + mc.mutation.SetVersion(s) + return mc +} + +// SetNillableVersion sets the version field if the given value is not nil. +func (mc *MachineCreate) SetNillableVersion(s *string) *MachineCreate { + if s != nil { + mc.SetVersion(*s) + } + return mc +} + +// SetIsValidated sets the isValidated field. +func (mc *MachineCreate) SetIsValidated(b bool) *MachineCreate { + mc.mutation.SetIsValidated(b) + return mc +} + +// SetNillableIsValidated sets the isValidated field if the given value is not nil. +func (mc *MachineCreate) SetNillableIsValidated(b *bool) *MachineCreate { + if b != nil { + mc.SetIsValidated(*b) + } + return mc +} + +// SetStatus sets the status field. +func (mc *MachineCreate) SetStatus(s string) *MachineCreate { + mc.mutation.SetStatus(s) + return mc +} + +// SetNillableStatus sets the status field if the given value is not nil. +func (mc *MachineCreate) SetNillableStatus(s *string) *MachineCreate { + if s != nil { + mc.SetStatus(*s) + } + return mc +} + +// AddAlertIDs adds the alerts edge to Alert by ids. 
+func (mc *MachineCreate) AddAlertIDs(ids ...int) *MachineCreate { + mc.mutation.AddAlertIDs(ids...) + return mc +} + +// AddAlerts adds the alerts edges to Alert. +func (mc *MachineCreate) AddAlerts(a ...*Alert) *MachineCreate { + ids := make([]int, len(a)) + for i := range a { + ids[i] = a[i].ID + } + return mc.AddAlertIDs(ids...) +} + +// Mutation returns the MachineMutation object of the builder. +func (mc *MachineCreate) Mutation() *MachineMutation { + return mc.mutation +} + +// Save creates the Machine in the database. +func (mc *MachineCreate) Save(ctx context.Context) (*Machine, error) { + var ( + err error + node *Machine + ) + mc.defaults() + if len(mc.hooks) == 0 { + if err = mc.check(); err != nil { + return nil, err + } + node, err = mc.sqlSave(ctx) + } else { + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*MachineMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err = mc.check(); err != nil { + return nil, err + } + mc.mutation = mutation + node, err = mc.sqlSave(ctx) + mutation.done = true + return node, err + }) + for i := len(mc.hooks) - 1; i >= 0; i-- { + mut = mc.hooks[i](mut) + } + if _, err := mut.Mutate(ctx, mc.mutation); err != nil { + return nil, err + } + } + return node, err +} + +// SaveX calls Save and panics if Save returns an error. +func (mc *MachineCreate) SaveX(ctx context.Context) *Machine { + v, err := mc.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// defaults sets the default values of the builder before save. +func (mc *MachineCreate) defaults() { + if _, ok := mc.mutation.CreatedAt(); !ok { + v := machine.DefaultCreatedAt() + mc.mutation.SetCreatedAt(v) + } + if _, ok := mc.mutation.UpdatedAt(); !ok { + v := machine.DefaultUpdatedAt() + mc.mutation.SetUpdatedAt(v) + } + if _, ok := mc.mutation.IsValidated(); !ok { + v := machine.DefaultIsValidated + mc.mutation.SetIsValidated(v) + } +} + +// check runs all checks and user-defined validators on the builder. 
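
Per `check` just below, `machineId`, `password`, and `ipAddress` must be set by the caller, while the timestamps and `isValidated` fall back to the values applied in `defaults`. A minimal registration sketch (all field values are illustrative):

```go
package example

import (
	"context"

	"github.com/crowdsecurity/crowdsec/pkg/database/ent"
)

func registerMachine(ctx context.Context, client *ent.Client) (*ent.Machine, error) {
	return client.Machine.Create().
		SetMachineId("sd-126005").       // required
		SetPassword("bcrypt-hash-here"). // required; never serialized to JSON
		SetIpAddress("192.168.1.10").    // required
		SetVersion("v1.0.0").            // optional; may be left unset
		Save(ctx)                        // created_at/updated_at/isValidated default
}
```
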
+func (mc *MachineCreate) check() error {
+	if _, ok := mc.mutation.CreatedAt(); !ok {
+		return &ValidationError{Name: "created_at", err: errors.New("ent: missing required field \"created_at\"")}
+	}
+	if _, ok := mc.mutation.UpdatedAt(); !ok {
+		return &ValidationError{Name: "updated_at", err: errors.New("ent: missing required field \"updated_at\"")}
+	}
+	if _, ok := mc.mutation.MachineId(); !ok {
+		return &ValidationError{Name: "machineId", err: errors.New("ent: missing required field \"machineId\"")}
+	}
+	if _, ok := mc.mutation.Password(); !ok {
+		return &ValidationError{Name: "password", err: errors.New("ent: missing required field \"password\"")}
+	}
+	if _, ok := mc.mutation.IpAddress(); !ok {
+		return &ValidationError{Name: "ipAddress", err: errors.New("ent: missing required field \"ipAddress\"")}
+	}
+	if v, ok := mc.mutation.Scenarios(); ok {
+		if err := machine.ScenariosValidator(v); err != nil {
+			return &ValidationError{Name: "scenarios", err: fmt.Errorf("ent: validator failed for field \"scenarios\": %w", err)}
+		}
+	}
+	if _, ok := mc.mutation.IsValidated(); !ok {
+		return &ValidationError{Name: "isValidated", err: errors.New("ent: missing required field \"isValidated\"")}
+	}
+	return nil
+}
+
+func (mc *MachineCreate) sqlSave(ctx context.Context) (*Machine, error) {
+	_node, _spec := mc.createSpec()
+	if err := sqlgraph.CreateNode(ctx, mc.driver, _spec); err != nil {
+		if cerr, ok := isSQLConstraintError(err); ok {
+			err = cerr
+		}
+		return nil, err
+	}
+	id := _spec.ID.Value.(int64)
+	_node.ID = int(id)
+	return _node, nil
+}
+
+func (mc *MachineCreate) createSpec() (*Machine, *sqlgraph.CreateSpec) {
+	var (
+		_node = &Machine{config: mc.config}
+		_spec = &sqlgraph.CreateSpec{
+			Table: machine.Table,
+			ID: &sqlgraph.FieldSpec{
+				Type:   field.TypeInt,
+				Column: machine.FieldID,
+			},
+		}
+	)
+	if value, ok := mc.mutation.CreatedAt(); ok {
+		_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
+			Type:   field.TypeTime,
+			Value:  value,
+			Column: machine.FieldCreatedAt,
+		})
+		_node.CreatedAt = value
+	}
+	if value, ok := mc.mutation.UpdatedAt(); ok {
+		_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
+			Type:   field.TypeTime,
+			Value:  value,
+			Column: machine.FieldUpdatedAt,
+		})
+		_node.UpdatedAt = value
+	}
+	if value, ok := mc.mutation.MachineId(); ok {
+		_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
+			Type:   field.TypeString,
+			Value:  value,
+			Column: machine.FieldMachineId,
+		})
+		_node.MachineId = value
+	}
+	if value, ok := mc.mutation.Password(); ok {
+		_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
+			Type:   field.TypeString,
+			Value:  value,
+			Column: machine.FieldPassword,
+		})
+		_node.Password = value
+	}
+	if value, ok := mc.mutation.IpAddress(); ok {
+		_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
+			Type:   field.TypeString,
+			Value:  value,
+			Column: machine.FieldIpAddress,
+		})
+		_node.IpAddress = value
+	}
+	if value, ok := mc.mutation.Scenarios(); ok {
+		_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
+			Type:   field.TypeString,
+			Value:  value,
+			Column: machine.FieldScenarios,
+		})
+		_node.Scenarios = value
+	}
+	if value, ok := mc.mutation.Version(); ok {
+		_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
+			Type:   field.TypeString,
+			Value:  value,
+			Column: machine.FieldVersion,
+		})
+		_node.Version = value
+	}
+	if value, ok := mc.mutation.IsValidated(); ok {
+		_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
+			Type:   field.TypeBool,
+			Value:  value,
+			Column: machine.FieldIsValidated,
+		})
+		_node.IsValidated = value
+	}
+	if value, ok := mc.mutation.Status(); ok {
+		_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
+			Type:   field.TypeString,
+			Value:  value,
+			Column: machine.FieldStatus,
+		})
+		_node.Status = value
+	}
+	if nodes := mc.mutation.AlertsIDs(); len(nodes) > 0 {
+		edge := &sqlgraph.EdgeSpec{
+			Rel:     sqlgraph.O2M,
+			Inverse: false,
+			Table:   machine.AlertsTable,
+			Columns: []string{machine.AlertsColumn},
+			Bidi:    false,
+			Target: &sqlgraph.EdgeTarget{
+				IDSpec: &sqlgraph.FieldSpec{
+					Type:   field.TypeInt,
+					Column: alert.FieldID,
+				},
+			},
+		}
+		for _, k := range nodes {
+			edge.Target.Nodes = append(edge.Target.Nodes, k)
+		}
+		_spec.Edges = append(_spec.Edges, edge)
+	}
+	return _node, _spec
+}
+
+// MachineCreateBulk is the builder for creating a bulk of Machine entities.
+type MachineCreateBulk struct {
+	config
+	builders []*MachineCreate
+}
+
+// Save creates the Machine entities in the database.
+func (mcb *MachineCreateBulk) Save(ctx context.Context) ([]*Machine, error) {
+	specs := make([]*sqlgraph.CreateSpec, len(mcb.builders))
+	nodes := make([]*Machine, len(mcb.builders))
+	mutators := make([]Mutator, len(mcb.builders))
+	for i := range mcb.builders {
+		func(i int, root context.Context) {
+			builder := mcb.builders[i]
+			builder.defaults()
+			var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
+				mutation, ok := m.(*MachineMutation)
+				if !ok {
+					return nil, fmt.Errorf("unexpected mutation type %T", m)
+				}
+				if err := builder.check(); err != nil {
+					return nil, err
+				}
+				builder.mutation = mutation
+				nodes[i], specs[i] = builder.createSpec()
+				var err error
+				if i < len(mutators)-1 {
+					_, err = mutators[i+1].Mutate(root, mcb.builders[i+1].mutation)
+				} else {
+					// Invoke the actual operation on the latest mutation in the chain.
+					if err = sqlgraph.BatchCreate(ctx, mcb.driver, &sqlgraph.BatchCreateSpec{Nodes: specs}); err != nil {
+						if cerr, ok := isSQLConstraintError(err); ok {
+							err = cerr
+						}
+					}
+				}
+				mutation.done = true
+				if err != nil {
+					return nil, err
+				}
+				id := specs[i].ID.Value.(int64)
+				nodes[i].ID = int(id)
+				return nodes[i], nil
+			})
+			for i := len(builder.hooks) - 1; i >= 0; i-- {
+				mut = builder.hooks[i](mut)
+			}
+			mutators[i] = mut
+		}(i, ctx)
+	}
+	if len(mutators) > 0 {
+		if _, err := mutators[0].Mutate(ctx, mcb.builders[0].mutation); err != nil {
+			return nil, err
+		}
+	}
+	return nodes, nil
+}
+
+// SaveX calls Save and panics if Save returns an error.
+func (mcb *MachineCreateBulk) SaveX(ctx context.Context) []*Machine {
+	v, err := mcb.Save(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return v
+}
diff --git a/pkg/database/ent/machine_delete.go b/pkg/database/ent/machine_delete.go
new file mode 100644
index 000000000..ecd2a79f1
--- /dev/null
+++ b/pkg/database/ent/machine_delete.go
@@ -0,0 +1,109 @@
+// Code generated by entc, DO NOT EDIT.
+
+package ent
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/crowdsecurity/crowdsec/pkg/database/ent/machine"
+	"github.com/crowdsecurity/crowdsec/pkg/database/ent/predicate"
+	"github.com/facebook/ent/dialect/sql"
+	"github.com/facebook/ent/dialect/sql/sqlgraph"
+	"github.com/facebook/ent/schema/field"
+)
+
+// MachineDelete is the builder for deleting a Machine entity.
+type MachineDelete struct {
+	config
+	hooks      []Hook
+	mutation   *MachineMutation
+	predicates []predicate.Machine
+}
+
+// Where adds a new predicate to the delete builder.
+func (md *MachineDelete) Where(ps ...predicate.Machine) *MachineDelete {
+	md.predicates = append(md.predicates, ps...)
+ return md +} + +// Exec executes the deletion query and returns how many vertices were deleted. +func (md *MachineDelete) Exec(ctx context.Context) (int, error) { + var ( + err error + affected int + ) + if len(md.hooks) == 0 { + affected, err = md.sqlExec(ctx) + } else { + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*MachineMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + md.mutation = mutation + affected, err = md.sqlExec(ctx) + mutation.done = true + return affected, err + }) + for i := len(md.hooks) - 1; i >= 0; i-- { + mut = md.hooks[i](mut) + } + if _, err := mut.Mutate(ctx, md.mutation); err != nil { + return 0, err + } + } + return affected, err +} + +// ExecX is like Exec, but panics if an error occurs. +func (md *MachineDelete) ExecX(ctx context.Context) int { + n, err := md.Exec(ctx) + if err != nil { + panic(err) + } + return n +} + +func (md *MachineDelete) sqlExec(ctx context.Context) (int, error) { + _spec := &sqlgraph.DeleteSpec{ + Node: &sqlgraph.NodeSpec{ + Table: machine.Table, + ID: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: machine.FieldID, + }, + }, + } + if ps := md.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + return sqlgraph.DeleteNodes(ctx, md.driver, _spec) +} + +// MachineDeleteOne is the builder for deleting a single Machine entity. +type MachineDeleteOne struct { + md *MachineDelete +} + +// Exec executes the deletion query. +func (mdo *MachineDeleteOne) Exec(ctx context.Context) error { + n, err := mdo.md.Exec(ctx) + switch { + case err != nil: + return err + case n == 0: + return &NotFoundError{machine.Label} + default: + return nil + } +} + +// ExecX is like Exec, but panics if an error occurs. +func (mdo *MachineDeleteOne) ExecX(ctx context.Context) { + mdo.md.ExecX(ctx) +} diff --git a/pkg/database/ent/machine_query.go b/pkg/database/ent/machine_query.go new file mode 100644 index 000000000..96d950194 --- /dev/null +++ b/pkg/database/ent/machine_query.go @@ -0,0 +1,950 @@ +// Code generated by entc, DO NOT EDIT. + +package ent + +import ( + "context" + "database/sql/driver" + "errors" + "fmt" + "math" + + "github.com/crowdsecurity/crowdsec/pkg/database/ent/alert" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/machine" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/predicate" + "github.com/facebook/ent/dialect/sql" + "github.com/facebook/ent/dialect/sql/sqlgraph" + "github.com/facebook/ent/schema/field" +) + +// MachineQuery is the builder for querying Machine entities. +type MachineQuery struct { + config + limit *int + offset *int + order []OrderFunc + unique []string + predicates []predicate.Machine + // eager-loading edges. + withAlerts *AlertQuery + // intermediate query (i.e. traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Where adds a new predicate for the builder. +func (mq *MachineQuery) Where(ps ...predicate.Machine) *MachineQuery { + mq.predicates = append(mq.predicates, ps...) + return mq +} + +// Limit adds a limit step to the query. +func (mq *MachineQuery) Limit(limit int) *MachineQuery { + mq.limit = &limit + return mq +} + +// Offset adds an offset step to the query. +func (mq *MachineQuery) Offset(offset int) *MachineQuery { + mq.offset = &offset + return mq +} + +// Order adds an order step to the query. 
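
`MachineDelete` above mirrors the query builder: predicates narrow the target set and `Exec` reports how many rows were removed, while `MachineDeleteOne` converts zero deletions into a `*NotFoundError`. A sketch (the logging is illustrative):

```go
package example

import (
	"context"
	"log"

	"github.com/crowdsecurity/crowdsec/pkg/database/ent"
	"github.com/crowdsecurity/crowdsec/pkg/database/ent/machine"
)

func purgeMachine(ctx context.Context, client *ent.Client, name string) error {
	n, err := client.Machine.Delete().
		Where(machine.MachineIdEQ(name)).
		Exec(ctx)
	if err != nil {
		return err
	}
	log.Printf("deleted %d machine(s) named %q", n, name)
	return nil
}
```
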
+func (mq *MachineQuery) Order(o ...OrderFunc) *MachineQuery { + mq.order = append(mq.order, o...) + return mq +} + +// QueryAlerts chains the current query on the alerts edge. +func (mq *MachineQuery) QueryAlerts() *AlertQuery { + query := &AlertQuery{config: mq.config} + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := mq.prepareQuery(ctx); err != nil { + return nil, err + } + selector := mq.sqlQuery() + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(machine.Table, machine.FieldID, selector), + sqlgraph.To(alert.Table, alert.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, machine.AlertsTable, machine.AlertsColumn), + ) + fromU = sqlgraph.SetNeighbors(mq.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// First returns the first Machine entity in the query. Returns *NotFoundError when no machine was found. +func (mq *MachineQuery) First(ctx context.Context) (*Machine, error) { + nodes, err := mq.Limit(1).All(ctx) + if err != nil { + return nil, err + } + if len(nodes) == 0 { + return nil, &NotFoundError{machine.Label} + } + return nodes[0], nil +} + +// FirstX is like First, but panics if an error occurs. +func (mq *MachineQuery) FirstX(ctx context.Context) *Machine { + node, err := mq.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return node +} + +// FirstID returns the first Machine id in the query. Returns *NotFoundError when no id was found. +func (mq *MachineQuery) FirstID(ctx context.Context) (id int, err error) { + var ids []int + if ids, err = mq.Limit(1).IDs(ctx); err != nil { + return + } + if len(ids) == 0 { + err = &NotFoundError{machine.Label} + return + } + return ids[0], nil +} + +// FirstXID is like FirstID, but panics if an error occurs. +func (mq *MachineQuery) FirstXID(ctx context.Context) int { + id, err := mq.FirstID(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return id +} + +// Only returns the only Machine entity in the query, returns an error if not exactly one entity was returned. +func (mq *MachineQuery) Only(ctx context.Context) (*Machine, error) { + nodes, err := mq.Limit(2).All(ctx) + if err != nil { + return nil, err + } + switch len(nodes) { + case 1: + return nodes[0], nil + case 0: + return nil, &NotFoundError{machine.Label} + default: + return nil, &NotSingularError{machine.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. +func (mq *MachineQuery) OnlyX(ctx context.Context) *Machine { + node, err := mq.Only(ctx) + if err != nil { + panic(err) + } + return node +} + +// OnlyID returns the only Machine id in the query, returns an error if not exactly one id was returned. +func (mq *MachineQuery) OnlyID(ctx context.Context) (id int, err error) { + var ids []int + if ids, err = mq.Limit(2).IDs(ctx); err != nil { + return + } + switch len(ids) { + case 1: + id = ids[0] + case 0: + err = &NotFoundError{machine.Label} + default: + err = &NotSingularError{machine.Label} + } + return +} + +// OnlyIDX is like OnlyID, but panics if an error occurs. +func (mq *MachineQuery) OnlyIDX(ctx context.Context) int { + id, err := mq.OnlyID(ctx) + if err != nil { + panic(err) + } + return id +} + +// All executes the query and returns a list of Machines. +func (mq *MachineQuery) All(ctx context.Context) ([]*Machine, error) { + if err := mq.prepareQuery(ctx); err != nil { + return nil, err + } + return mq.sqlAll(ctx) +} + +// AllX is like All, but panics if an error occurs. 
+func (mq *MachineQuery) AllX(ctx context.Context) []*Machine {
+	nodes, err := mq.All(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return nodes
+}
+
+// IDs executes the query and returns a list of Machine ids.
+func (mq *MachineQuery) IDs(ctx context.Context) ([]int, error) {
+	var ids []int
+	if err := mq.Select(machine.FieldID).Scan(ctx, &ids); err != nil {
+		return nil, err
+	}
+	return ids, nil
+}
+
+// IDsX is like IDs, but panics if an error occurs.
+func (mq *MachineQuery) IDsX(ctx context.Context) []int {
+	ids, err := mq.IDs(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return ids
+}
+
+// Count returns the count of the given query.
+func (mq *MachineQuery) Count(ctx context.Context) (int, error) {
+	if err := mq.prepareQuery(ctx); err != nil {
+		return 0, err
+	}
+	return mq.sqlCount(ctx)
+}
+
+// CountX is like Count, but panics if an error occurs.
+func (mq *MachineQuery) CountX(ctx context.Context) int {
+	count, err := mq.Count(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return count
+}
+
+// Exist returns true if the query has elements in the graph.
+func (mq *MachineQuery) Exist(ctx context.Context) (bool, error) {
+	if err := mq.prepareQuery(ctx); err != nil {
+		return false, err
+	}
+	return mq.sqlExist(ctx)
+}
+
+// ExistX is like Exist, but panics if an error occurs.
+func (mq *MachineQuery) ExistX(ctx context.Context) bool {
+	exist, err := mq.Exist(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return exist
+}
+
+// Clone returns a duplicate of the query builder, including all associated steps. It can be
+// used to prepare common query builders and use them differently after the clone is made.
+func (mq *MachineQuery) Clone() *MachineQuery {
+	return &MachineQuery{
+		config:     mq.config,
+		limit:      mq.limit,
+		offset:     mq.offset,
+		order:      append([]OrderFunc{}, mq.order...),
+		unique:     append([]string{}, mq.unique...),
+		predicates: append([]predicate.Machine{}, mq.predicates...),
+		// clone intermediate query.
+		sql:  mq.sql.Clone(),
+		path: mq.path,
+	}
+}
+
+// WithAlerts tells the query-builder to eager-load the nodes that are connected to
+// the "alerts" edge. The optional arguments are used to configure the query builder of the edge.
+func (mq *MachineQuery) WithAlerts(opts ...func(*AlertQuery)) *MachineQuery {
+	query := &AlertQuery{config: mq.config}
+	for _, opt := range opts {
+		opt(query)
+	}
+	mq.withAlerts = query
+	return mq
+}
+
+// GroupBy is used to group vertices by one or more fields/columns.
+// It is often used with aggregate functions, like: count, max, mean, min, sum.
+//
+// Example:
+//
+//	var v []struct {
+//		CreatedAt time.Time `json:"created_at,omitempty"`
+//		Count int `json:"count,omitempty"`
+//	}
+//
+//	client.Machine.Query().
+//		GroupBy(machine.FieldCreatedAt).
+//		Aggregate(ent.Count()).
+//		Scan(ctx, &v)
+//
+func (mq *MachineQuery) GroupBy(field string, fields ...string) *MachineGroupBy {
+	group := &MachineGroupBy{config: mq.config}
+	group.fields = append([]string{field}, fields...)
+	group.path = func(ctx context.Context) (prev *sql.Selector, err error) {
+		if err := mq.prepareQuery(ctx); err != nil {
+			return nil, err
+		}
+		return mq.sqlQuery(), nil
+	}
+	return group
+}
+
+// Select one or more fields from the given query.
+//
+// Example:
+//
+//	var v []struct {
+//		CreatedAt time.Time `json:"created_at,omitempty"`
+//	}
+//
+//	client.Machine.Query().
+//		Select(machine.FieldCreatedAt).
+// Scan(ctx, &v) +// +func (mq *MachineQuery) Select(field string, fields ...string) *MachineSelect { + selector := &MachineSelect{config: mq.config} + selector.fields = append([]string{field}, fields...) + selector.path = func(ctx context.Context) (prev *sql.Selector, err error) { + if err := mq.prepareQuery(ctx); err != nil { + return nil, err + } + return mq.sqlQuery(), nil + } + return selector +} + +func (mq *MachineQuery) prepareQuery(ctx context.Context) error { + if mq.path != nil { + prev, err := mq.path(ctx) + if err != nil { + return err + } + mq.sql = prev + } + return nil +} + +func (mq *MachineQuery) sqlAll(ctx context.Context) ([]*Machine, error) { + var ( + nodes = []*Machine{} + _spec = mq.querySpec() + loadedTypes = [1]bool{ + mq.withAlerts != nil, + } + ) + _spec.ScanValues = func() []interface{} { + node := &Machine{config: mq.config} + nodes = append(nodes, node) + values := node.scanValues() + return values + } + _spec.Assign = func(values ...interface{}) error { + if len(nodes) == 0 { + return fmt.Errorf("ent: Assign called without calling ScanValues") + } + node := nodes[len(nodes)-1] + node.Edges.loadedTypes = loadedTypes + return node.assignValues(values...) + } + if err := sqlgraph.QueryNodes(ctx, mq.driver, _spec); err != nil { + return nil, err + } + if len(nodes) == 0 { + return nodes, nil + } + + if query := mq.withAlerts; query != nil { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[int]*Machine) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + } + query.withFKs = true + query.Where(predicate.Alert(func(s *sql.Selector) { + s.Where(sql.InValues(machine.AlertsColumn, fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + return nil, err + } + for _, n := range neighbors { + fk := n.machine_alerts + if fk == nil { + return nil, fmt.Errorf(`foreign-key "machine_alerts" is nil for node %v`, n.ID) + } + node, ok := nodeids[*fk] + if !ok { + return nil, fmt.Errorf(`unexpected foreign-key "machine_alerts" returned %v for node %v`, *fk, n.ID) + } + node.Edges.Alerts = append(node.Edges.Alerts, n) + } + } + + return nodes, nil +} + +func (mq *MachineQuery) sqlCount(ctx context.Context) (int, error) { + _spec := mq.querySpec() + return sqlgraph.CountNodes(ctx, mq.driver, _spec) +} + +func (mq *MachineQuery) sqlExist(ctx context.Context) (bool, error) { + n, err := mq.sqlCount(ctx) + if err != nil { + return false, fmt.Errorf("ent: check existence: %v", err) + } + return n > 0, nil +} + +func (mq *MachineQuery) querySpec() *sqlgraph.QuerySpec { + _spec := &sqlgraph.QuerySpec{ + Node: &sqlgraph.NodeSpec{ + Table: machine.Table, + Columns: machine.Columns, + ID: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: machine.FieldID, + }, + }, + From: mq.sql, + Unique: true, + } + if ps := mq.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if limit := mq.limit; limit != nil { + _spec.Limit = *limit + } + if offset := mq.offset; offset != nil { + _spec.Offset = *offset + } + if ps := mq.order; len(ps) > 0 { + _spec.Order = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector, machine.ValidColumn) + } + } + } + return _spec +} + +func (mq *MachineQuery) sqlQuery() *sql.Selector { + builder := sql.Dialect(mq.driver.Dialect()) + t1 := builder.Table(machine.Table) + selector := builder.Select(t1.Columns(machine.Columns...)...).From(t1) + if mq.sql != nil { + selector = mq.sql + 
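+		// An intermediate selector set by a traversal may carry a different
+		// column set, so re-select the machine columns explicitly here.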
selector.Select(selector.Columns(machine.Columns...)...) + } + for _, p := range mq.predicates { + p(selector) + } + for _, p := range mq.order { + p(selector, machine.ValidColumn) + } + if offset := mq.offset; offset != nil { + // limit is mandatory for offset clause. We start + // with default value, and override it below if needed. + selector.Offset(*offset).Limit(math.MaxInt32) + } + if limit := mq.limit; limit != nil { + selector.Limit(*limit) + } + return selector +} + +// MachineGroupBy is the builder for group-by Machine entities. +type MachineGroupBy struct { + config + fields []string + fns []AggregateFunc + // intermediate query (i.e. traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Aggregate adds the given aggregation functions to the group-by query. +func (mgb *MachineGroupBy) Aggregate(fns ...AggregateFunc) *MachineGroupBy { + mgb.fns = append(mgb.fns, fns...) + return mgb +} + +// Scan applies the group-by query and scan the result into the given value. +func (mgb *MachineGroupBy) Scan(ctx context.Context, v interface{}) error { + query, err := mgb.path(ctx) + if err != nil { + return err + } + mgb.sql = query + return mgb.sqlScan(ctx, v) +} + +// ScanX is like Scan, but panics if an error occurs. +func (mgb *MachineGroupBy) ScanX(ctx context.Context, v interface{}) { + if err := mgb.Scan(ctx, v); err != nil { + panic(err) + } +} + +// Strings returns list of strings from group-by. It is only allowed when querying group-by with one field. +func (mgb *MachineGroupBy) Strings(ctx context.Context) ([]string, error) { + if len(mgb.fields) > 1 { + return nil, errors.New("ent: MachineGroupBy.Strings is not achievable when grouping more than 1 field") + } + var v []string + if err := mgb.Scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// StringsX is like Strings, but panics if an error occurs. +func (mgb *MachineGroupBy) StringsX(ctx context.Context) []string { + v, err := mgb.Strings(ctx) + if err != nil { + panic(err) + } + return v +} + +// String returns a single string from group-by. It is only allowed when querying group-by with one field. +func (mgb *MachineGroupBy) String(ctx context.Context) (_ string, err error) { + var v []string + if v, err = mgb.Strings(ctx); err != nil { + return + } + switch len(v) { + case 1: + return v[0], nil + case 0: + err = &NotFoundError{machine.Label} + default: + err = fmt.Errorf("ent: MachineGroupBy.Strings returned %d results when one was expected", len(v)) + } + return +} + +// StringX is like String, but panics if an error occurs. +func (mgb *MachineGroupBy) StringX(ctx context.Context) string { + v, err := mgb.String(ctx) + if err != nil { + panic(err) + } + return v +} + +// Ints returns list of ints from group-by. It is only allowed when querying group-by with one field. +func (mgb *MachineGroupBy) Ints(ctx context.Context) ([]int, error) { + if len(mgb.fields) > 1 { + return nil, errors.New("ent: MachineGroupBy.Ints is not achievable when grouping more than 1 field") + } + var v []int + if err := mgb.Scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// IntsX is like Ints, but panics if an error occurs. +func (mgb *MachineGroupBy) IntsX(ctx context.Context) []int { + v, err := mgb.Ints(ctx) + if err != nil { + panic(err) + } + return v +} + +// Int returns a single int from group-by. It is only allowed when querying group-by with one field. 
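+//
+// A sketch (hypothetical aggregation); note it fails with an error if the
+// group-by yields more than one row:
+//
+//	total, err := client.Machine.Query().
+//		GroupBy(machine.FieldStatus).
+//		Aggregate(ent.Count()).
+//		Int(ctx)
+//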
+func (mgb *MachineGroupBy) Int(ctx context.Context) (_ int, err error) { + var v []int + if v, err = mgb.Ints(ctx); err != nil { + return + } + switch len(v) { + case 1: + return v[0], nil + case 0: + err = &NotFoundError{machine.Label} + default: + err = fmt.Errorf("ent: MachineGroupBy.Ints returned %d results when one was expected", len(v)) + } + return +} + +// IntX is like Int, but panics if an error occurs. +func (mgb *MachineGroupBy) IntX(ctx context.Context) int { + v, err := mgb.Int(ctx) + if err != nil { + panic(err) + } + return v +} + +// Float64s returns list of float64s from group-by. It is only allowed when querying group-by with one field. +func (mgb *MachineGroupBy) Float64s(ctx context.Context) ([]float64, error) { + if len(mgb.fields) > 1 { + return nil, errors.New("ent: MachineGroupBy.Float64s is not achievable when grouping more than 1 field") + } + var v []float64 + if err := mgb.Scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// Float64sX is like Float64s, but panics if an error occurs. +func (mgb *MachineGroupBy) Float64sX(ctx context.Context) []float64 { + v, err := mgb.Float64s(ctx) + if err != nil { + panic(err) + } + return v +} + +// Float64 returns a single float64 from group-by. It is only allowed when querying group-by with one field. +func (mgb *MachineGroupBy) Float64(ctx context.Context) (_ float64, err error) { + var v []float64 + if v, err = mgb.Float64s(ctx); err != nil { + return + } + switch len(v) { + case 1: + return v[0], nil + case 0: + err = &NotFoundError{machine.Label} + default: + err = fmt.Errorf("ent: MachineGroupBy.Float64s returned %d results when one was expected", len(v)) + } + return +} + +// Float64X is like Float64, but panics if an error occurs. +func (mgb *MachineGroupBy) Float64X(ctx context.Context) float64 { + v, err := mgb.Float64(ctx) + if err != nil { + panic(err) + } + return v +} + +// Bools returns list of bools from group-by. It is only allowed when querying group-by with one field. +func (mgb *MachineGroupBy) Bools(ctx context.Context) ([]bool, error) { + if len(mgb.fields) > 1 { + return nil, errors.New("ent: MachineGroupBy.Bools is not achievable when grouping more than 1 field") + } + var v []bool + if err := mgb.Scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// BoolsX is like Bools, but panics if an error occurs. +func (mgb *MachineGroupBy) BoolsX(ctx context.Context) []bool { + v, err := mgb.Bools(ctx) + if err != nil { + panic(err) + } + return v +} + +// Bool returns a single bool from group-by. It is only allowed when querying group-by with one field. +func (mgb *MachineGroupBy) Bool(ctx context.Context) (_ bool, err error) { + var v []bool + if v, err = mgb.Bools(ctx); err != nil { + return + } + switch len(v) { + case 1: + return v[0], nil + case 0: + err = &NotFoundError{machine.Label} + default: + err = fmt.Errorf("ent: MachineGroupBy.Bools returned %d results when one was expected", len(v)) + } + return +} + +// BoolX is like Bool, but panics if an error occurs. 
+func (mgb *MachineGroupBy) BoolX(ctx context.Context) bool { + v, err := mgb.Bool(ctx) + if err != nil { + panic(err) + } + return v +} + +func (mgb *MachineGroupBy) sqlScan(ctx context.Context, v interface{}) error { + for _, f := range mgb.fields { + if !machine.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("invalid field %q for group-by", f)} + } + } + selector := mgb.sqlQuery() + if err := selector.Err(); err != nil { + return err + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := mgb.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +func (mgb *MachineGroupBy) sqlQuery() *sql.Selector { + selector := mgb.sql + columns := make([]string, 0, len(mgb.fields)+len(mgb.fns)) + columns = append(columns, mgb.fields...) + for _, fn := range mgb.fns { + columns = append(columns, fn(selector, machine.ValidColumn)) + } + return selector.Select(columns...).GroupBy(mgb.fields...) +} + +// MachineSelect is the builder for select fields of Machine entities. +type MachineSelect struct { + config + fields []string + // intermediate query (i.e. traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Scan applies the selector query and scan the result into the given value. +func (ms *MachineSelect) Scan(ctx context.Context, v interface{}) error { + query, err := ms.path(ctx) + if err != nil { + return err + } + ms.sql = query + return ms.sqlScan(ctx, v) +} + +// ScanX is like Scan, but panics if an error occurs. +func (ms *MachineSelect) ScanX(ctx context.Context, v interface{}) { + if err := ms.Scan(ctx, v); err != nil { + panic(err) + } +} + +// Strings returns list of strings from selector. It is only allowed when selecting one field. +func (ms *MachineSelect) Strings(ctx context.Context) ([]string, error) { + if len(ms.fields) > 1 { + return nil, errors.New("ent: MachineSelect.Strings is not achievable when selecting more than 1 field") + } + var v []string + if err := ms.Scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// StringsX is like Strings, but panics if an error occurs. +func (ms *MachineSelect) StringsX(ctx context.Context) []string { + v, err := ms.Strings(ctx) + if err != nil { + panic(err) + } + return v +} + +// String returns a single string from selector. It is only allowed when selecting one field. +func (ms *MachineSelect) String(ctx context.Context) (_ string, err error) { + var v []string + if v, err = ms.Strings(ctx); err != nil { + return + } + switch len(v) { + case 1: + return v[0], nil + case 0: + err = &NotFoundError{machine.Label} + default: + err = fmt.Errorf("ent: MachineSelect.Strings returned %d results when one was expected", len(v)) + } + return +} + +// StringX is like String, but panics if an error occurs. +func (ms *MachineSelect) StringX(ctx context.Context) string { + v, err := ms.String(ctx) + if err != nil { + panic(err) + } + return v +} + +// Ints returns list of ints from selector. It is only allowed when selecting one field. +func (ms *MachineSelect) Ints(ctx context.Context) ([]int, error) { + if len(ms.fields) > 1 { + return nil, errors.New("ent: MachineSelect.Ints is not achievable when selecting more than 1 field") + } + var v []int + if err := ms.Scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// IntsX is like Ints, but panics if an error occurs. 
+func (ms *MachineSelect) IntsX(ctx context.Context) []int { + v, err := ms.Ints(ctx) + if err != nil { + panic(err) + } + return v +} + +// Int returns a single int from selector. It is only allowed when selecting one field. +func (ms *MachineSelect) Int(ctx context.Context) (_ int, err error) { + var v []int + if v, err = ms.Ints(ctx); err != nil { + return + } + switch len(v) { + case 1: + return v[0], nil + case 0: + err = &NotFoundError{machine.Label} + default: + err = fmt.Errorf("ent: MachineSelect.Ints returned %d results when one was expected", len(v)) + } + return +} + +// IntX is like Int, but panics if an error occurs. +func (ms *MachineSelect) IntX(ctx context.Context) int { + v, err := ms.Int(ctx) + if err != nil { + panic(err) + } + return v +} + +// Float64s returns list of float64s from selector. It is only allowed when selecting one field. +func (ms *MachineSelect) Float64s(ctx context.Context) ([]float64, error) { + if len(ms.fields) > 1 { + return nil, errors.New("ent: MachineSelect.Float64s is not achievable when selecting more than 1 field") + } + var v []float64 + if err := ms.Scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// Float64sX is like Float64s, but panics if an error occurs. +func (ms *MachineSelect) Float64sX(ctx context.Context) []float64 { + v, err := ms.Float64s(ctx) + if err != nil { + panic(err) + } + return v +} + +// Float64 returns a single float64 from selector. It is only allowed when selecting one field. +func (ms *MachineSelect) Float64(ctx context.Context) (_ float64, err error) { + var v []float64 + if v, err = ms.Float64s(ctx); err != nil { + return + } + switch len(v) { + case 1: + return v[0], nil + case 0: + err = &NotFoundError{machine.Label} + default: + err = fmt.Errorf("ent: MachineSelect.Float64s returned %d results when one was expected", len(v)) + } + return +} + +// Float64X is like Float64, but panics if an error occurs. +func (ms *MachineSelect) Float64X(ctx context.Context) float64 { + v, err := ms.Float64(ctx) + if err != nil { + panic(err) + } + return v +} + +// Bools returns list of bools from selector. It is only allowed when selecting one field. +func (ms *MachineSelect) Bools(ctx context.Context) ([]bool, error) { + if len(ms.fields) > 1 { + return nil, errors.New("ent: MachineSelect.Bools is not achievable when selecting more than 1 field") + } + var v []bool + if err := ms.Scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// BoolsX is like Bools, but panics if an error occurs. +func (ms *MachineSelect) BoolsX(ctx context.Context) []bool { + v, err := ms.Bools(ctx) + if err != nil { + panic(err) + } + return v +} + +// Bool returns a single bool from selector. It is only allowed when selecting one field. +func (ms *MachineSelect) Bool(ctx context.Context) (_ bool, err error) { + var v []bool + if v, err = ms.Bools(ctx); err != nil { + return + } + switch len(v) { + case 1: + return v[0], nil + case 0: + err = &NotFoundError{machine.Label} + default: + err = fmt.Errorf("ent: MachineSelect.Bools returned %d results when one was expected", len(v)) + } + return +} + +// BoolX is like Bool, but panics if an error occurs. 
+func (ms *MachineSelect) BoolX(ctx context.Context) bool { + v, err := ms.Bool(ctx) + if err != nil { + panic(err) + } + return v +} + +func (ms *MachineSelect) sqlScan(ctx context.Context, v interface{}) error { + for _, f := range ms.fields { + if !machine.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("invalid field %q for selection", f)} + } + } + rows := &sql.Rows{} + query, args := ms.sqlQuery().Query() + if err := ms.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +func (ms *MachineSelect) sqlQuery() sql.Querier { + selector := ms.sql + selector.Select(selector.Columns(ms.fields...)...) + return selector +} diff --git a/pkg/database/ent/machine_update.go b/pkg/database/ent/machine_update.go new file mode 100644 index 000000000..fb7b99160 --- /dev/null +++ b/pkg/database/ent/machine_update.go @@ -0,0 +1,822 @@ +// Code generated by entc, DO NOT EDIT. + +package ent + +import ( + "context" + "fmt" + "time" + + "github.com/crowdsecurity/crowdsec/pkg/database/ent/alert" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/machine" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/predicate" + "github.com/facebook/ent/dialect/sql" + "github.com/facebook/ent/dialect/sql/sqlgraph" + "github.com/facebook/ent/schema/field" +) + +// MachineUpdate is the builder for updating Machine entities. +type MachineUpdate struct { + config + hooks []Hook + mutation *MachineMutation + predicates []predicate.Machine +} + +// Where adds a new predicate for the builder. +func (mu *MachineUpdate) Where(ps ...predicate.Machine) *MachineUpdate { + mu.predicates = append(mu.predicates, ps...) + return mu +} + +// SetCreatedAt sets the created_at field. +func (mu *MachineUpdate) SetCreatedAt(t time.Time) *MachineUpdate { + mu.mutation.SetCreatedAt(t) + return mu +} + +// SetNillableCreatedAt sets the created_at field if the given value is not nil. +func (mu *MachineUpdate) SetNillableCreatedAt(t *time.Time) *MachineUpdate { + if t != nil { + mu.SetCreatedAt(*t) + } + return mu +} + +// SetUpdatedAt sets the updated_at field. +func (mu *MachineUpdate) SetUpdatedAt(t time.Time) *MachineUpdate { + mu.mutation.SetUpdatedAt(t) + return mu +} + +// SetNillableUpdatedAt sets the updated_at field if the given value is not nil. +func (mu *MachineUpdate) SetNillableUpdatedAt(t *time.Time) *MachineUpdate { + if t != nil { + mu.SetUpdatedAt(*t) + } + return mu +} + +// SetMachineId sets the machineId field. +func (mu *MachineUpdate) SetMachineId(s string) *MachineUpdate { + mu.mutation.SetMachineId(s) + return mu +} + +// SetPassword sets the password field. +func (mu *MachineUpdate) SetPassword(s string) *MachineUpdate { + mu.mutation.SetPassword(s) + return mu +} + +// SetIpAddress sets the ipAddress field. +func (mu *MachineUpdate) SetIpAddress(s string) *MachineUpdate { + mu.mutation.SetIpAddress(s) + return mu +} + +// SetScenarios sets the scenarios field. +func (mu *MachineUpdate) SetScenarios(s string) *MachineUpdate { + mu.mutation.SetScenarios(s) + return mu +} + +// SetNillableScenarios sets the scenarios field if the given value is not nil. +func (mu *MachineUpdate) SetNillableScenarios(s *string) *MachineUpdate { + if s != nil { + mu.SetScenarios(*s) + } + return mu +} + +// ClearScenarios clears the value of scenarios. +func (mu *MachineUpdate) ClearScenarios() *MachineUpdate { + mu.mutation.ClearScenarios() + return mu +} + +// SetVersion sets the version field. 
+func (mu *MachineUpdate) SetVersion(s string) *MachineUpdate { + mu.mutation.SetVersion(s) + return mu +} + +// SetNillableVersion sets the version field if the given value is not nil. +func (mu *MachineUpdate) SetNillableVersion(s *string) *MachineUpdate { + if s != nil { + mu.SetVersion(*s) + } + return mu +} + +// ClearVersion clears the value of version. +func (mu *MachineUpdate) ClearVersion() *MachineUpdate { + mu.mutation.ClearVersion() + return mu +} + +// SetIsValidated sets the isValidated field. +func (mu *MachineUpdate) SetIsValidated(b bool) *MachineUpdate { + mu.mutation.SetIsValidated(b) + return mu +} + +// SetNillableIsValidated sets the isValidated field if the given value is not nil. +func (mu *MachineUpdate) SetNillableIsValidated(b *bool) *MachineUpdate { + if b != nil { + mu.SetIsValidated(*b) + } + return mu +} + +// SetStatus sets the status field. +func (mu *MachineUpdate) SetStatus(s string) *MachineUpdate { + mu.mutation.SetStatus(s) + return mu +} + +// SetNillableStatus sets the status field if the given value is not nil. +func (mu *MachineUpdate) SetNillableStatus(s *string) *MachineUpdate { + if s != nil { + mu.SetStatus(*s) + } + return mu +} + +// ClearStatus clears the value of status. +func (mu *MachineUpdate) ClearStatus() *MachineUpdate { + mu.mutation.ClearStatus() + return mu +} + +// AddAlertIDs adds the alerts edge to Alert by ids. +func (mu *MachineUpdate) AddAlertIDs(ids ...int) *MachineUpdate { + mu.mutation.AddAlertIDs(ids...) + return mu +} + +// AddAlerts adds the alerts edges to Alert. +func (mu *MachineUpdate) AddAlerts(a ...*Alert) *MachineUpdate { + ids := make([]int, len(a)) + for i := range a { + ids[i] = a[i].ID + } + return mu.AddAlertIDs(ids...) +} + +// Mutation returns the MachineMutation object of the builder. +func (mu *MachineUpdate) Mutation() *MachineMutation { + return mu.mutation +} + +// ClearAlerts clears all "alerts" edges to type Alert. +func (mu *MachineUpdate) ClearAlerts() *MachineUpdate { + mu.mutation.ClearAlerts() + return mu +} + +// RemoveAlertIDs removes the alerts edge to Alert by ids. +func (mu *MachineUpdate) RemoveAlertIDs(ids ...int) *MachineUpdate { + mu.mutation.RemoveAlertIDs(ids...) + return mu +} + +// RemoveAlerts removes alerts edges to Alert. +func (mu *MachineUpdate) RemoveAlerts(a ...*Alert) *MachineUpdate { + ids := make([]int, len(a)) + for i := range a { + ids[i] = a[i].ID + } + return mu.RemoveAlertIDs(ids...) +} + +// Save executes the query and returns the number of rows/vertices matched by this operation. +func (mu *MachineUpdate) Save(ctx context.Context) (int, error) { + var ( + err error + affected int + ) + if len(mu.hooks) == 0 { + if err = mu.check(); err != nil { + return 0, err + } + affected, err = mu.sqlSave(ctx) + } else { + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*MachineMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err = mu.check(); err != nil { + return 0, err + } + mu.mutation = mutation + affected, err = mu.sqlSave(ctx) + mutation.done = true + return affected, err + }) + for i := len(mu.hooks) - 1; i >= 0; i-- { + mut = mu.hooks[i](mut) + } + if _, err := mut.Mutate(ctx, mu.mutation); err != nil { + return 0, err + } + } + return affected, err +} + +// SaveX is like Save, but panics if an error occurs. 
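+//
+// For example (a sketch; the predicate and status value are illustrative,
+// and machine.IsValidated is assumed to be the generated equality predicate
+// for that field):
+//
+//	n := client.Machine.Update().
+//		Where(machine.IsValidated(false)).
+//		SetStatus("pending").
+//		SaveX(ctx)
+//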
+func (mu *MachineUpdate) SaveX(ctx context.Context) int { + affected, err := mu.Save(ctx) + if err != nil { + panic(err) + } + return affected +} + +// Exec executes the query. +func (mu *MachineUpdate) Exec(ctx context.Context) error { + _, err := mu.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (mu *MachineUpdate) ExecX(ctx context.Context) { + if err := mu.Exec(ctx); err != nil { + panic(err) + } +} + +// check runs all checks and user-defined validators on the builder. +func (mu *MachineUpdate) check() error { + if v, ok := mu.mutation.Scenarios(); ok { + if err := machine.ScenariosValidator(v); err != nil { + return &ValidationError{Name: "scenarios", err: fmt.Errorf("ent: validator failed for field \"scenarios\": %w", err)} + } + } + return nil +} + +func (mu *MachineUpdate) sqlSave(ctx context.Context) (n int, err error) { + _spec := &sqlgraph.UpdateSpec{ + Node: &sqlgraph.NodeSpec{ + Table: machine.Table, + Columns: machine.Columns, + ID: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: machine.FieldID, + }, + }, + } + if ps := mu.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := mu.mutation.CreatedAt(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: machine.FieldCreatedAt, + }) + } + if value, ok := mu.mutation.UpdatedAt(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: machine.FieldUpdatedAt, + }) + } + if value, ok := mu.mutation.MachineId(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: machine.FieldMachineId, + }) + } + if value, ok := mu.mutation.Password(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: machine.FieldPassword, + }) + } + if value, ok := mu.mutation.IpAddress(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: machine.FieldIpAddress, + }) + } + if value, ok := mu.mutation.Scenarios(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: machine.FieldScenarios, + }) + } + if mu.mutation.ScenariosCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Column: machine.FieldScenarios, + }) + } + if value, ok := mu.mutation.Version(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: machine.FieldVersion, + }) + } + if mu.mutation.VersionCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Column: machine.FieldVersion, + }) + } + if value, ok := mu.mutation.IsValidated(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeBool, + Value: value, + Column: machine.FieldIsValidated, + }) + } + if value, ok := mu.mutation.Status(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: machine.FieldStatus, + }) + } + if mu.mutation.StatusCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Column: machine.FieldStatus, + }) + } + if 
mu.mutation.AlertsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: machine.AlertsTable, + Columns: []string{machine.AlertsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: alert.FieldID, + }, + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := mu.mutation.RemovedAlertsIDs(); len(nodes) > 0 && !mu.mutation.AlertsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: machine.AlertsTable, + Columns: []string{machine.AlertsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: alert.FieldID, + }, + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := mu.mutation.AlertsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: machine.AlertsTable, + Columns: []string{machine.AlertsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: alert.FieldID, + }, + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if n, err = sqlgraph.UpdateNodes(ctx, mu.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{machine.Label} + } else if cerr, ok := isSQLConstraintError(err); ok { + err = cerr + } + return 0, err + } + return n, nil +} + +// MachineUpdateOne is the builder for updating a single Machine entity. +type MachineUpdateOne struct { + config + hooks []Hook + mutation *MachineMutation +} + +// SetCreatedAt sets the created_at field. +func (muo *MachineUpdateOne) SetCreatedAt(t time.Time) *MachineUpdateOne { + muo.mutation.SetCreatedAt(t) + return muo +} + +// SetNillableCreatedAt sets the created_at field if the given value is not nil. +func (muo *MachineUpdateOne) SetNillableCreatedAt(t *time.Time) *MachineUpdateOne { + if t != nil { + muo.SetCreatedAt(*t) + } + return muo +} + +// SetUpdatedAt sets the updated_at field. +func (muo *MachineUpdateOne) SetUpdatedAt(t time.Time) *MachineUpdateOne { + muo.mutation.SetUpdatedAt(t) + return muo +} + +// SetNillableUpdatedAt sets the updated_at field if the given value is not nil. +func (muo *MachineUpdateOne) SetNillableUpdatedAt(t *time.Time) *MachineUpdateOne { + if t != nil { + muo.SetUpdatedAt(*t) + } + return muo +} + +// SetMachineId sets the machineId field. +func (muo *MachineUpdateOne) SetMachineId(s string) *MachineUpdateOne { + muo.mutation.SetMachineId(s) + return muo +} + +// SetPassword sets the password field. +func (muo *MachineUpdateOne) SetPassword(s string) *MachineUpdateOne { + muo.mutation.SetPassword(s) + return muo +} + +// SetIpAddress sets the ipAddress field. +func (muo *MachineUpdateOne) SetIpAddress(s string) *MachineUpdateOne { + muo.mutation.SetIpAddress(s) + return muo +} + +// SetScenarios sets the scenarios field. +func (muo *MachineUpdateOne) SetScenarios(s string) *MachineUpdateOne { + muo.mutation.SetScenarios(s) + return muo +} + +// SetNillableScenarios sets the scenarios field if the given value is not nil. +func (muo *MachineUpdateOne) SetNillableScenarios(s *string) *MachineUpdateOne { + if s != nil { + muo.SetScenarios(*s) + } + return muo +} + +// ClearScenarios clears the value of scenarios. 
+func (muo *MachineUpdateOne) ClearScenarios() *MachineUpdateOne { + muo.mutation.ClearScenarios() + return muo +} + +// SetVersion sets the version field. +func (muo *MachineUpdateOne) SetVersion(s string) *MachineUpdateOne { + muo.mutation.SetVersion(s) + return muo +} + +// SetNillableVersion sets the version field if the given value is not nil. +func (muo *MachineUpdateOne) SetNillableVersion(s *string) *MachineUpdateOne { + if s != nil { + muo.SetVersion(*s) + } + return muo +} + +// ClearVersion clears the value of version. +func (muo *MachineUpdateOne) ClearVersion() *MachineUpdateOne { + muo.mutation.ClearVersion() + return muo +} + +// SetIsValidated sets the isValidated field. +func (muo *MachineUpdateOne) SetIsValidated(b bool) *MachineUpdateOne { + muo.mutation.SetIsValidated(b) + return muo +} + +// SetNillableIsValidated sets the isValidated field if the given value is not nil. +func (muo *MachineUpdateOne) SetNillableIsValidated(b *bool) *MachineUpdateOne { + if b != nil { + muo.SetIsValidated(*b) + } + return muo +} + +// SetStatus sets the status field. +func (muo *MachineUpdateOne) SetStatus(s string) *MachineUpdateOne { + muo.mutation.SetStatus(s) + return muo +} + +// SetNillableStatus sets the status field if the given value is not nil. +func (muo *MachineUpdateOne) SetNillableStatus(s *string) *MachineUpdateOne { + if s != nil { + muo.SetStatus(*s) + } + return muo +} + +// ClearStatus clears the value of status. +func (muo *MachineUpdateOne) ClearStatus() *MachineUpdateOne { + muo.mutation.ClearStatus() + return muo +} + +// AddAlertIDs adds the alerts edge to Alert by ids. +func (muo *MachineUpdateOne) AddAlertIDs(ids ...int) *MachineUpdateOne { + muo.mutation.AddAlertIDs(ids...) + return muo +} + +// AddAlerts adds the alerts edges to Alert. +func (muo *MachineUpdateOne) AddAlerts(a ...*Alert) *MachineUpdateOne { + ids := make([]int, len(a)) + for i := range a { + ids[i] = a[i].ID + } + return muo.AddAlertIDs(ids...) +} + +// Mutation returns the MachineMutation object of the builder. +func (muo *MachineUpdateOne) Mutation() *MachineMutation { + return muo.mutation +} + +// ClearAlerts clears all "alerts" edges to type Alert. +func (muo *MachineUpdateOne) ClearAlerts() *MachineUpdateOne { + muo.mutation.ClearAlerts() + return muo +} + +// RemoveAlertIDs removes the alerts edge to Alert by ids. +func (muo *MachineUpdateOne) RemoveAlertIDs(ids ...int) *MachineUpdateOne { + muo.mutation.RemoveAlertIDs(ids...) + return muo +} + +// RemoveAlerts removes alerts edges to Alert. +func (muo *MachineUpdateOne) RemoveAlerts(a ...*Alert) *MachineUpdateOne { + ids := make([]int, len(a)) + for i := range a { + ids[i] = a[i].ID + } + return muo.RemoveAlertIDs(ids...) +} + +// Save executes the query and returns the updated entity. 
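+//
+// A usage sketch (the id and field values are illustrative):
+//
+//	m, err := client.Machine.UpdateOneID(id).
+//		SetIsValidated(true).
+//		SetStatus("validated").
+//		Save(ctx)
+//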
+func (muo *MachineUpdateOne) Save(ctx context.Context) (*Machine, error) { + var ( + err error + node *Machine + ) + if len(muo.hooks) == 0 { + if err = muo.check(); err != nil { + return nil, err + } + node, err = muo.sqlSave(ctx) + } else { + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*MachineMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err = muo.check(); err != nil { + return nil, err + } + muo.mutation = mutation + node, err = muo.sqlSave(ctx) + mutation.done = true + return node, err + }) + for i := len(muo.hooks) - 1; i >= 0; i-- { + mut = muo.hooks[i](mut) + } + if _, err := mut.Mutate(ctx, muo.mutation); err != nil { + return nil, err + } + } + return node, err +} + +// SaveX is like Save, but panics if an error occurs. +func (muo *MachineUpdateOne) SaveX(ctx context.Context) *Machine { + node, err := muo.Save(ctx) + if err != nil { + panic(err) + } + return node +} + +// Exec executes the query on the entity. +func (muo *MachineUpdateOne) Exec(ctx context.Context) error { + _, err := muo.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (muo *MachineUpdateOne) ExecX(ctx context.Context) { + if err := muo.Exec(ctx); err != nil { + panic(err) + } +} + +// check runs all checks and user-defined validators on the builder. +func (muo *MachineUpdateOne) check() error { + if v, ok := muo.mutation.Scenarios(); ok { + if err := machine.ScenariosValidator(v); err != nil { + return &ValidationError{Name: "scenarios", err: fmt.Errorf("ent: validator failed for field \"scenarios\": %w", err)} + } + } + return nil +} + +func (muo *MachineUpdateOne) sqlSave(ctx context.Context) (_node *Machine, err error) { + _spec := &sqlgraph.UpdateSpec{ + Node: &sqlgraph.NodeSpec{ + Table: machine.Table, + Columns: machine.Columns, + ID: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: machine.FieldID, + }, + }, + } + id, ok := muo.mutation.ID() + if !ok { + return nil, &ValidationError{Name: "ID", err: fmt.Errorf("missing Machine.ID for update")} + } + _spec.Node.ID.Value = id + if value, ok := muo.mutation.CreatedAt(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: machine.FieldCreatedAt, + }) + } + if value, ok := muo.mutation.UpdatedAt(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: machine.FieldUpdatedAt, + }) + } + if value, ok := muo.mutation.MachineId(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: machine.FieldMachineId, + }) + } + if value, ok := muo.mutation.Password(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: machine.FieldPassword, + }) + } + if value, ok := muo.mutation.IpAddress(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: machine.FieldIpAddress, + }) + } + if value, ok := muo.mutation.Scenarios(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: machine.FieldScenarios, + }) + } + if muo.mutation.ScenariosCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Column: machine.FieldScenarios, + }) + } + if value, ok := 
muo.mutation.Version(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: machine.FieldVersion, + }) + } + if muo.mutation.VersionCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Column: machine.FieldVersion, + }) + } + if value, ok := muo.mutation.IsValidated(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeBool, + Value: value, + Column: machine.FieldIsValidated, + }) + } + if value, ok := muo.mutation.Status(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: machine.FieldStatus, + }) + } + if muo.mutation.StatusCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Column: machine.FieldStatus, + }) + } + if muo.mutation.AlertsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: machine.AlertsTable, + Columns: []string{machine.AlertsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: alert.FieldID, + }, + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := muo.mutation.RemovedAlertsIDs(); len(nodes) > 0 && !muo.mutation.AlertsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: machine.AlertsTable, + Columns: []string{machine.AlertsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: alert.FieldID, + }, + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := muo.mutation.AlertsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: machine.AlertsTable, + Columns: []string{machine.AlertsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: alert.FieldID, + }, + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + _node = &Machine{config: muo.config} + _spec.Assign = _node.assignValues + _spec.ScanValues = _node.scanValues() + if err = sqlgraph.UpdateNode(ctx, muo.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{machine.Label} + } else if cerr, ok := isSQLConstraintError(err); ok { + err = cerr + } + return nil, err + } + return _node, nil +} diff --git a/pkg/database/ent/meta.go b/pkg/database/ent/meta.go new file mode 100644 index 000000000..257f59893 --- /dev/null +++ b/pkg/database/ent/meta.go @@ -0,0 +1,166 @@ +// Code generated by entc, DO NOT EDIT. + +package ent + +import ( + "fmt" + "strings" + "time" + + "github.com/crowdsecurity/crowdsec/pkg/database/ent/alert" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/meta" + "github.com/facebook/ent/dialect/sql" +) + +// Meta is the model entity for the Meta schema. +type Meta struct { + config `json:"-"` + // ID of the ent. + ID int `json:"id,omitempty"` + // CreatedAt holds the value of the "created_at" field. + CreatedAt time.Time `json:"created_at,omitempty"` + // UpdatedAt holds the value of the "updated_at" field. + UpdatedAt time.Time `json:"updated_at,omitempty"` + // Key holds the value of the "key" field. 
+ Key string `json:"key,omitempty"` + // Value holds the value of the "value" field. + Value string `json:"value,omitempty"` + // Edges holds the relations/edges for other nodes in the graph. + // The values are being populated by the MetaQuery when eager-loading is set. + Edges MetaEdges `json:"edges"` + alert_metas *int +} + +// MetaEdges holds the relations/edges for other nodes in the graph. +type MetaEdges struct { + // Owner holds the value of the owner edge. + Owner *Alert + // loadedTypes holds the information for reporting if a + // type was loaded (or requested) in eager-loading or not. + loadedTypes [1]bool +} + +// OwnerOrErr returns the Owner value or an error if the edge +// was not loaded in eager-loading, or loaded but was not found. +func (e MetaEdges) OwnerOrErr() (*Alert, error) { + if e.loadedTypes[0] { + if e.Owner == nil { + // The edge owner was loaded in eager-loading, + // but was not found. + return nil, &NotFoundError{label: alert.Label} + } + return e.Owner, nil + } + return nil, &NotLoadedError{edge: "owner"} +} + +// scanValues returns the types for scanning values from sql.Rows. +func (*Meta) scanValues() []interface{} { + return []interface{}{ + &sql.NullInt64{}, // id + &sql.NullTime{}, // created_at + &sql.NullTime{}, // updated_at + &sql.NullString{}, // key + &sql.NullString{}, // value + } +} + +// fkValues returns the types for scanning foreign-keys values from sql.Rows. +func (*Meta) fkValues() []interface{} { + return []interface{}{ + &sql.NullInt64{}, // alert_metas + } +} + +// assignValues assigns the values that were returned from sql.Rows (after scanning) +// to the Meta fields. +func (m *Meta) assignValues(values ...interface{}) error { + if m, n := len(values), len(meta.Columns); m < n { + return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) + } + value, ok := values[0].(*sql.NullInt64) + if !ok { + return fmt.Errorf("unexpected type %T for field id", value) + } + m.ID = int(value.Int64) + values = values[1:] + if value, ok := values[0].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field created_at", values[0]) + } else if value.Valid { + m.CreatedAt = value.Time + } + if value, ok := values[1].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field updated_at", values[1]) + } else if value.Valid { + m.UpdatedAt = value.Time + } + if value, ok := values[2].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field key", values[2]) + } else if value.Valid { + m.Key = value.String + } + if value, ok := values[3].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field value", values[3]) + } else if value.Valid { + m.Value = value.String + } + values = values[4:] + if len(values) == len(meta.ForeignKeys) { + if value, ok := values[0].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for edge-field alert_metas", value) + } else if value.Valid { + m.alert_metas = new(int) + *m.alert_metas = int(value.Int64) + } + } + return nil +} + +// QueryOwner queries the owner edge of the Meta. +func (m *Meta) QueryOwner() *AlertQuery { + return (&MetaClient{config: m.config}).QueryOwner(m) +} + +// Update returns a builder for updating this Meta. +// Note that, you need to call Meta.Unwrap() before calling this method, if this Meta +// was returned from a transaction, and the transaction was committed or rolled back. 
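+//
+// For example (a sketch, with a hypothetical value), once the transaction
+// that returned m has been committed:
+//
+//	updated, err := m.Unwrap().Update().SetValue("new-value").Save(ctx)
+//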
+func (m *Meta) Update() *MetaUpdateOne {
+	return (&MetaClient{config: m.config}).UpdateOne(m)
+}
+
+// Unwrap unwraps the entity that was returned from a transaction after it was closed,
+// so that all subsequent queries will be executed through the driver which created the transaction.
+func (m *Meta) Unwrap() *Meta {
+	tx, ok := m.config.driver.(*txDriver)
+	if !ok {
+		panic("ent: Meta is not a transactional entity")
+	}
+	m.config.driver = tx.drv
+	return m
+}
+
+// String implements the fmt.Stringer.
+func (m *Meta) String() string {
+	var builder strings.Builder
+	builder.WriteString("Meta(")
+	builder.WriteString(fmt.Sprintf("id=%v", m.ID))
+	builder.WriteString(", created_at=")
+	builder.WriteString(m.CreatedAt.Format(time.ANSIC))
+	builder.WriteString(", updated_at=")
+	builder.WriteString(m.UpdatedAt.Format(time.ANSIC))
+	builder.WriteString(", key=")
+	builder.WriteString(m.Key)
+	builder.WriteString(", value=")
+	builder.WriteString(m.Value)
+	builder.WriteByte(')')
+	return builder.String()
+}
+
+// MetaSlice is a parsable slice of Meta.
+type MetaSlice []*Meta
+
+func (m MetaSlice) config(cfg config) {
+	for _i := range m {
+		m[_i].config = cfg
+	}
+}
diff --git a/pkg/database/ent/meta/meta.go b/pkg/database/ent/meta/meta.go
new file mode 100644
index 000000000..7dd0a48ea
--- /dev/null
+++ b/pkg/database/ent/meta/meta.go
@@ -0,0 +1,73 @@
+// Code generated by entc, DO NOT EDIT.
+
+package meta
+
+import (
+	"time"
+)
+
+const (
+	// Label holds the string label denoting the meta type in the database.
+	Label = "meta"
+	// FieldID holds the string denoting the id field in the database.
+	FieldID = "id"
+	// FieldCreatedAt holds the string denoting the created_at field in the database.
+	FieldCreatedAt = "created_at"
+	// FieldUpdatedAt holds the string denoting the updated_at field in the database.
+	FieldUpdatedAt = "updated_at"
+	// FieldKey holds the string denoting the key field in the database.
+	FieldKey = "key"
+	// FieldValue holds the string denoting the value field in the database.
+	FieldValue = "value"
+
+	// EdgeOwner holds the string denoting the owner edge name in mutations.
+	EdgeOwner = "owner"
+
+	// Table holds the table name of the meta in the database.
+	Table = "meta"
+	// OwnerTable is the table that holds the owner relation/edge.
+	OwnerTable = "meta"
+	// OwnerInverseTable is the table name for the Alert entity.
+	// It exists in this package in order to avoid circular dependency with the "alert" package.
+	OwnerInverseTable = "alerts"
+	// OwnerColumn is the table column denoting the owner relation/edge.
+	OwnerColumn = "alert_metas"
+)
+
+// Columns holds all SQL columns for meta fields.
+var Columns = []string{
+	FieldID,
+	FieldCreatedAt,
+	FieldUpdatedAt,
+	FieldKey,
+	FieldValue,
+}
+
+// ForeignKeys holds the SQL foreign-keys that are owned by the Meta type.
+var ForeignKeys = []string{
+	"alert_metas",
+}
+
+// ValidColumn reports if the column name is valid (part of the table columns).
+func ValidColumn(column string) bool {
+	for i := range Columns {
+		if column == Columns[i] {
+			return true
+		}
+	}
+	for i := range ForeignKeys {
+		if column == ForeignKeys[i] {
+			return true
+		}
+	}
+	return false
+}
+
+var (
+	// DefaultCreatedAt holds the default value on creation for the created_at field.
+	DefaultCreatedAt func() time.Time
+	// DefaultUpdatedAt holds the default value on creation for the updated_at field.
+	DefaultUpdatedAt func() time.Time
+	// ValueValidator is a validator for the "value" field. It is called by the builders before save.
+ ValueValidator func(string) error +) diff --git a/pkg/database/ent/meta/where.go b/pkg/database/ent/meta/where.go new file mode 100644 index 000000000..40a15d6fe --- /dev/null +++ b/pkg/database/ent/meta/where.go @@ -0,0 +1,556 @@ +// Code generated by entc, DO NOT EDIT. + +package meta + +import ( + "time" + + "github.com/crowdsecurity/crowdsec/pkg/database/ent/predicate" + "github.com/facebook/ent/dialect/sql" + "github.com/facebook/ent/dialect/sql/sqlgraph" +) + +// ID filters vertices based on their identifier. +func ID(id int) predicate.Meta { + return predicate.Meta(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldID), id)) + }) +} + +// IDEQ applies the EQ predicate on the ID field. +func IDEQ(id int) predicate.Meta { + return predicate.Meta(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldID), id)) + }) +} + +// IDNEQ applies the NEQ predicate on the ID field. +func IDNEQ(id int) predicate.Meta { + return predicate.Meta(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldID), id)) + }) +} + +// IDIn applies the In predicate on the ID field. +func IDIn(ids ...int) predicate.Meta { + return predicate.Meta(func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(ids) == 0 { + s.Where(sql.False()) + return + } + v := make([]interface{}, len(ids)) + for i := range v { + v[i] = ids[i] + } + s.Where(sql.In(s.C(FieldID), v...)) + }) +} + +// IDNotIn applies the NotIn predicate on the ID field. +func IDNotIn(ids ...int) predicate.Meta { + return predicate.Meta(func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(ids) == 0 { + s.Where(sql.False()) + return + } + v := make([]interface{}, len(ids)) + for i := range v { + v[i] = ids[i] + } + s.Where(sql.NotIn(s.C(FieldID), v...)) + }) +} + +// IDGT applies the GT predicate on the ID field. +func IDGT(id int) predicate.Meta { + return predicate.Meta(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldID), id)) + }) +} + +// IDGTE applies the GTE predicate on the ID field. +func IDGTE(id int) predicate.Meta { + return predicate.Meta(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldID), id)) + }) +} + +// IDLT applies the LT predicate on the ID field. +func IDLT(id int) predicate.Meta { + return predicate.Meta(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldID), id)) + }) +} + +// IDLTE applies the LTE predicate on the ID field. +func IDLTE(id int) predicate.Meta { + return predicate.Meta(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldID), id)) + }) +} + +// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ. +func CreatedAt(v time.Time) predicate.Meta { + return predicate.Meta(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldCreatedAt), v)) + }) +} + +// UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ. +func UpdatedAt(v time.Time) predicate.Meta { + return predicate.Meta(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldUpdatedAt), v)) + }) +} + +// Key applies equality check predicate on the "key" field. It's identical to KeyEQ. +func Key(v string) predicate.Meta { + return predicate.Meta(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldKey), v)) + }) +} + +// Value applies equality check predicate on the "value" field. It's identical to ValueEQ. 
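+//
+// Predicates passed to Where are AND-ed together; a sketch with a
+// hypothetical key/value pair:
+//
+//	metas, err := client.Meta.Query().
+//		Where(meta.Key("source"), meta.Value("cscli")).
+//		All(ctx)
+//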
+func Value(v string) predicate.Meta { + return predicate.Meta(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldValue), v)) + }) +} + +// CreatedAtEQ applies the EQ predicate on the "created_at" field. +func CreatedAtEQ(v time.Time) predicate.Meta { + return predicate.Meta(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldCreatedAt), v)) + }) +} + +// CreatedAtNEQ applies the NEQ predicate on the "created_at" field. +func CreatedAtNEQ(v time.Time) predicate.Meta { + return predicate.Meta(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldCreatedAt), v)) + }) +} + +// CreatedAtIn applies the In predicate on the "created_at" field. +func CreatedAtIn(vs ...time.Time) predicate.Meta { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Meta(func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(v) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.In(s.C(FieldCreatedAt), v...)) + }) +} + +// CreatedAtNotIn applies the NotIn predicate on the "created_at" field. +func CreatedAtNotIn(vs ...time.Time) predicate.Meta { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Meta(func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(v) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.NotIn(s.C(FieldCreatedAt), v...)) + }) +} + +// CreatedAtGT applies the GT predicate on the "created_at" field. +func CreatedAtGT(v time.Time) predicate.Meta { + return predicate.Meta(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldCreatedAt), v)) + }) +} + +// CreatedAtGTE applies the GTE predicate on the "created_at" field. +func CreatedAtGTE(v time.Time) predicate.Meta { + return predicate.Meta(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldCreatedAt), v)) + }) +} + +// CreatedAtLT applies the LT predicate on the "created_at" field. +func CreatedAtLT(v time.Time) predicate.Meta { + return predicate.Meta(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldCreatedAt), v)) + }) +} + +// CreatedAtLTE applies the LTE predicate on the "created_at" field. +func CreatedAtLTE(v time.Time) predicate.Meta { + return predicate.Meta(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldCreatedAt), v)) + }) +} + +// UpdatedAtEQ applies the EQ predicate on the "updated_at" field. +func UpdatedAtEQ(v time.Time) predicate.Meta { + return predicate.Meta(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldUpdatedAt), v)) + }) +} + +// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field. +func UpdatedAtNEQ(v time.Time) predicate.Meta { + return predicate.Meta(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldUpdatedAt), v)) + }) +} + +// UpdatedAtIn applies the In predicate on the "updated_at" field. +func UpdatedAtIn(vs ...time.Time) predicate.Meta { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Meta(func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(v) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.In(s.C(FieldUpdatedAt), v...)) + }) +} + +// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field. 
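Note the guard inside every In and NotIn constructor: SQL has no valid "IN ()" form, so an empty argument list is replaced with sql.False() and the predicate matches nothing instead of producing a syntax error. With the same assumed client and ctx:

    // KeyIn with no arguments is falsy, never an SQL error.
    none, _ := client.Meta.Query().Where(meta.KeyIn()).All(ctx) // len(none) == 0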
+func UpdatedAtNotIn(vs ...time.Time) predicate.Meta { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Meta(func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(v) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.NotIn(s.C(FieldUpdatedAt), v...)) + }) +} + +// UpdatedAtGT applies the GT predicate on the "updated_at" field. +func UpdatedAtGT(v time.Time) predicate.Meta { + return predicate.Meta(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldUpdatedAt), v)) + }) +} + +// UpdatedAtGTE applies the GTE predicate on the "updated_at" field. +func UpdatedAtGTE(v time.Time) predicate.Meta { + return predicate.Meta(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldUpdatedAt), v)) + }) +} + +// UpdatedAtLT applies the LT predicate on the "updated_at" field. +func UpdatedAtLT(v time.Time) predicate.Meta { + return predicate.Meta(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldUpdatedAt), v)) + }) +} + +// UpdatedAtLTE applies the LTE predicate on the "updated_at" field. +func UpdatedAtLTE(v time.Time) predicate.Meta { + return predicate.Meta(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldUpdatedAt), v)) + }) +} + +// KeyEQ applies the EQ predicate on the "key" field. +func KeyEQ(v string) predicate.Meta { + return predicate.Meta(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldKey), v)) + }) +} + +// KeyNEQ applies the NEQ predicate on the "key" field. +func KeyNEQ(v string) predicate.Meta { + return predicate.Meta(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldKey), v)) + }) +} + +// KeyIn applies the In predicate on the "key" field. +func KeyIn(vs ...string) predicate.Meta { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Meta(func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(v) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.In(s.C(FieldKey), v...)) + }) +} + +// KeyNotIn applies the NotIn predicate on the "key" field. +func KeyNotIn(vs ...string) predicate.Meta { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Meta(func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(v) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.NotIn(s.C(FieldKey), v...)) + }) +} + +// KeyGT applies the GT predicate on the "key" field. +func KeyGT(v string) predicate.Meta { + return predicate.Meta(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldKey), v)) + }) +} + +// KeyGTE applies the GTE predicate on the "key" field. +func KeyGTE(v string) predicate.Meta { + return predicate.Meta(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldKey), v)) + }) +} + +// KeyLT applies the LT predicate on the "key" field. +func KeyLT(v string) predicate.Meta { + return predicate.Meta(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldKey), v)) + }) +} + +// KeyLTE applies the LTE predicate on the "key" field. +func KeyLTE(v string) predicate.Meta { + return predicate.Meta(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldKey), v)) + }) +} + +// KeyContains applies the Contains predicate on the "key" field. 
+func KeyContains(v string) predicate.Meta { + return predicate.Meta(func(s *sql.Selector) { + s.Where(sql.Contains(s.C(FieldKey), v)) + }) +} + +// KeyHasPrefix applies the HasPrefix predicate on the "key" field. +func KeyHasPrefix(v string) predicate.Meta { + return predicate.Meta(func(s *sql.Selector) { + s.Where(sql.HasPrefix(s.C(FieldKey), v)) + }) +} + +// KeyHasSuffix applies the HasSuffix predicate on the "key" field. +func KeyHasSuffix(v string) predicate.Meta { + return predicate.Meta(func(s *sql.Selector) { + s.Where(sql.HasSuffix(s.C(FieldKey), v)) + }) +} + +// KeyEqualFold applies the EqualFold predicate on the "key" field. +func KeyEqualFold(v string) predicate.Meta { + return predicate.Meta(func(s *sql.Selector) { + s.Where(sql.EqualFold(s.C(FieldKey), v)) + }) +} + +// KeyContainsFold applies the ContainsFold predicate on the "key" field. +func KeyContainsFold(v string) predicate.Meta { + return predicate.Meta(func(s *sql.Selector) { + s.Where(sql.ContainsFold(s.C(FieldKey), v)) + }) +} + +// ValueEQ applies the EQ predicate on the "value" field. +func ValueEQ(v string) predicate.Meta { + return predicate.Meta(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldValue), v)) + }) +} + +// ValueNEQ applies the NEQ predicate on the "value" field. +func ValueNEQ(v string) predicate.Meta { + return predicate.Meta(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldValue), v)) + }) +} + +// ValueIn applies the In predicate on the "value" field. +func ValueIn(vs ...string) predicate.Meta { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Meta(func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(v) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.In(s.C(FieldValue), v...)) + }) +} + +// ValueNotIn applies the NotIn predicate on the "value" field. +func ValueNotIn(vs ...string) predicate.Meta { + v := make([]interface{}, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Meta(func(s *sql.Selector) { + // if not arguments were provided, append the FALSE constants, + // since we can't apply "IN ()". This will make this predicate falsy. + if len(v) == 0 { + s.Where(sql.False()) + return + } + s.Where(sql.NotIn(s.C(FieldValue), v...)) + }) +} + +// ValueGT applies the GT predicate on the "value" field. +func ValueGT(v string) predicate.Meta { + return predicate.Meta(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldValue), v)) + }) +} + +// ValueGTE applies the GTE predicate on the "value" field. +func ValueGTE(v string) predicate.Meta { + return predicate.Meta(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldValue), v)) + }) +} + +// ValueLT applies the LT predicate on the "value" field. +func ValueLT(v string) predicate.Meta { + return predicate.Meta(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldValue), v)) + }) +} + +// ValueLTE applies the LTE predicate on the "value" field. +func ValueLTE(v string) predicate.Meta { + return predicate.Meta(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldValue), v)) + }) +} + +// ValueContains applies the Contains predicate on the "value" field. +func ValueContains(v string) predicate.Meta { + return predicate.Meta(func(s *sql.Selector) { + s.Where(sql.Contains(s.C(FieldValue), v)) + }) +} + +// ValueHasPrefix applies the HasPrefix predicate on the "value" field. 
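Beyond the ordering comparisons, string fields get substring and case-folding predicates; the EqualFold and ContainsFold variants map to case-insensitive SQL comparisons. A sketch under the same client/ctx assumptions:

    // Case-insensitive key match combined with a substring match on the value.
    hits, err := client.Meta.Query().
        Where(
            meta.KeyEqualFold("User-Agent"),
            meta.ValueContains("curl"),
        ).
        All(ctx)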
+func ValueHasPrefix(v string) predicate.Meta { + return predicate.Meta(func(s *sql.Selector) { + s.Where(sql.HasPrefix(s.C(FieldValue), v)) + }) +} + +// ValueHasSuffix applies the HasSuffix predicate on the "value" field. +func ValueHasSuffix(v string) predicate.Meta { + return predicate.Meta(func(s *sql.Selector) { + s.Where(sql.HasSuffix(s.C(FieldValue), v)) + }) +} + +// ValueEqualFold applies the EqualFold predicate on the "value" field. +func ValueEqualFold(v string) predicate.Meta { + return predicate.Meta(func(s *sql.Selector) { + s.Where(sql.EqualFold(s.C(FieldValue), v)) + }) +} + +// ValueContainsFold applies the ContainsFold predicate on the "value" field. +func ValueContainsFold(v string) predicate.Meta { + return predicate.Meta(func(s *sql.Selector) { + s.Where(sql.ContainsFold(s.C(FieldValue), v)) + }) +} + +// HasOwner applies the HasEdge predicate on the "owner" edge. +func HasOwner() predicate.Meta { + return predicate.Meta(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(OwnerTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, OwnerTable, OwnerColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasOwnerWith applies the HasEdge predicate on the "owner" edge with a given conditions (other predicates). +func HasOwnerWith(preds ...predicate.Alert) predicate.Meta { + return predicate.Meta(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(OwnerInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, OwnerTable, OwnerColumn), + ) + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// And groups list of predicates with the AND operator between them. +func And(predicates ...predicate.Meta) predicate.Meta { + return predicate.Meta(func(s *sql.Selector) { + s1 := s.Clone().SetP(nil) + for _, p := range predicates { + p(s1) + } + s.Where(s1.P()) + }) +} + +// Or groups list of predicates with the OR operator between them. +func Or(predicates ...predicate.Meta) predicate.Meta { + return predicate.Meta(func(s *sql.Selector) { + s1 := s.Clone().SetP(nil) + for i, p := range predicates { + if i > 0 { + s1.Or() + } + p(s1) + } + s.Where(s1.P()) + }) +} + +// Not applies the not operator on the given predicate. +func Not(p predicate.Meta) predicate.Meta { + return predicate.Meta(func(s *sql.Selector) { + p(s.Not()) + }) +} diff --git a/pkg/database/ent/meta_create.go b/pkg/database/ent/meta_create.go new file mode 100644 index 000000000..65129c119 --- /dev/null +++ b/pkg/database/ent/meta_create.go @@ -0,0 +1,310 @@ +// Code generated by entc, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "time" + + "github.com/crowdsecurity/crowdsec/pkg/database/ent/alert" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/meta" + "github.com/facebook/ent/dialect/sql/sqlgraph" + "github.com/facebook/ent/schema/field" +) + +// MetaCreate is the builder for creating a Meta entity. +type MetaCreate struct { + config + mutation *MetaMutation + hooks []Hook +} + +// SetCreatedAt sets the created_at field. +func (mc *MetaCreate) SetCreatedAt(t time.Time) *MetaCreate { + mc.mutation.SetCreatedAt(t) + return mc +} + +// SetNillableCreatedAt sets the created_at field if the given value is not nil. +func (mc *MetaCreate) SetNillableCreatedAt(t *time.Time) *MetaCreate { + if t != nil { + mc.SetCreatedAt(*t) + } + return mc +} + +// SetUpdatedAt sets the updated_at field. 
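HasOwnerWith (above) bridges Meta predicates and Alert predicates across the alert_metas edge, while And, Or and Not compose arbitrary boolean filters; multiple predicates passed to Where are AND-ed implicitly. A sketch that assumes an alert.ScenarioEQ predicate from the Alert package generated elsewhere in this PR:

    // Metadata attached to ssh-bf alerts, excluding internal bookkeeping keys.
    ms, err := client.Meta.Query().
        Where(
            meta.HasOwnerWith(alert.ScenarioEQ("crowdsecurity/ssh-bf")),
            meta.Not(meta.KeyHasPrefix("internal_")),
        ).
        All(ctx)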
+func (mc *MetaCreate) SetUpdatedAt(t time.Time) *MetaCreate { + mc.mutation.SetUpdatedAt(t) + return mc +} + +// SetNillableUpdatedAt sets the updated_at field if the given value is not nil. +func (mc *MetaCreate) SetNillableUpdatedAt(t *time.Time) *MetaCreate { + if t != nil { + mc.SetUpdatedAt(*t) + } + return mc +} + +// SetKey sets the key field. +func (mc *MetaCreate) SetKey(s string) *MetaCreate { + mc.mutation.SetKey(s) + return mc +} + +// SetValue sets the value field. +func (mc *MetaCreate) SetValue(s string) *MetaCreate { + mc.mutation.SetValue(s) + return mc +} + +// SetOwnerID sets the owner edge to Alert by id. +func (mc *MetaCreate) SetOwnerID(id int) *MetaCreate { + mc.mutation.SetOwnerID(id) + return mc +} + +// SetNillableOwnerID sets the owner edge to Alert by id if the given value is not nil. +func (mc *MetaCreate) SetNillableOwnerID(id *int) *MetaCreate { + if id != nil { + mc = mc.SetOwnerID(*id) + } + return mc +} + +// SetOwner sets the owner edge to Alert. +func (mc *MetaCreate) SetOwner(a *Alert) *MetaCreate { + return mc.SetOwnerID(a.ID) +} + +// Mutation returns the MetaMutation object of the builder. +func (mc *MetaCreate) Mutation() *MetaMutation { + return mc.mutation +} + +// Save creates the Meta in the database. +func (mc *MetaCreate) Save(ctx context.Context) (*Meta, error) { + var ( + err error + node *Meta + ) + mc.defaults() + if len(mc.hooks) == 0 { + if err = mc.check(); err != nil { + return nil, err + } + node, err = mc.sqlSave(ctx) + } else { + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*MetaMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err = mc.check(); err != nil { + return nil, err + } + mc.mutation = mutation + node, err = mc.sqlSave(ctx) + mutation.done = true + return node, err + }) + for i := len(mc.hooks) - 1; i >= 0; i-- { + mut = mc.hooks[i](mut) + } + if _, err := mut.Mutate(ctx, mc.mutation); err != nil { + return nil, err + } + } + return node, err +} + +// SaveX calls Save and panics if Save returns an error. +func (mc *MetaCreate) SaveX(ctx context.Context) *Meta { + v, err := mc.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// defaults sets the default values of the builder before save. +func (mc *MetaCreate) defaults() { + if _, ok := mc.mutation.CreatedAt(); !ok { + v := meta.DefaultCreatedAt() + mc.mutation.SetCreatedAt(v) + } + if _, ok := mc.mutation.UpdatedAt(); !ok { + v := meta.DefaultUpdatedAt() + mc.mutation.SetUpdatedAt(v) + } +} + +// check runs all checks and user-defined validators on the builder. 
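Putting the create builder together: defaults() backfills created_at and updated_at from the schema defaults, check() (below) enforces the required fields and the value validator, and only then is the row written. A sketch where a is a previously fetched *ent.Alert and client/ctx are as before:

    m, err := client.Meta.Create().
        SetKey("service").
        SetValue("ssh").
        SetOwner(a). // optional owner edge; omit for unattached metadata
        Save(ctx)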
+func (mc *MetaCreate) check() error { + if _, ok := mc.mutation.CreatedAt(); !ok { + return &ValidationError{Name: "created_at", err: errors.New("ent: missing required field \"created_at\"")} + } + if _, ok := mc.mutation.UpdatedAt(); !ok { + return &ValidationError{Name: "updated_at", err: errors.New("ent: missing required field \"updated_at\"")} + } + if _, ok := mc.mutation.Key(); !ok { + return &ValidationError{Name: "key", err: errors.New("ent: missing required field \"key\"")} + } + if _, ok := mc.mutation.Value(); !ok { + return &ValidationError{Name: "value", err: errors.New("ent: missing required field \"value\"")} + } + if v, ok := mc.mutation.Value(); ok { + if err := meta.ValueValidator(v); err != nil { + return &ValidationError{Name: "value", err: fmt.Errorf("ent: validator failed for field \"value\": %w", err)} + } + } + return nil +} + +func (mc *MetaCreate) sqlSave(ctx context.Context) (*Meta, error) { + _node, _spec := mc.createSpec() + if err := sqlgraph.CreateNode(ctx, mc.driver, _spec); err != nil { + if cerr, ok := isSQLConstraintError(err); ok { + err = cerr + } + return nil, err + } + id := _spec.ID.Value.(int64) + _node.ID = int(id) + return _node, nil +} + +func (mc *MetaCreate) createSpec() (*Meta, *sqlgraph.CreateSpec) { + var ( + _node = &Meta{config: mc.config} + _spec = &sqlgraph.CreateSpec{ + Table: meta.Table, + ID: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: meta.FieldID, + }, + } + ) + if value, ok := mc.mutation.CreatedAt(); ok { + _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: meta.FieldCreatedAt, + }) + _node.CreatedAt = value + } + if value, ok := mc.mutation.UpdatedAt(); ok { + _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: meta.FieldUpdatedAt, + }) + _node.UpdatedAt = value + } + if value, ok := mc.mutation.Key(); ok { + _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: meta.FieldKey, + }) + _node.Key = value + } + if value, ok := mc.mutation.Value(); ok { + _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: meta.FieldValue, + }) + _node.Value = value + } + if nodes := mc.mutation.OwnerIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: meta.OwnerTable, + Columns: []string{meta.OwnerColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: alert.FieldID, + }, + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges = append(_spec.Edges, edge) + } + return _node, _spec +} + +// MetaCreateBulk is the builder for creating a bulk of Meta entities. +type MetaCreateBulk struct { + config + builders []*MetaCreate +} + +// Save creates the Meta entities in the database. 
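The bulk builder whose Save follows chains each MetaCreate's hooks into a single mutator pipeline and flushes everything with one BatchCreate. A usage sketch, assuming the CreateBulk constructor generated on the client (in client.go, outside this hunk) and a map[string]string named kv:

    builders := make([]*ent.MetaCreate, 0, len(kv))
    for k, v := range kv {
        builders = append(builders, client.Meta.Create().SetKey(k).SetValue(v))
    }
    metas, err := client.Meta.CreateBulk(builders...).Save(ctx)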
+func (mcb *MetaCreateBulk) Save(ctx context.Context) ([]*Meta, error) { + specs := make([]*sqlgraph.CreateSpec, len(mcb.builders)) + nodes := make([]*Meta, len(mcb.builders)) + mutators := make([]Mutator, len(mcb.builders)) + for i := range mcb.builders { + func(i int, root context.Context) { + builder := mcb.builders[i] + builder.defaults() + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*MetaMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err := builder.check(); err != nil { + return nil, err + } + builder.mutation = mutation + nodes[i], specs[i] = builder.createSpec() + var err error + if i < len(mutators)-1 { + _, err = mutators[i+1].Mutate(root, mcb.builders[i+1].mutation) + } else { + // Invoke the actual operation on the latest mutation in the chain. + if err = sqlgraph.BatchCreate(ctx, mcb.driver, &sqlgraph.BatchCreateSpec{Nodes: specs}); err != nil { + if cerr, ok := isSQLConstraintError(err); ok { + err = cerr + } + } + } + mutation.done = true + if err != nil { + return nil, err + } + id := specs[i].ID.Value.(int64) + nodes[i].ID = int(id) + return nodes[i], nil + }) + for i := len(builder.hooks) - 1; i >= 0; i-- { + mut = builder.hooks[i](mut) + } + mutators[i] = mut + }(i, ctx) + } + if len(mutators) > 0 { + if _, err := mutators[0].Mutate(ctx, mcb.builders[0].mutation); err != nil { + return nil, err + } + } + return nodes, nil +} + +// SaveX calls Save and panics if Save returns an error. +func (mcb *MetaCreateBulk) SaveX(ctx context.Context) []*Meta { + v, err := mcb.Save(ctx) + if err != nil { + panic(err) + } + return v +} diff --git a/pkg/database/ent/meta_delete.go b/pkg/database/ent/meta_delete.go new file mode 100644 index 000000000..ec43e945b --- /dev/null +++ b/pkg/database/ent/meta_delete.go @@ -0,0 +1,109 @@ +// Code generated by entc, DO NOT EDIT. + +package ent + +import ( + "context" + "fmt" + + "github.com/crowdsecurity/crowdsec/pkg/database/ent/meta" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/predicate" + "github.com/facebook/ent/dialect/sql" + "github.com/facebook/ent/dialect/sql/sqlgraph" + "github.com/facebook/ent/schema/field" +) + +// MetaDelete is the builder for deleting a Meta entity. +type MetaDelete struct { + config + hooks []Hook + mutation *MetaMutation + predicates []predicate.Meta +} + +// Where adds a new predicate to the delete builder. +func (md *MetaDelete) Where(ps ...predicate.Meta) *MetaDelete { + md.predicates = append(md.predicates, ps...) + return md +} + +// Exec executes the deletion query and returns how many vertices were deleted. +func (md *MetaDelete) Exec(ctx context.Context) (int, error) { + var ( + err error + affected int + ) + if len(md.hooks) == 0 { + affected, err = md.sqlExec(ctx) + } else { + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*MetaMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + md.mutation = mutation + affected, err = md.sqlExec(ctx) + mutation.done = true + return affected, err + }) + for i := len(md.hooks) - 1; i >= 0; i-- { + mut = md.hooks[i](mut) + } + if _, err := mut.Mutate(ctx, md.mutation); err != nil { + return 0, err + } + } + return affected, err +} + +// ExecX is like Exec, but panics if an error occurs. 
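MetaDelete runs through the same hook pipeline as the other mutation builders and Exec reports how many vertices were removed. A sketch with the usual assumed client and ctx:

    // Purge metadata rows untouched for 30 days.
    n, err := client.Meta.Delete().
        Where(meta.UpdatedAtLT(time.Now().AddDate(0, 0, -30))).
        Exec(ctx)
    if err == nil {
        log.Printf("deleted %d meta entries", n)
    }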
+func (md *MetaDelete) ExecX(ctx context.Context) int { + n, err := md.Exec(ctx) + if err != nil { + panic(err) + } + return n +} + +func (md *MetaDelete) sqlExec(ctx context.Context) (int, error) { + _spec := &sqlgraph.DeleteSpec{ + Node: &sqlgraph.NodeSpec{ + Table: meta.Table, + ID: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: meta.FieldID, + }, + }, + } + if ps := md.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + return sqlgraph.DeleteNodes(ctx, md.driver, _spec) +} + +// MetaDeleteOne is the builder for deleting a single Meta entity. +type MetaDeleteOne struct { + md *MetaDelete +} + +// Exec executes the deletion query. +func (mdo *MetaDeleteOne) Exec(ctx context.Context) error { + n, err := mdo.md.Exec(ctx) + switch { + case err != nil: + return err + case n == 0: + return &NotFoundError{meta.Label} + default: + return nil + } +} + +// ExecX is like Exec, but panics if an error occurs. +func (mdo *MetaDeleteOne) ExecX(ctx context.Context) { + mdo.md.ExecX(ctx) +} diff --git a/pkg/database/ent/meta_query.go b/pkg/database/ent/meta_query.go new file mode 100644 index 000000000..2aa311854 --- /dev/null +++ b/pkg/database/ent/meta_query.go @@ -0,0 +1,957 @@ +// Code generated by entc, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "math" + + "github.com/crowdsecurity/crowdsec/pkg/database/ent/alert" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/meta" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/predicate" + "github.com/facebook/ent/dialect/sql" + "github.com/facebook/ent/dialect/sql/sqlgraph" + "github.com/facebook/ent/schema/field" +) + +// MetaQuery is the builder for querying Meta entities. +type MetaQuery struct { + config + limit *int + offset *int + order []OrderFunc + unique []string + predicates []predicate.Meta + // eager-loading edges. + withOwner *AlertQuery + withFKs bool + // intermediate query (i.e. traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Where adds a new predicate for the builder. +func (mq *MetaQuery) Where(ps ...predicate.Meta) *MetaQuery { + mq.predicates = append(mq.predicates, ps...) + return mq +} + +// Limit adds a limit step to the query. +func (mq *MetaQuery) Limit(limit int) *MetaQuery { + mq.limit = &limit + return mq +} + +// Offset adds an offset step to the query. +func (mq *MetaQuery) Offset(offset int) *MetaQuery { + mq.offset = &offset + return mq +} + +// Order adds an order step to the query. +func (mq *MetaQuery) Order(o ...OrderFunc) *MetaQuery { + mq.order = append(mq.order, o...) + return mq +} + +// QueryOwner chains the current query on the owner edge. +func (mq *MetaQuery) QueryOwner() *AlertQuery { + query := &AlertQuery{config: mq.config} + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := mq.prepareQuery(ctx); err != nil { + return nil, err + } + selector := mq.sqlQuery() + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(meta.Table, meta.FieldID, selector), + sqlgraph.To(alert.Table, alert.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, meta.OwnerTable, meta.OwnerColumn), + ) + fromU = sqlgraph.SetNeighbors(mq.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// First returns the first Meta entity in the query. Returns *NotFoundError when no meta was found. 
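The query builder above supports paging (Limit/Offset), ordering, and graph traversal: QueryOwner pivots the query from Meta rows to their owning Alert rows through the alert_metas column. A sketch, assuming the Asc order helper generated in ent.go outside this hunk:

    // The 20 oldest source_ip metadata entries, then hop to the owning alerts.
    alerts, err := client.Meta.Query().
        Where(meta.KeyEQ("source_ip")).
        Order(ent.Asc(meta.FieldCreatedAt)).
        Limit(20).
        QueryOwner().
        All(ctx)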
+func (mq *MetaQuery) First(ctx context.Context) (*Meta, error) { + nodes, err := mq.Limit(1).All(ctx) + if err != nil { + return nil, err + } + if len(nodes) == 0 { + return nil, &NotFoundError{meta.Label} + } + return nodes[0], nil +} + +// FirstX is like First, but panics if an error occurs. +func (mq *MetaQuery) FirstX(ctx context.Context) *Meta { + node, err := mq.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return node +} + +// FirstID returns the first Meta id in the query. Returns *NotFoundError when no id was found. +func (mq *MetaQuery) FirstID(ctx context.Context) (id int, err error) { + var ids []int + if ids, err = mq.Limit(1).IDs(ctx); err != nil { + return + } + if len(ids) == 0 { + err = &NotFoundError{meta.Label} + return + } + return ids[0], nil +} + +// FirstXID is like FirstID, but panics if an error occurs. +func (mq *MetaQuery) FirstXID(ctx context.Context) int { + id, err := mq.FirstID(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return id +} + +// Only returns the only Meta entity in the query, returns an error if not exactly one entity was returned. +func (mq *MetaQuery) Only(ctx context.Context) (*Meta, error) { + nodes, err := mq.Limit(2).All(ctx) + if err != nil { + return nil, err + } + switch len(nodes) { + case 1: + return nodes[0], nil + case 0: + return nil, &NotFoundError{meta.Label} + default: + return nil, &NotSingularError{meta.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. +func (mq *MetaQuery) OnlyX(ctx context.Context) *Meta { + node, err := mq.Only(ctx) + if err != nil { + panic(err) + } + return node +} + +// OnlyID returns the only Meta id in the query, returns an error if not exactly one id was returned. +func (mq *MetaQuery) OnlyID(ctx context.Context) (id int, err error) { + var ids []int + if ids, err = mq.Limit(2).IDs(ctx); err != nil { + return + } + switch len(ids) { + case 1: + id = ids[0] + case 0: + err = &NotFoundError{meta.Label} + default: + err = &NotSingularError{meta.Label} + } + return +} + +// OnlyIDX is like OnlyID, but panics if an error occurs. +func (mq *MetaQuery) OnlyIDX(ctx context.Context) int { + id, err := mq.OnlyID(ctx) + if err != nil { + panic(err) + } + return id +} + +// All executes the query and returns a list of MetaSlice. +func (mq *MetaQuery) All(ctx context.Context) ([]*Meta, error) { + if err := mq.prepareQuery(ctx); err != nil { + return nil, err + } + return mq.sqlAll(ctx) +} + +// AllX is like All, but panics if an error occurs. +func (mq *MetaQuery) AllX(ctx context.Context) []*Meta { + nodes, err := mq.All(ctx) + if err != nil { + panic(err) + } + return nodes +} + +// IDs executes the query and returns a list of Meta ids. +func (mq *MetaQuery) IDs(ctx context.Context) ([]int, error) { + var ids []int + if err := mq.Select(meta.FieldID).Scan(ctx, &ids); err != nil { + return nil, err + } + return ids, nil +} + +// IDsX is like IDs, but panics if an error occurs. +func (mq *MetaQuery) IDsX(ctx context.Context) []int { + ids, err := mq.IDs(ctx) + if err != nil { + panic(err) + } + return ids +} + +// Count returns the count of the given query. +func (mq *MetaQuery) Count(ctx context.Context) (int, error) { + if err := mq.prepareQuery(ctx); err != nil { + return 0, err + } + return mq.sqlCount(ctx) +} + +// CountX is like Count, but panics if an error occurs. 
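First and Only differ in how they treat multiple matches: First limits the query to one row and takes it, while Only returns *NotSingularError when more than one row matches; both return *NotFoundError (testable with ent.IsNotFound, generated outside this hunk) on an empty result:

    ua, err := client.Meta.Query().
        Where(meta.KeyEQ("http_user_agent")).
        Only(ctx)
    if ent.IsNotFound(err) {
        // no such metadata entry
    }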
+func (mq *MetaQuery) CountX(ctx context.Context) int { + count, err := mq.Count(ctx) + if err != nil { + panic(err) + } + return count +} + +// Exist returns true if the query has elements in the graph. +func (mq *MetaQuery) Exist(ctx context.Context) (bool, error) { + if err := mq.prepareQuery(ctx); err != nil { + return false, err + } + return mq.sqlExist(ctx) +} + +// ExistX is like Exist, but panics if an error occurs. +func (mq *MetaQuery) ExistX(ctx context.Context) bool { + exist, err := mq.Exist(ctx) + if err != nil { + panic(err) + } + return exist +} + +// Clone returns a duplicate of the query builder, including all associated steps. It can be +// used to prepare common query builders and use them differently after the clone is made. +func (mq *MetaQuery) Clone() *MetaQuery { + return &MetaQuery{ + config: mq.config, + limit: mq.limit, + offset: mq.offset, + order: append([]OrderFunc{}, mq.order...), + unique: append([]string{}, mq.unique...), + predicates: append([]predicate.Meta{}, mq.predicates...), + // clone intermediate query. + sql: mq.sql.Clone(), + path: mq.path, + } +} + +// WithOwner tells the query-builder to eager-loads the nodes that are connected to +// the "owner" edge. The optional arguments used to configure the query builder of the edge. +func (mq *MetaQuery) WithOwner(opts ...func(*AlertQuery)) *MetaQuery { + query := &AlertQuery{config: mq.config} + for _, opt := range opts { + opt(query) + } + mq.withOwner = query + return mq +} + +// GroupBy used to group vertices by one or more fields/columns. +// It is often used with aggregate functions, like: count, max, mean, min, sum. +// +// Example: +// +// var v []struct { +// CreatedAt time.Time `json:"created_at,omitempty"` +// Count int `json:"count,omitempty"` +// } +// +// client.Meta.Query(). +// GroupBy(meta.FieldCreatedAt). +// Aggregate(ent.Count()). +// Scan(ctx, &v) +// +func (mq *MetaQuery) GroupBy(field string, fields ...string) *MetaGroupBy { + group := &MetaGroupBy{config: mq.config} + group.fields = append([]string{field}, fields...) + group.path = func(ctx context.Context) (prev *sql.Selector, err error) { + if err := mq.prepareQuery(ctx); err != nil { + return nil, err + } + return mq.sqlQuery(), nil + } + return group +} + +// Select one or more fields from the given query. +// +// Example: +// +// var v []struct { +// CreatedAt time.Time `json:"created_at,omitempty"` +// } +// +// client.Meta.Query(). +// Select(meta.FieldCreatedAt). +// Scan(ctx, &v) +// +func (mq *MetaQuery) Select(field string, fields ...string) *MetaSelect { + selector := &MetaSelect{config: mq.config} + selector.fields = append([]string{field}, fields...) + selector.path = func(ctx context.Context) (prev *sql.Selector, err error) { + if err := mq.prepareQuery(ctx); err != nil { + return nil, err + } + return mq.sqlQuery(), nil + } + return selector +} + +func (mq *MetaQuery) prepareQuery(ctx context.Context) error { + if mq.path != nil { + prev, err := mq.path(ctx) + if err != nil { + return err + } + mq.sql = prev + } + return nil +} + +func (mq *MetaQuery) sqlAll(ctx context.Context) ([]*Meta, error) { + var ( + nodes = []*Meta{} + withFKs = mq.withFKs + _spec = mq.querySpec() + loadedTypes = [1]bool{ + mq.withOwner != nil, + } + ) + if mq.withOwner != nil { + withFKs = true + } + if withFKs { + _spec.Node.Columns = append(_spec.Node.Columns, meta.ForeignKeys...) 
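+		// The owner edge is stored on the meta table itself (the alert_metas
+		// column), so the FK column must be selected here for the eager-loading
+		// block below to group nodes by their owning alert id.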
+ } + _spec.ScanValues = func() []interface{} { + node := &Meta{config: mq.config} + nodes = append(nodes, node) + values := node.scanValues() + if withFKs { + values = append(values, node.fkValues()...) + } + return values + } + _spec.Assign = func(values ...interface{}) error { + if len(nodes) == 0 { + return fmt.Errorf("ent: Assign called without calling ScanValues") + } + node := nodes[len(nodes)-1] + node.Edges.loadedTypes = loadedTypes + return node.assignValues(values...) + } + if err := sqlgraph.QueryNodes(ctx, mq.driver, _spec); err != nil { + return nil, err + } + if len(nodes) == 0 { + return nodes, nil + } + + if query := mq.withOwner; query != nil { + ids := make([]int, 0, len(nodes)) + nodeids := make(map[int][]*Meta) + for i := range nodes { + if fk := nodes[i].alert_metas; fk != nil { + ids = append(ids, *fk) + nodeids[*fk] = append(nodeids[*fk], nodes[i]) + } + } + query.Where(alert.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return nil, err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return nil, fmt.Errorf(`unexpected foreign-key "alert_metas" returned %v`, n.ID) + } + for i := range nodes { + nodes[i].Edges.Owner = n + } + } + } + + return nodes, nil +} + +func (mq *MetaQuery) sqlCount(ctx context.Context) (int, error) { + _spec := mq.querySpec() + return sqlgraph.CountNodes(ctx, mq.driver, _spec) +} + +func (mq *MetaQuery) sqlExist(ctx context.Context) (bool, error) { + n, err := mq.sqlCount(ctx) + if err != nil { + return false, fmt.Errorf("ent: check existence: %v", err) + } + return n > 0, nil +} + +func (mq *MetaQuery) querySpec() *sqlgraph.QuerySpec { + _spec := &sqlgraph.QuerySpec{ + Node: &sqlgraph.NodeSpec{ + Table: meta.Table, + Columns: meta.Columns, + ID: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: meta.FieldID, + }, + }, + From: mq.sql, + Unique: true, + } + if ps := mq.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if limit := mq.limit; limit != nil { + _spec.Limit = *limit + } + if offset := mq.offset; offset != nil { + _spec.Offset = *offset + } + if ps := mq.order; len(ps) > 0 { + _spec.Order = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector, meta.ValidColumn) + } + } + } + return _spec +} + +func (mq *MetaQuery) sqlQuery() *sql.Selector { + builder := sql.Dialect(mq.driver.Dialect()) + t1 := builder.Table(meta.Table) + selector := builder.Select(t1.Columns(meta.Columns...)...).From(t1) + if mq.sql != nil { + selector = mq.sql + selector.Select(selector.Columns(meta.Columns...)...) + } + for _, p := range mq.predicates { + p(selector) + } + for _, p := range mq.order { + p(selector, meta.ValidColumn) + } + if offset := mq.offset; offset != nil { + // limit is mandatory for offset clause. We start + // with default value, and override it below if needed. + selector.Offset(*offset).Limit(math.MaxInt32) + } + if limit := mq.limit; limit != nil { + selector.Limit(*limit) + } + return selector +} + +// MetaGroupBy is the builder for group-by Meta entities. +type MetaGroupBy struct { + config + fields []string + fns []AggregateFunc + // intermediate query (i.e. traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Aggregate adds the given aggregation functions to the group-by query. +func (mgb *MetaGroupBy) Aggregate(fns ...AggregateFunc) *MetaGroupBy { + mgb.fns = append(mgb.fns, fns...) 
+ return mgb +} + +// Scan applies the group-by query and scan the result into the given value. +func (mgb *MetaGroupBy) Scan(ctx context.Context, v interface{}) error { + query, err := mgb.path(ctx) + if err != nil { + return err + } + mgb.sql = query + return mgb.sqlScan(ctx, v) +} + +// ScanX is like Scan, but panics if an error occurs. +func (mgb *MetaGroupBy) ScanX(ctx context.Context, v interface{}) { + if err := mgb.Scan(ctx, v); err != nil { + panic(err) + } +} + +// Strings returns list of strings from group-by. It is only allowed when querying group-by with one field. +func (mgb *MetaGroupBy) Strings(ctx context.Context) ([]string, error) { + if len(mgb.fields) > 1 { + return nil, errors.New("ent: MetaGroupBy.Strings is not achievable when grouping more than 1 field") + } + var v []string + if err := mgb.Scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// StringsX is like Strings, but panics if an error occurs. +func (mgb *MetaGroupBy) StringsX(ctx context.Context) []string { + v, err := mgb.Strings(ctx) + if err != nil { + panic(err) + } + return v +} + +// String returns a single string from group-by. It is only allowed when querying group-by with one field. +func (mgb *MetaGroupBy) String(ctx context.Context) (_ string, err error) { + var v []string + if v, err = mgb.Strings(ctx); err != nil { + return + } + switch len(v) { + case 1: + return v[0], nil + case 0: + err = &NotFoundError{meta.Label} + default: + err = fmt.Errorf("ent: MetaGroupBy.Strings returned %d results when one was expected", len(v)) + } + return +} + +// StringX is like String, but panics if an error occurs. +func (mgb *MetaGroupBy) StringX(ctx context.Context) string { + v, err := mgb.String(ctx) + if err != nil { + panic(err) + } + return v +} + +// Ints returns list of ints from group-by. It is only allowed when querying group-by with one field. +func (mgb *MetaGroupBy) Ints(ctx context.Context) ([]int, error) { + if len(mgb.fields) > 1 { + return nil, errors.New("ent: MetaGroupBy.Ints is not achievable when grouping more than 1 field") + } + var v []int + if err := mgb.Scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// IntsX is like Ints, but panics if an error occurs. +func (mgb *MetaGroupBy) IntsX(ctx context.Context) []int { + v, err := mgb.Ints(ctx) + if err != nil { + panic(err) + } + return v +} + +// Int returns a single int from group-by. It is only allowed when querying group-by with one field. +func (mgb *MetaGroupBy) Int(ctx context.Context) (_ int, err error) { + var v []int + if v, err = mgb.Ints(ctx); err != nil { + return + } + switch len(v) { + case 1: + return v[0], nil + case 0: + err = &NotFoundError{meta.Label} + default: + err = fmt.Errorf("ent: MetaGroupBy.Ints returned %d results when one was expected", len(v)) + } + return +} + +// IntX is like Int, but panics if an error occurs. +func (mgb *MetaGroupBy) IntX(ctx context.Context) int { + v, err := mgb.Int(ctx) + if err != nil { + panic(err) + } + return v +} + +// Float64s returns list of float64s from group-by. It is only allowed when querying group-by with one field. +func (mgb *MetaGroupBy) Float64s(ctx context.Context) ([]float64, error) { + if len(mgb.fields) > 1 { + return nil, errors.New("ent: MetaGroupBy.Float64s is not achievable when grouping more than 1 field") + } + var v []float64 + if err := mgb.Scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// Float64sX is like Float64s, but panics if an error occurs. 
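The typed helpers above (Strings, Ints, and the Float64s/Bools variants below) cover single-field group-bys; richer aggregations scan into an ad hoc struct, mirroring the example in the GroupBy doc comment:

    // Count metadata rows per key.
    var perKey []struct {
        Key   string `json:"key,omitempty"`
        Count int    `json:"count,omitempty"`
    }
    err := client.Meta.Query().
        GroupBy(meta.FieldKey).
        Aggregate(ent.Count()).
        Scan(ctx, &perKey)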
+func (mgb *MetaGroupBy) Float64sX(ctx context.Context) []float64 { + v, err := mgb.Float64s(ctx) + if err != nil { + panic(err) + } + return v +} + +// Float64 returns a single float64 from group-by. It is only allowed when querying group-by with one field. +func (mgb *MetaGroupBy) Float64(ctx context.Context) (_ float64, err error) { + var v []float64 + if v, err = mgb.Float64s(ctx); err != nil { + return + } + switch len(v) { + case 1: + return v[0], nil + case 0: + err = &NotFoundError{meta.Label} + default: + err = fmt.Errorf("ent: MetaGroupBy.Float64s returned %d results when one was expected", len(v)) + } + return +} + +// Float64X is like Float64, but panics if an error occurs. +func (mgb *MetaGroupBy) Float64X(ctx context.Context) float64 { + v, err := mgb.Float64(ctx) + if err != nil { + panic(err) + } + return v +} + +// Bools returns list of bools from group-by. It is only allowed when querying group-by with one field. +func (mgb *MetaGroupBy) Bools(ctx context.Context) ([]bool, error) { + if len(mgb.fields) > 1 { + return nil, errors.New("ent: MetaGroupBy.Bools is not achievable when grouping more than 1 field") + } + var v []bool + if err := mgb.Scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// BoolsX is like Bools, but panics if an error occurs. +func (mgb *MetaGroupBy) BoolsX(ctx context.Context) []bool { + v, err := mgb.Bools(ctx) + if err != nil { + panic(err) + } + return v +} + +// Bool returns a single bool from group-by. It is only allowed when querying group-by with one field. +func (mgb *MetaGroupBy) Bool(ctx context.Context) (_ bool, err error) { + var v []bool + if v, err = mgb.Bools(ctx); err != nil { + return + } + switch len(v) { + case 1: + return v[0], nil + case 0: + err = &NotFoundError{meta.Label} + default: + err = fmt.Errorf("ent: MetaGroupBy.Bools returned %d results when one was expected", len(v)) + } + return +} + +// BoolX is like Bool, but panics if an error occurs. +func (mgb *MetaGroupBy) BoolX(ctx context.Context) bool { + v, err := mgb.Bool(ctx) + if err != nil { + panic(err) + } + return v +} + +func (mgb *MetaGroupBy) sqlScan(ctx context.Context, v interface{}) error { + for _, f := range mgb.fields { + if !meta.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("invalid field %q for group-by", f)} + } + } + selector := mgb.sqlQuery() + if err := selector.Err(); err != nil { + return err + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := mgb.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +func (mgb *MetaGroupBy) sqlQuery() *sql.Selector { + selector := mgb.sql + columns := make([]string, 0, len(mgb.fields)+len(mgb.fns)) + columns = append(columns, mgb.fields...) + for _, fn := range mgb.fns { + columns = append(columns, fn(selector, meta.ValidColumn)) + } + return selector.Select(columns...).GroupBy(mgb.fields...) +} + +// MetaSelect is the builder for select fields of Meta entities. +type MetaSelect struct { + config + fields []string + // intermediate query (i.e. traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Scan applies the selector query and scan the result into the given value. +func (ms *MetaSelect) Scan(ctx context.Context, v interface{}) error { + query, err := ms.path(ctx) + if err != nil { + return err + } + ms.sql = query + return ms.sqlScan(ctx, v) +} + +// ScanX is like Scan, but panics if an error occurs. 
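MetaSelect works like MetaGroupBy without the grouping step: pick columns with Select, then scan them out; the single-value helpers are only legal when exactly one field is selected. With the usual client/ctx assumptions:

    // Fetch only the value column for one key.
    services, err := client.Meta.Query().
        Where(meta.KeyEQ("service")).
        Select(meta.FieldValue).
        Strings(ctx)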
+func (ms *MetaSelect) ScanX(ctx context.Context, v interface{}) { + if err := ms.Scan(ctx, v); err != nil { + panic(err) + } +} + +// Strings returns list of strings from selector. It is only allowed when selecting one field. +func (ms *MetaSelect) Strings(ctx context.Context) ([]string, error) { + if len(ms.fields) > 1 { + return nil, errors.New("ent: MetaSelect.Strings is not achievable when selecting more than 1 field") + } + var v []string + if err := ms.Scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// StringsX is like Strings, but panics if an error occurs. +func (ms *MetaSelect) StringsX(ctx context.Context) []string { + v, err := ms.Strings(ctx) + if err != nil { + panic(err) + } + return v +} + +// String returns a single string from selector. It is only allowed when selecting one field. +func (ms *MetaSelect) String(ctx context.Context) (_ string, err error) { + var v []string + if v, err = ms.Strings(ctx); err != nil { + return + } + switch len(v) { + case 1: + return v[0], nil + case 0: + err = &NotFoundError{meta.Label} + default: + err = fmt.Errorf("ent: MetaSelect.Strings returned %d results when one was expected", len(v)) + } + return +} + +// StringX is like String, but panics if an error occurs. +func (ms *MetaSelect) StringX(ctx context.Context) string { + v, err := ms.String(ctx) + if err != nil { + panic(err) + } + return v +} + +// Ints returns list of ints from selector. It is only allowed when selecting one field. +func (ms *MetaSelect) Ints(ctx context.Context) ([]int, error) { + if len(ms.fields) > 1 { + return nil, errors.New("ent: MetaSelect.Ints is not achievable when selecting more than 1 field") + } + var v []int + if err := ms.Scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// IntsX is like Ints, but panics if an error occurs. +func (ms *MetaSelect) IntsX(ctx context.Context) []int { + v, err := ms.Ints(ctx) + if err != nil { + panic(err) + } + return v +} + +// Int returns a single int from selector. It is only allowed when selecting one field. +func (ms *MetaSelect) Int(ctx context.Context) (_ int, err error) { + var v []int + if v, err = ms.Ints(ctx); err != nil { + return + } + switch len(v) { + case 1: + return v[0], nil + case 0: + err = &NotFoundError{meta.Label} + default: + err = fmt.Errorf("ent: MetaSelect.Ints returned %d results when one was expected", len(v)) + } + return +} + +// IntX is like Int, but panics if an error occurs. +func (ms *MetaSelect) IntX(ctx context.Context) int { + v, err := ms.Int(ctx) + if err != nil { + panic(err) + } + return v +} + +// Float64s returns list of float64s from selector. It is only allowed when selecting one field. +func (ms *MetaSelect) Float64s(ctx context.Context) ([]float64, error) { + if len(ms.fields) > 1 { + return nil, errors.New("ent: MetaSelect.Float64s is not achievable when selecting more than 1 field") + } + var v []float64 + if err := ms.Scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// Float64sX is like Float64s, but panics if an error occurs. +func (ms *MetaSelect) Float64sX(ctx context.Context) []float64 { + v, err := ms.Float64s(ctx) + if err != nil { + panic(err) + } + return v +} + +// Float64 returns a single float64 from selector. It is only allowed when selecting one field. 
+func (ms *MetaSelect) Float64(ctx context.Context) (_ float64, err error) { + var v []float64 + if v, err = ms.Float64s(ctx); err != nil { + return + } + switch len(v) { + case 1: + return v[0], nil + case 0: + err = &NotFoundError{meta.Label} + default: + err = fmt.Errorf("ent: MetaSelect.Float64s returned %d results when one was expected", len(v)) + } + return +} + +// Float64X is like Float64, but panics if an error occurs. +func (ms *MetaSelect) Float64X(ctx context.Context) float64 { + v, err := ms.Float64(ctx) + if err != nil { + panic(err) + } + return v +} + +// Bools returns list of bools from selector. It is only allowed when selecting one field. +func (ms *MetaSelect) Bools(ctx context.Context) ([]bool, error) { + if len(ms.fields) > 1 { + return nil, errors.New("ent: MetaSelect.Bools is not achievable when selecting more than 1 field") + } + var v []bool + if err := ms.Scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// BoolsX is like Bools, but panics if an error occurs. +func (ms *MetaSelect) BoolsX(ctx context.Context) []bool { + v, err := ms.Bools(ctx) + if err != nil { + panic(err) + } + return v +} + +// Bool returns a single bool from selector. It is only allowed when selecting one field. +func (ms *MetaSelect) Bool(ctx context.Context) (_ bool, err error) { + var v []bool + if v, err = ms.Bools(ctx); err != nil { + return + } + switch len(v) { + case 1: + return v[0], nil + case 0: + err = &NotFoundError{meta.Label} + default: + err = fmt.Errorf("ent: MetaSelect.Bools returned %d results when one was expected", len(v)) + } + return +} + +// BoolX is like Bool, but panics if an error occurs. +func (ms *MetaSelect) BoolX(ctx context.Context) bool { + v, err := ms.Bool(ctx) + if err != nil { + panic(err) + } + return v +} + +func (ms *MetaSelect) sqlScan(ctx context.Context, v interface{}) error { + for _, f := range ms.fields { + if !meta.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("invalid field %q for selection", f)} + } + } + rows := &sql.Rows{} + query, args := ms.sqlQuery().Query() + if err := ms.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +func (ms *MetaSelect) sqlQuery() sql.Querier { + selector := ms.sql + selector.Select(selector.Columns(ms.fields...)...) + return selector +} diff --git a/pkg/database/ent/meta_update.go b/pkg/database/ent/meta_update.go new file mode 100644 index 000000000..da5bf973d --- /dev/null +++ b/pkg/database/ent/meta_update.go @@ -0,0 +1,496 @@ +// Code generated by entc, DO NOT EDIT. + +package ent + +import ( + "context" + "fmt" + "time" + + "github.com/crowdsecurity/crowdsec/pkg/database/ent/alert" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/meta" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/predicate" + "github.com/facebook/ent/dialect/sql" + "github.com/facebook/ent/dialect/sql/sqlgraph" + "github.com/facebook/ent/schema/field" +) + +// MetaUpdate is the builder for updating Meta entities. +type MetaUpdate struct { + config + hooks []Hook + mutation *MetaMutation + predicates []predicate.Meta +} + +// Where adds a new predicate for the builder. +func (mu *MetaUpdate) Where(ps ...predicate.Meta) *MetaUpdate { + mu.predicates = append(mu.predicates, ps...) + return mu +} + +// SetCreatedAt sets the created_at field. 
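MetaUpdate applies one mutation to every row matched by its predicates, and Save returns the affected-row count. A sketch, assuming the Update constructor on the generated client:

    n, err := client.Meta.Update().
        Where(meta.KeyEQ("service")).
        SetValue("sshd").
        Save(ctx)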
+func (mu *MetaUpdate) SetCreatedAt(t time.Time) *MetaUpdate { + mu.mutation.SetCreatedAt(t) + return mu +} + +// SetNillableCreatedAt sets the created_at field if the given value is not nil. +func (mu *MetaUpdate) SetNillableCreatedAt(t *time.Time) *MetaUpdate { + if t != nil { + mu.SetCreatedAt(*t) + } + return mu +} + +// SetUpdatedAt sets the updated_at field. +func (mu *MetaUpdate) SetUpdatedAt(t time.Time) *MetaUpdate { + mu.mutation.SetUpdatedAt(t) + return mu +} + +// SetNillableUpdatedAt sets the updated_at field if the given value is not nil. +func (mu *MetaUpdate) SetNillableUpdatedAt(t *time.Time) *MetaUpdate { + if t != nil { + mu.SetUpdatedAt(*t) + } + return mu +} + +// SetKey sets the key field. +func (mu *MetaUpdate) SetKey(s string) *MetaUpdate { + mu.mutation.SetKey(s) + return mu +} + +// SetValue sets the value field. +func (mu *MetaUpdate) SetValue(s string) *MetaUpdate { + mu.mutation.SetValue(s) + return mu +} + +// SetOwnerID sets the owner edge to Alert by id. +func (mu *MetaUpdate) SetOwnerID(id int) *MetaUpdate { + mu.mutation.SetOwnerID(id) + return mu +} + +// SetNillableOwnerID sets the owner edge to Alert by id if the given value is not nil. +func (mu *MetaUpdate) SetNillableOwnerID(id *int) *MetaUpdate { + if id != nil { + mu = mu.SetOwnerID(*id) + } + return mu +} + +// SetOwner sets the owner edge to Alert. +func (mu *MetaUpdate) SetOwner(a *Alert) *MetaUpdate { + return mu.SetOwnerID(a.ID) +} + +// Mutation returns the MetaMutation object of the builder. +func (mu *MetaUpdate) Mutation() *MetaMutation { + return mu.mutation +} + +// ClearOwner clears the "owner" edge to type Alert. +func (mu *MetaUpdate) ClearOwner() *MetaUpdate { + mu.mutation.ClearOwner() + return mu +} + +// Save executes the query and returns the number of rows/vertices matched by this operation. +func (mu *MetaUpdate) Save(ctx context.Context) (int, error) { + var ( + err error + affected int + ) + if len(mu.hooks) == 0 { + if err = mu.check(); err != nil { + return 0, err + } + affected, err = mu.sqlSave(ctx) + } else { + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*MetaMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err = mu.check(); err != nil { + return 0, err + } + mu.mutation = mutation + affected, err = mu.sqlSave(ctx) + mutation.done = true + return affected, err + }) + for i := len(mu.hooks) - 1; i >= 0; i-- { + mut = mu.hooks[i](mut) + } + if _, err := mut.Mutate(ctx, mu.mutation); err != nil { + return 0, err + } + } + return affected, err +} + +// SaveX is like Save, but panics if an error occurs. +func (mu *MetaUpdate) SaveX(ctx context.Context) int { + affected, err := mu.Save(ctx) + if err != nil { + panic(err) + } + return affected +} + +// Exec executes the query. +func (mu *MetaUpdate) Exec(ctx context.Context) error { + _, err := mu.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (mu *MetaUpdate) ExecX(ctx context.Context) { + if err := mu.Exec(ctx); err != nil { + panic(err) + } +} + +// check runs all checks and user-defined validators on the builder. 
+func (mu *MetaUpdate) check() error { + if v, ok := mu.mutation.Value(); ok { + if err := meta.ValueValidator(v); err != nil { + return &ValidationError{Name: "value", err: fmt.Errorf("ent: validator failed for field \"value\": %w", err)} + } + } + return nil +} + +func (mu *MetaUpdate) sqlSave(ctx context.Context) (n int, err error) { + _spec := &sqlgraph.UpdateSpec{ + Node: &sqlgraph.NodeSpec{ + Table: meta.Table, + Columns: meta.Columns, + ID: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: meta.FieldID, + }, + }, + } + if ps := mu.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := mu.mutation.CreatedAt(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: meta.FieldCreatedAt, + }) + } + if value, ok := mu.mutation.UpdatedAt(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: meta.FieldUpdatedAt, + }) + } + if value, ok := mu.mutation.Key(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: meta.FieldKey, + }) + } + if value, ok := mu.mutation.Value(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: meta.FieldValue, + }) + } + if mu.mutation.OwnerCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: meta.OwnerTable, + Columns: []string{meta.OwnerColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: alert.FieldID, + }, + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := mu.mutation.OwnerIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: meta.OwnerTable, + Columns: []string{meta.OwnerColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: alert.FieldID, + }, + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if n, err = sqlgraph.UpdateNodes(ctx, mu.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{meta.Label} + } else if cerr, ok := isSQLConstraintError(err); ok { + err = cerr + } + return 0, err + } + return n, nil +} + +// MetaUpdateOne is the builder for updating a single Meta entity. +type MetaUpdateOne struct { + config + hooks []Hook + mutation *MetaMutation +} + +// SetCreatedAt sets the created_at field. +func (muo *MetaUpdateOne) SetCreatedAt(t time.Time) *MetaUpdateOne { + muo.mutation.SetCreatedAt(t) + return muo +} + +// SetNillableCreatedAt sets the created_at field if the given value is not nil. +func (muo *MetaUpdateOne) SetNillableCreatedAt(t *time.Time) *MetaUpdateOne { + if t != nil { + muo.SetCreatedAt(*t) + } + return muo +} + +// SetUpdatedAt sets the updated_at field. +func (muo *MetaUpdateOne) SetUpdatedAt(t time.Time) *MetaUpdateOne { + muo.mutation.SetUpdatedAt(t) + return muo +} + +// SetNillableUpdatedAt sets the updated_at field if the given value is not nil. +func (muo *MetaUpdateOne) SetNillableUpdatedAt(t *time.Time) *MetaUpdateOne { + if t != nil { + muo.SetUpdatedAt(*t) + } + return muo +} + +// SetKey sets the key field. 
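MetaUpdateOne is the single-row variant; its Save returns the updated entity rather than a count. It is typically obtained from an already-loaded *Meta through the Update method defined in meta.go at the top of this diff:

    m2, err := m.Update(). // m is a previously loaded *ent.Meta
        SetValue("nginx").
        ClearOwner(). // detach it from its alert
        Save(ctx)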
+func (muo *MetaUpdateOne) SetKey(s string) *MetaUpdateOne { + muo.mutation.SetKey(s) + return muo +} + +// SetValue sets the value field. +func (muo *MetaUpdateOne) SetValue(s string) *MetaUpdateOne { + muo.mutation.SetValue(s) + return muo +} + +// SetOwnerID sets the owner edge to Alert by id. +func (muo *MetaUpdateOne) SetOwnerID(id int) *MetaUpdateOne { + muo.mutation.SetOwnerID(id) + return muo +} + +// SetNillableOwnerID sets the owner edge to Alert by id if the given value is not nil. +func (muo *MetaUpdateOne) SetNillableOwnerID(id *int) *MetaUpdateOne { + if id != nil { + muo = muo.SetOwnerID(*id) + } + return muo +} + +// SetOwner sets the owner edge to Alert. +func (muo *MetaUpdateOne) SetOwner(a *Alert) *MetaUpdateOne { + return muo.SetOwnerID(a.ID) +} + +// Mutation returns the MetaMutation object of the builder. +func (muo *MetaUpdateOne) Mutation() *MetaMutation { + return muo.mutation +} + +// ClearOwner clears the "owner" edge to type Alert. +func (muo *MetaUpdateOne) ClearOwner() *MetaUpdateOne { + muo.mutation.ClearOwner() + return muo +} + +// Save executes the query and returns the updated entity. +func (muo *MetaUpdateOne) Save(ctx context.Context) (*Meta, error) { + var ( + err error + node *Meta + ) + if len(muo.hooks) == 0 { + if err = muo.check(); err != nil { + return nil, err + } + node, err = muo.sqlSave(ctx) + } else { + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*MetaMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err = muo.check(); err != nil { + return nil, err + } + muo.mutation = mutation + node, err = muo.sqlSave(ctx) + mutation.done = true + return node, err + }) + for i := len(muo.hooks) - 1; i >= 0; i-- { + mut = muo.hooks[i](mut) + } + if _, err := mut.Mutate(ctx, muo.mutation); err != nil { + return nil, err + } + } + return node, err +} + +// SaveX is like Save, but panics if an error occurs. +func (muo *MetaUpdateOne) SaveX(ctx context.Context) *Meta { + node, err := muo.Save(ctx) + if err != nil { + panic(err) + } + return node +} + +// Exec executes the query on the entity. +func (muo *MetaUpdateOne) Exec(ctx context.Context) error { + _, err := muo.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (muo *MetaUpdateOne) ExecX(ctx context.Context) { + if err := muo.Exec(ctx); err != nil { + panic(err) + } +} + +// check runs all checks and user-defined validators on the builder. 
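+
+// Illustrative usage sketch, not generated code (the id 42, "client" and
+// "ctx" are assumptions): unlike MetaUpdate, this builder returns the updated
+// entity rather than a row count.
+//
+//	m, err := client.Meta.UpdateOneID(42).
+//		SetValue("production").
+//		Save(ctx) // m is the refreshed *Meta
+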
+func (muo *MetaUpdateOne) check() error { + if v, ok := muo.mutation.Value(); ok { + if err := meta.ValueValidator(v); err != nil { + return &ValidationError{Name: "value", err: fmt.Errorf("ent: validator failed for field \"value\": %w", err)} + } + } + return nil +} + +func (muo *MetaUpdateOne) sqlSave(ctx context.Context) (_node *Meta, err error) { + _spec := &sqlgraph.UpdateSpec{ + Node: &sqlgraph.NodeSpec{ + Table: meta.Table, + Columns: meta.Columns, + ID: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: meta.FieldID, + }, + }, + } + id, ok := muo.mutation.ID() + if !ok { + return nil, &ValidationError{Name: "ID", err: fmt.Errorf("missing Meta.ID for update")} + } + _spec.Node.ID.Value = id + if value, ok := muo.mutation.CreatedAt(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: meta.FieldCreatedAt, + }) + } + if value, ok := muo.mutation.UpdatedAt(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: meta.FieldUpdatedAt, + }) + } + if value, ok := muo.mutation.Key(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: meta.FieldKey, + }) + } + if value, ok := muo.mutation.Value(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: meta.FieldValue, + }) + } + if muo.mutation.OwnerCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: meta.OwnerTable, + Columns: []string{meta.OwnerColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: alert.FieldID, + }, + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := muo.mutation.OwnerIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: meta.OwnerTable, + Columns: []string{meta.OwnerColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: alert.FieldID, + }, + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + _node = &Meta{config: muo.config} + _spec.Assign = _node.assignValues + _spec.ScanValues = _node.scanValues() + if err = sqlgraph.UpdateNode(ctx, muo.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{meta.Label} + } else if cerr, ok := isSQLConstraintError(err); ok { + err = cerr + } + return nil, err + } + return _node, nil +} diff --git a/pkg/database/ent/migrate/migrate.go b/pkg/database/ent/migrate/migrate.go new file mode 100644 index 000000000..7e1d225b9 --- /dev/null +++ b/pkg/database/ent/migrate/migrate.go @@ -0,0 +1,70 @@ +// Code generated by entc, DO NOT EDIT. + +package migrate + +import ( + "context" + "fmt" + "io" + + "github.com/facebook/ent/dialect" + "github.com/facebook/ent/dialect/sql/schema" +) + +var ( + // WithGlobalUniqueID sets the universal ids options to the migration. + // If this option is enabled, ent migration will allocate a 1<<32 range + // for the ids of each entity (table). + // Note that this option cannot be applied on tables that already exist. + WithGlobalUniqueID = schema.WithGlobalUniqueID + // WithDropColumn sets the drop column option to the migration. 
+ // If this option is enabled, ent migration will drop old columns + // that were used for both fields and edges. This defaults to false. + WithDropColumn = schema.WithDropColumn + // WithDropIndex sets the drop index option to the migration. + // If this option is enabled, ent migration will drop old indexes + // that were defined in the schema. This defaults to false. + // Note that unique constraints are defined using `UNIQUE INDEX`, + // and therefore, it's recommended to enable this option to get more + // flexibility in the schema changes. + WithDropIndex = schema.WithDropIndex + // WithFixture sets the foreign-key renaming option to the migration when upgrading + // ent from v0.1.0 (issue-#285). Defaults to false. + WithFixture = schema.WithFixture +) + +// Schema is the API for creating, migrating and dropping a schema. +type Schema struct { + drv dialect.Driver + universalID bool +} + +// NewSchema creates a new schema client. +func NewSchema(drv dialect.Driver) *Schema { return &Schema{drv: drv} } + +// Create creates all schema resources. +func (s *Schema) Create(ctx context.Context, opts ...schema.MigrateOption) error { + migrate, err := schema.NewMigrate(s.drv, opts...) + if err != nil { + return fmt.Errorf("ent/migrate: %v", err) + } + return migrate.Create(ctx, Tables...) +} + +// WriteTo writes the schema changes to w instead of running them against the database. +// +// if err := client.Schema.WriteTo(context.Background(), os.Stdout); err != nil { +// log.Fatal(err) +// } +// +func (s *Schema) WriteTo(ctx context.Context, w io.Writer, opts ...schema.MigrateOption) error { + drv := &schema.WriteDriver{ + Writer: w, + Driver: s.drv, + } + migrate, err := schema.NewMigrate(drv, opts...) + if err != nil { + return fmt.Errorf("ent/migrate: %v", err) + } + return migrate.Create(ctx, Tables...) +} diff --git a/pkg/database/ent/migrate/schema.go b/pkg/database/ent/migrate/schema.go new file mode 100644 index 000000000..cea75ced2 --- /dev/null +++ b/pkg/database/ent/migrate/schema.go @@ -0,0 +1,189 @@ +// Code generated by entc, DO NOT EDIT. + +package migrate + +import ( + "github.com/facebook/ent/dialect/sql/schema" + "github.com/facebook/ent/schema/field" +) + +var ( + // AlertsColumns holds the columns for the "alerts" table. 
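+
+	// Illustrative sketch, not generated code: the tables declared in this
+	// block are what Schema.Create above hands to the migration engine.
+	// Assuming an *ent.Client named "client" and a context "ctx" at startup:
+	//
+	//	if err := client.Schema.Create(ctx,
+	//		migrate.WithDropIndex(true),
+	//		migrate.WithDropColumn(true),
+	//	); err != nil {
+	//		log.Fatalf("failed creating schema resources: %v", err)
+	//	}
+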
+ AlertsColumns = []*schema.Column{ + {Name: "id", Type: field.TypeInt, Increment: true}, + {Name: "created_at", Type: field.TypeTime}, + {Name: "updated_at", Type: field.TypeTime}, + {Name: "scenario", Type: field.TypeString}, + {Name: "bucket_id", Type: field.TypeString, Nullable: true, Default: ""}, + {Name: "message", Type: field.TypeString, Nullable: true, Default: ""}, + {Name: "events_count", Type: field.TypeInt32, Nullable: true}, + {Name: "started_at", Type: field.TypeTime, Nullable: true}, + {Name: "stopped_at", Type: field.TypeTime, Nullable: true}, + {Name: "source_ip", Type: field.TypeString, Nullable: true}, + {Name: "source_range", Type: field.TypeString, Nullable: true}, + {Name: "source_as_number", Type: field.TypeString, Nullable: true}, + {Name: "source_as_name", Type: field.TypeString, Nullable: true}, + {Name: "source_country", Type: field.TypeString, Nullable: true}, + {Name: "source_latitude", Type: field.TypeFloat32, Nullable: true}, + {Name: "source_longitude", Type: field.TypeFloat32, Nullable: true}, + {Name: "source_scope", Type: field.TypeString, Nullable: true}, + {Name: "source_value", Type: field.TypeString, Nullable: true}, + {Name: "capacity", Type: field.TypeInt32, Nullable: true}, + {Name: "leak_speed", Type: field.TypeString, Nullable: true}, + {Name: "scenario_version", Type: field.TypeString, Nullable: true}, + {Name: "scenario_hash", Type: field.TypeString, Nullable: true}, + {Name: "simulated", Type: field.TypeBool}, + {Name: "machine_alerts", Type: field.TypeInt, Nullable: true}, + } + // AlertsTable holds the schema information for the "alerts" table. + AlertsTable = &schema.Table{ + Name: "alerts", + Columns: AlertsColumns, + PrimaryKey: []*schema.Column{AlertsColumns[0]}, + ForeignKeys: []*schema.ForeignKey{ + { + Symbol: "alerts_machines_alerts", + Columns: []*schema.Column{AlertsColumns[23]}, + + RefColumns: []*schema.Column{MachinesColumns[0]}, + OnDelete: schema.SetNull, + }, + }, + } + // BouncersColumns holds the columns for the "bouncers" table. + BouncersColumns = []*schema.Column{ + {Name: "id", Type: field.TypeInt, Increment: true}, + {Name: "created_at", Type: field.TypeTime}, + {Name: "updated_at", Type: field.TypeTime}, + {Name: "name", Type: field.TypeString, Unique: true}, + {Name: "api_key", Type: field.TypeString}, + {Name: "revoked", Type: field.TypeBool}, + {Name: "ip_address", Type: field.TypeString, Nullable: true, Default: ""}, + {Name: "type", Type: field.TypeString, Nullable: true}, + {Name: "version", Type: field.TypeString, Nullable: true}, + {Name: "until", Type: field.TypeTime, Nullable: true}, + {Name: "last_pull", Type: field.TypeTime}, + } + // BouncersTable holds the schema information for the "bouncers" table. + BouncersTable = &schema.Table{ + Name: "bouncers", + Columns: BouncersColumns, + PrimaryKey: []*schema.Column{BouncersColumns[0]}, + ForeignKeys: []*schema.ForeignKey{}, + } + // DecisionsColumns holds the columns for the "decisions" table. 
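+
+	// Editorial note: foreign keys reference columns by slice position, e.g.
+	// AlertsColumns[23] above is the nullable "machine_alerts" column and
+	// DecisionsColumns[12] below is "alert_decisions"; the referenced tables
+	// are wired up in init() at the bottom of this file.
+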
+ DecisionsColumns = []*schema.Column{ + {Name: "id", Type: field.TypeInt, Increment: true}, + {Name: "created_at", Type: field.TypeTime}, + {Name: "updated_at", Type: field.TypeTime}, + {Name: "until", Type: field.TypeTime}, + {Name: "scenario", Type: field.TypeString}, + {Name: "type", Type: field.TypeString}, + {Name: "start_ip", Type: field.TypeInt64, Nullable: true}, + {Name: "end_ip", Type: field.TypeInt64, Nullable: true}, + {Name: "scope", Type: field.TypeString}, + {Name: "value", Type: field.TypeString}, + {Name: "origin", Type: field.TypeString}, + {Name: "simulated", Type: field.TypeBool}, + {Name: "alert_decisions", Type: field.TypeInt, Nullable: true}, + } + // DecisionsTable holds the schema information for the "decisions" table. + DecisionsTable = &schema.Table{ + Name: "decisions", + Columns: DecisionsColumns, + PrimaryKey: []*schema.Column{DecisionsColumns[0]}, + ForeignKeys: []*schema.ForeignKey{ + { + Symbol: "decisions_alerts_decisions", + Columns: []*schema.Column{DecisionsColumns[12]}, + + RefColumns: []*schema.Column{AlertsColumns[0]}, + OnDelete: schema.SetNull, + }, + }, + } + // EventsColumns holds the columns for the "events" table. + EventsColumns = []*schema.Column{ + {Name: "id", Type: field.TypeInt, Increment: true}, + {Name: "created_at", Type: field.TypeTime}, + {Name: "updated_at", Type: field.TypeTime}, + {Name: "time", Type: field.TypeTime}, + {Name: "serialized", Type: field.TypeString, Size: 4095}, + {Name: "alert_events", Type: field.TypeInt, Nullable: true}, + } + // EventsTable holds the schema information for the "events" table. + EventsTable = &schema.Table{ + Name: "events", + Columns: EventsColumns, + PrimaryKey: []*schema.Column{EventsColumns[0]}, + ForeignKeys: []*schema.ForeignKey{ + { + Symbol: "events_alerts_events", + Columns: []*schema.Column{EventsColumns[5]}, + + RefColumns: []*schema.Column{AlertsColumns[0]}, + OnDelete: schema.SetNull, + }, + }, + } + // MachinesColumns holds the columns for the "machines" table. + MachinesColumns = []*schema.Column{ + {Name: "id", Type: field.TypeInt, Increment: true}, + {Name: "created_at", Type: field.TypeTime}, + {Name: "updated_at", Type: field.TypeTime}, + {Name: "machine_id", Type: field.TypeString, Unique: true}, + {Name: "password", Type: field.TypeString}, + {Name: "ip_address", Type: field.TypeString}, + {Name: "scenarios", Type: field.TypeString, Nullable: true, Size: 4095}, + {Name: "version", Type: field.TypeString, Nullable: true}, + {Name: "is_validated", Type: field.TypeBool}, + {Name: "status", Type: field.TypeString, Nullable: true}, + } + // MachinesTable holds the schema information for the "machines" table. + MachinesTable = &schema.Table{ + Name: "machines", + Columns: MachinesColumns, + PrimaryKey: []*schema.Column{MachinesColumns[0]}, + ForeignKeys: []*schema.ForeignKey{}, + } + // MetaColumns holds the columns for the "meta" table. + MetaColumns = []*schema.Column{ + {Name: "id", Type: field.TypeInt, Increment: true}, + {Name: "created_at", Type: field.TypeTime}, + {Name: "updated_at", Type: field.TypeTime}, + {Name: "key", Type: field.TypeString}, + {Name: "value", Type: field.TypeString, Size: 4095}, + {Name: "alert_metas", Type: field.TypeInt, Nullable: true}, + } + // MetaTable holds the schema information for the "meta" table. 
+ MetaTable = &schema.Table{ + Name: "meta", + Columns: MetaColumns, + PrimaryKey: []*schema.Column{MetaColumns[0]}, + ForeignKeys: []*schema.ForeignKey{ + { + Symbol: "meta_alerts_metas", + Columns: []*schema.Column{MetaColumns[5]}, + + RefColumns: []*schema.Column{AlertsColumns[0]}, + OnDelete: schema.SetNull, + }, + }, + } + // Tables holds all the tables in the schema. + Tables = []*schema.Table{ + AlertsTable, + BouncersTable, + DecisionsTable, + EventsTable, + MachinesTable, + MetaTable, + } +) + +func init() { + AlertsTable.ForeignKeys[0].RefTable = MachinesTable + DecisionsTable.ForeignKeys[0].RefTable = AlertsTable + EventsTable.ForeignKeys[0].RefTable = AlertsTable + MetaTable.ForeignKeys[0].RefTable = AlertsTable +} diff --git a/pkg/database/ent/mutation.go b/pkg/database/ent/mutation.go new file mode 100644 index 000000000..059d7d738 --- /dev/null +++ b/pkg/database/ent/mutation.go @@ -0,0 +1,6113 @@ +// Code generated by entc, DO NOT EDIT. + +package ent + +import ( + "context" + "fmt" + "sync" + "time" + + "github.com/crowdsecurity/crowdsec/pkg/database/ent/alert" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/bouncer" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/decision" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/event" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/machine" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/meta" + + "github.com/facebook/ent" +) + +const ( + // Operation types. + OpCreate = ent.OpCreate + OpDelete = ent.OpDelete + OpDeleteOne = ent.OpDeleteOne + OpUpdate = ent.OpUpdate + OpUpdateOne = ent.OpUpdateOne + + // Node types. + TypeAlert = "Alert" + TypeBouncer = "Bouncer" + TypeDecision = "Decision" + TypeEvent = "Event" + TypeMachine = "Machine" + TypeMeta = "Meta" +) + +// AlertMutation represents an operation that mutate the Alerts +// nodes in the graph. +type AlertMutation struct { + config + op Op + typ string + id *int + created_at *time.Time + updated_at *time.Time + scenario *string + bucketId *string + message *string + eventsCount *int32 + addeventsCount *int32 + startedAt *time.Time + stoppedAt *time.Time + sourceIp *string + sourceRange *string + sourceAsNumber *string + sourceAsName *string + sourceCountry *string + sourceLatitude *float32 + addsourceLatitude *float32 + sourceLongitude *float32 + addsourceLongitude *float32 + sourceScope *string + sourceValue *string + capacity *int32 + addcapacity *int32 + leakSpeed *string + scenarioVersion *string + scenarioHash *string + simulated *bool + clearedFields map[string]struct{} + owner *int + clearedowner bool + decisions map[int]struct{} + removeddecisions map[int]struct{} + cleareddecisions bool + events map[int]struct{} + removedevents map[int]struct{} + clearedevents bool + metas map[int]struct{} + removedmetas map[int]struct{} + clearedmetas bool + done bool + oldValue func(context.Context) (*Alert, error) +} + +var _ ent.Mutation = (*AlertMutation)(nil) + +// alertOption allows to manage the mutation configuration using functional options. +type alertOption func(*AlertMutation) + +// newAlertMutation creates new mutation for $n.Name. +func newAlertMutation(c config, op Op, opts ...alertOption) *AlertMutation { + m := &AlertMutation{ + config: c, + op: op, + typ: TypeAlert, + clearedFields: make(map[string]struct{}), + } + for _, opt := range opts { + opt(m) + } + return m +} + +// withAlertID sets the id field of the mutation. 
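+
+// Illustrative usage sketch, not generated code: mutations are what hooks
+// receive. Assuming the generated ent/hook package and an *ent.Client
+// named "client":
+//
+//	client.Alert.Use(func(next ent.Mutator) ent.Mutator {
+//		return hook.AlertFunc(func(ctx context.Context, m *ent.AlertMutation) (ent.Value, error) {
+//			m.SetUpdatedAt(time.Now()) // stamp every alert mutation
+//			return next.Mutate(ctx, m)
+//		})
+//	})
+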
+func withAlertID(id int) alertOption { + return func(m *AlertMutation) { + var ( + err error + once sync.Once + value *Alert + ) + m.oldValue = func(ctx context.Context) (*Alert, error) { + once.Do(func() { + if m.done { + err = fmt.Errorf("querying old values post mutation is not allowed") + } else { + value, err = m.Client().Alert.Get(ctx, id) + } + }) + return value, err + } + m.id = &id + } +} + +// withAlert sets the old Alert of the mutation. +func withAlert(node *Alert) alertOption { + return func(m *AlertMutation) { + m.oldValue = func(context.Context) (*Alert, error) { + return node, nil + } + m.id = &node.ID + } +} + +// Client returns a new `ent.Client` from the mutation. If the mutation was +// executed in a transaction (ent.Tx), a transactional client is returned. +func (m AlertMutation) Client() *Client { + client := &Client{config: m.config} + client.init() + return client +} + +// Tx returns an `ent.Tx` for mutations that were executed in transactions; +// it returns an error otherwise. +func (m AlertMutation) Tx() (*Tx, error) { + if _, ok := m.driver.(*txDriver); !ok { + return nil, fmt.Errorf("ent: mutation is not running in a transaction") + } + tx := &Tx{config: m.config} + tx.init() + return tx, nil +} + +// ID returns the id value in the mutation. Note that, the id +// is available only if it was provided to the builder. +func (m *AlertMutation) ID() (id int, exists bool) { + if m.id == nil { + return + } + return *m.id, true +} + +// SetCreatedAt sets the created_at field. +func (m *AlertMutation) SetCreatedAt(t time.Time) { + m.created_at = &t +} + +// CreatedAt returns the created_at value in the mutation. +func (m *AlertMutation) CreatedAt() (r time.Time, exists bool) { + v := m.created_at + if v == nil { + return + } + return *v, true +} + +// OldCreatedAt returns the old created_at value of the Alert. +// If the Alert object wasn't provided to the builder, the object is fetched +// from the database. +// An error is returned if the mutation operation is not UpdateOne, or database query fails. +func (m *AlertMutation) OldCreatedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, fmt.Errorf("OldCreatedAt is allowed only on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, fmt.Errorf("OldCreatedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldCreatedAt: %w", err) + } + return oldValue.CreatedAt, nil +} + +// ResetCreatedAt reset all changes of the "created_at" field. +func (m *AlertMutation) ResetCreatedAt() { + m.created_at = nil +} + +// SetUpdatedAt sets the updated_at field. +func (m *AlertMutation) SetUpdatedAt(t time.Time) { + m.updated_at = &t +} + +// UpdatedAt returns the updated_at value in the mutation. +func (m *AlertMutation) UpdatedAt() (r time.Time, exists bool) { + v := m.updated_at + if v == nil { + return + } + return *v, true +} + +// OldUpdatedAt returns the old updated_at value of the Alert. +// If the Alert object wasn't provided to the builder, the object is fetched +// from the database. +// An error is returned if the mutation operation is not UpdateOne, or database query fails. 
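+
+// Illustrative sketch (hook context assumed): the Old* accessors lazily load
+// the stored row through the oldValue closure set in withAlertID, which is
+// why they are only usable on UpdateOne operations.
+//
+//	if old, err := m.OldCreatedAt(ctx); err == nil {
+//		if id, ok := m.ID(); ok {
+//			log.Printf("alert %d was created at %s", id, old)
+//		}
+//	}
+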
+func (m *AlertMutation) OldUpdatedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, fmt.Errorf("OldUpdatedAt is allowed only on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, fmt.Errorf("OldUpdatedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldUpdatedAt: %w", err) + } + return oldValue.UpdatedAt, nil +} + +// ResetUpdatedAt reset all changes of the "updated_at" field. +func (m *AlertMutation) ResetUpdatedAt() { + m.updated_at = nil +} + +// SetScenario sets the scenario field. +func (m *AlertMutation) SetScenario(s string) { + m.scenario = &s +} + +// Scenario returns the scenario value in the mutation. +func (m *AlertMutation) Scenario() (r string, exists bool) { + v := m.scenario + if v == nil { + return + } + return *v, true +} + +// OldScenario returns the old scenario value of the Alert. +// If the Alert object wasn't provided to the builder, the object is fetched +// from the database. +// An error is returned if the mutation operation is not UpdateOne, or database query fails. +func (m *AlertMutation) OldScenario(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, fmt.Errorf("OldScenario is allowed only on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, fmt.Errorf("OldScenario requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldScenario: %w", err) + } + return oldValue.Scenario, nil +} + +// ResetScenario reset all changes of the "scenario" field. +func (m *AlertMutation) ResetScenario() { + m.scenario = nil +} + +// SetBucketId sets the bucketId field. +func (m *AlertMutation) SetBucketId(s string) { + m.bucketId = &s +} + +// BucketId returns the bucketId value in the mutation. +func (m *AlertMutation) BucketId() (r string, exists bool) { + v := m.bucketId + if v == nil { + return + } + return *v, true +} + +// OldBucketId returns the old bucketId value of the Alert. +// If the Alert object wasn't provided to the builder, the object is fetched +// from the database. +// An error is returned if the mutation operation is not UpdateOne, or database query fails. +func (m *AlertMutation) OldBucketId(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, fmt.Errorf("OldBucketId is allowed only on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, fmt.Errorf("OldBucketId requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldBucketId: %w", err) + } + return oldValue.BucketId, nil +} + +// ClearBucketId clears the value of bucketId. +func (m *AlertMutation) ClearBucketId() { + m.bucketId = nil + m.clearedFields[alert.FieldBucketId] = struct{}{} +} + +// BucketIdCleared returns if the field bucketId was cleared in this mutation. +func (m *AlertMutation) BucketIdCleared() bool { + _, ok := m.clearedFields[alert.FieldBucketId] + return ok +} + +// ResetBucketId reset all changes of the "bucketId" field. +func (m *AlertMutation) ResetBucketId() { + m.bucketId = nil + delete(m.clearedFields, alert.FieldBucketId) +} + +// SetMessage sets the message field. +func (m *AlertMutation) SetMessage(s string) { + m.message = &s +} + +// Message returns the message value in the mutation. 
+func (m *AlertMutation) Message() (r string, exists bool) { + v := m.message + if v == nil { + return + } + return *v, true +} + +// OldMessage returns the old message value of the Alert. +// If the Alert object wasn't provided to the builder, the object is fetched +// from the database. +// An error is returned if the mutation operation is not UpdateOne, or database query fails. +func (m *AlertMutation) OldMessage(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, fmt.Errorf("OldMessage is allowed only on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, fmt.Errorf("OldMessage requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldMessage: %w", err) + } + return oldValue.Message, nil +} + +// ClearMessage clears the value of message. +func (m *AlertMutation) ClearMessage() { + m.message = nil + m.clearedFields[alert.FieldMessage] = struct{}{} +} + +// MessageCleared returns if the field message was cleared in this mutation. +func (m *AlertMutation) MessageCleared() bool { + _, ok := m.clearedFields[alert.FieldMessage] + return ok +} + +// ResetMessage reset all changes of the "message" field. +func (m *AlertMutation) ResetMessage() { + m.message = nil + delete(m.clearedFields, alert.FieldMessage) +} + +// SetEventsCount sets the eventsCount field. +func (m *AlertMutation) SetEventsCount(i int32) { + m.eventsCount = &i + m.addeventsCount = nil +} + +// EventsCount returns the eventsCount value in the mutation. +func (m *AlertMutation) EventsCount() (r int32, exists bool) { + v := m.eventsCount + if v == nil { + return + } + return *v, true +} + +// OldEventsCount returns the old eventsCount value of the Alert. +// If the Alert object wasn't provided to the builder, the object is fetched +// from the database. +// An error is returned if the mutation operation is not UpdateOne, or database query fails. +func (m *AlertMutation) OldEventsCount(ctx context.Context) (v int32, err error) { + if !m.op.Is(OpUpdateOne) { + return v, fmt.Errorf("OldEventsCount is allowed only on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, fmt.Errorf("OldEventsCount requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldEventsCount: %w", err) + } + return oldValue.EventsCount, nil +} + +// AddEventsCount adds i to eventsCount. +func (m *AlertMutation) AddEventsCount(i int32) { + if m.addeventsCount != nil { + *m.addeventsCount += i + } else { + m.addeventsCount = &i + } +} + +// AddedEventsCount returns the value that was added to the eventsCount field in this mutation. +func (m *AlertMutation) AddedEventsCount() (r int32, exists bool) { + v := m.addeventsCount + if v == nil { + return + } + return *v, true +} + +// ClearEventsCount clears the value of eventsCount. +func (m *AlertMutation) ClearEventsCount() { + m.eventsCount = nil + m.addeventsCount = nil + m.clearedFields[alert.FieldEventsCount] = struct{}{} +} + +// EventsCountCleared returns if the field eventsCount was cleared in this mutation. +func (m *AlertMutation) EventsCountCleared() bool { + _, ok := m.clearedFields[alert.FieldEventsCount] + return ok +} + +// ResetEventsCount reset all changes of the "eventsCount" field. 
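+
+// Illustrative sketch: Set* overwrites while Add* records a relative delta
+// (addeventsCount above), so increments compose instead of clobbering each
+// other. Assuming the update builder exposes the matching method:
+//
+//	err := client.Alert.UpdateOneID(id).AddEventsCount(1).Exec(ctx)
+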
+func (m *AlertMutation) ResetEventsCount() { + m.eventsCount = nil + m.addeventsCount = nil + delete(m.clearedFields, alert.FieldEventsCount) +} + +// SetStartedAt sets the startedAt field. +func (m *AlertMutation) SetStartedAt(t time.Time) { + m.startedAt = &t +} + +// StartedAt returns the startedAt value in the mutation. +func (m *AlertMutation) StartedAt() (r time.Time, exists bool) { + v := m.startedAt + if v == nil { + return + } + return *v, true +} + +// OldStartedAt returns the old startedAt value of the Alert. +// If the Alert object wasn't provided to the builder, the object is fetched +// from the database. +// An error is returned if the mutation operation is not UpdateOne, or database query fails. +func (m *AlertMutation) OldStartedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, fmt.Errorf("OldStartedAt is allowed only on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, fmt.Errorf("OldStartedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldStartedAt: %w", err) + } + return oldValue.StartedAt, nil +} + +// ClearStartedAt clears the value of startedAt. +func (m *AlertMutation) ClearStartedAt() { + m.startedAt = nil + m.clearedFields[alert.FieldStartedAt] = struct{}{} +} + +// StartedAtCleared returns if the field startedAt was cleared in this mutation. +func (m *AlertMutation) StartedAtCleared() bool { + _, ok := m.clearedFields[alert.FieldStartedAt] + return ok +} + +// ResetStartedAt reset all changes of the "startedAt" field. +func (m *AlertMutation) ResetStartedAt() { + m.startedAt = nil + delete(m.clearedFields, alert.FieldStartedAt) +} + +// SetStoppedAt sets the stoppedAt field. +func (m *AlertMutation) SetStoppedAt(t time.Time) { + m.stoppedAt = &t +} + +// StoppedAt returns the stoppedAt value in the mutation. +func (m *AlertMutation) StoppedAt() (r time.Time, exists bool) { + v := m.stoppedAt + if v == nil { + return + } + return *v, true +} + +// OldStoppedAt returns the old stoppedAt value of the Alert. +// If the Alert object wasn't provided to the builder, the object is fetched +// from the database. +// An error is returned if the mutation operation is not UpdateOne, or database query fails. +func (m *AlertMutation) OldStoppedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, fmt.Errorf("OldStoppedAt is allowed only on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, fmt.Errorf("OldStoppedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldStoppedAt: %w", err) + } + return oldValue.StoppedAt, nil +} + +// ClearStoppedAt clears the value of stoppedAt. +func (m *AlertMutation) ClearStoppedAt() { + m.stoppedAt = nil + m.clearedFields[alert.FieldStoppedAt] = struct{}{} +} + +// StoppedAtCleared returns if the field stoppedAt was cleared in this mutation. +func (m *AlertMutation) StoppedAtCleared() bool { + _, ok := m.clearedFields[alert.FieldStoppedAt] + return ok +} + +// ResetStoppedAt reset all changes of the "stoppedAt" field. +func (m *AlertMutation) ResetStoppedAt() { + m.stoppedAt = nil + delete(m.clearedFields, alert.FieldStoppedAt) +} + +// SetSourceIp sets the sourceIp field. +func (m *AlertMutation) SetSourceIp(s string) { + m.sourceIp = &s +} + +// SourceIp returns the sourceIp value in the mutation. 
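+
+// Illustrative sketch: for optional fields, Clear* stages a NULL write and is
+// tracked in clearedFields, while Reset* merely forgets the staged change.
+// Assuming the matching builder method:
+//
+//	err := client.Alert.UpdateOneID(id).ClearStoppedAt().Exec(ctx)
+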
+func (m *AlertMutation) SourceIp() (r string, exists bool) { + v := m.sourceIp + if v == nil { + return + } + return *v, true +} + +// OldSourceIp returns the old sourceIp value of the Alert. +// If the Alert object wasn't provided to the builder, the object is fetched +// from the database. +// An error is returned if the mutation operation is not UpdateOne, or database query fails. +func (m *AlertMutation) OldSourceIp(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, fmt.Errorf("OldSourceIp is allowed only on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, fmt.Errorf("OldSourceIp requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldSourceIp: %w", err) + } + return oldValue.SourceIp, nil +} + +// ClearSourceIp clears the value of sourceIp. +func (m *AlertMutation) ClearSourceIp() { + m.sourceIp = nil + m.clearedFields[alert.FieldSourceIp] = struct{}{} +} + +// SourceIpCleared returns if the field sourceIp was cleared in this mutation. +func (m *AlertMutation) SourceIpCleared() bool { + _, ok := m.clearedFields[alert.FieldSourceIp] + return ok +} + +// ResetSourceIp reset all changes of the "sourceIp" field. +func (m *AlertMutation) ResetSourceIp() { + m.sourceIp = nil + delete(m.clearedFields, alert.FieldSourceIp) +} + +// SetSourceRange sets the sourceRange field. +func (m *AlertMutation) SetSourceRange(s string) { + m.sourceRange = &s +} + +// SourceRange returns the sourceRange value in the mutation. +func (m *AlertMutation) SourceRange() (r string, exists bool) { + v := m.sourceRange + if v == nil { + return + } + return *v, true +} + +// OldSourceRange returns the old sourceRange value of the Alert. +// If the Alert object wasn't provided to the builder, the object is fetched +// from the database. +// An error is returned if the mutation operation is not UpdateOne, or database query fails. +func (m *AlertMutation) OldSourceRange(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, fmt.Errorf("OldSourceRange is allowed only on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, fmt.Errorf("OldSourceRange requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldSourceRange: %w", err) + } + return oldValue.SourceRange, nil +} + +// ClearSourceRange clears the value of sourceRange. +func (m *AlertMutation) ClearSourceRange() { + m.sourceRange = nil + m.clearedFields[alert.FieldSourceRange] = struct{}{} +} + +// SourceRangeCleared returns if the field sourceRange was cleared in this mutation. +func (m *AlertMutation) SourceRangeCleared() bool { + _, ok := m.clearedFields[alert.FieldSourceRange] + return ok +} + +// ResetSourceRange reset all changes of the "sourceRange" field. +func (m *AlertMutation) ResetSourceRange() { + m.sourceRange = nil + delete(m.clearedFields, alert.FieldSourceRange) +} + +// SetSourceAsNumber sets the sourceAsNumber field. +func (m *AlertMutation) SetSourceAsNumber(s string) { + m.sourceAsNumber = &s +} + +// SourceAsNumber returns the sourceAsNumber value in the mutation. +func (m *AlertMutation) SourceAsNumber() (r string, exists bool) { + v := m.sourceAsNumber + if v == nil { + return + } + return *v, true +} + +// OldSourceAsNumber returns the old sourceAsNumber value of the Alert. 
+// If the Alert object wasn't provided to the builder, the object is fetched +// from the database. +// An error is returned if the mutation operation is not UpdateOne, or database query fails. +func (m *AlertMutation) OldSourceAsNumber(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, fmt.Errorf("OldSourceAsNumber is allowed only on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, fmt.Errorf("OldSourceAsNumber requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldSourceAsNumber: %w", err) + } + return oldValue.SourceAsNumber, nil +} + +// ClearSourceAsNumber clears the value of sourceAsNumber. +func (m *AlertMutation) ClearSourceAsNumber() { + m.sourceAsNumber = nil + m.clearedFields[alert.FieldSourceAsNumber] = struct{}{} +} + +// SourceAsNumberCleared returns if the field sourceAsNumber was cleared in this mutation. +func (m *AlertMutation) SourceAsNumberCleared() bool { + _, ok := m.clearedFields[alert.FieldSourceAsNumber] + return ok +} + +// ResetSourceAsNumber reset all changes of the "sourceAsNumber" field. +func (m *AlertMutation) ResetSourceAsNumber() { + m.sourceAsNumber = nil + delete(m.clearedFields, alert.FieldSourceAsNumber) +} + +// SetSourceAsName sets the sourceAsName field. +func (m *AlertMutation) SetSourceAsName(s string) { + m.sourceAsName = &s +} + +// SourceAsName returns the sourceAsName value in the mutation. +func (m *AlertMutation) SourceAsName() (r string, exists bool) { + v := m.sourceAsName + if v == nil { + return + } + return *v, true +} + +// OldSourceAsName returns the old sourceAsName value of the Alert. +// If the Alert object wasn't provided to the builder, the object is fetched +// from the database. +// An error is returned if the mutation operation is not UpdateOne, or database query fails. +func (m *AlertMutation) OldSourceAsName(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, fmt.Errorf("OldSourceAsName is allowed only on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, fmt.Errorf("OldSourceAsName requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldSourceAsName: %w", err) + } + return oldValue.SourceAsName, nil +} + +// ClearSourceAsName clears the value of sourceAsName. +func (m *AlertMutation) ClearSourceAsName() { + m.sourceAsName = nil + m.clearedFields[alert.FieldSourceAsName] = struct{}{} +} + +// SourceAsNameCleared returns if the field sourceAsName was cleared in this mutation. +func (m *AlertMutation) SourceAsNameCleared() bool { + _, ok := m.clearedFields[alert.FieldSourceAsName] + return ok +} + +// ResetSourceAsName reset all changes of the "sourceAsName" field. +func (m *AlertMutation) ResetSourceAsName() { + m.sourceAsName = nil + delete(m.clearedFields, alert.FieldSourceAsName) +} + +// SetSourceCountry sets the sourceCountry field. +func (m *AlertMutation) SetSourceCountry(s string) { + m.sourceCountry = &s +} + +// SourceCountry returns the sourceCountry value in the mutation. +func (m *AlertMutation) SourceCountry() (r string, exists bool) { + v := m.sourceCountry + if v == nil { + return + } + return *v, true +} + +// OldSourceCountry returns the old sourceCountry value of the Alert. +// If the Alert object wasn't provided to the builder, the object is fetched +// from the database. 
+// An error is returned if the mutation operation is not UpdateOne, or database query fails. +func (m *AlertMutation) OldSourceCountry(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, fmt.Errorf("OldSourceCountry is allowed only on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, fmt.Errorf("OldSourceCountry requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldSourceCountry: %w", err) + } + return oldValue.SourceCountry, nil +} + +// ClearSourceCountry clears the value of sourceCountry. +func (m *AlertMutation) ClearSourceCountry() { + m.sourceCountry = nil + m.clearedFields[alert.FieldSourceCountry] = struct{}{} +} + +// SourceCountryCleared returns if the field sourceCountry was cleared in this mutation. +func (m *AlertMutation) SourceCountryCleared() bool { + _, ok := m.clearedFields[alert.FieldSourceCountry] + return ok +} + +// ResetSourceCountry reset all changes of the "sourceCountry" field. +func (m *AlertMutation) ResetSourceCountry() { + m.sourceCountry = nil + delete(m.clearedFields, alert.FieldSourceCountry) +} + +// SetSourceLatitude sets the sourceLatitude field. +func (m *AlertMutation) SetSourceLatitude(f float32) { + m.sourceLatitude = &f + m.addsourceLatitude = nil +} + +// SourceLatitude returns the sourceLatitude value in the mutation. +func (m *AlertMutation) SourceLatitude() (r float32, exists bool) { + v := m.sourceLatitude + if v == nil { + return + } + return *v, true +} + +// OldSourceLatitude returns the old sourceLatitude value of the Alert. +// If the Alert object wasn't provided to the builder, the object is fetched +// from the database. +// An error is returned if the mutation operation is not UpdateOne, or database query fails. +func (m *AlertMutation) OldSourceLatitude(ctx context.Context) (v float32, err error) { + if !m.op.Is(OpUpdateOne) { + return v, fmt.Errorf("OldSourceLatitude is allowed only on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, fmt.Errorf("OldSourceLatitude requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldSourceLatitude: %w", err) + } + return oldValue.SourceLatitude, nil +} + +// AddSourceLatitude adds f to sourceLatitude. +func (m *AlertMutation) AddSourceLatitude(f float32) { + if m.addsourceLatitude != nil { + *m.addsourceLatitude += f + } else { + m.addsourceLatitude = &f + } +} + +// AddedSourceLatitude returns the value that was added to the sourceLatitude field in this mutation. +func (m *AlertMutation) AddedSourceLatitude() (r float32, exists bool) { + v := m.addsourceLatitude + if v == nil { + return + } + return *v, true +} + +// ClearSourceLatitude clears the value of sourceLatitude. +func (m *AlertMutation) ClearSourceLatitude() { + m.sourceLatitude = nil + m.addsourceLatitude = nil + m.clearedFields[alert.FieldSourceLatitude] = struct{}{} +} + +// SourceLatitudeCleared returns if the field sourceLatitude was cleared in this mutation. +func (m *AlertMutation) SourceLatitudeCleared() bool { + _, ok := m.clearedFields[alert.FieldSourceLatitude] + return ok +} + +// ResetSourceLatitude reset all changes of the "sourceLatitude" field. 
+func (m *AlertMutation) ResetSourceLatitude() { + m.sourceLatitude = nil + m.addsourceLatitude = nil + delete(m.clearedFields, alert.FieldSourceLatitude) +} + +// SetSourceLongitude sets the sourceLongitude field. +func (m *AlertMutation) SetSourceLongitude(f float32) { + m.sourceLongitude = &f + m.addsourceLongitude = nil +} + +// SourceLongitude returns the sourceLongitude value in the mutation. +func (m *AlertMutation) SourceLongitude() (r float32, exists bool) { + v := m.sourceLongitude + if v == nil { + return + } + return *v, true +} + +// OldSourceLongitude returns the old sourceLongitude value of the Alert. +// If the Alert object wasn't provided to the builder, the object is fetched +// from the database. +// An error is returned if the mutation operation is not UpdateOne, or database query fails. +func (m *AlertMutation) OldSourceLongitude(ctx context.Context) (v float32, err error) { + if !m.op.Is(OpUpdateOne) { + return v, fmt.Errorf("OldSourceLongitude is allowed only on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, fmt.Errorf("OldSourceLongitude requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldSourceLongitude: %w", err) + } + return oldValue.SourceLongitude, nil +} + +// AddSourceLongitude adds f to sourceLongitude. +func (m *AlertMutation) AddSourceLongitude(f float32) { + if m.addsourceLongitude != nil { + *m.addsourceLongitude += f + } else { + m.addsourceLongitude = &f + } +} + +// AddedSourceLongitude returns the value that was added to the sourceLongitude field in this mutation. +func (m *AlertMutation) AddedSourceLongitude() (r float32, exists bool) { + v := m.addsourceLongitude + if v == nil { + return + } + return *v, true +} + +// ClearSourceLongitude clears the value of sourceLongitude. +func (m *AlertMutation) ClearSourceLongitude() { + m.sourceLongitude = nil + m.addsourceLongitude = nil + m.clearedFields[alert.FieldSourceLongitude] = struct{}{} +} + +// SourceLongitudeCleared returns if the field sourceLongitude was cleared in this mutation. +func (m *AlertMutation) SourceLongitudeCleared() bool { + _, ok := m.clearedFields[alert.FieldSourceLongitude] + return ok +} + +// ResetSourceLongitude reset all changes of the "sourceLongitude" field. +func (m *AlertMutation) ResetSourceLongitude() { + m.sourceLongitude = nil + m.addsourceLongitude = nil + delete(m.clearedFields, alert.FieldSourceLongitude) +} + +// SetSourceScope sets the sourceScope field. +func (m *AlertMutation) SetSourceScope(s string) { + m.sourceScope = &s +} + +// SourceScope returns the sourceScope value in the mutation. +func (m *AlertMutation) SourceScope() (r string, exists bool) { + v := m.sourceScope + if v == nil { + return + } + return *v, true +} + +// OldSourceScope returns the old sourceScope value of the Alert. +// If the Alert object wasn't provided to the builder, the object is fetched +// from the database. +// An error is returned if the mutation operation is not UpdateOne, or database query fails. 
+func (m *AlertMutation) OldSourceScope(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, fmt.Errorf("OldSourceScope is allowed only on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, fmt.Errorf("OldSourceScope requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldSourceScope: %w", err) + } + return oldValue.SourceScope, nil +} + +// ClearSourceScope clears the value of sourceScope. +func (m *AlertMutation) ClearSourceScope() { + m.sourceScope = nil + m.clearedFields[alert.FieldSourceScope] = struct{}{} +} + +// SourceScopeCleared returns if the field sourceScope was cleared in this mutation. +func (m *AlertMutation) SourceScopeCleared() bool { + _, ok := m.clearedFields[alert.FieldSourceScope] + return ok +} + +// ResetSourceScope reset all changes of the "sourceScope" field. +func (m *AlertMutation) ResetSourceScope() { + m.sourceScope = nil + delete(m.clearedFields, alert.FieldSourceScope) +} + +// SetSourceValue sets the sourceValue field. +func (m *AlertMutation) SetSourceValue(s string) { + m.sourceValue = &s +} + +// SourceValue returns the sourceValue value in the mutation. +func (m *AlertMutation) SourceValue() (r string, exists bool) { + v := m.sourceValue + if v == nil { + return + } + return *v, true +} + +// OldSourceValue returns the old sourceValue value of the Alert. +// If the Alert object wasn't provided to the builder, the object is fetched +// from the database. +// An error is returned if the mutation operation is not UpdateOne, or database query fails. +func (m *AlertMutation) OldSourceValue(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, fmt.Errorf("OldSourceValue is allowed only on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, fmt.Errorf("OldSourceValue requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldSourceValue: %w", err) + } + return oldValue.SourceValue, nil +} + +// ClearSourceValue clears the value of sourceValue. +func (m *AlertMutation) ClearSourceValue() { + m.sourceValue = nil + m.clearedFields[alert.FieldSourceValue] = struct{}{} +} + +// SourceValueCleared returns if the field sourceValue was cleared in this mutation. +func (m *AlertMutation) SourceValueCleared() bool { + _, ok := m.clearedFields[alert.FieldSourceValue] + return ok +} + +// ResetSourceValue reset all changes of the "sourceValue" field. +func (m *AlertMutation) ResetSourceValue() { + m.sourceValue = nil + delete(m.clearedFields, alert.FieldSourceValue) +} + +// SetCapacity sets the capacity field. +func (m *AlertMutation) SetCapacity(i int32) { + m.capacity = &i + m.addcapacity = nil +} + +// Capacity returns the capacity value in the mutation. +func (m *AlertMutation) Capacity() (r int32, exists bool) { + v := m.capacity + if v == nil { + return + } + return *v, true +} + +// OldCapacity returns the old capacity value of the Alert. +// If the Alert object wasn't provided to the builder, the object is fetched +// from the database. +// An error is returned if the mutation operation is not UpdateOne, or database query fails. 
+func (m *AlertMutation) OldCapacity(ctx context.Context) (v int32, err error) { + if !m.op.Is(OpUpdateOne) { + return v, fmt.Errorf("OldCapacity is allowed only on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, fmt.Errorf("OldCapacity requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldCapacity: %w", err) + } + return oldValue.Capacity, nil +} + +// AddCapacity adds i to capacity. +func (m *AlertMutation) AddCapacity(i int32) { + if m.addcapacity != nil { + *m.addcapacity += i + } else { + m.addcapacity = &i + } +} + +// AddedCapacity returns the value that was added to the capacity field in this mutation. +func (m *AlertMutation) AddedCapacity() (r int32, exists bool) { + v := m.addcapacity + if v == nil { + return + } + return *v, true +} + +// ClearCapacity clears the value of capacity. +func (m *AlertMutation) ClearCapacity() { + m.capacity = nil + m.addcapacity = nil + m.clearedFields[alert.FieldCapacity] = struct{}{} +} + +// CapacityCleared returns if the field capacity was cleared in this mutation. +func (m *AlertMutation) CapacityCleared() bool { + _, ok := m.clearedFields[alert.FieldCapacity] + return ok +} + +// ResetCapacity reset all changes of the "capacity" field. +func (m *AlertMutation) ResetCapacity() { + m.capacity = nil + m.addcapacity = nil + delete(m.clearedFields, alert.FieldCapacity) +} + +// SetLeakSpeed sets the leakSpeed field. +func (m *AlertMutation) SetLeakSpeed(s string) { + m.leakSpeed = &s +} + +// LeakSpeed returns the leakSpeed value in the mutation. +func (m *AlertMutation) LeakSpeed() (r string, exists bool) { + v := m.leakSpeed + if v == nil { + return + } + return *v, true +} + +// OldLeakSpeed returns the old leakSpeed value of the Alert. +// If the Alert object wasn't provided to the builder, the object is fetched +// from the database. +// An error is returned if the mutation operation is not UpdateOne, or database query fails. +func (m *AlertMutation) OldLeakSpeed(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, fmt.Errorf("OldLeakSpeed is allowed only on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, fmt.Errorf("OldLeakSpeed requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldLeakSpeed: %w", err) + } + return oldValue.LeakSpeed, nil +} + +// ClearLeakSpeed clears the value of leakSpeed. +func (m *AlertMutation) ClearLeakSpeed() { + m.leakSpeed = nil + m.clearedFields[alert.FieldLeakSpeed] = struct{}{} +} + +// LeakSpeedCleared returns if the field leakSpeed was cleared in this mutation. +func (m *AlertMutation) LeakSpeedCleared() bool { + _, ok := m.clearedFields[alert.FieldLeakSpeed] + return ok +} + +// ResetLeakSpeed reset all changes of the "leakSpeed" field. +func (m *AlertMutation) ResetLeakSpeed() { + m.leakSpeed = nil + delete(m.clearedFields, alert.FieldLeakSpeed) +} + +// SetScenarioVersion sets the scenarioVersion field. +func (m *AlertMutation) SetScenarioVersion(s string) { + m.scenarioVersion = &s +} + +// ScenarioVersion returns the scenarioVersion value in the mutation. +func (m *AlertMutation) ScenarioVersion() (r string, exists bool) { + v := m.scenarioVersion + if v == nil { + return + } + return *v, true +} + +// OldScenarioVersion returns the old scenarioVersion value of the Alert. 
+// If the Alert object wasn't provided to the builder, the object is fetched +// from the database. +// An error is returned if the mutation operation is not UpdateOne, or database query fails. +func (m *AlertMutation) OldScenarioVersion(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, fmt.Errorf("OldScenarioVersion is allowed only on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, fmt.Errorf("OldScenarioVersion requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldScenarioVersion: %w", err) + } + return oldValue.ScenarioVersion, nil +} + +// ClearScenarioVersion clears the value of scenarioVersion. +func (m *AlertMutation) ClearScenarioVersion() { + m.scenarioVersion = nil + m.clearedFields[alert.FieldScenarioVersion] = struct{}{} +} + +// ScenarioVersionCleared returns if the field scenarioVersion was cleared in this mutation. +func (m *AlertMutation) ScenarioVersionCleared() bool { + _, ok := m.clearedFields[alert.FieldScenarioVersion] + return ok +} + +// ResetScenarioVersion reset all changes of the "scenarioVersion" field. +func (m *AlertMutation) ResetScenarioVersion() { + m.scenarioVersion = nil + delete(m.clearedFields, alert.FieldScenarioVersion) +} + +// SetScenarioHash sets the scenarioHash field. +func (m *AlertMutation) SetScenarioHash(s string) { + m.scenarioHash = &s +} + +// ScenarioHash returns the scenarioHash value in the mutation. +func (m *AlertMutation) ScenarioHash() (r string, exists bool) { + v := m.scenarioHash + if v == nil { + return + } + return *v, true +} + +// OldScenarioHash returns the old scenarioHash value of the Alert. +// If the Alert object wasn't provided to the builder, the object is fetched +// from the database. +// An error is returned if the mutation operation is not UpdateOne, or database query fails. +func (m *AlertMutation) OldScenarioHash(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, fmt.Errorf("OldScenarioHash is allowed only on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, fmt.Errorf("OldScenarioHash requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldScenarioHash: %w", err) + } + return oldValue.ScenarioHash, nil +} + +// ClearScenarioHash clears the value of scenarioHash. +func (m *AlertMutation) ClearScenarioHash() { + m.scenarioHash = nil + m.clearedFields[alert.FieldScenarioHash] = struct{}{} +} + +// ScenarioHashCleared returns if the field scenarioHash was cleared in this mutation. +func (m *AlertMutation) ScenarioHashCleared() bool { + _, ok := m.clearedFields[alert.FieldScenarioHash] + return ok +} + +// ResetScenarioHash reset all changes of the "scenarioHash" field. +func (m *AlertMutation) ResetScenarioHash() { + m.scenarioHash = nil + delete(m.clearedFields, alert.FieldScenarioHash) +} + +// SetSimulated sets the simulated field. +func (m *AlertMutation) SetSimulated(b bool) { + m.simulated = &b +} + +// Simulated returns the simulated value in the mutation. +func (m *AlertMutation) Simulated() (r bool, exists bool) { + v := m.simulated + if v == nil { + return + } + return *v, true +} + +// OldSimulated returns the old simulated value of the Alert. +// If the Alert object wasn't provided to the builder, the object is fetched +// from the database. 
+// An error is returned if the mutation operation is not UpdateOne, or database query fails. +func (m *AlertMutation) OldSimulated(ctx context.Context) (v bool, err error) { + if !m.op.Is(OpUpdateOne) { + return v, fmt.Errorf("OldSimulated is allowed only on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, fmt.Errorf("OldSimulated requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldSimulated: %w", err) + } + return oldValue.Simulated, nil +} + +// ResetSimulated reset all changes of the "simulated" field. +func (m *AlertMutation) ResetSimulated() { + m.simulated = nil +} + +// SetOwnerID sets the owner edge to Machine by id. +func (m *AlertMutation) SetOwnerID(id int) { + m.owner = &id +} + +// ClearOwner clears the owner edge to Machine. +func (m *AlertMutation) ClearOwner() { + m.clearedowner = true +} + +// OwnerCleared returns if the edge owner was cleared. +func (m *AlertMutation) OwnerCleared() bool { + return m.clearedowner +} + +// OwnerID returns the owner id in the mutation. +func (m *AlertMutation) OwnerID() (id int, exists bool) { + if m.owner != nil { + return *m.owner, true + } + return +} + +// OwnerIDs returns the owner ids in the mutation. +// Note that ids always returns len(ids) <= 1 for unique edges, and you should use +// OwnerID instead. It exists only for internal usage by the builders. +func (m *AlertMutation) OwnerIDs() (ids []int) { + if id := m.owner; id != nil { + ids = append(ids, *id) + } + return +} + +// ResetOwner reset all changes of the "owner" edge. +func (m *AlertMutation) ResetOwner() { + m.owner = nil + m.clearedowner = false +} + +// AddDecisionIDs adds the decisions edge to Decision by ids. +func (m *AlertMutation) AddDecisionIDs(ids ...int) { + if m.decisions == nil { + m.decisions = make(map[int]struct{}) + } + for i := range ids { + m.decisions[ids[i]] = struct{}{} + } +} + +// ClearDecisions clears the decisions edge to Decision. +func (m *AlertMutation) ClearDecisions() { + m.cleareddecisions = true +} + +// DecisionsCleared returns if the edge decisions was cleared. +func (m *AlertMutation) DecisionsCleared() bool { + return m.cleareddecisions +} + +// RemoveDecisionIDs removes the decisions edge to Decision by ids. +func (m *AlertMutation) RemoveDecisionIDs(ids ...int) { + if m.removeddecisions == nil { + m.removeddecisions = make(map[int]struct{}) + } + for i := range ids { + m.removeddecisions[ids[i]] = struct{}{} + } +} + +// RemovedDecisions returns the removed ids of decisions. +func (m *AlertMutation) RemovedDecisionsIDs() (ids []int) { + for id := range m.removeddecisions { + ids = append(ids, id) + } + return +} + +// DecisionsIDs returns the decisions ids in the mutation. +func (m *AlertMutation) DecisionsIDs() (ids []int) { + for id := range m.decisions { + ids = append(ids, id) + } + return +} + +// ResetDecisions reset all changes of the "decisions" edge. +func (m *AlertMutation) ResetDecisions() { + m.decisions = nil + m.cleareddecisions = false + m.removeddecisions = nil +} + +// AddEventIDs adds the events edge to Event by ids. +func (m *AlertMutation) AddEventIDs(ids ...int) { + if m.events == nil { + m.events = make(map[int]struct{}) + } + for i := range ids { + m.events[ids[i]] = struct{}{} + } +} + +// ClearEvents clears the events edge to Event. +func (m *AlertMutation) ClearEvents() { + m.clearedevents = true +} + +// EventsCleared returns if the edge events was cleared. 
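+
+// Illustrative sketch: each edge is tracked as three sets (added, removed,
+// cleared), which a hook can inspect before the write happens:
+//
+//	if ids := m.RemovedDecisionsIDs(); len(ids) > 0 {
+//		log.Printf("alert update detaches decisions %v", ids)
+//	}
+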
+func (m *AlertMutation) EventsCleared() bool { + return m.clearedevents +} + +// RemoveEventIDs removes the events edge to Event by ids. +func (m *AlertMutation) RemoveEventIDs(ids ...int) { + if m.removedevents == nil { + m.removedevents = make(map[int]struct{}) + } + for i := range ids { + m.removedevents[ids[i]] = struct{}{} + } +} + +// RemovedEvents returns the removed ids of events. +func (m *AlertMutation) RemovedEventsIDs() (ids []int) { + for id := range m.removedevents { + ids = append(ids, id) + } + return +} + +// EventsIDs returns the events ids in the mutation. +func (m *AlertMutation) EventsIDs() (ids []int) { + for id := range m.events { + ids = append(ids, id) + } + return +} + +// ResetEvents reset all changes of the "events" edge. +func (m *AlertMutation) ResetEvents() { + m.events = nil + m.clearedevents = false + m.removedevents = nil +} + +// AddMetaIDs adds the metas edge to Meta by ids. +func (m *AlertMutation) AddMetaIDs(ids ...int) { + if m.metas == nil { + m.metas = make(map[int]struct{}) + } + for i := range ids { + m.metas[ids[i]] = struct{}{} + } +} + +// ClearMetas clears the metas edge to Meta. +func (m *AlertMutation) ClearMetas() { + m.clearedmetas = true +} + +// MetasCleared returns if the edge metas was cleared. +func (m *AlertMutation) MetasCleared() bool { + return m.clearedmetas +} + +// RemoveMetaIDs removes the metas edge to Meta by ids. +func (m *AlertMutation) RemoveMetaIDs(ids ...int) { + if m.removedmetas == nil { + m.removedmetas = make(map[int]struct{}) + } + for i := range ids { + m.removedmetas[ids[i]] = struct{}{} + } +} + +// RemovedMetas returns the removed ids of metas. +func (m *AlertMutation) RemovedMetasIDs() (ids []int) { + for id := range m.removedmetas { + ids = append(ids, id) + } + return +} + +// MetasIDs returns the metas ids in the mutation. +func (m *AlertMutation) MetasIDs() (ids []int) { + for id := range m.metas { + ids = append(ids, id) + } + return +} + +// ResetMetas reset all changes of the "metas" edge. +func (m *AlertMutation) ResetMetas() { + m.metas = nil + m.clearedmetas = false + m.removedmetas = nil +} + +// Op returns the operation name. +func (m *AlertMutation) Op() Op { + return m.op +} + +// Type returns the node type of this mutation (Alert). +func (m *AlertMutation) Type() string { + return m.typ +} + +// Fields returns all fields that were changed during +// this mutation. Note that, in order to get all numeric +// fields that were in/decremented, call AddedFields(). 
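+// NOTE(review): an illustrative sketch, not generated code. Fields/Field
+// expose every staged change by name, which is what makes schema-agnostic
+// hooks or audit logging possible (assumes a "log" import):
+//
+//	for _, name := range m.Fields() {
+//		if v, ok := m.Field(name); ok {
+//			log.Printf("alert mutation sets %s=%v", name, v)
+//		}
+//	}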
+func (m *AlertMutation) Fields() []string { + fields := make([]string, 0, 22) + if m.created_at != nil { + fields = append(fields, alert.FieldCreatedAt) + } + if m.updated_at != nil { + fields = append(fields, alert.FieldUpdatedAt) + } + if m.scenario != nil { + fields = append(fields, alert.FieldScenario) + } + if m.bucketId != nil { + fields = append(fields, alert.FieldBucketId) + } + if m.message != nil { + fields = append(fields, alert.FieldMessage) + } + if m.eventsCount != nil { + fields = append(fields, alert.FieldEventsCount) + } + if m.startedAt != nil { + fields = append(fields, alert.FieldStartedAt) + } + if m.stoppedAt != nil { + fields = append(fields, alert.FieldStoppedAt) + } + if m.sourceIp != nil { + fields = append(fields, alert.FieldSourceIp) + } + if m.sourceRange != nil { + fields = append(fields, alert.FieldSourceRange) + } + if m.sourceAsNumber != nil { + fields = append(fields, alert.FieldSourceAsNumber) + } + if m.sourceAsName != nil { + fields = append(fields, alert.FieldSourceAsName) + } + if m.sourceCountry != nil { + fields = append(fields, alert.FieldSourceCountry) + } + if m.sourceLatitude != nil { + fields = append(fields, alert.FieldSourceLatitude) + } + if m.sourceLongitude != nil { + fields = append(fields, alert.FieldSourceLongitude) + } + if m.sourceScope != nil { + fields = append(fields, alert.FieldSourceScope) + } + if m.sourceValue != nil { + fields = append(fields, alert.FieldSourceValue) + } + if m.capacity != nil { + fields = append(fields, alert.FieldCapacity) + } + if m.leakSpeed != nil { + fields = append(fields, alert.FieldLeakSpeed) + } + if m.scenarioVersion != nil { + fields = append(fields, alert.FieldScenarioVersion) + } + if m.scenarioHash != nil { + fields = append(fields, alert.FieldScenarioHash) + } + if m.simulated != nil { + fields = append(fields, alert.FieldSimulated) + } + return fields +} + +// Field returns the value of a field with the given name. +// The second boolean value indicates that this field was +// not set, or was not define in the schema. +func (m *AlertMutation) Field(name string) (ent.Value, bool) { + switch name { + case alert.FieldCreatedAt: + return m.CreatedAt() + case alert.FieldUpdatedAt: + return m.UpdatedAt() + case alert.FieldScenario: + return m.Scenario() + case alert.FieldBucketId: + return m.BucketId() + case alert.FieldMessage: + return m.Message() + case alert.FieldEventsCount: + return m.EventsCount() + case alert.FieldStartedAt: + return m.StartedAt() + case alert.FieldStoppedAt: + return m.StoppedAt() + case alert.FieldSourceIp: + return m.SourceIp() + case alert.FieldSourceRange: + return m.SourceRange() + case alert.FieldSourceAsNumber: + return m.SourceAsNumber() + case alert.FieldSourceAsName: + return m.SourceAsName() + case alert.FieldSourceCountry: + return m.SourceCountry() + case alert.FieldSourceLatitude: + return m.SourceLatitude() + case alert.FieldSourceLongitude: + return m.SourceLongitude() + case alert.FieldSourceScope: + return m.SourceScope() + case alert.FieldSourceValue: + return m.SourceValue() + case alert.FieldCapacity: + return m.Capacity() + case alert.FieldLeakSpeed: + return m.LeakSpeed() + case alert.FieldScenarioVersion: + return m.ScenarioVersion() + case alert.FieldScenarioHash: + return m.ScenarioHash() + case alert.FieldSimulated: + return m.Simulated() + } + return nil, false +} + +// OldField returns the old value of the field from the database. +// An error is returned if the mutation operation is not UpdateOne, +// or the query to the database was failed. 
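+// NOTE(review): an illustrative sketch, not generated code. Paired with
+// Field, OldField lets an update hook compute a before/after diff; ctx is
+// assumed to come from the surrounding hook:
+//
+//	if m.Op().Is(OpUpdateOne) {
+//		before, _ := m.OldField(ctx, alert.FieldScenario) // memoized DB fetch
+//		after, _ := m.Field(alert.FieldScenario)
+//		_, _ = before, after // compare / audit as needed
+//	}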
+func (m *AlertMutation) OldField(ctx context.Context, name string) (ent.Value, error) { + switch name { + case alert.FieldCreatedAt: + return m.OldCreatedAt(ctx) + case alert.FieldUpdatedAt: + return m.OldUpdatedAt(ctx) + case alert.FieldScenario: + return m.OldScenario(ctx) + case alert.FieldBucketId: + return m.OldBucketId(ctx) + case alert.FieldMessage: + return m.OldMessage(ctx) + case alert.FieldEventsCount: + return m.OldEventsCount(ctx) + case alert.FieldStartedAt: + return m.OldStartedAt(ctx) + case alert.FieldStoppedAt: + return m.OldStoppedAt(ctx) + case alert.FieldSourceIp: + return m.OldSourceIp(ctx) + case alert.FieldSourceRange: + return m.OldSourceRange(ctx) + case alert.FieldSourceAsNumber: + return m.OldSourceAsNumber(ctx) + case alert.FieldSourceAsName: + return m.OldSourceAsName(ctx) + case alert.FieldSourceCountry: + return m.OldSourceCountry(ctx) + case alert.FieldSourceLatitude: + return m.OldSourceLatitude(ctx) + case alert.FieldSourceLongitude: + return m.OldSourceLongitude(ctx) + case alert.FieldSourceScope: + return m.OldSourceScope(ctx) + case alert.FieldSourceValue: + return m.OldSourceValue(ctx) + case alert.FieldCapacity: + return m.OldCapacity(ctx) + case alert.FieldLeakSpeed: + return m.OldLeakSpeed(ctx) + case alert.FieldScenarioVersion: + return m.OldScenarioVersion(ctx) + case alert.FieldScenarioHash: + return m.OldScenarioHash(ctx) + case alert.FieldSimulated: + return m.OldSimulated(ctx) + } + return nil, fmt.Errorf("unknown Alert field %s", name) +} + +// SetField sets the value for the given name. It returns an +// error if the field is not defined in the schema, or if the +// type mismatch the field type. +func (m *AlertMutation) SetField(name string, value ent.Value) error { + switch name { + case alert.FieldCreatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetCreatedAt(v) + return nil + case alert.FieldUpdatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetUpdatedAt(v) + return nil + case alert.FieldScenario: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetScenario(v) + return nil + case alert.FieldBucketId: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetBucketId(v) + return nil + case alert.FieldMessage: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetMessage(v) + return nil + case alert.FieldEventsCount: + v, ok := value.(int32) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetEventsCount(v) + return nil + case alert.FieldStartedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetStartedAt(v) + return nil + case alert.FieldStoppedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetStoppedAt(v) + return nil + case alert.FieldSourceIp: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetSourceIp(v) + return nil + case alert.FieldSourceRange: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetSourceRange(v) + return nil + case alert.FieldSourceAsNumber: + v, ok := value.(string) + if !ok { + 
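+			// NOTE(review): every case in this switch guards with a type
+			// assertion like the one just above, so a mismatched ent.Value
+			// (say, an int handed to this string field) surfaces as a
+			// descriptive error rather than a panic.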
return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetSourceAsNumber(v) + return nil + case alert.FieldSourceAsName: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetSourceAsName(v) + return nil + case alert.FieldSourceCountry: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetSourceCountry(v) + return nil + case alert.FieldSourceLatitude: + v, ok := value.(float32) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetSourceLatitude(v) + return nil + case alert.FieldSourceLongitude: + v, ok := value.(float32) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetSourceLongitude(v) + return nil + case alert.FieldSourceScope: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetSourceScope(v) + return nil + case alert.FieldSourceValue: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetSourceValue(v) + return nil + case alert.FieldCapacity: + v, ok := value.(int32) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetCapacity(v) + return nil + case alert.FieldLeakSpeed: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetLeakSpeed(v) + return nil + case alert.FieldScenarioVersion: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetScenarioVersion(v) + return nil + case alert.FieldScenarioHash: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetScenarioHash(v) + return nil + case alert.FieldSimulated: + v, ok := value.(bool) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetSimulated(v) + return nil + } + return fmt.Errorf("unknown Alert field %s", name) +} + +// AddedFields returns all numeric fields that were incremented +// or decremented during this mutation. +func (m *AlertMutation) AddedFields() []string { + var fields []string + if m.addeventsCount != nil { + fields = append(fields, alert.FieldEventsCount) + } + if m.addsourceLatitude != nil { + fields = append(fields, alert.FieldSourceLatitude) + } + if m.addsourceLongitude != nil { + fields = append(fields, alert.FieldSourceLongitude) + } + if m.addcapacity != nil { + fields = append(fields, alert.FieldCapacity) + } + return fields +} + +// AddedField returns the numeric value that was in/decremented +// from a field with the given name. The second value indicates +// that this field was not set, or was not define in the schema. +func (m *AlertMutation) AddedField(name string) (ent.Value, bool) { + switch name { + case alert.FieldEventsCount: + return m.AddedEventsCount() + case alert.FieldSourceLatitude: + return m.AddedSourceLatitude() + case alert.FieldSourceLongitude: + return m.AddedSourceLongitude() + case alert.FieldCapacity: + return m.AddedCapacity() + } + return nil, false +} + +// AddField adds the value for the given name. It returns an +// error if the field is not defined in the schema, or if the +// type mismatch the field type. 
+func (m *AlertMutation) AddField(name string, value ent.Value) error { + switch name { + case alert.FieldEventsCount: + v, ok := value.(int32) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddEventsCount(v) + return nil + case alert.FieldSourceLatitude: + v, ok := value.(float32) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddSourceLatitude(v) + return nil + case alert.FieldSourceLongitude: + v, ok := value.(float32) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddSourceLongitude(v) + return nil + case alert.FieldCapacity: + v, ok := value.(int32) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddCapacity(v) + return nil + } + return fmt.Errorf("unknown Alert numeric field %s", name) +} + +// ClearedFields returns all nullable fields that were cleared +// during this mutation. +func (m *AlertMutation) ClearedFields() []string { + var fields []string + if m.FieldCleared(alert.FieldBucketId) { + fields = append(fields, alert.FieldBucketId) + } + if m.FieldCleared(alert.FieldMessage) { + fields = append(fields, alert.FieldMessage) + } + if m.FieldCleared(alert.FieldEventsCount) { + fields = append(fields, alert.FieldEventsCount) + } + if m.FieldCleared(alert.FieldStartedAt) { + fields = append(fields, alert.FieldStartedAt) + } + if m.FieldCleared(alert.FieldStoppedAt) { + fields = append(fields, alert.FieldStoppedAt) + } + if m.FieldCleared(alert.FieldSourceIp) { + fields = append(fields, alert.FieldSourceIp) + } + if m.FieldCleared(alert.FieldSourceRange) { + fields = append(fields, alert.FieldSourceRange) + } + if m.FieldCleared(alert.FieldSourceAsNumber) { + fields = append(fields, alert.FieldSourceAsNumber) + } + if m.FieldCleared(alert.FieldSourceAsName) { + fields = append(fields, alert.FieldSourceAsName) + } + if m.FieldCleared(alert.FieldSourceCountry) { + fields = append(fields, alert.FieldSourceCountry) + } + if m.FieldCleared(alert.FieldSourceLatitude) { + fields = append(fields, alert.FieldSourceLatitude) + } + if m.FieldCleared(alert.FieldSourceLongitude) { + fields = append(fields, alert.FieldSourceLongitude) + } + if m.FieldCleared(alert.FieldSourceScope) { + fields = append(fields, alert.FieldSourceScope) + } + if m.FieldCleared(alert.FieldSourceValue) { + fields = append(fields, alert.FieldSourceValue) + } + if m.FieldCleared(alert.FieldCapacity) { + fields = append(fields, alert.FieldCapacity) + } + if m.FieldCleared(alert.FieldLeakSpeed) { + fields = append(fields, alert.FieldLeakSpeed) + } + if m.FieldCleared(alert.FieldScenarioVersion) { + fields = append(fields, alert.FieldScenarioVersion) + } + if m.FieldCleared(alert.FieldScenarioHash) { + fields = append(fields, alert.FieldScenarioHash) + } + return fields +} + +// FieldCleared returns a boolean indicates if this field was +// cleared in this mutation. +func (m *AlertMutation) FieldCleared(name string) bool { + _, ok := m.clearedFields[name] + return ok +} + +// ClearField clears the value for the given name. It returns an +// error if the field is not defined in the schema. 
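+// NOTE(review): an illustrative sketch, not generated code. Clearing and
+// resetting differ: ClearField stages "write NULL to this nullable column"
+// via the clearedFields map, while ResetField simply forgets the change:
+//
+//	_ = m.ClearField(alert.FieldScenarioHash)
+//	_ = m.FieldCleared(alert.FieldScenarioHash) // true
+//	_ = m.ResetField(alert.FieldScenarioHash)
+//	_ = m.FieldCleared(alert.FieldScenarioHash) // false again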
+func (m *AlertMutation) ClearField(name string) error { + switch name { + case alert.FieldBucketId: + m.ClearBucketId() + return nil + case alert.FieldMessage: + m.ClearMessage() + return nil + case alert.FieldEventsCount: + m.ClearEventsCount() + return nil + case alert.FieldStartedAt: + m.ClearStartedAt() + return nil + case alert.FieldStoppedAt: + m.ClearStoppedAt() + return nil + case alert.FieldSourceIp: + m.ClearSourceIp() + return nil + case alert.FieldSourceRange: + m.ClearSourceRange() + return nil + case alert.FieldSourceAsNumber: + m.ClearSourceAsNumber() + return nil + case alert.FieldSourceAsName: + m.ClearSourceAsName() + return nil + case alert.FieldSourceCountry: + m.ClearSourceCountry() + return nil + case alert.FieldSourceLatitude: + m.ClearSourceLatitude() + return nil + case alert.FieldSourceLongitude: + m.ClearSourceLongitude() + return nil + case alert.FieldSourceScope: + m.ClearSourceScope() + return nil + case alert.FieldSourceValue: + m.ClearSourceValue() + return nil + case alert.FieldCapacity: + m.ClearCapacity() + return nil + case alert.FieldLeakSpeed: + m.ClearLeakSpeed() + return nil + case alert.FieldScenarioVersion: + m.ClearScenarioVersion() + return nil + case alert.FieldScenarioHash: + m.ClearScenarioHash() + return nil + } + return fmt.Errorf("unknown Alert nullable field %s", name) +} + +// ResetField resets all changes in the mutation regarding the +// given field name. It returns an error if the field is not +// defined in the schema. +func (m *AlertMutation) ResetField(name string) error { + switch name { + case alert.FieldCreatedAt: + m.ResetCreatedAt() + return nil + case alert.FieldUpdatedAt: + m.ResetUpdatedAt() + return nil + case alert.FieldScenario: + m.ResetScenario() + return nil + case alert.FieldBucketId: + m.ResetBucketId() + return nil + case alert.FieldMessage: + m.ResetMessage() + return nil + case alert.FieldEventsCount: + m.ResetEventsCount() + return nil + case alert.FieldStartedAt: + m.ResetStartedAt() + return nil + case alert.FieldStoppedAt: + m.ResetStoppedAt() + return nil + case alert.FieldSourceIp: + m.ResetSourceIp() + return nil + case alert.FieldSourceRange: + m.ResetSourceRange() + return nil + case alert.FieldSourceAsNumber: + m.ResetSourceAsNumber() + return nil + case alert.FieldSourceAsName: + m.ResetSourceAsName() + return nil + case alert.FieldSourceCountry: + m.ResetSourceCountry() + return nil + case alert.FieldSourceLatitude: + m.ResetSourceLatitude() + return nil + case alert.FieldSourceLongitude: + m.ResetSourceLongitude() + return nil + case alert.FieldSourceScope: + m.ResetSourceScope() + return nil + case alert.FieldSourceValue: + m.ResetSourceValue() + return nil + case alert.FieldCapacity: + m.ResetCapacity() + return nil + case alert.FieldLeakSpeed: + m.ResetLeakSpeed() + return nil + case alert.FieldScenarioVersion: + m.ResetScenarioVersion() + return nil + case alert.FieldScenarioHash: + m.ResetScenarioHash() + return nil + case alert.FieldSimulated: + m.ResetSimulated() + return nil + } + return fmt.Errorf("unknown Alert field %s", name) +} + +// AddedEdges returns all edge names that were set/added in this +// mutation. 
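+// NOTE(review): an illustrative sketch, not generated code. These are the
+// edge counterparts of Fields/Field; a generic hook can discover staged edge
+// writes without knowing the Alert schema (assumes a "log" import):
+//
+//	for _, e := range m.AddedEdges() {
+//		log.Printf("edge %s gains ids %v", e, m.AddedIDs(e))
+//	}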
+func (m *AlertMutation) AddedEdges() []string { + edges := make([]string, 0, 4) + if m.owner != nil { + edges = append(edges, alert.EdgeOwner) + } + if m.decisions != nil { + edges = append(edges, alert.EdgeDecisions) + } + if m.events != nil { + edges = append(edges, alert.EdgeEvents) + } + if m.metas != nil { + edges = append(edges, alert.EdgeMetas) + } + return edges +} + +// AddedIDs returns all ids (to other nodes) that were added for +// the given edge name. +func (m *AlertMutation) AddedIDs(name string) []ent.Value { + switch name { + case alert.EdgeOwner: + if id := m.owner; id != nil { + return []ent.Value{*id} + } + case alert.EdgeDecisions: + ids := make([]ent.Value, 0, len(m.decisions)) + for id := range m.decisions { + ids = append(ids, id) + } + return ids + case alert.EdgeEvents: + ids := make([]ent.Value, 0, len(m.events)) + for id := range m.events { + ids = append(ids, id) + } + return ids + case alert.EdgeMetas: + ids := make([]ent.Value, 0, len(m.metas)) + for id := range m.metas { + ids = append(ids, id) + } + return ids + } + return nil +} + +// RemovedEdges returns all edge names that were removed in this +// mutation. +func (m *AlertMutation) RemovedEdges() []string { + edges := make([]string, 0, 4) + if m.removeddecisions != nil { + edges = append(edges, alert.EdgeDecisions) + } + if m.removedevents != nil { + edges = append(edges, alert.EdgeEvents) + } + if m.removedmetas != nil { + edges = append(edges, alert.EdgeMetas) + } + return edges +} + +// RemovedIDs returns all ids (to other nodes) that were removed for +// the given edge name. +func (m *AlertMutation) RemovedIDs(name string) []ent.Value { + switch name { + case alert.EdgeDecisions: + ids := make([]ent.Value, 0, len(m.removeddecisions)) + for id := range m.removeddecisions { + ids = append(ids, id) + } + return ids + case alert.EdgeEvents: + ids := make([]ent.Value, 0, len(m.removedevents)) + for id := range m.removedevents { + ids = append(ids, id) + } + return ids + case alert.EdgeMetas: + ids := make([]ent.Value, 0, len(m.removedmetas)) + for id := range m.removedmetas { + ids = append(ids, id) + } + return ids + } + return nil +} + +// ClearedEdges returns all edge names that were cleared in this +// mutation. +func (m *AlertMutation) ClearedEdges() []string { + edges := make([]string, 0, 4) + if m.clearedowner { + edges = append(edges, alert.EdgeOwner) + } + if m.cleareddecisions { + edges = append(edges, alert.EdgeDecisions) + } + if m.clearedevents { + edges = append(edges, alert.EdgeEvents) + } + if m.clearedmetas { + edges = append(edges, alert.EdgeMetas) + } + return edges +} + +// EdgeCleared returns a boolean indicates if this edge was +// cleared in this mutation. +func (m *AlertMutation) EdgeCleared(name string) bool { + switch name { + case alert.EdgeOwner: + return m.clearedowner + case alert.EdgeDecisions: + return m.cleareddecisions + case alert.EdgeEvents: + return m.clearedevents + case alert.EdgeMetas: + return m.clearedmetas + } + return false +} + +// ClearEdge clears the value for the given name. It returns an +// error if the edge name is not defined in the schema. +func (m *AlertMutation) ClearEdge(name string) error { + switch name { + case alert.EdgeOwner: + m.ClearOwner() + return nil + } + return fmt.Errorf("unknown Alert unique edge %s", name) +} + +// ResetEdge resets all changes in the mutation regarding the +// given edge name. It returns an error if the edge is not +// defined in the schema. 
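+// NOTE(review): an illustrative sketch, not generated code. ClearEdge above
+// only accepts the unique owner edge; the multi-value edges are cleared via
+// ClearDecisions & co. ResetEdge below works for any edge name:
+//
+//	_ = m.ClearEdge(alert.EdgeOwner) // ok: unique edge
+//	_ = m.ClearEdge(alert.EdgeMetas) // error: not a unique edge
+//	_ = m.ResetEdge(alert.EdgeMetas) // ok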
+func (m *AlertMutation) ResetEdge(name string) error { + switch name { + case alert.EdgeOwner: + m.ResetOwner() + return nil + case alert.EdgeDecisions: + m.ResetDecisions() + return nil + case alert.EdgeEvents: + m.ResetEvents() + return nil + case alert.EdgeMetas: + m.ResetMetas() + return nil + } + return fmt.Errorf("unknown Alert edge %s", name) +} + +// BouncerMutation represents an operation that mutate the Bouncers +// nodes in the graph. +type BouncerMutation struct { + config + op Op + typ string + id *int + created_at *time.Time + updated_at *time.Time + name *string + api_key *string + revoked *bool + ip_address *string + _type *string + version *string + until *time.Time + last_pull *time.Time + clearedFields map[string]struct{} + done bool + oldValue func(context.Context) (*Bouncer, error) +} + +var _ ent.Mutation = (*BouncerMutation)(nil) + +// bouncerOption allows to manage the mutation configuration using functional options. +type bouncerOption func(*BouncerMutation) + +// newBouncerMutation creates new mutation for $n.Name. +func newBouncerMutation(c config, op Op, opts ...bouncerOption) *BouncerMutation { + m := &BouncerMutation{ + config: c, + op: op, + typ: TypeBouncer, + clearedFields: make(map[string]struct{}), + } + for _, opt := range opts { + opt(m) + } + return m +} + +// withBouncerID sets the id field of the mutation. +func withBouncerID(id int) bouncerOption { + return func(m *BouncerMutation) { + var ( + err error + once sync.Once + value *Bouncer + ) + m.oldValue = func(ctx context.Context) (*Bouncer, error) { + once.Do(func() { + if m.done { + err = fmt.Errorf("querying old values post mutation is not allowed") + } else { + value, err = m.Client().Bouncer.Get(ctx, id) + } + }) + return value, err + } + m.id = &id + } +} + +// withBouncer sets the old Bouncer of the mutation. +func withBouncer(node *Bouncer) bouncerOption { + return func(m *BouncerMutation) { + m.oldValue = func(context.Context) (*Bouncer, error) { + return node, nil + } + m.id = &node.ID + } +} + +// Client returns a new `ent.Client` from the mutation. If the mutation was +// executed in a transaction (ent.Tx), a transactional client is returned. +func (m BouncerMutation) Client() *Client { + client := &Client{config: m.config} + client.init() + return client +} + +// Tx returns an `ent.Tx` for mutations that were executed in transactions; +// it returns an error otherwise. +func (m BouncerMutation) Tx() (*Tx, error) { + if _, ok := m.driver.(*txDriver); !ok { + return nil, fmt.Errorf("ent: mutation is not running in a transaction") + } + tx := &Tx{config: m.config} + tx.init() + return tx, nil +} + +// ID returns the id value in the mutation. Note that, the id +// is available only if it was provided to the builder. +func (m *BouncerMutation) ID() (id int, exists bool) { + if m.id == nil { + return + } + return *m.id, true +} + +// SetCreatedAt sets the created_at field. +func (m *BouncerMutation) SetCreatedAt(t time.Time) { + m.created_at = &t +} + +// CreatedAt returns the created_at value in the mutation. +func (m *BouncerMutation) CreatedAt() (r time.Time, exists bool) { + v := m.created_at + if v == nil { + return + } + return *v, true +} + +// OldCreatedAt returns the old created_at value of the Bouncer. +// If the Bouncer object wasn't provided to the builder, the object is fetched +// from the database. +// An error is returned if the mutation operation is not UpdateOne, or database query fails. 
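+// NOTE(review): not generated code, just an observation. Every Old* accessor
+// below funnels through m.oldValue, the sync.Once-guarded loader installed by
+// withBouncerID above, so inspecting many old fields costs at most one extra
+// Get query, and querying old values after the mutation ran (m.done) is
+// rejected outright:
+//
+//	_, _ = m.OldCreatedAt(ctx) // first call: Client().Bouncer.Get
+//	_, _ = m.OldName(ctx)      // served from the memoized *Bouncer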
+func (m *BouncerMutation) OldCreatedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, fmt.Errorf("OldCreatedAt is allowed only on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, fmt.Errorf("OldCreatedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldCreatedAt: %w", err) + } + return oldValue.CreatedAt, nil +} + +// ResetCreatedAt reset all changes of the "created_at" field. +func (m *BouncerMutation) ResetCreatedAt() { + m.created_at = nil +} + +// SetUpdatedAt sets the updated_at field. +func (m *BouncerMutation) SetUpdatedAt(t time.Time) { + m.updated_at = &t +} + +// UpdatedAt returns the updated_at value in the mutation. +func (m *BouncerMutation) UpdatedAt() (r time.Time, exists bool) { + v := m.updated_at + if v == nil { + return + } + return *v, true +} + +// OldUpdatedAt returns the old updated_at value of the Bouncer. +// If the Bouncer object wasn't provided to the builder, the object is fetched +// from the database. +// An error is returned if the mutation operation is not UpdateOne, or database query fails. +func (m *BouncerMutation) OldUpdatedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, fmt.Errorf("OldUpdatedAt is allowed only on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, fmt.Errorf("OldUpdatedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldUpdatedAt: %w", err) + } + return oldValue.UpdatedAt, nil +} + +// ResetUpdatedAt reset all changes of the "updated_at" field. +func (m *BouncerMutation) ResetUpdatedAt() { + m.updated_at = nil +} + +// SetName sets the name field. +func (m *BouncerMutation) SetName(s string) { + m.name = &s +} + +// Name returns the name value in the mutation. +func (m *BouncerMutation) Name() (r string, exists bool) { + v := m.name + if v == nil { + return + } + return *v, true +} + +// OldName returns the old name value of the Bouncer. +// If the Bouncer object wasn't provided to the builder, the object is fetched +// from the database. +// An error is returned if the mutation operation is not UpdateOne, or database query fails. +func (m *BouncerMutation) OldName(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, fmt.Errorf("OldName is allowed only on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, fmt.Errorf("OldName requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldName: %w", err) + } + return oldValue.Name, nil +} + +// ResetName reset all changes of the "name" field. +func (m *BouncerMutation) ResetName() { + m.name = nil +} + +// SetAPIKey sets the api_key field. +func (m *BouncerMutation) SetAPIKey(s string) { + m.api_key = &s +} + +// APIKey returns the api_key value in the mutation. +func (m *BouncerMutation) APIKey() (r string, exists bool) { + v := m.api_key + if v == nil { + return + } + return *v, true +} + +// OldAPIKey returns the old api_key value of the Bouncer. +// If the Bouncer object wasn't provided to the builder, the object is fetched +// from the database. +// An error is returned if the mutation operation is not UpdateOne, or database query fails. 
+func (m *BouncerMutation) OldAPIKey(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, fmt.Errorf("OldAPIKey is allowed only on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, fmt.Errorf("OldAPIKey requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldAPIKey: %w", err) + } + return oldValue.APIKey, nil +} + +// ResetAPIKey reset all changes of the "api_key" field. +func (m *BouncerMutation) ResetAPIKey() { + m.api_key = nil +} + +// SetRevoked sets the revoked field. +func (m *BouncerMutation) SetRevoked(b bool) { + m.revoked = &b +} + +// Revoked returns the revoked value in the mutation. +func (m *BouncerMutation) Revoked() (r bool, exists bool) { + v := m.revoked + if v == nil { + return + } + return *v, true +} + +// OldRevoked returns the old revoked value of the Bouncer. +// If the Bouncer object wasn't provided to the builder, the object is fetched +// from the database. +// An error is returned if the mutation operation is not UpdateOne, or database query fails. +func (m *BouncerMutation) OldRevoked(ctx context.Context) (v bool, err error) { + if !m.op.Is(OpUpdateOne) { + return v, fmt.Errorf("OldRevoked is allowed only on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, fmt.Errorf("OldRevoked requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldRevoked: %w", err) + } + return oldValue.Revoked, nil +} + +// ResetRevoked reset all changes of the "revoked" field. +func (m *BouncerMutation) ResetRevoked() { + m.revoked = nil +} + +// SetIPAddress sets the ip_address field. +func (m *BouncerMutation) SetIPAddress(s string) { + m.ip_address = &s +} + +// IPAddress returns the ip_address value in the mutation. +func (m *BouncerMutation) IPAddress() (r string, exists bool) { + v := m.ip_address + if v == nil { + return + } + return *v, true +} + +// OldIPAddress returns the old ip_address value of the Bouncer. +// If the Bouncer object wasn't provided to the builder, the object is fetched +// from the database. +// An error is returned if the mutation operation is not UpdateOne, or database query fails. +func (m *BouncerMutation) OldIPAddress(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, fmt.Errorf("OldIPAddress is allowed only on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, fmt.Errorf("OldIPAddress requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldIPAddress: %w", err) + } + return oldValue.IPAddress, nil +} + +// ClearIPAddress clears the value of ip_address. +func (m *BouncerMutation) ClearIPAddress() { + m.ip_address = nil + m.clearedFields[bouncer.FieldIPAddress] = struct{}{} +} + +// IPAddressCleared returns if the field ip_address was cleared in this mutation. +func (m *BouncerMutation) IPAddressCleared() bool { + _, ok := m.clearedFields[bouncer.FieldIPAddress] + return ok +} + +// ResetIPAddress reset all changes of the "ip_address" field. +func (m *BouncerMutation) ResetIPAddress() { + m.ip_address = nil + delete(m.clearedFields, bouncer.FieldIPAddress) +} + +// SetType sets the type field. +func (m *BouncerMutation) SetType(s string) { + m._type = &s +} + +// GetType returns the type value in the mutation. 
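+// NOTE(review): not generated code, just an observation. The struct field is
+// `_type` and the getter is GetType rather than Type because Type() is
+// already taken: it reports the mutation's node type ("Bouncer") further
+// down. Roughly:
+//
+//	m.Type()             // "Bouncer", the node type
+//	v, ok := m.GetType() // the bouncer's own `type` column, if set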
+func (m *BouncerMutation) GetType() (r string, exists bool) { + v := m._type + if v == nil { + return + } + return *v, true +} + +// OldType returns the old type value of the Bouncer. +// If the Bouncer object wasn't provided to the builder, the object is fetched +// from the database. +// An error is returned if the mutation operation is not UpdateOne, or database query fails. +func (m *BouncerMutation) OldType(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, fmt.Errorf("OldType is allowed only on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, fmt.Errorf("OldType requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldType: %w", err) + } + return oldValue.Type, nil +} + +// ClearType clears the value of type. +func (m *BouncerMutation) ClearType() { + m._type = nil + m.clearedFields[bouncer.FieldType] = struct{}{} +} + +// TypeCleared returns if the field type was cleared in this mutation. +func (m *BouncerMutation) TypeCleared() bool { + _, ok := m.clearedFields[bouncer.FieldType] + return ok +} + +// ResetType reset all changes of the "type" field. +func (m *BouncerMutation) ResetType() { + m._type = nil + delete(m.clearedFields, bouncer.FieldType) +} + +// SetVersion sets the version field. +func (m *BouncerMutation) SetVersion(s string) { + m.version = &s +} + +// Version returns the version value in the mutation. +func (m *BouncerMutation) Version() (r string, exists bool) { + v := m.version + if v == nil { + return + } + return *v, true +} + +// OldVersion returns the old version value of the Bouncer. +// If the Bouncer object wasn't provided to the builder, the object is fetched +// from the database. +// An error is returned if the mutation operation is not UpdateOne, or database query fails. +func (m *BouncerMutation) OldVersion(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, fmt.Errorf("OldVersion is allowed only on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, fmt.Errorf("OldVersion requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldVersion: %w", err) + } + return oldValue.Version, nil +} + +// ClearVersion clears the value of version. +func (m *BouncerMutation) ClearVersion() { + m.version = nil + m.clearedFields[bouncer.FieldVersion] = struct{}{} +} + +// VersionCleared returns if the field version was cleared in this mutation. +func (m *BouncerMutation) VersionCleared() bool { + _, ok := m.clearedFields[bouncer.FieldVersion] + return ok +} + +// ResetVersion reset all changes of the "version" field. +func (m *BouncerMutation) ResetVersion() { + m.version = nil + delete(m.clearedFields, bouncer.FieldVersion) +} + +// SetUntil sets the until field. +func (m *BouncerMutation) SetUntil(t time.Time) { + m.until = &t +} + +// Until returns the until value in the mutation. +func (m *BouncerMutation) Until() (r time.Time, exists bool) { + v := m.until + if v == nil { + return + } + return *v, true +} + +// OldUntil returns the old until value of the Bouncer. +// If the Bouncer object wasn't provided to the builder, the object is fetched +// from the database. +// An error is returned if the mutation operation is not UpdateOne, or database query fails. 
+func (m *BouncerMutation) OldUntil(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, fmt.Errorf("OldUntil is allowed only on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, fmt.Errorf("OldUntil requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldUntil: %w", err) + } + return oldValue.Until, nil +} + +// ClearUntil clears the value of until. +func (m *BouncerMutation) ClearUntil() { + m.until = nil + m.clearedFields[bouncer.FieldUntil] = struct{}{} +} + +// UntilCleared returns if the field until was cleared in this mutation. +func (m *BouncerMutation) UntilCleared() bool { + _, ok := m.clearedFields[bouncer.FieldUntil] + return ok +} + +// ResetUntil reset all changes of the "until" field. +func (m *BouncerMutation) ResetUntil() { + m.until = nil + delete(m.clearedFields, bouncer.FieldUntil) +} + +// SetLastPull sets the last_pull field. +func (m *BouncerMutation) SetLastPull(t time.Time) { + m.last_pull = &t +} + +// LastPull returns the last_pull value in the mutation. +func (m *BouncerMutation) LastPull() (r time.Time, exists bool) { + v := m.last_pull + if v == nil { + return + } + return *v, true +} + +// OldLastPull returns the old last_pull value of the Bouncer. +// If the Bouncer object wasn't provided to the builder, the object is fetched +// from the database. +// An error is returned if the mutation operation is not UpdateOne, or database query fails. +func (m *BouncerMutation) OldLastPull(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, fmt.Errorf("OldLastPull is allowed only on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, fmt.Errorf("OldLastPull requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldLastPull: %w", err) + } + return oldValue.LastPull, nil +} + +// ResetLastPull reset all changes of the "last_pull" field. +func (m *BouncerMutation) ResetLastPull() { + m.last_pull = nil +} + +// Op returns the operation name. +func (m *BouncerMutation) Op() Op { + return m.op +} + +// Type returns the node type of this mutation (Bouncer). +func (m *BouncerMutation) Type() string { + return m.typ +} + +// Fields returns all fields that were changed during +// this mutation. Note that, in order to get all numeric +// fields that were in/decremented, call AddedFields(). +func (m *BouncerMutation) Fields() []string { + fields := make([]string, 0, 10) + if m.created_at != nil { + fields = append(fields, bouncer.FieldCreatedAt) + } + if m.updated_at != nil { + fields = append(fields, bouncer.FieldUpdatedAt) + } + if m.name != nil { + fields = append(fields, bouncer.FieldName) + } + if m.api_key != nil { + fields = append(fields, bouncer.FieldAPIKey) + } + if m.revoked != nil { + fields = append(fields, bouncer.FieldRevoked) + } + if m.ip_address != nil { + fields = append(fields, bouncer.FieldIPAddress) + } + if m._type != nil { + fields = append(fields, bouncer.FieldType) + } + if m.version != nil { + fields = append(fields, bouncer.FieldVersion) + } + if m.until != nil { + fields = append(fields, bouncer.FieldUntil) + } + if m.last_pull != nil { + fields = append(fields, bouncer.FieldLastPull) + } + return fields +} + +// Field returns the value of a field with the given name. 
+// The second boolean value indicates that this field was +// not set, or was not define in the schema. +func (m *BouncerMutation) Field(name string) (ent.Value, bool) { + switch name { + case bouncer.FieldCreatedAt: + return m.CreatedAt() + case bouncer.FieldUpdatedAt: + return m.UpdatedAt() + case bouncer.FieldName: + return m.Name() + case bouncer.FieldAPIKey: + return m.APIKey() + case bouncer.FieldRevoked: + return m.Revoked() + case bouncer.FieldIPAddress: + return m.IPAddress() + case bouncer.FieldType: + return m.GetType() + case bouncer.FieldVersion: + return m.Version() + case bouncer.FieldUntil: + return m.Until() + case bouncer.FieldLastPull: + return m.LastPull() + } + return nil, false +} + +// OldField returns the old value of the field from the database. +// An error is returned if the mutation operation is not UpdateOne, +// or the query to the database was failed. +func (m *BouncerMutation) OldField(ctx context.Context, name string) (ent.Value, error) { + switch name { + case bouncer.FieldCreatedAt: + return m.OldCreatedAt(ctx) + case bouncer.FieldUpdatedAt: + return m.OldUpdatedAt(ctx) + case bouncer.FieldName: + return m.OldName(ctx) + case bouncer.FieldAPIKey: + return m.OldAPIKey(ctx) + case bouncer.FieldRevoked: + return m.OldRevoked(ctx) + case bouncer.FieldIPAddress: + return m.OldIPAddress(ctx) + case bouncer.FieldType: + return m.OldType(ctx) + case bouncer.FieldVersion: + return m.OldVersion(ctx) + case bouncer.FieldUntil: + return m.OldUntil(ctx) + case bouncer.FieldLastPull: + return m.OldLastPull(ctx) + } + return nil, fmt.Errorf("unknown Bouncer field %s", name) +} + +// SetField sets the value for the given name. It returns an +// error if the field is not defined in the schema, or if the +// type mismatch the field type. 
+func (m *BouncerMutation) SetField(name string, value ent.Value) error { + switch name { + case bouncer.FieldCreatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetCreatedAt(v) + return nil + case bouncer.FieldUpdatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetUpdatedAt(v) + return nil + case bouncer.FieldName: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetName(v) + return nil + case bouncer.FieldAPIKey: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetAPIKey(v) + return nil + case bouncer.FieldRevoked: + v, ok := value.(bool) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetRevoked(v) + return nil + case bouncer.FieldIPAddress: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetIPAddress(v) + return nil + case bouncer.FieldType: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetType(v) + return nil + case bouncer.FieldVersion: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetVersion(v) + return nil + case bouncer.FieldUntil: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetUntil(v) + return nil + case bouncer.FieldLastPull: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetLastPull(v) + return nil + } + return fmt.Errorf("unknown Bouncer field %s", name) +} + +// AddedFields returns all numeric fields that were incremented +// or decremented during this mutation. +func (m *BouncerMutation) AddedFields() []string { + return nil +} + +// AddedField returns the numeric value that was in/decremented +// from a field with the given name. The second value indicates +// that this field was not set, or was not define in the schema. +func (m *BouncerMutation) AddedField(name string) (ent.Value, bool) { + return nil, false +} + +// AddField adds the value for the given name. It returns an +// error if the field is not defined in the schema, or if the +// type mismatch the field type. +func (m *BouncerMutation) AddField(name string, value ent.Value) error { + switch name { + } + return fmt.Errorf("unknown Bouncer numeric field %s", name) +} + +// ClearedFields returns all nullable fields that were cleared +// during this mutation. +func (m *BouncerMutation) ClearedFields() []string { + var fields []string + if m.FieldCleared(bouncer.FieldIPAddress) { + fields = append(fields, bouncer.FieldIPAddress) + } + if m.FieldCleared(bouncer.FieldType) { + fields = append(fields, bouncer.FieldType) + } + if m.FieldCleared(bouncer.FieldVersion) { + fields = append(fields, bouncer.FieldVersion) + } + if m.FieldCleared(bouncer.FieldUntil) { + fields = append(fields, bouncer.FieldUntil) + } + return fields +} + +// FieldCleared returns a boolean indicates if this field was +// cleared in this mutation. +func (m *BouncerMutation) FieldCleared(name string) bool { + _, ok := m.clearedFields[name] + return ok +} + +// ClearField clears the value for the given name. It returns an +// error if the field is not defined in the schema. 
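+// NOTE(review): not generated code, just an observation. Bouncer declares no
+// numeric columns, so AddedFields/AddedField above are hard-wired to nil and
+// AddField's switch is empty: every name falls through to the "unknown
+// Bouncer numeric field" error:
+//
+//	_ = m.AddField(bouncer.FieldRevoked, 1) // always an error for Bouncer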
+func (m *BouncerMutation) ClearField(name string) error { + switch name { + case bouncer.FieldIPAddress: + m.ClearIPAddress() + return nil + case bouncer.FieldType: + m.ClearType() + return nil + case bouncer.FieldVersion: + m.ClearVersion() + return nil + case bouncer.FieldUntil: + m.ClearUntil() + return nil + } + return fmt.Errorf("unknown Bouncer nullable field %s", name) +} + +// ResetField resets all changes in the mutation regarding the +// given field name. It returns an error if the field is not +// defined in the schema. +func (m *BouncerMutation) ResetField(name string) error { + switch name { + case bouncer.FieldCreatedAt: + m.ResetCreatedAt() + return nil + case bouncer.FieldUpdatedAt: + m.ResetUpdatedAt() + return nil + case bouncer.FieldName: + m.ResetName() + return nil + case bouncer.FieldAPIKey: + m.ResetAPIKey() + return nil + case bouncer.FieldRevoked: + m.ResetRevoked() + return nil + case bouncer.FieldIPAddress: + m.ResetIPAddress() + return nil + case bouncer.FieldType: + m.ResetType() + return nil + case bouncer.FieldVersion: + m.ResetVersion() + return nil + case bouncer.FieldUntil: + m.ResetUntil() + return nil + case bouncer.FieldLastPull: + m.ResetLastPull() + return nil + } + return fmt.Errorf("unknown Bouncer field %s", name) +} + +// AddedEdges returns all edge names that were set/added in this +// mutation. +func (m *BouncerMutation) AddedEdges() []string { + edges := make([]string, 0, 0) + return edges +} + +// AddedIDs returns all ids (to other nodes) that were added for +// the given edge name. +func (m *BouncerMutation) AddedIDs(name string) []ent.Value { + return nil +} + +// RemovedEdges returns all edge names that were removed in this +// mutation. +func (m *BouncerMutation) RemovedEdges() []string { + edges := make([]string, 0, 0) + return edges +} + +// RemovedIDs returns all ids (to other nodes) that were removed for +// the given edge name. +func (m *BouncerMutation) RemovedIDs(name string) []ent.Value { + return nil +} + +// ClearedEdges returns all edge names that were cleared in this +// mutation. +func (m *BouncerMutation) ClearedEdges() []string { + edges := make([]string, 0, 0) + return edges +} + +// EdgeCleared returns a boolean indicates if this edge was +// cleared in this mutation. +func (m *BouncerMutation) EdgeCleared(name string) bool { + return false +} + +// ClearEdge clears the value for the given name. It returns an +// error if the edge name is not defined in the schema. +func (m *BouncerMutation) ClearEdge(name string) error { + return fmt.Errorf("unknown Bouncer unique edge %s", name) +} + +// ResetEdge resets all changes in the mutation regarding the +// given edge name. It returns an error if the edge is not +// defined in the schema. +func (m *BouncerMutation) ResetEdge(name string) error { + return fmt.Errorf("unknown Bouncer edge %s", name) +} + +// DecisionMutation represents an operation that mutate the Decisions +// nodes in the graph. +type DecisionMutation struct { + config + op Op + typ string + id *int + created_at *time.Time + updated_at *time.Time + until *time.Time + scenario *string + _type *string + start_ip *int64 + addstart_ip *int64 + end_ip *int64 + addend_ip *int64 + scope *string + value *string + origin *string + simulated *bool + clearedFields map[string]struct{} + owner *int + clearedowner bool + done bool + oldValue func(context.Context) (*Decision, error) +} + +var _ ent.Mutation = (*DecisionMutation)(nil) + +// decisionOption allows to manage the mutation configuration using functional options. 
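+// NOTE(review): an illustrative sketch, not generated code. Mutations are
+// assembled with functional options; the generated builders presumably pass
+// withDecisionID (lazy fetch of the old row, defined just below) on updates
+// and nothing extra on creates. Roughly, with c and id assumed in scope:
+//
+//	mu := newDecisionMutation(c, OpUpdateOne, withDecisionID(id))
+//	mc := newDecisionMutation(c, OpCreate)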
+type decisionOption func(*DecisionMutation) + +// newDecisionMutation creates new mutation for $n.Name. +func newDecisionMutation(c config, op Op, opts ...decisionOption) *DecisionMutation { + m := &DecisionMutation{ + config: c, + op: op, + typ: TypeDecision, + clearedFields: make(map[string]struct{}), + } + for _, opt := range opts { + opt(m) + } + return m +} + +// withDecisionID sets the id field of the mutation. +func withDecisionID(id int) decisionOption { + return func(m *DecisionMutation) { + var ( + err error + once sync.Once + value *Decision + ) + m.oldValue = func(ctx context.Context) (*Decision, error) { + once.Do(func() { + if m.done { + err = fmt.Errorf("querying old values post mutation is not allowed") + } else { + value, err = m.Client().Decision.Get(ctx, id) + } + }) + return value, err + } + m.id = &id + } +} + +// withDecision sets the old Decision of the mutation. +func withDecision(node *Decision) decisionOption { + return func(m *DecisionMutation) { + m.oldValue = func(context.Context) (*Decision, error) { + return node, nil + } + m.id = &node.ID + } +} + +// Client returns a new `ent.Client` from the mutation. If the mutation was +// executed in a transaction (ent.Tx), a transactional client is returned. +func (m DecisionMutation) Client() *Client { + client := &Client{config: m.config} + client.init() + return client +} + +// Tx returns an `ent.Tx` for mutations that were executed in transactions; +// it returns an error otherwise. +func (m DecisionMutation) Tx() (*Tx, error) { + if _, ok := m.driver.(*txDriver); !ok { + return nil, fmt.Errorf("ent: mutation is not running in a transaction") + } + tx := &Tx{config: m.config} + tx.init() + return tx, nil +} + +// ID returns the id value in the mutation. Note that, the id +// is available only if it was provided to the builder. +func (m *DecisionMutation) ID() (id int, exists bool) { + if m.id == nil { + return + } + return *m.id, true +} + +// SetCreatedAt sets the created_at field. +func (m *DecisionMutation) SetCreatedAt(t time.Time) { + m.created_at = &t +} + +// CreatedAt returns the created_at value in the mutation. +func (m *DecisionMutation) CreatedAt() (r time.Time, exists bool) { + v := m.created_at + if v == nil { + return + } + return *v, true +} + +// OldCreatedAt returns the old created_at value of the Decision. +// If the Decision object wasn't provided to the builder, the object is fetched +// from the database. +// An error is returned if the mutation operation is not UpdateOne, or database query fails. +func (m *DecisionMutation) OldCreatedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, fmt.Errorf("OldCreatedAt is allowed only on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, fmt.Errorf("OldCreatedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldCreatedAt: %w", err) + } + return oldValue.CreatedAt, nil +} + +// ResetCreatedAt reset all changes of the "created_at" field. +func (m *DecisionMutation) ResetCreatedAt() { + m.created_at = nil +} + +// SetUpdatedAt sets the updated_at field. +func (m *DecisionMutation) SetUpdatedAt(t time.Time) { + m.updated_at = &t +} + +// UpdatedAt returns the updated_at value in the mutation. 
+func (m *DecisionMutation) UpdatedAt() (r time.Time, exists bool) { + v := m.updated_at + if v == nil { + return + } + return *v, true +} + +// OldUpdatedAt returns the old updated_at value of the Decision. +// If the Decision object wasn't provided to the builder, the object is fetched +// from the database. +// An error is returned if the mutation operation is not UpdateOne, or database query fails. +func (m *DecisionMutation) OldUpdatedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, fmt.Errorf("OldUpdatedAt is allowed only on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, fmt.Errorf("OldUpdatedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldUpdatedAt: %w", err) + } + return oldValue.UpdatedAt, nil +} + +// ResetUpdatedAt reset all changes of the "updated_at" field. +func (m *DecisionMutation) ResetUpdatedAt() { + m.updated_at = nil +} + +// SetUntil sets the until field. +func (m *DecisionMutation) SetUntil(t time.Time) { + m.until = &t +} + +// Until returns the until value in the mutation. +func (m *DecisionMutation) Until() (r time.Time, exists bool) { + v := m.until + if v == nil { + return + } + return *v, true +} + +// OldUntil returns the old until value of the Decision. +// If the Decision object wasn't provided to the builder, the object is fetched +// from the database. +// An error is returned if the mutation operation is not UpdateOne, or database query fails. +func (m *DecisionMutation) OldUntil(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, fmt.Errorf("OldUntil is allowed only on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, fmt.Errorf("OldUntil requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldUntil: %w", err) + } + return oldValue.Until, nil +} + +// ResetUntil reset all changes of the "until" field. +func (m *DecisionMutation) ResetUntil() { + m.until = nil +} + +// SetScenario sets the scenario field. +func (m *DecisionMutation) SetScenario(s string) { + m.scenario = &s +} + +// Scenario returns the scenario value in the mutation. +func (m *DecisionMutation) Scenario() (r string, exists bool) { + v := m.scenario + if v == nil { + return + } + return *v, true +} + +// OldScenario returns the old scenario value of the Decision. +// If the Decision object wasn't provided to the builder, the object is fetched +// from the database. +// An error is returned if the mutation operation is not UpdateOne, or database query fails. +func (m *DecisionMutation) OldScenario(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, fmt.Errorf("OldScenario is allowed only on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, fmt.Errorf("OldScenario requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldScenario: %w", err) + } + return oldValue.Scenario, nil +} + +// ResetScenario reset all changes of the "scenario" field. +func (m *DecisionMutation) ResetScenario() { + m.scenario = nil +} + +// SetType sets the type field. +func (m *DecisionMutation) SetType(s string) { + m._type = &s +} + +// GetType returns the type value in the mutation. 
+func (m *DecisionMutation) GetType() (r string, exists bool) { + v := m._type + if v == nil { + return + } + return *v, true +} + +// OldType returns the old type value of the Decision. +// If the Decision object wasn't provided to the builder, the object is fetched +// from the database. +// An error is returned if the mutation operation is not UpdateOne, or database query fails. +func (m *DecisionMutation) OldType(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, fmt.Errorf("OldType is allowed only on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, fmt.Errorf("OldType requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldType: %w", err) + } + return oldValue.Type, nil +} + +// ResetType reset all changes of the "type" field. +func (m *DecisionMutation) ResetType() { + m._type = nil +} + +// SetStartIP sets the start_ip field. +func (m *DecisionMutation) SetStartIP(i int64) { + m.start_ip = &i + m.addstart_ip = nil +} + +// StartIP returns the start_ip value in the mutation. +func (m *DecisionMutation) StartIP() (r int64, exists bool) { + v := m.start_ip + if v == nil { + return + } + return *v, true +} + +// OldStartIP returns the old start_ip value of the Decision. +// If the Decision object wasn't provided to the builder, the object is fetched +// from the database. +// An error is returned if the mutation operation is not UpdateOne, or database query fails. +func (m *DecisionMutation) OldStartIP(ctx context.Context) (v int64, err error) { + if !m.op.Is(OpUpdateOne) { + return v, fmt.Errorf("OldStartIP is allowed only on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, fmt.Errorf("OldStartIP requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldStartIP: %w", err) + } + return oldValue.StartIP, nil +} + +// AddStartIP adds i to start_ip. +func (m *DecisionMutation) AddStartIP(i int64) { + if m.addstart_ip != nil { + *m.addstart_ip += i + } else { + m.addstart_ip = &i + } +} + +// AddedStartIP returns the value that was added to the start_ip field in this mutation. +func (m *DecisionMutation) AddedStartIP() (r int64, exists bool) { + v := m.addstart_ip + if v == nil { + return + } + return *v, true +} + +// ClearStartIP clears the value of start_ip. +func (m *DecisionMutation) ClearStartIP() { + m.start_ip = nil + m.addstart_ip = nil + m.clearedFields[decision.FieldStartIP] = struct{}{} +} + +// StartIPCleared returns if the field start_ip was cleared in this mutation. +func (m *DecisionMutation) StartIPCleared() bool { + _, ok := m.clearedFields[decision.FieldStartIP] + return ok +} + +// ResetStartIP reset all changes of the "start_ip" field. +func (m *DecisionMutation) ResetStartIP() { + m.start_ip = nil + m.addstart_ip = nil + delete(m.clearedFields, decision.FieldStartIP) +} + +// SetEndIP sets the end_ip field. +func (m *DecisionMutation) SetEndIP(i int64) { + m.end_ip = &i + m.addend_ip = nil +} + +// EndIP returns the end_ip value in the mutation. +func (m *DecisionMutation) EndIP() (r int64, exists bool) { + v := m.end_ip + if v == nil { + return + } + return *v, true +} + +// OldEndIP returns the old end_ip value of the Decision. +// If the Decision object wasn't provided to the builder, the object is fetched +// from the database. 
+// An error is returned if the mutation operation is not UpdateOne, or database query fails. +func (m *DecisionMutation) OldEndIP(ctx context.Context) (v int64, err error) { + if !m.op.Is(OpUpdateOne) { + return v, fmt.Errorf("OldEndIP is allowed only on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, fmt.Errorf("OldEndIP requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldEndIP: %w", err) + } + return oldValue.EndIP, nil +} + +// AddEndIP adds i to end_ip. +func (m *DecisionMutation) AddEndIP(i int64) { + if m.addend_ip != nil { + *m.addend_ip += i + } else { + m.addend_ip = &i + } +} + +// AddedEndIP returns the value that was added to the end_ip field in this mutation. +func (m *DecisionMutation) AddedEndIP() (r int64, exists bool) { + v := m.addend_ip + if v == nil { + return + } + return *v, true +} + +// ClearEndIP clears the value of end_ip. +func (m *DecisionMutation) ClearEndIP() { + m.end_ip = nil + m.addend_ip = nil + m.clearedFields[decision.FieldEndIP] = struct{}{} +} + +// EndIPCleared returns if the field end_ip was cleared in this mutation. +func (m *DecisionMutation) EndIPCleared() bool { + _, ok := m.clearedFields[decision.FieldEndIP] + return ok +} + +// ResetEndIP reset all changes of the "end_ip" field. +func (m *DecisionMutation) ResetEndIP() { + m.end_ip = nil + m.addend_ip = nil + delete(m.clearedFields, decision.FieldEndIP) +} + +// SetScope sets the scope field. +func (m *DecisionMutation) SetScope(s string) { + m.scope = &s +} + +// Scope returns the scope value in the mutation. +func (m *DecisionMutation) Scope() (r string, exists bool) { + v := m.scope + if v == nil { + return + } + return *v, true +} + +// OldScope returns the old scope value of the Decision. +// If the Decision object wasn't provided to the builder, the object is fetched +// from the database. +// An error is returned if the mutation operation is not UpdateOne, or database query fails. +func (m *DecisionMutation) OldScope(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, fmt.Errorf("OldScope is allowed only on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, fmt.Errorf("OldScope requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldScope: %w", err) + } + return oldValue.Scope, nil +} + +// ResetScope reset all changes of the "scope" field. +func (m *DecisionMutation) ResetScope() { + m.scope = nil +} + +// SetValue sets the value field. +func (m *DecisionMutation) SetValue(s string) { + m.value = &s +} + +// Value returns the value value in the mutation. +func (m *DecisionMutation) Value() (r string, exists bool) { + v := m.value + if v == nil { + return + } + return *v, true +} + +// OldValue returns the old value value of the Decision. +// If the Decision object wasn't provided to the builder, the object is fetched +// from the database. +// An error is returned if the mutation operation is not UpdateOne, or database query fails. 
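start_ip and end_ip are numeric and nullable, so each is staged in three distinct ways: an absolute assignment (SetStartIP, which discards any pending increment), an accumulated delta (successive AddStartIP calls fold into addstart_ip and end up as a single `start_ip = start_ip + d`), and a cleared marker in clearedFields (ClearStartIP, which ends up as `start_ip = NULL`). A standalone sketch of that three-state bookkeeping, with illustrative names:

```go
package main

import "fmt"

const fieldStartIP = "start_ip"

// ipStage mirrors the start_ip staging: absolute value, pending
// increment and cleared marker are three separate states that map to
// "SET col = v", "SET col = col + d" and "SET col = NULL".
type ipStage struct {
	val     *int64
	add     *int64
	cleared map[string]struct{}
}

func (s *ipStage) Set(v int64) { s.val = &v; s.add = nil }

func (s *ipStage) Add(d int64) {
	if s.add != nil {
		*s.add += d // successive Adds collapse into one delta
		return
	}
	s.add = &d
}

func (s *ipStage) Clear() {
	s.val, s.add = nil, nil
	s.cleared[fieldStartIP] = struct{}{}
}

func main() {
	s := ipStage{cleared: map[string]struct{}{}}
	s.Add(10)
	s.Add(5)
	fmt.Println(*s.add) // 15: one combined increment
	s.Set(100)          // absolute assignment discards the delta
	fmt.Println(*s.val, s.add == nil)
}
```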
+func (m *DecisionMutation) OldValue(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, fmt.Errorf("OldValue is allowed only on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, fmt.Errorf("OldValue requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldValue: %w", err) + } + return oldValue.Value, nil +} + +// ResetValue reset all changes of the "value" field. +func (m *DecisionMutation) ResetValue() { + m.value = nil +} + +// SetOrigin sets the origin field. +func (m *DecisionMutation) SetOrigin(s string) { + m.origin = &s +} + +// Origin returns the origin value in the mutation. +func (m *DecisionMutation) Origin() (r string, exists bool) { + v := m.origin + if v == nil { + return + } + return *v, true +} + +// OldOrigin returns the old origin value of the Decision. +// If the Decision object wasn't provided to the builder, the object is fetched +// from the database. +// An error is returned if the mutation operation is not UpdateOne, or database query fails. +func (m *DecisionMutation) OldOrigin(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, fmt.Errorf("OldOrigin is allowed only on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, fmt.Errorf("OldOrigin requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldOrigin: %w", err) + } + return oldValue.Origin, nil +} + +// ResetOrigin reset all changes of the "origin" field. +func (m *DecisionMutation) ResetOrigin() { + m.origin = nil +} + +// SetSimulated sets the simulated field. +func (m *DecisionMutation) SetSimulated(b bool) { + m.simulated = &b +} + +// Simulated returns the simulated value in the mutation. +func (m *DecisionMutation) Simulated() (r bool, exists bool) { + v := m.simulated + if v == nil { + return + } + return *v, true +} + +// OldSimulated returns the old simulated value of the Decision. +// If the Decision object wasn't provided to the builder, the object is fetched +// from the database. +// An error is returned if the mutation operation is not UpdateOne, or database query fails. +func (m *DecisionMutation) OldSimulated(ctx context.Context) (v bool, err error) { + if !m.op.Is(OpUpdateOne) { + return v, fmt.Errorf("OldSimulated is allowed only on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, fmt.Errorf("OldSimulated requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldSimulated: %w", err) + } + return oldValue.Simulated, nil +} + +// ResetSimulated reset all changes of the "simulated" field. +func (m *DecisionMutation) ResetSimulated() { + m.simulated = nil +} + +// SetOwnerID sets the owner edge to Alert by id. +func (m *DecisionMutation) SetOwnerID(id int) { + m.owner = &id +} + +// ClearOwner clears the owner edge to Alert. +func (m *DecisionMutation) ClearOwner() { + m.clearedowner = true +} + +// OwnerCleared returns if the edge owner was cleared. +func (m *DecisionMutation) OwnerCleared() bool { + return m.clearedowner +} + +// OwnerID returns the owner id in the mutation. +func (m *DecisionMutation) OwnerID() (id int, exists bool) { + if m.owner != nil { + return *m.owner, true + } + return +} + +// OwnerIDs returns the owner ids in the mutation. 
+// Note that ids always returns len(ids) <= 1 for unique edges, and you should use +// OwnerID instead. It exists only for internal usage by the builders. +func (m *DecisionMutation) OwnerIDs() (ids []int) { + if id := m.owner; id != nil { + ids = append(ids, *id) + } + return +} + +// ResetOwner reset all changes of the "owner" edge. +func (m *DecisionMutation) ResetOwner() { + m.owner = nil + m.clearedowner = false +} + +// Op returns the operation name. +func (m *DecisionMutation) Op() Op { + return m.op +} + +// Type returns the node type of this mutation (Decision). +func (m *DecisionMutation) Type() string { + return m.typ +} + +// Fields returns all fields that were changed during +// this mutation. Note that, in order to get all numeric +// fields that were in/decremented, call AddedFields(). +func (m *DecisionMutation) Fields() []string { + fields := make([]string, 0, 11) + if m.created_at != nil { + fields = append(fields, decision.FieldCreatedAt) + } + if m.updated_at != nil { + fields = append(fields, decision.FieldUpdatedAt) + } + if m.until != nil { + fields = append(fields, decision.FieldUntil) + } + if m.scenario != nil { + fields = append(fields, decision.FieldScenario) + } + if m._type != nil { + fields = append(fields, decision.FieldType) + } + if m.start_ip != nil { + fields = append(fields, decision.FieldStartIP) + } + if m.end_ip != nil { + fields = append(fields, decision.FieldEndIP) + } + if m.scope != nil { + fields = append(fields, decision.FieldScope) + } + if m.value != nil { + fields = append(fields, decision.FieldValue) + } + if m.origin != nil { + fields = append(fields, decision.FieldOrigin) + } + if m.simulated != nil { + fields = append(fields, decision.FieldSimulated) + } + return fields +} + +// Field returns the value of a field with the given name. +// The second boolean value indicates that this field was +// not set, or was not define in the schema. +func (m *DecisionMutation) Field(name string) (ent.Value, bool) { + switch name { + case decision.FieldCreatedAt: + return m.CreatedAt() + case decision.FieldUpdatedAt: + return m.UpdatedAt() + case decision.FieldUntil: + return m.Until() + case decision.FieldScenario: + return m.Scenario() + case decision.FieldType: + return m.GetType() + case decision.FieldStartIP: + return m.StartIP() + case decision.FieldEndIP: + return m.EndIP() + case decision.FieldScope: + return m.Scope() + case decision.FieldValue: + return m.Value() + case decision.FieldOrigin: + return m.Origin() + case decision.FieldSimulated: + return m.Simulated() + } + return nil, false +} + +// OldField returns the old value of the field from the database. +// An error is returned if the mutation operation is not UpdateOne, +// or the query to the database was failed. 
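Fields() and Field(), together with the OldField/SetField dispatch below, are the generic face of the mutation: the ent runtime and user hooks can enumerate and read staged changes by column name without knowing the concrete mutation type. A hedged hook sketch that audits every staged field through that surface; it assumes the generated package re-exports ent's Mutator/MutateFunc aliases as stock ent code does, and that the import path matches this repository's layout:

```go
package hooks

import (
	"context"
	"log"

	"github.com/crowdsecurity/crowdsec/pkg/database/ent" // assumed path
)

// auditFields logs every staged field on any mutation, using only the
// generic ent.Mutation surface backed by Fields/Field.
func auditFields(next ent.Mutator) ent.Mutator {
	return ent.MutateFunc(func(ctx context.Context, m ent.Mutation) (ent.Value, error) {
		for _, name := range m.Fields() {
			if v, ok := m.Field(name); ok {
				log.Printf("%s.%s staged as %v", m.Type(), name, v)
			}
		}
		return next.Mutate(ctx, m)
	})
}
```

Registered with client.Use(auditFields), such a hook would run around every mutation regardless of node type.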
+func (m *DecisionMutation) OldField(ctx context.Context, name string) (ent.Value, error) { + switch name { + case decision.FieldCreatedAt: + return m.OldCreatedAt(ctx) + case decision.FieldUpdatedAt: + return m.OldUpdatedAt(ctx) + case decision.FieldUntil: + return m.OldUntil(ctx) + case decision.FieldScenario: + return m.OldScenario(ctx) + case decision.FieldType: + return m.OldType(ctx) + case decision.FieldStartIP: + return m.OldStartIP(ctx) + case decision.FieldEndIP: + return m.OldEndIP(ctx) + case decision.FieldScope: + return m.OldScope(ctx) + case decision.FieldValue: + return m.OldValue(ctx) + case decision.FieldOrigin: + return m.OldOrigin(ctx) + case decision.FieldSimulated: + return m.OldSimulated(ctx) + } + return nil, fmt.Errorf("unknown Decision field %s", name) +} + +// SetField sets the value for the given name. It returns an +// error if the field is not defined in the schema, or if the +// type mismatch the field type. +func (m *DecisionMutation) SetField(name string, value ent.Value) error { + switch name { + case decision.FieldCreatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetCreatedAt(v) + return nil + case decision.FieldUpdatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetUpdatedAt(v) + return nil + case decision.FieldUntil: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetUntil(v) + return nil + case decision.FieldScenario: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetScenario(v) + return nil + case decision.FieldType: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetType(v) + return nil + case decision.FieldStartIP: + v, ok := value.(int64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetStartIP(v) + return nil + case decision.FieldEndIP: + v, ok := value.(int64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetEndIP(v) + return nil + case decision.FieldScope: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetScope(v) + return nil + case decision.FieldValue: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetValue(v) + return nil + case decision.FieldOrigin: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetOrigin(v) + return nil + case decision.FieldSimulated: + v, ok := value.(bool) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetSimulated(v) + return nil + } + return fmt.Errorf("unknown Decision field %s", name) +} + +// AddedFields returns all numeric fields that were incremented +// or decremented during this mutation. +func (m *DecisionMutation) AddedFields() []string { + var fields []string + if m.addstart_ip != nil { + fields = append(fields, decision.FieldStartIP) + } + if m.addend_ip != nil { + fields = append(fields, decision.FieldEndIP) + } + return fields +} + +// AddedField returns the numeric value that was in/decremented +// from a field with the given name. The second value indicates +// that this field was not set, or was not define in the schema. 
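SetField is the write half of that generic surface, with type safety enforced at runtime by the switch above rather than by the compiler: a mismatched value comes back as an "unexpected type" error instead of a panic. A hedged sketch of a hook that pins the simulated flag by column name (the "simulated" string is assumed to equal decision.FieldSimulated; same package-path assumption as before):

```go
package hooks

import (
	"context"

	"github.com/crowdsecurity/crowdsec/pkg/database/ent" // assumed path
)

// forceSimulated writes a field through the generic SetField entry
// point instead of calling the typed SetSimulated directly.
func forceSimulated(next ent.Mutator) ent.Mutator {
	return ent.MutateFunc(func(ctx context.Context, m ent.Mutation) (ent.Value, error) {
		if m.Type() == "Decision" {
			// SetField type-checks at runtime: a non-bool here would
			// return "unexpected type ... for field simulated".
			if err := m.SetField("simulated", true); err != nil {
				return nil, err
			}
		}
		return next.Mutate(ctx, m)
	})
}
```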
+func (m *DecisionMutation) AddedField(name string) (ent.Value, bool) { + switch name { + case decision.FieldStartIP: + return m.AddedStartIP() + case decision.FieldEndIP: + return m.AddedEndIP() + } + return nil, false +} + +// AddField adds the value for the given name. It returns an +// error if the field is not defined in the schema, or if the +// type mismatch the field type. +func (m *DecisionMutation) AddField(name string, value ent.Value) error { + switch name { + case decision.FieldStartIP: + v, ok := value.(int64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddStartIP(v) + return nil + case decision.FieldEndIP: + v, ok := value.(int64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddEndIP(v) + return nil + } + return fmt.Errorf("unknown Decision numeric field %s", name) +} + +// ClearedFields returns all nullable fields that were cleared +// during this mutation. +func (m *DecisionMutation) ClearedFields() []string { + var fields []string + if m.FieldCleared(decision.FieldStartIP) { + fields = append(fields, decision.FieldStartIP) + } + if m.FieldCleared(decision.FieldEndIP) { + fields = append(fields, decision.FieldEndIP) + } + return fields +} + +// FieldCleared returns a boolean indicates if this field was +// cleared in this mutation. +func (m *DecisionMutation) FieldCleared(name string) bool { + _, ok := m.clearedFields[name] + return ok +} + +// ClearField clears the value for the given name. It returns an +// error if the field is not defined in the schema. +func (m *DecisionMutation) ClearField(name string) error { + switch name { + case decision.FieldStartIP: + m.ClearStartIP() + return nil + case decision.FieldEndIP: + m.ClearEndIP() + return nil + } + return fmt.Errorf("unknown Decision nullable field %s", name) +} + +// ResetField resets all changes in the mutation regarding the +// given field name. It returns an error if the field is not +// defined in the schema. +func (m *DecisionMutation) ResetField(name string) error { + switch name { + case decision.FieldCreatedAt: + m.ResetCreatedAt() + return nil + case decision.FieldUpdatedAt: + m.ResetUpdatedAt() + return nil + case decision.FieldUntil: + m.ResetUntil() + return nil + case decision.FieldScenario: + m.ResetScenario() + return nil + case decision.FieldType: + m.ResetType() + return nil + case decision.FieldStartIP: + m.ResetStartIP() + return nil + case decision.FieldEndIP: + m.ResetEndIP() + return nil + case decision.FieldScope: + m.ResetScope() + return nil + case decision.FieldValue: + m.ResetValue() + return nil + case decision.FieldOrigin: + m.ResetOrigin() + return nil + case decision.FieldSimulated: + m.ResetSimulated() + return nil + } + return fmt.Errorf("unknown Decision field %s", name) +} + +// AddedEdges returns all edge names that were set/added in this +// mutation. +func (m *DecisionMutation) AddedEdges() []string { + edges := make([]string, 0, 1) + if m.owner != nil { + edges = append(edges, decision.EdgeOwner) + } + return edges +} + +// AddedIDs returns all ids (to other nodes) that were added for +// the given edge name. +func (m *DecisionMutation) AddedIDs(name string) []ent.Value { + switch name { + case decision.EdgeOwner: + if id := m.owner; id != nil { + return []ent.Value{*id} + } + } + return nil +} + +// RemovedEdges returns all edge names that were removed in this +// mutation. 
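Edges get the same two-level treatment as fields: typed accessors (SetOwnerID, OwnerID, OwnerCleared) for direct use, and AddedEdges/AddedIDs for the generic machinery, where the unique owner edge surfaces as a slice of at most one id. A hedged sketch of an integrity hook that insists a freshly created Decision carries its owner alert (assumes the generated package re-exports OpCreate and the concrete mutation types as stock ent does):

```go
package hooks

import (
	"context"
	"fmt"

	"github.com/crowdsecurity/crowdsec/pkg/database/ent" // assumed path
)

// requireOwner rejects Decision creations staged without the unique
// owner edge (no SetOwnerID and no deliberate ClearOwner).
func requireOwner(next ent.Mutator) ent.Mutator {
	return ent.MutateFunc(func(ctx context.Context, m ent.Mutation) (ent.Value, error) {
		if dm, ok := m.(*ent.DecisionMutation); ok && m.Op().Is(ent.OpCreate) {
			if _, exists := dm.OwnerID(); !exists && !dm.OwnerCleared() {
				// AddedIDs("owner") would likewise come back empty here
				return nil, fmt.Errorf("decision created without an owner alert")
			}
		}
		return next.Mutate(ctx, m)
	})
}
```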
+func (m *DecisionMutation) RemovedEdges() []string {
+	edges := make([]string, 0, 1)
+	return edges
+}
+
+// RemovedIDs returns all ids (to other nodes) that were removed for
+// the given edge name.
+func (m *DecisionMutation) RemovedIDs(name string) []ent.Value {
+	switch name {
+	}
+	return nil
+}
+
+// ClearedEdges returns all edge names that were cleared in this
+// mutation.
+func (m *DecisionMutation) ClearedEdges() []string {
+	edges := make([]string, 0, 1)
+	if m.clearedowner {
+		edges = append(edges, decision.EdgeOwner)
+	}
+	return edges
+}
+
+// EdgeCleared returns a boolean indicates if this edge was
+// cleared in this mutation.
+func (m *DecisionMutation) EdgeCleared(name string) bool {
+	switch name {
+	case decision.EdgeOwner:
+		return m.clearedowner
+	}
+	return false
+}
+
+// ClearEdge clears the value for the given name. It returns an
+// error if the edge name is not defined in the schema.
+func (m *DecisionMutation) ClearEdge(name string) error {
+	switch name {
+	case decision.EdgeOwner:
+		m.ClearOwner()
+		return nil
+	}
+	return fmt.Errorf("unknown Decision unique edge %s", name)
+}
+
+// ResetEdge resets all changes in the mutation regarding the
+// given edge name. It returns an error if the edge is not
+// defined in the schema.
+func (m *DecisionMutation) ResetEdge(name string) error {
+	switch name {
+	case decision.EdgeOwner:
+		m.ResetOwner()
+		return nil
+	}
+	return fmt.Errorf("unknown Decision edge %s", name)
+}
+
+// EventMutation represents an operation that mutate the Events
+// nodes in the graph.
+type EventMutation struct {
+	config
+	op            Op
+	typ           string
+	id            *int
+	created_at    *time.Time
+	updated_at    *time.Time
+	time          *time.Time
+	serialized    *string
+	clearedFields map[string]struct{}
+	owner         *int
+	clearedowner  bool
+	done          bool
+	oldValue      func(context.Context) (*Event, error)
+}
+
+var _ ent.Mutation = (*EventMutation)(nil)
+
+// eventOption allows to manage the mutation configuration using functional options.
+type eventOption func(*EventMutation)
+
+// newEventMutation creates new mutation for the Event entity.
+func newEventMutation(c config, op Op, opts ...eventOption) *EventMutation {
+	m := &EventMutation{
+		config:        c,
+		op:            op,
+		typ:           TypeEvent,
+		clearedFields: make(map[string]struct{}),
+	}
+	for _, opt := range opts {
+		opt(m)
+	}
+	return m
+}
+
+// withEventID sets the id field of the mutation.
+func withEventID(id int) eventOption {
+	return func(m *EventMutation) {
+		var (
+			err   error
+			once  sync.Once
+			value *Event
+		)
+		m.oldValue = func(ctx context.Context) (*Event, error) {
+			once.Do(func() {
+				if m.done {
+					err = fmt.Errorf("querying old values post mutation is not allowed")
+				} else {
+					value, err = m.Client().Event.Get(ctx, id)
+				}
+			})
+			return value, err
+		}
+		m.id = &id
+	}
+}
+
+// withEvent sets the old Event of the mutation.
+func withEvent(node *Event) eventOption {
+	return func(m *EventMutation) {
+		m.oldValue = func(context.Context) (*Event, error) {
+			return node, nil
+		}
+		m.id = &node.ID
+	}
+}
+
+// Client returns a new `ent.Client` from the mutation. If the mutation was
+// executed in a transaction (ent.Tx), a transactional client is returned.
+func (m EventMutation) Client() *Client {
+	client := &Client{config: m.config}
+	client.init()
+	return client
+}
+
+// Tx returns an `ent.Tx` for mutations that were executed in transactions;
+// it returns an error otherwise.
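The constructor options are what keep the Old* accessors cheap: withEventID installs a closure that fetches the pre-image at most once, with sync.Once memoizing both the value and the error, and the done flag flips after the mutation executes so stale pre-images can no longer be queried. A standalone sketch of that memoized-loader shape (illustrative names):

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"sync"
)

type row struct{ ID int }

// oldLoader mirrors m.oldValue: at most one fetch, shared by every
// OldX call made on the same mutation.
func oldLoader(fetch func(context.Context, int) (*row, error), id int, done *bool) func(context.Context) (*row, error) {
	var (
		once  sync.Once
		value *row
		err   error
	)
	return func(ctx context.Context) (*row, error) {
		once.Do(func() {
			if *done {
				err = errors.New("querying old values post mutation is not allowed")
				return
			}
			value, err = fetch(ctx, id)
		})
		return value, err
	}
}

func main() {
	calls := 0
	fetch := func(ctx context.Context, id int) (*row, error) {
		calls++ // stands in for the real SELECT in the generated code
		return &row{ID: id}, nil
	}
	done := false
	load := oldLoader(fetch, 42, &done)
	load(context.Background())
	load(context.Background())
	fmt.Println(calls) // 1: sync.Once collapsed both lookups
}
```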
+func (m EventMutation) Tx() (*Tx, error) { + if _, ok := m.driver.(*txDriver); !ok { + return nil, fmt.Errorf("ent: mutation is not running in a transaction") + } + tx := &Tx{config: m.config} + tx.init() + return tx, nil +} + +// ID returns the id value in the mutation. Note that, the id +// is available only if it was provided to the builder. +func (m *EventMutation) ID() (id int, exists bool) { + if m.id == nil { + return + } + return *m.id, true +} + +// SetCreatedAt sets the created_at field. +func (m *EventMutation) SetCreatedAt(t time.Time) { + m.created_at = &t +} + +// CreatedAt returns the created_at value in the mutation. +func (m *EventMutation) CreatedAt() (r time.Time, exists bool) { + v := m.created_at + if v == nil { + return + } + return *v, true +} + +// OldCreatedAt returns the old created_at value of the Event. +// If the Event object wasn't provided to the builder, the object is fetched +// from the database. +// An error is returned if the mutation operation is not UpdateOne, or database query fails. +func (m *EventMutation) OldCreatedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, fmt.Errorf("OldCreatedAt is allowed only on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, fmt.Errorf("OldCreatedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldCreatedAt: %w", err) + } + return oldValue.CreatedAt, nil +} + +// ResetCreatedAt reset all changes of the "created_at" field. +func (m *EventMutation) ResetCreatedAt() { + m.created_at = nil +} + +// SetUpdatedAt sets the updated_at field. +func (m *EventMutation) SetUpdatedAt(t time.Time) { + m.updated_at = &t +} + +// UpdatedAt returns the updated_at value in the mutation. +func (m *EventMutation) UpdatedAt() (r time.Time, exists bool) { + v := m.updated_at + if v == nil { + return + } + return *v, true +} + +// OldUpdatedAt returns the old updated_at value of the Event. +// If the Event object wasn't provided to the builder, the object is fetched +// from the database. +// An error is returned if the mutation operation is not UpdateOne, or database query fails. +func (m *EventMutation) OldUpdatedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, fmt.Errorf("OldUpdatedAt is allowed only on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, fmt.Errorf("OldUpdatedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldUpdatedAt: %w", err) + } + return oldValue.UpdatedAt, nil +} + +// ResetUpdatedAt reset all changes of the "updated_at" field. +func (m *EventMutation) ResetUpdatedAt() { + m.updated_at = nil +} + +// SetTime sets the time field. +func (m *EventMutation) SetTime(t time.Time) { + m.time = &t +} + +// Time returns the time value in the mutation. +func (m *EventMutation) Time() (r time.Time, exists bool) { + v := m.time + if v == nil { + return + } + return *v, true +} + +// OldTime returns the old time value of the Event. +// If the Event object wasn't provided to the builder, the object is fetched +// from the database. +// An error is returned if the mutation operation is not UpdateOne, or database query fails. 
+func (m *EventMutation) OldTime(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, fmt.Errorf("OldTime is allowed only on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, fmt.Errorf("OldTime requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldTime: %w", err) + } + return oldValue.Time, nil +} + +// ResetTime reset all changes of the "time" field. +func (m *EventMutation) ResetTime() { + m.time = nil +} + +// SetSerialized sets the serialized field. +func (m *EventMutation) SetSerialized(s string) { + m.serialized = &s +} + +// Serialized returns the serialized value in the mutation. +func (m *EventMutation) Serialized() (r string, exists bool) { + v := m.serialized + if v == nil { + return + } + return *v, true +} + +// OldSerialized returns the old serialized value of the Event. +// If the Event object wasn't provided to the builder, the object is fetched +// from the database. +// An error is returned if the mutation operation is not UpdateOne, or database query fails. +func (m *EventMutation) OldSerialized(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, fmt.Errorf("OldSerialized is allowed only on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, fmt.Errorf("OldSerialized requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldSerialized: %w", err) + } + return oldValue.Serialized, nil +} + +// ResetSerialized reset all changes of the "serialized" field. +func (m *EventMutation) ResetSerialized() { + m.serialized = nil +} + +// SetOwnerID sets the owner edge to Alert by id. +func (m *EventMutation) SetOwnerID(id int) { + m.owner = &id +} + +// ClearOwner clears the owner edge to Alert. +func (m *EventMutation) ClearOwner() { + m.clearedowner = true +} + +// OwnerCleared returns if the edge owner was cleared. +func (m *EventMutation) OwnerCleared() bool { + return m.clearedowner +} + +// OwnerID returns the owner id in the mutation. +func (m *EventMutation) OwnerID() (id int, exists bool) { + if m.owner != nil { + return *m.owner, true + } + return +} + +// OwnerIDs returns the owner ids in the mutation. +// Note that ids always returns len(ids) <= 1 for unique edges, and you should use +// OwnerID instead. It exists only for internal usage by the builders. +func (m *EventMutation) OwnerIDs() (ids []int) { + if id := m.owner; id != nil { + ids = append(ids, *id) + } + return +} + +// ResetOwner reset all changes of the "owner" edge. +func (m *EventMutation) ResetOwner() { + m.owner = nil + m.clearedowner = false +} + +// Op returns the operation name. +func (m *EventMutation) Op() Op { + return m.op +} + +// Type returns the node type of this mutation (Event). +func (m *EventMutation) Type() string { + return m.typ +} + +// Fields returns all fields that were changed during +// this mutation. Note that, in order to get all numeric +// fields that were in/decremented, call AddedFields(). 
+func (m *EventMutation) Fields() []string { + fields := make([]string, 0, 4) + if m.created_at != nil { + fields = append(fields, event.FieldCreatedAt) + } + if m.updated_at != nil { + fields = append(fields, event.FieldUpdatedAt) + } + if m.time != nil { + fields = append(fields, event.FieldTime) + } + if m.serialized != nil { + fields = append(fields, event.FieldSerialized) + } + return fields +} + +// Field returns the value of a field with the given name. +// The second boolean value indicates that this field was +// not set, or was not define in the schema. +func (m *EventMutation) Field(name string) (ent.Value, bool) { + switch name { + case event.FieldCreatedAt: + return m.CreatedAt() + case event.FieldUpdatedAt: + return m.UpdatedAt() + case event.FieldTime: + return m.Time() + case event.FieldSerialized: + return m.Serialized() + } + return nil, false +} + +// OldField returns the old value of the field from the database. +// An error is returned if the mutation operation is not UpdateOne, +// or the query to the database was failed. +func (m *EventMutation) OldField(ctx context.Context, name string) (ent.Value, error) { + switch name { + case event.FieldCreatedAt: + return m.OldCreatedAt(ctx) + case event.FieldUpdatedAt: + return m.OldUpdatedAt(ctx) + case event.FieldTime: + return m.OldTime(ctx) + case event.FieldSerialized: + return m.OldSerialized(ctx) + } + return nil, fmt.Errorf("unknown Event field %s", name) +} + +// SetField sets the value for the given name. It returns an +// error if the field is not defined in the schema, or if the +// type mismatch the field type. +func (m *EventMutation) SetField(name string, value ent.Value) error { + switch name { + case event.FieldCreatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetCreatedAt(v) + return nil + case event.FieldUpdatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetUpdatedAt(v) + return nil + case event.FieldTime: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetTime(v) + return nil + case event.FieldSerialized: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetSerialized(v) + return nil + } + return fmt.Errorf("unknown Event field %s", name) +} + +// AddedFields returns all numeric fields that were incremented +// or decremented during this mutation. +func (m *EventMutation) AddedFields() []string { + return nil +} + +// AddedField returns the numeric value that was in/decremented +// from a field with the given name. The second value indicates +// that this field was not set, or was not define in the schema. +func (m *EventMutation) AddedField(name string) (ent.Value, bool) { + return nil, false +} + +// AddField adds the value for the given name. It returns an +// error if the field is not defined in the schema, or if the +// type mismatch the field type. +func (m *EventMutation) AddField(name string, value ent.Value) error { + switch name { + } + return fmt.Errorf("unknown Event numeric field %s", name) +} + +// ClearedFields returns all nullable fields that were cleared +// during this mutation. +func (m *EventMutation) ClearedFields() []string { + return nil +} + +// FieldCleared returns a boolean indicates if this field was +// cleared in this mutation. 
+func (m *EventMutation) FieldCleared(name string) bool { + _, ok := m.clearedFields[name] + return ok +} + +// ClearField clears the value for the given name. It returns an +// error if the field is not defined in the schema. +func (m *EventMutation) ClearField(name string) error { + return fmt.Errorf("unknown Event nullable field %s", name) +} + +// ResetField resets all changes in the mutation regarding the +// given field name. It returns an error if the field is not +// defined in the schema. +func (m *EventMutation) ResetField(name string) error { + switch name { + case event.FieldCreatedAt: + m.ResetCreatedAt() + return nil + case event.FieldUpdatedAt: + m.ResetUpdatedAt() + return nil + case event.FieldTime: + m.ResetTime() + return nil + case event.FieldSerialized: + m.ResetSerialized() + return nil + } + return fmt.Errorf("unknown Event field %s", name) +} + +// AddedEdges returns all edge names that were set/added in this +// mutation. +func (m *EventMutation) AddedEdges() []string { + edges := make([]string, 0, 1) + if m.owner != nil { + edges = append(edges, event.EdgeOwner) + } + return edges +} + +// AddedIDs returns all ids (to other nodes) that were added for +// the given edge name. +func (m *EventMutation) AddedIDs(name string) []ent.Value { + switch name { + case event.EdgeOwner: + if id := m.owner; id != nil { + return []ent.Value{*id} + } + } + return nil +} + +// RemovedEdges returns all edge names that were removed in this +// mutation. +func (m *EventMutation) RemovedEdges() []string { + edges := make([]string, 0, 1) + return edges +} + +// RemovedIDs returns all ids (to other nodes) that were removed for +// the given edge name. +func (m *EventMutation) RemovedIDs(name string) []ent.Value { + switch name { + } + return nil +} + +// ClearedEdges returns all edge names that were cleared in this +// mutation. +func (m *EventMutation) ClearedEdges() []string { + edges := make([]string, 0, 1) + if m.clearedowner { + edges = append(edges, event.EdgeOwner) + } + return edges +} + +// EdgeCleared returns a boolean indicates if this edge was +// cleared in this mutation. +func (m *EventMutation) EdgeCleared(name string) bool { + switch name { + case event.EdgeOwner: + return m.clearedowner + } + return false +} + +// ClearEdge clears the value for the given name. It returns an +// error if the edge name is not defined in the schema. +func (m *EventMutation) ClearEdge(name string) error { + switch name { + case event.EdgeOwner: + m.ClearOwner() + return nil + } + return fmt.Errorf("unknown Event unique edge %s", name) +} + +// ResetEdge resets all changes in the mutation regarding the +// given edge name. It returns an error if the edge is not +// defined in the schema. +func (m *EventMutation) ResetEdge(name string) error { + switch name { + case event.EdgeOwner: + m.ResetOwner() + return nil + } + return fmt.Errorf("unknown Event edge %s", name) +} + +// MachineMutation represents an operation that mutate the Machines +// nodes in the graph. 
+type MachineMutation struct {
+	config
+	op            Op
+	typ           string
+	id            *int
+	created_at    *time.Time
+	updated_at    *time.Time
+	machineId     *string
+	password      *string
+	ipAddress     *string
+	scenarios     *string
+	version       *string
+	isValidated   *bool
+	status        *string
+	clearedFields map[string]struct{}
+	alerts        map[int]struct{}
+	removedalerts map[int]struct{}
+	clearedalerts bool
+	done          bool
+	oldValue      func(context.Context) (*Machine, error)
+}
+
+var _ ent.Mutation = (*MachineMutation)(nil)
+
+// machineOption allows to manage the mutation configuration using functional options.
+type machineOption func(*MachineMutation)
+
+// newMachineMutation creates new mutation for the Machine entity.
+func newMachineMutation(c config, op Op, opts ...machineOption) *MachineMutation {
+	m := &MachineMutation{
+		config:        c,
+		op:            op,
+		typ:           TypeMachine,
+		clearedFields: make(map[string]struct{}),
+	}
+	for _, opt := range opts {
+		opt(m)
+	}
+	return m
+}
+
+// withMachineID sets the id field of the mutation.
+func withMachineID(id int) machineOption {
+	return func(m *MachineMutation) {
+		var (
+			err   error
+			once  sync.Once
+			value *Machine
+		)
+		m.oldValue = func(ctx context.Context) (*Machine, error) {
+			once.Do(func() {
+				if m.done {
+					err = fmt.Errorf("querying old values post mutation is not allowed")
+				} else {
+					value, err = m.Client().Machine.Get(ctx, id)
+				}
+			})
+			return value, err
+		}
+		m.id = &id
+	}
+}
+
+// withMachine sets the old Machine of the mutation.
+func withMachine(node *Machine) machineOption {
+	return func(m *MachineMutation) {
+		m.oldValue = func(context.Context) (*Machine, error) {
+			return node, nil
+		}
+		m.id = &node.ID
+	}
+}
+
+// Client returns a new `ent.Client` from the mutation. If the mutation was
+// executed in a transaction (ent.Tx), a transactional client is returned.
+func (m MachineMutation) Client() *Client {
+	client := &Client{config: m.config}
+	client.init()
+	return client
+}
+
+// Tx returns an `ent.Tx` for mutations that were executed in transactions;
+// it returns an error otherwise.
+func (m MachineMutation) Tx() (*Tx, error) {
+	if _, ok := m.driver.(*txDriver); !ok {
+		return nil, fmt.Errorf("ent: mutation is not running in a transaction")
+	}
+	tx := &Tx{config: m.config}
+	tx.init()
+	return tx, nil
+}
+
+// ID returns the id value in the mutation. Note that, the id
+// is available only if it was provided to the builder.
+func (m *MachineMutation) ID() (id int, exists bool) {
+	if m.id == nil {
+		return
+	}
+	return *m.id, true
+}
+
+// SetCreatedAt sets the created_at field.
+func (m *MachineMutation) SetCreatedAt(t time.Time) {
+	m.created_at = &t
+}
+
+// CreatedAt returns the created_at value in the mutation.
+func (m *MachineMutation) CreatedAt() (r time.Time, exists bool) {
+	v := m.created_at
+	if v == nil {
+		return
+	}
+	return *v, true
+}
+
+// OldCreatedAt returns the old created_at value of the Machine.
+// If the Machine object wasn't provided to the builder, the object is fetched
+// from the database.
+// An error is returned if the mutation operation is not UpdateOne, or database query fails.
+func (m *MachineMutation) OldCreatedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, fmt.Errorf("OldCreatedAt is allowed only on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, fmt.Errorf("OldCreatedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldCreatedAt: %w", err) + } + return oldValue.CreatedAt, nil +} + +// ResetCreatedAt reset all changes of the "created_at" field. +func (m *MachineMutation) ResetCreatedAt() { + m.created_at = nil +} + +// SetUpdatedAt sets the updated_at field. +func (m *MachineMutation) SetUpdatedAt(t time.Time) { + m.updated_at = &t +} + +// UpdatedAt returns the updated_at value in the mutation. +func (m *MachineMutation) UpdatedAt() (r time.Time, exists bool) { + v := m.updated_at + if v == nil { + return + } + return *v, true +} + +// OldUpdatedAt returns the old updated_at value of the Machine. +// If the Machine object wasn't provided to the builder, the object is fetched +// from the database. +// An error is returned if the mutation operation is not UpdateOne, or database query fails. +func (m *MachineMutation) OldUpdatedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, fmt.Errorf("OldUpdatedAt is allowed only on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, fmt.Errorf("OldUpdatedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldUpdatedAt: %w", err) + } + return oldValue.UpdatedAt, nil +} + +// ResetUpdatedAt reset all changes of the "updated_at" field. +func (m *MachineMutation) ResetUpdatedAt() { + m.updated_at = nil +} + +// SetMachineId sets the machineId field. +func (m *MachineMutation) SetMachineId(s string) { + m.machineId = &s +} + +// MachineId returns the machineId value in the mutation. +func (m *MachineMutation) MachineId() (r string, exists bool) { + v := m.machineId + if v == nil { + return + } + return *v, true +} + +// OldMachineId returns the old machineId value of the Machine. +// If the Machine object wasn't provided to the builder, the object is fetched +// from the database. +// An error is returned if the mutation operation is not UpdateOne, or database query fails. +func (m *MachineMutation) OldMachineId(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, fmt.Errorf("OldMachineId is allowed only on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, fmt.Errorf("OldMachineId requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldMachineId: %w", err) + } + return oldValue.MachineId, nil +} + +// ResetMachineId reset all changes of the "machineId" field. +func (m *MachineMutation) ResetMachineId() { + m.machineId = nil +} + +// SetPassword sets the password field. +func (m *MachineMutation) SetPassword(s string) { + m.password = &s +} + +// Password returns the password value in the mutation. +func (m *MachineMutation) Password() (r string, exists bool) { + v := m.password + if v == nil { + return + } + return *v, true +} + +// OldPassword returns the old password value of the Machine. +// If the Machine object wasn't provided to the builder, the object is fetched +// from the database. 
+// An error is returned if the mutation operation is not UpdateOne, or database query fails. +func (m *MachineMutation) OldPassword(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, fmt.Errorf("OldPassword is allowed only on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, fmt.Errorf("OldPassword requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldPassword: %w", err) + } + return oldValue.Password, nil +} + +// ResetPassword reset all changes of the "password" field. +func (m *MachineMutation) ResetPassword() { + m.password = nil +} + +// SetIpAddress sets the ipAddress field. +func (m *MachineMutation) SetIpAddress(s string) { + m.ipAddress = &s +} + +// IpAddress returns the ipAddress value in the mutation. +func (m *MachineMutation) IpAddress() (r string, exists bool) { + v := m.ipAddress + if v == nil { + return + } + return *v, true +} + +// OldIpAddress returns the old ipAddress value of the Machine. +// If the Machine object wasn't provided to the builder, the object is fetched +// from the database. +// An error is returned if the mutation operation is not UpdateOne, or database query fails. +func (m *MachineMutation) OldIpAddress(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, fmt.Errorf("OldIpAddress is allowed only on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, fmt.Errorf("OldIpAddress requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldIpAddress: %w", err) + } + return oldValue.IpAddress, nil +} + +// ResetIpAddress reset all changes of the "ipAddress" field. +func (m *MachineMutation) ResetIpAddress() { + m.ipAddress = nil +} + +// SetScenarios sets the scenarios field. +func (m *MachineMutation) SetScenarios(s string) { + m.scenarios = &s +} + +// Scenarios returns the scenarios value in the mutation. +func (m *MachineMutation) Scenarios() (r string, exists bool) { + v := m.scenarios + if v == nil { + return + } + return *v, true +} + +// OldScenarios returns the old scenarios value of the Machine. +// If the Machine object wasn't provided to the builder, the object is fetched +// from the database. +// An error is returned if the mutation operation is not UpdateOne, or database query fails. +func (m *MachineMutation) OldScenarios(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, fmt.Errorf("OldScenarios is allowed only on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, fmt.Errorf("OldScenarios requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldScenarios: %w", err) + } + return oldValue.Scenarios, nil +} + +// ClearScenarios clears the value of scenarios. +func (m *MachineMutation) ClearScenarios() { + m.scenarios = nil + m.clearedFields[machine.FieldScenarios] = struct{}{} +} + +// ScenariosCleared returns if the field scenarios was cleared in this mutation. +func (m *MachineMutation) ScenariosCleared() bool { + _, ok := m.clearedFields[machine.FieldScenarios] + return ok +} + +// ResetScenarios reset all changes of the "scenarios" field. 
+func (m *MachineMutation) ResetScenarios() { + m.scenarios = nil + delete(m.clearedFields, machine.FieldScenarios) +} + +// SetVersion sets the version field. +func (m *MachineMutation) SetVersion(s string) { + m.version = &s +} + +// Version returns the version value in the mutation. +func (m *MachineMutation) Version() (r string, exists bool) { + v := m.version + if v == nil { + return + } + return *v, true +} + +// OldVersion returns the old version value of the Machine. +// If the Machine object wasn't provided to the builder, the object is fetched +// from the database. +// An error is returned if the mutation operation is not UpdateOne, or database query fails. +func (m *MachineMutation) OldVersion(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, fmt.Errorf("OldVersion is allowed only on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, fmt.Errorf("OldVersion requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldVersion: %w", err) + } + return oldValue.Version, nil +} + +// ClearVersion clears the value of version. +func (m *MachineMutation) ClearVersion() { + m.version = nil + m.clearedFields[machine.FieldVersion] = struct{}{} +} + +// VersionCleared returns if the field version was cleared in this mutation. +func (m *MachineMutation) VersionCleared() bool { + _, ok := m.clearedFields[machine.FieldVersion] + return ok +} + +// ResetVersion reset all changes of the "version" field. +func (m *MachineMutation) ResetVersion() { + m.version = nil + delete(m.clearedFields, machine.FieldVersion) +} + +// SetIsValidated sets the isValidated field. +func (m *MachineMutation) SetIsValidated(b bool) { + m.isValidated = &b +} + +// IsValidated returns the isValidated value in the mutation. +func (m *MachineMutation) IsValidated() (r bool, exists bool) { + v := m.isValidated + if v == nil { + return + } + return *v, true +} + +// OldIsValidated returns the old isValidated value of the Machine. +// If the Machine object wasn't provided to the builder, the object is fetched +// from the database. +// An error is returned if the mutation operation is not UpdateOne, or database query fails. +func (m *MachineMutation) OldIsValidated(ctx context.Context) (v bool, err error) { + if !m.op.Is(OpUpdateOne) { + return v, fmt.Errorf("OldIsValidated is allowed only on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, fmt.Errorf("OldIsValidated requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldIsValidated: %w", err) + } + return oldValue.IsValidated, nil +} + +// ResetIsValidated reset all changes of the "isValidated" field. +func (m *MachineMutation) ResetIsValidated() { + m.isValidated = nil +} + +// SetStatus sets the status field. +func (m *MachineMutation) SetStatus(s string) { + m.status = &s +} + +// Status returns the status value in the mutation. +func (m *MachineMutation) Status() (r string, exists bool) { + v := m.status + if v == nil { + return + } + return *v, true +} + +// OldStatus returns the old status value of the Machine. +// If the Machine object wasn't provided to the builder, the object is fetched +// from the database. +// An error is returned if the mutation operation is not UpdateOne, or database query fails. 
+func (m *MachineMutation) OldStatus(ctx context.Context) (v string, err error) {
+	if !m.op.Is(OpUpdateOne) {
+		return v, fmt.Errorf("OldStatus is allowed only on UpdateOne operations")
+	}
+	if m.id == nil || m.oldValue == nil {
+		return v, fmt.Errorf("OldStatus requires an ID field in the mutation")
+	}
+	oldValue, err := m.oldValue(ctx)
+	if err != nil {
+		return v, fmt.Errorf("querying old value for OldStatus: %w", err)
+	}
+	return oldValue.Status, nil
+}
+
+// ClearStatus clears the value of status.
+func (m *MachineMutation) ClearStatus() {
+	m.status = nil
+	m.clearedFields[machine.FieldStatus] = struct{}{}
+}
+
+// StatusCleared returns if the field status was cleared in this mutation.
+func (m *MachineMutation) StatusCleared() bool {
+	_, ok := m.clearedFields[machine.FieldStatus]
+	return ok
+}
+
+// ResetStatus reset all changes of the "status" field.
+func (m *MachineMutation) ResetStatus() {
+	m.status = nil
+	delete(m.clearedFields, machine.FieldStatus)
+}
+
+// AddAlertIDs adds the alerts edge to Alert by ids.
+func (m *MachineMutation) AddAlertIDs(ids ...int) {
+	if m.alerts == nil {
+		m.alerts = make(map[int]struct{})
+	}
+	for i := range ids {
+		m.alerts[ids[i]] = struct{}{}
+	}
+}
+
+// ClearAlerts clears the alerts edge to Alert.
+func (m *MachineMutation) ClearAlerts() {
+	m.clearedalerts = true
+}
+
+// AlertsCleared returns if the edge alerts was cleared.
+func (m *MachineMutation) AlertsCleared() bool {
+	return m.clearedalerts
+}
+
+// RemoveAlertIDs removes the alerts edge to Alert by ids.
+func (m *MachineMutation) RemoveAlertIDs(ids ...int) {
+	if m.removedalerts == nil {
+		m.removedalerts = make(map[int]struct{})
+	}
+	for i := range ids {
+		m.removedalerts[ids[i]] = struct{}{}
+	}
+}
+
+// RemovedAlertsIDs returns the removed ids of alerts.
+func (m *MachineMutation) RemovedAlertsIDs() (ids []int) {
+	for id := range m.removedalerts {
+		ids = append(ids, id)
+	}
+	return
+}
+
+// AlertsIDs returns the alerts ids in the mutation.
+func (m *MachineMutation) AlertsIDs() (ids []int) {
+	for id := range m.alerts {
+		ids = append(ids, id)
+	}
+	return
+}
+
+// ResetAlerts reset all changes of the "alerts" edge.
+func (m *MachineMutation) ResetAlerts() {
+	m.alerts = nil
+	m.clearedalerts = false
+	m.removedalerts = nil
+}
+
+// Op returns the operation name.
+func (m *MachineMutation) Op() Op {
+	return m.op
+}
+
+// Type returns the node type of this mutation (Machine).
+func (m *MachineMutation) Type() string {
+	return m.typ
+}
+
+// Fields returns all fields that were changed during
+// this mutation. Note that, in order to get all numeric
+// fields that were in/decremented, call AddedFields().
+func (m *MachineMutation) Fields() []string {
+	fields := make([]string, 0, 9)
+	if m.created_at != nil {
+		fields = append(fields, machine.FieldCreatedAt)
+	}
+	if m.updated_at != nil {
+		fields = append(fields, machine.FieldUpdatedAt)
+	}
+	if m.machineId != nil {
+		fields = append(fields, machine.FieldMachineId)
+	}
+	if m.password != nil {
+		fields = append(fields, machine.FieldPassword)
+	}
+	if m.ipAddress != nil {
+		fields = append(fields, machine.FieldIpAddress)
+	}
+	if m.scenarios != nil {
+		fields = append(fields, machine.FieldScenarios)
+	}
+	if m.version != nil {
+		fields = append(fields, machine.FieldVersion)
+	}
+	if m.isValidated != nil {
+		fields = append(fields, machine.FieldIsValidated)
+	}
+	if m.status != nil {
+		fields = append(fields, machine.FieldStatus)
+	}
+	return fields
+}
+
+// Field returns the value of a field with the given name.
+// The second boolean value indicates that this field was +// not set, or was not define in the schema. +func (m *MachineMutation) Field(name string) (ent.Value, bool) { + switch name { + case machine.FieldCreatedAt: + return m.CreatedAt() + case machine.FieldUpdatedAt: + return m.UpdatedAt() + case machine.FieldMachineId: + return m.MachineId() + case machine.FieldPassword: + return m.Password() + case machine.FieldIpAddress: + return m.IpAddress() + case machine.FieldScenarios: + return m.Scenarios() + case machine.FieldVersion: + return m.Version() + case machine.FieldIsValidated: + return m.IsValidated() + case machine.FieldStatus: + return m.Status() + } + return nil, false +} + +// OldField returns the old value of the field from the database. +// An error is returned if the mutation operation is not UpdateOne, +// or the query to the database was failed. +func (m *MachineMutation) OldField(ctx context.Context, name string) (ent.Value, error) { + switch name { + case machine.FieldCreatedAt: + return m.OldCreatedAt(ctx) + case machine.FieldUpdatedAt: + return m.OldUpdatedAt(ctx) + case machine.FieldMachineId: + return m.OldMachineId(ctx) + case machine.FieldPassword: + return m.OldPassword(ctx) + case machine.FieldIpAddress: + return m.OldIpAddress(ctx) + case machine.FieldScenarios: + return m.OldScenarios(ctx) + case machine.FieldVersion: + return m.OldVersion(ctx) + case machine.FieldIsValidated: + return m.OldIsValidated(ctx) + case machine.FieldStatus: + return m.OldStatus(ctx) + } + return nil, fmt.Errorf("unknown Machine field %s", name) +} + +// SetField sets the value for the given name. It returns an +// error if the field is not defined in the schema, or if the +// type mismatch the field type. +func (m *MachineMutation) SetField(name string, value ent.Value) error { + switch name { + case machine.FieldCreatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetCreatedAt(v) + return nil + case machine.FieldUpdatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetUpdatedAt(v) + return nil + case machine.FieldMachineId: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetMachineId(v) + return nil + case machine.FieldPassword: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetPassword(v) + return nil + case machine.FieldIpAddress: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetIpAddress(v) + return nil + case machine.FieldScenarios: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetScenarios(v) + return nil + case machine.FieldVersion: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetVersion(v) + return nil + case machine.FieldIsValidated: + v, ok := value.(bool) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetIsValidated(v) + return nil + case machine.FieldStatus: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetStatus(v) + return nil + } + return fmt.Errorf("unknown Machine field %s", name) +} + +// AddedFields returns all numeric fields that were incremented +// or decremented during this mutation. 
+func (m *MachineMutation) AddedFields() []string { + return nil +} + +// AddedField returns the numeric value that was in/decremented +// from a field with the given name. The second value indicates +// that this field was not set, or was not define in the schema. +func (m *MachineMutation) AddedField(name string) (ent.Value, bool) { + return nil, false +} + +// AddField adds the value for the given name. It returns an +// error if the field is not defined in the schema, or if the +// type mismatch the field type. +func (m *MachineMutation) AddField(name string, value ent.Value) error { + switch name { + } + return fmt.Errorf("unknown Machine numeric field %s", name) +} + +// ClearedFields returns all nullable fields that were cleared +// during this mutation. +func (m *MachineMutation) ClearedFields() []string { + var fields []string + if m.FieldCleared(machine.FieldScenarios) { + fields = append(fields, machine.FieldScenarios) + } + if m.FieldCleared(machine.FieldVersion) { + fields = append(fields, machine.FieldVersion) + } + if m.FieldCleared(machine.FieldStatus) { + fields = append(fields, machine.FieldStatus) + } + return fields +} + +// FieldCleared returns a boolean indicates if this field was +// cleared in this mutation. +func (m *MachineMutation) FieldCleared(name string) bool { + _, ok := m.clearedFields[name] + return ok +} + +// ClearField clears the value for the given name. It returns an +// error if the field is not defined in the schema. +func (m *MachineMutation) ClearField(name string) error { + switch name { + case machine.FieldScenarios: + m.ClearScenarios() + return nil + case machine.FieldVersion: + m.ClearVersion() + return nil + case machine.FieldStatus: + m.ClearStatus() + return nil + } + return fmt.Errorf("unknown Machine nullable field %s", name) +} + +// ResetField resets all changes in the mutation regarding the +// given field name. It returns an error if the field is not +// defined in the schema. +func (m *MachineMutation) ResetField(name string) error { + switch name { + case machine.FieldCreatedAt: + m.ResetCreatedAt() + return nil + case machine.FieldUpdatedAt: + m.ResetUpdatedAt() + return nil + case machine.FieldMachineId: + m.ResetMachineId() + return nil + case machine.FieldPassword: + m.ResetPassword() + return nil + case machine.FieldIpAddress: + m.ResetIpAddress() + return nil + case machine.FieldScenarios: + m.ResetScenarios() + return nil + case machine.FieldVersion: + m.ResetVersion() + return nil + case machine.FieldIsValidated: + m.ResetIsValidated() + return nil + case machine.FieldStatus: + m.ResetStatus() + return nil + } + return fmt.Errorf("unknown Machine field %s", name) +} + +// AddedEdges returns all edge names that were set/added in this +// mutation. +func (m *MachineMutation) AddedEdges() []string { + edges := make([]string, 0, 1) + if m.alerts != nil { + edges = append(edges, machine.EdgeAlerts) + } + return edges +} + +// AddedIDs returns all ids (to other nodes) that were added for +// the given edge name. +func (m *MachineMutation) AddedIDs(name string) []ent.Value { + switch name { + case machine.EdgeAlerts: + ids := make([]ent.Value, 0, len(m.alerts)) + for id := range m.alerts { + ids = append(ids, id) + } + return ids + } + return nil +} + +// RemovedEdges returns all edge names that were removed in this +// mutation. 
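Where Decision and Event carry a unique owner edge, Machine's alerts edge is one-to-many, so staging needs set semantics: ids to attach and ids to detach live in separate maps so a single update can do both, and clearedalerts is a distinct "drop them all" signal rather than a removal of every known id. A compact standalone sketch of that bookkeeping (illustrative names):

```go
package main

import "fmt"

// edgeStage mirrors the alerts/removedalerts/clearedalerts trio on
// MachineMutation; the SQL layer later applies removals and additions
// as separate statements on the join column.
type edgeStage struct {
	added   map[int]struct{}
	removed map[int]struct{}
	cleared bool
}

func (e *edgeStage) Add(ids ...int) {
	if e.added == nil {
		e.added = make(map[int]struct{})
	}
	for _, id := range ids {
		e.added[id] = struct{}{}
	}
}

func (e *edgeStage) Remove(ids ...int) {
	if e.removed == nil {
		e.removed = make(map[int]struct{})
	}
	for _, id := range ids {
		e.removed[id] = struct{}{}
	}
}

func main() {
	var e edgeStage
	e.Add(1, 2, 3)
	e.Remove(2)
	// one mutation can attach and detach alerts at the same time
	fmt.Println(len(e.added), len(e.removed), e.cleared) // 3 1 false
}
```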
+func (m *MachineMutation) RemovedEdges() []string {
+	edges := make([]string, 0, 1)
+	if m.removedalerts != nil {
+		edges = append(edges, machine.EdgeAlerts)
+	}
+	return edges
+}
+
+// RemovedIDs returns all ids (to other nodes) that were removed for
+// the given edge name.
+func (m *MachineMutation) RemovedIDs(name string) []ent.Value {
+	switch name {
+	case machine.EdgeAlerts:
+		ids := make([]ent.Value, 0, len(m.removedalerts))
+		for id := range m.removedalerts {
+			ids = append(ids, id)
+		}
+		return ids
+	}
+	return nil
+}
+
+// ClearedEdges returns all edge names that were cleared in this
+// mutation.
+func (m *MachineMutation) ClearedEdges() []string {
+	edges := make([]string, 0, 1)
+	if m.clearedalerts {
+		edges = append(edges, machine.EdgeAlerts)
+	}
+	return edges
+}
+
+// EdgeCleared returns a boolean indicates if this edge was
+// cleared in this mutation.
+func (m *MachineMutation) EdgeCleared(name string) bool {
+	switch name {
+	case machine.EdgeAlerts:
+		return m.clearedalerts
+	}
+	return false
+}
+
+// ClearEdge clears the value for the given name. It returns an
+// error if the edge name is not defined in the schema.
+func (m *MachineMutation) ClearEdge(name string) error {
+	switch name {
+	}
+	return fmt.Errorf("unknown Machine unique edge %s", name)
+}
+
+// ResetEdge resets all changes in the mutation regarding the
+// given edge name. It returns an error if the edge is not
+// defined in the schema.
+func (m *MachineMutation) ResetEdge(name string) error {
+	switch name {
+	case machine.EdgeAlerts:
+		m.ResetAlerts()
+		return nil
+	}
+	return fmt.Errorf("unknown Machine edge %s", name)
+}
+
+// MetaMutation represents an operation that mutate the MetaSlice
+// nodes in the graph.
+type MetaMutation struct {
+	config
+	op            Op
+	typ           string
+	id            *int
+	created_at    *time.Time
+	updated_at    *time.Time
+	key           *string
+	value         *string
+	clearedFields map[string]struct{}
+	owner         *int
+	clearedowner  bool
+	done          bool
+	oldValue      func(context.Context) (*Meta, error)
+}
+
+var _ ent.Mutation = (*MetaMutation)(nil)
+
+// metaOption allows to manage the mutation configuration using functional options.
+type metaOption func(*MetaMutation)
+
+// newMetaMutation creates new mutation for the Meta entity.
+func newMetaMutation(c config, op Op, opts ...metaOption) *MetaMutation {
+	m := &MetaMutation{
+		config:        c,
+		op:            op,
+		typ:           TypeMeta,
+		clearedFields: make(map[string]struct{}),
+	}
+	for _, opt := range opts {
+		opt(m)
+	}
+	return m
+}
+
+// withMetaID sets the id field of the mutation.
+func withMetaID(id int) metaOption {
+	return func(m *MetaMutation) {
+		var (
+			err   error
+			once  sync.Once
+			value *Meta
+		)
+		m.oldValue = func(ctx context.Context) (*Meta, error) {
+			once.Do(func() {
+				if m.done {
+					err = fmt.Errorf("querying old values post mutation is not allowed")
+				} else {
+					value, err = m.Client().Meta.Get(ctx, id)
+				}
+			})
+			return value, err
+		}
+		m.id = &id
+	}
+}
+
+// withMeta sets the old Meta of the mutation.
+func withMeta(node *Meta) metaOption {
+	return func(m *MetaMutation) {
+		m.oldValue = func(context.Context) (*Meta, error) {
+			return node, nil
+		}
+		m.id = &node.ID
+	}
+}
+
+// Client returns a new `ent.Client` from the mutation. If the mutation was
+// executed in a transaction (ent.Tx), a transactional client is returned.
+func (m MetaMutation) Client() *Client {
+	client := &Client{config: m.config}
+	client.init()
+	return client
+}
+
+// Tx returns an `ent.Tx` for mutations that were executed in transactions;
+// it returns an error otherwise.
+func (m MetaMutation) Tx() (*Tx, error) { + if _, ok := m.driver.(*txDriver); !ok { + return nil, fmt.Errorf("ent: mutation is not running in a transaction") + } + tx := &Tx{config: m.config} + tx.init() + return tx, nil +} + +// ID returns the id value in the mutation. Note that, the id +// is available only if it was provided to the builder. +func (m *MetaMutation) ID() (id int, exists bool) { + if m.id == nil { + return + } + return *m.id, true +} + +// SetCreatedAt sets the created_at field. +func (m *MetaMutation) SetCreatedAt(t time.Time) { + m.created_at = &t +} + +// CreatedAt returns the created_at value in the mutation. +func (m *MetaMutation) CreatedAt() (r time.Time, exists bool) { + v := m.created_at + if v == nil { + return + } + return *v, true +} + +// OldCreatedAt returns the old created_at value of the Meta. +// If the Meta object wasn't provided to the builder, the object is fetched +// from the database. +// An error is returned if the mutation operation is not UpdateOne, or database query fails. +func (m *MetaMutation) OldCreatedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, fmt.Errorf("OldCreatedAt is allowed only on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, fmt.Errorf("OldCreatedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldCreatedAt: %w", err) + } + return oldValue.CreatedAt, nil +} + +// ResetCreatedAt reset all changes of the "created_at" field. +func (m *MetaMutation) ResetCreatedAt() { + m.created_at = nil +} + +// SetUpdatedAt sets the updated_at field. +func (m *MetaMutation) SetUpdatedAt(t time.Time) { + m.updated_at = &t +} + +// UpdatedAt returns the updated_at value in the mutation. +func (m *MetaMutation) UpdatedAt() (r time.Time, exists bool) { + v := m.updated_at + if v == nil { + return + } + return *v, true +} + +// OldUpdatedAt returns the old updated_at value of the Meta. +// If the Meta object wasn't provided to the builder, the object is fetched +// from the database. +// An error is returned if the mutation operation is not UpdateOne, or database query fails. +func (m *MetaMutation) OldUpdatedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, fmt.Errorf("OldUpdatedAt is allowed only on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, fmt.Errorf("OldUpdatedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldUpdatedAt: %w", err) + } + return oldValue.UpdatedAt, nil +} + +// ResetUpdatedAt reset all changes of the "updated_at" field. +func (m *MetaMutation) ResetUpdatedAt() { + m.updated_at = nil +} + +// SetKey sets the key field. +func (m *MetaMutation) SetKey(s string) { + m.key = &s +} + +// Key returns the key value in the mutation. +func (m *MetaMutation) Key() (r string, exists bool) { + v := m.key + if v == nil { + return + } + return *v, true +} + +// OldKey returns the old key value of the Meta. +// If the Meta object wasn't provided to the builder, the object is fetched +// from the database. +// An error is returned if the mutation operation is not UpdateOne, or database query fails. 
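The `Old*` accessors here are only usable on `UpdateOne` operations, since that is the only case where a single previous row is well defined. A hedged sketch of an audit hook built on that contract (imports as in the previous sketch):

```go
// AuditMetaValue logs value transitions on single-row Meta updates. The old
// value is fetched lazily through the mutation's oldValue closure, so it
// costs a query only when an update actually touches the field.
func AuditMetaValue(next ent.Mutator) ent.Mutator {
	return ent.MutateFunc(func(ctx context.Context, m ent.Mutation) (ent.Value, error) {
		if mm, ok := m.(*ent.MetaMutation); ok && mm.Op().Is(ent.OpUpdateOne) {
			if cur, ok := mm.Value(); ok {
				if old, err := mm.OldValue(ctx); err == nil && old != cur {
					log.Printf("meta value %q -> %q", old, cur)
				}
			}
		}
		return next.Mutate(ctx, m)
	})
}
```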
+func (m *MetaMutation) OldKey(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, fmt.Errorf("OldKey is allowed only on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, fmt.Errorf("OldKey requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldKey: %w", err) + } + return oldValue.Key, nil +} + +// ResetKey reset all changes of the "key" field. +func (m *MetaMutation) ResetKey() { + m.key = nil +} + +// SetValue sets the value field. +func (m *MetaMutation) SetValue(s string) { + m.value = &s +} + +// Value returns the value value in the mutation. +func (m *MetaMutation) Value() (r string, exists bool) { + v := m.value + if v == nil { + return + } + return *v, true +} + +// OldValue returns the old value value of the Meta. +// If the Meta object wasn't provided to the builder, the object is fetched +// from the database. +// An error is returned if the mutation operation is not UpdateOne, or database query fails. +func (m *MetaMutation) OldValue(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, fmt.Errorf("OldValue is allowed only on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, fmt.Errorf("OldValue requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldValue: %w", err) + } + return oldValue.Value, nil +} + +// ResetValue reset all changes of the "value" field. +func (m *MetaMutation) ResetValue() { + m.value = nil +} + +// SetOwnerID sets the owner edge to Alert by id. +func (m *MetaMutation) SetOwnerID(id int) { + m.owner = &id +} + +// ClearOwner clears the owner edge to Alert. +func (m *MetaMutation) ClearOwner() { + m.clearedowner = true +} + +// OwnerCleared returns if the edge owner was cleared. +func (m *MetaMutation) OwnerCleared() bool { + return m.clearedowner +} + +// OwnerID returns the owner id in the mutation. +func (m *MetaMutation) OwnerID() (id int, exists bool) { + if m.owner != nil { + return *m.owner, true + } + return +} + +// OwnerIDs returns the owner ids in the mutation. +// Note that ids always returns len(ids) <= 1 for unique edges, and you should use +// OwnerID instead. It exists only for internal usage by the builders. +func (m *MetaMutation) OwnerIDs() (ids []int) { + if id := m.owner; id != nil { + ids = append(ids, *id) + } + return +} + +// ResetOwner reset all changes of the "owner" edge. +func (m *MetaMutation) ResetOwner() { + m.owner = nil + m.clearedowner = false +} + +// Op returns the operation name. +func (m *MetaMutation) Op() Op { + return m.op +} + +// Type returns the node type of this mutation (Meta). +func (m *MetaMutation) Type() string { + return m.typ +} + +// Fields returns all fields that were changed during +// this mutation. Note that, in order to get all numeric +// fields that were in/decremented, call AddedFields(). +func (m *MetaMutation) Fields() []string { + fields := make([]string, 0, 4) + if m.created_at != nil { + fields = append(fields, meta.FieldCreatedAt) + } + if m.updated_at != nil { + fields = append(fields, meta.FieldUpdatedAt) + } + if m.key != nil { + fields = append(fields, meta.FieldKey) + } + if m.value != nil { + fields = append(fields, meta.FieldValue) + } + return fields +} + +// Field returns the value of a field with the given name. 
+// The second boolean value indicates that this field was +// not set, or was not define in the schema. +func (m *MetaMutation) Field(name string) (ent.Value, bool) { + switch name { + case meta.FieldCreatedAt: + return m.CreatedAt() + case meta.FieldUpdatedAt: + return m.UpdatedAt() + case meta.FieldKey: + return m.Key() + case meta.FieldValue: + return m.Value() + } + return nil, false +} + +// OldField returns the old value of the field from the database. +// An error is returned if the mutation operation is not UpdateOne, +// or the query to the database was failed. +func (m *MetaMutation) OldField(ctx context.Context, name string) (ent.Value, error) { + switch name { + case meta.FieldCreatedAt: + return m.OldCreatedAt(ctx) + case meta.FieldUpdatedAt: + return m.OldUpdatedAt(ctx) + case meta.FieldKey: + return m.OldKey(ctx) + case meta.FieldValue: + return m.OldValue(ctx) + } + return nil, fmt.Errorf("unknown Meta field %s", name) +} + +// SetField sets the value for the given name. It returns an +// error if the field is not defined in the schema, or if the +// type mismatch the field type. +func (m *MetaMutation) SetField(name string, value ent.Value) error { + switch name { + case meta.FieldCreatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetCreatedAt(v) + return nil + case meta.FieldUpdatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetUpdatedAt(v) + return nil + case meta.FieldKey: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetKey(v) + return nil + case meta.FieldValue: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetValue(v) + return nil + } + return fmt.Errorf("unknown Meta field %s", name) +} + +// AddedFields returns all numeric fields that were incremented +// or decremented during this mutation. +func (m *MetaMutation) AddedFields() []string { + return nil +} + +// AddedField returns the numeric value that was in/decremented +// from a field with the given name. The second value indicates +// that this field was not set, or was not define in the schema. +func (m *MetaMutation) AddedField(name string) (ent.Value, bool) { + return nil, false +} + +// AddField adds the value for the given name. It returns an +// error if the field is not defined in the schema, or if the +// type mismatch the field type. +func (m *MetaMutation) AddField(name string, value ent.Value) error { + switch name { + } + return fmt.Errorf("unknown Meta numeric field %s", name) +} + +// ClearedFields returns all nullable fields that were cleared +// during this mutation. +func (m *MetaMutation) ClearedFields() []string { + return nil +} + +// FieldCleared returns a boolean indicates if this field was +// cleared in this mutation. +func (m *MetaMutation) FieldCleared(name string) bool { + _, ok := m.clearedFields[name] + return ok +} + +// ClearField clears the value for the given name. It returns an +// error if the field is not defined in the schema. +func (m *MetaMutation) ClearField(name string) error { + return fmt.Errorf("unknown Meta nullable field %s", name) +} + +// ResetField resets all changes in the mutation regarding the +// given field name. It returns an error if the field is not +// defined in the schema. 
+func (m *MetaMutation) ResetField(name string) error { + switch name { + case meta.FieldCreatedAt: + m.ResetCreatedAt() + return nil + case meta.FieldUpdatedAt: + m.ResetUpdatedAt() + return nil + case meta.FieldKey: + m.ResetKey() + return nil + case meta.FieldValue: + m.ResetValue() + return nil + } + return fmt.Errorf("unknown Meta field %s", name) +} + +// AddedEdges returns all edge names that were set/added in this +// mutation. +func (m *MetaMutation) AddedEdges() []string { + edges := make([]string, 0, 1) + if m.owner != nil { + edges = append(edges, meta.EdgeOwner) + } + return edges +} + +// AddedIDs returns all ids (to other nodes) that were added for +// the given edge name. +func (m *MetaMutation) AddedIDs(name string) []ent.Value { + switch name { + case meta.EdgeOwner: + if id := m.owner; id != nil { + return []ent.Value{*id} + } + } + return nil +} + +// RemovedEdges returns all edge names that were removed in this +// mutation. +func (m *MetaMutation) RemovedEdges() []string { + edges := make([]string, 0, 1) + return edges +} + +// RemovedIDs returns all ids (to other nodes) that were removed for +// the given edge name. +func (m *MetaMutation) RemovedIDs(name string) []ent.Value { + switch name { + } + return nil +} + +// ClearedEdges returns all edge names that were cleared in this +// mutation. +func (m *MetaMutation) ClearedEdges() []string { + edges := make([]string, 0, 1) + if m.clearedowner { + edges = append(edges, meta.EdgeOwner) + } + return edges +} + +// EdgeCleared returns a boolean indicates if this edge was +// cleared in this mutation. +func (m *MetaMutation) EdgeCleared(name string) bool { + switch name { + case meta.EdgeOwner: + return m.clearedowner + } + return false +} + +// ClearEdge clears the value for the given name. It returns an +// error if the edge name is not defined in the schema. +func (m *MetaMutation) ClearEdge(name string) error { + switch name { + case meta.EdgeOwner: + m.ClearOwner() + return nil + } + return fmt.Errorf("unknown Meta unique edge %s", name) +} + +// ResetEdge resets all changes in the mutation regarding the +// given edge name. It returns an error if the edge is not +// defined in the schema. +func (m *MetaMutation) ResetEdge(name string) error { + switch name { + case meta.EdgeOwner: + m.ResetOwner() + return nil + } + return fmt.Errorf("unknown Meta edge %s", name) +} diff --git a/pkg/database/ent/predicate/predicate.go b/pkg/database/ent/predicate/predicate.go new file mode 100644 index 000000000..28aee33d7 --- /dev/null +++ b/pkg/database/ent/predicate/predicate.go @@ -0,0 +1,25 @@ +// Code generated by entc, DO NOT EDIT. + +package predicate + +import ( + "github.com/facebook/ent/dialect/sql" +) + +// Alert is the predicate function for alert builders. +type Alert func(*sql.Selector) + +// Bouncer is the predicate function for bouncer builders. +type Bouncer func(*sql.Selector) + +// Decision is the predicate function for decision builders. +type Decision func(*sql.Selector) + +// Event is the predicate function for event builders. +type Event func(*sql.Selector) + +// Machine is the predicate function for machine builders. +type Machine func(*sql.Selector) + +// Meta is the predicate function for meta builders. +type Meta func(*sql.Selector) diff --git a/pkg/database/ent/privacy/privacy.go b/pkg/database/ent/privacy/privacy.go new file mode 100644 index 000000000..5407a51be --- /dev/null +++ b/pkg/database/ent/privacy/privacy.go @@ -0,0 +1,355 @@ +// Code generated by entc, DO NOT EDIT. 
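predicate.go above defines each predicate as a plain function over `*sql.Selector`, which means one-off conditions can be expressed without regenerating code. A sketch, assuming the default column name `created_at` from the schemas later in this patch:

```go
package database

import (
	"context"
	"time"

	"github.com/facebook/ent/dialect/sql"

	"github.com/crowdsecurity/crowdsec/pkg/database/ent"
	"github.com/crowdsecurity/crowdsec/pkg/database/ent/predicate"
)

// recentMachines uses a hand-written predicate instead of a generated one
// to fetch machines created during the last 24 hours.
func recentMachines(ctx context.Context, client *ent.Client) ([]*ent.Machine, error) {
	recent := predicate.Machine(func(s *sql.Selector) {
		s.Where(sql.GT(s.C("created_at"), time.Now().Add(-24*time.Hour)))
	})
	return client.Machine.Query().Where(recent).All(ctx)
}
```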
+ +package privacy + +import ( + "context" + "errors" + "fmt" + + "github.com/crowdsecurity/crowdsec/pkg/database/ent" +) + +var ( + // Allow may be returned by rules to indicate that the policy + // evaluation should terminate with an allow decision. + Allow = errors.New("ent/privacy: allow rule") + + // Deny may be returned by rules to indicate that the policy + // evaluation should terminate with an deny decision. + Deny = errors.New("ent/privacy: deny rule") + + // Skip may be returned by rules to indicate that the policy + // evaluation should continue to the next rule. + Skip = errors.New("ent/privacy: skip rule") +) + +// Allowf returns an formatted wrapped Allow decision. +func Allowf(format string, a ...interface{}) error { + return fmt.Errorf(format+": %w", append(a, Allow)...) +} + +// Denyf returns an formatted wrapped Deny decision. +func Denyf(format string, a ...interface{}) error { + return fmt.Errorf(format+": %w", append(a, Deny)...) +} + +// Skipf returns an formatted wrapped Skip decision. +func Skipf(format string, a ...interface{}) error { + return fmt.Errorf(format+": %w", append(a, Skip)...) +} + +type decisionCtxKey struct{} + +// DecisionContext creates a decision context. +func DecisionContext(parent context.Context, decision error) context.Context { + if decision == nil || errors.Is(decision, Skip) { + return parent + } + return context.WithValue(parent, decisionCtxKey{}, decision) +} + +func decisionFromContext(ctx context.Context) (error, bool) { + decision, ok := ctx.Value(decisionCtxKey{}).(error) + if ok && errors.Is(decision, Allow) { + decision = nil + } + return decision, ok +} + +type ( + // QueryPolicy combines multiple query rules into a single policy. + QueryPolicy []QueryRule + + // QueryRule defines the interface deciding whether a + // query is allowed and optionally modify it. + QueryRule interface { + EvalQuery(context.Context, ent.Query) error + } +) + +// EvalQuery evaluates a query against a query policy. +func (policy QueryPolicy) EvalQuery(ctx context.Context, q ent.Query) error { + if decision, ok := decisionFromContext(ctx); ok { + return decision + } + for _, rule := range policy { + switch decision := rule.EvalQuery(ctx, q); { + case decision == nil || errors.Is(decision, Skip): + case errors.Is(decision, Allow): + return nil + default: + return decision + } + } + return nil +} + +// QueryRuleFunc type is an adapter to allow the use of +// ordinary functions as query rules. +type QueryRuleFunc func(context.Context, ent.Query) error + +// Eval returns f(ctx, q). +func (f QueryRuleFunc) EvalQuery(ctx context.Context, q ent.Query) error { + return f(ctx, q) +} + +type ( + // MutationPolicy combines multiple mutation rules into a single policy. + MutationPolicy []MutationRule + + // MutationRule defines the interface deciding whether a + // mutation is allowed and optionally modify it. + MutationRule interface { + EvalMutation(context.Context, ent.Mutation) error + } +) + +// EvalMutation evaluates a mutation against a mutation policy. +func (policy MutationPolicy) EvalMutation(ctx context.Context, m ent.Mutation) error { + if decision, ok := decisionFromContext(ctx); ok { + return decision + } + for _, rule := range policy { + switch decision := rule.EvalMutation(ctx, m); { + case decision == nil || errors.Is(decision, Skip): + case errors.Is(decision, Allow): + return nil + default: + return decision + } + } + return nil +} + +// MutationRuleFunc type is an adapter to allow the use of +// ordinary functions as mutation rules. 
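`DecisionContext` lets a caller pre-decide policy evaluation for an entire call tree, which is useful for trusted internal jobs; `decisionFromContext` then returns before any rule runs. A minimal sketch (only meaningful once a policy is actually attached to a schema, which this patch does not do):

```go
package database

import (
	"context"

	"github.com/crowdsecurity/crowdsec/pkg/database/ent"
	"github.com/crowdsecurity/crowdsec/pkg/database/ent/privacy"
)

// forceDeleteMachine seeds the context with an Allow decision so any
// privacy rules are bypassed for this internal cleanup path.
func forceDeleteMachine(client *ent.Client, id int) error {
	ctx := privacy.DecisionContext(context.Background(), privacy.Allow)
	return client.Machine.DeleteOneID(id).Exec(ctx)
}
```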
+type MutationRuleFunc func(context.Context, ent.Mutation) error + +// EvalMutation returns f(ctx, m). +func (f MutationRuleFunc) EvalMutation(ctx context.Context, m ent.Mutation) error { + return f(ctx, m) +} + +// Policy groups query and mutation policies. +type Policy struct { + Query QueryPolicy + Mutation MutationPolicy +} + +// EvalQuery forwards evaluation to query policy. +func (policy Policy) EvalQuery(ctx context.Context, q ent.Query) error { + return policy.Query.EvalQuery(ctx, q) +} + +// EvalMutation forwards evaluation to mutation policy. +func (policy Policy) EvalMutation(ctx context.Context, m ent.Mutation) error { + return policy.Mutation.EvalMutation(ctx, m) +} + +// QueryMutationRule is the interface that groups query and mutation rules. +type QueryMutationRule interface { + QueryRule + MutationRule +} + +// AlwaysAllowRule returns a rule that returns an allow decision. +func AlwaysAllowRule() QueryMutationRule { + return fixedDecision{Allow} +} + +// AlwaysDenyRule returns a rule that returns a deny decision. +func AlwaysDenyRule() QueryMutationRule { + return fixedDecision{Deny} +} + +type fixedDecision struct { + decision error +} + +func (f fixedDecision) EvalQuery(context.Context, ent.Query) error { + return f.decision +} + +func (f fixedDecision) EvalMutation(context.Context, ent.Mutation) error { + return f.decision +} + +type contextDecision struct { + eval func(context.Context) error +} + +// ContextQueryMutationRule creates a query/mutation rule from a context eval func. +func ContextQueryMutationRule(eval func(context.Context) error) QueryMutationRule { + return contextDecision{eval} +} + +func (c contextDecision) EvalQuery(ctx context.Context, _ ent.Query) error { + return c.eval(ctx) +} + +func (c contextDecision) EvalMutation(ctx context.Context, _ ent.Mutation) error { + return c.eval(ctx) +} + +// OnMutationOperation evaluates the given rule only on a given mutation operation. +func OnMutationOperation(rule MutationRule, op ent.Op) MutationRule { + return MutationRuleFunc(func(ctx context.Context, m ent.Mutation) error { + if m.Op().Is(op) { + return rule.EvalMutation(ctx, m) + } + return Skip + }) +} + +// DenyMutationOperationRule returns a rule denying specified mutation operation. +func DenyMutationOperationRule(op ent.Op) MutationRule { + rule := MutationRuleFunc(func(_ context.Context, m ent.Mutation) error { + return Denyf("ent/privacy: operation %s is not allowed", m.Op()) + }) + return OnMutationOperation(rule, op) +} + +// The AlertQueryRuleFunc type is an adapter to allow the use of ordinary +// functions as a query rule. +type AlertQueryRuleFunc func(context.Context, *ent.AlertQuery) error + +// EvalQuery return f(ctx, q). +func (f AlertQueryRuleFunc) EvalQuery(ctx context.Context, q ent.Query) error { + if q, ok := q.(*ent.AlertQuery); ok { + return f(ctx, q) + } + return Denyf("ent/privacy: unexpected query type %T, expect *ent.AlertQuery", q) +} + +// The AlertMutationRuleFunc type is an adapter to allow the use of ordinary +// functions as a mutation rule. +type AlertMutationRuleFunc func(context.Context, *ent.AlertMutation) error + +// EvalMutation calls f(ctx, m). +func (f AlertMutationRuleFunc) EvalMutation(ctx context.Context, m ent.Mutation) error { + if m, ok := m.(*ent.AlertMutation); ok { + return f(ctx, m) + } + return Denyf("ent/privacy: unexpected mutation type %T, expect *ent.AlertMutation", m) +} + +// The BouncerQueryRuleFunc type is an adapter to allow the use of ordinary +// functions as a query rule. 
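Note that both `EvalQuery` and `EvalMutation` above fall through to `nil` (allow) when no rule decides, so policies conventionally end with an explicit terminal rule. A hypothetical wiring, not part of this patch; in practice entc's privacy feature also relocates the schema stitching into `ent/runtime` to avoid the schema -> privacy -> ent import cycle this sketch would otherwise create:

```go
package schema

import (
	"github.com/facebook/ent"

	"github.com/crowdsecurity/crowdsec/pkg/database/ent/privacy"
)

// Policy rejects Machine deletions and explicitly allows everything else.
func (Machine) Policy() ent.Policy {
	return privacy.Policy{
		Mutation: privacy.MutationPolicy{
			privacy.DenyMutationOperationRule(ent.OpDelete | ent.OpDeleteOne),
			privacy.AlwaysAllowRule(),
		},
		Query: privacy.QueryPolicy{
			privacy.AlwaysAllowRule(),
		},
	}
}
```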
+type BouncerQueryRuleFunc func(context.Context, *ent.BouncerQuery) error + +// EvalQuery return f(ctx, q). +func (f BouncerQueryRuleFunc) EvalQuery(ctx context.Context, q ent.Query) error { + if q, ok := q.(*ent.BouncerQuery); ok { + return f(ctx, q) + } + return Denyf("ent/privacy: unexpected query type %T, expect *ent.BouncerQuery", q) +} + +// The BouncerMutationRuleFunc type is an adapter to allow the use of ordinary +// functions as a mutation rule. +type BouncerMutationRuleFunc func(context.Context, *ent.BouncerMutation) error + +// EvalMutation calls f(ctx, m). +func (f BouncerMutationRuleFunc) EvalMutation(ctx context.Context, m ent.Mutation) error { + if m, ok := m.(*ent.BouncerMutation); ok { + return f(ctx, m) + } + return Denyf("ent/privacy: unexpected mutation type %T, expect *ent.BouncerMutation", m) +} + +// The DecisionQueryRuleFunc type is an adapter to allow the use of ordinary +// functions as a query rule. +type DecisionQueryRuleFunc func(context.Context, *ent.DecisionQuery) error + +// EvalQuery return f(ctx, q). +func (f DecisionQueryRuleFunc) EvalQuery(ctx context.Context, q ent.Query) error { + if q, ok := q.(*ent.DecisionQuery); ok { + return f(ctx, q) + } + return Denyf("ent/privacy: unexpected query type %T, expect *ent.DecisionQuery", q) +} + +// The DecisionMutationRuleFunc type is an adapter to allow the use of ordinary +// functions as a mutation rule. +type DecisionMutationRuleFunc func(context.Context, *ent.DecisionMutation) error + +// EvalMutation calls f(ctx, m). +func (f DecisionMutationRuleFunc) EvalMutation(ctx context.Context, m ent.Mutation) error { + if m, ok := m.(*ent.DecisionMutation); ok { + return f(ctx, m) + } + return Denyf("ent/privacy: unexpected mutation type %T, expect *ent.DecisionMutation", m) +} + +// The EventQueryRuleFunc type is an adapter to allow the use of ordinary +// functions as a query rule. +type EventQueryRuleFunc func(context.Context, *ent.EventQuery) error + +// EvalQuery return f(ctx, q). +func (f EventQueryRuleFunc) EvalQuery(ctx context.Context, q ent.Query) error { + if q, ok := q.(*ent.EventQuery); ok { + return f(ctx, q) + } + return Denyf("ent/privacy: unexpected query type %T, expect *ent.EventQuery", q) +} + +// The EventMutationRuleFunc type is an adapter to allow the use of ordinary +// functions as a mutation rule. +type EventMutationRuleFunc func(context.Context, *ent.EventMutation) error + +// EvalMutation calls f(ctx, m). +func (f EventMutationRuleFunc) EvalMutation(ctx context.Context, m ent.Mutation) error { + if m, ok := m.(*ent.EventMutation); ok { + return f(ctx, m) + } + return Denyf("ent/privacy: unexpected mutation type %T, expect *ent.EventMutation", m) +} + +// The MachineQueryRuleFunc type is an adapter to allow the use of ordinary +// functions as a query rule. +type MachineQueryRuleFunc func(context.Context, *ent.MachineQuery) error + +// EvalQuery return f(ctx, q). +func (f MachineQueryRuleFunc) EvalQuery(ctx context.Context, q ent.Query) error { + if q, ok := q.(*ent.MachineQuery); ok { + return f(ctx, q) + } + return Denyf("ent/privacy: unexpected query type %T, expect *ent.MachineQuery", q) +} + +// The MachineMutationRuleFunc type is an adapter to allow the use of ordinary +// functions as a mutation rule. +type MachineMutationRuleFunc func(context.Context, *ent.MachineMutation) error + +// EvalMutation calls f(ctx, m). 
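The typed adapters can do more than allow or deny: because they receive the concrete query builder, a rule can narrow what the query returns. A sketch using the generated `machine` predicates:

```go
package database

import (
	"context"

	"github.com/crowdsecurity/crowdsec/pkg/database/ent"
	"github.com/crowdsecurity/crowdsec/pkg/database/ent/machine"
	"github.com/crowdsecurity/crowdsec/pkg/database/ent/privacy"
)

// FilterUnvalidatedMachines restricts every Machine query to validated rows,
// then returns Skip so later rules still make the allow/deny decision.
func FilterUnvalidatedMachines() privacy.QueryRule {
	return privacy.MachineQueryRuleFunc(func(ctx context.Context, q *ent.MachineQuery) error {
		q.Where(machine.IsValidatedEQ(true))
		return privacy.Skip
	})
}
```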
+func (f MachineMutationRuleFunc) EvalMutation(ctx context.Context, m ent.Mutation) error { + if m, ok := m.(*ent.MachineMutation); ok { + return f(ctx, m) + } + return Denyf("ent/privacy: unexpected mutation type %T, expect *ent.MachineMutation", m) +} + +// The MetaQueryRuleFunc type is an adapter to allow the use of ordinary +// functions as a query rule. +type MetaQueryRuleFunc func(context.Context, *ent.MetaQuery) error + +// EvalQuery return f(ctx, q). +func (f MetaQueryRuleFunc) EvalQuery(ctx context.Context, q ent.Query) error { + if q, ok := q.(*ent.MetaQuery); ok { + return f(ctx, q) + } + return Denyf("ent/privacy: unexpected query type %T, expect *ent.MetaQuery", q) +} + +// The MetaMutationRuleFunc type is an adapter to allow the use of ordinary +// functions as a mutation rule. +type MetaMutationRuleFunc func(context.Context, *ent.MetaMutation) error + +// EvalMutation calls f(ctx, m). +func (f MetaMutationRuleFunc) EvalMutation(ctx context.Context, m ent.Mutation) error { + if m, ok := m.(*ent.MetaMutation); ok { + return f(ctx, m) + } + return Denyf("ent/privacy: unexpected mutation type %T, expect *ent.MetaMutation", m) +} diff --git a/pkg/database/ent/runtime.go b/pkg/database/ent/runtime.go new file mode 100644 index 000000000..e95336d98 --- /dev/null +++ b/pkg/database/ent/runtime.go @@ -0,0 +1,137 @@ +// Code generated by entc, DO NOT EDIT. + +package ent + +import ( + "time" + + "github.com/crowdsecurity/crowdsec/pkg/database/ent/alert" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/bouncer" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/decision" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/event" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/machine" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/meta" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/schema" +) + +// The init function reads all schema descriptors with runtime +// code (default values, validators or hooks) and stitches it +// to their package variables. +func init() { + alertFields := schema.Alert{}.Fields() + _ = alertFields + // alertDescCreatedAt is the schema descriptor for created_at field. + alertDescCreatedAt := alertFields[0].Descriptor() + // alert.DefaultCreatedAt holds the default value on creation for the created_at field. + alert.DefaultCreatedAt = alertDescCreatedAt.Default.(func() time.Time) + // alertDescUpdatedAt is the schema descriptor for updated_at field. + alertDescUpdatedAt := alertFields[1].Descriptor() + // alert.DefaultUpdatedAt holds the default value on creation for the updated_at field. + alert.DefaultUpdatedAt = alertDescUpdatedAt.Default.(func() time.Time) + // alertDescBucketId is the schema descriptor for bucketId field. + alertDescBucketId := alertFields[3].Descriptor() + // alert.DefaultBucketId holds the default value on creation for the bucketId field. + alert.DefaultBucketId = alertDescBucketId.Default.(string) + // alertDescMessage is the schema descriptor for message field. + alertDescMessage := alertFields[4].Descriptor() + // alert.DefaultMessage holds the default value on creation for the message field. + alert.DefaultMessage = alertDescMessage.Default.(string) + // alertDescEventsCount is the schema descriptor for eventsCount field. + alertDescEventsCount := alertFields[5].Descriptor() + // alert.DefaultEventsCount holds the default value on creation for the eventsCount field. + alert.DefaultEventsCount = alertDescEventsCount.Default.(int32) + // alertDescStartedAt is the schema descriptor for startedAt field. 
+ alertDescStartedAt := alertFields[6].Descriptor() + // alert.DefaultStartedAt holds the default value on creation for the startedAt field. + alert.DefaultStartedAt = alertDescStartedAt.Default.(func() time.Time) + // alertDescStoppedAt is the schema descriptor for stoppedAt field. + alertDescStoppedAt := alertFields[7].Descriptor() + // alert.DefaultStoppedAt holds the default value on creation for the stoppedAt field. + alert.DefaultStoppedAt = alertDescStoppedAt.Default.(func() time.Time) + // alertDescSimulated is the schema descriptor for simulated field. + alertDescSimulated := alertFields[21].Descriptor() + // alert.DefaultSimulated holds the default value on creation for the simulated field. + alert.DefaultSimulated = alertDescSimulated.Default.(bool) + bouncerFields := schema.Bouncer{}.Fields() + _ = bouncerFields + // bouncerDescCreatedAt is the schema descriptor for created_at field. + bouncerDescCreatedAt := bouncerFields[0].Descriptor() + // bouncer.DefaultCreatedAt holds the default value on creation for the created_at field. + bouncer.DefaultCreatedAt = bouncerDescCreatedAt.Default.(func() time.Time) + // bouncerDescUpdatedAt is the schema descriptor for updated_at field. + bouncerDescUpdatedAt := bouncerFields[1].Descriptor() + // bouncer.DefaultUpdatedAt holds the default value on creation for the updated_at field. + bouncer.DefaultUpdatedAt = bouncerDescUpdatedAt.Default.(func() time.Time) + // bouncerDescIPAddress is the schema descriptor for ip_address field. + bouncerDescIPAddress := bouncerFields[5].Descriptor() + // bouncer.DefaultIPAddress holds the default value on creation for the ip_address field. + bouncer.DefaultIPAddress = bouncerDescIPAddress.Default.(string) + // bouncerDescUntil is the schema descriptor for until field. + bouncerDescUntil := bouncerFields[8].Descriptor() + // bouncer.DefaultUntil holds the default value on creation for the until field. + bouncer.DefaultUntil = bouncerDescUntil.Default.(func() time.Time) + // bouncerDescLastPull is the schema descriptor for last_pull field. + bouncerDescLastPull := bouncerFields[9].Descriptor() + // bouncer.DefaultLastPull holds the default value on creation for the last_pull field. + bouncer.DefaultLastPull = bouncerDescLastPull.Default.(func() time.Time) + decisionFields := schema.Decision{}.Fields() + _ = decisionFields + // decisionDescCreatedAt is the schema descriptor for created_at field. + decisionDescCreatedAt := decisionFields[0].Descriptor() + // decision.DefaultCreatedAt holds the default value on creation for the created_at field. + decision.DefaultCreatedAt = decisionDescCreatedAt.Default.(func() time.Time) + // decisionDescUpdatedAt is the schema descriptor for updated_at field. + decisionDescUpdatedAt := decisionFields[1].Descriptor() + // decision.DefaultUpdatedAt holds the default value on creation for the updated_at field. + decision.DefaultUpdatedAt = decisionDescUpdatedAt.Default.(func() time.Time) + // decisionDescSimulated is the schema descriptor for simulated field. + decisionDescSimulated := decisionFields[10].Descriptor() + // decision.DefaultSimulated holds the default value on creation for the simulated field. + decision.DefaultSimulated = decisionDescSimulated.Default.(bool) + eventFields := schema.Event{}.Fields() + _ = eventFields + // eventDescCreatedAt is the schema descriptor for created_at field. + eventDescCreatedAt := eventFields[0].Descriptor() + // event.DefaultCreatedAt holds the default value on creation for the created_at field. 
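A practical effect of this stitching: create builders may omit any field with a registered default. A sketch (SetAPIKey is the assumed generated setter name for the `api_key` field):

```go
package database

import (
	"context"

	"github.com/crowdsecurity/crowdsec/pkg/database/ent"
)

// registerBouncer sets only the fields without defaults; created_at,
// updated_at, ip_address, until and last_pull come from the defaults
// registered in init() above.
func registerBouncer(ctx context.Context, client *ent.Client, hashedKey string) (*ent.Bouncer, error) {
	return client.Bouncer.Create().
		SetName("firewall-1").
		SetAPIKey(hashedKey). // assumed name, derived from the api_key field
		SetRevoked(false).
		Save(ctx)
}
```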
+ event.DefaultCreatedAt = eventDescCreatedAt.Default.(func() time.Time) + // eventDescUpdatedAt is the schema descriptor for updated_at field. + eventDescUpdatedAt := eventFields[1].Descriptor() + // event.DefaultUpdatedAt holds the default value on creation for the updated_at field. + event.DefaultUpdatedAt = eventDescUpdatedAt.Default.(func() time.Time) + // eventDescSerialized is the schema descriptor for serialized field. + eventDescSerialized := eventFields[3].Descriptor() + // event.SerializedValidator is a validator for the "serialized" field. It is called by the builders before save. + event.SerializedValidator = eventDescSerialized.Validators[0].(func(string) error) + machineFields := schema.Machine{}.Fields() + _ = machineFields + // machineDescCreatedAt is the schema descriptor for created_at field. + machineDescCreatedAt := machineFields[0].Descriptor() + // machine.DefaultCreatedAt holds the default value on creation for the created_at field. + machine.DefaultCreatedAt = machineDescCreatedAt.Default.(func() time.Time) + // machineDescUpdatedAt is the schema descriptor for updated_at field. + machineDescUpdatedAt := machineFields[1].Descriptor() + // machine.DefaultUpdatedAt holds the default value on creation for the updated_at field. + machine.DefaultUpdatedAt = machineDescUpdatedAt.Default.(func() time.Time) + // machineDescScenarios is the schema descriptor for scenarios field. + machineDescScenarios := machineFields[5].Descriptor() + // machine.ScenariosValidator is a validator for the "scenarios" field. It is called by the builders before save. + machine.ScenariosValidator = machineDescScenarios.Validators[0].(func(string) error) + // machineDescIsValidated is the schema descriptor for isValidated field. + machineDescIsValidated := machineFields[7].Descriptor() + // machine.DefaultIsValidated holds the default value on creation for the isValidated field. + machine.DefaultIsValidated = machineDescIsValidated.Default.(bool) + metaFields := schema.Meta{}.Fields() + _ = metaFields + // metaDescCreatedAt is the schema descriptor for created_at field. + metaDescCreatedAt := metaFields[0].Descriptor() + // meta.DefaultCreatedAt holds the default value on creation for the created_at field. + meta.DefaultCreatedAt = metaDescCreatedAt.Default.(func() time.Time) + // metaDescUpdatedAt is the schema descriptor for updated_at field. + metaDescUpdatedAt := metaFields[1].Descriptor() + // meta.DefaultUpdatedAt holds the default value on creation for the updated_at field. + meta.DefaultUpdatedAt = metaDescUpdatedAt.Default.(func() time.Time) + // metaDescValue is the schema descriptor for value field. + metaDescValue := metaFields[3].Descriptor() + // meta.ValueValidator is a validator for the "value" field. It is called by the builders before save. + meta.ValueValidator = metaDescValue.Validators[0].(func(string) error) +} diff --git a/pkg/database/ent/runtime/runtime.go b/pkg/database/ent/runtime/runtime.go new file mode 100644 index 000000000..301ca1f6d --- /dev/null +++ b/pkg/database/ent/runtime/runtime.go @@ -0,0 +1,10 @@ +// Code generated by entc, DO NOT EDIT. + +package runtime + +// The schema-stitching logic is generated in github.com/crowdsecurity/crowdsec/pkg/database/ent/runtime.go + +const ( + Version = "v0.4.3" // Version of ent codegen. + Sum = "h1:ds9HENceKzpGBgCRlkZNq6TqBIegwKcF3e5reuV9Z0M=" // Sum of ent codegen. 
+) diff --git a/pkg/database/ent/schema/alert.go b/pkg/database/ent/schema/alert.go new file mode 100644 index 000000000..368d1ccab --- /dev/null +++ b/pkg/database/ent/schema/alert.go @@ -0,0 +1,63 @@ +package schema + +import ( + "time" + + "github.com/facebook/ent" + "github.com/facebook/ent/schema/edge" + "github.com/facebook/ent/schema/field" +) + +// Alert holds the schema definition for the Alert entity. +type Alert struct { + ent.Schema +} + +// Fields of the Alert. +func (Alert) Fields() []ent.Field { + return []ent.Field{ + field.Time("created_at"). + Default(time.Now), + field.Time("updated_at"). + Default(time.Now), + field.String("scenario"), + field.String("bucketId").Default("").Optional(), + field.String("message").Default("").Optional(), + field.Int32("eventsCount").Default(0).Optional(), + field.Time("startedAt").Default(time.Now).Optional(), + field.Time("stoppedAt").Default(time.Now).Optional(), + field.String("sourceIp"). + Optional(), + field.String("sourceRange"). + Optional(), + field.String("sourceAsNumber"). + Optional(), + field.String("sourceAsName"). + Optional(), + field.String("sourceCountry"). + Optional(), + field.Float32("sourceLatitude"). + Optional(), + field.Float32("sourceLongitude"). + Optional(), + field.String("sourceScope").Optional(), + field.String("sourceValue").Optional(), + field.Int32("capacity").Optional(), + field.String("leakSpeed").Optional(), + field.String("scenarioVersion").Optional(), + field.String("scenarioHash").Optional(), + field.Bool("simulated").Default(false), + } +} + +// Edges of the Alert. +func (Alert) Edges() []ent.Edge { + return []ent.Edge{ + edge.From("owner", Machine.Type). + Ref("alerts"). + Unique(), + edge.To("decisions", Decision.Type), + edge.To("events", Event.Type), + edge.To("metas", Meta.Type), + } +} diff --git a/pkg/database/ent/schema/bouncer.go b/pkg/database/ent/schema/bouncer.go new file mode 100644 index 000000000..3b206ed5e --- /dev/null +++ b/pkg/database/ent/schema/bouncer.go @@ -0,0 +1,37 @@ +package schema + +import ( + "time" + + "github.com/facebook/ent" + "github.com/facebook/ent/schema/field" +) + +// Bouncer holds the schema definition for the Bouncer entity. +type Bouncer struct { + ent.Schema +} + +// Fields of the Bouncer. +func (Bouncer) Fields() []ent.Field { + return []ent.Field{ + field.Time("created_at"). + Default(time.Now), + field.Time("updated_at"). + Default(time.Now), + field.String("name").Unique(), + field.String("api_key"), // hash of api_key + field.Bool("revoked"), + field.String("ip_address").Default("").Optional(), + field.String("type").Optional(), + field.String("version").Optional(), + field.Time("until").Default(time.Now).Optional(), + field.Time("last_pull"). + Default(time.Now), + } +} + +// Edges of the Bouncer. +func (Bouncer) Edges() []ent.Edge { + return nil +} diff --git a/pkg/database/ent/schema/decision.go b/pkg/database/ent/schema/decision.go new file mode 100644 index 000000000..a102f9f1a --- /dev/null +++ b/pkg/database/ent/schema/decision.go @@ -0,0 +1,42 @@ +package schema + +import ( + "time" + + "github.com/facebook/ent" + "github.com/facebook/ent/schema/edge" + "github.com/facebook/ent/schema/field" +) + +// Decision holds the schema definition for the Decision entity. +type Decision struct { + ent.Schema +} + +// Fields of the Decision. +func (Decision) Fields() []ent.Field { + return []ent.Field{ + field.Time("created_at"). + Default(time.Now), + field.Time("updated_at"). 
+ Default(time.Now), + field.Time("until"), + field.String("scenario"), + field.String("type"), + field.Int64("start_ip").Optional(), + field.Int64("end_ip").Optional(), + field.String("scope"), + field.String("value"), + field.String("origin"), + field.Bool("simulated").Default(false), + } +} + +// Edges of the Decision. +func (Decision) Edges() []ent.Edge { + return []ent.Edge{ + edge.From("owner", Alert.Type). + Ref("decisions"). + Unique(), + } +} diff --git a/pkg/database/ent/schema/event.go b/pkg/database/ent/schema/event.go new file mode 100644 index 000000000..00933ebed --- /dev/null +++ b/pkg/database/ent/schema/event.go @@ -0,0 +1,35 @@ +package schema + +import ( + "time" + + "github.com/facebook/ent" + "github.com/facebook/ent/schema/edge" + "github.com/facebook/ent/schema/field" +) + +// Event holds the schema definition for the Event entity. +type Event struct { + ent.Schema +} + +// Fields of the Event. +func (Event) Fields() []ent.Field { + return []ent.Field{ + field.Time("created_at"). + Default(time.Now), + field.Time("updated_at"). + Default(time.Now), + field.Time("time"), + field.String("serialized").MaxLen(4095), + } +} + +// Edges of the Event. +func (Event) Edges() []ent.Edge { + return []ent.Edge{ + edge.From("owner", Alert.Type). + Ref("events"). + Unique(), + } +} diff --git a/pkg/database/ent/schema/machine.go b/pkg/database/ent/schema/machine.go new file mode 100644 index 000000000..df3097f51 --- /dev/null +++ b/pkg/database/ent/schema/machine.go @@ -0,0 +1,39 @@ +package schema + +import ( + "time" + + "github.com/facebook/ent" + "github.com/facebook/ent/schema/edge" + "github.com/facebook/ent/schema/field" +) + +// Machine holds the schema definition for the Machine entity. +type Machine struct { + ent.Schema +} + +// Fields of the Machine. +func (Machine) Fields() []ent.Field { + return []ent.Field{ + field.Time("created_at"). + Default(time.Now), + field.Time("updated_at"). + Default(time.Now), + field.String("machineId").Unique(), + field.String("password").Sensitive(), + field.String("ipAddress"), + field.String("scenarios").MaxLen(4095).Optional(), + field.String("version").Optional(), + field.Bool("isValidated"). + Default(false), + field.String("status").Optional(), + } +} + +// Edges of the Machine. +func (Machine) Edges() []ent.Edge { + return []ent.Edge{ + edge.To("alerts", Alert.Type), + } +} diff --git a/pkg/database/ent/schema/meta.go b/pkg/database/ent/schema/meta.go new file mode 100644 index 000000000..65e33f0ec --- /dev/null +++ b/pkg/database/ent/schema/meta.go @@ -0,0 +1,35 @@ +package schema + +import ( + "time" + + "github.com/facebook/ent" + "github.com/facebook/ent/schema/edge" + "github.com/facebook/ent/schema/field" +) + +// Meta holds the schema definition for the Meta entity. +type Meta struct { + ent.Schema +} + +// Fields of the Meta. +func (Meta) Fields() []ent.Field { + return []ent.Field{ + field.Time("created_at"). + Default(time.Now), + field.Time("updated_at"). + Default(time.Now), + field.String("key"), + field.String("value").MaxLen(4095), + } +} + +// Edges of the Meta. +func (Meta) Edges() []ent.Edge { + return []ent.Edge{ + edge.From("owner", Alert.Type). + Ref("metas"). + Unique(), + } +} diff --git a/pkg/database/ent/tx.go b/pkg/database/ent/tx.go new file mode 100644 index 000000000..18574bbb4 --- /dev/null +++ b/pkg/database/ent/tx.go @@ -0,0 +1,225 @@ +// Code generated by entc, DO NOT EDIT. 
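Taken together, the schemas above form a small graph (machine -> alerts -> decisions/events/metas), and entc turns each edge declaration into a chainable traversal. A sketch, not part of the patch:

```go
package database

import (
	"context"

	"github.com/crowdsecurity/crowdsec/pkg/database/ent"
	"github.com/crowdsecurity/crowdsec/pkg/database/ent/machine"
)

// decisionsForMachine walks machine -> alerts -> decisions using the
// traversals generated from the edge declarations above.
func decisionsForMachine(ctx context.Context, client *ent.Client, machineID string) ([]*ent.Decision, error) {
	return client.Machine.Query().
		Where(machine.MachineIdEQ(machineID)).
		QueryAlerts().
		QueryDecisions().
		All(ctx)
}
```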
+ +package ent + +import ( + "context" + "sync" + + "github.com/facebook/ent/dialect" +) + +// Tx is a transactional client that is created by calling Client.Tx(). +type Tx struct { + config + // Alert is the client for interacting with the Alert builders. + Alert *AlertClient + // Bouncer is the client for interacting with the Bouncer builders. + Bouncer *BouncerClient + // Decision is the client for interacting with the Decision builders. + Decision *DecisionClient + // Event is the client for interacting with the Event builders. + Event *EventClient + // Machine is the client for interacting with the Machine builders. + Machine *MachineClient + // Meta is the client for interacting with the Meta builders. + Meta *MetaClient + + // lazily loaded. + client *Client + clientOnce sync.Once + + // completion callbacks. + mu sync.Mutex + onCommit []CommitHook + onRollback []RollbackHook + + // ctx lives for the life of the transaction. It is + // the same context used by the underlying connection. + ctx context.Context +} + +type ( + // Committer is the interface that wraps the Committer method. + Committer interface { + Commit(context.Context, *Tx) error + } + + // The CommitFunc type is an adapter to allow the use of ordinary + // function as a Committer. If f is a function with the appropriate + // signature, CommitFunc(f) is a Committer that calls f. + CommitFunc func(context.Context, *Tx) error + + // CommitHook defines the "commit middleware". A function that gets a Committer + // and returns a Committer. For example: + // + // hook := func(next ent.Committer) ent.Committer { + // return ent.CommitFunc(func(context.Context, tx *ent.Tx) error { + // // Do some stuff before. + // if err := next.Commit(ctx, tx); err != nil { + // return err + // } + // // Do some stuff after. + // return nil + // }) + // } + // + CommitHook func(Committer) Committer +) + +// Commit calls f(ctx, m). +func (f CommitFunc) Commit(ctx context.Context, tx *Tx) error { + return f(ctx, tx) +} + +// Commit commits the transaction. +func (tx *Tx) Commit() error { + txDriver := tx.config.driver.(*txDriver) + var fn Committer = CommitFunc(func(context.Context, *Tx) error { + return txDriver.tx.Commit() + }) + tx.mu.Lock() + hooks := append([]CommitHook(nil), tx.onCommit...) + tx.mu.Unlock() + for i := len(hooks) - 1; i >= 0; i-- { + fn = hooks[i](fn) + } + return fn.Commit(tx.ctx, tx) +} + +// OnCommit adds a hook to call on commit. +func (tx *Tx) OnCommit(f CommitHook) { + tx.mu.Lock() + defer tx.mu.Unlock() + tx.onCommit = append(tx.onCommit, f) +} + +type ( + // Rollbacker is the interface that wraps the Rollbacker method. + Rollbacker interface { + Rollback(context.Context, *Tx) error + } + + // The RollbackFunc type is an adapter to allow the use of ordinary + // function as a Rollbacker. If f is a function with the appropriate + // signature, RollbackFunc(f) is a Rollbacker that calls f. + RollbackFunc func(context.Context, *Tx) error + + // RollbackHook defines the "rollback middleware". A function that gets a Rollbacker + // and returns a Rollbacker. For example: + // + // hook := func(next ent.Rollbacker) ent.Rollbacker { + // return ent.RollbackFunc(func(context.Context, tx *ent.Tx) error { + // // Do some stuff before. + // if err := next.Rollback(ctx, tx); err != nil { + // return err + // } + // // Do some stuff after. + // return nil + // }) + // } + // + RollbackHook func(Rollbacker) Rollbacker +) + +// Rollback calls f(ctx, m). 
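Because `Commit` folds the hooks from last to first, the first hook registered ends up outermost around the real commit. A minimal sketch of a commit hook:

```go
package database

import (
	"context"
	"log"

	"github.com/crowdsecurity/crowdsec/pkg/database/ent"
)

// withCommitLogging decorates a transaction so every path through Commit
// is logged before the underlying driver commit runs.
func withCommitLogging(tx *ent.Tx) {
	tx.OnCommit(func(next ent.Committer) ent.Committer {
		return ent.CommitFunc(func(ctx context.Context, tx *ent.Tx) error {
			log.Println("committing crowdsec db transaction")
			return next.Commit(ctx, tx)
		})
	})
}
```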
+func (f RollbackFunc) Rollback(ctx context.Context, tx *Tx) error { + return f(ctx, tx) +} + +// Rollback rollbacks the transaction. +func (tx *Tx) Rollback() error { + txDriver := tx.config.driver.(*txDriver) + var fn Rollbacker = RollbackFunc(func(context.Context, *Tx) error { + return txDriver.tx.Rollback() + }) + tx.mu.Lock() + hooks := append([]RollbackHook(nil), tx.onRollback...) + tx.mu.Unlock() + for i := len(hooks) - 1; i >= 0; i-- { + fn = hooks[i](fn) + } + return fn.Rollback(tx.ctx, tx) +} + +// OnRollback adds a hook to call on rollback. +func (tx *Tx) OnRollback(f RollbackHook) { + tx.mu.Lock() + defer tx.mu.Unlock() + tx.onRollback = append(tx.onRollback, f) +} + +// Client returns a Client that binds to current transaction. +func (tx *Tx) Client() *Client { + tx.clientOnce.Do(func() { + tx.client = &Client{config: tx.config} + tx.client.init() + }) + return tx.client +} + +func (tx *Tx) init() { + tx.Alert = NewAlertClient(tx.config) + tx.Bouncer = NewBouncerClient(tx.config) + tx.Decision = NewDecisionClient(tx.config) + tx.Event = NewEventClient(tx.config) + tx.Machine = NewMachineClient(tx.config) + tx.Meta = NewMetaClient(tx.config) +} + +// txDriver wraps the given dialect.Tx with a nop dialect.Driver implementation. +// The idea is to support transactions without adding any extra code to the builders. +// When a builder calls to driver.Tx(), it gets the same dialect.Tx instance. +// Commit and Rollback are nop for the internal builders and the user must call one +// of them in order to commit or rollback the transaction. +// +// If a closed transaction is embedded in one of the generated entities, and the entity +// applies a query, for example: Alert.QueryXXX(), the query will be executed +// through the driver which created this transaction. +// +// Note that txDriver is not goroutine safe. +type txDriver struct { + // the driver we started the transaction from. + drv dialect.Driver + // tx is the underlying transaction. + tx dialect.Tx +} + +// newTx creates a new transactional driver. +func newTx(ctx context.Context, drv dialect.Driver) (*txDriver, error) { + tx, err := drv.Tx(ctx) + if err != nil { + return nil, err + } + return &txDriver{tx: tx, drv: drv}, nil +} + +// Tx returns the transaction wrapper (txDriver) to avoid Commit or Rollback calls +// from the internal builders. Should be called only by the internal builders. +func (tx *txDriver) Tx(context.Context) (dialect.Tx, error) { return tx, nil } + +// Dialect returns the dialect of the driver we started the transaction from. +func (tx *txDriver) Dialect() string { return tx.drv.Dialect() } + +// Close is a nop close. +func (*txDriver) Close() error { return nil } + +// Commit is a nop commit for the internal builders. +// User must call `Tx.Commit` in order to commit the transaction. +func (*txDriver) Commit() error { return nil } + +// Rollback is a nop rollback for the internal builders. +// User must call `Tx.Rollback` in order to rollback the transaction. +func (*txDriver) Rollback() error { return nil } + +// Exec calls tx.Exec. +func (tx *txDriver) Exec(ctx context.Context, query string, args, v interface{}) error { + return tx.tx.Exec(ctx, query, args, v) +} + +// Query calls tx.Query. 
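Since `Commit` and `Rollback` on `txDriver` are nops, the builders never end the transaction themselves; the caller must. A sketch of the intended calling pattern (entity values are illustrative):

```go
package database

import (
	"context"
	"fmt"

	"github.com/crowdsecurity/crowdsec/pkg/database/ent"
)

// createMachineWithAlert creates a machine and an owned alert atomically.
func createMachineWithAlert(ctx context.Context, client *ent.Client) error {
	tx, err := client.Tx(ctx)
	if err != nil {
		return err
	}
	m, err := tx.Machine.Create().
		SetMachineId("test-machine").
		SetPassword("not-a-real-password").
		SetIpAddress("127.0.0.1").
		Save(ctx)
	if err != nil {
		return rollback(tx, err)
	}
	if _, err := tx.Alert.Create().SetScenario("crowdsecurity/ssh-bf").SetOwner(m).Save(ctx); err != nil {
		return rollback(tx, err)
	}
	return tx.Commit()
}

// rollback aborts tx and keeps the original error if Rollback fails too.
func rollback(tx *ent.Tx, err error) error {
	if rerr := tx.Rollback(); rerr != nil {
		err = fmt.Errorf("%v: rolling back: %v", err, rerr)
	}
	return err
}
```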
+func (tx *txDriver) Query(ctx context.Context, query string, args, v interface{}) error {
+	return tx.tx.Query(ctx, query, args, v)
+}
+
+var _ dialect.Driver = (*txDriver)(nil)
diff --git a/pkg/database/errors.go b/pkg/database/errors.go
new file mode 100644
index 000000000..042d84408
--- /dev/null
+++ b/pkg/database/errors.go
@@ -0,0 +1,21 @@
+package database
+
+import "errors"
+
+var (
+	UserExists        = errors.New("user already exists")
+	UserNotExists     = errors.New("user doesn't exist")
+	HashError         = errors.New("unable to hash")
+	InsertFail        = errors.New("unable to insert row")
+	QueryFail         = errors.New("unable to query")
+	UpdateFail        = errors.New("unable to update")
+	DeleteFail        = errors.New("unable to delete")
+	ParseTimeFail     = errors.New("unable to parse time")
+	ParseDurationFail = errors.New("unable to parse duration")
+	MarshalFail       = errors.New("unable to marshal")
+	UnmarshalFail     = errors.New("unable to unmarshal")
+	BulkError         = errors.New("unable to insert bulk")
+	ParseType         = errors.New("unable to parse type")
+	InvalidIPOrRange  = errors.New("invalid ip address / range")
+	InvalidFilter     = errors.New("invalid filter")
+)
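machines.go below wraps these sentinels with github.com/pkg/errors; since pkg/errors v0.9 its wrappers implement `Unwrap`, so callers can still match them with the standard `errors.Is`. A sketch:

```go
package main

import (
	"errors"
	"log"

	"github.com/go-openapi/strfmt"

	"github.com/crowdsecurity/crowdsec/pkg/database"
)

func register(c *database.Client) {
	id := "test-machine"
	password := strfmt.Password("not-a-real-password")
	if _, err := c.CreateMachine(&id, &password, "127.0.0.1", false, false); err != nil {
		if errors.Is(err, database.UserExists) {
			log.Printf("machine %s is already registered", id)
			return
		}
		log.Fatalf("create machine: %s", err)
	}
}
```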
diff --git a/pkg/database/machines.go b/pkg/database/machines.go
new file mode 100644
index 000000000..4b2da779a
--- /dev/null
+++ b/pkg/database/machines.go
@@ -0,0 +1,156 @@
+package database
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/go-openapi/strfmt"
+	log "github.com/sirupsen/logrus"
+
+	"github.com/crowdsecurity/crowdsec/pkg/database/ent"
+	"github.com/crowdsecurity/crowdsec/pkg/database/ent/machine"
+	"github.com/pkg/errors"
+	"golang.org/x/crypto/bcrypt"
+)
+
+func (c *Client) CreateMachine(machineID *string, password *strfmt.Password, ipAddress string, isValidated bool, force bool) (int, error) {
+	hashPassword, err := bcrypt.GenerateFromPassword([]byte(*password), bcrypt.DefaultCost)
+	if err != nil {
+		log.Warningf("CreateMachine : %s", err)
+		return 0, errors.Wrap(HashError, "")
+	}
+
+	machineExist, err := c.Ent.Machine.
+		Query().
+		Where(machine.MachineIdEQ(*machineID)).
+		Select(machine.FieldMachineId).Strings(c.CTX)
+	if err != nil {
+		return 0, errors.Wrapf(QueryFail, "machine '%s': %s", *machineID, err)
+	}
+	if len(machineExist) > 0 {
+		if force {
+			_, err := c.Ent.Machine.Update().Where(machine.MachineIdEQ(*machineID)).SetPassword(string(hashPassword)).Save(c.CTX)
+			if err != nil {
+				log.Warningf("CreateMachine : %s", err)
+				return 0, errors.Wrapf(UpdateFail, "machine '%s'", *machineID)
+			}
+			return 1, nil
+		}
+		return 0, errors.Wrapf(UserExists, "user '%s'", *machineID)
+	}
+
+	_, err = c.Ent.Machine.
+		Create().
+		SetMachineId(*machineID).
+		SetPassword(string(hashPassword)).
+		SetIpAddress(ipAddress).
+		SetIsValidated(isValidated).
+		Save(c.CTX)
+
+	if err != nil {
+		log.Warningf("CreateMachine : %s", err)
+		return 0, errors.Wrapf(InsertFail, "creating machine '%s'", *machineID)
+	}
+
+	return 1, nil
+}
+
+func (c *Client) QueryMachineByID(machineID string) (*ent.Machine, error) {
+	machine, err := c.Ent.Machine.
+		Query().
+		Where(machine.MachineIdEQ(machineID)).
+		Only(c.CTX)
+	if err != nil {
+		log.Warningf("QueryMachineByID : %s", err)
+		return &ent.Machine{}, errors.Wrapf(UserNotExists, "user '%s'", machineID)
+	}
+	return machine, nil
+}
+
+func (c *Client) ListMachines() ([]*ent.Machine, error) {
+	machines, err := c.Ent.Machine.Query().All(c.CTX)
+	if err != nil {
+		log.Warningf("ListMachines : %s", err)
+		return []*ent.Machine{}, errors.Wrap(QueryFail, "listing machines")
+	}
+	return machines, nil
+}
+
+func (c *Client) ValidateMachine(machineID string) error {
+	_, err := c.Ent.Machine.Update().Where(machine.MachineIdEQ(machineID)).SetIsValidated(true).Save(c.CTX)
+	if err != nil {
+		log.Warningf("ValidateMachine : %s", err)
+		return errors.Wrap(UpdateFail, "setting machine status")
+	}
+	return nil
+}
+
+func (c *Client) QueryPendingMachine() ([]*ent.Machine, error) {
+	var machines []*ent.Machine
+	var err error
+
+	machines, err = c.Ent.Machine.Query().Where(machine.IsValidatedEQ(false)).All(c.CTX)
+	if err != nil {
+		log.Warningf("QueryPendingMachine : %s", err)
+		return []*ent.Machine{}, errors.Wrap(QueryFail, "listing pending machines")
+	}
+	return machines, nil
+}
+
+func (c *Client) DeleteWatcher(name string) error {
+	_, err := c.Ent.Machine.
+		Delete().
+		Where(machine.MachineIdEQ(name)).
+		Exec(c.CTX)
+	if err != nil {
+		return fmt.Errorf("unable to delete machine from database: %s", err)
+	}
+	return nil
+}
+
+func (c *Client) UpdateMachineScenarios(scenarios string, ID int) error {
+	_, err := c.Ent.Machine.UpdateOneID(ID).
+		SetUpdatedAt(time.Now()).
+		SetScenarios(scenarios).
+		Save(c.CTX)
+	if err != nil {
+		return fmt.Errorf("unable to update machine in database: %s", err)
+	}
+	return nil
+}
+
+func (c *Client) UpdateMachineIP(ipAddr string, ID int) error {
+	_, err := c.Ent.Machine.UpdateOneID(ID).
+		SetIpAddress(ipAddr).
+		Save(c.CTX)
+	if err != nil {
+		return fmt.Errorf("unable to update machine in database: %s", err)
+	}
+	return nil
+}
+
+func (c *Client) UpdateMachineVersion(version string, ID int) error {
+	_, err := c.Ent.Machine.UpdateOneID(ID).
+		SetVersion(version).
+		Save(c.CTX)
+	if err != nil {
+		return fmt.Errorf("unable to update machine in database: %s", err)
+	}
+	return nil
+}
+
+func (c *Client) IsMachineRegistered(machineID string) (bool, error) {
+	exist, err := c.Ent.Machine.Query().Where(machine.MachineIdEQ(machineID)).Select(machine.FieldMachineId).Strings(c.CTX)
+	if err != nil {
+		return false, err
+	}
+	if len(exist) == 1 {
+		return true, nil
+	}
+	if len(exist) > 1 {
+		return false, fmt.Errorf("more than one machine with the same machineID in database")
+	}
+
+	return false, nil
+
+}
diff --git a/pkg/database/read.go b/pkg/database/read.go
deleted file mode 100644
index 8c967be75..000000000
--- a/pkg/database/read.go
+++ /dev/null
@@ -1,164 +0,0 @@
-package database
-
-import (
-	"fmt"
-	"time"
-
-	"github.com/crowdsecurity/crowdsec/pkg/types"
-	"github.com/jinzhu/gorm"
-	log "github.com/sirupsen/logrus"
-)
-
-//GetBansAt returns the IPs that were banned at a given time
-func (c *Context) GetBansAt(at time.Time) ([]map[string]string, error) {
-
-	bas := []types.BanApplication{}
-	rets := make([]map[string]string, 0)
-	/*get non-expired records*/
-	records := c.Db.Order("updated_at desc").Where("until >= ?", at).Group("ip_text").Find(&bas) /*.Count(&count)*/
-	if records.Error != nil {
-		return nil, records.Error
-	}
-	for _, ba := range bas {
-		var count int
-		/*
-			fetch count of bans for this specific ip_text
-		*/
-		ret := c.Db.Table("ban_applications").Order("updated_at desc").Where(`ip_text = ? AND until >= ?
AND deleted_at is NULL`, ba.IpText, at).Count(&count) - if ret.Error != nil { - return nil, fmt.Errorf("failed to fetch records count for %s : %v", ba.IpText, ret.Error) - } - sOs := []types.SignalOccurence{} - nbSo := 0 - records := c.Db.Where(`source_ip = ?`, ba.IpText).Group("id").Find(&sOs).Count(&nbSo) - if records.Error != nil { - //record not found can be ok - if gorm.IsRecordNotFoundError(records.Error) { - bancom := make(map[string]string) - bancom["iptext"] = ba.IpText - bancom["bancount"] = fmt.Sprintf("%d", count) - bancom["as"] = ba.TargetASName - bancom["asnum"] = fmt.Sprintf("%d", ba.TargetAS) - bancom["cn"] = ba.TargetCN - bancom["scenario"] = "?" - bancom["source"] = ba.MeasureSource - bancom["events_count"] = "0" - bancom["action"] = ba.MeasureType - bancom["until"] = time.Until(ba.Until).Round(time.Second).String() - bancom["reason"] = ba.Reason - rets = append(rets, bancom) - continue - } - } - - evtCount := 0 - for _, s := range sOs { - evtCount += s.Events_count - } - - so := types.SignalOccurence{} - records = c.Db.Where(`id = ?`, ba.SignalOccurenceID).Find(&so) - if records.Error != nil { - //record not found can be ok - if gorm.IsRecordNotFoundError(records.Error) { - bancom := make(map[string]string) - bancom["iptext"] = ba.IpText - bancom["bancount"] = fmt.Sprintf("%d", count) - bancom["as"] = ba.TargetASName - bancom["asnum"] = fmt.Sprintf("%d", ba.TargetAS) - bancom["cn"] = ba.TargetCN - bancom["source"] = ba.MeasureSource - bancom["scenario"] = "?" - bancom["events_count"] = "0" - bancom["action"] = ba.MeasureType - bancom["until"] = time.Until(ba.Until).Round(time.Second).String() - bancom["reason"] = ba.Reason - rets = append(rets, bancom) - continue - } - fmt.Printf("err : %v", records.Error) - return nil, records.Error - } - if records.RowsAffected != 1 { - log.Errorf("more than one signal_occurence for local_decision, discard") - break - } - bancom := make(map[string]string) - bancom["iptext"] = ba.IpText - bancom["as"] = so.Source_AutonomousSystemNumber + " " + so.Source_AutonomousSystemOrganization - bancom["cn"] = so.Source_Country - bancom["bancount"] = fmt.Sprintf("%d", nbSo) - bancom["scenario"] = so.Scenario - bancom["events_count"] = fmt.Sprintf("%d", evtCount) - bancom["action"] = ba.MeasureType - bancom["source"] = ba.MeasureSource - bancom["until"] = time.Until(ba.Until).Round(time.Second).String() - bancom["reason"] = so.Scenario - rets = append(rets, bancom) - } - return rets, nil -} - -func (c *Context) GetNewBan() ([]types.BanApplication, error) { - - var bas []types.BanApplication - - //select the news bans - banRecords := c.Db. - Order("updated_at desc"). - /*Get non expired (until) bans*/ - Where(`until >= ?`, time.Now()). - /*Only get one ban per unique ip_text*/ - Group("ip_text"). - Find(&bas) - if banRecords.Error != nil { - return nil, fmt.Errorf("failed when selection bans : %v", banRecords.Error) - } - - return bas, nil - -} - -func (c *Context) GetNewBanSince(since time.Time) ([]types.BanApplication, error) { - - var bas []types.BanApplication - - //select the news bans - banRecords := c.Db. - Order("updated_at desc"). - /*Get non expired (until) bans*/ - Where(`until >= ?`, time.Now()). - /*That were added since last tick*/ - Where(`updated_at >= ?`, since). - /*Only get one ban per unique ip_text*/ - Group("ip_text"). 
- Find(&bas) /*.Count(&count)*/ - if banRecords.Error != nil { - return nil, fmt.Errorf("failed when selection bans : %v", banRecords.Error) - } - - return bas, nil - -} - -func (c *Context) GetDeletedBanSince(since time.Time) ([]types.BanApplication, error) { - var bas []types.BanApplication - - deletedRecords := c.Db. - /*ignore the soft delete*/ - Unscoped(). - Order("updated_at desc"). - /*ban that were deleted since since or bans that expired since since*/ - Where(`deleted_at >= ? OR - (until >= ? AND until <= ?)`, - since.Add(1*time.Second), since.Add(1*time.Second), time.Now()). - /*Only get one ban per unique ip_text*/ - Group("ip_text"). - Find(&bas) /*.Count(&count)*/ - - if deletedRecords.Error != nil { - return nil, fmt.Errorf("failed when selection deleted bans : %v", deletedRecords.Error) - } - - return bas, nil -} diff --git a/pkg/database/read_test.go b/pkg/database/read_test.go deleted file mode 100644 index e9ccdce55..000000000 --- a/pkg/database/read_test.go +++ /dev/null @@ -1,123 +0,0 @@ -package database - -import ( - "testing" - "time" -) - -func TestFetchBans(t *testing.T) { - //Plan: - // - flush db - // - write signal+ban for 1.2.3.4 - // - get bans (as a connector) + check - // - write signal+ban for 1.2.3.5 - // - get new bans (as a connector) + check - // - delete ban for 1.2.3.4 - // - get deleted bans (as a connector) + check - - validCfg := map[string]string{ - "type": "sqlite", - "db_path": "./test.db", - "max_records_age": "72h", - "debug": "false", - "flush": "true", - } - ctx, err := NewDatabase(validCfg) - if err != nil || ctx == nil { - t.Fatalf("failed to create simple sqlite") - } - - if err := ctx.HardDeleteAll(); err != nil { - t.Fatalf("failed to flush existing bans") - } - - OldSignal := genSignalOccurence("1.2.3.4") - //write the old signal - if err = ctx.WriteSignal(OldSignal); err != nil { - t.Fatalf("Failed to insert old signal : %s", err) - } - - //we startup, we should get one ban - firstFetch := time.Now() - bans, err := ctx.GetNewBan() - if err != nil { - t.Fatalf("%s", err) - } - if len(bans) != 1 { - t.Fatalf("expected one ban") - } - - NewSignal := genSignalOccurence("1.2.3.5") - - //write the old signal - if err = ctx.WriteSignal(NewSignal); err != nil { - t.Fatalf("Failed to insert old signal : %s", err) - } - - //we startup, we should get one ban - bans, err = ctx.GetNewBanSince(firstFetch) - if err != nil { - t.Fatalf("%s", err) - } - firstFetch = time.Now() - if len(bans) != 1 { - t.Fatal() - } - if bans[0].MeasureSource != NewSignal.BanApplications[0].MeasureSource { - t.Fatal() - } - if bans[0].MeasureType != NewSignal.BanApplications[0].MeasureType { - t.Fatal() - } - if bans[0].StartIp != NewSignal.BanApplications[0].StartIp { - t.Fatal() - } - if bans[0].EndIp != NewSignal.BanApplications[0].EndIp { - t.Fatal() - } - if bans[0].Reason != NewSignal.BanApplications[0].Reason { - t.Fatal() - } - //Delete a ban - count, err := ctx.DeleteBan("1.2.3.4") - if err != nil { - t.Fatal() - } - if count != 1 { - t.Fatal() - } - //we shouldn't have any new bans - bans, err = ctx.GetNewBanSince(firstFetch) - if err != nil { - t.Fatal() - } - if len(bans) != 0 { - t.Fatal() - } - // //GetDeletedBanSince adds one second to the timestamp. why ? 
I'm not sure
-	// time.Sleep(1 * time.Second)
-	//but we should get a deleted ban
-	bans, err = ctx.GetDeletedBanSince(firstFetch.Add(-2 * time.Second))
-	if err != nil {
-		t.Fatalf("%s", err)
-	}
-	if len(bans) != 1 {
-		t.Fatalf("got %d", len(bans))
-	}
-	//OldSignal
-	if bans[0].MeasureSource != OldSignal.BanApplications[0].MeasureSource {
-		t.Fatal()
-	}
-	if bans[0].MeasureType != OldSignal.BanApplications[0].MeasureType {
-		t.Fatal()
-	}
-	if bans[0].StartIp != OldSignal.BanApplications[0].StartIp {
-		t.Fatal()
-	}
-	if bans[0].EndIp != OldSignal.BanApplications[0].EndIp {
-		t.Fatal()
-	}
-	if bans[0].Reason != OldSignal.BanApplications[0].Reason {
-		t.Fatal()
-	}
-}
diff --git a/pkg/database/utils.go b/pkg/database/utils.go
new file mode 100644
index 000000000..5d6d4a442
--- /dev/null
+++ b/pkg/database/utils.go
@@ -0,0 +1,65 @@
+package database
+
+import (
+	"encoding/binary"
+	"fmt"
+	"net"
+)
+
+func IP2Int(ip net.IP) uint32 {
+	if len(ip) == 16 {
+		return binary.BigEndian.Uint32(ip[12:16])
+	}
+	return binary.BigEndian.Uint32(ip)
+}
+
+func Int2ip(nn uint32) net.IP {
+	ip := make(net.IP, 4)
+	binary.BigEndian.PutUint32(ip, nn)
+	return ip
+}
+
+func IsIpv4(host string) bool {
+	// ParseIP alone also accepts IPv6 addresses; require a 4-byte form,
+	// as the function name promises
+	ip := net.ParseIP(host)
+	return ip != nil && ip.To4() != nil
+}
+
+//Stolen from : https://github.com/llimllib/ipaddress/
+// Return the final address of a net range. Convert to IPv4 if possible,
+// otherwise return an ipv6
+func LastAddress(n *net.IPNet) net.IP {
+	ip := n.IP.To4()
+	if ip == nil {
+		ip = n.IP
+		return net.IP{
+			ip[0] | ^n.Mask[0], ip[1] | ^n.Mask[1], ip[2] | ^n.Mask[2],
+			ip[3] | ^n.Mask[3], ip[4] | ^n.Mask[4], ip[5] | ^n.Mask[5],
+			ip[6] | ^n.Mask[6], ip[7] | ^n.Mask[7], ip[8] | ^n.Mask[8],
+			ip[9] | ^n.Mask[9], ip[10] | ^n.Mask[10], ip[11] | ^n.Mask[11],
+			ip[12] | ^n.Mask[12], ip[13] | ^n.Mask[13], ip[14] | ^n.Mask[14],
+			ip[15] | ^n.Mask[15]}
+	}
+
+	return net.IPv4(
+		ip[0]|^n.Mask[0],
+		ip[1]|^n.Mask[1],
+		ip[2]|^n.Mask[2],
+		ip[3]|^n.Mask[3])
+}
+
+func GetIpsFromIpRange(host string) (int64, int64, error) {
+	var ipStart int64
+	var ipEnd int64
+	var err error
+	var parsedRange *net.IPNet
+
+	if _, parsedRange, err = net.ParseCIDR(host); err != nil {
+		return ipStart, ipEnd, fmt.Errorf("'%s' is not a valid CIDR", host)
+	}
+	if parsedRange == nil {
+		// err is nil here, so report the offending input instead of the error
+		return ipStart, ipEnd, fmt.Errorf("unable to parse network '%s'", host)
+	}
+	ipStart = int64(IP2Int(parsedRange.IP))
+	ipEnd = int64(IP2Int(LastAddress(parsedRange)))
+
+	return ipStart, ipEnd, nil
+}
diff --git a/pkg/database/write.go b/pkg/database/write.go
deleted file mode 100644
index 10a39133b..000000000
--- a/pkg/database/write.go
+++ /dev/null
@@ -1,45 +0,0 @@
-package database
-
-import (
-	"fmt"
-	"sync/atomic"
-
-	"github.com/crowdsecurity/crowdsec/pkg/types"
-	log "github.com/sirupsen/logrus"
-)
-
-//we simply append the event to the transaction
-func (c *Context) WriteBanApplication(ban types.BanApplication) error {
-	atomic.AddInt32(&c.count, 1)
-
-	c.lock.Lock()
-	defer c.lock.Unlock()
-	log.Debugf("Ban application being called : %s %s", ban.Scenario, ban.IpText)
-	ret := c.Db.Where(types.BanApplication{IpText: ban.IpText}).Assign(types.BanApplication{Until: ban.Until}).Assign(types.BanApplication{Reason: ban.Reason}).Assign(types.BanApplication{MeasureType: ban.MeasureType}).FirstOrCreate(&ban)
-	if ret.Error != nil {
-		return fmt.Errorf("failed to write ban record : %v", ret.Error)
-	}
-	return nil
-}
-
-func (c *Context) WriteSignal(sig types.SignalOccurence) error {
-	atomic.AddInt32(&c.count, 1)
-	c.lock.Lock()
-	defer c.lock.Unlock()
-	
/*let's ensure we only have one ban active for a given scope*/ - for _, ba := range sig.BanApplications { - ret := c.Db.Where("ip_text = ?", ba.IpText).Delete(types.BanApplication{}) - if ret.Error != nil { - log.Errorf("While delete overlaping bans : %s", ret.Error) - return fmt.Errorf("failed to write signal occurrence : %v", ret.Error) - } - } - /*and add the new one(s)*/ - ret := c.Db.Create(&sig) - if ret.Error != nil { - log.Errorf("While creating new bans : %s", ret.Error) - return fmt.Errorf("failed to write signal occurrence : %s", ret.Error) - } - - return nil -} diff --git a/pkg/database/write_test.go b/pkg/database/write_test.go deleted file mode 100644 index a6a877ba4..000000000 --- a/pkg/database/write_test.go +++ /dev/null @@ -1,386 +0,0 @@ -package database - -import ( - "database/sql" - "database/sql/driver" - "fmt" - "net" - "reflect" - "regexp" - "testing" - "time" - - "github.com/DATA-DOG/go-sqlmock" - "github.com/crowdsecurity/crowdsec/pkg/types" - "github.com/jinzhu/gorm" - "github.com/onsi/ginkgo" - "github.com/onsi/gomega" -) - -type AnyTime struct{} - -// Match satisfies sqlmock.Argument interface -func (a AnyTime) Match(v driver.Value) bool { - _, ok := v.(time.Time) - return ok -} - -var _ = ginkgo.Describe("TestWrites", func() { - var ctx *Context - var mock sqlmock.Sqlmock - - ginkgo.BeforeEach(func() { - var db *sql.DB - var err error - - db, mock, err = sqlmock.New() // mock sql.DB - gomega.Expect(err).ShouldNot(gomega.HaveOccurred()) - - gdb, err := gorm.Open("sqlite", db) // open gorm db - gomega.Expect(err).ShouldNot(gomega.HaveOccurred()) - - ctx = &Context{Db: gdb} - //ctx.Db.LogMode(true) - }) - ginkgo.AfterEach(func() { - err := mock.ExpectationsWereMet() // make sure all expectations were met - gomega.Expect(err).ShouldNot(gomega.HaveOccurred()) - }) - - ginkgo.Context("insert ban_applications", func() { - ginkgo.It("insert 1.2.3.4", func() { - - const sqlSelectAll = `SELECT * FROM "ban_applications" WHERE "ban_applications"."deleted_at" IS NULL AND (("ban_applications"."ip_text" = ?)) ORDER BY "ban_applications"."id" ASC LIMIT 1` - - insertBan := types.BanApplication{IpText: "1.2.3.4"} - - mock.ExpectQuery(regexp.QuoteMeta(sqlSelectAll)).WithArgs("1.2.3.4").WillReturnRows(sqlmock.NewRows(nil)) - - mock.ExpectBegin() - - const sqlInsertBanApplication = `INSERT INTO "ban_applications" ("created_at","updated_at","deleted_at","measure_source","measure_type","measure_extra","until","start_ip","end_ip","target_cn","target_as","target_as_name","ip_text","reason","scenario","signal_occurence_id") VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)` - InsertExpectedResult := sqlmock.NewResult(1, 1) - mock.ExpectExec(regexp.QuoteMeta(sqlInsertBanApplication)).WithArgs( - AnyTime{}, - AnyTime{}, - nil, - insertBan.MeasureSource, - insertBan.MeasureType, - insertBan.MeasureExtra, - AnyTime{}, - insertBan.StartIp, - insertBan.EndIp, - insertBan.TargetCN, - insertBan.TargetAS, - insertBan.TargetASName, - insertBan.IpText, - insertBan.Reason, - insertBan.Scenario, - insertBan.SignalOccurenceID).WillReturnResult(InsertExpectedResult) - - mock.ExpectCommit() - - err := ctx.WriteBanApplication(insertBan) - gomega.Expect(err).ShouldNot(gomega.HaveOccurred()) - }) - }) - - ginkgo.Context("insert signal_occurence", func() { - ginkgo.It("insert signal+ban for 1.2.3.4", func() { - insertBan := types.BanApplication{IpText: "1.2.3.4", SignalOccurenceID: 1} - insertSig := types.SignalOccurence{ - MapKey: "ratata", - Scenario: "test_1", - BanApplications: 
[]types.BanApplication{insertBan}, - Source_ip: "1.2.3.4", - Source_range: "1.2.3.0/24", - Source_AutonomousSystemNumber: "1234", - } - - //the part that try to delete pending existing bans - mock.ExpectBegin() - const sqlDeleteOldBan = `UPDATE "ban_applications" SET "deleted_at"=? WHERE "ban_applications"."deleted_at" IS NULL AND ((ip_text = ?))` - sqlDeleteOldBanResult := sqlmock.NewResult(1, 1) - mock.ExpectExec(regexp.QuoteMeta(sqlDeleteOldBan)).WithArgs(AnyTime{}, "1.2.3.4").WillReturnResult(sqlDeleteOldBanResult) - mock.ExpectCommit() - - //insert the signal occurence - mock.ExpectBegin() - const sqlInsertNewEvent = `INSERT INTO "signal_occurences" ("created_at","updated_at","deleted_at","map_key","scenario","bucket_id","alert_message","events_count","start_at","stop_at","source_ip","source_range","source_autonomous_system_number","source_autonomous_system_organization","source_country","source_latitude","source_longitude","dest_ip","capacity","leak_speed","reprocess") VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)` - sqlInsertNewEventResult := sqlmock.NewResult(1, 1) - mock.ExpectExec(regexp.QuoteMeta(sqlInsertNewEvent)).WithArgs( - AnyTime{}, - AnyTime{}, - nil, - insertSig.MapKey, - insertSig.Scenario, - "", - "", - 0, - AnyTime{}, - AnyTime{}, - insertSig.Source_ip, - insertSig.Source_range, - insertSig.Source_AutonomousSystemNumber, - "", - "", - 0.0, - 0.0, - "", - 0, - 0, - false, - ).WillReturnResult(sqlInsertNewEventResult) - - //insert the ban application - const sqlInsertBanApplication = `INSERT INTO "ban_applications" ("created_at","updated_at","deleted_at","measure_source","measure_type","measure_extra","until","start_ip","end_ip","target_cn","target_as","target_as_name","ip_text","reason","scenario","signal_occurence_id") VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)` - sqlInsertBanApplicationResults := sqlmock.NewResult(1, 1) - mock.ExpectExec(regexp.QuoteMeta(sqlInsertBanApplication)).WithArgs( - AnyTime{}, - AnyTime{}, - nil, - insertBan.MeasureSource, - insertBan.MeasureType, - insertBan.MeasureExtra, - AnyTime{}, - insertBan.StartIp, - insertBan.EndIp, - insertBan.TargetCN, - insertBan.TargetAS, - insertBan.TargetASName, - insertBan.IpText, - insertBan.Reason, - insertBan.Scenario, - insertBan.SignalOccurenceID).WillReturnResult(sqlInsertBanApplicationResults) - - mock.ExpectCommit() - - err := ctx.WriteSignal(insertSig) - gomega.Expect(err).ShouldNot(gomega.HaveOccurred()) - }) - }) - - ginkgo.Context("insert old signal_occurence + cleanup", func() { - ginkgo.It("insert signal+ban for 1.2.3.4", func() { - - target_ip := net.ParseIP("1.2.3.4") - - OldBan := types.BanApplication{ - MeasureType: "ban", - MeasureSource: "local", - //expired one month ago - Until: time.Now().Add(-24 * 30 * time.Hour), - StartIp: types.IP2Int(target_ip), - EndIp: types.IP2Int(target_ip), - TargetCN: "FR", - TargetAS: 1234, - TargetASName: "Random AS", - IpText: target_ip.String(), - Reason: "A reason", - Scenario: "A scenario", - } - OldSignal := types.SignalOccurence{ - MapKey: "lala", - Scenario: "old_overflow", - //two month ago : 24*60 - Start_at: time.Now().Add(-24 * 60 * time.Hour), - Stop_at: time.Now().Add(-24 * 60 * time.Hour), - BanApplications: []types.BanApplication{OldBan}, - } - - //the part that try to delete pending existing bans - mock.ExpectBegin() - const sqlDeleteOldBan = `UPDATE "ban_applications" SET "deleted_at"=? 
WHERE "ban_applications"."deleted_at" IS NULL AND ((ip_text = ?))` - sqlDeleteOldBanResult := sqlmock.NewResult(1, 1) - mock.ExpectExec(regexp.QuoteMeta(sqlDeleteOldBan)).WithArgs(AnyTime{}, target_ip.String()).WillReturnResult(sqlDeleteOldBanResult) - mock.ExpectCommit() - - //insert the signal occurence - mock.ExpectBegin() - const sqlInsertNewEvent = `INSERT INTO "signal_occurences" ("created_at","updated_at","deleted_at","map_key","scenario","bucket_id","alert_message","events_count","start_at","stop_at","source_ip","source_range","source_autonomous_system_number","source_autonomous_system_organization","source_country","source_latitude","source_longitude","dest_ip","capacity","leak_speed","reprocess") VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)` - sqlInsertNewEventResult := sqlmock.NewResult(1, 1) - mock.ExpectExec(regexp.QuoteMeta(sqlInsertNewEvent)).WithArgs( - AnyTime{}, - AnyTime{}, - nil, - OldSignal.MapKey, - OldSignal.Scenario, - "", - "", - 0, - AnyTime{}, - AnyTime{}, - OldSignal.Source_ip, - OldSignal.Source_range, - OldSignal.Source_AutonomousSystemNumber, - "", - "", - 0.0, - 0.0, - "", - 0, - 0, - false, - ).WillReturnResult(sqlInsertNewEventResult) - - //insert the ban application - const sqlInsertBanApplication = `INSERT INTO "ban_applications" ("created_at","updated_at","deleted_at","measure_source","measure_type","measure_extra","until","start_ip","end_ip","target_cn","target_as","target_as_name","ip_text","reason","scenario","signal_occurence_id") VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)` - sqlInsertBanApplicationResults := sqlmock.NewResult(1, 1) - mock.ExpectExec(regexp.QuoteMeta(sqlInsertBanApplication)).WithArgs( - AnyTime{}, - AnyTime{}, - nil, - OldBan.MeasureSource, - OldBan.MeasureType, - OldBan.MeasureExtra, - AnyTime{}, - OldBan.StartIp, - OldBan.EndIp, - OldBan.TargetCN, - OldBan.TargetAS, - OldBan.TargetASName, - OldBan.IpText, - OldBan.Reason, - OldBan.Scenario, - 1).WillReturnResult(sqlInsertBanApplicationResults) - - mock.ExpectCommit() - - err := ctx.WriteSignal(OldSignal) - gomega.Expect(err).ShouldNot(gomega.HaveOccurred()) - - }) - }) - -}) - -func TestInsertSqlMock(t *testing.T) { - gomega.RegisterFailHandler(ginkgo.Fail) - ginkgo.RunSpecs(t, "TestWrites") -} - -func TestInsertOldBans(t *testing.T) { - //Plan: - // - flush db - // - insert month old ban - // - use GetBansAt on current + past time and check results - // - @todo : we need to call the DeleteExpired and such - - validCfg := map[string]string{ - "type": "sqlite", - "db_path": "./test.db", - "max_records": "1000", - "max_records_age": "72h", - "debug": "false", - "flush": "true", - } - ctx, err := NewDatabase(validCfg) - if err != nil || ctx == nil { - t.Fatalf("failed to create simple sqlite") - } - - if err := ctx.DeleteAll(); err != nil { - t.Fatalf("failed to flush existing bans") - } - - target_ip := net.ParseIP("1.2.3.4") - - OldBan := types.BanApplication{ - MeasureType: "ban", - MeasureSource: "local", - //expired one month ago - Until: time.Now().Add(-24 * 30 * time.Hour), - StartIp: types.IP2Int(target_ip), - EndIp: types.IP2Int(target_ip), - TargetCN: "FR", - TargetAS: 1234, - TargetASName: "Random AS", - IpText: target_ip.String(), - Reason: "A reason", - Scenario: "A scenario", - } - OldSignal := types.SignalOccurence{ - MapKey: "lala", - Scenario: "old_overflow", - //two month ago : 24*60 - Start_at: time.Now().Add(-24 * 60 * time.Hour), - Stop_at: time.Now().Add(-24 * 60 * time.Hour), - BanApplications: []types.BanApplication{OldBan}, - } - //write the old signal - 
err = ctx.WriteSignal(OldSignal) - if err != nil { - t.Fatalf("Failed to insert old signal : %s", err) - } - - //fetch bans at current time - bans, err := ctx.GetBansAt(time.Now()) - if err != nil { - t.Fatalf("failed to get bans : %s", err) - } - - if len(bans) != 0 { - t.Fatalf("should not have bans, got %d bans", len(bans)) - } - //get bans in the past - bans, err = ctx.GetBansAt(time.Now().Add(-24 * 31 * time.Hour)) - if err != nil { - t.Fatalf("failed to get bans : %s", err) - } - - if len(bans) != 1 { - t.Fatalf("should had 1 ban, got %d bans", len(bans)) - } - if !reflect.DeepEqual(bans, []map[string]string{map[string]string{ - "source": "local", - "until": "-720h0m0s", - "reason": "old_overflow", - "iptext": "1.2.3.4", - "cn": "", - "events_count": "0", - "action": "ban", - "as": " ", - "bancount": "0", - "scenario": "old_overflow", - }}) { - t.Fatalf("unexpected results") - } - -} - -func TestWriteBanApplicationOnly(t *testing.T) { - validCfg := map[string]string{ - "type": "sqlite", - "db_path": "./test.db", - "debug": "false", - "flush": "true", - } - ctx, err := NewDatabase(validCfg) - if err != nil || ctx == nil { - t.Fatalf("failed to create simple sqlite") - } - - if err := ctx.DeleteAll(); err != nil { - t.Fatalf("failed to flush existing bans") - } - - freshRecordsCount := 12 - - for i := 0; i < freshRecordsCount; i++ { - //this one expires in the future - OldSignal := genSignalOccurence(fmt.Sprintf("2.2.2.%d", i)) - - OldSignal.BanApplications[0].Until = time.Now().Add(1 * time.Hour) - if err = ctx.WriteBanApplication(OldSignal.BanApplications[0]); err != nil { - t.Fatalf("Failed to insert old signal : %s", err) - } - } - - bans, err := ctx.GetBansAt(time.Now()) - if err != nil { - t.Fatalf("%s", err) - } - if len(bans) != freshRecordsCount { - t.Fatalf("expected %d, got %d", freshRecordsCount, len(bans)) - } -} diff --git a/pkg/exprhelpers/exprlib.go b/pkg/exprhelpers/exprlib.go index 320daa59e..b158a269f 100644 --- a/pkg/exprhelpers/exprlib.go +++ b/pkg/exprhelpers/exprlib.go @@ -10,6 +10,7 @@ import ( "strconv" "strings" + "github.com/davecgh/go-spew/spew" log "github.com/sirupsen/logrus" ) @@ -93,6 +94,7 @@ func File(filename string) []string { return dataFile[filename] } log.Errorf("file '%s' (type:string) not found in expr library", filename) + log.Errorf("expr library : %s", spew.Sdump(dataFile)) return []string{} } @@ -105,6 +107,7 @@ func RegexpInFile(data string, filename string) bool { } } else { log.Errorf("file '%s' (type:regexp) not found in expr library", filename) + log.Errorf("expr library : %s", spew.Sdump(dataFileRegex)) } return false } diff --git a/pkg/exprhelpers/jsonextract.go b/pkg/exprhelpers/jsonextract.go index 9a3f4d72e..8c60a50bd 100644 --- a/pkg/exprhelpers/jsonextract.go +++ b/pkg/exprhelpers/jsonextract.go @@ -32,6 +32,6 @@ func JsonExtract(jsblob string, target string) string { } fullpath := strings.Split(target, ".") - log.Debugf("extract path %+v", fullpath) + log.Tracef("extract path %+v", fullpath) return JsonExtractLib(jsblob, fullpath...) 
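+	// Illustrative sketch, not part of this change: with the dotted-path
+	// convention built above, JsonExtract(`{"log": {"src_ip": "1.2.3.4"}}`, "log.src_ip")
+	// splits the target into ["log", "src_ip"] and is expected to return "1.2.3.4"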
} diff --git a/pkg/leakybucket/blackhole.go b/pkg/leakybucket/blackhole.go index 20775a53a..03e0357bd 100644 --- a/pkg/leakybucket/blackhole.go +++ b/pkg/leakybucket/blackhole.go @@ -18,12 +18,12 @@ type Blackhole struct { DumbProcessor } -func NewBlackhole(g *BucketFactory) (*Blackhole, error) { +func NewBlackhole(bucketFactory *BucketFactory) (*Blackhole, error) { var duration time.Duration - if d, err := time.ParseDuration(g.Blackhole); err != nil { - g.logger.Warning("Blackhole duration not valid, using 1h") - return nil, fmt.Errorf("blackhole duration not valid '%s'", g.Blackhole) + if d, err := time.ParseDuration(bucketFactory.Blackhole); err != nil { + bucketFactory.logger.Warning("Blackhole duration not valid, using 1h") + return nil, fmt.Errorf("blackhole duration not valid '%s'", bucketFactory.Blackhole) } else { duration = d } @@ -34,39 +34,38 @@ func NewBlackhole(g *BucketFactory) (*Blackhole, error) { }, nil } -func (bl *Blackhole) OnBucketOverflow(b *BucketFactory) func(*Leaky, types.SignalOccurence, *Queue) (types.SignalOccurence, *Queue) { - return func(l *Leaky, s types.SignalOccurence, q *Queue) (types.SignalOccurence, *Queue) { +func (bl *Blackhole) OnBucketOverflow(bucketFactory *BucketFactory) func(*Leaky, types.RuntimeAlert, *Queue) (types.RuntimeAlert, *Queue) { + return func(leaky *Leaky, alert types.RuntimeAlert, queue *Queue) (types.RuntimeAlert, *Queue) { var blackholed bool = false var tmp []HiddenKey // search if we are blackholed and refresh the slice for _, element := range bl.hiddenKeys { - if element.key == l.Mapkey { - if element.expiration.After(l.Ovflw_ts) { - l.logger.Debugf("Overflow discarded, still blackholed for %s", element.expiration.Sub(l.Ovflw_ts)) + if element.key == leaky.Mapkey { + if element.expiration.After(leaky.Ovflw_ts) { + leaky.logger.Debugf("Overflow discarded, still blackholed for %s", element.expiration.Sub(leaky.Ovflw_ts)) blackholed = true } } - if element.expiration.After(l.Ovflw_ts) { + if element.expiration.After(leaky.Ovflw_ts) { tmp = append(tmp, element) } else { - l.logger.Debugf("%s left blackhole %s ago", element.key, l.Ovflw_ts.Sub(element.expiration)) + leaky.logger.Debugf("%s left blackhole %s ago", element.key, leaky.Ovflw_ts.Sub(element.expiration)) } } bl.hiddenKeys = tmp if blackholed { - l.logger.Tracef("Event is blackholed (%s)", l.First_ts) - return types.SignalOccurence{ - MapKey: l.Mapkey, - // BucketConfiguration: bcfg, + leaky.logger.Tracef("Event is blackholed (%s)", leaky.First_ts) + return types.RuntimeAlert{ + Mapkey: leaky.Mapkey, }, nil } - bl.hiddenKeys = append(bl.hiddenKeys, HiddenKey{l.Mapkey, l.Ovflw_ts.Add(bl.duration)}) - l.logger.Debugf("Adding overflow to blackhole (%s)", l.First_ts) - return s, q + bl.hiddenKeys = append(bl.hiddenKeys, HiddenKey{leaky.Mapkey, leaky.Ovflw_ts.Add(bl.duration)}) + leaky.logger.Debugf("Adding overflow to blackhole (%s)", leaky.First_ts) + return alert, queue } } diff --git a/pkg/leakybucket/bucket.go b/pkg/leakybucket/bucket.go index bcbe2f422..7cc3f0db6 100644 --- a/pkg/leakybucket/bucket.go +++ b/pkg/leakybucket/bucket.go @@ -1,6 +1,7 @@ package leakybucket import ( + "fmt" "sync/atomic" "time" @@ -47,6 +48,7 @@ type Leaky struct { // chan for signaling Signal chan bool `json:"-"` Reprocess bool + Simulated bool Uuid string First_ts time.Time Last_ts time.Time @@ -57,9 +59,12 @@ type Leaky struct { Duration time.Duration Pour func(*Leaky, types.Event) `json:"-"` //Profiling when set to true enables profiling of bucket - Profiling bool - timedOverflow bool - 
logger *log.Entry + Profiling bool + timedOverflow bool + logger *log.Entry + scopeType types.ScopeType + hash string + scenarioVersion string } var BucketsPour = prometheus.NewCounterVec( @@ -108,49 +113,53 @@ var LeakyRoutineCount int64 // Events created by the bucket (overflow, bucket empty) are sent to a chan defined by BucketFactory // The leaky bucket implementation is based on rate limiter (see https://godoc.org/golang.org/x/time/rate) // There's a trick to have an event said when the bucket gets empty to allow its destruction -func NewLeaky(g BucketFactory) *Leaky { - g.logger.Tracef("Instantiating live bucket %s", g.Name) - return FromFactory(g) +func NewLeaky(bucketFactory BucketFactory) *Leaky { + bucketFactory.logger.Tracef("Instantiating live bucket %s", bucketFactory.Name) + return FromFactory(bucketFactory) } -func FromFactory(g BucketFactory) *Leaky { +func FromFactory(bucketFactory BucketFactory) *Leaky { var limiter rate.RateLimiter //golang rate limiter. It's mainly intended for http rate limiter - Qsize := g.Capacity - if g.CacheSize > 0 { + Qsize := bucketFactory.Capacity + if bucketFactory.CacheSize > 0 { //cache is smaller than actual capacity - if g.CacheSize <= g.Capacity { - Qsize = g.CacheSize + if bucketFactory.CacheSize <= bucketFactory.Capacity { + Qsize = bucketFactory.CacheSize //bucket might be counter (infinite size), allow cache limitation - } else if g.Capacity == -1 { - Qsize = g.CacheSize + } else if bucketFactory.Capacity == -1 { + Qsize = bucketFactory.CacheSize } } - if g.Capacity == -1 { + if bucketFactory.Capacity == -1 { //In this case we allow all events to pass. //maybe in the future we could avoid using a limiter limiter = &rate.AlwaysFull{} } else { - limiter = rate.NewLimiter(rate.Every(g.leakspeed), g.Capacity) + limiter = rate.NewLimiter(rate.Every(bucketFactory.leakspeed), bucketFactory.Capacity) } - BucketsInstanciation.With(prometheus.Labels{"name": g.Name}).Inc() + BucketsInstanciation.With(prometheus.Labels{"name": bucketFactory.Name}).Inc() //create the leaky bucket per se l := &Leaky{ - Name: g.Name, - Limiter: limiter, - Uuid: namegenerator.NewNameGenerator(time.Now().UTC().UnixNano()).Generate(), - Queue: NewQueue(Qsize), - CacheSize: g.CacheSize, - Out: make(chan *Queue, 1), - AllOut: g.ret, - Capacity: g.Capacity, - Leakspeed: g.leakspeed, - BucketConfig: &g, - Pour: Pour, - Reprocess: g.Reprocess, - Profiling: g.Profiling, - Mode: LIVE, + Name: bucketFactory.Name, + Limiter: limiter, + Uuid: namegenerator.NewNameGenerator(time.Now().UTC().UnixNano()).Generate(), + Queue: NewQueue(Qsize), + CacheSize: bucketFactory.CacheSize, + Out: make(chan *Queue, 1), + AllOut: bucketFactory.ret, + Capacity: bucketFactory.Capacity, + Leakspeed: bucketFactory.leakspeed, + BucketConfig: &bucketFactory, + Pour: Pour, + Reprocess: bucketFactory.Reprocess, + Profiling: bucketFactory.Profiling, + Mode: LIVE, + scopeType: bucketFactory.ScopeType, + scenarioVersion: bucketFactory.ScenarioVersion, + hash: bucketFactory.hash, + Simulated: bucketFactory.Simulated, } if l.BucketConfig.Capacity > 0 && l.BucketConfig.leakspeed != time.Duration(0) { l.Duration = time.Duration(l.BucketConfig.Capacity+1) * l.BucketConfig.leakspeed @@ -165,128 +174,145 @@ func FromFactory(g BucketFactory) *Leaky { /* for now mimic a leak routine */ //LeakRoutine us the life of a bucket. 
It dies when the bucket underflows or overflows
-func LeakRoutine(l *Leaky) {
+func LeakRoutine(leaky *Leaky) {
 	var (
 		durationTicker <-chan time.Time = make(<-chan time.Time)
 	)
-	BucketsCurrentCount.With(prometheus.Labels{"name": l.Name}).Inc()
-	defer BucketsCurrentCount.With(prometheus.Labels{"name": l.Name}).Dec()
+	defer types.CatchPanic(fmt.Sprintf("crowdsec/LeakRoutine/%s", leaky.Name))
+
+	BucketsCurrentCount.With(prometheus.Labels{"name": leaky.Name}).Inc()
+	defer BucketsCurrentCount.With(prometheus.Labels{"name": leaky.Name}).Dec()
 	/*todo : we create a logger at runtime while we want leakroutine to be up asap, might not be a good idea*/
-	l.logger = l.BucketConfig.logger.WithFields(log.Fields{"capacity": l.Capacity, "partition": l.Mapkey, "bucket_id": l.Uuid})
+	leaky.logger = leaky.BucketConfig.logger.WithFields(log.Fields{"capacity": leaky.Capacity, "partition": leaky.Mapkey, "bucket_id": leaky.Uuid})

-	l.Signal <- true
+	leaky.Signal <- true
 	atomic.AddInt64(&LeakyRoutineCount, 1)
 	defer atomic.AddInt64(&LeakyRoutineCount, -1)
-	for _, f := range l.BucketConfig.processors {
-		err := f.OnBucketInit(l.BucketConfig)
+	for _, f := range leaky.BucketConfig.processors {
+		err := f.OnBucketInit(leaky.BucketConfig)
 		if err != nil {
-			l.logger.Errorf("Problem at bucket initializiation. Bail out %T : %v", f, err)
-			close(l.Signal)
+			leaky.logger.Errorf("Problem at bucket initialization. Bail out %T : %v", f, err)
+			close(leaky.Signal)
 			return
 		}
 	}
-	l.logger.Debugf("Leaky routine starting, lifetime : %s", l.Duration)
+	leaky.logger.Debugf("Leaky routine starting, lifetime : %s", leaky.Duration)
 	for {
 		select {
 		/*receiving an event*/
-		case msg := <-l.In:
+		case msg := <-leaky.In:
 			/*the msg var use is confusing and is redeclared in a different type :/*/
-			for _, f := range l.BucketConfig.processors {
-				msg := f.OnBucketPour(l.BucketConfig)(msg, l)
+			for _, processor := range leaky.BucketConfig.processors {
+				msg := processor.OnBucketPour(leaky.BucketConfig)(msg, leaky)
 				// if &msg == nil we stop processing
 				if msg == nil {
 					goto End
 				}
 			}
-			l.logger.Tracef("Pour event: %s", spew.Sdump(msg))
-			l.logger.Debugf("Pouring event.")
+			if leaky.logger.Level >= log.TraceLevel {
+				leaky.logger.Tracef("Pour event: %s", spew.Sdump(msg))
+			}
+			BucketsPour.With(prometheus.Labels{"name": leaky.Name, "source": msg.Line.Src}).Inc()

-			BucketsPour.With(prometheus.Labels{"name": l.Name, "source": msg.Line.Src}).Inc()
-
-			l.Pour(l, msg) // glue for now
+			leaky.Pour(leaky, msg) // glue for now

 			//Clear cache on behalf of pour
-			tmp := time.NewTicker(l.Duration)
+			tmp := time.NewTicker(leaky.Duration)
 			durationTicker = tmp.C
-			l.Signal <- true
+			leaky.Signal <- true
 			defer tmp.Stop()
 		/*a kill chan to allow externally killing the leaky routines*/
-		case <-l.KillSwitch:
-			close(l.Signal)
-			l.logger.Debugf("Bucket externally killed, return")
-			l.AllOut <- types.Event{Overflow: types.SignalOccurence{MapKey: l.Mapkey}, Type: types.OVFLW}
+		case <-leaky.KillSwitch:
+			close(leaky.Signal)
+			leaky.logger.Debugf("Bucket externally killed, return")
+			leaky.AllOut <- types.Event{Type: types.OVFLW, Overflow: types.RuntimeAlert{Mapkey: leaky.Mapkey}}
 			return
 		/*we overflowed*/
-		case ofw := <-l.Out:
-			close(l.Signal)
-			sig := FormatOverflow(l, ofw)
-			l.logger.Tracef("Overflow hooks time : %v", l.BucketConfig.processors)
-			for _, f := range l.BucketConfig.processors {
-				sig, ofw = f.OnBucketOverflow(l.BucketConfig)(l, sig, ofw)
+		case ofw := <-leaky.Out:
+			close(leaky.Signal)
+			alert, err := NewAlert(leaky, ofw)
+			if err != nil {
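+				// best-effort: the error is only logged below; the (possibly
+				// zero-valued) alert still flows through the overflow hooks,
+				// so a failed NewAlert does not kill the bucket routine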
log.Errorf("%s", err) + } + leaky.logger.Tracef("Overflow hooks time : %v", leaky.BucketConfig.processors) + for _, f := range leaky.BucketConfig.processors { + alert, ofw = f.OnBucketOverflow(leaky.BucketConfig)(leaky, alert, ofw) if ofw == nil { - l.logger.Debugf("Overflow has been discard (%T)", f) + leaky.logger.Debugf("Overflow has been discarded (%T)", f) break } } - l.logger.Tracef("Overflow event: %s", spew.Sdump(types.Event{Overflow: sig})) - mt, _ := l.Ovflw_ts.MarshalText() - l.logger.Tracef("overflow time : %s", mt) + if leaky.logger.Level >= log.TraceLevel { + leaky.logger.Tracef("Overflow event: %s", spew.Sdump(types.RuntimeAlert(alert))) + } + mt, _ := leaky.Ovflw_ts.MarshalText() + leaky.logger.Tracef("overflow time : %s", mt) - BucketsOverflow.With(prometheus.Labels{"name": l.Name}).Inc() + BucketsOverflow.With(prometheus.Labels{"name": leaky.Name}).Inc() - l.AllOut <- types.Event{Overflow: sig, Type: types.OVFLW, MarshaledTime: string(mt)} + leaky.AllOut <- types.Event{Overflow: alert, Type: types.OVFLW, MarshaledTime: string(mt)} return /*we underflow or reach bucket deadline (timers)*/ case <-durationTicker: - l.Ovflw_ts = time.Now() - close(l.Signal) - ofw := l.Queue - sig := types.SignalOccurence{MapKey: l.Mapkey} + var ( + alert types.RuntimeAlert + err error + ) + leaky.Ovflw_ts = time.Now() + close(leaky.Signal) + ofw := leaky.Queue + alert = types.RuntimeAlert{Mapkey: leaky.Mapkey} - if l.timedOverflow { - BucketsOverflow.With(prometheus.Labels{"name": l.Name}).Inc() + if leaky.timedOverflow { + BucketsOverflow.With(prometheus.Labels{"name": leaky.Name}).Inc() - sig = FormatOverflow(l, ofw) - for _, f := range l.BucketConfig.processors { - sig, ofw = f.OnBucketOverflow(l.BucketConfig)(l, sig, ofw) + alert, err = NewAlert(leaky, ofw) + if err != nil { + log.Errorf("%s", err) + } + for _, f := range leaky.BucketConfig.processors { + alert, ofw = f.OnBucketOverflow(leaky.BucketConfig)(leaky, alert, ofw) if ofw == nil { - l.logger.Debugf("Overflow has been discard (%T)", f) + leaky.logger.Debugf("Overflow has been discarded (%T)", f) break } } - l.logger.Infof("Timed Overflow") + leaky.logger.Infof("Timed Overflow") } else { - l.logger.Debugf("bucket underflow, destroy") - BucketsUnderflow.With(prometheus.Labels{"name": l.Name}).Inc() + leaky.logger.Debugf("bucket underflow, destroy") + BucketsUnderflow.With(prometheus.Labels{"name": leaky.Name}).Inc() } - l.logger.Tracef("Overflow event: %s", spew.Sdump(types.Event{Overflow: sig})) + if leaky.logger.Level >= log.TraceLevel { + /*don't sdump if it's not going to printed, it's expensive*/ + leaky.logger.Tracef("Overflow event: %s", spew.Sdump(types.Event{Overflow: alert})) + } - l.AllOut <- types.Event{Overflow: sig, Type: types.OVFLW} - l.logger.Tracef("Returning from leaky routine.") + leaky.AllOut <- types.Event{Overflow: alert, Type: types.OVFLW} + leaky.logger.Tracef("Returning from leaky routine.") return } End: } } -func Pour(l *Leaky, msg types.Event) { +func Pour(leaky *Leaky, msg types.Event) { - l.Total_count += 1 - if l.First_ts.IsZero() { - l.First_ts = time.Now() + leaky.Total_count += 1 + if leaky.First_ts.IsZero() { + leaky.First_ts = time.Now() } - l.Last_ts = time.Now() - if l.Limiter.Allow() { - l.Queue.Add(msg) + leaky.Last_ts = time.Now() + if leaky.Limiter.Allow() { + leaky.Queue.Add(msg) } else { - l.Ovflw_ts = time.Now() - l.logger.Debugf("Last event to be poured, bucket overflow.") - l.Queue.Add(msg) - l.Out <- l.Queue + leaky.Ovflw_ts = time.Now() + leaky.logger.Debugf("Last event to be 
poured, bucket overflow.") + leaky.Queue.Add(msg) + leaky.Out <- leaky.Queue } } diff --git a/pkg/leakybucket/buckets_test.go b/pkg/leakybucket/buckets_test.go index b490c4634..76417a517 100644 --- a/pkg/leakybucket/buckets_test.go +++ b/pkg/leakybucket/buckets_test.go @@ -2,14 +2,17 @@ package leakybucket import ( "bytes" + "encoding/json" "fmt" "html/template" "io" "io/ioutil" "os" + "reflect" "testing" "time" + "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/exprhelpers" "github.com/crowdsecurity/crowdsec/pkg/parser" "github.com/crowdsecurity/crowdsec/pkg/types" @@ -81,12 +84,16 @@ func testOneBucket(t *testing.T, dir string) error { for _, x := range stages { files = append(files, x.Filename) } - holders, response, err := LoadBuckets(files, dir) + + cscfg := &csconfig.CrowdsecServiceCfg{ + DataDir: "tests", + } + holders, response, err := LoadBuckets(cscfg, files) if err != nil { t.Fatalf("failed loading bucket : %s", err) } - if !testFile(t, dir+"/test.yaml", dir+"/in-buckets_state.json", holders, response) { - t.Fatalf("the test failed") + if !testFile(t, dir+"/test.json", dir+"/in-buckets_state.json", holders, response) { + return fmt.Errorf("tests from %s failed", dir) } return nil } @@ -112,8 +119,9 @@ func testFile(t *testing.T, file string, bs string, holders []BucketFactory, res if err != nil { t.Errorf("yamlFile.Get err #%v ", err) } - dec := yaml.NewDecoder(yamlFile) - dec.SetStrict(true) + dec := json.NewDecoder(yamlFile) + dec.DisallowUnknownFields() + //dec.SetStrict(true) tf := TestFile{} err = dec.Decode(&tf) if err != nil { @@ -139,7 +147,7 @@ func testFile(t *testing.T, file string, bs string, holders []BucketFactory, res } in.ExpectMode = TIMEMACHINE - log.Debugf("Buckets input : %s", spew.Sdump(in)) + log.Infof("Buckets input : %s", spew.Sdump(in)) ok, err := PourItemToHolders(in, holders, buckets) if err != nil { t.Fatalf("Failed to pour : %s", err) @@ -161,7 +169,7 @@ POLL_AGAIN: log.Warningf("got one result") results = append(results, ret) if ret.Overflow.Reprocess { - log.Debugf("Overflow being reprocessed.") + log.Errorf("Overflow being reprocessed.") ok, err := PourItemToHolders(ret, holders, buckets) if err != nil { t.Fatalf("Failed to pour : %s", err) @@ -189,7 +197,7 @@ POLL_AGAIN: if len(tf.Results) == 0 && len(results) == 0 { log.Warningf("Test is successfull") if dump { - if tmpFile, err = DumpBucketsStateAt(latest_ts, buckets); err != nil { + if tmpFile, err = DumpBucketsStateAt(latest_ts, ".", buckets); err != nil { t.Fatalf("Failed dumping bucket state : %s", err) } log.Infof("dumped bucket to %s", tmpFile) @@ -199,7 +207,7 @@ POLL_AGAIN: log.Warningf("%d results to check against %d expected results", len(results), len(tf.Results)) if len(tf.Results) != len(results) { if dump { - if tmpFile, err = DumpBucketsStateAt(latest_ts, buckets); err != nil { + if tmpFile, err = DumpBucketsStateAt(latest_ts, ".", buckets); err != nil { t.Fatalf("Failed dumping bucket state : %s", err) } log.Infof("dumped bucket to %s", tmpFile) @@ -209,55 +217,69 @@ POLL_AGAIN: return false } } - var valid bool checkresultsloop: for eidx, out := range results { for ridx, expected := range tf.Results { - log.Debugf("Checking next expected result.") - valid = true + log.Tracef("Checking next expected result.") - log.Infof("go %s", spew.Sdump(out)) - //Scenario - if out.Overflow.Scenario != expected.Overflow.Scenario { - log.Errorf("(scenario) %s != %s", out.Overflow.Scenario, expected.Overflow.Scenario) - valid = false - continue + //empty 
overflow + if out.Overflow.Alert == nil && expected.Overflow.Alert == nil { + //match stuff } else { - log.Infof("(scenario) %s == %s", out.Overflow.Scenario, expected.Overflow.Scenario) - } - //Events_count - if out.Overflow.Events_count != expected.Overflow.Events_count { - log.Errorf("(Events_count) %d != %d", out.Overflow.Events_count, expected.Overflow.Events_count) - valid = false - continue - } else { - log.Infof("(Events_count) %d == %d", out.Overflow.Events_count, expected.Overflow.Events_count) - } - //Source_ip - if out.Overflow.Source_ip != expected.Overflow.Source_ip { - log.Errorf("(Source_ip) %s != %s", out.Overflow.Source_ip, expected.Overflow.Source_ip) - valid = false - continue - } else { - log.Infof("(Source_ip) %s == %s", out.Overflow.Source_ip, expected.Overflow.Source_ip) + if out.Overflow.Alert == nil || expected.Overflow.Alert == nil { + log.Printf("Here ?") + continue + } + //Scenario + + if *out.Overflow.Alert.Scenario != *expected.Overflow.Alert.Scenario { + log.Errorf("(scenario) %v != %v", *out.Overflow.Alert.Scenario, *expected.Overflow.Alert.Scenario) + continue + } else { + log.Infof("(scenario) %v == %v", *out.Overflow.Alert.Scenario, *expected.Overflow.Alert.Scenario) + } + //EventsCount + if *out.Overflow.Alert.EventsCount != *expected.Overflow.Alert.EventsCount { + log.Errorf("(EventsCount) %d != %d", *out.Overflow.Alert.EventsCount, *expected.Overflow.Alert.EventsCount) + continue + } else { + log.Infof("(EventsCount) %d == %d", *out.Overflow.Alert.EventsCount, *expected.Overflow.Alert.EventsCount) + } + //Sources + if !reflect.DeepEqual(out.Overflow.Sources, expected.Overflow.Sources) { + log.Errorf("(Sources %s != %s)", spew.Sdump(out.Overflow.Sources), spew.Sdump(expected.Overflow.Sources)) + continue + } else { + log.Infof("(Sources: %s == %s)", spew.Sdump(out.Overflow.Sources), spew.Sdump(expected.Overflow.Sources)) + } + } + //Events + // if !reflect.DeepEqual(out.Overflow.Alert.Events, expected.Overflow.Alert.Events) { + // log.Errorf("(Events %s != %s)", spew.Sdump(out.Overflow.Alert.Events), spew.Sdump(expected.Overflow.Alert.Events)) + // valid = false + // continue + // } else { + // log.Infof("(Events: %s == %s)", spew.Sdump(out.Overflow.Alert.Events), spew.Sdump(expected.Overflow.Alert.Events)) + // } //CheckFailed: - if valid { - log.Warningf("The test is valid, remove entry %d from expects, and %d from t.Results", eidx, ridx) - //don't do this at home : delete current element from list and redo - results[eidx] = results[len(results)-1] - results = results[:len(results)-1] - tf.Results[ridx] = tf.Results[len(tf.Results)-1] - tf.Results = tf.Results[:len(tf.Results)-1] - break checkresultsloop - } + log.Warningf("The test is valid, remove entry %d from expects, and %d from t.Results", eidx, ridx) + //don't do this at home : delete current element from list and redo + results[eidx] = results[len(results)-1] + results = results[:len(results)-1] + tf.Results[ridx] = tf.Results[len(tf.Results)-1] + tf.Results = tf.Results[:len(tf.Results)-1] + goto checkresultsloop } } - if !valid { - t.Fatalf("mismatching entries left") + if len(results) != 0 && len(tf.Results) != 0 { + log.Errorf("mismatching entries left") + log.Errorf("we got: %s", spew.Sdump(results)) + log.Errorf("we expected: %s", spew.Sdump(tf.Results)) + return false } else { log.Warningf("entry valid at end of loop") } diff --git a/pkg/leakybucket/manager_load.go b/pkg/leakybucket/manager_load.go index 9086a8e7e..445a70b2a 100644 --- a/pkg/leakybucket/manager_load.go +++ 
b/pkg/leakybucket/manager_load.go @@ -10,6 +10,8 @@ import ( "strings" "time" + "github.com/crowdsecurity/crowdsec/pkg/csconfig" + "github.com/crowdsecurity/crowdsec/pkg/cwhub" "github.com/crowdsecurity/crowdsec/pkg/cwversion" "github.com/crowdsecurity/crowdsec/pkg/types" @@ -28,82 +30,99 @@ import ( // BucketFactory struct holds all fields for any bucket configuration. This is to have a // generic struct for buckets. This can be seen as a bucket factory. type BucketFactory struct { - FormatVersion string `yaml:"format"` - Author string `yaml:"author"` - Description string `yaml:"description"` - References []string `yaml:"references"` - Type string `yaml:"type"` //Type can be : leaky, counter, trigger. It determines the main bucket characteristics - Name string `yaml:"name"` //Name of the bucket, used later in log and user-messages. Should be unique - Capacity int `yaml:"capacity"` //Capacity is applicable to leaky buckets and determines the "burst" capacity - LeakSpeed string `yaml:"leakspeed"` //Leakspeed is a float representing how many events per second leak out of the bucket - Duration string `yaml:"duration"` //Duration allows 'counter' buckets to have a fixed life-time - Filter string `yaml:"filter"` //Filter is an expr that determines if an event is elligible for said bucket. Filter is evaluated against the Event struct - GroupBy string `yaml:"groupby,omitempty"` //groupy is an expr that allows to determine the partitions of the bucket. A common example is the source_ip - Distinct string `yaml:"distinct"` //Distinct, when present, adds a `Pour()` processor that will only pour uniq items (based on uniq_filter expr result) - Debug bool `yaml:"debug"` //Debug, when set to true, will enable debugging for _this_ scenario specifically - Labels map[string]string `yaml:"labels"` //Labels is K:V list aiming at providing context the overflow - Blackhole string `yaml:"blackhole,omitempty"` //Blackhole is a duration that, if present, will prevent same bucket partition to overflow more often than $duration - logger *log.Entry `yaml:"-"` //logger is bucket-specific logger (used by Debug as well) - Reprocess bool `yaml:"reprocess"` //Reprocess, if true, will for the bucket to be re-injected into processing chain - CacheSize int `yaml:"cache_size"` //CacheSize, if > 0, limits the size of in-memory cache of the bucket - Profiling bool `yaml:"profiling"` //Profiling, if true, will make the bucket record pours/overflows/etc. - OverflowFilter string `yaml:"overflow_filter"` //OverflowFilter if present, is a filter that must return true for the overflow to go through - BucketName string `yaml:"-"` - Filename string `yaml:"-"` - RunTimeFilter *vm.Program `json:"-"` - ExprDebugger *exprhelpers.ExprDebugger `yaml:"-" json:"-"` // used to debug expression by printing the content of each variable of the expression - RunTimeGroupBy *vm.Program `json:"-"` - Data []*types.DataSource `yaml:"data,omitempty"` - leakspeed time.Duration //internal representation of `Leakspeed` - duration time.Duration //internal representation of `Duration` - ret chan types.Event //the bucket-specific output chan for overflows - processors []Processor //processors is the list of hooks for pour/overflow/create (cf. uniq, blackhole etc.) - output bool //?? + FormatVersion string `yaml:"format"` + Author string `yaml:"author"` + Description string `yaml:"description"` + References []string `yaml:"references"` + Type string `yaml:"type"` //Type can be : leaky, counter, trigger. 
It determines the main bucket characteristics
+	Name            string            `yaml:"name"`                //Name of the bucket, used later in logs and user messages. Should be unique
+	Capacity        int               `yaml:"capacity"`            //Capacity is applicable to leaky buckets and determines the "burst" capacity
+	LeakSpeed       string            `yaml:"leakspeed"`           //LeakSpeed is a duration (e.g. '10s') representing the interval between two leaks out of the bucket
+	Duration        string            `yaml:"duration"`            //Duration allows 'counter' buckets to have a fixed life-time
+	Filter          string            `yaml:"filter"`              //Filter is an expr that determines if an event is eligible for said bucket. Filter is evaluated against the Event struct
+	GroupBy         string            `yaml:"groupby,omitempty"`   //GroupBy is an expr that allows to determine the partitions of the bucket. A common example is the source_ip
+	Distinct        string            `yaml:"distinct"`            //Distinct, when present, adds a `Pour()` processor that will only pour unique items (based on distinct expr result)
+	Debug           bool              `yaml:"debug"`               //Debug, when set to true, will enable debugging for _this_ scenario specifically
+	Labels          map[string]string `yaml:"labels"`              //Labels is a K:V list aiming at providing context for the overflow
+	Blackhole       string            `yaml:"blackhole,omitempty"` //Blackhole is a duration that, if present, will prevent the same bucket partition from overflowing more often than $duration
+	logger          *log.Entry        `yaml:"-"`                   //logger is the bucket-specific logger (used by Debug as well)
+	Reprocess       bool              `yaml:"reprocess"`           //Reprocess, if true, will force the bucket to be re-injected into the processing chain
+	CacheSize       int               `yaml:"cache_size"`          //CacheSize, if > 0, limits the size of the in-memory cache of the bucket
+	Profiling       bool              `yaml:"profiling"`           //Profiling, if true, will make the bucket record pours/overflows/etc.
+	OverflowFilter  string            `yaml:"overflow_filter"`     //OverflowFilter, if present, is a filter that must return true for the overflow to go through
+	ScopeType       types.ScopeType   `yaml:"scope,omitempty"`     //to enforce a different remediation than blocking an IP. Will default this to IP
+	BucketName      string            `yaml:"-"`
+	Filename        string            `yaml:"-"`
+	RunTimeFilter   *vm.Program       `json:"-"`
+	ExprDebugger    *exprhelpers.ExprDebugger `yaml:"-" json:"-"` // used to debug expression by printing the content of each variable of the expression
+	RunTimeGroupBy  *vm.Program       `json:"-"`
+	Data            []*types.DataSource `yaml:"data,omitempty"`
+	DataDir         string            `yaml:"-"`
+	leakspeed       time.Duration     //internal representation of `Leakspeed`
+	duration        time.Duration     //internal representation of `Duration`
+	ret             chan types.Event  //the bucket-specific output chan for overflows
+	processors      []Processor       //processors is the list of hooks for pour/overflow/create (cf. uniq, blackhole etc.)
+	output          bool              //??
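+	// the three fields below are filled at load time from the hub item backing
+	// the scenario (see LoadBuckets): its local version, its content hash, and
+	// whether it is in the simulation exclusion list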
+	ScenarioVersion string `yaml:"version,omitempty"`
+	hash            string `yaml:"-"`
+	Simulated       bool   `yaml:"simulated"` //Set to true if the scenario instantiating the bucket was in the exclusion list
 }

-func ValidateFactory(b *BucketFactory) error {
-	if b.Name == "" {
+func ValidateFactory(bucketFactory *BucketFactory) error {
+	if bucketFactory.Name == "" {
 		return fmt.Errorf("bucket must have name")
 	}
-	if b.Description == "" {
+	if bucketFactory.Description == "" {
 		return fmt.Errorf("description is mandatory")
 	}
-	if b.Type == "leaky" {
-		if b.Capacity <= 0 { //capacity must be a positive int
-			return fmt.Errorf("bad capacity for leaky '%d'", b.Capacity)
+	if bucketFactory.Type == "leaky" {
+		if bucketFactory.Capacity <= 0 { //capacity must be a positive int
+			return fmt.Errorf("bad capacity for leaky '%d'", bucketFactory.Capacity)
 		}
-		if b.LeakSpeed == "" {
+		if bucketFactory.LeakSpeed == "" {
 			return fmt.Errorf("leakspeed can't be empty for leaky")
 		}
-		if b.leakspeed == 0 {
-			return fmt.Errorf("bad leakspeed for leaky '%s'", b.LeakSpeed)
+		if bucketFactory.leakspeed == 0 {
+			return fmt.Errorf("bad leakspeed for leaky '%s'", bucketFactory.LeakSpeed)
 		}
-	} else if b.Type == "counter" {
-		if b.Duration == "" {
+	} else if bucketFactory.Type == "counter" {
+		if bucketFactory.Duration == "" {
 			return fmt.Errorf("duration can't be empty for counter")
 		}
-		if b.duration == 0 {
-			return fmt.Errorf("bad duration for counter bucket '%d'", b.duration)
+		if bucketFactory.duration == 0 {
+			return fmt.Errorf("bad duration for counter bucket '%d'", bucketFactory.duration)
 		}
-		if b.Capacity != -1 {
+		if bucketFactory.Capacity != -1 {
 			return fmt.Errorf("counter bucket must have -1 capacity")
 		}
-	} else if b.Type == "trigger" {
-		if b.Capacity != 0 {
+	} else if bucketFactory.Type == "trigger" {
+		if bucketFactory.Capacity != 0 {
 			return fmt.Errorf("trigger bucket must have 0 capacity")
 		}
 	} else {
-		return fmt.Errorf("unknown bucket type '%s'", b.Type)
+		return fmt.Errorf("unknown bucket type '%s'", bucketFactory.Type)
+	}
+
+	switch bucketFactory.ScopeType.Scope {
+	case types.Undefined:
+		bucketFactory.ScopeType.Scope = types.Ip
+	case types.Ip:
+	case types.Range:
+	default:
+		//Compile the scope filter
+		var (
+			runTimeFilter *vm.Program
+			err           error
+		)
+		if runTimeFilter, err = expr.Compile(bucketFactory.ScopeType.Filter, expr.Env(exprhelpers.GetExprEnv(map[string]interface{}{"evt": &types.Event{}}))); err != nil {
+			return fmt.Errorf("error compiling the scope filter: %s", err)
+		}
+		bucketFactory.ScopeType.RunTimeFilter = runTimeFilter
 	}
 	return nil
 }

-/* Init recursively process yaml files from a directory and loads them as BucketFactory */
-func Init(cfg map[string]string) ([]BucketFactory, chan types.Event, error) {
-	return LoadBucketDir(cfg["patterns"], cfg["data"])
-}
-
-func LoadBuckets(files []string, dataFolder string) ([]BucketFactory, chan types.Event, error) {
+func LoadBuckets(cscfg *csconfig.CrowdsecServiceCfg, files []string) ([]BucketFactory, chan types.Event, error) {
 	var (
 		ret      []BucketFactory = []BucketFactory{}
 		response chan types.Event
@@ -113,7 +132,7 @@ func LoadBuckets(files []string, dataFolder string) ([]BucketFactory, chan types
 	response = make(chan types.Event, 1)
 	for _, f := range files {
-		log.Debugf("Loading %s", f)
+		log.Debugf("Loading '%s'", f)
 		if !strings.HasSuffix(f, ".yaml") {
 			log.Debugf("Skipping %s : not a yaml file", f)
 			continue
@@ -128,8 +147,8 @@ func LoadBuckets(files []string, dataFolder string) ([]BucketFactory, chan types
 		dec :=
yaml.NewDecoder(bucketConfigurationFile)
 		dec.SetStrict(true)
 		for {
-			g := BucketFactory{}
-			err = dec.Decode(&g)
+			bucketFactory := BucketFactory{}
+			err = dec.Decode(&bucketFactory)
 			if err != nil {
 				if err == io.EOF {
 					log.Tracef("End of yaml file")
@@ -139,172 +158,175 @@ func LoadBuckets(files []string, dataFolder string) ([]BucketFactory, chan types
 					return nil, nil, fmt.Errorf("bad yaml in %s : %v", f, err)
 				}
 			}
+			bucketFactory.DataDir = cscfg.DataDir
 			//check empty
-			if g.Name == "" {
+			if bucketFactory.Name == "" {
 				log.Errorf("Won't load nameless bucket")
 				return nil, nil, fmt.Errorf("nameless bucket")
 			}
 			//check compat
-			if g.FormatVersion == "" {
-				log.Debugf("no version in %s : %s, assuming '1.0'", g.Name, f)
-				g.FormatVersion = "1.0"
+			if bucketFactory.FormatVersion == "" {
+				log.Tracef("no version in %s : %s, assuming '1.0'", bucketFactory.Name, f)
+				bucketFactory.FormatVersion = "1.0"
 			}
-			ok, err := cwversion.Statisfies(g.FormatVersion, cwversion.Constraint_scenario)
+			ok, err := cwversion.Statisfies(bucketFactory.FormatVersion, cwversion.Constraint_scenario)
 			if err != nil {
 				log.Fatalf("Failed to check version : %s", err)
 			}
 			if !ok {
-				log.Errorf("can't load %s : %s doesn't satisfy scenario format %s, skip", g.Name, g.FormatVersion, cwversion.Constraint_scenario)
+				log.Errorf("can't load %s : %s doesn't satisfy scenario format %s, skip", bucketFactory.Name, bucketFactory.FormatVersion, cwversion.Constraint_scenario)
 				continue
 			}
-			g.Filename = filepath.Clean(f)
-			g.BucketName = seed.Generate()
-			g.ret = response
-			err = LoadBucket(&g, dataFolder)
+
+			bucketFactory.Filename = filepath.Clean(f)
+			bucketFactory.BucketName = seed.Generate()
+			bucketFactory.ret = response
+			hubItem, err := cwhub.GetItemByPath(cwhub.SCENARIOS, bucketFactory.Filename)
 			if err != nil {
-				log.Errorf("Failed to load bucket %s : %v", g.Name, err)
-				return nil, nil, fmt.Errorf("loading of %s failed : %v", g.Name, err)
+				log.Errorf("scenario %s (%s) couldn't be found in hub (ignore if in unit tests)", bucketFactory.Name, bucketFactory.Filename)
+			} else {
+				if cscfg.SimulationConfig != nil {
+					bucketFactory.Simulated = cscfg.SimulationConfig.IsSimulated(hubItem.Name)
+				}
+				if hubItem != nil {
+					bucketFactory.ScenarioVersion = hubItem.LocalVersion
+					bucketFactory.hash = hubItem.LocalHash
+				} else {
+					log.Errorf("scenario %s (%s) couldn't be found in hub (ignore if in unit tests)", bucketFactory.Name, bucketFactory.Filename)
+				}
 			}
-			ret = append(ret, g)
+
+			err = LoadBucket(&bucketFactory)
+			if err != nil {
+				log.Errorf("Failed to load bucket %s : %v", bucketFactory.Name, err)
+				return nil, nil, fmt.Errorf("loading of %s failed : %v", bucketFactory.Name, err)
+			}
+			ret = append(ret, bucketFactory)
 		}
 	}
 	log.Warningf("Loaded %d scenarios", len(ret))
 	return ret, response, nil
 }

-func LoadBucketDir(dir string, dataFolder string) ([]BucketFactory, chan types.Event, error) {
-	var (
-		filenames []string
-	)
-	files, err := ioutil.ReadDir(dir)
-	if err != nil {
-		return nil, nil, err
-	}
-	for _, f := range files {
-		filenames = append(filenames, dir+f.Name())
-	}
-	return LoadBuckets(filenames, dataFolder)
-}
-
 /* Init recursively process yaml files from a directory and loads them as BucketFactory */
-func LoadBucket(g *BucketFactory, dataFolder string) error {
+func LoadBucket(bucketFactory *BucketFactory) error {
 	var err error
-	if g.Debug {
+	if bucketFactory.Debug {
 		var clog = logrus.New()
 		if err := types.ConfigureLogger(clog); err != nil {
 			log.Fatalf("While creating bucket-specific logger : %s", err)
 		}
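+		// a dedicated logger is built here so that `debug: true` in one
+		// scenario raises verbosity for that scenario only, not globally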
clog.SetLevel(log.DebugLevel) - g.logger = clog.WithFields(log.Fields{ - "cfg": g.BucketName, - "name": g.Name, - "file": g.Filename, + bucketFactory.logger = clog.WithFields(log.Fields{ + "cfg": bucketFactory.BucketName, + "name": bucketFactory.Name, + "file": bucketFactory.Filename, }) } else { /* else bind it to the default one (might find something more elegant here)*/ - g.logger = log.WithFields(log.Fields{ - "cfg": g.BucketName, - "name": g.Name, - "file": g.Filename, + bucketFactory.logger = log.WithFields(log.Fields{ + "cfg": bucketFactory.BucketName, + "name": bucketFactory.Name, + "file": bucketFactory.Filename, }) } - if g.LeakSpeed != "" { - if g.leakspeed, err = time.ParseDuration(g.LeakSpeed); err != nil { - return fmt.Errorf("bad leakspeed '%s' in %s : %v", g.LeakSpeed, g.Filename, err) + if bucketFactory.LeakSpeed != "" { + if bucketFactory.leakspeed, err = time.ParseDuration(bucketFactory.LeakSpeed); err != nil { + return fmt.Errorf("bad leakspeed '%s' in %s : %v", bucketFactory.LeakSpeed, bucketFactory.Filename, err) } } else { - g.leakspeed = time.Duration(0) + bucketFactory.leakspeed = time.Duration(0) } - if g.Duration != "" { - if g.duration, err = time.ParseDuration(g.Duration); err != nil { - return fmt.Errorf("invalid Duration '%s' in %s : %v", g.Duration, g.Filename, err) + if bucketFactory.Duration != "" { + if bucketFactory.duration, err = time.ParseDuration(bucketFactory.Duration); err != nil { + return fmt.Errorf("invalid Duration '%s' in %s : %v", bucketFactory.Duration, bucketFactory.Filename, err) } } - if g.Filter == "" { - g.logger.Warningf("Bucket without filter, abort.") + if bucketFactory.Filter == "" { + bucketFactory.logger.Warningf("Bucket without filter, abort.") return fmt.Errorf("bucket without filter directive") } - g.RunTimeFilter, err = expr.Compile(g.Filter, expr.Env(exprhelpers.GetExprEnv(map[string]interface{}{"evt": &types.Event{}}))) + bucketFactory.RunTimeFilter, err = expr.Compile(bucketFactory.Filter, expr.Env(exprhelpers.GetExprEnv(map[string]interface{}{"evt": &types.Event{}}))) if err != nil { - return fmt.Errorf("invalid filter '%s' in %s : %v", g.Filter, g.Filename, err) + return fmt.Errorf("invalid filter '%s' in %s : %v", bucketFactory.Filter, bucketFactory.Filename, err) } - if g.Debug { - g.ExprDebugger, err = exprhelpers.NewDebugger(g.Filter, expr.Env(exprhelpers.GetExprEnv(map[string]interface{}{"evt": &types.Event{}}))) + if bucketFactory.Debug { + bucketFactory.ExprDebugger, err = exprhelpers.NewDebugger(bucketFactory.Filter, expr.Env(exprhelpers.GetExprEnv(map[string]interface{}{"evt": &types.Event{}}))) if err != nil { - log.Errorf("unable to build debug filter for '%s' : %s", g.Filter, err) + log.Errorf("unable to build debug filter for '%s' : %s", bucketFactory.Filter, err) } } - if g.GroupBy != "" { - g.RunTimeGroupBy, err = expr.Compile(g.GroupBy, expr.Env(exprhelpers.GetExprEnv(map[string]interface{}{"evt": &types.Event{}}))) + if bucketFactory.GroupBy != "" { + bucketFactory.RunTimeGroupBy, err = expr.Compile(bucketFactory.GroupBy, expr.Env(exprhelpers.GetExprEnv(map[string]interface{}{"evt": &types.Event{}}))) if err != nil { - return fmt.Errorf("invalid groupby '%s' in %s : %v", g.GroupBy, g.Filename, err) + return fmt.Errorf("invalid groupby '%s' in %s : %v", bucketFactory.GroupBy, bucketFactory.Filename, err) } } - g.logger.Infof("Adding %s bucket", g.Type) + bucketFactory.logger.Infof("Adding %s bucket", bucketFactory.Type) //return the Holder correponding to the type of bucket - g.processors = []Processor{} - 
switch g.Type { + bucketFactory.processors = []Processor{} + switch bucketFactory.Type { case "leaky": - g.processors = append(g.processors, &DumbProcessor{}) + bucketFactory.processors = append(bucketFactory.processors, &DumbProcessor{}) case "trigger": - g.processors = append(g.processors, &Trigger{}) + bucketFactory.processors = append(bucketFactory.processors, &Trigger{}) case "counter": - g.processors = append(g.processors, &DumbProcessor{}) + bucketFactory.processors = append(bucketFactory.processors, &DumbProcessor{}) default: - return fmt.Errorf("invalid type '%s' in %s : %v", g.Type, g.Filename, err) + return fmt.Errorf("invalid type '%s' in %s : %v", bucketFactory.Type, bucketFactory.Filename, err) } - if g.Distinct != "" { - g.logger.Debugf("Adding a non duplicate filter on %s.", g.Name) - g.processors = append(g.processors, &Uniq{}) + if bucketFactory.Distinct != "" { + bucketFactory.logger.Tracef("Adding a non-duplicate filter on %s.", bucketFactory.Name) + bucketFactory.processors = append(bucketFactory.processors, &Uniq{}) } - if g.OverflowFilter != "" { - g.logger.Debugf("Adding an overflow filter") - filovflw, err := NewOverflowFilter(g) + if bucketFactory.OverflowFilter != "" { + bucketFactory.logger.Tracef("Adding an overflow filter") + filovflw, err := NewOverflowFilter(bucketFactory) if err != nil { - g.logger.Errorf("Error creating overflow_filter : %s", err) + bucketFactory.logger.Errorf("Error creating overflow_filter : %s", err) return fmt.Errorf("error creating overflow_filter : %s", err) } - g.processors = append(g.processors, filovflw) + bucketFactory.processors = append(bucketFactory.processors, filovflw) } - if g.Blackhole != "" { - g.logger.Debugf("Adding blackhole.") - blackhole, err := NewBlackhole(g) + if bucketFactory.Blackhole != "" { + bucketFactory.logger.Tracef("Adding blackhole.") + blackhole, err := NewBlackhole(bucketFactory) if err != nil { - g.logger.Errorf("Error creating blackhole : %s", err) + bucketFactory.logger.Errorf("Error creating blackhole : %s", err) return fmt.Errorf("error creating blackhole : %s", err) } - g.processors = append(g.processors, blackhole) + bucketFactory.processors = append(bucketFactory.processors, blackhole) } - if len(g.Data) > 0 { - for _, data := range g.Data { + if len(bucketFactory.Data) > 0 { + for _, data := range bucketFactory.Data { if data.DestPath == "" { - g.logger.Errorf("no dest_file provided for '%s'", g.Name) + bucketFactory.logger.Errorf("no dest_file provided for '%s'", bucketFactory.Name) continue } - err = exprhelpers.FileInit(dataFolder, data.DestPath, data.Type) + err = exprhelpers.FileInit(bucketFactory.DataDir, data.DestPath, data.Type) if err != nil { - g.logger.Errorf("unable to init data for file '%s': %s", data.DestPath, err.Error()) + bucketFactory.logger.Errorf("unable to init data for file '%s': %s", data.DestPath, err.Error()) } } } - g.output = false - if err := ValidateFactory(g); err != nil { - return fmt.Errorf("invalid bucket from %s : %v", g.Filename, err) + bucketFactory.output = false + if err := ValidateFactory(bucketFactory); err != nil { + return fmt.Errorf("invalid bucket from %s : %v", bucketFactory.Filename, err) } return nil }
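For reference, LoadBucket's expression handling follows a compile-once / run-per-event pattern: the filter is compiled against a typed environment at load time, then evaluated for every incoming event. Below is a minimal, self-contained sketch of that pattern with the same antonmedv/expr library; the Event type and the meta key are illustrative stand-ins, not the real types.Event.

package main

import (
	"fmt"

	"github.com/antonmedv/expr"
)

// Event is a stand-in for types.Event, just enough to type the filter env.
type Event struct {
	Meta map[string]string
}

func main() {
	// Compile once at load time, as LoadBucket does for RunTimeFilter.
	program, err := expr.Compile(`evt.Meta["log_type"] == "ssh_failed-auth"`,
		expr.Env(map[string]interface{}{"evt": &Event{}}))
	if err != nil {
		panic(err) // an invalid filter aborts the bucket load
	}
	// Run once per event; anything but a bool result is a configuration error.
	output, err := expr.Run(program, map[string]interface{}{
		"evt": &Event{Meta: map[string]string{"log_type": "ssh_failed-auth"}},
	})
	if err != nil {
		panic(err)
	}
	matched, ok := output.(bool)
	fmt.Println(matched, ok) // true true
}

GroupBy expressions go through the same compile step; the only difference is that their result is asserted to string (the bucket partition key) rather than bool.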
-func LoadBucketsState(file string, buckets *Buckets, holders []BucketFactory) error { +func LoadBucketsState(file string, buckets *Buckets, bucketFactories []BucketFactory) error { var state map[string]Leaky body, err := ioutil.ReadFile(file) if err != nil { @@ -322,7 +344,7 @@ func LoadBucketsState(file string, buckets *Buckets, holders []BucketFactory) er } //find back our holder found := false - for _, h := range holders { + for _, h := range bucketFactories { if h.Name == v.Name { log.Debugf("found factory %s/%s -> %s", h.Author, h.Name, h.Description) //check in which mode the bucket was diff --git a/pkg/leakybucket/manager_load_test.go b/pkg/leakybucket/manager_load_test.go index 861679673..caf6943f1 100644 --- a/pkg/leakybucket/manager_load_test.go +++ b/pkg/leakybucket/manager_load_test.go @@ -13,7 +13,7 @@ type cfgTest struct { func runTest(tests []cfgTest) error { for idx, cfg := range tests { - err := LoadBucket(&cfg.cfg, ".") + err := LoadBucket(&cfg.cfg) if cfg.loadable && err != nil { return fmt.Errorf("expected loadable result (%d/%d), got: %s", idx+1, len(tests), err) } diff --git a/pkg/leakybucket/manager_run.go b/pkg/leakybucket/manager_run.go index 1f6917a6d..7658a5643 100644 --- a/pkg/leakybucket/manager_run.go +++ b/pkg/leakybucket/manager_run.go @@ -52,7 +52,7 @@ func GarbageCollectBuckets(deadline time.Time, buckets *Buckets) error { val.KillSwitch <- true return true } else { - val.logger.Debugf("(%s) not dead, count:%f capacity:%f", val.First_ts, tokat, tokcapa) + val.logger.Tracef("(%s) not dead, count:%f capacity:%f", val.First_ts, tokat, tokcapa) } if _, ok := serialized[key]; ok { log.Errorf("entry %s already exists", key) @@ -69,8 +69,12 @@ func GarbageCollectBuckets(deadline time.Time, buckets *Buckets) error { return nil } -func DumpBucketsStateAt(deadline time.Time, buckets *Buckets) (string, error) { +func DumpBucketsStateAt(deadline time.Time, outputdir string, buckets *Buckets) (string, error) { //var file string + + if outputdir == "" { + return "", fmt.Errorf("empty output dir for dump bucket state") + } tmpFd, err := ioutil.TempFile(os.TempDir(), "crowdsec-buckets-dump-") if err != nil { return "", fmt.Errorf("failed to create temp file : %s", err) } @@ -148,7 +152,7 @@ func PourItemToHolders(parsed types.Event, holders []BucketFactory, buckets *Buc for idx, holder := range holders { if holder.RunTimeFilter != nil { - log.Debugf("event against holder %d/%d", idx, len(holders)) + holder.logger.Tracef("event against holder %d/%d", idx, len(holders)) output, err := expr.Run(holder.RunTimeFilter, exprhelpers.GetExprEnv(map[string]interface{}{"evt": &parsed})) if err != nil { holder.logger.Errorf("failed parsing : %v", err) @@ -157,14 +161,14 @@ func PourItemToHolders(parsed types.Event, holders []BucketFactory, buckets *Buc // we assume the result is a bool; should add a type check here if condition, ok = output.(bool); !ok { holder.logger.Errorf("unexpected non-bool return : %T", output) - log.Fatalf("Filter issue") + holder.logger.Fatalf("Filter issue") } if holder.Debug { holder.ExprDebugger.Run(holder.logger, condition, exprhelpers.GetExprEnv(map[string]interface{}{"evt": &parsed})) } if !condition { - holder.logger.Debugf("Event leaving node : ko") + holder.logger.Debugf("Event leaving node : ko (filter mismatch)") continue } } @@ -174,12 +178,12 @@ func PourItemToHolders(parsed types.Event, holders []BucketFactory, buckets *Buc if holder.RunTimeGroupBy != nil { tmpGroupBy, err := expr.Run(holder.RunTimeGroupBy, exprhelpers.GetExprEnv(map[string]interface{}{"evt": &parsed})) if err != nil { - log.Errorf("failed groupby : %v", err) + holder.logger.Errorf("failed groupby : %v", err) return false, errors.New("leaky failed :/") } if groupby, ok = tmpGroupBy.(string); !ok { - log.Fatalf("failed groupby type : %v", err) + holder.logger.Fatalf("failed groupby type : %v", err) return false, 
errors.New("groupby wrong type") } } @@ -194,7 +198,7 @@ func PourItemToHolders(parsed types.Event, holders []BucketFactory, buckets *Buc attempts += 1 /* Warn the user if we used more than a 100 ms to pour an event, it's at least an half lock*/ if attempts%100000 == 0 && start.Add(100*time.Millisecond).Before(time.Now()) { - log.Warningf("stuck for %s sending event to %s (sigclosed:%d keymiss:%d failed_sent:%d attempts:%d)", time.Since(start), + holder.logger.Warningf("stuck for %s sending event to %s (sigclosed:%d keymiss:%d failed_sent:%d attempts:%d)", time.Since(start), buckey, sigclosed, keymiss, failed_sent, attempts) } biface, ok := buckets.Bucket_map.Load(buckey) @@ -205,7 +209,7 @@ func PourItemToHolders(parsed types.Event, holders []BucketFactory, buckets *Buc not found in map */ - log.Debugf("Creating bucket %s", buckey) + holder.logger.Debugf("Creating bucket %s", buckey) keymiss += 1 var fresh_bucket *Leaky @@ -217,7 +221,7 @@ func PourItemToHolders(parsed types.Event, holders []BucketFactory, buckets *Buc fresh_bucket = NewLeaky(holder) holder.logger.Debugf("Creating Live bucket") default: - log.Fatalf("input event has no expected mode, malformed : %+v", parsed) + holder.logger.Fatalf("input event has no expected mode, malformed : %+v", parsed) } fresh_bucket.In = make(chan types.Event) fresh_bucket.Mapkey = buckey @@ -225,7 +229,7 @@ func PourItemToHolders(parsed types.Event, holders []BucketFactory, buckets *Buc fresh_bucket.KillSwitch = make(chan bool, 1) buckets.Bucket_map.Store(buckey, fresh_bucket) go LeakRoutine(fresh_bucket) - log.Debugf("Created new bucket %s", buckey) + holder.logger.Debugf("Created new bucket %s", buckey) //wait for signal to be opened <-fresh_bucket.Signal continue @@ -242,11 +246,11 @@ func PourItemToHolders(parsed types.Event, holders []BucketFactory, buckets *Buc sigclosed += 1 continue } - log.Debugf("Signal exists, try to pour :)") + holder.logger.Tracef("Signal exists, try to pour :)") default: /*nothing to read, but not closed, try to pour */ - log.Debugf("Signal exists but empty, try to pour :)") + holder.logger.Tracef("Signal exists but empty, try to pour :)") } /*let's see if this time-bucket should have expired */ @@ -254,10 +258,10 @@ func PourItemToHolders(parsed types.Event, holders []BucketFactory, buckets *Buc var d time.Time err = d.UnmarshalText([]byte(parsed.MarshaledTime)) if err != nil { - log.Warningf("Failed unmarshaling event time (%s) : %v", parsed.MarshaledTime, err) + holder.logger.Warningf("Failed unmarshaling event time (%s) : %v", parsed.MarshaledTime, err) } if d.After(bucket.Last_ts.Add(bucket.Duration)) { - bucket.logger.Debugf("bucket is expired (curr event: %s, bucket deadline: %s), kill", d, bucket.Last_ts.Add(bucket.Duration)) + bucket.logger.Tracef("bucket is expired (curr event: %s, bucket deadline: %s), kill", d, bucket.Last_ts.Add(bucket.Duration)) buckets.Bucket_map.Delete(buckey) continue } @@ -266,19 +270,19 @@ func PourItemToHolders(parsed types.Event, holders []BucketFactory, buckets *Buc select { case bucket.In <- parsed: - log.Debugf("Successfully sent !") + holder.logger.Tracef("Successfully sent !") //sent was successful ! 
diff --git a/pkg/leakybucket/manager_run_test.go b/pkg/leakybucket/manager_run_test.go index e5b01de2d..6a98aa654 100644 --- a/pkg/leakybucket/manager_run_test.go +++ b/pkg/leakybucket/manager_run_test.go @@ -35,7 +35,7 @@ func TestGCandDump(t *testing.T) { } for idx := range Holders { - if err := LoadBucket(&Holders[idx], "."); err != nil { + if err := LoadBucket(&Holders[idx]); err != nil { t.Fatalf("while loading (%d/%d): %s", idx, len(Holders), err) } if err := ValidateFactory(&Holders[idx]); err != nil { @@ -73,14 +73,13 @@ func TestGCandDump(t *testing.T) { log.Printf("Dumping buckets state") //dump remaining buckets - if _, err := DumpBucketsStateAt(time.Now(), buckets); err != nil { + if _, err := DumpBucketsStateAt(time.Now(), ".", buckets); err != nil { t.Fatalf("failed to dump buckets : %s", err) } } -func TestBucketsShutdown(t *testing.T) { +func TestShutdownBuckets(t *testing.T) { var buckets *Buckets = NewBuckets() - var Holders = []BucketFactory{ //one long counter BucketFactory{Name: "test_counter_slow", Description: "test_counter_slow", Debug: true, Type: "counter", Capacity: -1, Duration: "10m", Filter: "true"}, } for idx := range Holders { - if err := LoadBucket(&Holders[idx], "."); err != nil { + if err := LoadBucket(&Holders[idx]); err != nil { t.Fatalf("while loading (%d/%d): %s", idx, len(Holders), err) } if err := ValidateFactory(&Holders[idx]); err != nil { diff --git a/pkg/leakybucket/overflow_filter.go b/pkg/leakybucket/overflow_filter.go index 7df119d3f..afb600d0c 100644 --- a/pkg/leakybucket/overflow_filter.go +++ b/pkg/leakybucket/overflow_filter.go @@ -28,7 +28,7 @@ func NewOverflowFilter(g *BucketFactory) (*OverflowFilter, error) { u := OverflowFilter{} u.Filter = g.OverflowFilter u.FilterRuntime, err = expr.Compile(u.Filter, expr.Env(exprhelpers.GetExprEnv(map[string]interface{}{ - "queue": &Queue{}, "signal": &types.SignalOccurence{}, "leaky": &Leaky{}}))) + "queue": &Queue{}, "signal": &types.RuntimeAlert{}, "leaky": &Leaky{}}))) if err != nil { g.logger.Errorf("Unable to compile filter : %v", err) return nil, fmt.Errorf("unable to compile filter : %v", err) @@ -36,8 +36,8 @@ func NewOverflowFilter(g *BucketFactory) (*OverflowFilter, error) { return &u, nil } -func (u *OverflowFilter) OnBucketOverflow(Bucket *BucketFactory) func(*Leaky, types.SignalOccurence, *Queue) (types.SignalOccurence, *Queue) { - return func(l *Leaky, s types.SignalOccurence, q *Queue) (types.SignalOccurence, *Queue) { +func (u *OverflowFilter) OnBucketOverflow(Bucket *BucketFactory) func(*Leaky, types.RuntimeAlert, *Queue) (types.RuntimeAlert, *Queue) { + return func(l *Leaky, s types.RuntimeAlert, q *Queue) (types.RuntimeAlert, *Queue) { el, err := expr.Run(u.FilterRuntime, exprhelpers.GetExprEnv(map[string]interface{}{ "queue": q, "signal": s, "leaky": l})) if err != nil { @@ -52,12 +52,11 @@ func (u *OverflowFilter) OnBucketOverflow(Bucket *BucketFactory) func(*Leaky, ty /*filter returned false, event is blackholed*/ if !element { l.logger.Infof("Event is discarded by overflow filter (%s)", u.Filter) - return types.SignalOccurence{ - MapKey: l.Mapkey, - // BucketConfiguration: bcfg, + return types.RuntimeAlert{ + Mapkey: l.Mapkey, }, nil } else { - l.logger.Debugf("Event is not discard by overflow filter (%s)", u.Filter) + l.logger.Tracef("Event is not discarded by overflow filter (%s)", u.Filter) } return s, q } 
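As the overflow_filter hunk above shows, the filter is compiled once against a queue/signal/leaky environment and then evaluated on each overflow; a false result discards the alert, keeping only the map key. The following standalone sketch of that decision uses simplified stand-ins for the three environment types, not the real Queue, types.RuntimeAlert, or Leaky.

package main

import (
	"fmt"

	"github.com/antonmedv/expr"
)

// Simplified stand-ins for the real Queue, types.RuntimeAlert, and Leaky.
type Queue struct{ Len int }
type RuntimeAlert struct{ Mapkey string }
type Leaky struct{ Name, Mapkey string }

func main() {
	// Compiled once, as in NewOverflowFilter.
	prog, err := expr.Compile(`queue.Len >= 2`, expr.Env(map[string]interface{}{
		"queue": &Queue{}, "signal": &RuntimeAlert{}, "leaky": &Leaky{},
	}))
	if err != nil {
		panic(err)
	}
	// Evaluated at overflow time, as in OnBucketOverflow.
	l := &Leaky{Name: "demo", Mapkey: "abcd"}
	out, err := expr.Run(prog, map[string]interface{}{
		"queue": &Queue{Len: 1}, "signal": &RuntimeAlert{}, "leaky": l,
	})
	if err != nil {
		panic(err)
	}
	if keep, ok := out.(bool); !ok || !keep {
		// a false result maps to the bare RuntimeAlert{Mapkey: ...} return above
		fmt.Println("alert discarded, keeping only mapkey", l.Mapkey)
	}
}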
diff --git a/pkg/leakybucket/overflows.go b/pkg/leakybucket/overflows.go index f90114626..866c42e3f 100644 --- a/pkg/leakybucket/overflows.go +++ b/pkg/leakybucket/overflows.go @@ -1,135 +1,251 @@ package leakybucket import ( - "encoding/json" "fmt" "net" "strconv" + "github.com/crowdsecurity/crowdsec/pkg/models" "github.com/crowdsecurity/crowdsec/pkg/types" + "github.com/davecgh/go-spew/spew" + "github.com/go-openapi/strfmt" + "github.com/pkg/errors" + log "github.com/sirupsen/logrus" + + "github.com/antonmedv/expr" + "github.com/crowdsecurity/crowdsec/pkg/exprhelpers" ) -func FormatOverflow(l *Leaky, queue *Queue) types.SignalOccurence { - var am string +//SourceFromEvent extracts and formats a valid models.Source object from an Event +func SourceFromEvent(evt types.Event, leaky *Leaky) (map[string]models.Source, error) { + src := models.Source{} + srcs := make(map[string]models.Source) - l.logger.Debugf("Overflow (start: %s, end: %s)", l.First_ts, l.Ovflw_ts) - - sig := types.SignalOccurence{ - Scenario: l.Name, - Bucket_id: l.Uuid, - Alert_message: am, - Start_at: l.First_ts, - Stop_at: l.Ovflw_ts, - Events_count: l.Total_count, - Capacity: l.Capacity, - Reprocess: l.Reprocess, - Leak_speed: l.Leakspeed, - MapKey: l.Mapkey, - Sources: make(map[string]types.Source), - Labels: l.BucketConfig.Labels, + if evt.Type == types.OVFLW { + return evt.Overflow.Sources, nil } + switch leaky.scopeType.Scope { + case types.Range, types.Ip: + if v, ok := evt.Meta["source_ip"]; ok { + if net.ParseIP(v) == nil { + return srcs, fmt.Errorf("scope is %s but '%s' isn't a valid ip", leaky.scopeType.Scope, v) + } else { + src.IP = v + } + } else { + return srcs, fmt.Errorf("scope is %s but Meta[source_ip] doesn't exist", leaky.scopeType.Scope) + } + + src.Scope = &leaky.scopeType.Scope + if v, ok := evt.Enriched["ASNumber"]; ok { + src.AsNumber = v + } + if v, ok := evt.Enriched["IsoCode"]; ok { + src.Cn = v + } + if v, ok := evt.Enriched["ASNOrg"]; ok { + src.AsName = v + } + if v, ok := evt.Enriched["Latitude"]; ok { + l, err := strconv.ParseFloat(v, 32) + if err != nil { + log.Warningf("bad latitude %s : %s", v, err) + } + src.Latitude = float32(l) + } + if v, ok := evt.Enriched["Longitude"]; ok { + l, err := strconv.ParseFloat(v, 32) + if err != nil { + log.Warningf("bad longitude %s : %s", v, err) + } + src.Longitude = float32(l) + } + if v, ok := evt.Meta["SourceRange"]; ok && v != "" { + _, ipNet, err := net.ParseCIDR(v) + if err != nil { + return srcs, fmt.Errorf("declared range %s of %s can't be parsed", v, src.IP) + } else if ipNet != nil { + src.Range = ipNet.String() + leaky.logger.Tracef("Valid range from %s : %s", src.IP, src.Range) + } + } + if leaky.scopeType.Scope == types.Ip { + src.Value = &src.IP + } else if leaky.scopeType.Scope == types.Range { + src.Value = &src.Range + } + srcs[src.IP] = src + default: + if leaky.scopeType.RunTimeFilter != nil { + retValue, err := expr.Run(leaky.scopeType.RunTimeFilter, exprhelpers.GetExprEnv(map[string]interface{}{"evt": &evt})) + if err != nil { + return srcs, errors.Wrapf(err, "while running scope filter") + } + + value, ok := retValue.(string) + if !ok { + value = "" + } + src.Value = &value + src.Scope = new(string) + *src.Scope = leaky.scopeType.Scope + srcs[*src.Value] = src + log.Debugf("source[%s] - %s = %s", leaky.Name, leaky.scopeType.Scope, *src.Value) + } else { + 
return srcs, fmt.Errorf("empty scope information") + } + } + return srcs, nil +} + +//EventsFromQueue iterates the queue to collect & prepare event metadata for the alert +func EventsFromQueue(queue *Queue) []*models.Event { + + events := []*models.Event{} for _, evt := range queue.Queue { - //either it's a collection of logs, or a collection of past overflows being reprocessed. - //one overflow can have multiple sources for example - if evt.Type == types.LOG { - if _, ok := evt.Meta["source_ip"]; !ok { - continue - } - source_ip := evt.Meta["source_ip"] - if _, ok := sig.Sources[source_ip]; !ok { - src := types.Source{} - src.Ip = net.ParseIP(source_ip) - if v, ok := evt.Enriched["ASNNumber"]; ok { - src.AutonomousSystemNumber = v - } - if v, ok := evt.Enriched["IsoCode"]; ok { - src.Country = v - } - if v, ok := evt.Enriched["ASNOrg"]; ok { - src.AutonomousSystemOrganization = v - } - if v, ok := evt.Enriched["Latitude"]; ok { - src.Latitude, _ = strconv.ParseFloat(v, 32) - } - if v, ok := evt.Enriched["Longitude"]; ok { - src.Longitude, _ = strconv.ParseFloat(v, 32) - } - if v, ok := evt.Meta["SourceRange"]; ok { - _, ipNet, err := net.ParseCIDR(v) - if err != nil { - l.logger.Errorf("Declared range %s of %s can't be parsed", v, src.Ip.String()) - } else if ipNet != nil { - src.Range = *ipNet - l.logger.Tracef("Valid range from %s : %s", src.Ip.String(), src.Range.String()) - } - } - sig.Sources[source_ip] = src - if sig.Source == nil { - sig.Source = &src - sig.Source_ip = src.Ip.String() - sig.Source_AutonomousSystemNumber = src.AutonomousSystemNumber - sig.Source_AutonomousSystemOrganization = src.AutonomousSystemOrganization - sig.Source_Country = src.Country - sig.Source_range = src.Range.String() - sig.Source_Latitude = src.Latitude - sig.Source_Longitude = src.Longitude - } - } - } else if evt.Type == types.OVFLW { - for _, src := range evt.Overflow.Sources { - if _, ok := sig.Sources[src.Ip.String()]; !ok { - sig.Sources[src.Ip.String()] = src - if sig.Source == nil { - l.logger.Tracef("populating overflow with source : %+v", src) - src := src //src will be reused, copy before giving pointer - sig.Source = &src - sig.Source_ip = src.Ip.String() - sig.Source_AutonomousSystemNumber = src.AutonomousSystemNumber - sig.Source_AutonomousSystemOrganization = src.AutonomousSystemOrganization - sig.Source_Country = src.Country - sig.Source_range = src.Range.String() - sig.Source_Latitude = src.Latitude - sig.Source_Longitude = src.Longitude - } - } - - } - - strret, err := json.Marshal(evt.Meta) - if err != nil { - l.logger.Errorf("failed to marshal ret : %v", err) + if evt.Meta == nil { continue } - if sig.Source != nil { - sig.Events_sequence = append(sig.Events_sequence, types.EventSequence{ - Source: *sig.Source, - Source_ip: sig.Source_ip, - Source_AutonomousSystemNumber: sig.Source.AutonomousSystemNumber, - Source_AutonomousSystemOrganization: sig.Source.AutonomousSystemOrganization, - Source_Country: sig.Source.Country, - Serialized: string(strret), - Time: l.First_ts}) + meta := models.Meta{} + for k, v := range evt.Meta { + subMeta := models.MetaItems0{Key: k, Value: v} + meta = append(meta, &subMeta) + } + + /*check which date to use*/ + ovflwEvent := models.Event{ + Meta: meta, + } + //either MarshaledTime is present and is extracted from log + if evt.MarshaledTime != "" { + ovflwEvent.Timestamp = &evt.MarshaledTime + } else if !evt.Time.IsZero() { //or .Time has been set during parse as time.Now() + ovflwEvent.Timestamp = new(string) + raw, err := evt.Time.MarshalText() + if 
err != nil { + log.Warningf("while marshaling time '%s' : %s", evt.Time.String(), err) + } else { + *ovflwEvent.Timestamp = string(raw) + } } else { - l.logger.Warningf("Event without source ?!") + log.Warningf("Event has no parsed time, no runtime timestamp") + } + + events = append(events, &ovflwEvent) + } + return events +} + +//alertFormatSource iterates over the queue to collect sources +func alertFormatSource(leaky *Leaky, queue *Queue) (map[string]models.Source, string, error) { + var sources map[string]models.Source = make(map[string]models.Source) + var source_type string + + log.Debugf("Formatting (%s) - scope Info : scope_type:%s / scope_filter:%s", leaky.Name, leaky.scopeType.Scope, leaky.scopeType.Filter) + + for _, evt := range queue.Queue { + srcs, err := SourceFromEvent(evt, leaky) + if err != nil { + return nil, "", errors.Wrapf(err, "while extracting scope from bucket %s", leaky.Name) + } + for key, src := range srcs { + if source_type == types.Undefined { + source_type = *src.Scope + } + if *src.Scope != source_type { + return nil, "", + fmt.Errorf("event has multiple source types : %s != %s", *src.Scope, source_type) + } + sources[key] = src } } + return sources, source_type, nil +} - if len(sig.Sources) > 1 { - am = fmt.Sprintf("%d IPs", len(sig.Sources)) - } else if len(sig.Sources) == 1 { - if sig.Source != nil { - am = sig.Source.Ip.String() - } else { - am = "??" +//NewAlert will generate a RuntimeAlert and its APIAlert(s) from a bucket that overflowed +func NewAlert(leaky *Leaky, queue *Queue) (types.RuntimeAlert, error) { + + var runtimeAlert types.RuntimeAlert + + leaky.logger.Tracef("Overflow (start: %s, end: %s)", leaky.First_ts, leaky.Ovflw_ts) + /* + Craft the models.Alert that is going to be duplicated for each source + */ + start_at, err := leaky.First_ts.MarshalText() + if err != nil { + log.Warningf("failed to marshal start ts %s : %s", leaky.First_ts.String(), err) + } + stop_at, err := leaky.Ovflw_ts.MarshalText() + if err != nil { + log.Warningf("failed to marshal ovflw ts %s : %s", leaky.Ovflw_ts.String(), err) + } + capacity := int32(leaky.Capacity) + EventsCount := int32(leaky.Total_count) + leakSpeed := leaky.Leakspeed.String() + startAt := string(start_at) + stopAt := string(stop_at) + apiAlert := models.Alert{ + Scenario: &leaky.Name, + ScenarioHash: &leaky.hash, + ScenarioVersion: &leaky.scenarioVersion, + Capacity: &capacity, + EventsCount: &EventsCount, + Leakspeed: &leakSpeed, + Message: new(string), + StartAt: &startAt, + StopAt: &stopAt, + Simulated: &leaky.Simulated, + } + if leaky.BucketConfig == nil { + return runtimeAlert, fmt.Errorf("leaky.BucketConfig is nil") + } + + //give information about the bucket + runtimeAlert.Mapkey = leaky.Mapkey + + //Get the sources from Leaky/Queue + sources, source_scope, err := alertFormatSource(leaky, queue) + if err != nil { + return runtimeAlert, errors.Wrap(err, "unable to collect sources from bucket") + } + runtimeAlert.Sources = sources + //Include source info in format string + sourceStr := "" + if len(sources) > 1 { + sourceStr = fmt.Sprintf("%d Sources on scope.", len(sources)) + } else if len(sources) == 1 { + for k := range sources { + sourceStr = k + break } } else { - am = "UNKNOWN" + sourceStr = "UNKNOWN" + } + *apiAlert.Message = fmt.Sprintf("%s %s performed '%s' (%d events over %s) at %s", source_scope, sourceStr, leaky.Name, leaky.Total_count, leaky.Ovflw_ts.Sub(leaky.First_ts), leaky.Last_ts) + //Get the events from Leaky/Queue + apiAlert.Events = EventsFromQueue(queue) + + //Loop over the Sources and generate appropriate number of ApiAlerts + for _, srcValue := range sources { + newApiAlert := apiAlert + srcCopy := srcValue + newApiAlert.Source = &srcCopy + if v, ok := leaky.BucketConfig.Labels["remediation"]; ok && v == "true" { + newApiAlert.Remediation = true + } + + if err := newApiAlert.Validate(strfmt.Default); err != nil { + log.Errorf("Generated alert isn't valid") + log.Errorf("->%s", spew.Sdump(newApiAlert)) + log.Fatalf("error : %s", err) + } + runtimeAlert.APIAlerts = append(runtimeAlert.APIAlerts, newApiAlert) } + runtimeAlert.Alert = &runtimeAlert.APIAlerts[0] + if leaky.Reprocess { + runtimeAlert.Reprocess = true + } + return runtimeAlert, nil }
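One detail in NewAlert worth flagging: srcCopy := srcValue before taking the address. Each APIAlert stores a *Source, and the range variable is reused on every iteration, so taking &srcValue directly would leave every alert pointing at the last source visited — the same pitfall the removed FormatOverflow code guarded against with its "copy before giving pointer" comment. A minimal reproduction of the aliasing bug the copy avoids, with stand-in types:

package main

import "fmt"

type Source struct{ IP string }
type Alert struct{ Source *Source }

func main() {
	sources := map[string]Source{"a": {IP: "1.2.3.4"}, "b": {IP: "5.6.7.8"}}
	var alerts []Alert
	for _, src := range sources {
		srcCopy := src // copy first: the range variable src is reused each pass
		alerts = append(alerts, Alert{Source: &srcCopy})
	}
	for _, a := range alerts {
		fmt.Println(a.Source.IP) // both IPs appear, not the last one twice
	}
}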
diff --git a/pkg/leakybucket/processor.go b/pkg/leakybucket/processor.go index 16f7ac800..50693e7bc 100644 --- a/pkg/leakybucket/processor.go +++ b/pkg/leakybucket/processor.go @@ -5,25 +5,25 @@ import "github.com/crowdsecurity/crowdsec/pkg/types" type Processor interface { OnBucketInit(Bucket *BucketFactory) error OnBucketPour(Bucket *BucketFactory) func(types.Event, *Leaky) *types.Event - OnBucketOverflow(Bucket *BucketFactory) func(*Leaky, types.SignalOccurence, *Queue) (types.SignalOccurence, *Queue) + OnBucketOverflow(Bucket *BucketFactory) func(*Leaky, types.RuntimeAlert, *Queue) (types.RuntimeAlert, *Queue) } type DumbProcessor struct { } -func (d *DumbProcessor) OnBucketInit(b *BucketFactory) error { +func (d *DumbProcessor) OnBucketInit(bucketFactory *BucketFactory) error { return nil } -func (d *DumbProcessor) OnBucketPour(b *BucketFactory) func(types.Event, *Leaky) *types.Event { - return func(msg types.Event, l *Leaky) *types.Event { +func (d *DumbProcessor) OnBucketPour(bucketFactory *BucketFactory) func(types.Event, *Leaky) *types.Event { + return func(msg types.Event, leaky *Leaky) *types.Event { return &msg } } -func (d *DumbProcessor) OnBucketOverflow(b *BucketFactory) func(*Leaky, types.SignalOccurence, *Queue) (types.SignalOccurence, *Queue) { - return func(l *Leaky, s types.SignalOccurence, q *Queue) (types.SignalOccurence, *Queue) { - return s, q +func (d *DumbProcessor) OnBucketOverflow(b *BucketFactory) func(*Leaky, types.RuntimeAlert, *Queue) (types.RuntimeAlert, *Queue) { + return func(leaky *Leaky, alert types.RuntimeAlert, queue *Queue) (types.RuntimeAlert, *Queue) { + return alert, queue } } diff --git a/pkg/leakybucket/tests/leaky-fixedqueue/test.json b/pkg/leakybucket/tests/leaky-fixedqueue/test.json new file mode 100644 index 000000000..55f76a989 --- /dev/null +++ b/pkg/leakybucket/tests/leaky-fixedqueue/test.json @@ -0,0 +1,98 @@ +{ + "lines": [ + { + "Line": { + "Labels": { + "type": "testlog" + }, + "Raw": "xxheader VALUE1 trailing stuff" + }, + "MarshaledTime": "2020-01-01T10:00:00.000Z", + "Meta": { + "source_ip": "2a00:1450:4007:816::200e" + } + }, + { + "Line": { + "Labels": { + "type": "testlog" + }, + "Raw": "xxheader VALUE2 trailing stuff" + }, + "MarshaledTime": "2020-01-01T10:00:05.000Z", + "Meta": { + "source_ip": "2a00:1450:4007:816::200e" + } + }, + { + "Line": { + "Labels": { + "type": "testlog" + }, + "Raw": "xxheader VALUE3 trailing stuff" + }, + "MarshaledTime": "2020-01-01T10:00:05.000Z", + "Meta": { + "source_ip": "2a00:1450:4007:816::200e", + "ratata" : "foobar2" + } + }, + { + "Line": { + "Labels": { + "type": "testlog" + }, + "Raw": "xxheader VALUE4 trailing stuff" + }, + "MarshaledTime": 
"2020-01-01T10:00:05.000Z", + "Meta": { + "source_ip": "2a00:1450:4007:816::200e", + "ratata" : "foobar0" + } + }, + { + "Line": { + "Labels": { + "type": "testlog" + }, + "Raw": "xxheader VALUE5 trailing stuff" + }, + "MarshaledTime": "2020-01-01T10:00:05.000Z", + "Meta": { + "source_ip": "2a00:1450:4007:816::200e", + "ratata" : "foobar1" + } + }, + { + "Line": { + "Labels": { + "type": "testlog" + }, + "Raw": "xxheader VALUE6 trailing stuff" + }, + "MarshaledTime": "2020-01-01T10:00:05.000Z", + "Meta": { + "source_ip": "2a00:1450:4007:816::200e", + "ratata" : "foobar" + } + } + ], + "results": [ + { + "Type" : 1, + "Alert": { + "sources" : { + "2a00:1450:4007:816::200e": { + "ip": "2a00:1450:4007:816::200e", + "scope": "Ip", + "value": "2a00:1450:4007:816::200e" + } + }, + "Alert" : { + "scenario": "test/simple-leaky", + "events_count": 6 + } + } + } + ] +} \ No newline at end of file diff --git a/pkg/leakybucket/tests/leaky-fixedqueue/test.yaml b/pkg/leakybucket/tests/leaky-fixedqueue/test.yaml deleted file mode 100644 index 93097dac3..000000000 --- a/pkg/leakybucket/tests/leaky-fixedqueue/test.yaml +++ /dev/null @@ -1,51 +0,0 @@ -#this one will trigger a simple overflow -lines: - - Line: - Labels: - type: testlog - Raw: xxheader VALUE1 trailing stuff - MarshaledTime: 2020-01-01T10:00:00Z - Meta: - source_ip: 1.2.3.4 - - Line: - Labels: - type: testlog - Raw: xxheader VALUE2 trailing stuff - MarshaledTime: 2020-01-01T10:00:05Z - Meta: - source_ip: 1.2.3.4 - - Line: - Labels: - type: testlog - Raw: xxheader VALUE3 trailing stuff - MarshaledTime: 2020-01-01T10:00:05Z - Meta: - source_ip: 1.2.3.4 - - Line: - Labels: - type: testlog - Raw: xxheader VALUE4 trailing stuff - MarshaledTime: 2020-01-01T10:00:05Z - Meta: - source_ip: 1.2.3.4 - - Line: - Labels: - type: testlog - Raw: xxheader VALUE5 trailing stuff - MarshaledTime: 2020-01-01T10:00:05Z - Meta: - source_ip: 1.2.3.4 - - Line: - Labels: - type: testlog - Raw: xxheader VALUE6 trailing stuff - MarshaledTime: 2020-01-01T10:00:05Z - Meta: - source_ip: 1.2.3.4 -results: - - Overflow: - scenario: test/simple-leaky - Source_ip: 1.2.3.4 - Events_count: 6 - - diff --git a/pkg/leakybucket/tests/overflow-with-meta-and-information/bucket.yaml b/pkg/leakybucket/tests/overflow-with-meta-and-information/bucket.yaml new file mode 100644 index 000000000..566a47dca --- /dev/null +++ b/pkg/leakybucket/tests/overflow-with-meta-and-information/bucket.yaml @@ -0,0 +1,9 @@ +# ssh bruteforce +version: 1.0 +type: trigger +debug: true +name: test/simple-trigger +description: "Simple trigger" +filter: "evt.Line.Labels.type =='testlog'" +labels: + type: overflow_1 diff --git a/pkg/leakybucket/tests/overflow-with-meta-and-information/scenarios.yaml b/pkg/leakybucket/tests/overflow-with-meta-and-information/scenarios.yaml new file mode 100644 index 000000000..f45f7be12 --- /dev/null +++ b/pkg/leakybucket/tests/overflow-with-meta-and-information/scenarios.yaml @@ -0,0 +1,2 @@ + - filename: {{.TestDirectory}}/bucket.yaml + diff --git a/pkg/leakybucket/tests/overflow-with-meta-and-information/test.json b/pkg/leakybucket/tests/overflow-with-meta-and-information/test.json new file mode 100644 index 000000000..3f8c8a673 --- /dev/null +++ b/pkg/leakybucket/tests/overflow-with-meta-and-information/test.json @@ -0,0 +1,81 @@ +{ + "lines": [ + { + "Line": { + "Labels": { + "type": "testlog" + }, + "Raw": "xxheader VALUE1 trailing stuff" + }, + "MarshaledTime": "2020-01-01T10:00:00.000Z", + "Meta": { + "source_ip": "1.2.3.4", + "uniq_key": "aaa" + }, + "Enriched": { + 
"ASNumber": "1234", + "IsoCode": "FR", + "ASNOrg": "random AS" + } + }, + { + "Line": { + "Labels": { + "type": "testlog" + }, + "Raw": "xxheader VALUE1 trailing stuff" + }, + "MarshaledTime": "2020-01-01T10:00:00.000Z", + "Meta": { + "source_ip": "1.2.3.4", + "uniq_key": "aaa" + }, + "Enriched": { + "ASNumber": "1234", + "IsoCode": "FR", + "ASNOrg": "random AS" + } + } + ], + "results" : [ + { + "Alert": { + "Sources": { + "1.2.3.4": { + "as_name": "random AS", + "as_number": "1234", + "cn": "FR", + "ip": "1.2.3.4", + "scope": "Ip", + "value": "1.2.3.4" + } + }, + "Alert" : { + "events_count": 1, + "scenario": "test/simple-trigger", + "scenario_version": "1.0" + } + } + }, + { + "Alert": { + "Sources": { + "1.2.3.4": { + "as_name": "random AS", + "as_number": "1234", + "cn": "FR", + "ip": "1.2.3.4", + "scope": "Ip", + "value": "1.2.3.4" + } + }, + "Alert" : { + "events_count": 1, + "scenario": "test/simple-trigger", + "scenario_version": "1.0" + } + } + + } + ] + } diff --git a/pkg/leakybucket/tests/overflow-with-meta/test.json b/pkg/leakybucket/tests/overflow-with-meta/test.json new file mode 100644 index 000000000..e6a24ba1e --- /dev/null +++ b/pkg/leakybucket/tests/overflow-with-meta/test.json @@ -0,0 +1,79 @@ +{ + "lines": [ + { + "Line": { + "Labels": { + "type": "testlog" + }, + "Raw": "xxheader VALUE1 trailing stuff" + }, + "MarshaledTime": "2020-01-01T10:00:00.000Z", + "Meta": { + "source_ip": "1.2.3.4", + "uniq_key": "aaa" + }, + "Enriched": { + "ASNumber": "1234", + "IsoCode": "FR", + "ASNOrg": "random AS" + } + }, + { + "Line": { + "Labels": { + "type": "testlog" + }, + "Raw": "xxheader VALUE1 trailing stuff" + }, + "MarshaledTime": "2020-01-01T10:00:00.000Z", + "Meta": { + "source_ip": "1.2.3.4", + "uniq_key": "aaa" + }, + "Enriched": { + "ASNumber": "1234", + "IsoCode": "FR", + "ASNOrg": "random AS" + } + } + ], + "results" : [ + { + "Alert": { + "Sources": { + "1.2.3.4": { + "as_name": "random AS", + "as_number": "1234", + "cn": "FR", + "ip": "1.2.3.4", + "scope": "Ip", + "value": "1.2.3.4" + } + }, + "Alert" : { + "events_count": 1, + "scenario": "test/simple-trigger" + } + } + }, + { + "Alert": { + "Sources": { + "1.2.3.4": { + "as_name": "random AS", + "as_number": "1234", + "cn": "FR", + "ip": "1.2.3.4", + "scope": "Ip", + "value": "1.2.3.4" + } + }, + "Alert" : { + "events_count": 1, + "scenario": "test/simple-trigger" + } + } + + } + ] + } \ No newline at end of file diff --git a/pkg/leakybucket/tests/overflow-with-meta/test.yaml b/pkg/leakybucket/tests/overflow-with-meta/test.yaml deleted file mode 100644 index 1434489ce..000000000 --- a/pkg/leakybucket/tests/overflow-with-meta/test.yaml +++ /dev/null @@ -1,39 +0,0 @@ -#this one won't due to leakspeed / delay -lines: - - Line: - Labels: - type: testlog - Raw: xxheader VALUE1 trailing stuff - MarshaledTime: 2020-01-01T10:00:00Z - Meta: - source_ip: 1.2.3.4 - uniq_key: aaa - Enriched: - ASNumber: 1234 - IsoCode: FR - ASNOrg: random AS - SourceRange: 1.2.3.0/24 - - Line: - Labels: - type: testlog - Raw: xxheader VALUE1 trailing stuff - MarshaledTime: 2020-01-01T10:00:00Z - Meta: - source_ip: 1.2.3.4 - uniq_key: aaa - Enriched: - ASNumber: 1234 - IsoCode: FR - ASNOrg: random AS - SourceRange: ratata -results: - - Overflow: - scenario: test/simple-trigger - Source_ip: 1.2.3.4 - Events_count: 1 - - Overflow: - scenario: test/simple-trigger - Source_ip: 1.2.3.4 - Events_count: 1 - - diff --git a/pkg/leakybucket/tests/simple-counter-bh/test.json b/pkg/leakybucket/tests/simple-counter-bh/test.json new file mode 100644 index 
000000000..3529de917 --- /dev/null +++ b/pkg/leakybucket/tests/simple-counter-bh/test.json @@ -0,0 +1,35 @@ +{ + "lines": [ + { + "Line": { + "Labels": { + "type": "testlog" + }, + "Raw": "xxheader VALUE1 trailing stuff" + }, + "MarshaledTime": "2020-01-01T10:00:00+00:00", + "Meta": { + "source_ip": "1.2.3.4" + } + }, + { + "Line": { + "Labels": { + "type": "testlog" + }, + "Raw": "xxheader VALUE1 trailing stuff" + }, + "MarshaledTime": "2020-01-01T10:00:00+00:00", + "Meta": { + "source_ip": "1.2.3.4" + } + } + ], + "results": [ + { + "Alert": { + } + } + ] +} + diff --git a/pkg/leakybucket/tests/simple-counter-bh/test.yaml b/pkg/leakybucket/tests/simple-counter-bh/test.yaml deleted file mode 100644 index fe457aef1..000000000 --- a/pkg/leakybucket/tests/simple-counter-bh/test.yaml +++ /dev/null @@ -1,22 +0,0 @@ -#this one will trigger a simple overflow -lines: - - Line: - Labels: - type: testlog - Raw: xxheader VALUE1 trailing stuff - MarshaledTime: 2020-01-01T10:00:00Z - Meta: - source_ip: 1.2.3.4 - - Line: - Labels: - type: testlog - Raw: xxheader VALUE1 trailing stuff - MarshaledTime: 2020-01-01T10:00:00Z - Meta: - source_ip: 1.2.3.4 -results: - - Overflow: - scenario: "" - - - diff --git a/pkg/leakybucket/tests/simple-counter-timeout/test.json b/pkg/leakybucket/tests/simple-counter-timeout/test.json new file mode 100644 index 000000000..b348ee77b --- /dev/null +++ b/pkg/leakybucket/tests/simple-counter-timeout/test.json @@ -0,0 +1,30 @@ +{ + "lines": [ + { + "Line": { + "Labels": { + "type": "testlog" + }, + "Raw": "xxheader VALUE1 trailing stuff" + }, + "MarshaledTime": "2020-01-01T10:00:00+00:00", + "Meta": { + "source_ip": "1.2.3.4" + } + }, + { + "Line": { + "Labels": { + "type": "testlog" + }, + "Raw": "xxheader VALUE1 trailing stuff" + }, + "MarshaledTime": "2020-01-01T10:00:00+00:00", + "Meta": { + "source_ip": "1.2.3.4" + } + } + ], + "results": null +} + diff --git a/pkg/leakybucket/tests/simple-counter-timeout/test.yaml b/pkg/leakybucket/tests/simple-counter-timeout/test.yaml deleted file mode 100644 index e58f4c828..000000000 --- a/pkg/leakybucket/tests/simple-counter-timeout/test.yaml +++ /dev/null @@ -1,18 +0,0 @@ -#this one will trigger a simple overflow -lines: - - Line: - Labels: - type: testlog - Raw: xxheader VALUE1 trailing stuff - MarshaledTime: 2020-01-01T10:00:00Z - Meta: - source_ip: 1.2.3.4 - - Line: - Labels: - type: testlog - Raw: xxheader VALUE1 trailing stuff - MarshaledTime: 2020-01-01T10:00:00Z - Meta: - source_ip: 1.2.3.4 -results: - diff --git a/pkg/leakybucket/tests/simple-counter/bucket.yaml b/pkg/leakybucket/tests/simple-counter/bucket.yaml index 50427aa98..3a04dda91 100644 --- a/pkg/leakybucket/tests/simple-counter/bucket.yaml +++ b/pkg/leakybucket/tests/simple-counter/bucket.yaml @@ -1,5 +1,5 @@ type: counter -name: test/simple-trigger +name: test/simple-counter description: "Simple leaky" filter: "evt.Line.Labels.type =='testlog'" duration: 1s diff --git a/pkg/leakybucket/tests/simple-counter/test.json b/pkg/leakybucket/tests/simple-counter/test.json new file mode 100644 index 000000000..e6e6f03f6 --- /dev/null +++ b/pkg/leakybucket/tests/simple-counter/test.json @@ -0,0 +1,46 @@ +{ + "lines": [ + { + "Line": { + "Labels": { + "type": "testlog" + }, + "Raw": "xxheader VALUE1 trailing stuff" + }, + "MarshaledTime": "2020-01-01T10:00:00+00:00", + "Meta": { + "source_ip": "1.2.3.4" + } + }, + { + "Line": { + "Labels": { + "type": "testlog" + }, + "Raw": "xxheader VALUE1 trailing stuff" + }, + "MarshaledTime": "2020-01-01T10:00:00+00:00", + "Meta": { + 
"source_ip": "1.2.3.4" + } + } + ], + "results": [ + { + "Alert": { + "sources": { + "1.2.3.4": { + "scope": "Ip", + "value": "1.2.3.4", + + "ip": "1.2.3.4" + } + }, + "Alert": { + "scenario": "test/simple-counter", + "events_count": 2 + } + } + } + ] +} diff --git a/pkg/leakybucket/tests/simple-counter/test.yaml b/pkg/leakybucket/tests/simple-counter/test.yaml deleted file mode 100644 index d87dcdbe3..000000000 --- a/pkg/leakybucket/tests/simple-counter/test.yaml +++ /dev/null @@ -1,23 +0,0 @@ -#this one will trigger a simple overflow -lines: - - Line: - Labels: - type: testlog - Raw: xxheader VALUE1 trailing stuff - MarshaledTime: 2020-01-01T10:00:00Z - Meta: - source_ip: 1.2.3.4 - - Line: - Labels: - type: testlog - Raw: xxheader VALUE1 trailing stuff - MarshaledTime: 2020-01-01T10:00:00Z - Meta: - source_ip: 1.2.3.4 -results: - - Overflow: - scenario: test/simple-trigger - Source_ip: 1.2.3.4 - Events_count: 2 - - diff --git a/pkg/leakybucket/tests/simple-leaky-blackhole/test.json b/pkg/leakybucket/tests/simple-leaky-blackhole/test.json new file mode 100644 index 000000000..af33b217a --- /dev/null +++ b/pkg/leakybucket/tests/simple-leaky-blackhole/test.json @@ -0,0 +1,123 @@ +{ + "lines": [ + { + "Line": { + "Labels": { + "type": "testlog" + }, + "Raw": "xxheader VALUE1 trailing stuff" + }, + "MarshaledTime": "2020-01-01T10:00:00+00:00", + "Meta": { + "source_ip": "1.2.3.4", + "entry": "1" + } + }, + { + "Line": { + "Labels": { + "type": "testlog" + }, + "Raw": "xxheader VALUE2 trailing stuff" + }, + "MarshaledTime": "2020-01-01T10:00:04+00:00", + "Meta": { + "source_ip": "1.2.3.4", + "entry": "2" + } + }, + { + "Line": { + "Labels": { + "type": "testlog" + }, + "Raw": "xxheader VALUE3 trailing stuff" + }, + "MarshaledTime": "2020-01-01T10:00:15+00:00", + "Meta": { + "source_ip": "1.2.3.4", + "entry": "3" + } + }, + { + "Line": { + "Labels": { + "type": "testlog" + }, + "Raw": "xxheader VALUE4 trailing stuff" + }, + "MarshaledTime": "2020-01-01T10:00:16+00:00", + "Meta": { + "source_ip": "1.2.3.4", + "entry": "4" + } + }, + { + "Line": { + "Labels": { + "type": "testlog" + }, + "Raw": "xxheader VALUE5 trailing stuff" + }, + "MarshaledTime": "2020-01-01T10:01:15+00:00", + "Meta": { + "source_ip": "1.2.3.4", + "entry": "5" + } + }, + { + "Line": { + "Labels": { + "type": "testlog" + }, + "Raw": "xxheader VALUE6 trailing stuff" + }, + "MarshaledTime": "2020-01-01T10:01:16+00:00", + "Meta": { + "source_ip": "1.2.3.4", + "entry": "6" + } + } + ], + "results": [ + { + "Alert": { + "sources": { + "1.2.3.4": { + "scope": "Ip", + "value": "1.2.3.4", + + "ip": "1.2.3.4" + } + }, + "Alert" : { + "scenario": "test/simple-leaky", + "events_count": 2 + + } + } + }, + { + "Alert": { + } + }, + { + "Alert": { + "sources": { + "1.2.3.4": { + "scope": "Ip", + "value": "1.2.3.4", + + "ip": "1.2.3.4" + } + }, + "Alert" : { + "scenario": "test/simple-leaky", + "events_count": 2 + + } + } + } + ] +} + diff --git a/pkg/leakybucket/tests/simple-leaky-blackhole/test.yaml b/pkg/leakybucket/tests/simple-leaky-blackhole/test.yaml deleted file mode 100644 index 8d897e997..000000000 --- a/pkg/leakybucket/tests/simple-leaky-blackhole/test.yaml +++ /dev/null @@ -1,66 +0,0 @@ -lines: -#the first two will trigger overflow - - Line: - Labels: - type: testlog - Raw: xxheader VALUE1 trailing stuff - MarshaledTime: 2020-01-01T10:00:00Z - Meta: - source_ip: 1.2.3.4 - entry: 1 - - Line: - Labels: - type: testlog - Raw: xxheader VALUE2 trailing stuff - MarshaledTime: 2020-01-01T10:00:04Z - Meta: - source_ip: 1.2.3.4 - entry: 2 
-#the next overflow will be blackholed - - Line: - Labels: - type: testlog - Raw: xxheader VALUE3 trailing stuff - MarshaledTime: 2020-01-01T10:00:15Z - Meta: - source_ip: 1.2.3.4 - entry: 3 - - Line: - Labels: - type: testlog - Raw: xxheader VALUE4 trailing stuff - MarshaledTime: 2020-01-01T10:00:16Z - Meta: - source_ip: 1.2.3.4 - entry: 4 -#but this one won't - - Line: - Labels: - type: testlog - Raw: xxheader VALUE5 trailing stuff - MarshaledTime: 2020-01-01T10:01:15Z - Meta: - source_ip: 1.2.3.4 - entry: 5 - - Line: - Labels: - type: testlog - Raw: xxheader VALUE6 trailing stuff - MarshaledTime: 2020-01-01T10:01:16Z - Meta: - source_ip: 1.2.3.4 - entry: 6 -results: -#the first overflow - - Overflow: - scenario: test/simple-leaky - Source_ip: 1.2.3.4 - Events_count: 2 -#the blackholed one - - Overflow: - scenario: -#the second one - - Overflow: - scenario: test/simple-leaky - Source_ip: 1.2.3.4 - Events_count: 2 diff --git a/pkg/leakybucket/tests/simple-leaky-overflow/test.json b/pkg/leakybucket/tests/simple-leaky-overflow/test.json new file mode 100644 index 000000000..2d3f813c8 --- /dev/null +++ b/pkg/leakybucket/tests/simple-leaky-overflow/test.json @@ -0,0 +1,46 @@ +{ + "lines": [ + { + "Line": { + "Labels": { + "type": "testlog" + }, + "Raw": "xxheader VALUE1 trailing stuff" + }, + "MarshaledTime": "2020-01-01T10:00:00+00:00", + "Meta": { + "source_ip": "1.2.3.4" + } + }, + { + "Line": { + "Labels": { + "type": "testlog" + }, + "Raw": "xxheader VALUE2 trailing stuff" + }, + "MarshaledTime": "2020-01-01T10:00:05+00:00", + "Meta": { + "source_ip": "1.2.3.4" + } + } + ], + "results": [ + { + "Alert": { + "sources": { + "1.2.3.4": { + "scope": "Ip", + "value": "1.2.3.4", + "ip": "1.2.3.4" + } + }, + "Alert" : { + "scenario": "test/simple-leaky", + "events_count": 2 + } + } + } + ] +} + diff --git a/pkg/leakybucket/tests/simple-leaky-overflow/test.yaml b/pkg/leakybucket/tests/simple-leaky-overflow/test.yaml deleted file mode 100644 index 7fc1bb83b..000000000 --- a/pkg/leakybucket/tests/simple-leaky-overflow/test.yaml +++ /dev/null @@ -1,23 +0,0 @@ -#this one will trigger a simple overflow -lines: - - Line: - Labels: - type: testlog - Raw: xxheader VALUE1 trailing stuff - MarshaledTime: 2020-01-01T10:00:00Z - Meta: - source_ip: 1.2.3.4 - - Line: - Labels: - type: testlog - Raw: xxheader VALUE2 trailing stuff - MarshaledTime: 2020-01-01T10:00:05Z - Meta: - source_ip: 1.2.3.4 -results: - - Overflow: - scenario: test/simple-leaky - Source_ip: 1.2.3.4 - Events_count: 2 - - diff --git a/pkg/leakybucket/tests/simple-leaky-ovflwfilter/test.json b/pkg/leakybucket/tests/simple-leaky-ovflwfilter/test.json new file mode 100644 index 000000000..0ec5dfab9 --- /dev/null +++ b/pkg/leakybucket/tests/simple-leaky-ovflwfilter/test.json @@ -0,0 +1,54 @@ +{ + "lines": [ + { + "Line": { + "Labels": { + "type": "testlog" + }, + "Raw": "xxheader VALUE1 trailing stuff" + }, + "MarshaledTime": "2020-01-01T10:00:00+00:00", + "Meta": { + "source_ip": "1.2.3.4", + "specvalue": "1" + } + }, + { + "Line": { + "Labels": { + "type": "testlog" + }, + "Raw": "xxheader VALUE2 trailing stuff" + }, + "MarshaledTime": "2020-01-01T10:00:01+00:00", + "Meta": { + "source_ip": "1.2.3.4", + "specvalue": "2" + } + } + ], + "results": [ + { + "Alert": { + } + }, + { + "Alert": { + "sources": { + "1.2.3.4": { + "scope": "Ip", + "value": "1.2.3.4", + + "ip": "1.2.3.4" + } + }, + "Alert" : { + "scenario": "test/filter-ok", + "events_count": 2 + } + + } + } + ] +} + diff --git a/pkg/leakybucket/tests/simple-leaky-ovflwfilter/test.yaml 
b/pkg/leakybucket/tests/simple-leaky-ovflwfilter/test.yaml deleted file mode 100644 index eee84b4dc..000000000 --- a/pkg/leakybucket/tests/simple-leaky-ovflwfilter/test.yaml +++ /dev/null @@ -1,28 +0,0 @@ -#this one won't due to leakspeed / delay -lines: - - Line: - Labels: - type: testlog - Raw: xxheader VALUE1 trailing stuff - MarshaledTime: 2020-01-01T10:00:00Z - Meta: - source_ip: 1.2.3.4 - specvalue: 1 - - Line: - Labels: - type: testlog - Raw: xxheader VALUE2 trailing stuff - MarshaledTime: 2020-01-01T10:00:01Z - Meta: - source_ip: 1.2.3.4 - specvalue: 2 -results: - - Overflow: - scenario: - - Overflow: - scenario: test/filter-ok - Events_count: 2 - Source_ip: 1.2.3.4 - - - diff --git a/pkg/leakybucket/tests/simple-leaky-underflow/test.json b/pkg/leakybucket/tests/simple-leaky-underflow/test.json new file mode 100644 index 000000000..20d383b59 --- /dev/null +++ b/pkg/leakybucket/tests/simple-leaky-underflow/test.json @@ -0,0 +1,22 @@ +{ + "lines": [ + { + "Line": { + "Labels": { + "type": "testlog" + }, + "Raw": "xxheader VALUE1 trailing stuff" + }, + "MarshaledTime": "2020-01-01T10:00:00+00:00", + "Meta": { + "source_ip": "1.2.3.4" + } + } + ], + "results": [ + { + "Alert": { + } + } + ] +} diff --git a/pkg/leakybucket/tests/simple-leaky-underflow/test.yaml b/pkg/leakybucket/tests/simple-leaky-underflow/test.yaml deleted file mode 100644 index 32da72930..000000000 --- a/pkg/leakybucket/tests/simple-leaky-underflow/test.yaml +++ /dev/null @@ -1,14 +0,0 @@ -#this one won't due to leakspeed / delay -lines: - - Line: - Labels: - type: testlog - Raw: xxheader VALUE1 trailing stuff - MarshaledTime: 2020-01-01T10:00:00Z - Meta: - source_ip: 1.2.3.4 -results: - - Overflow: - scenario: "" - - diff --git a/pkg/leakybucket/tests/simple-leaky-uniq-w-buckets_state/test.json b/pkg/leakybucket/tests/simple-leaky-uniq-w-buckets_state/test.json new file mode 100644 index 000000000..e01689883 --- /dev/null +++ b/pkg/leakybucket/tests/simple-leaky-uniq-w-buckets_state/test.json @@ -0,0 +1,63 @@ +{ + "lines": [ + { + "Line": { + "Labels": { + "type": "testlog" + }, + "Raw": "xxheader VALUE3 trailing stuff" + }, + "MarshaledTime": "2020-01-01T10:00:06+00:00", + "Meta": { + "source_ip": "1.2.3.4", + "uniq_key": "baa" + } + }, + { + "Line": { + "Labels": { + "type": "testlog" + }, + "Raw": "xxheader VALUE3 trailing stuff" + }, + "MarshaledTime": "2020-01-01T10:00:07+00:00", + "Meta": { + "source_ip": "1.2.3.4", + "uniq_key": "baa" + } + }, + { + "Line": { + "Labels": { + "type": "testlog" + }, + "Raw": "xxheader VALUE4 trailing stuff" + }, + "MarshaledTime": "2020-01-01T10:00:08+00:00", + "Meta": { + "source_ip": "1.2.3.4", + "uniq_key": "bab" + } + } + ], + "results": [ + { + "Alert": { + "sources": { + "1.2.3.4": { + "scope": "Ip", + "value": "1.2.3.4", + + "ip": "1.2.3.4" + } + }, + "Alert" : { + "scenario": "test/simple-leaky", + "events_count": 4 + } + + } + } + ] +} + diff --git a/pkg/leakybucket/tests/simple-leaky-uniq-w-buckets_state/test.yaml b/pkg/leakybucket/tests/simple-leaky-uniq-w-buckets_state/test.yaml deleted file mode 100644 index 565f06c48..000000000 --- a/pkg/leakybucket/tests/simple-leaky-uniq-w-buckets_state/test.yaml +++ /dev/null @@ -1,35 +0,0 @@ -#this one won't due to leakspeed / delay -lines: - - Line: - Labels: - type: testlog - Raw: xxheader VALUE3 trailing stuff - MarshaledTime: 2020-01-01T10:00:06Z - Meta: - source_ip: 1.2.3.4 - uniq_key: baa -#this one will be discarded because of uniq - - Line: - Labels: - type: testlog - Raw: xxheader VALUE3 trailing stuff - 
MarshaledTime: 2020-01-01T10:00:07Z - Meta: - source_ip: 1.2.3.4 - uniq_key: baa -#not this one - - Line: - Labels: - type: testlog - Raw: xxheader VALUE4 trailing stuff - MarshaledTime: 2020-01-01T10:00:08Z - Meta: - source_ip: 1.2.3.4 - uniq_key: bab -results: - - Overflow: - scenario: test/simple-leaky - Source_ip: 1.2.3.4 - Events_count: 4 - - diff --git a/pkg/leakybucket/tests/simple-leaky-uniq/test.json b/pkg/leakybucket/tests/simple-leaky-uniq/test.json new file mode 100644 index 000000000..07a89bb8a --- /dev/null +++ b/pkg/leakybucket/tests/simple-leaky-uniq/test.json @@ -0,0 +1,63 @@ +{ + "lines": [ + { + "Line": { + "Labels": { + "type": "testlog" + }, + "Raw": "xxheader VALUE1 trailing stuff" + }, + "MarshaledTime": "2020-01-01T10:00:00+00:00", + "Meta": { + "source_ip": "1.2.3.4", + "uniq_key": "aaa" + } + }, + { + "Line": { + "Labels": { + "type": "testlog" + }, + "Raw": "xxheader VALUE2 trailing stuff" + }, + "MarshaledTime": "2020-01-01T10:00:01+00:00", + "Meta": { + "source_ip": "1.2.3.4", + "uniq_key": "aaa" + } + }, + { + "Line": { + "Labels": { + "type": "testlog" + }, + "Raw": "xxheader VALUE2 trailing stuff" + }, + "MarshaledTime": "2020-01-01T10:00:02+00:00", + "Meta": { + "source_ip": "1.2.3.4", + "uniq_key": "aab" + } + } + ], + "results": [ + { + "Alert": { + "sources": { + "1.2.3.4": { + "scope": "Ip", + "value": "1.2.3.4", + + "ip": "1.2.3.4" + } + }, + "Alert" : { + "scenario": "test/simple-leaky", + "events_count": 2 + } + + } + } + ] +} + diff --git a/pkg/leakybucket/tests/simple-leaky-uniq/test.yaml b/pkg/leakybucket/tests/simple-leaky-uniq/test.yaml deleted file mode 100644 index 9e1b1c5e2..000000000 --- a/pkg/leakybucket/tests/simple-leaky-uniq/test.yaml +++ /dev/null @@ -1,33 +0,0 @@ -#this one won't due to leakspeed / delay -lines: - - Line: - Labels: - type: testlog - Raw: xxheader VALUE1 trailing stuff - MarshaledTime: 2020-01-01T10:00:00Z - Meta: - source_ip: 1.2.3.4 - uniq_key: aaa - - Line: - Labels: - type: testlog - Raw: xxheader VALUE2 trailing stuff - MarshaledTime: 2020-01-01T10:00:01Z - Meta: - source_ip: 1.2.3.4 - uniq_key: aaa - - Line: - Labels: - type: testlog - Raw: xxheader VALUE2 trailing stuff - MarshaledTime: 2020-01-01T10:00:02Z - Meta: - source_ip: 1.2.3.4 - uniq_key: aab -results: - - Overflow: - scenario: test/simple-leaky - Source_ip: 1.2.3.4 - Events_count: 2 - - diff --git a/pkg/leakybucket/tests/simple-trigger-external-data/bucket.yaml b/pkg/leakybucket/tests/simple-trigger-external-data/bucket.yaml index af5771906..aca521996 100644 --- a/pkg/leakybucket/tests/simple-trigger-external-data/bucket.yaml +++ b/pkg/leakybucket/tests/simple-trigger-external-data/bucket.yaml @@ -3,10 +3,10 @@ debug: true name: test/simple-trigger data: - source_url: https://invalid.com/test.list - dest_file: ./simple_patterns.txt + dest_file: simple-trigger-external-data/simple_patterns.txt type: string description: "Simple trigger with external data" -filter: "evt.Line.Labels.type =='testlog' && evt.Parsed.tainted_data in File('./simple_patterns.txt')" +filter: "evt.Line.Labels.type =='testlog' && evt.Parsed.tainted_data in File('simple-trigger-external-data/simple_patterns.txt')" groupby: evt.Meta.source_ip labels: type: overflow_1 diff --git a/pkg/leakybucket/tests/simple-trigger-external-data/test.json b/pkg/leakybucket/tests/simple-trigger-external-data/test.json new file mode 100644 index 000000000..6261fe685 --- /dev/null +++ b/pkg/leakybucket/tests/simple-trigger-external-data/test.json @@ -0,0 +1,55 @@ +{ + "lines": [ + { + "Line": { + 
"Labels": { + "type": "testlog" + }, + "Raw": "xxheader VALUE1 trailing stuff" + }, + "MarshaledTime": "2020-01-01T10:00:00+00:00", + "Meta": { + "source_ip": "1.2.3.4" + }, + "Parsed": { + "tainted_data": "AAAABBBBBBB11111XXX" + } + }, + { + "Line": { + "Labels": { + "type": "testlog" + }, + "Raw": "xxheader VALUE2 trailing stuff" + }, + "MarshaledTime": "2020-01-01T10:00:00+00:00", + "Meta": { + "source_ip": "1.2.3.5" + }, + "Parsed": { + "tainted_data": "ZZZBBBBBBB11111XXX" + } + } + ], + "results": [ + { + "Alert": { + "sources": { + "1.2.3.4": { + "scope": "Ip", + "value": "1.2.3.4", + + "ip": "1.2.3.4" + } + }, + "Alert" : { + "scenario": "test/simple-trigger", + "events_count": 1 + } + + } + } + ] +} + + diff --git a/pkg/leakybucket/tests/simple-trigger-external-data/test.yaml b/pkg/leakybucket/tests/simple-trigger-external-data/test.yaml deleted file mode 100644 index 1a8025a84..000000000 --- a/pkg/leakybucket/tests/simple-trigger-external-data/test.yaml +++ /dev/null @@ -1,27 +0,0 @@ -#this one will trigger a simple overflow -lines: - - Line: - Labels: - type: testlog - Raw: xxheader VALUE1 trailing stuff - MarshaledTime: 2020-01-01T10:00:00Z - Meta: - source_ip: 1.2.3.4 - Parsed: - tainted_data: AAAABBBBBBB11111XXX - - Line: - Labels: - type: testlog - Raw: xxheader VALUE2 trailing stuff - MarshaledTime: 2020-01-01T10:00:00Z - Meta: - source_ip: 1.2.3.5 - Parsed: - tainted_data: ZZZBBBBBBB11111XXX -results: - - Overflow: - scenario: test/simple-trigger - Source_ip: 1.2.3.4 - Events_count: 1 - - diff --git a/pkg/leakybucket/tests/simple-trigger-reprocess/bucket.yaml b/pkg/leakybucket/tests/simple-trigger-reprocess/bucket.yaml index 47acd710d..b4759dac9 100644 --- a/pkg/leakybucket/tests/simple-trigger-reprocess/bucket.yaml +++ b/pkg/leakybucket/tests/simple-trigger-reprocess/bucket.yaml @@ -1,6 +1,6 @@ type: trigger debug: true -name: test/simple-trigger +name: test/simple-trigger-reprocess description: "Simple leaky" filter: "evt.Line.Labels.type =='testlog'" groupby: evt.Meta.source_ip diff --git a/pkg/leakybucket/tests/simple-trigger-reprocess/reprocess.yaml b/pkg/leakybucket/tests/simple-trigger-reprocess/reprocess.yaml index aee5996f7..2ebef8f6d 100644 --- a/pkg/leakybucket/tests/simple-trigger-reprocess/reprocess.yaml +++ b/pkg/leakybucket/tests/simple-trigger-reprocess/reprocess.yaml @@ -2,7 +2,8 @@ type: trigger debug: true name: test/simple-postoverflow-scenario description: "Simple post overflow" -filter: "len(evt.Overflow.Scenario) > 0" +#filter: true +filter: "evt.Overflow.Alert != nil && evt.Overflow.Alert.Scenario != nil" labels: type: overflow_2 diff --git a/pkg/leakybucket/tests/simple-trigger-reprocess/test.json b/pkg/leakybucket/tests/simple-trigger-reprocess/test.json new file mode 100644 index 000000000..696821a0a --- /dev/null +++ b/pkg/leakybucket/tests/simple-trigger-reprocess/test.json @@ -0,0 +1,52 @@ +{ + "lines": [ + { + "Line": { + "Labels": { + "type": "testlog" + }, + "Raw": "xxheader VALUE1 trailing stuff" + }, + "MarshaledTime": "2020-01-01T10:00:00+00:00", + "Meta": { + "source_ip": "1.2.3.4" + } + } + ], + "results": [ + { + "Alert": { + "sources": { + "1.2.3.4": { + "scope": "Ip", + "value": "1.2.3.4", + + "ip": "1.2.3.4" + } + }, + "Alert" : { + "scenario": "test/simple-trigger-reprocess", + "events_count": 1 + } + + } + }, + { + "Alert": { + "sources": { + "1.2.3.4": { + "scope": "Ip", + "value": "1.2.3.4", + + "ip": "1.2.3.4" + } + }, + "Alert" : { + "scenario": "test/simple-postoverflow-scenario", + "events_count": 1 + } + + } + } + ] +} diff 
--git a/pkg/leakybucket/tests/simple-trigger-reprocess/test.yaml b/pkg/leakybucket/tests/simple-trigger-reprocess/test.yaml deleted file mode 100644 index 6fe037a14..000000000 --- a/pkg/leakybucket/tests/simple-trigger-reprocess/test.yaml +++ /dev/null @@ -1,19 +0,0 @@ -#this one will trigger a simple overflow -lines: - - Line: - Labels: - type: testlog - Raw: xxheader VALUE1 trailing stuff - MarshaledTime: 2020-01-01T10:00:00Z - Meta: - source_ip: 1.2.3.4 -results: - - Overflow: - scenario: test/simple-trigger - Source_ip: 1.2.3.4 - Events_count: 1 - - Overflow: - scenario: test/simple-postoverflow-scenario - Source_ip: 1.2.3.4 - Events_count: 1 - diff --git a/pkg/leakybucket/tests/simple-trigger/test.json b/pkg/leakybucket/tests/simple-trigger/test.json new file mode 100644 index 000000000..3b100fd52 --- /dev/null +++ b/pkg/leakybucket/tests/simple-trigger/test.json @@ -0,0 +1,35 @@ +{ + "lines": [ + { + "Line": { + "Labels": { + "type": "testlog" + }, + "Raw": "xxheader VALUE1 trailing stuff" + }, + "MarshaledTime": "2020-01-01T10:00:00+00:00", + "Meta": { + "source_ip": "1.2.3.4" + } + } + ], + "results": [ + { + "Alert": { + "sources": { + "1.2.3.4": { + "scope": "Ip", + "value": "1.2.3.4", + + "ip": "1.2.3.4" + } + }, + "Alert" : { + "scenario": "test/simple-trigger", + "events_count": 1 + } + } + } + ] +} + diff --git a/pkg/leakybucket/tests/simple-trigger/test.yaml b/pkg/leakybucket/tests/simple-trigger/test.yaml deleted file mode 100644 index e8ea9821d..000000000 --- a/pkg/leakybucket/tests/simple-trigger/test.yaml +++ /dev/null @@ -1,16 +0,0 @@ -#this one will trigger a simple overflow -lines: - - Line: - Labels: - type: testlog - Raw: xxheader VALUE1 trailing stuff - MarshaledTime: 2020-01-01T10:00:00Z - Meta: - source_ip: 1.2.3.4 -results: - - Overflow: - scenario: test/simple-trigger - Source_ip: 1.2.3.4 - Events_count: 1 - - diff --git a/pkg/leakybucket/uniq.go b/pkg/leakybucket/uniq.go index f1486acab..288a0a22a 100644 --- a/pkg/leakybucket/uniq.go +++ b/pkg/leakybucket/uniq.go @@ -18,38 +18,38 @@ type Uniq struct { DistinctCompiled *vm.Program } -func (u *Uniq) OnBucketPour(Bucket *BucketFactory) func(types.Event, *Leaky) *types.Event { - return func(msg types.Event, Leaky *Leaky) *types.Event { +func (u *Uniq) OnBucketPour(bucketFactory *BucketFactory) func(types.Event, *Leaky) *types.Event { + return func(msg types.Event, leaky *Leaky) *types.Event { element, err := getElement(msg, u.DistinctCompiled) if err != nil { - Leaky.logger.Errorf("Uniq filter exec failed : %v", err) + leaky.logger.Errorf("Uniq filter exec failed : %v", err) return &msg } - Leaky.logger.Tracef("Uniq '%s' -> '%s'", Bucket.Distinct, element) - for _, evt := range Leaky.Queue.GetQueue() { + leaky.logger.Tracef("Uniq '%s' -> '%s'", bucketFactory.Distinct, element) + for _, evt := range leaky.Queue.GetQueue() { if val, err := getElement(evt, u.DistinctCompiled); err == nil && val == element { - Leaky.logger.Debugf("Uniq(%s) : ko, discard event", element) + leaky.logger.Debugf("Uniq(%s) : ko, discard event", element) return nil } if err != nil { - Leaky.logger.Errorf("Uniq filter exec failed : %v", err) + leaky.logger.Errorf("Uniq filter exec failed : %v", err) } } - Leaky.logger.Debugf("Uniq(%s) : ok", element) + leaky.logger.Debugf("Uniq(%s) : ok", element) return &msg } } -func (u *Uniq) OnBucketOverflow(Bucket *BucketFactory) func(*Leaky, types.SignalOccurence, *Queue) (types.SignalOccurence, *Queue) { - return func(l *Leaky, sig types.SignalOccurence, queue *Queue) (types.SignalOccurence, *Queue) { 
- return sig, queue
+func (u *Uniq) OnBucketOverflow(bucketFactory *BucketFactory) func(*Leaky, types.RuntimeAlert, *Queue) (types.RuntimeAlert, *Queue) {
+ return func(leaky *Leaky, alert types.RuntimeAlert, queue *Queue) (types.RuntimeAlert, *Queue) {
+ return alert, queue
 }
 }
 
-func (u *Uniq) OnBucketInit(Bucket *BucketFactory) error {
+func (u *Uniq) OnBucketInit(bucketFactory *BucketFactory) error {
 var err error
 
- u.DistinctCompiled, err = expr.Compile(Bucket.Distinct, expr.Env(exprhelpers.GetExprEnv(map[string]interface{}{"evt": &types.Event{}})))
+ u.DistinctCompiled, err = expr.Compile(bucketFactory.Distinct, expr.Env(exprhelpers.GetExprEnv(map[string]interface{}{"evt": &types.Event{}})))
 return err
 }
diff --git a/pkg/metabase/api.go b/pkg/metabase/api.go
new file mode 100644
index 000000000..4e33b9349
--- /dev/null
+++ b/pkg/metabase/api.go
@@ -0,0 +1,86 @@
+package metabase
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "time"
+
+ "github.com/crowdsecurity/crowdsec/pkg/cwversion"
+ "github.com/dghubble/sling"
+ log "github.com/sirupsen/logrus"
+)
+
+type APIClient struct {
+ CTX *sling.Sling
+ Client *http.Client
+}
+
+const (
+ sessionEndpoint = "login"
+ scanEndpoint = "scan"
+ resetPasswordEndpoint = "reset_password"
+ userEndpoint = "user"
+ databaseEndpoint = "database"
+)
+
+var (
+ routes = map[string]string{
+ sessionEndpoint: "api/session",
+ scanEndpoint: "api/database/2/rescan_values",
+ resetPasswordEndpoint: "api/user/1/password",
+ userEndpoint: "api/user/1",
+ databaseEndpoint: "api/database/2",
+ }
+)
+
+func NewAPIClient(url string) (*APIClient, error) {
+ httpClient := &http.Client{Timeout: 20 * time.Second}
+ return &APIClient{
+ CTX: sling.New().Client(httpClient).Base(url).Set("User-Agent", fmt.Sprintf("crowdsec/%s", cwversion.VersionStr())),
+ Client: httpClient,
+ }, nil
+}
+
+func (h *APIClient) Do(method string, route string, body interface{}) (interface{}, interface{}, error) {
+ var Success interface{}
+ var Error interface{}
+ var resp *http.Response
+ var err error
+ var data []byte
+ if body != nil {
+ data, _ = json.Marshal(body)
+ }
+
+ switch method {
+ case "POST":
+ log.Debugf("POST /%s", route)
+ log.Debugf("%s", string(data))
+ resp, err = h.CTX.New().Post(route).BodyJSON(body).Receive(&Success, &Error)
+ case "GET":
+ log.Debugf("GET /%s", route)
+ resp, err = h.CTX.New().Get(route).Receive(&Success, &Error)
+ case "PUT":
+ log.Debugf("PUT /%s", route)
+ log.Debugf("%s", string(data))
+ resp, err = h.CTX.New().Put(route).BodyJSON(body).Receive(&Success, &Error)
+ case "DELETE":
+ log.Debugf("DELETE /%s", route)
+ resp, err = h.CTX.New().Delete(route).Receive(&Success, &Error)
+ default:
+ return nil, nil, fmt.Errorf("unsupported method '%s'", method)
+ }
+ if Error != nil {
+ return Success, Error, fmt.Errorf("http error: %v", Error)
+ }
+
+ if resp != nil && resp.StatusCode != 200 && resp.StatusCode != 202 {
+ return Success, Error, fmt.Errorf("bad status code '%d': (success: %+v) | (error: %+v)", resp.StatusCode, Success, Error)
+ }
+ return Success, Error, err
+}
+
+// Set sets a header as key:value on all subsequent requests
+func (h *APIClient) Set(key string, value string) {
+ h.CTX = h.CTX.Set(key, value)
+}
diff --git a/pkg/metabase/container.go b/pkg/metabase/container.go
new file mode 100644
index 000000000..d5c7c3e9e
--- /dev/null
+++ b/pkg/metabase/container.go
@@ -0,0 +1,174 @@
+package metabase
+
+import (
+ "bufio"
+ "context"
+ "fmt"
+ "time"
+
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/container"
+ "github.com/docker/docker/api/types/mount"
+ "github.com/docker/docker/client"
+
"github.com/docker/go-connections/nat" + log "github.com/sirupsen/logrus" +) + +type Container struct { + ListenAddr string + ListenPort string + SharedFolder string + Image string + Name string + ID string + CLI *client.Client + MBDBUri string +} + +func NewContainer(listenAddr string, listenPort string, sharedFolder string, name string, image string, mbDBURI string) (*Container, error) { + cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation()) + if err != nil { + return nil, fmt.Errorf("failed to create docker client : %s", err) + } + return &Container{ + ListenAddr: listenAddr, + ListenPort: listenPort, + SharedFolder: sharedFolder, + Image: image, + Name: name, + CLI: cli, + MBDBUri: mbDBURI, + }, nil +} + +func (c *Container) Create() error { + ctx := context.Background() + log.Printf("Pulling docker image %s", c.Image) + reader, err := c.CLI.ImagePull(ctx, c.Image, types.ImagePullOptions{}) + if err != nil { + return fmt.Errorf("failed to pull docker image : %s", err) + } + defer reader.Close() + scanner := bufio.NewScanner(reader) + for scanner.Scan() { + fmt.Print(".") + } + if err := scanner.Err(); err != nil { + return fmt.Errorf("failed to read imagepull reader: %s", err) + } + fmt.Print("\n") + + hostConfig := &container.HostConfig{ + PortBindings: nat.PortMap{ + "3000/tcp": []nat.PortBinding{ + { + HostIP: c.ListenAddr, + HostPort: c.ListenPort, + }, + }, + }, + Mounts: []mount.Mount{ + { + Type: mount.TypeBind, + Source: c.SharedFolder, + Target: containerSharedFolder, + }, + }, + } + + env := []string{ + fmt.Sprintf("MB_DB_FILE=%s/metabase.db", containerSharedFolder), + } + if c.MBDBUri != "" { + env = append(env, c.MBDBUri) + } + + dockerConfig := &container.Config{ + Image: c.Image, + Tty: true, + Env: env, + } + + log.Infof("creating container '%s'", c.Name) + resp, err := c.CLI.ContainerCreate(ctx, dockerConfig, hostConfig, nil, c.Name) + if err != nil { + return fmt.Errorf("failed to create container : %s", err) + } + c.ID = resp.ID + + return nil +} + +func (c *Container) Start() error { + ctx := context.Background() + if err := c.CLI.ContainerStart(ctx, c.Name, types.ContainerStartOptions{}); err != nil { + return fmt.Errorf("failed while starting %s : %s", c.ID, err) + } + + return nil +} + +func StartContainer(name string) error { + cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation()) + if err != nil { + return fmt.Errorf("failed to create docker client : %s", err) + } + ctx := context.Background() + if err := cli.ContainerStart(ctx, name, types.ContainerStartOptions{}); err != nil { + return fmt.Errorf("failed while starting %s : %s", name, err) + } + + return nil +} + +func StopContainer(name string) error { + cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation()) + if err != nil { + return fmt.Errorf("failed to create docker client : %s", err) + } + ctx := context.Background() + var to time.Duration = 20 * time.Second + if err := cli.ContainerStop(ctx, name, &to); err != nil { + return fmt.Errorf("failed while stopping %s : %s", name, err) + } + log.Printf("container stopped successfully") + return nil +} + +func RemoveContainer(name string) error { + cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation()) + if err != nil { + return fmt.Errorf("failed to create docker client : %s", err) + } + ctx := context.Background() + log.Printf("Removing docker metabase %s", name) + if err := cli.ContainerRemove(ctx, name, 
types.ContainerRemoveOptions{}); err != nil {
+ return fmt.Errorf("failed remove container %s : %s", name, err)
+ }
+ return nil
+}
+
+func RemoveImageContainer(image string) error {
+ cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
+ if err != nil {
+ return fmt.Errorf("failed to create docker client : %s", err)
+ }
+ ctx := context.Background()
+ log.Printf("Removing docker image %s", image)
+ if _, err := cli.ImageRemove(ctx, image, types.ImageRemoveOptions{}); err != nil {
+ return fmt.Errorf("failed to remove image %s : %s", image, err)
+ }
+ return nil
+}
+
+func IsContainerExist(name string) bool {
+ cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
+ if err != nil {
+ log.Fatalf("failed to create docker client : %s", err)
+ }
+ ctx := context.Background()
+ if _, err := cli.ContainerInspect(ctx, name); err != nil {
+ return false
+ }
+ return true
+}
\ No newline at end of file
diff --git a/pkg/metabase/database.go b/pkg/metabase/database.go
new file mode 100644
index 000000000..5ba577875
--- /dev/null
+++ b/pkg/metabase/database.go
@@ -0,0 +1,101 @@
+package metabase
+
+import (
+ "encoding/json"
+ "fmt"
+ "path/filepath"
+ "strings"
+
+ "github.com/crowdsecurity/crowdsec/pkg/csconfig"
+ "github.com/pkg/errors"
+)
+
+type Database struct {
+ DBUrl string
+ Model *Model
+ Config *csconfig.DatabaseCfg
+ Client *APIClient
+ Details *Details
+ // when the mysql/pgsql host is 127.0.0.1, the address seen from metabase will be the docker gateway, since metabase runs in a container
+}
+
+type Details struct {
+ Db string `json:"db"`
+ Host string `json:"host"`
+ Port int `json:"port"`
+ Dbname string `json:"dbname"`
+ User string `json:"user"`
+ Password string `json:"password"`
+ Ssl bool `json:"ssl"`
+ AdditionalOptions interface{} `json:"additional-options"`
+ TunnelEnabled bool `json:"tunnel-enabled"`
+}
+
+type Model struct {
+ Engine string `json:"engine"`
+ Name string `json:"name"`
+ Details *Details `json:"details"`
+ AutoRunQueries bool `json:"auto_run_queries"`
+ IsFullSync bool `json:"is_full_sync"`
+ IsOnDemand bool `json:"is_on_demand"`
+ Schedules map[string]interface{} `json:"schedules"`
+}
+
+func NewDatabase(config *csconfig.DatabaseCfg, client *APIClient, remoteDBAddr string) (*Database, error) {
+ var details *Details
+
+ database := Database{}
+
+ switch config.Type {
+ case "mysql":
+ return nil, fmt.Errorf("database '%s' is not supported yet", config.Type)
+ case "sqlite":
+ database.DBUrl = metabaseSQLiteDBURL
+ localFolder := filepath.Dir(config.DbPath)
+ // replace /var/lib/crowdsec/data/ with /metabase-data/
+ dbPath := strings.Replace(config.DbPath, localFolder, containerSharedFolder, 1)
+ details = &Details{
+ Db: dbPath,
+ }
+ case "postgresql", "postgres", "pgsql":
+ return nil, fmt.Errorf("database '%s' is not supported yet", config.Type)
+ default:
+ return nil, fmt.Errorf("database '%s' not supported", config.Type)
+ }
+ database.Details = details
+ database.Client = client
+ database.Config = config
+
+ return &database, nil
+}
+
+func (d *Database) Update() error {
+ success, errormsg, err := d.Client.Do("GET", routes[databaseEndpoint], nil)
+ if err != nil {
+ return err
+ }
+ if errormsg != nil {
+ return fmt.Errorf("update sqlite db http error: %+v", errormsg)
+ }
+
+ data, err := json.Marshal(success)
+ if err != nil {
+ return errors.Wrap(err, "update sqlite db response (marshal)")
+ }
+
+ model := Model{}
+
+ if err := json.Unmarshal(data, &model); err != nil {
+ return errors.Wrap(err, "update sqlite db response (unmarshal)")
+ }
+ model.Details = d.Details
+ success, errormsg, err = d.Client.Do("PUT", routes[databaseEndpoint], model)
+ if err != nil {
+ return err
+ }
+ if errormsg != nil {
+ return fmt.Errorf("update sqlite db http error: %+v", errormsg)
+ }
+
+ return nil
+}
diff --git a/pkg/metabase/metabase.go b/pkg/metabase/metabase.go
new file mode 100644
index 000000000..ff18a0c66
--- /dev/null
+++ b/pkg/metabase/metabase.go
@@ -0,0 +1,367 @@
+package metabase
+
+import (
+ "archive/zip"
+ "bytes"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "os"
+ "path"
+ "strings"
+ "time"
+
+ log "github.com/sirupsen/logrus"
+
+ "github.com/crowdsecurity/crowdsec/pkg/csconfig"
+ "github.com/pkg/errors"
+ "gopkg.in/yaml.v2"
+)
+
+type Metabase struct {
+ Config *Config
+ Client *APIClient
+ Container *Container
+ Database *Database
+ InternalDBURL string
+}
+
+type Config struct {
+ Database *csconfig.DatabaseCfg `yaml:"database"`
+ ListenAddr string `yaml:"listen_addr"`
+ ListenPort string `yaml:"listen_port"`
+ ListenURL string `yaml:"listen_url"`
+ Username string `yaml:"username"`
+ Password string `yaml:"password"`
+ DBPath string `yaml:"metabase_db_path"`
+}
+
+var (
+ metabaseDefaultUser = "crowdsec@crowdsec.net"
+ metabaseDefaultPassword = "!!Cr0wdS3c_M3t4b4s3??"
+ containerName = "/crowdsec-metabase"
+ metabaseImage = "metabase/metabase:v0.37.0.2"
+ containerSharedFolder = "/metabase-data"
+
+ metabaseSQLiteDBURL = "https://crowdsec-statics-assets.s3-eu-west-1.amazonaws.com/metabase_sqlite.zip"
+)
+
+func (m *Metabase) Init() error {
+ var err error
+ var DBConnectionURI string
+ var remoteDBAddr string
+
+ switch m.Config.Database.Type {
+ case "mysql":
+ return fmt.Errorf("'mysql' is not supported yet for cscli dashboard")
+ //DBConnectionURI = fmt.Sprintf("MB_DB_CONNECTION_URI=mysql://%s:%d/%s?user=%s&password=%s&allowPublicKeyRetrieval=true", remoteDBAddr, m.Config.Database.Port, m.Config.Database.DbName, m.Config.Database.User, m.Config.Database.Password)
+ case "sqlite":
+ m.InternalDBURL = metabaseSQLiteDBURL
+ case "postgresql", "postgres", "pgsql":
+ return fmt.Errorf("'postgresql' is not supported yet by cscli dashboard")
+ default:
+ return fmt.Errorf("database '%s' not supported", m.Config.Database.Type)
+ }
+
+ m.Client, err = NewAPIClient(m.Config.ListenURL)
+ if err != nil {
+ return err
+ }
+ m.Database, err = NewDatabase(m.Config.Database, m.Client, remoteDBAddr)
+ if err != nil {
+ return errors.Wrap(err, "database init")
+ }
+
+ m.Container, err = NewContainer(m.Config.ListenAddr, m.Config.ListenPort, m.Config.DBPath, containerName, metabaseImage, DBConnectionURI)
+ if err != nil {
+ return errors.Wrap(err, "container init")
+ }
+
+ return nil
+}
+
+func NewMetabase(configPath string) (*Metabase, error) {
+ m := &Metabase{}
+ if err := m.LoadConfig(configPath); err != nil {
+ return m, err
+ }
+ if err := m.Init(); err != nil {
+ return m, err
+ }
+ return m, nil
+}
+
+func (m *Metabase) LoadConfig(configPath string) error {
+ yamlFile, err := ioutil.ReadFile(configPath)
+ if err != nil {
+ return err
+ }
+
+ config := &Config{}
+
+ err = yaml.Unmarshal(yamlFile, config)
+ if err != nil {
+ return err
+ }
+ if config.Username == "" {
+ return fmt.Errorf("'username' not found in configuration file '%s'", configPath)
+ }
+
+ if config.Password == "" {
+ return fmt.Errorf("'password' not found in configuration file '%s'", configPath)
+ }
+
+ if config.ListenURL == "" {
+ return fmt.Errorf("'listen_url' not found in configuration file '%s'", configPath)
+ }
+
+ m.Config = config
+
+ return nil
+
+}
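+
+// SetupMetabase bootstraps a dashboard from scratch: it initializes the
+// clients, downloads the pre-seeded SQLite bundle, creates and starts the
+// metabase container, waits for its API to answer, points metabase at the
+// local crowdsec database, triggers a values rescan and finally replaces
+// the default credentials with the configured ones.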
+func SetupMetabase(dbConfig *csconfig.DatabaseCfg, listenAddr string, listenPort string, username string, password string, mbDBPath string) (*Metabase, error) {
+ metabase := &Metabase{
+ Config: &Config{
+ Database: dbConfig,
+ ListenAddr: listenAddr,
+ ListenPort: listenPort,
+ Username: username,
+ Password: password,
+ ListenURL: fmt.Sprintf("http://%s:%s", listenAddr, listenPort),
+ DBPath: mbDBPath,
+ },
+ }
+ if err := metabase.Init(); err != nil {
+ return nil, errors.Wrap(err, "metabase setup init")
+ }
+
+ if err := metabase.DownloadDatabase(false); err != nil {
+ return nil, errors.Wrap(err, "metabase db download")
+ }
+
+ if err := metabase.Container.Create(); err != nil {
+ return nil, errors.Wrap(err, "container create")
+ }
+
+ if err := metabase.Container.Start(); err != nil {
+ return nil, errors.Wrap(err, "container start")
+ }
+
+ log.Infof("waiting for metabase to be up (can take up to a minute)")
+ if err := metabase.WaitAlive(); err != nil {
+ return nil, errors.Wrap(err, "wait alive")
+ }
+
+ if err := metabase.Database.Update(); err != nil {
+ return nil, errors.Wrap(err, "update database")
+ }
+
+ if err := metabase.Scan(); err != nil {
+ return nil, errors.Wrap(err, "db scan")
+ }
+
+ if err := metabase.ResetCredentials(); err != nil {
+ return nil, errors.Wrap(err, "reset creds")
+ }
+
+ return metabase, nil
+}
+
+func (m *Metabase) WaitAlive() error {
+ var err error
+ for {
+ err = m.Login(metabaseDefaultUser, metabaseDefaultPassword)
+ if err != nil {
+ if strings.Contains(err.Error(), "password:did not match stored password") {
+ log.Errorf("Password mismatch error, is your dashboard already set up? Run 'cscli dashboard remove' to reset it.")
+ return errors.Wrapf(err, "Password mismatch error")
+ }
+ log.Debugf("%+v", err)
+ } else {
+ break
+ }
+
+ fmt.Printf(".")
+ time.Sleep(2 * time.Second)
+ }
+ fmt.Printf("\n")
+ return nil
+}
+
+func (m *Metabase) Login(username string, password string) error {
+ body := map[string]string{"username": username, "password": password}
+ successmsg, errormsg, err := m.Client.Do("POST", routes[sessionEndpoint], body)
+ if err != nil {
+ return err
+ }
+
+ if errormsg != nil {
+ return errors.Wrap(err, "http login")
+ }
+ resp, ok := successmsg.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("login: bad response type: %+v", successmsg)
+ }
+ if _, ok := resp["id"]; !ok {
+ return fmt.Errorf("login: can't update session id, no id in response: %v", successmsg)
+ }
+ id, ok := resp["id"].(string)
+ if !ok {
+ return fmt.Errorf("login: bad id type: %+v", resp["id"])
+ }
+ m.Client.Set("Cookie", fmt.Sprintf("metabase.SESSION=%s", id))
+ return nil
+}
+
+func (m *Metabase) Scan() error {
+ _, errormsg, err := m.Client.Do("POST", routes[scanEndpoint], nil)
+ if err != nil {
+ return err
+ }
+ if errormsg != nil {
+ return errors.Wrap(err, "http scan")
+ }
+
+ return nil
+}
+
+func (m *Metabase) ResetPassword(current string, new string) error {
+ body := map[string]string{
+ "id": "1",
+ "password": new,
+ "old_password": current,
+ }
+ _, errormsg, err := m.Client.Do("PUT", routes[resetPasswordEndpoint], body)
+ if err != nil {
+ return errors.Wrap(err, "reset password")
+ }
+ if errormsg != nil {
+ return errors.Wrap(err, "http reset password")
+ }
+ return nil
+}
+
+func (m *Metabase) ResetUsername(username string) error {
+ body := struct {
+ FirstName string `json:"first_name"`
+ LastName string `json:"last_name"`
+ Email string `json:"email"`
+ GroupIDs
[]int `json:"group_ids"` + }{ + FirstName: "Crowdsec", + LastName: "Crowdsec", + Email: username, + GroupIDs: []int{1, 2}, + } + + _, errormsg, err := m.Client.Do("PUT", routes[userEndpoint], body) + if err != nil { + return errors.Wrap(err, "reset username") + } + + if errormsg != nil { + return errors.Wrap(err, "http reset username") + } + + return nil +} + +func (m *Metabase) ResetCredentials() error { + if err := m.ResetPassword(metabaseDefaultPassword, m.Config.Password); err != nil { + return err + } + + /*if err := m.ResetUsername(m.Config.Username); err != nil { + return err + }*/ + + return nil +} + +func (m *Metabase) DumpConfig(path string) error { + data, err := yaml.Marshal(m.Config) + if err != nil { + return err + } + return ioutil.WriteFile(path, data, 0600) +} + +func (m *Metabase) DownloadDatabase(force bool) error { + + metabaseDBSubpath := path.Join(m.Config.DBPath, "metabase.db") + _, err := os.Stat(metabaseDBSubpath) + if err == nil && !force { + log.Printf("%s exists, skip.", metabaseDBSubpath) + return nil + } + + if err := os.MkdirAll(metabaseDBSubpath, 0755); err != nil { + return fmt.Errorf("failed to create %s : %s", metabaseDBSubpath, err) + } + + req, err := http.NewRequest("GET", m.InternalDBURL, nil) + if err != nil { + return fmt.Errorf("failed to build request to fetch metabase db : %s", err) + } + //This needs to be removed once we move the zip out of github + //req.Header.Add("Accept", `application/vnd.github.v3.raw`) + resp, err := http.DefaultClient.Do(req) + if err != nil { + return fmt.Errorf("failed request to fetch metabase db : %s", err) + } + if resp.StatusCode != 200 { + return fmt.Errorf("got http %d while requesting metabase db %s, stop", resp.StatusCode, m.InternalDBURL) + } + defer resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return fmt.Errorf("failed request read while fetching metabase db : %s", err) + } + log.Debugf("Got %d bytes archive", len(body)) + + if err := m.ExtractDatabase(bytes.NewReader(body)); err != nil { + return fmt.Errorf("while extracting zip : %s", err) + } + return nil +} + +func (m *Metabase) ExtractDatabase(buf *bytes.Reader) error { + r, err := zip.NewReader(buf, int64(buf.Len())) + if err != nil { + return err + } + for _, f := range r.File { + if strings.Contains(f.Name, "..") { + return fmt.Errorf("invalid path '%s' in archive", f.Name) + } + tfname := fmt.Sprintf("%s/%s", m.Config.DBPath, f.Name) + log.Tracef("%s -> %d", f.Name, f.UncompressedSize64) + if f.UncompressedSize64 == 0 { + continue + } + tfd, err := os.OpenFile(tfname, os.O_RDWR|os.O_TRUNC|os.O_CREATE, 0644) + if err != nil { + return fmt.Errorf("failed opening target file '%s' : %s", tfname, err) + } + rc, err := f.Open() + if err != nil { + return fmt.Errorf("while opening zip content %s : %s", f.Name, err) + } + written, err := io.Copy(tfd, rc) + if err == io.EOF { + log.Printf("files finished ok") + } else if err != nil { + return fmt.Errorf("while copying content to %s : %s", tfname, err) + } + log.Debugf("written %d bytes to %s", written, tfname) + rc.Close() + } + return nil +} + +func RemoveDatabase(dataDir string) error { + return os.RemoveAll(path.Join(dataDir, "metabase.db")) +} diff --git a/pkg/models/add_alerts_request.go b/pkg/models/add_alerts_request.go new file mode 100644 index 000000000..ead8662f6 --- /dev/null +++ b/pkg/models/add_alerts_request.go @@ -0,0 +1,45 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
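+// (illustrative sketch, not generated output: a consumer of this model can
+// decode and validate in two steps, e.g.
+//   var req AddAlertsRequest
+//   if err := json.Unmarshal(payload, &req); err == nil {
+//       err = req.Validate(strfmt.Default)
+//   }
+// where payload is a hypothetical []byte request body.)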
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "strconv" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// AddAlertsRequest AddAlertsRequest +// +// swagger:model AddAlertsRequest +type AddAlertsRequest []*Alert + +// Validate validates this add alerts request +func (m AddAlertsRequest) Validate(formats strfmt.Registry) error { + var res []error + + for i := 0; i < len(m); i++ { + if swag.IsZero(m[i]) { // not required + continue + } + + if m[i] != nil { + if err := m[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName(strconv.Itoa(i)) + } + return err + } + } + + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/pkg/models/add_alerts_response.go b/pkg/models/add_alerts_response.go new file mode 100644 index 000000000..aeb70bb77 --- /dev/null +++ b/pkg/models/add_alerts_response.go @@ -0,0 +1,20 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "github.com/go-openapi/strfmt" +) + +// AddAlertsResponse AddAlertsResponse +// +// swagger:model AddAlertsResponse +type AddAlertsResponse []string + +// Validate validates this add alerts response +func (m AddAlertsResponse) Validate(formats strfmt.Registry) error { + return nil +} diff --git a/pkg/models/add_signals_request.go b/pkg/models/add_signals_request.go new file mode 100644 index 000000000..87b47807d --- /dev/null +++ b/pkg/models/add_signals_request.go @@ -0,0 +1,47 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "strconv" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// AddSignalsRequest add signals request +// +// All signals request model +// +// swagger:model AddSignalsRequest +type AddSignalsRequest []*AddSignalsRequestItem + +// Validate validates this add signals request +func (m AddSignalsRequest) Validate(formats strfmt.Registry) error { + var res []error + + for i := 0; i < len(m); i++ { + if swag.IsZero(m[i]) { // not required + continue + } + + if m[i] != nil { + if err := m[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName(strconv.Itoa(i)) + } + return err + } + } + + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/pkg/models/add_signals_request_item.go b/pkg/models/add_signals_request_item.go new file mode 100644 index 000000000..555566cfb --- /dev/null +++ b/pkg/models/add_signals_request_item.go @@ -0,0 +1,181 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
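+// (note: the required fields below are pointers so that an absent field can
+// be told apart from a zero value; a caller building an item by hand would
+// typically use the go-openapi/swag helpers, e.g. Message: swag.String("..."),
+// rather than taking the address of a temporary.)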
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// AddSignalsRequestItem Signal +// +// swagger:model AddSignalsRequestItem +type AddSignalsRequestItem struct { + + // created at + CreatedAt string `json:"created_at,omitempty"` + + // machine id + MachineID string `json:"machine_id,omitempty"` + + // a human readable message + // Required: true + Message *string `json:"message"` + + // scenario + // Required: true + Scenario *string `json:"scenario"` + + // scenario hash + // Required: true + ScenarioHash *string `json:"scenario_hash"` + + // scenario version + // Required: true + ScenarioVersion *string `json:"scenario_version"` + + // source + // Required: true + Source *Source `json:"source"` + + // start at + // Required: true + StartAt *string `json:"start_at"` + + // stop at + // Required: true + StopAt *string `json:"stop_at"` +} + +// Validate validates this add signals request item +func (m *AddSignalsRequestItem) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateMessage(formats); err != nil { + res = append(res, err) + } + + if err := m.validateScenario(formats); err != nil { + res = append(res, err) + } + + if err := m.validateScenarioHash(formats); err != nil { + res = append(res, err) + } + + if err := m.validateScenarioVersion(formats); err != nil { + res = append(res, err) + } + + if err := m.validateSource(formats); err != nil { + res = append(res, err) + } + + if err := m.validateStartAt(formats); err != nil { + res = append(res, err) + } + + if err := m.validateStopAt(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *AddSignalsRequestItem) validateMessage(formats strfmt.Registry) error { + + if err := validate.Required("message", "body", m.Message); err != nil { + return err + } + + return nil +} + +func (m *AddSignalsRequestItem) validateScenario(formats strfmt.Registry) error { + + if err := validate.Required("scenario", "body", m.Scenario); err != nil { + return err + } + + return nil +} + +func (m *AddSignalsRequestItem) validateScenarioHash(formats strfmt.Registry) error { + + if err := validate.Required("scenario_hash", "body", m.ScenarioHash); err != nil { + return err + } + + return nil +} + +func (m *AddSignalsRequestItem) validateScenarioVersion(formats strfmt.Registry) error { + + if err := validate.Required("scenario_version", "body", m.ScenarioVersion); err != nil { + return err + } + + return nil +} + +func (m *AddSignalsRequestItem) validateSource(formats strfmt.Registry) error { + + if err := validate.Required("source", "body", m.Source); err != nil { + return err + } + + if m.Source != nil { + if err := m.Source.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("source") + } + return err + } + } + + return nil +} + +func (m *AddSignalsRequestItem) validateStartAt(formats strfmt.Registry) error { + + if err := validate.Required("start_at", "body", m.StartAt); err != nil { + return err + } + + return nil +} + +func (m *AddSignalsRequestItem) validateStopAt(formats strfmt.Registry) error { + + if err := validate.Required("stop_at", "body", m.StopAt); err != nil { + return err + } + + return nil +} + +// MarshalBinary interface implementation +func (m *AddSignalsRequestItem) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *AddSignalsRequestItem) UnmarshalBinary(b []byte) error { + var res AddSignalsRequestItem + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/pkg/models/alert.go b/pkg/models/alert.go new file mode 100644 index 000000000..b9a4c84c2 --- /dev/null +++ b/pkg/models/alert.go @@ -0,0 +1,351 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
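+// (note: Alert.Validate below recurses into Decisions, Events, Meta and
+// Source via their own Validate methods and folds every failure into a
+// single errors.CompositeValidationError, so one call checks the whole tree.)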
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "strconv" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// Alert Alert +// +// swagger:model Alert +type Alert struct { + + // capacity + // Required: true + Capacity *int32 `json:"capacity"` + + // only relevant for GET, ignored in POST requests + // Read Only: true + CreatedAt string `json:"created_at,omitempty"` + + // decisions + Decisions []*Decision `json:"decisions"` + + // the Meta of the events leading to overflow + // Required: true + Events []*Event `json:"events"` + + // events count + // Required: true + EventsCount *int32 `json:"events_count"` + + // only relevant for GET, ignored in POST requests + // Read Only: true + ID int64 `json:"id,omitempty"` + + // labels + Labels []string `json:"labels"` + + // leakspeed + // Required: true + Leakspeed *string `json:"leakspeed"` + + // only relevant for APIL->APIC, ignored for cscli->APIL and crowdsec->APIL + // Read Only: true + MachineID string `json:"machine_id,omitempty"` + + // a human readable message + // Required: true + Message *string `json:"message"` + + // meta + Meta Meta `json:"meta,omitempty"` + + // remediation + Remediation bool `json:"remediation,omitempty"` + + // scenario + // Required: true + Scenario *string `json:"scenario"` + + // scenario hash + // Required: true + ScenarioHash *string `json:"scenario_hash"` + + // scenario version + // Required: true + ScenarioVersion *string `json:"scenario_version"` + + // simulated + // Required: true + Simulated *bool `json:"simulated"` + + // source + // Required: true + Source *Source `json:"source"` + + // start at + // Required: true + StartAt *string `json:"start_at"` + + // stop at + // Required: true + StopAt *string `json:"stop_at"` +} + +// Validate validates this alert +func (m *Alert) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateCapacity(formats); err != nil { + res = append(res, err) + } + + if err := m.validateDecisions(formats); err != nil { + res = append(res, err) + } + + if err := m.validateEvents(formats); err != nil { + res = append(res, err) + } + + if err := m.validateEventsCount(formats); err != nil { + res = append(res, err) + } + + if err := m.validateLeakspeed(formats); err != nil { + res = append(res, err) + } + + if err := m.validateMessage(formats); err != nil { + res = append(res, err) + } + + if err := m.validateMeta(formats); err != nil { + res = append(res, err) + } + + if err := m.validateScenario(formats); err != nil { + res = append(res, err) + } + + if err := m.validateScenarioHash(formats); err != nil { + res = append(res, err) + } + + if err := m.validateScenarioVersion(formats); err != nil { + res = append(res, err) + } + + if err := m.validateSimulated(formats); err != nil { + res = append(res, err) + } + + if err := m.validateSource(formats); err != nil { + res = append(res, err) + } + + if err := m.validateStartAt(formats); err != nil { + res = append(res, err) + } + + if err := m.validateStopAt(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *Alert) validateCapacity(formats strfmt.Registry) error { + + if err := validate.Required("capacity", "body", m.Capacity); err != nil { + return err + } + + return nil +} + +func (m *Alert) validateDecisions(formats strfmt.Registry) error { + + if swag.IsZero(m.Decisions) { // not required + return nil + } + + for i := 0; i < len(m.Decisions); i++ { + if swag.IsZero(m.Decisions[i]) { // not required + continue + } + + if m.Decisions[i] != nil { + if err := m.Decisions[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("decisions" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +func (m *Alert) validateEvents(formats strfmt.Registry) error { + + if err := validate.Required("events", "body", m.Events); err != nil { + return err + } + + for i := 0; i < len(m.Events); i++ { + if swag.IsZero(m.Events[i]) { // not required + continue + } + + if m.Events[i] != nil { + if err := m.Events[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("events" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +func (m *Alert) validateEventsCount(formats strfmt.Registry) error { + + if err := validate.Required("events_count", "body", m.EventsCount); err != nil { + return err + } + + return nil +} + +func (m *Alert) validateLeakspeed(formats strfmt.Registry) error { + + if err := validate.Required("leakspeed", "body", m.Leakspeed); err != nil { + return err + } + + return nil +} + +func (m *Alert) validateMessage(formats strfmt.Registry) error { + + if err := validate.Required("message", "body", m.Message); err != nil { + return err + } + + return nil +} + +func (m *Alert) validateMeta(formats strfmt.Registry) error { + + if swag.IsZero(m.Meta) { // not required + return nil + } + + if err := m.Meta.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("meta") + } + return err + } + + return nil +} + +func (m *Alert) validateScenario(formats strfmt.Registry) error { + + if err := validate.Required("scenario", "body", m.Scenario); err != nil { + return err + } + + return nil +} + +func (m *Alert) validateScenarioHash(formats strfmt.Registry) error { + + if err := validate.Required("scenario_hash", "body", m.ScenarioHash); err != nil { + return err + } + + return nil +} + +func (m *Alert) validateScenarioVersion(formats strfmt.Registry) error { + + if err := validate.Required("scenario_version", "body", m.ScenarioVersion); err != nil { + return err + } + + return nil +} + +func (m *Alert) validateSimulated(formats strfmt.Registry) error { + + if err := validate.Required("simulated", "body", m.Simulated); err != nil { + return err + } + + return nil +} + +func (m *Alert) validateSource(formats strfmt.Registry) error { + + if err := validate.Required("source", "body", m.Source); err != nil { + return err + } + + if m.Source != nil { + if err := m.Source.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("source") + } + return err + } + } + + return nil +} + +func (m *Alert) validateStartAt(formats strfmt.Registry) error { + + if err := validate.Required("start_at", "body", m.StartAt); err != nil { + return err + } + + return nil +} + +func (m *Alert) validateStopAt(formats strfmt.Registry) error { + + if err := validate.Required("stop_at", "body", m.StopAt); err != nil { + return err + } + + return nil +} + +// MarshalBinary 
interface implementation +func (m *Alert) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *Alert) UnmarshalBinary(b []byte) error { + var res Alert + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/pkg/models/decision.go b/pkg/models/decision.go new file mode 100644 index 000000000..0d05c1a93 --- /dev/null +++ b/pkg/models/decision.go @@ -0,0 +1,163 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// Decision Decision +// +// swagger:model Decision +type Decision struct { + + // duration + // Required: true + Duration *string `json:"duration"` + + // (only relevant for GET ops) when the value is an IP or range, its numeric representation + EndIP int64 `json:"end_ip,omitempty"` + + // (only relevant for GET ops) the unique id + // Read Only: true + ID int64 `json:"id,omitempty"` + + // the origin of the decision : cscli, crowdsec + // Required: true + Origin *string `json:"origin"` + + // scenario + // Required: true + Scenario *string `json:"scenario"` + + // the scope of decision : does it apply to an IP, a range, a username, etc + // Required: true + Scope *string `json:"scope"` + + // true if the decision result from a scenario in simulation mode + // Read Only: true + Simulated *bool `json:"simulated,omitempty"` + + // (only relevant for GET ops) when the value is an IP or range, its numeric representation + StartIP int64 `json:"start_ip,omitempty"` + + // the type of decision, might be 'ban', 'captcha' or something custom. Ignored when watcher (cscli/crowdsec) is pushing to APIL. + // Required: true + Type *string `json:"type"` + + // the value of the decision scope : an IP, a range, a username, etc + // Required: true + Value *string `json:"value"` +} + +// Validate validates this decision +func (m *Decision) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateDuration(formats); err != nil { + res = append(res, err) + } + + if err := m.validateOrigin(formats); err != nil { + res = append(res, err) + } + + if err := m.validateScenario(formats); err != nil { + res = append(res, err) + } + + if err := m.validateScope(formats); err != nil { + res = append(res, err) + } + + if err := m.validateType(formats); err != nil { + res = append(res, err) + } + + if err := m.validateValue(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *Decision) validateDuration(formats strfmt.Registry) error { + + if err := validate.Required("duration", "body", m.Duration); err != nil { + return err + } + + return nil +} + +func (m *Decision) validateOrigin(formats strfmt.Registry) error { + + if err := validate.Required("origin", "body", m.Origin); err != nil { + return err + } + + return nil +} + +func (m *Decision) validateScenario(formats strfmt.Registry) error { + + if err := validate.Required("scenario", "body", m.Scenario); err != nil { + return err + } + + return nil +} + +func (m *Decision) validateScope(formats strfmt.Registry) error { + + if err := validate.Required("scope", "body", m.Scope); err != nil { + return err + } + + return nil +} + +func (m *Decision) validateType(formats strfmt.Registry) error { + + if err := validate.Required("type", "body", m.Type); err != nil { + return err + } + + return nil +} + +func (m *Decision) validateValue(formats strfmt.Registry) error { + + if err := validate.Required("value", "body", m.Value); err != nil { + return err + } + + return nil +} + +// MarshalBinary interface implementation +func (m *Decision) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *Decision) UnmarshalBinary(b []byte) error { + var res Decision + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/pkg/models/decisions_stream_response.go b/pkg/models/decisions_stream_response.go new file mode 100644 index 000000000..132681cb5 --- /dev/null +++ b/pkg/models/decisions_stream_response.go @@ -0,0 +1,92 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// DecisionsStreamResponse DecisionsStreamResponse +// +// swagger:model DecisionsStreamResponse +type DecisionsStreamResponse struct { + + // deleted + Deleted GetDecisionsResponse `json:"deleted,omitempty"` + + // new + New GetDecisionsResponse `json:"new,omitempty"` +} + +// Validate validates this decisions stream response +func (m *DecisionsStreamResponse) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateDeleted(formats); err != nil { + res = append(res, err) + } + + if err := m.validateNew(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *DecisionsStreamResponse) validateDeleted(formats strfmt.Registry) error { + + if swag.IsZero(m.Deleted) { // not required + return nil + } + + if err := m.Deleted.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("deleted") + } + return err + } + + return nil +} + +func (m *DecisionsStreamResponse) validateNew(formats strfmt.Registry) error { + + if swag.IsZero(m.New) { // not required + return nil + } + + if err := m.New.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("new") + } + return err + } + + return nil +} + +// MarshalBinary interface implementation +func (m *DecisionsStreamResponse) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *DecisionsStreamResponse) UnmarshalBinary(b []byte) error { + var res DecisionsStreamResponse + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/pkg/models/delete_alerts_response.go b/pkg/models/delete_alerts_response.go new file mode 100644 index 000000000..f13688954 --- /dev/null +++ b/pkg/models/delete_alerts_response.go @@ -0,0 +1,43 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// DeleteAlertsResponse DeleteAlertsResponse +// +// swagger:model DeleteAlertsResponse +type DeleteAlertsResponse struct { + + // number of deleted alerts + NbDeleted string `json:"nbDeleted,omitempty"` +} + +// Validate validates this delete alerts response +func (m *DeleteAlertsResponse) Validate(formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *DeleteAlertsResponse) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *DeleteAlertsResponse) UnmarshalBinary(b []byte) error { + var res DeleteAlertsResponse + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/pkg/models/delete_decision_response.go b/pkg/models/delete_decision_response.go new file mode 100644 index 000000000..d934bd56a --- /dev/null +++ b/pkg/models/delete_decision_response.go @@ -0,0 +1,43 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
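+// (note: nbDeleted is carried as a string rather than an integer, per the
+// swagger spec; callers needing a number must convert it, e.g. strconv.Atoi.)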
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// DeleteDecisionResponse DeleteDecisionResponse +// +// swagger:model DeleteDecisionResponse +type DeleteDecisionResponse struct { + + // number of deleted decisions + NbDeleted string `json:"nbDeleted,omitempty"` +} + +// Validate validates this delete decision response +func (m *DeleteDecisionResponse) Validate(formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *DeleteDecisionResponse) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *DeleteDecisionResponse) UnmarshalBinary(b []byte) error { + var res DeleteDecisionResponse + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/pkg/models/error_response.go b/pkg/models/error_response.go new file mode 100644 index 000000000..2ef64302e --- /dev/null +++ b/pkg/models/error_response.go @@ -0,0 +1,69 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// ErrorResponse error response +// +// error response return by the API +// +// swagger:model ErrorResponse +type ErrorResponse struct { + + // more detail on individual errors + Errors string `json:"errors,omitempty"` + + // Error message + // Required: true + Message *string `json:"message"` +} + +// Validate validates this error response +func (m *ErrorResponse) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateMessage(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *ErrorResponse) validateMessage(formats strfmt.Registry) error { + + if err := validate.Required("message", "body", m.Message); err != nil { + return err + } + + return nil +} + +// MarshalBinary interface implementation +func (m *ErrorResponse) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *ErrorResponse) UnmarshalBinary(b []byte) error { + var res ErrorResponse + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/pkg/models/event.go b/pkg/models/event.go new file mode 100644 index 000000000..b9ff31917 --- /dev/null +++ b/pkg/models/event.go @@ -0,0 +1,88 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
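+// (note: both fields are required; timestamp is a plain *string here, so no
+// date-time format validation happens beyond the presence check below.)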
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// Event Event +// +// swagger:model Event +type Event struct { + + // meta + // Required: true + Meta Meta `json:"meta"` + + // timestamp + // Required: true + Timestamp *string `json:"timestamp"` +} + +// Validate validates this event +func (m *Event) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateMeta(formats); err != nil { + res = append(res, err) + } + + if err := m.validateTimestamp(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *Event) validateMeta(formats strfmt.Registry) error { + + if err := validate.Required("meta", "body", m.Meta); err != nil { + return err + } + + if err := m.Meta.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("meta") + } + return err + } + + return nil +} + +func (m *Event) validateTimestamp(formats strfmt.Registry) error { + + if err := validate.Required("timestamp", "body", m.Timestamp); err != nil { + return err + } + + return nil +} + +// MarshalBinary interface implementation +func (m *Event) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *Event) UnmarshalBinary(b []byte) error { + var res Event + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/pkg/models/flush_decision_response.go b/pkg/models/flush_decision_response.go new file mode 100644 index 000000000..b01dc3559 --- /dev/null +++ b/pkg/models/flush_decision_response.go @@ -0,0 +1,43 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// FlushDecisionResponse FlushDecisionResponse +// +// swagger:model FlushDecisionResponse +type FlushDecisionResponse struct { + + // decision id + DecisionID string `json:"decision_id,omitempty"` +} + +// Validate validates this flush decision response +func (m *FlushDecisionResponse) Validate(formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *FlushDecisionResponse) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *FlushDecisionResponse) UnmarshalBinary(b []byte) error { + var res FlushDecisionResponse + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/pkg/models/get_alerts_response.go b/pkg/models/get_alerts_response.go new file mode 100644 index 000000000..2c673d650 --- /dev/null +++ b/pkg/models/get_alerts_response.go @@ -0,0 +1,45 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
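+// (note: GetAlertsResponse is just []*Alert; Validate below walks the slice,
+// skipping nil/zero entries via swag.IsZero and validating the rest.)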
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "strconv" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// GetAlertsResponse AlertsResponse +// +// swagger:model GetAlertsResponse +type GetAlertsResponse []*Alert + +// Validate validates this get alerts response +func (m GetAlertsResponse) Validate(formats strfmt.Registry) error { + var res []error + + for i := 0; i < len(m); i++ { + if swag.IsZero(m[i]) { // not required + continue + } + + if m[i] != nil { + if err := m[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName(strconv.Itoa(i)) + } + return err + } + } + + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/pkg/models/get_decisions_response.go b/pkg/models/get_decisions_response.go new file mode 100644 index 000000000..7113212ba --- /dev/null +++ b/pkg/models/get_decisions_response.go @@ -0,0 +1,45 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "strconv" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// GetDecisionsResponse GetDecisionsResponse +// +// swagger:model GetDecisionsResponse +type GetDecisionsResponse []*Decision + +// Validate validates this get decisions response +func (m GetDecisionsResponse) Validate(formats strfmt.Registry) error { + var res []error + + for i := 0; i < len(m); i++ { + if swag.IsZero(m[i]) { // not required + continue + } + + if m[i] != nil { + if err := m[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName(strconv.Itoa(i)) + } + return err + } + } + + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/pkg/models/helpers.go b/pkg/models/helpers.go new file mode 100644 index 000000000..b9ea4a92f --- /dev/null +++ b/pkg/models/helpers.go @@ -0,0 +1,19 @@ +package models + +func (a *Alert) HasRemediation() bool { + return true +} + +func (a *Alert) GetScope() string { + if a.Source.Scope == nil { + return "" + } + return *a.Source.Scope +} + +func (a *Alert) GetScenario() string { + if a.Scenario == nil { + return "" + } + return *a.Scenario +} diff --git a/pkg/models/localapi_swagger.yaml b/pkg/models/localapi_swagger.yaml new file mode 100644 index 000000000..5dbff0f57 --- /dev/null +++ b/pkg/models/localapi_swagger.yaml @@ -0,0 +1,948 @@ +swagger: '2.0' +info: + version: 1.0.0 + title: Swagger CrowdSec + description: CrowdSec local API + contact: + email: contact@crowdsec.net +host: localhost +basePath: /v1 +securityDefinitions: + JWTAuthorizer: + type: "apiKey" + name: "Authorization: Bearer" + in: "header" + APIKeyAuthorizer: + type: "apiKey" + name: "X-Api-Key" + in: "header" +schemes: + - https + - http +consumes: + - application/json +produces: + - application/json +paths: + /decisions/stream: + get: + description: Returns a list of new/expired decisions. 
Intended for blockers that need to "stream" decisions + summary: getDecisionsStream + tags: + - blockers + operationId: getDecisionsStream + deprecated: false + produces: + - application/json + parameters: + - name: startup + in: query + required: false + type: boolean + description: 'If true, means that the blocker is starting and a full list must be provided' + responses: + '200': + description: successful operation + schema: + $ref: '#/definitions/DecisionsStreamResponse' + headers: {} + '400': + description: "400 response" + schema: + $ref: "#/definitions/ErrorResponse" + security: + - APIKeyAuthorizer: [] + head: + description: Returns a list of new/expired decisions. Intended for blockers that need to "stream" decisions + summary: GetDecisionsStream + tags: + - blockers + operationId: headDecisionsStream + deprecated: false + produces: + - application/json + parameters: + - name: startup + in: query + required: false + type: boolean + description: 'If true, means that the blocker is starting and a full list must be provided' + responses: + '200': + description: successful operation + headers: {} + '400': + description: "400 response" + security: + - APIKeyAuthorizer: [] + /decisions: + get: + description: Returns information about existing decisions + summary: getDecisions + tags: + - blockers + operationId: getDecisions + deprecated: false + produces: + - application/json + parameters: + - name: scope + in: query + required: false + type: string + description: scope to which the decision applies (ie. IP/Range/Username/Session/...) + - name: value + in: query + required: false + type: string + description: the value to match for in the specified scope + - name: type + in: query + required: false + type: string + description: type of decision + - name: ip + in: query + required: false + type: string + description: IP to search for (shorthand for scope=ip&value=) + - name: range + in: query + required: false + type: string + description: range to search for (shorthand for scope=range&value=) + - name: simulated + in: query + required: false + type: boolean + description: if set to true, decisions in simulation mode will be returned as well + responses: + '200': + description: "successful operation" + schema: + $ref: '#/definitions/GetDecisionsResponse' + '400': + description: "400 response" + schema: + $ref: "#/definitions/ErrorResponse" + head: + description: Returns information about existing decisions + summary: GetDecisions + tags: + - blockers + operationId: headDecisions + deprecated: false + produces: + - application/json + parameters: + - name: scope + in: query + required: false + type: string + description: scope to which the decision applies (ie. IP/Range/Username/Session/...) 
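+ # (the remaining filter parameters mirror the GET operation above; an
+ # illustrative lookup, assuming a registered bouncer key and the default
+ # local listen address: curl -H 'X-Api-Key: <key>' 'http://127.0.0.1:8080/v1/decisions?ip=1.2.3.4')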
+ - name: value + in: query + required: false + type: string + description: the value to match for in the specified scope + - name: type + in: query + required: false + type: string + description: type of decision + - name: ip + in: query + required: false + type: string + description: IP to search for (shorthand for scope=ip&value=) + - name: range + in: query + required: false + type: string + description: range to search for (shorthand for scope=range&value=) + - name: simulated + in: query + required: false + type: boolean + description: if set to true, decisions in simulation mode will be returned as well + responses: + '200': + description: "successful operation" + '400': + description: "400 response" + security: + - APIKeyAuthorizer: [] + delete: + description: Delete decisions(s) for given filters (only from cscli) + summary: deleteDecisions + tags: + - watchers + operationId: deleteDecisions + deprecated: false + produces: + - application/json + parameters: + - name: scope + in: query + required: false + type: string + description: scope to which the decision applies (ie. IP/Range/Username/Session/...) + - name: value + in: query + required: false + type: string + description: the value to match for in the specified scope + - name: type + in: query + required: false + type: string + description: type of decision + - name: ip + in: query + required: false + type: string + description: IP to search for (shorthand for scope=ip&value=) + - name: range + in: query + required: false + type: string + description: range to search for (shorthand for scope=range&value=) + responses: + '200': + description: successful operation + schema: + $ref: '#/definitions/DeleteDecisionResponse' + headers: {} + '400': + description: "400 response" + schema: + $ref: "#/definitions/ErrorResponse" + security: + - JWTAuthorizer: [] + '/decisions/{decision_id}': + delete: + description: Delete decision for given ban ID (only from cscli) + summary: DeleteDecision + tags: + - watchers + operationId: DeleteDecision + deprecated: false + produces: + - application/json + parameters: + - name: decision_id + in: path + required: true + type: string + description: '' + responses: + '200': + description: successful operation + schema: + $ref: '#/definitions/DeleteDecisionResponse' + headers: {} + '404': + description: "404 response" + schema: + $ref: "#/definitions/ErrorResponse" + security: + - JWTAuthorizer: [] + /watchers: + post: + description: This method is used when installing crowdsec (cscli->APIL) + summary: RegisterWatcher + tags: + - watchers + operationId: RegisterWatcher + deprecated: false + produces: + - application/json + consumes: + - application/json + parameters: + - name: body + in: body + required: true + description: Information about the watcher to be registered + schema: + $ref: '#/definitions/WatcherRegistrationRequest' + responses: + '200': + description: Watcher registered + headers: {} + '400': + description: "400 response" + schema: + $ref: "#/definitions/ErrorResponse" + /watchers/login: + post: + description: Authenticate current to get session ID + summary: AuthenticateWatcher + tags: + - watchers + operationId: AuthenticateWatcher + deprecated: false + produces: + - application/json + consumes: + - application/json + parameters: + - name: body + in: body + required: true + description: Information about the watcher to be reset + schema: + $ref: '#/definitions/WatcherAuthRequest' + responses: + '200': + description: Login successful + schema: + $ref: '#/definitions/WatcherAuthResponse' 
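+ # (403 is the failed-authentication path; like the other error responses
+ # it carries the ErrorResponse model)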
+        '403':
+          description: "403 response"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+  /alerts:
+    post:
+      description: Push alerts to API
+      summary: pushAlerts
+      tags:
+        - watchers
+      operationId: pushAlerts
+      deprecated: false
+      produces:
+        - application/json
+      consumes:
+        - application/json
+      parameters:
+        - name: body
+          in: body
+          required: true
+          description: Push alerts to the API
+          schema:
+            $ref: '#/definitions/AddAlertsRequest'
+      responses:
+        '200':
+          description: successful operation
+          schema:
+            $ref: '#/definitions/AddAlertsResponse'
+          headers: {}
+        '400':
+          description: "400 response"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+      security:
+        - JWTAuthorizer: []
+    get:
+      description: Search for alerts
+      summary: searchAlerts
+      tags:
+        - watchers
+      operationId: searchAlerts
+      deprecated: false
+      produces:
+        - application/json
+      parameters:
+        - name: scope
+          in: query
+          required: false
+          type: string
+          description: show alerts for this scope
+        - name: value
+          in: query
+          required: false
+          type: string
+          description: show alerts for this value (used with scope)
+        - name: scenario
+          in: query
+          required: false
+          type: string
+          description: show alerts for this scenario
+        - name: ip
+          in: query
+          required: false
+          type: string
+          description: IP to search for (shorthand for scope=ip&value=)
+        - name: range
+          in: query
+          required: false
+          type: string
+          description: range to search for (shorthand for scope=range&value=)
+        - name: since #shouldn't "since" be a golang-duration format ?
+          in: query
+          required: false
+          type: string
+          format: date-time
+          description: 'search alerts newer than delay (format must be compatible with time.ParseDuration)'
+        - name: until #same as for "since"
+          in: query
+          description: 'search alerts older than delay (format must be compatible with time.ParseDuration)'
+          required: false
+          type: string
+          format: date-time
+        - name: simulated
+          in: query
+          required: false
+          type: boolean
+          description: if set to true, decisions in simulation mode will be returned as well
+        - name: has_active_decision
+          in: query
+          required: false
+          type: boolean
+          description: 'only return alerts with decisions not expired yet'
+        - name: decision_type
+          in: query
+          required: false
+          type: string
+          description: 'restrict results to alerts with decisions matching given type'
+        - name: limit
+          in: query
+          required: false
+          type: number
+          description: 'number of alerts to return'
+      responses:
+        '200':
+          description: successful operation
+          schema:
+            $ref: '#/definitions/GetAlertsResponse'
+          headers: {}
+        '400':
+          description: "400 response"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+      security:
+        - JWTAuthorizer: []
+    head:
+      description: Search for alerts
+      summary: searchAlerts
+      tags:
+        - watchers
+      operationId: headAlerts
+      deprecated: false
+      produces:
+        - application/json
+      parameters:
+        - name: scope
+          in: query
+          required: false
+          type: string
+          description: show alerts for this scope
+        - name: value
+          in: query
+          required: false
+          type: string
+          description: show alerts for this value (used with scope)
+        - name: scenario
+          in: query
+          required: false
+          type: string
+          description: show alerts for this scenario
+        - name: ip
+          in: query
+          required: false
+          type: string
+          description: IP to search for (shorthand for scope=ip&value=)
+        - name: range
+          in: query
+          required: false
+          type: string
+          description: range to search for (shorthand for scope=range&value=)
+        - name: since #shouldn't "since" be a golang-duration format ?
+          in: query
+          required: false
+          type: string
+          format: date-time
+          description: 'search alerts newer than delay (format must be compatible with time.ParseDuration)'
+        - name: until #same as for "since"
+          in: query
+          description: 'search alerts older than delay (format must be compatible with time.ParseDuration)'
+          required: false
+          type: string
+          format: date-time
+        - name: simulated
+          in: query
+          required: false
+          type: boolean
+          description: if set to true, decisions in simulation mode will be returned as well
+        - name: has_active_decision
+          in: query
+          required: false
+          type: boolean
+          description: 'only return alerts with decisions not expired yet'
+        - name: decision_type
+          in: query
+          required: false
+          type: string
+          description: 'restrict results to alerts with decisions matching given type'
+        - name: limit
+          in: query
+          required: false
+          type: number
+          description: 'number of alerts to return'
+      responses:
+        '200':
+          description: successful operation
+          headers: {}
+        '400':
+          description: "400 response"
+      security:
+        - JWTAuthorizer: []
+    delete:
+      description: Delete alerts
+      summary: deleteAlerts
+      tags:
+        - watchers
+      operationId: deleteAlerts
+      deprecated: false
+      produces:
+        - application/json
+      parameters:
+        - name: scope
+          in: query
+          required: false
+          type: string
+          description: delete alerts for this scope
+        - name: value
+          in: query
+          required: false
+          type: string
+          description: delete alerts for this value (used with scope)
+        - name: scenario
+          in: query
+          required: false
+          type: string
+          description: delete alerts for this scenario
+        - name: ip
+          in: query
+          required: false
+          type: string
+          description: delete alerts with IP (shorthand for scope=ip&value=)
+        - name: range
+          in: query
+          required: false
+          type: string
+          description: delete alerts concerned by range (shorthand for scope=range&value=)
+        - name: since
+          in: query
+          required: false
+          type: string
+          format: date-time
+          description: 'delete alerts added after YYYY-mm-DD-HH:MM:SS'
+        - name: until
+          in: query
+          required: false
+          type: string
+          format: date-time
+          description: 'delete alerts added before YYYY-mm-DD-HH:MM:SS'
+        - name: has_active_decision
+          in: query
+          required: false
+          type: boolean
+          description: 'delete only alerts with decisions not expired yet'
+        - name: alert_source
+          in: query
+          required: false
+          type: string
+          description: delete only alerts with matching source (i.e.
cscli/crowdsec) + responses: + '200': + description: successful operation + schema: + $ref: '#/definitions/DeleteAlertsResponse' + headers: {} + '400': + description: "400 response" + schema: + $ref: "#/definitions/ErrorResponse" + security: + - JWTAuthorizer: [] + '/alerts/{alert_id}': + get: + description: Get alert by ID + summary: GetAlertByID + tags: + - watchers + operationId: GetAlertbyID + deprecated: false + produces: + - application/json + parameters: + - name: alert_id + in: path + required: true + type: string + description: '' + responses: + '200': + description: successful operation + schema: + $ref: '#/definitions/Alert' + headers: {} + '400': + description: "400 response" + schema: + $ref: "#/definitions/ErrorResponse" + security: + - JWTAuthorizer: [] + head: + description: Get alert by ID + summary: GetAlertByID + tags: + - watchers + operationId: HeadAlertbyID + deprecated: false + produces: + - application/json + parameters: + - name: alert_id + in: path + required: true + type: string + description: '' + responses: + '200': + description: successful operation + headers: {} + '400': + description: "400 response" + security: + - JWTAuthorizer: [] +definitions: + WatcherRegistrationRequest: + title: WatcherRegistrationRequest + type: object + properties: + machine_id: + type: string + password: + type: string + format: password + required: + - machine_id + - password + WatcherAuthRequest: + title: WatcherAuthRequest + type: object + properties: + machine_id: + type: string + password: + type: string + format: password + scenarios: + description: the list of scenarios enabled on the watcher + type: array + items: + type: string + required: + - machine_id + - password + WatcherAuthResponse: + title: WatcherAuthResponse + description: the response of a successful authentication + type: object + properties: + code: + type: integer + expire: + type: string + token: + type: string + Alert: + title: Alert + type: object + properties: + id: + description: 'only relevant for GET, ignored in POST requests' + type: integer + readOnly: true + machine_id: + description: 'only relevant for APIL->APIC, ignored for cscli->APIL and crowdsec->APIL' + type: string + readOnly: true + created_at: + description: 'only relevant for GET, ignored in POST requests' + type: string + readOnly: true + scenario: + type: string + scenario_hash: + type: string + scenario_version: + type: string + message: + description: a human readable message + type: string + events_count: + type: integer + format: int32 + start_at: + type: string + stop_at: + type: string + capacity: + type: integer + format: int32 + leakspeed: + type: string + simulated: + type: boolean + events: + description: the Meta of the events leading to overflow + type: array + items: + $ref: '#/definitions/Event' + remediation: + type: boolean + decisions: + type: array + items: + $ref: '#/definitions/Decision' + source: + $ref: '#/definitions/Source' + meta: + $ref: '#/definitions/Meta' + labels: + type: array + items: + type: string + required: + - scenario + - scenario_hash + - scenario_version + - message + - events_count + - start_at + - stop_at + - capacity + - leakspeed + - simulated + - events + - source + Source: + title: Source + type: object + properties: + scope: + description: 'the scope of a source : ip,range,username,etc' + type: string + value: + description: 'the value of a source : the ip, the range, the username,etc' + type: string + ip: + description: provided as a convenience when the source is an IP + type: string + 
      range:
+        description: provided as a convenience when the source is an IP
+        type: string
+      as_number:
+        description: provided as a convenience when the source is an IP
+        type: string
+      as_name:
+        description: provided as a convenience when the source is an IP
+        type: string
+      cn:
+        type: string
+      latitude:
+        type: number
+        format: float
+      longitude:
+        type: number
+        format: float
+    required:
+      - scope
+      - value
+  Metrics:
+    title: Metrics
+    type: object
+    properties:
+      apil_version:
+        description: the local version of crowdsec/apil
+        type: string
+      bouncers:
+        type: array
+        items:
+          $ref: '#/definitions/MetricsSoftInfo'
+      machines:
+        type: array
+        items:
+          $ref: '#/definitions/MetricsSoftInfo'
+    required:
+      - apil_version
+      - bouncers
+      - machines
+  MetricsSoftInfo:
+    title: MetricsSoftInfo
+    description: Software version info (so we can warn users about out-of-date software). The software name and the version are "guessed" from the user-agent
+    type: object
+    properties:
+      name:
+        type: string
+        description: name of the component
+      version:
+        type: string
+        description: software version
+  Decision:
+    title: Decision
+    type: object
+    properties:
+      id:
+        description: (only relevant for GET ops) the unique id
+        type: integer
+        readOnly: true
+      origin:
+        description: 'the origin of the decision : cscli, crowdsec'
+        type: string
+      type:
+        description: 'the type of decision, might be ''ban'', ''captcha'' or something custom. Ignored when watcher (cscli/crowdsec) is pushing to APIL.'
+        type: string
+      scope:
+        description: 'the scope of decision : does it apply to an IP, a range, a username, etc'
+        type: string
+      value:
+        description: 'the value of the decision scope : an IP, a range, a username, etc'
+        type: string
+      start_ip:
+        description: '(only relevant for GET ops) when the value is an IP or range, its numeric representation'
+        type: integer
+      end_ip:
+        description: '(only relevant for GET ops) when the value is an IP or range, its numeric representation'
+        type: integer
+      duration:
+        type: string
+      scenario:
+        type: string
+      simulated:
+        type: boolean
+        description: 'true if the decision results from a scenario in simulation mode'
+        readOnly: true
+    required:
+      - origin
+      - type
+      - scope
+      - value
+      - duration
+      - scenario
+  DeleteDecisionResponse:
+    title: DeleteDecisionResponse
+    type: object
+    properties:
+      nbDeleted:
+        type: string
+        description: "number of deleted decisions"
+  AddAlertsRequest:
+    title: AddAlertsRequest
+    type: array
+    items:
+      $ref: '#/definitions/Alert'
+  AddAlertsResponse:
+    title: AddAlertsResponse
+    type: array
+    items:
+      type: string
+      description: alert_id
+  GetAlertsResponse:
+    title: AlertsResponse
+    type: array
+    items:
+      $ref: '#/definitions/Alert'
+  DeleteAlertsResponse:
+    title: DeleteAlertsResponse
+    type: object
+    properties:
+      nbDeleted:
+        type: string
+        description: "number of deleted alerts"
+  DecisionsStreamResponse:
+    title: DecisionsStreamResponse
+    type: object
+    properties:
+      new:
+        $ref: '#/definitions/GetDecisionsResponse'
+      deleted:
+        $ref: '#/definitions/GetDecisionsResponse'
+  Event:
+    title: Event
+    type: object
+    properties:
+      timestamp:
+        type: string
+      meta:
+        $ref: '#/definitions/Meta'
+    required:
+      - timestamp
+      - meta
+  GetDecisionsResponse:
+    title: GetDecisionsResponse
+    type: array
+    items:
+      $ref: '#/definitions/Decision'
+  Meta:
+    title: Meta
+    description: the Meta data of the Alert itself
+    type: array
+    items:
+      type: object
+      properties:
+        key:
+          type: string
+        value:
+          type: string
+  ErrorResponse:
+    type: "object"
+    required:
+      - "message"
+    properties:
+      message:
+        type: "string"
+        description: "Error message"
+      errors:
+        type: "string"
+        description: "more detail on individual errors"
+    title: "error response"
+    description: "error response returned by the API"
+  AddSignalsRequest:
+    title: "add signals request"
+    type: "array"
+    description: "All signals request model"
+    items:
+      $ref: "#/definitions/AddSignalsRequestItem"
+  AddSignalsRequestItem:
+    type: "object"
+    required:
+      - "message"
+      - "scenario"
+      - "scenario_hash"
+      - "scenario_version"
+      - "source"
+      - "start_at"
+      - "stop_at"
+    properties:
+      scenario_hash:
+        type: "string"
+      scenario:
+        type: "string"
+      created_at:
+        type: "string"
+      machine_id:
+        type: "string"
+      source:
+        $ref: "#/definitions/Source"
+      scenario_version:
+        type: "string"
+      message:
+        type: "string"
+        description: "a human readable message"
+      start_at:
+        type: "string"
+      stop_at:
+        type: "string"
+    title: "Signal"
+tags:
+  - name: blockers
+    description: 'Operations about decisions : bans, captcha, rate-limit etc.'
+  - name: watchers
+    description: 'Operations about watchers : cscli & crowdsec'
+externalDocs:
+  url: 'https://github.com/crowdsecurity/crowdsec'
+  description: Find out more about CrowdSec
diff --git a/pkg/models/meta.go b/pkg/models/meta.go
new file mode 100644
index 000000000..74e7532fc
--- /dev/null
+++ b/pkg/models/meta.go
@@ -0,0 +1,82 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package models
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+    "strconv"
+
+    "github.com/go-openapi/errors"
+    "github.com/go-openapi/strfmt"
+    "github.com/go-openapi/swag"
+)
+
+// Meta Meta
+//
+// the Meta data of the Alert itself
+//
+// swagger:model Meta
+type Meta []*MetaItems0
+
+// Validate validates this meta
+func (m Meta) Validate(formats strfmt.Registry) error {
+    var res []error
+
+    for i := 0; i < len(m); i++ {
+        if swag.IsZero(m[i]) { // not required
+            continue
+        }
+
+        if m[i] != nil {
+            if err := m[i].Validate(formats); err != nil {
+                if ve, ok := err.(*errors.Validation); ok {
+                    return ve.ValidateName(strconv.Itoa(i))
+                }
+                return err
+            }
+        }
+
+    }
+
+    if len(res) > 0 {
+        return errors.CompositeValidationError(res...)
+    }
+    return nil
+}
+
+// MetaItems0 meta items0
+//
+// swagger:model MetaItems0
+type MetaItems0 struct {
+
+    // key
+    Key string `json:"key,omitempty"`
+
+    // value
+    Value string `json:"value,omitempty"`
+}
+
+// Validate validates this meta items0
+func (m *MetaItems0) Validate(formats strfmt.Registry) error {
+    return nil
+}
+
+// MarshalBinary interface implementation
+func (m *MetaItems0) MarshalBinary() ([]byte, error) {
+    if m == nil {
+        return nil, nil
+    }
+    return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *MetaItems0) UnmarshalBinary(b []byte) error {
+    var res MetaItems0
+    if err := swag.ReadJSON(b, &res); err != nil {
+        return err
+    }
+    *m = res
+    return nil
+}
diff --git a/pkg/models/metrics.go b/pkg/models/metrics.go
new file mode 100644
index 000000000..1e96d37c1
--- /dev/null
+++ b/pkg/models/metrics.go
@@ -0,0 +1,132 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package models
+
+// This file was generated by the swagger tool.
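
As an aside, a minimal sketch of how these generated models are typically consumed; the values and the main function are invented for illustration, and strfmt.Default is go-openapi's stock format registry:

    package main

    import (
        "fmt"

        "github.com/crowdsecurity/crowdsec/pkg/models"
        "github.com/go-openapi/strfmt"
    )

    func main() {
        // Meta is a list of key/value pairs attached to an alert.
        meta := models.Meta{
            &models.MetaItems0{Key: "source_ip", Value: "1.1.1.172"},
            &models.MetaItems0{Key: "service", Value: "ssh"},
        }
        // Validate walks every item, exactly as the generated code above does.
        if err := meta.Validate(strfmt.Default); err != nil {
            fmt.Printf("invalid meta: %v\n", err)
            return
        }
        // MarshalBinary is a thin wrapper around swag.WriteJSON.
        out, err := meta[0].MarshalBinary()
        if err != nil {
            fmt.Printf("marshal error: %v\n", err)
            return
        }
        fmt.Printf("%s\n", out) // {"key":"source_ip","value":"1.1.1.172"}
    }
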
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "strconv" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// Metrics Metrics +// +// swagger:model Metrics +type Metrics struct { + + // the local version of crowdsec/apil + // Required: true + ApilVersion *string `json:"apil_version"` + + // bouncers + // Required: true + Bouncers []*MetricsSoftInfo `json:"bouncers"` + + // machines + // Required: true + Machines []*MetricsSoftInfo `json:"machines"` +} + +// Validate validates this metrics +func (m *Metrics) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateApilVersion(formats); err != nil { + res = append(res, err) + } + + if err := m.validateBouncers(formats); err != nil { + res = append(res, err) + } + + if err := m.validateMachines(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *Metrics) validateApilVersion(formats strfmt.Registry) error { + + if err := validate.Required("apil_version", "body", m.ApilVersion); err != nil { + return err + } + + return nil +} + +func (m *Metrics) validateBouncers(formats strfmt.Registry) error { + + if err := validate.Required("bouncers", "body", m.Bouncers); err != nil { + return err + } + + for i := 0; i < len(m.Bouncers); i++ { + if swag.IsZero(m.Bouncers[i]) { // not required + continue + } + + if m.Bouncers[i] != nil { + if err := m.Bouncers[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("bouncers" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +func (m *Metrics) validateMachines(formats strfmt.Registry) error { + + if err := validate.Required("machines", "body", m.Machines); err != nil { + return err + } + + for i := 0; i < len(m.Machines); i++ { + if swag.IsZero(m.Machines[i]) { // not required + continue + } + + if m.Machines[i] != nil { + if err := m.Machines[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("machines" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +// MarshalBinary interface implementation +func (m *Metrics) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *Metrics) UnmarshalBinary(b []byte) error { + var res Metrics + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/pkg/models/metrics_soft_info.go b/pkg/models/metrics_soft_info.go new file mode 100644 index 000000000..60aa727bb --- /dev/null +++ b/pkg/models/metrics_soft_info.go @@ -0,0 +1,48 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// MetricsSoftInfo MetricsSoftInfo +// +// Software version info (so we can warn users about out-of-date software). 
The software name and the version are "guessed" from the user-agent +// +// swagger:model MetricsSoftInfo +type MetricsSoftInfo struct { + + // name of the component + Name string `json:"name,omitempty"` + + // software version + Version string `json:"version,omitempty"` +} + +// Validate validates this metrics soft info +func (m *MetricsSoftInfo) Validate(formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *MetricsSoftInfo) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *MetricsSoftInfo) UnmarshalBinary(b []byte) error { + var res MetricsSoftInfo + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/pkg/models/source.go b/pkg/models/source.go new file mode 100644 index 000000000..c6dc57905 --- /dev/null +++ b/pkg/models/source.go @@ -0,0 +1,102 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// Source Source +// +// swagger:model Source +type Source struct { + + // provided as a convenience when the source is an IP + AsName string `json:"as_name,omitempty"` + + // provided as a convenience when the source is an IP + AsNumber string `json:"as_number,omitempty"` + + // cn + Cn string `json:"cn,omitempty"` + + // provided as a convenience when the source is an IP + IP string `json:"ip,omitempty"` + + // latitude + Latitude float32 `json:"latitude,omitempty"` + + // longitude + Longitude float32 `json:"longitude,omitempty"` + + // provided as a convenience when the source is an IP + Range string `json:"range,omitempty"` + + // the scope of a source : ip,range,username,etc + // Required: true + Scope *string `json:"scope"` + + // the value of a source : the ip, the range, the username,etc + // Required: true + Value *string `json:"value"` +} + +// Validate validates this source +func (m *Source) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateScope(formats); err != nil { + res = append(res, err) + } + + if err := m.validateValue(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *Source) validateScope(formats strfmt.Registry) error { + + if err := validate.Required("scope", "body", m.Scope); err != nil { + return err + } + + return nil +} + +func (m *Source) validateValue(formats strfmt.Registry) error { + + if err := validate.Required("value", "body", m.Value); err != nil { + return err + } + + return nil +} + +// MarshalBinary interface implementation +func (m *Source) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *Source) UnmarshalBinary(b []byte) error { + var res Source + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/pkg/models/topx_response.go b/pkg/models/topx_response.go new file mode 100644 index 000000000..fe23d39a8 --- /dev/null +++ b/pkg/models/topx_response.go @@ -0,0 +1,110 @@ +// Code generated by go-swagger; DO NOT EDIT. 
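
The Source model above illustrates the go-swagger convention for required fields: they are generated as pointer types so that an absent field can be told apart from a zero value, with swag.String as the usual pointer helper. A hedged sketch, with invented values:

    package main

    import (
        "fmt"

        "github.com/crowdsecurity/crowdsec/pkg/models"
        "github.com/go-openapi/strfmt"
        "github.com/go-openapi/swag"
    )

    func main() {
        src := &models.Source{
            Scope: swag.String("ip"),        // required -> *string
            Value: swag.String("1.1.1.172"), // required -> *string
            IP:    "1.1.1.172",              // optional convenience field
        }
        if err := src.Validate(strfmt.Default); err != nil {
            fmt.Printf("invalid source: %v\n", err)
            return
        }
        // Leaving out a required field makes Validate fail:
        bad := &models.Source{Scope: swag.String("ip")}
        fmt.Println(bad.Validate(strfmt.Default))
    }
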
+ +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "strconv" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// TopxResponse TopxResponse +// +// swagger:model TopxResponse +type TopxResponse struct { + + // We keep the deleted array for the duration of the initial decision. So that when the initial decision is expired, it won't be present in deleted array anymore. + Deleted [][]GetDecisionsResponse `json:"deleted"` + + // new + New [][]GetDecisionsResponse `json:"new"` +} + +// Validate validates this topx response +func (m *TopxResponse) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateDeleted(formats); err != nil { + res = append(res, err) + } + + if err := m.validateNew(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *TopxResponse) validateDeleted(formats strfmt.Registry) error { + + if swag.IsZero(m.Deleted) { // not required + return nil + } + + for i := 0; i < len(m.Deleted); i++ { + + for ii := 0; ii < len(m.Deleted[i]); ii++ { + + if err := m.Deleted[i][ii].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("deleted" + "." + strconv.Itoa(i) + "." + strconv.Itoa(ii)) + } + return err + } + + } + + } + + return nil +} + +func (m *TopxResponse) validateNew(formats strfmt.Registry) error { + + if swag.IsZero(m.New) { // not required + return nil + } + + for i := 0; i < len(m.New); i++ { + + for ii := 0; ii < len(m.New[i]); ii++ { + + if err := m.New[i][ii].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("new" + "." + strconv.Itoa(i) + "." + strconv.Itoa(ii)) + } + return err + } + + } + + } + + return nil +} + +// MarshalBinary interface implementation +func (m *TopxResponse) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *TopxResponse) UnmarshalBinary(b []byte) error { + var res TopxResponse + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/pkg/models/watcher_auth_request.go b/pkg/models/watcher_auth_request.go new file mode 100644 index 000000000..1be134a8b --- /dev/null +++ b/pkg/models/watcher_auth_request.go @@ -0,0 +1,89 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
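
Note the shape of TopxResponse above: New and Deleted are [][]GetDecisionsResponse, and GetDecisionsResponse is itself a slice of decisions, so the JSON nests three levels deep. A speculative decode example; the payload is hand-written rather than captured from a live API, and the required Decision fields are assumed to be generated as pointers per the go-swagger convention shown for Source:

    package main

    import (
        "fmt"

        "github.com/crowdsecurity/crowdsec/pkg/models"
        "github.com/go-openapi/strfmt"
        "github.com/go-openapi/swag"
    )

    func main() {
        payload := []byte(`{"new":[[[{"origin":"crowdsec","type":"ban","scope":"ip",` +
            `"value":"1.1.1.172","duration":"4h","scenario":"crowdsecurity/ssh-bf"}]]]}`)

        var stream models.TopxResponse
        if err := stream.UnmarshalBinary(payload); err != nil {
            fmt.Printf("decode error: %v\n", err)
            return
        }
        if err := stream.Validate(strfmt.Default); err != nil {
            fmt.Printf("invalid stream: %v\n", err)
            return
        }
        first := stream.New[0][0][0] // slice of slices of GetDecisionsResponse
        fmt.Printf("first new decision targets %s\n", swag.StringValue(first.Value))
    }
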
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// WatcherAuthRequest WatcherAuthRequest +// +// swagger:model WatcherAuthRequest +type WatcherAuthRequest struct { + + // machine id + // Required: true + MachineID *string `json:"machine_id"` + + // password + // Required: true + // Format: password + Password *strfmt.Password `json:"password"` + + // the list of scenarios enabled on the watcher + Scenarios []string `json:"scenarios"` +} + +// Validate validates this watcher auth request +func (m *WatcherAuthRequest) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateMachineID(formats); err != nil { + res = append(res, err) + } + + if err := m.validatePassword(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *WatcherAuthRequest) validateMachineID(formats strfmt.Registry) error { + + if err := validate.Required("machine_id", "body", m.MachineID); err != nil { + return err + } + + return nil +} + +func (m *WatcherAuthRequest) validatePassword(formats strfmt.Registry) error { + + if err := validate.Required("password", "body", m.Password); err != nil { + return err + } + + if err := validate.FormatOf("password", "body", "password", m.Password.String(), formats); err != nil { + return err + } + + return nil +} + +// MarshalBinary interface implementation +func (m *WatcherAuthRequest) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *WatcherAuthRequest) UnmarshalBinary(b []byte) error { + var res WatcherAuthRequest + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/pkg/models/watcher_auth_response.go b/pkg/models/watcher_auth_response.go new file mode 100644 index 000000000..185aa875e --- /dev/null +++ b/pkg/models/watcher_auth_response.go @@ -0,0 +1,51 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
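
WatcherAuthRequest pairs with the /watchers/login operation from the spec and with the WatcherAuthResponse model just below. A rough client sketch; the base URL and credentials are assumptions, only the path and the models come from this patch:

    package main

    import (
        "bytes"
        "fmt"
        "io/ioutil"
        "net/http"

        "github.com/crowdsecurity/crowdsec/pkg/models"
        "github.com/go-openapi/strfmt"
        "github.com/go-openapi/swag"
    )

    func main() {
        pwd := strfmt.Password("s3cret") // hypothetical credentials
        req := &models.WatcherAuthRequest{
            MachineID: swag.String("test-machine"),
            Password:  &pwd,
            Scenarios: []string{"crowdsecurity/ssh-bf"},
        }
        body, err := swag.WriteJSON(req)
        if err != nil {
            fmt.Printf("encode error: %v\n", err)
            return
        }
        resp, err := http.Post("http://localhost:8080/watchers/login",
            "application/json", bytes.NewReader(body))
        if err != nil {
            fmt.Printf("login failed: %v\n", err)
            return
        }
        defer resp.Body.Close()

        raw, err := ioutil.ReadAll(resp.Body)
        if err != nil {
            fmt.Printf("read error: %v\n", err)
            return
        }
        var auth models.WatcherAuthResponse
        if err := auth.UnmarshalBinary(raw); err != nil {
            fmt.Printf("decode error: %v\n", err)
            return
        }
        fmt.Printf("token=%s expire=%s\n", auth.Token, auth.Expire)
    }
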
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// WatcherAuthResponse WatcherAuthResponse +// +// the response of a successful authentication +// +// swagger:model WatcherAuthResponse +type WatcherAuthResponse struct { + + // code + Code int64 `json:"code,omitempty"` + + // expire + Expire string `json:"expire,omitempty"` + + // token + Token string `json:"token,omitempty"` +} + +// Validate validates this watcher auth response +func (m *WatcherAuthResponse) Validate(formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *WatcherAuthResponse) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *WatcherAuthResponse) UnmarshalBinary(b []byte) error { + var res WatcherAuthResponse + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/pkg/models/watcher_registration_request.go b/pkg/models/watcher_registration_request.go new file mode 100644 index 000000000..194a1e715 --- /dev/null +++ b/pkg/models/watcher_registration_request.go @@ -0,0 +1,86 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// WatcherRegistrationRequest WatcherRegistrationRequest +// +// swagger:model WatcherRegistrationRequest +type WatcherRegistrationRequest struct { + + // machine id + // Required: true + MachineID *string `json:"machine_id"` + + // password + // Required: true + // Format: password + Password *strfmt.Password `json:"password"` +} + +// Validate validates this watcher registration request +func (m *WatcherRegistrationRequest) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateMachineID(formats); err != nil { + res = append(res, err) + } + + if err := m.validatePassword(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *WatcherRegistrationRequest) validateMachineID(formats strfmt.Registry) error { + + if err := validate.Required("machine_id", "body", m.MachineID); err != nil { + return err + } + + return nil +} + +func (m *WatcherRegistrationRequest) validatePassword(formats strfmt.Registry) error { + + if err := validate.Required("password", "body", m.Password); err != nil { + return err + } + + if err := validate.FormatOf("password", "body", "password", m.Password.String(), formats); err != nil { + return err + } + + return nil +} + +// MarshalBinary interface implementation +func (m *WatcherRegistrationRequest) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *WatcherRegistrationRequest) UnmarshalBinary(b []byte) error { + var res WatcherRegistrationRequest + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/pkg/outputs/ouputs.go b/pkg/outputs/ouputs.go deleted file mode 100644 index 8c4f77243..000000000 --- a/pkg/outputs/ouputs.go +++ /dev/null @@ -1,361 +0,0 @@ -package outputs - -import ( - "fmt" - "io" - "os" - "strconv" - "time" - - "github.com/crowdsecurity/crowdsec/pkg/cwplugin" - "github.com/crowdsecurity/crowdsec/pkg/exprhelpers" - "github.com/crowdsecurity/crowdsec/pkg/types" - "github.com/pkg/errors" - - "github.com/crowdsecurity/crowdsec/pkg/cwapi" - - "github.com/antonmedv/expr" - log "github.com/sirupsen/logrus" - "gopkg.in/yaml.v2" -) - -//OutputFactory is part of the main yaml configuration file, and holds generic backend config -type OutputFactory struct { - BackendFolder string `yaml:"backend,omitempty"` - //For the db GC : how many records can we keep at most - MaxRecords string `yaml:"max_records,omitempty"` - //For the db GC what is the oldest records we tolerate - MaxRecordsAge string `yaml:"max_records_age,omitempty"` - //Should we automatically flush expired bans - Flush bool - Debug bool `yaml:"debug"` -} - -//Output holds the runtime objects of backend -type Output struct { - API *cwapi.ApiCtx - bManager *cwplugin.BackendManager - Config *OutputFactory -} - -/* -Transform an overflow (SignalOccurence) and a Profile into a BanOrder -*/ -func OvflwToOrder(sig types.SignalOccurence, prof types.Profile) (*types.BanOrder, error, error) { - var ordr types.BanOrder - var warn error - - if sig.Simulation { - log.Debugf("signal for '%s' is whitelisted", sig.Source_ip) - ordr.MeasureType = "simulation:" - } - //Identify remediation type - if prof.Remediation.Ban { - ordr.MeasureType += "ban" - } else if prof.Remediation.Slow { - ordr.MeasureType += "slow" - } else if prof.Remediation.Captcha { - ordr.MeasureType += "captcha" - } else { - /*if the profil has no remediation, no order */ - return nil, nil, fmt.Errorf("no remediation") - } - - if sig.Source == nil { - return nil, nil, fmt.Errorf("no 'source' in event (Meta.source_ip empty?)") - } - - ordr.MeasureSource = "local" - ordr.Reason = sig.Scenario - //Identify scope - v, ok := sig.Labels["scope"] - if !ok { - //if remediation_scope isn't specified, it's IP - v = "ip" - } - ordr.Scope = v - asn, err := strconv.Atoi(sig.Source.AutonomousSystemNumber) - if err != nil { - warn = fmt.Errorf("invalid as number : %s : %s", sig.Source.AutonomousSystemNumber, err) - } - ordr.TargetAS = asn - ordr.TargetASName = sig.Source.AutonomousSystemOrganization - ordr.TargetIP = sig.Source.Ip - ordr.TargetRange = sig.Source.Range - ordr.TargetCountry = 
sig.Source.Country - switch v { - case "range": - ordr.TxtTarget = ordr.TargetRange.String() - case "ip": - ordr.TxtTarget = ordr.TargetIP.String() - case "as": - ordr.TxtTarget = fmt.Sprintf("ban as %d (unsupported)", ordr.TargetAS) - case "country": - ordr.TxtTarget = fmt.Sprintf("ban country %s (unsupported)", ordr.TargetCountry) - default: - log.Errorf("Unknown remediation scope '%s'", sig.Labels["remediation_Scope"]) - return nil, fmt.Errorf("unknown remediation scope"), nil - } - //Set deadline - ordr.Until = sig.Stop_at.Add(prof.Remediation.TimeDuration) - return &ordr, nil, warn -} - -func (o *Output) StartAutoCommit() error { - return o.bManager.StartAutoCommit() -} - -func (o *Output) Shutdown() error { - var reterr error - if o.API != nil { - if err := o.API.Shutdown(); err != nil { - log.Errorf("error while shutting down API : %s", err) - reterr = err - } - } - if o.bManager != nil { - if err := o.bManager.Shutdown(); err != nil { - log.Errorf("error while shutting down backend : %s", err) - reterr = err - } - } - return reterr -} - -func (o *Output) FlushAll() { - if o.API != nil { - if err := o.API.Flush(); err != nil { - log.Errorf("Failing API flush : %s", err) - } - } - if o.bManager != nil { - if err := o.bManager.Flush(); err != nil { - log.Errorf("Failing database flush : %s", err) - } - } -} - -func (o *Output) ProcessOutput(sig types.SignalOccurence, profiles []types.Profile) error { - - var logger *log.Entry - if sig.Source != nil { - logger = log.WithFields(log.Fields{ - "source_ip": sig.Source.Ip.String(), - "scenario": sig.Scenario, - "bucket_id": sig.Bucket_id, - "event_time": sig.Stop_at, - }) - } else { - logger = log.WithFields(log.Fields{ - "scenario": sig.Scenario, - "bucket_id": sig.Bucket_id, - "event_time": sig.Stop_at, - }) - } - - for _, profile := range profiles { - if profile.RunTimeFilter != nil { - //Evaluate node's filter - output, err := expr.Run(profile.RunTimeFilter, exprhelpers.GetExprEnv(map[string]interface{}{"sig": sig})) - if err != nil { - logger.Warningf("failed to run filter : %v", err) - continue - } - switch out := output.(type) { - case bool: - /* filter returned false, don't process Node */ - if !out { - logger.Debugf("eval(FALSE) '%s'", profile.Filter) - continue - } - default: - logger.Warningf("Expr '%s' returned non-bool", profile.Filter) - continue - } - logger.Debugf("eval(TRUE) '%s'", profile.Filter) - } - /*the filter was ok*/ - ordr, err, warn := OvflwToOrder(sig, profile) - if err != nil { - logger.Errorf("Unable to turn Overflow to Order : %v", err) - return err - } - if warn != nil { - logger.Debugf("node warning : %s", warn) - } - if ordr != nil { - bans, err := types.OrderToApplications(ordr) - if err != nil { - logger.Errorf("Error turning order to ban applications : %v", err) - return err - } - logger.Warningf("%s triggered a %s %s %s remediation for [%s]", ordr.TxtTarget, ordr.Until.Sub(sig.Stop_at), ordr.Scope, ordr.MeasureType, sig.Scenario) - sig.BanApplications = bans - } else { - //Order didn't lead to concrete bans - logger.Infof("Processing Overflow with no decisions %s", sig.Alert_message) - } - - // if ApiPush is nil (not specified in profile configuration) we use global api config (from default.yaml) - if profile.ApiPush == nil || *profile.ApiPush { - if o.API != nil { // if API is not nil, we can push - if err = o.API.AppendSignal((sig)); err != nil { - return fmt.Errorf("failed to append signal : %s", err) - } - } - } - for _, outputConfig := range profile.OutputConfigs { - if pluginName, ok := 
outputConfig["plugin"]; ok { - if o.bManager.IsBackendPlugin(pluginName) { - if toStore, ok := outputConfig["store"]; ok { - boolConv, err := strconv.ParseBool(toStore) - if err != nil { - log.Errorf("unable to parse boolean value of store configuration '%s' : %s", toStore, err) - } - if !boolConv { - continue - } - } - if err = o.bManager.InsertOnePlugin(sig, pluginName); err != nil { - return fmt.Errorf("failed to insert plugin %s : %s", pluginName, err) - } - } - } - } - } - return nil -} - -func LoadOutputProfiles(profileConfig string) ([]types.Profile, error) { - - var ( - profiles []types.Profile - ) - - yamlFile, err := os.Open(profileConfig) - if err != nil { - log.Errorf("Can't access parsing configuration file with '%v'.", err) - return nil, err - } - //process the yaml - dec := yaml.NewDecoder(yamlFile) - dec.SetStrict(true) - for { - profile := types.Profile{} - err = dec.Decode(&profile) - if err != nil { - if err == io.EOF { - log.Tracef("End of yaml file") - break - } - log.Errorf("Error decoding profile configuration file with '%s': %v", profileConfig, err) - return nil, err - } - //compile filter if present - if profile.Filter != "" { - profile.RunTimeFilter, err = expr.Compile(profile.Filter, expr.Env(exprhelpers.GetExprEnv(map[string]interface{}{"sig": &types.SignalOccurence{}}))) - if err != nil { - log.Errorf("Compilation failed %v\n", err) - return nil, err - } - } - - if profile.Remediation.Ban || profile.Remediation.Slow || profile.Remediation.Captcha { - profile.Remediation.TimeDuration, err = time.ParseDuration(profile.Remediation.Duration) - if err != nil { - log.Fatalf("Unable to parse profile duration '%s'", profile.Remediation.Duration) - } - } - //ensure we have outputs :) - if profile.OutputConfigs == nil { - log.Errorf("Profile has empty OutputConfigs") - return nil, err - } - - profiles = append(profiles, profile) - } - - /*Initialize individual connectors*/ - return profiles, nil - -} - -func (o *Output) InitAPI(config map[string]string) error { - var err error - o.API = &cwapi.ApiCtx{} - log.Infof("API connector init") - err = o.API.Init(config["path"], config["profile"]) - if err != nil { - log.Errorf("API init failed, won't push/pull : %v", err) - return err - } - return nil -} - -func (o *Output) LoadAPIConfig(configFile string) error { - var err error - o.API = &cwapi.ApiCtx{} - - err = o.API.LoadConfig(configFile) - if err != nil { - return err - } - return nil -} - -func (o *Output) Delete(target string) (int, error) { - nbDel, err := o.bManager.Delete(target) - return nbDel, err -} - -func (o *Output) DeleteAll() error { - err := o.bManager.DeleteAll() - return err -} - -func (o *Output) Insert(sig types.SignalOccurence) error { - err := o.bManager.Insert(sig) - return err -} - -func (o *Output) Flush() error { - err := o.bManager.Flush() - return err -} - -func (o *Output) ReadAT(timeAT time.Time) ([]map[string]string, error) { - ret, err := o.bManager.ReadAT(timeAT) - if err != nil { - return nil, err - } - return ret, nil -} - -func NewOutput(config *OutputFactory) (*Output, error) { - var output Output - var err error - - if config == nil { - return nil, fmt.Errorf("missing output plugin configuration") - } - log.Debugf("loading backend plugins ...") - //turn the *OutputFactory into a map[string]string for less constraint - backendConfig := map[string]string{ - "backend": config.BackendFolder, - "flush": strconv.FormatBool(config.Flush), - "debug": strconv.FormatBool(config.Debug)} - - if config.MaxRecords != "" { - 
backendConfig["max_records"] = config.MaxRecords
-    }
-    if config.MaxRecordsAge != "" {
-        backendConfig["max_records_age"] = config.MaxRecordsAge
-    }
-
-    output.bManager, err = cwplugin.NewBackendPlugin(backendConfig)
-    if err != nil {
-        return nil, errors.Wrap(err, "failed to load backend plugin")
-    }
-    output.Config = config
-    return &output, nil
-}
diff --git a/pkg/parser/enrich.go b/pkg/parser/enrich.go
index 28b385295..4aa8a343c 100644
--- a/pkg/parser/enrich.go
+++ b/pkg/parser/enrich.go
@@ -23,7 +23,8 @@ type EnricherCtx struct {
 }
 
 /* mimic plugin loading */
-func Loadplugin(path string) (EnricherCtx, error) {
+// TODO: replace this stopgap with real plugin loading
+func Loadplugin(path string) ([]EnricherCtx, error) {
     var err error
     c := EnricherCtx{}
@@ -45,7 +46,7 @@ func Loadplugin(path string) (EnricherCtx, error) {
         c.initiated = false
     }
     c.initiated = true
-    return c, nil
+    return []EnricherCtx{c}, nil
 }
 
 func GenDateParse(date string) (string, time.Time) {
diff --git a/pkg/parser/enrich_geoip.go b/pkg/parser/enrich_geoip.go
index b0d9b7f68..c07fead62 100644
--- a/pkg/parser/enrich_geoip.go
+++ b/pkg/parser/enrich_geoip.go
@@ -54,9 +54,13 @@ func GeoIpASN(field string, p *types.Event, ctx interface{}) (map[string]string,
     }
     ip := net.ParseIP(field)
+    if ip == nil {
+        log.Infof("Can't parse ip %s, no ASN enrich", field)
+        return nil, nil
+    }
     record, err := ctx.(GeoIpEnricherCtx).dba.ASN(ip)
     if err != nil {
-        log.Debugf("Unable to enrich ip '%s'", field)
+        log.Errorf("Unable to enrich ip '%s'", field)
         return nil, nil
     }
     ret["ASNNumber"] = fmt.Sprintf("%d", record.AutonomousSystemNumber)
@@ -71,10 +75,14 @@ func GeoIpCity(field string, p *types.Event, ctx interface{}) (map[string]string
         return nil, nil
     }
     ip := net.ParseIP(field)
+    if ip == nil {
+        log.Infof("Can't parse ip %s, no City enrich", field)
+        return nil, nil
+    }
     record, err := ctx.(GeoIpEnricherCtx).dbc.City(ip)
     if err != nil {
-        log.Fatal(err)
-        return nil, err
+        log.Debugf("Unable to enrich ip '%s'", ip)
+        return nil, nil
     }
     ret["IsoCode"] = record.Country.IsoCode
     ret["IsInEU"] = strconv.FormatBool(record.Country.IsInEuropeanUnion)
diff --git a/pkg/parser/node.go b/pkg/parser/node.go
index 4551a1f94..399cd4a2a 100644
--- a/pkg/parser/node.go
+++ b/pkg/parser/node.go
@@ -42,10 +42,9 @@ type Node struct {
     RunTimeFilter *vm.Program `yaml:"-" json:"-"` //the actual compiled filter
     ExprDebugger *exprhelpers.ExprDebugger `yaml:"-" json:"-"` //used to debug expression by printing the content of each variable of the expression
     //If node has leafs, execute all of them until one asks for a 'break'
-    SuccessNodes []Node `yaml:"nodes,omitempty"`
+    LeavesNodes []Node `yaml:"nodes,omitempty"`
     //Flag used to describe when to 'break' or return an 'error'
-    // BreakBehaviour string `yaml:"break,omitempty"`
-    // Error string `yaml:"error,omitempty"`
+    EnrichFunctions []EnricherCtx
     /* If the node is actually a leaf, it can have : grok, enrich, statics */
     //pattern_syntax are named grok patterns that are re-utilised over several grok patterns
@@ -59,7 +58,7 @@ type Node struct {
     Data []*types.DataSource `yaml:"data,omitempty"`
 }
 
-func (n *Node) validate(pctx *UnixParserCtx) error {
+func (n *Node) validate(pctx *UnixParserCtx, ectx []EnricherCtx) error {
 
     //stage is being set automagically
     if n.Stage == "" {
@@ -89,7 +88,7 @@ func (n *Node) validate(pctx *UnixParserCtx) error {
             return fmt.Errorf("static %d : when method is set, expression must be present", idx)
         }
         method_found := false
-        for _, enricherCtx := range ECTX {
+        for _, enricherCtx := range ectx {
if _, ok := enricherCtx.Funcs[static.Method]; ok && enricherCtx.initiated { method_found = true break @@ -114,7 +113,7 @@ func (n *Node) process(p *types.Event, ctx UnixParserCtx) (bool, error) { var NodeState bool clog := n.logger - clog.Debugf("Event entering node") + clog.Tracef("Event entering node") if n.RunTimeFilter != nil { //Evaluate node's filter output, err := expr.Run(n.RunTimeFilter, exprhelpers.GetExprEnv(map[string]interface{}{"evt": p})) @@ -130,7 +129,7 @@ func (n *Node) process(p *types.Event, ctx UnixParserCtx) (bool, error) { n.ExprDebugger.Run(clog, out, exprhelpers.GetExprEnv(map[string]interface{}{"evt": p})) } if !out { - clog.Debugf("Event leaving node : ko") + clog.Debugf("Event leaving node : ko (failed filter)") return false, nil } default: @@ -140,7 +139,7 @@ func (n *Node) process(p *types.Event, ctx UnixParserCtx) (bool, error) { } NodeState = true } else { - clog.Debugf("Node has not filter, enter") + clog.Tracef("Node has not filter, enter") NodeState = true } @@ -149,40 +148,45 @@ func (n *Node) process(p *types.Event, ctx UnixParserCtx) (bool, error) { } isWhitelisted := false hasWhitelist := false - var src net.IP + var srcs []net.IP /*overflow and log don't hold the source ip in the same field, should be changed */ /* perform whitelist checks for ips, cidr accordingly */ + /* TODO move whitelist elsewhere */ if p.Type == types.LOG { if _, ok := p.Meta["source_ip"]; ok { - src = net.ParseIP(p.Meta["source_ip"]) + srcs = append(srcs, net.ParseIP(p.Meta["source_ip"])) } } else if p.Type == types.OVFLW { - src = net.ParseIP(p.Overflow.Source_ip) + for k, _ := range p.Overflow.Sources { + srcs = append(srcs, net.ParseIP(k)) + } } - if src != nil { + for _, src := range srcs { + if isWhitelisted { + break + } for _, v := range n.Whitelist.B_Ips { if v.Equal(src) { clog.Debugf("Event from [%s] is whitelisted by Ips !", src) - p.Whitelisted = true isWhitelisted = true } else { - clog.Debugf("whitelist: %s is not eq [%s]", src, v) + clog.Tracef("whitelist: %s is not eq [%s]", src, v) } hasWhitelist = true } - for _, v := range n.Whitelist.B_Cidrs { if v.Contains(src) { clog.Debugf("Event from [%s] is whitelisted by Cidrs !", src) - p.Whitelisted = true isWhitelisted = true } else { - clog.Debugf("whitelist: %s not in [%s]", src, v) + clog.Tracef("whitelist: %s not in [%s]", src, v) } hasWhitelist = true } - } else { - clog.Debugf("no ip in event, cidr/ip whitelists not checked") + } + + if isWhitelisted { + p.Whitelisted = true } /* run whitelist expression tests anyway */ for eidx, e := range n.Whitelist.B_Exprs { @@ -198,7 +202,7 @@ func (n *Node) process(p *types.Event, ctx UnixParserCtx) (bool, error) { e.ExprDebugger.Run(clog, out, exprhelpers.GetExprEnv(map[string]interface{}{"evt": p})) } if out { - clog.Debugf("Event is whitelisted by Expr !") + clog.Infof("Event is whitelisted by Expr !") p.Whitelisted = true isWhitelisted = true } @@ -211,42 +215,15 @@ func (n *Node) process(p *types.Event, ctx UnixParserCtx) (bool, error) { p.WhiteListReason = n.Whitelist.Reason /*huglily wipe the ban order if the event is whitelisted and it's an overflow */ if p.Type == types.OVFLW { /*don't do this at home kids */ - // p.Overflow.OverflowAction = "" - //Break this for now. 
Souldn't have been done this way, but that's not taht serious - /*only display logs when we discard ban to avoid spam*/ - clog.Infof("Ban for %s whitelisted, reason [%s]", p.Overflow.Source.Ip.String(), n.Whitelist.Reason) + ips := []string{} + for _, src := range srcs { + ips = append(ips, src.String()) + } + clog.Infof("Ban for %s whitelisted, reason [%s]", strings.Join(ips, ","), n.Whitelist.Reason) p.Overflow.Whitelisted = true } } - //Iterate on leafs - if len(n.SuccessNodes) > 0 { - for _, leaf := range n.SuccessNodes { - //clog.Debugf("Processing sub-node %d/%d : %s", idx, len(n.SuccessNodes), leaf.rn) - ret, err := leaf.process(p, ctx) - if err != nil { - clog.Tracef("\tNode (%s) failed : %v", leaf.rn, err) - clog.Debugf("Event leaving node : ko") - return false, err - } - clog.Tracef("\tsub-node (%s) ret : %v (strategy:%s)", leaf.rn, ret, n.OnSuccess) - if ret { - NodeState = true - /* if chil is successful, stop processing */ - if n.OnSuccess == "next_stage" { - clog.Debugf("child is success, OnSuccess=next_stage, skip") - break - } - } else { - NodeState = false - } - } - } - /*todo : check if a node made the state change ?*/ - /* should the childs inherit the on_success behaviour */ - - clog.Tracef("State after nodes : %v", NodeState) - //Process grok if present, should be exclusive with nodes :) gstr := "" if n.Grok.RunTimeRegexp != nil { @@ -282,7 +259,7 @@ func (n *Node) process(p *types.Event, ctx UnixParserCtx) (bool, error) { p.Parsed[k] = v } // if the grok succeed, process associated statics - err := ProcessStatics(n.Grok.Statics, p, clog) + err := n.ProcessStatics(n.Grok.Statics, p) if err != nil { clog.Fatalf("(%s) Failed to process statics : %v", n.rn, err) } @@ -297,6 +274,34 @@ func (n *Node) process(p *types.Event, ctx UnixParserCtx) (bool, error) { clog.Tracef("! No grok pattern : %p", n.Grok.RunTimeRegexp) } + //Iterate on leafs + if len(n.LeavesNodes) > 0 { + for _, leaf := range n.LeavesNodes { + //clog.Debugf("Processing sub-node %d/%d : %s", idx, len(n.SuccessNodes), leaf.rn) + ret, err := leaf.process(p, ctx) + if err != nil { + clog.Tracef("\tNode (%s) failed : %v", leaf.rn, err) + clog.Debugf("Event leaving node : ko") + return false, err + } + clog.Tracef("\tsub-node (%s) ret : %v (strategy:%s)", leaf.rn, ret, n.OnSuccess) + if ret { + NodeState = true + /* if child is successful, stop processing */ + if n.OnSuccess == "next_stage" { + clog.Debugf("child is success, OnSuccess=next_stage, skip") + break + } + } else { + NodeState = false + } + } + } + /*todo : check if a node made the state change ?*/ + /* should the childs inherit the on_success behaviour */ + + clog.Tracef("State after nodes : %v", NodeState) + //grok or leafs failed, don't process statics if !NodeState { if n.Name != "" { @@ -309,10 +314,13 @@ func (n *Node) process(p *types.Event, ctx UnixParserCtx) (bool, error) { if n.Name != "" { NodesHitsOk.With(prometheus.Labels{"source": p.Line.Src, "name": n.Name}).Inc() } + /* + Please kill me. this is to apply statics when the node *has* whitelists that successfully matched the node. 
+ */ if hasWhitelist && isWhitelisted && len(n.Statics) > 0 || len(n.Statics) > 0 && !hasWhitelist { clog.Debugf("+ Processing %d statics", len(n.Statics)) // if all else is good in whitelist, process node's statics - err := ProcessStatics(n.Statics, p, clog) + err := n.ProcessStatics(n.Statics, p) if err != nil { clog.Fatalf("Failed to process statics : %v", err) } @@ -342,7 +350,7 @@ func (n *Node) process(p *types.Event, ctx UnixParserCtx) (bool, error) { return NodeState, nil } -func (n *Node) compile(pctx *UnixParserCtx) error { +func (n *Node) compile(pctx *UnixParserCtx, ectx []EnricherCtx) error { var err error var valid bool @@ -351,7 +359,8 @@ func (n *Node) compile(pctx *UnixParserCtx) error { dumpr := spew.ConfigState{MaxDepth: 1, DisablePointerAddresses: true} n.rn = seed.Generate() - log.Debugf("compile, node is %s", n.Stage) + n.EnrichFunctions = ectx + log.Tracef("compile, node is %s", n.Stage) /* if the node has debugging enabled, create a specific logger with debug that will be used only for processing this node ;) */ if n.Debug { @@ -394,7 +403,7 @@ func (n *Node) compile(pctx *UnixParserCtx) error { /* handle pattern_syntax and groks */ for node, pattern := range n.SubGroks { - n.logger.Debugf("Adding subpattern '%s' : '%s'", node, pattern) + n.logger.Tracef("Adding subpattern '%s' : '%s'", node, pattern) if err := pctx.Grok.Add(node, pattern); err != nil { n.logger.Errorf("Unable to compile subpattern %s : %v", node, err) return err @@ -402,7 +411,7 @@ func (n *Node) compile(pctx *UnixParserCtx) error { } /* load grok by name or compile in-place */ if n.Grok.RegexpName != "" { - n.logger.Debugf("+ Regexp Compilation '%s'", n.Grok.RegexpName) + n.logger.Tracef("+ Regexp Compilation '%s'", n.Grok.RegexpName) n.Grok.RunTimeRegexp, err = pctx.Grok.Get(n.Grok.RegexpName) if err != nil { return fmt.Errorf("Unable to find grok '%s' : %v", n.Grok.RegexpName, err) @@ -410,13 +419,12 @@ func (n *Node) compile(pctx *UnixParserCtx) error { if n.Grok.RunTimeRegexp == nil { return fmt.Errorf("Empty grok '%s'", n.Grok.RegexpName) } - n.logger.Debugf("%s regexp: %s", n.Grok.RegexpName, n.Grok.RunTimeRegexp.Regexp.String()) + n.logger.Tracef("%s regexp: %s", n.Grok.RegexpName, n.Grok.RunTimeRegexp.Regexp.String()) valid = true } else if n.Grok.RegexpValue != "" { if strings.HasSuffix(n.Grok.RegexpValue, "\n") { n.logger.Debugf("Beware, pattern ends with \\n : '%s'", n.Grok.RegexpValue) } - //n.logger.Debugf("+ Regexp Compilation '%s'", n.Grok.RegexpValue) n.Grok.RunTimeRegexp, err = pctx.Grok.Compile(n.Grok.RegexpValue) if err != nil { return fmt.Errorf("Failed to compile grok '%s': %v\n", n.Grok.RegexpValue, err) @@ -425,7 +433,7 @@ func (n *Node) compile(pctx *UnixParserCtx) error { // We shouldn't be here because compilation succeeded, so regexp shouldn't be nil return fmt.Errorf("Grok compilation failure: %s", n.Grok.RegexpValue) } - n.logger.Debugf("%s regexp : %s", n.Grok.RegexpValue, n.Grok.RunTimeRegexp.Regexp.String()) + n.logger.Tracef("%s regexp : %s", n.Grok.RegexpValue, n.Grok.RunTimeRegexp.Regexp.String()) valid = true } /* load grok statics */ @@ -443,20 +451,20 @@ func (n *Node) compile(pctx *UnixParserCtx) error { valid = true } /* compile leafs if present */ - if len(n.SuccessNodes) > 0 { - for idx := range n.SuccessNodes { - if n.SuccessNodes[idx].Name == "" { - n.SuccessNodes[idx].Name = fmt.Sprintf("child-%s", n.Name) + if len(n.LeavesNodes) > 0 { + for idx := range n.LeavesNodes { + if n.LeavesNodes[idx].Name == "" { + n.LeavesNodes[idx].Name = 
fmt.Sprintf("child-%s", n.Name) } /*propagate debug/stats to child nodes*/ - if !n.SuccessNodes[idx].Debug && n.Debug { - n.SuccessNodes[idx].Debug = true + if !n.LeavesNodes[idx].Debug && n.Debug { + n.LeavesNodes[idx].Debug = true } - if !n.SuccessNodes[idx].Profiling && n.Profiling { - n.SuccessNodes[idx].Profiling = true + if !n.LeavesNodes[idx].Profiling && n.Profiling { + n.LeavesNodes[idx].Profiling = true } - n.SuccessNodes[idx].Stage = n.Stage - err = n.SuccessNodes[idx].compile(pctx) + n.LeavesNodes[idx].Stage = n.Stage + err = n.LeavesNodes[idx].compile(pctx, ectx) if err != nil { return err } @@ -510,7 +518,7 @@ func (n *Node) compile(pctx *UnixParserCtx) error { n.logger.Infof("Node is empty: %s", spew.Sdump(n)) n.Stage = "" } - if err := n.validate(pctx); err != nil { + if err := n.validate(pctx, ectx); err != nil { return err //n.logger.Fatalf("Node is invalid : %s", err) } diff --git a/pkg/parser/node_test.go b/pkg/parser/node_test.go index 9b033c43a..4724fc799 100644 --- a/pkg/parser/node_test.go +++ b/pkg/parser/node_test.go @@ -7,8 +7,7 @@ import ( ) func TestParserConfigs(t *testing.T) { - var p UnixParser - pctx, err := p.Init(map[string]interface{}{"patterns": "../../config/patterns/", "data": "./tests/"}) + pctx, err := Init(map[string]interface{}{"patterns": "../../config/patterns/", "data": "./tests/"}) if err != nil { t.Fatalf("unable to load patterns : %s", err) } @@ -42,7 +41,7 @@ func TestParserConfigs(t *testing.T) { //{&Node{Debug: true, Grok: []GrokPattern{ GrokPattern{}, }}, false}, } for idx := range CfgTests { - err := CfgTests[idx].NodeCfg.compile(pctx) + err := CfgTests[idx].NodeCfg.compile(pctx, []EnricherCtx{}) if CfgTests[idx].Compiles == true && err != nil { t.Fatalf("Compile: (%d/%d) expected valid, got : %s", idx+1, len(CfgTests), err) } @@ -50,7 +49,7 @@ func TestParserConfigs(t *testing.T) { t.Fatalf("Compile: (%d/%d) expected errror", idx+1, len(CfgTests)) } - err = CfgTests[idx].NodeCfg.validate(pctx) + err = CfgTests[idx].NodeCfg.validate(pctx, []EnricherCtx{}) if CfgTests[idx].Valid == true && err != nil { t.Fatalf("Valid: (%d/%d) expected valid, got : %s", idx+1, len(CfgTests), err) } diff --git a/pkg/parser/parsing_test.go b/pkg/parser/parsing_test.go index 0eeb0454f..2a57b3a1d 100644 --- a/pkg/parser/parsing_test.go +++ b/pkg/parser/parsing_test.go @@ -7,6 +7,7 @@ import ( "io" "io/ioutil" "os" + "sort" "strings" "testing" @@ -28,13 +29,13 @@ func TestParser(t *testing.T) { debug = true log.SetLevel(log.InfoLevel) var envSetting = os.Getenv("TEST_ONLY") - pctx, err := prepTests() + pctx, ectx, err := prepTests() if err != nil { t.Fatalf("failed to load env : %s", err) } //Init the enricher if envSetting != "" { - if err := testOneParser(pctx, envSetting, nil); err != nil { + if err := testOneParser(pctx, ectx, envSetting, nil); err != nil { t.Fatalf("Test '%s' failed : %s", envSetting, err) } } else { @@ -48,7 +49,7 @@ func TestParser(t *testing.T) { } fname := "./tests/" + fd.Name() log.Infof("Running test on %s", fname) - if err := testOneParser(pctx, fname, nil); err != nil { + if err := testOneParser(pctx, ectx, fname, nil); err != nil { t.Fatalf("Test '%s' failed : %s", fname, err) } } @@ -60,14 +61,14 @@ func BenchmarkParser(t *testing.B) { log.Printf("start bench !!!!") debug = false log.SetLevel(log.ErrorLevel) - pctx, err := prepTests() + pctx, ectx, err := prepTests() if err != nil { t.Fatalf("failed to load env : %s", err) } var envSetting = os.Getenv("TEST_ONLY") if envSetting != "" { - if err := testOneParser(pctx, 
envSetting, t); err != nil { + if err := testOneParser(pctx, ectx, envSetting, t); err != nil { t.Fatalf("Test '%s' failed : %s", envSetting, err) } } else { @@ -81,20 +82,21 @@ func BenchmarkParser(t *testing.B) { } fname := "./tests/" + fd.Name() log.Infof("Running test on %s", fname) - if err := testOneParser(pctx, fname, t); err != nil { + if err := testOneParser(pctx, ectx, fname, t); err != nil { t.Fatalf("Test '%s' failed : %s", fname, err) } } } } -func testOneParser(pctx *UnixParserCtx, dir string, b *testing.B) error { +func testOneParser(pctx *UnixParserCtx, ectx []EnricherCtx, dir string, b *testing.B) error { - var err error - var pnodes []Node - - var parser_configs []Stagefile + var ( + err error + pnodes []Node + parser_configs []Stagefile + ) log.Warningf("testing %s", dir) parser_cfg_file := fmt.Sprintf("%s/parsers.yaml", dir) cfg, err := ioutil.ReadFile(parser_cfg_file) @@ -114,7 +116,7 @@ func testOneParser(pctx *UnixParserCtx, dir string, b *testing.B) error { return fmt.Errorf("failed unmarshaling %s : %s", parser_cfg_file, err) } - pnodes, err = LoadStages(parser_configs, pctx) + pnodes, err = LoadStages(parser_configs, pctx, ectx) if err != nil { return fmt.Errorf("unable to load parser config : %s", err) } @@ -137,34 +139,36 @@ func testOneParser(pctx *UnixParserCtx, dir string, b *testing.B) error { } //prepTests is going to do the initialisation of parser : it's going to load enrichment plugins and load the patterns. This is done here so that we don't redo it for each test -func prepTests() (*UnixParserCtx, error) { - var pctx *UnixParserCtx - var p UnixParser - err := exprhelpers.Init() +func prepTests() (*UnixParserCtx, []EnricherCtx, error) { + var ( + err error + pctx *UnixParserCtx + ectx []EnricherCtx + ) + + err = exprhelpers.Init() if err != nil { log.Fatalf("exprhelpers init failed: %s", err) } //Load enrichment - datadir := "../../data/" - pplugins, err := Loadplugin(datadir) + datadir := "./test_data/" + ectx, err = Loadplugin(datadir) if err != nil { log.Fatalf("failed to load plugin geoip : %v", err) } - ECTX = nil - ECTX = append(ECTX, pplugins) - log.Printf("Loaded -> %+v", ECTX) + log.Printf("Loaded -> %+v", ectx) //Load the parser patterns cfgdir := "../../config/" /* this should be refactored to 2 lines :p */ // Init the parser - pctx, err = p.Init(map[string]interface{}{"patterns": cfgdir + string("/patterns/"), "data": "./tests/"}) + pctx, err = Init(map[string]interface{}{"patterns": cfgdir + string("/patterns/"), "data": "./tests/"}) if err != nil { - return nil, fmt.Errorf("failed to initialize parser : %v", err) + return nil, nil, fmt.Errorf("failed to initialize parser : %v", err) } - return pctx, nil + return pctx, ectx, nil } func loadTestFile(file string) []TestFile { @@ -192,7 +196,7 @@ func loadTestFile(file string) []TestFile { func matchEvent(expected types.Event, out types.Event, debug bool) ([]string, bool) { var retInfo []string - var valid bool + var valid bool = false expectMaps := []map[string]string{expected.Parsed, expected.Meta, expected.Enriched} outMaps := []map[string]string{out.Parsed, out.Meta, out.Enriched} outLabels := []string{"Parsed", "Meta", "Enriched"} @@ -203,7 +207,6 @@ func matchEvent(expected types.Event, out types.Event, debug bool) ([]string, bo if debug { retInfo = append(retInfo, fmt.Sprintf("mismatch stage %s != %s", expected.Stage, out.Stage)) } - valid = false goto checkFinished } else { valid = true @@ -217,7 +220,6 @@ func matchEvent(expected types.Event, out types.Event, debug bool) ([]string, bo 
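(Reviewer aside on the matchEvent hunks around here: `valid` now starts out false, so the per-branch `valid = false` resets removed below were redundant. Reduced to a sketch, the comparison matchEvent runs over each of the Parsed/Meta/Enriched map pairs is roughly:)

```go
// Simplified sketch of matchEvent's per-map comparison. The real function
// also records human-readable mismatch details in retInfo when debug is set,
// and distinguishes "wrong value" from "missing entry".
func mapsMatch(expected, out map[string]string) bool {
	for key, want := range expected {
		got, ok := out[key]
		if !ok || got != want {
			return false
		}
	}
	return true
}
```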
if debug { retInfo = append(retInfo, fmt.Sprintf("mismatch process %t != %t", expected.Process, out.Process)) } - valid = false goto checkFinished } else { valid = true @@ -230,7 +232,6 @@ func matchEvent(expected types.Event, out types.Event, debug bool) ([]string, bo if debug { retInfo = append(retInfo, fmt.Sprintf("mismatch whitelist %t != %t", expected.Whitelisted, out.Whitelisted)) } - valid = false goto checkFinished } else { if debug { @@ -251,7 +252,6 @@ func matchEvent(expected types.Event, out types.Event, debug bool) ([]string, bo if debug { retInfo = append(retInfo, fmt.Sprintf("mismatch %s[%s] %s != %s", outLabels[mapIdx], expKey, expVal, outVal)) } - valid = false goto checkFinished } } else { //missing entry @@ -347,3 +347,71 @@ func testFile(testSet []TestFile, pctx UnixParserCtx, nodes []Node) bool { } return true } + +/*THIS IS ONLY PRESENT TO BE ABLE TO GENERATE DOCUMENTATION OF EXISTING PATTERNS*/ +type Pair struct { + Key string + Value string +} + +type PairList []Pair + +func (p PairList) Len() int { return len(p) } +func (p PairList) Swap(i, j int) { p[i], p[j] = p[j], p[i] } +func (p PairList) Less(i, j int) bool { return len(p[i].Value) < len(p[j].Value) } + +func TestGeneratePatternsDoc(t *testing.T) { + if os.Getenv("GO_WANT_TEST_DOC") != "1" { + return + } + + pctx, err := Init(map[string]interface{}{"patterns": "../../config/patterns/", "data": "./tests/"}) + if err != nil { + t.Fatalf("unable to load patterns : %s", err) + } + log.Infof("-> %s", spew.Sdump(pctx)) + /*don't judge me, we do it for the users*/ + p := make(PairList, len(pctx.Grok)) + + i := 0 + for key, val := range pctx.Grok { + p[i] = Pair{key, val} + p[i].Value = strings.Replace(p[i].Value, "{%{", "\\{\\%\\{", -1) + i++ + } + sort.Sort(p) + + f, err := os.OpenFile("./patterns-documentation.md", os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0644) + if err != nil { + t.Fatalf("failed to open : %s", err) + } + if _, err := f.WriteString("# Patterns documentation\n\n"); err != nil { + t.Fatal("failed to write to file") + } + if _, err := f.WriteString("You will find here a generated documentation of all the patterns loaded by crowdsec.\n"); err != nil { + t.Fatal("failed to write to file") + } + if _, err := f.WriteString("They are sorted by pattern length, and are meant to be used in parsers, in the form %{PATTERN_NAME}.\n"); err != nil { + t.Fatal("failed to write to file") + } + if _, err := f.WriteString("\n\n"); err != nil { + t.Fatal("failed to write to file") + } + for _, k := range p { + if _, err := f.WriteString(fmt.Sprintf("## %s\n\nPattern :\n```\n%s\n```\n\n", k.Key, k.Value)); err != nil { + t.Fatal("failed to write to file") + } + fmt.Printf("%v\t%v\n", k.Key, k.Value) + } + if _, err := f.WriteString("\n"); err != nil { + t.Fatal("failed to write to file") + } + if _, err := f.WriteString("# Documentation generation\n"); err != nil { + t.Fatal("failed to write to file") + } + if _, err := f.WriteString("This documentation is generated by `pkg/parser` : `GO_WANT_TEST_DOC=1 go test -run TestGeneratePatternsDoc`\n"); err != nil { + t.Fatal("failed to write to file") + } + f.Close() + +} diff --git a/pkg/parser/runtime.go b/pkg/parser/runtime.go index 00cbe0748..c77741491 100644 --- a/pkg/parser/runtime.go +++ b/pkg/parser/runtime.go @@ -10,6 +10,7 @@ import ( "fmt" "reflect" "strings" + "time" "github.com/crowdsecurity/crowdsec/pkg/exprhelpers" "github.com/crowdsecurity/crowdsec/pkg/types" @@ -17,22 +18,13 @@ import ( "strconv" "github.com/davecgh/go-spew/spew" + "github.com/mohae/deepcopy" 
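(Aside on the mohae/deepcopy import added above: it replaces the gob-based types.Clone for snapshotting events into StageParseCache, see the Parse() hunk further down. A minimal, self-contained illustration of why the event maps need a deep copy, using a hypothetical mini type rather than the real types.Event:)

```go
package main

import (
	"fmt"

	"github.com/mohae/deepcopy"
)

// evt is a stand-in for types.Event; only the map field matters here.
type evt struct{ Meta map[string]string }

func main() {
	a := evt{Meta: map[string]string{"source_ip": "1.1.1.172"}}
	// deepcopy.Copy returns interface{}, hence the type assertion,
	// exactly as in the new ParseDump code path.
	b := deepcopy.Copy(a).(evt)
	b.Meta["source_ip"] = "changed"
	fmt.Println(a.Meta["source_ip"]) // still "1.1.1.172": the maps are not shared
}
```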
"github.com/prometheus/client_golang/prometheus" - "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus" "github.com/antonmedv/expr" ) -//ECTX : DID YOU SEE THAT GLOBAL, ISN'T IT HUGLY -var ECTX []EnricherCtx - -type Parser interface { - Init(map[string]interface{}) (interface{}, error) - IsParsable(types.Line) (bool, error) - Parse(interface{}, types.Line) (map[string]interface{}, error) -} - /* ok, this is kinda experimental, I don't know how bad of an idea it is .. */ func SetTargetByName(target string, value string, evt *types.Event) bool { @@ -113,17 +105,18 @@ func printStaticTarget(static types.ExtraField) string { } } -func ProcessStatics(statics []types.ExtraField, p *types.Event, clog *logrus.Entry) error { +func (n *Node) ProcessStatics(statics []types.ExtraField, event *types.Event) error { //we have a few cases : //(meta||key) + (static||reference||expr) var value string + clog := n.logger for _, static := range statics { value = "" if static.Value != "" { value = static.Value } else if static.RunTimeValue != nil { - output, err := expr.Run(static.RunTimeValue, exprhelpers.GetExprEnv(map[string]interface{}{"evt": p})) + output, err := expr.Run(static.RunTimeValue, exprhelpers.GetExprEnv(map[string]interface{}{"evt": event})) if err != nil { clog.Warningf("failed to run RunTimeValue : %v", err) continue @@ -147,10 +140,10 @@ func ProcessStatics(statics []types.ExtraField, p *types.Event, clog *logrus.Ent if static.Method != "" { processed := false /*still way too hackish, but : inject all the results in enriched, and */ - for _, x := range ECTX { + for _, x := range n.EnrichFunctions { if fptr, ok := x.Funcs[static.Method]; ok && x.initiated { clog.Tracef("Found method '%s'", static.Method) - ret, err := fptr(value, p, x.RuntimeCtx) + ret, err := fptr(value, event, x.RuntimeCtx) if err != nil { clog.Fatalf("plugin function error : %v", err) } @@ -161,7 +154,7 @@ func ProcessStatics(statics []types.ExtraField, p *types.Event, clog *logrus.Ent } for k, v := range ret { clog.Debugf("\t.Enriched[%s] = '%s'\n", k, v) - p.Enriched[k] = v + event.Enriched[k] = v } break } else { @@ -173,15 +166,15 @@ func ProcessStatics(statics []types.ExtraField, p *types.Event, clog *logrus.Ent } } else if static.Parsed != "" { clog.Debugf(".Parsed[%s] = '%s'", static.Parsed, value) - p.Parsed[static.Parsed] = value + event.Parsed[static.Parsed] = value } else if static.Meta != "" { clog.Debugf(".Meta[%s] = '%s'", static.Meta, value) - p.Meta[static.Meta] = value + event.Meta[static.Meta] = value } else if static.Enriched != "" { clog.Debugf(".Enriched[%s] = '%s'", static.Enriched, value) - p.Enriched[static.Enriched] = value + event.Enriched[static.Enriched] = value } else if static.TargetByName != "" { - if !SetTargetByName(static.TargetByName, value, p) { + if !SetTargetByName(static.TargetByName, value, event) { clog.Errorf("Unable to set value of '%s'", static.TargetByName) } else { clog.Debugf("%s = '%s'", static.TargetByName, value) @@ -230,15 +223,18 @@ func stageidx(stage string, stages []string) int { var ParseDump bool var StageParseCache map[string]map[string]types.Event -func /*(u types.UnixParser)*/ Parse(ctx UnixParserCtx, xp types.Event, nodes []Node) (types.Event, error) { +func Parse(ctx UnixParserCtx, xp types.Event, nodes []Node) (types.Event, error) { var event types.Event = xp /* the stage is undefined, probably line is freshly acquired, set to first stage !*/ if event.Stage == "" && len(ctx.Stages) > 0 { event.Stage = ctx.Stages[0] - log.Debugf("no stage, set to : %s", 
event.Stage) + log.Tracef("no stage, set to : %s", event.Stage) } event.Process = false + if event.Time.IsZero() { + event.Time = time.Now() + } if event.Parsed == nil { event.Parsed = make(map[string]string) @@ -297,11 +293,8 @@ func /*(u types.UnixParser)*/ Parse(ctx UnixParserCtx, xp types.Event, nodes []N if ret { isStageOK = true if ParseDump { - evtcopy := types.Event{} - if err := types.Clone(&event, &evtcopy); err != nil { - log.Fatalf("while cloning Event in parser : %s", err) - } - StageParseCache[stage][node.Name] = evtcopy + evtcopy := deepcopy.Copy(event) + StageParseCache[stage][node.Name] = evtcopy.(types.Event) } } if ret && node.OnSuccess == "next_stage" { diff --git a/pkg/parser/stage.go b/pkg/parser/stage.go index e2da66e59..a5635b4d6 100644 --- a/pkg/parser/stage.go +++ b/pkg/parser/stage.go @@ -13,7 +13,6 @@ import ( "io" _ "net/http/pprof" "os" - "path/filepath" "sort" "strings" "time" @@ -38,7 +37,7 @@ type Stagefile struct { Stage string `yaml:"stage"` } -func LoadStages(stageFiles []Stagefile, pctx *UnixParserCtx) ([]Node, error) { +func LoadStages(stageFiles []Stagefile, pctx *UnixParserCtx, ectx []EnricherCtx) ([]Node, error) { var nodes []Node tmpstages := make(map[string]bool) pctx.Stages = []string{} @@ -83,7 +82,7 @@ func LoadStages(stageFiles []Stagefile, pctx *UnixParserCtx) ([]Node, error) { } //check compat if node.FormatVersion == "" { - log.Debugf("no version in %s, assuming '1.0'", stageFile.Filename) + log.Tracef("no version in %s, assuming '1.0'", node.Name) node.FormatVersion = "1.0" } ok, err := cwversion.Statisfies(node.FormatVersion, cwversion.Constraint_parser) @@ -100,7 +99,7 @@ func LoadStages(stageFiles []Stagefile, pctx *UnixParserCtx) ([]Node, error) { tmpstages[stageFile.Stage] = true } //compile the node : grok pattern and expression - err = node.compile(pctx) + err = node.compile(pctx, ectx) if err != nil { if node.Name != "" { return nil, fmt.Errorf("failed to compile node '%s' in '%s' : %s", node.Name, stageFile.Filename, err.Error()) @@ -134,23 +133,3 @@ func LoadStages(stageFiles []Stagefile, pctx *UnixParserCtx) ([]Node, error) { return nodes, nil } - -func LoadStageDir(dir string, pctx *UnixParserCtx) ([]Node, error) { - - var files []Stagefile - - m, err := filepath.Glob(dir + "/*/*") - if err != nil { - return nil, fmt.Errorf("unable to find configs in '%s' : %v", dir, err) - } - for _, f := range m { - tmp := Stagefile{} - tmp.Filename = f - //guess stage : (prefix - file).split('/')[0] - stages := strings.Split(f, "/") - stage := stages[len(stages)-2] - tmp.Stage = stage - files = append(files, tmp) - } - return LoadStages(files, pctx) -} diff --git a/pkg/parser/test_data/GeoLite2-ASN.mmdb b/pkg/parser/test_data/GeoLite2-ASN.mmdb new file mode 100644 index 000000000..bbf6bf260 Binary files /dev/null and b/pkg/parser/test_data/GeoLite2-ASN.mmdb differ diff --git a/pkg/parser/test_data/GeoLite2-City.mmdb b/pkg/parser/test_data/GeoLite2-City.mmdb new file mode 100644 index 000000000..a27da7fea Binary files /dev/null and b/pkg/parser/test_data/GeoLite2-City.mmdb differ diff --git a/pkg/parser/unix_parser.go b/pkg/parser/unix_parser.go index 09ef10758..c21d4eddc 100644 --- a/pkg/parser/unix_parser.go +++ b/pkg/parser/unix_parser.go @@ -2,15 +2,14 @@ package parser import ( "io/ioutil" + "fmt" + + "github.com/crowdsecurity/crowdsec/pkg/csconfig" - "github.com/crowdsecurity/crowdsec/pkg/types" "github.com/logrusorgru/grokky" - "github.com/prometheus/common/log" + log "github.com/sirupsen/logrus" ) -type UnixParser struct { -} - type 
UnixParserCtx struct { Grok grokky.Host Stages []string @@ -18,11 +17,17 @@ type UnixParserCtx struct { DataFolder string } -func (u UnixParser) IsParsable(ctx interface{}, l types.Line) (bool, error) { - return true, nil +type Parsers struct { + Ctx *UnixParserCtx + Povfwctx *UnixParserCtx + StageFiles []Stagefile + PovfwStageFiles []Stagefile + Nodes []Node + Povfwnodes []Node + EnricherCtx []EnricherCtx } -func (u UnixParser) Init(c map[string]interface{}) (*UnixParserCtx, error) { +func Init(c map[string]interface{}) (*UnixParserCtx, error) { r := UnixParserCtx{} r.Grok = grokky.NewBase() files, err := ioutil.ReadDir(c["patterns"].(string)) @@ -31,11 +36,64 @@ func (u UnixParser) Init(c map[string]interface{}) (*UnixParserCtx, error) { } r.DataFolder = c["data"].(string) for _, f := range files { - log.Debugf("Loading %s", f.Name()) if err := r.Grok.AddFromFile(c["patterns"].(string) + f.Name()); err != nil { log.Errorf("failed to load pattern %s : %v", f.Name(), err) return nil, err } } + log.Debugf("Loaded %d pattern files", len(files)) return &r, nil } + + +func LoadParsers(cConfig *csconfig.GlobalConfig, parsers *Parsers) (*Parsers, error) { + var err error + + log.Infof("Loading grok library %s", cConfig.Crowdsec.ConfigDir+string("/patterns/")) + /* load base regexps for two grok parsers */ + parsers.Ctx, err = Init(map[string]interface{}{"patterns": cConfig.Crowdsec.ConfigDir + string("/patterns/"), + "data": cConfig.Crowdsec.DataDir}) + if err != nil { + return parsers, fmt.Errorf("failed to load parser patterns : %v", err) + } + parsers.Povfwctx, err = Init(map[string]interface{}{"patterns": cConfig.Crowdsec.ConfigDir + string("/patterns/"), + "data": cConfig.Crowdsec.DataDir}) + if err != nil { + return parsers, fmt.Errorf("failed to load postovflw parser patterns : %v", err) + } + + /* + Load enrichers + */ + log.Infof("Loading enrich plugins") + + parsers.EnricherCtx, err = Loadplugin(cConfig.Crowdsec.DataDir) + if err != nil { + return parsers, fmt.Errorf("Failed to load enrich plugin : %v", err) + } + + /* + Load the actual parsers + */ + + log.Infof("Loading parsers %d stages", len(parsers.StageFiles)) + + parsers.Nodes, err = LoadStages(parsers.StageFiles, parsers.Ctx, parsers.EnricherCtx) + if err != nil { + return parsers, fmt.Errorf("failed to load parser config : %v", err) + } + + log.Infof("Loading postoverflow Parsers") + parsers.Povfwnodes, err = LoadStages(parsers.PovfwStageFiles, parsers.Povfwctx, parsers.EnricherCtx) + + if err != nil { + return parsers, fmt.Errorf("failed to load postoverflow config : %v", err) + } + + if cConfig.Prometheus != nil && cConfig.Prometheus.Enabled { + parsers.Ctx.Profiling = true + parsers.Povfwctx.Profiling = true + } + + return parsers, nil +} diff --git a/pkg/types/ban_application.go b/pkg/types/ban_application.go deleted file mode 100644 index 5f7a3d5b6..000000000 --- a/pkg/types/ban_application.go +++ /dev/null @@ -1,34 +0,0 @@ -package types - -import ( - "time" - - "github.com/jinzhu/gorm" -) - -//BanApplication is the in-db representation of a ban order. IPs/Ranges are represented as a integer interval. 
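(Editorial aside on the deleted BanApplication type starting here: "represented as an integer interval" means an IPv4 range was stored as a [StartIp, EndIp] pair of uint32. A rough sketch of that encoding; ip2int and lastAddress below approximate the IP2Int and LastAddress helpers kept in pkg/types/utils.go:)

```go
package main

import (
	"encoding/binary"
	"fmt"
	"net"
)

// ip2int packs an IPv4 address into a big-endian uint32.
func ip2int(ip net.IP) uint32 {
	return binary.BigEndian.Uint32(ip.To4())
}

// lastAddress returns the highest address of an IPv4 network.
func lastAddress(n *net.IPNet) net.IP {
	ip := n.IP.To4()
	last := make(net.IP, len(ip))
	for i := range ip {
		last[i] = ip[i] | ^n.Mask[i]
	}
	return last
}

func main() {
	_, ipnet, _ := net.ParseCIDR("1.1.1.0/24")
	// 1.1.1.0/24 becomes the interval [16843008, 16843263]
	fmt.Println(ip2int(ipnet.IP), ip2int(lastAddress(ipnet)))
}
```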
-//one BanOrder can lead to multiple BanApplication -type BanApplication struct { - gorm.Model `json:"-"` - - MeasureSource string /*api,local*/ - MeasureType string /*ban,slow,captcha*/ - MeasureExtra string /*in case we need extra info for the connector ?*/ - Until time.Time /*expiration of ban*/ - - StartIp uint32 - EndIp uint32 - - TargetCN string - TargetAS int - TargetASName string - - IpText string /*only for humans*/ - - Reason string /*long human reason of the ban 'ban AS1234' */ - Scenario string /*the type of scenario that led to ban*/ - - //SignalOccurence *parser.SignalOccurence /*the signal occurence it's attached to */ - SignalOccurenceID uint //so we can link local decision to actual overflow - -} diff --git a/pkg/types/ban_order.go b/pkg/types/ban_order.go deleted file mode 100644 index 6de3aa809..000000000 --- a/pkg/types/ban_order.go +++ /dev/null @@ -1,58 +0,0 @@ -package types - -import ( - "log" - "net" - "time" -) - -//BanOrder is what is generated from a SignalOccurence : it describes what action to take -//it is in-memory only and never touches the DB. It will be turned into one or several "parser.BanApplication" -type BanOrder struct { - MeasureSource string /*api,local*/ - MeasureType string /*ban,slow,captcha*/ - Scope string /*ip,multi_ip,as,country*/ - TargetAS int /*if non-empty, applies to this AS*/ - TargetASName string /*if non-empty, applies to this AS*/ - TargetRange net.IPNet /*if non-empty, applies to this IP*/ - TargetIP net.IP /*if non-empty, applies to this range*/ - TargetCountry string - Until time.Time /*when would the measure expire*/ - TxtTarget string - Reason string -} - -func OrderToApplications(ordr *BanOrder) ([]BanApplication, error) { - var bas []BanApplication - var ba BanApplication - /* - pseudo-code for as/country scope would be : - - fetch ranges of AS/Country - - for ipnet := range Country.Ranges { - ba.append(...) 
- } - */ - - ba.MeasureType = ordr.MeasureType - ba.MeasureSource = ordr.MeasureSource - ba.Until = ordr.Until - ba.Reason = ordr.Reason - ba.TargetAS = ordr.TargetAS - ba.TargetASName = ordr.TargetASName - - ba.TargetCN = ordr.TargetCountry - if ordr.Scope == "ip" { - ba.StartIp = IP2Int(ordr.TargetIP) - ba.EndIp = IP2Int(ordr.TargetIP) - ba.IpText = ordr.TargetIP.String() - bas = append(bas, ba) - } else if ordr.Scope == "range" { - ba.StartIp = IP2Int(ordr.TargetRange.IP) - ba.EndIp = IP2Int(LastAddress(&ordr.TargetRange)) - ba.IpText = ordr.TargetRange.String() - bas = append(bas, ba) - } else { - log.Fatalf("only 'ip' and 'range' scopes are supported.") - } - return bas, nil -} diff --git a/pkg/types/event.go b/pkg/types/event.go index 37202ef48..8ecfae18a 100644 --- a/pkg/types/event.go +++ b/pkg/types/event.go @@ -2,6 +2,9 @@ package types import ( "time" + + "github.com/antonmedv/expr/vm" + "github.com/crowdsecurity/crowdsec/pkg/models" ) const ( @@ -9,27 +12,54 @@ const ( OVFLW ) +//Event is the structure representing a runtime event (log or overflow) type Event struct { /* is it a log or an overflow */ - Type int `yaml:"Type,omitempty"` - ExpectMode int `yaml:"ExpectMode,omitempty"` //how to buckets should handle event : leaky.TIMEMACHINE or leaky.LIVE - Whitelisted bool `yaml:"Whitelisted,omitempty"` - WhiteListReason string `json:"whitelist_reason,omitempty"` + Type int `yaml:"Type,omitempty" json:"Type,omitempty"` //Can be types.LOG (0) or types.OVFLW (1) + ExpectMode int `yaml:"ExpectMode,omitempty" json:"ExpectMode,omitempty"` //how buckets should handle the event : leaky.TIMEMACHINE or leaky.LIVE + Whitelisted bool `yaml:"Whitelisted,omitempty" json:"Whitelisted,omitempty"` + WhiteListReason string `yaml:"whitelist_reason,omitempty" json:"whitelist_reason,omitempty"` //should add whitelist reason ? 
/* the current stage of the line being parsed */ - Stage string `yaml:"Stage,omitempty"` + Stage string `yaml:"Stage,omitempty" json:"Stage,omitempty"` /* original line (produced by acquisition) */ - Line Line `json:"Line" yaml:"Line,omitempty"` + Line Line `yaml:"Line,omitempty" json:"Line,omitempty"` /* output of groks */ - Parsed map[string]string `json:"Parsed,omitempty" yaml:"Parsed,omitempty"` + Parsed map[string]string `yaml:"Parsed,omitempty" json:"Parsed,omitempty"` /* output of enrichment */ - Enriched map[string]string `json:"Enriched,omitempty" yaml:"Enriched,omitempty"` + Enriched map[string]string `yaml:"Enriched,omitempty" json:"Enriched,omitempty"` /* Overflow */ - Overflow SignalOccurence `yaml:"Overflow,omitempty"` - Time time.Time `json:"Time,omitempty"` //parsed time `json:"-"` `` - StrTime string `yaml:"StrTime,omitempty"` - MarshaledTime string `yaml:"MarshaledTime,omitempty"` - Process bool `yaml:"Process,omitempty"` //can be set to false to avoid processing line + Overflow RuntimeAlert `yaml:"Alert,omitempty" json:"Alert,omitempty"` + Time time.Time `yaml:"Time,omitempty" json:"Time,omitempty"` //parsed time `json:"-"` `` + StrTime string `yaml:"StrTime,omitempty" json:"StrTime,omitempty"` + MarshaledTime string `yaml:"MarshaledTime,omitempty" json:"MarshaledTime,omitempty"` + Process bool `yaml:"Process,omitempty" json:"Process,omitempty"` //can be set to false to avoid processing line /* Meta is the only part that will make it to the API - it should be normalized */ - Meta map[string]string `json:"Meta,omitempty" yaml:"Meta,omitempty"` + Meta map[string]string `yaml:"Meta,omitempty" json:"Meta,omitempty"` +} + +//Move in leakybuckets +const ( + Undefined = "" + Ip = "Ip" + Range = "Range" + Filter = "Filter" +) + +//Move in leakybuckets +type ScopeType struct { + Scope string `yaml:"type"` + Filter string `yaml:"expression"` + RunTimeFilter *vm.Program +} + +type RuntimeAlert struct { + Mapkey string `yaml:"MapKey,omitempty" json:"MapKey,omitempty"` + BucketId string `yaml:"BucketId,omitempty" json:"BucketId,omitempty"` + Whitelisted bool `yaml:"Whitelisted,omitempty" json:"Whitelisted,omitempty"` + Reprocess bool `yaml:"Reprocess,omitempty" json:"Reprocess,omitempty"` + Sources map[string]models.Source `yaml:"Sources,omitempty" json:"Sources,omitempty"` + Alert *models.Alert `yaml:"Alert,omitempty" json:"Alert,omitempty"` //this one is a pointer to APIAlerts[0] for convenience. 
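(Note on the Alert field just above: it aliases APIAlerts[0] so callers handling the common single-source case can skip the slice. A minimal sketch of that convenience-pointer pattern, with hypothetical mini types rather than the real models.Alert:)

```go
type alert struct{ Message string }

type runtimeAlert struct {
	Alert     *alert  // convenience pointer to APIAlerts[0]
	APIAlerts []alert // full list, populated when there is more than one source
}

func newRuntimeAlert(alerts []alert) runtimeAlert {
	ra := runtimeAlert{APIAlerts: alerts}
	if len(ra.APIAlerts) > 0 {
		// Must be refreshed if APIAlerts is ever reallocated (e.g. by append).
		ra.Alert = &ra.APIAlerts[0]
	}
	return ra
}
```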
+ //APIAlerts will be populated at the end when there is more than one source + APIAlerts []models.Alert `yaml:"APIAlerts,omitempty" json:"APIAlerts,omitempty"` } diff --git a/pkg/types/event_sequence.go b/pkg/types/event_sequence.go deleted file mode 100644 index 0294b88b1..000000000 --- a/pkg/types/event_sequence.go +++ /dev/null @@ -1,23 +0,0 @@ -package types - -import ( - "time" - - "github.com/jinzhu/gorm" -) - -//EventSequence is used to represent the summarized version of events that lead to overflow -type EventSequence struct { - gorm.Model `json:"-"` - Time time.Time - Source Source `json:"-"` - /*for db only :/ */ - Source_ip string - Source_range string - Source_AutonomousSystemNumber string - Source_AutonomousSystemOrganization string - Source_Country string - /*stop db only */ - SignalOccurenceID uint //unique ID for the hasMany relation - Serialized string `gorm:"size:65535"` //the serialized dict -} diff --git a/pkg/types/signal_occurence.go b/pkg/types/signal_occurence.go deleted file mode 100644 index f7f259281..000000000 --- a/pkg/types/signal_occurence.go +++ /dev/null @@ -1,44 +0,0 @@ -package types - -import ( - "time" - - "github.com/jinzhu/gorm" -) - -type SignalOccurence struct { - gorm.Model `json:"-"` - // ID uint // `json:"-" gorm:"primary_key,AUTO_INCREMENT"` - MapKey string //for Delete - Scenario string `json:"scenario,omitempty"` //The unique name of the scenario, ie. ssh_bruteforce_multi-user - Bucket_id string `json:"bucket_id,omitempty"` //The 'runtime' bucket-name (mostly for debug), ie. `sunny-flower` - Alert_message string `json:"alert_message,omitempty"` //Human-friendly label (to be displayed) - Events_count int `json:"events_count,omitempty" yaml:"Events_count,omitempty"` //Number of events between first occurence and ban - Events_sequence []EventSequence `json:"-" gorm:"foreignkey:SignalOccurenceID;association_foreignkey:ID"` //When adapted, a unique list of string representing the individual events that lead to the overflow - Start_at time.Time `json:"start_at,omitempty"` //first event (usually bucket creation time) - BanApplications []BanApplication `json:"ban_applications,omitempty" gorm:"foreignkey:SignalOccurenceID;association_foreignkey:ID"` - Stop_at time.Time `json:"stop_at,omitempty"` //last event (usually bucket overflow time) - Source *Source `json:"source"` //`json:"source,omitempty"` - /*for db*/ - Source_ip string `yaml:"Source_ip,omitempty"` - Source_range string - Source_AutonomousSystemNumber string - Source_AutonomousSystemOrganization string - Source_Country string - Source_Latitude float64 - Source_Longitude float64 - /*/for db*/ - Sources map[string]Source `json:"sources,omitempty" gorm:"-"` - // Source_ip string `json:"src_ip,omitempty"` //for now just the IP - // Source_as string `json:"src_as,omitempty"` //for now just the as (AS number) - // Source_country string `json:"src_country,omitempty"` //for now just the county (two-letter iso-code) - Dest_ip string `json:"dst_ip,omitempty"` //for now just the destination IP - //Policy string `json:"policy,omitempty"` //for now we forward it as well :) - //bucket info - Capacity int `json:"capacity,omitempty"` - Leak_speed time.Duration `json:"leak_speed,omitempty"` - Whitelisted bool `gorm:"-"` - Simulation bool `gorm:"-"` - Reprocess bool //Reprocess, when true, will make the overflow being processed again as a fresh log would - Labels map[string]string `gorm:"-"` -} diff --git a/pkg/types/source.go b/pkg/types/source.go deleted file mode 100644 index 6dc8577ca..000000000 --- 
a/pkg/types/source.go +++ /dev/null @@ -1,20 +0,0 @@ -package types - -import ( - "net" - - "github.com/jinzhu/gorm" -) - -//Source is the generic representation of a source ip implicated in events / overflows. It contains both information extracted directly from logs and enrichment -type Source struct { - gorm.Model `json:"-"` - Ip net.IP - Range net.IPNet - AutonomousSystemNumber string - AutonomousSystemOrganization string - Country string - Latitude float64 - Longitude float64 - Flags map[string]bool //a list of flags we can set -} diff --git a/pkg/types/utils.go b/pkg/types/utils.go index 12ae3457a..867064e81 100644 --- a/pkg/types/utils.go +++ b/pkg/types/utils.go @@ -5,8 +5,17 @@ import ( "encoding/binary" "encoding/gob" "fmt" + "io" + "io/ioutil" "net" + "os" + "path/filepath" + "runtime/debug" + "strconv" + "strings" + "time" + "github.com/crowdsecurity/crowdsec/pkg/cwversion" log "github.com/sirupsen/logrus" "gopkg.in/natefinch/lumberjack.v2" ) @@ -50,7 +59,6 @@ func LastAddress(n *net.IPNet) net.IP { var logFormatter log.Formatter var LogOutput *lumberjack.Logger //io.Writer var logLevel log.Level -var logReportCaller bool func SetDefaultLoggerConfig(cfgMode string, cfgFolder string, cfgLevel log.Level) error { @@ -73,10 +81,7 @@ func SetDefaultLoggerConfig(cfgMode string, cfgFolder string, cfgLevel log.Level logFormatter = &log.TextFormatter{TimestampFormat: "02-01-2006 15:04:05", FullTimestamp: true} log.SetFormatter(logFormatter) } - if logLevel >= log.DebugLevel { - logReportCaller = true - log.SetReportCaller(true) - } + return nil } @@ -85,9 +90,7 @@ func ConfigureLogger(clog *log.Logger) error { if LogOutput != nil { clog.SetOutput(LogOutput) } - if logReportCaller { - clog.SetReportCaller(true) - } + if logFormatter != nil { clog.SetFormatter(logFormatter) } @@ -108,3 +111,112 @@ func Clone(a, b interface{}) error { } return nil } + +//CatchPanic is a util func that we should call from all go-routines to ensure proper stacktrace handling +func CatchPanic(component string) { + + if r := recover(); r != nil { + tmpfile, err := ioutil.TempFile("/tmp/", "crowdsec-crash.*.txt") + if err != nil { + log.Fatal(err) + } + if _, err := tmpfile.Write([]byte(cwversion.ShowStr())); err != nil { + tmpfile.Close() + log.Fatal(err) + } + if _, err := tmpfile.Write(debug.Stack()); err != nil { + tmpfile.Close() + log.Fatal(err) + } + if err := tmpfile.Close(); err != nil { + log.Fatal(err) + } + log.Errorf("crowdsec - goroutine %s crashed : %s", component, r) + log.Errorf("please report this error to https://github.com/crowdsecurity/crowdsec/") + log.Errorf("stacktrace/report is written to %s : please join it to your issue", tmpfile.Name()) + log.Fatalf("crowdsec stopped") + } +} + +func ParseDuration(d string) (time.Duration, error) { + durationStr := d + if strings.HasSuffix(d, "d") { + days := strings.Split(d, "d")[0] + if len(days) == 0 { + return 0, fmt.Errorf("'%s' can't be parsed as duration", d) + } + daysInt, err := strconv.Atoi(days) + if err != nil { + return 0, err + } + durationStr = strconv.Itoa(daysInt*24) + "h" + } + duration, err := time.ParseDuration(durationStr) + if err != nil { + return 0, err + } + return duration, nil +} + +/*help to copy the file, ioutil doesn't offer the feature*/ + +func copyFileContents(src, dst string) (err error) { + in, err := os.Open(src) + if err != nil { + return + } + defer in.Close() + out, err := os.Create(dst) + if err != nil { + return + } + defer func() { + cerr := out.Close() + if err == nil { + err = cerr + } + }() + if _, err = 
io.Copy(out, in); err != nil { + return + } + err = out.Sync() + return +} + +/*copy the file, ioutil doesn't offer the feature*/ +func CopyFile(sourceSymLink, destinationFile string) (err error) { + + sourceFile, err := filepath.EvalSymlinks(sourceSymLink) + if err != nil { + log.Infof("Not a symlink : %s", err) + sourceFile = sourceSymLink + } + + sourceFileStat, err := os.Stat(sourceFile) + if err != nil { + return + } + if !sourceFileStat.Mode().IsRegular() { + // cannot copy non-regular files (e.g., directories, + // symlinks, devices, etc.) + return fmt.Errorf("copyFile: non-regular source file %s (%q)", sourceFileStat.Name(), sourceFileStat.Mode().String()) + } + destinationFileStat, err := os.Stat(destinationFile) + if err != nil { + if !os.IsNotExist(err) { + return + } + } else { + if !(destinationFileStat.Mode().IsRegular()) { + return fmt.Errorf("copyFile: non-regular destination file %s (%q)", destinationFileStat.Name(), destinationFileStat.Mode().String()) + } + if os.SameFile(sourceFileStat, destinationFileStat) { + return + } + } + if err = os.Link(sourceFile, destinationFile); err == nil { + return + } + err = copyFileContents(sourceFile, destinationFile) + return +} diff --git a/plugins/backend/database.go b/plugins/backend/database.go deleted file mode 100644 index 9733ede27..000000000 --- a/plugins/backend/database.go +++ /dev/null @@ -1,89 +0,0 @@ -package main - -import ( - "fmt" - "time" - - "github.com/crowdsecurity/crowdsec/pkg/database" - "github.com/crowdsecurity/crowdsec/pkg/types" - log "github.com/sirupsen/logrus" -) - -//nolint:unused // pluginDB is the interface for database output plugin -type pluginDB struct { - CTX *database.Context -} - -func (p *pluginDB) Shutdown() error { - p.CTX.PusherTomb.Kill(nil) - if err := p.CTX.PusherTomb.Wait(); err != nil { - return fmt.Errorf("DB shutdown error : %s", err) - } - - return nil -} - -func (p *pluginDB) StartAutoCommit() error { - return p.CTX.StartAutoCommit() -} - -func (p *pluginDB) Init(config map[string]string) error { - var err error - log.Debugf("database config : %+v \n", config) - p.CTX, err = database.NewDatabase(config) - - if err != nil { - return err - } - return nil -} - -func (p *pluginDB) Delete(target string) (int, error) { - nbDel, err := p.CTX.DeleteBan(target) - if err != nil { - return 0, err - } - log.Debugf("deleted '%d' entry from database", nbDel) - return nbDel, nil -} - -func (p *pluginDB) DeleteAll() error { - err := p.CTX.DeleteAll() - if err != nil { - return err - } - return nil -} - -func (p *pluginDB) Insert(sig types.SignalOccurence) error { - err := p.CTX.WriteSignal(sig) - if err != nil { - return err - } - return nil -} - -func (p *pluginDB) Flush() error { - err := p.CTX.Flush() - if err != nil { - return err - } - - return nil -} - -func (p *pluginDB) ReadAT(timeAT time.Time) ([]map[string]string, error) { - ret, err := p.CTX.GetBansAt(timeAT) - if err != nil { - return nil, err - } - return ret, nil -} - -//nolint:deadcode,unused // New is used by the plugin system -func New() interface{} { - return &pluginDB{} -} - -// empty main function is mandatory since we are in a main package -func main() {} diff --git a/scripts/install_all.sh b/scripts/install_all.sh index 38e15991c..2fe94d40b 100644 --- a/scripts/install_all.sh +++ b/scripts/install_all.sh @@ -1,7 +1,7 @@ #!/bin/sh -./cscli -c dev.yaml list parsers -a -o json | jq -r ".[].name" > installed_parsers.txt +./cscli -c dev.yaml parsers list -a -o json | jq -r ".[].name" > installed_parsers.txt cat 
installed_parsers.txt | while read parser; do echo "install ${parser}" ; - ./cscli -c dev.yaml install parser ${parser} ; + ./cscli -c dev.yaml parsers install ${parser} ; done diff --git a/scripts/test_env.sh b/scripts/test_env.sh index a0c5edb31..4666cead8 100755 --- a/scripts/test_env.sh +++ b/scripts/test_env.sh @@ -38,17 +38,7 @@ PARSER_S01="$PARSER_DIR/s01-parse" PARSER_S02="$PARSER_DIR/s02-enrich" SCENARIOS_DIR="$CONFIG_DIR/scenarios" POSTOVERFLOWS_DIR="$CONFIG_DIR/postoverflows" -PLUGIN_BACKEND_DIR="$CONFIG_DIR/plugins/backend/" -DB_PLUGIN_FILE="$PLUGIN_BACKEND_DIR/database.yaml" - -gen_sqlite_config() { - echo "name: database" >> "$DB_PLUGIN_FILE" - echo "path: ./plugins/backend/database.so" >> "$DB_PLUGIN_FILE" - echo "config:" >> "$DB_PLUGIN_FILE" - echo " type: sqlite" >> "$DB_PLUGIN_FILE" - echo " db_path: ./test.db" >> "$DB_PLUGIN_FILE" - echo " flush: true" >> "$DB_PLUGIN_FILE" -} +HUB_DIR="$CONFIG_DIR/hub" log_info() { msg=$1 @@ -68,7 +58,7 @@ create_arbo() { mkdir -p "$SCENARIOS_DIR" mkdir -p "$POSTOVERFLOWS_DIR" mkdir -p "$CSCLI_DIR" - mkdir -p "$PLUGIN_BACKEND_DIR" + mkdir -p "$HUB_DIR" } copy_files() { @@ -78,13 +68,19 @@ copy_files() { cp "./cmd/crowdsec/crowdsec" "$BASE" cp "./cmd/crowdsec-cli/cscli" "$BASE" cp -r "./config/patterns" "$CONFIG_DIR" - cp -r "./plugins/" "$BASE" + cp "./config/acquis.yaml" "$CONFIG_DIR" + touch "$CONFIG_DIR"/local_api_credentials.yaml + touch "$CONFIG_DIR"/online_api_credentials.yaml } setup() { - $BASE/cscli -c "$CONFIG_FILE" update - $BASE/cscli -c "$CONFIG_FILE" install collection crowdsecurity/linux + $BASE/cscli -c "$CONFIG_FILE" hub update + $BASE/cscli -c "$CONFIG_FILE" collections install crowdsecurity/linux +} + +setup_api() { + $BASE/cscli -c "$CONFIG_FILE" machines add test -p testpassword -f $CONFIG_DIR/local_api_credentials.yaml --force } @@ -98,9 +94,9 @@ main() { log_info "Setting up configurations" CURRENT_PWD=$(pwd) cd $BASE + setup_api setup cd $CURRENT_PWD - gen_sqlite_config log_info "Environment is ready in $BASE" } diff --git a/wizard.sh b/wizard.sh index dc8dab354..5bfa4572d 100755 --- a/wizard.sh +++ b/wizard.sh @@ -11,42 +11,33 @@ YELLOW='\033[0;33m' NC='\033[0m' SILENT="false" +DOCKER_MODE="false" CROWDSEC_RUN_DIR="/var/run" CROWDSEC_LIB_DIR="/var/lib/crowdsec" CROWDSEC_USR_DIR="/usr/local/lib/crowdsec" CROWDSEC_DATA_DIR="${CROWDSEC_LIB_DIR}/data" -CROWDSEC_PLUGIN_DIR="${CROWDSEC_USR_DIR}/plugins" -CROWDSEC_PLUGIN_BACKEND_DIR="${CROWDSEC_PLUGIN_DIR}/backend" CROWDSEC_DB_PATH="${CROWDSEC_DATA_DIR}/crowdsec.db" CROWDSEC_PATH="/etc/crowdsec" -CROWDSEC_CONFIG_PATH="${CROWDSEC_PATH}/config" +CROWDSEC_CONFIG_PATH="${CROWDSEC_PATH}" CROWDSEC_LOG_FILE="/var/log/crowdsec.log" -CROWDSEC_BACKEND_FOLDER="/etc/crowdsec/plugins/backend" CSCLI_FOLDER="/etc/crowdsec/config/cscli" - -DBOX_BIN="" - -WHIPTAIL_BIN="whiptail" -DIALOG_BIN="dialog" - - CROWDSEC_BIN="./cmd/crowdsec/crowdsec" CSCLI_BIN="./cmd/crowdsec-cli/cscli" -CROWDSEC_BIN_INSTALLED="/usr/local/bin/crowdsec" -CSCLI_BIN_INSTALLED="/usr/local/bin/cscli" + +CLIENT_SECRETS="local_api_credentials.yaml" +LAPI_SECRETS="online_api_credentials.yaml" + +BIN_INSTALL_PATH="/usr/local/bin" +CROWDSEC_BIN_INSTALLED="${BIN_INSTALL_PATH}/crowdsec" +CSCLI_BIN_INSTALLED="${BIN_INSTALL_PATH}/cscli" ACQUIS_PATH="${CROWDSEC_CONFIG_PATH}" TMP_ACQUIS_FILE="tmp-acquis.yaml" ACQUIS_TARGET="${ACQUIS_PATH}/acquis.yaml" -setup_cron_pull() { - cp ./config/crowdsec_pull /etc/cron.d/ -} - - PID_DIR="${CROWDSEC_RUN_DIR}" SYSTEMD_PATH_FILE="/etc/systemd/system/crowdsec.service" @@ -98,16 +89,16 @@ 
detect_services () { #raw ps PSAX=`ps ax -o comm=` for SVC in ${SUPPORTED_SERVICES} ; do - log_info "Checking if service '${SVC}' is running (ps+systemd)" - for SRC in "${SYSTEMD_SERVICES}" "${PSAX}" ; do - echo ${SRC} | grep ${SVC} >/dev/null - if [ $? -eq 0 ]; then - DETECTED_SERVICES+=(${SVC}) - HMENU+=(${SVC} "on") - log_info "Found '${SVC}' running" - break; - fi; - done; + log_info "Checking if service '${SVC}' is running (ps+systemd)" + for SRC in "${SYSTEMD_SERVICES}" "${PSAX}" ; do + echo ${SRC} | grep ${SVC} >/dev/null + if [ $? -eq 0 ]; then + DETECTED_SERVICES+=(${SVC}) + HMENU+=(${SVC} "on") + log_info "Found '${SVC}' running" + break; + fi; + done; done; if [[ ${OSTYPE} == "linux-gnu" ]]; then DETECTED_SERVICES+=("linux") @@ -118,11 +109,14 @@ detect_services () { if [[ ${SILENT} == "false" ]]; then #we put whiptail results in an array, notice the dark magic fd redirection - DETECTED_SERVICES=($(${DBOX_BIN} --separate-output --noitem --ok-button Continue --title "Services to monitor" --checklist "Detected services, uncheck to ignore. Ignored services won't be monitored." 18 70 10 ${HMENU[@]} 3>&1 1>&2 2>&3)) + DETECTED_SERVICES=($(whiptail --separate-output --noitem --ok-button Continue --title "Services to monitor" --checklist "Detected services, uncheck to ignore. Ignored services won't be monitored." 18 70 10 ${HMENU[@]} 3>&1 1>&2 2>&3)) if [ $? -eq 1 ]; then - log_err "user bailed out at services selection" - exit 1; + log_err "user bailed out at services selection" + exit 1; fi; + echo "Detected services (interactive) : ${DETECTED_SERVICES[@]}" + else + echo "Detected services (unattended) : ${DETECTED_SERVICES[@]}" fi; } @@ -178,10 +172,10 @@ find_logs_for() { done; if [[ ${SILENT} == "false" ]]; then - DETECTED_LOGFILES=($(${DBOX_BIN} --separate-output --noitem --ok-button Continue --title "Log files to process for ${SVC}" --checklist "Detected logfiles for ${SVC}, uncheck to ignore" 18 70 10 ${HMENU[@]} 3>&1 1>&2 2>&3)) + DETECTED_LOGFILES=($(whiptail --separate-output --noitem --ok-button Continue --title "Log files to process for ${SVC}" --checklist "Detected logfiles for ${SVC}, uncheck to ignore" 18 70 10 ${HMENU[@]} 3>&1 1>&2 2>&3)) if [ $? -eq 1 ]; then - log_err "user bailed out at log file selection" - exit 1; + log_err "user bailed out at log file selection" + exit 1; fi; fi } @@ -201,49 +195,52 @@ in_array() { install_collection() { HMENU=() - readarray -t AVAILABLE_COLLECTION < <(${CSCLI_BIN_INSTALLED} list collections -o raw -a) + readarray -t AVAILABLE_COLLECTION < <(${CSCLI_BIN_INSTALLED} collections list -o raw -a) COLLECTION_TO_INSTALL=() - if [[ ${SILENT} == "false" ]]; then - for collect_info in "${AVAILABLE_COLLECTION[@]}"; do - #echo "collection raw : ${collect_info}" >> out.txt - collection="$(echo ${collect_info} | cut -d " " -f1)" - description="$(echo ${collect_info} | cut -d " " -f2-)" - in_array $collection "${DETECTED_SERVICES[@]}" - if [[ $? == 0 ]]; then - HMENU+=("${collection}" "${description}" "ON") - else - if [[ ${collection} == "linux" ]]; then - HMENU+=("${collection}" "${description}" "ON") - else - HMENU+=("${collection}" "${description}" "OFF") - fi - fi - done - COLLECTION_TO_INSTALL=($(${DBOX_BIN} --separate-output --ok-button Continue --title "Crowdsec collections" --checklist "Available collections in crowdsec, try to pick one that fits your profile. Collections contains parsers and scenarios to protect your system." 20 120 10 "${HMENU[@]}" 3>&1 1>&2 2>&3)) - if [ $? 
-eq 1 ]; then - log_err "user bailed out at collection selection" - exit 1; - fi; - else - for collection in "${DETECTED_SERVICES[@]}"; do + for collect_info in "${AVAILABLE_COLLECTION[@]}"; do + collection="$(echo ${collect_info} | cut -d " " -f1)" + description="$(echo ${collect_info} | cut -d " " -f2-)" + in_array $collection "${DETECTED_SERVICES[@]}" + if [[ $? == 0 ]]; then + HMENU+=("${collection}" "${description}" "ON") + #in case we're not in interactive mode, assume defaults COLLECTION_TO_INSTALL+=(${collection}) - done - fi + else + if [[ ${collection} == "linux" ]]; then + HMENU+=("${collection}" "${description}" "ON") + #in case we're not in interactive mode, assume defaults + COLLECTION_TO_INSTALL+=(${collection}) + else + HMENU+=("${collection}" "${description}" "OFF") + fi + fi + done + + if [[ ${SILENT} == "false" ]]; then + COLLECTION_TO_INSTALL=($(whiptail --separate-output --ok-button Continue --title "Crowdsec collections" --checklist "Available collections in crowdsec, try to pick one that fits your profile. Collections contains parsers and scenarios to protect your system." 20 120 10 "${HMENU[@]}" 3>&1 1>&2 2>&3)) + if [ $? -eq 1 ]; then + log_err "user bailed out at collection selection" + exit 1; + fi; + fi; for collection in "${COLLECTION_TO_INSTALL[@]}"; do log_info "Installing collection '${collection}'" - ${CSCLI_BIN_INSTALLED} install collection "${collection}" > /dev/null 2>&1 || log_err "fail to install collection ${collection}" + ${CSCLI_BIN_INSTALLED} collections install "${collection}" > /dev/null 2>&1 || log_err "fail to install collection ${collection}" done - ${CSCLI_BIN_INSTALLED} install parser "crowdsecurity/whitelists" > /dev/null 2>&1 || log_err "fail to install collection crowdsec/whitelists" + ${CSCLI_BIN_INSTALLED} parsers install "crowdsecurity/whitelists" > /dev/null 2>&1 || log_err "fail to install collection crowdsec/whitelists" if [[ ${SILENT} == "false" ]]; then whiptail --msgbox "Out of safety, I installed a parser called 'crowdsecurity/whitelists'. This one will prevent private IP adresses from being banned, feel free to remove it any time." 20 50 - whiptail --msgbox "CrowdSec alone will not block any IP address. If you want to block them, you must use a blocker. You can find them on https://hub.crowdsec.net/" 20 50 fi + if [[ ${SILENT} == "false" ]]; then + whiptail --msgbox "CrowdSec alone will not block any IP address. If you want to block them, you must use a bouncer. You can find them on https://hub.crowdsec.net/" 20 50 + fi + } @@ -276,62 +273,47 @@ genacquisition() { done } -delete_plugins(){ - rm -rf "${CROWDSEC_PLUGIN_DIR}" - rm -rf "${CROWDSEC_BACKEND_FOLDER}" -} - -install_plugins() { - install_plugins_bin - mkdir -p "${CROWDSEC_BACKEND_FOLDER}" || exit - cp -r ./config/plugins/backend/* "${CROWDSEC_BACKEND_FOLDER}" || exit -} - -install_plugins_bin() { - mkdir -p "${CROWDSEC_PLUGIN_BACKEND_DIR}" || exit - (cd ./plugins && find . -type f -name "*.so" -exec install -Dm 644 {} "${CROWDSEC_PLUGIN_DIR}/{}" \; && cd ../) || exit -} - #install crowdsec and cscli install_crowdsec() { - - if [[ ! -f "$CROWDSEC_BIN" ]]; then - log_err "Crowdsec binary '$CROWDSEC_BIN' not found, you have likely clone the repository." 
- log_err "If you are trying to install crowdsec, please download a release :" - log_err "https://github.com/crowdsecurity/crowdsec/releases/latest" - exit - fi - mkdir -p "${CROWDSEC_DATA_DIR}" (cd config && find patterns -type f -exec install -Dm 644 "{}" "${CROWDSEC_CONFIG_PATH}/{}" \; && cd ../) || exit mkdir -p "${CROWDSEC_CONFIG_PATH}/scenarios" || exit - mkdir -p "${CROWDSEC_CONFIG_PATH}/parsers" || exit mkdir -p "${CROWDSEC_CONFIG_PATH}/postoverflows" || exit mkdir -p "${CROWDSEC_CONFIG_PATH}/collections" || exit mkdir -p "${CROWDSEC_CONFIG_PATH}/patterns" || exit mkdir -p "${CSCLI_FOLDER}" || exit - install -v -m 644 -D ./config/prod.yaml "${CROWDSEC_CONFIG_PATH}" || exit + #tmp + mkdir -p /tmp/data + mkdir -p /etc/crowdsec/hub/ + install -v -m 600 -D "./config/${CLIENT_SECRETS}" "${CROWDSEC_CONFIG_PATH}" || exit + install -v -m 600 -D "./config/${LAPI_SECRETS}" "${CROWDSEC_CONFIG_PATH}" || exit + + ## end tmp + + install -v -m 644 -D ./config/config.yaml "${CROWDSEC_CONFIG_PATH}" || exit install -v -m 644 -D ./config/dev.yaml "${CROWDSEC_CONFIG_PATH}" || exit + install -v -m 644 -D ./config/user.yaml "${CROWDSEC_CONFIG_PATH}" || exit install -v -m 644 -D ./config/acquis.yaml "${CROWDSEC_CONFIG_PATH}" || exit install -v -m 644 -D ./config/profiles.yaml "${CROWDSEC_CONFIG_PATH}" || exit - install -v -m 600 -D ./config/api.yaml "${CROWDSEC_CONFIG_PATH}" || exit install -v -m 644 -D ./config/simulation.yaml "${CROWDSEC_CONFIG_PATH}" || exit mkdir -p ${PID_DIR} || exit - PID=${PID_DIR} DATA=${CROWDSEC_DATA_DIR} CFG=${CROWDSEC_CONFIG_PATH} envsubst '$CFG $PID $DATA' < ./config/prod.yaml > ${CROWDSEC_CONFIG_PATH}"/default.yaml" PID=${PID_DIR} DATA=${CROWDSEC_DATA_DIR} CFG=${CROWDSEC_CONFIG_PATH} envsubst '$CFG $PID $DATA' < ./config/user.yaml > ${CROWDSEC_CONFIG_PATH}"/user.yaml" - CFG=${CROWDSEC_CONFIG_PATH} PID=${PID_DIR} BIN=${CROWDSEC_BIN_INSTALLED} envsubst '$CFG $PID $BIN' < ./config/crowdsec.service > "${SYSTEMD_PATH_FILE}" + if [[ ${DOCKER_MODE} == "false" ]]; then + CFG=${CROWDSEC_CONFIG_PATH} PID=${PID_DIR} BIN=${CROWDSEC_BIN_INSTALLED} envsubst '$CFG $PID $BIN' < ./config/crowdsec.service > "${SYSTEMD_PATH_FILE}" + fi install_bins - install_plugins - systemctl daemon-reload + + if [[ ${DOCKER_MODE} == "false" ]]; then + systemctl daemon-reload + fi } update_bins() { log_info "Only upgrading binaries" delete_bins install_bins - install_plugins_bin log_info "Upgrade finished" systemctl restart crowdsec } @@ -347,7 +329,7 @@ update_full() { fi log_info "Backing up existing configuration" - ${CSCLI_BIN_INSTALLED} backup save ${BACKUP_DIR} + ${CSCLI_BIN_INSTALLED} config backup ${BACKUP_DIR} log_info "Saving default database content" cp /var/lib/crowdsec/data/crowdsec.db ${BACKUP_DIR}/crowdsec.db log_info "Cleanup existing crowdsec configuration" @@ -355,8 +337,8 @@ update_full() { log_info "Installing crowdsec" install_crowdsec log_info "Restoring configuration" - ${CSCLI_BIN_INSTALLED} update - ${CSCLI_BIN_INSTALLED} backup restore ${BACKUP_DIR} + ${CSCLI_BIN_INSTALLED} hub update + ${CSCLI_BIN_INSTALLED} config restore ${BACKUP_DIR} log_info "Restoring saved database" cp ${BACKUP_DIR}/crowdsec.db /var/lib/crowdsec/data/crowdsec.db log_info "Finished, restarting" @@ -367,7 +349,16 @@ install_bins() { log_info "Installing crowdsec binaries" install -v -m 755 -D "${CROWDSEC_BIN}" "${CROWDSEC_BIN_INSTALLED}" || exit install -v -m 755 -D "${CSCLI_BIN}" "${CSCLI_BIN_INSTALLED}" || exit - install_plugins_bin || exit + symlink_bins +} + +symlink_bins() { + if grep -q 
"${BIN_INSTALL_PATH}" <<< $PATH; then + log_dbg "${BIN_INSTALL_PATH} found in PATH" + else + ln -s "${CSCLI_BIN_INSTALLED}" /usr/bin/cscli + ln -s "${CROWDSEC_BIN_INSTALLED}" /usr/bin/crowdsec + fi } delete_bins() { @@ -380,9 +371,13 @@ delete_bins() { uninstall_crowdsec() { systemctl stop crowdsec.service systemctl disable crowdsec.service - ${CSCLI_BIN} dashboard stop --remove + ${CSCLI_BIN} dashboard remove -f -y delete_bins - delete_plugins + + # tmp + rm -rf /tmp/data/ + ## end tmp + rm -rf ${CROWDSEC_PATH} || echo "" rm -f ${CROWDSEC_LOG_FILE} || echo "" rm -f ${CROWDSEC_DB_PATH} || echo "" @@ -393,14 +388,8 @@ uninstall_crowdsec() { } -setup_cron_pull() { - cp ./config/crowdsec_pull /etc/cron.d/ -} - - main() { - if [[ "$1" == "backup_to_dir" ]]; then backup_to_dir @@ -456,9 +445,6 @@ main() { fi log_info "installing crowdsec" install_crowdsec - # api register - ${CSCLI_BIN_INSTALLED} api register >> /etc/crowdsec/config/api.yaml || ${CSCLI_BIN_INSTALLED} api reset >> /etc/crowdsec/config/api.yaml || log_err "unable to register, skipping crowdsec api registration" - log_info "Crowdsec api registered" return fi @@ -470,26 +456,11 @@ main() { exit 1 fi - if [[ ${SILENT} == "false" ]]; then - which ${WHIPTAIL_BIN} >/dev/null - if [ $? -ne 0 ]; then - which ${DIALOG_BIN} >/dev/null - if [ $? -ne 0 ]; then - log_err "please install whiptail or dialog to run interactive wizard" - exit 1 - else - DBOX_BIN=${DIALOG_BIN} - fi - else - DBOX_BIN=${WHIPTAIL_BIN} - fi - fi; - ## Do make build before installing (as non--root) in order to have the binary and then install crowdsec as root log_info "installing crowdsec" install_crowdsec log_info "configuring ${CSCLI_BIN_INSTALLED}" - ${CSCLI_BIN_INSTALLED} update > /dev/null 2>&1 || (log_err "fail to update crowdsec hub. exiting" && exit 1) + ${CSCLI_BIN_INSTALLED} hub update > /dev/null 2>&1 || (log_err "fail to update crowdsec hub. exiting" && exit 1) # detect running services detect_services @@ -513,17 +484,15 @@ main() { # api register - ${CSCLI_BIN_INSTALLED} api register >> /etc/crowdsec/config/api.yaml || ${CSCLI_BIN_INSTALLED} api reset >> /etc/crowdsec/config/api.yaml || log_err "unable to register, skipping crowdsec api registration" - log_info "Crowdsec api registered" - - if [[ ${SILENT} == "false" ]]; then - (systemctl start crowdsec && log_info "crowdsec started") || log_err "unable to start crowdsec. 
exiting" - ${CSCLI_BIN_INSTALLED} api pull - fi; - # Set the cscli api pull cronjob - setup_cron_pull - - systemctl enable crowdsec && log_info "crowdsec daemon enabled" || log_err "Unable to enable crowdsec daemon" + ${CSCLI_BIN_INSTALLED} machines add --force "$(cat /etc/machine-id)" --password "$(cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 32 | head -n 1)" -f "${CROWDSEC_CONFIG_PATH}/${CLIENT_SECRETS}" + log_info "Crowdsec LAPI registered" + + ${CSCLI_BIN_INSTALLED} capi register + log_info "Crowdsec CAPI registered" + + systemctl enable crowdsec + systemctl start crowdsec + log_info "Enabling and starting crowdsec daemon" return fi @@ -555,6 +524,7 @@ usage() { echo " ./wizard.sh --binupgrade Upgrade crowdsec/cscli binaries" echo " ./wizard.sh --upgrade Perform a full upgrade and try to migrate configs" echo " ./wizard.sh --unattended Install in unattended mode, no question will be asked and defaults will be followed" + echo " ./wizard.sh --docker-mode Will install crowdsec without systemd and generate random machine-id" echo " ./wizard.sh -r|--restore Restore saved configurations from ${BACKUP_DIR} to ${CROWDSEC_CONFIG_PATH}" echo " ./wizard.sh -b|--backup Backup existing configurations to ${BACKUP_DIR}" @@ -589,6 +559,11 @@ do ACTION="bininstall" shift # past argument ;; + --docker-mode) + DOCKER_MODE="true" + ACTION="bininstall" + shift # past argument + ;; -b|--backup) ACTION="backup_to_dir" shift # past argument